hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aff207ea4a0a28746ce77ebdc560cc119cfda66c | 1,719 | py | Python | apps/blog/resources.py | ride90/eve_features | 6cff35f8c4711ae030d6157565e4f0e77a92ab98 | [
"MIT"
] | 1 | 2021-10-03T05:30:46.000Z | 2021-10-03T05:30:46.000Z | apps/blog/resources.py | ride90/eve_features | 6cff35f8c4711ae030d6157565e4f0e77a92ab98 | [
"MIT"
] | null | null | null | apps/blog/resources.py | ride90/eve_features | 6cff35f8c4711ae030d6157565e4f0e77a92ab98 | [
"MIT"
] | null | null | null | RESOURCES = {
'posts': {
'schema': {
'title': {
'type': 'string',
'minlength': 3,
'maxlength': 30,
'required': True,
'unique': False
},
'body': {
'type': 'string',
'required': True,
'unique': True
},
'published': {
'type': 'boolean',
'default': False
},
'category': {
'type': 'objectid',
'data_relation': {
'resource': 'categories',
'field': '_id',
'embeddable': True
},
'required': True
},
'tags': {
'type': 'list',
'default': [],
'schema': {
'type': 'objectid',
'data_relation': {
'resource': 'tags',
'field': '_id',
'embeddable': True
}
}
}
},
},
'categories': {
'schema': {
'name': {
'type': 'string',
'minlength': 2,
'maxlength': 10,
'required': True,
'unique': True
}
},
'item_title': 'category',
},
'tags': {
'schema': {
'name': {
'type': 'string',
'minlength': 2,
'maxlength': 10,
'required': True,
'unique': True
}
}
}
}
| 25.279412 | 45 | 0.275742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 528 | 0.307155 |
aff28c5107917776458b32939693487f544f849c | 1,198 | py | Python | tests/no_train_or_test/model.py | NehzUx/autodl | c80fdc4b297ed1ec2b9e6911d313f1fe31d83cb9 | [
"Apache-2.0"
] | 25 | 2018-09-26T14:07:11.000Z | 2021-12-02T15:19:08.000Z | tests/no_train_or_test/model.py | NehzUx/autodl | c80fdc4b297ed1ec2b9e6911d313f1fe31d83cb9 | [
"Apache-2.0"
] | 8 | 2018-11-23T15:35:28.000Z | 2020-02-27T14:55:11.000Z | tests/no_train_or_test/model.py | NehzUx/autodl | c80fdc4b297ed1ec2b9e6911d313f1fe31d83cb9 | [
"Apache-2.0"
] | 5 | 2019-03-05T11:05:59.000Z | 2020-01-08T13:05:35.000Z | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified by: Zhengying Liu, Isabelle Guyon
"""An example of code submission for the AutoDL challenge.
It implements 3 compulsory methods: __init__, train, and test.
model.py follows the template of the abstract class algorithm.py found
in folder AutoDL_ingestion_program/.
To create a valid submission, zip model.py together with an empty
file called metadata (this just indicates your submission is a code submission
and has nothing to do with the dataset metadata.
"""
class Model(object):
    """Fully connected neural network with no hidden layer.

    Stub submission model: it provides the ``__init__`` part of the
    challenge's required interface (a full submission also implements
    ``train`` and ``test``).
    """

    def __init__(self, metadata):
        """Accept the dataset metadata; this stub keeps no state."""
        return
| 37.4375 | 78 | 0.769616 | 123 | 0.102671 | 0 | 0 | 0 | 0 | 0 | 0 | 1,115 | 0.930718 |
aff371e591da00d7135ed8ebb4ce8865d7e19a6f | 1,183 | py | Python | vavs_project/fbdata/generic.py | valuesandvalue/valuesandvalue | 7d7602d0b620b38dcb761c63e74077a00bae891f | [
"MIT"
] | 1 | 2016-03-17T10:00:28.000Z | 2016-03-17T10:00:28.000Z | vavs_project/fbdata/generic.py | valuesandvalue/valuesandvalue | 7d7602d0b620b38dcb761c63e74077a00bae891f | [
"MIT"
] | null | null | null | vavs_project/fbdata/generic.py | valuesandvalue/valuesandvalue | 7d7602d0b620b38dcb761c63e74077a00bae891f | [
"MIT"
] | null | null | null | # fbdata.generic
# FBDATA
from .models import (
FBAlbum,
FBEvent,
FBLink,
FBPhoto,
FBStatus,
FBVideo,
StreamPost
)
# Registry mapping a Facebook object-type string to its model class.
_FB_CLASSES = dict(
    album=FBAlbum,
    event=FBEvent,
    link=FBLink,
    photo=FBPhoto,
    status=FBStatus,
    video=FBVideo,
    post=StreamPost,
)


def class_for_type(object_type):
    """Return the model class registered for object_type, or None if unknown."""
    return _FB_CLASSES.get(object_type)


def album_exists(user, object_id):
    """True when this Facebook album is already stored for the user."""
    matches = FBAlbum.objects.filter(user=user, object_id=object_id)
    return matches.exists()


def event_exists(user, object_id):
    """True when this Facebook event is already stored for the user."""
    matches = FBEvent.objects.filter(user=user, event_id=object_id)
    return matches.exists()


def link_exists(user, link_id):
    """True when this Facebook link is already stored for the user."""
    matches = FBLink.objects.filter(user=user, link_id=link_id)
    return matches.exists()


def post_exists(user, post_id):
    """True when this stream post is already stored for the user."""
    matches = StreamPost.objects.filter(user=user, post_id=post_id)
    return matches.exists()


def photo_exists(user, object_id):
    """True when this Facebook photo is already stored for the user."""
    matches = FBPhoto.objects.filter(user=user, object_id=object_id)
    return matches.exists()


def status_exists(user, status_id):
    """True when this Facebook status is already stored for the user."""
    matches = FBStatus.objects.filter(user=user, status_id=status_id)
    return matches.exists()


def video_exists(user, video_id):
    """True when this Facebook video is already stored for the user."""
    matches = FBVideo.objects.filter(user=user, video_id=video_id)
    return matches.exists()
| 25.170213 | 75 | 0.705833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.060862 |
aff5fee585f36426f3564f43fe4ffe9c8b477bbf | 5,353 | py | Python | C5G2-3D/selfscatt.py | robfairh/npre555-cp03 | 2aea7ae2df4a720c5d09003f98192f8986a6d107 | [
"BSD-3-Clause"
] | null | null | null | C5G2-3D/selfscatt.py | robfairh/npre555-cp03 | 2aea7ae2df4a720c5d09003f98192f8986a6d107 | [
"BSD-3-Clause"
] | 2 | 2021-01-04T12:29:30.000Z | 2021-02-01T11:13:45.000Z | C5G2-3D/selfscatt.py | robfairh/npre555-cp03 | 2aea7ae2df4a720c5d09003f98192f8986a6d107 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import os
from os import path
import shutil
'''
Cross-sections from Cavarec, 2014..
Materials:
- uo2 - U - UO2 Fuel
- mox3 - P1 - 4.3% MOX Fuel (outer)
- mox2 - P2 - 7.0% MOX Fuel
- mox1 - P3 - 8.7% MOX Fuel (inner)
- gtub - X - Guide Tube
- reflec - R - Reflector
- fchamb - C - Moveable Fission Chamber
'''
def _two_group_set(diffcoef, absorption, nu_fission, scattering):
    '''
    Pack two-group constants into the standard cross-section dictionary.

    scattering holds, in order, SS: S11, S22 and SR: S12, S21.
    '''
    return {
        'DIFFCOEF': np.array(diffcoef),
        'ABS': np.array(absorption),
        'NSF': np.array(nu_fission),
        'SP0': np.array(scattering)
    }


def uo2_properties():
    '''
    Returns:
    --------
    mat: [dictionary]
        dictionary containing the cross-sections of the uo2 fuel pin
    '''
    return _two_group_set([1.20, 0.40], [0.010, 0.100],
                          [0.005, 0.125], [0.54, 0.02, 0.00, 1.00])


def mox1_properties():
    '''
    Returns:
    --------
    mat: [dictionary]
        dictionary containing the cross-sections of the mox1 fuel pin
    '''
    return _two_group_set([1.20, 0.40], [0.015, 0.300],
                          [0.0075, 0.45], [0.52, 0.015, 0.00, 0.76])


def mox2_properties():
    '''
    Returns:
    --------
    mat: [dictionary]
        dictionary containing the cross-sections of the mox2 fuel pin
    '''
    return _two_group_set([1.20, 0.40], [0.015, 0.250],
                          [0.0075, 0.375], [0.52, 0.015, 0.00, 0.83])


def mox3_properties():
    '''
    Returns:
    --------
    mat: [dictionary]
        - dictionary containing the cross-sections of the mox3 fuel pin
    '''
    return _two_group_set([1.20, 0.40], [0.015, 0.200],
                          [0.0075, 0.300], [0.52, 0.015, 0.00, 0.90])


def fission_properties():
    '''
    Returns:
    --------
    mat: [dictionary]
        dictionary containing the cross-sections of the fission chamber
    '''
    return _two_group_set([1.20, 0.40], [0.001, 0.02],
                          [1e-7, 3e-6], [0.56, 0.025, 0.00, 1.20])


def guide_properties():
    '''
    Returns:
    --------
    mat: [dictionary]
        dictionary containing the cross-sections of the guide tube
    '''
    return _two_group_set([1.20, 0.40], [0.001, 0.02],
                          [0.0, 0.0], [0.56, 0.025, 0.00, 1.20])


def reflector_properties():
    '''
    Returns:
    --------
    mat: [dictionary]
        dictionary containing the cross-sections of the reflector
    '''
    return _two_group_set([1.20, 0.40], [0.001, 0.04],
                          [0.0, 0.0], [0.56, 0.05, 0.00, 2.30])
def homogenizer(XS, vi):
    '''
    This function homogenizes the cross-sections of different materials.

    Parameters:
    -----------
    XS: [dictionary]
        dictionary with the material cross-sections of the different materials
        the primary keys are the names of the materials; a material may omit
        a cross-section key, in which case it simply contributes nothing
    vi: [array]
        each element is the volume fraction of each material, in the same
        order as the materials were inserted into XS

    Returns:
    -------
    HXS: [dictionary]
        dictionary with the volume-weighted homogenized cross-sections
    '''
    # Original version pre-filled HXS with zero arrays only to overwrite
    # them, and abused try/except KeyError with a no-op `value += 0`;
    # an explicit membership test over paired (fraction, material) is clearer.
    HXS = {}
    for key in ('DIFFCOEF', 'ABS', 'NSF', 'SP0'):
        total = 0
        for fraction, mat in zip(vi, XS.values()):
            if key in mat:
                total = total + mat[key] * fraction
        HXS[key] = total
    return HXS
def homogenizes_uo2():
    '''
    This function specifies the volume fractions of the materials in the
    UO2 assembly and homogenizes the cross-sections.

    Returns:
    -------
    mat: [dictionary]
        dictionary with the homogeneous cross-sections
    '''
    # 17x17 lattice: fuel pins, 24 guide tubes, 1 fission chamber
    lattice = 17 * 17
    fractions = np.array([lattice - 25, 24, 1]) / lattice
    materials = {
        'uo2': uo2_properties(),
        'gtube': guide_properties(),
        'fcham': fission_properties(),
    }
    return homogenizer(materials, fractions)


def homogenizes_mox():
    '''
    This function specifies the volume fractions of the materials in the
    MOX assembly and homogenizes the cross-sections.

    Returns:
    -------
    mat: [dictionary]
        dictionary with the homogeneous cross-sections
    '''
    # mox1, mox2, mox3 fuel pins, 24 guide tubes, 1 fission chamber
    lattice = 17 * 17
    fractions = np.array([100, 123, 66, 24, 1]) / lattice
    materials = {
        'mox1': mox1_properties(),
        'mox2': mox2_properties(),
        'mox3': mox3_properties(),
        'gtube': guide_properties(),
        'fcham': fission_properties(),
    }
    return homogenizer(materials, fractions)


if __name__ == "__main__":
    # Print the scattering blocks as a quick sanity check.
    for summary in (homogenizes_uo2(), homogenizes_mox(),
                    reflector_properties()):
        print(summary['SP0'])
| 23.375546 | 78 | 0.53839 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,677 | 0.500093 |
aff709693fd92476b3e892543ca1fbdd4a69c8c9 | 139 | py | Python | pyeccodes/defs/grib2/dimensionType_table.py | ecmwf/pyeccodes | dce2c72d3adcc0cb801731366be53327ce13a00b | [
"Apache-2.0"
] | 7 | 2020-04-14T09:41:17.000Z | 2021-08-06T09:38:19.000Z | pyeccodes/defs/grib2/dimensionType_table.py | ecmwf/pyeccodes | dce2c72d3adcc0cb801731366be53327ce13a00b | [
"Apache-2.0"
] | null | null | null | pyeccodes/defs/grib2/dimensionType_table.py | ecmwf/pyeccodes | dce2c72d3adcc0cb801731366be53327ce13a00b | [
"Apache-2.0"
] | 3 | 2020-04-30T12:44:48.000Z | 2020-12-15T08:40:26.000Z | def load(h):
return ({'abbr': 'layer', 'code': 0, 'title': 'layer'},
{'abbr': 'missing', 'code': 255, 'title': 'missing'})
| 34.75 | 65 | 0.489209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.503597 |
aff85f109666d7cdf9e65173eda851368c39694c | 220 | py | Python | tests/test_layers/test_2p5d/checks_2p5d/common.py | RichardoLuo/ColossalAI | 797a9dc5a9e801d7499b8667c3ef039a38aa15ba | [
"Apache-2.0"
] | 1,630 | 2021-10-30T01:00:27.000Z | 2022-03-31T23:02:41.000Z | tests/test_layers/test_2p5d/checks_2p5d/common.py | RichardoLuo/ColossalAI | 797a9dc5a9e801d7499b8667c3ef039a38aa15ba | [
"Apache-2.0"
] | 166 | 2021-10-30T01:03:01.000Z | 2022-03-31T14:19:07.000Z | tests/test_layers/test_2p5d/checks_2p5d/common.py | RichardoLuo/ColossalAI | 797a9dc5a9e801d7499b8667c3ef039a38aa15ba | [
"Apache-2.0"
] | 253 | 2021-10-30T06:10:29.000Z | 2022-03-31T13:30:06.000Z | import torch
TESSERACT_DIM = 2
TESSERACT_DEP = 2
BATCH_SIZE = 8
SEQ_LENGTH = 8
HIDDEN_SIZE = 8
NUM_CLASSES = 8
VOCAB_SIZE = 16
IMG_SIZE = 16
def check_equal(A, B):
assert torch.allclose(A, B, rtol=1e-5, atol=1e-2) | 15.714286 | 53 | 0.718182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
affc39443576305ee2e0ade1abbbe279cdbe06bc | 627 | py | Python | default_values.py | Omar-X/App_init | 9de2fff36a8a5d2911e5dd495b62a7ce91d4a68e | [
"MIT"
] | 1 | 2021-10-11T09:40:27.000Z | 2021-10-11T09:40:27.000Z | default_values.py | Omar-X/App_init | 9de2fff36a8a5d2911e5dd495b62a7ce91d4a68e | [
"MIT"
] | null | null | null | default_values.py | Omar-X/App_init | 9de2fff36a8a5d2911e5dd495b62a7ce91d4a68e | [
"MIT"
] | null | null | null | import os
# getting path so you can run the script as `python3 App_init` or `python3 .`
if os.getcwd()[-8:] != "App_init":
    default_path = "App_init/"
    print(default_path)
else:
    default_path = ""
# Read the built-in module list. The original code removed comment lines by
# calling list.remove() while iterating (which skips consecutive comments)
# and raised IndexError on blank lines emptied by the first pass; a single
# filtering comprehension avoids both problems, and the context manager
# closes the previously-leaked file handle.
with open(f"{default_path}default_modules.txt", "r") as modules_file:
    default_modules = [
        line.replace("\n", "").replace(" ", "")
        for line in modules_file
        if not line.startswith("#")
    ]
| 27.26087 | 77 | 0.639553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.360447 |
affc7c1ad9927ed457d4cd8319b35626ca50bd99 | 4,770 | py | Python | city_scrapers/spiders/chi_ssa_21.py | Anphisa/city-scrapers | d8daf6c7a8207efc209277c017faffda430b2ef3 | [
"MIT"
] | null | null | null | city_scrapers/spiders/chi_ssa_21.py | Anphisa/city-scrapers | d8daf6c7a8207efc209277c017faffda430b2ef3 | [
"MIT"
] | null | null | null | city_scrapers/spiders/chi_ssa_21.py | Anphisa/city-scrapers | d8daf6c7a8207efc209277c017faffda430b2ef3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import dateutil.parser
from city_scrapers.constants import COMMISSION
from city_scrapers.spider import Spider
class ChiSsa21Spider(Spider):
    """Scrapes commission meetings for Chicago SSA #21 (Lincoln Square Ravenswood)."""
    name = 'chi_ssa_21'
    agency_name = 'Chicago Special Service Area #21 Lincoln Square Ravenswood'
    timezone = 'America/Chicago'
    allowed_domains = ['www.lincolnsquare.org']
    start_urls = ['http://www.lincolnsquare.org/SSA-no-21-Commission-meetings']

    def parse(self, response):
        """
        `parse` should always `yield` a dict that follows the Event Schema
        <https://city-bureau.github.io/city-scrapers/06_event_schema.html>.
        Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping
        needs.
        """
        # Each meeting is one <p> element inside the page's content block.
        for item in response.xpath('//div[@id="content-327081"]/p'):
            data = {
                '_type': 'event',
                'name': 'Lincoln Square Neighborhood Improvement Program',
                'event_description': self._parse_description(item),
                'classification': COMMISSION,
                'start': self._parse_start(item),
                'end': self._parse_end(item),
                'all_day': False,
                'location': self._parse_location(item),
                'documents': self._parse_documents(item),
                'sources': [{
                    'url': response.url,
                    'note': ''
                }],
            }
            # status/id are derived from the assembled data, so set them last
            data['status'] = self._generate_status(data, text='')
            data['id'] = self._generate_id(data)
            yield data

    def _parse_description(self, item):
        """
        Parse or generate event description.

        Joins each agenda topic's bold title with its plain-text detail,
        one topic per line; returns '' when no agenda <ul> follows the item.
        """
        description = ''
        # The itinerary of the meeting is always stored in the <ul>
        # element immediately following
        detailElement = item.xpath('following-sibling::*[1]')
        name = detailElement.xpath('name()').extract_first()
        if (name == 'ul'):
            topics = list(
                map(
                    lambda topic: ': '.join(
                        filter(
                            # Remove any strings that are empty
                            None,
                            [
                                # Title of topic
                                ''.join(topic.xpath('strong/text()').extract()).strip(),
                                # Detail of topic
                                ''.join(topic.xpath('text()').extract()).strip()
                            ]
                        )
                    ),
                    detailElement.xpath('li')
                )
            )
            description = '\n'.join(topics)
        return description

    def _parse_start(self, item):
        """
        Parse start date and time.
        """
        # The page lists only dates; 9:00 AM is hard-coded here (presumably
        # the commission's regular start time -- confirm against the site).
        startTime = self._parse_date(item)
        startTime = startTime.replace(hour=9, minute=0)
        ret = {'date': startTime.date(), 'time': startTime.time(), 'note': None}
        return ret

    def _parse_end(self, item):
        """
        Parse end date and time.
        """
        # End time is not published; assume two hours after the 9:00 start.
        endTime = self._parse_date(item)
        endTime = endTime.replace(hour=11, minute=0)
        ret = {
            'date': endTime.date(),
            'time': endTime.time(),
            'note': 'estimated 2 hours after start time'
        }
        return ret

    def _parse_date(self, item):
        # The meeting date is the bolded text of the paragraph.
        rawDate = item.xpath('strong/text()').extract_first()
        return dateutil.parser.parse(rawDate)

    def _parse_location(self, item):
        """
        Parse or generate location. Latitude and longitude can be
        left blank and will be geocoded later.
        """
        defaultLocation = 'Bistro Campagne, 4518 N. Lincoln Avenue'
        # If location has changed, this is where it is noted
        location = ''.join(item.xpath('em//text()').extract()).strip()
        if not location:
            location = defaultLocation
        # Extract name of location if possible
        # (a single comma is assumed to separate "name, address")
        splitLocation = location.split(',')
        address = ''
        if len(splitLocation) == 2:
            address = splitLocation[1].strip()
            name = splitLocation[0].strip()
        else:
            address = location.strip()
            name = ''
        # Append 'Chicago, IL' if not already present
        if 'chicago' not in address.lower():
            address += ', Chicago, IL'
        return {
            'address': address,
            'name': name,
            'neighborhood': '',
        }

    def _parse_documents(self, item):
        """
        Parse or generate documents.

        A link inside the meeting paragraph is treated as the minutes file.
        """
        url = item.xpath('a/@href').extract_first()
        if url:
            return [{'url': url, 'note': 'Minutes'}]
        return []
| 30.382166 | 88 | 0.509644 | 4,632 | 0.971069 | 1,131 | 0.237107 | 0 | 0 | 0 | 0 | 1,612 | 0.337945 |
affcb8500f5c823d8eb2fe67a550af89dc3aca90 | 5,353 | py | Python | misp/utils/visual_utils.py | zhoudaxia233/misp | c0d36e3f1a1eeac417d6bfff015ea5430f1d0de5 | [
"MIT"
] | 2 | 2019-12-21T10:46:57.000Z | 2019-12-22T14:01:23.000Z | misp/utils/visual_utils.py | zhoudaxia233/misp | c0d36e3f1a1eeac417d6bfff015ea5430f1d0de5 | [
"MIT"
] | null | null | null | misp/utils/visual_utils.py | zhoudaxia233/misp | c0d36e3f1a1eeac417d6bfff015ea5430f1d0de5 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator, AutoMinorLocator
from sklearn.metrics import confusion_matrix
import torch
import torch.nn as nn
from typing import Dict, Tuple
from .utils import predict
__all__ = ['get_heatmap_tensor', 'detransform', 'plot_confusion_matrix', 'plot_stats']
def _get_activations(store: Dict):
def hook(module, input, output):
store['activations'] = output.detach()
return hook
def _get_grads(store: Dict):
def hook(module, grad_input, grad_output):
if isinstance(grad_output, tuple):
store['grads'] = grad_output[0].detach()
elif isinstance(grad_output, torch.Tensor):
store['grads'] = grad_output.detach()
else:
raise Exception("Something wrong with the grad_output.")
return hook
def _hook(model: nn.Module, layer: nn.Module, img: torch.Tensor, category: int, device: torch.device):
    '''Get the activations and grads of the layer of the model.

    Runs one forward pass on a single image and backpropagates the score of
    ``category``, capturing the layer's forward activations and the gradient
    of its output via temporary hooks.

    Args:
        model: network to probe; moved to ``device`` here.
        layer: sub-module whose activations/gradients are wanted.
        img: single image tensor; a batch axis is prepended before the pass.
        category: index of the output class whose score is backpropagated.
        device: device on which the forward/backward pass runs.

    Returns:
        Tuple ``(activations, grads)`` recorded by the hooks.
    '''
    model.to(device)
    # register hooks
    store = {'activations': None, 'grads': None}
    forward_hook = layer.register_forward_hook(_get_activations(store))
    backward_hook = layer.register_backward_hook(_get_grads(store))
    try:
        # trigger them
        model.eval()
        # img[None, ...] adds the batch dimension the model expects
        one_batch_img = img[None, ...].to(device)
        pred = model(one_batch_img)
        # backprop only the selected class score so the grad hook fires
        pred[0, category].backward()
    finally:
        # remove hooks
        forward_hook.remove()
        backward_hook.remove()
    return store['activations'], store['grads']
def get_heatmap_tensor(model: nn.Module, layer: nn.Module, dataset: torch.utils.data.Dataset, idx: int,
                       device: torch.device, is_test: bool = False):
    '''Compute a Grad-CAM-style activation heatmap for one dataset sample.

    Args:
        model: network to explain.
        layer: layer whose activations/gradients are captured by ``_hook``.
        dataset: dataset yielding ``(image, label)`` pairs.
        idx: index of the sample to explain.
        device: device used for the forward/backward pass.
        is_test: when True, backprop the model's own predicted class
            (via ``predict``) instead of the ground-truth label.

    Returns:
        A tensor of non-negative channel-averaged weights normalized to [0, 1].
    '''
    if not is_test:
        acts, grads = _hook(model, layer, dataset[idx][0], dataset[idx][1], device)
    else:
        pred_cls = predict(model, dataset[idx][0], device)
        acts, grads = _hook(model, layer, dataset[idx][0], pred_cls, device)
    acts = acts.cpu()
    grads = grads.cpu()
    # simulate Global Average Pooling layer
    pooled_grads = torch.mean(grads, dim=[0, 2, 3])
    # weight the channels by corresponding gradients (NxCxHxW, so dim 1 is the Channel dimension)
    for i in range(acts.size(dim=1)):
        acts[:, i, :, :] *= pooled_grads[i]
    # average the channels of the activations
    heatmap = torch.mean(acts, dim=1).squeeze()  # squeeze: the dimensions of input of size 1 are removed
    heatmap_relu = np.maximum(heatmap, 0)
    # normalize
    heatmap_relu /= torch.max(heatmap_relu)
    return heatmap_relu
def detransform(img, mean: Tuple = (0.485, 0.456, 0.406), std: Tuple = (0.229, 0.224, 0.225)):
    '''Detransform an img of a pytorch dataset.

    Reverses torchvision's Normalize(mean, std) and ToTensor() transforms so
    a CxHxW tensor can be displayed, returning an HxWxC numpy array.

    Args:
        img: normalized CxHxW image tensor.
        mean: per-channel means used by the Normalize transform.
        std: per-channel standard deviations used by the Normalize transform.
    '''
    mean = torch.tensor(mean)
    std = torch.tensor(std)
    # get the image back in [0,1] range (reverse Normalize(mean, std) process)
    denorm_img = img * std[..., None, None] + mean[..., None, None]
    # CxHxW -> HxWxC (reverse ToTensor() process). Use torch's permute here:
    # np.transpose() on a Tensor falls back to np.asarray(...) because
    # Tensor.transpose() rejects an axes tuple, yielding an ndarray on which
    # the original code's `.numpy()` call raised AttributeError.
    return denorm_img.permute(1, 2, 0).numpy()
def plot_confusion_matrix(y_true, y_pred, classes, cmap=plt.cm.Blues):
    """
    Plot a labelled confusion matrix for the given predictions.

    ORIGINAL_SOURCE: https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html

    Args:
        y_true: ground-truth labels.
        y_pred: predicted labels, same length as ``y_true``.
        classes: display names used for the axis tick labels.
        cmap: matplotlib colormap for the matrix image.

    Returns:
        The matplotlib Axes holding the plot.
    """
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title='Confusion matrix',
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    # Cells darker than half the max get white text for contrast.
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], 'd'),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax
def plot_stats(stats, typ='loss', y_range=None, filename=None):
    """Plot per-epoch training curves from ``stats`` and save the figure.

    Args:
        stats: dict of equal-length per-epoch series; the keys read depend
            on ``typ``: 'train_loss'/'val_loss', 'train_acc'/'val_acc' or 'lr'.
        typ: which curve set to plot: 'loss', 'acc' or 'lr'.
        y_range: optional (ymin, ymax) passed to plt.ylim.
        filename: output file name; defaults to the value of ``typ``.

    Raises:
        ValueError: if ``typ`` is not one of {loss, acc, lr}.
    """
    # all series are assumed to have the same length (one entry per epoch)
    epochs = len(list(stats.values())[0])
    x = range(epochs)
    if typ == 'loss':
        plt.plot(x, stats['train_loss'], label='train')
        plt.plot(x, stats['val_loss'], label='val')
        plt.ylabel('Loss')
        plt.ylim(y_range)
    elif typ == 'acc':
        plt.plot(x, stats['train_acc'], label='train')
        plt.plot(x, stats['val_acc'], label='val')
        plt.ylabel('Accuracy')
        plt.ylim(y_range)
    elif typ == 'lr':
        plt.plot(x, stats['lr'], label='lr')
        plt.ylabel('Learning Rate')
        plt.ylim(y_range)
    else:
        raise ValueError('Typ should be one of {loss, acc, lr}.')
    # NOTE(review): plt.axes() with no arguments is deprecated in newer
    # matplotlib (it may create a fresh Axes); plt.gca() looks like the
    # intent here -- confirm before upgrading matplotlib.
    plt.axes().xaxis.set_major_locator(MaxNLocator(nbins='auto', integer=True, min_n_ticks=10))
    plt.axes().xaxis.set_minor_locator(AutoMinorLocator())
    plt.xlabel('Epoch')
    plt.legend()
    if not filename:
        plt.savefig(typ)
    else:
        plt.savefig(filename)
| 33.879747 | 109 | 0.636279 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,337 | 0.249766 |
affd5e2f205058bf88abc9754f3558ca73cf1cfe | 6,732 | py | Python | infrastructure/__main__.py | jacopotagliabue/paas-data-ingestion | 81a9c81c3d1846a77be7f15648e994801e14450d | [
"MIT"
] | 30 | 2021-12-16T10:56:22.000Z | 2022-03-31T11:09:16.000Z | infrastructure/__main__.py | jacopotagliabue/paas-data-ingestion | 81a9c81c3d1846a77be7f15648e994801e14450d | [
"MIT"
] | null | null | null | infrastructure/__main__.py | jacopotagliabue/paas-data-ingestion | 81a9c81c3d1846a77be7f15648e994801e14450d | [
"MIT"
] | 3 | 2021-12-17T08:53:56.000Z | 2022-01-25T16:14:48.000Z | import pulumi
import pulumi_aws as aws
import pulumi_snowflake as snowflake
from my_snowflake_roles import MySnowflakeRoles
from my_snowflake_snowpipe import MySnowpipe
from my_lambda import MyLambda
PROJECT_NAME = pulumi.get_project()
STACK_NAME = pulumi.get_stack()
PREFIX = f'{PROJECT_NAME}-{STACK_NAME}'.lower().replace('_', '-')
# Constants
SF_DB_NAME = f'{PROJECT_NAME}_{STACK_NAME}'.upper()
SF_STREAM_SCHEMA_NAME = 'RAW'.upper()
SF_STREAM_LOGS_TABLE_NAME = 'LOGS'.upper()
SF_LOGS_STORAGE_INTEGRATION_NAME = f'{SF_DB_NAME}_LOGS_S3'.upper()
SF_LOGS_STAGE_NAME = f'{SF_DB_NAME}_LOGS_STAGE'.upper()
SF_LOGS_PIPE_NAME = f'{SF_DB_NAME}_LOGS_PIPE'.upper()
# S3 Bucket
s3_logs_bucket = aws.s3.Bucket(
'logs',
bucket=PREFIX,
acl='private',
)
# Snowflake
sf_database = snowflake.Database(
'Database',
name=SF_DB_NAME,
)
sf_warehouse = snowflake.Warehouse(
'Warehouse',
name=pulumi.Output.concat(sf_database.name, '_WH'),
warehouse_size='x-small',
auto_suspend=5,
auto_resume=True,
)
sf_roles = MySnowflakeRoles(
SF_DB_NAME,
database=sf_database,
warehouse=sf_warehouse,
)
sf_stream_schema = snowflake.Schema(
'Stream',
name=SF_STREAM_SCHEMA_NAME,
database=sf_database,
opts=pulumi.ResourceOptions(
parent=sf_database,
depends_on=[
sf_roles.read_only,
sf_roles.read_write,
],
),
)
sf_stream_logs_snowpipe = MySnowpipe(
'Logs',
prefix=PREFIX,
s3_bucket=s3_logs_bucket,
s3_data_prefix='stream_data',
s3_error_prefix='stream_errors',
database=sf_database,
schema=sf_stream_schema,
storage_integration_name=SF_LOGS_STORAGE_INTEGRATION_NAME,
stage_name=SF_LOGS_STAGE_NAME,
pipe_name=SF_LOGS_PIPE_NAME,
table_name=SF_STREAM_LOGS_TABLE_NAME,
table_columns=[
snowflake.TableColumnArgs(
name='SERVICE_ID',
type='VARCHAR(254)',
nullable=False,
),
snowflake.TableColumnArgs(
name='REQUEST_ID',
type='VARCHAR(36)',
nullable=True,
),
snowflake.TableColumnArgs(
name='REQUEST_TIMESTAMP',
type='TIMESTAMP_NTZ(9)',
nullable=True,
),
snowflake.TableColumnArgs(
name='RESPONSE_ID',
type='VARCHAR(36)',
nullable=True,
),
snowflake.TableColumnArgs(
name='RESPONSE_TIMESTAMP',
type='TIMESTAMP_NTZ(9)',
nullable=True,
),
snowflake.TableColumnArgs(
name='CLIENT_ID',
type='VARCHAR(36)',
nullable=True,
),
snowflake.TableColumnArgs(
name='DATA',
type='VARIANT',
nullable=False,
),
snowflake.TableColumnArgs(
name='LOG_ID',
type='VARCHAR(36)',
nullable=False,
),
snowflake.TableColumnArgs(
name='LOG_FILENAME',
type='VARCHAR(16777216)',
nullable=False,
),
snowflake.TableColumnArgs(
name='LOG_FILE_ROW_NUMBER',
type='NUMBER(20,0)',
nullable=False,
),
snowflake.TableColumnArgs(
name='LOG_TIMESTAMP',
type='TIMESTAMP_NTZ(9)',
nullable=False,
),
],
table_cluster_bies=[
'TO_DATE(REQUEST_TIMESTAMP)',
'SERVICE_ID',
],
copy_statement=f"""
COPY INTO {SF_DB_NAME}.{SF_STREAM_SCHEMA_NAME}.{SF_STREAM_LOGS_TABLE_NAME} (
SERVICE_ID,
REQUEST_ID,
REQUEST_TIMESTAMP,
RESPONSE_ID,
RESPONSE_TIMESTAMP,
CLIENT_ID,
DATA,
LOG_ID,
LOG_FILENAME,
LOG_FILE_ROW_NUMBER,
LOG_TIMESTAMP
)
FROM (
SELECT
NULLIF(SUBSTR(LOWER(TRIM($1:service:id::STRING)), 1, 254), ''),
NULLIF(SUBSTR(LOWER(TRIM($1:request:id::STRING)), 1, 36), ''),
TO_TIMESTAMP_NTZ($1:request:timestamp::INT, 3),
NULLIF(SUBSTR(LOWER(TRIM($1:response:id::STRING)), 1, 36), ''),
TO_TIMESTAMP_NTZ($1:response:timestamp::INT, 3),
NULLIF(SUBSTR(LOWER(TRIM($1:context:client_id::STRING)), 1, 36), ''),
$1,
LOWER(UUID_STRING('da69e958-fee3-428b-9dc3-e7586429fcfc', CONCAT(metadata$filename, ':', metadata$file_row_number))),
metadata$filename,
metadata$file_row_number,
TO_TIMESTAMP_NTZ(CONVERT_TIMEZONE('UTC', CURRENT_TIMESTAMP()))
FROM @{SF_DB_NAME}.{SF_STREAM_SCHEMA_NAME}.{SF_LOGS_STAGE_NAME}
)
""",
opts=pulumi.ResourceOptions(
parent=sf_stream_schema,
depends_on=[
sf_roles.read_only,
sf_roles.read_write,
],
),
)
# API Gateway
api = aws.apigatewayv2.Api(
'Api',
name=PREFIX,
protocol_type='HTTP',
)
api_stage = aws.apigatewayv2.Stage(
'Rest',
name='rest',
api_id=api.id,
auto_deploy=True,
opts=pulumi.ResourceOptions(parent=api),
)
# API: Collect
lambda_api_collect = MyLambda(
'ApiCollect',
prefix=PREFIX,
lambda_name='collect',
clouwatch_logs_retention_in_days=3,
apigatewayv2_api=api,
apigatewayv2_route_key='POST /collect',
code=pulumi.FileArchive('../api/collect'),
handler='main.handler',
environment=aws.lambda_.FunctionEnvironmentArgs(
variables={
'fh_stream_name': sf_stream_logs_snowpipe.firehose.name,
},
),
opts=pulumi.ResourceOptions(parent=api_stage),
)
aws.iam.RolePolicy(
f'ApiLambdaCollect_Firehose',
name='FirehoseWriteAccess',
role=lambda_api_collect.iam_role.id,
policy=aws.iam.get_policy_document(
statements=[
aws.iam.GetPolicyDocumentStatementArgs(
actions=[
'firehose:PutRecord',
'firehose:PutRecordBatch',
],
resources=[
sf_stream_logs_snowpipe.firehose.arn,
],
effect='Allow',
),
],
).json,
opts=pulumi.ResourceOptions(parent=lambda_api_collect),
)
# Final
pulumi.export('firehose_arn', sf_stream_logs_snowpipe.firehose.arn)
pulumi.export('firehose_name', sf_stream_logs_snowpipe.firehose.name)
pulumi.export('snowflake_database_name', sf_database.name)
pulumi.export('snowflake_stream_schema_name', sf_stream_schema.name)
pulumi.export('snowflake_stream_table_name',
sf_stream_logs_snowpipe.table.name)
pulumi.export('api_endpoint', api_stage.invoke_url)
| 27.818182 | 133 | 0.610517 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,269 | 0.337047 |
affe1f7c5acaa073ae333db92fc0f2ba9b660efd | 28 | py | Python | tests/math/__init__.py | Ejjaffe/dit | c9d206f03d1de5a0a298b1d0ea9d79ea5e789ee1 | [
"BSD-3-Clause"
] | 1 | 2021-03-15T08:51:42.000Z | 2021-03-15T08:51:42.000Z | tests/math/__init__.py | Ejjaffe/dit | c9d206f03d1de5a0a298b1d0ea9d79ea5e789ee1 | [
"BSD-3-Clause"
] | null | null | null | tests/math/__init__.py | Ejjaffe/dit | c9d206f03d1de5a0a298b1d0ea9d79ea5e789ee1 | [
"BSD-3-Clause"
] | null | null | null | """
Tests for dit.math.
"""
| 7 | 19 | 0.535714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.964286 |
b3002f19f23fd7a651f2fd21a8598fc87385b360 | 306 | py | Python | FATS/featureFunction.py | serdarozsoy/FATS | e2a1bf4f142c20eada5d0d63435599e9139d4a9d | [
"MIT"
] | null | null | null | FATS/featureFunction.py | serdarozsoy/FATS | e2a1bf4f142c20eada5d0d63435599e9139d4a9d | [
"MIT"
] | null | null | null | FATS/featureFunction.py | serdarozsoy/FATS | e2a1bf4f142c20eada5d0d63435599e9139d4a9d | [
"MIT"
] | null | null | null |
# Merge conflict resolved: both branches carried an identical import list,
# so a single copy is kept and the conflict markers are removed (the markers
# made this module a SyntaxError).
import os
import sys
import time

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

import Base
from FeatureFunctionLib import * | 21.857143 | 48 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b300d8ecf9747b4d9b531ad1418bf35e81de5855 | 1,367 | py | Python | personal-work/assignment2/rPi/arduino_to_python_to_mySQL.py | crabman84/codeIoT | 6bb5d3f6467d686e4eae30db011af32e8ae7d5f8 | [
"MIT"
] | null | null | null | personal-work/assignment2/rPi/arduino_to_python_to_mySQL.py | crabman84/codeIoT | 6bb5d3f6467d686e4eae30db011af32e8ae7d5f8 | [
"MIT"
] | null | null | null | personal-work/assignment2/rPi/arduino_to_python_to_mySQL.py | crabman84/codeIoT | 6bb5d3f6467d686e4eae30db011af32e8ae7d5f8 | [
"MIT"
] | null | null | null | import serial
import io
import MySQLdb

# Serial link to the Arduino sketch (9600 baud).
device = '/dev/ttyACM1'
arduino = serial.Serial(device, 9600)

# Fallback values, reported if a reading never arrives on the serial line.
temp = 5
motorPos = 50
hIndex = 4

# The sketch alternates an indicator line ('temp' / 'index' / 'pos') with a
# value line; collect one reading for each of the three quantities.
readings = 0
while readings < 3:
    indicator = arduino.readline().decode().strip()
    print("ind: " + indicator)
    if indicator == 'temp':
        temp = arduino.readline().decode('UTF-8')
        readings = readings + 1
    elif indicator == 'index':
        hIndex = arduino.readline().decode('UTF-8')
        readings = readings + 1
    elif indicator == 'pos':
        motorPos = arduino.readline().decode('UTF-8')
        readings = readings + 1

print('Encoded Serial Temp: ' + temp)
print('Encoded Serial Heat Index: ' + hIndex)
print('Encoded Serial Motor Position: ' + motorPos)

# Make DB connection. MySQLdb.connect raises on failure, so no truthiness
# check is needed; the original `... or die("...")` was a PHP idiom that
# would itself have raised NameError had it ever been evaluated.
dbConn = MySQLdb.connect("localhost", "root", "password", "tempdb")
print(dbConn)
cursor = None
try:
    cursor = dbConn.cursor()
    # cursor.execute("INSERT INTO tempLog (Temperature) VALUES (%s)", (temp,))
except MySQLdb.Error as e:
    print(e)
    dbConn.rollback()
else:
    dbConn.commit()
finally:
    # cursor stays None if dbConn.cursor() itself failed; the original
    # called cursor.close() unconditionally and could hit NameError there.
    if cursor is not None:
        cursor.close()
    dbConn.close()
| 22.409836 | 111 | 0.630578 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 457 | 0.334309 |
b302348ee4bab662ae5fc00d100adcd638224ab8 | 56 | py | Python | result_helpers/__init__.py | CFM-MSG/CMAN_pytorch | e176debef6888ae96781a6cfafabcbd438fbfcb0 | [
"MIT"
] | null | null | null | result_helpers/__init__.py | CFM-MSG/CMAN_pytorch | e176debef6888ae96781a6cfafabcbd438fbfcb0 | [
"MIT"
] | null | null | null | result_helpers/__init__.py | CFM-MSG/CMAN_pytorch | e176debef6888ae96781a6cfafabcbd438fbfcb0 | [
"MIT"
] | null | null | null | from result_helpers.mem_one_class import MEMResultHelper | 56 | 56 | 0.928571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b30280032746da65b4d1cd88375992e47f1f3a7e | 2,820 | py | Python | azure-event-hub-master/service/service.py | sesam-community/azure-eventhub-source | 22017b7586df95d8b9917797e561b3e444864b1b | [
"Apache-2.0"
] | null | null | null | azure-event-hub-master/service/service.py | sesam-community/azure-eventhub-source | 22017b7586df95d8b9917797e561b3e444864b1b | [
"Apache-2.0"
] | null | null | null | azure-event-hub-master/service/service.py | sesam-community/azure-eventhub-source | 22017b7586df95d8b9917797e561b3e444864b1b | [
"Apache-2.0"
] | 1 | 2019-04-05T08:09:30.000Z | 2019-04-05T08:09:30.000Z | import json
from flask import Flask, request, Response
from azure.eventhub import EventHubClient, Offset
from ast import literal_eval
import logging
import cherrypy
import os
from uamqp import types, errors
app = Flask(__name__)
logger = logging.getLogger('service')
# Only errors are logged; informational messages are suppressed.
logging.basicConfig(level=logging.ERROR)
# Access tokens for event hub namespace, from Azure portal for namespace
# All connection settings come from the environment (no defaults):
#   address       - Event Hub endpoint URL
#   user / key    - presumably the SAS policy name and key -- confirm
#   consumergroup - consumer group to read from
#   partition     - partition id to read from
address = os.environ.get('address')
user = os.environ.get('user')
key = os.environ.get('key')
consumergroup = os.environ.get('consumergroup')
partition = os.environ.get('partition')
# Startup sanity check: log (but do not abort) if no address was supplied.
if not address:
    logger.error("No event hub address supplied")
@app.route('/', methods=['GET'])
def get():
    """Stream events from the Event Hub partition as one JSON array.

    Optional query parameter 'since': offset to resume reading from;
    when absent, "-1" is used (presumably meaning "from the start of
    the stream" -- confirm against the azure-eventhub Offset docs).
    Each emitted entity gets an "_updated" key holding its sequence
    number, so a caller can resume by passing it back as 'since'.
    """
    logger.info("start of the function get()")
    if request.args.get('since') is None:
        sequenceid = "-1"
    else:
        sequenceid = int(request.args.get('since'))
    client = EventHubClient(address, debug=False, username=user, password=key)
    # NOTE(review): drops any receivers/senders already registered on the
    # client before adding ours -- intent unclear, verify this is needed.
    client.clients.clear()
    receiver = client.add_receiver(consumergroup, partition, prefetch=5000, offset=Offset(sequenceid), keep_alive=72000)
    client.run()
    def generate():
        # Generator consumed by Flask's streaming Response: yields '[',
        # comma-separated JSON objects, then ']'.
        try:
            batched_events = receiver.receive(max_batch_size=100,timeout=500)
            index = 0
            yield '['
            while batched_events:
                for event_data in batched_events:
                    if index > 0:
                        yield ','
                    last_sn = event_data.sequence_number
                    # assumes str(message) is a Python dict literal -- TODO confirm
                    data = str(event_data.message)
                    output_entity = literal_eval(data)
                    output_entity.update({"_updated": last_sn})
                    yield json.dumps(output_entity)
                    index = index + 1
                batched_events = receiver.receive(max_batch_size=100,timeout=500)
            yield ']'
        except (errors.TokenExpired, errors.AuthenticationException):
            logger.error("Receiver disconnected due to token error.")
            receiver.close(exception=None)
        except (errors.LinkDetach, errors.ConnectionClose):
            logger.error("Receiver detached.")
            receiver.close(exception=None)
        except Exception as e:
            logger.error("Unexpected error occurred (%r). Shutting down.", e)
            receiver.close(exception=None)
    return Response(generate(), mimetype='application/json')
if __name__ == '__main__':
    # Serve the Flask app through CherryPy's WSGI server instead of the
    # Flask development server.
    cherrypy.tree.graft(app, '/')
    # Set the configuration of the web server to production mode
    cherrypy.config.update({
        'environment': 'production',
        'engine.autoreload_on': False,
        'log.screen': True,
        'server.socket_port': 5000,
        'server.socket_host': '0.0.0.0'
    })
    # Start the CherryPy WSGI web server
    cherrypy.engine.start()
    # Block the main thread until the server is stopped.
    cherrypy.engine.block()
| 33.571429 | 120 | 0.637589 | 0 | 0 | 1,708 | 0.605674 | 1,741 | 0.617376 | 0 | 0 | 578 | 0.204965 |
b302d7a1061000487ddbabb61f61edef21f08f72 | 861 | py | Python | pollect/sources/HttpSource.py | ystradmann/pollect | 064b5f2dd149255a642236eab5d12cbda3094461 | [
"MIT"
] | null | null | null | pollect/sources/HttpSource.py | ystradmann/pollect | 064b5f2dd149255a642236eab5d12cbda3094461 | [
"MIT"
] | null | null | null | pollect/sources/HttpSource.py | ystradmann/pollect | 064b5f2dd149255a642236eab5d12cbda3094461 | [
"MIT"
] | null | null | null | import time
from typing import Optional
from pollect.core import Helper
from pollect.core.ValueSet import ValueSet, Value
from pollect.sources.Source import Source
class HttpSource(Source):
    """Pollect source that measures the response time of an HTTP endpoint.

    Emits one Value: the request duration in milliseconds, or the
    configured timeout when the probe fails.
    """

    # Expected HTTP status for the probe; None means "don't check".
    status_code: Optional[int] = None

    def __init__(self, config):
        super().__init__(config)
        self.url = config.get('url')
        self.timeout = config.get('timeout', 10)
        self.status_code = config.get('statusCode')

    def _probe(self):
        results = ValueSet()
        try:
            started_ms = time.time() * 1000
            Helper.get_url(self.url, timeout=self.timeout, expected_status=self.status_code)
            elapsed_ms = time.time() * 1000 - started_ms
            results.add(Value(int(elapsed_ms)))
        except Exception as e:
            # Probe failed: report the timeout as the (worst-case) duration.
            self.log.error('Could not probe ' + str(e))
            results.add(Value(self.timeout))
        return results
| 29.689655 | 92 | 0.626016 | 693 | 0.804878 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.051103 |
b304511c23a05e5c02a15ade41a42766675c4bda | 242 | py | Python | ikologikapi/domain/AbstractIkologikCustomerObject.py | Ikologik/ikologik-api-python | 5bf32d7dde3366110c6fde4fc76d74fb461eb8b5 | [
"MIT"
] | null | null | null | ikologikapi/domain/AbstractIkologikCustomerObject.py | Ikologik/ikologik-api-python | 5bf32d7dde3366110c6fde4fc76d74fb461eb8b5 | [
"MIT"
] | null | null | null | ikologikapi/domain/AbstractIkologikCustomerObject.py | Ikologik/ikologik-api-python | 5bf32d7dde3366110c6fde4fc76d74fb461eb8b5 | [
"MIT"
] | null | null | null | from ikologikapi.domain.AbstractIkologikObject import AbstractIkologikObject
class AbstractIkologikCustomerObject(AbstractIkologikObject):
    """Base class for API domain objects that are scoped to one customer."""
    def __init__(self, customer: str):
        # Initialize the generic Ikologik object fields first.
        super().__init__()
        # Identifier of the customer this object belongs to.
        self.customer = customer
| 24.2 | 76 | 0.772727 | 162 | 0.669421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b30592b52ccb0ad577925bc052ff24161d1e552c | 2,316 | py | Python | 2018/day04.py | jawang35/advent-of-code | 06e2ba940f452c66b7863d837f6a3245c9921597 | [
"MIT"
] | null | null | null | 2018/day04.py | jawang35/advent-of-code | 06e2ba940f452c66b7863d837f6a3245c9921597 | [
"MIT"
] | 1 | 2018-12-21T19:05:27.000Z | 2018-12-31T18:59:07.000Z | 2018/day04.py | jawang35/advent-of-code | 06e2ba940f452c66b7863d837f6a3245c9921597 | [
"MIT"
] | null | null | null | from datetime import datetime
from enum import Enum, auto
import re
# Pre-compiled pattern for a guard-id token such as "#10".
guard_id_regex = re.compile(r'#\d+')
class Record():
    """One parsed line of the guard log (Advent of Code 2018, day 4).

    Attributes:
        timestamp: datetime parsed from the leading "[YYYY-MM-DD HH:MM]".
        guard_id:  guard number as a string without the '#', or None.
        type:      'WAKES_UP', 'FALLS_ASLEEP', or 'BEGINS_SHIFT'.
    """
    def __init__(self, record_string):
        # The bracketed timestamp occupies characters 1-16 of every line.
        self.timestamp = datetime.strptime(record_string[1:17], '%Y-%m-%d %H:%M')
        match = guard_id_regex.search(record_string)
        self.guard_id = match[0][1:] if match else None
        if 'wakes up' in record_string:
            self.type = 'WAKES_UP'
        elif 'falls asleep' in record_string:
            self.type = 'FALLS_ASLEEP'
        else:
            # Only shift-change lines carry a guard id.
            self.type = 'BEGINS_SHIFT'
def track_sleep_minutes(minutes_already_slept, start_minute, end_minute):
    """Increment, in place, the per-minute sleep counters for [start, end)."""
    for m in range(start_minute, end_minute):
        minutes_already_slept[m] = minutes_already_slept[m] + 1
def build_sleep_log(records):
    """Aggregate chronologically ordered records into per-guard sleep stats.

    Returns a dict mapping guard_id -> {'time_asleep': timedelta,
    'minutes': 60-element list counting how often the guard slept
    through each minute of the midnight hour}.
    """
    log = {}
    guard = records[0].guard_id
    fell_asleep = None
    for rec in records:
        if rec.type == 'BEGINS_SHIFT':
            # A new guard takes over; any pending nap state is discarded.
            guard, fell_asleep = rec.guard_id, None
        if rec.type == 'FALLS_ASLEEP':
            fell_asleep = rec.timestamp
        if rec.type == 'WAKES_UP':
            nap = rec.timestamp - fell_asleep
            entry = log.get(guard)
            if entry is None:
                entry = log[guard] = {'time_asleep': nap, 'minutes': [0] * 60}
            else:
                entry['time_asleep'] += nap
            for minute in range(fell_asleep.minute, rec.timestamp.minute):
                entry['minutes'][minute] += 1
    return log
if __name__ == '__main__':
    # Parse the puzzle input (one Record per line) in chronological order.
    with open('2018/sampleinputs/day04.txt') as file:
        lines = file.read().split('\n')[:-1]
    records = sorted((Record(line) for line in lines), key=lambda rec: rec.timestamp)
    sleep_log = build_sleep_log(records)
    # Part 1: the guard with the most total sleep, times his sleepiest minute.
    guard1, entry1 = max(sleep_log.items(), key=lambda item: item[1]['time_asleep'])
    part1 = int(guard1) * int(entry1['minutes'].index(max(entry1['minutes'])))
    print('Part 1: {}'.format(part1))
    # Part 2: the guard most frequently asleep on any single minute.
    guard2, entry2 = max(sleep_log.items(), key=lambda item: max(item[1]['minutes']))
    part2 = int(guard2) * int(entry2['minutes'].index(max(entry2['minutes'])))
    print('Part 2: {}'.format(part2))
| 37.967213 | 111 | 0.636442 | 467 | 0.201641 | 0 | 0 | 0 | 0 | 0 | 0 | 292 | 0.126079 |
b305a34bdb889b52d5f6fb744e2674940dcc4b15 | 6,043 | py | Python | lib/symbioticpy/symbiotic/symbiotic.py | IMULMUL/symbiotic | 25a72f06440739b881156a56ea87ee254f21bdd9 | [
"MIT"
] | null | null | null | lib/symbioticpy/symbiotic/symbiotic.py | IMULMUL/symbiotic | 25a72f06440739b881156a56ea87ee254f21bdd9 | [
"MIT"
] | 14 | 2015-03-31T11:54:34.000Z | 2015-10-04T19:26:56.000Z | lib/symbioticpy/symbiotic/symbiotic.py | IMULMUL/symbiotic | 25a72f06440739b881156a56ea87ee254f21bdd9 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import os
import sys
import re
from . transform import SymbioticCC
from . verifier import SymbioticVerifier
from . options import SymbioticOptions
from . utils import err, dbg, print_elapsed_time, restart_counting_time
from . utils.utils import print_stdout
from . utils.process import ProcessRunner
from . exceptions import SymbioticExceptionalResult
class Symbiotic(object):
    """
    Instance of symbiotic tool. Instruments, prepares, compiles and runs
    symbolic execution on given source(s)
    """
    def __init__(self, tool, src, opts=None, env=None):
        """Create a runner for `tool` over the given source file(s).

        tool -- verifier backend adapter (project type)
        src  -- source file(s) to verify
        opts -- SymbioticOptions; a fresh default set is created when None
        env  -- environment description passed through to CC and verifier
        """
        # source file
        self.sources = src
        # source compiled to llvm bytecode
        self.curfile = None
        # environment
        self.env = env
        if opts is None:
            self.options = SymbioticOptions()
        else:
            self.options = opts
        # tool to use
        self._tool = tool
    def terminate(self):
        # Gracefully terminate the running child process, if any.
        pr = ProcessRunner()
        if pr.hasProcess():
            pr.terminate()
    def kill(self):
        # Forcefully kill the running child process, if any.
        pr = ProcessRunner()
        if pr.hasProcess():
            pr.kill()
    def kill_wait(self):
        # Kill the child process and poll (0.5 s intervals) until it has
        # actually exited.
        pr = ProcessRunner()
        if not pr.hasProcess():
            return
        if pr.exitStatus() is None:
            from time import sleep
            while pr.exitStatus() is None:
                pr.kill()
                print('Waiting for the child process to terminate')
                sleep(0.5)
        print('Killed the child process')
    def replay_nonsliced(self, tool, cc):
        """Re-run the verifier on the unsliced bitcode to confirm an error.

        Returns the verifier's result string for the unsliced program.
        """
        bitcode = cc.prepare_unsliced_file()
        params = []
        if hasattr(tool, "replay_error_params"):
            params = tool.replay_error_params(cc.curfile)
        print_stdout('INFO: Replaying error path', color='WHITE')
        restart_counting_time()
        verifier = SymbioticVerifier(bitcode, self.sources,
                                     tool, self.options,
                                     self.env, params)
        res, _ = verifier.run()
        print_elapsed_time('INFO: Replaying error path time', color='WHITE')
        return res
    def _run_symbiotic(self):
        """Compile, slice and verify the sources; return a result string.

        Falls back to the unsliced program when the sliced run fails or
        when a reported error path cannot be confirmed by replaying it.
        """
        options = self.options
        # Instrument and compile the sources into (sliced) LLVM bitcode.
        cc = SymbioticCC(self.sources, self._tool, options, self.env)
        bitcode = cc.run()
        if options.no_verification:
            return 'No verification'
        verifier = SymbioticVerifier(bitcode, self.sources,
                                     self._tool, options, self.env)
        # result and the tool that decided this result
        res, tool = verifier.run()
        # if we crashed on the sliced file, try running on the unsliced file
        # (do this optional, as well as for slicer and instrumentation)
        resstartswith = res.lower().startswith
        if (not options.noslice) and \
           (options.sv_comp or options.test_comp) and \
           (resstartswith('error') or resstartswith('unknown')):
            print_stdout("INFO: Failed on the sliced code, trying on the unsliced code",
                         color="WHITE")
            options.replay_error = False # now we do not need to replay the error
            options.noslice = True # now we behave like without slicing
            bitcode = cc.prepare_unsliced_file()
            verifier = SymbioticVerifier(bitcode, self.sources,
                                         self._tool, options, self.env)
            res, tool = verifier.run()
            print_elapsed_time('INFO: Running on unsliced code time', color='WHITE')
        if tool and options.replay_error and not tool.can_replay():
            dbg('Replay required but the tool does not support it')
        # An error is either a "false..." verdict or, for error-call
        # properties, a "done" verdict.
        has_error = res and\
                    (res.startswith('false') or\
                     (res.startswith('done') and options.property.errorcall()))
        if has_error and options.replay_error and\
           not options.noslice and tool.can_replay():
            print_stdout("Trying to confirm the error path")
            newres = self.replay_nonsliced(tool, cc)
            dbg("Original result: '{0}'".format(res))
            dbg("Replayed result: '{0}'".format(newres))
            if res != newres:
                # if we did not replay the original error, but we found a different error
                # on this path, report it, since it should be real
                has_error = newres and\
                            (newres.startswith('false') or\
                             (newres.startswith('done') and\
                              options.property.errorcall()))
                if has_error:
                    res = newres
                else:
                    res = 'cex not-confirmed'
                    has_error = False
        if res == 'cex not-confirmed':
            # if we failed confirming CEX, rerun on unsliced file
            bitcode = cc.prepare_unsliced_file()
            verifier = SymbioticVerifier(bitcode, self.sources,
                                         self._tool, options, self.env)
            res, tool = verifier.run()
            has_error = res and\
                        (res.startswith('false') or\
                         (res.startswith('done') and options.property.errorcall()))
        # Post-processing: describe the found error and emit witnesses.
        if has_error and hasattr(tool, "describe_error"):
            tool.describe_error(cc.curfile)
        if has_error and options.executable_witness and\
           hasattr(tool, "generate_exec_witness"):
            tool.generate_exec_witness(cc.curfile, self.sources)
        if not options.nowitness and hasattr(tool, "generate_witness"):
            tool.generate_witness(cc.curfile, self.sources, has_error)
        return res
    def run(self):
        """Entry point: run verification, mapping interruption and
        exceptional results to plain result strings."""
        try:
            return self._run_symbiotic()
        except KeyboardInterrupt:
            self.terminate()
            self.kill()
            print('Interrupted...')
            return 'interrupted'
        except SymbioticExceptionalResult as res:
            # we got result from some exceptional case
            return str(res)
| 35.547059 | 89 | 0.573556 | 5,668 | 0.937945 | 0 | 0 | 0 | 0 | 0 | 0 | 1,294 | 0.214132 |
b306523785b4bd7109a4337c13f607cc42984c0b | 590 | py | Python | main/python-sphinx-removed-in/template.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | null | null | null | main/python-sphinx-removed-in/template.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | null | null | null | main/python-sphinx-removed-in/template.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | null | null | null | pkgname = "python-sphinx-removed-in"
# cports build template for the sphinx-removed-in PyPI package.
pkgver = "0.2.1"
pkgrel = 0
build_style = "python_module"
# setup.py based build, so setuptools is needed at build time
hostmakedepends = ["python-setuptools"]
checkdepends = ["python-sphinx"]
depends = ["python-sphinx"]
pkgdesc = "Sphinx extension for versionremoved and removed-in directives"
maintainer = "q66 <q66@chimera-linux.org>"
license = "BSD-3-Clause"
url = "https://github.com/MrSenko/sphinx-removed-in"
source = f"$(PYPI_SITE)/s/sphinx-removed-in/sphinx-removed-in-{pkgver}.tar.gz"
sha256 = "0588239cb534cd97b1d3900d0444311c119e45296a9f73f1ea81ea81a2cd3db1"
# dependency of pytest
options = ["!check"]
| 36.875 | 78 | 0.759322 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 414 | 0.701695 |
b3071a1b815f8d4b62fc6b810125289866f9f503 | 180 | py | Python | portal/serializers.py | Radek198/Food-Co-op | eee5077d9cd9f5f9b71a649e7fb086f5e04d3f83 | [
"MIT"
] | null | null | null | portal/serializers.py | Radek198/Food-Co-op | eee5077d9cd9f5f9b71a649e7fb086f5e04d3f83 | [
"MIT"
] | null | null | null | portal/serializers.py | Radek198/Food-Co-op | eee5077d9cd9f5f9b71a649e7fb086f5e04d3f83 | [
"MIT"
] | null | null | null | from rest_framework import serializers
class ProductSerializer(serializers.Serializer):
    """Validates a payload whose 'product' field is a list of name strings."""
    product = serializers.ListField(child=serializers.CharField(max_length=200))
| 25.714286 | 52 | 0.794444 | 138 | 0.766667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b3088829bd55a0d983de8c705f56417ea4a54ac3 | 46 | py | Python | graphgallery/gallery/linkpred/pyg/__init__.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 300 | 2020-08-09T04:27:41.000Z | 2022-03-30T07:43:41.000Z | graphgallery/gallery/linkpred/pyg/__init__.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 5 | 2020-11-05T06:16:50.000Z | 2021-12-11T05:05:22.000Z | graphgallery/gallery/linkpred/pytorch/__init__.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 51 | 2020-09-23T15:37:12.000Z | 2022-03-05T01:28:56.000Z | from .gae import GAE
from .vgae import VGAE
| 15.333333 | 23 | 0.73913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b3098f530cc75aa41ac3e63282ed9b4d3590224b | 22,953 | py | Python | SightingsTOcsv.py | kfiala/AviSysDataAccess | baf7bb5e4c97c7e175152c133304f00195fb4bcd | [
"Apache-2.0"
] | 1 | 2021-05-02T17:31:58.000Z | 2021-05-02T17:31:58.000Z | SightingsTOcsv.py | kfiala/AviSysDataAccess | baf7bb5e4c97c7e175152c133304f00195fb4bcd | [
"Apache-2.0"
] | 1 | 2021-11-15T19:07:28.000Z | 2021-11-19T02:44:59.000Z | SightingsTOcsv.py | kfiala/AviSysDataAccess | baf7bb5e4c97c7e175152c133304f00195fb4bcd | [
"Apache-2.0"
] | null | null | null | # Export the contents of AviSys files SIGHTING.DAT and FNotes.DAT to CSV format
# Author: Kent Fiala <Kent.Fiala@gmail.com>
# Version: 1.2 3 April 2021
import sys
import csv
import ctypes
# Input files
DATA_FILE = 'SIGHTING.DAT'       # AviSys sighting records
MASTER_FILE = 'MASTER.AVI'       # taxonomy: species/genus/common names
PLACES_FILE = 'PLACES.AVI'       # place hierarchy
NOTE_INDEX = 'FNotes.IX'         # field-note index (note number -> block)
NOTE_FILE = 'FNotes.DAT'         # field-note text blocks
ASSOCIATE_FILE = 'ASSOCIAT.AVI'  # AviSys place -> eBird hotspot links
# Output files
EXPORT_FILE = 'AviSys.sightings.'  # CSV export name stem (suffix presumably appended where used -- confirm)
NOTE_OUTPUT = 'FieldNotes.txt'     # extracted field notes
# Map spelled-out U.S. state/territory names (as AviSys stores them)
# to two-letter postal codes for the CSV export.
stateCode = {
	'Alabama':'AL',
	'Alaska':'AK',
	'Arizona':'AZ',
	'Arkansas':'AR',
	'California':'CA',
	'Colorado':'CO',
	'Connecticut':'CT',
	'Delaware':'DE',
	'D.C.':'DC',
	'Florida':'FL',
	'Georgia':'GA',
	'Hawaii':'HI',
	'Idaho':'ID',
	'Illinois':'IL',
	'Indiana':'IN',
	'Iowa':'IA',
	'Kansas':'KS',
	'Kentucky':'KY',
	'Louisiana':'LA',
	'Maine':'ME',
	'Maryland':'MD',
	'Massachusetts':'MA',
	'Michigan':'MI',
	'Minnesota':'MN',
	'Mississippi':'MS',
	'Missouri':'MO',
	'Montana':'MT',
	'Nebraska':'NE',
	'Nevada':'NV',
	'New Hampshire':'NH',
	'New Jersey':'NJ',
	'New Mexico':'NM',
	'New York':'NY',
	'North Carolina':'NC',
	'North Dakota':'ND',
	'Ohio':'OH',
	'Oklahoma':'OK',
	'Oregon':'OR',
	'Pennsylvania':'PA',
	'Rhode Island':'RI',
	'South Carolina':'SC',
	'South Dakota':'SD',
	'Tennessee':'TN',
	'Texas':'TX',
	'Utah':'UT',
	'Vermont':'VT',
	'Virginia':'VA',
	'Washington':'WA',
	'West Virginia':'WV',
	'Wisconsin':'WI',
	'Wyoming':'WY'
	}
# Canadian province/territory names to two-letter codes.  Two codes are
# normalized away from AviSys's own abbreviations (see inline notes).
provinceCode = {
	'Alberta':'AB',
	'British Columbia':'BC',
	'Manitoba':'MB',
	'New Brunswick':'NB',
	'Newfoundland':'NL',	# AviSys uses 'NF'
	'Northwest Terr.':'NT',
	'Nova Scotia':'NS',
	'Nunavut':'NU',
	'Ontario':'ON',
	'Prince Edward Is.':'PE',
	'Quebec':'QC',	# AviSys uses 'PQ'
	'Saskatchewan':'SK',
	'Yukon Territory':'YT'
	}
class NoteBlock:
    """One 512-byte block of FNotes.DAT.

    Layout (block 0 of the file is a header and is never read here):
      flag byte 0x00 -> first block of a note:
        bytes 4-7 note number, bytes 8-505 data,
        bytes 506-507 count of valid bytes (covering offsets 0-505),
        bytes 508-511 index of the next block in the chain (0 = none)
      flag byte 0x01 -> continuation block:
        bytes 1-505 data; same trailer, with the valid count covering
        offsets 1-505.

    The concatenated data of a chain holds fixed 125-byte records whose
    first byte is the length of the text line that follows.
    """

    def __init__(self, file, blockNumber):
        """Load block `blockNumber` from the open FNotes.DAT handle."""
        self.file = file
        file.seek(blockNumber * 512)
        raw = file.read(512)
        valid = int.from_bytes(raw[506:508], 'little')
        self.next = int.from_bytes(raw[508:512], 'little')
        # First blocks carry an 8-byte header before the data; continuation
        # blocks only carry the 1-byte flag.
        self.data = raw[8:valid] if raw[0] == 0 else raw[1:valid + 1]

    def extract(self):
        """Return the note's text, one newline-terminated line per record."""
        chained = self.extractBlocks()
        pieces = []
        pos = 0
        while pos < len(chained):
            length = chained[pos]  # length prefix of this 125-byte record
            pieces.append(chained[pos + 1:pos + 1 + length].decode('Windows-1252'))
            pos += 125
        return ''.join(piece + '\n' for piece in pieces)

    def extractBlocks(self):
        """Return the raw data of this block and every block chained to it."""
        if not self.next:
            return self.data
        return self.data + NoteBlock(self.file, self.next).extractBlocks()
def readMaster():
    """Build the species-name lookup tables from MASTER.AVI.

    MASTER.AVI holds the taxonomy in fixed 110-byte records; the fields
    read here are:
      bytes 5-6   species number (little endian)
      byte  7     common-name length,  bytes 8-43   common name
      byte  52    genus-name length,   bytes 53-76  genus name
      byte  77    species-name length, bytes 78-101 species name
    (The remaining bytes hold life-list/checklist bit masks and ABA
    flags, which this exporter does not need.)

    Returns a tuple (commonName, genusName, speciesName) of dicts, each
    keyed by species number.  Exits the program if the file is missing
    or unreadable.
    """
    name = {}
    genusName = {}
    speciesName = {}
    try:
        master_input = open(MASTER_FILE, "rb")
    except FileNotFoundError:
        print('Error: File', MASTER_FILE, 'not found.')
        raise SystemExit
    except:
        print("Error opening", MASTER_FILE, '--', sys.exc_info()[1])
        raise SystemExit
    with master_input:
        while True:
            taxon = master_input.read(110)
            if not taxon:
                break
            speciesNo = int.from_bytes(taxon[5:7], "little")
            name[speciesNo] = taxon[8:8 + taxon[7]].decode('Windows-1252')
            genusName[speciesNo] = taxon[53:53 + taxon[52]].decode('Windows-1252')
            speciesName[speciesNo] = taxon[78:78 + taxon[77]].decode('Windows-1252')
    return (name, genusName, speciesName)
class Place:
    """A location record from PLACES.AVI.

    Places are organized into six hierarchy tables of 450 entries each;
    `table` is derived from the place number, and `link` is the place
    number of the parent location one level up (0 = none).
    """

    def __init__(self, placeNumber, name, link):
        self.placeNumber = placeNumber
        self.name = name
        self.link = link
        # 450 places per table; table index is zero-based.
        self.table = (placeNumber - 1) // 450

    def __str__(self):
        return '{}: {} {} (table {})'.format(
            self.placeNumber, self.name, self.link, self.table)
def readPlaces():
    """Read PLACES.AVI and build Place objects with 6-level link lists.

    PLACES.AVI holds fixed-length 39-byte records:
      bytes 0-1   place number (0 = unused slot, skipped)
      byte  6     length of place name
      bytes 7-36  place name
      bytes 37-38 place number of the linked (parent) location

    Returns a dict mapping place number -> Place.  Each Place gets a
    `linklist` of exactly 6 names: its own name at its table level, its
    ancestors' names at theirs, and '' everywhere else.  Exits the
    program if the file is missing or unreadable.
    """
    output = {}
    try:
        places_input = open(PLACES_FILE, "rb")
    except FileNotFoundError:
        print('Error: File', PLACES_FILE, 'not found.')
        raise SystemExit
    except:
        print("Error opening", PLACES_FILE, '--', sys.exc_info()[1])
        raise SystemExit
    while True:  # Read all the places in the file
        place = places_input.read(39)
        if not place:
            break
        placeNumber = int.from_bytes(place[0:2], "little")
        if placeNumber == 0:
            continue
        name = place[7:7 + place[6]].decode('Windows-1252')
        link = int.from_bytes(place[37:39], "little")
        output[placeNumber] = Place(placeNumber, name, link)
    places_input.close()
    # Now make the 6-level list of links for each place.
    for placeNumber in output:
        place = output[placeNumber]
        links = []
        for i in range(6):
            if i == place.table:  # level i is the entry for this place
                links.append(place.name)
                nxt = place.link
                if nxt == 0:
                    # No parent: pad the remaining higher levels with ''.
                    # (Bug fix: the original used a `table` variable here
                    # that was unbound on the first pass -- and stale from
                    # the previous place afterwards -- which could crash or
                    # produce a linklist of the wrong length.  The current
                    # level is always i.)
                    links.extend([''] * (5 - i))
                    break
                # Continue the walk from the linked (parent) place.
                place = output[nxt]
            else:
                links.append('')  # Links are null until we get to the first one
        output[placeNumber].linklist = links
    return output
class Association:
    """One AviSys-place -> eBird-hotspot association from ASSOCIAT.AVI."""

    def __init__(self, placeName, locationName, lat, lng, state, nation):
        # Store the association fields verbatim.
        self.placeName = placeName
        self.locationName = locationName
        self.lat = lat    # latitude as the ASCII string from the file
        self.lng = lng    # longitude as the ASCII string from the file
        self.state = state
        self.nation = nation
def readAssociate():
    """Read the AviSys place -> eBird hotspot associations from ASSOCIAT.AVI.

    The file holds fixed-length 152-byte records of length-prefixed ASCII
    strings (the byte at each field's base offset is the string length):
      offset 0   AviSys place (up to 30 chars)
      offset 34  eBird locId (not extracted here)
      offset 42  eBird hotspot name (up to 60 chars)
      offset 103 latitude string  (offsets 116-123 hold a binary double)
      offset 124 longitude string (offsets 137-144 hold a binary double)
      offset 145 state, offset 149 nation
    Returns a dict keyed by AviSys place name; returns an empty dict if
    the file is absent (the associations are optional).
    """
    output = {}
    try:
        associate_input = open(ASSOCIATE_FILE, "rb")
    except FileNotFoundError:
        print('Note: File', ASSOCIATE_FILE, 'not found.')
        return output
    except:
        print("Error opening", ASSOCIATE_FILE, '--', sys.exc_info()[1])
        raise SystemExit
    def field(rec, off):
        # Decode a length-prefixed string starting at offset `off`.
        return rec[off + 1:off + 1 + rec[off]].decode('Windows-1252')
    while True:  # Read every association record in the file
        record = associate_input.read(152)
        if not record:
            break
        if len(record) != 152:
            # Trailing partial record: report it and keep going.
            print("Odd, length is", len(record))
        else:
            place = field(record, 0)
            location = field(record, 42)
            lat = field(record, 103)
            lng = field(record, 124)
            state = field(record, 145)
            nation = field(record, 149)
            output[place] = Association(place, location, lat, lng, state, nation)
    associate_input.close()
    return output
def readNoteIndex():
    """Read FNotes.IX and map note numbers to block numbers in FNotes.DAT.

    FNotes.IX consists of fixed-size blocks.  The first block begins with
    a 32-byte descriptive header (marker 0xffffffff, block count, block
    size, note count, entries per block) padded out to the block size.
    Every later block holds, starting at offset 6, an array of 14-byte
    index entries; byte 0 of the block is the number of valid entries,
    which are grouped at the front.  Each entry stores the FNotes.DAT
    block number in bytes 0-3 and the note number as ASCII digits
    (length byte at offset 8, digits from offset 9).

    Returns {note number (int): block number (int)}.  Exits the program
    if the file is missing, unreadable, or has an unexpected layout.
    """
    try:
        note_index = open(NOTE_INDEX, "rb")
    except FileNotFoundError:
        print('Error: File', NOTE_INDEX, 'not found.')
        # Bug fix: the original fell through here and then crashed with a
        # NameError on the unbound file handle; exit like the other readers.
        raise SystemExit
    except:
        print("Error opening", NOTE_INDEX, '--', sys.exc_info()[1])
        raise SystemExit
    header = note_index.read(32)
    marker = int.from_bytes(header[0:4], 'little')
    if marker != 4294967295:  # 0xffffffff sanity marker; warn but keep going
        print('Unexpected value', marker, 'at beginning of', NOTE_INDEX)
    numBlocks = int.from_bytes(header[8:12], 'little')     # number of blocks (unused)
    blockSize = int.from_bytes(header[12:16], 'little')    # bytes per block (874)
    numNotes = int.from_bytes(header[22:26], 'little')     # total note count (unused)
    blockFactor = int.from_bytes(header[26:30], 'little')  # index entries per block (62)
    reclen = int((blockSize - 6) / blockFactor)            # bytes per index entry
    if reclen != 14:
        print('Reclen was expected to be 14 but is', reclen)
        raise SystemExit
    # Skip the remainder of the (mostly empty) header block.
    note_index.read(blockSize - 32)
    index = {}
    while True:
        block = note_index.read(blockSize)
        if not block:
            break
        numValid = block[0]
        if not numValid:
            break
        # Valid entries sit at the start of the block; anything after the
        # last valid entry is unused padding.
        for ptr in range(6, blockSize, reclen):
            ix = block[ptr:ptr + reclen]
            if not ix:
                break
            blockNumber = int.from_bytes(ix[0:4], 'little')
            nchar = ix[8]
            ascii = ix[9:9 + nchar].decode('Windows-1252')
            index[int(ascii)] = blockNumber
            numValid -= 1
            if not numValid:
                break  # Finished with all valid entries this block
    note_index.close()
    return index
def integrateNote(comment,fieldnoteText):
# Integrate the comment and field note.
# If the observation was imported from eBird via http://avisys.info/ebirdtoavisys/
# the AviSys comment may duplicate the beginning of the eBird comment.
# Here we remove duplication.
if fieldnoteText != '': # If there is a field note
work = comment # Working copy of the comment
keepLen = 0 # Length of the beginning of the comment to keep, if any duplication
ptr = 0 # Where we are in the comment
hasAttributes = True if ptr < len(work) and work[ptr] == '/' else False
while hasAttributes: # There are AviSys attributes at the beginning of comment
attributeLen = 3 if ptr+2 < len(work) and comment[ptr+2] == '/' else 2 # Attributes are either 2 or 3 bytes
ptr += attributeLen # Bump ptr past this attribute
while ptr < len(work) and work[ptr] == ' ': # and past any trailing blanks
ptr += 1
hasAttributes = True if ptr < len(work) and work[ptr] == '/' else False # Check if there is another attribute
if ptr < len(work) and work[ptr] == '(': # If the first part of comment is parenthesized, skip over it
ptr += 1
while ptr < len(work) and work[ptr] != ')':
ptr += 1
if work[ptr] == ')':
ptr += 1
while ptr < len(work) and work[ptr] == ' ':
ptr += 1
keepLen = ptr # Keep at least this much of the comment
work = work[ptr:] # Check if this part of the comment is duplicated in the field note
text = fieldnoteText
linend = fieldnoteText.find('\n') # end of first line
# If the first line contains ' :: ' it is probably a heading so skip that line
if fieldnoteText[0:linend].find(' :: ') > 0:
text = fieldnoteText[linend+1:]
linend = text.find('\n') # end of second line
text = text[0:linend] + ' ' + text[linend+1:] # Examine the first two lines as one line
ptr = 0
while ptr < len(text) and text[ptr] == ' ': # Skip over any leading blanks
ptr += 1
if len(work): # If we have a comment
if text[ptr:ptr+len(work)] == work: # If the comment is identical to the beginning of the field note
if keepLen: # Discard the comment text. Keep only the comment prefix (attributes and/or parenthesized content)
comment = comment[0:keepLen]
else:
comment = '' # Discard the entire comment.
comment = comment.strip() + ' ' + fieldnoteText # Concatenate comment prefix and field note.
comment = comment.strip(' \n')
return comment
#########################################################################################################
######################################## The program starts here ########################################
#########################################################################################################
outArray = []
noteDict = {}
# ref https://stackoverflow.com/questions/55172090/detect-if-python-program-is-executed-via-windows-gui-double-click-vs-command-p
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
process_array = (ctypes.c_uint * 1)()
num_processes = kernel32.GetConsoleProcessList(process_array, 1)
if len(sys.argv) < 2: # If no command-line argument
if num_processes <= 2: # Run from double-click
outputType = 'eBird'
else: # Run from command line
outputType = 'AviSys'
else:
outputType = sys.argv[1]
if outputType.lower() == 'avisys':
outputType = 'AviSys'
elif outputType.lower() == 'ebird':
outputType = 'eBird'
else:
print("Please specify either AviSys or eBird")
raise SystemExit
try:
FNotes = open(NOTE_FILE,"rb")
except FileNotFoundError:
print('Error: File',NOTE_FILE,'not found.')
raise SystemExit
except:
print("Error opening",NOTE_FILE,'--',sys.exc_info()[1])
raise SystemExit
noteIndex = readNoteIndex()
(name,genusName,speciesName) = readMaster()
places = readPlaces()
association = readAssociate()
try:
sighting_file = open(DATA_FILE,"rb")
except FileNotFoundError:
print('Error: File',DATA_FILE,'not found.')
raise SystemExit
except:
print("Error opening",DATA_FILE,'--',sys.exc_info()[1])
raise SystemExit
# Format of SIGHTING.DAT
# Header record
# 0-3 ffffffff
# 8-11 Number of records
# 12 Reclen (6F, 111)
# padded to 111 bytes
#
# Sighting record
# 0-3 always 00000000
# 4-5 Species number
# 6-9 Fieldnote number
# 10-13 Date
# 14-15 Place number
# 16 Country len
# 17-19 Country
# 20-23 nation bits e.g. 0d200800 for lower 48
# 24-27 always 00000000
# 28 Comment len
# 29-108 Comment
# 109-110 Count
#
# Update 2021 08 14:
# I figured out how bytes 0-3 are used.
# For valid sighting records, the first 4 bytes are zeroes.
# Corrupted records can be kept in the file but ignored;
# they are stored in a linked list where bytes 0-3 are the link pointer.
# The last record in the linked list has ffffffff in bytes 0-3.
# The first four bytes of the header (first four bytes of the file) point to the beginning of the linked list of corrupt records.
# If there are no corrupt records, the file begins with ffffffff.
# The value of the link pointer is the record number; thus multiply by 111 to get the byte offset in the file.
# To ignore invalid records, skip any record that does not begin with 00000000.
#
# Nation bits:
# 00000100 Australasia
# 00000200 Eurasia
# 00000400 South Polar
# 00000800 [AOU]
#
# 00010000 [Asia]
# 00020000 Atlantic Ocean
# 00040000 Pacific Ocean
# 00080000 Indian Ocean
#
# 00100000 [Oceanic]
# 00200000 North America
# 00400000 South America
# 00800000 Africa
#
# 01000000 [ABA Area]
# 02000000 [Canada]
# 04000000 [US]
# 08000000 [Lower 48]
#
# 10000000 [West Indies]
# 20000000 [Mexico]
# 40000000 [Central America]
# 80000000 [Western Palearctic]
header = sighting_file.read(111) # Read a 111 byte record
marker = int.from_bytes(header[0:4],'little')
corruptRecords = 0
EXPORT_FILE += outputType+'.csv'
try:
CSV = open(EXPORT_FILE,'w', newline='')
except PermissionError:
print('Denied permission to open',EXPORT_FILE,'-- Maybe it is open in another program? If so, close it and try again.')
raise SystemExit
except:
print('Error opening',EXPORT_FILE,'--',sys.exc_info()[1])
raise SystemExit
try:
noteOut = open(NOTE_OUTPUT,'w', newline='')
except PermissionError:
print('Denied permission to open',NOTE_OUTPUT,'-- Maybe it is open in another program? If so, close it and try again,')
raise SystemExit
except:
print('Error opening',NOTE_OUTPUT,'--',sys.exc_info()[1])
nrecs = int.from_bytes(header[8:12],"little")
reclen = header[12]
if reclen != 111:
print('Record length is', reclen, 'expecting it to be 111.')
raise SystemExit
recordCount = 0
while True:
sighting = sighting_file.read(111)
if not sighting:
break
recordCount+=1
corruptPointer = int.from_bytes(sighting[0:4],'little')
corruptedRecord = corruptPointer != 0
speciesNo = int.from_bytes(sighting[4:6],'little')
fieldnote = int.from_bytes(sighting[6:10],'little')
if fieldnote:
block = NoteBlock(FNotes,noteIndex[fieldnote])
fieldnoteText = block.extract()
noteDict[recordCount] = fieldnoteText
else:
fieldnoteText = ''
fieldnoteText = fieldnoteText.rstrip(' \n')
date = int.from_bytes(sighting[10:14],'little')
day = date % 100
month = (date // 100) % 100
year = (date // 10000) + 1930
date = str(month) + '/' + str(day) + '/' + str(year)
sortdate = str(year) + '-' + str(month).rjust(2,'0') + '-' + str(day).rjust(2,'0')
place = int.from_bytes(sighting[14:16],'little')
countryLen = sighting[16]
country = sighting[17:19].decode('Windows-1252')
commentLen = sighting[28]
shortComment = sighting[29:29+commentLen].decode('Windows-1252').strip()
comment = integrateNote(shortComment,fieldnoteText)
if outputType == 'eBird':
comment = comment.replace("\n"," ")
tally = int.from_bytes(sighting[109:111],'little')
if speciesNo in name:
commonName = name[speciesNo]
else:
commonName = '?'
if not corruptedRecord:
print("No name found for species number", speciesNo)
raise SystemExit
if place not in places:
if not corruptedRecord:
print("Place", place, "is not set")
raise SystemExit
else:
location = 'Unknown location'
else:
linkList = places[place].linklist
location = linkList[0] if linkList[0] != '' else \
linkList[1] if linkList[1] != '' else \
linkList[2] if linkList[2] != '' else \
linkList[3] if linkList[3] != '' else \
linkList[4] if linkList[4] != '' else \
linkList[5] if linkList[5] != '' else \
linkList[6]
if outputType == 'eBird' and location in association:
location = association[location].locationName # Use associated eBird location name instead of AviSys place name
if country == 'US':
state = stateCode[linkList[3]]
elif country == 'CA':
state = provinceCode[linkList[3]]
else:
state = ''
if corruptedRecord:
corruptRecords += 1
print('Corrupt record found:',commonName,location,date,state,country,comment)
else:
outArray.append([commonName,genusName[speciesNo],speciesName[speciesNo],tally,comment,location,sortdate,date,state,country,speciesNo,recordCount,shortComment])
def sortkey(array):
return array[6]
outArray.sort(key=sortkey)
if outputType == 'eBird':
csvFields = ['Common name','Genus','Species','Species Count','Species Comment','Location','Lat','Lng','Date','Start time','State','Country','Protocol','N. Observers','Duration','Complete','Distance','Area','Checklist comment','Important: Delete this header row before importing to eBird']
else:
csvFields = ['Common name','Genus','Species','Place','Date','Count','Comment','State','Nation','Blank','SpeciesNo']
CSVwriter = csv.DictWriter(CSV,fieldnames=csvFields)
CSVwriter.writeheader()
if outputType == 'eBird':
for row in outArray:
CSVwriter.writerow({'Common name':row[0],'Genus':row[1],'Species':row[2],'Species Count':row[3],'Species Comment':row[4],
'Location':row[5],'Lat':'','Lng':'','Date':row[7],'Start time':'','State':row[8],'Country':row[9],
'Protocol':'historical','N. Observers':1,'Duration':'','Complete':'N','Distance':'','Area':'','Checklist comment':'Imported from AviSys'})
else:
for row in outArray:
dateVal = row[6].split('-')
date = str(int(dateVal[1]))+'/'+str(int(dateVal[2]))+'/'+dateVal[0]
CSVwriter.writerow({'Common name':row[0],'Genus':row[1],'Species':row[2],'Place':row[5],'Date':date,'Count':row[3],'Comment':row[4],
'State':row[7],'Nation':row[8],'Blank':'','SpeciesNo':row[9]})
# Write all field notes to a file
# The entry for each note begins with species name -- date -- place on the first line, followed by a blank line.
# The text of the field note follows
# The note is terminated by a line of 80 equal signs (which is something that could not be part of the actual note).
# Note: If AviSys type output, the place is the AviSys place. If eBird type output, the associated eBird location, if any, is used as the place.
for row in outArray:
recordNo = row[11]
if recordNo in noteDict:
shortComment = row[12]
noteOut.write(row[0] +' -- '+ row[6] +' -- '+ row[5] + '\n\n')
if len(shortComment):
noteOut.write( 'Short comment: ' + shortComment + '\n\n')
noteOut.write(noteDict[recordNo] + '\n' + '==========================================================================================\n')
sighting_file.close()
noteOut.close()
CSV.close()
if recordCount != nrecs:
print('Should be', nrecs, 'records, but counted', recordCount)
else:
print(nrecs,"records processed")
if corruptRecords:
if corruptRecords == 1:
print('File', DATA_FILE, 'contains one corrupt record, which has been ignored. ')
print('To remove it from AviSys, run Utilities->Restructure sighting file.')
else:
print('File', DATA_FILE, 'contains', corruptRecords, 'corrupt records, which have been ignored. ')
print('To remove them from AviSys, run Utilities->Restructure sighting file.')
print(nrecs-corruptRecords, 'records are valid.')
| 31.746888 | 289 | 0.685139 | 2,265 | 0.09868 | 0 | 0 | 0 | 0 | 0 | 0 | 11,803 | 0.514225 |
b30aff42a0561262838decac18137273d9752c56 | 2,680 | py | Python | modele/Class.py | AntoineDelay/chess | 66dedf1c468a075bb202f85753caa075316dac28 | [
"MIT"
] | null | null | null | modele/Class.py | AntoineDelay/chess | 66dedf1c468a075bb202f85753caa075316dac28 | [
"MIT"
] | null | null | null | modele/Class.py | AntoineDelay/chess | 66dedf1c468a075bb202f85753caa075316dac28 | [
"MIT"
] | null | null | null |
class Case :
def __init__(self,x,y):
self.id = str(x)+','+str(y)
self.x = x
self.y = y
self.piece = None
def check_case(self):
"""renvoie la piece si la case est occupé,renvoie -1 sinon """
if(self.piece != None):
return self.piece
return -1
def affecter_piece(self,piece):
self.piece = piece
self.piece.case_affectation(self)
def desaffecter_piece(self):
if self.piece != None :
self.piece = None
def show(self):
if self.piece != None and self.piece.case != None :
return "| "+str(self.piece.point)+" |"
else :
return "| 0 |"
def get_piece(self):
return self.piece
def get_x(self):
return self.x
def get_y(self):
return self.y
class Board :
def __init__(self,liste_case):
self.board = liste_case
def get_case(self,x,y):
for i in range(len(self.board)):
if self.board[i].x == x and self.board[i].y == y:
return self.board[i]
return -1
def show_board(self):
x = 0
s_board = ""
for case in self.board :
if case.x > x :
s_board += "\n"
x+=1
s_board += case.show()
print(s_board)
class Piece :
def __init__(self,name,color,point,board):
self.name = name
self.color = color
self.point = point
self.case = None
self.board = board
self.depla = []
def possible_depla(self):
"""calcule les déplacement actuellement possible sans contrainte externe, resultat dans depla"""
pass
def case_affectation(self,case):
self.case = case
def get_depla(self):
return self.depla
class Pion(Piece) :
def __init__(self,color,board):
super().__init__('Pion',color,1,board)
def possible_depla(self):
id_case = str(self.case.get_x())+','+str(self.case.get_y()+1)
id_case_2 = None
if self.case.get_y() == 2 :
id_case_2 = str(self.case.get_x())+','+str(self.case.get_y()+1)
for case in self.board.board:
if case.id == id_case and case.piece == None :
self.depla.append(case)
if id_case_2 != None and case.id == id_case_2 and case.piece == None:
self.depla.append(case)
class Roi(Piece):
def __init__(self,color,board):
super().__init__('Roi',color,1000,board)
class Dame(Piece) :
def __init__(self,color,board):
super().__init__('Dame',color,9,board)
| 29.130435 | 104 | 0.538806 | 2,652 | 0.988814 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.077181 |
b30c782f3342ff4875fb902c643ddb150ecbbb77 | 1,106 | py | Python | bot/migrators/config_migrator.py | yukie-nobuharu/TTMediaBot | 9d34aadb1cfa41fcceee212931ff12526d3d137f | [
"MIT"
] | null | null | null | bot/migrators/config_migrator.py | yukie-nobuharu/TTMediaBot | 9d34aadb1cfa41fcceee212931ff12526d3d137f | [
"MIT"
] | null | null | null | bot/migrators/config_migrator.py | yukie-nobuharu/TTMediaBot | 9d34aadb1cfa41fcceee212931ff12526d3d137f | [
"MIT"
] | null | null | null | import sys
from bot.config import ConfigManager, config_data_type
def to_v1(config_data: config_data_type) -> config_data_type:
return update_version(config_data, 1)
migrate_functs = {1: to_v1}
def migrate(
config_manager: ConfigManager,
config_data: config_data_type,
) -> config_data_type:
if "config_version" not in config_data:
update_version(config_data, 0)
elif (
not isinstance(config_data["config_version"], int)
or config_data["config_version"] > config_manager.version
):
sys.exit("Error: invalid config_version value")
elif config_data["config_version"] == config_manager.version:
return config_data
else:
for ver in migrate_functs:
if ver > config_data["config_version"]:
config_data = migrate_functs[ver](config_data)
config_manager._dump(config_data)
return config_data
def update_version(config_data: config_data_type, version: int) -> config_data_type:
_config_data = {"config_version": version}
_config_data.update(config_data)
return _config_data
| 29.105263 | 84 | 0.712477 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.120253 |
b3100d65adacd7054c8de611ff54f81327ab317b | 5,201 | py | Python | grab_closest_rmsd.py | Miro-Astore/mdanalysis_scripts | faf59c7b3b63ab103a709941e5cc2e5d7c1d0b23 | [
"MIT"
] | 1 | 2021-06-16T11:34:29.000Z | 2021-06-16T11:34:29.000Z | grab_closest_rmsd.py | Miro-Astore/mdanalysis_scripts | faf59c7b3b63ab103a709941e5cc2e5d7c1d0b23 | [
"MIT"
] | null | null | null | grab_closest_rmsd.py | Miro-Astore/mdanalysis_scripts | faf59c7b3b63ab103a709941e5cc2e5d7c1d0b23 | [
"MIT"
] | 1 | 2021-06-16T11:34:31.000Z | 2021-06-16T11:34:31.000Z | import MDAnalysis as mda
import MDAnalysis.analysis.rms
import numpy as np
ref = mda.Universe ('./pca_2_ref.pdb')
traj_u = mda.Universe ('ionized.psf','sum.xtc')
ref_sel = "name CA and resid 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 391 392 393 394 395 396 397 398 399 400 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 
604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 
1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449"
R = MDAnalysis.analysis.rms.RMSD(traj_u,ref,select=ref_sel)
R.run()
closest_frame=np.argmin(R.rmsd[:,-1])
print ("closest frame to reference is " + str(closest_frame) + " with an RMSD of " + str(R.rmsd[closest_frame] ))
traj_u.trajectory[closest_frame]
write_sel=traj_u.select_atoms('all')
write_sel.write ('closest_to_ref.pdb')
| 305.941176 | 4,705 | 0.766583 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,810 | 0.924822 |
b3106616365026d8efdce59e1af5c72f5a869234 | 257 | py | Python | Dynamic Programming/416. Partition Equal Subset Sum/Python Solution/Solution.py | lionelsamrat10/LeetCode-Solutions | 47f5c94995225b875b1eb0e92c5f643bec646a86 | [
"MIT"
] | 9 | 2021-03-24T11:21:03.000Z | 2022-02-14T05:05:48.000Z | Dynamic Programming/416. Partition Equal Subset Sum/Python Solution/Solution.py | lionelsamrat10/LeetCode-Solutions | 47f5c94995225b875b1eb0e92c5f643bec646a86 | [
"MIT"
] | 38 | 2021-10-07T18:04:12.000Z | 2021-12-05T05:53:27.000Z | Dynamic Programming/416. Partition Equal Subset Sum/Python Solution/Solution.py | lionelsamrat10/LeetCode-Solutions | 47f5c94995225b875b1eb0e92c5f643bec646a86 | [
"MIT"
] | 27 | 2021-10-06T19:55:48.000Z | 2021-11-18T16:53:20.000Z | class Solution:
def canPartition(self, nums: List[int]) -> bool:
dp, s = set([0]), sum(nums)
if s&1:
return False
for num in nums:
dp.update([v+num for v in dp if v+num <= s>>1])
return s>>1 in dp
| 28.555556 | 59 | 0.498054 | 256 | 0.996109 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b310942ec473743eb33ea648c960c10e28fedd79 | 712 | py | Python | self_paced_ensemble/canonical_resampling/__init__.py | thulio/self-paced-ensemble | 0270edecc2f88783e2f4657510e089c2cacfcabe | [
"MIT"
] | 203 | 2019-06-04T07:43:25.000Z | 2022-03-30T22:16:32.000Z | self_paced_ensemble/canonical_resampling/__init__.py | thulio/self-paced-ensemble | 0270edecc2f88783e2f4657510e089c2cacfcabe | [
"MIT"
] | 14 | 2020-02-26T09:42:46.000Z | 2022-01-11T12:25:16.000Z | self_paced_ensemble/canonical_resampling/__init__.py | thulio/self-paced-ensemble | 0270edecc2f88783e2f4657510e089c2cacfcabe | [
"MIT"
] | 46 | 2019-11-25T01:13:31.000Z | 2021-12-29T06:49:07.000Z | """
--------------------------------------------------------------------------
The `self_paced_ensemble.canonical_resampling` module implement a
resampling-based classifier for imbalanced classification.
15 resampling algorithms are included:
'RUS', 'CNN', 'ENN', 'NCR', 'Tomek', 'ALLKNN', 'OSS',
'NM', 'CC', 'SMOTE', 'ADASYN', 'BorderSMOTE', 'SMOTEENN',
'SMOTETomek', 'ORG'.
Note: the implementation of these resampling algorithms is based on
imblearn python package.
See https://github.com/scikit-learn-contrib/imbalanced-learn.
--------------------------------------------------------------------------
"""
from .canonical_resampling import ResampleClassifier
__all__ = [
"ResampleClassifier",
]
| 32.363636 | 74 | 0.585674 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 635 | 0.891854 |
b31196095333800e20e0d5857b88125a542e315e | 3,902 | py | Python | omsdk/sdkps.py | DanielFroehlich/omsdk | 475d925e4033104957fdc64480fe8f9af0ab6b8a | [
"Apache-2.0"
] | 61 | 2018-02-21T00:02:20.000Z | 2022-01-26T03:47:19.000Z | omsdk/sdkps.py | DanielFroehlich/omsdk | 475d925e4033104957fdc64480fe8f9af0ab6b8a | [
"Apache-2.0"
] | 31 | 2018-03-24T05:43:39.000Z | 2022-03-16T07:10:37.000Z | omsdk/sdkps.py | DanielFroehlich/omsdk | 475d925e4033104957fdc64480fe8f9af0ab6b8a | [
"Apache-2.0"
] | 25 | 2018-03-13T10:06:12.000Z | 2022-01-26T03:47:21.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Copyright © 2018 Dell Inc. or its subsidiaries. All rights reserved.
# Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
# Other trademarks may be trademarks of their respective owners.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Vaideeswaran Ganesan
#
import subprocess
import io
from xml.dom.minidom import parse
import xml.dom.minidom
import json
import logging
logger = logging.getLogger(__name__)
class PsShell:
def __init__(self):
pass
def execute(self, cmd):
logger.debug("Executing: " + cmd)
proc = subprocess.Popen(["powershell", "-outputformat", "XML", "-command", "" + cmd + ""],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
wrapper = io.TextIOWrapper(proc.stdout, encoding="utf-8")
t = wrapper.readline()
output = io.StringIO()
for line in wrapper:
tt = line.rstrip()
output.write(tt)
data_json = {}
if output.getvalue() == "":
return data_json
domtree = xml.dom.minidom.parseString(output.getvalue())
collection = domtree.documentElement
counter = 0
for obj in collection.childNodes:
counter = counter + 1
mydata = self.print_objx("", obj)
name = "name" + str(counter)
if "ToString" in mydata:
name = mydata["ToString"]
if "f2" in mydata:
value = mydata["f2"]
if not name in data_json:
data_json[name] = []
data_json[name].append(value)
for name in data_json:
if len(data_json[name]) == 0:
data_json[name] = None
elif len(data_json[name]) == 1:
data_json[name] = data_json[name][0]
return data_json
def print_objx(self, n, obj):
tst = {}
counter = 0
if obj.hasAttributes():
for i in range(0, obj.attributes.length):
attr = obj.attributes.item(i)
tst[attr.name] = attr.value
for objns in obj.childNodes:
if objns.nodeType == objns.ELEMENT_NODE:
# empty node
if objns.firstChild == None:
counter = counter + 1
tst["f" + str(counter)] = objns.firstChild
elif objns.firstChild.nodeType == objns.firstChild.TEXT_NODE:
var = objns.getAttribute("N")
if var is None or var == "":
var = objns.tagName
if objns.tagName == "ToString":
tst[objns.tagName] = objns.firstChild.data
else:
tst[var] = objns.firstChild.data
else:
k = self.print_objx(n + " ", objns)
var = objns.getAttribute("N")
if var is None or var == "":
counter = counter + 1
var = "f" + str(counter)
tst[var] = k
# counter = counter - 1
else:
logger.debug(">>> not element>>" + str(objns.tagName))
return tst
| 36.811321 | 99 | 0.535879 | 2,865 | 0.734051 | 0 | 0 | 0 | 0 | 0 | 0 | 1,041 | 0.266718 |
b311980089ca2553183d98a9aa947954a5430613 | 3,166 | py | Python | popita/location/serializers.py | gpiechnik2/popita | ad044f0884d1dcac6943e036187d867d5209ffd1 | [
"Apache-2.0"
] | null | null | null | popita/location/serializers.py | gpiechnik2/popita | ad044f0884d1dcac6943e036187d867d5209ffd1 | [
"Apache-2.0"
] | null | null | null | popita/location/serializers.py | gpiechnik2/popita | ad044f0884d1dcac6943e036187d867d5209ffd1 | [
"Apache-2.0"
] | null | null | null | from rest_framework import serializers
from djoser.serializers import UserSerializer
from math import cos, asin, sqrt, pi
from accounts.models import User
from .models import Localization
class UserInfoSerializer(UserSerializer):
class Meta:
model = User
exclude = ('email', 'password', 'is_superuser', 'last_name', 'is_staff', 'date_joined', 'groups', 'user_permissions', 'last_login', 'is_active', 'gender', 'background_color', 'job', 'preferred_drink', 'description')
class LocalizationSerializer(serializers.ModelSerializer):
user = UserInfoSerializer(many = False, read_only = True)
timestamp = serializers.DateTimeField(format = '%Y-%m-%d %H:%m', input_formats = None, read_only = True)
location = serializers.CharField(required = True)
class Meta:
model = Localization
fields = ['id', 'user', 'longitude', 'latitude', 'attitude', 'location', 'timestamp']
def create(self, request):
user = self.context['request'].user
longitude = request['longitude']
latitude = request['latitude']
attitude = request['attitude']
location = request['location']
last_localization = Localization.objects.filter(user = user)
if not last_localization:
current_localization = Localization.objects.create(
user = user,
longitude = longitude,
latitude = latitude,
attitude = attitude,
location = location
)
else:
current_localization = last_localization[0]
current_localization.longitude = longitude
current_localization.latitude = latitude
current_localization.attitude = attitude
current_localization.location = location
current_localization.save()
return current_localization
def validate_user(self, validated_data):
if not User.objects.filter(email = str(validated_data)):
raise serializers.ValidationError('User must be in database.')
return validated_data
#change receivers
def to_representation(self, instance):
ret = super(LocalizationSerializer, self).to_representation(instance)
# check the request is list view or detail view
is_list_view = isinstance(self.instance, list)
if is_list_view:
#user coordinates
latitude = instance.latitude
longitude = instance.longitude
#your coordinates
user = self.context['request'].user
self_localization = Localization.objects.filter(user = user)[0]
self_latitude = self_localization.latitude
self_longitude = self_localization.longitude
#check distance
p = pi / 180
a = 0.5 - cos((self_latitude - latitude) * p) / 2 + cos(latitude * p) * cos(self_latitude * p) * (1 - cos((self_longitude - longitude) * p)) / 2
distance = 12742 * asin(sqrt(a)) #2*R*asin...
extra_ret = {
"distance" : distance,
}
ret.update(extra_ret)
return ret
| 34.043011 | 223 | 0.628869 | 2,973 | 0.93904 | 0 | 0 | 0 | 0 | 0 | 0 | 474 | 0.149716 |
b312241b809985558a61adf947e84c53f8e2bb9c | 997 | py | Python | python/ql/test/query-tests/Security/CWE-089/sql_injection.py | p-snft/ql | 6243c722c6d18f152fe47d2c800540d5bc5c3c3f | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2021-07-12T09:23:48.000Z | 2021-10-04T10:05:46.000Z | python/ql/test/query-tests/Security/CWE-089/sql_injection.py | p-snft/ql | 6243c722c6d18f152fe47d2c800540d5bc5c3c3f | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2019-02-21T16:20:02.000Z | 2019-05-01T12:10:05.000Z | python/ql/test/query-tests/Security/CWE-089/sql_injection.py | p-snft/ql | 6243c722c6d18f152fe47d2c800540d5bc5c3c3f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from django.conf.urls import patterns, url
from django.db import connection, models
from django.db.models.expressions import RawSQL
class Name(models.Model):
pass
def save_name(request):
    """View that inserts a POSTed name via raw SQL.

    This appears to be a static-analysis test fixture (note the GOOD/BAD
    markers): the #BAD cases intentionally build SQL by string formatting
    from request data so a SQL-injection query can flag them. Do not "fix"
    the vulnerable lines -- they are the point of the test.
    """
    if request.method == 'POST':
        name = request.POST.get('name')
        curs = connection.cursor()
        #GOOD -- Using parameters
        curs.execute(
            "insert into names_file ('name') values ('%s')", name)
        #BAD -- Using string formatting
        curs.execute(
            "insert into names_file ('name') values ('%s')" % name)
        #BAD -- other ways of executing raw SQL code with string interpolation
        Name.objects.annotate(RawSQL("insert into names_file ('name') values ('%s')" % name))
        Name.objects.raw("insert into names_file ('name') values ('%s')" % name)
        Name.objects.extra("insert into names_file ('name') values ('%s')" % name)
# Route exposing the view above at /save_name/.
urlpatterns = patterns(url(r'^save_name/$',
                       save_name, name='save_name'))
| 34.37931 | 93 | 0.620863 | 34 | 0.034102 | 0 | 0 | 0 | 0 | 0 | 0 | 399 | 0.400201 |
b312c7d194c215be37c90d51e5fc14dc91cf0b28 | 3,325 | py | Python | qdev_wrappers/transmon/sweep_helpers.py | GateBuilder/qdev-wrappers | 2f4cfbad74d40d5bdb13dd68feec5ad319b209c5 | [
"MIT"
] | 13 | 2018-01-10T09:32:50.000Z | 2022-01-24T00:15:59.000Z | qdev_wrappers/transmon/sweep_helpers.py | GateBuilder/qdev-wrappers | 2f4cfbad74d40d5bdb13dd68feec5ad319b209c5 | [
"MIT"
] | 122 | 2017-11-01T10:17:50.000Z | 2020-09-28T06:33:39.000Z | qdev_wrappers/transmon/sweep_helpers.py | GateBuilder/qdev-wrappers | 2f4cfbad74d40d5bdb13dd68feec5ad319b209c5 | [
"MIT"
] | 30 | 2017-10-31T12:25:32.000Z | 2022-03-20T04:43:37.000Z | import qcodes as qc
from qdev_wrappers.sweep_functions import _do_measurement, _do_measurement_single, \
_select_plottables
def measure(meas_param, do_plots=True):
    """Measure a single parameter, optionally plotting the result.

    Args:
        meas_param: parameter whose value is recorded
        do_plots: Default True. If False no plots are produced; data is
            still saved and can be displayed with show_num.

    Returns:
        data (qcodes dataset)
        plot: QT plot
    """
    plottables = _select_plottables(meas_param)
    single_measurement = qc.Measure(meas_param)
    plot, data = _do_measurement_single(single_measurement, plottables,
                                        do_plots=do_plots)
    return data, plot
def sweep1d(meas_param, sweep_param, start, stop, step, delay=0.01,
            do_plots=True):
    """Run a one-dimensional sweep, optionally plotting the results.

    Args:
        meas_param: parameter which we want the value of at each point
        sweep_param: parameter to be swept (default on x axis)
        start: starting value for sweep_param
        stop: final value for sweep_param
        step: value to step sweep_param by
        delay (default 0.01): minimum time to spend on each point
        do_plots: Default True. If False no plots are produced; data is
            still saved and can be displayed with show_num.

    Returns:
        data (qcodes dataset)
        plot: QT plot
    """
    sweep_values = sweep_param.sweep(start, stop, step)
    loop = qc.Loop(sweep_values, delay).each(meas_param)
    plottables = _select_plottables(meas_param)
    swept = ((sweep_param, start, stop),)
    plot, data = _do_measurement(loop, swept, plottables,
                                 do_plots=do_plots)
    return data, plot
def sweep2d(meas_param, sweep_param1, start1, stop1, step1,
            sweep_param2, start2, stop2, step2, delay=0.01,
            do_plots=True):
    """Run a two-dimensional sweep, optionally plotting the results.

    sweep_param1 is swept in the outer loop (default on y axis) and
    sweep_param2 in the inner loop (default on x axis).

    Args:
        meas_param: parameter which we want the value of at each point
        sweep_param1: outer-loop parameter
        start1, stop1, step1: range and step for sweep_param1
        sweep_param2: inner-loop parameter
        start2, stop2, step2: range and step for sweep_param2
        delay (default 0.01): minimum time to spend on each point
        do_plots: Default True. If False no plots are produced; data is
            still saved and can be displayed with show_num.

    Returns:
        data (qcodes dataset)
        plot: QT plot
    """
    inner_values = sweep_param2.sweep(start2, stop2, step2)
    outer_values = sweep_param1.sweep(start1, stop1, step1)
    inner_loop = qc.Loop(inner_values, delay).each(meas_param)
    outer_loop = qc.Loop(outer_values, delay).each(inner_loop)
    plottables = _select_plottables(meas_param)
    swept = ((sweep_param1, start1, stop1),
             (sweep_param2, start2, stop2))
    plot, data = _do_measurement(outer_loop, swept, plottables,
                                 do_plots=do_plots)
    return data, plot
| 33.928571 | 84 | 0.663459 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,866 | 0.561203 |
b312fd40137f1f88994faa7dd28816b492eac96b | 18,544 | py | Python | ocrj/misc/nihongo.py | eggplants/OCR_Japanease | 15ed5df66a66047fa877ded2e935f7b2fb006c2b | [
"MIT"
] | 2 | 2021-08-08T07:14:36.000Z | 2021-08-08T07:25:43.000Z | ocrj/misc/nihongo.py | eggplants/ocrj | 15ed5df66a66047fa877ded2e935f7b2fb006c2b | [
"MIT"
] | null | null | null | ocrj/misc/nihongo.py | eggplants/ocrj | 15ed5df66a66047fa877ded2e935f7b2fb006c2b | [
"MIT"
] | null | null | null | import string
# Hiragana syllabary, including voiced (dakuten) and semi-voiced
# (handakuten) forms.
hiragana = \
['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し', 'す', 'せ', 'そ',
 'た', 'ち', 'つ', 'て', 'と', 'な', 'に', 'ぬ', 'ね', 'の', 'は', 'ひ', 'ふ', 'へ', 'ほ',
 'ま', 'み', 'む', 'め', 'も', 'ら', 'り', 'る', 'れ', 'ろ', 'が', 'ぎ', 'ぐ', 'げ', 'ご',
 'ざ', 'じ', 'ず', 'ぜ', 'ぞ', 'だ', 'ぢ', 'づ', 'で', 'ど', 'ば', 'び', 'ぶ', 'べ', 'ぼ',
 'ぱ', 'ぴ', 'ぷ', 'ぺ', 'ぽ', 'や', 'ゆ', 'よ', 'わ', 'を', 'ん']
# Katakana syllabary (same layout as hiragana above) plus the long-vowel
# mark 'ー'.
katakana = \
['ア', 'イ', 'ウ', 'エ', 'オ', 'カ', 'キ', 'ク', 'ケ', 'コ', 'サ', 'シ', 'ス', 'セ', 'ソ',
 'タ', 'チ', 'ツ', 'テ', 'ト', 'ナ', 'ニ', 'ヌ', 'ネ', 'ノ', 'ハ', 'ヒ', 'フ', 'ヘ', 'ホ',
 'マ', 'ミ', 'ム', 'メ', 'モ', 'ラ', 'リ', 'ル', 'レ', 'ロ', 'ガ', 'ギ', 'グ', 'ゲ', 'ゴ',
 'ザ', 'ジ', 'ズ', 'ゼ', 'ゾ', 'ダ', 'ヂ', 'ヅ', 'デ', 'ド', 'バ', 'ビ', 'ブ', 'ベ', 'ボ',
 'パ', 'ピ', 'プ', 'ペ', 'ポ', 'ヤ', 'ユ', 'ヨ', 'ワ', 'ヲ', 'ン', 'ー']
# ASCII character classes derived from the stdlib ``string`` module.
alphabet_upper = list(string.ascii_uppercase)
alphabet_lower = list(string.ascii_lowercase)
numetric = list(string.digits)  # sic: name kept as "numetric"; other code may reference it
alphabet_num = list(string.ascii_letters + string.digits)
# Punctuation and symbols (kigou); mixes ASCII and full-width forms.
kigou = \
['(', ')', '[', ']', '「', '」', '『', '』', '<', '>', '¥', '/', '÷', '*', '+',
 '×', '?', '=', '〜', '|', ':', ';', '。', '、', '.', ',']
# Jouyou kanji (the official list of kanji for everyday use). The list
# contains a few duplicated entries (e.g. '糸', '虫', '人') and some
# radicals ('艸', '鬥', '廾'), suggesting it was machine-assembled --
# TODO(review): confirm against the consuming model before deduplicating.
jyouyou_kanji = \
['亜', '哀', '挨', '愛', '曖', '悪', '握', '圧', '扱', '宛', '嵐', '安', '案', '暗', '以',
 '衣', '位', '囲', '医', '依', '委', '威', '為', '畏', '胃', '尉', '異', '移', '萎', '偉',
 '椅', '彙', '意', '違', '維', '慰', '遺', '緯', '域', '育', '一', '壱', '逸', '茨', '芋',
 '引', '印', '因', '咽', '姻', '員', '院', '淫', '陰', '飲', '隠', '韻', '右', '宇', '羽',
 '雨', '唄', '鬱', '畝', '浦', '運', '雲', '永', '泳', '英', '映', '栄', '営', '詠', '影',
 '鋭', '衛', '易', '疫', '益', '液', '駅', '悦', '越', '謁', '閲', '円', '延', '沿', '炎',
 '怨', '宴', '媛', '援', '園', '煙', '猿', '遠', '鉛', '塩', '演', '縁', '艶', '汚', '王',
 '凹', '央', '応', '往', '押', '旺', '欧', '殴', '桜', '翁', '奥', '横', '岡', '屋', '億',
 '憶', '臆', '虞', '乙', '俺', '卸', '音', '恩', '温', '穏', '下', '化', '火', '加', '可',
 '仮', '何', '花', '佳', '価', '果', '河', '苛', '科', '架', '夏', '家', '荷', '華', '菓',
 '貨', '渦', '過', '嫁', '暇', '禍', '靴', '寡', '歌', '箇', '稼', '課', '蚊', '牙', '瓦',
 '我', '画', '芽', '賀', '雅', '餓', '介', '回', '灰', '会', '快', '戒', '改', '怪', '拐',
 '悔', '海', '界', '皆', '械', '絵', '開', '階', '塊', '楷', '解', '潰', '壊', '懐', '諧',
 '貝', '外', '劾', '害', '崖', '涯', '街', '慨', '蓋', '該', '概', '骸', '垣', '柿', '各',
 '角', '拡', '革', '格', '核', '殻', '郭', '覚', '較', '隔', '閣', '確', '獲', '嚇', '穫',
 '学', '岳', '楽', '額', '顎', '掛', '潟', '括', '活', '喝', '渇', '割', '葛', '滑', '褐',
 '轄', '且', '株', '釜', '鎌', '刈', '干', '刊', '甘', '汗', '缶', '完', '肝', '官', '冠',
 '巻', '看', '陥', '乾', '勘', '患', '貫', '寒', '喚', '堪', '換', '敢', '棺', '款', '間',
 '閑', '勧', '寛', '幹', '感', '漢', '慣', '管', '関', '歓', '監', '緩', '憾', '還', '館',
 '環', '簡', '観', '韓', '艦', '鑑', '丸', '含', '岸', '岩', '玩', '眼', '頑', '顔', '願',
 '企', '伎', '危', '机', '気', '岐', '希', '忌', '汽', '奇', '祈', '季', '紀', '軌', '既',
 '无', '記', '起', '飢', '鬼', '帰', '基', '寄', '規', '亀', '喜', '幾', '揮', '期', '棋',
 '貴', '棄', '毀', '旗', '器', '畿', '輝', '機', '騎', '技', '宜', '偽', '欺', '義', '疑',
 '儀', '戯', '擬', '犠', '議', '菊', '吉', '喫', '詰', '却', '客', '脚', '逆', '虐', '九',
 '久', '及', '弓', '丘', '旧', '休', '吸', '朽', '臼', '求', '究', '泣', '急', '級', '糾',
 '宮', '救', '球', '給', '嗅', '窮', '牛', '去', '巨', '居', '拒', '拠', '挙', '虚', '許',
 '距', '魚', '御', '漁', '凶', '共', '叫', '狂', '京', '享', '供', '協', '況', '峡', '挟',
 '狭', '恐', '恭', '胸', '脅', '強', '教', '郷', '境', '橋', '矯', '鏡', '競', '響', '驚',
 '仰', '暁', '業', '凝', '曲', '局', '極', '玉', '巾', '斤', '均', '近', '金', '菌', '勤',
 '琴', '筋', '僅', '禁', '緊', '錦', '謹', '襟', '吟', '銀', '区', '句', '苦', '駆', '具',
 '惧', '愚', '空', '偶', '遇', '隅', '串', '屈', '掘', '窟', '熊', '繰', '君', '訓', '勲',
 '薫', '軍', '郡', '群', '兄', '刑', '形', '系', '径', '茎', '係', '型', '契', '計', '恵',
 '啓', '掲', '渓', '経', '蛍', '敬', '景', '軽', '傾', '携', '継', '詣', '慶', '憬', '稽',
 '憩', '警', '鶏', '芸', '艸', '迎', '鯨', '隙', '劇', '撃', '激', '桁', '欠', '穴', '血',
 '決', '結', '傑', '潔', '月', '犬', '件', '見', '券', '肩', '建', '研', '県', '倹', '兼',
 '剣', '拳', '軒', '健', '険', '圏', '堅', '検', '嫌', '献', '絹', '遣', '権', '憲', '賢',
 '謙', '鍵', '繭', '顕', '験', '懸', '元', '幻', '玄', '言', '弦', '限', '原', '現', '舷',
 '減', '源', '厳', '己', '戸', '古', '呼', '固', '股', '虎', '孤', '弧', '故', '枯', '個',
 '庫', '湖', '雇', '誇', '鼓', '錮', '顧', '五', '互', '午', '呉', '後', '娯', '悟', '碁',
 '語', '誤', '護', '口', '工', '公', '勾', '孔', '功', '巧', '広', '甲', '交', '光', '向',
 '后', '好', '江', '考', '行', '坑', '孝', '抗', '攻', '更', '効', '幸', '拘', '肯', '侯',
 '厚', '恒', '洪', '皇', '紅', '荒', '郊', '香', '候', '校', '耕', '航', '貢', '降', '高',
 '康', '控', '梗', '黄', '喉', '慌', '港', '硬', '絞', '項', '溝', '鉱', '構', '綱', '酵',
 '稿', '興', '衡', '鋼', '講', '購', '乞', '号', '合', '拷', '剛', '傲', '豪', '克', '告',
 '谷', '刻', '国', '黒', '穀', '酷', '獄', '骨', '駒', '込', '頃', '今', '困', '昆', '恨',
 '根', '婚', '混', '痕', '紺', '魂', '墾', '懇', '左', '佐', '沙', '査', '砂', '唆', '差',
 '詐', '鎖', '座', '挫', '才', '再', '災', '妻', '采', '砕', '宰', '栽', '彩', '採', '済',
 '祭', '斎', '細', '菜', '最', '裁', '債', '催', '塞', '歳', '載', '際', '埼', '在', '材',
 '剤', '財', '罪', '崎', '作', '削', '昨', '柵', '索', '策', '酢', '搾', '錯', '咲', '冊',
 '札', '刷', '刹', '拶', '殺', '察', '撮', '擦', '雑', '皿', '三', '山', '参', '桟', '蚕',
 '惨', '産', '傘', '散', '算', '酸', '賛', '残', '斬', '暫', '士', '子', '支', '止', '氏',
 '仕', '史', '司', '四', '市', '矢', '旨', '死', '糸', '糸', '至', '伺', '志', '私', '使',
 '刺', '始', '姉', '枝', '祉', '肢', '姿', '思', '指', '施', '師', '恣', '紙', '脂', '視',
 '紫', '詞', '歯', '嗣', '試', '詩', '資', '飼', '誌', '雌', '摯', '賜', '諮', '示', '字',
 '寺', '次', '耳', '自', '似', '児', '事', '侍', '治', '持', '時', '滋', '慈', '辞', '磁',
 '餌', '璽', '鹿', '式', '識', '軸', '七', '𠮟', '失', '室', '疾', '執', '湿', '嫉', '漆',
 '質', '実', '芝', '写', '社', '車', '舎', '者', '射', '捨', '赦', '斜', '煮', '遮', '謝',
 '邪', '蛇', '勺', '尺', '借', '酌', '釈', '爵', '若', '弱', '寂', '手', '主', '守', '朱',
 '取', '狩', '首', '殊', '珠', '酒', '腫', '種', '趣', '寿', '受', '呪', '授', '需', '儒',
 '樹', '収', '囚', '州', '舟', '秀', '周', '宗', '拾', '秋', '臭', '修', '袖', '終', '羞',
 '習', '週', '就', '衆', '集', '愁', '酬', '醜', '蹴', '襲', '十', '汁', '充', '住', '柔',
 '重', '従', '渋', '銃', '獣', '縦', '叔', '祝', '宿', '淑', '粛', '縮', '塾', '熟', '出',
 '述', '術', '俊', '春', '瞬', '旬', '巡', '盾', '准', '殉', '純', '循', '順', '準', '潤',
 '遵', '処', '初', '所', '書', '庶', '暑', '署', '緒', '諸', '女', '如', '助', '序', '叙',
 '徐', '除', '小', '升', '少', '召', '匠', '床', '抄', '肖', '尚', '招', '承', '昇', '松',
 '沼', '昭', '宵', '将', '消', '症', '祥', '称', '笑', '唱', '商', '渉', '章', '紹', '訟',
 '勝', '掌', '晶', '焼', '焦', '硝', '粧', '詔', '証', '言', '象', '傷', '奨', '照', '詳',
 '彰', '障', '憧', '衝', '賞', '償', '礁', '鐘', '上', '丈', '冗', '条', '状', '乗', '城',
 '浄', '剰', '常', '情', '場', '畳', '蒸', '縄', '壌', '嬢', '錠', '譲', '醸', '色', '拭',
 '食', '植', '殖', '飾', '触', '嘱', '織', '職', '辱', '尻', '心', '申', '伸', '臣', '芯',
 '身', '辛', '侵', '信', '津', '神', '唇', '娠', '振', '浸', '真', '針', '深', '紳', '進',
 '森', '診', '寝', '慎', '新', '審', '震', '薪', '親', '人', '刃', '仁', '尽', '迅', '甚',
 '陣', '尋', '腎', '須', '図', '水', '吹', '垂', '炊', '帥', '粋', '衰', '推', '酔', '遂',
 '睡', '穂', '錘', '随', '髄', '枢', '崇', '数', '据', '杉', '裾', '寸', '瀬', '是', '井',
 '世', '正', '生', '成', '西', '声', '制', '姓', '征', '性', '青', '斉', '政', '星', '牲',
 '省', '凄', '逝', '清', '盛', '婿', '晴', '勢', '聖', '誠', '精', '製', '誓', '静', '請',
 '整', '醒', '税', '夕', '斥', '石', '赤', '昔', '析', '席', '脊', '隻', '惜', '戚', '責',
 '跡', '積', '績', '籍', '切', '折', '拙', '窃', '接', '設', '雪', '摂', '節', '説', '舌',
 '絶', '千', '川', '仙', '占', '先', '宣', '専', '泉', '浅', '洗', '染', '扇', '栓', '旋',
 '船', '戦', '煎', '羨', '腺', '詮', '践', '箋', '銭', '銑', '潜', '線', '遷', '選', '薦',
 '繊', '鮮', '全', '前', '善', '然', '禅', '漸', '膳', '繕', '狙', '阻', '祖', '租', '素',
 '措', '粗', '組', '疎', '訴', '塑', '遡', '礎', '双', '壮', '早', '争', '走', '奏', '相',
 '荘', '草', '送', '倉', '捜', '挿', '桑', '巣', '掃', '曹', '曽', '爽', '窓', '創', '喪',
 '痩', '葬', '装', '僧', '想', '層', '総', '遭', '槽', '踪', '操', '燥', '霜', '騒', '藻',
 '造', '像', '増', '憎', '蔵', '贈', '臓', '即', '束', '足', '促', '則', '息', '捉', '速',
 '側', '測', '俗', '族', '属', '賊', '続', '卒', '率', '存', '村', '孫', '尊', '損', '遜',
 '他', '多', '汰', '打', '妥', '唾', '堕', '惰', '駄', '太', '対', '体', '人', '耐', '待',
 '怠', '胎', '退', '帯', '泰', '堆', '袋', '逮', '替', '貸', '隊', '滞', '態', '戴', '大',
 '代', '台', '口', '第', '題', '滝', '宅', '択', '沢', '卓', '拓', '託', '濯', '諾', '濁',
 '但', '達', '脱', '奪', '棚', '誰', '丹', '旦', '担', '単', '炭', '胆', '探', '淡', '短',
 '嘆', '端', '綻', '誕', '鍛', '団', '男', '段', '断', '弾', '暖', '談', '壇', '地', '池',
 '知', '値', '恥', '致', '遅', '痴', '稚', '置', '緻', '竹', '畜', '逐', '蓄', '築', '秩',
 '窒', '茶', '着', '嫡', '中', '仲', '虫', '虫', '沖', '宙', '忠', '抽', '注', '昼', '柱',
 '衷', '酎', '鋳', '駐', '著', '貯', '丁', '弔', '庁', '兆', '町', '長', '挑', '帳', '張',
 '彫', '眺', '釣', '頂', '鳥', '朝', '脹', '貼', '超', '腸', '跳', '徴', '嘲', '潮', '澄',
 '調', '聴', '懲', '直', '勅', '捗', '沈', '珍', '朕', '陳', '賃', '鎮', '追', '椎', '墜',
 '通', '痛', '塚', '漬', '坪', '爪', '鶴', '低', '呈', '廷', '弟', '定', '底', '抵', '邸',
 '亭', '貞', '帝', '訂', '庭', '逓', '停', '偵', '堤', '提', '程', '艇', '締', '諦', '泥',
 '的', '笛', '摘', '滴', '適', '敵', '溺', '迭', '哲', '鉄', '徹', '撤', '天', '典', '店',
 '点', '展', '添', '転', '塡', '田', '伝', '殿', '電', '斗', '吐', '妬', '徒', '途', '都',
 '渡', '塗', '賭', '土', '奴', '努', '度', '怒', '刀', '冬', '灯', '火', '当', '投', '豆',
 '東', '到', '逃', '倒', '凍', '唐', '島', '桃', '討', '透', '党', '悼', '盗', '陶', '塔',
 '搭', '棟', '湯', '痘', '登', '答', '等', '筒', '統', '稲', '踏', '糖', '頭', '謄', '藤',
 '闘', '鬥', '騰', '同', '洞', '胴', '動', '堂', '童', '道', '働', '銅', '導', '瞳', '峠',
 '匿', '特', '得', '督', '徳', '篤', '毒', '独', '読', '栃', '凸', '突', '届', '屯', '豚',
 '頓', '貪', '鈍', '曇', '丼', '那', '奈', '内', '梨', '謎', '鍋', '南', '軟', '難', '二',
 '尼', '弐', '匂', '肉', '虹', '日', '入', '乳', '尿', '任', '妊', '忍', '認', '寧', '熱',
 '年', '念', '捻', '粘', '燃', '悩', '納', '能', '脳', '農', '濃', '把', '波', '派', '破',
 '覇', '馬', '婆', '罵', '拝', '杯', '背', '肺', '俳', '配', '排', '敗', '廃', '輩', '売',
 '倍', '梅', '培', '陪', '媒', '買', '賠', '白', '伯', '拍', '泊', '迫', '剝', '舶', '博',
 '薄', '麦', '漠', '縛', '爆', '箱', '箸', '畑', '肌', '八', '鉢', '発', '髪', '伐', '抜',
 '罰', '閥', '反', '半', '氾', '犯', '帆', '汎', '伴', '判', '坂', '阪', '板', '版', '班',
 '畔', '般', '販', '斑', '飯', '搬', '煩', '頒', '範', '繁', '藩', '晩', '番', '蛮', '盤',
 '比', '皮', '妃', '否', '批', '彼', '披', '肥', '非', '卑', '飛', '疲', '秘', '被', '悲',
 '扉', '費', '碑', '罷', '避', '尾', '眉', '美', '備', '微', '鼻', '膝', '肘', '匹', '必',
 '泌', '筆', '姫', '百', '氷', '表', '俵', '票', '評', '漂', '標', '苗', '秒', '病', '描',
 '猫', '品', '浜', '水', '貧', '賓', '頻', '敏', '瓶', '不', '夫', '父', '付', '布', '扶',
 '府', '怖', '阜', '附', '訃', '負', '赴', '浮', '婦', '符', '富', '普', '腐', '敷', '膚',
 '賦', '譜', '侮', '武', '部', '舞', '封', '風', '伏', '服', '副', '幅', '復', '福', '腹',
 '複', '覆', '払', '沸', '仏', '物', '粉', '紛', '雰', '噴', '墳', '憤', '奮', '分', '文',
 '聞', '丙', '平', '兵', '併', '並', '柄', '陛', '閉', '塀', '幣', '弊', '蔽', '餅', '米',
 '壁', '璧', '癖', '別', '蔑', '片', '辺', '返', '変', '偏', '遍', '編', '弁', '廾', '便',
 '勉', '歩', '保', '哺', '捕', '補', '舗', '母', '募', '墓', '慕', '暮', '簿', '方', '包',
 '芳', '邦', '奉', '宝', '抱', '放', '法', '泡', '胞', '俸', '倣', '峰', '砲', '崩', '訪',
 '報', '蜂', '豊', '飽', '褒', '縫', '亡', '乏', '忙', '坊', '妨', '忘', '防', '房', '肪',
 '某', '冒', '剖', '紡', '望', '傍', '帽', '棒', '貿', '貌', '暴', '膨', '謀', '頰', '北',
 '木', '朴', '牧', '睦', '僕', '墨', '撲', '没', '勃', '堀', '本', '奔', '翻', '凡', '盆',
 '麻', '摩', '磨', '魔', '毎', '妹', '枚', '昧', '埋', '幕', '膜', '枕', '又', '末', '抹',
 '万', '満', '慢', '漫', '未', '味', '魅', '岬', '密', '蜜', '脈', '妙', '民', '眠', '矛',
 '務', '無', '夢', '霧', '娘', '名', '命', '明', '迷', '冥', '盟', '銘', '鳴', '滅', '免',
 '面', '綿', '麺', '茂', '模', '毛', '妄', '盲', '耗', '猛', '網', '目', '黙', '門', '紋',
 '問', '匁', '冶', '夜', '野', '弥', '厄', '役', '約', '訳', '薬', '躍', '闇', '由', '油',
 '喩', '愉', '諭', '輸', '癒', '唯', '友', '有', '勇', '幽', '悠', '郵', '湧', '猶', '裕',
 '遊', '雄', '誘', '憂', '融', '優', '与', '予', '余', '人', '誉', '預', '幼', '用', '羊',
 '妖', '洋', '要', '容', '庸', '揚', '揺', '葉', '陽', '溶', '腰', '様', '瘍', '踊', '窯',
 '養', '擁', '謡', '曜', '抑', '沃', '浴', '欲', '翌', '翼', '拉', '裸', '羅', '来', '雷',
 '頼', '絡', '落', '酪', '辣', '乱', '卵', '覧', '濫', '藍', '欄', '吏', '利', '里', '理',
 '痢', '裏', '履', '璃', '離', '陸', '立', '律', '慄', '略', '柳', '流', '留', '竜', '粒',
 '隆', '硫', '侶', '旅', '虜', '慮', '了', '両', '良', '料', '涼', '猟', '陵', '量', '僚',
 '領', '寮', '療', '瞭', '糧', '力', '緑', '林', '厘', '倫', '輪', '隣', '臨', '瑠', '涙',
 '累', '塁', '類', '令', '礼', '示', '冷', '励', '戻', '例', '鈴', '零', '霊', '隷', '齢',
 '麗', '暦', '歴', '列', '劣', '烈', '裂', '恋', '連', '廉', '練', '錬', '呂', '炉', '賂',
 '路', '露', '老', '労', '弄', '郎', '朗', '浪', '廊', '楼', '漏', '籠', '六', '録', '麓',
 '論', '和', '話', '賄', '脇', '惑', '枠', '湾', '腕']
# Full character inventory, in a fixed concatenation order.
nihongo = hiragana + katakana + alphabet_num + kigou + jyouyou_kanji
# Class list with an extra empty string prepended (presumably the
# blank/no-character class at index 0 -- TODO(review): confirm against
# the model that consumes it).
nihongo_class = ['']+nihongo
# Substitution rules for visually-confusable characters.
# Each tuple appears to follow the layout:
#   (recognised_char, replacement_char, prev_char_set, next_char_set)
# i.e. when a recognised character's neighbours fall in the given
# character classes, it is rewritten to the replacement (e.g. katakana
# 'リ' surrounded by hiragana becomes hiragana 'り'); None seems to mean
# "no constraint on that side".
# NOTE(review): inferred from the data only -- verify against the code
# that consumes filter_word before relying on this description.
filter_word = \
[('り', 'リ', katakana, katakana),
 ('リ', 'り', hiragana, hiragana),
 ('へ', 'ヘ', katakana, katakana),
 ('ヘ', 'へ', hiragana, hiragana),
 ('べ', 'ベ', katakana, katakana),
 ('ベ', 'べ', hiragana, hiragana),
 ('ぺ', 'ペ', katakana, katakana),
 ('ペ', 'ぺ', hiragana, hiragana),
 ('か', 'ガ', katakana, katakana),
 ('ガ', 'か', hiragana, hiragana),
 ('口', 'ロ', katakana, katakana),
 ('ロ', '口', jyouyou_kanji, jyouyou_kanji),
 ('工', 'エ', katakana, katakana),
 ('エ', '工', jyouyou_kanji, jyouyou_kanji),
 ('二', 'ニ', katakana, katakana),
 ('こ', 'ニ', katakana, katakana),
 ('ニ', '二', jyouyou_kanji, jyouyou_kanji),
 ('こ', '二', jyouyou_kanji, jyouyou_kanji),
 ('ニ', 'こ', hiragana, hiragana),
 ('二', 'こ', hiragana, hiragana),
 ('一', 'ー', katakana, katakana),
 ('ー', '一', jyouyou_kanji, jyouyou_kanji),
 ('七', 'セ', katakana, katakana),
 ('セ', '七', jyouyou_kanji, jyouyou_kanji),
 ('八', 'ハ', katakana, katakana),
 ('ハ', '八', jyouyou_kanji, jyouyou_kanji),
 ('力', 'カ', katakana, katakana),
 ('刀', 'カ', katakana, katakana),
 ('カ', '力', jyouyou_kanji, jyouyou_kanji),
 ('カ', '刀', jyouyou_kanji, jyouyou_kanji),
 ('干', 'チ', katakana, katakana),
 ('千', 'チ', katakana, katakana),
 ('チ', '干', jyouyou_kanji, jyouyou_kanji),
 ('チ', '千', jyouyou_kanji, jyouyou_kanji),
 ('手', 'キ', katakana, katakana),
 ('キ', '手', jyouyou_kanji, jyouyou_kanji),
 ('か', 'ガ', katakana, katakana),
 ('ガ', 'か', hiragana, hiragana),
 ('ホ', '木', jyouyou_kanji, jyouyou_kanji),
 ('木', 'ホ', katakana, katakana),
 ('J', 'ノ', katakana, katakana),
 ('ノ', 'J', alphabet_upper, alphabet_upper),
 ('ノ', 'J', None, alphabet_lower),
 ('j', 'ノ', katakana, katakana),
 ('ノ', 'j', alphabet_lower, alphabet_lower),
 ('T', '丁', jyouyou_kanji, jyouyou_kanji),
 ('丁', 'T', alphabet_upper, alphabet_upper),
 ('丁', 'T', None, alphabet_lower),
 ('T', 'イ', katakana, katakana),
 ('イ', 'T', alphabet_upper, alphabet_upper),
 ('イ', 'T', None, alphabet_lower),
 ('0', 'o', alphabet_lower+alphabet_upper, alphabet_lower),
 ('0', 'O', alphabet_upper, alphabet_upper),
 ('o', '0', numetric, numetric),
 ('O', '0', numetric, numetric),
 ('1', 'l', alphabet_lower+alphabet_upper, alphabet_lower),
 ('|', 'l', alphabet_lower+alphabet_upper, alphabet_lower),
 ('1', 'I', alphabet_upper, alphabet_upper),
 ('|', 'I', alphabet_upper, alphabet_upper),
 ('1', 'I', None, alphabet_lower),
 ('|', 'I', None, alphabet_lower),
 ('l', '1', numetric, numetric),
 ('I', '1', numetric, numetric),
 ('|', '1', numetric, numetric),
 ('ス', '又', jyouyou_kanji, jyouyou_kanji),
 ('又', 'ス', katakana, katakana),
 ('フ', '7', numetric, numetric),
 ('7', 'フ', katakana, katakana),
 ('5', 's', alphabet_lower+alphabet_upper, alphabet_lower),
 ('5', 'S', alphabet_upper, alphabet_upper),
 ('s', '5', numetric, numetric),
 ('S', '5', numetric, numetric),
 ('K', 'k', alphabet_lower, alphabet_lower),
 ('k', 'K', alphabet_upper, alphabet_upper),
 ('O', 'o', alphabet_lower, alphabet_lower),
 ('o', 'O', alphabet_upper, alphabet_upper),
 ('P', 'p', alphabet_lower, alphabet_lower),
 ('p', 'P', alphabet_upper, alphabet_upper),
 ('S', 's', alphabet_lower, alphabet_lower),
 ('s', 'S', alphabet_upper, alphabet_upper),
 ('U', 'u', alphabet_lower, alphabet_lower),
 ('u', 'U', alphabet_upper, alphabet_upper),
 ('V', 'v', alphabet_lower, alphabet_lower),
 ('v', 'V', alphabet_upper, alphabet_upper),
 ('V', 'v', alphabet_lower, alphabet_lower),
 ('v', 'V', alphabet_upper, alphabet_upper),
 ('W', 'w', alphabet_lower, alphabet_lower),
 ('w', 'W', alphabet_upper, alphabet_upper),
 ('X', 'x', alphabet_lower, alphabet_lower),
 ('x', 'X', alphabet_upper, alphabet_upper),
 ('Y', 'y', alphabet_lower, alphabet_lower),
 ('y', 'Y', alphabet_upper, alphabet_upper),
 ('Z', 'z', alphabet_lower, alphabet_lower),
 ('z', 'Z', alphabet_upper, alphabet_upper),
 ('十', '+', kigou, kigou),
 ('t', '+', kigou, kigou),
 ('メ', '+', kigou, kigou),
 ('+', '十', jyouyou_kanji, jyouyou_kanji),
 ('t', '十', jyouyou_kanji, jyouyou_kanji),
 ('メ', '十', jyouyou_kanji, jyouyou_kanji),
 ('+', 't', alphabet_lower+alphabet_upper, alphabet_lower),
 ('十', 't', alphabet_lower+alphabet_upper, alphabet_lower),
 ('メ', 'y', alphabet_lower+alphabet_upper, alphabet_lower),
 ('二', '=', kigou, kigou),
 ('ニ', '=', kigou, kigou),
 ('こ', '=', kigou, kigou),
 ('=', '二', jyouyou_kanji, jyouyou_kanji),
 ('=', 'ニ', katakana, katakana),
 ('=', 'こ', hiragana, hiragana),
 ('。', 'o', alphabet_lower+alphabet_upper, alphabet_lower),
 ('。', 'O', alphabet_upper, alphabet_upper),
 ('。', '0', numetric, numetric),
 ('o', '。', hiragana + katakana + jyouyou_kanji, None),
 ('O', '。', hiragana + katakana + jyouyou_kanji, None),
 ('0', '。', hiragana + katakana + jyouyou_kanji, None),
 ('2', 'っ', hiragana + jyouyou_kanji, ['た', 'ち', 'つ', 'て', 'と']),
 ('?', 'っ', hiragana + jyouyou_kanji, ['た', 'ち', 'つ', 'て', 'と']),
 ('つ', 'っ', hiragana + jyouyou_kanji, ['た', 'ち', 'つ', 'て', 'と']),
 ('ツ', 'ッ', katakana + jyouyou_kanji, ['タ', 'チ', 'ツ', 'テ', 'ト']),
 ('や', 'ゃ', ['き', 'し', 'ち', 'に', 'み', 'り', 'ぎ', 'ぢ', 'じ'], None),
 ('ゆ', 'ゅ', ['き', 'し', 'ち', 'に', 'み', 'り', 'ぎ', 'ぢ', 'じ'], None),
 ('よ', 'ょ', ['き', 'し', 'ち', 'に', 'み', 'り', 'ぎ', 'ぢ', 'じ'], None),
 ('ヤ', 'ャ', ['キ', 'シ', 'チ', 'ニ', 'ミ', 'リ', 'ギ', 'ヂ', 'ジ'], None),
 ('ユ', 'ュ', ['キ', 'シ', 'チ', 'ニ', 'ミ', 'リ', 'ギ', 'ヂ', 'ジ'], None),
 ('ヨ', 'ョ', ['キ', 'シ', 'チ', 'ニ', 'ミ', 'リ', 'ギ', 'ヂ', 'ジ'], None)]
| 61 | 79 | 0.303117 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,973 | 0.55031 |
b314c64792d73fb80b9c2aaecf28ca4b8f5356a3 | 4,293 | py | Python | emlearn/distance.py | Brax94/emlearn | cc5fd962f5af601c02dfe0ec9203d1b30e6b3aef | [
"MIT"
] | 161 | 2019-03-12T16:07:20.000Z | 2022-03-31T06:24:38.000Z | emlearn/distance.py | Brax94/emlearn | cc5fd962f5af601c02dfe0ec9203d1b30e6b3aef | [
"MIT"
] | 35 | 2019-05-14T11:34:04.000Z | 2022-02-04T20:09:34.000Z | emlearn/distance.py | Brax94/emlearn | cc5fd962f5af601c02dfe0ec9203d1b30e6b3aef | [
"MIT"
] | 27 | 2019-03-11T01:09:27.000Z | 2021-12-27T22:56:04.000Z |
import os.path
import os
import numpy
from . import common, cgen
"""
References
https://github.com/scikit-learn/scikit-learn/blob/15a949460dbf19e5e196b8ef48f9712b72a3b3c3/sklearn/covariance/_empirical_covariance.py#L297
https://github.com/scikit-learn/scikit-learn/blob/15a949460dbf19e5e196b8ef48f9712b72a3b3c3/sklearn/covariance/_elliptic_envelope.py#L149
"""
from sklearn.mixture._gaussian_mixture import _compute_log_det_cholesky
from sklearn.utils.extmath import row_norms
np = numpy  # conventional short alias for numpy
def squared_mahalanobis_distance(x1, x2, precision):
    """Compute the squared Mahalanobis distance between two vectors.

    Evaluates (x1 - x2).T * VI * (x1 - x2), where VI (``precision``) is the
    precision matrix, i.e. the inverse of the covariance matrix.

    Loosely based on the scikit-learn implementation,
    https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/neighbors/_dist_metrics.pyx

    Args:
        x1: 1-D numpy array.
        x2: 1-D numpy array with the same shape as ``x1``.
        precision: square 2-D numpy array of shape (len(x1), len(x1)).

    Returns:
        The squared distance as a Python float.
    """
    # Validate shapes up front, before any element access.
    assert x1.shape == x2.shape
    assert precision.shape[0] == precision.shape[1]
    size = x1.shape[0]
    assert size == precision.shape[0]

    # Explicit double loop (rather than a vectorized expression) kept on
    # purpose: it mirrors the C implementation this module generates.
    # Fix: removed an unused scratch allocation (`temp = numpy.zeros(...)`)
    # and initialize the accumulator as a float.
    distance = 0.0
    for i in range(size):
        accumulate = 0.0
        for j in range(size):
            accumulate += precision[i, j] * (x1[j] - x2[j])
        distance += accumulate * (x1[i] - x2[i])

    return distance
def generate_code(means, precision, offset, name='my_elliptic', modifiers='static const'):
    """Emit C source for an elliptic-envelope (Mahalanobis) classifier.

    The output contains the data arrays (means and the row-major-flattened
    precision matrix), an ``EmlEllipticEnvelope`` struct instance, and a
    ``<name>_predict()`` wrapper that returns the raw distance.

    Args:
        means: 1-D array of feature means; its length sets n_features.
        precision: 2-D precision matrix, flattened with order='C'.
        offset: decision-function offset, embedded as the boundary.
        name: prefix used for all generated C identifiers.
        modifiers: storage qualifiers for the generated globals.

    Returns:
        The generated C source as a single string.
    """
    n_features = means.shape[0]
    decision_boundary = offset # FIXME, check

    # C identifiers derived from the requested prefix.
    classifier_name = f'{name}_classifier'
    means_name = f'{name}_means'
    precisions_name = f'{name}_precisions'
    predict_function_name = f'{name}_predict'

    includes = '''
// This code is generated by emlearn
#include <eml_distance.h>
'''

    # Header + data-array declarations (arrays emitted by cgen helpers).
    pre = '\n\n'.join([
        includes,
        cgen.array_declare(means_name, n_features, modifiers=modifiers, values=means),
        cgen.array_declare(precisions_name, n_features*n_features,
                           modifiers=modifiers,
                           values=precision.flatten(order='C'),
                           ),
    ])

    # Struct instance and predict wrapper. Doubled braces escape literal
    # braces inside the f-string.
    # NOTE(review): the identifier ``class`` is valid C but would break if
    # this output were ever compiled as C++.
    main = f'''
#include <stdio.h>
// Data definitions
{modifiers} EmlEllipticEnvelope {classifier_name} = {{
    {n_features},
    {decision_boundary},
    {means_name},
    {precisions_name}
}};
// Prediction function
float {predict_function_name}(const float *features, int n_features) {{
    float dist = 0.0;
    const int class = eml_elliptic_envelope_predict(&{classifier_name},
                            features, n_features, &dist);
    return dist;
}}
'''

    code = pre + main
    return code
class Wrapper:
    """Runs a fitted elliptic-envelope-style estimator through generated,
    compiled C code.

    Expects an estimator exposing ``get_precision()``, ``location_`` and
    ``offset_`` (e.g. sklearn's EllipticEnvelope, per the references in
    the module docstring).
    """

    def __init__(self, estimator, classifier='inline', dtype='float'):
        """Extract model parameters and compile the generated C classifier.

        Args:
            estimator: fitted estimator with get_precision()/location_/offset_.
            classifier: only 'inline' is supported; anything else raises.
            dtype: stored but only used as metadata here.

        Raises:
            ValueError: if ``classifier`` is not 'inline'.
        """
        self.dtype = dtype
        precision = estimator.get_precision()
        self._means = estimator.location_.copy()
        self._precision = precision
        self._offset = estimator.offset_

        if classifier == 'inline':
            name = 'my_inline_elliptic'
            # C call expression used by the compiled-classifier harness.
            func = '{}_predict(values, length)'.format(name)
            code = self.save(name=name)
            self.classifier_ = common.CompiledClassifier(code, name=name, call=func, out_dtype='float')
        else:
            raise ValueError("Unsupported classifier method '{}'".format(classifier))

    def mahalanobis(self, X):
        """Return per-sample distances as computed by the compiled C classifier."""
        def dist(x):
            return squared_mahalanobis_distance(x, self._means, precision=self._precision)
        # NOTE(review): ``p`` (the pure-Python reference computation) is
        # built but never used -- the returned values come from the
        # compiled classifier. This looks like leftover cross-check code;
        # confirm before removing.
        p = numpy.array([ dist(x) for x in X ])
        predictions = self.classifier_.predict(X)
        return predictions

    def predict(self, X):
        """Classify samples: 1 for inliers, -1 for outliers (sklearn convention)."""
        def predict_one(d):
            # Negate the distance and compare against the decision offset.
            dist = -d
            dd = dist - self._offset
            is_inlier = 1 if dd > 0 else -1
            return is_inlier
        distances = self.mahalanobis(X)
        return numpy.array([predict_one(d) for d in distances])

    def save(self, name=None, file=None):
        """Generate C code for this model and optionally write it to ``file``.

        If ``name`` is omitted it is derived from the basename of ``file``;
        at least one of the two must be provided.

        Returns:
            The generated C source as a string.

        Raises:
            ValueError: if neither ``name`` nor ``file`` is given.
        """
        if name is None:
            if file is None:
                raise ValueError('Either name or file must be provided')
            else:
                name = os.path.splitext(os.path.basename(file))[0]

        code = generate_code(self._means, self._precision, self._offset, name=name)
        if file:
            with open(file, 'w') as f:
                f.write(code)

        return code
| 28.430464 | 139 | 0.63126 | 1,675 | 0.39017 | 0 | 0 | 0 | 0 | 0 | 0 | 1,513 | 0.352434 |
b315ac276441ccfb8b05f770a418eb36163f6159 | 929 | py | Python | scripts/dbload_profiles.py | tonykipkemboi/LinkedIn_NSBE_Hackathon | 9375196fd2c89fe3376fbdc8964a59676ef1a43d | [
"MIT"
] | null | null | null | scripts/dbload_profiles.py | tonykipkemboi/LinkedIn_NSBE_Hackathon | 9375196fd2c89fe3376fbdc8964a59676ef1a43d | [
"MIT"
] | null | null | null | scripts/dbload_profiles.py | tonykipkemboi/LinkedIn_NSBE_Hackathon | 9375196fd2c89fe3376fbdc8964a59676ef1a43d | [
"MIT"
] | null | null | null | from db_connection import DbConnection
import random
def load():
    """(Re)create the ``profiles`` table and fill it with randomized rows.

    Reads names from ./data/list_of_names.txt and skills from
    ./data/list_of_skills.txt, then inserts one row per name with four
    randomly sampled skills (comma-joined) and a random connection weight
    in [0, 10]. Commits at the end and always closes the connection.
    """
    db_conn = DbConnection('profiles')
    try:
        db_conn.execute("drop table if exists profiles")
        db_conn.execute("create table profiles (id integer PRIMARY KEY, name text not null, skillset text not null, connection_weight integer not null)")

        with open('./data/list_of_names.txt', newline='') as f:
            names_for_profiles = f.read().splitlines()
        with open('./data/list_of_skills.txt', newline='') as f:
            all_skills = f.read().splitlines()

        # enumerate(start=1) replaces the old manual counter, which also
        # shadowed the builtin ``id``.
        for profile_id, name in enumerate(names_for_profiles, start=1):
            skill_sample = random.sample(all_skills, 4)
            connection_weight = random.randint(0, 10)
            profile_skills = ','.join(skill_sample)
            db_conn.execute("insert into profiles values (?, ?, ?, ?);",
                            (profile_id, name, profile_skills, connection_weight))
        db_conn.commit()
    finally:
        # Ensure the connection is released even if a file/DB error occurs.
        db_conn.close()
b318379a0dd3c97eba087c2682c3a79eb0cce153 | 2,666 | py | Python | application.py | iamsashank09/handwritten-digit-recognizer-cnn | b168eb2337397364de366952f5289998731abe04 | [
"MIT"
] | null | null | null | application.py | iamsashank09/handwritten-digit-recognizer-cnn | b168eb2337397364de366952f5289998731abe04 | [
"MIT"
] | null | null | null | application.py | iamsashank09/handwritten-digit-recognizer-cnn | b168eb2337397364de366952f5289998731abe04 | [
"MIT"
] | 1 | 2021-08-10T07:49:01.000Z | 2021-08-10T07:49:01.000Z | import sys
from keras.models import load_model
import cv2
from preprocessors import x_cord_contour, makeSquare, resize_to_pixel
import pyfiglet
class findHandwrittenDigits:
    """Locates handwritten digits in an image and reads them with a CNN.

    Pipeline: grayscale -> blur -> Canny edges -> contour detection ->
    per-digit preprocessing to a 28x28 MNIST-style input -> model
    prediction. Recognised digits accumulate (left to right) in
    ``full_number``.
    """

    def __init__(self, imageFileName):
        """Load the trained model and the input image.

        Args:
            imageFileName: path to the image containing handwritten digits.
        """
        # Trained Keras model; file must exist in the working directory.
        self.classifier = load_model('mnistHandModel.h5')
        # NOTE(review): cv2.imread returns None for unreadable paths, which
        # would make the .copy() below fail -- confirm inputs are validated.
        self.image = cv2.imread(imageFileName)
        # Copy shown to the user; replaced with the annotated image later.
        self.dispImage = self.image.copy()
        # Recognised digit strings, appended left to right.
        self.full_number = []

    def findDigits(self):
        """Detect digit contours, classify each, and annotate the image."""
        image = self.image.copy()
        gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(blurred, 30, 150)

        # Find external contours on a copy of the edge map.
        contours, _ = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # Filter out small (noise) contours by area.
        contours = [i for i in contours if cv2.contourArea(i)>10]

        # Sort contours left to right by their x coordinates so digits are
        # read in natural order.
        contours = sorted(contours, key = x_cord_contour, reverse = False)

        # Loop over the contours, classifying each candidate digit.
        for c in contours:
            # Compute the bounding box for the contour.
            (x, y, w, h) = cv2.boundingRect(c)

            if w >= 5 and h >= 25:
                # Extract the region of interest from the blurred image,
                # binarize it (inverted: digit white on black, MNIST-style),
                # square it, and scale to the 28x28 model input.
                roi = blurred[y:y + h, x:x + w]
                ret, roi = cv2.threshold(roi, 127, 255,cv2.THRESH_BINARY_INV)
                roi = makeSquare(roi)
                roi = resize_to_pixel(28, roi)
                roi = roi / 255.0
                roi = roi.reshape(1,28,28,1)

                # Get prediction.
                # NOTE(review): Sequential.predict_classes only exists on
                # older Keras/TensorFlow versions -- confirm the pinned
                # version before upgrading.
                res = str(self.classifier.predict_classes(roi, 1, verbose = 0)[0])
                self.full_number.append(res)
                # Draw the bounding box and the predicted digit label.
                cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
                cv2.putText(image, res, (x , y + 155), cv2.FONT_HERSHEY_COMPLEX, 2, (255, 0, 0), 2)

        self.dispImage = image.copy()

    def displayResult(self, displayImg = False):
        """Print the recognised number as ASCII art; optionally show the image.

        Args:
            displayImg: when True, open an OpenCV window with the annotated
                image and block until a key is pressed.
        """
        if displayImg:
            cv2.imshow("image", self.dispImage)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

        figText = pyfiglet.figlet_format(str(''.join(self.full_number)))
        print ("The number is: " )
        print(figText)
if __name__ == "__main__":
    # Only a missing argv entry raises IndexError; the previous bare
    # ``except:`` also swallowed KeyboardInterrupt and SystemExit.
    try:
        filename = sys.argv[1]
    except IndexError:
        print('''Mention the file name while running the app.
        Usage: python application.py 'filenamehere' ''')
        # sys.exit(1) signals failure; the site builtin exit() is meant
        # for interactive use and is not guaranteed to exist.
        sys.exit(1)

    finderObject = findHandwrittenDigits(filename)
    finderObject.findDigits()
    finderObject.displayResult(True)
b31a614e9fdad950b6103bf4c802c1acbca2334e | 1,122 | py | Python | Packs/ShiftLeft/Integrations/shiftleft/shiftleft_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Packs/ShiftLeft/Integrations/shiftleft/shiftleft_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Packs/ShiftLeft/Integrations/shiftleft/shiftleft_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z | """Base Integration for ShiftLeft CORE - Cortex XSOAR Extension
"""
import json
import io
from shiftleft import list_app_findings_command, ShiftLeftClient
def util_load_json(path):
    """Read the UTF-8 JSON file at ``path`` and return the parsed object."""
    with io.open(path, mode="r", encoding="utf-8") as f:
        # json.load streams from the file object directly; no need to
        # materialize the whole text with f.read() first.
        return json.load(f)
def test_list_app_findings_command(requests_mock):
    """Tests list_app_findings_command function.

    Checks the output of the command function with the expected output.
    Uses the ``requests_mock`` pytest fixture to stub the findings
    endpoint with a canned response loaded from
    test_data/test_list_findings.json.
    """
    mock_response = util_load_json("test_data/test_list_findings.json")
    requests_mock.get(
        "https://www.shiftleft.io/orgs/2c089ac1-3378-44d5-94da-9507e84351c3/apps/shiftleft-java-example/findings",
        json=mock_response,
    )

    client = ShiftLeftClient(
        base_url="https://www.shiftleft.io",  # disable-secrets-detection
        verify=False,
    )
    args = {
        "app_name": "shiftleft-java-example",
        "severity": "critical",
        "type": ["vuln"],
        "version": None,
    }
    # Only asserts the outputs are non-empty; the exact payload is
    # whatever the canned fixture provides.
    response = list_app_findings_command(
        client, "2c089ac1-3378-44d5-94da-9507e84351c3", args
    )
    assert response.outputs
| 28.769231 | 114 | 0.682709 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 508 | 0.452763 |
b31ad98a0700b36b49c1882b67967d373ee628c9 | 2,625 | py | Python | app/auth/forms.py | karomag/microblog | 7511dbd99c4fec6558fd4a94249622ebffdf52ac | [
"MIT"
] | null | null | null | app/auth/forms.py | karomag/microblog | 7511dbd99c4fec6558fd4a94249622ebffdf52ac | [
"MIT"
] | null | null | null | app/auth/forms.py | karomag/microblog | 7511dbd99c4fec6558fd4a94249622ebffdf52ac | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
"""Forms auth."""
from flask_babel import _
from flask_babel import lazy_gettext as _l
from flask_wtf import FlaskForm
from wtforms import (
BooleanField,
PasswordField,
StringField,
SubmitField,
)
from wtforms.validators import (
DataRequired,
Email,
EqualTo,
ValidationError,
)
from app.models import User
class LoginForm(FlaskForm):
    """Sign-in form: username, password, and a "remember me" opt-in.

    Args:
        FlaskForm (class): Flask-specific subclass of WTForms
    """

    username = StringField(_l('Username'), validators=[DataRequired()])
    password = PasswordField(_l('Password'), validators=[DataRequired()])
    remember_me = BooleanField(_l('Remember me'))
    # Fixed typo in the user-visible submit label: 'Sigh In' -> 'Sign In'.
    # NOTE(review): this changes the translatable msgid, so .po catalogs
    # referencing the old string need regeneration.
    submit = SubmitField(_l('Sign In'))
class RegistrationForm(FlaskForm):
    """Sign-up form enforcing unique usernames and email addresses.

    Args:
        FlaskForm (class): Flask-specific subclass of WTForms
    """

    username = StringField(_l('Username'), validators=[DataRequired()])
    email = StringField(_l('Email'), validators=[DataRequired(), Email()])
    password = PasswordField(_l('Password'), validators=[DataRequired()])
    password2 = PasswordField(
        _l('Repeat Password'), validators=[DataRequired(), EqualTo('password')],
    )
    submit = SubmitField(_l('Register'))

    # WTForms invokes validate_<fieldname> automatically for each field.
    def validate_username(self, username):
        """Reject the chosen username if an account already uses it.

        Args:
            username: Username field data

        Raises:
            ValidationError, if the username is not unique
        """
        existing = User.query.filter_by(username=username.data).first()
        if existing is not None:
            raise ValidationError(_('Please use a different username.'))

    def validate_email(self, email):
        """Reject the email address if an account already uses it.

        Args:
            email: Email field data

        Raises:
            ValidationError, if an email is not unique
        """
        existing = User.query.filter_by(email=email.data).first()
        if existing is not None:
            raise ValidationError(_('Please use a different email address.'))
class ResetPasswordRequestForm(FlaskForm):
    """Form requesting a password-reset email for a given address.

    Args:
        FlaskForm (class): Flask-specific subclass of WTForms
    """

    email = StringField(_l('Email'), validators=[DataRequired(), Email()])
    submit = SubmitField(_l('Request Password Reset'))
class ResetPasswordForm(FlaskForm):
    """Form for choosing a new password after following a reset link.

    Args:
        FlaskForm (class): Flask-specific subclass of WTForms
    """

    password = PasswordField(_l('Password'), validators=[DataRequired()])
    # must match the first password field exactly
    password2 = PasswordField(
        _l('Repeat Password'),
        validators=[DataRequired(), EqualTo('password')],
    )
    submit = SubmitField(_l('Request Password Reset'))
| 28.225806 | 80 | 0.656 | 2,247 | 0.856 | 0 | 0 | 0 | 0 | 0 | 0 | 957 | 0.364571 |
b31ca78650faf7f0d3a1fa21464ede27682437eb | 621 | py | Python | rescale-video.py | abhra2020-smart/ba-title-bar | 71521849d6376fee70eaeefffdb59edfbcedd94e | [
"MIT"
] | null | null | null | rescale-video.py | abhra2020-smart/ba-title-bar | 71521849d6376fee70eaeefffdb59edfbcedd94e | [
"MIT"
] | null | null | null | rescale-video.py | abhra2020-smart/ba-title-bar | 71521849d6376fee70eaeefffdb59edfbcedd94e | [
"MIT"
] | null | null | null | import cv2
# used to scale down the video resolution
# the repo doesn't include vid 480x360 file,
# but you can get if from https://www.youtube.com/watch?v=FtutLA63Cp8
# Open the source clip and re-encode it at 48x36, 30 fps.
cap = cv2.VideoCapture('bad_apple_480x360.mp4')
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
out = cv2.VideoWriter('bad_apple_48x36.mp4', fourcc, 30, (48, 36))  # output: 30 fps, 48x36
while True:
    ret, frame = cap.read()
    # read() returns False at end of stream (or on error); stop then.
    if not ret:
        break
    out.write(cv2.resize(frame, (48, 36), fx=0, fy=0, interpolation=cv2.INTER_CUBIC))
cap.release()
out.release()
cv2.destroyAllWindows() | 28.227273 | 91 | 0.650564 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.373591 |
b31d9833c06a2ca7f76b3f73d870e38b35b71e27 | 632 | py | Python | scripts/nabeatu.py | yuzukiimai/robosys2 | 2634c9c43966ab5a158d45d89f07faf358b8e092 | [
"BSD-3-Clause"
] | null | null | null | scripts/nabeatu.py | yuzukiimai/robosys2 | 2634c9c43966ab5a158d45d89f07faf358b8e092 | [
"BSD-3-Clause"
] | null | null | null | scripts/nabeatu.py | yuzukiimai/robosys2 | 2634c9c43966ab5a158d45d89f07faf358b8e092 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import rospy
from std_msgs.msg import Int32
# Latest value received on the 'rand_number' topic (0 until first message).
n = 0
def cb(message):
    # Subscriber callback: stash the incoming Int32 payload in global n.
    global n
    n = message.data
rospy.init_node('nabe')
sub = rospy.Subscriber('rand_number', Int32, cb)
pub = rospy.Publisher('atu', Int32, queue_size=1)
rate = rospy.Rate(1)
while not rospy.is_shutdown():
    # Nabeatsu rule: react when n is a (non-zero) multiple of 3 or contains
    # the digit 3.  The digit test replaces the old hard-coded two-digit
    # enumerations (13, 23, ..., 31, 32, ...): it is identical for 1-99 and
    # also covers larger numbers the old lists missed.
    # NOTE(review): assumes rand_number publishes non-negative values --
    # '3' in str(n) would also match e.g. -13, which the old code did not.
    if n != 0 and (n % 3 == 0 or '3' in str(n)):
        print('ナベアツ「%d !!!!!」\n' % n)
    pub.publish(n)
    rate.sleep()
| 22.571429 | 72 | 0.544304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 136 | 0.203593 |
b31df60e00166612398f3b8eb16174c35e86d989 | 42,680 | py | Python | competition/scenarios.py | xfuzzycomp/FuzzyChallenge2021 | 5876450fdb913c6707352bfe9fcc25748f041f52 | [
"MIT"
] | null | null | null | competition/scenarios.py | xfuzzycomp/FuzzyChallenge2021 | 5876450fdb913c6707352bfe9fcc25748f041f52 | [
"MIT"
] | null | null | null | competition/scenarios.py | xfuzzycomp/FuzzyChallenge2021 | 5876450fdb913c6707352bfe9fcc25748f041f52 | [
"MIT"
] | null | null | null | from fuzzy_asteroids.util import Scenario
import numpy as np
# "Simple" Scenarios --------------------------------------------------------------------------------------------------#
# Threat priority tests: one asteroid drifts toward the ship while a second
# sits still on the far side, so targeting logic must pick the real threat.
# (Per the wall scenarios' convention below: angle -90 moves +x, 90 moves -x,
# 0 moves +y, 180 moves -y.)
# Mover starts at the left edge, heading toward the ship.
threat_test_1 = Scenario(
    name="threat_test_1",
    asteroid_states=[{"position": (0, 300), "angle": -90.0, "speed": 40},
                     {"position": (700, 300), "angle": 0.0, "speed": 0},
                     ],
    ship_state={"position": (600, 300)},
    seed=0
)
# Mover starts at the right edge.
threat_test_2 = Scenario(
    name="threat_test_2",
    asteroid_states=[{"position": (800, 300), "angle": 90.0, "speed": 40},
                     {"position": (100, 300), "angle": 0.0, "speed": 0},
                     ],
    ship_state={"position": (200, 300)},
    seed=0
)
# Mover starts at the bottom edge.
threat_test_3 = Scenario(
    name="threat_test_3",
    asteroid_states=[{"position": (400, 0), "angle": 0.0, "speed": 40},
                     {"position": (400, 550), "angle": 0.0, "speed": 0},
                     ],
    ship_state={"position": (400, 450)},
    seed=0
)
# Mover starts at the top edge.
threat_test_4 = Scenario(
    name="threat_test_4",
    asteroid_states=[{"position": (400, 600), "angle": 180.0, "speed": 40},
                     {"position": (400, 50), "angle": 0.0, "speed": 0},
                     ],
    ship_state={"position": (400, 150)},
    seed=0
)
# Accuracy tests: a single small (size 1) asteroid crosses the screen at
# speed 120 from varying directions and offsets, sometimes with the ship
# facing away (tests 7-10), to exercise aiming and shot leading.
accuracy_test_1 = Scenario(
    name="accuracy_test_1",
    asteroid_states=[{"position": (400, 500), "angle": 90.0, "speed": 120, "size": 1},
                     ],
    ship_state={"position": (400, 100)},
    seed=0
)
accuracy_test_2 = Scenario(
    name="accuracy_test_2",
    asteroid_states=[{"position": (400, 500), "angle": -90.0, "speed": 120, "size": 1},
                     ],
    ship_state={"position": (400, 100)},
    seed=0
)
accuracy_test_3 = Scenario(
    name="accuracy_test_3",
    asteroid_states=[{"position": (100, 100), "angle": 0.0, "speed": 120, "size": 1},
                     ],
    ship_state={"position": (400, 100)},
    seed=0
)
accuracy_test_4 = Scenario(
    name="accuracy_test_4",
    asteroid_states=[{"position": (700, 100), "angle": 0.0, "speed": 120, "size": 1},
                     ],
    ship_state={"position": (400, 100)},
    seed=0
)
accuracy_test_5 = Scenario(
    name="accuracy_test_5",
    asteroid_states=[{"position": (100, 500), "angle": 180.0, "speed": 120, "size": 1},
                     ],
    ship_state={"position": (400, 100)},
    seed=0
)
accuracy_test_6 = Scenario(
    name="accuracy_test_6",
    asteroid_states=[{"position": (700, 500), "angle": 180.0, "speed": 120, "size": 1},
                     ],
    ship_state={"position": (400, 100)},
    seed=0
)
# Tests 7-10: ship spawns rotated, so it must turn before it can shoot.
accuracy_test_7 = Scenario(
    name="accuracy_test_7",
    asteroid_states=[{"position": (400, 500), "angle": 180.0, "speed": 120, "size": 1},
                     ],
    ship_state={"position": (400, 100), "angle": 90.0},
    seed=0
)
accuracy_test_8 = Scenario(
    name="accuracy_test_8",
    asteroid_states=[{"position": (400, 500), "angle": 180.0, "speed": 120, "size": 1},
                     ],
    ship_state={"position": (400, 100), "angle": -90.0},
    seed=0
)
accuracy_test_9 = Scenario(
    name="accuracy_test_9",
    asteroid_states=[{"position": (100, 500), "angle": -135.0, "speed": 120, "size": 1},
                     ],
    ship_state={"position": (700, 100), "angle": -90.0},
    seed=0
)
accuracy_test_10 = Scenario(
    name="accuracy_test_10",
    asteroid_states=[{"position": (700, 500), "angle": 135.0, "speed": 120, "size": 1},
                     ],
    ship_state={"position": (100, 100), "angle": 90.0},
    seed=0
)
# "Easy" wall scenario with default ship state, starts on left and moves right
wall_left_easy = Scenario(
    name="wall_left_easy",
    # five-asteroid column on the left edge sweeping right (angle -90)
    asteroid_states=[{"position": (0, y), "angle": -90.0, "speed": 60}
                     for y in range(100, 600, 100)],
    ship_state={"position": (400, 300)},
    seed=0
)
# "Easy" wall scenario with default ship state, starts on right and moves left
wall_right_easy = Scenario(
    name="wall_right_easy",
    asteroid_states=[{"position": (800, y), "angle": 90.0, "speed": 60}
                     for y in range(100, 600, 100)],
    ship_state={"position": (400, 300)},
    seed=0
)
# "Easy" wall scenario with default ship state, starts at the top and moves downward
wall_top_easy = Scenario(
    name="wall_top_easy",
    # seven-asteroid row along the top edge sweeping down (angle 180)
    asteroid_states=[{"position": (x, 600), "angle": 180.0, "speed": 60}
                     for x in range(100, 800, 100)],
    ship_state={"position": (400, 300)},
    seed=0
)
# "Easy" wall scenario with default ship state, starts at the bottom and moves upward
wall_bottom_easy = Scenario(
    name="wall_bottom_easy",
    asteroid_states=[{"position": (x, 0), "angle": 0.0, "speed": 60}
                     for x in range(100, 800, 100)],
    ship_state={"position": (400, 300)},
    seed=0
)
# Ring scenarios ------------------------------------------------------------------------------------------------------#

def _ring_states(radius, offset, theta, speed):
    """Asteroid states arranged on a circle around the screen center (400, 300).

    Args:
        radius: circle radius in pixels.
        offset: angular rotation (rad) applied to the positions only; for the
            partial rings this moves the gap around the circle without
            changing each asteroid's heading.
        theta: iterable of angular sample points (rad).
        speed: asteroid speed (0 gives a static ring).

    Each heading is 90 + theta degrees, matching the original per-scenario
    loops this helper replaces.
    """
    return [{"position": (radius * np.cos(t + offset) + 400,
                          radius * np.sin(t + offset) + 300),
             "angle": 90 + t * 180 / np.pi,
             "speed": speed}
            for t in theta]

# Full 16-point ring that closes in on the ship at the center.
ring_closing = Scenario(
    name="ring_closing",
    asteroid_states=_ring_states(300, 0.0, np.linspace(0, 2 * np.pi, 17)[:-1], 30),
    ship_state={"position": (400, 300)},
    seed=0
)

# Static partial rings: 14 of the 16 sample points (slice [1:-2] drops the
# first and last two), rotated by 0, pi/2, pi, or 3*pi/2 per the scenario name.
_partial = np.linspace(0, 2 * np.pi, 17)[1:-2]
ring_static_left = Scenario(
    name="ring_static_left",
    asteroid_states=_ring_states(150, np.pi, _partial, 0),
    ship_state={"position": (400, 300)},
    seed=0
)
ring_static_right = Scenario(
    name="ring_static_right",
    asteroid_states=_ring_states(150, 0.0, _partial, 0),
    ship_state={"position": (400, 300)},
    seed=0
)
ring_static_top = Scenario(
    name="ring_static_top",
    asteroid_states=_ring_states(150, np.pi / 2, _partial, 0),
    ship_state={"position": (400, 300)},
    seed=0
)
ring_static_bottom = Scenario(
    name="ring_static_bottom",
    asteroid_states=_ring_states(150, 3 * np.pi / 2, _partial, 0),
    ship_state={"position": (400, 300)},
    seed=0
)
# ---------------------------------------------------------------------------------------------------------------------#
# Normal corridor scenarios -------------------------------------------------------------------------------------------#
# The ship sits in a clear lane walled in by a dense static asteroid grid and
# must shoot through a two-asteroid "plug" at one end of the lane.

# Horizontal lane: grid rows below (y <= 200) and above (y >= 400) the lane.
_x = np.linspace(0, 800, 17)
_y = np.concatenate((np.linspace(0, 200, 5), np.linspace(400, 600, 5)))
_grid = [{"position": (xv, yv), "angle": 0.0, "speed": 0}
         for xv in _x for yv in _y]

corridor_left = Scenario(
    name="corridor_left",
    # static plug blocking the left end of the lane
    asteroid_states=_grid + [{"position": (50, 266), "angle": -90.0, "speed": 0},
                             {"position": (50, 332), "angle": -90.0, "speed": 0}],
    ship_state={"position": (700, 300)},
    seed=0
)
corridor_right = Scenario(
    name="corridor_right",
    # plug drifts in from the right end at speed 20
    asteroid_states=_grid + [{"position": (800, 266), "angle": 90.0, "speed": 20},
                             {"position": (800, 332), "angle": 90.0, "speed": 20}],
    ship_state={"position": (100, 300)},
    seed=0
)

# Vertical lane: grid columns left (x <= 300) and right (x >= 500) of the lane.
_x = np.concatenate((np.linspace(0, 300, 7), np.linspace(500, 800, 7)))
_y = np.linspace(0, 600, 13)
_grid = [{"position": (xv, yv), "angle": 0.0, "speed": 0}
         for xv in _x for yv in _y]

corridor_top = Scenario(
    name="corridor_top",
    # plug drifts down from the top of the lane at speed 20
    asteroid_states=_grid + [{"position": (366, 600), "angle": 180.0, "speed": 20},
                             {"position": (432, 600), "angle": 180.0, "speed": 20}],
    ship_state={"position": (400, 100)},
    seed=0
)
corridor_bottom = Scenario(
    name="corridor_bottom",
    # plug drifts up from the bottom of the lane at speed 20
    asteroid_states=_grid + [{"position": (366, 0), "angle": 0.0, "speed": 20},
                             {"position": (432, 0), "angle": 0.0, "speed": 20}],
    ship_state={"position": (400, 500)},
    seed=0
)
# ---------------------------------------------------------------------------------------------------------------------#
# Moving Corridor Scenarios -------------------------------------------------------------------------------------------#
# Same corridor geometry as above, but the whole asteroid field streams past
# the ship, which must hold its clear lane.

def _grid_states(xs, ys, angle, speed):
    """One asteroid per (x, y) grid point, all sharing the same heading/speed."""
    return [{"position": (xv, yv), "angle": angle, "speed": speed}
            for xv in xs for yv in ys]

# Horizontal lane geometry (clear band 200 < y < 400).
_x = np.linspace(0, 800, 17)
_y = np.concatenate((np.linspace(0, 200, 5), np.linspace(400, 600, 5)))

# Field streams rightward (angle -90) past the ship.
moving_corridor_1 = Scenario(
    name="moving_corridor_1",
    asteroid_states=_grid_states(_x, _y, -90.0, 120),
    ship_state={"position": (400, 300), "angle": 90},
    seed=0
)
# Field streams leftward (angle 90).
moving_corridor_2 = Scenario(
    name="moving_corridor_2",
    asteroid_states=_grid_states(_x, _y, 90.0, 120),
    ship_state={"position": (400, 300), "angle": -90},
    seed=0
)

# Vertical lane geometry (clear band 300 < x < 500).
_x = np.concatenate((np.linspace(0, 300, 7), np.linspace(500, 800, 7)))
_y = np.linspace(0, 600, 13)

# Field streams downward (angle 180).
moving_corridor_3 = Scenario(
    name="moving_corridor_3",
    asteroid_states=_grid_states(_x, _y, 180.0, 120),
    ship_state={"position": (400, 300), "angle": 0},
    seed=0
)
# Field streams upward (angle 0).
moving_corridor_4 = Scenario(
    name="moving_corridor_4",
    asteroid_states=_grid_states(_x, _y, 0.0, 120),
    ship_state={"position": (400, 300), "angle": 180},
    seed=0
)
# Angled corridor scenario 1
# calculating corridor states
num_x = 17
num_y = 13
x = np.linspace(0, 800, num_x)
y = np.linspace(0, 600, num_y)
ast_x, ast_y = np.meshgrid(x, y, sparse=False, indexing='ij')
ast_states = []
for ii in range(num_x):
    for jj in range(num_y):
        # Keep grid points outside two diagonal free lanes (half-width 160)
        # along y = 1.5x and y = -1.5x + 1200; the whole field drifts +x.
        if not (abs(1.5 * ast_x[ii, jj] - ast_y[ii, jj]) <= 160) and not (
                abs(-1.5 * ast_x[ii, jj] + 1200 - ast_y[ii, jj]) <= 160):
            ast_states.append({"position": (ast_x[ii, jj], ast_y[ii, jj]), "angle": -90.0, "speed": 30})
moving_corridor_angled_1 = Scenario(
    name="moving_corridor_angled_1",
    asteroid_states=ast_states,
    ship_state={"position": (750, 50), "angle": 90},
    seed=0
)
# Angled corridor scenario 2
# calculating corridor states
num_x = 17
num_y = 13
x = np.linspace(0, 800, num_x)
y = np.linspace(0, 600, num_y)
ast_x, ast_y = np.meshgrid(x, y, sparse=False, indexing='ij')
ast_states = []
for ii in range(num_x):
    for jj in range(num_y):
        # Free lanes (half-width 160) along y = -1.5x + 600 and y = 1.5x - 600.
        if not (abs(-1.5 * ast_x[ii, jj] + 600 - ast_y[ii, jj]) <= 160) and not (
                abs(1.5 * ast_x[ii, jj] - 600 - ast_y[ii, jj]) <= 160):
            ast_states.append({"position": (ast_x[ii, jj], ast_y[ii, jj]), "angle": -90.0, "speed": 30})
moving_corridor_angled_2 = Scenario(
    name="moving_corridor_angled_2",
    asteroid_states=ast_states,
    ship_state={"position": (750, 550), "angle": 90},
    seed=0
)
# Curved corridor scenario 1
# calculating corridor states
num_x = 17
num_y = 13
x = np.linspace(0, 800, num_x)
y = np.linspace(0, 600, num_y)
ast_x, ast_y = np.meshgrid(x, y, sparse=False, indexing='ij')
ast_states = []
for ii in range(num_x):
    for jj in range(num_y):
        # Remove points within 200 px (vertically) of the parabola
        # y = -(x - 400)^2 / 300 + 600, leaving a curved free lane.
        if not (abs(-(1 / 300) * (ast_x[ii, jj] - 400) ** 2 + 600 - ast_y[ii, jj]) <= 200):
            ast_states.append({"position": (ast_x[ii, jj], ast_y[ii, jj]), "angle": -90.0, "speed": 30})
moving_corridor_curve_1 = Scenario(
    name="moving_corridor_curve_1",
    asteroid_states=ast_states,
    ship_state={"position": (550, 500), "angle": 90},
    seed=0
)
# Curved corridor scenario 2
# calculating corridor states
num_x = 30
num_y = 45
x = np.linspace(0, 800, num_x)
y = np.linspace(0, 600, num_y)
ast_x, ast_y = np.meshgrid(x, y, sparse=False, indexing='ij')
ast_states = []
for ii in range(num_x):
    for jj in range(num_y):
        # Denser field of small, fast asteroids: keep only points whose
        # vertical distance from y = (x - 400)^2 / 300 is strictly between
        # 200 and 300 (a band alongside the curved free lane).
        if not (abs((1 / 300) * (ast_x[ii, jj] - 400) ** 2 - ast_y[ii, jj]) <= 200) and not (
                abs((1 / 300) * (ast_x[ii, jj] - 400) ** 2 - ast_y[ii, jj]) >= 300):
            ast_states.append({"position": (ast_x[ii, jj], ast_y[ii, jj]), "angle": -90.0, "speed": 120, "size": 1})
moving_corridor_curve_2 = Scenario(
    name="moving_corridor_curve_2",
    asteroid_states=ast_states,
    ship_state={"position": (550, 100), "angle": 90},
    seed=0
)
# ---------------------------------------------------------------------------------------------------------------------#
# Apocalypse scenarios-------------------------------------------------------------------------------------------------#
# Scenario meant to be difficult, probably can't be totally cleared
# currently the vehicle spawns on top of asteroids. It won't kill the vehicle until you fire though
# 50 randomly placed asteroids; the fixed seed makes the layout reproducible.
scenario_apocalypse_1 = Scenario(name="apocalypse_1", num_asteroids=50, seed=1)
# ---------------------------------------------------------------------------------------------------------------------#
# Forcing wrap scenarios-----------------------------------------------------------------------------------------------#
# A full-height wall of asteroids sweeps the screen, pushing the ship to wrap
# around an edge to survive.

def _column(x, angle, speed):
    """Seven asteroid states stacked vertically at `x`, 100 px apart (y = 0..600)."""
    return [{"position": (x, y), "angle": angle, "speed": speed}
            for y in range(0, 601, 100)]

# Wrap right scenarios: wall moves right (angle -90); _3/_4 add a static
# column at x = 200 blocking part of the escape.
wall_right_wrap_1 = Scenario(
    name="wall_right_wrap_1",
    asteroid_states=_column(600, -90.0, 80),
    ship_state={"position": (750, 300)},
    seed=0
)
wall_right_wrap_2 = Scenario(
    name="wall_right_wrap_2",
    asteroid_states=_column(750, -90.0, 80),
    ship_state={"position": (50, 300)},
    seed=0
)
wall_right_wrap_3 = Scenario(
    name="wall_right_wrap_3",
    asteroid_states=_column(600, -90.0, 80) + _column(200, -90.0, 0),
    ship_state={"position": (750, 300)},
    seed=0
)
wall_right_wrap_4 = Scenario(
    name="wall_right_wrap_4",
    asteroid_states=_column(750, -90.0, 80) + _column(200, -90.0, 0),
    ship_state={"position": (50, 300)},
    seed=0
)

# Wrap left scenarios: wall moves left (angle 90); _3/_4 add a static column
# at x = 600.
wall_left_wrap_1 = Scenario(
    name="wall_left_wrap_1",
    asteroid_states=_column(200, 90.0, 80),
    ship_state={"position": (50, 300)},
    seed=0
)
wall_left_wrap_2 = Scenario(
    name="wall_left_wrap_2",
    asteroid_states=_column(50, 90.0, 80),
    ship_state={"position": (750, 300)},
    seed=0
)
wall_left_wrap_3 = Scenario(
    name="wall_left_wrap_3",
    asteroid_states=_column(200, 90.0, 80) + _column(600, -90.0, 0),
    ship_state={"position": (50, 300)},
    seed=0
)
wall_left_wrap_4 = Scenario(
    name="wall_left_wrap_4",
    asteroid_states=_column(50, 90.0, 80) + _column(600, -90.0, 0),
    ship_state={"position": (750, 300)},
    seed=0
)
# Wrap top scenarios
wall_top_wrap_1 = Scenario(
name="wall_top_wrap_1",
asteroid_states=[{"position": (0, 400), "angle": 0.0, "speed": 80},
{"position": (100, 400), "angle": 0.0, "speed": 80},
{"position": (200, 400), "angle": 0.0, "speed": 80},
{"position": (300, 400), "angle": 0.0, "speed": 80},
{"position": (400, 400), "angle": 0.0, "speed": 80},
{"position": (500, 400), "angle": 0.0, "speed": 80},
{"position": (600, 400), "angle": 0.0, "speed": 80},
{"position": (700, 400), "angle": 0.0, "speed": 80},
{"position": (800, 400), "angle": 0.0, "speed": 80},
],
ship_state={"position": (400, 550)},
seed=0
)
wall_top_wrap_2 = Scenario(
name="wall_top_wrap_2",
asteroid_states=[{"position": (0, 400), "angle": 0.0, "speed": 80},
{"position": (100, 400), "angle": 0.0, "speed": 80},
{"position": (200, 400), "angle": 0.0, "speed": 80},
{"position": (300, 400), "angle": 0.0, "speed": 80},
{"position": (400, 400), "angle": 0.0, "speed": 80},
{"position": (500, 400), "angle": 0.0, "speed": 80},
{"position": (600, 400), "angle": 0.0, "speed": 80},
{"position": (700, 400), "angle": 0.0, "speed": 80},
{"position": (800, 400), "angle": 0.0, "speed": 80},
],
ship_state={"position": (400, 50)},
seed=0
)
wall_top_wrap_3 = Scenario(
name="wall_top_wrap_3",
asteroid_states=[{"position": (0, 400), "angle": 0.0, "speed": 80},
{"position": (100, 400), "angle": 0.0, "speed": 80},
{"position": (200, 400), "angle": 0.0, "speed": 80},
{"position": (300, 400), "angle": 0.0, "speed": 80},
{"position": (400, 400), "angle": 0.0, "speed": 80},
{"position": (500, 400), "angle": 0.0, "speed": 80},
{"position": (600, 400), "angle": 0.0, "speed": 80},
{"position": (700, 400), "angle": 0.0, "speed": 80},
{"position": (800, 400), "angle": 0.0, "speed": 80},
{"position": (0, 200), "angle": 0.0, "speed": 0},
{"position": (100, 200), "angle": 0.0, "speed": 0},
{"position": (200, 200), "angle": 0.0, "speed": 0},
{"position": (300, 200), "angle": 0.0, "speed": 0},
{"position": (400, 200), "angle": 0.0, "speed": 0},
{"position": (500, 200), "angle": 0.0, "speed": 0},
{"position": (600, 200), "angle": 0.0, "speed": 0},
{"position": (700, 200), "angle": 0.0, "speed": 0},
{"position": (800, 200), "angle": 0.0, "speed": 0},
],
ship_state={"position": (400, 550)},
seed=0
)
wall_top_wrap_4 = Scenario(
name="wall_top_wrap_4",
asteroid_states=[{"position": (0, 400), "angle": 0.0, "speed": 80},
{"position": (100, 400), "angle": 0.0, "speed": 80},
{"position": (200, 400), "angle": 0.0, "speed": 80},
{"position": (300, 400), "angle": 0.0, "speed": 80},
{"position": (400, 400), "angle": 0.0, "speed": 80},
{"position": (500, 400), "angle": 0.0, "speed": 80},
{"position": (600, 400), "angle": 0.0, "speed": 80},
{"position": (700, 400), "angle": 0.0, "speed": 80},
{"position": (800, 400), "angle": 0.0, "speed": 80},
{"position": (0, 200), "angle": 0.0, "speed": 0},
{"position": (100, 200), "angle": 0.0, "speed": 0},
{"position": (200, 200), "angle": 0.0, "speed": 0},
{"position": (300, 200), "angle": 0.0, "speed": 0},
{"position": (400, 200), "angle": 0.0, "speed": 0},
{"position": (500, 200), "angle": 0.0, "speed": 0},
{"position": (600, 200), "angle": 0.0, "speed": 0},
{"position": (700, 200), "angle": 0.0, "speed": 0},
{"position": (800, 200), "angle": 0.0, "speed": 0},
],
ship_state={"position": (400, 50)},
seed=0
)
# Wrap bottom scenarios
wall_bottom_wrap_1 = Scenario(
name="wall_bottom_wrap_1",
asteroid_states=[{"position": (0, 200), "angle": 180.0, "speed": 80},
{"position": (100, 200), "angle": 180.0, "speed": 80},
{"position": (200, 200), "angle": 180.0, "speed": 80},
{"position": (300, 200), "angle": 180.0, "speed": 80},
{"position": (400, 200), "angle": 180.0, "speed": 80},
{"position": (500, 200), "angle": 180.0, "speed": 80},
{"position": (600, 200), "angle": 180.0, "speed": 80},
{"position": (700, 200), "angle": 180.0, "speed": 80},
{"position": (800, 200), "angle": 180.0, "speed": 80},
],
ship_state={"position": (400, 50)},
seed=0
)
wall_bottom_wrap_2 = Scenario(
name="wall_bottom_wrap_2",
asteroid_states=[{"position": (0, 200), "angle": 180.0, "speed": 80},
{"position": (100, 200), "angle": 180.0, "speed": 80},
{"position": (200, 200), "angle": 180.0, "speed": 80},
{"position": (300, 200), "angle": 180.0, "speed": 80},
{"position": (400, 200), "angle": 180.0, "speed": 80},
{"position": (500, 200), "angle": 180.0, "speed": 80},
{"position": (600, 200), "angle": 180.0, "speed": 80},
{"position": (700, 200), "angle": 180.0, "speed": 80},
{"position": (800, 200), "angle": 180.0, "speed": 80},
],
ship_state={"position": (400, 550)},
seed=0
)
# Same downward-moving wall at y=200 plus a second, stationary wall at y=400;
# ship starts above both walls.
wall_bottom_wrap_3 = Scenario(
    name="wall_bottom_wrap_3",
    asteroid_states=(
        [{"position": (x, 200), "angle": 180.0, "speed": 80}
         for x in range(0, 801, 100)]
        + [{"position": (x, 400), "angle": 0.0, "speed": 0}
           for x in range(0, 801, 100)]
    ),
    ship_state={"position": (400, 50)},
    seed=0
)
# Same two-wall layout as wall_bottom_wrap_3, but the ship starts below
# both walls instead of above them.
wall_bottom_wrap_4 = Scenario(
    name="wall_bottom_wrap_4",
    asteroid_states=(
        [{"position": (x, 200), "angle": 180.0, "speed": 80}
         for x in range(0, 801, 100)]
        + [{"position": (x, 400), "angle": 0.0, "speed": 0}
           for x in range(0, 801, 100)]
    ),
    ship_state={"position": (400, 550)},
    seed=0
)
# A large stationary rectangular box of asteroids enclosing the ship:
# bottom edge (y=600), top edge (y=0), left column (x=0), right column (x=800).
scenario_big_box = Scenario(
    name="big_box",
    asteroid_states=(
        [{"position": (x, 600), "angle": 0.0, "speed": 0}
         for x in range(100, 701, 100)]
        + [{"position": (x, 0), "angle": 0.0, "speed": 0}
           for x in range(100, 801, 100)]
        + [{"position": (0, 0), "angle": 0.0, "speed": 0}]
        + [{"position": (0, y), "angle": 0.0, "speed": 0}
           for y in range(100, 601, 100)]
        + [{"position": (800, y), "angle": 0.0, "speed": 0}
           for y in range(100, 601, 100)]
    ),
    ship_state={"position": (400, 300)},
    seed=0
)
# A small stationary box of asteroids (x in [200, 600], y in [100, 500])
# surrounding the ship at the screen center.
scenario_small_box = Scenario(
    name="small_box",
    asteroid_states=(
        [{"position": (x, 500), "angle": 0.0, "speed": 0}
         for x in range(200, 501, 100)]
        + [{"position": (x, 100), "angle": 0.0, "speed": 0}
           for x in range(200, 601, 100)]
        + [{"position": (200, y), "angle": 0.0, "speed": 0}
           for y in range(200, 401, 100)]
        + [{"position": (600, y), "angle": 0.0, "speed": 0}
           for y in range(200, 501, 100)]
    ),
    ship_state={"position": (400, 300)},
    seed=0
)
# Two stationary corridors of size-2 asteroids crossing at the center:
# a horizontal pair of walls at y=250/y=350 (with a gap for x in (350, 450))
# and a vertical pair of walls at x=350/x=450 (with a gap for y in (200, 350)).
scenario_2_still_corridors = Scenario(
    name="scenario_2_still_corridors",
    asteroid_states=(
        [{"position": (x, 250), "angle": 0.0, "speed": 0, "size": 2}
         for x in range(0, 351, 50)]
        + [{"position": (x, 350), "angle": 0.0, "speed": 0, "size": 2}
           for x in range(0, 351, 50)]
        + [{"position": (x, 250), "angle": 0.0, "speed": 0, "size": 2}
           for x in range(450, 801, 50)]
        + [{"position": (x, 350), "angle": 0.0, "speed": 0, "size": 2}
           for x in range(450, 801, 50)]
        + [{"position": (350, y), "angle": 0.0, "speed": 0, "size": 2}
           for y in range(0, 201, 50)]
        + [{"position": (450, y), "angle": 0.0, "speed": 0, "size": 2}
           for y in range(0, 201, 50)]
        + [{"position": (350, y), "angle": 0.0, "speed": 0, "size": 2}
           for y in range(350, 601, 50)]
        + [{"position": (450, y), "angle": 0.0, "speed": 0, "size": 2}
           for y in range(350, 601, 50)]
    ),
    ship_state={"position": (400, 300)},
    seed=0
)
| 43.462322 | 120 | 0.471954 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13,385 | 0.313613 |
b31e1295214eda15108196ecd3b6619c813ed189 | 5,473 | py | Python | guess_movie/quizz/models.py | tanguyesteoule/movizz | 1b3bc00fcdd6f0c566fdffabf56c4117fa790b02 | [
"MIT"
] | 1 | 2022-01-29T20:14:28.000Z | 2022-01-29T20:14:28.000Z | guess_movie/quizz/models.py | renauddahou/movizz | 1b3bc00fcdd6f0c566fdffabf56c4117fa790b02 | [
"MIT"
] | null | null | null | guess_movie/quizz/models.py | renauddahou/movizz | 1b3bc00fcdd6f0c566fdffabf56c4117fa790b02 | [
"MIT"
] | 1 | 2022-01-16T13:45:40.000Z | 2022-01-16T13:45:40.000Z | from django.db import models
# class Game(models.Model):
# name = models.CharField(max_length=200)
#
# def __str__(self):
# return self.name
class Movie(models.Model):
    """A movie record; the quiz asks players to identify movies from
    quotes or screenshots. Fields are nullable because data is imported
    incrementally (presumably from IMDB — TODO confirm source)."""
    imdb_id = models.CharField(max_length=200, null=True, blank=True)
    name = models.CharField(max_length=200, null=True, blank=True)
    director = models.CharField(max_length=200, null=True, blank=True)
    year = models.IntegerField(null=True, blank=True)
    popularity = models.FloatField(null=True, blank=True)
    # summary = models.TextField(null=True, blank=True)
    image = models.ImageField(upload_to='covers', null=True, blank=True)
    # has_quote is tri-state (True/False/unknown); has_image defaults to False.
    has_quote = models.BooleanField(null=True, blank=True)
    has_image = models.BooleanField(default=0, null=True, blank=True)

    def __str__(self):
        return self.name
class Quote(models.Model):
    """A quote belonging to one movie, shown as a quiz prompt."""
    movie = models.ForeignKey(Movie, on_delete=models.CASCADE)
    quote_text = models.TextField(null=True, blank=True)

    def __str__(self):
        return self.quote_text
class Screenshot(models.Model):
    """A still image from a movie, used as a quiz prompt in image mode."""
    movie = models.ForeignKey(Movie, on_delete=models.CASCADE)
    image = models.ImageField(upload_to='screenshot', null=True, blank=True)
    # quote_text = models.TextField(null=True, blank=True)
class Game(models.Model):
    """One quiz session: tracks progress (current_q of nb_q), the host,
    the prompt mode and the per-question timing configuration."""
    name = models.CharField(max_length=200, null=True, blank=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    current_q = models.IntegerField(null=True, blank=True)  # index of current question
    nb_q = models.IntegerField(null=True, blank=True)  # total number of questions
    host = models.CharField(max_length=200, null=True, blank=True)
    mode = models.CharField(default="quote", max_length=200, null=True, blank=True)
    # "quote" or "image"
    game_mode = models.CharField(max_length=200, null=True, blank=True)
    # 'chill' or an int (presumably a timer in seconds — TODO confirm)
    game_mode_debrief = models.CharField(max_length=200, null=True, blank=True)
    # 'chill' or an int

    def __str__(self):
        return self.name
class Question(models.Model):
    """A quote-mode question: three candidate movies, the quote shown,
    and the movie the quote actually belongs to (movie_guessed)."""
    movie1 = models.ForeignKey(Movie, on_delete=models.CASCADE, related_name='m1')
    movie2 = models.ForeignKey(Movie, on_delete=models.CASCADE, related_name='m2')
    movie3 = models.ForeignKey(Movie, on_delete=models.CASCADE, related_name='m3')
    movie_guessed = models.ForeignKey(Movie, on_delete=models.CASCADE, related_name='mg')
    quote = models.ForeignKey(Quote, on_delete=models.CASCADE)
    game = models.ForeignKey(Game, on_delete=models.CASCADE, blank=True, null=True)

    def __str__(self):
        return f'{self.movie1.id}_{self.movie2.id}_{self.movie3.id}_{self.quote.id}_{self.movie_guessed.id}'
class QuestionImage(models.Model):
    """An image-mode question: three candidate movies, the correct one,
    and a serialized list of screenshot ids to display."""
    movie1 = models.ForeignKey(Movie, on_delete=models.CASCADE, related_name='mi1')
    movie2 = models.ForeignKey(Movie, on_delete=models.CASCADE, related_name='mi2')
    movie3 = models.ForeignKey(Movie, on_delete=models.CASCADE, related_name='mi3')
    movie_guessed = models.ForeignKey(Movie, on_delete=models.CASCADE, related_name='mig')
    list_image_id = models.TextField(null=True, blank=True)  # serialized screenshot ids
    game = models.ForeignKey(Game, on_delete=models.CASCADE, blank=True, null=True)

    def __str__(self):
        return f'{self.game}_{self.movie_guessed.id}'
class Answer(models.Model):
    """A player's chosen movie for a quote-mode question."""
    user_id = models.CharField(max_length=200, null=True, blank=True)
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    movie_prop = models.ForeignKey(Movie, on_delete=models.CASCADE)

    def __str__(self):
        return f'{self.user_id}_{self.question}_{self.movie_prop}'
class AnswerImage(models.Model):
    """A player's chosen movie for an image-mode question, with a score."""
    user_id = models.CharField(max_length=200, null=True, blank=True)
    questionimage = models.ForeignKey(QuestionImage, on_delete=models.CASCADE)
    movie_prop = models.ForeignKey(Movie, on_delete=models.CASCADE)
    score = models.IntegerField(null=True, blank=True)

    def __str__(self):
        return f'{self.user_id}_{self.questionimage}_{self.movie_prop}'
class Genre(models.Model):
    """A movie genre (linked to movies through MovieGenre)."""
    name = models.CharField(max_length=200, null=True, blank=True)

    def __str__(self):
        return self.name
class Country(models.Model):
    """A production country (linked to movies through MovieCountry)."""
    name = models.CharField(max_length=200, null=True, blank=True)

    def __str__(self):
        return self.name
class Player(models.Model):
    """A quiz participant (linked to games through GamePlayer)."""
    user_id = models.CharField(max_length=200, null=True, blank=True)
    user_name = models.CharField(max_length=200, null=True, blank=True)

    def __str__(self):
        return self.user_name
class Preselect(models.Model):
    """A named, reusable pre-selection of movies for building games."""
    name = models.CharField(max_length=200, null=True, blank=True)
    list_movie = models.TextField(null=True, blank=True)  # serialized movie ids
    timestamp = models.DateTimeField(auto_now_add=True)
    author = models.CharField(max_length=200, null=True, blank=True)

    def __str__(self):
        return self.name
class MovieGenre(models.Model):
    """Many-to-many join table between Movie and Genre."""
    movie = models.ForeignKey(Movie, on_delete=models.CASCADE)
    genre = models.ForeignKey(Genre, on_delete=models.CASCADE)

    def __str__(self):
        return f'{self.movie.id}_{self.genre.id}'
class GamePlayer(models.Model):
    """Many-to-many join table between Game and Player."""
    game = models.ForeignKey(Game, on_delete=models.CASCADE)
    player = models.ForeignKey(Player, on_delete=models.CASCADE)

    def __str__(self):
        return f'{self.game.id}_{self.player.id}'
class MovieCountry(models.Model):
    """Many-to-many join table between Movie and Country."""
    movie = models.ForeignKey(Movie, on_delete=models.CASCADE)
    country = models.ForeignKey(Country, on_delete=models.CASCADE)

    def __str__(self):
        return f'{self.movie.id}_{self.country.id}'
| 36.731544 | 108 | 0.723004 | 5,274 | 0.96364 | 0 | 0 | 0 | 0 | 0 | 0 | 685 | 0.12516 |
b31e77923a41068a3cef4c14e6a23d2de634a6cd | 386 | py | Python | front_end/migrations/0002_rename_nome_popular_especies_nome_popular.py | majubr/website_Django | eb0683459af82e4abb3e8ccb016d52d4365ff729 | [
"MIT"
] | null | null | null | front_end/migrations/0002_rename_nome_popular_especies_nome_popular.py | majubr/website_Django | eb0683459af82e4abb3e8ccb016d52d4365ff729 | [
"MIT"
] | null | null | null | front_end/migrations/0002_rename_nome_popular_especies_nome_popular.py | majubr/website_Django | eb0683459af82e4abb3e8ccb016d52d4365ff729 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.3 on 2022-03-15 03:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('front_end', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='especies',
old_name='Nome_Popular',
new_name='Nome_popular',
),
]
| 20.315789 | 48 | 0.562176 | 295 | 0.764249 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.287565 |
b31f0c338718475bf6c2006b77b19c324343a773 | 24,889 | py | Python | ronin_3d/source/ronin_lstm_tcn.py | zju3dv/rnin-vio | b030ecc94f151159973a9086c8ba76e22bdbc56e | [
"Apache-2.0"
] | 10 | 2022-03-01T12:21:18.000Z | 2022-03-23T08:55:55.000Z | ronin_3d/source/ronin_lstm_tcn.py | zju3dv/rnin-vio | b030ecc94f151159973a9086c8ba76e22bdbc56e | [
"Apache-2.0"
] | 2 | 2022-03-07T09:43:41.000Z | 2022-03-31T14:34:53.000Z | ronin_3d/source/ronin_lstm_tcn.py | zju3dv/rnin-vio | b030ecc94f151159973a9086c8ba76e22bdbc56e | [
"Apache-2.0"
] | 4 | 2022-03-01T12:20:20.000Z | 2022-03-19T12:33:22.000Z | import json
import os
import sys
import time
from os import path as osp
from pathlib import Path
from shutil import copyfile
import numpy as np
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from tqdm import tqdm
from model_temporal import LSTMSeqNetwork, BilinearLSTMSeqNetwork, TCNSeqNetwork
from utils import load_config, MSEAverageMeter
from data_glob_speed import GlobSpeedSequence, SequenceToSequenceDataset, SenseINSSequence
from transformations import ComposeTransform, RandomHoriRotateSeq
from metric import compute_absolute_trajectory_error, compute_relative_trajectory_error
def WriteList(path, name, folders):
    """Write the entries of `folders` to the file `path/name`, one per line.

    Overwrites any existing file. The original used f.writelines() on a single
    string (which iterates characters) and an explicit close() inside the
    `with` block; both replaced with the idiomatic f.write().
    """
    with open(path + "/" + name, 'w') as f:
        for folder in folders:
            f.write(folder + "\n")
def GetFolderName(path):
    """Return the sorted names of the immediate subdirectories of `path`.

    Files are skipped; only directories are returned. Uses a comprehension
    instead of the original manual append loop.
    """
    base = os.path.abspath(path)
    folders = [name for name in os.listdir(path + "/")
               if os.path.isdir(os.path.join(base, name))]
    folders.sort()
    return folders
'''
Temporal models with loss functions in global coordinate frame
Configurations
- Model types
TCN - type=tcn
LSTM_simple - type=lstm, lstm_bilinear
'''
# Share tensors between DataLoader workers via the filesystem instead of
# file descriptors (avoids "too many open files" with many workers).
torch.multiprocessing.set_sharing_strategy('file_system')
_nano_to_sec = 1e09  # nanoseconds per second (timestamp unit conversion)
# Network I/O widths: 6 input channels (presumably gyro + accel — see
# input_format [0, 3, 6] in get_dataset) and 3 output velocity channels.
_input_channel, _output_channel = 6, 3
# _input_channel, _output_channel = 6, 2
device = 'cpu'  # module-level default; reassigned by train()/test() from args.device
class GlobalPosLoss(torch.nn.Module):
    """MSE loss on positions in the global frame.

    Both target and prediction are per-frame global velocities; positions are
    obtained by cumulative summation. In 'full' mode the loss compares the
    whole integrated trajectories; in 'part' mode it compares displacements
    over a sliding window of `history` frames.
    """

    def __init__(self, mode='full', history=None):
        super(GlobalPosLoss, self).__init__()
        assert mode in ['full', 'part']
        self.mse_loss = torch.nn.MSELoss(reduction='none')
        self.mode = mode
        if mode == 'part':
            # Windowed comparison needs an explicit window length.
            assert history is not None
            self.history = history
        else:
            self.history = 1

    def forward(self, pred, targ):
        # Integrate velocities (dropping the first frame) into positions.
        targ_pos = torch.cumsum(targ[:, 1:], 1)
        est_pos = torch.cumsum(pred[:, 1:], 1)
        if self.mode == 'part':
            h = self.history
            # Compare displacements over a window of h frames.
            targ_pos = targ_pos[:, h:, :] - targ_pos[:, :-h, :]
            est_pos = est_pos[:, h:, :] - est_pos[:, :-h, :]
        return self.mse_loss(est_pos, targ_pos).mean()
def write_config(args, **kwargs):
    """Dump the run configuration (argparse namespace + extra kwargs) to
    out_dir/config.json. Does nothing when args.out_dir is falsy."""
    if not args.out_dir:
        return
    values = vars(args)
    values['file'] = "pytorch_global_position"
    if kwargs:
        values['kwargs'] = kwargs
    config_path = osp.join(args.out_dir, 'config.json')
    with open(config_path, 'w') as f:
        json.dump(values, f, sort_keys=True)
def get_dataset(root_dir, data_list, args, **kwargs):
    """Build a SequenceToSequenceDataset over the given sequence names.

    Mode (kwargs['mode'], default 'train') controls augmentation: training
    gets random shifts, shuffling and random horizontal rotation; validation
    only shuffles; test is deterministic and uses GRV orientations only.
    """
    input_format, output_format = [0, 3, 6], [0, _output_channel]
    mode = kwargs.get('mode', 'train')

    random_shift = 0
    shuffle = False
    grv_only = False
    transform_list = []
    if mode == 'train':
        random_shift = args.step_size // 2
        shuffle = True
        transform_list.append(RandomHoriRotateSeq(input_format, output_format))
    elif mode == 'val':
        shuffle = True
    elif mode == 'test':
        grv_only = True
    transforms = ComposeTransform(transform_list)

    # Select the sequence loader class for the chosen dataset flavour.
    if args.dataset == 'ronin':
        seq_type = GlobSpeedSequence
    elif args.dataset == 'ridi':
        from data_ridi import RIDIGlobSpeedSequence
        seq_type = RIDIGlobSpeedSequence
    elif args.dataset == 'sense':
        seq_type = SenseINSSequence

    return SequenceToSequenceDataset(
        seq_type, root_dir, data_list, args.cache_path, args.step_size, args.window_size,
        random_shift=random_shift, transform=transforms, shuffle=shuffle,
        grv_only=grv_only, args=args, **kwargs)
def get_dataset_from_list(root_dir, list_path, args, **kwargs):
    """Read sequence names from a list file (first comma-separated field per
    line, '#' lines are comments) and build the dataset over them."""
    with open(list_path) as f:
        lines = f.readlines()
    data_list = [line.strip().split(',')[0]
                 for line in lines if len(line) > 0 and line[0] != '#']
    return get_dataset(root_dir, data_list, args, **kwargs)
def get_model(args, **kwargs):
    """Instantiate the temporal network selected by args.type
    ('tcn', 'lstm_bi', or the default simple LSTM)."""
    config = {}
    dropout = kwargs.get('dropout')
    if dropout:
        config['dropout'] = dropout

    if args.type == 'tcn':
        network = TCNSeqNetwork(_input_channel, _output_channel, args.kernel_size,
                                layer_channels=args.channels, **config)
        print("TCN Network. Receptive field: {} ".format(network.get_receptive_field()))
    elif args.type == 'lstm_bi':
        print("Bilinear LSTM Network")
        network = BilinearLSTMSeqNetwork(_input_channel, _output_channel, args.batch_size, device,
                                         lstm_layers=args.layers, lstm_size=args.layer_size, **config).to(device)
    else:
        print("Simple LSTM Network")
        network = LSTMSeqNetwork(_input_channel, _output_channel, args.batch_size, device,
                                 lstm_layers=args.layers, lstm_size=args.layer_size, **config).to(device)

    n_trainable = sum(p.numel() for p in network.parameters() if p.requires_grad)
    print('Network constructed. trainable parameters: {}'.format(n_trainable))
    return network
def get_loss_function(history, args, **kwargs):
    """Return the global-position loss: windowed ('part') for TCN models,
    full-trajectory otherwise."""
    if args.type == 'tcn':
        return GlobalPosLoss(mode='part', history=history)
    return GlobalPosLoss(mode='full')
def format_string(*argv, sep=' '):
    """Join the arguments into one string separated by `sep`, flattening
    nested tuples/lists/ndarrays recursively."""
    pieces = []
    for item in argv:
        if isinstance(item, (tuple, list, np.ndarray)):
            # Recurse into containers; splatting joins their elements by sep.
            pieces.append(format_string(*item, sep=sep))
        else:
            pieces.append(str(item))
    return sep.join(pieces)
def train(args, **kwargs):
    """Train a temporal model (TCN/LSTM) on the configured dataset.

    Builds train (and optional validation) loaders, constructs the network,
    loss and Adam optimizer, then runs the epoch loop. The best validation
    model and periodic snapshots are written to args.out_dir/checkpoints;
    per-epoch metrics are appended to args.out_dir/logs/log.txt. Supports
    resuming from args.continue_from. KeyboardInterrupt stops training
    gracefully; a final checkpoint_latest.pt is always saved.
    """
    # Loading data
    start_t = time.time()
    train_dataset = get_dataset_from_list(args.root_dir, args.train_list, args, mode='train', **kwargs)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.num_workers, shuffle=True,
                              drop_last=True)
    end_t = time.time()
    print('Training set loaded. Time usage: {:.3f}s'.format(end_t - start_t))
    val_dataset, val_loader = None, None
    if args.val_list is not None:
        val_dataset = get_dataset_from_list(args.validation_dir, args.val_list, args, mode='val', **kwargs)
        val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True)
        print('Validation set loaded')

    # Rebind the module-level device from the CLI choice (CPU fallback).
    global device
    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')

    if args.out_dir:
        if not osp.isdir(args.out_dir):
            os.makedirs(args.out_dir)
        if not osp.isdir(osp.join(args.out_dir, 'checkpoints')):
            os.makedirs(osp.join(args.out_dir, 'checkpoints'))
        if not osp.isdir(osp.join(args.out_dir, 'logs')):
            os.makedirs(osp.join(args.out_dir, 'logs'))
        write_config(args, **kwargs)

    print('\nNumber of train samples: {}'.format(len(train_dataset)))
    train_mini_batches = len(train_loader)
    if val_dataset:
        print('Number of val samples: {}'.format(len(val_dataset)))
        val_mini_batches = len(val_loader)

    network = get_model(args, **kwargs).to(device)
    # TCN loss windows over the receptive field; LSTM uses half the window.
    history = network.get_receptive_field() if args.type == 'tcn' else args.window_size // 2
    criterion = get_loss_function(history, args, **kwargs)
    optimizer = torch.optim.Adam(network.parameters(), args.lr)
    scheduler = ReduceLROnPlateau(optimizer, 'min', patience=10, factor=0.75, verbose=True, eps=1e-12)
    quiet_mode = kwargs.get('quiet', False)
    # NOTE(review): use_scheduler is read but never used below; the scheduler
    # always steps on validation loss regardless of this flag.
    use_scheduler = kwargs.get('use_scheduler', False)

    log_file = None
    if args.out_dir:
        log_file = osp.join(args.out_dir, 'logs', 'log.txt')
        if osp.exists(log_file):
            # Fresh run overwrites the log; resumed runs keep a backup copy.
            if args.continue_from is None:
                os.remove(log_file)
            else:
                copyfile(log_file, osp.join(args.out_dir, 'logs', 'log_old.txt'))

    start_epoch = 0
    if args.continue_from is not None and osp.exists(args.continue_from):
        # Read the original run's config to learn which device it trained on,
        # so the checkpoint tensors can be remapped onto the current device.
        with open(osp.join(str(Path(args.continue_from).parents[1]), 'config.json'), 'r') as f:
            model_data = json.load(f)

        if device.type == 'cpu':
            checkpoints = torch.load(args.continue_from, map_location=lambda storage, location: storage)
        else:
            checkpoints = torch.load(args.continue_from, map_location={model_data['device']: args.device})

        start_epoch = checkpoints.get('epoch', 0)
        network.load_state_dict(checkpoints.get('model_state_dict'))
        optimizer.load_state_dict(checkpoints.get('optimizer_state_dict'))
    if kwargs.get('force_lr', False):
        # Override the learning rate restored from the optimizer checkpoint.
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr

    step = 0
    best_val_loss = np.inf
    train_errs = np.zeros(args.epochs)

    print("Starting from epoch {}".format(start_epoch))
    try:
        for epoch in range(start_epoch, args.epochs):
            log_line = ''
            network.train()
            train_vel = MSEAverageMeter(3, [2], _output_channel)
            train_loss = 0
            start_t = time.time()
            for bid, batch in tqdm(enumerate(train_loader)):
                feat, targ, _, _ = batch
                feat, targ = feat.to(device), targ.to(device)
                optimizer.zero_grad()
                predicted = network(feat)
                train_vel.add(predicted.cpu().detach().numpy(), targ.cpu().detach().numpy())
                loss = criterion(predicted, targ)
                train_loss += loss.cpu().detach().numpy()
                loss.backward()
                optimizer.step()
                step += 1
            train_errs[epoch] = train_loss / train_mini_batches
            end_t = time.time()
            if not quiet_mode:
                print('-' * 25)
                print('Epoch {}, time usage: {:.3f}s, loss: {}, val_loss {}/{:.6f}'.format(
                    epoch, end_t - start_t, train_errs[epoch], train_vel.get_channel_avg(), train_vel.get_total_avg()))
                print('Learning rate: {}'.format(optimizer.param_groups[0]['lr']))
            log_line = format_string(log_line, epoch, optimizer.param_groups[0]['lr'], train_errs[epoch],
                                     *train_vel.get_channel_avg())
            saved_model = False
            if val_loader:
                network.eval()
                val_vel = MSEAverageMeter(3, [2], _output_channel)
                val_loss = 0
                for bid, batch in tqdm(enumerate(val_loader)):
                    feat, targ, _, _ = batch
                    feat, targ = feat.to(device), targ.to(device)
                    optimizer.zero_grad()
                    pred = network(feat)
                    val_vel.add(pred.cpu().detach().numpy(), targ.cpu().detach().numpy())
                    val_loss += criterion(pred, targ).cpu().detach().numpy()
                val_loss = val_loss / val_mini_batches
                log_line = format_string(log_line, val_loss, *val_vel.get_channel_avg())
                if not quiet_mode:
                    print('Validation loss: {} val_loss: {}/{:.6f}'.format(val_loss, val_vel.get_channel_avg(),
                                                                           val_vel.get_total_avg()))
                # Checkpoint whenever validation improves.
                if val_loss < best_val_loss:
                    best_val_loss = val_loss
                    saved_model = True
                    if args.out_dir:
                        model_path = osp.join(args.out_dir, 'checkpoints', 'checkpoint_%d.pt' % epoch)
                        torch.save({'model_state_dict': network.state_dict(),
                                    'epoch': epoch,
                                    'loss': train_errs[epoch],
                                    'optimizer_state_dict': optimizer.state_dict()}, model_path)
                        print('Best Validation Model saved to ', model_path)
                scheduler.step(val_loss)

            if args.out_dir and not saved_model and (epoch + 1) % args.save_interval == 0:  # save even with validation
                model_path = osp.join(args.out_dir, 'checkpoints', 'icheckpoint_%d.pt' % epoch)
                torch.save({'model_state_dict': network.state_dict(),
                            'epoch': epoch,
                            'loss': train_errs[epoch],
                            'optimizer_state_dict': optimizer.state_dict()}, model_path)
                print('Model saved to ', model_path)

            if log_file:
                log_line += '\n'
                with open(log_file, 'a') as f:
                    f.write(log_line)
            if np.isnan(train_loss):
                print("Invalid value. Stopping training.")
                break
    except KeyboardInterrupt:
        print('-' * 60)
        print('Early terminate')

    print('Training completed')
    if args.out_dir:
        model_path = osp.join(args.out_dir, 'checkpoints', 'checkpoint_latest.pt')
        torch.save({'model_state_dict': network.state_dict(),
                    'epoch': epoch,
                    'optimizer_state_dict': optimizer.state_dict()}, model_path)
def recon_traj_with_preds_global(dataset, preds, ind=None, seq_id=0, type='preds', **kwargs):
    """Reconstruct a 3D position trajectory from per-frame velocities.

    Args:
        dataset: sequence dataset exposing index_map, ts, gt_pos, orientations.
        preds: (N, 3) array of velocities (predicted or ground truth).
        ind: frame indices of the rows of preds; defaults to every frame of
            seq_id found in dataset.index_map.
        seq_id: sequence index inside the dataset.
        type: 'gt' returns the stored ground-truth positions directly;
            any other value integrates preds over time.

    Returns:
        Tuple of (positions, velocities, orientations) for the sequence.
    """
    # Fix: np.int was removed in NumPy 1.24; the builtin int is the
    # documented replacement and yields the same platform-default dtype.
    ind = ind if ind is not None else np.array([i[1] for i in dataset.index_map if i[0] == seq_id], dtype=int)

    if type == 'gt':
        pos = dataset.gt_pos[seq_id][:, :3]
    else:
        ts = dataset.ts[seq_id]
        # Integrate velocity using the mean frame interval as dt.
        dts = np.mean(ts[ind[1:]] - ts[ind[:-1]])
        pos = preds * dts
        # Anchor the integrated trajectory at the ground-truth start position.
        pos[0, :] = dataset.gt_pos[seq_id][0, :3]
        pos = np.cumsum(pos, axis=0)

    veloc = preds
    ori = dataset.orientations[seq_id]
    return pos, veloc, ori
def test(args, **kwargs):
    """Evaluate a trained model on test sequences.

    Loads the checkpoint at args.model_path, runs each sequence through the
    network, reconstructs predicted and ground-truth trajectories, and reports
    per-sequence velocity MSE, ATE and RTE (plus overall means). Optionally
    saves trajectories (.npy), comparison plots (.png) and a text log to
    args.out_dir. Sequences come from args.test_path (single sequence) or
    args.test_list (one name per line).
    """
    global device, _output_channel
    import matplotlib.pyplot as plt

    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')

    # Resolve the list of test sequences from either a single path or a list file.
    if args.test_path is not None:
        if args.test_path[-1] == '/':
            args.test_path = args.test_path[:-1]
        root_dir = osp.split(args.test_path)[0]
        test_data_list = [osp.split(args.test_path)[1]]
    elif args.test_list is not None:
        root_dir = args.root_dir if args.root_dir else osp.split(args.test_list)[0]
        with open(args.test_list) as f:
            test_data_list = [s.strip().split(',')[0] for s in f.readlines() if len(s) > 0 and s[0] != '#']
    else:
        raise ValueError('Either test_path or test_list must be specified.')

    # Load the first sequence to update the input and output size
    _ = get_dataset(root_dir, [test_data_list[0]], args, mode='test')

    if args.out_dir and not osp.exists(args.out_dir):
        os.makedirs(args.out_dir)

    # The training config records which device the checkpoint was saved from,
    # so tensors can be remapped onto the current device.
    with open(osp.join(str(Path(args.model_path).parents[1]), 'config.json'), 'r') as f:
        model_data = json.load(f)

    if device.type == 'cpu':
        checkpoint = torch.load(args.model_path, map_location=lambda storage, location: storage)
    else:
        checkpoint = torch.load(args.model_path, map_location={model_data['device']: args.device})

    network = get_model(args, **kwargs)
    network.load_state_dict(checkpoint.get('model_state_dict'))
    network.eval().to(device)
    print('Model {} loaded to device {}.'.format(args.model_path, device))

    log_file = None
    if args.test_list and args.out_dir:
        log_file = osp.join(args.out_dir, osp.split(args.test_list)[-1].split('.')[0] + '_log.txt')
        with open(log_file, 'w') as f:
            f.write(args.model_path + '\n')
            f.write('Seq traj_len velocity ate rte\n')

    losses_vel = MSEAverageMeter(2, [1], _output_channel)
    ate_all, rte_all = [], []
    # Frames per minute at 200 Hz; RTE is normalized to a one-minute window.
    pred_per_min = 200 * 60

    seq_dataset = get_dataset(root_dir, test_data_list, args, mode='test', **kwargs)

    for idx, data in enumerate(test_data_list):
        assert data == osp.split(seq_dataset.data_path[idx])[1]

        feat, vel = seq_dataset.get_test_seq(idx)
        feat = torch.Tensor(feat).to(device)
        # Keep only the frames that align with the ground-truth velocities.
        preds = np.squeeze(network(feat).cpu().detach().numpy())[-vel.shape[0]:, :_output_channel]

        ind = np.arange(vel.shape[0])
        val_losses = np.mean((vel - preds) ** 2, axis=0)
        losses_vel.add(vel, preds)

        print('Reconstructing trajectory')
        pos_pred, gv_pred, _ = recon_traj_with_preds_global(seq_dataset, preds, ind=ind, type='pred', seq_id=idx)
        pos_gt, gv_gt, _ = recon_traj_with_preds_global(seq_dataset, vel, ind=ind, type='gt', seq_id=idx)

        if args.out_dir is not None and osp.isdir(args.out_dir):
            np.save(osp.join(args.out_dir, '{}_{}.npy'.format(data, args.type)),
                    np.concatenate([pos_pred, pos_gt], axis=1))

        ate = compute_absolute_trajectory_error(pos_pred, pos_gt)
        # For sequences shorter than one minute, scale the full-sequence RTE
        # up to the one-minute equivalent.
        if pos_pred.shape[0] < pred_per_min:
            ratio = pred_per_min / pos_pred.shape[0]
            rte = compute_relative_trajectory_error(pos_pred, pos_gt, delta=pos_pred.shape[0] - 1) * ratio
        else:
            rte = compute_relative_trajectory_error(pos_pred, pos_gt, delta=pred_per_min)
        pos_cum_error = np.linalg.norm(pos_pred - pos_gt, axis=1)
        ate_all.append(ate)
        rte_all.append(rte)

        print('Sequence {}, Velocity loss {} / {}, ATE: {}, RTE:{}'.format(data, val_losses, np.mean(val_losses), ate,
                                                                           rte))
        log_line = format_string(data, np.mean(val_losses), ate, rte)

        if not args.fast_test:
            # Plot trajectory overlay, cumulative position error, and
            # per-channel velocity comparison.
            kp = preds.shape[1]
            if kp == 2:
                targ_names = ['vx', 'vy']
            elif kp == 3:
                targ_names = ['vx', 'vy', 'vz']
            plt.figure('{}'.format(data), figsize=(16, 9))
            plt.subplot2grid((kp, 2), (0, 0), rowspan=kp - 1)
            plt.plot(pos_pred[:, 0], pos_pred[:, 1])
            plt.plot(pos_gt[:, 0], pos_gt[:, 1])
            plt.title(data)
            plt.axis('equal')
            plt.legend(['Predicted', 'Ground truth'])
            plt.subplot2grid((kp, 2), (kp - 1, 0))
            plt.plot(pos_cum_error)
            plt.legend(['ATE:{:.3f}, RTE:{:.3f}'.format(ate_all[-1], rte_all[-1])])
            for i in range(kp):
                plt.subplot2grid((kp, 2), (i, 1))
                plt.plot(ind, preds[:, i])
                plt.plot(ind, vel[:, i])
                plt.legend(['Predicted', 'Ground truth'])
                plt.title('{}, error: {:.6f}'.format(targ_names[i], val_losses[i]))
            plt.tight_layout()
            if args.show_plot:
                plt.show()

        if args.out_dir is not None and osp.isdir(args.out_dir):
            plt.savefig(osp.join(args.out_dir, '{}_{}.png'.format(data, args.type)))

        if log_file is not None:
            with open(log_file, 'a') as f:
                log_line += '\n'
                f.write(log_line)

        plt.close('all')

    ate_all = np.array(ate_all)
    rte_all = np.array(rte_all)

    measure = format_string('ATE', 'RTE', sep='\t')
    values = format_string(np.mean(ate_all), np.mean(rte_all), sep='\t')
    print(measure, '\n', values)
    if log_file is not None:
        with open(log_file, 'a') as f:
            f.write(measure + '\n')
            f.write(values)
if __name__ == '__main__':
    """
    Run file with individual arguments or/and config file. If argument appears in both config file and args,
    args is given precedence.
    """
    # Default JSON config sits two directories above this file.
    default_config_file = osp.abspath(osp.join(osp.abspath(__file__), '../../config/temporal_model_defaults.json'))
    import argparse
    parser = argparse.ArgumentParser(description="Run seq2seq model in train/test mode [required]. Optional "
                                                 "configurations can be specified as --key [value..] pairs",
                                     add_help=True)
    parser.add_argument('--config', type=str, help='Configuration file [Default: {}]'.format(default_config_file),
                        default=default_config_file)
    # common options shared by both train and test modes
    parser.add_argument('--type', type=str, choices=['tcn', 'lstm', 'lstm_bi'], help='Model type', default='lstm')
    parser.add_argument('--root_dir', type=str, default="/data/INSData/ins_data_test/IDOL_SenseINS/building1/train_debug", help='Path to data directory')
    parser.add_argument('--validation_dir', type=str, default="/data/INSData/ins_data_test/IDOL_SenseINS/building1/train_debug")
    # parser.add_argument('--root_dir', type=str,
    #                     default="/home/SENSETIME/xurunsen/project/ronin/RONIN/train_debug",
    #                     help='Path to data directory')
    # parser.add_argument('--validation_dir', type=str,
    #                     default="/home/SENSETIME/xurunsen/project/ronin/RONIN/train_debug")
    parser.add_argument('--cache_path', type=str, default=None)
    parser.add_argument('--feature_sigma', type=float, help='Gaussian for smoothing features')
    parser.add_argument('--target_sigma', type=float, help='Gaussian for smoothing target')
    parser.add_argument('--window_size', type=int)
    parser.add_argument('--step_size', type=int)
    parser.add_argument('--batch_size', type=int)
    parser.add_argument('--num_workers', type=int)
    parser.add_argument('--out_dir', type=str, default='../output/ronin_lstm/idol/2021.05.14/train_debug')
    parser.add_argument('--device', type=str, help='Cuda device (e.g:- cuda:0) or cpu')
    parser.add_argument('--dataset', type=str, choices=['ronin', 'ridi', 'sense'], default='sense')
    parser.add_argument('--imu_freq', type=int, default=200)
    # tcn-specific options
    tcn_cmd = parser.add_argument_group('tcn', 'configuration for TCN')
    tcn_cmd.add_argument('--kernel_size', type=int)
    tcn_cmd.add_argument('--channels', type=str, help='Channel sizes for TCN layers (comma separated)')
    # lstm-specific options
    lstm_cmd = parser.add_argument_group('lstm', 'configuration for LSTM')
    lstm_cmd.add_argument('--layers', type=int)
    lstm_cmd.add_argument('--layer_size', type=int)
    # 'train'/'test' subcommands; mode is optional so a bare invocation parses.
    mode = parser.add_subparsers(title='mode', dest='mode', help='Operation: [train] train model, [test] evaluate model')
    mode.required = False
    # train subcommand
    train_cmd = mode.add_parser('train')
    train_cmd.add_argument('--train_list', type=str)
    train_cmd.add_argument('--val_list', type=str)
    train_cmd.add_argument('--continue_from', type=str, default=None)
    train_cmd.add_argument('--epochs', type=int)
    train_cmd.add_argument('--save_interval', type=int)
    train_cmd.add_argument('--lr', '--learning_rate', type=float)
    # test subcommand
    test_cmd = mode.add_parser('test')
    test_cmd.add_argument('--test_path', type=str, default=None)
    test_cmd.add_argument('--test_list', type=str, default=None)
    test_cmd.add_argument('--model_path', type=str, default='/home/SENSETIME/xurunsen/project/ronin/output/ronin_lstm/idol/2021.05.14/train_debug/checkpoints/checkpoint_714.pt')
    test_cmd.add_argument('--fast_test', action='store_true')
    test_cmd.add_argument('--show_plot', action='store_true')
    '''
    Extra arguments
    Set True: use_scheduler,
              quite (no output on stdout),
              force_lr (force lr when a model is loaded from continue_from)
    float: dropout,
           max_ori_error (err. threshold for priority grv in degrees)
           max_velocity_norm (filter outliers in training)
    '''
    # Unknown CLI keys are merged from the config file by load_config;
    # explicit CLI args win over config-file values.
    args, unknown_args = parser.parse_known_args()
    np.set_printoptions(formatter={'all': lambda x: '{:.6f}'.format(x)})
    args, kwargs = load_config(default_config_file, args, unknown_args)
    print(args, kwargs)
    # add by runsen
    # Auto-generate train/validation/test list files from the data folder
    # names when the user did not supply explicit list files.
    if args.mode == "train":
        if args.train_list is None:
            WriteList(args.root_dir, "train_list.txt", GetFolderName(args.root_dir))
            args.train_list = args.root_dir + "/train_list.txt"
        if args.validation_dir is not None:
            WriteList(args.validation_dir, "validation_list.txt", GetFolderName(args.validation_dir))
            args.val_list = args.validation_dir + "/validation_list.txt"
    elif args.mode == "test":
        if args.test_list is None:
            WriteList(args.root_dir, "test_list.txt", GetFolderName(args.root_dir))
            args.test_list = args.root_dir + "/test_list.txt"
    # Dispatch to the selected mode. Test mode forces batch_size=1 so each
    # sequence is evaluated individually.
    if args.mode == 'train':
        train(args, **kwargs)
    elif args.mode == 'test':
        if not args.model_path:
            raise ValueError("Model path required")
        args.batch_size = 1
        test(args, **kwargs)
| 42.472696 | 177 | 0.608783 | 994 | 0.039937 | 0 | 0 | 0 | 0 | 0 | 0 | 4,708 | 0.18916 |
b32183a4281f2e37bf180469868b2508b19cc91c | 343 | py | Python | Leetcode/2001-3000/2046. Sort Linked List Already Sorted Using Absolute Values/2046.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/2001-3000/2046. Sort Linked List Already Sorted Using Absolute Values/2046.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/2001-3000/2046. Sort Linked List Already Sorted Using Absolute Values/2046.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | class Solution:
def sortLinkedList(self, head: Optional[ListNode]) -> Optional[ListNode]:
prev = head
curr = head.next
while curr:
if curr.val < 0:
prev.next = curr.next
curr.next = head
head = curr
curr = prev.next
else:
prev = curr
curr = curr.next
return head
| 20.176471 | 75 | 0.556851 | 342 | 0.997085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b3231dd9d1c15c0eaaa1376b68c6482d2fb00f29 | 104 | py | Python | constants.py | pvantonov/kodi-amvnews | 504eeb59dc0b2b9fe60a0aa7debbe35140dc4156 | [
"MIT"
] | 3 | 2019-05-14T21:41:18.000Z | 2020-08-06T13:25:45.000Z | constants.py | pvantonov/kodi-amvnews | 504eeb59dc0b2b9fe60a0aa7debbe35140dc4156 | [
"MIT"
] | null | null | null | constants.py | pvantonov/kodi-amvnews | 504eeb59dc0b2b9fe60a0aa7debbe35140dc4156 | [
"MIT"
] | null | null | null | # coding=utf-8
"""
Definition of constants.
"""
from xbmcswift2.plugin import Plugin
PLUGIN = Plugin()
| 13 | 36 | 0.721154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.442308 |
b3234231c15b5185c9a6303b3c3d18e7683e4414 | 1,021 | py | Python | python/chaosencrypt/test/discrete_pisarchik.py | nfejes/chaotic-image-encryption | c7b7bc8230e4b18227a27e5e19f2588766b3f53b | [
"MIT"
] | 3 | 2021-11-29T17:03:13.000Z | 2022-01-10T13:56:02.000Z | python/chaosencrypt/test/discrete_pisarchik.py | nfejes/chaotic-image-encryption | c7b7bc8230e4b18227a27e5e19f2588766b3f53b | [
"MIT"
] | null | null | null | python/chaosencrypt/test/discrete_pisarchik.py | nfejes/chaotic-image-encryption | c7b7bc8230e4b18227a27e5e19f2588766b3f53b | [
"MIT"
] | 4 | 2018-04-12T04:30:35.000Z | 2021-02-22T22:59:39.000Z | from scipy.misc import imread,imshow
import chaosencrypt as cenc
import numpy as np
from chaosencrypt.discrete_pisarchik import bitexpand,bitreduce
# Read the test image from disk.
print('Loading image...')
im_org = imread('../image.jpg')
# Downsample: keep every 3rd pixel in both axes to speed up the round trip.
im = im_org[::3,::3,:].copy()
# Cipher key; parameter semantics (a, n, r, bits) are defined by
# chaosencrypt.discrete_pisarchik — presumably map parameter, iterations,
# rounds, and word size. TODO confirm against the library.
key = {'a':3.8,'n':10,'r':3,'bits':32}
# Encrypt
print('Encrypting image (discrete pisarchik)...')
enc_im = cenc.encrypt(im,key,'discrete_pisarchik')
# Decrypt
print('Decrypting image (discrete pisarchik)...')
dec_im = cenc.decrypt(enc_im,key,'discrete_pisarchik')
# Per-pixel absolute reconstruction error; all zeros means the
# encrypt/decrypt round trip was lossless.
diff = np.array(np.abs((im*1.0) - (dec_im*1.0)), dtype='int')
maxdiff = np.max(diff)
print('Max diff:', maxdiff)
# Normalize the error image to 0-255 for display (black if lossless).
if maxdiff == 0:
	diff_im = np.zeros(im.shape, dtype='uint8')
else:
	diff_im = np.array((diff - np.min(diff)) / (np.max(diff) - np.min(diff))*255.99, dtype='uint8')
print('[ original | encrypted ]')
print('[ decrypted | abs(org-dec) ]')
# 2x2 mosaic: original/encrypted on top, decrypted/error below.
imshow(np.concatenate(
	[np.concatenate((im,bitreduce(enc_im)),1),
	 np.concatenate((dec_im,diff_im),1)]
	,0))
| 23.744186 | 96 | 0.682664 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 320 | 0.313418 |
b32342c6d36a233f788e221c35a9371d05ba8ac7 | 76 | py | Python | exercicios/Curso_Udemy_Python/sec3_aula66.py | IgoPereiraBarros/maratona-data-science-brasil | cc07476579134a2764f00d229d415657555dcdd1 | [
"MIT"
] | null | null | null | exercicios/Curso_Udemy_Python/sec3_aula66.py | IgoPereiraBarros/maratona-data-science-brasil | cc07476579134a2764f00d229d415657555dcdd1 | [
"MIT"
] | null | null | null | exercicios/Curso_Udemy_Python/sec3_aula66.py | IgoPereiraBarros/maratona-data-science-brasil | cc07476579134a2764f00d229d415657555dcdd1 | [
"MIT"
] | null | null | null | lista = ['python', 'c', 'c++', 'ruby', 'php']
print(sorted(lista, key=len)) | 25.333333 | 45 | 0.552632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.355263 |
b3240e96d503aecba5fa6738ed7f723f65cd5c2c | 77 | py | Python | src/fate_of_dice/system/call_of_cthulhu/__init__.py | bonczeq/FateOfDice | ce1704ac490f55bc600c0963958d4175104e85e5 | [
"MIT"
] | null | null | null | src/fate_of_dice/system/call_of_cthulhu/__init__.py | bonczeq/FateOfDice | ce1704ac490f55bc600c0963958d4175104e85e5 | [
"MIT"
] | null | null | null | src/fate_of_dice/system/call_of_cthulhu/__init__.py | bonczeq/FateOfDice | ce1704ac490f55bc600c0963958d4175104e85e5 | [
"MIT"
] | null | null | null | from .skill_check import check_skill, SkillCheckResult, SkillCheckResultType
| 38.5 | 76 | 0.883117 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b327c58748a0a44c36ab794d83e81277cb12cd44 | 1,258 | py | Python | setup.py | dvd7587/listthedocs | b4734be11977ea971e0ad5fa2e9920cc63e54ec0 | [
"MIT"
] | 3 | 2019-08-12T13:46:13.000Z | 2020-03-20T08:09:16.000Z | setup.py | dvd7587/listthedocs | b4734be11977ea971e0ad5fa2e9920cc63e54ec0 | [
"MIT"
] | 7 | 2019-08-12T13:06:32.000Z | 2020-03-28T14:33:16.000Z | setup.py | dvd7587/listthedocs | b4734be11977ea971e0ad5fa2e9920cc63e54ec0 | [
"MIT"
] | 2 | 2019-09-26T14:31:09.000Z | 2019-10-01T08:49:47.000Z | from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='listthedocs',
version='2.0.1',
author='Alessandro Bacchini',
author_email='allebacco@gmail.com',
description='List your documentations',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/allebacco/listthedocs',
packages=find_packages(),
package_data={'listthedocs': [
'listthedocs/templates/*.*',
'listthedocs/static/styles/*.*',
]
},
include_package_data=True,
install_requires=[
'natsort',
'requests',
'attrs',
'python-dateutil',
'Flask-SQLAlchemy',
'Flask',
],
tests_require=[
'pytest'
],
classifiers=[
"Programming Language :: Python :: 3",
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Topic :: Documentation",
"Topic :: Software Development :: Documentation",
],
python_requires='>=3.5',
)
| 27.955556 | 57 | 0.601749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 608 | 0.483307 |
b328226b7a463946689852a8e54f45bd61fef3b4 | 14,450 | py | Python | tests/test_table_aggregation/test_schema_matcher.py | afcarl/corvid | e257074edeac1e8dce4a737b60e93a9bea37b6b9 | [
"Apache-2.0"
] | 1 | 2019-04-15T13:49:39.000Z | 2019-04-15T13:49:39.000Z | tests/test_table_aggregation/test_schema_matcher.py | afcarl/corvid | e257074edeac1e8dce4a737b60e93a9bea37b6b9 | [
"Apache-2.0"
] | null | null | null | tests/test_table_aggregation/test_schema_matcher.py | afcarl/corvid | e257074edeac1e8dce4a737b60e93a9bea37b6b9 | [
"Apache-2.0"
] | 1 | 2020-09-02T13:49:52.000Z | 2020-09-02T13:49:52.000Z | import unittest
from corvid.types.table import Token, Cell, Table
from corvid.table_aggregation.pairwise_mapping import PairwiseMapping
from corvid.table_aggregation.schema_matcher import SchemaMatcher, \
ColNameSchemaMatcher
class SchemaMatcherTest(unittest.TestCase):
    """Tests for SchemaMatcher.aggregate_tables.

    The column mapping used in the test swaps the two value columns, so
    the gold aggregate is the source table with header1/header2 values
    exchanged.
    """
    def setUp(self):
        # 4x3 source table: header row plus subjects x/y/z with values 1..6.
        self.table_source = Table.create_from_cells([
            Cell(tokens=[Token(text='subject')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='x')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='y')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='3')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='4')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='z')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='5')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='6')], rowspan=1, colspan=1)
        ], nrow=4, ncol=3)
    def test_aggregate_tables(self):
        schema_matcher = SchemaMatcher()
        # Target schema with a placeholder data row; the 'not_copied' cells
        # must not survive into the aggregate output.
        target_schema = Table.create_from_cells(cells=[
            Cell(tokens=[Token(text='subject')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='not_copied')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='not_copied')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='not_copied')], rowspan=1, colspan=1)
        ], nrow=2, ncol=3)
        # column_mappings (1, 2)/(2, 1): source col 1 feeds target col 2 and
        # vice versa, i.e. the two value columns are swapped.
        pred_aggregate_table = schema_matcher.aggregate_tables(
            pairwise_mappings=[
                PairwiseMapping(self.table_source, target_schema,
                                score=-999, column_mappings=[(1, 2), (2, 1)])
            ],
            target_schema=target_schema)
        gold_aggregate_table = Table.create_from_cells([
            Cell(tokens=[Token(text='subject')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='x')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='y')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='4')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='3')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='z')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='6')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='5')], rowspan=1, colspan=1)
        ], nrow=4, ncol=3)
        print(pred_aggregate_table)
        print(gold_aggregate_table)
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(pred_aggregate_table, gold_aggregate_table)
    def test_aggregate_tables_order(self):
        # test correct ordering of 3+ tables
        pass
class ColumnNameSchemaMatcher(unittest.TestCase):
    """Tests for ColNameSchemaMatcher.map_tables.

    setUp builds one canonical source table plus variants with fewer,
    extra, permuted, or missing header columns; test_map_tables checks the
    column mappings and match scores produced for target schemas of
    differing width and column order. Scores appear to count the number of
    matched (non-subject) columns.
    """
    def setUp(self):
        # 4x3 canonical table: subject column plus header1/header2 values.
        self.table_source = Table.create_from_cells([
            Cell(tokens=[Token(text='subject')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='x')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='y')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='3')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='4')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='z')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='5')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='6')], rowspan=1, colspan=1)
        ], nrow=4, ncol=2)
        # Same subjects but only the header2 column (rows reordered).
        self.table_less_header = Table.create_from_cells([
            Cell(tokens=[Token(text='subject')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='x')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='z')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='5')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='y')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='4')], rowspan=1, colspan=1)
        ], nrow=4, ncol=2)
        # Extra header3 column; each data value repeated across columns.
        self.table_more_header = Table.create_from_cells([
            Cell(tokens=[Token(text='subject')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header3')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='x')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='z')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='5')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='5')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='5')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='y')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='4')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='4')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='4')], rowspan=1, colspan=1)
        ], nrow=4, ncol=4)
        # header1/header2 swapped relative to table_source.
        self.table_permute_header = Table.create_from_cells([
            Cell(tokens=[Token(text='subject')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='x')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='z')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='5')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='6')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='y')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='3')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='4')], rowspan=1, colspan=1)
        ], nrow=4, ncol=3)
        # Data rows only; no header row at all.
        self.table_no_header = Table.create_from_cells([
            Cell(tokens=[Token(text='x')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='z')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='5')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='6')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='y')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='3')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='4')], rowspan=1, colspan=1)
        ], nrow=3, ncol=3)
        # Header row only; no data rows.
        self.table_only_header = Table.create_from_cells(cells=[
            Cell(tokens=[Token(text='subject')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header2')], rowspan=1, colspan=1)
        ], nrow=1, ncol=3)
    def test_map_tables(self):
        # Target schemas of varying width/order to map the fixtures against.
        target_schema_easy = Table.create_from_cells(cells=[
            Cell(tokens=[Token(text='subject')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header2')], rowspan=1, colspan=1)
        ], nrow=1, ncol=3)
        target_schema_less = Table.create_from_cells(cells=[
            Cell(tokens=[Token(text='subject')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header2')], rowspan=1, colspan=1)
        ], nrow=1, ncol=2)
        target_schema_more = Table.create_from_cells(cells=[
            Cell(tokens=[Token(text='subject')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header0')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header2')], rowspan=1, colspan=1)
        ], nrow=1, ncol=4)
        target_schema_permuted = Table.create_from_cells(cells=[
            Cell(tokens=[Token(text='subject')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header1')], rowspan=1, colspan=1)
        ], nrow=1, ncol=3)
        schema_matcher = ColNameSchemaMatcher()
        # Identical schema: identity mapping for both value columns.
        self.assertListEqual(schema_matcher.map_tables(
            tables=[self.table_source],
            target_schema=target_schema_easy
        ),
            [
                PairwiseMapping(self.table_source,
                                target_schema_easy,
                                score=2.0,
                                column_mappings=[(1, 1), (2, 2)])
            ])
        # Permuted schema: the two value columns cross over.
        self.assertListEqual(schema_matcher.map_tables(
            tables=[self.table_source],
            target_schema=target_schema_permuted
        ),
            [
                PairwiseMapping(self.table_source,
                                target_schema_permuted,
                                score=2.0,
                                column_mappings=[(1, 2), (2, 1)])
            ])
        # Wider schema: header0 has no source column, the rest shift right.
        self.assertListEqual(schema_matcher.map_tables(
            tables=[self.table_source],
            target_schema=target_schema_more
        ),
            [
                PairwiseMapping(self.table_source,
                                target_schema_more,
                                score=2.0,
                                column_mappings=[(1, 2), (2, 3)])
            ])
        # Narrower schema: only header2 can be matched (score 1.0).
        self.assertListEqual(schema_matcher.map_tables(
            tables=[self.table_source],
            target_schema=target_schema_less
        ),
            [
                PairwiseMapping(self.table_source,
                                target_schema_less,
                                score=1.0,
                                column_mappings=[(2, 1)])
            ])
        # Multiple source tables mapped against one permuted schema; results
        # are expected in the same order as the input tables.
        self.assertListEqual(schema_matcher.map_tables(
            tables=[self.table_source,
                    self.table_less_header,
                    self.table_more_header],
            target_schema=target_schema_permuted
        ),
            [
                PairwiseMapping(self.table_source,
                                target_schema_permuted,
                                score=2.0,
                                column_mappings=[(1, 2), (2, 1)]),
                PairwiseMapping(self.table_less_header,
                                target_schema_permuted,
                                score=1.0,
                                column_mappings=[(1, 1)]),
                PairwiseMapping(self.table_more_header,
                                target_schema_permuted,
                                score=2.0,
                                column_mappings=[(1, 1), (2, 2)]),
            ])
class ColumnValueSchemaMatcher(unittest.TestCase):
    """Fixtures for value-based schema matching.

    NOTE(review): only setUp is defined — this class builds row-permuted,
    extra-row, and missing-row variants of the canonical table but has no
    test methods yet.
    """
    def setUp(self):
        # Same data as the canonical table but with rows in z/y/x order.
        self.table_permute_rows = Table.create_from_cells(cells=[
            Cell(tokens=[Token(text='subject')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='z')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='5')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='6')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='y')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='3')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='4')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='x')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='2')], rowspan=1, colspan=1)
        ], nrow=4, ncol=3)
        # Canonical rows plus an extra 'w' subject row.
        self.table_extra_rows = Table.create_from_cells(cells=[
            Cell(tokens=[Token(text='subject')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='x')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='y')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='3')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='4')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='z')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='5')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='6')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='w')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='7')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='8')], rowspan=1, colspan=1)
        ], nrow=5, ncol=3)
        # Canonical table with the 'z' row dropped.
        self.table_missing_rows = Table.create_from_cells(cells=[
            Cell(tokens=[Token(text='subject')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='header2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='x')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='1')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='2')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='y')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='3')], rowspan=1, colspan=1),
            Cell(tokens=[Token(text='4')], rowspan=1, colspan=1)
        ], nrow=3, ncol=3)
| 50.173611 | 77 | 0.554879 | 14,210 | 0.983391 | 0 | 0 | 0 | 0 | 0 | 0 | 747 | 0.051696 |
b328bfacdff8f1f770e34a14c19b52872f8eed25 | 4,827 | py | Python | jiggle_version/parse_version/parse_dunder_version.py | matthewdeanmartin/jiggle_version | 7bb82b75321e007f9e9e6d52a7bc569a5fe41052 | [
"MIT"
] | 1 | 2019-02-23T16:13:56.000Z | 2019-02-23T16:13:56.000Z | jiggle_version/parse_version/parse_dunder_version.py | matthewdeanmartin/jiggle_version | 7bb82b75321e007f9e9e6d52a7bc569a5fe41052 | [
"MIT"
] | 17 | 2018-07-14T17:04:49.000Z | 2022-03-24T15:59:11.000Z | jiggle_version/parse_version/parse_dunder_version.py | matthewdeanmartin/jiggle_version | 7bb82b75321e007f9e9e6d52a7bc569a5fe41052 | [
"MIT"
] | 1 | 2019-12-01T02:18:36.000Z | 2019-12-01T02:18:36.000Z | """
A whole file dedicated to parsing __version__ in all it's weird possible ways
1) Only acts on source, no file handling.
2) some functions for *by line*
3) some functions for *by file*
4) Handle quotes
5) Handle whitespace
6) Handle version as tuple
"""
import ast
import re
from typing import Any, Optional, Tuple
version_tokens = [
"__version__", # canonical
"__VERSION__", # rare and wrong, but who am I to argue
"VERSION", # rare
"version",
"PACKAGE_VERSION",
]
def find_by_ast(line: str, version_token: str = "__version__") -> Optional[str]:
"""
Safer way to 'execute' python code to get a simple value
:param line:
:param version_token:
:return:
"""
if not line:
return ""
# clean up line.
simplified_line = simplify_line(line)
if simplified_line.startswith(version_token):
# noinspection PyBroadException
try:
tree: Any = ast.parse(simplified_line)
if hasattr(tree.body[0].value, "s"):
return str(tree.body[0].value.s)
if hasattr(tree.body[0].value, "elts"):
version_parts = []
for elt in tree.body[0].value.elts:
if hasattr(elt, "n"):
version_parts.append(str(elt.n))
else:
version_parts.append(str(elt.s))
return ".".join(version_parts)
if hasattr(tree.body[0].value, "n"):
return str(tree.body[0].value.n)
# print(tree)
except Exception:
# raise
return None
return None
def simplify_line(line: str, keep_comma: bool = False) -> str:
"""
Change ' to "
Remove tabs and spaces (assume no significant whitespace inside a version string!)
"""
if not line:
return ""
if "#" in line:
parts = line.split("#")
simplified_line = parts[0]
else:
simplified_line = line
simplified_line = (
simplified_line.replace(" ", "")
.replace("'", '"')
.replace("\t", "")
.replace("\n", "")
.replace("'''", '"') # version strings shouldn't be split across lines normally
.replace('"""', '"')
)
if not keep_comma:
simplified_line = simplified_line.strip(" ,")
return simplified_line
def find_version_by_regex(
file_source: str, version_token: str = "__version__"
) -> Optional[str]:
"""
Regex for dunder version
"""
if not file_source:
return None
version_match = re.search(
r"^" + version_token + r" = ['\"]([^'\"]*)['\"]", file_source, re.M
)
if version_match:
candidate = version_match.group(1)
if candidate in ("", "."): # yes, it will match to a .
return None
return candidate
return None
def find_version_by_string_lib(
line: str, version_token: str = "__version__"
) -> Optional[str]:
"""
No regex parsing. Or at least, mostly, not regex.
"""
if not line:
return None
simplified_line = simplify_line(line)
version = None
if simplified_line.strip().startswith(version_token):
if '"' not in simplified_line:
pass
# logger.debug("Weird version string, no double quote : " + unicode((full_path, line, simplified_line)))
else:
if "=" in simplified_line:
post_equals = simplified_line.split("=")[1]
if post_equals.startswith('"'):
parts = post_equals.split('"')
version = parts[0]
if not version:
version = None
return version
def validate_string(version: Optional[str]) -> Optional[str]:
"""
Trying to catch expressions here
:param version:
:return:
"""
if not version:
return None
for char in str(version):
if char in " \t()":
return None
# raise TypeError("Bad parse : " + version)
return version
def find_in_line(line: str) -> Tuple[Optional[str], Optional[str]]:
"""
Use three strategies to parse version string
"""
if not line:
return None, None
for version_token in version_tokens:
by_ast = find_by_ast(line, version_token)
by_ast = validate_string(by_ast)
if by_ast:
return by_ast, version_token
by_string_lib = find_version_by_string_lib(line, version_token)
by_string_lib = validate_string(by_string_lib)
if by_string_lib:
return by_string_lib, version_token
by_regex = find_version_by_regex(line, version_token)
by_regex = validate_string(by_regex)
if by_regex:
return by_regex, version_token
return None, None
| 27.582857 | 116 | 0.57862 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,334 | 0.276362 |
b328d4402e25c702ac70b4a9a12fa0081138d156 | 1,025 | py | Python | guppy/__init__.py | EhsanKia/guppy3 | 87bb2e5b9e3d76c8b1c6698c40fdd395b1ee3cd3 | [
"MIT"
] | null | null | null | guppy/__init__.py | EhsanKia/guppy3 | 87bb2e5b9e3d76c8b1c6698c40fdd395b1ee3cd3 | [
"MIT"
] | null | null | null | guppy/__init__.py | EhsanKia/guppy3 | 87bb2e5b9e3d76c8b1c6698c40fdd395b1ee3cd3 | [
"MIT"
] | null | null | null | """\
Top level package of Guppy, a library and programming environment
currently providing in particular the Heapy subsystem, which supports
object and heap memory sizing, profiling and debugging.
What is exported is the following:
hpy() Create an object that provides a Heapy entry point.
Root() Create an object that provides a top level entry point.
"""
__all__ = ('hpy', 'Root')
from guppy.etc.Glue import Root # Get main Guppy entry point
from guppy import sets as sets
def hpy(ht=None):
"""\
Main entry point to the Heapy system.
Returns an object that provides a session context and will import
required modules on demand. Some commononly used methods are:
.heap() get a view of the current reachable heap
.iso(obj..) get information about specific objects
The optional argument, useful for debugging heapy itself, is:
ht an alternative hiding tag
"""
r = Root()
if ht is not None:
r.guppy.heapy.View._hiding_tag_ = ht
return r.guppy.heapy.Use
| 27.702703 | 69 | 0.718049 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 798 | 0.778537 |
b32a64ff545e87a33e77785c0b313ddafd790edf | 38 | py | Python | python/testData/quickFixes/PyRemoveUnusedLocalQuickFixTest/removeChainedAssignmentStatementFirstTarget_after.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/quickFixes/PyRemoveUnusedLocalQuickFixTest/removeChainedAssignmentStatementFirstTarget_after.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | python/testData/quickFixes/PyRemoveUnusedLocalQuickFixTest/removeChainedAssignmentStatementFirstTarget_after.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | def f():
<caret>b = 0
return b | 12.666667 | 16 | 0.473684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b32a7ddcce779b4404e4e3304054b0b32c70d5ff | 11,115 | py | Python | constrained_attack.py | ameya005/Semantic_Adversarial_Attacks | 3459c0eafbad39baff3fec034dca0611f21989b7 | [
"MIT"
] | 9 | 2019-11-02T17:17:10.000Z | 2022-03-15T08:22:06.000Z | constrained_attack.py | ameya005/Semantic_Adversarial_Attacks | 3459c0eafbad39baff3fec034dca0611f21989b7 | [
"MIT"
] | 4 | 2020-03-24T17:34:42.000Z | 2021-09-08T01:17:35.000Z | constrained_attack.py | ameya005/Semantic_Adversarial_Attacks | 3459c0eafbad39baff3fec034dca0611f21989b7 | [
"MIT"
] | 1 | 2020-10-26T20:37:47.000Z | 2020-10-26T20:37:47.000Z | """
Attacking the model using a Fader Network
Note: We are basically searching for the interpolation values
which allow us to break a simple classifier.
"""
import argparse
import json
import logging
import os
from collections import OrderedDict
from datetime import datetime
import numpy as np
import torch
import torchvision
from matplotlib import pyplot as plt
from torch import nn
from tqdm import tqdm
from FaderNetworks.src.model import AutoEncoder
from simple_classifier import Classifier, get_data_loader, restore_model
from utils import get_logger
from losses import attack_cw_l2, nontarget_logit_loss
# Keys missing from old Fader checkpoints: newer torch BatchNorm layers carry
# a num_batches_tracked buffer, so Attacker.restore(legacy=True) merges these
# placeholder entries into the loaded state dict before load_state_dict.
_LEGACY_STATE_DICT_PATCH = OrderedDict([("enc_layers.1.1.num_batches_tracked", 1), ("enc_layers.2.1.num_batches_tracked", 1),
                                        ("enc_layers.3.1.num_batches_tracked",
                                         1), ("enc_layers.4.1.num_batches_tracked", 1),
                                        ("enc_layers.5.1.num_batches_tracked",
                                         1), ("enc_layers.6.1.num_batches_tracked", 1),
                                        ("dec_layers.0.1.num_batches_tracked",
                                         1), ("dec_layers.1.1.num_batches_tracked", 1),
                                        ("dec_layers.2.1.num_batches_tracked",
                                         1), ("dec_layers.3.1.num_batches_tracked", 1),
                                        ("dec_layers.4.1.num_batches_tracked", 1), ("dec_layers.5.1.num_batches_tracked", 1)])
# torch.nn.Module.dump_patches=True
class ModAlpha(nn.Module):
    """
    Workaround module that builds the Fader attribute vector from three
    scalar interpolation values.

    Each scalar a is expanded to the (1 - a, a) pair that the Fader
    decoder expects, and the three pairs are concatenated into a single
    (1, 6) attribute vector, keeping the computation differentiable with
    respect to the scalars.
    """

    def __init__(self, alpha1, alpha2, alpha3):
        super(ModAlpha, self).__init__()
        # FIX: wrap the scalars in nn.Parameter instead of bare
        # requires_grad tensors — plain tensor attributes are invisible to
        # module.parameters(), so an optimizer built from this module could
        # never update the attack variables. Access as .a1/.a2/.a3 is
        # unchanged (Parameter is a Tensor subclass, requires_grad=True).
        self.a1 = nn.Parameter(torch.tensor(alpha1))
        self.a2 = nn.Parameter(torch.tensor(alpha2))
        self.a3 = nn.Parameter(torch.tensor(alpha3))

    def forward(self):
        # Encode each scalar a as (1 - a, a) — algebraically identical to
        # the original [1, 0] - [a, 0] + [0, a] construction — and
        # concatenate the three pairs into a (1, 6) attribute vector.
        pairs = [torch.stack([1.0 - a, a]) for a in (self.a1, self.a2, self.a3)]
        return torch.cat(pairs).unsqueeze(0)
class Attacker(nn.Module):
    """Attack system which can be optimized end-to-end.

    Input passes through a pretrained Fader network for modification and
    then through the target classifier:

        Input -> (Fader network) -> (Target model) -> Classification label

    Since the Fader network requires each attribute element ``alpha_i`` to
    be expanded to ``(1 - alpha_i, alpha_i)``, the ModAlpha module handles
    that conversion while preserving gradients.
    """

    def __init__(self, params):
        """
        :param params: namespace carrying model/fader paths and the Fader
            AutoEncoder hyper-parameters (img_sz, img_fm, n_layers, ...).
        """
        super(Attacker, self).__init__()
        self.params = params
        self.target_model = Classifier(
            (params.img_sz, params.img_sz, params.img_fm))
        self.adv_generator = AutoEncoder(params)
        # Differentiable attribute vector, initialised to neutral (0, 0, 0).
        self.attrib_gen = ModAlpha(0., 0., 0.)

    def restore(self, legacy=False):
        """Load weights for both the target classifier and the Fader net.

        :param legacy: when True, patch the checkpoint with the BatchNorm
            ``num_batches_tracked`` keys missing from old-PyTorch saves.
        """
        self.target_model.load_state_dict(torch.load(self.params.model))
        if legacy:
            old_model_state_dict = torch.load(self.params.fader)
            old_model_state_dict.update(_LEGACY_STATE_DICT_PATCH)
            model_state_d = old_model_state_dict
        else:
            model_state_d = torch.load(self.params.fader)
        self.adv_generator.load_state_dict(model_state_d, strict=False)

    def forward(self, x, attrib_vector=None):
        """Encode, decode with the attribute vector, then classify.

        :param x: input image batch
        :param attrib_vector: optional explicit (1, 2*n_attr) attribute
            vector; when None the differentiable ModAlpha vector is used.
        :returns: (reconstruction, classifier logits)
        """
        # Bug fix: ``attrib_vector`` used to be ignored entirely, so the
        # linear/binary alpha-search callers were silently sweeping nothing.
        self.attrib_vec = (self.attrib_gen() if attrib_vector is None
                           else attrib_vector)
        l_z = self.adv_generator.encode(x)
        recon = self.adv_generator.decode(l_z, self.attrib_vec)[-1]
        cl_label = self.target_model(recon)
        return recon, cl_label
def build_parser():
    """Build the command-line parser for the attack driver script."""
    p = argparse.ArgumentParser()
    p.add_argument('-m', '--model', required=True,
                   help='Path to the model to be attacked. As of now, we will use models trained with the simple_classifier script')
    p.add_argument('-f', '--fader', required=True,
                   help='Path to fader networks. Use one of the many Fader networks trained')
    p.add_argument('-o', '--outdir', default='out', required=False,
                   help='Output directory')
    p.add_argument('-d', '--data_dir', required=True,
                   help='Path to data directory')
    p.add_argument('-a', '--attrib_path', required=True,
                   help='Path to attrib file')
    p.add_argument('-t', '--type', choices=['cw', 'fn'], default='fn',
                   help='Attack type: \n\t cw: Carlini-Wagner L2 attack \n\t fn: Fader Network based attack')
    return p
def attack_linearly(img, model, attrib_tuple, device):
    """Sweep the attribute strength alpha over ``[attrib_tuple[0], attrib_tuple[1])``
    in steps of 1e-3 and record where the classifier's prediction flips.

    :param img: batched input image tensor; img[0] is assumed CHW — TODO confirm
    :param model: Attacker-like module; ``model(img, attrib)`` must return
        (reconstruction, prediction logits) and expose ``model.target_model``
    :param attrib_tuple: (start, end) of the alpha sweep range
    :param device: torch device to run on
    :returns: ``(breaks, pred_values, recon2, recon3)`` — ``breaks`` is a 0/1
        list per alpha (1 = prediction flipped); ``pred_values`` is never
        filled (presumably a leftover — verify against callers); ``recon2``
        stacks up to 5 "broken" original|reconstruction pairs, ``recon3``
        the first 5 "unbroken" ones.
    """
    # Just run a linear search of alpha
    model.eval()
    img = img.to(device)
    attrib = [1-attrib_tuple[0], attrib_tuple[0]]
    attrib_step = 1e-3
    recon_imgs = []
    # Label assigned to the clean input; a "break" is any alpha whose
    # reconstruction changes this label.
    true_label = torch.argmax(model.target_model(img))
    breaks = []
    pred_values = []
    cnt = 0
    alphas = np.arange(attrib_tuple[0], attrib_tuple[1], attrib_step)
    print(alphas)
    recons = []
    b_recons = []
    for alpha in alphas:
        # Fader networks encode a scalar attribute a as the pair (1-a, a).
        attrib = torch.tensor([1-alpha, alpha]).unsqueeze(0).to(device)
        recon, pred = model(img, attrib)
        if torch.argmax(pred) != true_label:
            breaks.append(1)
            # Side-by-side original|reconstruction, transposed CHW -> HWC.
            recons.append(np.hstack([img[0].detach().cpu().numpy().transpose(
                (1, 2, 0)), recon[0].detach().cpu().numpy().transpose((1, 2, 0))]))
        else:
            breaks.append(0)
            b_recons.append(np.hstack([img[0].detach().cpu().numpy().transpose(
                (1, 2, 0)), recon[0].detach().cpu().numpy().transpose((1, 2, 0))]))
        cnt += 1
    # NOTE(review): np.vstack raises on an empty list — if every alpha (or
    # no alpha) breaks the classifier, b_recons may be empty here; confirm
    # callers always produce at least one unbroken reconstruction.
    if len(recons) != 0:
        recon2 = np.vstack(recons[:5])
    else:
        recon2 = np.vstack(b_recons[-5:])
    recon3 = np.vstack(b_recons[:5])
    return breaks, pred_values, recon2, recon3
def attack_binarily(img, model, attrib_tuple, device, logger):
    """Binary-search the smallest attribute strength alpha that flips the
    classifier's prediction for ``img``.

    Currently assumes the alpha value of the attribute is positive, and only
    works for a single attribute.

    :param img: batched input image tensor
    :param model: Attacker-like module; ``model(img, attrib)`` returns
        (reconstruction, prediction logits) and exposes ``model.target_model``
    :param attrib_tuple: (low, high) initial alpha bracket
    :param device: torch device to run on
    :param logger: logger used for per-iteration progress
    :returns: ``(perm_attrib, recon_attrib, true_label, label_attrib)`` —
        the best (smallest-magnitude) flipping alpha found, its
        reconstruction, the clean-input label and the final predicted label.
    """
    model.eval()
    img = img.to(device)
    true_label = torch.argmax(model.target_model(img))
    # Fader encoding (1 - a, a) for both bracket endpoints.
    attrib_left = torch.tensor(
        [1-attrib_tuple[0], attrib_tuple[0]]).unsqueeze(0).to(device)
    attrib_right = torch.tensor(
        [1-attrib_tuple[1], attrib_tuple[1]]).unsqueeze(0).to(device)
    recon_left, pred_recon_left = model(img, attrib_left)
    recon_right, pred_recon_right = model(img, attrib_right)
    pred_recon_left = torch.argmax(pred_recon_left)
    pred_recon_right = torch.argmax(pred_recon_right)
    # Widen/shift the bracket once if both endpoints behave the same way
    # (no sign change to bisect over).
    if pred_recon_left == true_label and pred_recon_right == true_label:
        logger.info('Higher max value required')
        attrib_left = attrib_right
        attrib_right = attrib_right*2
    elif pred_recon_left != true_label and pred_recon_right != true_label:
        logger.info('Lower min value required')
        attrib_right = attrib_left
        attrib_left = attrib_left/2.0
    # perm_attrib tracks the smallest-magnitude alpha seen that breaks
    # the classifier; initialised to the right (breaking) endpoint.
    perm_attrib = attrib_right[0, 1]
    attrib = attrib_left + (attrib_right - attrib_left)/2.0
    cnt = 0
    max_iter = 10000
    # Bisect until the bracket on the alpha component is narrower than 1e-3.
    while np.abs(attrib_left[0, 1].cpu().numpy() - attrib_right[0, 1].cpu().numpy()) > 1e-3:
        logger.info('Num_iter:{}, alpha:{}'.format(
            cnt, attrib[0, 1].cpu().numpy()))
        if cnt > max_iter:
            break
        recon_attrib, pred_attrib = model(img, attrib)
        label_attrib = torch.argmax(pred_attrib)
        if label_attrib != true_label:
            logger.info('Broken at least once, {}, {}'.format(
                true_label, label_attrib))
            # Midpoint breaks the classifier: shrink from the right and
            # remember it if it is the smallest-magnitude break so far.
            attrib_right = attrib
            if np.abs(attrib[0, 1]) - np.abs(perm_attrib) <= 0:
                perm_attrib = attrib[0, 1]
            #attrib_left = attrib
        else:
            attrib_left = attrib
        attrib = attrib_left + (attrib_right - attrib_left)/2.0
        cnt += 1
    # Re-run the best alpha found to return its reconstruction and label.
    recon_attrib, pred_attrib = model(img, torch.tensor(
        [1-perm_attrib, perm_attrib]).unsqueeze(0).to(device))
    label_attrib = torch.argmax(pred_attrib)
    logger.info('True_label:{}, pred_label:{}'.format(
        true_label, label_attrib))
    return perm_attrib, recon_attrib, true_label, label_attrib
def main():
    """Attack up to 500 test images with either the Fader-network ('fn') or
    Carlini-Wagner ('cw') attack; save per-image plots/arrays and a JSON-lines
    summary (vals.csv) to the output directory.
    """
    parser = build_parser()
    args = parser.parse_args()
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    args.logger = get_logger(args.outdir)
    args.gen = torch.load(args.fader)
    # Issue: Since Fader Network guys store the entrie model, it assumes certain paths.
    # We try to fix it by saving only the weights (state_dict) using mod_fader_network.py
    # Therefore, we need to reconstruct the model using the parameters.
    # Fader AutoEncoder hyper-parameters matching the pretrained checkpoint.
    args.img_sz = 256
    args.img_fm = 3
    args.init_fm = 32
    args.max_fm = 512
    args.n_layers = 7
    args.n_attr = 6
    args.n_skip = 0
    args.dec_dropout = 0.0
    args.deconv_method = 'convtranspose'
    args.attr = [('Eyeglasses', 2)]
    args.instance_norm = False
    args.batch_size = 1
    args.train_attribute = 'Male'
    # Minor bug with storing the model is creating this issue. Anyway, there is not much speedup with cuda
    device = torch.device('cpu')
    # Data Loader
    loader = get_data_loader(args, train='test', shuffle=False)
    cnt = 0
    f = open(os.path.join(args.outdir, 'vals.csv'), 'w')
    full_logits = []
    for input, label in tqdm(loader, total=len(loader)):
        # Fresh attacker (and ModAlpha state) per image.
        attacker = Attacker(args).to(device)
        attacker.restore()
        if cnt > 500:
            break
        if args.type == 'fn':
            # NOTE(review): attack_optim is not imported or defined anywhere
            # in this module — the 'fn' branch would raise NameError; confirm
            # it should come from the losses module alongside attack_cw_l2.
            success, out_img, alpha, orig_logits, logits, loss, logit_array = attack_optim(
                input, attacker, [(-1.5, 1.75), (-2, 3), (-6, 7)], device, args.logger)
        elif args.type == 'cw':
            success, out_img, alpha, orig_logits, logits, loss, logit_array = attack_cw_l2(
                input, attacker, 0.5, device, args.logger)
        print('Loss:{}'.format(loss))
        # Save a titled plot: "<n>_broken.png" on success, else "<n>_unbroken.png".
        if success:
            plt.imshow(out_img)
            plt.title('alpha:{}'.format(alpha))
            plt.savefig(os.path.join(
                args.outdir, '{}_broken.png'.format(str(cnt))))
        else:
            plt.imshow(out_img)
            plt.title('unbroken_alpha:{}'.format(alpha))
            plt.savefig(os.path.join(
                args.outdir, '{}_unbroken.png'.format(str(cnt))))
        np.save(os.path.join(args.outdir, '{}.npy'.format(str(cnt))), out_img)
        print(loss.shape)
        # One JSON record per image appended to vals.csv (JSON-lines format).
        out_dict = {'success': success, 'orig_logits': orig_logits[0].tolist(
        ), 'logits': logits[0].tolist(), 'alpha': alpha[0].tolist(), 'loss': loss.tolist(), 'logit_array': logit_array}
        outstr = json.dumps(out_dict)
        f.write(outstr+'\n')
        cnt += 1
    f.close()


if __name__ == '__main__':
    main()
| 40.565693 | 153 | 0.619793 | 2,383 | 0.214395 | 0 | 0 | 0 | 0 | 0 | 0 | 2,427 | 0.218354 |
b32a9291641b2be9820f1e8aa540ae73a72b18c4 | 290 | py | Python | Ejercicio5.py | mariagarciau/introduccion-algoritmica | 1543005a290daca8969f8ca3364f46d8149f8434 | [
"Apache-2.0"
] | null | null | null | Ejercicio5.py | mariagarciau/introduccion-algoritmica | 1543005a290daca8969f8ca3364f46d8149f8434 | [
"Apache-2.0"
] | null | null | null | Ejercicio5.py | mariagarciau/introduccion-algoritmica | 1543005a290daca8969f8ca3364f46d8149f8434 | [
"Apache-2.0"
] | null | null | null | def descuento(niños=int(input("Cuantos niños son "))):
if niños==2:
descuentoTotal=10
elif niños==3:
descuentoTotal=15
elif niños==4:
descuentoTotal=18
elif niños>=5:
descuentoTotal=18+(niños-4)*1
return print(descuentoTotal)
descuento()
| 24.166667 | 54 | 0.631034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.070707 |
b32bac57ca8d636728e4c8f5baeac6b0cfea964f | 3,857 | py | Python | experiments/graph/data_loader.py | t3hseus/ariadne | b4471a37741000e22281c4d6ff647d65ab9e1914 | [
"MIT"
] | 6 | 2020-08-28T22:44:07.000Z | 2022-01-24T20:53:00.000Z | experiments/graph/data_loader.py | t3hseus/ariadne | b4471a37741000e22281c4d6ff647d65ab9e1914 | [
"MIT"
] | 1 | 2021-02-20T09:38:46.000Z | 2021-02-20T09:38:46.000Z | experiments/graph/data_loader.py | t3hseus/ariadne | b4471a37741000e22281c4d6ff647d65ab9e1914 | [
"MIT"
] | 2 | 2021-10-04T09:25:06.000Z | 2022-02-09T09:09:09.000Z | import logging
from torch import multiprocessing
from typing import Callable
import gin
from torch.utils.data import random_split, Subset, DataLoader
from ariadne.graph_net.dataset import GraphBatchBucketSampler
from ariadne_v2 import jit_cacher
from ariadne_v2.data_loader import BaseDataLoader
from experiments.graph.dataset import TorchGraphDataset
LOGGER = logging.getLogger('ariadne.dataloader')
@gin.configurable
class GraphDataLoader_NEW(BaseDataLoader):
    """Data loader that opens a cached TorchGraphDataset read-only and
    exposes 80/20 train/validation DataLoaders over it.
    """

    def __init__(self,
                 batch_size: int,
                 dataset_cls: TorchGraphDataset.__class__,
                 collate_fn: Callable,
                 subset_cls: Subset.__class__ = Subset,
                 with_random=True):
        """
        :param batch_size: batch size forwarded to both DataLoaders
        :param dataset_cls: zero-argument factory producing the dataset
        :param collate_fn: collate function for the DataLoaders
        :param subset_cls: Subset subclass used to wrap the two splits
        :param with_random: shuffle the 80/20 split; otherwise split by
            contiguous index ranges
        """
        # HACK: determine how to transfer mutex with the pytorch multiprocessing between the process
        lock = multiprocessing.Lock()
        jit_cacher.init_locks(lock)
        # END HACK
        super(GraphDataLoader_NEW, self).__init__(batch_size)
        self.subset_cls = subset_cls
        self.dataset = dataset_cls()
        # Open the dataset read-only through the cacher; 'r' mode and
        # drop_old=False preserve whatever was previously materialized.
        with jit_cacher.instance() as cacher:
            self.dataset.connect(cacher,
                                 self.dataset.dataset_name,
                                 drop_old=False, mode='r')

        assert len(self.dataset) > 0, f"empty dataset {self.dataset.dataset_name}"
        # Fixed 80/20 train/validation split.
        n_train = int(len(self.dataset) * 0.8)
        n_valid = len(self.dataset) - n_train
        LOGGER.info(f"Initializing dataloader with {self.dataset.__class__.__name__}. Len = {len(self.dataset)}.")
        LOGGER.info(f"Num train samples: {n_train}")
        LOGGER.info(f"Num valid samples: {n_valid}")
        if with_random:
            self.train_data, self.val_data = random_split(self.dataset, [n_train, n_valid])
            # random_split returns plain Subsets; rebrand them as the
            # configured subset class (relies on identical instance layout).
            self.train_data.__class__ = self.subset_cls
            self.val_data.__class__ = self.subset_cls
        else:
            self.train_data = self.subset_cls(self.dataset, range(0, n_train))
            self.val_data = self.subset_cls(self.dataset, range(n_train, n_train + n_valid))

        self.collate_fn = collate_fn

    def __del__(self):
        # Best-effort disconnect from the cacher on garbage collection.
        if self.dataset:
            self.dataset.disconnect()

    def get_val_dataloader(self) -> DataLoader:
        """Return a DataLoader over the validation split."""
        return DataLoader(
            dataset=self.val_data,
            batch_size=self.batch_size,
            collate_fn=self.collate_fn)

    def get_train_dataloader(self) -> DataLoader:
        """Return a DataLoader over the training split."""
        return DataLoader(
            dataset=self.train_data,
            batch_size=self.batch_size,
            collate_fn=self.collate_fn)
@gin.configurable
class GraphsDataLoader_Sampler_New(GraphDataLoader_NEW):
    """GraphDataLoader_NEW variant that batches via GraphBatchBucketSampler.

    The parent is initialised with batch_size=1 and each split gets its own
    bucket sampler; the DataLoaders then delegate batching to that sampler.
    """

    def __init__(self,
                 dataset_cls: TorchGraphDataset.__class__,
                 collate_fn: Callable,
                 subset_cls: Subset.__class__ = Subset,
                 with_random=True):
        super(GraphsDataLoader_Sampler_New, self).__init__(
            batch_size=1,
            dataset_cls=dataset_cls,
            collate_fn=collate_fn,
            with_random=with_random,
            subset_cls=subset_cls)
        self.train_sampler = GraphBatchBucketSampler(self.train_data)
        self.val_sampler = GraphBatchBucketSampler(self.val_data)

    def _make_loader(self, split, sampler) -> DataLoader:
        # Shared construction: both loaders differ only in split/sampler.
        return DataLoader(dataset=split,
                          batch_size=1,
                          collate_fn=self.collate_fn,
                          batch_sampler=sampler)

    def get_val_dataloader(self) -> DataLoader:
        """Return a bucket-sampled DataLoader over the validation split."""
        return self._make_loader(self.val_data, self.val_sampler)

    def get_train_dataloader(self) -> DataLoader:
        """Return a bucket-sampled DataLoader over the training split."""
        return self._make_loader(self.train_data, self.train_sampler)
| 37.813725 | 114 | 0.618356 | 3,409 | 0.883848 | 0 | 0 | 3,445 | 0.893181 | 0 | 0 | 324 | 0.084003 |
b32c5cd5fbfe4f285fdd52d173c46b83f38134cf | 3,403 | py | Python | murano-7.0.0/murano/policy/modify/actions/action_manager.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 91 | 2015-04-26T16:05:03.000Z | 2021-12-28T07:12:33.000Z | murano-7.0.0/murano/policy/modify/actions/action_manager.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | murano-7.0.0/murano/policy/modify/actions/action_manager.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 61 | 2015-05-19T22:56:34.000Z | 2021-06-01T05:38:53.000Z | # Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import importutils
from stevedore import extension
import yaml
LOG = logging.getLogger(__name__)
class ModifyActionManager(object):
    """Loads and applies modify actions.

    Actions are discovered through an extensible stevedore plugin mechanism
    (entry-point namespace 'murano_policy_modify_actions') and applied to a
    given object model according to an action specification retrieved from
    congress.
    """

    def __init__(self):
        # name -> action class, filled lazily by load_action.
        self._cache = {}

    def load_action(self, name):
        """Return the action class registered under ``name``.

        Results are cached per manager instance; on a miss the class is
        resolved through the distutils entry points of the
        'murano_policy_modify_actions' namespace.

        :param name: action name
        :return: the action class
        """
        try:
            return self._cache[name]
        except KeyError:
            action = self._load_action(name)
            self._cache[name] = action
            return action

    @staticmethod
    def _load_action(name):
        # Scan the entry-point namespace without instantiating plugins.
        manager = extension.ExtensionManager(
            namespace='murano_policy_modify_actions',
            invoke_on_load=False
        )
        match = next((e for e in manager.extensions if e.name == name), None)
        if match is None:
            raise ValueError('No such action definition: {action_name}'
                             .format(action_name=name))
        # entry_point_target looks like 'module:Class'; import it as a path.
        target = match.entry_point_target.replace(':', '.')
        return importutils.import_class(target)

    def apply_action(self, obj, action_spec):
        """Apply the actions described by ``action_spec`` to ``obj``.

        The spec is YAML whose top-level keys are action names and whose
        values are keyword-parameter dictionaries, e.g.
        ``remove-object: {object_id: abc123}``.

        :param obj: subject of modification
        :param action_spec: YAML action spec
        :raise ValueError: in case of malformed action spec
        """
        parsed = yaml.safe_load(action_spec)
        if not isinstance(parsed, dict):
            raise ValueError('Expected action spec format is '
                             '"action-name: {{p1: v1, ...}}" '
                             'but got "{action_spec}"'
                             .format(action_spec=action_spec))
        for action_name, action_kwargs in parsed.items():
            LOG.debug('Executing action {name}, params {params}'
                      .format(name=action_name, params=action_kwargs))
            # Resolve, instantiate with the spec's parameters, then apply.
            action_cls = self.load_action(action_name)
            action_cls(**action_kwargs).modify(obj)
| 35.821053 | 78 | 0.640317 | 2,612 | 0.767558 | 0 | 0 | 490 | 0.143991 | 0 | 0 | 1,914 | 0.562445 |
b32f8859e3ed1181b4a6089919d01fcaa1f90e87 | 17,034 | py | Python | wsgi_basic/common/wsgi.py | QthCN/wsgi-basic | e080304aeaa9922fc9367dbb5cb57a7ab9494b38 | [
"Apache-2.0"
] | null | null | null | wsgi_basic/common/wsgi.py | QthCN/wsgi-basic | e080304aeaa9922fc9367dbb5cb57a7ab9494b38 | [
"Apache-2.0"
] | null | null | null | wsgi_basic/common/wsgi.py | QthCN/wsgi-basic | e080304aeaa9922fc9367dbb5cb57a7ab9494b38 | [
"Apache-2.0"
] | null | null | null | import copy
import itertools
import wsgiref.util
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import importutils
import routes.middleware
import six
import webob.dec
import webob.exc
from wsgi_basic import exception
from wsgi_basic.common import authorization
from wsgi_basic.common import dependency
from wsgi_basic.common import utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# Environment variable used to pass the request context
CONTEXT_ENV = 'wsgi_basic.context'
# Environment variable used to pass the request params
PARAMS_ENV = 'wsgi_basic.params'
JSON_ENCODE_CONTENT_TYPES = set(['application/json',
'application/json-home'])
class BaseApplication(object):
    """Abstract WSGI application wrapper; concrete apps override __call__."""

    @classmethod
    def factory(cls, global_config, **local_config):
        """paste.deploy app factory.

        paste passes every value found under the [app:APPNAME] section of
        the deployment file as a keyword argument, so the class is simply
        instantiated with that local configuration. For example::

            [app:wadl]
            latest_version = 1.3
            paste.app_factory = wsgi_basic.fancy_api:Wadl.factory

        results in ``wsgi_basic.fancy_api.Wadl(latest_version='1.3')``.
        Subclasses may override ``factory``, but the kwarg passing usually
        makes that unnecessary.
        """
        return cls(**local_config)

    def __call__(self, environ, start_response):
        """Handle one WSGI request.

        Subclasses must override this. A typical implementation decorates
        itself with ``@webob.dec.wsgify()`` and returns one of: a plain
        string body, a ``webob.exc`` HTTP exception, a ``webob.Response``,
        or another WSGI application to run next (optionally via
        ``req.get_response(...)`` when headers need adjusting).
        """
        raise NotImplementedError('You must implement __call__')
@dependency.requires("token_api", "policy_api")
class Application(BaseApplication):
    """Dispatches routed requests to controller methods.

    The Router middleware stores the matched controller/action in
    environ['wsgiorg.routing_args']; this class builds a request context,
    invokes ``getattr(self, action)`` with it, and renders the result (or
    any raised exception) as a WSGI response.
    """

    @webob.dec.wsgify()
    def __call__(self, req):
        # Routing args were populated by the Router's RoutesMiddleware.
        arg_dict = req.environ['wsgiorg.routing_args'][1]
        action = arg_dict.pop('action')
        del arg_dict['controller']

        # allow middleware up the stack to provide context, params and headers.
        context = req.environ.get(CONTEXT_ENV, {})

        context['query_string'] = dict(req.params.items())
        context['headers'] = dict(req.headers.items())
        context['path'] = req.environ['PATH_INFO']
        scheme = (None if not CONF.secure_proxy_ssl_header
                  else req.environ.get(CONF.secure_proxy_ssl_header))
        if scheme:
            # NOTE(andrey-mp): "wsgi.url_scheme" contains the protocol used
            # before the proxy removed it ('https' usually). So if
            # the webob.Request instance is modified in order to use this
            # scheme instead of the one defined by API, the call to
            # webob.Request.relative_url() will return a URL with the correct
            # scheme.
            req.environ['wsgi.url_scheme'] = scheme
        context['host_url'] = req.host_url
        params = req.environ.get(PARAMS_ENV, {})
        # authentication and authorization attributes are set as environment
        # values by the container and processed by the pipeline. the complete
        # set is not yet know.
        context['environment'] = req.environ
        context['accept_header'] = req.accept
        # NOTE(review): req.environ is cleared here, yet req.environ[...] is
        # read again below (LOG.info, the Unauthorized handler and the final
        # render_response call) — with a plain webob Request that would raise
        # TypeError. Confirm whether this assignment is intentional.
        req.environ = None

        params.update(arg_dict)

        context.setdefault('is_admin', False)

        method = getattr(self, action)
        # NOTE(morganfainberg): use the request method to normalize the
        # response code between GET and HEAD requests. The HTTP status should
        # be the same.
        LOG.info('%(req_method)s %(uri)s', {
            'req_method': req.environ['REQUEST_METHOD'].upper(),
            'uri': wsgiref.util.request_uri(req.environ),
        })

        params = self._normalize_dict(params)

        try:
            result = method(context, **params)
        except exception.Unauthorized as e:
            LOG.warning(
                ("Authorization failed. %(exception)s from "
                 "%(remote_addr)s"),
                {'exception': e, 'remote_addr': req.environ['REMOTE_ADDR']})
            return render_exception(e, context=context)
        except exception.Error as e:
            LOG.warning(six.text_type(e))
            return render_exception(e, context=context)
        except TypeError as e:
            LOG.exception(six.text_type(e))
            return render_exception(exception.ValidationError(e),
                                    context=context)
        except Exception as e:
            LOG.exception(six.text_type(e))
            return render_exception(exception.UnexpectedError(exception=e),
                                    context=context)

        # Pass through results that are already renderable as-is.
        if result is None:
            return render_response(status=(204, 'No Content'))
        elif isinstance(result, six.string_types):
            return result
        elif isinstance(result, webob.Response):
            return result
        elif isinstance(result, webob.exc.WSGIHTTPException):
            return result

        response_code = self._get_response_code(req)
        return render_response(body=result, status=response_code,
                               method=req.environ['REQUEST_METHOD'])

    def _get_response_code(self, req):
        # Always None here; subclasses may override to force a status code.
        code = None
        return code

    def _normalize_arg(self, arg):
        # Turn route placeholders into valid Python keyword names.
        return arg.replace(':', '_').replace('-', '_')

    def _normalize_dict(self, d):
        # Normalize every key of the params dict for use as **kwargs.
        return {self._normalize_arg(k): v for (k, v) in d.items()}

    def _attribute_is_empty(self, ref, attribute):
        """Returns true if the attribute in the given ref (which is a
        dict) is empty or None.
        """
        return ref.get(attribute) is None or ref.get(attribute) == ''

    def _require_attribute(self, ref, attribute):
        """Ensures the reference contains the specified attribute.

        Raise a ValidationError if the given attribute is not present
        """
        if self._attribute_is_empty(ref, attribute):
            msg = '%s field is required and cannot be empty' % attribute
            raise exception.ValidationError(message=msg)

    def _require_attributes(self, ref, attrs):
        """Ensures the reference contains the specified attributes.

        Raise a ValidationError if any of the given attributes is not present
        """
        missing_attrs = [attribute for attribute in attrs
                         if self._attribute_is_empty(ref, attribute)]
        if missing_attrs:
            msg = '%s field(s) cannot be empty' % ', '.join(missing_attrs)
            raise exception.ValidationError(message=msg)

    def _get_trust_id_for_request(self, context):
        """Get the trust_id for a call.

        Retrieve the trust_id from the token
        Returns None if token is not trust scoped
        """
        if ('token_id' not in context or
                context.get('token_id') == CONF.admin_token):
            LOG.debug(('will not lookup trust as the request auth token is '
                       'either absent or it is the system admin token'))
            return None
        token_ref = utils.get_token_ref(context)
        return token_ref.trust_id

    @classmethod
    def base_url(cls, context, endpoint_type):
        """Return the configured endpoint URL (without trailing slash)."""
        url = CONF['%s_endpoint' % endpoint_type]
        if url:
            # Interpolate %-style placeholders from the merged config.
            substitutions = dict(
                itertools.chain(CONF.items(), CONF.eventlet_server.items()))
            url = url % substitutions
        else:
            # NOTE(jamielennox): if url is not set via the config file we
            # should set it relative to the url that the user used to get here
            # so as not to mess with version discovery. This is not perfect.
            # host_url omits the path prefix, but there isn't another good
            # solution that will work for all urls.
            url = context['host_url']
        return url.rstrip('/')
class Middleware(Application):
    """Base WSGI middleware.

    These classes require an application to be
    initialized that will be called next. By default the middleware will
    simply call its wrapped app, or you can override __call__ to customize its
    behavior.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        """Used for paste app factories in paste.deploy config files.

        Any local configuration (that is, values under the [filter:APPNAME]
        section of the paste config) will be passed into the `__init__` method
        as kwargs.

        A hypothetical configuration would look like:

            [filter:analytics]
            redis_host = 127.0.0.1
            paste.filter_factory = wsgi_basic.analytics:Analytics.factory

        which would result in a call to the `Analytics` class as

            import wsgi_basic.analytics
            wsgi_basic.analytics.Analytics(app, redis_host='127.0.0.1')

        You could of course re-implement the `factory` method in subclasses,
        but using the kwarg passing it shouldn't be necessary.
        """
        def _factory(app):
            conf = global_config.copy()
            conf.update(local_config)
            # NOTE(review): ``conf`` (global merged with local) is computed
            # but only local_config is forwarded — confirm whether the merged
            # dict was intended here.
            return cls(app, **local_config)
        return _factory

    def __init__(self, application):
        # The wrapped WSGI app invoked when process_request returns None.
        super(Middleware, self).__init__()
        self.application = application

    def process_request(self, request):
        """Called on each request.

        If this returns None, the next application down the stack will be
        executed. If it returns a response then that response will be returned
        and execution will stop here.
        """
        return None

    def process_response(self, request, response):
        """Do whatever you'd like to the response, based on the request."""
        return response

    @webob.dec.wsgify()
    def __call__(self, request):
        # Run the pre-hook; any truthy response short-circuits the wrapped
        # app. Exceptions are rendered with the same policy as Application.
        try:
            response = self.process_request(request)
            if response:
                return response
            response = request.get_response(self.application)
            return self.process_response(request, response)
        except exception.Error as e:
            LOG.warning(six.text_type(e))
            return render_exception(e, request=request)
        except TypeError as e:
            LOG.exception(six.text_type(e))
            return render_exception(exception.ValidationError(e),
                                    request=request)
        except Exception as e:
            LOG.exception(six.text_type(e))
            return render_exception(exception.UnexpectedError(exception=e),
                                    request=request)
class Router(object):
    """Maps each incoming WSGI request onto a controller WSGI app."""

    def __init__(self, mapper):
        """Wrap a ``routes.Mapper`` in RoutesMiddleware.

        Every route registered on ``mapper`` must specify a 'controller'
        (the WSGI app to call) and will usually specify an 'action' that
        the controller object dispatches on internally. For example::

            mapper = routes.Mapper()
            sc = ServerController()

            # one explicit route
            mapper.connect(None, '/svrlist', controller=sc, action='list')

            # implicit resource actions
            mapper.resource('server', 'servers', controller=sc)

            # arbitrary WSGI app; {path_info:.*} hands it just that
            # section of the URL
            mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
        """
        self.map = mapper
        self._router = routes.middleware.RoutesMiddleware(
            self._dispatch, self.map)

    @webob.dec.wsgify()
    def __call__(self, req):
        """Hand the request to RoutesMiddleware for matching (404 on miss)."""
        return self._router

    @staticmethod
    @webob.dec.wsgify()
    def _dispatch(req):
        """Return the matched controller app, or render a 404.

        RoutesMiddleware has already matched the request and stored the
        result in req.environ before calling this.
        """
        routing_args = req.environ['wsgiorg.routing_args'][1]
        if not routing_args:
            return render_exception(
                exception.NotFound('The resource could not be found.'),
                request=req)
        return routing_args['controller']
class ComposingRouter(Router):
    """Router assembled from several sub-routers' route tables."""

    def __init__(self, mapper=None, routers=None):
        mapper = routes.Mapper() if mapper is None else mapper
        for sub_router in (routers or []):
            sub_router.add_routes(mapper)
        super(ComposingRouter, self).__init__(mapper)
class ComposableRouter(Router):
    """Router whose routes can also be merged into a ComposingRouter."""

    def __init__(self, mapper=None):
        mapper = routes.Mapper() if mapper is None else mapper
        self.add_routes(mapper)
        super(ComposableRouter, self).__init__(mapper)

    def add_routes(self, mapper):
        """Register this router's routes on ``mapper``; subclasses override."""
        pass
def render_response(body=None, status=None, headers=None, method=None):
    """Build a webob.Response from body/status/headers.

    A ``None`` body yields 204 No Content. Otherwise the body is
    JSON-encoded unless the caller supplied a non-JSON Content-Type
    header. HEAD requests keep the same headers as the equivalent GET
    but carry an empty body.
    """
    header_list = [] if headers is None else list(headers)
    header_list.append(('Vary', 'X-Auth-Token'))

    if body is None:
        body = ''
        status = status or (204, 'No Content')
    else:
        content_type = next(
            (value for name, value in header_list if name == 'Content-Type'),
            None)
        if content_type is None or content_type in JSON_ENCODE_CONTENT_TYPES:
            body = jsonutils.dumps(body, cls=utils.SmarterEncoder)
            if content_type is None:
                header_list.append(('Content-Type', 'application/json'))
        status = status or (200, 'OK')

    resp = webob.Response(body=body,
                          status='%s %s' % status,
                          headerlist=header_list)

    if method and method.upper() == 'HEAD':
        # webob rewrites content-length (and friends) when the body is
        # cleared, but HEAD must report the very same headers as GET.
        # Snapshot the headers, drop the body (b'' is valid on py2 and
        # py3), then restore the snapshot.
        saved_headers = resp.headers.copy()
        resp.body = b''
        for header_name, header_value in saved_headers.items():
            resp.headers[header_name] = header_value

    return resp
def render_exception(error, context=None, request=None):
    """Render an exception as an error WSGI response."""
    raw_message = error.args[0]
    message = str(raw_message)
    if message is raw_message:
        # str() handed back the very same object (it was already a plain
        # string, not a translatable Message) — force it through
        # six.text_type for unicode consistency.
        message = six.text_type(message)

    error_body = {
        'error': {
            'code': error.code,
            'title': error.title,
            'message': message,
        }
    }
    if isinstance(error, exception.AuthPluginException):
        error_body['error']['identity'] = error.authentication

    return render_response(status=(error.code, error.title),
                           body=error_body,
                           headers=[])
b33225ce37303284a53f4130678fc85ff6b91c0c | 4,410 | py | Python | data/nyu-depth-v2/extract.py | fferflo/tf-semseg | b392cac2e8cca5389e7a099e8f7a87d72f4a70fc | [
"MIT"
] | null | null | null | data/nyu-depth-v2/extract.py | fferflo/tf-semseg | b392cac2e8cca5389e7a099e8f7a87d72f4a70fc | [
"MIT"
] | null | null | null | data/nyu-depth-v2/extract.py | fferflo/tf-semseg | b392cac2e8cca5389e7a099e8f7a87d72f4a70fc | [
"MIT"
] | null | null | null | import h5py, imageio, argparse, os
import numpy as np
# CLI: path to the official nyu_depth_v2_labeled.mat archive is the only input.
parser = argparse.ArgumentParser()
parser.add_argument("--nyu", type=str, required=True, help="Path to nyu_depth_v2_labeled.mat file")
args = parser.parse_args()
map_894_to_40 = np.array([0, 40, 40, 3, 22, 5, 40, 12, 38, 40, 40, 2, 39, 40, 40, 26, 40, 24, 40, 7, 40, 1, 40, 40, 34, 38, 29, 40, 8, 40, 40, 40, 40, 38, 40, 40, 14, 40, 38, 40, 40, 40, 15, 39, 40, 30, 40, 40, 39, 40, 39, 38, 40, 38, 40, 37, 40, 38, 38, 9, 40, 40, 38, 40, 11, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 38, 13, 40, 40, 6, 40, 23, 40, 39, 10, 16, 40, 40, 40, 40, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 38, 40, 39, 40, 40, 40, 40, 39, 38, 40, 40, 40, 40, 40, 40, 18, 40, 40, 19, 28, 33, 40, 40, 40, 40, 40, 40, 40, 40, 40, 38, 27, 36, 40, 40, 40, 40, 21, 40, 20, 35, 40, 40, 40, 40, 40, 40, 40, 40, 38, 40, 40, 40, 4, 32, 40, 40, 39, 40, 39, 40, 40, 40, 40, 40, 17, 40, 40, 25, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 38, 38, 40, 40, 39, 40, 39, 40, 38, 39, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 38, 40, 40, 38, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 38, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 38, 40, 40, 39, 40, 40, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 31, 40, 40, 40, 40, 40, 40, 40, 38, 40, 40, 38, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 38, 40, 39, 40, 40, 39, 40, 40, 40, 38, 40, 40, 40, 40, 40, 40, 40, 40, 38, 39, 40, 40, 40, 40, 40, 40, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 38, 39, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 38, 40, 40, 40, 38, 40, 39, 40, 40, 40, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 39, 40, 40, 39, 39, 40, 40, 40, 40, 38, 40, 40, 38, 39, 39, 40, 39, 40, 39, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 38, 40, 39, 40, 40, 40, 40, 40, 39, 39, 40, 40, 40, 40, 40, 40, 39, 39, 40, 
40, 38, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 39, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 39, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 38, 40, 40, 40, 40, 40, 40, 40, 39, 38, 39, 40, 38, 39, 40, 39, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 38, 40, 40, 40, 40, 40, 38, 40, 40, 39, 40, 40, 40, 39, 40, 38, 40, 40, 40, 40, 40, 40, 40, 40, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 38, 40, 40, 38, 40, 40, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 38, 40, 40, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 38, 38, 38, 40, 40, 40, 38, 40, 40, 40, 38, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 38, 40, 38, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 39, 40, 40, 40, 40, 38, 38, 40, 40, 40, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 39, 40, 40, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 39, 39, 39, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 38, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 38, 40, 39, 40, 40, 40, 40, 38, 40, 40, 40, 40, 40, 38, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40])
# Load data: the .mat file stores datasets transposed, so flip the axes back
# to (H, W[, C]).  Only the first frame of each dataset is extracted here.
file = h5py.File(args.nyu, mode="r")
try:
    color = np.transpose(np.asarray(file["images"][0]), (2, 1, 0))
    depth = np.transpose(np.asarray(file["depths"][0]), (1, 0))
    labels = np.transpose(np.asarray(file["labels"][0]), (1, 0))
finally:
    # Close the HDF5 handle once the arrays are materialized (the original
    # script leaked it for the remainder of the run).
    file.close()
# Process data
depth = (depth * 10000).astype("uint16")  # scale float meters into a 16-bit PNG range
labels = map_894_to_40[labels].astype("uint8")  # remap 894 raw classes -> NYUv2 40-class set
# Save data next to this script.
path = os.path.dirname(os.path.abspath(__file__))
imageio.imwrite(os.path.join(path, "color.png"), color)
imageio.imwrite(os.path.join(path, "depth.png"), depth)
imageio.imwrite(os.path.join(path, "labels.png"), labels)
| 176.4 | 3,597 | 0.535147 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.036508 |
b33276f850ff627d90b9b5474ba0f791c30a646f | 3,579 | py | Python | OLD/datasets/cityscapes/legacy/1_downscale_images.py | ivankreso/semseg | fcd2889a1e9e03c3d1a71d19d68d15ce25a5dc79 | [
"MIT"
] | 2 | 2017-11-17T06:55:44.000Z | 2019-06-11T13:07:05.000Z | OLD/datasets/cityscapes/legacy/1_downscale_images.py | ivankreso/semseg | fcd2889a1e9e03c3d1a71d19d68d15ce25a5dc79 | [
"MIT"
] | null | null | null | OLD/datasets/cityscapes/legacy/1_downscale_images.py | ivankreso/semseg | fcd2889a1e9e03c3d1a71d19d68d15ce25a5dc79 | [
"MIT"
] | null | null | null | import sys
sys.path.append('../..')
import os
import pickle
import numpy as np
import tensorflow as tf
#from pgmagick import Image
import skimage as ski
import skimage.data, skimage.transform
from tqdm import trange
from cityscapes_info import class_info, class_color_map
from datasets.dataset_helper import convert_colors_to_indices
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
# Paths point at PPM conversions of Cityscapes; the commented-out values are
# the original leftImg8bit / gtFine dataset locations.
flags = tf.app.flags
flags.DEFINE_string('data_dir',
    '/home/kivan/datasets/Cityscapes/ppm/rgb/', 'Dataset dir')
    #'/home/kivan/datasets/Cityscapes/leftImg8bit_trainvaltest/leftImg8bit/', 'Dataset dir')
flags.DEFINE_string('gt_dir',
    '/home/kivan/datasets/Cityscapes/ppm/gt/', 'Dataset dir')
    #'/home/kivan/datasets/Cityscapes/gtFine_trainvaltest/gtFine_19/', 'Dataset dir')
# Target resolution after cropping.
flags.DEFINE_integer('img_width', 1024, '')
flags.DEFINE_integer('img_height', 432, '')
flags.DEFINE_string('save_dir',
                    '/home/kivan/datasets/Cityscapes/tensorflow/' + str(FLAGS.img_width) +
                    'x' + str(FLAGS.img_height) + '/', '')
# Crop window in pixels, applied before resizing
# (presumably trims rectification borders / ego-vehicle -- confirm).
flags.DEFINE_integer('cx_start', 0, '')
flags.DEFINE_integer('cx_end', 2048, '')
flags.DEFINE_integer('cy_start', 30, '')
flags.DEFINE_integer('cy_end', 894, '')
FLAGS = flags.FLAGS
def prepare_dataset(name):
    """Crop, resize and save RGB/GT images for one split ('train' or 'val').

    For every city under FLAGS.data_dir/<name>/ this writes:
      FLAGS.save_dir/rgb/<name>/<city>/      resized RGB images
      FLAGS.save_dir/gt_img/<name>/<city>/   resized ground-truth color images
      FLAGS.save_dir/gt_data/<name>/<city>/  pickled [labels, weights, num_labels, class_hist]
    """
    #rgb_means = [123.68, 116.779, 103.939]
    print('Preparing ' + name)
    root_dir = FLAGS.data_dir + name + '/'
    gt_dir = FLAGS.gt_dir + name + '/'
    cities = next(os.walk(root_dir))[1]
    save_dir = FLAGS.save_dir + name + '/'
    print('Save dir = ', save_dir)
    os.makedirs(save_dir, exist_ok=True)
    #print('Writing', filename)
    # Crop window applied to both RGB and ground truth before resizing.
    cx_start = FLAGS.cx_start
    cx_end = FLAGS.cx_end
    cy_start = FLAGS.cy_start
    cy_end = FLAGS.cy_end
    for city in cities:
        print(city)
        img_list = next(os.walk(root_dir + city))[2]
        #for img_name in img_list:
        rgb_save_dir = FLAGS.save_dir + '/rgb/' + name + '/' + city + '/'
        gt_save_dir = FLAGS.save_dir + '/gt_img/' + name + '/' + city + '/'
        gt_data_save_dir = FLAGS.save_dir + '/gt_data/' + name + '/' + city + '/'
        os.makedirs(rgb_save_dir, exist_ok=True)
        os.makedirs(gt_save_dir, exist_ok=True)
        os.makedirs(gt_data_save_dir, exist_ok=True)
        for i in trange(len(img_list)):
            img_name = img_list[i]
            rgb_path = root_dir + city + '/' + img_name
            rgb = ski.data.load(rgb_path)
            rgb = rgb[cy_start:cy_end,cx_start:cx_end,:]
            # Bicubic interpolation (order=3) for the RGB image.
            rgb = ski.transform.resize(rgb, (FLAGS.img_height, FLAGS.img_width), order=3)
            ski.io.imsave(rgb_save_dir + img_name, rgb)
            gt_path = gt_dir + city + '/' + img_name
            gt_rgb = ski.data.load(gt_path)
            #dump_nparray(array, filename)
            gt_rgb = gt_rgb[cy_start:cy_end,cx_start:cx_end,:]
            # Nearest-neighbor (order=0) so label colors are never interpolated.
            gt_rgb = ski.transform.resize(gt_rgb, (FLAGS.img_height, FLAGS.img_width),
                                          order=0, preserve_range=True)
            gt_rgb = gt_rgb.astype(np.uint8)
            #print(gt_rgb)
            ski.io.imsave(gt_save_dir + img_name, gt_rgb)
            #gt_rgb = ski.util.img_as_ubyte(gt_rgb)
            labels, label_weights, num_labels, class_hist = convert_colors_to_indices(
                gt_rgb, class_color_map)
            pickle_filepath = gt_data_save_dir + img_name[:-4] + '.pickle'
            with open(pickle_filepath, 'wb') as f:
                pickle.dump([labels, label_weights, num_labels, class_hist], f)
def main(argv):
    """Report crop/resize aspect ratios, then convert both dataset splits."""
    crop_w = FLAGS.cx_end - FLAGS.cx_start
    crop_h = FLAGS.cy_end - FLAGS.cy_start
    print('Crop ratio = ', crop_w / crop_h)
    print('Resize ratio = ', FLAGS.img_width / FLAGS.img_height)
    for split in ('train', 'val'):
        prepare_dataset(split)
if __name__ == '__main__':
    tf.app.run()  # parses the flags defined above, then dispatches to main(argv)
| 36.520408 | 92 | 0.685666 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 817 | 0.228276 |
b332bec0aafd1a4e7888591cd080fe917d2d4267 | 4,701 | py | Python | applications/init/modules/Paginater.py | himelpdas/Practice-Genie | b87ea53a9476a5ad36e93e8fb166c6c38d6f7259 | [
"BSD-3-Clause"
] | null | null | null | applications/init/modules/Paginater.py | himelpdas/Practice-Genie | b87ea53a9476a5ad36e93e8fb166c6c38d6f7259 | [
"BSD-3-Clause"
] | null | null | null | applications/init/modules/Paginater.py | himelpdas/Practice-Genie | b87ea53a9476a5ad36e93e8fb166c6c38d6f7259 | [
"BSD-3-Clause"
] | null | null | null | import math
from gluon import URL, SPAN
class Paginater():
    """
    Pagination and column-ordering helper for web2py request/DAL objects.
    Adapted from http://web2py.com/books/default/chapter/29/14/other-recipes#Pagination

    NOTE(review): this class uses xrange and concatenates the result of
    filter() with dict.items() -- it targets Python 2 (web2py era) and would
    break unmodified on Python 3.
    """
    # Selectable page sizes; index 1 (12 items) is the fallback default.
    item_limits = [6, 12, 25, 50, 100]

    def __init__(self, request, query_set, db):
        # request: current web2py request; query_set: DAL Set to paginate; db: DAL instance.
        self._request = request
        self._query_set = query_set
        self._db = db

        # Keep only user-facing query vars, dropping web2py internals.
        self._old_vars = filter(lambda x: "_" not in x[0], request.vars.items()) # get rid of crap like _formkey

        # Ordering state, populated by set_ordering().
        self.page = None
        self.items_per_page = None
        self.limitby = None
        self.orderby = None
        self.order_table = None
        self.order_field = None
        self.order_links = {}
        self.order_reverse = False
        # Paging state, populated by set_paging().
        self.item_count = None
        self.items_per_page_urls = []
        self.pages = None
        self.page_urls = []
        self.has_next = None
        self.has_prev = None
        self.next_page = None
        self.next_url = None
        self.prev_page = None
        self.prev_url = None

        self.set_ordering()
        self.set_paging()

    def set_ordering(self):
        """Resolve the orderby expression from request vars and build per-column sort links."""
        # Default ordering is "<first url arg>.id"; a leading "~" marks descending.
        order_string = self._request.vars["orderby"] or (self._request.args[0] + ".id")
        self.order_reverse = "~" in order_string
        order_string = order_string.strip("~")
        self.order_table, self.order_field = order_string.split(".")
        if self.order_reverse:
            self.orderby = ~self._db[self.order_table][self.order_field]
        else:
            self.orderby = self._db[self.order_table][self.order_field]
        # Tie-break person names on the complementary name field.
        if 'first_name' == self.order_field:
            self.orderby = self.orderby|self._db[self.order_table]["last_name"]
        elif "last_name" == self.order_field: # if 2 people have the same last name, then sort by first name
            self.orderby = self.orderby|self._db[self.order_table]["first_name"]

        # Build a sort URL + direction arrow for every (table, field) pair.
        for table_name in self._db.tables:
            for table_field in self._db[table_name].fields:
                table_field_is_order_field = (table_name == self.order_table) & (self.order_field == table_field)
                self.order_links.setdefault(table_name, {}).setdefault(table_field, {}).update({ # http://stackoverflow.com/questions/12905999/python-dict-how-to-create-key-or-append-an-element-to-key
                    "url": URL(args=self._request.args, vars=dict(self._old_vars + {'orderby': ("" if (not table_field_is_order_field or self.order_reverse) else "~") + "%s.%s"%(table_name, table_field)}.items())), # flipping order
                    "arrow": SPAN(_class="text-info glyphicon glyphicon-arrow-" + ("down" if self.order_reverse else "up")) if table_field_is_order_field else ""
                })

    def set_paging(self):
        """Compute the limitby window, total page count, and prev/next/page URLs."""
        self.page = (int(self._request.vars["page"] or 0))
        # Only accept a ?per= value from the whitelist; otherwise fall back to 12.
        self.items_per_page = int(self._request.vars["per"] if int(self._request.vars["per"] or -1) in Paginater.item_limits else Paginater.item_limits[1])
        self.limitby=(self.page*self.items_per_page,(self.page+1)*self.items_per_page) # 1*5 <-> 2*5+1
        for each in self.item_limits:
            href = URL(args=self._request.args, vars=dict(self._old_vars + {'per': each, 'page': 0}.items()))
            self.items_per_page_urls.append(dict(href=href, number=each, current=each == self.items_per_page))
        self.item_count = self._query_set.count()
        # self.pages is the index of the last page (0-based), not a count.
        division = self.item_count / float(self.items_per_page)
        self.pages = int(math.floor(division)) # don't need a new page for not full pages ie. 11/12
        if division % 1 == 0: # fixed - there may be a bug with left inner join as not all left from (db.table>0) will show up if right is missing, use left outer join instead.
            self.pages -= 1 # don't need a new page for a full page ie. 12/12 items
        for each in xrange(self.pages + 1): # xrange doesn't include last
            href = URL(args=self._request.args, vars=dict(self._old_vars + {'page':each}.items()))
            self.page_urls.append(dict(href=href, number=each, current=each == self.page))
        self.has_next = self.page < self.pages # need a new page for overfull page ie. 13/12 items, need page for 1/12
        self.has_prev = bool(self.page)
        self.next_page = None if not self.has_next else self.page+1 # href='{{=URL(vars=dict(page=paginater.next_page))}}'
        self.next_url = URL(args=self._request.args, vars=dict(self._old_vars + {'page':self.next_page}.items()))
        self.prev_page = None if not self.has_prev else self.page-1
        self.prev_url = URL(args=self._request.args, vars=dict(self._old_vars + {'page':self.prev_page}.items()))
b33442383156f3c0acad7de6e549de783efa735c | 836 | py | Python | application/file/routes.py | h4k1m0u/flask-webapp | d095449a3d324c50de745f7ac5f84b6fa2cb08b4 | [
"MIT"
] | null | null | null | application/file/routes.py | h4k1m0u/flask-webapp | d095449a3d324c50de745f7ac5f84b6fa2cb08b4 | [
"MIT"
] | null | null | null | application/file/routes.py | h4k1m0u/flask-webapp | d095449a3d324c50de745f7ac5f84b6fa2cb08b4 | [
"MIT"
] | null | null | null | from flask import Blueprint, render_template, url_for, redirect
from flask import current_app as app
from .forms import UploadForm
from werkzeug.utils import secure_filename
import os
# Blueprint for the file-upload pages; templates/ and static/ resolve
# relative to this package.
file_bp = Blueprint('file', __name__,
                    template_folder='templates', static_folder='static')
@file_bp.route('/upload', methods=['GET', 'POST'])
def upload():
    """Render the upload form; on a valid POST, persist the photo and redirect."""
    form = UploadForm()
    if not form.validate_on_submit():
        # GET, or POST with validation errors: (re-)render the form.
        return render_template('file/upload.html', form=form)

    # Pull the uploaded file out of the validated form.
    photo = form.photo.data
    safe_name = secure_filename(photo.filename)
    # Persist it inside the app's instance folder.
    photo.save(os.path.join(app.instance_path, 'photos', safe_name))
    return redirect(url_for('.success'))
@file_bp.route('/success')
def success():
    """Confirmation page shown after a successful upload."""
    return render_template('file/success.html')
| 26.125 | 72 | 0.688995 | 0 | 0 | 0 | 0 | 533 | 0.63756 | 0 | 0 | 164 | 0.196172 |
b3351bdda16da9303d9cddacd8c6b55abf47ec70 | 5,060 | py | Python | src/operator/transformer/align.py | k-harada/abstraction-and-reasoning-challenge | 42828a3f39be3c77bd0f4e1648b2cb1d9d15ed95 | [
"MIT"
] | 3 | 2020-05-28T03:46:23.000Z | 2020-05-29T22:53:32.000Z | src/operator/transformer/align.py | k-harada/abstraction-and-reasoning-challenge | 42828a3f39be3c77bd0f4e1648b2cb1d9d15ed95 | [
"MIT"
] | null | null | null | src/operator/transformer/align.py | k-harada/abstraction-and-reasoning-challenge | 42828a3f39be3c77bd0f4e1648b2cb1d9d15ed95 | [
"MIT"
] | 2 | 2020-05-29T07:22:22.000Z | 2020-05-29T22:53:36.000Z | from heapq import heappop, heappush
import numpy as np
from src.data import Problem, Case, Matter
from src.operator.mapper.map_connect import MapConnect
def align_xy(x0_arr: np.ndarray, y0_arr: np.ndarray, x1_arr: np.ndarray, y1_arr: np.ndarray, c_arr: np.ndarray) -> np.ndarray:
    """Arrange n items on a 2D grid inferred from their bounding boxes.

    Items whose [x0, x1) intervals overlap are merged into the same x-group
    (likewise for y).  A grid size (yl, xl) with xl * yl == n is then searched
    such that the x-groups concatenate into chunks of exactly xl items and the
    y-groups into chunks of exactly yl items.

    Args:
        x0_arr, y0_arr: top-left corner of each item's bounding box
        x1_arr, y1_arr: bottom-right corner (exclusive) of each bounding box
        c_arr: per-item value written into the resulting grid cell

    Returns:
        (yl, xl) integer array with the c_arr values placed on the inferred grid.

    Raises:
        AssertionError: if no consistent grid assignment exists.
    """
    n = x0_arr.shape[0]
    # x: sweep intervals in ascending x0, merging overlapping ones into groups.
    heap_x = []
    for i in range(n):
        heappush(heap_x, (x0_arr[i], x1_arr[i], i))
    x1_temp = 0
    x_list_all = []
    x_list_now = []
    while len(heap_x):
        x0, x1, i = heappop(heap_x)
        if x0 >= x1_temp:
            # new group starts at/after the end of the current merged span
            x1_temp = x1
            if len(x_list_now) > 0:
                x_list_all.append(x_list_now)
            x_list_now = [i]
        else:
            x1_temp = max(x1_temp, x1)
            x_list_now.append(i)
    x_list_all.append(x_list_now)
    # y: same sweep over the y intervals.
    heap_y = []
    for i in range(n):
        heappush(heap_y, (y0_arr[i], y1_arr[i], i))
    y1_temp = 0
    y_list_all = []
    y_list_now = []
    while len(heap_y):
        y0, y1, i = heappop(heap_y)
        if y0 >= y1_temp:
            # new group
            y1_temp = y1
            if len(y_list_now) > 0:
                y_list_all.append(y_list_now)
            y_list_now = [i]
        else:
            y1_temp = max(y1_temp, y1)
            y_list_now.append(i)
    y_list_all.append(y_list_now)
    # find size
    len_x_max = max([len(x) for x in x_list_all])
    len_y_max = max([len(y) for y in y_list_all])
    assert 0 < len_x_max * len_y_max <= n
    # eval 158 is a good example
    # Try every row length xl that divides n, smallest first.
    xl, yl = 0, 0
    for xl in range(len_x_max, n + 1):
        if n % xl != 0:
            continue
        yl = n // xl
        assert yl >= len_y_max  # you must break before yl < len_y_max
        # bind until xl
        xl_temp = 0
        for x in x_list_all:
            xl_temp += len(x)
            if xl_temp == xl:
                xl_temp = 0
            elif xl_temp > xl:
                break
        if xl_temp > 0:
            continue
        # success for x, bind until yl
        yl_temp = 0
        for y in y_list_all:
            yl_temp += len(y)
            if yl_temp == yl:
                yl_temp = 0
            elif yl_temp > yl:
                break
        if yl_temp > 0:
            continue
        # success for y
        break
    assert xl * yl > 0
    # assign i, j
    # bind until xl
    # NOTE: dtype=int replaces the deprecated np.int alias (removed in NumPy 1.24).
    i_arr = np.zeros(n, dtype=int)
    row_id = 0
    xl_temp = 0
    for x_list in x_list_all:
        xl_temp += len(x_list)
        for i in x_list:
            i_arr[i] = row_id
        if xl_temp == xl:
            xl_temp = 0
            row_id += 1
    # bind until yl
    j_arr = np.zeros(n, dtype=int)
    col_id = 0
    yl_temp = 0
    for y_list in y_list_all:
        yl_temp += len(y_list)
        for i in y_list:
            j_arr[i] = col_id
        if yl_temp == yl:
            yl_temp = 0
            col_id += 1
    res_arr = -1 * np.ones((yl, xl), dtype=int)
    for i in range(n):
        assert res_arr[i_arr[i], j_arr[i]] == -1  # each cell must be filled exactly once
        res_arr[i_arr[i], j_arr[i]] = c_arr[i]
    return res_arr
class Align:
    """Transformer that repacks equally-sized matters onto a tight grid."""
    def __init__(self):
        pass

    @classmethod
    def case(cls, c: Case) -> Case:
        """Re-lay out all non-mesh matters of a case on the grid found by align_xy.

        Requires at least two non-mesh matters, all with identical shape
        (asserted below); the returned case is resized to grid * matter_shape.
        """
        m: Matter
        # Bounding boxes of every non-mesh matter (x1/y1 exclusive).
        x0_arr = np.array([m.x0 for m in c.matter_list if not m.is_mesh])
        y0_arr = np.array([m.y0 for m in c.matter_list if not m.is_mesh])
        x1_arr = np.array([m.x0 + m.shape[0] for m in c.matter_list if not m.is_mesh])
        y1_arr = np.array([m.y0 + m.shape[1] for m in c.matter_list if not m.is_mesh])
        non_mesh_list = [m for m in c.matter_list if not m.is_mesh]
        # c_arr carries the index of each matter so the grid maps cells back to matters.
        c_arr = np.arange(len(non_mesh_list))
        # same shape
        assert len(non_mesh_list) >= 2
        assert len({m.shape[0] for m in non_mesh_list}) == 1
        assert len({m.shape[1] for m in non_mesh_list}) == 1
        matter_shape = non_mesh_list[0].shape

        res_arr = align_xy(x0_arr, y0_arr, x1_arr, y1_arr, c_arr)

        # Rebuild the case: each grid cell gets a deep copy of its matter,
        # repositioned to the cell's top-left corner.
        new_case: Case = c.copy()
        new_case.shape = res_arr.shape[0] * matter_shape[0], res_arr.shape[1] * matter_shape[1]
        new_case.matter_list = []
        for i in range(res_arr.shape[0]):
            for j in range(res_arr.shape[1]):
                m = non_mesh_list[res_arr[i, j]]
                m_add = m.deepcopy()
                m_add.x0 = i * matter_shape[0]
                m_add.y0 = j * matter_shape[1]
                new_case.matter_list.append(m_add)
        return new_case

    @classmethod
    def problem(cls, p: Problem) -> Problem:
        """Apply case() to every train/test input case of the problem."""
        q: Problem = p.copy()
        q.train_x_list = [cls.case(c) for c in p.train_x_list]
        q.test_x_list = [cls.case(c) for c in p.test_x_list]
        return q
if __name__ == "__main__":
x0_ = np.array([0, 0])
x1_ = np.array([1, 1])
y0_ = np.array([0, 2])
y1_ = np.array([1, 3])
c_ = np.array([4, 5])
res_arr_ = align_xy(x0_, y0_, x1_, y1_, c_)
print(res_arr_)
pp = Problem.load(158, "eval")
qq = MapConnect.problem(pp, allow_diagonal=True)
rr = Align.problem(qq)
print(rr)
| 28.111111 | 114 | 0.534387 | 1,597 | 0.315613 | 0 | 0 | 1,535 | 0.30336 | 0 | 0 | 241 | 0.047628 |
b336dfd4f06633eb05d75f4fdb5a06c0dcb65ff7 | 1,590 | py | Python | python_experiments/paper_figures/tkde/data_legacy/eval_varying_eps_c_pcg.py | RapidsAtHKUST/SimRank | 3a601b08f9a3c281e2b36b914e06aba3a3a36118 | [
"MIT"
] | 8 | 2020-04-14T23:17:00.000Z | 2021-06-21T12:34:04.000Z | python_experiments/paper_figures/tkde/data_legacy/eval_varying_eps_c_pcg.py | RapidsAtHKUST/SimRank | 3a601b08f9a3c281e2b36b914e06aba3a3a36118 | [
"MIT"
] | null | null | null | python_experiments/paper_figures/tkde/data_legacy/eval_varying_eps_c_pcg.py | RapidsAtHKUST/SimRank | 3a601b08f9a3c281e2b36b914e06aba3a3a36118 | [
"MIT"
] | 1 | 2021-01-17T16:26:50.000Z | 2021-01-17T16:26:50.000Z | import json
if __name__ == '__main__':
    # The input file stores four Python dict literals, one per line.
    # NOTE: eval() is acceptable here only because the file is trusted local
    # experiment output; never eval() untrusted input.
    with open('varying_eps_c.dicts') as ifs:
        pcg_varying_eps_cpu = eval(ifs.readline())
        pcg_varying_eps_mem = eval(ifs.readline())
        pcg_varying_c_cpu = eval(ifs.readline())
        pcg_varying_c_mem = eval(ifs.readline())
    pcg_tag = 'pcg'
    # Varying-eps results are nested under the fixed decay factor c = 0.6.
    with open('pcg-varying-eps-cpu.json', 'w') as ofs:
        ofs.write(json.dumps({
            pcg_tag: {
                '0.6':
                    pcg_varying_eps_cpu
            }
        }, indent=4))

    with open('pcg-varying-eps-mem.json', 'w') as ofs:
        ofs.write(json.dumps({
            pcg_tag: {
                '0.6':
                    pcg_varying_eps_mem
            }
        }, indent=4))

    # (The original script wrote pcg-varying-eps-cpu.json a second time with
    # identical content; that redundant write has been removed.)

    def combine(data: dict, extra):
        """Nest every value of `data` under the fixed key `extra`, e.g. {c: {eps: val}}."""
        res = dict()
        for c, val in data.items():
            res[c] = {extra: val}
        return res

    # Varying-c results are nested under the fixed eps = 0.01.
    with open('pcg-varying-c-cpu.json', 'w') as ofs:
        ofs.write(json.dumps({
            pcg_tag:
                combine(pcg_varying_c_cpu, '0.01')
        }, indent=4))

    with open('pcg-varying-c-mem.json', 'w') as ofs:
        ofs.write(json.dumps({
            pcg_tag:
                combine(pcg_varying_c_mem, '0.01')
        }, indent=4))
| 27.894737 | 58 | 0.445283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.128302 |
b337fc90d9c521913630b0b32189385248da44bc | 5,747 | py | Python | mir/tools/utils.py | fenrir-z/ymir-cmd | 6fbffd3c1ff5dd1c9a44b55de411523b50567661 | [
"Apache-2.0"
] | 1 | 2022-01-12T03:12:47.000Z | 2022-01-12T03:12:47.000Z | mir/tools/utils.py | fenrir-z/ymir-cmd | 6fbffd3c1ff5dd1c9a44b55de411523b50567661 | [
"Apache-2.0"
] | null | null | null | mir/tools/utils.py | fenrir-z/ymir-cmd | 6fbffd3c1ff5dd1c9a44b55de411523b50567661 | [
"Apache-2.0"
] | null | null | null | import logging
import os
import pathlib
import requests
import shutil
from typing import Dict, List, Optional, Union
from PIL import Image, UnidentifiedImageError
from mir import scm
# project
def project_root() -> str:
    """Absolute path of the project root, three directory levels above this file."""
    return str(pathlib.Path(__file__).absolute().parents[2])
# mir repo infos
def mir_repo_head_name(git: Union[str, scm.CmdScm]) -> Optional[str]:
    """ get current mir repo head name (may be branch, or commit id) """
    # Accept either a repo path or an already-constructed scm wrapper.
    if isinstance(git, str):
        repo_scm = scm.Scm(git, scm_executable="git")
    elif isinstance(git, scm.CmdScm):
        repo_scm = git
    else:
        raise ValueError("invalid git: needs str or CmdScm")

    head = repo_scm.rev_parse(["--abbrev-ref", "HEAD"])
    # Normalize whatever the scm layer returned into a str.
    if isinstance(head, bytes):
        return head.decode("utf-8")
    if isinstance(head, str):
        return head
    return str(head)
def mir_repo_commit_id(git: Union[str, scm.CmdScm], branch: str = "HEAD") -> str:
    """ get mir repo branch's commit id """
    # Accept either a repo path or an already-constructed scm wrapper.
    if isinstance(git, str):
        repo_scm = scm.Scm(git, scm_executable="git")
    elif isinstance(git, scm.CmdScm):
        repo_scm = git
    else:
        raise ValueError("invalid git: needs str or CmdScm")

    commit = repo_scm.rev_parse(branch)
    # Normalize whatever the scm layer returned into a str.
    if isinstance(commit, bytes):
        return commit.decode("utf-8")
    if isinstance(commit, str):
        return commit
    return str(commit)
# Store assets in asset_ids to out_root/sub_folder,
# return relative path to the out_root, staring with sub_folder.
# Set overwrite to False to avoid overwriting.
def store_assets_to_dir(asset_ids: List[str],
                        out_root: str,
                        sub_folder: str,
                        asset_location: str,
                        overwrite: bool = False,
                        create_prefix: bool = True,
                        need_suffix: bool = True) -> Dict[str, str]:
    """
    load assets in location and save them to destination local folder

    Args:
        asset_ids: a list of asset ids (asset hashes)
        out_root: the root of output path
        sub_folder: sub folder to the output path, if no sub, set to '.'
        asset_location: server location prefix of assets, if set to none, try to read it from mir repo config
        overwrite (bool): if True, still copy assets even if assets already exists in export dir
        create_prefix (bool): use last 2 chars of asset id as a sub dir
        need_suffix (bool): if True, append the sniffed image format as a file suffix

    Returns:
        dict mapping asset id -> path relative to out_root (starting with sub_folder)
    """
    # if out_root exists, but not a folder, raise error
    if os.path.exists(out_root) and not os.path.isdir(out_root):
        raise ValueError("invalid out_root")
    os.makedirs(out_root, exist_ok=True)
    sub_dir_abs = os.path.join(out_root, sub_folder)
    os.makedirs(sub_dir_abs, exist_ok=True)

    assets_location = _get_assets_location(asset_ids, asset_location)

    unknown_format_count = 0
    total_count = len(asset_ids)
    asset_id_to_rel_paths: Dict[str, str] = {}
    for idx, asset_id in enumerate(asset_ids):
        # Optionally shard output by the last two hash characters to avoid
        # huge flat directories.
        if create_prefix:
            suffix = asset_id[-2:]
            sub_sub_folder_abs = os.path.join(sub_dir_abs, suffix)
            os.makedirs(sub_sub_folder_abs, exist_ok=True)
            sub_sub_folder_rel = os.path.join(sub_folder, suffix)
        else:
            sub_sub_folder_abs = sub_dir_abs
            sub_sub_folder_rel = sub_folder

        if need_suffix:
            try:
                # Open the image only to sniff its format (jpeg/png/...).
                asset_image = Image.open(assets_location[asset_id])
                file_format = asset_image.format.lower()
            except UnidentifiedImageError:
                file_format = 'unknown'
                unknown_format_count += 1

        file_name = (f"{asset_id}.{file_format.lower()}" if need_suffix else asset_id)
        asset_path_abs = os.path.join(sub_sub_folder_abs, file_name)  # path started from out_root
        asset_path_rel = os.path.join(sub_sub_folder_rel, file_name)  # path started from sub_folder
        _store_asset_to_location(assets_location[asset_id], asset_path_abs, overwrite=overwrite)
        asset_id_to_rel_paths[asset_id] = asset_path_rel

        # Periodic progress log for large exports.
        if idx > 0 and idx % 5000 == 0:
            logging.info(f"exporting {idx} / {total_count} assets")

    if unknown_format_count > 0:
        logging.warning(f"unknown format asset count: {unknown_format_count}")
    return asset_id_to_rel_paths
def _store_asset_to_location(src: str, dst: str, overwrite: bool = False) -> None:
if not src or not dst:
return
os.makedirs(os.path.dirname(dst), exist_ok=True)
if not overwrite and os.path.isfile(dst):
return
if src.startswith('http'): # from http request
response = requests.get(src)
if len(response.content) > 0:
with open(dst, "wb") as f:
f.write(response.content)
elif src.startswith('/'): # from filesystem, require abs path.
shutil.copyfile(src, dst)
else:
raise ValueError(f"Invalid src, not a abs path: {src}")
def _get_assets_location(asset_ids: List[str], asset_location: str) -> Dict[str, str]:
"""
get asset locations
Args:
asset_ids: a list of asset ids (asset hashes)
asset_location: the server location of assets.
Returns:
a dict, key: asset id, value: asset location url
Raises:
Attribute exception if asset_location is not set, and can not be found in config file
"""
# asset_location is a required field.
# CMD layer should NOT aware where the asset is stored.
if not asset_location:
raise ValueError("asset_location is not set.")
return {id: os.path.join(asset_location, id) for id in asset_ids}
| 36.839744 | 109 | 0.655124 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,770 | 0.307987 |
b33850aba2836a183da06d1f438221c324a27842 | 172 | py | Python | seniorproject/preprocessing/preprocess_sample.py | teammanicotti/writingstyle | 001fecf34ed3db3699b53a586e5078dbf190a72f | [
"MIT"
] | null | null | null | seniorproject/preprocessing/preprocess_sample.py | teammanicotti/writingstyle | 001fecf34ed3db3699b53a586e5078dbf190a72f | [
"MIT"
] | 4 | 2020-11-13T18:49:50.000Z | 2022-02-10T01:26:11.000Z | seniorproject/preprocessing/preprocess_sample.py | teammanicotti/writingstyle | 001fecf34ed3db3699b53a586e5078dbf190a72f | [
"MIT"
] | null | null | null | """Preprocessing Sample."""
__author__ = 'Devon Welcheck'
def preprocess(req, resp, resource, params):  # pylint: disable=unused-argument
    """Preprocess skeleton.

    Intentionally a no-op placeholder.  NOTE(review): the
    (req, resp, resource, params) signature matches a Falcon before-hook --
    confirm the intended framework.
    """
| 21.5 | 79 | 0.703488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.593023 |
b3386087966e76df474b5ed6cb60a9ae1c8443b0 | 484 | py | Python | Topic08-plots/rp-debt.py | mizydorek/pands-problems-2020 | a418dcc58e49dfbcb269e4524f676c1c6a0a6255 | [
"MIT"
] | null | null | null | Topic08-plots/rp-debt.py | mizydorek/pands-problems-2020 | a418dcc58e49dfbcb269e4524f676c1c6a0a6255 | [
"MIT"
] | null | null | null | Topic08-plots/rp-debt.py | mizydorek/pands-problems-2020 | a418dcc58e49dfbcb269e4524f676c1c6a0a6255 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
def main():
    """Draw a stacked-area chart of three noisy 'debt' series over 1950-1999."""
    rng = np.arange(50)
    rnd = np.random.randint(0, 10, size=(3, rng.size))
    yrs = 1950 + rng

    fig, ax = plt.subplots(figsize=(5, 3))
    ax.stackplot(yrs, rng + rnd, labels=['Eastasia', 'Eurasia', 'Oceania'])
    ax.set_title('Combined debt growth over time')
    ax.legend(loc='upper left')
    ax.set_ylabel('Total debt')
    # Positional limits: the xmin=/xmax= keywords were deprecated in
    # Matplotlib 3.0 and later removed.
    ax.set_xlim(yrs[0], yrs[-1])
    fig.tight_layout()
# Build the chart; plt.show() on the following line blocks on the GUI loop.
main()
plt.show() | 25.473684 | 75 | 0.636364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.173554 |
b3392521392bb0d5241c9e791d0081276aa30c9e | 454 | py | Python | src/plot_results.py | andrea-gasparini/backdoor-federated-learning | 52dbeeb328282e7cd132ca4de7931d618d243994 | [
"Apache-2.0"
] | 2 | 2021-12-30T19:59:51.000Z | 2021-12-31T15:50:43.000Z | src/plot_results.py | andrea-gasparini/backdoor-federated-learning | 52dbeeb328282e7cd132ca4de7931d618d243994 | [
"Apache-2.0"
] | null | null | null | src/plot_results.py | andrea-gasparini/backdoor-federated-learning | 52dbeeb328282e7cd132ca4de7931d618d243994 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load in the data: results.txt is a headerless CSV of experiment rows
df = pd.read_csv("results.txt", header=None)
df.columns = ["poison_perc", "success_rate", "auc"]
# Plot attack success rate and clean-task AUC against the poisoning percentage
plt.plot(df['poison_perc'], df['success_rate'], label="Success rate")
plt.plot(df['poison_perc'], df['auc'], label='Clean AUC')
plt.legend()
plt.xlabel("Poisoning percentage")
plt.show()
# Save the figure (disabled; plt.show() above already displays it)
# plt.savefig('result_graph.png', dpi=100)
| 25.222222 | 69 | 0.715859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.504405 |
b33cf42bd0fd23988f48859e9516133f47659bcc | 481 | py | Python | src/richie/apps/courses/migrations/0017_auto_20200827_1011.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
] | 174 | 2018-04-14T23:36:01.000Z | 2022-03-10T09:27:01.000Z | src/richie/apps/courses/migrations/0017_auto_20200827_1011.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
] | 631 | 2018-04-04T11:28:53.000Z | 2022-03-31T11:18:31.000Z | src/richie/apps/courses/migrations/0017_auto_20200827_1011.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
] | 64 | 2018-06-27T08:35:01.000Z | 2022-03-10T09:27:43.000Z | # Generated by Django 2.2.15 on 2020-08-27 08:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: relaxes CourseRun.resource_link to allow
    # blank/null values.  Do not edit field definitions by hand.

    dependencies = [
        ("courses", "0016_auto_20200417_1237"),
    ]

    operations = [
        migrations.AlterField(
            model_name="courserun",
            name="resource_link",
            field=models.CharField(
                blank=True, max_length=200, null=True, verbose_name="Resource link"
            ),
        ),
    ]
b3422c9627e413c2af84a60838fdb43e2d877f43 | 2,718 | py | Python | proxyclient/experiments/timer_test.py | EricRabil/m1n1 | 0a1a9348c32e2e44374720cd9d68cbe81cf696df | [
"MIT"
] | 1,604 | 2021-01-14T19:04:59.000Z | 2022-03-31T18:34:16.000Z | proxyclient/experiments/timer_test.py | EricRabil/m1n1 | 0a1a9348c32e2e44374720cd9d68cbe81cf696df | [
"MIT"
] | 105 | 2021-01-15T03:52:27.000Z | 2022-03-30T22:16:52.000Z | proxyclient/experiments/timer_test.py | EricRabil/m1n1 | 0a1a9348c32e2e44374720cd9d68cbe81cf696df | [
"MIT"
] | 96 | 2021-01-14T21:13:53.000Z | 2022-03-31T12:14:14.000Z | #!/usr/bin/env python3
# SPDX-License-Identifier: MIT
import sys, pathlib
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from m1n1.setup import *
# Apple-specific virtual-timer sysregs as (op0, op1, CRn, CRm, op2) tuples.
# NOTE(review): register semantics inferred from usage below -- confirm
# against m1n1's sysreg definitions.
HV_VTMR_CTL = (3, 5, 15, 1, 3)
HV_VTMR_CTL_VMASK = (1 << 0)
HV_VTMR_CTL_PMASK = (1 << 1)
HV_VTMR_LIST = (3, 5, 15, 1, 2)
TGE = (1<<27)  # HCR_EL2.TGE bit
u.msr(CNTHCTL_EL2, 3 << 10) # EL1PTEN | EL1PCTEN
def run_test(ctl, tval):
    """Arm the given timer to fire in ~0.8s, then poll ISR/CTL state for ~1.2s.

    ctl/tval are (op0, op1, CRn, CRm, op2) sysreg tuples for the timer's
    CTL and TVAL registers, accessed through the m1n1 proxy `u`.
    """
    u.inst(0xd5033fdf) # isb
    u.msr(ctl, 0)  # disable while reprogramming
    u.msr(tval, int(freq * 0.8))  # seconds -> timer ticks via CNTFRQ
    u.msr(ctl, 1)  # enable
    for i in range(6):
        p.nop()
        time.sleep(0.2)
        #u.inst(0xd5033fdf, call=p.el1_call)
        print(" . (ISR_EL1=%d) CTL=%x VTMR_LIST=%x" % (u.mrs(ISR_EL1), u.mrs(ctl), u.mrs(HV_VTMR_LIST)))
    u.msr(ctl, 0)  # disarm when done
def test_hv_timers():
    """Exercise the EL2-owned physical and virtual timers with TGE=1."""
    u.msr(DAIF, 0x3c0)  # mask DAIF so we only observe pending state via ISR_EL1
    print("Testing HV timers...")
    print(" TGE = 1")
    # Set TGE plus bits 3/4 of HCR_EL2 (FMO/IMO per the ARM ARM).
    u.msr(HCR_EL2, u.mrs(HCR_EL2) | TGE | (1 << 3) | (1 << 4))
    print(" P:")
    run_test(CNTP_CTL_EL0, CNTP_TVAL_EL0)
    print(" V:")
    run_test(CNTV_CTL_EL0, CNTV_TVAL_EL0)
def test_guest_timers():
    """Sweep guest (EL02) timer behavior across TGE, vGIC mode and mask states."""
    u.msr(DAIF, 0)  # interrupts unmasked for the guest-timer cases
    print("Testing guest timers...")
    # Case 1: TGE=1, vGIC off, timers unmasked (run_test calls commented out).
    print(" TGE = 1, vGIC mode=0, timers unmasked")
    u.msr(HCR_EL2, (u.mrs(HCR_EL2) | TGE) | (1 << 3) | (1 << 4))
    u.msr(HACR_EL2, 0)
    u.msr(HV_VTMR_CTL, 3)  # VMASK | PMASK bits both set -> "unmasked" label above
    print(" P:")
    #run_test(CNTP_CTL_EL02, CNTP_TVAL_EL02)
    print(" V:")
    #run_test(CNTV_CTL_EL02, CNTV_TVAL_EL02)
    # Case 2: TGE=1, vGIC off, timers masked.
    print(" TGE = 1, vGIC mode=0, timers masked")
    u.msr(HV_VTMR_CTL, 0)
    print(" P:")
    run_test(CNTP_CTL_EL02, CNTP_TVAL_EL02)
    print(" V:")
    run_test(CNTV_CTL_EL02, CNTV_TVAL_EL02)
    # Case 3: TGE=0, vGIC off, timers unmasked.
    print(" TGE = 0, vGIC mode=0, timers unmasked")
    u.msr(HCR_EL2, (u.mrs(HCR_EL2) & ~TGE) | (1 << 3) | (1 << 4))
    u.msr(HACR_EL2, 0)
    u.msr(HV_VTMR_CTL, 3)
    print(" P:")
    run_test(CNTP_CTL_EL02, CNTP_TVAL_EL02)
    print(" V:")
    run_test(CNTV_CTL_EL02, CNTV_TVAL_EL02)
    # Case 4: TGE=0, vGIC off, timers masked.
    print(" TGE = 0, vGIC mode=0, timers masked")
    u.msr(HV_VTMR_CTL, 0)
    print(" P:")
    run_test(CNTP_CTL_EL02, CNTP_TVAL_EL02)
    print(" V:")
    run_test(CNTV_CTL_EL02, CNTV_TVAL_EL02)
    # Case 5: TGE=0, vGIC mode on (HACR_EL2 bit 20), timers unmasked.
    print(" TGE = 0, vGIC mode=1, timers unmasked")
    u.msr(HCR_EL2, (u.mrs(HCR_EL2) & ~TGE) | (1 << 3) | (1 << 4))
    u.msr(HACR_EL2, 1<<20)
    u.msr(HV_VTMR_CTL, 3)
    print(" P:")
    run_test(CNTP_CTL_EL02, CNTP_TVAL_EL02)
    print(" V:")
    run_test(CNTV_CTL_EL02, CNTV_TVAL_EL02)
    # Case 6: TGE=0, vGIC mode on, timers masked.
    print(" TGE = 0, vGIC mode=1, timers masked")
    u.msr(HV_VTMR_CTL, 0)
    print(" P:")
    run_test(CNTP_CTL_EL02, CNTP_TVAL_EL02)
    print(" V:")
    run_test(CNTV_CTL_EL02, CNTV_TVAL_EL02)
    return
# Script entry: read the system counter frequency, then run both suites.
freq = u.mrs(CNTFRQ_EL0)  # counter ticks per second (CNTFRQ_EL0)
print("Timer freq: %d" % freq)
test_hv_timers()
test_guest_timers()
| 24.709091 | 109 | 0.597498 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 661 | 0.243194 |
b343279a8195c95b740ea2b5b46901eb458edfeb | 8,799 | py | Python | nrf5_mesh/CMake/SES/SESGenerator.py | aberke/city-science-bike-swarm | 797e803014fc0c3878016309a62460a736140958 | [
"MIT"
] | 15 | 2019-02-25T20:25:29.000Z | 2021-02-27T17:57:38.000Z | nrf5_mesh/CMake/SES/SESGenerator.py | aberke/city-science-bike-swarm | 797e803014fc0c3878016309a62460a736140958 | [
"MIT"
] | 3 | 2020-02-21T22:35:38.000Z | 2020-10-05T02:25:30.000Z | nrf5_mesh/CMake/SES/SESGenerator.py | aberke/city-science-bike-swarm | 797e803014fc0c3878016309a62460a736140958 | [
"MIT"
] | 5 | 2019-06-29T21:03:57.000Z | 2021-06-15T06:16:20.000Z | #!/usr/bin/env python3
# Usage: python SESGenerator.py <target_configuration>.json <output_directory>
#
# <target_configuration>.json is a json file generated from CMake on the form:
# {
# "target": {
# "name": "light_control_client_nrf52832_xxAA_s132_5.0.0",
# "sources": "main.c;provisioner.c;..",
# "includes": "include1;include2;..",
# "definitions":"NRF52;NRF52_SERIES;..",
# },
# "platform": {
# "name": "nrf52832_xxAA",
# "arch": "cortex-m4f",
# "flash_size": 524288,
# "ram_size": 65536,
# },
# "softdevice": {
# "hex_file": "<path-to-s132_nrf52_5.0.0_softdevice.hex>",
# "flash_size": 143360,
# "ram_size": 12720
# }
# }
import jinja2
import sys
import argparse
import json
import os
from collections import namedtuple
from shutil import copyfile
TEST_JSON_STR = """{
"target": {
"name": "light_control_client_nrf52832_xxAA_s132_5.0.0",
"sources": "main.c;provisioner.c",
"includes": "include1;include2",
"defines":"NRF52;NRF52_SERIES"
},
"platform": {
"name": "nrf52832_xxAA",
"arch": "cortex-m4f",
"flash_size": 524288,
"ram_size": 65536
},
"softdevice": {
"hex_file": "path-to/s132_nrf52_5.0.0_softdevice.hex",
"flash_size": 143360,
"ram_size": 12720
}
}"""
# Constants
# Flash/RAM reserved for the mesh bootloader on each SoC family (bytes).
NRF51_BOOTLOADER_FLASH_SIZE = 24576
NRF51_BOOTLOADER_RAM_SIZE = 768
NRF52_BOOTLOADER_FLASH_SIZE = 32768
NRF52_BOOTLOADER_RAM_SIZE = 4096
# RAM base address on these SoCs (0x20000000).
RAM_ADDRESS_START = 536870912
def application_flash_limits_get(softdevice_flash_size,
                                 bootloader_flash_size,
                                 platform_flash_size):
    """Return (start, size) hex strings for the application flash region.

    The application is placed directly after the SoftDevice and may use
    everything up to the bootloader reservation at the top of flash.
    """
    region_start = hex(softdevice_flash_size)
    region_size = hex(platform_flash_size - bootloader_flash_size)
    return (region_start, region_size)
def application_ram_limits_get(softdevice_ram_size,
                               bootloader_ram_size,
                               platform_ram_size):
    """Return (start, size) hex strings for the application RAM region.

    RAM starts at RAM_ADDRESS_START; the SoftDevice occupies the bottom and
    the bootloader reservation is subtracted from the usable size.
    """
    region_start = RAM_ADDRESS_START + softdevice_ram_size
    region_size = platform_ram_size - bootloader_ram_size
    return (hex(region_start), hex(region_size))
# Lightweight records describing the pieces of a SES project file.
DataRegion = namedtuple("DataRegion", ["start", "size"])  # hex-string bounds
Target = namedtuple("Target", ["name", "includes", "defines", "sources"])
Platform = namedtuple("Platform", ["name", "arch", "flash_size", "ram_size"])
SoftDevice = namedtuple("Softdevice", ["hex_file", "flash_size", "ram_size"])
Configuration = namedtuple("Configuration", ["target", "platform", "softdevice"])
File = namedtuple("File", ["path"])
Group = namedtuple("Group", ["name", "files", "match_string"])
# Source files are bucketed into these SES project groups by substring match
# on their path (see create_file_groups()).  NOTE(review): the per-group
# `files` lists are module-level mutable state shared by shallow copies.
GROUP_TEMPLATES = [
    Group(name="Application", files=[], match_string="examples"),
    Group(name="Core", files=[], match_string="mesh/core"),
    Group(name="Serial", files=[], match_string="mesh/serial"),
    Group(name="Mesh stack", files=[], match_string="mesh/stack"),
    Group(name="GATT", files=[], match_string="mesh/gatt"),
    Group(name="DFU", files=[], match_string="mesh/dfu"),
    Group(name="Toolchain", files=[File("$(StudioDir)/source/thumb_crt0.s")], match_string="toolchain"),
    Group(name="Access", files=[], match_string="mesh/access"),
    Group(name="Bearer", files=[], match_string="mesh/bearer"),
    Group(name="SEGGER RTT", files=[], match_string="rtt"),
    Group(name="uECC", files=[], match_string="micro-ecc"),
    Group(name="nRF5 SDK", files=[], match_string="$(SDK_ROOT"),
    Group(name="Provisioning", files=[], match_string="mesh/prov"),
    Group(name="Configuration Model", files=[], match_string="models/foundation/config"),
    Group(name="Health Model", files=[], match_string="models/foundation/health"),
    Group(name="Generic OnOff Model", files=[], match_string="models/model_spec/generic_onoff"),
    Group(name="Simple OnOff Model", files=[], match_string="models/vendor/simple_on_off"),
    Group(name="Remote provisioning Model", files=[], match_string="models/proprietary/pb_remote")]
def unix_relative_path_get(path1, path2):
    """Return *path1* relative to *path2*, using forward slashes.

    Paths beginning with a SES macro such as "$(SDK_ROOT)" are left
    untouched apart from slash normalization.
    """
    if path1.startswith('$('):
        return path1.replace("\\", "/")
    relative = os.path.relpath(path1, path2)
    return relative.replace("\\", "/")
def load_config(input_file):
    """Parse and return the CMake-generated target configuration (JSON)."""
    with open(input_file, "r") as config_file:
        return json.load(config_file)
def load_softdevice(sd_config):
    """Return the SoftDevice entry matching sd_config["name"].

    Reads sd_config["definition_file"] (JSON with a "softdevices" list).
    Raises IndexError when no entry matches, like the original lookup.
    """
    with open(sd_config["definition_file"], "r") as definition_file:
        definitions = json.load(definition_file)
    matches = [sd for sd in definitions["softdevices"]
               if sd["name"] == sd_config["name"]]
    return matches[0]
def load_platform(platform_config):
    """Return the platform entry matching platform_config["name"].

    Reads platform_config["definition_file"] (JSON with a "platforms" list).
    Raises IndexError when no entry matches, like the original lookup.
    """
    with open(platform_config["definition_file"], "r") as definition_file:
        definitions = json.load(definition_file)
    matches = [entry for entry in definitions["platforms"]
               if entry["name"] == platform_config["name"]]
    return matches[0]
def create_file_groups(files, out_dir):
    """Bucket source files into SES project groups.

    Each file is matched (by substring) against GROUP_TEMPLATES and appended
    to the first matching group, as a path relative to *out_dir*; unmatched
    files land in an "Other" group.  Startup files are dropped because SES
    supplies its own.  Returns the non-empty groups in template order.
    """
    # BUGFIX: the original `GROUP_TEMPLATES[:]` shallow copy shared the
    # per-group `files` lists with the module-level templates, so every call
    # permanently accumulated files into GROUP_TEMPLATES.  Copy each template
    # with a fresh list instead (preserving the Toolchain seed file).
    groups = [template._replace(files=list(template.files))
              for template in GROUP_TEMPLATES]
    other = Group(name="Other", files=[], match_string=None)
    for f in files:
        # SES provides its own startup code; skip ours.
        if "gcc_startup" in f.lower() or "arm_startup" in f.lower():
            continue
        found_group = False
        for g in groups:
            if g.match_string in f:
                g.files.append(File(unix_relative_path_get(f, out_dir)))
                found_group = True
                break
        if not found_group:
            other.files.append(File(unix_relative_path_get(f, out_dir)))
    groups.append(other)
    # Remove empty groups (including "Other" when nothing fell through).
    return [g for g in groups if g.files]
def calculate_flash_limits(config):
    """Return the application flash DataRegion for the configured platform."""
    platform_name = config["platform"]["config"]["name"]
    if "nrf52810_xxAA" in platform_name:
        # nRF52810 builds reserve no bootloader flash.
        bl_flash_size = 0
    elif "nrf51" in platform_name.lower():
        bl_flash_size = NRF51_BOOTLOADER_FLASH_SIZE
    else:
        bl_flash_size = NRF52_BOOTLOADER_FLASH_SIZE
    flash_limits = application_flash_limits_get(
        config["softdevice"]["config"]["flash_size"],
        bl_flash_size,
        config["platform"]["config"]["flash_size"])
    return DataRegion(*flash_limits)
def calculate_ram_limits(config):
    """Return the application RAM DataRegion for the configured platform."""
    platform_name = config["platform"]["config"]["name"]
    if "nrf52810_xxAA" in platform_name:
        # nRF52810 builds reserve no bootloader RAM.
        bl_ram_size = 0
    elif "nrf51" in platform_name.lower():
        bl_ram_size = NRF51_BOOTLOADER_RAM_SIZE
    else:
        bl_ram_size = NRF52_BOOTLOADER_RAM_SIZE
    ram_limits = application_ram_limits_get(
        config["softdevice"]["config"]["ram_size"],
        bl_ram_size,
        config["platform"]["config"]["ram_size"])
    return DataRegion(*ram_limits)
def generate_ses_project(config, out_dir="."):
    """Render the "ses.xml" Jinja2 template into a SES .emProject string.

    Mutates *config* in place, filling in the derived fields the template
    expects (file groups, flash/RAM regions, relative paths, FPU flag).
    """
    target = config["target"]
    source_files = target["sources"].split(";")
    target["includes"] = [unix_relative_path_get(include, out_dir)
                          for include in target["includes"].split(";")]
    target["heap_size"] = 1024
    target["stack_size"] = 2048
    target["groups"] = create_file_groups(source_files, out_dir)
    target["flash"] = calculate_flash_limits(config)
    target["ram"] = calculate_ram_limits(config)
    # Only the cortex-m4f variants have a hardware FPU.
    config["platform"]["fpu"] = config["platform"]["config"]["arch"] == "cortex-m4f"
    config["softdevice"]["hex_file"] = unix_relative_path_get(
        config["softdevice"]["hex_file"], out_dir)
    config["sdk_default_path"] = unix_relative_path_get(
        '../../../nRF5_SDK_16.0.0_98a08e2', out_dir)
    with open("ses.xml", "r") as template_file:
        template_text = template_file.read()
    return jinja2.Template(template_text).render(config)
def generate_ses_session(out_dir):
    """Build a minimal .emSession that opens the SES getting-started guide."""
    doc_path = unix_relative_path_get('../../doc/getting_started/SES.md', out_dir)
    session_template = ('<!DOCTYPE CrossStudio_Session_File>\n'
                        '<session>\n'
                        '\t<Files>\n'
                        '\t\t<SessionOpenFile path="{}"/>\n'
                        '\t</Files>\n'
                        '</session>')
    return session_template.format(doc_path)
def test():
    """Smoke test: render TEST_JSON_STR and write the result to test.xml."""
    config = json.loads(TEST_JSON_STR)
    print(config)
    rendered = generate_ses_project(config)
    with open("test.xml", "w") as output_file:
        output_file.write(rendered)
    print("Done")
def main():
    """CLI entry: SESGenerator.py <target_configuration>.json <output_dir>.

    Loads the CMake-generated config, resolves the SoftDevice/platform
    definitions, and writes the .emProject, .emSession and
    flash_placement.xml files into the output directory.
    """
    input_file = sys.argv[1]
    out_dir = sys.argv[2]
    config = load_config(input_file)
    config["softdevice"]["config"] = load_softdevice(config["softdevice"])
    config["platform"]["config"] = load_platform(config["platform"])
    ses_project = generate_ses_project(config, out_dir)
    out_dir += "/"
    # SES doesn't support "." in filenames
    output_filename = out_dir + config["target"]["name"].replace(".", "_")
    project_file = output_filename + ".emProject"
    with open(project_file, "w") as f:
        f.write(ses_project)
    # Create session
    ses_session = generate_ses_session(out_dir)
    session_file = output_filename + ".emSession"
    with open(session_file, "w") as f:
        f.write(ses_session)
    # Generate flash placement:
    copyfile("flash_placement.xml", out_dir + "flash_placement.xml")
    print("Wrote: " + project_file)
if __name__ == "__main__":
    main()
| 37.763948 | 152 | 0.642459 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,179 | 0.361291 |
b344553cf35913d934c9423d0a59566943fcaa1d | 1,446 | py | Python | PythonFIAP/Capitulo5_Manipula_Arquivos/Boston.py | DanielGMesquita/StudyPath | 0b3d0bb1deac7eb0d1b301edca5e5ed320568f4c | [
"MIT"
] | null | null | null | PythonFIAP/Capitulo5_Manipula_Arquivos/Boston.py | DanielGMesquita/StudyPath | 0b3d0bb1deac7eb0d1b301edca5e5ed320568f4c | [
"MIT"
] | null | null | null | PythonFIAP/Capitulo5_Manipula_Arquivos/Boston.py | DanielGMesquita/StudyPath | 0b3d0bb1deac7eb0d1b301edca5e5ed320568f4c | [
"MIT"
] | null | null | null | #Análise do relatório de vôos e viagens do aeroporto de Boston com base nos relatórios econômicos oficiais
# Analysis of Boston airport flights/travel based on the official economic
# indicator report.  CSV columns (per the original comments): [0]=year,
# [1]=month, [2]=passengers, [3]=flights, [5]=avg hotel daily rate.
with open('economic-indicators.csv', 'r') as boston:
    total_voos = 0
    maior = 0
    total_passageiros = 0
    maior_media_diaria = 0
    ano_usuario = input('Qual ano deseja pesquisar?: ')
    # Total number of flights in the file; [1:-1] skips the CSV header and
    # the trailing line.
    for linha in boston.readlines()[1:-1]:
        lista = linha.split(',')
        total_voos = total_voos + float(lista[3])
        # Track the month/year with the highest passenger traffic.
        if float(lista[2]) > float(maior):
            maior = lista[2]
            ano = lista[0]
            mes = lista[1]
        # Sum passengers for the year chosen by the user.
        if ano_usuario == lista[0]:
            total_passageiros = total_passageiros + float(lista[2])
        # Track the month with the highest average hotel daily rate.
        # NOTE(review): `mes`/`mes_maior_diaria` stay undefined (NameError at
        # the prints) if no data row satisfies the comparisons.
        if float(lista[5]) > float(maior_media_diaria):
            maior_media_diaria = lista[5]
            mes_maior_diaria = lista[1]
    print('O total de voos é {}'.format(total_voos))
    print('O mês/ano com maior trânsito no aeroporto foi {}/{}'.format(mes, ano))
    print('O total de passageiros que passaram no ano de {} é {} passageiros'.format(str(ano_usuario),
                                                                                     str(total_passageiros)))
    print('O mês do ano {} com maior média diária de hotel foi {}'.format(ano_usuario, mes_maior_diaria))
| 45.1875 | 106 | 0.64592 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 610 | 0.416382 |
b3459b788b9b0f9bc1d67f711ca82a899ecdbfb0 | 9,008 | py | Python | source/rttov_test/profile-datasets-py/div52/011.py | bucricket/projectMAScorrection | 89489026c8e247ec7c364e537798e766331fe569 | [
"BSD-3-Clause"
] | null | null | null | source/rttov_test/profile-datasets-py/div52/011.py | bucricket/projectMAScorrection | 89489026c8e247ec7c364e537798e766331fe569 | [
"BSD-3-Clause"
] | 1 | 2022-03-12T12:19:59.000Z | 2022-03-12T12:19:59.000Z | source/rttov_test/profile-datasets-py/div52/011.py | bucricket/projectMAScorrection | 89489026c8e247ec7c364e537798e766331fe569 | [
"BSD-3-Clause"
] | null | null | null | """
Profile ../profile-datasets-py/div52/011.py
file automatically created by prof_gen.py script
"""
# Machine-generated RTTOV test profile: assigns atmospheric/surface fields on
# an externally supplied `self` mapping.  Do not edit the numeric data by hand.
self["ID"] = "../profile-datasets-py/div52/011.py"
self["Q"] = numpy.array([ 1.60776800e+00, 5.75602400e+00, 7.00189400e+00,
7.33717600e+00, 6.76175900e+00, 8.41951900e+00,
6.54641500e+00, 8.55272200e+00, 6.77920300e+00,
7.25516400e+00, 7.37081000e+00, 5.92257200e+00,
6.50599600e+00, 6.64283300e+00, 6.28100600e+00,
5.43963200e+00, 5.08957400e+00, 5.38038600e+00,
5.50633800e+00, 5.62897900e+00, 5.19042900e+00,
4.84112700e+00, 4.63224600e+00, 4.76220200e+00,
4.69470800e+00, 4.51451000e+00, 4.58671400e+00,
4.67290700e+00, 4.57081400e+00, 4.47585900e+00,
4.42605100e+00, 4.37786600e+00, 4.38445800e+00,
4.39293100e+00, 4.40466700e+00, 4.41761000e+00,
4.37084000e+00, 4.20133400e+00, 4.03640900e+00,
3.99758200e+00, 3.97225900e+00, 3.89513500e+00,
3.76079000e+00, 3.63594700e+00, 3.81527700e+00,
3.99037900e+00, 4.23041800e+00, 4.51608500e+00,
4.76413100e+00, 4.79784600e+00, 4.83083700e+00,
6.93679600e+00, 9.88992600e+00, 1.41402000e+01,
2.17185200e+01, 2.91457900e+01, 3.96929600e+01,
5.04349000e+01, 6.08671100e+01, 7.10095300e+01,
8.43479200e+01, 1.11923300e+02, 1.38998100e+02,
1.94360100e+02, 2.52783000e+02, 3.20280200e+02,
3.93544100e+02, 4.69894600e+02, 5.53426000e+02,
6.41501600e+02, 7.71092500e+02, 8.98548200e+02,
1.06642900e+03, 1.23389900e+03, 1.45307300e+03,
1.68006300e+03, 2.07637800e+03, 2.51954400e+03,
3.27713300e+03, 4.12034400e+03, 5.07040900e+03,
6.02835400e+03, 6.34354100e+03, 6.44987700e+03,
5.89160400e+03, 5.06925600e+03, 4.04401800e+03,
3.04533400e+03, 2.16305800e+03, 1.42927300e+03,
9.42830700e+02, 8.61332300e+02, 9.09295200e+02,
1.23575300e+03, 1.77325600e+03, 3.36407200e+03,
5.26097100e+03, 5.71855000e+03, 5.98901400e+03,
5.82855400e+03, 5.67443100e+03])
self["P"] = numpy.array([ 5.00000000e-03, 1.61000000e-02, 3.84000000e-02,
7.69000000e-02, 1.37000000e-01, 2.24400000e-01,
3.45400000e-01, 5.06400000e-01, 7.14000000e-01,
9.75300000e-01, 1.29720000e+00, 1.68720000e+00,
2.15260000e+00, 2.70090000e+00, 3.33980000e+00,
4.07700000e+00, 4.92040000e+00, 5.87760000e+00,
6.95670000e+00, 8.16550000e+00, 9.51190000e+00,
1.10038000e+01, 1.26492000e+01, 1.44559000e+01,
1.64318000e+01, 1.85847000e+01, 2.09224000e+01,
2.34526000e+01, 2.61829000e+01, 2.91210000e+01,
3.22744000e+01, 3.56504000e+01, 3.92566000e+01,
4.31001000e+01, 4.71882000e+01, 5.15278000e+01,
5.61259000e+01, 6.09895000e+01, 6.61252000e+01,
7.15398000e+01, 7.72395000e+01, 8.32310000e+01,
8.95203000e+01, 9.61138000e+01, 1.03017000e+02,
1.10237000e+02, 1.17777000e+02, 1.25646000e+02,
1.33846000e+02, 1.42385000e+02, 1.51266000e+02,
1.60496000e+02, 1.70078000e+02, 1.80018000e+02,
1.90320000e+02, 2.00989000e+02, 2.12028000e+02,
2.23441000e+02, 2.35234000e+02, 2.47408000e+02,
2.59969000e+02, 2.72919000e+02, 2.86262000e+02,
3.00000000e+02, 3.14137000e+02, 3.28675000e+02,
3.43618000e+02, 3.58966000e+02, 3.74724000e+02,
3.90892000e+02, 4.07474000e+02, 4.24470000e+02,
4.41882000e+02, 4.59712000e+02, 4.77961000e+02,
4.96630000e+02, 5.15720000e+02, 5.35232000e+02,
5.55167000e+02, 5.75525000e+02, 5.96306000e+02,
6.17511000e+02, 6.39140000e+02, 6.61192000e+02,
6.83667000e+02, 7.06565000e+02, 7.29886000e+02,
7.53627000e+02, 7.77789000e+02, 8.02371000e+02,
8.27371000e+02, 8.52788000e+02, 8.78620000e+02,
9.04866000e+02, 9.31523000e+02, 9.58591000e+02,
9.86066000e+02, 1.01395000e+03, 1.04223000e+03,
1.07092000e+03, 1.10000000e+03])
self["CO2"] = numpy.array([ 311.3895, 311.3882, 311.3878, 311.3877, 311.3879, 311.3874,
311.388 , 311.3873, 311.3879, 311.3877, 311.3877, 311.3882,
311.388 , 311.3879, 311.388 , 311.3883, 311.3884, 311.3883,
311.3883, 311.3882, 311.3884, 311.3885, 311.3886, 311.3885,
311.3885, 311.3886, 311.3886, 311.3885, 311.3886, 311.3886,
311.3886, 311.3886, 311.3886, 311.3886, 311.3886, 311.3886,
311.3886, 311.3887, 311.3887, 311.3888, 311.3888, 311.3888,
311.8368, 312.3079, 312.7998, 313.3147, 313.8527, 314.4136,
314.9985, 315.6075, 316.2405, 316.8978, 317.5809, 318.2885,
319.0201, 319.7787, 320.5633, 321.3738, 321.3704, 321.3672,
321.3629, 321.354 , 321.3453, 321.3275, 321.3088, 321.2871,
321.2635, 321.239 , 321.2121, 321.1838, 321.1422, 321.1012,
321.0473, 320.9934, 320.923 , 320.85 , 320.7227, 320.5802,
320.3368, 320.0658, 319.7604, 319.4525, 319.3512, 319.3171,
319.4965, 319.7608, 320.0903, 320.4113, 320.6948, 320.9306,
321.087 , 321.1132, 321.0978, 320.9928, 320.8201, 320.3088,
319.6992, 319.5521, 319.4652, 319.5168, 319.5663])
self["T"] = numpy.array([ 170.672, 167.1 , 177.713, 195.506, 207.725, 218.427,
235.452, 261.271, 286.139, 302.526, 307.761, 299.105,
277.321, 254.123, 242.604, 242.111, 244.188, 247.235,
249.79 , 251.021, 249.495, 247.35 , 245.156, 243.382,
241.117, 238.603, 237.177, 235.891, 234.271, 232.728,
231.48 , 230.272, 230.403, 230.58 , 230.857, 231.173,
231.319, 231.125, 230.936, 230.468, 229.983, 229.041,
227.593, 226.199, 225.705, 225.223, 225.188, 225.476,
225.743, 225.913, 226.079, 225.665, 225.013, 224.05 ,
222.293, 220.571, 219.676, 218.897, 218.878, 219.527,
220.435, 222.487, 224.501, 226.873, 229.258, 231.594,
233.885, 236.173, 238.493, 240.796, 243.218, 245.601,
248.126, 250.621, 253.108, 255.562, 257.808, 259.968,
261.977, 263.921, 265.771, 267.581, 269.295, 270.928,
272.357, 273.557, 274.577, 275.147, 275.554, 275.814,
276.066, 276.464, 277.23 , 278.183, 278.559, 276.474,
273.474, 274.53 , 274.81 , 274.81 , 274.81 ])
self["O3"] = numpy.array([ 0.2033725 , 0.2424187 , 0.3208637 , 0.4562969 , 0.736027 ,
1.047125 , 1.27765 , 1.424369 , 1.592244 , 1.945423 ,
2.494494 , 3.276502 , 4.362954 , 5.659009 , 6.054292 ,
6.016611 , 5.807721 , 5.465095 , 5.068172 , 4.685214 ,
4.427215 , 4.293835 , 4.20855 , 4.135007 , 4.034985 ,
3.921192 , 3.781656 , 3.644418 , 3.521658 , 3.404957 ,
3.31342 , 3.224845 , 3.115542 , 3.008671 , 2.838926 ,
2.645074 , 2.436685 , 2.192161 , 1.954276 , 1.756302 ,
1.566934 , 1.406719 , 1.277961 , 1.153048 , 1.06527 ,
0.9795561 , 0.90675 , 0.8436835 , 0.7768094 , 0.6765951 ,
0.5785423 , 0.4980748 , 0.4259511 , 0.3548743 , 0.284146 ,
0.2148278 , 0.170396 , 0.1296997 , 0.09665075, 0.07037028,
0.04774848, 0.0390997 , 0.03060822, 0.02923244, 0.02885916,
0.02934259, 0.03040131, 0.03152678, 0.03279865, 0.03412979,
0.03602205, 0.0378829 , 0.03856705, 0.03917781, 0.03968628,
0.04016728, 0.04066729, 0.04116737, 0.04188355, 0.04265563,
0.04372776, 0.04484095, 0.0448461 , 0.04464854, 0.0438066 ,
0.04338835, 0.04330029, 0.04399183, 0.04493602, 0.0461 ,
0.04719323, 0.04845931, 0.05025693, 0.05333931, 0.05646073,
0.05959075, 0.06483308, 0.05655864, 0.0507791 , 0.05078729,
0.05079517])
self["CTP"] = 500.0
self["CFRACTION"] = 0.0
self["IDG"] = 0
self["ISH"] = 0
self["ELEVATION"] = 0.0
self["S2M"]["T"] = 274.81
self["S2M"]["Q"] = 6138.87608117
self["S2M"]["O"] = 0.0507714396589
self["S2M"]["P"] = 1016.79
self["S2M"]["U"] = 8.31799
self["S2M"]["V"] = -1.2184
self["S2M"]["WFETC"] = 100000.0
self["SKIN"]["SURFTYPE"] = 1
self["SKIN"]["WATERTYPE"] = 1
self["SKIN"]["T"] = 272.988
self["SKIN"]["SALINITY"] = 35.0
self["SKIN"]["FOAM_FRACTION"] = 0.0
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
self["ZENANGLE"] = 0.0
self["AZANGLE"] = 0.0
self["SUNZENANGLE"] = 0.0
self["SUNAZANGLE"] = 0.0
self["LATITUDE"] = 80.1708
self["GAS_UNITS"] = 2
self["BE"] = 0.0
self["COSBK"] = 0.0
self["DATE"] = numpy.array([1992, 7, 15])
self["TIME"] = numpy.array([18, 0, 0])
| 54.26506 | 92 | 0.571603 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 448 | 0.049734 |
b345e48eaa39766f1b731f4fbeab11fa0907ab5a | 463 | py | Python | Tag_17.py | Gravitar64/Advent-of-Code-2015 | 423ef65815ecd92dc15ae8edd64c2af346bf8fcc | [
"Apache-2.0"
] | null | null | null | Tag_17.py | Gravitar64/Advent-of-Code-2015 | 423ef65815ecd92dc15ae8edd64c2af346bf8fcc | [
"Apache-2.0"
] | null | null | null | Tag_17.py | Gravitar64/Advent-of-Code-2015 | 423ef65815ecd92dc15ae8edd64c2af346bf8fcc | [
"Apache-2.0"
] | null | null | null | from time import perf_counter as pfc
from itertools import combinations
def load_puzzle(datei):
    """Read one container size per line from *datei*; return them as ints."""
    with open(datei) as puzzle_file:
        return [int(line) for line in puzzle_file]
def solve(puzzle, target=150):
    """Advent of Code 2015 day 17: count container combinations.

    Returns (part1, part2): part1 is the number of combinations of container
    sizes summing exactly to *target* (default 150, now a parameter);
    part2 is how many of those use the minimal number of containers.

    BUGFIXES vs. the original: range(1, len(puzzle)) never tried using ALL
    containers, and part2 was set to the running counter at the very first
    match (always 1) instead of the count of minimal-size combinations.
    """
    part2 = counter = 0
    for size in range(1, len(puzzle) + 1):
        for combo in combinations(puzzle, size):
            if sum(combo) != target:
                continue
            counter += 1
        # First container-count that produced any match is the minimum.
        if counter and not part2:
            part2 = counter
    return counter, part2
# Solve the stored puzzle input and print (part1, part2) plus elapsed time.
puzzle = load_puzzle('Tag_17.txt')
start = pfc()
print(solve(puzzle), pfc()-start)
b347b691c3c4c7996f91f105edcfcd1839de843a | 1,464 | py | Python | blog/templatetags/blog_tags.py | josephdubon/boilerplate_dubon_django_blog | 1dbe470006be066b12dd6486eb26a41d304206f8 | [
"Unlicense",
"MIT"
] | null | null | null | blog/templatetags/blog_tags.py | josephdubon/boilerplate_dubon_django_blog | 1dbe470006be066b12dd6486eb26a41d304206f8 | [
"Unlicense",
"MIT"
] | 2 | 2021-06-10T20:43:00.000Z | 2021-09-22T19:55:41.000Z | blog/templatetags/blog_tags.py | josephdubon/boilerplate_dubon_django_blog | 1dbe470006be066b12dd6486eb26a41d304206f8 | [
"Unlicense",
"MIT"
] | null | null | null | from django import template
from django.db.models import Count
from django.utils.safestring import mark_safe
import markdown
from ..models import Post
register = template.Library()
####
# Register as simple tags
####
# A simple template tag that returns the number of posts published so far.=
@register.simple_tag
def total_posts():
return Post.published.count()
# A simple template tag that displays the 5 most commented posts
@register.simple_tag
def get_most_commented_posts(count=5):
# Build a QuerySet using the annotate() function to aggregate the
# - total number of comments for each post.
return Post.published.annotate(
# use the Count aggregation function to store the number of comments
# - in the computed field total_comments for each Post object.
total_comments=Count('comments')
).order_by('-total_comments')[:count]
####
# Register as inclusion_tags
####
# An inclusion tag that returns the 5 latest posts.
@register.inclusion_tag('blog/post/latest_posts.html')
def show_latest_posts(count=5):
latest_posts = Post.published.order_by('-publish')[:count]
return {
'latest_posts': latest_posts
}
####
# Register Template Filters
####
# A template filter to enable use of markdown .md syntax in blog posts and then converts
# - post contents to HTML in the templates
@register.filter(name='markdown')
def markdown_format(text):
return mark_safe(markdown.markdown(text))
| 26.618182 | 88 | 0.734973 | 0 | 0 | 0 | 0 | 828 | 0.565574 | 0 | 0 | 752 | 0.513661 |
b347c2e19c64fdbc38703b93493ffd39b10d1790 | 104 | py | Python | gpt/config.py | jimth001/formality_emnlp19 | fa2f48f2ac6efd98c0cf986681747ea41adbac48 | [
"MIT"
] | 22 | 2019-08-28T16:36:51.000Z | 2022-01-13T07:30:36.000Z | gpt/config.py | jimth001/formality_emnlp19 | fa2f48f2ac6efd98c0cf986681747ea41adbac48 | [
"MIT"
] | 11 | 2020-01-28T22:16:38.000Z | 2022-02-09T23:31:41.000Z | gpt/config.py | jimth001/formality_emnlp19 | fa2f48f2ac6efd98c0cf986681747ea41adbac48 | [
"MIT"
] | 5 | 2019-11-12T13:28:36.000Z | 2022-01-13T07:30:39.000Z | from gpt.src import encoder
text_enc = encoder.get_encoder('./models/117M')
config_path='./models/117M'
| 26 | 47 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.288462 |
b34b540ac9f9bca5d13deef19fa4a3b49d5c22b5 | 11,555 | py | Python | epann/EStool.py | WorldEditors/ProjectNuwa | 8324b0f18d753649b08477ac210979150bce4f2c | [
"Apache-2.0"
] | null | null | null | epann/EStool.py | WorldEditors/ProjectNuwa | 8324b0f18d753649b08477ac210979150bce4f2c | [
"Apache-2.0"
] | null | null | null | epann/EStool.py | WorldEditors/ProjectNuwa | 8324b0f18d753649b08477ac210979150bce4f2c | [
"Apache-2.0"
] | null | null | null | """
Tookits for Evolution Strategies
"""
import pickle
import sys
import os
import time
from copy import deepcopy
import numpy
import math
import numpy.random as random
# Sentinel magnitude: fresh offspring carry score -FLOAT_MAX until evaluated.
FLOAT_MAX = 1.0e+8
def compute_ranks(x):
    """
    Returns rank as a vector of len(x) with integers from 0 to len(x)
    (0 for the smallest element; ties broken by position, as argsort is
    stable).
    """
    assert x.ndim == 1
    # The rank of each element is the position its index occupies in the
    # sorted order, i.e. the double argsort.
    return x.argsort().argsort()
def compute_centered_ranks(x):
    """
    Maps x to [-0.5, 0.5] and returns the rank:
    smallest value -> -0.5, largest -> +0.5, uniformly spaced in between.
    """
    values = numpy.array(x, dtype="float32")
    ranks = compute_ranks(values.ravel()).reshape(values.shape)
    centered = ranks.astype(numpy.float32) / (values.size - 1) - .5
    return centered
def categorical(p):
    """Sample an index along the last axis of probability vector(s) *p*.

    Uses inverse-CDF sampling: one uniform draw per leading index, returning
    the first position where the cumulative probability reaches the draw.
    """
    probs = numpy.asarray(p)
    cdf = probs.cumsum(-1)
    draws = numpy.random.uniform(size=probs.shape[:-1])[..., None]
    return (cdf >= draws).argmax(-1)
def sort_idx(score_list):
    """Return the indices of *score_list* ordered by descending score.

    Ties are broken by the larger index first, matching the original
    reverse sort of (score, index) tuples.
    """
    return sorted(range(len(score_list)),
                  key=lambda i: (score_list[i], i),
                  reverse=True)
def partition(data_list, begin, end):
    """Lomuto partition of data_list[begin:end+1] around data_list[end].

    Rearranges the slice in place so every element smaller than the pivot
    comes first, places the pivot just after that run, and returns the
    pivot's final index.
    """
    pivot = data_list[end]
    store = begin
    for scan in range(begin, end):
        if data_list[scan] < pivot:
            data_list[scan], data_list[store] = data_list[store], data_list[scan]
            store += 1
    data_list[store], data_list[end] = data_list[end], data_list[store]
    return store
def find_top_k(data_list, K):
    """Return the K-th largest value of data_list via iterative quickselect.

    NOTE: partially reorders data_list in place.  If K exceeds the list
    length, falls back to the overall minimum.
    """
    length = len(data_list)
    if(K > length):
        return numpy.min(data_list)
    begin = 0
    end = length-1
    index = partition(data_list,begin,end)
    # The K-th largest element belongs at position length - K once sorted.
    while index != length - K:
        if index > length - K:
            # Pivot landed right of the target slot: recurse into the left part.
            end = index-1
            index = partition(data_list,begin,index-1)
        else:
            # Pivot landed left of the target slot: recurse into the right part.
            begin = index+1
            index = partition(data_list,index+1,end)
    return data_list[index]
def check_validity(parameters):
    """Return True when *parameters* contains no NaN or Inf entries."""
    # isfinite is False exactly for NaN and +/-Inf, matching the original
    # isnan-or-isinf rejection.
    return bool(numpy.all(numpy.isfinite(parameters)))
class ESTool(object):
    """Evolution-strategy optimizer in the CMA-ES family (diagonal covariance).

    Maintains a pool of sampled parameter vectors ("offspring"), each stored
    as [weights, score].  Callers score offspring via set_score() and then
    call evolve(), which recombines the top-k into a new mean while adapting
    the per-dimension covariance and the global step size (sigma) with
    cumulative evolution paths, then refills the pool.
    """
    def __init__(self,
                 pool_size,
                 top_k_size,
                 initial_sigma,
                 default_cov_lr=None,
                 max_step_size=1.0,
                 min_cov=1.0e-12,
                 segments=None
                 ):
        # pool_size: offspring per generation (the current mean is included).
        # top_k_size: number of best offspring recombined into the new mean.
        # initial_sigma: starting global step size (also restored on load()).
        # default_cov_lr: overrides the derived covariance learning rate.
        # segments: optional int index array mapping each parameter dimension
        #     to a segment id; each segment shares one extra shaping factor.
        self._pool_size = pool_size
        self._init_sigma = deepcopy(initial_sigma)
        self._sigma_t = deepcopy(initial_sigma)
        self._top_k_size = top_k_size
        self._step = 0
        self._default_cov_lr = default_cov_lr
        self.max_step_size = max_step_size
        self.min_step_size = 1.0e-6
        self.min_cov = min_cov
        self.segments = segments
        # Per-segment multiplicative noise perturbations, drawn from
        # extra_shaping_val with probabilities extra_shaping_prob.
        self.extra_shaping_val = [0.708, 1.0, 1.225]
        self.extra_shaping_prob = [0.05, 0.90, 0.05]
    def sampling_extra_factor(self, segments):
        """Draw one shaping factor per segment, expanded to per-dimension."""
        extra_factor = []
        for _ in range(self.segment_max_idx + 1):
            extra_factor.append(self.extra_shaping_val[categorical(self.extra_shaping_prob)])
        return numpy.array(extra_factor, dtype="float32")[segments]
    def pre_calculate_parameters(self):
        """Derive the strategy constants from self.dim and self._top_k_size.

        Follows the usual CMA-ES parameterization: log-decreasing
        recombination weights w_i, effective selection mass (l_e), step-size
        path constants (c_sigma, d_sigma), covariance path constant (c_c)
        and covariance learning rate (c_cov).  Requires self.dim and
        self._cov to be set (see init_popultation()/load()).
        """
        self.w_r_i = numpy.zeros((self._top_k_size,), dtype="float32")
        for i in range(self._top_k_size):
            self.w_r_i[i] = math.log(self._top_k_size + 1) - math.log(i + 1)
            #self.w_r_i[i] = 1 / (i + 1)
        self.w_r_i *= 1.0 / numpy.sum(self.w_r_i)
        # Effective selection mass (mu_eff in CMA-ES literature).
        self.l_e = 1.0 / numpy.sum(self.w_r_i * self.w_r_i)
        # Expected norm of an N(0, I) vector in self.dim dimensions.
        self.p_std = math.sqrt(self.dim) * (1 - 0.25 / self.dim + 1.0 / (21 * self.dim ** 2))
        self.c_sigma = (self.l_e + 2) / (self.dim + self.l_e + 3)
        self.ps_f = math.sqrt(self.c_sigma * (2 - self.c_sigma) * self.l_e)
        if(self.l_e > self.dim + 2):
            self.d_sigma = 1 + 2.0 * math.sqrt((self.l_e - self.dim - 2) / (self.dim + 1)) + self.c_sigma
        else:
            self.d_sigma = 1 + self.c_sigma
        self.c_c = 4 / (self.dim + 4)
        self.l_m = math.sqrt(self.c_c * (2 - self.c_c) * self.l_e)
        self.s_m = math.sqrt(self.c_sigma * (2 - self.c_sigma) * self.l_e)
        self.p_m = math.sqrt(2 * self.c_sigma) * (1.4 + 2.0 / (self.dim + 1)) * self.p_std
        if(self._default_cov_lr is None):
            self.c_cov = 2 / (self.l_e * (self.dim + 1.414) ** 2) + (1 - 1/self.l_e) * min(
                1, (2 * self.l_e - 1) / ((self.dim + 1.414) ** 2 + self.l_e))
        else:
            self.c_cov = self._default_cov_lr
        self._sqrt_cov = numpy.sqrt(self._cov)
        # Default segmentation: every dimension is its own segment.
        if(self.segments is None):
            self.segments = numpy.arange(self.dim, dtype="int32")
            self.segment_max_idx = numpy.max(self.segments)
        else:
            self.segment_max_idx = numpy.max(self.segments)
    def generate_new_offspring(self):
        """Sample one offspring around the mean and append it to the pool."""
        # NOTE(review): after pre_calculate_parameters(), segments is never
        # None, so the else branch below is effectively dead.
        if(self.segments is not None):
            extra_shaping_factor = self.sampling_extra_factor(self.segments)
            deta_weights = self._sigma_t * self._sqrt_cov * extra_shaping_factor * numpy.random.normal(size=(self.dim))
        else:
            deta_weights = self._sigma_t * self._sqrt_cov * numpy.random.normal(size=(self.dim))
        self._evolution_pool.append([self._base_weights + deta_weights, -FLOAT_MAX])
    def init_popultation(self, weights, static_weights=None):
        """Initialize the population around a flat parameter vector.

        NOTE(review): method name typo ("popultation") kept for caller
        compatibility.  `static_weights` are carried along unmodified.
        """
        assert len(weights.shape) == 1, "Can only support vectorized parameters"
        self.dim = weights.shape[0]
        self._evolution_pool = []
        self._pc = numpy.zeros((self.dim, ), dtype="float32")
        self._ps = numpy.zeros((self.dim, ), dtype="float32")
        self._sigma_t = self._init_sigma
        self._cov = numpy.ones((self.dim, ), dtype="float32")
        self._base_weights = numpy.copy(weights)
        if(static_weights is not None):
            self._static_weights = deepcopy(static_weights)
        else:
            self._static_weights = dict()
        self.pre_calculate_parameters()
        # pool_size - 1 random offspring plus the unperturbed mean itself.
        for _ in range((self._pool_size - 1)):
            self.generate_new_offspring()
        self._evolution_pool.append([self._base_weights, -FLOAT_MAX])
    def evolve(self, verbose=False):
        """Run one generation: recombine top-k, adapt sigma/cov, refill pool.

        Offspring with NaN/Inf scores are discarded first.  Always returns
        False (no termination criterion is implemented).
        """
        #remove nan
        start_time = time.time()
        self._step += 1
        # Iterate backwards so deletions don't shift unvisited indices.
        for i in range(len(self._evolution_pool)-1, -1, -1):
            if(numpy.isnan(self._evolution_pool[i][1]) or numpy.isinf(self._evolution_pool[i][1])):
                if(verbose):
                    print("encounter %s in score in index %d, delete from the pool" % (self._evolution_pool[i][1], i))
                del(self._evolution_pool[i])
        if(len(self._evolution_pool) < 1):
            raise Exception("Evolution Pool is empty, something nasty happened (probably too much nans)")
        score_pool = [self._evolution_pool[i][1] for i in range(len(self._evolution_pool))]
        # NOTE(review): `fitnesses` is computed but never used below.
        fitnesses = compute_centered_ranks(score_pool)
        score, top_k_idxes = self.stat_top_k(self._top_k_size)
        # Renormalize the recombination weights if the pool shrank below k.
        if(len(top_k_idxes) < self._top_k_size):
            w_r_i = self.w_r_i[:len(top_k_idxes)]
            w_r_i *= 1.0 / numpy.sum(w_r_i)
        else:
            w_r_i = self.w_r_i
        # Weighted recombination of the top-k offspring into the new mean.
        new_base = numpy.zeros_like(self._base_weights)
        deta_base_sq = numpy.zeros_like(self._base_weights)
        for i, idx in enumerate(top_k_idxes):
            new_base += w_r_i[i] * self._evolution_pool[idx][0]
            deta_para = self._evolution_pool[idx][0] - self._base_weights
            deta_base_sq += w_r_i[i] * (deta_para * deta_para)
        # update p_sigma
        base_deta = new_base - self._base_weights
        n_ps = (1 - self.c_sigma)*self._ps + self.s_m / self._sigma_t * numpy.reciprocal(self._sqrt_cov) * base_deta
        #update step size
        ps_norm = numpy.sqrt(numpy.sum(n_ps * n_ps))
        n_sigma_t = self._sigma_t * numpy.exp(self.c_sigma / self.d_sigma * (ps_norm / self.p_std - 1))
        #update p_c
        # Heaviside gate: freeze the covariance path when the step-size path
        # is unusually long.
        h_t = float(ps_norm < self.p_m * math.sqrt(self._step + 1))
        n_pc = (1 - self.c_c) * self._pc + h_t * self.l_m / self._sigma_t * base_deta
        #update cov
        n_cov = (1 - self.c_cov) * self._cov + self.c_cov / self.l_e * self._pc * self._pc \
                + self.c_cov * (1 - 1/self.l_e) / self._sigma_t / self._sigma_t * deta_base_sq
        self._base_weights = new_base
        self._evolution_pool.clear()
        self._cov = numpy.clip(n_cov, self.min_cov, 1.0)
        self._sigma_t = numpy.clip(n_sigma_t, self.min_step_size, self.max_step_size)
        self._ps = n_ps
        self._pc = n_pc
        self._sqrt_cov = numpy.sqrt(self._cov)
        self._evolution_pool.append([self._base_weights, -FLOAT_MAX])
        for _ in range((self._pool_size - 1)):
            self.generate_new_offspring()
        finish_time = time.time()
        if(verbose):
            numpy.set_printoptions(precision=3, suppress=True)
            print("%s, sqrt_covariances: %.3f, step_size: %.3f, c_cov: %.3f, calculate time consumption: %.1f, top %.0f average scores: %.4f" %
                  (time.asctime(time.localtime(time.time())), numpy.mean(self._sqrt_cov), self._sigma_t, self.c_cov,
                   finish_time - start_time, self._top_k_size, score))
            sys.stdout.flush()
        return False
    @property
    def pool_size(self):
        # Current number of offspring (may be below _pool_size after NaN culls).
        return (len(self._evolution_pool))
    def get_weights(self, i):
        """Return the parameter vector of offspring *i*."""
        return self._evolution_pool[i][0]
    @property
    def get_base_weight(self):
        # The current distribution mean.
        return self._base_weights
    @property
    def get_static_weights(self):
        # Static weights are either a plain dict or an array-like with
        # .tolist() -- normalize arrays to lists for the caller.
        if(isinstance(self._static_weights, dict)):
            return self._static_weights
        else:
            return self._static_weights.tolist()
    def set_score(self, i, score):
        """Record the evaluated fitness for offspring *i*."""
        self._evolution_pool[i][1] = score
    def load(self, file_name):
        """Restore optimizer state from a pickle written by save().

        NOTE(review): the covariance is re-clipped to [0.64, 1.0] (tighter
        than min_cov) and sigma is reset to the constructor's initial_sigma
        rather than the saved value -- presumably intentional warm-restart
        behavior; confirm before relying on exact resume.
        """
        file_op = open(file_name, "rb")
        self._evolution_pool = pickle.load(file_op)
        base_weights = pickle.load(file_op)
        # Older checkpoints stored only the base weights; newer ones store
        # a (base_weights, static_weights) tuple.
        if(isinstance(base_weights, tuple)):
            self._base_weights, self._static_weights = base_weights
        else:
            self._base_weights = base_weights
            self._static_weights = dict()
        self._cov = pickle.load(file_op)
        self._pc = pickle.load(file_op)
        self._ps = pickle.load(file_op)
        self._sigma_t = pickle.load(file_op)
        self.dim = pickle.load(file_op)
        self._step = pickle.load(file_op)
        self._cov = numpy.clip(self._cov, 0.64, 1.0)
        self._sigma_t = deepcopy(self._init_sigma)
        file_op.close()
        # Drop any offspring whose weights contain NaN/Inf.
        for i in range(len(self._evolution_pool)-1, -1, -1):
            if(not check_validity(self._evolution_pool[i][0])):
                del self._evolution_pool[i]
        self.pre_calculate_parameters()
    def save(self, file_name):
        """Pickle the optimizer state in the order load() expects."""
        file_op = open(file_name, "wb")
        pickle.dump(self._evolution_pool, file_op)
        pickle.dump((self._base_weights, self._static_weights), file_op)
        pickle.dump(self._cov, file_op)
        pickle.dump(self._pc, file_op)
        pickle.dump(self._ps, file_op)
        pickle.dump(self._sigma_t, file_op)
        pickle.dump(self.dim, file_op)
        pickle.dump(self._step, file_op)
        file_op.close()
    def stat_top_k(self, k):
        """Return (mean score of the best k offspring, their pool indices)."""
        score_pool = [self._evolution_pool[i][1] for i in range(len(self._evolution_pool))]
        sorted_idx = sort_idx(score_pool)
        return numpy.mean(numpy.asarray(score_pool, dtype="float32")[sorted_idx[:k]]), sorted_idx[:k]
    def stat_avg(self):
        """Return the mean score over the whole pool."""
        score_pool = [self._evolution_pool[i][1] for i in range(len(self._evolution_pool))]
        return numpy.mean(numpy.asarray(score_pool, dtype="float32"))
| 38.645485 | 144 | 0.614453 | 9,542 | 0.82579 | 0 | 0 | 349 | 0.030203 | 0 | 0 | 686 | 0.059368 |
b34b7e35d2fe6795551ee57d83ecde2ef7416521 | 2,059 | py | Python | zeppos_root/root.py | changrunner/zeppos_root | dc4146d87454a921aba7fc0c01180b4b7dad1358 | [
"Apache-2.0"
] | null | null | null | zeppos_root/root.py | changrunner/zeppos_root | dc4146d87454a921aba7fc0c01180b4b7dad1358 | [
"Apache-2.0"
] | null | null | null | zeppos_root/root.py | changrunner/zeppos_root | dc4146d87454a921aba7fc0c01180b4b7dad1358 | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from os import path
from zeppos_logging.app_logger import AppLogger
class Root:
    """Locate the root directory of a project.

    The root is found by walking up from a module's directory until one of
    the marker files (e.g. ``.root``, ``manage.py`` or ``Pipfile``) appears.
    """

    @staticmethod
    def find_root_of_project(current_module_filename,
                             root_marker_filename_list=None):
        """Return the project root directory for ``current_module_filename``.

        Markers are tried in order; the first one found while walking up
        the tree wins. Returns ``None`` when no marker is found.
        """
        if root_marker_filename_list is None:
            # A None default replaces the original shared mutable default
            # argument while keeping the call signature compatible.
            root_marker_filename_list = [".root", "manage.py", "Pipfile"]
        AppLogger.logger.debug(f"current_module_filename: {current_module_filename}")
        AppLogger.logger.debug(f"root_marker_filename_list: {root_marker_filename_list}")
        for root_marker_filename in root_marker_filename_list:
            root_path = Root._get_root_directory_using_root_marker_file(
                directory=Root.get_path_object_of_full_file_name(
                    current_module_filename
                ),
                root_marker_filename=root_marker_filename,
                loop_counter=1
            )
            if root_path:
                AppLogger.logger.debug(f"root_path: {root_path}")
                return root_path
        AppLogger.logger.debug(f"root_path: None")
        return None

    @staticmethod
    def get_real_path(partial_full_filename):
        """Return the canonical path, resolving symlinks and ``..`` parts."""
        return path.realpath(partial_full_filename)

    @staticmethod
    def get_path_object_of_full_file_name(full_file_name):
        """Return a ``pathlib.Path`` for the canonicalised file name."""
        return Path(Root.get_real_path(full_file_name))

    @staticmethod
    def _get_root_directory_using_root_marker_file(directory, root_marker_filename, loop_counter):
        """Walk up from ``directory`` looking for ``root_marker_filename``.

        Returns the directory containing the marker, or ``None`` after 50
        levels (this also guards against spinning at the filesystem root,
        where ``parent`` returns the directory itself) or on any error.
        The original recursed once per level inside a ``while True`` and
        had an unreachable trailing ``return``; a bounded loop is the
        equivalent, flatter form.
        """
        try:
            while loop_counter <= 50:
                matches = list(directory.glob(root_marker_filename))
                if matches:
                    return path.dirname(matches[0])
                directory = directory.parent
                loop_counter += 1
            return None
        except Exception:
            # Best-effort, as before: any failure while probing the
            # filesystem simply means "root not found".
            return None
| 36.767857 | 110 | 0.606119 | 1,963 | 0.953375 | 0 | 0 | 1,929 | 0.936863 | 0 | 0 | 180 | 0.087421 |
b34ced91974433146e88095dc35d1816f2301813 | 829 | py | Python | weather_board_log.py | petervdb/weather_monitor | 35df2493385892969544579d4b465a5cd31daad4 | [
"MIT"
] | null | null | null | weather_board_log.py | petervdb/weather_monitor | 35df2493385892969544579d4b465a5cd31daad4 | [
"MIT"
] | null | null | null | weather_board_log.py | petervdb/weather_monitor | 35df2493385892969544579d4b465a5cd31daad4 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import SI1132
import BME280
import sys
import time
import os
if len(sys.argv) != 2:
print("Usage: weather_board.py <i2c device file>")
sys.exit()
si1132 = SI1132.SI1132(sys.argv[1])
bme280 = BME280.BME280(sys.argv[1], 0x03, 0x02, 0x02, 0x02)
def get_altitude(pressure, seaLevel):
atmospheric = pressure / 100.0
return 44330.0 * (1.0 - pow(atmospheric/seaLevel, 0.1903))
print("======== si1132 ========")
print("UV_index:%.2f" % (si1132.readUV() / 100.0))
print("Visible:%d" % int(si1132.readVisible()))
print("IR:%d" % int(si1132.readIR()))
print("======== bme280 ========")
print("temperature:%.2f" % bme280.read_temperature())
print("humidity:%.2f" % bme280.read_humidity())
p = bme280.read_pressure()
print("pressure:%.2f" % (p / 100.0))
print("altitude:%.2f" % get_altitude(p, 1024.25))
| 28.586207 | 62 | 0.652593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 209 | 0.252111 |
b34e77f588ce69279ed91eb9f75268c46fe83c1d | 444 | py | Python | UTrackGUI/widgets/video_widget.py | uetke/UTrack | efab70bf2e1dddf76e1b7e3a0efbdd611ea856de | [
"MIT"
] | null | null | null | UTrackGUI/widgets/video_widget.py | uetke/UTrack | efab70bf2e1dddf76e1b7e3a0efbdd611ea856de | [
"MIT"
] | null | null | null | UTrackGUI/widgets/video_widget.py | uetke/UTrack | efab70bf2e1dddf76e1b7e3a0efbdd611ea856de | [
"MIT"
] | null | null | null | from PyQt5.QtWidgets import QWidget, QHBoxLayout
from pyqtgraph import GraphicsLayoutWidget
import pyqtgraph as pg
class VideoWidget(QWidget):
    """Widget hosting a pyqtgraph ImageView inside a horizontal layout."""

    def __init__(self, parent=None):
        super().__init__(parent=parent)
        # The image display area provided by pyqtgraph.
        self.imv = pg.ImageView()
        # A single horizontal layout holds the image view.
        self.layout = QHBoxLayout(self)
        self.layout.addWidget(self.imv)
        self.setLayout(self.layout)
b34fbde2e6b2d38c2db36ca1c3b839a7216bdccf | 1,788 | py | Python | tablas.py | yakairi/Tienda.py | afc7c5734833e2974c0a6b9aceab1b7097db9add | [
"MIT"
] | null | null | null | tablas.py | yakairi/Tienda.py | afc7c5734833e2974c0a6b9aceab1b7097db9add | [
"MIT"
] | null | null | null | tablas.py | yakairi/Tienda.py | afc7c5734833e2974c0a6b9aceab1b7097db9add | [
"MIT"
] | null | null | null | # Tienda
-- Customers of the shop.
CREATE TABLE `cliente` (
  `idCliente` int NOT NULL AUTO_INCREMENT,
  `nombre` varchar(45) DEFAULT NULL,
  `apellido` varchar(45) DEFAULT NULL,
  `telefono` varchar(45) DEFAULT NULL,
  `email` varchar(45) DEFAULT NULL,
  `ciudad` varchar(45) DEFAULT NULL,
  `pais` varchar(45) DEFAULT NULL,
  PRIMARY KEY (`idCliente`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-- Departments that employees belong to (referenced by empleado.idDepartamento).
CREATE TABLE `departamento` (
  `idDepartamento` INT NOT NULL AUTO_INCREMENT,
  `nombre` VARCHAR(45) NULL,
  PRIMARY KEY (`idDepartamento`)
)ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-- Employees; idDepartamento links to departamento (no FK constraint declared).
CREATE TABLE `empleado` (
  `idEmpleado` int NOT NULL AUTO_INCREMENT,
  `nombre` varchar(45) DEFAULT NULL,
  `apellido` varchar(45) DEFAULT NULL,
  `fechaIngreso` date DEFAULT NULL,
  `fechaNacimiento` date DEFAULT NULL,
  `sexo` ENUM('hombre', 'mujer') DEFAULT NULL,
  `email` varchar(45) DEFAULT NULL,
  `telefono` varchar(45) DEFAULT NULL,
  `salario` decimal(10,2) DEFAULT NULL,
  `idDepartamento` int NULL,
  PRIMARY KEY (`idEmpleado`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-- Product categories.
CREATE TABLE `categoria` (
  `idCategoria` int NOT NULL AUTO_INCREMENT,
  `nombre` varchar(45) DEFAULT NULL,
  `descripcion` varchar(45) DEFAULT NULL,
  PRIMARY KEY (`idCategoria`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-- Invoice header: who bought (idCliente) and who sold (idEmpleado), and when.
CREATE TABLE `factura` (
  `idFactura` int NOT NULL AUTO_INCREMENT,
  `fecha` datetime DEFAULT NULL,
  `idCliente` int DEFAULT NULL,
  `idEmpleado` int DEFAULT NULL,
  PRIMARY KEY (`idFactura`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-- Invoice line items: one row per product on an invoice, with the unit
-- price at sale time and the quantity sold.
CREATE TABLE `detalle_factura` (
  `idDetalle` int NOT NULL AUTO_INCREMENT,
  `idFactura` int DEFAULT NULL,
  `idProducto` int DEFAULT NULL,
  `precioUnitario` decimal(10,2) DEFAULT NULL,
  `cantidad` int DEFAULT NULL,
  PRIMARY KEY (`idDetalle`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
| 29.311475 | 47 | 0.730984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 495 | 0.276846 |
b350585827e6f65d1b53d617dcc78e85b78cd6c3 | 398 | py | Python | Python POO/Getters e Setters/exemplo02/pessoa.py | luccasocastro/Curso-Python | 7ad2b980bb2f95f833811291273d6ca1beb0fe77 | [
"MIT"
] | null | null | null | Python POO/Getters e Setters/exemplo02/pessoa.py | luccasocastro/Curso-Python | 7ad2b980bb2f95f833811291273d6ca1beb0fe77 | [
"MIT"
] | null | null | null | Python POO/Getters e Setters/exemplo02/pessoa.py | luccasocastro/Curso-Python | 7ad2b980bb2f95f833811291273d6ca1beb0fe77 | [
"MIT"
] | null | null | null | class Pessoa: #substantivo
def __init__(self, nome: str, idade: int) -> None:
self.nome = nome #substantivo
self.idade = idade #substantivo
def dirigir(self, veiculo: str) -> None: #verbos
print(f'Dirigindo um(a) {veiculo}')
def cantar(self) -> None: #verbos
print('lalalala')
def apresentar_idade(self) -> int: #verbos
return self.idade
| 26.533333 | 54 | 0.61809 | 396 | 0.994975 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.238693 |
b3505f341d67a1c5794fe4b0bc6c75cbc2db2874 | 8,184 | py | Python | tpdatasrc/tpgamefiles/scr/tpModifiers/duelist.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | [
"MIT"
] | 69 | 2015-05-05T14:09:25.000Z | 2022-02-15T06:13:04.000Z | tpdatasrc/tpgamefiles/scr/tpModifiers/duelist.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | [
"MIT"
] | 457 | 2015-05-01T22:07:45.000Z | 2022-03-31T02:19:10.000Z | tpdatasrc/tpgamefiles/scr/tpModifiers/duelist.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | [
"MIT"
] | 25 | 2016-02-04T21:19:53.000Z | 2021-11-15T23:14:51.000Z | from templeplus.pymod import PythonModifier
from toee import *
import tpdp
import char_class_utils
import d20_action_utils
###################################################
def GetConditionName():
	# Name under which this class condition is registered.
	return "Duelist"
print "Registering " + GetConditionName()
classEnum = stat_level_duelist  # class identifier used by every level lookup below
preciseStrikeEnum = 2400  # action enum reserved for Precise Strike (radial code below is disabled)
###################################################
#### standard callbacks - BAB and Save values
def OnGetToHitBonusBase(attachee, args, evt_obj):
	# Grant the base attack bonus contributed by Duelist class levels.
	classLvl = attachee.stat_level_get(classEnum)
	babvalue = game.get_bab_for_class(classEnum, classLvl)
	evt_obj.bonus_list.add(babvalue, 0, 137) # untyped, description: "Class"
	return 0
def OnGetSaveThrowFort(attachee, args, evt_obj):
	# Class-based Fortitude save bonus for Duelist levels (untyped, "Class").
	value = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Fortitude)
	evt_obj.bonus_list.add(value, 0, 137)
	return 0
def OnGetSaveThrowReflex(attachee, args, evt_obj):
	# Class-based Reflex save bonus for Duelist levels (untyped, "Class").
	value = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Reflex)
	evt_obj.bonus_list.add(value, 0, 137)
	return 0
def OnGetSaveThrowWill(attachee, args, evt_obj):
	# Class-based Will save bonus for Duelist levels (untyped, "Class").
	value = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Will)
	evt_obj.bonus_list.add(value, 0, 137)
	return 0
def IsArmorless( obj ):
	# Returns 1 when the critter wears no real armor (items whose armor
	# flags equal ARMOR_TYPE_NONE still count as "armorless") and carries
	# no shield; most Duelist abilities require this.
	armor = obj.item_worn_at(5)
	if armor != OBJ_HANDLE_NULL:
		armorFlags = armor.obj_get_int(obj_f_armor_flags)
		if armorFlags != ARMOR_TYPE_NONE:
			return 0
	shield = obj.item_worn_at(11)
	if shield != OBJ_HANDLE_NULL:
		return 0
	return 1
def IsRangedWeapon( weap ):
	# 1 if the weapon's ranged flag is set, 0 otherwise.
	if weap.obj_get_int(obj_f_weapon_flags) & OWF_RANGED_WEAPON:
		return 1
	return 0
def CannyDefenseAcBonus(attachee, args, evt_obj):
	# Canny Defense: while unarmored and wielding a melee weapon, add the
	# Intelligence bonus (capped by Duelist level) to AC as a Dex-type bonus.
	if not IsArmorless(attachee):
		return 0
	weap = attachee.item_worn_at(3)
	if weap == OBJ_HANDLE_NULL or IsRangedWeapon(weap):
		weap = attachee.item_worn_at(4)
		if weap == OBJ_HANDLE_NULL or IsRangedWeapon(weap):
			# no usable melee weapon in either hand
			return 0
	duelistLvl = attachee.stat_level_get(classEnum)
	intScore = attachee.stat_level_get(stat_intelligence)
	intBonus = (intScore - 10)/2
	if intBonus <= 0:
		return 0 # fixed: was a bare "return" (None), inconsistent with the other handlers
	if duelistLvl < intBonus:
		intBonus = duelistLvl
	evt_obj.bonus_list.modify(intBonus , 3, 104) # Dexterity bonus, ~Class~[TAG_LEVEL_BONUSES]
	return 0
def ImprovedReactionInitBonus(attachee, args, evt_obj):
	# Improved Reaction: +2 initiative at Duelist level 2, +4 at level 8.
	duelistLvl = attachee.stat_level_get(classEnum)
	if duelistLvl < 2:
		return 0
	if duelistLvl >= 8:
		bonVal = 4
	else:
		bonVal = 2
	evt_obj.bonus_list.add(bonVal, 0, 137 ) # adds untyped bonus to initiative
	return 0
def EnhancedMobility(attachee, args, evt_obj):
	# Enhanced Mobility (level 3+): +4 dodge AC against attacks of
	# opportunity provoked by movement, while unarmored.
	duelistLvl = attachee.stat_level_get(classEnum)
	if duelistLvl < 3:
		return 0
	if not IsArmorless(attachee):
		return 0
	if evt_obj.attack_packet.get_flags() & D20CAF_AOO_MOVEMENT:
		evt_obj.bonus_list.add(4, 8, 137 ) # adds +4 dodge bonus
	return 0
def GraceReflexBonus(attachee, args, evt_obj):
	# Grace (level 4+): +2 competence bonus to Reflex saves while unarmored.
	duelistLvl = attachee.stat_level_get(classEnum)
	if duelistLvl < 4:
		return 0
	if not IsArmorless(attachee):
		return 0
	evt_obj.bonus_list.add(2, 34, 137) # Competence bonus
	return 0
# def PreciseStrikeRadial(attachee, args, evt_obj):
# duelistLvl = attachee.stat_level_get(classEnum)
# if (duelistLvl < 5):
# return 0
## add radial menu action Precise Strike
# radialAction = tpdp.RadialMenuEntryPythonAction(-1, D20A_PYTHON_ACTION, preciseStrikeEnum, 0, "TAG_INTERFACE_HELP")
# radialParentId = radialAction.add_child_to_standard(attachee, tpdp.RadialMenuStandardNode.Class)
# return 0
# def OnPreciseStrikeCheck(attachee, args, evt_obj):
# if (not IsUsingLightOrOneHandedPiercing(attachee)):
# evt_obj.return_val = AEC_WRONG_WEAPON_TYPE
# return 0
# tgt = evt_obj.d20a.target
# stdChk = ActionCheckTargetStdAtk(attachee, tgt)
# if (stdChk != AEC_OK):
# evt_obj.return_val = stdChk
# return 0
# def OnPreciseStrikePerform(attachee, args, evt_obj):
# print "I performed!"
# return 0
preciseStrikeString = "Precise Strike"
def PreciseStrikeDamageBonus(attachee, args, evt_obj):
	# Precise Strike (level 5+): +1d6 damage (2d6 at level 10) when wielding
	# a single light/one-handed piercing weapon with no shield, against
	# targets that are not immune to critical hits.
	duelistLvl = attachee.stat_level_get(classEnum)
	if duelistLvl < 5:
		return 0
	# check if attacking with one weapon and without a shield
	if (attachee.item_worn_at(4) != OBJ_HANDLE_NULL and attachee.item_worn_at(3) != OBJ_HANDLE_NULL) or attachee.item_worn_at(11) != OBJ_HANDLE_NULL:
		return 0
	# check if light or one handed piercing
	if not IsUsingLightOrOneHandedPiercing(attachee):
		return 0
	tgt = evt_obj.attack_packet.target
	if tgt == OBJ_HANDLE_NULL: # shouldn't happen but better be safe
		return 0
	if tgt.d20_query(Q_Critter_Is_Immune_Critical_Hits):
		return 0
	damage_dice = dice_new('1d6')
	if duelistLvl >= 10:
		damage_dice.number = 2
	evt_obj.damage_packet.add_dice(damage_dice, -1, 127 )
	return 0
def ElaborateParry(attachee, args, evt_obj):
	# Elaborate Parry (level 7+): while fighting defensively, add a dodge
	# AC bonus equal to the Duelist class level.
	duelistLvl = attachee.stat_level_get(classEnum)
	if duelistLvl < 7:
		return 0
	if not attachee.d20_query(Q_FightingDefensively): # this also covers Total Defense
		return 0
	evt_obj.bonus_list.add(duelistLvl , 8, 137) # Dodge bonus, ~Class~[TAG_LEVEL_BONUSES]
	return 0
def IsUsingLightOrOneHandedPiercing( obj ):
	# Returns 1 when the main-hand or off-hand weapon qualifies as light or
	# as a one-handed piercing weapon; 0 when both hands are empty or
	# neither weapon qualifies.
	weap = obj.item_worn_at(3)
	offhand = obj.item_worn_at(4)
	if weap == OBJ_HANDLE_NULL and offhand == OBJ_HANDLE_NULL:
		return 0
	if weap == OBJ_HANDLE_NULL:
		# main hand empty - evaluate the off-hand weapon alone
		weap = offhand
		offhand = OBJ_HANDLE_NULL
	if IsWeaponLightOrOneHandedPiercing(obj, weap):
		return 1
	# check the offhand
	if offhand != OBJ_HANDLE_NULL:
		if IsWeaponLightOrOneHandedPiercing(obj, offhand):
			return 1
	return 0
def IsWeaponLightOrOneHandedPiercing( obj, weap):
	# Returns 1 when the weapon is light, or is one-handed AND piercing;
	# 0 for two-handed/unwieldable weapons. Wield type is checked both in
	# the "normal" (un-enlarged) state and the current (possibly enlarged)
	# state, per the truth table below.
	# truth table
	# nor. | enlarged | return
	# 0 x 1 assume un-enlarged state
	# 1 0 1 shouldn't be possible... unless it's actually reduce person (I don't really care about that)
	# 1 1 is_piercing
	# 1 2 is_piercing
	# 2 x 0
	# 3 x 0
	normalWieldType = obj.get_wield_type(weap, 1) # "normal" means weapon is not enlarged
	if normalWieldType >= 2: # two handed or unwieldable
		return 0
	if normalWieldType == 0:
		return 1
	# otherwise if the weapon is also enlarged;
	wieldType = obj.get_wield_type(weap, 0)
	if wieldType == 0:
		return 1
	# weapon is not light, but is one handed - check if piercing
	attackType = weap.obj_get_int(obj_f_weapon_attacktype)
	if attackType == D20DT_PIERCING: # should be strictly piercing from what I understand (supposed to be rapier-like)
		return 1
	return 0
def DuelistDeflectArrows(attachee, args, evt_obj):
	# Deflect Arrows (level 9+): negate an incoming ranged attack when the
	# Duelist can see the attacker, is not flat-footed or blinded, and is
	# using a light or one-handed piercing weapon. Clears the hit/critical
	# flags on the attack packet and marks it as deflected.
	duelistLvl = attachee.stat_level_get(classEnum)
	if duelistLvl < 9:
		return 0
	offendingWeapon = evt_obj.attack_packet.get_weapon_used()
	if offendingWeapon == OBJ_HANDLE_NULL:
		return 0
	if not (evt_obj.attack_packet.get_flags() & D20CAF_RANGED):
		return 0
	# check if attacker visible
	attacker = evt_obj.attack_packet.attacker
	if attacker == OBJ_HANDLE_NULL:
		return 0
	if attacker.d20_query(Q_Critter_Is_Invisible) and not attachee.d20_query(Q_Critter_Can_See_Invisible):
		return 0
	if attachee.d20_query(Q_Critter_Is_Blinded):
		return 0
	# check flatfooted
	if attachee.d20_query(Q_Flatfooted):
		return 0
	# check light weapon or one handed piercing
	if not IsUsingLightOrOneHandedPiercing(attachee):
		return 0
	atkflags = evt_obj.attack_packet.get_flags()
	atkflags |= D20CAF_DEFLECT_ARROWS
	atkflags &= ~(D20CAF_HIT | D20CAF_CRITICAL)
	evt_obj.attack_packet.set_flags(atkflags)
	return 0
# Build the Duelist class condition and attach every event callback
# defined above to its corresponding engine event.
classSpecObj = PythonModifier(GetConditionName(), 0)
classSpecObj.AddHook(ET_OnToHitBonusBase, EK_NONE, OnGetToHitBonusBase, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_FORTITUDE, OnGetSaveThrowFort, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_REFLEX, OnGetSaveThrowReflex, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_WILL, OnGetSaveThrowWill, ())
classSpecObj.AddHook(ET_OnGetAC, EK_NONE, CannyDefenseAcBonus, ())
classSpecObj.AddHook(ET_OnGetAC, EK_NONE, EnhancedMobility, ())
classSpecObj.AddHook(ET_OnGetAC, EK_NONE, ElaborateParry, ())
classSpecObj.AddHook(ET_OnGetInitiativeMod, EK_NONE, ImprovedReactionInitBonus, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_REFLEX, GraceReflexBonus, ())
classSpecObj.AddHook(ET_OnDealingDamage, EK_NONE, PreciseStrikeDamageBonus, ())
classSpecObj.AddHook(ET_OnDeflectArrows, EK_NONE, DuelistDeflectArrows, ())
| 32.347826 | 146 | 0.758187 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,960 | 0.239492 |
b350a3188e35c8f2c37003d0fe783f3e0525f0d1 | 2,349 | py | Python | bot/__main__.py | ThatFarziGamer/YouNeedSnek | 64c626e81c8652fb54312dad19e6ef0a7ce6c257 | [
"MIT"
] | null | null | null | bot/__main__.py | ThatFarziGamer/YouNeedSnek | 64c626e81c8652fb54312dad19e6ef0a7ce6c257 | [
"MIT"
] | null | null | null | bot/__main__.py | ThatFarziGamer/YouNeedSnek | 64c626e81c8652fb54312dad19e6ef0a7ce6c257 | [
"MIT"
] | null | null | null | import logging
import datetime
import os
import sys
import yaml
from pathlib import Path
import discord
from discord.ext import commands
def setup_logger() -> logging.Logger:
    """Create and return the root Logger object for the bot."""
    log_dir = Path('logs')
    log_dir.mkdir(exist_ok=True)
    stamp = datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')
    log_path = log_dir / f'{stamp}.log'
    logger = logging.getLogger('bot')  # the actual logger instance
    logger.setLevel(logging.DEBUG)     # capture every level; handlers filter below
    formatter = logging.Formatter('{asctime} - {name} - {levelname} - {message}', style='{')
    # One handler for the console, one for the timestamped log file.
    for handler in (logging.StreamHandler(), logging.FileHandler(log_path)):
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    # Quieten the noisy third-party loggers.
    for noisy in ('discord', 'aiosqlite', 'websockets'):
        logging.getLogger(noisy).setLevel(logging.ERROR)
    return logger
logger = setup_logger()
# Load the bot token and command prefix from the YAML config at import time.
with open("config.yml") as f:
    yaml_data = yaml.full_load(f)
    token = yaml_data["bot"]["token"]
    prefix = yaml_data["bot"]["prefix"]
class YNBBot(commands.Bot):
    """An instance of the bot."""
    def __init__(self):
        # prefix comes from config.yml, read at module import time.
        super().__init__(command_prefix=prefix,
                         description="YNB Bot.")
    async def on_ready(self):
        # list of all the cogs.
        cogs = [cog for cog in os.listdir("bot/cogs") if cog.endswith(".py")]
        for cog in cogs:
            self.load_extension("bot.cogs." + os.path.splitext(cog)[0])
        logger.info(f'Running as {self.user.name} with ID: {self.user.id}')
        await self.change_presence(activity=discord.Game(name="You need bear!"))
    def run(self):
        # running the bot: re-read the token from config.yml and block
        # until the connection is closed.
        with open("config.yml") as f:
            data = yaml.full_load(f)
            token = data["bot"]["token"]
        super().run(token, bot=True, reconnect=True)
if __name__ == "__main__":
    # Entry point: create the bot and start it (blocks until shutdown).
    bot = YNBBot()
    # bot.remove_command("help")
    bot.run()
| 31.32 | 92 | 0.664112 | 798 | 0.339719 | 0 | 0 | 0 | 0 | 393 | 0.167305 | 663 | 0.282248 |
b3532df5397a8d5aa65b3ed84d50aae7151c36c0 | 95,382 | py | Python | sdk/python/pulumi_aws_native/emr/outputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 29 | 2021-09-30T19:32:07.000Z | 2022-03-22T21:06:08.000Z | sdk/python/pulumi_aws_native/emr/outputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 232 | 2021-09-30T19:26:26.000Z | 2022-03-31T23:22:06.000Z | sdk/python/pulumi_aws_native/emr/outputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 4 | 2021-11-10T19:42:01.000Z | 2022-02-05T10:15:49.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'ClusterApplication',
'ClusterAutoScalingPolicy',
'ClusterBootstrapActionConfig',
'ClusterCloudWatchAlarmDefinition',
'ClusterComputeLimits',
'ClusterConfiguration',
'ClusterEbsBlockDeviceConfig',
'ClusterEbsConfiguration',
'ClusterHadoopJarStepConfig',
'ClusterInstanceFleetConfig',
'ClusterInstanceFleetProvisioningSpecifications',
'ClusterInstanceGroupConfig',
'ClusterInstanceTypeConfig',
'ClusterJobFlowInstancesConfig',
'ClusterKerberosAttributes',
'ClusterKeyValue',
'ClusterManagedScalingPolicy',
'ClusterMetricDimension',
'ClusterOnDemandProvisioningSpecification',
'ClusterPlacementType',
'ClusterScalingAction',
'ClusterScalingConstraints',
'ClusterScalingRule',
'ClusterScalingTrigger',
'ClusterScriptBootstrapActionConfig',
'ClusterSimpleScalingPolicyConfiguration',
'ClusterSpotProvisioningSpecification',
'ClusterStepConfig',
'ClusterTag',
'ClusterVolumeSpecification',
'InstanceFleetConfigConfiguration',
'InstanceFleetConfigEbsBlockDeviceConfig',
'InstanceFleetConfigEbsConfiguration',
'InstanceFleetConfigInstanceFleetProvisioningSpecifications',
'InstanceFleetConfigInstanceTypeConfig',
'InstanceFleetConfigOnDemandProvisioningSpecification',
'InstanceFleetConfigSpotProvisioningSpecification',
'InstanceFleetConfigVolumeSpecification',
'InstanceGroupConfigAutoScalingPolicy',
'InstanceGroupConfigCloudWatchAlarmDefinition',
'InstanceGroupConfigConfiguration',
'InstanceGroupConfigEbsBlockDeviceConfig',
'InstanceGroupConfigEbsConfiguration',
'InstanceGroupConfigMetricDimension',
'InstanceGroupConfigScalingAction',
'InstanceGroupConfigScalingConstraints',
'InstanceGroupConfigScalingRule',
'InstanceGroupConfigScalingTrigger',
'InstanceGroupConfigSimpleScalingPolicyConfiguration',
'InstanceGroupConfigVolumeSpecification',
'StepHadoopJarStepConfig',
'StepKeyValue',
'StudioTag',
]
@pulumi.output_type
class ClusterApplication(dict):
    """Auto-generated Pulumi output type for an application on an EMR cluster.

    Dictionary access with the original camelCase CloudFormation key
    (e.g. ``additionalInfo``) warns and redirects the reader to the
    snake_case property getter.
    """
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "additionalInfo":
            suggest = "additional_info"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterApplication. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterApplication.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterApplication.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 additional_info: Optional[Any] = None,
                 args: Optional[Sequence[str]] = None,
                 name: Optional[str] = None,
                 version: Optional[str] = None):
        # Only explicitly-provided values are stored on the output object.
        if additional_info is not None:
            pulumi.set(__self__, "additional_info", additional_info)
        if args is not None:
            pulumi.set(__self__, "args", args)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if version is not None:
            pulumi.set(__self__, "version", version)
    @property
    @pulumi.getter(name="additionalInfo")
    def additional_info(self) -> Optional[Any]:
        return pulumi.get(self, "additional_info")
    @property
    @pulumi.getter
    def args(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "args")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        return pulumi.get(self, "version")
@pulumi.output_type
class ClusterAutoScalingPolicy(dict):
    """Auto-generated Pulumi output type for an EMR auto-scaling policy:
    scaling constraints plus the list of scaling rules."""
    def __init__(__self__, *,
                 constraints: 'outputs.ClusterScalingConstraints',
                 rules: Sequence['outputs.ClusterScalingRule']):
        pulumi.set(__self__, "constraints", constraints)
        pulumi.set(__self__, "rules", rules)
    @property
    @pulumi.getter
    def constraints(self) -> 'outputs.ClusterScalingConstraints':
        return pulumi.get(self, "constraints")
    @property
    @pulumi.getter
    def rules(self) -> Sequence['outputs.ClusterScalingRule']:
        return pulumi.get(self, "rules")
@pulumi.output_type
class ClusterBootstrapActionConfig(dict):
    """Auto-generated Pulumi output type for an EMR bootstrap action
    (a named script to run when the cluster starts). CamelCase dict keys
    warn and redirect to the snake_case property getters."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "scriptBootstrapAction":
            suggest = "script_bootstrap_action"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterBootstrapActionConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterBootstrapActionConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterBootstrapActionConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 name: str,
                 script_bootstrap_action: 'outputs.ClusterScriptBootstrapActionConfig'):
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "script_bootstrap_action", script_bootstrap_action)
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="scriptBootstrapAction")
    def script_bootstrap_action(self) -> 'outputs.ClusterScriptBootstrapActionConfig':
        return pulumi.get(self, "script_bootstrap_action")
@pulumi.output_type
class ClusterCloudWatchAlarmDefinition(dict):
    """Auto-generated Pulumi output type for the CloudWatch alarm that
    triggers an EMR auto-scaling rule. Required fields (comparison
    operator, metric name, period, threshold) are always set; the rest
    only when provided. CamelCase dict keys warn and redirect to the
    snake_case property getters."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "comparisonOperator":
            suggest = "comparison_operator"
        elif key == "metricName":
            suggest = "metric_name"
        elif key == "evaluationPeriods":
            suggest = "evaluation_periods"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterCloudWatchAlarmDefinition. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterCloudWatchAlarmDefinition.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterCloudWatchAlarmDefinition.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 comparison_operator: str,
                 metric_name: str,
                 period: int,
                 threshold: float,
                 dimensions: Optional[Sequence['outputs.ClusterMetricDimension']] = None,
                 evaluation_periods: Optional[int] = None,
                 namespace: Optional[str] = None,
                 statistic: Optional[str] = None,
                 unit: Optional[str] = None):
        pulumi.set(__self__, "comparison_operator", comparison_operator)
        pulumi.set(__self__, "metric_name", metric_name)
        pulumi.set(__self__, "period", period)
        pulumi.set(__self__, "threshold", threshold)
        if dimensions is not None:
            pulumi.set(__self__, "dimensions", dimensions)
        if evaluation_periods is not None:
            pulumi.set(__self__, "evaluation_periods", evaluation_periods)
        if namespace is not None:
            pulumi.set(__self__, "namespace", namespace)
        if statistic is not None:
            pulumi.set(__self__, "statistic", statistic)
        if unit is not None:
            pulumi.set(__self__, "unit", unit)
    @property
    @pulumi.getter(name="comparisonOperator")
    def comparison_operator(self) -> str:
        return pulumi.get(self, "comparison_operator")
    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> str:
        return pulumi.get(self, "metric_name")
    @property
    @pulumi.getter
    def period(self) -> int:
        return pulumi.get(self, "period")
    @property
    @pulumi.getter
    def threshold(self) -> float:
        return pulumi.get(self, "threshold")
    @property
    @pulumi.getter
    def dimensions(self) -> Optional[Sequence['outputs.ClusterMetricDimension']]:
        return pulumi.get(self, "dimensions")
    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[int]:
        return pulumi.get(self, "evaluation_periods")
    @property
    @pulumi.getter
    def namespace(self) -> Optional[str]:
        return pulumi.get(self, "namespace")
    @property
    @pulumi.getter
    def statistic(self) -> Optional[str]:
        return pulumi.get(self, "statistic")
    @property
    @pulumi.getter
    def unit(self) -> Optional[str]:
        return pulumi.get(self, "unit")
@pulumi.output_type
class ClusterComputeLimits(dict):
    """Auto-generated Pulumi output type for EMR managed-scaling compute
    limits (minimum/maximum capacity units plus optional core/on-demand
    caps). CamelCase dict keys warn and redirect to the snake_case
    property getters."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "maximumCapacityUnits":
            suggest = "maximum_capacity_units"
        elif key == "minimumCapacityUnits":
            suggest = "minimum_capacity_units"
        elif key == "unitType":
            suggest = "unit_type"
        elif key == "maximumCoreCapacityUnits":
            suggest = "maximum_core_capacity_units"
        elif key == "maximumOnDemandCapacityUnits":
            suggest = "maximum_on_demand_capacity_units"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterComputeLimits. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterComputeLimits.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterComputeLimits.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 maximum_capacity_units: int,
                 minimum_capacity_units: int,
                 unit_type: str,
                 maximum_core_capacity_units: Optional[int] = None,
                 maximum_on_demand_capacity_units: Optional[int] = None):
        pulumi.set(__self__, "maximum_capacity_units", maximum_capacity_units)
        pulumi.set(__self__, "minimum_capacity_units", minimum_capacity_units)
        pulumi.set(__self__, "unit_type", unit_type)
        if maximum_core_capacity_units is not None:
            pulumi.set(__self__, "maximum_core_capacity_units", maximum_core_capacity_units)
        if maximum_on_demand_capacity_units is not None:
            pulumi.set(__self__, "maximum_on_demand_capacity_units", maximum_on_demand_capacity_units)
    @property
    @pulumi.getter(name="maximumCapacityUnits")
    def maximum_capacity_units(self) -> int:
        return pulumi.get(self, "maximum_capacity_units")
    @property
    @pulumi.getter(name="minimumCapacityUnits")
    def minimum_capacity_units(self) -> int:
        return pulumi.get(self, "minimum_capacity_units")
    @property
    @pulumi.getter(name="unitType")
    def unit_type(self) -> str:
        return pulumi.get(self, "unit_type")
    @property
    @pulumi.getter(name="maximumCoreCapacityUnits")
    def maximum_core_capacity_units(self) -> Optional[int]:
        return pulumi.get(self, "maximum_core_capacity_units")
    @property
    @pulumi.getter(name="maximumOnDemandCapacityUnits")
    def maximum_on_demand_capacity_units(self) -> Optional[int]:
        return pulumi.get(self, "maximum_on_demand_capacity_units")
@pulumi.output_type
class ClusterConfiguration(dict):
    """Auto-generated Pulumi output type for an EMR configuration object:
    a classification, its key/value properties and, recursively, nested
    configurations. CamelCase dict keys warn and redirect to the
    snake_case property getters."""
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "configurationProperties":
            suggest = "configuration_properties"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterConfiguration. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterConfiguration.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterConfiguration.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 classification: Optional[str] = None,
                 configuration_properties: Optional[Any] = None,
                 configurations: Optional[Sequence['outputs.ClusterConfiguration']] = None):
        # All fields are optional; only provided values are stored.
        if classification is not None:
            pulumi.set(__self__, "classification", classification)
        if configuration_properties is not None:
            pulumi.set(__self__, "configuration_properties", configuration_properties)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)
    @property
    @pulumi.getter
    def classification(self) -> Optional[str]:
        return pulumi.get(self, "classification")
    @property
    @pulumi.getter(name="configurationProperties")
    def configuration_properties(self) -> Optional[Any]:
        return pulumi.get(self, "configuration_properties")
    @property
    @pulumi.getter
    def configurations(self) -> Optional[Sequence['outputs.ClusterConfiguration']]:
        return pulumi.get(self, "configurations")
@pulumi.output_type
class ClusterEbsBlockDeviceConfig(dict):
    """EBS block device configuration: a required `volume_specification`
    plus an optional `volumes_per_instance` count.
    """
    @staticmethod
    def __key_warning(key: str) -> None:
        # Warn when a deprecated camelCase dict key is used; suggest the
        # snake_case property getter instead.
        suggest = None
        if key == "volumeSpecification":
            suggest = "volume_specification"
        elif key == "volumesPerInstance":
            suggest = "volumes_per_instance"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterEbsBlockDeviceConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterEbsBlockDeviceConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterEbsBlockDeviceConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 volume_specification: 'outputs.ClusterVolumeSpecification',
                 volumes_per_instance: Optional[int] = None):
        pulumi.set(__self__, "volume_specification", volume_specification)
        if volumes_per_instance is not None:
            pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)
    @property
    @pulumi.getter(name="volumeSpecification")
    def volume_specification(self) -> 'outputs.ClusterVolumeSpecification':
        return pulumi.get(self, "volume_specification")
    @property
    @pulumi.getter(name="volumesPerInstance")
    def volumes_per_instance(self) -> Optional[int]:
        return pulumi.get(self, "volumes_per_instance")
@pulumi.output_type
class ClusterEbsConfiguration(dict):
    """EBS settings for cluster instances: the list of block device configs
    and whether the instances are EBS-optimized.
    """
    @staticmethod
    def __key_warning(key: str) -> None:
        # Warn when a deprecated camelCase dict key is used; suggest the
        # snake_case property getter instead.
        suggest = None
        if key == "ebsBlockDeviceConfigs":
            suggest = "ebs_block_device_configs"
        elif key == "ebsOptimized":
            suggest = "ebs_optimized"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterEbsConfiguration. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterEbsConfiguration.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterEbsConfiguration.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 ebs_block_device_configs: Optional[Sequence['outputs.ClusterEbsBlockDeviceConfig']] = None,
                 ebs_optimized: Optional[bool] = None):
        # Only present (non-None) values are stored on the output object.
        if ebs_block_device_configs is not None:
            pulumi.set(__self__, "ebs_block_device_configs", ebs_block_device_configs)
        if ebs_optimized is not None:
            pulumi.set(__self__, "ebs_optimized", ebs_optimized)
    @property
    @pulumi.getter(name="ebsBlockDeviceConfigs")
    def ebs_block_device_configs(self) -> Optional[Sequence['outputs.ClusterEbsBlockDeviceConfig']]:
        return pulumi.get(self, "ebs_block_device_configs")
    @property
    @pulumi.getter(name="ebsOptimized")
    def ebs_optimized(self) -> Optional[bool]:
        return pulumi.get(self, "ebs_optimized")
@pulumi.output_type
class ClusterHadoopJarStepConfig(dict):
    """Hadoop JAR step definition: the required `jar` to run plus optional
    `args`, `main_class`, and `step_properties` key/value pairs.
    """
    @staticmethod
    def __key_warning(key: str) -> None:
        # Warn when a deprecated camelCase dict key is used; suggest the
        # snake_case property getter instead.
        suggest = None
        if key == "mainClass":
            suggest = "main_class"
        elif key == "stepProperties":
            suggest = "step_properties"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterHadoopJarStepConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterHadoopJarStepConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterHadoopJarStepConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 jar: str,
                 args: Optional[Sequence[str]] = None,
                 main_class: Optional[str] = None,
                 step_properties: Optional[Sequence['outputs.ClusterKeyValue']] = None):
        pulumi.set(__self__, "jar", jar)
        if args is not None:
            pulumi.set(__self__, "args", args)
        if main_class is not None:
            pulumi.set(__self__, "main_class", main_class)
        if step_properties is not None:
            pulumi.set(__self__, "step_properties", step_properties)
    @property
    @pulumi.getter
    def jar(self) -> str:
        return pulumi.get(self, "jar")
    @property
    @pulumi.getter
    def args(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "args")
    @property
    @pulumi.getter(name="mainClass")
    def main_class(self) -> Optional[str]:
        return pulumi.get(self, "main_class")
    @property
    @pulumi.getter(name="stepProperties")
    def step_properties(self) -> Optional[Sequence['outputs.ClusterKeyValue']]:
        return pulumi.get(self, "step_properties")
@pulumi.output_type
class ClusterInstanceFleetConfig(dict):
    """Instance fleet configuration: candidate instance type configs, launch
    (provisioning) specifications, an optional display name, and target
    on-demand / spot capacities.
    """
    @staticmethod
    def __key_warning(key: str) -> None:
        # Warn when a deprecated camelCase dict key is used; suggest the
        # snake_case property getter instead.
        suggest = None
        if key == "instanceTypeConfigs":
            suggest = "instance_type_configs"
        elif key == "launchSpecifications":
            suggest = "launch_specifications"
        elif key == "targetOnDemandCapacity":
            suggest = "target_on_demand_capacity"
        elif key == "targetSpotCapacity":
            suggest = "target_spot_capacity"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterInstanceFleetConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterInstanceFleetConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterInstanceFleetConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 instance_type_configs: Optional[Sequence['outputs.ClusterInstanceTypeConfig']] = None,
                 launch_specifications: Optional['outputs.ClusterInstanceFleetProvisioningSpecifications'] = None,
                 name: Optional[str] = None,
                 target_on_demand_capacity: Optional[int] = None,
                 target_spot_capacity: Optional[int] = None):
        # Only present (non-None) values are stored on the output object.
        if instance_type_configs is not None:
            pulumi.set(__self__, "instance_type_configs", instance_type_configs)
        if launch_specifications is not None:
            pulumi.set(__self__, "launch_specifications", launch_specifications)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if target_on_demand_capacity is not None:
            pulumi.set(__self__, "target_on_demand_capacity", target_on_demand_capacity)
        if target_spot_capacity is not None:
            pulumi.set(__self__, "target_spot_capacity", target_spot_capacity)
    @property
    @pulumi.getter(name="instanceTypeConfigs")
    def instance_type_configs(self) -> Optional[Sequence['outputs.ClusterInstanceTypeConfig']]:
        return pulumi.get(self, "instance_type_configs")
    @property
    @pulumi.getter(name="launchSpecifications")
    def launch_specifications(self) -> Optional['outputs.ClusterInstanceFleetProvisioningSpecifications']:
        return pulumi.get(self, "launch_specifications")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="targetOnDemandCapacity")
    def target_on_demand_capacity(self) -> Optional[int]:
        return pulumi.get(self, "target_on_demand_capacity")
    @property
    @pulumi.getter(name="targetSpotCapacity")
    def target_spot_capacity(self) -> Optional[int]:
        return pulumi.get(self, "target_spot_capacity")
@pulumi.output_type
class ClusterInstanceFleetProvisioningSpecifications(dict):
    """Provisioning specifications for an instance fleet: optional on-demand
    and spot specification objects.
    """
    @staticmethod
    def __key_warning(key: str) -> None:
        # Warn when a deprecated camelCase dict key is used; suggest the
        # snake_case property getter instead.
        suggest = None
        if key == "onDemandSpecification":
            suggest = "on_demand_specification"
        elif key == "spotSpecification":
            suggest = "spot_specification"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterInstanceFleetProvisioningSpecifications. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterInstanceFleetProvisioningSpecifications.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterInstanceFleetProvisioningSpecifications.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 on_demand_specification: Optional['outputs.ClusterOnDemandProvisioningSpecification'] = None,
                 spot_specification: Optional['outputs.ClusterSpotProvisioningSpecification'] = None):
        # Only present (non-None) values are stored on the output object.
        if on_demand_specification is not None:
            pulumi.set(__self__, "on_demand_specification", on_demand_specification)
        if spot_specification is not None:
            pulumi.set(__self__, "spot_specification", spot_specification)
    @property
    @pulumi.getter(name="onDemandSpecification")
    def on_demand_specification(self) -> Optional['outputs.ClusterOnDemandProvisioningSpecification']:
        return pulumi.get(self, "on_demand_specification")
    @property
    @pulumi.getter(name="spotSpecification")
    def spot_specification(self) -> Optional['outputs.ClusterSpotProvisioningSpecification']:
        return pulumi.get(self, "spot_specification")
@pulumi.output_type
class ClusterInstanceGroupConfig(dict):
    """Instance group configuration: required `instance_count` and
    `instance_type`, plus optional auto-scaling policy, bid price,
    configurations, custom AMI, EBS configuration, market, and name.
    """
    @staticmethod
    def __key_warning(key: str) -> None:
        # Warn when a deprecated camelCase dict key is used; suggest the
        # snake_case property getter instead.
        suggest = None
        if key == "instanceCount":
            suggest = "instance_count"
        elif key == "instanceType":
            suggest = "instance_type"
        elif key == "autoScalingPolicy":
            suggest = "auto_scaling_policy"
        elif key == "bidPrice":
            suggest = "bid_price"
        elif key == "customAmiId":
            suggest = "custom_ami_id"
        elif key == "ebsConfiguration":
            suggest = "ebs_configuration"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterInstanceGroupConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterInstanceGroupConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterInstanceGroupConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 instance_count: int,
                 instance_type: str,
                 auto_scaling_policy: Optional['outputs.ClusterAutoScalingPolicy'] = None,
                 bid_price: Optional[str] = None,
                 configurations: Optional[Sequence['outputs.ClusterConfiguration']] = None,
                 custom_ami_id: Optional[str] = None,
                 ebs_configuration: Optional['outputs.ClusterEbsConfiguration'] = None,
                 market: Optional[str] = None,
                 name: Optional[str] = None):
        # Required fields are always stored; optional fields only when present.
        pulumi.set(__self__, "instance_count", instance_count)
        pulumi.set(__self__, "instance_type", instance_type)
        if auto_scaling_policy is not None:
            pulumi.set(__self__, "auto_scaling_policy", auto_scaling_policy)
        if bid_price is not None:
            pulumi.set(__self__, "bid_price", bid_price)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)
        if custom_ami_id is not None:
            pulumi.set(__self__, "custom_ami_id", custom_ami_id)
        if ebs_configuration is not None:
            pulumi.set(__self__, "ebs_configuration", ebs_configuration)
        if market is not None:
            pulumi.set(__self__, "market", market)
        if name is not None:
            pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter(name="instanceCount")
    def instance_count(self) -> int:
        return pulumi.get(self, "instance_count")
    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        return pulumi.get(self, "instance_type")
    @property
    @pulumi.getter(name="autoScalingPolicy")
    def auto_scaling_policy(self) -> Optional['outputs.ClusterAutoScalingPolicy']:
        return pulumi.get(self, "auto_scaling_policy")
    @property
    @pulumi.getter(name="bidPrice")
    def bid_price(self) -> Optional[str]:
        return pulumi.get(self, "bid_price")
    @property
    @pulumi.getter
    def configurations(self) -> Optional[Sequence['outputs.ClusterConfiguration']]:
        return pulumi.get(self, "configurations")
    @property
    @pulumi.getter(name="customAmiId")
    def custom_ami_id(self) -> Optional[str]:
        return pulumi.get(self, "custom_ami_id")
    @property
    @pulumi.getter(name="ebsConfiguration")
    def ebs_configuration(self) -> Optional['outputs.ClusterEbsConfiguration']:
        return pulumi.get(self, "ebs_configuration")
    @property
    @pulumi.getter
    def market(self) -> Optional[str]:
        return pulumi.get(self, "market")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        return pulumi.get(self, "name")
@pulumi.output_type
class ClusterInstanceTypeConfig(dict):
    """Instance type entry within a fleet: required `instance_type` plus
    optional bid price (absolute or as a percentage of the on-demand price),
    configurations, custom AMI, EBS configuration, and weighted capacity.
    """
    @staticmethod
    def __key_warning(key: str) -> None:
        # Warn when a deprecated camelCase dict key is used; suggest the
        # snake_case property getter instead.
        suggest = None
        if key == "instanceType":
            suggest = "instance_type"
        elif key == "bidPrice":
            suggest = "bid_price"
        elif key == "bidPriceAsPercentageOfOnDemandPrice":
            suggest = "bid_price_as_percentage_of_on_demand_price"
        elif key == "customAmiId":
            suggest = "custom_ami_id"
        elif key == "ebsConfiguration":
            suggest = "ebs_configuration"
        elif key == "weightedCapacity":
            suggest = "weighted_capacity"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterInstanceTypeConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterInstanceTypeConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterInstanceTypeConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 instance_type: str,
                 bid_price: Optional[str] = None,
                 bid_price_as_percentage_of_on_demand_price: Optional[float] = None,
                 configurations: Optional[Sequence['outputs.ClusterConfiguration']] = None,
                 custom_ami_id: Optional[str] = None,
                 ebs_configuration: Optional['outputs.ClusterEbsConfiguration'] = None,
                 weighted_capacity: Optional[int] = None):
        # Required field always stored; optional fields only when present.
        pulumi.set(__self__, "instance_type", instance_type)
        if bid_price is not None:
            pulumi.set(__self__, "bid_price", bid_price)
        if bid_price_as_percentage_of_on_demand_price is not None:
            pulumi.set(__self__, "bid_price_as_percentage_of_on_demand_price", bid_price_as_percentage_of_on_demand_price)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)
        if custom_ami_id is not None:
            pulumi.set(__self__, "custom_ami_id", custom_ami_id)
        if ebs_configuration is not None:
            pulumi.set(__self__, "ebs_configuration", ebs_configuration)
        if weighted_capacity is not None:
            pulumi.set(__self__, "weighted_capacity", weighted_capacity)
    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        return pulumi.get(self, "instance_type")
    @property
    @pulumi.getter(name="bidPrice")
    def bid_price(self) -> Optional[str]:
        return pulumi.get(self, "bid_price")
    @property
    @pulumi.getter(name="bidPriceAsPercentageOfOnDemandPrice")
    def bid_price_as_percentage_of_on_demand_price(self) -> Optional[float]:
        return pulumi.get(self, "bid_price_as_percentage_of_on_demand_price")
    @property
    @pulumi.getter
    def configurations(self) -> Optional[Sequence['outputs.ClusterConfiguration']]:
        return pulumi.get(self, "configurations")
    @property
    @pulumi.getter(name="customAmiId")
    def custom_ami_id(self) -> Optional[str]:
        return pulumi.get(self, "custom_ami_id")
    @property
    @pulumi.getter(name="ebsConfiguration")
    def ebs_configuration(self) -> Optional['outputs.ClusterEbsConfiguration']:
        return pulumi.get(self, "ebs_configuration")
    @property
    @pulumi.getter(name="weightedCapacity")
    def weighted_capacity(self) -> Optional[int]:
        return pulumi.get(self, "weighted_capacity")
@pulumi.output_type
class ClusterJobFlowInstancesConfig(dict):
    """Instance configuration for a cluster job flow: security groups,
    core/master instance fleets or groups, EC2 key/subnet settings, Hadoop
    version, keep-alive and termination-protection flags, and placement.
    All fields are optional.
    """
    @staticmethod
    def __key_warning(key: str) -> None:
        # Warn when a deprecated camelCase dict key is used; suggest the
        # snake_case property getter instead.
        suggest = None
        if key == "additionalMasterSecurityGroups":
            suggest = "additional_master_security_groups"
        elif key == "additionalSlaveSecurityGroups":
            suggest = "additional_slave_security_groups"
        elif key == "coreInstanceFleet":
            suggest = "core_instance_fleet"
        elif key == "coreInstanceGroup":
            suggest = "core_instance_group"
        elif key == "ec2KeyName":
            suggest = "ec2_key_name"
        elif key == "ec2SubnetId":
            suggest = "ec2_subnet_id"
        elif key == "ec2SubnetIds":
            suggest = "ec2_subnet_ids"
        elif key == "emrManagedMasterSecurityGroup":
            suggest = "emr_managed_master_security_group"
        elif key == "emrManagedSlaveSecurityGroup":
            suggest = "emr_managed_slave_security_group"
        elif key == "hadoopVersion":
            suggest = "hadoop_version"
        elif key == "keepJobFlowAliveWhenNoSteps":
            suggest = "keep_job_flow_alive_when_no_steps"
        elif key == "masterInstanceFleet":
            suggest = "master_instance_fleet"
        elif key == "masterInstanceGroup":
            suggest = "master_instance_group"
        elif key == "serviceAccessSecurityGroup":
            suggest = "service_access_security_group"
        elif key == "terminationProtected":
            suggest = "termination_protected"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterJobFlowInstancesConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterJobFlowInstancesConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterJobFlowInstancesConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 additional_master_security_groups: Optional[Sequence[str]] = None,
                 additional_slave_security_groups: Optional[Sequence[str]] = None,
                 core_instance_fleet: Optional['outputs.ClusterInstanceFleetConfig'] = None,
                 core_instance_group: Optional['outputs.ClusterInstanceGroupConfig'] = None,
                 ec2_key_name: Optional[str] = None,
                 ec2_subnet_id: Optional[str] = None,
                 ec2_subnet_ids: Optional[Sequence[str]] = None,
                 emr_managed_master_security_group: Optional[str] = None,
                 emr_managed_slave_security_group: Optional[str] = None,
                 hadoop_version: Optional[str] = None,
                 keep_job_flow_alive_when_no_steps: Optional[bool] = None,
                 master_instance_fleet: Optional['outputs.ClusterInstanceFleetConfig'] = None,
                 master_instance_group: Optional['outputs.ClusterInstanceGroupConfig'] = None,
                 placement: Optional['outputs.ClusterPlacementType'] = None,
                 service_access_security_group: Optional[str] = None,
                 termination_protected: Optional[bool] = None):
        # Only present (non-None) values are stored on the output object.
        if additional_master_security_groups is not None:
            pulumi.set(__self__, "additional_master_security_groups", additional_master_security_groups)
        if additional_slave_security_groups is not None:
            pulumi.set(__self__, "additional_slave_security_groups", additional_slave_security_groups)
        if core_instance_fleet is not None:
            pulumi.set(__self__, "core_instance_fleet", core_instance_fleet)
        if core_instance_group is not None:
            pulumi.set(__self__, "core_instance_group", core_instance_group)
        if ec2_key_name is not None:
            pulumi.set(__self__, "ec2_key_name", ec2_key_name)
        if ec2_subnet_id is not None:
            pulumi.set(__self__, "ec2_subnet_id", ec2_subnet_id)
        if ec2_subnet_ids is not None:
            pulumi.set(__self__, "ec2_subnet_ids", ec2_subnet_ids)
        if emr_managed_master_security_group is not None:
            pulumi.set(__self__, "emr_managed_master_security_group", emr_managed_master_security_group)
        if emr_managed_slave_security_group is not None:
            pulumi.set(__self__, "emr_managed_slave_security_group", emr_managed_slave_security_group)
        if hadoop_version is not None:
            pulumi.set(__self__, "hadoop_version", hadoop_version)
        if keep_job_flow_alive_when_no_steps is not None:
            pulumi.set(__self__, "keep_job_flow_alive_when_no_steps", keep_job_flow_alive_when_no_steps)
        if master_instance_fleet is not None:
            pulumi.set(__self__, "master_instance_fleet", master_instance_fleet)
        if master_instance_group is not None:
            pulumi.set(__self__, "master_instance_group", master_instance_group)
        if placement is not None:
            pulumi.set(__self__, "placement", placement)
        if service_access_security_group is not None:
            pulumi.set(__self__, "service_access_security_group", service_access_security_group)
        if termination_protected is not None:
            pulumi.set(__self__, "termination_protected", termination_protected)
    @property
    @pulumi.getter(name="additionalMasterSecurityGroups")
    def additional_master_security_groups(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "additional_master_security_groups")
    @property
    @pulumi.getter(name="additionalSlaveSecurityGroups")
    def additional_slave_security_groups(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "additional_slave_security_groups")
    @property
    @pulumi.getter(name="coreInstanceFleet")
    def core_instance_fleet(self) -> Optional['outputs.ClusterInstanceFleetConfig']:
        return pulumi.get(self, "core_instance_fleet")
    @property
    @pulumi.getter(name="coreInstanceGroup")
    def core_instance_group(self) -> Optional['outputs.ClusterInstanceGroupConfig']:
        return pulumi.get(self, "core_instance_group")
    @property
    @pulumi.getter(name="ec2KeyName")
    def ec2_key_name(self) -> Optional[str]:
        return pulumi.get(self, "ec2_key_name")
    @property
    @pulumi.getter(name="ec2SubnetId")
    def ec2_subnet_id(self) -> Optional[str]:
        return pulumi.get(self, "ec2_subnet_id")
    @property
    @pulumi.getter(name="ec2SubnetIds")
    def ec2_subnet_ids(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "ec2_subnet_ids")
    @property
    @pulumi.getter(name="emrManagedMasterSecurityGroup")
    def emr_managed_master_security_group(self) -> Optional[str]:
        return pulumi.get(self, "emr_managed_master_security_group")
    @property
    @pulumi.getter(name="emrManagedSlaveSecurityGroup")
    def emr_managed_slave_security_group(self) -> Optional[str]:
        return pulumi.get(self, "emr_managed_slave_security_group")
    @property
    @pulumi.getter(name="hadoopVersion")
    def hadoop_version(self) -> Optional[str]:
        return pulumi.get(self, "hadoop_version")
    @property
    @pulumi.getter(name="keepJobFlowAliveWhenNoSteps")
    def keep_job_flow_alive_when_no_steps(self) -> Optional[bool]:
        return pulumi.get(self, "keep_job_flow_alive_when_no_steps")
    @property
    @pulumi.getter(name="masterInstanceFleet")
    def master_instance_fleet(self) -> Optional['outputs.ClusterInstanceFleetConfig']:
        return pulumi.get(self, "master_instance_fleet")
    @property
    @pulumi.getter(name="masterInstanceGroup")
    def master_instance_group(self) -> Optional['outputs.ClusterInstanceGroupConfig']:
        return pulumi.get(self, "master_instance_group")
    @property
    @pulumi.getter
    def placement(self) -> Optional['outputs.ClusterPlacementType']:
        return pulumi.get(self, "placement")
    @property
    @pulumi.getter(name="serviceAccessSecurityGroup")
    def service_access_security_group(self) -> Optional[str]:
        return pulumi.get(self, "service_access_security_group")
    @property
    @pulumi.getter(name="terminationProtected")
    def termination_protected(self) -> Optional[bool]:
        return pulumi.get(self, "termination_protected")
@pulumi.output_type
class ClusterKerberosAttributes(dict):
    """Kerberos attributes for a cluster: required `kdc_admin_password` and
    `realm`, with optional AD domain-join credentials and cross-realm trust
    principal password.
    """
    @staticmethod
    def __key_warning(key: str) -> None:
        # Warn when a deprecated camelCase dict key is used; suggest the
        # snake_case property getter instead.
        suggest = None
        if key == "kdcAdminPassword":
            suggest = "kdc_admin_password"
        elif key == "aDDomainJoinPassword":
            suggest = "a_d_domain_join_password"
        elif key == "aDDomainJoinUser":
            suggest = "a_d_domain_join_user"
        elif key == "crossRealmTrustPrincipalPassword":
            suggest = "cross_realm_trust_principal_password"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterKerberosAttributes. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterKerberosAttributes.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterKerberosAttributes.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 kdc_admin_password: str,
                 realm: str,
                 a_d_domain_join_password: Optional[str] = None,
                 a_d_domain_join_user: Optional[str] = None,
                 cross_realm_trust_principal_password: Optional[str] = None):
        # Required fields are always stored; optional fields only when present.
        pulumi.set(__self__, "kdc_admin_password", kdc_admin_password)
        pulumi.set(__self__, "realm", realm)
        if a_d_domain_join_password is not None:
            pulumi.set(__self__, "a_d_domain_join_password", a_d_domain_join_password)
        if a_d_domain_join_user is not None:
            pulumi.set(__self__, "a_d_domain_join_user", a_d_domain_join_user)
        if cross_realm_trust_principal_password is not None:
            pulumi.set(__self__, "cross_realm_trust_principal_password", cross_realm_trust_principal_password)
    @property
    @pulumi.getter(name="kdcAdminPassword")
    def kdc_admin_password(self) -> str:
        return pulumi.get(self, "kdc_admin_password")
    @property
    @pulumi.getter
    def realm(self) -> str:
        return pulumi.get(self, "realm")
    @property
    @pulumi.getter(name="aDDomainJoinPassword")
    def a_d_domain_join_password(self) -> Optional[str]:
        return pulumi.get(self, "a_d_domain_join_password")
    @property
    @pulumi.getter(name="aDDomainJoinUser")
    def a_d_domain_join_user(self) -> Optional[str]:
        return pulumi.get(self, "a_d_domain_join_user")
    @property
    @pulumi.getter(name="crossRealmTrustPrincipalPassword")
    def cross_realm_trust_principal_password(self) -> Optional[str]:
        return pulumi.get(self, "cross_realm_trust_principal_password")
@pulumi.output_type
class ClusterKeyValue(dict):
    """A simple optional key/value pair. No camelCase translation is needed,
    so there is no key-warning machinery on this type.
    """
    def __init__(__self__, *,
                 key: Optional[str] = None,
                 value: Optional[str] = None):
        # Only present (non-None) values are stored on the output object.
        if key is not None:
            pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> Optional[str]:
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        return pulumi.get(self, "value")
@pulumi.output_type
class ClusterManagedScalingPolicy(dict):
    """Managed scaling policy wrapper holding optional `compute_limits`."""
    @staticmethod
    def __key_warning(key: str) -> None:
        # Warn when a deprecated camelCase dict key is used; suggest the
        # snake_case property getter instead.
        suggest = None
        if key == "computeLimits":
            suggest = "compute_limits"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterManagedScalingPolicy. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterManagedScalingPolicy.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterManagedScalingPolicy.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 compute_limits: Optional['outputs.ClusterComputeLimits'] = None):
        if compute_limits is not None:
            pulumi.set(__self__, "compute_limits", compute_limits)
    @property
    @pulumi.getter(name="computeLimits")
    def compute_limits(self) -> Optional['outputs.ClusterComputeLimits']:
        return pulumi.get(self, "compute_limits")
@pulumi.output_type
class ClusterMetricDimension(dict):
    """A required metric dimension key/value pair. No camelCase translation
    is needed, so there is no key-warning machinery on this type.
    """
    def __init__(__self__, *,
                 key: str,
                 value: str):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> str:
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def value(self) -> str:
        return pulumi.get(self, "value")
@pulumi.output_type
class ClusterOnDemandProvisioningSpecification(dict):
    """On-demand provisioning specification carrying a required
    `allocation_strategy`.
    """
    @staticmethod
    def __key_warning(key: str) -> None:
        # Warn when a deprecated camelCase dict key is used; suggest the
        # snake_case property getter instead.
        suggest = None
        if key == "allocationStrategy":
            suggest = "allocation_strategy"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterOnDemandProvisioningSpecification. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterOnDemandProvisioningSpecification.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterOnDemandProvisioningSpecification.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 allocation_strategy: str):
        pulumi.set(__self__, "allocation_strategy", allocation_strategy)
    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> str:
        return pulumi.get(self, "allocation_strategy")
@pulumi.output_type
class ClusterPlacementType(dict):
    """Placement information carrying a required `availability_zone`."""
    @staticmethod
    def __key_warning(key: str) -> None:
        # Warn when a deprecated camelCase dict key is used; suggest the
        # snake_case property getter instead.
        suggest = None
        if key == "availabilityZone":
            suggest = "availability_zone"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterPlacementType. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterPlacementType.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterPlacementType.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 availability_zone: str):
        pulumi.set(__self__, "availability_zone", availability_zone)
    @property
    @pulumi.getter(name="availabilityZone")
    def availability_zone(self) -> str:
        return pulumi.get(self, "availability_zone")
@pulumi.output_type
class ClusterScalingAction(dict):
    """Scaling action: a required `simple_scaling_policy_configuration` and
    an optional `market`.
    """
    @staticmethod
    def __key_warning(key: str) -> None:
        # Warn when a deprecated camelCase dict key is used; suggest the
        # snake_case property getter instead.
        suggest = None
        if key == "simpleScalingPolicyConfiguration":
            suggest = "simple_scaling_policy_configuration"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterScalingAction. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterScalingAction.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterScalingAction.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 simple_scaling_policy_configuration: 'outputs.ClusterSimpleScalingPolicyConfiguration',
                 market: Optional[str] = None):
        pulumi.set(__self__, "simple_scaling_policy_configuration", simple_scaling_policy_configuration)
        if market is not None:
            pulumi.set(__self__, "market", market)
    @property
    @pulumi.getter(name="simpleScalingPolicyConfiguration")
    def simple_scaling_policy_configuration(self) -> 'outputs.ClusterSimpleScalingPolicyConfiguration':
        return pulumi.get(self, "simple_scaling_policy_configuration")
    @property
    @pulumi.getter
    def market(self) -> Optional[str]:
        return pulumi.get(self, "market")
@pulumi.output_type
class ClusterScalingConstraints(dict):
    """Scaling constraints: required `max_capacity` and `min_capacity`."""
    @staticmethod
    def __key_warning(key: str) -> None:
        # Warn when a deprecated camelCase dict key is used; suggest the
        # snake_case property getter instead.
        suggest = None
        if key == "maxCapacity":
            suggest = "max_capacity"
        elif key == "minCapacity":
            suggest = "min_capacity"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterScalingConstraints. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterScalingConstraints.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterScalingConstraints.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 max_capacity: int,
                 min_capacity: int):
        pulumi.set(__self__, "max_capacity", max_capacity)
        pulumi.set(__self__, "min_capacity", min_capacity)
    @property
    @pulumi.getter(name="maxCapacity")
    def max_capacity(self) -> int:
        return pulumi.get(self, "max_capacity")
    @property
    @pulumi.getter(name="minCapacity")
    def min_capacity(self) -> int:
        return pulumi.get(self, "min_capacity")
@pulumi.output_type
class ClusterScalingRule(dict):
    """Scaling rule: required `action`, `name`, and `trigger`, with an
    optional `description`. No camelCase translation is needed, so there is
    no key-warning machinery on this type.
    """
    def __init__(__self__, *,
                 action: 'outputs.ClusterScalingAction',
                 name: str,
                 trigger: 'outputs.ClusterScalingTrigger',
                 description: Optional[str] = None):
        # Required fields are always stored; the description only when present.
        pulumi.set(__self__, "action", action)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "trigger", trigger)
        if description is not None:
            pulumi.set(__self__, "description", description)
    @property
    @pulumi.getter
    def action(self) -> 'outputs.ClusterScalingAction':
        return pulumi.get(self, "action")
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def trigger(self) -> 'outputs.ClusterScalingTrigger':
        return pulumi.get(self, "trigger")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        return pulumi.get(self, "description")
@pulumi.output_type
class ClusterScalingTrigger(dict):
    """Scaling trigger wrapping a required `cloud_watch_alarm_definition`."""
    @staticmethod
    def __key_warning(key: str) -> None:
        # Warn when a deprecated camelCase dict key is used; suggest the
        # snake_case property getter instead.
        suggest = None
        if key == "cloudWatchAlarmDefinition":
            suggest = "cloud_watch_alarm_definition"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterScalingTrigger. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterScalingTrigger.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterScalingTrigger.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 cloud_watch_alarm_definition: 'outputs.ClusterCloudWatchAlarmDefinition'):
        pulumi.set(__self__, "cloud_watch_alarm_definition", cloud_watch_alarm_definition)
    @property
    @pulumi.getter(name="cloudWatchAlarmDefinition")
    def cloud_watch_alarm_definition(self) -> 'outputs.ClusterCloudWatchAlarmDefinition':
        return pulumi.get(self, "cloud_watch_alarm_definition")
@pulumi.output_type
class ClusterScriptBootstrapActionConfig(dict):
    """Script bootstrap action: a required `path` and optional `args`.
    No camelCase translation is needed, so there is no key-warning
    machinery on this type.
    """
    def __init__(__self__, *,
                 path: str,
                 args: Optional[Sequence[str]] = None):
        pulumi.set(__self__, "path", path)
        if args is not None:
            pulumi.set(__self__, "args", args)
    @property
    @pulumi.getter
    def path(self) -> str:
        return pulumi.get(self, "path")
    @property
    @pulumi.getter
    def args(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "args")
@pulumi.output_type
class ClusterSimpleScalingPolicyConfiguration(dict):
    """Simple scaling policy: required `scaling_adjustment` plus optional
    `adjustment_type` and `cool_down` period.
    """
    @staticmethod
    def __key_warning(key: str) -> None:
        # Warn when a deprecated camelCase dict key is used; suggest the
        # snake_case property getter instead.
        suggest = None
        if key == "scalingAdjustment":
            suggest = "scaling_adjustment"
        elif key == "adjustmentType":
            suggest = "adjustment_type"
        elif key == "coolDown":
            suggest = "cool_down"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterSimpleScalingPolicyConfiguration. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterSimpleScalingPolicyConfiguration.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterSimpleScalingPolicyConfiguration.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 scaling_adjustment: int,
                 adjustment_type: Optional[str] = None,
                 cool_down: Optional[int] = None):
        # Required field always stored; optional fields only when present.
        pulumi.set(__self__, "scaling_adjustment", scaling_adjustment)
        if adjustment_type is not None:
            pulumi.set(__self__, "adjustment_type", adjustment_type)
        if cool_down is not None:
            pulumi.set(__self__, "cool_down", cool_down)
    @property
    @pulumi.getter(name="scalingAdjustment")
    def scaling_adjustment(self) -> int:
        return pulumi.get(self, "scaling_adjustment")
    @property
    @pulumi.getter(name="adjustmentType")
    def adjustment_type(self) -> Optional[str]:
        return pulumi.get(self, "adjustment_type")
    @property
    @pulumi.getter(name="coolDown")
    def cool_down(self) -> Optional[int]:
        return pulumi.get(self, "cool_down")
@pulumi.output_type
class ClusterSpotProvisioningSpecification(dict):
    """Dict-backed output type for Spot provisioning: required ``timeout_action`` and ``timeout_duration_minutes`` plus optional ``allocation_strategy`` and ``block_duration_minutes``."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "timeoutAction":
            suggest = "timeout_action"
        elif key == "timeoutDurationMinutes":
            suggest = "timeout_duration_minutes"
        elif key == "allocationStrategy":
            suggest = "allocation_strategy"
        elif key == "blockDurationMinutes":
            suggest = "block_duration_minutes"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterSpotProvisioningSpecification. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterSpotProvisioningSpecification.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterSpotProvisioningSpecification.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 timeout_action: str,
                 timeout_duration_minutes: int,
                 allocation_strategy: Optional[str] = None,
                 block_duration_minutes: Optional[int] = None):
        # Only non-None optional values are stored.
        pulumi.set(__self__, "timeout_action", timeout_action)
        pulumi.set(__self__, "timeout_duration_minutes", timeout_duration_minutes)
        if allocation_strategy is not None:
            pulumi.set(__self__, "allocation_strategy", allocation_strategy)
        if block_duration_minutes is not None:
            pulumi.set(__self__, "block_duration_minutes", block_duration_minutes)
    @property
    @pulumi.getter(name="timeoutAction")
    def timeout_action(self) -> str:
        return pulumi.get(self, "timeout_action")
    @property
    @pulumi.getter(name="timeoutDurationMinutes")
    def timeout_duration_minutes(self) -> int:
        return pulumi.get(self, "timeout_duration_minutes")
    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> Optional[str]:
        return pulumi.get(self, "allocation_strategy")
    @property
    @pulumi.getter(name="blockDurationMinutes")
    def block_duration_minutes(self) -> Optional[int]:
        return pulumi.get(self, "block_duration_minutes")
@pulumi.output_type
class ClusterStepConfig(dict):
    """Dict-backed output type for a cluster step: required ``hadoop_jar_step`` and ``name`` plus optional ``action_on_failure``."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "hadoopJarStep":
            suggest = "hadoop_jar_step"
        elif key == "actionOnFailure":
            suggest = "action_on_failure"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterStepConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterStepConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterStepConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 hadoop_jar_step: 'outputs.ClusterHadoopJarStepConfig',
                 name: str,
                 action_on_failure: Optional[str] = None):
        # Only non-None optional values are stored.
        pulumi.set(__self__, "hadoop_jar_step", hadoop_jar_step)
        pulumi.set(__self__, "name", name)
        if action_on_failure is not None:
            pulumi.set(__self__, "action_on_failure", action_on_failure)
    @property
    @pulumi.getter(name="hadoopJarStep")
    def hadoop_jar_step(self) -> 'outputs.ClusterHadoopJarStepConfig':
        return pulumi.get(self, "hadoop_jar_step")
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="actionOnFailure")
    def action_on_failure(self) -> Optional[str]:
        return pulumi.get(self, "action_on_failure")
@pulumi.output_type
class ClusterTag(dict):
    """Dict-backed output type for a cluster tag: a required ``key``/``value`` string pair."""
    def __init__(__self__, *,
                 key: str,
                 value: str):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> str:
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def value(self) -> str:
        return pulumi.get(self, "value")
@pulumi.output_type
class ClusterVolumeSpecification(dict):
    """Dict-backed output type for an EBS volume spec: required ``size_in_gb`` and ``volume_type`` plus optional ``iops``."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "sizeInGB":
            suggest = "size_in_gb"
        elif key == "volumeType":
            suggest = "volume_type"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterVolumeSpecification. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ClusterVolumeSpecification.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ClusterVolumeSpecification.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 size_in_gb: int,
                 volume_type: str,
                 iops: Optional[int] = None):
        # Only non-None optional values are stored.
        pulumi.set(__self__, "size_in_gb", size_in_gb)
        pulumi.set(__self__, "volume_type", volume_type)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)
    @property
    @pulumi.getter(name="sizeInGB")
    def size_in_gb(self) -> int:
        return pulumi.get(self, "size_in_gb")
    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> str:
        return pulumi.get(self, "volume_type")
    @property
    @pulumi.getter
    def iops(self) -> Optional[int]:
        return pulumi.get(self, "iops")
@pulumi.output_type
class InstanceFleetConfigConfiguration(dict):
    """Dict-backed, recursively nestable application configuration: optional ``classification``, ``configuration_properties``, and child ``configurations``."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "configurationProperties":
            suggest = "configuration_properties"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigConfiguration. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigConfiguration.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigConfiguration.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 classification: Optional[str] = None,
                 configuration_properties: Optional[Any] = None,
                 configurations: Optional[Sequence['outputs.InstanceFleetConfigConfiguration']] = None):
        # All fields are optional; only non-None values are stored.
        if classification is not None:
            pulumi.set(__self__, "classification", classification)
        if configuration_properties is not None:
            pulumi.set(__self__, "configuration_properties", configuration_properties)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)
    @property
    @pulumi.getter
    def classification(self) -> Optional[str]:
        return pulumi.get(self, "classification")
    @property
    @pulumi.getter(name="configurationProperties")
    def configuration_properties(self) -> Optional[Any]:
        return pulumi.get(self, "configuration_properties")
    @property
    @pulumi.getter
    def configurations(self) -> Optional[Sequence['outputs.InstanceFleetConfigConfiguration']]:
        return pulumi.get(self, "configurations")
@pulumi.output_type
class InstanceFleetConfigEbsBlockDeviceConfig(dict):
    """Dict-backed output type pairing a required ``volume_specification`` with an optional ``volumes_per_instance`` count."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "volumeSpecification":
            suggest = "volume_specification"
        elif key == "volumesPerInstance":
            suggest = "volumes_per_instance"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigEbsBlockDeviceConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigEbsBlockDeviceConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigEbsBlockDeviceConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 volume_specification: 'outputs.InstanceFleetConfigVolumeSpecification',
                 volumes_per_instance: Optional[int] = None):
        # Only non-None optional values are stored.
        pulumi.set(__self__, "volume_specification", volume_specification)
        if volumes_per_instance is not None:
            pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)
    @property
    @pulumi.getter(name="volumeSpecification")
    def volume_specification(self) -> 'outputs.InstanceFleetConfigVolumeSpecification':
        return pulumi.get(self, "volume_specification")
    @property
    @pulumi.getter(name="volumesPerInstance")
    def volumes_per_instance(self) -> Optional[int]:
        return pulumi.get(self, "volumes_per_instance")
@pulumi.output_type
class InstanceFleetConfigEbsConfiguration(dict):
    """Dict-backed EBS configuration: optional ``ebs_block_device_configs`` list and ``ebs_optimized`` flag."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "ebsBlockDeviceConfigs":
            suggest = "ebs_block_device_configs"
        elif key == "ebsOptimized":
            suggest = "ebs_optimized"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigEbsConfiguration. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigEbsConfiguration.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigEbsConfiguration.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 ebs_block_device_configs: Optional[Sequence['outputs.InstanceFleetConfigEbsBlockDeviceConfig']] = None,
                 ebs_optimized: Optional[bool] = None):
        # All fields are optional; only non-None values are stored.
        if ebs_block_device_configs is not None:
            pulumi.set(__self__, "ebs_block_device_configs", ebs_block_device_configs)
        if ebs_optimized is not None:
            pulumi.set(__self__, "ebs_optimized", ebs_optimized)
    @property
    @pulumi.getter(name="ebsBlockDeviceConfigs")
    def ebs_block_device_configs(self) -> Optional[Sequence['outputs.InstanceFleetConfigEbsBlockDeviceConfig']]:
        return pulumi.get(self, "ebs_block_device_configs")
    @property
    @pulumi.getter(name="ebsOptimized")
    def ebs_optimized(self) -> Optional[bool]:
        return pulumi.get(self, "ebs_optimized")
@pulumi.output_type
class InstanceFleetConfigInstanceFleetProvisioningSpecifications(dict):
    """Dict-backed provisioning specifications: optional ``on_demand_specification`` and ``spot_specification``."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "onDemandSpecification":
            suggest = "on_demand_specification"
        elif key == "spotSpecification":
            suggest = "spot_specification"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigInstanceFleetProvisioningSpecifications. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigInstanceFleetProvisioningSpecifications.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigInstanceFleetProvisioningSpecifications.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 on_demand_specification: Optional['outputs.InstanceFleetConfigOnDemandProvisioningSpecification'] = None,
                 spot_specification: Optional['outputs.InstanceFleetConfigSpotProvisioningSpecification'] = None):
        # All fields are optional; only non-None values are stored.
        if on_demand_specification is not None:
            pulumi.set(__self__, "on_demand_specification", on_demand_specification)
        if spot_specification is not None:
            pulumi.set(__self__, "spot_specification", spot_specification)
    @property
    @pulumi.getter(name="onDemandSpecification")
    def on_demand_specification(self) -> Optional['outputs.InstanceFleetConfigOnDemandProvisioningSpecification']:
        return pulumi.get(self, "on_demand_specification")
    @property
    @pulumi.getter(name="spotSpecification")
    def spot_specification(self) -> Optional['outputs.InstanceFleetConfigSpotProvisioningSpecification']:
        return pulumi.get(self, "spot_specification")
@pulumi.output_type
class InstanceFleetConfigInstanceTypeConfig(dict):
    """Dict-backed instance-type config for a fleet: required ``instance_type`` plus optional bid pricing, configurations, custom AMI, EBS, and weighting fields."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "instanceType":
            suggest = "instance_type"
        elif key == "bidPrice":
            suggest = "bid_price"
        elif key == "bidPriceAsPercentageOfOnDemandPrice":
            suggest = "bid_price_as_percentage_of_on_demand_price"
        elif key == "customAmiId":
            suggest = "custom_ami_id"
        elif key == "ebsConfiguration":
            suggest = "ebs_configuration"
        elif key == "weightedCapacity":
            suggest = "weighted_capacity"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigInstanceTypeConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigInstanceTypeConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigInstanceTypeConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 instance_type: str,
                 bid_price: Optional[str] = None,
                 bid_price_as_percentage_of_on_demand_price: Optional[float] = None,
                 configurations: Optional[Sequence['outputs.InstanceFleetConfigConfiguration']] = None,
                 custom_ami_id: Optional[str] = None,
                 ebs_configuration: Optional['outputs.InstanceFleetConfigEbsConfiguration'] = None,
                 weighted_capacity: Optional[int] = None):
        # Only non-None optional values are stored.
        pulumi.set(__self__, "instance_type", instance_type)
        if bid_price is not None:
            pulumi.set(__self__, "bid_price", bid_price)
        if bid_price_as_percentage_of_on_demand_price is not None:
            pulumi.set(__self__, "bid_price_as_percentage_of_on_demand_price", bid_price_as_percentage_of_on_demand_price)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)
        if custom_ami_id is not None:
            pulumi.set(__self__, "custom_ami_id", custom_ami_id)
        if ebs_configuration is not None:
            pulumi.set(__self__, "ebs_configuration", ebs_configuration)
        if weighted_capacity is not None:
            pulumi.set(__self__, "weighted_capacity", weighted_capacity)
    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        return pulumi.get(self, "instance_type")
    @property
    @pulumi.getter(name="bidPrice")
    def bid_price(self) -> Optional[str]:
        return pulumi.get(self, "bid_price")
    @property
    @pulumi.getter(name="bidPriceAsPercentageOfOnDemandPrice")
    def bid_price_as_percentage_of_on_demand_price(self) -> Optional[float]:
        return pulumi.get(self, "bid_price_as_percentage_of_on_demand_price")
    @property
    @pulumi.getter
    def configurations(self) -> Optional[Sequence['outputs.InstanceFleetConfigConfiguration']]:
        return pulumi.get(self, "configurations")
    @property
    @pulumi.getter(name="customAmiId")
    def custom_ami_id(self) -> Optional[str]:
        return pulumi.get(self, "custom_ami_id")
    @property
    @pulumi.getter(name="ebsConfiguration")
    def ebs_configuration(self) -> Optional['outputs.InstanceFleetConfigEbsConfiguration']:
        return pulumi.get(self, "ebs_configuration")
    @property
    @pulumi.getter(name="weightedCapacity")
    def weighted_capacity(self) -> Optional[int]:
        return pulumi.get(self, "weighted_capacity")
@pulumi.output_type
class InstanceFleetConfigOnDemandProvisioningSpecification(dict):
    """Dict-backed on-demand provisioning specification with a single required ``allocation_strategy``."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "allocationStrategy":
            suggest = "allocation_strategy"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigOnDemandProvisioningSpecification. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigOnDemandProvisioningSpecification.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigOnDemandProvisioningSpecification.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 allocation_strategy: str):
        pulumi.set(__self__, "allocation_strategy", allocation_strategy)
    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> str:
        return pulumi.get(self, "allocation_strategy")
@pulumi.output_type
class InstanceFleetConfigSpotProvisioningSpecification(dict):
    """Dict-backed Spot provisioning specification: required ``timeout_action`` and ``timeout_duration_minutes`` plus optional ``allocation_strategy`` and ``block_duration_minutes``."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "timeoutAction":
            suggest = "timeout_action"
        elif key == "timeoutDurationMinutes":
            suggest = "timeout_duration_minutes"
        elif key == "allocationStrategy":
            suggest = "allocation_strategy"
        elif key == "blockDurationMinutes":
            suggest = "block_duration_minutes"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigSpotProvisioningSpecification. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigSpotProvisioningSpecification.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigSpotProvisioningSpecification.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 timeout_action: str,
                 timeout_duration_minutes: int,
                 allocation_strategy: Optional[str] = None,
                 block_duration_minutes: Optional[int] = None):
        # Only non-None optional values are stored.
        pulumi.set(__self__, "timeout_action", timeout_action)
        pulumi.set(__self__, "timeout_duration_minutes", timeout_duration_minutes)
        if allocation_strategy is not None:
            pulumi.set(__self__, "allocation_strategy", allocation_strategy)
        if block_duration_minutes is not None:
            pulumi.set(__self__, "block_duration_minutes", block_duration_minutes)
    @property
    @pulumi.getter(name="timeoutAction")
    def timeout_action(self) -> str:
        return pulumi.get(self, "timeout_action")
    @property
    @pulumi.getter(name="timeoutDurationMinutes")
    def timeout_duration_minutes(self) -> int:
        return pulumi.get(self, "timeout_duration_minutes")
    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> Optional[str]:
        return pulumi.get(self, "allocation_strategy")
    @property
    @pulumi.getter(name="blockDurationMinutes")
    def block_duration_minutes(self) -> Optional[int]:
        return pulumi.get(self, "block_duration_minutes")
@pulumi.output_type
class InstanceFleetConfigVolumeSpecification(dict):
    """Dict-backed EBS volume spec for a fleet: required ``size_in_gb`` and ``volume_type`` plus optional ``iops``."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "sizeInGB":
            suggest = "size_in_gb"
        elif key == "volumeType":
            suggest = "volume_type"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigVolumeSpecification. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigVolumeSpecification.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigVolumeSpecification.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 size_in_gb: int,
                 volume_type: str,
                 iops: Optional[int] = None):
        # Only non-None optional values are stored.
        pulumi.set(__self__, "size_in_gb", size_in_gb)
        pulumi.set(__self__, "volume_type", volume_type)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)
    @property
    @pulumi.getter(name="sizeInGB")
    def size_in_gb(self) -> int:
        return pulumi.get(self, "size_in_gb")
    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> str:
        return pulumi.get(self, "volume_type")
    @property
    @pulumi.getter
    def iops(self) -> Optional[int]:
        return pulumi.get(self, "iops")
@pulumi.output_type
class InstanceGroupConfigAutoScalingPolicy(dict):
    """Dict-backed auto-scaling policy: required ``constraints`` and a sequence of scaling ``rules``."""
    def __init__(__self__, *,
                 constraints: 'outputs.InstanceGroupConfigScalingConstraints',
                 rules: Sequence['outputs.InstanceGroupConfigScalingRule']):
        pulumi.set(__self__, "constraints", constraints)
        pulumi.set(__self__, "rules", rules)
    @property
    @pulumi.getter
    def constraints(self) -> 'outputs.InstanceGroupConfigScalingConstraints':
        return pulumi.get(self, "constraints")
    @property
    @pulumi.getter
    def rules(self) -> Sequence['outputs.InstanceGroupConfigScalingRule']:
        return pulumi.get(self, "rules")
@pulumi.output_type
class InstanceGroupConfigCloudWatchAlarmDefinition(dict):
    """Dict-backed CloudWatch alarm definition: required ``comparison_operator``, ``metric_name``, ``period``, ``threshold`` plus optional dimensions/evaluation/namespace/statistic/unit."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "comparisonOperator":
            suggest = "comparison_operator"
        elif key == "metricName":
            suggest = "metric_name"
        elif key == "evaluationPeriods":
            suggest = "evaluation_periods"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigCloudWatchAlarmDefinition. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigCloudWatchAlarmDefinition.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigCloudWatchAlarmDefinition.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 comparison_operator: str,
                 metric_name: str,
                 period: int,
                 threshold: float,
                 dimensions: Optional[Sequence['outputs.InstanceGroupConfigMetricDimension']] = None,
                 evaluation_periods: Optional[int] = None,
                 namespace: Optional[str] = None,
                 statistic: Optional[str] = None,
                 unit: Optional[str] = None):
        # Only non-None optional values are stored.
        pulumi.set(__self__, "comparison_operator", comparison_operator)
        pulumi.set(__self__, "metric_name", metric_name)
        pulumi.set(__self__, "period", period)
        pulumi.set(__self__, "threshold", threshold)
        if dimensions is not None:
            pulumi.set(__self__, "dimensions", dimensions)
        if evaluation_periods is not None:
            pulumi.set(__self__, "evaluation_periods", evaluation_periods)
        if namespace is not None:
            pulumi.set(__self__, "namespace", namespace)
        if statistic is not None:
            pulumi.set(__self__, "statistic", statistic)
        if unit is not None:
            pulumi.set(__self__, "unit", unit)
    @property
    @pulumi.getter(name="comparisonOperator")
    def comparison_operator(self) -> str:
        return pulumi.get(self, "comparison_operator")
    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> str:
        return pulumi.get(self, "metric_name")
    @property
    @pulumi.getter
    def period(self) -> int:
        return pulumi.get(self, "period")
    @property
    @pulumi.getter
    def threshold(self) -> float:
        return pulumi.get(self, "threshold")
    @property
    @pulumi.getter
    def dimensions(self) -> Optional[Sequence['outputs.InstanceGroupConfigMetricDimension']]:
        return pulumi.get(self, "dimensions")
    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[int]:
        return pulumi.get(self, "evaluation_periods")
    @property
    @pulumi.getter
    def namespace(self) -> Optional[str]:
        return pulumi.get(self, "namespace")
    @property
    @pulumi.getter
    def statistic(self) -> Optional[str]:
        return pulumi.get(self, "statistic")
    @property
    @pulumi.getter
    def unit(self) -> Optional[str]:
        return pulumi.get(self, "unit")
@pulumi.output_type
class InstanceGroupConfigConfiguration(dict):
    """Dict-backed, recursively nestable application configuration: optional ``classification``, ``configuration_properties``, and child ``configurations``."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "configurationProperties":
            suggest = "configuration_properties"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigConfiguration. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigConfiguration.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigConfiguration.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 classification: Optional[str] = None,
                 configuration_properties: Optional[Any] = None,
                 configurations: Optional[Sequence['outputs.InstanceGroupConfigConfiguration']] = None):
        # All fields are optional; only non-None values are stored.
        if classification is not None:
            pulumi.set(__self__, "classification", classification)
        if configuration_properties is not None:
            pulumi.set(__self__, "configuration_properties", configuration_properties)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)
    @property
    @pulumi.getter
    def classification(self) -> Optional[str]:
        return pulumi.get(self, "classification")
    @property
    @pulumi.getter(name="configurationProperties")
    def configuration_properties(self) -> Optional[Any]:
        return pulumi.get(self, "configuration_properties")
    @property
    @pulumi.getter
    def configurations(self) -> Optional[Sequence['outputs.InstanceGroupConfigConfiguration']]:
        return pulumi.get(self, "configurations")
@pulumi.output_type
class InstanceGroupConfigEbsBlockDeviceConfig(dict):
    """Dict-backed output type pairing a required ``volume_specification`` with an optional ``volumes_per_instance`` count."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "volumeSpecification":
            suggest = "volume_specification"
        elif key == "volumesPerInstance":
            suggest = "volumes_per_instance"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigEbsBlockDeviceConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigEbsBlockDeviceConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigEbsBlockDeviceConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 volume_specification: 'outputs.InstanceGroupConfigVolumeSpecification',
                 volumes_per_instance: Optional[int] = None):
        # Only non-None optional values are stored.
        pulumi.set(__self__, "volume_specification", volume_specification)
        if volumes_per_instance is not None:
            pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)
    @property
    @pulumi.getter(name="volumeSpecification")
    def volume_specification(self) -> 'outputs.InstanceGroupConfigVolumeSpecification':
        return pulumi.get(self, "volume_specification")
    @property
    @pulumi.getter(name="volumesPerInstance")
    def volumes_per_instance(self) -> Optional[int]:
        return pulumi.get(self, "volumes_per_instance")
@pulumi.output_type
class InstanceGroupConfigEbsConfiguration(dict):
    """Dict-backed EBS configuration: optional ``ebs_block_device_configs`` list and ``ebs_optimized`` flag."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "ebsBlockDeviceConfigs":
            suggest = "ebs_block_device_configs"
        elif key == "ebsOptimized":
            suggest = "ebs_optimized"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigEbsConfiguration. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigEbsConfiguration.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigEbsConfiguration.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 ebs_block_device_configs: Optional[Sequence['outputs.InstanceGroupConfigEbsBlockDeviceConfig']] = None,
                 ebs_optimized: Optional[bool] = None):
        # All fields are optional; only non-None values are stored.
        if ebs_block_device_configs is not None:
            pulumi.set(__self__, "ebs_block_device_configs", ebs_block_device_configs)
        if ebs_optimized is not None:
            pulumi.set(__self__, "ebs_optimized", ebs_optimized)
    @property
    @pulumi.getter(name="ebsBlockDeviceConfigs")
    def ebs_block_device_configs(self) -> Optional[Sequence['outputs.InstanceGroupConfigEbsBlockDeviceConfig']]:
        return pulumi.get(self, "ebs_block_device_configs")
    @property
    @pulumi.getter(name="ebsOptimized")
    def ebs_optimized(self) -> Optional[bool]:
        return pulumi.get(self, "ebs_optimized")
@pulumi.output_type
class InstanceGroupConfigMetricDimension(dict):
    """Dict-backed CloudWatch metric dimension: a required ``key``/``value`` string pair."""
    def __init__(__self__, *,
                 key: str,
                 value: str):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> str:
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def value(self) -> str:
        return pulumi.get(self, "value")
@pulumi.output_type
class InstanceGroupConfigScalingAction(dict):
    """Dict-backed scaling action: required ``simple_scaling_policy_configuration`` plus optional ``market``."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "simpleScalingPolicyConfiguration":
            suggest = "simple_scaling_policy_configuration"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigScalingAction. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigScalingAction.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigScalingAction.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 simple_scaling_policy_configuration: 'outputs.InstanceGroupConfigSimpleScalingPolicyConfiguration',
                 market: Optional[str] = None):
        # Only non-None optional values are stored.
        pulumi.set(__self__, "simple_scaling_policy_configuration", simple_scaling_policy_configuration)
        if market is not None:
            pulumi.set(__self__, "market", market)
    @property
    @pulumi.getter(name="simpleScalingPolicyConfiguration")
    def simple_scaling_policy_configuration(self) -> 'outputs.InstanceGroupConfigSimpleScalingPolicyConfiguration':
        return pulumi.get(self, "simple_scaling_policy_configuration")
    @property
    @pulumi.getter
    def market(self) -> Optional[str]:
        return pulumi.get(self, "market")
@pulumi.output_type
class InstanceGroupConfigScalingConstraints(dict):
    """Dict-backed scaling constraints: required ``max_capacity`` and ``min_capacity`` bounds."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "maxCapacity":
            suggest = "max_capacity"
        elif key == "minCapacity":
            suggest = "min_capacity"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigScalingConstraints. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigScalingConstraints.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigScalingConstraints.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 max_capacity: int,
                 min_capacity: int):
        pulumi.set(__self__, "max_capacity", max_capacity)
        pulumi.set(__self__, "min_capacity", min_capacity)
    @property
    @pulumi.getter(name="maxCapacity")
    def max_capacity(self) -> int:
        return pulumi.get(self, "max_capacity")
    @property
    @pulumi.getter(name="minCapacity")
    def min_capacity(self) -> int:
        return pulumi.get(self, "min_capacity")
@pulumi.output_type
class InstanceGroupConfigScalingRule(dict):
    """Dict-backed scaling rule: required ``action``, ``name``, ``trigger`` plus optional ``description``."""
    def __init__(__self__, *,
                 action: 'outputs.InstanceGroupConfigScalingAction',
                 name: str,
                 trigger: 'outputs.InstanceGroupConfigScalingTrigger',
                 description: Optional[str] = None):
        # Only non-None optional values are stored.
        pulumi.set(__self__, "action", action)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "trigger", trigger)
        if description is not None:
            pulumi.set(__self__, "description", description)
    @property
    @pulumi.getter
    def action(self) -> 'outputs.InstanceGroupConfigScalingAction':
        return pulumi.get(self, "action")
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def trigger(self) -> 'outputs.InstanceGroupConfigScalingTrigger':
        return pulumi.get(self, "trigger")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        return pulumi.get(self, "description")
@pulumi.output_type
class InstanceGroupConfigScalingTrigger(dict):
    """Dict-backed scaling trigger wrapping a single required ``cloud_watch_alarm_definition``."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "cloudWatchAlarmDefinition":
            suggest = "cloud_watch_alarm_definition"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigScalingTrigger. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigScalingTrigger.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigScalingTrigger.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 cloud_watch_alarm_definition: 'outputs.InstanceGroupConfigCloudWatchAlarmDefinition'):
        pulumi.set(__self__, "cloud_watch_alarm_definition", cloud_watch_alarm_definition)
    @property
    @pulumi.getter(name="cloudWatchAlarmDefinition")
    def cloud_watch_alarm_definition(self) -> 'outputs.InstanceGroupConfigCloudWatchAlarmDefinition':
        return pulumi.get(self, "cloud_watch_alarm_definition")
@pulumi.output_type
class InstanceGroupConfigSimpleScalingPolicyConfiguration(dict):
    """Dict-backed simple scaling policy: required ``scaling_adjustment`` plus optional ``adjustment_type`` and ``cool_down``."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "scalingAdjustment":
            suggest = "scaling_adjustment"
        elif key == "adjustmentType":
            suggest = "adjustment_type"
        elif key == "coolDown":
            suggest = "cool_down"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigSimpleScalingPolicyConfiguration. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigSimpleScalingPolicyConfiguration.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigSimpleScalingPolicyConfiguration.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 scaling_adjustment: int,
                 adjustment_type: Optional[str] = None,
                 cool_down: Optional[int] = None):
        # Only non-None optional values are stored.
        pulumi.set(__self__, "scaling_adjustment", scaling_adjustment)
        if adjustment_type is not None:
            pulumi.set(__self__, "adjustment_type", adjustment_type)
        if cool_down is not None:
            pulumi.set(__self__, "cool_down", cool_down)
    @property
    @pulumi.getter(name="scalingAdjustment")
    def scaling_adjustment(self) -> int:
        return pulumi.get(self, "scaling_adjustment")
    @property
    @pulumi.getter(name="adjustmentType")
    def adjustment_type(self) -> Optional[str]:
        return pulumi.get(self, "adjustment_type")
    @property
    @pulumi.getter(name="coolDown")
    def cool_down(self) -> Optional[int]:
        return pulumi.get(self, "cool_down")
@pulumi.output_type
class InstanceGroupConfigVolumeSpecification(dict):
    """Dict-backed EBS volume spec for an instance group: required ``size_in_gb`` and ``volume_type`` plus optional ``iops``."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase dict key is accessed; point the caller at the snake_case property."""
        suggest = None
        if key == "sizeInGB":
            suggest = "size_in_gb"
        elif key == "volumeType":
            suggest = "volume_type"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigVolumeSpecification. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigVolumeSpecification.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigVolumeSpecification.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 size_in_gb: int,
                 volume_type: str,
                 iops: Optional[int] = None):
        # Only non-None optional values are stored.
        pulumi.set(__self__, "size_in_gb", size_in_gb)
        pulumi.set(__self__, "volume_type", volume_type)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)
    @property
    @pulumi.getter(name="sizeInGB")
    def size_in_gb(self) -> int:
        return pulumi.get(self, "size_in_gb")
    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> str:
        return pulumi.get(self, "volume_type")
    @property
    @pulumi.getter
    def iops(self) -> Optional[int]:
        return pulumi.get(self, "iops")
@pulumi.output_type
class StepHadoopJarStepConfig(dict):
    """Auto-generated output type for an EMR step's Hadoop JAR configuration."""
    @staticmethod
    def __key_warning(key: str):
        """Warn when a camelCase wire key is used instead of the snake_case property."""
        suggest = None
        if key == "mainClass":
            suggest = "main_class"
        elif key == "stepProperties":
            suggest = "step_properties"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in StepHadoopJarStepConfig. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        StepHadoopJarStepConfig.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        StepHadoopJarStepConfig.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 jar: str,
                 args: Optional[Sequence[str]] = None,
                 main_class: Optional[str] = None,
                 step_properties: Optional[Sequence['outputs.StepKeyValue']] = None):
        """Set the required jar path; optional fields are stored only when given."""
        pulumi.set(__self__, "jar", jar)
        if args is not None:
            pulumi.set(__self__, "args", args)
        if main_class is not None:
            pulumi.set(__self__, "main_class", main_class)
        if step_properties is not None:
            pulumi.set(__self__, "step_properties", step_properties)
    @property
    @pulumi.getter
    def jar(self) -> str:
        return pulumi.get(self, "jar")
    @property
    @pulumi.getter
    def args(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "args")
    @property
    @pulumi.getter(name="mainClass")
    def main_class(self) -> Optional[str]:
        return pulumi.get(self, "main_class")
    @property
    @pulumi.getter(name="stepProperties")
    def step_properties(self) -> Optional[Sequence['outputs.StepKeyValue']]:
        return pulumi.get(self, "step_properties")
@pulumi.output_type
class StepKeyValue(dict):
    """Auto-generated output type for a single optional key/value pair of an EMR step."""
    def __init__(__self__, *,
                 key: Optional[str] = None,
                 value: Optional[str] = None):
        """Store key and value only when provided."""
        if key is not None:
            pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> Optional[str]:
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        return pulumi.get(self, "value")
@pulumi.output_type
class StudioTag(dict):
    """
    An arbitrary set of tags (key-value pairs) for this EMR Studio.
    """
    def __init__(__self__, *,
                 key: str,
                 value: str):
        """
        An arbitrary set of tags (key-value pairs) for this EMR Studio.
        :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        :param str value: The value for the tag. You can specify a value that is 0 to 255 Unicode characters in length. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        # Both fields are required for a tag, so no None checks are needed here.
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def value(self) -> str:
        """
        The value for the tag. You can specify a value that is 0 to 255 Unicode characters in length. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "value")
| 37.434066 | 268 | 0.664685 | 91,774 | 0.962173 | 0 | 0 | 92,834 | 0.973286 | 0 | 0 | 24,260 | 0.254346 |
b3539ffc66bede449c55231a9b0e75f5780ee2ee | 128 | py | Python | 434-number-of-segments-in-a-string/434-number-of-segments-in-a-string.py | hyeseonko/LeetCode | 48dfc93f1638e13041d8ce1420517a886abbdc77 | [
"MIT"
] | 2 | 2021-12-05T14:29:06.000Z | 2022-01-01T05:46:13.000Z | 434-number-of-segments-in-a-string/434-number-of-segments-in-a-string.py | hyeseonko/LeetCode | 48dfc93f1638e13041d8ce1420517a886abbdc77 | [
"MIT"
] | null | null | null | 434-number-of-segments-in-a-string/434-number-of-segments-in-a-string.py | hyeseonko/LeetCode | 48dfc93f1638e13041d8ce1420517a886abbdc77 | [
"MIT"
class Solution:
    def countSegments(self, s: str) -> int:
        """Return the number of whitespace-separated segments in ``s``.

        ``str.split()`` with no argument already discards leading, trailing,
        and repeated whitespace, so the original strip/split(" ")/filter
        pipeline collapses to a single call.  This also generalizes the
        original (space-only) handling to all whitespace characters.
        """
        return len(s.split())
b354da045a8c2383221fb2caac0d79e36dd3ab7f | 25,921 | py | Python | riskfolio/RiskFunctions.py | xiaolongguo/Riskfolio-Lib | 4e74c4f27a48ced7dcc0ab4a9e96c922cd54f0b4 | [
"BSD-3-Clause"
] | 2 | 2022-02-07T11:16:46.000Z | 2022-02-23T06:57:41.000Z | riskfolio/RiskFunctions.py | xiaolongguo/Riskfolio-Lib | 4e74c4f27a48ced7dcc0ab4a9e96c922cd54f0b4 | [
"BSD-3-Clause"
] | null | null | null | riskfolio/RiskFunctions.py | xiaolongguo/Riskfolio-Lib | 4e74c4f27a48ced7dcc0ab4a9e96c922cd54f0b4 | [
"BSD-3-Clause"
] | 1 | 2022-02-07T11:38:34.000Z | 2022-02-07T11:38:34.000Z | import numpy as np
from scipy.optimize import minimize
from scipy.optimize import Bounds
# Public API of this module: risk measures, risk-adjusted ratios, and the
# risk-contribution helper, re-exported via ``from ... import *``.
__all__ = [
    "MAD",
    "SemiDeviation",
    "VaR_Hist",
    "CVaR_Hist",
    "WR",
    "LPM",
    "Entropic_RM",
    "EVaR_Hist",
    "MaxAbsDD",
    "AvgAbsDD",
    "ConAbsDD",
    "MaxRelDD",
    "AvgRelDD",
    "ConRelDD",
    "Sharpe_Risk",
    "Sharpe",
    "Risk_Contribution",
]
def MAD(X):
    r"""
    Calculates the Mean Absolute Deviation (MAD) of a returns series.

    .. math::
        \text{MAD}(X) = \frac{1}{T}\sum_{t=1}^{T}
        | X_{t} - \mathbb{E}(X_{t}) |

    Parameters
    ----------
    X : 1d-array
        A returns series, must have Tx1 size.

    Returns
    -------
    value : float
        MAD of the returns series.

    Raises
    ------
    ValueError
        When X is not a Tx1 (or 1xT) array.

    Examples
    --------
    >>> round(MAD([0.1, 0.3]), 10)
    0.1
    """
    a = np.array(X, ndmin=2)
    # Accept a 1xT row vector by transposing it to Tx1.
    if a.shape[0] == 1 and a.shape[1] > 1:
        a = a.T
    if a.shape[0] > 1 and a.shape[1] > 1:
        raise ValueError("returns must have Tx1 size")
    # Mean absolute deviation from the sample mean.
    value = np.mean(np.absolute(a - np.mean(a, axis=0)), axis=0)
    return value.item()
def SemiDeviation(X):
    r"""
    Calculates the Semi Deviation of a returns series.

    .. math::
        \text{SemiDev}(X) = \left [ \frac{1}{T-1}\sum_{t=1}^{T}
        (X_{t} - \mathbb{E}(X_{t}))^2 \right ]^{1/2}
        \quad \text{for } X_{t} \leq \mathbb{E}(X_{t})

    Parameters
    ----------
    X : 1d-array
        Returns series, must have Tx1 size.

    Raises
    ------
    ValueError
        When X is not a Tx1 (or 1xT) array.

    Returns
    -------
    value : float
        Semi Deviation of the returns series.
    """
    a = np.array(X, ndmin=2)
    # Accept a 1xT row vector by transposing it to Tx1.
    if a.shape[0] == 1 and a.shape[1] > 1:
        a = a.T
    if a.shape[0] > 1 and a.shape[1] > 1:
        raise ValueError("returns must have Tx1 size")
    mu = np.mean(a, axis=0)
    value = mu - a
    n = value.shape[0] - 1
    # Keep only the downside deviations, i.e. observations at or below the
    # mean (mu - a >= 0).  The previous condition `value <= mu` compared the
    # deviations against the mean itself (selecting returns >= 0), which
    # contradicts the semideviation formula above.
    value = np.sum(np.power(value[np.where(value >= 0)], 2)) / n
    return np.power(value, 0.5).item()
def VaR_Hist(X, alpha=0.01):
    r"""
    Historical Value at Risk (VaR) of a returns series.

    The VaR at level ``alpha`` is the negated empirical ``alpha``-quantile of
    the return distribution.

    Parameters
    ----------
    X : 1d-array
        Returns series, must have Tx1 size.
    alpha : float, optional
        Significance level of VaR. The default is 0.01.

    Raises
    ------
    ValueError
        When X is not a Tx1 (or 1xT) array.

    Returns
    -------
    value : float
        VaR of the returns series.
    """
    arr = np.array(X, ndmin=2)
    # Accept a 1xT row vector by transposing it to Tx1.
    if arr.shape[0] == 1 and arr.shape[1] > 1:
        arr = arr.T
    if arr.shape[0] > 1 and arr.shape[1] > 1:
        raise ValueError("returns must have Tx1 size")
    ordered = np.sort(arr, axis=0)
    # Index of the empirical alpha-quantile (worst ceil(alpha*T) observation).
    cut = int(np.ceil(alpha * len(ordered)) - 1)
    return (-ordered[cut]).item()
def CVaR_Hist(X, alpha=0.01):
    r"""
    Historical Conditional Value at Risk (CVaR) of a returns series.

    .. math::
        \text{CVaR}_{\alpha}(X) = \text{VaR}_{\alpha}(X) +
        \frac{1}{\alpha T} \sum_{t=1}^{T} \max(-X_{t} -
        \text{VaR}_{\alpha}(X), 0)

    Parameters
    ----------
    X : 1d-array
        Returns series, must have Tx1 size.
    alpha : float, optional
        Significance level of CVaR. The default is 0.01.

    Raises
    ------
    ValueError
        When X is not a Tx1 (or 1xT) array.

    Returns
    -------
    value : float
        CVaR of the returns series.
    """
    arr = np.array(X, ndmin=2)
    # Accept a 1xT row vector by transposing it to Tx1.
    if arr.shape[0] == 1 and arr.shape[1] > 1:
        arr = arr.T
    if arr.shape[0] > 1 and arr.shape[1] > 1:
        raise ValueError("returns must have Tx1 size")
    ordered = np.sort(arr, axis=0)
    cut = int(np.ceil(alpha * len(ordered)) - 1)
    # Sum of the excess losses beyond the VaR cut-off (vectorized version of
    # the per-observation tail loop).
    tail_excess = np.sum(ordered[: cut + 1] - ordered[cut])
    value = -ordered[cut] - tail_excess / (alpha * len(ordered))
    return value.item()
def WR(X):
    r"""
    Worst Realization (WR), i.e. the worst scenario, of a returns series.

    .. math::
        \text{WR}(X) = \max(-X)

    Parameters
    ----------
    X : 1d-array
        Returns series, must have Tx1 size.

    Raises
    ------
    ValueError
        When X is not a Tx1 (or 1xT) array.

    Returns
    -------
    value : float
        WR of the returns series.
    """
    arr = np.array(X, ndmin=2)
    # Accept a 1xT row vector by transposing it to Tx1.
    if arr.shape[0] == 1 and arr.shape[1] > 1:
        arr = arr.T
    if arr.shape[0] > 1 and arr.shape[1] > 1:
        raise ValueError("returns must have Tx1 size")
    # The worst realization is the negated minimum return.
    return (-np.min(arr)).item()
def LPM(X, MAR=0, p=1):
    r"""
    p-th Lower Partial Moment of a returns series.

    .. math::
        \text{LPM}(X, \text{MAR}, p) = \left [ \frac{1}{T}\sum_{t=1}^{T}
        \max(\text{MAR} - X_{t}, 0) \right ]^{\frac{1}{p}}

    Where :math:`\text{MAR}` is the minimum acceptable return.

    Parameters
    ----------
    X : 1d-array
        Returns series, must have Tx1 size.
    MAR : float, optional
        Minimum acceptable return. The default is 0.
    p : float, optional
        Order of the :math:`\text{LPM}`. The default is 1.

    Raises
    ------
    ValueError
        When X is not a Tx1 (or 1xT) array.

    Returns
    -------
    value : float
        p-th Lower Partial Moment of the returns series.
    """
    arr = np.array(X, ndmin=2)
    # Accept a 1xT row vector by transposing it to Tx1.
    if arr.shape[0] == 1 and arr.shape[1] > 1:
        arr = arr.T
    if arr.shape[0] > 1 and arr.shape[1] > 1:
        raise ValueError("returns must have Tx1 size")
    shortfall = MAR - arr
    # For p > 1 use the unbiased divisor T-1, otherwise T.
    divisor = shortfall.shape[0] - 1 if p > 1 else shortfall.shape[0]
    value = np.sum(np.power(shortfall[shortfall > 0], p)) / divisor
    return np.power(value, 1 / p).item()
def Entropic_RM(X, theta=1):
    r"""
    Entropic Risk Measure (ERM) of a returns series.

    .. math::
        \text{ERM}(X) = \theta \log\left(\mathbb{E}
        [e^{-\frac{1}{\theta} X}]\right)

    Parameters
    ----------
    X : 1d-array
        Returns series, must have Tx1 size.
    theta : float, optional
        Risk aversion parameter, must be greater than zero. The default is 1.

    Raises
    ------
    ValueError
        When X is not a Tx1 (or 1xT) array.

    Returns
    -------
    value : float
        ERM of the returns series.
    """
    arr = np.array(X, ndmin=2)
    # Accept a 1xT row vector by transposing it to Tx1.
    if arr.shape[0] == 1 and arr.shape[1] > 1:
        arr = arr.T
    if arr.shape[0] > 1 and arr.shape[1] > 1:
        raise ValueError("returns must have Tx1 size")
    # theta * log of the moment generating function of -X/theta.
    mgf = np.mean(np.exp(-1 / theta * arr), axis=0)
    return (theta * np.log(mgf)).item()
def _Entropic_RM(X, theta=1, alpha=0.01):
a = np.array(X, ndmin=2)
if a.shape[0] == 1 and a.shape[1] > 1:
a = a.T
if a.shape[0] > 1 and a.shape[1] > 1:
raise ValueError("returns must have Tx1 size")
value = np.mean(np.exp(-1 / theta * np.array(a)), axis=0)
value = theta * (np.log(value) - np.log(alpha))
value = value.item()
return value
def EVaR_Hist(X, alpha=0.01):
    r"""
    Calculates the Entropic Value at Risk (EVaR) of a returns series.
    .. math::
        \text{EVaR}_{\alpha}(X) = \inf_{z>0} \left \{ z^{-1}
        \ln \left (\frac{M_X(z)}{\alpha} \right ) \right \}
    Where:
    :math:`M_X(z)` is the moment generating function of X.
    Parameters
    ----------
    X : 1d-array
        Returns series, must have Tx1 size.
    alpha : float, optional
        Significance level of EVaR. The default is 0.01.
    Raises
    ------
    ValueError
        When the value cannot be calculated.
    Returns
    -------
    value : float
        EVaR of a returns series.
    """
    a = np.array(X, ndmin=2)
    # Accept a 1xT row vector by transposing it to Tx1.
    if a.shape[0] == 1 and a.shape[1] > 1:
        a = a.T
    if a.shape[0] > 1 and a.shape[1] > 1:
        raise ValueError("returns must have Tx1 size")
    # Lower bound keeps the optimization variable z strictly positive
    # (the objective is singular at z = 0).
    bnd = Bounds([0.00000000001], [np.inf])
    # minimize invokes the objective as fun(z, *args), i.e.
    # _Entropic_RM(z, X, alpha).
    # NOTE(review): _Entropic_RM's signature is (X, theta=1, alpha=0.01),
    # which would bind z to its X parameter and the returns to theta — the
    # parameter order looks swapped; confirm against the helper's definition.
    result = minimize(_Entropic_RM, [0.01], args=(X, alpha), bounds=bnd)
    t = result.x
    t = t.item()
    # Re-evaluate the objective at the optimal z to obtain the EVaR value.
    value = _Entropic_RM(t, X, alpha)
    return value
def MaxAbsDD(X):
    r"""
    Maximum Drawdown (MDD) of a returns series using uncompounded
    cumulative returns.

    .. math::
        \text{MDD}(X) = \max_{j \in (0,T)} \left [\max_{t \in (0,T)}
        \left ( \sum_{i=0}^{t}X_{i} - \sum_{i=0}^{j}X_{i} \right ) \right ]

    Parameters
    ----------
    X : 1d-array
        Returns series, must have Tx1 size.

    Raises
    ------
    ValueError
        When X is not a Tx1 (or 1xT) array.

    Returns
    -------
    value : float
        MDD of the uncompounded cumulative returns.
    """
    a = np.array(X, ndmin=2)
    # Accept a 1xT row vector by transposing it to Tx1.
    if a.shape[0] == 1 and a.shape[1] > 1:
        a = a.T
    if a.shape[0] > 1 and a.shape[1] > 1:
        raise ValueError("returns must have Tx1 size")
    # NAV starts at 1 and accumulates returns additively (uncompounded).
    nav = np.cumsum(np.insert(a, 0, 1, axis=0), axis=0)
    # Running peak of the NAV; the drawdown is the gap to the peak.
    peaks = np.maximum.accumulate(nav, axis=0)
    # np.max always yields a NumPy scalar, so .item() is safe even when the
    # maximum drawdown is exactly 0.  (The original kept a Python int
    # accumulator in that case and crashed with AttributeError on .item().)
    return np.max(peaks - nav).item()
def AvgAbsDD(X):
    r"""
    Average Drawdown (ADD) of a returns series using uncompounded
    cumulative returns.

    .. math::
        \text{ADD}(X) = \frac{1}{T}\sum_{i=0}^{T}\max_{t \in (0,T)}
        \left ( \sum_{i=0}^{t}X_{i} - \sum_{i=0}^{j}X_{i} \right )

    The average is taken over the observations that are strictly in
    drawdown; a series with no drawdowns yields 0.0.

    Parameters
    ----------
    X : 1d-array
        Returns series, must have Tx1 size.

    Raises
    ------
    ValueError
        When X is not a Tx1 (or 1xT) array.

    Returns
    -------
    value : float
        ADD of the uncompounded cumulative returns.
    """
    a = np.array(X, ndmin=2)
    # Accept a 1xT row vector by transposing it to Tx1.
    if a.shape[0] == 1 and a.shape[1] > 1:
        a = a.T
    if a.shape[0] > 1 and a.shape[1] > 1:
        raise ValueError("returns must have Tx1 size")
    # NAV starts at 1 and accumulates returns additively (uncompounded).
    nav = np.cumsum(np.insert(a, 0, 1, axis=0), axis=0)
    drawdowns = np.maximum.accumulate(nav, axis=0) - nav
    positive = drawdowns[drawdowns > 0]
    if positive.size == 0:
        # No drawdowns at all.  (The original returned the Python int 0 here
        # and then crashed with AttributeError on 0.item().)
        return 0.0
    return positive.mean().item()
def ConAbsDD(X, alpha=0.01):
    r"""
    Conditional Drawdown at Risk (CDaR) of a returns series using
    uncompounded cumulative returns.

    .. math::
        \text{CDaR}_{\alpha}(X) = \text{DaR}_{\alpha}(X) + \frac{1}{\alpha T}
        \sum_{i=0}^{T} \max \left [ \max_{t \in (0,T)}
        \left ( \sum_{i=0}^{t}X_{i} - \sum_{i=0}^{j}X_{i} \right )
        - \text{DaR}_{\alpha}(X), 0 \right ]

    Where :math:`\text{DaR}_{\alpha}` is the Drawdown at Risk of the
    uncompounded cumulative return series :math:`X`.

    Parameters
    ----------
    X : 1d-array
        Returns series, must have Tx1 size.
    alpha : float, optional
        Significance level of CDaR. The default is 0.01.

    Raises
    ------
    ValueError
        When X is not a Tx1 (or 1xT) array.

    Returns
    -------
    value : float
        CDaR of the uncompounded cumulative returns series.
    """
    arr = np.array(X, ndmin=2)
    # Accept a 1xT row vector by transposing it to Tx1.
    if arr.shape[0] == 1 and arr.shape[1] > 1:
        arr = arr.T
    if arr.shape[0] > 1 and arr.shape[1] > 1:
        raise ValueError("returns must have Tx1 size")
    # NAV starts at 1 and accumulates returns additively (uncompounded).
    nav = np.cumsum(np.insert(arr, 0, 1, axis=0), axis=0)
    peaks = np.maximum.accumulate(nav, axis=0)
    # Negative drawdowns; drop the artificial starting observation.
    dd = (-(peaks - nav))[1:]
    sorted_dd = np.sort(dd, axis=0)
    cut = int(np.ceil(alpha * len(sorted_dd)) - 1)
    # Sum of the excess drawdowns beyond the DaR cut-off.
    tail_excess = np.sum(sorted_dd[: cut + 1] - sorted_dd[cut])
    value = -sorted_dd[cut] - tail_excess / (alpha * len(sorted_dd))
    return value.item()
def MaxRelDD(X):
    r"""
    Maximum Drawdown (MDD) of a returns series using compounded
    cumulative returns.

    .. math::
        \text{MDD}(X) = \max_{j \in (0,T)}\left[\max_{t \in (0,T)}
        \left ( \prod_{i=0}^{t}(1+X_{i}) - \prod_{i=0}^{j}(1+X_{i}) \right ) \right]

    Parameters
    ----------
    X : 1d-array
        Returns series, must have Tx1 size.

    Raises
    ------
    ValueError
        When X is not a Tx1 (or 1xT) array.

    Returns
    -------
    value : float
        MDD of the compounded cumulative returns.
    """
    a = np.array(X, ndmin=2)
    # Accept a 1xT row vector by transposing it to Tx1.
    if a.shape[0] == 1 and a.shape[1] > 1:
        a = a.T
    if a.shape[0] > 1 and a.shape[1] > 1:
        raise ValueError("returns must have Tx1 size")
    # NAV starts at 1 and compounds the returns multiplicatively.
    nav = np.cumprod(1 + np.insert(a, 0, 0, axis=0), axis=0)
    peaks = np.maximum.accumulate(nav, axis=0)
    # np.max always yields a NumPy scalar, so .item() is safe even when the
    # maximum drawdown is exactly 0.  (The original kept a Python int
    # accumulator in that case and crashed with AttributeError on .item().)
    return np.max((peaks - nav) / peaks).item()
def AvgRelDD(X):
    r"""
    Average Drawdown (ADD) of a returns series using compounded
    cumulative returns.

    .. math::
        \text{ADD}(X) = \frac{1}{T}\sum_{i=0}^{T}\max_{t \in (0,T)}
        \left ( \prod_{i=0}^{t}(1+X_{i}) - \prod_{i=0}^{j}(1+X_{i}) \right )

    The average is taken over the observations that are strictly in
    drawdown; a series with no drawdowns yields 0.0.

    Parameters
    ----------
    X : 1d-array
        Returns series, must have Tx1 size.

    Raises
    ------
    ValueError
        When X is not a Tx1 (or 1xT) array.

    Returns
    -------
    value : float
        ADD of the compounded cumulative returns.
    """
    a = np.array(X, ndmin=2)
    # Accept a 1xT row vector by transposing it to Tx1.
    if a.shape[0] == 1 and a.shape[1] > 1:
        a = a.T
    if a.shape[0] > 1 and a.shape[1] > 1:
        raise ValueError("returns must have Tx1 size")
    # NAV starts at 1 and compounds the returns multiplicatively.
    nav = np.cumprod(1 + np.insert(a, 0, 0, axis=0), axis=0)
    peaks = np.maximum.accumulate(nav, axis=0)
    drawdowns = (peaks - nav) / peaks
    positive = drawdowns[drawdowns > 0]
    if positive.size == 0:
        # No drawdowns at all.  (The original returned the Python int 0 here
        # and then crashed with AttributeError on 0.item().)
        return 0.0
    return positive.mean().item()
def ConRelDD(X, alpha=0.01):
    r"""
    Conditional Drawdown at Risk (CDaR) of a returns series using
    compounded cumulative returns.

    .. math::
        \text{CDaR}_{\alpha}(X) = \text{DaR}_{\alpha}(X) + \frac{1}{\alpha T}
        \sum_{i=0}^{T} \max \left [ \max_{t \in (0,T)}
        \left ( \prod_{i=0}^{t}(1+X_{i}) - \prod_{i=0}^{j}(1+X_{i}) \right )
        - \text{DaR}_{\alpha}(X), 0 \right ]

    Where :math:`\text{DaR}_{\alpha}` is the Drawdown at Risk of the
    compounded cumulative return series :math:`X`.

    Parameters
    ----------
    X : 1d-array
        Returns series, must have Tx1 size.
    alpha : float, optional
        Significance level of CDaR. The default is 0.01.

    Raises
    ------
    ValueError
        When X is not a Tx1 (or 1xT) array.

    Returns
    -------
    value : float
        CDaR of the compounded cumulative returns series.
    """
    arr = np.array(X, ndmin=2)
    # Accept a 1xT row vector by transposing it to Tx1.
    if arr.shape[0] == 1 and arr.shape[1] > 1:
        arr = arr.T
    if arr.shape[0] > 1 and arr.shape[1] > 1:
        raise ValueError("X must have Tx1 size")
    # NAV starts at 1 and compounds the returns multiplicatively.
    nav = np.cumprod(1 + np.insert(arr, 0, 0, axis=0), axis=0)
    peaks = np.maximum.accumulate(nav, axis=0)
    # Negative relative drawdowns; drop the artificial starting observation.
    dd = (-(peaks - nav) / peaks)[1:]
    sorted_dd = np.sort(dd, axis=0)
    cut = int(np.ceil(alpha * len(sorted_dd)) - 1)
    # Sum of the excess drawdowns beyond the DaR cut-off.
    tail_excess = np.sum(sorted_dd[: cut + 1] - sorted_dd[cut])
    value = -sorted_dd[cut] - tail_excess / (alpha * len(sorted_dd))
    return value.item()
###############################################################################
# Risk Adjusted Return Ratios
###############################################################################
def Sharpe_Risk(w, cov=None, returns=None, rm="MV", rf=0, alpha=0.01):
    r"""
    Calculate the risk measure available on the Sharpe function.

    Parameters
    ----------
    w : DataFrame or 1d-array of shape (n_assets, 1)
        Weights matrix, where n_assets is the number of assets.
    cov : DataFrame or nd-array of shape (n_features, n_features)
        Covariance matrix, where n_features is the number of features.
    returns : DataFrame or nd-array of shape (n_samples, n_features)
        Features matrix, where n_samples is the number of samples and
        n_features is the number of features.
    rm : str, optional
        Risk measure used in the denominator of the ratio. The default is
        'MV'. Possible values are: 'MV' (Standard Deviation), 'MAD',
        'MSV' (Semi Standard Deviation), 'FLPM', 'SLPM', 'VaR', 'CVaR',
        'WR', 'MDD', 'ADD' and 'CDaR'.
    rf : float, optional
        Risk free rate. The default is 0.
    alpha : float, optional
        Significance level for VaR/CVaR/CDaR. The default is 0.01.

    Returns
    -------
    value : float
        Risk measure of the portfolio.
    """
    weights = np.array(w, ndmin=2)
    if cov is not None:
        cov_matrix = np.array(cov, ndmin=2)
    if returns is not None:
        scenarios = np.array(returns, ndmin=2)
    # Portfolio return scenarios used by every scenario-based risk measure.
    portfolio = scenarios @ weights
    if rm == "MV":
        variance = weights.T @ cov_matrix @ weights
        risk = np.sqrt(variance.item())
    elif rm == "MAD":
        risk = MAD(portfolio)
    elif rm == "MSV":
        risk = SemiDeviation(portfolio)
    elif rm == "FLPM":
        risk = LPM(portfolio, MAR=rf, p=1)
    elif rm == "SLPM":
        risk = LPM(portfolio, MAR=rf, p=2)
    elif rm == "VaR":
        risk = VaR_Hist(portfolio, alpha=alpha)
    elif rm == "CVaR":
        risk = CVaR_Hist(portfolio, alpha=alpha)
    elif rm == "WR":
        risk = WR(portfolio)
    elif rm == "MDD":
        risk = MaxAbsDD(portfolio)
    elif rm == "ADD":
        risk = AvgAbsDD(portfolio)
    elif rm == "CDaR":
        risk = ConAbsDD(portfolio, alpha=alpha)
    return risk
def Sharpe(w, mu, cov=None, returns=None, rm="MV", rf=0, alpha=0.01):
    r"""
    Calculate the Risk Adjusted Return Ratio from a portfolio returns series.

    .. math::
        \text{Sharpe}(X) = \frac{\mathbb{E}(X) - r_{f}}{\phi(X)}

    Where :math:`X` is the vector of portfolio returns, :math:`r_{f}` is the
    risk free rate (used as the :math:`\text{MAR}` for the LPM measures) and
    :math:`\phi(X)` is a convex risk measure.

    Parameters
    ----------
    w : DataFrame or 1d-array of shape (n_assets, 1)
        Weights matrix, where n_assets is the number of assets.
    mu : DataFrame or nd-array of shape (1, n_assets)
        Vector of expected returns, where n_assets is the number of assets.
    cov : DataFrame or nd-array of shape (n_features, n_features)
        Covariance matrix, where n_features is the number of features.
    returns : DataFrame or nd-array of shape (n_samples, n_features)
        Features matrix, where n_samples is the number of samples and
        n_features is the number of features.
    rm : str, optional
        Risk measure used in the denominator of the ratio. The default is
        'MV'. Possible values are: 'MV', 'MAD', 'MSV', 'FLPM', 'SLPM',
        'VaR', 'CVaR', 'WR', 'MDD', 'ADD' and 'CDaR'.
    rf : float, optional
        Risk free rate. The default is 0.
    alpha : float, optional
        Significance level for VaR/CVaR/CDaR. The default is 0.01.

    Raises
    ------
    ValueError
        When the required cov/returns input for the chosen measure is missing.

    Returns
    -------
    value : float
        Risk adjusted return ratio of :math:`X`.
    """
    if cov is None and rm == "MV":
        raise ValueError("covariance matrix is necessary to calculate the sharpe ratio")
    elif returns is None and rm != "MV":
        raise ValueError(
            "returns scenarios are necessary to calculate the sharpe ratio"
        )
    w_ = np.array(w, ndmin=2)
    mu_ = np.array(mu, ndmin=2)
    # Default both converted inputs to None so the Sharpe_Risk call below can
    # always reference them.  (The original bound cov_/returns_ only inside
    # the `is not None` branches and then used them unconditionally, raising
    # NameError e.g. when cov was omitted for a scenario-based risk measure.)
    cov_ = None
    returns_ = None
    if cov is not None:
        cov_ = np.array(cov, ndmin=2)
    if returns is not None:
        returns_ = np.array(returns, ndmin=2)
    ret = (mu_ @ w_).item()
    risk = Sharpe_Risk(w, cov=cov_, returns=returns_, rm=rm, rf=rf, alpha=alpha)
    value = (ret - rf) / risk
    return value
###############################################################################
# Risk Contribution Vectors
###############################################################################
def Risk_Contribution(w, cov=None, returns=None, rm="MV", rf=0, alpha=0.01):
    r"""
    Calculate the risk contribution for each asset based on the risk measure
    selected.
    Parameters
    ----------
    w : DataFrame or 1d-array of shape (n_assets, 1)
        Weights matrix, where n_assets is the number of assets.
    cov : DataFrame or nd-array of shape (n_features, n_features)
        Covariance matrix, where n_features is the number of features.
    returns : DataFrame or nd-array of shape (n_samples, n_features)
        Features matrix, where n_samples is the number of samples and
        n_features is the number of features.
    rm : str, optional
        Risk measure used in the denominator of the ratio. The default is
        'MV'. Posible values are:
        - 'MV': Standard Deviation.
        - 'MAD': Mean Absolute Deviation.
        - 'MSV': Semi Standard Deviation.
        - 'FLPM': First Lower Partial Moment (Omega Ratio).
        - 'SLPM': Second Lower Partial Moment (Sortino Ratio).
        - 'VaR': Value at Risk.
        - 'CVaR': Conditional Value at Risk.
        - 'WR': Worst Realization (Minimax)
        - 'MDD': Maximum Drawdown of uncompounded returns (Calmar Ratio).
        - 'ADD': Average Drawdown of uncompounded returns.
        - 'CDaR': Conditional Drawdown at Risk of uncompounded returns.
    rf : float, optional
        Risk free rate. The default is 0.
    **kwargs : dict
        Other arguments that depends on the risk measure.
    Raises
    ------
    ValueError
        When the value cannot be calculated.
    Returns
    -------
    value : 1d-array
        Per-asset risk contributions (finite-difference marginal risk scaled
        by each asset's weight).
    """
    w_ = np.array(w, ndmin=2)
    # NOTE(review): cov_/returns_ are bound only when the argument is not
    # None, but returns_ is used unconditionally below (and cov_ for 'MV') —
    # omitting them raises NameError; confirm whether both should be required.
    if cov is not None:
        cov_ = np.array(cov, ndmin=2)
    if returns is not None:
        returns_ = np.array(returns, ndmin=2)
    # risk = Sharpe_Risk(w, cov=cov_, returns=returns_, rm=rm, rf=rf, alpha=alpha)
    RC = []
    # Step size of the central finite difference used for the marginal risk.
    d_i = 0.0000001
    for i in range(0, w_.shape[0]):
        # Perturb only asset i by +/- d_i and re-evaluate the risk measure.
        delta = np.zeros((w_.shape[0], 1))
        delta[i, 0] = d_i
        w_1 = w_ + delta
        w_2 = w_ - delta
        a_1 = returns_ @ w_1
        a_2 = returns_ @ w_2
        if rm == "MV":
            risk_1 = w_1.T @ cov_ @ w_1
            risk_1 = np.sqrt(risk_1.item())
            risk_2 = w_2.T @ cov_ @ w_2
            risk_2 = np.sqrt(risk_2.item())
        elif rm == "MAD":
            risk_1 = MAD(a_1)
            risk_2 = MAD(a_2)
        elif rm == "MSV":
            risk_1 = SemiDeviation(a_1)
            risk_2 = SemiDeviation(a_2)
        elif rm == "FLPM":
            risk_1 = LPM(a_1, MAR=rf, p=1)
            risk_2 = LPM(a_2, MAR=rf, p=1)
        elif rm == "SLPM":
            risk_1 = LPM(a_1, MAR=rf, p=2)
            risk_2 = LPM(a_2, MAR=rf, p=2)
        elif rm == "VaR":
            risk_1 = VaR_Hist(a_1, alpha=alpha)
            risk_2 = VaR_Hist(a_2, alpha=alpha)
        elif rm == "CVaR":
            risk_1 = CVaR_Hist(a_1, alpha=alpha)
            risk_2 = CVaR_Hist(a_2, alpha=alpha)
        elif rm == "WR":
            risk_1 = WR(a_1)
            risk_2 = WR(a_2)
        elif rm == "MDD":
            risk_1 = MaxAbsDD(a_1)
            risk_2 = MaxAbsDD(a_2)
        elif rm == "ADD":
            risk_1 = AvgAbsDD(a_1)
            risk_2 = AvgAbsDD(a_2)
        elif rm == "CDaR":
            risk_1 = ConAbsDD(a_1, alpha=alpha)
            risk_2 = ConAbsDD(a_2, alpha=alpha)
        # Central-difference estimate of d(risk)/d(w_i), scaled by the weight.
        RC_i = (risk_1 - risk_2) / (2 * d_i) * w_[i, 0]
        RC.append(RC_i)
    RC = np.array(RC, ndmin=1)
    return RC
| 25.998997 | 88 | 0.532078 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15,484 | 0.597353 |
b3597781efddb74aa0e9cc460ed2839b023ce0f7 | 4,598 | py | Python | qSonify/sonify/song.py | jiosue/qSonify | 35f4a16dfa08b4643586feda9ae74a76817c8432 | [
"MIT"
] | null | null | null | qSonify/sonify/song.py | jiosue/qSonify | 35f4a16dfa08b4643586feda9ae74a76817c8432 | [
"MIT"
] | null | null | null | qSonify/sonify/song.py | jiosue/qSonify | 35f4a16dfa08b4643586feda9ae74a76817c8432 | [
"MIT"
] | null | null | null | from midiutil.MidiFile import MIDIFile
import os
def _create_midi_mapping():
""" Create a dictionary that maps note name to midi note integer """
middle_c = 60
notes = "c", "c#", "d", "d#", "e", "f", "f#", "g", "g#", "a", "a#", "b"
equiv = (("c#", "db"), ("d#", "eb"),
("f#", "gb"), ("g#", "ab"), ("a#", "bb"))
m = {}
j, o = len(notes)-1, 3
for v in range(middle_c-1, -1, -1):
for e in equiv: m[notes[j].replace(*e) + str(o)] = v
if j == 0: o -= 1
j = (j - 1) % len(notes)
j, o = 0, 4
for v in range(middle_c, 128):
for e in equiv: m[notes[j].replace(*e) + str(o)] = v
j = (j + 1) % len(notes)
if j == 0: o += 1
return m
_midi_mapping = _create_midi_mapping()
class Song(MIDIFile):
    # Sequence types accepted by addNote for chords; the third entry is the
    # generator type (obtained from a throwaway generator expression).
    _valid = tuple, list, type(x for x in range(1))
    def __init__(self, name="test", tempo=100, num_tracks=1):
        """
        Initialize Song object.
        name: str, name of song/file.
        tempo: int, bpm of song.
        num_tracks: int, number of tracks for the midi file to have.
        """
        super().__init__(num_tracks)
        self.name, self.tempo, self.volume = name, tempo, 100
        self.filename = "%s.mid" % name
        self.path = ""
        track, self.channel = 0, 0
        self.time = [0]*num_tracks # start each track at the beginning
        self.addTempo(track, self.time[0], self.tempo)
    def addNote(self, notes, duration=4, track=0):
        """
        Overrides MIDIFile's addNote method, but uses it as a subroutine. Adds
        a note or notes with a duration to the specified track, then increments
        the time by that duration.
        notes: str or tuple of strs, notes to add at the current location of
               of the track.
        duration: float, number of beats for the note/chord.
        track: int, which track to add to.
        """
        # Normalize a single note name to a one-element tuple.
        if not isinstance(notes, Song._valid): notes = notes,
        for note in notes:
            note = note.lower()
            # Accept an explicit octave ("c4") or a bare name, which falls
            # back to octave 4.
            if note in _midi_mapping: pitch = _midi_mapping[note]
            elif note+"4" in _midi_mapping: pitch = _midi_mapping[note+"4"]
            else: raise ValueError("Note not valid:", note)
            super().addNote(track, self.channel, pitch,
                            self.time[track], duration, self.volume)
        # All notes of a chord share the same start time; advance once.
        self.time[track] += duration
        self.need_to_write = True
    def addRest(self, duration=1, track=0):
        """
        Add a rest to the track, just corresponds to adjusting the time.
        duration: float, number of beats the rest lasts.
        track: int, which track to add the rest to.
        """
        self.time[track] += duration
        self.need_to_write = True
    def addText(self, text, track=0):
        """
        Add text to a track at the current time. For it to be visible, there
        must be a note at the current time on this track.
        text: str, text to add.
        track: int, which track to add the text to.
        """
        super().addText(track, self.time[track], str(text))
        self.need_to_write = True
    def writeFile(self, path=""):
        """
        Write the current midi track to a file
        path: str, path to write the file to. Must end with a "/"!
        """
        # NOTE(review): need_to_write is only ever set by addNote/addRest/
        # addText, not by __init__ — calling writeFile on a fresh Song raises
        # AttributeError; confirm whether __init__ should initialize it.
        if not self.need_to_write: return
        try:
            with open(path+self.filename, "wb") as f: super().writeFile(f)
        except FileNotFoundError:
            # Create the missing directory and retry once.
            # NOTE(review): os.mkdir creates a single directory level only;
            # nested paths would still fail — os.makedirs may be intended.
            os.mkdir(path)
            with open(path+self.filename, "wb") as f: super().writeFile(f)
        self.need_to_write = False
        self.path = path
    def play(self, path=""):
        """
        Write the midi file, then call on the system's default midi player. On
        Windows, this is probably Windows Media Player. THIS ONLY WORKS ON
        WINDOWS, IF YOU WANT TO USE IT YOU MUST CHANGE THE SYSTEM CALL.
        path: str, where to save the file to. Must end with a "/"!
        """
        # Fall back to the last path used by writeFile when none is given.
        if not path and self.path: path = self.path
        self.writeFile(path)
        os.system("start %s" % (self.path+self.filename))
    def __str__(self):
        """ Return the string name of the song """
        return self.filename
if __name__ == "__main__":
    # Smoke test: build a short melody and hand it to the system MIDI player.
    # The original demo passed a `path` keyword that Song.__init__ does not
    # accept (TypeError) and called a non-existent s.view(); use the
    # supported constructor signature and Song.play() instead.
    s = Song(name="helloworld", tempo=110)
    s.addNote("c")
    s.addNote("d")
    s.addNote(("c", "d", "e"))
    s.play()
| 37.080645 | 80 | 0.536755 | 3,599 | 0.782732 | 0 | 0 | 0 | 0 | 0 | 0 | 1,853 | 0.403001 |
b35d694bac54a0ae6c31c50cc6cb7382b796541b | 27,333 | py | Python | src/fora/connectors/tunnel_dispatcher.py | oddlama/forge | d09b0f309ce7dcda79dc03765473b48732c71845 | [
"MIT"
] | 14 | 2021-12-17T10:38:27.000Z | 2022-03-02T01:20:01.000Z | src/fora/connectors/tunnel_dispatcher.py | oddlama/forge | d09b0f309ce7dcda79dc03765473b48732c71845 | [
"MIT"
] | 2 | 2022-01-11T13:31:09.000Z | 2022-02-03T15:41:43.000Z | src/fora/connectors/tunnel_dispatcher.py | oddlama/forge | d09b0f309ce7dcda79dc03765473b48732c71845 | [
"MIT"
] | 2 | 2022-02-03T15:20:51.000Z | 2022-02-03T15:45:11.000Z | #!/usr/bin/env python3
# pylint: disable=too-many-lines
"""
Provides a stdin/stdout based protocol to safely dispatch commands and return their
results over any connection that forwards both stdin/stdout, as well as some other
needed remote system related utilities.
"""
import errno as sys_errno
import hashlib
import os
import stat
import struct
import subprocess
import sys
import typing
from pwd import getpwnam, getpwuid
from grp import getgrnam, getgrgid, getgrall
from spwd import getspnam
from struct import pack, unpack
from typing import IO, Any, Type, TypeVar, Callable, Optional, Union, NamedTuple, NewType, cast
T = TypeVar('T')
# Fixed-width integer aliases used by the wire (de)serializers below; the
# distinct NewTypes select the correct struct format (">i", ">I", ">q", ">Q").
i32 = NewType('i32', int)
u32 = NewType('u32', int)
i64 = NewType('i64', int)
u64 = NewType('u64', int)
# Runtime flags, assigned in _main() when running as the remote dispatcher.
is_server = False
debug = False
try:
import fora
except ModuleNotFoundError:
pass
# TODO: timeout on commands
# TODO: interactive commands?
# TODO: env
class RemoteOSError(Exception):
    """Raised on the client side when the remote host reported an OSError.

    Carries the remote errno and strerror so callers can inspect the
    original failure without access to the remote exception object.
    """
    def __init__(self, errno: int, strerror: str, msg: str):
        self.errno = errno
        self.strerror = strerror
        super().__init__(msg)
# Utility functions
# ----------------------------------------------------------------
def _is_debug() -> bool:
    """Return whether debugging output should be generated for this process."""
    # On the server side the flag comes from our own CLI argument; on the
    # client side it is read from fora's parsed arguments.
    if is_server:
        return debug
    return cast(bool, fora.args.debug)
def _log(msg: str) -> None:
    """
    Logs the given message to stderr, appending a prefix to indicate whether this
    is running on a remote (server) or locally (client).
    Parameters
    ----------
    msg
        The message to log.
    """
    if not _is_debug():
        return
    # TODO color should be configurable
    # NOTE: the prefix strings below embed raw ANSI color escape sequences.
    prefix = "  [1;33mREMOTE[m: " if is_server else "  [1;32mLOCAL[m: "
    # flush=True so interleaved client/server logs appear promptly.
    print(f"{prefix}{msg}", file=sys.stderr, flush=True)
def _resolve_oct(value: str) -> int:
    """
    Parses an octal string (e.g. a umask or file mode) into its numeric value.
    Raises a ValueError if the value is malformed.
    Parameters
    ----------
    value
        The octal string value
    Returns
    -------
    int
        The numeric representation of the octal value
    """
    try:
        parsed = int(value, 8)
    except ValueError:
        # int() rejects anything that is not a valid base-8 literal.
        raise ValueError(f"Invalid value '{value}': Must be in octal format.") # pylint: disable=raise-missing-from
    return parsed
def _resolve_user(user: str) -> tuple[int, int]:
    """
    Resolves the given user string to a uid and gid.
    The string may be either a username or a uid.
    Raises a ValueError if the user/uid does not exist.
    Parameters
    ----------
    user
        The username or uid to resolve
    Returns
    -------
    tuple[int, int]
        A tuple (uid, gid) with the numeric ids of the user and its primary group
    """
    try:
        pw = getpwnam(user)
    except KeyError:
        # Not a known user name; fall back to interpreting the string as a uid.
        # BUGFIX: the name-lookup fallback previously wrapped the uid lookup in
        # the same `except ValueError`, so the uid-specific error below was
        # swallowed and replaced by the generic name error. The two failure
        # paths are now separate so each reports the correct message.
        try:
            uid = int(user)
        except ValueError:
            raise ValueError(f"The user with the name '{user}' does not exist.") # pylint: disable=raise-missing-from
        try:
            pw = getpwuid(uid)
        except KeyError:
            raise ValueError(f"The user with the uid '{uid}' does not exist.") # pylint: disable=raise-missing-from
    return (pw.pw_uid, pw.pw_gid)
def _resolve_group(group: str) -> int:
    """
    Resolves the given group string to a gid.
    The string may be either a groupname or a gid.
    Raises a ValueError if the group/gid does not exist.
    Parameters
    ----------
    group
        The groupname or gid to resolve
    Returns
    -------
    int
        The numeric gid of the group
    """
    try:
        gr = getgrnam(group)
    except KeyError:
        # Not a known group name; fall back to interpreting the string as a gid.
        # BUGFIX: as in _resolve_user, the gid-specific error was previously
        # swallowed by an enclosing `except ValueError` and replaced with the
        # generic name error; the two lookups are now separated.
        try:
            gid = int(group)
        except ValueError:
            raise ValueError(f"The group with the name '{group}' does not exist.") # pylint: disable=raise-missing-from
        try:
            gr = getgrgid(gid)
        except KeyError:
            raise ValueError(f"The group with the gid '{gid}' does not exist.") # pylint: disable=raise-missing-from
    return gr.gr_gid
# Connection wrapper
# ----------------------------------------------------------------
# pylint: disable=too-many-public-methods
class Connection:
    """Wraps the input and output byte streams that connect to this dispatcher."""
    def __init__(self, buffer_in: IO[bytes], buffer_out: IO[bytes]):
        self.buffer_in = buffer_in
        self.buffer_out = buffer_out
        # Set by packet handlers (e.g. PacketExit) to terminate the main loop.
        self.should_close = False
    def flush(self) -> None:
        """Flushes any buffered output."""
        self.buffer_out.flush()
    def read(self, count: int) -> bytes:
        """Reads exactly `count` bytes from the input stream."""
        return self.buffer_in.read(count)
    def write(self, data: bytes, count: int) -> None:
        """Writes the first `count` bytes of `data` to the output stream."""
        self.buffer_out.write(data[:count])
    def write_packet(self, packet: Any) -> None:
        """Serializes and writes the given packet object."""
        if not getattr(packet, '_is_packet', False):
            raise ValueError("Invalid argument: Must be a packet!")
        # _write is attached dynamically by the @Packet decorator, so the
        # type checker cannot see it; invoke it via the attribute.
        packet._write(self) # pylint: disable=protected-access
# Primary serialization and deserialization
# ----------------------------------------------------------------
def _is_optional(field: Type[Any]) -> bool:
    """Returns True when the given type annotation is Optional[...]."""
    if typing.get_origin(field) is not Union:
        return False
    # Optional[X] is just Union[X, None].
    return type(None) in typing.get_args(field)
def _is_list(field: Type[Any]) -> bool:
    """Returns True when the given type annotation is list[...]."""
    return list is typing.get_origin(field)
# Maps a wire type to a function that encodes a value of that type onto a
# Connection. All integers are big-endian (struct '>' prefix) and fixed-width.
_serializers: dict[Any, Callable[[Connection, Any], Any]] = {}
_serializers[bool] = lambda conn, v: conn.write(pack(">?", v), 1)
_serializers[i32] = lambda conn, v: conn.write(pack(">i", v), 4)
_serializers[u32] = lambda conn, v: conn.write(pack(">I", v), 4)
_serializers[i64] = lambda conn, v: conn.write(pack(">q", v), 8)
_serializers[u64] = lambda conn, v: conn.write(pack(">Q", v), 8)
# bytes are length-prefixed with a u64; str is utf-8 encoded then sent as bytes.
_serializers[bytes] = lambda conn, v: (_serializers[u64](conn, len(v)), conn.write(v, len(v))) # type: ignore[func-returns-value]
_serializers[str] = lambda conn, v: _serializers[bytes](conn, v.encode('utf-8'))
def _serialize(conn: Connection, vtype: Type[Any], v: Any) -> None:
    """Serializes v based on the underlying type 'vtype' and writes it to the given connection."""
    # Primitive types are handled directly by the registered serializers.
    if vtype in _serializers:
        _serializers[vtype](conn, v)
        return
    if _is_optional(vtype):
        # A leading bool marks whether a value follows.
        inner_type = typing.get_args(vtype)[0]
        _serializers[bool](conn, v is not None)
        if v is not None:
            _serialize(conn, inner_type, v)
        return
    if _is_list(vtype):
        # A u64 element count, followed by each element in order.
        inner_type = typing.get_args(vtype)[0]
        _serializers[u64](conn, len(v))
        for item in v:
            _serialize(conn, inner_type, item)
        return
    raise ValueError(f"Cannot serialize object of type {vtype}")
# Maps a wire type to a function that decodes one value of that type from a
# Connection. Must mirror _serializers exactly (same widths and byte order).
_deserializers: dict[Any, Callable[[Connection], Any]] = {}
_deserializers[bool] = lambda conn: unpack(">?", conn.read(1))[0]
_deserializers[i32] = lambda conn: unpack(">i", conn.read(4))[0]
_deserializers[u32] = lambda conn: unpack(">I", conn.read(4))[0]
_deserializers[i64] = lambda conn: unpack(">q", conn.read(8))[0]
_deserializers[u64] = lambda conn: unpack(">Q", conn.read(8))[0]
# bytes: read the u64 length prefix, then that many bytes; str decodes as utf-8.
_deserializers[bytes] = lambda conn: conn.read(_deserializers[u64](conn))
_deserializers[str] = lambda conn: _deserializers[bytes](conn).decode('utf-8')
def _deserialize(conn: Connection, vtype: Type[Any]) -> Any:
    """Deserializes an object from the given connection based on the underlying type 'vtype' and returns it."""
    if vtype in _deserializers:
        return _deserializers[vtype](conn)
    if _is_optional(vtype):
        # A leading bool marks whether a value follows.
        if not _deserializers[bool](conn):
            return None
        return _deserialize(conn, typing.get_args(vtype)[0])
    if _is_list(vtype):
        # A u64 element count is followed by that many serialized elements.
        element_type = typing.get_args(vtype)[0]
        count = _deserializers[u64](conn)
        return [_deserialize(conn, element_type) for _ in range(count)]
    raise ValueError(f"Cannot deserialize object of type {vtype}")
# Packet helpers
# ----------------------------------------------------------------
# Registry of all packet classes; a packet's wire id is its index in this list,
# so class definition order defines the protocol and must match on both ends.
packets: list[Any] = []
# Maps packet id -> function that reads that packet's payload from a Connection.
packet_deserializers: dict[int, Callable[[Connection], Any]] = {}
def _handle_response_packet() -> None:
    """Handler installed on response packets: receiving one as a request is a protocol violation."""
    message = "This packet is a server-side response packet and must never be sent by the client!"
    raise RuntimeError(message)
# Define generic read and write functions
def _read_packet(cls: Type[Any], conn: Connection) -> Any:
    """Reads one value per NamedTuple field of `cls` from the connection and constructs the packet."""
    # Dict comprehensions preserve insertion order, so fields are read from
    # the wire in declaration order, exactly as _write_packet emits them.
    kwargs: dict[str, Any] = {
        field: _deserialize(conn, cls.__annotations__[field])
        for field in cast(Any, cls)._fields
    }
    return cls(**kwargs)
def _write_packet(cls: Type[Any], packet_id: u32, this: object, conn: Connection) -> None:
    """Writes the packet id header, then each field of `this` in declaration order, then flushes."""
    _serialize(conn, u32, packet_id)
    for field in cls._fields:
        _serialize(conn, cls.__annotations__[field], getattr(this, field))
    conn.flush()
def Packet(type: str) -> Callable[[Type[Any]], Any]: # pylint: disable=redefined-builtin
    """Decorator for packet types. Registers the packet and generates read and write methods."""
    if type not in ['response', 'request']:
        raise RuntimeError("Invalid @Packet decoration: type must be either 'response' or 'request'.")
    def wrapper(cls: Type[Any]) -> Type[Any]:
        # Assert cls is a NamedTuple
        if not hasattr(cls, '_fields'):
            raise RuntimeError("Invalid @Packet decoration: Decorated class must inherit from NamedTuple.")
        # Find next packet id
        # NOTE: the id is the current length of the registry, so decoration
        # (i.e. class definition) order defines the wire protocol.
        packet_id = u32(len(packets))
        # Replace functions
        # _is_packet marks the class for Connection.write_packet; _write
        # closes over the assigned id so the instance can serialize itself.
        cls._is_packet = True # pylint: disable=protected-access
        cls._write = lambda self, conn: _write_packet(cls, packet_id, self, conn) # pylint: disable=protected-access
        if type == 'response':
            # Response packets must never be handled server-side.
            cls.handle = _handle_response_packet
        elif type == 'request':
            if not hasattr(cls, 'handle') or not callable(getattr(cls, 'handle')):
                raise RuntimeError("Invalid @Packet decoration: request packets must provide a handle method!")
        # Register packet
        packets.append(cls)
        packet_deserializers[packet_id] = lambda conn: _read_packet(cls, conn)
        return cls
    return wrapper
# Packets
# ----------------------------------------------------------------
# NOTE: definition order of the packet classes below assigns their wire ids;
# do not reorder them.
@Packet(type='response')
class PacketOk(NamedTuple):
    """This packet is used by some requests as a generic successful status indicator."""
@Packet(type='response')
class PacketAck(NamedTuple):
    """This packet is used to acknowledge a previous PacketCheckAlive packet."""
@Packet(type='request')
class PacketCheckAlive(NamedTuple):
    """This packet is used to check whether a connection is alive.
    The receiver must answer with PacketAck immediately."""
    def handle(self, conn: Connection) -> None:
        """Responds with PacketAck."""
        _ = (self)
        conn.write_packet(PacketAck())
@Packet(type='request')
class PacketExit(NamedTuple):
    """This packet is used to signal the server to close the connection and end the dispatcher."""
    def handle(self, conn: Connection) -> None:
        """Signals the connection to close."""
        _ = (self)
        # The main loop in _main() exits once this flag is set.
        conn.should_close = True
@Packet(type='response')
class PacketOSError(NamedTuple):
    """This packet is sent when an OSError occurs."""
    errno: i64
    strerror: str
    msg: str
@Packet(type='response')
class PacketInvalidField(NamedTuple):
    """This packet is used when an invalid value was given in a previous packet."""
    field: str
    error_message: str
@Packet(type='response')
class PacketProcessCompleted(NamedTuple):
    """This packet is used to return the results of a process."""
    stdout: Optional[bytes]
    stderr: Optional[bytes]
    returncode: i32
@Packet(type='response')
class PacketProcessError(NamedTuple):
    """This packet is used to indicate an error when running a process or when running the preexec_fn."""
    message: str
@Packet(type='request')
class PacketProcessRun(NamedTuple):
    """This packet is used to run a process."""
    command: list[str]
    stdin: Optional[bytes] = None
    capture_output: bool = True
    user: Optional[str] = None
    group: Optional[str] = None
    umask: Optional[str] = None
    cwd: Optional[str] = None
    def handle(self, conn: Connection) -> None:
        """Runs the requested command.

        Resolves umask/user/group/cwd first (answering PacketInvalidField on
        any bad value), then runs the command and responds with either
        PacketProcessCompleted or PacketProcessError.
        """
        # By default we will run commands as the current user.
        uid, gid = (None, None)
        umask_oct = 0o077
        if self.umask is not None:
            try:
                umask_oct = _resolve_oct(self.umask)
            except ValueError as e:
                conn.write_packet(PacketInvalidField("umask", str(e)))
                return
        if self.user is not None:
            try:
                (uid, gid) = _resolve_user(self.user)
            except ValueError as e:
                conn.write_packet(PacketInvalidField("user", str(e)))
                return
        if self.group is not None:
            # An explicit group overrides the user's primary group.
            try:
                gid = _resolve_group(self.group)
            except ValueError as e:
                conn.write_packet(PacketInvalidField("group", str(e)))
                return
        if self.cwd is not None:
            if not os.path.isdir(self.cwd):
                conn.write_packet(PacketInvalidField("cwd", "The directory does not exist"))
                return
        def child_preexec() -> None:
            """
            Sets umask and becomes the correct user.
            """
            os.umask(umask_oct)
            # Drop group before user: once the uid is dropped we may no
            # longer have permission to change the gid.
            if gid is not None:
                os.setresgid(gid, gid, gid)
            if uid is not None:
                os.setresuid(uid, uid, uid)
            # NOTE(review): cwd is both passed to subprocess.run and chdir'd
            # here; the chdir re-applies it after the identity change.
            if self.cwd is not None:
                os.chdir(self.cwd)
        # Execute command with desired parameters
        try:
            result = subprocess.run(self.command,
                input=self.stdin,
                capture_output=self.capture_output,
                cwd=self.cwd,
                preexec_fn=child_preexec,
                check=False)
        except subprocess.SubprocessError as e:
            conn.write_packet(PacketProcessError(str(e)))
            return
        # Send response for command result
        conn.write_packet(PacketProcessCompleted(result.stdout, result.stderr, i32(result.returncode)))
@Packet(type='response')
class PacketStatResult(NamedTuple):
    """This packet is used to return the results of a stat packet."""
    type: str # pylint: disable=redefined-builtin
    mode: u64
    owner: str
    group: str
    size: u64
    mtime: u64
    ctime: u64
    sha512sum: Optional[bytes]
@Packet(type='request')
class PacketStat(NamedTuple):
    """This packet is used to retrieve information about a file or directory."""
    path: str
    follow_links: bool = False
    sha512sum: bool = False
    def handle(self, conn: Connection) -> None:
        """Stats the requested path.

        A missing path answers with PacketInvalidField; any other OSError
        propagates to the caller (which reports it as PacketOSError).
        """
        try:
            s = os.stat(self.path, follow_symlinks=self.follow_links)
        except OSError as e:
            if e.errno != sys_errno.ENOENT:
                raise
            conn.write_packet(PacketInvalidField("path", str(e)))
            return
        # Classify the inode type from the mode bits.
        ftype = "dir" if stat.S_ISDIR(s.st_mode) else \
                "chr" if stat.S_ISCHR(s.st_mode) else \
                "blk" if stat.S_ISBLK(s.st_mode) else \
                "file" if stat.S_ISREG(s.st_mode) else \
                "fifo" if stat.S_ISFIFO(s.st_mode) else \
                "link" if stat.S_ISLNK(s.st_mode) else \
                "sock" if stat.S_ISSOCK(s.st_mode) else \
                "other"
        # Fall back to the numeric id when the uid/gid has no passwd/group entry.
        try:
            owner = getpwuid(s.st_uid).pw_name
        except KeyError:
            owner = str(s.st_uid)
        try:
            group = getgrgid(s.st_gid).gr_name
        except KeyError:
            group = str(s.st_gid)
        sha512sum: Optional[bytes]
        # Hash only regular files; other types have no meaningful content.
        if self.sha512sum and ftype == "file":
            with open(self.path, 'rb') as f:
                sha512sum = hashlib.sha512(f.read()).digest()
        else:
            sha512sum = None
        # Send response
        conn.write_packet(PacketStatResult(
            type=ftype,
            mode=u64(stat.S_IMODE(s.st_mode)),
            owner=owner,
            group=group,
            size=u64(s.st_size),
            mtime=u64(s.st_mtime_ns),
            ctime=u64(s.st_ctime_ns),
            sha512sum=sha512sum))
@Packet(type='response')
class PacketResolveResult(NamedTuple):
    """This packet is used to return the results of a resolve packet."""
    value: str
@Packet(type='request')
class PacketResolveUser(NamedTuple):
    """
    This packet is used to canonicalize a user name / uid and to ensure it exists.
    If None is given, it queries the current user.
    """
    user: Optional[str]
    def handle(self, conn: Connection) -> None:
        """Resolves the requested user."""
        user = self.user if self.user is not None else str(os.getuid())
        try:
            pw = getpwnam(user)
        except KeyError:
            # Not a name; try to interpret the string as a numeric uid.
            try:
                uid = int(user)
                pw = getpwuid(uid)
            except (KeyError, ValueError):
                conn.write_packet(PacketInvalidField("user", "The user does not exist"))
                return
        # Send response
        conn.write_packet(PacketResolveResult(value=pw.pw_name))
@Packet(type='request')
class PacketResolveGroup(NamedTuple):
    """
    This packet is used to canonicalize a group name / gid and to ensure it exists.
    If None is given, it queries the current group.
    """
    group: Optional[str]
    def handle(self, conn: Connection) -> None:
        """Resolves the requested group."""
        group = self.group if self.group is not None else str(os.getgid())
        try:
            gr = getgrnam(group)
        except KeyError:
            # Not a name; try to interpret the string as a numeric gid.
            try:
                gid = int(group)
                gr = getgrgid(gid)
            except (KeyError, ValueError):
                conn.write_packet(PacketInvalidField("group", "The group does not exist"))
                return
        # Send response
        conn.write_packet(PacketResolveResult(value=gr.gr_name))
@Packet(type='request')
class PacketUpload(NamedTuple):
    """This packet is used to upload the given content to the remote and save it as a file.
    Overwrites existing files. Responds with PacketOk if saving was successful, or PacketInvalidField if any
    field contained an invalid value."""
    file: str
    content: bytes
    mode: Optional[str] = None
    owner: Optional[str] = None
    group: Optional[str] = None
    def handle(self, conn: Connection) -> None:
        """Saves the content under the given path, then applies mode and ownership if requested."""
        uid, gid = (None, None)
        mode_oct = None
        if self.mode is not None:
            try:
                mode_oct = _resolve_oct(self.mode)
            except ValueError as e:
                conn.write_packet(PacketInvalidField("mode", str(e)))
                return
        if self.owner is not None:
            try:
                (uid, gid) = _resolve_user(self.owner)
            except ValueError as e:
                conn.write_packet(PacketInvalidField("owner", str(e)))
                return
        if self.group is not None:
            # An explicit group overrides the owner's primary group.
            try:
                gid = _resolve_group(self.group)
            except ValueError as e:
                conn.write_packet(PacketInvalidField("group", str(e)))
                return
        with open(self.file, 'wb') as f:
            f.write(self.content)
        if mode_oct is not None:
            os.chmod(self.file, mode_oct)
        if uid is not None or gid is not None:
            # BUGFIX: pass -1 for any id that should remain unchanged (POSIX
            # chown semantics). The previous `uid or 0` / `gid or 0` silently
            # chowned the file to root (id 0) whenever only one of
            # owner/group was specified.
            os.chown(self.file,
                     uid if uid is not None else -1,
                     gid if gid is not None else -1)
        conn.write_packet(PacketOk())
@Packet(type='response')
class PacketDownloadResult(NamedTuple):
    """This packet is used to return the content of a file."""
    content: bytes
@Packet(type='request')
class PacketDownload(NamedTuple):
    """This packet is used to download the contents of a given file.
    Responds with PacketDownloadResult if reading was successful, or PacketInvalidField if any
    field contained an invalid value."""
    file: str
    def handle(self, conn: Connection) -> None:
        """Reads the file."""
        try:
            with open(self.file, 'rb') as f:
                content = f.read()
        except OSError as e:
            # Only a missing file is reported as an invalid field; any other
            # OSError propagates and becomes a PacketOSError upstream.
            if e.errno != sys_errno.ENOENT:
                raise
            conn.write_packet(PacketInvalidField("file", str(e)))
            return
        conn.write_packet(PacketDownloadResult(content))
@Packet(type='response')
class PacketUserEntry(NamedTuple):
    """This packet is used to return information about a user."""
    name: str
    """The name of the user"""
    uid: i64
    """The numerical user id"""
    group: str
    """The name of the primary group"""
    gid: i64
    """The numerical primary group id"""
    groups: list[str]
    """All names of the supplementary groups this user belongs to"""
    password_hash: Optional[str]
    """The password hash from shadow"""
    gecos: str
    """The comment (GECOS) field of the user"""
    home: str
    """The home directory of the user"""
    shell: str
    """The default shell of the user"""
@Packet(type='request')
class PacketQueryUser(NamedTuple):
    """This packet is used to get information about a group via pwd.getpw*."""
    user: str
    """User name or decimal uid"""
    query_password_hash: bool
    """Whether the current password hash from shadow should also be returned"""
    def handle(self, conn: Connection) -> None:
        """Queries the requested user."""
        try:
            pw = getpwnam(self.user)
        except KeyError:
            # Not a name; try to interpret the string as a numeric uid.
            try:
                gid = int(self.user)
                pw = getpwuid(gid)
            except (KeyError, ValueError):
                conn.write_packet(PacketInvalidField("user", "The user does not exist"))
                return
        pw_hash: Optional[str] = None
        if self.query_password_hash:
            # Reading shadow typically requires root; a failure is reported
            # as an invalid field rather than crashing the dispatcher.
            try:
                pw_hash = getspnam(pw.pw_name).sp_pwdp
            except KeyError:
                conn.write_packet(PacketInvalidField("user", "The user has no shadow entry, or it is inaccessible."))
                return
        # Supplementary groups: every group whose member list names this user.
        groups = [g.gr_name for g in getgrall() if pw.pw_name in g.gr_mem]
        try:
            conn.write_packet(PacketUserEntry(
                name=pw.pw_name,
                uid=i64(pw.pw_uid),
                group=getgrgid(pw.pw_gid).gr_name,
                gid=i64(pw.pw_gid),
                groups=groups,
                password_hash=pw_hash,
                gecos=pw.pw_gecos,
                home=pw.pw_dir,
                shell=pw.pw_shell))
        except KeyError:
            conn.write_packet(PacketInvalidField("user", "The user's primary group doesn't exist"))
            return
@Packet(type='response')
class PacketGroupEntry(NamedTuple):
    """This packet is used to return information about a group."""
    name: str
    """The name of the group"""
    gid: i64
    """The numerical group id"""
    members: list[str]
    """All the group member's user names"""
@Packet(type='request')
class PacketQueryGroup(NamedTuple):
    """This packet is used to get information about a group via grp.getgr*."""
    group: str
    """Group name or decimal gid"""
    def handle(self, conn: Connection) -> None:
        """Queries the requested group."""
        try:
            gr = getgrnam(self.group)
        except KeyError:
            # Not a name; try to interpret the string as a numeric gid.
            try:
                gid = int(self.group)
                gr = getgrgid(gid)
            except (KeyError, ValueError):
                conn.write_packet(PacketInvalidField("group", "The group does not exist"))
                return
        # Send response
        conn.write_packet(PacketGroupEntry(name=gr.gr_name, gid=i64(gr.gr_gid), members=gr.gr_mem))
@Packet(type='response')
class PacketEnvironVar(NamedTuple):
    """This packet is used to return an environment variable."""
    value: Optional[str]
    """The value of the environment variable, if it was set."""
@Packet(type='request')
class PacketGetenv(NamedTuple):
    """This packet is used to get an environment variable."""
    key: str
    """The environment variable to retrieve"""
    def handle(self, conn: Connection) -> None:
        """Gets the requested environment variable."""
        # os.getenv returns None for unset variables, which the Optional
        # wire type transports as "absent".
        conn.write_packet(PacketEnvironVar(value=os.getenv(self.key)))
def receive_packet(conn: Connection, request: Any = None) -> Any:
    """
    Receives the next packet from the given connection.
    Parameters
    ----------
    conn
        The connection
    request
        The corresponding request packet, if any.
    Returns
    -------
    Any
        The received packet
    Raises
    ------
    RemoteOSError
        An OSError occurred on the remote host.
    IOError
        When an issue on the connection occurs.
    ValueError
        When an PacketInvalidField is received as the response and a corresponding request packet was given.
    """
    try:
        packet_id = cast(u32, _deserialize(conn, u32))
        if packet_id not in packet_deserializers:
            raise IOError(f"Received invalid packet id '{packet_id}'")
        try:
            packet_name = packets[packet_id].__name__
        except IndexError:
            # BUGFIX: `packets` is a list, so an out-of-range id raises
            # IndexError — the previous `except KeyError` could never match.
            packet_name = f"[unknown packet with id {packet_id}]"
        _log(f"got packet header for: {packet_name}")
        packet = packet_deserializers[packet_id](conn)
        # Error responses are translated into local exceptions right here so
        # callers only ever see successful payload packets.
        if isinstance(packet, PacketOSError):
            raise RemoteOSError(msg=packet.msg, errno=packet.errno, strerror=packet.strerror)
        if isinstance(packet, PacketInvalidField):
            raise ValueError(f"Invalid value '{getattr(request, packet.field)}' given for field '{packet.field}': {packet.error_message}")
        return packet
    except struct.error as e:
        # struct.error from a short read means the peer closed mid-packet.
        raise IOError("Unexpected EOF in data stream") from e
def _main() -> None:
    """Handles all incoming packets in a loop until an invalid packet or a PacketExit is received."""
    # Restrictive default umask so files created by handlers are private.
    os.umask(0o077)
    # pylint: disable=global-statement
    global debug
    global is_server
    debug = len(sys.argv) > 1 and sys.argv[1] == "--debug"
    is_server = __name__ == "__main__"
    # The protocol runs over this process's own stdin/stdout byte streams.
    conn = Connection(sys.stdin.buffer, sys.stdout.buffer)
    while not conn.should_close:
        try:
            _log("waiting for packet")
            packet = receive_packet(conn)
        except IOError as e:
            print(f"{str(e)}. Aborting.", file=sys.stderr, flush=True)
            sys.exit(3)
        _log(f"received packet {type(packet).__name__}")
        try:
            packet.handle(conn)
        except OSError as e:
            # Report handler OSErrors to the peer instead of dying.
            conn.write_packet(PacketOSError(errno=i64(e.errno), strerror=e.strerror, msg=str(e)))
# Run the dispatcher loop when executed directly (e.g. piped over ssh).
if __name__ == '__main__':
    _main()
| 34.081047 | 138 | 0.612849 | 14,996 | 0.548641 | 0 | 0 | 14,159 | 0.518019 | 0 | 0 | 9,468 | 0.346394 |
b35e97178885b8f0b2dbae4044010a368837f069 | 1,726 | py | Python | events/importer/fill_events_language.py | Anthon98/linkedevents | 1171dc6d344fcbe8452ddee88b52719bb6cd5ed6 | [
"MIT"
] | 1 | 2021-08-19T12:44:21.000Z | 2021-08-19T12:44:21.000Z | events/importer/fill_events_language.py | Anthon98/linkedevents | 1171dc6d344fcbe8452ddee88b52719bb6cd5ed6 | [
"MIT"
] | 11 | 2020-08-21T07:22:03.000Z | 2022-03-31T09:10:44.000Z | events/importer/fill_events_language.py | Anthon98/linkedevents | 1171dc6d344fcbe8452ddee88b52719bb6cd5ed6 | [
"MIT"
] | 6 | 2020-02-11T06:58:33.000Z | 2021-11-09T12:48:19.000Z | # -*- coding: utf-8 -*-
import logging
from django import db
from django.conf import settings
from django.core.management import call_command, BaseCommand, CommandError
from django.utils.module_loading import import_string
from django_orghierarchy.models import Organization
from django.db import transaction
from events.models import Language
from .sync import ModelSyncher
from .base import Importer, register_importer
# Per module logger
logger = logging.getLogger(__name__)
#this importer fills events_language table fields (the table has already be but there is id's only)
@register_importer
class LanguageFiedsImporter(Importer):
    """Fills the name fields of the events_language table.

    The table rows may already exist with only ids populated; this importer
    upserts the localized name fields for each supported language.
    """
    name = 'fill_events_language'
    supported_languages = ['fi', 'sv']

    # Static seed data for the language rows. NOTE(review): 'en' is seeded
    # here although it is not listed in supported_languages — confirm whether
    # that is intentional.
    LANGUAGE_SET_DATA = [{
        'id': 'fi',
        'name': 'Suomi',
        'name_fi': 'Suomi',
        'name_sv': 'Finska',
        'name_en': 'Finnish',
    },
    {
        'id': 'sv',
        'name': 'Ruotsi',
        'name_fi': 'Ruotsi',
        'name_sv': 'Svenska',
        'name_en': 'Swedish',
    },
    {
        'id': 'en',
        'name': 'Englanti',
        'name_fi': 'Englanti',
        'name_sv': 'Engelska',
        'name_en': 'English',
    }]

    def setup(self):
        self.organization = ''
        self.data_source = ''
        for lang_data in self.LANGUAGE_SET_DATA:
            # update_or_create makes the import idempotent: existing rows are
            # refreshed, missing rows are created.
            language, created = Language.objects.update_or_create(
                id=lang_data['id'], defaults=lang_data)
            # Use the module logger instead of print() for consistency with
            # the rest of this module (a logger is already configured above).
            if created:
                logger.info('New language %s (%s)' % (lang_data['name_fi'], lang_data['id']))
            else:
                logger.info('Language %s (%s) already exists and it is updated now.' % (lang_data['name_fi'], lang_data['id']))
| 26.553846 | 105 | 0.570104 | 1,119 | 0.64832 | 0 | 0 | 1,138 | 0.659328 | 0 | 0 | 510 | 0.295481 |
b35f80332b012e3019899544e95458e4103fb451 | 1,524 | py | Python | megatron/logging.py | coreweave/gpt-neox | 5c9641c8b1dae16e5642d78a29e879ad312e0725 | [
"Apache-2.0"
] | 1 | 2021-04-27T21:28:25.000Z | 2021-04-27T21:28:25.000Z | megatron/logging.py | fplk/gpt-neox | 9992042ab113428022e5e91421c04917577b8e00 | [
"Apache-2.0"
] | null | null | null | megatron/logging.py | fplk/gpt-neox | 9992042ab113428022e5e91421c04917577b8e00 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright (c) 2021, EleutherAI contributors
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
class Tee:
""" Duplicate output to both stdout/err and file """
def __init__(self, file, err=False):
self.file = open(file, 'w')
self.err = err
if not err:
self.std = sys.stdout
sys.stdout = self
else:
self.std = sys.stderr
sys.stderr = self
def __del__(self):
if not self.err:
sys.stdout = self.std
else:
sys.stderr = self.std
self.file.close()
def write(self, data):
try:
self.file.write(data)
except OSError:
pass
try:
self.std.write(data)
except OSError:
pass
def flush(self):
try:
self.file.flush()
except OSError:
pass
| 28.222222 | 106 | 0.612861 | 795 | 0.521654 | 0 | 0 | 0 | 0 | 0 | 0 | 754 | 0.494751 |
b360163542b7e4208c87b59b074892580edf0734 | 2,908 | py | Python | Code/set.py | chelseacastelli/CS-1.3-Core-Data-Structures | 0a130d9a51c66c47c17b0b520e01360d48559954 | [
"MIT"
] | null | null | null | Code/set.py | chelseacastelli/CS-1.3-Core-Data-Structures | 0a130d9a51c66c47c17b0b520e01360d48559954 | [
"MIT"
] | 1 | 2020-03-09T23:24:21.000Z | 2020-03-16T23:48:31.000Z | Code/set.py | chelseacastelli/CS-1.3-Core-Data-Structures | 0a130d9a51c66c47c17b0b520e01360d48559954 | [
"MIT"
] | null | null | null | from hashtable import HashTable
class Set:
    """A set abstraction backed by a HashTable (elements are stored as keys)."""

    def __init__(self, elements=None):
        """Initialize this new set and add the given elements"""
        self.hash_set = HashTable()
        if elements is not None:
            for element in elements:
                self.add(element)

    def size(self):
        """Return the number of elements in the set"""
        return self.hash_set.size

    def contains(self, element):
        """Return True if the set contains the given element, or False.
        Running time: O(1) amortized hash-table lookup"""
        return self.hash_set.contains(element)

    def add(self, element):
        """Add given element to the set, if not already present.
        Running time: O(1) amortized; stores the element as a key"""
        if not self.hash_set.contains(element):
            self.hash_set.set(element, 1)

    def remove(self, element):
        """Remove the given element from the set, if exists, or raise KeyError.
        Running time: O(1) amortized hash-table delete"""
        if self.hash_set.contains(element):
            self.hash_set.delete(element)
        else:
            raise KeyError(f'Item not found: {element}')

    def union(self, other_set):
        """Return a new set that is the union of this set and other_set.
        Running time: O(m+n) — every element of both sets is added once"""
        new_set = Set()
        for element in self.hash_set.keys():
            new_set.add(element)
        for element in other_set.hash_set.keys():
            new_set.add(element)
        return new_set

    def intersection(self, other_set):
        """Return a new set that is the intersection of this set and other_set.
        Running time: O(m) where m = size of other_set, using O(1) lookups"""
        new_set = Set()
        for element in other_set.hash_set.keys():
            if self.contains(element):
                new_set.add(element)
        return new_set

    def difference(self, other_set):
        """Return a new set with the elements that are in exactly one of the
        two sets (i.e. the *symmetric* difference — the previous docstring
        implied an asymmetric difference, which is not what this computes).
        Running time: O(m+n)"""
        new_set = Set()
        for element in self.hash_set.keys():
            if not other_set.contains(element):
                new_set.add(element)
        for element in other_set.hash_set.keys():
            if not self.contains(element):
                new_set.add(element)
        return new_set

    def is_subset(self, other_set):
        """Return True if other_set is a subset of this set, or False.
        Running time: O(m) where m = size of other_set. PERF: membership is
        now checked via O(1) hash lookups instead of scanning a Python list
        of this set's keys for every element (previously O(n*m))."""
        for element in other_set.hash_set.keys():
            if not self.contains(element):
                return False
        return True
| 31.608696 | 88 | 0.6011 | 2,873 | 0.987964 | 0 | 0 | 0 | 0 | 0 | 0 | 1,024 | 0.352132 |
b3651538877d3892bf8c5ca4d029cf992243ceee | 2,286 | py | Python | openmdao/solvers/test/test_brent_multicomp.py | naylor-b/OpenMDAO1 | 49d82f6601b33db9bdcf7d146d030d55e3b62ef4 | [
"Apache-2.0"
] | 17 | 2018-01-11T20:13:59.000Z | 2022-03-22T03:46:05.000Z | openmdao/solvers/test/test_brent_multicomp.py | naylor-b/OpenMDAO1 | 49d82f6601b33db9bdcf7d146d030d55e3b62ef4 | [
"Apache-2.0"
] | 6 | 2017-10-19T23:14:14.000Z | 2020-11-22T17:30:57.000Z | openmdao/solvers/test/test_brent_multicomp.py | naylor-b/OpenMDAO1 | 49d82f6601b33db9bdcf7d146d030d55e3b62ef4 | [
"Apache-2.0"
] | 10 | 2018-04-12T22:13:33.000Z | 2020-05-07T10:02:59.000Z | """ Unit test for the Brent, one variable nonlinear solver. In this case, the
system has multiple components."""
import unittest
from openmdao.api import Group, Problem, Component, Brent, ScipyGMRES
from openmdao.test.util import assert_rel_error
class CompPart1(Component):
    # First half of the combined residual: r['x'] = a*x**n + part2,
    # where part2 is produced by CompPart2.
    def __init__(self):
        super(CompPart1, self).__init__()
        # Derivatives are approximated by finite differences.
        self.deriv_options['type'] = 'fd'
        self.add_param('a', val=1.)
        self.add_param('n', val=77.0/27.0)
        self.add_param('part2', val=0.)
        self.add_state('x', val=2., lower=0, upper=100)
    def solve_nonlinear(self, p, u, r):
        # Intentionally empty: the state x is converged by the group's
        # Brent solver via apply_nonlinear, not locally.
        pass
    def apply_nonlinear(self, p, u, r):
        r['x'] = p['a'] * u['x']**p['n'] + p['part2'] #+ p['b'] * u['x'] - p['c']
        # print self.pathname, "ap_nl", p['part2'], p['a'], u['x'], p['n'], r['x']
class CompPart2(Component):
    # Second half of the residual: part2 = b*x - c, fed back into CompPart1.
    def __init__(self):
        super(CompPart2, self).__init__()
        # Derivatives are approximated by finite differences.
        self.deriv_options['type'] = 'fd'
        self.add_param('b', val=1.)
        self.add_param('c', val=10.)
        self.add_param('x', val=2.)
        self.add_output('part2', val=0.)
    def solve_nonlinear(self, p, u, r):
        u['part2'] = p['b'] * p['x'] - p['c']
        # print self.pathname, "sp_nl", p['x'], p['c'], p['b'], u['part2']
class Combined(Group):
    # Group wiring both components so Brent converges the single state 'x'
    # across them; promotion of '*' connects x/part2 between the components.
    def __init__(self):
        super(Combined, self).__init__()
        self.add('p1', CompPart1(), promotes=['*'])
        self.add('p2', CompPart2(), promotes=['*'])
        # self.add('i1', IndepVarComp('a', 1.), promotes=['*'])
        # self.add('i2', IndepVarComp('b', 1.), promotes=['*'])
        # self.nl_solver = Newton()
        self.nl_solver = Brent()
        # Brent is a single-variable solver; tell it which state to drive.
        self.nl_solver.options['state_var'] = 'x'
        self.ln_solver = ScipyGMRES()
        # Evaluate p1 before p2 each iteration.
        self.set_order(('p1','p2'))
class BrentMultiCompTestCase(unittest.TestCase):
    """test to make sure brent can converge multiple components
    in a group with a single residual across them all
    """
    def test_multi_comp(self):
        p = Problem()
        p.root = Combined()
        p.setup(check=False)
        p.run()
        # Expected root of a*x**n + b*x - c with the default parameter values;
        # the residual itself must be driven to (near) zero.
        assert_rel_error(self, p.root.unknowns['x'], 2.06720359226, .0001)
        assert_rel_error(self, p.root.resids['x'], 0, .0001)
unittest.main() | 27.878049 | 82 | 0.575678 | 1,978 | 0.865267 | 0 | 0 | 0 | 0 | 0 | 0 | 676 | 0.295713 |
b36568a7f5e7ad85c77f6c9d7ddd21f9cadfde38 | 1,132 | py | Python | src/afterpay/merchant.py | nyneava/afterpay-python | ec9f9230ce321a2d9876ac93f222c24ffe7eee1a | [
"MIT"
] | null | null | null | src/afterpay/merchant.py | nyneava/afterpay-python | ec9f9230ce321a2d9876ac93f222c24ffe7eee1a | [
"MIT"
] | null | null | null | src/afterpay/merchant.py | nyneava/afterpay-python | ec9f9230ce321a2d9876ac93f222c24ffe7eee1a | [
"MIT"
] | null | null | null | from afterpay.attribute_getter import AttributeGetter
from afterpay.exceptions import AfterpayError
class Merchant(AttributeGetter):
    """
    Merchant object
    Attributes:
        redirectConfirmUrl: The consumer is redirected to this URL when the payment process is completed.
        redirectCancelUrl: The consumer is redirected to this URL if the payment process is cancelled.
    """
    attribute_list = [
        "redirectConfirmUrl",
        "redirectCancelUrl",
    ]
    def __init__(self, attributes):
        """Validate the required redirect URLs and store the attributes.
        :param attributes: dict that must contain both
            'redirectConfirmUrl' and 'redirectCancelUrl'.
        :raises AfterpayError: if either required key is missing.
        """
        # Fix: the original messages said "Contact object" — a copy/paste
        # slip; this class is Merchant.
        if "redirectConfirmUrl" not in attributes:
            raise AfterpayError("Cannot initialize Merchant object without a 'redirectConfirmUrl'")
        if "redirectCancelUrl" not in attributes:
            raise AfterpayError("Cannot initialize Merchant object without a 'redirectCancelUrl'")
        AttributeGetter.__init__(self, attributes)
    def __repr__(self):
        # Delegate to the base repr, restricted to the whitelisted keys.
        return super(Merchant, self).__repr__(self.attribute_list)
    def get_json(self):
        """Return only the whitelisted attributes as a plain dict."""
        return {
            i: super(Merchant, self).__dict__[i] for i in super(Merchant, self).__dict__ if i in self.attribute_list
        }
| 35.375 | 116 | 0.701413 | 1,029 | 0.909011 | 0 | 0 | 0 | 0 | 0 | 0 | 463 | 0.409011 |
b366133a08c1c7f546f337d1c267e087dc6444d2 | 8,442 | py | Python | src/tou/tou_optimization.py | NREL/EFFORT | b83e425d038c60f13b4c8da5b9c7caaf6b5f64d7 | [
"BSD-3-Clause"
] | null | null | null | src/tou/tou_optimization.py | NREL/EFFORT | b83e425d038c60f13b4c8da5b9c7caaf6b5f64d7 | [
"BSD-3-Clause"
] | null | null | null | src/tou/tou_optimization.py | NREL/EFFORT | b83e425d038c60f13b4c8da5b9c7caaf6b5f64d7 | [
"BSD-3-Clause"
] | null | null | null | # Standard imports
import os
import logging
import json
# Third-party imports
from pyomo.environ import SolverFactory
import pandas as pd
import matplotlib.pyplot as plt
# Internal imports
#from tou_model import model
from tou.tou_model_price import model
from generate_profile.constants import LOG_FORMAT
'''
Obtained from DERC website (http://www.derc.gov.in/ordersPetitions/orders/Tariff/Tariff%20Order/FY%202019-20/Tariff%20Order%20FY%202019-20/Tariff%20Orders%202019-20/BRPL.pdf
, page 321, SECI solar rajasthan : Rs. 5.5/kWh, SECI wind : Rs. 2.52/kWh
average renewable price: (6.75*5.5+10.25*2.52)/(6.75+10.25) = Rs. 3.7/kWh)
'''
RENEWABLE_PRICE = 3700
'''
Network upgrade avoidance cost obtained from BRPL is Rs 2 crore per MW
'''
CAPACITY_UPGRADE_COST = 20000000
class RunModel:
    """End-to-end driver for the time-of-use (TOU) pricing optimization.
    On construction it prepares the Pyomo ``.dat`` input, solves the
    model, collects per-timestep results into ``self.outputs`` (a
    DataFrame), writes CSV/JSON summaries, and optionally plots the
    load profiles.
    """
    def __init__(self, **kwargs):
        """Run the whole workflow; ``kwargs`` becomes ``self.config``.
        Keys read by this class: ``data_path``, ``export_path``,
        ``solver``, ``on_time_list``, ``num_of_hours``, ``plot_result``.
        """
        # setup logger
        self.logger = logging.getLogger()
        logging.getLogger('matplotlib.font_manager').disabled = True
        logging.basicConfig(format=LOG_FORMAT,level='DEBUG')
        self.logger.info(f'optimization initiallized with dictionary {kwargs}')
        self.config = kwargs
        self.prepare_input()
        instance, results = self.run(self.modified_dat_filepath, self.solver_name, model)
        col_headers=['time','Net load MW','Original Load MW','New Load MW',
        'Original Price Rs/MWh','New Price Rs/MWh','Off peak Rs/MWh', 'Energy Price (Rs./MWh)' ,'On Peak', 'Off Peak']
        self.outputs=pd.DataFrame(data=[],columns=col_headers)
        # Collect one output row per model timestep.
        for t in instance.time:
            outputs_temp=pd.DataFrame([list([
                t,
                instance.netLoad[t],
                instance.Load[t],
                instance.Load_Response[t].expr(),
                instance.baseprice.value,
                instance.price_on_peak.value,
                instance.price_off_peak.value,
                instance.energy_price[t],
                instance.tier_time_on_peak[t],
                instance.tier_time_off_peak[t] #instance.price_on_peak.value #instance.price_var[t].expr()
            ])], columns=col_headers)
            self.outputs=self.outputs.append(outputs_temp)
        if 'export_path' not in self.config:
            self.outputs.to_csv('output.csv')
        else:
            self.outputs.to_csv(self.config['export_path'])
        self.logger.info(f"Peak reduced (MW): {-max(self.outputs['New Load MW'].tolist())+max(self.outputs['Original Load MW'].tolist())}")
        self.logger.info(f"Price difference (Rs.): {self.outputs['New Price Rs/MWh'].tolist()[0]-self.outputs['Off peak Rs/MWh'].tolist()[0]}")
        # NOTE(review): get_result() reads self.config['export_path']
        # unconditionally, while the CSV export above tolerates its
        # absence — confirm 'export_path' is always supplied.
        self.get_result()
        if 'plot_result' in self.config:
            if self.config['plot_result']:
                self.plot_loadprofile()
    def prepare_input(self):
        """Build the modified dat file (``data_mod.dat``), select the
        solver name, and optionally regenerate the on/off-peak tier CSVs.
        """
        if 'data_path' not in self.config:
            ''' Read the data.dat file from directoy of currently running file'''
            main_dir = os.path.join(os.path.dirname(__file__))
            dat_filepath = main_dir+r'/data_price.dat' #main_dir+r'/data.dat'
            # NOTE(review): '\data' mixes a Windows separator into the
            # path (and '\d' is an invalid escape sequence) — confirm
            # this is intended on the target platform.
            main_dir = main_dir + '\data'
        else:
            main_dir = self.config['data_path']
            dat_filepath = os.path.join(self.config['data_path'],'data.dat')
        if not os.path.exists(dat_filepath):
            self.logger.error(f"{dat_filepath} does not exist!!")
        with open(dat_filepath, 'r') as file:
            filedata = file.read()
        ''' Replace DIRECTORY placeholder with current directory'''
        filedata = filedata.replace('DIRECTORY', main_dir)
        ''' Write modified dat file'''
        with open(os.path.join(main_dir,'data_mod.dat'), 'w') as file:
            file.write(filedata)
        self.logger.info(f"Modified datfile successfully written - {os.path.join(main_dir,'data_mod.dat')}")
        self.modified_dat_filepath = os.path.join(main_dir,'data_mod.dat')
        if 'solver' in self.config:
            self.solver_name = self.config['solver']
        else:
            self.solver_name = 'ipopt' #'glpk'
        self.logger.info(SolverFactory(self.solver_name).available())
        ''' Update on-peak and off peak time csv files'''
        if 'on_time_list' in self.config:
            # Hours listed in on_time_list (per 24h day) become on-peak;
            # all remaining hours stay off-peak.
            on_peak_time = [0]*self.config['num_of_hours']
            off_peak_time = [1]*self.config['num_of_hours']
            for index in range(self.config['num_of_hours']):
                hour_index = index%24
                if hour_index in self.config['on_time_list']:
                    on_peak_time[index] = 1
                    off_peak_time[index] = 0
            df1 = pd.DataFrame({'time': list(range(self.config['num_of_hours'])),'tier_time_on_peak':on_peak_time})
            df2 = pd.DataFrame({'time': list(range(self.config['num_of_hours'])),'tier_time_off_peak':off_peak_time})
            df1.to_csv(os.path.join(main_dir,'tier_time_on_peak.csv'),index=False)
            df2.to_csv(os.path.join(main_dir,'tier_time_off_peak.csv'),index=False)
    def run(self,dat_filename, solver_name, model):
        """Instantiate the Pyomo model from ``dat_filename``, solve it,
        and return ``(instance, results)`` with solutions stored into
        ``results``.
        """
        instance = model.create_instance(dat_filename)
        results = SolverFactory(solver_name).solve(instance, tee=False)
        #results.write()
        instance.solutions.store_to(results)
        return (instance, results)
    def get_output_dataframe(self):
        """Return the per-timestep results DataFrame built in __init__."""
        return self.outputs
    def get_result(self):
        """Compute summary metrics (customer bills, utility costs, peak
        reduction, price ratio), log them, write them as JSON next to
        the CSV export, and return the summary dict.
        """
        on_peak_price = self.outputs['New Price Rs/MWh'].tolist()[0]
        off_peak_price = self.outputs['Off peak Rs/MWh'].tolist()[0]
        base_price = self.outputs['Original Price Rs/MWh'].tolist()[0]
        new_load_profile = self.outputs['New Load MW'].tolist()
        net_load_profile = self.outputs['Net load MW'].tolist()
        original_profile = self.outputs['Original Load MW'].tolist()
        on_peak = self.outputs['On Peak'].tolist()
        off_peak = self.outputs['Off Peak'].tolist()
        energy_price = self.outputs['Energy Price (Rs./MWh)'].tolist()
        # Renewable share = original load minus net load; that share is
        # costed at RENEWABLE_PRICE below.
        renewable_obligation = [x[0]-x[1] for x in zip(original_profile,net_load_profile)]
        peak_reduced = max(self.outputs['Original Load MW'].tolist()) - max(self.outputs['New Load MW'].tolist())
        price_diff = on_peak_price - off_peak_price
        # Guard against division by zero for the price ratio.
        if off_peak_price!=0:
            price_ratio = on_peak_price/off_peak_price
        else:
            price_ratio = None
        utility_new_energy_cost = sum([(x[0]-x[1])*x[2] + x[1]*RENEWABLE_PRICE for x in zip(new_load_profile,renewable_obligation,energy_price)])
        utility_fixed_cost_saving = peak_reduced * CAPACITY_UPGRADE_COST
        # NOTE(review): utility_new_cost is computed but never used.
        utility_new_cost= utility_new_energy_cost - utility_fixed_cost_saving
        utility_original_energy_cost = sum([(x[0]-x[1])*x[2]+ x[1]*RENEWABLE_PRICE for x in zip(original_profile,renewable_obligation,energy_price)])
        # Per-timestep customer tariff: on-peak price when the on-peak
        # indicator is 1, off-peak price when the off-peak indicator is 1.
        customer_price_series = [on_peak_price*x[0] + off_peak_price*x[1] for x in zip(on_peak,off_peak)]
        customers_original_bill = sum(x*base_price for x in original_profile)
        customers_new_bill = sum([x[0]*x[1] for x in zip(new_load_profile,customer_price_series)])
        customers_saving = customers_original_bill - customers_new_bill
        self.singleoutput = {
            'Customers original cost (Rs in Million)': customers_original_bill/1000000,
            'Customers New cost (Rs in Million)': customers_new_bill/1000000,
            'Customers Saving (Rs in Million)': customers_saving/1000000,
            'Utility original variable (energy) cost (Rs in Million)': utility_original_energy_cost/1000000,
            'Utility new variable (energy) cost (Rs in Million)': utility_new_energy_cost/1000000,
            'Utility variable cost saving (Rs in Million)' : (utility_original_energy_cost-utility_new_energy_cost)/1000000,
            'Utility fixed cost saving (Rs in Million)': utility_fixed_cost_saving/1000000,
            'Peak load reduction (MW)': peak_reduced,
            'On-peak price (Rs./MWh)': self.outputs['New Price Rs/MWh'].tolist()[0],
            'Off-peak price (Rs./MWh)':self.outputs['Off peak Rs/MWh'].tolist()[0],
            'On-peak off peak difference': price_diff,
            'On-peak off peak ratio': price_ratio
        }
        self.logger.info(f"{self.singleoutput}")
        # The JSON summary replaces the .csv extension of the export path.
        file_name = self.config['export_path'].replace('.csv','.json')
        with open(file_name,'w') as json_file:
            json.dump(self.singleoutput,json_file)
        return self.singleoutput
    def plot_loadprofile(self):
        """Plot original vs. optimized hourly load with matplotlib."""
        new_load_profile = self.outputs['New Load MW'].tolist()
        original_profile = self.outputs['Original Load MW'].tolist()
        plt.plot(range(len(new_load_profile)),new_load_profile,'--or',label='new')
        plt.plot(range(len(original_profile)),original_profile,'--og',label='old')
        plt.ylabel('load (MW)')
        plt.xlabel('time index')
        plt.legend()
        plt.show()
if __name__ == '__main__':
    # Run the TOU optimization end-to-end with default settings.
    instance = RunModel()
| 40.392344 | 173 | 0.684672 | 7,618 | 0.902393 | 0 | 0 | 0 | 0 | 0 | 0 | 2,573 | 0.304786 |
b366f304d22f4d1525ccf2ccb3bef5468f62fabf | 4,036 | py | Python | limonero/util/upload.py | eubr-bigsea/limonero | 54851b73bb1e4f5626b3d38ea7eeb50f3ed2e3c5 | [
"Apache-2.0"
] | 1 | 2018-01-01T20:35:43.000Z | 2018-01-01T20:35:43.000Z | limonero/util/upload.py | eubr-bigsea/limonero | 54851b73bb1e4f5626b3d38ea7eeb50f3ed2e3c5 | [
"Apache-2.0"
] | 37 | 2017-02-24T17:07:25.000Z | 2021-09-02T14:49:19.000Z | limonero/util/upload.py | eubr-bigsea/limonero | 54851b73bb1e4f5626b3d38ea7eeb50f3ed2e3c5 | [
"Apache-2.0"
] | 2 | 2019-11-05T13:45:45.000Z | 2020-11-13T22:02:37.000Z | # -*- coding: utf-8 -*-
import logging
import uuid
from gettext import gettext
from py4j.compat import bytearray2
from urllib.parse import urlparse
from limonero.py4j_init import create_gateway
WRONG_HDFS_CONFIG = gettext(
"Limonero HDFS access not correctly configured (see "
"config 'dfs.client.use.datanode.hostname')")
log = logging.getLogger(__name__)
def get_tmp_path(jvm, hdfs, parsed, filename):
    """Return the HDFS staging directory for this file's chunks,
    creating it on the filesystem if it does not exist yet.
    """
    staging_dir = f"{parsed.path.replace('//', '/')}/tmp/upload/{filename}"
    staging_path = jvm.org.apache.hadoop.fs.Path(staging_dir)
    if not hdfs.exists(staging_path):
        hdfs.mkdirs(staging_path)
    return staging_path
def create_hdfs_chunk(chunk_number, filename, storage, use_hostname,
                      gateway_port):
    """Resolve the HDFS path of one upload chunk, ensuring the staging
    directory exists, and return the ``(chunk_path, hdfs)`` pair.
    """
    parsed = urlparse(storage.url)
    conf, jvm = create_gateway_and_hdfs_conf(use_hostname, gateway_port)
    str_uri = f'{parsed.scheme}://{parsed.hostname}:{parsed.port}'
    hdfs = jvm.org.apache.hadoop.fs.FileSystem.get(
        jvm.java.net.URI(str_uri), conf)
    staging_dir = get_tmp_path(jvm, hdfs, parsed, filename)
    chunk_name = f'{staging_dir.toString()}/{filename}.part{chunk_number:09d}'
    chunk_path = jvm.org.apache.hadoop.fs.Path(chunk_name)
    return chunk_path, hdfs
def write_chunk(jvm, chunk_number, filename, storage, file_data,
                conf):
    """
    Write a single uploaded chunk into the HDFS staging directory.
    Chunks are provided by the interface as binary blocks of data.
    :param jvm: Py4J JVM view used to reach the Hadoop classes.
    :param chunk_number: index of this chunk within the upload.
    :param filename: name of the file being uploaded.
    :param storage: storage model whose ``url`` locates the filesystem.
    :param file_data: raw bytes of this chunk.
    :param conf: Hadoop Configuration used to open the filesystem.
    :return: ``(file_data, hdfs, str_uri, tmp_path, counter)`` where
        ``counter`` is the number of chunk files currently present in
        the staging directory.
    """
    # Normalize a trailing slash so the URI built below is consistent.
    storage_url = storage.url if storage.url[-1] != '/' \
        else storage.url[:-1]
    parsed = urlparse(storage_url)
    # Local ``file://`` storages carry only a path; remote ones a
    # host:port authority.
    if parsed.scheme == 'file':
        str_uri = '{proto}://{path}'.format(
            proto=parsed.scheme, path=parsed.path)
    else:
        str_uri = '{proto}://{host}:{port}'.format(
            proto=parsed.scheme, host=parsed.hostname,
            port=parsed.port)
    uri = jvm.java.net.URI(str_uri)
    hdfs = jvm.org.apache.hadoop.fs.FileSystem.get(uri, conf)
    log.info('================== %s', uri)
    tmp_path = get_tmp_path(jvm, hdfs, parsed, filename)
    chunk_filename = "{tmp}/{file}.part{part:09d}".format(
        tmp=tmp_path.toString(), file=filename, part=chunk_number)
    chunk_path = jvm.org.apache.hadoop.fs.Path(chunk_filename)
    output_stream = hdfs.create(chunk_path)
    # Fix: close the HDFS stream even when the write raises; otherwise
    # the open output stream (and its lease on the chunk file) leaks.
    try:
        block = bytearray2(file_data)
        output_stream.write(block, 0, len(block))
    finally:
        output_stream.close()
    # Checks if all file's parts are present
    full_path = tmp_path
    list_iter = hdfs.listFiles(full_path, False)
    counter = 0
    while list_iter.hasNext():
        counter += 1
        list_iter.next()
    return file_data, hdfs, str_uri, tmp_path, counter
def create_gateway_and_hdfs_conf(use_datanode, gateway_port):
    """Launch the Py4J gateway and return ``(conf, jvm)``.
    ``conf`` is a Hadoop Configuration whose
    'dfs.client.use.datanode.hostname' flag mirrors ``use_datanode``.
    """
    jvm = create_gateway(log, gateway_port).jvm
    hadoop_conf = jvm.org.apache.hadoop.conf.Configuration()
    flag = "true" if use_datanode else "false"
    hadoop_conf.set('dfs.client.use.datanode.hostname', flag)
    return hadoop_conf, jvm
def merge_chunks(conf, filename, full_path, hdfs, jvm, str_uri,
                 instance_name):
    """
    Merge already uploaded chunks in a single file using HDFS API.
    """
    # Prefix with a random hex id to avoid collisions in the data dir.
    final_filename = '{}_{}'.format(uuid.uuid4().hex, filename)
    # time to merge all files
    target_path = jvm.org.apache.hadoop.fs.Path('{}/{}/{}/{}'.format(
        str_uri, '/limonero/data', instance_name, final_filename))
    result_code = 200
    result = None
    if hdfs.exists(target_path):
        result = {'status': 'error', 'message': gettext('File already exists')}
        result_code = 500
    # NOTE(review): copyMerge still runs even when the 500 branch above
    # fired (no early return) — confirm that is intended.
    # copyMerge(..., True, ...) deletes the staging chunks after merging.
    jvm.org.apache.hadoop.fs.FileUtil.copyMerge(
        hdfs, full_path, hdfs, target_path, True, conf, None)
    return result_code, result, target_path
| 32.548387 | 79 | 0.6556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 848 | 0.210109 |
b366f48606841fe29fdccd25f0931d6c51909f80 | 14,396 | py | Python | DailyProgrammer/DP20160722C.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | 2 | 2020-12-23T18:59:22.000Z | 2021-04-14T13:16:09.000Z | DailyProgrammer/DP20160722C.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | null | null | null | DailyProgrammer/DP20160722C.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | null | null | null | """
[2016-07-22] Challenge #276 [Hard] ∞ Loop solver part 2
https://www.reddit.com/r/dailyprogrammer/comments/4u3e96/20160722_challenge_276_hard_loop_solver_part_2/
This is the same challenge as /u/jnazario's excellent [∞ Loop
solver](https://www.reddit.com/r/dailyprogrammer/comments/4rug59/20160708_challenge_274_hard_loop_solver/) but for
larger inputs.
The input format is different, as you will be given a presolved partial grid, where each cell is the possible rotations
that line up with a possible rotation of neighbour cells.
The challenge is to find ALL of the valid grid solutions
# 20x20 input visualization
┌─┬─────┬────┬───────┬────┬───┬───┬────┬─────┬────────┬────┬────────┬────┬─────┬──┬──┬──┬──┬──┬──┐
│6│12 │6 │10 │10 │12 │6 │12 │6 │12 │6 │14 │12 │6 │10│10│10│14│14│12│
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│7│13 │3 │14 │12 │3 │9 │7 │15 │9 │5 │7 │11 │9 │6 │12│6 │13│5 │5 │
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│7│9 │6 │9 │7 │10 │10 │9 │7 │10 │13 │7 │10 │10 │9 │5 │5 │5 │3 │9 │
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│5│6 │15 │12 │5 │6 │14 │14 │15 │12 │5 │3 │10 │14 │10│11│11│15│10│12│
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│7│13 │3 │9 │3 │15 │11 │13 │7 │9 │7 │12 │6 │11 │10│10│10│9 │6 │9 │
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│7│11 │14 │14 │14 │9 │6 │15 │15 │12 │5 │3 │15 │14 │14│12│6 │12│3 │12│
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│5│6 │9 │3 │9 │6 │9 │5 │7 │13 │5 │6 │15 │15 │15│13│7 │13│6 │13│
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│5│5 │6 │10 │10 │13 │6 │15 │15 │11 13 │13 7│7 13 11 │11 7│11 │15│11│9 │3 │15│9 │
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│7│9 │5 │6 │10 │11 │9 │7 │9 │6 3 │11 │11 13 14│14 7│10 │11│14│12│6 │15│12│
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│5│6 │9 │3 │12 │6 │10 │9 │6 │13 11 14│6 12│14 7 │9 │6 │10│9 │7 │9 │5 │5 │
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│7│11 │14 │10 │9 │7 │10 │14 │13 11│7 14 │11 │11 │10 │13 │6 │14│9 │6 │13│5 │
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│7│12 │7 │12 │6 │13 │6 │9 │3 6 │13 │6 │10 │12 │7 │11│11│14│15│13│5 │
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│7│13 11│3 9 │11 13 7│13 7│3 9│9 3│6 12│14 7 │15 │11 │10 │9 │3 │14│10│9 │3 │9 │5 │
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│7│13 14│6 12│14 7 │11 │12 │6 │13 │5 │3 │14 │12 │6 │12 │5 │6 │14│14│12│5 │
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│5│3 │15 │11 │12 │7 │9 │7 │11 │12 │5 │7 │9 │7 │15│11│13│7 │13│5 │
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│5│6 │9 │6 │11 │13 │6 │13 │6 │15 │9 │7 │10 │13 │3 │10│9 │3 │15│13│
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│3│13 │6 │15 │12 │7 │15 │9 │3 │13 │6 │13 11 │6 12│11 7 │14│10│12│6 │15│9 │
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│6│13 │3 │11 │15 │15 │13 │6 │10 │15 │11 │11 14 │11 │14 11│13│6 │15│9 │3 │12│
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│7│11 │12 │6 │15 │9 │5 │7 │14 │9 │6 │14 13 │12 6│7 14 │9 │5 │7 │12│6 │13│
├─┼─────┼────┼───────┼────┼───┼───┼────┼─────┼────────┼────┼────────┼────┼─────┼──┼──┼──┼──┼──┼──┤
│3│10 │9 │3 │11 │10 │11 │11 │11 │10 │9 │3 │11 │11 │10│11│11│9 │3 │9 │
└─┴─────┴────┴───────┴────┴───┴───┴────┴─────┴────────┴────┴────────┴────┴─────┴──┴──┴──┴──┴──┴──┘
1. The numbers in each cell are indexes (0-based) into the looper tiles `╹╺┗╻┃┏┣╸┛━┻┓┫┳╋` (leading index 0 is space)
2. The 4 digit binary representation of each index indicates whether there is a tick that points `WSEN`
3. Cells with a single index are forced moves. Cells with multiple indexes are potential moves.
4. The general strategy for finding all valid final (ones with single indexes per cell) grids is to repeatedly split
the grid based on one multiple cell (where each grid has a unique index in that cell), and then find all forced moves
in each independent grid.
5. A forced move by row is one where the left cells' East tick is equal to the right cell's West tick. By column, the
top cell's South tick is equal to the lower cell's North tick.
**input** (each row separated by LF, each cell by comma, each candidate by space)
20x20
6,12,6,10,10,12,6,12,6,12,6,14,12,6,10,10,10,14,14,12
7,13,3,14,12,3,9,7,15,9,5,7,11,9,6,12,6,13,5,5
7,9,6,9,7,10,10,9,7,10,13,7,10,10,9,5,5,5,3,9
5,6,15,12,5,6,14,14,15,12,5,3,10,14,10,11,11,15,10,12
7,13,3,9,3,15,11,13,7,9,7,12,6,11,10,10,10,9,6,9
7,11,14,14,14,9,6,15,15,12,5,3,15,14,14,12,6,12,3,12
5,6,9,3,9,6,9,5,7,13,5,6,15,15,15,13,7,13,6,13
5,5,6,10,10,13,6,15,15,11 13,13 7,7 13 11,11 7,11,15,11,9,3,15,9
7,9,5,6,10,11,9,7,9,6 3,11,11 13 14,14 7,10,11,14,12,6,15,12
5,6,9,3,12,6,10,9,6,13 11 14,6 12,14 7,9,6,10,9,7,9,5,5
7,11,14,10,9,7,10,14,13 11,7 14,11,11,10,13,6,14,9,6,13,5
7,12,7,12,6,13,6,9,3 6,13,6,10,12,7,11,11,14,15,13,5
7,13 11,3 9,11 13 7,13 7,3 9,9 3,6 12,14 7,15,11,10,9,3,14,10,9,3,9,5
7,13 14,6 12,14 7,11,12,6,13,5,3,14,12,6,12,5,6,14,14,12,5
5,3,15,11,12,7,9,7,11,12,5,7,9,7,15,11,13,7,13,5
5,6,9,6,11,13,6,13,6,15,9,7,10,13,3,10,9,3,15,13
3,13,6,15,12,7,15,9,3,13,6,13 11,6 12,11 7,14,10,12,6,15,9
6,13,3,11,15,15,13,6,10,15,11,11 14,11,14 11,13,6,15,9,3,12
7,11,12,6,15,9,5,7,14,9,6,14 13,12 6,7 14,9,5,7,12,6,13
3,10,9,3,11,10,11,11,11,10,9,3,11,11,10,11,11,9,3,9
**output**
to save space just provide the number of distinct valid grids. (I get 12)
# 30x30 challenges
thanks to /u/bearific for creating a generator for this challenge. The above and larger inputs are available here:
https://gist.github.com/FrankRuis/0aa761b9562a32ea7fdcff32f1768eb0
"reduced input" (above) formats of the 30x30 challenges: (you may use the original input format and solve these anyway
you like)
**first input**
6,10,14,12,6,14,10,12,6,12,6,14,10,12,6,10,14,14,14,12,6,14,12,6,10,14,10,12,6,12
3,14,13,7,13,3,14,15,15,15,11,13,6,9,5,6,11,9,5,3,15,15,13,5,6,15,10,15,13,5
6,11,15,15,15,10,13 11,7 11,15,9,6,9,7,12,3,13,6,10,11,14,11,15,15,11,15,9,6,13,7,13
7,12,3,13,5,6,13 14,7 14,11,14,9,6,15,11,14,15,11,10,12,7,12,7,13 11,6 12,13 7,6 12,11 7,13,5,5
7,11,14,9,3,9,7,13,6,15,14,13,5,6,9,3,10,10,13,3,15,13,3 6,15,15,11 13,14 7,13,5,5
7,14,13,6,14,12,5,7,15,15,9,3,9,5,6,12,6,14,9,6,15,13 11,6 12 3 9,9 3,7 13,14 7,15,13,7,9
7,15,13,5,5,3,9,5,5,5,6,14,14,9,3,11,11,13,6,15,15,13 14,7 11 14,14,15,15,15,11,11,12
7,11,9,5,7,12,6,13,3,13,3,13,3,10,10,10,14,11 7,9,5,3,11,13 14,7 11,15,11,11,10,12,5
5,6,10,9,5,5,5,7,12,5,6,9,6,12,6,14,13 11,6 12 3 9,12 6,7 13,12 6,6 12,9 3,7 14,15,10,12,6,15,13
7,9,6,12,3,9,5,5,3,9,3,14,11 13,11 13 7,11 13 7,13 11 7,5 10,7 13 14,11 7,13,5,7,14,11,9,6,11,13,3,13
5,6,11,9,6,12,3,13,6,14,14,13,6,10,14 11,11,11 14,13,6 3,15,11,15,9,6,12,7,10,9,6,13
3,11,10,10,9,3,10,15,9,7,15,13,5,6,13 14,6 12,14 13 7,9 3,7 13 14,11 7,14,9,6,11,13,3,12,6,11,9
6,10,14,10,10,12,6,15,10,11 13,13 7,7 11,13,5,5,3,11,14,13,6 3,15,12,5,6,15,12,5,7,14,12
5,6,9,6,14,11,15,15,10,12 9,7,11 14,13,7,13,6,12,5,3,11 14,9,5,5,7,15,15,11,15,15,9
3,15,14,15,13,6,9,5,6,13 11 14,3 9,14 13 7,13 7,3 9,11 7,11,15,9,6,14 11,10,11,15,11 13,11 7,13,6,11,11,12
6,15,15,15,11,15,14,11 13,13 7,7 14,12,3,15,10,12 9,6,9,6,11 13,11 7 14,10,12,3,14 13,14 7,15,11,12,6,13
3,9,7,9,6,13 11,7 13 11,14 11 7,15,11,15,12,5,6,13 14,3 9,12 6,3 9,10 5,14 11 7,10,15,14,13,5,3,10,15,11,13
6,14,11,14,15,13 11 14,7 13 11 14,11 13 7 14,11 7,10,9,3,11,13,5,6,13,6,12 9,3 6,10,13,3,13,5,6,12,3,10,9
3,13,6,13,3,13 14,7 13 14,14 13 11 7,14 11 7,14,12,6,10,9,5,5,3,15,11 13 14,14 7,10,13,6,15,11,13,5,6,10,12
6,15,9,3,14,9,7,13 14,7 14,15,11,11,10,12,3,15,12,3,14 13,9 3,6 12,11 7,13,7,10,11,11,15,12,5
7,13,6,10,15,14,9,5,3,11,14,12,6,9,6,9,3,14,9,6,15,12 9,5,7,10,10,12,3,13,5
7,9,7,14,11,11,12,5,6,10,11,13,7,12,5,6,12,7,10,11,13 11,3 9 6 12,13 11 7,7 11,14,14,11,12,5,5
5,6,13,7,12,6,13,5,3,14,14,13,3,15,11,11,11,13,6,12,7 13 14,10 5,11 7 14,9 12,3,15,14,11,11,13
7,9,7,9,5,7,11,15,14,13,5,7,12,3,10,14,12,3,13 11,3 9,9 3,6 12 3 9,14 13 7,14 7,10,11,15,14,12,5
7,12,3,10,11,15,14,11,9,3,9,3,15,12,6,13,3,10,13 14,6 12,12 6,7 14,11,15,14,12,3,13,5,5
3,9,6,10,12,7,9,6,14,10,12,6,13,7,15,15,12,6,9,7,15,11,12,3,13,3,12,3,9,5
6,12,7,14,9,7,14,9,7,12,3,9,3,15,11 13,11 7,9,5,6,15,15,14,15,12,3,14,13,6,14,9
7,15,13,7,10,11,11,10,13,5,6,10,14,13,6 3,14 11,10,9,5,5,3,13,5,5,6,15,11,15,15,12
7,9,3,13,6,14,12,6,15,11,11,10,11,11,13 14,7 14,14,12,7,15,12,7,15,13,3,13,6,11,15,13
3,10,10,9,3,9,3,9,3,10,10,10,10,10,9,3,9,3,9,3,11,9,3,11,10,9,3,10,11,9
**input 2**
6,10,14,12,6,14,10,12,6,12,6,14,10,12,6,10,14,14,14,12,6,14,12,6,10,14,10,12,6,12
3,14,13,7,13,3,14,15,15,15,11,13,6,9,5,6,11,9,5,3,15,15,13,5,6,15,10,15,13,5
6,11,15,15,15,10,13 11,7 11,15,9,6,9,7,12,3,13,6,10,11,14,11,15,15,11,15,9,6,13,7,13
7,12,3,13,5,6,13 14,7 14,11,14,9,6,15,11,14,15,11,10,12,7,12,7,13 11,6 12,13 7,6 12,11 7,13,5,5
7,11,14,9,3,9,7,13,6,15,14,13,5,6,9,3,10,10,13,3,15,13,3 6,15,15,13 11,14 7,13,5,5
7,14,13,6,14,12,5,7,15,15,9,3,9,5,6,12,6,14,9,6,15,11 13,12 6 9 3,3 9,13 7,7 14,15,13,7,9
7,15,13,5,5,3,9,5,5,5,6,14,14,9,3,11,11,13,6,15,15,14 13,11 7 14,14,15,15,15,11,11,12
7,11,9,5,7,12,6,13,3,13,3,13,3,10,10,10,14,11 7,9,5,3,11,14 13,11 7,15,11,11,10,12,5
5,6,10,9,5,5,5,7,12,5,6,9,6,12,6,14,13 11,6 12 3 9,12 6,7 13,12 6,6 12,9 3,14 7,15,10,12,6,15,13
7,9,6,12,3,9,5,5,3,9,3,14,13 11,7 13 11,13 11 7,7 13 11,5 10,13 7 14,7 11,13,5,7,14,11,9,6,11,13,3,13
5,6,11,9,6,12,3,13,6,14,14,13,6,10,11 14,11,11 14,13,6 3,15,11,15,9,6,12,7,10,9,6,13
3,11,10,10,9,3,10,15,9,7,15,13,5,6,14 13,12 6,14 7 13,9 3,7 13 14,11 7,14,9,6,11,13,3,12,6,11,9
6,10,14,10,10,12,6,15,10,13 11,7 13,11 7,13,5,5,3,11,14,13,6 3,15,12,5,6,15,12,5,7,14,12
5,6,9,6,14,11,15,15,10,9 12,7,14 11,13,7,13,6,12,5,3,11 14,9,5,5,7,15,15,11,15,15,9
3,15,14,15,13,6,9,5,6,14 13 11,9 3,7 13 14,13 7,3 9,11 7,11,15,9,6,14 11,10,11,15,13 11,7 11,13,6,11,11,12
6,15,15,15,11,15,14,13 11,7 13,7 14,12,3,15,10,12 9,6,9,6,13 11,7 11 14,10,12,3,13 14,7 14,15,11,12,6,13
3,9,7,9,6,13 11,7 13 11,11 7 14,15,11,15,12,5,6,13 14,9 3,6 12,9 3,5 10,7 11 14,10,15,14,13,5,3,10,15,11,13
6,14,11,14,15,13 11 14,7 13 11 14,14 13 11 7,11 7,10,9,3,11,13,5,6,13,6,9 12,3 6,10,13,3,13,5,6,12,3,10,9
3,13,6,13,3,13 14,7 13 14,13 11 7 14,14 7 11,14,12,6,10,9,5,5,3,15,14 13 11,14 7,10,13,6,15,11,13,5,6,10,12
6,15,9,3,14,9,7,13 14,7 14,15,11,11,10,12,3,15,12,3,13 14,3 9,12 6,7 11,13,7,10,11,11,15,12,5
7,13,6,10,15,14,9,5,3,11,14,12,6,9,6,9,3,14,9,6,15,9 12,5,7,10,10,12,3,13,5
7,9,7,14,11,11,12,5,6,10,11,13,7,12,5,6,12,7,10,11,11 13,12 6 9 3,7 13 11,11 7,14,14,11,12,5,5
5,6,13,7,12,6,13,5,3,14,14,13,3,15,11,11,11,13,6,12,14 13 7,5 10,11 7 14,12 9,3,15,14,11,11,13
7,9,7,9,5,7,11,15,14,13,5,7,12,3,10,14,12,3,13 11,3 9,9 3,3 9 6 12,14 13 7,7 14,10,11,15,14,12,5
7,12,3,10,11,15,14,11,9,3,9,3,15,12,6,13,3,10,13 14,6 12,12 6,14 7,11,15,14,12,3,13,5,5
3,9,6,10,12,7,9,6,14,10,12,6,13,7,15,15,12,6,9,7,15,11,12,3,13,3,12,3,9,5
6,12,7,14,9,7,14,9,7,12,3,9,3,15,13 11,7 11,9,5,6,15,15,14,15,12,3,14,13,6,14,9
7,15,13,7,10,11,11,10,13,5,6,10,14,13,3 6,11 14,10,9,5,5,3,13,5,5,6,15,11,15,15,12
7,9,3,13,6,14,12,6,15,11,11,10,11,11,14 13,14 7,14,12,7,15,12,7,15,13,3,13,6,11,15,13
3,10,10,9,3,9,3,9,3,10,10,10,10,10,9,3,9,3,9,3,11,9,3,11,10,9,3,10,11,9
"""
def main():
    """Entry point stub — no solution is implemented for this challenge."""
    pass
if __name__ == "__main__":
    # Invoke the (currently empty) entry point when run as a script.
    main()
| 88.319018 | 119 | 0.427549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19,332 | 0.997214 |
b367137798db5a8033c70626264857c8575384f8 | 851 | py | Python | account/urls.py | bopopescu/storyboard | 0258fd6f80b6bbd9d0ca493cbaaae87c3a6d16e2 | [
"MIT"
] | null | null | null | account/urls.py | bopopescu/storyboard | 0258fd6f80b6bbd9d0ca493cbaaae87c3a6d16e2 | [
"MIT"
] | null | null | null | account/urls.py | bopopescu/storyboard | 0258fd6f80b6bbd9d0ca493cbaaae87c3a6d16e2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
urls.py
Created by Darcy Liu on 2012-03-03.
Copyright (c) 2012 Close To U. All rights reserved.
"""
from django.conf.urls import *
from django.contrib.auth import views as auth_views
urlpatterns = patterns('account.views',
(r'^$','index'),
(r'^signup$','signup'),
(r'^signin$','signin'),
(r'^signout$','signout'),
(r'^logs$','logs'),
(r'^avatar/(?P<username>\w+).png$','avatar'),
(r'^custom_style$','custom_style'),
(r'^password_change$', 'password_change'),
(r'^password_change_done$', 'password_change_done'),
(r'^password/change/$',auth_views.password_change),
(r'^password/change/done/$',auth_views.password_change_done),
(r'^password/$','index'),
) | 34.04 | 73 | 0.564042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 467 | 0.548766 |
b36b45596be0c00b3ce27b76dfcdb7d0c441a008 | 693 | py | Python | webdriver_setup/opera.py | xloem/webdriver-setup | 3cd091559ea6d3017995ac7f252c2c107ff1f44c | [
"Apache-2.0"
] | 1 | 2020-12-06T13:27:45.000Z | 2020-12-06T13:27:45.000Z | webdriver_setup/opera.py | xloem/webdriver-setup | 3cd091559ea6d3017995ac7f252c2c107ff1f44c | [
"Apache-2.0"
] | 1 | 2021-11-28T14:03:23.000Z | 2021-11-28T14:03:23.000Z | webdriver_setup/opera.py | xloem/webdriver-setup | 3cd091559ea6d3017995ac7f252c2c107ff1f44c | [
"Apache-2.0"
] | 2 | 2021-07-21T11:24:56.000Z | 2021-09-20T11:13:24.000Z | from selenium import webdriver
from webdriver_manager.opera import OperaDriverManager
from webdriver_setup.driver import DriverBase
class OperaDriver(DriverBase):
    """Factory for Opera webdriver instances; the driver binary is
    resolved and installed through webdriver-manager.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def create_driver(self, **kwargs):
        """Create Opera webdriver
        :type kwargs: dict
        :param kwargs: Optional arguments
        :rtype: selenium.webdriver.Opera
        :returns: Opera webdriver instance
        """
        # Driver cache validity in days (default: 7).
        validity_days = kwargs.get("cache_valid_range", 7)
        # NOTE(review): 'cache_valid_range' is read with get(), not
        # pop(), so it is also forwarded to webdriver.Opera via
        # **kwargs below — confirm the base class strips it first.
        manager = OperaDriverManager(cache_valid_range=validity_days)
        return webdriver.Opera(executable_path=manager.install(), **kwargs)
| 25.666667 | 83 | 0.701299 | 557 | 0.803752 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.30303 |