hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
346900f77d8ee086d65dafb8a1ff78831519c40d | 1,188 | py | Python | tests/unit/schema/test_links.py | eyadgaran/openapi-core | ff4a6c81eeda0e2274aa9dc03597779c141e5728 | [
"BSD-3-Clause"
] | null | null | null | tests/unit/schema/test_links.py | eyadgaran/openapi-core | ff4a6c81eeda0e2274aa9dc03597779c141e5728 | [
"BSD-3-Clause"
] | null | null | null | tests/unit/schema/test_links.py | eyadgaran/openapi-core | ff4a6c81eeda0e2274aa9dc03597779c141e5728 | [
"BSD-3-Clause"
] | 1 | 2022-01-19T21:23:56.000Z | 2022-01-19T21:23:56.000Z | import mock
import pytest
from openapi_core.schema.links.models import Link
from openapi_core.schema.servers.models import Server
class TestLinks(object):
    """Tests for parameter access on Link objects."""

    @pytest.fixture
    def link_factory(self):
        """Return a factory building a Link with two sentinel parameters."""
        def factory(request_body, server):
            return Link(
                'op_id',
                {
                    'par1': mock.sentinel.par1,
                    'par2': mock.sentinel.par2,
                },
                request_body,
                'Test link',
                server
            )
        return factory

    # Server variants exercised for every request body below.
    servers = [
        None,
        Server("https://bad.remote.domain.net/"),
        Server("http://localhost")
    ]

    # Request bodies: absent, plain string, JSON string, and mapping.
    request_body_list = [
        None,
        "request",
        '{"request": "value", "opt": 2}',
        {"request": "value", "opt": 2}
    ]

    @pytest.mark.parametrize("server", servers)
    @pytest.mark.parametrize("request_body", request_body_list)
    def test_iteritems(self, link_factory, request_body, server):
        """Subscript access on a Link must mirror its parameters mapping."""
        link = link_factory(request_body, server)
        for name, value in link.parameters.items():
            assert link[name] == value
| 26.4 | 65 | 0.564815 | 1,054 | 0.887205 | 0 | 0 | 749 | 0.630471 | 0 | 0 | 164 | 0.138047 |
3469ba25c1cf2d5686363b9ab8181ddb2f05f798 | 1,022 | py | Python | ex105.py | erikamaylim/Python-CursoemVideo | 5a6809818c4c55a02ec52379d95f3d20c833df2e | [
"MIT"
] | null | null | null | ex105.py | erikamaylim/Python-CursoemVideo | 5a6809818c4c55a02ec52379d95f3d20c833df2e | [
"MIT"
] | null | null | null | ex105.py | erikamaylim/Python-CursoemVideo | 5a6809818c4c55a02ec52379d95f3d20c833df2e | [
"MIT"
] | null | null | null | """Faça um programa que tenha uma função notas() que pode receber várias notas de alunos
e vai retornar um dicionário com as seguintes informações:
– Quantidade de notas
- A maior nota
– A menor nota
– A média da turma
– A situação (opcional)"""
def notas(*num, s=False):
    """
    -> Collect students' grades and return overall information about the class.

    :param num: the grades of the class (variadic)
    :param s: when True, also include the class situation
              ('Boa', 'Razoável' or 'Ruim')
    :return: dict with grade count, highest, lowest, average and,
             optionally, the situation
    """
    # Build count/max/min first so an empty call fails on max() just like
    # before, then derive the average.
    resumo = {
        'Quantidade de notas': len(num),
        'Maior nota': max(num),
        'Menor nota': min(num),
    }
    media = sum(num) / len(num)
    resumo['Média'] = media
    if s:
        if media >= 6:
            resumo['Situação'] = 'Boa'
        elif media >= 5:
            resumo['Situação'] = 'Razoável'
        else:
            resumo['Situação'] = 'Ruim'
    return resumo
# Demo runs: with s=True the summary dict also carries the 'Situação' key.
print(notas(2, 3, 5, 4, 1, 3, s=True))
print(notas(10, 7, 8, 10, s=True))
# Without s=True the situation is omitted from the result.
print(notas(4, 6, 7, 5, 6.5, 7, 5))
# Print the function's documentation to stdout.
help(notas)
| 26.894737 | 98 | 0.61546 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 602 | 0.570076 |
346b05d408ff7992b28238f3b72a34e2e2a82c5a | 645 | py | Python | swigwin-3.0.12/Examples/test-suite/python/swigobject_runme.py | bostich83/atomic_swig | 5438f676d690ffddb09d88bbc1b51e3b38fa8c6a | [
"MIT"
] | null | null | null | swigwin-3.0.12/Examples/test-suite/python/swigobject_runme.py | bostich83/atomic_swig | 5438f676d690ffddb09d88bbc1b51e3b38fa8c6a | [
"MIT"
] | 2 | 2020-03-24T18:19:22.000Z | 2020-03-31T11:22:32.000Z | swigwin-3.0.12/Examples/test-suite/python/swigobject_runme.py | bostich83/atomic_swig | 5438f676d690ffddb09d88bbc1b51e3b38fa8c6a | [
"MIT"
] | 2 | 2019-11-01T01:28:09.000Z | 2020-05-11T05:48:26.000Z |
from swigobject import *
# Python 2 runtime test for SWIG 'this' pointer wrapping (swigobject test).
# Two wrappers around the same underlying object must report the same 'this'.
a = A()
a1 = a_ptr(a)
a2 = a_ptr(a)
if a1.this != a2.this:
    raise RuntimeError
lthis = long(a.this)
# match pointer value, but deal with leading zeros on 8/16 bit systems and
# different C++ compilers interpretation of %p
xstr1 = "%016X" % (lthis,)
xstr1 = str.lstrip(xstr1, '0')
xstr2 = pointer_str(a)
# Normalise pointer_str() output: strip 0x/0X prefix and leading zeros,
# upper-case the hex digits, then compare against the %X rendering.
xstr2 = str.replace(xstr2, "0x", "")
xstr2 = str.replace(xstr2, "0X", "")
xstr2 = str.lstrip(xstr2, '0')
xstr2 = str.upper(xstr2)
if xstr1 != xstr2:
    print xstr1, xstr2
    raise RuntimeError
# str()/repr() of the SWIG pointer object are exercised for crashes only.
s = str(a.this)
r = repr(a.this)
# Two void-pointer wrappers of the same object must compare equal as ints.
v1 = v_ptr(a)
v2 = v_ptr(a)
if long(v1) != long(v2):
    raise RuntimeError
| 17.916667 | 74 | 0.651163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.224806 |
346b88da15ac27f750c964f42e0fb2bd9d1b5698 | 205 | py | Python | com/LimePencil/Q2164/Main.py | LimePencil/baekjoonProblems | 61eeeeb875585d165d9e39ecdb3d905b4ba6aa87 | [
"MIT"
] | 2 | 2021-07-17T13:05:42.000Z | 2021-09-12T09:14:24.000Z | com/LimePencil/Q2164/Main.py | LimePencil/baekjoonProblems | 61eeeeb875585d165d9e39ecdb3d905b4ba6aa87 | [
"MIT"
] | null | null | null | com/LimePencil/Q2164/Main.py | LimePencil/baekjoonProblems | 61eeeeb875585d165d9e39ecdb3d905b4ba6aa87 | [
"MIT"
] | null | null | null | import sys
from collections import deque
# Card game (BOJ 2164): cards 1..n are stacked top to bottom. Repeatedly
# throw away the top card and move the next card to the bottom; print the
# single card that remains.
n = int(sys.stdin.readline())
# deque gives O(1) pops from the left (top of the deck); it consumes the
# range directly — no intermediate list needed.
deck = deque(range(1, n + 1))
for _ in range(n - 1):
    deck.popleft()                  # discard the top card
    deck.append(deck.popleft())    # move the new top card to the bottom
print(deck.pop())  # print() stringifies the remaining card itself
| 20.5 | 33 | 0.663415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
346d66976904f2a2e890a78b4c0d64f9e6578329 | 347 | py | Python | contrib/mypy/examples/src/python/mypy_plugin/settings.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | 1 | 2020-08-26T03:30:31.000Z | 2020-08-26T03:30:31.000Z | contrib/mypy/examples/src/python/mypy_plugin/settings.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | 1 | 2020-01-21T16:34:02.000Z | 2020-01-21T16:34:02.000Z | contrib/mypy/examples/src/python/mypy_plugin/settings.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from django.urls import URLPattern
# Minimal Django-style settings used by the mypy-plugin example sources.
DEBUG: bool = True
DEFAULT_FROM_EMAIL: str = 'webmaster@example.com'
SECRET_KEY: str = 'not so secret'  # placeholder value for the example only
# presumably present only so the plugin type-checks a URLPattern-valued
# setting — TODO confirm against the plugin example's expectations
MY_SETTING: URLPattern = URLPattern(pattern='foo', callback=lambda: None)
| 28.916667 | 73 | 0.769452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.495677 |
346d8c6aecb6d5c799c07698354cb687ab52ff19 | 216 | py | Python | function/python/brightics/function/statistics/__init__.py | janrenz/studio | a0714ed8dcd9dcd8d024162104d3b4de89ac2b49 | [
"Apache-2.0"
] | null | null | null | function/python/brightics/function/statistics/__init__.py | janrenz/studio | a0714ed8dcd9dcd8d024162104d3b4de89ac2b49 | [
"Apache-2.0"
] | null | null | null | function/python/brightics/function/statistics/__init__.py | janrenz/studio | a0714ed8dcd9dcd8d024162104d3b4de89ac2b49 | [
"Apache-2.0"
] | null | null | null | from .profile_table import profile_table
from .correlation import correlation
from .pairplot import pairplot
from .anova import bartletts_test
from .anova import oneway_anova
from .anova import tukeys_range_test | 36 | 41 | 0.842593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
346dcec89ab4ed1144ede11615065491709f1045 | 1,637 | py | Python | modules/hello-world/dags/hello_world.py | nalin-adhikari/apache-airflow | 52b8dd241b9b631d6bad5137bd55c774f1d7beea | [
"MIT"
] | null | null | null | modules/hello-world/dags/hello_world.py | nalin-adhikari/apache-airflow | 52b8dd241b9b631d6bad5137bd55c774f1d7beea | [
"MIT"
] | null | null | null | modules/hello-world/dags/hello_world.py | nalin-adhikari/apache-airflow | 52b8dd241b9b631d6bad5137bd55c774f1d7beea | [
"MIT"
] | null | null | null | import json
from datetime import timedelta, datetime
from requests import get
from airflow import DAG
from airflow.models import Variable
from airflow.operators.python_operator import PythonOperator
# Config variables
# dag_config = Variable.get("hello_world_variables", deserialize_json=True)

# Defaults applied to every task in the DAG below.
default_args = {
    'owner': 'nalin',
    'depends_on_past': True,
    'start_date': datetime(2020, 12, 4),
    # 'end_date': datetime(2018, 12, 5),
    'email': ['nalinadhikariofficial@gmail.com'],
    'email_on_failure': True,
    'email_on_retry': True,
    'retries': 2,                         # retry each failed task twice
    'retry_delay': timedelta(minutes=2),  # wait 2 minutes between retries
}

# Set Schedule: Run pipeline once a day.
# Use cron to define exact time. Eg. 8:15am would be "15 08 * * *"
schedule_interval = "21 1 * * *"  # i.e. daily at 01:21

# Define DAG: Set ID and assign default args and schedule interval
dag = DAG(
    'fake_rest_api',
    default_args=default_args,
    schedule_interval=schedule_interval
)
def hello_world():
    """Print a greeting; callable for the first task of the DAG."""
    print("Hello World")
def fetch_data():
    """Fetch the todos collection from the fake REST API and print it.

    Prints the response body on HTTP 200; any other status code is
    printed instead of the body.
    """
    response = get("https://jsonplaceholder.typicode.com/todos")
    if response.status_code != 200:
        print(response.status_code)
    else:
        print(response.text)
def bye_world():
    """Print a farewell; callable for the last task of the DAG."""
    print("Bye World")
# Task 1: print the greeting.
t1 = PythonOperator(
    task_id='print_hello_world',
    provide_context=False,
    python_callable=hello_world,
    dag=dag,
)

# Task 2: fetch and print data from the fake REST API.
t2 = PythonOperator(
    task_id='fetch_data',
    provide_context=False,
    python_callable=fetch_data,
    dag=dag,
)

# Task 3: print the farewell.
t3 = PythonOperator(
    task_id='print_bye_world',
    provide_context=False,
    python_callable=bye_world,
    dag=dag,
)

# Linear pipeline: t1 runs before t2 (t2 -> t3 is wired on the next line).
t1 >> [t2]
t2 >> [t3] | 23.056338 | 75 | 0.689676 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 584 | 0.35675 |
346dfc38e5228510fbb6d8575a372cd8d9bac798 | 9,074 | py | Python | src/thex/apps/utils/signal_utils.py | harris-2374/THEx | 04c4f56eb2cf86b8f55ddd6edd3f48029296bf5a | [
"MIT"
] | null | null | null | src/thex/apps/utils/signal_utils.py | harris-2374/THEx | 04c4f56eb2cf86b8f55ddd6edd3f48029296bf5a | [
"MIT"
] | null | null | null | src/thex/apps/utils/signal_utils.py | harris-2374/THEx | 04c4f56eb2cf86b8f55ddd6edd3f48029296bf5a | [
"MIT"
] | null | null | null | from pathlib import Path
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
import plotly
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# -------------------- Graphing Functions --------------------
def single_chromosome_graph_line(
    df,
    chromosome,
    chosen_template,
    marker_width,
    colors,
    font_size,
    xaxis_gridlines,
    yaxis_gridlines,
    font_family,
    samples,
):
    """Build a per-sample line plot of Value vs. Window for one chromosome.

    Rows of *df* are restricted to *chromosome*; the y-axis is clamped to
    [0, max(Value)] for that chromosome and the x-axis starts at zero.
    """
    chrom_df = df[df["Chromosome"] == chromosome]
    value_ceiling = float(chrom_df["Value"].max())

    fig = px.line(
        chrom_df,
        x='Window',
        y='Value',
        category_orders={"Sample": samples},
        color='Sample',
        color_discrete_sequence=colors,
        height=500,
    )

    # Horizontal legend laid out just above the plotting area.
    fig.update_layout(
        font=dict(size=font_size, family=font_family),
        legend=dict(
            itemsizing='trace',
            orientation="h",
            xanchor="left",
            x=0,
            y=1.02,
            yanchor="bottom",
        ),
        showlegend=True,
        template=chosen_template,
        title_x=0.5,
    )
    fig.update_xaxes(title="Position", rangemode='tozero',
                     showgrid=xaxis_gridlines)
    fig.update_yaxes(
        title="Value",
        range=[0, value_ceiling],
        fixedrange=True,
        showgrid=yaxis_gridlines,
    )
    fig.update_traces(line=dict(width=float(marker_width)))
    return fig
def single_chromosome_graph_scatter(
    df,
    chromosome,
    chosen_template,
    marker_width,
    colors,
    font_size,
    xaxis_gridlines,
    yaxis_gridlines,
    font_family,
    samples,
):
    """Build a per-sample scatter plot of Value vs. Window for one chromosome.

    Rows of *df* are restricted to *chromosome*; the y-axis is clamped to
    [0, max(Value)] for that chromosome and the x-axis starts at zero.
    """
    chrom_df = df[df["Chromosome"] == chromosome]
    value_ceiling = float(chrom_df["Value"].max())

    fig = px.scatter(
        chrom_df,
        x='Window',
        y='Value',
        category_orders={"Sample": samples},
        color='Sample',
        color_discrete_sequence=colors,
        height=500,
    )

    # Horizontal legend laid out just above the plotting area.
    fig.update_layout(
        font=dict(size=font_size, family=font_family),
        legend=dict(
            itemsizing='trace',
            orientation="h",
            xanchor="left",
            x=0,
            y=1.02,
            yanchor="bottom",
        ),
        showlegend=True,
        template=chosen_template,
        title_x=0.5,
    )
    fig.update_xaxes(title="Position", rangemode='tozero',
                     showgrid=xaxis_gridlines)
    fig.update_yaxes(
        title="Value",
        range=[0, value_ceiling],
        fixedrange=True,
        showgrid=yaxis_gridlines,
    )
    fig.update_traces(marker=dict(size=float(marker_width)))
    return fig
def whole_genome_line(
    df,
    chromosomes,
    samples,
    colors,
    marker_width,
    template,
    font_size,
    y_max,
    x_max,
    xaxis_gridlines,
    yaxis_gridlines,
    font_family,
):
    """Build a stacked line figure covering the whole genome.

    One subplot row per chromosome; within each row, one line trace per
    sample (Value vs. Window). Every sample appears exactly once in the
    legend via the legend_flag mechanism below.
    """
    fig = make_subplots(
        rows=len(chromosomes),
        cols=1,
        x_title="Position",
        y_title="Edit Me!",  # placeholder; skipped by the relabel loop below
        row_titles=chromosomes,
        row_heights=[2] * len(chromosomes),
    )
    for n, sample in enumerate(samples):
        # Only the sample's first trace contributes a legend entry.
        legend_flag = True
        for row, current_chromosome in enumerate(chromosomes, start=1):
            filt = (df['Chromosome'] == current_chromosome) & (df["Sample"] == sample)
            sample_chromosome_data = df[filt]
            fig.add_trace(
                go.Scatter(
                    x=sample_chromosome_data['Window'],
                    y=sample_chromosome_data['Value'],
                    mode='lines',
                    legendgroup=str(sample),
                    name=sample,
                    line=dict(
                        color=colors[n],
                        width=float(marker_width)
                    ),
                    showlegend=legend_flag,
                ),
                row=row,
                col=1
            )
            legend_flag = False
    # --- Update Figure ---
    fig.update_layout(
        font=dict(size=font_size, family=font_family),
        height=125 * len(chromosomes),
        hovermode="x unified",
        legend=dict(
            orientation="h",
            yanchor="bottom",
            y=1.02,
            xanchor="left",
            x=0,
            itemsizing='trace',
            title="",
        ),
        margin=dict(
            l=60,
            r=50,
            b=60,
            t=10,
        ),
        template=template,
        title_x=0.5,
        # Was font_family="Arial", which clobbered the caller-supplied
        # font_family; now consistent with whole_genome_scatter.
        font_family=font_family,
    )
    fig.update_xaxes(
        fixedrange=True,
        range=[0, x_max],
        showgrid=xaxis_gridlines,
    )
    fig.update_yaxes(
        range=[0.0, y_max],
        fixedrange=True,
        showgrid=yaxis_gridlines,
    )
    # Keep only the text after '=' in annotation labels.
    fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
    # Rotate chromosome names to 0-degrees
    for annotation in fig['layout']['annotations']:
        if annotation['text'] == "Edit Me!":
            continue
        annotation['textangle'] = 0
        annotation['align'] = "center"
    return fig
def whole_genome_scatter(
    df,
    chromosomes,
    samples,
    colors,
    marker_width,
    template,
    font_size,
    y_max,
    x_max,
    xaxis_gridlines,
    yaxis_gridlines,
    font_family,
):
    """Build a faceted scatter figure covering the whole genome.

    One facet row per chromosome (Value vs. Window), colored per sample.
    (A dead, commented-out make_subplots/add_trace implementation that
    duplicated whole_genome_line was removed from this body.)
    """
    fig = px.scatter(
        df,
        x='Window',
        y='Value',
        category_orders={"Sample": samples},
        color='Sample',
        color_discrete_sequence=colors,
        facet_row="Chromosome",
    )
    # --- Update Figure ---
    fig.update_layout(
        font=dict(size=font_size, family=font_family),
        height=125 * len(chromosomes),
        hovermode="x unified",
        legend=dict(
            orientation="h",
            yanchor="bottom",
            y=1.02,
            xanchor="left",
            x=0,
            itemsizing='trace',
            title="",
        ),
        margin=dict(
            l=60,
            r=50,
            b=60,
            t=10,
        ),
        template=template,
        title_x=0.5,
        font_family=font_family,
    )
    fig.update_xaxes(
        fixedrange=True,
        range=[0, x_max],
        showgrid=xaxis_gridlines,
    )
    fig.update_yaxes(
        range=[0.0, y_max],
        fixedrange=True,
        showgrid=yaxis_gridlines,
        title='',
    )
    # Keep only the text after '=' in facet annotation labels.
    fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
    fig.update_traces(marker=dict(size=float(marker_width)))
    # Rotate chromosome names to 0-degrees
    for annotation in fig['layout']['annotations']:
        # NOTE(review): no "Edit Me!" annotation exists in this px.scatter
        # figure; guard kept from the subplot variant — confirm before removal.
        if annotation['text'] == "Edit Me!":
            continue
        annotation['textangle'] = 0
        annotation['align'] = "center"
    return fig
# -------------------- File Validation --------------------
def validate_signal_tracer_headers(df):
    """Return True if *df* has exactly the expected headers, in order.

    Expected columns: Chromosome, Window, Sample, Value.
    """
    # Plain comparison instead of assert/except: asserts are stripped under
    # "python -O", which made the old version unconditionally return True.
    expected_headers = ["Chromosome", "Window", "Sample", "Value"]
    return list(df.columns) == expected_headers
def validate_signal_tracer_values(xlsx_df):
    """Return False if the 'Value' column data are not numeric.

    A pandas 'object' dtype indicates strings/mixed data; anything else
    (int/float dtypes) is accepted.
    """
    # Direct dtype comparison instead of assert/except: asserts are stripped
    # under "python -O". bool() pins the return type to a plain Python bool.
    return bool(xlsx_df['Value'].dtype != "object")
def validate_file_type(filename):
    """Return False if the file's extension is not a supported type.

    Supported (case-sensitive) extensions: .tsv, .csv, .xlsx, .txt.
    """
    valid_filetypes = ('.tsv', '.csv', '.xlsx', '.txt')
    # Path.suffix is '' for extension-less names, which is correctly rejected.
    return Path(filename).suffix in valid_filetypes
| 25.925714 | 88 | 0.528543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,133 | 0.235067 |
346e40e8a2a72d7ea7828ba82b650ee8a97e793a | 7,249 | py | Python | tests/unit/test_ncit.py | cancervariants/disease-normalization | b4d64c9bf30d600fec4d23b890da669835d93c73 | [
"MIT"
] | null | null | null | tests/unit/test_ncit.py | cancervariants/disease-normalization | b4d64c9bf30d600fec4d23b890da669835d93c73 | [
"MIT"
] | 39 | 2021-02-11T11:33:32.000Z | 2022-03-23T13:53:31.000Z | tests/unit/test_ncit.py | cancervariants/disease-normalization | b4d64c9bf30d600fec4d23b890da669835d93c73 | [
"MIT"
] | null | null | null | """Test NCIt source."""
import pytest
from disease.schemas import MatchType, SourceName
from disease.query import QueryHandler
@pytest.fixture(scope='module')
def ncit():
    """Build NCIt ETL test fixture."""
    class QueryGetter:
        # Thin wrapper restricting query results to the NCIt source only.

        def __init__(self):
            self.query_handler = QueryHandler()

        def search(self, query_str):
            # keyed=True makes source_matches a dict keyed by source name,
            # so the NCIt entry can be indexed out directly.
            response = self.query_handler.search_sources(query_str, keyed=True,
                                                         incl='ncit')
            return response['source_matches'][SourceName.NCIT]
    return QueryGetter()
@pytest.fixture(scope='module')
def neuroblastoma():
    """Build neuroblastoma test fixture."""
    # Expected record for NCIt concept C3270 as the tests compare it.
    return {
        "label_and_type": "ncit:c3270##identity",
        "concept_id": "ncit:C3270",
        "label": "Neuroblastoma",
        "aliases": [
            "Neural Crest Tumor, Malignant",
            "Neuroblastoma (NBL)",
            "Neuroblastoma (Schwannian Stroma-poor)",
            "Neuroblastoma (Schwannian Stroma-Poor)",
            "NEUROBLASTOMA, MALIGNANT",
            "Neuroblastoma, NOS",
            "neuroblastoma"
        ],
        "xrefs": [],
        "associated_with": ["umls:C0027819", "icdo:9500/3"],
        "src_name": "NCIt"
    }
@pytest.fixture(scope='module')
def nsclc():
    """Build fixture for non-small cell lung carcinoma"""
    # Expected record for NCIt concept C2926 as the tests compare it.
    # NOTE: unlike the neuroblastoma fixture, this one carries no
    # label_and_type/src_name keys.
    return {
        "concept_id": "ncit:C2926",
        "label": "Lung Non-Small Cell Carcinoma",
        "aliases": [
            "Non Small Cell Lung Cancer NOS",
            "Non-Small Cell Lung Cancer",
            "Non-Small Cell Cancer of the Lung",
            "NSCLC",
            "non-small cell lung cancer",
            "Non-Small Cell Carcinoma of the Lung",
            "Non-Small Cell Cancer of Lung",
            "Non-small cell lung cancer, NOS",
            "Non-Small Cell Carcinoma of Lung",
            "NSCLC - Non-Small Cell Lung Cancer",
            "Non-Small Cell Lung Carcinoma"
        ],
        "xrefs": [],
        "associated_with": ["umls:C0007131"]
    }
def test_concept_id_match(ncit, neuroblastoma, nsclc, compare_records):
    """Test that concept ID search resolves to correct record"""
    def assert_concept_id_hit(query, expected):
        response = ncit.search(query)
        assert response['match_type'] == MatchType.CONCEPT_ID
        assert len(response['records']) == 1
        compare_records(response['records'][0], expected)

    # Prefix casing and a bare concept code all resolve by CONCEPT_ID.
    assert_concept_id_hit('ncit:C3270', neuroblastoma)
    assert_concept_id_hit('ncit:c2926', nsclc)
    assert_concept_id_hit('NCIT:C2926', nsclc)
    assert_concept_id_hit('C3270', neuroblastoma)

    # A bare numeric string must not match anything.
    response = ncit.search('3270')
    assert response['match_type'] == MatchType.NO_MATCH
def test_label_match(ncit, neuroblastoma, nsclc, compare_records):
    """Test that label search resolves to correct record."""
    def assert_label_hit(query, expected):
        response = ncit.search(query)
        assert response['match_type'] == MatchType.LABEL
        assert len(response['records']) == 1
        compare_records(response['records'][0], expected)

    assert_label_hit('Neuroblastoma', neuroblastoma)
    assert_label_hit('NEUROBLASTOMA', neuroblastoma)
    assert_label_hit('lung non-small cell carcinoma', nsclc)

    # Dropping the hyphen must not match any label.
    response = ncit.search('lung non small cell carcinoma')
    assert response['match_type'] == MatchType.NO_MATCH
def test_alias_match(ncit, neuroblastoma, nsclc, compare_records):
    """Test that alias search resolves to correct record."""
    alias_cases = [
        ('neuroblastoma, nos', neuroblastoma),
        ('neuroblastoma (Schwannian Stroma-Poor)', neuroblastoma),
        ('Neuroblastoma, Malignant', neuroblastoma),
        ('Neural Crest Tumor, Malignant', neuroblastoma),
        ('nsclc', nsclc),
        ('NSCLC - Non-Small Cell Lung Cancer', nsclc),
    ]
    for query, expected in alias_cases:
        response = ncit.search(query)
        assert response['match_type'] == MatchType.ALIAS
        assert len(response['records']) == 1
        compare_records(response['records'][0], expected)

    # An alias with its punctuation stripped must not match.
    response = ncit.search('neuroblastoma nbl')
    assert response['match_type'] == MatchType.NO_MATCH
def test_associated_with_match(ncit, neuroblastoma, nsclc, compare_records):
    """Test that associated_with search resolves to correct record."""
    for query, expected in (('icdo:9500/3', neuroblastoma),
                            ('umls:C0007131', nsclc)):
        response = ncit.search(query)
        assert response['match_type'] == MatchType.ASSOCIATED_WITH
        assert len(response['records']) == 1
        compare_records(response['records'][0], expected)
def test_meta(ncit):
    """Test that meta field is correct."""
    response = ncit.search('neuroblastoma')
    # License/provenance metadata pinned to the NCIt 21.01d release.
    assert response['source_meta_']['data_license'] == 'CC BY 4.0'
    assert response['source_meta_']['data_license_url'] == \
        'https://creativecommons.org/licenses/by/4.0/legalcode'
    assert response['source_meta_']['version'] == '21.01d'
    assert response['source_meta_']['data_url'] == \
        "https://evs.nci.nih.gov/ftp1/NCI_Thesaurus/archive/21.01d_Release/"  # noqa: E501
    assert response['source_meta_']['rdp_url'] == 'http://reusabledata.org/ncit.html'  # noqa: E501
    assert response['source_meta_']['data_license_attributes'] == {
        "non_commercial": False,
        "share_alike": False,
        "attribution": True
    }
| 37.365979 | 99 | 0.66423 | 346 | 0.047731 | 0 | 0 | 1,915 | 0.264174 | 0 | 0 | 2,529 | 0.348876 |
346e60329f7119de9c6fd91260d085bb215a900f | 7,084 | py | Python | pytglib/api/types/message.py | iTeam-co/pytglib | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 6 | 2019-10-30T08:57:27.000Z | 2021-02-08T14:17:43.000Z | pytglib/api/types/message.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 1 | 2021-08-19T05:44:10.000Z | 2021-08-19T07:14:56.000Z | pytglib/api/types/message.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 5 | 2019-12-04T05:30:39.000Z | 2021-05-21T18:23:32.000Z |
from ..utils import Object
class Message(Object):
    """
    Describes a message

    Attributes:
        ID (:obj:`str`): ``Message``

    Args:
        id (:obj:`int`):
            Message identifier, unique for the chat to which the message belongs
        sender_user_id (:obj:`int`):
            Identifier of the user who sent the message; 0 if unknownCurrently, it is unknown for channel posts and for channel posts automatically forwarded to discussion group
        chat_id (:obj:`int`):
            Chat identifier
        sending_state (:class:`telegram.api.types.MessageSendingState`):
            Information about the sending state of the message; may be null
        scheduling_state (:class:`telegram.api.types.MessageSchedulingState`):
            Information about the scheduling state of the message; may be null
        is_outgoing (:obj:`bool`):
            True, if the message is outgoing
        can_be_edited (:obj:`bool`):
            True, if the message can be editedFor live location and poll messages this fields shows whether editMessageLiveLocation or stopPoll can be used with this message by the client
        can_be_forwarded (:obj:`bool`):
            True, if the message can be forwarded
        can_be_deleted_only_for_self (:obj:`bool`):
            True, if the message can be deleted only for the current user while other users will continue to see it
        can_be_deleted_for_all_users (:obj:`bool`):
            True, if the message can be deleted for all users
        is_channel_post (:obj:`bool`):
            True, if the message is a channel postAll messages to channels are channel posts, all other messages are not channel posts
        contains_unread_mention (:obj:`bool`):
            True, if the message contains an unread mention for the current user
        date (:obj:`int`):
            Point in time (Unix timestamp) when the message was sent
        edit_date (:obj:`int`):
            Point in time (Unix timestamp) when the message was last edited
        forward_info (:class:`telegram.api.types.messageForwardInfo`):
            Information about the initial message sender; may be null
        reply_to_message_id (:obj:`int`):
            If non-zero, the identifier of the message this message is replying to; can be the identifier of a deleted message
        ttl (:obj:`int`):
            For self-destructing messages, the message's TTL (Time To Live), in seconds; 0 if noneTDLib will send updateDeleteMessages or updateMessageContent once the TTL expires
        ttl_expires_in (:obj:`float`):
            Time left before the message expires, in seconds
        via_bot_user_id (:obj:`int`):
            If non-zero, the user identifier of the bot through which this message was sent
        author_signature (:obj:`str`):
            For channel posts, optional author signature
        views (:obj:`int`):
            Number of times this message was viewed
        media_album_id (:obj:`int`):
            Unique identifier of an album this message belongs toOnly photos and videos can be grouped together in albums
        restriction_reason (:obj:`str`):
            If non-empty, contains a human-readable description of the reason why access to this message must be restricted
        content (:class:`telegram.api.types.MessageContent`):
            Content of the message
        reply_markup (:class:`telegram.api.types.ReplyMarkup`):
            Reply markup for the message; may be null

    Returns:
        Message

    Raises:
        :class:`telegram.Error`
    """
    # TDLib type identifier string for this object (see "ID" above).
    ID = "message"

    def __init__(self, id, sender_user_id, chat_id, sending_state, scheduling_state, is_outgoing, can_be_edited, can_be_forwarded, can_be_deleted_only_for_self, can_be_deleted_for_all_users, is_channel_post, contains_unread_mention, date, edit_date, forward_info, reply_to_message_id, ttl, ttl_expires_in, via_bot_user_id, author_signature, views, media_album_id, restriction_reason, content, reply_markup, **kwargs):
        # Plain field-by-field assignment; extra keys from the source dict
        # are accepted and silently ignored via **kwargs.
        self.id = id  # int
        self.sender_user_id = sender_user_id  # int
        self.chat_id = chat_id  # int
        self.sending_state = sending_state  # MessageSendingState
        self.scheduling_state = scheduling_state  # MessageSchedulingState
        self.is_outgoing = is_outgoing  # bool
        self.can_be_edited = can_be_edited  # bool
        self.can_be_forwarded = can_be_forwarded  # bool
        self.can_be_deleted_only_for_self = can_be_deleted_only_for_self  # bool
        self.can_be_deleted_for_all_users = can_be_deleted_for_all_users  # bool
        self.is_channel_post = is_channel_post  # bool
        self.contains_unread_mention = contains_unread_mention  # bool
        self.date = date  # int
        self.edit_date = edit_date  # int
        self.forward_info = forward_info  # MessageForwardInfo
        self.reply_to_message_id = reply_to_message_id  # int
        self.ttl = ttl  # int
        self.ttl_expires_in = ttl_expires_in  # float
        self.via_bot_user_id = via_bot_user_id  # int
        self.author_signature = author_signature  # str
        self.views = views  # int
        self.media_album_id = media_album_id  # int
        self.restriction_reason = restriction_reason  # str
        self.content = content  # MessageContent
        self.reply_markup = reply_markup  # ReplyMarkup

    @staticmethod
    def read(q: dict, *args) -> "Message":
        """Construct a Message from dict *q*; missing keys become None.

        Nested objects (states, forward info, content, markup) are
        deserialized through Object.read.
        """
        id = q.get('id')
        sender_user_id = q.get('sender_user_id')
        chat_id = q.get('chat_id')
        sending_state = Object.read(q.get('sending_state'))
        scheduling_state = Object.read(q.get('scheduling_state'))
        is_outgoing = q.get('is_outgoing')
        can_be_edited = q.get('can_be_edited')
        can_be_forwarded = q.get('can_be_forwarded')
        can_be_deleted_only_for_self = q.get('can_be_deleted_only_for_self')
        can_be_deleted_for_all_users = q.get('can_be_deleted_for_all_users')
        is_channel_post = q.get('is_channel_post')
        contains_unread_mention = q.get('contains_unread_mention')
        date = q.get('date')
        edit_date = q.get('edit_date')
        forward_info = Object.read(q.get('forward_info'))
        reply_to_message_id = q.get('reply_to_message_id')
        ttl = q.get('ttl')
        ttl_expires_in = q.get('ttl_expires_in')
        via_bot_user_id = q.get('via_bot_user_id')
        author_signature = q.get('author_signature')
        views = q.get('views')
        media_album_id = q.get('media_album_id')
        restriction_reason = q.get('restriction_reason')
        content = Object.read(q.get('content'))
        reply_markup = Object.read(q.get('reply_markup'))
        return Message(id, sender_user_id, chat_id, sending_state, scheduling_state, is_outgoing, can_be_edited, can_be_forwarded, can_be_deleted_only_for_self, can_be_deleted_for_all_users, is_channel_post, contains_unread_mention, date, edit_date, forward_info, reply_to_message_id, ttl, ttl_expires_in, via_bot_user_id, author_signature, views, media_album_id, restriction_reason, content, reply_markup)
| 54.914729 | 417 | 0.683512 | 7,052 | 0.995483 | 0 | 0 | 1,721 | 0.242942 | 0 | 0 | 4,110 | 0.580181 |
346fa28c58b006ee5c9a022eea52649d810fd3de | 902 | py | Python | trainer/migrations/0023_userpretest.py | tthelen/interpunct | 83e2cbd67dcc94c131a3a2b155eefd710636a912 | [
"MIT"
] | 2 | 2016-10-21T21:52:08.000Z | 2021-10-19T02:19:43.000Z | trainer/migrations/0023_userpretest.py | tthelen/interpunct | 83e2cbd67dcc94c131a3a2b155eefd710636a912 | [
"MIT"
] | null | null | null | trainer/migrations/0023_userpretest.py | tthelen/interpunct | 83e2cbd67dcc94c131a3a2b155eefd710636a912 | [
"MIT"
] | 4 | 2016-10-24T19:17:48.000Z | 2018-05-11T11:53:12.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-18 08:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: introduces the UserPretest model,
    # which records whether a user passed a pretest item.

    dependencies = [
        ('trainer', '0022_auto_20180517_1429'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserPretest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('result', models.BooleanField(default=False)),
                # NOTE(review): 'rule' points at trainer.User, not a Rule
                # model — looks like a slip in the generated migration;
                # confirm against the models module before relying on it.
                ('rule', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='link_to_pretests', to='trainer.User')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='link_to_pretest', to='trainer.User')),
            ],
        ),
    ]
| 34.692308 | 141 | 0.640798 | 713 | 0.790466 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.22949 |
346fda67f8f453b3f930138da3f2c3b926fc2d88 | 2,352 | py | Python | CsvToDynamo.py | cushind/csv-to-dynamodb | 5a9cdedde4dfee121efe8efb9bcd61950f242c94 | [
"Apache-2.0"
] | 2 | 2018-04-13T21:03:38.000Z | 2018-06-25T21:56:02.000Z | CsvToDynamo.py | cushind/csv-to-dynamodb | 5a9cdedde4dfee121efe8efb9bcd61950f242c94 | [
"Apache-2.0"
] | null | null | null | CsvToDynamo.py | cushind/csv-to-dynamodb | 5a9cdedde4dfee121efe8efb9bcd61950f242c94 | [
"Apache-2.0"
] | 1 | 2018-07-18T10:21:31.000Z | 2018-07-18T10:21:31.000Z | import boto3
import csv
import json
import argparse
'''
You need to have aws configured with access tokens prior to running this script (use aws configure)
'''
def batch_create(table, csv_file_name, column_names):
    """Bulk-load rows from a CSV file into a DynamoDB table.

    The table's batch writer buffers the puts and flushes them in batches
    behind the scenes (the API limit is 25 items per request, but the writer
    may be handed more because it manages the batching itself).

    :param table: boto3 DynamoDB Table resource to write into.
    :param csv_file_name: path to the CSV file; the first row is treated as
        a header row and skipped.
    :param column_names: attribute names, matched positionally to the CSV
        columns; a data row shorter than this list raises IndexError (same
        strictness as the original positional loop).
    """
    print('Beginning csv to dynamo import...')
    with table.batch_writer() as batch:
        with open(csv_file_name, newline='') as csv_file:
            reader = csv.reader(csv_file)
            # Skip the header row.
            next(reader)
            count = 0
            for row in reader:
                # Positional column -> value mapping; enumerate preserves
                # the IndexError behavior for short rows.
                item = {name: row[i] for i, name in enumerate(column_names)}
                batch.put_item(Item=item)
                count += 1
                if count % 100 == 0:
                    print('Inserted ' + str(count) + ' items...')
            # The 'with' blocks close the file and flush the batch writer;
            # the old explicit csv_file.close() was redundant and is removed.
    print('Finished importing data into dynamo...')
def validate(table, csv_file_name, partition_key_col_name, sort_key_col_name):
    """Verify that every CSV row exists in the DynamoDB table.

    Re-reads the CSV (skipping the header row), builds the primary key from
    the first two columns, and reports any key that get_item cannot find.
    Errors raised by get_item itself deliberately propagate, as before.

    :param table: boto3 DynamoDB Table resource to read from.
    :param csv_file_name: path of the CSV file that was imported.
    :param partition_key_col_name: attribute name of the partition key
        (CSV column 0).
    :param sort_key_col_name: attribute name of the sort key (CSV column 1).
    """
    print('Beginning data validation...')
    with open(csv_file_name, newline='') as csv_file:
        reader = csv.reader(csv_file)
        # Skip the header row.
        next(reader)
        for row in reader:
            key = {partition_key_col_name: row[0], sort_key_col_name: row[1]}
            response = table.get_item(Key=key)
            # Explicit check instead of `assert`: asserts are stripped when
            # Python runs with -O, which would silently disable validation.
            if 'Item' not in response:
                print('Failed to validate data. Key ' + json.dumps(key) + ' does not exist...')
    print('Finished data validation...')
def main():
    # Template entry point: the import does nothing useful until these
    # placeholders are filled in before running.
    # NOTE(review): csv_file_name/table_name/key names are intentionally
    # empty strings here — confirm intended values when deploying.
    csv_file_name = ''
    table_name = ''
    region = 'us-west-2'
    partition_key_col_name = ''
    sort_key_col_name=''
    # Attribute names, matched positionally to the CSV columns:
    # column 0 = partition key, column 1 = sort key, column 2 = 'Column3'.
    column_names = [partition_key_col_name, sort_key_col_name, 'Column3']
    # Requires AWS credentials configured beforehand (aws configure).
    dynamodb_resource = boto3.resource('dynamodb', region_name=region)
    table = dynamodb_resource.Table(table_name)
    batch_create(table, csv_file_name, column_names)
    validate(table, csv_file_name, partition_key_col_name, sort_key_col_name)

if __name__ == "__main__":
    main()
3470757a316e0d017e10fe10eeda40a2177e7aab | 5,636 | py | Python | qcdb/molecule/parker.py | loriab/qccddb | d9e156ef8b313ac0633211fc6b841f84a3ddde24 | [
"BSD-3-Clause"
] | 8 | 2019-03-28T11:54:59.000Z | 2022-03-19T03:31:37.000Z | qcdb/molecule/parker.py | loriab/qccddb | d9e156ef8b313ac0633211fc6b841f84a3ddde24 | [
"BSD-3-Clause"
] | 39 | 2018-10-31T23:02:18.000Z | 2021-12-12T22:11:37.000Z | qcdb/molecule/parker.py | loriab/qccddb | d9e156ef8b313ac0633211fc6b841f84a3ddde24 | [
"BSD-3-Clause"
] | 9 | 2018-03-12T20:51:50.000Z | 2022-02-28T15:18:34.000Z | import math
import numpy as np
import qcelemental as qcel
BOND_FACTOR = 1.2 # fudge factor for bond length threshold
_expected_bonds = {
'H': 1,
'C': 4,
'N': 3,
'O': 2,
'F': 1,
'P': 3,
'S': 2,
}
def xyz2mol(self):
    """Return a string of this Molecule formatted for mol2.

    Written by Trent M. Parker 9 Jun 2014
    """
    bonds = _bond_profile(self)
    # Count only real atoms (entries with Z == 0 are skipped throughout).
    n_real = sum(1 for idx in range(self.natom()) if self.Z(idx))
    # Header: name line, program line, then the counts line.
    pieces = [f'{self.name()}\n', ' Generated by xyz2mol\n\n']
    pieces.append(f'{n_real:3d}{len(bonds):3d} 0 0 0 0 0 0 0 0999 V2000\n')
    # Atom block: coordinates converted from Bohr to Angstrom.
    to_ang = qcel.constants.bohr2angstroms
    for idx in range(self.natom()):
        if self.Z(idx):
            xc = self.x(idx) * to_ang
            yc = self.y(idx) * to_ang
            zc = self.z(idx) * to_ang
            pieces.append(f' {xc:9.4f} {yc:9.4f} {zc:9.4f} {self.symbol(idx):<2} 0 0 0 0 0\n')
    # Bond block: 1-indexed atom numbers plus bond order.
    for at1, at2, order in bonds:
        pieces.append(f'{at1 + 1:3d}{at2 + 1:3d}{order:3d} 0 0 0\n')
    pieces.append('M END\n')
    return ''.join(pieces)
def missing_bonds(bonds, bond_tree, at_types):
    """Return, per atom, how many bonds short of its expected valence it is.

    bond_tree is accepted for signature compatibility with the sibling
    helpers but is not consulted here.
    """
    deficits = []
    for atom, symbol in enumerate(at_types):
        # Total bond order currently attached to this atom.
        attached = sum(order for at1, at2, order in bonds if atom in (at1, at2))
        deficits.append(_expected_bonds[symbol] - attached)
    return deficits
def missing_neighbors(bond_tree, n_missing):
    """Return, per atom, how many of its neighbors still lack bonds.

    bond_tree maps each atom index to the indices bonded to it; n_missing
    gives each atom's current bond deficit.
    """
    counts = []
    for neighbors in bond_tree:
        deficient = sum(1 for j in neighbors if n_missing[j] > 0)
        counts.append(deficient)
    return counts
def _bond_profile(self):
    """Obtain bonding topology of molecule.

    Returns a list of [atom_i, atom_j, bond_order] entries (0-indexed).
    Connectivity is inferred from covalent radii; bond orders are then
    raised greedily until each atom's expected valence is satisfied.
    """
    # determine bond topology from covalent radii
    bonds = []
    for i in range(self.natom()):
        for j in range(i + 1, self.natom()):
            try:
                # qcdb.Molecule
                dist = np.linalg.norm(self.xyz(j, np_out=True) - self.xyz(i, np_out=True))
            except TypeError:
                # psi4.core.Molecule
                dist = self.xyz(j).distance(self.xyz(i))
            # TOOD check bohr/ang progress
            # Atoms closer than BOND_FACTOR * (sum of covalent radii)
            # are treated as singly bonded for now.
            bonded_dist = BOND_FACTOR * (qcel.covalentradii.get(self.symbol(i)) +
                                         qcel.covalentradii.get(self.symbol(j)))
            if bonded_dist > dist:
                bonds.append([i, j, 1])
    # determine bond order from number of bonds
    N_atoms = self.natom()
    N_bonds = len(bonds)
    at_types = [self.symbol(i) for i in range(self.natom())]
    # Adjacency list: bond_tree[i] holds the atoms bonded to atom i.
    bond_tree = [[] for i in range(N_atoms)]
    for i in range(N_bonds):
        at1 = bonds[i][0]
        at2 = bonds[i][1]
        bond_tree[at1].append(at2)
        bond_tree[at2].append(at1)
    # determine bond order for all bonds from bond tree and element types
    n_missing = missing_bonds(bonds, bond_tree, at_types)
    n_neighbors_missing = missing_neighbors(bond_tree, n_missing)
    # add double / triple bonds if only one neighbor missing bonds
    # N_left = number of multiple bonds still to place (deficits come in
    # pairs, one per endpoint, hence the division by 2).
    N_left = math.floor(sum(n_missing) / 2)
    N_left_previous = N_left + 1
    N_iter = 0
    while N_left > 0:
        N_iter += 1
        # If the previous pass made no progress, relax the constraint and
        # allow atoms with more deficient neighbors to be considered.
        if N_left == N_left_previous:
            neighbor_min += 1
        else:
            neighbor_min = 1
        N_left_previous = N_left
        # add a multiple bond to a deficient atom with the fewest number of deficient neighbors
        BREAK_LOOP = False
        for i in range(N_atoms):
            if n_missing[i] > 0 and n_neighbors_missing[i] == neighbor_min:
                N_neighbors = len(bond_tree[i])
                for a in range(N_neighbors):
                    j = bond_tree[i][a]
                    if n_missing[j] > 0:
                        # Raise the order of the existing i-j bond and
                        # update both endpoints' bookkeeping.
                        for p in range(N_bonds):
                            at1 = bonds[p][0]
                            at2 = bonds[p][1]
                            if (at1 == i and at2 == j) or (at1 == j and at2 == i):
                                bonds[p][2] += 1
                                n_missing[i] += -1
                                n_missing[j] += -1
                                n_neighbors_missing[i] += -1
                                n_neighbors_missing[j] += -1
                                N_left = math.floor(sum(n_missing) / 2)
                                BREAK_LOOP = True
                    if BREAK_LOOP:
                        break
            if BREAK_LOOP:
                break
        # recalculate incomplete bond topology
        n_missing = missing_bonds(bonds, bond_tree, at_types)
        n_neighbors_missing = missing_neighbors(bond_tree, n_missing)
        # break cycle if takes more than given number of iterations
        max_iter = 100
        if N_iter > max_iter:
            print("""Error: multiple bond determination not complete""")
            print("""    %i bonds unaccounted for""" % (N_left))
            break
    # bond order is number of bonds between each bonded atom pair
    bond_order = []
    for p in range(N_bonds):
        bond_order.append(bonds[p][2])
    for p in range(len(bond_order)):
        bonds[p][2] = bond_order[p]
    return bonds
| 32.022727 | 95 | 0.532825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,124 | 0.199432 |
34721682431358cf0bdf09cf841278f4336d58a5 | 4,050 | py | Python | Projects/CS_VQE/UnitaryPartitioning_myriad_on_FULL_H_LCU.py | AlexisRalli/VQE-code | 4112d2bba4c327360e95dfd7cb6120b2ce67bf29 | [
"MIT"
] | 1 | 2021-04-01T14:01:46.000Z | 2021-04-01T14:01:46.000Z | Projects/CS_VQE/UnitaryPartitioning_myriad_on_FULL_H_LCU.py | AlexisRalli/VQE-code | 4112d2bba4c327360e95dfd7cb6120b2ce67bf29 | [
"MIT"
] | 5 | 2019-11-13T16:23:54.000Z | 2021-04-07T11:03:06.000Z | Projects/CS_VQE/UnitaryPartitioning_myriad_on_FULL_H_LCU.py | AlexisRalli/VQE-code | 4112d2bba4c327360e95dfd7cb6120b2ce67bf29 | [
"MIT"
] | null | null | null | import numpy as np
import scipy as sp
import ast
import os
from quchem.Unitary_Partitioning.Graph import Clique_cover_Hamiltonian
import quchem.Misc_functions.conversion_scripts as conv_scr
from copy import deepcopy
from quchem.Unitary_Partitioning.Unitary_partitioning_LCU_method import LCU_linalg_Energy
from openfermion import qubit_operator_sparse
import pickle
import datetime
#######
import sys
# working_dir = os.getcwd()
working_dir = os.path.dirname(os.path.abspath(__file__)) # gets directory where running python file is!
Analysis_dir = os.path.join(working_dir, 'Analysis')
full_H_results_dir = os.path.join(Analysis_dir, 'SeqRot_LCU_script_A_results')
print('start time: {}'.format(datetime.datetime.now().strftime('%Y%b%d-%H%M%S%f')))
print('working directory:', working_dir)
###### IMPORT INITIAL RESULTS
## import LCU results
myriad_LCU_results = {}
for filename in os.listdir(full_H_results_dir):
if (filename.endswith('.pickle') and filename.startswith('LCU_CS_VQE_exp')):
file_path = os.path.join(full_H_results_dir, filename)
mol_name = filename[40:-8]
with open(file_path,'rb') as infile:
data = pickle.load(infile)
myriad_LCU_results[mol_name] = data
### find anti-commuting sets
unitary_paritioning_LCU={}
# optional params!
commutativity_flag = 'AC' ## <- defines relationship between sets!!!
Graph_colouring_strategy='largest_first'
check_reduction_LCU = False
######## take commandline arguement to run in parallel
mol_num = int(sys.argv[1])
sorted_mol_names = sorted(list(myriad_LCU_results.keys()))
mol_key = sorted_mol_names[mol_num-1] # UCL supercomputer indexes from 1, hence minus one here!
########
# for mol_key in tqdm(list(myriad_LCU_results.keys())): # removed loop and used myriad array input!
anti_commuting_sets_different_H_LCU_sizes={}
for ind_key in myriad_LCU_results[mol_key]:
if isinstance(ind_key, str):
continue
if ind_key==0:
# only non-contextual problem
anti_commuting_sets_different_H_LCU_sizes[ind_key]= {'AC_sets': {},
'E':myriad_LCU_results[mol_key][ind_key]['E']}
else:
### LCU
H_LCU_dict = myriad_LCU_results[mol_key][ind_key]['H']
H_LCU_pruned = {P_key: coeff.real for P_key, coeff in H_LCU_dict.items() if not np.isclose(coeff.real,0)}
H_LCU= conv_scr.Get_Openfermion_Hamiltonian(H_LCU_pruned)
n_qubits = len(list(H_LCU_dict.keys())[0])
anti_commuting_sets_LCU = Clique_cover_Hamiltonian(list(H_LCU),
n_qubits,
commutativity_flag,
Graph_colouring_strategy)
all_zero_Pn_index_dict = {set_key: 0 for set_key in anti_commuting_sets_LCU}
E_LCU = LCU_linalg_Energy(anti_commuting_sets_LCU,
all_zero_Pn_index_dict,
n_qubits,
atol=1e-8,
rtol=1e-05,
check_reduction=check_reduction_LCU)
anti_commuting_sets_different_H_LCU_sizes[ind_key]= {'AC_sets': anti_commuting_sets_LCU,
'E':E_LCU}
unitary_paritioning_LCU[mol_key]= deepcopy(anti_commuting_sets_different_H_LCU_sizes)
del anti_commuting_sets_different_H_LCU_sizes
####### SAVE OUTPUT details
unique_file_time = datetime.datetime.now().strftime('%Y%b%d-%H%M%S%f')
# output_dir = os.path.join(working_dir, 'Pickle_out')
output_dir = os.getcwd()
########
####### SAVE OUTPUT
file_name2 = 'Unitary_Partitinging_LCU_CS_VQE_LCU_exp__{}__{}_.pickle'.format(unique_file_time, mol_key)
file_out2=os.path.join(output_dir, file_name2)
with open(file_out2, 'wb') as outfile:
pickle.dump(unitary_paritioning_LCU, outfile)
print('pickle files dumped unqiue time id: {}'.format(unique_file_time))
print('end time: {}'.format(datetime.datetime.now().strftime('%Y%b%d-%H%M%S%f'))) | 33.75 | 113 | 0.678765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 899 | 0.221975 |
3472c5f0edade05adeaae51cedd69cdc00629866 | 2,925 | py | Python | notebooks/00-00-inspect-orig-files.py | will-henney/teresa-pn-ou5 | 5d127880c1da481f0a1fb3e23abce1d3bf9ccc1a | [
"MIT"
] | null | null | null | notebooks/00-00-inspect-orig-files.py | will-henney/teresa-pn-ou5 | 5d127880c1da481f0a1fb3e23abce1d3bf9ccc1a | [
"MIT"
] | null | null | null | notebooks/00-00-inspect-orig-files.py | will-henney/teresa-pn-ou5 | 5d127880c1da481f0a1fb3e23abce1d3bf9ccc1a | [
"MIT"
] | null | null | null | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.13.0
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # PN Ou 5: Inspect original files
from pathlib import Path
from astropy.io import fits
from astropy.table import Table
dpath = Path("../data/originals/")
# Look and see what sort of files we have:
data = []
kwds = ["MEZMODE", "DATE-OBS", "FILTER", "RA", "DEC", "PA", "CCDTYPE", "CCDSUM"]
for _file in sorted(dpath.glob("*.fits")):
hdu = fits.open(_file)[0]
thisdata = {"File": _file.stem}
for k in kwds:
thisdata[k] = hdu.header.get(k)
data.append(thisdata)
tab = Table(rows=data)
tab.show_in_notebook()
# So we have 2017 data with 70 micron slit and 2x2 binning, and then 2018, 2019 data with 150 micron slit and 3x3 binning.
# Select the image+slit or slit+image files that we will need to do astrometry of
m = ["slit" in _ for _ in tab["MEZMODE"]]
tab[m]
# Write out a list of all the Image+slit files
listfile = dpath.parent / "image-list.dat"
listfile.write_text("\n".join(tab[m]["File"]))
listfile
# Check that it worked:
listfile.read_text().splitlines()
# ## Find the HEALpix coordinates of our source
from astropy.coordinates import SkyCoord, ICRS
import astropy.units as u
# All the positions should be about the same, so we just use the first one.
c = SkyCoord(tab[0]["RA"], tab[0]["DEC"], unit=(u.hourangle, u.deg))
c
from astropy_healpix import HEALPix
# In order to find which data files to download from http://data.astrometry.net/5000/, we need to translate the celestial coordinate to HEALpix index numbers:
hp_2 = HEALPix(nside=2, order="nested", frame=ICRS())
hp_1 = HEALPix(nside=1, order="nested", frame=ICRS())
# Levels 0 to 4 use the `nside=2` tiles.
hp_2.cone_search_skycoord(c, radius=5 * u.arcminute)
# So that means `index500[0-4]-13.fits`
hp_1.cone_search_skycoord(c, radius=5 * u.arcminute)
# So that means `index500[5-7]-03.fits`
# + tags=[]
hp_2.cone_search_lonlat(300 * u.deg, 50 * u.deg, 0.1 * u.deg)
# -
# ## Look at the HEALpix data files
#
# Something isn't right. I got the 13 series but the program complains that the coordinates are not contained in the tile.
hdulist = fits.open(dpath.parent / "astrometry-net" / "index-5004-13.fits")
hdulist.info()
# Looks like HDU 13 has the original table of stars:
hdulist[13].header
tstars = Table.read(hdulist[13])
df = tstars.to_pandas()
df[["ra", "dec"]].describe()
# So no wonder that is not working. I want (318.6, 43.7) but this has an RA range of 270 to 315
tstars2 = Table.read(fits.open(dpath.parent / "astrometry-net" / "index-5004-14.fits")[13])
df2 = tstars2.to_pandas()
df2[["ra", "dec"]].describe()
# So, it turns out that tile 14 is what I needed, not 13.
| 25.884956 | 158 | 0.68547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,666 | 0.569573 |
3473928efd974d616527ce1198698da2f5b89c17 | 4,930 | py | Python | common/create_public_vn.py | vkolli/contrail-test-perf | db04b8924a2c330baabe3059788b149d957a7d67 | [
"Apache-2.0"
] | 1 | 2017-06-13T04:42:34.000Z | 2017-06-13T04:42:34.000Z | common/create_public_vn.py | vkolli/contrail-test-perf | db04b8924a2c330baabe3059788b149d957a7d67 | [
"Apache-2.0"
] | null | null | null | common/create_public_vn.py | vkolli/contrail-test-perf | db04b8924a2c330baabe3059788b149d957a7d67 | [
"Apache-2.0"
] | null | null | null | import project_test
from common.contrail_test_init import ContrailTestInit
from common.connections import ContrailConnections
import os
import fixtures
from test import BaseTestCase
import time
from floating_ip import *
from vn_test import *
from control_node import *
from common import isolated_creds
from tcutils.util import Singleton
class PublicVn(fixtures.Fixture):
    """Fixture that provisions the shared 'public' connectivity for tests.

    On construction it creates/loads the public tenant, creates the public
    virtual network and a floating-IP pool in it, and configures the BGP
    control nodes (including the MX gateway when MX_GW_TEST=1).
    """
    # NOTE(review): '__metaclass__' is Python-2 syntax; under Python 3 this
    # attribute is ignored and the class is NOT a singleton. Confirm which
    # interpreter this runs on before relying on singleton behavior.
    __metaclass__ = Singleton

    def __init__(self, isolated_creds_obj, inputs, ini_file = None ,logger = None, mx_rt = None):
        # Note: __init__ performs all the provisioning eagerly (setUp,
        # public VN, FIP pool, control nodes).
        self.isolated_creds = isolated_creds_obj
        self.username = self.isolated_creds.username
        self.password = self.isolated_creds.password
        self.inputs = inputs
        self.ini_file = ini_file
        self.logger = logger
        self.public_vn = self.inputs.public_vn
        self.public_tenant = self.inputs.admin_tenant
        self.setUp()
        self.create_public_vn(mx_rt)
        self.create_floatingip_pool()
        self.configure_control_nodes()

    def setUp(self):
        """Create/load the public tenant and project-scoped connections."""
        super(PublicVn, self).setUp()
        self.project = self.isolated_creds.create_tenant(self.public_tenant)
        self.inputs = self.isolated_creds.get_inputs(self.project)
        self.connections = self.isolated_creds.get_connections(self.inputs)
        if self.isolated_creds.__class__.__name__ == 'AdminIsolatedCreds':
            # If AdminIsolatedCreds, one could add user to tenant
            # Else, it is assumed that the administrator has taken
            # care
            self.isolated_creds.create_and_attach_user_to_tenant(
                self.project,
                self.username,
                self.password)
        self.project.set_sec_group_for_allow_all(\
            self.public_tenant, 'default')
    # end setUp

    def create_public_vn(self,mx_rt = None):
        """Create the router-external public VN (only when MX_GW_TEST=1).

        :param mx_rt: route-target number; falls back to inputs.mx_rt.
        """
        if (('MX_GW_TEST' in os.environ) and (
                os.environ.get('MX_GW_TEST') == '1')):
            fip_pool_name = self.inputs.fip_pool_name
            fvn_name = self.public_vn
            fip_subnets = [self.inputs.fip_pool]
            if not mx_rt:
                mx_rt = self.inputs.mx_rt
            self.public_vn_fixture = self.useFixture(
                VNFixture(
                    project_name=self.project.project_name,
                    connections=self.connections,
                    vn_name=fvn_name,
                    inputs=self.inputs,
                    subnets=fip_subnets,
                    router_asn=self.inputs.router_asn,
                    rt_number=mx_rt,
                    router_external=True))
            assert self.public_vn_fixture.verify_on_setup()
            self.logger.info('created public VN:%s' % fvn_name)
    # end createPublicVN

    def create_floatingip_pool(self):
        """Create a floating-IP pool in the public VN (only when MX_GW_TEST=1)."""
        if (('MX_GW_TEST' in os.environ) and (
                os.environ.get('MX_GW_TEST') == '1')):
            fip_pool_name = self.inputs.fip_pool_name
            fvn_name = self.public_vn
            fip_subnets = [self.inputs.fip_pool]
            self.fip_fixture = self.useFixture(
                FloatingIPFixture(
                    project_name=self.public_tenant,
                    inputs=self.inputs,
                    connections=self.connections,
                    pool_name=fip_pool_name,
                    vn_id=self.public_vn_fixture.vn_id,
                    option='neutron',
                    vn_name=fvn_name))
            assert self.fip_fixture.verify_on_setup()
            self.logger.info('created FIP Pool:%s under Project:%s' %
                             (self.fip_fixture.pool_name,
                              self.project.project_name))
    # end createfloatingip

    def configure_control_nodes(self):
        # Configuring all control nodes here
        if (('MX_GW_TEST' in os.environ) and (
                os.environ.get('MX_GW_TEST') == '1')):
            router_name = self.inputs.ext_routers[0][0]
            router_ip = self.inputs.ext_routers[0][1]
            # One contrail-type BGP router entry per configured control node.
            for entry in self.inputs.bgp_ips:
                hostname = self.inputs.host_data[entry]['name']
                entry_control_ip = self.inputs.host_data[
                    entry]['host_control_ip']
                cn_fixture1 = self.useFixture(
                    CNFixture(
                        connections=self.connections,
                        router_name=hostname,
                        router_ip=entry_control_ip,
                        router_type='contrail',
                        inputs=self.inputs))
            # Plus the external MX gateway as a BGP peer; give the control
            # plane a moment to settle before verifying.
            cn_fixturemx = self.useFixture(
                CNFixture(
                    connections=self.connections,
                    router_name=router_name,
                    router_ip=router_ip,
                    router_type='mx',
                    inputs=self.inputs))
            sleep(10)
            assert cn_fixturemx.verify_on_setup()
            # TODO Configure MX. Doing Manually For Now
| 40.409836 | 97 | 0.585598 | 4,588 | 0.930629 | 0 | 0 | 0 | 0 | 0 | 0 | 463 | 0.093915 |
347441db8bc4affdf62a00833370a2e23025b1d1 | 996 | py | Python | aiotdlib/api/functions/get_voice_chat_available_participants.py | jraylan/aiotdlib | 4528fcfca7c5c69b54a878ce6ce60e934a2dcc73 | [
"MIT"
] | null | null | null | aiotdlib/api/functions/get_voice_chat_available_participants.py | jraylan/aiotdlib | 4528fcfca7c5c69b54a878ce6ce60e934a2dcc73 | [
"MIT"
] | null | null | null | aiotdlib/api/functions/get_voice_chat_available_participants.py | jraylan/aiotdlib | 4528fcfca7c5c69b54a878ce6ce60e934a2dcc73 | [
"MIT"
] | null | null | null | # =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
class GetVoiceChatAvailableParticipants(BaseObject):
    """
    Returns list of participant identifiers, which can be used to join voice chats in a chat

    :param chat_id: Chat identifier
    :type chat_id: :class:`int`

    """

    # Constant TDLib request type tag; serialized under the "@type" key.
    ID: str = Field("getVoiceChatAvailableParticipants", alias="@type")
    chat_id: int

    @staticmethod
    def read(q: dict) -> GetVoiceChatAvailableParticipants:
        """Build the request object from a raw TDLib dict (keys match field aliases)."""
        return GetVoiceChatAvailableParticipants.construct(**q)
| 35.571429 | 92 | 0.47992 | 472 | 0.473896 | 0 | 0 | 137 | 0.13755 | 0 | 0 | 639 | 0.641566 |
34779ad96b37a7ece57e092fb2f969041bbfc0ed | 27,098 | py | Python | tests/test_tree.py | tgragnato/geneva | 2fc5b2f2f4766278902cff25af50b753d1d26a76 | [
"BSD-3-Clause"
] | 1,182 | 2019-11-15T02:56:47.000Z | 2022-03-30T16:09:04.000Z | tests/test_tree.py | Nekotekina/geneva | 3eb6b7342f9afd7add1f4aba9e2aadf0b9a5f196 | [
"BSD-3-Clause"
] | 21 | 2019-11-15T15:08:02.000Z | 2022-01-03T16:22:45.000Z | tests/test_tree.py | Nekotekina/geneva | 3eb6b7342f9afd7add1f4aba9e2aadf0b9a5f196 | [
"BSD-3-Clause"
] | 102 | 2019-11-15T15:01:07.000Z | 2022-03-30T13:52:47.000Z | import logging
import os
from scapy.all import IP, TCP
import actions.tree
import actions.drop
import actions.tamper
import actions.duplicate
import actions.utils
import layers.packet
def test_init():
    """
    Tests initialization: listing the registered outbound actions must not raise.
    """
    print(actions.action.Action.get_actions("out"))
def test_count_leaves():
    """
    Tests leaf count is correct.
    """
    a = actions.tree.ActionTree("out")
    logger = logging.getLogger("test")
    # Missing the '[...]' trigger brackets — the parser must reject this DNA.
    assert not a.parse("TCP:reserved:0tamper{TCP:flags:replace:S}-|", logger), "Tree parsed malformed DNA"
    a.parse("[TCP:reserved:0]-tamper{TCP:flags:replace:S}-|", logger)
    duplicate = actions.duplicate.DuplicateAction()
    duplicate2 = actions.duplicate.DuplicateAction()
    drop = actions.drop.DropAction()
    assert a.count_leaves() == 1
    assert a.remove_one()
    a.add_action(duplicate)
    assert a.count_leaves() == 1
    # A left child alone does not add a leaf...
    duplicate.left = duplicate2
    assert a.count_leaves() == 1
    # ...but filling the right branch does.
    duplicate.right = drop
    assert a.count_leaves() == 2
def test_check():
    """
    Tests action tree check function: the trigger decides whether a given
    packet activates the tree.
    """
    a = actions.tree.ActionTree("out")
    logger = logging.getLogger("test")
    a.parse("[TCP:flags:RA]-tamper{TCP:flags:replace:S}-|", logger)
    # Flags "A" do not match the TCP:flags:RA trigger.
    p = layers.packet.Packet(IP()/TCP(flags="A"))
    assert not a.check(p, logger)
    p = layers.packet.Packet(IP(ttl=64)/TCP(flags="RA"))
    assert a.check(p, logger)
    # Removing an action must not affect trigger matching.
    assert a.remove_one()
    assert a.check(p, logger)
    a.parse("[TCP:reserved:0]-tamper{TCP:flags:replace:S}-|", logger)
    assert a.check(p, logger)
    # Triggers can also match on IP fields.
    a.parse("[IP:ttl:64]-tamper{TCP:flags:replace:S}-|", logger)
    assert a.check(p, logger)
    p = layers.packet.Packet(IP(ttl=15)/TCP(flags="RA"))
    assert not a.check(p, logger)
def test_scapy():
    """
    Tests misc. scapy aspects relevant to strategies.
    """
    a = actions.tree.ActionTree("out")
    logger = logging.getLogger("test")
    a.parse("[TCP:reserved:0]-tamper{TCP:flags:replace:S}-|", logger)
    p = layers.packet.Packet(IP()/TCP(flags="A"))
    assert a.check(p, logger)
    # Running the tree applies the tamper: flags are rewritten to SYN.
    packets = a.run(p, logger)
    assert packets[0][TCP].flags == "S"
    p = layers.packet.Packet(IP()/TCP(flags="A"))
    assert a.check(p, logger)
    # Corrupting the checksum must still yield some (nonzero) chksum value.
    a.parse("[TCP:reserved:0]-tamper{TCP:chksum:corrupt}-|", logger)
    packets = a.run(p, logger)
    assert packets[0][TCP].chksum
    assert a.check(p, logger)
def test_str():
    """
    Tests string representation of action trees (the strategy "DNA" syntax).
    """
    logger = logging.getLogger("test")
    t = actions.trigger.Trigger("field", "flags", "TCP")
    a = actions.tree.ActionTree("out", trigger=t)
    # An empty tree prints as just its trigger.
    assert str(a).strip() == "[%s]-|" % str(t)
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    assert a.add_action(tamper)
    assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}-|"
    # Tree will not add a duplicate action
    assert not a.add_action(tamper)
    assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}-|"
    assert a.add_action(tamper2)
    assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R},)-|"
    assert a.add_action(actions.duplicate.DuplicateAction())
    assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate,),)-|"
    drop = actions.drop.DropAction()
    assert a.add_action(drop)
    # Insertion under a branching action may land on either side.
    assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate(drop,),),)-|" or \
        str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate(,drop),),)-|"
    assert a.remove_action(drop)
    assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate,),)-|"
    # Cannot remove action that is not present
    assert not a.remove_action(drop)
    assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(duplicate,),)-|"
    a = actions.tree.ActionTree("out", trigger=t)
    orig = "[TCP:urgptr:15963]-duplicate(,drop)-|"
    a.parse(orig, logger)
    # remove_one removes some action, so either child may survive.
    assert a.remove_one()
    assert orig != str(a)
    assert str(a) in ["[TCP:urgptr:15963]-drop-|", "[TCP:urgptr:15963]-duplicate-|"]
def test_pretty_print_send():
    """Pretty-print a tree with a single duplicate: both branches show as sends."""
    t = actions.trigger.Trigger("field", "flags", "TCP")
    a = actions.tree.ActionTree("out", trigger=t)
    duplicate = actions.duplicate.DuplicateAction()
    a.add_action(duplicate)
    correct_string = "TCP:flags:0\nduplicate\n├── ===> \n└── ===> "
    assert a.pretty_print() == correct_string
def test_pretty_print(logger):
    """
    Print complex tree, although difficult to test
    """
    t = actions.trigger.Trigger("field", "flags", "TCP")
    a = actions.tree.ActionTree("out", trigger=t)
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    duplicate = actions.duplicate.DuplicateAction()
    duplicate2 = actions.duplicate.DuplicateAction()
    duplicate3 = actions.duplicate.DuplicateAction()
    duplicate4 = actions.duplicate.DuplicateAction()
    duplicate5 = actions.duplicate.DuplicateAction()
    drop = actions.drop.DropAction()
    drop2 = actions.drop.DropAction()
    drop3 = actions.drop.DropAction()
    drop4 = actions.drop.DropAction()
    # Hand-build a deep, branching tree before attaching it.
    duplicate.left = duplicate2
    duplicate.right = duplicate3
    duplicate2.left = tamper
    duplicate2.right = drop
    duplicate3.left = duplicate4
    duplicate3.right = drop2
    duplicate4.left = duplicate5
    duplicate4.right = drop3
    duplicate5.left = drop4
    duplicate5.right = tamper2
    a.add_action(duplicate)
    correct_string = "TCP:flags:0\nduplicate\n├── duplicate\n│ ├── tamper{TCP:flags:replace:S}\n│ │ └── ===> \n│ └── drop\n└── duplicate\n ├── duplicate\n │ ├── duplicate\n │ │ ├── drop\n │ │ └── tamper{TCP:flags:replace:R}\n │ │ └── ===> \n │ └── drop\n └── drop"
    assert a.pretty_print() == correct_string
    # visual=True renders a tree.png image to disk.
    assert a.pretty_print(visual=True)
    assert os.path.exists("tree.png")
    os.remove("tree.png")
    a.parse("[TCP:flags:0]-|", logger)
    a.pretty_print(visual=True)  # Empty action tree
    assert not os.path.exists("tree.png")
def test_pretty_print_order():
    """
    Tests the left/right ordering by reading in a new tree
    """
    logger = logging.getLogger("test")
    a = actions.tree.ActionTree("out")
    assert a.parse("[TCP:flags:A]-duplicate(tamper{TCP:flags:replace:R}(tamper{TCP:chksum:replace:14239},),duplicate(tamper{TCP:flags:replace:S}(tamper{TCP:chksum:replace:14239},),))-|", logger)
    # Left branch (R tamper) must render before the right branch.
    correct_pretty_print = "TCP:flags:A\nduplicate\n├── tamper{TCP:flags:replace:R}\n│ └── tamper{TCP:chksum:replace:14239}\n│ └── ===> \n└── duplicate\n ├── tamper{TCP:flags:replace:S}\n │ └── tamper{TCP:chksum:replace:14239}\n │ └── ===> \n └── ===> "
    assert a.pretty_print() == correct_pretty_print
def test_parse():
    """
    Tests string parsing: parsed trees must match equivalent trees built
    programmatically, and malformed DNA must be rejected.
    """
    logger = logging.getLogger("test")
    t = actions.trigger.Trigger("field", "flags", "TCP")
    a = actions.tree.ActionTree("out", trigger=t)
    base_t = actions.trigger.Trigger("field", "flags", "TCP")
    base_a = actions.tree.ActionTree("out", trigger=base_t)
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    tamper3 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper4 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    a.parse("[TCP:flags:0]-|", logger)
    assert str(a) == str(base_a)
    assert len(a) == 0
    # Build up base_a action-by-action and check each parsed string matches.
    base_a.add_action(tamper)
    assert a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}-|", logger)
    assert str(a) == str(base_a)
    assert len(a) == 1
    assert a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R},)-|", logging.getLogger("test"))
    base_a.add_action(tamper2)
    assert str(a) == str(base_a)
    assert len(a) == 2
    base_a.add_action(tamper3)
    base_a.add_action(tamper4)
    assert a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R}(tamper{TCP:flags:replace:S}(tamper{TCP:flags:replace:R},),),)-|", logging.getLogger("test"))
    assert str(a) == str(base_a)
    assert len(a) == 4
    # Fresh baseline: now exercise branching (left, right) children.
    base_t = actions.trigger.Trigger("field", "flags", "TCP")
    base_a = actions.tree.ActionTree("out", trigger=base_t)
    duplicate = actions.duplicate.DuplicateAction()
    assert a.parse("[TCP:flags:0]-duplicate-|", logger)
    base_a.add_action(duplicate)
    assert str(a) == str(base_a)
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    tamper3 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="A")
    tamper4 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    duplicate.left = tamper
    assert a.parse("[TCP:flags:0]-duplicate(tamper{TCP:flags:replace:S},)-|", logger)
    assert str(a) == str(base_a)
    duplicate.right = tamper2
    assert a.parse("[TCP:flags:0]-duplicate(tamper{TCP:flags:replace:S},tamper{TCP:flags:replace:R})-|", logger)
    assert str(a) == str(base_a)
    tamper2.left = tamper3
    assert a.parse("[TCP:flags:0]-duplicate(tamper{TCP:flags:replace:S},tamper{TCP:flags:replace:R}(tamper{TCP:flags:replace:A},))-|", logger)
    assert str(a) == str(base_a)
    # Full strategy parse: outbound forest plus (empty) inbound forest.
    strategy = actions.utils.parse("[TCP:flags:0]-duplicate(tamper{TCP:flags:replace:S},tamper{TCP:flags:replace:R})-| \/", logger)
    assert strategy
    assert len(strategy.out_actions[0]) == 3
    assert len(strategy.in_actions) == 0
    # Malformed DNA must be rejected, not silently accepted.
    assert not a.parse("[]", logger)  # No valid trigger
    assert not a.parse("[TCP:flags:0]-", logger)  # No valid ending "|"
    assert not a.parse("[TCP:]-|", logger)  # invalid trigger
    assert not a.parse("[TCP:flags:0]-foo-|", logger)  # Non-existent action
    assert not a.parse("[TCP:flags:0]--|", logger)  # Empty action
    assert not a.parse("[TCP:flags:0]-duplicate(,,,)-|", logger)  # Bad tree
    assert not a.parse("[TCP:flags:0]-duplicate()))-|", logger)  # Bad tree
    assert not a.parse("[TCP:flags:0]-duplicate(((()-|", logger)  # Bad tree
    assert not a.parse("[TCP:flags:0]-duplicate(,))))-|", logger)  # Bad tree
    assert not a.parse("[TCP:flags:0]-drop(duplicate,)-|", logger)  # Terminal action with children
    assert not a.parse("[TCP:flags:0]-drop(duplicate,duplicate)-|", logger)  # Terminal action with children
    assert not a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(,duplicate)-|", logger)  # Non-branching action with right child
    assert not a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(drop,duplicate)-|", logger)  # Non-branching action with children
def test_tree():
    """
    Tests basic tree functionality.

    Covers slot counting as actions are added, rejection of additions once a
    terminal (drop) action is the root, string representation, parsing, and
    forced random action selection.
    """
    t = actions.trigger.Trigger(None, None, None)
    a = actions.tree.ActionTree("out", trigger=t)
    tamper = actions.tamper.TamperAction()
    tamper2 = actions.tamper.TamperAction()
    duplicate = actions.duplicate.DuplicateAction()
    # Adding None must be a harmless no-op
    a.add_action(None)
    a.add_action(tamper)
    assert a.get_slots() == 1
    a.add_action(tamper2)
    assert a.get_slots() == 1
    # duplicate branches, so the tree gains an extra open slot
    a.add_action(duplicate)
    assert a.get_slots() == 2
    # A drop root leaves no open slots and rejects further actions
    t = actions.trigger.Trigger(None, None, None)
    a = actions.tree.ActionTree("out", trigger=t)
    drop = actions.drop.DropAction()
    a.add_action(drop)
    assert a.get_slots() == 0
    add_success = a.add_action(tamper)
    assert not add_success
    assert a.get_slots() == 0
    rep = ""
    for s in a.string_repr(a.action_root):
        rep += s
    assert rep == "drop"
    print(str(a))
    assert a.parse("[TCP:flags:A]-duplicate(tamper{TCP:seq:corrupt},)-|", logging.getLogger("test"))
    for act in a:
        print(str(a))
    assert len(a) == 2
    assert a.get_slots() == 2
    # The 'request' override pins the choice regardless of randomness
    for _ in range(100):
        assert str(a.get_rand_action("out", request="DropAction")) == "drop"
def test_remove():
    """
    Tests remove

    Covers removal from an empty tree, splicing of children when a middle
    node is removed, and removal of a branching subtree root.
    """
    t = actions.trigger.Trigger(None, None, None)
    a = actions.tree.ActionTree("out", trigger=t)
    tamper = actions.tamper.TamperAction()
    tamper2 = actions.tamper.TamperAction()
    tamper3 = actions.tamper.TamperAction()
    # Removing from an empty tree must fail
    assert not a.remove_action(tamper)
    a.add_action(tamper)
    assert a.remove_action(tamper)
    a.add_action(tamper)
    a.add_action(tamper2)
    a.add_action(tamper3)
    # Removing a middle node splices its child up to its parent
    assert a.remove_action(tamper2)
    assert tamper2 not in a
    assert tamper.left == tamper3
    assert not tamper.right
    assert len(a) == 2
    a = actions.tree.ActionTree("out", trigger=t)
    duplicate = actions.duplicate.DuplicateAction()
    tamper = actions.tamper.TamperAction()
    tamper2 = actions.tamper.TamperAction()
    tamper3 = actions.tamper.TamperAction()
    a.add_action(tamper)
    assert a.action_root == tamper
    duplicate.left = tamper2
    duplicate.right = tamper3
    a.add_action(duplicate)
    assert len(a) == 4
    # Removing a branching node keeps only its left child in place
    assert a.remove_action(duplicate)
    assert duplicate not in a
    assert tamper.left == tamper2
    assert not tamper.right
    assert len(a) == 2
    a.parse("[TCP:flags:A]-|", logging.getLogger("test"))
    assert not a.remove_one(), "Cannot remove one with no action root"
def test_len():
    """
    Tests length calculation.
    """
    trigger = actions.trigger.Trigger(None, None, None)
    tree = actions.tree.ActionTree("out", trigger=trigger)
    first_tamper = actions.tamper.TamperAction()
    second_tamper = actions.tamper.TamperAction()
    # An empty tree reports zero actions.
    assert len(tree) == 0, "__len__ returned wrong length"
    # The first insertion counts; re-inserting the same object must not.
    tree.add_action(first_tamper)
    assert len(tree) == 1, "__len__ returned wrong length"
    tree.add_action(first_tamper)
    assert len(tree) == 1, "__len__ returned wrong length"
    tree.add_action(second_tamper)
    assert len(tree) == 2, "__len__ returned wrong length"
    # A branching action also counts as a single node.
    branching = actions.duplicate.DuplicateAction()
    tree.add_action(branching)
    assert len(tree) == 3, "__len__ returned wrong length"
def test_contains():
    """
    Tests contains method

    contains() must track additions and removals exactly, and duplicate
    additions of the same object must be rejected.
    """
    t = actions.trigger.Trigger(None, None, None)
    a = actions.tree.ActionTree("out", trigger=t)
    tamper = actions.tamper.TamperAction()
    tamper2 = actions.tamper.TamperAction()
    tamper3 = actions.tamper.TamperAction()
    assert not a.contains(tamper), "contains incorrect behavior"
    assert not a.contains(tamper2), "contains incorrect behavior"
    a.add_action(tamper)
    assert a.contains(tamper), "contains incorrect behavior"
    assert not a.contains(tamper2), "contains incorrect behavior"
    # The same object cannot be inserted twice
    add_success = a.add_action(tamper)
    assert not add_success, "added duplicate action"
    assert a.contains(tamper), "contains incorrect behavior"
    assert not a.contains(tamper2), "contains incorrect behavior"
    a.add_action(tamper2)
    assert a.contains(tamper), "contains incorrect behavior"
    assert a.contains(tamper2), "contains incorrect behavior"
    a.remove_action(tamper2)
    assert a.contains(tamper), "contains incorrect behavior"
    assert not a.contains(tamper2), "contains incorrect behavior"
    a.add_action(tamper2)
    assert a.contains(tamper), "contains incorrect behavior"
    assert a.contains(tamper2), "contains incorrect behavior"
    # Removing the root must not disturb the remaining node
    remove_success = a.remove_action(tamper)
    assert remove_success
    assert not a.contains(tamper), "contains incorrect behavior"
    assert a.contains(tamper2), "contains incorrect behavior"
    a.add_action(tamper3)
    assert a.contains(tamper3), "contains incorrect behavior"
    assert len(a) == 2, "len incorrect return"
    remove_success = a.remove_action(tamper2)
    assert remove_success
def test_iter():
    """
    Tests iterator.
    """
    trigger = actions.trigger.Trigger(None, None, None)
    tree = actions.tree.ActionTree("out", trigger=trigger)
    tamper_syn = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper_rst = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    # Two distinct actions insert fine; the same object cannot go in twice.
    assert tree.add_action(tamper_syn)
    assert tree.add_action(tamper_rst)
    assert not tree.add_action(tamper_syn)
    # Iterating the tree must yield each node without raising.
    for node in tree:
        print(node)
def test_run():
    """
    Tests running packets through the chain.

    Builds action trees by hand and by parsing, then checks the number of
    emitted packets and their TCP flags after each run.
    """
    logger = logging.getLogger("test")
    t = actions.trigger.Trigger(None, None, None)
    a = actions.tree.ActionTree("out", trigger=t)
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    duplicate = actions.duplicate.DuplicateAction()
    duplicate2 = actions.duplicate.DuplicateAction()
    drop = actions.drop.DropAction()
    packet = layers.packet.Packet(IP()/TCP())
    # A single tamper rewrites the flags of the single output packet
    a.add_action(tamper)
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 1
    assert None not in packets
    assert packets[0].get("TCP", "flags") == "S"
    a.add_action(tamper2)
    print(str(a))
    packet = layers.packet.Packet(IP()/TCP())
    assert not a.add_action(tamper), "tree added duplicate action"
    # Chained tampers apply in order; the last write wins
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 1
    assert None not in packets
    assert packets[0].get("TCP", "flags") == "R"
    print(str(a))
    a.remove_action(tamper2)
    a.remove_action(tamper)
    # A lone duplicate emits two untouched copies
    a.add_action(duplicate)
    packet = layers.packet.Packet(IP()/TCP(flags="RA"))
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 2
    assert None not in packets
    assert packets[0][TCP].flags == "RA"
    assert packets[1][TCP].flags == "RA"
    print(str(a))
    # Each duplicate branch gets its own tamper
    duplicate.left = tamper
    duplicate.right = tamper2
    packet = layers.packet.Packet(IP()/TCP(flags="RA"))
    print("ABUT TO RUN")
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 2
    assert None not in packets
    print(str(a))
    print(str(packets[0]))
    print(str(packets[1]))
    assert packets[0][TCP].flags == "S"
    assert packets[1][TCP].flags == "R"
    print(str(a))
    # Nesting a second duplicate under the left tamper yields three packets
    tamper.left = duplicate2
    packet = layers.packet.Packet(IP()/TCP(flags="RA"))
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 3
    assert None not in packets
    assert packets[0][TCP].flags == "S"
    assert packets[1][TCP].flags == "S"
    assert packets[2][TCP].flags == "R"
    print(str(a))
    # A drop under the right tamper removes that branch's packet
    tamper2.left = drop
    packet = layers.packet.Packet(IP()/TCP(flags="RA"))
    packets = a.run(packet, logging.getLogger("test"))
    assert len(packets) == 2
    assert None not in packets
    assert packets[0][TCP].flags == "S"
    assert packets[1][TCP].flags == "S"
    print(str(a))
    assert a.remove_action(duplicate2)
    # Dropping on every remaining path leaves no output packets
    tamper.left = actions.drop.DropAction()
    packet = layers.packet.Packet(IP()/TCP(flags="RA"))
    packets = a.run(packet, logger )
    assert len(packets) == 0
    print(str(a))
    # A parsed strategy runs the same as a hand-built one
    a.parse("[TCP:flags:A]-duplicate(tamper{TCP:flags:replace:R}(tamper{TCP:chksum:replace:14239},),duplicate(tamper{TCP:flags:replace:S},))-|", logger)
    packet = layers.packet.Packet(IP()/TCP(flags="A"))
    assert a.check(packet, logger)
    packets = a.run(packet, logger)
    assert len(packets) == 3
    assert packets[0][TCP].flags == "R"
    assert packets[1][TCP].flags == "S"
    assert packets[2][TCP].flags == "A"
def test_index():
    """
    Tests index
    """
    tree = actions.tree.ActionTree("out")
    tamper_syn = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper_rst = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    tamper_fin = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="F")
    assert tree.add_action(tamper_syn)
    # Indexing returns the nth action; out-of-range lookups are falsy.
    assert tree[0] == tamper_syn
    assert not tree[1]
    assert tree.add_action(tamper_rst)
    assert tree[0] == tamper_syn
    assert tree[1] == tamper_rst
    # Negative indices count from the end, as with normal sequences.
    assert tree[-1] == tamper_rst
    assert not tree[10]
    assert tree.add_action(tamper_fin)
    assert tree[-1] == tamper_fin
    assert not tree[-11]
def test_mate():
    """
    Tests mate primitive

    mate()/swap() exchange subtrees between two action trees; after every
    exchange the two trees must share no nodes (no aliasing).
    """
    logger = logging.getLogger("test")
    t = actions.trigger.Trigger("field", "flags", "TCP")
    a = actions.tree.ActionTree("out", trigger=t)
    assert not a.choose_one()
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    duplicate = actions.duplicate.DuplicateAction()
    duplicate2 = actions.duplicate.DuplicateAction()
    drop = actions.drop.DropAction()
    other_a = actions.tree.ActionTree("out", trigger=t)
    assert not a.mate(other_a), "Can't mate empty trees"
    assert a.add_action(tamper)
    assert other_a.add_action(tamper2)
    assert a.choose_one() == tamper
    assert other_a.choose_one() == tamper2
    # Root nodes have no parent
    assert a.get_parent(tamper) == (None, None)
    assert other_a.get_parent(tamper2) == (None, None)
    assert a.add_action(duplicate)
    assert a.get_parent(duplicate) == (tamper, "left")
    duplicate.right = drop
    assert a.get_parent(drop) == (duplicate, "right")
    assert other_a.add_action(duplicate2)
    # Test mating a full tree with a full tree
    assert str(a) == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate(,drop),)-|"
    assert str(other_a) == "[TCP:flags:0]-tamper{TCP:flags:replace:R}(duplicate,)-|"
    assert a.swap(duplicate, other_a, duplicate2)
    assert str(a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate,)-|"
    assert str(other_a).strip() == "[TCP:flags:0]-tamper{TCP:flags:replace:R}(duplicate(,drop),)-|"
    assert len(a) == 2
    assert len(other_a) == 3
    # The swapped subtrees have fully changed ownership
    assert duplicate2 not in other_a
    assert duplicate not in a
    assert tamper.left == duplicate2
    assert tamper2.left == duplicate
    assert other_a.get_parent(duplicate) == (tamper2, "left")
    assert a.get_parent(duplicate2) == (tamper, "left")
    assert other_a.get_parent(drop) == (duplicate, "right")
    assert a.get_parent(None) == (None, None)
    # Test mating two trees with just root nodes
    t = actions.trigger.Trigger("field", "flags", "TCP")
    a = actions.tree.ActionTree("out", trigger=t)
    assert not a.choose_one()
    tamper = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="S")
    tamper2 = actions.tamper.TamperAction(field="flags", tamper_type="replace", tamper_value="R")
    duplicate = actions.duplicate.DuplicateAction()
    duplicate2 = actions.duplicate.DuplicateAction()
    drop = actions.drop.DropAction()
    other_a = actions.tree.ActionTree("out", trigger=t)
    assert not a.mate(other_a)
    assert a.add_action(duplicate)
    assert other_a.add_action(duplicate2)
    assert a.mate(other_a)
    assert a.action_root == duplicate2
    assert other_a.action_root == duplicate
    assert not duplicate.left and not duplicate.right
    assert not duplicate2.left and not duplicate2.right
    # Confirm that no nodes have been aliased or connected between the trees
    for node in a:
        for other_node in other_a:
            assert not node.left == other_node
            assert not node.right == other_node
    # Test mating two trees where one is empty
    assert a.remove_action(duplicate2)
    # This should swap the duplicate action to be the action root of the other tree
    assert str(a) == "[TCP:flags:0]-|"
    assert str(other_a) == "[TCP:flags:0]-duplicate-|"
    assert a.mate(other_a)
    assert not other_a.action_root
    assert a.action_root == duplicate
    assert len(a) == 1
    assert len(other_a) == 0
    # Confirm that no nodes have been aliased or connected between the trees
    for node in a:
        for other_node in other_a:
            if other_node:
                assert not node.left == other_node
                assert not node.right == other_node
    assert a.parse("[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate(,drop),)-|", logger)
    drop = a.action_root.left.right
    assert str(drop) == "drop"
    # Note that this will return a valid ActionTree, but because it is empty,
    # it is technically a False-y value, as it's length is 0
    assert other_a.parse("[TCP:flags:0]-|", logger) == other_a
    # Swapping into an empty tree installs the node as its root
    a.swap(drop, other_a, None)
    assert other_a.action_root == drop
    assert not a.action_root.left.right
    assert str(other_a) == "[TCP:flags:0]-drop-|"
    assert str(a) == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(duplicate,)-|"
    other_a.swap(drop, a, a.action_root.left)
    # Confirm that no nodes have been aliased or connected between the trees
    for node in a:
        for other_node in other_a:
            if other_node:
                assert not node.left == other_node
                assert not node.right == other_node
    assert str(other_a) == "[TCP:flags:0]-duplicate-|"
    assert str(a) == "[TCP:flags:0]-tamper{TCP:flags:replace:S}(drop,)-|"
    a.parse("[TCP:flags:0]-drop-|", logger)
    other_a.parse("[TCP:flags:0]-duplicate(drop,drop)-|", logger)
    a_drop = a.action_root
    other_duplicate = other_a.action_root
    # Swapping the two roots exchanges the entire trees
    a.swap(a_drop, other_a, other_duplicate)
    print(str(a))
    print(str(other_a))
    assert str(other_a) == "[TCP:flags:0]-drop-|"
    assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
    duplicate = actions.duplicate.DuplicateAction()
    duplicate2 = actions.duplicate.DuplicateAction()
    drop = actions.drop.DropAction()
    drop2 = actions.drop.DropAction()
    drop3 = actions.drop.DropAction()
    a = actions.tree.ActionTree("out", trigger=t)
    a.add_action(duplicate)
    a.add_action(drop)
    a.add_action(drop2)
    assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
    assert a.get_slots() == 0
    other_a = actions.tree.ActionTree("out", trigger=t)
    other_a.add_action(drop3)
    # Swapping equivalent leaves leaves both string forms unchanged
    a.swap(drop, other_a, drop3)
    assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
    a.swap(drop3, other_a, drop)
    assert str(a) == "[TCP:flags:0]-duplicate(drop,drop)-|"
    assert a.mate(other_a)
def test_choose_one():
    """
    Tests choose_one functionality

    choose_one() picks a random node from the tree; over many draws every
    node must eventually be selected.
    """
    a = actions.tree.ActionTree("out")
    drop = actions.drop.DropAction()
    # An empty tree has nothing to choose
    assert not a.choose_one()
    assert a.add_action(drop)
    assert a.choose_one() == drop
    assert a.remove_action(drop)
    assert not a.choose_one()
    duplicate = actions.duplicate.DuplicateAction()
    a.add_action(duplicate)
    assert a.choose_one() == duplicate
    duplicate.left = drop
    assert a.choose_one() in [duplicate, drop]
    # Make sure that both actions get chosen
    chosen = set()
    for i in range(0, 10000):
        act = a.choose_one()
        chosen.add(act)
    assert chosen == set([duplicate, drop])
| 39.501458 | 315 | 0.665104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,514 | 0.275622 |
3478264c794a3f180ec86805c965604a1b3922e9 | 2,365 | py | Python | l2address/formatter.py | EgorBlagov/l2address | d29970116184bd4bb78a58e789e07ed5009228b4 | [
"MIT"
] | 1 | 2021-08-02T07:13:29.000Z | 2021-08-02T07:13:29.000Z | l2address/formatter.py | EgorBlagov/l2address | d29970116184bd4bb78a58e789e07ed5009228b4 | [
"MIT"
] | null | null | null | l2address/formatter.py | EgorBlagov/l2address | d29970116184bd4bb78a58e789e07ed5009228b4 | [
"MIT"
] | null | null | null | import re
from abc import ABC, abstractmethod
from .utils import parse_hex, per_join
class Formatter(ABC):
    """Base class for address formatters.

    Subclasses render an integer value as text (``format``) and supply the
    validation regex (``_get_validator_regexp``) that ``parse`` uses before
    converting text back into an integer.
    """
    def _to_clean_str(self, value, max_value):
        """Return value as bare hex digits, zero-padded to the width of max_value."""
        value_str = str(hex(value))[2:]
        full_mac_str = '0' * (self._hex_digits_count(max_value) -
                              len(value_str)) + value_str
        return full_mac_str
    def _hex_digits_count(self, value):
        """Number of hex digits needed to represent value."""
        return len(hex(value)[2:])
    def _common_regex(self, max_value, delimiter, step):
        """Build an anchored regex of hex digits grouped every `step` by `delimiter`.

        Bug fix: the character class previously read ``[\\da-fA-f]``; the
        ``A-f`` range accidentally matched ``G``-``Z`` and ``[\\]^_`` and
        backtick as well. It now matches hex digits only.
        """
        return "^" + per_join([r'[\da-fA-F]' for _ in range(self._hex_digits_count(max_value))], delimiter, step) + "$"
    @abstractmethod
    def format(self, value, max_value):
        """Render value (an int no larger than max_value) as text."""
        pass
    def parse(self, _str, max_value):
        """Parse _str into an int.

        Raises ValueError when _str does not match this formatter's layout.
        """
        m = re.match(self._get_validator_regexp(_str, max_value), _str)
        if m is None:
            raise ValueError('Invalid MAC address format')
        return self._parse_value_from_str(_str)
    def _parse_value_from_str(self, _str):
        # parse_hex handles the raw conversion; subclasses may override.
        return parse_hex(_str)
    @abstractmethod
    def _get_validator_regexp(self, _str, max_value):
        """Return the regex a valid string in this format must fully match."""
        pass
class ColonFormatter(Formatter):
    """Renders values as colon-separated hex pairs, e.g. ``aa:bb:cc``."""
    def format(self, value, max_value):
        digits = self._to_clean_str(value, max_value)
        return per_join(digits, ':', 2)
    def _get_validator_regexp(self, _str, max_value):
        return self._common_regex(max_value, r'\:', 2)
class PeriodFormatter(Formatter):
    """Renders values as period-separated groups of a configurable width."""
    def __init__(self, step=2):
        super().__init__()
        # Number of hex digits per period-separated group.
        self.step = step
    def format(self, value, max_value):
        digits = self._to_clean_str(value, max_value)
        return per_join(digits, '.', self.step)
    def _get_validator_regexp(self, _str, max_value):
        return self._common_regex(max_value, r'\.', self.step)
class HyphenFormatter(Formatter):
    """Renders values as hyphen-separated hex pairs, e.g. ``aa-bb-cc``."""
    def format(self, value, max_value):
        digits = self._to_clean_str(value, max_value)
        return per_join(digits, '-', 2)
    def _get_validator_regexp(self, _str, max_value):
        return self._common_regex(max_value, r'\-', 2)
class CleanFormatter(Formatter):
    """Renders values as an undelimited run of hex digits."""
    def format(self, value, max_value):
        return self._to_clean_str(value, max_value)
    def _get_validator_regexp(self, _str, max_value):
        # Empty delimiter: digits only, no separators accepted.
        return self._common_regex(max_value, '', 2)
# Formatters tried by default, in order, when parsing/rendering addresses.
DEFAULT_FORMATTERS = [
    ColonFormatter(),
    PeriodFormatter(2),
    PeriodFormatter(3),
    PeriodFormatter(4),
    HyphenFormatter(),
    CleanFormatter()
]
| 28.493976 | 119 | 0.662579 | 2,099 | 0.887526 | 0 | 0 | 150 | 0.063425 | 0 | 0 | 76 | 0.032135 |
347843e7209d1e94b13bc470e5fc335542cf2f21 | 5,142 | py | Python | pytests/ent_backup_restore/provider/s3.py | couchbaselabs/testrunner-bharath | 96af90070da2140cc11c549db7403f5ea3b76d34 | [
"Apache-2.0"
] | null | null | null | pytests/ent_backup_restore/provider/s3.py | couchbaselabs/testrunner-bharath | 96af90070da2140cc11c549db7403f5ea3b76d34 | [
"Apache-2.0"
] | null | null | null | pytests/ent_backup_restore/provider/s3.py | couchbaselabs/testrunner-bharath | 96af90070da2140cc11c549db7403f5ea3b76d34 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import json
import re
import boto3
import botocore
from . import provider
class S3(provider.Provider):
    """S3-backed implementation of the common 'Provider' interface (boto3)."""
    def __init__(self, access_key_id, bucket, cacert, endpoint, no_ssl_verify, region, secret_access_key, staging_directory):
        """Create a new S3 provider which allows interaction with S3 masked behind the common 'Provider' interface. All
        required parameters should be those parsed from the ini.
        """
        super().__init__(access_key_id, bucket, cacert, endpoint, no_ssl_verify, region, secret_access_key, staging_directory)
        # boto3 will raise an exception if given an empty string as the endpoint_url so we must construct a kwargs
        # dictionary and conditionally populate it.
        kwargs = {}
        if self.access_key_id:
            kwargs['aws_access_key_id'] = self.access_key_id
        if self.cacert:
            kwargs['verify'] = self.cacert
        if self.endpoint != '':
            kwargs['endpoint_url'] = self.endpoint
        if self.no_ssl_verify:
            # Supplying no_ssl_verify will override the cacert value if supplied e.g. they are mutually exclusive
            kwargs['verify'] = False
        if self.region:
            kwargs['region_name'] = self.region
        if self.secret_access_key:
            kwargs['aws_secret_access_key'] = self.secret_access_key
        self.resource = boto3.resource('s3', **kwargs)
    def schema_prefix(self):
        """See super class"""
        return 's3://'
    def setup(self):
        """See super class"""
        configuration = {}
        if self.region:
            configuration['LocationConstraint'] = self.region
        try:
            self.resource.create_bucket(Bucket=self.bucket, CreateBucketConfiguration=configuration)
        except botocore.exceptions.ClientError as error:
            error_code = error.response['Error']['Code']
            # An already-existing bucket is fine; anything else is fatal.
            if error_code != 'BucketAlreadyExists':
                raise error
    def teardown(self, info, remote_client):
        """See super class"""
        bucket = self.resource.Bucket(self.bucket)
        # Delete all the remaining objects
        try:
            for obj in bucket.objects.all():
                obj.delete()
        except botocore.exceptions.ClientError as error:
            error_code = error.response['Error']['Code']
            if error_code == 'NoSuchBucket':
                # Some tests remove the bucket after it's created/cleaned, if the bucket doesn't exist then all we need
                # to do is clean the staging directory.
                self._remove_staging_directory(info, remote_client)
                return
            # Bug fix: this previously did `raise error_code`, raising a plain
            # string (a TypeError at runtime); re-raise the real exception.
            raise error
        # Abort all the remaining multipart uploads. We ignore any 'NoSuchUpload' errors because we don't care if the
        # upload doesn't exist; we are trying to remove it.
        for upload in bucket.multipart_uploads.all():
            try:
                upload.abort()
            except botocore.exceptions.ClientError as error:
                error_code = error.response['Error']['Code']
                if error_code != "NoSuchUpload":
                    raise error
        # Remove the staging directory because cbbackupmgr has validation to ensure that are unique to each archive
        self._remove_staging_directory(info, remote_client)
    def remove_bucket(self):
        """See super class"""
        self.resource.Bucket(self.bucket).delete()
    def get_json_object(self, key):
        """See super class"""
        obj = None
        try:
            obj = json.loads(self.resource.Object(self.bucket, key).get()['Body'].read())
        except botocore.exceptions.ClientError as error:
            error_code = error.response['Error']['Code']
            if error_code not in ('NoSuchKey', 'KeyNotFound'):
                # Bug fix: previously `raise error_code` raised a plain string.
                raise error
        return obj
    def list_objects(self, prefix=None):
        """See super class"""
        keys = []
        kwargs = {}
        if prefix:
            kwargs['Prefix'] = prefix
        for obj in self.resource.Bucket(self.bucket).objects.filter(**kwargs):
            keys.append(obj.key)
        return keys
    def delete_objects(self, prefix):
        """See super class"""
        kwargs = {}
        if prefix:
            kwargs['Prefix'] = prefix
        for obj in self.resource.Bucket(self.bucket).objects.filter(**kwargs):
            obj.delete()
    def list_backups(self, archive, repo):
        """See super class"""
        # Raw string avoids the invalid escape sequences (\., \+, \-) that a
        # normal string literal would warn about; the pattern is unchanged.
        pattern = re.compile(r"([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])[Tt]([01][0-9]|2[0-3])_([0-5][0-9])_([0-5][0-9]|60)(\.[0-9]+)?(([Zz])|([\+|\-]([01][0-9]|2[0-3])_[0-5][0-9]))")
        backups = []
        for obj in self.resource.Bucket(self.bucket).objects.filter(Prefix=f"{archive}/{repo}"):
            res = pattern.search(obj.key)
            if res and res.group() not in backups:
                backups.append(res.group())
        return backups
    def num_multipart_uploads(self):
        """Return the number of in-progress multipart uploads in the bucket."""
        return sum(1 for _ in self.resource.Bucket(self.bucket).multipart_uploads.all())
provider.Provider.register(S3)
| 36.211268 | 190 | 0.606573 | 5,012 | 0.974718 | 0 | 0 | 0 | 0 | 0 | 0 | 1,511 | 0.293855 |
34792d446f26e4e27a5343d406f40c7fae30afe3 | 1,059 | py | Python | pythonProject/05al137Random/rd.py | D-Wolter/PycharmProjects | c8d6144efa30261bff72a3e0414a0d80f6730f9b | [
"MIT"
] | null | null | null | pythonProject/05al137Random/rd.py | D-Wolter/PycharmProjects | c8d6144efa30261bff72a3e0414a0d80f6730f9b | [
"MIT"
] | null | null | null | pythonProject/05al137Random/rd.py | D-Wolter/PycharmProjects | c8d6144efa30261bff72a3e0414a0d80f6730f9b | [
"MIT"
] | null | null | null | import random
import string
# Generate a random integer between A and B (inclusive)
# inteiro = random.randint(10, 20)
# Generate a random float between A and B
# flutuante = random.uniform(10, 20)
# Generate a random float between 0.0 and 1.0
flutuante = random.random()
# Generate a random number using the range() function:
# from 900 up to (but not including) 1000, stepping by 10
inteiro = random.randrange(900, 1000, 10)
lista = ['Luiz', 'Otávio', 'Maria', 'Rose', 'Jenny', 'Danilo', 'Felipe']
# Randomly select values from a list
sorteio = random.sample(lista, 2)  # draws two *distinct* random names at a time
# sorteio = random.choices(lista, k=2) # draws two random names at a time, but the same name may repeat
# sorteio = random.choice(lista) # draws a single name from the list
# Shuffle the list
random.shuffle(lista)# shuffles the list in place
# Generate a random password
letras = string.ascii_letters
digitos = string.digits
caracteres = '!@#$%&*._-'
geral = letras + digitos + caracteres
senha = "".join(random.choices(geral, k=20))
print(senha)
| 28.621622 | 109 | 0.708215 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 706 | 0.66043 |
347a6f99a4de31c46f4ac69d858282e197897fe5 | 1,651 | py | Python | chris_backend/users/tests/test_serializers.py | PintoGideon/ChRIS_ultron_backEnd | 3c094c90f45c64e279c6d78d9accc357679fb37b | [
"MIT"
] | null | null | null | chris_backend/users/tests/test_serializers.py | PintoGideon/ChRIS_ultron_backEnd | 3c094c90f45c64e279c6d78d9accc357679fb37b | [
"MIT"
] | null | null | null | chris_backend/users/tests/test_serializers.py | PintoGideon/ChRIS_ultron_backEnd | 3c094c90f45c64e279c6d78d9accc357679fb37b | [
"MIT"
] | null | null | null |
import logging
from django.test import TestCase
from rest_framework import serializers
from users.serializers import UserSerializer
class UserSerializerTests(TestCase):
    """
    Tests for UserSerializer: user creation (password hashing) and
    username validation.
    """
    def setUp(self):
        # avoid cluttered console output (for instance logging all the http requests)
        logging.disable(logging.CRITICAL)
        # Credentials shared by every test in this case.
        self.username = 'cube'
        self.password = 'cubepass'
        self.email = 'dev@babymri.org'
    def tearDown(self):
        # re-enable logging
        logging.disable(logging.DEBUG)
    def test_create(self):
        """
        Test whether overriden create method takes care of the password hashing.
        """
        user_serializer = UserSerializer()
        validated_data = {'username': self.username, 'password': self.password,
                          'email': self.email}
        user = user_serializer.create(validated_data)
        self.assertEqual(user.username, self.username)
        self.assertEqual(user.email, self.email)
        # The stored password must be the hash, not the raw password.
        self.assertNotEqual(user.password, self.password)
        self.assertTrue(user.check_password(self.password))
    def test_validate_username(self):
        """
        Test whether overriden validate_username method raises a
        serializers.ValidationError when the username contains forward slashes.
        """
        user_serializer = UserSerializer()
        with self.assertRaises(serializers.ValidationError):
            user_serializer.validate_username('user/')
        # A slash-free username passes through unchanged.
        username = user_serializer.validate_username(self.username)
        self.assertEqual(username, self.username)
| 32.372549 | 85 | 0.675348 | 1,512 | 0.915809 | 0 | 0 | 0 | 0 | 0 | 0 | 478 | 0.289522 |
347c3d0645e2c613d5b9e0808de4a8ec3f9718f3 | 11,036 | py | Python | pegtree/optimizer.py | Caterpie-poke/pegtree | 81c37a743d4bccc2eac05ebb10803224785f55c0 | [
"MIT"
] | 1 | 2022-03-05T13:33:55.000Z | 2022-03-05T13:33:55.000Z | pegtree/optimizer.py | Caterpie-poke/pegtree | 81c37a743d4bccc2eac05ebb10803224785f55c0 | [
"MIT"
] | 6 | 2020-10-06T23:57:10.000Z | 2021-05-11T22:16:46.000Z | pegtree/optimizer.py | Caterpie-poke/pegtree | 81c37a743d4bccc2eac05ebb10803224785f55c0 | [
"MIT"
] | 5 | 2020-02-18T12:00:32.000Z | 2020-05-13T06:35:23.000Z | from .peg import *
# # PRange Utilities
def bitsetRange(chars, ranges):
    """Build a bitset (one bit per code point) covering every character in
    `chars` plus every inclusive lo-hi pair taken from `ranges`."""
    bits = 0
    for ch in chars:
        bits |= 1 << ord(ch)
    # `ranges` is a flat string of lo/hi pairs, e.g. 'az09'.
    for i in range(0, len(ranges) - 1, 2):
        lo, hi = ord(ranges[i]), ord(ranges[i + 1])
        for code in range(lo, hi + 1):
            bits |= 1 << code
    return bits
def stringfyRange(bits):
    """Convert a character bitset back into a (chars, ranges) pair.

    Inverse of bitsetRange: maximal runs of consecutive set bits become a
    lo/hi pair in `ranges` when three or more characters long, otherwise
    the characters are emitted individually into `chars`.
    """
    set_codes = []
    code = 0
    remaining = bits
    while remaining > 0:
        if remaining & 1:
            set_codes.append(code)
        remaining >>= 1
        code += 1
    chars = []
    ranges = []
    def emit(lo, hi):
        # One- and two-character runs stay literal; longer runs become a pair.
        if hi - lo <= 1:
            chars.append(chr(lo))
            if hi != lo:
                chars.append(chr(hi))
        else:
            ranges.append(chr(lo))
            ranges.append(chr(hi))
    i = 0
    n = len(set_codes)
    while i < n:
        j = i
        while j + 1 < n and set_codes[j + 1] == set_codes[j] + 1:
            j += 1
        emit(set_codes[i], set_codes[j])
        i = j + 1
    return ''.join(chars), ''.join(ranges)
def _appendRange(s, p, chars, ranges):
if s == p:
chars.append(chr(s))
elif s+1 == p:
chars.append(chr(s))
chars.append(chr(p))
else:
ranges.append(chr(s))
ranges.append(chr(p))
def uniqueRange(chars, ranges):
    """Return a canonical, duplicate-free (chars, ranges) pair denoting the
    same character set as the inputs."""
    bits = bitsetRange(chars, ranges)
    newchars, newranges = stringfyRange(bits)
    # Round-trip sanity check on the *normalized* strings. (Previously this
    # re-ran bitsetRange on the original inputs, which is trivially equal to
    # `bits` and therefore verified nothing.)
    assert bitsetRange(newchars, newranges) == bits
    return newchars, newranges
#
# Inlining
#
def isCharOrRange(pe):
    """True when pe is a single-character-class expression (PChar or PRange)."""
    return isinstance(pe, (PChar, PRange))
def inline(pe: PExpr, filter=isCharOrRange):
    """Resolve reference chains and inline the target when it passes `filter`.

    By default only single character/range expressions are inlined, since
    they are cheap to duplicate at every use site. Returns the original
    expression unchanged when the target does not pass the filter.
    """
    start = pe
    # Follow PRef/PName chains down to the referenced expression.
    while isinstance(pe, PRef) or isinstance(pe, PName):
        pe = pe.deref()
    if filter(pe):
        if(pe != start):
            logger.info('INLINE', start, '=>', pe)
        return pe
    return start
#
# make Minimum Rules
#
def makeMinimumRules(pe: PExpr, visited: dict, rules: list):
    """Collect, into `rules`, every rule reachable from pe (depth-first).

    `visited` maps a reference's unique name to its node so each rule is
    emitted exactly once; a rule's dependencies are appended before the
    rule itself. Returns `rules` (also mutated in place).
    """
    if isinstance(pe, PName):
        pe.deref()
        pe = pe.e
    if isinstance(pe, PRef):
        uname = pe.uname()
        if uname not in visited:
            visited[uname] = pe
            # Recurse into the referenced body first, then emit this rule.
            makeMinimumRules(pe.deref(), visited, rules)
            rules.append(pe)
        return rules
    # Composite expressions: visit every child.
    if isinstance(pe, PUnary) or isinstance(pe, PTuple):
        for e in pe:
            makeMinimumRules(e, visited, rules)
    return rules
#
# Sorting Rules
#
def sortRules(refs):
    """Order rules so that, where possible, a rule appears after the rules
    it references.

    Rules that reference nothing are emitted first; the remainder, paired
    with the set of names they still depend on, are resolved iteratively by
    _solveSortingRefs.
    """
    newrefs = []
    unsolved = []
    for ref in refs:
        names = set([])
        _makeSortingRefs(ref.deref(), names)
        if len(names) == 0:
            newrefs.append(ref)
        else:
            unsolved.append((ref, set(names)))
    return _solveSortingRefs(newrefs, unsolved)
def _makeSortingRefs(e, names):
    """Collect, into `names`, the unique names of all references inside e.

    NOTE: the branch order matters — PTuple and single-child ('e' attribute)
    wrappers are descended into before the PRef check is applied.
    """
    if isinstance(e, PTuple):
        for e2 in e:
            _makeSortingRefs(e2, names)
    elif hasattr(e, 'e'):
        _makeSortingRefs(e.e, names)
    elif isinstance(e, PRef):
        names.add(e.uname())
def _removeSolvedName(unsolved, uname):
removed = False
for _, names in unsolved:
if uname in names:
removed = True
names.remove(uname)
return removed
def _solveSortingRefs(refs, unsolved):
    """Append the rules in `unsolved` to `refs` in dependency order.

    Each entry of `unsolved` is a (ref, pending-names) pair. A rule is
    emitted once the number of names it still depends on drops to the
    current threshold; emitting a rule resolves its name everywhere else.
    The threshold is relaxed step by step — and reset to 0 whenever any
    progress is made — so the loop always terminates; anything still
    unsolved once the threshold reaches 10 is appended as-is.

    Returns `refs` (also mutated in place).
    """
    removed = False
    # Names already present in refs count as solved from the start.
    for ref in refs:
        removed |= _removeSolvedName(unsolved, ref.uname())
    # Renamed from `max`, which shadowed the builtin.
    threshold = 0
    while threshold < 10:
        removed = True
        while removed:
            removed = False
            newrefs = []
            stillUnsolved = []
            for ref, names in unsolved:
                if len(names) <= threshold:
                    refs.append(ref)
                    newrefs.append(ref)
                else:
                    stillUnsolved.append((ref, names))
            unsolved = stillUnsolved
            for ref in newrefs:
                removed |= _removeSolvedName(unsolved, ref.uname())
            if removed:
                # Progress was made: retry from the strictest threshold.
                threshold = 0
        threshold += 1
    # Whatever remains could not be ordered; append it as-is.
    for ref, _ in unsolved:
        refs.append(ref)
    return refs
###
def flattenSeq(pe: PExpr, ps: list, conv=lambda x: x):
    """Append pe (or, for a PSeq, each of its children) to ps, converted
    through `conv`. Returns ps."""
    children = pe if isinstance(pe, PSeq) else (pe,)
    for child in children:
        ps.append(conv(child))
    return ps
def appendSeq(ps: list, pe: PExpr):
    """Append pe to the flattened sequence ps, dropping empties, splicing
    nested PSeq nodes, and concatenating adjacent character literals."""
    # Empty expressions contribute nothing to a sequence.
    if pe == EMPTY or (isinstance(pe, PChar) and len(pe.text) == 0):
        return
    # Splice nested sequences flat.
    if isinstance(pe, PSeq):
        for e in pe:
            appendSeq(ps, e)
        return
    if len(ps) == 0:
        ps.append(pe)
        return
    e0 = ps[-1]
    # 'ab' followed by 'cd' collapses into a single PChar 'abcd'.
    if isinstance(pe, PChar) and isinstance(e0, PChar):
        ps[-1] = PChar(e0.text+pe.text)
        return
    ps.append(pe)
def newSeq(ps: list):
    """Build the simplest sequence expression equivalent to the elements
    of ps: EMPTY for nothing, the lone element for one, PSeq otherwise."""
    merged = []
    for e in ps:
        appendSeq(merged, e)
    if not merged:
        return EMPTY
    return merged[0] if len(merged) == 1 else PSeq(*merged)
#
# Ore
#
def flattenOre(pe: PExpr, ps: list, conv=lambda x: x):
    """Append pe (or, for a POre, each of its alternatives) to ps,
    converted through `conv`. Returns ps."""
    alternatives = pe if isinstance(pe, POre) else (pe,)
    for alt in alternatives:
        ps.append(conv(alt))
    return ps
def mergeRange(e, e2):
    """Merge two single-character expressions into one normalized PRange.

    Returns ANY when either side already matches any character. Inputs are
    expected to be PChar/PRange/any-like nodes (see isSingleCharacter use
    in appendOre).
    """
    if isAny(e) or isAny(e2):
        return ANY
    chars = ''
    ranges = ''
    if isinstance(e, PChar):
        chars += e.text
    if isinstance(e2, PChar):
        chars += e2.text
    if isinstance(e, PRange):
        chars += e.chars
        ranges += e.ranges
    if isinstance(e2, PRange):
        chars += e2.chars
        ranges += e2.ranges
    # Normalize to a canonical, duplicate-free character set.
    chars, ranges = uniqueRange(chars, ranges)
    return PRange.new(chars, ranges)
def prefixChar(pe):
    """Return the first literal character pe must match, or None when pe
    does not begin with a non-empty character literal."""
    if isinstance(pe, PChar):
        return pe.text[0] if pe.text else None
    if isinstance(pe, PSeq):
        head = pe.es[0]
        if isinstance(head, PChar) and head.text:
            return head.text[0]
    return None
def dc(pe):
    """Drop the first literal character of pe ("decapitate").

    Used by newOre after a common first character has been factored out of
    a group of alternatives. Returns FAIL when pe does not start with a
    non-empty character literal.
    """
    if isinstance(pe, PChar) and len(pe.text) > 0:
        return PChar.new(pe.text[1:])
    if isinstance(pe, PSeq) and isinstance(pe.es[0], PChar) and len(pe.es[0].text) > 0:
        first = PChar.new(pe.es[0].text[1:])
        return PSeq(first, *pe.es[1:])
    return FAIL
def appendOre(ps: list, pe, cmap=None, deref=False):
    """Append the alternative pe to the choice list ps, merging on the fly.

    Adjacent single-character alternatives are merged into one range, and —
    when `cmap` is given — alternatives sharing a first literal character
    are grouped into nested sub-lists keyed by that character (cmap maps
    first-char -> index in ps). `deref` additionally resolves reference
    chains before inspecting the alternative.
    """
    start = pe
    while deref and (isinstance(pe, PRef) or isinstance(pe, PName)):
        pe = pe.deref()
    # Splice nested choices flat.
    if isinstance(pe, POre):
        for e in pe:
            appendOre(ps, e, cmap, deref)
        return
    if len(ps) > 0:
        e0 = ps[-1]
        # An empty alternative always succeeds, so later alternatives are dead.
        if isEmpty(e0):
            return
        # Neighboring character classes collapse into a single range.
        if isSingleCharacter(e0) and isSingleCharacter(pe):
            ps[-1] = mergeRange(e0, pe)
            return
    c = prefixChar(pe)
    if c is not None and cmap != None:
        if c not in cmap:
            cmap[c] = len(ps)
            ps.append([pe])
        else:
            nested_choice = ps[cmap[c]]
            nested_choice.append(pe)
        return
    # Fall back to appending the original (possibly still wrapped) node.
    ps.append(start)
def newOre(ps: list, cmap=None, deref=False):
    """Build the simplest choice expression from the alternatives in ps.

    First pass merges/groups alternatives via appendOre. Grouped entries
    (lists of alternatives sharing a common first character) are rewritten
    as `first-char (tails...)` with the common character factored out. A
    final pass re-merges the results; a two-way choice whose second branch
    is empty becomes POption, and an empty choice becomes FAIL.
    """
    optimized = []
    for e in ps:
        appendOre(optimized, e, cmap, deref)
    for i in range(len(optimized)):
        if not isinstance(optimized[i], list):
            continue
        nested_choice = optimized[i]
        if len(nested_choice) == 1:
            optimized[i] = nested_choice[0]
        else:
            # Factor out the shared first character; recurse on the tails.
            first = PChar(prefixChar(nested_choice[0]))
            nested = []
            for ne in nested_choice:
                ne = dc(ne)
                appendOre(nested, ne)
            optimized[i] = PSeq.new(first, newOre(nested, {}))
    ps = optimized
    optimized = []
    # Second pass without grouping: merge any newly adjacent alternatives.
    for e in ps:
        appendOre(optimized, e, None, False)
    if len(optimized) == 0:
        return FAIL
    if len(optimized) == 1:
        return optimized[0]
    if len(optimized) == 2 and isEmpty(optimized[1]):
        return POption(optimized[0])
    return POre(*optimized)
#
# Out of order execution
#
def fixedSize(e: PExpr):
    """Return the number of characters e always consumes, or -1 when the
    consumed length is not statically known."""
    if isinstance(e, (PRange, PAny)):
        return 1
    if isinstance(e, PChar):
        return len(e.text)
    # Lookaheads never consume input.
    if isinstance(e, (PAnd, PNot)):
        return 0
    return -1
def splitFixed(e, conv=lambda x: x):
    """Split a (sequence) expression into its fixed-size prefix and the rest.

    Returns (fixed, shift, rest): `fixed` is the list of leading elements
    whose consumed length is statically known, `shift` is their total
    length, and `rest` is the remaining element list. When even the first
    element has no fixed size, returns (None, -1, e) unchanged.
    """
    ps = flattenSeq(e, [], conv)
    if fixedSize(ps[0]) == -1:
        return None, -1, e
    shift = 0
    fixed = []
    for e in ps:
        size = fixedSize(e)
        if size == -1:
            break
        shift += size
        fixed.append(e)
    #print('@', fixed, shift, ps[len(fixed):])
    return fixed, shift, ps[len(fixed):]
###
class Optimizer(object):
    """Bottom-up rewriter for parsing expressions.

    visit() dispatches on the expression's class name (cname()), so every
    handler below must keep the exact name of the PExpr subclass it handles;
    expressions without a handler are returned unchanged.  PNode/PEdge/PFold
    additionally hoist a fixed-width prefix out of the tree-building node
    (out-of-order execution) via splitFixed().
    """

    def visit(self, pe: PExpr):
        """Inline `pe`, then rewrite it with the matching handler, if any."""
        pe = inline(pe)
        handler = getattr(self, pe.cname(), None)
        return pe if handler is None else handler(pe)

    def PRef(self, pe):
        # References are left untouched.
        return pe

    def PName(self, pe):
        return PName(self.visit(pe.e), pe.name, pe.tree, pe.isLeftRec)

    def PAnd(self, pe):
        return PAnd(self.visit(pe.e))

    def PNot(self, pe):
        return PNot(self.visit(pe.e))

    def PMany(self, pe):
        return PMany(self.visit(pe.e))

    def POneMany(self, pe):
        return POneMany(self.visit(pe.e))

    def POption(self, pe):
        return POption(self.visit(pe.e))

    def PSeq(self, pe):
        return newSeq(flattenSeq(pe, [], self.visit))

    # Ore
    def POre(self, pe: POre):
        if pe.isDict() and len(pe.es) > 8:
            # Large dictionary choices are left as-is — presumably handled
            # by a specialised runtime path; confirm before changing.
            return pe
        return newOre(flattenOre(pe, [], self.visit), {})

    # Tree Construction
    def PNode(self, pe: PNode):
        body = self.visit(pe.e)
        fixed, shift, rest = splitFixed(body)
        if fixed is None:  # or not self.Ooox:
            return PNode(body, pe.tag, pe.shift)
        # Move the fixed prefix out of the node and compensate via shift.
        fixed.append(PNode(newSeq(rest), pe.tag, pe.shift - shift))
        return newSeq(fixed)

    def PEdge(self, pe):
        body = self.visit(pe.e)
        fixed, shift, rest = splitFixed(body)
        if fixed is None:
            return PEdge(pe.edge, body, pe.shift)
        fixed.append(PEdge(pe.edge, newSeq(rest), pe.shift - shift))
        return newSeq(fixed)

    def PFold(self, pe):
        body = self.visit(pe.e)
        fixed, shift, rest = splitFixed(body)
        if fixed is None:  # or not self.Ooox:
            return PFold(pe.edge, body, pe.tag, pe.shift)
        fixed.append(PFold(pe.edge, newSeq(rest), pe.tag, pe.shift - shift))
        return newSeq(fixed)

    def PAbs(self, pe):
        return PAbs(self.visit(pe.e))

    def PAction(self, pe):
        return PAction(self.visit(pe.e), pe.func, pe.params, pe.ptree)
# Shared, stateless Optimizer instance backing the helpers below.
optimizer = Optimizer()
def default_optimizer(e):
    # Default pass used by prepare(): full bottom-up rewrite of e.
    return optimizer.visit(e)
# Debug variant kept for reference: swap in to disable optimisation.
# def default_optimizer(e):
#     return e
def optimize(e):
    # Public alias for the default optimisation pass.
    return optimizer.visit(e)
def prepare(peg: Grammar, start_name=None, optimize_function=default_optimizer):
    """Collect, sort and optimise the rules reachable from the start symbol.

    Returns (start_ref, refs, rules, memos): `rules` maps each unique rule
    name to its optimised expression and `memos` lists the names eligible
    for memoisation — cleared unless the grammar enables 'packrat'.
    """
    if start_name is None:
        start_name = peg.start()
    start_ref = peg.newRef(start_name)
    refs = sortRules(makeMinimumRules(start_ref, {}, []))
    rules = {}
    memos = []
    for ref in refs:
        uname = ref.uname(peg)
        rules[uname] = optimize_function(ref.deref())
        memos.append(uname)
    if 'packrat' not in peg:
        memos.clear()
    return start_ref, refs, rules, memos
| 24.415929 | 87 | 0.545216 | 2,074 | 0.18793 | 0 | 0 | 0 | 0 | 0 | 0 | 429 | 0.038873 |
347c4640275f1ffebdd61eb9ff26c587f84bd59e | 911 | py | Python | lib/ipc/async_emitter.py | stevenman42/architus | 86f0c065172e27ae02603d6ef4e7d4eedfb6dbe4 | [
"MIT"
] | null | null | null | lib/ipc/async_emitter.py | stevenman42/architus | 86f0c065172e27ae02603d6ef4e7d4eedfb6dbe4 | [
"MIT"
] | null | null | null | lib/ipc/async_emitter.py | stevenman42/architus | 86f0c065172e27ae02603d6ef4e7d4eedfb6dbe4 | [
"MIT"
] | null | null | null | import json
from aio_pika import Message, DeliveryMode, ExchangeType
from lib.ipc.util import poll_for_async_connection
class Emitter:
    """Publishes JSON events to the AMQP 'events' topic exchange."""

    def __init__(self):
        # Both are populated by connect(); None until then.
        self.connection = None
        self.event_exchange = None

    async def connect(self, loop):
        """Open the AMQP connection and declare the topic exchange."""
        self.connection = await poll_for_async_connection(loop)
        ch = await self.connection.channel()
        self.event_exchange = await ch.declare_exchange(
            'events', ExchangeType.TOPIC
        )
        return self

    async def close(self):
        """Close the underlying AMQP connection."""
        await self.connection.close()

    async def emit(self, routing_key, body):
        """Serialise `body` to JSON and publish it under `routing_key`."""
        msg = Message(
            json.dumps(body).encode(),
            delivery_mode=DeliveryMode.PERSISTENT
        )
        await self.event_exchange.publish(
            msg, routing_key=routing_key
        )
| 23.973684 | 63 | 0.644347 | 787 | 0.863886 | 0 | 0 | 0 | 0 | 665 | 0.729967 | 49 | 0.053787 |
347c814616bfce0ed2381cbdbfdc2ba0cb647a74 | 72 | py | Python | pys/classes/annotations.py | Xithrius/Examples | d29fe9510f1c62a807e09f9707d0b2f6de9ffeed | [
"MIT"
] | null | null | null | pys/classes/annotations.py | Xithrius/Examples | d29fe9510f1c62a807e09f9707d0b2f6de9ffeed | [
"MIT"
] | null | null | null | pys/classes/annotations.py | Xithrius/Examples | d29fe9510f1c62a807e09f9707d0b2f6de9ffeed | [
"MIT"
] | null | null | null | import typing as t
def test0(a: t.Union[str, int]) -> t.Any:
pass
| 12 | 41 | 0.611111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
347d4a00184ea7c2c1c24dd55efd4162c15dc9d8 | 7,727 | py | Python | day_4/day_4_improvements/tests/test_user_edit.py | dmchu/Pytest_REST_API_with_Allure | a41fbbcc04304a89a99ece7b06148de668a7b696 | [
"Apache-2.0"
] | null | null | null | day_4/day_4_improvements/tests/test_user_edit.py | dmchu/Pytest_REST_API_with_Allure | a41fbbcc04304a89a99ece7b06148de668a7b696 | [
"Apache-2.0"
] | null | null | null | day_4/day_4_improvements/tests/test_user_edit.py | dmchu/Pytest_REST_API_with_Allure | a41fbbcc04304a89a99ece7b06148de668a7b696 | [
"Apache-2.0"
] | null | null | null | import allure
from day_4.day_4_improvements.lib.base_case import BaseCase
from day_4.day_4_improvements.lib.assersions import Assertions as AS
from day_4.day_4_improvements.lib.my_requests import MyRequests as MR
from day_4.day_4_improvements.lib.helpers import Helpers as HP
@allure.epic("User Profile Edit cases")
class TestUserEdit(BaseCase):
    """API tests covering editing of user profile data via /user/{id}."""

    BASE_URI: str = "/user/"
    URI_LOGIN: str = BASE_URI + "login"

    @allure.feature("User Profile Edit")
    @allure.story("positive - Edit profile of just created user")
    @allure.description("Verifying that user profile of just created user can be edited")
    def test_edit_just_created_user(self):
        # Register a fresh user and authenticate as them.
        registered_user: dict = HP.register_user(HP)
        user_email = registered_user.get("user_email")
        user_password = registered_user.get("user_password")
        user_id = registered_user.get("user_id")
        login_user_response = HP.authorize_user(HP, user_email, user_password)
        URI_USER = self.BASE_URI + str(user_id)
        new_name = "Changed Name"
        headers = {
            'x-csrf-token': login_user_response.get("token")
        }
        cookies = {
            'auth_sid': login_user_response.get("auth_sid")
        }
        edit_data = {
            'firstName': new_name
        }
        # Edit user data
        response3 = MR.put(URI_USER, headers=headers, cookies=cookies, data=edit_data)
        AS.assert_code_status(response3, 200)
        # Get updated user data and confirm the change stuck.
        response4 = MR.get(URI_USER, headers=headers, cookies=cookies)
        AS.assert_json_value_by_name(response4, "firstName", new_name, "Wrong name of user after update")

    @allure.feature("User Profile Edit")
    @allure.story("negative - Edit profile of user without authorization")
    @allure.description("Verifying that user profile can not be edited without authorization")
    def test_edit_existing_user_without_authorization(self):
        registered_user: dict = HP.register_user(HP)
        user_id = registered_user.get("user_id")
        URI_USER = self.BASE_URI + str(user_id)
        new_name = "Changed Name2"
        # Empty token/cookie simulate an unauthenticated client.
        headers = {
            'x-csrf-token': ""
        }
        cookies = {
            'auth_sid': ""
        }
        edit_data = {
            'firstName': new_name
        }
        # Edit attempt must be rejected with 400 and the auth error text.
        response = MR.put(URI_USER, headers=headers, cookies=cookies, data=edit_data)
        AS.assert_code_status(response, 400)
        AS.assert_response_text(response, "Auth token not supplied")

    @allure.feature("User Profile Edit")
    @allure.story("negative - Edit profile of user with another user authorization")
    @allure.description("Verifying that user profile can not be edited with another user authorization")
    def test_edit_existing_user_with_authorization_by_another_user(self):
        # Create the target user whose profile must stay untouched.
        registered_user: dict = HP.register_user(HP)
        correct_user_email = registered_user.get("user_email")
        correct_user_password = registered_user.get("user_password")
        user_id = registered_user.get("user_id")
        # Pre-existing, unrelated account.
        user_email = "vinkotov@example.com"
        user_password = "1234"
        # Authorization with another user
        login_data = {
            'email': user_email,
            'password': user_password
        }
        response = MR.post(self.URI_LOGIN, data=login_data)
        auth_sid = self.get_cookie(response, "auth_sid")
        token = self.get_header(response, "x-csrf-token")
        URI_USER = self.BASE_URI + str(user_id)
        new_name = "Changed Name3"
        headers = {
            'x-csrf-token': token
        }
        cookies = {
            'auth_sid': auth_sid
        }
        # NOTE(review): the payload edits 'email' while the final check reads
        # 'firstName'; kept as-is — the edit must be rejected either way.
        edit_data = {
            'email': new_name
        }
        # Try to edit user data as the wrong user: expect rejection.
        response2 = MR.put(URI_USER, headers=headers, cookies=cookies, data=edit_data)
        AS.assert_code_status(response2, 400)
        # Authorization with correct user
        login_user_response = HP.authorize_user(HP, correct_user_email, correct_user_password)
        URI_USER = self.BASE_URI + str(user_id)
        new_name = "Changed Name"
        headers_2 = {
            'x-csrf-token': login_user_response.get("token")
        }
        cookies_2 = {
            'auth_sid': login_user_response.get("auth_sid")
        }
        # Get user data and verify that changes were not made.
        response4 = MR.get(URI_USER, headers=headers_2, cookies=cookies_2)
        response_data = response4.json()
        user_first_name = response_data.get("firstName")
        assert user_first_name != new_name, \
            "First name should not be changed by user with another authenticated user, but it did"

    @allure.feature("User Profile Edit")
    @allure.story("negative - Edit user 'email' with wrong email format")
    @allure.description("Verifying that user 'email' can not be edited with wrong email format")
    def test_edit_user_email_with_wrong_format(self):
        registered_user: dict = HP.register_user(HP)
        user_email = registered_user.get("user_email")
        user_password = registered_user.get("user_password")
        user_id = registered_user.get("user_id")
        login_user_response = HP.authorize_user(HP, user_email, user_password)
        URI_USER = self.BASE_URI + str(user_id)
        # Drop the '@' to make the address invalid.
        new_email = user_email.replace("@", ".")
        headers = {
            'x-csrf-token': login_user_response.get("token")
        }
        cookies = {
            'auth_sid': login_user_response.get("auth_sid")
        }
        edit_data = {
            'email': new_email
        }
        # Edit user data: expect validation failure.
        response3 = MR.put(URI_USER, headers=headers, cookies=cookies, data=edit_data)
        AS.assert_code_status(response3, 400)
        AS.assert_response_text(response3, "Invalid email format")
        # Get user data and verify the email is unchanged.
        response4 = MR.get(URI_USER, headers=headers, cookies=cookies)
        response_data = response4.json()
        user_email = response_data.get("email")
        assert user_email != new_email, \
            "Email should not be changed by user to email with wrong format, but it did"

    @allure.feature("User Profile Edit")
    @allure.story("negative - Edit user 'first name' with one character")
    @allure.description("Verifying that user 'first name' can not be edited with one character")
    def test_edit_user_first_name_with_one_character(self):
        registered_user: dict = HP.register_user(HP)
        user_email = registered_user.get("user_email")
        user_password = registered_user.get("user_password")
        user_id = registered_user.get("user_id")
        login_user_response = HP.authorize_user(HP, user_email, user_password)
        URI_USER = self.BASE_URI + str(user_id)
        headers = {
            'x-csrf-token': login_user_response.get("token")
        }
        cookies = {
            'auth_sid': login_user_response.get("auth_sid")
        }
        edit_data = {
            'firstName': "V"
        }
        # Edit user data: a one-character first name must be rejected.
        response3 = MR.put(URI_USER, headers=headers, cookies=cookies, data=edit_data)
        AS.assert_code_status(response3, 400)
        AS.assert_json_value_by_name(response3, "error",
                                     "Too short value for field firstName", "The error message is not as expected")
        # Get user data and verify the first name is unchanged.
        response4 = MR.get(URI_USER, headers=headers, cookies=cookies)
        response_data = response4.json()
        user_first_name = response_data.get("firstName")
        assert user_first_name != "V", \
            "First name should not be changed by user to very short name, but it did"
347d886b920493c8af185f7166a5e1089d5bdb0f | 23 | py | Python | test/example/__init__.py | fieldOfView/izzyPythonPlugin | deb0dca2a4294cd74aa18ab1228de10eceba1266 | [
"MIT"
] | 6 | 2019-11-06T00:37:43.000Z | 2021-12-22T02:39:45.000Z | example/__init__.py | Submanifold/cmake-cpp-pybind11 | f22893e87977de8c619d6033b7ca5bb240451f3d | [
"MIT"
] | null | null | null | example/__init__.py | Submanifold/cmake-cpp-pybind11 | f22893e87977de8c619d6033b7ca5bb240451f3d | [
"MIT"
] | null | null | null | from .example import *
| 11.5 | 22 | 0.73913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
347e054992d27cb2ec4c3396370171a573169533 | 64 | py | Python | backend/models/__init__.py | Hori1234/gastech-project | dc89e546fce9b588fc8038cf63b7d82ad3eafff9 | [
"MIT"
] | null | null | null | backend/models/__init__.py | Hori1234/gastech-project | dc89e546fce9b588fc8038cf63b7d82ad3eafff9 | [
"MIT"
] | null | null | null | backend/models/__init__.py | Hori1234/gastech-project | dc89e546fce9b588fc8038cf63b7d82ad3eafff9 | [
"MIT"
] | null | null | null |
from .users import User
__all__ = [
'User']
| 9.142857 | 24 | 0.46875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.09375 |
347e6ea9930d9b92685144c3238b1acc36f5d915 | 6,005 | py | Python | bandit_github_formatter/formatter.py | epsylabs/action-bandit | 626214a0b7ee3fa22ac81b0d8d5f14da61b41345 | [
"MIT"
] | 8 | 2021-06-29T07:12:33.000Z | 2022-02-15T22:10:07.000Z | bandit_github_formatter/formatter.py | epsylabs/action-bandit | 626214a0b7ee3fa22ac81b0d8d5f14da61b41345 | [
"MIT"
] | null | null | null | bandit_github_formatter/formatter.py | epsylabs/action-bandit | 626214a0b7ee3fa22ac81b0d8d5f14da61b41345 | [
"MIT"
] | 1 | 2022-02-13T20:09:01.000Z | 2022-02-13T20:09:01.000Z | r"""
================
GitHub Formatter
================
This formatter renders the issues as GitHub-flavoured Markdown and, when
running in a GitHub Action on a pull request, posts them as a PR comment.
:Example:
.. code-block:: none
>> Issue: [B301:blacklist_calls] Use of unsafe yaml load. Allows
instantiation of arbitrary objects. Consider yaml.safe_load().
Severity: Medium Confidence: High
Location: examples/yaml_load.py:5
More Info: https://bandit.readthedocs.io/en/latest/
4 ystr = yaml.dump({'a' : 1, 'b' : 2, 'c' : 3})
5 y = yaml.load(ystr)
6 yaml.dump(y)
.. versionadded:: 0.9.0
"""
from __future__ import print_function
import logging
import json
import os
import requests
from bandit.core import constants
from bandit.core import docs_utils
from bandit.core import test_properties
LOG = logging.getLogger(__name__)
def get_verbose_details(manager):
    """Return a plain-text summary of scanned and excluded files.

    Each in-scope file is listed with its summed severity/confidence score.
    """
    # TODO: re-format the output for verbose details
    tpl = u"\t%s (score: {SEVERITY: %i, CONFIDENCE: %i})"
    out = [u'Files in scope (%i):' % len(manager.files_list)]
    for fname, score in zip(manager.files_list, manager.scores):
        out.append(tpl % (fname, sum(score['SEVERITY']), sum(score['CONFIDENCE'])))
    out.append(u'Files excluded (%i):' % len(manager.excluded_files))
    out.extend(u"\t%s" % fname for fname in manager.excluded_files)
    return '\n'.join(out)
def get_metrics(manager):
    """Render the run totals as a Markdown table keyed by severity rank."""
    totals = manager.metrics.data['_totals']
    rows = ["\n### Run metrics:\n"]
    rows.append(f"| | {'|'.join(constants.RANKING)}|")
    rows.append("|:-:|:-:|:-:|:-:|:-:|")
    severities = '|'.join(str(int(totals[f'SEVERITY.{rank}'])) for rank in constants.RANKING)
    rows.append(f"|SEVERITY|{severities}|")
    confidences = '|'.join(str(int(totals[f'CONFIDENCE.{rank}'])) for rank in constants.RANKING)
    rows.append(f"|CONFIDENCE|{confidences}|")
    return '\n'.join(rows)
def _output_issue_str(issue, indent, show_lineno=True, show_code=True,
                      lines=-1):
    """Render a single Bandit issue as a collapsible Markdown section.

    :param issue: bandit issue object (test_id, severity, fname, ...)
    :param indent: prefix prepended to each code line
    :param show_lineno: include the line number in the Location row
    :param show_code: include the offending code snippet
    :param lines: number of context lines to show, -1 for all
    :return: the Markdown string for this issue
    """
    bits = []
    bits.append("<details>")
    bits.append("<summary><strong>[%s:%s]</strong> %s</summary>\n<br>\n" % (
        issue.test_id, issue.test, issue.text))
    bits.append("|<strong>Severity</strong>| %s |\n|:-:|:-:|\n|<strong>Confidence</strong>| %s |" % (
        issue.severity.capitalize(), issue.confidence.capitalize()))
    # Fix: the closing tags below were written as "<strong>" (unclosed),
    # which breaks the rendered Markdown/HTML table cells.
    bits.append("|<strong>Location</strong>| %s:%s:%s |" % (
        issue.fname, issue.lineno if show_lineno else "",
        ""))
    bits.append("|<strong>More Info</strong>| %s |\n" % (
        docs_utils.get_url(issue.test_id)))
    if show_code:
        bits.append("<br>\n\n```python")
        bits.extend([indent + line for line in
                     issue.get_code(lines, True).split('\n')])
        bits.append("```\n")
    bits.append("</details>")
    return '\n'.join([bit for bit in bits])
def get_results(manager, sev_level, conf_level, lines):
    """Render every reported issue (or baseline candidate set) as Markdown.

    In baseline mode the manager returns a mapping of issue -> candidates;
    otherwise a plain list of issues.
    """
    issues = manager.get_issue_list(sev_level, conf_level)
    if not len(issues):
        return u"\tNo issues identified."
    is_baseline = not isinstance(issues, list)
    candidate_indent = ' ' * 10
    chunks = []
    for issue in issues:
        if is_baseline and len(issues[issue]) != 1:
            # Ambiguous baseline finding: show the issue plus every candidate.
            chunks.append(_output_issue_str(issue, "",
                                            show_lineno=False,
                                            show_code=False))
            chunks.append(u'\n-- Candidate Issues --')
            for candidate in issues[issue]:
                chunks.append(_output_issue_str(candidate,
                                                candidate_indent,
                                                lines=lines))
        else:
            # Known issue (or single-candidate baseline entry).
            chunks.append(_output_issue_str(issue, "", lines=lines))
        chunks.append('\n')
    return '\n'.join(chunks)
def comment_on_pr(message):
    """Post `message` as a PR comment, or print it when no token is set.

    Reads the GitHub Actions environment: INPUT_GITHUB_TOKEN for auth,
    GITHUB_EVENT_NAME/GITHUB_EVENT_PATH to locate the pull request.
    Outside a pull_request event (with a token present) nothing is emitted.
    """
    token = os.getenv("INPUT_GITHUB_TOKEN")
    if not token:
        print(message)
        return
    if os.getenv("GITHUB_EVENT_NAME") != "pull_request":
        return
    with open(os.getenv("GITHUB_EVENT_PATH")) as json_file:
        event = json.load(json_file)
    headers_dict = {
        "Accept": "application/vnd.github.v3+json",
        "Authorization": f"token {token}"
    }
    request_path = (
        f"https://api.github.com/repos/{event['repository']['full_name']}"
        f"/issues/{event['number']}/comments")
    requests.post(request_path, headers=headers_dict, json={"body": message})
@test_properties.accepts_baseline
def report(manager, fileobj, sev_level, conf_level, lines=-1):
    """Render the scan as Markdown and post/print it via comment_on_pr.

    :param manager: the bandit manager object
    :param fileobj: The output file object, which may be sys.stdout
        (unused here; kept for the formatter entry-point signature)
    :param sev_level: Filtering severity level
    :param conf_level: Filtering confidence level
    :param lines: Number of lines to report, -1 for all
    """
    sections = []
    if manager.results_count(sev_level, conf_level):
        if manager.verbose:
            sections.append(get_verbose_details(manager))
        totals = manager.metrics.data['_totals']
        sections.append("## Bandit results:")
        sections.append('<strong>Total lines of code:</strong> %i' %
                        (totals['loc']))
        sections.append('<strong>Total lines skipped (#nosec):</strong> %i' %
                        (totals['nosec']))
        sections.append(get_metrics(manager))
        sections.append("<details><summary>📋 Click here to see the all possible security issues</summary>\n<br>\n")
        sections.append(get_results(manager, sev_level, conf_level, lines))
        sections.append("</details>")
    comment_on_pr('\n'.join(sections) + '\n')
| 34.119318 | 117 | 0.602498 | 0 | 0 | 0 | 0 | 1,237 | 0.205892 | 0 | 0 | 2,308 | 0.384154 |
347e90ac805f9147fa4f7a2a02dbf307daa9c5ca | 3,685 | py | Python | usecase/usecase-cordova-android-tests/samples/SharedModeLibraryDownload4.x/res/test.py | JianfengXu/crosswalk-test-suite | 6fb6ef9d89235743ee8b867fd2541c5bdf388786 | [
"BSD-3-Clause"
] | null | null | null | usecase/usecase-cordova-android-tests/samples/SharedModeLibraryDownload4.x/res/test.py | JianfengXu/crosswalk-test-suite | 6fb6ef9d89235743ee8b867fd2541c5bdf388786 | [
"BSD-3-Clause"
] | null | null | null | usecase/usecase-cordova-android-tests/samples/SharedModeLibraryDownload4.x/res/test.py | JianfengXu/crosswalk-test-suite | 6fb6ef9d89235743ee8b867fd2541c5bdf388786 | [
"BSD-3-Clause"
] | null | null | null | import os
import commands
import sys
import json
from optparse import OptionParser
# NOTE(review): `global` at module scope is a no-op; kept as-is.
global CROSSWALK_VERSION
# Read the crosswalk version under test from the shared VERSION manifest.
with open("../../tools/VERSION", "rt") as pkg_version_file:
    pkg_version_raw = pkg_version_file.read()
    # NOTE(review): close() is redundant inside a `with` block; kept as-is.
    pkg_version_file.close()
    pkg_version_json = json.loads(pkg_version_raw)
    CROSSWALK_VERSION = pkg_version_json["main-version"]
# Parse the mandatory -u/--url option pointing at XWalkRuntimeLib.apk.
try:
    usage = "Usage: ./test.py -u [http://host/XWalkRuntimeLib.apk]"
    opts_parser = OptionParser(usage=usage)
    opts_parser.add_option(
        "-u",
        "--url",
        dest="url",
        help="specify the url, e.g. http://host/XWalkRuntimeLib.apk")
    # NOTE(review): `global` at module scope is a no-op; kept as-is.
    global BUILD_PARAMETERS
    (BUILD_PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
    print "Got wrong options: %s, exit ..." % e
    sys.exit(1)
# The runtime-library URL is required for shared-mode download testing.
if not BUILD_PARAMETERS.url:
    print "Please add the -u parameter for the url of XWalkRuntimeLib.apk"
    sys.exit(1)
# Versions look like MAJOR.MINOR.BUILD.TYPE; the fourth field selects the
# release channel.
version_parts = CROSSWALK_VERSION.split('.')
if len(version_parts) < 4:
    print "The crosswalk version is not configured exactly!"
    sys.exit(1)
versionType = version_parts[3]
# Type '0' (canary): ensure the shared-library AAR/POM exist in the local
# maven repository, downloading and installing them if missing.
if versionType == '0':
    username = commands.getoutput("echo $USER")
    repository_aar_path = "/home/%s/.m2/repository/org/xwalk/xwalk_shared_library/%s/" \
                          "xwalk_shared_library-%s.aar" % \
                          (username, CROSSWALK_VERSION, CROSSWALK_VERSION)
    repository_pom_path = "/home/%s/.m2/repository/org/xwalk/xwalk_shared_library/%s/" \
                          "xwalk_shared_library-%s.pom" % \
                          (username, CROSSWALK_VERSION, CROSSWALK_VERSION)
    if not os.path.exists(repository_aar_path) or not os.path.exists(repository_pom_path):
        # Fetch the AAR from the official release server, then register it
        # with maven so the cordova plugin build can resolve it.
        wget_cmd = "wget https://download.01.org/crosswalk/releases/crosswalk/" \
                   "android/canary/%s/crosswalk-shared-%s.aar" % \
                   (CROSSWALK_VERSION, CROSSWALK_VERSION)
        install_cmd = "mvn install:install-file -DgroupId=org.xwalk " \
                      "-DartifactId=xwalk_shared_library -Dversion=%s -Dpackaging=aar " \
                      "-Dfile=crosswalk-shared-%s.aar -DgeneratePom=true" % \
                      (CROSSWALK_VERSION, CROSSWALK_VERSION)
        os.system(wget_cmd)
        os.system(install_cmd)
# Build a cordova project that embeds the crosswalk webview plugin in
# shared mode and points it at the runtime-library APK at library_url.
library_url = BUILD_PARAMETERS.url
# Escape '/' so the URL can be spliced into the sed replacement below.
library_url = library_url.replace("/", "\\/")
# Start from a clean project directory.
if os.path.exists("SharedModeLibraryDownload"):
    os.system("rm -rf SharedModeLibraryDownload")
os.system("cordova create SharedModeLibraryDownload com.example.sharedModeLibraryDownload SharedModeLibraryDownload")
os.chdir("./SharedModeLibraryDownload")
# Patch config.xml: name the main activity and allow all navigation.
os.system('sed -i "s/<widget/<widget android-activityName=\\"SharedModeLibraryDownload\\"/g" config.xml')
os.system('sed -i "s/<\/widget>/ <allow-navigation href=\\"*\\" \/>\\n<\/widget>/g" config.xml')
os.system("cordova platform add android")
add_plugin_cmd = "cordova plugin add ../../../tools/cordova-plugin-crosswalk-webview" \
                 " --variable XWALK_VERSION=\"%s\" --variable XWALK_MODE=\"shared\"" % CROSSWALK_VERSION
print add_plugin_cmd
os.system(add_plugin_cmd)
# Inject the runtime-library download URL into the Android manifest.
os.system('sed -i "s/android:supportsRtl=\\"true\\">/android:supportsRtl=\\"true\\">\\n        <meta-data android:name=\\"xwalk_apk_url\\" android:value=\\"' + library_url + '\\" \\/>/g" platforms/android/AndroidManifest.xml')
os.system("cordova build android")
os.system("cordova run")
# Verify the APK was produced and the package installed on the device.
lsstatus = commands.getstatusoutput("ls ./platforms/android/build/outputs/apk/*.apk")
if lsstatus[0] == 0:
    print "Build Package Successfully"
else:
    print "Build Package Error"
pmstatus = commands.getstatusoutput("adb shell pm list packages |grep com.example.sharedModeLibraryDownload")
if pmstatus[0] == 0:
    print "Package Name Consistent"
else:
    print "Package Name Inconsistent"
| 44.39759 | 226 | 0.699322 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,780 | 0.483039 |
347ea510032b5f75cc03eb21b308b8c9b11d3a97 | 1,176 | py | Python | examples/python/simple_triangulation_3.py | chrisidefix/cgal-bindings | ddd8551f1ded7354c82a9690c06fcbc6512b6604 | [
"BSL-1.0"
] | 33 | 2015-04-14T22:03:37.000Z | 2022-01-23T00:01:23.000Z | examples/python/simple_triangulation_3.py | chrisidefix/cgal-bindings | ddd8551f1ded7354c82a9690c06fcbc6512b6604 | [
"BSL-1.0"
] | 4 | 2017-07-23T18:02:32.000Z | 2020-02-17T18:31:37.000Z | examples/python/simple_triangulation_3.py | chrisidefix/cgal-bindings | ddd8551f1ded7354c82a9690c06fcbc6512b6604 | [
"BSL-1.0"
] | 6 | 2015-06-10T10:51:11.000Z | 2019-04-16T10:24:53.000Z | from CGAL.CGAL_Kernel import Point_3
from CGAL.CGAL_Triangulation_3 import Delaunay_triangulation_3
from CGAL.CGAL_Triangulation_3 import Delaunay_triangulation_3_Cell_handle
from CGAL.CGAL_Triangulation_3 import Delaunay_triangulation_3_Vertex_handle
from CGAL.CGAL_Triangulation_3 import Ref_Locate_type_3
from CGAL.CGAL_Triangulation_3 import VERTEX
from CGAL.CGAL_Kernel import Ref_int
# Build a 3D Delaunay triangulation from three coplanar points.
L=[]
L.append( Point_3(0,0,0) )
L.append( Point_3(1,0,0) )
L.append( Point_3(0,1,0) )
T=Delaunay_triangulation_3(L)
n=T.number_of_vertices()
# Insert three more (non-coplanar) points via batch insertion.
V=[]
V.append( Point_3(0,0,1) )
V.append( Point_3(1,1,1) )
V.append( Point_3(2,2,2) )
n = n + T.insert(V)
assert n==6
assert T.is_valid()
# Locate the origin: it must be found exactly on a vertex of some cell.
lt=Ref_Locate_type_3()
li=Ref_int()
lj=Ref_int()
p=Point_3(0,0,0)
c = T.locate(p, lt, li, lj)
assert lt.object() == VERTEX
assert c.vertex(li.object()).point() == p
# Pick a different vertex of the cell and check it also appears in the
# neighbouring cell opposite the located vertex.
v = c.vertex( (li.object()+1)&3 )
nc = c.neighbor(li.object())
nli=Ref_int()
assert nc.has_vertex( v, nli )
# Round-trip the triangulation through a file and compare the counts.
T.write_to_file("output",14)
T1 = Delaunay_triangulation_3()
T1.read_from_file("output")
assert T1.is_valid()
assert T1.number_of_vertices() == T.number_of_vertices()
assert T1.number_of_cells() == T.number_of_cells()
| 21.777778 | 76 | 0.754252 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.013605 |
347f4781f31fad4fabc2967828da3263fd07f923 | 17,298 | py | Python | lims/inventory/views.py | sqilz/LIMS-Backend | b64e1fa512f89e4492803d44c6b8c35e4d4724cc | [
"MIT"
] | 12 | 2017-03-01T10:39:36.000Z | 2022-01-04T06:17:19.000Z | lims/inventory/views.py | sqilz/LIMS-Backend | b64e1fa512f89e4492803d44c6b8c35e4d4724cc | [
"MIT"
] | 29 | 2017-04-25T14:05:08.000Z | 2021-06-21T14:41:53.000Z | lims/inventory/views.py | sqilz/LIMS-Backend | b64e1fa512f89e4492803d44c6b8c35e4d4724cc | [
"MIT"
] | 4 | 2017-10-11T16:22:53.000Z | 2021-02-23T15:45:21.000Z | import io
import json
from django.core.exceptions import ObjectDoesNotExist
from pint import UnitRegistry
import django_filters
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import detail_route, list_route
from rest_framework import serializers
from rest_framework.parsers import FormParser, MultiPartParser
from rest_framework.filters import (OrderingFilter,
SearchFilter,
DjangoFilterBackend)
from lims.permissions.permissions import (IsInAdminGroupOrRO,
ViewPermissionsMixin, ExtendedObjectPermissions,
ExtendedObjectPermissionsFilter)
from lims.shared.mixins import StatsViewMixin, AuditTrailViewMixin
from lims.filetemplate.models import FileTemplate
from lims.projects.models import Product
from .models import Set, Item, ItemTransfer, ItemType, Location, AmountMeasure
from .serializers import (AmountMeasureSerializer, ItemTypeSerializer, LocationSerializer,
ItemSerializer, DetailedItemSerializer, SetSerializer,
ItemTransferSerializer)
from .providers import InventoryItemPluginProvider
# Define as module level due to issues with file locking
# when calling a function requiring it multiple times:
# pint's UnitRegistry() loads its unit definitions from disk, so one shared
# instance is created at import time and reused by the views below.
ureg = UnitRegistry()
class LeveledMixin(AuditTrailViewMixin):
    """
    Provide a display value for a hierarchy of elements.

    Works on django-mptt model instances: nested nodes are prefixed with
    '--' per level of depth so they render as an indented tree.
    """

    def _to_leveled(self, obj):
        """Return a dict describing `obj` with an indentation-aware label."""
        depth = getattr(obj, obj._mptt_meta.level_attr)
        if depth == 0:
            label = obj.name
        else:
            label = '{} {}'.format('--' * depth, obj.name)
        return {
            'display_value': label,
            'value': obj.name,
            'root': obj.get_root().name,
        }
class MeasureViewSet(AuditTrailViewMixin, viewsets.ModelViewSet):
    """CRUD endpoints for amount measures (units); write access is governed
    by the IsInAdminGroupOrRO permission class."""
    queryset = AmountMeasure.objects.all()
    serializer_class = AmountMeasureSerializer
    permission_classes = (IsInAdminGroupOrRO,)
    # Allows ?search= lookups against the unit symbol and name.
    search_fields = ('symbol', 'name',)
class ItemTypeViewSet(viewsets.ModelViewSet, LeveledMixin):
    """CRUD endpoints for the hierarchical ItemType tree."""
    queryset = ItemType.objects.all()
    serializer_class = ItemTypeSerializer
    permission_classes = (IsInAdminGroupOrRO,)
    search_fields = ('name', 'parent__name',)

    def destroy(self, request, *args, **kwargs):
        """Delete the type, refusing (400) while it still has children."""
        instance = self.get_object()
        if not instance.has_children():
            self.perform_destroy(instance)
            return Response(status=204)
        return Response({'message': 'Cannot delete ItemType with children'},
                        status=400)
class LocationViewSet(viewsets.ModelViewSet, LeveledMixin):
    """CRUD endpoints for the hierarchical Location tree."""
    queryset = Location.objects.all()
    serializer_class = LocationSerializer
    permission_classes = (IsInAdminGroupOrRO,)
    search_fields = ('name', 'parent__name')

    def filter_queryset(self, queryset):
        """Apply the standard filters, then force MPTT tree ordering."""
        filtered = super(LocationViewSet, self).filter_queryset(queryset)
        # Set ordering explicitly as django-filter borks the defaults
        return filtered.order_by('tree_id', 'lft')

    def destroy(self, request, *args, **kwargs):
        """Delete the location, refusing (400) while it still has children."""
        instance = self.get_object()
        if not instance.has_children():
            self.perform_destroy(instance)
            return Response(status=204)
        return Response({'message': 'Cannot delete Location with children'},
                        status=400)
class InventoryFilterSet(django_filters.FilterSet):
    """
    Filter for inventory items.

    Declarative django-filter configuration: each entry maps a model field
    (including related-model traversals such as item_type__name) to the
    lookup expressions exposed as query parameters.
    """
    class Meta:
        model = Item
        fields = {
            'id': ['exact'],
            'name': ['exact', 'icontains'],
            'added_by__username': ['exact'],
            'identifier': ['exact'],
            'barcode': ['exact'],
            'description': ['icontains'],
            'item_type__name': ['exact'],
            'location__name': ['exact'],
            'in_inventory': ['exact'],
            'amount_measure__symbol': ['exact'],
            # Numeric/date fields also expose range lookups (lt/lte/gt/gte).
            'amount_available': ['exact', 'lt', 'lte', 'gt', 'gte'],
            'concentration_measure__symbol': ['exact'],
            'concentration': ['exact', 'lt', 'lte', 'gt', 'gte'],
            'added_on': ['exact', 'lt', 'lte', 'gt', 'gte'],
            'last_updated_on': ['exact', 'lt', 'lte', 'gt', 'gte'],
            'properties__name': ['exact', 'icontains'],
            'properties__value': ['exact', 'icontains'],
        }
class InventoryViewSet(LeveledMixin, StatsViewMixin, ViewPermissionsMixin, viewsets.ModelViewSet):
    """CRUD plus import/export/transfer endpoints for inventory Items,
    with object-level permission filtering."""
    queryset = Item.objects.all()
    serializer_class = ItemSerializer
    permission_classes = (ExtendedObjectPermissions,)
    filter_backends = (SearchFilter, DjangoFilterBackend,
                       OrderingFilter, ExtendedObjectPermissionsFilter,)
    search_fields = ('name', 'identifier', 'item_type__name', 'location__name',
                     'location__parent__name')
    filter_class = InventoryFilterSet
def get_serializer_class(self):
if self.action == 'list':
return self.serializer_class
return DetailedItemSerializer
def get_object(self):
instance = super().get_object()
plugins = [p(instance) for p in InventoryItemPluginProvider.plugins]
for p in plugins:
p.view()
return instance
def perform_create(self, serializer):
serializer, permissions = self.clean_serializer_of_permissions(serializer)
instance = serializer.save(added_by=self.request.user)
self.assign_permissions(instance, permissions)
plugins = [p(instance) for p in InventoryItemPluginProvider.plugins]
for p in plugins:
p.create()
def perform_update(self, serializer):
instance = serializer.save()
plugins = [p(instance) for p in InventoryItemPluginProvider.plugins]
for p in plugins:
p.update()
@list_route(methods=['POST'], parser_classes=(FormParser, MultiPartParser,))
def importitems(self, request):
"""
Import items from a CSV file
Expects:
file_template: The ID of the file template to use to parse the file
items_file: The CSV file to parse
permissions: Standard permissions format ({"name": "rw"}) to give to all items
"""
file_template_id = request.data.get('filetemplate', None)
uploaded_file = request.data.get('items_file', None)
permissions = request.data.get('permissions', '{}')
response_data = {}
if uploaded_file and file_template_id:
try:
filetemplate = FileTemplate.objects.get(id=file_template_id)
except FileTemplate.DoesNotExist:
return Response({'message': 'File template does not exist'}, status=404)
encoding = 'utf-8' if request.encoding is None else request.encoding
f = io.TextIOWrapper(uploaded_file.file, encoding=encoding)
items_to_import = filetemplate.read(f, as_list=True)
saved = []
rejected = []
if items_to_import:
for item_data in items_to_import:
item_data['assign_groups'] = json.loads(permissions)
if 'properties' not in item_data:
item_data['properties'] = []
'''
I'm not actually sure what this was supposed to do!
Properties are already list so this shouldn't be required.
else:
item_data['properties'] = ast.literal_eval(item_data['properties'])
'''
item = DetailedItemSerializer(data=item_data)
if item.is_valid():
saved.append(item_data)
item, parsed_permissions = self.clean_serializer_of_permissions(item)
item.validated_data['added_by'] = request.user
instance = item.save()
self.assign_permissions(instance, parsed_permissions)
if 'product' in item_data:
try:
prod = item_data['product']
product = Product.objects.get(product_identifier=prod)
except:
pass
else:
product.linked_inventory.add(instance)
else:
item_data['errors'] = item.errors
rejected.append(item_data)
else:
return Response({'message': 'File is format is incorrect'}, status=400)
response_data = {
'saved': saved,
'rejected': rejected
}
return Response(response_data)
@list_route(methods=['POST'])
def export_items(self, request):
# The ID of the file template
file_template_id = request.data.get('filetemplate', None)
# The ID's of items to get
selected = request.data.get('selected', None)
if file_template_id:
if selected:
ids = selected.strip(',').split(',')
items = Item.objects.filter(pk__in=ids)
else:
# The query used to get the results
# Query params in URL used NOT in .data
items = self.filter_queryset(self.get_queryset())
serializer = DetailedItemSerializer(items, many=True)
try:
file_template = FileTemplate.objects.get(pk=file_template_id)
except:
return Response({'message': 'File template does not exist'}, status=404)
with io.StringIO() as output_file:
output_file = file_template.write(output_file, serializer.data)
output_file.seek(0)
return Response(output_file.read(), content_type='text/csv')
return Response({'Please supply a file template and data to export'}, status=400)
    @detail_route(methods=['POST'])
    def transfer(self, request, pk=None):
        """
        Either create or complete an item transfer.

        Two modes, selected by query params:
        - ?id=<pk>&complete=<truthy>: mark an existing transfer complete.
        - otherwise, with a JSON body: create a new transfer for this item.
          Body fields: 'amount', 'measure' (defaults to the item's own
          measure symbol), 'is_addition', 'transfer_complete', 'barcode',
          'coordinates'.
        """
        tfr_id = request.query_params.get('id', None)
        complete_transfer = request.query_params.get('complete', False)
        transfer_details = request.data
        if tfr_id and complete_transfer:
            # Completion path: only flips the flag, no amounts are touched here.
            try:
                tfr = ItemTransfer.objects.get(pk=tfr_id)
            except ObjectDoesNotExist:
                return Response({'message': 'No item transfer exists with that ID'}, status=404)
            tfr.transfer_complete = True
            tfr.save()
            return Response({'message': 'Transfer {} complete'.format(tfr_id)})
        elif transfer_details:
            # Creation path: build a new transfer against the addressed item.
            item = self.get_object()
            raw_amount = float(transfer_details.get('amount', 0))
            raw_measure = transfer_details.get('measure', item.amount_measure.symbol)
            addition = transfer_details.get('is_addition', False)
            # Booleanise them
            # An addition is immediately complete; a removal is only complete
            # if the request explicitly says so.
            is_complete = False
            is_addition = False
            if addition:
                is_addition = True
                is_complete = True
            if transfer_details.get('transfer_complete', False):
                is_complete = True
            try:
                measure = AmountMeasure.objects.get(symbol=raw_measure)
            except AmountMeasure.DoesNotExist:
                raise serializers.ValidationError({'message':
                    'Measure {} does not exist'.format(raw_measure)
                })
            tfr = ItemTransfer(
                item=item,
                amount_taken=raw_amount,
                amount_measure=measure,
                barcode=transfer_details.get('barcode', ''),
                coordinates=transfer_details.get('coordinates', ''),
                transfer_complete=is_complete,
                is_addition=is_addition
            )
            # check_transfer() appears to return (ok, shortfall); only persist
            # and apply the transfer when the item holds enough stock.
            transfer_status = tfr.check_transfer()
            if transfer_status[0] is True:
                tfr.save()
                tfr.do_transfer(ureg)
            else:
                return Response(
                    {'message': 'Inventory item {} ({}) is short of amount by {}'.format(
                        item.identifier, item.name, transfer_status[1])}, status=400)
            return Response({'message': 'Transfer {} created'.format(tfr.id)})
        return Response({'message': 'You must provide a transfer ID'}, status=400)
@detail_route(methods=['POST'])
def cancel_transfer(self, request, pk=None):
"""
Cancel an active transfer, adding the amount back
"""
tfr_id = request.query_params.get('id', None)
if tfr_id:
try:
tfr = ItemTransfer.objects.get(pk=tfr_id, transfer_complete=False)
except ObjectDoesNotExist:
return Response({'message': 'No item transfer exists with that ID'}, status=404)
tfr.is_addition = True
tfr.do_transfer(ureg)
tfr.delete()
return Response({'message': 'Transfer cancelled'})
return Response({'message': 'You must provide a transfer ID'}, status=400)
class SetViewSet(AuditTrailViewMixin, viewsets.ModelViewSet, ViewPermissionsMixin):
    """
    CRUD for inventory sets plus endpoints to list, add and remove member items.
    """
    queryset = Set.objects.all()
    serializer_class = SetSerializer
    permission_classes = (ExtendedObjectPermissions,)
    search_fields = ('name',)
    filter_fields = ('is_partset',)
    filter_backends = (SearchFilter, DjangoFilterBackend,
                       OrderingFilter, ExtendedObjectPermissionsFilter,)

    def perform_create(self, serializer):
        # Strip permissions from the payload, save, then apply them to the instance.
        serializer, permissions = self.clean_serializer_of_permissions(serializer)
        instance = serializer.save()
        self.assign_permissions(instance, permissions)

    @detail_route()
    def items(self, request, pk=None):
        """
        List items in the set, optionally limited to one item type via ?limit_to=<name>.
        """
        limit_to = request.query_params.get('limit_to', None)
        item = self.get_object()
        if limit_to:
            queryset = [o for o in item.items.all() if o.item_type.name == limit_to]
        else:
            queryset = item.items.all()
        serializer = ItemSerializer(queryset, many=True)
        return Response(serializer.data)

    @detail_route(methods=['POST'])
    def add(self, request, pk=None):
        """
        Add an existing item (?id=<pk>) to this set.
        """
        item_id = request.query_params.get('id', None)
        inventoryset = self.get_object()
        if item_id:
            try:
                item = Item.objects.get(pk=item_id)
            except Item.DoesNotExist:
                raise serializers.ValidationError({'message':
                    'Item {} does not exist'.format(item_id)})
            item.sets.add(inventoryset)
            return Response(status=201)
        return Response(
            {'message': 'The id of the item to add to the inventory is required'}, status=400)

    @detail_route(methods=['DELETE'])
    def remove(self, request, pk=None):
        """
        Remove an item (?id=<pk>) from this set.
        """
        item_id = request.query_params.get('id', None)
        inventoryset = self.get_object()
        if item_id:
            try:
                item = inventoryset.items.get(pk=item_id)
            except Item.DoesNotExist:
                raise serializers.ValidationError({'message':
                    'Item {} does not exist'.format(item_id)})
            inventoryset.items.remove(item)
            return Response(status=204)
        # Fixed copy-paste from add(): this endpoint removes, it does not add.
        return Response(
            {'message': 'The id of the item to remove from the inventory is required'}, status=400)
class ItemTransferViewSet(AuditTrailViewMixin, viewsets.ReadOnlyModelViewSet, ViewPermissionsMixin):
    """
    Read-only access to item transfers; listing is restricted to pending ones.
    """
    queryset = ItemTransfer.objects.all()
    serializer_class = ItemTransferSerializer
    search_fields = ('item__name', 'item__identifier', 'barcode',)
    filter_fields = ('transfer_complete', 'barcode',)
    filter_backends = (SearchFilter, DjangoFilterBackend,
                       OrderingFilter,)

    def get_queryset(self):
        # Only transfers that have not yet been completed are exposed.
        return ItemTransfer.objects.filter(transfer_complete=False)

    @list_route(methods=['GET'])
    def grouped(self, request):
        """
        Group pending transfers by shared barcode (e.g. wells of one plate).

        ?limit=<n> caps how many distinct barcodes are fetched (default 10).
        """
        limit = int(request.query_params.get('limit', 10))
        recent = (ItemTransfer.objects.filter(transfer_complete=False)
                  .distinct('barcode')
                  .order_by('barcode', '-date_created')[:limit])
        barcodes = [entry.barcode for entry in recent]
        pending = (ItemTransfer.objects.filter(transfer_complete=False, barcode__in=barcodes)
                   .order_by('barcode', 'coordinates'))
        serialized = ItemTransferSerializer(pending, many=True)
        groups = {}
        for transfer in serialized.data:
            groups.setdefault(transfer['barcode'], []).append(transfer)
        return Response(groups)
| 41.482014 | 100 | 0.594693 | 15,863 | 0.917042 | 0 | 0 | 10,165 | 0.58764 | 0 | 0 | 3,296 | 0.190542 |
34827671e1dfddc2f889fbb286b2c0765d7735fe | 7,047 | py | Python | python/xfr/models/vggface.py | rwe0214/xfr | a49d9e80a1bc45c25c72c394c60f6274599321aa | [
"MIT"
] | 52 | 2020-08-04T11:33:09.000Z | 2021-12-05T14:16:22.000Z | python/xfr/models/vggface.py | rwe0214/xfr | a49d9e80a1bc45c25c72c394c60f6274599321aa | [
"MIT"
] | 10 | 2020-08-24T08:34:15.000Z | 2021-12-05T06:59:50.000Z | python/xfr/models/vggface.py | rwe0214/xfr | a49d9e80a1bc45c25c72c394c60f6274599321aa | [
"MIT"
] | 12 | 2020-08-05T03:11:56.000Z | 2021-12-14T23:52:50.000Z | # Copyright 2019 Systems & Technology Research, LLC
# Use of this software is governed by the license.txt file.
import os
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torch.nn.functional as F
from PIL import ImageFilter
def prepare_vggface_image(img):
    """
    Convert an RGB byte image to a FloatTensor suitable for processing with the network.

    The image must already be resized/cropped/jittered; this only handles
    RGB->BGR channel order, mean-pixel subtraction, and HWC->CHW layout.
    """
    # Reverse the channel axis: RGB -> BGR.
    bgr = np.asarray(img)[..., ::-1]
    # Subtract the training-set mean pixel (BGR order).
    centered = bgr - np.array((93.5940, 104.7624, 129.1863))
    # HWC -> CHW so the tensor is 3xRxC.
    chw = np.transpose(centered, (2, 0, 1))
    return torch.from_numpy(chw).float()
def generate_random_blur(blur_radius, blur_prob):
    """
    Build a transform that Gaussian-blurs a PIL image with probability blur_prob.
    """
    def maybe_blur(img):
        # One uniform draw per call decides whether this image gets blurred.
        if np.random.random() >= blur_prob:
            return img
        return img.filter(ImageFilter.GaussianBlur(radius=blur_radius))
    return maybe_blur
""" Function suitable for transform argument of datasets.ImageFolder """
def vggface_preprocess(jitter=False, blur_radius=None, blur_prob=1.0):
    """
    Build the torchvision pipeline mapping a PIL image to a VGGFace input tensor.

    jitter enables random-crop/horizontal-flip augmentation (otherwise a
    deterministic center crop). A random Gaussian blur step is appended only
    when blur_radius is given and blur_prob is positive. Suitable for the
    transform argument of datasets.ImageFolder.
    """
    steps = [transforms.Resize(256)]
    if jitter:
        steps += [transforms.RandomCrop((224, 224)),
                  transforms.RandomHorizontalFlip()]
    else:
        steps += [transforms.CenterCrop((224, 224))]
    if blur_radius is not None and blur_prob > 0:
        steps.append(transforms.Lambda(generate_random_blur(blur_radius, blur_prob)))
    # Final step converts PIL RGB image to the network's FloatTensor format.
    steps.append(transforms.Lambda(prepare_vggface_image))
    return transforms.Compose(steps)
class VGGFace(nn.Module):
    """
    The VGGFace network (VGG_VD_16).

    mode selects what forward() returns and must be one of:
      'encode'   - fc7 encoding (4096-d)
      'classify' - fc8 logits over num_classes identities
      'both'     - the (fc7, fc8) pair
    """
    def __init__(self, mode='encode', num_classes=2622):
        super(VGGFace, self).__init__()
        valid_modes = {'encode','classify','both'}
        if mode not in valid_modes:
            raise Exception('mode should be one of ' + str(valid_modes))
        self.mode = mode
        self.fc_outputs = num_classes
        # layers with stored weights
        self.conv1_1 = nn.Conv2d(3,64,(3, 3),(1, 1),(1, 1))
        self.conv1_2 = nn.Conv2d(64,64,(3, 3),(1, 1),(1, 1))
        self.conv2_1 = nn.Conv2d(64,128,(3, 3),(1, 1),(1, 1))
        self.conv2_2 = nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1))
        self.conv3_1 = nn.Conv2d(128,256,(3, 3),(1, 1),(1, 1))
        self.conv3_2 = nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1))
        self.conv3_3 = nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1))
        self.conv4_1 = nn.Conv2d(256,512,(3, 3),(1, 1),(1, 1))
        self.conv4_2 = nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1))
        self.conv4_3 = nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1))
        self.conv5_1 = nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1))
        self.conv5_2 = nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1))
        self.conv5_3 = nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1))
        self.fc6 = nn.Linear(25088,4096)
        self.fc7 = nn.Linear(4096,4096)
        self.fc8 = nn.Linear(4096, self.fc_outputs)
        # layers with no weights
        self.nonlin = nn.ReLU()
        self.maxpool = nn.MaxPool2d((2, 2),(2, 2),(0, 0),ceil_mode=True)
        self.dropout = nn.Dropout(0.5)
    def forward(self, input):
        """
        Run the network.
        Input should be Nx3x224x224.
        Based on self.mode, return output of fc7, fc8, or both.
        """
        assert len(input.size()) == 4
        e1_1 = self.nonlin(self.conv1_1(input))
        e1_2 = self.maxpool(self.nonlin(self.conv1_2(e1_1)))
        e2_1 = self.nonlin(self.conv2_1(e1_2))
        e2_2 = self.maxpool(self.nonlin(self.conv2_2(e2_1)))
        e3_1 = self.nonlin(self.conv3_1(e2_2))
        e3_2 = self.nonlin(self.conv3_2(e3_1))
        e3_3 = self.maxpool(self.nonlin(self.conv3_3(e3_2)))
        e4_1 = self.nonlin(self.conv4_1(e3_3))
        e4_2 = self.nonlin(self.conv4_2(e4_1))
        e4_3 = self.maxpool(self.nonlin(self.conv4_3(e4_2)))
        e5_1 = self.nonlin(self.conv5_1(e4_3))
        e5_2 = self.nonlin(self.conv5_2(e5_1))
        e5_3 = self.maxpool(self.nonlin(self.conv5_3(e5_2)))
        e5_3_flat = e5_3.view(e5_3.size(0), -1)
        e6 = self.nonlin(self.fc6(e5_3_flat))
        # use encoding prior to nonlinearity
        e7_pre = self.fc7(self.dropout(e6))
        e7 = self.nonlin(e7_pre)
        # return e7, e8, or both depending on self.mode
        if self.mode == 'encode':
            return e7
        else:
            e8 = self.fc8(self.dropout(e7))
            if self.mode == 'classify':
                return e8
            elif self.mode == 'both':
                return e7,e8
            else:
                # Bug fix: this branch previously referenced an undefined
                # local name 'mode' (NameError if ever reached); report the
                # stored attribute instead.
                raise Exception('Invalid mode: ' + self.mode)
    def set_fc_outputs(self, new_fc_outputs):
        # Resize the classification head; the new fc8 starts untrained.
        self.fc_outputs = new_fc_outputs
        self.fc8 = nn.Linear(4096, self.fc_outputs)
class VGGFace_Custom(VGGFace):
    """Subclass of VGGFace that overrides forward() to return the fc7
    encoding, optionally L2-normalized. Classification (fc8) is never run.
    """
    def forward(self, input, nrm=True):
        """
        Run the network and return the fc7 encoding.

        Input should be Nx3x224x224. When nrm is True (default) the encoding
        is L2-normalized per row; when False the raw fc7 activation is
        returned. Note self.mode is ignored by this override.
        """
        assert len(input.size()) == 4
        e1_1 = self.nonlin(self.conv1_1(input))
        e1_2 = self.maxpool(self.nonlin(self.conv1_2(e1_1)))
        e2_1 = self.nonlin(self.conv2_1(e1_2))
        e2_2 = self.maxpool(self.nonlin(self.conv2_2(e2_1)))
        e3_1 = self.nonlin(self.conv3_1(e2_2))
        e3_2 = self.nonlin(self.conv3_2(e3_1))
        e3_3 = self.maxpool(self.nonlin(self.conv3_3(e3_2)))
        e4_1 = self.nonlin(self.conv4_1(e3_3))
        e4_2 = self.nonlin(self.conv4_2(e4_1))
        e4_3 = self.maxpool(self.nonlin(self.conv4_3(e4_2)))
        e5_1 = self.nonlin(self.conv5_1(e4_3))
        e5_2 = self.nonlin(self.conv5_2(e5_1))
        e5_3 = self.maxpool(self.nonlin(self.conv5_3(e5_2)))
        e5_3_flat = e5_3.view(e5_3.size(0), -1)
        e6 = self.nonlin(self.fc6(e5_3_flat))
        # use encoding prior to nonlinearity
        e7_pre = self.fc7(self.dropout(e6))
        e7 = self.nonlin(e7_pre)
        # Raw encoding requested: skip normalization entirely.
        if nrm is False:
            return e7
        # Row-wise L2 normalization (replaces an earlier torch.div(e7, norm)
        # approach, which normalized by the whole batch's norm).
        xnorm = F.normalize(e7, p=2, dim=1)
        return xnorm
def vgg16(model_filename=None):
    """
    Construct a VGG-16 (VGGFace_Custom) model, optionally loading weights
    from a saved state dict at model_filename.
    """
    net = VGGFace_Custom()
    if model_filename is not None:
        state = torch.load(model_filename)
        net.load_state_dict(state)
    return net
| 34.208738 | 109 | 0.616858 | 4,917 | 0.697744 | 0 | 0 | 0 | 0 | 0 | 0 | 1,624 | 0.230453 |
348349d381d9db8465fa237f65e4440d7cf40299 | 1,400 | py | Python | easy/1356-Sort Integers by The Number of 1 Bits.py | Davidxswang/leetcode | d554b7f5228f14c646f726ddb91014a612673e06 | [
"Apache-2.0"
] | 2 | 2020-05-08T02:17:17.000Z | 2020-05-17T04:55:56.000Z | easy/1356-Sort Integers by The Number of 1 Bits.py | Davidxswang/leetcode | d554b7f5228f14c646f726ddb91014a612673e06 | [
"Apache-2.0"
] | null | null | null | easy/1356-Sort Integers by The Number of 1 Bits.py | Davidxswang/leetcode | d554b7f5228f14c646f726ddb91014a612673e06 | [
"Apache-2.0"
] | null | null | null | """
https://leetcode.com/problems/sort-integers-by-the-number-of-1-bits/
Given an integer array arr. You have to sort the integers in the array in ascending order by the number of 1's in their binary representation and in case of two or more integers have the same number of 1's you have to sort them in ascending order.
Return the sorted array.
Example 1:
Input: arr = [0,1,2,3,4,5,6,7,8]
Output: [0,1,2,4,8,3,5,6,7]
Explanation: [0] is the only integer with 0 bits.
[1,2,4,8] all have 1 bit.
[3,5,6] have 2 bits.
[7] has 3 bits.
The sorted array by bits is [0,1,2,4,8,3,5,6,7]
Example 2:
Input: arr = [1024,512,256,128,64,32,16,8,4,2,1]
Output: [1,2,4,8,16,32,64,128,256,512,1024]
Explanation: All integers have 1 bit in the binary representation, you should just sort them in ascending order.
Example 3:
Input: arr = [10000,10000]
Output: [10000,10000]
Example 4:
Input: arr = [2,3,5,7,11,13,17,19]
Output: [2,3,5,17,7,11,13,19]
Example 5:
Input: arr = [10,100,1000,10000]
Output: [10,100,10000,1000]
Constraints:
1 <= arr.length <= 500
0 <= arr[i] <= 10^4
"""
# time complexity: O(nlogn), space complexity: O(n)
class Solution:
    def sortByBits(self, arr: List[int]) -> List[int]:
        """
        Sort integers ascending by popcount, breaking ties by value.

        Sorting with a (bit_count, value) key replaces the original
        scale-and-pack trick (bit_count * 1e5 + value), which relied on a
        magic scaling constant and mutated the caller's list in place.
        """
        return sorted(arr, key=lambda num: (bin(num).count('1'), num))
| 26.923077 | 247 | 0.660714 | 269 | 0.192143 | 0 | 0 | 0 | 0 | 0 | 0 | 1,129 | 0.806429 |
ca9e9bd0ad838e0babbbf679e67b3ceb8ca12cb5 | 959 | py | Python | server/plugins/machine_detail_ard_info/scripts/ard_info.py | gregneagle/sal | 74c583fb1c1b33d3201b308b147376b3dcaca33f | [
"Apache-2.0"
] | 2 | 2019-11-01T20:50:35.000Z | 2021-01-13T22:02:55.000Z | server/plugins/machine_detail_ard_info/scripts/ard_info.py | gregneagle/sal | 74c583fb1c1b33d3201b308b147376b3dcaca33f | [
"Apache-2.0"
] | null | null | null | server/plugins/machine_detail_ard_info/scripts/ard_info.py | gregneagle/sal | 74c583fb1c1b33d3201b308b147376b3dcaca33f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import os
import sys
sys.path.append("/usr/local/munki/munkilib")
import FoundationPlist
RESULTS_PATH = "/usr/local/sal/plugin_results.plist"
def main():
    """
    Read the ARD (Apple Remote Desktop) Text1-Text4 info fields from the
    system preferences plist and append them to Sal's plugin results plist.
    """
    ard_path = "/Library/Preferences/com.apple.RemoteDesktop.plist"
    if os.path.exists(ard_path):
        ard_prefs = FoundationPlist.readPlist(ard_path)
    else:
        ard_prefs = {}
    sal_result_key = "ARD_Info_{}"
    prefs_key_prefix = "Text{}"
    # Map Text1..Text4 -> ARD_Info_1..ARD_Info_4, defaulting to "".
    # range() instead of xrange() keeps the script Python 2/3 compatible.
    data = {
        sal_result_key.format(i): ard_prefs.get(prefs_key_prefix.format(i), "")
        for i in range(1, 5)}
    formatted_results = {
        "plugin": "ARD_Info",
        "historical": False,
        "data": data}
    # Append to any results already written by other plugins.
    if os.path.exists(RESULTS_PATH):
        plugin_results = FoundationPlist.readPlist(RESULTS_PATH)
    else:
        plugin_results = []
    plugin_results.append(formatted_results)
    FoundationPlist.writePlist(plugin_results, RESULTS_PATH)


if __name__ == "__main__":
    main()
ca9f36676f8900dd5fc1fa8ab6463b0c5873344a | 12,476 | py | Python | biquad_filter_original.py | ignaciodsimon/optimised_biquad_filter | 0d85dc42033e767eeb55107e72dba98417377686 | [
"MIT"
] | null | null | null | biquad_filter_original.py | ignaciodsimon/optimised_biquad_filter | 0d85dc42033e767eeb55107e72dba98417377686 | [
"MIT"
] | null | null | null | biquad_filter_original.py | ignaciodsimon/optimised_biquad_filter | 0d85dc42033e767eeb55107e72dba98417377686 | [
"MIT"
] | null | null | null | '''
Standard implementation of a biquad filter
Joe Simon 2018.
'''
import math
from enum import Enum
class BiquadFilterCoefficients():
    """
    Plain container for the six biquad transfer-function coefficients:
    b0, b1, b2 (numerator) and a0, a1, a2 (denominator).
    """
    def __init__(self, b0=1.0, b1=0, b2=0, a0=0, a1=0, a2=0):
        # Defaults describe a unity numerator with a zeroed denominator;
        # real values are filled in by the coefficient generator.
        self.b0, self.b1, self.b2 = b0, b1, b2
        self.a0, self.a1, self.a2 = a0, a1, a2
class BiquadFilterType(Enum):
    """Supported biquad topologies; see generateBiQuadCoefficients for the
    corresponding coefficient formulas (audio-EQ-cookbook designs)."""
    BPF_NORMALIZED = 1  # band-pass, peak gain normalized to 0 dB
    HPF = 2             # high-pass
    BPF = 3             # band-pass
    LPF = 4             # low-pass
    NOTCH = 5           # band-reject
    APF = 6             # all-pass
    LOW_SHELVING = 7    # low shelf (uses filterGain)
    HIGH_SHELVING = 8   # high shelf (uses filterGain)
    PEAK = 9            # peaking EQ (uses filterGain)
class BiquadFilterParameters:
    """
    Bundle of user-facing filter settings (sample rate, type, f0, Q, gain in
    dB) used to configure a BiquadFilter.
    """
    def __init__(self, sampleRate=48000.0, filterType=BiquadFilterType.APF, filterf0=1000.0, filterQ=1.0, filterGain=0.0):
        # Copy every setting onto the instance unchanged.
        self.sampleRate = sampleRate
        self.filterType = filterType
        self.filterf0 = filterf0
        self.filterQ = filterQ
        self.filterGain = filterGain
class BiquadFilter:
    """
    Stateful second-order IIR (biquad) filter with direct-form-I processing
    and RBJ audio-EQ-cookbook coefficient generation.
    """
    def __init__(self, filterParameters=None, sampleRate=None):
        """
        Build the filter from a BiquadFilterParameters bundle (defaults used
        when omitted); an explicit sampleRate argument overrides the one in
        the parameter bundle. Coefficients are generated immediately.
        """
        # Filter parameters (initialize to a default value if not provided)
        if filterParameters is None:
            filterParameters = BiquadFilterParameters()
        self.filterf0 = filterParameters.filterf0
        self.filterQ = filterParameters.filterQ
        self.filterGain = filterParameters.filterGain
        self.filterType = filterParameters.filterType
        self._sampleRate = filterParameters.sampleRate
        if not sampleRate is None:
            self._sampleRate = sampleRate
        # Initialize filter state (previous two inputs and outputs).
        self.x1 = 0.0
        self.x2 = 0.0
        self.y1 = 0.0
        self.y2 = 0.0
        # Generate filter coefficients
        self.filterCoefficients = BiquadFilterCoefficients()
        self.generateBiQuadCoefficients(filterType=self.filterType, filterf0=self.filterf0, filterQ=self.filterQ, filterGain=self.filterGain)
    def __str__(self):
        # Gain is shown both linear (10**(dB/20)) and in dB.
        return "f0:%10.3f Hz, Q: %.3f, Gain: %.3f (%5.1f dB), Type: %s" % (self.filterf0, self.filterQ,
                                                                           10**(self.filterGain/20.0), self.filterGain,
                                                                           str(self.filterType))
    def processSample(self, inputSample, outputSample=None):
        """
        Filter one sample through the direct-form-I difference equation.

        If outputSample is None the result is returned directly; otherwise
        inputSample and outputSample are treated as one-element containers
        (read inputSample[0], write outputSample[0]) so callers can reuse
        buffers. Internal state (x1/x2/y1/y2) is rotated either way.
        """
        # Earlier single-expression implementation kept for reference:
        # _output = (inputSample * b0) + (x1 * b1) + (x2 * b2)
        #           - (y1 * a1) - (y2 * a2)
        # _output = _output / a0
        if outputSample is None:
            inputSample = float(inputSample)
            self._output = (inputSample * self.filterCoefficients.b0)
            self._output += (self.x1 * self.filterCoefficients.b1)
            self._output += (self.x2 * self.filterCoefficients.b2)
            self._output -= (self.y1 * self.filterCoefficients.a1)
            self._output -= (self.y2 * self.filterCoefficients.a2)
            self._output /= self.filterCoefficients.a0
            # Rotate states
            self.x2 = self.x1
            self.x1 = inputSample
            self.y2 = self.y1
            self.y1 = self._output
            # Return new output
            return self._output
        else:
            # Calculate new output sample in the caller-provided container.
            outputSample[0] = (float(inputSample[0]) * self.filterCoefficients.b0)
            outputSample[0] += (self.x1 * self.filterCoefficients.b1)
            outputSample[0] += (self.x2 * self.filterCoefficients.b2)
            outputSample[0] -= (self.y1 * self.filterCoefficients.a1)
            outputSample[0] -= (self.y2 * self.filterCoefficients.a2)
            outputSample[0] /= self.filterCoefficients.a0
            # Rotate states
            self.x2 = self.x1
            self.x1 = inputSample[0]
            self.y2 = self.y1
            self.y1 = outputSample[0]
    def generateBiQuadCoefficients(self, filterType, filterf0, filterQ, filterGain=0):
        '''
        Generate coefficients for the requested filter type following
        the audio-EQ-cookbook formulas (filterGain is only used by the
        peak/shelving types, in dB):
        http://shepazu.github.io/Audio-EQ-Cookbook/audio-eq-cookbook.html
        '''
        # Clamp f0 below Nyquist to keep the design stable.
        if filterf0 > self._sampleRate / 2.0:
            _limitFreq = 0.99 * self._sampleRate / 2.0
            print("Warning: Filter's f0 was set to %.1f [Hz], limiting it f0 to: %.1f" % (filterf0, _limitFreq))
            filterf0 = _limitFreq
        _omega0 = 2 * math.pi * filterf0 / self._sampleRate
        _alpha = math.sin(_omega0) / (2 * filterQ)
        if filterType == BiquadFilterType.BPF_NORMALIZED:
            # BPF with 0 dB peak gain:
            _b0 = _alpha
            _b1 = 0
            _b2 = -_alpha
            _a0 = 1 + _alpha
            _a1 = -2 * math.cos(_omega0)
            _a2 = 1 - _alpha
        elif filterType == BiquadFilterType.HPF:
            # HPF:
            _b0 = (1 + math.cos(_omega0)) / 2.0
            _b1 = - (1 + math.cos(_omega0))
            _b2 = _b0
            _a0 = 1 + _alpha
            _a1 = -2 * math.cos(_omega0)
            _a2 = 1 - _alpha
        elif filterType == BiquadFilterType.BPF:
            # BPF-2 (peak gain scales with Q):
            _b0 = filterQ * _alpha
            _b1 = 0
            _b2 = -filterQ * _alpha
            _a0 = 1 + _alpha
            _a1 = -2 * math.cos(_omega0)
            _a2 = 1 - _alpha
        elif filterType == BiquadFilterType.LPF:
            # LPF:
            _b0 = (1 - math.cos(_omega0)) / 2.0
            _b1 = 1 - math.cos(_omega0)
            _b2 = _b0
            _a0 = 1 + _alpha
            _a1 = -2 * math.cos(_omega0)
            _a2 = 1 - _alpha
        elif filterType == BiquadFilterType.NOTCH:
            # Notch:
            _b0 = 1
            _b1 = -2 * math.cos(_omega0)
            _b2 = 1
            _a0 = 1 + _alpha
            _a1 = -2 * math.cos(_omega0)
            _a2 = 1 - _alpha
        elif filterType == BiquadFilterType.APF:
            # All-pass:
            _b0 = 1 - _alpha
            _b1 = -2 * math.cos(_omega0)
            _b2 = 1 + _alpha
            _a0 = 1 + _alpha
            _a1 = -2 * math.cos(_omega0)
            _a2 = 1 - _alpha
        elif filterType == BiquadFilterType.PEAK:
            # Peaking EQ; A is the linear amplitude from the dB gain.
            A = 10**(filterGain / 40.0)
            _b0 = 1 + (_alpha * A)
            _b1 = -2 * math.cos(_omega0)
            _b2 = 1 - (_alpha * A)
            _a0 = 1 + (_alpha / A)
            _a1 = -2 * math.cos(_omega0)
            _a2 = 1 - (_alpha / A)
        elif filterType == BiquadFilterType.LOW_SHELVING:
            # Low shelf:
            A = 10**(filterGain / 40.0)
            _b0 = A * ((A + 1) - (A - 1)*math.cos(_omega0) + 2*math.sqrt(A)*_alpha)
            _b1 = 2 * A * ((A - 1) - (A + 1)*math.cos(_omega0))
            _b2 = A * ((A + 1) - (A - 1)*math.cos(_omega0) - 2*math.sqrt(A)*_alpha)
            _a0 = (A + 1) + (A - 1)*math.cos(_omega0) + 2*math.sqrt(A)*_alpha
            _a1 = -2 * ((A - 1) + (A + 1)*math.cos(_omega0))
            _a2 = (A + 1) + (A - 1)*math.cos(_omega0) - 2*math.sqrt(A)*_alpha
        elif filterType == BiquadFilterType.HIGH_SHELVING:
            # High shelf:
            A = 10**(filterGain / 40.0)
            _b0 = A * ((A + 1) + (A - 1)*math.cos(_omega0) + 2*math.sqrt(A)*_alpha)
            _b1 = -2 * A * ((A - 1) + (A + 1)*math.cos(_omega0))
            _b2 = A * ((A + 1) + (A - 1)*math.cos(_omega0) - 2*math.sqrt(A)*_alpha)
            _a0 = (A + 1) - (A - 1)*math.cos(_omega0) + 2*math.sqrt(A)*_alpha
            _a1 = 2 * ((A - 1) - (A + 1)*math.cos(_omega0))
            _a2 = (A + 1) - (A - 1)*math.cos(_omega0) - 2*math.sqrt(A)*_alpha
        else:
            # Unknown type of filter.
            # NOTE(review): _a0 = 0 here makes processSample divide by zero;
            # consider a passthrough (_a0 = 1) instead — confirm intent.
            _b0 = 1
            _b1 = 0
            _b2 = 0
            _a0 = 0
            _a1 = 0
            _a2 = 0
        # Using numpy types is slower, so for real-time applications, better use floats
        self.filterCoefficients.b0 = float(_b0)
        self.filterCoefficients.b1 = float(_b1)
        self.filterCoefficients.b2 = float(_b2)
        self.filterCoefficients.a0 = float(_a0)
        self.filterCoefficients.a1 = float(_a1)
        self.filterCoefficients.a2 = float(_a2)
def computeBiquadFilterResponse(filterCoefficients, normalizedFreqBins):
    """
    Evaluate the biquad transfer function on the unit circle.

    :param filterCoefficients: sequence [b0, b1, b2, a0, a1, a2]
    :param normalizedFreqBins: frequencies as fractions of the sample rate
    :return: list of complex H(f) values, or None for invalid coefficients
    """
    if filterCoefficients is None:
        return
    if not len(filterCoefficients) == 6:
        return
    b0, b1, b2, a0, a1, a2 = filterCoefficients
    # The previous version also derived w0 = acos(-a1/2) and
    # Q = sin(w0)/(2*b0) here; both values were unused and crashed when
    # b0 == 0 (ZeroDivisionError), so that dead code is removed.
    import numpy as np
    e = np.exp
    pi = np.pi
    # H(e^{j*2*pi*f}) = (b0 + b1*z^-1 + b2*z^-2) / (a0 + a1*z^-1 + a2*z^-2)
    return [(b0 + (b1 * e(-1j * 2 * pi * f)) + (b2 * e(-2j * 2 * pi * f)))
            / (a0 + (a1 * e(-1j * 2 * pi * f)) + (a2 * e(-2j * 2 * pi * f)))
            for f in normalizedFreqBins]
def computeBiquadFilterIR(filterCoefficients, IRLength):
    """
    Compute the (real) impulse response of a biquad by inverse-FFT of its
    frequency response sampled on IRLength normalized bins.
    """
    # Local import: the module-level numpy import only happens under
    # __main__, so 'np' was unresolved when this module was imported
    # as a library.
    import numpy as np
    _freqResponse = computeBiquadFilterResponse(filterCoefficients, [i / IRLength for i in range(int(IRLength))])
    return np.real(np.fft.ifft(_freqResponse))
def getFilterSpectrumModule(filter):
    """
    Compute plot-ready data for a BiquadFilter, assuming a 48 kHz rate:
    (frequency bins in Hz, magnitude in dB, phase in degrees, impulse response).
    """
    # Local import: the module-level numpy import only happens under
    # __main__, so 'np' was unresolved when imported as a library.
    import numpy as np
    _coeffs = [filter.filterCoefficients.b0, filter.filterCoefficients.b1, filter.filterCoefficients.b2, \
               filter.filterCoefficients.a0, filter.filterCoefficients.a1, filter.filterCoefficients.a2]
    # Compute complex frequency response up to Nyquist.
    sampleRate = 48000
    _normalizedFreqBins = [i/sampleRate for i in range(int(sampleRate/2.0))]
    _freqResponse = computeBiquadFilterResponse(_coeffs, _normalizedFreqBins)
    _freqResponseMod = 20.0 * np.log10(np.abs(_freqResponse))
    _freqResponsePha = np.angle(_freqResponse) / np.pi * 180.0
    _ir = computeBiquadFilterIR(_coeffs, len(_normalizedFreqBins))
    return np.multiply(_normalizedFreqBins, sampleRate), _freqResponseMod, _freqResponsePha, _ir
if __name__ == "__main__":
    # Demo/visual-test script: filters signals and plots responses.
    import numpy as np
    import matplotlib.pyplot as plot
    # 1) Impulse response of a band-pass filter, plotted as a spectrum.
    _filter = BiquadFilter()
    _filter.generateBiQuadCoefficients(filterf0=1000, filterQ=0.125, filterType=BiquadFilterType.BPF_NORMALIZED)
    _inputSignal = [0.0 for i in range(48000)]
    _inputSignal[0] = 1.0
    _outputSignal = [_filter.processSample(_inputSignal[i]) for i in range(len(_inputSignal))]
    _ir = _outputSignal
    _spec = 20 * np.log10(np.abs(np.fft.fft(_ir)))
    # Keep only the positive-frequency half of the FFT.
    _spec = _spec[0 : round(len(_spec)/2.0)]
    _freqBins = [i / len(_spec) * 48000.0 / 2.0 for i in range(len(_spec))]
    plot.semilogx(_freqBins, _spec)
    plot.grid(1)
    plot.ylim([-60, 10])
    plot.xlim([2, 20000])
    plot.show()
    # quit()
    # 2) Real-time filtering of a 1 kHz sine through a notch at 1 kHz.
    _filter = BiquadFilter()
    _filter.generateBiQuadCoefficients(filterf0=1000, filterQ=10.0, filterType=BiquadFilterType.NOTCH)
    _inputSignal = [np.sin(2 * np.pi * 1000 * i / 48000) for i in range(1000)]
    _outputSignal = [_filter.processSample(_inputSignal[i]) for i in range(len(_inputSignal))]
    plot.plot(_inputSignal)
    plot.plot(_outputSignal)
    plot.legend(['Input', 'Output'])
    plot.grid(1)
    plot.show()
    # quit()
    # 3) Magnitude/phase/IR plots of two notch filters with different Q.
    _filter = BiquadFilter()
    _filterSpecs = [[1000, 0.3, BiquadFilterType.NOTCH],
                    [1000, 30, BiquadFilterType.NOTCH]]
    for _specs in _filterSpecs:
        _filter.generateBiQuadCoefficients(filterf0=_specs[0], filterQ=_specs[1], filterType=_specs[2])
        _bins, _mod, _pha, _ir = getFilterSpectrumModule(_filter)
        plot.subplot(3,1,1)
        plot.semilogx(_bins, _mod)
        plot.grid(1)
        plot.ylim([-60, 10])
        plot.subplot(3,1,2)
        plot.semilogx(_bins, _pha)
        plot.ylim([-180, 180])
        plot.grid(1)
        plot.subplot(3,1,3)
        plot.plot(_ir)
        plot.grid(1)
        plot.grid(1)
        plot.show()
ca9fc7fc056114c1fa1f918327e46761a7db8549 | 5,065 | py | Python | util/training.py | NeuralVFX/facial-pose-estimation-pytorch-v2 | 20af370409e8508a4a5e1e78341f66c5c4f9c1c2 | [
"MIT"
] | 5 | 2021-02-03T21:55:55.000Z | 2022-03-01T15:52:14.000Z | util/training.py | NeuralVFX/facial-pose-estimation-pytorch-v2 | 20af370409e8508a4a5e1e78341f66c5c4f9c1c2 | [
"MIT"
] | null | null | null | util/training.py | NeuralVFX/facial-pose-estimation-pytorch-v2 | 20af370409e8508a4a5e1e78341f66c5c4f9c1c2 | [
"MIT"
] | 1 | 2021-12-08T02:48:08.000Z | 2021-12-08T02:48:08.000Z | import math
import numpy as np
import torch
############################################################################
# Learning Rate
############################################################################
def set_lr_sched(epochs, iters, mult):
    """ Build a cyclical (half-cosine) learning rate schedule
    Args:
        epochs (int): how many cosine cycles to generate
        iters (int): iteration count of the first cycle
        mult (float): growth factor applied to each subsequent cycle length
    Returns:
        list: learning rate multipliers, one per training iteration
    """
    schedule = []
    cycle_len = iters
    for _ in range(epochs):
        # One cycle: multiplier decays from 1.0 toward 0 along a half cosine.
        half_cos = [math.cos((step / cycle_len) * 3.14) * .5 + .5
                    for step in range(int(cycle_len))]
        schedule.extend(half_cos)
        cycle_len *= mult
    return schedule
def set_opt_lr(opt, lr):
    """ Assign per-group learning rates to an optimizer
    Args:
        opt (torch.optimizer): optimizer whose first three param groups are set
        lr (np.array): one learning rate value per group
    """
    # The model is assumed to expose exactly three parameter groups.
    for group_idx in range(3):
        opt.param_groups[group_idx]['lr'] = lr[group_idx]
############################################################################
# Training loop
############################################################################
def train(train_dict, x, y, lr, lr_lookup, current_iter):
    """ Train network for one iter
    Args:
        train_dict (dict): dictionary containing network, optimizer, loss and loader
        x (torch.tensor): input image from dataset
        y (torch.tensor): actual y value from dataset
        lr (float): base learning rate overall multiplier
        lr_lookup (list): float values representing learning rate during training
        current_iter (int): which iteration we are on, used to lookup learning rate
    Returns:
        torch.tensor: mse loss
        torch.tensor: point loss
    """
    # Scale the base per-group rates by this iteration's schedule multiplier.
    lr_mult = lr_lookup[current_iter]
    set_opt_lr(train_dict["opt"], lr * lr_mult)
    train_dict["opt"].zero_grad()
    yhat = train_dict["net"](x)
    mse_loss = train_dict["mse_crit"](yhat, y.float())
    point_loss = train_dict["point_crit"](yhat, y.float())
    # Backprop only the MSE term; point loss is computed for reporting but
    # deliberately excluded from the objective (worked better in testing).
    loss = mse_loss # + point_loss # might not help
    loss.backward()
    train_dict["opt"].step()
    return mse_loss, point_loss
def test(train_dict, x, y):
""" Test network for one iter
Args:
train_dict (dict): dictionary containing network, optimizer, loss and loader
x (torch.tensor): input image from dataset
y (torch.tensor): actual y value from dataset
Returns:
torch.tensor: mse loss
torch.tensor: point loss
"""
yhat = train_dict["net"](x)
mse_loss = train_dict["mse_crit"](yhat, y.float())
point_loss = train_dict["point_crit"](yhat, y.float())
return mse_loss, point_loss
def one_run(train_dict, freeze, lr_list, lr_array):
    """ Run through one cycle of cyclical learning
    Args:
        train_dict (dict): dictionary containing network, optimizer, loss and loader
        freeze (int): how many layers to freeze on the model
        lr_list (float): float values representing learning rate during training
        lr_array (np.array): array of learning rate values
    """
    current_iter = 0
    current_epoch = 0
    done = False
    train_dict["net"].set_freeze(freeze)
    # Loop over epochs until the whole lr schedule has been consumed.
    while not done:
        epoch_test_mse_loss = []
        epoch_train_mse_loss = []
        epoch_test_point_loss = []
        epoch_train_point_loss = []
        # TRAIN LOOP
        train_dict["net"].train()
        for x, y in train_dict["train_loader"]:
            # Stop mid-epoch once the schedule runs out of entries.
            if current_iter > len(lr_list) - 1:
                done = True
                break
            x, y = x.cuda(), y.cuda()
            mse_loss, point_loss = train(train_dict,
                                         x, y,
                                         lr_array,
                                         lr_list,
                                         current_iter)
            epoch_train_mse_loss.append(torch.mean(mse_loss).cpu().detach().numpy())
            epoch_train_point_loss.append(torch.mean(point_loss.float()).cpu().detach().numpy())
            current_iter += 1
        # TEST LOOP
        # NOTE(review): evaluation runs without torch.no_grad(); gradients
        # are not applied but graph memory is still built — confirm intent.
        train_dict["net"].eval()
        for x, y in train_dict["test_loader"]:
            x, y = x.cuda(), y.cuda()
            mse_loss, point_loss = test(train_dict,
                                        x, y)
            epoch_test_mse_loss.append(torch.mean(mse_loss).cpu().detach().numpy())
            epoch_test_point_loss.append(torch.mean(point_loss.float()).cpu().detach().numpy())
        # Per-epoch mean losses for train and test splits.
        print(
            f'train mse_loss: {np.array(epoch_train_mse_loss).mean()}'
            f' train point_loss: {np.array(epoch_train_point_loss).mean()}')
        print(
            f'test mse_loss: {np.array(epoch_test_mse_loss).mean()}'
            f' test point_loss: {np.array(epoch_test_point_loss).mean()}')
        current_epoch += 1
    print('Done')
caa060579f980710bf47cd0784773c22fd517be1 | 10,754 | py | Python | components/fatfs/test_fatfsgen/test_fatfsparse.py | fbucafusco/esp-idf | c2ccc383dae2a47c2c2dc8c7ad78175a3fd11361 | [
"Apache-2.0"
] | null | null | null | components/fatfs/test_fatfsgen/test_fatfsparse.py | fbucafusco/esp-idf | c2ccc383dae2a47c2c2dc8c7ad78175a3fd11361 | [
"Apache-2.0"
] | null | null | null | components/fatfs/test_fatfsgen/test_fatfsparse.py | fbucafusco/esp-idf | c2ccc383dae2a47c2c2dc8c7ad78175a3fd11361 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import os
import shutil
import sys
import unittest
from subprocess import STDOUT, run
from test_utils import compare_folders, fill_sector, generate_local_folder_structure, generate_test_dir_2
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import fatfsgen # noqa E402 # pylint: disable=C0413
class FatFSGen(unittest.TestCase):
    def setUp(self) -> None:
        # Fresh scratch directory plus the reference directory tree used by
        # the tests (raises if a previous run left 'output_data' behind).
        os.makedirs('output_data')
        generate_test_dir_2()
    def tearDown(self) -> None:
        # Remove everything the generator/parser scripts may have produced.
        shutil.rmtree('output_data', ignore_errors=True)
        shutil.rmtree('Espressif', ignore_errors=True)
        shutil.rmtree('testf', ignore_errors=True)
        if os.path.exists('fatfs_image.img'):
            os.remove('fatfs_image.img')
    @staticmethod
    def test_gen_parse() -> None:
        # Round-trip: generate an image from the reference tree with
        # fatfsgen.py, then extract it with fatfsparse.py (which unpacks
        # into an 'Espressif' volume directory) and compare the contents.
        run([
            'python',
            f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
            'output_data/tst_str'
        ], stderr=STDOUT)
        run(['python', '../fatfsgen.py', 'output_data/tst_str'], stderr=STDOUT)
        run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
        assert set(os.listdir('Espressif')) == {'TEST', 'TESTFILE'}
        with open('Espressif/TESTFILE', 'rb') as in_:
            assert in_.read() == b'ahoj\n'
        assert set(os.listdir('Espressif/TEST')) == {'TEST', 'TESTFIL2'}
        with open('Espressif/TEST/TESTFIL2', 'rb') as in_:
            assert in_.read() == b'thisistest\n'
        assert set(os.listdir('Espressif/TEST/TEST')) == {'LASTFILE.TXT'}
        with open('Espressif/TEST/TEST/LASTFILE.TXT', 'rb') as in_:
            assert in_.read() == b'deeptest\n'
@staticmethod
def test_file_chaining() -> None:
fatfs = fatfsgen.FATFS()
fatfs.create_file('WRITEF', extension='TXT')
fatfs.write_content(path_from_root=['WRITEF.TXT'], content=4096 * b'a' + b'a')
fatfs.write_filesystem('fatfs_image.img')
run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
with open('Espressif/WRITEF.TXT', 'rb') as in_:
assert in_.read() == 4097 * b'a'
@staticmethod
def test_full_two_sectors_folder() -> None:
fatfs = fatfsgen.FATFS(size=2 * 1024 * 1024)
fatfs.create_directory('TESTFOLD')
for i in range((2 * 4096) // 32):
fatfs.create_file(f'A{str(i).upper()}', path_from_root=['TESTFOLD'])
fatfs.write_content(path_from_root=['TESTFOLD', 'A253'], content=b'later')
fatfs.write_content(path_from_root=['TESTFOLD', 'A255'], content=b'last')
fatfs.write_filesystem('fatfs_image.img')
run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
assert set(os.listdir('Espressif')) == {'TESTFOLD'}
assert set(os.listdir('Espressif/TESTFOLD')) == {f'A{str(i).upper()}' for i in range(256)}
with open('Espressif/TESTFOLD/A253', 'rb') as in_:
assert in_.read() == b'later'
with open('Espressif/TESTFOLD/A255', 'rb') as in_:
assert in_.read() == b'last'
@staticmethod
def test_empty_fat16() -> None:
fatfs = fatfsgen.FATFS(size=17 * 1024 * 1024)
fatfs.write_filesystem('fatfs_image.img')
run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
@staticmethod
def test_chaining_fat16() -> None:
fatfs = fatfsgen.FATFS(size=17 * 1024 * 1024)
fatfs.create_file('WRITEF', extension='TXT')
fatfs.write_content(path_from_root=['WRITEF.TXT'], content=4096 * b'a' + b'a')
fatfs.write_filesystem('fatfs_image.img')
run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
with open('Espressif/WRITEF.TXT', 'rb') as in_:
assert in_.read() == 4097 * b'a'
@staticmethod
def test_full_sector_folder_fat16() -> None:
fatfs = fatfsgen.FATFS(size=17 * 1024 * 1024)
fatfs.create_directory('TESTFOLD')
fill_sector(fatfs)
fatfs.write_content(path_from_root=['TESTFOLD', 'A0'], content=b'first')
fatfs.write_content(path_from_root=['TESTFOLD', 'A126'], content=b'later')
fatfs.write_filesystem('fatfs_image.img')
run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
assert set(os.listdir('Espressif')) == {'TESTFOLD'}
assert set(os.listdir('Espressif/TESTFOLD')) == {f'A{str(i).upper()}' for i in range(128)}
with open('Espressif/TESTFOLD/A0', 'rb') as in_:
assert in_.read() == b'first'
with open('Espressif/TESTFOLD/A126', 'rb') as in_:
assert in_.read() == b'later'
@staticmethod
def file_(x: str, content_: str = 'hey this is a test') -> dict:
return {
'type': 'file',
'name': x,
'content': content_
}
def test_e2e_file(self) -> None:
struct_: dict = {
'type': 'folder',
'name': 'testf',
'content': [self.file_('NEWF')]
}
generate_local_folder_structure(struct_, path_='.')
run([
'python',
f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
'testf'
], stderr=STDOUT)
run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
assert compare_folders('testf', 'Espressif')
def test_e2e_deeper(self) -> None:
folder_ = {
'type': 'folder',
'name': 'XYZ',
'content': [
self.file_('NEWFLE'),
self.file_('NEW.TXT'),
self.file_('NEWE.TXT'),
self.file_('NEW4.TXT'),
self.file_('NEW5.TXT'),
]
}
struct_: dict = {
'type': 'folder',
'name': 'testf',
'content': [
self.file_('MY_NEW'),
folder_
]
}
generate_local_folder_structure(struct_, path_='.')
run([
'python',
f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
'testf'
], stderr=STDOUT)
run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
assert compare_folders('testf', 'Espressif')
def test_e2e_deeper_large(self) -> None:
folder_ = {
'type': 'folder',
'name': 'XYZ',
'content': [
self.file_('NEWFLE', content_=4097 * 'a'),
self.file_('NEW.TXT', content_=2 * 4097 * 'a'),
self.file_('NEWE.TXT'),
self.file_('NEW4.TXT'),
self.file_('NEW5.TXT'),
]
}
folder2_ = {
'type': 'folder',
'name': 'XYZ3',
'content': [
self.file_('NEWFLE', content_=4097 * 'a'),
self.file_('NEW.TXT', content_=2 * 4097 * 'a'),
self.file_('NEWE.TXT'),
self.file_('NEW4.TXT'),
self.file_('NEW5.TXT'),
]
}
folder3_ = {
'type': 'folder',
'name': 'XYZ2',
'content': [self.file_(f'A{i}') for i in range(50)]
}
struct_: dict = {
'type': 'folder',
'name': 'testf',
'content': [
self.file_('MY_NEW'),
folder_,
folder2_,
folder3_
]
}
generate_local_folder_structure(struct_, path_='.')
run([
'python',
f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
'testf'
], stderr=STDOUT)
run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
assert compare_folders('testf', 'Espressif')
def test_e2e_very_deep(self) -> None:
folder_ = {
'type': 'folder',
'name': 'XYZ',
'content': [
self.file_('NEWFLE', content_=4097 * 'a'),
self.file_('NEW.TXT', content_=2 * 4097 * 'a'),
self.file_('NEWE.TXT'),
self.file_('NEW4.TXT'),
self.file_('NEW5.TXT'),
]
}
folder2_ = {
'type': 'folder',
'name': 'XYZ3',
'content': [
self.file_('NEWFLE', content_=4097 * 'a'),
self.file_('NEW.TXT', content_=2 * 4097 * 'a'),
self.file_('NEWE.TXT'),
self.file_('NEW4.TXT'),
self.file_('NEW5.TXT'),
folder_,
]
}
folder3_ = {
'type': 'folder',
'name': 'XYZ2',
'content': [self.file_(f'A{i}') for i in range(50)] + [folder2_]
}
struct_: dict = {
'type': 'folder',
'name': 'testf',
'content': [
self.file_('MY_NEW'),
folder_,
folder2_,
folder3_
]
}
generate_local_folder_structure(struct_, path_='.')
run([
'python',
f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
'testf'
], stderr=STDOUT)
run(['python', '../fatfsparse.py', 'fatfs_image.img'], stderr=STDOUT)
assert compare_folders('testf', 'Espressif')
def test_e2e_very_deep_long(self) -> None:
folder_ = {
'type': 'folder',
'name': 'veryveryverylong111',
'content': [
self.file_('myndewveryverylongfile1.txt', content_=4097 * 'a'),
self.file_('mynewveryverylongfile22.txt', content_=2 * 4097 * 'a'),
self.file_('mynewveryverylongfile333.txt' * 8),
self.file_('mynewveryverylongfile4444.txt' * 8),
self.file_('mynewveryverylongfile5555.txt'),
self.file_('SHORT.TXT'),
]
}
struct_: dict = {
'type': 'folder',
'name': 'testf',
'content': [
self.file_('mynewveryverylongfile.txt' * 5),
folder_,
]
}
generate_local_folder_structure(struct_, path_='.')
run([
'python',
f'{os.path.join(os.path.dirname(__file__), "..", "fatfsgen.py")}',
'testf', '--long_name_support'
], stderr=STDOUT)
run(['python', '../fatfsparse.py', 'fatfs_image.img', '--long-name-support'], stderr=STDOUT)
assert compare_folders('testf', 'Espressif')
# Allow running this test module directly: `python test_fatfsparse.py`.
if __name__ == '__main__':
    unittest.main()
| 35.846667 | 105 | 0.521666 | 10,258 | 0.953878 | 0 | 0 | 4,047 | 0.376325 | 0 | 0 | 3,275 | 0.304538 |
caa067b7ced84067ccfd44ea02b97cd1e99a10d1 | 7,121 | py | Python | tests/test_tasks.py | rzuris/python-harvest_apiv2 | 1a4915c2772aa9d27b74a545b14138d418566832 | [
"MIT"
] | null | null | null | tests/test_tasks.py | rzuris/python-harvest_apiv2 | 1a4915c2772aa9d27b74a545b14138d418566832 | [
"MIT"
] | null | null | null | tests/test_tasks.py | rzuris/python-harvest_apiv2 | 1a4915c2772aa9d27b74a545b14138d418566832 | [
"MIT"
] | 1 | 2022-03-28T10:47:37.000Z | 2022-03-28T10:47:37.000Z |
# Copyright 2020 Bradbase
import os, sys
import unittest
import configparser
from dataclasses import asdict
from requests_oauthlib import OAuth2Session
from oauthlib.oauth2 import MobileApplicationClient, WebApplicationClient
import httpretty
import warnings
from dacite import from_dict
import json
sys.path.insert(0, sys.path[0]+"/..")
import harvest
from harvest.harvestdataclasses import *
"""
There is a sample test config.
Copy it, name it test_config.ini and fill it out with your test details.
tests/test_config.ini is already in .gitignore
Just in case, the test config file looks like this:
[PERSONAL ACCESS TOKEN]
url = https://api.harvestapp.com/api/v2
put_auth_in_header = True
personal_token = Bearer 1234567.pt.somebunchoflettersandnumbers
account_id = 1234567
[OAuth2 Implicit Code Grant]
uri = https://api.harvestapp.com/api/v2
client_id = aclientid
auth_url = https://id.getharvest.com/oauth2/authorize
[OAuth2 Authorization Code Grant]
uri = https://api.harvestapp.com/api/v2
client_id = aclientid
client_secret = itsmysecret
auth_url = https://id.getharvest.com/oauth2/authorize
token_url = https://id.getharvest.com/api/v2/oauth2/token
account_id = 1234567
"""
"""
Those who tread this path:-
These tests currently really only test that the default URL has been formed
correctly and that the datatype that gets returned can be typed into the dataclass.
Probably enough but a long way from "comprehensive".
"""
class TestTasks(unittest.TestCase):
    """Tests for the task endpoints of the python-harvest client.

    All HTTP traffic is intercepted by httpretty, so these tests exercise
    URL construction and (de)serialization against canned JSON responses.
    """

    def setUp(self):
        personal_access_token = PersonalAccessToken('ACCOUNT_NUMBER', 'PERSONAL_ACCESS_TOKEN')
        self.harvest = harvest.Harvest('https://api.harvestapp.com/api/v2', personal_access_token)
        warnings.filterwarnings("ignore", category=ResourceWarning, message="unclosed.*")  # There's a bug in httpretty ATM.
        httpretty.enable()

    def tearDown(self):
        # BUG FIX: this hook was named `teardown`, which unittest never calls
        # (the hook is `tearDown`), so httpretty was left enabled and its
        # registered URIs leaked between tests.
        httpretty.reset()
        httpretty.disable()

    def test_tasks(self):
        """Exercise list / get / create / update / delete of tasks."""
        task_8083800_dict = {
            "id": 8083800,
            "name": "Business Development",
            "billable_by_default": False,
            "default_hourly_rate": 0.0,
            "is_default": False,
            "is_active": True,
            "created_at": "2017-06-26T22:08:25Z",
            "updated_at": "2017-06-26T22:08:25Z"
        }
        task_8083369_dict = {
            "id": 8083369,
            "name": "Research",
            "billable_by_default": False,
            "default_hourly_rate": 0.0,
            "is_default": True,
            "is_active": True,
            "created_at": "2017-06-26T20:41:00Z",
            "updated_at": "2017-06-26T21:53:34Z"
        }
        task_8083368_dict = {
            "id": 8083368,
            "name": "Project Management",
            "billable_by_default": True,
            "default_hourly_rate": 100.0,
            "is_default": True,
            "is_active": True,
            "created_at": "2017-06-26T20:41:00Z",
            "updated_at": "2017-06-26T21:14:10Z"
        }
        task_8083366_dict = {
            "id": 8083366,
            "name": "Programming",
            "billable_by_default": True,
            "default_hourly_rate": 100.0,
            "is_default": True,
            "is_active": True,
            "created_at": "2017-06-26T20:41:00Z",
            "updated_at": "2017-06-26T21:14:07Z"
        }
        task_8083365_dict = {
            "id": 8083365,
            "name": "Graphic Design",
            "billable_by_default": True,
            "default_hourly_rate": 100.0,
            "is_default": True,
            "is_active": True,
            "created_at": "2017-06-26T20:41:00Z",
            "updated_at": "2017-06-26T21:14:02Z"
        }
        task_8083782_dict = {
            "id": 8083782,
            "name": "New Task Name",
            "billable_by_default": True,
            "default_hourly_rate": 0.0,  # TODO: this is supposed to be an int. Something isn't casting int to float.
            "is_default": False,
            "is_active": True,
            "created_at": "2017-06-26T22:04:31Z",
            "updated_at": "2017-06-26T22:04:31Z"
        }
        tasks_dict = {
            "tasks": [task_8083800_dict, task_8083369_dict, task_8083368_dict, task_8083366_dict, task_8083365_dict],
            "per_page": 100,
            "total_pages": 1,
            "total_entries": 5,
            "next_page": None,
            "previous_page": None,
            "page": 1,
            "links": {
                "first": "https://api.harvestapp.com/v2/tasks?page=1&per_page=100",
                "next": None,
                "previous": None,
                "last": "https://api.harvestapp.com/v2/tasks?page=1&per_page=100"
            }
        }

        # tasks: list all
        httpretty.register_uri(httpretty.GET,
                               "https://api.harvestapp.com/api/v2/tasks?page=1&per_page=100",
                               body=json.dumps(tasks_dict),
                               status=200
                               )
        tasks = from_dict(data_class=Tasks, data=tasks_dict)
        requested_tasks = self.harvest.tasks()
        self.assertEqual(requested_tasks, tasks)

        # get_task: fetch one task by id
        httpretty.register_uri(httpretty.GET,
                               "https://api.harvestapp.com/api/v2/tasks/8083800",
                               body=json.dumps(task_8083800_dict),
                               status=200
                               )
        task = from_dict(data_class=Task, data=task_8083800_dict)
        requested_task = self.harvest.get_task(task_id=8083800)
        self.assertEqual(requested_task, task)

        # create_task
        httpretty.register_uri(httpretty.POST,
                               "https://api.harvestapp.com/api/v2/tasks",
                               body=json.dumps(task_8083782_dict),
                               status=201
                               )
        new_task = from_dict(data_class=Task, data=task_8083782_dict)
        requested_new_task = self.harvest.create_task(name="New Task Name", default_hourly_rate=120.0)  # Harvest doco is wrong. they use hourly_rate not default_hourly_rate
        self.assertEqual(requested_new_task, new_task)

        # update_task
        task_8083782_dict["is_default"] = True
        httpretty.register_uri(httpretty.PATCH,
                               "https://api.harvestapp.com/api/v2/tasks/8083782",
                               body=json.dumps(task_8083782_dict),
                               status=200
                               )
        updated_task = from_dict(data_class=Task, data=task_8083782_dict)
        requested_updated_task = self.harvest.update_task(task_id=8083782, is_default=True)
        self.assertEqual(requested_updated_task, updated_task)

        # delete_task: no body expected, client returns None
        httpretty.register_uri(httpretty.DELETE,
                               "https://api.harvestapp.com/api/v2/tasks/8083782",
                               status=200
                               )
        requested_deleted_task = self.harvest.delete_task(task_id=8083782)
        self.assertEqual(requested_deleted_task, None)

        httpretty.reset()
| 35.078818 | 174 | 0.592052 | 5,675 | 0.796939 | 0 | 0 | 0 | 0 | 0 | 0 | 2,857 | 0.401208 |
caa0e695442da8dc639ed3c9061223b76d6ae4f6 | 579 | py | Python | db_models/deckentry.py | Teplitsa/false-security-1 | 9e5cc23c8bf324d923965bb2624cac4994891154 | [
"MIT"
] | 1 | 2020-10-01T17:44:26.000Z | 2020-10-01T17:44:26.000Z | db_models/deckentry.py | Teplitsa/false-security-1 | 9e5cc23c8bf324d923965bb2624cac4994891154 | [
"MIT"
] | null | null | null | db_models/deckentry.py | Teplitsa/false-security-1 | 9e5cc23c8bf324d923965bb2624cac4994891154 | [
"MIT"
] | 1 | 2021-10-05T12:09:07.000Z | 2021-10-05T12:09:07.000Z | from globals import db
import db_models.game
from db_models.card import Card
class DeckEntry(db.Model):
    """Association row placing one card at a position in a game's deck."""
    #__table_args__ = {'extend_existing': True}
    __tablename__ = 'deckentry'
    id = db.Column(db.Integer, primary_key=True)
    # TODO: Undo nullable
    cardId = db.Column(db.Integer, db.ForeignKey('card.id'), nullable=True)
    gameId = db.Column(db.Integer, db.ForeignKey('game.id'), nullable=False)
    # Eagerly loaded (lazy=False) relationships to the referenced rows;
    # Game.deck is the reverse side of the `game` relationship.
    card = db.relationship('Card', lazy=False)
    game = db.relationship('Game', back_populates='deck', lazy=False)
    # Position of this card within the game's deck ordering.
    order = db.Column(db.Integer, nullable=False)
caa108c99289e504df2d4967b77c333b2d533a6d | 1,482 | py | Python | fisher_py/data/file_error.py | abdelq/fisher_py | befb98732ba7c4e57858d158c68cda09ed829d66 | [
"MIT"
] | 3 | 2021-11-03T20:55:45.000Z | 2022-02-01T10:11:47.000Z | fisher_py/data/file_error.py | abdelq/fisher_py | befb98732ba7c4e57858d158c68cda09ed829d66 | [
"MIT"
] | 2 | 2022-01-28T02:04:21.000Z | 2022-01-29T01:29:14.000Z | fisher_py/data/file_error.py | abdelq/fisher_py | befb98732ba7c4e57858d158c68cda09ed829d66 | [
"MIT"
] | 1 | 2022-01-26T23:30:37.000Z | 2022-01-26T23:30:37.000Z | from fisher_py.net_wrapping import NetWrapperBase
class FileError(NetWrapperBase):
    """Read-only view over the wrapped error/warning state of a file.

    Check ``has_error`` / ``has_warning`` first: the remaining properties
    are only meaningful when the corresponding flag is set.
    """

    @property
    def has_error(self) -> bool:
        """Whether this file has detected an error.

        If this is False, the other error properties in this interface have
        no meaning. Applications should not continue with processing data
        from any file which indicates an error.
        """
        wrapped = self._get_wrapped_object_()
        return wrapped.HasError

    @property
    def has_warning(self) -> bool:
        """Whether this file has detected a warning.

        If this is False, the other warning properties in this interface
        have no meaning.
        """
        wrapped = self._get_wrapped_object_()
        return wrapped.HasWarning

    @property
    def error_code(self) -> int:
        """The error code number, typically a Windows system error number.

        The lowest valid Windows error is 0x00030200. Errors detected within
        our files will have codes below 100.
        """
        wrapped = self._get_wrapped_object_()
        return wrapped.ErrorCode

    @property
    def error_message(self) -> str:
        """The error message; for "unknown exceptions" this may include a
        stack trace."""
        wrapped = self._get_wrapped_object_()
        return wrapped.ErrorMessage

    @property
    def warning_message(self) -> str:
        """The warning message."""
        wrapped = self._get_wrapped_object_()
        return wrapped.WarningMessage
| 32.217391 | 90 | 0.654521 | 1,428 | 0.963563 | 0 | 0 | 1,362 | 0.919028 | 0 | 0 | 824 | 0.556005 |
caa1562b36eda4a5d00b900f7d291109468dbc07 | 1,839 | py | Python | 05-data_acquisition/scrap.py | sachinpr0001/data_science | d028233ff7bbcbbb6b26f01806d1c5ccf788df9a | [
"MIT"
] | null | null | null | 05-data_acquisition/scrap.py | sachinpr0001/data_science | d028233ff7bbcbbb6b26f01806d1c5ccf788df9a | [
"MIT"
] | null | null | null | 05-data_acquisition/scrap.py | sachinpr0001/data_science | d028233ff7bbcbbb6b26f01806d1c5ccf788df9a | [
"MIT"
] | null | null | null | import bs4
import requests
import os
str = input()
input_str = str
str = str.replace(" ", "&20")
url = "https://www.snapdeal.com/search?keyword={}&santizedKeyword=&catId=&categoryId=0&suggested=false&vertical=&noOfResults=20&searchState=&clickSrc=go_header&lastKeyword=&prodCatId=&changeBackToAll=false&foundInAll=false&categoryIdSearched=&cityPageUrl=&categoryUrl=&url=&utmContent=&dealDetail=&sort=rlvncy".format(str)
response = requests.get(url)
soup = bs4.BeautifulSoup(response.content)
picture_element = soup.findAll('picture')
count = 0
try:
os.mkdir(input_str)
for i, picture in enumerate(picture_element):
count = i
with open('{}/{}-{}.jpg'.format(input_str, input_str, i), 'wb') as file:
try:
img_url = picture.img.attrs['src']
response = requests.get(img_url)
file.write(response.content)
except KeyError:
img_url = picture.img.attrs['data-src']
response = requests.get(img_url)
file.write(response.content)
except FileExistsError:
print("The search keyword is same to a previously searched keyword. Therefore, deleting old files.")
for f in os.listdir(input_str):
os.remove(os.path.join(input_str, f))
for i, picture in enumerate(picture_element):
count = i
with open('{}/{}-{}.jpg'.format(input_str, input_str, i), 'wb') as file:
try:
img_url = picture.img.attrs['src']
response = requests.get(img_url)
file.write(response.content)
except KeyError:
img_url = picture.img.attrs['data-src']
response = requests.get(img_url)
file.write(response.content)
print(count, "new files are saved in the newly created folder") | 42.767442 | 322 | 0.633496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 529 | 0.287656 |
caa39db7232070832f9bd3b29c7411a08b85d2e9 | 980 | py | Python | slixmpp/__init__.py | marconfus/slixmpp | bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa | [
"BSD-3-Clause"
] | null | null | null | slixmpp/__init__.py | marconfus/slixmpp | bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa | [
"BSD-3-Clause"
] | null | null | null | slixmpp/__init__.py | marconfus/slixmpp | bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa | [
"BSD-3-Clause"
] | null | null | null | """
Slixmpp: The Slick XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of Slixmpp.
See the file LICENSE for copying permission.
"""
import logging
# Attach a no-op handler so importing applications that don't configure
# logging are not warned about missing handlers.
logging.getLogger(__name__).addHandler(logging.NullHandler())

import asyncio
# Required for python < 3.7 to use the old ssl implementation
# and manage to do starttls as an unintended side effect
# HACK: monkey-patches a private asyncio attribute; may break on Python
# versions where sslproto internals change — confirm before upgrading.
asyncio.sslproto._is_sslproto_available = lambda: False
from slixmpp.stanza import Message, Presence, Iq
from slixmpp.jid import JID, InvalidJID
from slixmpp.xmlstream.stanzabase import ET, ElementBase, register_stanza_plugin
from slixmpp.xmlstream.handler import *
from slixmpp.xmlstream import XMLStream
from slixmpp.xmlstream.matcher import *
from slixmpp.xmlstream.asyncio import asyncio, future_wrapper
from slixmpp.basexmpp import BaseXMPP
from slixmpp.clientxmpp import ClientXMPP
from slixmpp.componentxmpp import ComponentXMPP
from slixmpp.version import __version__, __version_info__
| 33.793103 | 80 | 0.812245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 287 | 0.292857 |
caa3dd046c62cf7f7f653e5201f7bf879ec9032d | 164 | py | Python | model_wrappers/errors.py | SelfHacked/django-model-wrappers | 2aa0bb773d61c6b5c794126b1dc4f06d622ff079 | [
"MIT"
] | null | null | null | model_wrappers/errors.py | SelfHacked/django-model-wrappers | 2aa0bb773d61c6b5c794126b1dc4f06d622ff079 | [
"MIT"
] | null | null | null | model_wrappers/errors.py | SelfHacked/django-model-wrappers | 2aa0bb773d61c6b5c794126b1dc4f06d622ff079 | [
"MIT"
] | 1 | 2021-06-03T12:04:22.000Z | 2021-06-03T12:04:22.000Z | class FieldDoesNotExist(Exception):
def __init__(self, **kwargs):
super().__init__(f"{self.__class__.__name__}: {kwargs}")
self.kwargs = kwargs
| 32.8 | 64 | 0.664634 | 163 | 0.993902 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.231707 |
caa57ab541795d55d1fd7fa6b28f28f0dd754bf4 | 761 | py | Python | ois_api_client/v3_0/deserialization/deserialize_user_header.py | peterkulik/ois_api_client | 51dabcc9f920f89982c4419bb058f5a88193cee0 | [
"MIT"
] | 7 | 2020-10-22T08:15:29.000Z | 2022-01-27T07:59:39.000Z | ois_api_client/v3_0/deserialization/deserialize_user_header.py | peterkulik/ois_api_client | 51dabcc9f920f89982c4419bb058f5a88193cee0 | [
"MIT"
] | null | null | null | ois_api_client/v3_0/deserialization/deserialize_user_header.py | peterkulik/ois_api_client | 51dabcc9f920f89982c4419bb058f5a88193cee0 | [
"MIT"
] | null | null | null | from typing import Optional
import xml.etree.ElementTree as ET
from ...xml.XmlReader import XmlReader as XR
from ..namespaces import COMMON
from ..dto.UserHeader import UserHeader
from .deserialize_crypto import deserialize_crypto
def deserialize_user_header(element: ET.Element) -> Optional[UserHeader]:
    """Build a UserHeader DTO from the given XML element.

    Returns None when *element* is None; credentials are deserialized via
    deserialize_crypto.
    """
    if element is None:
        return None
    return UserHeader(
        login=XR.get_child_text(element, 'login', COMMON),
        password_hash=deserialize_crypto(XR.find_child(element, 'passwordHash', COMMON)),
        tax_number=XR.get_child_text(element, 'taxNumber', COMMON),
        request_signature=deserialize_crypto(XR.find_child(element, 'requestSignature', COMMON)),
    )
| 30.44 | 73 | 0.70565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.065703 |
caa5955124e72ef0bcafe3a20466c885857de706 | 5,172 | py | Python | src/test.py | kevin3314/gcn_ppi | 39b0e618bbb592f9cb8d37edf28deeb7c0987dad | [
"MIT"
] | null | null | null | src/test.py | kevin3314/gcn_ppi | 39b0e618bbb592f9cb8d37edf28deeb7c0987dad | [
"MIT"
] | 1 | 2021-12-08T02:47:10.000Z | 2021-12-08T02:47:10.000Z | src/test.py | kevin3314/gcn_ppi | 39b0e618bbb592f9cb8d37edf28deeb7c0987dad | [
"MIT"
] | null | null | null | from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Optional
import hydra
import numpy as np
import pandas as pd
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import (
Callback,
LightningDataModule,
LightningModule,
Trainer,
seed_everything,
)
from pytorch_lightning.loggers import LightningLoggerBase
from sklearn.model_selection import KFold
from src.train import PrepareTmpFile
from src.utils import utils
log = utils.get_logger(__name__)
def test(config: DictConfig, datamodule: Optional[LightningDataModule] = None) -> Optional[List[Dict[str, float]]]:
    """Run the evaluation pipeline for a trained model.

    Instantiates all PyTorch Lightning objects from config, loads the model
    weights from ``config.load_checkpoint`` (relative to ``config.work_dir``)
    and runs ``trainer.test``.

    Args:
        config (DictConfig): Configuration composed by Hydra.
        datamodule: Optional pre-built datamodule; instantiated from the
            config when not supplied.

    Returns:
        The list of metric dicts produced by ``trainer.test``.
    """
    # Set seed for random number generators in pytorch, numpy and python.random
    if config.get("seed"):
        seed_everything(config.seed, workers=True)

    # Init lightning datamodule (unless the caller supplied one, e.g. test_cv)
    if datamodule is None:
        log.info(f"Instantiating datamodule <{config.datamodule._target_}>")
        datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)

    # Init lightning model from the checkpoint rather than from scratch
    log.info(f"Instantiating model <{config.model._target_}>")
    model_cls = utils._locate(config.model._target_)
    checkpoint_path: Path = Path(config.work_dir) / config.load_checkpoint
    model: LightningModule = model_cls.load_from_checkpoint(checkpoint_path)

    # Init lightning callbacks
    callbacks: List[Callback] = []
    if "callbacks" in config:
        for _, cb_conf in config.callbacks.items():
            if "_target_" in cb_conf:
                log.info(f"Instantiating callback <{cb_conf._target_}>")
                callbacks.append(hydra.utils.instantiate(cb_conf))

    # Init lightning loggers
    logger: List[LightningLoggerBase] = []
    if "logger" in config:
        for _, lg_conf in config.logger.items():
            if "_target_" in lg_conf:
                log.info(f"Instantiating logger <{lg_conf._target_}>")
                logger.append(hydra.utils.instantiate(lg_conf))

    # Init lightning trainer
    log.info(f"Instantiating trainer <{config.trainer._target_}>")
    trainer: Trainer = hydra.utils.instantiate(config.trainer, callbacks=callbacks, logger=logger, _convert_="partial")

    # Send some parameters from config to all lightning loggers
    log.info("Logging hyperparameters!")
    utils.log_hyperparameters(
        config=config,
        model=model,
        datamodule=datamodule,
        trainer=trainer,
        callbacks=callbacks,
        logger=logger,
    )

    # Evaluate model on test set, using the best model achieved during training
    log.info("Starting testing!")
    result: List[Dict[str, float]] = trainer.test(model=model, datamodule=datamodule)

    # Make sure everything closed properly
    log.info("Finalizing!")
    utils.finish(
        config=config,
        model=model,
        datamodule=datamodule,
        trainer=trainer,
        callbacks=callbacks,
        logger=logger,
    )

    return result
def test_cv(config: OmegaConf, df: pd.DataFrame):
    """Evaluate each fold of a k-fold cross-validation run.

    *df* is the experiment-tracking table; the single row whose model /
    datamodule / trainer columns match the current config provides one
    checkpoint per fold (columns prefixed ``best_checkpoint``). Each fold is
    re-split exactly as during training (same seed) and evaluated via
    :func:`test`; per-metric results are aggregated with
    ``utils.log_cv_result``.
    """
    # Filter run: narrow df down to the single row matching this config
    log.debug("Filtering")
    log.debug(f"Length: {len(df)}")
    for name, d in [("model", config.model), ("dataset", config.datamodule), ("trainer", config.trainer)]:
        for k, v in d.items():
            if len(df) == 1:
                break
            df = df[df[f"{name}_{k}"] == v]
            log.debug(f"{name}_{k}={v}")
            log.debug(f"Length: {len(df)}")
    index = df.index
    assert len(index) == 1
    run_name = index[0]
    log.info(f"Run name: {run_name}")
    checkpoint_paths = df.filter(regex="^best_checkpoint")
    result_dict = defaultdict(list)
    # Load csv with the full dataset; re-create the same folds as training
    df = pd.read_csv(config.datamodule.csv_path)
    kf = KFold(n_splits=config["folds"], shuffle=True, random_state=config.seed)
    datamodule_params = dict(config.datamodule)
    datamodule_cls = utils._locate(datamodule_params.pop("_target_"))
    datamodule_params.pop("csv_path")  # remove csv_path from params
    for i, (checkpoint_path, (train_idx, test_idx)) in enumerate(
        zip(checkpoint_paths.values[0], kf.split(df)), start=1
    ):
        log.info(f"Start {i}th fold out of {kf.n_splits} folds")
        train_df = df.iloc[train_idx]
        test_df = df.iloc[test_idx]
        # The held-out part is halved into validation and test splits
        valid_df, test_df = np.array_split(test_df, 2)
        log.info(checkpoint_path)
        config.load_checkpoint = checkpoint_path
        # Init lightning datamodule
        log.info(f"Instantiating datamodule <{config.datamodule._target_}>")
        # NOTE(review): the tmp files must outlive datamodule setup, so the
        # evaluation runs inside the with-block — confirm against PrepareTmpFile.
        with PrepareTmpFile(train_df, valid_df, test_df) as (ft, fv, fe):
            datamodule: LightningDataModule = datamodule_cls(ft.name, fv.name, fe.name, **datamodule_params)
            result: List[Dict[str, float]] = test(config, datamodule)
        print(result)
        assert len(result) == 1
        result = result[0]
        for k, v in result.items():
            result_dict[k].append(v)
    utils.log_cv_result(run_name, config, result_dict)
| 35.424658 | 119 | 0.67208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,342 | 0.259474 |
caa5f1ac1b06136391a9d7d4dbe4508ef117d016 | 1,234 | py | Python | source_code/main.py | Sehannnnnnn/shortest-path | a815cbde365a58c1c4b3a0b60293f77dfc89f3df | [
"MIT"
] | null | null | null | source_code/main.py | Sehannnnnnn/shortest-path | a815cbde365a58c1c4b3a0b60293f77dfc89f3df | [
"MIT"
] | null | null | null | source_code/main.py | Sehannnnnnn/shortest-path | a815cbde365a58c1c4b3a0b60293f77dfc89f3df | [
"MIT"
] | null | null | null | from agent import Qnet
from agent import ReplayBuffer
from agent import train
q = Qnet()
q_target = Qnet()
q_target.load_state_dict(q.state_dict())
memory = ReplayBuffer()
print_interval = 20
score = 0.0
optimizer = optim.Adam(q.parameters(), lr=learning_rate)
score_history= []
for n_epi in range(3000):
epsilon = max(0.01, 0.08 - 0.01*(n_epi/200)) #Linear annealing from 8% to 1%
s = env.reset(random_init=True)
done = False
n_step =0
while not done:
n_step +=1
a = q.sample_action(torch.from_numpy(np.array(s)).float(), epsilon)
s_prime, r, done = env.transition(a)
done_mask = 0.0 if done else 1.0
memory.put((s,a,r,s_prime, done_mask))
score += r
if done:
break
s = s_prime
if memory.size()>2000:
train(q, q_target, memory, optimizer)
if n_epi%print_interval==0 and n_epi!=0:
q_target.load_state_dict(q.state_dict())
print("n_episode :{}, score : {:.1f}, n_buffer : {}, eps : {:.1f}%, n_step:{}".format(n_epi, score/print_interval, memory.size(), epsilon*100, n_step))
score_history.append(score/print_interval)
score = 0.0 | 31.641026 | 160 | 0.604538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.084279 |
caa6662e2795f3a211e91c2d833e2f7ae98a4ebc | 9,094 | py | Python | ide/tests/test_import_archive.py | Ramonrlb/cloudpebble | 20b5408724aa810ce1626552d9f1062f1094fc3b | [
"MIT"
] | 147 | 2015-01-11T04:33:10.000Z | 2021-08-12T18:22:52.000Z | ide/tests/test_import_archive.py | Ramonrlb/cloudpebble | 20b5408724aa810ce1626552d9f1062f1094fc3b | [
"MIT"
] | 155 | 2015-01-02T12:54:30.000Z | 2020-11-06T19:17:09.000Z | ide/tests/test_import_archive.py | gfunkmonk/cloudpebble-1 | c5b63483ac26ae0d60ac7ef1bf9e803400188e91 | [
"MIT"
] | 105 | 2015-01-01T21:04:36.000Z | 2021-01-22T22:10:38.000Z | """ These tests check basic operation of ide.tasks.archive.do_import_archive """
import mock
from django.core.exceptions import ValidationError
from ide.tasks.archive import do_import_archive, InvalidProjectArchiveException
from ide.utils.cloudpebble_test import CloudpebbleTestCase, make_package, make_appinfo, build_bundle, override_settings
from ide.models.project import Project
from utils.fakes import FakeS3
__author__ = 'joe'
fake_s3 = FakeS3()
@mock.patch('ide.models.s3file.s3', fake_s3)
class TestImportArchive(CloudpebbleTestCase):
    def setUp(self):
        # Authenticate the test client; login() is provided by
        # CloudpebbleTestCase and presumably sets up self.project_id — see base class.
        self.login()
@staticmethod
def make_resource_spec(name='IMAGE_BLAH'):
return {
'resources': {
'media': [{
'file': 'images/blah.png',
'name': name,
'type': 'bitmap'
}]
}
}
def test_import_basic_bundle_with_appinfo(self):
""" Check that a minimal bundle imports without error """
bundle = build_bundle({
'src/main.c': '',
'appinfo.json': make_appinfo()
})
do_import_archive(self.project_id, bundle)
def test_throws_with_invalid_appinfo(self):
""" Check that appinfo validation is performed with a few invalid values """
invalid_things = [
('projectType', 'invalid'),
('sdkVersion', '1'),
('versionLabel', '01.0'),
]
for k, v in invalid_things:
bundle = build_bundle({
'src/main.c': '',
'appinfo.json': make_appinfo({k: v})
})
with self.assertRaises(ValidationError):
do_import_archive(self.project_id, bundle)
def test_import_basic_bundle_with_npm_manifest(self):
""" Check that archives with package.json can be imported """
bundle = build_bundle({
'src/main.c': '',
'package.json': make_package(package_options={'name': 'myproject'})
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(project.app_long_name, 'test')
self.assertEqual(project.app_short_name, 'myproject')
def test_import_package_with_dependencies(self):
""" Check that dependencies in a package.json file are imported into the database """
deps = {
'some_package': '3.14.15',
'another': 'http://blah.com/package.git',
}
bundle = build_bundle({
'src/main.c': '',
'package.json': make_package(package_options={
'dependencies': deps
})
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
actual_deps = {d.name: d.version for d in project.dependencies.all()}
self.assertDictEqual(actual_deps, deps)
def test_import_package_with_keywords(self):
""" Check that keywords in a package.json file are imported into the database """
keywords = ['pebbles', 'watch', 'bunnies']
bundle = build_bundle({
'src/main.c': '',
'package.json': make_package(package_options={
'keywords': keywords
})
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(set(keywords), set(project.keywords))
def test_import_appinfo_with_resources(self):
""" Check that a resource can be imported in an appinfo.json project """
bundle = build_bundle({
'src/main.c': '',
'resources/images/blah.png': 'contents!',
'appinfo.json': make_appinfo(options=self.make_resource_spec())
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(project.resources.get().variants.get().get_contents(), 'contents!')
def test_import_package_with_resources(self):
""" Check that a resource can be imported in an package.json project """
bundle = build_bundle({
'src/main.c': '',
'resources/images/blah.png': 'contents!',
'package.json': make_package(pebble_options=self.make_resource_spec())
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(project.resources.get().variants.get().get_contents(), 'contents!')
def test_throws_with_local_file_dependencies(self):
""" Throw if any dependencies reference local files """
bad_versions = [
'file:security/breach',
'/security/breach',
'./security/breach',
'../security/breach',
'~/security/breach'
]
for version in bad_versions:
bundle = build_bundle({
'src/main.c': '',
'package.json': make_package(package_options={
'dependencies': {'some_package': version}
})
})
with self.assertRaises(ValidationError):
do_import_archive(self.project_id, bundle)
def test_throws_if_sdk2_project_has_array_appkeys(self):
""" Throw when trying to import an sdk 2 project with array appkeys """
bundle = build_bundle({
'src/main.c': '',
'appinfo.json': make_appinfo(options={'appKeys': [], 'sdkVersion': '2'})
})
with self.assertRaises(ValidationError):
do_import_archive(self.project_id, bundle)
def test_invalid_resource_id(self):
""" Check that invalid characters are banned from resource IDs """
bundle = build_bundle({
'src/main.c': '',
'resources/images/blah.png': 'contents!',
'package.json': make_package(pebble_options=self.make_resource_spec("<>"))
})
with self.assertRaises(ValidationError):
do_import_archive(self.project_id, bundle)
def test_import_json_file(self):
""" Check that json files are correctly imported """
bundle = build_bundle({
'src/js/test.json': '{}',
'src/main.c': '',
'package.json': make_package()
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(project.source_files.filter(file_name='test.json').count(), 1)
def test_import_rocky(self):
""" Check that json files are correctly imported """
bundle = build_bundle({
'src/rocky/index.js': '',
'src/common/lib.js': '',
'src/pkjs/app.js': '',
'package.json': make_package(pebble_options={'projectType': 'rocky'})
})
do_import_archive(self.project_id, bundle)
project = Project.objects.get(pk=self.project_id)
self.assertEqual(project.source_files.filter(file_name='index.js', target='app').count(), 1)
self.assertEqual(project.source_files.filter(file_name='lib.js', target='common').count(), 1)
self.assertEqual(project.source_files.filter(file_name='app.js', target='pkjs').count(), 1)
@mock.patch('ide.models.s3file.s3', fake_s3)
class TestImportLibrary(CloudpebbleTestCase):
    """Archive-import tests for package (library) projects."""

    def setUp(self):
        self.login(type='package')

    def test_import_basic_library(self):
        """ Try importing a basic library """
        archive = build_bundle({
            'include/my-lib.h': '',
            'package.json': make_package(pebble_options={'projectType': 'package'}),
            'src/c/my-lib.c': '',
            'src/c/my-priv.h': '',
        })
        do_import_archive(self.project_id, archive)
        project = Project.objects.get(pk=self.project_id)
        files = {f.file_name: f for f in project.source_files.all()}
        self.assertSetEqual(set(files.keys()), {'my-lib.h', 'my-lib.c', 'my-priv.h'})
        # Headers under include/ are public; everything under src/c is private.
        expected_targets = {'my-lib.h': 'public', 'my-lib.c': 'app', 'my-priv.h': 'app'}
        for name, target in expected_targets.items():
            self.assertEqual(files[name].target, target)

    def test_import_library_with_resources(self):
        """ Try importing a basic library with resources """
        media = [{
            'type': 'bitmap',
            'name': 'MY_RES1',
            'file': 'res1.png'
        }, {
            'type': 'bitmap',
            'name': 'MY_RES2',
            'file': 'res2.png'
        }]
        archive = build_bundle({
            'package.json': make_package(pebble_options={
                'projectType': 'package',
                'resources': {'media': media}
            }),
            'src/resources/res1.png': '',
            'src/resources/res2.png': '',
        })
        do_import_archive(self.project_id, archive)
        project = Project.objects.get(pk=self.project_id)
        self.assertSetEqual({f.file_name for f in project.resources.all()},
                            {'res1.png', 'res2.png'})
| 40.238938 | 119 | 0.591819 | 8,542 | 0.939301 | 0 | 0 | 8,632 | 0.949197 | 0 | 0 | 2,462 | 0.270728 |
caa7b5af1d7c721d26af5a7a4217743395012765 | 2,419 | py | Python | src/features/build_features.py | weasysolutions/skin-lesion-dataset-cookiecutter | f3788b273f9c0caca21867f4846d6efd3683f646 | [
"MIT"
] | null | null | null | src/features/build_features.py | weasysolutions/skin-lesion-dataset-cookiecutter | f3788b273f9c0caca21867f4846d6efd3683f646 | [
"MIT"
] | null | null | null | src/features/build_features.py | weasysolutions/skin-lesion-dataset-cookiecutter | f3788b273f9c0caca21867f4846d6efd3683f646 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import click
import os
import logging
import sys
import pandas as pd
import os, sys, inspect
# Make the helper-module subdirectories importable regardless of the current
# working directory, by resolving them relative to this file.  The original
# code repeated the same three-line stanza once per subdirectory; a loop
# appends the same paths in the same order.
_this_dir = os.path.split(inspect.getfile(inspect.currentframe()))[0]
for _subdir in ("images_in_features_subdirs",
                "non_duplicate_lesion_id",
                "training_and_validation_sets"):
    cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(_this_dir, _subdir)))
    if cmd_subfolder not in sys.path:
        sys.path.append(cmd_subfolder)
from images_in_features_subdirs import images_in_features_subdirs
from non_duplicate_lesion_id import non_duplicate_lesion_id
from training_and_validation_sets import training_and_validation_sets
def build_features(**kwargs):
    """
    Takes data in ../data/interim. Splits training and validation data.
    Stores splitted sets in ../data/processed/base_dir/train_dir
    and ../data/processed/base_dir/val_dir

    BUG FIX: the docstring used to sit *after* the first statements, so it
    was a no-op string expression and ``build_features.__doc__`` was None;
    it must be the first statement (PEP 257).

    Keyword Args:
        metadata_csv (str): path to the lesion metadata CSV.
        images_dir (str): directory containing the raw images.
        train_dir (str): output directory for the training split.
        val_dir (str): output directory for the validation split.
    """
    metadata_csv = kwargs['metadata_csv']
    train_dir = kwargs['train_dir']
    images_dir = kwargs['images_dir']
    val_dir = kwargs['val_dir']

    # Load the metadata table describing every lesion image.
    df = pd.read_csv(metadata_csv)

    # Flag duplicate lesion ids and build a frame of unique lesions.
    df, df_unique_id = non_duplicate_lesion_id(df)

    # Split into training and validation dataframes.
    df, df_train, df_val = training_and_validation_sets(df, df_unique_id)

    # Place images in per-attribute subdirectories for the two splits.
    images_in_features_subdirs(df,
                               images_dir=images_dir,
                               train_dir=train_dir,
                               val_dir=val_dir)

    logger = logging.getLogger(__name__)
    logger.info('Features added. Data is ready for modelling.')
if __name__ == '__main__':
    import json

    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)

    # BUG FIX: `argv` was undefined (NameError) -- the argument vector is
    # `sys.argv`.  Also, build_features() accepts keyword arguments only,
    # so the parsed dict must be unpacked with ** (the old positional call
    # raised TypeError).
    data = json.loads(sys.argv[1])
    build_features(**data)
caaa9bfd17d71dc9c4b5c2733db7f41b398c87ca | 5,897 | py | Python | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/hsa/hlc/hlc.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 8 | 2019-10-07T16:33:47.000Z | 2020-12-07T03:59:58.000Z | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/hsa/hlc/hlc.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 1 | 2017-12-21T23:31:59.000Z | 2017-12-29T16:56:05.000Z | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/hsa/hlc/hlc.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 5 | 2020-08-27T20:44:18.000Z | 2021-08-21T22:54:11.000Z | # A temporary wrapper to connect to the HLC LLVM binaries.
# Currently, connect to commandline interface.
from __future__ import print_function, absolute_import
import sys
from subprocess import check_call
import tempfile
import os
from collections import namedtuple
from numba import config
from .utils import adapt_llvm_version
from .config import BUILTIN_PATH
# Keep a handle to the real subprocess.check_call before shadowing it with
# the logging wrapper below.
_real_check_call = check_call


def check_call(*args, **kwargs):
    """Echo the command line to stdout, then delegate to subprocess.check_call."""
    cmdline = ';'.join(args)
    print('CMD: ' + cmdline, file=sys.stdout)
    return _real_check_call(*args, **kwargs)
class CmdLine(object):
    """Thin wrappers around the HLC command-line tools (opt / llc / llvm-link).

    Each CMD_* template is formatted with input/output file paths and run via
    ``check_call(..., shell=True)`` so the shell expands ``$HSAILBIN``.
    """

    CMD_OPT = ("$HSAILBIN/opt "
               "-O3 "
               # "-gpu "
               # "-whole "
               "-verify "
               "-S "
               "-o {fout} "
               "{fin}")

    CMD_VERIFY = ("$HSAILBIN/opt "
                  "-verify "
                  "-S "
                  "-o {fout} "
                  "{fin}")

    CMD_GEN_HSAIL = ("$HSAILBIN/llc -O2 "
                     "-march=hsail64 "
                     "-filetype=asm "
                     "-o {fout} "
                     "{fin}")

    CMD_GEN_BRIG = ("$HSAILBIN/llc -O2 "
                    "-march=hsail64 "
                    "-filetype=obj "
                    "-o {fout} "
                    "{fin}")

    CMD_LINK_BUILTINS = ("$HSAILBIN/llvm-link "
                         # "-prelink-opt "
                         "-S "
                         "-o {fout} "
                         "{fin} "
                         "{lib}")

    CMD_LINK_LIBS = ("$HSAILBIN/llvm-link "
                     # "-prelink-opt "
                     "-S "
                     "-o {fout} "
                     "{fin} ")

    def verify(self, ipath, opath):
        """Run the IR verifier on *ipath*, writing verified IR to *opath*."""
        cmd = self.CMD_VERIFY.format(fout=opath, fin=ipath)
        check_call(cmd, shell=True)

    def optimize(self, ipath, opath):
        """Run opt -O3 on *ipath*, writing optimized IR to *opath*."""
        cmd = self.CMD_OPT.format(fout=opath, fin=ipath)
        check_call(cmd, shell=True)

    def generate_hsail(self, ipath, opath):
        """Lower LLVM IR at *ipath* to HSAIL assembly at *opath*."""
        cmd = self.CMD_GEN_HSAIL.format(fout=opath, fin=ipath)
        check_call(cmd, shell=True)

    def generate_brig(self, ipath, opath):
        """Lower LLVM IR at *ipath* to a BRIG object file at *opath*."""
        cmd = self.CMD_GEN_BRIG.format(fout=opath, fin=ipath)
        check_call(cmd, shell=True)

    def link_builtins(self, ipath, opath):
        """Link *ipath* against the builtin library into *opath*."""
        cmd = self.CMD_LINK_BUILTINS.format(fout=opath, fin=ipath,
                                            lib=BUILTIN_PATH)
        check_call(cmd, shell=True)

    def link_libs(self, ipath, libpaths, opath):
        """Link *ipath* with every library in *libpaths* into *opath*."""
        cmdline = self.CMD_LINK_LIBS.format(fout=opath, fin=ipath)
        cmdline += ' '.join("{0}".format(lib) for lib in libpaths)
        check_call(cmdline, shell=True)
class Module(object):
    """Collects LLVM IR and drives the HLC toolchain (via CmdLine) through
    verify -> link -> optimize -> finalize, producing HSAIL text and a BRIG
    binary.  All intermediates live in a private temporary directory."""

    def __init__(self):
        """
        Setup
        """
        self._tmpdir = tempfile.mkdtemp()
        self._tempfiles = []   # every intermediate file, in creation order
        self._linkfiles = []   # verified IR files queued for linking
        self._cmd = CmdLine()
        self._finalized = False

    def __del__(self):
        # Cleanup on garbage collection is deliberately disabled: the original
        # code returned before an unreachable ``self.close()`` call (that dead
        # call has been removed).  Call close() explicitly to remove the
        # temporary files -- leaving them behind aids toolchain debugging.
        return

    def close(self):
        """Delete every tracked temporary file, then the temp directory."""
        # Remove all temporary files
        for afile in self._tempfiles:
            os.unlink(afile)
        # Remove directory
        os.rmdir(self._tmpdir)

    def _create_temp_file(self, name, mode='wb'):
        """Create and open a tracked temp file; returns (file object, path)."""
        path = self._track_temp_file(name)
        fobj = open(path, mode=mode)
        return fobj, path

    def _track_temp_file(self, name):
        """Reserve a uniquely numbered path inside the temp directory and
        record it so close() can delete it later."""
        path = os.path.join(self._tmpdir,
                            "{0}-{1}".format(len(self._tempfiles), name))
        self._tempfiles.append(path)
        return path

    def _preprocess(self, llvmir):
        # Rewrite the IR into the LLVM dialect expected by the HLC tools.
        return adapt_llvm_version(llvmir)

    def load_llvm(self, llvmir):
        """
        Load LLVM with HSAIL SPIR spec
        """
        # Preprocess LLVM IR
        # Because HLC does not handle dot in LLVM variable names
        llvmir = self._preprocess(llvmir)

        # Create temp file to store the input file
        tmp_llvm_ir, fin = self._create_temp_file("dump-llvm-ir")
        with tmp_llvm_ir:
            tmp_llvm_ir.write(llvmir.encode('ascii'))

        # Create temp file for optimization
        fout = self._track_temp_file("verified-llvm-ir")
        self._cmd.verify(ipath=fin, opath=fout)

        if config.DUMP_OPTIMIZED:
            with open(fout, 'rb') as fin_opt:
                print(fin_opt.read().decode('ascii'))

        self._linkfiles.append(fout)

    def finalize(self):
        """
        Finalize module and return the HSAIL code
        """
        assert not self._finalized, "Module finalized already"

        # Link dependencies libraries
        llvmfile = self._linkfiles[0]
        pre_builtin_path = self._track_temp_file("link-dep")
        libpaths = self._linkfiles[1:]
        self._cmd.link_libs(ipath=llvmfile, libpaths=libpaths,
                            opath=pre_builtin_path)

        # Link library with the builtin modules
        linked_path = self._track_temp_file("linked-path")
        self._cmd.link_builtins(ipath=pre_builtin_path, opath=linked_path)

        # Optimize
        opt_path = self._track_temp_file("optimized-llvm-ir")
        self._cmd.optimize(ipath=linked_path, opath=opt_path)

        if config.DUMP_OPTIMIZED:
            with open(opt_path, 'rb') as fin:
                print(fin.read().decode('ascii'))

        # Finalize the llvm to HSAIL
        hsail_path = self._track_temp_file("finalized-hsail")
        self._cmd.generate_hsail(ipath=opt_path, opath=hsail_path)

        # Finalize the llvm to BRIG
        brig_path = self._track_temp_file("finalized-brig")
        self._cmd.generate_brig(ipath=opt_path, opath=brig_path)

        self._finalized = True

        # Read HSAIL
        with open(hsail_path, 'rb') as fin:
            hsail = fin.read().decode('ascii')

        # Read BRIG
        with open(brig_path, 'rb') as fin:
            brig = fin.read()

        if config.DUMP_ASSEMBLY:
            print(hsail)

        return namedtuple('FinalizerResult', ['hsail', 'brig'])(hsail, brig)
| 30.713542 | 80 | 0.559267 | 5,363 | 0.909445 | 0 | 0 | 0 | 0 | 0 | 0 | 1,250 | 0.211972 |
caad98cebdae9619248fd033d10604a27d5863a1 | 5,851 | py | Python | lensit/ffs_iterators/bfgs.py | Sebastian-Belkner/LensIt | 3e746ceeaa53b2845af31cc8372cd897e34ad53f | [
"MIT"
] | null | null | null | lensit/ffs_iterators/bfgs.py | Sebastian-Belkner/LensIt | 3e746ceeaa53b2845af31cc8372cd897e34ad53f | [
"MIT"
] | null | null | null | lensit/ffs_iterators/bfgs.py | Sebastian-Belkner/LensIt | 3e746ceeaa53b2845af31cc8372cd897e34ad53f | [
"MIT"
] | null | null | null | from __future__ import print_function
import numpy as np
import os
class BFGS_Hessian(object):
    """
    Class to evaluate the update to inverse Hessian matrix in the L-BFGS scheme.
    (see wikipedia article if nothing else).
    H is B^-1 form that article.
    B_k+1 = B + yy^t / (y^ts) - B s s^t B / (s^t Bk s)) (all k on the RHS)
    H_k+1 = (1 - sy^t / (y^t s) ) H (1 - ys^t / (y^ts))) + ss^t / (y^t s).
    Determinant of B:
    ln det Bk+1 = ln det Bk + ln( s^ty / s^t B s).
    For quasi Newton, s_k = x_k1 - x_k = - alpha_k Hk grad_k with alpha_k newton step-length.
    --> s^t B s at k is alpha_k^2 g_k H g_k
    s^t y is - alpha_k (g_k+1 - g_k) H g_k
    This leads to ln|B_k + 1| = ln |B_k| + ln(1 - 1/alpha_k g_k+1 H g_k / (gk H gk))
    """

    def __init__(self, lib_dir, apply_H0k, paths2ys, paths2ss, L=100000, apply_B0k=None, verbose=True):
        """
        :param lib_dir: directory used to stash temporary alpha coefficients between the two recursion loops.
        :param apply_H0k: user supplied function(x,k), applying a zeroth order estimate of the inverse Hessian to x at
        iter k.
        :param paths2ys: dictionary of paths to the y vectors. y_k = grad_k+1 - grad_k
        :param paths2ss: dictionary of paths to the s vectors. s_k = x_k+1 - xk_k
        :param L: history length; (y, s) pairs older than L iterations are ignored.
        :param apply_B0k: optional function(x,k) applying the zeroth order Hessian (inverse of apply_H0k).
        :return:
        H is inverse Hessian, not Hessian.
        """
        self.lib_dir = lib_dir
        self.paths2ys = paths2ys
        self.paths2ss = paths2ss
        self.L = L
        self.applyH0k = apply_H0k
        self.applyB0k = apply_B0k
        self.verbose = verbose

    def y(self, n):
        # Memory-mapped load of y_n = grad_{n+1} - grad_n from disk.
        return np.load(self.paths2ys[n], mmap_mode='r')

    def s(self, n):
        # Memory-mapped load of s_n = x_{n+1} - x_n from disk.
        return np.load(self.paths2ss[n], mmap_mode='r')

    def add_ys(self, path2y, path2s, k):
        """Register the on-disk (y_k, s_k) pair for iteration k."""
        assert os.path.exists(path2y), path2y
        assert os.path.exists(path2s), path2s
        self.paths2ys[k] = path2y
        self.paths2ss[k] = path2s
        if self.verbose:
            print('Linked y vector ', path2y, ' to Hessian')
            print('Linked s vector ', path2s, ' to Hessian')

    def _save_alpha(self, alpha, i):
        # Stash the two-loop alpha_i coefficient on disk between the two
        # loops of get_mHkgk (keeps memory use flat for long histories).
        fname = os.path.join(self.lib_dir, 'temp_alpha_%s.npy' % i)
        np.save(fname, alpha)
        return

    def _load_alpha(self, i):
        """
        Loads, and remove, alpha from disk.
        :param i: iteration index the alpha coefficient was saved under.
        :return: the saved alpha_i value (file is deleted after reading).
        """
        fname = os.path.join(self.lib_dir, 'temp_alpha_%s.npy' % i)
        assert os.path.exists(fname)
        ret = np.load(fname)
        os.remove(fname)
        return ret

    def applyH(self, x, k, _depth=0):
        """
        Recursive calculation of H_k x, for any x.
        This uses the product form update H_new = (1 - rho s y^t) H (1 - rho y s^t) + rho ss^t
        :param x: vector to apply the inverse Hessian to
        :param k: iter level. Output is H_k x.
        :param _depth : internal, for internal bookkeeping (limits the recursion to L levels).
        :return:
        """
        # Base case: no history left (k == 0), or history window exhausted.
        if k <= 0 or _depth >= self.L or self.L == 0: return self.applyH0k(x, k)
        s = self.s(k - 1)
        y = self.y(k - 1)
        rho = 1. / np.sum(s * y)
        # Apply H_{k-1} to (1 - rho y s^t) x, then the left factor and rank-1 term.
        Hv = self.applyH(x - rho * y * np.sum(x * s), k - 1, _depth=_depth + 1)
        return Hv - s * (rho * np.sum(y * Hv)) + rho * s * np.sum(s * x)

    def get_gk(self, k, alpha_k0):
        """
        Reconstruct gradient at xk, given the first newton step length at step max(0,k-L)
        ! this is very badly behaved numerically.
        """
        assert self.applyB0k is not None
        # g_{k-L} = -B0 s_{k-L} / alpha_{k-L}; later gradients follow by summing y_j.
        ret = -self.applyB0k(self.s(max(0, k - self.L)),max(0,k-self.L)) / alpha_k0
        for j in range(max(0, k - self.L), k):
            ret += self.y(j)
        return ret

    def get_sBs(self, k, alpha_k, alpha_k0):
        """
        Reconstruct s^Bs at x_k, given the first newton step length at step max(0,k-L) and current step alpha_k.
        """
        # Uses s_k = -alpha_k H g_k, hence s^t B s = -alpha_k s^t g_k.
        return - alpha_k * np.sum(self.s(k) * self.get_gk(k, alpha_k0))

    def get_lndet_update(self, k, alpha_k, alpha_k0):
        """
        Return update to B log determinant, lndet B_k+1 = lndet B_k + output.
        """
        return np.log(np.sum(self.y(k) * self.s(k)) / self.get_sBs(k, alpha_k, alpha_k0))

    def get_mHkgk(self, gk, k, output_fname=None):
        """
        Obtains - H_k g_k with L-BFGS two-loop recursion.
        :param gk: grad f(x_k)
        :param k: iterate index
        :return: - H_k g_k according to L-BFGS.
        If output_fname is set then output is saved in file and nothing is returned.
        Should be fine with k == 0
        """
        q = gk.copy()
        rho = lambda i: 1. / np.sum(self.s(i) * self.y(i))
        # First loop: newest pair to oldest, recording alpha_i on disk.
        for i in range(k - 1, np.max([-1, k - self.L - 1]), -1):
            alpha_i = rho(i) * np.sum(self.s(i) * q)
            q -= alpha_i * self.y(i)
            self._save_alpha(alpha_i, i)
        r = self.applyH0k(q, k)
        # Second loop: oldest pair to newest, consuming the saved alphas.
        for i in range(np.max([0, k - self.L]), k):
            beta = rho(i) * np.sum(self.y(i) * r)
            r += self.s(i) * (self._load_alpha(i) - beta)
        if output_fname is None: return -r
        np.save(output_fname, -r)
        return

    def sample_Gaussian(self, k, x_0, rng_state=None):
        """
        sample from a MV zero-mean Gaussian with covariance matrix H, at iteration level k,
        given input x_0 random vector with covariance H_0.
        Since H is the inverse Hessian, then H is roughly the covariance matrix of the parameters in a line search.
        :param k: iteration level of the Hessian to sample under.
        :param x_0: random vector with covariance H_0.
        :param rng_state: optional numpy RNG state to restore before drawing (for reproducibility).
        :return: sample with approximate covariance H_k.
        """
        ret = x_0.copy()
        rho = lambda i: 1. / np.sum(self.s(i) * self.y(i))
        if rng_state is not None: np.random.set_state(rng_state)
        eps = np.random.standard_normal((len(range(np.max([0, k - self.L]), k)), 1))
        # Apply the square-root-like product-form factors pair by pair.
        for idx, i in enumerate(range(np.max([0, k - self.L]), k)):
            ret = ret - self.s(i) * np.sum(self.y(i) * ret) * rho(i) + np.sqrt(rho(i)) * self.s(i) * eps[idx]
        return ret
| 38.24183 | 118 | 0.56093 | 5,780 | 0.987865 | 0 | 0 | 0 | 0 | 0 | 0 | 2,620 | 0.447787 |
caaf2c40188789d0575a2ef98deb50c6eded3c8b | 464 | py | Python | annotations/migrations/0015_organization_description.py | alexliyihao/auto-annotation-web | 391bd2c4a8ea1d2d3aba92a13cd7c41dd77a609d | [
"MIT"
] | 1 | 2021-11-17T15:34:33.000Z | 2021-11-17T15:34:33.000Z | annotations/migrations/0015_organization_description.py | alexliyihao/auto-annotation-web | 391bd2c4a8ea1d2d3aba92a13cd7c41dd77a609d | [
"MIT"
] | null | null | null | annotations/migrations/0015_organization_description.py | alexliyihao/auto-annotation-web | 391bd2c4a8ea1d2d3aba92a13cd7c41dd77a609d | [
"MIT"
] | null | null | null | # Generated by Django 3.2.8 on 2021-11-11 02:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('annotations', '0014_auto_20211110_2007'),
]
operations = [
migrations.AddField(
model_name='organization',
name='description',
field=models.CharField(default='test description', max_length=400),
preserve_default=False,
),
]
| 23.2 | 79 | 0.62069 | 371 | 0.799569 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.280172 |
caaf463e68f987c64b55b2231633fe5be3a6307b | 5,536 | py | Python | paper/figures/make_photometry_table.py | abostroem/asassn15oz | ade090096b61b155c86108d1945bb7b4522365b8 | [
"BSD-3-Clause"
] | null | null | null | paper/figures/make_photometry_table.py | abostroem/asassn15oz | ade090096b61b155c86108d1945bb7b4522365b8 | [
"BSD-3-Clause"
] | 3 | 2019-02-24T23:24:33.000Z | 2019-02-24T23:25:12.000Z | paper/figures/make_photometry_table.py | abostroem/asassn15oz | ade090096b61b155c86108d1945bb7b4522365b8 | [
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Creates a table of all imaging observations in the database for paper:
# * lc_obs.tex
# In[1]:
import numpy as np
from astropy import table
from astropy.table import Table
from astropy.time import Time
from utilities_az import supernova, connect_to_sndavis
# In[2]:

# Open a cursor on the SNDAVIS photometry database.
db, cursor = connect_to_sndavis.get_cursor()

# In[3]:

# Light-curve object; supplies jdexpl (JD of explosion) for phase calculation.
sn15oz = supernova.LightCurve2('asassn-15oz')

# In[4]:

# Every unique measurement for ASASSN-15oz (targetid 322), ordered by JD.
# BINARY(filter) forces a case-sensitive match so e.g. 'V' and 'v' stay distinct.
query_str = '''
SELECT DISTINCT mag, magerr, BINARY(filter), jd, source
FROM photometry
WHERE targetid = 322
ORDER BY jd'''

# In[5]:

cursor.execute(query_str)
results = cursor.fetchall()

# In[6]:

# Map the numeric 'source' codes from the database to observatory names.
# Negative Swift codes mark non-detections in the DB.
loc_dict = {
    1: {'short':'OGG 2m', 'long': 'Haleakala Observatory - 2m'}, #ogg2m001-fs02 2m0
    2: {'short':'COJ 2m', 'long': 'Siding Springs Observatory - 2m'}, #coj2m002-fs03
    3: {'short':'COJ 1m', 'long': 'Siding Springs Observatory - 1m'}, #coj1m003-kb71
    4: {'short':'LSC 1m', 'long': 'CTIO - Region IV'}, #lsc1m004-kb77 1m0
    5: {'short':'LSC 1m', 'long': 'CTIO - Region IV'}, #lsc1m005-kb78 1m0
    8: {'short':'ELP 1m', 'long': 'McDonald Observatory - 1m'},#elp1m008-kb74 1m0
    9: {'short':'LSC 1m', 'long': 'CTIO - Region IV'}, #lsc1m009-fl03 1m0
    10: {'short': 'CPT 1m', 'long': 'SAAO - Sutherland Facilities - 1m'}, #cpt1m010
    11: {'short': 'COJ 1m', 'long': 'Siding Springs Observatory - 1m'}, #coj1m011-kb05 1m0
    12: {'short': 'CPT 1m', 'long': 'SAAO - Sutherland Facilities - 1m'}, #cpt1m012-kb75 1m0
    13: {'short': 'CPT 1m', 'long': 'SAAO - Sutherland Facilities - 1m '},#cpt1m013-kb76 1m0
    88: {'short': 'Swift', 'long': 'Swift'}, #Swift
    -88: {'short': 'Swift', 'long': 'Swift'}} #Swift; non-detections in the DB have negative sources

# In[7]:

# Accumulate one Python list per output table column.
band = []
jd = []
date = []
mag = []
mag_err = []
source = []
phase = []
for iresult in results:
    #rename the Swift filters to their proper names
    # NOTE(review): filter values appear to come back as bytes from the DB
    # driver -- confirm; Swift optical filters are 'us'/'bs'/'vs', UV filters
    # 'uw1'/'uw2'/'um2'.
    ifilter = iresult['BINARY(filter)']
    if ifilter in [b'us', b'bs', b'vs']:
        band.append(ifilter.decode('utf-8')[0])
    elif ifilter in [b'uw1', b'uw2', b'um2']:
        band.append('uv'+ifilter.decode('utf-8')[1:])
    else:
        band.append(ifilter)
    jd.append(iresult['jd'])
    mag.append(iresult['mag'])
    mag_err.append(iresult['magerr'])
    # Near-IR bands come from the NTT; everything else is looked up in loc_dict.
    if ifilter in [b'J', b'H', b'K']:
        source.append('NTT')
    else:
        source.append(loc_dict[iresult['source']]['short'])
    date.append(Time(iresult['jd'], format='jd', out_subfmt='date').iso)
    # Phase = days since explosion.
    phase.append((Time(iresult['jd'], format='jd') - Time(sn15oz.jdexpl, format='jd')).value)

tbdata = Table([date, jd, phase, mag, mag_err, band, source],
               names=['Date-Obs','JD', 'Phase (Day)',
                      'Apparent Magnitude',
                      'Apparent Magnitude Error',
                      'Filter',
                      'Source'])
tbdata.sort(keys=['JD', 'Filter'])

# In[8]:

#tbdata.write('../lc_obs.tex', format='aastex',
#             formats={'JD':'%8.2f',
#                      'Phase (Day)':'%4.2f',
#                      'Apparent Magnitude':'%2.2f',
#                      'Apparent Magnitude Error': '%1.2f'}, overwrite=True,
#             latexdict={'preamble':r'\centering',
#                        'caption':r'Imaging Observations of ASASSN-15oz.\label{tab:LcObs}',
#                        'data_start':r'\hline'})

# In[9]:

# Write the first five rows as a short LaTeX sample table for the paper.
tbdata_short = tbdata[0:5].copy()
tbdata_short.write('../lc_obs_short.tex', format='latex',
                   formats={'JD':'%8.2f',
                            'Phase (Day)':'%4.2f',
                            'Apparent Magnitude':'%2.2f',
                            'Apparent Magnitude Error': '%1.2f'}, overwrite=True,
                   latexdict={'preamble':r'\centering',
                              'caption':r'Sample of Imaging Observations of ASASSN-15oz. Full table available on-line.\label{tab:LcObs}',
                              'data_start':r'\hline',
                              'data_end':r'\hline',
                              'header_start':r'\hline',
                              'tabletype': 'table*'})

# In[10]:

#tbdata.write('../lc_obs.dat', overwrite=True, format='ascii')
#ofile = open('../lc_obs.dat', 'r')
#all_lines = ofile.readlines()
#ofile.close()
#header = '''#Photometric observations of ASASSN-15oz.
##Columns:
##Date-Obs: (str) Human readable date of observation
##JD: (float) Julian Date of observation
##Phase: (float) Days since explosion, where explosion is defined as {}
##Apparent Magnitude: (float)
##Apparent Magntidue Error: (float)
##Filter: (str) Filter used for observation
##Source: (str) Observatory used to take the data. OGG, COJ, LSC, ELP, and CPT are Las Cumbres Observatory Telescopes.\n
#'''.format(sn15oz.jdexpl)
#ofile = open('../asassn15oz_lc_obs.dat', 'w')
#ofile.write(header)
#for iline in all_lines[1:]:
#    ofile.write(iline)
#ofile.close()

# In[11]:
# Write the full table to CSV, then produce a second copy with a commented
# header describing each column.  IMPROVEMENT: files are now opened with
# context managers (the originals were never closed on an exception) and the
# copy loop is replaced by writelines().
tbdata.write('../lc_obs.csv', overwrite=True)
with open('../lc_obs.csv', 'r') as ofile:
    all_lines = ofile.readlines()
header = '''#Photometric observations of ASASSN-15oz.
#Columns:
#Date-Obs: (str) Human readable date of observation
#JD: (float) Julian Date of observation
#Phase: (float) Days since explosion, where explosion is defined as {}
#Apparent Magnitude: (float)
#Apparent Magntidue Error: (float)
#Filter: (str) Filter used for observation
#Source: (str) Observatory used to take the data. OGG, COJ, LSC, ELP, and CPT are Las Cumbres Observatory Telescopes.\n
'''.format(sn15oz.jdexpl)
with open('../asassn15oz_lc_obs.csv', 'w') as ofile:
    ofile.write(header)
    # Skip the machine-written column-name row; the comment header above
    # documents the columns instead.
    ofile.writelines(all_lines[1:])
| 27.405941 | 130 | 0.604408 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,489 | 0.630238 |
caaf9b2d174b67f74ceb1ce325fcdf36e83f2b9b | 632 | py | Python | flask_resources/parsers/__init__.py | inveniosoftware/flask-resources | f9d926a959f392b66811a10ee8faaa2f77e6d528 | [
"MIT"
] | 2 | 2021-09-04T22:36:26.000Z | 2021-12-06T22:02:37.000Z | flask_resources/parsers/__init__.py | inveniosoftware/flask-resources | f9d926a959f392b66811a10ee8faaa2f77e6d528 | [
"MIT"
] | 62 | 2020-06-09T09:09:28.000Z | 2021-03-31T16:32:51.000Z | flask_resources/parsers/__init__.py | inveniosoftware/flask-resources | f9d926a959f392b66811a10ee8faaa2f77e6d528 | [
"MIT"
] | 6 | 2020-04-28T08:23:55.000Z | 2021-04-09T07:41:15.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020-2021 CERN.
# Copyright (C) 2020-2021 Northwestern University.
#
# Flask-Resources is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Request parser for the body, headers, query string and view args."""
from .base import RequestParser
from .body import RequestBodyParser
from .decorators import request_body_parser, request_parser
from .schema import MultiDictSchema
__all__ = (
"MultiDictSchema",
"request_body_parser",
"request_parser",
"RequestBodyParser",
"RequestParser",
)
| 27.478261 | 76 | 0.740506 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 413 | 0.653481 |
caaff8629778159017275b2997472982be0e80e4 | 3,253 | py | Python | analyze_tagged_corpus.py | kevincobain2000/nltk-trainer | 5c0b53ccecb7a5042d5af6c4325e134f7d83cb45 | [
"Apache-2.0"
] | 1 | 2021-10-08T11:40:09.000Z | 2021-10-08T11:40:09.000Z | analyze_tagged_corpus.py | kevincobain2000/nltk-trainer | 5c0b53ccecb7a5042d5af6c4325e134f7d83cb45 | [
"Apache-2.0"
] | null | null | null | analyze_tagged_corpus.py | kevincobain2000/nltk-trainer | 5c0b53ccecb7a5042d5af6c4325e134f7d83cb45 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import argparse
import nltk.corpus
from nltk.corpus.util import LazyCorpusLoader
from nltk.probability import FreqDist
from nltk.tag.simplify import simplify_wsj_tag
from nltk_trainer import load_corpus_reader
########################################
## command options & argument parsing ##
########################################
# Command-line interface: positional corpus name/path, plus reader, fileid,
# tag-simplification and sort options.
parser = argparse.ArgumentParser(description='Analyze a part-of-speech tagged corpus',
    formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('corpus',
    help='''The name of a tagged corpus included with NLTK, such as treebank,
brown, cess_esp, floresta, or the root path to a corpus directory,
which can be either an absolute path or relative to a nltk_data directory.''')
parser.add_argument('--trace', default=1, type=int,
    help='How much trace output you want, defaults to %(default)d. 0 is no trace output.')

corpus_group = parser.add_argument_group('Corpus Reader Options')
corpus_group.add_argument('--reader', default=None,
    help='''Full module path to a corpus reader class, such as
nltk.corpus.reader.tagged.TaggedCorpusReader''')
corpus_group.add_argument('--fileids', default=None,
    help='Specify fileids to load from corpus')
corpus_group.add_argument('--simplify_tags', action='store_true', default=False,
    help='Use simplified tags')

sort_group = parser.add_argument_group('Tag Count Sorting Options')
sort_group.add_argument('--sort', default='tag', choices=['tag', 'count'],
    help='Sort key, defaults to %(default)s')
# NOTE(review): "revere" is a typo for "reverse" in the user-visible help
# text below; left unchanged here since help strings are runtime output.
sort_group.add_argument('--reverse', action='store_true', default=False,
    help='Sort in revere order')

args = parser.parse_args()
###################
## corpus reader ##
###################
tagged_corpus = load_corpus_reader(args.corpus, reader=args.reader, fileids=args.fileids)
if not tagged_corpus:
raise ValueError('%s is an unknown corpus')
if args.trace:
print 'loading %s' % args.corpus
##############
## counting ##
##############

# wc: total token count.  taglen: widest tag seen so far -- starts at 7,
# presumably a minimum column width for the output table (confirm).
wc = 0
tag_counts = FreqDist()
taglen = 7
word_set = set()

# conll2000/switchboard readers do not accept simplify_tags as a keyword,
# so for those corpora tags are simplified inside the loop instead.
if args.simplify_tags and args.corpus not in ['conll2000', 'switchboard']:
    kwargs = {'simplify_tags': True}
else:
    kwargs = {}

for word, tag in tagged_corpus.tagged_words(fileids=args.fileids, **kwargs):
    if len(tag) > taglen:
        taglen = len(tag)
    if args.corpus in ['conll2000', 'switchboard'] and args.simplify_tags:
        tag = simplify_wsj_tag(tag)
    wc += 1
    # loading corpora/treebank/tagged with ChunkedCorpusReader produces None tags
    if not isinstance(tag, basestring): tag = str(tag)
    tag_counts.inc(tag)
    word_set.add(word)

############
## output ##
############

print '%d total words\n%d unique words\n%d tags\n' % (wc, len(word_set), len(tag_counts))

# Python 2 tuple-parameter lambdas: sort rows by tag name or by count.
if args.sort == 'tag':
    sort_key = lambda (t, c): t
elif args.sort == 'count':
    sort_key = lambda (t, c): c
else:
    raise ValueError('%s is not a valid sort option' % args.sort)

# Count column width: widest count plus padding, at least 9 chars.
countlen = max(len(str(tag_counts[tag_counts.max()])) + 2, 9)
# simple reSt table format
print ' '.join(['Tag'.center(taglen), 'Count'.center(countlen)])
print ' '.join(['='*taglen, '='*(countlen)])

for tag, count in sorted(tag_counts.items(), key=sort_key, reverse=args.reverse):
    print ' '.join([tag.ljust(taglen), str(count).rjust(countlen)])

print ' '.join(['='*taglen, '='*(countlen)])
cab0e7355c94993a3d9aee2b860a711d2c4abfa3 | 5,562 | py | Python | service/example_config.py | w1kke/example-service | 4c845796ae314f18b886bd62dcf42f75a40601d0 | [
"MIT"
] | null | null | null | service/example_config.py | w1kke/example-service | 4c845796ae314f18b886bd62dcf42f75a40601d0 | [
"MIT"
] | null | null | null | service/example_config.py | w1kke/example-service | 4c845796ae314f18b886bd62dcf42f75a40601d0 | [
"MIT"
] | null | null | null | # Copyright 2018 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import sys
from squid_py import Config
def get_variable_value(variable):
    """Return the value of the environment variable *variable*.

    Logs an error and exits the process with status 1 when the variable is
    unset.  IMPROVEMENT: the environment is now queried once instead of
    twice (the original called os.getenv in both the test and the return).
    """
    value = os.getenv(variable)
    if value is None:
        logging.error(f'you should provide a {variable}')
        sys.exit(1)
    return value
class ExampleConfig:
_local_aqua_url = "http://172.15.0.15:5000"
_local_brizo_url = "http://localhost:8030"
_duero_aqua_url = "https://aquarius.compute.duero.dev-ocean.com"
_duero_brizo_url = "https://brizo.compute.duero.dev-ocean.com"
# _duero_aqua_url = "http://localhost:5000"
# _duero_brizo_url = "http://localhost:8030"
_pacific_aqua_url = "https://aquarius.pacific.dev-ocean.com"
_pacific_brizo_url = "https://brizo.pacific.dev-ocean.com"
# _nile_aqua_url = "http://172.15.0.15:5000"
# _nile_aqua_url = "https://nginx-aquarius.dev-ocean.com"
# _nile_brizo_url = "https://nginx-brizo.dev-ocean.com"
# _nile_aqua_url = "https://nginx-aquarius.dev-ocean.com"
_nile_aqua_url = "https://aquarius.marketplace.dev-ocean.com"
# _nile_aqua_url = "http://172.15.0.15:5000"
_nile_brizo_url = "https://brizo.marketplace.dev-ocean.com"
# _nile_brizo_url = "http://localhost:8030"
_duero_secret_store_url = "https://secret-store.duero.dev-ocean.com"
_nile_secret_store_url = "https://secret-store.dev-ocean.com"
_pacific_secret_store_url = "https://secret-store.pacific.oceanprotocol.com"
# _nile_secret_store_url = "https://secret-store.marketplace.dev-ocean.com"
_kovan_keeper_url = "http://localhost:8545"
_remote_keeper_url = "https://%s.dev-ocean.com"
_parity_url = "http://localhost:8545"
_net_to_services_url = {
'duero': {'aquarius': _duero_aqua_url, 'brizo': _duero_brizo_url},
'nile': {'aquarius': _nile_aqua_url, 'brizo': _nile_brizo_url},
'kovan': {'aquarius': _local_aqua_url, 'brizo': _local_brizo_url},
'pacific': {'aquarius': _pacific_aqua_url, 'brizo': _pacific_brizo_url},
}
_net_name_map = {
'duero': 'duero',
'duero_local': 'duero',
'nile': 'nile',
'nile_local': 'nile',
'kovan': 'kovan',
'kovan_local': 'kovan',
'pacific': 'pacific',
'pacific_local': 'pacific'
}
_net_to_env_name = {
'nile': 'TEST_NILE',
'nile_local': 'TEST_LOCAL_NILE',
'duero': 'TEST_DUERO',
'duero_local': 'TEST_LOCAL_DUERO',
'spree': 'TEST_LOCAL_SPREE',
'kovan': 'TEST_KOVAN',
'kovan_local': 'TEST_LOCAL_KOVAN',
'pacific': 'TEST_PACIFIC',
'pacific_local': 'TEST_LOCAL_PACIFIC'
}
@staticmethod
def get_config_net():
    """Return the network selected via the ``TEST_NET`` environment variable.

    Falls back to ``'spree'`` when ``TEST_NET`` is unset.
    """
    selected_network = os.environ.get('TEST_NET', 'spree')
    return selected_network
@staticmethod
def get_env_name():
    """Map the currently selected network to its ``TEST_*`` env-var name.

    Returns ``None`` for networks not listed in ``_net_to_env_name``.
    """
    current_net = ExampleConfig.get_config_net()
    return ExampleConfig._net_to_env_name.get(current_net)
@staticmethod
def get_base_config():
    """Build the default configuration dict for a locally running stack.

    Returns a dict with the ``keeper-contracts`` and ``resources``
    sections pre-populated with localhost / in-cluster endpoints.
    """
    keeper_section = {
        "keeper.url": "http://localhost:8545",
        "keeper.path": "artifacts",
        "secret_store.url": "http://localhost:12001",
        "parity.url": "http://localhost:8545",
    }
    resources_section = {
        "aquarius.url": "http://172.15.0.15:5000",
        # "aquarius.url": "http://localhost:5000",
        # "brizo.url": "http://172.15.0.17:8030",
        "brizo.url": "http://localhost:8030",
        "storage.path": "squid_py.db",
        "downloads.path": "consume-downloads",
    }
    return {
        "keeper-contracts": keeper_section,
        "resources": resources_section,
    }
@staticmethod
def _get_config(local_node=True, net_key=''):
"""Build the config dict for ``net_key`` on top of the base config.

local_node -- when False (and the net is neither kovan nor pacific),
              point keeper/parity at the remote https://<net>.dev-ocean.com
              node instead of localhost.
net_key    -- key into ``_net_name_map`` (e.g. 'nile', 'duero_local').
"""
config = ExampleConfig.get_base_config()
net_name = ExampleConfig._net_name_map.get(net_key)
# kovan/pacific always use fixed keeper endpoints regardless of local_node.
if net_name == 'kovan':
config['keeper-contracts']['keeper.url'] = ExampleConfig._kovan_keeper_url
elif net_name == 'pacific':
config['keeper-contracts']['keeper.url'] = 'https://pacific.oceanprotocol.com'
config['keeper-contracts']['parity.url'] = 'https://pacific.oceanprotocol.com'
elif not local_node:
config['keeper-contracts']['keeper.url'] = ExampleConfig._remote_keeper_url % net_name
config['keeper-contracts']['parity.url'] = ExampleConfig._remote_keeper_url % net_name
# Recognized networks also get their secret-store/aquarius/brizo URLs filled in.
# NOTE(review): pacific falls through to the nile secret-store URL here even
# though _pacific_secret_store_url exists above -- confirm this is intended.
if net_name:
config['keeper-contracts']['secret_store.url'] = \
ExampleConfig._duero_secret_store_url if net_name == 'duero' \
else ExampleConfig._nile_secret_store_url
service_url = ExampleConfig._net_to_services_url[net_name]
config['resources']['aquarius.url'] = service_url['aquarius']
config['resources']['brizo.url'] = service_url['brizo']
# parity_url maybe different than the keeper_url
# config['keeper-contracts']['parity.url'] = ExampleConfig._parity_url
return config
@staticmethod
def get_config_dict():
    """Assemble the config dict for the network chosen via ``TEST_NET``."""
    net = ExampleConfig.get_config_net()
    # Networks suffixed '_local' (plus 'spree' and an unset value) run
    # against a locally hosted keeper node.
    use_local_node = (not net) or net in ('nile_local', 'duero_local', 'spree', 'kovan_local')
    return ExampleConfig._get_config(use_local_node, net)
@staticmethod
def get_config():
    """Return a ``Config`` object built for the currently selected network."""
    net_name = ExampleConfig.get_config_net()
    logging.debug("Configuration loaded for environment '{}'".format(net_name))
    return Config(options_dict=ExampleConfig.get_config_dict())
| 38.895105 | 98 | 0.627652 | 5,215 | 0.937612 | 0 | 0 | 2,828 | 0.50845 | 0 | 0 | 2,491 | 0.44786 |
cab12a2e4b4e5bc1f20b3d34222016965f6e7990 | 352 | py | Python | 02_Variables/Variable types/tests.py | dannymeijer/level-up-with-python | 1bd1169aafd0fdc124984c30edc7f0153626cf06 | [
"MIT"
] | null | null | null | 02_Variables/Variable types/tests.py | dannymeijer/level-up-with-python | 1bd1169aafd0fdc124984c30edc7f0153626cf06 | [
"MIT"
] | null | null | null | 02_Variables/Variable types/tests.py | dannymeijer/level-up-with-python | 1bd1169aafd0fdc124984c30edc7f0153626cf06 | [
"MIT"
] | null | null | null | from lessons.test_helper import run_common_tests, get_answer_placeholders, passed, failed
def test_type_used():
    """Pass when the learner's answer uses type() on float_number."""
    window = get_answer_placeholders()[0]
    # Both tokens must appear in the answer placeholder for the check to pass.
    if "type" not in window or "float_number" not in window:
        failed("Use the type() function")
    else:
        passed()
# Run the lesson framework's shared checks plus this exercise's own test.
if __name__ == '__main__':
run_common_tests()
test_type_used()
| 23.466667 | 89 | 0.696023 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.15625 |
cab19f6521b5afec4f623e6c04e0f97d6d1dbb36 | 579 | py | Python | rssexample.py | nhtnhan/CMPUT404-Web-Mining | ed9d847a1e3636b9d40c3a29e80aae44b97a0247 | [
"MIT"
] | 1 | 2016-04-01T19:18:16.000Z | 2016-04-01T19:18:16.000Z | rssexample.py | nhtnhan/CMPUT404-Web-Mining | ed9d847a1e3636b9d40c3a29e80aae44b97a0247 | [
"MIT"
] | null | null | null | rssexample.py | nhtnhan/CMPUT404-Web-Mining | ed9d847a1e3636b9d40c3a29e80aae44b97a0247 | [
"MIT"
] | 3 | 2015-02-14T18:11:50.000Z | 2021-04-18T06:48:07.000Z | import feedparser
import difflib
import json
# Fetch CBC's top-stories RSS feed and dump the full parsed result as JSON.
cbc = feedparser.parse("http://rss.cbc.ca/lineup/topstories.xml")
print(json.dumps(cbc))
print("\n\n################################################\n\n")
# Same for CNN's top-stories feed.
cnn = feedparser.parse("http://rss.cnn.com/rss/cnn_topstories.rss")
print(json.dumps(cnn))
print("\n\n################################################\n\n")
# Collect the headline of every entry from each feed.
cbc_titles = [x['title'] for x in cbc.get('entries')]
cnn_titles = [x['title'] for x in cnn.get('entries')]
# For each CNN headline, find the single closest CBC headline.
# Cutoff 0.01 means almost any match is accepted.
res = [(x,difflib.get_close_matches(x,cbc_titles,1,0.01)) for x in
cnn_titles]
print(json.dumps(res))
| 38.6 | 67 | 0.582038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.400691 |
cab1bebb90bfc4565d2855b792d6d2d579ca6ab9 | 3,271 | py | Python | dump_to_json.py | saqib-nadeem/sample_python_scripts | 35054816b262a3860ab9db383ab5d9e11ac0ce87 | [
"MIT"
] | null | null | null | dump_to_json.py | saqib-nadeem/sample_python_scripts | 35054816b262a3860ab9db383ab5d9e11ac0ce87 | [
"MIT"
] | null | null | null | dump_to_json.py | saqib-nadeem/sample_python_scripts | 35054816b262a3860ab9db383ab5d9e11ac0ce87 | [
"MIT"
] | null | null | null | """
Usage Example:
cat imesh_sample.txt | python dump_to_json.py -o imesh.json -e imesh_hashes.json
"""
import sys
import json
import argparse
import traceback
from os.path import dirname, abspath
project_folder = dirname(dirname(abspath('.')))
if project_folder not in sys.path:
sys.path.append(project_folder)
from breaches.lib.data_record import ValidationError
def eprint(*args, **kwargs): # pylint: disable=w0621
    """Like built-in print(), but always writes to sys.stderr."""
    print(*args, file=sys.stderr, **kwargs)
# Main driver: read raw dump lines from stdin, convert each to JSON via
# ImeshImporter, and write records (and optionally hash entries) to files.
if '__main__' == __name__:
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output_filename", help="File to write json data info for breaches index")
parser.add_argument("-e", "--hash_filename",
help="File to write json data info for hashes index")
args = parser.parse_args()
# The output file is mandatory; bail out with usage help otherwise.
if args.output_filename is None:
parser.print_help()
sys.exit(1)
# Make the package root importable before loading the project importer.
package_base = dirname(dirname(dirname(abspath(__file__))))
if package_base not in sys.path:
sys.path.insert(0, package_base)
from breaches.imesh.imesh import ImeshImporter
importer = ImeshImporter()
dump_type = 'hashed'
output_file = open(args.output_filename, "w", encoding='utf8')
hash_file = None
# The hashes index file is optional (-e flag).
if args.hash_filename:
hash_file = open(args.hash_filename, "w")
processed_count = 0
error_count = 0
line_num = 1
for line in sys.stdin:
try:
hash_record = {}
line_num += 1
record = importer.process_record(line.rstrip(), dump_type, for_import=True)
# A None record means the importer rejected the line; skip it.
if record is None:
eprint("Skipping: " + line.rstrip())
continue
# Build the hash-index entry from whichever hash fields are present.
if hash_file and hasattr(record, 'hash') and record.hash is not None:
hash_record["hash"] = record.hash
if hasattr(record, 'password') and record.password is not None:
hash_record["password"] = record.password
if hasattr(record, 'salt') and record.salt is not None:
hash_record["salt"] = record.salt
hash_record["hashtype"] = record.hashtype
hash_file.write(json.dumps(hash_record) + '\n')
# Delete any fields that are in the dump's ignore list
if importer._import_ignore_fields:
for fname in importer._import_ignore_fields:
if hasattr(record, fname):
delattr(record, fname)
processed_count += 1
output_file.write(record.to_json() + '\n')
# Progress heartbeat every 100k records (stderr, to keep stdout clean).
if 0 == processed_count % 100000:
eprint("Processed %i, Errors: %i" % (processed_count, error_count))
except ValidationError as vexp:
error_count += 1
eprint("ValidationError %r\n while processing line number %i\n %s" %
(vexp, line_num, line))
except Exception as exp:
# Catch-all keeps the pipeline running across malformed lines;
# the traceback is logged so the failure is still diagnosable.
error_count += 1
eprint("Error %r\n while processing line number %i\n %s" %
(exp, line_num, line))
traceback.print_exc(file=sys.stderr)
print("Processed %i, Errors: %i" % (processed_count, error_count))
output_file.close()
if hash_file:
hash_file.close()
| 30.858491 | 106 | 0.602874 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 588 | 0.179762 |
cab39dac3498266e965d7a8495df886ca0322b45 | 31,860 | py | Python | src/setprogramoptions/unittests/test_SetProgramOptionsCMake.py | sandialabs/SetProgramOptions | c0b3cec0e2309d726f450b449c080a33e7df3092 | [
"BSD-3-Clause"
] | 1 | 2021-12-21T21:53:26.000Z | 2021-12-21T21:53:26.000Z | src/setprogramoptions/unittests/test_SetProgramOptionsCMake.py | sandialabs/SetProgramOptions | c0b3cec0e2309d726f450b449c080a33e7df3092 | [
"BSD-3-Clause"
] | null | null | null | src/setprogramoptions/unittests/test_SetProgramOptionsCMake.py | sandialabs/SetProgramOptions | c0b3cec0e2309d726f450b449c080a33e7df3092 | [
"BSD-3-Clause"
] | 3 | 2021-12-27T18:29:04.000Z | 2022-01-31T14:08:41.000Z | #!/usr/bin/env python3
# -*- mode: python; py-indent-offset: 4; py-continuation-offset: 4 -*-
#===============================================================================
#
# License (3-Clause BSD)
# ----------------------
# Copyright 2021 National Technology & Engineering Solutions of Sandia,
# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
"""
"""
from __future__ import print_function
import sys
sys.dont_write_bytecode = True
import contextlib
import io
import os
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from pprint import pprint
import unittest
from unittest import TestCase
# Coverage will always miss one of these depending on the system
# and what is available.
try: # pragma: no cover
import unittest.mock as mock # pragma: no cover
except: # pragma: no cover
import mock # pragma: no cover
from mock import Mock
from mock import MagicMock
from mock import patch
import filecmp
from textwrap import dedent
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from setprogramoptions import *
from .common import *
# ===============================================================================
#
# General Utility Data
#
# ===============================================================================
# When True, tests regenerate their ground-truth files rather than comparing
# against them (flag consumers are outside this chunk). Keep False in production.
global_gen_new_ground_truth_files = False
# global_gen_new_ground_truth_files = True # comment this out for production.
class DEFAULT_VALUE(object):
    """Sentinel type; presumably marks 'use the default' in test helpers.

    Appears unused within this portion of the file -- kept for API parity.
    """
# ===============================================================================
#
# General Utility Functions
#
# ===============================================================================
# ===============================================================================
#
# Mock Helpers
#
# ===============================================================================
# ===============================================================================
#
# Tests
#
# ===============================================================================
class SetProgramOptionsTestCMake(TestCase):
"""
Main test driver for the SetProgramOptions class
"""
def setUp(self):
"""Per-test setup: locate the shared .ini file and this script's path."""
print("")
# Show full diffs on assertion failures (lists here can be long).
self.maxDiff = None
self._filename = find_config_ini(filename="config_test_setprogramoptions.ini")
# Get the location of the unit testing scripts (for file writing tests)
unit_test_path = os.path.realpath(__file__)
self.unit_test_file = os.path.basename(unit_test_path)
self.unit_test_path = os.path.dirname(unit_test_path)
def test_SetProgramOptionsCMake_Template(self):
"""
Basic template test for SetProgramOptions.
This test doesn't really validate any output -- it just runs a basic check.
"""
parser = self._create_standard_parser()
print("-----[ TEST BEGIN ]----------------------------------------")
section = "CMAKE_GENERATOR_NINJA"
print("Section : {}".format(section))
# parse a section
self._execute_parser(parser, section)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_property_inifilepath(self):
"""
Runs a check that loads the filename using `inifilepath` property
rather than the parameter in the c'tor.
"""
parser = self._create_standard_parser(filename=None)
parser.inifilepath = self._filename
print("-----[ TEST BEGIN ]----------------------------------------")
# parse all sections
print("-" * 40)
print("Execute Parser")
print("-" * 40)
# parse=False presumably lists section names without parsing them.
sections = parser.configparserenhanceddata.sections(parse=False)
self.assertGreater(len(sections), 2)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_gen_option_list_bash(self):
"""
Test the ``gen_option_list`` method using the ``bash`` generator.
"""
parser = self._create_standard_parser()
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TRILINOS_CONFIGURATION_ALPHA"
print("Section : {}".format(section))
# parse a section
self._execute_parser(parser, section)
print("-" * 40)
print("Option List")
print("-" * 40)
# Expected bash command-line tokens; the two commented-out -D entries
# are intentionally excluded from the expected output.
option_list_expect = [
'cmake',
'-G=Ninja',
'-DTrilinos_ENABLE_COMPLEX:BOOL=ON',
'-DTrilinos_ENABLE_THREAD_SAFE:BOOL=ON',
# '-DTrilinos_PARALLEL_COMPILE_JOBS_LIMIT=20',
# '-DTrilinos_PARALLEL_LINK_JOBS_LIMIT=4',
'-DTrilinos_ENABLE_Kokkos:BOOL=ON',
'-DTrilinos_ENABLE_KokkosCore:BOOL=ON',
'-DTrilinos_ENABLE_KokkosKernels:BOOL=ON',
'-DKokkosKernels_ENABLE_EXAMPLES:BOOL=ON',
'-DTrilinos_ENABLE_Tpetra:BOOL=ON',
'-DTpetra_INST_DOUBLE:BOOL=ON',
'/path/to/source/dir'
]
option_list_actual = parser.gen_option_list(section, generator="bash")
pprint(option_list_actual, width=200)
self.assertListEqual(option_list_expect, option_list_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_gen_option_list_bash_expandvars(self):
"""
Test the ``gen_option_list`` method using the ``bash`` generator
with variable expansion (UPDATE_01 and UPDATE_03 sections).
"""
parser = self._create_standard_parser()
# parser.exception_control_compact_warnings = True
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_VAR_EXPANSION_UPDATE_01"
print("Section : {}".format(section))
self._execute_parser(parser, section)
print("-" * 40)
print("Option List")
print("-" * 40)
option_list_expect = [
'cmake',
'-DCMAKE_CXX_FLAGS:STRING="${LDFLAGS} -foo"',
]
option_list_actual = parser.gen_option_list(section, generator="bash")
pprint(option_list_actual, width=200)
self.assertListEqual(option_list_expect, option_list_actual)
print("OK")
print("-----[ TEST END ]------------------------------------------")
print("-----[ TEST BEGIN ]----------------------------------------")
# Update 03 will generate the update option
section = "TEST_VAR_EXPANSION_UPDATE_03"
print("Section : {}".format(section))
self._execute_parser(parser, section)
print("-" * 40)
print("Option List")
print("-" * 40)
option_list_expect = [
'cmake',
'-DCMAKE_CXX_FLAGS:STRING="${LDFLAGS} -foo"',
'-DCMAKE_CXX_FLAGS:STRING="${LDFLAGS} -foo -bif"',
]
option_list_actual = parser.gen_option_list(section, generator="bash")
pprint(option_list_actual, width=200)
self.assertListEqual(option_list_expect, option_list_actual)
print("OK")
print("-----[ TEST END ]------------------------------------------")
return 0
def test_SetProgramOptionsCMake_gen_option_list_bash_expandvars_with_unknown_cmake_var_ecl3(self):
"""
Test the ``gen_option_list`` method using the ``bash`` generator when the ECL for
ExpandVarsInTextCMake is set to 3 or lower. This should generate a WARNING.
"""
parser = self._create_standard_parser()
# ECL 3 with verbose (non-compact) warnings: the unknown var is warned
# about instead of raising.
parser.exception_control_compact_warnings = False
parser.exception_control_level = 3
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_VAR_EXPANSION_UPDATE_02"
print("Section : {}".format(section))
# parse the section
self._execute_parser(parser, section)
# Generate a BASH script representing the instructions in the section.
# what answer do we EXPECT:
option_list_expect = [
'cmake', '-DCMAKE_CXX_FLAGS:STRING="${LDFLAGS} -foo"', '-DCMAKE_F90_FLAGS:STRING=" -baz"'
]
# Generate the BASH entries:
option_list_actual = parser.gen_option_list(section, generator="bash")
# Verify the results:
# (args reversed relative to the other tests: actual first, expected second)
self.assertListEqual(option_list_actual, option_list_expect)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_gen_option_list_bash_expandvars_with_unknown_cmake_var_ecl4(self):
"""
Test the ``gen_option_list`` method using the ``bash`` generator when the ECL
for ExpandVarsInTextCMake is set to 4 or higher. This should raise a ``ValueError``.
"""
parser = self._create_standard_parser()
parser.exception_control_level = 5
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_VAR_EXPANSION_UPDATE_02"
print("Section : {}".format(section))
# parse the section
self._execute_parser(parser, section)
# Generate a BASH script representing the instructions in the section.
with self.assertRaises(ValueError):
parser.gen_option_list(section, generator="bash")
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_gen_option_list_cmake_fragment(self):
"""
Test the ``gen_option_list`` method using the ``cmake_fragment`` generator.
"""
parser = self._create_standard_parser()
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TRILINOS_CONFIGURATION_ALPHA"
print("Section : {}".format(section))
# parse a section
self._execute_parser(parser, section)
print("-" * 40)
print("Option List")
print("-" * 40)
# Expected CMake fragment lines (set(...) commands, not -D flags).
option_list_expect = [
'set(Trilinos_ENABLE_COMPLEX ON CACHE BOOL "from .ini configuration")',
'set(Trilinos_ENABLE_THREAD_SAFE ON CACHE BOOL "from .ini configuration")',
'set(Trilinos_PARALLEL_COMPILE_JOBS_LIMIT 20)',
'set(Trilinos_PARALLEL_LINK_JOBS_LIMIT 4)',
'set(Trilinos_ENABLE_Kokkos ON CACHE BOOL "from .ini configuration")',
'set(Trilinos_ENABLE_KokkosCore ON CACHE BOOL "from .ini configuration")',
'set(Trilinos_ENABLE_KokkosKernels ON CACHE BOOL "from .ini configuration")',
'set(KokkosKernels_ENABLE_EXAMPLES ON CACHE BOOL "from .ini configuration")',
'set(Trilinos_ENABLE_Tpetra ON CACHE BOOL "from .ini configuration")',
'set(Tpetra_INST_DOUBLE ON CACHE BOOL "from .ini configuration")'
]
option_list_actual = parser.gen_option_list(section, generator="cmake_fragment")
pprint(option_list_actual, width=200)
self.assertListEqual(option_list_expect, option_list_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_gen_option_list_cmake_fragment_expandvars(self):
"""
Test the ``gen_option_list`` method using the ``cmake_fragment`` generator
with variable expansion across the UPDATE_01/02/03 sections.
"""
parser = self._create_standard_parser()
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_VAR_EXPANSION_UPDATE_02"
print("Section : {}".format(section))
# parse a section
self._execute_parser(parser, section)
# NOTE(review): the result of this call is discarded; it looks redundant
# with the assignment a few lines below -- confirm whether it is needed.
parser.gen_option_list(section, generator="cmake_fragment")
option_list_expect = [
'set(CMAKE_CXX_FLAGS "$ENV{LDFLAGS} -foo" CACHE STRING "from .ini configuration")',
'set(CMAKE_F90_FLAGS "${CMAKE_F90_FLAGS} -baz" CACHE STRING "from .ini configuration")'
]
option_list_actual = parser.gen_option_list(section, generator="cmake_fragment")
print("Expected Output:\n{}\n".format("\n".join(option_list_expect)))
print("Actual Output:\n{}\n".format("\n".join(option_list_actual)))
self.assertListEqual(option_list_expect, option_list_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
# Test that the CMake generator will generate a sequence of operations
# that don't include a FORCE option on an update of an existing CACHE
# value. As far as SPOCM is concerned, it'll generate the CMake as
# defined in the .ini file.
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_VAR_EXPANSION_UPDATE_01"
print("Section : {}".format(section))
# parse a section
self._execute_parser(parser, section)
# NOTE(review): discarded result, same pattern as above.
parser.gen_option_list(section, generator="cmake_fragment")
option_list_expect = [
'set(CMAKE_CXX_FLAGS "$ENV{LDFLAGS} -foo" CACHE STRING "from .ini configuration")',
'set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -bar" CACHE STRING "from .ini configuration")',
]
option_list_actual = parser.gen_option_list(section, generator="cmake_fragment")
print("Expected Output:\n{}\n".format("\n".join(option_list_expect)))
print("Actual Output:\n{}\n".format("\n".join(option_list_actual)))
self.assertListEqual(option_list_expect, option_list_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
# Test that the CMake generator will generate a sequence of operations
# that do include a FORCE option on an update of an existing CACHE
# value. As far as SPOCM is concerned w/rt to CMake fragments, we will
# generate what the .ini file tells us to do and respect that the CMake
# engine will operate as the CMake engine does.
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_VAR_EXPANSION_UPDATE_03"
print("Section : {}".format(section))
# parse a section
self._execute_parser(parser, section)
# NOTE(review): discarded result, same pattern as above.
parser.gen_option_list(section, generator="cmake_fragment")
option_list_expect = [
# Sets CMAKE_CXX_FLAGS the _first_ time, CMAKE_CXX_FLAGS would be set.
'set(CMAKE_CXX_FLAGS "$ENV{LDFLAGS} -foo" CACHE STRING "from .ini configuration")',
# Tries to update CMAKE_CXX_FLAGS the _second_ time without FORCE.
# CMake will not save this.
'set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -bar" CACHE STRING "from .ini configuration")',
# Tries to update CMAKE_CXX_FLAGS again but this time uses FORCE.
# CMake will save this updated value.
'set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -bif" CACHE STRING "from .ini configuration" FORCE)',
]
option_list_actual = parser.gen_option_list(section, generator="cmake_fragment")
print("Expected Output:\n{}\n".format("\n".join(option_list_expect)))
print("Actual Output:\n{}\n".format("\n".join(option_list_actual)))
self.assertListEqual(option_list_expect, option_list_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_param_order_01(self):
"""
Verify ordering/filtering of cmake-cache parameters: the bash generator
emits only a subset of the variables that the cmake_fragment generator
emits (e.g. PARENT_SCOPE-only entries appear only in the fragment).
"""
parser = self._create_standard_parser()
section = "TEST_CMAKE_CACHE_PARAM_ORDER"
print("Section : {}".format(section))
self._execute_parser(parser, section)
print("-----[ TEST BEGIN ]----------------------------------------")
option_list_bash_expect = [
'-DCMAKE_VAR_A:STRING="ON"',
'-DCMAKE_VAR_C:BOOL=ON',
'-DCMAKE_VAR_D:BOOL=ON',
'-DCMAKE_VAR_E:BOOL=ON',
]
option_list_bash_actual = parser.gen_option_list(section, generator="bash")
self.assertListEqual(option_list_bash_expect, option_list_bash_actual)
option_list_cmake_fragment_expect = [
'set(CMAKE_VAR_A ON CACHE STRING "from .ini configuration" FORCE)',
'set(CMAKE_VAR_B ON PARENT_SCOPE)',
'set(CMAKE_VAR_C ON CACHE BOOL "from .ini configuration")',
'set(CMAKE_VAR_D ON CACHE BOOL "from .ini configuration" FORCE)',
'set(CMAKE_VAR_E ON CACHE BOOL "from .ini configuration" FORCE)',
'set(CMAKE_VAR_F ON CACHE BOOL "from .ini configuration" PARENT_SCOPE)',
'set(CMAKE_VAR_G ON CACHE BOOL "from .ini configuration" PARENT_SCOPE)',
]
option_list_cmake_fragment_actual = parser.gen_option_list(section, generator="cmake_fragment")
self.assertListEqual(option_list_cmake_fragment_expect, option_list_cmake_fragment_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_param_order_02(self):
"""
Tests that we correctly generate output if extra flags
are provided such as something to uniqueify a .ini option entry.
"""
parser = self._create_standard_parser()
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_CMAKE_CACHE_PARAM_TEST_02"
print("Section : {}".format(section))
# parse a section
self._execute_parser(parser, section)
option_list_bash_expect = ['-DCMAKE_VAR_A:STRING="ON"']
option_list_bash_actual = parser.gen_option_list(section, generator="bash")
self.assertListEqual(option_list_bash_expect, option_list_bash_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_bash_generator_ignores_PARENT_SCOPE(self):
"""
Verify that the bash generator will not add a ``-D`` entry for a
``opt-set-cmake-var`` that has the ``PARENT_SCOPE`` flag since that
will always force CMake to create a type-1 (non-cache) var assignment.
"""
parser = self._create_standard_parser()
section = "TEST_CMAKE_PARENT_SCOPE_NOT_BASH"
print("Section : {}".format(section))
# NOTE(review): unlike most tests here, the section is not pre-parsed via
# self._execute_parser(); gen_option_list() presumably parses on demand.
print("-----[ TEST BEGIN ]----------------------------------------")
# PARENT_SCOPE entries must produce no bash output at all.
option_list_bash_expect = []
option_list_bash_actual = parser.gen_option_list(section, generator="bash")
self.assertListEqual(option_list_bash_expect, option_list_bash_actual)
print("-----[ TEST END ]------------------------------------------")
print("-----[ TEST BEGIN ]----------------------------------------")
option_list_cmake_fragment_expect = [
'set(FOO_VAR_A "FOO_VAL A" PARENT_SCOPE)',
'set(FOO_VAR_B "FOO_VAL B" CACHE STRING "from .ini configuration" PARENT_SCOPE)'
]
option_list_cmake_fragment_actual = parser.gen_option_list(section, generator="cmake_fragment")
self.assertListEqual(option_list_cmake_fragment_expect, option_list_cmake_fragment_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_fail_on_FORCE_and_PARENT_SCOPE(self):
"""
Tests the case that both PARENT_SCOPE and FORCE are provided.
This will cause a CMake error beacuse the existence of PARENT_SCOPE
forces CMake to use a Type-1 set operation, i.e. a NON-CACHEd
variable. However ``FORCE`` is only valid for a CACHED variable (Type-2).
These two options are mutually exclusive and CMake will fail.
In this case SetProgramOptionsCMake should raise a CATASTROPHIC
error because the operation provided is invalid.
"""
parser = self._create_standard_parser()
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_CMAKE_FAIL_ON_PARENT_SCOPE_AND_FORCE"
print("Section : {}".format(section))
# parse a section
self._execute_parser(parser, section)
# The invalid FORCE+PARENT_SCOPE combination must raise.
with self.assertRaises(ValueError):
parser.gen_option_list(section, generator="bash")
print("-----[ TEST END ]------------------------------------------")
print("OK")
return
def test_SetProgramOptionsCMake_test_STRING_value_surrounded_by_double_quotes(self):
"""
Test STRING values are surrounded by double quotes.
"""
print("\n")
print("Load file: {}".format(self._filename))
parser = self._create_standard_parser()
print("-----[ TEST BEGIN ]----------------------------------------")
section = "TEST_STRING_DOUBLE_QUOTES"
print("Section : {}".format(section))
# STRING-typed values get quoted even when purely numeric ("600").
option_list_expect = ['-DFOO:STRING="foo::bar::baz<Type>"', '-DBAR:STRING="600"']
option_list_actual = parser.gen_option_list(section, generator="bash")
print("-" * 40)
print("Options List Expect")
print("-" * 40)
pprint(option_list_expect, width=120)
print("")
print("Options List Actual")
print("-" * 40)
pprint(option_list_actual, width=120)
self.assertEqual(option_list_expect, option_list_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_opt_remove(self):
"""
This test validates that `opt-remove` will correctly remove a CMake var
that was created using `opt-set-cmake-var`
"""
parser = self._create_standard_parser()
section = "TEST_CMAKE_VAR_REMOVE"
print("Section : {}".format(section))
print("-----[ TEST BEGIN ]----------------------------------------")
# The removed variable must be absent; BAR/BAZ entries remain.
option_list_bash_actual = parser.gen_option_list(section, 'bash')
option_list_bash_expect = ['-DBAR_TEST:STRING="BAR"', '-DBAZ_TEST:STRING="BAZ"']
self.assertListEqual(option_list_bash_expect, option_list_bash_actual)
print("-----[ TEST END ]------------------------------------------")
print("-----[ TEST BEGIN ]----------------------------------------")
option_list_cmake_fragment_actual = parser.gen_option_list(section, 'cmake_fragment')
option_list_cmake_fragment_expect = [
'set(BAR_TEST BAR CACHE STRING "from .ini configuration")',
'set(BAZ_TEST BAZ CACHE STRING "from .ini configuration")'
]
self.assertListEqual(option_list_cmake_fragment_expect, option_list_cmake_fragment_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
def test_SetProgramOptionsCMake_FORCE_only_for_bash(self):
"""
Test that an ``opt-set-cmake-var`` that has a FORCE but does
not specify a TYPE will be assigned STRING by default and will
generate the appropriate ``-D`` entry.
[TEST_CMAKE_VAR_FORCE_ONLY]
opt-set-cmake-var FOO FORCE : "BAR"
should generate:
-DFOO:STRING="BAR"
"""
parser = self._create_standard_parser()
section = "TEST_CMAKE_VAR_FORCE_ONLY"
print("Section : {}".format(section))
print("-----[ TEST BEGIN ]----------------------------------------")
option_list_bash_actual = parser.gen_option_list(section, 'bash')
option_list_bash_expect = [
'-DFOO:STRING="BAR"',
]
self.assertListEqual(option_list_bash_expect, option_list_bash_actual)
print("-----[ TEST END ]------------------------------------------")
print("-----[ TEST BEGIN ]----------------------------------------")
option_list_cmake_fragment_actual = parser.gen_option_list(section, 'cmake_fragment')
option_list_cmake_fragment_expect = [
'set(FOO BAR CACHE STRING "from .ini configuration" FORCE)',
]
self.assertListEqual(option_list_cmake_fragment_expect, option_list_cmake_fragment_actual)
print("-----[ TEST END ]------------------------------------------")
print("OK")
return 0
    def test_SetProgramOptionsCMake_gen_option_list_bash_unresolved_cmake_var_01(self):
        """
        Tests what we do with an unresolved cmake variable encountered in the
        bash generator. The hitch is that if we replace the unresolved cmake
        var with an empty string we may be allowing a ``cmake-fragment`` and a
        ``bash command`` to diverge since the cmake fragment would have additional
        context of pre-existing variables that *might exist* versus the bash command
        where a cmake variable *definitely will not exist*.
        """
        parser = self._create_standard_parser()
        section = "TEST_CMAKE_VAR_IN_BASH_GENERATOR"
        print("Section : {}".format(section))
        print("-----[ TEST BEGIN ]----------------------------------------")
        # Test 1: Validate exception is raised when `exception_control_level`
        #         is the default (4).
        with self.assertRaises(ValueError):
            option_list_actual = parser.gen_option_list(section, generator='bash')
        print("-----[ TEST END ]------------------------------------------")
        print("-----[ TEST BEGIN ]----------------------------------------")
        # Test 2: Reduce the `exception_control_level` so that the exception is
        #         not generated (it is downgraded to a printed warning instead).
        #         - Sets `exception_control_level` to 3
        #         - Sets `exception_control_compact_warnings` to False
        # Note: This test is sensitive to formatting changes to `ExceptionControl`;
        #       if this is a big problem we may need to change this in the future
        #       to be less sensitive to stdout.
        option_list_expect = [
            '-DFOO_VAR:STRING="FOO"',
            '-DFOO_VAR:STRING="BAR "'
        ]
        parser.exception_control_level = 3
        parser.exception_control_compact_warnings = False
        # Capture stdout so the warning banner can be asserted on.
        with io.StringIO() as m_stdout:
            with contextlib.redirect_stdout(m_stdout):
                option_list_actual = parser.gen_option_list(section, generator='bash')
            # Check that the output matches
            self.assertListEqual(option_list_expect, option_list_actual)
            # Check that the exception-control warning message gets printed
            self.assertIn("EXCEPTION SKIPPED", m_stdout.getvalue())
            self.assertIn("Event Type : MINOR", m_stdout.getvalue())
            self.assertIn("Exception : ValueError", m_stdout.getvalue())
        print("-----[ TEST END ]------------------------------------------")
        print("-----[ TEST BEGIN ]----------------------------------------")
        # Test 3: Repeat the previous test but with *compact* warnings from
        #         `exception_control_compact_warnings` set to True to enable
        #         compact (single-line) warnings.
        #         - Sets `exception_control_level` to 3
        #         - Sets `exception_control_compact_warnings` to True
        # Note: This test is sensitive to formatting changes to `ExceptionControl`;
        #       if this is a big problem we may need to change this in the future
        #       to be less sensitive to stdout.
        option_list_expect = [
            '-DFOO_VAR:STRING="FOO"',
            '-DFOO_VAR:STRING="BAR "'
        ]
        parser.exception_control_level = 3
        parser.exception_control_compact_warnings = True
        with io.StringIO() as m_stdout:
            with contextlib.redirect_stdout(m_stdout):
                option_list_actual = parser.gen_option_list(section, generator='bash')
            # Check that the output matches
            self.assertListEqual(option_list_expect, option_list_actual)
            # Check that the exception-control warning message gets printed
            self.assertIn("EXCEPTION SKIPPED", m_stdout.getvalue())
            self.assertIn("(MINOR : ValueError)", m_stdout.getvalue())
        print("-----[ TEST END ]------------------------------------------")
        print("OK")
        return 0
def _create_standard_parser(
self, filename=DEFAULT_VALUE(), debug_level=5, ece_level=4, ece_compact=False
):
if isinstance(filename, DEFAULT_VALUE):
filename = self._filename
output = None
if filename is not None:
print("\n")
print("filename: {}".format(filename))
output = SetProgramOptionsCMake(filename)
else:
output = SetProgramOptionsCMake()
output.debug_level = debug_level
output.exception_control_level = ece_level
output.exception_control_compact_warnings = ece_compact
return output
def _execute_parser(self, parser, section):
output = None
# parse a section
print("-" * 40)
print("Execute Parser")
print("-" * 40)
output = parser.parse_section(section)
# pretty print the output
print("-" * 40)
print("Output")
print("-" * 40)
pprint(output, width=120)
# pretty print the loginfo
print("-" * 40)
print("LogInfo")
print("-" * 40)
parser._loginfo_print()
return output
| 40.227273 | 177 | 0.570088 | 28,120 | 0.882611 | 0 | 0 | 0 | 0 | 0 | 0 | 16,242 | 0.509793 |
cab3e0c73d1808d2ec171a56c51c18d7eab9c9eb | 603 | py | Python | accelRF/rep/base.py | nexuslrf/Accel-RF | 6e1034c27ea2a3f51093a6dcb10310e7fae04e30 | [
"MIT"
] | null | null | null | accelRF/rep/base.py | nexuslrf/Accel-RF | 6e1034c27ea2a3f51093a6dcb10310e7fae04e30 | [
"MIT"
] | null | null | null | accelRF/rep/base.py | nexuslrf/Accel-RF | 6e1034c27ea2a3f51093a6dcb10310e7fae04e30 | [
"MIT"
] | null | null | null | from typing import Tuple
import torch.nn as nn
from torch import Tensor
class Explicit3D(nn.Module):
    """Abstract base for explicit 3D scene representations (e.g. sparse voxel grids).

    Subclasses must implement :meth:`ray_intersect`, :meth:`pruning` and
    :meth:`splitting`.  The annotated attributes describe the voxel state a
    subclass is expected to maintain (names suggest centers/corners bookkeeping
    of a voxel grid -- confirm exact semantics against a concrete subclass).
    """

    # corner_points: Tensor
    center_points: Tensor
    center2corner: Tensor
    n_voxels: int
    n_corners: int
    grid_shape: Tensor
    voxel_size: float
    occupancy: Tensor

    def __init__(self) -> None:
        super().__init__()

    def ray_intersect(
        self, rays_o: Tensor, rays_d: Tensor
    ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        """Intersect rays (origins ``rays_o``, directions ``rays_d``) with the grid."""
        # The original body was the bare expression ``NotImplemented``, a no-op
        # that silently returned None; raising makes the abstract contract explicit.
        raise NotImplementedError

    def pruning(self, keep: Tensor) -> None:
        """Discard voxels according to the boolean mask ``keep``."""
        raise NotImplementedError

    def splitting(self) -> None:
        """Subdivide voxels to increase resolution."""
        raise NotImplementedError
cab43f37667bf4a445a87188b43c261b7be4262e | 1,250 | py | Python | URLSHORT/lib.py | its-mr-monday/Url-Shortener | bca0f9c1a8d6c5a686704d7a410351b9ce31439b | [
"MIT"
] | null | null | null | URLSHORT/lib.py | its-mr-monday/Url-Shortener | bca0f9c1a8d6c5a686704d7a410351b9ce31439b | [
"MIT"
] | null | null | null | URLSHORT/lib.py | its-mr-monday/Url-Shortener | bca0f9c1a8d6c5a686704d7a410351b9ce31439b | [
"MIT"
] | null | null | null | import random
import string
import requests
def SQL_SYNTAX_CHECK(input: str) -> bool:
    """Return True when ``input`` contains none of the denylisted SQL tokens.

    NOTE: a keyword denylist is not real SQL-injection protection; the actual
    defense must be parameterized queries at the database layer.
    """
    bad_tokens = ('*', ';', 'SELECT ', ' FROM ', ' TRUE ', ' WHERE ')
    return not any(token in input for token in bad_tokens)
def validateRegistration(name, uname, email, password, confirm):
    """Validate sign-up fields; return "Success" or the first error message.

    Checks run in order: name (1-45 chars), username (1-20), email (1-100),
    non-empty password, and password/confirmation match.
    """
    checks = (
        (not 1 <= len(name) <= 45, "Error invalid name"),
        (not 1 <= len(uname) <= 20, "Error invalid username"),
        (not 1 <= len(email) <= 100, "Error invalid email"),
        (len(password) < 1, "Error invalid password"),
        (password != confirm, "Error passwords do not match"),
    )
    for failed, message in checks:
        if failed:
            return message
    return "Success"
def validate_email(email: str) -> bool:
    """Return True when ``email`` contains exactly one ``@`` character.

    Replaces the manual counting loop with ``str.count`` (same semantics:
    zero or multiple ``@`` characters are rejected).
    """
    return email.count("@") == 1
def validate_link(link: str) -> bool:
    """Return True when an HTTP GET of ``link`` answers with status 200.

    Bug fix: the original ignored ``link`` entirely and always requested
    ``http://www.example.com``, so every link "validated".  Network errors
    (DNS failure, timeout, malformed URL) now return False instead of
    propagating an exception.
    """
    try:
        return requests.get(link).status_code == 200
    except requests.RequestException:
        return False
def generate_link(length: int = 10) -> str:
    """Return a random alphanumeric slug.

    ``length`` generalizes the previously hard-coded 10; the character set
    (ASCII letters + digits) is unchanged.  For security-sensitive tokens,
    prefer the ``secrets`` module over ``random``.
    """
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
cab5b79870681508e9b3e9a34436132f63a674f8 | 3,985 | py | Python | main.py | hamolicious/Python-Word-Search-Generator | 9a8ce3f54afd1e2dd286b89bdb9052bb03023ccf | [
"Apache-2.0"
] | null | null | null | main.py | hamolicious/Python-Word-Search-Generator | 9a8ce3f54afd1e2dd286b89bdb9052bb03023ccf | [
"Apache-2.0"
] | null | null | null | main.py | hamolicious/Python-Word-Search-Generator | 9a8ce3f54afd1e2dd286b89bdb9052bb03023ccf | [
"Apache-2.0"
] | null | null | null |
from random import choice, randint
import os
def generate_grid(w, h):
    """Return an ``h``-row by ``w``-column grid of single-space strings.

    The original declared unused ``global width, height`` and an unused
    alphabet constant; both are removed, and the nested append loops are
    replaced with a comprehension.  Each row is an independent list, so
    mutating one cell does not affect other rows.
    """
    return [[' '] * w for _ in range(h)]
def populate_grid(words, grid):
    """Place ``words`` into ``grid`` in place, then fill blanks with random letters.

    Placement directions are right ``(1, 0)``, down ``(0, 1)`` and diagonal
    ``(1, 1)``.  Each word has a budget of 10 failed attempts before being
    skipped; a word longer than the grid triggers the ValueError branch (empty
    ``randint`` range) and is skipped immediately.  Returns the mutated grid.
    """
    for word in words:
        done = False
        tries = 10
        while not done:
            try:
                # NOTE(review): both bounds subtract len(word), which is only
                # needed along the axes the chosen direction actually moves;
                # this needlessly restricts horizontal/vertical placements --
                # confirm intended.
                start_x = randint(0, len(grid[0]) - len(word))
                start_y = randint(0, len(grid) - len(word))
                vel = choice([(1, 0), (0, 1), (1, 1)])
            except ValueError:
                # Word cannot fit in the grid at all: give up on this word.
                done = True
                break
            # Walk the candidate path; a cell is acceptable when blank or when
            # it already equals word[i].
            valid_spot = True
            x, y = start_x, start_y
            for i in range(len(word)):
                if grid[y][x] == ' ' or grid[y][x] == word[i]:
                    pass
                else:
                    # Collision: burn one attempt and re-roll a position.
                    valid_spot = False
                    tries -= 1
                    break
                x += vel[0]
                y += vel[1]
            if tries <= 0:
                done = True
            if valid_spot:
                # Commit the word, uppercased, along the chosen direction.
                # NOTE(review): placed letters are uppercase but word[i] here is
                # compared lowercase above, so the crossing-friendly equality
                # branch can never match a previously placed word -- confirm.
                x, y = start_x, start_y
                for i in range(len(word)):
                    grid[y][x] = word[i].upper()
                    x += vel[0]
                    y += vel[1]
                done = True
    # Fill every remaining blank cell with a random uppercase letter.
    alphabet = 'qwertyuiopasdfghjklzxcvbnm'.upper()
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            if grid[i][j] == ' ':
                grid[i][j] = choice(alphabet)
    return grid
def draw_grid(grid):
    """Print the grid to stdout, one row per line, with '|' between letters."""
    lines = []
    for row in grid:
        rendered = '|' + '|'.join(row) + '|' if row else ''
        lines.append('\n' + rendered)
    print(''.join(lines))
def use_random(word_count=5):
    """Return up to ``word_count`` randomly chosen words from ``words.txt``.

    Improvements over the original:
    - returns ``[]`` when the file is missing (was a NameError),
    - returns at most as many words as the file holds (was an IndexError
      for files with fewer than five words),
    - ``word_count`` generalizes the previously hard-coded 5,
    - uses ``random.sample`` instead of a throwaway class sorted by a
      random index (same effect: distinct words in random order).
    """
    from random import sample  # module top-level only imports choice/randint

    path = 'words.txt'
    if not os.path.exists(path):
        return []
    with open(path, 'r') as word_file:
        # Strip newlines/whitespace and drop blank lines.
        words = [line.strip() for line in word_file if line.strip()]
    return sample(words, min(word_count, len(words)))
def get_details():
    """Interactively prompt for grid width, height and the word bank.

    Returns ``(width, height, words)``.  At the word prompt, pressing ENTER
    pulls random words from ``words.txt`` and ``q`` finishes manual entry;
    words may only contain lowercase ASCII letters.
    """
    # width: re-prompt until the input is all digits.
    while True:
        w = input('\nWhat is the width of the grid in characters?\n[>> ')
        if w.isdigit():
            w = int(w)
            break
    # height: same validation as width.
    while True:
        h = input('\nWhat is the height of the grid in characters?\n[>> ')
        if h.isdigit():
            h = int(h)
            break
    # words: collect until ENTER (random bank) or 'q' (done).
    words = []
    while True:
        wrd = input('Please add the words to the bank, you can press ENTER to use random words and press "q" when you\'re finished.\n[>> ').strip().lower()
        if wrd == '':
            words = use_random()
            break
        if wrd == 'q':
            break
        for let in wrd:
            if let not in 'qwertyuiopasdfghjklzxcvbnm':
                print('Invalid word, words can only contain the following letters:', ''.join(i for i in sorted('qwertyuiopasdfghjklzxcvbnm')))
                break
        else:
            # for/else: only append when no invalid letter triggered a break.
            words.append(wrd)
    return w, h, words
def clear():
    """Scroll the console clear by printing fifty blank lines."""
    print(50 * '\n')
# Entry point: gather puzzle parameters once, then regenerate and display a
# fresh word search on every keypress (terminate with Ctrl+C / EOF).
width, height, words = get_details()
while True:
    grid = generate_grid(width, height)
    grid = populate_grid(words, grid)
    clear()
    draw_grid(grid)
    input('>')
| 20.863874 | 156 | 0.430866 | 221 | 0.055458 | 0 | 0 | 0 | 0 | 0 | 0 | 480 | 0.120452 |
cab84df34eb13ac7d2ea53e10575d0f1080c819c | 4,416 | py | Python | Scripts/HLS_Stream_Dowloader/hls_dowloader.py | WilliamMokoena/portfolio | 8176d81e669279510af9fc7ff98aa769603cff60 | [
"MIT"
] | null | null | null | Scripts/HLS_Stream_Dowloader/hls_dowloader.py | WilliamMokoena/portfolio | 8176d81e669279510af9fc7ff98aa769603cff60 | [
"MIT"
] | null | null | null | Scripts/HLS_Stream_Dowloader/hls_dowloader.py | WilliamMokoena/portfolio | 8176d81e669279510af9fc7ff98aa769603cff60 | [
"MIT"
] | null | null | null | import sys, os, asyncio, shutil
import wget
from ffmpeg import FFmpeg
# Func calls wget to download the file given in url arg
def webget(url):
    """Download ``url`` into the current working directory via ``wget.download``."""
    wget.download(url)
# Fuc calls ffmpeg to transcode .m3u8 to .mp4
def transcode(ffmpeg):
    """Run a configured ``FFmpeg`` job, echoing its output, until completion.

    Registers event handlers that mirror ffmpeg's stderr/progress stream to
    stdout, then blocks on the asynchronous ``execute()`` coroutine.
    """
    @ffmpeg.on('stderr')
    def on_stderr(line):
        print(line)

    @ffmpeg.on('progress')
    def on_progress(progress):
        print(progress)

    @ffmpeg.on('completed')
    def on_completed():
        print('\nCompleted')

    @ffmpeg.on('error')
    def on_error(code):
        print('Error:', code)

    loop = asyncio.get_event_loop()
    loop.run_until_complete(ffmpeg.execute())
    # NOTE(review): closing the default event loop makes a second call to this
    # function fail on the same thread -- confirm single-use is intended.
    loop.close()
def parse_m3u8_url(input_url):
    """Split a playlist URL into ``(base_url, filename)``.

    ``base_url`` keeps its trailing slash; any query string is stripped from
    the filename.  Fixes the original's slicing bug: when the filename part
    was empty (URL ending in '/'), ``input_url[:-0]`` yielded an empty base
    URL instead of the full prefix.
    """
    slash_pos = input_url.rfind('/')
    base_url = input_url[: slash_pos + 1]
    input_m3u8 = input_url[slash_pos + 1 :].split('?', 1)[0]
    return base_url, input_m3u8
def create_manifest(input_m3u8):
    """Rewrite ``./<input_m3u8>`` as ``./manifest.m3u8`` with local segment names.

    Lines carrying a query string (``http`` + ``?`` + ``=``) are kept verbatim;
    other absolute ``https`` URLs are reduced to their basename; everything
    else (tags, relative segment names) is copied through unchanged.

    Fixes two defects of the original:
    - ``line[:-1]`` chopped the last character of a final line that had no
      trailing newline; ``rstrip('\\n')`` only removes the newline.
    - the output was opened in append mode, so a manifest left over from a
      previous run accumulated duplicate entries; it is now truncated.
    """
    with open(f'./{input_m3u8}', 'r') as f:
        source_lines = f.readlines()

    manifest = []
    for line in source_lines:
        line = line.rstrip('\n')
        if 'http' in line and '?' in line and '=' in line:
            manifest.append(line)
        elif 'https' in line:
            manifest.append(line.rsplit('/', 1)[-1])
        else:
            manifest.append(line)

    with open('./manifest.m3u8', 'w') as f:
        for entry in manifest:
            f.write(entry + '\n')
def cleanup_working_dir(input_m3u8, storage_folder):
    """Move the download/transcode artifacts out of the working directory.

    Creates ``storage_folder`` (and a nested ``storage_folder/storage_folder``
    for intermediates), then moves every ``.mp4`` into ``storage_folder`` and
    every ``.m3u8``/``.ts`` file into the nested folder.
    """
    try:
        # Create folder given in arg
        os.mkdir(storage_folder)
    except FileExistsError:
        print('\nWARNING: Output folder exists')
    cwd = os.getcwd()
    files = os.listdir()
    print(f'\nMESSAGE: Cleaning up and Packaging things nicely')
    # NOTE(review): unlike the guarded mkdir above, this nested mkdir raises
    # FileExistsError on a re-run -- confirm intended.
    os.mkdir(f'{storage_folder}/{storage_folder}')
    for f in files:
        # Logic for moving the output file (.mp4 -> storage_folder)
        if f[-3:] == 'mp4':
            original = f'{cwd}/{f}'
            target = f'{cwd}/{storage_folder}'
            # Moving the output file
            print(f'\nMESSAGE: Moving {input_m3u8} to {storage_folder}')
            shutil.move(original,target)
        # Playlist files go to the nested intermediates folder.
        if f[-4:] == 'm3u8':
            original = f'{os.getcwd()}/{f}'
            target = f'{os.getcwd()}/{storage_folder}/{storage_folder}'
            shutil.move(original,target)
        # Segment files likewise.
        if f[-2:] == 'ts':
            original = f'{os.getcwd()}/{f}'
            target = f'{os.getcwd()}/{storage_folder}/{storage_folder}'
            shutil.move(original,target)
# Read cli args : 'hls-downloader.py ["m3u8_url"] ["mp4_output_name"] ["storage_folder"]'
input_url = sys.argv[1]
output_filename = sys.argv[2]
storage_folder = "./site/media"
base_url, input_m3u8 = parse_m3u8_url(input_url)

# Call wget to download files (skip when the playlist is already on disk).
if input_m3u8 in os.listdir():
    print(f'WARNING: {input_m3u8} already exists')
else:
    print(f'MESSAGE: Downloading m3u8 file')
    webget(input_url)

print(f'\nMESSAGE: Creating manifest.m3u8')
create_manifest(input_m3u8)

print(f'\nMESSAGE: Reading {input_m3u8}')
# movcloud streams ship a 'playlist.m3u8'; otherwise read the rewritten manifest.
data = None
if 'movcloud' in input_url:
    with open('playlist.m3u8', 'r') as f:
        data = f.read()
elif 'manifest.m3u8' in os.listdir():
    with open('manifest.m3u8', 'r') as f:
        data = f.read()

if data != None:  # NOTE(review): `is not None` is the idiomatic comparison.
    contents = data.split('\n')
    print(f'\nMESSAGE: Attempting to download items from {input_m3u8}')
    for item in contents:
        # Skip anything already downloaded.
        if item in os.listdir():
            continue
        # URLs carrying query strings are fetched verbatim.
        if 'http' in item and '?' in item and '=' in item:
            webget(item)
        if 'movcloud' in item:
            item_sp = item.split('/')
            if item_sp[len(item_sp)-1] in os.listdir():
                continue
            else:
                webget(item)
        else:
            stxt = item[0:5]
            entxt = item[-2:]
            if stxt == 'https':
                l = item.split('/')
                # NOTE(review): this indexes the URL *string* by the number of
                # path segments; `l[-1]` was probably intended, and `name` is
                # never used afterwards -- confirm.
                name = item[len(l)-1]
                webget(item)
            elif entxt == 'ts':
                # Relative segment: re-base it on the playlist URL minus its filename.
                cut = slice(0,-len(input_m3u8))
                webget(input_url[cut] + item)

# Configuring ffmpeg
## ffmpeg -i "./folder/file.m3u8" -c copy file.mp4
_ffmpeg = FFmpeg().option('n').input('./manifest.m3u8').output(output_filename,{'c': 'copy'})
print(f'\n\nMESSAGE: Running command: ffmpeg -i ./manifest.m3u8 -c copy {output_filename}')
transcode(_ffmpeg)
cleanup_working_dir(input_m3u8, storage_folder)
| 25.976471 | 93 | 0.584013 | 0 | 0 | 0 | 0 | 291 | 0.065897 | 0 | 0 | 1,306 | 0.295743 |
cab89ee2fce3fceaa9fcc3a842fbaa80ba1da5df | 2,532 | py | Python | fairlearn/metrics/__init__.py | alliesaizan/fairlearn | 846ce6cdaf188e32a545d3f90197515a4a5bc471 | [
"MIT"
] | 1,142 | 2019-10-14T18:05:46.000Z | 2022-03-30T06:56:54.000Z | fairlearn/metrics/__init__.py | alliesaizan/fairlearn | 846ce6cdaf188e32a545d3f90197515a4a5bc471 | [
"MIT"
] | 623 | 2019-10-14T17:11:25.000Z | 2022-03-31T17:46:54.000Z | fairlearn/metrics/__init__.py | alliesaizan/fairlearn | 846ce6cdaf188e32a545d3f90197515a4a5bc471 | [
"MIT"
] | 299 | 2019-10-15T00:09:53.000Z | 2022-03-30T12:35:27.000Z | # Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
"""Functionality for computing metrics, with a particular focus on disaggregated metrics.
For our purpose, a metric is a function with signature
``f(y_true, y_pred, ....)``
where ``y_true`` are the set of true values and ``y_pred`` are
values predicted by a machine learning algorithm. Other
arguments may be present (most often sample weights), which will
affect how the metric is calculated.
This module provides the concept of a *disaggregated metric*.
This is a metric where in addition to ``y_true`` and ``y_pred``
values, the user provides information about group membership
for each sample.
For example, a user could provide a 'Gender' column, and the
disaggregated metric would contain separate results for the subgroups
'male', 'female' and 'nonbinary' indicated by that column.
The underlying metric function is evaluated for each of these three
subgroups.
This extends to multiple grouping columns, calculating the metric
for each combination of subgroups.
"""
import sys as _sys
from ._metric_frame import MetricFrame # noqa: F401
from ._make_derived_metric import make_derived_metric # noqa: F401
from ._generated_metrics import _generated_metric_dict
from ._disparities import ( # noqa: F401
demographic_parity_difference,
demographic_parity_ratio,
equalized_odds_difference,
equalized_odds_ratio)
from ._extra_metrics import ( # noqa: F401
true_positive_rate,
true_negative_rate,
false_positive_rate,
false_negative_rate,
_balanced_root_mean_squared_error,
mean_prediction,
selection_rate,
_mean_overprediction,
_mean_underprediction,
count)
# Attach the generated metrics of the form
# `<metric>_{difference,ratio,group_min,group_max}` as attributes of this
# module (the original comment was truncated mid-sentence).
_module_obj = _sys.modules[__name__]
for _name, _func in _generated_metric_dict.items():
    setattr(_module_obj, _name, _func)

# ============================================
# Build list of items to be listed in the docs
_core = [
    "MetricFrame",
    "make_derived_metric"
]
_disparities = [
    "demographic_parity_difference",
    "demographic_parity_ratio",
    "equalized_odds_difference",
    "equalized_odds_ratio"
]
_extra_metrics = [
    "true_positive_rate",
    "true_negative_rate",
    "false_positive_rate",
    "false_negative_rate",
    "mean_prediction",
    "selection_rate",
    "count"
]
# `sorted` over a dict iterates its keys and already returns a list, so the
# original `list(sorted(..._dict.keys()))` double wrapping is unnecessary.
__all__ = _core + _disparities + _extra_metrics + sorted(_generated_metric_dict)
| 30.142857 | 93 | 0.75079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,566 | 0.618483 |
cab923dc066cf5ab57a2921c1652c8204411aadb | 4,360 | py | Python | detector/ssd/ssd.py | Senyaaa/detection-experiments | 5e80dd458e886ca27db5420d25ade8f9d74ae5a8 | [
"Apache-2.0"
] | 5 | 2020-06-08T08:21:03.000Z | 2021-03-03T21:54:06.000Z | detector/ssd/ssd.py | Senyaaa/detection-experiments | 5e80dd458e886ca27db5420d25ade8f9d74ae5a8 | [
"Apache-2.0"
] | 3 | 2021-02-06T20:21:02.000Z | 2021-06-06T18:46:27.000Z | detector/ssd/ssd.py | Senyaaa/detection-experiments | 5e80dd458e886ca27db5420d25ade8f9d74ae5a8 | [
"Apache-2.0"
] | 2 | 2020-06-08T08:21:05.000Z | 2021-02-06T11:44:04.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from detector.ssd.utils import box_utils
from nn.separable_conv_2d import SeparableConv2d
from fpn.extension import Extension
from detector.ssd.to_predictions import ToPredictions
class SSD(nn.Module):
    """Single Shot MultiBox Detector assembled from a backbone plus extras.

    Two feature maps come from the backbone (its last two outputs) and four
    more from the :class:`Extension` module; each of the six scales gets a
    classification header and a box-regression header predicting 6 anchors
    per spatial location.
    """

    def __init__(self, num_classes, backbone, arch_name,
                 batch_size=None, config=None):
        """Compose a SSD model using the given components.

        :param num_classes: number of classes each anchor scores.
        :param backbone: feature extractor exposing ``feature_channels()``.
        :param arch_name: identifier string stored for bookkeeping.
        :param batch_size: optional fixed batch size (see ``compute_header``).
        :param config: detection configuration, stored for subclasses.
        """
        super(SSD, self).__init__()
        self.num_classes = num_classes
        self.backbone = backbone
        self.arch_name = arch_name
        self.batch_size = batch_size  # to ease the inference model
        feature_channels = self.backbone.feature_channels()
        # Extra feature maps appended after the backbone's last map.
        self.extras = Extension(
            bootstrap_channels=feature_channels[-1],
            out_channels=[512, 256, 256, 64],
            conv=SeparableConv2d)
        # One classification header per scale: 6 anchors * num_classes channels.
        self.classification_headers = nn.ModuleList([
            SeparableConv2d(in_channels=feature_channels[-2],
                            out_channels=6 * num_classes,
                            kernel_size=3, padding=1),
            SeparableConv2d(in_channels=feature_channels[-1],
                            out_channels=6 * num_classes,
                            kernel_size=3, padding=1),
            SeparableConv2d(in_channels=512, out_channels=6 * num_classes,
                            kernel_size=3, padding=1),
            SeparableConv2d(in_channels=256, out_channels=6 * num_classes,
                            kernel_size=3, padding=1),
            SeparableConv2d(in_channels=256, out_channels=6 * num_classes,
                            kernel_size=3, padding=1),
            nn.Conv2d(in_channels=64, out_channels=6 * num_classes,
                      kernel_size=1),
        ])
        # Matching regression headers: 6 anchors * 4 box coordinates.
        self.regression_headers = nn.ModuleList([
            SeparableConv2d(in_channels=feature_channels[-2],
                            out_channels=6 * 4,
                            kernel_size=3, padding=1, onnx_compatible=False),
            SeparableConv2d(in_channels=feature_channels[-1],
                            out_channels=6 * 4, kernel_size=3,
                            padding=1, onnx_compatible=False),
            SeparableConv2d(in_channels=512, out_channels=6 * 4, kernel_size=3,
                            padding=1, onnx_compatible=False),
            SeparableConv2d(in_channels=256, out_channels=6 * 4, kernel_size=3,
                            padding=1, onnx_compatible=False),
            SeparableConv2d(in_channels=256, out_channels=6 * 4, kernel_size=3,
                            padding=1, onnx_compatible=False),
            nn.Conv2d(in_channels=64, out_channels=6 * 4, kernel_size=1),
        ])
        self.config = config

    def forward(self, x):
        """Return raw ``(confidences, locations)`` concatenated over all scales."""
        confidences = []
        locations = []
        cs = self.backbone.forward(x)
        # Only the backbone's last two feature maps feed detection headers.
        cs = cs[-2:]
        for i, c in enumerate(cs):
            confidence, location = self.compute_header(i, c)
            x = c
            confidences.append(confidence)
            locations.append(location)
        # Remaining headers consume the extension's feature maps.
        extra_x = self.extras.forward(x)
        header_index = i + 1
        for ex in extra_x:
            confidence, location = self.compute_header(header_index, ex)
            header_index += 1
            confidences.append(confidence)
            locations.append(location)
        confidences = torch.cat(confidences, 1)
        locations = torch.cat(locations, 1)
        return confidences, locations

    def compute_header(self, i, x):
        """Apply header pair ``i`` to feature map ``x``.

        Outputs are permuted to channel-last and flattened to
        ``(batch, anchors, num_classes)`` and ``(batch, anchors, 4)``; a fixed
        ``self.batch_size`` (when set) overrides ``x.size(0)``.
        """
        batch_size = self.batch_size or x.size(0)
        confidence = self.classification_headers[i](x)
        confidence = confidence.permute(0, 2, 3, 1).contiguous()
        confidence = confidence.reshape(batch_size, -1, self.num_classes)
        location = self.regression_headers[i](x)
        location = location.permute(0, 2, 3, 1).contiguous()
        location = location.reshape(batch_size, -1, 4)
        return confidence, location

    def load_backbone_weights(self, path):
        """Load a backbone state dict from ``path``, mapping storages to CPU."""
        self.backbone.load_state_dict(
            torch.load(path, map_location=lambda storage, loc: storage),
            strict=True)

    def freeze_backbone(self):
        """Disable gradient updates for every backbone parameter."""
        for p in self.backbone.parameters():
            p.requires_grad = False
class SSDInference(SSD):
    """Inference-time SSD: decodes raw head outputs into final predictions.

    Appends a :class:`ToPredictions` stage built from the priors and
    variances carried by ``config``.
    """
    def __init__(self, num_classes, backbone, arch_name,
                 batch_size=None, config=None):
        super(SSDInference, self).__init__(num_classes, backbone, arch_name,
                                           batch_size, config)
        # NOTE(review): `config` is read immediately here, so None (the
        # default) would fail -- confirm callers always pass a config.
        self.to_predictions = ToPredictions(self.config.priors,
                                            self.config.center_variance,
                                            self.config.size_variance)

    def forward(self, x):
        """Run the detector and convert its raw outputs to (confidences, boxes)."""
        confidences, locations = super(SSDInference, self).forward(x)
        confidences, boxes = self.to_predictions.forward(confidences, locations)
        return confidences, boxes
| 32.296296 | 74 | 0.682339 | 4,084 | 0.936697 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.019495 |
caba47926befb920d795eb25c2284edc52eb5271 | 1,044 | py | Python | commands/serverinfo.py | Stereo528/Osmium | cd17912b2cd559238b25c75f2d53785c72452aca | [
"MIT"
] | 2 | 2021-12-15T10:50:45.000Z | 2022-02-15T13:15:30.000Z | commands/serverinfo.py | Stereo528/Osmium | cd17912b2cd559238b25c75f2d53785c72452aca | [
"MIT"
] | 6 | 2021-02-23T17:08:09.000Z | 2021-02-23T23:33:55.000Z | commands/serverinfo.py | Stereo528/Osmium | cd17912b2cd559238b25c75f2d53785c72452aca | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from main import getAlias
class Util(commands.Cog):
    """Discord cog providing server-information commands."""

    def __init__(self, client):
        self.client = client

    @commands.command(aliases=getAlias("serverinfo"))
    async def serverinfo(self, ctx):
        """Reply with an embed summarizing the guild the command was used in."""
        embed = discord.Embed(
            title=f"Info for {ctx.guild.name}",
            description=f"Emoji Limit: {ctx.guild.emoji_limit} \nFilesize Limit: {round(ctx.guild.filesize_limit/1000000)} megabytes",
            color=discord.Color.blurple()
        )
        embed.set_thumbnail(url=ctx.guild.icon_url)
        embed.add_field(name="Current Members", value=ctx.guild.member_count, inline=True)
        embed.add_field(name="Other Data", value=f"Nitro Level: {ctx.guild.premium_tier}, Nitro Boosters: {ctx.guild.premium_subscription_count}", inline=False)
        embed.add_field(name="Owner Info", value=f"Server Owner: {ctx.guild.owner} \nOwner ID: {ctx.guild.owner_id}", inline=False)
        await ctx.send(embed=embed)
def setup(client):
    """Extension entry point: register the Util cog with the bot."""
    client.add_cog(Util(client))
cababbb0c22d7ff63e14475c4594f1e5800c07e0 | 6,019 | py | Python | sklearn_pipeline_enhancements/shared/transformers.py | Kgoetsch/sklearn_pipeline_enhancements | 9afb9e03d762c3d8ecd19639f3b84f38cb33a71d | [
"MIT"
] | 11 | 2017-04-18T00:19:05.000Z | 2020-04-06T22:16:35.000Z | sklearn_pipeline_enhancements/shared/transformers.py | OlliePage/sklearn_pipeline_enhancements | 9afb9e03d762c3d8ecd19639f3b84f38cb33a71d | [
"MIT"
] | null | null | null | sklearn_pipeline_enhancements/shared/transformers.py | OlliePage/sklearn_pipeline_enhancements | 9afb9e03d762c3d8ecd19639f3b84f38cb33a71d | [
"MIT"
] | 5 | 2017-12-05T20:14:55.000Z | 2021-03-29T12:25:36.000Z | import numpy as np
import pandas as pd
from patsy.highlevel import dmatrix
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.pipeline import _name_estimators, Pipeline
__author__ = 'kgoetsch'
def make_dataframeunion(steps):
    """Build a :class:`DataFrameUnion` with auto-generated step names
    (analogous to sklearn's ``make_union``/``make_pipeline`` helpers)."""
    return DataFrameUnion(_name_estimators(steps))
class FactorExtractor(TransformerMixin, BaseEstimator):
    """Stateless transformer that selects one column from a DataFrame.

    In:  ``pd.DataFrame`` plus the column name given at construction.
    Out: ``pd.Series`` (the selected column).
    """
    def __init__(self, factor):
        self.factor = factor

    def fit(self, *_):
        # Nothing to learn; present for pipeline compatibility.
        return self

    def transform(self, data):
        # Single-column selection on a DataFrame yields a Series.
        return data[self.factor]
class RenameField(TransformerMixin, BaseEstimator):
    """Rename a Series to the name supplied at construction.

    In:  ``pd.Series``
    Out: the same ``pd.Series``, with its ``name`` attribute changed in place.
    """
    def __init__(self, new_name):
        self.new_name = new_name

    def fit(self, *_):
        # Stateless; present for pipeline compatibility.
        return self

    def transform(self, data):
        # Mutates the incoming Series and returns it (no copy is made).
        data.name = self.new_name
        return data
class FillNA(TransformerMixin, BaseEstimator):
    """Fill missing values in a Series with a fixed replacement.

    In:  ``pd.Series``
    Out: ``pd.Series`` with NaNs replaced (default replacement: ``'missing'``).
    """
    def __init__(self, na_replacement=None):
        # None keeps the historical default of 'missing'.
        self.NA_replacement = 'missing' if na_replacement is None else na_replacement

    def fit(self, *_):
        return self

    def transform(self, data):
        return data.fillna(self.NA_replacement)
class DataFrameUnion(TransformerMixin, BaseEstimator):
    """Apply several transformers to the same input and concatenate the results.

    In:  list of ``(name, transformer)`` tuples.
    Out: ``pd.DataFrame`` (column-wise concatenation of each result).

    Unlike sklearn's ``FeatureUnion`` this keeps pandas column labels, and it
    re-aligns later ``transform`` results to the column set captured on the
    first call (see ``_merge_results``).
    """
    def __init__(self, transformer_list):
        # Column layout captured on the first merge; later merges are
        # re-aligned to it.
        self.feature_names = None
        self.transformer_list = transformer_list  # (string, Transformer)-tuple list

    def __getitem__(self, attrib):
        # Dict-style access to instance attributes.
        return self.__dict__[attrib]

    def transform(self, X):
        """Transform X separately by each transformer, concatenate results.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data to be transformed.

        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        Xs = (self._transform_one(trans, X)
              for name, trans in self.transformer_list)
        df_merged_result = self._merge_results(Xs)
        return df_merged_result

    def fit(self, X, y=None):
        """Fit all transformers using X.

        :param X: pd.DataFrame -- input data used to fit the transformers.
        :param y: optional targets, forwarded to each transformer's ``fit``.
        """
        transformers = (self._fit_one_transformer(trans, X, y)
                        for name, trans in self.transformer_list)
        self._update_transformer_list(transformers)
        return self

    def _merge_results(self, transformed_result_generator):
        # '' is a sentinel meaning "no frame accumulated yet".
        df_merged_result = ''
        for transformed in transformed_result_generator:
            # Promote Series results to single-column DataFrames.
            if isinstance(transformed, pd.Series):
                transformed = pd.DataFrame(data=transformed)
            if not isinstance(df_merged_result, pd.DataFrame):
                df_merged_result = transformed
            else:
                df_merged_result = pd.concat([df_merged_result, transformed], axis=1)
        if self.feature_names is None:
            # First call: remember the column layout.
            self.feature_names = df_merged_result.columns
        elif (len(self.feature_names) != len(df_merged_result.columns)) or \
                ((self.feature_names != df_merged_result.columns).any()):
            # Column set drifted from the first call: rebuild a zero-filled
            # frame with the original columns and overlay the new values.
            custom_dataframe = pd.DataFrame(data=0, columns=self.feature_names, index=df_merged_result.index)
            custom_dataframe.update(df_merged_result)
            df_merged_result = custom_dataframe
        return df_merged_result

    def _update_transformer_list(self, transformers):
        # Replace each transformer with its fitted counterpart, in place.
        self.transformer_list[:] = [
            (name, new)
            for ((name, old), new) in zip(self.transformer_list, transformers)
        ]

    def _fit_one_transformer(self, transformer, X, y):
        return transformer.fit(X, y)

    def _transform_one(self, transformer, X):
        return transformer.transform(X)
def extract_and_denull(var, na=0):
    """Pipeline that pulls column ``var`` from a DataFrame and fills NaNs with ``na``."""
    return Pipeline([
        ('extract', FactorExtractor(var)),
        ('fill_na', FillNA(na))
    ])
class ConvertToArray(TransformerMixin, BaseEstimator):
    """Convert a DataFrame into a C-contiguous NumPy array.

    In:  ``pd.DataFrame``
    Out: ``np.ndarray``
    """
    def fit(self, *_):
        return self

    def transform(self, data):
        # ascontiguousarray guarantees C memory layout for downstream code.
        return np.ascontiguousarray(data.values)
class CategoricalDummifier(TransformerMixin, BaseEstimator):
    """One-hot encode a categorical Series via a patsy design matrix.

    In:  ``pd.Series`` (values are stringified before encoding)
    Out: ``pd.DataFrame`` of dummy columns, with patsy's ``Intercept``
    column dropped; missing values raise (``NA_action='raise'``).
    """
    def transform(self, data):
        # The Series' name is used as the patsy formula, so the input must be named.
        return dmatrix(formula_like=str(data.name), data=pd.DataFrame(data.apply(str)), return_type='dataframe',
                       NA_action='raise').drop('Intercept', axis=1)

    def fit(self, *_):
        return self
class WeekdayExtraction(TransformerMixin, BaseEstimator):
    """Extract the day-of-week from a DataFrame's datetime index.

    In:  ``pd.DataFrame`` with a datetime-like index.
    Out: ``pd.Series`` named ``'weekday'`` (Monday=0), aligned to the index.
    """
    def fit(self, *_):
        return self

    def transform(self, data):
        return pd.Series(data.index.weekday, index=data.index, name='weekday')
class LengthofField(TransformerMixin, BaseEstimator):
    """Map each element of a Series to its length.

    In:  ``pd.Series`` of sized objects (e.g. strings).
    Out: ``pd.Series`` of ints.
    """
    def fit(self, *_):
        return self

    def transform(self, data):
        return data.apply(len)
class InsertIntercept(TransformerMixin, BaseEstimator):
    """Produce a constant intercept column aligned with the input's index.

    In:  ``pd.DataFrame``
    Out: one-column ``pd.DataFrame`` named ``'Intercept'``, filled with 1.
    """
    def fit(self, *_):
        return self

    def transform(self, data):
        # Scalar 1 broadcasts across the input's whole index.
        return pd.DataFrame(data=1, index=data.index, columns=['Intercept'])
if __name__ == '__main__':
    # Smoke test: union two extract-and-denull pipelines and list their steps.
    target = make_dataframeunion([extract_and_denull('years'), extract_and_denull('kitten')])
    for step in target.transformer_list:
        # Was the Python-2 statement `print step`, which is a SyntaxError on
        # Python 3 and prevented the whole module from importing there; the
        # call form works on both Python 2 and 3.
        print(step)
| 25.943966 | 112 | 0.632663 | 5,370 | 0.892175 | 0 | 0 | 0 | 0 | 0 | 0 | 1,382 | 0.229606 |
cabbf72f74049f95a62204023ec60f67c663a913 | 1,130 | py | Python | exercises/exercise7.py | AsBeeb/DistributedExercisesAAU | b84343d5d5b86ccb4750d47a8594a428ecbd83ce | [
"MIT"
] | 4 | 2021-09-16T12:52:04.000Z | 2022-01-09T15:44:49.000Z | exercises/exercise7.py | AsBeeb/DistributedExercisesAAU | b84343d5d5b86ccb4750d47a8594a428ecbd83ce | [
"MIT"
] | null | null | null | exercises/exercise7.py | AsBeeb/DistributedExercisesAAU | b84343d5d5b86ccb4750d47a8594a428ecbd83ce | [
"MIT"
] | 21 | 2021-09-06T09:39:18.000Z | 2022-03-08T12:18:23.000Z | import math
import random
import threading
import time
from emulators.Medium import Medium
from emulators.Device import Device
from emulators.MessageStub import MessageStub
class Vote(MessageStub):
    """Message carrying an integer vote plus a 'decided' flag.

    ``decided`` presumably distinguishes a final leader announcement from a
    proposal -- confirm against the exercise handout.
    """
    def __init__(self, sender: int, destination: int, vote: int, decided: bool):
        super().__init__(sender, destination)
        self._vote = vote
        self._decided = decided

    def vote(self) -> int:
        """Return the voted-for device index."""
        return self._vote

    def decided(self) -> bool:
        """Return whether this message announces a decision."""
        return self._decided

    def __str__(self):
        return f'Vote: {self.source} -> {self.destination}, voted for {self._vote}, decided? {self._decided}'
class Bully(Device):
    """Skeleton for the Bully leader-election algorithm (student TODOs)."""

    def __init__(self, index: int, number_of_devices: int, medium: Medium):
        super().__init__(index, number_of_devices, medium)
        self._leader = None      # elected leader, once known (presumably an index)
        self._shut_up = False    # presumably set when silenced by a higher id
        self._election = False   # presumably True while an election is running

    def largest(self) -> bool:
        """True when this device holds the highest id known to the medium."""
        return self.index() == max(self.medium().ids())

    def run(self):
        """TODO"""

    def start_election(self):
        """TODO"""

    def print_result(self):
        # NOTE(review): `self._id` must come from the Device base class; if it
        # is private there, `self.index()` may be the intended accessor -- confirm.
        print(f'Leader seen from {self._id} is {self._leader}')
cabc29877369a0222f319b94559f9ee8817e0fb8 | 4,036 | py | Python | pait/util/_pydantic_util.py | so1n/pa | bf43a20f54b6b7e9f1bd6531c49f4f39deb68399 | [
"Apache-2.0"
] | 19 | 2020-08-26T13:46:33.000Z | 2022-02-22T07:48:29.000Z | pait/util/_pydantic_util.py | so1n/pa | bf43a20f54b6b7e9f1bd6531c49f4f39deb68399 | [
"Apache-2.0"
] | 1 | 2021-06-06T17:45:54.000Z | 2021-06-06T17:45:54.000Z | pait/util/_pydantic_util.py | so1n/pa | bf43a20f54b6b7e9f1bd6531c49f4f39deb68399 | [
"Apache-2.0"
] | 1 | 2022-01-21T20:25:33.000Z | 2022-01-21T20:25:33.000Z | from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union
from pydantic.schema import (
default_ref_template,
get_flat_models_from_model,
get_long_model_name,
get_model,
get_schema_ref,
model_process_schema,
normalize_name,
)
if TYPE_CHECKING:
from pydantic import BaseModel
from pydantic.dataclasses import Dataclass
from pydantic.schema import TypeModelOrEnum, TypeModelSet
global_name_model_map = {}
global_conflicting_names: Set[str] = set()
def get_model_global_name(model: "TypeModelOrEnum") -> str:
    """Return the globally-unique JSON-Schema name for ``model``.

    Delegates to :func:`pait_get_model_name_map`, so repeated calls share the
    module-level global name registry.
    """
    return pait_get_model_name_map({model})[model]
def pait_get_model_name_map(unique_models: "TypeModelSet") -> Dict["TypeModelOrEnum", str]:
    """
    Process a set of models and generate unique names for them to be used as keys in the JSON Schema
    definitions. By default the names are the same as the class name. But if two models in different Python
    modules have the same name (e.g. "users.Model" and "items.Model"), the generated names will be
    based on the Python module path for those conflicting models to prevent name collisions.

    Unlike pydantic's own ``get_model_name_map``, the mapping accumulates in the
    module-level ``global_name_model_map``/``global_conflicting_names``, keeping
    names stable and unique across repeated calls.

    :param unique_models: a Python set of models
    :return: dict mapping models to names
    """
    global global_name_model_map
    global global_conflicting_names
    for model in unique_models:
        model_name = normalize_name(model.__name__)
        if model_name in global_conflicting_names:
            # Name already known to clash: file this model under its long
            # (module-qualified) name.
            model_name = get_long_model_name(model)
            global_name_model_map[model_name] = model
        elif model_name in global_name_model_map:
            # First clash for this name: re-file the previously registered
            # model under its long name too.
            # NOTE(review): re-registering the *same* model object also takes
            # this branch and demotes it to its long name -- confirm intended.
            global_conflicting_names.add(model_name)
            conflicting_model = global_name_model_map.pop(model_name)
            global_name_model_map[get_long_model_name(conflicting_model)] = conflicting_model
            global_name_model_map[get_long_model_name(model)] = model
        else:
            global_name_model_map[model_name] = model
    # Invert the registry: model -> name (covers all models seen so far).
    return {v: k for k, v in global_name_model_map.items()}
def pait_model_schema(
    model: Union[Type["BaseModel"], Type["Dataclass"]],
    by_alias: bool = True,
    ref_prefix: Optional[str] = None,
    ref_template: str = default_ref_template,
) -> Dict[str, Any]:
    """
    Generate a JSON Schema for one model, with every sub-model placed under the
    top-level ``definitions`` key.

    :param model: a Pydantic model (a class that inherits from BaseModel)
    :param by_alias: generate the schemas using the aliases defined, if any
    :param ref_prefix: JSON Pointer prefix for ``$ref`` entries; defaults to
        ``#/definitions/``. For OpenAPI use ``#/components/schemas/``. The
        generated schemas still live under ``definitions``; only the references
        carry the prefix.
    :param ref_template: a ``string.format()`` template for ``$ref`` instead of
        a plain prefix, e.g. ``"/schemas/${model}.json#"`` for sibling files.
    :return: dict with the JSON Schema for the passed ``model``
    """
    target_model = get_model(model)
    name_map = pait_get_model_name_map(get_flat_models_from_model(target_model))
    schema_name = name_map[target_model]
    schema, definitions, nested_models = model_process_schema(
        target_model,
        by_alias=by_alias,
        model_name_map=name_map,
        ref_prefix=ref_prefix,
        ref_template=ref_template,
    )
    if schema_name in nested_models:
        # The model references itself (circular reference): move the concrete
        # schema into ``definitions`` and expose it via a ``$ref``.
        definitions[schema_name] = schema
        schema = get_schema_ref(schema_name, ref_prefix, ref_template, False)
    if definitions:
        schema.update({"definitions": definitions})
    return schema
| 42.93617 | 116 | 0.724727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,777 | 0.440287 |
cabd038328a350d6730ea789068223bf2e852a25 | 526 | py | Python | experimental/plotlyDelaunay3D.py | FYP-DES5/deepscan-core | b6ce70ae69577fbdf5b80b30c4e83c7ee9cf6942 | [
"MIT"
] | null | null | null | experimental/plotlyDelaunay3D.py | FYP-DES5/deepscan-core | b6ce70ae69577fbdf5b80b30c4e83c7ee9cf6942 | [
"MIT"
] | null | null | null | experimental/plotlyDelaunay3D.py | FYP-DES5/deepscan-core | b6ce70ae69577fbdf5b80b30c4e83c7ee9cf6942 | [
"MIT"
] | null | null | null | import plotly.plotly as py
from plotly.graph_objs import *
import numpy as np
import matplotlib.cm as cm
from scipy.spatial import Delaunay
# Sample the parameter domain U = [0, 2*pi] x [-1, 1] on a 24 x 8 grid.
u = np.linspace(0, 2 * np.pi, 24)
v = np.linspace(-1, 1, 8)
u, v = np.meshgrid(u, v)
u, v = u.ravel(), v.ravel()

# Evaluate the surface parameterization at every flattened (u, v) pair.
tp = 1 + 0.5 * v * np.cos(u / 2.0)
x = tp * np.cos(u)
y = tp * np.sin(u)
z = 0.5 * v * np.sin(u / 2.0)

# The 2D parameter points drive the Delaunay triangulation of the domain U.
points2D = np.column_stack((u, v))
tri = Delaunay(points2D)
| 22.869565 | 68 | 0.731939 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.287072 |
cabe8c977597c3348430dae0fd1a0dcbabe5b972 | 3,184 | py | Python | src/sardana/taurus/qt/qtcore/tango/sardana/pool.py | marc2332/sardana | 48dc9191baaa63f6c714d8c025e8f3f96548ad26 | [
"CC-BY-3.0"
] | 43 | 2016-11-25T15:21:23.000Z | 2021-08-20T06:09:40.000Z | src/sardana/taurus/qt/qtcore/tango/sardana/pool.py | marc2332/sardana | 48dc9191baaa63f6c714d8c025e8f3f96548ad26 | [
"CC-BY-3.0"
] | 1,263 | 2016-11-25T15:58:37.000Z | 2021-11-02T22:23:47.000Z | src/sardana/taurus/qt/qtcore/tango/sardana/pool.py | marc2332/sardana | 48dc9191baaa63f6c714d8c025e8f3f96548ad26 | [
"CC-BY-3.0"
] | 58 | 2016-11-21T11:33:55.000Z | 2021-09-01T06:21:21.000Z | #!/usr/bin/env python
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""Device pool extension for taurus Qt"""
__all__ = ["QPool", "QMeasurementGroup",
"registerExtensions"]
import json
from taurus.external.qt import Qt
from taurus.core.taurusbasetypes import TaurusEventType
from taurus.core.tango import TangoDevice
CHANGE_EVTS = TaurusEventType.Change, TaurusEventType.Periodic
class QPool(Qt.QObject, TangoDevice):
    """Qt-aware Pool device: a TangoDevice that is also a QObject."""

    def __init__(self, name='', qt_parent=None, **kw):
        # taurus mixin-style constructor chaining: initialize the TangoDevice
        # part with name + keyword args, then the QObject part with only the
        # Qt parent (no keyword args).
        self.call__init__(TangoDevice, name, **kw)
        self.call__init__wo_kw(Qt.QObject, qt_parent)
class QMeasurementGroup(Qt.QObject, TangoDevice):
    """Qt-aware MeasurementGroup device.

    Caches the device's JSON ``Configuration`` attribute and emits
    :attr:`configurationChanged` whenever a new configuration (or an error)
    event arrives.
    """

    # Emitted after every processed configuration event.
    configurationChanged = Qt.pyqtSignal()

    def __init__(self, name='', qt_parent=None, **kw):
        # taurus mixin-style constructor chaining (TangoDevice first, then
        # the QObject part with only the Qt parent).
        self.call__init__(TangoDevice, name, **kw)
        self.call__init__wo_kw(Qt.QObject, qt_parent)

        self._config = None  # cached, JSON-decoded configuration (or None)
        self.__configuration = self.getAttribute("Configuration")
        self.__configuration.addListener(self._configurationChanged)

    def __getattr__(self, name):
        # Resolve attributes through both base classes: Qt first, then the
        # taurus TangoDevice machinery.
        try:
            return Qt.QObject.__getattr__(self, name)
        except AttributeError:
            return TangoDevice.__getattr__(self, name)

    def _configurationChanged(self, s, t, v):
        """taurus listener callback: (source, event type, event value)."""
        # Pure configuration (metadata) events carry no new value.
        if t == TaurusEventType.Config:
            return
        # BUG FIX: the original tested the truthy constant
        # ``TaurusEventType.Error`` instead of comparing it to ``t``, so the
        # cache was unconditionally cleared and the else branch was dead.
        if t == TaurusEventType.Error:
            self._config = None
        else:
            self._config = json.loads(v.value)
        self.configurationChanged.emit()

    def getConfiguration(self, cache=True):
        """Return the (possibly cached) configuration dict, or None on error.

        :param cache: if False, force a fresh read of the attribute.
        """
        if self._config is None or not cache:
            try:
                v = self.read_attribute("configuration")
                self._config = json.loads(v.value)
            except Exception:
                # Best-effort read: an unreadable/undecodable configuration
                # yields None rather than raising (was a bare ``except:``).
                self._config = None
        return self._config

    def setConfiguration(self, config):
        """JSON-encode ``config`` and write it to the device."""
        self.write_attribute("configuration", json.dumps(config))
def registerExtensions():
    """Register the pool device extensions in the
    :class:`taurus.core.tango.TangoFactory`."""
    import taurus

    # Only the measurement group currently needs a Qt-aware device class.
    taurus.Factory().registerDeviceClass('MeasurementGroup', QMeasurementGroup)
| 33.166667 | 86 | 0.659548 | 1,484 | 0.46608 | 0 | 0 | 0 | 0 | 0 | 0 | 1,327 | 0.416771 |
cac1340a7afa2d15888e2ebc2d6dd2802d2bd8de | 85 | py | Python | basics_data_structure/queue.py | corenel/algorithm-exercises | f3f31f709e289e590c98247c019d36fc9cc44faf | [
"MIT"
] | null | null | null | basics_data_structure/queue.py | corenel/algorithm-exercises | f3f31f709e289e590c98247c019d36fc9cc44faf | [
"MIT"
] | null | null | null | basics_data_structure/queue.py | corenel/algorithm-exercises | f3f31f709e289e590c98247c019d36fc9cc44faf | [
"MIT"
] | null | null | null | """
Queue
https://algorithm.yuanbin.me/zh-hans/basics_data_structure/queue.html
"""
| 14.166667 | 69 | 0.752941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.988235 |
cac8d19cf2fbd34c80ea8f4c9aeb60a84c12ab4c | 1,542 | py | Python | after/trans.py | Windsooon/Comments | 47a6077e3bf46743a8da3d59ea8ebcd5601c9fe9 | [
"MIT"
] | 1 | 2020-07-08T06:17:54.000Z | 2020-07-08T06:17:54.000Z | after/trans.py | Windsooon/Comments | 47a6077e3bf46743a8da3d59ea8ebcd5601c9fe9 | [
"MIT"
] | null | null | null | after/trans.py | Windsooon/Comments | 47a6077e3bf46743a8da3d59ea8ebcd5601c9fe9 | [
"MIT"
] | null | null | null | import os
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from comments.base import DATA_DIR, cut_words, sigmoid
class Trans:
    """Score sentences by the log-likelihood of their part-of-speech (POS)
    transition sequence, using a transition-count matrix loaded from
    ``trans.xls``."""

    def __init__(self):
        # Rows/columns of the matrix are POS tags; cell values are transition
        # counts.  NOTE(review): presumably includes a 'begin' row used as the
        # initial state below — confirm against trans.xls.
        self.trans_matrix = pd.read_excel(os.path.join(DATA_DIR, 'trans.xls'), index_col=0)
        # add one to every value
        # (add-one / Laplace smoothing so unseen transitions get a
        # non-zero probability)
        self.trans_matrix += 1

    def calculate_pro(self, sentence):
        '''
        Return the summed log-probability of the POS transition sequence of
        ``sentence``.  Transitions whose tags are missing from the matrix are
        skipped (and do not advance the previous-tag state).
        '''
        res = cut_words(sentence, postag=True)
        # Get all the POS
        pos = [r[1] for r in res]
        possibility = 0
        pre = 'begin'
        for i in range(len(pos)-1):
            try:
                # P(pos[i] | pre), scaled by 10 before the log.
                # NOTE(review): the "* 10" scaling shifts every term by
                # log(10) — looks like a tuning constant; confirm intent.
                possibility += np.log(self.trans_matrix.loc[pre, : ][pos[i]] * 10 / sum(self.trans_matrix.loc[pre, : ]))
            except KeyError:
                # Unknown tag pair: ignore this transition entirely.
                pass
            else:
                pre = pos[i]
        return possibility

    def pro(self, csv_file):
        # Score every row of the 'comments' column and write the scores to
        # 'output_<csv_file>' in the current working directory.
        data = pd.read_csv(os.path.join(DATA_DIR, csv_file), skipinitialspace=True)
        data['pro'] = data['comments'].apply(self.calculate_pro)
        data['pro'].to_csv('output_' + csv_file)

    def show(self, csv_file):
        # Plot a histogram of previously computed log-probabilities.
        axes = plt.axes()
        axes.set_xlim([-200, 200])
        d = pd.read_csv(csv_file, skipinitialspace=True, names=['pro'])
        plt.hist(d['pro'], color='blue', edgecolor='black', bins = 300)
        plt.title('Log Pro')
        plt.xlabel('log_pro')
        plt.ylabel('number')
        plt.tight_layout()
        plt.show()
# Script entry: plot the histogram of previously generated scores.
# (Uncomment the .pro(...) line to regenerate the scores first.)
t = Trans()
# t.pro('fin_useless.csv')
t.show('output_fin_useless.csv')
| 29.09434 | 120 | 0.576524 | 1,318 | 0.854734 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.139429 |
cac957160334299ea86d99379b7a64786fa17120 | 2,071 | py | Python | titan/api_pkg/mutation/props.py | mnieber/gen | 65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9 | [
"MIT"
] | null | null | null | titan/api_pkg/mutation/props.py | mnieber/gen | 65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9 | [
"MIT"
] | null | null | null | titan/api_pkg/mutation/props.py | mnieber/gen | 65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9 | [
"MIT"
] | null | null | null | from moonleap import u0
from moonleap.typespec.field_spec import FieldSpec, FormFieldSpec
from moonleap.typespec.type_spec_store import TypeSpec, type_spec_store
from titan.api_pkg.mutation.default_outputs_type_spec import default_outputs_type_spec
def _field_spec_from_item(self, item):
    # One optional, public form field per posted item, named "<item>Form".
    return FormFieldSpec(
        name=f"{item.item_name}Form",
        required=False,
        private=False,
        field_type="form",
        field_type_attrs={"target": item.item_type.name},
    )
def _field_spec_from_deleted_item_list(self, item_list):
    # A required, public id-list field per deleted item list, "<item>Ids".
    return FieldSpec(
        name=f"{item_list.item_name}Ids",
        required=True,
        private=False,
        field_type="idList",
        field_type_attrs={"target": item_list.item_type.name},
    )
def _default_inputs_type_spec(self, name):
    """Build the default inputs TypeSpec: one form field per posted item,
    one id-list field per deleted item list."""
    field_specs = [_field_spec_from_item(self, item) for item in self.items_posted]
    field_specs.extend(
        _field_spec_from_deleted_item_list(self, item_list)
        for item_list in self.item_lists_deleted
    )
    return TypeSpec(type_name=name, field_specs=field_specs)
def inputs_type_spec(self):
    """Return (creating and caching on first use) the inputs TypeSpec."""
    store = type_spec_store()
    type_spec_name = f"{u0(self.name)}Inputs"
    existing = store.get(type_spec_name, None)
    if not existing:
        store.setdefault(type_spec_name, _default_inputs_type_spec(self, type_spec_name))
    return store.get(type_spec_name)
def outputs_type_spec(self):
    """Return (creating and caching on first use) the outputs TypeSpec."""
    store = type_spec_store()
    name = f"{u0(self.name)}Outputs"
    if not store.has(name):
        store.setdefault(name, default_outputs_type_spec(self, name))
    return store.get(name)
def posts_item(self, item_name):
    """Return the posted items whose ``item_name`` matches (possibly empty)."""
    matches = []
    for item in self.items_posted:
        if item.item_name == item_name:
            matches.append(item)
    return matches
def graphql_api_item_types_posted(self):
    """Return the distinct item types posted by this api's mutations,
    in first-seen order."""
    seen = list()
    for mutation in self.mutations:
        for posted in mutation.items_posted:
            item_type = posted.item_type
            if item_type not in seen:
                seen.append(item_type)
    return seen
| 30.455882 | 86 | 0.695316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.035732 |
cac979ac83238ac6883853f4946f27c46d7d2e84 | 10,337 | py | Python | api/opentrons/instruments/pipette_config.py | heyoni/opentrons | abb4c742091d800541ed27273c6865fd5ba3aeac | [
"Apache-2.0"
] | null | null | null | api/opentrons/instruments/pipette_config.py | heyoni/opentrons | abb4c742091d800541ed27273c6865fd5ba3aeac | [
"Apache-2.0"
] | null | null | null | api/opentrons/instruments/pipette_config.py | heyoni/opentrons | abb4c742091d800541ed27273c6865fd5ba3aeac | [
"Apache-2.0"
] | null | null | null | import logging
import os
import json
from collections import namedtuple
from opentrons.config import get_config_index
FILE_DIR = os.path.abspath(os.path.dirname(__file__))
log = logging.getLogger(__name__)
def pipette_config_path():
    """Return the pipette settings file path from the config index, falling
    back to './settings.json' when no path is configured."""
    return get_config_index().get('pipetteConfigFile', './settings.json')
# Immutable record describing one pipette model's calibration/behavior
# parameters.  Field names mirror the camelCase keys of the settings JSON
# (see _dict_key_to_config_attribute below).
pipette_config = namedtuple(
    'pipette_config',
    [
        'plunger_positions',   # dict: top / bottom / blow_out / drop_tip
        'pick_up_current',
        'aspirate_flow_rate',
        'dispense_flow_rate',
        'ul_per_mm',
        'channels',
        'name',
        'model_offset',
        'plunger_current',
        'drop_tip_current',
        'tip_length'  # TODO (andy): remove from pipette, move to tip-rack
    ]
)
def _create_config_from_dict(cfg: dict, model: str) -> pipette_config:
    """Build a ``pipette_config`` for ``model`` from a raw settings dict.

    Any key missing from ``cfg`` falls back to the hard-coded default for
    that model (``fallback_configs``).  Returns None if construction fails
    (e.g. no plunger positions available for an unknown model).
    """
    def _dict_key_to_config_attribute(key: str) -> str:
        '''
        Converts the JSON key syntax (eg: "plungerPositions"), to the format
        used in the namedtuple `plunger_config` (eg: "plunger_positions")
        '''
        return ''.join([
            '_{}'.format(c.lower()) if c.isupper() else c
            for c in key
        ])
    def _load_config_value(config_dict: dict, key: str):
        '''
        Retrieves a given key from the loaded JSON config dict. If that key is
        not present in the dictionary, it falls back to the value from
        the namedtuple `plunger_config`, named "fallback"
        '''
        nonlocal model
        # NOTE: for an unknown model, fallback_cfg is None and getattr below
        # raises AttributeError (not caught here) — presumably models are
        # always known; confirm before relying on this for new models.
        fallback_cfg = fallback_configs.get(model)
        fallback_key = _dict_key_to_config_attribute(key)
        fallback_value = getattr(fallback_cfg, fallback_key)
        return config_dict.get(key, fallback_value)
    res = None
    try:
        plunger_pos = _load_config_value(cfg, 'plungerPositions')
        res = pipette_config(
            plunger_positions={
                'top': plunger_pos['top'],
                'bottom': plunger_pos['bottom'],
                # accept both the JSON camelCase and snake_case spellings
                'blow_out': plunger_pos.get(
                    'blowOut', plunger_pos.get('blow_out')),
                'drop_tip': plunger_pos.get(
                    'dropTip', plunger_pos.get('drop_tip')),
            },
            pick_up_current=_load_config_value(cfg, 'pickUpCurrent'),
            aspirate_flow_rate=_load_config_value(
                cfg, 'aspirateFlowRate'),
            dispense_flow_rate=_load_config_value(
                cfg, 'dispenseFlowRate'),
            ul_per_mm=_load_config_value(cfg, 'ulPerMm'),
            channels=_load_config_value(cfg, 'channels'),
            name=model,
            model_offset=_load_config_value(cfg, 'modelOffset'),
            plunger_current=_load_config_value(cfg, 'plungerCurrent'),
            drop_tip_current=_load_config_value(cfg, 'dropTipCurrent'),
            tip_length=_load_config_value(cfg, 'tipLength')
        )
    except (KeyError, json.decoder.JSONDecodeError) as e:
        # On any failure, log and return None so the caller can fall back.
        log.error('Error when loading pipette config: {}'.format(e))
    return res
def _load_config_dict_from_file(pipette_model: str) -> dict:
    """Read the raw config dict for ``pipette_model`` from the settings file.

    Returns an empty dict when the settings file does not exist.  If the file
    exists but lacks the model key, the resulting KeyError propagates.
    """
    config_file = pipette_config_path()
    if not os.path.exists(config_file):
        return {}
    with open(config_file) as conf:
        return json.load(conf)[pipette_model]
# ------------------------- deprecated data ---------------------------
# This section is left in as a fall-back until the settings file is
# available on all robots. Currently, getting the settings file onto
# the robots requires a Resin push, which involves some pain to users
# because it restarts the robot--even if a protocol run is in progress.
# The preferred solution is to implement a server endpoint that will
# accept a data packet and save it in the robot, the same way that API
# server updates are currently done. Once that is in place, the app can
# ship the required data to the robot and this fallback data can be
# removed from server code. Delete from here to "end deprecated data"
# below, and remove the `select_config` call from the `config` dict
# comprehension.
# Geometry of the 8-channel pipettes.  Values are presumably millimeters —
# TODO confirm units against the robot geometry docs.
DISTANCE_BETWEEN_NOZZLES = 9
NUM_MULTI_CHANNEL_NOZZLES = 8
# Span from the first to the last nozzle of a multi-channel pipette.
MULTI_LENGTH = (NUM_MULTI_CHANNEL_NOZZLES - 1) * DISTANCE_BETWEEN_NOZZLES
Y_OFFSET_MULTI = MULTI_LENGTH / 2
Z_OFFSET_MULTI = -25.8
# Per-model Z offsets (single-channel pipettes differ in length).
Z_OFFSET_P10 = -13 # longest single-channel pipette
Z_OFFSET_P50 = 0
Z_OFFSET_P300 = 0
Z_OFFSET_P1000 = 20 # shortest single-channel pipette
# Default seconds to aspirate/dispense a pipette's full volume (chosen to
# mimic human pipetting; see the comment block below).
DEFAULT_ASPIRATE_SECONDS = 2
DEFAULT_DISPENSE_SECONDS = 1
# TODO (ben 20180511): should we read these values from
# TODO /shared-data/robot-data/pipette-config.json ? Unclear,
# TODO because this is the backup in case that behavior fails,
# TODO but we could make it more reliable if we start bundling
# TODO config data into the wheel file perhaps. Needs research.
# ---------------------------------------------------------------------------
# Hard-coded per-model defaults, used when the on-robot settings file is
# missing or unreadable (see the "deprecated data" comment block above).
# Do not change these values casually: they directly affect robot motion.
# ---------------------------------------------------------------------------
p10_single = pipette_config(
    plunger_positions={
        'top': 19,
        'bottom': 2.5,
        'blow_out': -0.5,
        'drop_tip': -4
    },
    pick_up_current=0.1,
    aspirate_flow_rate=10 / DEFAULT_ASPIRATE_SECONDS,
    dispense_flow_rate=10 / DEFAULT_DISPENSE_SECONDS,
    ul_per_mm=0.77,
    channels=1,
    name='p10_single_v1',
    model_offset=[0.0, 0.0, Z_OFFSET_P10],
    plunger_current=0.3,
    drop_tip_current=0.5,
    tip_length=33
)

p10_multi = pipette_config(
    plunger_positions={
        'top': 19,
        'bottom': 4,
        'blow_out': 1,
        'drop_tip': -4.5
    },
    pick_up_current=0.2,
    aspirate_flow_rate=10 / DEFAULT_ASPIRATE_SECONDS,
    dispense_flow_rate=10 / DEFAULT_DISPENSE_SECONDS,
    ul_per_mm=0.77,
    channels=8,
    name='p10_multi_v1',
    model_offset=[0.0, Y_OFFSET_MULTI, Z_OFFSET_MULTI],
    plunger_current=0.5,
    drop_tip_current=0.5,
    tip_length=33
)

p50_single = pipette_config(
    plunger_positions={
        'top': 19,
        'bottom': 2.5,
        'blow_out': 2,
        'drop_tip': -5
    },
    pick_up_current=0.1,
    aspirate_flow_rate=50 / DEFAULT_ASPIRATE_SECONDS,
    dispense_flow_rate=50 / DEFAULT_DISPENSE_SECONDS,
    ul_per_mm=3.35,
    channels=1,
    name='p50_single_v1',
    model_offset=[0.0, 0.0, Z_OFFSET_P50],
    plunger_current=0.3,
    drop_tip_current=0.5,
    tip_length=51.7
)

p50_multi = pipette_config(
    plunger_positions={
        'top': 19,
        'bottom': 2.5,
        'blow_out': 2,
        'drop_tip': -4
    },
    pick_up_current=0.3,
    aspirate_flow_rate=50 / DEFAULT_ASPIRATE_SECONDS,
    dispense_flow_rate=50 / DEFAULT_DISPENSE_SECONDS,
    ul_per_mm=3.35,
    channels=8,
    name='p50_multi_v1',
    model_offset=[0.0, Y_OFFSET_MULTI, Z_OFFSET_MULTI],
    plunger_current=0.5,
    drop_tip_current=0.5,
    tip_length=51.7
)

p300_single = pipette_config(
    plunger_positions={
        'top': 19,
        'bottom': 2.5,
        'blow_out': 1,
        'drop_tip': -5
    },
    pick_up_current=0.1,
    aspirate_flow_rate=300 / DEFAULT_ASPIRATE_SECONDS,
    dispense_flow_rate=300 / DEFAULT_DISPENSE_SECONDS,
    ul_per_mm=18.7,
    channels=1,
    name='p300_single_v1',
    model_offset=[0.0, 0.0, Z_OFFSET_P300],
    plunger_current=0.3,
    drop_tip_current=0.5,
    tip_length=51.7
)

p300_multi = pipette_config(
    plunger_positions={
        'top': 19,
        'bottom': 3,
        'blow_out': 1,
        'drop_tip': -3.5
    },
    pick_up_current=0.3,
    aspirate_flow_rate=300 / DEFAULT_ASPIRATE_SECONDS,
    dispense_flow_rate=300 / DEFAULT_DISPENSE_SECONDS,
    ul_per_mm=19,
    channels=8,
    name='p300_multi_v1',
    model_offset=[0.0, Y_OFFSET_MULTI, Z_OFFSET_MULTI],
    plunger_current=0.5,
    drop_tip_current=0.5,
    tip_length=51.7
)

p1000_single = pipette_config(
    plunger_positions={
        'top': 19,
        'bottom': 3,
        'blow_out': 1,
        'drop_tip': -5
    },
    pick_up_current=0.1,
    aspirate_flow_rate=1000 / DEFAULT_ASPIRATE_SECONDS,
    dispense_flow_rate=1000 / DEFAULT_DISPENSE_SECONDS,
    ul_per_mm=65,
    channels=1,
    name='p1000_single_v1',
    model_offset=[0.0, 0.0, Z_OFFSET_P1000],
    plunger_current=0.5,
    drop_tip_current=0.5,
    tip_length=76.7
)

# Model-name -> default config; consulted by select_config() when the
# settings file yields nothing usable.
fallback_configs = {
    'p10_single_v1': p10_single,
    'p10_multi_v1': p10_multi,
    'p50_single_v1': p50_single,
    'p50_multi_v1': p50_multi,
    'p300_single_v1': p300_single,
    'p300_multi_v1': p300_multi,
    'p1000_single_v1': p1000_single
}
def select_config(model: str):
    """Build the config for ``model`` from the settings file, falling back to
    the hard-coded defaults when the file yields nothing usable."""
    cfg = _create_config_from_dict(_load_config_dict_from_file(model), model)
    return cfg if cfg else fallback_configs.get(model)
# ----------------------- end deprecated data -------------------------
# Notes:
# - multi-channel pipettes share the same dimensional offsets
# - single-channel pipettes have different lengths
# - Default number of seconds to aspirate/dispense a pipette's full volume,
# and these times were chosen to mimic normal human-pipetting motions.
# However, accurate speeds are dependent on environment (ex: liquid
# viscosity), therefore a pipette's flow-rates (ul/sec) should be set by
# protocol writer
# model-specific ID's, saved with each Pipette's memory
# used to identifiy what model pipette is currently connected to machine
# (channel-kind, max-volume) -> model string, used to identify which pipette
# model is attached from its saved ID.
PIPETTE_MODEL_IDENTIFIERS = {
    'single': {
        '10': 'p10_single_v1',
        '50': 'p50_single_v1',
        '300': 'p300_single_v1',
        '1000': 'p1000_single_v1'
    },
    'multi': {
        '10': 'p10_multi_v1',
        '50': 'p50_multi_v1',
        '300': 'p300_multi_v1',
    }
}

# Eagerly resolved config for every known model (file values with fallback).
# NOTE: built once at import time; load() below re-reads from disk instead.
configs = {
    model: select_config(model)
    for model in [
        'p10_single_v1',
        'p10_multi_v1',
        'p50_single_v1',
        'p50_multi_v1',
        'p300_single_v1',
        'p300_multi_v1',
        'p1000_single_v1']}
def load(pipette_model: str) -> pipette_config:
    """
    Lazily load pipette config data from disk, so configuration changes are
    picked up by newly instantiated objects without a restart.

    :param pipette_model: a pipette model string corresponding to a top-level
        key in the "pipette-config.json" file; a missing key raises KeyError
    :return: a `pipette_config` instance
    """
    return select_config(pipette_model)
| 30.764881 | 79 | 0.65164 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,967 | 0.383767 |
cacadf76d959296c2be37d92baf26148aaa927bb | 4,417 | py | Python | pysnmp/Unisphere-Products-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/Unisphere-Products-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/Unisphere-Products-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module Unisphere-Products-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Unisphere-Products-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:26:09 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE: this module was auto-generated by pysmi from the Unisphere-Products
# ASN.1 MIB (see the header above).  Do not hand-edit the OID values below;
# regenerate from the MIB source instead.
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
iso, NotificationType, Counter64, Gauge32, ModuleIdentity, Counter32, IpAddress, Integer32, Unsigned32, TimeTicks, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "NotificationType", "Counter64", "Gauge32", "ModuleIdentity", "Counter32", "IpAddress", "Integer32", "Unsigned32", "TimeTicks", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Bits")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
unisphere, = mibBuilder.importSymbols("Unisphere-SMI", "unisphere")
# Root of the Unisphere products subtree: 1.3.6.1.4.1.4874.1
usProducts = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 1))
usProducts.setRevisions(('2001-12-07 15:36', '2001-10-15 18:29', '2001-03-01 15:27', '2000-05-24 00:00', '1999-12-13 19:36', '1999-11-16 00:00', '1999-09-28 00:00',))
if mibBuilder.loadTexts: usProducts.setLastUpdated('200112071536Z')
if mibBuilder.loadTexts: usProducts.setOrganization('Unisphere Networks, Inc.')
# Product family / model OID identifiers under usProducts.
productFamilies = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1))
unisphereProductFamilies = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 1))
usErx = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 1, 1))
usEdgeRoutingSwitch1400 = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 1, 1, 1))
usEdgeRoutingSwitch700 = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 1, 1, 2))
usEdgeRoutingSwitch1440 = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 1, 1, 3))
usEdgeRoutingSwitch705 = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 1, 1, 4))
usMrx = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 1, 2))
usMrxRoutingSwitch16000 = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 1, 2, 1))
usMrxRoutingSwitch32000 = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 1, 2, 2))
usSmx = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 1, 3))
usServiceMediationSwitch2100 = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 1, 3, 1))
usSrx = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 1, 4))
usServiceReadySwitch3000 = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 1, 4, 1))
usUmc = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 1, 5))
usUmcSystemManagement = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 1, 5, 1))
oemProductFamilies = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 2))
marconiProductFamilies = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 2, 1))
usSsx = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 2, 1, 1))
usSsx1400 = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 2, 1, 1, 1))
usSsx700 = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 2, 1, 1, 2))
usSsx1440 = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 1, 1, 2, 1, 1, 3))
mibBuilder.exportSymbols("Unisphere-Products-MIB", usSsx1400=usSsx1400, usErx=usErx, usServiceMediationSwitch2100=usServiceMediationSwitch2100, oemProductFamilies=oemProductFamilies, usServiceReadySwitch3000=usServiceReadySwitch3000, usUmc=usUmc, usSmx=usSmx, usSsx1440=usSsx1440, unisphereProductFamilies=unisphereProductFamilies, usEdgeRoutingSwitch705=usEdgeRoutingSwitch705, usMrxRoutingSwitch16000=usMrxRoutingSwitch16000, usSsx=usSsx, usProducts=usProducts, usEdgeRoutingSwitch700=usEdgeRoutingSwitch700, usSsx700=usSsx700, usUmcSystemManagement=usUmcSystemManagement, marconiProductFamilies=marconiProductFamilies, productFamilies=productFamilies, usEdgeRoutingSwitch1440=usEdgeRoutingSwitch1440, usMrx=usMrx, usMrxRoutingSwitch32000=usMrxRoutingSwitch32000, usEdgeRoutingSwitch1400=usEdgeRoutingSwitch1400, PYSNMP_MODULE_ID=usProducts, usSrx=usSrx)
| 105.166667 | 856 | 0.737152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,076 | 0.243604 |
cacbc63aebd384ee4e0145c5118d6ffecda40a4b | 5,006 | py | Python | src/main.py | MauroLuzzatto/algorithmic-explanations | 4a362ae9576cc68ecf4b61dd6bad2105ff62bf57 | [
"Apache-2.0"
] | null | null | null | src/main.py | MauroLuzzatto/algorithmic-explanations | 4a362ae9576cc68ecf4b61dd6bad2105ff62bf57 | [
"Apache-2.0"
] | null | null | null | src/main.py | MauroLuzzatto/algorithmic-explanations | 4a362ae9576cc68ecf4b61dd6bad2105ff62bf57 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 28 17:16:29 2020
@author: mauro
"""
import logging
import os
from explanation import (
CounterfactualExplanation,
PermutationExplanation,
ShapleyExplanation,
SurrogateModelExplanation,
ControlGroupExplanation
)
from src.model.config import path_base
from src.model.DataConfig import DataConfig
from src.model.utils import (
average_the_ratings,
get_dataset,
load_pickle,
map_index_to_sample,
shuffle_in_unison,
experiment_setup,
create_treatment_dataframe
)
from src.explanation.surrogate_manual import run
# NOTE(review): logger is keyed by __file__ rather than the conventional
# __name__ — confirm this is intentional before changing.
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)

# Project-relative paths for configs, training data and trained models.
path_config = os.path.join(path_base, "src", "resources")
path_load = os.path.join(path_base, "dataset", "training")
path_model_base = os.path.join(path_base, "model")

# Load the experiment/data configuration once at import time.
data = DataConfig(path_config)
data_config = data.load_config()
def print_output(sample, output):
    """Pretty-print one explanation result (score, method, explanation)
    to stdout, separated by dashed divider lines."""
    score_text, method_text, explanation_text = output
    divider = "-" * 60
    print(sample)
    for text in (score_text, method_text, explanation_text):
        print(divider)
        print(text)
    print("\n")
def find_winner(X, y):
    """Score every candidate with the module-level ``model`` and return the
    row of ``y`` (index reset, with a ``y_pred`` column) that scored highest."""
    predictions = model.predict(X.values)
    scored = y.copy()
    scored['y_pred'] = predictions
    scored.reset_index(inplace=True)
    best_position = scored['y_pred'].argmax()
    return scored.iloc[best_position]
# Main experiment loop: for each rating field, load the newest matching
# model, pick (and remove) the winning entry, then generate explanations for
# every treatment group.
for field in ['all']:
    # Newest model whose filename contains the field name (listdir order —
    # presumably names sort chronologically; confirm).
    model_name = [name for name in os.listdir(path_model_base) if field in name][-1]
    print(model_name)
    path_model = os.path.join(path_model_base, model_name)
    path_save = os.path.join(os.path.dirname(os.getcwd()), "reports", field)
    config = data_config[field]
    config["folder"] = field
    model = load_pickle(
        path_model=path_model,
        model_name="XGBRegressor.pickle",
    )
    X, y = get_dataset(
        path_load=path_load,
        name=data_config["dataset"],
        target=config["target"],
        features=config["features"],
    )
    # Collapse the individual rater columns into one averaged rating column.
    new_name = f"{field}.player.rating"
    y = average_the_ratings(y, list(y), new_name)
    # Identify and persist the winning entry, then exclude it from the study.
    df_winner = find_winner(X, y)
    df_winner.to_csv(
        os.path.join(path_save, 'winner.csv'),
        sep=";",
        encoding="utf-8-sig"
    )
    print(X.loc[df_winner['Entry ID']].tolist())
    # remove winner
    X.drop(df_winner['Entry ID'], inplace=True)
    y.drop(df_winner['Entry ID'], inplace=True)
    X, y = shuffle_in_unison(X, y)
    # Assign the remaining samples to treatment groups and persist the map.
    samples_dict = experiment_setup(X)
    df_treatment = create_treatment_dataframe(samples_dict)
    df_treatment.to_csv(
        os.path.join(path_save, 'treatment_groups.csv'),
        sep=";",
        encoding="utf-8-sig",
    )
    # control group
    for samples, sparse, show_rating in samples_dict["control_group"]:
        control = ControlGroupExplanation(X, y, model, sparse, show_rating, config)
        for sample in samples:
            sample_index = map_index_to_sample(X, sample)
            output = control.main(sample_index, sample)
            print(sparse, show_rating)
            print_output(sample, output)
    # Global, Non-contrastive
    for samples, sparse, show_rating in samples_dict["permutation"]:
        permutation = PermutationExplanation(
            X, y, model, sparse, show_rating, config
        )
        for sample in samples:
            sample_index = map_index_to_sample(X, sample)
            output = permutation.main(sample_index, sample)
            print(sparse, show_rating)
            print_output(sample, output)
    # Local, Non-contrastive
    for samples, sparse, show_rating in samples_dict["shapley"]:
        shapely = ShapleyExplanation(X, y, model, sparse, show_rating, config)
        for sample in samples:
            sample_index = map_index_to_sample(X, sample)
            output = shapely.main(sample_index, sample)
            print(sparse, show_rating)
            print_output(sample, output)
    # Global, Contrastive
    for samples, sparse, show_rating in samples_dict["surrogate"]:
        surrogate = SurrogateModelExplanation(
            X, y, model, sparse, show_rating, config
        )
        for sample in samples:
            sample_index = map_index_to_sample(X, sample)
            output = surrogate.main(sample_index, sample)
            print(sparse, show_rating)
            print_output(sample, output)
    # Local, Contrastive
    for samples, sparse, show_rating in samples_dict["counterfactual"]:
        counterfactual = CounterfactualExplanation(
            X, y, model, sparse, show_rating, config, y_desired=8.
        )
        for sample in samples:
            sample_index = map_index_to_sample(X, sample)
            output = counterfactual.main(sample_index, sample)
            print(sparse, show_rating)
            print_output(sample, output)
| 28.605714 | 84 | 0.646025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 519 | 0.103676 |
cacbc9b4cfe03469a1b46747898d31856ab1f581 | 3,752 | py | Python | det.py | styler00dollar/CenDetect | eba84c3702c5196b40e994515f399ae0db2e43fb | [
"Apache-2.0"
] | 3 | 2021-07-19T16:49:18.000Z | 2021-07-19T19:24:26.000Z | det.py | styler00dollar/CenDetect | eba84c3702c5196b40e994515f399ae0db2e43fb | [
"Apache-2.0"
] | null | null | null | det.py | styler00dollar/CenDetect | eba84c3702c5196b40e994515f399ae0db2e43fb | [
"Apache-2.0"
] | null | null | null | import asyncio
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
from mmcv.runner import wrap_fp16_model
import cv2
import numpy as np
from tqdm import tqdm
import os
import glob
import torch
# dont print deprication warnings
import warnings
warnings.filterwarnings("ignore")
def parse_args():
    """Build the command-line interface for the detector and return the
    parsed options."""
    cli = ArgumentParser()
    cli.add_argument('--fp16', action='store_true', required=False, help='Enables FP16')
    cli.add_argument('--input_path', type=str, default="input", required=False, help='Path to a folder for input')
    cli.add_argument('--output_path', type=str, default="output", required=False, help='Path to a folder for output')
    # Config and weight paths are derived from --model_directory and --model.
    cli.add_argument('--model_directory', type=str, default="models", required=False, help='Folder path to configs and models')
    cli.add_argument('--model', type=str, default="mask_rcnn_r50_fpn", required=False, help='Seleting a model. [mask_rcnn_r50_fpn, mask_rcnn_r101_fpn, point_rend_r50_fpn, cascade_mask_rcnn_r50_fpn_dconv]')
    cli.add_argument('--device', default=None, help='Device used for inference')
    cli.add_argument('--confidence', type=float, default=0.3, required=False, help='Confidence thresh for detections (Values between 0 and 1)')
    return cli.parse_args()
def main(args):
    """Run detection on every image under args.input_path and save masked copies.

    Builds an mmdetection model from the selected config/weights, runs
    inference on each image, merges the instance masks of both classes
    (0: bar, 1: mosaic) whose score exceeds ``args.confidence``, paints the
    masked pixels green and writes the result to ``args.output_path``.
    """
    # Resolve config and checkpoint from the model selection.
    config_path = os.path.join(args.model_directory, args.model + ".py")
    model_path = os.path.join(args.model_directory, args.model + ".pth")

    # Identity comparison for None (original used '== None').
    if args.device is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
    else:
        device = args.device
    print(f"Using device: {device}")
    model = init_detector(config_path, model_path, device=device)

    # Collect all supported image files from the input folder (recursively).
    files = []
    for ext in ('png', 'jpg', 'jpeg', 'webp'):
        files.extend(glob.glob(os.path.join(args.input_path, '**', '*.' + ext), recursive=True))

    if args.fp16:
        wrap_fp16_model(model)
        model.half()

    for image_path in tqdm(files):
        image = cv2.imread(image_path, cv2.IMREAD_COLOR)
        result = inference_detector(model, image_path)

        # result[0][c] holds per-class boxes (score in column 4) and
        # result[1][c] the masks at the SAME index. The original code advanced
        # its mask index only for detections above the threshold, mis-pairing
        # masks with boxes whenever a low-confidence detection preceded a
        # high-confidence one; enumerate keeps box i paired with mask i.
        mask = np.zeros((image.shape[0], image.shape[1])).astype(bool)
        for class_id in (0, 1):  # 0: bar, 1: mosaic
            for det_idx, detection in enumerate(result[0][class_id]):
                if detection[4] > args.confidence:
                    mask = mask | result[1][class_id][det_idx]

        # Paint detected regions green and save under the original base name.
        image[mask] = [0, 255, 0]
        out_name = os.path.splitext(os.path.basename(image_path))[0] + ".png"
        cv2.imwrite(os.path.join(args.output_path, out_name), np.array(image).astype(np.uint8))
if __name__ == '__main__':
    # CLI entry point: parse arguments, then run detection over the input folder.
    args = parse_args()
    main(args)
| 36.427184 | 208 | 0.679104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,283 | 0.341951 |
cacd5c042e61c49091f95d611b580f1100626117 | 3,357 | py | Python | command/sub8_alarm/alarm_handlers/kill.py | bhostetler18/SubjuGator | 2f56231d821bc802988264d0eab3005f8286a597 | [
"MIT"
] | null | null | null | command/sub8_alarm/alarm_handlers/kill.py | bhostetler18/SubjuGator | 2f56231d821bc802988264d0eab3005f8286a597 | [
"MIT"
] | null | null | null | command/sub8_alarm/alarm_handlers/kill.py | bhostetler18/SubjuGator | 2f56231d821bc802988264d0eab3005f8286a597 | [
"MIT"
] | null | null | null | import rospy
from ros_alarms import HandlerBase, Alarm
from actionlib import SimpleActionClient, TerminalState
from mil_msgs.msg import BagOnlineAction, BagOnlineGoal
import os
class Kill(HandlerBase):
    """Handler for the global 'kill' meta-alarm.

    The alarm starts raised (the sub boots killed) and latches: once raised it
    stays raised until a user clears it, regardless of the sub-alarms that
    triggered it.
    """
    alarm_name = 'kill'
    # NOTE(review): looks like a typo of 'initially_raised'; left unchanged in
    # case ros_alarms reads this exact attribute name -- confirm before fixing.
    initally_raised = True
    def __init__(self):
        # Alarm server will set this as the initial state of the kill alarm
        # (the sub starts killed).
        self.initial_alarm = Alarm(self.alarm_name, True,
                                   node_name='alarm_server',
                                   problem_description='Initial kill')
        self._killed = False
        self._last_mission_killed = False
        # Action client used to record a rosbag of recent topics on each kill.
        self.bag_client = SimpleActionClient('/online_bagger/bag', BagOnlineAction)
        # True until the first raise; the initial boot-time kill is not bagged.
        self.first = True
    def raised(self, alarm):
        # Latch the kill and record a bag. bagger_dump itself skips the very
        # first (boot-time) raise; self.first is cleared afterwards.
        self._killed = True
        self.bagger_dump()
        self.first = False
    def cleared(self, alarm):
        # Only a user-initiated clear unlatches the kill.
        self._killed = False
    def _bag_done_cb(self, status, result):
        # actionlib terminal status 3 == SUCCEEDED.
        if status == 3:
            rospy.loginfo('KILL BAG WRITTEN TO {}'.format(result.filename))
        else:
            rospy.logwarn('KILL BAG {}, status: {}'.format(TerminalState.to_string(status), result.status))
    def bagger_dump(self):
        """Call online_bagger/dump service"""
        # Skip the very first (initial) kill -- nothing interesting to record yet.
        if self.first:
            return
        if 'BAG_ALWAYS' not in os.environ or 'bag_kill' not in os.environ:
            rospy.logwarn('BAG_ALWAYS or BAG_KILL not set. Not making kill bag.')
            return
        goal = BagOnlineGoal(bag_name='kill.bag')
        # Bag both the always-recorded topics and the kill-specific ones.
        goal.topics = os.environ['BAG_ALWAYS'] + ' ' + os.environ['bag_kill']
        self.bag_client.send_goal(goal, done_cb=self._bag_done_cb)
    def meta_predicate(self, meta_alarm, sub_alarms):
        """Return True when the kill alarm should be raised.

        Specific sub-alarms are consulted first (with severity thresholds) and
        then excluded, via ``ignore``, from the generic any-alarm check at the
        end.
        """
        ignore = []
        # Stay killed until user clears
        if self._killed:
            return True
        if sub_alarms["pause-kill"].raised and sub_alarms["pause-kill"].severity == 5:
            return True
        ignore.append("pause-kill")
        # Battery too low
        if sub_alarms["bus-voltage"].raised and sub_alarms["bus-voltage"].severity == 5:
            return True
        ignore.append("bus-voltage")
        if sub_alarms["odom-kill"].raised and sub_alarms["odom-kill"].severity == 5:
            return True
        ignore.append("odom-kill")
        # If we lose network but don't want to go autonomous
        if sub_alarms["network-loss"].raised and not rospy.get_param("/autonomous", False):
            return True
        ignore.append("network-loss")
        # Severity level of 3 means too many thrusters out (3 thrusters out)
        if sub_alarms["thruster-out"].raised and sub_alarms["thruster-out"].severity == 3:
            return True
        ignore.append("thruster-out")
        # If a mission wants us to kill, go ahead and kill
        if sub_alarms["mission-kill"].raised:
            self._last_mission_killed = True
            return True
        elif self._last_mission_killed:
            self._last_mission_killed = False
            # If we weren't killed by another source, clear the kill
            if not self._killed:
                return False
        ignore.append("mission-kill")
        # Raised if any alarms besides the ones handled above are raised
        return any([alarm.raised for name, alarm in sub_alarms.items()
                    if name not in ignore])
| 36.096774 | 107 | 0.619303 | 3,177 | 0.946381 | 0 | 0 | 0 | 0 | 0 | 0 | 876 | 0.260947 |
cacea4a58d86e5f652adf524e84090333af95605 | 12,094 | py | Python | Projects/Healthcare/breast-cancer/src/data_loading/augmentations.py | DanielMabadeje/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 7adab3877fc1d3f1d5f57e6c1743dae8f76f72c5 | [
"Apache-2.0"
] | 3,266 | 2017-08-06T16:51:46.000Z | 2022-03-30T07:34:24.000Z | Projects/Healthcare/breast-cancer/src/data_loading/augmentations.py | Yasin-Shah/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 243a2a744ced81b69438e08e981249d7629a1f03 | [
"Apache-2.0"
] | 150 | 2017-08-28T14:59:36.000Z | 2022-03-11T23:21:35.000Z | Projects/Healthcare/breast-cancer/src/data_loading/augmentations.py | sourcecode369/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 372874b93b0d054abc499887cba39e93a00f10a9 | [
"Apache-2.0"
] | 1,449 | 2017-08-06T17:40:59.000Z | 2022-03-31T12:03:24.000Z | # Copyright (C) 2019 Nan Wu, Jason Phang, Jungkyu Park, Yiqiu Shen, Zhe Huang, Masha Zorin,
# Stanisław Jastrzębski, Thibault Févry, Joe Katsnelson, Eric Kim, Stacey Wolfson, Ujas Parikh,
# Sushma Gaddam, Leng Leng Young Lin, Kara Ho, Joshua D. Weinstein, Beatriu Reig, Yiming Gao,
# Hildegard Toth, Kristine Pysarenko, Alana Lewin, Jiyon Lee, Krystal Airola, Eralda Mema,
# Stephanie Chung, Esther Hwang, Naziya Samreen, S. Gene Kim, Laura Heacock, Linda Moy,
# Kyunghyun Cho, Krzysztof J. Geras
#
# This file is part of breast_cancer_classifier.
#
# breast_cancer_classifier is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# breast_cancer_classifier is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with breast_cancer_classifier. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
import cv2
import numpy as np
from src.constants import VIEWS
def shift_window_inside_image(start, end, image_axis_size, input_axis_size):
    """
    Shift a window of length ``input_axis_size`` so it lies entirely within
    ``[0, image_axis_size]`` along one axis; return the (start, end) pair.
    """
    if start < 0:
        # Window sticks out before the image: pin it to the leading edge.
        return 0, input_axis_size
    if end > image_axis_size:
        # Window sticks out past the image: pin it to the trailing edge.
        return image_axis_size - input_axis_size, image_axis_size
    return start, end
def zero_pad_and_align_window(image_axis_size, input_axis_size, max_crop_and_size_noise, bidirectional):
    """
    Compute window placement and zero padding when the image is smaller than
    the crop window plus its noise margin along one axis.

    Returns (window_start, window_end, pad_front, pad_back).
    """
    # Total padding needed: window size + noise margin minus the image extent.
    noise_margin = max_crop_and_size_noise * (2 if bidirectional else 1)
    total_pad = input_axis_size - image_axis_size + noise_margin
    assert (total_pad >= 0)
    if bidirectional:
        # Pad symmetrically and leave room for noise before the window.
        front = int(total_pad / 2)
        window_start = max_crop_and_size_noise
    else:
        front = 0
        window_start = 0
    back = total_pad - front
    return window_start, window_start + input_axis_size, front, back
def simple_resize(image_to_resize, size):
    """
    Resize ``image_to_resize`` to ``size`` (height, width) using bicubic
    interpolation, preserving a trailing singleton channel axis if present.
    """
    resized = cv2.resize(image_to_resize, (size[1], size[0]), interpolation=cv2.INTER_CUBIC)
    # cv2.resize drops a trailing (H, W, 1) channel axis; restore it.
    lost_channel_axis = (
        len(image_to_resize.shape) == 3
        and len(resized.shape) == 2
        and image_to_resize.shape[2] == 1
    )
    if lost_channel_axis:
        resized = np.expand_dims(resized, 2)
    return resized
def crop_image(image, input_size, borders):
    """
    Cut the (top, bottom, left, right) window out of ``image`` and resize the
    crop to ``input_size`` when its shape differs from the requested one.
    """
    top, bottom, left, right = borders[0], borders[1], borders[2], borders[3]
    window = image[top:bottom, left:right]
    if (bottom - top, right - left) != (input_size[0], input_size[1]):
        window = simple_resize(window, input_size)
    return window
def window_location_at_center_point(input_size, center_y, center_x):
    """
    Return the (top, bottom, left, right) borders of a window of shape
    ``input_size`` whose (floor-rounded) center is at ``(center_y, center_x)``.
    """
    height, width = input_size[0], input_size[1]
    top = center_y - height // 2
    left = center_x - width // 2
    return top, top + height, left, left + width
def sample_crop_best_center(image, input_size, random_number_generator, max_crop_noise, max_crop_size_noise,
                            best_center, view):
    """
    Crops using the best center point and ideal window size.
    Pads small images to have enough room for crop noise and size noise.
    Applies crop noise in location of the window borders.

    Args:
        image: array of shape (H, W, C).
        input_size: (height, width) of the crop window.
        random_number_generator: numpy RandomState-like generator.
        max_crop_noise: (y, x) maximum window-location jitter in pixels.
        max_crop_size_noise: maximum window-size jitter in pixels.
        best_center: (center_y, center_x) ideal window center.
        view: mammography view; must satisfy VIEWS.is_cc or VIEWS.is_mlo.

    Returns:
        (possibly padded image, borders) where borders is an int32 array of
        (top, bottom, left, right).
    """
    max_crop_noise = np.array(max_crop_noise)
    crop_noise_multiplier = np.zeros(2, dtype=np.float32)
    if max_crop_noise.any():
        # there is no point in sampling crop_noise_multiplier if it's going to be multiplied by (0, 0)
        crop_noise_multiplier = random_number_generator.uniform(low=-1.0, high=1.0, size=2)
    center_y, center_x = best_center
    # get the window around the center point. The window might be outside of the image.
    top, bottom, left, right = window_location_at_center_point(input_size, center_y, center_x)
    pad_y_top, pad_y_bottom, pad_x_right = 0, 0, 0
    if VIEWS.is_cc(view):
        if image.shape[0] < input_size[0] + (max_crop_noise[0] + max_crop_size_noise) * 2:
            # Image is smaller than window size + noise margin in y direction.
            # CC view: pad at both top and bottom
            top, bottom, pad_y_top, pad_y_bottom = zero_pad_and_align_window(image.shape[0], input_size[0],
                                                                             max_crop_noise[0] + max_crop_size_noise,
                                                                             True)
    elif VIEWS.is_mlo(view):
        if image.shape[0] < input_size[0] + max_crop_noise[0] + max_crop_size_noise:
            # Image is smaller than window size + noise margin in y direction.
            # MLO view: only pad at the bottom
            top, bottom, _, pad_y_bottom = zero_pad_and_align_window(image.shape[0], input_size[0],
                                                                     max_crop_noise[0] + max_crop_size_noise, False)
    else:
        raise KeyError("Unknown view", view)
    if image.shape[1] < input_size[1] + max_crop_noise[1] + max_crop_size_noise:
        # Image is smaller than window size + noise margin in x direction.
        left, right, _, pad_x_right = zero_pad_and_align_window(image.shape[1], input_size[1],
                                                                max_crop_noise[1] + max_crop_size_noise, False)
    # Pad image if necessary by allocating new memory and copying contents over
    if pad_y_top > 0 or pad_y_bottom > 0 or pad_x_right > 0:
        new_zero_array = np.zeros((
            image.shape[0] + pad_y_top + pad_y_bottom,
            image.shape[1] + pad_x_right, image.shape[2]), dtype=image.dtype)
        new_zero_array[pad_y_top: image.shape[0] + pad_y_top, 0: image.shape[1]] = image
        image = new_zero_array
    # if window is drawn outside of image, shift it to be inside the image.
    top, bottom = shift_window_inside_image(top, bottom, image.shape[0], input_size[0])
    left, right = shift_window_inside_image(left, right, image.shape[1], input_size[1])
    # When the window already touches an image edge, force the noise direction
    # away from that edge so the jittered window stays inside the image.
    if top == 0:
        # there is nowhere to shift upwards, we only apply noise downwards
        crop_noise_multiplier[0] = np.abs(crop_noise_multiplier[0])
    elif bottom == image.shape[0]:
        # there is nowhere to shift down, we only apply noise upwards
        crop_noise_multiplier[0] = -np.abs(crop_noise_multiplier[0])
    # else: we do nothing to the noise multiplier
    if left == 0:
        # there is nowhere to shift left, we only apply noise to move right
        crop_noise_multiplier[1] = np.abs(crop_noise_multiplier[1])
    elif right == image.shape[1]:
        # there is nowhere to shift right, we only apply noise to move left
        crop_noise_multiplier[1] = -np.abs(crop_noise_multiplier[1])
    # else: we do nothing to the noise multiplier
    borders = np.array((top, bottom, left, right), dtype=np.int32)
    # Calculate maximum amount of how much the window can move for cropping noise
    top_margin = top
    bottom_margin = image.shape[0] - bottom
    left_margin = left
    right_margin = image.shape[1] - right
    if crop_noise_multiplier[0] >= 0:
        vertical_margin = bottom_margin
    else:
        vertical_margin = top_margin
    if crop_noise_multiplier[1] >= 0:
        horizontal_margin = right_margin
    else:
        horizontal_margin = left_margin
    # Clamp the noise amplitude by the available margin in the chosen direction.
    if vertical_margin < max_crop_noise[0]:
        max_crop_noise[0] = vertical_margin
    if horizontal_margin < max_crop_noise[1]:
        max_crop_noise[1] = horizontal_margin
    crop_noise = np.round(max_crop_noise * crop_noise_multiplier)
    crop_noise = np.array((crop_noise[0], crop_noise[0], crop_noise[1], crop_noise[1]), dtype=np.int32)
    borders = borders + crop_noise
    # this is to make sure that the cropping window isn't outside of the image
    assert (borders[0] >= 0) and (borders[1] <= image.shape[0]) and (borders[2] >= 0) and (borders[3] <= image.shape[
        1]), "Centre of the crop area is sampled such that the borders are outside of the image. Borders: " + str(
        borders) + ', image shape: ' + str(image.shape)
    # return the padded image and cropping window information
    return image, borders
def sample_crop(image, input_size, borders, random_number_generator, max_crop_size_noise):
    """
    Jitter the size of the crop window by moving each of its four borders
    independently, keeping the window inside the image and scaling the jitter
    so it roughly preserves the aspect ratio of ``input_size``.
    """
    noise_multiplier = random_number_generator.uniform(low=-1.0, high=1.0, size=4)
    # The window may not grow past any image edge, so cap the noise by the
    # smallest distance between the window and the image boundary.
    margins = (
        borders[0],
        image.shape[0] - borders[1],
        borders[2],
        image.shape[1] - borders[3],
    )
    noise_cap = min(max_crop_size_noise, *margins)
    # Scale the shorter axis's noise so both axes jitter proportionally.
    if input_size[0] >= input_size[1]:
        vertical_cap = noise_cap
        horizontal_cap = np.round(noise_cap * (input_size[1] / input_size[0]))
    elif input_size[0] < input_size[1]:
        vertical_cap = np.round(noise_cap * (input_size[0] / input_size[1]))
        horizontal_cap = noise_cap
    else:
        raise RuntimeError()
    per_border_cap = np.array((vertical_cap, vertical_cap, horizontal_cap, horizontal_cap),
                              dtype=np.int32)
    size_noise = np.array(np.round(per_border_cap * noise_multiplier), dtype=np.int32)
    borders = borders + size_noise
    # this is to make sure that the cropping window isn't outside of the image
    assert (borders[0] >= 0) and (borders[1] <= image.shape[0]) and (borders[2] >= 0) and (borders[3] <= image.shape[
        1]), "Center of the crop area is sampled such that the borders are outside of the image. Borders: " + str(
        borders) + ', image shape: ' + str(image.shape)
    # Sanity check. make sure that the top is above the bottom
    assert borders[1] > borders[0], "Bottom above the top. Top: " + str(borders[0]) + ', bottom: ' + str(borders[1])
    # Sanity check. make sure that the left is left to the right
    assert borders[3] > borders[2], "Left on the right. Left: " + str(borders[2]) + ', right: ' + str(borders[3])
    return borders
def random_augmentation_best_center(image, input_size, random_number_generator, max_crop_noise=(0, 0),
                                    max_crop_size_noise=0, auxiliary_image=None,
                                    best_center=None, view=""):
    """
    Sample a noisy crop window around ``best_center`` and cut it out of
    ``image`` (and, stacked channel-wise, out of ``auxiliary_image`` if given).

    Returns (cropped image, cropped auxiliary channels or None).
    """
    # Stack the auxiliary channels onto the image so one crop serves both.
    stacked = np.expand_dims(image, 2)
    if auxiliary_image is not None:
        stacked = np.concatenate([stacked, auxiliary_image], axis=2)
    # Location noise (with padding if needed), then size noise, then the crop.
    stacked, window = sample_crop_best_center(stacked, input_size, random_number_generator,
                                              max_crop_noise, max_crop_size_noise, best_center, view)
    window = sample_crop(stacked, input_size, window, random_number_generator, max_crop_size_noise)
    cropped = crop_image(stacked, input_size, window)
    if auxiliary_image is None:
        return cropped[:, :, 0], None
    return cropped[:, :, 0], cropped[:, :, 1:]
| 44.792593 | 117 | 0.670415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,811 | 0.315037 |
caceb51d93c36ec68a44bf085fefaa6d893e959c | 2,609 | py | Python | ooobuild/dyn/awt/field_unit.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/awt/field_unit.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/awt/field_unit.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.awt
from enum import IntEnum
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
# Tracks whether the real UNO constant group can be imported at runtime.
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
    _DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
    # Live office/UNO environment: use the real constant group and tag it
    # with the namespace metadata ooobuild expects on dynamic consts.
    from com.sun.star.awt import FieldUnit as FieldUnit
    if hasattr(FieldUnit, '_constants') and isinstance(FieldUnit._constants, dict):
        FieldUnit._constants['__ooo_ns__'] = 'com.sun.star.awt'
        FieldUnit._constants['__ooo_full_ns__'] = 'com.sun.star.awt.FieldUnit'
        FieldUnit._constants['__ooo_type_name__'] = 'const'
    def build_enum():
        # Mirror the runtime constant group into an IntEnum built from all of
        # its non-callable, non-dunder attributes.
        global FieldUnitEnum
        ls = [f for f in dir(FieldUnit) if not callable(getattr(FieldUnit, f)) and not f.startswith('__')]
        _dict = {}
        for name in ls:
            _dict[name] = getattr(FieldUnit, name)
        FieldUnitEnum = IntEnum('FieldUnitEnum', _dict)
    build_enum()
else:
    # No UNO runtime (or static type checking): fall back to the bundled
    # static definition and declare the enum members explicitly.
    from ...lo.awt.field_unit import FieldUnit as FieldUnit
    class FieldUnitEnum(IntEnum):
        """
        Enum of Const Class FieldUnit
        specifies attributes for the MetricField map units.
        IMPORTANT: These constants have to be disjunct with constants in util/MeasureUnit.
        """
        FUNIT_NONE = FieldUnit.FUNIT_NONE
        FUNIT_MM = FieldUnit.FUNIT_MM
        FUNIT_CM = FieldUnit.FUNIT_CM
        FUNIT_M = FieldUnit.FUNIT_M
        FUNIT_KM = FieldUnit.FUNIT_KM
        FUNIT_TWIP = FieldUnit.FUNIT_TWIP
        FUNIT_POINT = FieldUnit.FUNIT_POINT
        FUNIT_PICA = FieldUnit.FUNIT_PICA
        FUNIT_INCH = FieldUnit.FUNIT_INCH
        FUNIT_FOOT = FieldUnit.FUNIT_FOOT
        FUNIT_MILE = FieldUnit.FUNIT_MILE
        FUNIT_CUSTOM = FieldUnit.FUNIT_CUSTOM
        FUNIT_PERCENT = FieldUnit.FUNIT_PERCENT
        FUNIT_100TH_MM = FieldUnit.FUNIT_100TH_MM
__all__ = ['FieldUnit', 'FieldUnitEnum']
| 37.811594 | 106 | 0.708317 | 842 | 0.322729 | 0 | 0 | 0 | 0 | 0 | 0 | 1,086 | 0.416251 |
cad072e6da4ed8b99fa019319f2a53f7231fa953 | 7,202 | py | Python | utils.py | zrongcheng/srcnn | 33942c0a54fbdc3c59c4ab796c98a64eef10db77 | [
"Apache-2.0"
] | null | null | null | utils.py | zrongcheng/srcnn | 33942c0a54fbdc3c59c4ab796c98a64eef10db77 | [
"Apache-2.0"
] | null | null | null | utils.py | zrongcheng/srcnn | 33942c0a54fbdc3c59c4ab796c98a64eef10db77 | [
"Apache-2.0"
] | null | null | null | """
Scipy version > 0.18 is needed, due to 'mode' option from scipy.misc.imread function
"""
import os
import glob
import random

import h5py
import matplotlib.pyplot as plt
import numpy as np
import scipy.misc
import scipy.ndimage
# 'import Scipy.misc' in the original names a nonexistent (mis-cased) module;
# the lowercase imports above plus this alias keep every 'Scipy.*' reference
# in this file working.
import scipy as Scipy
import tensorflow as tf
from PIL import Image  # for loading images as YCbCr format
FLAGS = tf.app.flags.FLAGS#全局变量
def read_data(path):
  """
  Read an ``.h5`` data file produced by make_data.

  Args:
    path: file path of the desired '.h5' file containing the 'data'
      (train inputs) and 'label' (train labels) datasets.
  Returns:
    (data, label) as numpy arrays.
  """
  with h5py.File(path, 'r') as hf:
    return np.array(hf.get('data')), np.array(hf.get('label'))
def preprocess(path, scale=3):
  """
  Preprocess a single image file.

  (1) Read the image as YCbCr (grayscale by default)
  (2) Crop it so both sides divide evenly by ``scale`` and normalize to [0, 1]
  (3) Zoom down then back up by ``scale`` (bicubic) to build the blurred input

  Args:
    path: file path of the desired image
  Returns:
    input_: bicubic-interpolated low-resolution image
    label_: image at the original resolution
  """
  # NOTE(review): unclear why the image is read as grayscale here -- confirm.
  raw = imread(path, is_grayscale=True)
  label_ = modcrop(raw, scale)
  # Must be normalized
  label_ = label_ / 255.
  # Two zoom passes: downscale discards detail, upscale (bicubic) restores
  # the original size, yielding the degraded network input.
  input_ = Scipy.ndimage.interpolation.zoom(label_, zoom=(1. / scale), prefilter=False)
  input_ = Scipy.ndimage.interpolation.zoom(input_, zoom=(scale / 1.), prefilter=False)
  return input_, label_
def prepare_data(dataset):
  """
  Collect the ``.bmp`` file paths of the requested dataset.

  Args:
    dataset: dataset folder name ('Train' or 'Test').
  Returns:
    For training, every .bmp directly under ``dataset``; for testing, every
    .bmp under ``dataset/Set5`` (e.g. ['.../t1.bmp', ..., '.../t99.bmp']).
  """
  if FLAGS.is_train:
    folder = os.path.join(os.getcwd(), dataset)
  else:
    folder = os.path.join(os.sep, (os.path.join(os.getcwd(), dataset)), "Set5")
  # glob expands the '*' wildcard into a list of matching paths.
  return glob.glob(os.path.join(folder, "*.bmp"))
def make_data(data, label):
  """
  Save input/label arrays as an ``.h5`` file.

  The save path depends on the ``is_train`` flag: checkpoint/train.h5 for
  training and checkpoint/test.h5 otherwise.
  """
  filename = 'checkpoint/train.h5' if FLAGS.is_train else 'checkpoint/test.h5'
  savepath = os.path.join(os.getcwd(), filename)
  with h5py.File(savepath, 'w') as hf:
    hf.create_dataset('data', data=data)
    hf.create_dataset('label', data=label)
def imread(path, is_grayscale=True):
  """
  Read the image at ``path`` in YCbCr format (as the paper does) and return
  it as a float array, optionally flattened to a single grayscale channel.
  """
  if is_grayscale:
    # flatten=True collapses the image to one (luma) channel.
    return Scipy.misc.imread(path, flatten=True, mode='YCbCr').astype(np.float)
  return Scipy.misc.imread(path, mode='YCbCr').astype(np.float)
def modcrop(image, scale=3):
  """
  Crop ``image`` so that both spatial dimensions are exact multiples of
  ``scale``, guaranteeing no remainder when scaling down and back up.
  """
  # Drop the remainder of each spatial dimension modulo the scale factor.
  height = image.shape[0] - image.shape[0] % scale
  width = image.shape[1] - image.shape[1] % scale
  if len(image.shape) == 3:
    # Color (H x W x C): keep all channels.
    return image[0:height, 0:width, :]
  # Grayscale (H x W).
  return image[0:height, 0:width]
def input_setup(config):
  """
  Read image files and make their sub-images and saved them as a h5 file format.

  Training: slides a stride-spaced window over every training image, storing
  33x33 inputs and centered 21x21 labels into checkpoint/train.h5 (returns None).
  Testing: returns (input, label, h, w) for one image.
  """
  # Load data path
  if config.is_train:
    data = prepare_data(dataset="Train")
  else:
    data = prepare_data(dataset="Test")
  sub_input_sequence = []
  sub_label_sequence = []
  # Half the size difference between input and label patches (e.g. 6).
  padding = abs(config.image_size - config.label_size) // 2  # 6
  if config.is_train:
    for i in range(len(data)):
      input_, label_ = preprocess(data[i], config.scale)  # data[i] is an image path
      if len(input_.shape) == 3:
        h, w, _ = input_.shape
      else:
        h, w = input_.shape
      # Slide the crop window across the image with the configured stride.
      for x in range(0, h-config.image_size+1, config.stride):
        for y in range(0, w-config.image_size+1, config.stride):
          sub_input = input_[x:x+config.image_size, y:y+config.image_size] # [33 x 33]
          sub_label = label_[x+padding:x+padding+config.label_size,
                     y+padding:y+padding+config.label_size] # [21 x 21]
          # Make channel value (single color channel)
          sub_input = sub_input.reshape([config.image_size, config.image_size, 1])
          sub_label = sub_label.reshape([config.label_size, config.label_size, 1])
          sub_input_sequence.append(sub_input)
          sub_label_sequence.append(sub_label)
    # Make list to numpy array. With this transform
    arrdata = np.asarray(sub_input_sequence) # [?, 33, 33, 1]
    arrlabel = np.asarray(sub_label_sequence) # [?, 21, 21, 1]
    make_data(arrdata, arrlabel)  # store the prepared data under checkpoint/..
  else:
    # NOTE(review): test path is hard-coded to the 5th image (data[4]) -- confirm.
    input_, label_ = preprocess(data[4], config.scale)
    if len(input_.shape) == 3:
      h, w, _ = input_.shape
    else:
      h, w = input_.shape
    # 'input' shadows the builtin; label is center-cropped by the 6px padding.
    input = input_.reshape([h,w,1])
    label=label_[6:h-6,6:w-6]
    label=label.reshape([h-12,w-12,1])
    sub_input_sequence.append(input)
    sub_label_sequence.append(label)
    input1 = np.asarray(sub_input_sequence)
    label1 = np.asarray(sub_label_sequence)
    #label=label_.reshape([height,weight,1])
    return input1,label1,h,w
  # # Numbers of sub-images in height and width of image are needed to compute merge operation.
  # nx = ny = 0
  # for x in range(0, h-config.image_size+1, config.stride):
  #   nx += 1; ny = 0
  #   for y in range(0, w-config.image_size+1, config.stride):
  #     ny += 1
  #     sub_input = input_[x:x+config.image_size, y:y+config.image_size] # [33 x 33]
  #     sub_label = label_[x+padding:x+padding+config.label_size,
  #                y+padding:y+padding+config.label_size] # [21 x 21]
  #
  #     sub_input = sub_input.reshape([config.image_size, config.image_size, 1])
  #     sub_label = sub_label.reshape([config.label_size, config.label_size, 1])
  #
  #     sub_input_sequence.append(sub_input)
  #     sub_label_sequence.append(sub_label)
  """
  len(sub_input_sequence) : the number of sub_input (33 x 33 x ch) in one image
  (sub_input_sequence[0]).shape : (33, 33, 1)
  """
  # if not config.is_train:
  #   return nx, ny
def imsave(image, path):
  # Thin wrapper over scipy's imsave (argument order flipped to image, path).
  return Scipy.misc.imsave(path, image)
# def merge(images, size):
# h, w = images.shape[1], images.shape[2]#21*21
# p,q,j=0,0,0
# img = np.zeros((14*(size[0]-1)+21, 14*(size[1]-1)+21, 1))
# for idx, image in enumerate(images):#image.shape=(21,21,1)
# i = idx % size[1]#余数
# t=j
# j = idx // size[1]#商
# if (j-t)==1:
# p=p+14
# q=0
# #img[0:21,0:21,:]=image
# img[p:p+h, q:q+w, :] = image
#
# q=q+14
#
# return img
| 31.177489 | 109 | 0.650097 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,963 | 0.522134 |
cad1115bd177c78411f2be817f4464e97774f362 | 826 | py | Python | db/mongodb/save_all.py | kosyachniy/dev | 39bb5c5ee10780bfcd8a59cf59cfb1a348ac52a4 | [
"Apache-2.0"
] | 13 | 2018-12-17T23:30:54.000Z | 2021-12-29T14:31:43.000Z | db/mongodb/save_all.py | kosyachniy/dev | 39bb5c5ee10780bfcd8a59cf59cfb1a348ac52a4 | [
"Apache-2.0"
] | 36 | 2018-06-07T21:34:13.000Z | 2022-03-13T21:01:43.000Z | db/mongodb/save_all.py | kosyachniy/dev | 39bb5c5ee10780bfcd8a59cf59cfb1a348ac52a4 | [
"Apache-2.0"
] | 2 | 2021-01-03T11:47:20.000Z | 2021-12-29T14:31:49.000Z | import os
import os
import json

from pymongo import MongoClient

# Load MongoDB credentials from a local keys file.
with open('keys.json', 'r') as file:
    keys = json.loads(file.read())

db_all = MongoClient(
    username=keys['login'],
    password=keys['password'],
    authSource='admin',
    authMechanism='SCRAM-SHA-1'
)

# Dump every user database: one folder per database, one text file per
# collection, one JSON document per line (with the Mongo '_id' removed).
for db_name in db_all.list_database_names():
    # Skip MongoDB's internal databases.
    if db_name in ('admin', 'config', 'local'):
        continue

    print('---', db_name, '---')
    # makedirs+exist_ok creates 'db/' if missing and keeps the script
    # re-runnable (the original os.mkdir failed on both counts).
    os.makedirs('db/{}'.format(db_name), exist_ok=True)
    db = db_all[db_name]

    collections = [collection['name'] for collection in db.list_collections()]

    for collection_name in collections:
        print('{}'.format(collection_name), end=' ')
        # Explicit UTF-8: documents are written with ensure_ascii=False, so
        # the output must not depend on the system locale.
        with open('db/{}/{}.txt'.format(db_name, collection_name), 'w', encoding='utf-8') as file:
            for i in db[collection_name].find():
                del i['_id']
                print(json.dumps(i, ensure_ascii=False), file=file)  # , indent='\t'
        print('✅')
| 22.324324 | 75 | 0.670702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.175121 |
cad1619bf5ab33d3d587d63fa05c0c21b4bc2a86 | 797 | py | Python | setup.py | saksham/nepse-crawler | adee65ebc86e269c2941d48f91018b8e3263e14e | [
"MIT"
] | 1 | 2019-10-25T18:41:24.000Z | 2019-10-25T18:41:24.000Z | setup.py | saksham/nepse-crawler | adee65ebc86e269c2941d48f91018b8e3263e14e | [
"MIT"
] | null | null | null | setup.py | saksham/nepse-crawler | adee65ebc86e269c2941d48f91018b8e3263e14e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from distutils.core import setup
from codecs import open
from os import path
# Directory containing this setup.py; metadata files are resolved against it.
here = path.abspath(path.dirname(__file__))


def read_file(filename):
    """Return the text of *filename*, resolved relative to this file."""
    # Explicit UTF-8 so packaging metadata (README, requirements) reads the
    # same regardless of the system locale; codecs.open without an encoding
    # behaves like the builtin open and would use the locale default.
    with open(path.join(here, filename), encoding='utf-8') as f:
        return f.read()
# NOTE(review): install_requires and entry_points are setuptools features;
# consider importing setup from setuptools instead of distutils.core.
setup(name='nepse-crawler',
      version='0.0.1',
      description='Crawls websites for Nepal Stock Exchange and loads them to SQLite3 database',
      long_description=read_file('README.md'),
      url='https://github.com/saksham/nepse-crawler',
      # install_requires expects one requirement string per entry; passing the
      # raw file contents handed it a single blob that gets iterated
      # character by character.
      install_requires=read_file('requirements.txt').splitlines(),
      author='Saksham',
      author_email='saksham@no-reply.github.com',
      license='MIT',
      entry_points={
          'console_scripts': [
              'crawl-nepse=crawler.__main__:main'
          ]
      })
| 27.482759 | 96 | 0.644918 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 309 | 0.387704 |
cad2fe1cdd6d86eceead5fb008c5cfdfa6b6f198 | 2,678 | py | Python | backend/backend.py | alexnbferreira/CryptoTracker | 7071fab27200c59288eade6616e6c4f5e2eaa5ad | [
"MIT"
] | null | null | null | backend/backend.py | alexnbferreira/CryptoTracker | 7071fab27200c59288eade6616e6c4f5e2eaa5ad | [
"MIT"
] | null | null | null | backend/backend.py | alexnbferreira/CryptoTracker | 7071fab27200c59288eade6616e6c4f5e2eaa5ad | [
"MIT"
] | null | null | null | from flask import Flask, Response, render_template
import requests
import json
import logging
from pandas import read_json
from decimal import Decimal
# Flask application serving the crypto price-comparison API.
app = Flask(__name__)
# Exchange identifiers understood by the moneeda API.
exchanges = ["BNB", "BTX", "BFX"]
# NOTE(review): bearer token hard-coded in source -- this credential should be
# loaded from the environment or a secrets store, not committed.
token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJlbWFpbCI6ImRldkBnbG92b2FwcC5jb20iLCJpZCI6IjVhNTcyZGEyNTM4OWMzNzZiZWZlNjY1NCIsImlhdCI6MTUxNTY2MjgyMn0.a6homMOumqLBxwfX9nOwbBaxmSx-srkS8dISSPCPPYE"
logging.basicConfig()
logger = logging.getLogger(__name__)
# Unused placeholder -- presumably reserved for caching base currencies; confirm.
base_currencies = None
@app.route("/products", methods=["GET"])
def get_products():
    """Return, as JSON, the currency pairs listed on all three exchanges.

    Fetches each exchange's product list from the moneeda API and intersects
    the pair ids; exchanges that cannot be reached are skipped.
    """
    headers = {"Authorization": "Bearer {}".format(token)}
    api_endpoint = "https://api.moneeda.com/api/exchanges/{}/products"
    common_pairs = None
    for exchange in exchanges:
        # Fetch products for each exchange and store the pairs
        try:
            resp = requests.get(api_endpoint.format(exchange), headers=headers).text
            pairs_df = read_json(resp)
            pairs = pairs_df["id"].tolist()
        except requests.exceptions.ConnectionError as e:
            logger.error("No response from server")
            logger.error(e)
            continue
        # Intersect with the pairs seen so far (identity check: the original
        # compared a set with '== None', which is E711 and fragile).
        if common_pairs is None:
            common_pairs = set(pairs)
        else:
            common_pairs = common_pairs & set(pairs)
    # If every exchange failed, report an empty list instead of crashing on
    # list(None).
    if common_pairs is None:
        common_pairs = set()
    return Response(json.dumps(list(common_pairs)), mimetype="application/json")
@app.route("/products/<pair>/prices", methods=["GET"])
def get_product_prices(pair):
    """Return, as JSON, the price of ``pair`` on each configured exchange.

    Exchanges that cannot be reached are simply omitted from the result.
    """
    headers = {"Authorization": "Bearer {}".format(token)}
    api_endpoint = "https://api.moneeda.com/api/exchanges/{exchange}/ticker?product={pair}"
    prices = {}
    for exchange in exchanges:
        url = api_endpoint.format(exchange=exchange, pair=pair)
        try:
            ticker = requests.get(url, headers=headers).text
            # Parse floats as Decimal so no precision is lost, then serialize
            # the price back as a string.
            payload = json.loads(ticker, parse_float=Decimal)
            prices[exchange] = str(payload["price"])
        except requests.exceptions.ConnectionError as e:
            logger.error("Error fetching prices")
            logger.error(e)
            continue
    return Response(json.dumps(prices), mimetype="application/json")
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
    """Serve the SPA entry point for every unmatched path.

    All non-API routes render ``index.html`` so client-side routing can
    handle the path; the captured ``path`` value itself is unused.
    """
    return render_template("index.html")
app.run(debug=True) | 38.257143 | 194 | 0.685213 | 0 | 0 | 0 | 0 | 2,164 | 0.808066 | 0 | 0 | 955 | 0.356609 |
cad3c7ef16ebb65b6d272b2d40089713a3ce54ff | 11,375 | py | Python | data_pipeline/_kafka_producer.py | poros/data_pipeline | e143a4031b0940e17b22cdf36db0b677b46e3975 | [
"Apache-2.0"
] | 110 | 2016-11-17T18:32:25.000Z | 2022-01-03T17:27:58.000Z | data_pipeline/_kafka_producer.py | poros/data_pipeline | e143a4031b0940e17b22cdf36db0b677b46e3975 | [
"Apache-2.0"
] | 12 | 2016-11-18T00:00:37.000Z | 2018-01-14T00:31:37.000Z | data_pipeline/_kafka_producer.py | poros/data_pipeline | e143a4031b0940e17b22cdf36db0b677b46e3975 | [
"Apache-2.0"
] | 25 | 2016-11-18T15:00:16.000Z | 2020-10-01T13:42:47.000Z | # -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import time
from collections import defaultdict
from collections import namedtuple
from contextlib import contextmanager
from cached_property import cached_property
from kafka import create_message
from kafka import KafkaClient
from kafka.common import ProduceRequest
from data_pipeline._position_data_tracker import PositionDataTracker
from data_pipeline._producer_retry import RetryHandler
from data_pipeline._retry_util import ExpBackoffPolicy
from data_pipeline._retry_util import MaxRetryError
from data_pipeline._retry_util import Predicate
from data_pipeline._retry_util import retry_on_condition
from data_pipeline._retry_util import RetryPolicy
from data_pipeline.config import get_config
from data_pipeline.envelope import Envelope
# Pairs an Envelope with the Message it will pack; lives at module scope
# (together with _prepare below) so it can be pickled for multiprocessing.
_EnvelopeAndMessage = namedtuple("_EnvelopeAndMessage", ["envelope", "message"])
# Shared logger from the data pipeline configuration.
logger = get_config().logger
# prepare needs to be in the module top level so it can be serialized for
# multiprocessing
def _prepare(envelope_and_message):
    """Pack an (envelope, message) pair into a kafka message.

    The kafka message key is attached only when the message declares keys.
    Failures are logged before re-raising so errors that occur in worker
    processes still show up in the logs.
    """
    try:
        kwargs = {}
        if envelope_and_message.message.keys:
            kwargs['key'] = envelope_and_message.message.encoded_keys
        return create_message(
            envelope_and_message.envelope.pack(envelope_and_message.message),
            **kwargs
        )
    except Exception:
        # A bare ``except:`` would also intercept SystemExit and
        # KeyboardInterrupt; catch only real errors before re-raising.
        logger.exception('Prepare failed')
        raise
class KafkaProducer(object):
    """The KafkaProducer deals with buffering messages that need to be published
    into Kafka, preparing them for publication, and ultimately publishing them.
    Args:
        producer_position_callback (function): The producer position callback
            is called when the KafkaProducer is instantiated, and every time
            messages are published to notify the producer of current position
            information of successfully published messages.
        dry_run (Optional[bool]): When dry_run mode is on, the producer won't
            talk to real KafKa topic, nor to real Schematizer. Default to False.
    """
    @cached_property
    def envelope(self):
        # Lazily constructed Envelope used to pack outgoing messages.
        return Envelope()
    def __init__(self, producer_position_callback, dry_run=False):
        self.producer_position_callback = producer_position_callback
        self.dry_run = dry_run
        self.kafka_client = KafkaClient(get_config().cluster_config.broker_list)
        self.position_data_tracker = PositionDataTracker()
        # Initializes start_time, message_buffer and message_buffer_size.
        self._reset_message_buffer()
        self.skip_messages_with_pii = get_config().skip_messages_with_pii
        # Exponential backoff (with jitter) between publish retries.
        self._publish_retry_policy = RetryPolicy(
            ExpBackoffPolicy(with_jitter=True),
            max_retry_count=get_config().producer_max_publish_retry_count
        )
        self._automatic_flush_enabled = True
    @contextmanager
    def disable_automatic_flushing(self):
        """Prevents the producer from flushing automatically (e.g. for timeouts
        or batch size) while the context manager is open.
        """
        try:
            self._automatic_flush_enabled = False
            yield
        finally:
            self._automatic_flush_enabled = True
    def wake(self):
        """Should be called periodically if we're not otherwise waking up by
        publishing, to ensure that messages are actually published.
        """
        # if we haven't woken up in a while, we may need to flush messages
        self._flush_if_necessary()
    def publish(self, message):
        """Buffer a message for publication, then flush if the buffer has hit
        its time or size threshold.  Messages containing PII are dropped
        (with an info log) when the pipeline is configured to skip them.
        """
        if message.contains_pii and self.skip_messages_with_pii:
            logger.info(
                "Skipping a PII message - "
                "uuid hex: {0}, "
                "schema_id: {1}, "
                "timestamp: {2}, "
                "type: {3}".format(
                    message.uuid_hex,
                    message.schema_id,
                    message.timestamp,
                    message.message_type.name
                )
            )
            return
        self._add_message_to_buffer(message)
        self.position_data_tracker.record_message_buffered(message)
        self._flush_if_necessary()
    def flush_buffered_messages(self):
        """Publish everything currently buffered (real or dry-run publish),
        then reset the buffer and report position data via the callback.
        """
        produce_method = (self._publish_produce_requests_dry_run
                          if self.dry_run else self._publish_produce_requests)
        produce_method(self._generate_produce_requests())
        self._reset_message_buffer()
    def close(self):
        """Flush any remaining buffered messages and close the kafka client."""
        self.flush_buffered_messages()
        self.kafka_client.close()
    def _publish_produce_requests(self, requests):
        """It will try to publish all the produce requests for topics, and
        retry a number of times until either all the requests are successfully
        published or it can no longer retry, in which case, the exception will
        be thrown.
        Each time the requests that are successfully published in the previous
        round will be removed from the requests and won't be published again.
        """
        unpublished_requests = list(requests)
        retry_handler = RetryHandler(self.kafka_client, unpublished_requests)
        def has_requests_to_be_sent():
            return bool(retry_handler.requests_to_be_sent)
        retry_handler = retry_on_condition(
            retry_policy=self._publish_retry_policy,
            retry_conditions=[Predicate(has_requests_to_be_sent)],
            func_to_retry=self._publish_requests,
            use_previous_result_as_param=True,
            retry_handler=retry_handler
        )
        if retry_handler.has_unpublished_request:
            raise MaxRetryError(last_result=retry_handler)
    def _publish_requests(self, retry_handler):
        """Main function to publish message requests. This function is wrapped
        with retry function and will be retried based on specified retry policy
        Args:
            retry_handler: :class:`data_pipeline._producer_retry.RetryHandler`
                that determines which messages should be retried next time.
        """
        if not retry_handler.requests_to_be_sent:
            return retry_handler
        responses = self._try_send_produce_requests(
            retry_handler.requests_to_be_sent
        )
        # Partition the responses into successes (recorded below) and
        # failures (kept by the handler for the next retry round).
        retry_handler.update_requests_to_be_sent(
            responses,
            self.position_data_tracker.topic_to_kafka_offset_map
        )
        self._record_success_requests(retry_handler.success_topic_stats_map)
        return retry_handler
    def _try_send_produce_requests(self, requests):
        # Either it throws exceptions and none of them succeeds, or it returns
        # responses of all the requests (success or fail response).
        try:
            return self.kafka_client.send_produce_request(
                payloads=requests,
                acks=get_config().kafka_client_ack_count,
                fail_on_error=False
            )
        except Exception:
            # Exceptions like KafkaUnavailableError, LeaderNotAvailableError,
            # UnknownTopicOrPartitionError, etc., are not controlled by
            # `fail_on_error` flag and could be thrown from the kafka client,
            # and fail all the requests. We will retry all the requests until
            # either all of them are successfully published or it exceeds the
            # maximum retry criteria.
            return []
    def _record_success_requests(self, success_topic_stats_map):
        # Record published offsets and drop the published topics from the
        # buffer so a later retry round won't re-send them.
        for topic_partition, stats in success_topic_stats_map.iteritems():
            topic = topic_partition.topic_name
            # A success response must account for every buffered message of
            # the topic; anything else indicates bookkeeping corruption.
            assert stats.message_count == len(self.message_buffer[topic])
            self.position_data_tracker.record_messages_published(
                topic=topic,
                offset=stats.original_offset,
                message_count=stats.message_count
            )
            self.message_buffer.pop(topic)
    def _publish_produce_requests_dry_run(self, requests):
        # Dry-run path: record positions without touching Kafka.
        for request in requests:
            self._publish_single_request_dry_run(request)
    def _publish_single_request_dry_run(self, request):
        topic = request.topic
        message_count = len(request.messages)
        # Offset -1 marks the record as a dry-run (nothing actually written).
        self.position_data_tracker.record_messages_published(
            topic,
            -1,
            message_count
        )
    def _is_ready_to_flush(self):
        # Flush when automatic flushing is on and either the time limit has
        # elapsed since the last reset or the buffer reached its size cap.
        time_limit = get_config().kafka_producer_flush_time_limit_seconds
        return (self._automatic_flush_enabled and (
            (time.time() - self.start_time) >= time_limit or
            self.message_buffer_size >= get_config().kafka_producer_buffer_size
        ))
    def _flush_if_necessary(self):
        if self._is_ready_to_flush():
            self.flush_buffered_messages()
    def _add_message_to_buffer(self, message):
        topic = message.topic
        # Pack the message eagerly so flush only has to assemble requests.
        message = self._prepare_message(message)
        self.message_buffer[topic].append(message)
        self.message_buffer_size += 1
    def _generate_produce_requests(self):
        return [
            ProduceRequest(topic=topic, partition=0, messages=messages)
            for topic, messages in self._generate_prepared_topic_and_messages()
        ]
    def _generate_prepared_topic_and_messages(self):
        return self.message_buffer.iteritems()
    def _prepare_message(self, message):
        return _prepare(_EnvelopeAndMessage(envelope=self.envelope, message=message))
    def _reset_message_buffer(self):
        # Report position data before discarding the buffer, except on the
        # very first call (attribute not yet set) or when nothing was buffered.
        if not hasattr(self, 'message_buffer_size') or self.message_buffer_size > 0:
            self.producer_position_callback(self.position_data_tracker.get_position_data())
        self.start_time = time.time()
        self.message_buffer = defaultdict(list)
        self.message_buffer_size = 0
class LoggingKafkaProducer(KafkaProducer):
    """KafkaProducer variant that logs flushes, publish outcomes, buffer
    resets, and dry-run publishes around the base-class behavior.
    """

    def _publish_produce_requests(self, requests):
        """Log the flush size, delegate to the base publisher, and log
        success; a MaxRetryError is logged with its repr and re-raised.
        """
        flush_msg = "Flushing buffered messages - requests={0}, messages={1}".format(
            len(requests), self.message_buffer_size
        )
        logger.info(flush_msg)
        try:
            super(LoggingKafkaProducer, self)._publish_produce_requests(requests)
        except MaxRetryError as err:
            failure_msg = "Failed to publish all produce requests. {0}".format(repr(err))
            logger.exception(failure_msg)
            raise
        logger.info("All messages published successfully")

    def _reset_message_buffer(self):
        """Log the reset before delegating to the base implementation."""
        logger.info("Resetting message buffer for success requests.")
        super(LoggingKafkaProducer, self)._reset_message_buffer()

    def _publish_single_request_dry_run(self, request):
        """Delegate the dry-run bookkeeping, then log what would be sent."""
        super(LoggingKafkaProducer, self)._publish_single_request_dry_run(request)
        dry_run_msg = "dry_run mode: Would have published {0} messages to {1}".format(
            len(request.messages),
            request.topic
        )
        logger.debug(dry_run_msg)
| 39.359862 | 91 | 0.683868 | 9,351 | 0.822066 | 334 | 0.029363 | 420 | 0.036923 | 0 | 0 | 3,292 | 0.289407 |
cad56f6e99929dcb2621454332e576da53bc0b1f | 25,097 | py | Python | morphsed/images.py | RuancunLi/MorphSED | e386db49305d120841c61e8bea194257b4abd89b | [
"MIT"
] | null | null | null | morphsed/images.py | RuancunLi/MorphSED | e386db49305d120841c61e8bea194257b4abd89b | [
"MIT"
] | null | null | null | morphsed/images.py | RuancunLi/MorphSED | e386db49305d120841c61e8bea194257b4abd89b | [
"MIT"
] | 1 | 2021-01-04T10:54:03.000Z | 2021-01-04T10:54:03.000Z | import numpy as np
import matplotlib.pyplot as plt
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.nddata import CCDData
from astropy.nddata import Cutout2D
from astropy.stats import sigma_clipped_stats
from astropy.wcs.utils import proj_plane_pixel_scales
from .plot import plot_image
from .instrument_info import get_zp
from .utils import get_wcs_rotation
from astropy.visualization import simple_norm, make_lupton_rgb
from .math import Maskellipse,polynomialfit,cross_match
from photutils.segmentation import deblend_sources
from astropy.convolution import Gaussian2DKernel
from astropy.stats import gaussian_fwhm_to_sigma
from photutils import detect_threshold
from photutils import detect_sources
from photutils import source_properties
from astropy.table import Table, Column, join, join_skycoord
from astropy.wcs import WCS
from astropy.nddata import NDData
from photutils.psf import extract_stars
import matplotlib.colors as colors
from photutils import EPSFBuilder
__all__ = ['image', 'image_atlas']
class image(object):
    '''
    A single image object.
    Functions
    ---------
    * Read from fits file use CCDData.
    * get_size : Get the image size.
    * plot : Plot the image.
    * sigma_clipped_stats : Calculate the basic statistics of the image.
    * set_data : Load from numpy array.
    * set_mask : Set image mask.
    * set_pixel_scales : Set the pixel scales along two axes.
    * set_zero_point : Set magnitude zero point.
    '''
    def __init__(self, filename=None, hdu=0, unit=None, zero_point=None,
                 pixel_scales=None, wcs_rotation=None, mask=None, verbose=True):
        '''
        Parameters
        ----------
        filename (optional) : string
            FITS file name of the image.
        hdu : int (default: 0)
            The number of extension to load from the FITS file.
        unit (optional) : string
            Unit of the image flux for CCDData.
        zero_point (optional) : float
            Magnitude zero point.
        pixel_scales (optional) : tuple
            Pixel scales along the first and second directions, units: arcsec.
        wcs_rotation (optional) : float
            WCS rotation, east of north, units: radian.
        mask (optional) : 2D bool array
            The image mask.
        verbose : bool (default: True)
            Print out auxiliary data.
        '''
        if filename is None:
            self.data = None
        else:
            self.data = CCDData.read(filename, hdu=hdu, unit=unit, mask=mask)
            if self.data.wcs and (pixel_scales is None):
                pixel_scales = proj_plane_pixel_scales(self.data.wcs) * u.degree.to('arcsec')
        self.zero_point = zero_point
        if pixel_scales is None:
            self.pixel_scales = None
        else:
            self.pixel_scales = (pixel_scales[0]*u.arcsec, pixel_scales[1]*u.arcsec)
        # Fixed: guard on self.data so an empty image (filename=None) no
        # longer raises AttributeError on ``None.wcs`` here.
        if (self.data is not None) and self.data.wcs and (wcs_rotation is None):
            self.wcs_rotation = get_wcs_rotation(self.data.wcs)
        elif wcs_rotation is not None:
            self.wcs_rotation = wcs_rotation * u.radian
        else:
            self.wcs_rotation = None
        self.sources_catalog = None
        self.sigma_image = None
        self.sources_skycord = None
        self.ss_data = None
        self.PSF = None

    def get_size(self, units='pixel'):
        '''
        Get the size of the image.
        Parameters
        ----------
        units : string
            Units of the size (pixel or angular units).
        Returns
        -------
        x, y : float
            Size along X and Y axes.
        '''
        # Note: this method was defined twice (verbatim) in the original
        # file; the redundant duplicate has been removed.
        nrow, ncol = self.data.shape
        if units == 'pixel':
            x = ncol
            y = nrow
        else:
            x = ncol * self.pixel_scales[0].to(units).value
            y = nrow * self.pixel_scales[1].to(units).value
        return (x, y)

    def get_data_info(self):
        '''
        Data information to generate model image.
        Returns
        -------
        d : dict
            shape : (ny, nx)
                Image array shape.
            pixel_scale : (pixelscale_x, pixelscale_y), default units: arcsec
                Pixel scales.
            wcs_rotation : angle, default units: radian
                WCS rotation, east of north.
        '''
        # Fixed: the attribute is ``pixel_scales``; ``self.pixel_scale``
        # does not exist and raised an AttributeError.
        d = dict(shape=self.data.shape,
                 pixel_scale=self.pixel_scales,
                 wcs_rotation=self.wcs_rotation)
        return d

    def sigma_clipped_stats(self, **kwargs):
        '''
        Run astropy.stats.sigma_clipped_stats to get the basic statistics of
        the image.
        Parameters
        ----------
        All of the parameters go to astropy.stats.sigma_clipped_stats().
        Returns
        -------
        mean, median, stddev : float
            The mean, median, and standard deviation of the sigma-clipped data.
        '''
        return sigma_clipped_stats(self.data.data, mask=self.data.mask, **kwargs)

    def plot(self, stretch='asinh', units='arcsec', vmin=None, vmax=None,
             a=None, ax=None, plain=False, **kwargs):
        '''
        Plot an image.
        Parameters
        ----------
        stretch : string (default: 'asinh')
            Choice of stretch: asinh, linear, sqrt, log.
        units : string (default: 'arcsec')
            Units of pixel scale.
        vmin (optional) : float
            Minimal value of imshow.
        vmax (optional) : float
            Maximal value of imshow.
        a (optional) : float
            Scale factor of some stretch function.
        ax (optional) : matplotlib.Axis
            Axis to plot the image.
        plain : bool (default: False)
            If False, tune the image.
        **kwargs : Additional parameters goes into plt.imshow()
        Returns
        -------
        ax : matplotlib.Axis
            Axis to plot the image.
        '''
        assert self.data is not None, 'Set data first!'
        ax = plot_image(self.data, self.pixel_scales, stretch=stretch,
                        units=units, vmin=vmin, vmax=vmax, a=a, ax=ax,
                        plain=plain, **kwargs)
        if plain is False:
            ax.set_xlabel(r'$\Delta X$ ({0})'.format(units), fontsize=24)
            ax.set_ylabel(r'$\Delta Y$ ({0})'.format(units), fontsize=24)
        return ax

    def plot_direction(self, ax, xy=(0, 0), len_E=None, len_N=None, color='k', fontsize=20,
                       linewidth=2, frac_len=0.1, units='arcsec', backextend=0.05):
        '''
        Plot the direction arrow. Only applied to plots using WCS.
        Parameters
        ----------
        ax : Axis
            Axis to plot the direction.
        xy : (x, y)
            Coordinate of the origin of the arrows.
        len_E, len_N (optional) : float
            Lengths of the East/North arrows, units: pixel.  Default is
            ``frac_len`` of the current axis width.
        units: string (default: arcsec)
            Units of xy.
        '''
        xlim = ax.get_xlim()
        len_total = np.abs(xlim[1] - xlim[0])
        pixelscale = self.pixel_scales[0].to('degree').value
        if len_E is None:
            len_E = len_total * frac_len / pixelscale
        if len_N is None:
            len_N = len_total * frac_len / pixelscale
        wcs = self.data.wcs
        header = wcs.to_header()
        d_ra = len_E * pixelscale
        d_dec = len_N * pixelscale
        # Offset the reference point east and north on the sky, then project
        # back to pixels to find the on-image arrow directions.
        ra = [header['CRVAL1'], header['CRVAL1']+d_ra, header['CRVAL1']]
        dec = [header['CRVAL2'], header['CRVAL2'], header['CRVAL2']+d_dec]
        ra_pix, dec_pix = wcs.all_world2pix(ra, dec, 1)
        d_arrow1 = [ra_pix[1]-ra_pix[0], dec_pix[1]-dec_pix[0]]
        d_arrow2 = [ra_pix[2]-ra_pix[0], dec_pix[2]-dec_pix[0]]
        l_arrow1 = np.sqrt(d_arrow1[0]**2 + d_arrow1[1]**2)
        l_arrow2 = np.sqrt(d_arrow2[0]**2 + d_arrow2[1]**2)
        d_arrow1 = np.array(d_arrow1) / l_arrow1 * len_E * pixelscale
        d_arrow2 = np.array(d_arrow2) / l_arrow2 * len_N * pixelscale
        def sign_2_align(sign):
            '''
            Determine the alignment of the text.
            '''
            if sign[0] < 0:
                ha = 'right'
            else:
                ha = 'left'
            if sign[1] < 0:
                va = 'top'
            else:
                va = 'bottom'
            return ha, va
        ha1, va1 = sign_2_align(np.sign(d_arrow1))
        ha2, va2 = sign_2_align(np.sign(d_arrow2))
        # Extend the arrow tails slightly backwards so they join at xy.
        xy_e = (xy[0] - d_arrow1[0] * backextend, xy[1] - d_arrow1[1] * backextend)
        ax.annotate('E', xy=xy_e, xycoords='data', fontsize=fontsize,
                    xytext=(d_arrow1[0]+xy[0], d_arrow1[1]+xy[1]), color=color,
                    arrowprops=dict(color=color, arrowstyle="<-", lw=linewidth),
                    ha=ha1, va=va1)
        xy_n = (xy[0] - d_arrow2[0] * backextend, xy[1] - d_arrow2[1] * backextend)
        ax.annotate('N', xy=xy_n, xycoords='data', fontsize=fontsize,
                    xytext=(d_arrow2[0]+xy[0], d_arrow2[1]+xy[1]), color=color,
                    arrowprops=dict(color=color, arrowstyle="<-", lw=linewidth),
                    ha=ha2, va=va2)

    def set_data(self, data, unit):
        '''
        Parameters
        ----------
        data : 2D array
            Image data.
        unit : string
            Unit for CCDData.
        '''
        self.data = CCDData(data, unit=unit)

    def source_detection_individual(self, psfFWHM, nsigma=3.0, sc_key=''):
        '''
        Detect sources in this image and store the photutils catalog in
        ``sources_catalog`` plus a sky-coordinate table in ``sources_skycord``.
        Parameters
        ----------
        psfFWHM : float
            FWHM of the imaging point spread function, units: arcsec.
        nsigma : float
            source detection threshold
        sc_key : str or int
            Suffix used in the 'sloop_*' column name for later cross-matching.
        '''
        data = np.array(self.data.copy())
        psfFWHMpix = psfFWHM / self.pixel_scales[0].value
        thresholder = detect_threshold(data, nsigma=nsigma)
        sigma = psfFWHMpix * gaussian_fwhm_to_sigma
        kernel = Gaussian2DKernel(sigma, x_size=5, y_size=5)
        kernel.normalize()
        segm = detect_sources(data, thresholder, npixels=5, filter_kernel=kernel)
        props = source_properties(data, segm)
        tab = Table(props.to_table())
        self.sources_catalog = tab
        srcPstradec = self.data.wcs.all_pix2world(tab['xcentroid'], tab['ycentroid'], 1)
        sc = SkyCoord(srcPstradec[0], srcPstradec[1], unit='deg')
        sctab = Table([sc, np.arange(len(sc))], names=['sc', 'sloop_{0}'.format(sc_key)])
        self.sources_skycord = sctab

    def make_mask(self, sources=None, magnification=3.):
        '''
        make mask for the extension.
        Parameters
        ----------
        sources : a to-be masked source table (can generate from photutils source detection)
                  if None, will use its own source catalog
        magnification : expand factor to generate mask
        '''
        mask = np.zeros_like(self.data, dtype=bool)
        # Mask invalid pixels unconditionally.
        mask[np.isnan(self.data)] = True
        mask[np.isinf(self.data)] = True
        if sources is None:
            sources = self.sources_catalog
        for loop in range(len(sources)):
            position = (sources['xcentroid'][loop], sources['ycentroid'][loop])
            a = sources['semimajor_axis_sigma'][loop]
            b = sources['semiminor_axis_sigma'][loop]
            # NOTE(review): this assumes 'orientation' is stored in radians;
            # confirm against the photutils version in use.
            theta = sources['orientation'][loop]*180./np.pi
            mask = Maskellipse(mask, position, magnification*a, (1-b/a), theta)
        self.data.mask = mask
        if self.ss_data is not None:
            self.ss_data.mask = mask

    def set_mask(self, mask):
        '''
        Set mask for the extension.
        Parameters
        ----------
        mask : 2D array
            The mask.
        '''
        assert self.data.shape == mask.shape, 'Mask shape incorrect!'
        self.data.mask = mask
        # Fixed: ``is not Nont`` typo raised a NameError whenever a
        # sky-subtracted image was present.
        if self.ss_data is not None:
            self.ss_data.mask = mask

    def set_pixel_scales(self, pixel_scales):
        '''
        Parameters
        ----------
        pixel_scales (optional) : tuple
            Pixel scales along the first and second directions, units: arcsec.
        '''
        self.pixel_scales = (pixel_scales[0]*u.arcsec, pixel_scales[1]*u.arcsec)

    def set_zero_point(self, zp):
        '''
        Set magnitude zero point.
        '''
        self.zero_point = zp

    def sky_subtraction(self, order=3, filepath=None):
        '''
        Do polynomial-fitting sky subtraction
        Parameters
        ----------
        order (optional) : int
            order of the polynomial
        filepath (optional) : str
            If given, write the sky-subtracted image to this FITS file.
        '''
        data = np.array(self.data.copy())
        maskplus = self.data.mask.copy()
        backR = polynomialfit(data, maskplus.astype(bool), order=order)
        background = backR['bkg']
        self.ss_data = CCDData(data - background, unit=self.data.unit)
        self.ss_data.mask = maskplus
        if filepath is not None:
            hdu_temp = fits.PrimaryHDU(data - background)
            hdu_temp.writeto(filepath, overwrite=True)

    def read_ss_image(self, filepath):
        '''
        read sky subtracted image from "filepath"
        '''
        hdu = fits.open(filepath)
        self.ss_data = CCDData(hdu[0].data, unit=self.data.unit)
        self.ss_data.mask = self.data.mask.copy()

    def cal_sigma_image(self, filepath=None):
        '''
        Construct sigma map following the same procedure as Galfit (quadruture sum of sigma at each pixel from source and sky background).
        Note
        ----------
        'GAIN' keyword must be available in the image header and ADU x GAIN = electron
        Parameters
        ----------
        filepath:
            Whether and where to save sigma map
        '''
        GAIN = self.data.header['CELL.GAIN']
        if self.ss_data is None:
            raise ValueError(" Please do sky subtration first !!!")
        data = np.array(self.ss_data.copy())
        mask = self.ss_data.mask.copy()
        bkgrms = np.nanstd(data[~mask.astype(bool)])
        # Zero the background (unmasked) pixels so their sigma reduces to the
        # sky RMS; masked (source) pixels keep their Poisson term.
        data[~mask.astype(bool)] = 0.
        sigmap = np.sqrt(data/GAIN + bkgrms**2)
        self.sigma_image = sigmap
        if filepath is not None:
            hdu_temp = fits.PrimaryHDU(sigmap)
            hdu_temp.writeto(filepath, overwrite=True)

    def read_sigmap(self, filepath):
        '''
        read sigma image from "filepath"
        '''
        hdu = fits.open(filepath)
        self.sigma_image = hdu[0].data

    def read_PSF(self, filepath):
        '''
        read PSF image from "filepath"
        '''
        hdu = fits.open(filepath)
        self.PSF = hdu[0].data
class image_atlas(object):
    '''
    Many images.
    '''
    def __init__(self, image_list=None, zp_list=None, band_list=None, psfFWHM_list=None):
        '''
        Parameters
        ----------
        image_list (optional) : List
            List of `image`.
        zp_list (optional) : List
            List of magnitude zeropoint.
        band_list (optional) : List
            List of band name. Check `instrument_info` for band names.
        psfFWHM_list (optional) : List
            List of PSF FWHMs, one per image, units: arcsec.
        '''
        if image_list is None:
            self.image_list = []
        else:
            self.image_list = image_list
        if band_list is None:
            self.band_list = []
        else:
            self.band_list = band_list
        # Derive the zero points from the band names when not given directly.
        if (zp_list is None) and (band_list is not None):
            zp_list = [get_zp(b) for b in band_list]
        # Fixed: only propagate zero points when they exist; the original
        # unconditional loop indexed zp_list even when it was None.
        if zp_list is not None:
            for loop, img in enumerate(self.image_list):
                img.set_zero_point(zp_list[loop])
        if psfFWHM_list is None:
            self.psfFWHM_list = []
        else:
            self.psfFWHM_list = psfFWHM_list
        # Fixed: len(self.image_list) — len(image_list) crashed when the
        # argument was left as None.
        self.__length = len(self.image_list)
        self.common_catalog = None

    def __getitem__(self, key):
        '''
        Get the image data using the filter name or number index.
        '''
        if type(key) is str:
            idx = self.band_list.index(key)
        elif type(key) is int:
            idx = key
        return self.image_list[idx]

    def __len__(self):
        '''
        Get the length of the data list.
        '''
        return self.__length

    def source_detection(self, nsigma=3.0):
        '''
        Do multi-band source detection
        Parameters
        ----------
        nsigma : float, or a array with same size as image_atlas
            source detection threshold
        '''
        # A scalar threshold is broadcast to every band.
        if isinstance(nsigma, (int, float)):
            nsigma = nsigma * np.ones(self.__length, dtype=float)
        for loop in range(self.__length):
            self.image_list[loop].source_detection_individual(self.psfFWHM_list[loop], nsigma=nsigma[loop], sc_key=loop+1)

    def make_common_catalog(self, CM_separation=2.5, magnification=3.0, applylist=None):
        '''
        Cross-match the per-band source catalogs into a common catalog with
        master ellipse sizes for masking.
        Parameters
        ----------
        CM_separation : float
            angular separation used to do sky coordinates crossmatching
            NOTE(review): the original docstring says "unit in deg", but 2.5
            deg is implausibly large for source matching — confirm the unit
            expected by ``cross_match`` (likely arcsec).
        magnification : float, or a array with same size as image_atlas
            magnification for generating mask for each image
        applylist : [list of index]
            None for all images
        '''
        if isinstance(magnification, (int, float)):
            magnification = magnification * np.ones(self.__length, dtype=float)
        if applylist is None:
            applylist = np.arange(self.__length)
        cats = []
        for loop in applylist:
            cats.append(self.image_list[loop].sources_skycord)
        # Fixed: the separation parameter was hardcoded to 2.5, silently
        # ignoring CM_separation.
        comc = cross_match(cats, angular_sep=CM_separation)
        lencc = len(comc)
        master_a = np.zeros(lencc, dtype=float)
        master_b = np.zeros(lencc, dtype=float)
        # For every matched source, keep the largest (magnified, on-sky)
        # semi-axes across all bands as the master mask ellipse.
        for loop in range(len(comc)):
            a = []
            b = []
            for loop2 in applylist:
                a.append(self.image_list[loop2].sources_catalog['semimajor_axis_sigma'][comc['sloop_{0}'.format(loop2+1)][loop]]
                         *magnification[loop2]*self.image_list[loop2].pixel_scales[0].value)
                b.append(self.image_list[loop2].sources_catalog['semiminor_axis_sigma'][comc['sloop_{0}'.format(loop2+1)][loop]]
                         *magnification[loop2]*self.image_list[loop2].pixel_scales[0].value)
            master_a[loop] = np.max(np.array(a))
            master_b[loop] = np.max(np.array(b))
        comc.add_column(Column(master_a, name='master_a'))
        comc.add_column(Column(master_b, name='master_b'))
        self.common_catalog = comc

    def sky_subtraction(self, order=3, filepaths=None):
        '''
        Do multi-band sky subtration
        Parameters
        ----------
        order (optional) : int
            order of the polynomial
        filepaths : filepath to store the sky subtracted images
        '''
        if isinstance(order, int):
            order = order * np.ones(self.__length, dtype=int)
        for loop in range(self.__length):
            if filepaths is None:
                self.image_list[loop].sky_subtraction(order[loop])
            else:
                self.image_list[loop].sky_subtraction(order[loop], filepath=filepaths[loop])

    def master_mask(self, magnification=3.0, applylist=None):
        '''
        Do multi-band source masking
        Parameters
        ----------
        magnification : float, or a array with same size as image_atlas
            magnification for generating mask for each image
        applylist : [list of index]
            None for all images
        '''
        if isinstance(magnification, (int, float)):
            magnification = magnification * np.ones(self.__length, dtype=float)
        if applylist is None:
            applylist = np.arange(self.__length)
        comc = self.common_catalog.copy()
        commonsourcelist = []
        for loop2 in applylist:
            # Keep a snapshot of this band's catalog restricted to the
            # cross-matched sources, after overwriting their ellipse sizes
            # with the master (pixel-space) values.
            newsc = self.image_list[loop2].sources_catalog.copy()
            for loop in range(len(comc)):
                self.image_list[loop2].sources_catalog['semimajor_axis_sigma'][comc['sloop_{0}'.format(loop2+1)][loop]] = comc['master_a'][loop]/(magnification[loop2]*self.image_list[loop2].pixel_scales[0].value)
                self.image_list[loop2].sources_catalog['semiminor_axis_sigma'][comc['sloop_{0}'.format(loop2+1)][loop]] = comc['master_b'][loop]/(magnification[loop2]*self.image_list[loop2].pixel_scales[0].value)
            indexes = np.delete(np.arange(len(self.image_list[loop2].sources_catalog)), comc['sloop_{0}'.format(loop2+1)])
            newsc.remove_rows(indexes)
            commonsourcelist.append(newsc)
        for loop2 in range(self.__length):
            self.image_list[loop2].make_mask(sources=commonsourcelist[loop2], magnification=magnification[loop2])

    def generate_PSFs(self, equivalent_radius=2., size=20., oversampling=1, plot=None, filepaths=None):
        '''
        Generate effective point spread fuctions (ePSFs) for each image
        Parameters
        ----------
        equivalent_radius : float, unit arcsec
            radius criteria to indentify star
        size : float, unit pixel
            use what size box to extract stars
        oversampling : int
            oversample the ePSF
        plot : None for not plot stars & ePSF
               list like [1,2,3] to plot rgb image
        filepaths : filepath to store the ePSFs
        '''
        stars = self.common_catalog.copy()
        # Reject every matched source that is too extended in any band to
        # be treated as a point source.
        remolist = []
        for loop in range(len(stars)):
            for loop2 in range(self.__length):
                a = (self.image_list[loop2].sources_catalog['equivalent_radius'][stars['sloop_{0}'.format(loop2+1)][loop]])*self.image_list[loop2].pixel_scales[0].value
                if (a > equivalent_radius):
                    remolist.append(loop)
                    break
        stars.remove_rows(remolist)
        star_images = []
        PSFs = []
        for loop2 in range(self.__length):
            newsc = self.image_list[loop2].sources_catalog.copy()
            indexes = np.delete(np.arange(len(self.image_list[loop2].sources_catalog)), stars['sloop_{0}'.format(loop2+1)])
            newsc.remove_rows(indexes)
            stars_tbl = Table()
            stars_tbl['x'] = np.array(newsc['maxval_xpos'])
            stars_tbl['y'] = np.array(newsc['maxval_ypos'])
            nddata = NDData(data=np.array(self.image_list[loop2].ss_data))
            Tstar = extract_stars(nddata, stars_tbl, size=size)
            epsf_builder = EPSFBuilder(oversampling=oversampling, maxiters=15, progress_bar=False)
            epsf, fitted_stars = epsf_builder(Tstar)
            self.image_list[loop2].PSF = epsf.data
            if filepaths is not None:
                hdu = fits.PrimaryHDU(epsf.data.astype('float32'))
                After = fits.HDUList([hdu])
                After.writeto(filepaths[loop2], overwrite=True)
            if plot is not None:
                star_images.append(Tstar)
                PSFs.append(epsf.data)
        if plot is not None:
            tlens = len(stars)
            # Pick the grid shape (4 or 5 columns) that wastes fewer panels.
            if (((tlens//5)+1)*5-tlens) < (((tlens//4)+1)*4-tlens):
                ncols = 5
                nrows = (tlens//5)+1
            else:
                ncols = 4
                nrows = (tlens//4)+1
            fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(3*ncols, 3*nrows), squeeze=True)
            ax = ax.ravel()
            for i in range(tlens):
                if len(plot) > 2:
                    # RGB composite of the extracted star cutouts, each band
                    # normalized to the same total flux.
                    star_b = star_images[plot[0]][i].data*100./np.sum(star_images[plot[0]][i].data)
                    star_g = star_images[plot[1]][i].data*100./np.sum(star_images[plot[1]][i].data)
                    star_r = star_images[plot[2]][i].data*100./np.sum(star_images[plot[2]][i].data)
                    norm = simple_norm(star_b, 'log', percent=99.)
                    image = make_lupton_rgb(star_r, star_g, star_b, Q=10)
                else:
                    image = star_images[plot[0]][i].data
                    norm = simple_norm(image, 'log', percent=99.)
                ax[i].imshow(image, norm=norm, origin='lower')
            plt.show()
            fig = plt.figure(figsize=(10, 10))
            if len(plot) > 2:
                star_b = PSFs[plot[0]]*100./np.sum(PSFs[plot[0]])
                star_g = PSFs[plot[1]]*100./np.sum(PSFs[plot[1]])
                star_r = PSFs[plot[2]]*100./np.sum(PSFs[plot[2]])
                norm = simple_norm(star_b, 'log', percent=99.)
                image = make_lupton_rgb(star_r, star_g, star_b, Q=10)
            else:
                image = PSFs[plot[0]]
                norm = simple_norm(image, 'log', percent=99.)
            plt.imshow(image, norm=norm, origin='lower')
            plt.show()
| 37.853695 | 212 | 0.5721 | 24,018 | 0.957007 | 0 | 0 | 0 | 0 | 0 | 0 | 8,260 | 0.329123 |
cad6d4d9039f032ee0647fb24456c499c4ecede6 | 1,108 | py | Python | tests/test_kfold.py | pal-16/preprocessy | 6577143d3397a47bd69c125c8de46ebc5ad2ddae | [
"MIT"
] | null | null | null | tests/test_kfold.py | pal-16/preprocessy | 6577143d3397a47bd69c125c8de46ebc5ad2ddae | [
"MIT"
] | null | null | null | tests/test_kfold.py | pal-16/preprocessy | 6577143d3397a47bd69c125c8de46ebc5ad2ddae | [
"MIT"
] | null | null | null | import numpy as np
import pytest
from preprocessy.resampling import KFold
class TestKFold:
    """Unit tests for the preprocessy KFold splitter."""

    def test_kfold(self):
        """Every invalid constructor configuration must raise ValueError."""
        bad_configs = [
            dict(n_splits=2.3),
            dict(n_splits=0),
            dict(shuffle=1),
            dict(shuffle=True, random_state=4.5),
            dict(shuffle=False, random_state=69),
        ]
        for config in bad_configs:
            with pytest.raises(ValueError):
                KFold(**config)

    def test_split(self):
        """split() rejects n_splits > len(data) and yields balanced folds."""
        with pytest.raises(ValueError):
            samples = np.arange(10)
            splitter = KFold(n_splits=20)
            for train_idx, test_idx in splitter.split(samples):
                print(
                    f"Train indices: {train_idx}\nTest indices:"
                    f" {test_idx}"
                )
        # 12 samples over 3 folds: each fold trains on 8, tests on 4.
        samples = np.arange(12)
        splitter = KFold(n_splits=3, shuffle=True, random_state=69)
        for train_idx, test_idx in splitter.split(samples):
            assert len(train_idx) == 8
            assert len(test_idx) == 4
cad6f407f7e2ec628f292a0cf8cb6230d6d12b29 | 337 | py | Python | srforge_tests/unit_tests/utilities_tests/__init__.py | alunduil/srforge | ef498b0c2e327ae14613bd369f45940005ad8b1f | [
"MIT"
] | null | null | null | srforge_tests/unit_tests/utilities_tests/__init__.py | alunduil/srforge | ef498b0c2e327ae14613bd369f45940005ad8b1f | [
"MIT"
] | null | null | null | srforge_tests/unit_tests/utilities_tests/__init__.py | alunduil/srforge | ef498b0c2e327ae14613bd369f45940005ad8b1f | [
"MIT"
] | null | null | null | # Copyright (C) 2016 srforge project developers.
#
# See the COPYRIGHT file at the top-level directory of this distribution and at
# https://github.com/alunduil/srforge/blob/master/COPYRIGHT
#
# srforge is freely distributable under the terms of an MIT-style license.
# See LICENSE or http://www.opensource.org/licenses/mit-license.php.
| 42.125 | 79 | 0.780415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 330 | 0.979228 |
cad795ae9dd6135d51053b4f8de0331e4efd6df4 | 422 | py | Python | prices/admin.py | KazuruK/FilmGetter | fd84bcaddf17d4b89ad6e5d27095535346c5f4a9 | [
"BSD-3-Clause"
] | 1 | 2021-06-23T13:06:11.000Z | 2021-06-23T13:06:11.000Z | prices/admin.py | KazuruK/FilmGetter | fd84bcaddf17d4b89ad6e5d27095535346c5f4a9 | [
"BSD-3-Clause"
] | 1 | 2021-06-23T21:21:52.000Z | 2021-06-23T21:21:52.000Z | prices/admin.py | KazuruK/FilmGetter | fd84bcaddf17d4b89ad6e5d27095535346c5f4a9 | [
"BSD-3-Clause"
] | 1 | 2021-06-28T19:14:19.000Z | 2021-06-28T19:14:19.000Z | from django.contrib import admin
# Register your models here.
from prices.models import IdDB
class IdDBAdmin(admin.ModelAdmin):
    """Django admin configuration for the IdDB model (cached film data)."""
    # Columns shown in the admin change-list view.
    list_display = (
        'kinopoisk_id',
        'title',
        'title_en',
        'year',
        'date_created',
        'price',)
    # Fields matched by the admin search box.
    search_fields = ('kinopoisk_id', 'title',)
    # Sidebar filter choices.
    list_filter = ('year',)
    # Placeholder rendered for empty values ("-empty-" in Russian).
    empty_value_display = '-пусто-'


# Register the model with the customised admin class above.
admin.site.register(IdDB, IdDBAdmin)
| 19.181818 | 46 | 0.620853 | 290 | 0.679157 | 0 | 0 | 0 | 0 | 0 | 0 | 127 | 0.297424 |
cad84f636cd4b82b3be3bfeb9a329a3e645a1011 | 2,166 | py | Python | tapetracker.py | kemfic/pumavision2019 | b14a566ce63629a90f31fd70d5f62d9efd61bdff | [
"MIT"
] | 2 | 2019-03-05T21:59:06.000Z | 2019-06-18T03:58:00.000Z | tapetracker.py | kemfic/pumavision2019 | b14a566ce63629a90f31fd70d5f62d9efd61bdff | [
"MIT"
] | null | null | null | tapetracker.py | kemfic/pumavision2019 | b14a566ce63629a90f31fd70d5f62d9efd61bdff | [
"MIT"
] | null | null | null | import cv2
import numpy as np
class TapeTracker(object):
    """Detects a pair of vision-tape strips in a camera frame.

    ``pipeline`` thresholds the frame in HLS space, keeps the two largest
    tall-and-thin contours, and stores intermediate results on the
    instance (``mask``, ``cnt_f``, annotated ``ret``) plus ``error``, the
    horizontal pixel offset of the tape-pair midpoint.
    """

    # HLS threshold band for the tape colour (hue roughly 80-90).
    min_thresh = np.array([80, 0, 0])
    max_thresh = np.array([90, 255, 255])

    def __init__(self):
        # Fixed: the method was misspelled "__init_", so it never ran and
        # self.img did not exist until pipeline() was first called.
        self.img = np.zeros((500, 500))

    def pipeline(self, img):
        """Run the full detection pipeline on a BGR frame."""
        self.img = cv2.resize(img, (300, 300), cv2.INTER_NEAREST)
        self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2HLS)
        self.mask = cv2.inRange(self.img, self.min_thresh, self.max_thresh)
        self.cnt, self.hier = cv2.findContours(self.mask, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_NONE)
        self.ret = np.copy(self.img)
        # Keep the two largest contours, then filter for tall, thin blobs
        # (tape strips are much taller than wide) of non-trivial area.
        self.cnt = sorted(self.cnt, key=cv2.contourArea, reverse=True)[:2]
        self.cnt_f = []
        for cnt in self.cnt:
            x, y, w, h = cv2.boundingRect(cnt)
            if w < 0.6 * h and cv2.contourArea(cnt) > 10:
                cv2.rectangle(self.ret, (x, y), (x + w, y + h), (0, 255, 0), 2)
                self.cnt_f.append(cnt)
        # Robustness fix: the original indexed cnt_f[0]/cnt_f[1]
        # unconditionally and raised IndexError when fewer than two tape
        # candidates were detected in the frame.
        if len(self.cnt_f) < 2:
            return
        M_1 = cv2.moments(self.cnt_f[0])
        cx_1 = int(M_1['m10'] / M_1['m00'])
        cy_1 = int(M_1['m01'] / M_1['m00'])
        M_2 = cv2.moments(self.cnt_f[1])
        cx_2 = int(M_2['m10'] / M_2['m00'])
        cy_2 = int(M_2['m01'] / M_2['m00'])
        midpoint = ((cx_1 + cx_2) // 2, (cy_1 + cy_2) // 2)
        # NOTE(review): this subtracts the full image width (300), not the
        # image centre (shape[0]//2); looks like an off-centre error
        # metric -- confirm intent before changing.
        self.error = midpoint[0] - self.img.shape[0]
        print(self.error)
        self.ret = cv2.drawContours(self.ret, self.cnt_f, -1, (150, 150, 255), 2)
        self.ret = cv2.circle(self.ret, (cx_1, cy_1), 2, (150, 155, 255))
        self.ret = cv2.circle(self.ret, (cx_2, cy_2), 2, (150, 155, 255))
        self.ret = cv2.circle(self.ret, midpoint, 2, (150, 255, 255))
# Manual smoke test: run the pipeline on a sample frame and display the
# intermediate images (HLS input, threshold mask, annotated output).
# Press Esc (key code 27) to close the windows.
if __name__ == "__main__":
    ct = TapeTracker()
    img = cv2.imread('img/1.jpg')
    ct.pipeline(img)
    cv2.imshow('output', cv2.resize(cv2.cvtColor(ct.img, cv2.COLOR_HLS2BGR), (500, 500), cv2.INTER_NEAREST))
    cv2.imshow('mask', cv2.resize(ct.mask, (500,500), cv2.INTER_NEAREST))
    cv2.imshow('contour', cv2.resize(cv2.cvtColor(ct.ret, cv2.COLOR_HLS2BGR), (500, 500), cv2.INTER_NEAREST))
    k = cv2.waitKey(0) & 0xFF
    if k == 27:
        cv2.destroyAllWindows()
| 34.935484 | 107 | 0.617729 | 1,675 | 0.773315 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.10711 |
cad9516cda873ad5c7a80d3614252cdbc50f0241 | 3,241 | py | Python | Engine/Tools.py | NoNotCar/SpaceX | 803390fdf0ed3deee18d8f0d101ae575cf9b55e1 | [
"MIT"
] | null | null | null | Engine/Tools.py | NoNotCar/SpaceX | 803390fdf0ed3deee18d8f0d101ae575cf9b55e1 | [
"MIT"
] | null | null | null | Engine/Tools.py | NoNotCar/SpaceX | 803390fdf0ed3deee18d8f0d101ae575cf9b55e1 | [
"MIT"
] | null | null | null | from .Items import Item
from Lib import Img
import pygame
from Game import Registry,Research
from Objects import War
# Sound effects shared by all tools, loaded once at import time.
error=Img.sndget("error")
bsnd=Img.sndget("break")
class Pickaxe(Item):
    """Mining tool: repeatedly 'use' on an object to break it.

    Mining progress (self.prog) accumulates only while the player keeps
    hitting the same object with less than MAX_CONTINUOUS ms between
    swings; switching targets or pausing resets progress.
    """
    img=Img.imgx("Tools/Pickaxe")
    last_used=0  # tick (ms) of the previous swing
    MAX_CONTINUOUS=200  # max gap (ms) between swings to count as continuous
    last_mined=None  # object hit by the previous swing
    stack_size = 1
    singular = True
    continuous = True  # tool may be held down
    def use(self,area,tpos,tr,p):
        # Scan layers top-down and act on the first mineable object found.
        for l in reversed(area.layers):
            o=l[tpos]
            if o and o.hardness:
                if o is self.last_mined and pygame.time.get_ticks()-self.last_used<self.MAX_CONTINUOUS:
                    self.prog+=1
                    if self.prog==o.hardness:
                        # Object broken: try the inventory first, then drop
                        # the item on a clear tile, otherwise abort.
                        item=o.mined()
                        o.on_mine(area,tpos)
                        if item is None or p.inv.add(item,1):
                            bsnd.play()
                        elif area.clear("Items",tpos):
                            area.spawn_item(item,tpos)
                            bsnd.play()
                        else:
                            # No room anywhere; presumably the True flag
                            # tells on_mine the mine failed -- TODO confirm.
                            o.on_mine(area, tpos,True)
                            error.play()
                        self.prog=0
                        self.prog_max=0
                        self.last_mined=None
                else:
                    # New target (or swing gap too long): restart progress.
                    self.prog=0
                    self.prog_max=o.hardness
                    self.last_mined=o
                break
        self.last_used=pygame.time.get_ticks()
class Bridger(Item):
    """Places a bridge tile over unsupported ground."""
    img = Img.imgx("Tools/BridgeBuilder")

    def use(self, area, tpos, tr, p):
        """Build a bridge at tpos when the tile there lacks support."""
        tile = area.get("Tiles", tpos)
        if not tile or tile.support:
            return None
        area.set_tile("Bridge", tpos)
        return True
class ChainSaw(Item):
    """Fells trees, placing the drops straight into the player's inventory."""
    img = Img.imgx("Tools/ChainSaw")
    stack_size = 1
    name = "Chainsaw"

    def use(self, area, tpos, tr, p):
        """Cut down a tree at tpos if its drops fit into p's inventory."""
        target = area.get("Objects", tpos)
        if not target or target.name != "Tree":
            return
        if p.inv.add(target.mined()):
            area.dobj(target, tpos)
class FireFlower(Item):
    """Launches a fireball in the player's facing direction."""
    img = Img.imgstripx("Plants/FireFlower")[-1]

    def use(self, area, tpos, tr, p):
        """Spawn a fireball at tpos; returns False when the square is blocked."""
        if not area.clear("Objects", tpos):
            return False
        area.spawn_new(War.Fireball, tpos, p.d)
        return True
class Wrench(Item):
    """Interaction tool: forwards a wrench action to the object at tpos."""
    img = Img.imgx("Tools/Wrench")

    def use(self, area, tpos, tr, p):
        """Invoke the wrench handler of the object under the cursor, if any."""
        target = area.get("Objects", tpos)
        if not target:
            return
        target.wrench(p, p.coords.pos, tpos, area)
from Game import Boxes
class EntangledPlacer(Item):
    """Two-phase placer for a pair of linked ("entangled") boxes.

    The first use places EntangledBox1; the second use places an
    EntangledBox2 linked to the first.  NOTE: phase and e1 start as class
    attributes and are shadowed by instance attributes on first use;
    phase is never reset after the pair is placed.
    """
    imgs=Img.imgstripx("Tools/EntangledPlacer")
    img=imgs[0]
    singular = True
    stack_size = 1
    phase=0  # 0 = next use places box 1, 1 = next use places box 2
    e1=None  # first box of the pair, set during phase 0
    name="EntangledPlacer"
    def use(self,area,tpos,tr,p):
        # Placement requires every layer used by the box to be free.
        for l in Boxes.EntangledBox1.layers:
            if not area.clear(l,tpos):
                return False
        if self.phase:
            # Second placement: spawn the partner box linked to e1.
            area.spawn_new(Boxes.EntangledBox2,tpos,self.e1.area,self.e1.dummy)
            return True
        else:
            # First placement; switch icon to show one box remaining.
            # NOTE(review): this branch returns None rather than True --
            # confirm whether callers rely on a truthy result here.
            self.e1=Boxes.EntangledBox1(None)
            area.spawn(self.e1,tpos)
            self.phase=1
            self.img=self.imgs[1]
# Crafting recipes and research unlocks for the tools defined above.
Registry.add_recipe({"Iron":3,"Brick":5},Bridger())
Registry.add_recipe({"Iron":3},Wrench())
Research.add_recipesearch({"Circuit":5,"Steel":3},ChainSaw(),[1],20)
Research.add_recipesearch({"StdBox":1,"ChaosCrystal":3},EntangledPlacer(),[1],50)
| 33.412371 | 103 | 0.54983 | 2,798 | 0.863314 | 0 | 0 | 0 | 0 | 0 | 0 | 261 | 0.080531 |
cadb7857fc43538c2164847a75f460688bce44f4 | 879 | py | Python | tests/max/test_rmc.py | realead/rmc | 6dafa2a4b5ab7199e86e86a4c10388bc8e472bb6 | [
"MIT"
] | null | null | null | tests/max/test_rmc.py | realead/rmc | 6dafa2a4b5ab7199e86e86a4c10388bc8e472bb6 | [
"MIT"
] | null | null | null | tests/max/test_rmc.py | realead/rmc | 6dafa2a4b5ab7199e86e86a4c10388bc8e472bb6 | [
"MIT"
] | null | null | null | import os
import exetest as ex
import exetest.decorator as dec
import RMCTester
@dec.to_unit_tests
class Tester(RMCTester.RMCTester):
    """exetest-based black-box tests for the "max" example program.

    The @to_unit_tests decorator turns each casedata_* attribute into a
    unit test that runs the executable with the given OPTIONS and checks
    STDOUT, merged with default_parameters below.
    """
    # Setting up the test case: locate the executable next to this file.
    my_path = os.path.dirname(__file__)
    program_name = "max"
    exe = os.path.join(my_path, RMCTester.RMCTester.EXE_NAME)

    # Defaults applied to every case: clean exit, no stderr, no stdin.
    default_parameters = {ex.EXIT_CODE: 0,
                          ex.STDERR: "",
                          ex.INPUT: ""}

    casedata_both_nulls = {ex.OPTIONS: ["2", "0", "0"],
                          ex.STDOUT: "0\n"}

    casedata_both_ones = {ex.OPTIONS: ["2", "1", "1"],
                          ex.STDOUT: "1\n"}

    casedata_max_first= {ex.OPTIONS: ["2", "5", "1"],
                          ex.STDOUT: "5\n"}

    casedata_max_second = {ex.OPTIONS: ["2", "5", "6"],
                          ex.STDOUT: "6\n"}
| 25.114286 | 61 | 0.486917 | 773 | 0.879408 | 0 | 0 | 792 | 0.901024 | 0 | 0 | 90 | 0.102389 |
cadb919713f442cb696ef20531e176f4131df704 | 1,925 | py | Python | ceilometer_infoblox/pollsters/__init__.py | mwinslow7/ceilometer-infoblox | c183fec8d12d6957156485684759e064fc43bbcd | [
"Apache-2.0"
] | 4 | 2015-09-22T22:26:00.000Z | 2018-12-27T07:16:24.000Z | ceilometer_infoblox/pollsters/__init__.py | mwinslow7/ceilometer-infoblox | c183fec8d12d6957156485684759e064fc43bbcd | [
"Apache-2.0"
] | 1 | 2017-05-03T00:34:03.000Z | 2017-05-03T00:37:28.000Z | ceilometer_infoblox/pollsters/__init__.py | mwinslow7/ceilometer-infoblox | c183fec8d12d6957156485684759e064fc43bbcd | [
"Apache-2.0"
] | 9 | 2015-10-22T15:55:29.000Z | 2019-09-02T07:47:11.000Z | # Copyright 2015 Infoblox, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_utils import timeutils
import six
from ceilometer.hardware.pollsters import generic
from ceilometer import sample
@six.add_metaclass(abc.ABCMeta)
class BaseNIOSPollster(generic.GenericHardwareDeclarativePollster):
    """Abstract base class for Infoblox NIOS hardware pollsters."""

    def __init__(self):
        super(BaseNIOSPollster, self).__init__()
        # Install this pollster's meter definition, built from the
        # meter_dict supplied by the concrete subclass.
        self._update_meter_definition(
            generic.MeterDefinition(self.meter_dict))

    @property
    def default_discovery(self):
        return 'nios_instances'

    def generate_samples(self, host_url, data):
        """Generate a list of Sample from the data returned by inspector

        :param host_url: host url of the endpoint
        :param data: list of data returned by the corresponding inspector
        """
        definition = self.meter_definition
        return [
            sample.Sample(
                name=definition.name,
                type=definition.type,
                unit=definition.unit,
                volume=value,
                user_id=extra['user_id'],
                project_id=extra['tenant_id'],
                resource_id=extra['resource_id'],
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata=extra,
            )
            for (value, metadata, extra) in data
        ]
| 32.627119 | 75 | 0.668052 | 1,173 | 0.609351 | 0 | 0 | 1,205 | 0.625974 | 0 | 0 | 816 | 0.423896 |
cadf0bfc3589bccc805fdee98c4cd79597ee146b | 2,309 | py | Python | waitercaller.py | salaikumar/waitercaller | 83ad2f8a477fc196c665db1ce1159a042d69fe8e | [
"Apache-2.0"
] | null | null | null | waitercaller.py | salaikumar/waitercaller | 83ad2f8a477fc196c665db1ce1159a042d69fe8e | [
"Apache-2.0"
] | null | null | null | waitercaller.py | salaikumar/waitercaller | 83ad2f8a477fc196c665db1ce1159a042d69fe8e | [
"Apache-2.0"
] | null | null | null | from flask import Flask
from flask import request
from flask import render_template
# Login Extension
from flask_login import LoginManager
from flask_login import login_required
from flask_login import login_user
from flask_login import logout_user
from mockdbhelper import MockDBHelper as DBHelper
from user import User
from flask import redirect
from flask import url_for
# Password Helper imports
from passwordhelper import PasswordHelper
# DB Helper instance (mock backend; swap for a real DB helper in prod).
DB = DBHelper()
# Password Helper instance (salting + hashing + validation).
PH = PasswordHelper()
# Creating a Flask App instance
app = Flask(__name__)
# Set a secret key for your application.
# NOTE(review): hard-coded secret key checked into source -- move this to
# configuration or an environment variable.
app.secret_key = 'tPXJY3X37Qybz4QykV+hOyUxVQeEXf1Ao2C8upz+fGQXKsM'
login_manager = LoginManager(app)
@app.route("/")
def home():
    """Render the public landing page."""
    return render_template("home.html")


@app.route("/account")
@login_required
def account():
    """Page shown only to authenticated users."""
    return "You're logged in"


@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: rebuild a User object from the session id.

    Returns None (implicitly) when the id is unknown, which Flask-Login
    treats as "not logged in".
    """
    user_password = DB.get_user(user_id)
    if user_password:
        return User(user_id)
@app.route("/login", methods=["POST"])
def login():
    """Authenticate the posted credentials and start a session.

    On success, redirects to the account page; on any failure the home
    page is rendered again.
    """
    email = request.form.get("email")
    password = request.form.get("password")
    stored_user = DB.get_user(email)
    credentials_ok = stored_user and PH.validate_password(
        password, stored_user['salt'], stored_user['hashed'])
    if credentials_ok:
        login_user(User(email), remember=True)
        return redirect(url_for('account'))
    return home()
# Register function
@app.route("/register" , methods=["POST"])
def register():
    """Create a new account from the posted registration form.

    Rejects (by redirecting home) when the two password fields differ or
    the email is already registered; otherwise stores a salted hash.
    """
    email = request.form.get("email")
    password = request.form.get("password")
    confirmation = request.form.get("password2")
    if password != confirmation or DB.get_user(email):
        return redirect(url_for('home'))
    salt = PH.get_salt()
    DB.add_user(email, salt, PH.get_hash(password + salt))
    return redirect(url_for('home'))
@app.route("/logout")
def logout():
    """End the current session and send the user back to the home page."""
    logout_user()
    return redirect(url_for("home"))


if __name__ == '__main__':
    # Development server only; use a proper WSGI server in production.
    app.run(port=5000, debug=True)
| 27.164706 | 98 | 0.708099 | 0 | 0 | 0 | 0 | 1,481 | 0.641403 | 0 | 0 | 604 | 0.261585 |
cadff6a5947df261c22029325d9e900afbcb18eb | 348 | py | Python | Part_3_advanced/m03_date_and_time/date_today/example_1/main.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_3_advanced/m03_date_and_time/date_today/example_1/main.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_3_advanced/m03_date_and_time/date_today/example_1/main.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | from book_book import books_directory, user_interface
def run_example():
user_interface.add_new_book()
print("Data dodania: ", books_directory.available_books[-1].added_at_date)
user_interface.add_new_book()
print("Data dodania: ", books_directory.available_books[-1].added_at_date)
if __name__ == "__main__":
run_example()
| 26.769231 | 78 | 0.755747 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.12069 |
cae0c802208358b65003e5f7be5cda37037a9c79 | 1,602 | py | Python | examples/example1.py | inverseproblem/pyMag2Dpoly | d4210faa691c1ca96677e4a416ae6571d53970ac | [
"MIT"
] | 4 | 2021-01-31T00:41:43.000Z | 2021-09-08T05:01:13.000Z | examples/example1.py | inverseproblem/pyMag2Dpoly | d4210faa691c1ca96677e4a416ae6571d53970ac | [
"MIT"
] | null | null | null | examples/example1.py | inverseproblem/pyMag2Dpoly | d4210faa691c1ca96677e4a416ae6571d53970ac | [
"MIT"
] | 1 | 2021-03-28T07:19:26.000Z | 2021-03-28T07:19:26.000Z |
import sys
# in this case local import
sys.path.append("../")
import mag2dpoly as mag
import numpy as np

# induced magnetization
Jind = mag.MagnetizVector(mod=4.9, Ideg=90.0, Ddeg=45.0)
# remanent magnetization
Jrem = mag.MagnetizVector(mod=3.1, Ideg=45.0, Ddeg=0.0)
# angle with the North axis
northxax = 90.0

# number of observations and their (x, z) positions
Nobs = 101
xzobs = np.transpose(np.vstack(( np.linspace(0.0,100.0,Nobs), -1.0*np.ones(Nobs))))

# vertices of the poligonal bodies
vertices = np.array([ [35.0, 50.0],
                      [65.0, 50.0],
                      [80.0, 35.0],
                      [65.0, 20.0],
                      [35.0, 20.0],
                      [20.0, 35.0] ])

# indices of vertices for the body
nbod = 1
# Fix: dtype=object -- np.object was deprecated in NumPy 1.20 and removed
# in 1.24, so the original raised AttributeError on modern NumPy.
bodyindices = np.empty(shape=(nbod,), dtype=object)
inds = range(6)
bodyindices[0] = np.array(inds)

# construct the poligonal body object
pbody = mag.MagPolyBodies2D(bodyindices, vertices)

# type of forward algorithm
forwardtype = "talwani"

# compute total field
# make Jind and Jrem arrays of objects (as many as there are bodies)
Jindv = np.array([Jind])  # we have one single body in this case
Jremv = np.array([Jrem])  # we have one single body in this case
tmag = mag.tmagpolybodies2Dgen(xzobs, Jindv, Jremv, northxax, pbody, forwardtype)

## plot the anomaly profile and the body outline
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(211)
plt.title("Magnetic anomaly")
plt.plot(xzobs[:,0], tmag, "o-")
plt.subplot(212)
plt.title("Polygonal body")
# close the polygon by appending the first vertex at the end
x = np.append(pbody.bo[0].ver1[:,0], pbody.bo[0].ver1[0,0])
y = np.append(pbody.bo[0].ver1[:,1], pbody.bo[0].ver1[0,1])
plt.plot(x, y, "o-")
plt.show()
| 25.03125 | 83 | 0.655431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 484 | 0.302122 |
cae2b6216f4f43c83ea66c2eb2462e3bd35c9bfd | 1,607 | py | Python | stats.py | warppoint42/Mahjong221 | dac82c726927730e11112e2a62b500587717b7ed | [
"MIT"
] | null | null | null | stats.py | warppoint42/Mahjong221 | dac82c726927730e11112e2a62b500587717b7ed | [
"MIT"
] | null | null | null | stats.py | warppoint42/Mahjong221 | dac82c726927730e11112e2a62b500587717b7ed | [
"MIT"
] | null | null | null | import csv
stats = dict()

# Pass 1: per-AI round log -- count rounds seen ('n') and how often each
# of the end/win/feed flags was set.
with open('unifiedroundlog.csv', 'r') as csvfile:
    reader = csv.DictReader(csvfile, ('AI', 'gameID', 'end', 'win', 'feed', 'riichi'))
    for row in reader:
        entry = stats.setdefault(row['AI'], dict())
        for field in ('n', 'end', 'win', 'feed'):
            entry.setdefault(field, 0)
        entry['n'] += 1
        for flag in ('end', 'win', 'feed'):
            if row[flag] == '1':
                entry[flag] += 1

# Pass 2: final standings ("owari") -- a 'Name' marker in a placement
# column means this AI finished in that place; totp accumulates the
# placement numbers and totn the number of games.
with open('owari.csv', 'r') as csvfile:
    reader = csv.DictReader(csvfile, ('AI', 'gameID', '1', '2', '3', '4'))
    for row in reader:
        entry = stats[row['AI']]
        for field in ('1', '2', '3', '4', 'totp', 'totn'):
            entry.setdefault(field, 0)
        entry['totn'] += 1
        for place in ('1', '2', '3', '4'):
            if 'Name' in row[place]:
                entry[place] += 1
                entry['totp'] += int(place)

print(stats)
| 32.14 | 86 | 0.450529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 363 | 0.225887 |
cae4acfde1f01b9cb0259fd0db2f62854a8254cc | 4,544 | py | Python | fabfile.py | yanikou19/pymatgen | 8ee0d9ff35a9c2fa4f00da5d423e536ed8914e31 | [
"MIT"
] | null | null | null | fabfile.py | yanikou19/pymatgen | 8ee0d9ff35a9c2fa4f00da5d423e536ed8914e31 | [
"MIT"
] | null | null | null | fabfile.py | yanikou19/pymatgen | 8ee0d9ff35a9c2fa4f00da5d423e536ed8914e31 | [
"MIT"
] | null | null | null | """
Deployment file to facilitate releases of pymatgen.
Note that this file is meant to be run from the root directory of the pymatgen
repo.
"""
__author__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "Sep 1, 2014"
import glob
import os
import json
import webbrowser
import requests
import re
import subprocess
from fabric.api import local, lcd
from pymatgen import __version__ as ver
def make_doc():
    """Regenerate the Sphinx documentation under docs/_build/html.

    Steps: extract the latest CHANGES.rst section into
    docs/latest_changes.rst, convert the example notebooks to HTML,
    regenerate the API .rst files (stripping test modules and inlining
    subpackage listings), then build with sphinx.
    """
    with open("CHANGES.rst") as f:
        contents = f.read()
    # Changelog sections are underlined with runs of 3+ dashes; keep only
    # the first section's lines (dropping its trailing underline line).
    toks = re.split("\-{3,}", contents)
    n = len(toks[0].split()[-1])
    changes = [toks[0]]
    changes.append("\n" + "\n".join(toks[1].strip().split("\n")[0:-1]))
    changes = ("-" * n).join(changes)
    with open("docs/latest_changes.rst", "w") as f:
        f.write(changes)
    with lcd("examples"):
        local("ipython nbconvert --to html *.ipynb")
        local("mv *.html ../docs/_static")
    with lcd("docs"):
        local("cp ../CHANGES.rst change_log.rst")
        local("sphinx-apidoc -d 6 -o . -f ../pymatgen")
        local("rm pymatgen.*.tests.rst")
        # Post-process the generated .rst files: drop "tests" entries and
        # splice each "Subpackages" listing inline under its parent module.
        for f in glob.glob("docs/*.rst"):
            if f.startswith('docs/pymatgen') and f.endswith('rst'):
                newoutput = []
                suboutput = []
                subpackage = False
                with open(f, 'r') as fid:
                    for line in fid:
                        clean = line.strip()
                        if clean == "Subpackages":
                            subpackage = True
                        if not subpackage and not clean.endswith("tests"):
                            newoutput.append(line)
                        else:
                            if not clean.endswith("tests"):
                                suboutput.append(line)
                            if clean.startswith("pymatgen") and not clean.endswith("tests"):
                                newoutput.extend(suboutput)
                                subpackage = False
                                suboutput = []
                with open(f, 'w') as fid:
                    fid.write("".join(newoutput))
        local("make html")
        local("cp _static/* _build/html/_static")
        # This makes sure pymatgen.org works to redirect to the GitHub page
        local("echo \"pymatgen.org\" > _build/html/CNAME")
        # Avoid the use of jekyll so that _dir works as intended.
        local("touch _build/html/.nojekyll")
def publish():
    """Build and upload the release artifacts (delegates to setup.py)."""
    local("python setup.py release")


def setver():
    """Stamp the current pymatgen version (ver) into setup.py in place."""
    local("sed s/version=.*,/version=\\\"{}\\\",/ setup.py > newsetup"
          .format(ver))
    local("mv newsetup setup.py")
def update_doc():
    """Rebuild the docs and push them to the gh-pages branch."""
    make_doc()
    with lcd("docs/_build/html/"):
        local("git add .")
        local("git commit -a -m \"Update dev docs\"")
        local("git push origin gh-pages")


def merge_stable():
    """Commit the release on master and merge it into the stable branch."""
    local("git commit -a -m \"v%s release\"" % ver)
    local("git push")
    local("git checkout stable")
    local("git pull")
    local("git merge master")
    local("git push")
    local("git checkout master")
def release_github():
    """Create a GitHub release for the current version.

    Reads the newest changelog section from CHANGES.rst and posts it as
    the release body via the GitHub releases API.  Requires the
    GITHUB_RELEASES_TOKEN environment variable to be set.
    """
    with open("CHANGES.rst") as f:
        contents = f.read()
    # The release notes are the text between the first two dash underlines.
    toks = re.split("\-+", contents)
    desc = toks[1].strip()
    payload = {
        "tag_name": "v" + ver,
        "target_commitish": "master",
        "name": "v" + ver,
        "body": desc,
        "draft": False,
        "prerelease": False
    }
    response = requests.post(
        "https://api.github.com/repos/materialsproject/pymatgen/releases",
        data=json.dumps(payload),
        headers={"Authorization": "token " + os.environ["GITHUB_RELEASES_TOKEN"]})
    # Fix: the original "print response.text" is Python-2-only syntax and a
    # SyntaxError on Python 3; print(...) works on both 2 and 3.
    print(response.text)
def update_changelog():
    """Prepend commit subjects since the last release tag to CHANGES.rst."""
    log = subprocess.check_output(["git", "log", "--pretty=format:%s",
                                   "v%s..HEAD" % ver])
    bullets = "\n".join("* " + subject for subject in log.strip().split("\n"))
    with open("CHANGES.rst") as f:
        contents = f.read()
    # Insert the new bullet list just before the last "=" underline block.
    parts = contents.split("==========")
    parts.insert(-1, "\n\n" + bullets)
    with open("CHANGES.rst", "w") as f:
        f.write("==========".join(parts))
def log_ver():
    """Record the released version as a file in a public Dropbox folder."""
    filepath = os.path.join(os.environ["HOME"], "Dropbox", "Public",
                            "pymatgen", ver)
    with open(filepath, "w") as f:
        f.write("Release")


def release(skip_test=False):
    """Run the full release pipeline.

    :param skip_test: when True, skip running the test suite first.
    """
    setver()
    if not skip_test:
        local("nosetests")
    publish()
    log_ver()
    update_doc()
    merge_stable()
    release_github()


def open_doc():
    """Open the locally built documentation in the default web browser."""
    pth = os.path.abspath("docs/_build/html/index.html")
    webbrowser.open("file://" + pth)
| 29.316129 | 92 | 0.541813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,487 | 0.327245 |