hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
454453eab91aa77a631ac0aa12025ca69dac8cdb | 4,294 | py | Python | app.py | univoid/a3x | 46f48363f191344747fec5e643efe1b467fb04c3 | [
"MIT"
] | null | null | null | app.py | univoid/a3x | 46f48363f191344747fec5e643efe1b467fb04c3 | [
"MIT"
] | null | null | null | app.py | univoid/a3x | 46f48363f191344747fec5e643efe1b467fb04c3 | [
"MIT"
] | null | null | null | import os
import base64
import botocore
import boto3
import json
import urllib
from chalice import BadRequestError
from chalice import ChaliceViewError
from chalice import Chalice
app = Chalice(app_name='a3x')
app.debug = True
REGION = 'us-east-1'
BUCKET = 'freko-001'
S3 = boto3.resource('s3')
REKOGNITION = boto3.client('rekognition')
| 30.027972 | 96 | 0.642757 |
188445351d4fd03596d67479b0ce34074904480c | 7,387 | py | Python | nnutils/laplacian_loss.py | lolrudy/GPV_pose | f326a623b3e45e6edfc1963b068e8e7aaea2bfff | [
"MIT"
] | 10 | 2022-03-16T02:14:56.000Z | 2022-03-31T19:01:34.000Z | nnutils/laplacian_loss.py | lolrudy/GPV_pose | f326a623b3e45e6edfc1963b068e8e7aaea2bfff | [
"MIT"
] | 1 | 2022-03-18T06:43:16.000Z | 2022-03-18T06:56:35.000Z | nnutils/laplacian_loss.py | lolrudy/GPV_pose | f326a623b3e45e6edfc1963b068e8e7aaea2bfff | [
"MIT"
] | 2 | 2022-03-19T13:06:28.000Z | 2022-03-19T16:08:18.000Z | # --------------------------------------------------------
# Written by Yufei Ye (https://github.com/JudyYe)
# --------------------------------------------------------
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# customize laplacian argument
import torch
def mesh_laplacian_smoothing(meshes, verts_packed=None, method: str = "uniform"):
    r"""
    Computes the laplacian smoothing objective for a batch of meshes.
    This function supports three variants of Laplacian smoothing,
    namely with uniform weights("uniform"), with cotangent weights ("cot"),
    and cotangent cuvature ("cotcurv").For more details read [1, 2].
    Args:
        meshes: Meshes object with a batch of meshes.
        verts_packed: optional (sum(V_n), 3) tensor of packed vertex
            positions to smooth in place of ``meshes.verts_packed()``.
            NOTE(review): presumably this lets a caller smooth an
            externally predicted vertex tensor while keeping gradients
            attached to it — confirm against callers.
        method: str specifying the method for the laplacian,
            one of "uniform", "cot", "cotcurv".
    Returns:
        loss: Average laplacian smoothing loss across the batch.
        Returns 0 if meshes contains no meshes or all empty meshes.
    Consider a mesh M = (V, F), with verts of shape Nx3 and faces of shape Mx3.
    The Laplacian matrix L is a NxN tensor such that LV gives a tensor of vectors:
    for a uniform Laplacian, LuV[i] points to the centroid of its neighboring
    vertices, a cotangent Laplacian LcV[i] is known to be an approximation of
    the surface normal, while the curvature variant LckV[i] scales the normals
    by the discrete mean curvature. For vertex i, assume S[i] is the set of
    neighboring vertices to i, a_ij and b_ij are the "outside" angles in the
    two triangles connecting vertex v_i and its neighboring vertex v_j
    for j in S[i], as seen in the diagram below.
    .. code-block:: python
               a_ij
                /\
               /  \
              /    \
             /      \
        v_i /________\ v_j
            \        /
             \      /
              \    /
               \  /
                \/
               b_ij
        The definition of the Laplacian is LV[i] = sum_j w_ij (v_j - v_i)
        For the uniform variant,    w_ij = 1 / |S[i]|
        For the cotangent variant,
            w_ij = (cot a_ij + cot b_ij) / (sum_k cot a_ik + cot b_ik)
        For the cotangent curvature, w_ij = (cot a_ij + cot b_ij) / (4 A[i])
        where A[i] is the sum of the areas of all triangles containing vertex v_i.
    There is a nice trigonometry identity to compute cotangents. Consider a triangle
    with side lengths A, B, C and angles a, b, c.
    .. code-block:: python
               c
              /|\
             / | \
            /  |  \
         B /  H|   \ A
          /    |    \
         /     |     \
        /a_____|_____b\
               C
        Then cot a = (B^2 + C^2 - A^2) / 4 * area
        We know that area = CH/2, and by the law of cosines we have
        A^2 = B^2 + C^2 - 2BC cos a => B^2 + C^2 - A^2 = 2BC cos a
        Putting these together, we get:
        B^2 + C^2 - A^2     2BC cos a
        _______________  =  _________ = (B/H) cos a = cos a / sin a = cot a
           4 * area            2CH
    [1] Desbrun et al, "Implicit fairing of irregular meshes using diffusion
    and curvature flow", SIGGRAPH 1999.
    [2] Nealan et al, "Laplacian Mesh Optimization", Graphite 2006.
    """
    # Empty batch: return a zero scalar that still participates in autograd.
    if meshes.isempty():
        return torch.tensor(
            [0.0], dtype=torch.float32, device=meshes.device, requires_grad=True
        )
    N = len(meshes)
    if verts_packed is None:
        verts_packed = meshes.verts_packed()  # (sum(V_n), 3)
    num_verts_per_mesh = meshes.num_verts_per_mesh()  # (N,)
    verts_packed_idx = meshes.verts_packed_to_mesh_idx()  # (sum(V_n),)
    # Per-vertex weight 1/V_n so every mesh in the batch contributes equally
    # regardless of its vertex count.
    weights = num_verts_per_mesh.gather(0, verts_packed_idx)  # (sum(V_n),)
    weights = 1.0 / weights.float()
    # We don't want to backprop through the computation of the Laplacian;
    # just treat it as a magic constant matrix that is used to transform
    # verts into normals
    with torch.no_grad():
        if method == "uniform":
            L = meshes.laplacian_packed()
        elif method in ["cot", "cotcurv"]:
            L, inv_areas = laplacian_cot(meshes)
            if method == "cot":
                # Normalize each row by its total cotangent weight.
                norm_w = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1)
                idx = norm_w > 0
                norm_w[idx] = 1.0 / norm_w[idx]
            else:
                # "cotcurv": scale by 1 / (4 A[i]) via the inverse vertex areas.
                norm_w = 0.25 * inv_areas
        else:
            raise ValueError("Method should be one of {uniform, cot, cotcurv}")
    if method == "uniform":
        loss = L.mm(verts_packed)
    elif method == "cot":
        loss = L.mm(verts_packed) * norm_w - verts_packed
    elif method == "cotcurv":
        loss = (L.mm(verts_packed) - verts_packed) * norm_w
    # L2 norm of each vertex's Laplacian vector, weighted per mesh, batch-averaged.
    loss = loss.norm(dim=1)
    loss = loss * weights
    return loss.sum() / N
def laplacian_cot(meshes):
    """Build the cotangent-weighted Laplacian of a batch of meshes.

    Args:
        meshes: Meshes object with a batch of meshes.

    Returns:
        2-element tuple containing

        - **L**: sparse FloatTensor of shape (V, V), with V = sum(V_n),
          where L[i, j] = cot a_ij + cot b_ij for every edge (i, j) of the
          meshes (a_ij, b_ij being the two angles opposite the edge).
        - **inv_areas**: FloatTensor of shape (V, 1) holding, per vertex,
          the inverse of the summed areas of its incident faces.
    """
    verts = meshes.verts_packed()  # (sum(V_n), 3)
    faces = meshes.faces_packed()  # (sum(F_n), 3)
    num_verts = verts.shape[0]
    num_faces = faces.shape[0]

    # Gather the three corner positions of every face.
    tri = verts[faces]
    p0, p1, p2 = tri[:, 0], tri[:, 1], tri[:, 2]

    # Edge lengths per face; side "a" is the one opposite corner 0, etc.
    len_a = (p1 - p2).norm(dim=1)
    len_b = (p0 - p2).norm(dim=1)
    len_c = (p0 - p1).norm(dim=1)

    # Heron's formula; the clamp keeps near-degenerate faces from yielding
    # NaNs after the square root.
    half_perim = 0.5 * (len_a + len_b + len_c)
    area = (
        half_perim
        * (half_perim - len_a)
        * (half_perim - len_b)
        * (half_perim - len_c)
    ).clamp_(min=1e-12).sqrt()

    # cot x = (Y^2 + Z^2 - X^2) / (4 * area) for the angle opposite side X.
    sq_a = len_a * len_a
    sq_b = len_b * len_b
    sq_c = len_c * len_c
    cot = (
        torch.stack(
            [
                sq_b + sq_c - sq_a,
                sq_a + sq_c - sq_b,
                sq_a + sq_b - sq_c,
            ],
            dim=1,
        )
        / area.unsqueeze(1)
    )
    cot /= 4.0

    # Scatter the three cotangents of each face onto its three edges:
    #   L[v1, v2] = cot a, L[v2, v0] = cot b, L[v0, v1] = cot c
    rows = faces[:, [1, 2, 0]]
    cols = faces[:, [2, 0, 1]]
    indices = torch.stack([rows, cols], dim=0).view(2, num_faces * 3)
    L = torch.sparse.FloatTensor(indices, cot.view(-1), (num_verts, num_verts))
    # Adding the transpose makes the weights symmetric, i.e. also sets
    #   L[v2, v1] = cot a, L[v0, v2] = cot b, L[v1, v0] = cot c
    L += L.t()

    # Accumulate, per vertex, the total area of its incident faces ...
    vert_idx = faces.view(-1)
    inv_areas = torch.zeros(num_verts, dtype=torch.float32, device=meshes.device)
    inv_areas.scatter_add_(0, vert_idx, torch.stack([area] * 3, dim=1).view(-1))
    # ... then invert where non-zero (isolated vertices keep 0).
    nonzero = inv_areas > 0
    inv_areas[nonzero] = 1.0 / inv_areas[nonzero]
    inv_areas = inv_areas.view(-1, 1)
    return L, inv_areas
| 36.569307 | 84 | 0.578855 |
18864b9d7449a28b5e8d9bd986a21846e666ecbc | 3,294 | py | Python | bioimageio/spec/commands.py | esgomezm/spec-bioimage-io | 2bc3f8177d5346ac94bf8a771ed619e076c6e935 | [
"MIT"
] | null | null | null | bioimageio/spec/commands.py | esgomezm/spec-bioimage-io | 2bc3f8177d5346ac94bf8a771ed619e076c6e935 | [
"MIT"
] | null | null | null | bioimageio/spec/commands.py | esgomezm/spec-bioimage-io | 2bc3f8177d5346ac94bf8a771ed619e076c6e935 | [
"MIT"
] | null | null | null | import shutil
import traceback
from pathlib import Path
from pprint import pprint
from typing import List, Optional, Union
from marshmallow import ValidationError
from bioimageio.spec import export_resource_package, load_raw_resource_description
from bioimageio.spec.shared.raw_nodes import URI
from bioimageio.spec.shared.utils import resolve_uri
def package(
    rdf_source: Union[Path, str, URI, dict],
    path: Path = Path() / "{src_name}-package.zip",
    update_format: bool = False,
    weights_priority_order: Optional[List[str]] = None,
    verbose: bool = False,
) -> int:
    """Package a BioImage.IO resource described by a BioImage.IO Resource Description File (RDF).

    Validates the RDF first, exports it to a temporary package and finally
    moves the package to ``path`` (the ``{src_name}`` placeholder in the file
    name is filled with the stem of the resolved source). Returns 0 on
    success and a non-zero code otherwise.
    """
    # A broken RDF cannot be packaged; validate (and optionally update) first.
    code = validate(
        rdf_source,
        update_format=update_format,
        update_format_inner=update_format,
        verbose=verbose,
    )
    if isinstance(rdf_source, dict):
        source_name = rdf_source.get("name")
    else:
        source_name = rdf_source
    if code:
        print(f"Cannot export invalid BioImage.IO RDF {source_name}")
        return code

    # Build the package into a temporary location.
    try:
        staged_package = export_resource_package(
            rdf_source,
            update_to_current_format=update_format,
            weights_priority_order=weights_priority_order,
        )
    except Exception as e:
        print(f"Failed to package {source_name} due to: {e}")
        if verbose:
            traceback.print_exc()
        return 1

    # Move it to the requested destination, filling in the source stem.
    try:
        rdf_local_source = resolve_uri(rdf_source)
        path = path.with_name(path.name.format(src_name=rdf_local_source.stem))
        shutil.move(staged_package, path)
    except Exception as e:
        print(f"Failed to move package from {staged_package} to {path} due to: {e}")
        if verbose:
            traceback.print_exc()
        return 1

    print(f"exported bioimageio package from {source_name} to {path}")
    return 0
def validate(
    rdf_source: Union[Path, str, URI, dict],
    update_format: bool = False,
    update_format_inner: bool = None,
    verbose: bool = False,
) -> int:
    """Validate a BioImage.IO Resource Description File (RDF).

    For a "collection" resource every inner application/collection/dataset/
    model/notebook entry is validated recursively (using
    ``update_format_inner`` for both flags). Returns 0 when everything is
    valid, otherwise the number of failures encountered.
    """
    if update_format_inner is None:
        update_format_inner = update_format
    if isinstance(rdf_source, dict):
        source_name = rdf_source.get("name")
    else:
        source_name = rdf_source

    try:
        raw_rd = load_raw_resource_description(
            rdf_source, update_to_current_format=update_format
        )
    except ValidationError as e:
        # Schema-level failure: show the structured marshmallow messages.
        print(f"Invalid {source_name}:")
        pprint(e.normalized_messages())
        return 1
    except Exception as e:
        # Anything else (I/O, parsing, ...) is reported as "could not validate".
        print(f"Could not validate {source_name}:")
        pprint(e)
        if verbose:
            traceback.print_exc()
        return 1

    error_count = 0
    if raw_rd.type == "collection":
        for inner_category in ("application", "collection", "dataset", "model", "notebook"):
            for inner in getattr(raw_rd, inner_category) or []:
                try:
                    inner_source = inner.source
                except Exception as e:
                    pprint(e)
                    error_count += 1
                else:
                    error_count += validate(
                        inner_source, update_format_inner, update_format_inner, verbose
                    )
        if error_count:
            print(f"Found invalid RDFs in collection {source_name}.")
    if not error_count:
        print(f"successfully verified {raw_rd.type} {source_name}")
    return error_count
| 34.3125 | 112 | 0.663327 |
18865f902dc6b7cb2f38ac721fff0266a60bf991 | 1,309 | py | Python | pettingzoo/test/max_cycles_test.py | RedTachyon/PettingZoo | 0c4be0ca0de5a11bf8eff3f7b87976edcacd093e | [
"Apache-2.0"
] | 846 | 2020-05-12T05:55:00.000Z | 2021-10-08T19:38:40.000Z | pettingzoo/test/max_cycles_test.py | RedTachyon/PettingZoo | 0c4be0ca0de5a11bf8eff3f7b87976edcacd093e | [
"Apache-2.0"
] | 237 | 2020-04-27T06:01:39.000Z | 2021-10-13T02:55:54.000Z | pettingzoo/test/max_cycles_test.py | RedTachyon/PettingZoo | 0c4be0ca0de5a11bf8eff3f7b87976edcacd093e | [
"Apache-2.0"
] | 126 | 2020-05-29T04:20:29.000Z | 2021-10-13T05:31:12.000Z | import numpy as np
| 36.361111 | 121 | 0.683728 |
188756e6fbd2150cbbb2fc3f3acd8c382070ad00 | 3,460 | py | Python | models/models.py | nv-tlabs/DIB-R-Single-Image-3D-Reconstruction | faa6364cc6ec464f81f960a9fa6b55bbf3443d5f | [
"Apache-2.0"
] | 8 | 2021-09-10T04:54:54.000Z | 2022-03-26T02:34:54.000Z | models/models.py | nv-tlabs/DIB-R-Single-Image-3D-Reconstruction | faa6364cc6ec464f81f960a9fa6b55bbf3443d5f | [
"Apache-2.0"
] | 2 | 2021-11-12T17:10:26.000Z | 2022-03-24T14:59:01.000Z | models/models.py | nv-tlabs/DIB-R-Single-Image-3D-Reconstruction | faa6364cc6ec464f81f960a9fa6b55bbf3443d5f | [
"Apache-2.0"
] | 2 | 2021-09-19T16:25:26.000Z | 2021-12-27T16:01:31.000Z | '''
MIT License
Copyright (c) 2020 Autonomous Vision Group (AVG), Max Planck Institute for Intelligent Systems Tbingen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# Copyright (c) 2020,21 NVIDIA CORPORATION & AFFILIATES.. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
The functions in file is mostly borrowed from
https://github.com/autonomousvision/differentiable_volumetric_rendering/blob/11542ed5ac4e7e4c19c5c74eba7929c1333f3896/im2mesh/dvr/models/__init__.py
with some modifications.
Codes released under MIT license
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from .decoder import Decoder
from .conv import Resnet18
import numpy as np
########################################################
| 35.306122 | 148 | 0.695954 |
18875eee96b4d3c67bcfb581481611caf6ee9b44 | 4,855 | py | Python | slixmpp/plugins/xep_0223.py | marconfus/slixmpp | bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa | [
"BSD-3-Clause"
] | null | null | null | slixmpp/plugins/xep_0223.py | marconfus/slixmpp | bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa | [
"BSD-3-Clause"
] | null | null | null | slixmpp/plugins/xep_0223.py | marconfus/slixmpp | bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa | [
"BSD-3-Clause"
] | null | null | null | """
Slixmpp: The Slick XMPP Library
Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of Slixmpp.
See the file LICENSE for copying permission.
"""
import logging
from slixmpp.xmlstream import register_stanza_plugin
from slixmpp.plugins.base import BasePlugin, register_plugin
log = logging.getLogger(__name__)
register_plugin(XEP_0223)
| 40.458333 | 82 | 0.557158 |
18890abc6529f42ce336f6f93049f9ebe7b6d9a1 | 3,578 | py | Python | merge_pdf/merge.py | DariHernandez/merge_pdf | 5aa0df950caee81d1a2c2709697f82472858b7ec | [
"MIT"
] | null | null | null | merge_pdf/merge.py | DariHernandez/merge_pdf | 5aa0df950caee81d1a2c2709697f82472858b7ec | [
"MIT"
] | null | null | null | merge_pdf/merge.py | DariHernandez/merge_pdf | 5aa0df950caee81d1a2c2709697f82472858b7ec | [
"MIT"
] | 1 | 2021-06-23T19:46:42.000Z | 2021-06-23T19:46:42.000Z | #! python3
# Combines all the pafs in the current working directory into a single pdf
import PyPDF2, os, sys, logging
| 31.385965 | 103 | 0.564561 |
1889e33c1df53b96578448ca9e90add8e038bfe9 | 3,941 | py | Python | test/inprogress/mock/JobBrowserBFF_get_job_log_mock_test.py | eapearson/kbase-skd-module-job-browser-bff | 426445f90569adac16632ef4921f174e51abd42f | [
"MIT"
] | null | null | null | test/inprogress/mock/JobBrowserBFF_get_job_log_mock_test.py | eapearson/kbase-skd-module-job-browser-bff | 426445f90569adac16632ef4921f174e51abd42f | [
"MIT"
] | 6 | 2020-05-26T17:40:07.000Z | 2022-03-11T16:33:11.000Z | test/inprogress/mock/JobBrowserBFF_get_job_log_mock_test.py | eapearson/kbase-skd-module-job-browser-bff | 426445f90569adac16632ef4921f174e51abd42f | [
"MIT"
] | 1 | 2020-05-26T17:12:59.000Z | 2020-05-26T17:12:59.000Z | # -*- coding: utf-8 -*-
import traceback
from JobBrowserBFF.TestBase import TestBase
from biokbase.Errors import ServiceError
import unittest
import re
UPSTREAM_SERVICE = 'mock'
ENV = 'mock'
JOB_ID_WITH_LOGS = '59820c93e4b06f68bf751eeb' # non-admin
JOB_ID_NO_LOGS = '5cf1522aaa5a4d298c5dc2ff' # non-admin
JOB_ID_NOT_FOUND = '5cf1522aaa5a4d298c5dc2fe' # non-admin
JOB_ID_NO_PERMISSION = '57ec06aee4b0b05cf8996b89' # access it as non-admin user
TIMEOUT_MS = 10000
| 36.831776 | 107 | 0.603146 |
188cb20c595f8931979892b300bbc3dc12968c1c | 674 | py | Python | migrations/versions/323f8d77567b_index_related_entity_names.py | yaelmi3/backslash | edf39caf97af2c926da01c340a83648f4874e97e | [
"BSD-3-Clause"
] | 17 | 2015-11-25T13:02:38.000Z | 2021-12-14T20:18:36.000Z | migrations/versions/323f8d77567b_index_related_entity_names.py | yaelmi3/backslash | edf39caf97af2c926da01c340a83648f4874e97e | [
"BSD-3-Clause"
] | 533 | 2015-11-24T12:47:13.000Z | 2022-02-12T07:59:08.000Z | migrations/versions/323f8d77567b_index_related_entity_names.py | parallelsystems/backslash | 577cdd18d5f665a8b493c4b2e2a605b7e0f6e11b | [
"BSD-3-Clause"
] | 15 | 2015-11-22T13:25:54.000Z | 2022-02-16T19:23:11.000Z | """Index related entity names
Revision ID: 323f8d77567b
Revises: 82b34e2777a4
Create Date: 2016-11-16 13:00:25.782487
"""
# revision identifiers, used by Alembic.
revision = '323f8d77567b'
down_revision = '82b34e2777a4'
from alembic import op
import sqlalchemy as sa
| 24.962963 | 93 | 0.71365 |
1890545ab78e2e102de4a2155cb00d7f5cb2cdc7 | 772 | py | Python | learn/02week/code/cc_dicegame.py | tmax818/nucamp_intro_python | 6fac59f53054055ba4ab40559c44eba07b7f9fd6 | [
"MIT"
] | null | null | null | learn/02week/code/cc_dicegame.py | tmax818/nucamp_intro_python | 6fac59f53054055ba4ab40559c44eba07b7f9fd6 | [
"MIT"
] | null | null | null | learn/02week/code/cc_dicegame.py | tmax818/nucamp_intro_python | 6fac59f53054055ba4ab40559c44eba07b7f9fd6 | [
"MIT"
] | null | null | null | import random
high_score = 0
dice_game()
| 20.864865 | 56 | 0.477979 |
189184fcb0cca3093cef204f192b8979a5c7f238 | 29,762 | py | Python | SVS/model/utils/utils.py | ftshijt/SVS_system | 569d0a2f7ae89965bde132e5be538f6a84be471f | [
"Apache-2.0"
] | null | null | null | SVS/model/utils/utils.py | ftshijt/SVS_system | 569d0a2f7ae89965bde132e5be538f6a84be471f | [
"Apache-2.0"
] | null | null | null | SVS/model/utils/utils.py | ftshijt/SVS_system | 569d0a2f7ae89965bde132e5be538f6a84be471f | [
"Apache-2.0"
] | null | null | null | """Copyright [2020] [Jiatong Shi].
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# !/usr/bin/env python3
import copy
import librosa
from librosa.display import specshow
import matplotlib.pyplot as plt
import numpy as np
import os
from scipy import signal
import soundfile as sf
from SVS.model.layers.global_mvn import GlobalMVN
import SVS.utils.metrics as Metrics
import time
import torch
# from SVS.model.layers.utterance_mvn import UtteranceMVN
# from pathlib import Path
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def collect_stats(train_loader, args):
    """Accumulate spectrogram/mel normalization statistics over the data.

    Iterates once over ``train_loader`` and accumulates, per feature
    dimension, the frame count, sum and sum of squares of the linear
    spectrogram and of the mel spectrogram; only the first
    ``max(length[i])`` frames of every utterance are counted. The results
    are written with ``np.savez`` to ``args.stats_file`` and
    ``args.stats_mel_file`` (keys ``count``/``sum``/``sum_square``) so a
    global mean/variance normalizer such as ``GlobalMVN`` can load them.

    Args:
        train_loader: iterable yielding batches of
            (phone, beat, pitch, spec, real, imag, length, chars,
            char_len_list, mel); only ``spec``, ``length`` and ``mel``
            are used here.
        args: namespace providing the ``stats_file`` and
            ``stats_mel_file`` output paths.

    Returns:
        None. Side effect: the two ``.npz`` statistics files.
    """
    print("get in collect stats", flush=True)
    # Avoid shadowing the builtin ``sum`` with distinct accumulator names.
    frame_count, spec_sum, spec_sum_square = 0, 0, 0
    mel_count, mel_sum, mel_sum_square = 0, 0, 0
    for (
        step,
        (
            phone,
            beat,
            pitch,
            spec,
            real,
            imag,
            length,
            chars,
            char_len_list,
            mel,
        ),
    ) in enumerate(train_loader, 1):
        for i, seq in enumerate(spec.cpu().numpy()):
            # Only count the valid (unpadded) frames of each utterance.
            seq_length = torch.max(length[i])
            seq = seq[:seq_length]
            spec_sum += seq.sum(0)
            spec_sum_square += (seq ** 2).sum(0)
            frame_count += len(seq)
        for i, seq in enumerate(mel.cpu().numpy()):
            seq_length = torch.max(length[i])
            seq = seq[:seq_length]
            mel_sum += seq.sum(0)
            mel_sum_square += (seq ** 2).sum(0)
            mel_count += len(seq)
        # Spec and mel share the same length info, so counts must agree.
        assert mel_count == frame_count
    # exist_ok avoids a crash/race when the directories already exist; the
    # truthiness guard skips makedirs("") for bare file names in the cwd.
    for dirname in (
        os.path.dirname(args.stats_file),
        os.path.dirname(args.stats_mel_file),
    ):
        if dirname:
            os.makedirs(dirname, exist_ok=True)
    np.savez(
        args.stats_file,
        count=frame_count,
        sum=spec_sum,
        sum_square=spec_sum_square,
    )
    np.savez(
        args.stats_mel_file,
        count=mel_count,
        sum=mel_sum,
        sum_square=mel_sum_square,
    )
def train_one_epoch(
    train_loader,
    model,
    device,
    optimizer,
    criterion,
    perceptual_entropy,
    epoch,
    args,
):
    """Run one training epoch over ``train_loader`` and return loss averages.

    Per batch: move the inputs to ``device``, run the forward pass of the
    model selected by ``args.model_type`` (GLU_Transformer, LSTM, GRU_gs,
    PureTransformer, Conformer, Comformer_full or USTC_DAR), compute a
    masked spectrogram loss plus optional mel / double-mel /
    perceptual-entropy terms, then backpropagate and step the optimizer.
    Every ``args.train_step_log`` steps a figure of the current output is
    written to ``<args.model_save_dir>/epoch<epoch>/log_train_figure``.

    Args:
        train_loader: iterable yielding (phone, beat, pitch, spec, real,
            imag, length, chars, char_len_list, mel) batches.
        model: the network being trained.
        device: torch device each batch tensor is moved to.
        optimizer: plain optimizer, or the noam wrapper exposing
            ``step_and_update_lr()`` when ``args.optimizer == "noam"``.
        criterion: masked loss callable ``criterion(output, target, mask)``.
        perceptual_entropy: perceptual-entropy loss module; only used when
            ``args.perceptual_loss > 0``.
        epoch: current epoch index (names the log dir; also gates the
            optimizer step — see the NOTE(review) below).
        args: experiment configuration namespace.

    Returns:
        dict of epoch averages: "loss" and "spec_loss", plus "pe_loss" and
        "mel_loss" when those losses are enabled.
    """
    losses = AverageMeter()
    spec_losses = AverageMeter()
    if args.perceptual_loss > 0:
        pe_losses = AverageMeter()
    if args.n_mels > 0:
        mel_losses = AverageMeter()
    # mcd_metric = AverageMeter()
    # f0_distortion_metric, vuv_error_metric =
    # AverageMeter(), AverageMeter()
    if args.double_mel_loss:
        double_mel_losses = AverageMeter()
    model.train()
    log_save_dir = os.path.join(
        args.model_save_dir, "epoch{}/log_train_figure".format(epoch)
    )
    if not os.path.exists(log_save_dir):
        os.makedirs(log_save_dir)
    start = time.time()
    # f0_ground_truth_all = np.reshape(np.array([]), (-1, 1))
    # f0_synthesis_all = np.reshape(np.array([]), (-1, 1))
    for (
        step,
        (
            phone,
            beat,
            pitch,
            spec,
            real,
            imag,
            length,
            chars,
            char_len_list,
            mel,
        ),
    ) in enumerate(train_loader, 1):
        phone = phone.to(device)
        beat = beat.to(device)
        pitch = pitch.to(device).float()
        spec = spec.to(device).float()
        if mel is not None:
            mel = mel.to(device).float()
        real = real.to(device).float()
        imag = imag.to(device).float()
        # Build padding masks from the per-utterance lengths: one broadcast to
        # the spectrogram feature dim, one to the mel feature dim.
        length_mask = length.unsqueeze(2)
        if mel is not None:
            length_mel_mask = length_mask.repeat(1, 1, mel.shape[2]).float()
            length_mel_mask = length_mel_mask.to(device)
        length_mask = length_mask.repeat(1, 1, spec.shape[2]).float()
        length_mask = length_mask.to(device)
        length = length.to(device)
        char_len_list = char_len_list.to(device)
        if not args.use_asr_post:
            chars = chars.to(device)
            char_len_list = char_len_list.to(device)
        else:
            # With ASR posteriors, ``phone`` holds soft posteriors, not ids.
            phone = phone.float()
        # output = [batch size, num frames, feat_dim]
        # output_mel = [batch size, num frames, n_mels dimension]
        if args.model_type == "GLU_Transformer":
            output, att, output_mel, output_mel2 = model(
                chars,
                phone,
                pitch,
                beat,
                pos_char=char_len_list,
                pos_spec=length,
            )
        elif args.model_type == "LSTM":
            output, hidden, output_mel, output_mel2 = model(phone, pitch, beat)
            att = None
        elif args.model_type == "GRU_gs":
            output, att, output_mel = model(spec, phone, pitch, beat, length, args)
            att = None
        elif args.model_type == "PureTransformer":
            output, att, output_mel, output_mel2 = model(
                chars,
                phone,
                pitch,
                beat,
                pos_char=char_len_list,
                pos_spec=length,
            )
        elif args.model_type == "Conformer":
            # print(f"chars: {np.shape(chars)}, phone:
            # {np.shape(phone)}, length: {np.shape(length)}")
            output, att, output_mel, output_mel2 = model(
                chars,
                phone,
                pitch,
                beat,
                pos_char=char_len_list,
                pos_spec=length,
            )
        elif args.model_type == "Comformer_full":
            output, att, output_mel, output_mel2 = model(
                chars,
                phone,
                pitch,
                beat,
                pos_char=char_len_list,
                pos_spec=length,
            )
        elif args.model_type == "USTC_DAR":
            output_mel = model(
                phone, pitch, beat, length, args
            )  # mel loss written in spec loss
            att = None
        # Keep the unnormalized targets for figure logging below.
        # NOTE(review): mel.clone() assumes mel is never None here, unlike the
        # guarded .to(device) above — confirm the loaders always provide mel.
        spec_origin = spec.clone()
        mel_origin = mel.clone()
        if args.normalize:
            # Normalize targets with precomputed global mean/variance stats.
            sepc_normalizer = GlobalMVN(args.stats_file)
            mel_normalizer = GlobalMVN(args.stats_mel_file)
            spec, _ = sepc_normalizer(spec, length)
            mel, _ = mel_normalizer(mel, length)
        if args.model_type == "USTC_DAR":
            # USTC_DAR predicts mel only; its mel loss stands in for spec_loss.
            spec_loss = 0
        else:
            spec_loss = criterion(output, spec, length_mask)
        if args.n_mels > 0:
            mel_loss = criterion(output_mel, mel, length_mel_mask)
            if args.double_mel_loss:
                double_mel_loss = criterion(output_mel2, mel, length_mel_mask)
            else:
                double_mel_loss = 0
        else:
            mel_loss = 0
            double_mel_loss = 0
        train_loss = mel_loss + double_mel_loss + spec_loss
        if args.perceptual_loss > 0:
            # Blend the perceptual-entropy term with the reconstruction loss.
            pe_loss = perceptual_entropy(output, real, imag)
            final_loss = (
                args.perceptual_loss * pe_loss + (1 - args.perceptual_loss) * train_loss
            )
        else:
            final_loss = train_loss
        # Scale for gradient accumulation before backprop.
        final_loss = final_loss / args.accumulation_steps
        final_loss.backward()
        if args.gradclip > 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.gradclip)
        # NOTE(review): this gates on ``epoch``, not ``step`` — with
        # accumulation_steps > 1 the optimizer would step on every batch of
        # some epochs and never during others. The usual pattern is
        # ``step % args.accumulation_steps == 0``; confirm the intent.
        if (epoch + 1) % args.accumulation_steps == 0:
            if args.optimizer == "noam":
                optimizer.step_and_update_lr()
            else:
                optimizer.step()
            #
            optimizer.zero_grad()
        losses.update(final_loss.item(), phone.size(0))
        if args.model_type != "USTC_DAR":
            spec_losses.update(spec_loss.item(), phone.size(0))
        if args.perceptual_loss > 0:
            pe_losses.update(pe_loss.item(), phone.size(0))
        if args.n_mels > 0:
            mel_losses.update(mel_loss.item(), phone.size(0))
            if args.double_mel_loss:
                double_mel_losses.update(double_mel_loss.item(), phone.size(0))
        if step % args.train_step_log == 0:
            end = time.time()
            if args.model_type == "USTC_DAR":
                # Undo normalization before plotting so figures show real scales.
                if args.normalize and args.stats_file:
                    output_mel, _ = mel_normalizer.inverse(output_mel, length)
                log_figure_mel(
                    step,
                    output_mel,
                    mel_origin,
                    att,
                    length,
                    log_save_dir,
                    args,
                )
                out_log = "step {}: train_loss {:.4f}; spec_loss {:.4f};".format(
                    step, losses.avg, spec_losses.avg
                )
            else:
                # Undo normalization before plotting so figures show real scales.
                if args.normalize and args.stats_file:
                    output, _ = sepc_normalizer.inverse(output, length)
                log_figure(step, output, spec_origin, att, length, log_save_dir, args)
                out_log = "step {}: train_loss {:.4f}; spec_loss {:.4f};".format(
                    step, losses.avg, spec_losses.avg
                )
            if args.perceptual_loss > 0:
                out_log += "pe_loss {:.4f}; ".format(pe_losses.avg)
            if args.n_mels > 0:
                out_log += "mel_loss {:.4f}; ".format(mel_losses.avg)
                if args.double_mel_loss:
                    out_log += "dmel_loss {:.4f}; ".format(double_mel_losses.avg)
            print("{} -- sum_time: {:.2f}s".format(out_log, (end - start)))
    info = {"loss": losses.avg, "spec_loss": spec_losses.avg}
    if args.perceptual_loss > 0:
        info["pe_loss"] = pe_losses.avg
    if args.n_mels > 0:
        info["mel_loss"] = mel_losses.avg
    return info
def validate(dev_loader, model, device, criterion, perceptual_entropy, epoch, args):
    """Evaluate the model for one epoch over ``dev_loader``.

    Mirrors ``train_one_epoch`` (same model dispatch, masking, optional
    normalization and loss terms) but runs under ``torch.no_grad()`` with
    no optimizer step, and additionally tracks mel-cepstral distortion
    (MCD) against the unnormalized target spectrogram. Figures are written
    to ``<args.model_save_dir>/epoch<epoch>/log_val_figure`` every
    ``args.dev_step_log`` steps.

    Args:
        dev_loader: iterable yielding (phone, beat, pitch, spec, real,
            imag, length, chars, char_len_list, mel) batches.
        model: the network under evaluation.
        device: torch device each batch tensor is moved to.
        criterion: masked loss callable ``criterion(output, target, mask)``.
        perceptual_entropy: perceptual-entropy loss module; only used when
            ``args.perceptual_loss > 0``.
        epoch: current epoch index (names the log dir).
        args: experiment configuration namespace.

    Returns:
        dict of epoch averages: "loss", "spec_loss" and "mcd_value", plus
        "pe_loss" and "mel_loss" when those losses are enabled.
    """
    losses = AverageMeter()
    spec_losses = AverageMeter()
    if args.perceptual_loss > 0:
        pe_losses = AverageMeter()
    if args.n_mels > 0:
        mel_losses = AverageMeter()
        # NOTE(review): mcd_metric only exists when n_mels > 0, yet it is
        # updated unconditionally below — with n_mels <= 0 this function
        # would raise NameError. Confirm whether that configuration occurs.
        mcd_metric = AverageMeter()
    if args.double_mel_loss:
        double_mel_losses = AverageMeter()
    model.eval()
    log_save_dir = os.path.join(
        args.model_save_dir, "epoch{}/log_val_figure".format(epoch)
    )
    if not os.path.exists(log_save_dir):
        os.makedirs(log_save_dir)
    start = time.time()
    with torch.no_grad():
        for (
            step,
            (
                phone,
                beat,
                pitch,
                spec,
                real,
                imag,
                length,
                chars,
                char_len_list,
                mel,
            ),
        ) in enumerate(dev_loader, 1):
            phone = phone.to(device)
            beat = beat.to(device)
            pitch = pitch.to(device).float()
            spec = spec.to(device).float()
            if mel is not None:
                mel = mel.to(device).float()
            real = real.to(device).float()
            imag = imag.to(device).float()
            # Padding masks broadcast to the spec and mel feature dims.
            length_mask = length.unsqueeze(2)
            if mel is not None:
                length_mel_mask = length_mask.repeat(1, 1, mel.shape[2]).float()
                length_mel_mask = length_mel_mask.to(device)
            length_mask = length_mask.repeat(1, 1, spec.shape[2]).float()
            length_mask = length_mask.to(device)
            length = length.to(device)
            char_len_list = char_len_list.to(device)
            if not args.use_asr_post:
                chars = chars.to(device)
                char_len_list = char_len_list.to(device)
            else:
                # With ASR posteriors, ``phone`` holds soft posteriors.
                phone = phone.float()
            if args.model_type == "GLU_Transformer":
                output, att, output_mel, output_mel2 = model(
                    chars,
                    phone,
                    pitch,
                    beat,
                    pos_char=char_len_list,
                    pos_spec=length,
                )
            elif args.model_type == "LSTM":
                output, hidden, output_mel, output_mel2 = model(phone, pitch, beat)
                att = None
            elif args.model_type == "GRU_gs":
                output, att, output_mel = model(spec, phone, pitch, beat, length, args)
                att = None
            elif args.model_type == "PureTransformer":
                output, att, output_mel, output_mel2 = model(
                    chars,
                    phone,
                    pitch,
                    beat,
                    pos_char=char_len_list,
                    pos_spec=length,
                )
            elif args.model_type == "Conformer":
                output, att, output_mel, output_mel2 = model(
                    chars,
                    phone,
                    pitch,
                    beat,
                    pos_char=char_len_list,
                    pos_spec=length,
                )
            elif args.model_type == "Comformer_full":
                output, att, output_mel, output_mel2 = model(
                    chars,
                    phone,
                    pitch,
                    beat,
                    pos_char=char_len_list,
                    pos_spec=length,
                )
            elif args.model_type == "USTC_DAR":
                output_mel = model(phone, pitch, beat, length, args)
                att = None
            # Keep unnormalized targets for the MCD metric and figure logging.
            spec_origin = spec.clone()
            mel_origin = mel.clone()
            if args.normalize:
                # NOTE(review): constructing GlobalMVN from file every batch is
                # redundant work; it could be hoisted out of the loop.
                sepc_normalizer = GlobalMVN(args.stats_file)
                mel_normalizer = GlobalMVN(args.stats_mel_file)
                spec, _ = sepc_normalizer(spec, length)
                mel, _ = mel_normalizer(mel, length)
            if args.model_type == "USTC_DAR":
                spec_loss = 0
            else:
                spec_loss = criterion(output, spec, length_mask)
            if args.n_mels > 0:
                mel_loss = criterion(output_mel, mel, length_mel_mask)
                if args.double_mel_loss:
                    double_mel_loss = criterion(output_mel2, mel, length_mel_mask)
                else:
                    double_mel_loss = 0
            else:
                mel_loss = 0
                double_mel_loss = 0
            dev_loss = mel_loss + double_mel_loss + spec_loss
            if args.perceptual_loss > 0:
                pe_loss = perceptual_entropy(output, real, imag)
                final_loss = (
                    args.perceptual_loss * pe_loss
                    + (1 - args.perceptual_loss) * dev_loss
                )
            else:
                final_loss = dev_loss
            losses.update(final_loss.item(), phone.size(0))
            if args.model_type != "USTC_DAR":
                spec_losses.update(spec_loss.item(), phone.size(0))
            if args.perceptual_loss > 0:
                # pe_loss = perceptual_entropy(output, real, imag)
                pe_losses.update(pe_loss.item(), phone.size(0))
            if args.n_mels > 0:
                mel_losses.update(mel_loss.item(), phone.size(0))
            if args.double_mel_loss:
                double_mel_losses.update(double_mel_loss.item(), phone.size(0))
            if args.model_type == "USTC_DAR":
                # Undo normalization before computing metrics/plots.
                if args.normalize and args.stats_file:
                    output_mel, _ = mel_normalizer.inverse(output_mel, length)
                mcd_value, length_sum = (
                    0,
                    1,
                )  # FIX ME! Calculate_melcd_fromMelSpectrum
            else:
                # Undo normalization before computing metrics/plots.
                if args.normalize and args.stats_file:
                    output, _ = sepc_normalizer.inverse(output, length)
                (mcd_value, length_sum,) = Metrics.Calculate_melcd_fromLinearSpectrum(
                    output, spec_origin, length, args
                )
            mcd_metric.update(mcd_value, length_sum)
            if step % args.dev_step_log == 0:
                if args.model_type == "USTC_DAR":
                    log_figure_mel(
                        step,
                        output_mel,
                        mel_origin,
                        att,
                        length,
                        log_save_dir,
                        args,
                    )
                else:
                    log_figure(
                        step,
                        output,
                        spec_origin,
                        att,
                        length,
                        log_save_dir,
                        args,
                    )
                out_log = (
                    "step {}: train_loss {:.4f}; "
                    "spec_loss {:.4f}; mcd_value {:.4f};".format(
                        step, losses.avg, spec_losses.avg, mcd_metric.avg
                    )
                )
                if args.perceptual_loss > 0:
                    out_log += "pe_loss {:.4f}; ".format(pe_losses.avg)
                if args.n_mels > 0:
                    out_log += "mel_loss {:.4f}; ".format(mel_losses.avg)
                if args.double_mel_loss:
                    out_log += "dmel_loss {:.4f}; ".format(double_mel_losses.avg)
                end = time.time()
                print("{} -- sum_time: {}s".format(out_log, (end - start)))
    info = {
        "loss": losses.avg,
        "spec_loss": spec_losses.avg,
        "mcd_value": mcd_metric.avg,
    }
    if args.perceptual_loss > 0:
        info["pe_loss"] = pe_losses.avg
    if args.n_mels > 0:
        info["mel_loss"] = mel_losses.avg
    return info
def save_checkpoint(state, model_filename):
    """Serialize a checkpoint dict to disk.

    Args:
        state: picklable object (here a dict with epoch / state_dict /
            optimizer entries) handed straight to ``torch.save``.
        model_filename: destination path for the checkpoint file.

    Returns:
        0, mirroring the shell-style success convention used elsewhere
        in this module.
    """
    torch.save(state, model_filename)
    return 0
def save_model(
    args,
    epoch,
    model,
    optimizer,
    train_info,
    dev_info,
    logger,
    save_loss_select,
):
    """Persist the model for *epoch* and optionally log its metrics.

    The checkpoint always stores the epoch number and the model's
    state_dict; when the "noam" optimizer wrapper is in use, the
    underlying optimizer state is stored as well.

    Args:
        args: run configuration (uses optimizer, model_save_dir,
            use_tfboard).
        epoch: current epoch number (used in the file name too).
        model: the network being trained.
        optimizer: optimizer, or noam wrapper exposing ``_optimizer``.
        train_info: dict of training metrics for this epoch.
        dev_info: dict of validation metrics for this epoch.
        logger: TensorBoard-style writer passed to ``record_info``.
        save_loss_select: tag embedded in the checkpoint file name.
    """
    checkpoint_path = "{}/epoch_{}_{}.pth.tar".format(
        args.model_save_dir, save_loss_select, epoch
    )
    checkpoint = {
        "epoch": epoch,
        "state_dict": model.state_dict(),
    }
    if args.optimizer == "noam":
        # the noam scheduler wraps the real optimizer on ._optimizer
        checkpoint["optimizer"] = optimizer._optimizer.state_dict()
    save_checkpoint(checkpoint, checkpoint_path)
    # record training and validation information
    if args.use_tfboard:
        record_info(train_info, dev_info, epoch, logger)
def record_info(train_info, dev_info, epoch, logger):
    """Write the epoch's train/dev losses to a TensorBoard-style logger.

    Args:
        train_info: dict with at least a "loss" entry (training).
        dev_info: dict with at least a "loss" entry (validation).
        epoch: step index used as the x-axis value.
        logger: writer object exposing ``add_scalars(tag, dict, step)``.

    Returns:
        0 on success.
    """
    logger.add_scalars(
        "losses",
        {"train_loss": train_info["loss"], "dev_loss": dev_info["loss"]},
        epoch,
    )
    return 0
def invert_spectrogram(spectrogram, win_length, hop_length):
    """Invert a complex spectrogram back to a time-domain signal.

    Applies the inverse STFT with a Hann window.

    Args:
        spectrogram: complex spectrogram of shape [1 + n_fft // 2, t].
        win_length: analysis window length in samples.
        hop_length: hop (stride) between frames in samples.

    Returns:
        The reconstructed 1-D time-domain signal.
    """
    # Pass hop_length by keyword: in librosa >= 0.10 every istft argument
    # after the spectrogram is keyword-only, so the original positional
    # call raises a TypeError on current librosa releases.
    return librosa.istft(
        spectrogram, hop_length=hop_length, win_length=win_length, window="hann"
    )
def griffin_lim(spectrogram, iter_vocoder, n_fft, hop_length, win_length):
    """Griffin-Lim phase reconstruction from a magnitude spectrogram.

    Iteratively alternates between time and frequency domain, replacing
    the estimated magnitude with the target one while keeping the phase.

    Args:
        spectrogram: magnitude spectrogram [1 + n_fft // 2, t].
        iter_vocoder: number of Griffin-Lim iterations.
        n_fft: FFT size in samples.
        hop_length: hop size in samples.
        win_length: window size in samples.

    Returns:
        A 1-D real-valued numpy array with the reconstructed signal.
    """
    X_best = copy.deepcopy(spectrogram)
    for _ in range(iter_vocoder):
        X_t = invert_spectrogram(X_best, win_length, hop_length)
        # Keyword arguments: in librosa >= 0.10 all stft parameters after
        # the signal are keyword-only; positional n_fft/hop_length break.
        est = librosa.stft(
            X_t, n_fft=n_fft, hop_length=hop_length, win_length=win_length
        )
        # keep the estimated phase, clamp magnitude away from zero
        phase = est / np.maximum(1e-8, np.abs(est))
        X_best = spectrogram * phase
    X_t = invert_spectrogram(X_best, win_length, hop_length)
    return np.real(X_t)
def spectrogram2wav(
    mag, max_db, ref_db, preemphasis, power, sr, hop_length, win_length, n_fft,
    gl_iters=100
):
    """Generate a waveform from a linear magnitude spectrogram.

    Inverts the normalization applied at feature-extraction time, runs
    Griffin-Lim phase reconstruction, undoes pre-emphasis and trims
    leading/trailing silence.

    Args:
        mag: numpy array of shape (T, 1 + n_fft // 2), values in [0, 1].
        max_db: dB ceiling used when the spectrogram was normalized.
        ref_db: reference dB level used during normalization.
        preemphasis: pre-emphasis coefficient applied at analysis time.
        power: exponent applied to the magnitude before Griffin-Lim.
        sr: sampling rate in Hz.
        hop_length: hop size in *seconds* (converted to samples below).
        win_length: window size in *seconds* (converted to samples below).
        n_fft: FFT size in samples.
        gl_iters: number of Griffin-Lim iterations; the default keeps the
            previously hard-coded value of 100.

    Returns:
        wav: a 1-D float32 numpy array.
    """
    # hop/win arrive in seconds; convert to samples
    hop_length = int(hop_length * sr)
    win_length = int(win_length * sr)
    # transpose to (1 + n_fft // 2, T) as expected by the STFT helpers
    mag = mag.T
    # de-normalize
    mag = (np.clip(mag, 0, 1) * max_db) - max_db + ref_db
    # dB to amplitude
    mag = np.power(10.0, mag * 0.05)
    # wav reconstruction via Griffin-Lim
    wav = griffin_lim(mag ** power, gl_iters, n_fft, hop_length, win_length)
    # de-preemphasis (inverse of y[n] = x[n] - preemphasis * x[n-1])
    wav = signal.lfilter([1], [1, -preemphasis], wav)
    # trim leading/trailing silence
    wav, _ = librosa.effects.trim(wav)
    return wav.astype(np.float32)
def log_figure_mel(step, output, spec, att, length, save_dir, args):
    """Plot predicted vs. ground-truth spectrograms (mel variant).

    Takes the first sample of the batch, truncates it to its valid
    length and writes ``<step>.png`` (prediction next to reference)
    and, when attention weights are given, ``<step>_att.png`` (first
    four attention maps) into ``save_dir``.  Unlike ``log_figure``, no
    wav is written: the mel output needs a neural vocoder (see the
    commented-out code below).

    Args:
        step: global step number, used only to name the output files.
        output: predicted mel spectrogram batch (torch tensor).
        spec: reference spectrogram batch (torch tensor).
        att: attention weight batch (torch tensor) or None.
        length: tensor of valid frame counts per sample.
        save_dir: directory that receives the png files.
        args: run configuration (only referenced by the commented-out
            vocoder code).
    """
    # only get one sample from a batch
    # save wav and plot spectrogram
    output = output.cpu().detach().numpy()[0]
    out_spec = spec.cpu().detach().numpy()[0]
    length = np.max(length.cpu().detach().numpy()[0])
    output = output[:length]
    out_spec = out_spec[:length]
    # FIX ME! Need WaveRNN to produce wav from mel-spec
    # wav = spectrogram2wav(output, args.max_db, args.ref_db,
    # args.preemphasis, args.power, args.sampling_rate,
    # args.frame_shift, args.frame_length, args.nfft)
    # wav_true = spectrogram2wav(out_spec, args.max_db,
    # args.ref_db, args.preemphasis, args.power, args.sampling_rate,
    # args.frame_shift, args.frame_length, args.nfft)
    # if librosa.__version__ < '0.8.0':
    # librosa.output.write_wav(os.path.join(save_dir,
    # '{}.wav'.format(step)), wav, args.sampling_rate)
    # librosa.output.write_wav(os.path.join(save_dir,
    # '{}_true.wav'.format(step)), wav_true, args.sampling_rate)
    # else:
    # # librosa > 0.8 remove librosa.output.write_wav module
    # sf.write(os.path.join(save_dir, '{}.wav'.format(step)),
    # wav, args.sampling_rate,format='wav', subtype='PCM_24')
    # sf.write(os.path.join(save_dir, '{}_true.wav'.format(step)),
    # wav, args.sampling_rate,format='wav', subtype='PCM_24')
    # NOTE(review): pyplot's implicit figure is reused across calls with no
    # plt.clf()/plt.close(); confirm successive steps do not draw over
    # stale axes.
    plt.subplot(1, 2, 1)
    specshow(output.T)
    plt.title("prediction")
    plt.subplot(1, 2, 2)
    specshow(out_spec.T)
    plt.title("ground_truth")
    plt.savefig(os.path.join(save_dir, "{}.png".format(step)))
    if att is not None:
        att = att.cpu().detach().numpy()[0]
        # crop the attention maps to the valid region
        att = att[:, :length, :length]
        plt.subplot(1, 4, 1)
        specshow(att[0])
        plt.subplot(1, 4, 2)
        specshow(att[1])
        plt.subplot(1, 4, 3)
        specshow(att[2])
        plt.subplot(1, 4, 4)
        specshow(att[3])
        plt.savefig(os.path.join(save_dir, "{}_att.png".format(step)))
def log_figure(step, output, spec, att, length, save_dir, args):
    """Save wavs and spectrogram figures for one batch sample.

    Takes the first sample of the batch, truncates it to its valid
    length, reconstructs audio with Griffin-Lim for both the predicted
    and the reference linear spectrogram, and writes ``<step>.wav``,
    ``<step>_true.wav``, ``<step>.png`` and (when attention weights are
    given) ``<step>_att.png`` into ``save_dir``.

    Args:
        step: global step number, used only to name the output files.
        output: predicted linear spectrogram batch (torch tensor).
        spec: reference linear spectrogram batch (torch tensor).
        att: attention weight batch (torch tensor) or None.
        length: tensor of valid frame counts per sample.
        save_dir: directory that receives the output files.
        args: run configuration forwarded to ``spectrogram2wav``.
    """
    # only get one sample from a batch
    # save wav and plot spectrogram
    output = output.cpu().detach().numpy()[0]
    out_spec = spec.cpu().detach().numpy()[0]
    length = np.max(length.cpu().detach().numpy()[0])
    output = output[:length]
    out_spec = out_spec[:length]
    wav = spectrogram2wav(
        output,
        args.max_db,
        args.ref_db,
        args.preemphasis,
        args.power,
        args.sampling_rate,
        args.frame_shift,
        args.frame_length,
        args.nfft,
    )
    wav_true = spectrogram2wav(
        out_spec,
        args.max_db,
        args.ref_db,
        args.preemphasis,
        args.power,
        args.sampling_rate,
        args.frame_shift,
        args.frame_length,
        args.nfft,
    )
    # NOTE(review): this is a lexicographic *string* comparison, so e.g.
    # "0.10.x" < "0.8.0" is True and librosa 0.10 would wrongly take the
    # write_wav branch (removed in librosa >= 0.8) — verify on current
    # librosa versions.
    if librosa.__version__ < "0.8.0":
        librosa.output.write_wav(
            os.path.join(save_dir, "{}.wav".format(step)),
            wav,
            args.sampling_rate,
        )
        librosa.output.write_wav(
            os.path.join(save_dir, "{}_true.wav".format(step)),
            wav_true,
            args.sampling_rate,
        )
    else:
        # librosa > 0.8 remove librosa.output.write_wav module
        sf.write(
            os.path.join(save_dir, "{}.wav".format(step)),
            wav,
            args.sampling_rate,
            format="wav",
            subtype="PCM_24",
        )
        sf.write(
            os.path.join(save_dir, "{}_true.wav".format(step)),
            wav_true,
            args.sampling_rate,
            format="wav",
            subtype="PCM_24",
        )
    # prediction vs. ground truth side by side
    plt.subplot(1, 2, 1)
    specshow(output.T)
    plt.title("prediction")
    plt.subplot(1, 2, 2)
    specshow(out_spec.T)
    plt.title("ground_truth")
    plt.savefig(os.path.join(save_dir, "{}.png".format(step)))
    if att is not None:
        att = att.cpu().detach().numpy()[0]
        # crop the attention maps to the valid region
        att = att[:, :length, :length]
        plt.subplot(1, 4, 1)
        specshow(att[0])
        plt.subplot(1, 4, 2)
        specshow(att[1])
        plt.subplot(1, 4, 3)
        specshow(att[2])
        plt.subplot(1, 4, 4)
        specshow(att[3])
        plt.savefig(os.path.join(save_dir, "{}_att.png".format(step)))
def log_mel(step, output_mel, spec, att, length, save_dir, args, voc_model):
    """Save wavs and figures for one sample, using a neural vocoder.

    Like ``log_figure`` but the prediction is a mel spectrogram that is
    turned into audio by ``voc_model.generate``; the ground-truth wav is
    still reconstructed from the linear spectrogram with Griffin-Lim.
    Outputs ``<step>.wav``, ``<step>_true.wav``, ``<step>.png`` and
    optionally ``<step>_att.png`` in ``save_dir``.

    Args:
        step: global step number, used only to name the output files.
        output_mel: predicted mel spectrogram batch (torch tensor).
        spec: reference linear spectrogram batch (torch tensor).
        att: attention weight batch (torch tensor) or None.
        length: tensor of valid frame counts per sample.
        save_dir: directory that receives the output files.
        args: run configuration forwarded to ``spectrogram2wav``.
        voc_model: vocoder with a ``generate(mel) -> wav`` method.
    """
    # only get one sample from a batch
    # save wav and plot spectrogram
    output_mel = output_mel.cpu().detach().numpy()[0]
    out_spec = spec.cpu().detach().numpy()[0]
    length = np.max(length.cpu().detach().numpy()[0])
    output_mel = output_mel[:length]
    out_spec = out_spec[:length]
    wav = voc_model.generate(output_mel)
    wav_true = spectrogram2wav(
        out_spec,
        args.max_db,
        args.ref_db,
        args.preemphasis,
        args.power,
        args.sampling_rate,
        args.frame_shift,
        args.frame_length,
        args.nfft,
    )
    # NOTE(review): lexicographic string comparison — "0.10.x" < "0.8.0"
    # is True, so librosa 0.10 would take the removed write_wav branch;
    # verify on current librosa versions.
    if librosa.__version__ < "0.8.0":
        librosa.output.write_wav(
            os.path.join(save_dir, "{}.wav".format(step)), wav, args.sampling_rate
        )
        librosa.output.write_wav(
            os.path.join(save_dir, "{}_true.wav".format(step)),
            wav_true,
            args.sampling_rate,
        )
    else:
        # librosa > 0.8 remove librosa.output.write_wav module
        sf.write(
            os.path.join(save_dir, "{}.wav".format(step)),
            wav,
            args.sampling_rate,
            format="wav",
            subtype="PCM_24",
        )
        sf.write(
            os.path.join(save_dir, "{}_true.wav".format(step)),
            wav_true,
            args.sampling_rate,
            format="wav",
            subtype="PCM_24",
        )
    # prediction vs. ground truth side by side
    plt.subplot(1, 2, 1)
    specshow(output_mel.T)
    plt.title("prediction")
    plt.subplot(1, 2, 2)
    specshow(out_spec.T)
    plt.title("ground_truth")
    plt.savefig(os.path.join(save_dir, "{}.png".format(step)))
    if att is not None:
        att = att.cpu().detach().numpy()[0]
        # crop the attention maps to the valid region
        att = att[:, :length, :length]
        plt.subplot(1, 4, 1)
        specshow(att[0])
        plt.subplot(1, 4, 2)
        specshow(att[1])
        plt.subplot(1, 4, 3)
        specshow(att[2])
        plt.subplot(1, 4, 4)
        specshow(att[3])
        plt.savefig(os.path.join(save_dir, "{}_att.png".format(step)))
def Calculate_time(elapsed_time):
    """Split a duration in seconds into whole (hours, minutes, seconds).

    Args:
        elapsed_time: duration in seconds (int or float, non-negative).

    Returns:
        Tuple ``(hours, minutes, seconds)`` of ints; fractional seconds
        are truncated.
    """
    whole_hours = int(elapsed_time / 3600)
    leftover = elapsed_time - whole_hours * 3600
    whole_mins = int(leftover / 60)
    whole_secs = int(leftover - whole_mins * 60)
    return whole_hours, whole_mins, whole_secs
def Calculate_time_path(path):
    """Sum the durations (in seconds) of all wav files under *path*.

    Expects *path* to contain one sub-directory per song/number, each of
    which holds the wav files.  Each wav path and its duration are
    printed as a progress trace, matching the original behavior.

    Args:
        path: directory whose sub-directories contain wav files.

    Returns:
        Total duration of all wav files, in seconds.
    """
    total_time = 0
    for entry in os.listdir(path):
        entry_dir = os.path.join(path, entry)
        for wav_name in os.listdir(entry_dir):
            wav_path = os.path.join(entry_dir, wav_name)
            print(wav_path)
            duration = librosa.get_duration(filename=wav_path)
            print(duration)
            total_time += duration
    return total_time
def Calculate_dataset_duration(dataset_path):
    """Print the total audio duration of a train/dev/test dataset.

    Sums the wav durations of the ``train``, ``dev`` and ``test``
    sub-directories of *dataset_path* and prints the result as
    hours/minutes/seconds.

    Args:
        dataset_path: root directory containing train/, dev/ and test/.
    """
    train_path = os.path.join(dataset_path, "train")
    dev_path = os.path.join(dataset_path, "dev")
    test_path = os.path.join(dataset_path, "test")
    total_time = (
        Calculate_time_path(train_path)
        + Calculate_time_path(dev_path)
        + Calculate_time_path(test_path)
    )
    hours, mins, secs = Calculate_time(total_time)
    # Fixed: the original f-string ended with a stray apostrophe
    # ("...{secs}s'"), which leaked into the printed message.
    print(f"Time: {hours}h {mins}m {secs}s")
if __name__ == "__main__":
    # Ad-hoc entry point: sums the duration of every wav file under the
    # hard-coded dataset path below and prints it as hours/minutes/seconds.
    # path = "/data5/jiatong/SVS_system/SVS/data/
    # public_dataset/kiritan_data/wav_info"
    path = "/data5/jiatong/SVS_system/SVS/data/public_dataset/hts_data/wav_info"
    Calculate_dataset_duration(path)
| 32.140389 | 88 | 0.540152 |
18918e38b1911d887e6cd9f7014807483471a02f | 10,115 | py | Python | run_classifier.py | to-aoki/my-pytorch-bert | 8e412ae6331f5f19fee55b430be389de2f5c49a6 | [
"Apache-2.0"
] | 21 | 2019-03-04T03:43:19.000Z | 2022-02-14T15:50:41.000Z | run_classifier.py | to-aoki/my-pytorch-bert | 8e412ae6331f5f19fee55b430be389de2f5c49a6 | [
"Apache-2.0"
] | 1 | 2019-10-07T17:49:21.000Z | 2019-12-14T11:50:10.000Z | run_classifier.py | to-aoki/my-pytorch-bert | 8e412ae6331f5f19fee55b430be389de2f5c49a6 | [
"Apache-2.0"
] | 5 | 2019-07-19T07:04:55.000Z | 2020-07-01T13:24:14.000Z | # Author Toshihiko Aoki
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BertClassifier."""
from mptb import BertClassifier
if __name__ == '__main__':
    # Command-line entry point: builds the argument parser for BERT text
    # classification and forwards everything to `classification(...)`.
    import argparse
    parser = argparse.ArgumentParser(description='BERT classification.', usage='%(prog)s [options]')
    parser.add_argument('--config_path', help='JSON file path for defines networks.', nargs='?',
                        type=str, default='config/bert_base.json')
    parser.add_argument('--train_dataset_path', help='Training Dataset file (TSV file) path for classification.',
                        nargs='?', type=str, default=None)
    parser.add_argument('--eval_dataset_path', help='Evaluate Dataset file (TSV file) path for classification.',
                        nargs='?', type=str, default=None)
    parser.add_argument('--pretrain_path', help='Pre-training PyTorch model path.', nargs='?',
                        type=str, default=None)
    parser.add_argument('--tf_pretrain_path', help='Pre-training TensorFlow(Google) model path.', nargs='?',
                        type=str, default=None)
    parser.add_argument('--model_path', help='Classifier PyTorch model path.', nargs='?',
                        type=str, default=None)
    parser.add_argument('--vocab_path', help='Vocabulary file path for BERT to pre-training.', nargs='?', required=True,
                        type=str)
    parser.add_argument('--sp_model_path', help='Trained SentencePiece model path.', nargs='?',
                        type=str, default=None)
    parser.add_argument('--save_dir', help='Classification model saving directory path.', nargs='?',
                        type=str, default='classifier/')
    parser.add_argument('--log_dir', help='Logging file path.', nargs='?',
                        type=str, default=None)
    parser.add_argument('--batch_size', help='Batch size', nargs='?',
                        type=int, default=4)
    parser.add_argument('--max_pos', help='The maximum sequence length for BERT (slow as big).', nargs='?',
                        type=int, default=512)
    parser.add_argument('--lr', help='Learning rate', nargs='?',
                        type=float, default=2e-5)
    parser.add_argument('--warmup_steps', help='Warm-up steps proportion.', nargs='?',
                        type=float, default=0.1)
    parser.add_argument('--epochs', help='Epochs', nargs='?',
                        type=int, default=10)
    parser.add_argument('--per_save_epochs', help=
                        'Saving training model timing is the number divided by the epoch number', nargs='?',
                        type=int, default=1)
    parser.add_argument('--mode', help='train or eval', nargs='?',
                        type=str, default='train')
    parser.add_argument('--label_num', help='labels number', nargs='?',
                        type=int, default=-1)
    parser.add_argument('--balance_weight', action='store_true',
                        help='Use automatically adjust weights')
    parser.add_argument('--balance_sample', action='store_true',
                        help='Use automatically adjust samples(random)')
    parser.add_argument('--under_sampling', action='store_true',
                        help='Use automatically adjust under samples')
    parser.add_argument('--under_sampling_cycle', action='store_true',
                        help='Use automatically adjust under samples cycle peer')
    parser.add_argument('--tokenizer', nargs='?', type=str, default='google',
                        help=
                        'Select from the following name groups tokenizer that uses only vocabulary files.(mecab, juman)'
                        )
    parser.add_argument('--read_head', action='store_true',
                        help='Use not include header TSV file')
    parser.add_argument('--fp16', action='store_true',
                        help='Use nVidia fp16 (require apex module)')
    parser.add_argument('--task', nargs='?', type=str, default='class', help='Target Task (class or choice)')
    parser.add_argument('--device', nargs='?', type=str, default=None, help='Target Runing device name.')
    parser.add_argument('--quantize', action='store_true',
                        help='Use quantized bert (testing),')
    parser.add_argument('--model_name', nargs='?', type=str, default='bert',
                        help=
                        'Select from the following name groups model. (bert, proj, albert)'
                        )
    parser.add_argument('--optimizer', nargs='?', type=str, default='bert',
                        help=
                        'Select from the following name groups optimizer. (bert, adamw, lamb)'
                        )
    parser.add_argument('--encoder_json_path', help='GPT2 encoder JSON file path.', nargs='?', type=str)
    parser.add_argument('--vocab_bpe_path', help='GPT2 encoder bpe file path.', nargs='?', type=str)
    parser.add_argument('--sw_log_dir', help='TensorBoard lgo_dir path.', nargs='?', type=str, default='runs')
    args = parser.parse_args()
    # NOTE(review): only BertClassifier is imported at module level, yet
    # `classification` is called here; unless it is defined elsewhere in
    # this file this raises NameError — confirm.
    classification(
        config_path=args.config_path,
        train_dataset_path=args.train_dataset_path,
        eval_dataset_path=args.eval_dataset_path,
        pretrain_path= args.pretrain_path,
        tf_pretrain_path=args.tf_pretrain_path,
        model_path=args.model_path,
        vocab_path=args.vocab_path,
        sp_model_path=args.sp_model_path,
        save_dir=args.save_dir,
        log_dir=args.log_dir,
        batch_size=args.batch_size,
        max_pos=args.max_pos,
        lr=args.lr,
        warmup_proportion=args.warmup_steps,
        epochs=args.epochs,
        per_save_epochs=args.per_save_epochs,
        mode=args.mode,
        label_num=args.label_num,
        balance_weight=args.balance_weight,
        balance_sample=args.balance_sample,
        under_sampling=args.under_sampling,
        under_sampling_cycle=args.under_sampling_cycle,
        tokenizer_name=args.tokenizer,
        read_head=args.read_head,
        fp16=args.fp16,
        task=args.task,
        device=args.device,
        quantize=args.quantize,
        model_name=args.model_name,
        optimizer=args.optimizer,
        encoder_json_path=args.encoder_json_path,
        vocab_bpe_path=args.vocab_bpe_path,
        sw_log_dir=args.sw_log_dir
    )
| 43.787879 | 120 | 0.625309 |
1891a7b479098620bf760b07575489ea73d1fccf | 279 | py | Python | dev/urls.py | ledgku/utilscombine | aaf13ca2208bcf522f005c64769c34bc8e8ee9f4 | [
"MIT"
] | 2 | 2018-07-18T10:10:01.000Z | 2018-07-18T10:10:17.000Z | dev/urls.py | ledgku/utilscombine | aaf13ca2208bcf522f005c64769c34bc8e8ee9f4 | [
"MIT"
] | 5 | 2018-09-19T11:33:54.000Z | 2021-06-10T20:43:32.000Z | dev/urls.py | ledgku/utilscombine | aaf13ca2208bcf522f005c64769c34bc8e8ee9f4 | [
"MIT"
] | null | null | null | from django.urls import path
from dev.views import FindMyIp,FindMyGps
app_name = 'dev'
urlpatterns = [
# path('', Main.as_view(), name = 'index'),
path('findmyip', FindMyIp.as_view(), name = 'findmyip'),
path('findmygps', FindMyGps.as_view(), name = 'findmygps'),
] | 27.9 | 63 | 0.666667 |
1891f24339199dafe363a5f1dfae29b9615437e5 | 406 | py | Python | cookbook/c02/p17_html_xml.py | itpubs/python3-cookbook | 140f5e4cc0416b9674edca7f4c901b1f58fc1415 | [
"Apache-2.0"
] | 3 | 2018-09-19T06:44:13.000Z | 2019-03-24T10:07:07.000Z | cookbook/c02/p17_html_xml.py | itpubs/python3-cookbook | 140f5e4cc0416b9674edca7f4c901b1f58fc1415 | [
"Apache-2.0"
] | 2 | 2020-09-19T17:10:23.000Z | 2020-10-17T16:43:52.000Z | cookbook/c02/p17_html_xml.py | itpubs/python3-cookbook | 140f5e4cc0416b9674edca7f4c901b1f58fc1415 | [
"Apache-2.0"
] | 1 | 2020-07-20T22:10:31.000Z | 2020-07-20T22:10:31.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: htmlxml
Desc :
"""
import html
if __name__ == '__main__':
html_xml()
| 16.916667 | 56 | 0.618227 |
1892414a440ce9963c67565a93d5515f1867c2ed | 53 | py | Python | utils/__init__.py | Rfam/rfam-production | 36f3963380da2a08e9cf73c951691c4e95738ac4 | [
"Apache-2.0"
] | 7 | 2016-06-17T09:21:11.000Z | 2021-10-13T20:25:06.000Z | utils/__init__.py | mb1069/rfam-production | 10c76e249dc22d30862b3a873fd54f390e859ad8 | [
"Apache-2.0"
] | 82 | 2016-04-08T10:51:32.000Z | 2022-03-11T13:49:18.000Z | utils/__init__.py | mb1069/rfam-production | 10c76e249dc22d30862b3a873fd54f390e859ad8 | [
"Apache-2.0"
] | 3 | 2019-09-01T09:46:35.000Z | 2021-11-29T08:01:58.000Z | __all__ = ['db_utils', 'RfamDB', 'parse_taxbrowser']
| 26.5 | 52 | 0.698113 |
1892cc18e4d651a8551d9ce9d603987daef5b912 | 625 | py | Python | jina/drivers/craft.py | slettner/jina | 4140961c62359e3acd540a6d88931665c6313824 | [
"Apache-2.0"
] | null | null | null | jina/drivers/craft.py | slettner/jina | 4140961c62359e3acd540a6d88931665c6313824 | [
"Apache-2.0"
] | null | null | null | jina/drivers/craft.py | slettner/jina | 4140961c62359e3acd540a6d88931665c6313824 | [
"Apache-2.0"
] | null | null | null | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional
from . import FlatRecursiveMixin, BaseExecutableDriver, DocsExtractUpdateMixin
| 31.25 | 84 | 0.72 |
18931f6a4553b81704db5d7e58f8609781b151d9 | 5,543 | py | Python | solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py | freyrsae/pydatalab | 9aba1ac6bbe8e1384e7a4b07c5042af84348797d | [
"Apache-2.0"
] | 198 | 2016-07-14T19:47:52.000Z | 2022-03-15T08:45:21.000Z | solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py | freyrsae/pydatalab | 9aba1ac6bbe8e1384e7a4b07c5042af84348797d | [
"Apache-2.0"
] | 534 | 2016-07-15T19:12:43.000Z | 2022-03-11T23:11:39.000Z | solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py | freyrsae/pydatalab | 9aba1ac6bbe8e1384e7a4b07c5042af84348797d | [
"Apache-2.0"
] | 86 | 2016-07-13T17:39:05.000Z | 2021-11-03T03:39:41.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import collections
import json
import os
import six
import sys
from tensorflow.python.lib.io import file_io
SCHEMA_FILE = 'schema.json'
NUMERICAL_ANALYSIS_FILE = 'stats.json'
CATEGORICAL_ANALYSIS_FILE = 'vocab_%s.csv'
def parse_arguments(argv):
    """Parse command line arguments.

    Args:
        argv: list of command line arguments, including the program name.

    Returns:
        An argparse Namespace object with input_file_pattern, output_dir
        and schema_file set.  The output directory is created as a side
        effect so later writes cannot fail on a missing folder.
    """
    arg_parser = argparse.ArgumentParser(
        description='Runs Preprocessing on structured CSV data.')
    arg_parser.add_argument(
        '--input-file-pattern',
        type=str,
        required=True,
        help='Input CSV file names. May contain a file pattern')
    arg_parser.add_argument(
        '--output-dir',
        type=str,
        required=True,
        help='Google Cloud Storage which to place outputs.')
    arg_parser.add_argument(
        '--schema-file',
        type=str,
        required=True,
        help=('BigQuery json schema file'))
    parsed_args = arg_parser.parse_args(args=argv[1:])
    # Make sure the output folder exists if local folder.
    file_io.recursive_create_dir(parsed_args.output_dir)
    return parsed_args
def run_numerical_categorical_analysis(args, schema_list):
    """Makes the numerical and categorical analysis files.

    Streams every matching CSV file once, accumulating min/max/count/sum
    for numeric columns and the set of unique labels for string columns,
    then writes a stats json file and one vocab file per string column
    into ``args.output_dir``.

    Args:
        args: the command line args (uses input_file_pattern, output_dir).
        schema_list: python object of the schema json file.

    Raises:
        ValueError: if schema contains unknown column types.
    """
    header = [column['name'] for column in schema_list]
    input_files = file_io.get_matching_files(args.input_file_pattern)
    # Check the schema is valid: only string/integer/float are supported.
    for col_schema in schema_list:
        col_type = col_schema['type'].lower()
        if col_type != 'string' and col_type != 'integer' and col_type != 'float':
            raise ValueError('Schema contains an unsupported type %s.' % col_type)
    # initialize the results
    # NOTE(review): _init_numerical_results is not visible in this chunk;
    # presumably it seeds {'min': inf, 'max': -inf, 'count': 0, 'sum': 0}
    # (the update logic below relies on those keys existing) — confirm.
    numerical_results = collections.defaultdict(_init_numerical_results)
    categorical_results = collections.defaultdict(set)
    # for each file, update the numerical stats from that file, and update the set
    # of unique labels.
    # NOTE(review): rows are parsed with a naive split(','), which breaks
    # on quoted fields containing commas — verify the inputs are simple CSV.
    for input_file in input_files:
        with file_io.FileIO(input_file, 'r') as f:
            for line in f:
                parsed_line = dict(zip(header, line.strip().split(',')))
                for col_schema in schema_list:
                    col_name = col_schema['name']
                    col_type = col_schema['type']
                    if col_type.lower() == 'string':
                        categorical_results[col_name].update([parsed_line[col_name]])
                    else:
                        # numerical column.
                        # if empty, skip
                        if not parsed_line[col_name].strip():
                            continue
                        numerical_results[col_name]['min'] = (
                            min(numerical_results[col_name]['min'],
                                float(parsed_line[col_name])))
                        numerical_results[col_name]['max'] = (
                            max(numerical_results[col_name]['max'],
                                float(parsed_line[col_name])))
                        numerical_results[col_name]['count'] += 1
                        numerical_results[col_name]['sum'] += float(parsed_line[col_name])
    # Update numerical_results to just have min/max/mean
    for col_schema in schema_list:
        if col_schema['type'].lower() != 'string':
            col_name = col_schema['name']
            mean = numerical_results[col_name]['sum'] / numerical_results[col_name]['count']
            del numerical_results[col_name]['sum']
            del numerical_results[col_name]['count']
            numerical_results[col_name]['mean'] = mean
    # Write the numerical_results to a json file.
    file_io.write_string_to_file(
        os.path.join(args.output_dir, NUMERICAL_ANALYSIS_FILE),
        json.dumps(numerical_results, indent=2, separators=(',', ': ')))
    # Write the vocab files. Each label is on its own line.
    for name, unique_labels in six.iteritems(categorical_results):
        labels = '\n'.join(list(unique_labels))
        file_io.write_string_to_file(
            os.path.join(args.output_dir, CATEGORICAL_ANALYSIS_FILE % name),
            labels)
def run_analysis(args):
    """Builds the analysis files needed for training.

    Reads the BigQuery-style json schema, runs the numerical and
    categorical column analysis, and keeps a copy of the schema next to
    the analysis outputs in ``args.output_dir``.

    Args:
        args: parsed command line args (uses schema_file, output_dir).
    """
    schema = json.loads(file_io.read_file_to_string(args.schema_file))
    run_numerical_categorical_analysis(args, schema)
    # Also save a copy of the schema in the output folder.
    file_io.copy(
        args.schema_file,
        os.path.join(args.output_dir, SCHEMA_FILE),
        overwrite=True,
    )
if __name__ == '__main__':
    # NOTE(review): main() is not visible in this chunk; presumably it is
    # defined elsewhere in the file — confirm before running as a script.
    main()
| 32.798817 | 86 | 0.673642 |
189920c0b5a16b9f087f121b4d4d90e4791b2184 | 5,876 | py | Python | habitrac/habits/api/resolvers.py | IgnisDa/habitrac | 0b5f6f1f4a6659c4cce49aacae54cdb0e74af67a | [
"Apache-2.0"
] | null | null | null | habitrac/habits/api/resolvers.py | IgnisDa/habitrac | 0b5f6f1f4a6659c4cce49aacae54cdb0e74af67a | [
"Apache-2.0"
] | null | null | null | habitrac/habits/api/resolvers.py | IgnisDa/habitrac | 0b5f6f1f4a6659c4cce49aacae54cdb0e74af67a | [
"Apache-2.0"
] | null | null | null | import datetime
import json
from ariadne import MutationType, QueryType, convert_kwargs_to_snake_case
from ariadne_token_auth.decorators import login_required
from django.contrib.auth import get_user_model
from habits import models as habit_models
from utils.general import get_user
from utils.handlers.errors import ErrorContainer
# Active Django user model (may be a project-specific subclass).
CUSTOM_USER_MODEL = get_user_model()
# Ariadne root operation types; resolver functions register on these —
# presumably via decorators elsewhere in this module (not visible here).
query = QueryType()
mutation = MutationType()
| 32.826816 | 89 | 0.681246 |
189970d0714654ace0194ba8650e7bc2d279578b | 2,542 | py | Python | src/target_matrix.py | smusali/rightwhalerecognition | 0def80bc7e19864093008112455ae08869b40501 | [
"MIT"
] | null | null | null | src/target_matrix.py | smusali/rightwhalerecognition | 0def80bc7e19864093008112455ae08869b40501 | [
"MIT"
] | null | null | null | src/target_matrix.py | smusali/rightwhalerecognition | 0def80bc7e19864093008112455ae08869b40501 | [
"MIT"
] | null | null | null | import csv, pylab as pl, re
DB = dict();
BD = dict();
whales_ = [];
classes = [];
line_num = 0;
with open('data/train.csv', 'rb') as train_class_data:
data = csv.reader(train_class_data, delimiter=',');
for line in data:
if (line_num == 0):
line_num += 1;
continue;
keys = DB.keys();
syek = BD.keys();
pic_name = line[0];
class_name = line[1];
whales_.append(int(re.sub('w_','',re.sub('.jpg','',pic_name))));
if (class_name not in keys):
DB[class_name] = [pic_name];
classes.append(int(re.sub('whale_','',class_name)));
else:
DB[class_name].append(pic_name);
BD[pic_name] = class_name;
keys = DB.keys();
N = len(keys);
frequency_table = [0 for i in xrange(N)];
for i in xrange(N):
frequency_table[i] = len(DB[keys[i]]);
pl.plot(frequency_table);
M = len(whales_);
match_table = [[0 for j in xrange(N+1)] for i in xrange(M+1)];
for j in xrange(N):
match_table[0][j+1] = classes[j];
for i in xrange(M):
match_table[i+1][0] = whales_[i];
for i in xrange(N):
for j in xrange(M):
strWhale = 'w_'+str(whales_[j])+'.jpg';
num_zero = 0;
if (classes[i] < 10):
num_zero += 4;
elif (classes[i] < 100):
num_zero += 3;
elif (classes[i] < 1000):
num_zero += 2;
elif (classes[i] < 10000):
num_zero += 1;
zeros = num_zero*'0';
strClass = 'whale_'+zeros+str(classes[i]);
if (strWhale in DB[strClass]):
match_table[j+1][i+1] = 1;
match_table = pl.array(match_table);
pl.savetxt('data/match_table.csv', match_table, delimiter=',');
# Pairwise same-class indicator matrix: target_matrix[i, j] is 1 when
# pictures i and j share a class (diagonal forced to 1), written to
# data/target_matrix.csv.
target_matrix = pl.array([[0 for j in xrange(M)] for j in xrange(M)]);
i = 0;
for pic_name_i in whales_:
    j = 0;
    for pic_name_j in whales_:
        class_of_i = BD['w_'+str(pic_name_i)+'.jpg'];
        class_of_j = BD['w_'+str(pic_name_j)+'.jpg'];
        if (class_of_i == class_of_j):
            target_matrix[i,j] = 1;
        j += 1;
    target_matrix[i,i] = 1;
    i += 1;
# Numeric (picture id, class id) pairs, written to data/train_numer.csv.
new_train_numerical = pl.array([[0 for it1 in xrange(2)] for it2 in xrange(M)]);
for i in xrange(M):
    whale = whales_[i];
    new_train_numerical[i,0] = whale;
    class_ = class_of_i = BD['w_'+str(whale)+'.jpg'];
    new_train_numerical[i,1] = int(re.sub('whale_','',class_));
pl.savetxt('data/target_matrix.csv', target_matrix, delimiter=',');
pl.savetxt('data/train_numer.csv', new_train_numerical, delimiter=',');
1899c8f523a33646e893cd33cd3682188e0ca8e5 | 5,990 | py | Python | src/fsec/yammler.py | HiggsHydra/permian-frac-exchange | 1dd1e409e5389373590c7d3780a54cd9f12d1166 | [
"MIT"
] | null | null | null | src/fsec/yammler.py | HiggsHydra/permian-frac-exchange | 1dd1e409e5389373590c7d3780a54cd9f12d1166 | [
"MIT"
] | null | null | null | src/fsec/yammler.py | HiggsHydra/permian-frac-exchange | 1dd1e409e5389373590c7d3780a54cd9f12d1166 | [
"MIT"
] | null | null | null | from typing import Union
from datetime import datetime
import os
import tempfile
from contextlib import contextmanager
import logging
from collections import Counter
import yaml
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    # Ad-hoc manual test / migration scratchpad for the Yammler helpers.
    fspath = "./config/download_log.yaml"
    import loggers
    from yammler import Yammler
    loggers.standard_config()
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    # NOTE(review): DownloadLog is not imported in this chunk; presumably
    # defined earlier in this file — confirm.
    with DownloadLog.context(fspath) as f:
        # print(f)
        # f.known_files = "test"
        # print(f.known_files)
        f.add("test1")
        print(f)
    y = Yammler("./config/operators.yaml")
    # NOTE(review): `s` is used here before it is ever assigned (the loop
    # below rebinds it); as written this line raises NameError.
    s2 = [{x.pop("operator"): x} for x in s]
    from stringprocessor import StringProcessor as sp
    for s in s2:
        for k, v in s.items():
            x = s.pop(k)
            x["alias"] = sp.normalize(x.pop("alias"), lower=True)
            x["method"] = sp.normalize(x.pop("method"), lower=True)
            s[sp.normalize(k, lower=True)] = x
    for x in s2:
        for key, value in x.items():
            try:
                value["created"] = value["created"].isoformat()
                value["updated"] = value["updated"].isoformat()
            # NOTE(review): bare except silently swallows everything, not
            # just missing/non-datetime created/updated fields.
            except:
                pass
            finally:
                y[key] = value
    f = DownloadLog(fspath)
    # f.known_files
    # f.add("test1")
    # f.dump()
    # f.remove("test1")
| 26.504425 | 88 | 0.543573 |
189b834780427e5805e8ddb0880c32074d93411d | 879 | py | Python | pricecalc/pricecalc/apps/calc/migrations/0024_ldstp_alter_furnitureincalc_options.py | oocemb/Calculation | 91d202d1b5a2dde6376487147517310682294278 | [
"Apache-2.0"
] | null | null | null | pricecalc/pricecalc/apps/calc/migrations/0024_ldstp_alter_furnitureincalc_options.py | oocemb/Calculation | 91d202d1b5a2dde6376487147517310682294278 | [
"Apache-2.0"
] | null | null | null | pricecalc/pricecalc/apps/calc/migrations/0024_ldstp_alter_furnitureincalc_options.py | oocemb/Calculation | 91d202d1b5a2dde6376487147517310682294278 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 4.0.2 on 2022-03-25 11:59
from django.db import migrations, models
| 32.555556 | 117 | 0.602958 |
189c6b3a4cd4803a7422b2fd630d54013aa0aa1e | 14,356 | py | Python | aiida_siesta/calculations/stm.py | mailhexu/aiida_siesta_plugin | 313ef4b3532b54d8d0c81788b683c53cb4701965 | [
"MIT"
] | null | null | null | aiida_siesta/calculations/stm.py | mailhexu/aiida_siesta_plugin | 313ef4b3532b54d8d0c81788b683c53cb4701965 | [
"MIT"
] | 2 | 2019-05-12T22:11:46.000Z | 2019-05-13T11:46:16.000Z | aiida_siesta/calculations/stm.py | mailhexu/aiida_siesta_plugin | 313ef4b3532b54d8d0c81788b683c53cb4701965 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
# Module with fdf-aware dictionary
from tkdict import FDFDict
from aiida.orm.calculation.job import JobCalculation
from aiida.common.exceptions import InputValidationError
from aiida.common.datastructures import CalcInfo
from aiida.common.utils import classproperty
from aiida.common.datastructures import CodeInfo
from aiida.orm.data.parameter import ParameterData
from aiida.orm.data.remote import RemoteData
__copyright__ = u"Copyright (c), 2015, ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE (Theory and Simulation of Materials (THEOS) and National Centre for Computational Design and Discovery of Novel Materials (NCCR MARVEL)), Switzerland and ROBERT BOSCH LLC, USA. All rights reserved."
__license__ = "MIT license, see LICENSE.txt file"
__version__ = "0.9.10"
__contributors__ = "Victor M. Garcia-Suarez, Alberto Garcia"
def get_input_data_text(key,val, mapping=None):
"""
Given a key and a value, return a string (possibly multiline for arrays)
with the text to be added to the input file.
:param key: the flag name
:param val: the flag value. If it is an array, a line for each element
is produced, with variable indexing starting from 1.
Each value is formatted using the conv_to_fortran function.
:param mapping: Optional parameter, must be provided if val is a dictionary.
It maps each key of the 'val' dictionary to the corresponding
list index. For instance, if ``key='magn'``,
``val = {'Fe': 0.1, 'O': 0.2}`` and ``mapping = {'Fe': 2, 'O': 1}``,
this function will return the two lines ``magn(1) = 0.2`` and
``magn(2) = 0.1``. This parameter is ignored if 'val'
is not a dictionary.
"""
from aiida.common.utils import conv_to_fortran
# I check first the dictionary, because it would also match
# hasattr(__iter__)
if isinstance(val, dict):
if mapping is None:
raise ValueError("If 'val' is a dictionary, you must provide also "
"the 'mapping' parameter")
list_of_strings = []
for elemk, itemval in val.iteritems():
try:
idx = mapping[elemk]
except KeyError:
raise ValueError("Unable to find the key '{}' in the mapping "
"dictionary".format(elemk))
list_of_strings.append((idx," {0}({2}) = {1}\n".format(
key, conv_to_fortran(itemval), idx)))
# I first have to resort, then to remove the index from the first
# column, finally to join the strings
list_of_strings = zip(*sorted(list_of_strings))[1]
return "".join(list_of_strings)
elif hasattr(val,'__iter__'):
# a list/array/tuple of values
list_of_strings = [
"{0}({2}) {1}\n".format(key, conv_to_fortran(itemval), idx+1)
for idx, itemval in enumerate(val)]
return "".join(list_of_strings)
else:
# single value
if key[:6] == '%block':
bname = key.split()[1]
b1 = "{0} {1}".format(key, my_conv_to_fortran(val))
return b1 + "\n%endblock " + bname + "\n"
else:
return "{0} {1}\n".format(key, my_conv_to_fortran(val))
def my_conv_to_fortran(val):
    """
    Special version to avoid surrounding strings with extra ' '. Otherwise the
    fdf tokenizer will not split values and units, for example.

    :param val: the value to be read and converted to a Fortran-friendly string.
    :return: the string representation of ``val``.
    :raises ValueError: if ``val`` is not a bool, int, float or string.
    """
    # Python 2 has separate 'long' and 'unicode' types; referencing them on
    # Python 3 raises NameError, so fall back to the Python 3 equivalents.
    # (The original bare 'long'/'basestring' checks crashed on Python 3.)
    try:
        integer_types = (int, long)  # noqa: F821 -- Python 2 only
        string_types = (str, unicode)  # noqa: F821 -- Python 2 only
    except NameError:  # Python 3
        integer_types = (int,)
        string_types = (str,)
    # bool must be tested before int, because a boolean is also an int.
    if isinstance(val, bool):
        val_str = '.true.' if val else '.false.'
    elif isinstance(val, integer_types):
        val_str = "{:d}".format(val)
    elif isinstance(val, float):
        # Fortran uses 'd' as the exponent marker for double precision.
        val_str = ("{:18.10e}".format(val)).replace('e', 'd')
    elif isinstance(val, string_types):
        val_str = "{!s}".format(val)
    else:
        raise ValueError("Invalid value passed, accepts only bools, ints, "
                         "floats and strings")
    return val_str
| 39.657459 | 278 | 0.60156 |
189dad7d4fc31c11984202f5abd8d52b7d7034ce | 5,974 | py | Python | backend/api/fixtures/test/functional_test/load_ft_data.py | amichard/tfrs | ed3973016cc5c2ae48999d550a23b41a5ddad807 | [
"Apache-2.0"
] | 18 | 2017-05-10T21:55:11.000Z | 2021-03-01T16:41:32.000Z | backend/api/fixtures/test/functional_test/load_ft_data.py | amichard/tfrs | ed3973016cc5c2ae48999d550a23b41a5ddad807 | [
"Apache-2.0"
] | 1,167 | 2017-03-04T00:18:43.000Z | 2022-03-03T22:31:51.000Z | backend/api/fixtures/test/functional_test/load_ft_data.py | amichard/tfrs | ed3973016cc5c2ae48999d550a23b41a5ddad807 | [
"Apache-2.0"
] | 48 | 2017-03-09T17:19:39.000Z | 2022-02-24T16:38:17.000Z | import uuid
import os
from datetime import datetime
from django.db import transaction
from api.management.data_script import OperationalDataScript
from api.models.CompliancePeriod import CompliancePeriod
from api.models.Organization import Organization
from api.models.OrganizationActionsType import OrganizationActionsType
from api.models.OrganizationBalance import OrganizationBalance
from api.models.OrganizationStatus import OrganizationStatus
from api.models.OrganizationType import OrganizationType
from api.models.Role import Role
from api.models.User import User
from api.models.UserRole import UserRole
script_class = LoadFTData
| 55.831776 | 132 | 0.68999 |
189e049c8ff1d8fc6680e58e527827763bd3d33c | 3,456 | py | Python | challenge/agoda_cancellation_prediction.py | ZebraForce9/IML.HUJI | a263a621331c7c7d51c90c8325f76aa0797d424e | [
"MIT"
] | null | null | null | challenge/agoda_cancellation_prediction.py | ZebraForce9/IML.HUJI | a263a621331c7c7d51c90c8325f76aa0797d424e | [
"MIT"
] | null | null | null | challenge/agoda_cancellation_prediction.py | ZebraForce9/IML.HUJI | a263a621331c7c7d51c90c8325f76aa0797d424e | [
"MIT"
] | null | null | null | from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator
from IMLearn.utils import split_train_test
from IMLearn.base import BaseEstimator
import numpy as np
import pandas as pd
def load_data(filename: str):
    """
    Load the Agoda booking-cancellation dataset.

    Reads the raw CSV, drops duplicate and incomplete rows, parses the date
    columns (the cancellation timestamp becomes seconds since the epoch,
    with missing values treated as 0, i.e. "never cancelled"), one-hot
    encodes the categorical columns and splits off the response vector.

    Parameters
    ----------
    filename: str
        Path (or file-like object) of the Agoda bookings CSV.

    Returns
    -------
    Tuple of (pandas.DataFrame design matrix, pandas.Series response).
    """
    date_columns = ["booking_datetime", "checkin_date", "checkout_date",
                    "hotel_live_date", "cancellation_datetime"]
    categorical_columns = ["hotel_id", "hotel_country_code",
                           "accommadation_type_name", "charge_option",
                           "customer_nationality",
                           "guest_nationality_country_name",
                           "origin_country_code", "language",
                           "original_payment_method",
                           "original_payment_currency",
                           "hotel_area_code", "hotel_city_code"]

    frame = pd.read_csv(filename).drop_duplicates()
    # A missing cancellation date means the booking was never cancelled.
    frame["cancellation_datetime"] = frame["cancellation_datetime"].fillna(0)
    frame = frame.dropna()

    frame[date_columns] = frame[date_columns].apply(pd.to_datetime)
    # Nanosecond epoch value -> seconds since the epoch.
    frame["cancellation_datetime"] = frame["cancellation_datetime"].apply(
        lambda ts: ts.value // 10 ** 9)
    frame["booking_date"] = frame["booking_datetime"].dt.date
    frame["booking_time"] = frame["booking_datetime"].dt.time

    # One-hot encode each categorical column, prefixing the new columns
    # with the original column name.
    for column in categorical_columns:
        frame = frame.join(pd.get_dummies(frame[column]).add_prefix(column))

    frame = frame.drop(
        ["h_booking_id", "h_customer_id", "booking_datetime"]
        + categorical_columns, axis=1)
    labels = frame.pop("cancellation_datetime")
    return frame, labels
def evaluate_and_export(estimator: BaseEstimator, X: np.ndarray, filename: str):
    """
    Write the predictions of a fitted estimator over a test set to a CSV file.

    The file has a single column named 'predicted_values' with one row per
    sample and no index column.

    Parameters
    ----------
    estimator: BaseEstimator or any object implementing predict() method as in BaseEstimator (for example sklearn)
        Fitted estimator to use for prediction
    X: ndarray of shape (n_samples, n_features)
        Test design matrix to predict its responses
    filename:
        path to store file at
    """
    predictions = estimator.predict(X)
    output = pd.DataFrame(predictions, columns=["predicted_values"])
    output.to_csv(filename, index=False)
if __name__ == '__main__':
    # Fixed seed so the random train/test split is reproducible.
    np.random.seed(0)
    # Load data
    df, cancellation_labels = load_data("agoda_cancellation_train.csv")
    # NOTE(review): return order (train_X, train_y, test_X, test_y) assumed
    # from IMLearn's split_train_test -- confirm against its signature.
    train_X, train_y, test_X, test_y = split_train_test(df, cancellation_labels)
    # Fit model over data
    # Local import keeps the timing helper out of module scope.
    from time import time
    start = time()
    estimator = AgodaCancellationEstimator().fit(train_X, train_y)
    pred = estimator.predict(test_X)
    # Report the fit+predict wall-clock time in seconds.
    print(time() - start)
    # Store model predictions over test set
    evaluate_and_export(estimator, test_X, "id1_id2_id3.csv")
| 37.565217 | 120 | 0.713252 |
189ea5728ae22b441ea875f1bd0c5faac3a76ced | 294 | py | Python | example2.py | presidento/scripthelper | 71b9e69f2967fb8d352376213c046263d5c31849 | [
"MIT"
] | null | null | null | example2.py | presidento/scripthelper | 71b9e69f2967fb8d352376213c046263d5c31849 | [
"MIT"
] | 3 | 2020-04-28T13:14:31.000Z | 2021-01-15T09:41:56.000Z | example2.py | presidento/scripthelper | 71b9e69f2967fb8d352376213c046263d5c31849 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Minimal demo of the `scripthelper` bootstrap: register one optional CLI
# flag and receive a ready-configured logger plus the parsed arguments.
import scripthelper
# Extra arguments must be registered before bootstrap_args() parses argv.
scripthelper.add_argument("-n", "--name", help="Name to greet")
logger, args = scripthelper.bootstrap_args()
if args.name:
    logger.debug("Name was provided")
    logger.info(f"Hello {args.name}")
else:
    logger.warning("Name was not provided")
| 24.5 | 63 | 0.710884 |
18a1a66ddfc12bbc493302b88cd1fbc01b59d040 | 71 | py | Python | Chapter 01/Chap01_Example1.40.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
] | null | null | null | Chapter 01/Chap01_Example1.40.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
] | null | null | null | Chapter 01/Chap01_Example1.40.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
] | null | null | null | #backslash and new line ignored
# A trailing backslash joins these physical lines into one logical line,
# continuing the string literal itself: the backslash-newline pairs are
# ignored, so this prints "onetwothree".
print("one\
two\
three")
| 14.2 | 31 | 0.591549 |
18a1ae9b017d856fff834435791dc30cf0986f3f | 15,575 | py | Python | turbo_seti/find_event/find_event_pipeline.py | savinshynu/turbo_seti | 7d756f130af5a323403affcdcb9f9bfa62325836 | [
"MIT"
] | null | null | null | turbo_seti/find_event/find_event_pipeline.py | savinshynu/turbo_seti | 7d756f130af5a323403affcdcb9f9bfa62325836 | [
"MIT"
] | null | null | null | turbo_seti/find_event/find_event_pipeline.py | savinshynu/turbo_seti | 7d756f130af5a323403affcdcb9f9bfa62325836 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
r"""
Front-facing script to find drifting, narrowband events in a set of generalized
cadences of ON-OFF radio SETI observations.
The main function contained in this file is :func:`find_event_pipeline` calls
find_events from find_events.py to read a list of turboSETI .dat files.
It then finds events within this group of files.
"""
#required packages and programs
import os
from operator import attrgetter
import logging
logger_name = 'find_event_pipeline'
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
import pandas as pd
import numpy as np
from blimpy import Waterfall
from blimpy.utils import change_the_ext
from turbo_seti.find_event.find_event import find_events
RTOL_DIFF = 0.01 # 1%
def get_file_header(filepath_h5):
    r'''
    Load only the header of the given HDF5 file (the bulk data is skipped
    via ``load_data=False``).
    Parameters
    ----------
    filepath_h5 : str
        Full or relative path name of the HDF5 file.
    Returns
    -------
    header : Waterfall header object
    '''
    wf = Waterfall(filepath_h5, load_data=False)
    return wf.container.header
def close_enough(x, y):
    r"""Return True when x and y agree within the module-wide RTOL_DIFF relative tolerance."""
    return bool(np.isclose(float(x), float(y), rtol=RTOL_DIFF))
def find_event_pipeline(dat_file_list_str,h5_file_list_str=None, SNR_cut=10, check_zero_drift=False, filter_threshold=3,
                        on_off_first='ON', number_in_cadence=6, on_source_complex_cadence=False,
                        saving=True, csv_name=None, user_validation=False,
                        sortby_tstart=True):
    """
    Find event pipeline.
    Parameters
    ----------
    dat_file_list_str : str
        The string name of a plaintext file ending in .lst
        that contains the filenames of .dat files, each on a
        new line, that were created with seti_event.py. The
        .lst should contain a set of cadences (ON observations
        alternating with OFF observations). The cadence can be
        of any length, given that the ON source is every other
        file. This includes Breakthrough Listen standard ABACAD
        as well as OFF first cadences like BACADA. Minimum
        cadence length is 2, maximum cadence length is
        unspecified (currently tested up to 6).
        Example: ABACAD|ABACAD|ABACAD
    h5_file_list_str : str | None
        The string name of a plaintext file ending in .lst
        that contains the filenames of .h5 files, each on a
        new line, that were created with seti_event.py. The
        .lst should contain a set of cadences (ON observations
        alternating with OFF observations). The cadence can be
        of any length, given that the ON source is every other
        file. This includes Breakthrough Listen standard ABACAD
        as well as OFF first cadences like BACADA. Minimum
        cadence length is 2, maximum cadence length is
        unspecified (currently tested up to 6).
    SNR_cut : int
        The threshold SNR below which hits in the ON source
        will be disregarded. For the least strict thresholding,
        set this parameter equal to the minimum-searched SNR
        that you used to create the .dat files from
        seti_event.py. Recommendation (and default) is 10.
    check_zero_drift : bool
        A True/False flag that tells the program whether to
        include hits that have a drift rate of 0 Hz/s. Earth-
        based RFI tends to have no drift rate, while signals
        from the sky are expected to have non-zero drift rates.
    filter_threshold : int
        Specification for how strict the hit filtering will be.
        There are 3 different levels of filtering, specified by
        the integers 1, 2, and 3. Filter_threshold = 1
        returns hits above an SNR cut, taking into account the
        check_zero_drift parameter, but without an ON-OFF check.
        Filter_threshold = 2 returns hits that passed level 1
        AND that are in at least one ON but no OFFs.
        Filter_threshold = 3 returns events that passed level 2
        AND that are present in *ALL* ONs.
    on_off_first : str {'ON', 'OFF'}
        Tells the code whether the .dat sequence starts with
        the ON or the OFF observation. Valid entries are 'ON'
        and 'OFF' only. Default is 'ON'.
    number_in_cadence : int
        The number of files in a single ON-OFF cadence.
        Default is 6 for ABACAD.
    on_source_complex_cadence : bool
        If using a complex cadence (i.e. ons and offs not
        alternating), this variable should be the string
        target name used in the .dat filenames. The code will
        then determine which files in your dat_file_list_str
        cadence are ons and which are offs.
    saving : bool
        A True/False flag that tells the program whether to
        save the output array as a .csv.
    user_validation : bool
        A True/False flag that, when set to True, asks if the
        user wishes to continue with their input parameters
        (and requires a 'y' or 'n' typed as confirmation)
        before beginning to run the program. Recommended when
        first learning the program, not recommended for
        automated scripts.
    sortby_tstart : bool
        If True, the input file list is sorted by header.tstart.
    Returns
    -------
    Either:
    * a Pandas dataframe with all the events that were found.
    * None, if no events were found.
    Notes
    -----
    The HDF5 file is ASSUMED(!!) to have the same name as .dat files.
    Examples
    --------
    >>> import find_event_pipeline;
    >>> find_event_pipeline.find_event_pipeline(dat_file_list_str,
    ...                                         SNR_cut=10,
    ...                                         check_zero_drift=False,
    ...                                         filter_threshold=3,
    ...                                         on_off_first='ON',
    ...                                         number_in_cadence=6,
    ...                                         on_source_complex_cadence=False,
    ...                                         saving=True,
    ...                                         user_validation=False)
    """
    print()
    print("************ BEGINNING FIND_EVENT PIPELINE **************")
    print()
    if on_source_complex_cadence:
        print("Assuming a complex cadence for the following on source: {}"
              .format(on_source_complex_cadence))
    else: # not on_source_complex_cadence:
        print("Assuming the first observation is an " + on_off_first)
    # For a simple cadence this stays False; for a complex cadence it is
    # rebuilt below as a list of 1s (ON) and 0s (OFF).
    complex_cadence = on_source_complex_cadence
    # Get a list of the DAT files.
    # Get source names and build path_record list.
    source_name_list = []
    path_record = []
    # Get a list of the DAT/h5 files.
    # NOTE: list_of_files() and PathRecord are defined elsewhere in this
    # module (not shown here).
    n_files, dat_file_list = list_of_files(dat_file_list_str)
    if h5_file_list_str is None:
        # No separate HDF5 list: assume each .dat has a same-named .h5.
        h5_file_list = dat_file_list
        for hf in h5_file_list:
            header = get_file_header(change_the_ext(hf, 'dat', 'h5'))
            source_name = header["source_name"]
            tstart = header["tstart"]
            path_record.append(PathRecord(hf, tstart, source_name, header["fch1"],
                                          header["foff"], header["nchans"]))
            source_name_list.append(source_name)
    else:
        # Explicit HDF5 list: pair each .h5 with its .dat by base name.
        hn_files, h5_file_list = list_of_files(h5_file_list_str)
        for hf in h5_file_list:
            header = get_file_header(hf)
            for dat in dat_file_list: # O(n^2) TODO: create tests in pytest
                if os.path.basename(dat).replace('.dat','.h5')==os.path.basename(hf):
                    source_name = header["source_name"]
                    tstart = header["tstart"]
                    path_record.append(PathRecord(dat, tstart, source_name, header["fch1"],
                                                  header["foff"], header["nchans"]))
                    source_name_list.append(source_name)
    # If sorting by header.tstart, then rewrite the dat_file_list in header.tstart order.
    if sortby_tstart:
        path_record = sorted(path_record, key=attrgetter('tstart'))
        dat_file_list = []
        for obj in path_record:
            dat_file_list.append(obj.path_dat)
    # Set up the frequency range matcher record.
    # If a complex cadence, the source name is used to select the matcher;
    # Otherwise, just use the first record.
    # The matcher holds the reference fch1/foff/nchans compared against below.
    if on_source_complex_cadence:
        flag_terminate = True
        for obj in path_record: # Look for 1st occurence of source_name.
            if obj.source_name == on_source_complex_cadence:
                matcher = obj
                flag_terminate = False
                break
        if flag_terminate:
            logger.error("find_event_pipeline: Source '{}' is not in this complex cadence!"
                         .format(on_source_complex_cadence))
            for obj in path_record:
                logger.info("find_event_pipeline: file={}, tstart={}, source_name={}, fch1={}, foff={}, nchans={}"
                            .format(os.path.basename(obj.path_dat), obj.tstart, obj.source_name,
                                    obj.fch1, obj.foff, obj.nchans))
            return None
    else:
        matcher = path_record[0]
    # Display path_record rows.
    flag_terminate = False
    for obj in path_record:
        logger.info("find_event_pipeline: file={}, tstart={}, source_name={}, fch1={}, foff={}, nchans={}"
                    .format(os.path.basename(obj.path_dat), obj.tstart, obj.source_name,
                            obj.fch1, obj.foff, obj.nchans))
        if on_source_complex_cadence: # Complex cadence?
            # If not a part of the complex cadence, then skip it.
            if on_source_complex_cadence != obj.source_name:
                continue
        # Part of the cadence, complex or not.
        # Make sure that the frequency range makes sense.
        if not close_enough(obj.fch1, matcher.fch1) \
            or not close_enough(obj.foff, matcher.foff) \
            or obj.nchans != matcher.nchans:
            logger.error("find_event_pipeline: Inconsistent frequency range! This does not look like a cadence of related files.")
            flag_terminate = True
    if flag_terminate:
        return None
    # If this is a complex cadence,
    # * construct a complex_cadence list of 1s and 0s.
    # * compute count_cadence = number of matches on on_source_complex_cadence.
    if on_source_complex_cadence:
        complex_cadence = []
        count_cadence = 0
        for i in range(0, len(source_name_list)):
            source = source_name_list[i]
            if source == on_source_complex_cadence:
                complex_cadence.append(1)
                count_cadence += 1
            else:
                complex_cadence.append(0)
        if count_cadence > 0:
            print("The derived complex cadence is: " + str(complex_cadence))
        else:
            print("\n*** find_event_pipeline [complex cadence]: Sorry, no potential candidates with your given on_source_complex_cadence={} :("
                  .format(on_source_complex_cadence))
            return None
    # Each consecutive group of number_in_cadence DAT files forms one
    # ON-OFF set.
    num_of_sets = int(n_files / number_in_cadence)
    print("There are " + str(len(dat_file_list)) + " total files in the filelist "
          + dat_file_list_str)
    print("therefore, looking for events in " + str(num_of_sets) + " on-off set(s)")
    print("with a minimum SNR of " + str(SNR_cut))
    if filter_threshold == 1:
        print("Present in an ON source only, above SNR_cut")
    if filter_threshold == 2:
        print("Present in at least one ON source with RFI rejection from the OFF sources")
    if filter_threshold == 3:
        print("Present in all ON sources with RFI rejection from the OFF sources")
    if not check_zero_drift:
        print("not including signals with zero drift")
    else:
        print("including signals with zero drift")
    if not saving:
        print("not saving the output files")
    else:
        print("saving the output files")
    if user_validation:
        question = "Do you wish to proceed with these settings?"
        # The truthy string condition loops until an explicit break/return.
        while "the answer is invalid":
            reply = str(input(question+' (y/n): ')).lower().strip()
            if reply == '':
                return None
            if reply[0] == 'y':
                break
            if reply[0] == 'n':
                return None
    #Looping over number_in_cadence chunks.
    candidate_list = []
    for ii in range(num_of_sets):
        sublist_low = number_in_cadence * ii
        sublist_high = sublist_low + number_in_cadence
        file_sublist = dat_file_list[sublist_low : sublist_high]
        # Report the first ON file of the set (position depends on cadence).
        if not complex_cadence:
            if on_off_first == 'ON':
                filename = os.path.basename(file_sublist[0])
            else: # on_off_first == 'OFF'
                filename = os.path.basename(file_sublist[1])
        else: # complex_cadence
            filename = os.path.basename(file_sublist[complex_cadence.index(1)])
        print()
        print("*** First DAT file in set: " + filename + " ***")
        print()
        cand = find_events(file_sublist,
                           SNR_cut=SNR_cut,
                           check_zero_drift=check_zero_drift,
                           filter_threshold=filter_threshold,
                           on_off_first=on_off_first,
                           complex_cadence=complex_cadence)
        # find_events() returns None when the set produced no candidates.
        cand_len = 1
        if cand is None:
            cand_len = 0
        if cand_len != 0:
            candidate_list.append(cand)
    if len(candidate_list) > 0:
        find_event_output_dataframe = pd.concat(candidate_list)
    else:
        print("\n*** find_event_pipeline: Sorry, no potential candidates with your given parameters :(")
        return None
    print("*** find_event_output_dataframe is complete ***")
    if saving:
        if csv_name is None:
            # Default CSV name encodes the first source, the filter level
            # and the SNR cut (plus '_zero' when zero drift is included).
            prefix = os.path.dirname(dat_file_list[0]) + '/' + source_name_list[0]
            if check_zero_drift:
                filestring = prefix + '_f' + str(filter_threshold) + '_snr' \
                             + str(SNR_cut) + '_zero' + '.csv'
            else:
                filestring = prefix + '_f' + str(filter_threshold) + '_snr' \
                             + str(SNR_cut) + '.csv'
        else:
            filestring = csv_name
        if not isinstance(find_event_output_dataframe, list):
            find_event_output_dataframe.to_csv(filestring)
            print("find_event_pipeline: Saved CSV file to {}".format(filestring))
        else:
            print("\n*** find_event_pipeline: Sorry, no events to save :(")
            return None
    return find_event_output_dataframe
| 41.094987 | 144 | 0.616116 |
18a1b4659b986cda93994b346c85aae4f37fb1a4 | 1,558 | py | Python | scripts/plot_snaps.py | wordsworthgroup/libode | c3e9dbfe3e09c49ed666f10ae8fb964b37ecb479 | [
"MIT"
] | 11 | 2020-02-27T22:32:04.000Z | 2021-05-06T17:51:50.000Z | scripts/plot_snaps.py | markmbaum/libode | c3e9dbfe3e09c49ed666f10ae8fb964b37ecb479 | [
"MIT"
] | null | null | null | scripts/plot_snaps.py | markmbaum/libode | c3e9dbfe3e09c49ed666f10ae8fb964b37ecb479 | [
"MIT"
] | 2 | 2021-09-26T07:36:55.000Z | 2021-11-29T23:45:32.000Z | import numpy as np
import matplotlib.pyplot as plt
# Alternative exact-solution pairs for the test problems; exactly one pair
# must be uncommented and must match the snapshots written by the solver.
#Dahlquist test
#sol1ex = lambda t: np.exp(-t)
#sol2ex = lambda t: np.exp(-2*t)
#oscillator 1
sol1ex = lambda t: np.cos(t**2/2)
sol2ex = lambda t: np.sin(t**2/2)
#oscillator 2
#sol1ex = lambda t: np.exp(np.sin(t**2))
#sol2ex = lambda t: np.exp(np.cos(t**2))
# Base name of the snapshot files produced by the C++ test driver;
# must agree with the exact solutions selected above.
name = 'Osc1'
# Snapshot times, stored as one raw binary array of doubles.
t = np.fromfile('../out/%s_snap_t' % name)
nsnap = len(t)
sol1 = np.zeros((nsnap,))
sol2 = sol1.copy()
# Each snapshot file holds the full state vector; keep components 0 and 1.
for i in range(nsnap):
    s = np.fromfile('../out/%s_snap_%d' % (name,i))
    sol1[i] = s[0]
    sol2[i] = s[1]
fig, axs = plt.subplots(2, 3, figsize=(10,5))
# Flatten the 2x3 axes grid into a single list for linear indexing.
axs = [item for sublist in axs for item in sublist]
tdense = np.linspace(min(t), max(t), 2500)
# Column 1: solutions (exact curve plus numerical snapshot points).
axs[0].plot(tdense, sol1ex(tdense), 'k', linewidth=0.5, label='$y_1$ exact')
axs[0].plot(t, sol1, 'C0.', label='$y_1$ numerical')
axs[0].set_title('Solutions')
axs[0].set_ylabel('$y_1$')
axs[0].legend()
axs[3].plot(tdense, sol2ex(tdense), 'k', linewidth=0.5, label='$y_2$ exact')
axs[3].plot(t, sol2, 'C1.', label='$y_2$ numerical')
axs[3].set_ylabel('$y_2$')
axs[3].legend()
# Column 2: absolute error on a log scale.
axs[1].semilogy(t, np.abs(sol1 - sol1ex(t)), 'C0.', label='$y_1$ abs err')
axs[4].semilogy(t, np.abs(sol2 - sol2ex(t)), 'C1.', label='$y_2$ abs err')
axs[1].set_title('Absolute Error')
# Column 3: relative error.  NOTE(review): the second row divides by
# sol1ex(t) rather than sol2ex(t) -- confirm whether that is intended.
axs[2].semilogy(t, np.abs((sol1 - sol1ex(t))/sol1ex(t)), 'C0.', label='$y_1$ rel err')
axs[5].semilogy(t, np.abs((sol2 - sol2ex(t))/sol1ex(t)), 'C1.', label='$y_2$ rel err')
axs[2].set_title('Relative Error')
axs[3].set_xlabel('t')
axs[4].set_xlabel('t')
axs[5].set_xlabel('t')
plt.tight_layout()
plt.show()
| 29.396226 | 86 | 0.632863 |
18a22f9ecd12b8cd2ba070dcb05f2e55ef3f8d64 | 86 | py | Python | mne/datasets/kiloword/__init__.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 1,953 | 2015-01-17T20:33:46.000Z | 2022-03-30T04:36:34.000Z | mne/datasets/kiloword/__init__.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 8,490 | 2015-01-01T13:04:18.000Z | 2022-03-31T23:02:08.000Z | mne/datasets/kiloword/__init__.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | [
"BSD-3-Clause"
] | 1,130 | 2015-01-08T22:39:27.000Z | 2022-03-30T21:44:26.000Z | """MNE visual_92_categories dataset."""
from .kiloword import data_path, get_version
| 21.5 | 44 | 0.790698 |
18a3288be8f39dff9c36e526ba62428f9babcc0d | 1,688 | py | Python | app/framework/tagger_framework/tagger/pos/evaluation.py | kislerdm/nlp_pos_demo | cea5a0432e3fc0a626f090d40a28e084e3243efc | [
"MIT"
] | null | null | null | app/framework/tagger_framework/tagger/pos/evaluation.py | kislerdm/nlp_pos_demo | cea5a0432e3fc0a626f090d40a28e084e3243efc | [
"MIT"
] | null | null | null | app/framework/tagger_framework/tagger/pos/evaluation.py | kislerdm/nlp_pos_demo | cea5a0432e3fc0a626f090d40a28e084e3243efc | [
"MIT"
] | null | null | null | # Dmitry Kisler 2020-present
# www.dkisler.com
from typing import List, Dict
from sklearn.metrics import f1_score, accuracy_score
def model_performance(y_true: List[List[str]],
                      y_pred: List[List[str]]) -> Dict[str, float]:
    """Compute accuracy and F1 scores for per-sentence tag predictions.
    Args:
      y_true: List of true label lists, one list per tokenized sentence.
      y_pred: List of predicted label lists, one list per tokenized sentence.
    Returns:
      Dict of metrics:
        {
          "accuracy": float,
          "f1_micro": float,
          "f1_macro": float,
          "f1_weighted": float,
        }
      or None if y_true is empty.
    Raises:
      ValueError: Exception occurred when input lists' length (or total
        token counts) don't match.
    """
    if len(y_true) == 0:
        return None
    if len(y_true) != len(y_pred):
        raise ValueError("Lengths of input lists don't match.")
    # Flatten the per-sentence label lists into flat token-level lists
    # (replaces a nested helper that was re-defined on every call).
    y_true_flat = [label for sentence in y_true for label in sentence]
    y_pred_flat = [label for sentence in y_pred for label in sentence]
    if len(y_true_flat) != len(y_pred_flat):
        # Fixed typo in the original message ("Numper" -> "Number").
        raise ValueError("Number of tokens don't match between y_true and y_pred.")
    try:
        metrics = {
            "accuracy": accuracy_score(y_true_flat, y_pred_flat),
            "f1_micro": f1_score(y_true_flat, y_pred_flat, average='micro'),
            "f1_macro": f1_score(y_true_flat, y_pred_flat, average='macro'),
            "f1_weighted": f1_score(y_true_flat, y_pred_flat, average='weighted'),
        }
    except Exception as ex:
        raise Exception(f"Metrics calculation error: {ex}")
    return metrics
| 29.103448 | 83 | 0.598934 |
18a5fa9fa6c228a2b9d1020387a728db780df2f0 | 3,451 | py | Python | tools/interpret.py | Notgnoshi/generative | d9702c18b59553541f0cce706089f9fad501cd33 | [
"MIT"
] | 5 | 2021-02-11T07:55:51.000Z | 2022-02-10T01:11:02.000Z | tools/interpret.py | Notgnoshi/generative | d9702c18b59553541f0cce706089f9fad501cd33 | [
"MIT"
] | 67 | 2020-12-31T18:02:05.000Z | 2022-02-21T14:57:52.000Z | tools/interpret.py | Notgnoshi/generative | d9702c18b59553541f0cce706089f9fad501cd33 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Interpret an L-String as a set of 3D Turtle commands and record the turtle's path.
Multiple lines of input will be treated as a continuation of a single L-String.
Default commandset:
F,G - Step forward while drawing
f,g - Step forward without drawing
-,+ - Yaw around the normal axis
v,^ - Pitch around the transverse axis
<,> - Roll around the longitudinal axis
| - Flip orientation 180 degrees
d,D - Turn drawing on, off
[,] - Push, pop position and orientation onto a stack
"""
import argparse
import logging
import pathlib
import sys
root = pathlib.Path(__file__).resolve().parent.parent
sys.path.insert(0, str(root))
from generative.lsystem.interpreter import LSystemInterpeter
from generative.wkio import serialize_geometries
# Map of CLI-facing log level names to the stdlib logging constants.
LOG_LEVELS = {
    "CRITICAL": logging.CRITICAL,
    "ERROR": logging.ERROR,
    "WARNING": logging.WARNING,
    "INFO": logging.INFO,
    "DEBUG": logging.DEBUG,
}
# Level used when none is requested on the command line.
DEFAULT_LEVEL = "WARNING"
if __name__ == "__main__":
    # NOTE(review): parse_args() and main() are presumably defined elsewhere
    # in this script (not shown here) -- confirm.
    args = parse_args()
    # Log to stderr so stdout stays clean for the turtle-path output.
    logging.basicConfig(
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        level=LOG_LEVELS.get(args.log_level),
        stream=sys.stderr,
    )
    logger = logging.getLogger(name=__file__)
    main(args)
| 29 | 101 | 0.634888 |
18a6ffdb28982da58249e4d719411ed0e1af6ac5 | 699 | py | Python | PointsToRobot.py | chuong/robot-arm-manipulation | a5ad277f86c278ccf8fe99abe337d0c64f8a407e | [
"MIT"
] | null | null | null | PointsToRobot.py | chuong/robot-arm-manipulation | a5ad277f86c278ccf8fe99abe337d0c64f8a407e | [
"MIT"
] | null | null | null | PointsToRobot.py | chuong/robot-arm-manipulation | a5ad277f86c278ccf8fe99abe337d0c64f8a407e | [
"MIT"
] | null | null | null |
"""
@author: yuboya
"""
### pins position to be sent to robot
## from TransformationCalculation:
import numpy as np
import math
| 18.891892 | 61 | 0.542203 |
18a73b665ef0eeab4028398fb264c011541365f0 | 2,418 | py | Python | plugins/module_utils/github_api.py | zp4rker/ansible-github-api | 8b4d154915a5d92ec6f379d50cfb2c66a07fb16c | [
"Apache-2.0"
] | null | null | null | plugins/module_utils/github_api.py | zp4rker/ansible-github-api | 8b4d154915a5d92ec6f379d50cfb2c66a07fb16c | [
"Apache-2.0"
] | null | null | null | plugins/module_utils/github_api.py | zp4rker/ansible-github-api | 8b4d154915a5d92ec6f379d50cfb2c66a07fb16c | [
"Apache-2.0"
] | null | null | null | import requests
import json
from json import JSONDecodeError
# Root endpoint of the GitHub REST API.
base_uri = "https://api.github.com/"
# License keyword identifiers accepted by the GitHub API's `license` field.
# Fixed 'license bsd-3-clause' -> 'bsd-3-clause' (the stray 'license '
# prefix was a paste artifact; keywords never contain spaces).
licenses = ['afl-3.0', 'apache-2.0', 'artistic-2.0', 'bsl-1.0', 'bsd-2-clause', 'bsd-3-clause', 'bsd-3-clause-clear', 'cc', 'cc0-1.0', 'cc-by-4.0', 'cc-by-sa-4.0', 'wtfpl', 'ecl-2.0', 'epl-1.0', 'epl-2.0', 'eupl-1.1', 'agpl-3.0', 'gpl', 'gpl-2.0', 'gpl-3.0', 'lgpl', 'lgpl-2.1', 'lgpl-3.0', 'isc', 'lppl-1.3c', 'ms-pl', 'mit', 'mpl-2.0', 'osl-3.0', 'postgresql', 'ofl-1.1', 'ncsa', 'unlicense', 'zlib']
| 34.056338 | 413 | 0.619934 |
18a994a759d85007cf88e43e5353bf80d7ac9a5c | 3,055 | py | Python | src/onegov/core/datamanager.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/core/datamanager.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/core/datamanager.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | import os
import tempfile
import transaction
from onegov.core import log
from onegov.core.utils import safe_move
| 24.637097 | 73 | 0.629133 |
18aaa916c943bdb538fc41fcf2673ef26fba2444 | 3,603 | py | Python | djangocms_modules/models.py | crydotsnake/djangocms-modules | ab5b75ee1076e6fccab1a26b8dbe1c754c4de8d7 | [
"BSD-3-Clause"
] | 8 | 2019-01-29T15:11:30.000Z | 2020-06-07T19:27:50.000Z | djangocms_modules/models.py | crydotsnake/djangocms-modules | ab5b75ee1076e6fccab1a26b8dbe1c754c4de8d7 | [
"BSD-3-Clause"
] | 11 | 2018-12-14T14:01:06.000Z | 2020-09-02T09:02:49.000Z | djangocms_modules/models.py | divio/djangocms-modules | 8328f130cddd4cf5f90beca170d1303b95158cda | [
"BSD-3-Clause"
] | 3 | 2021-04-16T12:26:27.000Z | 2021-06-25T14:53:47.000Z | from django.conf import settings
from django.db import models
from django.dispatch import receiver
from django.urls import Resolver404, resolve
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from cms import operations
from cms.models import CMSPlugin, Placeholder
from cms.models.fields import PlaceholderField
from cms.signals import pre_placeholder_operation
from cms.utils.plugins import get_bound_plugins
| 27.090226 | 75 | 0.686095 |
18ad2e27ac7dd39dc407f19da04c84adb7ca9a06 | 553 | py | Python | do_tasks.py | youqingkui/zhihufav | 97c465d1bf825a6621d221c39a3677887cbd9261 | [
"MIT"
] | null | null | null | do_tasks.py | youqingkui/zhihufav | 97c465d1bf825a6621d221c39a3677887cbd9261 | [
"MIT"
] | null | null | null | do_tasks.py | youqingkui/zhihufav | 97c465d1bf825a6621d221c39a3677887cbd9261 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#coding=utf-8
import json
from lib.sqs import zhihufav_sqs
from lib.tasks import add_note
if __name__=="__main__":
    # Poll the queue five times per run.  NOTE(review): get_sqs_queue is
    # presumably defined elsewhere in this module (not shown here) --
    # confirm; the loop index i is intentionally unused.
    for i in range(5):
        get_sqs_queue()
| 22.12 | 60 | 0.679928 |
18ad36444d5128007b08506ac3f31875adc10b4d | 127 | py | Python | books/SystemProgramming/ch4_advanced/echo_command.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | books/SystemProgramming/ch4_advanced/echo_command.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | books/SystemProgramming/ch4_advanced/echo_command.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | [
"MIT"
] | null | null | null | from subprocess import Popen, PIPE
# shell=True runs the command line through the system shell; acceptable for
# a fixed literal command, but never interpolate untrusted input into `cmd`.
cmd = "echo hello world"
p = Popen(cmd, shell=True, stdout=PIPE)
# communicate() returns a (stdout, stderr) tuple of bytes; stderr is None
# here because it was not redirected.  Renamed the misleading `ret` (it is
# the captured output, not a return code) to `out`.
out, err = p.communicate()
18ad5089ae1f33994da7db7c1701301bde09c817 | 2,238 | py | Python | NiaPy/algorithms/basic/bbfwa.py | Flyzoor/NiaPy | fec1faee0f215cc3a6c2c967ec77dcbe2cbffa42 | [
"MIT"
] | null | null | null | NiaPy/algorithms/basic/bbfwa.py | Flyzoor/NiaPy | fec1faee0f215cc3a6c2c967ec77dcbe2cbffa42 | [
"MIT"
] | null | null | null | NiaPy/algorithms/basic/bbfwa.py | Flyzoor/NiaPy | fec1faee0f215cc3a6c2c967ec77dcbe2cbffa42 | [
"MIT"
] | null | null | null | # encoding=utf8
# pylint: disable=mixed-indentation, trailing-whitespace, multiple-statements, attribute-defined-outside-init, logging-not-lazy
import logging
from numpy import apply_along_axis, argmin
from NiaPy.algorithms.algorithm import Algorithm
# Configure the shared NiaPy logger used by this module.
logging.basicConfig()
logger = logging.getLogger('NiaPy.algorithms.basic')
logger.setLevel('INFO')
# Public API of this module (the class body is not shown in this chunk).
__all__ = ['BareBonesFireworksAlgorithm']
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| 32.434783 | 204 | 0.707328 |
18ae5fde1fdfdd5b09f5207f83e23ef0e8f54a07 | 854 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/ripng_template.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/ripng_template.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/ripng_template.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
| 30.5 | 94 | 0.701405 |
18af0a7d2a7ce2d43b7672a9c24d93c96068fd61 | 1,083 | py | Python | backend/feedback/migrations/0001_initial.py | kylecarter/ict4510-advwebdvlp | 0360b2353535611a6b3dd79cefe2d5780d027511 | [
"Apache-2.0"
] | null | null | null | backend/feedback/migrations/0001_initial.py | kylecarter/ict4510-advwebdvlp | 0360b2353535611a6b3dd79cefe2d5780d027511 | [
"Apache-2.0"
] | null | null | null | backend/feedback/migrations/0001_initial.py | kylecarter/ict4510-advwebdvlp | 0360b2353535611a6b3dd79cefe2d5780d027511 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.1.3 on 2018-11-18 02:34
from django.db import migrations, models
| 40.111111 | 152 | 0.626962 |
18b132a361a1a147d36815958a1a5e8956b159fc | 6,050 | py | Python | ktaned/bomb.py | MartinHarding/ktaned | b38fb91b4e2d370d20310e472863766007d4adb3 | [
"MIT"
] | 1 | 2017-12-02T21:21:37.000Z | 2017-12-02T21:21:37.000Z | ktaned/bomb.py | MartinHarding/ktaned | b38fb91b4e2d370d20310e472863766007d4adb3 | [
"MIT"
] | 22 | 2017-12-02T05:15:32.000Z | 2018-07-24T02:04:56.000Z | ktaned/bomb.py | MartinHarding/ktaned | b38fb91b4e2d370d20310e472863766007d4adb3 | [
"MIT"
] | 2 | 2017-12-01T23:49:17.000Z | 2017-12-27T17:05:03.000Z | import random
| 31.842105 | 79 | 0.571074 |
18b146154d393893b10c35ac0c235675a70fdc26 | 1,377 | py | Python | Aula19/ex09.py | danicon/MD3-Curso_Python | 3d419d440d3b28adb5c019268f4b217e7d0ce45a | [
"MIT"
] | null | null | null | Aula19/ex09.py | danicon/MD3-Curso_Python | 3d419d440d3b28adb5c019268f4b217e7d0ce45a | [
"MIT"
] | null | null | null | Aula19/ex09.py | danicon/MD3-Curso_Python | 3d419d440d3b28adb5c019268f4b217e7d0ce45a | [
"MIT"
] | null | null | null | jogador = dict()
partidas = list()
jogador['nome'] = str(input('Nome do jogador: '))
tot = int(input(f'Quantas partidas {jogador["nome"]} jogou? '))
for c in range(0, tot):
partidas.append(int(input(f' Quantos gols na partida {c}? ')))
jogador['gols'] = partidas[:]
jogador['total'] = sum(partidas)
print(30*'-=')
print(jogador)
print(30*'-=')
for k, v in jogador.items():
print(f'O campo {k} tem o valor {v}')
print(30*'-=')
print(f'O jogador {jogador["nome"]} jogou {len(jogador["gols"])} partidas.')
for i, v in enumerate(jogador["gols"]):
print(f' => Na partida {i}, fez {v} gols.')
print(f'Foi um total de {jogador["total"]} gols.')
# Ou
# jogador = dict()
# partidas = list()
# p = tot = 0
# jogador['nome'] = str(input('Nome do Jogador: '))
# quant = int(input(f'Quantas partidas {jogador["nome"]} jogou? '))
# while p < quant:
# jogos = int(input(f' Quantos gols na partida {p}? '))
# partidas.append(jogos)
# tot += jogos
# p += 1
# jogador['gols'] = partidas
# jogador['total'] = tot
# print(30*'-=')
# print(jogador)
# print(30*'-=')
# for k, v in jogador.items():
# print(f'O campo {k} tem o valor {v}')
# print(30*'-=')
# print(f'O jogador {jogador["nome"]} jogou {quant} partidas.')
# for c, g in enumerate(partidas):
# print(f' => Na partida {c}, fez {g} gols.')
# print(f'Foi um total de {jogador["total"]} gols.') | 31.295455 | 76 | 0.600581 |
18b187b96d4e16d8219c2f6163b45c5b1b15ce59 | 2,832 | py | Python | hummingbot/core/data_type/kline_stream_tracker.py | gmfang/hummingbot | fbdf516903c3b98c8447e4dc1bdceee6607b20ab | [
"Apache-2.0"
] | null | null | null | hummingbot/core/data_type/kline_stream_tracker.py | gmfang/hummingbot | fbdf516903c3b98c8447e4dc1bdceee6607b20ab | [
"Apache-2.0"
] | null | null | null | hummingbot/core/data_type/kline_stream_tracker.py | gmfang/hummingbot | fbdf516903c3b98c8447e4dc1bdceee6607b20ab | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import asyncio
from abc import abstractmethod, ABC
from enum import Enum
import logging
from typing import (
Optional,
List,
Deque
)
from hummingbot.logger import HummingbotLogger
from hummingbot.core.data_type.kline_stream_tracker_data_source import \
KlineStreamTrackerDataSource
from hummingbot.core.data_type.kline import Kline
import numpy as np
import talib
from collections import deque
def calc_tech_indicators(self):
array = [float(kline.close_price) for kline in self._klines]
# self.logger().info(f"HAHA array is {array}")
np_closes = np.array(array)
ema_short = talib.EMA(np_closes, timeperiod=7)
ema_long = talib.EMA(np_closes, timeperiod=20)
macd = talib.MACD(np_closes, fastperiod=7, slowperiod=20,
signalperiod=9)
self._ema_short = ema_short[-1]
self._ema_long = ema_long[-1]
# MACD output 3 lists. We only need last list(histogram). We only
# copy the last 10 histograms.
self._macd_histograms = macd[-1][-10:]
self.logger().info(
f"(Classic) EMA_7 is {self._ema_short}, EMA_20 is {self._ema_long}, MACD(7, 20, 9) Histogram is {macd[-1][-1]} Histogram list is {self._macd_histograms}")
| 28.897959 | 166 | 0.67161 |
18b20197ca16f4d94391b3685611593c8849a3d6 | 23,599 | py | Python | cogs/management.py | xthecoolboy/MizaBOT | fb8a449bde29fdf1d32b5a597e48e6b3463dd867 | [
"MIT"
] | null | null | null | cogs/management.py | xthecoolboy/MizaBOT | fb8a449bde29fdf1d32b5a597e48e6b3463dd867 | [
"MIT"
] | null | null | null | cogs/management.py | xthecoolboy/MizaBOT | fb8a449bde29fdf1d32b5a597e48e6b3463dd867 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
import asyncio
from datetime import datetime, timedelta
import psutil
# Bot related commands
| 59.593434 | 695 | 0.604221 |
18b252f0addcf4c4512b055a5ed661c24cb4f654 | 3,658 | py | Python | interpreter.py | Wheatwizard/Lost | 59281e2e8ab6f0fd35b8496b5f04b2a4a8d7b350 | [
"MIT"
] | 13 | 2017-08-10T21:54:12.000Z | 2021-12-08T12:50:31.000Z | interpreter.py | Wheatwizard/Lost | 59281e2e8ab6f0fd35b8496b5f04b2a4a8d7b350 | [
"MIT"
] | null | null | null | interpreter.py | Wheatwizard/Lost | 59281e2e8ab6f0fd35b8496b5f04b2a4a8d7b350 | [
"MIT"
] | null | null | null | from Stack import Stack
from random import randint
| 29.983607 | 70 | 0.617824 |
18b25e53c1ed1abb7bdec386aaba62360b44deb4 | 1,826 | py | Python | masterStock.py | Coway/premeStock | 27106fd581b71df1729f94a79f5a6a10b41ece00 | [
"MIT"
] | 69 | 2017-03-09T00:24:09.000Z | 2021-11-15T05:52:09.000Z | masterStock.py | Coway/premeStock | 27106fd581b71df1729f94a79f5a6a10b41ece00 | [
"MIT"
] | 12 | 2017-03-11T04:31:29.000Z | 2018-06-21T03:54:28.000Z | masterStock.py | supthunder/premeStock | 27106fd581b71df1729f94a79f5a6a10b41ece00 | [
"MIT"
] | 19 | 2017-03-05T22:16:37.000Z | 2020-06-23T22:41:33.000Z | import requests
from bs4 import BeautifulSoup
import json
if __name__ == '__main__':
loadMasterStock()
| 41.5 | 161 | 0.680723 |
18b566b173e3af542df61de7dc132ac1fb281305 | 231 | py | Python | tests/WebkitGtkDriverBenchmarkTest.py | hiroshitoda/WebDriverBenchmark.py | 74b643b9f299436ef6fb50741a60f04c0c69cf8c | [
"Apache-2.0"
] | null | null | null | tests/WebkitGtkDriverBenchmarkTest.py | hiroshitoda/WebDriverBenchmark.py | 74b643b9f299436ef6fb50741a60f04c0c69cf8c | [
"Apache-2.0"
] | null | null | null | tests/WebkitGtkDriverBenchmarkTest.py | hiroshitoda/WebDriverBenchmark.py | 74b643b9f299436ef6fb50741a60f04c0c69cf8c | [
"Apache-2.0"
] | null | null | null | import unittest
from selenium import webdriver
from tests import Base
if __name__ == "__main__":
unittest.main()
| 16.5 | 46 | 0.74026 |
18b58622c0bb04c070be5b53bb5876f7354aa18d | 18,442 | py | Python | utils/create_cropped_motion_dataset.py | maheriya/tennisLabels | d363addcd043dba731aebf1f4a5abb86ef434ac5 | [
"MIT"
] | null | null | null | utils/create_cropped_motion_dataset.py | maheriya/tennisLabels | d363addcd043dba731aebf1f4a5abb86ef434ac5 | [
"MIT"
] | null | null | null | utils/create_cropped_motion_dataset.py | maheriya/tennisLabels | d363addcd043dba731aebf1f4a5abb86ef434ac5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Given a VOC dataset of TENNIS videos dumped at 1920x1080 resolution, this script creates a
# scaled and cropped dataset. Even though the cropped zone size is static (1280x720/640x360)
# crop scale), the zones themselves are dynamically selected based on the objects locations
# (by reading the annotations).
# The zone size 1280x720 is selected for multiple reasons: [Other size is 640x360]
# a. This size (2/3 of full scale) gives grid boxes of 1/3rd the full scale. This grid size
# is the minimum overlap between the diagonal zones. Horizontal and vertically aligned
# zones have the overlap that is double the height or width of this grid size. The
# minimum grid size is large enough to include a trail of tennis ball across three frames
# even at fast speeds. This allows us to fully utilize motion information during training.
# b. When images are cropped at 1280x720, and then finally scaled by 1/2, we get 640x360
# as the final image size. This works perfectly with either 533x300 or 300x300 of final
# training size while still allowing for random crop for training time data augmentation.
#
# Alternative to 1280x720 cropping is direct cropping at 640x360. Of course, this imposes
# stricter tracking requirement at inference time.
#
# Since we want this to work well for motion dataset for at least three frames of motion, the
# algorithm reads three frames at a time to decide how to crop the images. The three frames of
# motion also adds inherent hysteresis to the zone selection, making it stable.
#
# The algorithm is as follows:
# 1. Read three sequential frames -- current, prev1, prev2
# 2. Read annotations. Use 'ball' and 'racket' objects annotations for zones selection.
# 3. Create a union of bboxes for each object across three frames. Let's call this uboxes.
# 4. Select zones to crop: The zone selection is based on how centered a ubox is inside a zone.
# Since zones have significant overlap with each other, multiple zones may contain an
# object. We compute the distance of each ubox center from the center of the zone.
# For each object, the zone where this distance is the smallest is selected.
# 5. Crop out the selected zone/s to create output image/s.
#
# Note that here the emphasis is NOT to center the objects within the cropped output. If we did
# that, the network will incorrectly learn to expect the objects at the center of the image.
# Since we can't provide the network with such images at the inference time, this type of
# training will be useless.
# Instead, we use fixed, four zone locations within the image, and select the zones purely on
# the basis of how *close* an object is to a zone center. This method guarantees to create
# output images where the objects will be found in various locations within the image which
# adds a good amount of regularization to the training and avoid overfitting.
#
# For the real-time inference, the application must make an initial guess about which region
# to crop for the input to the network, and may require multiple tries in the beginning.
# However, once the ball is detected, the one can implement rudimentary tracking for the next
# crop. Since ball detection (and not the racket detection) is the most important part of
# detection, decision making is trivial.
#
# Just to be clear, it is not necessary to use the same zones during inference; any region
# within the image will be fine as long as it contains the ball. When the ball nears the
# player, the racket will automatically get into the view. Note that at the time of training,
# we utilize all available samples of racket images, not just the images where both ball and
# racket are visible at the same time.
from __future__ import print_function
import os
import sys
import cv2 as cv
from lxml import etree
from glob import glob
import re
import argparse
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
import tennis_common as tc
## INPUT IMAGE DIMENSIONS (scaled to these dimensions if required)
WIDTH = 1920
HEIGHT = 1080
## MOTION DB setting: '3FRAMES' or 'FRAMESDIFF'
MOTION_TYPE = 'FRAMESDIFF'
## Change this to view images
SHOW_IMAGES = False
## Verbosity
DEBUG = 0
tc.DEBUG = DEBUG
##-#####################################################################################
args = parseArgs()
## Main variables
IN_VOCDIR = os.path.abspath(args.invoc)
IN_IMGDIR = os.path.join(IN_VOCDIR, "{}", "JPEGImages") # Template
IN_ANNDIR = os.path.join(IN_VOCDIR, "{}", "Annotations") # Template
OUT_VOCDIR = os.path.abspath(args.outvoc)
OUT_IMGDIR = os.path.join(OUT_VOCDIR, "{}", "JPEGImages") # Template
OUT_ANNDIR = os.path.join(OUT_VOCDIR, "{}", "Annotations")# Template
cropsize = (int(args.height*16./9.), args.height)
if args.height != 720 and args.height != 360:
print("Crop height of {} is not supported (use 720 or 360).".format(args.height))
sys.exit(1)
## Find base datasets containing annotations
output = tc.runSystemCmd(r"find {}/ -mindepth 3 -name '*.xml' | sed -e 's#/Annotations/.*.xml##g' | sort | uniq".format(IN_VOCDIR))
vocbases = [os.path.basename(d) for d in output]
#print(vocbases)
print("There are {} datasets to process".format(len(vocbases)))
cnt = 0
dbcnt = 0
for vocbase in vocbases:
dbcnt += 1
print("\n{}/{}. VOC Base: {}".format(dbcnt, len(vocbases), vocbase))
print("-------------------------------------------------")
i_imgdir = IN_IMGDIR.format(vocbase)
i_anndir = IN_ANNDIR.format(vocbase)
if not os.path.isdir(i_imgdir):
print("Input image dir {} is not accessible".format(i_imgdir))
if not os.path.isdir(i_anndir):
print("Input annotations dir {} is not accessible".format(i_anndir))
o_imgdir = OUT_IMGDIR.format(vocbase)
o_anndir = OUT_ANNDIR.format(vocbase)
for idir in [o_imgdir, o_anndir]:
if not os.path.isdir(idir):
os.makedirs(idir)
else:
print("Dir {} already exists".format(idir))
## Create image list to process
imgs = glob("{}/*.jpg".format(i_imgdir))
imgs = [os.path.basename(i) for i in imgs]
imgs.sort() # Sort images to pick frames in order. It is assumed the images are named likewise
(fprefix, ntemplate) = tc.getNumberingScheme(imgs[0])
if cropsize[1] == 720:
## Define the grid points
## 0/3 1/3 2/3 3/3
gy = [0, int(HEIGHT/3.), int(HEIGHT*2.0/3.0), HEIGHT]
gx = [0, int( WIDTH/3.), int( WIDTH*2.0/3.0), WIDTH]
## Create zones based on the grid
zones = tc.BoundingBoxes('zones')
# ymin xmin ymax xmax
zones.addBBox([gy[0], gx[0], gy[2], gx[2]]) # Top-left zone
zones.addBBox([gy[0], gx[1], gy[2], gx[3]]) # Top-right zone
zones.addBBox([gy[1], gx[0], gy[3], gx[2]]) # Bottom-left zone
zones.addBBox([gy[1], gx[1], gy[3], gx[3]]) # Bottom-right zone
else: # cropsize[1] == 360:
## Define the grid points
## 0/6 1/6 2/6 3/6 4/6 5/6 6/6
gy = [0, int(HEIGHT/6.), int(HEIGHT/3.), int(HEIGHT/2.), int(HEIGHT*2.0/3.0), int(HEIGHT*5.0/6.0), HEIGHT]
gx = [0, int( WIDTH/6.), int( WIDTH/3.), int( WIDTH/2.), int( WIDTH*2.0/3.0), int( WIDTH*5.0/6.0), WIDTH]
## Create zones based on the grid
zones = tc.BoundingBoxes('zones')
for y in range(len(gy)-2):
for x in range(len(gx)-2):
zones.addBBox([gy[y], gx[x], gy[y+2], gx[x+2]])
annnames = glob("{}/*.xml".format(i_anndir))
annnames = [os.path.basename(i) for i in annnames]
annnames.sort() # Sort files to pick frames in order. It is assumed that xml/images are named likewise
if len(annnames) < 3:
print("This VOC Base has less than 3 annotations. Skipping.")
continue
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE,(4,4))
i = 2 ## Index
for annfile in annnames[2:]:
annName_i = annnames[i]
annName_p1 = annnames[i-1]
annName_p2 = annnames[i-2]
i += 1
fnum = int(re.sub(r'.*[-_](\d+).xml', r'\1', annName_i))
eannName_i = fprefix + ntemplate.format(fnum) + '.xml'
eannName_p1 = fprefix + ntemplate.format(fnum-1) + '.xml'
eannName_p2 = fprefix + ntemplate.format(fnum-2) + '.xml'
if annName_i != eannName_i or annName_p1 != eannName_p1 or annName_p2 != eannName_p2:
# Not a continuous series of three frames including previous two, we skip this frame
if 1: #DEBUG>=1:
print("Skipping. Frame sequence not found for {}. ".format(annName_i))
continue # Get next image/ann
else:
if DEBUG>=1:
print("Processing {}".format(annName_i))
## Now that we got a three sequential frames, let's read annotations and get uboxes
## uboxes = union of bboxes for each of the 'ball' or 'racket' bbox in all three images
## We are assuming only one 'ball' annotation per image. However, it is easy to handle
## multiple balls per image too. Not needed for our dataset.
annfiles = [fprefix + ntemplate.format(fn) + '.xml' for fn in [fnum, fnum-1, fnum-2]]
anns = [tc.getAnnotations(os.path.join(i_anndir, annfile)) for annfile in annfiles]
seq = True
for ann_ in anns:
objs = ann_.findall('.//object/name')
if 'ball' not in objs:
seq = False
break # don't check other anns
if not seq:
if 1: # DEBUG>=1:
print("\tSkipping. 3 ball labels sequence not found for {}".format(annName_i))
continue # Get next image/ann
ballUBox, _ = tc.getUBoxes(anns[1:]) # Find union bbox for ball label from two previous frames
assert(ballUBox is not None),"Error! Cannot find union of previous two balls bounding boxes"
## Add this as a new label. We call this label 'pballs' for 'previous balls'
tc.addAnnotation(anns[0], 'pballs', ballUBox)
w = anns[0].size.width
## Scale input to WIDTHxHEIGHT fixed dimensions if input size is different
if w != WIDTH:
scale = float(WIDTH) / float(w)
## Scale annotations
anns = [tc.scaleAnnotations(ann, scale) for ann in anns]
else:
scale = 1.0
ballUBox, racketUBox = tc.getUBoxes(anns)
## Find best enclosing zone for ball and racket UBoxes
zid_b = zones.findEnclosing(ballUBox)
zid_r = zones.findEnclosing(racketUBox)
crop_zids = []
if zid_b == zid_r: ## Both ball and racket are in the same zone
if zid_b is not None:
crop_zids.append(zid_b)
else:
for zid in [zid_b, zid_r]:
if zid is not None:
crop_zids.append(zid)
if DEBUG>=1:
print("Crop Zones: {}".format(crop_zids))
#assert(len(crop_zids) != 0), "No zones found for cropping. This means that the frame doesn't have ball or racket"
if len(crop_zids) == 0:
print("No zones found for cropping. This means that the frame doesn't have ball or racket. Skipped")
continue
## load images as grayscale
img_i, img_p1, img_p2 = [fprefix + ntemplate.format(fn) + '.jpg' for fn in [fnum, fnum-1, fnum-2]]
_cvimg_c = cv.imread(os.path.join(i_imgdir, img_i), cv.IMREAD_COLOR)
_cvimg = cv.cvtColor(_cvimg_c, cv.COLOR_BGR2GRAY)
_cvimg1 = cv.imread(os.path.join(i_imgdir, img_p1), cv.IMREAD_GRAYSCALE)
_cvimg2 = cv.imread(os.path.join(i_imgdir, img_p2), cv.IMREAD_GRAYSCALE)
if w != WIDTH:
## Resize if scale is different
cvimg_c = cv.resize(_cvimg_c, (WIDTH, HEIGHT), interpolation = cv.INTER_CUBIC)
cvimg = cv.resize(_cvimg, (WIDTH, HEIGHT), interpolation = cv.INTER_CUBIC)
cvimg1 = cv.resize(_cvimg1, (WIDTH, HEIGHT), interpolation = cv.INTER_CUBIC)
cvimg2 = cv.resize(_cvimg2, (WIDTH, HEIGHT), interpolation = cv.INTER_CUBIC)
else:
cvimg_c = _cvimg_c
cvimg = _cvimg
cvimg1 = _cvimg1
cvimg2 = _cvimg2
if MOTION_TYPE == '3FRAMES':
# Merge (merge 3 grascale motion frames into BGR channels)
cvimg_n = cv.merge([cvimg, cvimg1, cvimg2])
elif MOTION_TYPE == 'FRAMESDIFF':
## Create frame-diff based background subtracted image with a trail of three balls
## We are doing this (keeping the trail) on purpse. This to provide the network
## with some referene in the case when the ball is not visible in the current frame
## but it was visible in previous frames.
diff_p1p2 = cv.absdiff(cvimg1, cvimg2)
diff_cp1 = cv.absdiff(cvimg, cvimg1)
image_b = cv.bitwise_or(diff_p1p2, diff_cp1) ## This will create the trail of three objects
#bring back? =>#image_diff= cv.dilate(image_b, kernel) ## enlarge the blobs
# Replace blue channel with frame diff. Blue channel is less important in tennis for us
# since the ball is greenish yellow -- most information in red and green channel.
cvimg_n = cvimg_c.copy()
cvimg_n[:,:,0] = image_b #image_diff
else:
print("Unsupported motion type {}".format(MOTION_TYPE))
sys.exit(1)
## Crop images and annotations as per selected zones
imgfilenames = []
annfilenames = []
outimgs = []
outanns = []
for zid in crop_zids:
imgbase = fprefix + ntemplate.format(fnum) + '-z{:02d}'.format(zid)
imgname = imgbase + '.jpg'
annname = imgbase + '.xml'
imgfilenames.append(imgname)
annfilenames.append(annname)
roi = zones.getBBox(zid)
outann = tc.cropAnnotations(anns[0], roi, imgname, 6)
outimg = zones.getImgRoI(zid, cvimg_n).copy()
outanns.append(outann)
outimgs.append(outimg)
if DEBUG>=3 and len(crop_zids) > 1:
obj_xml = etree.tostring(outann, pretty_print=True, xml_declaration=False)
print("Annotation {}\n{}".format(annname, obj_xml))
######################################################################################
## Write output files
######################################################################################
for index in range(len(outimgs)):
## Write annotation files
tc.cleanUpAnnotations(outanns[index], ['ball', 'racket', 'pballs'])
tc.writeAnnotation(outanns[index], os.path.join(o_anndir, annfilenames[index]))
## Write cropped motion images
imgfile = os.path.join(o_imgdir, imgfilenames[index])
if DEBUG>=2:
print("Writing {}".format(imgfile))
cv.imwrite(imgfile, outimgs[index])
if SHOW_IMAGES:
for zid in crop_zids:
cvimg_n = drawZone(cvimg_n, zones, zid, cropsize)
for index in range(len(outimgs)):
img = outimgs[index]
for obj in outanns[index].iter('object'):
bbox = [obj.bndbox.ymin, obj.bndbox.xmin, obj.bndbox.ymax, obj.bndbox.xmax]
outimgs[index] = tc.drawBoundingBox(outimgs[index], bbox, tc.LBL_IDS[obj.name])
## Draw bounding boxes
if ballUBox is not None:
cvimg_n = tc.drawBoundingBox(cvimg_n, ballUBox, 1)
if racketUBox is not None:
cvimg_n = tc.drawBoundingBox(cvimg_n, racketUBox, 2)
show_imgs(cvimg_c, cvimg_n, outimgs)
#if (cnt >= 50):
# assert(False), "Temp forced exit to check work. Remove later."
cnt += 1
cv.destroyAllWindows()
print("Done. Motion Dataset created with {} annotations and images".format(cnt))
| 46.570707 | 131 | 0.612244 |
18b5cd9e5d6c9c3f826dbcf798680d452eb2f577 | 5,454 | py | Python | tests/unit/core/test_core_config.py | Mbompr/fromconfig | eb34582c79a9a9e3b9e60d41fec2ac6a619e9c27 | [
"Apache-2.0"
] | 19 | 2021-03-18T16:48:03.000Z | 2022-03-02T13:09:21.000Z | tests/unit/core/test_core_config.py | Mbompr/fromconfig | eb34582c79a9a9e3b9e60d41fec2ac6a619e9c27 | [
"Apache-2.0"
] | 3 | 2021-04-23T23:03:29.000Z | 2021-05-11T14:09:16.000Z | tests/unit/core/test_core_config.py | Mbompr/fromconfig | eb34582c79a9a9e3b9e60d41fec2ac6a619e9c27 | [
"Apache-2.0"
] | 3 | 2021-04-19T22:05:34.000Z | 2022-02-21T11:32:16.000Z | """Tests for core.config."""
import json
import yaml
from pathlib import Path
import pytest
import fromconfig
def test_core_config_no_jsonnet(tmpdir, monkeypatch):
"""Test jsonnet missing handling."""
monkeypatch.setattr(fromconfig.core.config, "_jsonnet", None)
# No issue to dump even if missing
config = {"x": 2}
fromconfig.dump(config, str(tmpdir.join("config.jsonnet")))
fromconfig.dump(config, str(tmpdir.join("config.json")))
fromconfig.dump(config, str(tmpdir.join("config.yaml")))
fromconfig.dump(config, str(tmpdir.join("config.yml")))
# No issue to load non-jsonnet files
assert fromconfig.load(str(tmpdir.join("config.json"))) == config
assert fromconfig.load(str(tmpdir.join("config.yaml"))) == config
assert fromconfig.load(str(tmpdir.join("config.yml"))) == config
# Raise import error if reloading from jsonnet
with pytest.raises(ImportError):
fromconfig.load(str(tmpdir.join("config.jsonnet")))
def test_core_config():
"""Test Config."""
config = fromconfig.Config(x=1)
assert config["x"] == 1
assert list(config) == ["x"]
config["x"] = 2
assert config["x"] == 2
def test_core_config_is_json_serializable():
"""Test that Config is json serializable."""
config = fromconfig.Config(x=1)
assert json.dumps(config) == '{"x": 1}'
| 31.894737 | 113 | 0.565457 |
18b6001fed8371bb91ce9e52ae604dbe21d1ea14 | 5,353 | py | Python | release.py | dhleong/beholder | 1459c67907c436f6abc2abcd82c817e177fcd85f | [
"MIT"
] | 4 | 2020-03-11T01:35:42.000Z | 2021-08-31T20:18:22.000Z | release.py | dhleong/beholder | 1459c67907c436f6abc2abcd82c817e177fcd85f | [
"MIT"
] | 15 | 2018-04-29T20:25:14.000Z | 2020-03-14T13:44:59.000Z | release.py | dhleong/beholder | 1459c67907c436f6abc2abcd82c817e177fcd85f | [
"MIT"
] | 1 | 2020-10-27T22:43:46.000Z | 2020-10-27T22:43:46.000Z | #!/usr/bin/env python
#
# Release script for beholder
#
import hashlib
import urllib
from collections import OrderedDict
try:
from hostage import * #pylint: disable=unused-wildcard-import,wildcard-import
except ImportError:
print "!! Release library unavailable."
print "!! Use `pip install hostage` to fix."
print "!! You will also need an API token in .github.token,"
print "!! a .hubrrc config, or `brew install hub` configured."
print "!! A $GITHUB_TOKEN env variable will also work."
exit(1)
#
# Globals
#
notes = File(".last-release-notes")
latestTag = git.Tag.latest()
def buildLabeled(labelsToTitles):
"""Given a set of (label, title) tuples, produces an
OrderedDict whose keys are `label`, and whose values are
dictionaries containing 'title' -> `title`, and
'content' -> string. The iteration order of the dictionary
will preserve the ordering of the provided tuples
"""
result = OrderedDict()
for k, v in labelsToTitles:
result[k] = {'title': v, 'content': ''}
return result
#
# Verify
#
verify(Grep("stopship", inDir="src").foundAny(silent=False)) \
.then(echoAndDie("I don't think so"))
version = verify(File("src/beholder.go")
.filtersTo(RegexFilter('const Version = "(.*)"'))
).valueElse(echoAndDie("No version!?"))
versionTag = git.Tag(version)
verify(versionTag.exists())\
.then(echoAndDie("Version `%s` already exists!" % version))
#
# Make sure all the tests pass
#
# this syntax recursively checks all subpackages for tests
verify(Execute("go test ./... -v")).succeeds(silent=False).orElse(die())
#
# Build the release notes
#
initialNotes = verify(notes.contents()).valueElse(buildDefaultNotes)
notes.delete()
verify(Edit(notes, withContent=initialNotes).didCreate())\
.orElse(echoAndDie("Aborted due to empty message"))
releaseNotes = notes.contents()
#
# Compile
#
versions = [
# (label, os, arch) tuples
("macOS", "darwin", "amd64"),
("windows-x64", "windows", "amd64"),
]
compiled = []
for (buildLabel, os, arch) in versions:
f = 'bin/beholder-%s-%s' % (version, buildLabel)
if os == "windows":
f += ".exe"
print "Compiling:", f
cmd = 'env GOOS=%s GOARCH=%s go build -v -o %s' % (os, arch, f)
verify(Execute(cmd)).succeeds(silent=False)
compiled.append(f)
#
# Upload to github
#
print "Uploading to Github..."
verify(versionTag).create()
verify(versionTag).push("origin")
gitRelease = github.Release(version)
verify(gitRelease).create(body=releaseNotes)
for f in compiled:
print "Uploading", f
verify(gitRelease).uploadFile(f, 'application/octet-stream')
#
# Update homebrew repo
#
print "Updating homebrew..."
tarUrl = 'https://github.com/dhleong/beholder/archive/%s.tar.gz' % version
tarSha = sha256(tarUrl)
homebrewConfig = github.Config("dhleong/homebrew-tap")
formulaFile = github.RepoFile("/Formula/beholder.rb", config=homebrewConfig)
oldContents = formulaFile.read()
newContents = oldContents
newContents = re.sub('url "[^"]+"', 'url "%s"' % tarUrl, newContents)
newContents = re.sub('sha256 "[^"]+"', 'sha256 "%s"' % tarSha, newContents)
print " url <-", tarUrl
print " sha256 <-", tarSha
commit = 'Update for v%s' % version
verify(formulaFile).write(newContents, commitMessage=commit)
#
# Success! Now, just cleanup and we're done!
#
notes.delete()
print "Done! Published %s" % version
| 27.172589 | 82 | 0.64618 |
18b65fdb2a140d38c3ae1d51c5156e9061a7ade5 | 881 | py | Python | cmsplugin_cascade/migrations/0003_inlinecascadeelement.py | aDENTinTIME/djangocms-cascade | c38c1c5ad052dbe233b50fb833ad8e9a919014f2 | [
"MIT"
] | null | null | null | cmsplugin_cascade/migrations/0003_inlinecascadeelement.py | aDENTinTIME/djangocms-cascade | c38c1c5ad052dbe233b50fb833ad8e9a919014f2 | [
"MIT"
] | null | null | null | cmsplugin_cascade/migrations/0003_inlinecascadeelement.py | aDENTinTIME/djangocms-cascade | c38c1c5ad052dbe233b50fb833ad8e9a919014f2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
| 31.464286 | 152 | 0.611805 |
18b6ab1df2a80e856e7bccdd1594333d60103c4a | 366 | py | Python | SmartWaiterAPI/API/collections/goeswellwith_operations.py | KyrumX/project78-api | 334b4781a4488cf53b360f75b9f3265e40ebf8b4 | [
"MIT"
] | null | null | null | SmartWaiterAPI/API/collections/goeswellwith_operations.py | KyrumX/project78-api | 334b4781a4488cf53b360f75b9f3265e40ebf8b4 | [
"MIT"
] | null | null | null | SmartWaiterAPI/API/collections/goeswellwith_operations.py | KyrumX/project78-api | 334b4781a4488cf53b360f75b9f3265e40ebf8b4 | [
"MIT"
] | null | null | null | from API.models import GoesWellWith, Menu
| 24.4 | 67 | 0.661202 |
18b77fe12dbcd84b5d365548128c4a03151b1396 | 3,949 | py | Python | src/simulator/simulator.py | ed741/PathBench | 50fe138eb1f824f49fe1a862705e435a1c3ec3ae | [
"BSD-3-Clause"
] | 46 | 2020-12-25T04:09:15.000Z | 2022-03-25T12:32:42.000Z | src/simulator/simulator.py | ed741/PathBench | 50fe138eb1f824f49fe1a862705e435a1c3ec3ae | [
"BSD-3-Clause"
] | 36 | 2020-12-21T16:10:02.000Z | 2022-01-03T01:42:01.000Z | src/simulator/simulator.py | judicaelclair/PathBenchURO | 101e67674efdfa8e27e1cf7787dac9fdf99552fe | [
"BSD-3-Clause"
] | 11 | 2021-01-06T23:34:12.000Z | 2022-03-21T17:21:47.000Z | from typing import Optional
from algorithms.basic_testing import BasicTesting
from simulator.controllers.main_controller import MainController
from simulator.controllers.map.map_controller import MapController
from simulator.controllers.gui.gui_controller import GuiController
from simulator.models.main_model import MainModel
from simulator.models.map_model import MapModel
from simulator.services.debug import DebugLevel
from simulator.services.services import Services
from simulator.services.event_manager.events.event import Event
from simulator.services.event_manager.events.reinit_event import ReinitEvent
from simulator.views.main_view import MainView
from simulator.views.map.map_view import MapView
from simulator.views.gui.gui_view import GuiView
from structures import Size
"""
Implementation is done after https://github.com/wesleywerner/mvc-game-design
"""
| 34.640351 | 95 | 0.683971 |
18b7fbb4733a21ef838f96c25af5f53f3a7b8f73 | 1,445 | py | Python | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/mrp_byproduct/models/mrp_subproduct.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | 1 | 2019-12-19T01:53:13.000Z | 2019-12-19T01:53:13.000Z | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/mrp_byproduct/models/mrp_subproduct.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/mrp_byproduct/models/mrp_subproduct.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.addons import decimal_precision as dp
| 40.138889 | 125 | 0.665744 |
18b95560e12ae1f8ecbf164d50ad646b8d18c3b3 | 126 | py | Python | contacts/urls.py | HaraDev001/RealEstate-Backend | db2ae8d143bd15fbb49432ae8b14fd3bf8e6dd1c | [
"MIT"
] | 2 | 2021-05-17T18:02:36.000Z | 2021-05-17T18:02:44.000Z | contacts/urls.py | HaraDev001/RealEstate-Backend | db2ae8d143bd15fbb49432ae8b14fd3bf8e6dd1c | [
"MIT"
] | null | null | null | contacts/urls.py | HaraDev001/RealEstate-Backend | db2ae8d143bd15fbb49432ae8b14fd3bf8e6dd1c | [
"MIT"
] | null | null | null | from django.urls import path
from .views import ContactCreateView
urlpatterns = [
path('',ContactCreateView.as_view()),
] | 21 | 41 | 0.753968 |
18b9e35412962cc6d7d17f54bab50f62ce2c5c9d | 410 | py | Python | Python_do_zero_Guanabara/04_CondiçõesEmPython/aula/aula15.py | HenriqueSOliver/Projetos_Python | f18c5a343ad1b746a12bd372298b2debe9bc65ec | [
"MIT"
] | null | null | null | Python_do_zero_Guanabara/04_CondiçõesEmPython/aula/aula15.py | HenriqueSOliver/Projetos_Python | f18c5a343ad1b746a12bd372298b2debe9bc65ec | [
"MIT"
] | null | null | null | Python_do_zero_Guanabara/04_CondiçõesEmPython/aula/aula15.py | HenriqueSOliver/Projetos_Python | f18c5a343ad1b746a12bd372298b2debe9bc65ec | [
"MIT"
] | null | null | null | # modelo anterior - Enquanto cont at 10 for verdade, ser repetido
cont = 1
while cont <= 10:
print(cont, ' ...', end='')
cont += 1
print('FIM')
# Usando o Enquanto VERDADE ele vai repetir para sempre, temos que colocar uma condio PARA=BREAK
n = s = 0
while True:
n = int(input('Digite um nmero: [Digite 999 para PARAR] '))
if n == 999:
break
s += n
print(f'A soma vale {s}') | 27.333333 | 98 | 0.62439 |
18bb4104d3cd6b1e910557e18aee65ea9222b8ce | 1,124 | py | Python | internal/handlers/lebanon.py | fillingthemoon/cartogram-web | 58b645bca0c22b9bccdb2a5a8213a5a24a7e5958 | [
"MIT"
] | null | null | null | internal/handlers/lebanon.py | fillingthemoon/cartogram-web | 58b645bca0c22b9bccdb2a5a8213a5a24a7e5958 | [
"MIT"
] | 20 | 2019-10-20T11:27:38.000Z | 2022-03-12T00:28:17.000Z | internal/handlers/lebanon.py | fillingthemoon/cartogram-web | 58b645bca0c22b9bccdb2a5a8213a5a24a7e5958 | [
"MIT"
] | 16 | 2019-08-22T04:49:44.000Z | 2021-06-09T04:44:57.000Z | import settings
import handlers.base_handler
import csv
| 28.1 | 319 | 0.623665 |
18bbd1f2f3931ba0aa7f9a0bc9c67949e29e02ad | 11,184 | py | Python | routes/GetFeed/insta_crawling 복사본/ScrollFeed.py | akalswl14/styltebox_manageweb | 5d0e33435a7456387d28b6b58762912d0552a717 | [
"MIT"
] | null | null | null | routes/GetFeed/insta_crawling 복사본/ScrollFeed.py | akalswl14/styltebox_manageweb | 5d0e33435a7456387d28b6b58762912d0552a717 | [
"MIT"
] | 2 | 2021-03-31T20:20:47.000Z | 2021-12-13T20:50:07.000Z | routes/GetFeed/insta_crawling 복사본/ScrollFeed.py | akalswl14/styltebox_manageweb | 5d0e33435a7456387d28b6b58762912d0552a717 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import urllib.request
from urllib.request import urlopen # url
from urllib.parse import quote_plus #
from bs4 import BeautifulSoup
from selenium import webdriver # webdriver
import time #
from time import sleep
import warnings #
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.webdriver.chrome.options import Options
from MakeExcel import MakeFollowerExcel
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
warnings.filterwarnings(action='ignore') #
# url
baseUrl = "https://www.instagram.com/"
SCROLL_PAUSE_TIME = 1.0 | 36.429967 | 109 | 0.630097 |
18bca4227b43e8db0e3b74e9fc679d7c822dc33c | 358 | py | Python | option.py | ujiro99/python_cli_sample | 34e39e05722ebba3b539861b6567aeecb93a818f | [
"MIT"
] | null | null | null | option.py | ujiro99/python_cli_sample | 34e39e05722ebba3b539861b6567aeecb93a818f | [
"MIT"
] | null | null | null | option.py | ujiro99/python_cli_sample | 34e39e05722ebba3b539861b6567aeecb93a818f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
if __name__ == '__main__':
main()
| 14.916667 | 71 | 0.572626 |
18bcc995a7294c17a7102d9ddff9a88a24d958f1 | 27 | py | Python | itsnp/__init__.py | CaffeineDuck/itsnp-discord-bot | 73d8fddc282c0fbc3cdaef81eef3efa9dccacfd8 | [
"MIT"
] | null | null | null | itsnp/__init__.py | CaffeineDuck/itsnp-discord-bot | 73d8fddc282c0fbc3cdaef81eef3efa9dccacfd8 | [
"MIT"
] | null | null | null | itsnp/__init__.py | CaffeineDuck/itsnp-discord-bot | 73d8fddc282c0fbc3cdaef81eef3efa9dccacfd8 | [
"MIT"
] | null | null | null | from .bot import ItsnpBot
| 13.5 | 26 | 0.777778 |
18be667bef982c766e8e51b2444d4138ae324879 | 7,182 | py | Python | mojo/public/tools/bindings/pylib/parse/mojo_lexer_unittest.py | Acidburn0zzz/chromium-1 | 4c08f442d2588a2c7cfaa117a55bd87d2ac32f9a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | mojo/public/tools/bindings/pylib/parse/mojo_lexer_unittest.py | Acidburn0zzz/chromium-1 | 4c08f442d2588a2c7cfaa117a55bd87d2ac32f9a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | mojo/public/tools/bindings/pylib/parse/mojo_lexer_unittest.py | Acidburn0zzz/chromium-1 | 4c08f442d2588a2c7cfaa117a55bd87d2ac32f9a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mojo_lexer
import unittest
# Try to load the ply module, if not, then assume it is in the third_party
# directory.
try:
# Disable lint check which fails to find the ply module.
# pylint: disable=F0401
from ply import lex
except ImportError:
# This assumes this file is in src/mojo/public/tools/bindings/pylib/parse/.
module_path, module_name = os.path.split(__file__)
third_party = os.path.join(module_path, os.pardir, os.pardir, os.pardir,
os.pardir, os.pardir, os.pardir, 'third_party')
sys.path.append(third_party)
# pylint: disable=F0401
from ply import lex
# This (monkey-patching LexToken to make comparison value-based) is evil, but
# we'll do it anyway. (I'm pretty sure ply's lexer never cares about comparing
# for object identity.)
setattr(lex.LexToken, '__eq__', _LexTokenEq)
def _MakeLexToken(type, value, lineno=1, lexpos=0):
"""Makes a LexToken with the given parameters. (Note that lineno is 1-based,
but lexpos is 0-based.)"""
rv = lex.LexToken()
rv.type, rv.value, rv.lineno, rv.lexpos = type, value, lineno, lexpos
return rv
def _MakeLexTokenForKeyword(keyword, **kwargs):
"""Makes a LexToken for the given keyword."""
return _MakeLexToken(keyword.upper(), keyword.lower(), **kwargs)
if __name__ == "__main__":
unittest.main()
| 44.608696 | 80 | 0.632693 |
18c2d8a09f275424cdb15f2a256534524b3fa369 | 59 | py | Python | glue/admin.py | Valchris/AngularJS-Django-Template | 10c90087984dcd9e6d29380eb4380824e65bcecf | [
"MIT"
] | 1 | 2015-07-29T04:28:26.000Z | 2015-07-29T04:28:26.000Z | glue/admin.py | Valchris/AngularJS-Django-Template | 10c90087984dcd9e6d29380eb4380824e65bcecf | [
"MIT"
] | null | null | null | glue/admin.py | Valchris/AngularJS-Django-Template | 10c90087984dcd9e6d29380eb4380824e65bcecf | [
"MIT"
] | null | null | null | from django.contrib import admin
from glue.models import *
| 19.666667 | 32 | 0.813559 |
18c72218e5a46e6e788b195ce2de8f4c86c23159 | 444 | py | Python | qmt/geometry/geo_data_base.py | basnijholt/qmt | 68f781ff489fd9f5ddc817dacfc8ff3a8fdeb2b4 | [
"MIT"
] | null | null | null | qmt/geometry/geo_data_base.py | basnijholt/qmt | 68f781ff489fd9f5ddc817dacfc8ff3a8fdeb2b4 | [
"MIT"
] | null | null | null | qmt/geometry/geo_data_base.py | basnijholt/qmt | 68f781ff489fd9f5ddc817dacfc8ff3a8fdeb2b4 | [
"MIT"
] | null | null | null | from typing import Any, Dict, List
from qmt.infrastructure import WithParts
| 26.117647 | 58 | 0.572072 |
18c9fc293f4846928246ba71ec2d917b2627fc7c | 20,166 | py | Python | ANSIBLE/library/eos_routemap.py | ayosef/pynet_test | 1b750a62467fbbcb2436c035ce49d41b435f45ba | [
"Apache-2.0"
] | null | null | null | ANSIBLE/library/eos_routemap.py | ayosef/pynet_test | 1b750a62467fbbcb2436c035ce49d41b435f45ba | [
"Apache-2.0"
] | null | null | null | ANSIBLE/library/eos_routemap.py | ayosef/pynet_test | 1b750a62467fbbcb2436c035ce49d41b435f45ba | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
DOCUMENTATION = """
---
module: eos_routemap
short_description: Manage EOS routemap resources
description:
- This module will manage routemap entries on EOS nodes
version_added: 1.2.0
category: Route Policy
author: Arista EOS+
requirements:
- Arista EOS 4.13.7M or later with command API enabled
- Python Client for eAPI 0.4.0 or later
notes:
- All configuration is idempotent unless otherwise specified
- Supports eos metaparameters for using the eAPI transport
- Supports stateful resource configuration.
options:
name:
description:
- The name of the routemap to manage.
required: true
default: null
choices: []
aliases: []
version_added: 1.2.0
action:
description:
- The action associated with the routemap name.
required: true
default: 'permit'
choices: ['permit','deny']
aliases: []
version_added: 1.2.0
seqno:
description:
- The sequence number of the rule that this entry corresponds to.
required: true
default: null
choices: []
aliases: []
version_added: 1.2.0
description:
description:
- The description for this routemap entry.
required: false
default: null
choices: []
aliases: []
version_added: 1.2.0
match:
description:
- The list of match statements that define the routemap entry. The
match statements should be a comma separated list of match statements
without the word match at the beginning of the string. See the example
below for more information.
required: false
default: null
choices: []
aliases: []
version_added: 1.2.0
set:
description:
- The list of set statements that define the routemap entry. The
set statements should be a comma separated list of set statements
without the word set at the beginning of the string. See the example
below for more information.
required: false
default: null
choices: []
aliases: []
version_added: 1.2.0
continue:
description:
- The statement defines the next routemap clause to evaluate.
required: false
default: null
choices: []
aliases: []
version_added: 1.2.0
"""
EXAMPLES = """
- eos_routemap: name=rm1 action=permit seqno=10
description='this is a great routemap'
match='as 50,interface Ethernet2'
set='tag 100,weight 1000'
continue=20
"""
#<<EOS_COMMON_MODULE_START>>
import syslog
import collections
from ansible.module_utils.basic import *
try:
import pyeapi
PYEAPI_AVAILABLE = True
except ImportError:
PYEAPI_AVAILABLE = False
DEFAULT_SYSLOG_PRIORITY = syslog.LOG_NOTICE
DEFAULT_CONNECTION = 'localhost'
TRANSPORTS = ['socket', 'http', 'https', 'http_local']
def map_argument_spec(self):
"""map_argument_spec maps only the module argument spec to attrs
This method will map the argumentspec minus the meta_args to attrs
and return the attrs. This returns a dict object that includes only
the original argspec plus the stateful_args (if self._stateful=True)
Returns:
dict: Returns a dict object that includes the original
argument_spec plus stateful_args with values minus meta_args
"""
keys = set(self.params).difference(self.meta_args)
attrs = dict()
attrs = dict([(k, self.params[k]) for k in self.params if k in keys])
if 'CHECKMODE' in attrs:
del attrs['CHECKMODE']
return attrs
def validate(self):
for key, value in self.attributes.iteritems():
func = self.func('validate_%s' % key)
if func:
self.attributes[key] = func(value)
#<<EOS_COMMON_MODULE_END>>
def instance(module):
""" Returns an instance of Routemaps based on name, action and sequence
number.
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
_instance = dict(name=name, action=action, seqno=seqno, state='absent')
try:
result = module.api('routemaps').get(name)[action][seqno]
except:
result = None
if result:
_instance['state'] = 'present'
_instance['seqno'] = str(seqno)
_instance['set'] = ','.join(result['set'])
desc = result['description']
_instance['description'] = desc if desc else ''
_instance['match'] = ','.join(result['match'])
cont = result['continue']
_instance['continue'] = str(cont) if cont else ''
return _instance
def set_description(module):
""" Configures the description for the routemap
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
value = module.attributes['description']
module.log('Invoked set_description with %s for eos_routemap[%s %s %s]'
% (value, name, action, seqno))
if value == '':
module.node.api('routemaps').set_description(name, action, seqno,
disable=True)
else:
module.node.api('routemaps').set_description(name, action, seqno, value)
def set_continue(module):
""" Configures the continue value for the routemap
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
try:
value = int(module.attributes['continue'])
except:
value = None
module.log('Invoked set_continue for eos_routemap[%s %s %s]'
% (name, action, seqno))
if value is None:
module.node.api('routemaps').set_continue(name, action, seqno,
disable=True)
else:
module.node.api('routemaps').set_continue(name, action, seqno, value)
def set_match(module):
""" Configures the match statements for the routemap
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
statements = module.attributes['match'].split(',')
module.log('Invoked set_match for eos_routemap[%s %s %s]'
% (name, action, seqno))
module.node.api('routemaps').set_match_statements(name, action, seqno,
statements)
def set_set(module):
""" Configures the set statements for the routemap
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
statements = module.attributes['set'].split(',')
module.log('Invoked set_set for eos_routemap[%s %s %s]'
% (name, action, seqno))
module.node.api('routemaps').set_set_statements(name, action, seqno,
statements)
def main():
""" The main module routine called when the module is run by Ansible
"""
argument_spec = dict(
name=dict(required=True),
action=dict(default='permit', choices=['permit', 'deny']),
seqno=dict(required=True),
description=dict(),
match=dict(),
set=dict()
)
argument_spec['continue'] = dict()
module = EosAnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
module.flush(True)
main() | 33.1133 | 82 | 0.611475 |
18cbef6584ee81c511138c2578efbf19d3e08e5c | 890 | py | Python | setup.py | colinfrei/furystoolbox | 2a8613393a46ad6ae2ad2c2fa86fd255fea96796 | [
"MIT"
] | 1 | 2020-01-03T00:32:35.000Z | 2020-01-03T00:32:35.000Z | setup.py | colinfrei/furystoolbox | 2a8613393a46ad6ae2ad2c2fa86fd255fea96796 | [
"MIT"
] | 1 | 2020-02-08T08:54:31.000Z | 2020-02-08T09:31:30.000Z | setup.py | colinfrei/furystoolbox | 2a8613393a46ad6ae2ad2c2fa86fd255fea96796 | [
"MIT"
] | 1 | 2020-02-08T06:54:29.000Z | 2020-02-08T06:54:29.000Z | """Setup configuration."""
import setuptools
from furystoolbox import __version__
with open("README.md", "r") as fh:
LONG = fh.read()
REQUIRES = ['click>=7.0',
'requests>=2.21.0',
'PyGithub>=1.43.4']
setuptools.setup(
name="furystoolbox",
version=__version__,
author="Joakim Sorensen",
author_email="ludeeus@gmail.com",
description="A collection of tools.",
long_description=LONG,
long_description_content_type="text/markdown",
url="https://github.com/ludeeus/furystoolbox",
install_requires=REQUIRES,
packages=setuptools.find_packages(),
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
entry_points={
'console_scripts': [
'fury = furystoolbox.cli.cli:CLI'
]
}
)
| 26.176471 | 50 | 0.62809 |
18cca0ce2ddedc77fe6c967bfef7de9a4fb88942 | 2,120 | py | Python | pythran/tests/cases/sobelfilter.py | SylvainCorlay/pythran | 908ec070d837baf77d828d01c3e35e2f4bfa2bfa | [
"BSD-3-Clause"
] | 1 | 2018-03-24T00:33:03.000Z | 2018-03-24T00:33:03.000Z | pythran/tests/cases/sobelfilter.py | SylvainCorlay/pythran | 908ec070d837baf77d828d01c3e35e2f4bfa2bfa | [
"BSD-3-Clause"
] | null | null | null | pythran/tests/cases/sobelfilter.py | SylvainCorlay/pythran | 908ec070d837baf77d828d01c3e35e2f4bfa2bfa | [
"BSD-3-Clause"
] | 1 | 2017-03-12T20:32:36.000Z | 2017-03-12T20:32:36.000Z | #skip.runas import Image; im = Image.open("Scribus.gif"); image_list = list(im.getdata()); cols, rows = im.size; res = range(len(image_list)); sobelFilter(image_list, res, cols, rows)
#runas cols = 100; rows = 100 ;image_list=[x%10+y%20 for x in xrange(cols) for y in xrange(rows)]; sobelFilter(image_list, cols, rows)
#bench cols = 1000; rows = 500 ;image_list=[x%10+y%20 for x in xrange(cols) for y in xrange(rows)]; sobelFilter(image_list, cols, rows)
#pythran export sobelFilter(int list, int, int)
| 49.302326 | 183 | 0.544811 |
18cd66ae12672c4f05fb7afeb5ea83419646d0b9 | 7,110 | py | Python | occam_utils/occam_datasets.py | dschinagl/occam | f001cc3a0bf56687dc4c4bb79385f5d010cdd43e | [
"BSD-3-Clause"
] | 1 | 2022-03-29T07:05:23.000Z | 2022-03-29T07:05:23.000Z | occam_utils/occam_datasets.py | dschinagl/occam | f001cc3a0bf56687dc4c4bb79385f5d010cdd43e | [
"BSD-3-Clause"
] | null | null | null | occam_utils/occam_datasets.py | dschinagl/occam | f001cc3a0bf56687dc4c4bb79385f5d010cdd43e | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import torch
from spconv.pytorch.utils import PointToVoxel
from scipy.spatial.transform import Rotation
from pcdet.datasets import DatasetTemplate
| 35.909091 | 87 | 0.610689 |
18ceea770cb8f269d967cd89240a6533d6cf62a5 | 5,840 | py | Python | utils/calibration_module.py | choushunn/holography_test | 79100f8b955683afd47e63e2762d6945d6b14e34 | [
"CC-BY-3.0"
] | null | null | null | utils/calibration_module.py | choushunn/holography_test | 79100f8b955683afd47e63e2762d6945d6b14e34 | [
"CC-BY-3.0"
] | null | null | null | utils/calibration_module.py | choushunn/holography_test | 79100f8b955683afd47e63e2762d6945d6b14e34 | [
"CC-BY-3.0"
] | 1 | 2021-12-24T04:18:22.000Z | 2021-12-24T04:18:22.000Z | """
This is the script containing the calibration module, basically calculating homography matrix.
This code and data is released under the Creative Commons Attribution-NonCommercial 4.0 International license (CC BY-NC.) In a nutshell:
# The license is only for non-commercial use (commercial licenses can be obtained from Stanford).
# The material is provided as-is, with no warranties whatsoever.
# If you publish any code, data, or scientific work based on this, please cite our work.
Technical Paper:
Y. Peng, S. Choi, N. Padmanaban, G. Wetzstein. Neural Holography with Camera-in-the-loop Training. ACM TOG (SIGGRAPH Asia), 2020.
"""
import cv2
import matplotlib.pyplot as plt
import numpy as np
def circle_detect(captured_img, num_circles, spacing, pad_pixels=(0., 0.), show_preview=True):
"""
Detects the circle of a circle board pattern
:param captured_img: captured image
:param num_circles: a tuple of integers, (num_circle_x, num_circle_y)
:param spacing: a tuple of integers, in pixels, (space between circles in x, space btw circs in y direction)
:param show_preview: boolean, default True
:param pad_pixels: coordinate of the left top corner of warped image.
Assuming pad this amount of pixels on the other side.
:return: a tuple, (found_dots, H)
found_dots: boolean, indicating success of calibration
H: a 3x3 homography matrix (numpy)
"""
# Binarization
# org_copy = org.copy() # Otherwise, we write on the original image!
img = (captured_img.copy() * 255).astype(np.uint8)
if len(img.shape) > 2:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.medianBlur(img, 15)
img_gray = img.copy()
img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 121, 0)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
img = 255 - img
# Blob detection
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.filterByColor = True
params.minThreshold = 128
# Filter by Area.
params.filterByArea = True
params.minArea = 50
# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.785
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.87
# Filter by Inertia
params.filterByInertia = False
params.minInertiaRatio = 0.01
detector = cv2.SimpleBlobDetector_create(params)
# Detecting keypoints
# this is redundant for what comes next, but gives us access to the detected dots for debug
keypoints = detector.detect(img)
found_dots, centers = cv2.findCirclesGrid(img, num_circles,
blobDetector=detector, flags=cv2.CALIB_CB_SYMMETRIC_GRID)
# Drawing the keypoints
cv2.drawChessboardCorners(captured_img, num_circles, centers, found_dots)
img_gray = cv2.drawKeypoints(img_gray, keypoints, np.array([]), (0, 255, 0),
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Find transformation
H = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]], dtype=np.float32)
if found_dots:
# Generate reference points to compute the homography
ref_pts = np.zeros((num_circles[0] * num_circles[1], 1, 2), np.float32)
pos = 0
for i in range(0, num_circles[1]):
for j in range(0, num_circles[0]):
ref_pts[pos, 0, :] = spacing * np.array([j, i]) + np.array(pad_pixels)
pos += 1
H, mask = cv2.findHomography(centers, ref_pts, cv2.RANSAC, 1)
if show_preview:
dsize = [int((num_circs - 1) * space + 2 * pad_pixs)
for num_circs, space, pad_pixs in zip(num_circles, spacing, pad_pixels)]
captured_img_warp = cv2.warpPerspective(captured_img, H, tuple(dsize))
if show_preview:
fig = plt.figure()
ax = fig.add_subplot(223)
ax.imshow(img_gray, cmap='gray')
ax2 = fig.add_subplot(221)
ax2.imshow(img, cmap='gray')
ax3 = fig.add_subplot(222)
ax3.imshow(captured_img, cmap='gray')
if found_dots:
ax4 = fig.add_subplot(224)
ax4.imshow(captured_img_warp, cmap='gray')
plt.show()
return found_dots, H
| 37.677419 | 136 | 0.638185 |
18ceea954bda99122d17bf7b1a926a3bf8227da9 | 270 | py | Python | Main/apps.py | Naretto95/Django-Vault | 36fac69873c844bf72732ff635513f0204b7d61a | [
"MIT"
] | null | null | null | Main/apps.py | Naretto95/Django-Vault | 36fac69873c844bf72732ff635513f0204b7d61a | [
"MIT"
] | null | null | null | Main/apps.py | Naretto95/Django-Vault | 36fac69873c844bf72732ff635513f0204b7d61a | [
"MIT"
] | null | null | null | from django.apps import AppConfig
from django.contrib.admin.apps import AdminConfig
| 27 | 56 | 0.781481 |
18d163664110bd63d5393ef2d5efd9b345f52613 | 38 | py | Python | researchutils/task/__init__.py | yuishihara/researchutils | bb3ec467386d43a1e2282ec6d024216ce4dae841 | [
"MIT"
] | 1 | 2018-09-06T00:54:49.000Z | 2018-09-06T00:54:49.000Z | researchutils/task/__init__.py | yuishihara/researchutils | bb3ec467386d43a1e2282ec6d024216ce4dae841 | [
"MIT"
] | 28 | 2018-08-25T03:54:30.000Z | 2018-10-14T12:09:47.000Z | researchutils/task/__init__.py | yuishihara/researchutils | bb3ec467386d43a1e2282ec6d024216ce4dae841 | [
"MIT"
] | null | null | null | from researchutils.task import plotter | 38 | 38 | 0.894737 |
18d43cd8f5f88ffb19e9b4a5bb9e768fb2646c67 | 220,532 | py | Python | venv/lib/python3.8/site-packages/aws_cdk/aws_kinesis/__init__.py | harun-vit/aws-cdk-pipelines-demo | 7e7faeee112c3dca718613fa8a1fba80d2116bac | [
"MIT-0"
] | null | null | null | venv/lib/python3.8/site-packages/aws_cdk/aws_kinesis/__init__.py | harun-vit/aws-cdk-pipelines-demo | 7e7faeee112c3dca718613fa8a1fba80d2116bac | [
"MIT-0"
] | null | null | null | venv/lib/python3.8/site-packages/aws_cdk/aws_kinesis/__init__.py | harun-vit/aws-cdk-pipelines-demo | 7e7faeee112c3dca718613fa8a1fba80d2116bac | [
"MIT-0"
] | null | null | null | '''
# Amazon Kinesis Construct Library
<!--BEGIN STABILITY BANNER-->---


---
<!--END STABILITY BANNER-->
[Amazon Kinesis](https://docs.aws.amazon.com/streams/latest/dev/introduction.html) provides collection and processing of large
[streams](https://aws.amazon.com/streaming-data/) of data records in real time. Kinesis data streams can be used for rapid and continuous data
intake and aggregation.
## Table Of Contents
* [Streams](#streams)
* [Encryption](#encryption)
* [Import](#import)
* [Permission Grants](#permission-grants)
* [Read Permissions](#read-permissions)
* [Write Permissions](#write-permissions)
* [Custom Permissions](#custom-permissions)
* [Metrics](#metrics)
## Streams
Amazon Kinesis Data Streams ingests a large amount of data in real time, durably stores the data, and makes the data available for consumption.
Using the CDK, a new Kinesis stream can be created as part of the stack using the construct's constructor. You may specify the `streamName` to give
your own identifier to the stream. If not, CloudFormation will generate a name.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
Stream(self, "MyFirstStream",
stream_name="my-awesome-stream"
)
```
You can also specify properties such as `shardCount` to indicate how many shards the stream should choose and a `retentionPeriod`
to specify how long the data in the shards should remain accessible.
Read more at [Creating and Managing Streams](https://docs.aws.amazon.com/streams/latest/dev/working-with-streams.html)
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
Stream(self, "MyFirstStream",
stream_name="my-awesome-stream",
shard_count=3,
retention_period=Duration.hours(48)
)
```
### Encryption
[Stream encryption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesis-stream-streamencryption.html) enables
server-side encryption using an AWS KMS key for a specified stream.
Encryption is enabled by default on your stream with the master key owned by Kinesis Data Streams in regions where it is supported.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
Stream(self, "MyEncryptedStream")
```
You can enable encryption on your stream with a user-managed key by specifying the `encryption` property.
A KMS key will be created for you and associated with the stream.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
Stream(self, "MyEncryptedStream",
encryption=StreamEncryption.KMS
)
```
You can also supply your own external KMS key to use for stream encryption by specifying the `encryptionKey` property.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
import aws_cdk.aws_kms as kms
key = kms.Key(self, "MyKey")
Stream(self, "MyEncryptedStream",
encryption=StreamEncryption.KMS,
encryption_key=key
)
```
### Import
Any Kinesis stream that has been created outside the stack can be imported into your CDK app.
Streams can be imported by their ARN via the `Stream.fromStreamArn()` API
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
stack = Stack(app, "MyStack")
imported_stream = Stream.from_stream_arn(stack, "ImportedStream", "arn:aws:kinesis:us-east-2:123456789012:stream/f3j09j2230j")
```
Encrypted Streams can also be imported by their attributes via the `Stream.fromStreamAttributes()` API
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
from aws_cdk.aws_kms import Key
stack = Stack(app, "MyStack")
imported_stream = Stream.from_stream_attributes(stack, "ImportedEncryptedStream",
stream_arn="arn:aws:kinesis:us-east-2:123456789012:stream/f3j09j2230j",
encryption_key=kms.Key.from_key_arn("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012")
)
```
### Permission Grants
IAM roles, users or groups which need to be able to work with Amazon Kinesis streams at runtime should be granted IAM permissions.
Any object that implements the `IGrantable` interface (has an associated principal) can be granted permissions by calling:
* `grantRead(principal)` - grants the principal read access
* `grantWrite(principal)` - grants the principal write permissions to a Stream
* `grantReadWrite(principal)` - grants principal read and write permissions
#### Read Permissions
Grant `read` access to a stream by calling the `grantRead()` API.
If the stream has an encryption key, read permissions will also be granted to the key.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
lambda_role = iam.Role(self, "Role",
assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
description="Example role..."
)
stream = Stream(self, "MyEncryptedStream",
encryption=StreamEncryption.KMS
)
# give lambda permissions to read stream
stream.grant_read(lambda_role)
```
The following read permissions are provided to a service principal by the `grantRead()` API:
* `kinesis:DescribeStreamSummary`
* `kinesis:GetRecords`
* `kinesis:GetShardIterator`
* `kinesis:ListShards`
* `kinesis:SubscribeToShard`
#### Write Permissions
Grant `write` permissions to a stream is provided by calling the `grantWrite()` API.
If the stream has an encryption key, write permissions will also be granted to the key.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
lambda_role = iam.Role(self, "Role",
assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
description="Example role..."
)
stream = Stream(self, "MyEncryptedStream",
encryption=StreamEncryption.KMS
)
# give lambda permissions to write to stream
stream.grant_write(lambda_role)
```
The following write permissions are provided to a service principal by the `grantWrite()` API:
* `kinesis:ListShards`
* `kinesis:PutRecord`
* `kinesis:PutRecords`
#### Custom Permissions
You can add any set of permissions to a stream by calling the `grant()` API.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
user = iam.User(stack, "MyUser")
stream = Stream(stack, "MyStream")
# give my user permissions to list shards
stream.grant(user, "kinesis:ListShards")
```
### Metrics
You can use common metrics from your stream to create alarms and/or dashboards. The `stream.metric('MetricName')` method creates a metric with the stream namespace and dimension. You can also use pre-define methods like `stream.metricGetRecordsSuccess()`. To find out more about Kinesis metrics check [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html).
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
stream = Stream(stack, "MyStream")
# Using base metric method passing the metric name
stream.metric("GetRecords.Success")
# using pre-defined metric method
stream.metric_get_records_success()
# using pre-defined and overriding the statistic
stream.metric_get_records_success(statistic="Maximum")
```
'''
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from ._jsii import *
import aws_cdk.aws_cloudwatch
import aws_cdk.aws_iam
import aws_cdk.aws_kms
import aws_cdk.core
import constructs
# Generated jsii proxy class backing the IStream interface declared earlier
# in this module. Do not instantiate directly; the jsii runtime uses it to
# wrap JavaScript objects that implement the interface.
class _IStreamProxy(
    jsii.proxy_for(aws_cdk.core.IResource)  # type: ignore[misc]
):
    '''A Kinesis Stream.'''

    # jsii type identifier for this interface.
    __jsii_type__: typing.ClassVar[str] = "@aws-cdk/aws-kinesis.IStream"

# Adding a "__jsii_proxy_class__(): typing.Type" function to the interface
typing.cast(typing.Any, IStream).__jsii_proxy_class__ = lambda : _IStreamProxy
# Public names re-exported by this generated module.
__all__ = [
"CfnStream",
"CfnStreamConsumer",
"CfnStreamConsumerProps",
"CfnStreamProps",
"IStream",
"Stream",
"StreamAttributes",
"StreamEncryption",
"StreamProps",
]
# Hide everything not listed in __all__ from consumers of the module.
publication.publish()
| 60.635689 | 468 | 0.68824 |
18d5365ed6c594ed06788598b0b869b72340bab9 | 2,752 | py | Python | model.py | nupurbaghel/Image_Captioning_CV | 2af5abe1464006113e38a911ace62faacb9cbbd4 | [
"MIT"
] | null | null | null | model.py | nupurbaghel/Image_Captioning_CV | 2af5abe1464006113e38a911ace62faacb9cbbd4 | [
"MIT"
] | null | null | null | model.py | nupurbaghel/Image_Captioning_CV | 2af5abe1464006113e38a911ace62faacb9cbbd4 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence
| 43 | 125 | 0.62936 |
18d56845b92528becf4631678e4c6ca21b008e41 | 965 | py | Python | BaseTest/click_button_chrome.py | lloydtawanda/AzurePriceListWebScrapper | 0d6e7a38af13cb780a7b04a8832b67a22727e3bc | [
"Apache-2.0"
] | 2 | 2019-07-16T13:49:35.000Z | 2021-06-17T22:21:17.000Z | BaseTest/click_button_chrome.py | lloydtawanda/AzurePriceListWebScrapper | 0d6e7a38af13cb780a7b04a8832b67a22727e3bc | [
"Apache-2.0"
] | null | null | null | BaseTest/click_button_chrome.py | lloydtawanda/AzurePriceListWebScrapper | 0d6e7a38af13cb780a7b04a8832b67a22727e3bc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 16 14:36:46 2019
@author: Tawanda
"""
import sys
import argparse
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
if __name__ == "__main__":
    # Parse the required path to the ChromeDriver executable.
    parser = argparse.ArgumentParser()
    parser.add_argument("--driver", help="path to chrome driver")
    args = parser.parse_args()
    if not args.driver:
        print("Please enter a valid path to the chrome driver ( --driver argument )")
        sys.exit(1)
    # Launch Chrome with an implicit wait so element lookups poll for up
    # to 10 seconds before failing.
    browser = webdriver.Chrome(executable_path=args.driver)
    browser.implicitly_wait(10)
    browser.maximize_window()
    try:
        # Load the page and click the header button as a smoke test.
        browser.get('https://www.oursky.com/')
        button = browser.find_element_by_class_name('btn-header')
        button.click()
        print('=======Button Click test was successful=======')
    except NoSuchElementException as ex:
        print(f'Error :: No such element : {ex}')
    # NOTE(review): the browser is never quit, so the Chrome process is left
    # running after the script exits — confirm whether that is intentional.
18d5b7387f5bbbe02061b184773c4b0590414bd7 | 22,854 | py | Python | hymo/swmmreport.py | lucashtnguyen/hymo | 956661401b2ac5220a83349ed15bc1d4bb7d60f4 | [
"BSD-3-Clause"
] | 4 | 2017-12-18T17:43:54.000Z | 2021-09-29T01:05:33.000Z | hymo/swmmreport.py | lucashtnguyen/hymo | 956661401b2ac5220a83349ed15bc1d4bb7d60f4 | [
"BSD-3-Clause"
] | 30 | 2017-09-26T22:23:33.000Z | 2021-09-03T16:38:18.000Z | hymo/swmmreport.py | lucashtnguyen/hymo | 956661401b2ac5220a83349ed15bc1d4bb7d60f4 | [
"BSD-3-Clause"
] | 2 | 2017-10-03T01:41:16.000Z | 2019-12-17T23:42:42.000Z | from .base_reader import BaseReader
import pandas as pd
| 36.801932 | 141 | 0.586899 |
18d5cbf8a3d63285ac1fed2569f0fc69a3422e0e | 25,917 | py | Python | tbip.py | n-longuetmarx/tbip | c6f137167aec8075c2ae98183cdf4c5e7dbc700a | [
"MIT"
] | null | null | null | tbip.py | n-longuetmarx/tbip | c6f137167aec8075c2ae98183cdf4c5e7dbc700a | [
"MIT"
] | null | null | null | tbip.py | n-longuetmarx/tbip | c6f137167aec8075c2ae98183cdf4c5e7dbc700a | [
"MIT"
] | null | null | null | """Learn ideal points with the text-based ideal point model (TBIP).
Let y_{dv} denote the counts of word v in document d. Let x_d refer to the
ideal point of the author of document d. Then we model:
theta, beta ~ Gamma(alpha, alpha)
x, eta ~ N(0, 1)
y_{dv} ~ Pois(sum_k theta_dk beta_kv exp(x_d * eta_kv).
We perform variational inference to provide estimates for the posterior
distribution of each latent variable. We take reparameterization gradients,
using a lognormal variational family for the positive variables (theta, beta)
and a normal variational family for the real variables (x, eta).
The directory `data/{data_name}/clean/` should have the following four files:
1. `counts.npz`: a [num_documents, num_words] sparse matrix containing the
word counts for each document.
2. `author_indices.npy`: a [num_documents] vector where each entry is an
integer in the set {0, 1, ..., num_authors - 1}, indicating the author of
the corresponding document in `counts.npz`.
3. `vocabulary.txt`: a [num_words]-length file where each line is a string
denoting the corresponding word in the vocabulary.
4. `author_map.txt`: a [num_authors]-length file where each line is a string
denoting the name of an author in the corpus.
We provide more details in our paper [1].
#### References
[1]: Keyon Vafa, Suresh Naidu, David Blei. Text-Based Ideal Points. In
_Conference of the Association for Computational Linguistics_, 2020.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import time
from absl import flags
import numpy as np
import scipy.sparse as sparse
import tensorflow as tf
import tensorflow_probability as tfp
# Command-line flags configuring training; parsed by absl when the app runs.
flags.DEFINE_float("learning_rate",
default=0.01,
help="Adam learning rate.")
flags.DEFINE_integer("max_steps",
default=1000000,
help="Number of training steps to run.")
flags.DEFINE_integer("num_topics",
default=50,
help="Number of topics.")
flags.DEFINE_integer("batch_size",
default=1024,
help="Batch size.")
flags.DEFINE_integer("num_samples",
default=1,
help="Number of samples to use for ELBO approximation.")
flags.DEFINE_enum("counts_transformation",
default="nothing",
enum_values=["nothing", "binary", "sqrt", "log"],
help="Transformation used on counts data.")
flags.DEFINE_boolean("pre_initialize_parameters",
default=True,
help="Whether to use pre-initialized document and topic "
"intensities (with Poisson factorization).")
flags.DEFINE_string("data",
default="senate-speeches-114",
help="Data source being used.")
flags.DEFINE_integer("senate_session",
default=113,
help="Senate session (used only when data is "
"'senate-speech-comparisons'.")
flags.DEFINE_integer("print_steps",
default=500,
help="Number of steps to print and save results.")
flags.DEFINE_integer("seed",
default=123,
help="Random seed to be used.")
# Module-level handle to the parsed flag values.
FLAGS = flags.FLAGS
def build_input_pipeline(data_dir,
                         batch_size,
                         random_state,
                         counts_transformation="nothing"):
    """Load data and build iterator for minibatches.

    Args:
        data_dir: The directory where the data is located. There must be four
            files inside it: `counts.npz`, `author_indices.npy`,
            `author_map.txt`, and `vocabulary.txt`.
        batch_size: The batch size to use for training.
        random_state: A NumPy `RandomState` object, used to shuffle the data.
        counts_transformation: A string indicating how to transform the counts.
            One of "nothing", "binary", "log", or "sqrt".

    Returns:
        A tuple `(iterator, author_weights, vocabulary, author_map,
        num_documents, num_words, num_authors)` where `iterator` is a one-shot
        tf.data iterator yielding `(document id, counts, author index)`
        batches and `author_weights` is a float32 vector of per-author
        document-length weights.

    Raises:
        ValueError: If `counts_transformation` is not one of the recognized
            options.
    """
    counts = sparse.load_npz(os.path.join(data_dir, "counts.npz"))
    num_documents, num_words = counts.shape
    author_indices = np.load(
        os.path.join(data_dir, "author_indices.npy")).astype(np.int32)
    num_authors = np.max(author_indices + 1)
    author_map = np.loadtxt(os.path.join(data_dir, "author_map.txt"),
                            dtype=str,
                            delimiter="\n",
                            encoding='latin-1')
    # Shuffle data.
    documents = random_state.permutation(num_documents)
    shuffled_author_indices = author_indices[documents]
    shuffled_counts = counts[documents]
    # Apply counts transformation.
    if counts_transformation == "nothing":
        count_values = shuffled_counts.data
    elif counts_transformation == "binary":
        count_values = np.int32(shuffled_counts.data > 0)
    elif counts_transformation == "log":
        count_values = np.round(np.log(1 + shuffled_counts.data))
    elif counts_transformation == "sqrt":
        count_values = np.round(np.sqrt(shuffled_counts.data))
    else:
        raise ValueError("Unrecognized counts transformation.")
    # Store counts as sparse tensor so it occupies less memory.
    shuffled_counts = tf.SparseTensor(
        indices=np.array(shuffled_counts.nonzero()).T,
        values=count_values,
        dense_shape=shuffled_counts.shape)
    dataset = tf.data.Dataset.from_tensor_slices(
        (documents, shuffled_counts, shuffled_author_indices))
    batches = dataset.repeat().batch(batch_size).prefetch(batch_size)
    iterator = batches.make_one_shot_iterator()
    vocabulary = np.loadtxt(os.path.join(data_dir, "vocabulary.txt"),
                            dtype=str,
                            delimiter="\n",
                            comments="<!-")
    # Total word count per author, then average words per document per author.
    total_counts_per_author = np.bincount(
        author_indices,
        weights=np.array(np.sum(counts, axis=1)).flatten())
    counts_per_document_per_author = (
        total_counts_per_author / np.bincount(author_indices))
    # Author weights measure how much lengthier than average each author's
    # documents are (ratio of the author's mean document length to the
    # corpus-wide mean document length).
    author_weights = (counts_per_document_per_author /
                      np.mean(np.sum(counts, axis=1))).astype(np.float32)
    return (iterator, author_weights, vocabulary, author_map,
            num_documents, num_words, num_authors)
def build_lognormal_variational_parameters(initial_document_loc,
                                           initial_objective_topic_loc,
                                           num_documents,
                                           num_words,
                                           num_topics):
    """Build document and objective topic lognormal variational parameters.

    Args:
        initial_document_loc: A [num_documents, num_topics] NumPy array
            containing the initial document intensity means.
        initial_objective_topic_loc: A [num_topics, num_words] NumPy array
            containing the initial objective topic means.
        num_documents: Number of documents in the data set.
        num_words: Number of words in the data set.
        num_topics: Number of topics.

    Returns:
        document_loc: A Variable object with shape [num_documents, num_topics].
        document_scale: A positive Variable object with shape [num_documents,
            num_topics].
        objective_topic_loc: A Variable object with shape
            [num_topics, num_words].
        objective_topic_scale: A positive Variable object with shape
            [num_topics, num_words].
    """
    # Locations are initialized at the log of the provided (positive)
    # intensities, since the variational family is lognormal.
    document_loc = tf.get_variable(
        "document_loc",
        initializer=tf.constant(np.log(initial_document_loc)))
    objective_topic_loc = tf.get_variable(
        "objective_topic_loc",
        initializer=tf.constant(np.log(initial_objective_topic_loc)))
    # Scales are parameterized through unconstrained logits.
    document_scale_logit = tf.get_variable(
        "document_scale_logit",
        shape=[num_documents, num_topics],
        initializer=tf.initializers.random_normal(mean=0, stddev=1.),
        dtype=tf.float32)
    objective_topic_scale_logit = tf.get_variable(
        "objective_topic_scale_logit",
        shape=[num_topics, num_words],
        initializer=tf.initializers.random_normal(mean=0, stddev=1.),
        dtype=tf.float32)
    # Softplus keeps the scale parameters strictly positive.
    document_scale = tf.nn.softplus(document_scale_logit)
    objective_topic_scale = tf.nn.softplus(objective_topic_scale_logit)
    tf.summary.histogram("params/document_loc", document_loc)
    tf.summary.histogram("params/objective_topic_loc", objective_topic_loc)
    tf.summary.histogram("params/document_scale", document_scale)
    tf.summary.histogram("params/objective_topic_scale", objective_topic_scale)
    return (document_loc, document_scale,
            objective_topic_loc, objective_topic_scale)
def print_topics(neutral_mean, negative_mean, positive_mean, vocabulary,
                 words_per_topic=10):
    """Get neutral and ideological topics to be used for Tensorboard.

    Args:
        neutral_mean: The mean of the neutral topics, a NumPy matrix with
            shape [num_topics, num_words].
        negative_mean: The mean of the negative topics, a NumPy matrix with
            shape [num_topics, num_words].
        positive_mean: The mean of the positive topics, a NumPy matrix with
            shape [num_topics, num_words].
        vocabulary: A list of the vocabulary with shape [num_words].
        words_per_topic: Number of top words to display for each topic
            (defaults to 10, the historical hard-coded value).

    Returns:
        topic_strings: A NumPy array with one string per topic, each
            containing the negative, neutral, and positive word lists.
    """
    num_topics = neutral_mean.shape[0]
    # Word indices sorted by decreasing intensity for each topic.
    top_neutral_words = np.argsort(-neutral_mean, axis=1)
    top_negative_words = np.argsort(-negative_mean, axis=1)
    top_positive_words = np.argsort(-positive_mean, axis=1)
    topic_strings = []
    for topic_idx in range(num_topics):
        neutral_row = [vocabulary[word] for word in
                       top_neutral_words[topic_idx, :words_per_topic]]
        neutral_string = "Neutral {}: {}".format(
            topic_idx, ", ".join(neutral_row))
        positive_row = [vocabulary[word] for word in
                        top_positive_words[topic_idx, :words_per_topic]]
        positive_string = "Positive {}: {}".format(
            topic_idx, ", ".join(positive_row))
        negative_row = [vocabulary[word] for word in
                        top_negative_words[topic_idx, :words_per_topic]]
        negative_string = "Negative {}: {}".format(
            topic_idx, ", ".join(negative_row))
        topic_strings.append(" \n".join(
            [negative_string, neutral_string, positive_string]))
    return np.array(topic_strings)
def print_ideal_points(ideal_point_loc, author_map):
    """Return authors ordered by ascending ideal point, comma-separated.

    Args:
        ideal_point_loc: A [num_authors] array of ideal point locations.
        author_map: A [num_authors] array of author name strings.

    Returns:
        A single string of author names sorted by ideal point, for display
        in Tensorboard.
    """
    return ", ".join(author_map[np.argsort(ideal_point_loc)])
def get_log_prior(samples, prior):
    """Return log prior of sampled Gaussians.

    Args:
        samples: A `Tensor` with shape `[num_samples, :, :]`.
        prior: String representing prior distribution, either 'normal'
            (standard Normal) or 'gamma' (Gamma(0.3, 0.3)).

    Returns:
        log_prior: A `Tensor` with shape `[num_samples]`, with the log priors
            summed across latent dimensions.

    Raises:
        ValueError: If `prior` is not 'normal' or 'gamma'.
    """
    if prior == 'normal':
        prior_distribution = tfp.distributions.Normal(loc=0., scale=1.)
    elif prior == 'gamma':
        prior_distribution = tfp.distributions.Gamma(concentration=0.3,
                                                     rate=0.3)
    else:
        # Previously an unknown prior fell through to a NameError below;
        # fail fast with a clear message instead.
        raise ValueError("Unrecognized prior: {}".format(prior))
    log_prior = tf.reduce_sum(prior_distribution.log_prob(samples),
                              axis=[1, 2])
    return log_prior
def get_elbo(counts,
             document_indices,
             author_indices,
             author_weights,
             document_distribution,
             objective_topic_distribution,
             ideological_topic_distribution,
             ideal_point_distribution,
             num_documents,
             batch_size,
             num_samples=1):
    """Approximate variational Lognormal ELBO using reparameterization.

    Args:
        counts: A matrix with shape `[batch_size, num_words]`.
        document_indices: An int-vector with shape `[batch_size]`.
        author_indices: An int-vector with shape `[batch_size]`.
        author_weights: A vector with shape `[num_authors]`, constituting how
            lengthy the opinion is above average.
        document_distribution: A positive `Distribution` object with parameter
            shape `[num_documents, num_topics]`.
        objective_topic_distribution: A positive `Distribution` object with
            parameter shape `[num_topics, num_words]`.
        ideological_topic_distribution: A positive `Distribution` object with
            parameter shape `[num_topics, num_words]`.
        ideal_point_distribution: A `Distribution` object over [0, 1] with
            parameter_shape `[num_authors]`.
        num_documents: The number of documents in the total data set (used to
            calculate log-likelihood scale).
        batch_size: Batch size (used to calculate log-likelihood scale).
        num_samples: Number of Monte-Carlo samples.

    Returns:
        elbo: A scalar representing a Monte-Carlo sample of the ELBO. This
            value is averaged across samples and summed across batches.
    """
    # Draw reparameterized samples from each variational family.
    document_samples = document_distribution.sample(num_samples)
    objective_topic_samples = objective_topic_distribution.sample(num_samples)
    ideological_topic_samples = ideological_topic_distribution.sample(
        num_samples)
    ideal_point_samples = ideal_point_distribution.sample(num_samples)
    _, num_topics, _ = objective_topic_samples.get_shape().as_list()
    # Log prior: standard Normal for ideal points and ideological topics,
    # Gamma for documents and objective topics (see get_log_prior).
    ideal_point_log_prior = tfp.distributions.Normal(
        loc=0.,
        scale=1.)
    ideal_point_log_prior = tf.reduce_sum(
        ideal_point_log_prior.log_prob(ideal_point_samples), axis=[1, 2])
    document_log_prior = get_log_prior(document_samples, 'gamma')
    objective_topic_log_prior = get_log_prior(objective_topic_samples, 'gamma')
    ideological_topic_log_prior = get_log_prior(ideological_topic_samples,
                                                'normal')
    log_prior = (document_log_prior +
                 objective_topic_log_prior +
                 ideological_topic_log_prior +
                 ideal_point_log_prior)
    # Gather the per-document and per-author samples for this minibatch.
    selected_document_samples = tf.gather(document_samples,
                                          document_indices,
                                          axis=1)
    selected_ideal_points = tf.gather(ideal_point_samples,
                                      author_indices,
                                      axis=1)
    selected_ideological_topic_samples = tf.exp(
        # replace by a column
        selected_ideal_points[:, :, :, tf.newaxis] *
        ideological_topic_samples[:, tf.newaxis, :, :])
    # Normalize by how lengthy the author's opinion is.
    selected_author_weights = tf.gather(author_weights, author_indices)
    selected_ideological_topic_samples = (
        selected_author_weights[tf.newaxis, :, tf.newaxis, tf.newaxis] *
        selected_ideological_topic_samples)
    # Entropy of each variational family, summed over latent dimensions.
    document_entropy = -tf.reduce_sum(
        document_distribution.log_prob(document_samples),
        axis=[1, 2])
    objective_topic_entropy = -tf.reduce_sum(
        objective_topic_distribution.log_prob(objective_topic_samples),
        axis=[1, 2])
    ideological_topic_entropy = -tf.reduce_sum(
        ideological_topic_distribution.log_prob(ideological_topic_samples),
        axis=[1, 2])
    ideal_point_entropy = -tf.reduce_sum(
        ideal_point_distribution.log_prob(ideal_point_samples),
        axis=1)
    entropy = (document_entropy +
               objective_topic_entropy +
               ideological_topic_entropy +
               ideal_point_entropy)
    # Poisson rate: sum_k theta_dk * beta_kv * exp(x_d * eta_kv).
    rate = tf.reduce_sum(
        selected_document_samples[:, :, :, tf.newaxis] *
        objective_topic_samples[:, tf.newaxis, :, :] *
        selected_ideological_topic_samples[:, :, :, :],
        axis=2)
    count_distribution = tfp.distributions.Poisson(rate=rate)
    # Need to un-sparsify the counts to evaluate log-likelihood.
    count_log_likelihood = count_distribution.log_prob(
        tf.sparse.to_dense(counts))
    count_log_likelihood = tf.reduce_sum(count_log_likelihood, axis=[1, 2])
    # Adjust for the fact that we're only using a minibatch.
    count_log_likelihood = count_log_likelihood * (num_documents / batch_size)
    elbo = log_prior + count_log_likelihood + entropy
    elbo = tf.reduce_mean(elbo)
    tf.summary.scalar("elbo/elbo", elbo)
    tf.summary.scalar("elbo/log_prior", tf.reduce_mean(log_prior))
    tf.summary.scalar("elbo/count_log_likelihood",
                      tf.reduce_mean(count_log_likelihood))
    tf.summary.scalar("elbo/entropy", tf.reduce_mean(entropy))
    return elbo
if __name__ == "__main__":
tf.app.run()
| 42.141463 | 80 | 0.680904 |
18d6578d8c4bdcf3e1695a1c9ddbac250283e282 | 6,138 | py | Python | calc/gui.py | tatarskiy-welder/tax_calc | 827ec6e174ffc9cfc13e24427307a8a6b85123e0 | [
"MIT"
] | null | null | null | calc/gui.py | tatarskiy-welder/tax_calc | 827ec6e174ffc9cfc13e24427307a8a6b85123e0 | [
"MIT"
] | null | null | null | calc/gui.py | tatarskiy-welder/tax_calc | 827ec6e174ffc9cfc13e24427307a8a6b85123e0 | [
"MIT"
] | null | null | null | from tkinter import *
from tax_profiler import TaxProfile
from tkinter import messagebox as mb
if __name__ == '__main__':
main()
| 33.540984 | 81 | 0.564679 |
18d67d5d9fabdd711ac5fef81a528edb66bc9e9b | 136 | py | Python | lms_python/lms_app/admin.py | gabrielmdsantos/LMSBD | dff3001a560f8cccb938957bf2d5732d4ae3d163 | [
"Apache-2.0"
] | null | null | null | lms_python/lms_app/admin.py | gabrielmdsantos/LMSBD | dff3001a560f8cccb938957bf2d5732d4ae3d163 | [
"Apache-2.0"
] | null | null | null | lms_python/lms_app/admin.py | gabrielmdsantos/LMSBD | dff3001a560f8cccb938957bf2d5732d4ae3d163 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from lms_app.models import Professor
admin.site.register(Professor)
# Register your models here.
| 22.666667 | 37 | 0.794118 |
18d7a6360616dabd7740bc58273af43b8634ecfa | 5,573 | py | Python | pymedextcore/normalize.py | equipe22/pymedext_core | 578e32fdc015c9b75f566d9e58a8fade889879e7 | [
"Apache-2.0"
] | 1 | 2021-02-04T10:33:00.000Z | 2021-02-04T10:33:00.000Z | pymedextcore/normalize.py | equipe22/pymedext_core | 578e32fdc015c9b75f566d9e58a8fade889879e7 | [
"Apache-2.0"
] | 4 | 2020-12-17T09:16:24.000Z | 2021-03-26T10:40:30.000Z | pymedextcore/normalize.py | equipe22/pymedext_core | 578e32fdc015c9b75f566d9e58a8fade889879e7 | [
"Apache-2.0"
] | 1 | 2020-12-17T12:32:50.000Z | 2020-12-17T12:32:50.000Z | #!/usr/bin/env python3
from .document import Document
from intervaltree import Interval,IntervalTree
# from .annotationGraph import AnnotationGraph
import logging
logger = logging.getLogger(__name__)
| 42.869231 | 145 | 0.61134 |
18d8e4a9db3824bc1bf6d57f22782a4ffcc36549 | 93 | py | Python | phr/dnireniec/apps.py | richardqa/django-ex | e5b8585f28a97477150ac5daf5e55c74b70d87da | [
"CC0-1.0"
] | null | null | null | phr/dnireniec/apps.py | richardqa/django-ex | e5b8585f28a97477150ac5daf5e55c74b70d87da | [
"CC0-1.0"
] | null | null | null | phr/dnireniec/apps.py | richardqa/django-ex | e5b8585f28a97477150ac5daf5e55c74b70d87da | [
"CC0-1.0"
] | null | null | null | from django.apps import AppConfig
| 15.5 | 33 | 0.763441 |
18d91850121d98d86b712bda14df3f044488a26e | 479 | py | Python | Exercício feitos pela primeira vez/ex004colorido.py | Claayton/pythonExerciciosLinux | 696cdb16983638418bd0d0d4fe44dc72662b9c97 | [
"MIT"
] | 1 | 2021-01-23T15:43:34.000Z | 2021-01-23T15:43:34.000Z | Exercício feitos pela primeira vez/ex004colorido.py | Claayton/pythonExerciciosLinux | 696cdb16983638418bd0d0d4fe44dc72662b9c97 | [
"MIT"
] | null | null | null | Exercício feitos pela primeira vez/ex004colorido.py | Claayton/pythonExerciciosLinux | 696cdb16983638418bd0d0d4fe44dc72662b9c97 | [
"MIT"
] | null | null | null | #Ex004b
algo = (input('\033[34m''Digite algo: ''\033[m'))
print('So letras ou palavras?: \033[33m{}\033[m'.format(algo.isalpha()))
print('Est em maisculo?: \033[34m{}\033[m'.format(algo.isupper()))
print('Est em minsculo?: \033[35m{}\033[m'.format(algo.islower()))
print('Est captalizada?: \033[36m{}\033[m'.format(algo.istitle()))
print('S tem espao?: \033[31m{}\033[m'.format(algo.isspace()))
print(' numrico?: \033[32m{}\033[m'.format(algo.isnumeric()))
print('xD')
| 47.9 | 73 | 0.668058 |
18da93de7ae1c7f1f8c72d039c0ee8611ca41811 | 1,444 | py | Python | utilities_common/util_base.py | pettershao-ragilenetworks/sonic-utilities | 553936b61a677b95a45a797c0e3ccdaf015cce94 | [
"Apache-2.0"
] | null | null | null | utilities_common/util_base.py | pettershao-ragilenetworks/sonic-utilities | 553936b61a677b95a45a797c0e3ccdaf015cce94 | [
"Apache-2.0"
] | null | null | null | utilities_common/util_base.py | pettershao-ragilenetworks/sonic-utilities | 553936b61a677b95a45a797c0e3ccdaf015cce94 | [
"Apache-2.0"
] | null | null | null |
import os
import sonic_platform
# Constants ====================================================================
PDDF_SUPPORT_FILE = '/usr/share/sonic/platform/pddf_support'
# Helper class
| 28.88 | 100 | 0.606648 |
18dbd268ee84904b28a7b1eab62ddc99c40934ff | 2,900 | py | Python | consensus_engine/tests/test_view_create_proposal.py | jonsaunders-git/consensus_engine | 6fc2b3df7b342d4dff919969329c8b586e33a9d3 | [
"MIT"
] | null | null | null | consensus_engine/tests/test_view_create_proposal.py | jonsaunders-git/consensus_engine | 6fc2b3df7b342d4dff919969329c8b586e33a9d3 | [
"MIT"
] | 4 | 2021-06-05T00:03:14.000Z | 2021-09-22T19:41:03.000Z | consensus_engine/tests/test_view_create_proposal.py | jonsaunders-git/consensus_engine | 6fc2b3df7b342d4dff919969329c8b586e33a9d3 | [
"MIT"
] | null | null | null | from django.test import TestCase, RequestFactory
from .mixins import TwoUserMixin, ProposalGroupMixin, ViewMixin
from django.utils import timezone
from consensus_engine.views import CreateProposalView
from consensus_engine.forms import ProposalForm
from consensus_engine.models import Proposal
from django.core.exceptions import PermissionDenied
| 46.774194 | 106 | 0.632759 |
18dc89f687d6010723363d00fb4079f119453e21 | 290 | py | Python | tests/jdi_uitests_webtests/main/page_objects/w3c_site/w3c_site.py | jdi-testing/jdi-python | 7c0607b97d4d44b27ea8f532d47c68b8dd00e6f7 | [
"MIT"
] | 5 | 2020-02-14T10:32:01.000Z | 2021-07-22T08:20:28.000Z | tests/jdi_uitests_webtests/main/page_objects/w3c_site/w3c_site.py | jdi-testing/jdi-python | 7c0607b97d4d44b27ea8f532d47c68b8dd00e6f7 | [
"MIT"
] | 54 | 2018-07-27T14:07:33.000Z | 2021-11-08T09:24:16.000Z | tests/jdi_uitests_webtests/main/page_objects/w3c_site/w3c_site.py | jdi-testing/jdi-python | 7c0607b97d4d44b27ea8f532d47c68b8dd00e6f7 | [
"MIT"
] | 1 | 2021-01-20T14:31:52.000Z | 2021-01-20T14:31:52.000Z | from JDI.web.selenium.elements.composite.web_site import WebSite
from tests.jdi_uitests_webtests.main.page_objects.w3c_site.frame_page import FramePage
| 32.222222 | 86 | 0.793103 |
18dca1ce28f6ce9649a6e926a3f6be554544907d | 1,382 | py | Python | tests/scrapers/test_scraper_composite.py | oluiscabral/stockopedia-scraper | 1050206d7a534f0e57eee84a5187615dc0af6bd9 | [
"MIT"
] | null | null | null | tests/scrapers/test_scraper_composite.py | oluiscabral/stockopedia-scraper | 1050206d7a534f0e57eee84a5187615dc0af6bd9 | [
"MIT"
] | null | null | null | tests/scrapers/test_scraper_composite.py | oluiscabral/stockopedia-scraper | 1050206d7a534f0e57eee84a5187615dc0af6bd9 | [
"MIT"
] | null | null | null | '''
@author: oluiscabral
'''
import unittest
from creationals.scraper_factory import ScraperFactory
from helpers.webdriver_factory import WebdriverFactory
from actioners.login_control import LoginControl
from ui.login_ui import LoginUI
from data_structure.data_ref import DataRef
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | 34.55 | 86 | 0.700434 |
18dcab3c94de533e1fad537525409735b1a45b43 | 22,917 | py | Python | deepx/backend/tensorflow.py | sharadmv/deepx | 07470e7a579a63427de1d5ff90b9fd00d3f54b61 | [
"MIT"
] | 74 | 2015-11-13T02:26:37.000Z | 2021-07-29T11:00:45.000Z | deepx/backend/tensorflow.py | sharadmv/deepx | 07470e7a579a63427de1d5ff90b9fd00d3f54b61 | [
"MIT"
] | 21 | 2015-12-12T20:33:55.000Z | 2019-04-03T02:49:42.000Z | deepx/backend/tensorflow.py | sharadmv/deepx | 07470e7a579a63427de1d5ff90b9fd00d3f54b61 | [
"MIT"
] | 19 | 2015-11-23T10:07:01.000Z | 2021-08-30T17:06:00.000Z | import copy
import logging
import numpy as np
import six
import tensorflow as tf
from functools import wraps
from contextlib import contextmanager
from .backend_base import BackendBase, FunctionBase, DeviceDecorator
try:
from tensorflow.contrib.distributions import fill_triangular
except:
print("Cannot find fill_triangular")
| 33.455474 | 116 | 0.595322 |
18dcc7a079d7a14db43a4e9f8cd6c7a80e6794d0 | 90,257 | py | Python | netharn/util/mplutil.py | JoshuaBeard/netharn | 90773542c47363e663ee58f20fd151eb89bc313b | [
"Apache-2.0"
] | null | null | null | netharn/util/mplutil.py | JoshuaBeard/netharn | 90773542c47363e663ee58f20fd151eb89bc313b | [
"Apache-2.0"
] | null | null | null | netharn/util/mplutil.py | JoshuaBeard/netharn | 90773542c47363e663ee58f20fd151eb89bc313b | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, division, print_function
import cv2
import pandas as pd
import numpy as np
import six
import ubelt as ub
from six.moves import zip_longest
from os.path import join, dirname
import warnings
def multi_plot(xdata=None, ydata=[], **kwargs):
r"""
plots multiple lines, bars, etc...
This is the big function that implements almost all of the heavy lifting in
this file. Any function not using this should probably find a way to use
it. It is pretty general and relatively clean.
Args:
xdata (ndarray): can also be a list of arrays
ydata (list or dict of ndarrays): can also be a single array
**kwargs:
Misc:
fnum, pnum, use_legend, legend_loc
Labels:
xlabel, ylabel, title, figtitle
ticksize, titlesize, legendsize, labelsize
Grid:
gridlinewidth, gridlinestyle
Ticks:
num_xticks, num_yticks, tickwidth, ticklength, ticksize
Data:
xmin, xmax, ymin, ymax, spread_list
# can append _list to any of these
# these can be dictionaries if ydata was also a dict
plot_kw_keys = ['label', 'color', 'marker', 'markersize',
'markeredgewidth', 'linewidth', 'linestyle']
any plot_kw key can be a scalar (corresponding to all ydatas),
a list if ydata was specified as a list, or a dict if ydata was
specified as a dict.
kind = ['bar', 'plot', ...]
if kind='plot':
spread
if kind='bar':
stacked, width
References:
matplotlib.org/examples/api/barchart_demo.html
CommandLine:
python -m netharn.util.mplutil multi_plot:0 --show
python -m netharn.util.mplutil multi_plot:1 --show
Example:
>>> autompl()
>>> xdata = [1, 2, 3, 4, 5]
>>> ydata_list = [[1, 2, 3, 4, 5], [3, 3, 3, 3, 3], [5, 4, np.nan, 2, 1], [4, 3, np.nan, 1, 0]]
>>> kwargs = {'label': ['spam', 'eggs', 'jam', 'pram'], 'linestyle': '-'}
>>> #fig = multi_plot(xdata, ydata_list, title='$\phi_1(\\vec{x})$', xlabel='\nfds', **kwargs)
>>> fig = multi_plot(xdata, ydata_list, title='', xlabel='\nfds', **kwargs)
>>> show_if_requested()
Example:
>>> autompl()
>>> fig1 = multi_plot([1, 2, 3], [4, 5, 6])
>>> fig2 = multi_plot([1, 2, 3], [4, 5, 6], fnum=4)
>>> show_if_requested()
"""
import matplotlib as mpl
from matplotlib import pyplot as plt
ydata_list = ydata
if isinstance(ydata_list, dict):
# Special case where ydata is a dictionary
if isinstance(xdata, six.string_types):
# Special-er case where xdata is specified in ydata
xkey = xdata
ykeys = set(ydata_list.keys()) - {xkey}
xdata = ydata_list[xkey]
else:
ykeys = list(ydata_list.keys())
# Normalize input
ydata_list = list(ub.take(ydata_list, ykeys))
kwargs['label_list'] = kwargs.get('label_list', ykeys)
else:
ykeys = None
# allow ydata_list to be passed without a container
if is_list_of_scalars(ydata_list):
ydata_list = [np.array(ydata_list)]
if xdata is None:
xdata = list(range(len(ydata_list[0])))
num_lines = len(ydata_list)
# Transform xdata into xdata_list
if is_list_of_lists(xdata):
xdata_list = [np.array(xd, copy=True) for xd in xdata]
else:
xdata_list = [np.array(xdata, copy=True)] * num_lines
fnum = ensure_fnum(kwargs.get('fnum', None))
pnum = kwargs.get('pnum', None)
kind = kwargs.get('kind', 'plot')
transpose = kwargs.get('transpose', False)
def parsekw_list(key, kwargs, num_lines=num_lines, ykeys=ykeys):
""" copies relevant plot commands into plot_list_kw """
if key in kwargs:
val_list = kwargs[key]
elif key + '_list' in kwargs:
warnings.warn('*_list is depricated, just use kwarg {}'.format(key))
val_list = kwargs[key + '_list']
elif key + 's' in kwargs:
# hack, multiple ways to do something
warnings.warn('*s depricated, just use kwarg {}'.format(key))
val_list = kwargs[key + 's']
else:
val_list = None
if val_list is not None:
if isinstance(val_list, dict):
if ykeys is None:
raise ValueError('ydata is not a dict, but a property was.')
else:
val_list = [val_list[key] for key in ykeys]
if not isinstance(val_list, list):
val_list = [val_list] * num_lines
return val_list
# Parse out arguments to ax.plot
plot_kw_keys = ['label', 'color', 'marker', 'markersize',
'markeredgewidth', 'linewidth', 'linestyle', 'alpha']
# hackish / extra args that dont go to plot, but help
extra_plot_kw_keys = ['spread_alpha', 'autolabel', 'edgecolor', 'fill']
plot_kw_keys += extra_plot_kw_keys
plot_ks_vals = [parsekw_list(key, kwargs) for key in plot_kw_keys]
plot_list_kw = dict([
(key, vals)
for key, vals in zip(plot_kw_keys, plot_ks_vals) if vals is not None
])
if 'color' not in plot_list_kw:
plot_list_kw['color'] = distinct_colors(num_lines)
if kind == 'plot':
if 'marker' not in plot_list_kw:
plot_list_kw['marker'] = distinct_markers(num_lines)
if 'spread_alpha' not in plot_list_kw:
plot_list_kw['spread_alpha'] = [.2] * num_lines
if kind == 'bar':
# Remove non-bar kwargs
for key in ['markeredgewidth', 'linewidth', 'marker', 'markersize', 'linestyle']:
plot_list_kw.pop(key, None)
stacked = kwargs.get('stacked', False)
width_key = 'height' if transpose else 'width'
if 'width_list' in kwargs:
plot_list_kw[width_key] = kwargs['width_list']
else:
width = kwargs.get('width', .9)
# if width is None:
# # HACK: need variable width
# # width = np.mean(np.diff(xdata_list[0]))
# width = .9
if not stacked:
width /= num_lines
#plot_list_kw['orientation'] = ['horizontal'] * num_lines
plot_list_kw[width_key] = [width] * num_lines
spread_list = kwargs.get('spread_list', None)
if spread_list is None:
pass
# nest into a list of dicts for each line in the multiplot
valid_keys = list(set(plot_list_kw.keys()) - set(extra_plot_kw_keys))
valid_vals = list(ub.dict_take(plot_list_kw, valid_keys))
plot_kw_list = [dict(zip(valid_keys, vals)) for vals in zip(*valid_vals)]
extra_kw_keys = [key for key in extra_plot_kw_keys if key in plot_list_kw]
extra_kw_vals = list(ub.dict_take(plot_list_kw, extra_kw_keys))
extra_kw_list = [dict(zip(extra_kw_keys, vals)) for vals in zip(*extra_kw_vals)]
# Get passed in axes or setup a new figure
ax = kwargs.get('ax', None)
if ax is None:
doclf = kwargs.get('doclf', False)
fig = figure(fnum=fnum, pnum=pnum, docla=False, doclf=doclf)
ax = plt.gca()
else:
plt.sca(ax)
fig = ax.figure
# +---------------
# Draw plot lines
ydata_list = np.array(ydata_list)
if transpose:
if kind == 'bar':
plot_func = ax.barh
elif kind == 'plot':
else:
plot_func = getattr(ax, kind) # usually ax.plot
assert len(ydata_list) > 0, 'no ydata'
#assert len(extra_kw_list) == len(plot_kw_list), 'bad length'
#assert len(extra_kw_list) == len(ydata_list), 'bad length'
_iter = enumerate(zip_longest(xdata_list, ydata_list, plot_kw_list, extra_kw_list))
for count, (_xdata, _ydata, plot_kw, extra_kw) in _iter:
ymask = np.isfinite(_ydata)
ydata_ = _ydata.compress(ymask)
xdata_ = _xdata.compress(ymask)
if kind == 'bar':
if stacked:
# Plot bars on top of each other
xdata_ = xdata_
else:
# Plot bars side by side
baseoffset = (width * num_lines) / 2
lineoffset = (width * count)
offset = baseoffset - lineoffset # Fixeme for more histogram bars
xdata_ = xdata_ - offset
# width_key = 'height' if transpose else 'width'
# plot_kw[width_key] = np.diff(xdata)
objs = plot_func(xdata_, ydata_, **plot_kw)
if kind == 'bar':
if extra_kw is not None and 'edgecolor' in extra_kw:
for rect in objs:
rect.set_edgecolor(extra_kw['edgecolor'])
if extra_kw is not None and extra_kw.get('autolabel', False):
# FIXME: probably a more cannonical way to include bar
# autolabeling with tranpose support, but this is a hack that
# works for now
for rect in objs:
if transpose:
numlbl = width = rect.get_width()
xpos = width + ((_xdata.max() - _xdata.min()) * .005)
ypos = rect.get_y() + rect.get_height() / 2.
ha, va = 'left', 'center'
else:
numlbl = height = rect.get_height()
xpos = rect.get_x() + rect.get_width() / 2.
ypos = 1.05 * height
ha, va = 'center', 'bottom'
barlbl = '%.3f' % (numlbl,)
ax.text(xpos, ypos, barlbl, ha=ha, va=va)
# print('extra_kw = %r' % (extra_kw,))
if kind == 'plot' and extra_kw.get('fill', False):
ax.fill_between(_xdata, ydata_, alpha=plot_kw.get('alpha', 1.0),
color=plot_kw.get('color', None)) # , zorder=0)
if spread_list is not None:
# Plots a spread around plot lines usually indicating standard
# deviation
_xdata = np.array(_xdata)
spread = spread_list[count]
ydata_ave = np.array(ydata_)
y_data_dev = np.array(spread)
y_data_max = ydata_ave + y_data_dev
y_data_min = ydata_ave - y_data_dev
ax = plt.gca()
spread_alpha = extra_kw['spread_alpha']
ax.fill_between(_xdata, y_data_min, y_data_max, alpha=spread_alpha,
color=plot_kw.get('color', None)) # , zorder=0)
# L________________
#max_y = max(np.max(y_data), max_y)
#min_y = np.min(y_data) if min_y is None else min(np.min(y_data), min_y)
ydata = _ydata # HACK
xdata = _xdata # HACK
if transpose:
#xdata_list = ydata_list
ydata = xdata
# Hack / Fix any transpose issues
kwargs = {transpose_key(key): val for key, val in kwargs.items()}
# Setup axes labeling
title = kwargs.get('title', None)
xlabel = kwargs.get('xlabel', '')
ylabel = kwargs.get('ylabel', '')
xlabel = none_or_unicode(xlabel)
ylabel = none_or_unicode(ylabel)
title = none_or_unicode(title)
# Initial integration with mpl rcParams standards
mplrc = mpl.rcParams.copy()
mplrc.update({
# 'legend.fontsize': custom_figure.LEGEND_SIZE,
# 'axes.titlesize': custom_figure.TITLE_SIZE,
# 'axes.labelsize': custom_figure.LABEL_SIZE,
# 'legend.facecolor': 'w',
# 'font.family': 'sans-serif',
# 'xtick.labelsize': custom_figure.TICK_SIZE,
# 'ytick.labelsize': custom_figure.TICK_SIZE,
})
mplrc.update(kwargs.get('rcParams', {}))
titlesize = kwargs.get('titlesize', mplrc['axes.titlesize'])
labelsize = kwargs.get('labelsize', mplrc['axes.labelsize'])
legendsize = kwargs.get('legendsize', mplrc['legend.fontsize'])
xticksize = kwargs.get('ticksize', mplrc['xtick.labelsize'])
yticksize = kwargs.get('ticksize', mplrc['ytick.labelsize'])
family = kwargs.get('fontfamily', mplrc['font.family'])
tickformat = kwargs.get('tickformat', None)
ytickformat = kwargs.get('ytickformat', tickformat)
xtickformat = kwargs.get('xtickformat', tickformat)
# 'DejaVu Sans','Verdana', 'Arial'
weight = kwargs.get('fontweight', None)
if weight is None:
weight = 'normal'
labelkw = {
'fontproperties': mpl.font_manager.FontProperties(
weight=weight,
family=family, size=labelsize)
}
ax.set_xlabel(xlabel, **labelkw)
ax.set_ylabel(ylabel, **labelkw)
tick_fontprop = mpl.font_manager.FontProperties(family=family,
weight=weight)
if tick_fontprop is not None:
for ticklabel in ax.get_xticklabels():
ticklabel.set_fontproperties(tick_fontprop)
for ticklabel in ax.get_yticklabels():
ticklabel.set_fontproperties(tick_fontprop)
if xticksize is not None:
for ticklabel in ax.get_xticklabels():
ticklabel.set_fontsize(xticksize)
if yticksize is not None:
for ticklabel in ax.get_yticklabels():
ticklabel.set_fontsize(yticksize)
if xtickformat is not None:
# mpl.ticker.StrMethodFormatter # newstyle
# mpl.ticker.FormatStrFormatter # oldstyle
ax.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter(xtickformat))
if ytickformat is not None:
ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter(ytickformat))
xtick_kw = ytick_kw = {
'width': kwargs.get('tickwidth', None),
'length': kwargs.get('ticklength', None),
}
xtick_kw = {k: v for k, v in xtick_kw.items() if v is not None}
ytick_kw = {k: v for k, v in ytick_kw.items() if v is not None}
ax.xaxis.set_tick_params(**xtick_kw)
ax.yaxis.set_tick_params(**ytick_kw)
#ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))
# Setup axes limits
if 'xlim' in kwargs:
xlim = kwargs['xlim']
if xlim is not None:
if 'xmin' not in kwargs and 'xmax' not in kwargs:
kwargs['xmin'] = xlim[0]
kwargs['xmax'] = xlim[1]
else:
raise ValueError('use xmax, xmin instead of xlim')
if 'ylim' in kwargs:
ylim = kwargs['ylim']
if ylim is not None:
if 'ymin' not in kwargs and 'ymax' not in kwargs:
kwargs['ymin'] = ylim[0]
kwargs['ymax'] = ylim[1]
else:
raise ValueError('use ymax, ymin instead of ylim')
xmin = kwargs.get('xmin', ax.get_xlim()[0])
xmax = kwargs.get('xmax', ax.get_xlim()[1])
ymin = kwargs.get('ymin', ax.get_ylim()[0])
ymax = kwargs.get('ymax', ax.get_ylim()[1])
text_type = six.text_type
if text_type(xmax) == 'data':
xmax = max([xd.max() for xd in xdata_list])
if text_type(xmin) == 'data':
xmin = min([xd.min() for xd in xdata_list])
# Setup axes ticks
num_xticks = kwargs.get('num_xticks', None)
num_yticks = kwargs.get('num_yticks', None)
if num_xticks is not None:
# TODO check if xdata is integral
if xdata.dtype.kind == 'i':
xticks = np.linspace(np.ceil(xmin), np.floor(xmax),
num_xticks).astype(np.int32)
else:
xticks = np.linspace((xmin), (xmax), num_xticks)
ax.set_xticks(xticks)
if num_yticks is not None:
if ydata.dtype.kind == 'i':
yticks = np.linspace(np.ceil(ymin), np.floor(ymax),
num_yticks).astype(np.int32)
else:
yticks = np.linspace((ymin), (ymax), num_yticks)
ax.set_yticks(yticks)
force_xticks = kwargs.get('force_xticks', None)
if force_xticks is not None:
xticks = np.array(sorted(ax.get_xticks().tolist() + force_xticks))
ax.set_xticks(xticks)
yticklabels = kwargs.get('yticklabels', None)
if yticklabels is not None:
# Hack ONLY WORKS WHEN TRANSPOSE = True
# Overrides num_yticks
ax.set_yticks(ydata)
ax.set_yticklabels(yticklabels)
xticklabels = kwargs.get('xticklabels', None)
if xticklabels is not None:
# Overrides num_xticks
ax.set_xticks(xdata)
ax.set_xticklabels(xticklabels)
xtick_rotation = kwargs.get('xtick_rotation', None)
if xtick_rotation is not None:
[lbl.set_rotation(xtick_rotation)
for lbl in ax.get_xticklabels()]
ytick_rotation = kwargs.get('ytick_rotation', None)
if ytick_rotation is not None:
[lbl.set_rotation(ytick_rotation)
for lbl in ax.get_yticklabels()]
# Axis padding
xpad = kwargs.get('xpad', None)
ypad = kwargs.get('ypad', None)
xpad_factor = kwargs.get('xpad_factor', None)
ypad_factor = kwargs.get('ypad_factor', None)
if xpad is None and xpad_factor is not None:
xpad = (xmax - xmin) * xpad_factor
if ypad is None and ypad_factor is not None:
ypad = (ymax - ymin) * ypad_factor
xpad = 0 if xpad is None else xpad
ypad = 0 if ypad is None else ypad
ypad_high = kwargs.get('ypad_high', ypad)
ypad_low = kwargs.get('ypad_low', ypad)
xpad_high = kwargs.get('xpad_high', xpad)
xpad_low = kwargs.get('xpad_low', xpad)
xmin, xmax = (xmin - xpad_low), (xmax + xpad_high)
ymin, ymax = (ymin - ypad_low), (ymax + ypad_high)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
xscale = kwargs.get('xscale', None)
yscale = kwargs.get('yscale', None)
if yscale is not None:
ax.set_yscale(yscale)
if xscale is not None:
ax.set_xscale(xscale)
gridlinestyle = kwargs.get('gridlinestyle', None)
gridlinewidth = kwargs.get('gridlinewidth', None)
gridlines = ax.get_xgridlines() + ax.get_ygridlines()
if gridlinestyle:
for line in gridlines:
line.set_linestyle(gridlinestyle)
if gridlinewidth:
for line in gridlines:
line.set_linewidth(gridlinewidth)
# Setup title
if title is not None:
titlekw = {
'fontproperties': mpl.font_manager.FontProperties(
family=family,
weight=weight,
size=titlesize)
}
ax.set_title(title, **titlekw)
use_legend = kwargs.get('use_legend', 'label' in valid_keys)
legend_loc = kwargs.get('legend_loc', 'best')
legend_alpha = kwargs.get('legend_alpha', 1.0)
if use_legend:
legendkw = {
'alpha': legend_alpha,
'fontproperties': mpl.font_manager.FontProperties(
family=family,
weight=weight,
size=legendsize)
}
legend(loc=legend_loc, ax=ax, **legendkw)
figtitle = kwargs.get('figtitle', None)
if figtitle is not None:
set_figtitle(figtitle, fontfamily=family, fontweight=weight,
size=kwargs.get('figtitlesize'))
use_darkbackground = kwargs.get('use_darkbackground', None)
lightbg = kwargs.get('lightbg', None)
if lightbg is None:
lightbg = True
if use_darkbackground is None:
use_darkbackground = not lightbg
if use_darkbackground:
_dark_background(force=use_darkbackground is True)
# TODO: return better info
return fig
def figure(fnum=None, pnum=(1, 1, 1), title=None, figtitle=None, doclf=False,
           docla=False, projection=None, **kwargs):
    """
    Ensures a figure exists and activates a subplot within it.

    http://matplotlib.org/users/gridspec.html

    Args:
        fnum (int): fignum = figure number
        pnum (int, str, or tuple(int, int, int)): plotnum = plot tuple
        title (str): axes title (default = None)
        figtitle (None): figure suptitle (default = None)
        doclf (bool): if True, clear the figure first (default = False)
        docla (bool): accepted for API compatibility; not used by this
            implementation (default = False)
        projection: accepted for API compatibility; not used by this
            implementation

    Returns:
        mpl.Figure: fig

    Example:
        >>> autompl()
        >>> import matplotlib.pyplot as plt
        >>> fnum = 1
        >>> fig = figure(fnum, (2, 2, 1))
        >>> plt.gca().text(0.5, 0.5, "ax1", va="center", ha="center")
        >>> fig = figure(fnum, (2, 2, 2))
        >>> plt.gca().text(0.5, 0.5, "ax2", va="center", ha="center")
        >>> show_if_requested()
    """
    import matplotlib.pyplot as plt
    fig = ensure_fig(fnum)
    if doclf:
        fig.clf()
    if pnum is not None:
        # Delegate subplot creation / selection to the shared helper
        _setup_subfigure(pnum)
    # Set the title / figtitle
    if title is not None:
        ax = plt.gca()
        ax.set_title(title)
    if figtitle is not None:
        fig.suptitle(figtitle)
    return fig
def adjust_subplots(left=None, right=None, bottom=None, top=None, wspace=None,
                    hspace=None, fig=None):
    """
    Adjusts subplot spacing; any parameter left as None keeps its current
    value on the figure.

    Kwargs:
        left (float): left side of the subplots of the figure
        right (float): right side of the subplots of the figure
        bottom (float): bottom of the subplots of the figure
        top (float): top of the subplots of the figure
        wspace (float): width reserved for blank space between subplots
        hspace (float): height reserved for blank space between subplots
        fig: target figure; defaults to ``plt.gcf()``
    """
    from matplotlib import pyplot as plt
    kwargs = dict(left=left, right=right, bottom=bottom, top=top,
                  wspace=wspace, hspace=hspace)
    kwargs = {k: v for k, v in kwargs.items() if v is not None}
    if fig is None:
        fig = plt.gcf()
    subplotpars = fig.subplotpars
    # Read current values via the public attributes instead of copying
    # __dict__ and deleting 'validate'. The private contents of __dict__
    # differ between matplotlib versions, so the old approach could raise
    # KeyError or pass unexpected keys to subplots_adjust.
    adjust_dict = {key: getattr(subplotpars, key)
                   for key in ('left', 'right', 'bottom', 'top',
                               'wspace', 'hspace')}
    adjust_dict.update(kwargs)
    fig.subplots_adjust(**adjust_dict)
def savefig2(fig, fpath, **kwargs):
    """
    Does a tight layout and saves the figure with transparency

    Args:
        fig: matplotlib figure to save
        fpath (str): output file path
        **kwargs: forwarded to ``fig.savefig``; ``transparent`` defaults to
            True and ``extent`` defaults to the union of all axes extents.
    """
    import matplotlib as mpl
    kwargs.setdefault('transparent', True)
    if 'extent' not in kwargs:
        # Union of the bounding boxes of every axes in the figure
        kwargs['extent'] = mpl.transforms.Bbox.union(extract_axes_extents(fig))
    fig.savefig(fpath, **kwargs)
def copy_figure_to_clipboard(fig):
    """
    Renders a figure to an image and places it on the system clipboard.

    Requires the Qt5 matplotlib backend (uses its qApp / QtGui handles)
    and cv2 for the BGRA -> RGBA channel conversion.

    Args:
        fig: matplotlib figure to copy

    References:
        https://stackoverflow.com/questions/17676373/python-matplotlib-pyqt-copy-image-to-clipboard
    """
    print('Copying figure %d to the clipboard' % fig.number)
    import matplotlib as mpl
    app = mpl.backends.backend_qt5.qApp
    QtGui = mpl.backends.backend_qt5.QtGui
    # Render offscreen; result is BGRA, Qt wants RGBA
    im_bgra = render_figure_to_image(fig, transparent=True)
    im_rgba = cv2.cvtColor(im_bgra, cv2.COLOR_BGRA2RGBA)
    im = im_rgba
    QImage = QtGui.QImage
    qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QImage.Format_RGBA8888)
    clipboard = app.clipboard()
    clipboard.setImage(qim)
    # Alternative approaches kept for reference:
    # size = fig.canvas.size()
    # width, height = size.width(), size.height()
    # qim = QtGui.QImage(fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32)
    # QtWidgets = mpl.backends.backend_qt5.QtWidgets
    # pixmap = QtWidgets.QWidget.grab(fig.canvas)
    # clipboard.setPixmap(pixmap)
def dict_intersection(dict1, dict2):
    r"""
    Computes the key AND value intersection of two dictionaries.

    Args:
        dict1 (dict):
        dict2 (dict):

    Returns:
        dict: items whose key exists in both inputs with an equal value.
            If ``dict1`` is an OrderedDict, its key order is preserved and
            an OrderedDict is returned.

    Example:
        >>> # ENABLE_DOCTEST
        >>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
        >>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}
        >>> mergedict_ = dict_intersection(dict1, dict2)
        >>> print(sorted(mergedict_.items()))
        [('b', 2), ('c', 3)]
    """
    # ub.odict is simply an alias of collections.OrderedDict; using the
    # stdlib class directly removes the ubelt dependency from this helper.
    from collections import OrderedDict
    isect_keys = set(dict1.keys()).intersection(set(dict2.keys()))
    # maintain order if possible
    if isinstance(dict1, OrderedDict):
        isect_keys_ = [k for k in dict1.keys() if k in isect_keys]
        _dict_cls = OrderedDict
    else:
        isect_keys_ = isect_keys
        _dict_cls = dict
    dict_isect = _dict_cls(
        (k, dict1[k]) for k in isect_keys_ if dict1[k] == dict2[k]
    )
    return dict_isect
def _dark_background(ax=None, doubleit=False, force=False):
    r"""
    Paints a near-black rectangle behind an axes (no-op unless ``force``).

    Args:
        ax (None): target axes; defaults to ``plt.gca()`` (default = None)
        doubleit (bool): if truthy, grow the rectangle around the axes
            (default = False)
        force (bool): the body only runs when this is truthy
    """
    import matplotlib as mpl
    from matplotlib import pyplot as plt
    if force:
        from mpl_toolkits.mplot3d import Axes3D
        BLACK = np.array(( 0, 0, 0, 255)) / 255.0
        # Should use mpl style dark background instead
        bgcolor = BLACK * .9
        if ax is None:
            ax = plt.gca()
        if isinstance(ax, Axes3D):
            # Axes.set_axis_bgcolor was removed in matplotlib 2.2;
            # use set_facecolor when available, fall back for old versions.
            if hasattr(ax, 'set_facecolor'):
                ax.set_facecolor(bgcolor)
            else:
                ax.set_axis_bgcolor(bgcolor)
            ax.tick_params(colors='white')
            return
        xy, width, height = _get_axis_xy_width_height(ax)
        if doubleit:
            # Expand the rectangle symmetrically around the axes
            halfw = (doubleit) * (width / 2)
            halfh = (doubleit) * (height / 2)
            xy = (xy[0] - halfw, xy[1] - halfh)
            width *= (doubleit + 1)
            height *= (doubleit + 1)
        rect = mpl.patches.Rectangle(xy, width, height, lw=0, zorder=0)
        rect.set_clip_on(True)
        rect.set_fill(True)
        rect.set_color(bgcolor)
        # Push far behind every other artist
        rect.set_zorder(-99999999999)
        rect = ax.add_patch(rect)
def _get_axis_xy_width_height(ax=None, xaug=0, yaug=0, waug=0, haug=0):
""" gets geometry of a subplot """
from matplotlib import pyplot as plt
if ax is None:
ax = plt.gca()
autoAxis = ax.axis()
xy = (autoAxis[0] + xaug, autoAxis[2] + yaug)
width = (autoAxis[1] - autoAxis[0]) + waug
height = (autoAxis[3] - autoAxis[2]) + haug
return xy, width, height
# Maps legend location strings to matplotlib's legacy integer location
# codes; used by `legend` to validate its `loc` argument.
_LEGEND_LOCATION = {
    'upper right':  1,
    'upper left':   2,
    'lower left':   3,
    'lower right':  4,
    'right':        5,
    'center left':  6,
    'center right': 7,
    'lower center': 8,
    'upper center': 9,
    'center':      10,
}
def set_figtitle(figtitle, subtitle='', forcefignum=True, incanvas=True,
                 size=None, fontfamily=None, fontweight=None,
                 fig=None):
    r"""
    Sets the figure title both inside the canvas and on the window bar.

    Args:
        figtitle (str): main title text (None is treated as '')
        subtitle (str): (default = '')
        forcefignum (bool): (default = True)
        incanvas (bool): if True draw the suptitle inside the canvas,
            otherwise clear it (default = True)
        fontfamily (None): (default = None)
        fontweight (None): (default = None)
        size (None): (default = None)
        fig (None): defaults to ``plt.gcf()``

    Example:
        >>> # DISABLE_DOCTEST
        >>> autompl()
        >>> fig = figure(fnum=1, doclf=True)
        >>> result = set_figtitle(figtitle='figtitle', fig=fig)
        >>> # xdoc: +REQUIRES(--show)
        >>> show_if_requested()
    """
    from matplotlib import pyplot as plt
    if figtitle is None:
        figtitle = ''
    if fig is None:
        fig = plt.gcf()
    figtitle = ub.ensure_unicode(figtitle)
    subtitle = ub.ensure_unicode(subtitle)
    if incanvas:
        if subtitle != '':
            subtitle = '\n' + subtitle
        prop = {
            'family': fontfamily,
            'weight': fontweight,
            'size': size,
        }
        prop = {k: v for k, v in prop.items() if v is not None}
        sup = fig.suptitle(figtitle + subtitle)
        if prop:
            # Tweak only the explicitly requested font properties
            fontproperties = sup.get_fontproperties().copy()
            for key, val in prop.items():
                getattr(fontproperties, 'set_' + key)(val)
            sup.set_fontproperties(fontproperties)
    else:
        fig.suptitle('')
    # Set title in the window
    window_figtitle = ('fig(%d) ' % fig.number) + figtitle
    window_figtitle = window_figtitle.replace('\n', ' ')
    # FigureCanvas.set_window_title was deprecated in matplotlib 3.4;
    # prefer the canvas manager when one exists (e.g. interactive backends)
    manager = getattr(fig.canvas, 'manager', None)
    if manager is not None:
        manager.set_window_title(window_figtitle)
    else:
        fig.canvas.set_window_title(window_figtitle)
def legend(loc='best', fontproperties=None, size=None, fc='w', alpha=1,
           ax=None, handles=None):
    r"""
    Draws a legend on an axes with a configurable font and frame.

    Args:
        loc (str): legend location name (default = 'best')
        fontproperties (dict): font properties passed to ``ax.legend``
            (default = None)
        size (None): font size shortcut, ignored when ``fontproperties``
            is given (default = None)
        fc: legend frame face color (default = 'w')
        alpha: legend frame alpha (default = 1)
        ax: target axes; defaults to ``plt.gca()``
        handles: explicit legend handles (default = None)

    Raises:
        ValueError: if ``loc`` is not a recognized legend location
    """
    from matplotlib import pyplot as plt
    # Raise instead of assert: asserts are stripped under `python -O`
    if loc not in _LEGEND_LOCATION and loc != 'best':
        raise ValueError('invalid loc. try one of %r' % (_LEGEND_LOCATION,))
    if ax is None:
        ax = plt.gca()
    if fontproperties is None:
        prop = {}
        if size is not None:
            prop['size'] = size
        # prop['weight'] = 'normal'
        # prop['family'] = 'sans-serif'
    else:
        prop = fontproperties
    legendkw = dict(loc=loc)
    if prop:
        legendkw['prop'] = prop
    if handles is not None:
        legendkw['handles'] = handles
    # Use a distinct local name so the function name is not shadowed
    leg = ax.legend(**legendkw)
    if leg:
        leg.get_frame().set_fc(fc)
        leg.get_frame().set_alpha(alpha)
def distinct_colors(N, brightness=.878, randomize=True, hue_range=(0.0, 1.0), cmap_seed=None):
    r"""
    Generates N visually distinct RGB colors.

    Args:
        N (int): number of colors to generate
        brightness (float): saturation and value used for the HSV sweep
        randomize (bool): if True, deterministically shuffles the result
        hue_range (tuple): (min, max) hue of the HSV sweep
        cmap_seed (str): if given, samples jittered positions of a seeded
            matplotlib colormap instead of the HSV sweep

    Returns:
        list: RGB_tuples

    References:
        http://blog.jianhuashao.com/2011/09/generate-n-distinct-colors.html
    """
    # TODO: Add sin wave modulation to the sat and value
    import colorsys
    remove_yellow = True  # skip a hue band of hard-to-see yellows
    use_jet = False
    if use_jet:
        # Lazy import: matplotlib only needed for the colormap branches
        import matplotlib.pyplot as plt
        cmap = plt.cm.jet
        RGB_tuples = list(map(tuple, cmap(np.linspace(0, 1, N))))
    elif cmap_seed is not None:
        import matplotlib.pyplot as plt
        # Randomized map based on a seed
        choices = [
            #'Set1', 'Dark2',
            'jet',
            #'gist_rainbow',
            #'rainbow',
            #'gnuplot',
            #'Accent'
        ]
        cmap_hack = ub.argval('--cmap-hack', default=None)
        ncolor_hack = ub.argval('--ncolor-hack', default=None)
        if cmap_hack is not None:
            choices = [cmap_hack]
        if ncolor_hack is not None:
            N = int(ncolor_hack)
        N_ = N
        seed = sum(list(map(ord, ub.hash_data(cmap_seed))))
        rng = np.random.RandomState(seed + 48930)
        cmap_str = rng.choice(choices, 1)[0]
        cmap = plt.cm.get_cmap(cmap_str)
        # Jitter evenly spaced samples, then reflect back into [0, 1]
        jitter = (rng.randn(N) / (rng.randn(100).max() / 2)).clip(-1, 1) * ((1 / (N ** 2)))
        range_ = np.linspace(0, 1, N, endpoint=False)
        range_ = range_ + jitter
        while not (np.all(range_ >= 0) and np.all(range_ <= 1)):
            range_[range_ < 0] = np.abs(range_[range_ < 0] )
            range_[range_ > 1] = 2 - range_[range_ > 1]
        # Random global phase shift
        shift = rng.rand()
        range_ = (range_ + shift) % 1
        if ncolor_hack is not None:
            range_ = range_[0:N_]
        RGB_tuples = list(map(tuple, cmap(range_)))
    else:
        # Evenly sample hues, optionally skipping the yellow band
        sat = brightness
        val = brightness
        hmin, hmax = hue_range
        if remove_yellow:
            hue_skips = [(.13, .24)]
        else:
            hue_skips = []
        hue_skip_ranges = [_[1] - _[0] for _ in hue_skips]
        total_skip = sum(hue_skip_ranges)
        hmax_ = hmax - total_skip
        # FIX: np.float was removed in numpy 1.20; use the builtin float
        hue_list = np.linspace(hmin, hmax_, N, endpoint=False, dtype=float)
        # Remove colors (like hard to see yellows) in specified ranges
        for skip, range_ in zip(hue_skips, hue_skip_ranges):
            hue_list = [hue if hue <= skip[0] else hue + range_ for hue in hue_list]
        HSV_tuples = [(hue, sat, val) for hue in hue_list]
        RGB_tuples = [colorsys.hsv_to_rgb(*x) for x in HSV_tuples]
    if randomize:
        # In-place deterministic shuffle (seeded)
        deterministic_shuffle(RGB_tuples)
    return RGB_tuples
def distinct_markers(num, style='astrisk', total=None, offset=0):
    r"""
    Generates ``num`` distinct matplotlib marker tuples.

    Args:
        num (int): number of markers to generate
        style (str): one of 'astrisk' (historical misspelling, kept for
            backwards compatibility), 'asterisk', 'star', 'polygon', 'circle'
        total (int): total size of the marker set used to spread rotations
            across; defaults to ``num``
        offset (float): rotation phase offset

    Returns:
        list: marker tuples ``(num_sides, style_code, rotation_degrees)``
            usable as matplotlib ``marker`` arguments

    Raises:
        KeyError: if ``style`` is not a recognized style name
    """
    num_sides = 3
    style_num = {
        'astrisk': 2,    # original misspelled key, kept so old callers work
        'asterisk': 2,   # correctly spelled alias
        'star': 1,
        'polygon': 0,
        'circle': 3,
    }[style]
    if total is None:
        total = num
    total_degrees = 360 / num_sides
    # Rotate each marker a little so consecutive markers look distinct
    return [
        (num_sides, style_num, total_degrees * (count + offset) / total)
        for count in range(num)
    ]
def deterministic_shuffle(list_, rng=0):
    r"""
    Shuffles a list in place using a seedable random number generator.

    Args:
        list_ (list): list to shuffle in place
        rng (int or RandomState): seed or rng object (default = 0)

    Returns:
        list: the same (now shuffled) list object

    Example:
        >>> list_ = [1, 2, 3, 4, 5, 6]
        >>> seed = 1
        >>> list_ = deterministic_shuffle(list_, seed)
        >>> result = str(list_)
        >>> print(result)
        [3, 2, 5, 1, 4, 6]
    """
    from netharn import util
    util.ensure_rng(rng).shuffle(list_)
    return list_
# NOTE(review): base figure-number offset; no usage is visible in this
# chunk — presumably consumed by figure-number helpers elsewhere in the file
_BASE_FNUM = 9001
def show_if_requested(N=1):
    """
    Used at the end of tests. Handles command line arguments for saving
    figures (``--save`` / ``--saveparts``) and showing them (``--show``).

    Args:
        N (int): unused; kept for backwards compatibility with callers

    References:
        http://stackoverflow.com/questions/4325733/save-a-subplot-in-matplotlib
    """
    import matplotlib.pyplot as plt
    # Renamed local to avoid shadowing the module-level `save_parts` function
    save_parts_flag = ub.argflag('--saveparts')
    fpath_ = ub.argval('--save', default=None)
    if fpath_ is None:
        # --saveparts may carry the path directly and implies part-saving
        fpath_ = ub.argval('--saveparts', default=None)
        save_parts_flag = True
    if fpath_ is not None:
        _save_requested(fpath_, save_parts_flag)
    if ub.argflag('--show'):
        plt.show()
def save_parts(fig, fpath, grouped_axes=None, dpi=None):
    """
    Saves each group of axes in a figure as its own image file.

    FIXME: this works in mpl 2.0.0, but not 2.0.2

    Args:
        fig: matplotlib figure to split and save
        fpath (str): base file path; each part gets an 'A', 'B', ... suffix
        grouped_axes: list of axes-lists saved together; defaults to one
            group per axes in the figure
        dpi (None): output resolution (default = None)

    Returns:
        list: subpaths of the saved part images
    """
    if dpi:
        # Need to set figure dpi before we draw
        fig.dpi = dpi
    # We need to draw the figure before calling get_window_extent
    # (or we can figure out how to set the renderer object)
    fig.canvas.draw()
    # Default grouping: each axes is saved by itself
    if grouped_axes is None:
        grouped_axes = [[ax] for ax in fig.axes]
    # Save-kwargs are identical for every part; build them once
    savekw = {
        'transparent': ub.argflag('--alpha'),
        'edgecolor': 'none',
    }
    if dpi is not None:
        savekw['dpi'] = dpi
    subpaths = []
    prog = ub.ProgIter(list(enumerate(grouped_axes)), label='save subfig')
    for count, axs in prog:
        # Suffix parts alphabetically: A, B, C, ...
        subpath = ub.augpath(fpath, suffix=chr(count + 65))
        extent = axes_extent(axs).transformed(fig.dpi_scale_trans.inverted())
        fig.savefig(subpath, bbox_inches=extent, **savekw)
        subpaths.append(subpath)
    return subpaths
# Module-level flag: set True once qtensure() has switched IPython to a Qt
# backend, so the pylab magic only runs once per session.
_qtensured = False
def _current_ipython_session():
"""
Returns a reference to the current IPython session, if one is running
"""
try:
__IPYTHON__
except NameError:
return None
else:
import IPython
ipython = IPython.get_ipython()
# if ipython is None we must have exited ipython at some point
return ipython
def qtensure():
    """
    If you are in an IPython session, ensures that your backend is Qt.

    Idempotent: the pylab magic only runs on the first call (tracked by the
    module-level ``_qtensured`` flag). No-op outside of IPython.
    """
    global _qtensured
    if _qtensured:
        return
    ipython = _current_ipython_session()
    if not ipython:
        return
    import sys
    # ipython.magic() is deprecated; run_line_magic is the supported API
    if 'PyQt4' in sys.modules:
        ipython.run_line_magic('pylab', 'qt4 --no-import-all')
    else:
        ipython.run_line_magic('pylab', 'qt5 --no-import-all')
    _qtensured = True
def aggensure():
    """
    Ensures that you are in agg mode as long as IPython is not running

    This might help prevent errors in tmux like:
        qt.qpa.screen: QXcbConnection: Could not connect to display localhost:10.0
        Could not connect to any X display.
    """
    import matplotlib as mpl
    current_backend = mpl.get_backend()
    # Backend names are case-insensitive ('agg' vs 'Agg'); normalize before
    # comparing so we don't needlessly switch an already-agg backend.
    if current_backend.lower() != 'agg':
        ipython = _current_ipython_session()
        if not ipython:
            set_mpl_backend('agg')
def set_mpl_backend(backend):
    """
    Args:
        backend (str): name of backend to use (e.g. Agg, PyQt)
    """
    import sys
    import matplotlib as mpl
    if backend.lower().startswith('qt'):
        # handle interactive qt case
        qtensure()
    if backend == mpl.get_backend():
        # already active; nothing to do
        return
    if 'matplotlib.pyplot' in sys.modules:
        # pyplot was already imported, so we must use the experimental
        # backend-switching behavior
        from matplotlib import pyplot as plt
        plt.switch_backend(backend)
    else:
        mpl.use(backend)
def autompl():
    """
    Uses platform heuristics to automatically set the mpl backend.
    If no display is available it will be set to agg, otherwise we will try
    to use the cross-platform Qt5Agg backend.
    """
    import os
    import sys
    if sys.platform.startswith('win32'):
        # TODO: something reasonable
        return
    # Headless (no X display) => agg; otherwise prefer Qt5Agg
    backend = 'Qt5Agg' if os.environ.get('DISPLAY', '') else 'agg'
    set_mpl_backend(backend)
def imshow(img, fnum=None, title=None, figtitle=None, pnum=None,
           interpolation='nearest', cmap=None, heatmap=False,
           data_colorbar=False, xlabel=None, redraw_image=True,
           colorspace='bgr', ax=None, alpha=None, norm=None, **kwargs):
    r"""
    Displays an image (array or file path) on a matplotlib axes.

    Args:
        img (ndarray): image data (or a path string, which is read first)
        fnum (int): figure number
        colorspace (str): if the data is 3-4 channels, this indicates the colorspace
            1 channel data is assumed grayscale. 4 channels assumes alpha.
        title (str):
        figtitle (None):
        pnum (tuple): plot number
        interpolation (str): other interpolations = nearest, bicubic, bilinear
        cmap (None):
        heatmap (bool): if True, forces the 'hot' colormap
        data_colorbar (bool): if True, adds a colorbar keyed by unique values
        darken (None):
        redraw_image (bool): used when calling imshow over and over. if false
            doesnt do the image part.

    Returns:
        tuple: (fig, ax)

    Kwargs:
        docla, doclf, projection

    Ignore:
        >>> autompl()
        >>> img_fpath = ut.grab_test_imgpath('carl.jpg')
        >>> img = util.imread(img_fpath)
        >>> (fig, ax) = imshow(img)
        >>> result = ('(fig, ax) = %s' % (str((fig, ax)),))
        >>> print(result)
        >>> ut.show_if_requested()
    """
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    if ax is not None:
        # Explicit axes given: reuse its figure and skip the special
        # vmin/vmax handling below
        fig = ax.figure
        nospecial = True
    else:
        fig = figure(fnum=fnum, pnum=pnum, title=title, figtitle=figtitle, **kwargs)
        ax = plt.gca()
        nospecial = False
        #ax.set_xticks([])
        #ax.set_yticks([])
        #return fig, ax
    if not redraw_image:
        # Caller only wants the handles, not an image redraw
        return fig, ax
    if isinstance(img, six.string_types):
        # Allow for path to image to be specified
        from netharn import util
        img_fpath = img
        img = util.imread(img_fpath)
    plt_imshow_kwargs = {
        'interpolation': interpolation,
        #'cmap': plt.get_cmap('gray'),
    }
    if alpha is not None:
        plt_imshow_kwargs['alpha'] = alpha
    if norm is not None:
        if norm is True:
            # norm=True is shorthand for a default Normalize instance
            norm = mpl.colors.Normalize()
        plt_imshow_kwargs['norm'] = norm
    else:
        if cmap is None and not heatmap and not nospecial:
            # Assume a uint8-style 0..255 value range by default
            plt_imshow_kwargs['vmin'] = 0
            plt_imshow_kwargs['vmax'] = 255
    if heatmap:
        cmap = 'hot'
    # Handle tensor chw format in most cases
    if img.ndim == 3:
        if img.shape[0] == 3 or img.shape[0] == 1:
            if img.shape[2] > 4:
                # probably in chw format
                img = img.transpose(1, 2, 0)
    try:
        if len(img.shape) == 3 and (img.shape[2] == 3 or img.shape[2] == 4):
            # img is in a color format
            from netharn import util
            dst_space = 'rgb'
            if img.shape[2] == 4:
                # carry the alpha channel through the conversion
                colorspace += 'a'
                dst_space += 'a'
            imgRGB = util.convert_colorspace(img, dst_space=dst_space,
                                             src_space=colorspace)
            if imgRGB.dtype.kind == 'f':
                maxval = imgRGB.max()
                if maxval > 1.01 and maxval < 256:
                    # float image holding 0..255 values; cast for display
                    imgRGB = np.array(imgRGB, dtype=np.uint8)
            ax.imshow(imgRGB, **plt_imshow_kwargs)
        elif len(img.shape) == 2 or (len(img.shape) == 3 and img.shape[2] == 1):
            # img is in grayscale
            if len(img.shape) == 3:
                imgGRAY = img.reshape(img.shape[0:2])
            else:
                imgGRAY = img
            if cmap is None:
                cmap = plt.get_cmap('gray')
            if isinstance(cmap, six.string_types):
                cmap = plt.get_cmap(cmap)
            # for some reason gray floats aren't working right
            if imgGRAY.max() <= 1.01 and imgGRAY.min() >= -1E-9:
                imgGRAY = (imgGRAY * 255).astype(np.uint8)
            ax.imshow(imgGRAY, cmap=cmap, **plt_imshow_kwargs)
        else:
            raise AssertionError(
                'unknown image format. img.dtype=%r, img.shape=%r' %
                (img.dtype, img.shape))
    except TypeError as te:
        print('[df2] imshow ERROR %r' % (te,))
        raise
    except Exception as ex:
        # Dump diagnostic info about the offending image, then re-raise
        print('!!!!!!!!!!!!!!WARNING!!!!!!!!!!!')
        print('[df2] type(img) = %r' % type(img))
        if not isinstance(img, np.ndarray):
            print('!!!!!!!!!!!!!!ERRROR!!!!!!!!!!!')
            pass
        #print('img = %r' % (img,))
        print('[df2] img.dtype = %r' % (img.dtype,))
        print('[df2] type(img) = %r' % (type(img),))
        print('[df2] img.shape = %r' % (img.shape,))
        print('[df2] imshow ERROR %r' % ex)
        raise
    #plt.set_cmap('gray')
    ax.set_xticks([])
    ax.set_yticks([])
    if data_colorbar is True:
        # Colorbar keyed by the unique values present in the image
        scores = np.unique(img.flatten())
        if cmap is None:
            cmap = 'hot'
        colors = scores_to_color(scores, cmap)
        colorbar(scores, colors)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if figtitle is not None:
        set_figtitle(figtitle)
    return fig, ax
def colorbar(scalars, colors, custom=False, lbl=None, ticklabels=None,
             float_format='%.2f', **kwargs):
    """
    adds a color bar next to the axes based on specific scalars

    Args:
        scalars (ndarray): data values that the colorbar spans
        colors (ndarray): one color per scalar (in corresponding order)
        custom (bool): use custom ticks; displays one discrete color and
            one tick per *unique* scalar value
        lbl (str): optional label drawn next to the colorbar
        ticklabels (list): explicit tick labels (only used when custom=False)
        float_format (str): %-style format used for float tick labels when
            custom=True

    Kwargs:
        See plt.colorbar

    Returns:
        cb : matplotlib colorbar object

    Ignore:
        >>> autompl()
        >>> scalars = np.array([-1, -2, 1, 1, 2, 7, 10])
        >>> cmap_ = 'plasma'
        >>> logscale = False
        >>> custom = True
        >>> reverse_cmap = True
        >>> val2_customcolor = {
        ...     -1: UNKNOWN_PURP,
        ...     -2: LIGHT_BLUE,
        ... }
        >>> colors = scores_to_color(scalars, cmap_=cmap_, logscale=logscale, reverse_cmap=reverse_cmap, val2_customcolor=val2_customcolor)
        >>> colorbar(scalars, colors, custom=custom)
        >>> df2.present()
        >>> show_if_requested()

    Ignore:
        >>> # ENABLE_DOCTEST
        >>> scalars = np.linspace(0, 1, 100)
        >>> cmap_ = 'plasma'
        >>> logscale = False
        >>> custom = False
        >>> reverse_cmap = False
        >>> colors = scores_to_color(scalars, cmap_=cmap_, logscale=logscale,
        >>>                          reverse_cmap=reverse_cmap)
        >>> colors = [lighten_rgb(c, .3) for c in colors]
        >>> colorbar(scalars, colors, custom=custom)
        >>> df2.present()
        >>> show_if_requested()
    """
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    assert len(scalars) == len(colors), 'scalars and colors must be corresponding'
    if len(scalars) == 0:
        return None
    # Parameters
    ax = plt.gca()
    # Attach the colorbar in a dedicated axes to the right of the current axes
    divider = _ensure_divider(ax)
    cax = divider.append_axes('right', size='5%', pad=0.05)
    # NOTE: xy/width/height are currently unused below (kept from older layout code)
    xy, width, height = _get_axis_xy_width_height(ax)
    #orientation = ['vertical', 'horizontal'][0]
    TICK_FONTSIZE = 8
    #
    # Create scalar mappable with cmap
    if custom:
        # FIXME: clean this code up and change the name custom
        # to be meaningful. It is more like: display unique colors
        unique_scalars, unique_idx = np.unique(scalars, return_index=True)
        unique_colors = np.array(colors)[unique_idx]
        #max_, min_ = unique_scalars.max(), unique_scalars.min()
        #extent_ = max_ - min_
        #bounds = np.linspace(min_, max_ + 1, extent_ + 2)
        listed_cmap = mpl.colors.ListedColormap(unique_colors)
        #norm = mpl.colors.BoundaryNorm(bounds, listed_cmap.N)
        #sm = mpl.cm.ScalarMappable(cmap=listed_cmap, norm=norm)
        sm = mpl.cm.ScalarMappable(cmap=listed_cmap)
        # Evenly spaced dummy array so tick positions can later be placed
        # between the color band boundaries
        sm.set_array(np.linspace(0, 1, len(unique_scalars) + 1))
    else:
        sorted_scalars = sorted(scalars)
        listed_cmap = scores_to_cmap(scalars, colors)
        sm = plt.cm.ScalarMappable(cmap=listed_cmap)
        sm.set_array(sorted_scalars)
    # Use mapable object to create the colorbar
    #COLORBAR_SHRINK = .42 # 1
    #COLORBAR_PAD = .01 # 1
    #COLORBAR_ASPECT = np.abs(20 * height / (width)) # 1
    cb = plt.colorbar(sm, cax=cax, **kwargs)
    ## Add the colorbar to the correct label
    #axis = cb.ax.yaxis # if orientation == 'horizontal' else cb.ax.yaxis
    #position = 'bottom' if orientation == 'horizontal' else 'right'
    #axis.set_ticks_position(position)
    # This line alone removes data
    # axis.set_ticks([0, .5, 1])
    if custom:
        ticks = np.linspace(0, 1, len(unique_scalars) + 1)
        if len(ticks) < 2:
            ticks += .5
        else:
            # SO HACKY (shifts each tick to the center of its color band)
            ticks += (ticks[1] - ticks[0]) / 2
        # Format labels for float scalars; otherwise use the raw values
        if isinstance(unique_scalars, np.ndarray) and unique_scalars.dtype.kind == 'f':
            ticklabels = [float_format % scalar for scalar in unique_scalars]
        else:
            ticklabels = unique_scalars
        cb.set_ticks(ticks)  # tick locations
        cb.set_ticklabels(ticklabels)  # tick labels
    elif ticklabels is not None:
        # Spread the caller-supplied labels evenly over the existing tick span
        ticks_ = cb.ax.get_yticks()
        mx = ticks_.max()
        mn = ticks_.min()
        ticks = np.linspace(mn, mx, len(ticklabels))
        cb.set_ticks(ticks)  # tick locations
        cb.set_ticklabels(ticklabels)
        #cb.ax.get_yticks()
        #cb.set_ticks(ticks) # tick locations
        #cb.set_ticklabels(ticklabels) # tick labels
    # _set_plotdat(cb.ax, 'viztype', 'colorbar-%s' % (lbl,))
    # _set_plotdat(cb.ax, 'sm', sm)
    # FIXME: Figure out how to make a maximum number of ticks
    # and to enforce them to be inside the data bounds
    cb.ax.tick_params(labelsize=TICK_FONTSIZE)
    # Sets current axis
    plt.sca(ax)
    if lbl is not None:
        cb.set_label(lbl)
    return cb
_DF2_DIVIDER_KEY = '_df2_divider'
def _get_plotdat(ax, key, default=None):
    """Look up an internal df2 property stored on a matplotlib axis.

    Returns ``default`` when the key has never been set.
    """
    return _get_plotdat_dict(ax).get(key, default)
def _set_plotdat(ax, key, val):
    """Store an internal df2 property on a matplotlib axis."""
    _get_plotdat_dict(ax)[key] = val
def _del_plotdat(ax, key):
    """Remove an internal df2 property from a matplotlib axis.

    Silently does nothing when the key is absent.
    """
    _get_plotdat_dict(ax).pop(key, None)
def _get_plotdat_dict(ax):
""" sets internal property to a matplotlib axis """
if '_plotdat' not in ax.__dict__:
ax.__dict__['_plotdat'] = {}
plotdat_dict = ax.__dict__['_plotdat']
return plotdat_dict
def _ensure_divider(ax):
    """ Returns previously constructed divider or creates one

    The divider is cached on the axis itself (via the _plotdat mechanism)
    so repeated colorbar calls reuse the same layout divider instead of
    shrinking the axes again.
    """
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    divider = _get_plotdat(ax, _DF2_DIVIDER_KEY, None)
    if divider is None:
        divider = make_axes_locatable(ax)
        _set_plotdat(ax, _DF2_DIVIDER_KEY, divider)
        orig_append_axes = divider.append_axes
        # Wrap append_axes so every axes created by this divider is also
        # registered on the parent axis under 'df2_div_axes'
        def df2_append_axes(divider, position, size, pad=None, add_to_figure=True, **kwargs):
            """ override divider add axes to register the divided axes """
            div_axes = _get_plotdat(ax, 'df2_div_axes', [])
            new_ax = orig_append_axes(position, size, pad=pad, add_to_figure=add_to_figure, **kwargs)
            div_axes.append(new_ax)
            _set_plotdat(ax, 'df2_div_axes', div_axes)
            return new_ax
        # Bind the wrapper to this divider instance so it behaves like a method
        new_method = df2_append_axes.__get__(divider, divider.__class__)
        setattr(divider, 'append_axes', new_method)
        # ut.inject_func_as_method(divider, df2_append_axes, 'append_axes', allow_override=True)
    return divider
def scores_to_color(score_list, cmap_='hot', logscale=False, reverse_cmap=False,
                    custom=False, val2_customcolor=None, score_range=None,
                    cmap_range=(.1, .9)):
    """
    Maps each scalar score to a color sampled from a matplotlib colormap.

    Other good colormaps are 'spectral', 'gist_rainbow', 'gist_ncar', 'Set1',
    'Set2', 'Accent'
    # TODO: plasma

    Args:
        score_list (ndarray): 1d array of scalar scores
        cmap_ (str): defaults to hot
        logscale (bool): apply a log transform to the scores before mapping
        reverse_cmap (bool): flip the colormap direction
        custom (bool): currently unused (see commented-out code below)
        val2_customcolor (dict): optional map of score value -> fixed color
            that overrides the colormap for exact score matches
        score_range (tuple): explicit (min, max) normalization range;
            defaults to the observed data range
        cmap_range (tuple): restricts to only a portion of the cmap to avoid extremes

    Returns:
        <class '_ast.ListComp'>

    Ignore:
        >>> ut.exec_funckw(scores_to_color, globals())
        >>> score_list = np.array([-1, -2, 1, 1, 2, 10])
        >>> # score_list = np.array([0, .1, .11, .12, .13, .8])
        >>> # score_list = np.linspace(0, 1, 100)
        >>> cmap_ = 'plasma'
        >>> colors = scores_to_color(score_list, cmap_)
        >>> imgRGB = util.atleast_nd(np.array(colors)[:, 0:3], 3, tofront=True)
        >>> imgRGB = imgRGB.astype(np.float32)
        >>> imgBGR = util.convert_colorspace(imgRGB, 'BGR', 'RGB')
        >>> imshow(imgBGR)
        >>> show_if_requested()

    Ignore:
        >>> score_list = np.array([-1, -2, 1, 1, 2, 10])
        >>> cmap_ = 'hot'
        >>> logscale = False
        >>> reverse_cmap = True
        >>> custom = True
        >>> val2_customcolor = {
        ...     -1: UNKNOWN_PURP,
        ...     -2: LIGHT_BLUE,
        ... }
    """
    import matplotlib.pyplot as plt
    assert len(score_list.shape) == 1, 'score must be 1d'
    if len(score_list) == 0:
        return []
    if logscale:
        # Hack
        score_list = apply_logscale(score_list)
        #if loglogscale
        #score_list = np.log2(np.log2(score_list + 2) + 1)
    #if isinstance(cmap_, six.string_types):
    cmap = plt.get_cmap(cmap_)
    #else:
    #    cmap = cmap_
    if reverse_cmap:
        cmap = reverse_colormap(cmap)
    #if custom:
    #    base_colormap = cmap
    #    data = score_list
    #    cmap = customize_colormap(score_list, base_colormap)
    # Determine the normalization range (data-driven or caller-supplied)
    if score_range is None:
        min_ = score_list.min()
        max_ = score_list.max()
    else:
        min_ = score_range[0]
        max_ = score_range[1]
        if logscale:
            min_, max_ = apply_logscale([min_, max_])
    # NOTE(review): cmap_scale_min/max are computed but never used below;
    # presumably score2_01 was meant to consume them — confirm.
    if cmap_range is None:
        cmap_scale_min, cmap_scale_max = 0., 1.
    else:
        cmap_scale_min, cmap_scale_max = cmap_range
    extent_ = max_ - min_
    if extent_ == 0:
        # Degenerate range: every score maps to the colormap midpoint
        colors = [cmap(.5) for fx in range(len(score_list))]
    else:
        # NOTE(review): score2_01 is not defined anywhere in this (possibly
        # truncated) listing — confirm it exists in the full module.
        if False and logscale:
            # hack
            score_list = np.array(score_list)
            #rank_multiplier = score_list.argsort() / len(score_list)
            #normscore = np.array(list(map(score2_01, score_list))) * rank_multiplier
            normscore = np.array(list(map(score2_01, score_list)))
            colors = list(map(cmap, normscore))
        else:
            colors = [cmap(score2_01(score)) for score in score_list]
    if val2_customcolor is not None:
        # Override the colormap colors for scores with a dedicated color
        colors = [
            np.array(val2_customcolor.get(score, color))
            for color, score in zip(colors, score_list)]
    return colors
def reverse_colormap(cmap):
    """Return a copy of ``cmap`` with its color progression flipped.

    ListedColormap instances simply get their color list reversed; for
    segmented colormaps each channel's anchor points are mirrored about
    x = 0.5 and re-sorted.

    References:
        http://nbviewer.ipython.org/github/kwinkunks/notebooks/blob/master/Matteo_colourmaps.ipynb
    """
    import matplotlib as mpl
    if isinstance(cmap, mpl.colors.ListedColormap):
        return mpl.colors.ListedColormap(cmap.colors[::-1])
    flipped = {}
    for key, channel in six.iteritems(cmap._segmentdata):
        # Mirror every (x, y0, y1) anchor point and restore ascending order
        flipped[key] = sorted((1 - t[0], t[1], t[2]) for t in channel)
    return mpl.colors.LinearSegmentedColormap(
        cmap.name + '_reversed', flipped)
def draw_border(ax, color, lw=2, offset=None, adjust=True):
    """Draw an unfilled rectangle border around a subplot.

    Returns the matplotlib Rectangle patch that was added.
    """
    import matplotlib as mpl
    if adjust:
        # Expand slightly beyond the axis bounds
        xy, width, height = _get_axis_xy_width_height(ax, -.7, -.2, 1, .4)
    else:
        xy, width, height = _get_axis_xy_width_height(ax)
    if offset is not None:
        xoff, yoff = offset
        xy = [xoff, yoff]
        # NOTE: height is negated here — presumably so the rectangle grows
        # downward from the offset point; confirm against callers.
        height = - height - yoff
        width = width - xoff
    rect = ax.add_patch(mpl.patches.Rectangle(xy, width, height, lw=lw))
    rect.set_clip_on(False)
    rect.set_fill(False)
    rect.set_edgecolor(color)
    return rect
def draw_boxes(boxes, box_format='xywh', color='blue', labels=None,
               textkw=None, ax=None):
    """
    Draws a collection of bounding boxes (with optional labels) on an axes.

    Args:
        boxes (list): list of coordinates in xywh, tlbr, or cxywh format
        box_format (str): specify how boxes are formated
            xywh is the top left x and y pixel width and height
            cxywh is the center xy pixel width and height
            tlbr is the top left xy and the bottom right xy
        color (str): edge color of the boxes
        labels (list): if specified, plots a text annotation on each box
        textkw (dict): overrides for the default label text style
        ax (Axes): axes to draw on (defaults to the current axes)

    Example:
        >>> from netharn.util.mplutil import *
        >>> autompl()
        >>> bboxes = [[.1, .1, .6, .3], [.3, .5, .5, .6]]
        >>> col = draw_boxes(bboxes)
    """
    import matplotlib as mpl
    from matplotlib import pyplot as plt
    if ax is None:
        ax = plt.gca()
    from netharn import util
    # A util.Boxes object carries its own format information
    if isinstance(boxes, util.Boxes):
        box_format = boxes.format
        boxes = boxes.data
    if not len(boxes):
        return
    boxes = np.asarray(boxes)
    # Normalize all formats to top-left xywh
    if box_format == 'xywh':
        xywh = boxes
    elif box_format == 'cxywh':
        # center-xy -> top-left-xy
        cx, cy, w, h = boxes.T[0:4]
        x1 = cx - (w / 2)
        y1 = cy - (h / 2)
        xywh = np.vstack([x1, y1, w, h]).T
    elif box_format == 'tlbr':
        # top-left / bottom-right corners -> top-left + extent
        x1, y1 = boxes.T[0:2]
        w, h = boxes.T[2:4] - boxes.T[0:2]
        xywh = np.vstack([x1, y1, w, h]).T
    else:
        raise KeyError(box_format)
    # Transparent fill, colored edge
    edgecolor = Color(color).as01('rgba')
    facecolor = Color((0, 0, 0, 0)).as01('rgba')
    rectkw = dict(ec=edgecolor, fc=facecolor, lw=2, linestyle='solid')
    patches = [mpl.patches.Rectangle((x, y), w, h, **rectkw)
               for x, y, w, h in xywh]
    # Draw all rectangles in one collection for efficiency
    col = mpl.collections.PatchCollection(patches, match_original=True)
    ax.add_collection(col)
    if labels:
        texts = []
        default_textkw = {
            'horizontalalignment': 'left',
            'verticalalignment': 'top',
            'backgroundcolor': (0, 0, 0, .3),
            'color': 'white',
            'fontproperties': mpl.font_manager.FontProperties(
                size=6, family='monospace'),
        }
        tkw = default_textkw.copy()
        if textkw is not None:
            tkw.update(textkw)
        # Anchor each label at its box's top-left corner
        for (x1, y1, w, h), label in zip(xywh, labels):
            texts.append((x1, y1, label, tkw))
        for (x1, y1, catname, tkw) in texts:
            ax.text(x1, y1, catname, **tkw)
    return col
def draw_line_segments(pts1, pts2, ax=None, **kwargs):
    """Draw ``N`` independent line segments between ``N`` pairs of points.

    Args:
        pts1 (ndarray): Nx2 array of segment start points
        pts2 (ndarray): Nx2 array of segment end points
        ax (None): axes to draw on (default = None, uses the current axes)
        **kwargs: lw, alpha, colors (forwarded to LineCollection)

    CommandLine:
        python -m netharn.util.mplutil draw_line_segments --show

    Example:
        >>> pts1 = np.array([(.1, .8), (.6, .8)])
        >>> pts2 = np.array([(.6, .7), (.4, .1)])
        >>> figure(fnum=None)
        >>> draw_line_segments(pts1, pts2)
        >>> # xdoc: +REQUIRES(--show)
        >>> import matplotlib.pyplot as plt
        >>> ax = plt.gca()
        >>> ax.set_xlim(0, 1)
        >>> ax.set_ylim(0, 1)
        >>> show_if_requested()
    """
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    if ax is None:
        ax = plt.gca()
    assert len(pts1) == len(pts2), 'unaligned'
    # Pair up the endpoints: one (start, end) tuple per segment
    segments = list(zip(pts1, pts2))
    # Accept either 'lw' or 'linewidth' as the width keyword
    fallback_width = kwargs.pop('linewidth', 1.0)
    linewidth = kwargs.pop('lw', fallback_width)
    alpha = kwargs.pop('alpha', 1.0)
    if 'color' in kwargs:
        # LineCollection expects the plural keyword
        kwargs['colors'] = kwargs['color']
        # mpl.colors.ColorConverter().to_rgb(kwargs['color'])
    line_group = mpl.collections.LineCollection(
        segments, linewidths=linewidth, alpha=alpha, **kwargs)
    ax.add_collection(line_group)
def make_heatmask(probs, cmap='plasma', with_alpha=True):
    """
    Colorizes a single-channel intensity mask (with an alpha channel)

    Args:
        probs (ndarray): 2d float mask; converted to the [0, 1] range
        cmap (str): matplotlib colormap name used for colorization
        with_alpha (bool): when True, reverses the color channel order and
            uses the probabilities themselves as the alpha channel

    Returns:
        ndarray: HxWx4 colorized mask
    """
    import matplotlib as mpl
    from netharn.util import imutil
    assert len(probs.shape) == 2
    cmap_ = mpl.cm.get_cmap(cmap)
    probs = imutil.ensure_float01(probs)
    # cmap_(probs) returns an HxWx4 RGBA array
    heatmask = cmap_(probs)
    if with_alpha:
        # NOTE(review): this flips RGB -> BGR, presumably for cv2-style
        # consumers — confirm with callers.
        heatmask[:, :, 0:3] = heatmask[:, :, 0:3][:, :, ::-1]
        heatmask[:, :, 3] = probs
    return heatmask
def colorbar_image(domain, cmap='plasma', dpi=96, shape=(200, 20), transparent=False):
    """
    Renders a standalone colorbar as an image.

    Args:
        domain (ndarray): values the colorbar spans
        cmap (str): matplotlib colormap name
        dpi (int): dots per inch used for both the figure and the render
        shape (tuple): approximate (height, width) of the output in pixels
        transparent (bool): render with a transparent background

    Notes:
        shape is approximate

    Ignore:
        domain = np.linspace(-30, 200)
        cmap='plasma'
        dpi = 80
        dsize = (20, 200)
        util.imwrite('foo.png', util.colorbar_image(np.arange(0, 1)), shape=(400, 80))
        import plottool as pt
        pt.qtensure()
        import matplotlib as mpl
        mpl.style.use('ggplot')
        util.imwrite('foo.png', util.colorbar_image(np.linspace(0, 1, 100), dpi=200, shape=(1000, 40), transparent=1))
        ub.startfile('foo.png')
    """
    import matplotlib as mpl
    # NOTE(review): the `warn` keyword was removed from mpl.use in
    # matplotlib 3.x — this call fails on modern matplotlib; confirm the
    # pinned version.
    mpl.use('agg', force=False, warn=False)
    from matplotlib import pyplot as plt
    fig = plt.figure(dpi=dpi)
    # Convert the requested pixel shape into inches for matplotlib
    w, h = shape[1] / dpi, shape[0] / dpi
    # w, h = 1, 10
    fig.set_size_inches(w, h)
    # NOTE(review): passing '111' as a string to add_subplot is deprecated
    # in newer matplotlib — confirm.
    ax = fig.add_subplot('111')
    sm = plt.cm.ScalarMappable(cmap=plt.get_cmap(cmap))
    sm.set_array(domain)
    plt.colorbar(sm, cax=ax)
    cb_img = render_figure_to_image(fig, dpi=dpi, transparent=transparent)
    plt.close(fig)
    return cb_img
if __name__ == '__main__':
    r"""
    CommandLine:
        python -m netharn.util.mplutil
    """
    # Run all doctests in this module when executed as a script
    import xdoctest
    xdoctest.doctest_module(__file__)
18dcca6339890714a53a527f99f816d155ae5c43 | 4,876 | py | Python | mmdeploy/codebase/mmdet/models/roi_heads/test_mixins.py | zhiqwang/mmdeploy | 997d111a6f4ca9624ab3b36717748e6ce002037d | [
"Apache-2.0"
] | 746 | 2021-12-27T10:50:28.000Z | 2022-03-31T13:34:14.000Z | mmdeploy/codebase/mmdet/models/roi_heads/test_mixins.py | zhiqwang/mmdeploy | 997d111a6f4ca9624ab3b36717748e6ce002037d | [
"Apache-2.0"
] | 253 | 2021-12-28T05:59:13.000Z | 2022-03-31T18:22:25.000Z | mmdeploy/codebase/mmdet/models/roi_heads/test_mixins.py | zhiqwang/mmdeploy | 997d111a6f4ca9624ab3b36717748e6ce002037d | [
"Apache-2.0"
] | 147 | 2021-12-27T10:50:33.000Z | 2022-03-30T10:44:20.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdeploy.core import FUNCTION_REWRITER
| 41.322034 | 75 | 0.611567 |
18dd011d855404f1d1af53f818b57ec996f325ba | 1,060 | py | Python | examples/props.py | SandNerd/notional | ccab44bc4c5d19d4546156f0d72b22b93e28e2ed | [
"MIT"
] | 23 | 2021-08-03T08:13:14.000Z | 2022-03-27T13:13:54.000Z | examples/props.py | SandNerd/notional | ccab44bc4c5d19d4546156f0d72b22b93e28e2ed | [
"MIT"
] | 15 | 2021-08-03T04:04:23.000Z | 2022-03-31T14:27:26.000Z | examples/props.py | SandNerd/notional | ccab44bc4c5d19d4546156f0d72b22b93e28e2ed | [
"MIT"
] | 3 | 2021-08-08T04:47:48.000Z | 2022-03-06T23:13:52.000Z | #!/usr/bin/env python3
"""This script demonstrates setting properties on a page manually.
The script accepts a single command line option, which is a page ID. It will then
display information about the properties and update a few of them.
Note that this script assumes the database has already been created with required
fields.
The caller must set `NOTION_AUTH_TOKEN` to a valid integration token.
"""
import logging
import os
import sys
logging.basicConfig(level=logging.INFO)
import notional
from notional import types
page_id = sys.argv[1]
auth_token = os.getenv("NOTION_AUTH_TOKEN")
notion = notional.connect(auth=auth_token)
# get an existing page...
page = notion.pages.retrieve(page_id)
print(f"{page.Title} => {page.url}")
# print all current properties on the page...
for name, prop in page.properties.items():
print(f"{name} => {prop}")
# update a property on the page...
page["Complete"] = types.Checkbox.from_value(True)
# FIXME this feature is broken - https://github.com/jheddings/notional/issues/9
# notion.pages.update(page)
| 25.853659 | 82 | 0.756604 |
18dd1d1444e3f06d7820ae1bbcacd5a56dc12c2e | 1,116 | py | Python | retroroot.py | retroroot-linux/retroroo | 07ae0a93f6ea781fa6330a8defdabac9bda82adc | [
"MIT"
] | null | null | null | retroroot.py | retroroot-linux/retroroo | 07ae0a93f6ea781fa6330a8defdabac9bda82adc | [
"MIT"
] | null | null | null | retroroot.py | retroroot-linux/retroroo | 07ae0a93f6ea781fa6330a8defdabac9bda82adc | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Use this file to setup a build environment."""
import os
import argparse
from support.linux.log import Log
from support.docker_wrapper.retroroot import RetrorootDocker
CWD = os.getcwd()
def parse_args(args):
    """Parse command line arguments.

    :param args: list of raw argument strings (e.g. ``sys.argv[1:]``).
    :return: The argument object.
    """
    parser = argparse.ArgumentParser()
    # All options are simple boolean flags defaulting to False
    flag_specs = [
        (("-b", "--build"), "Build"),
        (("-s", "--setup"), "setup"),
        (("--verbose",), "Prepare verbosely"),
    ]
    for names, help_text in flag_specs:
        parser.add_argument(*names,
                            default=False,
                            action="store_true",
                            help=help_text)
    return parser.parse_args(args)
if __name__ == '__main__':
main()
| 24.8 | 60 | 0.556452 |
18dd3cd341f57a8da1bfa888190207388f947eb8 | 1,796 | py | Python | grr/test_bench.py | kecho/grr | b6554f20bc8a279bc946a2a0da54d028160d880d | [
"MIT"
] | 8 | 2021-11-08T16:12:25.000Z | 2021-12-16T06:41:01.000Z | grr/test_bench.py | kecho/grr | b6554f20bc8a279bc946a2a0da54d028160d880d | [
"MIT"
] | null | null | null | grr/test_bench.py | kecho/grr | b6554f20bc8a279bc946a2a0da54d028160d880d | [
"MIT"
] | null | null | null | import coalpy.gpu as g
import numpy as np
import math
import functools
from . import prefix_sum as gpu_prefix_sum
if __name__ == "__main__":
run_test("test prefix sum inclusive", test_cluster_gen_inclusive)
run_test("test prefix sum exclusive", test_cluster_gen_exclusive)
| 32.654545 | 114 | 0.698218 |
18dd6ac52fd7ae55fdafeac9d413e2a786dc94b3 | 3,633 | py | Python | code/train.py | ty-on-h12/srgan-pytorch | de0972782200a052a615754b14466f0c495f8b80 | [
"MIT"
] | null | null | null | code/train.py | ty-on-h12/srgan-pytorch | de0972782200a052a615754b14466f0c495f8b80 | [
"MIT"
] | null | null | null | code/train.py | ty-on-h12/srgan-pytorch | de0972782200a052a615754b14466f0c495f8b80 | [
"MIT"
] | null | null | null | from torchvision.transforms import transforms
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
import torch as T
import torch.optim as optim
from model import Generator, Discriminator
from loss_fn import GeneratorLoss, TVLoss
from utils import show_progress, save
import datetime
import gc
import os
device = 'cuda' if T.cuda.is_available() else 'cpu'
BATCH_SIZE = 16
SIZE_HR = 256
SIZE_LR = 64
num_workers = 2
rootpath = '../data'
transform_hr = transforms.Compose([
transforms.Resize((SIZE_HR, SIZE_HR)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
data_hr = ImageFolder(rootpath, transform=transform_hr)
transform_lr = transforms.Compose([
transforms.Resize((SIZE_LR, SIZE_LR)),
transforms.ToTensor(),
transforms.GaussianBlur(kernel_size=25),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
data_lr = ImageFolder(rootpath, transform=transform_lr)
full_data = ConcatDataset(data_lr, data_hr)
loader = DataLoader(full_data, BATCH_SIZE, num_workers=num_workers)
generator = Generator(3, 64).to(device)
discriminator = Discriminator(3, 64).to(device)
lr = 1e-1000
gen_optimizer = optim.Adam(generator.parameters(), lr=lr)
disc_optimizer = optim.Adam(discriminator.parameters(), lr=lr)
generator_criterion = GeneratorLoss().to(device)
g_losses = []
d_losses = []
EPOCHS = 1000
if 'models' not in os.listdir():
os.mkdir('models')
save_path = '../models/'
# <----- TRAINING LOOP ----->
for epoch in range(1, EPOCHS):
generator.train()
discriminator.train()
print(f'EPOCH [{epoch}/{EPOCHS}]')
sum_d_loss = 0
sum_g_loss = 0
gc.collect()
T.cuda.empty_cache()
start = datetime.datetime.now()
for idx, (item, target) in enumerate(loader):
item = item[0].to(device)
target = target[0].to(device)
fake_image = generator(item)
discriminator.zero_grad()
real_out = discriminator(target).mean()
fake_out = discriminator(fake_image).mean()
d_loss = 1 - real_out + fake_out
d_loss.backward(retain_graph=True)
generator.zero_grad()
g_loss = generator_criterion(fake_out, fake_image, target)
g_loss.backward()
fake_img = generator(item)
fake_out = discriminator(fake_img).mean()
if idx % 100 == 0:
print(
f'Batch {idx}/{loader.__len__()} \nLoss (Generator) {g_loss.detach().cpu()}\nLoss (Discriminator) {d_loss.detach().cpu()}'
)
pred = fake_img[0].detach().cpu()
save(generator, discriminator, save_path)
show_progress([item.detach().cpu()[0], pred, target.detach().cpu()[0]], save=True, show=False)
gen_optimizer.step()
sum_d_loss += d_loss.detach().cpu()
sum_g_loss += g_loss.detach().cpu()
print(f'Time per epoch = {start - datetime.datetime.now()}')
g_losses.append(sum_g_loss / loader.__len__())
d_losses.append(sum_d_loss / loader.__len__())
print(f'D_loss {sum_d_loss}')
print(f'G_loss {sum_g_loss}') | 31.318966 | 138 | 0.619048 |
18dd7f23d5115fd8f4284ee064ed94347d9523f8 | 497 | py | Python | utils/Formatting.py | levindoneto/lmGen | ffe2150ebff577135efa3d65a845dd3b806a94ed | [
"MIT"
] | 5 | 2018-11-17T17:16:24.000Z | 2019-10-17T15:16:37.000Z | utils/Formatting.py | levindoneto/lanGen | ffe2150ebff577135efa3d65a845dd3b806a94ed | [
"MIT"
] | 6 | 2018-02-06T23:05:29.000Z | 2019-10-14T02:23:38.000Z | utils/Formatting.py | levindoneto/lmGen | ffe2150ebff577135efa3d65a845dd3b806a94ed | [
"MIT"
] | 4 | 2018-10-29T06:37:58.000Z | 2019-10-06T13:51:18.000Z | import re
''' Function for Formatting n-grams.
@Parameters: Tuple: n-gram to be formatted.
@Return: String: formatted gram.
'''
''' Function for Formatting sentences.
@Parameters: Sentence: unformatted sentence.
@Return: String: formatted sentence.
'''
| 26.157895 | 48 | 0.661972 |
18de55269df5672d53cc5989addf4883d366d066 | 1,735 | py | Python | mkt/users/tasks.py | ngokevin/zamboni | a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69 | [
"BSD-3-Clause"
] | null | null | null | mkt/users/tasks.py | ngokevin/zamboni | a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69 | [
"BSD-3-Clause"
] | null | null | null | mkt/users/tasks.py | ngokevin/zamboni | a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69 | [
"BSD-3-Clause"
] | null | null | null | from datetime import timedelta
import commonware.log
from celeryutils import task
from django.utils.encoding import force_text
from tower import ugettext_lazy as _
from mkt.account.utils import fxa_preverify_url
from mkt.site.mail import send_html_mail_jinja
from mkt.users.models import UserProfile
fxa_email_subjects = {
'customers-before': _('Firefox Accounts is coming'),
'customers-during': _('Activate your Firefox Account'),
'customers-after': _('Activate your Firefox Account'),
'developers-before': _('Firefox Accounts is coming'),
'developers-during': _('Activate your Firefox Account'),
'developers-after': _('Activate your Firefox Account')
}
fxa_email_types = fxa_email_subjects.keys()
log = commonware.log.getLogger('z.users')
| 32.12963 | 72 | 0.673199 |
18df2c4ff7c83fc2ff4c4df2ad5efb199366fdfd | 82 | wsgi | Python | jpmorgan.wsgi | mrukhlov/jpmorgan | ef8f49054772c3f07161f4eaf7c119019ce600e2 | [
"Apache-2.0"
] | null | null | null | jpmorgan.wsgi | mrukhlov/jpmorgan | ef8f49054772c3f07161f4eaf7c119019ce600e2 | [
"Apache-2.0"
] | null | null | null | jpmorgan.wsgi | mrukhlov/jpmorgan | ef8f49054772c3f07161f4eaf7c119019ce600e2 | [
"Apache-2.0"
] | null | null | null | import sys
sys.path.insert(0, '/srv/jpmorgan')
from app import app as application | 20.5 | 35 | 0.768293 |
18e485c0872cf9f87d1144effd64d6706192e11d | 449 | py | Python | examples/plot_voronoi.py | smsaladi/msmexplorer | 7880545c239c8f33ababdd111f58fd553b8bbdde | [
"MIT"
] | 6 | 2018-03-02T21:02:32.000Z | 2020-05-26T08:23:24.000Z | examples/plot_voronoi.py | smsaladi/msmexplorer | 7880545c239c8f33ababdd111f58fd553b8bbdde | [
"MIT"
] | 9 | 2018-03-02T21:19:26.000Z | 2021-07-26T13:54:30.000Z | examples/plot_voronoi.py | smsaladi/msmexplorer | 7880545c239c8f33ababdd111f58fd553b8bbdde | [
"MIT"
] | 5 | 2018-02-07T18:42:23.000Z | 2021-04-29T07:01:50.000Z | """
Voronoi Plot
============
"""
import numpy as np
from sklearn.cluster import KMeans
import msmexplorer as msme
# Create a random dataset across several variables
rs = np.random.RandomState(42)
n, p = 1000, 2
d = rs.normal(0, 2, (n, p))
d += np.log(np.arange(1, p + 1)) * -5 + 10
# Cluster data using KMeans
kmeans = KMeans(random_state=rs)
kmeans.fit(d)
# Plot Voronoi Diagram
msme.plot_voronoi(kmeans, color_palette=msme.palettes.msme_rgb)
| 20.409091 | 63 | 0.701559 |
18e6697372af7e5090bad7d69e9278ea7660cfcd | 40,586 | py | Python | algo_sherbend.py | ymoisan/GeoSim | 84f1482c885d7d3b1e07b92dee9580e4bcacf9cb | [
"MIT"
] | null | null | null | algo_sherbend.py | ymoisan/GeoSim | 84f1482c885d7d3b1e07b92dee9580e4bcacf9cb | [
"MIT"
] | null | null | null | algo_sherbend.py | ymoisan/GeoSim | 84f1482c885d7d3b1e07b92dee9580e4bcacf9cb | [
"MIT"
] | null | null | null | """This algorithm implements the Wang Generalization algotithm with constraint checking
This algorithm simplifies lines. It detects for each line the bends. It analyze the bend and
remove the bends that are below a certain diameter. The point and lines that do not need
to be simplified are still used to enforce topology integrity between those feature that need to be simplified
Limits and constraints
Always works better when the line to process meet the OGC simple line.
"""
import math, sys
from shapely.geometry import Point, LineString, LinearRing, Polygon
from shapely.prepared import prep
from shapely import affinity
from lib_geosim import GenUtil, PointSc, LineStringSc, SpatialContainer, GeoSimException
# Internal constant ===> Should be modify with care...
_AREA_CMP_INDEX = .75 # Compactness index factor applied to the adjusted area
#Internal key word constants
_BURNED = "Burned"
_DIAMETER = "diameter"
_SIMPLIFIED = 'Simplified'
_NOT_SIMPLIFIED = 'NotSimplified'
_UNSIMPLIFIABLE = 'Unsimplifiable'
def _rotate_start_bend(self):
    """Rotate a closed line string so the start of the line is also the start of a clockwise bend

    To be done on closed line only

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    rotate = None
    max_v = len(self.vertex_orientation)
    # Find the first vertex where orientation flips from clockwise to
    # anti-clockwise; that vertex marks the start of a bend
    for i in range(max_v):
        j = (i+1) % max_v
        if self.vertex_orientation[i] == GenUtil.CLOCKWISE and \
           self.vertex_orientation[j] == GenUtil.ANTI_CLOCKWISE:
            rotate = i
            break
    # Rotate the first/last vertex to the position of the biggest bend
    if rotate is None:
        # All the bend are clockwise. Nothing to do
        pass
    elif rotate == 0:
        # The line string does not need to be rotated
        pass
    else:
        # Re-anchor the closed ring at index `rotate`; coords[1:rotate+1]
        # skips the duplicated first/last vertex of the ring
        lst_coord = self.coords[rotate:] + self.coords[1:rotate+1]
        self.coords = lst_coord  # Update the LineString coordinate
def _extract_coords(self, i,j):
"""Extract the coordinate between index [i,j]
If j is lower than i act like a circular array and avoid duplication of first/last vertice
Parameters
----------
i,j : int
Index used to extract a sub list
Returns
-------
List
list of (x,y) coordinates
"""
if i <= j:
lst_coords = self.coords[i:j+1]
else:
lst_coords = self.coords[i:] + self.coords[0:j+1]
return lst_coords
def _change_inflexion(self, i):
    """Flag if there is an inflexion between the specified vertices.

    There is an inflexion when the orientation changes from clockwise to
    anti-clockwise or vice versa between vertex i and its successor.

    Parameters
    ----------
    i : int
        Index into the vertex orientation list

    Returns
    -------
    bool
        Flag indicating if an inflexion occurs or not
    """
    max_v = len(self.vertex_orientation)
    # Compare orientation at i with its (circular) successor (i+1) % max_v
    if (self.vertex_orientation[i] == GenUtil.ANTI_CLOCKWISE and
        self.vertex_orientation[(i+1) % max_v] == GenUtil.CLOCKWISE) or \
       (self.vertex_orientation[i] == GenUtil.CLOCKWISE and
        self.vertex_orientation[(i+1) % max_v] == GenUtil.ANTI_CLOCKWISE):
        inflexion = True
    else:
        inflexion = False
    return inflexion
def _add_bends(self, inflexions):
    """Add Bend objects to the line from the inflexion list

    Each consecutive pair of inflexions delimits one bend: the bend starts
    at the first index of inflexion k and ends at the second index of
    inflexion k+1.

    Parameters
    ----------
    inflexions : List
        List of (i, j) index pairs marking the inflexions in the line

    Returns
    -------
    None
    """
    for k in range(len(inflexions) - 1):
        i = inflexions[k][0]
        j = inflexions[k + 1][1]
        self.sb_bends.append(Bend(i, j, self._extract_coords(i, j)))
def _create_bends(self):
    """Create the bends in the line

    Detects inflexion points from the per-vertex orientation list and
    groups them into Bend objects stored in ``self.sb_bends``.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    # Delete any actual bend information
    self.sb_bends = []
    # Remove the colinear vertice in order to facilitate bend detection (moreover colinear vertice are useless)
    self._remove_colinear_vertex()
    inflexions = []
    # NOTE: `max` shadows the builtin here
    max = len(self.vertex_orientation)
    # NOTE(review): `is_closed` is read here while `_make_line_ccw` reads
    # `sb_is_closed` — confirm both attributes exist and agree.
    if self.is_closed:
        # Rotate the line to position at the start of a bend
        self._rotate_start_bend()
        # The vertex orientation list is considered a circular list
        for i in range(max):
            j = (i + 1) % max
            if self._change_inflexion(i):
                inflexions.append((i, j))
        # Create the bend from the inflexion point
        if inflexions:
            if len(inflexions) >= 3:
                # With 3 or more inflexions add one more circular inflexion
                # to close the loop of bends
                i = inflexions[-1][0]
                j = inflexions[0][1]
                inflexions.append((i, j))
            # Transform the inflexion into bends
            self._add_bends(inflexions)
    else:
        # The vertex orientation list is not considered a circular list
        if max == 3:
            # Special case there is only one bend to simplify
            j = len(self.coords)-1
            self.sb_bends.append(Bend(0, j, self._extract_coords(0, j)))
        elif max >= 4:
            for i in range(1, max-2):
                if self._change_inflexion(i):
                    inflexions.append((i, i+1))
            # Add inflexion to add the first and last bend
            inflexions = [(0, None)] + inflexions + [(None, max-1)]
            # Transform inflexion into bends
            self._add_bends(inflexions)
    return
def _sort_bends(self):
"""Sort the bends by order of ascending min_adj_are
Parameters
----------
None
Returns
-------
None
"""
lst_bends = []
for i, bend in enumerate(self.sb_bends):
if bend.adj_area <= self.sb_min_adj_area:
# Only select the bend below the minimum adjusted area
lst_bends.append((i, bend.adj_area))
# Sort based of the adj_area from smallest to biggest
lst_bends.sort(key=lambda tup: tup[1]) # sorts in place
return lst_bends
def _offset_bend_ij(self, i, j):
""""Offset the value of the different bend i,j because one or more vertice of the line were removed
Handle circular list when j < i
Parameters
----------
i,j : int
Index in the line where the vertice were removed
Returns
-------
None
"""
if i < j:
offset = j-i-1
else:
offset = j
for bend in self.sb_bends:
if bend.status == _NOT_SIMPLIFIED:
if bend.i < bend.j:
if bend.i >= j:
bend.i -= offset
bend.j -= offset
else:
if bend.i >= j:
bend.i -= offset
def _make_line_ccw(self):
"""Make sure the line is counter clockwise.
Only apply to closed line
Parameters
----------
None
Returns
-------
None
"""
if self.sb_is_closed:
tmp_ring = LinearRing(self.coords)
if not tmp_ring.is_ccw:
# The linear ring is clockwise. Reverse the coordinates to make it ccw
self.coords = list(reversed(self.coords))
    def simplify(self, diameter, s_constraints=None):
        """Simplify the line by reducing each eligible bend.

        Performs one simplification pass: orients the line ccw, detects the
        bends, then removes the bends (smallest adjusted area first) whose
        removal does not violate the spatial constraints.

        Parameters
        ----------
        diameter : float
            Diameter driving the simplification.
            NOTE(review): not read inside this method — the bend selection
            uses self.sb_min_adj_area via _sort_bends; confirm whether this
            parameter is still needed.
        s_constraints : SpatialConstraints, optional
            Spatial constraints checked before removing a bend; when None,
            no constraint checking is done

        Returns
        -------
        int
            Number of bends simplified during this pass
        """
        nbr_bend_simplified = 0
        # Make sure the line is counter clockwise
        #
        self._make_line_ccw()
        # Create the bend in the line
        self._create_bends()
        max_bends = len(self.sb_bends)
        # Candidate bends as (index, adj_area), ascending adjusted area
        sorted_bends = self._sort_bends()
        if len(sorted_bends) == 0:
            # No more bend to simplify. Line is at its simplest form
            self.sb_is_simplest = True
        elif len(sorted_bends) >= 2:
            # Make the biggest bend (last one) unsimplifiable
            ind_last = sorted_bends[-1][0]
            self.sb_bends[ind_last].status = _UNSIMPLIFIABLE
        # Loop over each bend to simplify them
        for sorted_bend in sorted_bends:
            ind = sorted_bend[0]
            if self.sb_bends[ind].status == _NOT_SIMPLIFIED:
                # Locate the neighbouring bends (circular for a closed line)
                ind_before = None
                ind_after = None
                if self.sb_is_closed:
                    if max_bends >= 2:
                        ind_before = (ind-1) % max_bends
                        ind_after = (ind+1) % max_bends
                else:
                    if ind > 0:
                        ind_before = ind-1
                    if ind < max_bends-1:
                        ind_after = ind+1
                # Validate the spatial constraints
                i = self.sb_bends[ind].i
                j = self.sb_bends[ind].j
                # Build the candidate coordinates with the bend removed
                if i < j:
                    lst_coords = self.coords[0:i+1] + self.coords[j:]
                else:
                    # Manage circular list
                    # NOTE(review): keeps coords[j:i+1] plus coords[j:j+1];
                    # verify this wrapped-bend reconstruction is intended
                    lst_coords = self.coords[j:i+1] + self.coords[j:j+1]
                # NOTE(review): self.is_closed here vs self.sb_is_closed
                # above — confirm the two attributes are equivalent
                if self.is_closed:
                    if len(lst_coords) >= 4:
                        if s_constraints is not None:
                            in_conflict = s_constraints.check_constraints(self, self.sb_bends[ind])
                        else:
                            in_conflict = False
                    else:
                        # A closed line cannot have less than 4 vertices
                        in_conflict = True
                else:
                    if len(lst_coords) >= 2:
                        if s_constraints is not None:
                            in_conflict = s_constraints.check_constraints(self, self.sb_bends[ind])
                        else:
                            in_conflict = False
                    else:
                        # An open line cannot have less than 3 vertices
                        # NOTE(review): comment says 3 but the check allows
                        # 2 — confirm the intended minimum
                        in_conflict = True
                if not in_conflict:
                    # Update the coordinates
                    self.coords = lst_coords
                    # Bend before and after must not be simplified in this pass, maybe in a next pass
                    if ind_before is not None:
                        self.sb_bends[ind_before].status = _UNSIMPLIFIABLE
                    if ind_after is not None:
                        self.sb_bends[ind_after].status = _UNSIMPLIFIABLE
                    self.sb_bends[ind].status = _SIMPLIFIED
                    nbr_bend_simplified += 1
                    # Shift the remaining bends' indices past the removed vertice
                    self._offset_bend_ij(i, j)
        return nbr_bend_simplified
class PointSb(PointSc):
    """
    A class to represent a Point used by the SherBend algorithm

    Attributes
    ----------
    coords : tuple
        A tuple (x,y) representing one coordinate
    properties : dict
        The dictionary of the properties (attributes of the features)
    fast_access : Boolean
        A flag to indicate if we keep a copy of the coordinate in order to
        accelerate the access, because the access to the C function is slow
    """
class AlgoSherbend(object):
    """Main class for the Sherbend line simplification algorithm

    Attributes
    ----------
    command : DataClass
        Contains all the commands for the Sherbend line simplification algorithm
    geo_content : DataClass
        Contains the geo information needed for the Sherbend line reduction algorithm
    nbr_bend_simplified : int
        Running count of the bends simplified
    """

    def __init__(self, command, geo_content):
        """Constructor of the class

        Parameters
        ----------
        command : DataClass
            Contains all the commands for the Sherbend line simplification algorithm
        geo_content: DataClass
            Contains the geo information needed for the Sherbend line reduction algorithm

        Returns
        -------
        None
        """
        self.command = command
        self.geo_content = geo_content
        self.nbr_bend_simplified = 0

    def calculate_min_adj_area(self, diameter):
        """Calculates the minimum adjusted area of a bend

        Parameters
        ----------
        diameter : float
            diameter used to calculate the minimum adjusted area

        Returns
        -------
        float
            Minimum adjusted area
        """
        # Area of the circle of the given diameter, scaled by the compactness index
        return _AREA_CMP_INDEX * math.pi * (diameter / 2.0) ** 2.0

    def _calculate_adj_area(self, coords):
        """Calculates the adjusted area of a polygon

        Parameters
        ----------
        coords : list
            List of x,y coordinates defining a polygon

        Returns
        -------
        float
            Adjusted area of the polygon
        """
        pol = Polygon(coords)
        cmp_index = GenUtil.calculate_compactness_index(pol.area, pol.length)
        adj_area = GenUtil.calculate_adjusted_area(pol.area, cmp_index)

        return adj_area

    def load_features(self, geo_content, command):
        """Load the points, line strings and polygons in the spatial container.

        The Polygons are deconstructed into a list of LineString with extra
        added information needed for the reconstruction of the original Polygon

        Parameters
        ----------
        geo_content : DataClass
            Contains all the input/output geo spatial information
        command : ParserArgument
            Contains the parameters of the command line interface

        Returns
        -------
        None
        """
        features = []  # List of features to pass to the spatial container

        # Load all the features in the spatial container
        for feature in geo_content.in_features:
            diameter = command.dlayer_dict[feature.sb_layer_name]
            min_adj_area = self.calculate_min_adj_area(diameter)
            if feature.geom_type == GenUtil.POINT:
                out_feature = PointSb(feature.coords, feature.sb_layer_name, feature.sb_properties)
                # Add the feature
                features.append(out_feature)
            elif feature.geom_type == GenUtil.LINE_STRING:
                # Fixed a duplicated assignment (was: out_feature = out_feature = ...)
                out_feature = LineStringSb(feature.coords, GenUtil.LINE_STRING, min_adj_area,
                                           feature.sb_layer_name, feature.sb_properties)
                # Add the feature
                features.append(out_feature)
            elif feature.geom_type == GenUtil.POLYGON:
                adj_area = self._calculate_adj_area(feature.exterior.coords)
                # Only keep the polygon over the minimum adjusted area
                if not command.exclude_polygon or adj_area > min_adj_area:
                    # Deconstruct the Polygon into a list of LineString with supplementary information
                    # needed to reconstruct the original Polygon
                    ext_feature = LineStringSb(feature.exterior.coords, GenUtil.POLYGON_EXTERIOR, min_adj_area,
                                               feature.sb_layer_name, feature.sb_properties)
                    int_features = []
                    # Extract the interiors as LineString
                    for interior in feature.interiors:
                        adj_area = self._calculate_adj_area(interior.coords)
                        # Only keep the interior (hole) over the minimal adjusted area
                        if not command.exclude_hole or adj_area > min_adj_area:
                            int_feature = LineStringSb(interior.coords, GenUtil.POLYGON_INTERIOR,
                                                       min_adj_area, None, None)
                            int_features.append(int_feature)
                        else:
                            # Bug fix: count only this one rejected hole
                            # (previously added len(feature.interiors) per rejected hole)
                            geo_content.nbr_del_holes += 1

                    # Add interior features needed for Polygon reconstruction
                    ext_feature.sb_interiors = int_features

                    # Add the exterior and the interiors independently
                    features.append(ext_feature)  # Add the exterior
                    features += int_features  # Add the interiors
                else:
                    # Do not add the feature (exterior and interiors) in the spatial container
                    # Update some stats
                    geo_content.nbr_del_polygons += 1
                    geo_content.nbr_del_holes += len(feature.interiors)
            else:
                # Bug fix: report geom_type (the attribute tested above); the previous
                # message referenced a non-existent "geometry" attribute
                raise GeoSimException("Invalid geometry type: {}".format(feature.geom_type))

        # Create the spatial container that will receive all the spatial features
        self.s_container = SpatialContainer()
        self.s_container.add_features(features)  # Load all the features

        return

    def _manage_lines_simplification(self, s_constraints):
        """Main routine to simplify the lines

        For each line to simplify, for each valid bend to simplify, check the
        constraints; if the constraints are violated check alternative bends
        (only if the number of bends to simplify is one).

        One of the costly operations, especially for very long line strings
        (like contours), is to rewrite the coordinates into the Shapely
        structure. This is why the Shapely structure is updated at the end,
        when the last bend of the line is processed.

        Parameters
        ----------
        s_constraints : SpatialConstraints
            Spatial constraints to validate

        Returns
        -------
        int
            Total number of bends simplified
        """
        iter_nbr = 0
        total_nbr_bend_simplified = 0
        # Iterate until all the lines are simplified or no more lines have to be simplified
        while True:
            iter_nbr_bend_simplified = 0
            print('Iteration # {}'.format(iter_nbr))
            # Build line iterator over the not-yet-simplest line strings
            lines = (feature for feature in self.s_container.get_features()
                     if (not feature.sb_is_simplest and feature.sb_geom_type == GenUtil.LINE_STRING))
            for line in lines:
                nbr_bend_simplified = line.simplify(self.command.diameter, s_constraints)
                iter_nbr_bend_simplified += nbr_bend_simplified
                total_nbr_bend_simplified += nbr_bend_simplified
            print('Number of bend simplified {}'.format(iter_nbr_bend_simplified))
            print('----------')
            iter_nbr += 1
            if iter_nbr_bend_simplified == 0:
                # Fixed point reached: last pass simplified nothing
                break

        print('Total number of bend simplified: {}'.format(total_nbr_bend_simplified))
        print('Total number of simplicity error: {}'.format(s_constraints.nbr_err_simplicity))
        print('Total number of crossing error: {}'.format(s_constraints.nbr_err_crossing))
        print('Total number of sidedness error: {}'.format(s_constraints.nbr_err_sidedness))

        return total_nbr_bend_simplified

    def process(self):
        """Main routine for the Sherbend algorithm

        The algorithm will simplify the lines using the Sherbend algorithm.
        It will iterate over the lines until there are no more bends to simplify.

        Parameters
        ----------
        None

        Returns
        -------
        None
            The output features are appended to self.geo_content.out_features
        """
        # Load the features into the spatial container
        self.load_features(self.geo_content, self.command)

        s_constraints = SpatialConstraints(s_container=self.s_container)

        self._manage_lines_simplification(s_constraints)

        for feature in self.s_container.get_features():
            if feature.sb_geom_type == GenUtil.POINT:
                self.geo_content.out_features.append(feature)
            elif feature.sb_geom_type == GenUtil.LINE_STRING:
                if feature.sb_original_type == GenUtil.LINE_STRING:
                    self.geo_content.out_features.append(feature)
                else:
                    if feature.sb_original_type == GenUtil.POLYGON_EXTERIOR:
                        # The LineString was an exterior Polygon so reconstruct the original Polygon
                        interiors = [list(interior.coords) for interior in feature.sb_interiors]
                        polygon = Polygon(feature.coords, interiors)
                        polygon.sb_layer_name = feature.sb_layer_name
                        polygon.sb_properties = feature.sb_properties
                        self.geo_content.out_features.append(polygon)
                    else:
                        pass  # Nothing to do with the holes here (rebuilt with their exterior)

        return
| 33.486799 | 130 | 0.567708 |
18e68b384996aec6ddd93fd4e05675ce4c043545 | 393 | py | Python | src/Server/Py_Easy_TCP_Server.py | Moguf/Py_Network | 13e351e9955464a5d65bd3dee3642438cfe9ed92 | [
"MIT"
] | null | null | null | src/Server/Py_Easy_TCP_Server.py | Moguf/Py_Network | 13e351e9955464a5d65bd3dee3642438cfe9ed92 | [
"MIT"
] | null | null | null | src/Server/Py_Easy_TCP_Server.py | Moguf/Py_Network | 13e351e9955464a5d65bd3dee3642438cfe9ed92 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import socket

# Connection parameters for this example TCP server
port = 12345
MAX_SIZE = 65535  # NOTE(review): unused in this script — kept for parity with the companion examples
target_address = '127.0.0.1'

# Use context managers so both sockets are closed even if an error occurs
# (the original leaked the listening socket and never closed it)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.bind((target_address, port))
    s.listen(2)

    # conn is the client socket; addr is the client's (host, port) pair
    conn, addr = s.accept()
    with conn:
        print(addr, "Now Connected")
        text = "Thank you for connecting from TCP Server."
        data = text.encode('ascii')
        # sendall retries until every byte is transmitted (plain send may write only part)
        conn.sendall(data)
| 19.65 | 52 | 0.725191 |
18e718827e2560736ccb159689ee15cc3157f2a5 | 4,084 | py | Python | empyric/collection/controllers.py | dmerthe/empyric | 7553b71e241709836cdef156afa7dd2a1c1edf5a | [
"MIT"
] | 3 | 2021-01-17T14:05:27.000Z | 2022-03-03T06:25:39.000Z | empyric/collection/controllers.py | dmerthe/empyric | 7553b71e241709836cdef156afa7dd2a1c1edf5a | [
"MIT"
] | null | null | null | empyric/collection/controllers.py | dmerthe/empyric | 7553b71e241709836cdef156afa7dd2a1c1edf5a | [
"MIT"
] | 1 | 2021-01-17T14:05:29.000Z | 2021-01-17T14:05:29.000Z | from empyric.adapters import *
from empyric.collection.instrument import *
| 22.31694 | 117 | 0.588149 |
18e80ab1f054cab4110f82ef2bcc62a0377ee9cd | 2,468 | py | Python | bot/main.py | the-rango/Discord-Python-Bot-Tutorial | 5afa7b0b6b2397a0d566bc6009bb7cac2e4354de | [
"Apache-2.0"
] | null | null | null | bot/main.py | the-rango/Discord-Python-Bot-Tutorial | 5afa7b0b6b2397a0d566bc6009bb7cac2e4354de | [
"Apache-2.0"
] | null | null | null | bot/main.py | the-rango/Discord-Python-Bot-Tutorial | 5afa7b0b6b2397a0d566bc6009bb7cac2e4354de | [
"Apache-2.0"
] | null | null | null | # APACHE LICENSE
# Copyright 2020 Stuart Paterson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# External Packages
import os
import discord
from dotenv import load_dotenv
# Local Files
import utils
# Create the bot
load_dotenv()  # load environment variables from a local .env file
TOKEN = os.getenv('DISCORD_TOKEN')  # bot token; None when DISCORD_TOKEN is unset
client = discord.Client()
def get_channel_by_name(client, guild, name):
    """Return the text channel called *name* from *guild*.

    The guild is looked up among the guilds the client has joined; None is
    returned (implicitly) when no matching channel is found.
    """
    # Restrict the search to the client's guilds that match the requested guild
    matching_guilds = (server for server in client.guilds if server == guild)
    for server in matching_guilds:
        for channel in server.text_channels:
            if channel.name == name:
                return channel
# Run the bot
client.run(TOKEN)  # connect to Discord using the token read above
| 29.380952 | 82 | 0.66329 |
18e81c7e28ba4d13c0ba77aba68314299f3e766e | 4,945 | py | Python | src/main.py | LucidtechAI/auth_example | a370833a16f8345e1e595f1ade3e830f8371157c | [
"Apache-2.0"
] | null | null | null | src/main.py | LucidtechAI/auth_example | a370833a16f8345e1e595f1ade3e830f8371157c | [
"Apache-2.0"
] | null | null | null | src/main.py | LucidtechAI/auth_example | a370833a16f8345e1e595f1ade3e830f8371157c | [
"Apache-2.0"
] | 1 | 2019-03-08T09:52:05.000Z | 2019-03-08T09:52:05.000Z | import argparse
import json
import requests
import pathlib
from urllib.parse import urlparse
from auth import AWSSignatureV4
def _add_document_args(subparser, action, content_types):
    """Attach the document/content-type/consent arguments shared by every subcommand.

    Parameters
    ----------
    subparser : argparse.ArgumentParser
        The subcommand parser to extend
    action : str
        Verb phrase used in the help texts (e.g. 'make predictions on', 'split')
    content_types : set
        Allowed values for the content_type positional argument
    """
    subparser.add_argument('document_path', help='Path to document to {}'.format(action))
    subparser.add_argument('content_type', choices=content_types,
                           help='Content-Type of document to {}'.format(action))
    subparser.add_argument('--consent_id', default='1234',
                           help='Consent ID is typically a mapping from end user to a unique identifier')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    parser.add_argument('api_endpoint', help='HTTPS endpoint for REST API')
    parser.add_argument('api_key')
    parser.add_argument('access_key_id')
    parser.add_argument('secret_access_key')
    parser.add_argument('--with_s3_kms', action='store_true')

    subparsers = parser.add_subparsers()

    invoice_prediction_parser = subparsers.add_parser('invoice_prediction')
    _add_document_args(invoice_prediction_parser, 'make predictions on', {'image/jpeg', 'application/pdf'})
    invoice_prediction_parser.set_defaults(cmd=invoice_prediction)

    receipt_prediction_parser = subparsers.add_parser('receipt_prediction')
    _add_document_args(receipt_prediction_parser, 'make predictions on', {'image/jpeg', 'application/pdf'})
    receipt_prediction_parser.set_defaults(cmd=receipt_prediction)

    document_split_parser = subparsers.add_parser('document_split')
    _add_document_args(document_split_parser, 'split', {'application/pdf'})
    document_split_parser.set_defaults(cmd=document_split)

    args = parser.parse_args()
    if not hasattr(args, 'cmd'):
        # Without a subcommand, args has no cmd attribute; fail with a clean
        # usage message instead of an AttributeError on args.cmd()
        parser.error('a subcommand is required')
    args.cmd()
| 37.180451 | 121 | 0.70455 |