hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
600de579e9f074f3a42976d366b7423013a654a6 | 5,270 | py | Python | exercise-09/programming_assignment/hopfield.py | AleRiccardi/technical-neural-network-course | bfcca623a9dc3f7f4c20e1efe39abe986cd8869e | [
"Apache-2.0"
] | null | null | null | exercise-09/programming_assignment/hopfield.py | AleRiccardi/technical-neural-network-course | bfcca623a9dc3f7f4c20e1efe39abe986cd8869e | [
"Apache-2.0"
] | null | null | null | exercise-09/programming_assignment/hopfield.py | AleRiccardi/technical-neural-network-course | bfcca623a9dc3f7f4c20e1efe39abe986cd8869e | [
"Apache-2.0"
] | null | null | null | import numpy as np
import random
# 5x5 binary bitmaps (1 = active pixel) used as the stored reference
# patterns for a Hopfield network, plus "noisy" variants used as cues
# for pattern recall.
letter_C = np.array([
    [1, 1, 1, 1, 1],
    [1, 0, 0, 0, 0],
    [1, 0, 0, 0, 0],
    [1, 0, 0, 0, 0],
    [1, 1, 1, 1, 1],
])
# letter_C with several pixels flipped.
noisy_C = np.array([
    [1, 1, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 0, 0],
    [1, 0, 0, 1, 0],
    [1, 0, 1, 1, 1],
])
letter_I = np.array([
    [0, 1, 1, 1, 1],
    [0, 0, 1, 0, 0],
    [0, 0, 1, 0, 0],
    [0, 0, 1, 0, 0],
    [1, 1, 1, 1, 1],
])
# letter_I with several pixels flipped.
noisy_I = np.array([
    [1, 1, 1, 1, 1],
    [0, 0, 1, 0, 0],
    [0, 1, 1, 0, 0],
    [0, 0, 0, 0, 0],
    [0, 1, 0, 1, 1],
])
letter_T = np.array([
    [1, 1, 1, 1, 1],
    [0, 0, 1, 0, 0],
    [0, 0, 1, 0, 0],
    [0, 0, 1, 0, 0],
    [0, 0, 1, 0, 0],
])
# letter_T with several pixels flipped.
noisy_T = np.array([
    [1, 1, 0, 1, 0],
    [0, 0, 1, 0, 0],
    [0, 1, 1, 0, 0],
    [0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0],
])
if __name__ == '__main__':
    # NOTE(review): these runner functions are not defined in this
    # excerpt — presumably defined elsewhere in the module; confirm
    # before executing.
    test_w_less_101()
    test_w_more_100()
| 27.164948 | 99 | 0.493548 |
600fae89534379bad1faa45aa725f0ecd7646d79 | 142 | py | Python | util/infoclient/test_infoclient.py | cdla/murfi2 | 45dba5eb90e7f573f01706a50e584265f0f8ffa7 | [
"Apache-2.0"
] | 7 | 2015-02-10T17:00:49.000Z | 2021-07-27T22:09:43.000Z | util/infoclient/test_infoclient.py | cdla/murfi2 | 45dba5eb90e7f573f01706a50e584265f0f8ffa7 | [
"Apache-2.0"
] | 11 | 2015-02-22T19:15:53.000Z | 2021-08-04T17:26:18.000Z | util/infoclient/test_infoclient.py | cdla/murfi2 | 45dba5eb90e7f573f01706a50e584265f0f8ffa7 | [
"Apache-2.0"
] | 8 | 2015-07-06T22:31:51.000Z | 2019-04-22T21:22:07.000Z |
from infoclientLib import InfoClient
# Minimal smoke test for the info client.
# NOTE(review): positional arguments presumed to be (local host, local
# port, remote host, remote port) — confirm against InfoClient's
# signature in infoclientLib.
ic = InfoClient('localhost', 15002, 'localhost', 15003)
# Register interest in the 'roi-weightedave' value (state 'active' —
# semantics defined by infoclientLib), then start the client.
ic.add('roi-weightedave', 'active')
ic.start()
| 20.285714 | 55 | 0.739437 |
60108a3d3357ef01dab42a6e413205a5ad651ed5 | 13,095 | py | Python | lrtc_lib/experiment_runners/experiment_runner.py | MovestaDev/low-resource-text-classification-framework | 4380755a65b35265e84ecbf4b87e872d79e8f079 | [
"Apache-2.0"
] | 57 | 2020-11-18T15:13:06.000Z | 2022-03-28T22:33:26.000Z | lrtc_lib/experiment_runners/experiment_runner.py | MovestaDev/low-resource-text-classification-framework | 4380755a65b35265e84ecbf4b87e872d79e8f079 | [
"Apache-2.0"
] | 5 | 2021-02-23T22:11:07.000Z | 2021-12-13T00:13:48.000Z | lrtc_lib/experiment_runners/experiment_runner.py | MovestaDev/low-resource-text-classification-framework | 4380755a65b35265e84ecbf4b87e872d79e8f079 | [
"Apache-2.0"
] | 14 | 2021-02-10T08:55:27.000Z | 2022-02-23T22:37:54.000Z | # (c) Copyright IBM Corporation 2020.
# LICENSE: Apache License 2.0 (Apache-2.0)
# http://www.apache.org/licenses/LICENSE-2.0
import abc
import logging
import time
from collections import defaultdict
from typing import List
import numpy as np
from dataclasses import dataclass
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s')
import lrtc_lib.data_access.data_access_factory as data_access_factory
import lrtc_lib.experiment_runners.experiments_results_handler as res_handler
from lrtc_lib.oracle_data_access import oracle_data_access_api
from lrtc_lib.active_learning.diversity_calculator import DiversityCalculator
from lrtc_lib.active_learning.knn_outlier_calculator import KnnOutlierCalculator
from lrtc_lib.active_learning.strategies import ActiveLearningStrategies
from lrtc_lib.data_access.core.data_structs import TextElement
from lrtc_lib.data_access.data_access_api import DataAccessApi
from lrtc_lib.data_access.data_access_factory import get_data_access
from lrtc_lib.orchestrator import orchestrator_api
from lrtc_lib.orchestrator.orchestrator_api import DeleteModels
from lrtc_lib.train_and_infer_service.model_type import ModelType
from lrtc_lib.training_set_selector.train_and_dev_set_selector_api import TrainingSetSelectionStrategy
| 52.590361 | 136 | 0.72333 |
6011674256a1e396b16faca45277694f253b2c3f | 909 | py | Python | contrast/environment/data.py | alexbjorling/acquisition-framework | 4090381344aabca05155612845ba4e4a47455dc3 | [
"MIT"
] | null | null | null | contrast/environment/data.py | alexbjorling/acquisition-framework | 4090381344aabca05155612845ba4e4a47455dc3 | [
"MIT"
] | 2 | 2018-09-19T06:49:03.000Z | 2019-06-28T10:47:37.000Z | contrast/environment/data.py | alexbjorling/acquisition-framework | 4090381344aabca05155612845ba4e4a47455dc3 | [
"MIT"
] | null | null | null | try:
from tango import DeviceProxy, DevError
except ModuleNotFoundError:
pass
| 25.25 | 79 | 0.581958 |
60121c6217810f4a6299e69b2f99282f9e977749 | 1,504 | py | Python | game_2048/views.py | fung04/csrw_game | 9673fdd311583057d5bf756dec7b99959d961d0c | [
"MIT"
] | null | null | null | game_2048/views.py | fung04/csrw_game | 9673fdd311583057d5bf756dec7b99959d961d0c | [
"MIT"
] | null | null | null | game_2048/views.py | fung04/csrw_game | 9673fdd311583057d5bf756dec7b99959d961d0c | [
"MIT"
] | null | null | null | import json
from django.contrib.auth.models import User
from django.http import JsonResponse
from django.shortcuts import redirect, render
from .models import Game2048
# Create your views here.
# test_user
# 8!S#5RP!WVMACg
| 27.851852 | 84 | 0.672207 |
60125a0886f4a69344f97e125b44faf6103792e1 | 319 | py | Python | distdeepq/__init__.py | Silvicek/distributional-dqn | 41a9095393dd25b7375119b4af7d2c35ee3ec6cc | [
"MIT"
] | 131 | 2017-09-16T02:06:44.000Z | 2022-03-23T08:09:56.000Z | distdeepq/__init__.py | Silvicek/distributional-dqn | 41a9095393dd25b7375119b4af7d2c35ee3ec6cc | [
"MIT"
] | 6 | 2017-10-26T09:36:00.000Z | 2019-03-15T06:23:17.000Z | distdeepq/__init__.py | Silvicek/distributional-dqn | 41a9095393dd25b7375119b4af7d2c35ee3ec6cc | [
"MIT"
] | 29 | 2017-09-16T02:30:27.000Z | 2020-04-12T03:12:39.000Z | from distdeepq import models # noqa
from distdeepq.build_graph import build_act, build_train # noqa
from distdeepq.simple import learn, load, make_session # noqa
from distdeepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer # noqa
from distdeepq.static import *
from distdeepq.plots import PlotMachine
| 39.875 | 81 | 0.827586 |
6012d662e5b654522d75f6dba733bb788998a6c0 | 812 | py | Python | python/10.Authentication-&-API-Keys.py | 17nikhil/codecademy | 58fbd652691c9df8139544965ebb0e9748142538 | [
"Apache-2.0"
] | null | null | null | python/10.Authentication-&-API-Keys.py | 17nikhil/codecademy | 58fbd652691c9df8139544965ebb0e9748142538 | [
"Apache-2.0"
] | null | null | null | python/10.Authentication-&-API-Keys.py | 17nikhil/codecademy | 58fbd652691c9df8139544965ebb0e9748142538 | [
"Apache-2.0"
] | 1 | 2018-10-03T14:36:31.000Z | 2018-10-03T14:36:31.000Z | # Authentication & API Keys
# Many APIs require an API key. Just as a real-world key allows you to access something, an API key grants you access to a particular API. Moreover, an API key identifies you to the API, which helps the API provider keep track of how their service is used and prevent unauthorized or malicious activity.
#
# Some APIs require authentication using a protocol called OAuth. We won't get into the details, but if you've ever been redirected to a page asking for permission to link an application with your account, you've probably used OAuth.
#
# API keys are often long alphanumeric strings. We've made one up in the editor to the right! (It won't actually work on anything, but when you receive your own API keys in future projects, they'll look a lot like this.)
api_key = "string"  # placeholder key for the exercise; not a real credential
| 81.2 | 303 | 0.777094 |
601367658aacd910181efee0a2e8d64036a1544b | 111 | py | Python | plucker/__init__.py | takkaria/json-plucker | 6407dcc9a21d99d8f138128e9ee80c901a08c2e1 | [
"MIT"
] | null | null | null | plucker/__init__.py | takkaria/json-plucker | 6407dcc9a21d99d8f138128e9ee80c901a08c2e1 | [
"MIT"
] | 1 | 2021-03-09T20:57:15.000Z | 2021-03-09T20:57:15.000Z | plucker/__init__.py | takkaria/plucker-python | 6407dcc9a21d99d8f138128e9ee80c901a08c2e1 | [
"MIT"
] | null | null | null | from .plucker import pluck, Path
from .exceptions import PluckError
# Names re-exported as the package's public API.
__all__ = ["pluck", "Path", "PluckError"]
| 22.2 | 41 | 0.738739 |
6013883d7068c2a00e5b4b40942f112984e3413c | 7,417 | py | Python | arviz/plots/pairplot.py | gimbo/arviz | c1df1847aa5170ad2810ae3d705d576d2643e3ec | [
"Apache-2.0"
] | null | null | null | arviz/plots/pairplot.py | gimbo/arviz | c1df1847aa5170ad2810ae3d705d576d2643e3ec | [
"Apache-2.0"
] | null | null | null | arviz/plots/pairplot.py | gimbo/arviz | c1df1847aa5170ad2810ae3d705d576d2643e3ec | [
"Apache-2.0"
] | null | null | null | """Plot a scatter or hexbin of sampled parameters."""
import warnings
import numpy as np
from ..data import convert_to_dataset, convert_to_inference_data
from .plot_utils import xarray_to_ndarray, get_coords, get_plotting_function
from ..utils import _var_names
def plot_pair(
    data,
    group="posterior",
    var_names=None,
    coords=None,
    figsize=None,
    textsize=None,
    kind="scatter",
    gridsize="auto",
    contour=True,
    fill_last=True,
    divergences=False,
    colorbar=False,
    ax=None,
    divergences_kwargs=None,
    plot_kwargs=None,
    backend=None,
    backend_kwargs=None,
    show=None,
):
    """
    Plot a scatter or hexbin matrix of the sampled parameters.

    Parameters
    ----------
    data : obj
        Any object that can be converted to an az.InferenceData object
        Refer to documentation of az.convert_to_dataset for details
    group : str, optional
        Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
    var_names : list of variable names
        Variables to be plotted, if None all variable are plotted
    coords : mapping, optional
        Coordinates of var_names to be plotted. Passed to `Dataset.sel`
    figsize : figure size tuple
        If None, size is (8 + numvars, 8 + numvars)
    textsize: int
        Text size for labels. If None it will be autoscaled based on figsize.
    kind : str
        Type of plot to display (scatter, kde or hexbin)
    gridsize : int or (int, int), optional
        Only works for kind=hexbin.
        The number of hexagons in the x-direction. The corresponding number of hexagons in the
        y-direction is chosen such that the hexagons are approximately regular.
        Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons
        in the x-direction and the y-direction.
    contour : bool
        If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
    fill_last : bool
        If True fill the last contour of the 2D KDE plot. Defaults to True.
    divergences : Boolean
        If True divergences will be plotted in a different color, only if group is either 'prior'
        or 'posterior'.
    colorbar : bool
        If True a colorbar will be included as part of the plot (Defaults to False).
        Only works when kind=hexbin
    ax: axes, optional
        Matplotlib axes or bokeh figures.
    divergences_kwargs : dicts, optional
        Additional keywords passed to ax.scatter for divergences
    plot_kwargs : dicts, optional
        Additional keywords passed to ax.plot, az.plot_kde or ax.hexbin
    backend: str, optional
        Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
    backend_kwargs: bool, optional
        These are kwargs specific to the backend being used. For additional documentation
        check the plotting method of the backend.
    show : bool, optional
        Call backend show function.

    Returns
    -------
    axes : matplotlib axes or bokeh figures

    Raises
    ------
    ValueError
        If ``kind`` is not one of 'scatter', 'kde' or 'hexbin', or if fewer
        than two variables would be plotted.

    Examples
    --------
    KDE Pair Plot

    .. plot::
        :context: close-figs

        >>> import arviz as az
        >>> centered = az.load_arviz_data('centered_eight')
        >>> coords = {'school': ['Choate', 'Deerfield']}
        >>> az.plot_pair(centered,
        >>>              var_names=['theta', 'mu', 'tau'],
        >>>              kind='kde',
        >>>              coords=coords,
        >>>              divergences=True,
        >>>              textsize=18)

    Hexbin pair plot

    .. plot::
        :context: close-figs

        >>> az.plot_pair(centered,
        >>>              var_names=['theta', 'mu'],
        >>>              coords=coords,
        >>>              textsize=18,
        >>>              kind='hexbin')

    Pair plot showing divergences

    .. plot::
        :context: close-figs

        >>> az.plot_pair(centered,
        ...              var_names=['theta', 'mu', 'tau'],
        ...              coords=coords,
        ...              divergences=True,
        ...              textsize=18)
    """
    valid_kinds = ["scatter", "kde", "hexbin"]
    if kind not in valid_kinds:
        # BUG FIX: the message was previously built from two adjacent string
        # literals with no separating space ("...recognized.Plot type...").
        raise ValueError(
            "Plot type {} not recognized. Plot type must be in {}".format(kind, valid_kinds)
        )

    if coords is None:
        coords = {}

    # Work on copies of caller-supplied kwargs dicts so the setdefault
    # calls below do not mutate the caller's objects.
    if plot_kwargs is None:
        plot_kwargs = {}
    else:
        plot_kwargs = plot_kwargs.copy()

    if kind == "scatter":
        plot_kwargs.setdefault("marker", ".")
        plot_kwargs.setdefault("lw", 0)

    if divergences_kwargs is None:
        divergences_kwargs = {}
    else:
        divergences_kwargs = divergences_kwargs.copy()

    divergences_kwargs.setdefault("marker", "o")
    divergences_kwargs.setdefault("markeredgecolor", "k")
    divergences_kwargs.setdefault("color", "C1")
    divergences_kwargs.setdefault("lw", 0)

    # Get posterior draws and combine chains
    data = convert_to_inference_data(data)
    grouped_data = convert_to_dataset(data, group=group)
    var_names = _var_names(var_names, grouped_data)
    flat_var_names, infdata_group = xarray_to_ndarray(
        get_coords(grouped_data, coords), var_names=var_names, combined=True
    )

    divergent_data = None
    diverging_mask = None

    # Assigning divergence group based on group param; only the posterior
    # and prior groups carry sampler statistics, so divergence plotting is
    # silently disabled for any other group.
    if group == "posterior":
        divergent_group = "sample_stats"
    elif group == "prior":
        divergent_group = "sample_stats_prior"
    else:
        divergences = False

    # Get diverging draws and combine chains
    if divergences:
        if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), "diverging"):
            divergent_data = convert_to_dataset(data, group=divergent_group)
            _, diverging_mask = xarray_to_ndarray(
                divergent_data, var_names=("diverging",), combined=True
            )
            diverging_mask = np.squeeze(diverging_mask)
        else:
            divergences = False
            warnings.warn(
                "Divergences data not found, plotting without divergences. "
                "Make sure the sample method provides divergences data and "
                "that it is present in the `diverging` field of `sample_stats` "
                "or `sample_stats_prior` or set divergences=False",
                SyntaxWarning,
            )

    if gridsize == "auto":
        # Heuristic hexbin resolution derived from the number of draws.
        gridsize = int(len(infdata_group[0]) ** 0.35)

    numvars = len(flat_var_names)

    if numvars < 2:
        # ValueError is more precise than the bare Exception previously
        # raised and remains compatible with callers catching Exception.
        raise ValueError("Number of variables to be plotted must be 2 or greater.")

    pairplot_kwargs = dict(
        ax=ax,
        infdata_group=infdata_group,
        numvars=numvars,
        figsize=figsize,
        textsize=textsize,
        kind=kind,
        plot_kwargs=plot_kwargs,
        contour=contour,
        fill_last=fill_last,
        gridsize=gridsize,
        colorbar=colorbar,
        divergences=divergences,
        diverging_mask=diverging_mask,
        divergences_kwargs=divergences_kwargs,
        flat_var_names=flat_var_names,
        backend_kwargs=backend_kwargs,
        show=show,
    )

    if backend == "bokeh":
        # The bokeh implementation does not accept these options.
        pairplot_kwargs.pop("gridsize", None)
        pairplot_kwargs.pop("colorbar", None)
        pairplot_kwargs.pop("divergences_kwargs", None)
        pairplot_kwargs.pop("hexbin_values", None)

    # TODO: Add backend kwargs
    plot = get_plotting_function("plot_pair", "pairplot", backend)
    ax = plot(**pairplot_kwargs)
    return ax
| 33.40991 | 99 | 0.62559 |
60140da7c5e11ee07c450ac06ede300441a124ba | 542 | py | Python | cuestionario/formularios.py | LisandroCanteros/Grupo2_COM06_Info2021 | 86ad9e08db4e8935bf397b6e4db0b3d9d72cb320 | [
"MIT"
] | null | null | null | cuestionario/formularios.py | LisandroCanteros/Grupo2_COM06_Info2021 | 86ad9e08db4e8935bf397b6e4db0b3d9d72cb320 | [
"MIT"
] | null | null | null | cuestionario/formularios.py | LisandroCanteros/Grupo2_COM06_Info2021 | 86ad9e08db4e8935bf397b6e4db0b3d9d72cb320 | [
"MIT"
] | 1 | 2021-09-05T23:29:56.000Z | 2021-09-05T23:29:56.000Z | from django.forms import ModelForm
from .models import Cuestionario, Categoria
from preguntas.models import Pregunta, Respuesta
| 20.846154 | 48 | 0.673432 |
6015330e90658ef9cb434f3116ddc5c99e3f87e7 | 6,403 | py | Python | vitcloud/views.py | biocross/VITCloud | 9656bd489c6d05717bf529d0661e07da0cd2551a | [
"MIT"
] | 2 | 2016-10-09T09:16:39.000Z | 2017-12-30T10:04:24.000Z | vitcloud/views.py | biocross/VITCloud | 9656bd489c6d05717bf529d0661e07da0cd2551a | [
"MIT"
] | 1 | 2015-03-28T12:10:24.000Z | 2015-03-28T19:19:00.000Z | vitcloud/views.py | biocross/VITCloud | 9656bd489c6d05717bf529d0661e07da0cd2551a | [
"MIT"
] | null | null | null | from django.views.generic import View
from django.http import HttpResponse
import os, json, datetime
from django.shortcuts import redirect
from django.shortcuts import render_to_response
from vitcloud.models import File
from django.views.decorators.csrf import csrf_exempt
from listingapikeys import findResult
import sys # sys.setdefaultencoding is cancelled by site.py
# NOTE(review): Python 2-only hack — reload(sys) restores the
# setdefaultencoding attribute that site.py removes at startup. Neither
# reload() as a builtin nor sys.setdefaultencoding exists in Python 3;
# this must be removed when porting.
reload(sys) # to re-enable sys.setdefaultencoding()
sys.setdefaultencoding('utf-8')
#Custom Functions:
#**Not for Production** Views
#Views:
| 37.444444 | 215 | 0.629705 |
6015c9596e351a0acc5020ff9d107cce20445519 | 406 | py | Python | blurple/ui/base.py | jeremytiki/blurple.py | c8f65955539cc27be588a06592b1c81c03f59c37 | [
"MIT"
] | 4 | 2021-06-30T19:58:59.000Z | 2021-07-27T13:43:49.000Z | blurple/ui/base.py | jeremytiki/blurple.py | c8f65955539cc27be588a06592b1c81c03f59c37 | [
"MIT"
] | 2 | 2021-07-10T16:08:25.000Z | 2021-07-12T02:15:40.000Z | blurple/ui/base.py | jeremytiki/blurple.py | c8f65955539cc27be588a06592b1c81c03f59c37 | [
"MIT"
] | 3 | 2021-07-08T03:00:40.000Z | 2021-09-08T19:57:50.000Z | from abc import ABC
import discord
| 29 | 119 | 0.669951 |
601651a2b4d6d062db448e75989e40e985eb13df | 1,661 | py | Python | migrations/versions/e86dd3bc539c_change_admin_to_boolean.py | jonzxz/project-piscator | 588c8b1ac9355f9a82ac449fdbeaa1ef7eb441ef | [
"MIT"
] | null | null | null | migrations/versions/e86dd3bc539c_change_admin_to_boolean.py | jonzxz/project-piscator | 588c8b1ac9355f9a82ac449fdbeaa1ef7eb441ef | [
"MIT"
] | null | null | null | migrations/versions/e86dd3bc539c_change_admin_to_boolean.py | jonzxz/project-piscator | 588c8b1ac9355f9a82ac449fdbeaa1ef7eb441ef | [
"MIT"
] | 1 | 2021-02-18T03:08:21.000Z | 2021-02-18T03:08:21.000Z | """change admin to boolean
Revision ID: e86dd3bc539c
Revises: 6f63ef516cdc
Create Date: 2020-11-11 22:32:00.707936
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e86dd3bc539c'  # id of this migration
down_revision = '6f63ef516cdc'  # migration this one revises
branch_labels = None  # unused here
depends_on = None  # unused here
| 38.627907 | 102 | 0.712222 |
601677ed1a2084da8bff806075ddd7b027330006 | 388 | py | Python | school/migrations/0010_alter_sala_unique_together.py | adrianomqsmts/django-escola | a69541bceb3f30bdd2e9f0f41aa9c2da6081a1d1 | [
"MIT"
] | null | null | null | school/migrations/0010_alter_sala_unique_together.py | adrianomqsmts/django-escola | a69541bceb3f30bdd2e9f0f41aa9c2da6081a1d1 | [
"MIT"
] | null | null | null | school/migrations/0010_alter_sala_unique_together.py | adrianomqsmts/django-escola | a69541bceb3f30bdd2e9f0f41aa9c2da6081a1d1 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.3 on 2022-03-16 03:09
from django.db import migrations
| 21.555556 | 83 | 0.634021 |
6017b0c984f5c9581d7b67c9fd000d7881af64dd | 637 | py | Python | code_trunk/trainer/abc.py | chris4540/DD2430-ds-proj | b876efabe949392b27a7ebd4afb2be623174e287 | [
"MIT"
] | null | null | null | code_trunk/trainer/abc.py | chris4540/DD2430-ds-proj | b876efabe949392b27a7ebd4afb2be623174e287 | [
"MIT"
] | null | null | null | code_trunk/trainer/abc.py | chris4540/DD2430-ds-proj | b876efabe949392b27a7ebd4afb2be623174e287 | [
"MIT"
] | null | null | null | """
Abstract training class
"""
from abc import ABC as AbstractBaseClass
from abc import abstractmethod
| 19.30303 | 71 | 0.616954 |
601874835949dbb0ebb74e3019f720313e38011d | 2,763 | py | Python | quadpy/triangle/cools_haegemans.py | melvyniandrag/quadpy | ae28fc17351be8e76909033f03d71776c7ef8280 | [
"MIT"
] | 1 | 2019-01-02T19:04:42.000Z | 2019-01-02T19:04:42.000Z | quadpy/triangle/cools_haegemans.py | melvyniandrag/quadpy | ae28fc17351be8e76909033f03d71776c7ef8280 | [
"MIT"
] | null | null | null | quadpy/triangle/cools_haegemans.py | melvyniandrag/quadpy | ae28fc17351be8e76909033f03d71776c7ef8280 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
from mpmath import mp
from .helpers import untangle2
| 38.375 | 108 | 0.534202 |
601c017654bfba5b4012ac4932fefa02ad294c7b | 912 | py | Python | account/admin.py | RichardLeeH/invoce_sys | 42a6f5750f45b25e0d7282114ccb7f9f72ee1761 | [
"Apache-2.0"
] | null | null | null | account/admin.py | RichardLeeH/invoce_sys | 42a6f5750f45b25e0d7282114ccb7f9f72ee1761 | [
"Apache-2.0"
] | null | null | null | account/admin.py | RichardLeeH/invoce_sys | 42a6f5750f45b25e0d7282114ccb7f9f72ee1761 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from account.models import Profile
admin.site.site_header = 'invoce'
admin.site.unregister(Token)
admin.site.register(Token, TokenAdmin)
admin.site.unregister(User)
admin.site.register(User, UserCustomAdmin)
| 24 | 71 | 0.718202 |
601c3263a4fb21497920c0fe4c9459fa3c4066b9 | 844 | py | Python | oops/#016exceptions.py | krishankansal/PythonPrograms | 6d4d989068195b8c8dd9d71cf4f920fef1177cf2 | [
"MIT"
] | null | null | null | oops/#016exceptions.py | krishankansal/PythonPrograms | 6d4d989068195b8c8dd9d71cf4f920fef1177cf2 | [
"MIT"
] | null | null | null | oops/#016exceptions.py | krishankansal/PythonPrograms | 6d4d989068195b8c8dd9d71cf4f920fef1177cf2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 08:40:11 2020
@author: krishan
"""
for val in (0, "hello", 50.0, 13):
print(f"Testing {val}:", funny_division3(val))
| 24.823529 | 55 | 0.609005 |
601c880be1287d7f4ecd5a8ee1ee870db121bb75 | 4,129 | py | Python | config/simclr_config.py | denn-s/SimCLR | e2239ac52464b1271c3b8ad1ec4eb26f3b73c7d4 | [
"MIT"
] | 5 | 2020-08-24T17:57:51.000Z | 2021-06-06T18:18:19.000Z | config/simclr_config.py | denn-s/SimCLR | e2239ac52464b1271c3b8ad1ec4eb26f3b73c7d4 | [
"MIT"
] | null | null | null | config/simclr_config.py | denn-s/SimCLR | e2239ac52464b1271c3b8ad1ec4eb26f3b73c7d4 | [
"MIT"
] | 1 | 2020-08-29T00:35:36.000Z | 2020-08-29T00:35:36.000Z | import os
from datetime import datetime
import torch
from dataclasses import dataclass
| 28.475862 | 113 | 0.601356 |
601cd7cd07ee2ea23d637edb23a7aada960db1af | 47,259 | py | Python | test/unit/common/middleware/s3api/test_obj.py | Priyanka-Askani/swift | 1ab691f63778008015b34ce004992844acee9968 | [
"Apache-2.0"
] | 1 | 2019-05-25T10:55:58.000Z | 2019-05-25T10:55:58.000Z | test/unit/common/middleware/s3api/test_obj.py | Priyanka-Askani/swift | 1ab691f63778008015b34ce004992844acee9968 | [
"Apache-2.0"
] | 12 | 2015-06-23T23:20:17.000Z | 2016-01-27T00:37:12.000Z | test/unit/common/middleware/s3api/test_obj.py | Priyanka-Askani/swift | 1ab691f63778008015b34ce004992844acee9968 | [
"Apache-2.0"
] | 5 | 2015-06-04T19:00:11.000Z | 2015-12-16T21:04:33.000Z | # Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datetime import datetime
import hashlib
import os
from os.path import join
import time
from mock import patch
from swift.common import swob
from swift.common.swob import Request
from test.unit.common.middleware.s3api import S3ApiTestCase
from test.unit.common.middleware.s3api.test_s3_acl import s3acl
from swift.common.middleware.s3api.subresource import ACL, User, encode_acl, \
Owner, Grant
from swift.common.middleware.s3api.etree import fromstring
from swift.common.middleware.s3api.utils import mktime, S3Timestamp
from test.unit.common.middleware.s3api.helpers import FakeSwift
    def test_object_PUT_copy_headers_with_match(self):
        """The x-amz-copy-source-if-* conditional headers of a copy
        request must be translated to plain conditional headers on the
        first backend request (against the copy source) and must not
        appear on the last backend request (the destination PUT)."""
        etag = '7dfa07a8e59ddbcd1dc84d4c4f82aea1'
        last_modified_since = 'Fri, 01 Apr 2014 11:00:00 GMT'
        header = {'X-Amz-Copy-Source-If-Match': etag,
                  'X-Amz-Copy-Source-If-Modified-Since': last_modified_since,
                  'Date': self.get_date_header()}
        status, header, body = \
            self._test_object_PUT_copy(swob.HTTPOk, header)
        self.assertEqual(status.split()[0], '200')
        # Exactly two backend requests were made for the copy.
        self.assertEqual(len(self.swift.calls_with_headers), 2)
        # Last request: no conditional headers may leak through.
        _, _, headers = self.swift.calls_with_headers[-1]
        self.assertTrue(headers.get('If-Match') is None)
        self.assertTrue(headers.get('If-Modified-Since') is None)
        # First request: the copy-source conditions, translated.
        _, _, headers = self.swift.calls_with_headers[0]
        self.assertEqual(headers['If-Match'], etag)
        self.assertEqual(headers['If-Modified-Since'], last_modified_since)
    def _test_object_for_s3acl(self, method, account):
        """Send *method* on /bucket/object authorized as *account* and
        return the result of self.call_s3api for the request."""
        req = Request.blank('/bucket/object',
                            environ={'REQUEST_METHOD': method},
                            headers={'Authorization': 'AWS %s:hmac' % account,
                                     'Date': self.get_date_header()})
        return self.call_s3api(req)
    def _test_set_container_permission(self, account, permission):
        """Register a canned HEAD response for /v1/AUTH_test/bucket whose
        encoded container ACL grants *permission* to *account* (container
        owner: test:tester)."""
        grants = [Grant(User(account), permission)]
        headers = \
            encode_acl('container',
                       ACL(Owner('test:tester', 'test:tester'), grants))
        self.swift.register('HEAD', '/v1/AUTH_test/bucket',
                            swob.HTTPNoContent, headers, None)
    def _test_object_copy_for_s3acl(self, account, src_permission=None,
                                    src_path='/src_bucket/src_obj'):
        """Register a source object at *src_path* whose ACL grants
        *src_permission* to *account* (or FULL_CONTROL to the owner
        test:tester when no permission is given), then issue a PUT to
        /bucket/object with X-Amz-Copy-Source as *account* and return the
        result of self.call_s3api."""
        owner = 'test:tester'
        grants = [Grant(User(account), src_permission)] \
            if src_permission else [Grant(User(owner), 'FULL_CONTROL')]
        src_o_headers = \
            encode_acl('object', ACL(Owner(owner, owner), grants))
        src_o_headers.update({'last-modified': self.last_modified})
        # Canned HEAD response for the copy source.
        self.swift.register(
            'HEAD', join('/v1/AUTH_test', src_path.lstrip('/')),
            swob.HTTPOk, src_o_headers, None)
        req = Request.blank(
            '/bucket/object',
            environ={'REQUEST_METHOD': 'PUT'},
            headers={'Authorization': 'AWS %s:hmac' % account,
                     'X-Amz-Copy-Source': src_path,
                     'Date': self.get_date_header()})
        return self.call_s3api(req)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 46.744807 | 79 | 0.578874 |
601e1228f0fc5110925548eceed16ee0fac450d1 | 3,654 | py | Python | pynyzo/pynyzo/keyutil.py | EggPool/pynyzo | 7f3b86f15caa51a975e6a428f4dff578a1f24bcb | [
"MIT"
] | 6 | 2019-02-09T02:46:18.000Z | 2021-03-29T04:15:15.000Z | pynyzo/pynyzo/keyutil.py | EggPool/pynyzo | 7f3b86f15caa51a975e6a428f4dff578a1f24bcb | [
"MIT"
] | 1 | 2020-05-17T18:29:20.000Z | 2020-05-18T08:31:33.000Z | pynyzo/pynyzo/keyutil.py | EggPool/pynyzo | 7f3b86f15caa51a975e6a428f4dff578a1f24bcb | [
"MIT"
] | 5 | 2019-02-09T02:46:19.000Z | 2021-01-08T06:49:50.000Z | """
Eddsa Ed25519 key handling
From
https://github.com/n-y-z-o/nyzoVerifier/blob/b73bc25ba3094abe3470ec070ce306885ad9a18f/src/main/java/co/nyzo/verifier/KeyUtil.java
plus
https://github.com/n-y-z-o/nyzoVerifier/blob/17509f03a7f530c0431ce85377db9b35688c078e/src/main/java/co/nyzo/verifier/util/SignatureUtil.java
"""
# Uses https://github.com/warner/python-ed25519 , c binding, fast
import ed25519
import hashlib
from pynyzo.byteutil import ByteUtil
if __name__ == "__main__":
KeyUtil.main()
# KeyUtil.private_to_public('nyzo-formatted-private-key'.replace('-', ''))
| 38.463158 | 179 | 0.678982 |
601e563b0639154915d91614f293088729954120 | 6,729 | py | Python | mldftdat/scripts/train_gp.py | mir-group/CiderPress | bf2b3536e6bd7432645c18dce5a745d63bc9df59 | [
"MIT"
] | 10 | 2021-09-09T06:51:57.000Z | 2021-12-17T09:48:41.000Z | mldftdat/scripts/train_gp.py | mir-group/CiderPress | bf2b3536e6bd7432645c18dce5a745d63bc9df59 | [
"MIT"
] | null | null | null | mldftdat/scripts/train_gp.py | mir-group/CiderPress | bf2b3536e6bd7432645c18dce5a745d63bc9df59 | [
"MIT"
] | null | null | null | from argparse import ArgumentParser
import os
import numpy as np
from joblib import dump
from mldftdat.workflow_utils import SAVE_ROOT
from mldftdat.models.gp import *
from mldftdat.data import load_descriptors, filter_descriptors
import yaml
if __name__ == '__main__':
main()
| 43.412903 | 141 | 0.622975 |
601f1b72f2f10dacace33b87801d53b05bfc4ed8 | 5,684 | py | Python | picoCTF-web/api/routes/admin.py | zaratec/picoCTF | b0a63f03625bb4657a8116f43bea26346ca6f010 | [
"MIT"
] | null | null | null | picoCTF-web/api/routes/admin.py | zaratec/picoCTF | b0a63f03625bb4657a8116f43bea26346ca6f010 | [
"MIT"
] | null | null | null | picoCTF-web/api/routes/admin.py | zaratec/picoCTF | b0a63f03625bb4657a8116f43bea26346ca6f010 | [
"MIT"
] | null | null | null | import api
import bson
from api.annotations import (
api_wrapper,
log_action,
require_admin,
require_login,
require_teacher
)
from api.common import WebError, WebSuccess
from flask import (
Blueprint,
Flask,
render_template,
request,
send_from_directory,
session
)
# Flask blueprint grouping the admin API routes of this module.
blueprint = Blueprint("admin_api", __name__)
| 29.450777 | 106 | 0.714814 |
601f307a31ada0a1b790c747cfc5310721f08839 | 724 | py | Python | python code/influxdb_worker.py | thongnbui/MIDS_251_project | 8eee0f4569268e11c2d1d356024dbdc10f180b10 | [
"Apache-2.0"
] | null | null | null | python code/influxdb_worker.py | thongnbui/MIDS_251_project | 8eee0f4569268e11c2d1d356024dbdc10f180b10 | [
"Apache-2.0"
] | null | null | null | python code/influxdb_worker.py | thongnbui/MIDS_251_project | 8eee0f4569268e11c2d1d356024dbdc10f180b10 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import json
import argparse
from influxdb import InfluxDBClient
# Python 2 script: print the last 10 minutes of one measurement as
# tab-separated rows. NOTE(review): `json` is imported but unused here.
parser = argparse.ArgumentParser(description = 'pull data for softlayer queue' )
parser.add_argument( 'measurement' , help = 'measurement001' )
args = parser.parse_args()
# NOTE(review): positional client arguments presumed to be
# (host, port, user, password, database) — confirm against the installed
# influxdb client version.
client_influxdb = InfluxDBClient('50.23.117.76', '8086', 'cricket', 'cricket', 'cricket_data')
query = 'SELECT "data_center", "device", "value" FROM "cricket_data"."cricket_retention".'+args.measurement+' WHERE time > now() - 10m order by time'
result = client_influxdb.query(query)
for r in result:
    i = 0
    # NOTE(review): assumes each element of `r` unpacks into exactly these
    # four keys and that `r` supports positional indexing — confirm
    # against the client's ResultSet behaviour.
    for data_center, device, value, time in r:
        print args.measurement,'\t',r[i][data_center],'\t',r[i][device],'\t',r[i][time],'\t',r[i][value]
        i += 1
| 34.47619 | 149 | 0.705801 |
6021d213fcca1b9fd94f8cf2d534f74eefae66dc | 3,522 | py | Python | src/python/pants/backend/docker/lint/hadolint/subsystem.py | xyzst/pants | d6a357fe67ee7e8e1aefeae625e107f5609f1717 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/docker/lint/hadolint/subsystem.py | xyzst/pants | d6a357fe67ee7e8e1aefeae625e107f5609f1717 | [
"Apache-2.0"
] | 28 | 2021-12-27T15:53:46.000Z | 2022-03-23T11:01:42.000Z | src/python/pants/backend/docker/lint/hadolint/subsystem.py | riisi/pants | b33327389fab67c47b919710ea32f20ca284b1a6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from typing import cast
from pants.core.util_rules.config_files import ConfigFilesRequest
from pants.core.util_rules.external_tool import TemplatedExternalTool
from pants.option.custom_types import file_option, shell_str
| 35.938776 | 122 | 0.615559 |
60226c7d97ac7aadd65011be5f070784ee3088d9 | 8,504 | py | Python | venv/lib/python3.9/site-packages/biorun/fetch.py | LucaCilibrasi/docker_viruclust | 88149c17fd4b94a54397d0cb4a9daece00122c49 | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.9/site-packages/biorun/fetch.py | LucaCilibrasi/docker_viruclust | 88149c17fd4b94a54397d0cb4a9daece00122c49 | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.9/site-packages/biorun/fetch.py | LucaCilibrasi/docker_viruclust | 88149c17fd4b94a54397d0cb4a9daece00122c49 | [
"Apache-2.0"
] | null | null | null | """
Handles functionality related to data storege.
"""
import sys, os, glob, re, gzip, json
from biorun import const, utils, objects, ncbi
from biorun.models import jsonrec
import biorun.libs.placlib as plac
# Module level logger.
logger = utils.logger
# A nicer error message on incorrect installation.
try:
from Bio import SeqIO
except ImportError as exc:
print(f"*** Error: {exc}", file=sys.stderr)
print(f"*** This program requires biopython", file=sys.stderr)
print(f"*** Install: conda install -y biopython>=1.78", file=sys.stderr)
sys.exit(-1)
def resolve_fname(name, format='json'):
    """
    Resolve a file name given an accession number.

    The file lives in the package data directory and is named
    ``<name>.<format>.gz`` (extension lower-cased).
    """
    basename = f"{name}.{format.lower()}.gz"
    return os.path.join(utils.DATADIR, basename)
def delete_data(text):
    """
    Deletes data under a filename.

    `text` is a comma separated list of accession names; each resolved JSON
    cache file is removed if present, otherwise a message is logged.
    """
    names = text.split(",")
    for name in names:
        fname = resolve_fname(name)
        if not os.path.isfile(fname):
            logger.info(f"file does not exist: {fname}")
            continue
        os.remove(fname)
        logger.info(f"removed: {fname}")
def read_json_file(fname):
    """
    Returns the content of a JSON file.

    Parameters:
        fname: path to a (gzip-compressed) JSON file readable via utils.gz_read.

    Returns:
        The deserialized JSON content.
    """
    fp = utils.gz_read(fname)
    try:
        data = json.load(fp)
    finally:
        # Close the handle even if the JSON is malformed; the original code
        # leaked the file object when json.load raised.
        fp.close()
    return data
def save_json_file(fname, data):
    """
    Serializes `data` to a (gzip-compressed) JSON file and returns it.

    Parameters:
        fname: destination path, opened via utils.gz_write.
        data: JSON-serializable object to store.

    Returns:
        The `data` object unchanged, so calls can be chained.
    """
    fp = utils.gz_write(fname)
    try:
        json.dump(data, fp)
    finally:
        # Close the handle even if serialization fails; the original code
        # leaked the file object on error (and its docstring described
        # reading, a copy-paste mistake).
        fp.close()
    logger.info(f"saved {fname}")
    return data
def change_seqid(json_name, seqid):
    """
    Changes the sequence id stored in a json file.

    Parameters:
        json_name: path to the (gzip-compressed) JSON cache file to rewrite.
        seqid: new sequence id written into every record under const.SEQID.

    Silently does nothing when the file does not exist.
    """
    if not os.path.isfile(json_name):
        return
    data = read_json_file(json_name)
    for item in data:
        item[const.SEQID] = seqid
    fp = utils.gz_write(json_name)
    try:
        json.dump(data, fp)
    finally:
        # Ensure the handle is closed even if serialization fails.
        fp.close()
def fetch_data(data, param):
    """
    Obtains data from NCBI. Fills each parameter with a json field.

    Parameters:
        data: iterable of accession / assembly names to fetch.
        param: run parameters; uses .protein, .update and .seqid.
    """
    db = "protein" if param.protein else "nuccore"
    # Ensure json DB is built
    ncbi.build_db()
    genbank, taxon_acc, refseq = ncbi.get_data()
    for name in data:
        # Pretend no data if it is an update.
        # (named `found` instead of `json` to avoid shadowing the json module)
        found = None if param.update else get_json(name)
        # The data exists, nothing needs to be done.
        if found:
            continue
        # The JSON representation of the data.
        json_name = resolve_fname(name=name, format="json")
        # GenBank representation of the data.
        gbk_name = resolve_fname(name=name, format="gb")
        # Genome assembly data.
        if name.startswith(("GCA", "GCF")):
            ncbi.genome(name=name, fname=gbk_name, update=param.update,
                        genbank=genbank, refseq=refseq)
        else:
            # Genbank data.
            ncbi.genbank_save(name, db=db, fname=gbk_name)
        # Convert Genbank to JSON. Use a fresh local name instead of rebinding
        # the `data` argument that is still being iterated over.
        records = jsonrec.parse_file(fname=gbk_name, seqid=param.seqid)
        # Save JSON file.
        save_json_file(fname=json_name, data=records)
def get_json(name, seqid=None, inter=False, strict=False):
    """
    Attempts to return a JSON formatted data based on a name.

    Lookup order: an existing file path, a cached JSON file in the data
    directory, a cached GenBank file (converted and re-cached as JSON), and
    finally -- when `inter` is True -- a record built from the name itself.
    Returns None when nothing is found, unless `strict` is True, in which
    case an error is raised via utils.error.
    """
    # Data is an existing path to a JSON file.
    if os.path.isfile(name):
        try:
            data = jsonrec.parse_file(name, seqid=seqid)
        except Exception as exc:
            # A broken file is fatal: report and exit rather than continue.
            logger.error(f"JSON parsing error for file {name}: {exc}")
            sys.exit(-1)
        return data
    # The JSON representation of the data.
    json_name = resolve_fname(name=name, format="json")
    # GenBank representation of the data.
    gbk_name = resolve_fname(name=name, format="gb")
    # Found the JSON representation of the file.
    if os.path.isfile(json_name):
        logger.info(f"found {json_name}")
        data = read_json_file(json_name)
        return data
    # There is no JSON file but there is a GenBank file.
    if os.path.isfile(gbk_name):
        logger.info(f"found {gbk_name}")
        data = jsonrec.parse_file(fname=gbk_name, seqid=seqid)
        # Cache the converted result so the next lookup hits the JSON branch.
        data = save_json_file(fname=json_name, data=data)
        return data
    # Interactive input, make JSON from name
    if inter:
        data = jsonrec.make_jsonrec(name, seqid=seqid)
        return data
    # Raise error if in strict mode
    if strict:
        utils.error(f"data not found: {name}")
    return None
def rename_data(data, param, newname=None):
    """
    Rename data.

    Pairs each name in `data` with the comma separated names in `newname`,
    renaming the cached JSON file and symlinking the GenBank file. Extra
    names on either side are ignored (zip truncates to the shorter list).

    Parameters:
        data: iterable of current accession names.
        param: run parameters; when param.seqid is set the renamed JSON
            records get their sequence id rewritten.
        newname: comma separated list of new names.
    """
    # The original code crashed with AttributeError when newname was left at
    # its default None; treat that as "nothing to rename" instead.
    if not newname:
        logger.info("no new name provided, nothing to rename")
        return
    # Will only rename a single data
    newnames = newname.split(",")
    for name1, name2 in zip(data, newnames):
        src_json = resolve_fname(name=name1, format="json")
        dest_json = resolve_fname(name=name2, format="json")
        src_gb = resolve_fname(name=name1, format="gb")
        dest_gb = resolve_fname(name=name2, format="gb")
        if os.path.isfile(src_json):
            logger.info(f"renamed {name1} as {name2}")
            os.rename(src_json, dest_json)
            if param.seqid:
                change_seqid(dest_json, seqid=param.seqid)
        else:
            logger.info(f"file not found: {src_json}")
        if os.path.isfile(src_gb):
            # Keep the GenBank copy reachable under the new name via symlink.
            if not os.path.isfile(dest_gb):
                os.symlink(src_gb, dest_gb)
        else:
            logger.info(f"file not found: {src_gb}")
def print_data_list():
    """
    Returns a list of the files in the data directory

    Prints one tab separated line per cached ``*.json.gz`` file: human
    readable size, accession name, and a (truncated) definition string
    scraped from the start of the JSON text.
    """
    pattern = os.path.join(os.path.join(utils.DATADIR, '*.json.gz'))
    matched = glob.glob(pattern)
    # Extract the definition from the JSON without parsing it.
    patt = re.compile(r'(definition\":\s*)(?P<value>\".+?\")')
    collect = []
    for path in matched:
        fsize = utils.human_size(os.path.getsize(path))
        base, fname = os.path.split(path)
        # Drop the trailing ".json.gz" to recover the accession name.
        fname = fname.rsplit(".", maxsplit=2)[0]
        # Parse the first N lines
        stream = gzip.open(path, 'rt') if path.endswith('gz') else open(path, 'rt')
        text = stream.read(1000)
        match = patt.search(text)
        title = match.group("value") if match else ''
        title = title.strip('", ')
        # Trim the title
        stitle = title[:100]
        stitle = stitle + "..." if len(title) != len(stitle) else stitle
        collect.append((str(fsize), f"{fname:10s}", stitle))
    # Sort rows by the definition text for readable output.
    collect = sorted(collect, key=lambda x: x[2])
    for row in collect:
        line = "\t".join(row)
        print(line)
| 28.441472 | 94 | 0.61477 |
6022c4c8c548f73dbd95a825913c8b4639f2e4dc | 1,049 | py | Python | game/items/game_item.py | LaverdeS/Genetic_Algorithm_EGame | 89ff8c7870fa90768f4616cab6803227c8613396 | [
"MIT"
] | 2 | 2019-07-02T15:20:46.000Z | 2020-03-04T13:31:12.000Z | game/items/game_item.py | shivaa511/EGame | 6db10cb5cf7431093d2ab09a9e4049d6633fe792 | [
"MIT"
] | 2 | 2019-07-16T16:50:19.000Z | 2020-03-04T12:52:45.000Z | game/items/game_item.py | shivaa511/EGame | 6db10cb5cf7431093d2ab09a9e4049d6633fe792 | [
"MIT"
] | 8 | 2018-06-06T15:14:48.000Z | 2018-07-08T11:46:10.000Z | import numpy as np
from random import randint
from PyQt5.QtGui import QImage
from PyQt5.QtCore import QPointF
| 37.464286 | 77 | 0.611058 |
6022d662d09b473f63deec188827d3c36ba79479 | 6,750 | py | Python | source/deepsecurity/models/application_type_rights.py | felipecosta09/cloudone-workload-controltower-lifecycle | 7927c84d164058b034fc872701b5ee117641f4d1 | [
"Apache-2.0"
] | 1 | 2021-10-30T16:40:09.000Z | 2021-10-30T16:40:09.000Z | source/deepsecurity/models/application_type_rights.py | felipecosta09/cloudone-workload-controltower-lifecycle | 7927c84d164058b034fc872701b5ee117641f4d1 | [
"Apache-2.0"
] | 1 | 2021-07-28T20:19:03.000Z | 2021-07-28T20:19:03.000Z | source/deepsecurity/models/application_type_rights.py | felipecosta09/cloudone-workload-controltower-lifecycle | 7927c84d164058b034fc872701b5ee117641f4d1 | [
"Apache-2.0"
] | 1 | 2021-10-30T16:40:02.000Z | 2021-10-30T16:40:02.000Z | # coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`: delegates to :meth:`to_str`."""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Only instances of the same generated model class can compare equal.
        if not isinstance(other, ApplicationTypeRights):
            return False
        # Attribute-wise comparison via the instance dictionaries.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        # Defined in terms of == so the two operators stay consistent.
        return not self == other
| 38.571429 | 311 | 0.663556 |
60254d5cf06d095bd8f90781b32cfb0d4a95c6e4 | 3,900 | py | Python | code-samples/aws_neptune.py | hardikvasa/database-journal | 7932b5a7fe909f8adb3a909183532b43d450da7b | [
"MIT"
] | 45 | 2019-06-07T07:12:09.000Z | 2022-03-20T19:58:53.000Z | code-samples/aws_neptune.py | hardikvasa/database-journal | 7932b5a7fe909f8adb3a909183532b43d450da7b | [
"MIT"
] | 1 | 2019-06-09T17:23:05.000Z | 2019-06-10T18:36:20.000Z | code-samples/aws_neptune.py | hardikvasa/database-journal | 7932b5a7fe909f8adb3a909183532b43d450da7b | [
"MIT"
] | 15 | 2019-06-07T07:12:12.000Z | 2022-01-02T01:09:53.000Z | from __future__ import print_function # Python 2/3 compatibility
from gremlin_python import statics
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.strategies import *
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
#initializing the graph object
graph = Graph()
#creating connection with the remote
remoteConn = DriverRemoteConnection('wss://<endpoint>:8182/gremlin','g')
g = graph.traversal().withRemote(DriverRemoteConnection('wss://<endpoint>:8182/gremlin','g'))
print('Connection created.')
#clearing out all the vertices to start fresh
g.V().drop().iterate()
print('Deleting everything and starting clean.')
#Adding some vertices (nodes)
gerald = g.addV('person').property('age','81').property('first_name','Gerald').property('stays_in','Portland').next()
edith = g.addV('person').property('age','78').property('first_name','Edith').property('stays_in','Portland').next()
peter = g.addV('person').property('age','52').property('first_name','Shane').property('stays_in','Seattle').next()
mary = g.addV('person').property('age','50').property('first_name','Mary').property('stays_in','Seattle').next()
betty = g.addV('person').property('age','19').property('first_name','Betty').property('stays_in','Chicago').next()
print('Added some vertices (nodes).')
#Adding relationships (edges)
edge = g.V().has('first_name', 'Gerald').addE('husband_of').to(g.V().has('first_name', 'Edith')).property('married_since','1947').next()
edge = g.V().has('first_name', 'Edith').addE('wife_of').to(g.V().has('first_name', 'Gerald')).property('married_since','1947').next()
edge = g.V().has('first_name', 'Shane').addE('son_of').to(g.V().has('first_name', 'Gerald')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Gerald').addE('father_of').to(g.V().has('first_name', 'Shane')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Shane').addE('son_of').to(g.V().has('first_name', 'Edith')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Edith').addE('mother_of').to(g.V().has('first_name', 'Shane')).property('known_since','1964').next()
edge = g.V().has('first_name', 'Shane').addE('husband_of').to(g.V().has('first_name', 'Mary')).property('known_since','1989').next()
edge = g.V().has('first_name', 'Mary').addE('wife_of').to(g.V().has('first_name', 'Shane')).property('known_since','1989').next()
edge = g.V().has('first_name', 'Shane').addE('father_of').to(g.V().has('first_name', 'Betty')).property('known_since','1991').next()
edge = g.V().has('first_name', 'Betty').addE('daughter_of').to(g.V().has('first_name', 'Shane')).property('known_since','1991').next()
edge = g.V().has('first_name', 'Mary').addE('mother_of').to(g.V().has('first_name', 'Betty')).property('known_since','1991').next()
edge = g.V().has('first_name', 'Betty').addE('daughter_of').to(g.V().has('first_name', 'Mary')).property('known_since','1991').next()
#print out all the node's first names
print('\n Printing first name from all nodes:')
print(g.V().first_name.toList())
#print out all the properties of person whose's first name is Shane
print('\n Printing all properties of person whose first name is Shane:')
print(g.V().has('person','first_name','Shane').valueMap().next())
#traversing the graph starting with Betty to then Shane to then Edith
print('\n Finding Betty and then looking up her parents:')
print(g.V().has('first_name', 'Betty').out('daughter_of').out('son_of').valueMap().toList())
#Print out all the nodes
print('\n Printing out all the nodes:')
people = g.V().valueMap().toList()
print(people)
#Print out all the connections (edges)
print('\n Print out all the connections (edges):')
connections = g.E().valueMap().toList()
print(connections)
#Closing the connection
remoteConn.close()
print('Connection closed!') | 57.352941 | 136 | 0.704615 |
6025b1cfb25bd8e7710a10ffd3f52c87c8e4a3b7 | 15,045 | py | Python | kits19cnn/io/preprocess_train.py | Ramsha04/kits19-2d-reproduce | 66678f1eda3688d6dc64389e9a80ae0b754a3052 | [
"Apache-2.0"
] | null | null | null | kits19cnn/io/preprocess_train.py | Ramsha04/kits19-2d-reproduce | 66678f1eda3688d6dc64389e9a80ae0b754a3052 | [
"Apache-2.0"
] | null | null | null | kits19cnn/io/preprocess_train.py | Ramsha04/kits19-2d-reproduce | 66678f1eda3688d6dc64389e9a80ae0b754a3052 | [
"Apache-2.0"
] | null | null | null | import os
from os.path import join, isdir
from pathlib import Path
from collections import defaultdict
from tqdm import tqdm
import nibabel as nib
import numpy as np
import json
from .resample import resample_patient
from .custom_augmentations import resize_data_and_seg, crop_to_bbox
def standardize_per_image(image):
    """
    Z-score standardization per image.

    Subtracts the image's own mean and divides by its own standard deviation,
    so the output has zero mean and unit variance (assumes a non-constant
    image; a constant image would divide by zero).
    """
    return (image - image.mean()) / image.std()
def parse_slice_idx_to_str(slice_idx):
    """
    Parse the slice index to a three digit string for saving and reading the
    2D .npy files generated by io.preprocess.Preprocessor.
    Naming convention: {type of slice}_{case}_{slice_idx}
    * adding 0s to slice_idx until it reaches 3 digits,
    * so sorting files is easier when stacking
    """
    # zfill pads with leading zeros up to width 3, same as the f"{i:03}" spec.
    return str(slice_idx).zfill(3)
| 43.482659 | 97 | 0.571685 |
6026a153525e13fa3c171bca805b17cf817349e3 | 1,558 | py | Python | setup.py | opywan/calm-dsl | 1d89436d039a39265a0ae806022be5b52e757ac0 | [
"Apache-2.0"
] | null | null | null | setup.py | opywan/calm-dsl | 1d89436d039a39265a0ae806022be5b52e757ac0 | [
"Apache-2.0"
] | null | null | null | setup.py | opywan/calm-dsl | 1d89436d039a39265a0ae806022be5b52e757ac0 | [
"Apache-2.0"
] | null | null | null | import sys
import setuptools
from setuptools.command.test import test as TestCommand
# Package definition for the calm.dsl namespace package.
setuptools.setup(
    name="calm.dsl",
    version="0.9.0-alpha",
    author="Nutanix",
    author_email="nucalm@nutanix.com",
    description="Calm DSL for blueprints",
    # Long description rendered from the README on PyPI.
    long_description=read_file("README.md"),
    long_description_content_type="text/markdown",
    url="https://github.com/nutanix/calm-dsl",
    # Ship every package under the `calm.*` namespace.
    packages=setuptools.find_namespace_packages(include=["calm.*"]),
    namespace_packages=["calm"],
    # Runtime / test dependencies are read from the requirements files.
    install_requires=read_file("requirements.txt"),
    tests_require=read_file("dev-requirements.txt"),
    # `python setup.py test` runs the pytest wrapper defined above.
    cmdclass={"test": PyTest},
    zip_safe=False,
    include_package_data=True,
    # Installs the `calm` console command.
    entry_points={"console_scripts": ["calm=calm.dsl.cli:main"]},
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.7",
    ],
)
| 28.327273 | 68 | 0.649551 |
6026e4bb115c40518d8be86f2973d4fb63be08f1 | 2,019 | py | Python | hanlp/pretrained/tok.py | chen88358323/HanLP | ee9066c3b7aad405dfe0ccffb7f66c59017169ae | [
"Apache-2.0"
] | 2 | 2022-03-23T08:50:39.000Z | 2022-03-23T08:50:48.000Z | hanlp/pretrained/tok.py | kingfan1998/HanLP | ee9066c3b7aad405dfe0ccffb7f66c59017169ae | [
"Apache-2.0"
] | null | null | null | hanlp/pretrained/tok.py | kingfan1998/HanLP | ee9066c3b7aad405dfe0ccffb7f66c59017169ae | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-28 21:12
from hanlp_common.constant import HANLP_URL
SIGHAN2005_PKU_CONVSEG = HANLP_URL + 'tok/sighan2005-pku-convseg_20200110_153722.zip'
'Conv model (:cite:`wang-xu-2017-convolutional`) trained on sighan2005 pku dataset.'
SIGHAN2005_MSR_CONVSEG = HANLP_URL + 'tok/convseg-msr-nocrf-noembed_20200110_153524.zip'
'Conv model (:cite:`wang-xu-2017-convolutional`) trained on sighan2005 msr dataset.'
CTB6_CONVSEG = HANLP_URL + 'tok/ctb6_convseg_nowe_nocrf_20200110_004046.zip'
'Conv model (:cite:`wang-xu-2017-convolutional`) trained on CTB6 dataset.'
PKU_NAME_MERGED_SIX_MONTHS_CONVSEG = HANLP_URL + 'tok/pku98_6m_conv_ngram_20200110_134736.zip'
'Conv model (:cite:`wang-xu-2017-convolutional`) trained on pku98 six months dataset with familiy name and given name merged into one unit.'
LARGE_ALBERT_BASE = HANLP_URL + 'tok/large_corpus_cws_albert_base_20211228_160926.zip'
'ALBERT model (:cite:`Lan2020ALBERT:`) trained on the largest CWS dataset in the world.'
SIGHAN2005_PKU_BERT_BASE_ZH = HANLP_URL + 'tok/sighan2005_pku_bert_base_zh_20201231_141130.zip'
'BERT model (:cite:`devlin-etal-2019-bert`) trained on sighan2005 pku dataset.'
COARSE_ELECTRA_SMALL_ZH = HANLP_URL + 'tok/coarse_electra_small_20220220_013548.zip'
'Electra (:cite:`clark2020electra`) small model trained on coarse-grained CWS corpora. Its performance is P=96.97% R=96.87% F1=96.92% which is ' \
'much higher than that of MTL model '
FINE_ELECTRA_SMALL_ZH = HANLP_URL + 'tok/fine_electra_small_20220217_190117.zip'
'Electra (:cite:`clark2020electra`) small model trained on fine-grained CWS corpora. Its performance is P=97.44% R=97.40% F1=97.42% which is ' \
'much higher than that of MTL model '
CTB9_TOK_ELECTRA_SMALL = HANLP_URL + 'tok/ctb9_electra_small_20220215_205427.zip'
'Electra (:cite:`clark2020electra`) small model trained on CTB9. Its performance is P=97.15% R=97.36% F1=97.26% which is ' \
'much higher than that of MTL model '
# Will be filled up during runtime
ALL = {}
| 67.3 | 146 | 0.788014 |
60285f227b486baa95c5fb739b65a5f1c6ce6e02 | 3,364 | py | Python | third_party/webrtc/src/chromium/src/tools/swarming_client/tests/logging_utils_test.py | bopopescu/webrtc-streaming-node | 727a441204344ff596401b0253caac372b714d91 | [
"MIT"
] | 8 | 2016-02-08T11:59:31.000Z | 2020-05-31T15:19:54.000Z | third_party/webrtc/src/chromium/src/tools/swarming_client/tests/logging_utils_test.py | bopopescu/webrtc-streaming-node | 727a441204344ff596401b0253caac372b714d91 | [
"MIT"
] | 1 | 2021-05-05T11:11:31.000Z | 2021-05-05T11:11:31.000Z | third_party/webrtc/src/chromium/src/tools/swarming_client/tests/logging_utils_test.py | bopopescu/webrtc-streaming-node | 727a441204344ff596401b0253caac372b714d91 | [
"MIT"
] | 7 | 2016-02-09T09:28:14.000Z | 2020-07-25T19:03:36.000Z | #!/usr/bin/env python
# Copyright 2015 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
import logging
import os
import subprocess
import sys
import tempfile
import shutil
import unittest
import re
THIS_FILE = os.path.abspath(__file__)
sys.path.insert(0, os.path.dirname(os.path.dirname(THIS_FILE)))
from utils import logging_utils
# PID YYYY-MM-DD HH:MM:SS.MMM
_LOG_HEADER = r'^%d \d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d' % os.getpid()
_LOG_HEADER_PID = r'^\d+ \d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d'
_PHASE = 'LOGGING_UTILS_TESTS_PHASE'
def call(phase, cwd):
"""Calls itself back."""
env = os.environ.copy()
env[_PHASE] = phase
return subprocess.call([sys.executable, '-u', THIS_FILE], env=env, cwd=cwd)
if __name__ == '__main__':
sys.exit(main())
| 28.508475 | 80 | 0.67063 |
6029de67c839bfcae337c354721a055f1b81107e | 2,452 | py | Python | model_selection.py | HrishikV/ineuron_inceome_prediction_internship | 4a97a7f29d80198f394fcfd880cc5250fe2a0d1e | [
"MIT"
] | null | null | null | model_selection.py | HrishikV/ineuron_inceome_prediction_internship | 4a97a7f29d80198f394fcfd880cc5250fe2a0d1e | [
"MIT"
] | null | null | null | model_selection.py | HrishikV/ineuron_inceome_prediction_internship | 4a97a7f29d80198f394fcfd880cc5250fe2a0d1e | [
"MIT"
] | null | null | null | from featur_selection import df,race,occupation,workclass,country
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score,KFold
from sklearn.linear_model import LogisticRegression
from imblearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from imblearn.combine import SMOTETomek
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from catboost import CatBoostClassifier
from xgboost import XGBClassifier
from sklearn.svm import SVC
from matplotlib import pyplot as plt
import seaborn as sns
df1=df.copy()
salary=df1['salary'].reset_index(drop=True)
df1=df1.drop(['salary'],axis=1)
df1= concat_dataframes(df1)
features=['age_logarthmic','hours_per_week']
scaler = ColumnTransformer(transformers = [('scale_num_features', StandardScaler(), features)], remainder='passthrough')
models = [LogisticRegression(), SVC(), AdaBoostClassifier(), RandomForestClassifier(), XGBClassifier(),DecisionTreeClassifier(), KNeighborsClassifier(), CatBoostClassifier()]
model_labels = ['LogisticReg.','SVC','AdaBoost','RandomForest','Xgboost','DecisionTree','KNN', 'CatBoost']
mean_validation_f1_scores = []
for model in models:
data_pipeline = Pipeline(steps = [
('scaler', scaler),
('resample', SMOTETomek()),
('model', model)
])
mean_validation_f1 = float(cross_val_score(data_pipeline, df1, salary, cv=KFold(n_splits=10), scoring='f1',n_jobs=-1).mean())
mean_validation_f1_scores.append(mean_validation_f1)
print(mean_validation_f1_scores)
fig, axes = plt.subplots(nrows = 2, ncols = 1, figsize = (15,8))
sns.set_style('dark')
sns.barplot(y = model_labels ,x = mean_validation_f1_scores, ax=axes[0])
axes[0].grid(True, color='k')
sns.set_style('whitegrid')
sns.lineplot(x = model_labels, y = mean_validation_f1_scores)
axes[1].grid(True, color='k')
fig.show() | 45.407407 | 240 | 0.722675 |
602aa7539d103136a63769ed24a86373824abc5f | 76 | py | Python | tests/apps/newlayout/tasks/init_data.py | blazelibs/blazeweb | b120a6a2e38c8b53da2b73443ff242e2d1438053 | [
"BSD-3-Clause"
] | null | null | null | tests/apps/newlayout/tasks/init_data.py | blazelibs/blazeweb | b120a6a2e38c8b53da2b73443ff242e2d1438053 | [
"BSD-3-Clause"
] | 6 | 2016-11-01T18:42:34.000Z | 2020-11-16T16:52:14.000Z | tests/apps/newlayout/tasks/init_data.py | blazelibs/blazeweb | b120a6a2e38c8b53da2b73443ff242e2d1438053 | [
"BSD-3-Clause"
] | 1 | 2020-01-22T18:20:46.000Z | 2020-01-22T18:20:46.000Z | from __future__ import print_function
| 12.666667 | 37 | 0.736842 |
602b781497fe10bfa361f38ffbff943242a02399 | 3,392 | py | Python | 2021/d8b_bits.py | apie/advent-of-code | c49abec01b044166a688ade40ebb1e642f0e5ce0 | [
"MIT"
] | 4 | 2018-12-04T23:33:46.000Z | 2021-12-07T17:33:27.000Z | 2021/d8b_bits.py | apie/advent-of-code | c49abec01b044166a688ade40ebb1e642f0e5ce0 | [
"MIT"
] | 17 | 2018-12-12T23:32:09.000Z | 2020-01-04T15:50:31.000Z | 2021/d8b_bits.py | apie/advent-of-code | c49abec01b044166a688ade40ebb1e642f0e5ce0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import pytest
import fileinput
from os.path import splitext, abspath
F_NAME = 'd8'
#implement day8 using bits
def find_ones(d):
    """Count the number of set bits (ones) in the binary representation of d.

    Args:
        d: a non-negative integer.

    Returns:
        The population count of ``d``.
    """
    # bin() renders the number once and we count the '1' characters. This
    # replaces the manual shift loop, which additionally never terminated
    # for negative inputs (d >> 1 on a negative int stays negative forever).
    return bin(d).count("1")
# Assign each segment a 'wire'.
# Seven-segment encoding: each segment letter 'a'-'g' gets its own one-hot
# bit, so any set of lit segments packs into a single 7-bit integer mask.
lut = {
'a':0b0000001,
'b':0b0000010,
'c':0b0000100,
'd':0b0001000,
'e':0b0010000,
'f':0b0100000,
'g':0b1000000,
}
if __name__ == '__main__':
    import timeit
    # Time the whole solve, including reading the puzzle input file.
    start = timeit.default_timer()
    filename = fileinput.input(F_NAME + '.input')
    # `answer` is expected to be defined earlier in this module -- it is not
    # visible in this chunk; confirm against the full file.
    ans = answer(filename)
    print('Answer:', ans)
    duration = timeit.default_timer()-start
    print(f'Execution time: {duration:.3f} s')
| 26.294574 | 122 | 0.571934 |
602c28a9205e1c1670c905a216255ec8e326af0a | 8,931 | py | Python | frame_dataloader/spatial_dataloader.py | rizkiailham/two-stream-action-recognition-1 | 01221f668e62eb26e3593f4ecd3f257b6b6979ab | [
"Apache-2.0"
] | 67 | 2019-01-02T11:42:44.000Z | 2022-03-24T02:46:39.000Z | frame_dataloader/spatial_dataloader.py | rizkiailham/two-stream-action-recognition-1 | 01221f668e62eb26e3593f4ecd3f257b6b6979ab | [
"Apache-2.0"
] | 10 | 2019-02-06T17:12:23.000Z | 2021-11-10T08:05:27.000Z | frame_dataloader/spatial_dataloader.py | rizkiailham/two-stream-action-recognition-1 | 01221f668e62eb26e3593f4ecd3f257b6b6979ab | [
"Apache-2.0"
] | 25 | 2019-04-03T19:25:41.000Z | 2021-11-22T16:34:15.000Z | """
********************************
* Created by mohammed-alaa *
********************************
Spatial Dataloader implementing sequence api from keras (defines how to load a single item)
this loads batches of images for each iteration it returns [batch_size, height, width ,3] ndarrays
"""
import copy
import random
import cv2
import numpy as np
import tensorflow.keras as keras
from .UCF_splitting_kernel import *
from .helpers import get_training_augmenter, get_validation_augmenter
if __name__ == '__main__':
data_loader = SpatialDataLoader(batch_size=64, use_multiprocessing=True, # data_root_path="data",
ucf_split='01',
testing_samples_per_video=19, width=224, height=224, num_workers=2)
train_loader, test_loader, test_video_level_label = data_loader.run()
print(len(train_loader))
print(len(test_loader))
print(train_loader.get_actual_length())
print(test_loader.get_actual_length())
print(train_loader.sequence[0][0].shape, train_loader.sequence[0][1].shape)
print(train_loader[0][0].shape, train_loader[0][1].shape)
# import tqdm
# progress = tqdm.tqdm(train_loader.get_epoch_generator(), total=len(train_loader))
# for (sampled_frame, label) in progress:
# pass
import matplotlib.pyplot as plt
# preview raw data
print("train sample")
for batch in train_loader.get_epoch_generator():
print(batch[0].shape, batch[1].shape)
print(batch[1])
preview(batch[0], batch[1])
break
print("test sample") # same name will be displayed testing_samples_per_video with no shuffling
for batch in test_loader.get_epoch_generator():
print(batch[1].shape, batch[2].shape)
print(batch[0], batch[2])
preview(batch[1], batch[2])
break
| 42.127358 | 211 | 0.647744 |
602c64e6002e7e17025a13776dc2c4562e176aca | 1,593 | py | Python | dianhua/worker/crawler/china_mobile/hunan/base_request_param.py | Svolcano/python_exercise | a50e05891cc7f1fbb40ebcae324b09b6a14473d2 | [
"MIT"
] | 6 | 2015-07-09T08:47:08.000Z | 2020-05-16T10:47:31.000Z | dianhua/worker/crawler/china_mobile/hunan/base_request_param.py | Svolcano/python_exercise | a50e05891cc7f1fbb40ebcae324b09b6a14473d2 | [
"MIT"
] | 7 | 2019-03-27T04:13:12.000Z | 2022-03-02T14:54:56.000Z | dianhua/worker/crawler/china_mobile/hunan/base_request_param.py | Svolcano/python_exercise | a50e05891cc7f1fbb40ebcae324b09b6a14473d2 | [
"MIT"
] | 2 | 2019-06-21T06:46:28.000Z | 2019-12-23T09:31:09.000Z | # -*- coding:utf-8 -*-
"""
@version: v1.0
@author: xuelong.liu
@license: Apache Licence
@contact: xuelong.liu@yulore.com
@software: PyCharm
@file: base_request_param.py
@time: 12/21/16 6:48 PM
"""
| 49.78125 | 183 | 0.756434 |
602c73ce30543054207480d8bbb3a3dcd0069abc | 2,762 | py | Python | day02/puzzle2.py | jack-beach/AdventOfCode2019 | a8ac53eaf03cd7595deb2a9aa798a2d17c21c513 | [
"MIT"
] | null | null | null | day02/puzzle2.py | jack-beach/AdventOfCode2019 | a8ac53eaf03cd7595deb2a9aa798a2d17c21c513 | [
"MIT"
] | 1 | 2019-12-05T19:21:46.000Z | 2019-12-05T19:21:46.000Z | day02/puzzle2.py | jack-beach/AdventOfCode2019 | a8ac53eaf03cd7595deb2a9aa798a2d17c21c513 | [
"MIT"
] | 1 | 2019-12-05T18:05:54.000Z | 2019-12-05T18:05:54.000Z | # stdlib imports
import copy
# vendor imports
import click
# Execute cli function on main
if __name__ == "__main__":
main()
| 29.073684 | 77 | 0.542723 |
602d85326ffa11df7e1d924f6cb4bf41ac71b284 | 984 | py | Python | install.py | X-lab-3D/PANDORA | 02912a03022e814ff8e0ae8ec52f5075f0e2e381 | [
"Apache-2.0"
] | null | null | null | install.py | X-lab-3D/PANDORA | 02912a03022e814ff8e0ae8ec52f5075f0e2e381 | [
"Apache-2.0"
] | 1 | 2022-03-14T19:51:26.000Z | 2022-03-14T19:51:26.000Z | install.py | X-lab-3D/PANDORA | 02912a03022e814ff8e0ae8ec52f5075f0e2e381 | [
"Apache-2.0"
] | null | null | null | import os
# Directory tree PANDORA expects to exist before anything runs.
dirs = [
    './PANDORA_files', './PANDORA_files/data', './PANDORA_files/data/csv_pkl_files',
    './PANDORA_files/data/csv_pkl_files/mhcseqs', './PANDORA_files/data/PDBs',
    './PANDORA_files/data/PDBs/pMHCI', './PANDORA_files/data/PDBs/pMHCII',
    './PANDORA_files/data/PDBs/Bad', './PANDORA_files/data/PDBs/Bad/pMHCI',
    './PANDORA_files/data/PDBs/Bad/pMHCII', './PANDORA_files/data/PDBs/IMGT_retrieved',
    './PANDORA_files/data/outputs',
    './test/test_data/PDBs/Bad', './test/test_data/PDBs/Bad/pMHCI',
    './test/test_data/PDBs/Bad/pMHCII', './test/test_data/csv_pkl_files'
]

for directory in dirs:
    try:
        os.mkdir(directory)
    except OSError:
        # Already exists or parent is missing: report and keep going.
        print('Could not make directory: ' + directory)
# Install dependenciess
# os.popen("alias KEY_MODELLER='XXXX'").read()
# os.popen("conda install -y -c salilab modeller").read()
# os.popen("conda install -y -c bioconda muscle").read()
# os.popen("pip install -e ./").read()
| 35.142857 | 91 | 0.646341 |
602de82ea89f13dcd9f29b60fb46750634f30aed | 7,711 | py | Python | app/auth/views.py | MainaKamau92/apexselftaught | 9f9a3bd1ba23e57a12e173730917fb9bb7003707 | [
"MIT"
] | 4 | 2019-01-02T19:52:00.000Z | 2022-02-21T11:07:34.000Z | app/auth/views.py | MainaKamau92/apexselftaught | 9f9a3bd1ba23e57a12e173730917fb9bb7003707 | [
"MIT"
] | 2 | 2019-12-04T13:36:54.000Z | 2019-12-04T13:49:21.000Z | app/auth/views.py | MainaKamau92/apexselftaught | 9f9a3bd1ba23e57a12e173730917fb9bb7003707 | [
"MIT"
] | 1 | 2021-11-28T13:23:14.000Z | 2021-11-28T13:23:14.000Z | # app/auth/views.py
import os
from flask import flash, redirect, render_template, url_for, request
from flask_login import login_required, login_user, logout_user, current_user
from . import auth
from .forms import (LoginForm, RegistrationForm,
RequestResetForm, ResetPasswordForm)
from .. import db, mail
from ..models import User
from flask_mail import Message
from werkzeug.security import generate_password_hash
def send_reset_email(user):
try:
token = user.get_reset_token()
msg = Message('Password Reset Request',
sender='activecodar@gmail.com',
recipients=[user.email])
msg.body = f''' To reset your password visit the following link
{url_for('auth.reset_password', token=token, _external=True)}
If you did not make this request ignore this email
'''
mail.send(msg)
except Exception as e:
print(e)
| 43.8125 | 99 | 0.664765 |
602e5a99d805700346d56a51e68cf804e5858e7b | 6,174 | py | Python | oslo_messaging/_drivers/zmq_driver/client/publishers/zmq_dealer_publisher.py | devendermishrajio/oslo.messaging | 9e5fb5697d3f7259f01e3416af0582090d20859a | [
"Apache-1.1"
] | 1 | 2021-02-17T15:30:45.000Z | 2021-02-17T15:30:45.000Z | oslo_messaging/_drivers/zmq_driver/client/publishers/zmq_dealer_publisher.py | devendermishrajio/oslo.messaging | 9e5fb5697d3f7259f01e3416af0582090d20859a | [
"Apache-1.1"
] | null | null | null | oslo_messaging/_drivers/zmq_driver/client/publishers/zmq_dealer_publisher.py | devendermishrajio/oslo.messaging | 9e5fb5697d3f7259f01e3416af0582090d20859a | [
"Apache-1.1"
] | 2 | 2015-11-03T03:21:55.000Z | 2015-12-01T08:56:14.000Z | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo_messaging._drivers.zmq_driver.client.publishers\
import zmq_publisher_base
from oslo_messaging._drivers.zmq_driver import zmq_async
from oslo_messaging._drivers.zmq_driver import zmq_names
from oslo_messaging._i18n import _LI, _LW
LOG = logging.getLogger(__name__)
zmq = zmq_async.import_zmq()
| 34.49162 | 78 | 0.679462 |
602e5ff210d9605bb2e8229e3fbf0370c704bfb0 | 25,175 | py | Python | coba/environments/filters.py | mrucker/banditbenchmark | 0365291b3a0cf1d862d294e0386d0ccad3f360f1 | [
"BSD-3-Clause"
] | null | null | null | coba/environments/filters.py | mrucker/banditbenchmark | 0365291b3a0cf1d862d294e0386d0ccad3f360f1 | [
"BSD-3-Clause"
] | null | null | null | coba/environments/filters.py | mrucker/banditbenchmark | 0365291b3a0cf1d862d294e0386d0ccad3f360f1 | [
"BSD-3-Clause"
] | null | null | null | import pickle
import warnings
import collections.abc
from math import isnan
from statistics import mean, median, stdev, mode
from abc import abstractmethod, ABC
from numbers import Number
from collections import defaultdict
from itertools import islice, chain
from typing import Hashable, Optional, Sequence, Union, Iterable, Dict, Any, List, Tuple, Callable, Mapping
from coba.backports import Literal
from coba import pipes
from coba.random import CobaRandom
from coba.exceptions import CobaException
from coba.statistics import iqr
from coba.pipes import Flatten
from coba.environments.primitives import Interaction
from coba.environments.logged.primitives import LoggedInteraction
from coba.environments.simulated.primitives import SimulatedInteraction
| 38.259878 | 130 | 0.623952 |
602f71483df50285674a0fe43ba737fee526a84e | 6,553 | py | Python | python/cuml/preprocessing/LabelEncoder.py | egoolish/cuml | 5320eff78890b3e9129e04e13437496c0424820d | [
"Apache-2.0"
] | 7 | 2019-02-26T10:41:09.000Z | 2020-06-17T06:08:57.000Z | python/cuml/preprocessing/LabelEncoder.py | danielhanchen/cuml | fab74ca94fdbc5b49281660ce32a48cfd3d66f46 | [
"Apache-2.0"
] | null | null | null | python/cuml/preprocessing/LabelEncoder.py | danielhanchen/cuml | fab74ca94fdbc5b49281660ce32a48cfd3d66f46 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
import nvcategory
from librmm_cffi import librmm
import numpy as np
def _enforce_str(y: cudf.Series) -> cudf.Series:
''' Ensure that nvcategory is being given strings
'''
if y.dtype != "object":
return y.astype("str")
return y
| 27.649789 | 78 | 0.574546 |
602fc03ac149fa50fb90ef1d0ffd3dc3832e7d14 | 5,054 | py | Python | cleaning.py | jhamrick/cogsci-proceedings-analysis | c3c8b0abd8b9ce639f6de0aea52aec46c2c8abca | [
"MIT"
] | null | null | null | cleaning.py | jhamrick/cogsci-proceedings-analysis | c3c8b0abd8b9ce639f6de0aea52aec46c2c8abca | [
"MIT"
] | null | null | null | cleaning.py | jhamrick/cogsci-proceedings-analysis | c3c8b0abd8b9ce639f6de0aea52aec46c2c8abca | [
"MIT"
] | 1 | 2020-05-11T10:38:38.000Z | 2020-05-11T10:38:38.000Z | import re
import difflib
import pandas as pd
import numpy as np
from nameparser import HumanName
from nameparser.config import CONSTANTS
CONSTANTS.titles.remove("gen")
CONSTANTS.titles.remove("prin")
if __name__ == "__main__":
import graph
papers = pd.read_csv("cogsci_proceedings_raw.csv")
papers['type'] = papers['section'].apply(parse_paper_type)
papers = extract_authors(papers)
G = graph.make_author_graph(papers)
papers, G = fix_author_misspellings(papers, G)
papers.to_csv("cogsci_proceedings.csv", encoding='utf-8')
| 34.616438 | 79 | 0.551049 |
602fe47995203be2cbe5445ca36c210c61dfb7a1 | 384 | py | Python | quem_foi_para_mar_core/migrations/0004_auto_20200811_1945.py | CamilaBodack/template-projeto-selecao | b0a0cf6070bf8abab626a17af5c315c82368b010 | [
"MIT"
] | 1 | 2020-09-01T23:04:07.000Z | 2020-09-01T23:04:07.000Z | quem_foi_para_mar_core/migrations/0004_auto_20200811_1945.py | CamilaBodack/template-projeto-selecao | b0a0cf6070bf8abab626a17af5c315c82368b010 | [
"MIT"
] | 4 | 2020-10-07T18:04:41.000Z | 2020-10-07T18:07:58.000Z | quem_foi_para_mar_core/migrations/0004_auto_20200811_1945.py | CamilaBodack/template-projeto-selecao | b0a0cf6070bf8abab626a17af5c315c82368b010 | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-08-11 19:45
from django.db import migrations
| 20.210526 | 62 | 0.609375 |
6030d536392f2700f6b4fca762988c6115c81681 | 268 | py | Python | examples/tinytag/fuzz.py | MJ-SEO/py_fuzz | 789fbfea21bf644ba4d00554fe4141694b0a190a | [
"Apache-2.0"
] | null | null | null | examples/tinytag/fuzz.py | MJ-SEO/py_fuzz | 789fbfea21bf644ba4d00554fe4141694b0a190a | [
"Apache-2.0"
] | null | null | null | examples/tinytag/fuzz.py | MJ-SEO/py_fuzz | 789fbfea21bf644ba4d00554fe4141694b0a190a | [
"Apache-2.0"
] | null | null | null | from pythonfuzz.main import PythonFuzz
from tinytag import TinyTag
import io
if __name__ == '__main__':
fuzz()
| 14.105263 | 38 | 0.69403 |
6031aa22f48d39d2c1b21d711d722627277b7cfb | 96 | py | Python | venv/lib/python3.8/site-packages/requests/compat.py | GiulianaPola/select_repeats | 17a0d053d4f874e42cf654dd142168c2ec8fbd11 | [
"MIT"
] | 1 | 2022-02-22T04:49:18.000Z | 2022-02-22T04:49:18.000Z | venv/lib/python3.8/site-packages/requests/compat.py | GiulianaPola/select_repeats | 17a0d053d4f874e42cf654dd142168c2ec8fbd11 | [
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/requests/compat.py | GiulianaPola/select_repeats | 17a0d053d4f874e42cf654dd142168c2ec8fbd11 | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/d1/fc/c7/6cbbdf9c58b6591d28ed792bbd7944946d3f56042698e822a2869787f6 | 96 | 96 | 0.895833 |
6031df65367df99733ce016cb9fcdddefa51c5dc | 3,951 | py | Python | examples/python-guide/cross_validation_example.py | StatMixedML/GPBoost | 786d8be61c5c28da0690e167af636a6d777bf9e1 | [
"Apache-2.0"
] | 2 | 2020-04-12T06:12:17.000Z | 2020-04-12T15:34:01.000Z | examples/python-guide/cross_validation_example.py | StatMixedML/GPBoost | 786d8be61c5c28da0690e167af636a6d777bf9e1 | [
"Apache-2.0"
] | null | null | null | examples/python-guide/cross_validation_example.py | StatMixedML/GPBoost | 786d8be61c5c28da0690e167af636a6d777bf9e1 | [
"Apache-2.0"
] | 1 | 2020-04-12T15:34:12.000Z | 2020-04-12T15:34:12.000Z | # coding: utf-8
# pylint: disable = invalid-name, C0111
import gpboost as gpb
import numpy as np
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
plt.style.use('ggplot')
#--------------------Cross validation for tree-boosting without GP or random effects----------------
print('Simulating data...')
# Simulate and create your dataset
def f1d(x):
"""Non-linear function for simulation"""
return (1.7 * (1 / (1 + np.exp(-(x - 0.5) * 20)) + 0.75 * x))
x = np.linspace(0, 1, 200, endpoint=True)
plt.plot(x, f1d(x), linewidth=2, color="r")
plt.title("Mean function")
plt.show()
def sim_data(n):
"""Function that simulates data. Two covariates of which only one has an effect"""
X = np.random.rand(n, 2)
# mean function plus noise
y = f1d(X[:, 0]) + np.random.normal(scale=0.1, size=n)
return ([X, y])
# Simulate data
n = 1000
data = sim_data(2 * n)
# create dataset for gpb.train
data_train = gpb.Dataset(data[0][0:n, :], data[1][0:n])
# specify your configurations as a dict
params = {
'objective': 'regression_l2',
'metric': {'l2', 'l1'},
'learning_rate': 0.1,
'max_depth': 6,
'min_data_in_leaf': 5,
'verbose': 0
}
print('Starting cross-validation...')
# do cross-validation
cvbst = gpb.cv(params=params, train_set=data_train,
num_boost_round=100, early_stopping_rounds=5,
nfold=2, verbose_eval=True, show_stdv=False, seed=1)
print("Best number of iterations: " + str(np.argmin(cvbst['l2-mean'])))
# --------------------Combine tree-boosting and grouped random effects model----------------
print('Simulating data...')
# Simulate data
def f1d(x):
"""Non-linear function for simulation"""
return (1.7 * (1 / (1 + np.exp(-(x - 0.5) * 20)) + 0.75 * x))
x = np.linspace(0, 1, 200, endpoint=True)
plt.figure("Mean function")
plt.plot(x, f1d(x), linewidth=2, color="r")
plt.title("Mean function")
plt.show()
n = 1000 # number of samples
np.random.seed(1)
X = np.random.rand(n, 2)
F = f1d(X[:, 0])
# Simulate grouped random effects
m = 25 # number of categories / levels for grouping variable
group = np.arange(n) # grouping variable
for i in range(m):
group[int(i * n / m):int((i + 1) * n / m)] = i
# incidence matrix relating grouped random effects to samples
Z1 = np.zeros((n, m))
for i in range(m):
Z1[np.where(group == i), i] = 1
sigma2_1 = 1 ** 2 # random effect variance
sigma2 = 0.1 ** 2 # error variance
b1 = np.sqrt(sigma2_1) * np.random.normal(size=m) # simulate random effects
eps = Z1.dot(b1)
xi = np.sqrt(sigma2) * np.random.normal(size=n) # simulate error term
y = F + eps + xi # observed data
# define GPModel
gp_model = gpb.GPModel(group_data=group)
gp_model.set_optim_params(params={"optimizer_cov": "fisher_scoring"})
# create dataset for gpb.train
data_train = gpb.Dataset(X, y)
# specify your configurations as a dict
params = {
'objective': 'regression_l2',
'learning_rate': 0.05,
'max_depth': 6,
'min_data_in_leaf': 5,
'verbose': 0
}
print('Starting cross-validation...')
# do cross-validation
cvbst = gpb.cv(params=params, train_set=data_train,
gp_model=gp_model, use_gp_model_for_validation=False,
num_boost_round=100, early_stopping_rounds=5,
nfold=2, verbose_eval=True, show_stdv=False, seed=1)
print("Best number of iterations: " + str(np.argmin(cvbst['l2-mean'])))
# Include random effect predictions for validation (observe the lower test error)
gp_model = gpb.GPModel(group_data=group)
print("Running cross validation for GPBoost model and use_gp_model_for_validation = TRUE")
cvbst = gpb.cv(params=params, train_set=data_train,
gp_model=gp_model, use_gp_model_for_validation=True,
num_boost_round=100, early_stopping_rounds=5,
nfold=2, verbose_eval=True, show_stdv=Falsem, seed=1)
print("Best number of iterations: " + str(np.argmin(cvbst['l2-mean'])))
cvbst.best_iteration
| 35.276786 | 100 | 0.665148 |
60321018f94dd63905027338dadab96fc7adf06f | 2,230 | py | Python | synapse/rest/synapse/client/unsubscribe.py | Florian-Sabonchi/synapse | c95b04bb0e719d3f5de1714b442f95a39c6e3634 | [
"Apache-2.0"
] | null | null | null | synapse/rest/synapse/client/unsubscribe.py | Florian-Sabonchi/synapse | c95b04bb0e719d3f5de1714b442f95a39c6e3634 | [
"Apache-2.0"
] | null | null | null | synapse/rest/synapse/client/unsubscribe.py | Florian-Sabonchi/synapse | c95b04bb0e719d3f5de1714b442f95a39c6e3634 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from synapse.api.errors import StoreError
from synapse.http.server import DirectServeHtmlResource, respond_with_html_bytes
from synapse.http.servlet import parse_string
from synapse.http.site import SynapseRequest
if TYPE_CHECKING:
from synapse.server import HomeServer
| 34.307692 | 80 | 0.689686 |
603213c5e7e394368a3f594930adb85245cbf3c3 | 4,859 | py | Python | pyhanabi/act_group.py | ravihammond/hanabi-convention-adaptation | 5dafa91742de8e8d5810e8213e0e2771818b2f54 | [
"MIT"
] | 1 | 2022-03-24T19:41:22.000Z | 2022-03-24T19:41:22.000Z | pyhanabi/act_group.py | ravihammond/hanabi-convention-adaptation | 5dafa91742de8e8d5810e8213e0e2771818b2f54 | [
"MIT"
] | null | null | null | pyhanabi/act_group.py | ravihammond/hanabi-convention-adaptation | 5dafa91742de8e8d5810e8213e0e2771818b2f54 | [
"MIT"
] | null | null | null | import set_path
import sys
import torch
set_path.append_sys_path()
import rela
import hanalearn
import utils
assert rela.__file__.endswith(".so")
assert hanalearn.__file__.endswith(".so")
| 31.967105 | 83 | 0.537148 |
603237057511914da74cfc53cec432cce1013ccc | 1,128 | py | Python | A_source_code/carbon/code/make_mask.py | vanHoek-dgnm/CARBON-DISC | 3ecd5f4efba5e032d43679ee977064d6b25154a9 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | A_source_code/carbon/code/make_mask.py | vanHoek-dgnm/CARBON-DISC | 3ecd5f4efba5e032d43679ee977064d6b25154a9 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | A_source_code/carbon/code/make_mask.py | vanHoek-dgnm/CARBON-DISC | 3ecd5f4efba5e032d43679ee977064d6b25154a9 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # ******************************************************
## Copyright 2019, PBL Netherlands Environmental Assessment Agency and Utrecht University.
## Reuse permitted under Gnu Public License, GPL v3.
# ******************************************************
from netCDF4 import Dataset
import numpy as np
import general_path
import accuflux
import ascraster
import get_surrounding_cells
import make_np_grid
| 31.333333 | 90 | 0.62766 |
6032a6052ffc5ac0129ff8a333fbe0b572cb530c | 7,309 | py | Python | Code/Dataset.py | gitFloyd/AAI-Project-2 | c6bb4d389248c3385e58a0c399343322a6dd887f | [
"MIT"
] | null | null | null | Code/Dataset.py | gitFloyd/AAI-Project-2 | c6bb4d389248c3385e58a0c399343322a6dd887f | [
"MIT"
] | null | null | null | Code/Dataset.py | gitFloyd/AAI-Project-2 | c6bb4d389248c3385e58a0c399343322a6dd887f | [
"MIT"
] | null | null | null | from io import TextIOWrapper
import math
from typing import TypeVar
import random
import os
from Settings import Settings
#pist = Pistachio(Dataset.LINUX_NL)
#
#for row in pist.Load()[0:10]:
# print(row)
| 24.363333 | 96 | 0.629498 |
60338466dc34f8421b1477264c6d62ca84ee2404 | 36,939 | py | Python | payments/models.py | wahuneke/django-stripe-payments | 5d4b26b025fc3fa75d3a0aeaafd67fb825325c94 | [
"BSD-3-Clause"
] | null | null | null | payments/models.py | wahuneke/django-stripe-payments | 5d4b26b025fc3fa75d3a0aeaafd67fb825325c94 | [
"BSD-3-Clause"
] | null | null | null | payments/models.py | wahuneke/django-stripe-payments | 5d4b26b025fc3fa75d3a0aeaafd67fb825325c94 | [
"BSD-3-Clause"
] | null | null | null | import datetime
import decimal
import json
import traceback
from django.conf import settings
from django.core.mail import EmailMessage
from django.db import models
from django.utils import timezone
from django.template.loader import render_to_string
from django.contrib.sites.models import Site
import stripe
from jsonfield.fields import JSONField
from .managers import CustomerManager, ChargeManager, TransferManager
from .settings import (
DEFAULT_PLAN,
INVOICE_FROM_EMAIL,
PAYMENTS_PLANS,
plan_from_stripe_id,
SEND_EMAIL_RECEIPTS,
TRIAL_PERIOD_FOR_USER_CALLBACK,
PLAN_QUANTITY_CALLBACK
)
from .signals import (
cancelled,
card_changed,
subscription_made,
webhook_processing_error,
WEBHOOK_SIGNALS,
)
from .utils import convert_tstamp
stripe.api_key = settings.STRIPE_SECRET_KEY
stripe.api_version = getattr(settings, "STRIPE_API_VERSION", "2012-11-07")
| 37.654434 | 137 | 0.609491 |
6037477e26e980cdc81f047c4b3c12fc1cbcec38 | 2,321 | py | Python | mars/tensor/base/flip.py | tomzhang/mars-1 | 6f1d85e37eb1b383251314cb0ba13e06288af03d | [
"Apache-2.0"
] | 2 | 2019-03-29T04:11:10.000Z | 2020-07-08T10:19:54.000Z | mars/tensor/base/flip.py | JeffroMF/mars | 2805241ac55b50c4f6319baa41113fbf8c723832 | [
"Apache-2.0"
] | null | null | null | mars/tensor/base/flip.py | JeffroMF/mars | 2805241ac55b50c4f6319baa41113fbf8c723832 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..datasource import tensor as astensor
def flip(m, axis):
"""
Reverse the order of elements in a tensor along the given axis.
The shape of the array is preserved, but the elements are reordered.
Parameters
----------
m : array_like
Input tensor.
axis : integer
Axis in tensor, which entries are reversed.
Returns
-------
out : array_like
A view of `m` with the entries of axis reversed. Since a view is
returned, this operation is done in constant time.
See Also
--------
flipud : Flip a tensor vertically (axis=0).
fliplr : Flip a tensor horizontally (axis=1).
Notes
-----
flip(m, 0) is equivalent to flipud(m).
flip(m, 1) is equivalent to fliplr(m).
flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
Examples
--------
>>> import mars.tensor as mt
>>> A = mt.arange(8).reshape((2,2,2))
>>> A.execute()
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> mt.flip(A, 0).execute()
array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
>>> mt.flip(A, 1).execute()
array([[[2, 3],
[0, 1]],
[[6, 7],
[4, 5]]])
>>> A = mt.random.randn(3,4,5)
>>> mt.all(mt.flip(A,2) == A[:,:,::-1,...]).execute()
True
"""
m = astensor(m)
sl = [slice(None)] * m.ndim
try:
sl[axis] = slice(None, None, -1)
except IndexError:
raise ValueError("axis=%i is invalid for the %i-dimensional input tensor"
% (axis, m.ndim))
return m[tuple(sl)]
| 25.228261 | 81 | 0.561827 |
6037a51c2f59285acb270192ab5e41f437b7c589 | 1,876 | py | Python | tests/test_ops/test_upfirdn2d.py | imabackstabber/mmcv | b272c09b463f00fd7fdd455f7bd4a055f9995521 | [
"Apache-2.0"
] | null | null | null | tests/test_ops/test_upfirdn2d.py | imabackstabber/mmcv | b272c09b463f00fd7fdd455f7bd4a055f9995521 | [
"Apache-2.0"
] | null | null | null | tests/test_ops/test_upfirdn2d.py | imabackstabber/mmcv | b272c09b463f00fd7fdd455f7bd4a055f9995521 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
_USING_PARROTS = True
try:
from parrots.autograd import gradcheck
except ImportError:
from torch.autograd import gradcheck, gradgradcheck
_USING_PARROTS = False
| 31.79661 | 78 | 0.55597 |
6038e029f5aa9016bb06dc0180b3e06aac57209e | 852 | py | Python | dataset_creation/description_task2.py | rmorain/kirby | ef115dbaed4acd1b23c3e10ca3b496f05b9a2382 | [
"Apache-2.0"
] | 1 | 2021-08-30T11:46:20.000Z | 2021-08-30T11:46:20.000Z | dataset_creation/description_task2.py | rmorain/kirby | ef115dbaed4acd1b23c3e10ca3b496f05b9a2382 | [
"Apache-2.0"
] | 36 | 2020-11-18T20:19:33.000Z | 2021-08-03T23:31:12.000Z | dataset_creation/description_task2.py | rmorain/kirby | ef115dbaed4acd1b23c3e10ca3b496f05b9a2382 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from tqdm import tqdm
data_list = []
debug = False
num_choices = 4
tqdm.pandas(desc="Progress")
df = pd.read_pickle("data/augmented_datasets/pickle/label_description.pkl")
if debug:
df = df.iloc[:10]
df = df.progress_apply(get_questions, axis=1)
new_df = pd.DataFrame(data_list)
if not debug:
new_df.to_pickle("data/augmented_datasets/pickle/description_qa_knowledge.pkl")
else:
__import__("pudb").set_trace()
| 24.342857 | 83 | 0.664319 |
603b2fa764ceaa795942b2f9977849ffd27b7101 | 2,776 | py | Python | scarab/commands/attach.py | gonzoua/scarab | b86474527b7b2ec30710ae79ea3f1cf5b7a93005 | [
"BSD-2-Clause"
] | 5 | 2018-09-01T01:42:43.000Z | 2019-01-04T21:32:55.000Z | scarab/commands/attach.py | gonzoua/scarab | b86474527b7b2ec30710ae79ea3f1cf5b7a93005 | [
"BSD-2-Clause"
] | 1 | 2019-09-18T17:06:11.000Z | 2019-11-29T18:35:08.000Z | scarab/commands/attach.py | gonzoua/scarab | b86474527b7b2ec30710ae79ea3f1cf5b7a93005 | [
"BSD-2-Clause"
] | null | null | null | # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
"""
'attach' command implementation'''
"""
from base64 import b64encode
import argparse
import magic
from ..bugzilla import BugzillaError
from ..context import bugzilla_instance
from .. import ui
from .base import Base
| 39.098592 | 98 | 0.617075 |
603b5710a40e621c6b937d72101edf1cadc2be7f | 5,089 | py | Python | test/test_airfoil.py | chabotsi/pygmsh | f2c26d9193c63efd9fa7676ea0860a18de7e8b52 | [
"MIT"
] | null | null | null | test/test_airfoil.py | chabotsi/pygmsh | f2c26d9193c63efd9fa7676ea0860a18de7e8b52 | [
"MIT"
] | null | null | null | test/test_airfoil.py | chabotsi/pygmsh | f2c26d9193c63efd9fa7676ea0860a18de7e8b52 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import numpy
import pygmsh
from helpers import compute_volume
if __name__ == '__main__':
import meshio
meshio.write('airfoil.vtu', *test())
| 31.608696 | 69 | 0.503046 |
603be24384736b5da4440432a56324e5b621091a | 260 | py | Python | examples/test_yield_8.py | MateuszG/django_auth | 4cda699c1b6516ffaa26329f545a674a7c849a16 | [
"MIT"
] | 2 | 2015-01-12T09:43:59.000Z | 2015-01-12T10:39:31.000Z | examples/test_yield_8.py | MateuszG/django_auth | 4cda699c1b6516ffaa26329f545a674a7c849a16 | [
"MIT"
] | null | null | null | examples/test_yield_8.py | MateuszG/django_auth | 4cda699c1b6516ffaa26329f545a674a7c849a16 | [
"MIT"
] | null | null | null | import pytest
| 18.571429 | 34 | 0.653846 |
603c4a28289b42faa48ea562130b7e8125179bd8 | 2,327 | py | Python | modules/google-earth-engine/docker/src/sepalinternal/gee.py | BuddyVolly/sepal | 6a2356a88940a36568b1d83ba3aeaae4283d5445 | [
"MIT"
] | 153 | 2015-10-23T09:00:08.000Z | 2022-03-19T03:24:04.000Z | modules/google-earth-engine/docker/src/sepalinternal/gee.py | BuddyVolly/sepal | 6a2356a88940a36568b1d83ba3aeaae4283d5445 | [
"MIT"
] | 165 | 2015-09-24T09:53:06.000Z | 2022-03-31T09:55:06.000Z | modules/google-earth-engine/docker/src/sepalinternal/gee.py | BuddyVolly/sepal | 6a2356a88940a36568b1d83ba3aeaae4283d5445 | [
"MIT"
] | 46 | 2016-07-10T10:40:09.000Z | 2021-11-14T01:07:33.000Z | import json
from threading import Semaphore
import ee
from flask import request
from google.auth import crypt
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
service_account_credentials = None
import logging
export_semaphore = Semaphore(5)
get_info_semaphore = Semaphore(2)
| 27.376471 | 74 | 0.685862 |
603d09d31004383c874fb82ce95f78dc229bb3dd | 481 | py | Python | micropython/007_boat_sink.py | mirontoli/tolle-rasp | 020638e86c167aedd7b556d8515a3adef70724af | [
"MIT"
] | 2 | 2021-06-29T17:18:09.000Z | 2022-01-25T08:29:59.000Z | micropython/007_boat_sink.py | mirontoli/tolle-rasp | 020638e86c167aedd7b556d8515a3adef70724af | [
"MIT"
] | null | null | null | micropython/007_boat_sink.py | mirontoli/tolle-rasp | 020638e86c167aedd7b556d8515a3adef70724af | [
"MIT"
] | null | null | null | #https://microbit-micropython.readthedocs.io/en/latest/tutorials/images.html#animation
from microbit import *
boat1 = Image("05050:05050:05050:99999:09990")
boat2 = Image("00000:05050:05050:05050:99999")
boat3 = Image("00000:00000:05050:05050:05050")
boat4 = Image("00000:00000:00000:05050:05050")
boat5 = Image("00000:00000:00000:00000:05050")
boat6 = Image("00000:00000:00000:00000:00000")
all_boats = [boat1, boat2, boat3, boat4, boat5, boat6]
display.show(all_boats, delay=200) | 48.1 | 86 | 0.765073 |
603d47f5b923ece6ffdc97d38998dad6e0f866c8 | 2,022 | py | Python | examples/api-samples/inc_samples/convert_callback.py | groupdocs-legacy-sdk/python | 80e5ef5a9a14ac4a7815c6cf933b5b2997381455 | [
"Apache-2.0"
] | null | null | null | examples/api-samples/inc_samples/convert_callback.py | groupdocs-legacy-sdk/python | 80e5ef5a9a14ac4a7815c6cf933b5b2997381455 | [
"Apache-2.0"
] | null | null | null | examples/api-samples/inc_samples/convert_callback.py | groupdocs-legacy-sdk/python | 80e5ef5a9a14ac4a7815c6cf933b5b2997381455 | [
"Apache-2.0"
] | null | null | null | import os
import json
import shutil
import time
from pyramid.renderers import render_to_response
from pyramid.response import Response
from groupdocs.ApiClient import ApiClient
from groupdocs.AsyncApi import AsyncApi
from groupdocs.StorageApi import StorageApi
from groupdocs.GroupDocsRequestSigner import GroupDocsRequestSigner
# Checking value on null
| 34.271186 | 68 | 0.62908 |
603e1db8585ef18d062d93564593d2084f744fc9 | 14,585 | py | Python | PyIK/src/litearm.py | AliShug/EvoArm | a5dea204914ee1e25867e4412e88d245329316f2 | [
"CC-BY-3.0"
] | 110 | 2017-01-13T17:19:18.000Z | 2022-02-20T06:50:03.000Z | PyIK/src/litearm.py | igcxl/EvoArm | a5dea204914ee1e25867e4412e88d245329316f2 | [
"CC-BY-3.0"
] | 1 | 2018-08-30T07:27:56.000Z | 2018-08-30T07:27:56.000Z | PyIK/src/litearm.py | igcxl/EvoArm | a5dea204914ee1e25867e4412e88d245329316f2 | [
"CC-BY-3.0"
] | 47 | 2017-03-10T20:34:01.000Z | 2021-11-18T03:44:06.000Z | from __future__ import print_function
import numpy as np
import struct
import solvers
import pid
from util import *
MOTORSPEED = 0.9
MOTORMARGIN = 1
MOTORSLOPE = 30
ERRORLIM = 5.0
def getServoElevator(self):
return 178.21 - degrees(self.shoulder_angle)
def getServoActuator(self):
return degrees(self.actuator_angle) + 204.78
def getServoSwing(self):
return 150 - degrees(self.swing_angle)
def getServoWristX(self):
return 150 - degrees(self.wristXAngle)
def getServoWristY(self):
return 147 + degrees(self.wristYAngle)
def armDiffAngle(self):
return degrees(self.shoulder_angle - self.actuator_angle)
def checkActuator(self):
angle = self.getServoActuator()
return angle >= 95 and angle <= 250
def checkDiff(self):
angle = self.armDiffAngle()
return angle >= 44 and angle <= 175
def checkElevator(self):
angle = self.getServoElevator()
return angle >= 60 and angle <= 210
def checkForearm(self):
angle = degrees(self.elbow_angle + self.shoulder_angle)
return angle < 200 and angle > 80
def checkSwing(self):
angle = self.getServoSwing()
return angle >= 60 and angle <= 240
def checkWristX(self):
angle = self.getServoWristX()
return angle >= 60 and angle <= 240
def checkWristY(self):
angle = self.getServoWristY()
return angle >= 60 and angle <= 160
def checkPositioning(self):
# When Y>0 Forearm always faces outwards
if self.wrist2D[1] > 0 and self.wrist2D[0] < self.elbow2D[0]:
return False
# No valid positions X<=0
if self.wrist2D[0] <= 0:
return False
# Effector height range
if self.effector[1] > 180 or self.effector[1] < -200:
return False
return True
def checkClearance(self):
return (self.checkDiff() and self.checkActuator() and
self.checkElevator() and self.checkSwing() and
self.checkWristX() and self.checkWristY() and
self.checkPositioning() and self.checkForearm())
def serialize(self):
"""Returns a packed struct holding the pose information"""
return struct.pack(
ArmPose.structFormat,
self.swing_angle,
self.shoulder_angle,
self.elbow_angle,
self.wristXAngle,
self.wristYAngle
)
class ArmController:
def __init__(self,
servo_swing,
servo_shoulder,
servo_elbow,
servo_wrist_x,
servo_wrist_y,
arm_config,
motion_enable = False):
# Solvers are responsible for calculating the target servo positions to
# reach a given goal position
self.ik = solvers.IKSolver(
arm_config.main_length,
arm_config.forearm_length,
arm_config.wrist_length,
arm_config.shoulder_offset)
self.physsolver = solvers.PhysicalSolver(
arm_config.main_length,
arm_config.linkage_length,
arm_config.lower_actuator_length,
arm_config.upper_actuator_length)
# Servos
self.servos = {}
self.servos["swing"] = servo_swing
self.servos["shoulder"] = servo_shoulder
self.servos["elbow"] = servo_elbow
self.servos["wrist_x"] = servo_wrist_x
self.servos["wrist_y"] = servo_wrist_y
for key, servo in self.servos.iteritems():
if servo is None:
print ("Warning: {0} servo not connected".format(key))
else:
# Initialise a PID controller for the servo
if servo.protocol == 1:
servo.setGoalSpeed(-MOTORSPEED)
servo.data['pid'] = pid.PIDControl(2.4, 0, 0.4)
else:
servo.setGoalSpeed(0)
servo.data['error'] = 0.0
# Make sure the goal speed is set
servo.setTorqueEnable(1)
if servo.protocol == 1:
print("Setting slope")
servo.setCWMargin(MOTORMARGIN)
servo.setCCWMargin(MOTORMARGIN)
servo.setCWSlope(MOTORSLOPE)
servo.setCCWSlope(MOTORSLOPE)
# Store parameters
self.motion_enable = True
self.enableMovement(False)
self.cfg = arm_config
# Dirty flags for stored poses
self.ik_pose = None
self.ik_dirty = True
self.real_pose = None
self.real_dirty = True
# Current target pose
self.target_pose = None
def pollServos(self):
"""Poll the real-world servo positions"""
for servo in self.servos.itervalues():
if servo is not None:
newPos = servo.getPosition()
if type(newPos) is float:
servo.data['pos'] = newPos
def clearPositionError(self):
"""Clears the servo's position-error accumulators"""
for servo in self.servos.itervalues():
if servo is not None and servo.protocol == 1:
servo.data['error'] = 0.0
def getRealPose(self):
"""Retrieve the real-world arm pose, or None if not all servos are
connected.
"""
if any([servo is None for servo in self.servos.itervalues()]):
return None
# This whole function is essentially just FK based on the known servo
# angles
swing_servo = self.servos['swing'].data['pos']
elevator_servo = self.servos['shoulder'].data['pos']
actuator_servo = self.servos['elbow'].data['pos']
wrist_x_servo = self.servos['wrist_x'].data['pos']
wrist_y_servo = self.servos['wrist_y'].data['pos']
# Find the internal arm-pose angles for the given servo positions
swing_angle = ArmPose.calcSwingAngle(swing_servo)
elevator_angle = ArmPose.calcElevatorAngle(elevator_servo)
actuator_angle = ArmPose.calcActuatorAngle(actuator_servo)
wrist_x_angle = ArmPose.calcWristXAngle(wrist_x_servo)
wrist_y_angle = ArmPose.calcWristYAngle(wrist_y_servo)
# Solve elbow angle for given actuator and elevator angles
# (this is the angle from the elevator arm's direction to the forearm's)
elbow_angle = self.physsolver.solve_forearm(elevator_angle, actuator_angle)
# FK positions from config and angles
offset = self.cfg.shoulder_offset
shoulder2D = np.array([offset[1], 0])
elbow2D = shoulder2D + rotate(vertical, elevator_angle)*self.cfg.main_length
wrist2D = elbow2D + rotate(vertical, elevator_angle + elbow_angle)*self.cfg.forearm_length
effector2D = wrist2D + [self.cfg.wrist_length, 0]
# 3D Effector calculation is a little more involved
td = rotate([offset[0], effector2D[0]], swing_angle)
effector = np.array([td[0], effector2D[1], td[1]])
pose = ArmPose(
self.cfg,
swing_angle, elevator_angle, actuator_angle,
elbow_angle, elbow2D, wrist2D, effector2D,
effector, wrist_x_angle, wrist_y_angle)
return pose
| 37.397436 | 98 | 0.58471 |
60416481c613049aa881c1d91f118e1ecab9fdbf | 1,194 | py | Python | create_augmented_versions.py | jakobabesser/piano_aug | 37f78c77465749c80d7aa91d9e804b89024eb278 | [
"MIT"
] | null | null | null | create_augmented_versions.py | jakobabesser/piano_aug | 37f78c77465749c80d7aa91d9e804b89024eb278 | [
"MIT"
] | null | null | null | create_augmented_versions.py | jakobabesser/piano_aug | 37f78c77465749c80d7aa91d9e804b89024eb278 | [
"MIT"
] | null | null | null | from pedalboard import Reverb, Compressor, Gain, LowpassFilter, Pedalboard
import soundfile as sf
if __name__ == '__main__':
# replace by path of unprocessed piano file if necessar
fn_wav_source = 'live_grand_piano.wav'
# augmentation settings using Pedalboard library
settings = {'rev-': [Reverb(room_size=.4)],
'rev+': [Reverb(room_size=.8)],
'comp+': [Compressor(threshold_db=-15, ratio=20)],
'comp-': [Compressor(threshold_db=-10, ratio=10)],
'gain+': [Gain(gain_db=15)], # clipping
'gain-': [Gain(gain_db=5)],
'lpf-': [LowpassFilter(cutoff_frequency_hz=50)],
'lpf+': [LowpassFilter(cutoff_frequency_hz=250)]}
# create augmented versions
for s in settings.keys():
# load unprocessed piano recording
audio, sample_rate = sf.read(fn_wav_source)
# create Pedalboard object
board = Pedalboard(settings[s])
# create augmented audio
effected = board(audio, sample_rate)
# save it
fn_target = fn_wav_source.replace('.wav', f'_{s}.wav')
sf.write(fn_target, effected, sample_rate)
| 34.114286 | 74 | 0.61139 |
60422bea81360e85bf0b5cf68c083ffc23ea9d15 | 2,867 | py | Python | flux/migrations/versions/9ba67b798fa_add_request_system.py | siq/flux | ca7563deb9ebef14840bbf0cb7bab4d9478b2470 | [
"Linux-OpenIB"
] | null | null | null | flux/migrations/versions/9ba67b798fa_add_request_system.py | siq/flux | ca7563deb9ebef14840bbf0cb7bab4d9478b2470 | [
"Linux-OpenIB"
] | null | null | null | flux/migrations/versions/9ba67b798fa_add_request_system.py | siq/flux | ca7563deb9ebef14840bbf0cb7bab4d9478b2470 | [
"Linux-OpenIB"
] | null | null | null | """add_request_system
Revision: 9ba67b798fa
Revises: 31b92bf6506d
Created: 2013-07-23 02:49:09.342814
"""
revision = '9ba67b798fa'
down_revision = '31b92bf6506d'
from alembic import op
from spire.schema.fields import *
from spire.mesh import SurrogateType
from sqlalchemy import (Column, ForeignKey, ForeignKeyConstraint, PrimaryKeyConstraint,
CheckConstraint, UniqueConstraint)
from sqlalchemy.dialects import postgresql
| 39.273973 | 87 | 0.659226 |
6043f0f0c5013421d3026505d50e50aa5fb67097 | 9,333 | py | Python | src/python/Vector2_TEST.py | clalancette/ign-math | 84eb1bfe470d00d335c048f102b56c49a15b56be | [
"ECL-2.0",
"Apache-2.0"
] | 43 | 2019-08-21T20:50:05.000Z | 2022-03-27T11:48:25.000Z | src/python/Vector2_TEST.py | clalancette/ign-math | 84eb1bfe470d00d335c048f102b56c49a15b56be | [
"ECL-2.0",
"Apache-2.0"
] | 277 | 2020-04-16T23:38:50.000Z | 2022-03-31T11:11:58.000Z | src/python/Vector2_TEST.py | clalancette/ign-math | 84eb1bfe470d00d335c048f102b56c49a15b56be | [
"ECL-2.0",
"Apache-2.0"
] | 48 | 2020-04-15T21:15:43.000Z | 2022-03-14T19:29:04.000Z | # Copyright (C) 2021 Open Source Robotics Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import math
from ignition.math import Vector2d
from ignition.math import Vector2f
if __name__ == '__main__':
unittest.main()
| 29.165625 | 74 | 0.585985 |
604745505e3f84cc6af47e088784a1a28b715d2a | 1,418 | py | Python | fsspec/tests/test_mapping.py | sodre/filesystem_spec | 5fe51c5e85366b57a11ed66637a940970372ea4b | [
"BSD-3-Clause"
] | null | null | null | fsspec/tests/test_mapping.py | sodre/filesystem_spec | 5fe51c5e85366b57a11ed66637a940970372ea4b | [
"BSD-3-Clause"
] | null | null | null | fsspec/tests/test_mapping.py | sodre/filesystem_spec | 5fe51c5e85366b57a11ed66637a940970372ea4b | [
"BSD-3-Clause"
] | null | null | null | import os
import fsspec
from fsspec.implementations.memory import MemoryFileSystem
import pickle
import pytest
| 22.870968 | 76 | 0.612835 |
6047d157ca53f47cf0fb3523f60398cfb109d425 | 990 | py | Python | testedome/questions/quest_5.py | EderReisS/pythonChallenges | a880358c2cb4de0863f4b4cada36b3d439a8a018 | [
"MIT"
] | null | null | null | testedome/questions/quest_5.py | EderReisS/pythonChallenges | a880358c2cb4de0863f4b4cada36b3d439a8a018 | [
"MIT"
] | null | null | null | testedome/questions/quest_5.py | EderReisS/pythonChallenges | a880358c2cb4de0863f4b4cada36b3d439a8a018 | [
"MIT"
] | 1 | 2021-07-29T23:20:17.000Z | 2021-07-29T23:20:17.000Z | """
A
/ |
B C
'B, C'
"""
if __name__ == "__main__":
c = CategoryTree()
c.add_category('A', None)
c.add_category('B', 'A')
c.add_category('C', 'A')
print(','.join(c.get_children('A') or []))
print(','.join(c.get_children('E') or []))
| 22 | 51 | 0.559596 |
60484feb7046b3c272c1b83d25957af04879dd6e | 4,681 | py | Python | sppas/sppas/src/anndata/aio/__init__.py | mirfan899/MTTS | 3167b65f576abcc27a8767d24c274a04712bd948 | [
"MIT"
] | null | null | null | sppas/sppas/src/anndata/aio/__init__.py | mirfan899/MTTS | 3167b65f576abcc27a8767d24c274a04712bd948 | [
"MIT"
] | null | null | null | sppas/sppas/src/anndata/aio/__init__.py | mirfan899/MTTS | 3167b65f576abcc27a8767d24c274a04712bd948 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
anndata.aio
~~~~~~~~~~~
Readers and writers of annotated data.
:author: Brigitte Bigi
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: develop@sppas.org
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 Brigitte Bigi
"""
from .annotationpro import sppasANT
from .annotationpro import sppasANTX
from .anvil import sppasAnvil
from .audacity import sppasAudacity
from .elan import sppasEAF
from .htk import sppasLab
from .phonedit import sppasMRK
from .phonedit import sppasSignaix
from .praat import sppasTextGrid
from .praat import sppasIntensityTier
from .praat import sppasPitchTier
from .sclite import sppasCTM
from .sclite import sppasSTM
from .subtitle import sppasSubRip
from .subtitle import sppasSubViewer
from .text import sppasRawText
from .text import sppasCSV
from .weka import sppasARFF
from .weka import sppasXRFF
from .xtrans import sppasTDF
from .xra import sppasXRA
# ----------------------------------------------------------------------------
# Variables
# ----------------------------------------------------------------------------
# TODO: get extension from the "default_extension" member of each class
ext_sppas = ['.xra', '.[Xx][Rr][Aa]']
ext_praat = ['.TextGrid', '.PitchTier', '.[Tt][eE][xX][tT][Gg][Rr][Ii][dD]','.[Pp][Ii][tT][cC][hH][Tt][Ii][Ee][rR]']
ext_transcriber = ['.trs','.[tT][rR][sS]']
ext_elan = ['.eaf', '[eE][aA][fF]']
ext_ascii = ['.txt', '.csv', '.[cC][sS][vV]', '.[tT][xX][Tt]', '.info']
ext_phonedit = ['.mrk', '.[mM][rR][kK]']
ext_signaix = ['.hz', '.[Hh][zZ]']
ext_sclite = ['.stm', '.ctm', '.[sScC][tT][mM]']
ext_htk = ['.lab', '.mlf']
ext_subtitles = ['.sub', '.srt', '.[sS][uU][bB]', '.[sS][rR][tT]']
ext_anvil = ['.anvil', '.[aA][aN][vV][iI][lL]']
ext_annotationpro = ['.antx', '.[aA][aN][tT][xX]']
ext_xtrans = ['.tdf', '.[tT][dD][fF]']
ext_audacity = ['.aup']
ext_weka = ['.arff', '.xrff']
primary_in = ['.hz', '.PitchTier']
annotations_in = ['.xra', '.TextGrid', '.eaf', '.csv', '.mrk', '.txt', '.stm', '.ctm', '.lab', '.mlf', '.sub', '.srt', '.antx', '.anvil', '.aup', '.trs', '.tdf']
extensions = ['.xra', '.textgrid', '.pitchtier', '.hz', '.eaf', '.trs', '.csv', '.mrk', '.txt', '.mrk', '.stm', '.ctm', '.lab', '.mlf', '.sub', '.srt', 'anvil', '.antx', '.tdf', '.arff', '.xrff']
extensionsul = ext_sppas + ext_praat + ext_transcriber + ext_elan + ext_ascii + ext_phonedit + ext_signaix + ext_sclite + ext_htk + ext_subtitles + ext_anvil + ext_annotationpro + ext_xtrans + ext_audacity + ext_weka
extensions_in = primary_in + annotations_in
extensions_out = ['.xra', '.TextGrid', '.eaf', '.csv', '.mrk', '.txt', '.stm', '.ctm', '.lab', '.mlf', '.sub', '.srt', '.antx', '.arff', '.xrff']
extensions_out_multitiers = ['.xra', '.TextGrid', '.eaf', '.csv', '.mrk', '.antx', '.arff', '.xrff']
# ----------------------------------------------------------------------------
__all__ = (
"sppasANT",
"sppasANTX",
"sppasAnvil",
"sppasAudacity",
"sppasEAF",
"sppasLab",
"sppasMRK",
"sppasSignaix",
"sppasTextGrid",
"sppasIntensityTier",
"sppasPitchTier",
"sppasCTM",
"sppasSTM",
"sppasSubRip",
"sppasSubViewer",
"sppasRawText",
"sppasCSV",
"sppasARFF",
"sppasXRFF",
"sppasTDF",
"sppasXRA",
"extensions",
"extensions_in",
"extensions_out"
)
| 36.858268 | 216 | 0.554582 |
6049a1eccd8b14db6687d766205e1b913a98cd6d | 226 | py | Python | models/__init__.py | dapengchen123/hfsoftmax | 467bd90814abdf3e5ad8384e6e05749172b68ae6 | [
"MIT"
] | 1 | 2018-10-11T09:27:53.000Z | 2018-10-11T09:27:53.000Z | models/__init__.py | dapengchen123/hfsoftmax | 467bd90814abdf3e5ad8384e6e05749172b68ae6 | [
"MIT"
] | null | null | null | models/__init__.py | dapengchen123/hfsoftmax | 467bd90814abdf3e5ad8384e6e05749172b68ae6 | [
"MIT"
] | null | null | null | from .resnet import *
from .hynet import *
from .classifier import Classifier, HFClassifier, HNSWClassifier
from .ext_layers import ParameterClient
samplerClassifier = {
'hf': HFClassifier,
'hnsw': HNSWClassifier,
}
| 20.545455 | 64 | 0.756637 |
604a3acc24feaf58c41a047512c8f6cf4cc0bdd1 | 1,397 | py | Python | scripts/multiplayer/server.py | AgnirudraSil/tetris | 2a4f4c26190fc8b669f98c116af343f7f1ac51bf | [
"MIT"
] | 3 | 2022-01-11T06:11:08.000Z | 2022-03-10T09:34:42.000Z | scripts/multiplayer/server.py | agnirudrasil/tetris | 2a4f4c26190fc8b669f98c116af343f7f1ac51bf | [
"MIT"
] | null | null | null | scripts/multiplayer/server.py | agnirudrasil/tetris | 2a4f4c26190fc8b669f98c116af343f7f1ac51bf | [
"MIT"
] | null | null | null | import pickle
import socket
import _thread
from scripts.multiplayer import game, board, tetriminos
server = "192.168.29.144"
port = 5555
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((server, port))
except socket.error as e:
print(e)
s.listen()
print("Waiting for connection")
connected = set()
games = {}
idCount = 0
while True:
conn, addr = s.accept()
print("Connected to: ", addr)
idCount += 1
p = 0
game_id = (idCount - 1) // 2
if idCount % 2 == 1:
games[game_id] = game.Game((0, 0, 0), None, board)
else:
games[game_id].ready = True
p = 1
_thread.start_new_thread(threaded_client, (conn, p, game_id))
| 18.878378 | 65 | 0.536149 |
604b01d7a386918b107512b8c4b02b4727b0197f | 2,311 | py | Python | AdventOfCode/2018/src/day-03/app.py | AustinTSchaffer/DailyProgrammer | b16d9babb298ac5e879c514f9c4646b99c6860a8 | [
"MIT"
] | 1 | 2020-07-28T17:07:35.000Z | 2020-07-28T17:07:35.000Z | AdventOfCode/2018/src/day-03/app.py | AustinTSchaffer/DailyProgrammer | b16d9babb298ac5e879c514f9c4646b99c6860a8 | [
"MIT"
] | 5 | 2021-04-06T18:25:29.000Z | 2021-04-10T15:13:28.000Z | AdventOfCode/2018/src/day-03/app.py | AustinTSchaffer/DailyProgrammer | b16d9babb298ac5e879c514f9c4646b99c6860a8 | [
"MIT"
] | null | null | null | import os
import re
from collections import defaultdict
CURRENT_DIR, _ = os.path.split(__file__)
DATA_FLIE = os.path.join(CURRENT_DIR, 'data.txt')
def part1(claims):
"""
This is basically a single-threaded collision detection method,
implemented in pure python. Computation complexity is obviously
not a consideration.
"""
# Determines how many times each locations was claimed
claimed_space_registry = defaultdict(int)
for claim in claims:
for location in claim.all_locations():
claimed_space_registry[location] += 1
# Generates the set of all locations that were claimed more than once
multi_claimed_spaces = {
location
for location,count in claimed_space_registry.items()
if count > 1
}
# Prints the number of locations that are claimed more than once
# and returns the set of locations that were claimed more than once
print('Multi-Claimed Spaces:', len(multi_claimed_spaces))
return multi_claimed_spaces
def part2(claims, multi_claimed_spaces):
"""
Might not be the optimal solution, but it runs fast enough, and uses
components that were already calculated in part 1.
"""
for claim in claims:
all_locations_are_non_overlapping = all(map(
lambda loc: loc not in multi_claimed_spaces,
claim.all_locations()
))
if all_locations_are_non_overlapping:
print('Non-overlapping claim:', claim.id)
return claim
if __name__ == '__main__':
claims = list(data_file_iter(DATA_FLIE))
mcs = part1(claims)
santas_suit_material = part2(claims, mcs)
| 32.097222 | 73 | 0.638685 |
604b36210d2f64d1a79dd2e280534e5bf39ec7cb | 4,737 | py | Python | facerec-master/py/facerec/distance.py | ArianeFire/HaniCam | 8a940486a613d680a0b556209a596cdf3eb71f53 | [
"MIT"
] | 776 | 2015-01-01T11:34:42.000Z | 2022-02-26T10:25:51.000Z | facerec-master/py/facerec/distance.py | ArianeFire/HaniCam | 8a940486a613d680a0b556209a596cdf3eb71f53 | [
"MIT"
] | 43 | 2015-03-17T07:48:38.000Z | 2019-08-21T05:16:36.000Z | facerec-master/py/facerec/distance.py | ArianeFire/HaniCam | 8a940486a613d680a0b556209a596cdf3eb71f53 | [
"MIT"
] | 479 | 2015-01-01T12:34:38.000Z | 2022-02-28T23:57:26.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Philipp Wagner. All rights reserved.
# Licensed under the BSD license. See LICENSE file in the project root for full license information.
import numpy as np
| 33.595745 | 125 | 0.617479 |
604b9cab87abdc5ce52f2c470f0e9885781ed2dd | 7,162 | py | Python | pgyer_uploader.py | elina8013/android_demo | d8cef19d06a4f21f7cf2c277bbabba8cf10a8608 | [
"Apache-2.0"
] | 666 | 2015-03-18T02:09:34.000Z | 2021-08-25T06:24:27.000Z | pgyer_uploader.py | shanjiaxiang/android_demo | d1afa66c30ae5b3c09a39f4c36c61640615177bb | [
"Apache-2.0"
] | 7 | 2017-04-26T07:06:49.000Z | 2019-07-08T08:05:13.000Z | pgyer_uploader.py | shanjiaxiang/android_demo | d1afa66c30ae5b3c09a39f4c36c61640615177bb | [
"Apache-2.0"
] | 371 | 2015-03-18T02:09:33.000Z | 2021-09-10T02:41:05.000Z | #!/usr/bin/python
#coding=utf-8
import os
import requests
import time
import re
from datetime import datetime
import urllib2
import json
import mimetypes
import smtplib
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
# configuration for pgyer
USER_KEY = "f605b7c7826690f796078e3dd23a60d5"
API_KEY = "8bdd05df986d598f01456914e51fc889"
PGYER_UPLOAD_URL = "https://www.pgyer.com/apiv1/app/upload"
repo_path = 'C:/Users/Administrator/.jenkins/workspace/Demo/app'
repo_url = 'https://github.com/r17171709/iite_test'
ipa_path = "C:/Users/Administrator/.jenkins/workspace/Demo/app/build/outputs/apk/app-release.apk"
update_description = ""
# git
#
if __name__ == '__main__':
main()
| 34.76699 | 154 | 0.604021 |
604c11d1662643b5e9e977b3126e196c0ca94747 | 1,944 | py | Python | edit/editpublisher.py | lokal-profil/isfdb_site | 0ce20d6347849926d4eda961ea9249c31519eea5 | [
"BSD-3-Clause"
] | null | null | null | edit/editpublisher.py | lokal-profil/isfdb_site | 0ce20d6347849926d4eda961ea9249c31519eea5 | [
"BSD-3-Clause"
] | null | null | null | edit/editpublisher.py | lokal-profil/isfdb_site | 0ce20d6347849926d4eda961ea9249c31519eea5 | [
"BSD-3-Clause"
] | null | null | null | #!_PYTHONLOC
#
# (C) COPYRIGHT 2004-2021 Al von Ruff and Ahasuerus
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision$
# Date: $Date$
from isfdblib import *
from isfdblib_help import *
from isfdblib_print import *
from isfdb import *
from SQLparsing import *
from login import User
if __name__ == '__main__':
publisherID = SESSION.Parameter(0, 'int')
record = SQLGetPublisher(publisherID)
if not record:
SESSION.DisplayError('Record Does Not Exist')
PrintPreSearch('Publisher Editor')
PrintNavBar('edit/editpublisher.cgi', publisherID)
help = HelpPublisher()
printHelpBox('publisher', 'EditPublisher')
print '<form id="data" METHOD="POST" ACTION="/cgi-bin/edit/submitpublisher.cgi">'
print '<table border="0">'
print '<tbody id="tagBody">'
# Limit the ability to edit publisher names to moderators
user = User()
user.load()
display_only = 1
if SQLisUserModerator(user.id):
display_only = 0
printfield("Publisher Name", "publisher_name", help, record[PUBLISHER_NAME], display_only)
trans_publisher_names = SQLloadTransPublisherNames(record[PUBLISHER_ID])
printmultiple(trans_publisher_names, "Transliterated Name", "trans_publisher_names", help)
webpages = SQLloadPublisherWebpages(record[PUBLISHER_ID])
printWebPages(webpages, 'publisher', help)
printtextarea('Note', 'publisher_note', help, SQLgetNotes(record[PUBLISHER_NOTE]))
printtextarea('Note to Moderator', 'mod_note', help, '')
print '</tbody>'
print '</table>'
print '<p>'
print '<input NAME="publisher_id" VALUE="%d" TYPE="HIDDEN">' % publisherID
print '<input TYPE="SUBMIT" VALUE="Submit Data" tabindex="1">'
print '</form>'
print '<p>'
PrintPostSearch(0, 0, 0, 0, 0, 0)
| 28.173913 | 98 | 0.677469 |
604ecb6f7cdc9275682b21b948b61c6eab42174d | 2,988 | py | Python | src/dispatch/incident_cost/views.py | vj-codes/dispatch | f9354781956380cac290be02fb987eb50ddc1a5d | [
"Apache-2.0"
] | 1 | 2021-06-16T17:02:35.000Z | 2021-06-16T17:02:35.000Z | src/dispatch/incident_cost/views.py | dilbwagsingh/dispatch | ca7c9730dea64e196c6653321552d570dfdad069 | [
"Apache-2.0"
] | 10 | 2021-07-17T04:28:07.000Z | 2022-02-05T00:40:59.000Z | src/dispatch/incident_cost/views.py | dilbwagsingh/dispatch | ca7c9730dea64e196c6653321552d570dfdad069 | [
"Apache-2.0"
] | null | null | null | from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from dispatch.database.core import get_db
from dispatch.database.service import common_parameters, search_filter_sort_paginate
from dispatch.auth.permissions import SensitiveProjectActionPermission, PermissionsDependency
from .models import (
IncidentCostCreate,
IncidentCostPagination,
IncidentCostRead,
IncidentCostUpdate,
)
from .service import create, delete, get, update
router = APIRouter()
| 32.835165 | 100 | 0.749665 |
604ecfc2153a2b8f83182b3e8a28bd46fb2056eb | 8,479 | py | Python | tests/views/test_admin_committee_questions.py | Lunga001/pmg-cms-2 | 10cea3979711716817b0ba2a41987df73f2c7642 | [
"Apache-2.0"
] | 2 | 2019-06-11T20:46:43.000Z | 2020-08-27T22:50:32.000Z | tests/views/test_admin_committee_questions.py | Lunga001/pmg-cms-2 | 10cea3979711716817b0ba2a41987df73f2c7642 | [
"Apache-2.0"
] | 70 | 2017-05-26T14:04:06.000Z | 2021-06-30T10:21:58.000Z | tests/views/test_admin_committee_questions.py | OpenUpSA/pmg-cms-2 | ec5f259dae81674ac7a8cdb80f124a8b0f167780 | [
"Apache-2.0"
] | 4 | 2017-08-29T10:09:30.000Z | 2021-05-25T11:29:03.000Z | import os
from urllib.parse import urlparse, parse_qs
from builtins import str
from tests import PMGLiveServerTestCase
from pmg.models import db, Committee, CommitteeQuestion
from tests.fixtures import dbfixture, UserData, CommitteeData, MembershipData
from flask import escape
from io import BytesIO
| 45.342246 | 927 | 0.644416 |
604f0eeff04eca0db1f9e0f762b1e72dacff74c1 | 2,907 | py | Python | audioanalysis_demo/test_audio_analysis.py | tiaotiao/applets | c583a4405ed18c7d74bfba49884525c43d114398 | [
"MIT"
] | null | null | null | audioanalysis_demo/test_audio_analysis.py | tiaotiao/applets | c583a4405ed18c7d74bfba49884525c43d114398 | [
"MIT"
] | null | null | null | audioanalysis_demo/test_audio_analysis.py | tiaotiao/applets | c583a4405ed18c7d74bfba49884525c43d114398 | [
"MIT"
] | null | null | null |
import sys, wave
import AudioAnalysis
FILE_NAME = "snippet.wav"
if __name__ == "__main__":
main()
#testAudioAnalysis()
#testWavWrite()
| 23.827869 | 88 | 0.579635 |
60518bb19a47173a8268f88acf5e74e628053642 | 4,866 | py | Python | syloga/transform/evaluation.py | xaedes/python-symbolic-logic-to-gate | a0dc9be9e04290008cf709fac789d224ab8c14b0 | [
"MIT"
] | null | null | null | syloga/transform/evaluation.py | xaedes/python-symbolic-logic-to-gate | a0dc9be9e04290008cf709fac789d224ab8c14b0 | [
"MIT"
] | null | null | null | syloga/transform/evaluation.py | xaedes/python-symbolic-logic-to-gate | a0dc9be9e04290008cf709fac789d224ab8c14b0 | [
"MIT"
] | null | null | null |
from syloga.core.map_expression_args import map_expression_args
from syloga.utils.identity import identity
from syloga.ast.BooleanNot import BooleanNot
from syloga.ast.BooleanValue import BooleanValue
from syloga.ast.BooleanOr import BooleanOr
from syloga.ast.BooleanAnd import BooleanAnd
from syloga.ast.BooleanNand import BooleanNand
from syloga.ast.BooleanNor import BooleanNor
from syloga.ast.BooleanXor import BooleanXor
from syloga.ast.BreakOut import BreakOut
# from syloga.core.assert_equality_by_table import assert_equality_by_table
| 36.313433 | 102 | 0.621661 |
605202551fbb724a7df19cd7d70079bcc8b5e6d2 | 2,753 | py | Python | oscar/apps/customer/mixins.py | Idematica/django-oscar | 242a0654210d63ba75f798788916c8b2f7abb7fb | [
"BSD-3-Clause"
] | 1 | 2015-08-02T05:36:11.000Z | 2015-08-02T05:36:11.000Z | oscar/apps/customer/mixins.py | elliotthill/django-oscar | 5a71a1f896f2c14f8ed3e68535a36b26118a65c5 | [
"BSD-3-Clause"
] | null | null | null | oscar/apps/customer/mixins.py | elliotthill/django-oscar | 5a71a1f896f2c14f8ed3e68535a36b26118a65c5 | [
"BSD-3-Clause"
] | null | null | null | from django.conf import settings
from django.contrib.auth import authenticate, login as auth_login
from django.contrib.sites.models import get_current_site
from django.db.models import get_model
from oscar.apps.customer.signals import user_registered
from oscar.core.loading import get_class
from oscar.core.compat import get_user_model
User = get_user_model()
CommunicationEventType = get_model('customer', 'CommunicationEventType')
Dispatcher = get_class('customer.utils', 'Dispatcher')
| 34.848101 | 79 | 0.670904 |
60522d3489fa0c5b3c558dbb7d715900c3bb9392 | 2,421 | py | Python | plot_integral.py | vfloeser/TumorDelivery | a48252c17b50397b1f51be21c0cf65ade87e9000 | [
"Apache-2.0"
] | null | null | null | plot_integral.py | vfloeser/TumorDelivery | a48252c17b50397b1f51be21c0cf65ade87e9000 | [
"Apache-2.0"
] | null | null | null | plot_integral.py | vfloeser/TumorDelivery | a48252c17b50397b1f51be21c0cf65ade87e9000 | [
"Apache-2.0"
] | null | null | null | from parameters import *
from library_time import *
from paths import *
import numpy as np
import pylab as plt
import matplotlib.pyplot as mplt
mplt.rc('text', usetex=True)
mplt.rcParams.update({'font.size': 16})
import logging, getopt, sys
import time
import os
##########################################################################################
# C O N F I G U R A T I O N
##########################################################################################
# activate ylim for w
var1 = w1
var3 = w3
var5 = w5
var10 = w10
var25 = w25
mode = "w" # u or w
##########################################################################################
# M A I N
##########################################################################################
if __name__ == "__main__":
if not os.path.exists('plots'):
os.makedirs('plots')
print('Created folder plots!')
if not os.path.exists('plots/integral'):
os.makedirs('plots/integral')
print('Created folder plots/integral!')
t = np.linspace(tmin, tmax, Nt)
r = np.linspace(0,R,Nr)
Ivar1 = np.zeros(Nt)
Ivar3 = np.zeros(Nt)
Ivar5 = np.zeros(Nt)
Ivar10 = np.zeros(Nt)
Ivar25 = np.zeros(Nt)
for i in range(Nt):
# /1000000 because of units
Ivar1[i] = integrate(var1, i,r, Nt)/1000000
Ivar3[i] = integrate(var3, i,r, Nt)/1000000
Ivar5[i] = integrate(var5, i,r, Nt)/1000000
Ivar10[i] = integrate(var10, i,r, Nt)/1000000
Ivar25[i] = integrate(var25, i,r, Nt)/1000000
mplt.plot(t, Ivar1, label=r'$\alpha = 1$')
mplt.plot(t, Ivar3, label=r'$\alpha = 3$')
mplt.plot(t, Ivar5, label=r'$\alpha = 5$')
mplt.plot(t, Ivar10, label=r'$\alpha = 10$')
mplt.plot(t, Ivar25, label=r'$\alpha = 25$')
mplt.xlim(tmin, tmax)
mplt.yscale('log')
mplt.xlabel(r'$t\quad [h]$')
mplt.ylabel(r'$\bar{'+mode+'}\quad [\mu mol]$')
##########################################################################################
# lim for w, because some values dont make sense
mplt.ylim(1e-11, 3e2)
# lim for w, because some values dont make sense
##########################################################################################
mplt.legend(loc=1, bbox_to_anchor=(1, 0.9))
mplt.tight_layout()
mplt.savefig('plots/integral/int'+mode+'.pdf', format='pdf')
mplt.show() | 33.164384 | 90 | 0.467575 |
605343dd026fb3e41372878d610c32ec85aeb812 | 1,196 | py | Python | tests/unit/combiner/Try.py | wangjeaf/CSSCheckStyle | d1b1ed89c61ca80d65f398ec4a07d73789197b04 | [
"BSD-3-Clause"
] | 21 | 2015-04-27T14:54:45.000Z | 2021-11-08T09:12:08.000Z | tests/unit/combiner/Try.py | wangjeaf/CSSCheckStyle | d1b1ed89c61ca80d65f398ec4a07d73789197b04 | [
"BSD-3-Clause"
] | null | null | null | tests/unit/combiner/Try.py | wangjeaf/CSSCheckStyle | d1b1ed89c61ca80d65f398ec4a07d73789197b04 | [
"BSD-3-Clause"
] | 6 | 2015-03-02T08:08:59.000Z | 2016-03-16T14:52:38.000Z | from helper import *
| 56.952381 | 172 | 0.723244 |
60535516e66bf2f9d907ac1cbd0eeb26881ca2c7 | 2,728 | py | Python | tests/tests.py | desdelgado/rheology-data-toolkit | 054b1659c914b8eed86239d27a746e26404395ec | [
"MIT"
] | null | null | null | tests/tests.py | desdelgado/rheology-data-toolkit | 054b1659c914b8eed86239d27a746e26404395ec | [
"MIT"
] | 18 | 2020-04-10T15:06:50.000Z | 2020-06-23T20:57:49.000Z | tests/tests.py | desdelgado/rheology-data-toolkit | 054b1659c914b8eed86239d27a746e26404395ec | [
"MIT"
] | null | null | null | import sys, os
sys.path.append("C:/Users/Delgado/Documents/Research/rheology-data-toolkit/rheodata/extractors")
import h5py
import pandas as pd
from antonpaar import AntonPaarExtractor as APE
from ARES_G2 import ARES_G2Extractor
# %%
sys.path.append("C:/Users/Delgado/Documents/Research/rheology-data-toolkit/rheodata")
from data_converter import rheo_data_transformer
import unittest
extractor = APE()
#converter = data_converter()
if __name__ == '__main__':
unittest.main()
| 36.373333 | 189 | 0.712243 |
6053b76dec55ceda546ea38cd4b295199bfedd36 | 382 | py | Python | openslides_backend/action/topic/delete.py | reiterl/openslides-backend | d36667f00087ae8baf25853d4cef18a5e6dc7b3b | [
"MIT"
] | null | null | null | openslides_backend/action/topic/delete.py | reiterl/openslides-backend | d36667f00087ae8baf25853d4cef18a5e6dc7b3b | [
"MIT"
] | null | null | null | openslides_backend/action/topic/delete.py | reiterl/openslides-backend | d36667f00087ae8baf25853d4cef18a5e6dc7b3b | [
"MIT"
] | null | null | null | from ...models.models import Topic
from ..default_schema import DefaultSchema
from ..generics import DeleteAction
from ..register import register_action
| 25.466667 | 67 | 0.740838 |
605585efa2db2b321777e037a609b7a6f87c04a9 | 686 | py | Python | main.py | Dr3xler/CookieConsentChecker | 816cdfb9d9dc741c57dbcd5e9c9ef59837196631 | [
"MIT"
] | null | null | null | main.py | Dr3xler/CookieConsentChecker | 816cdfb9d9dc741c57dbcd5e9c9ef59837196631 | [
"MIT"
] | 3 | 2021-04-29T22:57:09.000Z | 2021-05-03T15:32:39.000Z | main.py | Dr3xler/CookieConsentChecker | 816cdfb9d9dc741c57dbcd5e9c9ef59837196631 | [
"MIT"
] | 1 | 2021-08-29T09:53:09.000Z | 2021-08-29T09:53:09.000Z | from core import file_handling as file_h, driver_handling as driver_h
from website_handling import website_check as wc
from cookie_handling import cookie_compare
websites = file_h.website_reader()
driver = driver_h.webdriver_setup()
try:
wc.load_with_addon(driver, websites)
except:
print('ERROR: IN FIREFOX USAGE WITH ADDONS')
finally:
wc.close_driver_session(driver)
# driver need to be reloaded because we need a new session without addons
driver = driver_h.webdriver_setup()
try:
wc.load_without_addon(driver, websites)
except:
print('ERROR: IN VANILLA FIREFOX VERSION')
finally:
wc.close_driver_session(driver)
cookie_compare.compare(websites)
| 20.176471 | 73 | 0.781341 |
60558cb725da5275f2069f7bb3c1bb96b154754f | 4,788 | py | Python | PyPBEC/OpticalMedium.py | photonbec/PyPBEC | fd68fa3e6206671e731bc0c2973af1f67d704f05 | [
"MIT"
] | 1 | 2020-09-07T10:21:52.000Z | 2020-09-07T10:21:52.000Z | PyPBEC/OpticalMedium.py | photonbec/PyPBEC | fd68fa3e6206671e731bc0c2973af1f67d704f05 | [
"MIT"
] | null | null | null | PyPBEC/OpticalMedium.py | photonbec/PyPBEC | fd68fa3e6206671e731bc0c2973af1f67d704f05 | [
"MIT"
] | 1 | 2022-02-04T00:00:59.000Z | 2022-02-04T00:00:59.000Z | import numpy as np
from scipy import constants as sc
from scipy.interpolate import interp1d
from pathlib import Path
from scipy.special import erf as Erf
import pandas as pd
import sys
import os
import csv
| 38.304 | 154 | 0.746658 |
60563aa2ef81de63dbaea0f3ad170ec8ec84759d | 1,251 | py | Python | corehq/apps/appstore/urls.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
] | 1 | 2015-02-10T23:26:39.000Z | 2015-02-10T23:26:39.000Z | corehq/apps/appstore/urls.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/appstore/urls.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls.defaults import url, include, patterns
from corehq.apps.appstore.dispatcher import AppstoreDispatcher
store_urls = patterns('corehq.apps.appstore.views',
url(r'^$', 'appstore_default', name="appstore_interfaces_default"),
AppstoreDispatcher.url_pattern(),
)
urlpatterns = patterns('corehq.apps.appstore.views',
url(r'^$', 'appstore', name='appstore'),
url(r'^api/', 'appstore_api', name='appstore_api'),
url(r'^store/', include(store_urls)),
url(r'^(?P<domain>[\w\.-]+)/info/$', 'project_info', name='project_info'),
url(r'^deployments/$', 'deployments', name='deployments'),
url(r'^deployments/api/$', 'deployments_api', name='deployments_api'),
url(r'^deployments/(?P<domain>[\w\.-]+)/info/$', 'deployment_info', name='deployment_info'),
url(r'^(?P<domain>[\w\.-]+)/approve/$', 'approve_app', name='approve_appstore_app'),
url(r'^(?P<domain>[\w\.-]+)/copy/$', 'copy_snapshot', name='domain_copy_snapshot'),
url(r'^(?P<domain>[\w\.-]+)/importapp/$', 'import_app', name='import_app_from_snapshot'),
url(r'^(?P<domain>[\w\.-]+)/image/$', 'project_image', name='appstore_project_image'),
url(r'^(?P<domain>[\w\.-]+)/multimedia/$', 'media_files', name='media_files'),
)
| 46.333333 | 96 | 0.657074 |
6057750dc6cf45d0cc166a95aaf751e85207651a | 2,667 | py | Python | faster-rcnn-vgg16-fpn/model/fpn.py | fengkaibit/faster-rcnn_vgg16_fpn | 354efd4b5f4d4a42e9c92f48501e02cd7f0c0cdb | [
"MIT"
] | 13 | 2019-05-21T13:19:56.000Z | 2022-02-27T14:36:43.000Z | faster-rcnn-vgg16-fpn/model/fpn.py | fengkaibit/faster-rcnn_vgg16_fpn | 354efd4b5f4d4a42e9c92f48501e02cd7f0c0cdb | [
"MIT"
] | 2 | 2019-06-27T07:02:33.000Z | 2021-06-30T15:51:12.000Z | faster-rcnn-vgg16-fpn/model/fpn.py | fengkaibit/faster-rcnn_vgg16_fpn | 354efd4b5f4d4a42e9c92f48501e02cd7f0c0cdb | [
"MIT"
] | 4 | 2019-05-21T13:19:56.000Z | 2021-06-29T01:10:31.000Z | from __future__ import absolute_import
import torch
from torch.nn import functional
def normal_init(m, mean, stddev, truncated=False):
    """Initialize a module's weight from a normal distribution and zero its bias.

    Parameters
    ----------
    m : torch.nn.Module
        Module with a ``weight`` parameter and, optionally, a ``bias``
        parameter (e.g. ``nn.Linear`` / ``nn.Conv2d``).
    mean : float
        Mean of the normal distribution.
    stddev : float
        Standard deviation of the normal distribution.
    truncated : bool, optional
        If True, approximate a truncated normal by sampling N(0, 1), folding
        into [-2, 2] with ``fmod(2)``, then scaling and shifting.  This is not
        an exact truncated normal (as the original comment noted).
    """
    if truncated:
        # Approximate truncated normal: fold standard-normal samples into
        # [-2, 2] before scaling/shifting.  Not a perfect approximation.
        m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean)
    else:
        m.weight.data.normal_(mean, stddev)
    # Fix: layers built with bias=False have m.bias is None; the original
    # unconditionally called m.bias.data.zero_() and crashed on them.
    if m.bias is not None:
        m.bias.data.zero_()
6057d15e673e5e8174ccbf2844dfdc2c7b7a4b7d | 2,314 | py | Python | test/setups/finders/finders_test.py | bowlofstew/client | 0d5ae42aaf9863e3871828b6df06170aad17c560 | [
"MIT"
] | 40 | 2015-04-15T09:40:23.000Z | 2022-02-11T11:07:24.000Z | test/setups/finders/finders_test.py | bowlofstew/client | 0d5ae42aaf9863e3871828b6df06170aad17c560 | [
"MIT"
] | 19 | 2015-04-15T18:34:53.000Z | 2018-11-17T00:11:05.000Z | test/setups/finders/finders_test.py | bowlofstew/client | 0d5ae42aaf9863e3871828b6df06170aad17c560 | [
"MIT"
] | 22 | 2015-04-15T09:45:46.000Z | 2020-09-29T17:04:19.000Z | import unittest
from biicode.common.settings.version import Version
from mock import patch
from biicode.client.setups.finders.finders import gnu_version
from biicode.client.setups.rpi_cross_compiler import find_gnu_arm
from biicode.client.workspace.bii_paths import get_biicode_env_folder_path
GCC_VERSION_MAC = '''Configured with: --prefix=/Applications/Xcode.app/Contents/Developer/usr --with-gxx-include-dir=/usr/include/c++/4.2.1
Apple LLVM version 5.1 (clang-503.0.38) (based on LLVM 3.4svn)
Target: x86_64-apple-darwin13.1.0
Thread model: posix'''
GCC_VERSION_UBUNTU = '''gcc (Ubuntu/Linaro 4.8.1-10ubuntu9) 4.8.1
Copyright (C) 2013 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
'''
GCC_VERSION_WIN = '''gcc (GCC) 4.8.1
Copyright (C) 2013 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.'''
| 44.5 | 139 | 0.709594 |
60582c7b916077e6db28ad364408137dc3ff3825 | 784 | py | Python | setup.py | mintmachine/arweave-python-client | 69e8e2d32090de5fd276efdb9b9103d91b4182f6 | [
"MIT"
] | 63 | 2020-01-22T23:43:53.000Z | 2022-03-24T23:18:13.000Z | setup.py | mintmachine/arweave-python-client | 69e8e2d32090de5fd276efdb9b9103d91b4182f6 | [
"MIT"
] | 17 | 2020-01-22T23:41:07.000Z | 2022-01-04T11:43:30.000Z | setup.py | mintmachine/arweave-python-client | 69e8e2d32090de5fd276efdb9b9103d91b4182f6 | [
"MIT"
] | 25 | 2020-08-12T05:00:25.000Z | 2022-03-31T01:43:25.000Z | from distutils.core import setup
# Packaging metadata for the arweave-python-client library.  distutils only
# supports a subset of setuptools keywords; install_requires is honored when
# the file is processed by setuptools/pip.
setup(
    name="arweave-python-client",
    packages = ['arweave'],  # this must be the same as the name above
    version="1.0.15.dev0",
    description="Client interface for sending transactions on the Arweave permaweb",
    author="Mike Hibbert",
    author_email="mike@hibbertitsolutions.co.uk",
    url="https://github.com/MikeHibbert/arweave-python-client",
    download_url="https://github.com/MikeHibbert/arweave-python-client",
    keywords=['arweave', 'crypto'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # Third-party runtime dependencies installed alongside the package.
    install_requires=[
        'arrow',
        'python-jose',
        'pynacl',
        'pycryptodome',
        'cryptography',
        'requests',
        'psutil'
    ],
)
| 28 | 82 | 0.678571 |
605951901688fbda8e99d2e5f2796e9b32eff1fe | 18,195 | py | Python | exchange_calendars/extensions/exchange_calendar_krx.py | syonoki/exchange_calendars | 639ab0f88a874af99bb601824a8ffef2572820d4 | [
"Apache-2.0"
] | null | null | null | exchange_calendars/extensions/exchange_calendar_krx.py | syonoki/exchange_calendars | 639ab0f88a874af99bb601824a8ffef2572820d4 | [
"Apache-2.0"
] | null | null | null | exchange_calendars/extensions/exchange_calendar_krx.py | syonoki/exchange_calendars | 639ab0f88a874af99bb601824a8ffef2572820d4 | [
"Apache-2.0"
] | null | null | null | """
Last update: 2018-10-26
"""
from exchange_calendars.extensions.calendar_extension import ExtendedExchangeCalendar
from pandas import (
Timestamp,
)
from pandas.tseries.holiday import (
Holiday,
previous_friday,
)
from exchange_calendars.exchange_calendar import HolidayCalendar
from datetime import time
from itertools import chain
from pytz import timezone
# --- Fixed-date South Korean holiday rules (pandas Holiday objects) ---

# New Year's Day (Jan 1)
KRNewYearsDay = Holiday(
    'New Years Day',
    month=1,
    day=1)

# Independence Movement Day (Mar 1)
KRIndependenceDay = Holiday(
    'Independence Day',
    month=3,
    day=1
)

# Arbour Day (Apr 5); rule inactive from 2006 (end_date)
KRArbourDay = Holiday(
    'Arbour Day',
    month=4,
    day=5,
    end_date=Timestamp('2006-01-01'),
)

# Labour Day (May 1)
KRLabourDay = Holiday(
    'Labour Day',
    month=5,
    day=1
)

# Children's Day (May 5).  Fix: this rule was mislabelled 'Labour Day'
# (copy-paste from the rule above), which collides with KRLabourDay's rule
# name when both are registered in the same holiday calendar.
KRChildrensDay = Holiday(
    'Childrens Day',
    month=5,
    day=5
)

# Memorial Day (Jun 6)
KRMemorialDay = Holiday(
    'Memorial Day',
    month=6,
    day=6
)

# Constitution Day (Jul 17); rule inactive from 2008 (end_date)
KRConstitutionDay = Holiday(
    'Constitution Day',
    month=7,
    day=17,
    end_date=Timestamp('2008-01-01')
)

# Liberation Day (Aug 15)
KRLiberationDay = Holiday(
    'Liberation Day',
    month=8,
    day=15
)

# National Foundation Day (Oct 3)
KRNationalFoundationDay = Holiday(
    'NationalFoundationDay',
    month=10,
    day=3
)

# Christmas (Dec 25)
Christmas = Holiday(
    'Christmas',
    month=12,
    day=25
)

# Hangul Proclamation Day (Oct 9); rule active from 2013 (start_date)
KRHangulProclamationDay = Holiday(
    'Hangul Proclamation Day',
    month=10,
    day=9,
    start_date=Timestamp('2013-01-01')
)

# KRX year-end closing: Dec 31, rolled back to the previous Friday when it
# falls on a weekend; in effect from 2001.
KRXEndOfYearClosing = Holiday(
    'KRX Year-end Closing',
    month=12,
    day=31,
    observance=previous_friday,
    start_date=Timestamp('2001-01-01')
)

# Ad-hoc year-end closing days for 2000 (several days, so listed explicitly
# rather than expressed as a rule).
KRXEndOfYearClosing2000 = [
    Timestamp('2000-12-27', tz='UTC'),
    Timestamp('2000-12-28', tz='UTC'),
    Timestamp('2000-12-29', tz='UTC'),
    Timestamp('2000-12-30', tz='UTC'),
]
# Lunar New Year
KRLunarNewYear = [
# 2000
Timestamp('2000-02-04', tz='UTC'),
# 2001
Timestamp('2001-01-23', tz='UTC'),
Timestamp('2001-01-24', tz='UTC'),
Timestamp('2001-01-25', tz='UTC'),
# 2002
Timestamp('2002-02-11', tz='UTC'),
Timestamp('2002-02-12', tz='UTC'),
Timestamp('2002-02-13', tz='UTC'),
# 2003
Timestamp('2003-01-31', tz='UTC'),
# 2004
Timestamp('2004-01-21', tz='UTC'),
Timestamp('2004-01-22', tz='UTC'),
Timestamp('2004-01-23', tz='UTC'),
# 2005
Timestamp('2005-02-08', tz='UTC'),
Timestamp('2005-02-09', tz='UTC'),
Timestamp('2005-02-10', tz='UTC'),
# 2006
Timestamp('2006-01-28', tz='UTC'),
Timestamp('2006-01-29', tz='UTC'),
Timestamp('2006-01-30', tz='UTC'),
# 2007
Timestamp('2007-02-19', tz='UTC'),
# 2008
Timestamp('2008-02-06', tz='UTC'),
Timestamp('2008-02-07', tz='UTC'),
Timestamp('2008-02-08', tz='UTC'),
# 2009
Timestamp('2009-01-25', tz='UTC'),
Timestamp('2009-01-26', tz='UTC'),
Timestamp('2009-01-27', tz='UTC'),
# 2010
Timestamp('2010-02-13', tz='UTC'),
Timestamp('2010-02-14', tz='UTC'),
Timestamp('2010-02-15', tz='UTC'),
# 2011
Timestamp('2011-02-02', tz='UTC'),
Timestamp('2011-02-03', tz='UTC'),
Timestamp('2011-02-04', tz='UTC'),
# 2012
Timestamp('2012-01-23', tz='UTC'),
Timestamp('2012-01-24', tz='UTC'),
# 2013
Timestamp('2013-02-11', tz='UTC'),
# 2014
Timestamp('2014-01-30', tz='UTC'),
Timestamp('2014-01-31', tz='UTC'),
# 2015
Timestamp('2015-02-18', tz='UTC'),
Timestamp('2015-02-19', tz='UTC'),
Timestamp('2015-02-20', tz='UTC'),
# 2016
Timestamp('2016-02-07', tz='UTC'),
Timestamp('2016-02-08', tz='UTC'),
Timestamp('2016-02-09', tz='UTC'),
Timestamp('2016-02-10', tz='UTC'),
# 2017
Timestamp('2017-01-27', tz='UTC'),
Timestamp('2017-01-28', tz='UTC'),
Timestamp('2017-01-29', tz='UTC'),
Timestamp('2017-01-30', tz='UTC'),
# 2018
Timestamp('2018-02-15', tz='UTC'),
Timestamp('2018-02-16', tz='UTC'),
Timestamp('2018-02-17', tz='UTC'),
# 2019
Timestamp('2019-02-04', tz='UTC'),
Timestamp('2019-02-05', tz='UTC'),
Timestamp('2019-02-06', tz='UTC'),
# 2020
Timestamp('2020-01-24', tz='UTC'),
Timestamp('2020-01-25', tz='UTC'),
Timestamp('2020-01-26', tz='UTC'),
Timestamp('2020-01-27', tz='UTC'),
# 2021
Timestamp('2021-02-11', tz='UTC'),
Timestamp('2021-02-12', tz='UTC'),
# 2022
Timestamp('2022-01-31', tz='UTC'),
Timestamp('2022-02-01', tz='UTC'),
Timestamp('2022-02-02', tz='UTC'),
]
# Election Days
KRElectionDays = [
Timestamp('2000-04-13', tz='UTC'), # National Assembly
Timestamp('2002-06-13', tz='UTC'), # Regional election
Timestamp('2002-12-19', tz='UTC'), # Presidency
Timestamp('2004-04-15', tz='UTC'), # National Assembly
Timestamp('2006-05-31', tz='UTC'), # Regional election
Timestamp('2007-12-19', tz='UTC'), # Presidency
Timestamp('2008-04-09', tz='UTC'), # National Assembly
Timestamp('2010-06-02', tz='UTC'), # Local election
Timestamp('2012-04-11', tz='UTC'), # National Assembly
Timestamp('2012-12-19', tz='UTC'), # Presidency
Timestamp('2014-06-04', tz='UTC'), # Local election
Timestamp('2016-04-13', tz='UTC'), # National Assembly
Timestamp('2017-05-09', tz='UTC'), # Presidency
Timestamp('2018-06-13', tz='UTC'), # Local election
Timestamp('2020-04-15', tz='UTC'), # National Assembly
Timestamp('2022-03-09', tz='UTC'), # Presidency
Timestamp('2022-06-01', tz='UTC'), # Local election
]
# Buddha's birthday
KRBuddhasBirthday = [
Timestamp('2000-05-11', tz='UTC'),
Timestamp('2001-05-01', tz='UTC'),
Timestamp('2003-05-08', tz='UTC'),
Timestamp('2004-05-26', tz='UTC'),
Timestamp('2005-05-15', tz='UTC'),
Timestamp('2006-05-05', tz='UTC'),
Timestamp('2007-05-24', tz='UTC'),
Timestamp('2008-05-12', tz='UTC'),
Timestamp('2009-05-02', tz='UTC'),
Timestamp('2010-05-21', tz='UTC'),
Timestamp('2011-05-10', tz='UTC'),
Timestamp('2012-05-28', tz='UTC'),
Timestamp('2013-05-17', tz='UTC'),
Timestamp('2014-05-06', tz='UTC'),
Timestamp('2015-05-25', tz='UTC'),
Timestamp('2016-05-14', tz='UTC'),
Timestamp('2017-05-03', tz='UTC'),
Timestamp('2018-05-22', tz='UTC'),
Timestamp('2020-04-30', tz='UTC'),
Timestamp('2021-05-19', tz='UTC'),
]
# Harvest Moon Day
KRHarvestMoonDay = [
# 2000
Timestamp('2000-09-11', tz='UTC'),
Timestamp('2000-09-12', tz='UTC'),
Timestamp('2000-09-13', tz='UTC'),
# 2001
Timestamp('2001-10-01', tz='UTC'),
Timestamp('2001-10-02', tz='UTC'),
# 2002
Timestamp('2002-09-20', tz='UTC'),
# 2003
Timestamp('2003-09-10', tz='UTC'),
Timestamp('2003-09-11', tz='UTC'),
Timestamp('2003-09-12', tz='UTC'),
# 2004
Timestamp('2004-09-27', tz='UTC'),
Timestamp('2004-09-28', tz='UTC'),
Timestamp('2004-09-29', tz='UTC'),
# 2005
Timestamp('2005-09-17', tz='UTC'),
Timestamp('2005-09-18', tz='UTC'),
Timestamp('2005-09-19', tz='UTC'),
# 2006
Timestamp('2006-10-05', tz='UTC'),
Timestamp('2006-10-06', tz='UTC'),
Timestamp('2006-10-07', tz='UTC'),
# 2007
Timestamp('2007-09-24', tz='UTC'),
Timestamp('2007-09-25', tz='UTC'),
Timestamp('2007-09-26', tz='UTC'),
# 2008
Timestamp('2008-09-13', tz='UTC'),
Timestamp('2008-09-14', tz='UTC'),
Timestamp('2008-09-15', tz='UTC'),
# 2009
Timestamp('2009-10-02', tz='UTC'),
Timestamp('2009-10-03', tz='UTC'),
Timestamp('2009-10-04', tz='UTC'),
# 2010
Timestamp('2010-09-21', tz='UTC'),
Timestamp('2010-09-22', tz='UTC'),
Timestamp('2010-09-23', tz='UTC'),
# 2011
Timestamp('2011-09-12', tz='UTC'),
Timestamp('2011-09-13', tz='UTC'),
# 2012
Timestamp('2012-10-01', tz='UTC'),
# 2013
Timestamp('2013-09-18', tz='UTC'),
Timestamp('2013-09-19', tz='UTC'),
Timestamp('2013-09-20', tz='UTC'),
# 2014
Timestamp('2014-09-08', tz='UTC'),
Timestamp('2014-09-09', tz='UTC'),
Timestamp('2014-09-10', tz='UTC'),
# 2015
Timestamp('2015-09-28', tz='UTC'),
Timestamp('2015-09-29', tz='UTC'),
# 2016
Timestamp('2016-09-14', tz='UTC'),
Timestamp('2016-09-15', tz='UTC'),
Timestamp('2016-09-16', tz='UTC'),
# 2017
Timestamp('2017-10-03', tz='UTC'),
Timestamp('2017-10-04', tz='UTC'),
Timestamp('2017-10-05', tz='UTC'),
Timestamp('2017-10-06', tz='UTC'),
# 2018
Timestamp('2018-09-23', tz='UTC'),
Timestamp('2018-09-24', tz='UTC'),
Timestamp('2018-09-25', tz='UTC'),
Timestamp('2018-09-26', tz='UTC'),
# 2019
Timestamp('2019-09-12', tz='UTC'),
Timestamp('2019-09-13', tz='UTC'),
# 2020
Timestamp('2020-09-30', tz='UTC'),
Timestamp('2020-10-01', tz='UTC'),
Timestamp('2020-10-02', tz='UTC'),
# 2021
Timestamp('2021-09-20', tz='UTC'),
Timestamp('2021-09-21', tz='UTC'),
Timestamp('2021-09-22', tz='UTC'),
# 2022
Timestamp('2022-09-09', tz='UTC'),
Timestamp('2022-09-12', tz='UTC'), #
]
#
KRSubstitutionHolidayForChildrensDay2018 = [
Timestamp('2018-05-07', tz='UTC')
]
#
KRCelebrationForWorldCupHosting = [
Timestamp('2002-07-01', tz='UTC')
]
KRSeventyYearsFromIndependenceDay = [
Timestamp('2015-08-14', tz='UTC')
]
KRTemporaryHolidayForChildrensDay2016 = [
Timestamp('2016-05-06', tz='UTC')
]
KRTemporaryHolidayForHarvestMoonDay2017 = [
Timestamp('2017-10-02', tz='UTC')
]
KRTemporaryHolidayForChildrenDay2018 = [
Timestamp('2018-05-07', tz='UTC')
]
KRTemporaryHolidayForChildrenDay2019 = [
Timestamp('2019-05-06', tz='UTC')
]
KRTemporaryHolidayForLiberationDay2020 = [
Timestamp('2020-08-17', tz='UTC')
]
KRTemporaryHoliday2021 = [
Timestamp('2021-08-16', tz='UTC'), #
Timestamp('2021-10-04', tz='UTC'), #
Timestamp('2021-10-11', tz='UTC'), #
]
KRTemporaryHoliday2022 = [
Timestamp('2022-10-10', tz='UTC'), #
]
#
HolidaysNeedToCheck = [
Timestamp('2000-01-03', tz='UTC')
]
HolidaysBefore1999 = [
Timestamp('1990-01-01', tz='UTC'),
Timestamp('1990-01-02', tz='UTC'),
Timestamp('1990-01-03', tz='UTC'),
Timestamp('1990-01-29', tz='UTC'),
Timestamp('1990-03-01', tz='UTC'),
Timestamp('1990-04-05', tz='UTC'),
Timestamp('1990-05-02', tz='UTC'),
Timestamp('1990-06-06', tz='UTC'),
Timestamp('1990-07-17', tz='UTC'),
Timestamp('1990-08-15', tz='UTC'),
Timestamp('1990-09-03', tz='UTC'),
Timestamp('1990-10-01', tz='UTC'),
Timestamp('1990-10-03', tz='UTC'),
Timestamp('1990-10-09', tz='UTC'),
Timestamp('1990-12-25', tz='UTC'),
Timestamp('1991-01-01', tz='UTC'),
Timestamp('1991-01-02', tz='UTC'),
Timestamp('1991-02-14', tz='UTC'),
Timestamp('1991-02-15', tz='UTC'),
Timestamp('1991-03-01', tz='UTC'),
Timestamp('1991-04-05', tz='UTC'),
Timestamp('1991-05-21', tz='UTC'),
Timestamp('1991-06-06', tz='UTC'),
Timestamp('1991-07-17', tz='UTC'),
Timestamp('1991-08-15', tz='UTC'),
Timestamp('1991-09-23', tz='UTC'),
Timestamp('1991-10-03', tz='UTC'),
Timestamp('1991-12-25', tz='UTC'),
Timestamp('1991-12-30', tz='UTC'),
Timestamp('1992-01-01', tz='UTC'),
Timestamp('1992-09-10', tz='UTC'),
Timestamp('1992-09-11', tz='UTC'),
Timestamp('1992-10-03', tz='UTC'),
Timestamp('1992-12-25', tz='UTC'),
Timestamp('1992-12-29', tz='UTC'),
Timestamp('1992-12-30', tz='UTC'),
Timestamp('1992-12-31', tz='UTC'),
Timestamp('1993-01-01', tz='UTC'),
Timestamp('1993-01-22', tz='UTC'),
Timestamp('1993-03-01', tz='UTC'),
Timestamp('1993-04-05', tz='UTC'),
Timestamp('1993-05-05', tz='UTC'),
Timestamp('1993-05-28', tz='UTC'),
Timestamp('1993-07-17', tz='UTC'),
Timestamp('1993-09-29', tz='UTC'),
Timestamp('1993-09-30', tz='UTC'),
Timestamp('1993-10-01', tz='UTC'),
Timestamp('1993-12-29', tz='UTC'),
Timestamp('1993-12-30', tz='UTC'),
Timestamp('1993-12-31', tz='UTC'),
Timestamp('1994-01-02', tz='UTC'),
Timestamp('1994-02-09', tz='UTC'),
Timestamp('1994-02-10', tz='UTC'),
Timestamp('1994-02-11', tz='UTC'),
Timestamp('1994-03-01', tz='UTC'),
Timestamp('1994-04-05', tz='UTC'),
Timestamp('1994-05-05', tz='UTC'),
Timestamp('1994-06-06', tz='UTC'),
Timestamp('1994-07-17', tz='UTC'),
Timestamp('1994-08-15', tz='UTC'),
Timestamp('1994-09-19', tz='UTC'),
Timestamp('1994-09-20', tz='UTC'),
Timestamp('1994-09-21', tz='UTC'),
Timestamp('1994-10-03', tz='UTC'),
Timestamp('1994-12-29', tz='UTC'),
Timestamp('1994-12-30', tz='UTC'),
Timestamp('1995-01-02', tz='UTC'),
Timestamp('1995-01-30', tz='UTC'),
Timestamp('1995-01-31', tz='UTC'),
Timestamp('1995-02-01', tz='UTC'),
Timestamp('1995-03-01', tz='UTC'),
Timestamp('1995-05-01', tz='UTC'),
Timestamp('1995-05-05', tz='UTC'),
Timestamp('1995-06-06', tz='UTC'),
Timestamp('1995-06-27', tz='UTC'),
Timestamp('1995-07-17', tz='UTC'),
Timestamp('1995-08-15', tz='UTC'),
Timestamp('1995-09-08', tz='UTC'),
Timestamp('1995-09-09', tz='UTC'),
Timestamp('1995-10-03', tz='UTC'),
Timestamp('1995-12-22', tz='UTC'),
Timestamp('1995-12-25', tz='UTC'),
Timestamp('1995-12-28', tz='UTC'),
Timestamp('1995-12-29', tz='UTC'),
Timestamp('1995-12-30', tz='UTC'),
Timestamp('1995-12-31', tz='UTC'),
Timestamp('1996-01-01', tz='UTC'),
Timestamp('1996-01-02', tz='UTC'),
Timestamp('1996-02-19', tz='UTC'),
Timestamp('1996-02-20', tz='UTC'),
Timestamp('1996-03-01', tz='UTC'),
Timestamp('1996-04-05', tz='UTC'),
Timestamp('1996-04-11', tz='UTC'),
Timestamp('1996-05-01', tz='UTC'),
Timestamp('1996-05-05', tz='UTC'),
Timestamp('1996-05-24', tz='UTC'),
Timestamp('1996-06-06', tz='UTC'),
Timestamp('1996-07-17', tz='UTC'),
Timestamp('1996-08-15', tz='UTC'),
Timestamp('1996-09-26', tz='UTC'),
Timestamp('1996-09-27', tz='UTC'),
Timestamp('1996-09-28', tz='UTC'),
Timestamp('1996-10-03', tz='UTC'),
Timestamp('1996-12-25', tz='UTC'),
Timestamp('1996-12-30', tz='UTC'),
Timestamp('1996-12-31', tz='UTC'),
Timestamp('1997-01-01', tz='UTC'),
Timestamp('1997-01-02', tz='UTC'),
Timestamp('1997-02-07', tz='UTC'),
Timestamp('1997-02-08', tz='UTC'),
Timestamp('1997-03-01', tz='UTC'),
Timestamp('1997-04-05', tz='UTC'),
Timestamp('1997-05-05', tz='UTC'),
Timestamp('1997-05-14', tz='UTC'),
Timestamp('1997-06-06', tz='UTC'),
Timestamp('1997-07-17', tz='UTC'),
Timestamp('1997-08-15', tz='UTC'),
Timestamp('1997-09-16', tz='UTC'),
Timestamp('1997-09-17', tz='UTC'),
Timestamp('1997-10-03', tz='UTC'),
Timestamp('1997-12-25', tz='UTC'),
Timestamp('1998-01-01', tz='UTC'),
Timestamp('1998-01-02', tz='UTC'),
Timestamp('1998-01-27', tz='UTC'),
Timestamp('1998-01-28', tz='UTC'),
Timestamp('1998-01-29', tz='UTC'),
Timestamp('1998-03-01', tz='UTC'),
Timestamp('1998-04-05', tz='UTC'),
Timestamp('1998-05-01', tz='UTC'),
Timestamp('1998-05-03', tz='UTC'),
Timestamp('1998-05-05', tz='UTC'),
Timestamp('1998-06-04', tz='UTC'),
Timestamp('1998-06-06', tz='UTC'),
Timestamp('1998-07-17', tz='UTC'),
Timestamp('1998-08-15', tz='UTC'),
Timestamp('1998-10-03', tz='UTC'),
Timestamp('1998-10-04', tz='UTC'),
Timestamp('1998-10-05', tz='UTC'),
Timestamp('1998-10-06', tz='UTC'),
Timestamp('1998-12-25', tz='UTC'),
Timestamp('1998-12-31', tz='UTC'),
Timestamp('1999-01-01', tz='UTC'),
Timestamp('1999-02-15', tz='UTC'),
Timestamp('1999-02-16', tz='UTC'),
Timestamp('1999-02-17', tz='UTC'),
Timestamp('1999-03-01', tz='UTC'),
Timestamp('1999-04-05', tz='UTC'),
Timestamp('1999-05-05', tz='UTC'),
Timestamp('1999-05-22', tz='UTC'),
Timestamp('1999-06-06', tz='UTC'),
Timestamp('1999-07-17', tz='UTC'),
Timestamp('1999-09-23', tz='UTC'),
Timestamp('1999-09-24', tz='UTC'),
Timestamp('1999-09-25', tz='UTC'),
Timestamp('1999-10-03', tz='UTC'),
Timestamp('1999-12-29', tz='UTC'),
Timestamp('1999-12-30', tz='UTC'),
Timestamp('1999-12-31', tz='UTC'),
]
| 28.474178 | 85 | 0.580599 |
605a9a49370c1c190ccbd51f63a583f9a84128cd | 5,152 | py | Python | utilities.py | ameldocena/StratifiedAggregation | 0031fea120bff00c739eb6c3d654a5c6d3f094bb | [
"MIT"
] | null | null | null | utilities.py | ameldocena/StratifiedAggregation | 0031fea120bff00c739eb6c3d654a5c6d3f094bb | [
"MIT"
] | null | null | null | utilities.py | ameldocena/StratifiedAggregation | 0031fea120bff00c739eb6c3d654a5c6d3f094bb | [
"MIT"
] | null | null | null | import random
import numpy
#import tensorflow as tf
#import torch
from abc import abstractmethod
from sklearn.decomposition import PCA
from aggregators import FedAvg, MultiKrum, AlignedAvg, TrimmedMean, Median, StratifiedAggr
# class StratifiedRandomSelection(SelectionStrategy):
# #We first stratify: Each stratum will be a list of workers
# #Then within each stratum, we randomly select
# #We would need the list of workers and the information about their skews
def select_aggregator(args, name, KWARGS={}):
    # Factory: create and return the Aggregator object selected by ``name``.
    # NOTE(review): the mutable default ``KWARGS={}`` is shared across calls,
    # and the 'AlignedAvgImpute' branch mutates it in place, so keys added
    # there persist into subsequent calls -- confirm this is intended.
    if name == "FedAvg":
        return FedAvg(args, name, KWARGS)
    elif name == "AlignedAvg":
        return AlignedAvg(args, name, KWARGS)
    elif name == "AlignedAvgImpute":
        KWARGS.update({"use_impute":"filter","align":"fusion"})
        # NOTE(review): unlike every other branch, KWARGS is unpacked here
        # (**KWARGS) instead of passed positionally -- verify which calling
        # convention AlignedAvg actually expects; one of the two is likely wrong.
        return AlignedAvg(args, name, **KWARGS)
    elif name == "MultiKrum":
        return MultiKrum(args, name, KWARGS)
    elif name == "TrimmedMean":
        return TrimmedMean(args, name, KWARGS)
    elif name == "Median":
        return Median(args, name, KWARGS)
    elif (name == "StratKrum") or (name == "StratTrimMean") or (name == "StratMedian") or (name == "StratFedAvg"):
        # We may have to change the class name to StratifiedAggregation
        return StratifiedAggr(args, name, KWARGS)
    else:
        raise NotImplementedError(f"Unrecognized Aggregator Name: {name}")
def calculate_pca_of_gradients(logger, gradients, num_components):
    # Unchanged from original work
    """Project ``gradients`` onto their first ``num_components`` principal
    components and return the transformed coordinates.

    :param logger: loguru-style logger used to report progress
    :param gradients: 2-D array-like of gradient vectors (one row per sample)
    :param num_components: number of principal components to keep
    :return: array of shape (n_samples, num_components)
    """
    logger.info("Computing {}-component PCA of gradients".format(num_components))
    decomposer = PCA(n_components=num_components)
    return decomposer.fit_transform(gradients)
#So this is here after all
def calculate_model_gradient( model_1, model_2):
# Minor change from original work
"""
Calculates the gradient (parameter difference) between two Torch models.
:param logger: loguru.logger (NOW REMOVED)
:param model_1: torch.nn
:param model_2: torch.nn
"""
model_1_parameters = list(dict(model_1.state_dict()))
model_2_parameters = list(dict(model_2.state_dict()))
return calculate_parameter_gradients(model_1_parameters, model_2_parameters)
def calculate_parameter_gradients(params_1, params_2):
    # Minor change from original work
    """
    Compute the element-wise difference between two parameter collections.

    :param params_1: first parameter collection (array-like)
    :param params_2: second parameter collection (array-like)
    :return: numpy.ndarray holding ``params_1 - params_2``
    """
    difference = numpy.subtract(params_1, params_2)
    return numpy.array(list(difference))
# #Inserted
# def convert2TF(torch_tensor):
# # Converts a pytorch tensor into a Tensorflow.
# # We first convert torch into numpy, then to tensorflow.
# # Arg: torch_tensor - a Pytorch tensor object
# np_tensor = torch_tensor.numpy().astype(float)
# return tf.convert_to_tensor(np_tensor)
#
# def convert2Torch(tf_tensor):
# #Converts a TF tensor to Torch
# #Arg: tf_tensor - a TF tensor
# np_tensor = tf.make_ndarray(tf_tensor)
# return torch.from_numpy(np_tensor)
def generate_uniform_weights(random_workers):
    """
    Assign an equal aggregation weight to every stratum in ``random_workers``.

    :param random_workers: dict mapping stratum identifiers to their workers
    :return: dict mapping each stratum to the uniform weight
        ``1 / len(random_workers)``; an empty dict when there are no strata
        (the original raised ZeroDivisionError in that case)
    """
    if not random_workers:
        # No strata: nothing to weight, and 1/0 would raise.
        return {}
    # len() of the dict directly -- no need to materialize the key list.
    weight = 1.0 / len(random_workers)
    return {stratum: weight for stratum in random_workers}
605ad59a9efe4d2c5632efa0fb33e3ddefc540bb | 1,301 | py | Python | game/player.py | b1naryth1ef/mmo | 400f66b0ac76896af2d7108ff3540c42614a32f0 | [
"BSD-2-Clause"
] | 7 | 2015-09-29T13:32:36.000Z | 2021-06-22T19:24:01.000Z | game/player.py | b1naryth1ef/mmo | 400f66b0ac76896af2d7108ff3540c42614a32f0 | [
"BSD-2-Clause"
] | null | null | null | game/player.py | b1naryth1ef/mmo | 400f66b0ac76896af2d7108ff3540c42614a32f0 | [
"BSD-2-Clause"
] | 1 | 2019-03-03T23:24:28.000Z | 2019-03-03T23:24:28.000Z | from sprites import PlayerSprite
import time | 30.255814 | 75 | 0.544965 |
605b1532a73c491b1c591dcd0c51687f13109748 | 1,019 | py | Python | toys/layers/pool.py | cbarrick/toys | 0368036ddb7594c0b6e7cdc704aeec918786e58a | [
"MIT"
] | 1 | 2018-04-28T18:29:37.000Z | 2018-04-28T18:29:37.000Z | toys/layers/pool.py | cbarrick/csb | 0368036ddb7594c0b6e7cdc704aeec918786e58a | [
"MIT"
] | null | null | null | toys/layers/pool.py | cbarrick/csb | 0368036ddb7594c0b6e7cdc704aeec918786e58a | [
"MIT"
] | null | null | null | from typing import Sequence
import torch
from torch import nn
| 33.966667 | 67 | 0.62316 |
605c46e1dca45ffe66a05a4a91174510b5abbb04 | 433 | py | Python | src/forecastmgmt/ui/masterdata/person_window.py | vvladych/forecastmgmt | 9eea272d00bb42031f49b5bb5af01388ecce31cf | [
"Unlicense"
] | null | null | null | src/forecastmgmt/ui/masterdata/person_window.py | vvladych/forecastmgmt | 9eea272d00bb42031f49b5bb5af01388ecce31cf | [
"Unlicense"
] | 37 | 2015-07-01T22:18:51.000Z | 2016-03-11T21:17:12.000Z | src/forecastmgmt/ui/masterdata/person_window.py | vvladych/forecastmgmt | 9eea272d00bb42031f49b5bb5af01388ecce31cf | [
"Unlicense"
] | null | null | null | from gi.repository import Gtk
from masterdata_abstract_window import MasterdataAbstractWindow
from person_add_mask import PersonAddMask
from person_list_mask import PersonListMask
| 25.470588 | 124 | 0.757506 |
605ed3488c51cb7e0a5749161c5e9f3896da6586 | 1,792 | py | Python | fastseg/model/utils.py | SeockHwa/Segmentation_mobileV3 | 01d90eeb32232346b8ed071eaf5d03322049be11 | [
"MIT"
] | 274 | 2020-08-12T00:29:30.000Z | 2022-03-29T18:24:40.000Z | fastseg/model/utils.py | dcmartin/fastseg | c30759e07a52c7370eda11a93396c79f2b141778 | [
"MIT"
] | 10 | 2020-08-13T06:15:14.000Z | 2021-03-30T16:12:31.000Z | fastseg/model/utils.py | dcmartin/fastseg | c30759e07a52c7370eda11a93396c79f2b141778 | [
"MIT"
] | 27 | 2020-08-12T00:29:21.000Z | 2021-12-09T02:32:36.000Z | import torch.nn as nn
from .efficientnet import EfficientNet_B4, EfficientNet_B0
from .mobilenetv3 import MobileNetV3_Large, MobileNetV3_Small
def get_trunk(trunk_name):
"""Retrieve the pretrained network trunk and channel counts"""
if trunk_name == 'efficientnet_b4':
backbone = EfficientNet_B4(pretrained=True)
s2_ch = 24
s4_ch = 32
high_level_ch = 1792
elif trunk_name == 'efficientnet_b0':
backbone = EfficientNet_B0(pretrained=True)
s2_ch = 16
s4_ch = 24
high_level_ch = 1280
elif trunk_name == 'mobilenetv3_large':
backbone = MobileNetV3_Large(pretrained=True)
s2_ch = 16
s4_ch = 24
high_level_ch = 960
elif trunk_name == 'mobilenetv3_small':
backbone = MobileNetV3_Small(pretrained=True)
s2_ch = 16
s4_ch = 16
high_level_ch = 576
else:
raise ValueError('unknown backbone {}'.format(trunk_name))
return backbone, s2_ch, s4_ch, high_level_ch
| 35.137255 | 84 | 0.651786 |
6063184472ef835deb60c56bca4bcbb89e09d477 | 136 | py | Python | python/testData/inspections/PyTypeCheckerInspection/ModuleTypeParameter/a.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/inspections/PyTypeCheckerInspection/ModuleTypeParameter/a.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | python/testData/inspections/PyTypeCheckerInspection/ModuleTypeParameter/a.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | import module
from types import ModuleType
foo(module)
bar(module) | 12.363636 | 28 | 0.720588 |
60634a727fe7a278b36493fb58ad20aeb22882f6 | 2,151 | py | Python | tests/webapp/test_webapp_actions.py | proofdock/chaos-azure | 85302f8be18153862656c587988eafb5dd37ddf7 | [
"Apache-2.0"
] | 1 | 2021-04-24T20:01:54.000Z | 2021-04-24T20:01:54.000Z | tests/webapp/test_webapp_actions.py | proofdock/chaos-azure | 85302f8be18153862656c587988eafb5dd37ddf7 | [
"Apache-2.0"
] | 23 | 2020-05-22T06:43:14.000Z | 2021-02-25T21:02:28.000Z | tests/webapp/test_webapp_actions.py | proofdock/chaos-azure | 85302f8be18153862656c587988eafb5dd37ddf7 | [
"Apache-2.0"
] | null | null | null | from unittest.mock import patch, MagicMock
from pdchaosazure.webapp.actions import stop, restart, delete
from tests.data import config_provider, secrets_provider, webapp_provider
| 34.693548 | 87 | 0.755927 |
606417a48449b07f2cec077fb5c3441648a8cb09 | 30,091 | py | Python | echopype/model/modelbase.py | leewujung/echopype-lfs-test | b76dcf42631d0ac9cef0efeced9be4afdc15e659 | [
"Apache-2.0"
] | null | null | null | echopype/model/modelbase.py | leewujung/echopype-lfs-test | b76dcf42631d0ac9cef0efeced9be4afdc15e659 | [
"Apache-2.0"
] | null | null | null | echopype/model/modelbase.py | leewujung/echopype-lfs-test | b76dcf42631d0ac9cef0efeced9be4afdc15e659 | [
"Apache-2.0"
] | null | null | null | """
echopype data model that keeps tracks of echo data and
its connection to data files.
"""
import os
import warnings
import datetime as dt
from echopype.utils import uwa
import numpy as np
import xarray as xr
def calc_seawater_absorption(self, src='file'):
    """Stub for computing seawater absorption; sonar-specific subclasses override it.

    The base class has no implementation, so calling this only warns the user
    and returns None.
    """
    notice = "Seawater absorption calculation has not been implemented for this sonar model!"
    print(notice)
def calc_sample_thickness(self):
    """Stub for computing sample thickness; sonar-specific subclasses override it.

    The base class has no implementation, so calling this only warns the user
    and returns None.
    """
    message = 'Sample thickness calculation has not been implemented for this sonar model!'
    print(message)
def calc_range(self):
    """Stub for computing range; sonar-specific subclasses override it.

    The base class has no implementation, so calling this only warns the user
    and returns None.
    """
    warning = 'Range calculation has not been implemented for this sonar model!'
    print(warning)
def recalculate_environment(self, ss=True, sa=True, st=True, r=True):
    """ Recalculates sound speed, seawater absorption, sample thickness, and range using
    salinity, temperature, and pressure

    Parameters
    ----------
    ss : bool
        Whether to calculate sound speed. Defaults to `True`
    sa : bool
        Whether to calculate seawater absorption. Defaults to `True`
    st : bool
        Whether to calculate sample thickness. Defaults to `True`
    r : bool
        Whether to calculate range. Defaults to `True`
    """
    s, t, p = self.salinity, self.temperature, self.pressure
    # All three environmental parameters must be user-supplied before any
    # quantity is recalculated; otherwise report which one is missing.
    if s is not None and t is not None and p is not None:
        if ss:
            self.sound_speed = self.calc_sound_speed(src='user')
        if sa:
            self.seawater_absorption = self.calc_seawater_absorption(src='user')
        if st:
            self.sample_thickness = self.calc_sample_thickness()
        if r:
            self.range = self.calc_range()
    elif s is None:
        print("Salinity was not provided. Environment was not recalculated")
    elif t is None:
        print("Temperature was not provided. Environment was not recalculated")
    else:
        print("Pressure was not provided. Environment was not recalculated")
def calibrate(self):
    """Stub for volume backscatter calibration and echo-integration;
    sonar-specific subclasses override it.

    The base class has no implementation, so calling this only warns the user
    and returns None.
    """
    msg = 'Calibration has not been implemented for this sonar model!'
    print(msg)
def calibrate_TS(self):
    """Stub for target strength calibration and echo-integration;
    sonar-specific subclasses override it.

    The base class has no implementation, so calling this only warns the user
    and returns None.
    """
    msg = 'Target strength calibration has not been implemented for this sonar model!'
    print(msg)
def validate_path(self, save_path, save_postfix):
    """Creates a directory if it doesnt exist. Returns a valid save path.

    NOTE(review): ``_assemble_path()`` is called below but is not defined in
    the visible scope -- presumably a helper that builds the default output
    filename from ``self.file_path`` and ``save_postfix``; confirm it exists.
    """
    if save_path is None:
        # No path given: save next to the input file with a derived filename.
        save_dir = os.path.dirname(self.file_path)
        file_out = _assemble_path()
    else:
        path_ext = os.path.splitext(save_path)[1]
        # If given save_path is file, split into directory and file
        if path_ext != '':
            save_dir, file_out = os.path.split(save_path)
            if save_dir == '':  # save_path is only a filename without directory
                save_dir = os.path.dirname(self.file_path)  # use directory from input file
        # If given save_path is a directory, get a filename from input .nc file
        else:
            save_dir = save_path
            file_out = _assemble_path()

    # Create folder if not already exists
    if save_dir == '':
        # TODO: should we use '.' instead of os.getcwd()?
        save_dir = os.getcwd()  # explicit about path to current directory
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    return os.path.join(save_dir, file_out)
def _get_proc_Sv(self, source_path=None, source_postfix='_Sv'):
    """Private method to return calibrated Sv either from memory or _Sv.nc file.

    This method is called by remove_noise(), noise_estimates() and get_MVBS().
    Lookup order: self.Sv in memory, then the _Sv.nc file on disk, and as a
    last resort self.calibrate() is run to produce Sv in memory.
    """
    if self.Sv is None:  # calibration not yet performed
        Sv_path = self.validate_path(save_path=source_path,  # wrangle _Sv path
                                     save_postfix=source_postfix)
        if os.path.exists(Sv_path):  # _Sv exists
            self.Sv = xr.open_dataset(Sv_path)  # load _Sv file
        else:
            # if path specification given but file do not exist:
            if (source_path is not None) or (source_postfix != '_Sv'):
                print('%s  no calibrated data found in specified path: %s' %
                      (dt.datetime.now().strftime('%H:%M:%S'), Sv_path))
            else:
                print('%s  data has not been calibrated. ' % dt.datetime.now().strftime('%H:%M:%S'))
                print('          performing calibration now and operate from Sv in memory.')
            self.calibrate()  # calibrate, have Sv in memory
    return self.Sv
    def remove_noise(self, source_postfix='_Sv', source_path=None,
                     noise_est_range_bin_size=None, noise_est_ping_size=None,
                     SNR=0, Sv_threshold=None,
                     save=False, save_postfix='_Sv_clean', save_path=None):
        """Remove noise by using noise estimates obtained from the minimum mean calibrated power level
        along each column of tiles.
        See method noise_estimates() for details of noise estimation.
        Reference: De Robertis & Higginbottom, 2017, ICES Journal of Marine Sciences
        Parameters
        ----------
        source_postfix : str
            postfix of the Sv file used to remove noise from, default to '_Sv'
        source_path : str
            path of Sv file used to remove noise from, can be one of the following:
            - None (default):
                    use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
                    or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulted self.Sv
            - path to a directory: RAWFILENAME_Sv.nc in the specified directory
            - path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
        noise_est_range_bin_size : float, optional
            Meters per tile for noise estimation [m]
        noise_est_ping_size : int, optional
            Number of pings per tile for noise estimation
        SNR : int, optional
            Minimum signal-to-noise ratio (remove values below this after general noise removal).
        Sv_threshold : int, optional
            Minimum Sv threshold [dB] (remove values below this after general noise removal)
        save : bool, optional
            Whether to save the denoised Sv (``Sv_clean``) into a new .nc file.
            Default to ``False``.
        save_postfix : str
            Filename postfix, default to '_Sv_clean'
        save_path : str
            Full filename to save to, overwriting the RAWFILENAME_Sv_clean.nc default
        """
        # NOTE(review): SNR and Sv_threshold are accepted but never referenced in
        # this method body — the post-removal thresholding they describe does not
        # appear to be applied here. Confirm whether this is intentional.
        # Check params: only update stored tile sizes when the caller overrides them
        if (noise_est_range_bin_size is not None) and (self.noise_est_range_bin_size != noise_est_range_bin_size):
            self.noise_est_range_bin_size = noise_est_range_bin_size
        if (noise_est_ping_size is not None) and (self.noise_est_ping_size != noise_est_ping_size):
            self.noise_est_ping_size = noise_est_ping_size
        # Get calibrated Sv (print_src tracks whether Sv came from disk, for messaging
        # and for deciding below whether to overwrite the in-memory self.Sv)
        if self.Sv is not None:
            print('%s Remove noise from Sv stored in memory.' % dt.datetime.now().strftime('%H:%M:%S'))
            print_src = False
        else:
            print_src = True
        proc_data = self._get_proc_Sv(source_path=source_path, source_postfix=source_postfix)
        if print_src:
            print('%s Remove noise from Sv stored in: %s' %
                  (dt.datetime.now().strftime('%H:%M:%S'), self.Sv_path))
        # Get tile indexing parameters (bin edges along range_bin and ping_time)
        self.noise_est_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
            self.get_tile_params(r_data_sz=proc_data.range_bin.size,
                                 p_data_sz=proc_data.ping_time.size,
                                 r_tile_sz=self.noise_est_range_bin_size,
                                 p_tile_sz=self.noise_est_ping_size,
                                 sample_thickness=self.sample_thickness)
        # Get TVG and ABS for compensating for transmission loss:
        # TVG = 20*log10(range) with range clamped to >= 1 m to avoid negative gain,
        # ABS = two-way absorption loss (2 * alpha * range)
        range_meter = self.range
        TVG = np.real(20 * np.log10(range_meter.where(range_meter >= 1, other=1)))
        ABS = 2 * self.seawater_absorption * range_meter
        # Function for use with apply
        # Groupby noise removal operation: ping_idx gives an integer coordinate
        # that the ping-tile bin edges can be applied to
        proc_data.coords['ping_idx'] = ('ping_time', np.arange(proc_data.Sv['ping_time'].size))
        ABS.name = 'ABS'
        TVG.name = 'TVG'
        # Merge ABS and TVG into the dataset so remove_n (defined elsewhere in
        # this module) can access them per tile
        pp = xr.merge([proc_data, ABS])
        pp = xr.merge([pp, TVG])
        # check if number of range_bin per tile the same for all freq channels
        if np.unique([np.array(x).size for x in range_bin_tile_bin_edge]).size == 1:
            # Same tiling for all channels: one grouped pass over ping tiles
            Sv_clean = pp.groupby_bins('ping_idx', ping_tile_bin_edge).\
                map(remove_n, rr=range_bin_tile_bin_edge[0])
            Sv_clean = Sv_clean.drop_vars(['ping_idx'])
        else:
            # Channel-dependent tiling: process each frequency separately, then
            # align and stack the per-channel results back into one DataArray
            tmp_clean = []
            cnt = 0
            for key, val in pp.groupby('frequency'):  # iterate over different frequency channel
                tmp = val.groupby_bins('ping_idx', ping_tile_bin_edge). \
                    map(remove_n, rr=range_bin_tile_bin_edge[cnt])
                cnt += 1
                tmp_clean.append(tmp)
            clean_val = np.array([zz.values for zz in xr.align(*tmp_clean, join='outer')])
            Sv_clean = xr.DataArray(clean_val,
                                    coords={'frequency': proc_data['frequency'].values,
                                            'ping_time': tmp_clean[0]['ping_time'].values,
                                            'range_bin': tmp_clean[0]['range_bin'].values},
                                    dims=['frequency', 'ping_time', 'range_bin'])
        # Set up DataSet with the tile-size parameters recorded alongside the data
        Sv_clean.name = 'Sv'
        Sv_clean = Sv_clean.to_dataset()
        Sv_clean['noise_est_range_bin_size'] = ('frequency', self.noise_est_range_bin_size)
        Sv_clean.attrs['noise_est_ping_size'] = self.noise_est_ping_size
        # Attach calculated range into data set
        Sv_clean['range'] = (('frequency', 'range_bin'), self.range.T)
        # Save as object attributes as a netCDF file
        self.Sv_clean = Sv_clean
        # TODO: now adding the below so that MVBS can be calculated directly
        #  from the cleaned Sv without saving and loading Sv_clean from disk.
        #  However this is not explicit to the user. A better way to do this
        #  is to change get_MVBS() to first check existence of self.Sv_clean
        #  when `_Sv_clean` is specified as the source_postfix.
        if not print_src:  # remove noise from Sv stored in memory
            self.Sv = Sv_clean.copy()
        if save:
            self.Sv_clean_path = self.validate_path(save_path=save_path, save_postfix=save_postfix)
            print('%s saving denoised Sv to %s' % (dt.datetime.now().strftime('%H:%M:%S'), self.Sv_clean_path))
            Sv_clean.to_netcdf(self.Sv_clean_path)
        # Close opened resources
        proc_data.close()
    def noise_estimates(self, source_postfix='_Sv', source_path=None,
                        noise_est_range_bin_size=None, noise_est_ping_size=None):
        """Obtain noise estimates from the minimum mean calibrated power level along each column of tiles.
        The tiles here are defined by class attributes noise_est_range_bin_size and noise_est_ping_size.
        This method contains redundant pieces of code that also appear in method remove_noise(),
        but this method can be used separately to determine the exact tile size for noise removal before
        noise removal is actually performed.
        Parameters
        ----------
        source_postfix : str
            postfix of the Sv file used to calculate noise estimates from, default to '_Sv'
        source_path : str
            path of Sv file used to calculate noise estimates from, can be one of the following:
            - None (default):
                    use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
                    or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulted self.Sv
            - path to a directory: RAWFILENAME_Sv.nc in the specified directory
            - path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
        noise_est_range_bin_size : float
            meters per tile for noise estimation [m]
        noise_est_ping_size : int
            number of pings per tile for noise estimation
        Returns
        -------
        noise_est : xarray DataSet
            noise estimates as a DataArray with dimension [ping_time x range_bin]
            ping_time and range_bin are taken from the first element of each tile along each of the dimensions
        """
        # NOTE(review): source_postfix and source_path are documented but not
        # forwarded to _get_proc_Sv() below (it is called with defaults) — confirm.
        # Check params: only update stored tile sizes when the caller overrides them
        if (noise_est_range_bin_size is not None) and (self.noise_est_range_bin_size != noise_est_range_bin_size):
            self.noise_est_range_bin_size = noise_est_range_bin_size
        if (noise_est_ping_size is not None) and (self.noise_est_ping_size != noise_est_ping_size):
            self.noise_est_ping_size = noise_est_ping_size
        # Use calibrated data to calculate noise removal
        proc_data = self._get_proc_Sv()
        # Get tile indexing parameters (bin edges along range_bin and ping_time)
        self.noise_est_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
            self.get_tile_params(r_data_sz=proc_data.range_bin.size,
                                 p_data_sz=proc_data.ping_time.size,
                                 r_tile_sz=self.noise_est_range_bin_size,
                                 p_tile_sz=self.noise_est_ping_size,
                                 sample_thickness=self.sample_thickness)
        # Values for noise estimates: TVG = 20*log10(range) clamped at range >= 1 m,
        # ABS = two-way absorption loss (2 * alpha * range)
        range_meter = self.range
        TVG = np.real(20 * np.log10(range_meter.where(range_meter >= 1, other=1)))
        ABS = 2 * self.seawater_absorption * range_meter
        # Noise estimates: back out the transmission-loss compensation from Sv to
        # recover calibrated power in the linear domain
        proc_data['power_cal'] = 10 ** ((proc_data.Sv - ABS - TVG) / 10)
        # check if number of range_bin per tile the same for all freq channels
        if np.unique([np.array(x).size for x in range_bin_tile_bin_edge]).size == 1:
            # Same tiling for all channels: mean power per tile, then the minimum
            # across range tiles in each ping-tile column, converted back to dB
            noise_est = 10 * np.log10(proc_data['power_cal'].coarsen(
                ping_time=self.noise_est_ping_size,
                range_bin=int(np.unique(self.noise_est_range_bin_size / self.sample_thickness)),
                boundary='pad').mean().min(dim='range_bin'))
        else:
            # Channel-dependent tiling: coarsen each frequency with its own
            # range_bin tile size, then align and stack the results
            range_bin_coarsen_idx = (self.noise_est_range_bin_size / self.sample_thickness).astype(int)
            tmp_noise = []
            for r_bin in range_bin_coarsen_idx:
                freq = r_bin.frequency.values
                tmp_da = 10 * np.log10(proc_data['power_cal'].sel(frequency=freq).coarsen(
                    ping_time=self.noise_est_ping_size,
                    range_bin=r_bin.values,
                    boundary='pad').mean().min(dim='range_bin'))
                tmp_da.name = 'noise_est'
                tmp_noise.append(tmp_da)
            # Construct a dataArray TODO: this can probably be done smarter using xarray native functions
            noise_val = np.array([zz.values for zz in xr.align(*tmp_noise, join='outer')])
            noise_est = xr.DataArray(noise_val,
                                     coords={'frequency': proc_data['frequency'].values,
                                             'ping_time': tmp_noise[0]['ping_time'].values},
                                     dims=['frequency', 'ping_time'])
        # Record the tile-size parameters alongside the estimates
        noise_est = noise_est.to_dataset(name='noise_est')
        noise_est['noise_est_range_bin_size'] = ('frequency', self.noise_est_range_bin_size)
        noise_est.attrs['noise_est_ping_size'] = self.noise_est_ping_size
        # Close opened resources
        proc_data.close()
        return noise_est
    def get_MVBS(self, source_postfix='_Sv', source_path=None,
                 MVBS_range_bin_size=None, MVBS_ping_size=None,
                 save=False, save_postfix='_MVBS', save_path=None):
        """Calculate Mean Volume Backscattering Strength (MVBS).
        The calculation uses class attributes MVBS_ping_size and MVBS_range_bin_size to
        calculate and save MVBS as a new attribute to the calling EchoData instance.
        MVBS is an xarray DataArray with dimensions ``ping_time`` and ``range_bin``
        that are from the first elements of each tile along the corresponding dimensions
        in the original Sv or Sv_clean DataArray.
        Parameters
        ----------
        source_postfix : str
            postfix of the Sv file used to calculate MVBS, default to '_Sv'
        source_path : str
            path of Sv file used to calculate MVBS, can be one of the following:
            - None (default):
                    use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
                    or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulted self.Sv
            - path to a directory: RAWFILENAME_Sv.nc in the specified directory
            - path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
        MVBS_range_bin_size : float, optional
            meters per tile for calculating MVBS [m]
        MVBS_ping_size : int, optional
            number of pings per tile for calculating MVBS
        save : bool, optional
            whether to save the calculated MVBS into a new .nc file, default to ``False``
        save_postfix : str
            Filename postfix, default to '_MVBS'
        save_path : str
            Full filename to save to, overwriting the RAWFILENAME_MVBS.nc default
        """
        # Check params: only update stored tile sizes when the caller overrides them
        if (MVBS_range_bin_size is not None) and (self.MVBS_range_bin_size != MVBS_range_bin_size):
            self.MVBS_range_bin_size = MVBS_range_bin_size
        if (MVBS_ping_size is not None) and (self.MVBS_ping_size != MVBS_ping_size):
            self.MVBS_ping_size = MVBS_ping_size
        # Get Sv by validating path and calibrate if not already done
        if self.Sv is not None:
            print('%s use Sv stored in memory to calculate MVBS' % dt.datetime.now().strftime('%H:%M:%S'))
            print_src = False
        else:
            print_src = True
        proc_data = self._get_proc_Sv(source_path=source_path, source_postfix=source_postfix)
        if print_src:
            if self.Sv_path is not None:
                print('%s Sv source used to calculate MVBS: %s' %
                      (dt.datetime.now().strftime('%H:%M:%S'), self.Sv_path))
            else:
                print('%s Sv source used to calculate MVBS: memory' %
                      dt.datetime.now().strftime('%H:%M:%S'))
        # Get tile indexing parameters (bin edges along range_bin and ping_time)
        self.MVBS_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
            self.get_tile_params(r_data_sz=proc_data.range_bin.size,
                                 p_data_sz=proc_data.ping_time.size,
                                 r_tile_sz=self.MVBS_range_bin_size,
                                 p_tile_sz=self.MVBS_ping_size,
                                 sample_thickness=self.sample_thickness)
        # Calculate MVBS
        Sv_linear = 10 ** (proc_data.Sv / 10)  # convert to linear domain before averaging
        # check if number of range_bin per tile the same for all freq channels
        if np.unique([np.array(x).size for x in range_bin_tile_bin_edge]).size == 1:
            # Same tiling for all channels: tile-mean in linear domain, back to dB
            MVBS = 10 * np.log10(Sv_linear.coarsen(
                ping_time=self.MVBS_ping_size,
                range_bin=int(np.unique(self.MVBS_range_bin_size / self.sample_thickness)),
                boundary='pad').mean())
            # Re-index range_bin with consecutive integers after coarsening
            MVBS.coords['range_bin'] = ('range_bin', np.arange(MVBS['range_bin'].size))
        else:
            # Channel-dependent tiling: coarsen each frequency with its own
            # range_bin tile size, then align and stack the per-channel results
            range_bin_coarsen_idx = (self.MVBS_range_bin_size / self.sample_thickness).astype(int)
            tmp_MVBS = []
            for r_bin in range_bin_coarsen_idx:
                freq = r_bin.frequency.values
                tmp_da = 10 * np.log10(Sv_linear.sel(frequency=freq).coarsen(
                    ping_time=self.MVBS_ping_size,
                    range_bin=r_bin.values,
                    boundary='pad').mean())
                tmp_da.coords['range_bin'] = ('range_bin', np.arange(tmp_da['range_bin'].size))
                tmp_da.name = 'MVBS'
                tmp_MVBS.append(tmp_da)
            # Construct a dataArray TODO: this can probably be done smarter using xarray native functions
            MVBS_val = np.array([zz.values for zz in xr.align(*tmp_MVBS, join='outer')])
            MVBS = xr.DataArray(MVBS_val,
                                coords={'frequency': Sv_linear['frequency'].values,
                                        'ping_time': tmp_MVBS[0]['ping_time'].values,
                                        'range_bin': np.arange(MVBS_val.shape[2])},
                                dims=['frequency', 'ping_time', 'range_bin']).dropna(dim='range_bin', how='all')
        # Set MVBS attributes, recording the tile sizes used
        MVBS.name = 'MVBS'
        MVBS = MVBS.to_dataset()
        MVBS['MVBS_range_bin_size'] = ('frequency', self.MVBS_range_bin_size)
        MVBS.attrs['MVBS_ping_size'] = self.MVBS_ping_size
        # Save results in object and as a netCDF file
        self.MVBS = MVBS
        if save:
            self.MVBS_path = self.validate_path(save_path=save_path, save_postfix=save_postfix)
            print('%s saving MVBS to %s' % (dt.datetime.now().strftime('%H:%M:%S'), self.MVBS_path))
            MVBS.to_netcdf(self.MVBS_path)
        # Close opened resources
        proc_data.close()
| 47.763492 | 123 | 0.618557 |