hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0c6200ff0e4e0bec1acf3bffde906f26e624c332 | 5,980 | py | Python | infra/utils/launch_ec2.py | philipmac/nephele2 | 50acba6b7bb00da6209c75e26c8c040ffacbaa1e | [
"CC0-1.0"
] | 1 | 2021-02-26T23:00:10.000Z | 2021-02-26T23:00:10.000Z | infra/utils/launch_ec2.py | philipmac/nephele2 | 50acba6b7bb00da6209c75e26c8c040ffacbaa1e | [
"CC0-1.0"
] | 1 | 2020-11-16T01:55:06.000Z | 2020-11-16T01:55:06.000Z | infra/utils/launch_ec2.py | philipmac/nephele2 | 50acba6b7bb00da6209c75e26c8c040ffacbaa1e | [
"CC0-1.0"
] | 2 | 2021-08-12T13:59:49.000Z | 2022-01-19T17:16:26.000Z | #!/usr/bin/env python3
import os
import boto3
import botocore.exceptions
import argparse
import yaml
from nephele2 import NepheleError
mand_vars = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']
perm_error = """\n\nIt seems you have not set up your AWS correctly.
Should you be running this with Awssume? Or have profile with appropriate role?
Exiting now.\n"""
def main(args):
    """Build a boto3 EC2 resource (with an optional named profile), run the
    environment sanity checks, load the stack outputs, and launch the EC2
    worker instance described by the CLI arguments."""
    session_kwargs = {'region_name': 'us-east-1'}
    if args.profile is not None:
        session_kwargs['profile_name'] = args.profile
    ec2_resource = boto3.Session(**session_kwargs).resource('ec2')
    test_sanity(ec2_resource, args)
    envs = load_stack_vars(args.yaml_env.name)
    start_EC2(ec2_resource, args.ami_id, args.instance_type,
              args.key_path, args.label, envs, args.dry_run)
def test_sanity(ec2_resource, args):
    """Verify credentials, SSH key file, and EC2 API access before launching.

    Exits the process with status 1 on the first failed check:
    - required AWS env vars are set (only when no named profile is used),
    - the public key file at ``args.key_path`` exists,
    - the credentials can actually list EC2 instances.
    """
    if args.profile is None:
        # Without a named profile, boto3 relies on these environment variables.
        for var in mand_vars:
            if os.environ.get(var) is None:
                # BUGFIX: message typo "evironment" -> "environment".
                print(var + ' must be set as an environment variable. \nExiting.')
                exit(1)
    if not os.path.exists(args.key_path):
        print('Unable to see your key: {}, exiting now :-('.format(args.key_path))
        exit(1)
    try:
        # Probe API access by fetching one instance. Idiomatic next(iter(...))
        # replaces the original .__iter__().__next__() dunder calls; raises
        # ClientError when the credentials lack EC2 permissions.
        next(iter(ec2_resource.instances.all()))
    except botocore.exceptions.ClientError as expn:
        print(expn)
        print(perm_error)
        exit(1)
def create_EC2(ec2_resource, ami_id, i_type, envs, u_data='', dry_run=True):
    """Create a single EC2 instance via boto3.

    With the default ``dry_run=True`` nothing is actually launched: AWS only
    validates that the caller has permission to run the request (the caller
    is expected to catch the resulting ClientError -- see start_EC2).

    :param ec2_resource: boto3 EC2 service resource.
    :param ami_id: AMI id to boot from.
    :param i_type: EC2 instance type (e.g. 'm5.4xlarge').
    :param envs: dict of stack outputs (security groups, subnet, profile ARN).
    :param u_data: cloud-init user-data script for the new instance.
    :param dry_run: when True (default), only check permissions.
    :return: the value of ``create_instances`` (a list of Instance objects
        on a real launch).
    """
    inst = ec2_resource.create_instances(
        DryRun=dry_run,
        SecurityGroupIds=[envs['INTERNAL_SECURITY_GROUP'],
                          envs['ecs_cluster_security_group_id']],
        IamInstanceProfile={'Arn': envs['N2_WORKER_INSTANCE_PROFILE']},
        InstanceType=i_type,
        ImageId=ami_id,
        MinCount=1,  # launch exactly one instance
        MaxCount=1,
        # Terminate (rather than stop) when the instance shuts itself down.
        InstanceInitiatedShutdownBehavior='terminate',
        SubnetId=envs['VPC_SUBNET'],
        UserData=u_data
    )
    return inst
def start_EC2(ec2_resource, ami_id, i_type, key_path, label, envs, dry_run):
    """Check permissions, then create, wait for, and tag an EC2 instance.

    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-managing-instances.html#start-and-stop-instances

    :param ec2_resource: boto3 EC2 service resource.
    :param ami_id: AMI id to boot from.
    :param i_type: EC2 instance type.
    :param key_path: path to the public SSH key to install on the instance.
    :param label: value for the instance's 'Name' tag.
    :param envs: dict of stack outputs (see create_EC2).
    :param dry_run: when True, stop after the permission check.
    :raises NepheleError.UnableToStartEC2Exception: if the launch did not
        produce exactly one instance.
    """
    # Permission probe: create_EC2 defaults to DryRun=True, so a *successful*
    # check surfaces as ClientError('DryRunOperation...').
    try:
        create_EC2(ec2_resource, ami_id, i_type, envs)
    except botocore.exceptions.ClientError as e:
        if 'DryRunOperation' not in str(e):
            # Any other error means we lack permission to launch.
            print(e.response['Error']['Message'])
            print(perm_error)
            exit(1)
        elif dry_run:
            # Caller only wanted the permission check.
            print(e.response['Error']['Message'])
            exit(0)
        else:
            pass  # permissions OK -- proceed with the real launch
    # Build the cloud-init user-data: mount EFS and install the SSH key.
    mnt_str = gen_mnt_str(envs['EFS_IP'])
    key_str = read_key(key_path)
    auth_key_str = 'printf "{}" >> /home/admin/.ssh/authorized_keys;'.format(
        key_str)
    u_data = '#!/bin/bash\n{mnt_str}\n{auth_key_str}\n'.format(mnt_str=mnt_str,
                                                               auth_key_str=auth_key_str)
    print('Creating EC2...')
    try:
        instances = create_EC2(ec2_resource, ami_id, i_type, envs, u_data, False)
    except botocore.exceptions.ClientError as bce:
        print(bce)
        print('\nUnable to launch EC2. \nExiting.')
        exit(1)
    # BUGFIX: was `len(instances) is not 1` -- identity comparison against an
    # int literal (a SyntaxWarning since Python 3.8 and not guaranteed to
    # work); use a plain inequality.
    if len(instances) != 1:
        msg = 'Instances launched: %s' % str(instances)
        raise NepheleError.UnableToStartEC2Exception(msg=msg)
    instance = instances[0]
    instance.wait_until_running()
    instance.create_tags(Tags=[{'Key': 'Name', 'Value': label}])
    print(str(instance) + ' has been created.')
    # NOTE(review): this prints the instance *id* after "ssh"; presumably the
    # public IP/DNS was intended -- left unchanged to preserve behavior.
    print('To connect type:\nssh {ip_addr}'.format(
        ip_addr=instance.instance_id))
    print('To terminate instance type:')
    print('awssume aws ec2 terminate-instances --instance-ids ' + instance.instance_id)
if __name__ == "__main__":
    # Usage examples shown in --help: either rely on env vars + awssume,
    # or pass an AWS profile that already has the right role/permissions.
    usage = 'Eg:\nsource ~/code/neph2-envs/dev/environment_vars\n'\
        'awssume launch_ec2.py -e ../../neph2-envs/dev/dev_outputs.yaml -a ami-0ae1b7201f4a236f9 -t m5.4xlarge -k ~/.ssh/id_rsa.pub --label instance_name_tag\n\n'\
        'Alternately, pass profile which has correct role/permissions:\n'\
        'launch_ec2.py -e dev_outputs.yaml -a ami-003eed27e5bf2ef91 -t t2.micro -k ~/.ssh/id_rsa.pub -l name_tag --profile aws_profile_name'
    parser = argparse.ArgumentParser(
        description='CLI Interface to N2.', usage=usage)
    # Required arguments: stack outputs file, instance type, AMI, key, label.
    req = parser.add_argument_group('required args')
    req.add_argument("-e", "--yaml_env",
                     type=argparse.FileType('r'), required=True)
    req.add_argument("-t", "--instance_type", type=str, required=True)
    req.add_argument("-a", "--ami_id", type=str, required=True)
    req.add_argument("-k", "--key_path", type=str, required=True)
    req.add_argument("-l", "--label", type=str, required=True)
    # Optional: named AWS profile and permissions-only dry run.
    parser.add_argument("-p", "--profile", type=str)
    parser.add_argument("-d", "--dry_run", action='store_true')
    args = parser.parse_args()
    main(args)
| 39.084967 | 167 | 0.623746 |
0c6419af7c4ea362b8097a85b3a1cb0ca9746ce0 | 9,196 | py | Python | tests/test_wvlns.py | seignovert/pyvims | a70b5b9b8bc5c37fa43b7db4d15407f312a31849 | [
"BSD-3-Clause"
] | 4 | 2019-09-16T15:50:22.000Z | 2021-04-08T15:32:48.000Z | tests/test_wvlns.py | seignovert/pyvims | a70b5b9b8bc5c37fa43b7db4d15407f312a31849 | [
"BSD-3-Clause"
] | 3 | 2018-05-04T09:28:24.000Z | 2018-12-03T09:00:31.000Z | tests/test_wvlns.py | seignovert/pyvims | a70b5b9b8bc5c37fa43b7db4d15407f312a31849 | [
"BSD-3-Clause"
] | 1 | 2020-10-12T15:14:17.000Z | 2020-10-12T15:14:17.000Z | """Test VIMS wavelength module."""
from pathlib import Path
import numpy as np
from numpy.testing import assert_array_almost_equal as assert_array
from pyvims import QUB
from pyvims.vars import ROOT_DATA
from pyvims.wvlns import (BAD_IR_PIXELS, CHANNELS, FWHM, SHIFT,
VIMS_IR, VIMS_VIS, WLNS, YEARS,
bad_ir_pixels, ir_multiplexer, ir_hot_pixels,
is_hot_pixel, median_spectrum, moving_median,
sample_line_axes)
from pytest import approx, raises
DATA = Path(__file__).parent / 'data'
def test_vims_csv():
    """Test CSV global variables."""
    # Every per-channel table must cover the 352 VIMS channels.
    for table in (CHANNELS, WLNS, FWHM):
        assert len(table) == 352
    # First/last entries of each channel table.
    assert (CHANNELS[0], CHANNELS[-1]) == (1, 352)
    assert (WLNS[0], WLNS[-1]) == (.350540, 5.1225)
    assert (FWHM[0], FWHM[-1]) == (.007368, .016)
    # Wavelength-shift history: 58 sampled years.
    assert len(YEARS) == 58 and len(SHIFT) == 58
    assert (YEARS[0], YEARS[-1]) == (1999.6, 2017.8)
    assert (SHIFT[0], SHIFT[-1]) == (-25.8, 9.8)
def test_vims_ir():
    """Test VIMS IR wavelengths.

    The IR channel spans bands 97-352 (256 values); queries outside that
    range must return NaN.
    """
    # Standard wavelengths
    wvlns = VIMS_IR()
    assert len(wvlns) == 256
    assert wvlns[0] == .884210
    assert wvlns[-1] == 5.122500
    # Full-width at half maximum value
    fwhms = VIMS_IR(fwhm=True)
    assert len(fwhms) == 256
    assert fwhms[0] == .012878
    assert fwhms[-1] == .016
    # Wavenumber (cm-1), compared with pytest.approx for float safety
    wvnb = VIMS_IR(sigma=True)
    assert len(wvnb) == 256
    assert wvnb[0] == approx(11309.53, abs=1e-2)
    assert wvnb[-1] == approx(1952.17, abs=1e-2)
    # Single band
    assert VIMS_IR(band=97) == .884210
    assert VIMS_IR(band=97, fwhm=True) == .012878
    assert VIMS_IR(band=97, sigma=True) == approx(11309.53, abs=1e-2)
    assert VIMS_IR(band=97, fwhm=True, sigma=True) == approx(164.72, abs=1e-2)
    # Selected bands array
    assert_array(VIMS_IR(band=[97, 352]), [.884210, 5.122500])
    assert_array(VIMS_IR(band=[97, 352], fwhm=True), [.012878, .016])
    # Time offset: wavelengths shift depending on the requested year
    assert VIMS_IR(band=97, year=2002) == approx(.884210, abs=1e-6)
    assert VIMS_IR(band=97, year=2005) == approx(.884210, abs=1e-6)
    assert VIMS_IR(band=97, year=2001.5) == approx(.885410, abs=1e-6)  # +.0012
    assert VIMS_IR(band=97, year=2011) == approx(.890210, abs=1e-6)  # +.006
    # Time offset on all IR bands
    wvlns_2011 = VIMS_IR(year=2011)
    assert len(wvlns_2011) == 256
    assert wvlns_2011[0] == approx(.890210, abs=1e-6)
    assert wvlns_2011[-1] == approx(5.128500, abs=1e-6)
    # No change in FWHM with time
    assert VIMS_IR(band=97, year=2001.5, fwhm=True) == .012878
    # Outside IR band range
    assert np.isnan(VIMS_IR(band=0))
    assert np.isnan(VIMS_IR(band=96, fwhm=True))
    assert np.isnan(VIMS_IR(band=353, sigma=True))
def test_vims_vis():
    """Test VIMS VIS wavelengths.

    The VIS channel spans bands 1-96; unlike the IR channel it has no
    time-dependent calibration, so any `year` argument must be rejected.
    """
    # Standard wavelengths
    wvlns = VIMS_VIS()
    assert len(wvlns) == 96
    assert wvlns[0] == .350540
    assert wvlns[-1] == 1.045980
    # Full-width at half maximum value
    fwhms = VIMS_VIS(fwhm=True)
    assert len(fwhms) == 96
    assert fwhms[0] == .007368
    assert fwhms[-1] == .012480
    # Wavenumber (cm-1)
    wvnb = VIMS_VIS(sigma=True)
    assert len(wvnb) == 96
    assert wvnb[0] == approx(28527.41, abs=1e-2)
    assert wvnb[-1] == approx(9560.41, abs=1e-2)
    # Single band
    assert VIMS_VIS(band=96) == 1.045980
    assert VIMS_VIS(band=96, fwhm=True) == .012480
    assert VIMS_VIS(band=96, sigma=True) == approx(9560.41, abs=1e-2)
    assert VIMS_VIS(band=96, fwhm=True, sigma=True) == approx(114.07, abs=1e-2)
    # Selected bands array
    assert_array(VIMS_VIS(band=[1, 96]), [.350540, 1.045980])
    assert_array(VIMS_VIS(band=[1, 96], fwhm=True), [.007368, .012480])
    # Time offset is not supported for the VIS channel
    with raises(ValueError):
        _ = VIMS_VIS(band=97, year=2002)
    with raises(ValueError):
        _ = VIMS_VIS(year=2011)
    # Outside VIS band range
    assert np.isnan(VIMS_VIS(band=0))
    assert np.isnan(VIMS_VIS(band=97, fwhm=True))
    assert np.isnan(VIMS_VIS(band=353, sigma=True))
def test_bad_ir_pixels():
    """Test bad IR pixels list.

    Rebuilds the expected bad-pixel intervals from the reference CSV
    (rows with a non-empty comment column) and compares them against the
    BAD_IR_PIXELS constant and the bad_ir_pixels() collection.
    """
    # IR rows only (skiprows=98 -- presumably header + the 96 VIS rows;
    # confirm against wvlns_std.csv if this test ever breaks).
    csv = np.loadtxt(ROOT_DATA / 'wvlns_std.csv',
                     delimiter=',', usecols=(0, 1, 2, 3),
                     dtype=str, skiprows=98)
    # Extract bad pixels as (channel, lower wavelength edge, width)
    wvlns = np.transpose([
        (int(channel), float(wvln) - .5 * float(fwhm), float(fwhm))
        for channel, wvln, fwhm, comment in csv
        if comment
    ])
    # Group bad pixels: a gap > 1.5 channels starts a new interval,
    # otherwise the current interval's width accumulates the FWHM.
    news = [True] + list((wvlns[0, 1:] - wvlns[0, :-1]) > 1.5)
    bads = []
    for i, new in enumerate(news):
        if new:
            bads.append(list(wvlns[1:, i]))
        else:
            bads[-1][1] += wvlns[2, i]
    assert_array(BAD_IR_PIXELS, bads)
    # The plotted collection must contain one path per bad interval.
    coll = bad_ir_pixels()
    assert len(coll.get_paths()) == len(bads)
def test_moving_median():
    """Test moving median filter."""
    values = [1, 2, 3, 4, 5]
    # Expected filtered output for each window width.
    expected_by_width = {
        1: values,
        2: [1.5, 2.5, 3.5, 4.5, 5],
        3: [1.5, 2, 3, 4, 4.5],
        4: [2, 2.5, 3.5, 4, 4.5],
        5: [2, 2.5, 3, 3.5, 4],
    }
    for width, expected in expected_by_width.items():
        assert_array(moving_median(values, width=width), expected)
def test_is_hot_pixel():
    """Test hot pixel detector.

    Plants spikes of two intensities (50 and 150) into a random background
    signal and checks that the detector flags them.
    """
    # Create random background signal: 100 values in [0, 20)
    signal = np.random.default_rng().integers(20, size=100)
    # Add hot pixels: moderate spikes every 20 px, strong ones every 30 px
    signal[10::20] = 50
    signal[10::30] = 150
    # Default settings must catch at least all the strong (150) spikes.
    hot_pix = is_hot_pixel(signal)
    assert len(hot_pix) == 100
    assert 3 <= sum(hot_pix) < 6
    assert all(hot_pix[10::30])
    # With tol/frac overridden, the moderate (50) spikes are caught too.
    hot_pix = is_hot_pixel(signal, tol=1.5, frac=90)
    assert len(hot_pix) == 100
    assert 6 <= sum(hot_pix) < 12
    assert all(hot_pix[10::20])
def test_sample_line_axes():
    """Test location of the sample/line axes in a cube shape."""
    cases = [
        # 2D cubes: the single non-band axis is returned.
        ((64, 352), (0,)),
        ((256, 32), (1,)),
        # 3D cubes: both non-band axes are returned.
        ((32, 64, 352), (0, 1)),
        ((32, 352, 64), (0, 2)),
        ((352, 32, 64), (1, 2)),
    ]
    for shape, expected in cases:
        assert sample_line_axes(shape) == expected
    # 1D case: `(352)` is a bare int, not a tuple, and must be rejected.
    with raises(TypeError):
        _ = sample_line_axes((352))
    # No band axis in the shape.
    with raises(ValueError):
        _ = sample_line_axes((64, 64))
def test_median_spectrum():
    """Test the median spectrum extraction."""
    two_d = [CHANNELS, CHANNELS]
    three_d = [[CHANNELS, CHANNELS]]
    # The band axis must be located regardless of its position in the shape.
    cases = [
        two_d,                       # (2, 352)
        np.transpose(two_d),         # (352, 2)
        three_d,                     # (1, 2, 352)
        np.moveaxis(three_d, 1, 2),  # (1, 352, 2)
        np.moveaxis(three_d, 2, 0),  # (352, 1, 2)
    ]
    for spectra in cases:
        spectrum = median_spectrum(spectra)
        assert spectrum.shape == (352,)
        assert spectrum[0] == 1
        assert spectrum[-1] == 352
def test_ir_multiplexer():
    """Test spectrum split in each IR multiplexer."""
    # Every accepted input must split into the two 128-band multiplexers:
    # odd IR channels (97..351) and even IR channels (98..352).
    accepted_inputs = [
        CHANNELS,                 # full spectrum (VIS + IR)
        CHANNELS[96:],            # IR spectrum only
        [CHANNELS, CHANNELS],     # 2D spectra
        [[CHANNELS, CHANNELS]],   # 3D spectra
    ]
    for spectra in accepted_inputs:
        spec_1, spec_2 = ir_multiplexer(spectra)
        assert len(spec_1) == 128
        assert len(spec_2) == 128
        assert spec_1[0] == 97
        assert spec_1[-1] == 351
        assert spec_2[0] == 98
        assert spec_2[-1] == 352
    # VIS spectrum only: no IR bands to split.
    with raises(ValueError):
        _ = ir_multiplexer(CHANNELS[:96])
    # Dimension too high.
    with raises(ValueError):
        _ = ir_multiplexer([[[CHANNELS]]])
def test_ir_hot_pixels():
    """Test IR hot pixel detector from spectra.

    Uses the BACKGROUND data of a sample QUB file: a single background
    spectrum and the full set of spectra must yield the same known list
    of hot IR channels.
    """
    qub = QUB('1787314297_1', root=DATA)
    # 1D spectrum (first background row only)
    hot_pixels = ir_hot_pixels(qub['BACKGROUND'][0])
    assert len(hot_pixels) == 10
    assert_array(hot_pixels,
                 [105, 119, 124, 168, 239, 240, 275, 306, 317, 331])
    # 2D spectra (all background rows)
    hot_pixels = ir_hot_pixels(qub['BACKGROUND'])
    assert len(hot_pixels) == 10
    assert_array(hot_pixels,
                 [105, 119, 124, 168, 239, 240, 275, 306, 317, 331])
| 27.450746 | 79 | 0.605154 |
0c661084ef2dc9a119cb718b8362035d15b03067 | 909 | py | Python | Outliers/loss/losses.py | MakotoTAKAMATSU013/Outliers | 80043027d64b8f07355a05b281925f00bbf1a442 | [
"MIT"
] | null | null | null | Outliers/loss/losses.py | MakotoTAKAMATSU013/Outliers | 80043027d64b8f07355a05b281925f00bbf1a442 | [
"MIT"
] | null | null | null | Outliers/loss/losses.py | MakotoTAKAMATSU013/Outliers | 80043027d64b8f07355a05b281925f00bbf1a442 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F | 34.961538 | 73 | 0.660066 |
0c663401e4bd928831a371cae4be0b6a743a91c8 | 5,783 | py | Python | esiosdata/importdemdata.py | azogue/esiosdata | 680c7918955bc6ceee5bded92b3a4485f5ea8151 | [
"MIT"
] | 20 | 2017-06-04T20:34:16.000Z | 2021-10-31T22:55:22.000Z | esiosdata/importdemdata.py | azogue/esiosdata | 680c7918955bc6ceee5bded92b3a4485f5ea8151 | [
"MIT"
] | null | null | null | esiosdata/importdemdata.py | azogue/esiosdata | 680c7918955bc6ceee5bded92b3a4485f5ea8151 | [
"MIT"
] | 4 | 2020-01-28T19:02:24.000Z | 2022-03-08T15:59:11.000Z | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 27 18:16:24 2015
@author: Eugenio Panadero
A raz del cambio previsto:
DESCONEXIN DE LA WEB PBLICA CLSICA DE ESIOS
La Web pblica clsica de esios (http://www.esios.ree.es) ser desconectada el da 29 de marzo de 2016.
Continuaremos ofreciendo servicio en la nueva Web del Operador del Sistema:
https://www.esios.ree.es.
Por favor, actualice sus favoritos apuntando a la nueva Web.
IMPORTANTE!!!
En la misma fecha (29/03/2016), tambin dejar de funcionar el servicio Solicitar y Descargar,
utilizado para descargar informacin de la Web pblica clsica de esios.
Por favor, infrmese sobre descarga de informacin en
https://www.esios.ree.es/es/pagina/api
y actualice sus procesos de descarga.
"""
import json
import pandas as pd
import re
from dataweb.requestweb import get_data_en_intervalo
from esiosdata.esios_config import DATE_FMT, TZ, SERVER, HEADERS, D_TIPOS_REQ_DEM, KEYS_DATA_DEM
from esiosdata.prettyprinting import print_redb, print_err
__author__ = 'Eugenio Panadero'
__copyright__ = "Copyright 2015, AzogueLabs"
__credits__ = ["Eugenio Panadero"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Eugenio Panadero"
RG_FUNC_CONTENT = re.compile('(?P<func>.*)\((?P<json>.*)\);')
def dem_url_dia(dt_day='2015-06-22'):
    """Return the download URLs for every demand-data request type on a day.

    :param dt_day: day as a 'YYYY-MM-DD' string.
    :return: list with one URL per key of D_TIPOS_REQ_DEM.
    """
    # Iterate the dict directly instead of calling .keys() (same keys).
    return [_url_tipo_dato(dt_day, k) for k in D_TIPOS_REQ_DEM]
def dem_procesa_datos_dia(key_day, response):
    """Process the JSON payloads downloaded for one day.

    :param key_day: day label, used only in error messages.
    :param response: iterable of raw responses (presumably one per request
        type in D_TIPOS_REQ_DEM -- four in total, given the `== 4` check).
    :return: tuple ``(data, status)``: a dict of DataFrames and 0 on
        success, or ``(None, -2)`` when nothing usable was extracted.
    """
    dfs_import, df_import, dfs_maxmin, hay_errores = [], None, [], 0
    for r in response:
        # _extract_func_json_data yields (data-type label, parsed payload);
        # a None label marks an unparseable response.
        tipo_datos, data = _extract_func_json_data(r)
        if tipo_datos is not None:
            if ('IND_MaxMin' in tipo_datos) and data:
                # Daily max/min indicator frames
                df_import = _import_daily_max_min(data)
                dfs_maxmin.append(df_import)
            elif data:
                # Regular time-series frames
                df_import = _import_json_ts_data(data)
                dfs_import.append(df_import)
        if tipo_datos is None or df_import is None:
            hay_errores += 1
    if hay_errores == 4:
        # Nothing at all for this day: early exit, no retry.
        print_redb('** No hay datos para el da {}!'.format(key_day))
        return None, -2
    else:  # if hay_errores < 3:
        # TODO build partial data (max-min with NaN's, etc.)
        data_import = {}
        if dfs_import:
            # Join the two time-series frames into the main data frame.
            data_import[KEYS_DATA_DEM[0]] = dfs_import[0].join(dfs_import[1])
        if len(dfs_maxmin) == 2:
            data_import[KEYS_DATA_DEM[1]] = dfs_maxmin[0].join(dfs_maxmin[1])
        elif dfs_maxmin:
            data_import[KEYS_DATA_DEM[1]] = dfs_maxmin[0]
        if not data_import:
            print_err('DA: {} -> # ERRORES: {}'.format(key_day, hay_errores))
            return None, -2
        return data_import, 0
def dem_data_dia(str_dia='2015-10-10', str_dia_fin=None):
    """Fetch energy-demand data for one day or an interval, straight from the web.

    :param str_dia: start day ('YYYY-MM-DD').
    :param str_dia_fin: optional end day; when given, days are fetched
        with multithreading enabled.
    :return: the downloaded data, or None when errors occurred.
    """
    params = {
        'date_fmt': DATE_FMT,
        'usar_multithread': False,
        'num_retries': 1,
        'timeout': 10,
        'func_procesa_data_dia': dem_procesa_datos_dia,
        'func_url_data_dia': dem_url_dia,
        'data_extra_request': {'json_req': False, 'headers': HEADERS},
    }
    end_day = str_dia_fin
    if end_day is None:
        end_day = str_dia  # single-day request stays single-threaded
    else:
        params['usar_multithread'] = True  # interval: parallelize the days
    data, hay_errores, str_import = get_data_en_intervalo(str_dia, end_day, **params)
    if hay_errores:
        print_err(str_import)
        return None
    return data
| 39.609589 | 114 | 0.639633 |
a73d0e2b381469762428cb4845c16d12f86b59d9 | 4,744 | py | Python | brainfrick.py | rium9/brainfrick | 37f8e3417cde5828e3ed2c2099fc952259f12844 | [
"MIT"
] | null | null | null | brainfrick.py | rium9/brainfrick | 37f8e3417cde5828e3ed2c2099fc952259f12844 | [
"MIT"
] | null | null | null | brainfrick.py | rium9/brainfrick | 37f8e3417cde5828e3ed2c2099fc952259f12844 | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
    # Wire up an 8-cell Brainfuck machine that prints cell values as chars.
    bfm = BrainfuckMachine(cells=8, out_func=chr)
    bi = BInterpreter(bfm)
    # BUGFIX: the original `open('helloworld', 'r').read()` never closed the
    # file handle; a context manager releases it deterministically.
    with open('helloworld', 'r') as src:
        program = src.read()
    code = list(BLexer.lex(program))
    bi.interpret_code(code)
a73f4577fe0a30a2fdd1d7b44615b63fb0d34f1e | 3,476 | bzl | Python | infra_macros/fbcode_macros/build_defs/build_info.bzl | martarozek/buckit | 343cc5a5964c1d43902b6a77868652adaefa0caa | [
"BSD-3-Clause"
] | null | null | null | infra_macros/fbcode_macros/build_defs/build_info.bzl | martarozek/buckit | 343cc5a5964c1d43902b6a77868652adaefa0caa | [
"BSD-3-Clause"
] | null | null | null | infra_macros/fbcode_macros/build_defs/build_info.bzl | martarozek/buckit | 343cc5a5964c1d43902b6a77868652adaefa0caa | [
"BSD-3-Clause"
] | null | null | null | load("@fbcode_macros//build_defs:config.bzl", "config")
load("@fbcode_macros//build_defs/config:read_configs.bzl", "read_int")
load("@fbcode_macros//build_defs:core_tools.bzl", "core_tools")
def _get_build_info(package_name, name, rule_type, platform):
    """
    Gets a build_info struct from various configurations (or default values)

    This struct has values passed in by the packaging system in order to
    stamp things like the build epoch, platform, etc into the final binary.
    This returns stable values by default so that non-release builds do not
    affect rulekeys.

    Args:
        package_name: The name of the package that contains the build rule
                      that needs build info. No leading slashes
        name: The name of the rule that needs build info
        rule_type: The type of rule that is being built. This should be the
                   macro name, not the underlying rule type. (e.g. cpp_binary,
                   not cxx_binary)
        platform: The platform that is being built for
    """
    build_mode = config.get_build_mode()
    # Core tools keep stable rule keys: they never read the packaging
    # system's [build_info] buckconfig values.
    if core_tools.is_core_tool(package_name,name):
        return _create_build_info(
            build_mode,
            package_name,
            name,
            rule_type,
            platform,
        )
    else:
        # Regular rules: stamp everything the packaging system supplied via
        # the [build_info] buckconfig section (empty/zero defaults otherwise).
        return _create_build_info(
            build_mode,
            package_name,
            name,
            rule_type,
            platform,
            epochtime=read_int("build_info", "epochtime", 0),
            host=native.read_config("build_info", "host", ""),
            package_name=native.read_config("build_info", "package_name", ""),
            package_version=native.read_config("build_info", "package_version", ""),
            package_release=native.read_config("build_info", "package_release", ""),
            path=native.read_config("build_info", "path", ""),
            revision=native.read_config("build_info", "revision", ""),
            revision_epochtime=read_int("build_info", "revision_epochtime", 0),
            time=native.read_config("build_info", "time", ""),
            time_iso8601=native.read_config("build_info", "time_iso8601", ""),
            upstream_revision=native.read_config("build_info", "upstream_revision", ""),
            upstream_revision_epochtime=read_int("build_info", "upstream_revision_epochtime", 0),
            user=native.read_config("build_info", "user", ""),
        )
# Public symbol of this .bzl file: a struct namespace over the private
# implementation above.
build_info = struct(
    get_build_info = _get_build_info,
)
| 35.469388 | 97 | 0.635788 |
a7401ff3c28629b2dc0848d7b3f999f8226d524f | 1,885 | py | Python | src/scan.py | Unitato/github-public-alert | 29dbcf72dd8c18c45385c29f25174c28c3428560 | [
"MIT"
] | null | null | null | src/scan.py | Unitato/github-public-alert | 29dbcf72dd8c18c45385c29f25174c28c3428560 | [
"MIT"
] | null | null | null | src/scan.py | Unitato/github-public-alert | 29dbcf72dd8c18c45385c29f25174c28c3428560 | [
"MIT"
] | null | null | null | #!#!/usr/bin/env python
import os
from github import Github
from libraries.notify import Notify
import json
# Configuration: everything is supplied through environment variables.
print("")
print("Scanning Github repos")
GITHUB_API_KEY = os.environ.get('GITHUB_API_KEY')
# JSON list of repo names allowed to be public, lower-cased so the
# membership test below is case-insensitive.
WHITELIST = json.loads(os.environ.get('GITHUB_WHITELIST').lower())
# JSON list of GitHub accounts whose repos should be scanned.
GITHUB_SCAN = json.loads(os.environ.get('GITHUB_SCAN'))
SENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY')
SENDGRID_FROM = os.environ.get('SENDGRID_FROM')
SENDGRID_SUBJECT = os.environ.get('SENDGRID_SUBJECT')
SENDGRID_TEMPLATE = os.environ.get('SENDGRID_TEMPLATE')
SENDGRID_NOTIFY = json.loads(os.environ.get('SENDGRID_NOTIFY'))
results = []
print(" Target: {}".format(GITHUB_SCAN))
# Mask the API key, showing only its last 4 characters.
print(" Github:{}".format(len(GITHUB_API_KEY[:-4])*"#"+GITHUB_API_KEY[-4:]))
print(" Whitelist: {}".format(WHITELIST))
print("")
# or using an access token
g = Github(GITHUB_API_KEY)
for ITEM in GITHUB_SCAN:
    print("Checking {}".format(ITEM))
    for repo in g.get_user(ITEM).get_repos():
        if repo.name.lower() in WHITELIST:
            # Whitelisted: expected to be public, just log it.
            print(" [-] {}".format(repo.name))
            # commits = repo.get_commits()
            # for com in commits:
            # print(com)
        else:
            # Not whitelisted: record "account/repo" for the alert e-mail.
            print(" [+] {}".format(repo.name))
            results.append("{}/{}".format(ITEM,repo.name))
if results:
    print("FOUND NEW REPOs!!! SENDING EMAIL!!!")
    #exit()
    # Build and send the alert via SendGrid. NOTE(review): load_template is
    # not defined in this view -- presumably a helper elsewhere; confirm.
    notify = Notify(SENDGRID_API_KEY)
    notify.add_from(SENDGRID_FROM)
    notify.add_mailto(SENDGRID_NOTIFY)
    notify.add_subject(SENDGRID_SUBJECT)
    notify.add_content_html(load_template(SENDGRID_TEMPLATE))
    notify.update_content_html("<!--RESULTS-->", results)
    notify.send_mail()
else:
    print("Nothing found, going to sleep")
| 30.901639 | 76 | 0.671088 |
a7403e0780a57d1602d030f1189826ad5b0324b5 | 3,634 | py | Python | models.py | YavorPaunov/await | 0ea7ad1d0d48b66686e35702d39695268451b688 | [
"MIT"
] | null | null | null | models.py | YavorPaunov/await | 0ea7ad1d0d48b66686e35702d39695268451b688 | [
"MIT"
] | null | null | null | models.py | YavorPaunov/await | 0ea7ad1d0d48b66686e35702d39695268451b688 | [
"MIT"
] | null | null | null | from flask.ext.sqlalchemy import SQLAlchemy
from util import hex_to_rgb, rgb_to_hex
from time2words import relative_time_to_text
from datetime import datetime
from dateutil.tz import tzutc
import pytz
db = SQLAlchemy()
def get_or_create(session, model, **kwargs):
    """Return the first `model` row matching `kwargs`, creating it if absent.

    A newly created row is added to the session and committed immediately.
    """
    existing = session.query(model).filter_by(**kwargs).first()
    if existing:
        return existing
    created = model(**kwargs)
    session.add(created)
    session.commit()
    return created
| 30.79661 | 94 | 0.632911 |
a74179e3bf17c46fcdccafdc139bb260a2c60cb7 | 732 | py | Python | setup.py | ManuelMeraz/ReinforcementLearning | 5d42a88776428308d35c8031c01bf5afdf080079 | [
"MIT"
] | 1 | 2020-04-19T15:29:47.000Z | 2020-04-19T15:29:47.000Z | setup.py | ManuelMeraz/ReinforcementLearning | 5d42a88776428308d35c8031c01bf5afdf080079 | [
"MIT"
] | null | null | null | setup.py | ManuelMeraz/ReinforcementLearning | 5d42a88776428308d35c8031c01bf5afdf080079 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import setuptools
# Resolve requirements.txt relative to this file so installation works
# regardless of the current working directory.
DIR = os.path.dirname(__file__)
REQUIREMENTS = os.path.join(DIR, "requirements.txt")
with open(REQUIREMENTS) as f:
    # One requirement per line.
    reqs = f.read().strip().split("\n")
setuptools.setup(
    name="rl",
    version="0.0.1",
    description="Reinforcement Learning: An Introduction",
    url="github.com/manuelmeraz/ReinforcementLearning",
    author="Manuel Meraz-Rodriguez",
    license="MIT",
    packages=setuptools.find_packages(),
    install_requires=reqs,
    # Console entry points, one per chapter/exercise program.
    entry_points={
        "console_scripts": [
            "tictactoe = rl.book.chapter_1.tictactoe.main:main",
            "bandits = rl.book.chapter_2.main:main",
            "rlgrid = rl.rlgrid.main:main",
        ]
    },
)
| 25.241379 | 64 | 0.648907 |
a743058f6e943a66d50447c9ef87971c35895cc0 | 169 | py | Python | taxcalc/tbi/__init__.py | ClarePan/Tax-Calculator | d2d6cb4b551f34017db7166d91d982b5c4670816 | [
"CC0-1.0"
] | 1 | 2021-02-23T21:03:43.000Z | 2021-02-23T21:03:43.000Z | taxcalc/tbi/__init__.py | ClarePan/Tax-Calculator | d2d6cb4b551f34017db7166d91d982b5c4670816 | [
"CC0-1.0"
] | null | null | null | taxcalc/tbi/__init__.py | ClarePan/Tax-Calculator | d2d6cb4b551f34017db7166d91d982b5c4670816 | [
"CC0-1.0"
] | null | null | null | from taxcalc.tbi.tbi import (run_nth_year_taxcalc_model,
run_nth_year_gdp_elast_model,
reform_warnings_errors)
| 42.25 | 58 | 0.585799 |
a7433e8c895ee751d0a668a187a9eb4c45927efe | 6,223 | py | Python | mooc_access_number.py | mengshouer/mooc_access_number | 8de596ce34006f1f8c5d0404f5e40546fb438b2a | [
"MIT"
] | 6 | 2020-05-12T14:36:17.000Z | 2021-12-03T01:56:58.000Z | mooc_access_number.py | mengshouer/mooc_tools | 8de596ce34006f1f8c5d0404f5e40546fb438b2a | [
"MIT"
] | 2 | 2020-05-11T06:21:13.000Z | 2020-05-23T12:34:18.000Z | mooc_access_number.py | mengshouer/mooc_tools | 8de596ce34006f1f8c5d0404f5e40546fb438b2a | [
"MIT"
] | 1 | 2020-05-11T04:19:15.000Z | 2020-05-11T04:19:15.000Z | import requests,time,json,re,base64
requests.packages.urllib3.disable_warnings()
from io import BytesIO
from PIL import Image,ImageDraw,ImageChops
from lxml import etree
from urllib.parse import urlparse, parse_qs
username = "" #
password = "" #
s = requests.Session()
s.headers.update({'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36'})
'''
def captchalogin(username,password):
if(username == "" or password == ""):
username = input("")
password = input("")
#
#orc
APIKey = ""
SecretKey = ""
#api
if(APIKey != "" or SecretKey != ""):
getkeyurl = f'https://aip.baidubce.com/oauth/2.0/token'
data = {
"grant_type" : "client_credentials",
"client_id" : APIKey,
"client_secret" : SecretKey
}
getkey = requests.post(getkeyurl,data).text
access_token = json.loads(getkey)["access_token"]
numcode = ""
while 1:
t = int(round(time.time()*1000))
codeurl = f'http://passport2.chaoxing.com/num/code?'+ str(t)
img_numcode = s.get(codeurl).content
img = base64.b64encode(img_numcode)
orcurl = f'https://aip.baidubce.com/rest/2.0/ocr/v1/accurate_basic?access_token='+access_token
data = {"image":img}
headers = {'content-type': 'application/x-www-form-urlencoded'}
captcha = requests.post(orcurl,data=data,headers=headers).text
numcodelen = json.loads(captcha)["words_result_num"]
if numcodelen == 0:
print("")
time.sleep(1)
else:
numcode = json.loads(captcha)["words_result"][0]["words"]
numcode = re.sub("\D","",numcode)
if len(numcode) < 4:
print("")
time.sleep(1)
else:
print("")
break
else:
t = int(round(time.time()*1000))
url = f'http://passport2.chaoxing.com/num/code?'+ str(t)
web = s.get(url,verify=False)
img = Image.open(BytesIO(web.content))
img.show()
numcode = input('')
url = 'http://passport2.chaoxing.com/login?refer=http://i.mooc.chaoxing.com'
data = {'refer_0x001': 'http%3A%2F%2Fi.mooc.chaoxing.com',
'pid':'-1',
'pidName':'',
'fid':'1467', #id 1467:a
'fidName':'',
'allowJoin':'0',
'isCheckNumCode':'1',
'f':'0',
'productid':'',
'uname':username,
'password':password,
'numcode':numcode,
'verCode':''
}
web = s.post(url,data=data,verify=False)
time.sleep(2)
if('' in str(web.text)):
print('Login success!')
return s
else:
print('')
username = ""
password = ""
captchalogin(username,password)
'''
if __name__ == "__main__":
    print("")
    try:
        # captchalogin(username, password)
        login()
        main()
    # BUGFIX: narrowed from a bare `except:`, which also swallowed
    # SystemExit and KeyboardInterrupt; the retry-once intent is kept.
    except Exception:
        print("")
        # captchalogin(username, password)
        login()
        main()
| 32.752632 | 151 | 0.527398 |
a743c86ba9ec1ed2c5e5910bec35a0fda5523988 | 11,174 | py | Python | tests/test_json_api.py | Padraic-O-Mhuiris/fava | 797ae1ee1f7378c8e7347d2970fc52c4be366b01 | [
"MIT"
] | null | null | null | tests/test_json_api.py | Padraic-O-Mhuiris/fava | 797ae1ee1f7378c8e7347d2970fc52c4be366b01 | [
"MIT"
] | null | null | null | tests/test_json_api.py | Padraic-O-Mhuiris/fava | 797ae1ee1f7378c8e7347d2970fc52c4be366b01 | [
"MIT"
] | null | null | null | # pylint: disable=missing-docstring
from __future__ import annotations
import hashlib
from io import BytesIO
from pathlib import Path
from typing import Any
import pytest
from beancount.core.compare import hash_entry
from flask import url_for
from flask.testing import FlaskClient
from fava.context import g
from fava.core import FavaLedger
from fava.core.charts import PRETTY_ENCODER
from fava.core.misc import align
from fava.json_api import validate_func_arguments
from fava.json_api import ValidationError
dumps = PRETTY_ENCODER.encode
def assert_api_error(response, msg: str | None = None) -> None:
    """Assert the request returned HTTP 200 but reported an API error.

    When `msg` is given, it must match the error message in the payload.
    """
    payload = response.json
    assert response.status_code == 200
    assert not payload["success"], payload
    if msg:
        assert payload["error"] == msg
def assert_api_success(response, data: Any | None = None) -> None:
    """Assert the request succeeded (HTTP 200, success flag set).

    When `data` is given, it must equal the payload's "data" entry.
    """
    payload = response.json
    assert response.status_code == 200
    assert payload["success"], payload
    if data:
        assert payload["data"] == data
| 31.564972 | 79 | 0.578038 |
a747752e784483f13e0672fa7ef44261d743dd9f | 403 | py | Python | babybuddy/migrations/0017_promocode_max_usage_per_account.py | amcquistan/babyasst | 310a7948f06b71ae0d62593a3b5932abfd4eb444 | [
"BSD-2-Clause"
] | null | null | null | babybuddy/migrations/0017_promocode_max_usage_per_account.py | amcquistan/babyasst | 310a7948f06b71ae0d62593a3b5932abfd4eb444 | [
"BSD-2-Clause"
] | null | null | null | babybuddy/migrations/0017_promocode_max_usage_per_account.py | amcquistan/babyasst | 310a7948f06b71ae0d62593a3b5932abfd4eb444 | [
"BSD-2-Clause"
] | null | null | null | # Generated by Django 2.2.6 on 2019-11-27 20:28
from django.db import migrations, models
| 21.210526 | 49 | 0.615385 |
a74a33620df33a15eb19e53e0f2d731815811072 | 6,218 | py | Python | tests/test_upload.py | LuminosoInsight/luminoso-api-client-python | bae7db9b02123718ded5a8345a860bd12680b367 | [
"MIT"
] | 5 | 2016-09-14T02:02:30.000Z | 2021-06-20T21:11:19.000Z | tests/test_upload.py | LuminosoInsight/luminoso-api-client-python | bae7db9b02123718ded5a8345a860bd12680b367 | [
"MIT"
] | 29 | 2015-01-13T15:07:38.000Z | 2021-06-14T21:03:06.000Z | tests/test_upload.py | LuminosoInsight/luminoso-api-client-python | bae7db9b02123718ded5a8345a860bd12680b367 | [
"MIT"
] | 3 | 2016-03-07T13:04:34.000Z | 2017-08-07T21:15:53.000Z | from luminoso_api.v5_client import LuminosoClient
from luminoso_api.v5_upload import create_project_with_docs, BATCH_SIZE
from unittest.mock import patch
import pytest
BASE_URL = 'http://mock-api.localhost/api/v5/'

# Documents as handed to the uploader; the first one carries an unknown
# extra field, which does not appear in the uploaded form below.
DOCS_TO_UPLOAD = [
    {'title': 'Document 1', 'text': 'Bonjour', 'extra': 'field'},
    {'title': 'Document 2', 'text': 'Au revoir'},
]

# The same documents as they should appear in the upload request body.
DOCS_UPLOADED = [
    {'title': 'Document 1', 'text': 'Bonjour', 'metadata': []},
    {'title': 'Document 2', 'text': 'Au revoir', 'metadata': []},
]

REPETITIVE_DOC = {'title': 'Yadda', 'text': 'yadda yadda', 'metadata': []}
def _build_info_response(ndocs, language, done):
"""
Construct the expected response when we get the project's info after
requesting a build.
"""
response = {
'json': {
'project_id': 'projid',
'document_count': ndocs,
'language': language,
'last_build_info': {
'number': 1,
'start_time': 0.,
'stop_time': None,
},
}
}
if done:
response['json']['last_build_info']['success'] = True
response['json']['last_build_info']['stop_time'] = 1.
return response
def test_project_creation(requests_mock):
    """Create a project against mocked endpoints and verify every API call."""
    # Mock responses: project creation, upload/build acknowledgements, and
    # two status polls (first still building, then finished).
    requests_mock.post(
        BASE_URL + 'projects/',
        json={
            'project_id': 'projid',
            'document_count': 0,
            'language': 'fr',
            'last_build_info': None,
        },
    )
    requests_mock.post(BASE_URL + 'projects/projid/upload/', json={})
    requests_mock.post(BASE_URL + 'projects/projid/build/', json={})
    requests_mock.get(
        BASE_URL + 'projects/projid/',
        [
            _build_info_response(2, 'fr', done=False),
            _build_info_response(2, 'fr', done=True),
        ],
    )

    # Run the uploader; sleep is patched out so polling is instantaneous.
    client = LuminosoClient.connect(BASE_URL, token='fake')
    with patch('time.sleep', return_value=None):
        response = create_project_with_docs(
            client,
            DOCS_TO_UPLOAD,
            language='fr',
            name='Projet test',
            progress=False,
        )

    # Verify the exact request sequence.
    history = requests_mock.request_history
    assert [(req.method, req.url) for req in history] == [
        ('POST', BASE_URL + 'projects/'),
        ('POST', BASE_URL + 'projects/projid/upload/'),
        ('POST', BASE_URL + 'projects/projid/build/'),
        ('GET', BASE_URL + 'projects/projid/'),
        ('GET', BASE_URL + 'projects/projid/'),
    ]
    create_params = history[0].json()
    assert create_params['name'] == 'Projet test'
    assert create_params['language'] == 'fr'
    assert history[1].json()['docs'] == DOCS_UPLOADED
    assert history[2].json() == {}
    assert response['last_build_info']['success']
def test_missing_text(requests_mock):
    """A document without a text field is rejected on the client side."""
    # Only project creation needs a mock; validation fails before upload.
    requests_mock.post(
        BASE_URL + 'projects/',
        json={
            'project_id': 'projid',
            'document_count': 0,
            'language': 'en',
            'last_build_info': None,
        },
    )
    with pytest.raises(ValueError):
        api_client = LuminosoClient.connect(BASE_URL, token='fake')
        create_project_with_docs(
            api_client,
            [{'bad': 'document'}],
            language='en',
            name='Bad project test',
            progress=False,
        )
def test_pagination(requests_mock):
    """More than BATCH_SIZE documents must be uploaded in two batches."""
    requests_mock.post(
        BASE_URL + 'projects/',
        json={
            'project_id': 'projid',
            'document_count': 0,
            'language': 'fr',
            'last_build_info': None,
        },
    )
    requests_mock.post(BASE_URL + 'projects/projid/upload/', json={})
    requests_mock.post(BASE_URL + 'projects/projid/build/', json={})

    ndocs = BATCH_SIZE + 2
    # Two "still building" polls followed by a successful one.
    requests_mock.get(
        BASE_URL + 'projects/projid/',
        [
            _build_info_response(ndocs, 'fr', done=False),
            _build_info_response(ndocs, 'fr', done=False),
            _build_info_response(ndocs, 'fr', done=True),
        ],
    )

    client = LuminosoClient.connect(BASE_URL, token='fake')
    with patch('time.sleep', return_value=None):
        create_project_with_docs(
            client,
            [REPETITIVE_DOC] * ndocs,
            language='fr',
            name='Projet test',
            progress=False,
        )

    # One creation, two upload pages, one build, three status polls.
    observed = [(req.method, req.url) for req in requests_mock.request_history]
    assert observed == [
        ('POST', BASE_URL + 'projects/'),
        ('POST', BASE_URL + 'projects/projid/upload/'),
        ('POST', BASE_URL + 'projects/projid/upload/'),
        ('POST', BASE_URL + 'projects/projid/build/'),
        ('GET', BASE_URL + 'projects/projid/'),
        ('GET', BASE_URL + 'projects/projid/'),
        ('GET', BASE_URL + 'projects/projid/'),
    ]
| 31.72449 | 78 | 0.595529 |
a74ad7dc8ca825fa0b64d0132540f37da6f4e67a | 1,259 | py | Python | src/oca_github_bot/webhooks/on_command.py | eLBati/oca-github-bot | 4fa974f8ec123c9ccfd7bcad22e4baa939c985ac | [
"MIT"
] | null | null | null | src/oca_github_bot/webhooks/on_command.py | eLBati/oca-github-bot | 4fa974f8ec123c9ccfd7bcad22e4baa939c985ac | [
"MIT"
] | null | null | null | src/oca_github_bot/webhooks/on_command.py | eLBati/oca-github-bot | 4fa974f8ec123c9ccfd7bcad22e4baa939c985ac | [
"MIT"
] | null | null | null | # Copyright (c) initOS GmbH 2019
# Distributed under the MIT License (http://opensource.org/licenses/MIT).
from ..commands import CommandError, parse_commands
from ..config import OCABOT_EXTRA_DOCUMENTATION, OCABOT_USAGE
from ..router import router
from ..tasks.add_pr_comment import add_pr_comment
| 34.972222 | 73 | 0.629071 |
a74cb2eb35421327d8faf002d2a0cd393a5579ab | 1,151 | py | Python | splitListToParts.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
] | null | null | null | splitListToParts.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
] | null | null | null | splitListToParts.py | pflun/learningAlgorithms | 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Definition for singly-linked list.
head = ListNode(1)
p1 = ListNode(2)
p2 = ListNode(3)
p3 = ListNode(4)
p4 = ListNode(5)
p5 = ListNode(6)
p6 = ListNode(7)
p7 = ListNode(8)
p8 = ListNode(9)
p9 = ListNode(10)
head.next = p1
p1.next = p2
p2.next = p3
p3.next = p4
p4.next = p5
p5.next = p6
p6.next = p7
p7.next = p8
p8.next = p9
test = Solution()
print test.splitListToParts(head, 3) | 20.553571 | 47 | 0.536056 |
a74cdb915e99b5e47e7fb18dd30921d17381a256 | 7,744 | py | Python | pmlearn/mixture/tests/test_dirichlet_process.py | john-veillette/pymc-learn | 267b0084438616b869866194bc167c332c3e3547 | [
"BSD-3-Clause"
] | 187 | 2018-10-16T02:33:51.000Z | 2022-03-27T14:06:36.000Z | pmlearn/mixture/tests/test_dirichlet_process.py | john-veillette/pymc-learn | 267b0084438616b869866194bc167c332c3e3547 | [
"BSD-3-Clause"
] | 20 | 2018-10-31T15:13:29.000Z | 2022-01-20T18:54:00.000Z | pmlearn/mixture/tests/test_dirichlet_process.py | john-veillette/pymc-learn | 267b0084438616b869866194bc167c332c3e3547 | [
"BSD-3-Clause"
] | 20 | 2018-10-19T21:32:06.000Z | 2022-02-07T06:04:55.000Z | import unittest
import shutil
import tempfile
import numpy as np
# import pandas as pd
# import pymc3 as pm
# from pymc3 import summary
# from sklearn.mixture import BayesianGaussianMixture as skBayesianGaussianMixture
from sklearn.model_selection import train_test_split
from pmlearn.exceptions import NotFittedError
from pmlearn.mixture import DirichletProcessMixture
# class DirichletProcessMixtureFitTestCase(DirichletProcessMixtureTestCase):
# def test_advi_fit_returns_correct_model(self):
# # This print statement ensures PyMC3 output won't overwrite the test name
# print('')
# self.test_DPMM.fit(self.X_train)
#
# self.assertEqual(self.num_pred, self.test_DPMM.num_pred)
# self.assertEqual(self.num_components, self.test_DPMM.num_components)
# self.assertEqual(self.num_truncate, self.test_DPMM.num_truncate)
#
# self.assertAlmostEqual(self.pi[0],
# self.test_DPMM.summary['mean']['pi__0'],
# 0)
# self.assertAlmostEqual(self.pi[1],
# self.test_DPMM.summary['mean']['pi__1'],
# 0)
# self.assertAlmostEqual(self.pi[2],
# self.test_DPMM.summary['mean']['pi__2'],
# 0)
#
# self.assertAlmostEqual(
# self.means[0],
# self.test_DPMM.summary['mean']['cluster_center_0__0'],
# 0)
# self.assertAlmostEqual(
# self.means[1],
# self.test_DPMM.summary['mean']['cluster_center_1__0'],
# 0)
# self.assertAlmostEqual(
# self.means[2],
# self.test_DPMM.summary['mean']['cluster_center_2__0'],
# 0)
#
# self.assertAlmostEqual(
# self.sigmas[0],
# self.test_DPMM.summary['mean']['cluster_variance_0__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[1],
# self.test_DPMM.summary['mean']['cluster_variance_1__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[2],
# self.test_DPMM.summary['mean']['cluster_variance_2__0'],
# 0)
#
# def test_nuts_fit_returns_correct_model(self):
# # This print statement ensures PyMC3 output won't overwrite the test name
# print('')
# self.test_nuts_DPMM.fit(self.X_train,
# inference_type='nuts',
# inference_args={'draws': 1000,
# 'chains': 2})
#
# self.assertEqual(self.num_pred, self.test_nuts_DPMM.num_pred)
# self.assertEqual(self.num_components, self.test_nuts_DPMM.num_components)
# self.assertEqual(self.num_components, self.test_nuts_DPMM.num_truncate)
#
# self.assertAlmostEqual(self.pi[0],
# self.test_nuts_DPMM.summary['mean']['pi__0'],
# 0)
# self.assertAlmostEqual(self.pi[1],
# self.test_nuts_DPMM.summary['mean']['pi__1'],
# 0)
# self.assertAlmostEqual(self.pi[2],
# self.test_nuts_DPMM.summary['mean']['pi__2'],
# 0)
#
# self.assertAlmostEqual(
# self.means[0],
# self.test_nuts_DPMM.summary['mean']['cluster_center_0__0'],
# 0)
# self.assertAlmostEqual(
# self.means[1],
# self.test_nuts_DPMM.summary['mean']['cluster_center_1__0'],
# 0)
# self.assertAlmostEqual(
# self.means[2],
# self.test_nuts_DPMM.summary['mean']['cluster_center_2__0'],
# 0)
#
# self.assertAlmostEqual(
# self.sigmas[0],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_0__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[1],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_1__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[2],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_2__0'],
# 0)
#
#
# class DirichletProcessMixtureScoreTestCase(DirichletProcessMixtureTestCase):
# def test_score_matches_sklearn_performance(self):
# print('')
# skDPMM = skBayesianGaussianMixture(n_components=3)
# skDPMM.fit(self.X_train)
# skDPMM_score = skDPMM.score(self.X_test)
#
# self.test_DPMM.fit(self.X_train)
# test_DPMM_score = self.test_DPMM.score(self.X_test)
#
# self.assertAlmostEqual(skDPMM_score, test_DPMM_score, 0)
#
#
# class DirichletProcessMixtureSaveAndLoadTestCase(DirichletProcessMixtureTestCase):
# def test_save_and_load_work_correctly(self):
# print('')
# self.test_DPMM.fit(self.X_train)
# score1 = self.test_DPMM.score(self.X_test)
# self.test_DPMM.save(self.test_dir)
#
# DPMM2 = DirichletProcessMixture()
# DPMM2.load(self.test_dir)
#
# self.assertEqual(self.test_DPMM.inference_type, DPMM2.inference_type)
# self.assertEqual(self.test_DPMM.num_pred, DPMM2.num_pred)
# self.assertEqual(self.test_DPMM.num_training_samples,
# DPMM2.num_training_samples)
# self.assertEqual(self.test_DPMM.num_truncate, DPMM2.num_truncate)
#
# pd.testing.assert_frame_equal(summary(self.test_DPMM.trace),
# summary(DPMM2.trace))
#
# score2 = DPMM2.score(self.X_test)
# self.assertAlmostEqual(score1, score2, 0)
| 38.914573 | 84 | 0.594008 |
a74d82ac6813ed8153326a2d69c62b3256148e18 | 1,096 | py | Python | algorithms/utils.py | billvb/oblio-game | c1c95b9d7bffe4e2841a978e4338cf72c38174ac | [
"MIT"
] | 2 | 2016-03-20T03:03:18.000Z | 2021-02-15T22:23:44.000Z | algorithms/utils.py | billvb/oblio-game | c1c95b9d7bffe4e2841a978e4338cf72c38174ac | [
"MIT"
] | null | null | null | algorithms/utils.py | billvb/oblio-game | c1c95b9d7bffe4e2841a978e4338cf72c38174ac | [
"MIT"
] | null | null | null | import random
TUPLE_SIZE = 4
DIGIT_BASE = 10
MAX_GUESS = DIGIT_BASE ** TUPLE_SIZE
| 28.842105 | 86 | 0.603102 |
a74d8736deea9179712853219ede84e9608d42dd | 1,276 | py | Python | utils/utils.py | cheng052/H3DNet | 872dabb37d8c2ca3581cf4e242014e6464debe57 | [
"MIT"
] | 212 | 2020-06-11T01:03:36.000Z | 2022-03-17T17:29:21.000Z | utils/utils.py | cheng052/H3DNet | 872dabb37d8c2ca3581cf4e242014e6464debe57 | [
"MIT"
] | 25 | 2020-06-15T13:35:13.000Z | 2022-03-10T05:44:05.000Z | utils/utils.py | cheng052/H3DNet | 872dabb37d8c2ca3581cf4e242014e6464debe57 | [
"MIT"
] | 24 | 2020-06-11T01:17:24.000Z | 2022-03-30T13:34:45.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
| 27.73913 | 103 | 0.670063 |
a74f41b8c63e9716f46430fe18d6b543d0682cb3 | 8,258 | py | Python | device/app.py | panjanek/IotCenter | e139617d14617c10a18c35515e2d3aaae797bcac | [
"MIT"
] | 2 | 2016-12-12T15:16:16.000Z | 2018-10-30T02:35:36.000Z | device/app.py | panjanek/IotCenter | e139617d14617c10a18c35515e2d3aaae797bcac | [
"MIT"
] | null | null | null | device/app.py | panjanek/IotCenter | e139617d14617c10a18c35515e2d3aaae797bcac | [
"MIT"
] | null | null | null | import logging
import threading
import json
import base64
import os
from subprocess import Popen
import glob
import time
import urllib2
import re
import string
import datetime
| 47.45977 | 177 | 0.552434 |
a74fb2c9000b17ff11193cacddad30429c023b4c | 7,882 | py | Python | deepsource/utils.py | vafaei-ar/deepsource | cbb06f5a2105506b63539ae5bfe73a3e62d4055f | [
"BSD-3-Clause"
] | null | null | null | deepsource/utils.py | vafaei-ar/deepsource | cbb06f5a2105506b63539ae5bfe73a3e62d4055f | [
"BSD-3-Clause"
] | 1 | 2020-12-15T10:03:50.000Z | 2020-12-16T10:39:00.000Z | deepsource/utils.py | vafaei-ar/deepsource | cbb06f5a2105506b63539ae5bfe73a3e62d4055f | [
"BSD-3-Clause"
] | 2 | 2019-09-02T10:24:22.000Z | 2021-03-30T01:29:03.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from skimage import draw
from skimage import measure
from astropy.io import fits
from astropy import units as u
from astropy import wcs, coordinates
from scipy.ndimage.filters import gaussian_filter
def standard(X):
    """
    standard : Min-max normalize an array into the range [0, 1].

    Arguments:
        X (numpy array) : input data.
    --------
    Returns:
        numpy array equal to (X - X.min()) / (X.max() - X.min()).
        A constant input (zero value range) is mapped to all zeros
        instead of dividing by zero as the previous version did.
    """
    shifted = X - X.min()
    span = shifted.max()
    if span == 0:
        # Constant input: avoid 0/0 (which produced NaNs before).
        return np.zeros_like(shifted, dtype=float)
    return shifted / span
def fetch_data(image_file, model_file, do_standard=True, ignore_error=False):
    """
    fetch_data : Read a FITS image and the pixel positions of its model sources.

    Arguments:
        image_file (string) : path to image file.
        model_file (string) : path to model (source catalog) file.
        do_standard (logical) (default=True) : if true, minimum/maximum value
            of image will be set to 0/1.
        ignore_error (logical) (default=False) : if true, silently drop
            sources that fall outside the image instead of asserting.
    --------
    Returns:
        image, x coordinates, y coordinates
    """
    with fits.open(image_file) as hdus:
        image = hdus[0].data
        header = hdus[0].header
    width = header['NAXIS1']
    height = header['NAXIS2']
    coord_sys = wcs.WCS(header)
    catalog = np.loadtxt(model_file,
                         dtype={'names': ('name', 'ra', 'dec', 'I'),
                                'formats': ('S10', 'f4', 'f4', 'f4')})
    n_src = len(catalog['ra'])
    # Convert RA/Dec to pixel coordinates through the image's WCS.
    sky = coordinates.SkyCoord(catalog['ra'], catalog['dec'],
                               unit='deg', frame='fk5')
    world = np.vstack([sky.ra * u.deg, sky.dec * u.deg,
                       np.zeros(n_src), np.zeros(n_src)]).T
    pix = coord_sys.wcs_world2pix(world, 0)
    x_coords, y_coords = pix[:, 0], pix[:, 1]
    inside = ((0 <= x_coords) & (x_coords < width) &
              (0 <= y_coords) & (y_coords < height))
    if ignore_error:
        x_coords, y_coords = x_coords[inside], y_coords[inside]
    else:
        assert np.sum(inside) == n_src, 'There are some sources out of images! The problem might be in coordinate conversion system or simulation!'
    if do_standard == True:
        image = standard(image)
    return np.moveaxis(image, 0, -1), x_coords, y_coords
def fetch_data_3ch(image_file, model_file, do_standard=True):
    """
    fetch_data_3ch : Read the images of all 3 robusts plus the model.

    Arguments:
        image_file (string) : path to the robust-0 image file; the robust-1
            and robust-2 paths are derived by substituting 'robust-0'.
        model_file (string) : path to model file.
        do_standard (logical) (default=True) : if true, minimum/maximum value
            of each image will be set to 0/1.
    --------
    Returns:
        3-channel image, x coordinates, y coordinates

    Raises:
        AssertionError : if the robust-1 or robust-2 companion image cannot
            be read (chained to the underlying error).
    """
    data0, x_coords, y_coords = fetch_data(image_file, model_file,
                                           do_standard=do_standard)
    try:
        data1, x_coords, y_coords = fetch_data(
            image_file.replace('robust-0', 'robust-1'),
            model_file, do_standard=do_standard)
    except Exception as err:
        # Chain the original error so the real cause stays visible.
        raise AssertionError('Robust 1 does not exist.') from err
    try:
        data2, x_coords, y_coords = fetch_data(
            image_file.replace('robust-0', 'robust-2'),
            model_file, do_standard=do_standard)
    except Exception as err:
        # Bug fix: the previous message wrongly said "Robust 1" here.
        raise AssertionError('Robust 2 does not exist.') from err
    return np.concatenate((data0, data1, data2), axis=-1), x_coords, y_coords
def cat2map(lx, ly, x_coords, y_coords):
    """
    cat2map : Rasterize a point-source catalog into a 0/1 mask where ones
    mark point-source pixels.

    Arguments:
        lx (int): number of pixels of the image in first dimension.
        ly (int): number of pixels of the image in second dimension.
        x_coords (numpy array): first coordinates of the point sources.
        y_coords (numpy array): second coordinates of the point sources.
    --------
    Returns:
        catalog image as numpy array of shape (lx, ly).

    NOTE(review): pixels are written as ``[y, x]`` into an ``(lx, ly)``
    array, which looks transposed for non-square images — confirm the
    intended axis convention with the callers.
    """
    mask = np.zeros((lx, ly))
    xs = x_coords.astype(int)
    ys = y_coords.astype(int)
    for x_pix, y_pix in zip(xs, ys):
        mask[y_pix, x_pix] = 1
    return mask
def magnifier(y, radius=15, value=1):
    """
    magnifier : Grow every pixel with value one into a filled disc.

    Arguments:
        y (numpy array) : input 2D map.
        radius (int) (default=15) : radius of magnification.
        value (float) (default=1) : the value written into magnified pixels.
    --------
    Returns:
        image with magnified objects as numpy array.
    """
    out = np.zeros(y.shape)
    for row, col in np.argwhere(y == 1):
        rr, cc = draw.circle(row, col, radius=radius, shape=out.shape)
        out[rr, cc] = value
    return out
def circle(y, radius=15):
    """
    circle : Draw a circle outline around every pixel with value one.

    Arguments:
        y (numpy array): input 2D map.
        radius (int) (default=15): circle radius.
    --------
    Returns:
        image with circles around objects.
    """
    out = np.zeros(y.shape)
    for row, col in np.argwhere(y == 1):
        rr, cc = draw.circle_perimeter(row, col, radius=radius,
                                       shape=out.shape)
        out[rr, cc] = 1
    return out
def horn_kernel(y, radius=10, step_height=1):
    """
    horn_kernel : Apply a horn-shaped (stacked-disc) kernel around every
    pixel with value one; each of the radius-1 discs adds
    step_height/radius, so values ramp up toward each source.

    Arguments:
        y (numpy array): input 2D map.
        radius (int) (default=10): effective radius of kernel.
        step_height (float) (default=1): total height scale of the horn.
    --------
    Returns:
        kerneled image.
    """
    out = np.zeros(y.shape)
    increment = 1. * step_height / radius
    # Source positions do not change between radii, so find them once.
    sources = np.argwhere(y == 1)
    for r in range(1, radius):
        for row, col in sources:
            rr, cc = draw.circle(row, col, radius=r, shape=out.shape)
            out[rr, cc] += increment
    return out
def gaussian_kernel(y, sigma=7):
    """
    gaussian_kernel : Smooth the map with a Gaussian filter.

    Arguments:
        y (numpy array): input 2D map.
        sigma (float) (default=7): effective width of the Gaussian smoothing.
    --------
    Returns:
        kerneled image.
    """
    smoothed = gaussian_filter(y, sigma)
    return smoothed
def ch_mkdir(directory):
    """
    ch_mkdir : Create the directory (and any parents) if it does not exist.

    Arguments:
        directory (string): Path to the directory.
    --------
    Returns:
        null.
    """
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() guard (another process could create the directory
    # between the check and makedirs, crashing the old code).
    os.makedirs(directory, exist_ok=True)
def the_print(text, style='bold', tc='gray', bgc='red'):
    """
    Print *text* wrapped in an ANSI escape sequence selecting the style
    (bold/underlined/plain), the text color and the background color.
    Unknown color names raise ValueError, as before.
    """
    palette = ('black', 'red', 'green', 'yellow',
               'blue', 'purple', 'skyblue', 'gray')
    style_codes = {'bold': 1, 'underlined': 4}
    style_code = style_codes.get(style, 0)
    fg_code = 30 + palette.index(tc)
    bg_code = 40 + palette.index(bgc)
    form = '%d;%d;%d' % (style_code, fg_code, bg_code)
    print('\x1b[%sm %s \x1b[0m' % (form, text))
#def ps_extract(xp):
# xp = xp-xp.min()
# xp = xp/xp.max()
# nb = []
# for trsh in np.linspace(0,0.2,200):
# blobs = measure.label(xp>trsh)
# nn = np.unique(blobs).shape[0]
# nb.append(nn)
# nb = np.array(nb)
# nb = np.diff(nb)
# trshs = np.linspace(0,0.2,200)[:-1]
# thrsl = trshs[~((-5<nb) & (nb<5))]
# if thrsl.shape[0]==0:
# trsh = 0.1
# else:
# trsh = thrsl[-1]
#2: 15, 20
#3: 30,10
#4: 50, 10
# nnp = 0
# for tr in np.linspace(1,0,1000):
# blobs = measure.label(xp>tr)
# nn = np.unique(blobs).shape[0]
# if nn-nnp>50:
# break
# nnp = nn
# trsh = tr
# blobs = measure.label(xp>trsh)
# xl = []
# yl = []
# pl = []
# for v in np.unique(blobs)[1:]:
# filt = blobs==v
# pnt = np.round(np.mean(np.argwhere(filt),axis=0)).astype(int)
# if filt.sum()>10:
# xl.append(pnt[1])
# yl.append(pnt[0])
# pl.append(np.mean(xp[blobs==v]))
# return np.array([xl,yl]).T,np.array(pl)
| 29.520599 | 148 | 0.586399 |
a74fd79fe36c35a1329c69bf98a54c22cc8f9a55 | 12,349 | py | Python | ftc/lib/net/network.py | efulet/ann_text_classification | fba05a1789a19aa6d607ee36069dda419bb98e28 | [
"MIT"
] | null | null | null | ftc/lib/net/network.py | efulet/ann_text_classification | fba05a1789a19aa6d607ee36069dda419bb98e28 | [
"MIT"
] | null | null | null | ftc/lib/net/network.py | efulet/ann_text_classification | fba05a1789a19aa6d607ee36069dda419bb98e28 | [
"MIT"
] | null | null | null | """
@created_at 2015-01-18
@author Exequiel Fuentes Lettura <efulet@gmail.com>
"""
from pybrain.datasets import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import SoftmaxLayer
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.utilities import percentError
from pybrain.tools.validation import Validator
# Only needed for data generation and graphical output
import pylab as pl
import numpy as np
# Only needed for saving and loading trained network
import pickle
import os
from lib.util import SystemUtils
from network_exception import NetworkException
| 38.711599 | 128 | 0.581667 |
a75006d06757a5f27ac00ff68ada7211ab1bbdc4 | 342 | py | Python | python2/probe_yd.py | Nzen/run_ydl | 90d7075ba8ec5771b5edcbe2ad52211d95546f83 | [
"WTFPL"
] | null | null | null | python2/probe_yd.py | Nzen/run_ydl | 90d7075ba8ec5771b5edcbe2ad52211d95546f83 | [
"WTFPL"
] | null | null | null | python2/probe_yd.py | Nzen/run_ydl | 90d7075ba8ec5771b5edcbe2ad52211d95546f83 | [
"WTFPL"
] | null | null | null | from sys import argv
from subprocess import call
try :
link = argv[ 1 ]
except IndexError:
link = raw_input( " - which url interests you? " )
try:
ydl_answ = call( "youtube-dl -F "+ link, shell = True )
if ydl_answ is not 0 :
print "-- failed "+ link + " code "+ str(ydl_answ)
except OSError as ose :
print "Execution failed:", ose
| 21.375 | 56 | 0.663743 |
a7538f1279770f7607c3e20bb1757708788234b0 | 9,689 | py | Python | src/cogs/welcome.py | Cr4zi/SynatxBot | eeb59555c1cfa81e05c924b84c601c0b240e5ee3 | [
"MIT"
] | 4 | 2021-08-12T08:11:21.000Z | 2021-08-12T08:15:22.000Z | src/cogs/welcome.py | Cr4zi/SynatxBot | eeb59555c1cfa81e05c924b84c601c0b240e5ee3 | [
"MIT"
] | null | null | null | src/cogs/welcome.py | Cr4zi/SynatxBot | eeb59555c1cfa81e05c924b84c601c0b240e5ee3 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from discord import Embed
from discord.utils import get
import datetime
import psycopg2
from bot import DB_NAME, DB_PASS, DB_HOST, DB_USER, logger, private_message
| 46.581731 | 146 | 0.58489 |
a7551310f1a028ec26dd2191bdc424bc482a29c5 | 468 | py | Python | etcd_restore_rebuild_util/edit_yaml_for_rebuild.py | Cray-HPE/utils | dd6e13b46500e1c2f6ad887a8c1604044465d1d8 | [
"MIT"
] | null | null | null | etcd_restore_rebuild_util/edit_yaml_for_rebuild.py | Cray-HPE/utils | dd6e13b46500e1c2f6ad887a8c1604044465d1d8 | [
"MIT"
] | null | null | null | etcd_restore_rebuild_util/edit_yaml_for_rebuild.py | Cray-HPE/utils | dd6e13b46500e1c2f6ad887a8c1604044465d1d8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import sys
import yaml
file_name=sys.argv[1]
file_name = '/root/etcd/' + file_name + '.yaml'
with open(file_name) as f:
y=yaml.safe_load(f)
del y['metadata']['creationTimestamp']
del y['metadata']['generation']
del y['metadata']['resourceVersion']
del y['metadata']['uid']
del y['status']
with open(file_name, 'w') as outputFile:
yaml.dump(y,outputFile, default_flow_style=False, sort_keys=False)
| 22.285714 | 70 | 0.67094 |
a75582560560cf86bc8bb8744feee3c442ea60e2 | 1,514 | py | Python | src/Segmentation/segmentation.py | odigous-labs/video-summarization | c125bf9fa1016d76680d5e9389e4bdb0f83bc4fb | [
"MIT"
] | 1 | 2019-03-05T06:00:38.000Z | 2019-03-05T06:00:38.000Z | src/Segmentation/segmentation.py | odigous-labs/video-summarization | c125bf9fa1016d76680d5e9389e4bdb0f83bc4fb | [
"MIT"
] | 2 | 2019-03-02T05:12:59.000Z | 2019-09-26T17:03:56.000Z | src/Segmentation/segmentation.py | odigous-labs/video-summarization | c125bf9fa1016d76680d5e9389e4bdb0f83bc4fb | [
"MIT"
] | null | null | null | import os
import cv2
from Segmentation import CombinedHist, get_histograms, HistQueue
import matplotlib.pyplot as plt
import numpy as np
listofFiles = os.listdir('generated_frames')
# change the size of queue accordingly
queue_of_hists = HistQueue.HistQueue(25)
x = []
y_r = []
y_g = []
y_b = []
for i in range(0, 4000):
blue_histr, green_histr, red_histr = get_histograms.get_histograms('generated_frames/frame' + str(i) + ".jpg")
hist_of_image = CombinedHist.CombinedHist(blue_histr, green_histr, red_histr)
compare(hist_of_image, i)
queue_of_hists.insert_histr(hist_of_image)
print("frame" + str(i) + ".jpg")
fig = plt.figure(figsize=(18, 5))
y = np.add(np.add(y_r, y_g), y_b) / 3
value = np.percentile(y, 5)
median = np.median(y)
minimum = np.amin(y)
y_sorted = np.sort(y)
getting_index = y_sorted[8]
print("quartile" + str(value))
print("median" + str(median))
plt.plot(x, y, color='k')
plt.axhline(y=value, color='r', linestyle='-')
plt.xticks(np.arange(min(x), max(x) + 1, 100.0))
plt.show()
| 29.115385 | 114 | 0.718626 |
a755b8f4c107bcf90ce08cbfeeeaa2d842ac3f66 | 12,369 | py | Python | stickerbot.py | gumblex/stickerindexbot | 8e8edaabac54d2747e4b620464670a60a65efcb5 | [
"MIT"
] | 1 | 2017-01-20T18:11:46.000Z | 2017-01-20T18:11:46.000Z | stickerbot.py | gumblex/stickerindexbot | 8e8edaabac54d2747e4b620464670a60a65efcb5 | [
"MIT"
] | null | null | null | stickerbot.py | gumblex/stickerindexbot | 8e8edaabac54d2747e4b620464670a60a65efcb5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Telegram Sticker Index Bot
'''
import re
import sys
import time
import json
import queue
import sqlite3
import logging
import requests
import functools
import threading
import collections
import concurrent.futures
import zhconv
logging.basicConfig(stream=sys.stderr, format='%(asctime)s [%(name)s:%(levelname)s] %(message)s', level=logging.DEBUG if sys.argv[-1] == '-v' else logging.INFO)
logger_botapi = logging.getLogger('botapi')
executor = concurrent.futures.ThreadPoolExecutor(5)
HSession = requests.Session()
_re_one_emoji = (
'[-]|'
'(?:(?:[-]|[-]|[-\U0001f6ff]|[\U0001f900-\U0001f9ff])[-]?\u200d)*'
'(?:[-]|[-]|[-\U0001f6ff]|[\U0001f900-\U0001f9ff])[-]?'
)
re_emoji = re.compile('(%s)' % _re_one_emoji)
re_qtag = re.compile(r"#?\w+", re.UNICODE)
re_tag = re.compile(r"#\w+", re.UNICODE)
re_tags = re.compile(r"#\w+(?:\s+#\w+)*", re.UNICODE)
def nt_from_dict(nt, d, default=None):
    """Build namedtuple *nt* from dict *d*, filling missing fields with
    *default*.  Keys in *d* that are not fields of *nt* raise TypeError,
    exactly as calling the namedtuple constructor directly would.
    """
    merged = {field: default for field in nt._fields}
    merged.update(d)
    return nt(**merged)
# Bot API
def getupdates():
    """Poll the Telegram getUpdates endpoint forever, queueing new updates.

    Runs as a background thread: fetch errors are logged and retried,
    and the confirmed offset is kept in the persistent STATE mapping.
    """
    global CFG, STATE
    while True:
        try:
            batch = bot_api('getUpdates',
                            offset=STATE.get('offset', 0), timeout=10)
        except Exception:
            logger_botapi.exception('Get updates failed.')
            continue
        if batch:
            # Advance the offset past the newest update we have seen so
            # already-processed updates are not fetched again.
            STATE['offset'] = batch[-1]["update_id"] + 1
            for update in batch:
                MSG_Q.put(update)
        time.sleep(.2)
# DB stuff
# Query handling
START = 'This is the Sticker Index Bot. Send /help, or directly use its inline mode.'
HELP = ('You can search for stickers by tags or emoji in its inline mode.\n'
'This bot will collect tags for stickers in groups or private chat, '
'after seeing stickers being replied to in the format "#tagone #tagtwo".'
)
if __name__ == '__main__':
    CFG = load_config()
    MSG_Q = queue.Queue()
    DB, STATE = init_db(CFG.database)
    try:
        # Run the long-polling fetcher in the background; daemon=True lets
        # the process exit even if the thread is blocked on the network.
        poller = threading.Thread(target=getupdates)
        poller.daemon = True
        poller.start()
        logging.info('Satellite launched')
        # Main loop: consume queued updates and dispatch them.
        while True:
            handle_api_update(MSG_Q.get())
    finally:
        # Persist bot state (e.g. the update offset) on shutdown.
        STATE.close()
| 32.379581 | 160 | 0.570216 |
a755c3e60d6f4943e03a99183eadd47ca1d97d29 | 4,571 | py | Python | tests.py | AndreLouisCaron/requests-wsgi-adapter | 5506c4785824673147449daabb5c4e06192e5078 | [
"BSD-3-Clause"
] | null | null | null | tests.py | AndreLouisCaron/requests-wsgi-adapter | 5506c4785824673147449daabb5c4e06192e5078 | [
"BSD-3-Clause"
] | null | null | null | tests.py | AndreLouisCaron/requests-wsgi-adapter | 5506c4785824673147449daabb5c4e06192e5078 | [
"BSD-3-Clause"
] | null | null | null | import json
import unittest
import requests
from urllib3._collections import HTTPHeaderDict
from wsgiadapter import WSGIAdapter
def test_multiple_cookies():
    """Both Set-Cookie headers from one response land in the session jar."""
    handler = WSGITestHandler(extra_headers=[
        ("Set-Cookie", "flimble=floop; Path=/"),
        ("Set-Cookie", "flamble=flaap; Path=/"),
    ])
    session = requests.session()
    session.mount('http://localhost', WSGIAdapter(app=handler))
    session.get("http://localhost/cookies/set?flimble=floop&flamble=flaap")
    for name, expected in (("flimble", "floop"), ("flamble", "flaap")):
        assert session.cookies[name] == expected
def test_delete_cookies():
    """A cookie expired by the server is removed from the session jar."""
    session = requests.session()
    setter = WSGITestHandler(extra_headers=[
        ("Set-Cookie", "flimble=floop; Path=/"),
        ("Set-Cookie", "flamble=flaap; Path=/"),
    ])
    deleter = WSGITestHandler(extra_headers=[(
        "Set-Cookie",
        "flimble=; Expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/")])
    session.mount('http://localhost/cookies/set', WSGIAdapter(app=setter))
    session.mount('http://localhost/cookies/delete', WSGIAdapter(app=deleter))

    session.get("http://localhost/cookies/set?flimble=floop&flamble=flaap")
    assert session.cookies['flimble'] == "floop"
    assert session.cookies['flamble'] == "flaap"

    # Hitting the delete endpoint expires only 'flimble'.
    session.get("http://localhost/cookies/delete?flimble")
    assert 'flimble' not in session.cookies
    assert session.cookies['flamble'] == "flaap"
| 39.068376 | 107 | 0.640123 |
a75611852715e07033587bffa7d94fdf7b98243d | 548 | py | Python | setup.py | ducandu/aiopening | 214d8d6dfc928ab4f8db634018092dc43eaf0e3c | [
"MIT"
] | null | null | null | setup.py | ducandu/aiopening | 214d8d6dfc928ab4f8db634018092dc43eaf0e3c | [
"MIT"
] | null | null | null | setup.py | ducandu/aiopening | 214d8d6dfc928ab4f8db634018092dc43eaf0e3c | [
"MIT"
] | null | null | null | """
-------------------------------------------------------------------------
shine -
setup
!!TODO: add file description here!!
created: 2017/06/04 in PyCharm
(c) 2017 Sven - ducandu GmbH
-------------------------------------------------------------------------
"""
from setuptools import setup
setup(name='aiopening', version='1.0', description='AI (but even opener)', url='http://github.com/sven1977/aiopening', author='Sven Mika',
author_email='sven.mika@ducandu.com', license='MIT', packages=['aiopening'], zip_safe=False) | 34.25 | 138 | 0.501825 |
a756ca330f0702ca67f549b4365c53dd8dc05dbc | 1,932 | py | Python | podcast_dl/podcasts.py | RMPR/simple-podcast-dl | bb4419d3beb1a893bfac5aa6546ba25522531b00 | [
"MIT"
] | null | null | null | podcast_dl/podcasts.py | RMPR/simple-podcast-dl | bb4419d3beb1a893bfac5aa6546ba25522531b00 | [
"MIT"
] | null | null | null | podcast_dl/podcasts.py | RMPR/simple-podcast-dl | bb4419d3beb1a893bfac5aa6546ba25522531b00 | [
"MIT"
] | null | null | null | """
List of podcasts and their filename parser types.
"""
from .rss_parsers import BaseItem, TalkPythonItem, ChangelogItem, IndieHackersItem
import attr
# Registry of supported podcast feeds.
# NOTE(review): relies on a `Podcast` class (apparently an attr-based record,
# given the `import attr` above) defined elsewhere in this module with fields
# name/title/url/rss/rss_parser -- confirm before editing.
# `rss_parser` selects the per-feed episode/filename parser from rss_parsers.
PODCASTS = [
    Podcast(
        name="talkpython",
        title="Talk Python To Me",
        url="https://talkpython.fm",
        rss="https://talkpython.fm/episodes/rss",
        rss_parser=TalkPythonItem,
    ),
    Podcast(
        name="pythonbytes",
        title="Python Bytes",
        url="https://pythonbytes.fm/",
        rss="https://pythonbytes.fm/episodes/rss",
        rss_parser=TalkPythonItem,
    ),
    Podcast(
        name="changelog",
        title="The Changelog",
        url="https://changelog.com/podcast",
        rss="https://changelog.com/podcast/feed",
        rss_parser=ChangelogItem,
    ),
    Podcast(
        name="podcastinit",
        title="Podcast.__init__",
        url="https://www.podcastinit.com/",
        rss="https://www.podcastinit.com/feed/mp3/",
        rss_parser=BaseItem,
    ),
    Podcast(
        name="indiehackers",
        title="Indie Hackers",
        url="https://www.indiehackers.com/podcast",
        rss="http://feeds.backtracks.fm/feeds/indiehackers/indiehackers/feed.xml",
        rss_parser=IndieHackersItem,
    ),
    Podcast(
        name="realpython",
        title="Real Python",
        url="https://realpython.com/podcasts/rpp/",
        rss="https://realpython.com/podcasts/rpp/feed",
        rss_parser=BaseItem,
    ),
    Podcast(
        name="kubernetespodcast",
        title="Kubernetes Podcast",
        url="https://kubernetespodcast.com/",
        rss="https://kubernetespodcast.com/feeds/audio.xml",
        rss_parser=BaseItem,
    ),
]
# Lookup table: short podcast name -> Podcast record.
PODCAST_MAP = {p.name: p for p in PODCASTS}
a75700da032ade0f2e5909a09f4ffc60c4abd193 | 20,543 | py | Python | 07_spitzer_aor_extraction.py | rsiverd/ultracool | cbeb2e0e4aee0acc9f8ed2bde7ecdf8be5fa85a1 | [
"BSD-2-Clause"
] | null | null | null | 07_spitzer_aor_extraction.py | rsiverd/ultracool | cbeb2e0e4aee0acc9f8ed2bde7ecdf8be5fa85a1 | [
"BSD-2-Clause"
] | null | null | null | 07_spitzer_aor_extraction.py | rsiverd/ultracool | cbeb2e0e4aee0acc9f8ed2bde7ecdf8be5fa85a1 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# vim: set fileencoding=utf-8 ts=4 sts=4 sw=4 et tw=80 :
#
# Extract and save extended object catalogs from the specified data and
# uncertainty images. This version of the script jointly analyzes all
# images from a specific AOR/channel to enable more sophisticated
# analysis.
#
# Rob Siverd
# Created: 2021-02-02
# Last modified: 2021-08-24
#--------------------------------------------------------------------------
#**************************************************************************
#--------------------------------------------------------------------------
## Logging setup:
import logging
# Module-level logger; INFO by default (DEBUG lines kept for quick toggling).
#logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
#logger.setLevel(logging.DEBUG)
logger.setLevel(logging.INFO)

## Current version:
__version__ = "0.3.5"

## Python version-agnostic module reloading:
# Provides `reload` on Py2.7 (builtin), Py3.4+ (importlib), and Py3.0-3.3 (imp).
try:
    reload                             # Python 2.7
except NameError:
    try:
        from importlib import reload   # Python 3.4+
    except ImportError:
        from imp import reload         # Python 3.0 - 3.3

## Modules:
import argparse
import shutil
#import resource
#import signal
import glob
#import gc
import os
import sys
import time
import numpy as np
#from numpy.lib.recfunctions import append_fields
#import datetime as dt
#from dateutil import parser as dtp
#from functools import partial
#from collections import OrderedDict
#from collections.abc import Iterable
#import multiprocessing as mp
#np.set_printoptions(suppress=True, linewidth=160)
# Numeric numpy version (e.g. 1.19) for feature gating.
_have_np_vers = float('.'.join(np.__version__.split('.')[:2]))

##--------------------------------------------------------------------------##
## Disable buffering on stdout/stderr:
# NOTE(review): `Unbuffered` is not defined in this excerpt -- presumably a
# small wrapper class defined elsewhere in this file; confirm before reuse.
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)

##--------------------------------------------------------------------------##
## Spitzer pipeline filesystem helpers:
# Each project module below is imported with reload() for notebook-style
# development; failure to import any of them aborts the script.
try:
    import spitz_fs_helpers
    reload(spitz_fs_helpers)
except ImportError:
    logger.error("failed to import spitz_fs_helpers module!")
    sys.exit(1)
sfh = spitz_fs_helpers

## Spitzer pipeline cross-correlation:
try:
    import spitz_xcorr_stacking
    reload(spitz_xcorr_stacking)
except ImportError:
    # NOTE(review): message says 'spitz_xcor_stacking' (missing 'r') while the
    # module is 'spitz_xcorr_stacking' -- runtime string left untouched here.
    logger.error("failed to import spitz_xcor_stacking module!")
    sys.exit(1)
sxc = spitz_xcorr_stacking.SpitzerXCorr()

## Catalog pruning helpers:
try:
    import catalog_tools
    reload(catalog_tools)
except ImportError:
    logger.error("failed to import catalog_tools module!")
    sys.exit(1)
xcp = catalog_tools.XCorrPruner()

## Spitzer star detection routine:
try:
    import spitz_extract
    reload(spitz_extract)
    spf = spitz_extract.SpitzFind()
except ImportError:
    logger.error("spitz_extract module not found!")
    sys.exit(1)

## Hybrid stack+individual position calculator:
try:
    import spitz_stack_astrom
    reload(spitz_stack_astrom)
    ha = spitz_stack_astrom.HybridAstrom()
except ImportError:
    logger.error("failed to import spitz_stack_astrom module!")
    sys.exit(1)

## HORIZONS ephemeris tools:
try:
    import jpl_eph_helpers
    reload(jpl_eph_helpers)
except ImportError:
    logger.error("failed to import jpl_eph_helpers module!")
    sys.exit(1)
eee = jpl_eph_helpers.EphTool()

##--------------------------------------------------------------------------##
## Fast FITS I/O:
try:
    import fitsio
except ImportError:
    logger.error("fitsio module not found! Install and retry.")
    sys.stderr.write("\nError: fitsio module not found!\n")
    sys.exit(1)
## Save FITS image with clobber (fitsio):
##--------------------------------------------------------------------------##
##------------------ Parse Command Line ----------------##
##--------------------------------------------------------------------------##
## Dividers:
# Text dividers used in progress output.
halfdiv = '-' * 40
fulldiv = '-' * 80

## Parse arguments and run script:
## Enable raw text AND display of defaults:
## Parse the command line:
if __name__ == '__main__':

    # ------------------------------------------------------------------
    # Build the command-line parser.
    # NOTE(review): `MyParser` is not defined in this excerpt -- presumably an
    # argparse.ArgumentParser subclass defined elsewhere in the file.
    prog_name = os.path.basename(__file__)
    descr_txt = """
    Extract catalogs from the listed Spitzer data/uncertainty images.
    Version: %s
    """ % __version__
    parser = MyParser(prog=prog_name, description=descr_txt)
                          #formatter_class=argparse.RawTextHelpFormatter)
    # ------------------------------------------------------------------
    # Defaults: no image type preselected; 2-sigma detection threshold;
    # skip existing catalogs; save registered frames for inspection.
    parser.set_defaults(imtype=None) #'cbcd') #'clean')
    #parser.set_defaults(sigthresh=3.0)
    parser.set_defaults(sigthresh=2.0)
    parser.set_defaults(skip_existing=True)
    parser.set_defaults(save_registered=True)
    #parser.set_defaults(save_reg_subdir=None)
    # ------------------------------------------------------------------
    #parser.add_argument('firstpos', help='first positional argument')
    #parser.add_argument('-w', '--whatever', required=False, default=5.0,
    #        help='some option with default [def: %(default)s]', type=float)
    # ------------------------------------------------------------------
    # ------------------------------------------------------------------
    iogroup = parser.add_argument_group('File I/O')
    iogroup.add_argument('--overwrite', required=False, dest='skip_existing',
            action='store_false', help='overwrite existing catalogs')
    #iogroup.add_argument('-E', '--ephem_data', default=None, required=True,
    #        help='CSV file with SST ephemeris data', type=str)
    iogroup.add_argument('-I', '--input_folder', default=None, required=True,
            help='where to find input images', type=str)
    iogroup.add_argument('-O', '--output_folder', default=None, required=False,
            help='where to save extended catalog outputs', type=str)
    iogroup.add_argument('-W', '--walk', default=False, action='store_true',
            help='recursively walk subfolders to find CBCD images')
    # Exactly one image flavor may be chosen (hcfix/clean/nudge).
    imtype = iogroup.add_mutually_exclusive_group()
    #imtype.add_argument('--cbcd', required=False, action='store_const',
    #        dest='imtype', const='cbcd', help='use cbcd images')
    imtype.add_argument('--hcfix', required=False, action='store_const',
            dest='imtype', const='hcfix', help='use hcfix images')
    imtype.add_argument('--clean', required=False, action='store_const',
            dest='imtype', const='clean', help='use clean images')
    imtype.add_argument('--nudge', required=False, action='store_const',
            dest='imtype', const='nudge', help='use nudge images')
    #iogroup.add_argument('-R', '--ref_image', default=None, required=True,
    #        help='KELT image with WCS')
    # ------------------------------------------------------------------
    # ------------------------------------------------------------------
    # Miscellany:
    miscgroup = parser.add_argument_group('Miscellany')
    miscgroup.add_argument('--debug', dest='debug', default=False,
            help='Enable extra debugging messages', action='store_true')
    miscgroup.add_argument('-q', '--quiet', action='count', default=0,
            help='less progress/status reporting')
    miscgroup.add_argument('-v', '--verbose', action='count', default=0,
            help='more progress/status reporting')
    # ------------------------------------------------------------------
    context = parser.parse_args()
    # Verbosity: --debug forces maximum; otherwise verbose minus quiet counts.
    context.vlevel = 99 if context.debug else (context.verbose-context.quiet)
    context.prog_name = prog_name

    # Unless otherwise specified, output goes into input folder:
    if not context.output_folder:
        context.output_folder = context.input_folder

    # Ensure an image type is selected:
    if not context.imtype:
        sys.stderr.write("\nNo image type selected!\n\n")
        sys.exit(1)

## Use imtype-specific folder for registered file output:
#if not context.save_reg_subdir:
#    context.save_reg_subdir = 'aligned_%s' % context.imtype
##--------------------------------------------------------------------------##
##------------------ Make Input Image List ----------------##
##--------------------------------------------------------------------------##
# Enumerate the input frames of the requested flavor, either by walking the
# tree (--walk) or by scanning a single folder.
tstart = time.time()
sys.stderr.write("Listing %s frames ... " % context.imtype)
#im_wildpath = 'SPITZ*%s.fits' % context.imtype
#im_wildcard = os.path.join(context.input_folder, 'SPIT*'
#_img_types = ['cbcd', 'clean', 'cbunc']
#_type_suff = dict([(x, x+'.fits') for x in _im_types])
#img_list = {}
#for imsuff in suffixes:
#    wpath = '%s/SPITZ*%s.fits' % (context.input_folder, imsuff)
#    img_list[imsuff] = sorted(glob.glob(os.path.join(context.
#img_files = sorted(glob.glob(os.path.join(context.input_folder, im_wildpath)))
if context.walk:
    img_files = sfh.get_files_walk(context.input_folder, flavor=context.imtype)
else:
    img_files = sfh.get_files_single(context.input_folder, flavor=context.imtype)
sys.stderr.write("done.\n")

## Abort in case of no input:
if not img_files:
    sys.stderr.write("No input (%s) files found in folder:\n" % context.imtype)
    sys.stderr.write("--> %s\n\n" % context.input_folder)
    sys.exit(1)

n_images = len(img_files)

## List of uncertainty frames (warn if any missing):
#unc_files = [x.replace(context.imtype, 'cbunc') for x in img_files]
#sys.stderr.write("Checking error-images ... ")
#have_unc = [os.path.isfile(x) for x in unc_files]
#if not all(have_unc):
#    sys.stderr.write("WARNING: some uncertainty frames missing!\n")
#else:
#    sys.stderr.write("done.\n")

##--------------------------------------------------------------------------##
##------------------         Unique AOR/Channel Combos      ----------------##
##--------------------------------------------------------------------------##

# Group frames by AOR/channel tag so each observation set is processed jointly.
unique_tags = sorted(list(set([sfh.get_irac_aor_tag(x) for x in img_files])))
images_by_tag = {x:[] for x in unique_tags}
for ii in img_files:
    images_by_tag[sfh.get_irac_aor_tag(ii)].append(ii)
##--------------------------------------------------------------------------##
##------------------ Diagnostic Region Files ----------------##
##--------------------------------------------------------------------------##
##--------------------------------------------------------------------------##
##------------------ ExtendedCatalog Ephem Format ----------------##
##--------------------------------------------------------------------------##
#def reformat_ephem(edata):
##--------------------------------------------------------------------------##
##------------------ Stack/Image Comparison ----------------##
##--------------------------------------------------------------------------##
#def xcheck(idata, sdata):
# nstack = len(sdata)
# nimage = len(idata)
# sys.stderr.write("nstack: %d\n" % nstack)
# sys.stderr.write("nimage: %d\n" % nimage)
# return
##--------------------------------------------------------------------------##
##------------------ Process All Images ----------------##
##--------------------------------------------------------------------------##
# Main driver: for each AOR tag, cross-correlate and stack the frames, extract
# a master catalog from the stack, then detect/prune/match sources in each
# individual frame.  `ntodo` > 0 limits how many frames are processed (debug).
ntodo = 0
nproc = 0
ntotal = len(img_files)
min_sobj = 10       # bark if fewer than this many found in stack
skip_stuff = False
#context.save_registered = False
#context.skip_existing = False

## Reduce bright pixel threshold:
#sxc.set_bp_thresh(10.0)
#sxc.set_bp_thresh(5.0)
sxc.set_bp_thresh(10.0)
#sxc.set_vlevel(10)
# NOTE(review): the first set_roi_rfrac(0.90) is immediately overridden by
# the 2.00 call below -- intentional tuning leftovers, presumably.
sxc.set_roi_rfrac(0.90)
sxc.set_roi_rfrac(2.00)
#sys.exit(0)
#for aor_tag,tag_files in images_by_tag.items():
for aor_tag in unique_tags:
    sys.stderr.write("\n\nProcessing images from %s ...\n" % aor_tag)
    tag_files = images_by_tag[aor_tag]
    n_tagged = len(tag_files)
    # Joint analysis needs at least two frames per AOR; bail otherwise.
    if n_tagged < 2:
        sys.stderr.write("WARNING: only %d images with tag %s\n"
                % (n_tagged, aor_tag))
        sys.stderr.write("This case is not currently handled ...\n")
        sys.exit(1)

    # File/folder paths (stack image, stack catalog, medianized stack):
    aor_dir = os.path.dirname(tag_files[0])
    stack_ibase = '%s_%s_stack.fits' % (aor_tag, context.imtype)
    stack_cbase = '%s_%s_stack.fcat' % (aor_tag, context.imtype)
    medze_ibase = '%s_%s_medze.fits' % (aor_tag, context.imtype)
    stack_ipath = os.path.join(aor_dir, stack_ibase)
    stack_cpath = os.path.join(aor_dir, stack_cbase)
    medze_ipath = os.path.join(aor_dir, medze_ibase)
    #sys.stderr.write("stack_ibase: %s\n" % stack_ibase)

    #sys.stderr.write("As of this point ...\n")
    #sys.stderr.write("sxc._roi_rfrac: %.5f\n" % sxc._roi_rfrac)
    sys.stderr.write("Cross-correlating and stacking ... ")
    result = sxc.shift_and_stack(tag_files)
    sys.stderr.write("done.\n")
    sxc.save_istack(stack_ipath)
    #sys.exit(0)
    #istack = sxc.get_stacked()
    #qsave(stack_ipath, istack)

    # Dump registered data to disk (fresh subfolder per AOR/imtype):
    if context.save_registered:
        save_reg_subdir = 'aligned_%s_%s' % (aor_tag, context.imtype)
        sys.stderr.write("Saving registered frames for inspection ...\n")
        #reg_dir = os.path.join(aor_dir, context.save_reg_subdir)
        reg_dir = os.path.join(aor_dir, save_reg_subdir)
        if os.path.isdir(reg_dir):
            shutil.rmtree(reg_dir)
        os.mkdir(reg_dir)
        sxc.dump_registered_images(reg_dir)
        sxc.dump_bright_pixel_masks(reg_dir)
        sys.stderr.write("\n")

    # Extract stars from stacked image:
    spf.use_images(ipath=stack_ipath)
    stack_cat = spf.find_stars(context.sigthresh)
    sdata = stack_cat.get_catalog()
    nsobj = len(sdata)
    sys.stderr.write(" \nFound %d sources in stacked image.\n\n" % nsobj)
    # Too few stack detections means something is badly wrong; abort.
    if (nsobj < min_sobj):
        sys.stderr.write("Fewer than %d objects found in stack ... \n" % min_sobj)
        sys.stderr.write("Found %d objects.\n\n" % nsobj)
        sys.stderr.write("--> %s\n\n" % stack_ipath)
        sys.exit(1)
    stack_cat.save_as_fits(stack_cpath, overwrite=True)

    # region file for diagnostics:
    # NOTE(review): `regify_excat_pix` is not defined in this excerpt --
    # presumably a DS9-region writer defined elsewhere in the file.
    stack_rfile = stack_ipath + '.reg'
    regify_excat_pix(sdata, stack_rfile, win=True)

    # Make/save 'medianize' stack for comparison:
    sxc.make_mstack()
    sxc.save_mstack(medze_ipath)

    # Set up pruning system (per-image offsets + master catalog):
    xshifts, yshifts = sxc.get_stackcat_offsets()
    xcp.set_master_catalog(sdata)
    xcp.set_image_offsets(xshifts, yshifts)

    # Set up hybrid astrometry system:
    ha.set_stack_excat(stack_cat)   # catalog of detections
    ha.set_xcorr_metadata(sxc)      # pixel offsets by image

    ## Stop here for now ...
    #if skip_stuff:
    #    continue

    # process individual files with cross-correlation help:
    for ii,img_ipath in enumerate(tag_files, 1):
        sys.stderr.write("%s\n" % fulldiv)
        unc_ipath = img_ipath.replace(context.imtype, 'cbunc')
        if not os.path.isfile(unc_ipath):
            sys.stderr.write("WARNING: file not found:\n--> %s\n" % unc_ipath)
            continue
        img_ibase = os.path.basename(img_ipath)
        #cat_ibase = img_ibase.replace(context.imtype, 'fcat')
        cat_fbase = img_ibase + '.fcat'
        cat_pbase = img_ibase + '.pcat'
        cat_mbase = img_ibase + '.mcat'
        ### FIXME ###
        ### context.output_folder is not appropriate for walk mode ...
        save_dir = context.output_folder    # NOT FOR WALK MODE
        save_dir = os.path.dirname(img_ipath)
        cat_fpath = os.path.join(save_dir, cat_fbase)
        cat_ppath = os.path.join(save_dir, cat_pbase)
        cat_mpath = os.path.join(save_dir, cat_mbase)
        ### FIXME ###
        sys.stderr.write("Catalog %s ... " % cat_fpath)
        # The .mcat is written last, so its presence marks a completed frame.
        if context.skip_existing:
            if os.path.isfile(cat_mpath):
                sys.stderr.write("exists!  Skipping ... \n")
                continue
        nproc += 1
        sys.stderr.write("not found ... creating ...\n")
        spf.use_images(ipath=img_ipath, upath=unc_ipath)
        result = spf.find_stars(context.sigthresh)
        ## FIXME: this just grabs the ephemeris from the header content
        ## of the first ExtendedCatalog produced. This should be obtained
        ## separately to make things easier to follow (and to eliminate
        ## the need to pre-modify the image headers ...)
        eph_data = eee.eph_from_header(result.get_header())
        result.set_ephem(eph_data)
        result.save_as_fits(cat_fpath, overwrite=True)
        nfound = len(result.get_catalog())
        frame_rfile = img_ipath + '.reg'
        regify_excat_pix(result.get_catalog(), frame_rfile, win=True)

        # prune sources not detected in stacked frame:
        pruned = xcp.prune_spurious(result.get_catalog(), img_ipath)
        npruned = len(pruned)
        sys.stderr.write("nfound: %d, npruned: %d\n" % (nfound, npruned))
        if (len(pruned) < 5):
            sys.stderr.write("BARKBARKBARK\n")
            sys.exit(1)
        result.set_catalog(pruned)
        result.save_as_fits(cat_ppath, overwrite=True)

        # build and save hybrid catalog:
        mcat = ha.make_hybrid_excat(result)
        mcat.set_ephem(eph_data)
        mcat.save_as_fits(cat_mpath, overwrite=True)
        mxcat_rfile = img_ipath + '.mcat.reg'
        #regify_excat_pix(mcat.get_catalog(), mxcat_rfile, win=True)

        # stop early if requested:
        if (ntodo > 0) and (nproc >= ntodo):
            break
    #break
    #sys.exit(0)
    if (ntodo > 0) and (nproc >= ntodo):
        break

tstop = time.time()
ttook = tstop - tstart
sys.stderr.write("Extraction completed in %.3f seconds.\n" % ttook)
#import astropy.io.fits as pf
#
#imra = np.array([hh['CRVAL1'] for hh in sxc._im_hdrs])
#imde = np.array([hh['CRVAL2'] for hh in sxc._im_hdrs])
#
##sys.stderr.write("\n\n\n")
##sys.stderr.write("sxc.shift_and_stack(tag_files)\n")
##result = sxc.shift_and_stack(tag_files)
#sys.exit(0)
#
#layers = sxc.pad_and_shift(sxc._im_data, sxc._x_shifts, sxc._y_shifts)
#tstack = sxc.dumb_stack(layers)
#pf.writeto('tstack.fits', tstack, overwrite=True)
#
#tdir = 'zzz'
#if not os.path.isdir(tdir):
# os.mkdir(tdir)
#
##tag_bases = [os.path.basename(x) for x in tag_files]
##for ibase,idata in zip(tag_bases, layers):
## tsave = os.path.join(tdir, 'r' + ibase)
## sys.stderr.write("Saving %s ... \n" % tsave)
## pf.writeto(tsave, idata, overwrite=True)
#
#sys.stderr.write("\n\n\n")
#sys.stderr.write("visual inspection with:\n")
#sys.stderr.write("flztfs %s\n" % ' '.join(tag_files))
##--------------------------------------------------------------------------##
######################################################################
# CHANGELOG (07_spitzer_aor_extraction.py):
#---------------------------------------------------------------------
#
# 2021-02-02:
# -- Increased __version__ to 0.1.0.
# -- First created 07_spitzer_aor_extraction.py.
#
| 37.148282 | 82 | 0.577861 |
a7574a31d3793e68486c1afc1807fc0afcd14ce5 | 6,594 | py | Python | project/apps/CI-producer/app/producers_test.py | Monxun/PortainerPractice | a3be077efe5c5eb2aa27b6a2fcf626989bdbbbe4 | [
"MIT"
] | null | null | null | project/apps/CI-producer/app/producers_test.py | Monxun/PortainerPractice | a3be077efe5c5eb2aa27b6a2fcf626989bdbbbe4 | [
"MIT"
] | 1 | 2022-03-02T22:54:36.000Z | 2022-03-02T22:54:36.000Z | project/apps/CI-producer/app/producers_test.py | Monxun/PortainerPractice | a3be077efe5c5eb2aa27b6a2fcf626989bdbbbe4 | [
"MIT"
] | null | null | null | from os import strerror
import os
import pytest
import datetime
import sqlalchemy
from sqlalchemy import inspect
from sqlalchemy import select
from sqlalchemy.orm import session
from sqlalchemy.sql.expression import func
#################################################
# DATABASE CONNECTOR
# NOTE(review): credentials are hardcoded -- fine for a throwaway local test
# DB, but they should come from the environment before this leaves a dev box.
user = 'user'
password = 'root'
host = 'localhost'
port = 3306
name = 'alinedb'

# Engine for the local MySQL test database (echo=True logs all SQL).
engine = sqlalchemy.create_engine(
    f'mysql+pymysql://{user}:{password}@{host}:{port}/{name}',
    echo=True
)

# Import-time side effect: connects and prints every table name.
inspector = inspect(engine)

for table_name in inspector.get_table_names():
    print(table_name)

# Session factory bound to the engine, plus one shared session for the tests.
Session = session.sessionmaker()
Session.configure(bind=engine)
my_session = Session()
#################################################
# TEST
'''
Module to test producers
'''
from models import (
Applicant,
Bank,
Merchant,
Application,
Branch,
Member,
Account,
User,
OneTimePasscode,
Transaction,
UserRegistrationToken
)
| 30.957746 | 74 | 0.746588 |
a7574f04a38567a940cb678fc874747f83a2b6d9 | 223 | py | Python | quran/domain/edition.py | octabytes/quran | 974d351cf5e6a12a28a5ac9f29c8d2753ae6dd86 | [
"Apache-2.0"
] | null | null | null | quran/domain/edition.py | octabytes/quran | 974d351cf5e6a12a28a5ac9f29c8d2753ae6dd86 | [
"Apache-2.0"
] | null | null | null | quran/domain/edition.py | octabytes/quran | 974d351cf5e6a12a28a5ac9f29c8d2753ae6dd86 | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from quran.domain.entity import Entity
| 14.866667 | 38 | 0.690583 |
a75778c132db31042c63da3f963565d091dded6a | 1,231 | py | Python | dataflow/core/visualization.py | alphamatic/amp | 5018137097159415c10eaa659a2e0de8c4e403d4 | [
"BSD-3-Clause"
] | 5 | 2021-08-10T23:16:44.000Z | 2022-03-17T17:27:00.000Z | dataflow/core/visualization.py | alphamatic/amp | 5018137097159415c10eaa659a2e0de8c4e403d4 | [
"BSD-3-Clause"
] | 330 | 2021-06-10T17:28:22.000Z | 2022-03-31T00:55:48.000Z | dataflow/core/visualization.py | alphamatic/amp | 5018137097159415c10eaa659a2e0de8c4e403d4 | [
"BSD-3-Clause"
] | 6 | 2021-06-10T17:20:32.000Z | 2022-03-28T08:08:03.000Z | """
Helper functions to visualize a graph in a notebook or save the plot to file.
Import as:
import dataflow.core.visualization as dtfcorvisu
"""
import IPython
import networkx as networ
import pygraphviz
import dataflow.core.dag as dtfcordag
import helpers.hdbg as hdbg
import helpers.hio as hio
def draw(dag: dtfcordag.DAG) -> IPython.core.display.Image:
    """
    Render a DAG inline in a notebook and return the PNG image object.
    """
    # Render via Graphviz 'dot' and wrap the raw PNG bytes for display.
    png_bytes = _extract_agraph_from_dag(dag).draw(format="png", prog="dot")
    return IPython.display.Image(png_bytes)
def draw_to_file(dag: dtfcordag.DAG, file_name: str = "graph.png") -> str:
    """
    Render the DAG as a PNG at `file_name` and return the path written.
    """
    graphviz_graph = _extract_agraph_from_dag(dag)
    # Make sure the target directory exists before Graphviz writes the file.
    hio.create_enclosing_dir(file_name)
    graphviz_graph.draw(file_name, prog="dot")
    return file_name
def _extract_agraph_from_dag(dag: dtfcordag.DAG) -> pygraphviz.agraph.AGraph:
    """
    Convert a DAG wrapper into a pygraphviz `AGraph`.
    """
    # Unwrap and sanity-check the underlying networkx graph.
    hdbg.dassert_isinstance(dag, dtfcordag.DAG)
    nx_graph = dag.dag
    hdbg.dassert_isinstance(nx_graph, networ.Graph)
    # Hand the networkx graph over to pygraphviz.
    return networ.nx_agraph.to_agraph(nx_graph)
| 25.122449 | 77 | 0.707555 |
a758f541fb2e3c2ec9bc820cd471a439cd2c4443 | 7,714 | py | Python | scripts/pixel_error.py | ling-k/STOVE | fcf36139f41dee5ef892e90dedf1d2208da6fd3c | [
"MIT"
] | 31 | 2019-10-14T01:48:44.000Z | 2022-01-20T19:19:14.000Z | scripts/pixel_error.py | ling-k/STOVE | fcf36139f41dee5ef892e90dedf1d2208da6fd3c | [
"MIT"
] | 3 | 2020-05-08T11:01:25.000Z | 2021-05-24T07:50:10.000Z | scripts/pixel_error.py | ling-k/STOVE | fcf36139f41dee5ef892e90dedf1d2208da6fd3c | [
"MIT"
] | 9 | 2020-01-13T11:25:16.000Z | 2021-05-10T06:04:08.000Z | """Calculate pixel errors for a single run or all runs in an experiment dir."""
import torch
import itertools
import numpy as np
import imageio
import argparse
import os
import glob
from model.main import main as restore_model
from model.utils.utils import bw_transform
os.environ["CUDA_VISIBLE_DEVICES"] = '-1'
def run_fmt(x, with_under=False):
    """Turn an integer run id into a run-folder name, e.g. 7 -> 'run007'.

    With ``with_under=True`` the underscore variant ('run_007') is produced.
    Ids are zero-padded to at least three digits.
    """
    prefix = 'run_' if with_under else 'run'
    return '{}{:03d}'.format(prefix, x)
def get_pixel_error(restore, linear=False, path='', real_mpe=False, checkpoint='checkpoint'):
    """Restore a model and calculate error from reconstructions.

    :param restore: run folder from which to restore the trained model.
    :param linear: if True, replace the learned rollout by constant-velocity
        linear extrapolation of the last visible state (baseline).
    :param path: folder in which the diagnostic GIF is written.
    :param real_mpe: if True, reconstruct images via the model's MPE path.
    :param checkpoint: checkpoint file name inside the run folder.
    :returns: (mse, mse_states) per-timestep pixel MSE and matched position
        error tensors, or None for supairvised runs.
    """
    # do not write any new runs
    extras = {
        'nolog': True,
        'checkpoint_path': os.path.join(restore, checkpoint)}
    self = restore_model(restore=restore, extras=extras)
    # ignore supairvised runs for now
    if self.c.supairvised is True:
        return None
    # make sure all runs access the same data!
    print(self.c.testdata)
    step = self.c.frame_step
    visible = self.c.num_visible
    batch_size = self.c.batch_size
    skip = self.c.skip
    # make sure this is the same
    print(step, visible, batch_size, skip)
    long_rollout_length = self.c.num_frames // step - visible
    lrl = long_rollout_length
    total_images = self.test_dataset.total_img
    total_labels = self.test_dataset.total_data
    # apply step and batch size once
    total_images = total_images[:batch_size, ::step]
    total_labels = total_labels[:batch_size, ::step]
    # true data to compare against
    true_images = total_images[:, skip:(visible+long_rollout_length)]
    true_images = torch.tensor(true_images).to(self.c.device).type(self.c.dtype)

    # First obtain reconstruction of input.
    stove_input = total_images[:, :visible]
    stove_input = torch.tensor(stove_input).to(self.c.device).type(self.c.dtype)
    _, prop_dict2, _ = self.stove(stove_input, self.c.plot_every)
    z_recon = prop_dict2['z']

    # Use last state to do rollout
    if not linear:
        z_pred, _ = self.stove.rollout(z_recon[:, -1], long_rollout_length)
    else:
        # propagate last speed: x(t) = x0 + v0 * t for each rollout step
        v = z_recon[:, -1, :, 4:6].unsqueeze(1)
        v = v.repeat(1, long_rollout_length, 1, 1)
        t = torch.arange(1, long_rollout_length+1)
        t = t.repeat(v.shape[0], *v.shape[2:], 1).permute(0, 3, 1, 2).double()
        dx = v * t
        new_x = z_recon[:, -1, :, 2:4].unsqueeze(1)
        new_x = new_x.repeat(1, long_rollout_length, 1, 1) + dx
        z_pred = torch.cat(
            [z_recon[:, -1, :, :2].unsqueeze(1).repeat(1, lrl, 1, 1),
             new_x,
             v,
             z_recon[:, -1, :, 6:].unsqueeze(1).repeat(1, lrl, 1, 1)],
            -1
        )

    z_seq = torch.cat([z_recon, z_pred], 1)

    # sigmoid positions to make errors comparable
    if linear:
        print('clamp positions to 0.9')
        frame_lim = 0.8 if self.c.coord_lim == 10 else 0.9
        z_seq = torch.cat([
            z_seq[..., :2],
            torch.clamp(z_seq[..., 2:4], -frame_lim, frame_lim),
            z_seq[..., 6:]], -1)

    # Simple Reconstruction of Sequences
    # stove_input = total_images[:10]
    # stove_input = torch.tensor(stove_input).to(self.c.device).type(self.c.dtype)
    # elbo, prop_dict2, _ = self.stove(stove_input, self.c.plot_every)
    # z_recon = prop_dict2['z']
    # if self.c.debug_bw:
    #     img = stove_input.sum(2)
    #     img = torch.clamp(img, 0, 1)
    #     img = torch.unsqueeze(img, 2)
    #     model_images = self.stove.reconstruct_from_z(
    #         z_recon, img[:, skip:], max_activation=False, single_image=False)

    # use mpe to get reconstructed images
    # NOTE(review): if real_mpe is True but self.c.debug_bw is False, `img`
    # below is never assigned and the reconstruct_from_z call raises
    # NameError -- confirm whether real_mpe implies debug_bw upstream.
    if real_mpe:
        if self.c.debug_bw:
            img = stove_input[:, skip].sum(1)
            img = torch.clamp(img, 0, 1)
            img = torch.unsqueeze(img, 1)
        model_images = self.stove.reconstruct_from_z(
            z_seq, img, max_activation=False, single_image=True)
    else:
        model_images = self.stove.reconstruct_from_z(z_seq)

    if self.c.debug_bw:
        true_images = bw_transform(true_images)

    model_images = torch.clamp(model_images, 0, 1)
    # Per-timestep pixel MSE, averaged over batch/channel/height/width.
    mse = torch.mean(((true_images - model_images)**2), dim=(0, 2, 3, 4))
    # Save a short GIF of the first few reconstructed sequences.
    plot_sample = model_images[:10, :, 0].detach().cpu().numpy()
    plot_sample = (255 * plot_sample.reshape(-1, self.c.height, self.c.width))
    plot_sample = plot_sample.astype(np.uint8)
    filename = 'linear_' if linear else ''
    filename += 'pixel_error_sample.gif'
    filepath = os.path.join(path, filename)
    print('Saving gif to ', filepath)
    imageio.mimsave(
        filepath, plot_sample, fps=24)

    # also log state differences
    # bug_potential... for some reason self.c.coord_lim is 30 but max
    # true_states is 10 for gravity
    true_states = total_labels[:, skip:(visible+long_rollout_length)]
    print(true_states.max(), ' is coord max.')
    true_states = torch.tensor(true_states).to(self.c.device).type(self.c.dtype)

    # Predicted objects are unordered: find the object permutation that best
    # matches ground truth over the first 5 frames, then score with it.
    permutations = list(itertools.permutations(range(0, self.c.num_obj)))
    errors = []
    for perm in permutations:
        error = ((true_states[:, :5, :, :2]-z_seq[:, :5, perm, 2:4])**2).sum(-1)
        error = torch.sqrt(error).mean((1, 2))
        errors += [error]
    errors = torch.stack(errors, 1)
    _, idx = errors.min(1)
    selector = list(zip(range(idx.shape[0]), idx.cpu().tolist()))
    pos_matched = [z_seq[i, :, permutations[j]] for i, j in selector]
    pos_matched = torch.stack(pos_matched, 0)
    # Per-timestep Euclidean position error under the best matching.
    mse_states = torch.sqrt(((
        true_states[..., :2] - pos_matched[..., 2:4])**2).sum(-1)).mean((0, 2))

    return mse, mse_states
def main(script_args):
    """Parse arguments, find runs, execute pixel_error.

    :param script_args: argv-style list passed to argparse (excluding argv[0]).
    Appends one CSV row per run to <path>/test/[linear_]pixel_errors.csv and
    states_[linear_]pixel_errors.csv unless --no-save is given.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-p", "--path", type=str,
        help="Set folder from which to create pixel errors for." +
        "Must contain runs of model.")
    parser.add_argument(
        '--linear', action='store_true',
        help='create linear errors')
    parser.add_argument(
        '--no-save', dest='no_save', action='store_true')
    parser.add_argument(
        '--real-mpe', dest='real_mpe', action='store_true')
    parser.add_argument(
        '--checkpoint', type=str, default='checkpoint')
    args = parser.parse_args(script_args)
    filename = 'pixel_errors.csv'
    if args.linear:
        filename = 'linear_' + filename
    # Either a single run folder was given, or a parent folder containing
    # several run* subfolders.
    if 'run' not in args.path[-10:]:
        restores = glob.glob(args.path+'run*')
        restores = sorted(restores)
    else:
        restores = [args.path]
    print(restores)
    if len(restores) == 0:
        raise ValueError('No runs found in path {}.'.format(args.path))
    # debug
    # mse, mse_states = get_pixel_error(
    #    restores[0], args.linear, args.path, args.real_mpe, args.checkpoint)
    # return 0
    for restore in restores:
        # A single broken run should not abort the whole sweep.
        try:
            mse, mse_states = get_pixel_error(
                restore, args.linear, args.path, args.real_mpe, args.checkpoint)
        except Exception as e:
            print(e)
            print('Not possible for run {}.'.format(restore))
            continue
        mse = mse.cpu().detach().numpy()
        if args.no_save:
            continue
        save_dir = os.path.join(args.path, 'test')
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        # Append one comma-separated row of per-timestep errors per run.
        with open(os.path.join(save_dir, filename), 'a') as f:
            f.write(','.join(['{:.6f}'.format(i) for i in mse])+'\n')
        with open(os.path.join(save_dir, 'states_'+filename), 'a') as f:
            f.write(','.join(['{:.6f}'.format(i) for i in mse_states])+'\n')
| 34.4375 | 93 | 0.621208 |
a75a94acdbd36e7b6da2d3d837a50b906558f9b8 | 770 | py | Python | users/admin.py | JVacca12/FIRST | e3906209cae1198e1fbda4d00bc0a906e8294a69 | [
"MIT"
] | null | null | null | users/admin.py | JVacca12/FIRST | e3906209cae1198e1fbda4d00bc0a906e8294a69 | [
"MIT"
] | null | null | null | users/admin.py | JVacca12/FIRST | e3906209cae1198e1fbda4d00bc0a906e8294a69 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
"""User admin classes."""
# Django
from django.contrib import admin
# Models
from users.models import User | 23.333333 | 154 | 0.633766 |
a75aa0bd60c43a11405b09d22589cf2d9c586cc5 | 3,469 | py | Python | mud/migrations/0001_initial.py | lambda-mud-cs18/backend | 060c5c1a317d8b6557e778cd539e75f24eff05dd | [
"MIT"
] | 1 | 2022-01-12T17:44:26.000Z | 2022-01-12T17:44:26.000Z | mud/migrations/0001_initial.py | lambda-mud-cs18/backend | 060c5c1a317d8b6557e778cd539e75f24eff05dd | [
"MIT"
] | 8 | 2020-02-12T01:12:46.000Z | 2022-02-10T10:17:28.000Z | mud/migrations/0001_initial.py | lambda-mud-cs18/backend | 060c5c1a317d8b6557e778cd539e75f24eff05dd | [
"MIT"
] | 2 | 2022-01-12T17:44:29.000Z | 2022-01-12T17:44:29.000Z | # Generated by Django 2.2.3 on 2019-07-31 17:10
from django.db import migrations, models
| 39.420455 | 84 | 0.51254 |
a75c1979034dbafe33e7945478e87745ce9ce8e5 | 918 | py | Python | scripts/quest/q22504s.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 54 | 2019-04-16T23:24:48.000Z | 2021-12-18T11:41:50.000Z | scripts/quest/q22504s.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 3 | 2019-05-19T15:19:41.000Z | 2020-04-27T16:29:16.000Z | scripts/quest/q22504s.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 49 | 2020-11-25T23:29:16.000Z | 2022-03-26T16:20:24.000Z | sm.setSpeakerID(1013000)
sm.sendNext("Ugh. This isn't going to work. I need something else. No plants. No meat. What, you have no idea? But you're the master, and you're older than me, too. You must know what'd be good for me!")
sm.setPlayerAsSpeaker()
sm.sendSay("#bBut I don't. It's not like age has anything to do with this...")
sm.setSpeakerID(1013000)
if sm.sendAskAccept("Since you're older, you must be more experienced in the world, too. Makes sense that you'd know more than me. Oh, fine. I'll ask someone who's even older than you, master!"):
if not sm.hasQuest(parentID):
sm.startQuest(parentID)
sm.setPlayerAsSpeaker()
sm.sendSayOkay("#b#b(You already asked Dad once, but you don't have any better ideas. Time to ask him again!)")
else:
sm.sendNext("No use trying to find an answer to this on my own. I'd better look for #bsomeone older and wiser than master#k!")
sm.dispose() | 61.2 | 203 | 0.721133 |
a75cf13072fe0194f0d08765f3c331975a5d8df7 | 424 | py | Python | user/migrations/0002_user_photo.py | martinlehoux/erp-reloaded | db7dea603095dec558f4b0ad9a0d2dbd20f8703c | [
"MIT"
] | null | null | null | user/migrations/0002_user_photo.py | martinlehoux/erp-reloaded | db7dea603095dec558f4b0ad9a0d2dbd20f8703c | [
"MIT"
] | 5 | 2021-04-08T18:54:04.000Z | 2021-06-10T18:37:26.000Z | user/migrations/0002_user_photo.py | martinlehoux/erp-reloaded | db7dea603095dec558f4b0ad9a0d2dbd20f8703c | [
"MIT"
] | null | null | null | # Generated by Django 3.0.3 on 2020-03-01 00:58
from django.db import migrations, models
import user.models
| 21.2 | 88 | 0.606132 |
a75f0071595f1cf5e30f78a377181f6b55570f76 | 61 | py | Python | core/models/__init__.py | Bhaskers-Blu-Org1/bLVNet-TAM | feadcd3a1a25723dc28bed867580032181e824a3 | [
"Apache-2.0"
] | 62 | 2019-10-22T14:52:30.000Z | 2021-07-27T12:07:38.000Z | core/models/__init__.py | Bhaskers-Blu-Org1/bLVNet-TAM | feadcd3a1a25723dc28bed867580032181e824a3 | [
"Apache-2.0"
] | 6 | 2019-12-16T06:03:42.000Z | 2020-08-31T07:59:04.000Z | core/models/__init__.py | IBM/bLVNet-TAM | feadcd3a1a25723dc28bed867580032181e824a3 | [
"Apache-2.0"
] | 16 | 2019-11-02T06:49:19.000Z | 2021-12-30T14:51:48.000Z |
from .blvnet_tam import bLVNet_TAM
__all__ = ['bLVNet_TAM'] | 15.25 | 34 | 0.770492 |
a760fe286388453e9bf13c54cc23324198919723 | 438 | py | Python | monodepth/geometry/utils.py | vguizilini/packnet-sfm | e462716837f24c11cb227ca99fe30bcf12b3cc56 | [
"MIT"
] | 1 | 2020-04-30T07:32:57.000Z | 2020-04-30T07:32:57.000Z | monodepth/geometry/utils.py | muzi2045/packnet-sfm | fec6d0b493b784cabe5e6bf9c65b996a83c63fe1 | [
"MIT"
] | null | null | null | monodepth/geometry/utils.py | muzi2045/packnet-sfm | fec6d0b493b784cabe5e6bf9c65b996a83c63fe1 | [
"MIT"
] | null | null | null | # Copyright 2020 Toyota Research Institute. All rights reserved.
"""
Geometry utilities
"""
import numpy as np
def invert_pose_numpy(T):
"""
'Invert' 4x4 extrinsic matrix
Parameters
----------
T: 4x4 matrix (world to camera)
Returns
-------
4x4 matrix (camera to world)
"""
Tc = np.copy(T)
R, t = Tc[:3, :3], Tc[:3, 3]
Tc[:3, :3], Tc[:3, 3] = R.T, - np.matmul(R.T, t)
return Tc
| 16.222222 | 65 | 0.547945 |
a7624496ee4975eb04a3c005275217a54323fb5d | 27,209 | py | Python | minesweeper.py | MrAttoAttoAtto/Cool-Programming-Project | 68214d089b612fdcca7fe76dce3464edec35ce2b | [
"MIT"
] | null | null | null | minesweeper.py | MrAttoAttoAtto/Cool-Programming-Project | 68214d089b612fdcca7fe76dce3464edec35ce2b | [
"MIT"
] | null | null | null | minesweeper.py | MrAttoAttoAtto/Cool-Programming-Project | 68214d089b612fdcca7fe76dce3464edec35ce2b | [
"MIT"
] | null | null | null | #Minesweeper!
from tkinter import *
import random, time, math, threading, os.path, os
#Tkinter Class
def openMain(caller, xLength=None, yLength=None, percentOfBombs=None, winChoice=None, master=None): #restarts it outside of the class
global minesweeper
if master != None: #if it has been called from the play again box...
minesweeper = MinesweeperMain(master.xLength, master.yLength, master.percentOfBombs, caller, master.winChoice) #use the old configs
else: #else
minesweeper = MinesweeperMain(xLength, yLength, percentOfBombs, caller, winChoice) #use the new configs
if __name__ == '__main__':
start = StartBox()
minesweeper = None
| 40.429421 | 183 | 0.592745 |
a7625f42a7dd6cbf1419217f4da8ae9f6f00c5f6 | 5,431 | py | Python | cannlytics/utils/scraper.py | mindthegrow/cannlytics | c266bc1169bef75214985901cd3165f415ad9ba7 | [
"MIT"
] | 7 | 2021-05-31T15:30:22.000Z | 2022-02-05T14:12:31.000Z | cannlytics/utils/scraper.py | mindthegrow/cannlytics | c266bc1169bef75214985901cd3165f415ad9ba7 | [
"MIT"
] | 17 | 2021-06-09T01:04:27.000Z | 2022-03-18T14:48:12.000Z | cannlytics/utils/scraper.py | mindthegrow/cannlytics | c266bc1169bef75214985901cd3165f415ad9ba7 | [
"MIT"
] | 5 | 2021-06-07T13:52:33.000Z | 2021-08-04T00:09:39.000Z | # -*- coding: utf-8 -*-
"""
Scrape Website Data | Cannlytics
Copyright 2021 Cannlytics
Author: Keegan Skeate <keegan@cannlytics.com>
Created: 1/10/2021
License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Resources:
https://stackoverflow.com/questions/54416896/how-to-scrape-email-and-phone-numbers-from-a-list-of-websites
https://hackersandslackers.com/scraping-urls-with-beautifulsoup/
TODO:
Improve with requests-html - https://github.com/psf/requests-html
- Get #about
- Get absolute URLs
- Search for text (prices/analyses)
r.html.search('Python is a {} language')[0]
"""
import re
import requests
from bs4 import BeautifulSoup
def get_page_metadata(url):
"""Scrape target URL for metadata."""
headers = {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Methods": "GET",
"Access-Control-Allow-Headers": "Content-Type",
"Access-Control-Max-Age": "3600",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0",
}
# Handle URLs without http beginning
if not url.startswith("http"):
url = "http://" + url
response = requests.get(url, headers=headers)
html = BeautifulSoup(response.content, "html.parser")
metadata = {
"description": get_description(html),
"image_url": get_image(html), # FIXME: Append URL if relative path.
"favicon": get_favicon(html, url),
"brand_color": get_theme_color(html),
}
return response, html, metadata
def get_description(html):
"""Scrape page description."""
description = None
if html.find("meta", property="description"):
description = html.find("meta", property="description").get("content")
elif html.find("meta", property="og:description"):
description = html.find("meta", property="og:description").get("content")
elif html.find("meta", property="twitter:description"):
description = html.find("meta", property="twitter:description").get("content")
elif html.find("p"):
description = html.find("p").contents
if isinstance(description, list):
try:
description = description[0]
except IndexError:
pass
return description
def get_image(html):
"""Scrape share image."""
image = None
if html.find("meta", property="image"):
image = html.find("meta", property="image").get("content")
elif html.find("meta", property="og:image"):
image = html.find("meta", property="og:image").get("content")
elif html.find("meta", property="twitter:image"):
image = html.find("meta", property="twitter:image").get("content")
elif html.find("img", src=True):
image = html.find_all("img")[0].get("src")
return image
def get_favicon(html, url):
"""Scrape favicon."""
if html.find("link", attrs={"rel": "icon"}):
favicon = html.find("link", attrs={"rel": "icon"}).get("href")
elif html.find("link", attrs={"rel": "shortcut icon"}):
favicon = html.find("link", attrs={"rel": "shortcut icon"}).get("href")
else:
favicon = f'{url.rstrip("/")}/favicon.ico'
return favicon
def get_theme_color(html):
"""Scrape brand color."""
if html.find("meta", property="theme-color"):
color = html.find("meta", property="theme-color").get("content")
return color
return None
def get_phone(html, response):
"""Scrape phone number."""
try:
phone = html.select("a[href*=callto]")[0].text
return phone
except:
pass
try:
phone = re.findall(
r"\(?\b[2-9][0-9]{2}\)?[-][2-9][0-9]{2}[-][0-9]{4}\b", response.text
)[0]
return phone
except:
pass
try:
phone = re.findall(
r"\(?\b[2-9][0-9]{2}\)?[-. ]?[2-9][0-9]{2}[-. ]?[0-9]{4}\b", response.text
)[-1]
return phone
except:
print("Phone number not found")
phone = ""
return phone
def get_email(html, response):
"""Get email."""
try:
email = re.findall(
r"([a-zA-Z0-9._-]+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9_-]+)", response.text
)[-1]
return email
except:
pass
try:
email = html.select("a[href*=mailto]")[-1].text
except:
print("Email not found")
email = ""
return email
def find_lab_address():
"""
TODO: Tries to find a lab's address from their website, then Google Maps.
"""
street, city, state, zipcode = None, None, None, None
return street, city, state, zipcode
def find_lab_linkedin():
"""
TODO: Tries to find a lab's LinkedIn URL. (Try to find LinkedIn on homepage?)
"""
return ""
def find_lab_url():
"""
TODO: Find a lab's website URL. (Google search for name?)
"""
return ""
def clean_string_columns(df):
"""Clean string columns in a dataframe."""
for column in df.columns:
try:
df[column] = df[column].str.title()
df[column] = df[column].str.replace("Llc", "LLC")
df[column] = df[column].str.replace("L.L.C.", "LLC")
df[column] = df[column].str.strip()
except AttributeError:
pass
return df
| 30.511236 | 110 | 0.598048 |
a762725a417c914c2de8c1cfaad398234b972ef4 | 22,326 | py | Python | acsm/utils/bird_vis.py | eldar/acsm | 04069e8bb4c12185473dc10c3355e5367fa98968 | [
"Apache-2.0"
] | 52 | 2020-04-02T12:35:55.000Z | 2022-03-11T07:47:30.000Z | acsm/utils/bird_vis.py | eldar/acsm | 04069e8bb4c12185473dc10c3355e5367fa98968 | [
"Apache-2.0"
] | 8 | 2020-06-04T07:34:34.000Z | 2021-09-18T21:17:26.000Z | acsm/utils/bird_vis.py | eldar/acsm | 04069e8bb4c12185473dc10c3355e5367fa98968 | [
"Apache-2.0"
] | 6 | 2020-07-12T02:12:18.000Z | 2021-03-06T05:03:33.000Z | """
Code borrowed from
https://github.com/akanazawa/cmr/blob/master/utils/bird_vis.py
Visualization helpers specific to birds.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from torch.autograd import Variable
import numpy as np
import os.path as osp
import cv2
import pdb
from . import cub_parse
from ..nnutils.nmr import NeuralRenderer
from ..utils import transformations
from . import visutil
import pdb
def merge_textures(foreground, background,):
'''
3, H, W
Assume foreground to have 1 in the A channel and 0 for the background.
'''
texture = foreground * (foreground[3,None,...] > 0.5) + background * (foreground[3,None,...] <0.5)
return texture
def kp2im(kp, img, radius=None):
"""
Input is numpy array or torch.cuda.Tensor
img can be H x W, H x W x C, or C x H x W
kp is |KP| x 2
"""
kp_norm = convert2np(kp)
img = convert2np(img)
if img.ndim == 2:
img = np.dstack((img, ) * 3)
# Make it H x W x C:
elif img.shape[0] == 1 or img.shape[0] == 3:
img = np.transpose(img, (1, 2, 0))
if img.shape[2] == 1: # Gray2RGB for H x W x 1
img = np.dstack((img, ) * 3)
# kp_norm is still in [-1, 1], converts it to image coord.
kp = (kp_norm[:, :2] + 1) * 0.5 * img.shape[0]
if kp_norm.shape[1] == 3:
vis = kp_norm[:, 2] > 0
kp[~vis] = 0
kp = np.hstack((kp, vis.reshape(-1, 1)))
else:
vis = np.ones((kp.shape[0], 1))
kp = np.hstack((kp, vis))
kp_img = draw_kp(kp, img, radius=radius)
return kp_img
def draw_kp(kp, img, radius=None):
"""
kp is 15 x 2 or 3 numpy.
img can be either RGB or Gray
Draws bird points.
"""
if radius is None:
radius = max(4, (np.mean(img.shape[:2]) * 0.01).astype(int))
num_kp = kp.shape[0]
# Generate colors
import pylab
cm = pylab.get_cmap('gist_rainbow')
colors = 255 * np.array([cm(1. * i / num_kp)[:3] for i in range(num_kp)])
white = np.ones(3) * 255
image = img.copy()
if isinstance(image.reshape(-1)[0], np.float32):
# Convert to 255 and np.uint8 for cv2..
image = (image * 255).astype(np.uint8)
kp = np.round(kp).astype(int)
for kpi, color in zip(kp, colors):
# This sometimes causes OverflowError,,
if kpi[2] == 0:
continue
cv2.circle(image, (kpi[0], kpi[1]), radius + 1, white, -1)
cv2.circle(image, (kpi[0], kpi[1]), radius, color, -1)
# import matplotlib.pyplot as plt
# plt.ion()
# plt.clf()
# plt.imshow(image)
# import ipdb; ipdb.set_trace()
return image
| 37.585859 | 150 | 0.61874 |
a7641eec8122f15991dc897dc20ebeb0e83b0d20 | 10,764 | py | Python | gatenlp/corpora/files.py | joancf/python-gatenlp | 21441d72ded19e9348052e99ac5bc1fc6af7ab6e | [
"Apache-2.0"
] | 30 | 2020-04-18T12:28:15.000Z | 2022-02-18T21:31:18.000Z | gatenlp/corpora/files.py | joancf/python-gatenlp | 21441d72ded19e9348052e99ac5bc1fc6af7ab6e | [
"Apache-2.0"
] | 133 | 2019-10-16T07:41:59.000Z | 2022-03-31T07:27:07.000Z | gatenlp/corpora/files.py | joancf/python-gatenlp | 21441d72ded19e9348052e99ac5bc1fc6af7ab6e | [
"Apache-2.0"
] | 4 | 2021-01-20T08:12:19.000Z | 2021-10-21T13:29:44.000Z | """
Module that defines Corpus and DocumentSource/DocumentDestination classes which access documents
as lines or parts in a file.
"""
import json
from gatenlp.urlfileutils import yield_lines_from
from gatenlp.document import Document
from gatenlp.corpora.base import DocumentSource, DocumentDestination
from gatenlp.corpora.base import MultiProcessingAble
| 40.164179 | 118 | 0.590673 |
a7646b2e354d22868d6a6f4cc986b8c2069e186b | 709 | py | Python | src/ch5-viewmodels/web/services/AccountPageService.py | saryeHaddadi/Python.Course.WebAppFastAPI | ddc1f1850473c227e715c8ecd2afd741e53c4680 | [
"MIT"
] | null | null | null | src/ch5-viewmodels/web/services/AccountPageService.py | saryeHaddadi/Python.Course.WebAppFastAPI | ddc1f1850473c227e715c8ecd2afd741e53c4680 | [
"MIT"
] | null | null | null | src/ch5-viewmodels/web/services/AccountPageService.py | saryeHaddadi/Python.Course.WebAppFastAPI | ddc1f1850473c227e715c8ecd2afd741e53c4680 | [
"MIT"
] | null | null | null | import fastapi
from starlette.requests import Request
from web.viewmodels.account.AccountViewModel import AccountViewModel
from web.viewmodels.account.LoginViewModel import LoginViewModel
from web.viewmodels.account.RegisterViewModel import RegisterViewModel
router = fastapi.APIRouter()
| 22.870968 | 70 | 0.760226 |
a7659e9cd38acecd1d387852d0d503d7207e98a9 | 29,031 | py | Python | src/opserver/uveserver.py | madkiss/contrail-controller | 17f622dfe99f8ab4163436399e80f95dd564814c | [
"Apache-2.0"
] | null | null | null | src/opserver/uveserver.py | madkiss/contrail-controller | 17f622dfe99f8ab4163436399e80f95dd564814c | [
"Apache-2.0"
] | null | null | null | src/opserver/uveserver.py | madkiss/contrail-controller | 17f622dfe99f8ab4163436399e80f95dd564814c | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# UVEServer
#
# Operational State Server for UVEs
#
import gevent
import json
import copy
import xmltodict
import redis
import datetime
import sys
from opserver_util import OpServerUtils
import re
from gevent.coros import BoundedSemaphore
from pysandesh.util import UTCTimestampUsec
from pysandesh.connection_info import ConnectionState
from sandesh.viz.constants import UVE_MAP
# end get_uve
# end get_uve_regex
# end multi_uve_get
def get_uve_list(self, table, filters=None, parse_afilter=False,
is_alarm=False):
filters = filters or {}
uve_list = set()
kfilter = filters.get('kfilt')
if kfilter is not None:
patterns = set()
for filt in kfilter:
patterns.add(self.get_uve_regex(filt))
for redis_uve in self._redis_uve_list:
redish = redis.StrictRedis(host=redis_uve[0],
port=redis_uve[1],
password=self._redis_password, db=1)
try:
# For UVE queries, we wanna read both UVE and Alarm table
entries = redish.smembers('ALARM_TABLE:' + table)
if not is_alarm:
entries = entries.union(redish.smembers('TABLE:' + table))
for entry in entries:
info = (entry.split(':', 1)[1]).rsplit(':', 5)
uve_key = info[0]
if kfilter is not None:
kfilter_match = False
for pattern in patterns:
if pattern.match(uve_key):
kfilter_match = True
break
if not kfilter_match:
continue
src = info[1]
sfilter = filters.get('sfilt')
if sfilter is not None:
if sfilter != src:
continue
module = info[2]+':'+info[3]+':'+info[4]
mfilter = filters.get('mfilt')
if mfilter is not None:
if mfilter != module:
continue
typ = info[5]
tfilter = filters.get('cfilt')
if tfilter is not None:
if typ not in tfilter:
continue
if parse_afilter:
if tfilter is not None and len(tfilter[typ]):
valkey = "VALUES:" + table + ":" + uve_key + \
":" + src + ":" + module + ":" + typ
for afilter in tfilter[typ]:
attrval = redish.hget(valkey, afilter)
if attrval is not None:
break
if attrval is None:
continue
uve_list.add(uve_key)
except redis.exceptions.ConnectionError:
self._logger.error('Failed to connect to redis-uve: %s:%d' \
% (redis_uve[0], redis_uve[1]))
except Exception as e:
self._logger.error('Exception: %s' % e)
return set()
return uve_list
# end get_uve_list
# end UVEServer
def aggregate(self, key, flat, base_url = None):
'''
This function does parallel aggregation of this UVE's state.
It aggregates across all sources and return the global state of the UVE
'''
result = {}
try:
for typ in self._state[key].keys():
result[typ] = {}
for objattr in self._state[key][typ].keys():
if self._is_sum(self._state[key][typ][objattr]):
sum_res = self._sum_agg(self._state[key][typ][objattr])
if flat:
result[typ][objattr] = \
OpServerUtils.uve_attr_flatten(sum_res)
else:
result[typ][objattr] = sum_res
elif self._is_union(self._state[key][typ][objattr]):
union_res = self._union_agg(
self._state[key][typ][objattr])
conv_res = None
if union_res.has_key('@ulink') and base_url and \
union_res['list']['@type'] == 'string':
uterms = union_res['@ulink'].split(":",1)
# This is the linked UVE's table name
m_table = uterms[0]
if self._rev_map.has_key(m_table):
h_table = self._rev_map[m_table]
conv_res = []
sname = ParallelAggregator.get_list_name(union_res)
for el in union_res['list'][sname]:
lobj = {}
lobj['name'] = el
lobj['href'] = base_url + '/analytics/uves/' + \
h_table + '/' + el
if len(uterms) == 2:
lobj['href'] = lobj['href'] + '?cfilt=' + uterms[1]
else:
lobj['href'] = lobj['href'] + '?flat'
conv_res.append(lobj)
if flat:
if not conv_res:
result[typ][objattr] = \
OpServerUtils.uve_attr_flatten(union_res)
else:
result[typ][objattr] = conv_res
else:
result[typ][objattr] = union_res
elif self._is_append(self._state[key][typ][objattr]):
result[typ][objattr] = self._append_agg(
self._state[key][typ][objattr])
append_res = ParallelAggregator.consolidate_list(
result, typ, objattr)
if flat:
result[typ][objattr] =\
OpServerUtils.uve_attr_flatten(append_res)
else:
result[typ][objattr] = append_res
else:
default_res = self._default_agg(
self._state[key][typ][objattr])
if flat:
if (len(default_res) == 1):
result[typ][objattr] =\
OpServerUtils.uve_attr_flatten(
default_res[0][0])
else:
nres = []
for idx in range(len(default_res)):
nres.append(default_res[idx])
nres[idx][0] =\
OpServerUtils.uve_attr_flatten(
default_res[idx][0])
result[typ][objattr] = nres
else:
result[typ][objattr] = default_res
except KeyError:
pass
return result
if __name__ == '__main__':
uveserver = UVEServer(None, 0, None, None)
gevent.spawn(uveserver.run())
uve_state = json.loads(uveserver.get_uve("abc-corp:vn02", False))
print json.dumps(uve_state, indent=4, sort_keys=True)
| 41.711207 | 91 | 0.450484 |
a765ce6d1c1eea007b73c094feaef3cfb92302b9 | 6,559 | py | Python | tests/datasets/test_tonas.py | lucaspbastos/mirdata | e591c5411c41591e8606812df869dca1ad52ee0f | [
"BSD-3-Clause"
] | 224 | 2019-05-08T14:46:05.000Z | 2022-03-31T12:14:39.000Z | tests/datasets/test_tonas.py | oriolcolomefont/mirdata | e591c5411c41591e8606812df869dca1ad52ee0f | [
"BSD-3-Clause"
] | 492 | 2019-04-08T16:59:33.000Z | 2022-01-19T13:50:56.000Z | tests/datasets/test_tonas.py | oriolcolomefont/mirdata | e591c5411c41591e8606812df869dca1ad52ee0f | [
"BSD-3-Clause"
] | 46 | 2019-04-11T15:12:18.000Z | 2022-01-19T17:33:50.000Z | import numpy as np
from tests.test_utils import run_track_tests
from mirdata import annotations
from mirdata.datasets import tonas
TEST_DATA_HOME = "tests/resources/mir_datasets/tonas"
| 30.649533 | 96 | 0.633938 |
a765ee4d5ce159cb94158867be1e207d0bdc988c | 1,064 | py | Python | pycreds.py | Ennovar/aws-creds-test | fcc5c10c8cfb79bb0ea0fd52f2e2f137efd8a9ce | [
"Apache-2.0"
] | 7 | 2017-06-13T15:55:23.000Z | 2019-05-23T18:52:00.000Z | pycreds.py | Ennovar/aws-creds-test | fcc5c10c8cfb79bb0ea0fd52f2e2f137efd8a9ce | [
"Apache-2.0"
] | 2 | 2019-02-16T12:56:33.000Z | 2020-07-02T19:32:58.000Z | pycreds.py | Ennovar/aws-creds-test | fcc5c10c8cfb79bb0ea0fd52f2e2f137efd8a9ce | [
"Apache-2.0"
] | 8 | 2017-05-17T22:46:07.000Z | 2022-03-11T14:27:56.000Z | import os
import hashlib
import getpass
import hmac
import botocore.session
import botocore.exceptions
if __name__ == '__main__':
main()
| 30.4 | 72 | 0.656015 |
a765f6c349621f1e0308c3686c2a549868853c7d | 1,854 | py | Python | sopel_modules/urban_dictionary/urbandictionary.py | capsterx/sopel-urbandictionary | 188a54badc64c4626b1413dfab93ee685f543cf1 | [
"MIT"
] | null | null | null | sopel_modules/urban_dictionary/urbandictionary.py | capsterx/sopel-urbandictionary | 188a54badc64c4626b1413dfab93ee685f543cf1 | [
"MIT"
] | 1 | 2021-01-10T06:53:49.000Z | 2021-01-13T02:03:30.000Z | sopel_modules/urban_dictionary/urbandictionary.py | capsterx/sopel-urbandictionary | 188a54badc64c4626b1413dfab93ee685f543cf1 | [
"MIT"
] | null | null | null | from sopel.module import commands, example
from sopel import web
import sopel.module
import socket
import re
import urbandictionary as ud
BOLD=chr(0x02)
ITALICS=chr(0x1D)
UNDERLINE=chr(0x1F)
| 29.903226 | 119 | 0.651564 |
a7660deda124d1efd2085f69810453398abdc730 | 324 | py | Python | Aula01 e exercicios/exercicio_06.py | Dorcival/PYTHON | 0dc3fa53699d40b21c6ed721a190ffb4f8404345 | [
"MIT"
] | null | null | null | Aula01 e exercicios/exercicio_06.py | Dorcival/PYTHON | 0dc3fa53699d40b21c6ed721a190ffb4f8404345 | [
"MIT"
] | null | null | null | Aula01 e exercicios/exercicio_06.py | Dorcival/PYTHON | 0dc3fa53699d40b21c6ed721a190ffb4f8404345 | [
"MIT"
] | null | null | null | # Conversor de CELSIUS para FAHRENHEIT v.0.1
# Por Dorcival Leite 202003362174
import time
print("CONVERTER TEMPERATURA DE CELSIUS PARA FAHRENHEIT\n")
c = float(input("Digite a temperatura em CELSIUS: "))
f = float((9 * c)/5)+32
print("\nA temperatura de", c, "graus CELSIUS igual a", f, "graus FAHRENHEIT")
time.sleep(20) | 40.5 | 80 | 0.734568 |
a7687184494cf93d9f5d684cfc40811e7667b3e4 | 772 | py | Python | multranslate.py | anoidgit/NMTServer | f608695c4c1f5319fb3c56f218b1d78056861c62 | [
"Apache-2.0"
] | 3 | 2017-08-29T22:56:38.000Z | 2017-12-12T06:20:35.000Z | multranslate.py | anoidgit/NMTServer | f608695c4c1f5319fb3c56f218b1d78056861c62 | [
"Apache-2.0"
] | 1 | 2017-09-10T08:02:24.000Z | 2017-09-12T01:03:25.000Z | multranslate.py | anoidgit/NMTServer | f608695c4c1f5319fb3c56f218b1d78056861c62 | [
"Apache-2.0"
] | null | null | null | #encoding: utf-8
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
import zmq, sys, json
import seg
import detoken
import datautils
from random import sample
serverl=["tcp://127.0.0.1:"+str(port) for port in xrange(5556,5556+4)]
| 19.794872 | 182 | 0.75 |
a769f370668047fa9ac58fd30c92b5d2a06a8ba0 | 10,995 | py | Python | face_detector.py | duwizerak/Keras_insightface | dae425d7ef5dfeccb50a8ddca5814a0901b2957a | [
"MIT"
] | null | null | null | face_detector.py | duwizerak/Keras_insightface | dae425d7ef5dfeccb50a8ddca5814a0901b2957a | [
"MIT"
] | null | null | null | face_detector.py | duwizerak/Keras_insightface | dae425d7ef5dfeccb50a8ddca5814a0901b2957a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from glob2 import glob
from skimage import transform
from skimage.io import imread, imsave
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
FILE_HASH = {"yolov5s_face_dynamic": "e7854a5cae48ded05b3b31aa93765f0d"}
DEFAULT_DETECTOR = "https://github.com/leondgarse/Keras_insightface/releases/download/v1.0.0/yolov5s_face_dynamic.h5"
DEFAULT_ANCHORS = np.array(
[
[[0.5, 0.625], [1.0, 1.25], [1.625, 2.0]],
[[1.4375, 1.8125], [2.6875, 3.4375], [4.5625, 6.5625]],
[[4.5625, 6.781199932098389], [7.218800067901611, 9.375], [10.468999862670898, 13.531000137329102]],
],
dtype="float32",
)
DEFAULT_STRIDES = np.array([8, 16, 32], dtype="float32")
if __name__ == "__main__":
import sys
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"input_path",
type=str,
default=None,
help="Could be: 1. Data path, containing images in class folders; 2. image folder path, containing multiple images; 3. jpg / png image path",
)
parser.add_argument("--use_scrfd", action="store_true", help="Use SCRFD instead of YoloV5FaceDetector")
args = parser.parse_known_args(sys.argv[1:])[0]
det = SCRFD() if args.use_scrfd else YoloV5FaceDetector()
if args.input_path.endswith(".jpg") or args.input_path.endswith(".png"):
print(">>>> Detection in image:", args.input_path)
imm = imread(args.input_path)
bbs, pps, ccs, nimgs = det.detect_in_image(imm)
det.show_result(imm, bbs, pps, ccs)
else:
print(">>>> Detection in folder:", args.input_path)
det.detect_in_folder(args.input_path)
| 46.588983 | 149 | 0.606639 |
a76a5f631eaf931f6a0d7bb1f2bdb5a30e7ae751 | 4,132 | py | Python | pyleus/configuration.py | earthmine/pyleus | 4d9c14c9df470be6ff544f2ad82985f37e582d80 | [
"Apache-2.0"
] | 166 | 2015-01-14T16:06:37.000Z | 2021-11-15T12:17:11.000Z | pyleus/configuration.py | WenbinTan/pyleus | 8ab87e2d18b8b6a7e0471ceefdbb3ff23a576cce | [
"Apache-2.0"
] | 105 | 2015-01-16T19:59:06.000Z | 2016-05-13T19:40:45.000Z | pyleus/configuration.py | WenbinTan/pyleus | 8ab87e2d18b8b6a7e0471ceefdbb3ff23a576cce | [
"Apache-2.0"
] | 62 | 2015-01-19T07:42:24.000Z | 2021-06-05T21:02:09.000Z | """Configuration defaults and loading functions.
Pyleus will look for configuration files in the following file paths in order
of increasing precedence. The latter configuration overrides the previous one.
#. /etc/pyleus.conf
#. ~/.config/pyleus.conf
#. ~/.pyleus.conf
You can always specify a configuration file when running any pyleus CLI command
as following:
``$ pyleus -c /path/to/config_file CMD``
This will override previous configurations.
Configuration file example
--------------------------
The following file contains all options you can configure for all pyleus
invocations.
.. code-block:: ini
[storm]
# path to Storm executable (pyleus will automatically look in PATH)
storm_cmd_path: /usr/share/storm/bin/storm
# optional: use -n option of pyleus CLI instead
nimbus_host: 10.11.12.13
# optional: use -p option of pyleus CLI instead
nimbus_port: 6628
# java options to pass to Storm CLI
jvm_opts: -Djava.io.tmpdir=/home/myuser/tmp
[build]
# PyPI server to use during the build of your topologies
pypi_index_url: http://pypi.ninjacorp.com/simple/
# always use system-site-packages for pyleus virtualenvs (default: false)
system_site_packages: true
# list of packages to always include in your topologies
include_packages: foo bar<4.0 baz==0.1
"""
from __future__ import absolute_import
import collections
import os
from pyleus import BASE_JAR_PATH
from pyleus.utils import expand_path
from pyleus.exception import ConfigurationError
from pyleus.compat import configparser
# Configuration files paths in order of increasing precedence
# Please keep in sync with module docstring
CONFIG_FILES_PATH = [
"/etc/pyleus.conf",
"~/.config/pyleus.conf",
"~/.pyleus.conf"
]
Configuration = collections.namedtuple(
"Configuration",
"base_jar config_file debug func include_packages output_jar \
pypi_index_url nimbus_host nimbus_port storm_cmd_path \
system_site_packages topology_path topology_jar topology_name verbose \
wait_time jvm_opts"
)
"""Namedtuple containing all pyleus configuration values."""
DEFAULTS = Configuration(
base_jar=BASE_JAR_PATH,
config_file=None,
debug=False,
func=None,
include_packages=None,
output_jar=None,
pypi_index_url=None,
nimbus_host=None,
nimbus_port=None,
storm_cmd_path=None,
system_site_packages=False,
topology_path="pyleus_topology.yaml",
topology_jar=None,
topology_name=None,
verbose=False,
wait_time=None,
jvm_opts=None,
)
def _validate_config_file(config_file):
"""Ensure that config_file exists and is a file."""
if not os.path.exists(config_file):
raise ConfigurationError("Specified configuration file not"
" found: {0}".format(config_file))
if not os.path.isfile(config_file):
raise ConfigurationError("Specified configuration file is not"
" a file: {0}".format(config_file))
def update_configuration(config, update_dict):
"""Update configuration with new values passed as dictionary.
:return: new configuration ``namedtuple``
"""
tmp = config._asdict()
tmp.update(update_dict)
return Configuration(**tmp)
def load_configuration(cmd_line_file):
"""Load configurations from the more generic to the
more specific configuration file. The latter configurations
override the previous one.
If a file is specified from command line, it is considered
the most specific.
:return: configuration ``namedtuple``
"""
config_files_hierarchy = [expand_path(c) for c in CONFIG_FILES_PATH]
if cmd_line_file is not None:
_validate_config_file(cmd_line_file)
config_files_hierarchy.append(cmd_line_file)
config = configparser.SafeConfigParser()
config.read(config_files_hierarchy)
configs = update_configuration(
DEFAULTS,
dict(
(config_name, config_value)
for section in config.sections()
for config_name, config_value in config.items(section)
)
)
return configs
| 28.694444 | 79 | 0.717086 |
a76b01dad5f2ae8289af31fef183a815e3bdd1f2 | 1,318 | py | Python | tests/conftest.py | sdrobert/pydrobert-param | d9f68bbcebfcc5ca909c639b03b959526a8b1631 | [
"Apache-2.0"
] | 1 | 2021-05-14T18:27:13.000Z | 2021-05-14T18:27:13.000Z | tests/conftest.py | sdrobert/pydrobert-param | d9f68bbcebfcc5ca909c639b03b959526a8b1631 | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | sdrobert/pydrobert-param | d9f68bbcebfcc5ca909c639b03b959526a8b1631 | [
"Apache-2.0"
] | null | null | null | from shutil import rmtree
from tempfile import mkdtemp
import pytest
import param
import pydrobert.param.serialization as serial
param.parameterized.warnings_as_exceptions = True
| 24.867925 | 56 | 0.677542 |
a76b06cca3e635c2f5710089e70486f5a0bbb87e | 1,942 | py | Python | tests/test_npaths.py | mtymchenko/npaths | 5019694784afee9f60ab0b5f0f0ef3051e113077 | [
"MIT"
] | null | null | null | tests/test_npaths.py | mtymchenko/npaths | 5019694784afee9f60ab0b5f0f0ef3051e113077 | [
"MIT"
] | null | null | null | tests/test_npaths.py | mtymchenko/npaths | 5019694784afee9f60ab0b5f0f0ef3051e113077 | [
"MIT"
] | null | null | null | import unittest
import numpy as np
import matplotlib.pyplot as plt
from npaths import NPathNode, Filter, Circulator
__all__ = [
'TestNPathNode',
'TestFilter',
'TestCirculator'
]
GHz = 1e9
ohm = 1
pF = 1e-12
freqs = np.linspace(0.001, 6, 500)*GHz
if __name__ == '__main__':
unittest.main()
| 21.577778 | 56 | 0.549949 |
a76bc9e0503e467514bbda7a08ff3433e4b780d7 | 2,896 | py | Python | python/o80_pam/o80_ball.py | intelligent-soft-robots/o80_pam | 3491dcdace61f58e0cf31149184593da3cd2f017 | [
"BSD-3-Clause"
] | null | null | null | python/o80_pam/o80_ball.py | intelligent-soft-robots/o80_pam | 3491dcdace61f58e0cf31149184593da3cd2f017 | [
"BSD-3-Clause"
] | 2 | 2021-02-17T12:55:44.000Z | 2021-05-27T14:10:57.000Z | python/o80_pam/o80_ball.py | intelligent-soft-robots/o80_pam | 3491dcdace61f58e0cf31149184593da3cd2f017 | [
"BSD-3-Clause"
] | null | null | null | import o80
import o80_pam
import context
# convenience class for shooting virtual balls
# via o80, playing pre-recorded trajectories (hosted in context package)
| 31.139785 | 88 | 0.632597 |
a76c133ddf548f99aff8129ee6e9cbb2e7608901 | 5,374 | py | Python | pymic/transform/threshold.py | HiLab-git/PyMIC | abf5c43de43668b85f4c049c95a8f1b7cf1d9f16 | [
"Apache-2.0"
] | 147 | 2019-12-23T02:52:04.000Z | 2022-03-06T16:30:43.000Z | pymic/transform/threshold.py | HiLab-git/PyMIC | abf5c43de43668b85f4c049c95a8f1b7cf1d9f16 | [
"Apache-2.0"
] | 4 | 2020-12-18T12:47:21.000Z | 2021-05-21T02:18:01.000Z | pymic/transform/threshold.py | HiLab-git/PyMIC | abf5c43de43668b85f4c049c95a8f1b7cf1d9f16 | [
"Apache-2.0"
] | 32 | 2020-01-08T13:48:50.000Z | 2022-03-12T06:31:13.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function, division
import torch
import json
import math
import random
import numpy as np
from scipy import ndimage
from pymic.transform.abstract_transform import AbstractTransform
from pymic.util.image_process import *
| 48.854545 | 106 | 0.619092 |
a76da74714c837b30e9c8d8f4fbec4b1ea99f85f | 211 | py | Python | exerc18/18.py | WilliamSampaio/ExerciciosPython | 4317d242d2944b91b5d455da8a4ac3a33e154385 | [
"MIT"
] | null | null | null | exerc18/18.py | WilliamSampaio/ExerciciosPython | 4317d242d2944b91b5d455da8a4ac3a33e154385 | [
"MIT"
] | null | null | null | exerc18/18.py | WilliamSampaio/ExerciciosPython | 4317d242d2944b91b5d455da8a4ac3a33e154385 | [
"MIT"
] | null | null | null | import os
numeros = [0,0]
numeros[0] = float(input('Digite o numero 1: '))
numeros[1] = float(input('Digite o numero 2: '))
print(f'o maior valor entre os dois : {max(numeros)}')
os.system('pause') | 21.1 | 56 | 0.630332 |
a76e729d78669a3e706e9fdd618185c47c67bee8 | 7,958 | py | Python | DictionaryOfNewZealandEnglish/headword/citation/views.py | eResearchSandpit/DictionaryOfNewZealandEnglish | cf3cec34aafc7a9a8bd0413883f5eeb314d46a48 | [
"BSD-3-Clause"
] | null | null | null | DictionaryOfNewZealandEnglish/headword/citation/views.py | eResearchSandpit/DictionaryOfNewZealandEnglish | cf3cec34aafc7a9a8bd0413883f5eeb314d46a48 | [
"BSD-3-Clause"
] | null | null | null | DictionaryOfNewZealandEnglish/headword/citation/views.py | eResearchSandpit/DictionaryOfNewZealandEnglish | cf3cec34aafc7a9a8bd0413883f5eeb314d46a48 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Citations
from flask import (Blueprint, request, render_template, flash, url_for,
redirect, session)
from flask.ext.login import login_required, current_user
import logging, sys, re
from sqlalchemy.exc import IntegrityError, InvalidRequestError
from DictionaryOfNewZealandEnglish.database import db
from DictionaryOfNewZealandEnglish.headword.citation.forms import *
from DictionaryOfNewZealandEnglish.headword.citation.models import *
import datetime as dt
blueprint = Blueprint("citations", __name__, url_prefix='/headwords/citations',
static_folder="../static")
#############################################################################
### Private
def __create_citation(form, headword):
date = __form_date(form)
citation = Citation.create(
date = date,
circa = form.circa.data,
author = form.author.data,
source_id = form.source.data.id,
vol_page = form.vol_page.data,
edition = form.edition.data,
quote = form.quote.data,
notes = form.notes.data,
archived = False,
updated_at = dt.datetime.utcnow(),
updated_by = current_user.username
)
h = Headword.query.get(headword.id)
h.citations.append(citation)
db.session.add(h)
db.session.commit()
return citation.id
def __form_date(form):
if form.date.data == "":
flash("No date entered.", 'warning')
raise InvalidRequestError
form_date = re.split(r'/\s*', form.date.data)
if len(form_date) < 3:
if form.circa.data:
# pad out data to fit into datetime type
if len(form_date) == 2:
y = form_date[1].strip()
m = form_date[0].strip()
d = "1"
if len(form_date) == 1:
y = form_date[0].strip()
m = "1"
d = "1"
else:
flash("Partial date entered, perhaps 'Circa' should be checked.", 'warning')
raise InvalidRequestError
else:
y = form_date[2].strip()
m = form_date[1].strip()
d = form_date[0].strip()
# dt.datetime(y, m, d)
print "### form_date {0} / {1} / {2}".format(y,m,d)
date = dt.datetime(int(y), int(m), int(d))
return date
def __pretty_print_date(obj, circa=False):
print "### citation {0} {1}".format(obj, circa)
if isinstance(obj, Citation):
d = obj.date.day
m = obj.date.month
y = obj.date.year
circa = obj.circa
if isinstance(obj, dt.datetime):
d = obj.day
m = obj.month
y = obj.year
if circa:
if d == 1:
if m == 1:
m = ""
else:
m = "{0} / ".format(m)
d = ""
else:
d = "{0} / ".format(d)
m = "{0} / ".format(m)
print "test 1 {0}{1}{2}".format(d, m, y)
return "{0}{1}{2}".format(d, m, y)
else:
print "test 2 {0} / {1} / {2}".format(d, m, y)
return "{0} / {1} / {2}".format(d, m, y)
def __set_data_for_citation(citation, form):
try:
date = __form_date(form)
Citation.update(citation,
date = date,
circa = form.circa.data,
author = form.author.data,
source_id = form.source.data.id,
vol_page = form.vol_page.data,
edition = form.edition.data,
quote = form.quote.data,
notes = form.notes.data,
archived = form.archived.data,
updated_at = dt.datetime.utcnow(),
updated_by = current_user.username
)
flash("Edit of citation is saved.", 'success')
return True
except (IntegrityError, InvalidRequestError):
db.session.rollback()
flash("Edit of citation failed.", 'warning')
return False
| 35.846847 | 86 | 0.518975 |
a76eab46ba07f0fb8885169ad3e849032ee2d76c | 81 | py | Python | tests/conf.py | xncbf/django-dynamodb-cache | be6d1b4b8e92d581041043bcd694f2a9f00ee386 | [
"MIT"
] | 21 | 2022-02-16T10:18:24.000Z | 2022-03-31T23:40:06.000Z | tests/conf.py | xncbf/django-dynamodb-cache | be6d1b4b8e92d581041043bcd694f2a9f00ee386 | [
"MIT"
] | 9 | 2022-03-01T06:40:59.000Z | 2022-03-26T08:12:31.000Z | tests/conf.py | xncbf/django-dynamodb-cache | be6d1b4b8e92d581041043bcd694f2a9f00ee386 | [
"MIT"
] | null | null | null | from random import random
TABLE_NAME = f"test-django-dynamodb-cache-{random()}"
| 20.25 | 53 | 0.765432 |
a76f70dafa18b95735a41dd028a3dcb5cbf10b66 | 1,863 | py | Python | dockerfiles/greeting/0.2/database.py | scherbertlemon/docker-training | f94c79b461f78a4d9242a3e838524efb70a2792e | [
"MIT"
] | 1 | 2021-08-06T17:00:53.000Z | 2021-08-06T17:00:53.000Z | dockerfiles/greeting/0.2/database.py | scherbertlemon/docker-training | f94c79b461f78a4d9242a3e838524efb70a2792e | [
"MIT"
] | null | null | null | dockerfiles/greeting/0.2/database.py | scherbertlemon/docker-training | f94c79b461f78a4d9242a3e838524efb70a2792e | [
"MIT"
] | null | null | null | import psycopg2 as pg
import os
"""
Database (Postgres) module connecting to the database for the simple greeting
app.
"""
# The hostname where the database is running can be determined via environment
PGHOST = os.getenv("PG_HOST") if os.getenv("PG_HOST") else "localhost"
def get_pgconn():
"""
Connects to the database and also triggers the creation of the single
required table if it does not exist yet. Clearly you would not do
that in a production environment.
Returns
-------
psycopg2 database connection
"""
# database credentials hard-coded except for hostname
CRED = {
"host": PGHOST,
"port": 5432,
"database": "postgres",
"user": "postgres",
"password": "holymoly"
}
conn = pg.connect(**CRED)
create(conn)
return conn
def create(db):
"""
helper function to create the required database table if it does not exist
yet.
Parameters
----------
db: psycopg2 database connection
"""
SQL_CREATE = """
CREATE TABLE IF NOT EXISTS messages (
id SERIAL,
message TEXT,
author TEXT,
received TEXT
)
"""
cursor = db.cursor()
cursor.execute(SQL_CREATE)
db.commit()
def insert(db, dct):
"""
Inserts the entered data for author, message and timestamp into the
database.
Parameters
----------
db: psycopg2 database connection
dct: dict
containing the fields message, author, received. Validity is not
checked, every field is expected to be present and to contain a string
as value.
"""
SQL_INSERT = """
INSERT INTO messages(message, author, received) VALUES (
%(message)s,
%(author)s,
%(received)s
)
"""
cursor = db.cursor()
cursor.execute(SQL_INSERT, dct)
db.commit()
| 23 | 78 | 0.616747 |
a76faf50eeea6f4eeb893d3b2fcef43aec0e7eaf | 3,277 | py | Python | generator/constant_aug.py | zhou3968322/pytorch-CycleGAN-and-pix2pix | 30730fddbc6797c5e421cd49c9fef369011d484d | [
"BSD-3-Clause"
] | null | null | null | generator/constant_aug.py | zhou3968322/pytorch-CycleGAN-and-pix2pix | 30730fddbc6797c5e421cd49c9fef369011d484d | [
"BSD-3-Clause"
] | null | null | null | generator/constant_aug.py | zhou3968322/pytorch-CycleGAN-and-pix2pix | 30730fddbc6797c5e421cd49c9fef369011d484d | [
"BSD-3-Clause"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# -*- coding:utf-8 -*-
# email:bingchengzhou@foxmail.com
# create: 2020/11/25
from imgaug import augmenters as iaa
seq_cir = iaa.Sequential(
[
iaa.AdditiveGaussianNoise(scale=0.01 * 255),
# iaa.MultiplyElementwise((0.8, 0.99)),
iaa.Dropout(p=(0, 0.05)),
# iaa.JpegCompression(compression=(80, 99)),
iaa.Affine(rotate=(-90, 90), scale=(0.4, 0.7), fit_output=True)
],
random_order=True)
seq_cir_big = iaa.Sequential(
[
iaa.AdditiveGaussianNoise(scale=0.01 * 255),
# iaa.MultiplyElementwise((0.8, 0.99)),
iaa.Dropout(p=(0, 0.05)),
# iaa.JpegCompression(compression=(80, 99)),
iaa.Affine(rotate=(-90, 90), scale=(0.9, 1.5), fit_output=True)
],
random_order=True)
seq_ell = iaa.Sequential(
[
iaa.AdditiveGaussianNoise(scale=0.01 * 255),
# iaa.MultiplyElementwise((0.8, 0.99)),
iaa.Dropout(p=(0, 0.05)),
# iaa.JpegCompression(compression=(80, 99)),
iaa.Affine(rotate=(-20, 20), scale=(0.4, 0.9), fit_output=True)
],
random_order=True)
seq_squ = iaa.Sequential(
[
iaa.AdditiveGaussianNoise(scale=0.01 * 255),
# iaa.MultiplyElementwise((0.8, 0.99)),
iaa.Dropout(p=(0, 0.05)),
# iaa.JpegCompression(compression=(80, 99)),
iaa.Affine(rotate=(-90, 90), scale=(0.18, 0.35), fit_output=True)
# iaa.Affine(rotate=(-90, 90), scale=(0.8, 1.4), fit_output=True)
],
random_order=True)
seq_rec = iaa.Sequential(
[
iaa.AdditiveGaussianNoise(scale=0.01 * 255),
# iaa.MultiplyElementwise((0.8, 0.99)),
iaa.Dropout(p=(0, 0.05)),
# iaa.JpegCompression(compression=(80, 99)),
iaa.Affine(rotate=(-90, 90), scale=(0.15, 0.25), fit_output=True)
# iaa.Affine(rotate=(-90, 90), scale=(0.2, 0.4), fit_output=True)
],
random_order=True)
seq_doc_noise = iaa.Sequential(
[
iaa.Sometimes(
0.6,
iaa.OneOf(iaa.Sequential([iaa.GaussianBlur(sigma=(0, 1.0))])
# iaa.AverageBlur(k=(2, 5)),
# iaa.MedianBlur(k=(3, 7))])
)
),
iaa.Sometimes(
0.5,
iaa.LinearContrast((0.8, 1.2), per_channel=0.5),
),
iaa.Sometimes(
0.3,
iaa.Multiply((0.8, 1.2), per_channel=0.5),
),
iaa.Sometimes(
0.3,
iaa.WithBrightnessChannels(iaa.Add((-40, 40))),
),
# iaa.Sometimes(
# 0.3,
# iaa.OneOf(iaa.Sequential([
# iaa.AdditiveGaussianNoise(scale=(0, 0.01*255), per_channel=0.5),
# iaa.SaltAndPepper(0.01)]))
# ),
iaa.Sometimes(
0.5,
iaa.Add((-10, 10), per_channel=0.5),
),
# iaa.Sometimes(
# 0.5,
# iaa.Dropout(p=(0, 0.05))
# ),
# iaa.JpegCompression(compression=(80, 99))
],
random_order=True)
| 30.915094 | 90 | 0.53494 |
a77112792896e19b96e12810cacf0861b725bf41 | 3,873 | py | Python | ooobuild/lo/packages/x_data_sink_encr_support.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/packages/x_data_sink_encr_support.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/packages/x_data_sink_encr_support.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.packages
import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from ..io.x_input_stream import XInputStream as XInputStream_98d40ab4
__all__ = ['XDataSinkEncrSupport']
| 39.520408 | 178 | 0.694036 |
a77229f1a130b744660ffd1757e86e6d6dd38d54 | 1,074 | py | Python | questions/q197_choose_and_swap/code.py | aadhityasw/Competitive-Programs | 901a48d35f024a3a87c32a45b7f4531e8004a203 | [
"MIT"
] | null | null | null | questions/q197_choose_and_swap/code.py | aadhityasw/Competitive-Programs | 901a48d35f024a3a87c32a45b7f4531e8004a203 | [
"MIT"
] | 1 | 2021-05-15T07:56:51.000Z | 2021-05-15T07:56:51.000Z | questions/q197_choose_and_swap/code.py | aadhityasw/Competitive-Programs | 901a48d35f024a3a87c32a45b7f4531e8004a203 | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
ob = Solution()
t = int (input ())
for _ in range (t):
A = input()
ans = ob.chooseandswap(A)
print(ans)
| 23.347826 | 46 | 0.286778 |
a7730b1c8e64cf80eb7189889ed0d119ac2a5fc8 | 10,625 | py | Python | assignment4/assignment4.py | umamibeef/UBC-EECE-560-Coursework | 4c89fb03a4dacf778e31eeb978423bfdaa95b591 | [
"MIT"
] | null | null | null | assignment4/assignment4.py | umamibeef/UBC-EECE-560-Coursework | 4c89fb03a4dacf778e31eeb978423bfdaa95b591 | [
"MIT"
] | null | null | null | assignment4/assignment4.py | umamibeef/UBC-EECE-560-Coursework | 4c89fb03a4dacf778e31eeb978423bfdaa95b591 | [
"MIT"
] | null | null | null | import argparse
import csv
import matplotlib
import matplotlib.ticker as tck
import matplotlib.pyplot as plt
import numpy as np
# Matplotlib export settings
matplotlib.use('pgf')
import matplotlib.pyplot as plt
matplotlib.rcParams.update({
'pgf.texsystem': 'pdflatex',
'font.size': 10,
'font.family': 'serif', # use serif/main font for text elements
'text.usetex': True, # use inline math for ticks
'pgf.rcfonts': False # don't setup fonts from rc parameters
})
# Main function
if __name__ == '__main__':
# the following sets up the argument parser for the program
parser = argparse.ArgumentParser(description='Assignment 4 solution generator')
args = parser.parse_args()
main(args) | 48.076923 | 155 | 0.660706 |
a774dc8ec0c70281d59955e540db50979da5c0cf | 4,744 | py | Python | src/python/pants/scm/subsystems/changed.py | lahosken/pants | 1b0340987c9b2eab9411416803c75b80736716e4 | [
"Apache-2.0"
] | 1 | 2021-11-11T14:04:24.000Z | 2021-11-11T14:04:24.000Z | src/python/pants/scm/subsystems/changed.py | lahosken/pants | 1b0340987c9b2eab9411416803c75b80736716e4 | [
"Apache-2.0"
] | null | null | null | src/python/pants/scm/subsystems/changed.py | lahosken/pants | 1b0340987c9b2eab9411416803c75b80736716e4 | [
"Apache-2.0"
] | 1 | 2021-11-11T14:04:12.000Z | 2021-11-11T14:04:12.000Z | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.build_environment import get_scm
from pants.base.exceptions import TaskError
from pants.goal.workspace import ScmWorkspace
from pants.scm.change_calculator import BuildGraphChangeCalculator
from pants.subsystem.subsystem import Subsystem
from pants.util.objects import datatype
# TODO: Remove this in 1.5.0dev0.
| 41.982301 | 103 | 0.706788 |
a775681f9ac02e296e8b3818c15064c985162dc4 | 1,825 | py | Python | SOLID/LSP/GoodLSPCode.py | maumneto/DesignPatternCourse | eb55a3d4e6a3261265dc98fcc6ec48d7b8e6b7a8 | [
"MIT"
] | 1 | 2021-06-26T15:32:35.000Z | 2021-06-26T15:32:35.000Z | SOLID/LSP/GoodLSPCode.py | maumneto/DesignPatternCourse | eb55a3d4e6a3261265dc98fcc6ec48d7b8e6b7a8 | [
"MIT"
] | null | null | null | SOLID/LSP/GoodLSPCode.py | maumneto/DesignPatternCourse | eb55a3d4e6a3261265dc98fcc6ec48d7b8e6b7a8 | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
commonAccount = AccountCommon(500)
commonAccount.deposit(500)
commonAccount.withdraw(100)
commonAccount.income(0.005)
commonAccount.message()
print(' ------- ')
spetialAccount = AccountSpetial(1000)
spetialAccount.deposit(500)
spetialAccount.withdraw(200)
spetialAccount.message()
| 26.449275 | 65 | 0.629041 |
a7758434e025289995bc94bace734e2f383e3e76 | 818 | py | Python | test/test_global_customer_api.py | ezmaxinc/eZmax-SDK-python | 6794b8001abfb7d9ae18a3b87aba164839b925a0 | [
"MIT"
] | null | null | null | test/test_global_customer_api.py | ezmaxinc/eZmax-SDK-python | 6794b8001abfb7d9ae18a3b87aba164839b925a0 | [
"MIT"
] | null | null | null | test/test_global_customer_api.py | ezmaxinc/eZmax-SDK-python | 6794b8001abfb7d9ae18a3b87aba164839b925a0 | [
"MIT"
] | null | null | null | """
eZmax API Definition (Full)
This API expose all the functionnalities for the eZmax and eZsign applications. # noqa: E501
The version of the OpenAPI document: 1.1.7
Contact: support-api@ezmax.ca
Generated by: https://openapi-generator.tech
"""
import unittest
import eZmaxApi
from eZmaxApi.api.global_customer_api import GlobalCustomerApi # noqa: E501
if __name__ == '__main__':
unittest.main()
| 22.108108 | 97 | 0.689487 |
a7762ca16d51e6d7fb512c7980d15ee79dbeff30 | 3,930 | py | Python | reconstruction_model.py | JungahYang/Deep3DFaceReconstruction | 041b89a781f90ba459f3294c4e568b5c1a3cf7da | [
"MIT"
] | 1,424 | 2019-05-07T05:03:12.000Z | 2022-03-31T08:52:29.000Z | reconstruction_model.py | zepengF/Deep3DFaceReconstruction | 5b131a3e67597da67409486e20db50007f48427d | [
"MIT"
] | 194 | 2019-05-08T21:11:23.000Z | 2022-03-30T02:58:25.000Z | reconstruction_model.py | zepengF/Deep3DFaceReconstruction | 5b131a3e67597da67409486e20db50007f48427d | [
"MIT"
] | 359 | 2019-05-10T11:05:41.000Z | 2022-03-28T21:57:42.000Z | import tensorflow as tf
import face_decoder
import networks
import losses
from utils import *
###############################################################################################
# model for single image face reconstruction
###############################################################################################
| 45.697674 | 142 | 0.711705 |
a7779af144a3ba68deaf47c8047f304427889fe5 | 2,002 | py | Python | organizational_area/admin.py | mspasiano/uniTicket | 1e8e4c2274293e751deea5b8b1fb4116136c5641 | [
"Apache-2.0"
] | null | null | null | organizational_area/admin.py | mspasiano/uniTicket | 1e8e4c2274293e751deea5b8b1fb4116136c5641 | [
"Apache-2.0"
] | null | null | null | organizational_area/admin.py | mspasiano/uniTicket | 1e8e4c2274293e751deea5b8b1fb4116136c5641 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from .models import *
from .admin_inlines import *
#@admin.register(TipoDotazione)
#class TipoDotazioneAdmin(admin.ModelAdmin):
#list_display = ('nome', 'descrizione')
#class Media:
#js = ('js/textarea-autosize.js',)
#css = {'all': ('css/textarea-small.css',),}
#@admin.register(Locazione)
#class LocazioneAdmin(admin.ModelAdmin):
#list_display = ('nome', 'indirizzo', 'descrizione_breve',)
#class Media:
#js = ('js/textarea-autosize.js',)
#css = {'all': ('css/textarea-small.css',),}
# @admin.register(OrganizationalStructureFunction)
# class OrganizationalStructureFunction(AbstractAdmin):
# pass
| 29.014493 | 68 | 0.700799 |
a777a11d9cdd73ba24751d88b5b9e8b62e919781 | 2,509 | py | Python | tests/test_symgroup.py | efrembernuz/symeess | d74868bbb8463e0420fcc28e3554fbfa8e6de22f | [
"MIT"
] | 1 | 2017-10-25T01:42:14.000Z | 2017-10-25T01:42:14.000Z | tests/test_symgroup.py | efrembernuz/symeess | d74868bbb8463e0420fcc28e3554fbfa8e6de22f | [
"MIT"
] | null | null | null | tests/test_symgroup.py | efrembernuz/symeess | d74868bbb8463e0420fcc28e3554fbfa8e6de22f | [
"MIT"
] | null | null | null | import unittest
from cosymlib import file_io
from numpy import testing
from cosymlib.molecule.geometry import Geometry
import os
data_dir = os.path.join(os.path.dirname(__file__), 'data')
| 44.017544 | 96 | 0.595058 |
a777d3b6992912736d9d3c1557062ac6df7a8a29 | 5,651 | py | Python | mouth_detecting.py | nuocheng/Face-detection | 84375b0c1bacaf572fb04aa6e05751469fe5f9c8 | [
"MIT"
] | null | null | null | mouth_detecting.py | nuocheng/Face-detection | 84375b0c1bacaf572fb04aa6e05751469fe5f9c8 | [
"MIT"
] | null | null | null | mouth_detecting.py | nuocheng/Face-detection | 84375b0c1bacaf572fb04aa6e05751469fe5f9c8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# import the necessary packages
from scipy.spatial import distance as dist
from imutils.video import FileVideoStream
from imutils.video import VideoStream
from imutils import face_utils
import numpy as np # numpy
import argparse
import imutils
import time
import dlib
import cv2
#
#
#
EYE_AR_THRESH = 0.2
EYE_AR_CONSEC_FRAMES = 3
#
#
MAR_THRESH = 0.5
MOUTH_AR_CONSEC_FRAMES = 3
#
COUNTER = 0
TOTAL = 0
#
mCOUNTER = 0
mTOTAL = 0
# DLIBHOG
print("[INFO] loading facial landmark predictor...")
# dlib.get_frontal_face_detector()
detector = dlib.get_frontal_face_detector()
# dlib.shape_predictor
predictor = dlib.shape_predictor('./model/shape_predictor_68_face_landmarks.dat')
#
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
(mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
# cv2
cap = cv2.VideoCapture(0)
#
while True:
#
ret, frame = cap.read()
frame = imutils.resize(frame, width=720)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# detector(gray, 0)
rects = detector(gray, 0)
# predictor(gray, rect)
for rect in rects:
shape = predictor(gray, rect)
# array
shape = face_utils.shape_to_np(shape)
#
leftEye = shape[lStart:lEnd]
rightEye = shape[rStart:rEnd]
#
mouth = shape[mStart:mEnd]
# EAREAR
leftEAR = eye_aspect_ratio(leftEye)
rightEAR = eye_aspect_ratio(rightEye)
ear = (leftEAR + rightEAR) / 2.0
#
mar = mouth_aspect_ratio(mouth)
# cv2.convexHulldrawContours
leftEyeHull = cv2.convexHull(leftEye)
rightEyeHull = cv2.convexHull(rightEye)
cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
mouthHull = cv2.convexHull(mouth)
cv2.drawContours(frame, [mouthHull], -1, (0, 255, 0), 1)
#
left = rect.left()
top = rect.top()
right = rect.right()
bottom = rect.bottom()
cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 3)
'''
13
'''
# +1
if ear < EYE_AR_THRESH:# 0.2
COUNTER += 1
else:
# 3
if COUNTER >= EYE_AR_CONSEC_FRAMES:# 3
TOTAL += 1
#
COUNTER = 0
# cv2.putText
cv2.putText(frame, "Faces: {}".format(len(rects)), (10, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "Blinks: {}".format(TOTAL), (150, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "COUNTER: {}".format(COUNTER), (300, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "EAR: {:.2f}".format(ear), (450, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
'''
133
'''
#
if mar > MAR_THRESH:# 0.5
mCOUNTER += 1
cv2.putText(frame, "Yawning!", (10, 60),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
else:
# 3
if mCOUNTER >= MOUTH_AR_CONSEC_FRAMES:# 3
mTOTAL += 1
#
mCOUNTER = 0
cv2.putText(frame, "Yawning: {}".format(mTOTAL), (150, 60),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "mCOUNTER: {}".format(mCOUNTER), (300, 60),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.putText(frame, "MAR: {:.2f}".format(mar), (480, 60),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
# 68
for (x, y) in shape:
cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)
print(':{:.2f} '.format(mar)+"\t"+str([False,True][mar > MAR_THRESH]))
print(':{:.2f} '.format(ear)+"\t"+str([False,True][COUNTER>=1]))
#
if TOTAL >= 50 or mTOTAL>=15:
cv2.putText(frame, "SLEEP!!!", (100, 200),cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
# q
cv2.putText(frame, "Press 'q': Quit", (20, 500),cv2.FONT_HERSHEY_SIMPLEX, 0.7, (84, 255, 159), 2)
# show with opencv
cv2.imshow("Frame", frame)
# if the `q` key was pressed, break from the loop
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# release camera
cap.release()
# do a bit of cleanup
cv2.destroyAllWindows()
| 32.854651 | 117 | 0.603787 |
a7780199003eb4084f3a08db621a30c4ac94b9d2 | 2,894 | py | Python | Scripts/Ros/Identifica_cor.py | pcliquet/robotic_resumo | 3d1d8705820cae39d5be956836a94c7884ab490d | [
"MIT"
] | 1 | 2022-03-26T22:50:26.000Z | 2022-03-26T22:50:26.000Z | Scripts/Ros/Identifica_cor.py | pcliquet/robotic_resumo | 3d1d8705820cae39d5be956836a94c7884ab490d | [
"MIT"
] | null | null | null | Scripts/Ros/Identifica_cor.py | pcliquet/robotic_resumo | 3d1d8705820cae39d5be956836a94c7884ab490d | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# -*- coding:utf-8 -*-
import rospy
import numpy as np
import tf
import math
import cv2
import time
from geometry_msgs.msg import Twist, Vector3, Pose
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Image, CompressedImage
from cv_bridge import CvBridge, CvBridgeError
import smach
import smach_ros
def identifica_cor(frame):
'''
Segmenta o maior objeto cuja cor parecida com cor_h (HUE da cor, no espao HSV).
'''
frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
cor_menor = np.array([0, 50, 100])
cor_maior = np.array([6, 255, 255])
segmentado_cor = cv2.inRange(frame_hsv, cor_menor, cor_maior)
cor_menor = np.array([174, 50, 100])
cor_maior = np.array([180, 255, 255])
mask = cv2.inRange(frame_hsv, cor_menor, cor_maior)
kernel = np.ones((5, 5), np.uint8)
morpho = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
#segmentado_cor += cv2.inRange(frame_hsv, cor_menor, cor_maior)
# Note que a notaco do numpy encara as imagens como matriz, portanto o enderecamento
# linha, coluna ou (y,x)
# Por isso na hora de montar a tupla com o centro precisamos inverter, porque
centro = (frame.shape[1]//2, frame.shape[0]//2)
segmentado_cor = cv2.morphologyEx(morpho,cv2.MORPH_CLOSE,np.ones((7, 7)))
contornos, arvore = cv2.findContours(morpho.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
maior_contorno = None
maior_contorno_area = 0
for cnt in contornos:
area = cv2.contourArea(cnt)
if area > maior_contorno_area:
maior_contorno = cnt
maior_contorno_area = area
# Encontramos o centro do contorno fazendo a mdia de todos seus pontos.
if not maior_contorno is None :
cv2.drawContours(frame, [maior_contorno], -1, [0, 0, 255], 5)
maior_contorno = np.reshape(maior_contorno, (maior_contorno.shape[0], 2))
media = maior_contorno.mean(axis=0)
media = media.astype(np.int32)
cv2.circle(frame, (media[0], media[1]), 5, [0, 255, 0])
cross(frame, centro, [255,0,0], 1, 17)
else:
media = (0, 0)
# Representa a area e o centro do maior contorno no frame
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
cv2.putText(frame,"{:d} {:d}".format(*media),(20,100), 1, 4,(255,255,255),2,cv2.LINE_AA)
cv2.putText(frame,"{:0.1f}".format(maior_contorno_area),(20,50), 1, 4,(255,255,255),2,cv2.LINE_AA)
# cv2.imshow('video', frame)
cv2.imshow('seg', segmentado_cor)
cv2.waitKey(1)
return media, centro, maior_contorno_area | 33.651163 | 129 | 0.664824 |
a77866394277674aa9998582e0c75620917bdb48 | 2,041 | py | Python | integration_tests/test_test_oracle_tax.py | weblucas/mseg-semantic | ec3d179003bb26dd0f1336853b719319721757a4 | [
"MIT"
] | 391 | 2020-06-05T17:30:44.000Z | 2022-03-31T12:01:30.000Z | integration_tests/test_test_oracle_tax.py | weblucas/mseg-semantic | ec3d179003bb26dd0f1336853b719319721757a4 | [
"MIT"
] | 27 | 2020-06-06T15:08:37.000Z | 2022-02-28T07:57:57.000Z | integration_tests/test_test_oracle_tax.py | weblucas/mseg-semantic | ec3d179003bb26dd0f1336853b719319721757a4 | [
"MIT"
] | 57 | 2020-06-09T06:05:30.000Z | 2022-03-28T15:49:36.000Z | #!/usr/bin/python3
from pathlib import Path
from types import SimpleNamespace
from mseg_semantic.scripts.collect_results import parse_result_file
from mseg_semantic.tool.test_oracle_tax import test_oracle_taxonomy_model
REPO_ROOT_ = Path(__file__).resolve().parent.parent
# Replace this variables with your own path to run integration tests.
INTEGRATION_TEST_OUTPUT_DIR = '/srv/scratch/jlambert30/MSeg/mseg-semantic/integration_test_data'
# Copy the mseg-3m-1080p model there
CAMVID_MODEL_PATH = f'{INTEGRATION_TEST_OUTPUT_DIR}/camvid-11-1m.pth'
def test_evaluate_oracle_tax_model():
"""
Ensure oracle model testing script works correctly.
base_sizes=(
#360
720
#1080
python -u mseg_semantic/tool/test_oracle_tax.py --config=${config_fpath}
dataset ${dataset_name} model_path ${model_fpath} model_name ${model_name}
"""
base_size = 1080
d = {
'dataset': 'camvid-11',
'config': f'{REPO_ROOT_}/mseg_semantic/config/test/default_config_${base_size}_ss.yaml',
'model_path': CAMVID_MODEL_PATH,
'model_name': 'mseg-3m-1080p',
'input_file': 'default',
'base_size': base_size,
'test_h': 713,
'test_w': 713,
'scales': [1.0],
'save_folder': 'default',
'arch': 'hrnet',
'index_start': 0,
'index_step': 0,
'workers': 16,
'has_prediction': False,
'split': 'val',
'vis_freq': 20
}
args = SimpleNamespace(**d)
use_gpu = True
test_oracle_taxonomy_model(args, use_gpu)
# Ensure that results match paper
result_file_path = INTEGRATION_TEST_OUTPUT_DIR
result_file_path += f'/camvid-11-1m/camvid-11/{base_size}/ss/results.txt'
assert Path(result_file_path).exists()
mIoU = parse_result_file(result_file_path)
print(f"mIoU: {mIoU}")
# single-scale result
assert mIoU == 78.79
OKGREEN = '\033[92m'
ENDC = '\033[0m'
print(OKGREEN + ">>>>>>>>>>>>>>>>>>>>>>>>>>>>" + ENDC)
print(OKGREEN + 'Oracle model evalution passed successfully' + ENDC)
print(OKGREEN + ">>>>>>>>>>>>>>>>>>>>>>>>>>>>" + ENDC)
if __name__ == '__main__':
test_evaluate_oracle_tax_model()
| 28.347222 | 96 | 0.708476 |
a7799a223cdf2e189549e42fb31de6f6391c2873 | 1,911 | py | Python | sports_manager/models/gymnasium.py | hbuyse/dj-sports-manager | 7e32cc41347b968b4ede9ea6846de14d9504c3f9 | [
"MIT"
] | null | null | null | sports_manager/models/gymnasium.py | hbuyse/dj-sports-manager | 7e32cc41347b968b4ede9ea6846de14d9504c3f9 | [
"MIT"
] | null | null | null | sports_manager/models/gymnasium.py | hbuyse/dj-sports-manager | 7e32cc41347b968b4ede9ea6846de14d9504c3f9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Gymnasium implementation."""
# Django
from django.core.validators import RegexValidator
from django.db import models
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _ # noqa
| 36.75 | 103 | 0.588697 |
a779d1a47d9473c22bbee36fab9477af4aad4943 | 228 | py | Python | 01-logica-de-programacao-e-algoritmos/Aula 04/1/exercicio01.py | rafaelbarretomg/Uninter | 1f84b0103263177122663e991db3a8aeb106a959 | [
"MIT"
] | null | null | null | 01-logica-de-programacao-e-algoritmos/Aula 04/1/exercicio01.py | rafaelbarretomg/Uninter | 1f84b0103263177122663e991db3a8aeb106a959 | [
"MIT"
] | null | null | null | 01-logica-de-programacao-e-algoritmos/Aula 04/1/exercicio01.py | rafaelbarretomg/Uninter | 1f84b0103263177122663e991db3a8aeb106a959 | [
"MIT"
] | null | null | null | # Exercicio 01 Tuplas
x = int(input('Digite o primeiro numero: '))
y = int(input('Digite o segundo numero: '))
cont = 1
soma = x
while cont < y:
soma = soma + x
cont = cont + 1
print('O resultado eh: {}' .format(soma))
| 20.727273 | 44 | 0.618421 |
a77a0a8078a541187f7e349449f50c15dd027ebe | 832 | py | Python | docs/OOPS/Accessing_pvt_var2.py | munyumunyu/Python-for-beginners | 335d001d4b8f13af71f660beed0b7f5fe313aa3b | [
"MIT"
] | 158 | 2018-10-03T23:36:48.000Z | 2022-03-25T00:16:00.000Z | docs/OOPS/Accessing_pvt_var2.py | munyumunyu/Python-for-beginners | 335d001d4b8f13af71f660beed0b7f5fe313aa3b | [
"MIT"
] | 10 | 2018-10-11T03:52:28.000Z | 2019-12-04T02:51:28.000Z | docs/OOPS/Accessing_pvt_var2.py | munyumunyu/Python-for-beginners | 335d001d4b8f13af71f660beed0b7f5fe313aa3b | [
"MIT"
] | 40 | 2018-10-03T10:47:28.000Z | 2022-02-22T19:55:46.000Z | '''
To have a error free way of accessing and updating private variables, we create specific methods for this.
Those methods which are meant to set a value to a private variable are called setter methods and methods
meant to access private variable values are called getter methods.
The below code is an example of getter and setter methods:
'''
c1=Customer(100, "Gopal", 24, 1000)
c1.set_wallet_balance(120)
print(c1.get_wallet_balance())
| 32 | 107 | 0.71274 |
a77b4550c67262bf40db6267243d9f55a2869fd2 | 21,013 | py | Python | src/runmanager/runinstance.py | scherma/antfarm | ad4d1d564eb79bdc7e00780b97ca10594c75cd5c | [
"MIT"
] | 6 | 2018-08-26T10:15:29.000Z | 2022-03-03T21:12:37.000Z | src/runmanager/runinstance.py | scherma/antfarm | ad4d1d564eb79bdc7e00780b97ca10594c75cd5c | [
"MIT"
] | 10 | 2018-03-09T18:18:28.000Z | 2021-05-06T21:37:53.000Z | src/runmanager/runinstance.py | scherma/antfarm | ad4d1d564eb79bdc7e00780b97ca10594c75cd5c | [
"MIT"
] | 3 | 2018-11-29T07:47:30.000Z | 2020-05-24T09:58:57.000Z | #!/usr/bin/env python3
# coding: utf-8
# MIT License https://github.com/scherma
# contact http_error_418 @ unsafehex.com
import logging, os, configparser, libvirt, json, arrow, pyvnc, shutil, time, victimfiles, glob, websockify, multiprocessing, signal
import tempfile, evtx_dates, db_calls, psycopg2, psycopg2.extras, sys, pcap_parser, yarahandler, magic, case_postprocess
import scapy.all as scapy
from lxml import etree
from io import StringIO, BytesIO
from PIL import Image
logger = logging.getLogger("antfarm.worker")
# Manages connection to VM and issuing of commands
# make a screenshot
# https://www.linuxvoice.com/issues/003/LV3libvirt.pdf
def vncsocket(host, lport, dport):
    """Start a websockify proxy listening on *lport* and forwarding to host:dport.

    start_server() blocks; the log message suggests this is launched in a
    separate process (multiprocessing is imported above) — verify at call site.
    """
    logger.debug("Spinning up websocket process...")
    server = websockify.WebSocketProxy(**{"target_host": host, "target_port": dport, "listen_port": lport})
    server.start_server()
def get_screen_image(dom, lv_conn):
    """Take a screenshot of libvirt domain *dom* and return it as a PIL Image.

    Args:
        dom: libvirt domain to capture.
        lv_conn: libvirt connection used to create the data stream.

    Returns:
        PIL.Image.Image decoded from the screenshot stream.
    """
    s = lv_conn.newStream()
    # cause libvirt to take the screenshot
    dom.screenshot(s, 0)
    # copy the data into a buffer
    buf = BytesIO()
    s.recvAll(sc_writer, buf)
    s.finish()
    # rewind the buffer and decode it as an image (nothing is written to disk)
    buf.seek(0)
    i = Image.open(buf)
    return i
def sc_writer(stream, data, b):
    """Stream sink callback: append the received chunk *data* to buffer *b*.

    The three-argument signature is dictated by libvirt's Stream.recvAll
    handler protocol; *stream* itself is not used.
    """
    _ = stream  # unused, required by the recvAll handler signature
    b.write(data)
class StopCaptureException(RuntimeError):
| 42.279678 | 158 | 0.555942 |
a77c6d836bc31836353a31c25d2a780968623e8a | 4,104 | py | Python | test-framework/test-suites/integration/tests/list/test_list_repo.py | sammeidinger/stack | a8085dce179dbe903f65f136f4b63bcc076cc057 | [
"BSD-3-Clause"
] | 123 | 2015-05-12T23:36:45.000Z | 2017-07-05T23:26:57.000Z | test-framework/test-suites/integration/tests/list/test_list_repo.py | sammeidinger/stack | a8085dce179dbe903f65f136f4b63bcc076cc057 | [
"BSD-3-Clause"
] | 177 | 2015-06-05T19:17:47.000Z | 2017-07-07T17:57:24.000Z | test-framework/test-suites/integration/tests/list/test_list_repo.py | sammeidinger/stack | a8085dce179dbe903f65f136f4b63bcc076cc057 | [
"BSD-3-Clause"
] | 32 | 2015-06-07T02:25:03.000Z | 2017-06-23T07:35:35.000Z | import json
| 33.096774 | 144 | 0.690058 |
a78199b06e3d85a0cae2dc6b22fe2403a2e45cd5 | 405 | py | Python | Python/luhnchecksum.py | JaredLGillespie/OpenKattis | 71d26883cb5b8a4a1d63a072587de5575d7c29af | [
"MIT"
] | null | null | null | Python/luhnchecksum.py | JaredLGillespie/OpenKattis | 71d26883cb5b8a4a1d63a072587de5575d7c29af | [
"MIT"
] | null | null | null | Python/luhnchecksum.py | JaredLGillespie/OpenKattis | 71d26883cb5b8a4a1d63a072587de5575d7c29af | [
"MIT"
] | null | null | null | # https://open.kattis.com/problems/luhnchecksum
for _ in range(int(input())):
count = 0
for i, d in enumerate(reversed(input())):
if i % 2 == 0:
count += int(d)
continue
x = 2 * int(d)
if x < 10:
count += x
else:
x = str(x)
count += int(x[0]) + int(x[1])
print('PASS' if count % 10 == 0 else 'FAIL')
| 25.3125 | 48 | 0.449383 |
a7822d26e1d72928c623eabd7ce7c6e586e1f9ee | 2,531 | py | Python | dart_board/plotting.py | GSavathrakis/dart_board | 9430d97675d69e381b701499587a02fd71b02990 | [
"MIT"
] | 8 | 2017-12-04T22:32:25.000Z | 2021-10-01T11:45:09.000Z | dart_board/plotting.py | GSavathrakis/dart_board | 9430d97675d69e381b701499587a02fd71b02990 | [
"MIT"
] | 2 | 2018-03-14T00:10:43.000Z | 2021-05-02T18:51:11.000Z | dart_board/plotting.py | GSavathrakis/dart_board | 9430d97675d69e381b701499587a02fd71b02990 | [
"MIT"
] | 2 | 2018-07-17T23:00:01.000Z | 2021-08-25T15:46:38.000Z | import matplotlib.pyplot as plt
import numpy as np
# from dart_board import plotting
# import numpy as np
# import pickle
# chains = pickle.load(open("../data/HMXB_chain.obj", "rb"))
# plotting.plot_chains(chains)
| 30.130952 | 120 | 0.621889 |
a78349abf743773098268654aaf64c037f2be3f7 | 2,063 | py | Python | challenge/eval.py | CodeCrawl/deep_learning | 3f9c208bba5ee17b4b68be74dc10e43839b4f6d0 | [
"Apache-2.0"
] | 8 | 2018-11-03T16:32:35.000Z | 2020-05-18T23:03:17.000Z | challenge/eval.py | CodeCrawl/deep_learning | 3f9c208bba5ee17b4b68be74dc10e43839b4f6d0 | [
"Apache-2.0"
] | null | null | null | challenge/eval.py | CodeCrawl/deep_learning | 3f9c208bba5ee17b4b68be74dc10e43839b4f6d0 | [
"Apache-2.0"
] | 7 | 2018-11-07T14:39:20.000Z | 2020-04-19T23:54:20.000Z | ##
## Evaluation Script
##
import numpy as np
import time
from sample_model import Model
from data_loader import data_loader
from generator import Generator
if __name__ == '__main__':
program_start = time.time()
accuracy = evaluate()
score = calculate_score(accuracy)
program_end = time.time()
total_time = round(program_end - program_start,2)
print()
print("Execution time (seconds) = ", total_time)
print('Accuracy = ' + str(accuracy))
print("Score = ", score)
print()
| 26.792208 | 86 | 0.600582 |
a78364d0cdf1ba12f5219bbb941cde9ada297c73 | 7,793 | py | Python | PaddleRec/tdm/tdm_demo/infer_network.py | danleifeng/models | b87761f8100a545e0015046dd55d886ce90c190e | [
"Apache-2.0"
] | 2 | 2020-03-12T13:35:02.000Z | 2020-03-12T14:54:23.000Z | PaddleRec/tdm/tdm_demo/infer_network.py | danleifeng/models | b87761f8100a545e0015046dd55d886ce90c190e | [
"Apache-2.0"
] | 1 | 2020-07-02T03:05:00.000Z | 2020-07-02T03:05:00.000Z | PaddleRec/tdm/tdm_demo/infer_network.py | danleifeng/models | b87761f8100a545e0015046dd55d886ce90c190e | [
"Apache-2.0"
] | 1 | 2020-09-09T16:53:01.000Z | 2020-09-09T16:53:01.000Z | # -*- coding=utf-8 -*-
"""
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import math
import argparse
import numpy as np
import paddle.fluid as fluid
from utils import tdm_sampler_prepare, tdm_child_prepare, trace_var
from train_network import DnnLayerClassifierNet, InputTransNet
| 39.760204 | 80 | 0.597203 |
a7855fa0e107181fe9f7c866727366717fbbb9d3 | 727 | py | Python | fixtures/requests.py | AzatAza/december-api-tests | dd120fd0c479b035dbe84ccd1fb1dd687d84af5d | [
"Apache-2.0"
] | null | null | null | fixtures/requests.py | AzatAza/december-api-tests | dd120fd0c479b035dbe84ccd1fb1dd687d84af5d | [
"Apache-2.0"
] | null | null | null | fixtures/requests.py | AzatAza/december-api-tests | dd120fd0c479b035dbe84ccd1fb1dd687d84af5d | [
"Apache-2.0"
] | null | null | null | import requests
from requests import Response
| 42.764706 | 119 | 0.645117 |
a7857bc199ab6450358c23073cebf9f0bd31bb0d | 352 | py | Python | rules_default/castervoice/lib/ctrl/mgr/grammar_container/base_grammar_container.py | MLH-Fellowship/LarynxCode | 840fee18c689a357052825607c27fc8e3e56571c | [
"MIT"
] | 1 | 2021-09-17T06:11:02.000Z | 2021-09-17T06:11:02.000Z | rules_default/castervoice/lib/ctrl/mgr/grammar_container/base_grammar_container.py | soma2000-lang/LarynxCode | 840fee18c689a357052825607c27fc8e3e56571c | [
"MIT"
] | 5 | 2021-02-03T05:29:41.000Z | 2021-02-08T01:14:11.000Z | rules_default/castervoice/lib/ctrl/mgr/grammar_container/base_grammar_container.py | soma2000-lang/LarynxCode | 840fee18c689a357052825607c27fc8e3e56571c | [
"MIT"
] | 4 | 2021-02-03T05:05:00.000Z | 2021-07-14T06:21:10.000Z | from castervoice.lib.ctrl.mgr.errors.base_class_error import DontUseBaseClassError
| 25.142857 | 82 | 0.75 |
a7871a31d1f892b28ff5af9f08dffdc9caf09213 | 262 | py | Python | main/urls.py | homata/snow_removing | c02585b8ceab3da107b932d6066c8b8344af1ff7 | [
"Apache-2.0"
] | 2 | 2018-12-05T01:03:10.000Z | 2019-03-16T04:27:03.000Z | main/urls.py | homata/snow_removing | c02585b8ceab3da107b932d6066c8b8344af1ff7 | [
"Apache-2.0"
] | null | null | null | main/urls.py | homata/snow_removing | c02585b8ceab3da107b932d6066c8b8344af1ff7 | [
"Apache-2.0"
] | 1 | 2018-12-04T14:18:08.000Z | 2018-12-04T14:18:08.000Z | from django.urls import include, path
from . import views
from django.views.generic.base import RedirectView
#
# https://docs.djangoproject.com/ja/2.0/intro/tutorial03/
app_name = 'main'
urlpatterns = [
path('', views.index, name='index'),
]
| 21.833333 | 57 | 0.736641 |
a7882585c7ab1245006e29c8a68efd228a0cc9dc | 1,114 | py | Python | server/server/urls.py | oSoc17/lopeningent_backend | 3e1c149038c3773f66dfbbc2f15ebd0692ecb4cd | [
"MIT"
] | 4 | 2017-07-04T15:18:59.000Z | 2017-07-08T10:48:37.000Z | server/server/urls.py | oSoc17/lopeningent_backend | 3e1c149038c3773f66dfbbc2f15ebd0692ecb4cd | [
"MIT"
] | 16 | 2017-07-04T15:36:41.000Z | 2017-10-18T07:47:45.000Z | server/server/urls.py | oSoc17/lopeningent_backend | 3e1c149038c3773f66dfbbc2f15ebd0692ecb4cd | [
"MIT"
] | null | null | null | """server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
# View modules implementing each endpoint group.
import interface.stats as stats
import interface.routes as route
import interface.pois as pois
urlpatterns = [
    # Runner statistics: fetch and update by user id.
    url(r'^stats/check/', stats.get_stats_from_id ),
    url(r'^stats/update/', stats.post_stats_from_id),
    # Route generation and lifecycle endpoints.
    url(r'^route/generate/', route.generate),
    url(r'^route/return/', route.return_home),
    url(r'^route/rate/', route.rate_route),
    # Points of interest lookups.
    url(r'^poi/coords/', pois.get_coords),
    url(r'^poi/types/', pois.get_types)
]
| 37.133333 | 79 | 0.701975 |
a7884b84cf2835ce8244b051ecf8f0adaa14e7d4 | 9,507 | py | Python | app/backend/knowledge_base_approach/python-client/swagger_client/api/default_api.py | e-lubrini/fake-news-detector | f2464e4cac73d9203e7483ac0aa5cd47ddfba811 | [
"MIT"
] | null | null | null | app/backend/knowledge_base_approach/python-client/swagger_client/api/default_api.py | e-lubrini/fake-news-detector | f2464e4cac73d9203e7483ac0aa5cd47ddfba811 | [
"MIT"
] | 1 | 2021-11-24T12:23:49.000Z | 2021-11-24T12:23:49.000Z | app/backend/knowledge_base_approach/python-client/swagger_client/api/default_api.py | e-lubrini/fake-news-detector | f2464e4cac73d9203e7483ac0aa5cd47ddfba811 | [
"MIT"
] | 1 | 2021-11-24T18:07:44.000Z | 2021-11-24T18:07:44.000Z | # coding: utf-8
"""
FRED API
FRED is a tool for automatically producing RDF/OWL ontologies and linked data from natural language sentences. The method is based on Combinatory Categorial Grammar, Discourse Representation Theory, Linguistic Frames, and Ontology Design Patterns. Results are enriched with NER and WSD. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
| 54.637931 | 305 | 0.664458 |
a788aafcac15ec56bc56e7dbc0349b85a1880056 | 1,496 | py | Python | geesedb/interpreter/metadata.py | informagi/GeeseDB | b502830cafbcba8676e7e779d13d5bc14ba842f9 | [
"MIT"
] | 12 | 2021-07-05T12:33:20.000Z | 2021-10-11T20:44:12.000Z | geesedb/interpreter/metadata.py | informagi/GeeseDB | b502830cafbcba8676e7e779d13d5bc14ba842f9 | [
"MIT"
] | 7 | 2021-07-28T20:40:36.000Z | 2021-10-12T12:31:51.000Z | geesedb/interpreter/metadata.py | informagi/GeeseDB | b502830cafbcba8676e7e779d13d5bc14ba842f9 | [
"MIT"
] | null | null | null | import json
from ..connection import get_connection
| 34.790698 | 90 | 0.584225 |
a78ab7709a2fb033bbbef0c592de69b2eb89f7f4 | 2,381 | py | Python | content_feeders/in.py | Giapa/ContentAggregator | 978c552406a770791cff435d41eb2bf135b5454d | [
"MIT"
] | null | null | null | content_feeders/in.py | Giapa/ContentAggregator | 978c552406a770791cff435d41eb2bf135b5454d | [
"MIT"
] | 2 | 2020-04-15T09:16:50.000Z | 2020-04-15T09:22:06.000Z | content_feeders/in.py | IEEEdiots/ContentAggregator | 978c552406a770791cff435d41eb2bf135b5454d | [
"MIT"
] | 1 | 2021-03-25T17:58:16.000Z | 2021-03-25T17:58:16.000Z | import requests
from bs4 import BeautifulSoup
if __name__ == '__main__':
crawl_page() | 34.014286 | 216 | 0.554389 |
a78b53e7326a1d9b30856a88ddc123ec056f3a2a | 18,573 | py | Python | resources/lib/database_tv.py | bradyemerson/plugin.video.showtimeanytime | 65e7f130c14c8ef963cb3669638b8cf14860ec82 | [
"Apache-2.0"
] | null | null | null | resources/lib/database_tv.py | bradyemerson/plugin.video.showtimeanytime | 65e7f130c14c8ef963cb3669638b8cf14860ec82 | [
"Apache-2.0"
] | null | null | null | resources/lib/database_tv.py | bradyemerson/plugin.video.showtimeanytime | 65e7f130c14c8ef963cb3669638b8cf14860ec82 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os.path
from datetime import date, datetime
from sqlite3 import dbapi2 as sqlite
from bs4 import BeautifulSoup
import simplejson as json
import xbmcvfs
import xbmcgui
import common
import connection
import database_common as db_common
# Path of the JSON file holding cached metadata about the TV database.
DB_META_FILE = os.path.join(common.__addonprofile__, 'tv.meta')
_database_meta = False
if xbmcvfs.exists(DB_META_FILE):
    # Load previously stored metadata from disk.
    f = open(DB_META_FILE, 'r')
    _database_meta = json.load(f)
    f.close()
else:
    # No metadata file yet; start with an empty dict.
    _database_meta = {}
# SQLite database file backing the TV data.
DB_FILE = os.path.join(common.__addonprofile__, 'tv.db')
if not xbmcvfs.exists(DB_FILE):
    # First run: connecting creates the file, then initialize it.
    _database = sqlite.connect(DB_FILE)
    _database.text_factory = str
    _database.row_factory = sqlite.Row
    # create() is defined elsewhere in this module — presumably builds
    # the schema; verify against the rest of the file.
    create()
else:
    _database = sqlite.connect(DB_FILE)
    _database.text_factory = str
    _database.row_factory = sqlite.Row
a78c1c68d2605e5b65a1772b489da024f926a771 | 16,450 | py | Python | apps/configuration/editions/base.py | sotkonstantinidis/testcircle | 448aa2148fbc2c969e60f0b33ce112d4740a8861 | [
"Apache-2.0"
] | 3 | 2019-02-24T14:24:43.000Z | 2019-10-24T18:51:32.000Z | apps/configuration/editions/base.py | sotkonstantinidis/testcircle | 448aa2148fbc2c969e60f0b33ce112d4740a8861 | [
"Apache-2.0"
] | 17 | 2017-03-14T10:55:56.000Z | 2022-03-11T23:20:19.000Z | apps/configuration/editions/base.py | sotkonstantinidis/testcircle | 448aa2148fbc2c969e60f0b33ce112d4740a8861 | [
"Apache-2.0"
] | 2 | 2016-02-01T06:32:40.000Z | 2019-09-06T04:33:50.000Z | import copy
from configuration.configuration import QuestionnaireConfiguration
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import F
from django.template.loader import render_to_string
from configuration.models import Configuration, Key, Value, Translation, \
Questiongroup, Category
def create_new_translation(
        self, translation_type, translation_keys: list=None,
        **data) -> Translation:
    """Create (or fetch) and return a translation entry.

    The free-form keyword arguments form the translation payload; it is
    stored once per key in *translation_keys*, or under the edition's
    ``self.translation_key`` when no explicit keys are given.
    """
    keys = translation_keys if translation_keys else [self.translation_key]
    payload = {key: data for key in keys}
    translation, _created = self.translation.objects.get_or_create(
        translation_type=translation_type, data=payload)
    return translation
def create_new_question(
        self, keyword: str, translation: dict or int, question_type: str,
        values: list=None, configuration: dict=None) -> Key:
    """
    Create or update and return a question (in DB terms, a key), with a
    translation.

    Args:
        keyword: unique keyword identifying the key.
        translation: either a dict used to create a new translation entry,
            or the pk of an existing translation.
        question_type: stored as 'type' inside the key configuration.
        values: optional list of Value objects to attach to the key.
        configuration: optional extra configuration for the key; not
            modified by this call.

    Returns:
        The created or updated Key.
    """
    if isinstance(translation, dict):
        translation_obj = self.create_new_translation(
            translation_type='key', **translation)
    else:
        translation_obj = self.translation.objects.get(pk=translation)
    # Copy the incoming dict so adding 'type' below does not mutate the
    # caller's configuration object (the original code updated it in place).
    configuration_data = dict(configuration) if configuration is not None else {}
    configuration_data.update({'type': question_type})
    try:
        # Update an existing key in place if the keyword is already taken.
        key = self.key.objects.get(keyword=keyword)
        key.translation = translation_obj
        key.configuration = configuration_data
        key.save()
    except ObjectDoesNotExist:
        key = self.key.objects.create(
            keyword=keyword,
            translation=translation_obj,
            configuration=configuration_data
        )
    if values is not None:
        # Only attach values that are not already linked to the key.
        existing_values = key.values.all()
        for new_value in values:
            if new_value not in existing_values:
                key.values.add(new_value)
    return key
def create_new_value(
        self, keyword: str, translation: dict or int, order_value: int=None,
        configuration: dict=None, configuration_editions: list=None) -> Value:
    """
    Create and return a new value, with a translation.

    Args:
        keyword: unique keyword identifying the value.
        translation: either a dict used to create a new translation entry,
            or the pk of an existing translation.
        order_value: optional ordering position of the value.
        configuration: optional configuration stored on the value.
        configuration_editions: translation keys forwarded when a new
            translation entry is created from *translation*.

    Returns:
        The created or updated Value.
    """
    if isinstance(translation, dict):
        translation_obj = self.create_new_translation(
            translation_type='value',
            translation_keys=configuration_editions, **translation)
    else:
        translation_obj = self.translation.objects.get(pk=translation)
    try:
        # Update the existing value in place if the keyword is taken.
        value = self.value.objects.get(keyword=keyword)
        value.translation = translation_obj
        value.order_value = order_value
        value.configuration = configuration
        value.save()
    except ObjectDoesNotExist:
        value = self.value.objects.create(
            keyword=keyword, translation=translation_obj,
            order_value=order_value, configuration=configuration)
    return value
def create_new_values_list(self, values_list: list) -> list:
    """Create and return a list of simple values.

    Each (keyword, label) pair in *values_list* becomes one value with an
    English label translation.
    """
    values = []
    for keyword, label in values_list:
        values.append(self.create_new_value(
            keyword=keyword,
            translation={
                'label': {
                    'en': label
                }
            }))
    return values
def add_new_value(
        self, question_keyword: str, value: Value, order_value: int=None):
    """
    Add a new value to an existing question.

    Args:
        question_keyword: keyword of the Key (question) to extend.
        value: the Value to attach.
        order_value: optional position at which the value is inserted.
    """
    key = self.key.objects.get(keyword=question_keyword)
    if order_value and not key.values.filter(pk=value.pk).exists():
        # If order_value is provided and the value was not yet added to the
        # question, update the ordering of the existing values: shift every
        # value at or after the insertion point one step back to make room.
        key.values.filter(
            order_value__gte=order_value
        ).update(
            order_value=F('order_value') + 1
        )
    key.values.add(value)
def find_in_data(self, path: tuple, **data: dict) -> dict:
    """Return the configuration element addressed by *path*.

    Each entry of *path* is a keyword. Level ``i`` of the path is looked
    up in the list stored under ``self.hierarchy[i]`` of the current
    element, so a path like ``('section_kw', 'category_kw')`` drills down
    through the nested configuration data::

        {"sections": [{"keyword": ..., "categories": [...]}, ...]}

    Raises:
        KeyError: when no element with the given keyword exists at some
            level of the hierarchy.
    """
    current = data
    for depth, keyword in enumerate(path):
        # List of candidate elements at this level of the hierarchy.
        candidates = current[self.hierarchy[depth]]
        current = next(
            (entry for entry in candidates if entry['keyword'] == keyword),
            None)
        if current is None:
            raise KeyError(
                'No element with keyword %s found in list of %s' % (
                    keyword, self.hierarchy[depth]))
    return current
def update_config_data(self, path: tuple, updated, level=0, **data):
    """Return a copy of the configuration data with one element replaced.

    Walks ``self.hierarchy`` level by level following *path*; the element
    matching the final path keyword is replaced by *updated*. Elements not
    on the path are carried over unchanged; the input data is not mutated.
    """
    hierarchy_key = self.hierarchy[level]
    # Deep-copy the surrounding structure, then rebuild the current level.
    result = copy.deepcopy(data)
    rebuilt = []
    for element in data[hierarchy_key]:
        if element['keyword'] != path[0]:
            rebuilt.append(element)
        elif len(path) > 1:
            # Descend into the matching element for the rest of the path.
            rebuilt.append(self.update_config_data(
                path=path[1:], updated=updated, level=level + 1, **element))
        else:
            rebuilt.append(updated)
    result[hierarchy_key] = rebuilt
    return result
def update_data(self, qg_keyword, q_keyword, updated, **data: dict) -> dict:
    """Replace the value of one question inside questionnaire data.

    For every questiongroup entry under *qg_keyword* that contains
    *q_keyword*, the question's value is set to *updated*. Data without
    that questiongroup is returned untouched.
    """
    groups = data.get(qg_keyword, [])
    if not groups:
        # Nothing to update for this questionnaire.
        return data
    for group in groups:
        if q_keyword in group:
            group[q_keyword] = updated
    data[qg_keyword] = list(groups)
    return data
def add_new_module(self, updated, **data: dict) -> dict:
    """Append a module to the questionnaire configuration data.

    *updated* is added to the list stored under
    ``self.hierarchy_modules``; data without any modules is returned
    unchanged.
    """
    key = self.hierarchy_modules
    existing = data.get(key, [])
    if not existing:
        # No module section present (or it is empty): leave data as-is.
        return data
    existing.append(updated)
    data[key] = existing
    return data
def append_translation(self, update_pk: int, **data):
    """
    Helper to append texts (for choices, checkboxes, labels, etc.).

    Merges the given keyword arguments into the existing translation's
    JSON data and saves it.

    Args:
        update_pk: primary key of the Translation to extend.
    """
    obj = self.translation.objects.get(pk=update_pk)
    obj.data.update(data)
    obj.save()
class Operation:
    """Bundle of an edition operation: configuration transform + release note.

    Wrapping every operation in this one class keeps them uniform, so they
    can be extended or modified in a single place.
    """

    # Template used to render the release note unless overridden.
    default_template = 'configuration/partials/release_note.html'

    def __init__(self, transform_configuration: callable, release_note: str, **kwargs):
        """
        Args:
            transform_configuration: callable applied to the configuration
                data.
            release_note: human-readable description of the change.

        Keyword Args:
            template_name: alternative template for rendering the note.
            transform_questionnaire: callable used to transform
                questionnaire data, e.g. for deleted/moved questions.
        """
        self.release_note = release_note
        self.transform_configuration = transform_configuration
        self.transform_questionnaire = kwargs.get('transform_questionnaire')
        self.template_name = kwargs.get('template_name', self.default_template)
| 35.376344 | 101 | 0.599392 |
a78ce58146e32ab5bc583a0b5ea144d7df99f985 | 10,152 | py | Python | EyePatterns/main_test_all_clusters.py | Sale1996/Pattern-detection-of-eye-tracking-scanpaths | 15c832f26dce98bb95445f9f39f454f99bbb6029 | [
"MIT"
] | 1 | 2021-12-07T08:02:30.000Z | 2021-12-07T08:02:30.000Z | EyePatterns/main_test_all_clusters.py | Sale1996/Pattern-detection-of-eye-tracking-scanpaths | 15c832f26dce98bb95445f9f39f454f99bbb6029 | [
"MIT"
] | null | null | null | EyePatterns/main_test_all_clusters.py | Sale1996/Pattern-detection-of-eye-tracking-scanpaths | 15c832f26dce98bb95445f9f39f454f99bbb6029 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import distance
from matplotlib import style
from clustering_algorithms.affinity_propagation import AffinityPropagation
from clustering_algorithms.custom_k_means import KMeans
from clustering_algorithms.custom_mean_shift import MeanShift
from clustering_algorithms.custom_mean_shift_string_edition import MeanShiftStringEdition
from clustering_algorithms.dbscan import DbScan
from prepare_data.format_sequences import format_sequences_from_student
from utils.e_mine import e_mine_find_common_scanpath
from utils.string_compare_algorithm import levenstein_sequence_similarity, is_string_similar, needleman_wunsch, \
needleman_wunsch_with_penalty
import numpy as np
# def initialize_2D_number_data_and_plot_them():
# number_data = np.array([[1, 2], [1.5, 1.8], [5, 8], [8, 8], [1, 0.6], [9, 11], [8, 2], [10, 2], [9, 3]])
# # plot data
# plt.scatter(number_data[:, 0], number_data[:, 1])
# plt.show()
# return number_data
#
#
# def test_k_means_with_numbers_then_plot_results():
# clf = KMeans(k=3)
# clf.fit(number_data)
#
# for centroid in clf.centroids:
# plt.scatter(clf.centroids[centroid][0], clf.centroids[centroid][1],
# marker="o", color="k", s=150, linewidths=5)
#
# for classification in clf.classifications:
# color = colors[classification]
# for featureset in clf.classifications[classification]:
# plt.scatter(featureset[0], featureset[1], marker="x", color=color,
# s=150, linewidths=5)
# plt.show()
#
#
# def test_mean_shift_with_numbers_then_plot_results():
# clf_ms = MeanShift()
# clf_ms.fit(number_data)
# plt.scatter(number_data[:, 0], number_data[:, 1], s=150)
# centroids = clf_ms.centroids
# for c in centroids:
# plt.scatter(centroids[c][0], centroids[c][1], color='k', marker="*", s=150)
# plt.show()
'''
1# Initialize number collection and plot style
'''
# style.use('ggplot')
# number_data = initialize_2D_number_data_and_plot_them()
# colors = 10 * ["g", "r", "c", "b", "k"]
'''
Test classification algorithms with numbers
'''
# test_k_means_with_numbers_then_plot_results()
# test_mean_shift_with_numbers_then_plot_results()
'''
2# Initialize string collection and print description on printed form
'''
student_name = "student_1"
string_data = initialize_string_sequences(student_name)
print_description()
'''
Test classification algorithms with strings
'''
test_and_print_results_string_k_means_with_levenshtein_distance()
test_and_print_results_string_k_means_with_needleman_wunsch_distance()
test_and_print_results_string_k_means_with_needleman_wunsch_distance_with_extra_penalty_points()
test_and_print_results_string_mean_shift_with_levenshtein_distance()
test_and_print_results_string_mean_shift_with_needleman_wunsch_distance()
test_and_print_results_string_mean_shift_with_needleman_wunsch_distance_with_extra_penalty_points()
test_and_print_results_string_affinity_propagation_with_levenstein_distance()
test_and_print_results_string_affinity_propagation_with_needleman_wunsch_distance()
test_and_print_results_string_affinity_propagation_with_needleman_wunsch_distance_with_extra_penalty_points()
test_and_print_results_string_db_scan_with_levenstein_distance()
test_and_print_results_string_db_scan_with_needleman_wunsch_distance()
test_and_print_results_string_db_scan_with_needleman_wunsch_distance_with_extra_penalty_points() | 42.476987 | 123 | 0.775611 |
a78d7f529a85265c767d731a1463e302ccbc27fe | 2,381 | py | Python | src/onevision/cv/imgproc/color/integer.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | 2 | 2022-03-28T09:46:38.000Z | 2022-03-28T14:12:32.000Z | src/onevision/cv/imgproc/color/integer.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | null | null | null | src/onevision/cv/imgproc/color/integer.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | null | null | null | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""Conversion between single-channel integer value to 3-channels color image.
Mostly used for semantic segmentation.
"""
from __future__ import annotations
import numpy as np
import torch
from multipledispatch import dispatch
from torch import Tensor
from onevision.cv.core import get_num_channels
from onevision.cv.core import to_channel_first
from onevision.type import TensorOrArray
__all__ = [
"integer_to_color",
"is_color_image",
]
# MARK: - Functional
def _integer_to_color(image: np.ndarray, colors: list) -> np.ndarray:
"""Convert the integer-encoded image to color image. Fill an image with
labels' colors.
Args:
image (np.ndarray):
An image in either one-hot or integer.
colors (list):
List of all colors.
Returns:
color (np.ndarray):
Colored image.
"""
if len(colors) <= 0:
raise ValueError(f"No colors are provided.")
# NOTE: Convert to channel-first
image = to_channel_first(image)
# NOTE: Squeeze dims to 2
if image.ndim == 3:
image = np.squeeze(image)
# NOTE: Draw color
r = np.zeros_like(image).astype(np.uint8)
g = np.zeros_like(image).astype(np.uint8)
b = np.zeros_like(image).astype(np.uint8)
for l in range(0, len(colors)):
idx = image == l
r[idx] = colors[l][0]
g[idx] = colors[l][1]
b[idx] = colors[l][2]
rgb = np.stack([r, g, b], axis=0)
return rgb
def is_color_image(image: TensorOrArray) -> bool:
"""Check if the given image is color encoded."""
if get_num_channels(image) in [3, 4]:
return True
return False
| 25.602151 | 77 | 0.640067 |
a78db64f92c9f41c5d84dd1c53250b84b8159383 | 5,932 | py | Python | doepy/problem_instance.py | scwolof/doepy | acb2cad95428de2c14b28563cff1aa30679e1f39 | [
"MIT"
] | 1 | 2020-04-23T13:43:35.000Z | 2020-04-23T13:43:35.000Z | doepy/problem_instance.py | scwolof/doepy | acb2cad95428de2c14b28563cff1aa30679e1f39 | [
"MIT"
] | null | null | null | doepy/problem_instance.py | scwolof/doepy | acb2cad95428de2c14b28563cff1aa30679e1f39 | [
"MIT"
] | 1 | 2021-06-13T14:38:32.000Z | 2021-06-13T14:38:32.000Z | """
MIT License
Copyright (c) 2019 Simon Olofsson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
| 34.289017 | 78 | 0.658968 |
a78f4a33fda334438866cc5eacb65a1aca2c29e8 | 1,831 | py | Python | snuba/datasets/dataset_schemas.py | Appva/snuba | 988a4312fc9c107bc735fb2295e269b01ef2dea4 | [
"Apache-2.0"
] | null | null | null | snuba/datasets/dataset_schemas.py | Appva/snuba | 988a4312fc9c107bc735fb2295e269b01ef2dea4 | [
"Apache-2.0"
] | null | null | null | snuba/datasets/dataset_schemas.py | Appva/snuba | 988a4312fc9c107bc735fb2295e269b01ef2dea4 | [
"Apache-2.0"
] | null | null | null | from typing import Optional, List, Sequence, Union
from snuba.datasets.schemas import Schema
from snuba.datasets.schemas.tables import TableSchema, WritableTableSchema
| 33.290909 | 100 | 0.677226 |
a790c9288954c501a2b40dde1e0f624366ddda8c | 3,039 | py | Python | Benchmarking/benchmark_alphabet_increase.py | icezyclon/AALpy | 3c2f05fdbbcdc99b47ba6b918540239568fca17f | [
"MIT"
] | 61 | 2021-04-01T10:38:52.000Z | 2022-03-28T13:44:23.000Z | Benchmarking/benchmark_alphabet_increase.py | icezyclon/AALpy | 3c2f05fdbbcdc99b47ba6b918540239568fca17f | [
"MIT"
] | 16 | 2021-04-03T20:14:08.000Z | 2022-02-16T10:21:48.000Z | Benchmarking/benchmark_alphabet_increase.py | haubitzer/AALpy | e5b51742d886d5c5c72ab3e9c20eb349c56e2469 | [
"MIT"
] | 9 | 2021-04-05T13:43:17.000Z | 2022-03-09T14:06:17.000Z | from statistics import mean
"""Benchmark L* learning time as the input alphabet grows.

For a fixed number of automaton states, repeatedly generate random DFAs,
Mealy machines and Moore machines, learn each with L* (Rivest-Schapire
counterexample processing) against a random-walk equivalence oracle, and
record the mean learning time per alphabet size in a CSV file.
"""
import csv
from statistics import mean

from aalpy.SULs import DfaSUL, MealySUL, MooreSUL
from aalpy.learning_algs import run_Lstar
from aalpy.oracles import RandomWalkEqOracle
from aalpy.utils import (generate_random_dfa, generate_random_mealy_machine,
                         generate_random_moore_machine)

num_states = 1000      # size of every generated automaton
alph_size = 5          # initial alphabet size; grows by 5 each round
repeat = 10            # experiments averaged per alphabet size
num_increases = 20     # number of alphabet sizes to benchmark

cex_processing = 'rs'  # Rivest-Schapire counterexample processing

# CSV rows: a label cell followed by one value per benchmarked alphabet size.
states = ['alph_size']
times_dfa = ['dfa_pypy_rs']
times_mealy = ['mealy_pypy_rs']
times_moore = ['moore_pypyrs']

for i in range(num_increases):
    print(i)
    # Record the alphabet size actually used in this round *before* it is
    # incremented, so the 'alph_size' row stays aligned with the timing rows.
    # (Appending the incremented size at the end of the loop produced an
    # extra trailing column for a size that was never benchmarked.)
    states.append(alph_size)

    total_time_dfa = []
    total_time_mealy = []
    total_time_moore = []

    for _ in range(repeat):
        alphabet = list(range(alph_size))

        # --- DFA ---
        dfa = generate_random_dfa(num_states, alphabet=alphabet,
                                  num_accepting_states=num_states // 2)
        sul = DfaSUL(dfa)
        eq_oracle = RandomWalkEqOracle(alphabet, sul, num_steps=10000, reset_prob=0.09)
        _, data = run_Lstar(alphabet, sul, eq_oracle, cex_processing=cex_processing,
                            cache_and_non_det_check=False,
                            return_data=True, automaton_type='dfa')
        total_time_dfa.append(data['learning_time'])
        # Release the large learned structures before the next experiment.
        del dfa, sul, eq_oracle

        # --- Mealy machine ---
        mealy = generate_random_mealy_machine(num_states, input_alphabet=alphabet,
                                              output_alphabet=alphabet)
        sul_mealy = MealySUL(mealy)
        eq_oracle = RandomWalkEqOracle(alphabet, sul_mealy, num_steps=10000, reset_prob=0.09)
        _, data = run_Lstar(alphabet, sul_mealy, eq_oracle, cex_processing=cex_processing,
                            cache_and_non_det_check=False,
                            return_data=True, automaton_type='mealy')
        total_time_mealy.append(data['learning_time'])
        del mealy, sul_mealy, eq_oracle

        # --- Moore machine ---
        moore = generate_random_moore_machine(num_states, input_alphabet=alphabet,
                                              output_alphabet=alphabet)
        moore_sul = MooreSUL(moore)
        eq_oracle = RandomWalkEqOracle(alphabet, moore_sul, num_steps=10000, reset_prob=0.09)
        _, data = run_Lstar(alphabet, moore_sul, eq_oracle, cex_processing=cex_processing,
                            cache_and_non_det_check=False,
                            return_data=True, automaton_type='moore')
        total_time_moore.append(data['learning_time'])
        del moore, moore_sul, eq_oracle

    alph_size += 5

    # Keep the per-round averages, rounded to 4 decimals.
    times_dfa.append(round(mean(total_time_dfa), 4))
    times_mealy.append(round(mean(total_time_mealy), 4))
    times_moore.append(round(mean(total_time_moore), 4))

with open('increasing_alphabet_experiments.csv', 'w') as f:
    wr = csv.writer(f, dialect='excel')
    wr.writerow(states)
    wr.writerow(times_dfa)
    wr.writerow(times_mealy)
    wr.writerow(times_moore)
| 35.337209 | 115 | 0.699901 |
a7910f32dd12a019dc980eaf9b89d7426fb179b4 | 2,505 | py | Python | makehuman-master/makehuman/plugins/9_export_obj/mh2obj.py | Radiian-Arts-Main/Radiian-Arts-BioSource | 51e08da0b3171fe96badc68780fd0f3381d49738 | [
"MIT"
] | 1 | 2022-03-12T03:52:55.000Z | 2022-03-12T03:52:55.000Z | makehuman-master/makehuman/plugins/9_export_obj/mh2obj.py | Phantori/Radiian-Arts-BioSource | 51e08da0b3171fe96badc68780fd0f3381d49738 | [
"MIT"
] | null | null | null | makehuman-master/makehuman/plugins/9_export_obj/mh2obj.py | Phantori/Radiian-Arts-BioSource | 51e08da0b3171fe96badc68780fd0f3381d49738 | [
"MIT"
] | 3 | 2020-05-10T16:11:23.000Z | 2021-05-30T02:11:28.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
**Project Name:** MakeHuman
**Product Home Page:** http://www.makehumancommunity.org/
**Github Code Home Page:** https://github.com/makehumancommunity/
**Authors:** Thomas Larsson, Jonas Hauquier
**Copyright(c):** MakeHuman Team 2001-2019
**Licensing:** AGPL3
This file is part of MakeHuman (www.makehumancommunity.org).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Abstract
--------
Exports proxy mesh to obj
"""
import wavefront
import os
from progress import Progress
import numpy as np
#
# exportObj(human, filepath, config):
#
| 32.960526 | 99 | 0.686228 |
a79274aaddcc40eb1292cf7717dedc453646ab72 | 3,245 | py | Python | util/plot_model.py | libccy/inv.cu | bab31a704b24888a99e07148b60266ff703f0968 | [
"MIT"
] | null | null | null | util/plot_model.py | libccy/inv.cu | bab31a704b24888a99e07148b60266ff703f0968 | [
"MIT"
] | null | null | null | util/plot_model.py | libccy/inv.cu | bab31a704b24888a99e07148b60266ff703f0968 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
from os.path import exists
import numpy as np
import pylab
import scipy.interpolate
def read_fortran(filename):
    """Read a Fortran unformatted-record binary file into a numpy array.

    The file layout is a 4-byte int32 record-length marker, a float32
    payload, and a trailing 4-byte marker; the trailing marker (read as a
    bogus float) is stripped from the returned array.
    """
    with open(filename, 'rb') as stream:
        # Leading record marker (payload size in bytes); the value itself
        # is not used, only skipped over.
        stream.seek(0)
        np.fromfile(stream, dtype='int32', count=1)[0]
        # Everything after the 4-byte marker is float32 data.
        stream.seek(4)
        payload = np.fromfile(stream, dtype='float32')
    # Drop the trailing record marker reinterpreted as a float.
    return payload[:-1]
def mesh2grid(v, x, z):
    """Interpolate values from unstructured mesh coordinates onto a
    structured rectangular grid.

    Parameters:
        v: values defined at the mesh points (1-D array).
        x, z: mesh point coordinates (1-D arrays, same length as v).

    Returns:
        (V, x, z): the gridded values as an (nz, nx) array, plus the 1-D
        grid axes used for the interpolation.
    """
    lx = x.max() - x.min()
    lz = z.max() - z.min()
    nn = v.size
    mesh = _stack(x, z)

    # Choose grid dimensions so cells are roughly square and the total
    # number of grid nodes is close to the number of mesh points.
    # Cast to int: np.around returns a float, and np.linspace/np.reshape
    # require an integer number of samples (a float raises TypeError on
    # modern numpy).
    nx = int(np.around(np.sqrt(nn * lx / lz)))
    nz = int(np.around(np.sqrt(nn * lz / lx)))

    # construct structured grid
    x = np.linspace(x.min(), x.max(), nx)
    z = np.linspace(z.min(), z.max(), nz)
    X, Z = np.meshgrid(x, z)
    grid = _stack(X.flatten(), Z.flatten())

    # interpolate to structured grid
    V = scipy.interpolate.griddata(mesh, v, grid, 'linear')

    # Linear interpolation leaves NaNs outside the convex hull of the
    # mesh; patch those entries with nearest-neighbour values.
    nan_mask = np.isnan(V)
    if np.any(nan_mask):
        W = scipy.interpolate.griddata(mesh, v, grid, 'nearest')
        V[nan_mask] = W[nan_mask]

    return np.reshape(V, (nz, nx)), x, z
if __name__ == '__main__':
    """ Plots data on 2-D unstructured mesh
    Modified from a script for specfem2d:
    http://tigress-web.princeton.edu/~rmodrak/visualize/plot2d
    Can be used to plot models or kernels created by inv.cu
    SYNTAX
    plot_model.py folder_name component_name||file_name (time_step)
    e.g. ./plot_model.py output vx 1000
    ./plot_model.py output proc001000_vx.bin
    ./plot_model.py example/model/checker vs
    """
    # Optional third argument: time step, zero-padded to six digits.
    if len(sys.argv) > 3:
        istr = str(sys.argv[3]).zfill(6)
    else:
        istr = '000000'

    # Mesh coordinate files written by the solver; these must exist.
    folder = sys.argv[1]
    x_coords_file = '%s/proc000000_x.bin' % folder
    z_coords_file = '%s/proc000000_z.bin' % folder
    assert exists(x_coords_file)
    assert exists(z_coords_file)

    # Resolve the database file: try the name as given, then with a
    # '.bin' suffix, then as 'proc<istr>_<name>.bin'.
    name = sys.argv[2]
    database_file = "%s/%s" % (folder, name)
    if not exists(database_file):
        database_file = "%s/%s.bin" % (folder, name)
    if not exists(database_file):
        database_file = "%s/proc%s_%s.bin" % (folder, istr, name)
    assert exists(database_file)

    # read mesh coordinates
    x = read_fortran(x_coords_file)
    z = read_fortran(z_coords_file)

    # read database file
    try:
        v = read_fortran(database_file)
    except:
        raise Exception('Error reading database file: %s' % database_file)

    # check mesh dimensions
    assert x.shape == z.shape == v.shape, 'Inconsistent mesh dimensions.'

    # interpolate to uniform rectangular grid
    V, X, Z = mesh2grid(v, x, z)

    # Display the figure with axes labelled in kilometres (coordinates
    # are in metres, hence the / 1e3 on tick labels).
    pylab.pcolor(X, Z, V)
    for lo, hi, setter in ((X.min(), X.max(), pylab.xticks),
                           (Z.min(), Z.max(), pylab.yticks)):
        locs = np.arange(lo, hi + 1, (hi - lo) / 5)
        setter(locs, map(lambda t: "%g" % t, locs / 1e3))
    pylab.colorbar()
    pylab.xlabel('x / km')
    pylab.ylabel('z / km')
    pylab.gca().invert_yaxis()
    pylab.show()
| 24.216418 | 73 | 0.659476 |