hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c4667c374455b11e101ec3e8d25bd29cd21c3a81 | 3,965 | py | Python | tests/downloader_test.py | jkawamoto/roadie-gcp | 96394a47d375bd01e167f351fc86a03905e98395 | [
"MIT"
] | 1 | 2018-09-20T01:51:23.000Z | 2018-09-20T01:51:23.000Z | tests/downloader_test.py | jkawamoto/roadie-gcp | 96394a47d375bd01e167f351fc86a03905e98395 | [
"MIT"
] | 9 | 2016-01-31T11:28:12.000Z | 2021-04-30T20:43:39.000Z | tests/downloader_test.py | jkawamoto/roadie-gcp | 96394a47d375bd01e167f351fc86a03905e98395 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
#
# downloader_test.py
#
# Copyright (c) 2015-2016 Junpei Kawamoto
#
# This software is released under the MIT License.
#
# http://opensource.org/licenses/mit-license.php
#
""" Test for downloader module.
"""
import logging
import shutil
import sys
import unittest
import os
from os import path
import downloader # pylint: disable=import-error
TARGET_FILE = "bin/entrypoint.sh"
SAMPLE_FILE = "https://raw.githubusercontent.com/jkawamoto/roadie-gcp/master/bin/entrypoint.sh"
ORIGINAL_FILE = path.normpath(
path.join(path.dirname(__file__), "..", TARGET_FILE))
ARCHIVE_ROOT = "./roadie-gcp-20160618"
ZIP_FILE = "https://github.com/jkawamoto/roadie-gcp/archive/v20160618.zip"
TAR_FILE = "https://github.com/jkawamoto/roadie-gcp/archive/v20160618.tar.gz"
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, stream=sys.stderr)
unittest.main()
| 30.037879 | 95 | 0.642371 |
c466ca50010615bb02d62529ff22d41f7530666b | 1,800 | py | Python | ticle/plotters/plot_phase.py | muma7490/TICLE | bffa64ee488abac17809d02dfc176fe80128541a | [
"MIT"
] | null | null | null | ticle/plotters/plot_phase.py | muma7490/TICLE | bffa64ee488abac17809d02dfc176fe80128541a | [
"MIT"
] | null | null | null | ticle/plotters/plot_phase.py | muma7490/TICLE | bffa64ee488abac17809d02dfc176fe80128541a | [
"MIT"
] | null | null | null | import matplotlib.pyplot as pl
import os
import numpy as np
from ticle.data.dataHandler import normalizeData,load_file
from ticle.analysis.analysis import get_phases,normalize_phase
pl.rc('xtick', labelsize='x-small')
pl.rc('ytick', labelsize='x-small')
pl.rc('font', family='serif')
pl.rcParams.update({'font.size': 20})
pl.tight_layout()
path = os.getcwd()
phase_dir = f"{path}/results/phase_plots"
try:
os.makedirs(phase_dir)
except FileExistsError:
pass
data_dir = f"{path}/data/"
data_list_file = f"{data_dir}/dataList.txt"
data_list = np.loadtxt(data_list_file)
for data in data_list:
star = f"0{int(data[0])}"
file_name = f"{data_dir}/{star}/{star}_LC_destepped.txt"
res_dir = f"{phase_dir}/{star}"
try:
os.mkdir(res_dir)
except FileExistsError:
pass
t_series = load_file(file_name)
t_series = normalizeData(t_series)
p = [(f"Phaseplot {star} - literature","literature",data[2]),
(f"Phaseplot {star} - P={data[1]} days",f"result",data[1])]
for title,save_text,period in p:
masks = get_phases(t_series,period)
fig_phase = pl.figure(figsize=(10,7))
for i in masks:
plot_data = normalize_phase(np.array((t_series[0][i],t_series[1][i])))
pl.plot(plot_data[0],plot_data[1],linewidth = 1)
pl.xlabel("Phase")
pl.ylabel("Flux")
pl.title(title)
fig_phase.savefig(f"{res_dir}/{star}_{save_text}_phase_.pdf")
fig_lightcurve = pl.figure(figsize=(10,7))
for i in masks:
pl.plot(t_series[0][i],t_series[1][i],linewidth = 1)
pl.xlabel("Period(days)")
pl.ylabel("Flux")
pl.title(f"{star} Lightcurve {save_text}")
fig_lightcurve.savefig(f"{res_dir}/{star}_{save_text}_lightcurve.pdf") | 27.692308 | 82 | 0.648889 |
c467d3e82cd1949de48c0e1eac654f4ecca276b3 | 7,267 | py | Python | src/putil/rabbitmq/rabbit_util.py | scionrep/scioncc_new | 086be085b69711ee24c4c86ed42f2109ca0db027 | [
"BSD-2-Clause"
] | 2 | 2015-10-05T20:36:35.000Z | 2018-11-21T11:45:24.000Z | src/putil/rabbitmq/rabbit_util.py | scionrep/scioncc_new | 086be085b69711ee24c4c86ed42f2109ca0db027 | [
"BSD-2-Clause"
] | 21 | 2015-03-18T14:39:32.000Z | 2016-07-01T17:16:29.000Z | src/putil/rabbitmq/rabbit_util.py | scionrep/scioncc_new | 086be085b69711ee24c4c86ed42f2109ca0db027 | [
"BSD-2-Clause"
] | 12 | 2015-03-18T10:53:49.000Z | 2018-06-21T11:19:57.000Z | #!/usr/bin/python
import shlex
import simplejson
from putil.rabbitmq.rabbitmqadmin import Management, make_parser, LISTABLE, DELETABLE
# TODO: Move the management calls from pyon.ion.exchange here
# -------------------------------------------------------------------------
# Helpers
# This function works on exchange, queue, vhost, user
| 42.00578 | 126 | 0.610706 |
c4692b2cd0fdba89e13d15c53467b6b2f916be48 | 5,362 | py | Python | gaternet/main.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-13T21:48:52.000Z | 2022-03-13T21:48:52.000Z | gaternet/main.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | null | null | null | gaternet/main.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-30T07:20:29.000Z | 2022-03-30T07:20:29.000Z | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loads a GaterNet checkpoint and tests on Cifar-10 test set."""
import argparse
import io
import os
from backbone_resnet import Network as Backbone
from gater_resnet import Gater
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
def load_from_state(state_dict, model):
"""Loads the state dict of a checkpoint into model."""
tem_dict = dict()
for k in state_dict.keys():
tem_dict[k.replace('module.', '')] = state_dict[k]
state_dict = tem_dict
ckpt_key = set(state_dict.keys())
model_key = set(model.state_dict().keys())
print('Keys not in current model: {}\n'.format(ckpt_key - model_key))
print('Keys not in checkpoint: {}\n'.format(model_key - ckpt_key))
model.load_state_dict(state_dict, strict=True)
print('Successfully reload from state.')
return model
def test(backbone, gater, device, test_loader):
"""Tests the model on a test set."""
backbone.eval()
gater.eval()
loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
gate = gater(data)
output = backbone(data, gate)
loss += F.cross_entropy(output, target, size_average=False).item()
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
loss /= len(test_loader.dataset)
acy = 100. * correct / len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\n'.format(
loss, correct, len(test_loader.dataset), acy))
return acy
def run(args, device, test_loader):
"""Loads checkpoint into GaterNet and runs test on the test data."""
with open(args.checkpoint_file, 'rb') as fin:
inbuffer = io.BytesIO(fin.read())
state_dict = torch.load(inbuffer, map_location='cpu')
print('Successfully load checkpoint file.\n')
backbone = Backbone(depth=args.backbone_depth, num_classes=10)
print('Loading checkpoint weights into backbone.')
backbone = load_from_state(state_dict['backbone_state_dict'], backbone)
backbone = nn.DataParallel(backbone).to(device)
print('Backbone is ready after loading checkpoint and moving to device:')
print(backbone)
n_params_b = sum(
[param.view(-1).size()[0] for param in backbone.parameters()])
print('Number of parameters in backbone: {}\n'.format(n_params_b))
gater = Gater(depth=20,
bottleneck_size=8,
gate_size=backbone.module.gate_size)
print('Loading checkpoint weights into gater.')
gater = load_from_state(state_dict['gater_state_dict'], gater)
gater = nn.DataParallel(gater).to(device)
print('Gater is ready after loading checkpoint and moving to device:')
print(gater)
n_params_g = sum(
[param.view(-1).size()[0] for param in gater.parameters()])
print('Number of parameters in gater: {}'.format(n_params_g))
print('Total number of parameters: {}\n'.format(n_params_b + n_params_g))
print('Running test on test data.')
test(backbone, gater, device, test_loader)
def parse_flags():
"""Parses input arguments."""
parser = argparse.ArgumentParser(description='GaterNet')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--backbone-depth', type=int, default=20,
help='resnet depth of the backbone subnetwork')
parser.add_argument('--checkpoint-file', type=str, default=None,
help='checkpoint file to run test')
parser.add_argument('--data-dir', type=str, default=None,
help='the directory for storing data')
args = parser.parse_args()
return args
if __name__ == '__main__':
main(parse_flags())
| 35.276316 | 79 | 0.693398 |
c46ae74020d50b1e15aaa99acf255cf154208cb8 | 251 | pyw | Python | client.pyw | thatfuckingbird/hydrus-websocket-server | b55454740dca5101448bf92224432f8bdbec7e77 | [
"WTFPL"
] | 1,417 | 2015-01-22T00:50:30.000Z | 2022-03-30T18:44:55.000Z | client.pyw | thatfuckingbird/hydrus-websocket-server | b55454740dca5101448bf92224432f8bdbec7e77 | [
"WTFPL"
] | 975 | 2015-01-05T01:41:40.000Z | 2022-03-31T06:01:50.000Z | client.pyw | thatfuckingbird/hydrus-websocket-server | b55454740dca5101448bf92224432f8bdbec7e77 | [
"WTFPL"
] | 163 | 2015-02-04T13:09:35.000Z | 2022-03-23T01:00:05.000Z | #!/usr/bin/env python3
# Hydrus is released under WTFPL
# You just DO WHAT THE FUCK YOU WANT TO.
# https://github.com/sirkris/WTFPL/blob/master/WTFPL.md
from hydrus import hydrus_client
if __name__ == '__main__':
hydrus_client.boot()
| 19.307692 | 55 | 0.709163 |
c46b9bf38daa8aa62af17faaff944dc07ddd1de9 | 5,776 | py | Python | fixEngine/fixEngine.py | HNGlez/ExchangeConnector | 5176437963a3e9e671bb059c599c79f39439f4d4 | [
"MIT"
] | null | null | null | fixEngine/fixEngine.py | HNGlez/ExchangeConnector | 5176437963a3e9e671bb059c599c79f39439f4d4 | [
"MIT"
] | null | null | null | fixEngine/fixEngine.py | HNGlez/ExchangeConnector | 5176437963a3e9e671bb059c599c79f39439f4d4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ExchangeConnector fixEngine
Copyright (c) 2020 Hugo Nistal Gonzalez
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import asyncio
import simplefix
import threading
import logging
import time
import sys
import configparser
from fixClientMessages import FixClientMessages
from connectionHandler import FIXConnectionHandler, SocketConnectionState
| 46.208 | 179 | 0.691136 |
c46bcfd7797c21307852fe37265fa68fac0dbbc3 | 570 | py | Python | plugins/session_list/views.py | farazkhanfk7/ajenti | ff51635bea0d29bf9f35dd7912f145398040541d | [
"MIT"
] | 1 | 2021-04-27T07:16:01.000Z | 2021-04-27T07:16:01.000Z | plugins/session_list/views.py | farazkhanfk7/ajenti | ff51635bea0d29bf9f35dd7912f145398040541d | [
"MIT"
] | null | null | null | plugins/session_list/views.py | farazkhanfk7/ajenti | ff51635bea0d29bf9f35dd7912f145398040541d | [
"MIT"
] | null | null | null | from jadi import component
from aj.api.http import url, HttpPlugin
from aj.auth import authorize
from aj.api.endpoint import endpoint, EndpointError
import aj
import gevent
| 25.909091 | 53 | 0.687719 |
c46cb76d02d71b063cedf52c09eb7f327cd308da | 10,606 | py | Python | now/collection/prov_execution/argument_captors.py | CrystalMei/Prov_Build | 695576c36b7d5615f1cc568954658f8a7ce9eeba | [
"MIT"
] | 2 | 2017-11-10T16:17:11.000Z | 2021-12-19T18:43:22.000Z | now/collection/prov_execution/argument_captors.py | CrystalMei/Prov_Build | 695576c36b7d5615f1cc568954658f8a7ce9eeba | [
"MIT"
] | null | null | null | now/collection/prov_execution/argument_captors.py | CrystalMei/Prov_Build | 695576c36b7d5615f1cc568954658f8a7ce9eeba | [
"MIT"
] | null | null | null | # Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# Copyright (c) 2018, 2019, 2020 President and Fellows of Harvard College.
# This file is part of ProvBuild.
"""Capture arguments from calls"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
import weakref
import itertools
import inspect
from future.utils import viewitems
from ...utils.functions import abstract
from ..prov_definition.utils import ClassDef, Assert, With, Decorator
WITHOUT_PARAMS = (ClassDef, Assert, With)
| 36.826389 | 127 | 0.581463 |
c46dc4849d73685f3bf2bf7edc6ed45dee20d695 | 307 | py | Python | Python/Day8 DictionariesAndMaps.py | codePerfectPlus/30-DaysOfCode-With-Python-And-JavaScript | 570fa12ed30659fa394d86e12583b69f35a2e7a7 | [
"MIT"
] | 8 | 2020-08-03T01:53:13.000Z | 2022-01-09T14:47:58.000Z | Python/Day8 DictionariesAndMaps.py | codePerfectPlus/30-DaysOfCode-With-Python-And-JavaScript | 570fa12ed30659fa394d86e12583b69f35a2e7a7 | [
"MIT"
] | null | null | null | Python/Day8 DictionariesAndMaps.py | codePerfectPlus/30-DaysOfCode-With-Python-And-JavaScript | 570fa12ed30659fa394d86e12583b69f35a2e7a7 | [
"MIT"
] | 4 | 2020-09-29T11:28:53.000Z | 2021-06-02T15:34:55.000Z | N = int(input())
entry = [input().split() for _ in range(N)]
phoneBook = {name: number for name, number in entry}
while True:
try:
name = input()
if name in phoneBook:
print(f"{name}={phoneBook[name]}")
else:
print("Not found")
except:
break
| 21.928571 | 52 | 0.534202 |
c46f3c278fa8309cddd52d6eeccf2dae6ea924e2 | 1,850 | py | Python | 10. Recurrent Neural Network/10-1) Recurrent Neural Network, RNN.py | choijiwoong/-ROKA-torch-tutorial-files | c298fdf911cd64757895c3ab9f71ae7c3467c545 | [
"Unlicense"
] | null | null | null | 10. Recurrent Neural Network/10-1) Recurrent Neural Network, RNN.py | choijiwoong/-ROKA-torch-tutorial-files | c298fdf911cd64757895c3ab9f71ae7c3467c545 | [
"Unlicense"
] | null | null | null | 10. Recurrent Neural Network/10-1) Recurrent Neural Network, RNN.py | choijiwoong/-ROKA-torch-tutorial-files | c298fdf911cd64757895c3ab9f71ae7c3467c545 | [
"Unlicense"
] | null | null | null | #Sequence model. != Recursive Neural Network
#memory cell or RNN cell
#hidden state
#one-to-many_image captioning, many-to-one_sentiment classfication || spam detection, many-to-many_chat bot
#2) create RNN in python
import numpy as np
timesteps=10# _
input_size=4# _
hidden_size=8# ( )
inputs=np.random.random((timesteps, input_size))# 2D
hidden_state_t=np.zeros((hidden_size,))#jiddensize 0
print(hidden_state_t)
Wx=np.random.random((hidden_size, input_size))#
Wh=np.random.random((hidden_size, hidden_size))#
b=np.random.random((hidden_size,))
print(np.shape(Wx))
print(np.shape(Wh))
print(np.shape(b))
total_hidden_states=[]
#memory cell work
for input_t in inputs:
output_t=np.tanh(np.dot(Wx,input_t)+np.dot(Wh,hidden_state_t)+b)
total_hidden_states.append(list(output_t))#
print(np.shape(total_hidden_states))
hidden_state_t=output_t
total_hidden_states=np.stack(total_hidden_states, axis=0)#
print(total_hidden_states)
#3) nn.RNN() in pytorch
import torch
import torch.nn as nn
input_size=5#
hidden_size=8#
inputs=torch.Tensor(1, 10, 5)# 1 10 5
cell=nn.RNN(input_size, hidden_size, batch_first=True)#
outputs, _status=cell(inputs)#2 . ,
print(outputs.shape)
#4) Deep Recurrent Neural Network
inputs=torch.Tensor(1, 10, 5)
cell=nn.RNN(input_size=5, hidden_size=8, num_layers=2, batch_first=True)# 2(cell)
print(outputs.shape)
print(_status.shape)#, ,
#5) Bidirectional Recurrent Neural Network
inputs=torch.Tensor(1, 10, 5)
cell=nn.RNN(input_size=5, hidden_size=8, num_layers=2, batch_first=True, bidirectional=True)#
outputs, _status=cell(inputs)
print(outputs.shape)# 2
print(_status.shape)#2
| 30.327869 | 107 | 0.778378 |
c46f42400056a3b7b9402bc800d3e92633345822 | 720 | py | Python | WeLearn/M3-Python/L3-Python_Object/pet.py | munoz196/moonyosCSSIrep | cdfcd2ae061293471ecdf2d370a27f163efeba97 | [
"Apache-2.0"
] | null | null | null | WeLearn/M3-Python/L3-Python_Object/pet.py | munoz196/moonyosCSSIrep | cdfcd2ae061293471ecdf2d370a27f163efeba97 | [
"Apache-2.0"
] | null | null | null | WeLearn/M3-Python/L3-Python_Object/pet.py | munoz196/moonyosCSSIrep | cdfcd2ae061293471ecdf2d370a27f163efeba97 | [
"Apache-2.0"
] | null | null | null | pet = {
"name":"Doggo",
"animal":"dog",
"species":"labrador",
"age":"5"
}
my_pet= Pet("Fido", 3, "dog")
my_pet.is_hungry= True
print("is my pet hungry? %s"% my_pet.is_hungry)
my_pet.eat()
print("how about now? %s" % my_pet.is_hungry)
print ("My pet is feeling %s" % my_pet.mood)
| 22.5 | 62 | 0.566667 |
c470769346abfe53705868b77ccb1792faae0816 | 1,260 | py | Python | src/repositories/example_repo.py | pybokeh/dagster-examples | 459cfbe00585f1d123e49058685c74149efb867d | [
"MIT"
] | null | null | null | src/repositories/example_repo.py | pybokeh/dagster-examples | 459cfbe00585f1d123e49058685c74149efb867d | [
"MIT"
] | null | null | null | src/repositories/example_repo.py | pybokeh/dagster-examples | 459cfbe00585f1d123e49058685c74149efb867d | [
"MIT"
] | null | null | null | from dagster import job, repository
from ops.sklearn_ops import (
fetch_freehand_text_to_generic_data,
separate_features_from_target_label,
label_encode_target,
count_tfid_transform_train,
count_tfid_transform_test,
create_sgd_classifier_model,
predict
)
| 33.157895 | 141 | 0.768254 |
c4721b4a3c1999fdb50a16efbe7e2d5c42d79e86 | 551 | py | Python | exercicios/Maior_e_Menor_Valores.py | jeversonneves/Python | c31779d8db64b22711fe612cc943da8c5e51788b | [
"MIT"
] | null | null | null | exercicios/Maior_e_Menor_Valores.py | jeversonneves/Python | c31779d8db64b22711fe612cc943da8c5e51788b | [
"MIT"
] | null | null | null | exercicios/Maior_e_Menor_Valores.py | jeversonneves/Python | c31779d8db64b22711fe612cc943da8c5e51788b | [
"MIT"
] | null | null | null | resposta = 'S'
soma = quant = media = maior = menor = 0
while resposta in 'Ss':
n = int(input('Digite um nmero: '))
soma += n
quant += 1
if quant == 1:
maior = menor = n
else:
if n > maior:
maior = n
elif n < menor:
menor = n
resposta = str(input('Quer continuar? [S/N]: ')).upper().strip()[0]
media = soma / quant
print('Voc digitou {} nmeros e a soma foi de {} e media de {}.'.format(quant, soma, media))
print('O maior nmero {} e o menor nmero {}.'.format(maior, menor))
| 30.611111 | 93 | 0.548094 |
c472af02ddcb4584d404fd75d6b5093bc3a9b31d | 554 | py | Python | rbc/opening/opening.py | rebuildingcode/hardware | df38d4b955047fdea69dda6b662c56ac301799a2 | [
"BSD-3-Clause"
] | null | null | null | rbc/opening/opening.py | rebuildingcode/hardware | df38d4b955047fdea69dda6b662c56ac301799a2 | [
"BSD-3-Clause"
] | 27 | 2019-09-04T06:29:34.000Z | 2020-04-19T19:41:44.000Z | rbc/opening/opening.py | rebuildingcode/hardware | df38d4b955047fdea69dda6b662c56ac301799a2 | [
"BSD-3-Clause"
] | 2 | 2020-02-28T02:56:31.000Z | 2020-02-28T03:12:07.000Z |
from shapely.geometry import Polygon
from ..point import Point
| 20.518519 | 80 | 0.539711 |
c47376723d72b33e6ef5ded0c99f0808db10a51e | 4,252 | py | Python | AI/Housing Prices Prediction/HousePricesNN.py | n0rel/self | f9f44af42aa652f9a72279e44ffd8d4387a4bdae | [
"MIT"
] | null | null | null | AI/Housing Prices Prediction/HousePricesNN.py | n0rel/self | f9f44af42aa652f9a72279e44ffd8d4387a4bdae | [
"MIT"
] | null | null | null | AI/Housing Prices Prediction/HousePricesNN.py | n0rel/self | f9f44af42aa652f9a72279e44ffd8d4387a4bdae | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from numpy.random import uniform
import matplotlib.pyplot as plt
# Import Data
training_amount = 4000
input_scaler = MinMaxScaler((-1, 1))
output_scaler = MinMaxScaler((-1, 1))
data = pd.read_csv('USA_Housing.csv').drop(columns=['Address'])
data = np.insert(data.to_numpy(), 0, np.ones((1, len(data))), axis=1)
x_scaled, y_scaled = input_scaler.fit_transform(data[:, :6]), output_scaler.fit_transform(data[:, 6:7])
x_train, y_train = x_scaled[:training_amount], y_scaled[:training_amount]
x_test, y_test = x_scaled[training_amount:], y_scaled[training_amount:]
hidden_neurons = 10
# Create NN & train it
nn = NeuralNetwork(hidden_neurons, 0.7)
nn.fit(x_train, y_train, epochs=75)
error = 0
amount_to_check = 20
for x, y in zip(x_test[:amount_to_check, :], y_test[:amount_to_check]):
error += abs(output_scaler.inverse_transform(y.reshape(-1, 1))[0][0] -
output_scaler.inverse_transform(nn.f_propagate(x)[1].reshape(-1, 1))[0][0])
print(
f"{output_scaler.inverse_transform(nn.f_propagate(x)[1].reshape(-1, 1))[0][0]} -> {output_scaler.inverse_transform(y.reshape(-1, 1))[0][0]}")
print(f"{(error / len(x_test)):.9f}")
"""
# Keras Version of NN
model = keras.models.Sequential()
model.add(keras.layers.Dense(hidden_neurons, input_dim=5, activation='relu', kernel_initializer='he_normal'))
model.add(keras.layers.Dense(1, input_dim=hidden_neurons, activation='linear'))
model.compile(loss='mse', optimizer='adam', metrics=['mse'])
history = model.fit(x_train, y_train, epochs=10, batch_size=10)
plt.plot(history.history['mse'])
plt.show()
for x, y in zip(model.predict(x_test), y_test):
print(f"{output_scaler.inverse_transform(y.reshape(-1, 1))[0][0]} -> {output_scaler.inverse_transform(x.reshape(-1, 1))[0][0]}")
"""
| 33.480315 | 149 | 0.63476 |
c4737a166e262dfedd58077027d802632dac9651 | 7,829 | py | Python | tests/test_export_keyword_template_catalina_10_15_4.py | PabloKohan/osxphotos | 2cf3b6bb674c312240c4b12c5d7b558f15be7c85 | [
"MIT"
] | null | null | null | tests/test_export_keyword_template_catalina_10_15_4.py | PabloKohan/osxphotos | 2cf3b6bb674c312240c4b12c5d7b558f15be7c85 | [
"MIT"
] | null | null | null | tests/test_export_keyword_template_catalina_10_15_4.py | PabloKohan/osxphotos | 2cf3b6bb674c312240c4b12c5d7b558f15be7c85 | [
"MIT"
] | null | null | null | import pytest
from osxphotos._constants import _UNKNOWN_PERSON
PHOTOS_DB = "./tests/Test-10.15.4.photoslibrary/database/photos.db"
TOP_LEVEL_FOLDERS = ["Folder1"]
TOP_LEVEL_CHILDREN = ["SubFolder1", "SubFolder2"]
FOLDER_ALBUM_DICT = {"Folder1": [], "SubFolder1": [], "SubFolder2": ["AlbumInFolder"]}
ALBUM_NAMES = ["Pumpkin Farm", "AlbumInFolder", "Test Album", "Test Album"]
ALBUM_PARENT_DICT = {
"Pumpkin Farm": None,
"AlbumInFolder": "SubFolder2",
"Test Album": None,
}
ALBUM_FOLDER_NAMES_DICT = {
"Pumpkin Farm": [],
"AlbumInFolder": ["Folder1", "SubFolder2"],
"Test Album": [],
}
ALBUM_LEN_DICT = {"Pumpkin Farm": 3, "AlbumInFolder": 2, "Test Album": 1}
ALBUM_PHOTO_UUID_DICT = {
"Pumpkin Farm": [
"F12384F6-CD17-4151-ACBA-AE0E3688539E",
"D79B8D77-BFFC-460B-9312-034F2877D35B",
"1EB2B765-0765-43BA-A90C-0D0580E6172C",
],
"Test Album": [
"F12384F6-CD17-4151-ACBA-AE0E3688539E",
"D79B8D77-BFFC-460B-9312-034F2877D35B",
],
"AlbumInFolder": [
"3DD2C897-F19E-4CA6-8C22-B027D5A71907",
"E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
],
}
UUID_DICT = {
"two_albums": "F12384F6-CD17-4151-ACBA-AE0E3688539E",
"in_album": "E9BC5C36-7CD1-40A1-A72B-8B8FAC227D51",
"xmp": "F12384F6-CD17-4151-ACBA-AE0E3688539E",
}
| 35.107623 | 107 | 0.606463 |
c47490ec669bdd7c9794f49ba2d2ebd89aed558a | 32,808 | py | Python | video_level_models.py | pomonam/youtube-8m | 2d0b9b361785743ec397c6104feb30bb581700e5 | [
"Apache-2.0"
] | 43 | 2018-10-03T13:29:45.000Z | 2020-10-12T09:33:44.000Z | video_level_models.py | pomonam/LearnablePoolingMethodsForVideoClassification | 2d0b9b361785743ec397c6104feb30bb581700e5 | [
"Apache-2.0"
] | 1 | 2018-10-01T01:50:56.000Z | 2019-01-07T17:53:37.000Z | video_level_models.py | pomonam/LearnablePoolingMethodsForVideoClassification | 2d0b9b361785743ec397c6104feb30bb581700e5 | [
"Apache-2.0"
] | 3 | 2018-11-20T14:43:17.000Z | 2019-07-26T13:25:14.000Z | # Copyright 2018 Deep Topology All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains model definitions."""
# noinspection PyUnresolvedReferences
import pathmagic
from tensorflow import flags
import attention_modules
import tensorflow as tf
import tensorflow.contrib.slim as slim
import models
import math
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"moe_num_mixtures", 2,
"The number of mixtures (excluding the dummy 'expert') used for MoeModel.")
###############################################################################
# Baseline (Benchmark) models #################################################
###############################################################################
flags.DEFINE_float(
"moe_l2", 1e-8,
"L2 penalty for MoeModel.")
flags.DEFINE_integer(
"moe_low_rank_gating", -1,
"Low rank gating for MoeModel.")
flags.DEFINE_bool(
"moe_prob_gating", False,
"Prob gating for MoeModel.")
flags.DEFINE_string(
"moe_prob_gating_input", "prob",
"input Prob gating for MoeModel.")
| 43.802403 | 119 | 0.596745 |
c474a170eb0e1f1c4fbbb4250190b02bde10d265 | 4,537 | py | Python | tests/test_refinement.py | qfardet/Pandora2D | 9b36d29a199f2acc67499d22b796c7dd6867bc5f | [
"Apache-2.0"
] | 4 | 2022-02-09T10:07:03.000Z | 2022-03-08T05:16:30.000Z | tests/test_refinement.py | qfardet/Pandora2D | 9b36d29a199f2acc67499d22b796c7dd6867bc5f | [
"Apache-2.0"
] | null | null | null | tests/test_refinement.py | qfardet/Pandora2D | 9b36d29a199f2acc67499d22b796c7dd6867bc5f | [
"Apache-2.0"
] | 4 | 2022-02-03T09:21:28.000Z | 2022-03-25T07:32:13.000Z | #!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2021 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA2D
#
# https://github.com/CNES/Pandora2D
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Test refinement step
"""
import unittest
import numpy as np
import xarray as xr
import pytest
from pandora2d import refinement, common
| 32.407143 | 120 | 0.60745 |
c474f216680e6a9b4d600c4b0a1221fea638bba3 | 9,353 | py | Python | goblet/tests/test_scheduler.py | Aaron-Gill/goblet | 30c0dd73b2f39e443adb2ccda6f9009e980c53ee | [
"Apache-2.0"
] | null | null | null | goblet/tests/test_scheduler.py | Aaron-Gill/goblet | 30c0dd73b2f39e443adb2ccda6f9009e980c53ee | [
"Apache-2.0"
] | null | null | null | goblet/tests/test_scheduler.py | Aaron-Gill/goblet | 30c0dd73b2f39e443adb2ccda6f9009e980c53ee | [
"Apache-2.0"
] | null | null | null | from unittest.mock import Mock
from goblet import Goblet
from goblet.resources.scheduler import Scheduler
from goblet.test_utils import (
get_responses,
get_response,
mock_dummy_function,
dummy_function,
)
| 36.678431 | 101 | 0.5626 |
c475cdfc5c22b9c5d0eee35b59b44abcb5b1b364 | 1,027 | py | Python | arachnado/rpc/sites.py | wigginzz/arachnado | 8de92625262958e886263b4ccb189f4fc62d7400 | [
"MIT"
] | 2 | 2017-12-26T14:50:14.000Z | 2018-06-12T07:04:08.000Z | arachnado/rpc/sites.py | wigginzz/arachnado | 8de92625262958e886263b4ccb189f4fc62d7400 | [
"MIT"
] | null | null | null | arachnado/rpc/sites.py | wigginzz/arachnado | 8de92625262958e886263b4ccb189f4fc62d7400 | [
"MIT"
] | null | null | null | import logging
from functools import partial
from arachnado.storages.mongotail import MongoTailStorage
| 26.333333 | 64 | 0.650438 |
c476f31483a0cfb0e93a77ded50e7c656f3f727f | 16,628 | py | Python | src/players.py | deacona/the-ball-is-round | 8e91a72084d13d754deb82e4852fa37a86a77084 | [
"MIT"
] | null | null | null | src/players.py | deacona/the-ball-is-round | 8e91a72084d13d754deb82e4852fa37a86a77084 | [
"MIT"
] | null | null | null | src/players.py | deacona/the-ball-is-round | 8e91a72084d13d754deb82e4852fa37a86a77084 | [
"MIT"
] | null | null | null | """players module.
Used for players data processes
"""
import numpy as np
import pandas as pd
import src.config as config
import src.utilities as utilities
from src.utilities import logging
pd.set_option("display.max_columns", 500)
pd.set_option("display.expand_frame_repr", False)
# master_file = config.MASTER_FILES["ftb_players"]
# distance_columns = ["Age", "ChancesInvolved", "DefensiveActions", "FoulsCommited", "FoulsSuffered", "Height", "Minutes", "NPG+A", "Points", "Weight", "SuccessfulPasses"]
def get_outfile(source_name):
"""Return outfile stub for given source.
INPUT:
source_name: String containing name of the data source
OUTPUT:
outfile_stub: Stub to use when saving output
"""
logging.info("Mapping {0} to outfile".format(source_name))
if source_name == "tmk_cnt":
outfile_stub = "players_contract"
elif source_name == "tmk_psm":
outfile_stub = "players_performance"
logging.debug(outfile_stub)
return outfile_stub
def clean_data(source_name, directory=config.MASTER_DIR):
    """Clean raw player data and save processed version.

    INPUT:
    source_name: String containing name of the data source
                 ("tmk_cnt" = contract data, "tmk_psm" = performance data)
    directory: Directory to save output to

    OUTPUT:
    df: Dataframe containing the cleaned data
    """
    logging.info("Loading {0} data".format(source_name))
    # Each source has its own raw column layout and cleaning rules
    if source_name == "tmk_cnt":
        source_header = [
            "Shirt number",
            "Position",
            "Name",
            "Date of birth",
            "Nationality",
            "Height",
            "Foot",
            "Joined",
            "Signed from",
            "Contract expires",
            "Market value",
        ]
        drop_cols = ["Nationality", "Signed from", "Competition"]
        notna_cols = ["Market value"]
    elif source_name == "tmk_psm":
        source_header = [
            "Shirt number",
            "Position",
            "Name",
            "Age",
            "Nationality",
            "In squad",
            "Games started",
            "Goals",
            "Assists",
            "Yellow cards",
            "Second yellow cards",
            "Red cards",
            "Substitutions on",
            "Substitutions off",
            "PPG",
            "Minutes played",
        ]
        drop_cols = ["Nationality"]
        notna_cols = ["In squad"]
    df = utilities.folder_loader(
        source_name[:3], source_name, "comp_season", source_header=source_header
    )
    ## Name and Position are mis-aligned in the source files
    df["Name"].fillna(method="bfill", inplace=True)
    df["Position"] = df.Name.shift(-1)
    df.loc[df.Position == df.Name, "Position"] = df.Name.shift(-2)
    df.drop(axis=1, columns=drop_cols, inplace=True)
    df.dropna(subset=notna_cols, inplace=True)
    # Normalise whitespace and map the various "no data" markers to NaN
    df = df.apply(lambda x: x.str.strip() if x.dtype == "object" else x)
    df = df.replace("-", np.nan)
    df = df.replace("Was not used during this season", np.nan)
    df = df.replace("Not in squad during this season", np.nan)
    df = df.replace("Not used during this season", np.nan)
    df["Shirt number"] = pd.to_numeric(df["Shirt number"], downcast="integer")
    # Derive a coarse position group: (G)oalkeeper / (D)efence / (M)idfield / (F)orward
    df["Position group"] = None
    df.loc[
        (df.Position.str.upper().str.contains("KEEPER"))
        | (df.Position.str.upper().str.contains("GOAL")),
        "Position group",
    ] = "G"
    df.loc[
        (df.Position.str.upper().str.contains("BACK"))
        | (df.Position.str.upper().str.contains("DEF")),
        "Position group",
    ] = "D"
    df.loc[
        (df.Position.str.upper().str.contains("MID"))
        | (df.Position.str.upper().str.contains("MIT"))
        | (df.Position.str.upper().str.contains("WING")),
        "Position group",
    ] = "M"
    df.loc[
        (df.Position.str.upper().str.contains("STRIKER"))
        | (df.Position.str.upper().str.contains("FORW")),
        "Position group",
    ] = "F"
    if source_name == "tmk_cnt":
        # Age is embedded in "Date of birth", e.g. "Jan 1, 1990 (30)"
        df["Age"] = (
            df["Date of birth"].str.extract(r".*([0-9]{2})", expand=False).astype("int")
        )
        df["Date of birth"] = pd.to_datetime(
            df["Date of birth"].str.extract(r"(.*) \([0-9]{2}\)", expand=False),
            format="%b %d, %Y",
        )
        df["Joined"] = pd.to_datetime(df.Joined, format="%b %d, %Y")
        df["Contract expires"] = pd.to_datetime(
            df["Contract expires"], format="%d.%m.%Y"
        )
        # Height arrives as e.g. "1,85 m"; strip punctuation and convert to float
        df["Height"] = (
            df["Height"]
            .str.strip()
            .str.replace(" ", "")
            .str.replace(",", "")
            .str.replace("m", "")
            .replace({"-": np.nan, "": np.nan})
            .astype(float)
        )
        # Backfill Height for players who have the value in some seasons only
        df.loc[
            df.Name.isin(df[df.Height.notna()].Name.values)
            & df.Name.isin(df[df.Height.isna()].Name.values),
            "Height",
        ] = (
            df.loc[
                df.Name.isin(df[df.Height.notna()].Name.values)
                & df.Name.isin(df[df.Height.isna()].Name.values)
            ]
            .sort_values(by=["Name", "Season"])
            .Height.fillna(method="bfill")
        )
        # Same backfill treatment for preferred Foot
        df.loc[
            df.Name.isin(df[df.Foot.notna()].Name.values)
            & df.Name.isin(df[df.Foot.isna()].Name.values),
            "Foot",
        ] = (
            df.loc[
                df.Name.isin(df[df.Foot.notna()].Name.values)
                & df.Name.isin(df[df.Foot.isna()].Name.values)
            ]
            .sort_values(by=["Name", "Season"])
            .Foot.fillna(method="bfill")
        )
        # Market value uses k/Th./m suffixes; normalise everything to millions
        df["Market value"] = (
            df["Market value"]
            .str.strip()
            .replace({"-": np.nan})
            .replace(r"[kmTh\.]", "", regex=True)
            .astype(float)
            * df["Market value"]
            .str.extract(r"[\d\.]+([kmTh\.]+)", expand=False)
            .fillna(1)
            .replace(["k", "Th.", "m"], [10 ** 3, 10 ** 3, 10 ** 6])
            .astype(int)
            / 10 ** 6
        )
    elif source_name == "tmk_psm":
        df["PPG"] = df["PPG"].str.strip().replace(r"[,]", ".", regex=True).astype(float)
        df["Minutes played"] = (
            df["Minutes played"]
            .str.strip()
            .replace(r"[.\']", "", regex=True)
            .astype(float)
        )
        # The per-season statistic columns share the same cleaning: missing
        # values mean zero and everything should be numeric.  (This column
        # list was previously repeated four times inline.)
        stat_cols = [
            "In squad",
            "Games started",
            "Goals",
            "Assists",
            "Yellow cards",
            "Second yellow cards",
            "Red cards",
            "Substitutions on",
            "Substitutions off",
            "PPG",
            "Minutes played",
        ]
        df[stat_cols] = df[stat_cols].fillna(0)
        df[stat_cols] = df[stat_cols].astype(float)
    logging.debug(df.describe(include="all"))
    # Previously the destination was missing from this log message
    logging.info("Saving processed data to {0}".format(directory))
    utilities.save_master(df, get_outfile(source_name), directory=directory)
    return df
# def get_players():
# """
# INPUT:
# None
# OUTPUT:
# df - Dataframe of aggregated player data
# """
# logging.info("Fetching aggregated player data")
# # fetch from master csv
# # df = pd.read_csv(master_file, sep='|', encoding="ISO-8859-1")
# df = utilities.get_master("players")
# # filter unwanted records
# df = df[(df["Season"] >= "s1314") & (df["Competition"].isin(["chm", "cpo", "prm"]))]
# df.dropna(subset=["Name"], inplace=True)
# # select columns
# group_key = "Name"
# max_cols = ["Age", "Height", "Weight"]
# # p90_cols = ["AerialsWon", "ChancesInvolved", "DefensiveActions", "Dispossesed", "Dribbles", "FoulsCommited", "FoulsSuffered", "NPG+A", "SuccessfulPasses"]
# p90_cols = [
# "AerialsWon",
# "Assists",
# "BadControl",
# "Blocks",
# "CalledOffside",
# "Clearances",
# "Crosses",
# "Dispossesed",
# "Dribbles",
# "DribblesAgainst",
# "FirstYellowCards",
# "FoulsCommited",
# "FoulsSuffered",
# "GoalsConceded",
# "Interceptions",
# "KeyPasses",
# "LongBalls",
# "NonPenaltyGoals",
# "OffsidesWon",
# "OwnGoals",
# "Passes",
# "PenaltyGoals",
# "RedCards",
# "Saves",
# "Shots",
# "ShotsFaced",
# "ShotsOnTarget",
# "Tackles",
# "ThroughBalls",
# "YellowCards",
# ]
# pGm_cols = ["Appearances", "Minutes", "Points"]
# sum_cols = p90_cols + pGm_cols
# selected_columns = [group_key] + max_cols + sum_cols
# df = df[selected_columns]
# # aggregate to player level
# df_max = df[[group_key] + max_cols].groupby(group_key).max()
# df_sum = df[[group_key] + sum_cols].groupby(group_key).sum()
# df = pd.concat([df_max, df_sum], axis=1)
# df = df[(df["Minutes"] >= 900)]
# # convert action totals to per90
# for col in p90_cols:
# df[col + "P90"] = 90 * df[col] / df["Minutes"]
# for col in pGm_cols:
# df[col + "PGm"] = df[col] / df["Appearances"]
# for col in sum_cols:
# del df[col]
# del df["AppearancesPGm"]
# logging.debug(df.describe(include="all"))
# return df
# def find_similar():
# players = get_players()
# # print players
# print("\nNumber of players included: " + str(len(players)))
# # Normalize all of the numeric columns
# players_normalized = (players - players.mean()) / players.std()
# players_normalized.fillna(0, inplace=True)
# # players_normalized.info()
# # print players_normalized.describe(include="all")
# # print players_normalized.index.values
# for (
# name
# ) in (
# players_normalized.index.values
# ): # ["Adam Clayton", "Ben Gibson", "Daniel Ayala", "Tomas Mejias"]:
# # print "\n###############################"
# print("\n" + name, end=" ")
# # selected_player = players.loc[name]
# # print selected_player.name
# # print selected_player.to_frame().T #.name
# # Normalize all of the numeric columns
# selected_normalized = players_normalized.loc[name]
# # print selected_normalized
# # Find the distance between select player and everyone else.
# euclidean_distances = players_normalized.apply(
# lambda row: distance.euclidean(row, selected_normalized), axis=1
# )
# # Create a new dataframe with distances.
# distance_frame = pd.DataFrame(
# data={"dist": euclidean_distances, "idx": euclidean_distances.index}
# )
# distance_frame.sort_values("dist", inplace=True)
# most_similar_players = distance_frame.iloc[1:4]["idx"]
# # most_similar_players = players.loc[nearest_neighbours] #["Name"]
# # print most_similar_players
# print("... is similar to... ", end=" ")
# print(list(most_similar_players.index.values))
# def make_prediction():
# players = get_players()
# pred_col = "AssistsP90"
# x_columns = list(players.columns.values)
# x_columns.remove(pred_col)
# y_column = [pred_col]
# # # The columns that we will be making predictions with.
# # x_columns = ['Age', 'Height', 'Weight', 'AerialsWonP90', 'AssistsP90', 'BadControlP90', 'BlocksP90', 'CalledOffsideP90', 'ClearancesP90', 'CrossesP90', 'DispossesedP90', 'DribblesP90', 'DribblesAgainstP90', 'FirstYellowCardsP90', 'FoulsCommitedP90', 'FoulsSufferedP90', 'GoalsConcededP90', 'InterceptionsP90', 'KeyPassesP90', 'LongBallsP90', 'NonPenaltyGoalsP90', 'OffsidesWonP90', 'OwnGoalsP90', 'PassesP90', 'PenaltyGoalsP90', 'RedCardsP90', 'SavesP90', 'ShotsP90', 'ShotsFacedP90', 'ShotsOnTargetP90', 'TacklesP90', 'ThroughBallsP90', 'YellowCardsP90', 'MinutesPGm']
# # print x_columns
# # # The column that we want to predict.
# # y_column = [pred_col]
# # print y_column
# ###Generating training and testing sets
# # Randomly shuffle the index of nba.
# random_indices = permutation(players.index)
# # Set a cutoff for how many items we want in the test set (in this case 1/3 of the items)
# test_cutoff = math.floor(len(players) / 3)
# # Generate the test set by taking the first 1/3 of the randomly shuffled indices.
# test = players.loc[random_indices[1:test_cutoff]]
# test.fillna(0, inplace=True)
# # test.info()
# # print test.describe(include="all")
# # Generate the train set with the rest of the data.
# train = players.loc[random_indices[test_cutoff:]]
# train.fillna(0, inplace=True)
# # train.info()
# # print train.describe(include="all")
# ###Using sklearn for k nearest neighbors
# # print "Using sklearn for k nearest neighbors..."
# from sklearn.neighbors import KNeighborsRegressor
# # Create the knn model.
# # Look at the five closest neighbors.
# knn = KNeighborsRegressor(n_neighbors=5)
# # print knn
# # Fit the model on the training data.
# knn.fit(train[x_columns], train[y_column])
# # print knn
# # Make point predictions on the test set using the fit model.
# predictions = knn.predict(test[x_columns])
# # print "\nPredicted PointsPGm:"
# # print predictions.shape
# ###Computing error
# # Get the actual values for the test set.
# actual = test[y_column].copy()
# # Compute the mean squared error of our predictions.
# mse = (((predictions - actual) ** 2).sum()) / len(predictions)
# print("\nMean Squared Error:")
# print(mse)
# actual["Predicted" + pred_col] = predictions
# actual["Diff"] = actual[pred_col] - actual["Predicted" + pred_col]
# print("\nActual and Predicted " + pred_col + ":")
# print(actual.sort_values(["Diff"], ascending=False))
# def test_opinions():
# players = get_players()
# players = players.reset_index()
# players = players[
# players["Name"].isin(
# [
# "Alvaro Negredo",
# "Patrick Bamford",
# "Jordan Rhodes",
# "Garcia Kike",
# "Cristhian Stuani",
# "David Nugent",
# "Danny Graham",
# "Jelle Vossen",
# "Kei Kamara",
# ]
# )
# ]
# # df_info(players)
# players["ShotAccuracy"] = players["ShotsOnTargetP90"] / players["ShotsP90"]
# players["ShotEfficiency"] = (
# players["NonPenaltyGoalsP90"] + players["PenaltyGoalsP90"].fillna(0)
# ) / players["ShotsP90"]
# players["ShotPercentage"] = (
# players["NonPenaltyGoalsP90"] + players["PenaltyGoalsP90"].fillna(0)
# ) / players["ShotsOnTargetP90"]
# players = players[
# [
# "Name",
# "NonPenaltyGoalsP90",
# "PenaltyGoalsP90",
# "ShotsP90",
# "ShotsOnTargetP90",
# "ShotAccuracy",
# "ShotEfficiency",
# "ShotPercentage",
# ]
# ]
# # df_info(players)
# print(players.describe())
# print(players)
def main():
    """Command-line entry point: clean every known player data source."""
    logging.info("Executing players module")
    for source_name in ("tmk_cnt", "tmk_psm"):
        clean_data(source_name)
    # get_players()
    # find_similar()
    # make_prediction()
    # test_opinions()
if __name__ == "__main__":
    # Allow running this module directly as a script
    main()
| 31.793499 | 580 | 0.53446 |
c47739874e06f42c7eb96ea82d6382fed8af2e9d | 2,035 | py | Python | Z_ALL_FILE/Py/code_qry.py | omikabir/omEngin | b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195 | [
"Apache-2.0"
] | null | null | null | Z_ALL_FILE/Py/code_qry.py | omikabir/omEngin | b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195 | [
"Apache-2.0"
] | null | null | null | Z_ALL_FILE/Py/code_qry.py | omikabir/omEngin | b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195 | [
"Apache-2.0"
] | 1 | 2021-04-29T21:46:02.000Z | 2021-04-29T21:46:02.000Z | import pandas as pd
import os
#opt = itertools.islice(ls, len(ls))
#st = map(lambda x : )
| 28.263889 | 200 | 0.456511 |
c478a3bd10411c7f1ec8a901267dc3442748c724 | 1,463 | py | Python | eats/tests/common/base_test_setup.py | Etiqa/eats | 8c8e2da93d0014f6fbb208185712c5526dba1174 | [
"BSD-2-Clause"
] | null | null | null | eats/tests/common/base_test_setup.py | Etiqa/eats | 8c8e2da93d0014f6fbb208185712c5526dba1174 | [
"BSD-2-Clause"
] | 5 | 2021-03-18T21:34:44.000Z | 2022-03-11T23:35:23.000Z | eats/tests/common/base_test_setup.py | Etiqa/eats | 8c8e2da93d0014f6fbb208185712c5526dba1174 | [
"BSD-2-Clause"
] | null | null | null | import socket
import unittest
from eats.webdriver import PytractorWebDriver
from eats.tests.common import SimpleWebServerProcess as SimpleServer | 30.479167 | 93 | 0.663705 |
c47907817d94beb66a4ec9f0e248f596065c0464 | 231 | py | Python | autoprep/service/sqlite_project_service.py | haginot/auto-prep | b1de3eceba5b82432e7042e7e62270df467ed828 | [
"Apache-2.0"
] | null | null | null | autoprep/service/sqlite_project_service.py | haginot/auto-prep | b1de3eceba5b82432e7042e7e62270df467ed828 | [
"Apache-2.0"
] | 4 | 2019-01-15T01:55:46.000Z | 2019-02-21T04:15:25.000Z | autoprep/service/sqlite_project_service.py | haginot/auto-prep | b1de3eceba5b82432e7042e7e62270df467ed828 | [
"Apache-2.0"
] | null | null | null | from autoprep.service.project_service import ProjectService
| 16.5 | 59 | 0.69697 |
c479ce0c9f3fb47a8ec7bf6ff4db304b73d1a05c | 2,262 | py | Python | p1_navigation/model.py | Alexandr0s93/deep-reinforcement-learning | 02a508d25d2ba3c76c76a8410b3ae27f0d14e13f | [
"MIT"
] | null | null | null | p1_navigation/model.py | Alexandr0s93/deep-reinforcement-learning | 02a508d25d2ba3c76c76a8410b3ae27f0d14e13f | [
"MIT"
] | null | null | null | p1_navigation/model.py | Alexandr0s93/deep-reinforcement-learning | 02a508d25d2ba3c76c76a8410b3ae27f0d14e13f | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
| 30.16 | 63 | 0.545977 |
c479cee1b61267e6a98fae5c6efa9dd6f54fec33 | 74 | py | Python | const.py | TakosukeGH/pmx_bone_importer | 412cc066867cb0e0fd889101630277f9f9ba3a6a | [
"MIT"
] | null | null | null | const.py | TakosukeGH/pmx_bone_importer | 412cc066867cb0e0fd889101630277f9f9ba3a6a | [
"MIT"
] | null | null | null | const.py | TakosukeGH/pmx_bone_importer | 412cc066867cb0e0fd889101630277f9f9ba3a6a | [
"MIT"
] | 1 | 2019-10-05T01:18:54.000Z | 2019-10-05T01:18:54.000Z | ADDON_NAME = "pmx_bone_importer"
LOG_FILE_NAME = "pmx_bone_importer.log"
| 18.5 | 39 | 0.810811 |
c47bf0eadf4438f1d2983cdc88c09d3954cd62d8 | 17,789 | py | Python | pox/lib/interfaceio/__init__.py | korrigans84/pox_network | cd58d95d97c94b3d139bc2026fd1be0a30987911 | [
"Apache-2.0"
] | 416 | 2015-01-05T18:16:36.000Z | 2022-03-28T21:44:26.000Z | pox/lib/interfaceio/__init__.py | korrigans84/pox_network | cd58d95d97c94b3d139bc2026fd1be0a30987911 | [
"Apache-2.0"
] | 140 | 2015-01-18T23:32:34.000Z | 2022-03-17T05:40:24.000Z | pox/lib/interfaceio/__init__.py | korrigans84/pox_network | cd58d95d97c94b3d139bc2026fd1be0a30987911 | [
"Apache-2.0"
] | 344 | 2015-01-08T06:44:23.000Z | 2022-03-26T04:06:27.000Z | # Copyright 2017 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Input and output from network interfaces.
This wraps PCap, TunTap, etc., to provide a simple, universal, cooperative
interface to network interfaces.
Currently limited to Linux.
"""
from pox.lib.pxpcap import PCap
from queue import Queue
from pox.lib.revent import Event, EventMixin
from pox.lib.ioworker.io_loop import ReadLoop
from pox.core import core
import struct
from fcntl import ioctl
import socket
from pox.lib.addresses import EthAddr, IPAddr
from pox.lib.addresses import parse_cidr, cidr_to_netmask
import os
import ctypes
IFNAMESIZ = 16
IFREQ_SIZE = 40
# from linux/if_tun.h
TUNSETIFF = 0x400454ca
TUNGETIFF = 0x800454d2
IFF_TUN = 0x0001
IFF_TAP = 0x0002
IFF_NO_PI = 0x1000
IFF_ONE_QUEUE = 0x2000
IFF_VNET_HDR = 0x4000
IFF_TUN_EXCL = 0x8000
IFF_MULTI_QUEUE = 0x0100
IFF_ATTACH_QUEUE = 0x0200
IFF_DETACH_QUEUE = 0x0400
IFF_PERSIST = 0x0800
IFF_NOFILTER = 0x1000
#from linux/if.h (flags)
IFF_UP = 1<<0
IFF_BROADCAST = 1<<1
IFF_DEBUG = 1<<2
IFF_LOOPBACK = 1<<3
IFF_POINTOPOINT = 1<<4
IFF_NOTRAILERS = 1<<5
IFF_RUNNING = 1<<6
IFF_NOARP = 1<<7
IFF_PROMISC = 1<<8
IFF_ALLMULTI = 1<<9
IFF_MASTER = 1<<10
IFF_SLAVE = 1<<11
IFF_MULTICAST = 1<<12
IFF_PORTSEL = 1<<13
IFF_AUTOMEDIA = 1<<14
IFF_DYNAMIC = 1<<15
IFF_LOWER_UP = 1<<16
IFF_DORMANT = 1<<17
IFF_ECHO = 1<<18
# Unless IFF_NO_PI, there's a header on packets:
# 16 bits of flags
# 16 bits (big endian?) protocol number
# from /usr/include/linux/sockios.h
SIOCGIFHWADDR = 0x8927
SIOCGIFMTU = 0x8921
SIOCSIFMTU = 0x8922
SIOCGIFFLAGS = 0x8913
SIOCSIFFLAGS = 0x8914
SIOCSIFHWADDR = 0x8924
SIOCGIFNETMASK = 0x891b
SIOCSIFNETMASK = 0x891c
SIOCGIFADDR = 0x8915
SIOCSIFADDR = 0x8916
SIOCGIFBRDADDR = 0x8919
SIOCSIFBRDADDR = 0x891a
SIOCSIFNAME = 0x8923
SIOCADDRT = 0x890B # rtentry (route.h) for IPv4, in6_rtmsg for IPv6
SIOCDELRT = 0x890C
# from /usr/include/linux/if_arp.h
ARPHRD_ETHER = 1
ARPHRD_IEEE802 = 1
ARPHRD_IEEE1394 = 24
ARPHRD_EUI64 = 27
ARPHRD_LOOPBACK = 772
ARPHRD_IPGRE = 778
ARPHRD_IEE802_TR = 800
ARPHRD_IEE80211 = 801
ARPHRD_IEE80211_PRISM = 802
ARPHRD_IEE80211_RADIOTAP = 803
ARPHRD_IP6GRE = 823
def unset_flags (self, flags):
  """
  Clear the given bits in this interface's flag word

  flags is treated as a 16 bit mask; every bit set in it is cleared
  from self.flags.
  """
  keep_mask = flags ^ 0xffff # invert the low 16 bits of the mask
  self.flags = self.flags & keep_mask
def _ioctl_get_ipv4 (self, which):
  """
  Read an IPv4 address from this interface via an ioctl

  which is the request code (e.g., SIOCGIFADDR / SIOCGIFNETMASK /
  SIOCGIFBRDADDR).  Returns whatever self._get_ipv4() parses out of the
  returned ifreq structure.
  """
  # Any AF_INET socket works as a handle for interface ioctls
  sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
  # Build an ifreq: the interface name, zero-padded to IFREQ_SIZE bytes
  ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
  ifr += "\0" * (IFREQ_SIZE - len(ifr))
  ret = ioctl(sock, which, ifr)
  # The requested address lives in the union that follows the name field
  return self._get_ipv4(ret[IFNAMESIZ:])
def _ioctl_set_ipv4 (self, which, value):
  """
  Set an IPv4 address on this interface via an ioctl

  which is the request code (e.g., SIOCSIFADDR / SIOCSIFNETMASK /
  SIOCSIFBRDADDR); value is anything IPAddr() accepts.
  """
  value = IPAddr(value)
  sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
  # ifreq containing a sockaddr_in: name, address family, zero port, address
  ifr = struct.pack(str(IFNAMESIZ) + "sHHI", self.name, socket.AF_INET, 0,
                    value.toUnsigned(networkOrder=True))
  ifr += "\0" * (IFREQ_SIZE - len(ifr))
  ret = ioctl(sock, which, ifr)
def add_default_route (self, *args, **kw):
  """
  Install a default (0.0.0.0/0) route; remaining args as for add_route()
  """
  default_net = "0.0.0.0/0"
  return self.add_route(default_net, *args, **kw)
def add_route (self, network, gateway=None, dev=(), metric=0):
  """
  Install an entry in the kernel routing table

  When dev is left unspecified, the route is bound to this interface.
  """
  cmd = SIOCADDRT
  return self._add_del_route(network, gateway, dev, metric, cmd)
def del_route (self, network, gateway=None, dev=(), metric=0):
  """
  Delete an entry from the kernel routing table

  When dev is left unspecified, the route is assumed to be bound to this
  interface.
  """
  cmd = SIOCDELRT
  return self._add_del_route(network, gateway, dev, metric, cmd)
def _add_del_route (self, network, gateway=None, dev=(), metric=0,
                    command=None):
  """
  Add or remove a routing table entry

  network - "a.b.c.d/bits" string, an (addr, netmask-or-bits) tuple, or a
            bare address (which produces a host route)
  gateway - optional next hop (anything IPAddr() accepts)
  dev     - output interface (name or Interface); defaults to this device
  metric  - route metric
  command - SIOCADDRT to add or SIOCDELRT to delete
  """
  r = rtentry()
  # Normalize an (address, mask) tuple into "addr/mask" string form
  if isinstance(network, tuple):
    addr,mask = network
    addr = str(addr)
    if isinstance(mask, int):
      mask = cidr_to_netmask(mask)
    mask = str(mask)
    network = "%s/%s" % (addr,mask)
  # A bare address (no prefix length) means a host route
  host = False
  if isinstance(network, IPAddr) or (isinstance(network, str)
                                     and "/" not in network):
    host = True
  network,bits = parse_cidr(network)
  r.rt_dst = network
  r.rt_genmask = cidr_to_netmask(bits)
  if gateway is not None:
    r.rt_gateway = IPAddr(gateway)
    r.rt_flags |= r.RTF_GATEWAY
  r.rt_metric = metric
  # The () default means "this interface".  Use equality instead of the
  # previous "dev is ()" identity test against a literal (SyntaxWarning
  # on Python 3.8+).
  if dev == (): dev = self
  if isinstance(dev, Interface): dev = dev.name
  if dev: r.rt_dev = dev
  if host: r.rt_flags |= r.RTF_HOST
  r.rt_flags |= r.RTF_UP
  sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
  # ioctl raises OSError on failure; its return value is not useful here
  ioctl(sock, command, r.pack())
class TunTap (object):
  """
  Simple wrapper for tun/tap interfaces

  Looks like a file-like object.  You should be able to read/write it, select
  on it, etc.
  """
  def __init__ (self, name=None, tun=False, raw=False):
    """
    Create tun or tap

    By default, it creates a new tun or tap with a default name.  If you
    specify a name, it will either try to create it (if it doesn't exist),
    or try to use an existing interface (for which you must have permission).
    Defaults to tap (Ethernet) mode.  Specify tun=True for tun (IP) mode.
    Specify raw=True to skip the 32 bits of flag/protocol metadata.
    """
    if name is None: name = ""
    openflags = os.O_RDWR
    try:
      # O_BINARY only exists on some platforms; skip it where it doesn't.
      # (This previously assigned to a misspelled "openflow" variable, so
      # the flag was silently never applied.)
      openflags |= os.O_BINARY
    except AttributeError:
      pass
    self._f = os.open("/dev/net/tun", openflags)
    # an ifreq is IFREQ_SIZE bytes long, starting with an interface name
    # (IFNAMESIZ bytes) followed by a big union.
    self.is_tun = tun
    self.is_tap = not tun
    self.is_raw = raw
    # Ask the kernel to attach this fd to the requested tun/tap device
    flags = 0
    if tun: flags |= IFF_TUN
    else: flags |= IFF_TAP
    if raw: flags |= IFF_NO_PI
    ifr = struct.pack(str(IFNAMESIZ) + "sH", name, flags)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(self.fileno(), TUNSETIFF, ifr)
    self.name = ret[:IFNAMESIZ]
    iflags = flags
    # Read the flags back to learn what mode we actually got
    ifr = struct.pack(str(IFNAMESIZ) + "sH", name, 0)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(self.fileno(), TUNGETIFF, ifr)
    flags = struct.unpack("H", ret[IFNAMESIZ:IFNAMESIZ+2])[0]
    self.is_tun = (flags & IFF_TUN) == IFF_TUN
    self.is_tap = not self.is_tun
    #self.is_raw = (flags & IFF_NO_PI) == IFF_NO_PI
def _do_rx (self):
  """
  Read one packet from the tap and raise an RXData event

  Unless the tap is in raw mode, each packet is preceded by 16 bits of
  flags and a 16 bit protocol number; these are stashed on
  self.last_flags / self.last_protocol and stripped from the payload.
  """
  data = self.tap.read(self.max_read_size)
  if not self.tap.is_raw:
    flags,proto = struct.unpack("!HH", data[:4])
    #FIXME: This may invert the flags...
    self.last_flags = flags
    self.last_protocol = proto
    data = data[4:] # Cut off header
  self.raiseEvent(RXData, self, data)
def fileno (self):
  """
  Return the wrapped tap's file descriptor

  Having fileno() lets instances be handed directly to select / IO loops.
  """
  tap = self.tap
  return tap.fileno()
def close (self):
  """
  Close the wrapped tap (if still open) and deregister from the IO loop
  """
  tap = self.tap
  if tap:
    tap.close()
    self.tap = None
  self.io_loop.remove(self)
| 27.034954 | 78 | 0.652426 |
c47c240782affe27a9180c58c326bd1012c03ca6 | 5,754 | py | Python | icarus_simulator/strategies/atk_geo_constraint/geo_constr_strat.py | RubenFr/ICARUS-framework | e57a1f50c3bb9522b2a279fee6b625628afd056f | [
"MIT"
] | 5 | 2021-08-31T08:07:41.000Z | 2022-01-04T02:09:25.000Z | icarus_simulator/strategies/atk_geo_constraint/geo_constr_strat.py | RubenFr/ICARUS-framework | e57a1f50c3bb9522b2a279fee6b625628afd056f | [
"MIT"
] | 3 | 2021-09-23T09:06:35.000Z | 2021-12-08T04:53:01.000Z | icarus_simulator/strategies/atk_geo_constraint/geo_constr_strat.py | RubenFr/ICARUS-framework | e57a1f50c3bb9522b2a279fee6b625628afd056f | [
"MIT"
] | 2 | 2022-01-19T17:50:56.000Z | 2022-03-06T18:59:41.000Z | # 2020 Tommaso Ciussani and Giacomo Giuliari
import os
import json
import numpy as np
from typing import Set, List
from geopy.distance import great_circle
from scipy.spatial.ckdtree import cKDTree
from shapely.geometry import Polygon, shape, Point
from icarus_simulator.sat_core.coordinate_util import geo2cart
from icarus_simulator.strategies.atk_geo_constraint.base_geo_constraint_strat import (
BaseGeoConstraintStrat,
)
from icarus_simulator.structure_definitions import GridPos
dirname = os.path.dirname(__file__)
strategies_dirname = os.path.split(dirname)[0]
library_dirname = os.path.split(strategies_dirname)[0]
data_dirname = os.path.join(library_dirname, "data")
COUNTRIES_FILE: str = os.path.join(data_dirname, "natural_earth_world_small.geo.json")
# noinspection PyTypeChecker
def get_allowed_gridpoints(geo_location: str, grid_pos: GridPos, geo_data) -> Set[int]:
    """Return the set of grid point ids lying inside a geographic region.

    geo_location must name a country, subregion or continent present in
    geo_data (ValueError otherwise).  Points strictly inside the region's
    union polygon are included, plus grid points within 300 km of the
    region's border (to keep coastal points that fall slightly offshore).
    """
    # Get a list of all possible source points
    if geo_location in geo_data["countries"]:
        indices = [geo_data["countries"][geo_location]]
    elif geo_location in geo_data["subregions"]:
        indices = geo_data["subregions"][geo_location]
    elif geo_location in geo_data["continents"]:
        indices = geo_data["continents"][geo_location]
    else:
        raise ValueError("Invalid geographic constraint")
    geometries = [geo_data["geometries"][index] for index in indices]
    allowed_points = set()
    # Create a unique shape, union of all shapes in the region, and take the points include within
    shp = Polygon()
    for geo in geometries:
        shp = shp.union(shape(geo))
    for idx, pos in grid_pos.items():
        if Point(pos.lat, pos.lon).within(shp):
            allowed_points.add(idx)
    # Extract the border points (a vestigial "if True:" nesting was removed)
    x, y = [], []
    if shp.geom_type == "MultiPolygon":
        for shap in shp.geoms:
            x1, y1 = shap.exterior.xy
            x.extend(x1)
            y.extend(y1)
    else:
        x1, y1 = shp.exterior.xy
        x.extend(x1)
        y.extend(y1)
    # plotter.plot_points({idx: GeodeticPosInfo({"lat": x[idx], "lon": y[idx], "elev": 0.0})
    # for idx in range(len(x))}, "GRID", "TEST", "aa", "asas",)
    # Convert the whole grid to cartesian coordinates, remembering which
    # row of the array maps back to which grid id
    grid_cart = np.zeros((len(grid_pos), 3))
    grid_map = {}
    i = 0
    for idx, pos in grid_pos.items():
        grid_map[i] = idx
        grid_cart[i] = geo2cart({"elev": 0, "lon": pos.lon, "lat": pos.lat})
        i += 1
    # Put the homogeneous grid into a KD-tree and query the border points to include also point slightly in the sea
    kd = cKDTree(grid_cart)
    for idx in range(len(x)):
        _, closest_grid_idx = kd.query(
            geo2cart({"elev": 0, "lon": y[idx], "lat": x[idx]}), k=1
        )
        grid_id = grid_map[closest_grid_idx]
        if (
            great_circle(
                (grid_pos[grid_id].lat, grid_pos[grid_id].lon), (x[idx], y[idx])
            ).meters
            < 300000
        ):
            # 300000 -> number elaborated to keep the out-of-coast values without including wrong points
            allowed_points.add(grid_id)
    return allowed_points
# noinspection PyTypeChecker
| 38.36 | 115 | 0.616093 |
c47c8df17ea394b09ef2defebfcd36f91bad20ef | 8,861 | py | Python | grafeas/models/deployable_deployment_details.py | nyc/client-python | e73eab8953abf239305080673f7c96a54b776f72 | [
"Apache-2.0"
] | null | null | null | grafeas/models/deployable_deployment_details.py | nyc/client-python | e73eab8953abf239305080673f7c96a54b776f72 | [
"Apache-2.0"
] | null | null | null | grafeas/models/deployable_deployment_details.py | nyc/client-python | e73eab8953abf239305080673f7c96a54b776f72 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Grafeas API
An API to insert and retrieve annotations on cloud artifacts. # noqa: E501
OpenAPI spec version: v1alpha1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from grafeas.models.deployment_details_platform import DeploymentDetailsPlatform # noqa: F401,E501
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeployableDeploymentDetails):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.091228 | 153 | 0.623632 |
c47e515541dd250050db71c9315d649403e7ce2b | 1,575 | py | Python | lib/python/test/test_trans.py | qxo/cat | 08170af3c8e2ae3724036833d67312964721c99b | [
"Apache-2.0"
] | 5 | 2018-12-13T17:46:39.000Z | 2022-03-29T02:07:47.000Z | lib/python/test/test_trans.py | qxo/cat | 08170af3c8e2ae3724036833d67312964721c99b | [
"Apache-2.0"
] | 42 | 2019-12-08T18:41:13.000Z | 2021-08-28T13:08:55.000Z | lib/python/test/test_trans.py | qxo/cat | 08170af3c8e2ae3724036833d67312964721c99b | [
"Apache-2.0"
] | 8 | 2018-12-25T04:19:01.000Z | 2021-03-24T17:02:44.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: stdrickforce (Tengyuan Fan)
# Email: <stdrickforce@gmail.com> <fantengyuan@baixing.com>
import cat
import time
def test2():
    '''
    Use via context manager

    The transaction is completed automatically when the with-block exits.
    '''
    with cat.Transaction("Trans", "T2") as t:
        cat.log_event("Event", "E2")
        try:
            do_something()
        except Exception:
            # Mark the transaction as failed but keep recording data
            t.set_status(cat.CAT_ERROR)
        t.add_data("context-manager")
        t.add_data("foo", "bar")
if __name__ == '__main__':
    # Initialize the cat client, then exercise each test case repeatedly
    cat.init("pycat", debug=True, logview=False)
    for i in range(100):
        test1()
        test2()
        test3()
        time.sleep(0.01)
    # Give the client a moment to flush pending messages before exit
    time.sleep(1)
| 22.183099 | 64 | 0.572698 |
c47eb0be6f206f7a309aab7d8baf760825081212 | 19,781 | py | Python | src/ui/ui_hw_recovery_wdg.py | frosted97/dash-masternode-tool | d824740309ab878d745e41d39f274e952111542f | [
"MIT"
] | 75 | 2017-03-20T06:33:14.000Z | 2022-02-15T16:16:45.000Z | src/ui/ui_hw_recovery_wdg.py | frosted97/dash-masternode-tool | d824740309ab878d745e41d39f274e952111542f | [
"MIT"
] | 42 | 2017-10-25T06:34:54.000Z | 2022-02-10T20:53:46.000Z | src/ui/ui_hw_recovery_wdg.py | frosted97/dash-masternode-tool | d824740309ab878d745e41d39f274e952111542f | [
"MIT"
] | 98 | 2017-03-20T05:27:36.000Z | 2022-03-20T05:03:08.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file ui_hw_recovery_wdg.ui
#
# Created by: PyQt5 UI code generator
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
| 64.016181 | 222 | 0.745008 |
c47ed8028e53c0742399199be9ea4ca791d59010 | 1,108 | py | Python | datahandler/analyser.py | ameliecordier/IIK | 57b40d6b851a1c2369604049d1820e5b572c6227 | [
"MIT"
] | null | null | null | datahandler/analyser.py | ameliecordier/IIK | 57b40d6b851a1c2369604049d1820e5b572c6227 | [
"MIT"
] | null | null | null | datahandler/analyser.py | ameliecordier/IIK | 57b40d6b851a1c2369604049d1820e5b572c6227 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import csv
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
# CLASS ANALYSER
| 25.181818 | 104 | 0.598375 |
c47ef70151ad606b1f9596045a1960c4c4dec6a6 | 1,948 | py | Python | binary_trees/next_right.py | xxaxdxcxx/miscellaneous-code | cdb88783f39e1b9a89fdb12f7cddfe62619e4357 | [
"MIT"
] | null | null | null | binary_trees/next_right.py | xxaxdxcxx/miscellaneous-code | cdb88783f39e1b9a89fdb12f7cddfe62619e4357 | [
"MIT"
] | null | null | null | binary_trees/next_right.py | xxaxdxcxx/miscellaneous-code | cdb88783f39e1b9a89fdb12f7cddfe62619e4357 | [
"MIT"
] | null | null | null | # Definition for binary tree with next pointer.
| 31.419355 | 63 | 0.464579 |
c47f26765a0cb339776a2ad95fc385826831ad79 | 982 | py | Python | 6.all_species/species_data/merge_species_data.py | oaxiom/episcan | b6616536d621ff02b92a7678f80b5bfbd38c6dc8 | [
"MIT"
] | null | null | null | 6.all_species/species_data/merge_species_data.py | oaxiom/episcan | b6616536d621ff02b92a7678f80b5bfbd38c6dc8 | [
"MIT"
] | null | null | null | 6.all_species/species_data/merge_species_data.py | oaxiom/episcan | b6616536d621ff02b92a7678f80b5bfbd38c6dc8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys, os, glob
from glbase3 import *
all_species = glload('species_annotations/species.glb')

# Gather per-assembly peptide counts from the pep_counts/*.txt summaries.
newl = []
for path in glob.glob('pep_counts/*.txt'):
    # The first whitespace-separated token on the first line is the peptide
    # count; the context manager guarantees the handle is closed even if
    # parsing raises (the previous open/close pair leaked on error).
    with open(path, 'rt') as oh:
        count = int(oh.readline().split()[0])
    fname = os.path.split(path)[1]
    species_name = fname.split('.')[0].lower() # seems a simple rule
    assembly_name = fname.replace('.txt', '')
    # Skip assemblies with implausibly few peptides
    if count < 5000:
        continue
    newl.append({'species': species_name, 'assembly_name': assembly_name, 'num_pep': count})

# Add the peptide counts for all species to the annotation table
pep_counts = genelist()
pep_counts.load_list(newl)
all_species = all_species.map(genelist=pep_counts, key='species')
all_species = all_species.removeDuplicates('name')
print(all_species)
all_species = all_species.getColumns(['name', 'species', 'division' ,'num_pep', 'assembly_name'])
all_species.sort('name')
all_species.saveTSV('all_species.tsv')
all_species.save('all_species.glb')
| 25.179487 | 97 | 0.701629 |
c481812f6f75096a79bbca57dd3f97e48ea22078 | 3,845 | py | Python | modules/lex_managers/lex_intent_manager.py | adamhamden/lex-bot | 3c21b8d60607950c707b97ff5ba8491d40e31592 | [
"MIT"
] | null | null | null | modules/lex_managers/lex_intent_manager.py | adamhamden/lex-bot | 3c21b8d60607950c707b97ff5ba8491d40e31592 | [
"MIT"
] | null | null | null | modules/lex_managers/lex_intent_manager.py | adamhamden/lex-bot | 3c21b8d60607950c707b97ff5ba8491d40e31592 | [
"MIT"
] | null | null | null | import boto3
from prettytable import PrettyTable
| 39.639175 | 129 | 0.490507 |
c4819144b63cb938bdc3a631c3adcbd846e22f52 | 80 | py | Python | src/__init__.py | Victorpc98/CE888-Project | 99c20adc78eb53ac4d3c87543ef8da1ef4d10adc | [
"MIT"
] | 1 | 2020-04-18T21:03:28.000Z | 2020-04-18T21:03:28.000Z | src/__init__.py | Victorpc98/CE888-Project | 99c20adc78eb53ac4d3c87543ef8da1ef4d10adc | [
"MIT"
] | null | null | null | src/__init__.py | Victorpc98/CE888-Project | 99c20adc78eb53ac4d3c87543ef8da1ef4d10adc | [
"MIT"
] | null | null | null | import sys
sys.path.append("..") # Adds higher directory to python modules path. | 40 | 69 | 0.75 |
c4821b9a95d728a178a666ea50065578f645972b | 7,025 | py | Python | wxtbx/wx4_compatibility.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 155 | 2016-11-23T12:52:16.000Z | 2022-03-31T15:35:44.000Z | wxtbx/wx4_compatibility.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 590 | 2016-12-10T11:31:18.000Z | 2022-03-30T23:10:09.000Z | wxtbx/wx4_compatibility.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 115 | 2016-11-15T08:17:28.000Z | 2022-02-09T15:30:14.000Z | from __future__ import absolute_import, division, print_function
'''
Author : Lyubimov, A.Y.
Created : 04/14/2014
Last Changed: 11/05/2018
Description : wxPython 3-4 compatibility tools
The context managers, classes, and other tools below can be used to make the
GUI code compatible with wxPython 3 and 4. Mostly, the tools convert the
functions, enumerations, and classes which have been renamed in wxPython 4;
the name mismatches result in exceptions.
Use case 1: subclassing wx.PyControl or wx.Control:
from wxtbx import wx4_compatibility as wx4c
WxCtrl = wx4c.get_wx_mod(wx, wx.Control)
class MyCustomControl(WxCtrl): ...
Use case 2: brush style (NOTE: you can do that with fonts as well, but it
doesn't seem to be necessary):
from wxtbx import wx4_compatibility as wx4c
bkgrd = self.GetBackgroundColour()
with wx4c.set_brush_style(wx.BRUSHSTYLE_SOLID) as bstyle:
brush = wx.Brush(bkgrd, bstyle)
Use case 3: Toolbars
from wxtbx import wx4_compatibility as wx4c, bitmaps
class MyFrame(wx.Frame):
def __init__(self, parent, id, title, *args, **kwargs):
wx.Frame.__init__(self, parent, id, title, *args, **kwargs)
self.toolbar = wx4c.ToolBar(self, style=wx.TB_TEXT)
self.quit_button = self.toolbar.AddTool(toolId=wx.ID_ANY,
label='Quit',
kind=wx.ITEM_NORMAL,
bitmap=bitmaps.fetch_icon_bitmap('actions', 'exit')
shortHelp='Exit program')
...
self.SetToolBar(self.toolbar)
self.toolbar.Realize()
'''
import wx
from contextlib import contextmanager
import importlib
wx4 = wx.__version__[0] == '4'
# Mapping of wxPython 3 'Py*' class names to their wxPython 4 replacements.
# Each entry is (wx3 name, wx4 name); the wx4 name may be dotted to refer to a
# submodule class (e.g. 'adv.TipProvider').
modnames = [
    ('PyControl', 'Control'),
    ('PyDataObjectSimple', 'DataObjectSimple'),
    ('PyDropTarget', 'DropTarget'),
    ('PyEvtHandler', 'EvtHandler'),
    ('PyImageHandler', 'ImageHandler'),
    ('PyLocale', 'Locale'),
    ('PyLog', 'Log'),
    ('PyPanel', 'Panel'),
    ('PyPickerBase', 'PickerBase'),
    ('PyPreviewControlBar', 'PreviewControlBar'),
    ('PyPreviewFrame', 'PreviewFrame'),
    ('PyPrintPreview', 'PrintPreview'),
    ('PyScrolledWindow', 'ScrolledWindow'),
    ('PySimpleApp', 'App'),
    ('PyTextDataObject', 'TextDataObject'),
    ('PyTimer', 'Timer'),
    ('PyTipProvider', 'adv.TipProvider'),
    ('PyValidator', 'Validator'),
    # BUGFIX: was ('PyWindow'', Window') — the misplaced quotes made Python
    # concatenate the adjacent string literals into the single string
    # 'PyWindow, Window' instead of a (old, new) tuple.
    ('PyWindow', 'Window')
]
# The tables below pair a deprecated wxPython 3 style alias (left) with the
# explicit enumeration name required by wxPython 4 (right), e.g.
# wx.SWISS -> wx.FONTFAMILY_SWISS. Presumably consumed by the compatibility
# helpers elsewhere in this module — confirm against their call sites.
font_families = [
    (wx.DEFAULT, wx.FONTFAMILY_DEFAULT),
    (wx.DECORATIVE, wx.FONTFAMILY_DECORATIVE),
    (wx.ROMAN, wx.FONTFAMILY_ROMAN),
    (wx.SCRIPT, wx.FONTFAMILY_SCRIPT),
    (wx.SWISS, wx.FONTFAMILY_SWISS),
    (wx.MODERN, wx.FONTFAMILY_MODERN),
    (wx.TELETYPE, wx.FONTFAMILY_TELETYPE)
]
font_weights = [
    (wx.NORMAL, wx.FONTWEIGHT_NORMAL),
    (wx.LIGHT, wx.FONTWEIGHT_LIGHT),
    (wx.BOLD, wx.FONTWEIGHT_BOLD)
]
font_styles = [
    (wx.NORMAL, wx.FONTSTYLE_NORMAL),
    (wx.ITALIC, wx.FONTSTYLE_ITALIC),
    (wx.SLANT, wx.FONTSTYLE_SLANT)
]
pen_styles = [
    (wx.SOLID, wx.PENSTYLE_SOLID),
    (wx.DOT, wx.PENSTYLE_DOT),
    (wx.LONG_DASH, wx.PENSTYLE_LONG_DASH),
    (wx.SHORT_DASH, wx.PENSTYLE_SHORT_DASH),
    (wx.DOT_DASH, wx.PENSTYLE_DOT_DASH),
    (wx.USER_DASH, wx.PENSTYLE_USER_DASH),
    (wx.TRANSPARENT, wx.PENSTYLE_TRANSPARENT)
]
brush_styles = [
    (wx.SOLID, wx.BRUSHSTYLE_SOLID),
    (wx.TRANSPARENT, wx.BRUSHSTYLE_TRANSPARENT),
    (wx.STIPPLE_MASK_OPAQUE, wx.BRUSHSTYLE_STIPPLE_MASK_OPAQUE),
    (wx.STIPPLE_MASK, wx.BRUSHSTYLE_STIPPLE_MASK),
    (wx.STIPPLE, wx.BRUSHSTYLE_STIPPLE),
    (wx.BDIAGONAL_HATCH, wx.BRUSHSTYLE_BDIAGONAL_HATCH),
    (wx.CROSSDIAG_HATCH, wx.BRUSHSTYLE_CROSSDIAG_HATCH),
    (wx.FDIAGONAL_HATCH, wx.BRUSHSTYLE_FDIAGONAL_HATCH),
    (wx.CROSS_HATCH, wx.BRUSHSTYLE_CROSS_HATCH),
    (wx.HORIZONTAL_HATCH, wx.BRUSHSTYLE_HORIZONTAL_HATCH),
    (wx.VERTICAL_HATCH, wx.BRUSHSTYLE_VERTICAL_HATCH),
]
class Wx3ToolBar(wx.ToolBar):
    '''Toolbar that exposes the wxPython 4-style ``AddTool`` signature while
    running under wxPython 3, by forwarding to the wx3-era ``AddLabelTool``.'''

    def AddTool(self, toolId, label, bitmap, bmpDisabled=wx.NullBitmap,
                kind=wx.ITEM_NORMAL, shortHelp='', longHelp='',
                clientData=None):
        '''Thin forwarding wrapper: wx3's AddLabelTool is wx4's AddTool.

        Note the wx3 parameter is named ``id``, hence the keyword re-mapping.
        '''
        return self.AddLabelTool(
            id=toolId,
            label=label,
            bitmap=bitmap,
            bmpDisabled=bmpDisabled,
            kind=kind,
            shortHelp=shortHelp,
            longHelp=longHelp,
            clientData=clientData)
class Wx4ToolBar(wx.ToolBar):
    '''Toolbar that exposes the wxPython 3-style ``AddLabelTool`` signature
    while running under wxPython 4, by forwarding to the wx4 ``AddTool``.'''

    def AddLabelTool(self, id, label, bitmap, bmpDisabled=wx.NullBitmap,
                     kind=wx.ITEM_NORMAL, shortHelp='', longHelp='',
                     clientData=None):
        '''Thin forwarding wrapper: wx4's AddTool is wx3's AddLabelTool.

        Note the wx4 parameter is named ``toolId``, hence the keyword re-mapping.
        '''
        return self.AddTool(
            toolId=id,
            label=label,
            bitmap=bitmap,
            bmpDisabled=bmpDisabled,
            kind=kind,
            shortHelp=shortHelp,
            longHelp=longHelp,
            clientData=clientData)
# Use this ToolBar class to create toolbars in frames
ToolBar = Wx4ToolBar if wx4 else Wx3ToolBar
| 32.981221 | 96 | 0.691103 |
c483b92cbfbdabe1b45008c539e6179a5bd43a9f | 1,548 | py | Python | BMVC_version/utils.py | ZhengyuZhao/ACE | 5065cde807fe689115849c55d440783d8a471901 | [
"MIT"
] | 19 | 2020-05-13T07:51:00.000Z | 2021-06-13T11:03:47.000Z | BMVC_version/utils.py | ZhengyuZhao/AdvCF | 5065cde807fe689115849c55d440783d8a471901 | [
"MIT"
] | 1 | 2020-09-09T09:39:28.000Z | 2020-09-10T20:30:02.000Z | BMVC_version/utils.py | ZhengyuZhao/AdvCF | 5065cde807fe689115849c55d440783d8a471901 | [
"MIT"
] | 3 | 2020-09-05T11:32:23.000Z | 2021-03-30T01:41:07.000Z | import torch
import torch.nn as nn
import csv
#image quantization
#picecwise-linear color filter
#parsing the data annotation
# simple Module to normalize an image
# values are standard normalization for ImageNet images,
# from https://github.com/pytorch/examples/blob/master/imagenet/main.py
norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
| 29.769231 | 99 | 0.660207 |
c4844ed8e45f32c88606465081cf2391a8999d1d | 4,849 | py | Python | lemonpie/_nbdev.py | corazonlabs/ehr_preprocessing | 5bf3be1f04d9dc6db002b58331800b30cf668e69 | [
"Apache-2.0"
] | 3 | 2021-04-03T01:16:18.000Z | 2021-07-31T20:44:47.000Z | lemonpie/_nbdev.py | corazonlabs/ehr_preprocessing | 5bf3be1f04d9dc6db002b58331800b30cf668e69 | [
"Apache-2.0"
] | 5 | 2021-03-30T21:23:47.000Z | 2022-02-26T10:17:12.000Z | lemonpie/_nbdev.py | vin00d/lemonpie | 5bf3be1f04d9dc6db002b58331800b30cf668e69 | [
"Apache-2.0"
] | 1 | 2020-11-26T00:35:28.000Z | 2020-11-26T00:35:28.000Z | # AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"get_device": "00_basics.ipynb",
"settings_template": "00_basics.ipynb",
"read_settings": "00_basics.ipynb",
"DEVICE": "00_basics.ipynb",
"settings": "00_basics.ipynb",
"DATA_STORE": "00_basics.ipynb",
"LOG_STORE": "00_basics.ipynb",
"MODEL_STORE": "00_basics.ipynb",
"EXPERIMENT_STORE": "00_basics.ipynb",
"PATH_1K": "00_basics.ipynb",
"PATH_10K": "00_basics.ipynb",
"PATH_20K": "00_basics.ipynb",
"PATH_100K": "00_basics.ipynb",
"FILENAMES": "00_basics.ipynb",
"SYNTHEA_DATAGEN_DATES": "00_basics.ipynb",
"CONDITIONS": "00_basics.ipynb",
"LOG_NUMERICALIZE_EXCEP": "00_basics.ipynb",
"read_raw_ehrdata": "01_preprocessing_clean.ipynb",
"split_patients": "01_preprocessing_clean.ipynb",
"split_ehr_dataset": "01_preprocessing_clean.ipynb",
"cleanup_pts": "01_preprocessing_clean.ipynb",
"cleanup_obs": "01_preprocessing_clean.ipynb",
"cleanup_algs": "01_preprocessing_clean.ipynb",
"cleanup_crpls": "01_preprocessing_clean.ipynb",
"cleanup_meds": "01_preprocessing_clean.ipynb",
"cleanup_img": "01_preprocessing_clean.ipynb",
"cleanup_procs": "01_preprocessing_clean.ipynb",
"cleanup_cnds": "01_preprocessing_clean.ipynb",
"cleanup_immns": "01_preprocessing_clean.ipynb",
"cleanup_dataset": "01_preprocessing_clean.ipynb",
"extract_ys": "01_preprocessing_clean.ipynb",
"insert_age": "01_preprocessing_clean.ipynb",
"clean_raw_ehrdata": "01_preprocessing_clean.ipynb",
"load_cleaned_ehrdata": "01_preprocessing_clean.ipynb",
"load_ehr_vocabcodes": "01_preprocessing_clean.ipynb",
"EhrVocab": "02_preprocessing_vocab.ipynb",
"ObsVocab": "02_preprocessing_vocab.ipynb",
"EhrVocabList": "02_preprocessing_vocab.ipynb",
"get_all_emb_dims": "02_preprocessing_vocab.ipynb",
"collate_codes_offsts": "03_preprocessing_transform.ipynb",
"get_codenums_offsts": "03_preprocessing_transform.ipynb",
"get_demographics": "03_preprocessing_transform.ipynb",
"Patient": "03_preprocessing_transform.ipynb",
"get_pckl_dir": "03_preprocessing_transform.ipynb",
"PatientList": "03_preprocessing_transform.ipynb",
"cpu_cnt": "03_preprocessing_transform.ipynb",
"create_all_ptlists": "03_preprocessing_transform.ipynb",
"preprocess_ehr_dataset": "03_preprocessing_transform.ipynb",
"EHRDataSplits": "04_data.ipynb",
"LabelEHRData": "04_data.ipynb",
"EHRDataset": "04_data.ipynb",
"EHRData": "04_data.ipynb",
"accuracy": "05_metrics.ipynb",
"null_accuracy": "05_metrics.ipynb",
"ROC": "05_metrics.ipynb",
"MultiLabelROC": "05_metrics.ipynb",
"plot_rocs": "05_metrics.ipynb",
"plot_train_valid_rocs": "05_metrics.ipynb",
"auroc_score": "05_metrics.ipynb",
"auroc_ci": "05_metrics.ipynb",
"save_to_checkpoint": "06_learn.ipynb",
"load_from_checkpoint": "06_learn.ipynb",
"get_loss_fn": "06_learn.ipynb",
"RunHistory": "06_learn.ipynb",
"train": "06_learn.ipynb",
"evaluate": "06_learn.ipynb",
"fit": "06_learn.ipynb",
"predict": "06_learn.ipynb",
"plot_loss": "06_learn.ipynb",
"plot_losses": "06_learn.ipynb",
"plot_aurocs": "06_learn.ipynb",
"plot_train_valid_aurocs": "06_learn.ipynb",
"plot_fit_results": "06_learn.ipynb",
"summarize_prediction": "06_learn.ipynb",
"count_parameters": "06_learn.ipynb",
"dropout_mask": "07_models.ipynb",
"InputDropout": "07_models.ipynb",
"linear_layer": "07_models.ipynb",
"create_linear_layers": "07_models.ipynb",
"init_lstm": "07_models.ipynb",
"EHR_LSTM": "07_models.ipynb",
"init_cnn": "07_models.ipynb",
"conv_layer": "07_models.ipynb",
"EHR_CNN": "07_models.ipynb",
"get_data": "08_experiment.ipynb",
"get_optimizer": "08_experiment.ipynb",
"get_model": "08_experiment.ipynb",
"Experiment": "08_experiment.ipynb"}
modules = ["basics.py",
"preprocessing/clean.py",
"preprocessing/vocab.py",
"preprocessing/transform.py",
"data.py",
"metrics.py",
"learn.py",
"models.py",
"experiment.py"]
doc_url = "https://corazonlabs.github.io/lemonpie/"
git_url = "https://github.com/corazonlabs/lemonpie/tree/main/"
| 44.486239 | 70 | 0.630852 |
c4847cc6bababbdf22257962d4c32b15d776c5ed | 8,277 | py | Python | tensorboard/plugins/graph_edit/c2graph_util.py | qzhong0605/tensorboardplugins | 92bfc7ca96b933cdbdf074a08f26f5c715d8421d | [
"Apache-2.0"
] | null | null | null | tensorboard/plugins/graph_edit/c2graph_util.py | qzhong0605/tensorboardplugins | 92bfc7ca96b933cdbdf074a08f26f5c715d8421d | [
"Apache-2.0"
] | null | null | null | tensorboard/plugins/graph_edit/c2graph_util.py | qzhong0605/tensorboardplugins | 92bfc7ca96b933cdbdf074a08f26f5c715d8421d | [
"Apache-2.0"
] | null | null | null | # Convert the caffe2 model into tensorboard GraphDef
#
# The details of caffe2 model is on the compat/proto/caffe2/caffe2.proto
# And the details of GraphDef model is on the compat/proto/graph.proto
#
################################################################################
from tensorboard.compat.proto import graph_pb2
from tensorboard.compat.proto import attr_value_pb2
from tensorboard.compat.proto import node_def_pb2
from tensorboard.compat.proto import tensor_shape_pb2
from tensorboard.compat.proto import tensor_pb2
from tensorboard.compat.proto import types_pb2
from tensorboard.compat.proto.caffe2 import caffe2_pb2
from tensorboard.util import tb_logging
from tensorboard.plugins.graph_edit import tbgraph_base
from google.protobuf import text_format
logger = tb_logging.get_logger()
| 44.5 | 106 | 0.618461 |
c4852e08624ac34e2478471564d3403491679e03 | 1,251 | py | Python | src/Homework2_1.py | alexaquino/TUM-AUTONAVx | 95c6829fa2e31e1a11bf2c7726386593e7adbdce | [
"MIT"
] | null | null | null | src/Homework2_1.py | alexaquino/TUM-AUTONAVx | 95c6829fa2e31e1a11bf2c7726386593e7adbdce | [
"MIT"
] | null | null | null | src/Homework2_1.py | alexaquino/TUM-AUTONAVx | 95c6829fa2e31e1a11bf2c7726386593e7adbdce | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# The MIT License (MIT)
# Copyright (c) 2014 Alex Aquino dos Santos
# Technische Universitt Mnchen (TUM)
# Autonomous Navigation for Flying Robots
# Homework 2.1
from plot import plot
| 34.75 | 109 | 0.686651 |
c4855377edb8f2377a14569ead5ae6f4b477315f | 1,651 | py | Python | src_tf/templates/tf_estimator_template/model/example.py | ashishpatel26/finch | bf2958c0f268575e5d51ad08fbc08b151cbea962 | [
"MIT"
] | 1 | 2019-02-12T09:22:00.000Z | 2019-02-12T09:22:00.000Z | src_tf/templates/tf_estimator_template/model/example.py | loopzxl/finch | bf2958c0f268575e5d51ad08fbc08b151cbea962 | [
"MIT"
] | null | null | null | src_tf/templates/tf_estimator_template/model/example.py | loopzxl/finch | bf2958c0f268575e5d51ad08fbc08b151cbea962 | [
"MIT"
] | 1 | 2020-10-15T21:34:17.000Z | 2020-10-15T21:34:17.000Z | from configs import args
import tensorflow as tf
| 34.395833 | 80 | 0.637795 |
c485ee350fbe503865765122e5205b0c6d84fd8d | 1,300 | py | Python | {{cookiecutter.project_slug}}/core/management/commands/snippets/fastapi_project/core/security.py | claysllanxavier/django-cookiecutter | 97de7ff4ed3dc94c32bf756a57aee0664a888cbc | [
"BSD-3-Clause"
] | 8 | 2021-08-13T17:48:27.000Z | 2022-02-22T02:34:15.000Z | {{cookiecutter.project_slug}}/core/management/commands/snippets/fastapi_project/core/security.py | claysllanxavier/django-cookiecutter | 97de7ff4ed3dc94c32bf756a57aee0664a888cbc | [
"BSD-3-Clause"
] | 2 | 2022-03-24T20:39:00.000Z | 2022-03-24T20:39:48.000Z | {{cookiecutter.project_slug}}/core/management/commands/snippets/fastapi_project/core/security.py | claysllanxavier/django-cookiecutter | 97de7ff4ed3dc94c32bf756a57aee0664a888cbc | [
"BSD-3-Clause"
] | 2 | 2021-09-21T00:05:27.000Z | 2022-01-03T10:50:05.000Z | from datetime import datetime, timedelta
from typing import Any, Union
from jose import jwt
from passlib.context import CryptContext
from .config import settings
# Password hashing context: Django-compatible schemes, with PBKDF2-SHA256 as
# the default algorithm for newly created hashes ("django_disabled" marks
# hashes of disabled accounts).
pwd_context = CryptContext(
    default="django_pbkdf2_sha256",
    schemes=["django_argon2", "django_bcrypt", "django_bcrypt_sha256",
             "django_pbkdf2_sha256", "django_pbkdf2_sha1",
             "django_disabled"])
# JWT signing algorithm and access-token lifetime (not used in this snippet —
# presumably consumed by token-creation helpers defined elsewhere; confirm).
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 60 * 24 * 8 # 8 days
# English translation of the (mojibake-damaged) Portuguese note below:
# "JWT token security configuration file
#  - methods for verifying and creating password hashes
#  - method to create a valid JWT token"
'''
Arquivo de configurao de segurana dos tokens JWT
- Mtodos de verificao e criao de hash de senha
- Mtodo para criar o token jwt vlido
'''
| 29.545455 | 81 | 0.713077 |
c487c6e672ed0de9246b310bca5ef690e836e2e6 | 10,241 | py | Python | margarita/main.py | w0de/margarita | 50c7c07b8ee3d5d6c801833be7c147533c33fd70 | [
"Unlicense"
] | 3 | 2018-07-27T22:19:02.000Z | 2019-09-06T18:08:58.000Z | margarita/main.py | w0de/margarita | 50c7c07b8ee3d5d6c801833be7c147533c33fd70 | [
"Unlicense"
] | null | null | null | margarita/main.py | w0de/margarita | 50c7c07b8ee3d5d6c801833be7c147533c33fd70 | [
"Unlicense"
] | 1 | 2019-05-21T18:07:46.000Z | 2019-05-21T18:07:46.000Z | #!/usr/bin/env python
from flask import Flask
from flask import jsonify, render_template, redirect
from flask import request, Response
from saml_auth import BaseAuth, SamlAuth
import os, sys
try:
import json
except ImportError:
# couldn't find json, try simplejson library
import simplejson as json
import getopt
from operator import itemgetter
from distutils.version import LooseVersion
from reposadolib import reposadocommon
# Maps the basename of an Apple software-update catalog URL to the OS X
# version that catalog corresponds to (consumed by versions_from_catalogs).
apple_catalog_version_map = {
    'index-10.14-10.13-10.12-10.11-10.10-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog': '10.14',
    'index-10.13-10.12-10.11-10.10-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog': '10.13',
    'index-10.12-10.11-10.10-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog': '10.12',
    'index-10.11-10.10-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog': '10.11',
    'index-10.10-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog': '10.10',
    'index-10.9-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog': '10.9',
    'index-mountainlion-lion-snowleopard-leopard.merged-1.sucatalog': '10.8',
    'index-lion-snowleopard-leopard.merged-1.sucatalog': '10.7',
    'index-leopard-snowleopard.merged-1.sucatalog': '10.6',
    'index-leopard.merged-1.sucatalog': '10.5',
    'index-1.sucatalog': '10.4',
    'index.sucatalog': '10.4',
}
# Default auth backend; SamlAuth (imported above) is the alternative.
BASE_AUTH_CLASS = BaseAuth
# build_app() is defined elsewhere in this module — presumably constructs the
# Flask app and the auth handler; confirm against its definition.
app, auth = build_app()
# cache the keys of the catalog version map dict
apple_catalog_suffixes = apple_catalog_version_map.keys()
def versions_from_catalogs(cats):
    '''Given an iterable of catalog URLs, return the set of corresponding
    OS X versions (per apple_catalog_version_map).'''
    found = set()
    for catalog_url in cats:
        # the catalog is identified by the final component of its URL path
        basename = catalog_url.rsplit('/', 1)[-1]
        if basename in apple_catalog_suffixes:
            found.add(apple_catalog_version_map[basename])
    return found
def json_response(r):
    '''Glue for wrapping a raw value as an HTTP 200 JSON response.'''
    body = json.dumps(r)
    return Response(body, status=200, mimetype='application/json')
def get_description_content(html):
    '''Extract the description markup from an update's HTML blurb.

    Looks for the first <p> (with or without attributes) or, failing that, the
    first <body> tag. For <p> the returned slice includes the tags themselves;
    for <body> only the inner content is returned (a DOM parser ignores a
    stray <body> anyway). Returns None for empty input or when neither tag is
    present.
    '''
    if len(html) == 0:
        return None
    # in the interest of (attempted) speed, try to avoid regexps
    lwrhtml = html.lower()
    celem = 'p'
    startloc = lwrhtml.find('<' + celem + '>')
    if startloc == -1:
        startloc = lwrhtml.find('<' + celem + ' ')
    if startloc == -1:
        celem = 'body'
        startloc = lwrhtml.find('<' + celem)
        if startloc != -1:
            # BUGFIX: skip past the end of the opening tag. The old code did
            # `startloc += 6` (length of a bare '<body>'), which returned
            # garbage starting mid-tag for '<body class=...>'. For a plain
            # '<body>' the result is unchanged.
            tag_close = lwrhtml.find('>', startloc)
            startloc = tag_close + 1 if tag_close != -1 else startloc + 6
    if startloc == -1:
        # no <p> nor <body> tags. bail.
        return None
    endloc = lwrhtml.rfind('</' + celem + '>')
    if endloc == -1:
        endloc = len(html)
    elif celem != 'body':
        # for <p> keep the closing tag in the slice; for <body> drop it —
        # DOM parsing will just ignore it anyway
        endloc += len(celem) + 3
    return html[startloc:endloc]
def product_urls(cat_entry):
    '''Retrieve package URLs (and sizes) for a reposado product CatalogEntry.
    URLs are rewritten to be served from the local reposado repo if necessary.'''
    return [
        {
            'url': reposadocommon.rewriteOneURL(pkg['URL']),
            'size': pkg['Size'],
        }
        for pkg in cat_entry.get('Packages', [])
    ]
| 31.804348 | 128 | 0.721023 |
c48919ef78498ed664eb6156c8117a86edb141da | 3,344 | py | Python | python/pato/transport/uart.py | kloper/pato | bfbbee4109227735934f990c5909616a6e8af0b9 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | python/pato/transport/uart.py | kloper/pato | bfbbee4109227735934f990c5909616a6e8af0b9 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | python/pato/transport/uart.py | kloper/pato | bfbbee4109227735934f990c5909616a6e8af0b9 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- python -*-
"""@file
@brief pyserial transport for pato
Copyright (c) 2014-2015 Dimitry Kloper <kloper@users.sf.net>.
All rights reserved.
@page License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation
are those of the authors and should not be interpreted as representing
official policies, either expressed or implied, of the Pato Project.
"""
import serial
from util.protocol import ProtocolException
| 34.122449 | 79 | 0.712022 |
c489ac681275868dff6ed544c5b85d56c81ef128 | 4,072 | py | Python | PYQT5/Games/RockPapperScissorsGame.py | Amara-Manikanta/Python-GUI | 0356e7cae7f1c51d0781bf431c386ee7262608b1 | [
"MIT"
] | null | null | null | PYQT5/Games/RockPapperScissorsGame.py | Amara-Manikanta/Python-GUI | 0356e7cae7f1c51d0781bf431c386ee7262608b1 | [
"MIT"
] | null | null | null | PYQT5/Games/RockPapperScissorsGame.py | Amara-Manikanta/Python-GUI | 0356e7cae7f1c51d0781bf431c386ee7262608b1 | [
"MIT"
] | null | null | null | import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QFont, QPixmap
from PyQt5.QtCore import QTimer
from random import randint
font = QFont("Times", 14)
buttonFont = QFont("Arial", 12)
computerScore = 0
playerScore = 0
if __name__ == '__main__':
main()
| 34.218487 | 116 | 0.590128 |
c489f0bb6aee13c77e0b4caf8c6ecbaa282336f5 | 539 | py | Python | services/neural/traindatabase.py | vitorecomp/hackaton-deep-learn | 962eac133ac92d56d8a55136773c2afe4da2e0b5 | [
"MIT"
] | null | null | null | services/neural/traindatabase.py | vitorecomp/hackaton-deep-learn | 962eac133ac92d56d8a55136773c2afe4da2e0b5 | [
"MIT"
] | null | null | null | services/neural/traindatabase.py | vitorecomp/hackaton-deep-learn | 962eac133ac92d56d8a55136773c2afe4da2e0b5 | [
"MIT"
] | null | null | null | from os import walk
import h5py
import numpy as np
from config.Database import Base
from config.Database import engine
from config.Database import Session
from models.Music import Music
from kmeans.kmeans import Kmeans
mypath = './dataset/datatr/'
if __name__ == "__main__":
main()
| 15.852941 | 41 | 0.736549 |
c48abebb839f713d689a09683874c38aef9511d6 | 1,128 | py | Python | projects/TGS_salt/binary_classifier/model.py | liaopeiyuan/ml-arsenal-public | f8938ce3cb58b35fc7cc20d096c39a85ec9780b2 | [
"Apache-2.0"
] | 280 | 2018-10-21T01:07:18.000Z | 2021-12-30T11:29:48.000Z | projects/TGS_salt/binary_classifier/model.py | liaopeiyuan/ml-arsenal-public | f8938ce3cb58b35fc7cc20d096c39a85ec9780b2 | [
"Apache-2.0"
] | 3 | 2018-11-13T08:04:48.000Z | 2020-04-17T09:20:03.000Z | projects/TGS_salt/binary_classifier/model.py | liaopeiyuan/ml-arsenal-public | f8938ce3cb58b35fc7cc20d096c39a85ec9780b2 | [
"Apache-2.0"
] | 59 | 2018-10-21T04:38:23.000Z | 2021-03-29T07:58:47.000Z | import torch.nn as nn
import pretrainedmodels
| 31.333333 | 109 | 0.60461 |
6700a5bb5f070e2573ae2cc0040f1d1a36a7e4ca | 13,050 | py | Python | code/algorithm/assr.py | ShuhuaGao/bcn_opt_dc | 93234f6b799670bc80daf83794c51841f1a24715 | [
"MIT"
] | null | null | null | code/algorithm/assr.py | ShuhuaGao/bcn_opt_dc | 93234f6b799670bc80daf83794c51841f1a24715 | [
"MIT"
] | null | null | null | code/algorithm/assr.py | ShuhuaGao/bcn_opt_dc | 93234f6b799670bc80daf83794c51841f1a24715 | [
"MIT"
] | null | null | null | """
Given a Boolean function/network, get its algebraic state-space representation.
A logical vector `\delta_n^i` is represented by an integer `i` for space efficiency. Consequently, a logical matrix
is represented by a list, each element for one column, (also known as the "condensed form").
[1] Conversion from an infix expression to a postfix one:
https://runestone.academy/runestone/books/published/pythonds/BasicDS/InfixPrefixandPostfixExpressions.html
[2] Logical connectives: https://en.wikipedia.org/wiki/Logical_connective
Author: Gao Shuhua
"""
import operator
import os
from typing import List, Union, Tuple, Iterable, Dict
from .bcn import BooleanNetwork, BooleanControlNetwork
# Markers used when parsing a network description text file (see build_ASSR).
_COMMENT = '#'  # a line starting with this character is a comment
_STATES = '[STATES]'  # optional line fixing the order of the state variables
_CONTROLS = '[CONTROLS]'  # optional line fixing the order of the control inputs
# Supported logical connectives keyed by their upper-case name. The positional
# arguments appear to be (name, description, arity, precedence, function) —
# LogicalConnective is defined elsewhere in this module; confirm there. A
# smaller precedence number binds tighter (NOT tightest, EQUIV loosest), as
# consumed by the shunting-yard loop in _infix_to_postfix.
LOGICAL_CONNECTIVES = {
    'NOT': LogicalConnective('NOT', 'not', 1, 0, operator.not_),
    'XOR': LogicalConnective('XOR', 'exclusive disjunction', 2, 1, operator.xor),
    'AND': LogicalConnective('AND', 'and', 2, 2, operator.and_),
    'OR': LogicalConnective('OR', 'or', 2, 3, operator.or_),
    'IMPLY': LogicalConnective('IMPLY', 'implication', 2, 4, _imply),
    'EQUIV': LogicalConnective('EQUIV', 'equivalent', 2, 5, _xnor)
}
def _infix_to_postfix(expression: str) -> List[Union[LogicalConnective, str]]:
    """
    Convert an infix Boolean expression into postfix (RPN) form.

    :param expression: infix expression with tokens separated by spaces;
        parentheses need not be space-separated
    :return: the postfix expression as a list whose elements are operators
        (LogicalConnective) or variables/constants (str)
    :raises RuntimeError: if the parentheses are unbalanced
    """
    # Tokenize: split on whitespace, then peel '(' and ')' off each chunk,
    # since they may be glued to identifiers, e.g. 'A OR (B AND C)'.
    tokens = []
    for chunk in expression.split():
        buf = ''
        for ch in chunk:
            if ch in '()':
                if buf:
                    tokens.append(buf)
                    buf = ''
                tokens.append(ch)
            else:
                buf += ch
        if buf:
            tokens.append(buf)
    # Shunting-yard conversion: operators wait on a stack until an operator of
    # equal-or-looser precedence (larger number) or a closing paren flushes them.
    operators = []
    postfix = []
    for token in tokens:
        upper = token.upper()
        if upper in LOGICAL_CONNECTIVES:  # an operator
            conn = LOGICAL_CONNECTIVES[upper]
            while (operators and isinstance(operators[-1], LogicalConnective)
                   and operators[-1].precedence < conn.precedence):
                postfix.append(operators.pop())
            operators.append(conn)
        elif token == '(':
            operators.append(token)
        elif token == ')':
            matched = False
            while operators:
                top = operators.pop()
                if top == '(':
                    matched = True
                    break
                postfix.append(top)
            if not matched:
                raise RuntimeError("Unmatched parentheses are encountered: an extra ')'!")
        elif upper in ('1', 'TRUE'):
            postfix.append('TRUE')
        elif upper in ('0', 'FALSE'):
            postfix.append('FALSE')
        else:  # a variable
            postfix.append(token)
    # Flush the remaining operators; a leftover '(' means unbalanced input.
    while operators:
        top = operators.pop()
        if top == '(':
            raise RuntimeError("Unmatched parentheses are encountered: an extra '('!")
        postfix.append(top)
    return postfix
def _evaluate_postfix(expression, values: {}):
    """
    Evaluate a postfix Boolean expression under the given variable assignment.

    :param expression: postfix token list (str variables/constants and
        operator objects exposing ``arity`` and ``__call__``)
    :param values: dict mapping variable name -> value (0/1 or False/True);
        checked before the 'TRUE'/'FALSE' constants, so a variable literally
        named 'TRUE' shadows the constant
    :return: the truth value of the expression
    :raises RuntimeError: on a variable missing from ``values``
    """
    stack = []
    for item in expression:
        if isinstance(item, str):  # a variable or boolean constant
            if item in values:
                stack.append(values[item])
            elif item == 'TRUE':
                stack.append(True)
            elif item == 'FALSE':
                stack.append(False)
            else:
                raise RuntimeError(f"Unrecognized variable: '{item}'")
        else:  # a logical connective: pop its operands (restoring their order)
            args = [stack.pop() for _ in range(item.arity)]
            args.reverse()
            stack.append(item(*args))
    return stack.pop()
def _assr_function(pf_expr: List[Union[LogicalConnective, str]], states: List[str], controls: List[str]) -> List[int]:
    """
    Compute the structure matrix (condensed form) of one Boolean function.

    :param pf_expr: the Boolean function in postfix form
    :param states: the state variables
    :param controls: the control inputs (may be empty)
    :return: the structure matrix as a list of length 2^(m+n); each entry is
        1 (the function is True for that assignment) or 2 (False)
    """
    variables = controls + states
    width = len(variables)
    total = 2 ** width  # = M * N = 2^m * 2^n
    # Enumerate every binary assignment h; column (total - h - 1) of the
    # structure matrix holds the result for h, hence the final reversal.
    columns = []
    for h in range(total):
        bits = format(h, f'0{width}b')
        assignment = {name: int(bit) for name, bit in zip(variables, bits)}
        truth = _evaluate_postfix(pf_expr, assignment)
        columns.append(1 if truth else 2)
    columns.reverse()
    return columns
def _tokenize(state_to_expr: Dict[str, str], controls: Iterable[str]=None) -> Tuple[Dict[str, List[Union[LogicalConnective, str]]], List[str]]:
    """
    Parse each state's infix expression into postfix form and determine the
    control inputs.

    (1) Every expression is converted with :func:`_infix_to_postfix`.
    (2) If `controls` is None, the controls are inferred: any variable that is
        not a state (and not a TRUE/FALSE constant), in order of first
        appearance across the expressions.

    :return: (state -> postfix expression, list of controls)
    """
    pf_map = {state: _infix_to_postfix(expr) for state, expr in state_to_expr.items()}
    if controls is None:
        # infer controls in order of first appearance
        inferred = []
        for expr_tokens in pf_map.values():
            for tok in expr_tokens:
                # str tokens are variables or the 'TRUE'/'FALSE' constants
                if (isinstance(tok, str) and tok not in ('TRUE', 'FALSE')
                        and tok not in pf_map and tok not in inferred):
                    inferred.append(tok)
        control_list = inferred
    else:
        control_list = list(controls)
    # validate: every variable must be either a state or a control
    for state, expr_tokens in pf_map.items():
        for tok in expr_tokens:
            if isinstance(tok, str):
                assert tok in pf_map or tok in control_list, \
                    f"Unrecognized variable: '{tok}' in equation of {state}"
    return pf_map, control_list
def _assr_network(state_to_pf_expr: Dict[str, List[Union[LogicalConnective, str]]], states: List[str],
                  controls: List[str], verbose: bool=True) -> List[int]:
    """
    Get the ASSR (network transition matrix) of a Boolean (control) network.

    :param state_to_pf_expr: state -> its postfix expression
    :param states: state variables
    :param controls: control inputs
    :param verbose: whether to print progress information
    :return: network transition matrix in condensed form, one integer per column
    """
    # BUGFIX: assertion message previously read "number of state states"
    assert len(state_to_pf_expr) == len(states), \
        'The number of Boolean functions must be equal to the number of state variables'
    # get the structure matrix of each state (i.e., its Boolean equation)
    state_to_sms = {}
    for s, pf_expr in state_to_pf_expr.items():
        if verbose:
            print(f'\tComputing the structure matrix for state {s} ...')
        state_to_sms[s] = _assr_function(pf_expr, states, controls)
    n = len(states)
    m = len(controls)
    transition_matrix = [None] * (2 ** m * 2 ** n)

    # Named def instead of an assigned lambda (PEP 8, E731): composes two
    # condensed logical vectors, delta_2^i and delta_2^j -> (i - 1) * 2 + j.
    def stp(i, j):
        return (i - 1) * 2 + j

    if verbose:
        print('\tComposing the complete network transition matrix...')
    for k in range(len(transition_matrix)):  # k-th column
        r = 1
        for s in states:
            r = stp(r, state_to_sms[s][k])
        transition_matrix[k] = r
    return transition_matrix
def build_ASSR(source: Union[str, Iterable[str]], states: List[str]=None,
               controls: List[str]=None, verbose: bool=True) -> Union[BooleanNetwork, BooleanControlNetwork]:
    """
    Build the ASSR for a given Boolean network in a string form.
    Each Boolean function is given by the form: state = f(states, controls).
    If a text file is given, each Boolean function is provided per line, and '#' starts a comment line
    :param source: str or a list of str. (1) str: a single Boolean function or a text file, which contains one or more
        Boolean functions (i.e., a network), each per line; (2) a list of str: multiple Boolean functions
    :param states: state variables. If `None`, then inferred automatically.
    :param controls: control inputs. If this a Boolean network with no inputs, then give it an empty List.
        If `None`, then inferred automatically.
    :param verbose: whether to print more information
    :return: a Boolean network if there are no inputs; otherwise, a Boolean control network
    .. note::
        If the states and controls are inferred, the order of states corresponds to the line order, whereas the order
        of controls depend on their appearance order in the equations. To precisely control the order (especially for
        controls), two additional lines may be appended after the state equations that begin with "[STATES]" or "[CONTROLS]".
        For example, line "[STATES] AKT MKK EGFR" specifies the state order (AKT, MKK, EGFR).
        Of course, both "[STATES]" and "[CONTROLS]" lines are optional.
        The non-None arguments `states` and `controls` have higher precedence than "[STATES]" and "[CONTROLS]" lines respectively.
    """
    # Collect the raw equation strings into `net`, one equation per entry.
    net = []
    if isinstance(source, str):
        if os.path.isfile(source):
            if verbose:
                print(f'User provided a network file: {source}\nParsing...')
            with open(source, 'r') as f:
                for line in f:
                    line = line.strip()
                    if line.startswith(_COMMENT):
                        # Comment line (prefix in module constant _COMMENT) -- ignored.
                        continue
                    elif line.startswith(_STATES):
                        # "[STATES]" directive: explicit state ordering. A non-None
                        # `states` argument takes precedence over this line.
                        if states is None:
                            words = line.split()
                            states = [w.strip() for w in words[1:]]
                    elif line.startswith(_CONTROLS):
                        # "[CONTROLS]" directive: explicit control ordering. A non-None
                        # `controls` argument likewise takes precedence.
                        if controls is None:
                            words = line.split()
                            controls = [w.strip() for w in words[1:]]
                    else:
                        if line: # skip empty lines if any
                            net.append(line)
        else:
            # A string that is not an existing file path is treated as a single equation.
            if verbose:
                print(f'User provided a single Boolean equation.')
            net.append(source)
    else:
        if verbose:
            print(f'User provided a list of Boolean equations.')
        net = list(source)
    # Split each "state = expression" line; the inferred state order follows line order.
    state_to_expr = {}
    inferred_states = []
    for eq in net:
        state, expr = eq.split('=')
        state = state.strip()
        expr = expr.strip()
        if states is not None:
            assert state in states, f'Unexpected state {state} is encountered!'
        else:
            inferred_states.append(state)
        # Each state may have exactly one defining equation.
        assert state not in state_to_expr, f'More than one equation is provided for state {state}'
        state_to_expr[state] = expr
    if states is not None:
        # Conversely, every declared state must have an equation.
        for s in states:
            assert s in state_to_expr, f'The equation for state {s} is missing'
    else:
        states = inferred_states
    if verbose:
        print('Tokenizing...')
    # Parse to postfix; controls are inferred inside _tokenize when not supplied.
    state_to_pf_expr, controls = _tokenize(state_to_expr, controls)
    assert set(states).isdisjoint(controls), 'States and controls should be disjoint'
    if verbose:
        print(f'States are {states}')
        print(f'Controls are {controls}')
        print('Computing...')
    # Compute the ASSR transition matrix of the network.
    L = _assr_network(state_to_pf_expr, states, controls, verbose)
    # Wrap the result: a plain Boolean network when there are no control inputs.
    m = len(controls)
    n = len(states)
    if m == 0:
        return BooleanNetwork(n, L, states)
    return BooleanControlNetwork(n, m, L, states, controls)
| 39.071856 | 144 | 0.604828 |
6701184b0bdf306dd90792d6a104891f22b55364 | 4,953 | py | Python | datasets/voc_dataset.py | ming71/DAL | 48cd29fdbf5eeea1b5b642bd1f04bbf1863b31e3 | [
"Apache-2.0"
] | 206 | 2020-09-12T06:17:00.000Z | 2022-03-28T08:05:51.000Z | datasets/voc_dataset.py | JOOCHANN/DAL | 0f379de70ba01c6c9162f4e980a8bd2491976e9c | [
"Apache-2.0"
] | 47 | 2020-10-21T06:14:18.000Z | 2022-03-16T01:54:28.000Z | datasets/voc_dataset.py | JOOCHANN/DAL | 0f379de70ba01c6c9162f4e980a8bd2491976e9c | [
"Apache-2.0"
] | 38 | 2020-10-22T10:39:51.000Z | 2022-03-17T12:36:46.000Z | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# Extended by Linjie Deng
# --------------------------------------------------------
import os
import cv2
import numpy as np
import torch
import torch.utils.data as data
import xml.etree.ElementTree as ET
from utils.bbox import quad_2_rbox
if __name__ == '__main__':
pass | 37.240602 | 93 | 0.557238 |
67046e56ceee4d6e7815e597ff49d092a5c53d48 | 1,907 | py | Python | neploid.py | GravityI/neploid | 4b68e682fcda97a95d155bea288aa90740842b66 | [
"MIT"
] | null | null | null | neploid.py | GravityI/neploid | 4b68e682fcda97a95d155bea288aa90740842b66 | [
"MIT"
] | null | null | null | neploid.py | GravityI/neploid | 4b68e682fcda97a95d155bea288aa90740842b66 | [
"MIT"
] | null | null | null | import discord
import random
import asyncio
import logging
import urllib.request
from discord.ext import commands
bot = commands.Bot(command_prefix='nep ', description= "Nep Nep")
counter = 0
countTask = None
token = "insert token here"
bot.run(token) | 28.893939 | 153 | 0.681699 |
6706396f498d795e0d71e25c46fb2f83e80c424d | 1,025 | py | Python | odoo/base-addons/l10n_tr/__manifest__.py | LucasBorges-Santos/docker-odoo | 53987bbd61f6119669b5f801ee2ad54695084a21 | [
"MIT"
] | null | null | null | odoo/base-addons/l10n_tr/__manifest__.py | LucasBorges-Santos/docker-odoo | 53987bbd61f6119669b5f801ee2ad54695084a21 | [
"MIT"
] | null | null | null | odoo/base-addons/l10n_tr/__manifest__.py | LucasBorges-Santos/docker-odoo | 53987bbd61f6119669b5f801ee2ad54695084a21 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Turkey - Accounting',
'version': '1.0',
'category': 'Localization',
'description': """
Trkiye iin Tek dzen hesap plan ablonu Odoo Modl.
==========================================================
Bu modl kurulduktan sonra, Muhasebe yaplandrma sihirbaz alr
* Sihirbaz sizden hesap plan ablonu, plann kurulaca irket, banka hesap
bilgileriniz, ilgili para birimi gibi bilgiler isteyecek.
""",
'author': 'Ahmet Altnk, Can Tecim',
'maintainer':'https://launchpad.net/~openerp-turkey, http://www.cantecim.com',
'depends': [
'account',
],
'data': [
'data/l10n_tr_chart_data.xml',
'data/account.account.template.csv',
'data/l10n_tr_chart_post_data.xml',
'data/account_data.xml',
'data/account_tax_template_data.xml',
'data/account_chart_template_data.xml',
],
'license': 'LGPL-3',
}
| 33.064516 | 82 | 0.61561 |
6706ffad81c03f382360a4810c2bf16d4cc561bb | 4,364 | py | Python | Source Codes/SMF_Python/smf_main.py | mmaher22/iCV-SBR | 72effab621a9f8f5cee0d584b5a2f0e98524ffd6 | [
"MIT"
] | 20 | 2020-08-25T06:10:14.000Z | 2022-03-27T15:42:55.000Z | Source Codes/SMF_Python/smf_main.py | mmaher22/iCV-SBR | 72effab621a9f8f5cee0d584b5a2f0e98524ffd6 | [
"MIT"
] | null | null | null | Source Codes/SMF_Python/smf_main.py | mmaher22/iCV-SBR | 72effab621a9f8f5cee0d584b5a2f0e98524ffd6 | [
"MIT"
] | 7 | 2020-09-25T15:12:53.000Z | 2022-03-25T15:23:43.000Z | import os
import time
import argparse
import pandas as pd
from smf import SessionMF
parser = argparse.ArgumentParser()
parser.add_argument('--K', type=int, default=20, help="K items to be used in Recall@K and MRR@K")
parser.add_argument('--factors', type=int, default=100, help="Number of latent factors.")
parser.add_argument('--batch', type=int, default=32, help="Batch size for the training process")
parser.add_argument('--momentum', type=float, default=0.0, help="Momentum of the optimizer adagrad_sub")
parser.add_argument('--regularization', type=float, default=0.0001, help="Regularization Amount of the objective function")
parser.add_argument('--dropout', type=float, default=0.0, help="Share of items that are randomly discarded from the current session while training")
parser.add_argument('--skip', type=float, default=0.0, help="Probability that an item is skiped and the next one is used as the positive example")
parser.add_argument('--neg_samples', type=int, default=2048, help="Number of items that are sampled as negative examples")
parser.add_argument('--activation', type=str, default='linear', help="Final activation function (linear, sigmoid, uf_sigmoid, hard_sigmoid, relu, softmax, softsign, softplus, tanh)")
parser.add_argument('--objective', type=str, default='bpr_max', help="Loss Function (bpr_max, top1_max, bpr, top1)")
parser.add_argument('--epochs', type=int, default=10, help="Number of Epochs")
parser.add_argument('--lr', type=float, default=0.001, help="Learning Rate")
parser.add_argument('--itemid', default='ItemID', type=str)
parser.add_argument('--sessionid', default='SessionID', type=str)
parser.add_argument('--valid_data', default='recSys15Valid.txt', type=str)
parser.add_argument('--train_data', default='recSys15TrainOnly.txt', type=str)
parser.add_argument('--data_folder', default='/home/icvuser/Desktop/Recsys cleaned data/RecSys15 Dataset Splits', type=str)
# Get the arguments
args = parser.parse_args()
train_data = os.path.join(args.data_folder, args.train_data)
x_train = pd.read_csv(train_data)
x_train.sort_values(args.sessionid, inplace=True)
x_train = x_train.iloc[-int(len(x_train) / 64) :] #just take 1/64 last instances
valid_data = os.path.join(args.data_folder, args.valid_data)
x_valid = pd.read_csv(valid_data)
x_valid.sort_values(args.sessionid, inplace=True)
print('Finished Reading Data \nStart Model Fitting...')
# Fitting Model
t1 = time.time()
model = SessionMF(factors = args.factors, session_key = args.sessionid, item_key = args.itemid,
batch = args.batch, momentum = args.momentum, regularization = args.regularization,
dropout = args.dropout, skip = args.skip, samples = args.neg_samples,
activation = args.activation, objective = args.objective, epochs = args.epochs, learning_rate = args.lr)
model.fit(x_train)
t2 = time.time()
print('End Model Fitting with total time =', t2 - t1, '\n Start Predictions...')
# Test Set Evaluation
test_size = 0.0
hit = 0.0
MRR = 0.0
cur_length = 0
cur_session = -1
last_items = []
t1 = time.time()
index_item = x_valid.columns.get_loc(args.itemid)
index_session = x_valid.columns.get_loc(args.sessionid)
train_items = model.unique_items
counter = 0
for row in x_valid.itertuples( index=False ):
counter += 1
if counter % 10000 == 0:
print('Finished Prediction for ', counter, 'items.')
session_id, item_id = row[index_session], row[index_item]
if session_id != cur_session:
cur_session = session_id
last_items = []
cur_length = 0
if item_id in model.item_map.keys():
if len(last_items) > cur_length: #make prediction
cur_length += 1
test_size += 1
# Predict the most similar items to items
predictions = model.predict_next(last_items, K = args.K)
# Evaluation
rank = 0
for predicted_item in predictions:
#print(predicted_item, item_id, '###')
rank += 1
if int(predicted_item) == item_id:
hit += 1.0
MRR += 1/rank
break
last_items.append(item_id)
t2 = time.time()
print('Recall: {}'.format(hit / test_size))
print ('\nMRR: {}'.format(MRR / test_size))
print('End Model Predictions with total time =', t2 - t1) | 47.956044 | 182 | 0.695921 |
6707397442e36941efca1b5ee8ee3696d4dcdf31 | 25,163 | py | Python | sdks/python/appcenter_sdk/models/Device.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | null | null | null | sdks/python/appcenter_sdk/models/Device.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 6 | 2019-10-23T06:38:53.000Z | 2022-01-22T07:57:58.000Z | sdks/python/appcenter_sdk/models/Device.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 2 | 2019-10-23T06:31:05.000Z | 2021-08-21T17:32:47.000Z | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Device):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 32.21895 | 505 | 0.632953 |
6707b1d92879723bb590b117c8481d4a309bdf74 | 5,591 | py | Python | src/providers/snmp.py | tcuthbert/napi | 12ea1a4fb1075749b40b2d93c3d4ab7fb75db8b5 | [
"MIT"
] | null | null | null | src/providers/snmp.py | tcuthbert/napi | 12ea1a4fb1075749b40b2d93c3d4ab7fb75db8b5 | [
"MIT"
] | null | null | null | src/providers/snmp.py | tcuthbert/napi | 12ea1a4fb1075749b40b2d93c3d4ab7fb75db8b5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author : Thomas Cuthbert
import os, sys
from providers.provider import Provider
from config.config import Config
sys.path.append('../')
def _strip_oid_from_list(oids, strip):
"""Iterates through list of oids and strips snmp tree off index.
Returns sorted list of indexes.
Keyword Arguments:
self --
oid -- Regular numeric oid index
strip -- Value to be stripped off index
"""
sorted_oids = []
for index in oids:
s = index[0].replace(strip, "")
sorted_oids.append((s, index[1]))
return sorted(sorted_oids)
def _get_snmp(oid, hostname, community):
    """Issue a single SNMP GET and return the result as an (oid, value) tuple.

    Errors are printed to stdout, in which case (and when the response carries
    no varbinds) the function implicitly returns `None`.
    :param oid: OID to query
    :param hostname: target agent host name or IP address (port 161 is used)
    :param community: SNMP community string
    :return: (oid, value) pair of pretty-printed strings, or None on error
    """
    # Function-scope import: pysnmp is only needed when this is actually called.
    from pysnmp.entity.rfc3413.oneliner import cmdgen
    cmd_gen = cmdgen.CommandGenerator()
    error_indication, error_status, error_index, var_bind = cmd_gen.getCmd(
        cmdgen.CommunityData(community),
        cmdgen.UdpTransportTarget((hostname, 161)),
        oid)
    if error_indication:
        # Transport-level problem (e.g. timeout, unreachable host).
        print(error_indication)
    else:
        if error_status:
            # SNMP protocol error; error_index (1-based) points at the
            # offending varbind, '?' when it is 0/unset.
            print ('%s at %s' % (
                error_status.prettyPrint(),
                error_index and var_bind[int(error_index)-1] or '?')
            )
        else:
            # Single GET -> return on the first varbind.
            for name, value in var_bind:
                return (name.prettyPrint(), value.prettyPrint())
| 31.587571 | 137 | 0.603291 |
6707dd7b43e33c316be804768ef020a089466983 | 14,107 | py | Python | visionpack/stable_baselines3/common/off_policy_algorithm.py | joeljosephjin/gvgai-rl | 57281629c313abb43312950b22d043a3d67639cf | [
"Apache-2.0"
] | null | null | null | visionpack/stable_baselines3/common/off_policy_algorithm.py | joeljosephjin/gvgai-rl | 57281629c313abb43312950b22d043a3d67639cf | [
"Apache-2.0"
] | null | null | null | visionpack/stable_baselines3/common/off_policy_algorithm.py | joeljosephjin/gvgai-rl | 57281629c313abb43312950b22d043a3d67639cf | [
"Apache-2.0"
] | null | null | null | import time
import os
import pickle
import warnings
from typing import Union, Type, Optional, Dict, Any, Callable
import gym
import torch as th
import numpy as np
from stable_baselines3.common import logger
from stable_baselines3.common.base_class import BaseAlgorithm
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.utils import safe_mean
from stable_baselines3.common.vec_env import VecEnv
from stable_baselines3.common.type_aliases import GymEnv, RolloutReturn
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.buffers import ReplayBuffer
| 50.382143 | 122 | 0.607571 |
6707dda4f20fd2cb10f818588c5b114047a6d11c | 2,743 | py | Python | src/oscar/apps/dashboard/app.py | frmdstryr/django-oscar | 32bf8618ebb688df6ba306dc7703de8e61b4e78c | [
"BSD-3-Clause"
] | null | null | null | src/oscar/apps/dashboard/app.py | frmdstryr/django-oscar | 32bf8618ebb688df6ba306dc7703de8e61b4e78c | [
"BSD-3-Clause"
] | null | null | null | src/oscar/apps/dashboard/app.py | frmdstryr/django-oscar | 32bf8618ebb688df6ba306dc7703de8e61b4e78c | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.contrib.auth.forms import AuthenticationForm
from oscar.core.application import (
DashboardApplication as BaseDashboardApplication)
from oscar.core.loading import get_class
application = DashboardApplication()
| 44.967213 | 91 | 0.654028 |
67081cebddc67151d15ce739da186891614e2d4d | 4,783 | py | Python | wedding/migrations/0004_auto_20170407_2017.py | chadgates/thetravelling2 | 3646d64acb0fbf5106066700f482c9013f5fb7d0 | [
"MIT"
] | null | null | null | wedding/migrations/0004_auto_20170407_2017.py | chadgates/thetravelling2 | 3646d64acb0fbf5106066700f482c9013f5fb7d0 | [
"MIT"
] | null | null | null | wedding/migrations/0004_auto_20170407_2017.py | chadgates/thetravelling2 | 3646d64acb0fbf5106066700f482c9013f5fb7d0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-04-07 20:17
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
| 46.892157 | 135 | 0.582061 |
6708d69bfe7f1ec1d25240a2e512900542ce4a78 | 820 | py | Python | taskonomy/utils/log_utils.py | shikhar-srivastava/hover_net | d4e8e129a4ad72f5d574a78c036449b496421529 | [
"MIT"
] | null | null | null | taskonomy/utils/log_utils.py | shikhar-srivastava/hover_net | d4e8e129a4ad72f5d574a78c036449b496421529 | [
"MIT"
] | null | null | null | taskonomy/utils/log_utils.py | shikhar-srivastava/hover_net | d4e8e129a4ad72f5d574a78c036449b496421529 | [
"MIT"
] | null | null | null | import pandas as pd
import pickle | 43.157895 | 164 | 0.684146 |
6709a543eab8bce61601cfd76117d243faac013b | 5,373 | py | Python | train_DEU.py | JosephineRabbit/MLMSNet | 755e07afd1c19797b02cf88b7bbb195112ffec77 | [
"MIT"
] | 61 | 2019-04-23T15:17:36.000Z | 2021-08-20T15:48:11.000Z | train_DEU.py | zhuxinang/MLMSNet | a824a70fa37aeb4536bc72d8032e871328c687e8 | [
"MIT"
] | 8 | 2019-05-04T04:38:26.000Z | 2020-08-16T15:15:15.000Z | train_DEU.py | JosephineRabbit/MLMSNet | 755e07afd1c19797b02cf88b7bbb195112ffec77 | [
"MIT"
] | 7 | 2019-06-12T07:02:06.000Z | 2020-09-20T02:37:36.000Z | from D_E_U import *
D_E = DSS(*extra_layer(vgg(base['dss'], 3), extra['dss']),config.BATCH_SIZE).cuda()
U = D_U().cuda()
U.cuda()
data_dirs = [
("/home/rabbit/Datasets/DUTS/DUT-train/DUT-train-Image",
"/home/rabbit/Datasets/DUTS/DUT-train/DUT-train-Mask"),
]
test_dirs = [("/home/rabbit/Datasets/SED1/SED1-Image",
"/home/rabbit/Datasets/SED1/SED1-Mask")]
D_E.base.load_state_dict(torch.load('/home/rabbit/Desktop/DUT_train/weights/vgg16_feat.pth'))
initialize_weights(U)
DE_optimizer = optim.Adam(D_E.parameters(), lr=config.D_LEARNING_RATE, betas=(0.5, 0.999))
U_optimizer = optim.Adam(U.parameters(), lr=config.U_LEARNING_RATE, betas=(0.5, 0.999))
BCE_loss = torch.nn.BCELoss().cuda()
batch_size =BATCH_SIZE
DATA_DICT = {}
IMG_FILES = []
GT_FILES = []
IMG_FILES_TEST = []
GT_FILES_TEST = []
for dir_pair in data_dirs:
X, y = process_data_dir(dir_pair[0]), process_data_dir(dir_pair[1])
IMG_FILES.extend(X)
GT_FILES.extend(y)
for dir_pair in test_dirs:
X, y = process_data_dir(dir_pair[0]), process_data_dir(dir_pair[1])
IMG_FILES_TEST.extend(X)
GT_FILES_TEST.extend(y)
IMGS_train, GT_train = IMG_FILES, GT_FILES
train_folder = DataFolder(IMGS_train, GT_train, True)
train_data = DataLoader(train_folder, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS, shuffle=True,
drop_last=True)
test_folder = DataFolder(IMG_FILES_TEST, GT_FILES_TEST, trainable=False)
test_data = DataLoader(test_folder, batch_size=1, num_workers=NUM_WORKERS, shuffle=False)
best_eval = None
x = 0
ma = 1
for epoch in range(1, config.NUM_EPOCHS + 1):
sum_train_mae = 0
sum_train_loss = 0
sum_train_gan = 0
##train
for iter_cnt, (img_batch, label_batch, edges, shape, name) in enumerate(train_data):
D_E.train()
x = x + 1
# print(img_batch.size())
label_batch = Variable(label_batch).cuda()
# print(torch.typename(label_batch))
print('training start!!')
# for iter, (x_, _) in enumerate(train_data):
img_batch = Variable(img_batch.cuda()) # ,Variable(z_.cuda())
edges = Variable(edges).cuda()
##########DSS#########################
######train dis
##fake
f,y1,y2 = D_E(img_batch)
m_l_1,e_l_1 = cal_DLoss(y1,y2,label_batch,edges)
DE_optimizer.zero_grad()
DE_l_1 = m_l_1 +e_l_1
DE_l_1.backward()
DE_optimizer.step()
w = [2,2,3,3]
f, y1, y2 = D_E(img_batch)
masks,DIC = U(f)
pre_ms_l = 0
ma = torch.abs(label_batch-masks[4]).mean()
pre_m_l = F.binary_cross_entropy(masks[4],label_batch)
for i in range(4):
pre_ms_l +=w[i] * F.binary_cross_entropy(masks[i],label_batch)
DE_optimizer.zero_grad()
DE_l_1 = pre_ms_l/20+30*pre_m_l
DE_l_1.backward()
DE_optimizer.step()
f, y1, y2 = D_E(img_batch)
masks,DIC = U(f)
pre_ms_l = 0
ma = torch.abs(label_batch-masks[4]).mean()
pre_m_l = F.binary_cross_entropy(masks[4], label_batch)
for i in range(4):
pre_ms_l += w[i] * F.binary_cross_entropy(masks[i], label_batch)
U_optimizer.zero_grad()
U_l_1 = pre_ms_l/20+30*pre_m_l
U_l_1.backward()
U_optimizer.step()
sum_train_mae += ma.data.cpu()
print("Epoch:{}\t {}/{}\ \t mae:{}".format(epoch, iter_cnt + 1,
len(train_folder) / config.BATCH_SIZE,
sum_train_mae / (iter_cnt + 1)))
##########save model
# torch.save(D.state_dict(), './checkpoint/DSS/with_e_2/D15epoch%d.pkl' % epoch)
torch.save(D_E.state_dict(), './checkpoint/DSS/with_e_2/D_Eepoch%d.pkl' % epoch)
torch.save(U.state_dict(), './checkpoint/DSS/with_e_2/Uis.pkl')
print('model saved')
###############test
eval1 = 0
eval2 = 0
t_mae = 0
for iter_cnt, (img_batch, label_batch, edges, shape, name) in enumerate(test_data):
D_E.eval()
U.eval()
label_batch = Variable(label_batch).cuda()
print('val!!')
# for iter, (x_, _) in enumerate(train_data):
img_batch = Variable(img_batch.cuda()) # ,Variable(z_.cuda())
f,y1,y2 = D_E(img_batch)
masks, DIC = U(f)
mae_v2 = torch.abs(label_batch - masks[4]).mean().data[0]
# eval1 += mae_v1
eval2 += mae_v2
# m_eval1 = eval1 / (iter_cnt + 1)
m_eval2 = eval2 / (iter_cnt + 1)
print("test mae", m_eval2)
with open('results1.txt', 'a+') as f:
f.write(str(epoch) + " 2:" + str(m_eval2) + "\n")
| 24.760369 | 116 | 0.594826 |
670bfcaeeccc178a263df62b6b3d972d4904cdc0 | 5,122 | py | Python | machine-learning-ex2/ex2/ex2.py | DuffAb/coursera-ml-py | efcfb0847ac7d1e181cb6b93954b0176ce6162d4 | [
"MIT"
] | null | null | null | machine-learning-ex2/ex2/ex2.py | DuffAb/coursera-ml-py | efcfb0847ac7d1e181cb6b93954b0176ce6162d4 | [
"MIT"
] | null | null | null | machine-learning-ex2/ex2/ex2.py | DuffAb/coursera-ml-py | efcfb0847ac7d1e181cb6b93954b0176ce6162d4 | [
"MIT"
] | null | null | null | # Machine Learning Online Class - Exercise 2: Logistic Regression
#
# Instructions
# ------------
#
# This file contains code that helps you get started on the logistic
# regression exercise. You will need to complete the following functions
# in this exericse:
#
# sigmoid.py
# costFunction.py
# predict.py
# costFunctionReg.py
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
from plotData import *
import costFunction as cf
import plotDecisionBoundary as pdb
import predict as predict
from sigmoid import *
plt.ion()
# Load data
# The first two columns contain the exam scores and the third column contains the label.
data = np.loadtxt('ex2data1.txt', delimiter=',')
print('plot_decision_boundary data[0, 0:1] = \n{}'.format(data[0, 0:1]))
print('plot_decision_boundary data[0, 0:2] = \n{}'.format(data[0, 0:2]))
print('plot_decision_boundary data[0, 0:3] = \n{}'.format(data[0, 0:3]))
print('plot_decision_boundary data[0, 1:1] = \n{}'.format(data[0, 1:1]))
print('plot_decision_boundary data[0, 1:2] = \n{}'.format(data[0, 1:2]))
print('plot_decision_boundary data[0, 1:3] = \n{}'.format(data[0, 1:3]))
print('plot_decision_boundary data[0, 2:1] = \n{}'.format(data[0, 2:1]))
print('plot_decision_boundary data[0, 2:2] = \n{}'.format(data[0, 2:2]))
print('plot_decision_boundary data[0, 2:3] = \n{}'.format(data[0, 2:3]))
X = data[:, 0:2]
y = data[:, 2]
# ===================== Part 1: Plotting =====================
# We start the exercise by first plotting the data to understand the
# the problem we are working with.
print('Plotting Data with + indicating (y = 1) examples and o indicating (y = 0) examples.')
plot_data(X, y)
plt.axis([30, 100, 30, 100])
# Specified in plot order.
plt.legend(['Admitted', 'Not admitted'], loc=1)
plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')
input('Program paused. Press ENTER to continue')
# ===================== Part 2: Compute Cost and Gradient =====================
# In this part of the exercise, you will implement the cost and gradient
# for logistic regression. You need to complete the code in
# costFunction.py
# Setup the data array appropriately, and add ones for the intercept term
(m, n) = X.shape
# Add intercept term
X = np.c_[np.ones(m), X]
# Initialize fitting parameters
initial_theta = np.zeros(n + 1) # theta
# Compute and display initial cost and gradient
cost, grad = cf.cost_function(initial_theta, X, y)
np.set_printoptions(formatter={'float': '{: 0.4f}\n'.format})
print('Cost at initial theta (zeros): {:0.3f}'.format(cost))
print('Expected cost (approx): 0.693')
print('Gradient at initial theta (zeros): \n{}'.format(grad))
print('Expected gradients (approx): \n-0.1000\n-12.0092\n-11.2628')
# Compute and display cost and gradient with non-zero theta
test_theta = np.array([-24, 0.2, 0.2])
cost, grad = cf.cost_function(test_theta, X, y)
print('Cost at test theta (zeros): {:0.3f}'.format(cost))
print('Expected cost (approx): 0.218')
print('Gradient at test theta: \n{}'.format(grad))
print('Expected gradients (approx): \n0.043\n2.566\n2.647')
input('Program paused. Press ENTER to continue')
# ===================== Part 3: Optimizing using fmin_bfgs =====================
# In this exercise, you will use a built-in function (opt.fmin_bfgs) to find the
# optimal parameters theta
# Run fmin_bfgs to obtain the optimal theta
theta, cost, *unused = opt.fmin_bfgs(f=cost_func, fprime=grad_func, x0=initial_theta, maxiter=400, full_output=True, disp=False)
print('Cost at theta found by fmin: {:0.4f}'.format(cost))
print('Expected cost (approx): 0.203')
print('theta: \n{}'.format(theta))
print('Expected Theta (approx): \n-25.161\n0.206\n0.201')
# Plot boundary
pdb.plot_decision_boundary(theta, X, y)
plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')
input('Program paused. Press ENTER to continue')
# ===================== Part 4: Predict and Accuracies =====================
# After learning the parameters, you'll like to use it to predict the outcomes
# on unseen data. In this part, you will use the logistic regression model
# to predict the probability that a student with score 45 on exam 1 and
# score 85 on exam 2 will be admitted
#
# Furthermore, you will compute the training and test set accuracies of our model.
#
# Your task is to complete the code in predict.py
# Predict probability for a student with score 45 on exam 1
# and score 85 on exam 2
prob = sigmoid(np.array([1, 45, 85]).dot(theta))
print('For a student with scores 45 and 85, we predict an admission probability of {:0.4f}'.format(prob))
print('Expected value : 0.775 +/- 0.002')
# Compute the accuracy on our training set
p = predict.predict(theta, X)
print('Train accuracy: {}'.format(np.mean(y == p) * 100))
print('Expected accuracy (approx): 89.0')
input('ex2 Finished. Press ENTER to exit')
| 34.608108 | 128 | 0.689184 |
670c1bac34e09541ccb5d179f3199b3e5c901751 | 2,866 | py | Python | tests/test_apiFunc.py | Reid1923/py-GoldsberryTest | 3c7e9e2f4ef75720e1a13c4c41018a2072487ddd | [
"MIT"
] | null | null | null | tests/test_apiFunc.py | Reid1923/py-GoldsberryTest | 3c7e9e2f4ef75720e1a13c4c41018a2072487ddd | [
"MIT"
] | null | null | null | tests/test_apiFunc.py | Reid1923/py-GoldsberryTest | 3c7e9e2f4ef75720e1a13c4c41018a2072487ddd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
import goldsberry
test_data = [
(goldsberry._nbaLeague, 'NBA', '00'),
(goldsberry._nbaLeague, 'WNBA', '10'),
(goldsberry._nbaLeague, 'NBADL', '20'),
(goldsberry._nbaSeason, 1999, '1999-00'),
(goldsberry._nbaSeason, 2000, '2000-01'),
(goldsberry._seasonID, 1999, '21999'),
(goldsberry._measureType, 1, 'Base'),
(goldsberry._measureType, 2, 'Advanced'),
(goldsberry._Scope, 1, ''),
(goldsberry._PerModeSmall48, 1, 'Totals'),
(goldsberry._PerModeSmall36, 1, 'Totals'),
(goldsberry._PerModeMini, 1, 'Totals'),
(goldsberry._PerModeLarge, 1, 'Totals'),
(goldsberry._AheadBehind, 1, 'Ahead or Behind'),
(goldsberry._ClutchTime, 1, 'Last 5 Minutes'),
(goldsberry._GameScope, 2, 'Yesterday'),
(goldsberry._PlayerExperience, 2, 'Rookie'),
(goldsberry._PlayerPosition, 2, 'F'),
(goldsberry._StarterBench, 2, 'Starters'),
(goldsberry._PlusMinus, 2, 'Y'),
(goldsberry._PaceAdjust, 2, 'Y'),
(goldsberry._Rank, 2, 'Y'),
(goldsberry._SeasonType, 1, 'Regular Season'),
(goldsberry._SeasonType4, 1, 'Regular Season'),
(goldsberry._Outcome, 2, 'W'),
(goldsberry._Location, 2, 'Home'),
(goldsberry._SeasonSegment, 2, 'Post All-Star'),
(goldsberry._VsConference, 2, 'East'),
(goldsberry._VsDivision, 2, 'Atlantic'),
(goldsberry._GameSegment, 2, 'First Half'),
(goldsberry._DistanceRange, 1, '5ft Range'),
(goldsberry._valiDate, '', ''),
(goldsberry._valiDate, '2015-01-02', '2015-01-02'),
(goldsberry._ContextMeasure, 1, 'FGM'),
(goldsberry._Position, 2, 'Guard'),
(goldsberry._StatCategory, 1, 'MIN'),
]
| 33.717647 | 56 | 0.691207 |
670d0a8e1a1197c9ec69df947dabd43d08e4160b | 4,295 | py | Python | sasmodels/models/poly_gauss_coil.py | zattala/sasmodels | a547aa73d43145b3bd34770b0ea27ba8882170a3 | [
"BSD-3-Clause"
] | null | null | null | sasmodels/models/poly_gauss_coil.py | zattala/sasmodels | a547aa73d43145b3bd34770b0ea27ba8882170a3 | [
"BSD-3-Clause"
] | null | null | null | sasmodels/models/poly_gauss_coil.py | zattala/sasmodels | a547aa73d43145b3bd34770b0ea27ba8882170a3 | [
"BSD-3-Clause"
] | null | null | null | #poly_gauss_coil model
#conversion of Poly_GaussCoil.py
#converted by Steve King, Mar 2016
r"""
This empirical model describes the scattering from *polydisperse* polymer
chains in theta solvents or polymer melts, assuming a Schulz-Zimm type
molecular weight distribution.
To describe the scattering from *monodisperse* polymer chains, see the
:ref:`mono-gauss-coil` model.
Definition
----------
.. math::
I(q) = \text{scale} \cdot I_0 \cdot P(q) + \text{background}
where
.. math::
I_0 &= \phi_\text{poly} \cdot V \cdot (\rho_\text{poly}-\rho_\text{solv})^2 \\
P(q) &= 2 [(1 + UZ)^{-1/U} + Z - 1] / [(1 + U) Z^2] \\
Z &= [(q R_g)^2] / (1 + 2U) \\
U &= (Mw / Mn) - 1 = \text{polydispersity ratio} - 1 \\
V &= M / (N_A \delta)
Here, $\phi_\text{poly}$, is the volume fraction of polymer, $V$ is the
volume of a polymer coil, $M$ is the molecular weight of the polymer,
$N_A$ is Avogadro's Number, $\delta$ is the bulk density of the polymer,
$\rho_\text{poly}$ is the sld of the polymer, $\rho_\text{solv}$ is the
sld of the solvent, and $R_g$ is the radius of gyration of the polymer coil.
The 2D scattering intensity is calculated in the same way as the 1D,
but where the $q$ vector is redefined as
.. math::
q = \sqrt{q_x^2 + q_y^2}
References
----------
.. [#] O Glatter and O Kratky (editors), *Small Angle X-ray Scattering*, Academic Press, (1982) Page 404
.. [#] J S Higgins, H C Benoit, *Polymers and Neutron Scattering*, Oxford Science Publications, (1996)
.. [#] S M King, *Small Angle Neutron Scattering* in *Modern Techniques for Polymer Characterisation*, Wiley, (1999)
.. [#] http://www.ncnr.nist.gov/staff/hammouda/distance_learning/chapter_28.pdf
Authorship and Verification
----------------------------
* **Author:**
* **Last Modified by:**
* **Last Reviewed by:**
"""
import numpy as np
from numpy import inf, expm1, power
name = "poly_gauss_coil"
title = "Scattering from polydisperse polymer coils"
description = """
Evaluates the scattering from
polydisperse polymer chains.
"""
category = "shape-independent"
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type", "description"],
parameters = [
["i_zero", "1/cm", 70.0, [0.0, inf], "", "Intensity at q=0"],
["rg", "Ang", 75.0, [0.0, inf], "", "Radius of gyration"],
["polydispersity", "None", 2.0, [1.0, inf], "", "Polymer Mw/Mn"],
]
# pylint: enable=bad-whitespace, line-too-long
# NB: Scale and Background are implicit parameters on every model
Iq.vectorized = True # Iq accepts an array of q values
def random():
"""Return a random parameter set for the model."""
rg = 10**np.random.uniform(0, 4)
#rg = 1e3
polydispersity = 10**np.random.uniform(0, 3)
pars = dict(
#scale=1, background=0,
i_zero=1e7, # i_zero is a simple scale
rg=rg,
polydispersity=polydispersity,
)
return pars
demo = dict(scale=1.0,
i_zero=70.0,
rg=75.0,
polydispersity=2.0,
background=0.0)
# these unit test values taken from SasView 3.1.2
tests = [
[{'scale': 1.0, 'i_zero': 70.0, 'rg': 75.0,
'polydispersity': 2.0, 'background': 0.0},
[0.0106939, 0.469418], [57.6405, 0.169016]],
]
| 32.293233 | 116 | 0.584633 |
670fa5323287fc9c400ddc9fd03e291ab3a5896f | 4,939 | py | Python | examples/information_extraction/msra_ner/eval.py | BenfengXu/PaddleNLP | eca87fde4a1814a8f028e0e900d1792cbaa5c700 | [
"Apache-2.0"
] | 1 | 2021-07-22T08:33:53.000Z | 2021-07-22T08:33:53.000Z | examples/information_extraction/msra_ner/eval.py | BenfengXu/PaddleNLP | eca87fde4a1814a8f028e0e900d1792cbaa5c700 | [
"Apache-2.0"
] | null | null | null | examples/information_extraction/msra_ner/eval.py | BenfengXu/PaddleNLP | eca87fde4a1814a8f028e0e900d1792cbaa5c700 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import ast
import random
import time
import math
from functools import partial
import numpy as np
import paddle
from paddle.io import DataLoader
import paddlenlp as ppnlp
from paddlenlp.datasets import load_dataset
from paddlenlp.data import Stack, Tuple, Pad, Dict
from paddlenlp.transformers import BertForTokenClassification, BertTokenizer
from paddlenlp.metrics import ChunkEvaluator
parser = argparse.ArgumentParser()
# yapf: disable
parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(list(BertTokenizer.pretrained_init_configuration.keys())))
parser.add_argument("--init_checkpoint_path", default=None, type=str, required=True, help="The model checkpoint path.", )
parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", )
parser.add_argument("--batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.", )
parser.add_argument("--device", default="gpu", type=str, choices=["cpu", "gpu", "xpu"] ,help="The device to select to train the model, is must be cpu/gpu/xpu.")
# yapf: enable
if __name__ == "__main__":
args = parser.parse_args()
do_eval(args)
| 39.512 | 226 | 0.70905 |
670fb8129b5e60d52265e167fb8a005a31688d39 | 14,814 | py | Python | src/python/module/z5py/util.py | constantinpape/z5 | 20e364cc614b744a0ee3cb733531c4b872839721 | [
"MIT"
] | 82 | 2018-02-02T04:03:49.000Z | 2022-03-25T07:41:08.000Z | src/python/module/z5py/util.py | constantinpape/z5 | 20e364cc614b744a0ee3cb733531c4b872839721 | [
"MIT"
] | 152 | 2017-09-18T15:49:05.000Z | 2022-03-16T21:07:07.000Z | src/python/module/z5py/util.py | constantinpape/z5 | 20e364cc614b744a0ee3cb733531c4b872839721 | [
"MIT"
] | 27 | 2017-09-19T14:52:56.000Z | 2021-11-25T14:43:47.000Z | import os
from itertools import product
from concurrent import futures
from contextlib import closing
from datetime import datetime
import numpy as np
from . import _z5py
from .file import File, S3File
from .dataset import Dataset
from .shape_utils import normalize_slices
def copy_dataset_impl(f_in, f_out, in_path_in_file, out_path_in_file,
n_threads, chunks=None, block_shape=None, dtype=None,
roi=None, fit_to_roi=False, **new_compression):
""" Implementation of copy dataset.
Used to implement `copy_dataset`, `convert_to_h5` and `convert_from_h5`.
Can also be used for more flexible use cases, like copying from a zarr/n5
cloud dataset to a filesytem dataset.
Args:
f_in (File): input file object.
f_out (File): output file object.
in_path_in_file (str): name of input dataset.
out_path_in_file (str): name of output dataset.
n_threads (int): number of threads used for copying.
chunks (tuple): chunks of the output dataset.
By default same as input dataset's chunks. (default: None)
block_shape (tuple): block shape used for copying. Must be a multiple
of ``chunks``, which are used by default (default: None)
dtype (str): datatype of the output dataset, default does not change datatype (default: None).
roi (tuple[slice]): region of interest that will be copied. (default: None)
fit_to_roi (bool): if given a roi, whether to set the shape of
the output dataset to the roi's shape
and align chunks with the roi's origin. (default: False)
**new_compression: compression library and options for output dataset. If not given,
the same compression as in the input is used.
"""
ds_in = f_in[in_path_in_file]
# check if we can copy chunk by chunk
in_is_z5 = isinstance(f_in, (File, S3File))
out_is_z5 = isinstance(f_out, (File, S3File))
copy_chunks = (in_is_z5 and out_is_z5) and (chunks is None or chunks == ds_in.chunks) and (roi is None)
# get dataset metadata from input dataset if defaults were given
chunks = ds_in.chunks if chunks is None else chunks
dtype = ds_in.dtype if dtype is None else dtype
# zarr objects may not have compression attribute. if so set it to the settings sent to this function
if not hasattr(ds_in, "compression"):
ds_in.compression = new_compression
compression = new_compression.pop("compression", ds_in.compression)
compression_opts = new_compression
same_lib = in_is_z5 == out_is_z5
if same_lib and compression == ds_in.compression:
compression_opts = compression_opts if compression_opts else ds_in.compression_opts
if out_is_z5:
compression = None if compression == 'raw' else compression
compression_opts = {} if compression_opts is None else compression_opts
else:
compression_opts = {'compression_opts': None} if compression_opts is None else compression_opts
# if we don't have block-shape explitictly given, use chunk size
# otherwise check that it's a multiple of chunks
if block_shape is None:
block_shape = chunks
else:
assert all(bs % ch == 0 for bs, ch in zip(block_shape, chunks)),\
"block_shape must be a multiple of chunks"
shape = ds_in.shape
# we need to create the blocking here, before the shape is potentially altered
# if fit_to_roi == True
blocks = blocking(shape, block_shape, roi, fit_to_roi)
if roi is not None:
roi, _ = normalize_slices(roi, shape)
if fit_to_roi:
shape = tuple(rr.stop - rr.start for rr in roi)
ds_out = f_out.require_dataset(out_path_in_file,
dtype=dtype,
shape=shape,
chunks=chunks,
compression=compression,
**compression_opts)
write_single = write_single_chunk if copy_chunks else write_single_block
with futures.ThreadPoolExecutor(max_workers=n_threads) as tp:
tasks = [tp.submit(write_single, bb) for bb in blocks]
[t.result() for t in tasks]
# copy attributes
in_attrs = ds_in.attrs
out_attrs = ds_out.attrs
for key, val in in_attrs.items():
out_attrs[key] = val
def copy_dataset(in_path, out_path,
in_path_in_file, out_path_in_file,
n_threads, chunks=None,
block_shape=None, dtype=None,
use_zarr_format=None, roi=None,
fit_to_roi=False, **new_compression):
""" Copy dataset, optionally change metadata.
The input dataset will be copied to the output dataset chunk by chunk.
Allows to change chunks, datatype, file format and compression.
Can also just copy a roi.
Args:
in_path (str): path to the input file.
out_path (str): path to the output file.
in_path_in_file (str): name of input dataset.
out_path_in_file (str): name of output dataset.
n_threads (int): number of threads used for copying.
chunks (tuple): chunks of the output dataset.
By default same as input dataset's chunks. (default: None)
block_shape (tuple): block shape used for copying. Must be a multiple
of ``chunks``, which are used by default (default: None)
dtype (str): datatype of the output dataset, default does not change datatype (default: None).
use_zarr_format (bool): file format of the output file,
default does not change format (default: None).
roi (tuple[slice]): region of interest that will be copied. (default: None)
fit_to_roi (bool): if given a roi, whether to set the shape of
the output dataset to the roi's shape
and align chunks with the roi's origin. (default: False)
**new_compression: compression library and options for output dataset. If not given,
the same compression as in the input is used.
"""
f_in = File(in_path)
# check if the file format was specified
# if not, keep the format of the input file
# otherwise set the file format
is_zarr = f_in.is_zarr if use_zarr_format is None else use_zarr_format
f_out = File(out_path, use_zarr_format=is_zarr)
copy_dataset_impl(f_in, f_out, in_path_in_file, out_path_in_file,
n_threads, chunks=chunks, block_shape=block_shape,
dtype=dtype, roi=roi, fit_to_roi=fit_to_roi,
**new_compression)
def copy_group(in_path, out_path, in_path_in_file, out_path_in_file, n_threads):
""" Copy group recursively.
Copy the group recursively, using copy_dataset. Metadata of datasets that
are copied cannot be changed and rois cannot be applied.
Args:
in_path (str): path to the input file.
out_path (str): path to the output file.
in_path_in_file (str): name of input group.
out_path_in_file (str): name of output group.
n_threads (int): number of threads used to copy datasets.
"""
f_in = File(in_path)
f_out = File(out_path)
g_in = f_in[in_path_in_file]
g_out = f_out.require_group(out_path_in_file)
copy_attrs(g_in, g_out)
g_in.visititems(copy_object)
def remove_trivial_chunks(dataset, n_threads,
remove_specific_value=None):
""" Remove chunks that only contain a single value.
The input dataset will be copied to the output dataset chunk by chunk.
Allows to change datatype, file format and compression as well.
Args:
dataset (z5py.Dataset)
n_threads (int): number of threads
remove_specific_value (int or float): only remove chunks that contain (only) this specific value (default: None)
"""
dtype = dataset.dtype
function = getattr(_z5py, 'remove_trivial_chunks_%s' % dtype)
remove_specific = remove_specific_value is not None
value = remove_specific_value if remove_specific else 0
function(dataset._impl, n_threads, remove_specific, value)
def remove_dataset(dataset, n_threads):
""" Remvoe dataset multi-threaded.
"""
_z5py.remove_dataset(dataset._impl, n_threads)
def remove_chunk(dataset, chunk_id):
""" Remove a chunk
"""
dataset._impl.remove_chunk(dataset._impl, chunk_id)
def remove_chunks(dataset, bounding_box):
""" Remove all chunks overlapping the bounding box
"""
shape = dataset.shape
chunks = dataset.chunks
blocks = blocking(shape, chunks, roi=bounding_box)
for block in blocks:
chunk_id = tuple(b.start // ch for b, ch in zip(block, chunks))
remove_chunk(dataset, chunk_id)
def unique(dataset, n_threads, return_counts=False):
""" Find unique values in dataset.
Args:
dataset (z5py.Dataset)
n_threads (int): number of threads
return_counts (bool): return counts of unique values (default: False)
"""
dtype = dataset.dtype
if return_counts:
function = getattr(_z5py, 'unique_with_counts_%s' % dtype)
else:
function = getattr(_z5py, 'unique_%s' % dtype)
return function(dataset._impl, n_threads)
| 37.887468 | 120 | 0.645876 |
671044f92c1e2bb7a547bce5cdc307d31e50194b | 8,485 | py | Python | custom_components/waste_collection_schedule/sensor.py | trstns/hacs_waste_collection_schedule | f8f297b43c8e87510e17a558347a88a95f790d7b | [
"MIT"
] | null | null | null | custom_components/waste_collection_schedule/sensor.py | trstns/hacs_waste_collection_schedule | f8f297b43c8e87510e17a558347a88a95f790d7b | [
"MIT"
] | null | null | null | custom_components/waste_collection_schedule/sensor.py | trstns/hacs_waste_collection_schedule | f8f297b43c8e87510e17a558347a88a95f790d7b | [
"MIT"
] | null | null | null | """Sensor platform support for Waste Collection Schedule."""
import collections
import datetime
import logging
from enum import Enum
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_VALUE_TEMPLATE, STATE_UNKNOWN
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import DOMAIN, UPDATE_SENSORS_SIGNAL
_LOGGER = logging.getLogger(__name__)
CONF_SOURCE_INDEX = "source_index"
CONF_DETAILS_FORMAT = "details_format"
CONF_COUNT = "count"
CONF_LEADTIME = "leadtime"
CONF_DATE_TEMPLATE = "date_template"
CONF_APPOINTMENT_TYPES = "types"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_SOURCE_INDEX, default=0): cv.positive_int,
vol.Optional(CONF_DETAILS_FORMAT, default="upcoming"): cv.enum(DetailsFormat),
vol.Optional(CONF_COUNT): cv.positive_int,
vol.Optional(CONF_LEADTIME): cv.positive_int,
vol.Optional(CONF_APPOINTMENT_TYPES): cv.ensure_list,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_DATE_TEMPLATE): cv.template,
}
)
def _set_state(self, upcoming):
"""Set entity state with default format."""
if len(upcoming) == 0:
self._state = ""
self._icon = None
self._picture = None
return
appointment = upcoming[0]
# appointment::=CollectionAppointmentGroup{date=2020-04-01, types=['Type1', 'Type2']}
if self._value_template is not None:
self._state = self._value_template.async_render_with_possible_json_value(
appointment, None
)
else:
self._state = f"{self._separator.join(appointment.types)} in {appointment.daysTo} days"
self._icon = appointment.icon
self._picture = appointment.picture
def _render_date(self, appointment):
if self._date_template is not None:
return self._date_template.async_render_with_possible_json_value(
appointment, None
)
else:
return appointment.date.isoformat()
| 32.140152 | 99 | 0.635357 |
671186e2f94db3759070c3a35c61ae043b2efdd5 | 2,622 | py | Python | qidian.py | kivson/qidian-dl | 9b42f4c530b7938ff80f160ef32aa51cc43671f6 | [
"MIT"
] | null | null | null | qidian.py | kivson/qidian-dl | 9b42f4c530b7938ff80f160ef32aa51cc43671f6 | [
"MIT"
] | null | null | null | qidian.py | kivson/qidian-dl | 9b42f4c530b7938ff80f160ef32aa51cc43671f6 | [
"MIT"
] | null | null | null | from concurrent.futures import ThreadPoolExecutor
from functools import partial
from json import JSONDecodeError
import requests
from funcy.calc import cache
from funcy.debug import print_calls
from funcy.simple_funcs import curry
HEADERS = {
"Accept": "application/json, text/javascript, */*; q=0.01",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/58.0.3029.110 Safari/537.36",
"X-Requested-With": "XMLHttpRequest"
}
HOME_URL = "https://www.webnovel.com/"
def novels():
for page in range(1, 10000):
response = requests.get("https://www.webnovel.com/apiajax/listing/popularAjax", headers=HEADERS, params={
'_csrfToken': _get_csrftoken(),
'category': '',
'pageIndex': page
})
data = _response_to_json(response)
if 'data' not in data or 'items' not in data['data'] or 'isLast' not in data['data']:
raise QidianException('Expected data not found')
yield from data['data']['items']
if data['data']['isLast'] == 1:
break
| 29.133333 | 113 | 0.666667 |
6712802d8a80e0d4a1dc7de07b3fd9bb724b208d | 4,398 | py | Python | srcWatteco/TICs/_poubelle/TIC_ICEp.py | OStephan29/Codec-Python | 76d651bb23daf1d9307c8b84533d9f24a59cea28 | [
"BSD-3-Clause"
] | 1 | 2022-01-12T15:46:58.000Z | 2022-01-12T15:46:58.000Z | srcWatteco/TICs/_poubelle/TIC_ICEp.py | OStephan29/Codec-Python | 76d651bb23daf1d9307c8b84533d9f24a59cea28 | [
"BSD-3-Clause"
] | null | null | null | srcWatteco/TICs/_poubelle/TIC_ICEp.py | OStephan29/Codec-Python | 76d651bb23daf1d9307c8b84533d9f24a59cea28 | [
"BSD-3-Clause"
] | 1 | 2021-10-05T08:40:15.000Z | 2021-10-05T08:40:15.000Z | # -*- coding: utf-8 -*-
# Pour passer de TICDataXXXFromBitfields @ TICDataBatchXXXFromFieldIndex
# Expressions rgulire notepad++
# Find : TICDataSelectorIfBit\( ([0-9]*), Struct\("([^\"]*)"\/([^\)]*).*
# Replace: \1 : \3, # \2
from ._TIC_Tools import *
from ._TIC_Types import *
TICDataICEpFromBitfields = Struct(
TICDataSelectorIfBit( 0, Struct("DEBUTp"/TYPE_DMYhms) ),
TICDataSelectorIfBit( 1, Struct("FINp"/TYPE_DMYhms)),
TICDataSelectorIfBit( 2, Struct("CAFp"/Int16ub) ),
TICDataSelectorIfBit( 3, Struct("DATE_EAp"/TYPE_DMYhms) ),
TICDataSelectorIfBit( 4, Struct("EApP"/Int24ub) ),
TICDataSelectorIfBit( 5, Struct("EApPM"/Int24ub) ),
TICDataSelectorIfBit( 6, Struct("EApHCE"/Int24ub) ),
TICDataSelectorIfBit( 7, Struct("EApHCH"/Int24ub) ),
TICDataSelectorIfBit( 8, Struct("EApHH"/Int24ub) ),
TICDataSelectorIfBit( 9, Struct("EApHCD"/Int24ub) ),
TICDataSelectorIfBit( 10, Struct("EApHD"/Int24ub) ),
TICDataSelectorIfBit( 11, Struct("EApJA"/Int24ub) ),
TICDataSelectorIfBit( 12, Struct("EApHPE"/Int24ub) ),
TICDataSelectorIfBit( 13, Struct("EApHPH"/Int24ub) ),
TICDataSelectorIfBit( 14, Struct("EApHPD"/Int24ub) ),
TICDataSelectorIfBit( 15, Struct("EApSCM"/Int24ub) ),
TICDataSelectorIfBit( 16, Struct("EApHM"/Int24ub) ),
TICDataSelectorIfBit( 17, Struct("EApDSM"/Int24ub) ),
TICDataSelectorIfBit( 18, Struct("DATE_ERPp"/TYPE_DMYhms) ),
TICDataSelectorIfBit( 19, Struct("ERPpP"/Int24ub) ),
TICDataSelectorIfBit( 20, Struct("ERPpPM"/Int24ub) ),
TICDataSelectorIfBit( 21, Struct("ERPpHCE"/Int24ub) ),
TICDataSelectorIfBit( 22, Struct("ERPpHCH"/Int24ub) ),
TICDataSelectorIfBit( 23, Struct("ERPpHH"/Int24ub) ),
TICDataSelectorIfBit( 24, Struct("ERPpHCD"/Int24ub) ),
TICDataSelectorIfBit( 25, Struct("ERPpHD"/Int24ub) ),
TICDataSelectorIfBit( 26, Struct("ERPpJA"/Int24ub) ),
TICDataSelectorIfBit( 27, Struct("ERPpHPE"/Int24ub) ),
TICDataSelectorIfBit( 28, Struct("ERPpHPH"/Int24ub) ),
TICDataSelectorIfBit( 29, Struct("ERPpHPD"/Int24ub) ),
TICDataSelectorIfBit( 30, Struct("ERPpSCM"/Int24ub) ),
TICDataSelectorIfBit( 31, Struct("ERPpHM"/Int24ub) ),
TICDataSelectorIfBit( 32, Struct("ERPpDSM"/Int24ub) ),
TICDataSelectorIfBit( 33, Struct("DATE_ERNp"/TYPE_DMYhms) ),
TICDataSelectorIfBit( 34, Struct("ERNpP"/Int24ub) ),
TICDataSelectorIfBit( 35, Struct("ERNpPM"/Int24ub) ),
TICDataSelectorIfBit( 36, Struct("ERNpHCE"/Int24ub) ),
TICDataSelectorIfBit( 37, Struct("ERNpHCH"/Int24ub) ),
TICDataSelectorIfBit( 38, Struct("ERNpHH"/Int24ub) ),
TICDataSelectorIfBit( 39, Struct("ERNpHCD"/Int24ub) ),
TICDataSelectorIfBit( 40, Struct("ERNpHD"/Int24ub) ),
TICDataSelectorIfBit( 41, Struct("ERNpJA"/Int24ub) ),
TICDataSelectorIfBit( 42, Struct("ERNpHPE"/Int24ub) ),
TICDataSelectorIfBit( 43, Struct("ERNpHPH"/Int24ub) ),
TICDataSelectorIfBit( 44, Struct("ERNpHPD"/Int24ub) ),
TICDataSelectorIfBit( 45, Struct("ERNpSCM"/Int24ub) ),
TICDataSelectorIfBit( 46, Struct("ERNpHM"/Int24ub) ),
TICDataSelectorIfBit( 47, Struct("ERNpDSM"/Int24ub) )
)
# NOTE: For Batch only scalar/numeric values are accepeted
TICDataBatchICEpFromFieldIndex = Switch( FindFieldIndex,
{
#0 : TYPE_DMYhms, # DEBUTp
#1 : TYPE_DMYhms, # FINp
2 : Int16ub, # CAFp
#3 : TYPE_DMYhms, # DATE_EAp
4 : Int24ub, # EApP
5 : Int24ub, # EApPM
6 : Int24ub, # EApHCE
7 : Int24ub, # EApHCH
8 : Int24ub, # EApHH
9 : Int24ub, # EApHCD
10 : Int24ub, # EApHD
11 : Int24ub, # EApJA
12 : Int24ub, # EApHPE
13 : Int24ub, # EApHPH
14 : Int24ub, # EApHPD
15 : Int24ub, # EApSCM
16 : Int24ub, # EApHM
17 : Int24ub, # EApDSM
#18 : TYPE_DMYhms, # DATE_ERPp
19 : Int24ub, # ERPpP
20 : Int24ub, # ERPpPM
21 : Int24ub, # ERPpHCE
22 : Int24ub, # ERPpHCH
23 : Int24ub, # ERPpHH
24 : Int24ub, # ERPpHCD
25 : Int24ub, # ERPpHD
26 : Int24ub, # ERPpJA
27 : Int24ub, # ERPpHPE
28 : Int24ub, # ERPpHPH
29 : Int24ub, # ERPpHPD
30 : Int24ub, # ERPpSCM
31 : Int24ub, # ERPpHM
32 : Int24ub, # ERPpDSM
#33 : TYPE_DMYhms, # DATE_ERNp
34 : Int24ub, # ERNpP
35 : Int24ub, # ERNpPM
36 : Int24ub, # ERNpHCE
37 : Int24ub, # ERNpHCH
38 : Int24ub, # ERNpHH
39 : Int24ub, # ERNpHCD
40 : Int24ub, # ERNpHD
41 : Int24ub, # ERNpJA
42 : Int24ub, # ERNpHPE
43 : Int24ub, # ERNpHPH
44 : Int24ub, # ERNpHPD
45 : Int24ub, # ERNpSCM
46 : Int24ub, # ERNpHM
47 : Int24ub, # ERNpDSM
}, default = TICUnbatchableFieldError()
)
| 33.572519 | 74 | 0.698272 |
6714f1b0e63e554da53c6d95c385058b29428db0 | 2,095 | py | Python | tests/test_check_types.py | oliel/python-ovirt-engine-sdk4 | c0b13982b45dee664ebc063bda7686124b402c14 | [
"Apache-2.0"
] | 3 | 2022-01-14T00:37:58.000Z | 2022-03-26T12:26:32.000Z | tests/test_check_types.py | oliel/python-ovirt-engine-sdk4 | c0b13982b45dee664ebc063bda7686124b402c14 | [
"Apache-2.0"
] | 29 | 2021-07-20T12:42:44.000Z | 2022-03-28T13:01:33.000Z | tests/test_check_types.py | oliel/python-ovirt-engine-sdk4 | c0b13982b45dee664ebc063bda7686124b402c14 | [
"Apache-2.0"
] | 12 | 2021-07-20T12:27:07.000Z | 2022-02-24T11:10:12.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ovirtsdk4.services as services
import ovirtsdk4.types as types
import unittest
from nose.tools import (
assert_in,
assert_raises,
)
from .server import TestServer
| 30.362319 | 74 | 0.630072 |
6715015a823d4efe629d554c1f06e22bd2b8c5e4 | 7,518 | py | Python | nsi/shell.py | NextStepInnovation/nsi-tools | ee4c9a9e512a2fb4942699d88920bc8210a3d701 | [
"MIT"
] | null | null | null | nsi/shell.py | NextStepInnovation/nsi-tools | ee4c9a9e512a2fb4942699d88920bc8210a3d701 | [
"MIT"
] | null | null | null | nsi/shell.py | NextStepInnovation/nsi-tools | ee4c9a9e512a2fb4942699d88920bc8210a3d701 | [
"MIT"
] | null | null | null | import os
import io
import sys
import subprocess
import shlex
import logging
from threading import Timer
from typing import Callable, Any, List
from pathlib import Path # noqa: for doctest
import tempfile # noqa: for doctest
from .toolz import (
merge, map, pipe, curry, do, cprint
)
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
| 28.477273 | 91 | 0.580607 |
6715fb7acc45572b00524312f06dff2708091d1d | 8,934 | py | Python | ICLR_2022/Cubic_10D/PIVEN/DataGen.py | streeve/PI3NN | f7f08a195096e0388bb9230bc67c6acd6f41581a | [
"Apache-2.0"
] | 11 | 2021-11-08T20:38:50.000Z | 2022-01-30T02:46:39.000Z | ICLR_2022/Cubic_10D/PIVEN/DataGen.py | streeve/PI3NN | f7f08a195096e0388bb9230bc67c6acd6f41581a | [
"Apache-2.0"
] | 1 | 2022-01-13T19:46:32.000Z | 2022-02-09T16:23:56.000Z | ICLR_2022/Cubic_10D/PIVEN/DataGen.py | streeve/PI3NN | f7f08a195096e0388bb9230bc67c6acd6f41581a | [
"Apache-2.0"
] | 1 | 2021-12-17T18:38:26.000Z | 2021-12-17T18:38:26.000Z | """
Data creation:
Load the data, normalize it, and split into train and test.
"""
'''
Added the capability of loading pre-separated UCI train/test data
function LoadData_Splitted_UCI
'''
import numpy as np
import os
import pandas as pd
import tensorflow as tf
DATA_PATH = "../UCI_Datasets"
| 36.317073 | 123 | 0.580479 |
67161d52650aa2e5bc2f66de7b2914c066936052 | 362 | py | Python | after/config.py | mauvilsa/2021-config | 870fd832bda269a1be7bfba32dd327df9987e74a | [
"MIT"
] | 5 | 2021-12-25T15:16:16.000Z | 2022-03-19T09:04:39.000Z | after/config.py | ArjanCodes/2021-config | 7c2c3babb0fb66d69eac81590356fae512c5e784 | [
"MIT"
] | 1 | 2022-01-14T08:02:13.000Z | 2022-01-14T08:02:13.000Z | after/config.py | mauvilsa/2021-config | 870fd832bda269a1be7bfba32dd327df9987e74a | [
"MIT"
] | 1 | 2022-01-14T06:32:44.000Z | 2022-01-14T06:32:44.000Z | from dataclasses import dataclass
| 12.066667 | 33 | 0.679558 |
671650e9876f386bef01f59b8d08f601fc6d3ed8 | 14,103 | py | Python | lab7/lab7.py | cudaczek/nlp-labs-2020 | 8e40fe04d2350c6e43a36b29f4428a34aedb6dea | [
"MIT"
] | null | null | null | lab7/lab7.py | cudaczek/nlp-labs-2020 | 8e40fe04d2350c6e43a36b29f4428a34aedb6dea | [
"MIT"
] | null | null | null | lab7/lab7.py | cudaczek/nlp-labs-2020 | 8e40fe04d2350c6e43a36b29f4428a34aedb6dea | [
"MIT"
] | null | null | null | import pprint
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import manifold
from gensim.models import KeyedVectors
# Download polish word embeddings for word2vec github/Google drive:
# https://github.com/sdadas/polish-nlp-resources
# with 100 dimensionality
word2vec_100 = KeyedVectors.load("word2vec/word2vec_100_3_polish.bin")
# with 300 dimensionality
word2vec_300 = KeyedVectors.load("word2vec_300_3_polish/word2vec_300_3_polish.bin")
# Using the downloaded models find the most similar words for the following expressions...
# And display 5 most similar words according to each model:
# kpk
# szkoda
# wypadek
# kolizja
# nieszczcie
# rozwd
words = ['kpk', 'szkoda', 'wypadek', 'kolizja', 'nieszczcie', 'rozwd']
for word in words:
get_most_similar_words(word)
# --------- Most similar words for kpk ---------
# word2vec_100:
# [('kilopond', 0.6665806770324707),
# ('kpzs', 0.6363496780395508),
# ('kpu', 0.6300562024116516),
# ('sownarkomu', 0.6254925727844238),
# ('wcik', 0.6224358677864075)]
# word2vec_300:
# [('ksh', 0.5774794220924377),
# ('cywilnego', 0.5498510599136353),
# ('postpowania', 0.5285828113555908),
# ('kilopond', 0.5151568055152893),
# ('kkkw', 0.48344212770462036)]
#
# --------- Most similar words for szkoda ---------
# word2vec_100:
# [('krzywda', 0.6817898750305176),
# ('poytek', 0.6121943593025208),
# ('strata', 0.5968126654624939),
# ('ryzyko', 0.5745570659637451),
# ('uszczerbek', 0.5639551877975464)]
# word2vec_300:
# [('uszczerbek', 0.6027276515960693),
# ('krzywda', 0.5920778512954712),
# ('strata', 0.550269365310669),
# ('despekt', 0.5382484197616577),
# ('poytek', 0.531347393989563)]
#
# --------- Most similar words for wypadek ---------
# word2vec_100:
# [('przypadek', 0.7544811964035034),
# ('okolicznoci', 0.7268072366714478),
# ('padku', 0.6788284182548523),
# ('incydent', 0.6418948173522949),
# ('zdarzenie', 0.6114422082901001)]
# word2vec_300:
# [('przypadek', 0.7066895961761475),
# ('okolicznoci', 0.6121077537536621),
# ('padku', 0.6056742072105408),
# ('padki', 0.5596078634262085),
# ('incydent', 0.5496981143951416)]
#
# --------- Most similar words for kolizja ---------
# word2vec_100:
# [('zderzenie', 0.8431548476219177),
# ('awaria', 0.7090569734573364),
# ('kraksa', 0.6777161359786987),
# ('turbulencja', 0.6613468527793884),
# ('polizg', 0.6391660571098328)]
# word2vec_300:
# [('zderzenie', 0.7603178024291992),
# ('awaria', 0.611009955406189),
# ('kraksa', 0.5939033031463623),
# ('turbulencja', 0.5664489269256592),
# ('polizg', 0.5569967031478882)]
#
# --------- Most similar words for nieszczcie ---------
# word2vec_100:
# [('niebezpieczestwo', 0.7519958019256592),
# ('cierpienia', 0.7408335208892822),
# ('strapienie', 0.7345459461212158),
# ('cierpienie', 0.7262567281723022),
# ('utrapienie', 0.7251379489898682)]
# word2vec_300:
# [('utrapienie', 0.6610732674598694),
# ('cierpienia', 0.6526124477386475),
# ('niedola', 0.6478177309036255),
# ('strapienie', 0.6300181150436401),
# ('cierpienie', 0.6248573064804077)]
#
# --------- Most similar words for rozwd ---------
# word2vec_100:
# [('maestwo', 0.7646843194961548),
# ('separacja', 0.7547168135643005),
# ('adopcja', 0.7333694696426392),
# ('lub', 0.7324203848838806),
# ('uniewanienie', 0.7096400856971741)]
# word2vec_300:
# [('separacja', 0.7053208351135254),
# ('maestwo', 0.6689504384994507),
# ('lub', 0.6553219556808472),
# ('rozwodowy', 0.614338219165802),
# ('uniewanienie', 0.6127183437347412)]
# Find the most similar words for the following expressions (average the representations for each word):
# sd najwyszy
# trybuna konstytucyjny
# szkoda majtkowy
# kodeks cywilny
# sd rejonowy
# Display 7 most similar words according to each model.
expressions = ['sd najwyszy', 'trybuna konstytucyjny', 'szkoda majtkowy', 'kodeks cywilny', 'sd rejonowy']
get_most_similiar_words_for_expression_avg(expressions)
# --------- Most similar words for sd najwyszy ---------
# word2vec_100:
# [('sd', 0.8644266128540039),
# ('trybuna', 0.7672435641288757),
# ('najwyszy', 0.7527138590812683),
# ('trybunat', 0.6843459010124207),
# ('sdzia', 0.6718415021896362),
# ('areopag', 0.6571060419082642),
# ('sprawiedliwo', 0.6562486886978149)]
# word2vec_300:
# [('sd', 0.8261206150054932),
# ('trybuna', 0.711520791053772),
# ('najwyszy', 0.7068409323692322),
# ('sdzia', 0.6023203730583191),
# ('sdowy', 0.5670486688613892),
# ('trybunat', 0.5525928735733032),
# ('sprawiedliwo', 0.5319530367851257)]
#
# --------- Most similar words for trybuna konstytucyjny ---------
# word2vec_100:
# [('trybuna', 0.9073251485824585),
# ('konstytucyjny', 0.7998723387718201),
# ('sd', 0.7972990274429321),
# ('buna', 0.7729247808456421),
# ('senat', 0.7585273385047913),
# ('bunau', 0.7441976070404053),
# ('trybunat', 0.7347140908241272)]
# word2vec_300:
# [('trybuna', 0.8845913410186768),
# ('konstytucyjny', 0.7739969491958618),
# ('sd', 0.7300779819488525),
# ('trybunat', 0.6758428812026978),
# ('senat', 0.6632090210914612),
# ('parlament', 0.6614581346511841),
# ('bunau', 0.6404117941856384)]
#
# --------- Most similar words for szkoda majtkowy ---------
# word2vec_100:
# [('szkoda', 0.8172438144683838),
# ('majtkowy', 0.7424530386924744),
# ('krzywda', 0.6498408317565918),
# ('wiadczenie', 0.6419471502304077),
# ('odszkodowanie', 0.6392182111740112),
# ('dochd', 0.637932538986206),
# ('wydatek', 0.6325603127479553)]
# word2vec_300:
# [('szkoda', 0.7971925735473633),
# ('majtkowy', 0.7278684973716736),
# ('uszczerbek', 0.5841633081436157),
# ('korzy', 0.5474051237106323),
# ('krzywda', 0.5431190729141235),
# ('majtek', 0.525060772895813),
# ('strata', 0.5228629112243652)]
#
# --------- Most similar words for kodeks cywilny ---------
# word2vec_100:
# [('kodeks', 0.8756389617919922),
# ('cywilny', 0.8532464504241943),
# ('pasztunwali', 0.6438998579978943),
# ('deksu', 0.6374959945678711),
# ('teodozjaskim', 0.6283917427062988),
# ('pozakodeksowy', 0.6153194904327393),
# ('sdowo', 0.6136723160743713)]
# word2vec_300:
# [('kodeks', 0.8212110996246338),
# ('cywilny', 0.7886406779289246),
# ('amiatyski', 0.5660314559936523),
# ('cywilnego', 0.5531740188598633),
# ('deksu', 0.5472918748855591),
# ('isps', 0.5369160175323486),
# ('jei', 0.5361183881759644)]
#
# --------- Most similar words for sd rejonowy ---------
# word2vec_100:
# [('sd', 0.8773891925811768),
# ('prokuratura', 0.8396657705307007),
# ('rejonowy', 0.7694871425628662),
# ('trybuna', 0.755321204662323),
# ('sdowy', 0.7153753042221069),
# ('magistrat', 0.7151126861572266),
# ('prokurator', 0.7081375122070312)]
# word2vec_300:
# [('sd', 0.8507211208343506),
# ('rejonowy', 0.7344856262207031),
# ('prokuratura', 0.711697518825531),
# ('trybuna', 0.6748420596122742),
# ('sdowy', 0.6426382064819336),
# ('okrgowy', 0.6349465847015381),
# ('apelacyjny', 0.599929690361023)]
# Find the result of the following equations (5 top results, both models):
# sd + konstytucja - kpk
# pasaer + kobieta - mczyzna
# pilot + kobieta - mczyzna
# lekarz + kobieta - mczyzna
# nauczycielka + mczyzna - kobieta
# przedszkolanka + mczyzna - 'kobieta
# samochd + rzeka - droga
# Analogy queries: each tuple is (positive_words, negative_words) for
# word-vector arithmetic, e.g. 'sd' + 'konstytucja' - 'kpk'.
# NOTE(review): non-ASCII Polish characters appear to have been stripped
# from these tokens (e.g. 'sd' ~ 'sad', 'mczyzna' ~ 'mezczyzna') --
# confirm they match the vocabulary of the trained models.
equations = [(['sd', 'konstytucja'], ['kpk']),
             (['pasaer', 'kobieta'], ['mczyzna']),
             (['pilot', 'kobieta'], ['mczyzna']),
             (['lekarz', 'kobieta'], ['mczyzna']),
             (['nauczycielka', 'mczyzna'], ['kobieta']),
             (['przedszkolanka', 'mczyzna'], ['kobieta']),
             (['samochd', 'rzeka'], ['droga'])]
# Print the top analogy results for every equation; the helper is defined
# earlier in this file (outside this chunk).
for equa in equations:
    get_result_of_equation(equa[0], equa[1])
# --------- Result for + ['sd', 'konstytucja'] and - ['kpk'] ---------
# word2vec_100:
# [('trybuna', 0.6436409950256348),
# ('ustawa', 0.6028786897659302),
# ('elekcja', 0.5823959112167358),
# ('deklaracja', 0.5771891474723816),
# ('dekret', 0.5759621262550354)]
# word2vec_300:
# [('trybuna', 0.5860734581947327),
# ('senat', 0.5112544298171997),
# ('ustawa', 0.5023636817932129),
# ('dekret', 0.48704710602760315),
# ('wadza', 0.4868926703929901)]
#
# --------- Result for + ['pasaer', 'kobieta'] and - ['mczyzna'] ---------
# word2vec_100:
# [('pasaerka', 0.7234811186790466),
# ('stewardessa', 0.6305270195007324),
# ('stewardesa', 0.6282645463943481),
# ('takswka', 0.619726300239563),
# ('podrny', 0.614517092704773)]
# word2vec_300:
# [('pasaerka', 0.6741673946380615),
# ('stewardesa', 0.5810248255729675),
# ('stewardessa', 0.5653151273727417),
# ('podrny', 0.5060371160507202),
# ('pasaerski', 0.4896503686904907)]
#
# --------- Result for + ['pilot', 'kobieta'] and - ['mczyzna'] ---------
# word2vec_100:
# [('nawigator', 0.6925703287124634),
# ('oblatywacz', 0.6686224937438965),
# ('lotnik', 0.6569937467575073),
# ('pilotka', 0.6518791913986206),
# ('awionetka', 0.6428645849227905)]
# word2vec_300:
# [('pilotka', 0.6108255386352539),
# ('lotnik', 0.6020804047584534),
# ('stewardesa', 0.5943204760551453),
# ('nawigator', 0.5849766731262207),
# ('oblatywacz', 0.5674178600311279)]
#
# --------- Result for + ['lekarz', 'kobieta'] and - ['mczyzna'] ---------
# word2vec_100:
# [('lekarka', 0.7690489292144775),
# ('ginekolog', 0.7575511336326599),
# ('pediatra', 0.7478542923927307),
# ('psychiatra', 0.732271671295166),
# ('poona', 0.7268943786621094)]
# word2vec_300:
# [('lekarka', 0.7388788461685181),
# ('pielgniarka', 0.6719920635223389),
# ('ginekolog', 0.658279299736023),
# ('psychiatra', 0.6389409303665161),
# ('chirurg', 0.6305986642837524)]
#
# --------- Result for + ['nauczycielka', 'mczyzna'] and - ['kobieta'] ---------
# word2vec_100:
# [('uczennica', 0.7441667318344116),
# ('studentka', 0.7274973392486572),
# ('nauczyciel', 0.7176114916801453),
# ('wychowawczyni', 0.7153530120849609),
# ('koleanka', 0.678418755531311)]
# word2vec_300:
# [('nauczyciel', 0.6561620235443115),
# ('wychowawczyni', 0.6211140155792236),
# ('uczennica', 0.6142012476921082),
# ('koleanka', 0.5501158237457275),
# ('przedszkolanka', 0.5497692823410034)]
#
# --------- Result for + ['przedszkolanka', 'mczyzna'] and - ['kobieta'] ---------
# word2vec_100:
# [('staysta', 0.6987776756286621),
# ('wychowawczyni', 0.6618361473083496),
# ('krelarka', 0.6590923070907593),
# ('pielgniarz', 0.6492814421653748),
# ('siedmiolatek', 0.6483469009399414)]
# word2vec_300:
# [('staysta', 0.5117638111114502),
# ('pierwszoklasista', 0.49398648738861084),
# ('wychowawczyni', 0.49037522077560425),
# ('praktykant', 0.48884207010269165),
# ('pielgniarz', 0.4795262813568115)]
#
# --------- Result for + ['samochd', 'rzeka'] and - ['droga'] ---------
# word2vec_100:
# [('jeep', 0.6142987608909607),
# ('buick', 0.5962571501731873),
# ('dip', 0.5938510894775391),
# ('ponton', 0.580719530582428),
# ('landrower', 0.5799552202224731)]
# word2vec_300:
# [('dip', 0.5567235946655273),
# ('jeep', 0.5533617734909058),
# ('auto', 0.5478508472442627),
# ('ciarwka', 0.5461742281913757),
# ('wz', 0.5204571485519409)]
# Using the t-SNE algorithm compute the projection of the random 1000 words with the following words highlighted (both models):
# szkoda
# strata
# uszczerbek
# krzywda
# niesprawiedliwo
# nieszczcie
# kobieta
# mczyzna
# pasaer
# pasaerka
# student
# studentka
# lekarz
# lekarka
# Words to highlight on the t-SNE projection: harm-related nouns plus
# masculine/feminine occupation pairs.
words = np.array(['szkoda', 'strata', 'uszczerbek', 'krzywda', 'niesprawiedliwo', 'nieszczcie', 'kobieta',
                  'mczyzna', 'pasaer', 'pasaerka', 'student', 'studentka', 'lekarz', 'lekarka'])
# Project a random word sample with the words above highlighted, once per
# model (plot_with_tsne is defined earlier in this file).
wv = word2vec_300
plot_with_tsne(wv, words)
wv = word2vec_100
plot_with_tsne(wv, words)
| 33.901442 | 127 | 0.667801 |
671762a970ef464f89d67b583ec5b5c7d9146820 | 1,427 | py | Python | Nimbus-Controller/sqs-fastreader.py | paulfdoyle/NIMBUS | 0f309b620c00a9438c55404e685bb1cafc44d200 | [
"MIT"
] | null | null | null | Nimbus-Controller/sqs-fastreader.py | paulfdoyle/NIMBUS | 0f309b620c00a9438c55404e685bb1cafc44d200 | [
"MIT"
] | null | null | null | Nimbus-Controller/sqs-fastreader.py | paulfdoyle/NIMBUS | 0f309b620c00a9438c55404e685bb1cafc44d200 | [
"MIT"
] | null | null | null | # This script adds a new message to a specific SQS queue
#
# Author - Paul Doyle Aug 2013
#
#
#from __future__ import print_function
import sys
import Queue
import boto.sqs
import argparse
import socket
import datetime
import sys
import time
from boto.sqs.attributes import Attributes
parser = argparse.ArgumentParser()
parser.add_argument('queuearg',help='name of the sqs queue to use',metavar="myQueueName")
parser.add_argument('experiment',help='name of the experiment queue to use')
args = parser.parse_args()
from boto.sqs.message import Message
import threading
conn = boto.sqs.connect_to_region("us-east-1", aws_access_key_id='AKIAINWVSI3MIXIB5N3Q', aws_secret_access_key='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')
sqs_queue = conn.get_queue(args.queuearg)
queue = Queue.Queue(0)
threads = []
for n in xrange(40):
queue.put(n)
t = Sender()
t.start()
threads.append(t)
for t in threads:
t.join()
| 24.603448 | 154 | 0.733006 |
6718237fd3891c8aa0d6df664410cd0f7651353e | 1,547 | py | Python | dero/ml/results/reformat.py | whoopnip/dero | 62e081b341cc711ea8e1578e7c65b581eb74fa3f | [
"MIT"
] | null | null | null | dero/ml/results/reformat.py | whoopnip/dero | 62e081b341cc711ea8e1578e7c65b581eb74fa3f | [
"MIT"
] | 3 | 2020-03-24T17:57:46.000Z | 2021-02-02T22:25:37.000Z | dero/ml/results/reformat.py | whoopnip/dero | 62e081b341cc711ea8e1578e7c65b581eb74fa3f | [
"MIT"
] | null | null | null | from typing import Optional
import pandas as pd
from dero.ml.typing import ModelDict, AllModelResultsDict, DfDict
| 37.731707 | 97 | 0.700711 |
67194761b98bb4ec0d555cbb6324bf54ba4345ac | 663 | py | Python | engine/view.py | amirgeva/py2d | 88210240b71446d53ee85cf07ca8d253d522a265 | [
"BSD-2-Clause"
] | null | null | null | engine/view.py | amirgeva/py2d | 88210240b71446d53ee85cf07ca8d253d522a265 | [
"BSD-2-Clause"
] | null | null | null | engine/view.py | amirgeva/py2d | 88210240b71446d53ee85cf07ca8d253d522a265 | [
"BSD-2-Clause"
] | null | null | null | import pygame
from engine.utils import Rect
from engine.app import get_screen_size
# EXPORT
| 23.678571 | 89 | 0.600302 |
67194cbd5bb79a7249d2ae1d8a3b2168422d756c | 1,640 | py | Python | oldplugins/coin.py | sonicrules1234/sonicbot | 07a22d08bf86ed33dc715a800957aee3b45f3dde | [
"BSD-3-Clause"
] | 1 | 2019-06-27T08:45:23.000Z | 2019-06-27T08:45:23.000Z | oldplugins/coin.py | sonicrules1234/sonicbot | 07a22d08bf86ed33dc715a800957aee3b45f3dde | [
"BSD-3-Clause"
] | null | null | null | oldplugins/coin.py | sonicrules1234/sonicbot | 07a22d08bf86ed33dc715a800957aee3b45f3dde | [
"BSD-3-Clause"
] | null | null | null | import shelve, random
arguments = ["self", "info", "args", "world"]
minlevel = 2
helpstring = "coin <bet>"
def main(connection, info, args, world):
    """Flip a weighted coin on a bet.

    Decides heads or tails by sampling the player's ``coinchance`` list and
    adds the bet to (or removes it from) their balance accordingly.  Also
    keeps the player's all-time-high balance (``maxmoney``) up to date.
    """
    money = shelve.open("money-%s.db" % (connection.networkname), writeback=True)
    try:
        sender = info["sender"]
        # `in` replaces the Python-2-only dict.has_key() with identical semantics.
        if sender in money:
            bet = int(args[1])
            account = money[sender]
            if 1 <= bet <= account["money"]:
                # The mix of truthy/falsy entries in coinchance controls the
                # win probability.
                if random.choice(account["coinchance"]):
                    account["money"] += bet
                    money.sync()
                    connection.msg(info["channel"], _("Congrats %(sender)s! You just won %(num)s dollars!") % dict(sender=info["sender"], num=args[1]))
                else:
                    account["money"] -= bet
                    money.sync()
                    connection.msg(info["channel"], _("Sorry %(sender)s! You just lost %(num)s dollars!") % dict(sender=info["sender"], num=args[1]))
                # Record a new all-time-high balance when a win produced one.
                if account["money"] > account["maxmoney"]:
                    account["maxmoney"] = account["money"]
                    money.sync()
            else:
                connection.msg(info["channel"], _("%(sender)s: You don't have enough money to do that!") % dict(sender=info["sender"]))
        else:
            connection.msg(info["channel"], _("%(sender)s: You have not set up a money account. If you aren't already, please register with me. Then, say moneyreset. After that you should be able to use this command.") % dict(sender=info["sender"]))
    finally:
        # The original leaked the shelf handle; close() also flushes the
        # writeback cache to disk.
        money.close()
6719b8a502c31dfe0118ee06e1a1b37092b216f3 | 13,562 | py | Python | src/rbvfit/vfit_mcmc.py | manoranjan-s/rbvfit | a5c450f721c08dda02c431a5a079945a73a0cfc2 | [
"MIT"
] | null | null | null | src/rbvfit/vfit_mcmc.py | manoranjan-s/rbvfit | a5c450f721c08dda02c431a5a079945a73a0cfc2 | [
"MIT"
] | null | null | null | src/rbvfit/vfit_mcmc.py | manoranjan-s/rbvfit | a5c450f721c08dda02c431a5a079945a73a0cfc2 | [
"MIT"
] | null | null | null | from __future__ import print_function
import emcee
from multiprocessing import Pool
import numpy as np
import corner
import matplotlib.pyplot as plt
import sys
import scipy.optimize as op
from rbvfit.rb_vfit import rb_veldiff as rb_veldiff
from rbvfit import rb_setline as rb
import pdb
######## Computing Likelihoods######
| 36.262032 | 204 | 0.539891 |
671a19cd137db70202b7e3303f276604903cd2b5 | 6,409 | py | Python | yolox/data/dataloading.py | XHYsdjkdsjsk2021/Yolox_xhy | a60f585d4d2bf36f9fa90b0a078efb7b315e0118 | [
"Apache-2.0"
] | null | null | null | yolox/data/dataloading.py | XHYsdjkdsjsk2021/Yolox_xhy | a60f585d4d2bf36f9fa90b0a078efb7b315e0118 | [
"Apache-2.0"
] | null | null | null | yolox/data/dataloading.py | XHYsdjkdsjsk2021/Yolox_xhy | a60f585d4d2bf36f9fa90b0a078efb7b315e0118 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import torch
from torch.utils.data.dataloader import DataLoader as torchDataLoader
from torch.utils.data.dataloader import default_collate
import os
import random
from .samplers import YoloBatchSampler
def get_yolox_datadir():
    """Return the directory that YOLOX datasets live in.

    The ``YOLOX_DATADIR`` environment variable takes precedence when it is
    set; otherwise the ``datasets`` folder that sits next to the installed
    ``yolox`` package is used.
    """
    configured = os.getenv("YOLOX_DATADIR", None)
    if configured is not None:
        return configured

    import yolox

    package_dir = os.path.dirname(os.path.dirname(yolox.__file__))
    return os.path.join(package_dir, "datasets")
def list_collate(batch):
    """Collate a batch into one list per field, keeping list fields as lists.

    Use this as the collate function in a Dataloader when some sample fields
    (lists or tuples, e.g. Brambox.boxes) should stay plain Python lists
    instead of being stacked; every other field goes through the default
    collate and becomes a tensor as usual.
    """
    columns = [
        list(column) if isinstance(column[0], (list, tuple)) else default_collate(column)
        for column in zip(*batch)
    ]
    return columns
671a1a30341f98dfd27e877827d5eea516829e2a | 7,765 | py | Python | env/lib/python3.9/site-packages/ansible/modules/cloud/amazon/_ec2_vpc_vpn_facts.py | unbounce/aws-name-asg-instances | e0379442e3ce71bf66ba9b8975b2cc57a2c7648d | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | env/lib/python3.9/site-packages/ansible/modules/cloud/amazon/_ec2_vpc_vpn_facts.py | unbounce/aws-name-asg-instances | e0379442e3ce71bf66ba9b8975b2cc57a2c7648d | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | env/lib/python3.9/site-packages/ansible/modules/cloud/amazon/_ec2_vpc_vpn_facts.py | unbounce/aws-name-asg-instances | e0379442e3ce71bf66ba9b8975b2cc57a2c7648d | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: ec2_vpc_vpn_info
version_added: 1.0.0
short_description: Gather information about VPN Connections in AWS.
description:
- Gather information about VPN Connections in AWS.
- This module was called C(ec2_vpc_vpn_facts) before Ansible 2.9. The usage did not change.
requirements: [ boto3 ]
author: Madhura Naniwadekar (@Madhura-CSI)
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnConnections.html) for possible filters.
required: false
type: dict
vpn_connection_ids:
description:
- Get details of a specific VPN connections using vpn connection ID/IDs. This value should be provided as a list.
required: false
type: list
elements: str
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''
EXAMPLES = r'''
# # Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather information about all vpn connections
community.aws.ec2_vpc_vpn_info:
- name: Gather information about a filtered list of vpn connections, based on tags
community.aws.ec2_vpc_vpn_info:
filters:
"tag:Name": test-connection
register: vpn_conn_info
- name: Gather information about vpn connections by specifying connection IDs.
community.aws.ec2_vpc_vpn_info:
filters:
vpn-gateway-id: vgw-cbe66beb
register: vpn_conn_info
'''
RETURN = r'''
vpn_connections:
description: List of one or more VPN Connections.
returned: always
type: complex
contains:
category:
description: The category of the VPN connection.
returned: always
type: str
sample: VPN
customer_gatway_configuration:
description: The configuration information for the VPN connection's customer gateway (in the native XML format).
returned: always
type: str
customer_gateway_id:
description: The ID of the customer gateway at your end of the VPN connection.
returned: always
type: str
sample: cgw-17a53c37
options:
description: The VPN connection options.
returned: always
type: dict
sample: {
"static_routes_only": false
}
routes:
description: List of static routes associated with the VPN connection.
returned: always
type: complex
contains:
destination_cidr_block:
description: The CIDR block associated with the local subnet of the customer data center.
returned: always
type: str
sample: 10.0.0.0/16
state:
description: The current state of the static route.
returned: always
type: str
sample: available
state:
description: The current state of the VPN connection.
returned: always
type: str
sample: available
tags:
description: Any tags assigned to the VPN connection.
returned: always
type: dict
sample: {
"Name": "test-conn"
}
type:
description: The type of VPN connection.
returned: always
type: str
sample: ipsec.1
vgw_telemetry:
description: Information about the VPN tunnel.
returned: always
type: complex
contains:
accepted_route_count:
description: The number of accepted routes.
returned: always
type: int
sample: 0
last_status_change:
description: The date and time of the last change in status.
returned: always
type: str
sample: "2018-02-09T14:35:27+00:00"
outside_ip_address:
description: The Internet-routable IP address of the virtual private gateway's outside interface.
returned: always
type: str
sample: 13.127.79.191
status:
description: The status of the VPN tunnel.
returned: always
type: str
sample: DOWN
status_message:
description: If an error occurs, a description of the error.
returned: always
type: str
sample: IPSEC IS DOWN
certificate_arn:
description: The Amazon Resource Name of the virtual private gateway tunnel endpoint certificate.
returned: when a private certificate is used for authentication
type: str
sample: "arn:aws:acm:us-east-1:123456789101:certificate/c544d8ce-20b8-4fff-98b0-example"
vpn_connection_id:
description: The ID of the VPN connection.
returned: always
type: str
sample: vpn-f700d5c0
vpn_gateway_id:
description: The ID of the virtual private gateway at the AWS side of the VPN connection.
returned: always
type: str
sample: vgw-cbe56bfb
'''
import json
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
boto3_tag_list_to_ansible_dict,
camel_dict_to_snake_dict,
)
if __name__ == '__main__':
main()
| 35.619266 | 157 | 0.642112 |
671aa126c99ce28f4a40eb764f765d0b5bf6665c | 10,454 | py | Python | cogs/roleselector.py | YouGotSchott/tcs-discord-bot | 696db5da129ef42f4c5047679d289aeb6ed122a9 | [
"MIT"
] | 1 | 2021-04-30T06:38:31.000Z | 2021-04-30T06:38:31.000Z | cogs/roleselector.py | YouGotSchott/tcs-discord-bot | 696db5da129ef42f4c5047679d289aeb6ed122a9 | [
"MIT"
] | null | null | null | cogs/roleselector.py | YouGotSchott/tcs-discord-bot | 696db5da129ef42f4c5047679d289aeb6ed122a9 | [
"MIT"
] | 1 | 2019-04-28T03:33:35.000Z | 2019-04-28T03:33:35.000Z | import discord
from discord.ext import commands
from pathlib import Path
from config import bot
from collections import OrderedDict
import json
| 46.052863 | 147 | 0.505931 |
671b9c9f7b2c7728391666847cc8f06a6c3abea1 | 468 | py | Python | Bunnies.py | fatih-iver/Intro-to-Computer-Science-with-Python | 7b8127681415dfd100a0e70fe8a672cec696bbb7 | [
"MIT"
] | null | null | null | Bunnies.py | fatih-iver/Intro-to-Computer-Science-with-Python | 7b8127681415dfd100a0e70fe8a672cec696bbb7 | [
"MIT"
] | null | null | null | Bunnies.py | fatih-iver/Intro-to-Computer-Science-with-Python | 7b8127681415dfd100a0e70fe8a672cec696bbb7 | [
"MIT"
] | null | null | null | # Define a procedure, fibonacci, that takes a natural number as its input, and
# returns the value of that fibonacci number.
# Two Base Cases:
# fibonacci(0) => 0
# fibonacci(1) => 1
# Recursive Case:
# n > 1 : fibonacci(n) => fibonacci(n-1) + fibonacci(n-2)
print (fibonacci(0))
#>>> 0
print (fibonacci(1))
#>>> 1
print (fibonacci(15))
#>>> 610 | 24.631579 | 79 | 0.604701 |
671bdca4dcc88d2670523ab9386ad959165e1bf4 | 1,876 | py | Python | symphony/cli/graphql_compiler/tests/test_utils_codegen.py | remo5000/magma | 1d1dd9a23800a8e07b1ce016776d93e12430ec15 | [
"BSD-3-Clause"
] | 1 | 2020-06-05T09:01:40.000Z | 2020-06-05T09:01:40.000Z | symphony/cli/graphql_compiler/tests/test_utils_codegen.py | remo5000/magma | 1d1dd9a23800a8e07b1ce016776d93e12430ec15 | [
"BSD-3-Clause"
] | 14 | 2019-11-15T12:01:18.000Z | 2019-12-12T14:37:42.000Z | symphony/cli/graphql_compiler/tests/test_utils_codegen.py | remo5000/magma | 1d1dd9a23800a8e07b1ce016776d93e12430ec15 | [
"BSD-3-Clause"
] | 3 | 2019-11-15T15:56:25.000Z | 2019-11-21T10:34:59.000Z | #!/usr/bin/env python3
from .base_test import BaseTest
from fbc.symphony.cli.graphql_compiler.gql.utils_codegen import CodeChunk
| 24.363636 | 73 | 0.537846 |
671c056e5378258e43c069fd46366a89b0af73b7 | 202 | py | Python | api/__init__.py | zhangyouliang/TencentComicBook | 74d8e7e787f70554d5d982687540a6ac3225b9ed | [
"MIT"
] | null | null | null | api/__init__.py | zhangyouliang/TencentComicBook | 74d8e7e787f70554d5d982687540a6ac3225b9ed | [
"MIT"
] | null | null | null | api/__init__.py | zhangyouliang/TencentComicBook | 74d8e7e787f70554d5d982687540a6ac3225b9ed | [
"MIT"
] | null | null | null | from flask import Flask
| 18.363636 | 39 | 0.70297 |
671c98674cb5f008f240bb63dd21b79174a4ca79 | 898 | py | Python | misc/pytorch_toolkit/chest_xray_screening/chest_xray_screening/utils/get_config.py | a-a-egorovich/training_extensions | e0bbdfa4266c6ccfebf23ef303204a4a62fc290d | [
"Apache-2.0"
] | null | null | null | misc/pytorch_toolkit/chest_xray_screening/chest_xray_screening/utils/get_config.py | a-a-egorovich/training_extensions | e0bbdfa4266c6ccfebf23ef303204a4a62fc290d | [
"Apache-2.0"
] | null | null | null | misc/pytorch_toolkit/chest_xray_screening/chest_xray_screening/utils/get_config.py | a-a-egorovich/training_extensions | e0bbdfa4266c6ccfebf23ef303204a4a62fc290d | [
"Apache-2.0"
] | 1 | 2021-05-08T04:29:44.000Z | 2021-05-08T04:29:44.000Z | import os
import json
def get_config(action, optimised=False):
    """Load one of the project's JSON configuration files.

    action: train, test, export or gdrive/download
    optimised: False --> DenseNet121 configs
               True  --> DenseNet121Eff configs
    """
    root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
    config_dir = os.path.join(root_path, 'configs')
    if action == 'download':
        file_name = 'download_configs.json'
    elif optimised:
        file_name = 'densenet121eff_config.json'
    else:
        file_name = 'densenet121_config.json'
    with open(os.path.join(config_dir, file_name)) as handle:
        config = json.load(handle)
    if action != 'download':
        # The model config files keep one section per action.
        config = config[action]
    return config
| 33.259259 | 93 | 0.609131 |
671d6732bc9abaae404bc6f0b8c59f26d23ca716 | 3,337 | py | Python | src/udpa/annotations/versioning_pb2.py | pomerium/enterprise-client-python | 366d72cc9cd6dc05fae704582deb13b1ccd20a32 | [
"Apache-2.0"
] | 1 | 2021-09-14T04:34:29.000Z | 2021-09-14T04:34:29.000Z | src/udpa/annotations/versioning_pb2.py | pomerium/enterprise-client-python | 366d72cc9cd6dc05fae704582deb13b1ccd20a32 | [
"Apache-2.0"
] | 3 | 2021-09-15T15:10:41.000Z | 2022-01-04T21:03:03.000Z | src/udpa/annotations/versioning_pb2.py | pomerium/enterprise-client-python | 366d72cc9cd6dc05fae704582deb13b1ccd20a32 | [
"Apache-2.0"
] | 1 | 2021-09-13T21:51:37.000Z | 2021-09-13T21:51:37.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: udpa/annotations/versioning.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='udpa/annotations/versioning.proto',
package='udpa.annotations',
syntax='proto3',
serialized_options=b'Z\"github.com/cncf/xds/go/annotations',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n!udpa/annotations/versioning.proto\x12\x10udpa.annotations\x1a google/protobuf/descriptor.proto\"5\n\x14VersioningAnnotation\x12\x1d\n\x15previous_message_type\x18\x01 \x01(\t:^\n\nversioning\x12\x1f.google.protobuf.MessageOptions\x18\xd3\x88\xe1\x03 \x01(\x0b\x32&.udpa.annotations.VersioningAnnotationB$Z\"github.com/cncf/xds/go/annotationsb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
VERSIONING_FIELD_NUMBER = 7881811
versioning = _descriptor.FieldDescriptor(
name='versioning', full_name='udpa.annotations.versioning', index=0,
number=7881811, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
_VERSIONINGANNOTATION = _descriptor.Descriptor(
name='VersioningAnnotation',
full_name='udpa.annotations.VersioningAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='previous_message_type', full_name='udpa.annotations.VersioningAnnotation.previous_message_type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=89,
serialized_end=142,
)
DESCRIPTOR.message_types_by_name['VersioningAnnotation'] = _VERSIONINGANNOTATION
DESCRIPTOR.extensions_by_name['versioning'] = versioning
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
VersioningAnnotation = _reflection.GeneratedProtocolMessageType('VersioningAnnotation', (_message.Message,), {
'DESCRIPTOR' : _VERSIONINGANNOTATION,
'__module__' : 'udpa.annotations.versioning_pb2'
# @@protoc_insertion_point(class_scope:udpa.annotations.VersioningAnnotation)
})
_sym_db.RegisterMessage(VersioningAnnotation)
versioning.message_type = _VERSIONINGANNOTATION
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(versioning)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 39.258824 | 374 | 0.802218 |
671ef5ab0fb204c856b7864f6aaa3913e2ce45e8 | 2,787 | py | Python | modules/action/scan_smbclient_nullsession.py | mrpnkt/apt2 | 542fb0593069c900303421f3f24a499ce8f3a6a8 | [
"MIT"
] | 37 | 2018-08-24T20:13:19.000Z | 2022-02-22T08:41:24.000Z | modules/action/scan_smbclient_nullsession.py | zu3s/apt2-1 | 67325052d2713a363183c23188a67e98a379eec7 | [
"MIT"
] | 4 | 2020-06-14T23:16:45.000Z | 2021-03-08T14:18:21.000Z | modules/action/scan_smbclient_nullsession.py | zu3s/apt2-1 | 67325052d2713a363183c23188a67e98a379eec7 | [
"MIT"
] | 23 | 2018-11-15T13:00:09.000Z | 2021-08-07T18:53:04.000Z | import re
from core.actionModule import actionModule
from core.keystore import KeyStore as kb
from core.utils import Utils
| 42.227273 | 108 | 0.545748 |
67217c13d08aaa4cb02ed01fdfa62904c93ef245 | 2,652 | py | Python | UserSpace/Python/Cosmo.py | dkaramit/MiMeS | a3c97a4877f181b54e880d7b144271c5659291b5 | [
"MIT"
] | 2 | 2022-01-27T20:10:19.000Z | 2022-01-29T04:26:16.000Z | UserSpace/Python/Cosmo.py | dkaramit/MiMeS | a3c97a4877f181b54e880d7b144271c5659291b5 | [
"MIT"
] | null | null | null | UserSpace/Python/Cosmo.py | dkaramit/MiMeS | a3c97a4877f181b54e880d7b144271c5659291b5 | [
"MIT"
] | null | null | null | from numpy import logspace
from sys import path as sysPath
sysPath.append('../../src')
#load the module
from interfacePy import Cosmo
# Build the cosmology helper from the tabulated equation-of-state data;
# assumes the two numeric arguments are the temperature bounds used by the
# Cosmo constructor -- TODO confirm against src/interfacePy/Cosmo.
cosmo=Cosmo('../../src/data/eos2020.dat',0,1e5)
# Print the Hubble rate, relativistic degrees of freedom and entropy
# density over a log-spaced temperature grid (10^-5 to 10^5 GeV).
for T in logspace(-5,5,50):
    print(
        'T=',T,'GeV\t',
        'H=',cosmo.Hubble(T),'GeV\t',
        'h_eff=',cosmo.heff(T),'\t',
        'g_eff=',cosmo.geff(T),'\t',
        's=',cosmo.s(T),'GeV^3\t',
        )
if False:
import matplotlib.pyplot as plt
#########-----g_eff and h_eff-----#########
fig=plt.figure(figsize=(9,4))
fig.subplots_adjust(bottom=0.15, left=0.15, top = 0.95, right=0.9,wspace=0.0,hspace=0.0)
fig.suptitle('')
sub = fig.add_subplot(1,1,1)
T=logspace(-5,5,500)
gt=[cosmo.geff(i) for i in T]
ht=[cosmo.heff(i) for i in T]
sub.plot(T,gt,linestyle='--',c='xkcd:red',label=r"$g_{\rm eff} (T)$")
sub.plot(T,ht,linestyle=':',c='xkcd:black',label=r"$h_{\rm eff} (T)$")
sub.set_xlabel(r'$T ~ [{\rm GeV}]$')
sub.set_ylabel(r'rel. dof')
sub.legend(bbox_to_anchor=(1, 0.0),borderaxespad=0.,
borderpad=0.05,ncol=1,loc='lower right',fontsize=14,framealpha=0)
sub.set_yscale('log')
sub.set_xscale('log')
fig.savefig('rdofs-T_examplePlot.pdf',bbox_inches='tight')
#########-----dg_effdT and dh_effdT-----#########
fig=plt.figure(figsize=(9,4))
fig.subplots_adjust(bottom=0.15, left=0.15, top = 0.95, right=0.9,wspace=0.0,hspace=0.0)
fig.suptitle('')
sub = fig.add_subplot(1,1,1)
T=logspace(-5,5,500)
dg=[cosmo.dgeffdT (i) for i in T]
dh=[cosmo.dheffdT(i) for i in T]
sub.plot(T,dg,linestyle='--',c='xkcd:red',label=r"$\dfrac{d g_{\rm eff}}{dT} (T)$")
sub.plot(T,dh,linestyle=':',c='xkcd:black',label=r"$\dfrac{d h_{\rm eff}}{dT} (T)$")
sub.set_xlabel(r'$T ~ [{\rm GeV}]$')
sub.legend(bbox_to_anchor=(1, 0.5),borderaxespad=0.,
borderpad=0.05,ncol=1,loc='lower right',fontsize=14,framealpha=0)
sub.set_yscale('symlog')
sub.set_xscale('log')
fig.savefig('drdofsdT-T_examplePlot.pdf',bbox_inches='tight')
#########-----dh-----#########
fig=plt.figure(figsize=(9,4))
fig.subplots_adjust(bottom=0.15, left=0.15, top = 0.95, right=0.9,wspace=0.0,hspace=0.0)
fig.suptitle('')
sub = fig.add_subplot(1,1,1)
T=logspace(-5,5,500)
dht=[cosmo.dh(i) for i in T]
sub.plot(T,dht,linestyle='-',c='xkcd:black')
sub.set_xlabel(r'$T ~ [{\rm GeV}]$')
sub.set_ylabel(r'$\delta_h = 1 + \dfrac{1}{3} \dfrac{d \log h_{\rm eff} }{d \log T}$')
sub.set_yscale('linear')
sub.set_xscale('log')
fig.savefig('dh-T_examplePlot.pdf',bbox_inches='tight')
| 28.212766 | 92 | 0.584465 |
6721e6112f2f0c4cefe44686fc888d2b7c5c0f42 | 5,236 | py | Python | src/psion/oauth2/endpoints/revocation.py | revensky/psion | dfe38a1a4f4d6a5029d0973dbe1326415df6d512 | [
"MIT"
] | 2 | 2021-02-22T22:12:23.000Z | 2021-02-22T22:48:33.000Z | src/psion/oauth2/endpoints/revocation.py | revensky/psion | dfe38a1a4f4d6a5029d0973dbe1326415df6d512 | [
"MIT"
] | null | null | null | src/psion/oauth2/endpoints/revocation.py | revensky/psion | dfe38a1a4f4d6a5029d0973dbe1326415df6d512 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import Optional
from psion.oauth2.exceptions import InvalidClient, OAuth2Error, UnsupportedTokenType
from psion.oauth2.models import JSONResponse, Request
from .base import BaseEndpoint
| 41.228346 | 86 | 0.661383 |
67221620473d936c0d65eea07a40a563dbd162cf | 1,851 | py | Python | experiments/Browser/browser.py | rajKarra69420/bento | 1324189e26acfe3a372882519bd78e037d93997c | [
"BSD-3-Clause"
] | 3 | 2021-12-01T02:11:15.000Z | 2022-02-03T22:45:00.000Z | experiments/Browser/browser.py | rajKarra69420/bento | 1324189e26acfe3a372882519bd78e037d93997c | [
"BSD-3-Clause"
] | 4 | 2021-11-27T11:04:36.000Z | 2022-02-17T02:53:21.000Z | experiments/Browser/browser.py | rajKarra69420/bento | 1324189e26acfe3a372882519bd78e037d93997c | [
"BSD-3-Clause"
] | 5 | 2021-07-01T20:23:43.000Z | 2022-03-12T18:10:34.000Z | #!/usr/bin/env python3
import argparse
import logging
import sys
import zlib
sys.path.append("../..")
from bento.client.api import ClientConnection
from bento.common.protocol import *
import bento.common.util as util
function_name= "browser"
function_code= """
import requests
import zlib
import os
def browser(url, padding):
body= requests.get(url, timeout=1).content
compressed= zlib.compress(body)
final= compressed
if padding - len(final) > 0:
final= final + (os.urandom(padding - len(final)))
else:
final= final + (os.urandom((len(final) + padding) % padding))
api.send(final)
"""
if __name__ == '__main__':
main()
| 27.626866 | 76 | 0.686116 |
67224f47630e980eac0b94abcd62dd84644278c0 | 3,429 | py | Python | app/views/v1/search.py | daghan/Ostrich | b12057bee7b8b92aedf09ec40edc97a60340527b | [
"MIT"
] | null | null | null | app/views/v1/search.py | daghan/Ostrich | b12057bee7b8b92aedf09ec40edc97a60340527b | [
"MIT"
] | null | null | null | app/views/v1/search.py | daghan/Ostrich | b12057bee7b8b92aedf09ec40edc97a60340527b | [
"MIT"
] | null | null | null | from app import webapp, mysql
from app.models import Search , Utils, Collection, WebUtils
from flask import request, jsonify
from flask.ext.jsonpify import jsonify as jsonp
import json
'''
Generic search call
@params
q: search query
page: the page number of search results (default 0)
type: type of search: {default: free(all fields), category, isbn}
@response
List of search result objects(ES)
'''
| 34.636364 | 84 | 0.680082 |
6722b1ddb17bb6d89f4ea39b1f185bec7d6cfcf6 | 555 | py | Python | run.py | orest-d/pointcloud-viewer-rs | 0d6d3f27e24d1783c4812a14457f8e20c4ef6f0b | [
"MIT"
] | null | null | null | run.py | orest-d/pointcloud-viewer-rs | 0d6d3f27e24d1783c4812a14457f8e20c4ef6f0b | [
"MIT"
] | null | null | null | run.py | orest-d/pointcloud-viewer-rs | 0d6d3f27e24d1783c4812a14457f8e20c4ef6f0b | [
"MIT"
] | null | null | null | from flask import Flask, make_response
app = Flask(__name__)
if __name__ == "__main__":
app.run(debug=True,port=8080)
| 20.555556 | 57 | 0.625225 |
6724bee4efbfb26d55e405a724ed5a24e2b08168 | 8,496 | py | Python | engine/audio/audio_director.py | codehearts/pickles-fetch-quest | ca9b3c7fe26acb50e1e2d654d068f5bb953bc427 | [
"MIT"
] | 3 | 2017-12-07T19:17:36.000Z | 2021-07-29T18:24:25.000Z | engine/audio/audio_director.py | codehearts/pickles-fetch-quest | ca9b3c7fe26acb50e1e2d654d068f5bb953bc427 | [
"MIT"
] | 41 | 2017-11-11T06:00:08.000Z | 2022-03-28T23:27:25.000Z | engine/audio/audio_director.py | codehearts/pickles-fetch-quest | ca9b3c7fe26acb50e1e2d654d068f5bb953bc427 | [
"MIT"
] | 2 | 2018-08-31T23:49:00.000Z | 2021-09-21T00:42:48.000Z | from .audio_source import AudioSource
from engine import disk
import pyglet.media
| 38.27027 | 79 | 0.631474 |
6726c80fc78ce012124f71d544ed59aef2223c32 | 2,858 | py | Python | source/windows10 system repair tool.py | programmer24680/windows10-system-repair-tool | 130e9c55a7448811994a4bc04f2c3362d96cf9c9 | [
"MIT"
] | 1 | 2021-01-25T06:44:45.000Z | 2021-01-25T06:44:45.000Z | source/windows10 system repair tool.py | programmer24680/windows10-system-repair-tool | 130e9c55a7448811994a4bc04f2c3362d96cf9c9 | [
"MIT"
] | null | null | null | source/windows10 system repair tool.py | programmer24680/windows10-system-repair-tool | 130e9c55a7448811994a4bc04f2c3362d96cf9c9 | [
"MIT"
] | null | null | null | import os
import time
print("=====================================================================")
print(" ")
print(" STARTING SYSTEM REPAIR ")
print(" ")
print("=====================================================================")
print(" ")
print("These are the jobs this application can do for you.")
print("1.Clean The DISM Component Store")
print("2.Repair Corrupted Windows Files Using SFC")
print("3.Repair Corrupted Windows Files Using DISM")
choice = input("Enter the serial number of the job which you want this application to do (1/2/3): ")
if choice == "1":
print("Analyzing Component Store")
os.system("dism.exe /Online /Cleanup-Image /AnalyzeComponentStore")
time.sleep(3)
print("Warning: You have to cleanup component store only if necessary.")
time.sleep(3)
Confirmation = input("Do you want to cleanup the component store?(y/n): ")
if Confirmation.upper() == "Y":
os.system("dism.exe /Online /Cleanup-Image /StartComponentCleanup")
time.sleep(3)
print("Now Exiting!")
elif Confirmation.upper() == "N":
print("Skipping Component Cleanup As Per The User's Instructions")
time.sleep(3)
print("Now Exiting!")
time.sleep(1)
else:
print('You have to enter only "y" or "n"')
time.sleep(3)
print("Now Exiting!")
time.sleep(1)
elif choice == "2":
print("Starting SFC Repair Job")
os.system("SFC /SCANNOW")
time.sleep(3)
print("Operation Cpmpleted Successfully!")
time.sleep(3)
print("Now Exiting!")
elif choice == "3":
Internet_Connection = input("Do you have an active internet connection?(y/n): ")
if Internet_Connection.upper() == "N":
iso_file = input("Do you have windows10 wim file?(y/n): ")
if iso_file.upper() == "Y":
Location = input("Enter the location of the wim file: ")
print("Starting DISM")
os.system("dism.exe /Online /Cleanup-Image /RestoreHealth /Source:" + Location + " /LimitAccess")
time.sleep(3)
print("Now Exiting!")
else:
print("Sorry but you need either internet connection or wim file in order to run Dism")
time.sleep(3)
print("Now Exiting!")
elif Internet_Connection.upper() == "Y":
print("Starting DISM")
os.system("dism.exe /Online /Cleanup-Image /RestoreHealth")
time.sleep(3)
print("Now Exiting")
else:
print("You have to enter only Y/N")
time.sleep(3)
else:
print("Choice Not Valid")
time.sleep(3)
print("Now Exiting!")
| 42.029412 | 109 | 0.537089 |
6728b39bc11d9e4b1e1974a7a10fb1bb5d2f22d9 | 3,368 | py | Python | tests/test_fid_score.py | jwblangley/pytorch-fid | 3d604a25516746c3a4a5548c8610e99010b2c819 | [
"Apache-2.0"
] | 1,732 | 2018-03-05T19:20:48.000Z | 2022-03-31T08:11:03.000Z | tests/test_fid_score.py | jwblangley/pytorch-fid | 3d604a25516746c3a4a5548c8610e99010b2c819 | [
"Apache-2.0"
] | 70 | 2018-06-29T07:48:43.000Z | 2022-03-29T13:14:07.000Z | tests/test_fid_score.py | jwblangley/pytorch-fid | 3d604a25516746c3a4a5548c8610e99010b2c819 | [
"Apache-2.0"
] | 357 | 2018-03-14T06:35:24.000Z | 2022-03-31T11:04:39.000Z | import numpy as np
import pytest
import torch
from PIL import Image
from pytorch_fid import fid_score, inception
| 32.699029 | 77 | 0.540974 |
6728f13a7364357219192b47721a96d415fff8dc | 873 | py | Python | run/client.py | withcouragetol/codebee-10l | 2636b8fc1b456a85201b868201cf9c147d739031 | [
"Apache-2.0"
] | 6 | 2018-04-13T09:48:26.000Z | 2020-06-22T13:42:10.000Z | run/client.py | withcouragetol/codebee-10l | 2636b8fc1b456a85201b868201cf9c147d739031 | [
"Apache-2.0"
] | null | null | null | run/client.py | withcouragetol/codebee-10l | 2636b8fc1b456a85201b868201cf9c147d739031 | [
"Apache-2.0"
] | 2 | 2018-09-04T07:09:50.000Z | 2019-08-18T15:11:00.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import time
if __name__=="__main__":
emsc = emsc_client()
emsc.run() | 24.942857 | 77 | 0.514318 |
672a7017194500a70a969cf6e26d3c8f610f807f | 2,765 | py | Python | src/sonic_ax_impl/main.py | stepanblyschak/sonic-snmpagent | 45edd7e689922ecf90697d099285f7cce99742c8 | [
"Apache-2.0"
] | 13 | 2016-03-09T20:38:16.000Z | 2021-02-04T17:39:27.000Z | src/sonic_ax_impl/main.py | stepanblyschak/sonic-snmpagent | 45edd7e689922ecf90697d099285f7cce99742c8 | [
"Apache-2.0"
] | 167 | 2017-02-01T23:16:11.000Z | 2022-03-31T02:22:08.000Z | src/sonic_ax_impl/main.py | xumia/sonic-snmpagent | 4e063e4ade89943f2413a767f24564aecfa2cd1c | [
"Apache-2.0"
] | 89 | 2016-03-09T20:38:18.000Z | 2022-03-09T09:16:13.000Z | """
SNMP subagent entrypoint.
"""
import asyncio
import functools
import os
import signal
import sys
import ax_interface
from sonic_ax_impl.mibs import ieee802_1ab
from . import logger
from .mibs.ietf import rfc1213, rfc2737, rfc2863, rfc3433, rfc4292, rfc4363
from .mibs.vendor import dell, cisco
# Background task update frequency ( in seconds )
DEFAULT_UPDATE_FREQUENCY = 5
event_loop = asyncio.get_event_loop()
shutdown_task = None
| 32.151163 | 111 | 0.718626 |
672a72c5fc5af6da05a603f68e577831d5bb4e8d | 8,000 | py | Python | btk_server.py | bedrin/keyboard_mouse_emulate_on_raspberry | 2f1f0cff4b5c5b2e20159d0e91542ec8a5a48e3c | [
"MIT"
] | null | null | null | btk_server.py | bedrin/keyboard_mouse_emulate_on_raspberry | 2f1f0cff4b5c5b2e20159d0e91542ec8a5a48e3c | [
"MIT"
] | null | null | null | btk_server.py | bedrin/keyboard_mouse_emulate_on_raspberry | 2f1f0cff4b5c5b2e20159d0e91542ec8a5a48e3c | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from __future__ import absolute_import, print_function
from optparse import OptionParser, make_option
import os
import sys
import uuid
import dbus
import dbus.service
import dbus.mainloop.glib
import time
import socket
from gi.repository import GLib
from dbus.mainloop.glib import DBusGMainLoop
import logging
from logging import debug, info, warning, error
import keymap
logging.basicConfig(level=logging.DEBUG)
# main routine
if __name__ == "__main__":
try:
DBusGMainLoop(set_as_default=True)
myservice = BTKbService()
loop = GLib.MainLoop()
loop.run()
except KeyboardInterrupt:
sys.exit()
| 34.188034 | 103 | 0.59525 |
672b2fd274da4c3abef696a1ce2183fc11422e30 | 11,479 | py | Python | ai2thor/util/visualize_3D_bbox.py | KuoHaoZeng/ai2thor-1 | 7cc3295f8ac7a272078159f44b74bf61d1d2bb56 | [
"Apache-2.0"
] | null | null | null | ai2thor/util/visualize_3D_bbox.py | KuoHaoZeng/ai2thor-1 | 7cc3295f8ac7a272078159f44b74bf61d1d2bb56 | [
"Apache-2.0"
] | null | null | null | ai2thor/util/visualize_3D_bbox.py | KuoHaoZeng/ai2thor-1 | 7cc3295f8ac7a272078159f44b74bf61d1d2bb56 | [
"Apache-2.0"
] | null | null | null | import ai2thor.controller
import numpy as np
from PIL import Image, ImageDraw
if __name__ == "__main__":
# give the height and width of the 2D image and scene id
w, h = 900, 900
scene = "FloorPlan2{:02d}_physics".format(1)
# allocate controller and initialize the scene and agent
# local_path = "src/ai2thor/unity/builds/thor-local-OSXIntel64.app/Contents/MacOS/AI2-Thor"
local_path = ""
controller = ai2thor.controller.Controller(local_path=local_path)
_ = controller.start(width=w, height=h)
_ = controller.reset(scene)
event = controller.step(dict(action='Initialize',
gridSize=0.25,
renderClassImage=True,
renderObjectImage=True,
renderDepthImage=True,
fieldOfView=90))
# do something then draw the 3D bbox in 2D image
event = controller.step(dict(action="MoveAhead"))
event = controller.step(dict(action="MoveAhead"))
event = controller.step(dict(action="Rotate", rotation=dict(x=0, y=30, z=0)))
event = draw_3d_bbox(event)
img = Image.fromarray(event.bbox_3d_frame, "RGB")
img.save("./output1.png")
event = controller.step(dict(action="LookDown"))
event = draw_3d_bbox(event)
img = Image.fromarray(event.bbox_3d_frame, "RGB")
img.save("./output2.png")
event = controller.step(dict(action="LookDown"))
event = draw_3d_bbox(event)
img = Image.fromarray(event.bbox_3d_frame, "RGB")
img.save("./output3.png")
| 46.100402 | 119 | 0.4787 |
672b4006ae24930b53edb66efd8fb73b92773911 | 3,754 | py | Python | sa/profiles/ElectronR/KO01M/get_metrics.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | sa/profiles/ElectronR/KO01M/get_metrics.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | sa/profiles/ElectronR/KO01M/get_metrics.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ---------------------------------------------------------------------
# ElectronR.KO01M.get_metrics
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.sa.profiles.Generic.get_metrics import Script as GetMetricsScript, metrics
| 43.149425 | 98 | 0.485615 |