hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b252bb8863e2cde9dc1c8cf3fba5014be866dbed | 5,607 | py | Python | gnuradio-3.7.13.4/gr-qtgui/apps/plot_spectrogram_base.py | v1259397/cosmic-gnuradio | 64c149520ac6a7d44179c3f4a38f38add45dd5dc | [
"BSD-3-Clause"
] | 1 | 2021-03-09T07:32:37.000Z | 2021-03-09T07:32:37.000Z | gnuradio-3.7.13.4/gr-qtgui/apps/plot_spectrogram_base.py | v1259397/cosmic-gnuradio | 64c149520ac6a7d44179c3f4a38f38add45dd5dc | [
"BSD-3-Clause"
] | null | null | null | gnuradio-3.7.13.4/gr-qtgui/apps/plot_spectrogram_base.py | v1259397/cosmic-gnuradio | 64c149520ac6a7d44179c3f4a38f38add45dd5dc | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blocks
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import os, sys
# Import the Qt GUI toolchain; gr-qtgui and PyQt4 are hard requirements for
# this plotting app, so exit with a clear message if they are missing.
# (This file targets Python 2 -- note the print statements.)
try:
    from gnuradio import qtgui
    from PyQt4 import QtGui, QtCore
    import sip
except ImportError:
    print "Error: Program requires PyQt4 and gr-qtgui."
    sys.exit(1)
# SciPy is also mandatory.
try:
    import scipy
except ImportError:
    print "Error: Scipy required (www.scipy.org)."
    sys.exit(1)
# Prefer the installed gnuradio.qtgui helpers; fall back to local copies
# when running from the source tree.
try:
    from gnuradio.qtgui.plot_form import *
    from gnuradio.qtgui.plot_base import *
except ImportError:
    from plot_form import *
    from plot_base import *
| 32.789474 | 97 | 0.619939 |
b252ce6e7da24bbb6a02a3119c677f69f7ea2e58 | 4,699 | py | Python | unused/csv_slicer_crop_threshold.py | eufmike/storm_image_processing | 076335519be0be3b66d289a180421d36770ab820 | [
"CC-BY-4.0"
] | null | null | null | unused/csv_slicer_crop_threshold.py | eufmike/storm_image_processing | 076335519be0be3b66d289a180421d36770ab820 | [
"CC-BY-4.0"
] | null | null | null | unused/csv_slicer_crop_threshold.py | eufmike/storm_image_processing | 076335519be0be3b66d289a180421d36770ab820 | [
"CC-BY-4.0"
] | null | null | null | # %%
# slice the csv according to the frame size
import os, sys
import pandas as pd
# from tkinter import *
# Functions Section Begins ----------------------------------------------------- #
def dircheck(targetpaths):
    """Ensure the given directory (or directories) exist, creating them if needed.

    Parameters
    ----------
    targetpaths : str or list of str
        A single directory path, or a list of directory paths.

    Notes
    -----
    Uses ``os.makedirs(..., exist_ok=True)`` so the original's
    check-then-create race (TOCTOU) is avoided and intermediate directories
    are created as needed.  The leftover debug ``print`` of the existence
    check was removed.  Inputs that are neither ``str`` nor ``list`` are
    silently ignored, matching the original behavior.
    """
    if isinstance(targetpaths, str):
        targetpaths = [targetpaths]
    elif not isinstance(targetpaths, list):
        # The original did nothing for other input types; keep that contract.
        return
    for path in targetpaths:
        os.makedirs(path, exist_ok=True)
def getpendinglist(src_dir, op_dir, src_ext='.nd2', op_ext='.csv'):
    """Compare source files against finished outputs and list unprocessed ones.

    Parameters
    ----------
    src_dir : str
        Directory containing the source files.
    op_dir : str
        Directory containing already-produced output files.
    src_ext, op_ext : str
        Extensions of the source and output files.

    Returns
    -------
    tuple of (list, list, list)
        ``(pending base names, absolute input paths, output paths to write)``.

    Notes
    -----
    Relies on the module-level ``listfiles`` helper (defined elsewhere in
    this project) returning a dict with a ``'fileabslist'`` key.  Compared
    to the original: the per-file debug prints were removed, membership is
    tested against a set (O(1) instead of O(n) list scans), and the
    ``range(len(...))`` index loop was replaced with direct iteration.
    """
    srclist = listfiles(src_dir, src_ext)['fileabslist']
    oplist = listfiles(op_dir, op_ext)['fileabslist']
    # Base names (extension stripped) of outputs that already exist.
    done_basenames = {
        os.path.splitext(os.path.basename(p))[0] for p in oplist
    }
    pendingfllist = []
    pendingpathlist_input = []
    pendingpathlist_output = []
    for srcpath in srclist:
        basename = os.path.splitext(os.path.basename(srcpath))[0]
        if basename not in done_basenames:
            pendingfllist.append(basename)
            pendingpathlist_input.append(srcpath)
            pendingpathlist_output.append(
                os.path.join(op_dir, basename + op_ext))
    return (pendingfllist, pendingpathlist_input, pendingpathlist_output)
# Functions Section Ends ----------------------------------------------------- #
# create input path
# load the csv file
# Root of the STORM imaging project and the analysis run being processed.
path = '/Volumes/LaCie_DataStorage/xiaochao_wei_STORM imaging/STORM_imaging'
analysis_dir = 'analysis_20190308'
analysis_subdir = 'tstorm'
csvdata_dir = 'csvdata_crop'
# Number of imaging channels; input/output CSVs live in one folder per channel.
nchannel = 2
# NOTE(review): crop_region is defined but never used in this excerpt.
crop_region = 3
ip_path = os.path.join(path, analysis_dir, analysis_subdir, csvdata_dir)
# create output path
# Output tree: <op_path>/1, <op_path>/2, ... one sub-folder per channel.
dir_for_check = []
op_dir = 'csvdata_crop_th'
op_path = os.path.join(path, analysis_dir, analysis_subdir, op_dir)
dir_for_check.append(op_path)
for i in range(nchannel):
    dir_tmp = os.path.join(op_path, str(i+1))
    dir_for_check.append(dir_tmp)
# Create any of the above folders that do not exist yet.
dircheck(dir_for_check)
# %%
# load crop data
# Per-image crop rectangles (in pixels), produced earlier in the pipeline.
dir_par = 'par'
path_cropdata = os.path.join(path, analysis_dir, dir_par, 'cropsize.csv')
df_cropdata = pd.read_csv(path_cropdata, header = 0)
# NOTE(review): `display` is the IPython/Jupyter rich-display helper; this
# script is organized in "# %%" cells and is meant to run in such a session.
display(df_cropdata)
# %%
# load image stat
path_imgstat = os.path.join(path, analysis_dir, 'preprocessing', 'imginfo', 'imgstat.csv')
df_imgstat = pd.read_csv(path_imgstat, header = 0)
display(df_imgstat)
# %%
# covert ROI in pixel to m
# Scale the pixel ROIs by 160 into the "_nm" columns and derive the max
# corner from the min corner plus the ROI extent.
df_cropdata['x_min_nm'] = df_cropdata['x'] * 160
df_cropdata['y_min_nm'] = df_cropdata['y'] * 160
df_cropdata['dx_nm'] = df_cropdata['dx'] * 160
df_cropdata['dy_nm'] = df_cropdata['dy'] * 160
df_cropdata['x_max_nm'] = df_cropdata['x_min_nm'] + df_cropdata['dx_nm']
df_cropdata['y_max_nm'] = df_cropdata['y_min_nm'] + df_cropdata['dy_nm']
display(df_cropdata)
print(df_cropdata.shape[0])
# %%
# Slice each per-channel localization CSV down to its crop region, then
# drop localizations below a per-channel intensity threshold.
#for i in range(1):
# Minimum intensity (photons), keyed by channel number as a string.
threshold = {
    '1': 10000,
    '2': 15000,
}
for i in range(df_cropdata.shape[0]):
    imgname = df_cropdata['name'][i]
    # Crop bounds in nm, computed above from the pixel ROIs.
    x_min = df_cropdata['x_min_nm'][i]
    x_max = df_cropdata['x_max_nm'][i]
    y_min = df_cropdata['y_min_nm'][i]
    y_max = df_cropdata['y_max_nm'][i]
    img_region = df_cropdata['img'][i]
    for j in range(nchannel):
        path_csv_ip = os.path.join(ip_path, str(j+1), imgname + '.csv')
        print(path_csv_ip)
        data = pd.read_csv(path_csv_ip, header=0)
        # Keep only localizations inside the crop rectangle.
        data_sliced = data[(data['x [nm]'] >= x_min) & (data['x [nm]'] < x_max) & \
                           (data['y [nm]'] >= y_min) & (data['y [nm]'] < y_max)]
        threshold_temp = threshold[str(j+1)]
        # Bug fix: build the mask from `data_sliced`, not `data`.  The
        # original indexed `data_sliced` with a Boolean Series derived from
        # `data`, which only worked through pandas index alignment and
        # raised the "Boolean Series key will be reindexed" UserWarning.
        data_sliced = data_sliced[data_sliced['intensity [photon]'] > threshold_temp]
        path_csv_op = os.path.join(op_path, str(j+1), imgname + '_r' + str(img_region) + '.csv')
        data_sliced.to_csv(path_csv_op, index = False)
| 32.631944 | 100 | 0.683337 |
b25374f98c200b684bc06d7e6e70a0fae5c15a98 | 4,682 | py | Python | doodle.py | plasticuproject/DoodleNet | 1abbf05b2302ce6d8a47d369ddb45d4c5a0dc26d | [
"MIT"
] | 2 | 2020-03-16T01:26:42.000Z | 2020-06-19T12:04:37.000Z | doodle.py | plasticuproject/DoodleNet | 1abbf05b2302ce6d8a47d369ddb45d4c5a0dc26d | [
"MIT"
] | null | null | null | doodle.py | plasticuproject/DoodleNet | 1abbf05b2302ce6d8a47d369ddb45d4c5a0dc26d | [
"MIT"
] | null | null | null | import pygame
import random
import numpy as np
import cv2
from dutil import add_pos
#User constants
device = "gpu"                      # selects ./<device>.theanorc below
model_fname = 'Model.h5'            # trained Keras model loaded below
background_color = (210, 210, 210)
input_w = 144                       # canvas width at model resolution
input_h = 192                       # canvas height at model resolution
image_scale = 3                     # on-screen zoom of the two panels
image_padding = 10                  # pixel gap around/between panels
mouse_interps = 10
#Derived constants
drawing_w = input_w * image_scale
drawing_h = input_h * image_scale
# The window holds two panels (doodle + generated) side by side plus padding.
window_width = drawing_w*2 + image_padding*3
window_height = drawing_h + image_padding*2
doodle_x = image_padding
doodle_y = image_padding
generated_x = doodle_x + drawing_w + image_padding
generated_y = image_padding
#Global variables
prev_mouse_pos = None               # last mouse position while drawing
mouse_pressed = False               # left button currently held
needs_update = True                 # model prediction is stale
cur_color_ix = 1
cur_drawing = None
# NOTE(review): clear_drawing() and the other drawing helpers used below are
# not visible in this excerpt; it presumably initializes cur_drawing -- confirm.
clear_drawing()
cur_gen = np.zeros((3, input_h, input_w), dtype=np.uint8)
rgb_array = np.zeros((input_h, input_w, 3), dtype=np.uint8)
image_result = np.zeros((input_h, input_w, 3), dtype=np.uint8)
#Keras
print("Loading Keras...")
import os
# Point Theano at the device-specific config and force the Theano backend;
# both environment variables must be set before theano/keras are imported.
os.environ['THEANORC'] = "./" + device + ".theanorc"
os.environ['KERAS_BACKEND'] = "theano"
import theano
print("Theano Version: " + theano.__version__)
from keras.models import Sequential, load_model
from keras import backend as K
# Channels-first tensor layout, matching the (3, H, W) arrays above.
K.set_image_data_format('channels_first')
#Load the model
print("Loading Model...")
model = load_model(model_fname)
#Open a window
pygame.init()
screen = pygame.display.set_mode((window_width, window_height))
# Mini surfaces are at model resolution; the subsurfaces are the scaled
# on-screen panels (left: doodle, right: generated image).
doodle_surface_mini = pygame.Surface((input_w, input_h))
doodle_surface = screen.subsurface((doodle_x, doodle_y, drawing_w, drawing_h))
gen_surface_mini = pygame.Surface((input_w, input_h))
gen_surface = screen.subsurface((generated_x, generated_y, drawing_w, drawing_h))
pygame.display.set_caption('Doodle Net')
#Main loop
# NOTE(review): update_mouse / update_mouse_line / sparse_to_rgb /
# draw_doodle / draw_generated are helpers defined elsewhere in the original
# file and are not visible in this excerpt.
running = True
while running:
    #Process events
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
            break
        elif event.type == pygame.MOUSEBUTTONDOWN:
            # Left button starts a stroke; right button clears the canvas.
            if pygame.mouse.get_pressed()[0]:
                prev_mouse_pos = pygame.mouse.get_pos()
                update_mouse(prev_mouse_pos)
                mouse_pressed = True
            elif pygame.mouse.get_pressed()[2]:
                clear_drawing()
                needs_update = True
        elif event.type == pygame.MOUSEBUTTONUP:
            mouse_pressed = False
            prev_mouse_pos = None
        elif event.type == pygame.MOUSEMOTION and mouse_pressed:
            # Extend the current stroke while dragging.
            update_mouse_line(pygame.mouse.get_pos())
    #Check if we need an update
    # Re-run the network only when the doodle changed since the last frame:
    # normalize to [0, 1], add a batch axis, predict, and rescale to uint8.
    if needs_update:
        fdrawing = np.expand_dims(cur_drawing.astype(np.float32) / 255.0, axis=0)
        pred = model.predict(add_pos(fdrawing), batch_size=1)[0]
        cur_gen = (pred * 255.0).astype(np.uint8)
        rgb_array = sparse_to_rgb(cur_drawing)
        needs_update = False
    #Draw to the screen
    screen.fill(background_color)
    draw_doodle()
    draw_generated()
    #Flip the screen buffer
    pygame.display.flip()
    pygame.time.wait(10)
| 30.012821 | 90 | 0.683682 |
b253aa300dbf2d178cf0b2b7ef4c04bdb3c8a3ab | 2,259 | py | Python | tests/dmon/test_dmon.py | Bounti/avatar2_dmon | c24a908b2cd3faea290380b4d0364d23b4430d2e | [
"Apache-2.0"
] | null | null | null | tests/dmon/test_dmon.py | Bounti/avatar2_dmon | c24a908b2cd3faea290380b4d0364d23b4430d2e | [
"Apache-2.0"
] | null | null | null | tests/dmon/test_dmon.py | Bounti/avatar2_dmon | c24a908b2cd3faea290380b4d0364d23b4430d2e | [
"Apache-2.0"
] | null | null | null | from avatar2 import *
import sys
import os
import logging
import time
import argparse
import subprocess
import struct
import ctypes
from random import randint
# For profiling
import pstats
import numpy as np
import numpy.testing as npt
# Log test progress to a fixed file so interactive output stays clean.
logging.basicConfig(filename='/tmp/inception-tests.log', level=logging.INFO)
GDB_PORT = 3000
# ELF of the firmware under test and of the DMon debugger stub.
firmware = "./LPC1850_WEBSERVER.elf"
dmon_stub_firmware = './DMON_ZYNQ_7020_STUB.elf'
if __name__ == '__main__':
    # start the hw_server which offers a GDBMI interface for remote debugging
    gdbserver = subprocess.Popen(
        ['hw_server', '-s TCP:localhost:%d' % GDB_PORT], shell=False
        #['xsdb', '-eval', 'xsdbserver start -host localhost -port %d' % 3121], shell=False
    )
    # Give the server time to start listening before avatar connects.
    time.sleep(2)
    # Initialize avatar for ARMV7M architecture
    avatar = Avatar(arch=ARMV7M, output_directory='/tmp/xsdb-tests')
    # Instantiate the DMon platform
    # It takes as inputs:
    # - the ps7 init script which is used for initializing the FPGA fabric and the zynq CPU
    # - the system.hdf that defines the zynq memory mapping
    # - the dmon_stub_firmware that points to the ELF of the DMon stub
    dmon_zynq_7020 = avatar.add_target(DMonTarget, "./ps7_init.tcl", "./system.hdf", dmon_stub_firmware, gdb_port=GDB_PORT, name='dmon_zynq_7020')
    avatar.init_targets()
    print("[*] DMon initialized")
    # Sanity check: after init the target must be halted at this known PC.
    pc = dmon_zynq_7020.read_register("pc")
    npt.assert_equal(pc, 0x100a58)
    print("[*] DMon stub has initialized the MMU")
    # file ./LPC1850_WEBSERVER.elf
    dmon_zynq_7020.set_file(firmware)
    # load
    dmon_zynq_7020.download()
    print("[*] Tested firmware has been loaded on the DMon target")
    # set $pc=0x1c000115
    dmon_zynq_7020.write_register("pc", 0x1c000115)
    # b main
    ret = dmon_zynq_7020.set_breakpoint("main", hardware=True)
    npt.assert_equal(ret, True)
    # continue
    dmon_zynq_7020.cont()
    dmon_zynq_7020.wait()
    print("[*] DMon reaches main function")
    # Let the firmware run freely for a while, then tear everything down.
    dmon_zynq_7020.cont()
    print("[*] DMon running for 10 seconds")
    time.sleep(10)
    dmon_zynq_7020.stop()
    dmon_zynq_7020.shutdown()
    gdbserver.terminate()
    #Stop all threads for the profiler
    print("[*] Test completed")
    avatar.stop()
| 29.337662 | 146 | 0.698097 |
b255a55b50c0a4e6111dcdc38c9b04c04072f949 | 7,716 | py | Python | lexer/scanner.py | lohhans/Compiladores-2020.4 | c196c11d0c1ec3b25b54b01e0729474205f328ed | [
"MIT"
] | 3 | 2021-01-08T03:41:35.000Z | 2021-01-11T04:22:31.000Z | lexer/scanner.py | laisy/Compiladores-2020.4 | c196c11d0c1ec3b25b54b01e0729474205f328ed | [
"MIT"
] | 1 | 2021-01-17T07:56:56.000Z | 2021-01-17T07:56:56.000Z | lexer/scanner.py | laisy/Compiladores-2020.4 | c196c11d0c1ec3b25b54b01e0729474205f328ed | [
"MIT"
] | 3 | 2021-01-08T00:13:27.000Z | 2021-09-09T13:56:54.000Z | from lexer.token import Token
| 31.365854 | 100 | 0.424313 |
b256d93e962708f149cc2aba7b423f5e16306972 | 2,295 | py | Python | tests/test_laser.py | chiragjn/laserembeddings | 37f2aaf723966f24fe0a8d473241725fba46f691 | [
"BSD-3-Clause"
] | null | null | null | tests/test_laser.py | chiragjn/laserembeddings | 37f2aaf723966f24fe0a8d473241725fba46f691 | [
"BSD-3-Clause"
] | null | null | null | tests/test_laser.py | chiragjn/laserembeddings | 37f2aaf723966f24fe0a8d473241725fba46f691 | [
"BSD-3-Clause"
] | null | null | null | import os
import pytest
import numpy as np
from laserembeddings import Laser
# Environment flag read once at import time; its truthiness presumably gates
# the (slow) similarity test elsewhere in this file -- confirm against usage.
SIMILARITY_TEST = os.getenv('SIMILARITY_TEST')
| 32.785714 | 151 | 0.57342 |
b2587d1aad26d95bdbf9bbeb64895092e8199eaa | 1,467 | py | Python | alipay/aop/api/domain/TaxReceiptOnceInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/TaxReceiptOnceInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/TaxReceiptOnceInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
| 26.196429 | 83 | 0.611452 |
b2590f93012b66c0c656914441825de752b36b9c | 1,371 | py | Python | Make-Sense-of-Census/code.py | NishantNair14/greyatom-python-for-data-science | e269530300c996eb67e7c1f2317d0b279b8091ae | [
"MIT"
] | null | null | null | Make-Sense-of-Census/code.py | NishantNair14/greyatom-python-for-data-science | e269530300c996eb67e7c1f2317d0b279b8091ae | [
"MIT"
] | null | null | null | Make-Sense-of-Census/code.py | NishantNair14/greyatom-python-for-data-science | e269530300c996eb67e7c1f2317d0b279b8091ae | [
"MIT"
] | null | null | null | # --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
#New record
# One additional census row: [age, education-num, race, sex?, ..., hours, income?]
# (column meanings inferred from variable names below -- confirm with source).
new_record = [[50, 9, 4, 1, 0, 0, 40, 0]]

#Code starts here
# `path` is provided by the platform (see comment above); `data_file` is
# kept for reference only and is not read directly.
data_file = 'subset_1000.csv'
data = np.genfromtxt(path, delimiter=",", skip_header=1)
print(data)
# Prepend the new record to the loaded census data.
census = np.concatenate((new_record, data), axis=0)
print(census)

# --------------
#Code starts here
# Age statistics (column 0).
age = census[:, 0]
max_age = np.max(age)
min_age = np.min(age)
age_mean = np.mean(age)
age_std = np.std(age)

# --------------
#Code starts here
# Split rows by race code (column 2, values 0-4) and count each group.
race_0 = census[census[:, 2] == 0]
race_1 = census[census[:, 2] == 1]
race_2 = census[census[:, 2] == 2]
race_3 = census[census[:, 2] == 3]
race_4 = census[census[:, 2] == 4]

len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)
print(len_0, len_1, len_2, len_3, len_4)

# Compute the minority race from the counts instead of hard-coding it.
# (The original hard-coded 3 after reading the printed counts; argmin
# reproduces that value from the data and generalizes to other inputs.)
minority_race = int(np.argmin([len_0, len_1, len_2, len_3, len_4]))

# --------------
#Code starts here
# Average weekly working hours of senior citizens (age > 60); column 6
# holds the hours value per the variable naming.
senior_citizens = census[census[:, 0] > 60]
working_hours_sum = senior_citizens.sum(axis=0)[6]
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum / senior_citizens_len
print(avg_working_hours)

# --------------
#Code starts here
# Average pay (column 7) split by education level (column 1), rounded to 2 dp.
high = census[census[:, 1] > 10]
low = census[census[:, 1] <= 10]
avg_pay_high = round(high.mean(axis=0)[7], 2)
avg_pay_low = round(low.mean(axis=0)[7], 2)
print(avg_pay_high, avg_pay_low)
a = avg_pay_high - avg_pay_low
print(a)
b259ceb8b82845e18e8b6159d8f807dea2a352fc | 1,478 | py | Python | scripts/rgb2labels.py | theRealSuperMario/supermariopy | 9fff8275278ff26caff50da86109c25d276bb30b | [
"MIT"
] | 36 | 2019-07-14T16:10:37.000Z | 2022-03-29T10:11:03.000Z | scripts/rgb2labels.py | theRealSuperMario/supermariopy | 9fff8275278ff26caff50da86109c25d276bb30b | [
"MIT"
] | 3 | 2019-10-09T15:11:13.000Z | 2021-07-31T02:17:43.000Z | scripts/rgb2labels.py | theRealSuperMario/supermariopy | 9fff8275278ff26caff50da86109c25d276bb30b | [
"MIT"
] | 14 | 2019-08-29T14:11:54.000Z | 2022-03-06T13:41:56.000Z | import numpy as np
from matplotlib import pyplot as plt
"""
https://stackoverflow.com/questions/42750910/convert-rgb-image-to-index-image/62980021#62980021
convert semantic labels from RGB coding to index coding
Steps:
1. define COLORS (see below)
2. hash colors
3. run rgb2index(segmentation_rgb)
see example below
TODO: apparently, using cv2.LUT is much simpler (and maybe faster?)
"""
# Palette: label index i corresponds to color COLORS[i].
COLORS = np.array([[0, 0, 0], [0, 0, 255], [255, 0, 0], [0, 255, 0]])
# Base-255 weights give each RGB triple a unique integer hash:
# hash = r + 255*g + 255**2*b.
W = np.power(255, [0, 1, 2])
HASHES = np.sum(W * COLORS, axis=-1)
HASH2COLOR = {h: c for h, c in zip(HASHES, COLORS)}
HASH2IDX = {h: i for i, h in enumerate(HASHES)}


def rgb2index(segmentation_rgb):
    """Convert a 3-channel RGB segmentation image to a 1-channel index image.

    Parameters
    ----------
    segmentation_rgb : ndarray of shape (H, W, 3)
        RGB image whose pixel colors all appear in ``COLORS``.

    Returns
    -------
    ndarray of shape (H, W)
        Index of each pixel's color in ``COLORS``.

    Raises
    ------
    KeyError
        If the image contains a color not present in ``COLORS``.
    """
    s_shape = segmentation_rgb.shape
    s_hashes = np.sum(W * segmentation_rgb, axis=-1)
    print(np.unique(segmentation_rgb.reshape((-1, 3)), axis=0))
    # Vectorized dict lookup replaces the original per-pixel lambda with
    # np.apply_along_axis (one Python call per pixel) and the E731
    # lambda-assignment.  astype(int) matches the original's int(x) cast.
    lookup = np.vectorize(HASH2IDX.__getitem__, otypes=[int])
    segmentation_idx = lookup(s_hashes.astype(int))
    return segmentation_idx.reshape(s_shape[:2])
# Demo: build a tiny 3x3 image from the first three palette colors, convert
# it, and show the RGB input next to the resulting index image.
segmentation = np.array([[0, 0, 0], [0, 0, 255], [255, 0, 0]] * 3).reshape((3, 3, 3))
segmentation_idx = rgb2index(segmentation)
print(segmentation)
print(segmentation_idx)
fig, axes = plt.subplots(1, 2, figsize=(6, 3))
axes[0].imshow(segmentation)
axes[0].set_title("Segmentation RGB")
axes[1].imshow(segmentation_idx)
axes[1].set_title("Segmentation IDX")
plt.show()
| 28.980392 | 95 | 0.696888 |
b25a1d4640dfacab5e05d7eaa4739062eb18d83d | 9,857 | py | Python | app/models/template.py | FireFragment/memegen | f0a1b3ba465b8cd68a873951ab50eeaa91d57a35 | [
"MIT"
] | null | null | null | app/models/template.py | FireFragment/memegen | f0a1b3ba465b8cd68a873951ab50eeaa91d57a35 | [
"MIT"
] | null | null | null | app/models/template.py | FireFragment/memegen | f0a1b3ba465b8cd68a873951ab50eeaa91d57a35 | [
"MIT"
] | null | null | null | import asyncio
import shutil
from functools import cached_property
from pathlib import Path
import aiopath
from datafiles import datafile, field
from furl import furl
from sanic import Request
from sanic.log import logger
from .. import settings, utils
from ..types import Dimensions
from .overlay import Overlay
from .text import Text
| 32.747508 | 84 | 0.573501 |
b25a3c66ad289a972f5766ff0bd4fc4b5518f26d | 833 | py | Python | corpora_toolbox/utils/io.py | laurahzdz/corpora_toolbox | 14a14534df1d80e6a7b2f37ce5f547f1cb5e81a4 | [
"MIT"
] | null | null | null | corpora_toolbox/utils/io.py | laurahzdz/corpora_toolbox | 14a14534df1d80e6a7b2f37ce5f547f1cb5e81a4 | [
"MIT"
] | null | null | null | corpora_toolbox/utils/io.py | laurahzdz/corpora_toolbox | 14a14534df1d80e6a7b2f37ce5f547f1cb5e81a4 | [
"MIT"
] | null | null | null | import codecs
import os
# Function to save a string into a file
# Function to read all files in a dir with a specific extension
# Function to read a file into a string
# Function to create a directory
| 25.242424 | 68 | 0.698679 |
b25bc6b4a0128eafc07471d9e7edfbe8c99fcc86 | 4,108 | py | Python | Games/WarCardGame.py | AyselHavutcu/PythonGames | 8144f56a4c015e43a94ab529244475c3db9adee4 | [
"MIT"
] | null | null | null | Games/WarCardGame.py | AyselHavutcu/PythonGames | 8144f56a4c015e43a94ab529244475c3db9adee4 | [
"MIT"
] | null | null | null | Games/WarCardGame.py | AyselHavutcu/PythonGames | 8144f56a4c015e43a94ab529244475c3db9adee4 | [
"MIT"
] | null | null | null | import random
# Card constants: 4 suits x 13 ranks; `values` maps rank names to their
# numeric strength (Ace high).
suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs')
ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')
values = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8,
          'Nine':9, 'Ten':10, 'Jack':11, 'Queen':12, 'King':13, 'Ace':14}
#Deck class will create 52 instances of Card class
# NOTE(review): the Player/Deck/Card class definitions are not visible in
# this excerpt.
#create the players
player_one = Player('John')
player_two = Player('Marrie')
#create a deck of cards and shuffle them
new_deck = Deck()
new_deck.shuffle()
#share the cards between players
# Deal the shuffled deck alternately, 26 cards to each player.
for x in range(26):
    player_one.add_cards(new_deck.deal_one())
    player_two.add_cards(new_deck.deal_one())
# Main game loop: each round both players play their top card; ties trigger
# "war", where each player commits 5 more cards (if they have them) and the
# comparison repeats on the newest cards.
game_on = True
round_num = 0
while game_on:
    #count the rounds
    round_num += 1
    print("Round {}".format(round_num))
    #check for the players cards
    if len(player_one.all_cards) == 0:
        print("Player ONE is out of cards.Player TWO Wins!")
        game_on = False
        break
    #check for the player 2
    if len(player_two.all_cards) == 0:
        print("Player TWO is out of cards.Player ONE Wins!")
        game_on = False
        break
    #START A NEW ROUND
    player_one_cards = [] #played cards
    player_one_cards.append(player_one.remove_one()) #remove the card from the top and play with it
    player_two_cards = []
    player_two_cards.append(player_two.remove_one())
    #check if the players are war
    at_war = True
    while at_war:
        # Compare the most recently played cards; the winner collects all
        # cards committed so far by both players.
        if player_one_cards[-1].value > player_two_cards[-1].value:
            #then player one gets the all cards
            player_one.add_cards(player_one_cards)
            player_one.add_cards(player_two_cards)
            at_war = False
        elif player_one_cards[-1].value < player_two_cards[-1].value:
            #then player two gets the all cards
            player_two.add_cards(player_one_cards)
            player_two.add_cards(player_two_cards)
            at_war = False
        else:
            print("WAR!")
            #the cards are equal then they are at war check if the player's cards are out of range number
            # A player who cannot commit 5 more cards loses immediately.
            if len(player_one.all_cards) < 5:
                print("Player ONE cannot declare war.Player TWO Wins!")
                game_on = False
                break
            elif len(player_two.all_cards) < 5:
                print("Player TWO cannot declare war.Player ONE Wins!")
                game_on = False
                break
            else:
                #continue the game
                for num in range(5):
                    player_one_cards.append(player_one.remove_one())
                    player_two_cards.append(player_two.remove_one())
b25bc80a13089b17ce70ec72af0643fdd3cdbaca | 16,503 | py | Python | startracker/beast/beast.py | Oregon-Tech-Rocketry-and-Aerospace/space-debris-card | d72303436b6cb1a409d5217d0518db0b0335d10a | [
"MIT"
] | null | null | null | startracker/beast/beast.py | Oregon-Tech-Rocketry-and-Aerospace/space-debris-card | d72303436b6cb1a409d5217d0518db0b0335d10a | [
"MIT"
] | null | null | null | startracker/beast/beast.py | Oregon-Tech-Rocketry-and-Aerospace/space-debris-card | d72303436b6cb1a409d5217d0518db0b0335d10a | [
"MIT"
] | null | null | null | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
# ---------------------------------------------------------------------------
# SWIG-generated wrapper boilerplate for the _beast C++ extension module.
# NOTE(review): this excerpt is incomplete -- the wrapped class definitions
# (star, star_db, ...) and the inner function of _swig_add_metaclass are not
# visible here, although they are referenced below.
# ---------------------------------------------------------------------------
if _swig_python_version_info < (2, 7, 0):
    raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
    from . import _beast
else:
    import _beast
# Python 2/3 compatibility alias for the builtins module.
try:
    import builtins as __builtin__
except ImportError:
    import __builtin__
def _swig_add_metaclass(metaclass):
    """Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
    return wrapper
# Module-level constants exported by the C extension.
PI = _beast.PI
TWOPI = _beast.TWOPI
# Register star in _beast:
_beast.star_swigregister(star)
# Global C variables exposed through the cvar object.
cvar = _beast.cvar
# Register star_db in _beast:
_beast.star_db_swigregister(star_db)
# Register star_fov in _beast:
_beast.star_fov_swigregister(star_fov)
# Register star_query in _beast:
_beast.star_query_swigregister(star_query)
# Register constellation in _beast:
_beast.constellation_swigregister(constellation)
# Register constellation_pair in _beast:
_beast.constellation_pair_swigregister(constellation_pair)
# Register constellation_lt in _beast:
_beast.constellation_lt_swigregister(constellation_lt)
# Register constellation_db in _beast:
_beast.constellation_db_swigregister(constellation_db)
# Register match_result in _beast:
_beast.match_result_swigregister(match_result)
# Register db_match in _beast:
_beast.db_match_swigregister(db_match)
| 39.57554 | 150 | 0.712719 |
b25c863ab03cce95c0e614b48a6296f7ce35eeb0 | 2,522 | py | Python | development_playgrounds/transformation_planar_flow_test.py | ai-di/Brancher | 01d51137b0e6fc81512994c21cc3a19287353767 | [
"MIT"
] | 208 | 2019-06-15T13:48:40.000Z | 2021-10-16T05:03:46.000Z | development_playgrounds/transformation_planar_flow_test.py | ai-di/Brancher | 01d51137b0e6fc81512994c21cc3a19287353767 | [
"MIT"
] | 18 | 2019-06-17T11:22:13.000Z | 2019-09-26T10:45:59.000Z | development_playgrounds/transformation_planar_flow_test.py | ai-di/Brancher | 01d51137b0e6fc81512994c21cc3a19287353767 | [
"MIT"
] | 32 | 2019-06-15T19:08:53.000Z | 2020-02-16T13:39:41.000Z | import matplotlib.pyplot as plt
import numpy as np
import torch
from brancher.variables import ProbabilisticModel
from brancher.standard_variables import NormalVariable, DeterministicVariable, LogNormalVariable
import brancher.functions as BF
from brancher.visualizations import plot_density
from brancher.transformations import PlanarFlow
from brancher import inference
from brancher.visualizations import plot_posterior
# Model
# Model: M-dimensional latent mean y with a standard-normal prior, an
# observed variable d centered on y, and y0 tracking component 1 of y
# (used below for the posterior plots).
M = 8
y = NormalVariable(torch.zeros((M,)), 1.*torch.ones((M,)), "y")
y0 = DeterministicVariable(y[1], "y0")
d = NormalVariable(y, torch.ones((M,)), "d")
model = ProbabilisticModel([d, y, y0])
# get samples
# Observe 55 samples of d generated with the true mean fixed at 1.
d.observe(d.get_sample(55, input_values={y: 1.*torch.ones((M,))}))
# Variational distribution
# Normalizing-flow posterior: a standard-normal base z pushed through two
# stacked planar flows, each with learnable parameters (w, u, b).
u1 = DeterministicVariable(torch.normal(0., 1., (M, 1)), "u1", learnable=True)
w1 = DeterministicVariable(torch.normal(0., 1., (M, 1)), "w1", learnable=True)
b1 = DeterministicVariable(torch.normal(0., 1., (1, 1)), "b1", learnable=True)
u2 = DeterministicVariable(torch.normal(0., 1., (M, 1)), "u2", learnable=True)
w2 = DeterministicVariable(torch.normal(0., 1., (M, 1)), "w2", learnable=True)
b2 = DeterministicVariable(torch.normal(0., 1., (1, 1)), "b2", learnable=True)
z = NormalVariable(torch.zeros((M, 1)), torch.ones((M, 1)), "z", learnable=True)
Qy = PlanarFlow(w2, u2, b2)(PlanarFlow(w1, u1, b1)(z))
Qy.name = "y"
Qy0 = DeterministicVariable(Qy[1], "y0")
#Qy._get_sample(4)[Qy].shape
variational_model = ProbabilisticModel([Qy, Qy0])
model.set_posterior_model(variational_model)
# Inference #
# Fit the flow posterior and keep its loss curve for comparison.
inference.perform_inference(model,
                            number_iterations=400,
                            number_samples=100,
                            optimizer="Adam",
                            lr=0.5)
loss_list1 = model.diagnostics["loss curve"]
#Plot posterior
plot_posterior(model, variables=["y0"])
plt.show()
# Variational distribution
# Baseline: a plain learnable Gaussian posterior on y, fitted the same way.
Qy = NormalVariable(torch.zeros((M,)), 0.5*torch.ones((M,)), "y", learnable=True)
Qy0 = DeterministicVariable(Qy[1], "y0")
variational_model = ProbabilisticModel([Qy, Qy0])
model.set_posterior_model(variational_model)
# Inference #
inference.perform_inference(model,
                            number_iterations=400,
                            number_samples=100,
                            optimizer="Adam",
                            lr=0.01)
loss_list2 = model.diagnostics["loss curve"]
#Plot posterior
plot_posterior(model, variables=["y0"])
plt.show()
# Overlay both loss curves (flow vs. Gaussian baseline).
plt.plot(loss_list1)
plt.plot(loss_list2)
plt.show()
| 31.525 | 96 | 0.676447 |
b25e6638db74f47962fb3638fca683037c34ed82 | 3,837 | py | Python | src/onegov/people/models/membership.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/people/models/membership.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/people/models/membership.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | from onegov.core.orm import Base
from onegov.core.orm.mixins import ContentMixin
from onegov.core.orm.mixins import TimestampMixin
from onegov.core.orm.mixins import UTCPublicationMixin
from onegov.core.orm.types import UUID
from onegov.search import ORMSearchable
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import Text
from sqlalchemy.orm import backref
from sqlalchemy.orm import object_session
from sqlalchemy.orm import relationship
from uuid import uuid4
| 31.195122 | 84 | 0.661194 |
b2600eaa1ce4c305aedb5991b27f9834888e24d3 | 512 | py | Python | setup.py | drrobotk/multilateral_index_calc | 7b1cf2f178e4407167c90ed64743f9357da1d4f0 | [
"MIT"
] | 3 | 2021-11-27T00:00:56.000Z | 2022-02-14T09:58:33.000Z | setup.py | drrobotk/multilateral_index_calc | 7b1cf2f178e4407167c90ed64743f9357da1d4f0 | [
"MIT"
] | null | null | null | setup.py | drrobotk/multilateral_index_calc | 7b1cf2f178e4407167c90ed64743f9357da1d4f0 | [
"MIT"
] | null | null | null | from gettext import find
from setuptools import setup, find_packages
# Distribution metadata.  The project uses a src-layout: importable code
# lives under src/, hence the find_packages(where="src") / package_dir pair.
setup(
    name='PriceIndexCalc',
    version='0.1-dev9',
    description='Price Index Calculator using bilateral and multilateral methods',
    author='Dr. Usman Kayani',
    url='https://github.com/drrobotk/PriceIndexCalc',
    author_email='usman.kayani@ons.gov.uk',
    license='MIT',
    packages=find_packages(where="src"),
    package_dir={'': 'src'},
    # Runtime dependencies.
    install_requires=['pandas', 'numpy', 'scipy'],
    include_package_data=True,
) | 32 | 82 | 0.703125 |
b2604e0c3e4e10fe06252e6006860caca1b86c21 | 480 | py | Python | cryptofeed_werks/bigquery_storage/constants.py | globophobe/crypto-tick-data | 7ec5d1e136b9bc27ae936f55cf6ab7fe5e37bda4 | [
"MIT"
] | null | null | null | cryptofeed_werks/bigquery_storage/constants.py | globophobe/crypto-tick-data | 7ec5d1e136b9bc27ae936f55cf6ab7fe5e37bda4 | [
"MIT"
] | null | null | null | cryptofeed_werks/bigquery_storage/constants.py | globophobe/crypto-tick-data | 7ec5d1e136b9bc27ae936f55cf6ab7fe5e37bda4 | [
"MIT"
] | null | null | null | import os
# Feature flag: True when the optional google-cloud-bigquery dependency is
# importable, False otherwise.  Assume success, then flip on ImportError.
BIGQUERY = True
try:
    from google.cloud import bigquery  # noqa
except ImportError:
    BIGQUERY = False

# Names of the environment variables used to configure BigQuery access.
GOOGLE_APPLICATION_CREDENTIALS = "GOOGLE_APPLICATION_CREDENTIALS"
BIGQUERY_LOCATION = "BIGQUERY_LOCATION"
BIGQUERY_DATASET = "BIGQUERY_DATASET"
| 20 | 65 | 0.729167 |
b261027bb447ffd4f357da57323ee5f92a50b62a | 599 | py | Python | todoapp/todos/models.py | Buddheshwar-Nath-Keshari/test-ubuntu | 5e801ecd21503f160e52c091120a1a0c80c6600d | [
"MIT"
] | null | null | null | todoapp/todos/models.py | Buddheshwar-Nath-Keshari/test-ubuntu | 5e801ecd21503f160e52c091120a1a0c80c6600d | [
"MIT"
] | null | null | null | todoapp/todos/models.py | Buddheshwar-Nath-Keshari/test-ubuntu | 5e801ecd21503f160e52c091120a1a0c80c6600d | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import smart_text as smart_unicode
from django.utils.translation import ugettext_lazy as _
| 31.526316 | 81 | 0.707846 |
b2612d097a5e022b18b2c108ce7b4e1fdc16b1dc | 6,054 | py | Python | ultra_config_tests/unit_tests/test_ultra_config.py | timmartin19/ultra-config | 9af6a1313f49bf86b230be8e8beeb1c3479b9ab6 | [
"MIT"
] | 1 | 2017-01-05T18:32:22.000Z | 2017-01-05T18:32:22.000Z | ultra_config_tests/unit_tests/test_ultra_config.py | timmartin19/ultra-config | 9af6a1313f49bf86b230be8e8beeb1c3479b9ab6 | [
"MIT"
] | 239 | 2018-08-10T19:28:42.000Z | 2022-03-28T09:40:20.000Z | ultra_config_tests/unit_tests/test_ultra_config.py | timmartin19/ultra-config | 9af6a1313f49bf86b230be8e8beeb1c3479b9ab6 | [
"MIT"
] | 1 | 2019-06-10T14:14:15.000Z | 2019-06-10T14:14:15.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import unittest
from ultra_config import simple_config, load_json_file_settings, \
load_configparser_settings, load_python_object_settings, load_dict_settings, \
UltraConfig
from ultra_config_tests.unit_tests import default_config
| 38.316456 | 103 | 0.66964 |
b264318ef812ccb5494cb1fbb53e013385e1b79c | 970 | py | Python | leetcode/87. Scramble String.py | CSU-FulChou/IOS_er | 4286677854c4afe61f745bfd087527e369402dc7 | [
"MIT"
] | 2 | 2020-02-10T15:20:03.000Z | 2020-02-23T07:23:57.000Z | leetcode/87. Scramble String.py | CSU-FulChou/IOS_er | 4286677854c4afe61f745bfd087527e369402dc7 | [
"MIT"
] | null | null | null | leetcode/87. Scramble String.py | CSU-FulChou/IOS_er | 4286677854c4afe61f745bfd087527e369402dc7 | [
"MIT"
] | 1 | 2020-02-24T04:46:44.000Z | 2020-02-24T04:46:44.000Z | # 2021.04.16 hard: | 33.448276 | 98 | 0.5 |
b264888cc9f1eb496c9df03db998069fffdf3f86 | 3,079 | py | Python | packaging/setup/plugins/ovirt-engine-setup/all-in-one/super_user.py | SunOfShine/ovirt-engine | 7684597e2d38ff854e629e5cbcbb9f21888cb498 | [
"Apache-2.0"
] | 1 | 2021-02-02T05:38:35.000Z | 2021-02-02T05:38:35.000Z | packaging/setup/plugins/ovirt-engine-setup/all-in-one/super_user.py | SunOfShine/ovirt-engine | 7684597e2d38ff854e629e5cbcbb9f21888cb498 | [
"Apache-2.0"
] | null | null | null | packaging/setup/plugins/ovirt-engine-setup/all-in-one/super_user.py | SunOfShine/ovirt-engine | 7684597e2d38ff854e629e5cbcbb9f21888cb498 | [
"Apache-2.0"
] | null | null | null | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
AIO super user password plugin.
"""
import gettext
_ = lambda m: gettext.dgettext(message=m, domain='ovirt-engine-setup')
from otopi import util
from otopi import plugin
from otopi import constants as otopicons
from ovirt_engine_setup import constants as osetupcons
# vim: expandtab tabstop=4 shiftwidth=4
| 28.775701 | 76 | 0.616759 |
b2666be5a27dd8e787680368717223bfc00f077e | 4,296 | py | Python | deploy/deploy.py | ColdStack-Network/blockchain | 3852f888e9d184a4fbc71365514a55dd9c510adb | [
"Unlicense"
] | null | null | null | deploy/deploy.py | ColdStack-Network/blockchain | 3852f888e9d184a4fbc71365514a55dd9c510adb | [
"Unlicense"
] | null | null | null | deploy/deploy.py | ColdStack-Network/blockchain | 3852f888e9d184a4fbc71365514a55dd9c510adb | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import argparse
import subprocess
import json
parser = argparse.ArgumentParser(description='Deploy blockchain')
parser.add_argument('--validator-node',
help='validator node ssh address. First node becomes boot node and active validator.',
nargs='+'
)
parser.add_argument('--api-node', help='api node ssh address', nargs='+', default=[])
parser.add_argument('--boot-node-addr', help='first (boot) node ip address', required=True)
parser.add_argument('--secrets', help='secrets file', required=True)
parser.add_argument('--env', help='production or staging', choices=['prod', 'stage'], required=True)
parser.add_argument('--tag', help='tag of docker image', required=True)
parser.add_argument('--with-existing-data',
help='Do not initialize data directory, just start containers',
action='store_true'
)
args = parser.parse_args()
print('Parsed CLI args', args)
secrets = read_secrets_file()
for i, host in enumerate(args.validator_node):
run_validator_node(host, is_boot_node = (i == 0), is_validator = (i == 0))
for host in args.api_node:
run_api_node(host)
| 28.078431 | 100 | 0.657588 |
b267e740ceab58f8898f41e8edaa0a8a6747e59b | 6,299 | py | Python | 4_neural_networks.py | scientificprogrammer123/Udacity_Machine-Learning | e6f5a73724ac51c9dcc9c28ee1652991982598ca | [
"MIT"
] | null | null | null | 4_neural_networks.py | scientificprogrammer123/Udacity_Machine-Learning | e6f5a73724ac51c9dcc9c28ee1652991982598ca | [
"MIT"
] | null | null | null | 4_neural_networks.py | scientificprogrammer123/Udacity_Machine-Learning | e6f5a73724ac51c9dcc9c28ee1652991982598ca | [
"MIT"
] | 1 | 2021-04-14T22:04:52.000Z | 2021-04-14T22:04:52.000Z | # lesson 1: neural networks
# cell body, neuron, axon, synapse
# spike trains travel down the axon, and causes excitation to occur at other axons.
# a computation unit.
#
# x1 -> w1 ->
# x2 -> w2 -> theta -> y
# x3 -> w3 ->
#
# sum_{=1}^{k} xi*wi, activation
# >=theta, firing threshold
#
# For perceptron, yes: y=1
# no: y=0
#
# lesson 2, ANN
# x1 1 w1 0.5 theta=0, y=0
# x2 0 w2 0.6
# x3 -1.5 w3 1
# lesson 3, how powerful is a perceptron? and
# y = 0,1
# w1 = 1/2
# w2 = 1/2
# theta = 3/4
#
# if x1=0, x2*1/2=3/4, x2=3/2
# if x2=0, x1*1/2=3/4, x1=3/2
#
# r = return 0, g = return 1
#
# 1 g g g g g
# 0.75 rg g g g g
# 0.5 r rg g g g
# 0.25 r r rg g g
# 0 r r r rg g
# 0 0.25 0.5 0.75 1
# lesson 4, how powerful is a perceptron 4?
# if we focus on x1 E {0,1}, x2 E {0,1}
# what is y? y is and
# lesson 5, how powerful is a perceptron 5?
# w1 = 0.5
# w2 = 0.5
# theta = 1/4
#
# if we focus on x1 E {0,1}, x2 E {0,1}
# what is y? y is or
#
#
# 1 g g g g g
# 0.75 g g g g g
# 0.5 g g g g g
# 0.25 rg g g g g
# 0 r rg g g g
# 0 0.25 0.5 0.75 1
# lesson 6, how powerful is a perceptron? not
# x1=1, y=0
# x1=0, y=1
# w1=-0.5, theta=0
#
# G R
# -1 0 1 2
#
# and or not are all expressible as perceptron units
# lesson 7, xor function
# theta = 0.5
# x1-> -> 0.5 ->
# and -> -1 -> or -> y
# x2-> -> 0.5 ->
#
# x1 x2 and or xor=or-and
# 0 0 0 0 0
# 0 1 0 1 1
# 1 0 0 1 1
# 1 1 1 1 0
# lesson 8, perceptron training
# perceptron rule -> single unit
# wi = wi + delta wi
# delta wi = nu(yi- yi^hat)xi
# yi^hat = (sum_i wi yi >= 0)
#
# y: target
# y_hat: output
# nu: learning rate
# x: input
#
# repeat x,y
# bias x y (0/1)
# | xxxx y
# | xxxx y
# | xxxx y
# | xxxx y
# | xxxx y
# | xxxx y
# | xxxx y
# | xxxx y
# theta w
#
# y y_hat y-y_hat
# 0 0 0
# 0 1 -1
# 1 0 1
# 1 1 0
#
# 2D training set, learn a half plane
# if the half plane is linearly separable, then perceptron rule will find it in
# finite number of iterations.
#
# if the data is not linearly seperable, see if it ever stops,
# problem, this algorithm never stops,
# so run while there are some errors, if you solve the halting problem then you
# can solve the halting problem.
# lesson 9, gradient descent
# need something that can work for linearly non-separability.
#
# a = sum_i x_i w_i
# y^hat = {a>=0}
# E(w) = 1/2 sum_{(x,y) E D} (y-a)^2
# d E(w) / d w_i = d/dw_i 1/2 sum_{(x,y) E D} (y-a)^2
# = sum_{(x,y) E D} (y-a) d/dw_i -sum_i x_i w_i'
# = sum_{(x,y) E D} (y-a)(-x_i) <- looks a lot like the perceptron rule
# lesson 10, comparison of learning rules
# delta w_i = nu (y-y^hat) x_i, perceptron: guarantee of finite convergence, in the case of linearly separable
# delta w_i = nu (y-a) x_i, gradient descent: calculus, robust, converge to local optimum
# activation, vs activation and thresholding it
# lesson 11, comparison of learning rules
# quiz: why not do gradient descent on y^hat
# intractable, no
# non differentiable, yes
# grows too fast, no
# multiple answers, no
# lesson 12, sigmoid
# sigma(a) = 1 / (1+e^(-a))
# as a -> -infinity, sigma(a)->0
# as a -> +infinity, sigma(a)->1
# D sigma(a) = sigma(a) (1-sigma(a))
# lesson 13, neural network sketch
# input, hidden layers, hidden layers, output
#
# whole thing is differentiable,
#
# back propogation, computationally beneficial organization of the chain rule
# we are just computing the derivatives with respect to the different weights
# in the network, all in one convenient way, that has, this lovely interpretation
# of having information flowing from the inputs to the outputs. And then error
# information flowing back from the outputs towards the inputs, and that tells you
# how to compute all the derivatives. And then, therefore how to make all the weight
# updates to make the network produce something more like what you want it to
# produce. so this is where the learning is actually taking place.
#
# the error function can have many local minimums, or local optima, stuck
# lesson 14, optimizing weights
# -> gradient descent
# -> advanced optimization methods, optimization and learning are the same according to people
#
# momentum terms in the gradient, in gradient descent, continue in direction,
# higher order derivatives, combinations of weights hamiltonia, and what not
# randmized optimization
# penalty for complexity
# philosophy based optimization, has this been tried?
#
# add more nodes,
# add more layers,
# higher weights
# these parameters make the network more complex
# make it as simple as possible.
# lesson 15, restrition bias
# restriction bias tells you the representational power, i.e. what you are able to represent
# set of hypotheses we will consider
# perceptron units are linear
# half spaces
# sigmoids
# complex
# much more complex, not as much
# Boolean: network of threshold-like units
# continuous function: connected, no jumps, hidden
# arbitrary: stitched together
#
# dangers of overfitting: cross validation
# error - iterations
# cross validation error can increase again, so if it works, then just stop
# lesson 16, preference bias
# preference bias tells you, given two representations, why I would prefer one
# over the other.
# prefer correct tree, prefer shorter tree
# how do we start weights:
# small, random values, for weights, avoid local minima, variability,
# large weights lead to overfitting,
# small random values give a simpler explanation,
# neural networks implement the simpler explanation (Occam's razor)
# don't make something more complex unnecessarily
# better generalization
# lesson 17, summary
# perceptron, linear threshold unit, can create boolean function
# perceptron rule - finite time for linearly separable
# general differentiable - backprop and gradient descent
# preference/restriction bias of neural networks | 29.712264 | 111 | 0.62883 |
b26837a3549f4fc5b6bc64ba12abe7c4d44f56e0 | 267 | py | Python | gitprivacy/dateredacter/__init__.py | fapdash/git-privacy | 357a2952d8feb9e193373e18284e57a26d14b96c | [
"BSD-2-Clause"
] | 7 | 2019-10-15T08:30:02.000Z | 2021-12-26T20:37:18.000Z | gitprivacy/dateredacter/__init__.py | fapdash/git-privacy | 357a2952d8feb9e193373e18284e57a26d14b96c | [
"BSD-2-Clause"
] | 30 | 2019-04-22T15:08:34.000Z | 2022-02-16T20:39:28.000Z | gitprivacy/dateredacter/__init__.py | cburkert/pyGitPrivacy | d522e62f85446e7554f6b66b5287f9c3a6aa33c2 | [
"BSD-2-Clause"
] | 2 | 2021-06-22T18:17:01.000Z | 2021-12-21T09:48:33.000Z | import abc
from datetime import datetime
from .reduce import ResolutionDateRedacter
| 19.071429 | 54 | 0.71161 |
b26aa848ad9a71009d6da1cdab45cb44abfe1110 | 2,178 | py | Python | functions/cm_plotter.py | evanmy/keymorph | 5b57d86047ca13c73f494e21fdf271f261912f84 | [
"MIT"
] | null | null | null | functions/cm_plotter.py | evanmy/keymorph | 5b57d86047ca13c73f494e21fdf271f261912f84 | [
"MIT"
] | null | null | null | functions/cm_plotter.py | evanmy/keymorph | 5b57d86047ca13c73f494e21fdf271f261912f84 | [
"MIT"
] | null | null | null | import torch
from skimage.filters import gaussian
def blur_cm_plot(Cm_plot, sigma):
    """Gaussian-smooth keypoint/center-of-mass maps for visualization.

    Arguments
    ---------
    Cm_plot : tensor of per-region spatial maps, [n_batch, n_reg, d0, d1, d2]
    sigma : standard deviation of the Gaussian blur

    Return
    ------
    Tensor with the same layout where every region map has been blurred.
    """
    smoothed_batches = []
    for cm_plot in Cm_plot:
        # Blur each region volume independently; skimage's gaussian
        # returns a numpy array, so convert back to a float tensor.
        regions = [
            torch.from_numpy(gaussian(cm_plot[r], sigma=sigma, mode='nearest'))
            .float()
            .unsqueeze(0)
            for r in range(cm_plot.shape[0])
        ]
        smoothed_batches.append(torch.cat(regions, 0).unsqueeze(0))
    return torch.cat(smoothed_batches, 0)
def get_cm_plot(Y_cm, dim0, dim1, dim2):
    """Rasterize keypoint/center-of-mass coordinates into volumes.

    Arguments
    ---------
    Y_cm : normalized coordinates in [-1, 1], shape [n_batch, 3, n_reg]
    dim0, dim1, dim2 : spatial size of the output volume

    Return
    ------
    Tensor [n_batch, n_reg, dim0, dim1, dim2] containing 1 at the voxel
    nearest each keypoint and 0 everywhere else.
    """
    # Normalized grid coordinates along each output axis.
    axis0 = torch.linspace(-1, 1, dim0).float()
    axis1 = torch.linspace(-1, 1, dim1).float()
    axis2 = torch.linspace(-1, 1, dim2).float()
    batch_plots = []
    for coords in Y_cm:
        n_reg = coords.shape[1]
        cm_plot = torch.zeros(n_reg, dim0, dim1, dim2)
        for r in range(n_reg):
            # Note the axis flip: coordinate row 2 indexes the first
            # spatial dim and row 0 the last one.
            i0 = torch.argmin((axis0 - coords[2, r]) ** 2).item()
            i1 = torch.argmin((axis1 - coords[1, r]) ** 2).item()
            i2 = torch.argmin((axis2 - coords[0, r]) ** 2).item()
            cm_plot[r, i0, i1, i2] = 1
        batch_plots.append(cm_plot.unsqueeze(0))
    return torch.cat(batch_plots, 0)
| 26.888889 | 82 | 0.5427 |
b26afc8ce026caaf2cd97fb955bcaaad804230cc | 3,790 | py | Python | examples/cooperative_work_examples.py | hfs/maproulette-python-client | 0a3e4b68af7892700463c2afc66a1ae4dcbf0825 | [
"Apache-2.0"
] | null | null | null | examples/cooperative_work_examples.py | hfs/maproulette-python-client | 0a3e4b68af7892700463c2afc66a1ae4dcbf0825 | [
"Apache-2.0"
] | null | null | null | examples/cooperative_work_examples.py | hfs/maproulette-python-client | 0a3e4b68af7892700463c2afc66a1ae4dcbf0825 | [
"Apache-2.0"
] | null | null | null | import maproulette
import json
import base64
# Create a configuration object for MapRoulette using your API key:
config = maproulette.Configuration(api_key="API_KEY")
# Create an API objects with the above config object:
api = maproulette.Task(config)
# Setting a challenge ID in which we'll place our cooperative task
challenge_id = 14452
# We'll start by creating some 'child' operations to apply to the target objects add them to a list:
child_operations_list = [maproulette.ChildOperationModel(operation="setTags",
data={"test_tag_1": "True",
"test_tag_2": "True",
"test_tag_3": "True"}).to_dict(),
maproulette.ChildOperationModel(operation="setTags",
data={"test_tag_4": "True"}).to_dict(),
maproulette.ChildOperationModel(operation="setTags",
data={"test_tag_5": "True"}).to_dict()]
# Now we'll pass these operations into a 'parent' operation list to specify the objects to which the changes
# will be applied:
test_parent_relation = [maproulette.ParentOperationModel(operation_type="modifyElement",
element_type="way",
osm_id="175208404",
child_operations=child_operations_list).to_dict()]
# NOTE: test_2 below duplicates the model above; it exists only to show
# that argument errors are flagged when passed through the constructor
# but not when applied via the setters, and is otherwise unused.
test_2 = maproulette.ParentOperationModel(operation_type="modifyElement",
element_type="way",
osm_id="175208404",
child_operations=child_operations_list)
# Now that we have a Parent Operation containing the Child Operations we'd like to implement, we
# can pass this into our Cooperative Work model:
test_cooperative_work = maproulette.CooperativeWorkModel(version=2,
type=1,
parent_operations=test_parent_relation).to_dict()
# Now we can create a basic task to apply these suggested changes to:
with open('data/Example_Geometry.geojson', 'r') as data_file:
data = json.loads(data_file.read())
test_task = maproulette.TaskModel(name="Test_Coop_Task_Kastellet",
parent=challenge_id,
geometries=data,
cooperative_work=test_cooperative_work).to_dict()
# Finally, we'll pass our task object to into the create_task method to call the /task
# endpoint, creating this new task with our cooperative work model applied
print(json.dumps(api.create_task(test_task), indent=4, sort_keys=True))
# Alternatively, cooperative work can be populated as in-progress edits via an OSM changefile (osc file)
# as 'type 2' cooperative work:
with open('data/ExampleChangefile.osc', 'rb') as data_file:
osc_file = base64.b64encode(data_file.read()).decode('ascii')
test_osc_cooperative_work = maproulette.CooperativeWorkModel(type=2,
content=osc_file).to_dict()
test_osc_task = maproulette.TaskModel(name="Test_Coop_Task_Kastellet_OSC_2",
parent=challenge_id,
geometries=data,
cooperative_work=test_osc_cooperative_work).to_dict()
print(json.dumps(api.create_task(test_osc_task), indent=4, sort_keys=True))
| 50.533333 | 108 | 0.58628 |
b26b274e30196d87e8ffb7a61d1cdb928d240314 | 1,101 | py | Python | src/biopsykit/sleep/sleep_wake_detection/algorithms/_base.py | Zwitscherle/BioPsyKit | 7200c5f1be75c20f53e1eb4c991aca1c89e3dd88 | [
"MIT"
] | 10 | 2020-11-05T13:34:55.000Z | 2022-03-11T16:20:10.000Z | src/biopsykit/sleep/sleep_wake_detection/algorithms/_base.py | Zwitscherle/BioPsyKit | 7200c5f1be75c20f53e1eb4c991aca1c89e3dd88 | [
"MIT"
] | 14 | 2021-03-11T14:43:52.000Z | 2022-03-10T19:44:57.000Z | src/biopsykit/sleep/sleep_wake_detection/algorithms/_base.py | Zwitscherle/BioPsyKit | 7200c5f1be75c20f53e1eb4c991aca1c89e3dd88 | [
"MIT"
] | 3 | 2021-09-13T13:14:38.000Z | 2022-02-19T09:13:25.000Z | """Module for sleep/wake detection base class."""
from biopsykit.utils._types import arr_t
from biopsykit.utils.datatype_helper import SleepWakeDataFrame
| 29.756757 | 110 | 0.62852 |
b26b2d344b00d14f7c80d63267fca336b474dfed | 287 | py | Python | FluentPython/ch02/cartesian.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | 1 | 2020-10-12T13:33:29.000Z | 2020-10-12T13:33:29.000Z | FluentPython/ch02/cartesian.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | null | null | null | FluentPython/ch02/cartesian.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | 1 | 2016-11-09T07:28:45.000Z | 2016-11-09T07:28:45.000Z | #!/usr/bin/env python
colors = ['white', 'black']
sizes = ['S', 'M', 'L']
# Cartesian product, size-major: all colors for 'S', then 'M', then 'L'.
tshirts = [(color, size) for size in sizes for color in colors]
print(tshirts)
# Same pairs, color-major: every size for 'white', then for 'black'.
tshirts = [(color, size) for color in colors for size in sizes]
print(tshirts)
| 22.076923 | 46 | 0.54007 |
b26b3c9bc4d2fbf8cbbb44a23143622070070eef | 316 | py | Python | create.py | devanshsharma22/ONE | 27450ff2e9e07164527043a161274495ef3a1178 | [
"CC-BY-3.0"
] | null | null | null | create.py | devanshsharma22/ONE | 27450ff2e9e07164527043a161274495ef3a1178 | [
"CC-BY-3.0"
] | null | null | null | create.py | devanshsharma22/ONE | 27450ff2e9e07164527043a161274495ef3a1178 | [
"CC-BY-3.0"
] | null | null | null | from flask import Flask
from models import *
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False
db.init_app(app)
if __name__ == "__main__":
with app.app_context():
main()
| 15.8 | 70 | 0.708861 |
b26beef2d3bbff1212787d1023080b96af3af78b | 1,160 | py | Python | jmetex/main.py | innovocloud/jmetex | 5e7c4d9695174fe2f5c3186b8bbb41857e9715df | [
"Apache-2.0"
] | 2 | 2018-02-19T14:21:31.000Z | 2018-03-15T03:16:05.000Z | jmetex/main.py | innovocloud/jmetex | 5e7c4d9695174fe2f5c3186b8bbb41857e9715df | [
"Apache-2.0"
] | null | null | null | jmetex/main.py | innovocloud/jmetex | 5e7c4d9695174fe2f5c3186b8bbb41857e9715df | [
"Apache-2.0"
] | null | null | null | import sys
import time
import argparse
from prometheus_client import start_http_server, Metric, REGISTRY, Summary
from .interfacecollector import InterfaceCollector
from .opticalcollector import OpticalCollector
| 40 | 96 | 0.675862 |
b26ccd47df1988e1e17fa1b203b55759ef55fe03 | 472 | py | Python | vispp/io.py | c-cameron/vispp | a985c0fd5a7add968968ec025da17ad0c5ab0f73 | [
"BSD-3-Clause"
] | null | null | null | vispp/io.py | c-cameron/vispp | a985c0fd5a7add968968ec025da17ad0c5ab0f73 | [
"BSD-3-Clause"
] | null | null | null | vispp/io.py | c-cameron/vispp | a985c0fd5a7add968968ec025da17ad0c5ab0f73 | [
"BSD-3-Clause"
] | null | null | null | from matplotlib.backends.backend_pdf import PdfPages
def better_savefig(fig, figfile, format="pdf", **kwargs):
    """Drop-in replacement for ``fig.savefig``.

    PDF output is written without a CreationDate entry, so re-saving an
    unchanged figure produces an identical file (no spurious "modified"
    status in git). Any other format falls through to plain savefig.
    """
    if format != "pdf":
        fig.savefig(figfile, **kwargs)
        return
    # Strip the CreationDate metadata so repeated saves are reproducible.
    with PdfPages(figfile, metadata={"CreationDate": None}) as pdf:
        pdf.savefig(fig, **kwargs)
| 31.466667 | 71 | 0.667373 |
b26de06366dede83defa5d174c6610df50dcc9a0 | 1,133 | py | Python | mappings.py | timeseries-ru/EL | 2528fe50b92efd0b28611ddd9b531d085a12d0df | [
"MIT"
] | null | null | null | mappings.py | timeseries-ru/EL | 2528fe50b92efd0b28611ddd9b531d085a12d0df | [
"MIT"
] | null | null | null | mappings.py | timeseries-ru/EL | 2528fe50b92efd0b28611ddd9b531d085a12d0df | [
"MIT"
] | null | null | null | import sklearn.decomposition as decomposition
import sklearn.preprocessing as preprocessing
import sklearn.linear_model as linear_model
import sklearn.ensemble as ensemble
import sklearn.cluster as cluster
import sklearn.neighbors as neighbors
import sklearn.neural_network as neural_network
| 40.464286 | 77 | 0.752868 |
b26eb6867abd481f8fa7df4a751d92de7df14d0f | 231 | py | Python | Find_the_Runner_Up_Score_.py | KrShivanshu/264136_Python_Daily | 8caeae12a495509675544b957af3ffbaa50e6ed2 | [
"CC0-1.0"
] | null | null | null | Find_the_Runner_Up_Score_.py | KrShivanshu/264136_Python_Daily | 8caeae12a495509675544b957af3ffbaa50e6ed2 | [
"CC0-1.0"
] | null | null | null | Find_the_Runner_Up_Score_.py | KrShivanshu/264136_Python_Daily | 8caeae12a495509675544b957af3ffbaa50e6ed2 | [
"CC0-1.0"
] | null | null | null | if __name__ == '__main__':
n = int(input())
arr = map(int, input().split())
max = -9999999
max2 = -9999999
for i in arr:
if(i>max):
max2=max
max=i
elif i>max2 and max>i:
max2=i
print(max2) | 16.5 | 35 | 0.532468 |
b26fb5509497d72210ea4f3275edb63a6b2bc440 | 85 | py | Python | tests/__init__.py | doublechiang/qsmcmd | 63e31390de020472c6ff4284cbe2d2c5147cb13d | [
"MIT"
] | 1 | 2021-05-07T09:57:01.000Z | 2021-05-07T09:57:01.000Z | tests/__init__.py | doublechiang/qsmcmd | 63e31390de020472c6ff4284cbe2d2c5147cb13d | [
"MIT"
] | 30 | 2017-08-24T21:21:03.000Z | 2021-01-21T19:32:36.000Z | tests/__init__.py | doublechiang/qsmcmd | 63e31390de020472c6ff4284cbe2d2c5147cb13d | [
"MIT"
] | null | null | null | import os, sys
# Make the package under ../src importable when the test suite is run
# from this tests/ directory without an installed distribution.
sys.path.insert(0, os.path.join(os.path.dirname(__file__),'../src'))
| 21.25 | 68 | 0.694118 |
b26fea559278660731c5b3eb16d98ce810c85f89 | 7,669 | py | Python | mindspore/python/mindspore/rewrite/namer.py | httpsgithu/mindspore | c29d6bb764e233b427319cb89ba79e420f1e2c64 | [
"Apache-2.0"
] | 1 | 2022-02-23T09:13:43.000Z | 2022-02-23T09:13:43.000Z | mindspore/python/mindspore/rewrite/namer.py | 949144093/mindspore | c29d6bb764e233b427319cb89ba79e420f1e2c64 | [
"Apache-2.0"
] | null | null | null | mindspore/python/mindspore/rewrite/namer.py | 949144093/mindspore | c29d6bb764e233b427319cb89ba79e420f1e2c64 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Unique name producer for target, name of node, class name, etc."""
from typing import Union
from .node import Node
from .api.node_type import NodeType
| 36.519048 | 120 | 0.604903 |
b270dcf5ee3dfde551682fd9a8c7f93e84cb34a6 | 3,391 | py | Python | tests/test_autopilot.py | aidanmelen/bobcat_miner | 5ce85e17e93332a126db0a196c29b01433dc90d4 | [
"Apache-2.0"
] | 6 | 2022-01-06T05:50:14.000Z | 2022-03-25T09:41:34.000Z | tests/test_autopilot.py | aidanmelen/bobcat_miner | 5ce85e17e93332a126db0a196c29b01433dc90d4 | [
"Apache-2.0"
] | 9 | 2022-01-19T03:16:33.000Z | 2022-02-20T20:37:56.000Z | tests/test_autopilot.py | aidanmelen/bobcat_miner | 5ce85e17e93332a126db0a196c29b01433dc90d4 | [
"Apache-2.0"
] | 3 | 2022-01-06T05:50:00.000Z | 2022-02-15T16:24:58.000Z | from unittest.mock import patch, call, PropertyMock, AsyncMock, MagicMock, mock_open
import unittest
from bobcat_miner import BobcatAutopilot, Bobcat, OnlineStatusCheck
import mock_endpoints
if __name__ == "__main__":
unittest.main()
| 37.677778 | 100 | 0.604247 |
b271a810a148e7642fe7f668a6757b9d19a1951c | 5,687 | py | Python | fig/project.py | kazoup/fig | d34dc45b783f830ed64988c3c8ffb3d4f550d059 | [
"BSD-3-Clause"
] | null | null | null | fig/project.py | kazoup/fig | d34dc45b783f830ed64988c3c8ffb3d4f550d059 | [
"BSD-3-Clause"
] | null | null | null | fig/project.py | kazoup/fig | d34dc45b783f830ed64988c3c8ffb3d4f550d059 | [
"BSD-3-Clause"
] | 1 | 2019-12-11T01:08:39.000Z | 2019-12-11T01:08:39.000Z | from __future__ import unicode_literals
from __future__ import absolute_import
import logging
from .service import Service
log = logging.getLogger(__name__)
def get_services(self, service_names=None):
    """
    Return this project's services filtered by the given names, or all
    services when service_names is None or empty.

    The original order of self.services is preserved. Raises
    NoSuchService if any requested name does not exist.
    """
    if service_names is None or len(service_names) == 0:
        return self.services
    # Resolve every name first (this is where NoSuchService surfaces),
    # then filter the canonical list so ordering stays stable.
    requested = [self.get_service(name) for name in service_names]
    return [service for service in self.services if service in requested]
class NoSuchService(Exception):
class ConfigurationError(Exception):
class DependencyError(ConfigurationError):
pass
| 34.053892 | 183 | 0.604713 |
b2743a4c76c53ef106ccb49cbbfbe8057b1bd708 | 2,136 | py | Python | input/utils/chi-squared-contingency-tests.py | g-p-m/GPM | 00aa3ea664e14b99eedd6cbeabbc2b85edf2b208 | [
"BSD-3-Clause"
] | null | null | null | input/utils/chi-squared-contingency-tests.py | g-p-m/GPM | 00aa3ea664e14b99eedd6cbeabbc2b85edf2b208 | [
"BSD-3-Clause"
] | null | null | null | input/utils/chi-squared-contingency-tests.py | g-p-m/GPM | 00aa3ea664e14b99eedd6cbeabbc2b85edf2b208 | [
"BSD-3-Clause"
] | null | null | null | import numpy, scipy.stats
# Contingency tables of counts (4 categories x 12 bins -- presumably
# 12 signs/months; confirm against the data source). The first three
# tables come from Pyswisseph-derived data, the next three from
# Ephem-derived data; the two sets differ only in a handful of cells.
T1 = numpy.asarray([
    [316, 378, 393, 355, 391, 371, 400, 397, 385, 371, 382, 371],
    [336, 339, 322, 341, 314, 311, 339, 310, 331, 355, 316, 306],
    [375, 364, 375, 381, 381, 401, 374, 396, 422, 417, 372, 435],
    [238, 231, 263, 268, 239, 259, 243, 206, 257, 228, 252, 203]])
T2 = numpy.asarray([
    [378, 415, 389, 383, 369, 382, 382, 340, 359, 377, 372, 364],
    [312, 326, 356, 319, 294, 325, 345, 315, 326, 324, 346, 332],
    [368, 382, 384, 401, 367, 399, 417, 397, 387, 408, 415, 368],
    [246, 226, 264, 242, 229, 237, 227, 233, 251, 244, 262, 226]])
T3 = numpy.asarray([
    [331, 409, 409, 392, 364, 336, 317, 345, 351, 414, 406, 436],
    [351, 355, 313, 328, 296, 291, 312, 320, 339, 307, 339, 369],
    [407, 416, 400, 363, 355, 350, 380, 388, 386, 391, 436, 421],
    [297, 270, 231, 236, 206, 243, 217, 222, 229, 246, 244, 246]])
# chi2_contingency returns (statistic, p-value, dof, expected);
# index [1] selects the p-value of the independence test.
for table in (T1, T2, T3):  # Pyswisseph
    print(scipy.stats.chi2_contingency(table)[1])
print()
T1 = numpy.asarray([
    [316, 378, 393, 355, 391, 371, 400, 397, 385, 371, 382, 371],
    [336, 338, 323, 341, 314, 311, 339, 310, 331, 355, 316, 306],
    [375, 364, 375, 381, 381, 401, 374, 396, 422, 417, 372, 435],
    [238, 231, 263, 268, 239, 259, 243, 206, 257, 228, 252, 203]])
T2 = numpy.asarray([
    [378, 415, 389, 383, 369, 382, 382, 340, 359, 377, 372, 364],
    [312, 326, 356, 319, 294, 325, 345, 315, 326, 324, 346, 332],
    [368, 382, 384, 401, 367, 399, 417, 397, 387, 409, 414, 368],
    [246, 226, 264, 242, 229, 237, 227, 234, 250, 244, 262, 226]])
T3 = numpy.asarray([
    [331, 411, 406, 393, 364, 333, 322, 344, 350, 413, 408, 435],
    [352, 355, 313, 331, 291, 293, 314, 318, 339, 308, 338, 368],
    [406, 416, 400, 364, 356, 348, 380, 392, 383, 390, 437, 421],
    [296, 270, 231, 238, 202, 245, 217, 222, 229, 247, 244, 246]])
for table in (T1, T2, T3):  # Ephem
    print(scipy.stats.chi2_contingency(table)[1])
| 46.434783 | 65 | 0.558521 |
b2760534184d4098001909eaf620372388d8db5f | 4,916 | py | Python | inference_speed.py | guillesanbri/DPT | d65d1e4adade95bb6265c28ab29e009028b3f9a8 | [
"MIT"
] | null | null | null | inference_speed.py | guillesanbri/DPT | d65d1e4adade95bb6265c28ab29e009028b3f9a8 | [
"MIT"
] | null | null | null | inference_speed.py | guillesanbri/DPT | d65d1e4adade95bb6265c28ab29e009028b3f9a8 | [
"MIT"
] | null | null | null | import os
import wandb
import torch
import warnings
import numpy as np
import torchvision.transforms
from fvcore.nn import FlopCountAnalysis
from dpt.models import DPTDepthModel
# Hyperparameters and config
# Input
net_w, net_h = 640, 192
h_kitti, w_kitti = 352, 1216
# Model architecture
backbone = "vitb_rn50_384" # "vitb_effb0"
transformer_hooks = "str:8,11"
attention_variant = None # "performer"
attention_heads = 12
mixed_precision = False
config_dict = {
"input_size": f"{net_h},{net_w}",
"downsampling": "Resize image along w and h",
"mixed_precision": mixed_precision,
"backbone": backbone,
"transformer_hooks": transformer_hooks,
"attention_variant": attention_variant,
"attention_heads": attention_heads,
}
if __name__ == "__main__":
warnings.simplefilter("ignore", UserWarning)
# Init wandb
wandb.init(config=config_dict)
config = wandb.config
# Re-read config for wandb-sweep-managed inference
mixed_precision = config["mixed_precision"]
backbone = config["backbone"]
transformer_hooks = config["transformer_hooks"]
attention_variant = config["attention_variant"]
if attention_variant == "None":
attention_variant = None
attention_heads = config["attention_heads"]
input_size = config["input_size"]
net_h = int(input_size.split(",")[0])
net_w = int(input_size.split(",")[1])
# Convert str hooks to list (wandb hacky solution to display hooks correctly)
assert isinstance(transformer_hooks, str) and transformer_hooks[:4] == "str:", \
'Hooks are not in the format "str:[att_hook1, att_hook2]"'
conv_hooks = {"vitb_rn50_384": [0, 1], "vitb_effb0": [1, 2]}[backbone]
transformer_hooks = [int(hook) for hook in transformer_hooks.split(":")[-1].split(",")]
hooks = conv_hooks + transformer_hooks
# Get cpu or gpu device for training.
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
# Create model
model = DPTDepthModel(
path=None,
scale=0.00006016, # KITTI
shift=0.00579,
invert=True,
backbone=backbone,
attention_heads=attention_heads,
hooks=hooks,
non_negative=True,
enable_attention_hooks=False,
attention_variant=attention_variant).to(device)
n_inferences = 500
wandb.log({"num_inferences": n_inferences})
measures = np.zeros((n_inferences, 1))
x = torch.rand(1, 3, h_kitti, w_kitti).to(device)
print(f"Kitti size: {h_kitti}, {w_kitti} | Network input size: {net_h}, {net_w}")
# Cuda events
t0 = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
# Measure inference time
with torch.no_grad():
with torch.cuda.amp.autocast(enabled=mixed_precision):
dummy = torchvision.transforms.Resize((net_h, net_w))(x)
_ = model(dummy) # Warm-up
for i in range(n_inferences):
t0.record()
if net_h != h_kitti or net_w != w_kitti:
x = torchvision.transforms.Resize((net_h, net_w))(x)
y = model(x)
if net_h != h_kitti or net_w != w_kitti:
_ = torch.nn.functional.interpolate(y.unsqueeze(1),
size=(h_kitti, w_kitti),
mode="bicubic",
align_corners=True)
end.record()
torch.cuda.synchronize()
measures[i] = t0.elapsed_time(end)
mean_ms = np.mean(measures)
std_ms = np.std(measures)
fps = 1000/measures
mean_fps = np.mean(fps)
std_fps = np.std(fps)
GFLOPs = get_flops(model.to("cpu"), x.to("cpu"))
model_MB = get_model_size(model)
wandb.log({"FPS": mean_fps, "std_fps": std_fps, "ms": mean_ms, "std_ms": std_ms, "GFLOPs": GFLOPs, "MB": model_MB})
print(f"FPS: {mean_fps:.2f} +- {1/std_fps:.2f} || Inference speed (ms): {mean_ms:.4f} +- {std_ms:.4f}")
print(f"GFLOPs: {GFLOPs:.3f} || Model size (MB): {model_MB:.2f}")
| 35.114286 | 119 | 0.605574 |
b2768c03376cae3fece006df9dcfa990067b957c | 5,122 | py | Python | cybox/common/tools.py | siemens/python-cybox | b692a98c8a62bd696e2a0dda802ada7359853482 | [
"BSD-3-Clause"
] | null | null | null | cybox/common/tools.py | siemens/python-cybox | b692a98c8a62bd696e2a0dda802ada7359853482 | [
"BSD-3-Clause"
] | null | null | null | cybox/common/tools.py | siemens/python-cybox | b692a98c8a62bd696e2a0dda802ada7359853482 | [
"BSD-3-Clause"
] | 1 | 2019-04-16T18:37:32.000Z | 2019-04-16T18:37:32.000Z | # Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import cybox
import cybox.bindings.cybox_common as common_binding
from cybox.common import HashList, StructuredText, VocabString
def to_dict(self):
    """Serialize this tool description to a plain dict, skipping unset fields.

    Scalar attributes are copied through when not ``None``; nested objects
    (``type_`` entries, ``description``, ``tool_hashes``) are recursively
    serialized via their own ``to_dict``/``to_list`` methods.
    """
    serialized = {}

    def put_if_set(key, value):
        # Only emit keys whose value was explicitly populated.
        if value is not None:
            serialized[key] = value

    put_if_set('id', self.id_)
    put_if_set('idref', self.idref)
    put_if_set('name', self.name)
    if self.type_:
        serialized['type'] = [entry.to_dict() for entry in self.type_]
    if self.description is not None:
        serialized['description'] = self.description.to_dict()
    put_if_set('vendor', self.vendor)
    put_if_set('version', self.version)
    put_if_set('service_pack', self.service_pack)
    if self.tool_hashes:
        serialized['tool_hashes'] = self.tool_hashes.to_list()
    return serialized
| 33.25974 | 89 | 0.650527 |
b2769b5ec360ec5dc6f9171e3632b3ef3f3dc0c8 | 570 | py | Python | python/ray/rllib/models/tf/tf_modelv2.py | alex-petrenko/ray | dfc94ce7bcd5d9d008822efdeec17c3f6bb9c606 | [
"Apache-2.0"
] | 1 | 2020-09-27T08:48:11.000Z | 2020-09-27T08:48:11.000Z | python/ray/rllib/models/tf/tf_modelv2.py | JunpingDu/ray | 214f09d969480279930994cabbcc2a75535cc6ca | [
"Apache-2.0"
] | 4 | 2019-03-04T13:03:24.000Z | 2019-06-06T11:25:07.000Z | python/ray/rllib/models/tf/tf_modelv2.py | JunpingDu/ray | 214f09d969480279930994cabbcc2a75535cc6ca | [
"Apache-2.0"
] | 1 | 2020-04-30T09:06:20.000Z | 2020-04-30T09:06:20.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
| 23.75 | 74 | 0.642105 |
b27869ddfe009d8e2d025f4f2f3d4a1de697cced | 1,401 | py | Python | EventManager/Home/models.py | 201901407/woc3.0-eventmanager-DarshilParikh | 8174cd5373e3f3e4723a9fd6381266a56dddc4e6 | [
"MIT"
] | 1 | 2021-01-03T13:57:38.000Z | 2021-01-03T13:57:38.000Z | EventManager/Home/models.py | 201901407/woc3.0-eventmanager-DarshilParikh | 8174cd5373e3f3e4723a9fd6381266a56dddc4e6 | [
"MIT"
] | null | null | null | EventManager/Home/models.py | 201901407/woc3.0-eventmanager-DarshilParikh | 8174cd5373e3f3e4723a9fd6381266a56dddc4e6 | [
"MIT"
] | null | null | null | from django.db import models
import uuid, datetime
from django.utils import timezone
# Create your models here.
| 35.025 | 97 | 0.751606 |
b278da741753c0353d746ae92b8910102ad49380 | 2,450 | py | Python | zulip_bots/zulip_bots/terminal.py | maanuanubhav999/python-zulip-api | abebf28077b31d6b3a7183044c6493230d890d91 | [
"Apache-2.0"
] | 1 | 2020-07-09T17:23:15.000Z | 2020-07-09T17:23:15.000Z | zulip_bots/zulip_bots/terminal.py | maanuanubhav999/python-zulip-api | abebf28077b31d6b3a7183044c6493230d890d91 | [
"Apache-2.0"
] | null | null | null | zulip_bots/zulip_bots/terminal.py | maanuanubhav999/python-zulip-api | abebf28077b31d6b3a7183044c6493230d890d91 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import os
import sys
import argparse
from zulip_bots.finder import import_module_from_source, resolve_bot_path
from zulip_bots.simple_lib import TerminalBotHandler
current_dir = os.path.dirname(os.path.abspath(__file__))
if __name__ == '__main__':
main()
| 31.410256 | 106 | 0.624082 |
b27948c3537469faf68e7dba6797c0ed2aa2c1dd | 3,606 | py | Python | tesHistMatch.py | cliffeby/Duckpin2 | 9b1b0891e898625373409f7b4b7d4e058184c45e | [
"MIT"
] | null | null | null | tesHistMatch.py | cliffeby/Duckpin2 | 9b1b0891e898625373409f7b4b7d4e058184c45e | [
"MIT"
] | 1 | 2018-04-23T21:35:32.000Z | 2018-10-04T03:15:00.000Z | tesHistMatch.py | cliffeby/Duckpin2 | 9b1b0891e898625373409f7b4b7d4e058184c45e | [
"MIT"
] | null | null | null | # import the necessary packages
import io
import time
import cropdata1024, cropdata1440
import numpy as np
import threading
import cv2
mask_crop_ranges = cropdata1440.ballCrops
crop_ranges = cropdata1024.pin_crop_ranges
arm_crop_ranges = cropdata1440.resetArmCrops
scrop_ranges = cropdata1024.special_crop_ranges
x=y=x1=y1=0
rmax = [0,0,0,0,0,0,0,0,0,0,0,-1]
smax = [0,0,0]
oldHist =olb=olg=olr=oub=oug=our = -999
img = cv2.imread('C:/Users/cliff/pictures/BArmMask.jpg',1)
imge = cv2.imread('C:/Users/cliff/pictures/BArmMaskerase.jpg',1)
findPins(img, imge)
drawPinRectangles(imge)
# cv2.imshow('ddd',imge)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
| 33.700935 | 136 | 0.567942 |
b27a7998ddf212b0241aa835db7ce95126acc646 | 2,929 | py | Python | authlib/integrations/flask_client/remote_app.py | bobh66/authlib | e3e18da74d689b61a8dc8db46775ff77a57c6c2a | [
"BSD-3-Clause"
] | 1 | 2021-12-09T07:11:05.000Z | 2021-12-09T07:11:05.000Z | authlib/integrations/flask_client/remote_app.py | bobh66/authlib | e3e18da74d689b61a8dc8db46775ff77a57c6c2a | [
"BSD-3-Clause"
] | null | null | null | authlib/integrations/flask_client/remote_app.py | bobh66/authlib | e3e18da74d689b61a8dc8db46775ff77a57c6c2a | [
"BSD-3-Clause"
] | 2 | 2021-05-24T20:34:12.000Z | 2022-03-26T07:46:17.000Z | from flask import redirect
from flask import request as flask_req
from flask import _app_ctx_stack
from ..base_client import RemoteApp
def authorize_redirect(self, redirect_uri=None, **kwargs):
    """Create a HTTP Redirect for Authorization Endpoint.

    :param redirect_uri: Callback or redirect URI for authorization.
    :param kwargs: Extra parameters to include.
    :return: A HTTP redirect response.
    """
    rv = self.create_authorization_url(redirect_uri, **kwargs)
    if self.request_token_url:
        # A configured request_token_url means an OAuth 1 flow: persist the
        # request token so the callback handler can exchange it later.
        request_token = rv.pop('request_token', None)
        self._save_request_token(request_token)
    # Store the remaining authorize data (e.g. state) plus the redirect URI
    # against the current Flask request for validation on the callback.
    self.save_authorize_data(flask_req, redirect_uri=redirect_uri, **rv)
    return redirect(rv['url'])
def authorize_access_token(self, **kwargs):
    """Authorize access token.

    Exchanges the parameters of the current authorization callback request
    (plus, for OAuth 1, the previously saved request token) for an access
    token, stores it on the instance and returns it.
    """
    if self.request_token_url:
        # OAuth 1 flow: recover the request token saved by authorize_redirect.
        request_token = self._fetch_request_token()
    else:
        request_token = None
    # Collect the token-exchange parameters from the current Flask request.
    params = self.retrieve_access_token_params(flask_req, request_token)
    params.update(kwargs)
    token = self.fetch_access_token(**params)
    self.token = token
    return token
| 35.719512 | 84 | 0.665073 |
b27a85f2428bee55c3eb4af112108417cb5d5e83 | 2,552 | py | Python | models/cnn_stft.py | gumpy-hybridBCI/GUMPY- | 12a679626836c0be0063dd4012380ec2fa0245cb | [
"MIT"
] | 27 | 2018-02-20T14:17:42.000Z | 2021-04-16T02:36:40.000Z | models/cnn_stft.py | gumpy-hybridBCI/GUMPY- | 12a679626836c0be0063dd4012380ec2fa0245cb | [
"MIT"
] | 3 | 2019-02-22T12:18:40.000Z | 2021-06-13T17:09:08.000Z | models/cnn_stft.py | gumpy-hybridBCI/GUMPY- | 12a679626836c0be0063dd4012380ec2fa0245cb | [
"MIT"
] | 15 | 2018-03-19T20:04:50.000Z | 2022-02-24T10:12:06.000Z | from .model import KerasModel
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.layers import BatchNormalization, Dropout, Conv2D, MaxPooling2D
import kapre
from kapre.utils import Normalization2D
from kapre.time_frequency import Spectrogram
| 37.529412 | 87 | 0.575627 |
b27aa21ef3977e3a19e7a6820a49fc999d5453c5 | 1,347 | py | Python | test/test_http.py | tylerlong/ringcentral-python | 518a6b2b493360a40f2ee0eaa8ae3f12e01d4f52 | [
"MIT"
] | 3 | 2017-01-26T01:58:50.000Z | 2018-12-26T09:06:21.000Z | test/test_http.py | tylerlong/ringcentral-python | 518a6b2b493360a40f2ee0eaa8ae3f12e01d4f52 | [
"MIT"
] | 3 | 2017-03-25T21:50:04.000Z | 2018-09-05T23:35:26.000Z | test/test_http.py | tylerlong/ringcentral-python | 518a6b2b493360a40f2ee0eaa8ae3f12e01d4f52 | [
"MIT"
] | 1 | 2017-02-14T22:27:16.000Z | 2017-02-14T22:27:16.000Z | from .test_base import BaseTestCase
| 43.451613 | 144 | 0.589458 |
b27aa7dc89425beb1b8dd2de335e508e06185c2e | 6,685 | py | Python | src/scaffold/models/abstract/meta.py | Su-yj/django-scaffold-tools | db97b1feece8cc57131e3a14b292857204e8e574 | [
"Apache-2.0"
] | 2 | 2021-02-25T17:52:03.000Z | 2021-05-25T23:49:40.000Z | src/scaffold/models/abstract/meta.py | Su-yj/django-scaffold-tools | db97b1feece8cc57131e3a14b292857204e8e574 | [
"Apache-2.0"
] | null | null | null | src/scaffold/models/abstract/meta.py | Su-yj/django-scaffold-tools | db97b1feece8cc57131e3a14b292857204e8e574 | [
"Apache-2.0"
] | 1 | 2022-03-24T09:40:57.000Z | 2022-03-24T09:40:57.000Z | from datetime import datetime
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
from scaffold.exceptions.exceptions import AppError
# def patch_methods(model_class):
# def do_patch(cls):
# for k in cls.__dict__:
# obj = getattr(cls, k)
# if not k.startswith('_') and callable(obj):
# setattr(model_class, k, obj)
#
# return do_patch
| 21.495177 | 74 | 0.57472 |
b27aafce477f2a5f5a7f14f7e8edc439ed8f615c | 3,740 | py | Python | tests/unit/client/resources/box/test_box.py | etingof/softboxen | 2a7ba85669d563de9824e3962bd48a0849482e3f | [
"BSD-2-Clause"
] | 2 | 2020-02-08T20:43:35.000Z | 2020-06-24T18:46:59.000Z | tests/unit/client/resources/box/test_box.py | etingof/softboxen | 2a7ba85669d563de9824e3962bd48a0849482e3f | [
"BSD-2-Clause"
] | 2 | 2020-03-07T08:07:17.000Z | 2021-09-15T21:12:12.000Z | tests/unit/client/resources/box/test_box.py | etingof/softboxen | 2a7ba85669d563de9824e3962bd48a0849482e3f | [
"BSD-2-Clause"
] | 1 | 2020-05-04T06:10:45.000Z | 2020-05-04T06:10:45.000Z | #
# This file is part of softboxen software.
#
# Copyright (c) 2020, Ilya Etingof <etingof@gmail.com>
# License: https://github.com/etingof/softboxen/LICENSE.rst
#
import json
import sys
import unittest
from unittest import mock
from softboxen.client.resources.box import box
from softboxen.client.resources.box import credentials
from softboxen.client.resources.box import route
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
| 31.428571 | 79 | 0.674866 |
b27af965481a6eface77ab77feda170f704b5500 | 543 | py | Python | photoseleven/db.py | photoseleven/photoseleven-backend | 2e511d5e48477b6b41a6d98f0630b1bcada8a298 | [
"MIT"
] | null | null | null | photoseleven/db.py | photoseleven/photoseleven-backend | 2e511d5e48477b6b41a6d98f0630b1bcada8a298 | [
"MIT"
] | null | null | null | photoseleven/db.py | photoseleven/photoseleven-backend | 2e511d5e48477b6b41a6d98f0630b1bcada8a298 | [
"MIT"
] | 1 | 2020-03-29T11:20:40.000Z | 2020-03-29T11:20:40.000Z | import click
from flask import current_app, g
from flask.cli import with_appcontext
from flask_pymongo import PyMongo
from werkzeug.security import check_password_hash, generate_password_hash
| 19.392857 | 73 | 0.692449 |
b27e012ad9d98a21878536e044d958f15626c65e | 4,354 | py | Python | c1_tools/c1_Preferences.py | jacobmartinez3d/c1_tools | e317d52e91a375c6ac1b6914a74787056118484e | [
"MIT"
] | null | null | null | c1_tools/c1_Preferences.py | jacobmartinez3d/c1_tools | e317d52e91a375c6ac1b6914a74787056118484e | [
"MIT"
] | null | null | null | c1_tools/c1_Preferences.py | jacobmartinez3d/c1_tools | e317d52e91a375c6ac1b6914a74787056118484e | [
"MIT"
] | null | null | null | # preferences panel to allow inputting cutom parameters for the structure of a project and its
# naming conventions.
# --------------------------------------------------------------------------------------------------
import hashlib
import nuke
from nukescripts.panels import PythonPanel
import fileinput
import os
import smtplib
import sys
| 37.213675 | 120 | 0.579927 |
b27e60d49c438918ac9f9898312b3fc091fc3cf6 | 35,738 | py | Python | src/proto/runtime_pb2_grpc.py | layotto/python-sdk | dac5833ebbfe16d6e5b6322041ca65431096f14b | [
"Apache-2.0"
] | null | null | null | src/proto/runtime_pb2_grpc.py | layotto/python-sdk | dac5833ebbfe16d6e5b6322041ca65431096f14b | [
"Apache-2.0"
] | 1 | 2022-02-23T14:37:01.000Z | 2022-02-23T14:37:01.000Z | src/proto/runtime_pb2_grpc.py | layotto/python-sdk | dac5833ebbfe16d6e5b6322041ca65431096f14b | [
"Apache-2.0"
] | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
import runtime_pb2 as runtime__pb2
def add_RuntimeServicer_to_server(servicer, server):
    """Register every Runtime RPC handler of *servicer* on *server*."""
    # One entry per RPC: (method name, handler factory, request message,
    # response message).  The factory encodes the streaming mode; responses
    # of google.protobuf.Empty are abbreviated via _Empty.
    _Empty = google_dot_protobuf_dot_empty__pb2.Empty
    method_specs = (
        ('SayHello', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.SayHelloRequest, runtime__pb2.SayHelloResponse),
        ('InvokeService', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.InvokeServiceRequest, runtime__pb2.InvokeResponse),
        ('GetConfiguration', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.GetConfigurationRequest,
         runtime__pb2.GetConfigurationResponse),
        ('SaveConfiguration', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.SaveConfigurationRequest, _Empty),
        ('DeleteConfiguration', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.DeleteConfigurationRequest, _Empty),
        ('SubscribeConfiguration', grpc.stream_stream_rpc_method_handler,
         runtime__pb2.SubscribeConfigurationRequest,
         runtime__pb2.SubscribeConfigurationResponse),
        ('TryLock', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.TryLockRequest, runtime__pb2.TryLockResponse),
        ('Unlock', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.UnlockRequest, runtime__pb2.UnlockResponse),
        ('GetNextId', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.GetNextIdRequest, runtime__pb2.GetNextIdResponse),
        ('GetState', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.GetStateRequest, runtime__pb2.GetStateResponse),
        ('GetBulkState', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.GetBulkStateRequest, runtime__pb2.GetBulkStateResponse),
        ('SaveState', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.SaveStateRequest, _Empty),
        ('DeleteState', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.DeleteStateRequest, _Empty),
        ('DeleteBulkState', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.DeleteBulkStateRequest, _Empty),
        ('ExecuteStateTransaction', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.ExecuteStateTransactionRequest, _Empty),
        ('PublishEvent', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.PublishEventRequest, _Empty),
        ('GetFile', grpc.unary_stream_rpc_method_handler,
         runtime__pb2.GetFileRequest, runtime__pb2.GetFileResponse),
        ('PutFile', grpc.stream_unary_rpc_method_handler,
         runtime__pb2.PutFileRequest, _Empty),
        ('ListFile', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.ListFileRequest, runtime__pb2.ListFileResp),
        ('DelFile', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.DelFileRequest, _Empty),
        ('GetFileMeta', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.GetFileMetaRequest, runtime__pb2.GetFileMetaResponse),
        ('InvokeBinding', grpc.unary_unary_rpc_method_handler,
         runtime__pb2.InvokeBindingRequest,
         runtime__pb2.InvokeBindingResponse),
    )
    rpc_method_handlers = {
        name: factory(
            getattr(servicer, name),
            request_deserializer=request_cls.FromString,
            response_serializer=response_cls.SerializeToString,
        )
        for name, factory, request_cls, response_cls in method_specs
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'spec.proto.runtime.v1.Runtime', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
| 45.352792 | 129 | 0.651603 |
b27fdc318377fdd21756f01199453a4713d91df6 | 1,794 | py | Python | forecast_box/validate.py | kyleclo/forecast-box | 5b965f0c7f45c92e800c31df1c7a12a6d08527b1 | [
"Apache-2.0"
] | 1 | 2017-02-08T19:34:35.000Z | 2017-02-08T19:34:35.000Z | forecast_box/validate.py | kyleclo/forecast-box | 5b965f0c7f45c92e800c31df1c7a12a6d08527b1 | [
"Apache-2.0"
] | null | null | null | forecast_box/validate.py | kyleclo/forecast-box | 5b965f0c7f45c92e800c31df1c7a12a6d08527b1 | [
"Apache-2.0"
] | null | null | null | """
Validation
"""
import numpy as np
import pandas as pd
from model import Model
# TODO: different versions with resampling or subsampling
# TODO: return DataFrame of forecasted_values along with metric?
def validate_model(name, params, time_series, metric_fun):
    """Evaluates performance of Model forecast method on time series.

    Walks forward through the series: for every admissible prefix length, a
    fresh model is created and trained on the prefix, its forecast is scored
    against the actually observed values with ``metric_fun``, and the scores
    are returned as a pd.Series indexed by the last timestamp of each prefix.
    """
    # Smallest prefix that can support the AR order plus the longest horizon;
    # largest prefix that still leaves the longest horizon to compare against.
    smallest = max(params['forward_steps']) + params['ar_order']
    largest = time_series.size - max(params['forward_steps'])

    scores = []
    for size in range(smallest, largest + 1):
        print('Simulating forecasts for ' + str(time_series.index[size - 1]))
        history = time_series.head(size)
        model = Model.create(name, params)
        model.train(history)
        predicted = model.forecast(history)
        observed = time_series[predicted.index]
        scores.append(metric_fun(observed, predicted))

    return pd.Series(data=scores,
                     index=time_series.index[(smallest - 1):largest])
# def validate_forecaster(forecaster, time_series, performance_fun):
# """Applies a forecaster to a time series to evaluate performance"""
#
# performance = []
# min_size = forecaster.min_size
# max_size = time_series.size - max(forecaster.forward_steps)
# for n in range(min_size, max_size + 1):
# print 'Simulating forecaster for ' + str(time_series.index[n - 1])
# sub_time_series = time_series.head(n)
# forecasted_values = forecaster.forecast(sub_time_series)
# actual_values = time_series[forecasted_values.index]
# performance.append(performance_fun(actual_values, forecasted_values))
#
# return pd.Series(data=performance,
# index=time_series.index[min_size - 1:max_size])
| 36.612245 | 79 | 0.696767 |
b280b873fa11a9c22244c5a88ce9b4b92bf52fa9 | 338 | py | Python | config/api_router.py | summerthe/django_api_starter | 8f6c83fccc3a138a636850f7d23d9aac72e06f8f | [
"MIT"
] | null | null | null | config/api_router.py | summerthe/django_api_starter | 8f6c83fccc3a138a636850f7d23d9aac72e06f8f | [
"MIT"
] | null | null | null | config/api_router.py | summerthe/django_api_starter | 8f6c83fccc3a138a636850f7d23d9aac72e06f8f | [
"MIT"
] | null | null | null | from django.conf import settings
from django.urls.conf import include, path
from rest_framework.routers import DefaultRouter, SimpleRouter
# The browsable-API DefaultRouter is convenient during development; the
# lighter SimpleRouter is sufficient otherwise.
router = DefaultRouter() if settings.DEBUG else SimpleRouter()

app_name = "api"

urlpatterns = [
    path("", include("summers_api.users.api.urls")),
    *router.urls,
]
| 22.533333 | 62 | 0.745562 |
b28150bc596dbcfe86da754ccfece409615ba261 | 339 | py | Python | backstack/__init__.py | pixlie/platform | 10782e9ddfb1dc2311e22987a16e9e77f3d71d34 | [
"MIT"
] | 2 | 2019-06-06T11:21:35.000Z | 2021-12-19T12:17:02.000Z | backstack/__init__.py | pixlie/backstack | 10782e9ddfb1dc2311e22987a16e9e77f3d71d34 | [
"MIT"
] | null | null | null | backstack/__init__.py | pixlie/backstack | 10782e9ddfb1dc2311e22987a16e9e77f3d71d34 | [
"MIT"
] | null | null | null | from .models import SystemModel, BaseModel
from .errors import ServerError, Errors
from .config import settings
from .db import db, Base
from .commands import Commands
# Package name, also re-exported through __all__ below.
name = "platform"
# Public API of the package: names re-exported from the submodules imported
# above (models, errors, config, db, commands).
__all__ = [
    "name",
    "SystemModel",
    "BaseModel",
    "ServerError",
    "Errors",
    "settings",
    "db",
    "Base",
    "Commands",
]
| 15.409091 | 42 | 0.646018 |
b282134e67aa67a11713d58542eb8a80ec036fb7 | 1,571 | py | Python | samples/archive/stream/stream.py | zzzDavid/heterocl | 977aae575d54a30c5bf6d869e8f71bdc815cf7e9 | [
"Apache-2.0"
] | 236 | 2019-05-19T01:48:11.000Z | 2022-03-31T09:03:54.000Z | samples/archive/stream/stream.py | zzzDavid/heterocl | 977aae575d54a30c5bf6d869e8f71bdc815cf7e9 | [
"Apache-2.0"
] | 248 | 2019-05-17T19:18:36.000Z | 2022-03-30T21:25:47.000Z | samples/archive/stream/stream.py | AlgaPeng/heterocl-2 | b5197907d1fe07485466a63671a2a906a861c939 | [
"Apache-2.0"
] | 85 | 2019-05-17T20:09:27.000Z | 2022-02-28T20:19:00.000Z | import heterocl as hcl
hcl.init()
target = hcl.Platform.xilinx_zc706
initiation_interval = 4
a = hcl.placeholder((10, 20), name="a")
b = hcl.placeholder((10, 20), name="b")
c = hcl.placeholder((10, 20), name="c")
d = hcl.placeholder((10, 20), name="d")
e = hcl.placeholder((10, 20), name="e")
# compute customization
s = hcl.create_schedule([a, b, c, d, e], add_mul)
# op1 = add_mul.ret_add.c
# op2 = add_mul.ret_mul.c
# s[op1].pipeline(op1.axis[0], initiation_interval)
# stream into modules / device
a0, b0 = s.to([a, b], target.xcel)
d0 = s.to(d, target.xcel)
#s.partition(b0, dim=2, factor=2)
s.to([a0, b0], s[add_mul.ret_add])
s.to(d0, s[add_mul.ret_mul])
# within device move producer to consumer
s.to(c, s[add_mul.ret_mul],
s[add_mul.ret_add], depth=10)
# return tensor for inter-device move
# e0 = s.stream_to(e, hcl.CPU('riscv'))
# print(add_mul.ret_mul._buf, c._buf)
print(hcl.lower(s))
code = hcl.build(s, target)
print(code)
#
# with open("example.cl", "w") as f:
# f.write(code)
# f.close()
| 26.627119 | 64 | 0.589433 |
b282a97791327fc19ad1bc909b5a0f67419da315 | 653 | py | Python | setup.py | eminaktas/k8s-workload-scaler | 388ebd9c472911c5dd783610d12ae314c1e4adad | [
"MIT"
] | 3 | 2021-06-11T08:33:19.000Z | 2022-03-01T23:32:35.000Z | setup.py | eminaktas/k8s-workload-scaler | 388ebd9c472911c5dd783610d12ae314c1e4adad | [
"MIT"
] | null | null | null | setup.py | eminaktas/k8s-workload-scaler | 388ebd9c472911c5dd783610d12ae314c1e4adad | [
"MIT"
] | null | null | null | import os
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))

# Use the project README as the PyPI long description.
with open(os.path.join(here, 'README.md')) as readme_file:
    README = readme_file.read()

setup(
    name='k8s-workload-scaler',
    version='0.0.2',
    packages=['k8s_workload_scaler'],
    # Bug fix: the URL previously lacked a scheme and was not a valid link.
    url='https://github.com/eminaktas/k8s-workload-scaler',
    license='MIT',
    author='emin.aktas',
    author_email='eminaktas34@gmail.com',
    description='Kubernetes workload scaler',
    long_description=README,
    # Bug fix: without this, PyPI renders the Markdown README as plain text.
    long_description_content_type='text/markdown',
    install_requires=[
        'setuptools~=54.2.0',
        'kubernetes~=12.0.1',
        'requests~=2.25.1',
        'prometheus-api-client~=0.4.2',
    ]
)
| 25.115385 | 58 | 0.653905 |
b2839dcc8ba1e2c6405ad07dce2a45037d7c2944 | 13,561 | py | Python | ros/dynamic_reconfigure/src/dynamic_reconfigure/client.py | numberen/apollo-platform | 8f359c8d00dd4a98f56ec2276c5663cb6c100e47 | [
"Apache-2.0"
] | 2 | 2018-12-11T16:35:20.000Z | 2019-01-23T16:42:17.000Z | opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/client.py | Roboy/roboy_managing_node_fpga | 64ffe5aec2f2c98a051bb1a881849c195b8d052c | [
"BSD-3-Clause"
] | 1 | 2018-12-28T21:11:50.000Z | 2018-12-28T21:11:50.000Z | opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/client.py | Roboy/roboy_managing_node_fpga | 64ffe5aec2f2c98a051bb1a881849c195b8d052c | [
"BSD-3-Clause"
] | 3 | 2018-01-29T12:22:56.000Z | 2020-12-08T09:08:46.000Z | # Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Python client API for dynamic_reconfigure (L{DynamicReconfigureClient}) as well as
example server implementation (L{DynamicReconfigureServer}).
"""
from __future__ import with_statement
try:
import roslib; roslib.load_manifest('dynamic_reconfigure')
except:
pass
import rospy
import rosservice
import sys
import threading
import time
import types
from dynamic_reconfigure import DynamicReconfigureParameterException
from dynamic_reconfigure.srv import Reconfigure as ReconfigureSrv
from dynamic_reconfigure.msg import Config as ConfigMsg
from dynamic_reconfigure.msg import ConfigDescription as ConfigDescrMsg
from dynamic_reconfigure.msg import IntParameter, BoolParameter, StrParameter, DoubleParameter, ParamDescription
from dynamic_reconfigure.encoding import *
| 39.080692 | 138 | 0.594794 |
b2842a9629f4ea0e56df84c21b6edd075792d02d | 7,803 | py | Python | l0bnb/relaxation/core.py | jonathan-taylor/l0bnb | 0c2beef67b92861ec51bc3514d485eabad43c611 | [
"MIT"
] | 25 | 2020-04-14T00:32:04.000Z | 2022-03-23T11:49:06.000Z | l0bnb/relaxation/core.py | jonathan-taylor/l0bnb | 0c2beef67b92861ec51bc3514d485eabad43c611 | [
"MIT"
] | 1 | 2021-10-12T16:37:04.000Z | 2021-10-12T16:37:04.000Z | l0bnb/relaxation/core.py | jonathan-taylor/l0bnb | 0c2beef67b92861ec51bc3514d485eabad43c611 | [
"MIT"
] | 9 | 2020-05-14T04:15:44.000Z | 2022-03-04T14:58:25.000Z | import copy
from time import time
from collections import namedtuple
import numpy as np
from numba.typed import List
from numba import njit
from ._coordinate_descent import cd_loop, cd
from ._cost import get_primal_cost, get_dual_cost
from ._utils import get_ratio_threshold, get_active_components
from . import GS_FLAG
def _above_threshold(x, y, beta, zub, gs_xtr, gs_xb, r, threshold):
    """Dispatch to the appropriate threshold-screening routine.

    With GS screening disabled, falls back to the plain index computation;
    otherwise a ``None`` ``gs_xtr`` marks the first (root) call, which also
    initializes the cached ``gs_xtr``/``gs_xb`` products returned to the
    caller.
    """
    if not GS_FLAG:
        above, rx = _above_threshold_indices(zub, r, x, threshold)
    elif gs_xtr is None:
        # First call at the root relaxation: build the GS caches from scratch.
        above, rx, gs_xtr, gs_xb = _above_threshold_indices_root_first_call_gs(
            zub, r, x, y, threshold)
    else:
        above, rx, gs_xtr, gs_xb = _above_threshold_indices_gs(
            zub, r, x, y, threshold, gs_xtr, gs_xb, beta)
    return above, rx, gs_xtr, gs_xb
def solve(x, y, l0, l2, m, zlb, zub, gs_xtr, gs_xb, xi_norm=None,
          warm_start=None, r=None,
          rel_tol=1e-4, tree_upper_bound=None, mio_gap=0,
          check_if_integral=True, cd_max_itr=100, kkt_max_itr=100):
    """Solve a node relaxation by coordinate descent with KKT correction.

    Alternates coordinate descent on the current support with a KKT pass
    that adds threshold-violating features, until no violations remain and
    the primal/dual (or tree MIO) gap is acceptable.  Returns a ``Solution``
    namedtuple.  May recurse once (with ``check_if_integral=False``) when
    the relaxation turns out to be integral while the gap is still large.

    NOTE(review): ``x``/``y`` appear to be the data matrix and response and
    ``zlb``/``zub`` per-feature bounds on z, inferred from the callees —
    confirm against the module's public entry points.
    """
    # Keep untouched copies of the bounds for the possible recursive re-solve.
    zlb_main, zub_main = zlb.copy(), zub.copy()
    st = time()
    _sol_str = \
        'primal_value dual_value support primal_beta sol_time z r gs_xtr gs_xb'
    Solution = namedtuple('Solution', _sol_str)
    beta, r, support, zub, zlb, xi_norm = \
        _initialize(x, y, l0, l2, m, zlb, zub, xi_norm, warm_start, r)
    cost, _ = get_primal_cost(beta, r, l0, l2, m, zlb, zub)
    dual_cost = None
    _, threshold = get_ratio_threshold(l0, l2, m)
    # CD is first solved to half the requested tolerance; tightened below
    # when the duality gap stalls.
    cd_tol = rel_tol / 2
    counter = 0
    # Outer loop: CD on the current support, then a KKT violation check.
    while counter < kkt_max_itr:
        beta, cost, r = cd(x, beta, cost, l0, l2, m, xi_norm, zlb, zub,
                           support, r, cd_tol, cd_max_itr)
        above_threshold, rx, gs_xtr, gs_xb = \
            _above_threshold(x, y, beta, zub, gs_xtr, gs_xb, r, threshold)
        outliers = [i for i in above_threshold if i not in support]
        if not outliers:
            # No KKT violations: evaluate the dual on the current support.
            # numba typed list is required by the jitted dual-cost routine;
            # the comprehension's loop variable only shadows x locally.
            typed_a = List()
            [typed_a.append(x) for x in support]
            dual_cost = get_dual_cost(y, beta, r, rx, l0, l2, m, zlb, zub,
                                      typed_a)
            if not check_if_integral or tree_upper_bound is None:
                # No usable tree bound: force the gap branch below to pass.
                cur_gap = -2
                tree_upper_bound = dual_cost + 1
            else:
                cur_gap = (tree_upper_bound - cost) / tree_upper_bound
            if cur_gap < mio_gap and tree_upper_bound > dual_cost:
                if ((cost - dual_cost) / abs(cost) < rel_tol) or \
                        (cd_tol < 1e-8 and check_if_integral):
                    break
                else:
                    # Gap still too wide: tighten CD and iterate again.
                    cd_tol /= 100
            else:
                break
        # Add the violating features to the support and rerun CD.
        support = support | set([i.item() for i in outliers])
        counter += 1
    if counter == kkt_max_itr:
        print('Maximum KKT check iterations reached, increase kkt_max_itr '
              'to avoid this warning')
    # Restrict everything to the nonzero coefficients for the final report.
    active_set = [i.item() for i in beta.nonzero()[0]]
    beta_active, x_active, xi_norm_active, zlb_active, zub_active = \
        get_active_components(active_set, x, beta, zlb, zub, xi_norm)
    primal_cost, z_active = get_primal_cost(beta_active, r, l0, l2, m,
                                            zlb_active, zub_active)
    # Clamp z into its box constraints.
    z_active = np.minimum(np.maximum(zlb_active, z_active), zub_active)
    if dual_cost is not None:
        prim_dual_gap = (cost - dual_cost) / abs(cost)
    else:
        prim_dual_gap = 1
    if check_if_integral:
        if prim_dual_gap > rel_tol:
            if is_integral(z_active, 1e-4):
                # Integral z with a large gap: re-solve from the original
                # bounds using this support as a warm start (no re-check).
                ws = {i: j for i, j in zip(active_set, beta_active)}
                sol = solve(x=x, y=y, l0=l0, l2=l2, m=m, zlb=zlb_main,
                            zub=zub_main, gs_xtr=gs_xtr, gs_xb=gs_xb,
                            xi_norm=xi_norm, warm_start=ws, r=r,
                            rel_tol=rel_tol, tree_upper_bound=tree_upper_bound,
                            mio_gap=1, check_if_integral=False)
                return sol
    sol = Solution(primal_value=primal_cost, dual_value=dual_cost,
                   support=active_set, primal_beta=beta_active,
                   sol_time=time() - st, z=z_active, r=r, gs_xtr=gs_xtr,
                   gs_xb=gs_xb)
    return sol
| 39.015 | 79 | 0.60387 |
b2842ba57b4666045fc4763a33435c2f652b5394 | 5,668 | py | Python | uroboros-diversification/src/diversification/bb_branchfunc_diversify.py | whj0401/RLOBF | 2755eb5e21e4f2445a7791a1159962e80a5739ca | [
"MIT"
] | 3 | 2020-12-11T06:15:17.000Z | 2021-04-24T07:09:03.000Z | uroboros-diversification/src/diversification/bb_branchfunc_diversify.py | whj0401/RLOBF | 2755eb5e21e4f2445a7791a1159962e80a5739ca | [
"MIT"
] | null | null | null | uroboros-diversification/src/diversification/bb_branchfunc_diversify.py | whj0401/RLOBF | 2755eb5e21e4f2445a7791a1159962e80a5739ca | [
"MIT"
] | 2 | 2021-03-10T17:46:33.000Z | 2021-03-31T08:00:27.000Z | from analysis.visit import *
from disasm.Types import *
from utils.ail_utils import *
from utils.pp_print import *
from junkcodes import get_junk_codes
obfs_proportion = 0.015
| 46.459016 | 128 | 0.534227 |
b284e34183349b94655f2ba4c0ad549e6e0f8f3f | 273 | py | Python | dsfaker/generators/str.py | pajachiet/dsfaker | 0e65ba336608c2ccc5e32a541f3b66dfad019b35 | [
"MIT"
] | 3 | 2017-03-12T22:08:59.000Z | 2017-05-22T16:57:17.000Z | dsfaker/generators/str.py | pajachiet/dsfaker | 0e65ba336608c2ccc5e32a541f3b66dfad019b35 | [
"MIT"
] | 12 | 2017-03-01T10:14:08.000Z | 2017-04-23T12:15:10.000Z | dsfaker/generators/str.py | pajachiet/dsfaker | 0e65ba336608c2ccc5e32a541f3b66dfad019b35 | [
"MIT"
] | 2 | 2017-05-04T15:36:21.000Z | 2018-02-07T13:49:13.000Z | from random import Random
from rstr import Rstr
from . import Generator
| 22.75 | 41 | 0.677656 |
b285955d688db6c4b472e2c5faffe22749cd5bcf | 7,081 | py | Python | ssh/factorcheck.py | riquelmev/cs338 | cdbff5e25b112a9fb2e039f59c0ebf036649ffd8 | [
"MIT"
] | null | null | null | ssh/factorcheck.py | riquelmev/cs338 | cdbff5e25b112a9fb2e039f59c0ebf036649ffd8 | [
"MIT"
] | null | null | null | ssh/factorcheck.py | riquelmev/cs338 | cdbff5e25b112a9fb2e039f59c0ebf036649ffd8 | [
"MIT"
] | null | null | null | import numpy
import math
print(math.lcm(0x00eca08bfa42dcad582302232a80813894fd2e4b842dca21eba465619a0d464a9f864ab2e9c0be42367d63c595e81385dcb66bbf8242cddb848969f883af2fbb8c1490a3932c03d15b2d7dfb08dd2c61e05978fbfd337e70ba838574cfe443658910aef9303e968d32351339c14a3c08920a5c1a854cea5af98bd32f1098a2fc5f8a468009c7c063f48c29a688bc485f580625883b8a13ff655d34a11f927ddcfadfdc25c9e318127a83e8fb48ada3f531a5160fc9849852e2e51cba9001cc18e4,
0x00d63e8c9986e6067792268a91b4b65721256fe5ff7de459f80348b882d67a024032e38d9dc3d12943e95f97c9efe381399f16697311ad2766ab98dbe08c30fcd312754bbeb344c88fa2f8ff7ce6ac36d68e4950dfd6599270cfa9b36cec3384323efe64731a69aedee1761104f65a6f84eab6806c90af902b7a24c422cf4673986eb7b18650de51b10109de23668e471354f543b2d05386f4aa44feaf00fe0e0ca8335ba9cd0a0cd7b44233fcec489a3217eb3da1d9b51c4d8e9ba40cfd6cb7aa))
print (( (65537 * 2943845207193600139849586921660530062979514836939652252911168510314905302166532845264906113584033646531012076406573806987025047457519902435411802267739360377120761697446091031629022721340581940013244671666962132695199042194704089512690548281464483553640422003142860526990759194808923501682158662399385088877090264964084503057490757632128265341366808789218428209326618760642760356184383281196480504761667539912421070047089521150757775831975677601090160692307767419292257798639731268363386233177395498370665722400495226560396671910091288741087409721516597979322885628216630331527097105539998928620712679031068142304793554336036922257467880853151468114731275288628988864368750827488439382991282564278525342098508917887127750683566587189942598936549588448717091038482697327056078134954278878301931522106687291086778640089700384840670406150969051320700177941289226071446754539534444766951378823161600415971105082067617171855980113)
% 2247039172418436668592154415151015126222786674452760187503368863970509536315956942465946330840400804713521295730929741305714657992353620380964165912192341731136307469898957232004091102824338674617377312450939870608493589894180315797731195699072185635394040726997130798478842130796557413577261032584072916023035927031809993907276633856706151009517313622397019910955492822225070876581131226412459152580542808796183783690613859162091921205452946458684438170181390092687592585015747357730389512738725469097581172245064706069050974691027868509488068610750445862693733466299013534093773154038841250698994256296984775707305557541589235662563155223305238362859813517247589601725306580259839877045186180003746975834031900204620211932784805784617611303338578827900908401922205156339089130334248484128507875195736838993177401998121291885662897832705086377879426528514698451483880180031084401254280385901954419537599741014039443185713588 == 1))
print((32**65537) % 2247039172418436668592154415151015126222786674452760187503368863970509536315956942465946330840400804713521295730929741305714657992353620380964165912192341731136307469898957232004091102824338674617377312450939870608493589894180315797731195699072185635394040726997130798478842130796557413577261032584072916023035927031809993907276633856706151009517313622397019910955492822225070876581131226412459152580542808796183783690613859162091921205452946458684438170181390092687592585015747357730389512738725469097581172245064706069050974691027868509488068610750445862693733466299013534093773154038841250698994256296984775707305557541589235662563155223305238362859813517247589601725306580259839877045186180003746975834031900204620211932784805784617611303338578827900908401922205156339089130334248484128507875195736838993177401998121291885662897832705086377879426528514698451483880180031084401254280385901954419537599741014039443185713588)
print(2943845207193600139849586921660530062979514836939652252911168510314905302166532845264906113584033646531012076406573806987025047457519902435411802267739360377120761697446091031629022721340581940013244671666962132695199042194704089512690548281464483553640422003142860526990759194808923501682158662399385088877090264964084503057490757632128265341366808789218428209326618760642760356184383281196480504761667539912421070047089521150757775831975677601090160692307767419292257798639731268363386233177395498370665722400495226560396671910091288741087409721516597979322885628216630331527097105539998928620712679031068142304793554336036922257467880853151468114731275288628988864368750827488439382991282564278525342098508917887127750683566587189942598936549588448717091038482697327056078134954278878301931522106687291086778640089700384840670406150969051320700177941289226071446754539534444766951378823161600415971105082067617171855980113%0x00eca08bfa42dcad582302232a80813894fd2e4b842dca21eba465619a0d464a9f864ab2e9c0be42367d63c595e81385dcb66bbf8242cddb848969f883af2fbb8c1490a3932c03d15b2d7dfb08dd2c61e05978fbfd337e70ba838574cfe443658910aef9303e968d32351339c14a3c08920a5c1a854cea5af98bd32f1098a2fc5f8a468009c7c063f48c29a688bc485f580625883b8a13ff655d34a11f927ddcfadfdc25c9e318127a83e8fb48ada3f531a5160fc9849852e2e51cba9001cc18e4
== 0x283f4a6fbfad9f424d7a10972b124f986fd3cefe65776afb9493b5dd2902dab0757c0120672b3541e563f1f88467c5adfbcd29deb31426914d7a1bcdf21f314c2b374acb3e824bbab16b2b269fcfebb9e81dfee65b3ad75bb201221436240c821ab758250f9035e5e34728dcaa8eb97a758ea2e82763f92356d80dba49ebf6f71d22cea65b366b09ee492b4d38912abe6315412db7579d6a15475d5c6c634211ddbfa921c4a1948b0822b992ec0de6279287c519a696ee0a2fa40a4b7232cfcd)
print(2943845207193600139849586921660530062979514836939652252911168510314905302166532845264906113584033646531012076406573806987025047457519902435411802267739360377120761697446091031629022721340581940013244671666962132695199042194704089512690548281464483553640422003142860526990759194808923501682158662399385088877090264964084503057490757632128265341366808789218428209326618760642760356184383281196480504761667539912421070047089521150757775831975677601090160692307767419292257798639731268363386233177395498370665722400495226560396671910091288741087409721516597979322885628216630331527097105539998928620712679031068142304793554336036922257467880853151468114731275288628988864368750827488439382991282564278525342098508917887127750683566587189942598936549588448717091038482697327056078134954278878301931522106687291086778640089700384840670406150969051320700177941289226071446754539534444766951378823161600415971105082067617171855980113%
0x00d63e8c9986e6067792268a91b4b65721256fe5ff7de459f80348b882d67a024032e38d9dc3d12943e95f97c9efe381399f16697311ad2766ab98dbe08c30fcd312754bbeb344c88fa2f8ff7ce6ac36d68e4950dfd6599270cfa9b36cec3384323efe64731a69aedee1761104f65a6f84eab6806c90af902b7a24c422cf4673986eb7b18650de51b10109de23668e471354f543b2d05386f4aa44feaf00fe0e0ca8335ba9cd0a0cd7b44233fcec489a3217eb3da1d9b51c4d8e9ba40cfd6cb7aa
== 0x47d9c4577cc94a23f1ace14e0a5818927236bbe0da7ca9bba6864df2fb3101ee3be2daccad2e49739021d20b145bad2c00f1883de210bb2510a97c1c2b880652575f651eb88a79e4ca184dbebab1c8d65df3b29ecf094d366e3e9081181a12dcb309a7f07e4c312c685aab4c89be3ca64bfd16c6d2233eeb85d42cbf2bda89cbf65dbeb8b8084747607cc9b5ff9ff9b03f0ede3c6ae7885c277a6a1b90eea311959b5bc36f934e494d17e2cd9104ac49de81b332c38b9cc959e952b4548d906f)
| 337.190476 | 1,320 | 0.990679 |
b286d23fc369a16764ed55694919ccd382975d06 | 138 | py | Python | main1.py | dubblin27/bible-of-algo | 4f893ba0d32d8d169abf4c4485f105cc8169cdbb | [
"MIT"
] | null | null | null | main1.py | dubblin27/bible-of-algo | 4f893ba0d32d8d169abf4c4485f105cc8169cdbb | [
"MIT"
] | null | null | null | main1.py | dubblin27/bible-of-algo | 4f893ba0d32d8d169abf4c4485f105cc8169cdbb | [
"MIT"
] | null | null | null | su = 0
a = [3,5,6,2,7,1]
print(sum(a))
x, y = input("Enter a two value: ").split()
x = int(x)
y = int(y)
su = a[y] + sum(a[:y])
print(su) | 17.25 | 44 | 0.514493 |
b2887d26206a7158175689bb0d3fde0011f6d15d | 8,099 | py | Python | reagent/test/training/test_qrdqn.py | dmitryvinn/ReAgent | f98825b9d021ec353a1f9087840a05fea259bf42 | [
"BSD-3-Clause"
] | 1,156 | 2019-10-02T12:15:31.000Z | 2022-03-31T16:01:27.000Z | reagent/test/training/test_qrdqn.py | dmitryvinn/ReAgent | f98825b9d021ec353a1f9087840a05fea259bf42 | [
"BSD-3-Clause"
] | 448 | 2019-10-03T13:40:52.000Z | 2022-03-28T07:49:15.000Z | reagent/test/training/test_qrdqn.py | dmitryvinn/ReAgent | f98825b9d021ec353a1f9087840a05fea259bf42 | [
"BSD-3-Clause"
] | 214 | 2019-10-13T13:28:33.000Z | 2022-03-24T04:11:52.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from reagent.core.parameters import EvaluationParameters, RLParameters
from reagent.core.types import FeatureData, DiscreteDqnInput, ExtraData
from reagent.evaluation.evaluator import get_metrics_to_score
from reagent.models.dqn import FullyConnectedDQN
from reagent.training.parameters import QRDQNTrainerParameters
from reagent.training.qrdqn_trainer import QRDQNTrainer
from reagent.workflow.types import RewardOptions
| 40.293532 | 88 | 0.633782 |
b28976d7d07ee0d85891e3ce1f95a592baa06a72 | 717 | py | Python | highway_env/__init__.py | songanz/highway-env | ac21d1da25e224dbdbf8ba39509f4013bd029f52 | [
"MIT"
] | 1 | 2019-11-06T15:28:27.000Z | 2019-11-06T15:28:27.000Z | highway_env/__init__.py | songanz/highway-env | ac21d1da25e224dbdbf8ba39509f4013bd029f52 | [
"MIT"
] | null | null | null | highway_env/__init__.py | songanz/highway-env | ac21d1da25e224dbdbf8ba39509f4013bd029f52 | [
"MIT"
] | 1 | 2019-07-22T03:37:09.000Z | 2019-07-22T03:37:09.000Z | from gym.envs.registration import register
# Every environment shipped by this package, registered with gym under a
# stable id.  A max-steps entry of ``None`` means the spec is registered
# without an episode step limit, exactly as before.
_SPECS = (
    ('highway-v0', 'highway_env.envs:HighwayEnv', None),
    ('highway-continuous-v0', 'highway_env.envs:HighwayEnvCon', None),
    ('highway-continuous-intrinsic-rew-v0',
     'highway_env.envs:HighwayEnvCon_intrinsic_rew', None),
    ('merge-v0', 'highway_env.envs:MergeEnv', None),
    ('roundabout-v0', 'highway_env.envs:RoundaboutEnv', None),
    ('two-way-v0', 'highway_env.envs:TwoWayEnv', 15),
    ('parking-v0', 'highway_env.envs:ParkingEnv', 20),
)

for _env_id, _entry_point, _max_steps in _SPECS:
    if _max_steps is None:
        register(id=_env_id, entry_point=_entry_point)
    else:
        register(id=_env_id, entry_point=_entry_point,
                 max_episode_steps=_max_steps)
| 18.384615 | 63 | 0.714086 |
b28b3da62fcf1d7ad1f84230a298ab9d0ed79266 | 700 | py | Python | twitcaspy/auth/app.py | Alma-field/twitcaspy | 25f3e850f2d5aab8a864bd6b7003468587fa3ea7 | [
"MIT"
] | null | null | null | twitcaspy/auth/app.py | Alma-field/twitcaspy | 25f3e850f2d5aab8a864bd6b7003468587fa3ea7 | [
"MIT"
] | 18 | 2021-10-01T13:40:01.000Z | 2021-10-18T12:34:57.000Z | twitcaspy/auth/app.py | Alma-field/twitcaspy | 25f3e850f2d5aab8a864bd6b7003468587fa3ea7 | [
"MIT"
] | null | null | null | # Twitcaspy
# Copyright 2021 Alma-field
# See LICENSE for details.
#
# based on tweepy(https://github.com/tweepy/tweepy)
# Copyright (c) 2009-2021 Joshua Roesslein
from .auth import AuthHandler
from .oauth import OAuth2Basic
| 22.580645 | 57 | 0.671429 |
b28d0dae8fb9ed9ee50b81bbf1aae13554854cbe | 1,352 | py | Python | src/baskerville/models/model_interface.py | deflect-ca/baskerville | 9659f4b39ab66fcf5329a4eccff15e97245b04f0 | [
"CC-BY-4.0"
] | 2 | 2021-12-03T11:26:38.000Z | 2022-01-12T22:24:29.000Z | src/baskerville/models/model_interface.py | deflect-ca/baskerville | 9659f4b39ab66fcf5329a4eccff15e97245b04f0 | [
"CC-BY-4.0"
] | 3 | 2022-01-19T15:17:37.000Z | 2022-03-22T04:55:22.000Z | src/baskerville/models/model_interface.py | deflect-ca/baskerville | 9659f4b39ab66fcf5329a4eccff15e97245b04f0 | [
"CC-BY-4.0"
] | null | null | null | # Copyright (c) 2020, eQualit.ie inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import logging
| 26 | 71 | 0.621302 |
b28d6cf5837de54ecfea09556ec7ac0f5538da24 | 2,253 | py | Python | setup_win(MPL2).py | iefan/army_holiday | 0c79cf89c4dbb16bd87ca754265821f82b298f13 | [
"Apache-2.0"
] | null | null | null | setup_win(MPL2).py | iefan/army_holiday | 0c79cf89c4dbb16bd87ca754265821f82b298f13 | [
"Apache-2.0"
] | null | null | null | setup_win(MPL2).py | iefan/army_holiday | 0c79cf89c4dbb16bd87ca754265821f82b298f13 | [
"Apache-2.0"
] | null | null | null | # Used successfully in Python2.5 with matplotlib 0.91.2 and PyQt4 (and Qt 4.3.3)
# py2exe build script: bundles a matplotlib/PyQt4 GUI (frmlogin.pyw) into a
# standalone Windows executable.
from distutils.core import setup
import py2exe
import sys
# no arguments
# Default to the "py2exe" command so a bare invocation still builds.
if len(sys.argv) == 1:
    sys.argv.append("py2exe")
# We need to import the glob module to search for all files.
import glob
# We need to exclude matplotlib backends not being used by this executable. You may find
# that you need different excludes to create a working executable with your chosen backend.
# We also need to include include various numerix libraries that the other functions call.
opts = {
    'py2exe': { "includes" : ["matplotlib.backends", "matplotlib.backends.backend_qt4agg",
                              "matplotlib.figure","pylab", "numpy", "matplotlib.numerix.fft",
                              "matplotlib.numerix.linear_algebra", "matplotlib.numerix.random_array",
                              "matplotlib.backends.backend_tkagg"],
                'excludes': ['_gtkagg', '_tkagg', '_agg2', '_cairo', '_cocoaagg',
                             '_fltkagg', '_gtk', '_gtkcairo', ],
                'dll_excludes': ['libgdk-win32-2.0-0.dll',
                                 'libgobject-2.0-0.dll'],
                "compressed": 1,
              }
       }
# Save matplotlib-data to mpl-data ( It is located in the matplotlib\mpl-data
# folder and the compiled programs will look for it in \mpl-data
# note: using matplotlib.get_mpldata_info
data_files = [(r'mpl-data', glob.glob(r'C:\Python25\Lib\site-packages\matplotlib\mpl-data\*.*')),
# Because matplotlibrc does not have an extension, glob does not find it (at least I think that's why)
# So add it manually here:
                    (r'mpl-data', [r'C:\Python25\Lib\site-packages\matplotlib\mpl-data\matplotlibrc']),
                    (r'mpl-data\images',glob.glob(r'C:\Python25\Lib\site-packages\matplotlib\mpl-data\images\*.*')),
                    (r'mpl-data\fonts',glob.glob(r'C:\Python25\Lib\site-packages\matplotlib\mpl-data\fonts\*.*'))]
# for console program use 'console = [{"script" : "scriptname.py"}]
# icon_resources embeds the application icon at resource id 0.
setup(windows=[{"script" : "frmlogin.pyw", "icon_resources": [(0, "bitmap/PHRLogo.ico")]}], options=opts, \
        zipfile = None, data_files=data_files)
| 51.204545 | 122 | 0.625388 |
b28f9f150dd905146af9d33f4c81aae2c96483db | 1,529 | py | Python | GeeksForGeeks/Sudo Placement 2019/Find the closest number.py | nayanapardhekar/Python | 55ea0cc1dd69192b25cb71358cd03cc2ce13be0a | [
"MIT"
] | 37 | 2019-04-03T07:19:57.000Z | 2022-01-09T06:18:41.000Z | GeeksForGeeks/Sudo Placement 2019/Find the closest number.py | nayanapardhekar/Python | 55ea0cc1dd69192b25cb71358cd03cc2ce13be0a | [
"MIT"
] | 16 | 2020-08-11T08:09:42.000Z | 2021-10-30T17:40:48.000Z | GeeksForGeeks/Sudo Placement 2019/Find the closest number.py | nayanapardhekar/Python | 55ea0cc1dd69192b25cb71358cd03cc2ce13be0a | [
"MIT"
] | 130 | 2019-10-02T14:40:20.000Z | 2022-01-26T17:38:26.000Z | # Find the closest number
# Difficulty: Basic Marks: 1
'''
Given an array of sorted integers. The task is to find the closest value to the given number in array. Array may contain duplicate values.
Note: If the difference is same for two values print the value which is greater than the given number.
Input:
The first line of input contains an integer T denoting the number of test cases. Then T test cases follow. Each test case consists of two lines. First line of each test case contains two integers N & K and the second line contains N space separated array elements.
Output:
For each test case, print the closest number on a new line.
Constraints:
1<=T<=100
1<=N<=105
1<=K<=105
1<=A[i]<=105
Example:
Input:
2
4 4
1 3 6 7
7 4
1 2 3 5 6 8 9
Output:
3
5
'''
def closest(arr, k):
    """Return the value in ``arr`` closest to ``k``.

    Ties (two values at the same absolute distance from ``k``) are broken
    in favour of the larger value, as the problem statement above requires.

    Args:
        arr: non-empty sequence of integers (need not be sorted).
        k: the query value.

    Returns:
        The element of ``arr`` nearest to ``k``.
    """
    best = arr[0]
    for value in arr:
        dist, best_dist = abs(value - k), abs(best - k)
        # Strictly closer always wins; on an exact tie prefer the greater value.
        if dist < best_dist or (dist == best_dist and value > best):
            best = value
    return best


if __name__ == "__main__":
    # One test case per input pair: "N K" on one line, then N values.
    # (The guard makes the module importable/testable, unlike the original
    # top-level loop; behavior when run as a script is unchanged.)
    for _ in range(int(input())):
        _n, k = map(int, input().split())  # N is implied by the value line
        arr = list(map(int, input().split()))
        print(closest(arr, k))
| 26.824561 | 264 | 0.517986 |
b2910846552317313e27d4630f9b125c62fc3263 | 4,391 | py | Python | qcodes/tests/test_sweep_values.py | riju-pal/QCoDeS_riju | 816e76809160e9af457f6ef6d4aca1b0dd5eea82 | [
"MIT"
] | 223 | 2016-10-29T15:00:24.000Z | 2022-03-20T06:53:34.000Z | qcodes/tests/test_sweep_values.py | M1racleShih/Qcodes | c03029a6968e16379155aadc8b083a02e01876a6 | [
"MIT"
] | 3,406 | 2016-10-25T10:44:50.000Z | 2022-03-31T09:47:35.000Z | qcodes/tests/test_sweep_values.py | nikhartman/Qcodes | 042c5e25ab9e40b20c316b4055c4842844834d1e | [
"MIT"
] | 263 | 2016-10-25T11:35:36.000Z | 2022-03-31T08:53:20.000Z | import pytest
from qcodes.instrument.parameter import Parameter
from qcodes.instrument.sweep_values import SweepValues
from qcodes.utils.validators import Numbers
def test_errors(c0, c1, c2):
    """Invalid SweepValues constructions and operations must raise.

    ``c0``/``c1`` are settable fixture parameters with a bounded validator
    (presumably roughly [-10, 10] given the values rejected below -- confirm
    against the fixture definitions); ``c2`` has no setter.
    """
    # only complete 3-part slices are valid
    with pytest.raises(TypeError):
        c0[1:2]  # For Int params this could be defined as step=1
    with pytest.raises(TypeError):
        c0[:2:3]
    with pytest.raises(TypeError):
        c0[1::3]
    with pytest.raises(TypeError):
        c0[:]  # For Enum params we *could* define this one too...
    # fails if the parameter has no setter
    with pytest.raises(TypeError):
        c2[0:0.1:0.01]
    # validates every step value against the parameter's Validator
    with pytest.raises(ValueError):
        c0[5:15:1]
    with pytest.raises(ValueError):
        c0[5.0:15.0:1.0]
    with pytest.raises(ValueError):
        c0[-12]
    with pytest.raises(ValueError):
        c0[-5, 12, 5]
    with pytest.raises(ValueError):
        c0[-5, 12:8:1, 5]
    # cannot combine SweepValues for different parameters
    with pytest.raises(TypeError):
        c0[0.1] + c1[0.2]
    # improper use of extend
    with pytest.raises(TypeError):
        c0[0.1].extend(5)
    # SweepValue object has no getter, even if the parameter does
    with pytest.raises(AttributeError):
        c0[0.1].get
def test_valid(c0):
    """Exercise the sequence API of SweepFixedValues built from ``c0``."""
    c0_sv = c0[1]
    # setter gets mapped
    assert c0_sv.set == c0.set
    # normal sequence operations access values
    assert list(c0_sv) == [1]
    assert c0_sv[0] == 1
    assert 1 in c0_sv
    assert not (2 in c0_sv)
    # in-place and copying addition
    c0_sv += c0[1.5:1.8:0.1]
    c0_sv2 = c0_sv + c0[2]
    assert list(c0_sv) == [1, 1.5, 1.6, 1.7]
    assert list(c0_sv2) == [1, 1.5, 1.6, 1.7, 2]
    # append and extend
    c0_sv3 = c0[2]
    # append only works with straight values
    c0_sv3.append(2.1)
    # extend can use another SweepValue, (even if it only has one value)
    c0_sv3.extend(c0[2.2])
    # extend can also take a sequence
    c0_sv3.extend([2.3])
    # as can addition
    c0_sv3 += [2.4]
    c0_sv4 = c0_sv3 + [2.5, 2.6]
    assert list(c0_sv3) == [2, 2.1, 2.2, 2.3, 2.4]
    assert list(c0_sv4) == [2, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6]
    # len
    assert len(c0_sv3) == 5
    # in-place and copying reverse
    c0_sv.reverse()
    c0_sv5 = reversed(c0_sv)
    assert list(c0_sv) == [1.7, 1.6, 1.5, 1]
    assert list(c0_sv5) == [1, 1.5, 1.6, 1.7]
    # multi-key init, where first key is itself a list
    c0_sv6 = c0[[1, 3], 4]
    # copying
    c0_sv7 = c0_sv6.copy()
    assert list(c0_sv6) == [1, 3, 4]
    assert list(c0_sv7) == [1, 3, 4]
    assert not (c0_sv6 is c0_sv7)
def test_base():
    """The abstract SweepValues base class must refuse iteration."""
    param = Parameter('p', get_cmd=None, set_cmd=None)
    with pytest.raises(NotImplementedError):
        # Only concrete subclasses implement iteration.
        iter(SweepValues(param))
def test_snapshot(c0):
    """Snapshots compress values into 'item', 'linear' or 'sequence' entries."""
    # a single explicit value is recorded as an 'item'
    assert c0[0].snapshot() == {
        'parameter': c0.snapshot(),
        'values': [{'item': 0}]
    }
    # a slice collapses to a compact 'linear' description
    assert c0[0:5:0.3].snapshot()['values'] == [{
        'first': 0,
        'last': 4.8,
        'num': 17,
        'type': 'linear'
    }]
    sv = c0.sweep(start=2, stop=4, num=5)
    assert sv.snapshot()['values'] == [{
        'first': 2,
        'last': 4,
        'num': 5,
        'type': 'linear'
    }]
    # mixture of bare items, nested lists, and slices
    sv = c0[1, 7, 3.2, [1, 2, 3], 6:9:1, -4.5, 5.3]
    assert sv.snapshot()['values'] == [{
        'first': 1,
        'last': 5.3,
        'min': -4.5,
        'max': 8,
        'num': 11,
        'type': 'sequence'
    }]
    # concatenated sweeps keep one snapshot entry per constituent part
    assert (c0[0] + c0[1]).snapshot()['values'] == [
        {'item': 0},
        {'item': 1}
    ]
    assert (c0[0:3:1] + c0[4, 6, 9]).snapshot()['values'] == [
        {'first': 0, 'last': 2, 'num': 3, 'type': 'linear'},
        {'first': 4, 'last': 9, 'min': 4, 'max': 9, 'num': 3,
         'type': 'sequence'}
    ]
def test_repr(c0):
    """repr() embeds the class path, the parameter name and the object id."""
    sweep = c0[0]
    expected = ('<qcodes.instrument.sweep_values.SweepFixedValues: '
                'c0 at {}>'.format(id(sweep)))
    assert repr(sweep) == expected
| 25.235632 | 76 | 0.566614 |
b2920a5b35fa8d9589396ec223bdc4d33e30fd7a | 350 | py | Python | src/django_powerdns_api/urls.py | andrzej-jankowski/django-powerdns-api | c7bc793022ba9fde2dd0e3564c3c63398611540b | [
"Apache-2.0"
] | null | null | null | src/django_powerdns_api/urls.py | andrzej-jankowski/django-powerdns-api | c7bc793022ba9fde2dd0e3564c3c63398611540b | [
"Apache-2.0"
] | null | null | null | src/django_powerdns_api/urls.py | andrzej-jankowski/django-powerdns-api | c7bc793022ba9fde2dd0e3564c3c63398611540b | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from django.conf.urls import patterns, include, url
from django_powerdns_api.routers import router
# Root URLconf for the PowerDNS API: every request path is handed to the
# DRF router, which generates the per-viewset URL patterns.
# NOTE(review): ``patterns('')`` is the legacy pre-Django-1.8 API.
urlpatterns = patterns(
    '',
    url(r'^', include(router.urls)),
)
| 20.588235 | 51 | 0.768571 |
b292be09587a07ede608a3607cc6852e3db17188 | 925 | py | Python | tools/SDKTool/src/WrappedDeviceAPI/deviceAPI/mobileDevice/android/plugin/Platform_plugin/PlatformWeTest/__init__.py | Passer-D/GameAISDK | a089330a30b7bfe1f6442258a12d8c0086240606 | [
"Apache-2.0"
] | 1,210 | 2020-08-18T07:57:36.000Z | 2022-03-31T15:06:05.000Z | tools/SDKTool/src/WrappedDeviceAPI/deviceAPI/mobileDevice/android/plugin/Platform_plugin/PlatformWeTest/__init__.py | guokaiSama/GameAISDK | a089330a30b7bfe1f6442258a12d8c0086240606 | [
"Apache-2.0"
] | 37 | 2020-08-24T02:48:38.000Z | 2022-01-30T06:41:52.000Z | tools/SDKTool/src/WrappedDeviceAPI/deviceAPI/mobileDevice/android/plugin/Platform_plugin/PlatformWeTest/__init__.py | guokaiSama/GameAISDK | a089330a30b7bfe1f6442258a12d8c0086240606 | [
"Apache-2.0"
] | 275 | 2020-08-18T08:35:16.000Z | 2022-03-31T15:06:07.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import platform
# Detect the host OS once at import time.  platform.platform() returns a
# string such as "Windows-10-..." or "Linux-4.15-...", hence the lowercase
# prefix checks.
__is_windows_system = platform.platform().lower().startswith('window')
__is_linux_system = platform.platform().lower().startswith('linux')
# Re-export the OS-specific implementations so callers can simply do
# ``from ...PlatformWeTest import PlatformWeTest`` / ``AdbTool``.
if __is_windows_system:
    from .demo_windows.PlatformWeTest import PlatformWeTest
    from .demo_windows.common.AdbTool import AdbTool
elif __is_linux_system:
    from .demo_ubuntu16.PlatformWeTest import PlatformWeTest
    from .demo_ubuntu16.common.AdbTool import AdbTool
else:
    # Any other OS (e.g. macOS) is unsupported by this plugin.
    raise Exception('system is not support!')
| 35.576923 | 111 | 0.776216 |
b293b0671b5147e6e833e70a808c61e5033f825f | 579 | py | Python | python/codingbat/src/sum_double.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | null | null | null | python/codingbat/src/sum_double.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | 2 | 2022-03-10T03:49:14.000Z | 2022-03-14T00:49:54.000Z | python/codingbat/src/sum_double.py | christopher-burke/warmups | 140c96ada87ec5e9faa4622504ddee18840dce4a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""sum_double
Given two int values, return their sum.
Unless the two values are the same, then return double their sum.
sum_double(1, 2) 3
sum_double(3, 2) 5
sum_double(2, 2) 8
source: https://codingbat.com/prob/p141905
"""
def sum_double(a: int, b: int) -> int:
    """Sum Double.

    Return a + b, doubled when the two values are equal.
    """
    total = a + b
    return total * 2 if a == b else total
if __name__ == "__main__":
    # Demo runs mirroring the codingbat examples in the module docstring.
    print(sum_double(1, 2))
    print(sum_double(3, 2))
    print(sum_double(2, 2))
| 18.09375 | 65 | 0.618307 |
b293c4e951eab343a95232f50c197cd3ae253ad6 | 126 | py | Python | database_email_backend/__init__.py | enderlabs/django-database-email-backend | aad6bade66d076b5425f772430adc7e77e60f5ce | [
"MIT"
] | 1 | 2016-01-15T18:54:59.000Z | 2016-01-15T18:54:59.000Z | database_email_backend/__init__.py | enderlabs/django-database-email-backend | aad6bade66d076b5425f772430adc7e77e60f5ce | [
"MIT"
] | 1 | 2015-11-04T22:19:21.000Z | 2015-11-04T22:19:21.000Z | database_email_backend/__init__.py | enderlabs/django-database-email-backend | aad6bade66d076b5425f772430adc7e77e60f5ce | [
"MIT"
] | 4 | 2015-11-04T20:45:16.000Z | 2021-03-03T06:28:20.000Z | # -*- coding: utf-8 -*-
VERSION = (1, 0, 4)
__version__ = "1.0.4"
__authors__ = ["Stefan Foulis <stefan.foulis@gmail.com>", ]
| 25.2 | 59 | 0.611111 |
b293f0ceac4f743a52151b0799d4e433f9e36af9 | 366 | py | Python | src/draw.py | mattdesl/inkyphat-mods | 2867161e66ffce87b75170e081f5ab481ce5e6b1 | [
"MIT"
] | 7 | 2020-04-25T09:24:18.000Z | 2022-01-02T03:24:24.000Z | src/draw.py | mattdesl/inkyphat-mods | 2867161e66ffce87b75170e081f5ab481ce5e6b1 | [
"MIT"
] | null | null | null | src/draw.py | mattdesl/inkyphat-mods | 2867161e66ffce87b75170e081f5ab481ce5e6b1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse  # NOTE(review): imported but never used in this script
from PIL import Image
from inky import InkyPHAT
# Console banner printed at startup.
print("""Inky pHAT/wHAT: Logo
Displays the Inky pHAT/wHAT logo.
""")
# NOTE(review): ``type`` shadows the builtin and is never read afterwards.
type = "phat"
colour = "black"
# Drive a black/white Inky pHAT display with a solid black border.
inky_display = InkyPHAT(colour)
inky_display.set_border(inky_display.BLACK)
# 212x104 matches the Inky pHAT's native resolution (per the asset name).
img = Image.open("assets/InkypHAT-212x104-bw.png")
inky_display.set_image(img)
inky_display.show() | 18.3 | 50 | 0.762295 |
b296a32574784e1bd7a3f60cbb896711ff7dd880 | 1,230 | py | Python | newsapp/tests.py | Esther-Anyona/four-one-one | 6a5e019b35710941a669c1b49e993b683c99d615 | [
"MIT"
] | null | null | null | newsapp/tests.py | Esther-Anyona/four-one-one | 6a5e019b35710941a669c1b49e993b683c99d615 | [
"MIT"
] | null | null | null | newsapp/tests.py | Esther-Anyona/four-one-one | 6a5e019b35710941a669c1b49e993b683c99d615 | [
"MIT"
] | null | null | null | from django.test import TestCase
from .models import *
from django.contrib.auth.models import User
# Create your tests here.
user = User.objects.get(id=1)
profile = Profile.objects.get(id=1)
neighbourhood = Neighbourhood.objects.get(id=1)
| 30 | 170 | 0.702439 |
b296bd14330ba64af65527855f690dd49d0a2709 | 4,620 | py | Python | ssdlite/load_caffe_weights.py | kkrpawkal/MobileNetv2-SSDLite | b434ed07b46d6e7f733ec97e180b57c8db30cae3 | [
"MIT"
] | null | null | null | ssdlite/load_caffe_weights.py | kkrpawkal/MobileNetv2-SSDLite | b434ed07b46d6e7f733ec97e180b57c8db30cae3 | [
"MIT"
] | null | null | null | ssdlite/load_caffe_weights.py | kkrpawkal/MobileNetv2-SSDLite | b434ed07b46d6e7f733ec97e180b57c8db30cae3 | [
"MIT"
] | null | null | null | import numpy as np
import sys,os
caffe_root = '/home/yaochuanqi/work/ssd/caffe/'
sys.path.insert(0, caffe_root + 'python')
import caffe
deploy_proto = 'deploy.prototxt'
save_model = 'deploy.caffemodel'
weights_dir = 'output'
box_layers = ['conv_13/expand', 'Conv_1', 'layer_19_2_2', 'layer_19_2_3', 'layer_19_2_4', 'layer_19_2_5']
net_deploy = caffe.Net(deploy_proto, caffe.TEST)
load_data(net_deploy)
net_deploy.save(save_model)
| 54.352941 | 124 | 0.541775 |
b2977674be0d43e625cea5afb3180e9f200426a4 | 996 | py | Python | qa327/frontend/exceptions.py | rickyzhangca/CISC-327 | e419caafa6ae3fe77aa411228b6b58b237fe6a61 | [
"MIT"
] | null | null | null | qa327/frontend/exceptions.py | rickyzhangca/CISC-327 | e419caafa6ae3fe77aa411228b6b58b237fe6a61 | [
"MIT"
] | 39 | 2020-10-11T02:31:14.000Z | 2020-12-15T20:18:56.000Z | qa327/frontend/exceptions.py | rickyzhangca/CISC-327 | e419caafa6ae3fe77aa411228b6b58b237fe6a61 | [
"MIT"
] | 1 | 2020-10-17T02:44:43.000Z | 2020-10-17T02:44:43.000Z | '''
This is the exceptions module:
'''
'''
Exception of when user do not have the access to certain pages.
'''
'''
Exception of the first password and the second password does not match during registration.
'''
'''
Exception of when the user input format is wrong.
'''
'''
Exception of when the ticket name is wrong.
'''
'''
Exception of when the ticket quantity is wrong.
'''
'''
Exception of when the ticket quantity is wrong.
'''
'''
Exception of when the email already exists in user data (already registered).
''' | 21.191489 | 91 | 0.736948 |
b299f61f9bab8f0fdfd0cbba6dbcac61cd8b37ce | 239 | py | Python | dags/minimal_dag.py | MarcusJones/kaggle_petfinder_adoption | 2d745b48405f4d4211b523eae272b9169fcf9fa2 | [
"MIT"
] | 1 | 2019-01-24T04:22:39.000Z | 2019-01-24T04:22:39.000Z | dags/minimal_dag.py | MarcusJones/kaggle_petfinder_adoption | 2d745b48405f4d4211b523eae272b9169fcf9fa2 | [
"MIT"
] | null | null | null | dags/minimal_dag.py | MarcusJones/kaggle_petfinder_adoption | 2d745b48405f4d4211b523eae272b9169fcf9fa2 | [
"MIT"
] | null | null | null | import airflow as af
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime
with af.DAG('minimal_dag', start_date=datetime(2016, 1, 1)) as dag:
op = DummyOperator(task_id='op')
op.dag is dag # True
| 23.9 | 67 | 0.76569 |
b29ab73d546b03f1d056e040fdce2adc50067aef | 2,567 | py | Python | app.py | paulinaacostac/GPT2 | 4d06584b2e8adfa708f1306e38dadd48c899ac8a | [
"MIT"
] | 2 | 2022-01-06T17:48:58.000Z | 2022-01-06T18:23:31.000Z | app.py | paulinaacostac/gpt2-WebAPI | 4d06584b2e8adfa708f1306e38dadd48c899ac8a | [
"MIT"
] | null | null | null | app.py | paulinaacostac/gpt2-WebAPI | 4d06584b2e8adfa708f1306e38dadd48c899ac8a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import os
import numpy as np
import tensorflow.compat.v1 as tf
from src import model, sample, encoder
from flask import Flask
from flask import request, jsonify
import time
######model
########API
gen = interact_model()
sess, context, output, enc = next(gen)
app = Flask(__name__)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=105) | 26.463918 | 159 | 0.603039 |
b29b61190657129eadf2448fe993cb4e944db000 | 1,096 | py | Python | t/unit/utils/test_div.py | kaiix/kombu | 580b5219cc50cad278c4b664d0e0f85e37a5e9ea | [
"BSD-3-Clause"
] | 1,920 | 2015-01-03T15:43:23.000Z | 2022-03-30T19:30:35.000Z | t/unit/utils/test_div.py | kaiix/kombu | 580b5219cc50cad278c4b664d0e0f85e37a5e9ea | [
"BSD-3-Clause"
] | 949 | 2015-01-02T18:56:00.000Z | 2022-03-31T23:14:59.000Z | t/unit/utils/test_div.py | kaiix/kombu | 580b5219cc50cad278c4b664d0e0f85e37a5e9ea | [
"BSD-3-Clause"
] | 833 | 2015-01-07T23:56:35.000Z | 2022-03-31T22:04:11.000Z | import pickle
from io import BytesIO, StringIO
from kombu.utils.div import emergency_dump_state
| 23.319149 | 69 | 0.595803 |
b29c8d36ba3db7e707e861825377dec464aebc9b | 3,754 | py | Python | intents/oversights/more_than_just_topk.py | googleinterns/debaised-analysis | 0dad1186a177a171956a33c49999d9387b9f989d | [
"Apache-2.0"
] | 1 | 2020-06-26T19:16:15.000Z | 2020-06-26T19:16:15.000Z | intents/oversights/more_than_just_topk.py | bhagyakjain/debaised-analysis | 6b8b27575bf3f60a6711e370bfad838e29f5cc8a | [
"Apache-2.0"
] | 30 | 2020-06-01T13:42:25.000Z | 2022-03-31T03:58:55.000Z | intents/oversights/more_than_just_topk.py | googleinterns/debaised-analysis | 0dad1186a177a171956a33c49999d9387b9f989d | [
"Apache-2.0"
] | 10 | 2020-06-10T05:43:59.000Z | 2020-08-20T10:32:24.000Z | """
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""This module implements detection of the more than just topk oversight
in the top-k intent.
More_than_just_topk is the oversight which arises when the user misses
rows after the kth row that have metric equal-to or close-by the kth row.
Here we use the difference with the kth row normalized by the standard
deviation of top-k to decide if any row is similar to the
"""
from util import constants, enums
def more_than_just_topk(result_table, k, metric):
    """Detect the 'more than just top-k' oversight in top-k results.

    Suggests increasing k when rows just after the kth row have a metric
    value close to the kth row's value.  "Close" means within
    constants.MORE_THAN_JUST_TOPK_THRESHOLD standard deviations of the
    kth row's value, where the standard deviation is computed over the
    top-k rows only.

    Args:
        result_table: pandas DataFrame containing the full (uncropped)
            result rows, assumed sorted so the top-k rows come first.
        k: int, number of entries requested in the top-k results;
            -1 means "all rows requested".
        metric: str, column name of the metric column.

    Returns:
        dict with keys 'suggestion', 'oversight', 'change_list' and
        'confidence_score' when the oversight is detected, else None.
        'change_list' encodes the suggested change to the query json
        (here: a new 'topKLimit' value).
    """
    num_rows = result_table.shape[0]
    # No suggestion if all rows already in the result
    if k >= num_rows or k == -1:
        return
    # Standard deviation of the top-k rows.  With a single row there is
    # no spread, so use 0 instead of the NaN pandas would return.
    standard_deviation_topk = None
    if k == 1:
        standard_deviation_topk = 0
    else:
        standard_deviation_topk = result_table[:k][metric].std()
    # Acceptance band around the kth row's metric value.
    # NOTE(review): chained indexing result_table[metric][k - 1] is
    # label-based — assumes a default RangeIndex; confirm upstream.
    val_lower_bound = result_table[metric][k - 1] - standard_deviation_topk * constants.MORE_THAN_JUST_TOPK_THRESHOLD
    val_upper_bound = result_table[metric][k - 1] + standard_deviation_topk * constants.MORE_THAN_JUST_TOPK_THRESHOLD
    # init the k in suggested query as k in original query
    new_k = k
    confidence_score = 0
    # Extend new_k over the contiguous run of rows after the kth whose
    # metric value stays inside the acceptance band; stop at the first
    # row that falls outside it.
    for row in range(k, num_rows):
        # value of metric at row
        val = result_table[metric][row]
        if val_lower_bound <= val and val <= val_upper_bound:
            new_k = row + 1
        else:
            break
    # A zero spread makes the normalized confidence score undefined
    # (division by zero), so no suggestion is produced in that case.
    if standard_deviation_topk == 0:
        return
    confidence_score = abs(result_table[metric][new_k - 1] - result_table[metric][k - 1]) / standard_deviation_topk
    if new_k != k:
        change_list = {'topKLimit':new_k}
        suggestion = {}
        suggestion['change_list'] = change_list
        suggestion['suggestion'] = 'value of ' + metric + ' in some rows after the top-k is similar to the Kth row'
        suggestion['confidence_score'] = confidence_score
        suggestion['oversight'] = enums.Oversights.MORE_THAN_JUST_TOPK
        return suggestion
    else:
        return
| 37.168317 | 117 | 0.697389 |
b29e142efe612167f93b68a27b4c24715a4da2ff | 1,058 | py | Python | zkpytb/json.py | zertrin/zkpytb | 066662d9c7bd233f977302cb11cf888a2a1828d2 | [
"MIT"
] | 2 | 2021-07-17T19:30:17.000Z | 2022-02-14T04:55:46.000Z | zkpytb/json.py | zertrin/zkpytb | 066662d9c7bd233f977302cb11cf888a2a1828d2 | [
"MIT"
] | null | null | null | zkpytb/json.py | zertrin/zkpytb | 066662d9c7bd233f977302cb11cf888a2a1828d2 | [
"MIT"
] | null | null | null | """
Helper functions related to json
Author: Marc Gallet
"""
import datetime
import decimal
import json
import uuid
import pathlib
| 27.128205 | 88 | 0.618147 |
b29e7d32ca4c3f659315bd72acd899c4542a2363 | 1,960 | py | Python | back_end/consts.py | DoctorChe/crash_map | e540ab8a45f67ff78c9993ac3eb1b413d4786cd9 | [
"MIT"
] | 1 | 2019-04-04T21:55:24.000Z | 2019-04-04T21:55:24.000Z | back_end/consts.py | DoctorChe/crash_map | e540ab8a45f67ff78c9993ac3eb1b413d4786cd9 | [
"MIT"
] | 2 | 2019-04-14T10:11:25.000Z | 2019-04-25T20:49:54.000Z | back_end/consts.py | DoctorChe/crash_map | e540ab8a45f67ff78c9993ac3eb1b413d4786cd9 | [
"MIT"
] | null | null | null | # encoding: utf-8
# input data constants
MARI_EL = ' '
YOSHKAR_OLA = ' , -'
VOLZHSK = ' , '
VOLZHSK_ADM = ' , '
MOUNTIN = ' , '
ZVENIGOVO = ' , '
KILEMARY = ' , '
KUZHENER = ' , '
TUREK = ' , - '
MEDVEDEVO = ' , '
MORKI = ' , '
NEW_TORYAL = ' , '
ORSHANKA = ' , '
PARANGA = ' , '
SERNUR = ' , '
SOVETSKIY = ' , '
YURINO = ' , '
ADMINISTRATIVE = [YOSHKAR_OLA, VOLZHSK, VOLZHSK_ADM, MOUNTIN, ZVENIGOVO, KILEMARY, KUZHENER, TUREK, MEDVEDEVO, MORKI, NEW_TORYAL, ORSHANKA, PARANGA, SERNUR, SOVETSKIY, YURINO]
# data indices
DATE = 0
TIME = 1
TYPE = 2
LOCATION = 3
STREET = 4
HOUSE_NUMBER = 5
ROAD = 6
KILOMETER = 7
METER = 8
LONGITUDE = 9
LATITUDE = 10
DEATH = 11
DEATH_CHILDREN = 12
INJURY = 13
INJURY_CHILDREN = 14
LONGITUDE_GEOCODE = 15
LATITUDE_GEOCODE = 16
VALID = 17
VALID_STRICT = 18
STREET_REPLACE_DICTIONARY = {
'': '',
' -': ' ',
' ': ' ',
'.': '',
' ': ' ',
' ': ' ',
' ': ' '
}
# coordinates grid borders
MARI_EL_WEST = 45.619745
MARI_EL_EAST = 50.200041
MARI_EL_SOUTH = 55.830512
MARI_EL_NORTH = 57.343631
YOSHKAR_OLA_WEST = 47.823484
YOSHKAR_OLA_EAST = 47.972560
YOSHKAR_OLA_SOUTH = 56.603073
YOSHKAR_OLA_NORTH = 56.669722
EARTH_MEAN_RADIUS = 6371000
MAX_DISTANCE = 150
# Yandex API constants
HOUSE_YANDEX = 'house' | 26.849315 | 175 | 0.758673 |
b29fec21f725de737210b497e78b6e2a1d2273be | 7,195 | py | Python | tests/unit/modules/win_iis_test.py | matt-malarkey/salt | c06860730d99e4f4941cbc164ee6db40157a07c9 | [
"Apache-2.0"
] | 1 | 2018-09-19T22:42:54.000Z | 2018-09-19T22:42:54.000Z | tests/unit/modules/win_iis_test.py | matt-malarkey/salt | c06860730d99e4f4941cbc164ee6db40157a07c9 | [
"Apache-2.0"
] | null | null | null | tests/unit/modules/win_iis_test.py | matt-malarkey/salt | c06860730d99e4f4941cbc164ee6db40157a07c9 | [
"Apache-2.0"
] | 1 | 2019-07-23T13:42:23.000Z | 2019-07-23T13:42:23.000Z | # -*- coding: utf-8 -*-
'''
:synopsis: Unit Tests for Windows IIS Module 'module.win_iis'
:platform: Windows
:maturity: develop
versionadded:: Carbon
'''
# Import Python Libs
from __future__ import absolute_import
import json
# Import Salt Libs
from salt.exceptions import SaltInvocationError
from salt.modules import win_iis
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON,
)
ensure_in_syspath('../../')
# Globals
win_iis.__salt__ = {}
# Make sure this module runs on Windows system
HAS_IIS = win_iis.__virtual__()
if __name__ == '__main__':
from integration import run_tests # pylint: disable=import-error
run_tests(WinIisTestCase, needs_daemon=False)
| 40.421348 | 88 | 0.522168 |
b2a0afa260118cc81d83a6eee84100a7f5b452a7 | 6,217 | py | Python | scripts/loader_to_sharepoint.py | lawrkelly/python-useful-scripts | dfa044049e41bd0faed96473a79b4a25e051c198 | [
"MIT"
] | null | null | null | scripts/loader_to_sharepoint.py | lawrkelly/python-useful-scripts | dfa044049e41bd0faed96473a79b4a25e051c198 | [
"MIT"
] | 4 | 2020-09-18T09:58:14.000Z | 2021-12-13T20:47:39.000Z | scripts/loader_to_sharepoint.py | lawrkelly/python-useful-scripts | dfa044049e41bd0faed96473a79b4a25e051c198 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# Loader_to_sharepoint.py
#
#
from pathlib import Path
import os.path
import requests,json,urllib
import pandas as pd
import collections
from collections import defaultdict
import xmltodict
import getpass
from shareplum import Office365
from shareplum.site import Version
from shareplum import Site
from requests_ntlm import HttpNtlmAuth
import smtplib
import email
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
from email.mime.text import MIMEText
from email.message import EmailMessage
import pprint
# print("\nEnter Your MS ID: ")
MSID = input("\nEnter Your MS ID: ")
# print("\nEnter MS Password: ")
MSID_password = getpass.getpass("\nEnter MS Password: ")
url1="http://server.com/sites/Lists/MIA%20Testing/AllItems.aspx"
url2="http://server.com/sites/Lists/MIS%20MIA%20testing/AllItems.aspx"
head={'Accept': "application/json",'content-type': "application/json;odata=verbose", "X-HTTP-Method": "MERGE"}
# headers = {'Accept': "application/json",'content-type': "application/json;odata=verbose", 'X-RequestDigest': form_digest, "X-HTTP-Method": "MERGE"}
# "X-RequestDigest": digest_value}
##"DOMAIN\username",password
cred=HttpNtlmAuth(MSID, MSID_password)
#cred=HttpNtlmAuth("jsmith", "")
#except: #except OSError as e:
#print(e)
# print(PC_user)
#PC_user=decom_row["Primary Contact"]
#print(PC_user)
#emailer(PC_user)
# PC_user=decom_row["Primary Contact"]
if __name__ == '__main__':
decom_load()
| 32.89418 | 149 | 0.579379 |
b2a1766bc5fbc87d90f9559b3c26e49052f3b261 | 869 | py | Python | tests/test_tunnels_released.py | jhaapako/tcf | ecd75404459c6fec9d9fa1522b70a8deab896644 | [
"Apache-2.0"
] | 24 | 2018-08-21T18:04:48.000Z | 2022-02-07T22:50:06.000Z | tests/test_tunnels_released.py | jhaapako/tcf | ecd75404459c6fec9d9fa1522b70a8deab896644 | [
"Apache-2.0"
] | 16 | 2018-08-21T18:03:52.000Z | 2022-03-01T17:15:42.000Z | tests/test_tunnels_released.py | jhaapako/tcf | ecd75404459c6fec9d9fa1522b70a8deab896644 | [
"Apache-2.0"
] | 29 | 2018-08-22T19:40:59.000Z | 2021-12-21T11:13:23.000Z | #! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# pylint: disable = missing-docstring
import os
import socket
import commonl.testing
import tcfl
import tcfl.tc
srcdir = os.path.dirname(__file__)
ttbd = commonl.testing.test_ttbd(config_files = [
# strip to remove the compiled/optimized version -> get source
os.path.join(srcdir, "conf_%s" % os.path.basename(__file__.rstrip('cd')))
])
| 24.138889 | 77 | 0.700806 |
b2a18a1d5893e676f4cfbf5555c659a91725ab53 | 52,309 | py | Python | tagger-algo.py | li992/MAT | a5fb87b2d1ef667e5eb4a1c4e87caae6f1f75292 | [
"Apache-2.0"
] | null | null | null | tagger-algo.py | li992/MAT | a5fb87b2d1ef667e5eb4a1c4e87caae6f1f75292 | [
"Apache-2.0"
] | null | null | null | tagger-algo.py | li992/MAT | a5fb87b2d1ef667e5eb4a1c4e87caae6f1f75292 | [
"Apache-2.0"
] | null | null | null | import glob,os,stanza,argparse
from datetime import datetime
# route initiation
directory_path = os.getcwd()
#stanford tagger initiation
nlp = stanza.Pipeline('en')
dimDict ={}
# type specifiers
have = ["have","has","'ve","had","having","hath"]
do = ["do","does","did","doing","done"]
wp = ["who","whom","whose","which"]
be = ["be","am","is","are","was","were","been","being","'s","'m","'re"]
who = ["what","where","when","how","whether","why","whoever","whomever","whichever","wherever","whenever","whatever","however"]
preposition = ["against","amid","amidst","among","amongst","at","besides","between","by","despite","during","except","for","from","in","into","minus","notwithstanding","of","off","on","onto","opposite","out","per","plus","pro","than","through","throughout","thru","toward","towards","upon","versus","via","with","within","without"]
public = ["acknowledge","acknowledged","acknowledges","acknowledging","add","adds","adding","added","admit","admits","admitting","admitted","affirm","affirms","affirming","affirmed","agree","agrees","agreeing","agreed","allege","alleges","alleging","alleged","announce","announces","announcing","announced","argue","argues","arguing","argued","assert","asserts","asserting","asserted","bet","bets","betting","boast","boasts","boasting","boasted","certify","certifies","certifying","certified","claim","claims","claiming","claimed","comment","comments","commenting","commented","complain","complains","complaining","complained","concede","concedes","conceding","conceded","confess","confesses","confessing","confessed","confide","confides","confiding","confided","confirm","confirms","confirming","confirmed","contend","contends","contending","contended","convey","conveys","conveying","conveyed","declare","declares","declaring","declared","deny","denies","denying","denied","disclose","discloses","disclosing","disclosed","exclaim","exclaims","exclaiming","exclaimed","explain","explains","explaining","explained","forecast","forecasts","forecasting","forecasted","foretell","foretells","foretelling","foretold","guarantee","guarantees","guaranteeing","guaranteed","hint","hints","hinting","hinted","insist","insists","insisting","insisted","maintain","maintains","maintaining","maintained","mention","mentions","mentioning","mentioned","object","objects","objecting","objected","predict","predicts","predicting","predicted","proclaim","proclaims","proclaiming","proclaimed","promise","promises","promising","promised","pronounce","pronounces","pronouncing","pronounced","prophesy","prophesies","prophesying","prophesied","protest","protests","protesting","protested","remark","remarks","remarking","remarked","repeat","repeats","repeating","repeated","reply","replies","replying","replied","report","reports","reporting","reported","say","says","saying","said","state","states","stating","stated","
submit","submits","submitting","submitted","suggest","suggests","suggesting","suggested","swear","swears","swearing","swore","sworn","testify","testifies","testifying","testified","vow","vows","vowing","vowed","warn","warns","warning","warned","write","writes","writing","wrote","written"]
private = ["accept","accepts","accepting","accepted","anticipate","anticipates","anticipating","anticipated","ascertain","ascertains","ascertaining","ascertained","assume","assumes","assuming","assumed","believe","believes","believing","believed","calculate","calculates","calculating","calculated","check","checks","checking","checked","conclude","concludes","concluding","concluded","conjecture","conjectures","conjecturing","conjectured","consider","considers","considering","considered","decide","decides","deciding","decided","deduce","deduces","deducing","deduced","deem","deems","deeming","deemed","demonstrate","demonstrates","demonstrating","demonstrated","determine","determines","determining","determined","discern","discerns","discerning","discerned","discover","discovers","discovering","discovered","doubt","doubts","doubting","doubted","dream","dreams","dreaming","dreamt","dreamed","ensure","ensures","ensuring","ensured","establish","establishes","establishing","established","estimate","estimates","estimating","estimated","expect","expects","expecting","expected","fancy","fancies","fancying","fancied","fear","fears","fearing","feared","feel","feels","feeling","felt","find","finds","finding","found","foresee","foresees","foreseeing","foresaw","forget","forgets","forgetting","forgot","forgotten","gather","gathers","gathering","gathered","guess","guesses","guessing","guessed","hear","hears","hearing","heard","hold","holds","holding","held","hope","hopes","hoping","hoped","imagine","imagines","imagining","imagined","imply","implies","implying","implied","indicate","indicates","indicating","indicated","infer","infers","inferring","inferred","insure","insures","insuring","insured","judge","judges","judging","judged","know","knows","knowing","knew","known","learn","learns","learning","learnt","learned","mean","means","meaning","meant","note","notes","noting","noted","notice","notices","noticing","noticed","observe","observes","observing","observed","perceive","perceives
","perceiving","perceived","presume","presumes","presuming","presumed","presuppose","presupposes","presupposing","presupposed","pretend","pretend","pretending","pretended","prove","proves","proving","proved","realize","realise","realising","realizing","realises","realizes","realised","realized","reason","reasons","reasoning","reasoned","recall","recalls","recalling","recalled","reckon","reckons","reckoning","reckoned","recognize","recognise","recognizes","recognises","recognizing","recognising","recognized","recognised","reflect","reflects","reflecting","reflected","remember","remembers","remembering","remembered","reveal","reveals","revealing","revealed","see","sees","seeing","saw","seen","sense","senses","sensing","sensed","show","shows","showing","showed","shown","signify","signifies","signifying","signified","suppose","supposes","supposing","supposed","suspect","suspects","suspecting","suspected","think","thinks","thinking","thought","understand","understands","understanding","understood"]
suasive = ["agree","agrees","agreeing","agreed","allow","allows","allowing","allowed","arrange","arranges","arranging","arranged","ask","asks","asking","asked","beg","begs","begging","begged","command","commands","commanding","commanded","concede","concedes","conceding","conceded","decide","decides","deciding","decided","decree","decrees","decreeing","decreed","demand","demands","demanding","demanded","desire","desires","desiring","desired","determine","determines","determining","determined","enjoin","enjoins","enjoining","enjoined","ensure","ensures","ensuring","ensured","entreat","entreats","entreating","entreated","grant","grants","granting","granted","insist","insists","insisting","insisted","instruct","instructs","instructing","instructed","intend","intends","intending","intended","move","moves","moving","moved","ordain","ordains","ordaining","ordained","order","orders","ordering","ordered","pledge","pledges","pledging","pledged","pray","prays","praying","prayed","prefer","prefers","preferring","preferred","pronounce","pronounces","pronouncing","pronounced","propose","proposes","proposing","proposed","recommend","recommends","recommending","recommended","request","requests","requesting","requested","require","requires","requiring","required","resolve","resolves","resolving","resolved","rule","rules","ruling","ruled","stipulate","stipulates","stipulating","stipulated","suggest","suggests","suggesting","suggested","urge","urges","urging","urged","vote","votes","voting","voted"]
symbols = [",",".","!","@","#","$","%","^","&","*","(",")","<",">","/","?","{","}","[","]","\\","|","-","+","=","~","`"]
indefinitePN = ["anybody","anyone","anything","everybody","everyone","everything","nobody","none","nothing","nowhere","somebody","someone","something"]
quantifier = ["each","all","every","many","much","few","several","some","any"]
quantifierPN = ["everybody","somebody","anybody","everyone","someone","anyone","everything","something","anything"]
conjunctives = ["alternatively","consequently","conversely","eg","e.g.","furthermore","hence","however","i.e.","instead","likewise","moreover","namely","nevertheless","nonetheless","notwithstanding","otherwise","similarly","therefore","thus","viz."]
timeABV = ["afterwards","again","earlier","early","eventually","formerly","immediately","initially","instantly","late","lately","later","momentarily","now","nowadays","once","originally","presently","previously","recently","shortly","simultaneously","subsequently","today","to-day","tomorrow","to-morrow","tonight","to-night","yesterday"]
placeABV = ["aboard","above","abroad","across","ahead","alongside","around","ashore","astern","away","behind","below","beneath","beside","downhill","downstairs","downstream","east","far","hereabouts","indoors","inland","inshore","inside","locally","near","nearby","north","nowhere","outdoors","outside","overboard","overland","overseas","south","underfoot","underground","underneath","uphill","upstairs","upstream","west"]
narrative = ["ask","asks","asked","asking","tell","tells","told","telling"]
# tag specifiers
v = ["VBG","VBN","VB","VBD","VBP","VBZ"]
nn = ["NN","NNP","NNPS","NNS"]
parser = argparse.ArgumentParser(description="MAT tagging algorithm")
parser.add_argument('-f','--fragment',type=str,default="false",help='To generate tags for merged files, set this value to false; To generate tags for file fragments, set this value to true')
parser.add_argument('-r','--restart',type=str,default="false",help='If you want to restart the program to let it process from beginning, set this value to true; otherwise, set it to false')
if not os.path.exists('Results'):
os.mkdir(os.path.join(os.getcwd(),'Results'))
os.chdir(os.path.join(os.getcwd(),'Results'))
if not os.path.exists('StanfordTags'):
os.mkdir(os.path.join(os.getcwd(),'StanfordTags'))
if not os.path.exists('ModifiedTags'):
os.mkdir(os.path.join(os.getcwd(),'ModifiedTags'))
if not os.path.exists('StanfordTagsFragment'):
os.mkdir(os.path.join(os.getcwd(),'StanfordTagsFragment'))
if not os.path.exists('ModifiedTagsFragment'):
os.mkdir(os.path.join(os.getcwd(),'ModifiedTagsFragment'))
os.chdir('..')
args = parser.parse_args()
if args.fragment == "true":
if args.restart == "true":
if os.path.exists('fList.txt'):
os.remove(os.path.join(directory_path,'fList.txt'))
fragments()
else:
if args.restart == "true":
if os.path.exists('mList.txt'):
os.remove(os.path.join(directory_path,'mList.txt'))
merged() | 71.853022 | 3,008 | 0.580569 |
b2a64ad7dcb9aaa41898aea3c2d8af7ef4fc0f3f | 1,582 | py | Python | template.py | deepak7376/design_pattern | 855aa0879d478f7b2682c2ae5e92599b5c81a1c6 | [
"MIT"
] | null | null | null | template.py | deepak7376/design_pattern | 855aa0879d478f7b2682c2ae5e92599b5c81a1c6 | [
"MIT"
] | null | null | null | template.py | deepak7376/design_pattern | 855aa0879d478f7b2682c2ae5e92599b5c81a1c6 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
class FileAverageCalculator(AverageCalculator):
class MemoryAverageCalculator(AverageCalculator):
mac = MemoryAverageCalculator([3, 1, 4, 1, 5, 9, 2, 6, 5, 3])
print(mac.average()) # Call the template method
# fac = FileAverageCalculator(open('data.txt'))
# print(fac.average()) # Call the template method | 21.972222 | 78 | 0.583439 |
b2a90936580b1ab7bbc9587223bca80795b6020a | 2,906 | py | Python | conanfile.py | helmesjo/conan-lua | da8f0c54ac9d1949c6ac64d9ab64639df8226061 | [
"MIT"
] | null | null | null | conanfile.py | helmesjo/conan-lua | da8f0c54ac9d1949c6ac64d9ab64639df8226061 | [
"MIT"
] | 1 | 2019-12-26T18:53:06.000Z | 2020-02-12T13:45:40.000Z | conanfile.py | helmesjo/conan-lua | da8f0c54ac9d1949c6ac64d9ab64639df8226061 | [
"MIT"
] | null | null | null | from conans import ConanFile, CMake, tools
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
| 42.735294 | 151 | 0.66724 |
b2a93406f378840531084977a82ef40530d2aedf | 3,800 | py | Python | train.py | mcao610/My_BART | 0f5963ff8688986e28b2ff94a9cc7a3a0adcf3a3 | [
"MIT"
] | null | null | null | train.py | mcao610/My_BART | 0f5963ff8688986e28b2ff94a9cc7a3a0adcf3a3 | [
"MIT"
] | null | null | null | train.py | mcao610/My_BART | 0f5963ff8688986e28b2ff94a9cc7a3a0adcf3a3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import torch
import logging
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.utils.data import Dataset, DataLoader, BatchSampler
from torch.utils.data.distributed import DistributedSampler
from fairseq.tasks.translation import TranslationTask
from fairseq.data.language_pair_dataset import collate
from modules.data_utils import FairseqDataset
from modules.trainer import Trainer
from modules.utils import init_arg_parser
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger('fairseq.train')
def load_dictionary(path, src_dict_name='source', tgt_dict_name='target'):
    """Load the source & target fairseq dictionaries stored under *path*.

    Args:
        path: directory containing ``dict.<name>.txt`` files.
        src_dict_name: name component of the source dictionary file.
        tgt_dict_name: name component of the target dictionary file.

    Returns:
        (src_dict, tgt_dict) tuple of fairseq Dictionary objects.

    Raises:
        AssertionError: if the two dictionaries disagree on the reserved
            bos/pad/eos/unk special-symbol ids.
    """
    src_dict = TranslationTask.load_dictionary(os.path.join(path, 'dict.{}.txt'.format(src_dict_name)))
    tgt_dict = TranslationTask.load_dictionary(os.path.join(path, 'dict.{}.txt'.format(tgt_dict_name)))

    # Both dictionaries must agree on the reserved special-symbol ids.
    assert src_dict.bos() == tgt_dict.bos() == 0
    assert src_dict.pad() == tgt_dict.pad() == 1
    assert src_dict.eos() == tgt_dict.eos() == 2
    assert src_dict.unk() == tgt_dict.unk() == 3

    # Fix: log the dictionary names that were actually requested instead
    # of the hard-coded 'source'/'target' labels, so the log stays
    # accurate when callers pass custom names.
    logger.info('[{}] dictionary: {} types'.format(src_dict_name, len(src_dict)))
    logger.info('[{}] dictionary: {} types'.format(tgt_dict_name, len(tgt_dict)))
    return src_dict, tgt_dict
if __name__ == "__main__":
    parser = init_arg_parser()
    # TranslationTask.add_args(parser)
    args = parser.parse_args()
    # main(args)
    # One worker process per visible GPU.
    n_gpus = torch.cuda.device_count()
    # Launch distributed training: mp.spawn invokes main(rank, args, n_gpus)
    # once per GPU; join=True blocks until every worker exits.
    # NOTE(review): `main` is not defined in this fragment — presumably
    # defined elsewhere in the file; confirm its (rank, args, n_gpus)
    # signature matches the spawn call.
    mp.spawn(main,
             args=(args, n_gpus),
             nprocs=n_gpus,
             join=True)
b2aa5d4587a6ca679b22dbefb38488aae64a9c0e | 4,555 | py | Python | yaml-to-md.py | phlummox/pptx-to-md | 6bd16c9cdf28946cd0ab9b8766b6eea1410de705 | [
"Unlicense"
] | 2 | 2022-02-19T11:45:56.000Z | 2022-03-07T13:34:09.000Z | yaml-to-md.py | phlummox/pptx-to-md | 6bd16c9cdf28946cd0ab9b8766b6eea1410de705 | [
"Unlicense"
] | null | null | null | yaml-to-md.py | phlummox/pptx-to-md | 6bd16c9cdf28946cd0ab9b8766b6eea1410de705 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
"""
intermediate yaml to markdown conversion
"""
import sys
import yaml
def yaml_to_markdown(yaml, outfile):
    """Render a parsed slide deck to Markdown.

    `yaml` is a list of slide dicts (as loaded from the intermediate
    YAML file).  Each slide is handed to `slide_to_markdown`, which
    writes its Markdown rendering to the file-like object `outfile`.
    """
    for slide_dict in yaml:
        slide_to_markdown(slide_dict, outfile)
def get_title(slide):
    """Return the slide's title text, or None if it has no title shape.

    Side effect: the title shape, when present, is removed from the
    slide's "conts" list, and embedded newlines in the title are
    flattened to spaces.
    """
    shapes = slide["conts"]
    for idx, candidate in enumerate(shapes):
        if candidate["ShapeType"] == "com.sun.star.presentation.TitleTextShape":
            shapes.pop(idx)
            return candidate["String"].replace("\n", " ")
    return None
def add_text(shape, outfile):
    """Write a text-like Shape's content to *outfile* as a Markdown
    paragraph (stripped text followed by a blank line)."""
    outfile.write(shape["String"].strip() + "\n\n")
def add_list(shape, outfile):
    """
    Given a shape that represents an 'Outline' --
    OpenOffice's representation of a bulleted or numbered
    list -- attempt to convert the elements into
    a sensible Markdown list, and write to
    "outfile".

    NOTE(review): relies on the helpers `item_to_str` and `dump_output`,
    which are not visible in this fragment (presumably they share the
    `output`/`indent`/`outfile` state, e.g. as closures); confirm their
    definitions before modifying this function.
    """
    els = shape["elements"]
    indent = 0
    # handle first item
    output = [item_to_str(els[0])]
    if len(els) == 1:
        dump_output()
        return
    # handle rest of items
    last_el = els[0]
    for el in els[1:]:
        # int-ify the level if None (a missing level counts as top level)
        if el["NumberingLevel"] is None:
            el["NumberingLevel"] = 0
        if last_el["NumberingLevel"] is None:
            last_el["NumberingLevel"] = 0
        # new indent: a deeper numbering level indents one step, a
        # shallower one dedents (never below zero); equal levels keep
        # the current indent.
        if el["NumberingLevel"] > last_el["NumberingLevel"]:
            indent += 1
        elif el["NumberingLevel"] < last_el["NumberingLevel"]:
            indent = max(0, indent-1)
        else:
            pass
        #print(" new indent:", indent)
        # Skip degenerate items of one character or less (e.g. stray
        # bullets or whitespace-only entries).
        if len(el["String"]) > 1:
            output.append(item_to_str(el))
        last_el = el
    dump_output()
def add_graphic(shape, outfile):
    """
    Given a Shape representing some graphics object
    (e.g. jpg, png, MetaFile, SVG), write out
    the markdown to show it on "outfile".
    """
    # Use the shape's text, if any, as the image alt text.
    if "String" in shape and shape["String"]:
        alt_text = shape["String"]
    else:
        alt_text = ""
    # Prefer the vector (SVG) export when one was produced.
    if "exported_svg_filename" in shape:
        filename = shape["exported_svg_filename"]
    else:
        filename = shape["exported_filename"]
    # Fix: the image-link template had been garbled to "s)"; restore the
    # standard Markdown image form ![alt](file) using the named %-style
    # placeholders that match the operand dict below.
    link = "![%(alt_text)s](%(filename)s)" % {"alt_text": alt_text,
                                              "filename": filename}
    print(link + "\n", file=outfile)
# typical image types:
# image/jpeg, image/png, image/gif
# text shapes:
# TextShape, NotesShape, SubtitleShape, OutlinerShape,
# TitleTextShape, ?CustomShape, possibly ?RectangleShape
def convert_file(input_file, output_file):
    """Convert the intermediate YAML at *input_file* into Markdown,
    writing the result to *output_file*."""
    with open(input_file, "r") as src:
        slides = yaml.load(src, Loader=yaml.SafeLoader)
    with open(output_file, "w") as dst:
        yaml_to_markdown(slides, dst)
MAIN="__main__"
#MAIN=None
def main():
    """Command-line entry point.

    Expects exactly two arguments: the input YAML path and the output
    Markdown path.  Exits with status 1 on a usage error.
    """
    argv = sys.argv[1:]
    if len(argv) != 2:
        print("usage: pptx-to-md.py INPUT_FILE OUTPUT_FILE")
        sys.exit(1)
    convert_file(argv[0], argv[1])
if __name__ == MAIN:
main()
| 25.305556 | 88 | 0.630077 |
b2aacb8c58e5a1abfc8fe218bf0ba965384b2044 | 1,032 | py | Python | library/real/display_real.py | console-beaver/MIT-Racecar-cbeast | f7f9c156e7072da7acc680ae1ad1de344253ae05 | [
"MIT"
] | null | null | null | library/real/display_real.py | console-beaver/MIT-Racecar-cbeast | f7f9c156e7072da7acc680ae1ad1de344253ae05 | [
"MIT"
] | null | null | null | library/real/display_real.py | console-beaver/MIT-Racecar-cbeast | f7f9c156e7072da7acc680ae1ad1de344253ae05 | [
"MIT"
] | null | null | null | """
Copyright Harvey Mudd College
MIT License
Spring 2020
Contains the Display module of the racecar_core library
"""
import cv2 as cv
import os
from nptyping import NDArray
from display import Display
| 23.454545 | 78 | 0.587209 |
b2ad711075be04cba1f9b409149e9a9fc3958436 | 749 | py | Python | DominantSparseEigenAD/tests/demos/2ndderivative.py | buwantaiji/DominantSparseEigenAD | 36d534b6713ba256309b07116ebc542bee01cd51 | [
"Apache-2.0"
] | 23 | 2019-10-29T03:35:18.000Z | 2022-02-11T16:38:24.000Z | DominantSparseEigenAD/tests/demos/2ndderivative.py | navyTensor/DominantSparseEigenAD | 3a5ac361edafd82f98ecf4d9fcad5c4e0b242178 | [
"Apache-2.0"
] | null | null | null | DominantSparseEigenAD/tests/demos/2ndderivative.py | navyTensor/DominantSparseEigenAD | 3a5ac361edafd82f98ecf4d9fcad5c4e0b242178 | [
"Apache-2.0"
] | 6 | 2019-11-06T09:09:45.000Z | 2022-02-09T06:24:15.000Z | """
A small toy example demonstrating how the process of computing 1st
derivative can be added to the original computation graph to produce an enlarged
graph whose back-propagation yields the 2nd derivative.
"""
import torch
x = torch.randn(10, requires_grad=True)

# Forward pass: y = exp(x) * cos(x).
exp_term = torch.exp(x)
cos_term = torch.cos(x)
y = exp_term * cos_term

# Hand-written reverse pass for the product rule: the adjoint of the
# cos branch is exp(x), and the adjoint of the exp branch is cos(x).
# Every step stays on the autograd graph, so dydx below is itself
# differentiable — that is the enlarged graph this demo illustrates.
d_cos_branch = exp_term * (-torch.sin(x))
d_exp_branch = cos_term * exp_term
dydx = d_cos_branch + d_exp_branch

# Back-propagating through the enlarged graph yields the 2nd derivative.
d2ydx2 = torch.autograd.grad(dydx, x, grad_outputs=torch.ones(dydx.shape[0]))

print("y: ", y, "\ngroundtruth: ", torch.exp(x) * torch.cos(x))
print("dy/dx: ", dydx, "\ngroundtruth: ", torch.exp(x) * (torch.cos(x)- torch.sin(x)))
print("d2y/dx2: ", d2ydx2, "\ngroundtruth", -2 * torch.exp(x) * torch.sin(x))
| 32.565217 | 86 | 0.695594 |
b2adb9d7006450ffeda3b214aef1de0a2d913357 | 1,335 | py | Python | test_default.py | dukedhx/tokenflex-reporting-python-script | f837b4e4a1cf388620da94abbaddab6bcabd51a8 | [
"MIT"
] | 4 | 2018-12-17T09:09:44.000Z | 2020-12-15T16:35:47.000Z | test_default.py | dukedhx/tokenflex-reporting-python-script | f837b4e4a1cf388620da94abbaddab6bcabd51a8 | [
"MIT"
] | null | null | null | test_default.py | dukedhx/tokenflex-reporting-python-script | f837b4e4a1cf388620da94abbaddab6bcabd51a8 | [
"MIT"
] | 4 | 2019-09-01T10:08:32.000Z | 2021-01-09T10:12:46.000Z | #####################################################################
## Copyright (c) Autodesk, Inc. All rights reserved
## Written by Forge Partner Development
##
## Permission to use, copy, modify, and distribute this software in
## object code form for any purpose and without fee is hereby granted,
## provided that the above copyright notice appears in all copies and
## that both that copyright notice and the limited warranty and
## restricted rights notice below appear in all supporting
## documentation.
##
## AUTODESK PROVIDES THIS PROGRAM "AS IS" AND WITH ALL FAULTS.
## AUTODESK SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTY OF
## MERCHANTABILITY OR FITNESS FOR A PARTICULAR USE. AUTODESK, INC.
## DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE
## UNINTERRUPTED OR ERROR FREE.
#####################################################################
import simple_http_server as SimpleHTTPServer
import consumption_reporting as ConsumptionReporting
from threading import Thread
from time import sleep
import pytest
| 32.560976 | 70 | 0.691386 |
b2ae0f0ae136e69e9eedb942d08d354586e0fafa | 4,850 | py | Python | HyperAPI/hdp_api/routes/nitro.py | RomainGeffraye/HyperAPI | 6bcd831ee48abb3a4f67f85051bc0d2a07c7aaef | [
"BSD-3-Clause"
] | null | null | null | HyperAPI/hdp_api/routes/nitro.py | RomainGeffraye/HyperAPI | 6bcd831ee48abb3a4f67f85051bc0d2a07c7aaef | [
"BSD-3-Clause"
] | null | null | null | HyperAPI/hdp_api/routes/nitro.py | RomainGeffraye/HyperAPI | 6bcd831ee48abb3a4f67f85051bc0d2a07c7aaef | [
"BSD-3-Clause"
] | null | null | null | from HyperAPI.hdp_api.routes import Resource, Route
from HyperAPI.hdp_api.routes.base.version_management import available_since
| 38.188976 | 113 | 0.640412 |
b2b1ab378336c1f38be58369252277dd0f368208 | 4,883 | py | Python | third_party/pyth/p2w_autoattest.py | dendisuhubdy/wormhole | 29cd5a3934aaf489a1b7aa45495414c5cb974c82 | [
"Apache-2.0"
] | 695 | 2020-08-29T22:42:51.000Z | 2022-03-31T05:33:57.000Z | third_party/pyth/p2w_autoattest.py | dendisuhubdy/wormhole | 29cd5a3934aaf489a1b7aa45495414c5cb974c82 | [
"Apache-2.0"
] | 478 | 2020-08-30T16:48:42.000Z | 2022-03-30T23:00:11.000Z | third_party/pyth/p2w_autoattest.py | dendisuhubdy/wormhole | 29cd5a3934aaf489a1b7aa45495414c5cb974c82 | [
"Apache-2.0"
] | 230 | 2020-10-19T06:44:13.000Z | 2022-03-28T11:11:47.000Z | #!/usr/bin/env python3
# This script sets up a simple loop for periodical attestation of Pyth data
from pyth_utils import *
from http.client import HTTPConnection
from http.server import HTTPServer, BaseHTTPRequestHandler
import json
import os
import re
import subprocess
import time
import threading
P2W_ADDRESS = "P2WH424242424242424242424242424242424242424"
P2W_ATTEST_INTERVAL = float(os.environ.get("P2W_ATTEST_INTERVAL", 5))
P2W_OWNER_KEYPAIR = os.environ.get(
"P2W_OWNER_KEYPAIR", f"/usr/src/solana/keys/p2w_owner.json")
P2W_ATTESTATIONS_PORT = int(os.environ.get("P2W_ATTESTATIONS_PORT", 4343))
PYTH_ACCOUNTS_HOST = "pyth"
PYTH_ACCOUNTS_PORT = 4242
WORMHOLE_ADDRESS = "Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o"
ATTESTATIONS = {
"pendingSeqnos": [],
}
def serve_attestations():
"""
Run a barebones HTTP server to share Pyth2wormhole attestation history
"""
server_address = ('', P2W_ATTESTATIONS_PORT)
httpd = HTTPServer(server_address, P2WAutoattestStatusEndpoint)
httpd.serve_forever()
# Get actor pubkeys
P2W_OWNER_ADDRESS = sol_run_or_die(
"address", ["--keypair", P2W_OWNER_KEYPAIR], capture_output=True).stdout.strip()
PYTH_OWNER_ADDRESS = sol_run_or_die(
"address", ["--keypair", PYTH_PROGRAM_KEYPAIR], capture_output=True).stdout.strip()
# Top up pyth2wormhole owner
sol_run_or_die("airdrop", [
str(SOL_AIRDROP_AMT),
"--keypair", P2W_OWNER_KEYPAIR,
"--commitment", "finalized",
], capture_output=True)
# Initialize pyth2wormhole
init_result = run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"init",
"--wh-prog", WORMHOLE_ADDRESS,
"--owner", P2W_OWNER_ADDRESS,
"--pyth-owner", PYTH_OWNER_ADDRESS,
], capture_output=True, die=False)
if init_result.returncode != 0:
print("NOTE: pyth2wormhole-client init failed, retrying with set_config")
run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"set-config",
"--owner", P2W_OWNER_KEYPAIR,
"--new-owner", P2W_OWNER_ADDRESS,
"--new-wh-prog", WORMHOLE_ADDRESS,
"--new-pyth-owner", PYTH_OWNER_ADDRESS,
], capture_output=True)
# Retrieve current price/product pubkeys from the pyth publisher
conn = HTTPConnection(PYTH_ACCOUNTS_HOST, PYTH_ACCOUNTS_PORT)
conn.request("GET", "/")
res = conn.getresponse()
pyth_accounts = None
if res.getheader("Content-Type") == "application/json":
pyth_accounts = json.load(res)
else:
print(f"Bad Content type {res.getheader('Content-Type')}", file=sys.stderr)
sys.exit(1)
price_addr = pyth_accounts["price"]
product_addr = pyth_accounts["product"]
nonce = 0
attest_result = run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"attest",
"--price", price_addr,
"--product", product_addr,
"--nonce", str(nonce),
], capture_output=True)
print("p2w_autoattest ready to roll.")
print(f"ACCOUNTS: {pyth_accounts}")
print(f"Attest Interval: {P2W_ATTEST_INTERVAL}")
# Serve p2w endpoint
endpoint_thread = threading.Thread(target=serve_attestations, daemon=True)
endpoint_thread.start()
# Let k8s know the service is up
readiness_thread = threading.Thread(target=readiness, daemon=True)
readiness_thread.start()
seqno_regex = re.compile(r"^Sequence number: (\d+)")
nonce = 1
while True:
attest_result = run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"attest",
"--price", price_addr,
"--product", product_addr,
"--nonce", str(nonce),
], capture_output=True)
time.sleep(P2W_ATTEST_INTERVAL)
matches = seqno_regex.match(attest_result.stdout)
if matches is not None:
seqno = int(matches.group(1))
print(f"Got seqno {seqno}")
ATTESTATIONS["pendingSeqnos"].append(seqno)
else:
print(f"Warning: Could not get sequence number")
nonce += 1
readiness_thread.join()
| 27.587571 | 87 | 0.683596 |
a22accaa90f9f185eea9b823f9c8bb986540fecb | 3,644 | py | Python | hands-on_introduction/3 - model_validation.py | varunpandey0502/skyfi_labs_ml_workshop | 6a209a16ca3674c1d2cd75e4dcc2e695f50dc583 | [
"MIT"
] | null | null | null | hands-on_introduction/3 - model_validation.py | varunpandey0502/skyfi_labs_ml_workshop | 6a209a16ca3674c1d2cd75e4dcc2e695f50dc583 | [
"MIT"
] | null | null | null | hands-on_introduction/3 - model_validation.py | varunpandey0502/skyfi_labs_ml_workshop | 6a209a16ca3674c1d2cd75e4dcc2e695f50dc583 | [
"MIT"
] | null | null | null | import pandas as pd
melbourne_file_path = './melbourne_housing_data.csv'
melbourne_data = pd.read_csv(melbourne_file_path)
melbourne_data.dropna(axis=0)
y = melbourne_data.Price
melbourne_features = ['Rooms','Bathroom','Landsize','Lattitude','Longtitude']
X = melbourne_data[melbourne_features]
X.describe()
X.head(n=10)
from sklearn.tree import DecisionTreeRegressor
melbourne_model = DecisionTreeRegressor(random_state=1)
#Fit model
melbourne_model.fit(X,y)
#Make predictions for first five rows
#print(X.head())
#Predictions
#print(melbourne_model.predict(X.head()))
#What is Model Validation
#You'll want to evaluate almost every model you ever build. In most (though not all) applications, the relevant measure of model quality is predictive accuracy. In other words, will the model's predictions be close to what actually happens.
#
#Many people make a huge mistake when measuring predictive accuracy. They make predictions with their training data and compare those predictions to the target values in the training data. You'll see the problem with this approach and how to solve it in a moment, but let's think about how we'd do this first.
#
#You'd first need to summarize the model quality into an understandable way. If you compare predicted and actual home values for 10,000 houses, you'll likely find mix of good and bad predictions. Looking through a list of 10,000 predicted and actual values would be pointless. We need to summarize this into a single metric.
#
#There are many metrics for summarizing model quality, but we'll start with one called Mean Absolute Error (also called MAE). Let's break down this metric starting with the last word, error.
from sklearn.metrics import mean_absolute_error
predicted_home_prices = melbourne_model.predict(X)
mean_absolute_error(y,predicted_home_prices)
#The Problem with "In-Sample" Scores
#The measure we just computed can be called an "in-sample" score. We used a single "sample" of houses for both building the model and evaluating it. Here's why this is bad.
#
#Imagine that, in the large real estate market, door color is unrelated to home price.
#
#However, in the sample of data you used to build the model, all homes with green doors were very expensive. The model's job is to find patterns that predict home prices, so it will see this pattern, and it will always predict high prices for homes with green doors.
#
#Since this pattern was derived from the training data, the model will appear accurate in the training data.
#
#But if this pattern doesn't hold when the model sees new data, the model would be very inaccurate when used in practice.
#
#Since models' practical value come from making predictions on new data, we measure performance on data that wasn't used to build the model. The most straightforward way to do this is to exclude some data from the model-building process, and then use those to test the model's accuracy on data it hasn't seen before. This data is called validation data.
from sklearn.model_selection import train_test_split
# split data into training and validation data, for both features and target
# The split is based on a random number generator. Supplying a numeric value to
# the random_state argument guarantees we get the same split every time we
# run this script.
train_X,test_X,train_y,test_y = train_test_split(X,y,random_state=0)
#Define the model
melbourne_model = DecisionTreeRegressor()
#Fit the model
melbourne_model.fit(train_X,train_y)
# get predicted prices on validation data
test_predictions = melbourne_model.predict(test_X)
mean_absolute_error(test_y,test_predictions)
| 35.378641 | 353 | 0.791164 |
a22cbabe9b6d8f3afdad45c7ee147591f90ad7e9 | 3,406 | py | Python | src/npu/comprehension.py | feagi/feagi | 598abbe294b5d9cd7ff34861fa6568ba899b2ab8 | [
"Apache-2.0"
] | 1 | 2022-03-17T08:27:11.000Z | 2022-03-17T08:27:11.000Z | src/npu/comprehension.py | feagi/feagi | 598abbe294b5d9cd7ff34861fa6568ba899b2ab8 | [
"Apache-2.0"
] | 1 | 2022-02-10T16:30:35.000Z | 2022-02-10T16:33:21.000Z | src/npu/comprehension.py | feagi/feagi | 598abbe294b5d9cd7ff34861fa6568ba899b2ab8 | [
"Apache-2.0"
] | 1 | 2022-02-07T22:15:54.000Z | 2022-02-07T22:15:54.000Z |
# Copyright 2016-2022 The FEAGI Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| 44.233766 | 120 | 0.620376 |
a22ccf953739987c462b05149a48bd232390c0be | 5,286 | py | Python | policyhandler/onap/process_info.py | alex-sh2020/dcaegen2-platform-policy-handler | e969b079e331cc32b1ca361c49ee7b56e43900a7 | [
"Apache-2.0",
"CC-BY-4.0"
] | 2 | 2020-07-14T18:54:07.000Z | 2020-07-14T19:16:06.000Z | policyhandler/onap/process_info.py | alex-sh2020/dcaegen2-platform-policy-handler | e969b079e331cc32b1ca361c49ee7b56e43900a7 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | policyhandler/onap/process_info.py | alex-sh2020/dcaegen2-platform-policy-handler | e969b079e331cc32b1ca361c49ee7b56e43900a7 | [
"Apache-2.0",
"CC-BY-4.0"
] | 2 | 2020-07-14T18:53:46.000Z | 2021-10-15T16:55:54.000Z | # ================================================================================
# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
"""generic class to keep get real time info about the current process"""
import gc
import sys
import threading
import traceback
from functools import wraps
import psutil
def safe_operation(func):
"""safequard the function against any exception"""
if not func:
return
return wrapper
| 34.54902 | 95 | 0.573023 |