hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6f4c2801d9bd553871475afb7b65130adbc0816c | 7,963 | py | Python | raster_statistic.py | Summer0328/DeeplabforRS | e2f7fcee7a226bcf4ac0dfe481e5559cbd86c602 | [
"MIT"
] | 3 | 2019-02-03T00:11:34.000Z | 2020-12-08T03:12:29.000Z | raster_statistic.py | Summer0328/DeeplabforRS | e2f7fcee7a226bcf4ac0dfe481e5559cbd86c602 | [
"MIT"
] | null | null | null | raster_statistic.py | Summer0328/DeeplabforRS | e2f7fcee7a226bcf4ac0dfe481e5559cbd86c602 | [
"MIT"
] | 8 | 2019-03-08T03:20:24.000Z | 2021-12-29T09:12:54.000Z | #!/usr/bin/env python
# Filename: raster_statistic
"""
introduction: conduct statistic based on vectos, similar to https://github.com/perrygeo/python-rasterstats,
# but allow image tiles (multi-raster).
authors: Huang Lingcao
email:huanglingcao@gmail.com
add time: 02 March, 2021
"""
import os,sys
import vector_gpd
from shapely.geometry import mapping # transform to GeJSON format
import raster_io
import basic_src.io_function as io_function
import basic_src.map_projection as map_projection
import basic_src.basic as basic
import numpy as np
from multiprocessing import Pool
def zonal_stats_multiRasters(in_shp, raster_file_or_files, nodata=None, band = 1, stats = None, prefix='',
range=None,all_touched=True, process_num=1):
'''
zonal statistic based on vectors, along multiple rasters (image tiles)
Args:
in_shp: input vector file
raster_file_or_files: a raster file or multiple rasters
nodata:
band: band
stats: like [mean, std, max, min]
range: interested values [min, max], None means infinity
all_touched:
process_num: process number for calculation
Returns:
'''
io_function.is_file_exist(in_shp)
if stats is None:
basic.outputlogMessage('warning, No input stats, set to ["mean"])')
stats = ['mean']
if isinstance(raster_file_or_files,str):
io_function.is_file_exist(raster_file_or_files)
image_tiles = [raster_file_or_files]
elif isinstance(raster_file_or_files,list):
image_tiles = raster_file_or_files
else:
raise ValueError('unsupport type for %s'%str(raster_file_or_files))
# check projection (assume we have the same projection), check them outside this function
# get image box
img_tile_boxes = [raster_io.get_image_bound_box(tile) for tile in image_tiles]
img_tile_polygons = [vector_gpd.convert_image_bound_to_shapely_polygon(box) for box in img_tile_boxes]
polygons = vector_gpd.read_polygons_gpd(in_shp)
if len(polygons) < 1:
basic.outputlogMessage('No polygons in %s'%in_shp)
return False
# polygons_json = [mapping(item) for item in polygons] # no need when use new verion of rasterio
# process polygons one by one polygons and the corresponding image tiles (parallel and save memory)
# also to avoid error: daemonic processes are not allowed to have children
if process_num == 1:
stats_res_list = []
for idx, polygon in enumerate(polygons):
out_stats = zonal_stats_one_polygon(idx, polygon, image_tiles, img_tile_polygons, stats, nodata=nodata, range=range,
band=band, all_touched=all_touched)
stats_res_list.append(out_stats)
elif process_num > 1:
threadpool = Pool(process_num)
para_list = [ (idx, polygon, image_tiles, img_tile_polygons, stats, nodata, range,band, all_touched)
for idx, polygon in enumerate(polygons)]
stats_res_list = threadpool.starmap(zonal_stats_one_polygon,para_list)
else:
raise ValueError('Wrong process number: %s '%str(process_num))
# save to shapefile
add_attributes = {}
new_key_list = [ prefix + '_' + key for key in stats_res_list[0].keys()]
for new_ley in new_key_list:
add_attributes[new_ley] = []
for stats_result in stats_res_list:
for key in stats_result.keys():
add_attributes[prefix + '_' + key].append(stats_result[key])
vector_gpd.add_attributes_to_shp(in_shp,add_attributes)
pass
if __name__=='__main__':
basic.setlogfile('raster_statistic.log')
main() | 39.034314 | 128 | 0.672485 |
6f4f6ae38349e41996b32d4d35373858a72fda8b | 1,226 | py | Python | ariadne/old/defutils.py | microns-ariadne/ariadne-pipeline-test-harness | 73e749c48d1ff103fee2044833778e33c70be73b | [
"MIT"
] | 2 | 2016-03-15T15:07:06.000Z | 2016-05-10T23:01:05.000Z | ariadne/old/defutils.py | microns-ariadne/ariadne-pipeline-test-harness | 73e749c48d1ff103fee2044833778e33c70be73b | [
"MIT"
] | null | null | null | ariadne/old/defutils.py | microns-ariadne/ariadne-pipeline-test-harness | 73e749c48d1ff103fee2044833778e33c70be73b | [
"MIT"
] | null | null | null | # Defutils.py -- Contains parsing functions for definition files.
# Produces an organized list of tokens in the file.
| 22.703704 | 65 | 0.596248 |
6f501da078c8264f3aa97dd237a4fd33b8efc2d3 | 492 | py | Python | qnap_locate_parser.py | killruana/snippets | c4c63dc61c727cce53fd44175bdf0dbaa6ca2b3e | [
"WTFPL"
] | null | null | null | qnap_locate_parser.py | killruana/snippets | c4c63dc61c727cce53fd44175bdf0dbaa6ca2b3e | [
"WTFPL"
] | null | null | null | qnap_locate_parser.py | killruana/snippets | c4c63dc61c727cce53fd44175bdf0dbaa6ca2b3e | [
"WTFPL"
] | null | null | null | #!/usr/bin/env python3
import json
import sys
import clize
if __name__ == '__main__':
clize.run(main)
| 17.571429 | 56 | 0.628049 |
6f51e1b0451d36b7c6fa181d10bcac54b6aff907 | 2,254 | py | Python | evennia/contrib/tutorial_examples/mirror.py | lootcrawl/evennia | a5f736ca0ff89e4f7da7d3f89a8886f1ea3698aa | [
"BSD-3-Clause"
] | null | null | null | evennia/contrib/tutorial_examples/mirror.py | lootcrawl/evennia | a5f736ca0ff89e4f7da7d3f89a8886f1ea3698aa | [
"BSD-3-Clause"
] | null | null | null | evennia/contrib/tutorial_examples/mirror.py | lootcrawl/evennia | a5f736ca0ff89e4f7da7d3f89a8886f1ea3698aa | [
"BSD-3-Clause"
] | null | null | null | """
TutorialMirror
A simple mirror object to experiment with.
"""
from evennia import DefaultObject
from evennia.utils import make_iter, is_iter
from evennia import logger
| 35.777778 | 96 | 0.612689 |
6f52486edca89f5433834a3b9b6ee311e8cbfc7a | 1,087 | py | Python | python/patternlock.py | Floozutter/silly | 8273b4a33e2001c0a530e859c12dbc30b9590a94 | [
"Unlicense"
] | null | null | null | python/patternlock.py | Floozutter/silly | 8273b4a33e2001c0a530e859c12dbc30b9590a94 | [
"Unlicense"
] | null | null | null | python/patternlock.py | Floozutter/silly | 8273b4a33e2001c0a530e859c12dbc30b9590a94 | [
"Unlicense"
] | null | null | null | from tkinter import Tk
from turtle import ScrolledCanvas, TurtleScreen, RawTurtle
DIGIT2POS = dict(zip(
"123456789",
((100 * (j - 1), 100 * (-i + 1)) for i in range(3) for j in range(3))
))
if __name__ == "__main__":
main("61834927")
| 25.880952 | 73 | 0.618215 |
6f52a901e875d32b20f9451889c4b2196619f283 | 3,879 | py | Python | synthesizing/gui/python-portmidi-0.0.7/test_pyportmidi.py | Chiel92/MusicTheory | ddaaa60042c2db3522144e90ceabcd1bbd9818c3 | [
"MIT"
] | null | null | null | synthesizing/gui/python-portmidi-0.0.7/test_pyportmidi.py | Chiel92/MusicTheory | ddaaa60042c2db3522144e90ceabcd1bbd9818c3 | [
"MIT"
] | null | null | null | synthesizing/gui/python-portmidi-0.0.7/test_pyportmidi.py | Chiel92/MusicTheory | ddaaa60042c2db3522144e90ceabcd1bbd9818c3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# test code for PyPortMidi
# a port of a subset of test.c provided with PortMidi
# John Harrison
# harrison [at] media [dot] mit [dot] edu
# March 15, 2005: accommodate for SysEx messages and preferred list formats
# SysEx test code contributed by Markus Pfaff
# February 27, 2005: initial release
import pypm
import array
import time
NUM_MSGS = 100 # number of MIDI messages for input before closing
INPUT=0
OUTPUT=1
# main code begins here
pypm.Initialize() # always call this first, or OS may crash when you try to open a stream
x=0
while (x<1) | (x>2):
print """
enter your choice...
1: test input
2: test output
"""
x=int(raw_input())
if x==1: TestInput()
else: TestOutput()
pypm.Terminate()
| 38.405941 | 105 | 0.63109 |
6f541abd19a1111ebb51682e3f0933b11c46ab1e | 1,043 | py | Python | cnn_model/Compute_accuarcy.py | csJd/dg_text_contest_2018 | 24bf8ded51841ebc1b1487f239471d65ee1a2b18 | [
"MIT"
] | null | null | null | cnn_model/Compute_accuarcy.py | csJd/dg_text_contest_2018 | 24bf8ded51841ebc1b1487f239471d65ee1a2b18 | [
"MIT"
] | null | null | null | cnn_model/Compute_accuarcy.py | csJd/dg_text_contest_2018 | 24bf8ded51841ebc1b1487f239471d65ee1a2b18 | [
"MIT"
] | null | null | null | #coding=utf-8
import pandas as pd
if __name__ == "__main__":
correct_rate,error_index = get_labels("../processed_data/dev_processed_data_split.csv","./result/result_predict.txt")
print("correct_rate : "+str(correct_rate))
print("error_email : "+str(error_index))
| 21.729167 | 121 | 0.663471 |
6f54410348912d7242d35c3b0676a8ad9e832eda | 1,219 | py | Python | ex01/arquivo/__init__.py | duartele/exerc-python | fc149a5bcd0686ba4cad87e95277658f9bbdc63b | [
"MIT"
] | null | null | null | ex01/arquivo/__init__.py | duartele/exerc-python | fc149a5bcd0686ba4cad87e95277658f9bbdc63b | [
"MIT"
] | null | null | null | ex01/arquivo/__init__.py | duartele/exerc-python | fc149a5bcd0686ba4cad87e95277658f9bbdc63b | [
"MIT"
] | null | null | null | from ex01.funcoes import *
| 23 | 82 | 0.528302 |
6f54793f102a2f9346990845e8357d9f1db537d3 | 4,330 | py | Python | ck_airport.py | 58565856/checkinpanel | 58f2292d9c4d65f15ffd6bc4fa4b9f23214d3d72 | [
"MIT"
] | 3 | 2022-02-08T16:11:43.000Z | 2022-03-23T16:18:59.000Z | ck_airport.py | 58565856/checkinpanel | 58f2292d9c4d65f15ffd6bc4fa4b9f23214d3d72 | [
"MIT"
] | null | null | null | ck_airport.py | 58565856/checkinpanel | 58f2292d9c4d65f15ffd6bc4fa4b9f23214d3d72 | [
"MIT"
] | 2 | 2022-02-01T05:35:56.000Z | 2022-02-10T01:37:38.000Z | # -*- coding: utf-8 -*-
"""
:author @Icrons
cron: 20 10 * * *
new Env('');
"""
import json
import re
import traceback
import requests
import urllib3
from notify_mtr import send
from utils import get_data
urllib3.disable_warnings()
if __name__ == "__main__":
data = get_data()
_check_items = data.get("AIRPORT", [])
res = SspanelQd(check_items=_check_items).main()
send("", res)
| 31.151079 | 138 | 0.505081 |
6f55278da18ee1b87b293e3ecbf2009597eacc92 | 2,541 | py | Python | src/04_exploration/03_determine_fire_season.py | ranarango/fuegos-orinoquia | d82941ef0c90fe66162c8678b6f4a4c010d4313b | [
"MIT"
] | null | null | null | src/04_exploration/03_determine_fire_season.py | ranarango/fuegos-orinoquia | d82941ef0c90fe66162c8678b6f4a4c010d4313b | [
"MIT"
] | null | null | null | src/04_exploration/03_determine_fire_season.py | ranarango/fuegos-orinoquia | d82941ef0c90fe66162c8678b6f4a4c010d4313b | [
"MIT"
] | null | null | null | # -----------------------------------------------------------------------
# Author: Marcelo Villa-Pieros
#
# Purpose: Determines the fire season for each window. The fire season is
# defined as the minimum number of consecutive months that contain more
# than 80% of the burned area (Archibald ett al 2013; Abatzoglou et al.
# 2018).
#
# References:
# * Archibald, S., Lehmann, C. E. R., Gmez-Dans, J. L., & Bradstock,
# R. A. (2013). Defining pyromes and global syndromes of fire regimes.
# Proceedings of the National Academy of Sciences of the United States
# of America, 110(16), 64426447.
#
# * Abatzoglou, J. T., Williams, A. P., Boschetti, L., Zubkova, M., &
# Kolden, C. A. (2018). Global patterns of interannual climatefire
# relationships. Global Change Biology, 24(11), 51645175.
# -----------------------------------------------------------------------
import os
from calendar import month_abbr
import pandas as pd
from src.utils.constants import REGIONS, BURNED_AREA_THRESHOLD
if __name__ == "__main__":
# Project's root
os.chdir("../..")
output_folder = "results/csv"
if not os.path.exists(output_folder):
os.makedirs(output_folder)
df = pd.DataFrame(columns=["window", "months"])
for region in REGIONS:
month_groups = pd.read_excel(
f"results/xlsx/{region['name']}/fire_groups.xlsx", sheet_name="Month"
)
# Compute 80% threshold.
threshold = month_groups["area"].sum() * BURNED_AREA_THRESHOLD
# Sort months from larger to smallest burned area and compute the
# cumulative sum.
sorted_groups = month_groups.sort_values(by="area", ascending=False)
sorted_groups = sorted_groups.reset_index(drop=True)
sorted_groups["cumulative_area"] = sorted_groups["area"].cumsum()
# Get the months with the largest burned area that compose more
# than 80% of the total burned area and change from month int to
# month abbreviation.
above_threshold = sorted_groups["cumulative_area"] >= threshold
fire_season_months = sorted_groups["month"].loc[:above_threshold.idxmax()]
fire_season_months = fire_season_months.sort_values()
fire_season_months = fire_season_months.apply(lambda x: month_abbr[x])
months = fire_season_months.str.cat(sep="-")
df = df.append({"window": region["name"], "months": months}, ignore_index=True)
save_to = os.path.join(output_folder, "fire_season_months.csv")
df.to_csv(save_to, index=False)
| 38.5 | 87 | 0.646989 |
6f5611d11711ac2d42a20770b0203d11ed9c22de | 5,719 | py | Python | holo/modules/blender.py | chinarjoshi/holo | 45da9a8b4186b405d4f7338b953e10b335b76573 | [
"MIT"
] | 1 | 2021-08-01T02:26:59.000Z | 2021-08-01T02:26:59.000Z | holo/modules/blender.py | chinarjoshi/holo | 45da9a8b4186b405d4f7338b953e10b335b76573 | [
"MIT"
] | null | null | null | holo/modules/blender.py | chinarjoshi/holo | 45da9a8b4186b405d4f7338b953e10b335b76573 | [
"MIT"
] | null | null | null | import bpy
import json
from bpy.types import SpaceView3D
from bpy.app.handlers import persistent
from mathutils import Quaternion, Matrix, Vector
from holo.gestures import prediction_from_camera
def duplicate_window(window_type: str = 'INVOKE_DEFAULT') -> None:
"""Duplicates a new window into bpy.data.screens from current active window."""
context_window = bpy.context.copy()
context_window['area'] = [area for area in bpy.context.screen.areas if area.type == 'VIEW_3D'][0]
bpy.ops.screen.area_dupli(context_window, window_type)
def convert_quadview(area: SpaceView3D) -> None:
"""Converts a given window into quad-view."""
region = [region for region in RENDER_AREA.regions if region.type == 'WINDOW'][0]
override = {'area': RENDER_AREA, 'region': region, 'edit_object': bpy.context.edit_object}
bpy.ops.screen.region_quadview(override)
def configure_scene(screen_data: SpaceView3D) -> None:
"""Removes all overlay elements from the 3D viewport."""
screen_data.shading.background_type = 'VIEWPORT'
screen_data.shading.background_color = (0, 0, 0)
screen_data.overlay.show_overlays = False
for attribute in 'show_gizmo', 'show_region_toolbar', 'show_region_tool_header':
setattr(screen_data, attribute, False)
def initial_config(values: list) -> None:
"""Sets the camera position and rotation values during initialization of new frame."""
for index, window in enumerate(values):
for key, attribute in window.items():
if key not in {'perspective_matrix', 'window_matrix'}: # BUG These values are read only and need a setter
setattr(QUAD_VIEWS[index], key, attribute)
def transform_rotate(direction: 'str', confidence: int) -> None:
"""Given a direction and confidence value (Out of 100%), rotate the object by its corresponding vector."""
magnitude = confidence / 100
if direction not in {'retract', 'expand'}:
bpy.ops.transform.rotate(
value=magnitude,
orient_axis='Z',
orient_type='VIEW',
orient_matrix=((0.85153, 0.277963, -0.44456),
(0.15535, 0.676067, 0.720278),
(0.500763, -0.6824, 0.53251)),
orient_matrix_type='VIEW',
mirror=True, use_proportional_edit=False,
proportional_edit_falloff='SMOOTH',
proportional_size=1,
use_proportional_connected=False,
use_proportional_projected=False)
else:
for window in QUAD_VIEWS:
window.view_distance += magnitude if direction == 'expand' else magnitude * -1
def get_gestures() -> None:
"""Retrieves gestures from camera and applies the corresponding tranformation to the object."""
rotation_mapping = {
'Fist' : 'X',
'L' : 'Y',
'Okay' : 'Z',
}
for gesture in prediction_from_camera():
transform_rotate(direction=rotation_mapping(gesture.gesture), magnitude=gesture.confidence)
def initial_config_values() -> list:
"""Returns initial config values as a convenience utility."""
return [
{
"view_distance": 4.183098793029785,
"view_location": Vector((-0.8385156989097595, 0.05902576446533203, 0.48941677808761597)),
"view_perspective": "PERSP",
"view_rotation": Quaternion((0.6414357423782349, -0.6326250433921814, 0.3170725703239441, 0.2963286340236664))
},
{
"view_distance": 4.183099269866943,
"view_location": Vector((-0.4491613209247589, 1.5609432458877563, 0.014791678637266159)),
"view_perspective": "PERSP",
"view_rotation": Quaternion((0.4915403723716736, 0.6154682636260986, -0.25714513659477234, -0.559877872467041)),
},
{
"view_distance": 5.019718647003174,
"view_location": Vector((-0.9179283380508423, -0.46830159425735474, 0.334771990776062)),
"view_perspective": "PERSP",
"view_rotation": Quaternion((-0.22622741758823395, 0.6814441084861755, -0.1789524108171463, 0.6726300716400146))
},
{
"view_distance": 5.019718647003174,
"view_location": Vector((0.797123372554779, 0.7804675102233887, 0.635741114616394)),
"view_perspective": "PERSP",
"view_rotation": Quaternion((0.687656581401825, 0.6367506384849548, -0.2974682152271271, 0.1821804791688919))
}
]
if __name__ == '__main__':
duplicate_window()
RENDER_AREA = bpy.data.window_managers[0].windows[-1].screen.areas[0]
MAIN_VIEW = [area for area in bpy.data.window_managers[0].windows[0].screen.areas if area.type == 'VIEW_3D'][0].spaces[0].region_3d
QUAD_VIEWS = RENDER_AREA.spaces[0].region_quadviews
convert_quadview(area=RENDER_AREA)
configure_scene(screen_data=RENDER_AREA.spaces[0])
initial_config(initial_config_values())
get_gestures()
# bpy.data.window_managers[0].windows[1].screen.areas[0].spaces[0].region_3d.view_rotation.rotate(Euler((1, 10, .1)))
for window in bpy.data.window_managers[0].windows: # let's find what's what
for area in window.screen.areas:
if area.type == 'VIEW_3D':
if len(area.spaces[0].region_quadviews) > 0: #if quadviews are active
quad_views = area.spaces[0].region_quadviews
else:
main_view = area.spaces[0].region_3d
bpy.app.handlers.frame_change_post.append(update_handler)
| 43.656489 | 135 | 0.673719 |
6f57b93666fc12f3542b15b4104bbd2e0df4bc2a | 2,506 | py | Python | ncp/models/det_mix_ncp.py | JoeMWatson/ncp | 705634393cc5b739323009aaa3ad0bd02f540728 | [
"Apache-2.0"
] | 2 | 2020-10-21T23:54:28.000Z | 2020-12-26T14:00:07.000Z | ncp/models/det_mix_ncp.py | JoeMWatson/ncp | 705634393cc5b739323009aaa3ad0bd02f540728 | [
"Apache-2.0"
] | null | null | null | ncp/models/det_mix_ncp.py | JoeMWatson/ncp | 705634393cc5b739323009aaa3ad0bd02f540728 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow_probability import distributions as tfd
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from ncp import tools
| 41.081967 | 75 | 0.7502 |
6f59ecc61ca13580a763007d05b1e7a17bc242cb | 165 | py | Python | api/models/target.py | zanachka/proxy-service | 769e263606a6e520efdfe7d119bb717487d0e27e | [
"MIT"
] | 1 | 2020-08-08T17:06:43.000Z | 2020-08-08T17:06:43.000Z | api/models/target.py | zanachka/proxy-service | 769e263606a6e520efdfe7d119bb717487d0e27e | [
"MIT"
] | 2 | 2021-03-30T01:01:59.000Z | 2021-03-30T01:01:59.000Z | api/models/target.py | zanachka/proxy-service | 769e263606a6e520efdfe7d119bb717487d0e27e | [
"MIT"
] | 4 | 2020-12-22T18:13:24.000Z | 2021-11-26T13:03:45.000Z | """
DB operations for Targets
"""
from api.models.base import DBModel
| 13.75 | 39 | 0.684848 |
6f5a67d0b0a7c3b0ab3f8a01d7d7b783ef80e5c4 | 15,107 | py | Python | myTeam.py | alexrichardson21/PacmanDQNAgent | 7a7aff6f8fa80c0e00e107adb07380194e2fc2d3 | [
"MIT"
] | null | null | null | myTeam.py | alexrichardson21/PacmanDQNAgent | 7a7aff6f8fa80c0e00e107adb07380194e2fc2d3 | [
"MIT"
] | null | null | null | myTeam.py | alexrichardson21/PacmanDQNAgent | 7a7aff6f8fa80c0e00e107adb07380194e2fc2d3 | [
"MIT"
] | null | null | null | # myTeam.py
# ---------
# Licensing Infoesmation: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# TO DISCUSS:
# Walkthru
# Replay Func
# Agent state vs position
# Normalizing state values
# Actions vs. Legal Actions
# Reward Func
import random
import time
import math
import json
import os
from util import nearestPoint
from collections import deque
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from game import Directions
from captureAgents import CaptureAgent
#################
# Team creation #
#################
def createTeam(firstIndex, secondIndex, isRed,
first='OffDQNAgent', second='DefDQNAgent'):
"""
This function should return a list of two agents that will form the
team, initialized using firstIndex and secondIndex as their agent
index numbers. isRed is True if the red team is being created, and
will be False if the blue team is being created.
As a potentially helpful development aid, this function can take
additional string-valued keyword arguments ("first" and "second" are
such arguments in the case of this function), which will come from
the --redOpts and --blueOpts command-line arguments to capture.py.
For the nightly contest, however, your team will be created without
any extra arguments, so you should make sure that the default
behavior is what you want for the nightly contest.
"""
# The following line is an example only; feel free to change it.
return [eval(first)(firstIndex), eval(second)(secondIndex)]
##########
# Agents #
##########
| 34.256236 | 112 | 0.583769 |
6f5c96a2170db005f0df74623642b0c6df9f9c2a | 433 | py | Python | setup.py | astrodeepnet/sbi_experiments | 70af041da08565ba15e0c011145b11ab3fd973d7 | [
"MIT"
] | 3 | 2021-12-11T20:57:07.000Z | 2021-12-14T22:20:42.000Z | setup.py | astrodeepnet/sbi_experiments | 70af041da08565ba15e0c011145b11ab3fd973d7 | [
"MIT"
] | 20 | 2021-11-15T17:08:54.000Z | 2022-03-25T10:32:52.000Z | setup.py | astrodeepnet/sbi_experiments | 70af041da08565ba15e0c011145b11ab3fd973d7 | [
"MIT"
] | 3 | 2021-11-22T21:44:04.000Z | 2021-12-14T10:31:46.000Z | from setuptools import setup, find_packages
setup(
name='SBIExperiments',
version='0.0.1',
url='https://github.com/astrodeepnet/sbi_experiments',
author='Justine Zeghal and friends',
description='Package for numerical experiments of SBI tools',
packages=find_packages(),
install_requires=[
'numpy>=1.19.2',
'jax>=0.2.0',
'tensorflow_probability>=0.14.1',
'scikit-learn>=0.21',
'jaxopt>=0.2'
],
)
| 24.055556 | 63 | 0.681293 |
6f5dc8fddaaa9b918695e316e6b45bac16a19712 | 880 | py | Python | deploy/deploy_asterisk_provider2.py | orpolaczek/astricon-2017-demos | 1d3f24a72b19bf7ecf70831fd6b122cde59ea47b | [
"MIT"
] | null | null | null | deploy/deploy_asterisk_provider2.py | orpolaczek/astricon-2017-demos | 1d3f24a72b19bf7ecf70831fd6b122cde59ea47b | [
"MIT"
] | null | null | null | deploy/deploy_asterisk_provider2.py | orpolaczek/astricon-2017-demos | 1d3f24a72b19bf7ecf70831fd6b122cde59ea47b | [
"MIT"
] | 1 | 2018-09-14T08:32:07.000Z | 2018-09-14T08:32:07.000Z | import datetime
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from pprint import pprint
Engine = get_driver(Provider.ELASTICHOSTS)
driver = Engine("733b7dc7-7498-4db4-9dc4-74d3fee8abed",
secret="6w6CDAqL6JyXFj3xNkWW2zpUjYfv9dYaLVdaaR4Y",
secure=False)
images = driver.list_images()
sizes = driver.list_sizes()
IMAGE_ID = '38df09864d854b76b5023878ffc80161'
image = [i for i in images if i.id == IMAGE_ID][0]
pprint(images)
pprint(sizes)
node = driver.deploy_node(
name="astricon-{}".format(datetime.datetime.now().strftime('%Y-%m-%dt%H%M%S')),
image=image,
size=sizes[3],
script='deploy-script.sh',
enable_root=True,
vnc_password="myStr0ngr00tpa55wo7d")
print("Waiting for Node")
driver.wait_until_running([node], 10, 1000)
print("Node is now running")
| 25.882353 | 83 | 0.729545 |
6f5df725ff569b1c32118a15233cd3613598d3f9 | 95 | py | Python | todo/admin.py | haidoro/TODO_lesson | fa0b92eb5d6f05ee15900dcc407e1ae3451fee5b | [
"CECILL-B"
] | null | null | null | todo/admin.py | haidoro/TODO_lesson | fa0b92eb5d6f05ee15900dcc407e1ae3451fee5b | [
"CECILL-B"
] | null | null | null | todo/admin.py | haidoro/TODO_lesson | fa0b92eb5d6f05ee15900dcc407e1ae3451fee5b | [
"CECILL-B"
] | null | null | null | from django.contrib import admin
from .models import TodoModel
admin.site.register(TodoModel)
| 19 | 32 | 0.831579 |
6f60ca83d2a6e347812ac821a5981278c5493d55 | 1,853 | py | Python | assignments/06-python-first-lines/first_lines.py | antoniog1995/biosys-analytics | f4ac78f0918c402b5405bcb95b6ad1f76f2d9f08 | [
"MIT"
] | null | null | null | assignments/06-python-first-lines/first_lines.py | antoniog1995/biosys-analytics | f4ac78f0918c402b5405bcb95b6ad1f76f2d9f08 | [
"MIT"
] | null | null | null | assignments/06-python-first-lines/first_lines.py | antoniog1995/biosys-analytics | f4ac78f0918c402b5405bcb95b6ad1f76f2d9f08 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Author : antoniog1
Date : 2019-02-21
Purpose: Rock the Casbah
"""
import argparse
import sys
import os
# --------------------------------------------------
def get_args():
"""get command-line arguments"""
parser = argparse.ArgumentParser(
description='Argparse Python script',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('positional', metavar='DIR', type = str, help='A positional argument', nargs="+")
parser.add_argument('-w', '--width', help='A named integer argument', metavar='int', type=int, default=50)
return parser.parse_args()
# --------------------------------------------------
def warn(msg):
"""Print a message to STDERR"""
print(msg, file=sys.stderr)
# --------------------------------------------------
def die(msg='Something bad happened'):
"""warn() and exit with error"""
warn(msg)
sys.exit(1)
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
width = args.width
directory = args.positional
for dir_name in directory:
dir_dict = {}
if not os.path.isdir(dir_name):
warn('"{}" is not a directory'.format(dir_name))
continue
print(dir_name)
for filename in os.listdir(dir_name):
path = os.path.join(dir_name,filename)
with open(path) as f:
first_line = f.readline().rstrip()
dir_dict[first_line] = filename
for line, file in sorted(dir_dict.items()):
num_per = width - len(line) - len(file)
ellipses = "." * num_per
print('{} {} {}'.format(line,ellipses,file))
# --------------------------------------------------
if __name__ == '__main__':
main()
| 28.953125 | 110 | 0.51538 |
6f60f993f98cc6ec2f6e673c50ecaa903ed57a90 | 4,042 | py | Python | lvmsurveysim/target/target.py | albireox/lvmsurveysim | 8ebe8ae7a90e5f50c4ea186b947a49720b7ed9ed | [
"BSD-3-Clause"
] | null | null | null | lvmsurveysim/target/target.py | albireox/lvmsurveysim | 8ebe8ae7a90e5f50c4ea186b947a49720b7ed9ed | [
"BSD-3-Clause"
] | null | null | null | lvmsurveysim/target/target.py | albireox/lvmsurveysim | 8ebe8ae7a90e5f50c4ea186b947a49720b7ed9ed | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
#
# @Author: Jos Snchez-Gallego
# @Date: Oct 10, 2017
# @Filename: target.py
# @License: BSD 3-Clause
# @Copyright: Jos Snchez-Gallego
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import pathlib
import yaml
from . import regions
from .. import config
| 30.854962 | 79 | 0.596487 |
6f628605ce1d839a711154a74d7ae1743fe66d28 | 1,889 | py | Python | samples/snippets/test_export_to_bigquery.py | renovate-bot/python-contact-center-insights | d133f4028d862cc39d10ba4b0879df256a3505c1 | [
"Apache-2.0"
] | 4 | 2021-08-15T04:55:44.000Z | 2022-02-01T09:19:57.000Z | samples/snippets/test_export_to_bigquery.py | renovate-bot/python-contact-center-insights | d133f4028d862cc39d10ba4b0879df256a3505c1 | [
"Apache-2.0"
] | 53 | 2021-07-16T11:02:44.000Z | 2022-03-07T16:39:20.000Z | samples/snippets/test_export_to_bigquery.py | renovate-bot/python-contact-center-insights | d133f4028d862cc39d10ba4b0879df256a3505c1 | [
"Apache-2.0"
] | 5 | 2021-07-15T18:17:53.000Z | 2022-01-29T08:09:16.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
import google.auth
from google.cloud import bigquery
import pytest
import export_to_bigquery
GCLOUD_TESTS_PREFIX = "python_samples_tests"
def test_export_data_to_bigquery(capsys, project_id, bigquery_resources):
dataset_id, table_id = bigquery_resources
export_to_bigquery.export_to_bigquery(project_id, project_id, dataset_id, table_id)
out, err = capsys.readouterr()
assert "Exported data to BigQuery" in out
| 28.621212 | 87 | 0.755956 |
6f62e27b225bd0318b85812c8f42343dc3b0fca8 | 1,436 | py | Python | Author/admin.py | CMPUT404-Fa21-Organization/CMPUT404-Project-Social-Distribution | 63c0ba2a03f0b462e3673ce7a4bf6bae7999440c | [
"Apache-2.0"
] | 3 | 2021-12-11T13:43:56.000Z | 2022-03-31T02:36:05.000Z | Author/admin.py | CMPUT404-Fa21-Organization/CMPUT404-Project-Social-Distribution | 63c0ba2a03f0b462e3673ce7a4bf6bae7999440c | [
"Apache-2.0"
] | 9 | 2021-10-01T22:46:57.000Z | 2021-12-16T18:01:31.000Z | Author/admin.py | CMPUT404-Fa21-Organization/CMPUT404-Project-Social-Distribution | 63c0ba2a03f0b462e3673ce7a4bf6bae7999440c | [
"Apache-2.0"
] | 2 | 2021-12-16T16:37:10.000Z | 2021-12-16T20:30:12.000Z | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import *
# Register your models here.
set_active.short_description = 'Set Account Status: Active'
deactivate.short_description = 'Set Account Status: Inactive'
# admin.site.unregister(User)
admin.site.register(Author, AuthorAdmin)
admin.site.register(Inbox)
admin.site.register(Like)
admin.site.register(Liked)
admin.site.register(FriendRequest)
admin.site.register(Followers) | 31.217391 | 96 | 0.639972 |
6f637c0d8807f40cbf867588212e880e31335fd4 | 26,912 | py | Python | TP2/pyApp/venv/lib/python3.8/site-packages/pyloco/task.py | MariusBallot/09-2021-Robotics-EFREI-Files | cf6bdb7b9d3b9f368970fbed42c6b403f56b0eeb | [
"W3C"
] | null | null | null | TP2/pyApp/venv/lib/python3.8/site-packages/pyloco/task.py | MariusBallot/09-2021-Robotics-EFREI-Files | cf6bdb7b9d3b9f368970fbed42c6b403f56b0eeb | [
"W3C"
] | null | null | null | TP2/pyApp/venv/lib/python3.8/site-packages/pyloco/task.py | MariusBallot/09-2021-Robotics-EFREI-Files | cf6bdb7b9d3b9f368970fbed42c6b403f56b0eeb | [
"W3C"
] | null | null | null | # -*- coding: utf-8 -*-
"""task module."""
from __future__ import unicode_literals
import sys
import os
import pydoc
import time
import json
import logging
import collections
import pkg_resources
import subprocess
import webbrowser
import websocket
from pyloco.parse import TaskArgParser, PylocoArgParser
from pyloco.proxy import ParentProxy
from pyloco.util import (load_pymod, type_check, pyloco_print, OS, urlparse, teval,
split_assert_expr, get_port, pack_websocket_message,
is_ipv6, pyloco_import, PylocoPickle, import_modulepath)
from pyloco.error import TestError, InternalError, UsageError
from pyloco.base import Object, Global, pyloco_builtins
| 31.329453 | 111 | 0.506986 |
6f644b09bfe662762ed95cb2b170c8fc73f84411 | 1,376 | py | Python | azure-iot-device/azure/iot/device/aio/__init__.py | olivakar/azure-iot-sdk-python | d8f2403030cf94510d381d8d5ac37af6e8d306f8 | [
"MIT"
] | null | null | null | azure-iot-device/azure/iot/device/aio/__init__.py | olivakar/azure-iot-sdk-python | d8f2403030cf94510d381d8d5ac37af6e8d306f8 | [
"MIT"
] | null | null | null | azure-iot-device/azure/iot/device/aio/__init__.py | olivakar/azure-iot-sdk-python | d8f2403030cf94510d381d8d5ac37af6e8d306f8 | [
"MIT"
] | null | null | null | """Azure IoT Device Library - Asynchronous
This library provides asynchronous clients for communicating with Azure IoT services
from an IoT device.
"""
from azure.iot.device.iothub.aio import *
from azure.iot.device.provisioning.aio import *
from . import patch_documentation
# Dynamically patch the clients to add shim implementations for all the inherited methods.
# This is necessary to generate accurate online docs.
# It SHOULD not impact the functionality of the methods themselves in any way.
# NOTE In the event of addition of new methods and generation of accurate documentation
# for those methods we have to append content to "patch_documentation.py" file.
# In order to do so please uncomment the "patch.add_shims" lines below,
# enable logging with level "DEBUG" in a python terminal and do
# "import azure.iot.device". The delta between the newly generated output
# and the existing content of "patch_documentation.py" should be appended to
# the function "execute_patch_for_sync" in "patch_documentation.py".
# Once done please again omment out the "patch.add_shims" lines below.
# patch.add_shims_for_inherited_methods(IoTHubDeviceClient) # noqa: F405
# patch.add_shims_for_inherited_methods(IoTHubModuleClient) # noqa: F405
# patch.add_shims_for_inherited_methods(ProvisioningDeviceClient) # noqa: F405
patch_documentation.execute_patch_for_async()
| 45.866667 | 90 | 0.805233 |
6f647632e2c96c2063ca3a82382e2a10a7664a9e | 1,716 | py | Python | lino_xl/lib/reception/__init__.py | khchine5/xl | b1634937a9ce87af1e948eb712b934b11f221d9d | [
"BSD-2-Clause"
] | 1 | 2018-01-12T14:09:48.000Z | 2018-01-12T14:09:48.000Z | lino_xl/lib/reception/__init__.py | khchine5/xl | b1634937a9ce87af1e948eb712b934b11f221d9d | [
"BSD-2-Clause"
] | 1 | 2019-09-10T05:03:47.000Z | 2019-09-10T05:03:47.000Z | lino_xl/lib/reception/__init__.py | khchine5/xl | b1634937a9ce87af1e948eb712b934b11f221d9d | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: UTF-8 -*-
# Copyright 2013-2016 Luc Saffre
#
# License: BSD (see file COPYING for details)
"""This module is for managing a reception desk and a waiting queue:
register clients into a waiting queue as they present themselves at a
reception desk (Empfangsschalter), and unregister them when they leave
again.
It depends on :mod:`lino_xl.lib.cal`. It does not add any model, but
adds some workflow states, actions and tables.
Extended by :mod:`lino_welfare.modlib.reception`.
.. autosummary::
:toctree:
models
workflows
"""
from lino.api import ad, _
| 28.6 | 72 | 0.701049 |
6f659c58598cee6e53216640aed93bdbc6f2a194 | 320 | py | Python | old/accent_analyser/rules/RuleRemoveThe.py | stefantaubert/eng2ipa-accent-transformer | d620c70b06c83119402e255085046747ade87444 | [
"MIT"
] | null | null | null | old/accent_analyser/rules/RuleRemoveThe.py | stefantaubert/eng2ipa-accent-transformer | d620c70b06c83119402e255085046747ade87444 | [
"MIT"
] | null | null | null | old/accent_analyser/rules/RuleRemoveThe.py | stefantaubert/eng2ipa-accent-transformer | d620c70b06c83119402e255085046747ade87444 | [
"MIT"
] | null | null | null | from accent_analyser.rules.EngRule import EngRule
| 22.857143 | 59 | 0.69375 |
6f6a38a0483844a3f770817a9b327db335f9b10a | 255 | py | Python | ssrl/providers/base.py | AspirinGeyer/PySSRL | bdb27d7ada2fc317b8e2ea18f389e280d58e24ac | [
"Apache-2.0"
] | 6 | 2019-06-07T19:25:51.000Z | 2019-10-30T01:56:29.000Z | ssrl/providers/base.py | AspirinGeyer/PySSRL | bdb27d7ada2fc317b8e2ea18f389e280d58e24ac | [
"Apache-2.0"
] | 1 | 2019-08-26T00:05:50.000Z | 2019-08-26T00:05:50.000Z | ssrl/providers/base.py | AspirinGeyer/PySSRL | bdb27d7ada2fc317b8e2ea18f389e280d58e24ac | [
"Apache-2.0"
] | 1 | 2019-10-30T01:56:33.000Z | 2019-10-30T01:56:33.000Z | # -*- coding:utf-8 -*-
| 19.615385 | 58 | 0.670588 |
6f6b28d63b93b95d61bab409bb560af9d95cf417 | 1,505 | py | Python | tornado_demo/web2py/applications/examples/controllers/global.py | ls-2018/tips | 1f5f5195d7181b5dd4616db02166f7f92c97f1cd | [
"MIT"
] | 2 | 2019-05-07T03:08:25.000Z | 2020-05-22T10:10:00.000Z | tornado_demo/web2py/applications/examples/controllers/global.py | ls-2018/tips | 1f5f5195d7181b5dd4616db02166f7f92c97f1cd | [
"MIT"
] | 7 | 2020-05-22T13:29:42.000Z | 2021-09-23T23:30:25.000Z | tornado_demo/web2py/applications/examples/controllers/global.py | ls-2018/py | 1f5f5195d7181b5dd4616db02166f7f92c97f1cd | [
"MIT"
] | null | null | null | session.forget()
def vars():
"""the running controller function!"""
title = '.'.join(request.args)
attributes = {}
if not request.args:
(doc, keys, t, c, d, value) = ('Global variables', globals(), None, None, [], None)
elif len(request.args) < 3:
obj = get(request.args)
if obj:
doc = getattr(obj, '__doc__', 'no documentation')
keys = dir(obj)
t = type(obj)
c = getattr(obj, '__class__', None)
d = getattr(obj, '__bases__', None)
for key in keys:
a = getattr(obj, key, None)
if a and not isinstance(a, DAL):
doc1 = getattr(a, '__doc__', '')
t1 = type(a)
c1 = getattr(a, '__class__', None)
d1 = getattr(a, '__bases__', None)
key = '.'.join(request.args) + '.' + key
attributes[key] = (doc1, t1, c1, d1)
else:
doc = 'Unkown'
keys = []
t = c = d = None
else:
raise HTTP(400)
return dict(
title=title,
args=request.args,
t=t,
c=c,
d=d,
doc=doc,
attributes=attributes,
)
| 27.363636 | 91 | 0.453821 |
6f6c2c1c13418649733376c632ea6395a15039ac | 857 | py | Python | medium/python3/c0108_223_rectangle-area/00_leetcode_0108.py | drunkwater/leetcode | 8cc4a07763e71efbaedb523015f0c1eff2927f60 | [
"Ruby"
] | null | null | null | medium/python3/c0108_223_rectangle-area/00_leetcode_0108.py | drunkwater/leetcode | 8cc4a07763e71efbaedb523015f0c1eff2927f60 | [
"Ruby"
] | null | null | null | medium/python3/c0108_223_rectangle-area/00_leetcode_0108.py | drunkwater/leetcode | 8cc4a07763e71efbaedb523015f0c1eff2927f60 | [
"Ruby"
] | 3 | 2018-02-09T02:46:48.000Z | 2021-02-20T08:32:03.000Z | # DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#223. Rectangle Area
#Find the total area covered by two rectilinear rectangles in a 2D plane.
#Each rectangle is defined by its bottom left corner and top right corner as shown in the figure.
#Assume that the total area is never beyond the maximum possible value of int.
#Credits:
#Special thanks to @mithmatt for adding this problem, creating the above image and all test cases.
#class Solution:
# def computeArea(self, A, B, C, D, E, F, G, H):
# """
# :type A: int
# :type B: int
# :type C: int
# :type D: int
# :type E: int
# :type F: int
# :type G: int
# :type H: int
# :rtype: int
# """
# Time Is Money | 32.961538 | 98 | 0.655776 |
6f6c63911e71ae7c84e18bedf35df7f0d63d41aa | 437 | py | Python | serialTest.py | fmuno003/SeniorDesign | 113bdcf4cc906042f44736a1ffddb6ffff3a217e | [
"BSD-3-Clause"
] | 1 | 2019-04-29T16:07:51.000Z | 2019-04-29T16:07:51.000Z | serialTest.py | fmuno003/SeniorDesign | 113bdcf4cc906042f44736a1ffddb6ffff3a217e | [
"BSD-3-Clause"
] | null | null | null | serialTest.py | fmuno003/SeniorDesign | 113bdcf4cc906042f44736a1ffddb6ffff3a217e | [
"BSD-3-Clause"
] | null | null | null | import serial
import RPi.GPIO as GPIO
import time
ser=serial.Serial("/dev/ttyACM0",9600)
start_time = time.time()
imu = open("IMU.txt","w")
while time.time() - start_time <= 1:
ser.readline()
while time.time() - start_time <= 8:
read_ser=ser.readline()
if float(read_ser) == 0.00:
pass
else:
read = read_ser.strip('\n')
imu.write(read)
imu.write('\n')
imu.close()
| 19.863636 | 39 | 0.578947 |
6f6e858702c8ce5b6a0c7be5155f97db4b0d395c | 1,950 | py | Python | src/pyg_base/_zip.py | nclarey/pyg-base | a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862 | [
"MIT"
] | null | null | null | src/pyg_base/_zip.py | nclarey/pyg-base | a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862 | [
"MIT"
] | null | null | null | src/pyg_base/_zip.py | nclarey/pyg-base | a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862 | [
"MIT"
] | 1 | 2022-01-03T21:56:14.000Z | 2022-01-03T21:56:14.000Z | from pyg_base._types import is_iterable
from pyg_base._loop import len0
__all__ = ['zipper', 'lens']
def lens(*values):
    """
    Return the single length shared across all *values*, enforcing it.

    Arguments of length 1 are treated as broadcastable and do not
    constrain the common length.

    :Parameters:
    ----------------
    *values : lists

    Raises
    ------
    ValueError
        if the non-unit lengths of the values disagree.

    :Returns:
    -------
    int
        common length.

    :Example:
    --------------

    >>> assert lens() == 0
    >>> assert lens([1,2,3], [2,4,5]) == 3
    >>> assert lens([1,2,3], [2,4,5], [6]) == 3
    """
    if len0(values) == 0:
        return 0
    # Collect every distinct length, then discard 1 (broadcastable).
    non_unit = {len0(value) for value in values} - {1}
    if len(non_unit) > 1:
        raise ValueError('found multiple lengths %s '%non_unit)
    return non_unit.pop() if non_unit else 1
def zipper(*values):
    """
    a safer version of zip: scalars are wrapped and length-1 sequences
    are broadcast to the common length before zipping.

    :Examples: zipper works with single values as well as full list:
    ---------------

    >>> assert list(zipper([1,2,3], 4)) == [(1, 4), (2, 4), (3, 4)]
    >>> assert list(zipper([1,2,3], [4,5,6])) == [(1, 4), (2, 5), (3, 6)]
    >>> assert list(zipper([1,2,3], [4,5,6], [7])) == [(1, 4, 7), (2, 5, 7), (3, 6, 7)]
    >>> assert list(zipper([1,2,3], [4,5,6], None)) == [(1, 4, None), (2, 5, None), (3, 6, None)]
    >>> assert list(zipper((1,2,3), np.array([4,5,6]), None)) == [(1, 4, None), (2, 5, None), (3, 6, None)]

    :Examples: zipper rejects multi-length lists
    ---------------

    >>> import pytest
    >>> with pytest.raises(ValueError):
    >>>     zipper([1,2,3], [4,5])

    :Parameters:
    ----------------
    *values : lists
        values to be zipped

    :Returns:
    -------
    zipped values
    """
    # Normalize each argument: exhaust zip objects into lists, keep
    # iterables as-is, and wrap scalars in a single-element list.
    normalized = []
    for value in values:
        if isinstance(value, zip):
            normalized.append(list(value))
        elif is_iterable(value):
            normalized.append(value)
        else:
            normalized.append([value])
    # lens() also validates that the non-unit lengths agree.
    n = lens(*normalized)
    broadcast = [value if len(value) != 1 else value * n for value in normalized]
    return zip(*broadcast)
| 27.083333 | 118 | 0.510256 |
6f6e961109cfe080e1074fb4fb957b034dcf9958 | 1,997 | py | Python | cli/pawls/preprocessors/grobid.py | vtcaregorodtcev/pawls-1 | 32cfb7bc56edac2fe972467a1133a31ae901c727 | [
"Apache-2.0"
] | null | null | null | cli/pawls/preprocessors/grobid.py | vtcaregorodtcev/pawls-1 | 32cfb7bc56edac2fe972467a1133a31ae901c727 | [
"Apache-2.0"
] | 13 | 2022-02-17T06:05:44.000Z | 2022-03-17T02:47:49.000Z | cli/pawls/preprocessors/grobid.py | vtcaregorodtcev/pawls-1 | 32cfb7bc56edac2fe972467a1133a31ae901c727 | [
"Apache-2.0"
] | 2 | 2021-09-28T08:01:42.000Z | 2021-09-28T08:18:31.000Z | import json
from typing import List
import requests
from pawls.preprocessors.model import Page
def process_grobid(
    pdf_file: str,
    grobid_host: str = "http://localhost:8070"
):
    """
    Import annotations for a PDF from a Grobid service.

    Depends on a grobid API built from the fork at
    https://github.com/allenai/grobid: the PDF is sent to the Grobid
    API and the parsed annotations are returned.

    pdf_file: str
        The path to the pdf file to process.
    grobid_host: str (optional, default="http://localhost:8070")
        The forked grobid API which we use to produce the annotations.
    """
    # Fetch the raw structure from the service, then convert it into
    # the annotation format in one step.
    return parse_annotations(fetch_grobid_structure(pdf_file, grobid_host))
| 31.203125 | 86 | 0.593891 |
6f6fd6c2d42d2b9282a1e6483b23196da4a8aeeb | 2,614 | py | Python | scripts/run_custom_eslint_tests.py | lheureuxe13/oppia | 7110e3e5d5a53527c31d7b33e14d25e8d5b981f9 | [
"Apache-2.0"
] | 4 | 2021-09-16T16:46:53.000Z | 2022-02-06T13:00:14.000Z | scripts/run_custom_eslint_tests.py | lheureuxe13/oppia | 7110e3e5d5a53527c31d7b33e14d25e8d5b981f9 | [
"Apache-2.0"
] | 80 | 2020-10-31T09:14:46.000Z | 2021-01-12T23:38:15.000Z | scripts/run_custom_eslint_tests.py | lheureuxe13/oppia | 7110e3e5d5a53527c31d7b33e14d25e8d5b981f9 | [
"Apache-2.0"
] | 1 | 2020-10-02T13:28:26.000Z | 2020-10-02T13:28:26.000Z | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for running tests for custom eslint checks."""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import re
import subprocess
import sys
from core import python_utils
from scripts import common
def main():
    """Run the custom eslint rule tests under nyc/mocha.

    Exits with a non-zero status if any test fails, and raises an
    Exception when statement/branch/function/line coverage is not 100%.
    """
    node_path = os.path.join(common.NODE_PATH, 'bin', 'node')
    nyc_path = os.path.join('node_modules', 'nyc', 'bin', 'nyc.js')
    mocha_path = os.path.join('node_modules', 'mocha', 'bin', 'mocha')
    filepath = 'scripts/linters/custom_eslint_checks/rules/'
    proc_args = [node_path, nyc_path, mocha_path, filepath]

    proc = subprocess.Popen(
        proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    encoded_tests_stdout, encoded_tests_stderr = proc.communicate()
    # Standard and error output is in bytes, we need to decode the line to
    # print it.
    tests_stdout = encoded_tests_stdout.decode('utf-8')
    tests_stderr = encoded_tests_stderr.decode('utf-8')
    if tests_stderr:
        python_utils.PRINT(tests_stderr)
        sys.exit(1)
    python_utils.PRINT(tests_stdout)
    if 'failing' in tests_stdout:
        python_utils.PRINT('---------------------------')
        python_utils.PRINT('Tests not passed')
        python_utils.PRINT('---------------------------')
        sys.exit(1)
    else:
        python_utils.PRINT('---------------------------')
        python_utils.PRINT('All tests passed')
        python_utils.PRINT('---------------------------')

    # Bug fix: the original wrote `coverage_result = re.search = re.search(...)`,
    # which rebound the module attribute `re.search` to a Match object,
    # breaking every later use of `re.search` in the process.
    coverage_result = re.search(
        r'All files\s*\|\s*(?P<stmts>\S+)\s*\|\s*(?P<branch>\S+)\s*\|\s*'
        r'(?P<funcs>\S+)\s*\|\s*(?P<lines>\S+)\s*\|\s*', tests_stdout)
    if coverage_result is None:
        # Previously this fell through to an AttributeError on None;
        # fail with an explicit message instead.
        raise Exception('No coverage summary found in the test output')
    if (coverage_result.group('stmts') != '100' or
            coverage_result.group('branch') != '100' or
            coverage_result.group('funcs') != '100' or
            coverage_result.group('lines') != '100'):
        raise Exception('Eslint test coverage is not 100%')
if __name__ == '__main__':
main()
| 35.808219 | 74 | 0.649579 |
6f6fddc36a83d5396bed90f0e96d5995bd58f9a5 | 6,274 | py | Python | nmpc_mhe/tst_algorithmsv2_nmpc_hi_t0115_setp.py | joycezyu/cappresse | 45b40d0e9202180a0a07e1c03960cf30b08a4557 | [
"BSD-3-Clause"
] | null | null | null | nmpc_mhe/tst_algorithmsv2_nmpc_hi_t0115_setp.py | joycezyu/cappresse | 45b40d0e9202180a0a07e1c03960cf30b08a4557 | [
"BSD-3-Clause"
] | null | null | null | nmpc_mhe/tst_algorithmsv2_nmpc_hi_t0115_setp.py | joycezyu/cappresse | 45b40d0e9202180a0a07e1c03960cf30b08a4557 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
from pyomo.environ import *
from pyomo.core.base import Constraint, Objective, Suffix, minimize
from pyomo.opt import ProblemFormat, SolverFactory
from nmpc_mhe.dync.NMPCGenv2 import NmpcGen
from nmpc_mhe.mods.bfb.nob5_hi_t import bfb_dae
from snap_shot import snap
import sys, os
import itertools, sys
from numpy.random import normal as npm
# SWITCH TO JUST ONE COLLOCATION POINT AND FINITE ELEMENT
states = ["Hgc", "Nsc", "Hsc", "Hge", "Nse", "Hse"]
# x_noisy = ["Ngb", "Hgb", "Ngc", "Hgc", "Nsc", "Hsc", "Nge", "Hge", "Nse", "Hse", "mom"]
# x_noisy = ["Hse"]
x_noisy = ["Hgc", "Nsc", "Hsc", "Hge", "Nse", "Hse"]
u = ["u1"]
u_bounds = {"u1":(162.183495794 * 0.0005, 162.183495794 * 10000)}
ref_state = {("c_capture", ((),)): 0.63}
# Known targets 0.38, 0.4, 0.5
nfe_mhe = 10
y = ["Tgb", "vg"]
nfet = 10
ncpx = 3
nfex = 5
tfe = [i for i in range(1, nfe_mhe + 1)]
lfe = [i for i in range(1, nfex + 1)]
lcp = [i for i in range(1, ncpx + 1)]
lc = ['c', 'h', 'n']
y_vars = {
"Tgb": [i for i in itertools.product(lfe, lcp)],
"vg": [i for i in itertools.product(lfe, lcp)]
}
# x_vars = dict()
x_vars = {
# "Nge": [i for i in itertools.product(lfe, lcp, lc)],
# "Hge": [i for i in itertools.product(lfe, lcp)],
"Nsc": [i for i in itertools.product(lfe, lcp, lc)],
"Hsc": [i for i in itertools.product(lfe, lcp)],
"Nse": [i for i in itertools.product(lfe, lcp, lc)],
"Hse": [i for i in itertools.product(lfe, lcp)],
"Hgc": [i for i in itertools.product(lfe, lcp)],
"Hge": [i for i in itertools.product(lfe, lcp)],
# "mom": [i for i in itertools.product(lfe, lcp)]
}
# States -- (5 * 3 + 6) * fe_x * cp_x.
# For fe_x = 5 and cp_x = 3 we will have 315 differential-states.
e = NmpcGen(bfb_dae, 400/nfe_mhe, states, u,
ref_state=ref_state, u_bounds=u_bounds,
nfe_tnmpc=nfe_mhe, ncp_tnmpc=1,
nfe_t=5, ncp_t=1)
# 10 fe & _t=1000 definitely degenerate
# 10 fe & _t=900 definitely degenerate
# 10 fe & _t=120 sort-of degenerate
# 10 fe & _t=50 sort-of degenerate
# 10 fe & _t=50 eventually sort-of degenerate
# 10 fe & _t=1 eventually sort-of degenerate
e.SteadyRef.dref = snap
e.load_iguess_steady()
e.SteadyRef.create_bounds()
e.solve_steady_ref()
e.SteadyRef.report_zL(filename="mult_ss")
e.load_d_s(e.PlantSample)
e.PlantSample.create_bounds()
e.solve_dyn(e.PlantSample)
# Diagonal noise-covariance weights, each scaled to 0.5% of a nominal
# magnitude. NOTE(review): q_cov/u_cov/m_cov look like state-, input- and
# measurement-noise covariances for an estimator (MHE); this script only
# drives the NMPC below, so confirm whether they are actually consumed.
q_cov = {}
# State noise: keys are ((state, spatial_index), (state, spatial_index), i),
# where i is the temporal finite element.
for i in tfe:
    for j in itertools.product(lfe, lcp, lc):
        # States with a species component (lc = ['c', 'h', 'n']).
        q_cov[("Nse", j), ("Nse", j), i] = 7525.81478168 * 0.005
        q_cov[("Nsc", j), ("Nsc", j), i] = 117.650089456 * 0.005
        # q_cov[("Nse", j), ("Nse", j), i] = 735.706082714 * 0.005
for i in tfe:
    for j in itertools.product(lfe, lcp):
        # Enthalpy states: indexed only by (fe_x, cp_x), no species.
        # q_cov[("Hge", j), ("Hge", j), i] = 2194.25390583 * 0.005
        q_cov[("Hse", j), ("Hse", j), i] = 731143.716603 * 0.005
        q_cov[("Hsc", j), ("Hsc", j), i] = 16668.3312216 * 0.005
        q_cov[("Hge", j), ("Hge", j), i] = 2166.86838591 * 0.005
        q_cov[("Hgc", j), ("Hgc", j), i] = 47.7911012193 * 0.005
        # q_cov[("mom", j), ("mom", j), i] = 1.14042251669 * 0.005

# for i in lfe:
#     for j in [(1,1, 'c'), (5,3, 'c')]:
#         m_cov[("yb", j), ("yb", j), i] = 1e-04
# Input noise for the single control u1, one entry per finite element.
u_cov = {}
for i in [i for i in range(1, nfe_mhe+1)]:
    u_cov["u1", i] = 162.183495794 * 0.005
# Measurement noise for the measured variables (y = ["Tgb", "vg"] above).
m_cov = {}
for i in tfe:
    for j in itertools.product(lfe, lcp):
        m_cov[("Tgb", j), ("Tgb", j), i] = 40 * 0.005
        m_cov[("vg", j), ("vg", j), i] = 0.902649386907 * 0.005
e.find_target_ss() #: Compute target-steady state (beforehand)
#: Create NMPC
e.create_nmpc()
e.update_targets_nmpc()
e.compute_QR_nmpc(n=-1)
e.new_weights_olnmpc(10000, 1e+08)
e.solve_dyn(e.PlantSample, stop_if_nopt=True)
ipsr = SolverFactory('ipopt', executable="/home/dav0/Apps/IpoptSR/Ipopt/build/bin/ipoptSR")
ref_state = {("c_capture", ((),)): 0.50}
e.find_target_ss(ref_state=ref_state) #: Compute target-steady state (beforehand)
e.update_targets_nmpc()
e.compute_QR_nmpc(n=-1)
e.new_weights_olnmpc(10000, 1e+08)
for i in range(1, 1000):
ps = e.solve_dyn(e.PlantSample, stop_if_nopt=False)
e.PlantSample.write_nl(name="baddie.nl")
e.PlantSample.pprint(filename="baddie.txt")
e.PlantSample.snap_shot(filename="baddie.py")
e.PlantSample.report_zL(filename="bad_bounds")
if ps != 0:
e.PlantSample.write_nl(name="baddie.nl")
e.PlantSample.pprint(filename="baddie.txt")
e.PlantSample.snap_shot(filename="baddie.py")
e.PlantSample.report_zL(filename="bad_bounds")
e.solve_dyn(e.PlantSample, stop_if_nopt=True)
e.update_state_real() # update the current state
e.update_soi_sp_nmpc()
#
e.initialize_olnmpc(e.PlantSample, "real")
e.load_init_state_nmpc(src_kind="state_dict", state_dict="real")
stat_nmpc = e.solve_dyn(e.olnmpc, skip_update=False, max_cpu_time=300)
# if stat_nmpc != 0:
# stat_nmpc = e.solve_dyn(e.olnmpc,
# stop_if_nopt=True,
# skip_update=False,
# iter_max=300, ma57_pivtol=1e-12)
if stat_nmpc != 0:
strategy = 1
if strategy == 1:
if e.nfe_tnmpc == 1:
pass
else:
e.create_nmpc(newnfe=e.ncp_tnmpc-1, newncp=1)
e.update_targets_nmpc()
e.compute_QR_nmpc(n=-1)
e.new_weights_olnmpc(10000, 1e+02)
e.initialize_olnmpc(e.PlantSample, "real")
e.load_init_state_nmpc(src_kind="state_dict", state_dict="real")
stat_nmpc = e.solve_dyn(e.olnmpc, skip_update=False, max_cpu_time=300)
else:
e.olnmpc.write_nl(name="bad.nl")
# e.olnmpc.pprint(filename="bad_" + str(i))
with open("ipopt.opt", "w") as f:
f.write("linear_solver ma57\n"
"ma57_dep_tol 1e-8\nbig_M 1e30\n")
f.close()
ipsr.solve(e.olnmpc, tee=True)
e.update_u(e.olnmpc)
e.print_r_nmpc()
e.cycleSamPlant(plant_step=True)
e.plant_uinject(e.PlantSample, src_kind="dict", nsteps=10, skip_homotopy=True)
# e.plant_input_gen(e.PlantSample, "mod", src=e.ss2)
| 36.690058 | 91 | 0.603602 |
6f709ca217e6ed7e435bf5ff768925bbdc7d9c7c | 493 | py | Python | csvapi/security.py | quaxsze/csvapi | 7e5ab5839fb6cbf667c756798a55c9b719394602 | [
"MIT"
] | 15 | 2019-08-23T09:57:54.000Z | 2021-11-08T10:38:03.000Z | csvapi/security.py | quaxsze/csvapi | 7e5ab5839fb6cbf667c756798a55c9b719394602 | [
"MIT"
] | 36 | 2019-08-21T10:05:53.000Z | 2022-03-23T08:58:02.000Z | csvapi/security.py | opendatateam/csvapi | 4e4ea7167f7265782c8f654619b060dc04112392 | [
"MIT"
] | 1 | 2018-04-25T09:55:25.000Z | 2018-04-25T09:55:25.000Z | from urllib.parse import urlparse
from quart import current_app as app, request, jsonify
| 24.65 | 54 | 0.614604 |
6f70b2504b0ddf0927280e069e308de02195aea2 | 447 | py | Python | linkit/models.py | what-digital/linkit | 58fb7dc966e7b76b654c9bc5e52253eb81731e98 | [
"MIT"
] | 8 | 2019-06-11T14:09:12.000Z | 2021-09-09T09:37:47.000Z | linkit/models.py | what-digital/linkit | 58fb7dc966e7b76b654c9bc5e52253eb81731e98 | [
"MIT"
] | 7 | 2020-02-12T02:55:11.000Z | 2020-08-27T09:54:54.000Z | linkit/models.py | what-digital/linkit | 58fb7dc966e7b76b654c9bc5e52253eb81731e98 | [
"MIT"
] | 2 | 2020-06-18T09:54:20.000Z | 2022-02-17T08:33:13.000Z | from django.db import models
from filer.fields.file import FilerFileField
| 37.25 | 115 | 0.753915 |
6f722918045c200389c503a068fc9c4194103a3f | 9,679 | py | Python | tests/helper.py | nirs/python-manhole | 26821e083eefdc87492b13ebdd20ba000a616141 | [
"BSD-2-Clause"
] | null | null | null | tests/helper.py | nirs/python-manhole | 26821e083eefdc87492b13ebdd20ba000a616141 | [
"BSD-2-Clause"
] | null | null | null | tests/helper.py | nirs/python-manhole | 26821e083eefdc87492b13ebdd20ba000a616141 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import print_function
import atexit
import errno
import logging
import os
import select
import signal
import sys
import time
from process_tests import setup_coverage
TIMEOUT = int(os.getenv('MANHOLE_TEST_TIMEOUT', 10))
SOCKET_PATH = '/tmp/manhole-socket'
OUTPUT = sys.__stdout__
# Handling sigterm ensures that atexit functions are called, and we do not
# leave leftover /tmp/manhole-pid sockets behind.
signal.signal(signal.SIGTERM, handle_sigterm)
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
format='[pid=%(process)d - %(asctime)s]: %(name)s - %(levelname)s - %(message)s',
)
test_name = sys.argv[1]
try:
setup_coverage()
if os.getenv('PATCH_THREAD', False):
import manhole
setup_greenthreads(True)
else:
setup_greenthreads(True)
import manhole
if test_name == 'test_activate_on_usr2':
manhole.install(activate_on='USR2')
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name == 'test_install_once':
manhole.install()
try:
manhole.install()
except manhole.AlreadyInstalled:
print('ALREADY_INSTALLED')
else:
raise AssertionError("Did not raise AlreadyInstalled")
elif test_name == 'test_stderr_doesnt_deadlock':
import subprocess
manhole.install()
for i in range(50):
print('running iteration', i)
p = subprocess.Popen(['true'])
print('waiting for process', p.pid)
p.wait()
print('process ended')
path = '/tmp/manhole-%d' % p.pid
if os.path.exists(path):
os.unlink(path)
raise AssertionError(path + ' exists !')
print('SUCCESS')
elif test_name == 'test_fork_exec':
manhole.install(reinstall_delay=5)
print("Installed.")
time.sleep(0.2)
pid = os.fork()
print("Forked, pid =", pid)
if pid:
os.waitpid(pid, 0)
path = '/tmp/manhole-%d' % pid
if os.path.exists(path):
os.unlink(path)
raise AssertionError(path + ' exists !')
else:
try:
time.sleep(1)
print("Exec-ing `true`")
os.execvp('true', ['true'])
finally:
os._exit(1)
print('SUCCESS')
elif test_name == 'test_activate_on_with_oneshot_on':
manhole.install(activate_on='USR2', oneshot_on='USR2')
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name == 'test_interrupt_on_accept':
signal.signal(signal.SIGUSR2, handle_usr2)
import ctypes
import ctypes.util
libpthread_path = ctypes.util.find_library("pthread")
if not libpthread_path:
raise ImportError
libpthread = ctypes.CDLL(libpthread_path)
if not hasattr(libpthread, "pthread_setname_np"):
raise ImportError
pthread_kill = libpthread.pthread_kill
pthread_kill.argtypes = [ctypes.c_void_p, ctypes.c_int]
pthread_kill.restype = ctypes.c_int
manhole.install(sigmask=None)
for i in range(15):
time.sleep(0.1)
print("Sending signal to manhole thread ...")
pthread_kill(manhole._INST.ident, signal.SIGUSR2)
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name == 'test_oneshot_on_usr2':
manhole.install(oneshot_on='USR2')
for i in range(TIMEOUT * 100):
time.sleep(0.1)
elif test_name.startswith('test_signalfd_weirdness'):
if 'negative' in test_name:
manhole.install(sigmask=None)
else:
manhole.install(sigmask=[signal.SIGCHLD])
time.sleep(0.3) # give the manhole a bit enough time to start
print('Starting ...')
import signalfd
signalfd.sigprocmask(signalfd.SIG_BLOCK, [signal.SIGCHLD])
fd = signalfd.signalfd(0, [signal.SIGCHLD], signalfd.SFD_NONBLOCK|signalfd.SFD_CLOEXEC)
for i in range(200):
print('Forking %s:' % i)
pid = os.fork()
print(' - [%s/%s] forked' % (i, pid))
if pid:
while 1:
print(' - [%s/%s] selecting on: %s' % (i, pid, [fd]))
read_ready, _, errors = select.select([fd], [], [fd], 1)
if read_ready:
try:
print(' - [%s/%s] reading from signalfd ...' % (i, pid))
print(' - [%s] read from signalfd: %r ' % (i, os.read(fd, 128)))
break
except OSError as exc:
print(' - [%s/%s] reading from signalfd failed with errno %s' % (i, pid, exc.errno))
else:
print(' - [%s/%s] reading from signalfd failed - not ready !' % (i, pid))
if 'negative' in test_name:
time.sleep(1)
if errors:
raise RuntimeError("fd has error")
else:
print(' - [%s/%s] exiting' % (i, pid))
os._exit(0)
time.sleep(TIMEOUT * 10)
elif test_name == 'test_auth_fail':
manhole.get_peercred = lambda _: (-1, -1, -1)
manhole.install()
time.sleep(TIMEOUT * 10)
elif test_name == 'test_socket_path':
manhole.install(socket_path=SOCKET_PATH)
time.sleep(TIMEOUT * 10)
elif test_name == 'test_daemon_connection':
manhole.install(daemon_connection=True)
time.sleep(TIMEOUT)
elif test_name == 'test_socket_path_with_fork':
manhole.install(socket_path=SOCKET_PATH)
time.sleep(TIMEOUT)
do_fork()
elif test_name == 'test_locals':
manhole.install(socket_path=SOCKET_PATH,
locals={'k1': 'v1', 'k2': 'v2'})
time.sleep(TIMEOUT)
elif test_name == 'test_locals_after_fork':
manhole.install(locals={'k1': 'v1', 'k2': 'v2'})
do_fork()
elif test_name == 'test_redirect_stderr_default':
manhole.install(socket_path=SOCKET_PATH)
time.sleep(TIMEOUT)
elif test_name == 'test_redirect_stderr_disabled':
manhole.install(socket_path=SOCKET_PATH, redirect_stderr=False)
time.sleep(TIMEOUT)
elif test_name == 'test_sigmask':
manhole.install(socket_path=SOCKET_PATH, sigmask=[signal.SIGUSR1])
time.sleep(TIMEOUT)
else:
manhole.install()
time.sleep(0.3) # give the manhole a bit enough time to start
if test_name == 'test_simple':
time.sleep(TIMEOUT * 10)
elif test_name == 'test_with_forkpty':
time.sleep(1)
pid, masterfd = os.forkpty()
if pid:
while not os.waitpid(pid, os.WNOHANG)[0]:
try:
os.write(2, os.read(masterfd, 1024))
except OSError as e:
print("Error while reading from masterfd:", e)
else:
time.sleep(TIMEOUT * 10)
elif test_name == 'test_with_fork':
time.sleep(1)
do_fork()
else:
raise RuntimeError('Invalid test spec.')
except: # pylint: disable=W0702
print('Died with %s.' % sys.exc_info()[0].__name__, file=OUTPUT)
import traceback
traceback.print_exc(file=OUTPUT)
print('DIED.', file=OUTPUT)
| 37.226923 | 116 | 0.510073 |
6f73d54d3a1a664d942bd0ee6d760eedb4233760 | 1,054 | py | Python | ecommerce/User/admin.py | AwaleRohin/commerce-fm | cb5b43c999ae5be37957b29de9c07d5affc66fb0 | [
"MIT"
] | 18 | 2020-12-05T14:12:32.000Z | 2022-03-11T20:15:22.000Z | ecommerce/User/admin.py | AwaleRohin/commerce-fm | cb5b43c999ae5be37957b29de9c07d5affc66fb0 | [
"MIT"
] | 1 | 2021-07-22T09:23:13.000Z | 2021-07-22T09:23:13.000Z | ecommerce/User/admin.py | shakyasaijal/commerce-fm | 358b6925f4b569dc374010d7cc7d4d560ede2b48 | [
"MIT"
] | 13 | 2020-10-15T10:17:35.000Z | 2022-01-29T06:56:24.000Z | from django.contrib import admin
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from . import models
if settings.HAS_ADDITIONAL_USER_DATA:
try:
except (Exception, KeyError) as e:
raise ImproperlyConfigured("User/admin.py:: Multi Vendor is turned on.")
admin.site.register(models.User, UserAdmin)
admin.site.register(models.IpAddress)
admin.site.register(models.CityFromIpAddress)
admin.site.register(models.Marketing) | 31 | 80 | 0.712524 |
6f741a22f6b69a36890074cd2db8d9ec2d946c37 | 38,189 | py | Python | client/external/xp_tracker.py | Suirdna/OR-Origin | 8eb7d99a87d835a7d590d56e0088ec79746f4630 | [
"MIT"
] | null | null | null | client/external/xp_tracker.py | Suirdna/OR-Origin | 8eb7d99a87d835a7d590d56e0088ec79746f4630 | [
"MIT"
] | null | null | null | client/external/xp_tracker.py | Suirdna/OR-Origin | 8eb7d99a87d835a7d590d56e0088ec79746f4630 | [
"MIT"
] | null | null | null | from client import exception, embed_creator, console_interface, discord_manager, file_manager, ini_manager, json_manager, origin, permissions, server_timer
from client.config import config as c, language as l
from discord.ext import commands, tasks
from client.external.hiscores import hiscores_xp
from PIL import Image, ImageDraw, ImageFont
import discord, locale | 52.601928 | 366 | 0.468224 |
6f75a1523bdd37ab1cd4cc70ef59345c182747bf | 1,437 | py | Python | days/01/part1.py | gr3yknigh1/aoc2021 | 55dca0685cc4213f0a14970ae9bfc882a59e82aa | [
"MIT"
] | null | null | null | days/01/part1.py | gr3yknigh1/aoc2021 | 55dca0685cc4213f0a14970ae9bfc882a59e82aa | [
"MIT"
] | null | null | null | days/01/part1.py | gr3yknigh1/aoc2021 | 55dca0685cc4213f0a14970ae9bfc882a59e82aa | [
"MIT"
] | null | null | null | from __future__ import annotations
import os
import collections
BASE_PATH = os.path.dirname(__file__)
INPUT_PATH = os.path.join(BASE_PATH, "input.txt")
OUTPUT_PATH = os.path.join(BASE_PATH, "output.txt")
if __name__ == "__main__":
raise SystemExit(main())
| 26.127273 | 69 | 0.599165 |
6f75fde6361af1d1bfaca77b15e701086bf2e3b2 | 13,684 | py | Python | src/ensemble_nn/agent_nn.py | AbhinavGopal/ts_tutorial | 147ff28dc507172774693f225071f8e244e5994e | [
"MIT"
] | 290 | 2017-12-29T01:55:21.000Z | 2022-03-28T10:00:32.000Z | src/ensemble_nn/agent_nn.py | AbhinavGopal/ts_tutorial | 147ff28dc507172774693f225071f8e244e5994e | [
"MIT"
] | 3 | 2018-08-02T11:45:51.000Z | 2020-09-24T14:34:58.000Z | src/ensemble_nn/agent_nn.py | AbhinavGopal/ts_tutorial | 147ff28dc507172774693f225071f8e244e5994e | [
"MIT"
] | 76 | 2018-01-17T06:19:51.000Z | 2021-11-10T06:18:20.000Z | """Agents for neural net bandit problems.
We implement three main types of agent:
- epsilon-greedy (fixed epsilon, annealing epsilon)
- dropout (arXiv:1506.02142)
- ensemble sampling
All code is specialized to the setting of 2-layer fully connected MLPs.
"""
import numpy as np
import numpy.random as rd
from base.agent import Agent
from ensemble_nn.env_nn import TwoLayerNNBandit
| 33.621622 | 80 | 0.656168 |
6f762afe905140cf74ce1d262513f6770e5cf96a | 1,314 | py | Python | leaflet_storage/management/commands/storagei18n.py | Biondilbiondo/django-leaflet-storage-concurrent-editing | 98cc3be7c74ea545ed8a75b9ae198acfcbba03a3 | [
"WTFPL"
] | null | null | null | leaflet_storage/management/commands/storagei18n.py | Biondilbiondo/django-leaflet-storage-concurrent-editing | 98cc3be7c74ea545ed8a75b9ae198acfcbba03a3 | [
"WTFPL"
] | null | null | null | leaflet_storage/management/commands/storagei18n.py | Biondilbiondo/django-leaflet-storage-concurrent-editing | 98cc3be7c74ea545ed8a75b9ae198acfcbba03a3 | [
"WTFPL"
] | null | null | null | import io
import os
from django.core.management.base import BaseCommand
from django.conf import settings
from django.contrib.staticfiles import finders
from django.template.loader import render_to_string
from django.utils.translation import to_locale
| 33.692308 | 79 | 0.542618 |
6f76bbd91ccc6729e6385bce7b0f809d9736e91f | 37,910 | py | Python | spikemetrics/metrics.py | MarineChap/spikemetrics | c83a2e1e12efab5d2987d38d129ee6862cb4a454 | [
"MIT"
] | null | null | null | spikemetrics/metrics.py | MarineChap/spikemetrics | c83a2e1e12efab5d2987d38d129ee6862cb4a454 | [
"MIT"
] | null | null | null | spikemetrics/metrics.py | MarineChap/spikemetrics | c83a2e1e12efab5d2987d38d129ee6862cb4a454 | [
"MIT"
] | null | null | null | # Copyright 2019. Allen Institute. All rights reserved.
import numpy as np
import pandas as pd
from collections import OrderedDict
import math
import warnings
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics import silhouette_score
from scipy.spatial.distance import cdist
from scipy.stats import chi2
from scipy.ndimage.filters import gaussian_filter1d
from .utils import Epoch
from .utils import printProgressBar, get_spike_positions
def calculate_metrics(spike_times, spike_clusters, amplitudes, pc_features, pc_feature_ind, params,
                      duration, channel_locations=None, cluster_ids=None, epochs=None, seed=None, verbose=True):
    """ Calculate quality metrics for all units on one probe.

    For each epoch, every per-spike array is restricted to the spikes falling in
    that epoch, the individual metric calculators are run, and their outputs are
    appended as rows of the result DataFrame (one row per unit per epoch).

    Inputs:
    ------
    spike_times : numpy.ndarray (num_spikes x 0)
        Spike times in seconds (same timebase as epochs)
    spike_clusters : numpy.ndarray (num_spikes x 0)
        Cluster IDs for each spike time
    amplitudes : numpy.ndarray (num_spikes x 0)
        Amplitude value for each spike time
    pc_features : numpy.ndarray (num_spikes x num_pcs x num_channels)
        Pre-computed PCs for blocks of channels around each spike
    pc_feature_ind : numpy.ndarray (num_units x num_channels)
        Channel indices of PCs for each unit
    params : dict of parameters
        'isi_threshold' : minimum time for isi violations
        'min_isi'
        'num_channels_to_compare'
        'max_spikes_for_unit'
        'max_spikes_for_nn'
        'n_neighbors'
        'n_silhouette'
        'drift_metrics_interval_s'
        'drift_metrics_min_spikes_per_interval'
    duration : length of recording (seconds)
    channel_locations : numpy.ndarray (num_channels x 2)
        Channel locations (if None, a linear geometry is assumed)
    cluster_ids : array-like, optional
        IDs to write into the 'cluster_id' column; defaults to 0..total_units-1
    epochs : list of Epoch objects
        contains information on Epoch start and stop times; defaults to one
        epoch spanning the whole session
    seed : int, optional
        Random seed forwarded to subsampling steps for reproducibility
    verbose : bool
        Forwarded to the individual metric calculators (progress output)

    Outputs:
    --------
    metrics : pandas.DataFrame
        one column for each metric
        one row per unit per epoch
    """

    metrics = pd.DataFrame()

    if epochs is None:
        epochs = [Epoch('complete_session', 0, np.inf)]

    # Assumes cluster IDs are non-negative; unit count is max ID + 1.
    total_units = np.max(spike_clusters) + 1
    # NOTE(review): total_epochs is computed but never used below.
    total_epochs = len(epochs)

    for epoch in epochs:
        # Boolean mask restricting every per-spike array to this epoch.
        in_epoch = np.logical_and(spike_times >= epoch.start_time, spike_times < epoch.end_time)
        spikes_in_epoch = np.sum(in_epoch)
        # Cap subsample sizes at the number of spikes actually available.
        spikes_for_nn = min(spikes_in_epoch, params['max_spikes_for_nn'])
        spikes_for_silhouette = min(spikes_in_epoch, params['n_silhouette'])

        print("Calculating isi violations")
        isi_viol = calculate_isi_violations(spike_times=spike_times[in_epoch],
                                            spike_clusters=spike_clusters[in_epoch],
                                            total_units=total_units,
                                            isi_threshold=params['isi_threshold'],
                                            min_isi=params['min_isi'],
                                            duration=duration,
                                            verbose=verbose)

        print("Calculating presence ratio")
        presence_ratio = calculate_presence_ratio(spike_times=spike_times[in_epoch],
                                                  spike_clusters=spike_clusters[in_epoch],
                                                  total_units=total_units,
                                                  duration=duration, verbose=verbose)

        print("Calculating firing rate")
        firing_rate = calculate_firing_rates(spike_times=spike_times[in_epoch],
                                             spike_clusters=spike_clusters[in_epoch],
                                             total_units=total_units, duration=duration, verbose=verbose)

        print("Calculating amplitude cutoff")
        amplitude_cutoff = calculate_amplitude_cutoff(spike_clusters=spike_clusters[in_epoch],
                                                      amplitudes=amplitudes[in_epoch],
                                                      total_units=total_units,
                                                      verbose=verbose)

        print("Calculating PC-based metrics")
        isolation_distance, l_ratio, d_prime, nn_hit_rate, nn_miss_rate = \
            calculate_pc_metrics(spike_clusters=spike_clusters[in_epoch],
                                 total_units=total_units,
                                 pc_features=pc_features[in_epoch, :, :],
                                 pc_feature_ind=pc_feature_ind,
                                 num_channels_to_compare=params['num_channels_to_compare'],
                                 max_spikes_for_cluster=params['max_spikes_for_unit'],
                                 spikes_for_nn=spikes_for_nn,
                                 n_neighbors=params['n_neighbors'],
                                 channel_locations=
                                 channel_locations,
                                 seed=seed,
                                 verbose=verbose)

        print("Calculating silhouette score")
        silhouette_score = calculate_silhouette_score(spike_clusters=spike_clusters[in_epoch],
                                                      total_units=total_units,
                                                      pc_features=pc_features[in_epoch, :, :],
                                                      pc_feature_ind=pc_feature_ind,
                                                      spikes_for_silhouette=spikes_for_silhouette,
                                                      seed=seed, verbose=verbose)

        print("Calculating drift metrics")
        max_drift, cumulative_drift = calculate_drift_metrics(spike_times=spike_times[in_epoch],
                                                              spike_clusters=spike_clusters[in_epoch],
                                                              total_units=total_units,
                                                              pc_features=pc_features[in_epoch, :, :],
                                                              pc_feature_ind=pc_feature_ind,
                                                              interval_length=params['drift_metrics_interval_s'],
                                                              min_spikes_per_interval=
                                                              params['drift_metrics_min_spikes_per_interval'],
                                                              channel_locations=
                                                              channel_locations,
                                                              verbose=verbose)

        # Default cluster IDs are positional (0..total_units-1).
        if cluster_ids is None:
            cluster_ids_out = np.arange(total_units)
        else:
            cluster_ids_out = cluster_ids

        epoch_name = [epoch.name] * len(cluster_ids_out)

        # One row per unit for this epoch; OrderedDict fixes column order.
        metrics = pd.concat((metrics, pd.DataFrame(data=OrderedDict((('cluster_id', cluster_ids_out),
                                                                     ('firing_rate', firing_rate),
                                                                     ('presence_ratio', presence_ratio),
                                                                     ('isi_violation', isi_viol),
                                                                     ('amplitude_cutoff', amplitude_cutoff),
                                                                     ('isolation_distance', isolation_distance),
                                                                     ('l_ratio', l_ratio),
                                                                     ('d_prime', d_prime),
                                                                     ('nn_hit_rate', nn_hit_rate),
                                                                     ('nn_miss_rate', nn_miss_rate),
                                                                     ('silhouette_score', silhouette_score),
                                                                     ('max_drift', max_drift),
                                                                     ('cumulative_drift', cumulative_drift),
                                                                     ('epoch_name', epoch_name),
                                                                     )))))

    return metrics
# ===============================================================
# HELPER FUNCTIONS TO LOOP THROUGH CLUSTERS:
# ===============================================================
def calculate_pc_metrics(spike_clusters, total_units, pc_features, pc_feature_ind,
                         num_channels_to_compare, max_spikes_for_cluster, spikes_for_nn,
                         n_neighbors, channel_locations, min_num_pcs=10, metric_names=None,
                         seed=None, spike_cluster_subset=None, verbose=True):
    """
    Computes metrics from projection of waveforms to principal components
    including: isolation distance, l ratio, d prime, nn hit rate, nn miss rate

    Parameters
    ----------
    spike_clusters: numpy.ndarray (num_spikes,)
        Unit ID for each spike time
    total_units: int
        Total number of units
    pc_features: numpy.ndarray (num_spikes, num_pcs, num_channels)
        Pre-computed PCs for blocks of channels around each spike
    pc_feature_ind: numpy.ndarray (num_units, num_channels)
        Channel indices of PCs for each unit
    num_channels_to_compare: int
        Number of channels around the max channel over which to compute the
        metrics (e.g. only units from these channels will be considered for the
        nearest neighbor metrics)
    max_spikes_for_cluster: int
        Total number of spikes to use for computing the metrics
    spikes_for_nn: int
        Number of spikes in a unit to use for computing nearest neighbor metrics
        (nn_hit_rate, nn_miss_rate)
    n_neighbors: int
        Number of nearest neighbor spikes to compare membership
    channel_locations: array, (channels, 2)
        (x,y) location of channels; used to identify neighboring channels
    min_num_pcs: int, default=10
        Minimum number of spikes a unit must have to compute these metrics
    metric_names: list of str, default=None
        List of metrics to compute
    seed: int, default=None
        Random seed for subsampling spikes from the unit
    spike_cluster_subset: numpy.array (units,), default=None
        If specified compute metrics for only these units
    verbose: bool, default=True
        Prints out progress bar if True

    Returns (all 1d numpy.arrays)
    -------
    isolation_distances
    l_ratios
    d_primes
    nn_hit_rates
    nn_miss_rates
    """
    if metric_names is None:
        metric_names = ['isolation_distance', 'l_ratio', 'd_prime', 'nearest_neighbor']

    # Cannot compare more channels than exist on the probe.
    if num_channels_to_compare > channel_locations.shape[0]:
        num_channels_to_compare = channel_locations.shape[0]

    all_cluster_ids = np.unique(spike_clusters)
    if spike_cluster_subset is not None:
        cluster_ids = spike_cluster_subset
    else:
        cluster_ids = all_cluster_ids

    # Per-unit outputs, indexed by the unit's position in all_cluster_ids.
    # NOTE(review): arrays are sized total_units but indexed by enumeration
    # position over np.unique(spike_clusters) — assumes cluster IDs are
    # contiguous 0..total_units-1; verify against callers.
    peak_channels = np.zeros((total_units,), dtype='uint16')
    neighboring_channels = np.zeros((total_units, num_channels_to_compare))
    isolation_distances = np.zeros((total_units,))
    l_ratios = np.zeros((total_units,))
    d_primes = np.zeros((total_units,))
    nn_hit_rates = np.zeros((total_units,))
    nn_miss_rates = np.zeros((total_units,))

    # First pass: find each unit's peak channel (largest mean first-PC weight)
    # and the set of channels neighboring it.
    for idx, cluster_id in enumerate(all_cluster_ids):
        for_unit = np.squeeze(spike_clusters == cluster_id)
        pc_max = np.argmax(np.mean(pc_features[for_unit, 0, :], 0))
        peak_channels[idx] = pc_feature_ind[idx, pc_max]

        # find neighboring channels
        neighboring_channels[idx] = find_neighboring_channels(pc_feature_ind[idx, pc_max],
                                                              pc_feature_ind[idx, :],
                                                              num_channels_to_compare,
                                                              channel_locations)

    # Second pass: for each requested unit, pool PCs from all units whose peak
    # channel lies in its neighborhood, then compute the PC-based metrics.
    for idx, cluster_id in enumerate(cluster_ids):
        if verbose:
            printProgressBar(idx + 1, total_units)

        peak_channel = peak_channels[idx]

        # units_for_channel: index (not ID) of units defined at the target unit's peak channel
        units_for_channel, channel_index = np.unravel_index(np.where(pc_feature_ind.flatten() == peak_channel)[0],
                                                            pc_feature_ind.shape)

        # units_in_range: list of bool, True for units whose peak channels are in the neighborhood of target unit
        units_in_range = [channel in neighboring_channels[idx] for channel in peak_channels[units_for_channel]]
        channels_to_use = neighboring_channels[idx]

        # only get index of units who are in the neighborhood of target unit
        units_for_channel = units_for_channel[units_in_range]

        spike_counts = np.zeros(units_for_channel.shape)
        for idx2, cluster_id2 in enumerate(units_for_channel):
            spike_counts[idx2] = np.sum(spike_clusters == all_cluster_ids[cluster_id2])

        # index of target unit within the subset of units in its neighborhood (including itself)
        this_unit_idx = np.where(units_for_channel == idx)[0]

        # Scale every neighbor's subsample so the target unit contributes at
        # most max_spikes_for_cluster while preserving relative proportions.
        if spike_counts[this_unit_idx] > max_spikes_for_cluster:
            relative_counts = spike_counts / spike_counts[this_unit_idx] * max_spikes_for_cluster
        else:
            relative_counts = spike_counts

        all_pcs = np.zeros((0, pc_features.shape[1], channels_to_use.size))
        all_labels = np.zeros((0,))

        for idx2, cluster_id2 in enumerate(units_for_channel):
            try:
                channel_mask = make_channel_mask(cluster_id2, pc_feature_ind, channels_to_use)
            except IndexError:
                # Occurs when pc_feature_ind does not contain all channels of interest
                # In that case, we will exclude this unit for the calculation
                print('Unit outside the range set by channel_to_use, skipping...')
                pass
            else:
                subsample = int(relative_counts[idx2])
                index_mask = make_index_mask(spike_clusters, all_cluster_ids[cluster_id2], min_num=0, max_num=subsample,
                                             seed=seed)
                pcs = get_unit_pcs(pc_features, index_mask, channel_mask)
                labels = np.ones((pcs.shape[0],)) * all_cluster_ids[cluster_id2]

                all_pcs = np.concatenate((all_pcs, pcs), 0)
                all_labels = np.concatenate((all_labels, labels), 0)

        # Flatten (num_pcs x channels) into a single feature vector per spike.
        all_pcs = np.reshape(all_pcs, (all_pcs.shape[0], pc_features.shape[1] * channels_to_use.size))
        if all_pcs.shape[0] > min_num_pcs:

            if 'isolation_distance' in metric_names or 'l_ratio' in metric_names:
                isolation_distances[idx], l_ratios[idx] = mahalanobis_metrics(all_pcs, all_labels,
                                                                              cluster_id)
            else:
                isolation_distances[idx] = np.nan
                l_ratios[idx] = np.nan

            if 'd_prime' in metric_names:
                d_primes[idx] = lda_metrics(all_pcs, all_labels, cluster_id)
            else:
                d_primes[idx] = np.nan

            if 'nearest_neighbor' in metric_names:
                nn_hit_rates[idx], nn_miss_rates[idx] = nearest_neighbors_metrics(all_pcs, all_labels,
                                                                                  cluster_id,
                                                                                  spikes_for_nn,
                                                                                  n_neighbors)
            else:
                nn_hit_rates[idx] = np.nan
                nn_miss_rates[idx] = np.nan
        else:
            # Too few pooled spikes for a meaningful estimate.
            print(f'Unit {str(cluster_id)} only has ' + str(
                all_pcs.shape[0]) + ' spikes, which is not enough to compute metric; assigning nan...')
            isolation_distances[idx] = np.nan
            l_ratios[idx] = np.nan
            d_primes[idx] = np.nan
            nn_hit_rates[idx] = np.nan
            nn_miss_rates[idx] = np.nan

    return isolation_distances, l_ratios, d_primes, nn_hit_rates, nn_miss_rates
# ==========================================================
# IMPLEMENTATION OF ACTUAL METRICS:
# ==========================================================
def isi_violations(spike_train, duration, isi_threshold, min_isi=0):
    """Estimate spike-train contamination from refractory-period violations.

    Based on metric described in Hill et al. (2011) J Neurosci 31: 8699-8705
    Originally written in Matlab by Nick Steinmetz (https://github.com/cortex-lab/sortingQuality)
    Converted to Python by Daniel Denman

    Inputs:
    -------
    spike_train : array of monotonically increasing spike times (in seconds)
    duration : length of recording (seconds)
    isi_threshold : biophysical refractory period; ISIs below this count as violations
    min_isi : artificial refractory period enforced by the acquisition system or
        post-processing; spikes closer together than this are treated as
        duplicates and removed (default = 0)

    Outputs:
    --------
    fpRate : rate of contaminating spikes as a fraction of overall rate
        (higher values indicate more contamination)
    num_violations : total number of violations detected
    """
    # Drop duplicate spikes falling inside the enforced minimum ISI.
    if min_isi > 0:
        duplicates = np.where(np.diff(spike_train) <= min_isi)[0]
        spike_train = np.delete(spike_train, duplicates + 1)

    intervals = np.diff(spike_train)
    num_violations = sum(intervals < isi_threshold)

    # Total time window in which a contaminating spike could register a violation.
    violation_time = 2 * len(spike_train) * (isi_threshold - min_isi)
    total_rate = firing_rate(spike_train, duration)
    fpRate = (num_violations / violation_time) / total_rate

    return fpRate, num_violations
def presence_ratio(spike_train, duration, num_bin_edges=101):
    """Return the fraction of time bins in which the unit fired at least once.

    Inputs:
    -------
    spike_train : array of spike times
    duration : length of recording (seconds)
    num_bin_edges : number of histogram bin edges (total bins = num_bin_edges - 1)

    Outputs:
    --------
    presence_ratio : fraction of time bins in which this unit is spiking
    """
    bin_edges = np.linspace(0, duration, num_bin_edges)
    counts, _ = np.histogram(spike_train, bin_edges)
    occupied_bins = np.count_nonzero(counts)
    return occupied_bins / (num_bin_edges - 1)
def firing_rate(spike_train, duration):
    """Calculate the average firing rate of a spike train over the recording.

    Inputs:
    -------
    spike_train : array of spike times (in seconds)
    duration : length of recording (in seconds); must be non-zero

    Outputs:
    --------
    fr : float
        Firing rate in Hz (spike count divided by duration)
    """
    # Fix: the previous docstring referenced optional "temporal bounds" that
    # this function has never taken; the rate is always computed over the full
    # `duration`, as the code below shows.
    fr = spike_train.size / duration
    return fr
def amplitude_cutoff(amplitudes, num_histogram_bins=500, histogram_smoothing_value=3):
    """Approximate the fraction of spikes missing from an amplitude distribution.

    Assumes the amplitude histogram is symmetric (not valid in the presence of
    drift). Inspired by metric described in Hill et al. (2011) J Neurosci 31:
    8699-8705.

    Input:
    ------
    amplitudes : numpy.ndarray
        Array of amplitudes (don't need to be in physical units)
    num_histogram_bins : int
        Number of bins for calculating amplitude histogram
    histogram_smoothing_value : float
        Gaussian filter window for smoothing amplitude histogram

    Output:
    -------
    fraction_missing : float
        Fraction of missing spikes (ranges between 0 and 0.5)
        If more than 50% of spikes are missing, an accurate estimate isn't possible
    """
    counts, edges = np.histogram(amplitudes, num_histogram_bins, density=True)
    smoothed_pdf = gaussian_filter1d(counts, histogram_smoothing_value)

    # Index (at or after the peak) whose density best matches the left tail:
    # everything beyond it is assumed clipped on the low side by symmetry.
    peak_index = np.argmax(smoothed_pdf)
    cutoff_index = np.argmin(np.abs(smoothed_pdf[peak_index:] - smoothed_pdf[0])) + peak_index

    bin_size = np.mean(np.diff(edges[:-1]))
    fraction_missing = np.sum(smoothed_pdf[cutoff_index:]) * bin_size

    # Cap at 0.5: beyond that the symmetry assumption cannot hold.
    return np.min([fraction_missing, 0.5])
def mahalanobis_metrics(all_pcs, all_labels, this_unit_id):
    """Calculate isolation distance and L-ratio from Mahalanobis distances.

    Based on metrics described in Schmitzer-Torbert et al. (2005) Neurosci 131: 1-11

    Inputs:
    -------
    all_pcs : numpy.ndarray (num_spikes x PCs)
        2D array of PCs for all spikes
    all_labels : numpy.ndarray (num_spikes x 0)
        1D array of cluster labels for all spikes
    this_unit_id : Int
        number corresponding to unit for which these metrics will be calculated

    Outputs:
    --------
    isolation_distance : float
        Isolation distance of this unit (np.nan if not computable)
    l_ratio : float
        L-ratio for this unit (np.nan if not computable)
    """
    own_pcs = all_pcs[all_labels == this_unit_id, :]
    other_pcs = all_pcs[all_labels != this_unit_id, :]
    centroid = np.expand_dims(np.mean(own_pcs, 0), 0)

    try:
        VI = np.linalg.inv(np.cov(own_pcs.T))
    except np.linalg.linalg.LinAlgError:  # covariance is singular
        return np.nan, np.nan

    dist_other = np.sort(cdist(centroid, other_pcs, 'mahalanobis', VI=VI)[0])
    dist_self = np.sort(cdist(centroid, own_pcs, 'mahalanobis', VI=VI)[0])

    # Need at least two spikes on each side of the comparison.
    n = np.min([own_pcs.shape[0], other_pcs.shape[0]])
    if n < 2:
        return np.nan, np.nan

    dof = own_pcs.shape[1]  # number of features
    l_ratio = np.sum(1 - chi2.cdf(pow(dist_other, 2), dof)) / dist_self.shape[0]
    # Squared distance to the n-th closest non-member spike.
    isolation_distance = pow(dist_other[n - 1], 2)

    return isolation_distance, l_ratio
def lda_metrics(all_pcs, all_labels, this_unit_id):
    """Calculate d-prime via Linear Discriminant Analysis.

    Based on metric described in Hill et al. (2011) J Neurosci 31: 8699-8705

    Inputs:
    -------
    all_pcs : numpy.ndarray (num_spikes x PCs)
        2D array of PCs for all spikes
    all_labels : numpy.ndarray (num_spikes x 0)
        1D array of cluster labels for all spikes
    this_unit_id : Int
        number corresponding to unit for which these metrics will be calculated

    Outputs:
    --------
    d_prime : float
        Separation (in pooled-SD units) between this unit and all other spikes
        along the LDA projection axis
    """
    # Binary membership target: this unit vs. everything else.
    membership = np.zeros((all_pcs.shape[0],), dtype='bool')
    membership[all_labels == this_unit_id] = True

    projection = LDA(n_components=1).fit_transform(all_pcs, membership)

    in_cluster = projection[np.where(membership)[0]]
    out_cluster = projection[np.where(np.invert(membership))[0]]

    pooled_sd = np.sqrt(0.5 * (np.std(in_cluster) ** 2 + np.std(out_cluster) ** 2))
    d_prime = (np.mean(in_cluster) - np.mean(out_cluster)) / pooled_sd

    return d_prime
def nearest_neighbors_metrics(all_pcs, all_labels, this_unit_id, spikes_for_nn, n_neighbors):
    """Calculate unit contamination via NearestNeighbors search in PCA space.

    Based on metrics described in Chung, Magland et al. (2017) Neuron 95: 1381-1394

    A is a (hopefully) representative subset of cluster X
    NN_hit(X) = 1/k \sum_i=1^k |{x in A such that ith closest neighbor is in X}| / |A|

    Inputs:
    -------
    all_pcs : numpy.ndarray (num_spikes x PCs)
        2D array of PCs for all spikes
    all_labels : numpy.ndarray (num_spikes x 0)
        1D array of cluster labels for all spikes
    this_unit_id : Int
        number corresponding to unit for which these metrics will be calculated
    spikes_for_nn : Int
        number of spikes to use (calculation can be very slow when this number is >20000)
    n_neighbors : Int
        number of neighbors to use

    Outputs:
    --------
    hit_rate : float
        Fraction of neighbors for target cluster that are also in target cluster
    miss_rate : float
        Fraction of neighbors outside target cluster that are in target cluster
    """
    in_unit = all_labels == this_unit_id
    # Stack target-unit spikes first so rows [0, n) belong to the target cluster.
    stacked = np.concatenate((all_pcs[in_unit, :], all_pcs[np.invert(in_unit), :]), 0)
    n = np.sum(in_unit)

    # Evenly subsample rows when there are more spikes than requested.
    subsample_ratio = spikes_for_nn / all_pcs.shape[0]
    if subsample_ratio < 1:
        keep = np.arange(0, stacked.shape[0] - 1, 1 / subsample_ratio).astype('int')
        stacked = stacked[keep, :]
        n = int(n * subsample_ratio)

    _, neighbor_idx = NearestNeighbors(n_neighbors=n_neighbors, algorithm='ball_tree').fit(stacked).kneighbors(stacked)

    # Column 0 is each point itself; look at the remaining neighbors only.
    hit_rate = np.mean(neighbor_idx[:n, 1:].flatten() < n)
    miss_rate = np.mean(neighbor_idx[n:, 1:].flatten() < n)

    return hit_rate, miss_rate
# ==========================================================
# HELPER FUNCTIONS:
# ==========================================================
def make_index_mask(spike_clusters, unit_id, min_num, max_num, seed=None):
    """Create a boolean mask over spikes selecting (a subsample of) one unit.

    Inputs:
    -------
    spike_clusters : numpy.ndarray (num_spikes x 0)
        Contains cluster IDs for all spikes in pc_features array
    unit_id : Int
        ID for this unit
    min_num : Int
        Minimum number of spikes; if the unit has fewer, return an all-False mask
    max_num : Int
        Maximum number of spikes; if the unit has more, keep a random subsample
    seed : int
        Random seed for reproducibility

    Output:
    -------
    index_mask : numpy.ndarray (boolean)
        Mask of spike indices for pc_features array
    """
    unit_spike_inds = np.where(spike_clusters == unit_id)[0]
    mask = np.zeros((spike_clusters.size,), dtype='bool')

    if len(unit_spike_inds) >= min_num:
        # Randomly keep at most max_num of this unit's spikes.
        shuffle = np.random.RandomState(seed=seed).permutation(unit_spike_inds.size)
        mask[unit_spike_inds[shuffle[:max_num]]] = True

    return mask
def make_channel_mask(unit_id, pc_feature_ind, channels_to_use):
    """Create a mask for the channel dimension of the pc_features array.

    Inputs:
    -------
    unit_id : Int
        ID for this unit
    pc_feature_ind : np.ndarray
        Channels used for PC calculation for each unit
    channels_to_use : np.ndarray
        Channels to use for calculating metrics

    Output:
    -------
    channel_mask : numpy.ndarray
        Channel indices to extract from pc_features array

    Raises IndexError (via np.argwhere) when a requested channel is not among
    this unit's channels.
    """
    unit_channels = pc_feature_ind[unit_id, :]
    positions = []
    for chan in channels_to_use:
        # Position of this channel within the unit's channel list.
        positions.append(np.argwhere(unit_channels == chan)[0][0])
    return np.array(positions)
def get_unit_pcs(these_pc_features, index_mask, channel_mask):
    """Select one unit's PC features using spike-index and channel masks.

    Inputs:
    -------
    these_pc_features : numpy.ndarray (float)
        Array of pre-computed PC features (num_spikes x num_PCs x num_channels)
    index_mask : numpy.ndarray (boolean)
        Mask for spike index dimension of pc_features array
    channel_mask : numpy.ndarray (boolean)
        Mask for channel index dimension of pc_features array

    Output:
    -------
    unit_PCs : numpy.ndarray (float)
        PCs for one unit (num_spikes x num_PCs x num_channels)
    """
    # Select spikes first, then channels (two separate fancy-index steps).
    selected_spikes = these_pc_features[index_mask, :, :]
    return selected_spikes[:, :, channel_mask]
def find_neighboring_channels(peak_channel, channel_list, num_channels_to_compare, channel_locations):
    """
    Finds k nearest channels to the peak channel of a unit

    Parameters
    ----------
    peak_channel: int
        ID of channel with largest waveform amplitude
    channel_list: numpy.ndarray
        IDs of channels being considered
    num_channels_to_compare: int
        Number of nearest channels to return
    channel_locations: numpy.ndarray, (n_channels, 2)
        x,y coordinates of the channels in channel_list

    Returns
    -------
    neighboring_channels: array_like
        id of k channels that neighbor peak channel (including the peak channel itself)
    """
    # Locate the peak channel's (x, y) position.
    peak_pos = channel_locations[list(channel_list).index(peak_channel)]
    # Euclidean distance from the peak channel to every channel.
    dists = [np.linalg.norm(loc - peak_pos) for loc in channel_locations]
    # k closest channels; distance 0 puts the peak channel itself first.
    nearest = np.argsort(dists)[:num_channels_to_compare]
    return channel_list[nearest]
6f76bcfc2a09b5cceb410578869827df3cb772bb | 23,746 | py | Python | pdpbox/pdp_plot_utils.py | flinder/PDPbox | b832e37f840ae885d39a0ba8ff458f4be27dcc65 | [
"MIT"
] | null | null | null | pdpbox/pdp_plot_utils.py | flinder/PDPbox | b832e37f840ae885d39a0ba8ff458f4be27dcc65 | [
"MIT"
] | null | null | null | pdpbox/pdp_plot_utils.py | flinder/PDPbox | b832e37f840ae885d39a0ba8ff458f4be27dcc65 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import copy
from .pdp_calc_utils import _sample_data, _find_onehot_actual, _find_closest
from sklearn.cluster import MiniBatchKMeans, KMeans
def _pdp_plot_title(n_grids, feature_name, ax, multi_flag, which_class, plot_params):
    """
    Draw pdp plot title

    :param n_grids: number of grids
    :param feature_name: name of the feature
    :param ax: axes to plot on
    :param multi_flag: whether it is a subplot of a multi-classes plot
    :param which_class: which class to plot
    :param plot_params: values of plot parameters (dict or None)
    """
    if plot_params is None:
        plot_params = {}

    # Style values, each overridable through plot_params.
    font_family = plot_params.get('font_family', 'Arial')
    title = plot_params.get('title', 'PDP for %s' % feature_name)
    title_fontsize = plot_params.get('title_fontsize', 15)
    subtitle_fontsize = plot_params.get('subtitle_fontsize', 12)
    subtitle = "Number of unique grid points: %d" % n_grids

    ax.set_facecolor('white')
    if multi_flag:
        # Extra line identifying the class in multi-class mode.
        ax.text(0, 0.7, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
        ax.text(0, 0.45, "For Class %d" % which_class, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family)
        ax.text(0, 0.25, subtitle, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family, color='grey')
    else:
        ax.text(0, 0.7, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
        ax.text(0, 0.4, subtitle, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family, color='grey')
    ax.axis('off')
def _pdp_plot(pdp_isolate_out, feature_name, center, plot_org_pts, plot_lines, frac_to_plot,
              cluster, n_cluster_centers, cluster_method, x_quantile, ax, plot_params):
    """
    Plot partial dependent plot

    :param pdp_isolate_out: instance of pdp_isolate_obj
        a calculated pdp_isolate_obj instance
    :param feature_name: string
        name of the feature, not necessary the same as the column name
    :param center: boolean, default=True
        whether to center the plot
    :param plot_org_pts: boolean, default=False
        whether to plot out the original points
    :param plot_lines: boolean, default=False
        whether to plot out the individual lines
    :param frac_to_plot: float or integer, default=1
        how many points or lines to plot, can be a integer or a float
    :param cluster: boolean, default=False
        whether to cluster the individual lines and only plot out the cluster centers
    :param n_cluster_centers: integer, default=None
        number of cluster centers
    :param cluster_method: string, default=None
        cluster method to use, default is KMeans, if 'approx' is passed, MiniBatchKMeans is used
    :param x_quantile: boolean, default=False
        whether to construct x axis ticks using quantiles
    :param ax: axes to plot on
    :param plot_params: dict, default=None
        values of plot parameters
    """

    font_family = 'Arial'
    xticks_rotation = 0

    if plot_params is not None:
        if 'font_family' in plot_params.keys():
            font_family = plot_params['font_family']
        if 'xticks_rotation' in plot_params.keys():
            xticks_rotation = plot_params['xticks_rotation']

    # modify axes
    _axes_modify(font_family, ax)
    ax.set_xlabel(feature_name, fontsize=10)

    feature_type = pdp_isolate_out.feature_type
    feature_grids = pdp_isolate_out.feature_grids
    display_columns = pdp_isolate_out.display_columns
    actual_columns = pdp_isolate_out.actual_columns

    # Categorical-style features (and quantile mode) get evenly-spaced ticks;
    # numeric features use the grid values themselves as x positions.
    if feature_type == 'binary' or feature_type == 'onehot' or x_quantile:
        x = range(len(feature_grids))
        ax.set_xticks(x)
        ax.set_xticklabels(display_columns, rotation=xticks_rotation)
    else:
        # for numeric feature
        x = feature_grids

    # Deep-copy so centering below does not mutate the caller's results.
    ice_lines = copy.deepcopy(pdp_isolate_out.ice_lines)
    pdp_y = copy.deepcopy(pdp_isolate_out.pdp)

    # whether to fill between std upper and lower
    # whether to highlight pdp line
    std_fill = True
    pdp_hl = False

    # whether to center the plot
    if center:
        # Shift everything so the first grid point sits at 0.
        pdp_y -= pdp_y[0]
        for col in feature_grids[1:]:
            ice_lines[col] -= ice_lines[feature_grids[0]]
        ice_lines['actual_preds'] -= ice_lines[feature_grids[0]]
        ice_lines[feature_grids[0]] = 0

    if cluster or plot_lines:
        std_fill = False
        pdp_hl = True
        if cluster:
            _ice_cluster_plot(x=x, ice_lines=ice_lines, feature_grids=feature_grids, n_cluster_centers=n_cluster_centers,
                              cluster_method=cluster_method, ax=ax, plot_params=plot_params)
        else:
            ice_plot_data = _sample_data(ice_lines=ice_lines, frac_to_plot=frac_to_plot)
            _ice_line_plot(x=x, ice_plot_data=ice_plot_data, feature_grids=feature_grids, ax=ax, plot_params=plot_params)

    if plot_org_pts:
        ice_lines_temp = ice_lines.copy()
        # Derive an 'x' column mapping each observation onto the plot axis.
        if feature_type == 'onehot':
            ice_lines_temp['x'] = ice_lines_temp[actual_columns].apply(lambda x: _find_onehot_actual(x), axis=1)
            ice_lines_temp = ice_lines_temp[~ice_lines_temp['x'].isnull()].reset_index(drop=True)
        elif feature_type == 'numeric':
            feature_grids = pdp_isolate_out.feature_grids
            # Keep only observations inside the grid range.
            ice_lines_temp = ice_lines_temp[(ice_lines_temp[actual_columns[0]] >= feature_grids[0])
                                            & (ice_lines_temp[actual_columns[0]] <= feature_grids[-1])]
            if x_quantile:
                ice_lines_temp['x'] = ice_lines_temp[actual_columns[0]].apply(lambda x: _find_closest(x, feature_grids))
            else:
                ice_lines_temp['x'] = ice_lines_temp[actual_columns[0]]
        else:
            ice_lines_temp['x'] = ice_lines_temp[actual_columns[0]]

        ice_plot_data_pts = _sample_data(ice_lines=ice_lines_temp, frac_to_plot=frac_to_plot)
        _ice_plot_pts(ice_plot_data_pts=ice_plot_data_pts, ax=ax, plot_params=plot_params)

    # Main PDP line with per-grid std band, drawn on top of everything else.
    std = ice_lines[feature_grids].std().values
    _pdp_std_plot(x=x, y=pdp_y, std=std, std_fill=std_fill, pdp_hl=pdp_hl, ax=ax, plot_params=plot_params)
def _pdp_std_plot(x, y, std, std_fill, pdp_hl, ax, plot_params):
    """
    PDP basic plot: main PDP line, zero baseline, and optional std band.

    :param x: x axis values
    :param y: pdp values (numpy array, one value per grid point)
    :param std: std values (same length as y)
    :param std_fill: whether to fill between std upper and lower
    :param pdp_hl: whether to highlight pdp line
    :param ax: axes to plot on
    :param plot_params: dictionary of plot config (may be None)
    """
    upper = y + std
    lower = y - std

    # Default style values; any of these may be overridden via plot_params.
    style = {
        'pdp_color': '#1A4E5D',
        'pdp_hl_color': '#FEDC00',
        'pdp_linewidth': 2,
        'zero_color': '#E75438',
        'zero_linewidth': 1.5,
        'fill_color': '#66C2D7',
        'fill_alpha': 0.2,
        'markersize': 5,
    }
    if plot_params is not None:
        style.update({key: value for key, value in plot_params.items() if key in style})

    if pdp_hl:
        # Wide highlight stroke drawn underneath the main line.
        ax.plot(x, y, color=style['pdp_hl_color'], linewidth=style['pdp_linewidth'] * 3, alpha=0.8)

    ax.plot(x, y, color=style['pdp_color'], linewidth=style['pdp_linewidth'], marker='o',
            markersize=style['markersize'])
    # Fix: the original drew the baseline with `[0] * y`, which only produced
    # zeros because multiplying a list by a numpy array broadcasts elementwise;
    # with a plain list it would have failed. Build the zero line explicitly.
    ax.plot(x, np.zeros(len(y)), linestyle='--', linewidth=style['zero_linewidth'], color=style['zero_color'])

    if std_fill:
        ax.fill_between(x, upper, lower, alpha=style['fill_alpha'], color=style['fill_color'])

    # Leave headroom: y-limits span twice the band extremes, always including 0.
    ax.set_ylim(np.min([np.min(lower) * 2, 0]), np.max([np.max(upper) * 2, 0]))
def _ice_plot_pts(ice_plot_data_pts, ax, plot_params):
"""
Plot the real data points
:param ice_plot_data_pts: data points to plot
:param ax: axes to plot on
:param plot_params: dictionary of plot config
"""
point_size = 50
point_pos_color = '#5BB573'
point_neg_color = '#E75438'
if plot_params is not None:
if 'point_size' in plot_params.keys():
point_size = plot_params['point_size']
if 'point_pos_color' in plot_params.keys():
point_pos_color = plot_params['point_pos_color']
if 'point_neg_color' in plot_params.keys():
point_neg_color = plot_params['point_neg_color']
ice_plot_data_pts['color'] = ice_plot_data_pts['actual_preds'].apply(lambda x: point_pos_color if x >= 0 else point_neg_color)
ax.scatter(ice_plot_data_pts['x'], ice_plot_data_pts['actual_preds'], s=point_size, marker="+", linewidth=1,
color=ice_plot_data_pts['color'])
def _ice_line_plot(x, ice_plot_data, feature_grids, ax, plot_params):
    """Plot the individual ICE lines.

    :param x: x axis values
    :param ice_plot_data: DataFrame of ICE lines, one row per line
    :param feature_grids: columns of `ice_plot_data` holding the line values
    :param ax: matplotlib axes to plot on
    :param plot_params: dictionary of plot config overrides, or None
    """
    n_lines = ice_plot_data.shape[0]
    # Thin the lines out (width and alpha) as more of them are drawn,
    # but never below 0.3.
    weight = np.max([1.0 / np.log10(n_lines), 0.3])

    cmap_name = 'Blues'
    if plot_params is not None and 'line_cmap' in plot_params:
        cmap_name = plot_params['line_cmap']
    # Pick 10 mid-range shades of the colormap and cycle through them.
    palette = plt.get_cmap(cmap_name)(np.linspace(0, 1, 20))[5:15]

    for idx in range(n_lines):
        line_values = list(ice_plot_data[feature_grids].iloc[idx].values)
        ax.plot(x, line_values, linewidth=weight, c=palette[idx % 10], alpha=weight)
def _ice_cluster_plot(x, ice_lines, feature_grids, n_cluster_centers, cluster_method, ax, plot_params):
    """Cluster the ICE lines and plot only the cluster centers.

    :param x: x axis values
    :param ice_lines: DataFrame of ICE lines, one row per line
    :param feature_grids: columns of `ice_lines` holding the line values
    :param n_cluster_centers: number of cluster centers
    :param cluster_method: 'approx' for MiniBatchKMeans, anything else for KMeans
    :param ax: matplotlib axes to plot on
    :param plot_params: dictionary of plot config overrides, or None
    """
    # MiniBatchKMeans trades exactness for speed on large numbers of lines.
    if cluster_method == 'approx':
        clusterer = MiniBatchKMeans(n_clusters=n_cluster_centers, random_state=0, verbose=0)
    else:
        clusterer = KMeans(n_clusters=n_cluster_centers, random_state=0, n_jobs=1)
    clusterer.fit(ice_lines[feature_grids])
    centers = pd.DataFrame(clusterer.cluster_centers_, columns=feature_grids)

    cmap_name = 'Blues'
    if plot_params is not None and 'cluster_cmap' in plot_params:
        cmap_name = plot_params['cluster_cmap']
    # Pick 10 mid-range shades of the colormap and cycle through them.
    palette = plt.get_cmap(cmap_name)(np.linspace(0, 1, 20))[5:15]

    for idx in range(len(centers)):
        center_line = list(centers[feature_grids].iloc[idx].values)
        ax.plot(x, center_line, linewidth=1, c=palette[idx % 10])
def _pdp_interact_plot_title(pdp_interact_out, feature_names, ax,
                             multi_flag, which_class, only_inter, plot_params):
    """Draw the title block (title + grid-point subtitles) for a PDP interaction plot.

    Renders only text on `ax` and then hides the axes entirely.

    :param pdp_interact_out: instance of pdp_interact_obj, or a dict of them
        keyed per class (multi-class mode uses the 'class_0' entry for counts)
    :param feature_names: names of the two interacting features
    :param ax: axes to plot on
    :param multi_flag: whether it is a subplot of a multi-classes plot
    :param which_class: which class to plot (shown when multi_flag is True)
    :param only_inter: whether only the interaction plot is drawn (affects
        vertical text placement)
    :param plot_params: values of plot parameters, or None
    """
    font_family = 'Arial'
    title = 'Interaction PDP between %s and %s' % (feature_names[0], feature_names[1])
    title_fontsize = 14
    subtitle_fontsize = 12

    # Dict input means a multi-class result; grid counts are identical across
    # classes, so 'class_0' is used as the representative.
    if type(pdp_interact_out) == dict:
        subtitle1 = 'Number of unique grid points of %s: %d' % (
            feature_names[0], len(pdp_interact_out['class_0'].feature_grids[0]))
        subtitle2 = 'Number of unique grid points of %s: %d' % (
            feature_names[1], len(pdp_interact_out['class_0'].feature_grids[1]))
    else:
        subtitle1 = 'Number of unique grid points of %s: %d' % (
            feature_names[0], len(pdp_interact_out.feature_grids[0]))
        subtitle2 = 'Number of unique grid points of %s: %d' % (
            feature_names[1], len(pdp_interact_out.feature_grids[1]))

    if plot_params is not None:
        # NOTE(review): this gates on the presence of 'pdp_inter' but then reads
        # the override keys from the TOP-LEVEL plot_params, not from
        # plot_params['pdp_inter'] — inconsistent with _pdp_contour_plot, which
        # reads the nested dict. Possibly a bug; confirm the intended schema
        # before changing.
        if 'pdp_inter' in plot_params.keys():
            if 'font_family' in plot_params.keys():
                font_family = plot_params['font_family']
            if 'title' in plot_params.keys():
                title = plot_params['title']
            if 'title_fontsize' in plot_params.keys():
                title_fontsize = plot_params['title_fontsize']
            if 'subtitle_fontsize' in plot_params.keys():
                subtitle_fontsize = plot_params['subtitle_fontsize']

    ax.set_facecolor('white')
    # The two layouts differ only in hard-coded vertical text positions:
    # the interaction-only figure leaves more room for the title block.
    if only_inter:
        ax.text(0, 0.8, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
        if multi_flag:
            ax.text(0, 0.62, "For Class %d" % which_class, va="top", ha="left", fontsize=title_fontsize,
                    fontname=font_family)
            ax.text(0, 0.45, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
                    color='grey')
            ax.text(0, 0.3, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
                    color='grey')
        else:
            ax.text(0, 0.55, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
                    color='grey')
            ax.text(0, 0.4, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
                    color='grey')
    else:
        ax.text(0, 0.6, title, va="top", ha="left", fontsize=title_fontsize, fontname=font_family)
        if multi_flag:
            ax.text(0, 0.53, "For Class %d" % which_class, va="top", ha="left", fontsize=title_fontsize,
                    fontname=font_family)
            ax.text(0, 0.4, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
                    color='grey')
            ax.text(0, 0.35, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
                    color='grey')
        else:
            ax.text(0, 0.4, subtitle1, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
                    color='grey')
            ax.text(0, 0.35, subtitle2, va="top", ha="left", fontsize=subtitle_fontsize, fontname=font_family,
                    color='grey')
    # Title block is pure text: hide ticks, spines and background.
    ax.axis('off')
def _pdp_interact_plot(pdp_interact_out, feature_names, center, plot_org_pts, plot_lines, frac_to_plot, cluster,
                       n_cluster_centers, cluster_method, x_quantile, figsize, plot_params, multi_flag, which_class):
    """Draw the full 2x2 interaction figure.

    Layout: title block (top-left), the two single-feature PDPs (top-right,
    bottom-left) and the interaction contour (bottom-right).

    :param pdp_interact_out: instance of pdp_interact_obj
        a calculated pdp_interact_obj instance
    :param feature_names: list of feature names
    :param center: boolean, default=True
        whether to center the plot
    :param plot_org_pts: boolean, default=False
        whether to plot out the original points
    :param plot_lines: boolean, default=False
        whether to plot out the individual lines
    :param frac_to_plot: float or integer, default=1
        how many points or lines to plot
    :param cluster: boolean, default=False
        whether to cluster the individual lines and only plot cluster centers
    :param n_cluster_centers: integer, default=None
        number of cluster centers
    :param cluster_method: string, default=None
        cluster method to use; 'approx' selects MiniBatchKMeans
    :param x_quantile: boolean, default=False
        whether to construct x axis ticks using quantiles
    :param figsize: figure size, or None for the default (15, 15)
    :param plot_params: dict, default=None
        values of plot parameters
    :param multi_flag: boolean, default=False
        whether it is a subplot of a multi-class plot
    :param which_class: integer, default=None
        must not be None under multi-class mode
    """
    fig = plt.figure(figsize=(15, 15) if figsize is None else figsize)

    # The single-feature panels take their own sub-dict of parameters.
    pdp_plot_params = None
    if plot_params is not None:
        pdp_plot_params = plot_params.get('pdp', None)

    grid = GridSpec(2, 2)

    # Top-left: title / summary text.
    title_ax = plt.subplot(grid[0, 0])
    _pdp_interact_plot_title(pdp_interact_out=pdp_interact_out, feature_names=feature_names, ax=title_ax,
                             multi_flag=multi_flag, which_class=which_class, only_inter=False,
                             plot_params=plot_params)

    # Top-right and bottom-left: the two single-feature PDPs share settings.
    shared_kwargs = dict(center=center, plot_org_pts=plot_org_pts, plot_lines=plot_lines,
                         frac_to_plot=frac_to_plot, cluster=cluster, n_cluster_centers=n_cluster_centers,
                         cluster_method=cluster_method, x_quantile=x_quantile, plot_params=pdp_plot_params)
    _pdp_plot(pdp_isolate_out=pdp_interact_out.pdp_isolate_out1, feature_name=feature_names[0],
              ax=plt.subplot(grid[0, 1]), **shared_kwargs)
    _pdp_plot(pdp_isolate_out=pdp_interact_out.pdp_isolate_out2, feature_name=feature_names[1],
              ax=plt.subplot(grid[1, 0]), **shared_kwargs)

    # Bottom-right: the interaction contour (needs the figure for its colorbar).
    contour_ax = plt.subplot(grid[1, 1])
    _pdp_contour_plot(pdp_interact_out=pdp_interact_out, feature_names=feature_names, x_quantile=x_quantile,
                      ax=contour_ax, fig=fig, plot_params=plot_params)
def _pdp_contour_plot(pdp_interact_out, feature_names, x_quantile, ax, fig, plot_params):
    """Plot the PDP interaction contour.

    :param pdp_interact_out: instance of pdp_interact_obj
        a calculated pdp_interact_obj instance
    :param feature_names: list of the two feature names
    :param x_quantile: boolean, default=False
        whether to construct x axis ticks using quantiles
    :param ax: axes to plot on
    :param fig: plt figure used for the colorbar; skipped when None
    :param plot_params: dict, default=None
        values of plot parameters (contour overrides live under 'pdp_inter')
    """
    font_family = 'Arial'
    contour_color = 'white'
    contour_cmap = 'viridis'
    xticks_rotation = 0

    if plot_params is not None:
        if 'pdp_inter' in plot_params.keys():
            if 'contour_color' in plot_params['pdp_inter'].keys():
                contour_color = plot_params['pdp_inter']['contour_color']
            if 'contour_cmap' in plot_params['pdp_inter'].keys():
                contour_cmap = plot_params['pdp_inter']['contour_cmap']
            if 'font_family' in plot_params['pdp_inter'].keys():
                font_family = plot_params['pdp_inter']['font_family']
        # xticks_rotation intentionally lives at the top level, not 'pdp_inter'.
        if 'xticks_rotation' in plot_params.keys():
            xticks_rotation = plot_params['xticks_rotation']

    _axes_modify(font_family, ax)

    feature_types = pdp_interact_out.feature_types
    # Work on a copy: onehot columns are collapsed into synthetic columns below.
    pdp = copy.deepcopy(pdp_interact_out.pdp)

    new_feature_names = []
    for i, feature_type in enumerate(feature_types):
        if feature_type == 'onehot':
            # Collapse the onehot-encoded columns into one integer index column
            # (the position of the single 1 among the feature's columns).
            new_col = 'onehot_%d' % (i)
            pdp[new_col] = pdp.apply(lambda x: list(x[pdp_interact_out.features[i]]).index(1), axis=1)
            new_feature_names.append(new_col)
        else:
            new_feature_names.append(pdp_interact_out.features[i])

    # With quantile ticks, numeric grid values are replaced by their grid index.
    if (feature_types[0] == 'numeric') and x_quantile:
        pdp[new_feature_names[0]] = pdp[new_feature_names[0]].apply(
            lambda x: list(pdp_interact_out.feature_grids[0]).index(x))
    if (feature_types[1] == 'numeric') and x_quantile:
        pdp[new_feature_names[1]] = pdp[new_feature_names[1]].apply(
            lambda x: list(pdp_interact_out.feature_grids[1]).index(x))

    # Build the Z surface by looking up the prediction for every (x, y) grid cell.
    X, Y = np.meshgrid(pdp[new_feature_names[0]].unique(), pdp[new_feature_names[1]].unique())
    Z = []
    for i in range(X.shape[0]):
        zs = []
        for j in range(X.shape[1]):
            x = X[i, j]
            y = Y[i, j]
            z = pdp[(pdp[new_feature_names[0]] == x) & (pdp[new_feature_names[1]] == y)]['preds'].values[0]
            zs.append(z)
        Z.append(zs)
    Z = np.array(Z)

    # Axis ticks/labels depend on the feature type of each axis.
    if feature_types[0] == 'onehot':
        ax.set_xticks(range(X.shape[1]))
        ax.set_xticklabels(pdp_interact_out.pdp_isolate_out1.display_columns, rotation=xticks_rotation)
    elif feature_types[0] == 'binary':
        ax.set_xticks([0, 1])
        ax.set_xticklabels(pdp_interact_out.pdp_isolate_out1.display_columns, rotation=xticks_rotation)
    else:
        if x_quantile:
            ax.set_xticks(range(len(pdp_interact_out.feature_grids[0])))
            ax.set_xticklabels(pdp_interact_out.feature_grids[0], rotation=xticks_rotation)

    if feature_types[1] == 'onehot':
        ax.set_yticks(range(Y.shape[0]))
        ax.set_yticklabels(pdp_interact_out.pdp_isolate_out2.display_columns)
    elif feature_types[1] == 'binary':
        ax.set_yticks([0, 1])
        ax.set_yticklabels(pdp_interact_out.pdp_isolate_out2.display_columns)
    else:
        if x_quantile:
            ax.set_yticks(range(len(pdp_interact_out.feature_grids[1])))
            ax.set_yticklabels(pdp_interact_out.feature_grids[1])

    level = np.min([X.shape[0], X.shape[1]])
    # BUG FIX: `N` is not a contourf keyword; the level count must be passed
    # positionally (or as `levels=`). The original `N=level` was rejected or
    # ignored depending on the matplotlib version.
    c1 = ax.contourf(X, Y, Z, level, origin='lower', cmap=contour_cmap)
    c2 = ax.contour(c1, levels=c1.levels, colors=contour_color, origin='lower')
    # BUG FIX: clabel's keyword is `fontsize`, not `contour_label_fontsize`.
    ax.clabel(c2, fontsize=9, inline=1)

    ax.set_xlabel(feature_names[0], fontsize=10)
    ax.set_ylabel(feature_names[1], fontsize=10)
    ax.get_yaxis().tick_right()

    if fig is not None:
        # Horizontal colorbar placed by the project's ColorBarLocator helper.
        cax = fig.add_axes([0, 0, 0, 0], axes_locator=ColorBarLocator(ax))
        fig.colorbar(c1, cax=cax, orientation='horizontal')
| 40.730703 | 130 | 0.660111 |
6f76ec963af630c9f2623b7e32036a92ed42bb1c | 8,778 | py | Python | tests/basic_step_tests.py | kodexa-ai/kodexa | 568466b3dc4758babf2d318dc91b1c09ec60845d | [
"Apache-2.0"
] | 1 | 2020-08-31T09:32:39.000Z | 2020-08-31T09:32:39.000Z | tests/basic_step_tests.py | kodexa-ai/kodexa | 568466b3dc4758babf2d318dc91b1c09ec60845d | [
"Apache-2.0"
] | 13 | 2020-04-08T10:53:26.000Z | 2022-03-30T09:51:29.000Z | tests/basic_step_tests.py | kodexa-ai/kodexa | 568466b3dc4758babf2d318dc91b1c09ec60845d | [
"Apache-2.0"
] | 1 | 2020-04-12T13:10:51.000Z | 2020-04-12T13:10:51.000Z | import os
import pytest
from kodexa import Document, Pipeline, PipelineContext, TagsToKeyValuePairExtractor, RollupTransformer
| 45.71875 | 224 | 0.691729 |
6f79392055980ee88fc9adbd173f470e11c846bf | 158 | py | Python | dftimewolf/lib/containers/__init__.py | fooris/dftimewolf | 5df863dad1518e4c4109f0563efa7458df26f7d2 | [
"Apache-2.0"
] | 1 | 2021-01-21T19:53:37.000Z | 2021-01-21T19:53:37.000Z | dftimewolf/lib/containers/__init__.py | joachimmetz/dftimewolf | 9181bd9e860a467495ca4ab66e2c3873cbcbf529 | [
"Apache-2.0"
] | null | null | null | dftimewolf/lib/containers/__init__.py | joachimmetz/dftimewolf | 9181bd9e860a467495ca4ab66e2c3873cbcbf529 | [
"Apache-2.0"
] | null | null | null | """Make containers available here."""
from .report import Report
from .threat_intelligence import ThreatIntelligence
from .stackdriver import StackdriverLogs
| 31.6 | 51 | 0.835443 |
6f79949d19627c5156b74487a315345109a1b4e7 | 2,327 | py | Python | egs/skl_historical_poly_regression_variable_window_overmqtt/client_mqtt_random.py | COMEA-TUAS/mcx-public | 8ff486739f5332d075aeaaf7ea5dd33a04857b5c | [
"MIT"
] | null | null | null | egs/skl_historical_poly_regression_variable_window_overmqtt/client_mqtt_random.py | COMEA-TUAS/mcx-public | 8ff486739f5332d075aeaaf7ea5dd33a04857b5c | [
"MIT"
] | null | null | null | egs/skl_historical_poly_regression_variable_window_overmqtt/client_mqtt_random.py | COMEA-TUAS/mcx-public | 8ff486739f5332d075aeaaf7ea5dd33a04857b5c | [
"MIT"
] | 1 | 2022-03-01T06:42:04.000Z | 2022-03-01T06:42:04.000Z | #!/usr/bin/env python3
"""Script for simulating IOT measurement stream to ModelConductor experiment."""
import pandas as pd
import numpy as np
import sqlalchemy as sqla
from datetime import datetime as dt
from time import sleep, time
import logging
import sys, os, asyncio
from hbmqtt.client import MQTTClient, ConnectException
from hbmqtt.version import get_version
from docopt import docopt
from hbmqtt.utils import read_yaml_config
from hbmqtt.mqtt.constants import QOS_0, QOS_1, QOS_2
logger = logging.getLogger(__name__)
formatter = "[%(asctime)s] :: %(levelname)s - %(message)s"
logging.basicConfig(level=logging.DEBUG, format=formatter)
csv_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'experiment_2019-10-03_20-37-36.csv')
data = np.random.rand(100, 4)
data = np.insert(data, 0, np.arange(100), axis=1)
data = pd.DataFrame(data, columns =['time', 'A', 'B', 'C', 'D'])
BROKER_URL = "mqtt://localhost:1883"
if __name__ == "__main__":
main() | 33.724638 | 111 | 0.689729 |
6f7aa07e116a65a70f05b7ef70691b8f299b021f | 694 | py | Python | 5/challenge2.py | roryeiffe/Adent-of-Code | 80f123663fcf04bf5f0d6733807b4a2dd53bc68c | [
"MIT"
] | null | null | null | 5/challenge2.py | roryeiffe/Adent-of-Code | 80f123663fcf04bf5f0d6733807b4a2dd53bc68c | [
"MIT"
] | null | null | null | 5/challenge2.py | roryeiffe/Adent-of-Code | 80f123663fcf04bf5f0d6733807b4a2dd53bc68c | [
"MIT"
] | null | null | null | import sys
import math
L = []
f = open(sys.argv[1],"r")
for item in f:
L.append(item.strip())
ids = []
max_id = 0
for sequence in L:
id = find_id(sequence)
ids.append(id)
if id > max_id:
max_id = id
ids.sort()
old = 35
for id in ids:
print(id)
old = id
| 13.09434 | 35 | 0.597983 |
488a4b657eabc94e1d145860d1dd73207641241d | 2,890 | py | Python | Injector/injector.py | MateusGabi/Binary-Hacking-on-Super-Mario | e75292aee6b419aad2d8fe173c2fab85d9ce23ee | [
"MIT"
] | 1 | 2018-03-26T23:46:01.000Z | 2018-03-26T23:46:01.000Z | Injector/injector.py | MateusGabi/Binary-Hacking-on-Super-Mario | e75292aee6b419aad2d8fe173c2fab85d9ce23ee | [
"MIT"
] | 4 | 2018-03-24T15:41:12.000Z | 2018-04-08T23:49:35.000Z | Injector/injector.py | MateusGabi/Binary-Hacking-on-Super-Mario | e75292aee6b419aad2d8fe173c2fab85d9ce23ee | [
"MIT"
] | 1 | 2018-04-03T23:49:37.000Z | 2018-04-03T23:49:37.000Z | # -*- coding: utf-8 -*-
"""
Injector.
A partir de um arquivo binario, de uma tabela binaria gerada com o Finder,
e um arquivo de substituio, o Injector capaz de injetar um texto
no binario trocando o texto in-game
O Injector faz automaticamente a adequao do tamanho do texto ao tamanho da caixa,
truncando se maior e colocando corretamente as quebras de linha
@author Yan Uehara
"""
from __future__ import print_function
import os
import sys
import binascii
import pickle
if __name__ == '__main__':
if len(sys.argv) != 4:
print("Use: python extractor.py [sfc] [tbl] [substituto]")
sys.exit(1)
sfc = sys.argv[1]
tbl = sys.argv[2]
substituto = sys.argv[3]
if os.path.exists(sfc) and os.path.isfile(tbl):
inj = Injector(sfc, tbl, substituto)
inj.run()
| 29.489796 | 96 | 0.565052 |
488aa98c813700f0bcd537993c300646573e9ada | 10,556 | py | Python | var/spack/repos/scs_io/packages/cudnn/package.py | scs-lab/spack | 77956aad6aa523c2a6c7256eb3c75094bf955c35 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/scs_io/packages/cudnn/package.py | scs-lab/spack | 77956aad6aa523c2a6c7256eb3c75094bf955c35 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/scs_io/packages/cudnn/package.py | scs-lab/spack | 77956aad6aa523c2a6c7256eb3c75094bf955c35 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2020-09-15T02:37:59.000Z | 2020-09-21T04:34:38.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
| 45.304721 | 98 | 0.670519 |
488b91ca767e9611a3e2258e676d32094fa0687f | 4,023 | py | Python | python/svm.py | mwalton/em-machineLearning | efd76961fa3b78e042ca481733152a683074d15c | [
"MIT"
] | null | null | null | python/svm.py | mwalton/em-machineLearning | efd76961fa3b78e042ca481733152a683074d15c | [
"MIT"
] | null | null | null | python/svm.py | mwalton/em-machineLearning | efd76961fa3b78e042ca481733152a683074d15c | [
"MIT"
] | null | null | null | import numpy as np
import argparse
import os.path
import plots as plot
from sklearn.preprocessing import StandardScaler
from sklearn.grid_search import GridSearchCV
import time
from sklearn import svm
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
from sklearn.cross_validation import StratifiedKFold
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-x", "--xTrain", required = True,
help = "path to training feature set")
ap.add_argument("-y", "--yTrain", required = True,
help = "path to training target set")
ap.add_argument("-X", "--xTest", required = True,
help = "path to testing feature set")
ap.add_argument("-Y", "--yTest", required = True,
help = "path to testing target set")
ap.add_argument("-o", "--optimize", type = int, default = 0,
help = "optomization mode: 0 use default, 1 optomize, 2 use pkl model if possible")
ap.add_argument("-m", "--multiClass", type = int, default=1,
help = "exclusive multi class or regression")
ap.add_argument("-p", "--pickle", default="models/svmModel.pkl",
help = "pickle dump of model (output if optomize = 1, input if optomize = 0)")
ap.add_argument("-v", "--visualize", type=int, default=0,
help = "whether or not to show visualizations after a run")
args = vars(ap.parse_args())
(trainX, trainY) = loadData(args["xTrain"], args["yTrain"])
(testX, testY) = loadData(args["xTest"], args["yTest"])
# required scaling for SVM
trainX = standardize(trainX)
testX = standardize(testX)
if (args["multiClass"] == 1):
trainY = convertToClasses(trainY)
testY = convertToClasses(testY)
# check to see if a grid search should be done
if args["optimize"] == 1:
#configure stratified k-fold cross validation
cv = StratifiedKFold(y=trainY, n_folds=4, shuffle=True)
# perform a grid search on the 'C' and 'gamma' parameter
# of SVM
print "SEARCHING SVM"
C_range = 2. ** np.arange(-15, 15, step=1)
gamma_range = 2. ** np.arange(-15, 15, step=1)
param_grid = dict(gamma=gamma_range, C=C_range)
start = time.time()
gs = GridSearchCV(svm.SVC(), param_grid=param_grid, cv=cv, n_jobs = -1, verbose = 2)
gs.fit(trainX, trainY)
# print diagnostic information to the user and grab the
# best model
print "done in %0.3fs" % (time.time() - start)
print "best score: %0.3f" % (gs.best_score_)
print "SVM PARAMETERS"
bestParams = gs.best_estimator_.get_params()
# loop over the parameters and print each of them out
# so they can be manually set
print("Best Estimator: %s" % gs.best_estimator_)
#for p in sorted(params.keys()):
# print "\t %s: %f" % (p, bestParams[p])
print("Accuracy Score On Validation Set: %s\n" % accuracy_score(testY, gs.predict(testX)))
# show a reminder message
print "\nIMPORTANT"
print "Now that your parameters have been searched, manually set"
print "them and re-run this script with --optomize 0"
joblib.dump(gs.best_estimator_, args["pickle"])
# otherwise, use the manually specified parameters
else:
# evaluate using SVM
if (os.path.isfile(args["pickle"]) and args["optimize"] == 2):
clf = joblib.load(args["pickle"])
else:
clf = svm.SVC()
clf.fit(trainX, trainY)
print "SVM PERFORMANCE"
pred = clf.predict(testX)
print classification_report(testY, pred)
print("Accuracy Score: %s\n" % accuracy_score(testY, pred))
if (args["visualize"] == 1):
plot.accuracy(testY, pred, "SVM")
| 35.60177 | 94 | 0.675864 |
488d15bc02d47b9fb1ebe771ea194aa64ab3caea | 3,545 | py | Python | aio_logstash/formatter.py | SinaKhorami/aio-logstash | ac820bd683c79389bcc2750c753ea860eb35c686 | [
"MIT"
] | 4 | 2019-10-23T06:58:31.000Z | 2021-11-01T17:08:34.000Z | aio_logstash/formatter.py | SinaKhorami/aio-logstash | ac820bd683c79389bcc2750c753ea860eb35c686 | [
"MIT"
] | 1 | 2021-06-02T00:35:23.000Z | 2021-06-02T00:35:23.000Z | aio_logstash/formatter.py | SinaKhorami/aio-logstash | ac820bd683c79389bcc2750c753ea860eb35c686 | [
"MIT"
] | 1 | 2019-10-23T06:58:45.000Z | 2019-10-23T06:58:45.000Z | import abc
import json
import logging
import socket
import sys
import time
import aio_logstash
import traceback
from aio_logstash import constants
from datetime import datetime, date
class V1Formatter(BaseFormatter):
| 29.297521 | 97 | 0.618336 |
488df2d8a33bbefd7d27eb53f611e19d0eba095d | 18,352 | py | Python | .venv/lib/python2.7/site-packages/celery/events/cursesmon.py | MansoorHanif/FYP-web-app | 918008d3b5eedaa904f3e720296afde9d73ac3f4 | [
"BSD-3-Clause"
] | 4 | 2018-10-19T04:36:20.000Z | 2020-02-13T16:14:09.000Z | .venv/lib/python2.7/site-packages/celery/events/cursesmon.py | MansoorHanif/FYP-web-app | 918008d3b5eedaa904f3e720296afde9d73ac3f4 | [
"BSD-3-Clause"
] | 3 | 2020-02-11T23:03:45.000Z | 2021-06-10T18:05:11.000Z | oo/lib/python3.5/site-packages/celery/events/cursesmon.py | chunky2808/SPOJ-history-Django-App | 490c58b1593cd3626f0ddc27fdd09c6e8d1c56e1 | [
"MIT"
] | 1 | 2019-10-26T04:20:52.000Z | 2019-10-26T04:20:52.000Z | # -*- coding: utf-8 -*-
"""Graphical monitor of Celery events using curses."""
from __future__ import absolute_import, print_function, unicode_literals
import curses
import sys
import threading
from datetime import datetime
from itertools import count
from textwrap import wrap
from time import time
from math import ceil
from celery import VERSION_BANNER
from celery import states
from celery.app import app_or_default
from celery.five import items, values
from celery.utils.text import abbr, abbrtask
__all__ = ['CursesMonitor', 'evtop']
BORDER_SPACING = 4
LEFT_BORDER_OFFSET = 3
UUID_WIDTH = 36
STATE_WIDTH = 8
TIMESTAMP_WIDTH = 8
MIN_WORKER_WIDTH = 15
MIN_TASK_WIDTH = 16
# this module is considered experimental
# we don't care about coverage.
STATUS_SCREEN = """\
events: {s.event_count} tasks:{s.task_count} workers:{w_alive}/{w_all}
"""
keyalias = {curses.KEY_DOWN: 'J',
curses.KEY_UP: 'K',
curses.KEY_ENTER: 'I'}
def capture_events(app, state, display): # pragma: no cover
while 1:
print('-> evtop: starting capture...', file=sys.stderr)
with app.connection_for_read() as conn:
try:
conn.ensure_connection(on_connection_error,
app.conf.broker_connection_max_retries)
recv = app.events.Receiver(conn, handlers={'*': state.event})
display.resetscreen()
display.init_screen()
recv.capture()
except conn.connection_errors + conn.channel_errors as exc:
print('Connection lost: {0!r}'.format(exc), file=sys.stderr)
def evtop(app=None): # pragma: no cover
"""Start curses monitor."""
app = app_or_default(app)
state = app.events.State()
display = CursesMonitor(state, app)
display.init_screen()
refresher = DisplayThread(display)
refresher.start()
try:
capture_events(app, state, display)
except Exception:
refresher.shutdown = True
refresher.join()
display.resetscreen()
raise
except (KeyboardInterrupt, SystemExit):
refresher.shutdown = True
refresher.join()
display.resetscreen()
if __name__ == '__main__': # pragma: no cover
evtop()
| 33.797422 | 78 | 0.520325 |
488ea1167c4ff5c98e7760397218e331d094d166 | 1,705 | py | Python | features/extraction/3_extraction/feature_extractors/utilization.py | bayesimpact/readmission-risk | 5b0f6c93826601e2dbb9c8c276e92801772e17c4 | [
"Apache-2.0"
] | 19 | 2016-10-06T18:10:36.000Z | 2018-04-04T02:30:09.000Z | features/extraction/3_extraction/feature_extractors/utilization.py | BeaconLabs/readmission-risk | 5b0f6c93826601e2dbb9c8c276e92801772e17c4 | [
"Apache-2.0"
] | 2 | 2017-10-26T19:22:58.000Z | 2017-11-16T07:44:58.000Z | features/extraction/3_extraction/feature_extractors/utilization.py | bayesimpact/readmission-risk | 5b0f6c93826601e2dbb9c8c276e92801772e17c4 | [
"Apache-2.0"
] | 9 | 2016-11-15T14:13:20.000Z | 2021-12-19T20:27:58.000Z | """A feature extractor for patients' utilization."""
from __future__ import absolute_import
import logging
import pandas as pd
from sutter.lib import postgres
from sutter.lib.feature_extractor import FeatureExtractor
log = logging.getLogger('feature_extraction')
| 34.1 | 95 | 0.626979 |
4893210d0b7c805a88b25dd46688e23dd6ed78a0 | 6,517 | py | Python | safe_control_gym/math_and_models/normalization.py | catgloss/safe-control-gym | b3f69bbed8577f64fc36d23677bf50027e991b2d | [
"MIT"
] | 120 | 2021-08-16T13:55:47.000Z | 2022-03-31T10:31:42.000Z | safe_control_gym/math_and_models/normalization.py | catgloss/safe-control-gym | b3f69bbed8577f64fc36d23677bf50027e991b2d | [
"MIT"
] | 10 | 2021-10-19T07:19:23.000Z | 2022-03-24T18:43:02.000Z | safe_control_gym/math_and_models/normalization.py | catgloss/safe-control-gym | b3f69bbed8577f64fc36d23677bf50027e991b2d | [
"MIT"
] | 24 | 2021-08-28T17:21:09.000Z | 2022-03-31T10:31:44.000Z | """Perform normalization on inputs or rewards.
"""
import numpy as np
import torch
from gym.spaces import Box
def normalize_angle(x):
"""Wraps input angle to [-pi, pi].
"""
return ((x + np.pi) % (2 * np.pi)) - np.pi
| 27.041494 | 98 | 0.584778 |
48935c63c2620e531593d07e9af2473ca805cfae | 5,125 | py | Python | networking/pycat.py | itsbriany/PythonSec | eda5dc3f7ac069cd77d9525e93be5cfecc00db16 | [
"MIT"
] | 1 | 2016-01-12T19:38:59.000Z | 2016-01-12T19:38:59.000Z | networking/pycat.py | itsbriany/Security-Tools | eda5dc3f7ac069cd77d9525e93be5cfecc00db16 | [
"MIT"
] | null | null | null | networking/pycat.py | itsbriany/Security-Tools | eda5dc3f7ac069cd77d9525e93be5cfecc00db16 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import socket
import threading
import sys # Support command line args
import getopt # Support command line option parsing
import os # Kill the application
import signal # Catch an interrupt
import time # Thread sleeping
# Global variables definitions
target = ""
port = False
listen = False
command = ""
upload = False
# This tool should be able to replace netcat
# The tool should be able to act as a server and as a client depending on the arguments
###############################################################################
# Start menu
###############################################################################
# Connect as a client
###############################################################################
# Handle the connection from the client.
###############################################################################
# This is the listening functionality of the program
###############################################################################
# main definition
###############################################################################
# Program execution
try:
main()
except KeyboardInterrupt:
print ""
sys.exit(0)
| 23.617512 | 121 | 0.607415 |
48938090ba940fdf1245ccfb1e1b41da0dfdb8ec | 4,356 | py | Python | code/striatal_model/neuron_model_tuning.py | weidel-p/go-robot-nogo-robot | 026f1f753125089a03504320cc94a76888a0efc5 | [
"MIT"
] | 1 | 2020-09-23T22:16:10.000Z | 2020-09-23T22:16:10.000Z | code/striatal_model/neuron_model_tuning.py | weidel-p/go-robot-nogo-robot | 026f1f753125089a03504320cc94a76888a0efc5 | [
"MIT"
] | null | null | null | code/striatal_model/neuron_model_tuning.py | weidel-p/go-robot-nogo-robot | 026f1f753125089a03504320cc94a76888a0efc5 | [
"MIT"
] | null | null | null | import nest
import pylab as pl
import pickle
from nest import voltage_trace
from nest import raster_plot as rplt
import numpy as np
from params import *
seed = [np.random.randint(0, 9999999)] * num_threads
calcFI()
# checkConninMV()
| 32.75188 | 97 | 0.597107 |
4893c3ed4760195e110268be8d490ec224a54ecd | 1,434 | py | Python | fastf1/tests/test_livetiming.py | JellybeanAsh/Fast-F1 | cf0cb20fdd3e89fdee3755097722db5ced3a23b5 | [
"MIT"
] | 690 | 2020-07-31T15:37:59.000Z | 2022-03-31T20:51:46.000Z | fastf1/tests/test_livetiming.py | JellybeanAsh/Fast-F1 | cf0cb20fdd3e89fdee3755097722db5ced3a23b5 | [
"MIT"
] | 90 | 2020-07-25T11:00:15.000Z | 2022-03-31T01:59:59.000Z | fastf1/tests/test_livetiming.py | JellybeanAsh/Fast-F1 | cf0cb20fdd3e89fdee3755097722db5ced3a23b5 | [
"MIT"
] | 68 | 2020-07-21T23:21:29.000Z | 2022-03-30T16:12:01.000Z | import os
from fastf1.core import Session, Weekend
from fastf1.livetiming.data import LiveTimingData
| 34.142857 | 87 | 0.679219 |
48946b441f56097b2a5a11c0168a86635a484d94 | 1,768 | py | Python | src/plot/S0_read_jld2.py | OUCyf/NoiseCC | ad47e6894568bd007cd0425f766ba8aa243f83e1 | [
"MIT"
] | 4 | 2021-12-13T09:16:07.000Z | 2022-01-06T15:45:02.000Z | src/plot/S0_read_jld2.py | OUCyf/NoiseCC | ad47e6894568bd007cd0425f766ba8aa243f83e1 | [
"MIT"
] | null | null | null | src/plot/S0_read_jld2.py | OUCyf/NoiseCC | ad47e6894568bd007cd0425f766ba8aa243f83e1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 21 20:09:08 2021
######################
##### read h5 ########
######################
# 1.read h5-file
h5_file = h5py.File(files[1],'r')
# 2.show all keys in h5-file
h5_file.keys()
# 3. keys in h5-file
for key in h5_file.keys():
onekey = key
onekey_name = h5_file[key].name
# 4.group key "NN"
h5_file["NN"]
h5_file["NN"].keys()
f_dict = dict(h5_file["NN"])
f_dict.keys() # keyword
# 5. group datasets
data = f_dict["data"][()] #
data = f_dict["data"].value # data numpy ndarray
trace = data[0] #
# 6. group Int Float
baz = f_dict["baz"].value
baz = h5_file["NN"]["baz"].value
# 7. group
# encodeunicodestr2.encode(utf8)unicodestr2utf8
comp = h5_file["NN"]["comp"].value[0].decode('utf-8')
# 8.
f_dict.close()
######################
##### write h5 ########
######################
@author: yf
"""
#%%
import numpy as np
import h5py
import os
import glob
#%% 1. set parameter
# Path to a NoiseCC cross-correlation output in JLD2 (HDF5-compatible) format.
file = "../../data/BJ.081_BJ.084__2020_04_11_00_00_00T2021_04_13_00_00_00__all.jld2"
# HDF5 group holding one component pair; "NN" presumably means the N-N channel
# pair — TODO confirm against the NoiseCC writer.
chan = "NN"
# dt: sample interval in seconds — assigned but not used below in this chunk.
dt = 0.005
#%% 2. read h5
# open file
f = h5py.File(file,'r')
# read data
# First cross-correlation trace stored under the group's "data" dataset.
data = f[chan]["data"][0]
# read parameters
# Scalar/attribute datasets are read with the `[()]` idiom (h5py >= 2.1
# replacement for the deprecated `.value`).
azi = f[chan]["azi"][()]
baz = f[chan]["baz"][()]
maxlag = f[chan]["maxlag"][()]
cc_len = f[chan]["cc_len"][()]
cc_step = f[chan]["cc_step"][()]
corr_type = f[chan]["corr_type"][()]
comp = f[chan]["comp"][()]
dist = f[chan]["dist"][()] # dist = f[chan]["dist"].value
lat = f[chan]["lat"][()]
lon = f[chan]["lon"][()]
N_glob = f[chan]["N_glob"][()]
N_read = f[chan]["N_read"][()]
N_good = f[chan]["N_good"][()]
# Stored as a length-1 array of bytes; decode the first entry to str.
name = f[chan]["name"][()][0].decode('utf-8')
# close h5-file
# NOTE(review): a `with h5py.File(...) as f:` block would guarantee closure
# even on error; left as-is since this is a documentation-only pass.
f.close()
| 19.644444 | 84 | 0.581448 |
48946d309358ecb51872d9f0d80dff7d64dcb48a | 872 | py | Python | setup.py | MrJakeSir/theming | fd572c871fb4fd67cc4f9517558570d652ad1f0c | [
"MIT"
] | 3 | 2021-10-02T02:23:50.000Z | 2021-10-02T16:03:33.000Z | setup.py | MrJakeSir/themify | fd572c871fb4fd67cc4f9517558570d652ad1f0c | [
"MIT"
] | null | null | null | setup.py | MrJakeSir/themify | fd572c871fb4fd67cc4f9517558570d652ad1f0c | [
"MIT"
] | null | null | null | from distutils.core import setup
# Package metadata for distribution on PyPI.
setup(
  name = 'colormate',
  packages = ['colormate'],
  version = '0.2214',
  license='MIT',
  description = 'A package to theme terminal scripts with custom colors and text formatting',
  author = 'Rodrigo',
  author_email = 'roarba011@gmail.com',
  # NOTE(review): the project URLs point at "themify" while the package is
  # named "colormate", and the download_url tag (v_0.3.1) does not match
  # `version` above — confirm these are intentional.
  url = 'https://github.com/mrjakesir/themify',
  download_url = 'https://github.com/MrJakeSir/themify/archive/refs/tags/v_0.3.1.tar.gz',
  keywords = ['Colors', 'Scripting', 'Theme', 'Theming'],
  # Trove classifiers: advertised Python support is 3.6-3.9.
  classifiers=[
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'Topic :: Software Development :: Build Tools',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3.8',
    'Programming Language :: Python :: 3.9',
  ],
)
| 36.333333 | 93 | 0.661697 |
4894cec7ad1d16f91926da91173205b79ee1b463 | 1,620 | py | Python | tests/test_compound_where.py | WinVector/data_algebra | 3d6002ddf8231d310e03537a0435df0554b62234 | [
"BSD-3-Clause"
] | 37 | 2019-08-28T08:16:48.000Z | 2022-03-14T21:18:39.000Z | tests/test_compound_where.py | WinVector/data_algebra | 3d6002ddf8231d310e03537a0435df0554b62234 | [
"BSD-3-Clause"
] | 1 | 2019-09-02T23:13:29.000Z | 2019-09-08T01:43:10.000Z | tests/test_compound_where.py | WinVector/data_algebra | 3d6002ddf8231d310e03537a0435df0554b62234 | [
"BSD-3-Clause"
] | 3 | 2019-08-28T12:23:11.000Z | 2020-02-08T19:22:31.000Z | import data_algebra
import data_algebra.test_util
from data_algebra.data_ops import * # https://github.com/WinVector/data_algebra
import data_algebra.util
import data_algebra.SQLite
| 28.421053 | 80 | 0.56358 |
4895a29e1cbfd7f3cbc0290d21c2ee285348e317 | 385 | py | Python | students/admin.py | eustone/sms | 0b785c8a6cc7f8c6035f1b46092d5b8e8750ab7f | [
"Apache-2.0"
] | null | null | null | students/admin.py | eustone/sms | 0b785c8a6cc7f8c6035f1b46092d5b8e8750ab7f | [
"Apache-2.0"
] | 7 | 2021-03-19T01:09:50.000Z | 2022-03-12T00:20:49.000Z | students/admin.py | eustone/sms | 0b785c8a6cc7f8c6035f1b46092d5b8e8750ab7f | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from .models import Student
# Register your models here.
# NOTE(review): StudentAdmin is not defined or imported in this view —
# presumably a ModelAdmin subclass declared elsewhere; confirm it exists,
# otherwise this line raises NameError at import time.
admin.site.register(Student,StudentAdmin)
| 27.5 | 56 | 0.696104 |
4896bd7de479f88113218577909931ad2456610b | 18,819 | py | Python | lshmm/viterbi/vit_diploid_variants_samples.py | jeromekelleher/lshmm | 58e0c3395f222e756bb10a0063f5118b20176a01 | [
"MIT"
] | null | null | null | lshmm/viterbi/vit_diploid_variants_samples.py | jeromekelleher/lshmm | 58e0c3395f222e756bb10a0063f5118b20176a01 | [
"MIT"
] | 9 | 2022-02-24T14:20:09.000Z | 2022-03-01T17:54:47.000Z | lshmm/vit_diploid_variants_samples.py | astheeggeggs/ls_hmm | 11af1eb886ef3db2869cdd50954fba5565fcef51 | [
"MIT"
] | 1 | 2022-02-28T17:07:36.000Z | 2022-02-28T17:07:36.000Z | """Collection of functions to run Viterbi algorithms on dipoid genotype data, where the data is structured as variants x samples."""
import numba as nb
import numpy as np
# https://github.com/numba/numba/issues/1269
# def forwards_viterbi_dip_naive(n, m, G, s, e, r):
# # Initialise
# V = np.zeros((m, n, n))
# P = np.zeros((m, n, n)).astype(np.int64)
# c = np.ones(m)
# index = (
# 4*np.equal(G[0,:,:], s[0,0]).astype(np.int64) +
# 2*(G[0,:,:] == 1).astype(np.int64) +
# np.int64(s[0,0] == 1)
# )
# V[0,:,:] = 1/(n**2) * e[0,index]
# r_n = r/n
# for l in range(1,m):
# index = (
# 4*np.equal(G[l,:,:], s[0,l]).astype(np.int64) +
# 2*(G[l,:,:] == 1).astype(np.int64) +
# np.int64(s[0,l] == 1)
# )
# for j1 in range(n):
# for j2 in range(n):
# # Get the vector to maximise over
# v = np.zeros((n,n))
# for k1 in range(n):
# for k2 in range(n):
# v[k1, k2] = V[l-1,k1, k2]
# if ((k1 == j1) and (k2 == j2)):
# v[k1, k2] *= ((1 - r[l])**2 + 2*(1-r[l]) * r_n[l] + r_n[l]**2)
# elif ((k1 == j1) or (k2 == j2)):
# v[k1, k2] *= (r_n[l] * (1 - r[l]) + r_n[l]**2)
# else:
# v[k1, k2] *= r_n[l]**2
# V[l,j1,j2] = np.amax(v) * e[l, index[j1, j2]]
# P[l,j1,j2] = np.argmax(v)
# c[l] = np.amax(V[l,:,:])
# V[l,:,:] *= 1/c[l]
# ll = np.sum(np.log10(c))
# return V, P, ll
# def forwards_viterbi_dip_naive_low_mem(n, m, G, s, e, r):
# # Initialise
# V = np.zeros((n,n))
# P = np.zeros((m,n,n)).astype(np.int64)
# c = np.ones(m)
# index = (
# 4*np.equal(G[0,:,:], s[0,0]).astype(np.int64) +
# 2*(G[0,:,:] == 1).astype(np.int64) +
# np.int64(s[0,0] == 1)
# )
# V_previous = 1/(n**2) * e[0,index]
# r_n = r/n
# # Take a look at Haploid Viterbi implementation in Jeromes code and see if we can pinch some ideas.
# # Diploid Viterbi, with smaller memory footprint.
# for l in range(1,m):
# index = (
# 4*np.equal(G[l,:,:], s[0,l]).astype(np.int64) +
# 2*(G[l,:,:] == 1).astype(np.int64) +
# np.int64(s[0,l] == 1)
# )
# for j1 in range(n):
# for j2 in range(n):
# # Get the vector to maximise over
# v = np.zeros((n,n))
# for k1 in range(n):
# for k2 in range(n):
# v[k1, k2] = V_previous[k1, k2]
# if ((k1 == j1) and (k2 == j2)):
# v[k1, k2] *= ((1 - r[l])**2 + 2*(1-r[l]) * r_n[l] + r_n[l]**2)
# elif ((k1 == j1) or (k2 == j2)):
# v[k1, k2] *= (r_n[l] * (1 - r[l]) + r_n[l]**2)
# else:
# v[k1, k2] *= r_n[l]**2
# V[j1,j2] = np.amax(v) * e[l,index[j1, j2]]
# P[l,j1,j2] = np.argmax(v)
# c[l] = np.amax(V)
# V_previous = np.copy(V) / c[l]
# ll = np.sum(np.log10(c))
# return V, P, ll
# def forwards_viterbi_dip_low_mem(n, m, G, s, e, r):
# # Initialise
# V = np.zeros((n, n))
# P = np.zeros((m,n,n)).astype(np.int64)
# index = (
# 4*np.equal(G[0,:,:], s[0,0]).astype(np.int64) +
# 2*(G[0,:,:] == 1).astype(np.int64) +
# np.int64(s[0,0] == 1)
# )
# V_previous = 1/(n**2) * e[0,index]
# c = np.ones(m)
# r_n = r/n
# # Diploid Viterbi, with smaller memory footprint, rescaling, and using the structure of the HMM.
# for l in range(1,m):
# index = (
# 4*np.equal(G[l,:,:], s[0,l]).astype(np.int64) +
# 2*(G[l,:,:] == 1).astype(np.int64) +
# np.int64(s[0,l] == 1)
# )
# c[l] = np.amax(V_previous)
# argmax = np.argmax(V_previous)
# V_previous *= 1/c[l]
# V_rowcol_max = np_amax(V_previous, 0)
# arg_rowcol_max = np_argmax(V_previous, 0)
# no_switch = (1 - r[l])**2 + 2*(r_n[l]*(1 - r[l])) + r_n[l]**2
# single_switch = r_n[l]*(1 - r[l]) + r_n[l]**2
# double_switch = r_n[l]**2
# j1_j2 = 0
# for j1 in range(n):
# for j2 in range(n):
# V_single_switch = max(V_rowcol_max[j1], V_rowcol_max[j2])
# P_single_switch = np.argmax(np.array([V_rowcol_max[j1], V_rowcol_max[j2]]))
# if P_single_switch == 0:
# template_single_switch = j1*n + arg_rowcol_max[j1]
# else:
# template_single_switch = arg_rowcol_max[j2]*n + j2
# V[j1,j2] = V_previous[j1,j2] * no_switch # No switch in either
# P[l, j1, j2] = j1_j2
# # Single or double switch?
# single_switch_tmp = single_switch * V_single_switch
# if (single_switch_tmp > double_switch):
# # Then single switch is the alternative
# if (V[j1,j2] < single_switch * V_single_switch):
# V[j1,j2] = single_switch * V_single_switch
# P[l, j1, j2] = template_single_switch
# else:
# # Double switch is the alternative
# if V[j1, j2] < double_switch:
# V[j1, j2] = double_switch
# P[l, j1, j2] = argmax
# V[j1,j2] *= e[l, index[j1, j2]]
# j1_j2 += 1
# V_previous = np.copy(V)
# ll = np.sum(np.log10(c)) + np.log10(np.amax(V))
# return V, P, ll
# def forwards_viterbi_dip_naive_vec(n, m, G, s, e, r):
# # Initialise
# V = np.zeros((m,n,n))
# P = np.zeros((m,n,n)).astype(np.int64)
# c = np.ones(m)
# index = (
# 4*np.equal(G[0,:,:], s[0,0]).astype(np.int64) +
# 2*(G[0,:,:] == 1).astype(np.int64) +
# np.int64(s[0,0] == 1)
# )
# V[0,:,:] = 1/(n**2) * e[0,index]
# r_n = r/n
# # Jumped the gun - vectorising.
# for l in range(1,m):
# index = (
# 4*np.equal(G[l,:,:], s[0,l]).astype(np.int64) +
# 2*(G[l,:,:] == 1).astype(np.int64) +
# np.int64(s[0,l] == 1)
# )
# for j1 in range(n):
# for j2 in range(n):
# v = (r_n[l]**2) * np.ones((n,n))
# v[j1,j2] += (1-r[l])**2
# v[j1, :] += (r_n[l] * (1 - r[l]))
# v[:, j2] += (r_n[l] * (1 - r[l]))
# v *= V[l-1,:,:]
# V[l,j1,j2] = np.amax(v) * e[l,index[j1, j2]]
# P[l,j1,j2] = np.argmax(v)
# c[l] = np.amax(V[l,:,:])
# V[l,:,:] *= 1/c[l]
# ll = np.sum(np.log10(c))
# return V, P, ll
def forwards_viterbi_dip_naive_full_vec(n, m, G, s, e, r):
"""Fully vectorised naive LS diploid Viterbi algorithm using numpy."""
char_both = np.eye(n * n).ravel().reshape((n, n, n, n))
char_col = np.tile(np.sum(np.eye(n * n).reshape((n, n, n, n)), 3), (n, 1, 1, 1))
char_row = np.copy(char_col).T
rows, cols = np.ogrid[:n, :n]
# Initialise
V = np.zeros((m, n, n))
P = np.zeros((m, n, n)).astype(np.int64)
c = np.ones(m)
index = (
4 * np.equal(G[0, :, :], s[0, 0]).astype(np.int64)
+ 2 * (G[0, :, :] == 1).astype(np.int64)
+ np.int64(s[0, 0] == 1)
)
V[0, :, :] = 1 / (n ** 2) * e[0, index]
r_n = r / n
for l in range(1, m):
index = (
4 * np.equal(G[l, :, :], s[0, l]).astype(np.int64)
+ 2 * (G[l, :, :] == 1).astype(np.int64)
+ np.int64(s[0, l] == 1)
)
v = (
(r_n[l] ** 2)
+ (1 - r[l]) ** 2 * char_both
+ (r_n[l] * (1 - r[l])) * (char_col + char_row)
)
v *= V[l - 1, :, :]
P[l, :, :] = np.argmax(v.reshape(n, n, -1), 2) # Have to flatten to use argmax
V[l, :, :] = v.reshape(n, n, -1)[rows, cols, P[l, :, :]] * e[l, index]
c[l] = np.amax(V[l, :, :])
V[l, :, :] *= 1 / c[l]
ll = np.sum(np.log10(c))
return V, P, ll
def get_phased_path(n, path):
"""Obtain the phased path."""
return np.unravel_index(path, (n, n))
| 33.307965 | 132 | 0.435145 |
4896e1b1c5caef0d1e5aee9a140b1ba801b67e72 | 6,704 | py | Python | src/test/test_pg_function.py | gyana/alembic_utils | a4bc7f5f025335faad7b178eb84ab78093e525ec | [
"MIT"
] | null | null | null | src/test/test_pg_function.py | gyana/alembic_utils | a4bc7f5f025335faad7b178eb84ab78093e525ec | [
"MIT"
] | null | null | null | src/test/test_pg_function.py | gyana/alembic_utils | a4bc7f5f025335faad7b178eb84ab78093e525ec | [
"MIT"
] | null | null | null | from alembic_utils.pg_function import PGFunction
from alembic_utils.replaceable_entity import register_entities
from alembic_utils.testbase import TEST_VERSIONS_ROOT, run_alembic_command
TO_UPPER = PGFunction(
schema="public",
signature="toUpper(some_text text default 'my text!')",
definition="""
returns text
as
$$ begin return upper(some_text) || 'abc'; end; $$ language PLPGSQL;
""",
)
| 34.735751 | 100 | 0.702118 |
48976b6d6b5db52348271fa437cb2c3858865703 | 1,723 | py | Python | proof_of_work/multiagent/turn_based/v6/environmentv6.py | michaelneuder/parkes_lab_fa19 | 18d9f564e0df9c17ac5d54619ed869d778d4f6a4 | [
"MIT"
] | null | null | null | proof_of_work/multiagent/turn_based/v6/environmentv6.py | michaelneuder/parkes_lab_fa19 | 18d9f564e0df9c17ac5d54619ed869d778d4f6a4 | [
"MIT"
] | null | null | null | proof_of_work/multiagent/turn_based/v6/environmentv6.py | michaelneuder/parkes_lab_fa19 | 18d9f564e0df9c17ac5d54619ed869d778d4f6a4 | [
"MIT"
] | null | null | null | import numpy as np
np.random.seed(0)
if __name__ == "__main__":
main() | 31.327273 | 77 | 0.585607 |
4897778aee005c5aa1bda6eba1bb9679879bf2ca | 549 | py | Python | passgen-py/setup.py | hassanselim0/PassGen | 70e0187bfd58e0dc1fba5dbeea5b95769a599f60 | [
"MIT"
] | null | null | null | passgen-py/setup.py | hassanselim0/PassGen | 70e0187bfd58e0dc1fba5dbeea5b95769a599f60 | [
"MIT"
] | 1 | 2020-08-11T22:00:51.000Z | 2020-08-11T23:55:48.000Z | passgen-py/setup.py | hassanselim0/PassGen | 70e0187bfd58e0dc1fba5dbeea5b95769a599f60 | [
"MIT"
] | 1 | 2020-08-10T15:50:21.000Z | 2020-08-10T15:50:21.000Z | from setuptools import setup, find_packages
# Package metadata for the passgen-py distribution.
setup(
    name='passgen-py',
    packages=find_packages(),
    version='1.1',
    description='Generate Passwords Deterministically based on a Master Password.',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3'
    ],
    python_requires='>=3.6, <4',
    # Installs a `passgen` console command that dispatches to src:cli.
    # NOTE(review): the entry point references a top-level module named `src`
    # while `packages=find_packages()` — confirm the package layout matches.
    entry_points={
        'console_scripts': [
            'passgen=src:cli',
        ],
    },
    install_requires=['click', 'pyperclip'],
)
| 26.142857 | 83 | 0.599271 |
489780fa9ccacfe9a097c426e6e4d2cf96e01913 | 163 | py | Python | python-peculiarities/source/MultiplicationComplication.py | noamt/presentations | c5031ae0558d19be920ee1641ba2fc5f4fd88773 | [
"Unlicense"
] | null | null | null | python-peculiarities/source/MultiplicationComplication.py | noamt/presentations | c5031ae0558d19be920ee1641ba2fc5f4fd88773 | [
"Unlicense"
] | null | null | null | python-peculiarities/source/MultiplicationComplication.py | noamt/presentations | c5031ae0558d19be920ee1641ba2fc5f4fd88773 | [
"Unlicense"
] | null | null | null | # https://codegolf.stackexchange.com/a/11480
# Build the sequence n*(n+1) for n = 0..9 and print one value per line.
multiplication = [n * (n + 1) for n in range(10)]
for value in multiplication:
    print(value)
4898793ace916333da3e62990ff5fb14ce91eb0e | 4,762 | py | Python | bin/mkSampleInfo.py | icbi-lab/nextNEOpi | d9f6ccf5178e7ef1742b95e740ce3f39405f21dd | [
"BSD-3-Clause-Clear"
] | 24 | 2021-06-16T07:20:43.000Z | 2022-03-23T05:40:01.000Z | bin/mkSampleInfo.py | abyssum/nextNEOpi | f7de4c76c7d98be485f8db0999ad278cd17fa642 | [
"BSD-3-Clause-Clear"
] | 2 | 2021-12-09T16:43:45.000Z | 2022-02-18T14:03:36.000Z | bin/mkSampleInfo.py | abyssum/nextNEOpi | f7de4c76c7d98be485f8db0999ad278cd17fa642 | [
"BSD-3-Clause-Clear"
] | 5 | 2021-08-25T06:54:47.000Z | 2022-03-03T06:11:31.000Z | #!/usr/bin/env python
"""
Requirements:
* Python >= 3.7
* Pysam
Copyright (c) 2021 Dietmar Rieder <dietmar.rieder@i-med.ac.at>
MIT License <http://opensource.org/licenses/MIT>
"""
RELEASE = False
__version_info__ = (
"0",
"1",
)
__version__ = ".".join(__version_info__)
__version__ += "-dev" if not RELEASE else ""
import os
import sys
import argparse
def _file_write(fname):
"""Returns an open file handle if the given filename exists."""
return open(fname, "w")
def _file_read(fname):
"""Returns an open file handle if the given filename exists."""
return open(fname, "r")
if __name__ == "__main__":
    # NOTE(review): `usage` is computed but never used below — presumably a
    # leftover from an earlier help-text mechanism.
    usage = __doc__.split("\n\n\n")
    parser = argparse.ArgumentParser(description="Compile sample info sheet")
    # Input/output files are opened eagerly by argparse via the
    # _file_read/_file_write factories defined above.
    parser.add_argument(
        "--tmb",
        required=True,
        type=_file_read,
        help="TMB file",
    )
    parser.add_argument(
        "--tmb_coding",
        required=True,
        type=_file_read,
        help="TMB coding file",
    )
    parser.add_argument(
        "--csin",
        required=True,
        type=_file_read,
        help="CSiN file",
    )
    parser.add_argument(
        "--out",
        required=True,
        type=_file_write,
        help="Output file",
    )
    parser.add_argument(
        "--sample_name",
        required=True,
        type=str,
        help="Sample name",
    )
    parser.add_argument(
        "--version", action="version", version="%(prog)s " + __version__
    )
    args = parser.parse_args()
    tmb = args.tmb
    tmb_coding = args.tmb_coding
    csin = args.csin
    out = args.out
    sample_name = args.sample_name
    # Accumulator dicts, initialised to zero; filled in by the parsers below.
    tmb_info = {
        "cov_genome": 0,
        "cov_coding": 0,
        "variants_tot": 0,
        "variants_coding": 0,
        "TMB": 0,
        "TMB_clonal": 0,
        "TMB_coding": 0,
        "TMB_clonal_coding": 0,
    }
    csin_info = {"MHCI": 0, "MHCII": 0, "combined": 0}
    # parse_tmb/parse_csin/write_output are presumably defined elsewhere in
    # the full file (not visible in this chunk) — TODO confirm.
    tmb_info = parse_tmb(tmb, tmb_info, "all")
    tmb_info = parse_tmb(tmb_coding, tmb_info, "coding")
    csin_info = parse_csin(csin, csin_info)
    write_output(out, tmb_info, csin_info, sample_name)
    out.close()
| 26.309392 | 77 | 0.545569 |
489a6ae22cd0b248814c3b6aa65494aabadf9db8 | 3,115 | py | Python | garrick.py | SebNickel/garrick | b2ebf24054bc2770ced1674bd102022f8d01b169 | [
"MIT"
] | null | null | null | garrick.py | SebNickel/garrick | b2ebf24054bc2770ced1674bd102022f8d01b169 | [
"MIT"
] | null | null | null | garrick.py | SebNickel/garrick | b2ebf24054bc2770ced1674bd102022f8d01b169 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import colorama
from pick_db_file import pick_db_file
import db_connection
import card_repository
from review_cards import review_cards
from new_card import new_card
from new_cards import new_cards
import review
from user_colors import print_info, print_instruction, print_error
from usage_info import print_usage_info
if __name__ == '__main__':
main()
| 40.454545 | 97 | 0.65618 |
489b07c5f60a2349d39829b932ee2b381db5353d | 14,996 | py | Python | perceiver/train/dataset.py | kawa-work/deepmind-research | 8fb75643598f680fdde8d20342b1b82bd2c0abb2 | [
"Apache-2.0"
] | 10,110 | 2019-08-27T20:05:30.000Z | 2022-03-31T16:31:56.000Z | perceiver/train/dataset.py | subhayuroy/deepmind-research | 769bfdbeafbcb472cb8e2c6cfa746b53ac82efc2 | [
"Apache-2.0"
] | 317 | 2019-11-09T10:19:10.000Z | 2022-03-31T00:05:19.000Z | perceiver/train/dataset.py | subhayuroy/deepmind-research | 769bfdbeafbcb472cb8e2c6cfa746b53ac82efc2 | [
"Apache-2.0"
] | 2,170 | 2019-08-28T12:53:36.000Z | 2022-03-31T13:15:11.000Z | # Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet dataset with pre-processing and augmentation.
Deng, et al CVPR 2009 - ImageNet: A large-scale hierarchical image database.
https://image-net.org/
"""
import enum
from typing import Any, Generator, Mapping, Optional, Sequence, Text, Tuple
import jax
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
from perceiver.train import autoaugment
Batch = Mapping[Text, np.ndarray]
MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
AUTOTUNE = tf.data.experimental.AUTOTUNE
INPUT_DIM = 224 # The number of pixels in the image resize.
# cutmix_padding, my_cutmix, my_mixup, and my_mixup_cutmix taken from:
# https://github.com/deepmind/deepmind-research/blob/master/nfnets/dataset.py
def cutmix_padding(h, w):
  """Returns image mask for CutMix.
  Taken from (https://github.com/google/edward2/blob/master/experimental
  /marginalization_mixup/data_utils.py#L367)
  Args:
    h: image height.
    w: image width.
  """
  # Random patch centre, sampled uniformly over the image.
  r_x = tf.random.uniform([], 0, w, tf.int32)
  r_y = tf.random.uniform([], 0, h, tf.int32)
  # Beta dist in paper, but they used Beta(1,1) which is just uniform.
  image1_proportion = tf.random.uniform([])
  # Patch side ratio is sqrt(1 - lambda) so the patch AREA is (1 - lambda).
  patch_length_ratio = tf.math.sqrt(1 - image1_proportion)
  r_w = tf.cast(patch_length_ratio * tf.cast(w, tf.float32), tf.int32)
  r_h = tf.cast(patch_length_ratio * tf.cast(h, tf.float32), tf.int32)
  # Patch corners, clipped to the image bounds (so edge patches shrink).
  bbx1 = tf.clip_by_value(tf.cast(r_x - r_w // 2, tf.int32), 0, w)
  bby1 = tf.clip_by_value(tf.cast(r_y - r_h // 2, tf.int32), 0, h)
  bbx2 = tf.clip_by_value(tf.cast(r_x + r_w // 2, tf.int32), 0, w)
  bby2 = tf.clip_by_value(tf.cast(r_y + r_h // 2, tf.int32), 0, h)
  # Create the binary mask.
  pad_left = bbx1
  pad_top = bby1
  pad_right = tf.maximum(w - bbx2, 0)
  pad_bottom = tf.maximum(h - bby2, 0)
  # r_h/r_w are reused here for the clipped patch size.
  r_h = bby2 - bby1
  r_w = bbx2 - bbx1
  # Ones inside the patch, zeros elsewhere.
  mask = tf.pad(
      tf.ones((r_h, r_w)),
      paddings=[[pad_top, pad_bottom], [pad_left, pad_right]],
      mode='CONSTANT',
      constant_values=0)
  mask.set_shape((h, w))
  return mask[..., None]  # Add channel dim.
def my_cutmix(batch):
  """Apply CutMix: https://arxiv.org/abs/1905.04899."""
  # Shallow-copy so the caller's dict is not mutated.
  batch = dict(**batch)
  half = tf.shape(batch['images'])[0] // 2
  first_images = batch['images'][:half]
  second_images = batch['images'][half:]
  patch_mask = batch['mask'][:half]
  # Paste the masked region of the first half over the second half.
  mixed = patch_mask * first_images + (1.0 - patch_mask) * second_images
  return {
      'images': mixed,
      'labels': batch['labels'][:half],
      'mix_labels': batch['labels'][half:],
      'ratio': batch['cutmix_ratio'][:half],
  }
def my_mixup(batch):
  """Apply mixup: https://arxiv.org/abs/1710.09412."""
  # Shallow-copy so the caller's dict is not mutated.
  batch = dict(**batch)
  half = tf.shape(batch['images'])[0] // 2
  # Broadcast the per-example mixing ratio over the spatial/channel dims.
  lam = batch['mixup_ratio'][:half, None, None, None]
  blended = lam * batch['images'][:half] + (1.0 - lam) * batch['images'][half:]
  return {'images': blended,
          'labels': batch['labels'][:half],
          'mix_labels': batch['labels'][half:],
          # Squeeze the broadcast dims back out.
          'ratio': lam[..., 0, 0, 0]}
def my_mixup_cutmix(batch):
  """Apply mixup to half the batch, and cutmix to the other."""
  batch = dict(**batch)
  # The batch is split into quarters: [0,bs) mixed with [bs,2bs) via mixup,
  # [2bs,3bs) mixed with [3bs,4bs) via cutmix.
  bs = tf.shape(batch['images'])[0] // 4
  mixup_ratio = batch['mixup_ratio'][:bs, None, None, None]
  mixup_images = (mixup_ratio * batch['images'][:bs]
                  + (1.0 - mixup_ratio) * batch['images'][bs:2*bs])
  mixup_labels = batch['labels'][:bs]
  mixup_mix_labels = batch['labels'][bs:2*bs]
  cutmix_mask = batch['mask'][2*bs:3*bs]
  cutmix_images = (cutmix_mask * batch['images'][2*bs:3*bs]
                   + (1.0 - cutmix_mask) * batch['images'][-bs:])
  cutmix_labels = batch['labels'][2*bs:3*bs]
  cutmix_mix_labels = batch['labels'][-bs:]
  cutmix_ratio = batch['cutmix_ratio'][2*bs : 3*bs]
  # Output batch is half the input size: bs mixup examples + bs cutmix ones.
  return {'images': tf.concat([mixup_images, cutmix_images], axis=0),
          'labels': tf.concat([mixup_labels, cutmix_labels], axis=0),
          'mix_labels': tf.concat([mixup_mix_labels, cutmix_mix_labels], 0),
          'ratio': tf.concat([mixup_ratio[..., 0, 0, 0], cutmix_ratio], axis=0)}
def _to_tfds_split(split: Split) -> tfds.Split:
  """Returns the TFDS split appropriately sharded."""
  # NOTE: Imagenet did not release labels for the test split used in the
  # competition, so it has been typical at DeepMind to consider the VALID
  # split the TEST split and to reserve 10k images from TRAIN for VALID.
  # Hence TRAIN/TRAIN_AND_VALID/VALID all map onto the TFDS TRAIN split,
  # and TEST maps onto the TFDS VALIDATION split.
  if split in (
      Split.TRAIN, Split.TRAIN_AND_VALID, Split.VALID):
    return tfds.Split.TRAIN
  else:
    assert split == Split.TEST
    return tfds.Split.VALIDATION
def _shard(
    split: Split, shard_index: int, num_shards: int) -> Tuple[int, int]:
  """Returns [start, end) for the given shard index."""
  assert shard_index < num_shards
  # Partition the example ids into num_shards near-equal chunks and take ours.
  example_ids = np.arange(split.num_examples)
  chunk = np.array_split(example_ids, num_shards)[shard_index]
  start = chunk[0]
  end = chunk[-1] + 1
  if split == Split.TRAIN:
    # Our TRAIN = TFDS_TRAIN[10000:] and VALID = TFDS_TRAIN[:10000], so the
    # TRAIN range is shifted past the validation examples.
    shift = Split.VALID.num_examples
    start, end = start + shift, end + shift
  return start, end
def _preprocess_image(
    image_bytes: tf.Tensor,
    is_training: bool,
    image_size: Sequence[int],
    augmentation_settings: Mapping[str, Any],
) -> Tuple[tf.Tensor, tf.Tensor]:
  """Returns processed and resized images."""
  # Get the image crop.
  # Training uses a random crop plus horizontal flip; eval uses a
  # deterministic center crop.
  if is_training:
    image, im_shape = _decode_and_random_crop(image_bytes)
    image = tf.image.random_flip_left_right(image)
  else:
    image, im_shape = _decode_and_center_crop(image_bytes)
  assert image.dtype == tf.uint8
  # Optionally apply RandAugment: https://arxiv.org/abs/1909.13719
  if is_training:
    if augmentation_settings['randaugment'] is not None:
      # Input and output images are dtype uint8.
      image = autoaugment.distort_image_with_randaugment(
          image,
          num_layers=augmentation_settings['randaugment']['num_layers'],
          magnitude=augmentation_settings['randaugment']['magnitude'])
  # Resize and normalize the image crop.
  # NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without
  # clamping overshoots. This means values returned will be outside the range
  # [0.0, 255.0] (e.g. we have observed outputs in the range [-51.1, 336.6]).
  image = tf.image.resize(
      image, image_size, tf.image.ResizeMethod.BICUBIC)
  image = _normalize_image(image)
  return image, im_shape
def _normalize_image(image: tf.Tensor) -> tf.Tensor:
  """Normalize the image to zero mean and unit variance."""
  # Per-channel ImageNet statistics, broadcast over (H, W, 3).
  mean = tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
  stddev = tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype)
  image = (image - mean) / stddev
  return image
def _distorted_bounding_box_crop(
    image_bytes: tf.Tensor,
    *,
    jpeg_shape: tf.Tensor,
    bbox: tf.Tensor,
    min_object_covered: float,
    aspect_ratio_range: Tuple[float, float],
    area_range: Tuple[float, float],
    max_attempts: int,
) -> Tuple[tf.Tensor, tf.Tensor]:
  """Generates cropped_image using one of the bboxes randomly distorted."""
  # Sample a random crop window satisfying the coverage/aspect/area
  # constraints; falls back to the whole image after max_attempts.
  bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
      jpeg_shape,
      bounding_boxes=bbox,
      min_object_covered=min_object_covered,
      aspect_ratio_range=aspect_ratio_range,
      area_range=area_range,
      max_attempts=max_attempts,
      use_image_if_no_bounding_boxes=True)
  # Crop the image to the specified bounding box.
  offset_y, offset_x, _ = tf.unstack(bbox_begin)
  target_height, target_width, _ = tf.unstack(bbox_size)
  crop_window = [offset_y, offset_x, target_height, target_width]
  # image_bytes may be either an encoded JPEG string (decode-and-crop in one
  # step) or an already-decoded image tensor.
  if image_bytes.dtype == tf.dtypes.string:
    image = tf.image.decode_and_crop_jpeg(image_bytes,
                                          tf.stack(crop_window),
                                          channels=3)
  else:
    image = tf.image.crop_to_bounding_box(image_bytes, *crop_window)
  im_shape = tf.stack([target_height, target_width])
  return image, im_shape
def _decode_whole_image(image_bytes: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
  """Decodes the full JPEG and returns (image, (height, width))."""
  image = tf.io.decode_jpeg(image_bytes, channels=3)
  # Shape is read from the JPEG header, without decoding a second time.
  im_shape = tf.io.extract_jpeg_shape(image_bytes, output_type=tf.int32)
  return image, im_shape
def _decode_and_random_crop(
    image_bytes: tf.Tensor
) -> Tuple[tf.Tensor, tf.Tensor]:
  """Make a random crop of INPUT_DIM."""
  # Accept either an encoded JPEG string or a decoded image tensor.
  if image_bytes.dtype == tf.dtypes.string:
    jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
  else:
    jpeg_shape = tf.shape(image_bytes)
  # Whole-image bounding box: the sampler may crop anywhere.
  bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
  image, im_shape = _distorted_bounding_box_crop(
      image_bytes,
      jpeg_shape=jpeg_shape,
      bbox=bbox,
      min_object_covered=0.1,
      aspect_ratio_range=(3 / 4, 4 / 3),
      area_range=(0.08, 1.0),
      max_attempts=10)
  # The sampler returns the full image when it fails to find a valid crop.
  if tf.reduce_all(tf.equal(jpeg_shape, tf.shape(image))):
    # If the random crop failed fall back to center crop.
    image, im_shape = _decode_and_center_crop(image_bytes, jpeg_shape)
  return image, im_shape
def _center_crop(image, crop_dim):
  """Center crops an image to a target dimension."""
  height, width = image.shape[0], image.shape[1]
  # Round the split of the leftover border towards the top-left.
  top = (height - crop_dim + 1) // 2
  left = (width - crop_dim + 1) // 2
  return tf.image.crop_to_bounding_box(image, top, left, crop_dim, crop_dim)
def _decode_and_center_crop(
    image_bytes: tf.Tensor,
    jpeg_shape: Optional[tf.Tensor] = None,
) -> Tuple[tf.Tensor, tf.Tensor]:
  """Crops to center of image with padding then scales."""
  if jpeg_shape is None:
    # Accept either an encoded JPEG string or a decoded image tensor.
    if image_bytes.dtype == tf.dtypes.string:
      jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
    else:
      jpeg_shape = tf.shape(image_bytes)
  image_height = jpeg_shape[0]
  image_width = jpeg_shape[1]
  # Standard "resize with 32px margin" crop: take INPUT_DIM/(INPUT_DIM+32)
  # of the shorter side, centered.
  padded_center_crop_size = tf.cast(
      ((INPUT_DIM / (INPUT_DIM + 32)) *
       tf.cast(tf.minimum(image_height, image_width), tf.float32)), tf.int32)
  offset_height = ((image_height - padded_center_crop_size) + 1) // 2
  offset_width = ((image_width - padded_center_crop_size) + 1) // 2
  crop_window = [offset_height, offset_width,
                 padded_center_crop_size, padded_center_crop_size]
  if image_bytes.dtype == tf.dtypes.string:
    image = tf.image.decode_and_crop_jpeg(image_bytes,
                                          tf.stack(crop_window),
                                          channels=3)
  else:
    image = tf.image.crop_to_bounding_box(image_bytes, *crop_window)
  im_shape = tf.stack([padded_center_crop_size, padded_center_crop_size])
  return image, im_shape
| 35.367925 | 80 | 0.684449 |
489b33857f5199eb9d0dc568f0aa601495f6f304 | 12,621 | py | Python | pyapprox/manipulate_polynomials.py | samtx/pyapprox | c926d910e30fbcfed7d0621175d3b0268d59f852 | [
"MIT"
] | null | null | null | pyapprox/manipulate_polynomials.py | samtx/pyapprox | c926d910e30fbcfed7d0621175d3b0268d59f852 | [
"MIT"
] | null | null | null | pyapprox/manipulate_polynomials.py | samtx/pyapprox | c926d910e30fbcfed7d0621175d3b0268d59f852 | [
"MIT"
] | null | null | null | import numpy as np
from scipy.special import factorial
from pyapprox.indexing import hash_array
from pyapprox.indexing import compute_hyperbolic_level_indices
def multiply_multivariate_polynomials(indices1, coeffs1, indices2, coeffs2):
    """
    Multiply two multivariate polynomials given in sparse form, collecting
    the coefficients of repeated terms.

    Parameters
    ----------
    indices1 : np.ndarray (num_vars, num_indices1)
        Multivariate degree indices of the first polynomial.
    coeffs1 : np.ndarray (num_indices1)
        Coefficients of the first polynomial.
    indices2 : np.ndarray (num_vars, num_indices2)
        Multivariate degree indices of the second polynomial.
    coeffs2 : np.ndarray (num_indices2)
        Coefficients of the second polynomial.

    Returns
    -------
    indices : np.ndarray (num_vars, num_unique_terms)
        The multivariate indices of the product polynomial.
    coeffs : np.ndarray (num_unique_terms)
        The coefficients of the product polynomial.
    """
    num_vars = indices1.shape[0]
    num_indices1 = indices1.shape[1]
    num_indices2 = indices2.shape[1]
    assert num_indices1 == coeffs1.shape[0]
    assert num_indices2 == coeffs2.shape[0]
    assert num_vars == indices2.shape[0]
    # Map each multivariate index (as a plain hashable tuple) to its column
    # in the output arrays. Using tuple() instead of the custom hash_array
    # helper gives identical behavior with no extra dependency.
    indices_dict = dict()
    # Over-allocate for the worst case (no repeated terms); trimmed below.
    max_num_indices = num_indices1*num_indices2
    indices = np.empty((num_vars, max_num_indices), int)
    coeffs = np.empty((max_num_indices), float)
    kk = 0
    for ii in range(num_indices1):
        index1 = indices1[:, ii]
        coeff1 = coeffs1[ii]
        for jj in range(num_indices2):
            index = index1+indices2[:, jj]
            key = tuple(index)
            coeff = coeff1*coeffs2[jj]
            if key in indices_dict:
                # Repeated term: accumulate into the existing coefficient.
                coeffs[indices_dict[key]] += coeff
            else:
                indices_dict[key] = kk
                indices[:, kk] = index
                coeffs[kk] = coeff
                kk += 1
    # Trim the buffers to the number of unique terms actually produced.
    indices = indices[:, :kk]
    coeffs = coeffs[:kk]
    return indices, coeffs
def coeffs_of_power_of_nd_linear_polynomial(num_vars, degree, linear_coeffs):
    """
    Raise a linear multivariate polynomial (with no constant term) to a
    power and return the resulting polynomial.

    Parameters
    ----------
    num_vars : integer
        The number of variables.
    degree : integer
        The power to raise the linear polynomial to.
    linear_coeffs : np.ndarray (num_vars)
        The coefficients of the linear polynomial.

    Returns
    -------
    coeffs : np.ndarray (num_terms)
        The coefficients of the resulting polynomial.
    indices : np.ndarray (num_vars, num_terms)
        The multivariate indices of the resulting polynomial.
    """
    assert len(linear_coeffs) == num_vars
    coeffs, indices = multinomial_coeffs_of_power_of_nd_linear_polynomial(
        num_vars, degree)
    # Scale each multinomial coefficient by the product of the linear
    # coefficients raised to the powers recorded in its index.
    for term in range(indices.shape[1]):
        for var in range(num_vars):
            coeffs[term] *= linear_coeffs[var]**indices[var, term]
    return coeffs, indices
def substitute_polynomial_for_variables_in_single_basis_term(
        indices_in,coeffs_in,basis_index,basis_coeff,var_idx,global_var_idx,
        num_global_vars):
    """
    Substitute polynomials for selected variables of a single basis term.

    Parameters
    ----------
    indices_in : list of np.ndarray
        One index array per input polynomial being substituted.
    coeffs_in : list of np.ndarray
        One coefficient array per input polynomial.
    basis_index : np.ndarray (num_basis_vars, 1)
        The multivariate index of the single basis term.
    basis_coeff : np.ndarray (1)
        The coefficient of the basis term.
    var_idx : np.ndarray (nsub_vars)
        The dimensions in basis_index which will be substituted

    global_var_idx : [ np.ndarray(nvars[ii]) for ii in num_inputs]
        The index of the active variables for each input

    num_global_vars : integer
        The total number of variables in the global index space.

    Returns
    -------
    ind1 : np.ndarray
        The indices of the substituted polynomial.
    c1 : np.ndarray
        The coefficients of the substituted polynomial.
    """
    num_inputs = var_idx.shape[0]
    assert num_inputs == len(indices_in)
    assert num_inputs == len(coeffs_in)
    assert basis_coeff.shape[0] == 1
    assert var_idx.max() < basis_index.shape[0]
    assert basis_index.shape[1] == 1
    assert len(global_var_idx) == num_inputs
    # store input indices in global_var_idx
    temp = []
    for ii in range(num_inputs):
        ind = np.zeros((num_global_vars, indices_in[ii].shape[1]))
        ind[global_var_idx, :] = indices_in[ii]
        temp.append(ind)
    indices_in = temp
    # Raise the first substituted polynomial to the power required by the
    # basis term, then fold in the remaining ones by multiplication.
    jj = 0
    degree = basis_index[var_idx[jj]]
    c1, ind1 = coeffs_of_power_of_polynomial(
        indices_in, coeffs_in[:, jj:jj+1], degree)
    for jj in range(1, var_idx.shape[0]):
        degree = basis_index[var_idx[jj]]
        c2, ind2 = coeffs_of_power_of_polynomial(
            indices_in, coeffs_in[:, jj:jj+1], degree)
        ind1, c1 = multiply_multivariate_polynomials(ind1, c1, ind2, c2)
    # NOTE(review, from original author): this mask may be wrong — possible
    # confusion between global and var indices; verify before relying on it.
    mask = np.ones(basis_index.shape[0], dtype=bool)
    mask[var_idx] = False
    # (debug print of ind1/mask shapes removed)
    ind1[mask, :] += basis_index[mask]
    c1 *= basis_coeff
    return ind1, c1
def coeffs_of_power_of_polynomial(indices, coeffs, degree):
    """
    Compute the polynomial (coefficients and indices) obtained by raising
    a multivariate polynomial to some power.
    TODO: Deprecate coeffs_of_power_of_nd_linear_polynomial as that function
    can be obtained as a special case of this function
    Parameters
    ----------
    indices : np.ndarray (num_vars,num_terms)
        The indices of the multivariate polynomial
    coeffs: np.ndarray (num_terms,num_qoi)
        The coefficients of the polynomial; one row per column of ``indices``
    degree : integer
        The power the polynomial is raised to
    Returns
    -------
    coeffs: np.ndarray (num_new_terms,num_qoi)
        The coefficients of the new polynomial
    indices : np.ndarray (num_vars, num_new_terms)
        The set of multivariate indices that define the new polynomial
    """
    num_vars, num_terms = indices.shape
    assert indices.shape[1]==coeffs.shape[0]
    # Expand (c_1*x^{i_1}+...+c_T*x^{i_T})**degree via the multinomial
    # theorem: each output term selects a power for every input term.
    multinomial_coeffs, multinomial_indices = \
        multinomial_coeffs_of_power_of_nd_linear_polynomial(num_terms, degree)
    new_indices = np.zeros((num_vars,multinomial_indices.shape[1]))
    # Seed each output coefficient with its multinomial coefficient,
    # replicated across the num_qoi columns of coeffs.
    new_coeffs = np.tile(multinomial_coeffs[:,np.newaxis],coeffs.shape[1])
    for ii in range(multinomial_indices.shape[1]):
        multinomial_index = multinomial_indices[:,ii]
        for dd in range(num_terms):
            deg = multinomial_index[dd]
            # multiply in c_dd**deg and add deg copies of the dd-th input
            # term's exponents to this output term's multivariate index
            new_coeffs[ii] *= coeffs[dd]**deg
            new_indices[:,ii] += indices[:,dd]*deg
    return new_coeffs, new_indices
def multinomial_coefficient(index):
    """Evaluate the multinomial coefficient of an index [i1,i2,...,id],
    i.e. (i1+i2+...+id)! / (i1! * i2! * ... * id!).

    Parameters
    ----------
    index : multidimensional index
        multidimensional index specifying the polynomial degree in each
        dimension

    Returns
    -------
    coeff : double
        the multinomial coefficient
    """
    total_degree = np.sum(index)
    return factorial(total_degree)/np.prod(factorial(index))
def multinomial_coeffs_of_power_of_nd_linear_polynomial(num_vars,degree):
    """Return the multinomial coefficients, and the corresponding
    multivariate indices, of the terms produced when a linear multivariate
    polynomial (with no constant term) is raised to a power.

    E.g. (x1+x2+x3)**2 = x1**2+2*x1*x2+2*x1*x3+x2**2+2*x2*x3+x3**2, whose
    quadratic terms have coefficients [1,2,2,1,2,1].

    Parameters
    ----------
    num_vars : integer
        the dimension of the multivariate polynomial
    degree : integer
        the power of the linear polynomial

    Returns
    -------
    coeffs: np.ndarray (num_terms)
        the multinomial coefficient of each term of the expanded polynomial
    indices: np.ndarray (num_terms)
        the multivariate index of each term of the expanded polynomial
    """
    level_indices = compute_hyperbolic_level_indices(num_vars,degree,1.0)
    return multinomial_coefficients(level_indices), level_indices
def add_polynomials(indices_list, coeffs_list):
    """
    Add many polynomials together.
    Example:
        p1 = x1**2+x2+x3, p2 = x2**2+2*x3
        p3 = p1+p2
    return the degrees of each term in the polynomial
        p3 = x1**2+x2+3*x3+x2**2
        [2, 1, 1, 2]
    and the coefficients of each of these terms
        [1., 1., 3., 1.]
    Parameters
    ----------
    indices_list : list [np.ndarray (num_vars,num_indices_i)]
        List of polynomial indices. indices_i may be different for each
        polynomial
    coeffs_list : list [np.ndarray (num_indices_i,num_qoi)]
        List of polynomial coefficients. indices_i may be different for each
        polynomial. num_qoi must be the same for each list element.
    Returns
    -------
    indices: np.ndarray (num_vars,num_terms)
        the polynomial indices of the polynomial obtained from
        summing the polynomials. This will be the union of the indices
        of the input polynomials
    coeffs: np.ndarray (num_terms,num_qoi)
        the polynomial coefficients of the polynomial obtained from
        summing the polynomials
    """
    num_polynomials = len(indices_list)
    assert num_polynomials==len(coeffs_list)
    # maps hash of a multivariate index -> its position in the output lists
    indices_dict = dict()
    indices = []
    coeff = []
    ii=0; kk=0
    # Seed the output with every term of the first polynomial.
    for jj in range(indices_list[ii].shape[1]):
        assert coeffs_list[ii].ndim==2
        assert coeffs_list[ii].shape[0]==indices_list[ii].shape[1]
        index=indices_list[ii][:,jj]
        indices_dict[hash_array(index)]=kk
        indices.append(index)
        # copy so the in-place += below cannot mutate the caller's arrays
        coeff.append(coeffs_list[ii][jj,:].copy())
        kk+=1
    # Merge each remaining polynomial term-by-term into the output.
    for ii in range(1,num_polynomials):
        assert coeffs_list[ii].ndim==2
        assert coeffs_list[ii].shape[0]==indices_list[ii].shape[1]
        for jj in range(indices_list[ii].shape[1]):
            index=indices_list[ii][:,jj]
            key = hash_array(index)
            if key in indices_dict:
                # term already present: accumulate its coefficients
                nn = indices_dict[key]
                coeff[nn]+=coeffs_list[ii][jj,:]
            else:
                # new term: append it and record its output position
                indices_dict[key]=kk
                indices.append(index)
                coeff.append(coeffs_list[ii][jj,:].copy())
                kk+=1
    indices = np.asarray(indices).T
    coeff = np.asarray(coeff)
    return indices, coeff
def get_indices_double_set(indices):
    """
    Given multivariate indices

        [i1,i2,...,]

    compute the double set

        [i1*i1,i1*i2,...,i2*i2,i2*i3...]

    The double set will only contain unique indices.

    Parameters
    ----------
    indices : np.ndarray (num_vars,num_indices)
        The initial indices

    Returns
    -------
    double_set_indices : np.ndarray (num_vars,num_indices)
        The double set of indices
    """
    # Multiplying the polynomial by itself (with placeholder coefficients)
    # yields exactly the unique pairwise-combined indices.
    placeholder_coeffs = np.zeros(indices.shape[1])
    product = multiply_multivariate_polynomials(
        indices, placeholder_coeffs, indices, placeholder_coeffs)
    return product[0]
#Some of these functions can be replaced by numpy functions described at
#https://docs.scipy.org/doc/numpy/reference/routines.polynomials.polynomial.html
| 32.44473 | 120 | 0.659853 |
489cc2435903d89dac82418e6c3f47ec952a38f4 | 12,303 | py | Python | core/data/load_data.py | Originofamonia/mcan-vqa | e7e9fdc654d72dbbcbc03e43ae8a59c16b6d10d1 | [
"Apache-2.0"
] | null | null | null | core/data/load_data.py | Originofamonia/mcan-vqa | e7e9fdc654d72dbbcbc03e43ae8a59c16b6d10d1 | [
"Apache-2.0"
] | null | null | null | core/data/load_data.py | Originofamonia/mcan-vqa | e7e9fdc654d72dbbcbc03e43ae8a59c16b6d10d1 | [
"Apache-2.0"
] | null | null | null | # --------------------------------------------------------
# mcan-vqa (Deep Modular Co-Attention Networks)
# Licensed under The MIT License [see LICENSE for details]
# Written by Yuhao Cui https://github.com/cuiyuhao1996
# --------------------------------------------------------
import h5py
import pickle
import random
import numpy as np
from numpy.random import default_rng
import pandas as pd
import glob, json, torch, time
from torch.utils.data import Dataset, DataLoader
from core.data.data_utils import img_feat_path_load, img_feat_load, ques_load, tokenize, ans_stat
from core.data.data_utils import pad_img_feat, proc_ques, proc_ans, proc_mimic_ans
| 42.71875 | 142 | 0.600992 |
489dcb5eb95e27bdfa01e5e5808a8eedc54c5b9e | 140 | py | Python | src/scrapers/models/__init__.py | jskroodsma/helpradar | d9a2198db30995e790ab4f1611e15b85540cd3f8 | [
"MIT"
] | null | null | null | src/scrapers/models/__init__.py | jskroodsma/helpradar | d9a2198db30995e790ab4f1611e15b85540cd3f8 | [
"MIT"
] | null | null | null | src/scrapers/models/__init__.py | jskroodsma/helpradar | d9a2198db30995e790ab4f1611e15b85540cd3f8 | [
"MIT"
] | null | null | null | from .database import Db
from .initiatives import InitiativeBase, Platform, ImportBatch, InitiativeImport, BatchImportState, InitiativeGroup | 70 | 115 | 0.864286 |
489e4aad3b2feb84feec86ee49098494b4522381 | 3,987 | py | Python | spyder/widgets/ipythonconsole/debugging.py | Bhanditz/spyder | 903ee4ace0f85ece730bcb670b1b92d464486f1a | [
"MIT"
] | 1 | 2019-06-12T17:31:10.000Z | 2019-06-12T17:31:10.000Z | spyder/widgets/ipythonconsole/debugging.py | Bhanditz/spyder | 903ee4ace0f85ece730bcb670b1b92d464486f1a | [
"MIT"
] | null | null | null | spyder/widgets/ipythonconsole/debugging.py | Bhanditz/spyder | 903ee4ace0f85ece730bcb670b1b92d464486f1a | [
"MIT"
] | 1 | 2019-01-16T06:51:50.000Z | 2019-01-16T06:51:50.000Z | # -*- coding: utf-8 -*-
#
# Copyright Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Widget that handles communications between a console in debugging
mode and Spyder
"""
import ast
from qtpy.QtCore import Qt
from qtconsole.rich_jupyter_widget import RichJupyterWidget
| 36.577982 | 80 | 0.600953 |
489e5789fc9bdd522af9556ca44141058ccb8f59 | 27 | py | Python | python/testData/completion/relativeImport/pkg/main.after.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/completion/relativeImport/pkg/main.after.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/completion/relativeImport/pkg/main.after.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | from .string import <caret> | 27 | 27 | 0.777778 |
48a016a1659ce678fbb42661714443c79c8a4486 | 9,848 | py | Python | code/scripts/train_fxns_nonimage.py | estherrolf/representation-matters | 502e351e21fc6b33aaa5c96b8c1409c76807f5a7 | [
"BSD-3-Clause"
] | 1 | 2021-11-28T09:29:33.000Z | 2021-11-28T09:29:33.000Z | code/scripts/train_fxns_nonimage.py | estherrolf/representation-matters | 502e351e21fc6b33aaa5c96b8c1409c76807f5a7 | [
"BSD-3-Clause"
] | null | null | null | code/scripts/train_fxns_nonimage.py | estherrolf/representation-matters | 502e351e21fc6b33aaa5c96b8c1409c76807f5a7 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import sklearn.metrics
from dataset_chunking_fxns import subsample_df_by_groups
import sklearn
import sklearn.linear_model
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import time
# learning function: logistic regression multi-class
# learning function: logistic regression
| 39.870445 | 109 | 0.491166 |
48a147ad6df1458c845aa4fd687c23becb0926e9 | 6,206 | py | Python | MITx-6.00.1x-EDX-Introduction-to-Computer-Science/Week-7/PSET-7/phraseTriggers.py | lilsweetcaligula/MIT6.00.1x | ee2902782a08ff685e388b2f40c09ea8c9c5fcfe | [
"MIT"
] | null | null | null | MITx-6.00.1x-EDX-Introduction-to-Computer-Science/Week-7/PSET-7/phraseTriggers.py | lilsweetcaligula/MIT6.00.1x | ee2902782a08ff685e388b2f40c09ea8c9c5fcfe | [
"MIT"
] | null | null | null | MITx-6.00.1x-EDX-Introduction-to-Computer-Science/Week-7/PSET-7/phraseTriggers.py | lilsweetcaligula/MIT6.00.1x | ee2902782a08ff685e388b2f40c09ea8c9c5fcfe | [
"MIT"
] | null | null | null | """
PSET-7
Part 2: Triggers (PhraseTriggers)
At this point, you have no way of writing a trigger that matches on
"New York City" -- the only triggers you know how to write would be
a trigger that would fire on "New" AND "York" AND "City" -- which
also fires on the phrase "New students at York University love the
city". It's time to fix this. Since here you're asking for an exact
match, we will require that the cases match, but we'll be a little
more flexible on word matching. So, "New York City" will match:
* New York City sees movie premiere
* In the heart of New York City's famous cafe
* New York Cityrandomtexttoproveapointhere
but will not match:
* I love new york city
* I love New York City!!!!!!!!!!!!!!
PROBLEM 9
Implement a phrase trigger (PhraseTrigger) that fires when a given
phrase is in any of the story's subject, title, or summary. The
phrase should be an argument to the class's constructor.
"""
# Enter your code for WordTrigger, TitleTrigger,
# SubjectTrigger, SummaryTrigger, and PhraseTrigger in this box
| 41.099338 | 150 | 0.549468 |
48a22194d70cb5daa8b009c12fc1c26cc1c8d905 | 1,522 | py | Python | vault/tests/unit/test_views.py | Natan7/vault | ad0e9d5434dc59c9573afefef5e4eb390a7383ae | [
"Apache-2.0"
] | 1 | 2017-03-02T19:32:31.000Z | 2017-03-02T19:32:31.000Z | vault/tests/unit/test_views.py | Natan7/vault | ad0e9d5434dc59c9573afefef5e4eb390a7383ae | [
"Apache-2.0"
] | null | null | null | vault/tests/unit/test_views.py | Natan7/vault | ad0e9d5434dc59c9573afefef5e4eb390a7383ae | [
"Apache-2.0"
] | 2 | 2018-03-14T16:56:53.000Z | 2018-03-14T17:20:07.000Z | # -*- coding: utf-8 -*-
from unittest import TestCase
from mock import Mock, patch
from vault.tests.fakes import fake_request
from vault.views import SetProjectView
from django.utils.translation import ugettext as _
| 33.086957 | 78 | 0.704336 |
48a2c6f00acb55753f06b34ad48a128100334441 | 2,364 | py | Python | qa/tasks/cephfs/test_dump_tree.py | rpratap-bot/ceph | 9834961a66927ae856935591f2fd51082e2ee484 | [
"MIT"
] | 4 | 2020-04-08T03:42:02.000Z | 2020-10-01T20:34:48.000Z | qa/tasks/cephfs/test_dump_tree.py | rpratap-bot/ceph | 9834961a66927ae856935591f2fd51082e2ee484 | [
"MIT"
] | 93 | 2020-03-26T14:29:14.000Z | 2020-11-12T05:54:55.000Z | qa/tasks/cephfs/test_dump_tree.py | rpratap-bot/ceph | 9834961a66927ae856935591f2fd51082e2ee484 | [
"MIT"
] | 23 | 2020-03-24T10:28:44.000Z | 2020-09-24T09:42:19.000Z | from tasks.cephfs.cephfs_test_case import CephFSTestCase
import random
import os
| 35.283582 | 76 | 0.571489 |
48a598d9751db785f23d9a8e28422d557cff93bc | 966 | py | Python | catkin_ws/src/devel_scripts/stepper.py | AROMAeth/robo_code | d920adee8eb7ab285ba50aa31c71d631adc35480 | [
"MIT"
] | null | null | null | catkin_ws/src/devel_scripts/stepper.py | AROMAeth/robo_code | d920adee8eb7ab285ba50aa31c71d631adc35480 | [
"MIT"
] | null | null | null | catkin_ws/src/devel_scripts/stepper.py | AROMAeth/robo_code | d920adee8eb7ab285ba50aa31c71d631adc35480 | [
"MIT"
] | null | null | null |
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
control_pins = [7,11,13,15]
for pin in control_pins:
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, 0)
halfstep_seq = [
[1,0,0,0],
[1,1,0,0],
[0,1,0,0],
[0,1,1,0],
[0,0,1,0],
[0,0,1,1],
[0,0,0,1],
[1,0,0,1]
]
# speed from 0 to 1 (one being the fastest)
# steps 50 steps = one rotation
for k in range(1,10,1):
move_forward(50,0.1)
time.sleep(0.5)
#move_forward(50,0.25)
time.sleep(1)
#move_backward(500,0.5)
GPIO.cleanup()
| 18.576923 | 67 | 0.635611 |
48a7ec0bb39b709f9863a091b7d85367791f1dab | 2,924 | py | Python | Experimental/OpenCVExp.py | awesomesauce12/6DBytes-CV | 8e48c6e629eedcd5098a0b0f8c90c48e38d5abf8 | [
"MIT"
] | 1 | 2016-06-24T23:09:43.000Z | 2016-06-24T23:09:43.000Z | Experimental/OpenCVExp.py | awesomesauce12/image-recognition | 8e48c6e629eedcd5098a0b0f8c90c48e38d5abf8 | [
"MIT"
] | null | null | null | Experimental/OpenCVExp.py | awesomesauce12/image-recognition | 8e48c6e629eedcd5098a0b0f8c90c48e38d5abf8 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import os
import math
os.system("fswebcam -r 507x456 --no-banner image11.jpg")
img = cv2.imread('image11.jpg',-1)
height, width, channel = img.shape
topy= height
topx = width
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_color = np.array([0,255,255])
upper_color = np.array([0,255,255])
mask = cv2.inRange(hsv, lower_color, upper_color)
res = cv2.bitwise_and(img,img, mask=mask)
'''def draw_circle(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDBLCLK:
cv2.circle(img, (x,y), 100, (255,255,255), -1)'''
'''cap = cv2.VideoCapture(-1)
while(True):
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('hjhj', gray)
if cv2.waitKey(0) & 0xFF -- ord('q'):
break
cap.release()
cv2.destroyAllWindows()'''
propx = (topx/512)
propy = (topy/512)
'''lineX1 = int(0*propx)
lineY2 = int(0*propy)
lineX2 = int(511*propx)
lineY1 = int(511*propy)
img = cv2.line(img, (lineX1,lineY1), (lineX2, lineY2), (255,255,255), 5)'''
w = 100*(propx+propy)/2
x1 = int(topx/2 - w/2)
x2 = int(topx/2 + w/2)
y1 = int(topy/2 + w/2)
y2 = int(topy/2 - w/2)
img = cv2.rectangle(res, (x1,y1), (x2,y2), (0,255,0),3)
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
showImage(img)
ret, thresh = cv2.threshold(img, 15, 250, 0)
showImage(thresh)
image, contours, heirarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#showImage(image)
cv2.drawContours(img, contours, 0, (0,255,0), 3)
showImage(img)
print('Num of Contours ', len(contours))
cnt = contours[0]
M = cv2.moments(cnt)
print (M)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
area = cv2.contourArea(cnt)
print (cx)
print (cy)
print (area)
'''xCircle = 40*propx
xCircle = int(xCircle)
yCircle = xCircle
radCircle = xCircle
img = cv2.circle(img, (xCircle, yCircle), radCircle, (0,0,255),-1)
x3 = int(topx - 60*propx)
y3 = int(topy - 110*propy)
minAx = int(50*propx)
majAx = int(100*propy)
img = cv2.ellipse(img, (x3, y3), (minAx,majAx), 0, 0, 360, (0,150,255), -1)'''
'''pt1X = int(70*propx)
pt1Y = int(60*propy)
pt2X = int(154*propx)
pt2Y = int(23*propy)
pt3X = int(500*propx)
pt3Y = int(3*propy)'''
#pts = np.array([[pt1X, pt1Y], [pt2X, pt2Y], [pt3X, pt3Y]], np.int32)
#pts = pts.reshape((-1,1,2))
#img = cv2.polylines(img, [pts], True, (100,100,234))
#font = cv2.FONT_HERSHEY_SIMPLEX
#startPtX = int(240*propx)
#startPtY = int(240*propy)
#scale = 2*(propx + propy)/2
#cv2.putText(img, 'Apurva', (startPtX, startPtY), font, scale, (210, 80, 150), 4, cv2.LINE_AA)
#cv2.imshow("kl", img)
'''cv2.setMouseCallback('kl', draw_circle)'''
''''''
#cv2.imshow('frame', img)
#cv2.imshow('mask',mask)
cv2.imshow('res',res)
'''sd = img[130:200, 175:245]
img[20:90, 140:210]=sd
cv2.imshow("kl", img)'''
cv2.waitKey(0)
cv2.destroyAllWindows()
| 21.5 | 94 | 0.651505 |
48a84cb7d32acc3cbc3af963ca0e81cc7ff163d9 | 424 | py | Python | poem/Poem/urls_public.py | kzailac/poem | 9f898e3cc3378ef1c49517b4cf6335a93a3f49b0 | [
"Apache-2.0"
] | null | null | null | poem/Poem/urls_public.py | kzailac/poem | 9f898e3cc3378ef1c49517b4cf6335a93a3f49b0 | [
"Apache-2.0"
] | null | null | null | poem/Poem/urls_public.py | kzailac/poem | 9f898e3cc3378ef1c49517b4cf6335a93a3f49b0 | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import include
from django.http import HttpResponseRedirect
from django.urls import re_path
from Poem.poem_super_admin.admin import mysuperadmin
# URL routing: redirect the bare root to the superadmin index, mount the
# superadmin site, and expose SAML2 endpoints under the 'poem' namespace.
urlpatterns = [
    re_path(r'^$', lambda x: HttpResponseRedirect('/poem/superadmin/')),
    re_path(r'^superadmin/', mysuperadmin.urls),
    re_path(r'^saml2/', include(('djangosaml2.urls', 'poem'),
                                namespace='saml2')),
]
| 32.615385 | 72 | 0.688679 |
48a8ac917e1b840d71d262dd221cf4cb43769865 | 902 | py | Python | optimism/ReadMesh.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | [
"MIT"
] | null | null | null | optimism/ReadMesh.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | [
"MIT"
] | 1 | 2022-03-12T00:01:12.000Z | 2022-03-12T00:01:12.000Z | optimism/ReadMesh.py | btalamini/optimism | 023e1b2a0b137900a7517e4c7ac5056255cf7bbe | [
"MIT"
] | 3 | 2021-12-23T19:53:31.000Z | 2022-03-27T23:12:03.000Z | import json
from optimism.JaxConfig import *
from optimism import Mesh
| 31.103448 | 74 | 0.643016 |
48a9df7987bdd5e6e1faa8cd6a7c8279d997c6ae | 1,058 | py | Python | networkx/algorithms/approximation/ramsey.py | rakschahsa/networkx | 6cac55b1064c3c346665f9281680fa3b66442ad0 | [
"BSD-3-Clause"
] | 445 | 2019-01-26T13:50:26.000Z | 2022-03-18T05:17:38.000Z | SLpackage/private/thirdparty/pythonpkgs/networkx/networkx_2.2/lib/python2.7/site-packages/networkx/algorithms/approximation/ramsey.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
] | 242 | 2019-01-29T15:48:27.000Z | 2022-03-31T22:09:21.000Z | SLpackage/private/thirdparty/pythonpkgs/networkx/networkx_2.2/lib/python2.7/site-packages/networkx/algorithms/approximation/ramsey.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
] | 31 | 2019-03-10T09:51:27.000Z | 2022-02-14T23:11:12.000Z | # -*- coding: utf-8 -*-
"""
Ramsey numbers.
"""
# Copyright (C) 2011 by
# Nicholas Mancuso <nick.mancuso@gmail.com>
# All rights reserved.
# BSD license.
import networkx as nx
from ...utils import arbitrary_element
__all__ = ["ramsey_R2"]
__author__ = """Nicholas Mancuso (nick.mancuso@gmail.com)"""
def ramsey_R2(G):
    r"""Approximately computes the Ramsey number `R(2;s,t)` for graph.

    Recursively picks an arbitrary node, solves the subproblems on the
    subgraphs induced by its neighbors and by its non-neighbors, and
    combines the results.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph

    Returns
    -------
    max_pair : (set, set) tuple
        Maximum clique, Maximum independent set.
    """
    if not G:
        return set(), set()
    node = arbitrary_element(G)
    nbrs = nx.all_neighbors(G, node)
    nnbrs = nx.non_neighbors(G, node)
    c_1, i_1 = ramsey_R2(G.subgraph(nbrs).copy())
    c_2, i_2 = ramsey_R2(G.subgraph(nnbrs).copy())
    # A clique among the neighbors extends by `node`; an independent set
    # among the non-neighbors likewise extends by `node`.
    c_1.add(node)
    i_2.add(node)
    # Choose the larger of the two cliques and the larger of the two
    # independent sets, according to cardinality.
    return max(c_1, c_2, key=len), max(i_1, i_2, key=len)
| 24.604651 | 70 | 0.63138 |
48acd263a6439030b9241f1881827f94f5753592 | 677 | py | Python | pysyte/oss/linux.py | git-wwts/pysyte | 625658138cdb5affc1a6a89a9f2c7e3667ee80c2 | [
"MIT"
] | 1 | 2021-11-10T15:24:36.000Z | 2021-11-10T15:24:36.000Z | pysyte/oss/linux.py | git-wwts/pysyte | 625658138cdb5affc1a6a89a9f2c7e3667ee80c2 | [
"MIT"
] | 12 | 2020-01-15T00:19:41.000Z | 2021-05-11T14:52:04.000Z | pysyte/oss/linux.py | git-wwts/pysyte | 625658138cdb5affc1a6a89a9f2c7e3667ee80c2 | [
"MIT"
] | 2 | 2015-01-31T11:51:06.000Z | 2015-01-31T21:29:19.000Z | """Linux-specific code"""
from pysyte.types import paths
def xdg_home():
    """path to $XDG_CONFIG_HOME

    The second argument ("~/.config") is presumably the fallback used when
    the variable is unset -- confirm against paths.environ_path.

    >>> assert xdg_home() == paths.path('~/.config').expand()
    """
    return paths.environ_path("XDG_CONFIG_HOME", "~/.config")
def xdg_home_config(filename):
    """path to that file in $XDG_CONFIG_HOME

    >>> assert xdg_home_config('fred') == paths.path('~/.config/fred').expand()
    """
    # path division joins filename beneath the XDG config home
    return xdg_home() / filename
def xdg_dirs():
    """paths in $XDG_CONFIG_DIRS

    Presumably one path per entry of the colon-separated variable --
    confirm against paths.environ_paths.
    """
    return paths.environ_paths("XDG_CONFIG_DIRS")
bash_paste = "xclip -selection clipboard"
bash_copy = "xclip -selection clipboard -o"
| 19.911765 | 79 | 0.660266 |
48ae6c1d7db7737a61286051c58656fa1c61b3ae | 387 | py | Python | osu/osu_overlay.py | HQupgradeHQ/Daylight | a110a0f618877f5cccd66c4d75115c765d8f62a0 | [
"MIT"
] | 2 | 2020-07-30T14:07:19.000Z | 2020-08-01T05:28:29.000Z | osu/osu_overlay.py | HQupgradeHQ/Daylight | a110a0f618877f5cccd66c4d75115c765d8f62a0 | [
"MIT"
] | null | null | null | osu/osu_overlay.py | HQupgradeHQ/Daylight | a110a0f618877f5cccd66c4d75115c765d8f62a0 | [
"MIT"
] | null | null | null | import mpv
import keyboard
import time
p = mpv.MPV()
p.play("song_name.mp4")
keyboard.add_hotkey("e", play_pause)
keyboard.add_hotkey("2", full)
keyboard.add_hotkey("1", go_to_start)
while 1:
time.sleep(40)
| 12.09375 | 38 | 0.620155 |
48b02d948060f886a636e1dc8c11abff122b4be1 | 91,989 | py | Python | test/unit/common/test_db.py | dreamhost/swift | e90424e88bfaae17bf16f5c32b4d18deb5a6e71f | [
"Apache-2.0"
] | null | null | null | test/unit/common/test_db.py | dreamhost/swift | e90424e88bfaae17bf16f5c32b4d18deb5a6e71f | [
"Apache-2.0"
] | null | null | null | test/unit/common/test_db.py | dreamhost/swift | e90424e88bfaae17bf16f5c32b4d18deb5a6e71f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for swift.common.db """
from __future__ import with_statement
import hashlib
import os
import unittest
from shutil import rmtree, copy
from StringIO import StringIO
from time import sleep, time
from uuid import uuid4
import simplejson
import sqlite3
import swift.common.db
from swift.common.db import AccountBroker, chexor, ContainerBroker, \
DatabaseBroker, DatabaseConnectionError, dict_factory, get_db_connection
from swift.common.utils import normalize_timestamp
from swift.common.exceptions import LockTimeout
def premetadata_create_container_stat_table(self, conn, put_timestamp=None):
    """
    Copied from swift.common.db.ContainerBroker before the metadata column was
    added; used for testing with TestContainerBrokerBeforeMetadata.
    Create the container_stat table which is specific to the container DB.
    :param conn: DB connection object
    :param put_timestamp: put timestamp
    """
    # NOTE: this schema is a deliberate copy of the legacy (pre-metadata)
    # schema; do not update it to match the current ContainerBroker schema.
    if put_timestamp is None:
        put_timestamp = normalize_timestamp(0)
    conn.executescript("""
        CREATE TABLE container_stat (
            account TEXT,
            container TEXT,
            created_at TEXT,
            put_timestamp TEXT DEFAULT '0',
            delete_timestamp TEXT DEFAULT '0',
            object_count INTEGER,
            bytes_used INTEGER,
            reported_put_timestamp TEXT DEFAULT '0',
            reported_delete_timestamp TEXT DEFAULT '0',
            reported_object_count INTEGER DEFAULT 0,
            reported_bytes_used INTEGER DEFAULT 0,
            hash TEXT default '00000000000000000000000000000000',
            id TEXT,
            status TEXT DEFAULT '',
            status_changed_at TEXT DEFAULT '0'
        );
        INSERT INTO container_stat (object_count, bytes_used)
            VALUES (0, 0);
    """)
    conn.execute('''
        UPDATE container_stat
        SET account = ?, container = ?, created_at = ?, id = ?,
            put_timestamp = ?
    ''', (self.account, self.container, normalize_timestamp(time()),
          str(uuid4()), put_timestamp))
def prexsync_create_container_stat_table(self, conn, put_timestamp=None):
    """
    Copied from swift.common.db.ContainerBroker before the
    x_container_sync_point[12] columns were added; used for testing with
    TestContainerBrokerBeforeXSync.
    Create the container_stat table which is specific to the container DB.
    :param conn: DB connection object
    :param put_timestamp: put timestamp
    """
    # NOTE: deliberate copy of the legacy (pre-x-sync) schema -- includes
    # the metadata column but not x_container_sync_point1/2; do not "fix".
    if put_timestamp is None:
        put_timestamp = normalize_timestamp(0)
    conn.executescript("""
        CREATE TABLE container_stat (
            account TEXT,
            container TEXT,
            created_at TEXT,
            put_timestamp TEXT DEFAULT '0',
            delete_timestamp TEXT DEFAULT '0',
            object_count INTEGER,
            bytes_used INTEGER,
            reported_put_timestamp TEXT DEFAULT '0',
            reported_delete_timestamp TEXT DEFAULT '0',
            reported_object_count INTEGER DEFAULT 0,
            reported_bytes_used INTEGER DEFAULT 0,
            hash TEXT default '00000000000000000000000000000000',
            id TEXT,
            status TEXT DEFAULT '',
            status_changed_at TEXT DEFAULT '0',
            metadata TEXT DEFAULT ''
        );
        INSERT INTO container_stat (object_count, bytes_used)
            VALUES (0, 0);
    """)
    conn.execute('''
        UPDATE container_stat
        SET account = ?, container = ?, created_at = ?, id = ?,
            put_timestamp = ?
    ''', (self.account, self.container, normalize_timestamp(time()),
          str(uuid4()), put_timestamp))
def premetadata_create_account_stat_table(self, conn, put_timestamp):
    """
    Copied from swift.common.db.AccountBroker before the metadata column was
    added; used for testing with TestAccountBrokerBeforeMetadata.
    Create account_stat table which is specific to the account DB.
    :param conn: DB connection object
    :param put_timestamp: put timestamp
    """
    # NOTE: deliberate copy of the legacy (pre-metadata) account schema;
    # do not update it to match the current AccountBroker schema.
    conn.executescript("""
        CREATE TABLE account_stat (
            account TEXT,
            created_at TEXT,
            put_timestamp TEXT DEFAULT '0',
            delete_timestamp TEXT DEFAULT '0',
            container_count INTEGER,
            object_count INTEGER DEFAULT 0,
            bytes_used INTEGER DEFAULT 0,
            hash TEXT default '00000000000000000000000000000000',
            id TEXT,
            status TEXT DEFAULT '',
            status_changed_at TEXT DEFAULT '0'
        );
        INSERT INTO account_stat (container_count) VALUES (0);
    """)
    conn.execute('''
        UPDATE account_stat SET account = ?, created_at = ?, id = ?,
            put_timestamp = ?
    ''', (self.account, normalize_timestamp(time()), str(uuid4()),
          put_timestamp))
if __name__ == '__main__':
unittest.main()
| 45.270177 | 94 | 0.582309 |
48b05f987fe0e54d587244c5320a33f91ef59a44 | 10,824 | py | Python | robocorp-code/tests/robocorp_code_tests/fixtures.py | mardukbp/robotframework-lsp | 57b4b2b14b712c9bf90577924a920fb9b9e831c7 | [
"ECL-2.0",
"Apache-2.0"
] | 92 | 2020-01-22T22:15:29.000Z | 2022-03-31T05:19:16.000Z | robocorp-code/tests/robocorp_code_tests/fixtures.py | mardukbp/robotframework-lsp | 57b4b2b14b712c9bf90577924a920fb9b9e831c7 | [
"ECL-2.0",
"Apache-2.0"
] | 604 | 2020-01-25T17:13:27.000Z | 2022-03-31T18:58:24.000Z | robocorp-code/tests/robocorp_code_tests/fixtures.py | mardukbp/robotframework-lsp | 57b4b2b14b712c9bf90577924a920fb9b9e831c7 | [
"ECL-2.0",
"Apache-2.0"
] | 39 | 2020-02-06T00:38:06.000Z | 2022-03-15T06:14:19.000Z | import os
import pytest
from robocorp_ls_core.protocols import IConfigProvider
from robocorp_ls_core.robotframework_log import get_logger
from robocorp_ls_core.unittest_tools.cases_fixture import CasesFixture
from robocorp_code.protocols import IRcc, ActionResult
import sys
from typing import Any
from pathlib import Path
from robocorp_code_tests.protocols import IRobocorpLanguageServerClient
log = get_logger(__name__)
IMAGE_IN_BASE64 = "iVBORw0KGgoAAAANSUhEUgAAAb8AAAAiCAYAAADPnNdbAAAAAXNSR0IArs4c6QAAAJ1JREFUeJzt1TEBACAMwDDAv+fhAo4mCvp1z8wsAAg5vwMA4DXzAyDH/ADIMT8AcswPgBzzAyDH/ADIMT8AcswPgBzzAyDH/ADIMT8AcswPgBzzAyDH/ADIMT8AcswPgBzzAyDH/ADIMT8AcswPgBzzAyDH/ADIMT8AcswPgBzzAyDH/ADIMT8AcswPgBzzAyDH/ADIMT8AcswPgJwLXQ0EQMJRx4AAAAAASUVORK5CYII="
_WS_INFO = (
{
"id": "workspace_id_1",
"name": "CI workspace",
"orgId": "affd282c8f9fe",
"orgName": "My Org Name",
"orgShortName": "654321",
"shortName": "123456", # Can be some generated number or something provided by the user.
"state": "active",
"url": "http://url1",
},
{
"id": "workspace_id_2",
"name": "My Other workspace",
"orgId": "affd282c8f9fe",
"orgName": "My Org Name",
"orgShortName": "1234567",
"shortName": "7654321",
"state": "active",
"url": "http://url2",
},
)
_PACKAGE_INFO_WS_2: dict = {}
_PACKAGE_INFO_WS_1: dict = {
"activities": [
{"id": "452", "name": "Package Name 1"},
{"id": "453", "name": "Package Name 2"},
]
}
| 30.490141 | 324 | 0.610957 |
48b0b31dabd46b83a7d8a1c53e2be4a3ab952b42 | 2,182 | py | Python | tensorflow_model_optimization/python/core/quantization/keras/quantize_emulatable_layer.py | akarmi/model-optimization | 2d3faaa361ecb3639f4a29da56e0e6ed52336318 | [
"Apache-2.0"
] | 1 | 2019-10-10T06:14:45.000Z | 2019-10-10T06:14:45.000Z | tensorflow_model_optimization/python/core/quantization/keras/quantize_emulatable_layer.py | akarmi/model-optimization | 2d3faaa361ecb3639f4a29da56e0e6ed52336318 | [
"Apache-2.0"
] | null | null | null | tensorflow_model_optimization/python/core/quantization/keras/quantize_emulatable_layer.py | akarmi/model-optimization | 2d3faaa361ecb3639f4a29da56e0e6ed52336318 | [
"Apache-2.0"
] | 1 | 2019-10-10T06:14:48.000Z | 2019-10-10T06:14:48.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract Base Class for quantize emulation in custom keras layers."""
import abc
import six
| 35.770492 | 80 | 0.706691 |
48b322c1b5c9322a3e7a06f6f8cf4904f59abc42 | 1,373 | py | Python | GEN_cell_culture/phase_plotting.py | dezeraecox/GEN_cell_culture | 70ca933bef53347e916e20e6b86dc9dc9da11825 | [
"MIT"
] | null | null | null | GEN_cell_culture/phase_plotting.py | dezeraecox/GEN_cell_culture | 70ca933bef53347e916e20e6b86dc9dc9da11825 | [
"MIT"
] | 1 | 2019-08-04T22:44:54.000Z | 2019-08-04T22:44:54.000Z | GEN_cell_culture/phase_plotting.py | dezeraecox/GEN_cell_culture | 70ca933bef53347e916e20e6b86dc9dc9da11825 | [
"MIT"
] | null | null | null | import os
import re
import string
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from GEN_Utils import FileHandling
from loguru import logger
logger.info("Import OK")
# Set sample-specific variables
input_path = 'examples/python/gauss_models/'
output_path = 'examples/python/phase_plotting/'
plate_sample = ['TPE only', '1', '1.5', '2', '3', '4']*4
plate_cords = [f'{x}{y}' for x in string.ascii_uppercase[0:4]
for y in range(1, 7)]
sample_map = dict(zip(plate_cords, plate_sample))
if not os.path.exists(output_path):
os.mkdir(output_path)
# Read in summary df and preview
summary = pd.read_excel(f'{input_path}summary.xlsx')
# Assign sample-specific descriptors to summary table
summary['plate'] = summary['sample'].str[0]
summary['well'] = summary['sample'].str[1:]
summary['sample'] = summary['well'].map(sample_map)
phase_name = ['G', 'S', 'M']
phase_num = [1, 2, 3]
phase_map = dict(zip(phase_name, phase_num))
# Generate line-plot
fig = plt.subplots()
for phase in phase_name:
sns.lineplot(summary['sample'], summary[phase], label=phase, ci='sd')
plt.ylabel("Proportion of cells in phase")
plt.xlabel(r'Density(x 10$^ 5$)')
plt.title('Phase distribution')
plt.legend(bbox_to_anchor=(1.1, 1.0), title='Phase')
plt.tight_layout()
plt.autoscale()
plt.savefig(f'{output_path}line_plot.png')
| 27.46 | 73 | 0.718135 |
48b3cc7ab2adb8652b3ac164a64a50173d354d2a | 4,759 | py | Python | PlatformerGame/malmopy/explorers.py | MrMaik/platformer-ml-game | bbcabe3ddea1e3cfddb01b4cd60c8dd1bd79acac | [
"MIT"
] | 10 | 2020-01-05T19:33:33.000Z | 2022-02-04T14:56:09.000Z | PlatformerGame/malmopy/explorers.py | MrMaik/platformer-ml-game | bbcabe3ddea1e3cfddb01b4cd60c8dd1bd79acac | [
"MIT"
] | 1 | 2019-12-18T15:16:44.000Z | 2019-12-18T15:16:44.000Z | PlatformerGame/malmopy/explorers.py | MrMaik/platformer-ml-game | bbcabe3ddea1e3cfddb01b4cd60c8dd1bd79acac | [
"MIT"
] | 6 | 2019-12-18T14:45:37.000Z | 2021-09-13T12:48:28.000Z | # --------------------------------------------------------------------------------------------------
# Copyright (c) 2018 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# --------------------------------------------------------------------------------------------------
"""Module containing explorer classes"""
from numpy import random as np_random
from .summaries import ScalarSummary
from .triggers import each_step
from .abc import Explorer, EpsilonFunction, Visualizable
| 36.328244 | 100 | 0.65497 |
48b5904c4c46f166269a35d1d5aae2ecfb57bef7 | 765 | py | Python | Lib/icecreamscrape/__main__.py | kdwatt15/icecreamscrape | aefe18d795bb9ae8daabda7f8e26653df7d47c44 | [
"MIT"
] | null | null | null | Lib/icecreamscrape/__main__.py | kdwatt15/icecreamscrape | aefe18d795bb9ae8daabda7f8e26653df7d47c44 | [
"MIT"
] | 1 | 2020-06-07T17:56:13.000Z | 2020-06-07T17:56:13.000Z | Lib/icecreamscrape/__main__.py | kdwatt15/icecreamscrape | aefe18d795bb9ae8daabda7f8e26653df7d47c44 | [
"MIT"
] | null | null | null | # Standard imports
import sys
# Project imports
from icecreamscrape.cli import cli
from icecreamscrape.webdriver import driver_factory
from icecreamscrape import composites as comps
from icecreamscrape.composites import create_timestamped_dir
def main(args=sys.argv[1:]):
""" Main function. :param: args is used for testing """
user_inputs = cli(args)
url = user_inputs.params.url
active_features = user_inputs.active_features
if len(active_features) > 0:
time_dir = create_timestamped_dir()
with driver_factory(url) as driver:
for feature in active_features:
getattr(sys.modules[comps.__name__],
feature)(driver, time_dir)
def init():
""" Init construction allows for testing """
if __name__ == "__main__":
sys.exit(main())
init()
| 24.677419 | 60 | 0.75817 |
48b8c62b25b3330d58b5291c6fc3a3f2df2e485f | 5,051 | py | Python | tests/models/programdb/mission/mission_unit_test.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 4 | 2018-08-26T09:11:36.000Z | 2019-05-24T12:01:02.000Z | tests/models/programdb/mission/mission_unit_test.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 52 | 2018-08-24T12:51:22.000Z | 2020-12-28T04:59:42.000Z | tests/models/programdb/mission/mission_unit_test.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 1 | 2018-10-11T07:57:55.000Z | 2018-10-11T07:57:55.000Z | # pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.controllers.mission.mission_unit_test.py is part of The
# RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Test class for testing Mission module algorithms and models."""
# Third Party Imports
import pytest
from pubsub import pub
from treelib import Tree
# RAMSTK Package Imports
from ramstk.models.dbrecords import RAMSTKMissionRecord
from ramstk.models.dbtables import RAMSTKMissionTable
from tests import (
MockDAO,
UnitTestDeleteMethods,
UnitTestGetterSetterMethods,
UnitTestInsertMethods,
UnitTestSelectMethods,
)
| 33.673333 | 88 | 0.713324 |
48b9335e8465f09c7a066bfa90b273be5d354b55 | 569 | py | Python | src/streamlink/packages/flashmedia/flv.py | RomanKornev/streamlink | acdefee0822b9c10628b91a166f9abe084e44800 | [
"BSD-2-Clause"
] | 5 | 2019-07-26T17:03:26.000Z | 2020-10-17T23:23:43.000Z | src/streamlink/packages/flashmedia/flv.py | RomanKornev/streamlink | acdefee0822b9c10628b91a166f9abe084e44800 | [
"BSD-2-Clause"
] | 9 | 2018-01-14T15:20:23.000Z | 2021-03-08T20:29:51.000Z | src/streamlink/packages/flashmedia/flv.py | bumplzz69/streamlink | 34abc43875d7663ebafa241573dece272e93d88b | [
"BSD-2-Clause"
] | 4 | 2018-01-14T13:27:25.000Z | 2021-11-15T22:28:30.000Z | #!/usr/bin/env python
from .error import FLVError
from .compat import is_py2
from .tag import Header, Tag
__all__ = ["FLV"]
| 18.966667 | 62 | 0.606327 |
48b9e626c31a3acad3ffc788ac2313af13310a0d | 120 | py | Python | tests/core/test_core_renderer.py | timvink/pheasant | eb5b0a8b5473baad5ad4903984433fe763f5312e | [
"MIT"
] | 24 | 2018-05-25T15:23:30.000Z | 2021-06-09T10:56:52.000Z | tests/core/test_core_renderer.py | timvink/pheasant | eb5b0a8b5473baad5ad4903984433fe763f5312e | [
"MIT"
] | 14 | 2019-04-30T10:51:01.000Z | 2020-09-16T20:37:30.000Z | tests/core/test_core_renderer.py | timvink/pheasant | eb5b0a8b5473baad5ad4903984433fe763f5312e | [
"MIT"
] | 9 | 2019-06-12T10:54:18.000Z | 2022-01-15T21:19:05.000Z | from pheasant.renderers.jupyter.jupyter import Jupyter
jupyter = Jupyter()
jupyter.findall("{{3}}3{{5}}")
jupyter.page
| 20 | 54 | 0.75 |
48ba4f165b3430e0ef9885d29722f28bf1be64bd | 687 | py | Python | chapter2-5-your-code-in-multiple-servers/packer/webapp.py | andrecp/devops-fundamentals-to-k8s | 9ea1cfdcfcc07faf195bd26faa5917628385cdfc | [
"MIT"
] | null | null | null | chapter2-5-your-code-in-multiple-servers/packer/webapp.py | andrecp/devops-fundamentals-to-k8s | 9ea1cfdcfcc07faf195bd26faa5917628385cdfc | [
"MIT"
] | null | null | null | chapter2-5-your-code-in-multiple-servers/packer/webapp.py | andrecp/devops-fundamentals-to-k8s | 9ea1cfdcfcc07faf195bd26faa5917628385cdfc | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import json
from http.server import HTTPServer, BaseHTTPRequestHandler
num_requests = 0
if __name__ == "__main__":
http_service = HTTPServer(("0.0.0.0", 8000), Handler)
print(f"Starting http service on 0.0.0.0:8000")
http_service.serve_forever()
| 27.48 | 75 | 0.678311 |
48bb529c5d5a0817b3c6e3353e857c62a73b8a16 | 91 | py | Python | run.py | ellotecnologia/galadriel | 16b592818d8beb8407805e43f2f881975b245d94 | [
"MIT"
] | null | null | null | run.py | ellotecnologia/galadriel | 16b592818d8beb8407805e43f2f881975b245d94 | [
"MIT"
] | null | null | null | run.py | ellotecnologia/galadriel | 16b592818d8beb8407805e43f2f881975b245d94 | [
"MIT"
] | null | null | null | from app.app import create_app
from config import BaseConfig
app = create_app(BaseConfig)
| 18.2 | 30 | 0.824176 |
48bb6abe88059d9888226636da8508d01f476cba | 3,896 | py | Python | retarget/make_data.py | EggPool/rx-experiments | a8659a94e1b0822a9e7f4121407fb2b5ededa192 | [
"MIT"
] | 1 | 2021-06-20T16:58:21.000Z | 2021-06-20T16:58:21.000Z | retarget/make_data.py | EggPool/rx-experiments | a8659a94e1b0822a9e7f4121407fb2b5ededa192 | [
"MIT"
] | null | null | null | retarget/make_data.py | EggPool/rx-experiments | a8659a94e1b0822a9e7f4121407fb2b5ededa192 | [
"MIT"
] | 2 | 2019-11-26T12:18:18.000Z | 2022-03-28T19:22:55.000Z | """
Create data for simulations
(c) 2019 - EggdraSyl
"""
import json
# from mockup import Blockchain, Block
from minersimulator import MinerSimulator
from math import sin, pi
SPECIAL_MIN_TIME = 5 * 60
if __name__ == "__main__":
init_stable(
0,
1000,
block_time=3600,
target="0000000000000028acfa28a803d2000000000000000000000000000000000000",
file="stable_3600_14.json",
)
init_stable(
0,
1000,
block_time=60 * 5,
target="000000ffffffffff28acfa28a803d20000000000000000000000000000000000",
file="stable_300_6.json",
)
init_stable(
0,
1000,
block_time=60 * 5,
target="00000ffffffffff28acfa28a803d200000000000000000000000000000000000",
file="stable_300_5.json",
)
init_stable(
0,
1000,
block_time=60 * 5,
target="0000ffffffffff28acfa28a803d2000000000000000000000000000000000000",
file="stable_300_4.json",
)
init_stable(
0,
1000,
block_time=60 * 5,
target="000ffffffffff28acfa28a803d2000000000000000000000000000000000000",
file="stable_300_3.json",
)
hash_stable(10000, 167, file="stable_167.json")
hash_stable(10000, 1670, file="stable_1670.json")
hash_stable(10000, 16700, file="stable_16700.json")
hash_arithmetic(10000, 167, 16, file="arithmetic_167_16.json")
hash_step(10000, 167, 500, file="step_up_167_500.json")
hash_step(10000, 500, 167, file="step_down_500_167.json")
hash_sinus(10000, 300, 150, 60*12, file="sinus_300_150_720.json")
hash_sinus(10000, 300, 100, 1440, file="sinus_300_100_1440.json")
hash_sinus(10000, 300, 100, 2880, file="sinus_300_100_2880.json")
| 30.4375 | 91 | 0.641427 |
48bb8a2d0cac5d726a9c18529c0114315a34c2c3 | 13,473 | py | Python | software/pynguin/tests/testcase/statements/test_primitivestatements.py | se2p/artifact-pynguin-ssbse2020 | 32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6 | [
"CC-BY-4.0"
] | 3 | 2020-08-20T10:27:13.000Z | 2021-11-02T20:28:16.000Z | software/pynguin/tests/testcase/statements/test_primitivestatements.py | se2p/artifact-pynguin-ssbse2020 | 32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6 | [
"CC-BY-4.0"
] | null | null | null | software/pynguin/tests/testcase/statements/test_primitivestatements.py | se2p/artifact-pynguin-ssbse2020 | 32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6 | [
"CC-BY-4.0"
] | null | null | null | # This file is part of Pynguin.
#
# Pynguin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pynguin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pynguin. If not, see <https://www.gnu.org/licenses/>.
from unittest import mock
from unittest.mock import MagicMock
import pytest
import pynguin.configuration as config
import pynguin.testcase.defaulttestcase as dtc
import pynguin.testcase.statements.primitivestatements as prim
import pynguin.testcase.testcase as tc
import pynguin.testcase.variable.variablereferenceimpl as vri
def test_none_statement_equals_clone():
test_case = MagicMock(tc.TestCase)
statement = prim.NoneStatement(test_case, type(None))
test_case.statements = [statement]
test_case2 = MagicMock(tc.TestCase)
clone = statement.clone(test_case2)
test_case2.statements = [clone]
assert statement.__eq__(clone)
| 34.724227 | 88 | 0.720255 |
48bbe200dfeacc3fe42e8fdff56e3de41ac32c2b | 725 | py | Python | src/data/download/datasets/download_tencent_test.py | lcn-kul/conferencing-speech-2022 | 1089b2baaf2fcf3ac8ef44c65b80da2e5b2c331b | [
"MIT"
] | 1 | 2022-03-30T15:06:18.000Z | 2022-03-30T15:06:18.000Z | src/data/download/datasets/download_tencent_test.py | lcn-kul/conferencing-speech-2022 | 1089b2baaf2fcf3ac8ef44c65b80da2e5b2c331b | [
"MIT"
] | null | null | null | src/data/download/datasets/download_tencent_test.py | lcn-kul/conferencing-speech-2022 | 1089b2baaf2fcf3ac8ef44c65b80da2e5b2c331b | [
"MIT"
] | null | null | null | from pathlib import Path
from src import constants
from src.data.download.utils.download_dataset_zip import download_dataset_zip
def download_tencent_test(
tmp_dir: Path = None,
tqdm_name: str = None,
tqdm_idx: int = None,
):
"""Download the test set of the Tencent Corpus and extract it to the
appropriate directory."""
download_dataset_zip(
name="tencent_test",
data_url=constants.TENCENT_TEST_URL,
output_dir=constants.TENCENT_TEST_DIR,
extracted_name=constants.TENCENT_TEST_ZIP_FOLDER,
tmp_dir=tmp_dir,
tqdm_name=tqdm_name,
tqdm_idx=tqdm_idx,
)
if __name__ == "__main__":
download_tencent_test(tqdm_name="tencent", tqdm_idx=0)
| 25.892857 | 77 | 0.714483 |
48bc446a06d58d6a75df610f9236257a1d789475 | 9,669 | py | Python | malaya_speech/train/model/fastspeechsplit/model.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | 111 | 2020-08-31T04:58:54.000Z | 2022-03-29T15:44:18.000Z | malaya_speech/train/model/fastspeechsplit/model.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | 14 | 2020-12-16T07:27:22.000Z | 2022-03-15T17:39:01.000Z | malaya_speech/train/model/fastspeechsplit/model.py | ishine/malaya-speech | fd34afc7107af1656dff4b3201fa51dda54fde18 | [
"MIT"
] | 29 | 2021-02-09T08:57:15.000Z | 2022-03-12T14:09:19.000Z | import tensorflow as tf
from ..fastspeech.model import (
TFFastSpeechEncoder,
TFTacotronPostnet,
TFFastSpeechLayer,
)
from ..speechsplit.model import InterpLnr
import numpy as np
import copy
| 34.532143 | 79 | 0.617541 |