hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a22d14123c5934e462a7334c1d55b574adf6c9be | 3,403 | py | Python | 10-19/14. normalize_sentences/test_normalize_sentences.py | dcragusa/PythonMorsels | 5f75b51a68769036e4004e9ccdada6b220124ab6 | [
"MIT"
] | 1 | 2021-11-30T05:03:24.000Z | 2021-11-30T05:03:24.000Z | 10-19/14. normalize_sentences/test_normalize_sentences.py | dcragusa/PythonMorsels | 5f75b51a68769036e4004e9ccdada6b220124ab6 | [
"MIT"
] | null | null | null | 10-19/14. normalize_sentences/test_normalize_sentences.py | dcragusa/PythonMorsels | 5f75b51a68769036e4004e9ccdada6b220124ab6 | [
"MIT"
] | 2 | 2021-04-18T05:26:43.000Z | 2021-11-28T18:46:43.000Z | import unittest
from textwrap import dedent
from normalize_sentences import normalize_sentences
if __name__ == "__main__":
unittest.main(verbosity=2)
| 33.362745 | 78 | 0.601234 |
a22d9fe19ea5e2d8a40235675b25713b84b3f165 | 2,673 | py | Python | graph/renkolib.py | kUNWAR-DIVYANSHU/stockui | f85a26b461512fefd33a4f2acfa30d178de3d118 | [
"MIT"
] | 2 | 2021-08-28T20:37:01.000Z | 2021-08-30T12:01:33.000Z | graph/renkolib.py | kUNWAR-DIVYANSHU/stockui | f85a26b461512fefd33a4f2acfa30d178de3d118 | [
"MIT"
] | null | null | null | graph/renkolib.py | kUNWAR-DIVYANSHU/stockui | f85a26b461512fefd33a4f2acfa30d178de3d118 | [
"MIT"
] | null | null | null | import atrlib
import pandas as pd
# module for calculation of data for renko graph
| 37.647887 | 63 | 0.412645 |
a22ef44872867d8b0cd94176f76c246bfbaa7a25 | 2,846 | py | Python | utils/utils.py | SoliareofAstora/Metagenomic-DeepFRI | 7ee12c5bc34f9103f113e93f570719686f856372 | [
"BSD-3-Clause"
] | null | null | null | utils/utils.py | SoliareofAstora/Metagenomic-DeepFRI | 7ee12c5bc34f9103f113e93f570719686f856372 | [
"BSD-3-Clause"
] | null | null | null | utils/utils.py | SoliareofAstora/Metagenomic-DeepFRI | 7ee12c5bc34f9103f113e93f570719686f856372 | [
"BSD-3-Clause"
] | 1 | 2022-01-12T10:41:51.000Z | 2022-01-12T10:41:51.000Z | import os
import pathlib
import requests
import shutil
import subprocess
import time
ENV_PATHS = set()
| 28.747475 | 132 | 0.627899 |
a22fe2112341437f4d8c36db1b3319ad00230552 | 2,274 | py | Python | fuzzinator/tracker/github_tracker.py | akosthekiss/fuzzinator | 194e199bb0efea26b857ad05f381f72e7a9b8f66 | [
"BSD-3-Clause"
] | null | null | null | fuzzinator/tracker/github_tracker.py | akosthekiss/fuzzinator | 194e199bb0efea26b857ad05f381f72e7a9b8f66 | [
"BSD-3-Clause"
] | null | null | null | fuzzinator/tracker/github_tracker.py | akosthekiss/fuzzinator | 194e199bb0efea26b857ad05f381f72e7a9b8f66 | [
"BSD-3-Clause"
] | 1 | 2018-06-28T05:21:21.000Z | 2018-06-28T05:21:21.000Z | # Copyright (c) 2016-2022 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
try:
# FIXME: very nasty, but a recent PyGithub version began to depend on
# pycrypto transitively, which is a PITA on Windows (can easily fail with an
# ``ImportError: No module named 'winrandom'``) -- so, we just don't care
# for now if we cannot load the github module at all. This workaround just
# postpones the error to the point when ``GithubTracker`` is actually used,
# so be warned, don't do that on Windows!
from github import Github, GithubException
except ImportError:
pass
from .tracker import Tracker, TrackerError
| 34.454545 | 145 | 0.670624 |
a2315dd43508aee4e316bc2ccbff15322163a590 | 2,624 | py | Python | qmdz_const.py | cygnushan/measurement | 644e8b698faf50dcc86d88834675d6adf1281b10 | [
"MIT"
] | 1 | 2022-03-18T18:38:02.000Z | 2022-03-18T18:38:02.000Z | qmdz_const.py | cygnushan/measurement | 644e8b698faf50dcc86d88834675d6adf1281b10 | [
"MIT"
] | null | null | null | qmdz_const.py | cygnushan/measurement | 644e8b698faf50dcc86d88834675d6adf1281b10 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import os
from init_op import read_config
# ROOT_PATH = os.path.split(os.path.realpath(__file__))[0]
if getattr(sys, 'frozen', None):
ROOT_DIR = os.path.dirname(sys.executable)
else:
ROOT_DIR = os.path.dirname(__file__)
VI_CONF_PATH = ROOT_DIR + "\conf\VI_CONF.ini"
ST_CONF_PATH = ROOT_DIR + "\conf\ST_CONF.ini"
SC_CONF_PATH = ROOT_DIR + "\conf\SC_CONF.ini"
SYS_CONF_PATH = ROOT_DIR + "\conf\SYS_CONF.ini"
vrange_dict = {0:"AUTO", 1:"1e-6", 2:"10e-6", 3:"100e-6",4:"1e-3", 5:"10e-3",
6:"100e-3", 7:"1", 8:"10", 9:"210"}
irange_dict= {0:"AUTO", 1:"10e-9", 2:"100e-9", 3:"1e-6", 4:"10e-6", 5:"100e-6",
6:"1e-3", 7:"10e-3", 8:"100e-3", 9:"1"}
gas_coef = {0:1.000, 1:1.400, 2:0.446, 3:0.785, 4:0.515, 5:0.610, 6:0.500,
7:0.250, 8:0.410, 9:0.350, 10:0.300, 11:0.250, 12:0.260, 13:1.000,
14:0.740, 15:0.790, 16:1.010, 17:1.000, 18:1.400, 19:1.400, 20:1.000,
21:0.510, 22:0.990, 23:0.710, 24:1.400, 25:0.985, 26:0.630, 27:0.280,
28:0.620, 29:1.360}
res_range = {0:"100", 1:"1e3", 2:"10e3", 3:"100e3", 4:"1e6", 5:"10e6", 6:"100e6", 7:"200e6"}
res_det = 0
VI_ILIST = []
IV_VLIST = []
VI_GAS = []
ST_GAS_AUTO = [0,0,0,0,0,0,0,0]
ST_GAS_MODE = 0 # 0: 1
SC_GAS_MODE = 0 # 0: 1
SC_FLOW1 = []
SC_FLOW2 = []
SC_FLOW3 = []
SC_GAS_PARA = []
hold_time = 60
low_offset = 0.2
high_offset = 1
up_slot = 1
down_slot = 1
critical_temp = 500
measure_times = 1
temp_list = []
Auto_Range = 1
# 2400
MEAS_MODE = 0 #0214
OUTPUT_MODE = 0 # 01
VI_MODE = 1
#
TIME_t1 = 0
TIME_t2 = 0
TIME_t3 = 0
TIME_t4 = 0
TIME_SUM = 0
#[11,22,33,,]
t1_gas = []
t2_gas = []
t3_gas = []
t4_gas = []
flowmeter1_state = 0
flowmeter2_state = 0
flowmeter3_state = 0
airpump_state = 0
color_list = ["Aqua","Black","Fuchsia","Gray","Green","Lime","Maroon","Navy",
"Red","Silver","Teal","Yellow","Blue","Olive","Purple","White"]
PARA_NAME = ['SteP','HIAL','LoAL','HdAL','LdAL','AHYS','CtrL','M5',
'P','t','CtI','InP','dPt','SCL','SCH','AOP',
'Scb','OPt','OPL','OPH','AF','RUNSTA','Addr','FILt',
'AmAn','Loc','c01','t01','c02','t02', 'c03','t03']
PARA_DEFAULT = [1,8000,-1960,9999,9999,2,3,50,65,20,2,0,1,0,
5000,5543,0,0,0,100,6,12,1,10,27,808]
flow1_range = int(get_range('flow1_range'))
flow2_range = int(get_range('flow2_range'))
flow3_range = int(get_range('flow3_range'))
| 24.523364 | 92 | 0.596418 |
a231a6c5e1e9bfd374c54640c8a12d24c01e3857 | 93 | py | Python | lattedb/linksmear/apps.py | callat-qcd/lattedb | 75c06748f3d59332a84ec1b5794c215c5974a46f | [
"BSD-3-Clause"
] | 1 | 2019-12-11T02:33:23.000Z | 2019-12-11T02:33:23.000Z | lattedb/linksmear/apps.py | callat-qcd/lattedb | 75c06748f3d59332a84ec1b5794c215c5974a46f | [
"BSD-3-Clause"
] | 10 | 2020-01-29T17:06:01.000Z | 2021-05-31T14:41:19.000Z | lattedb/linksmear/apps.py | callat-qcd/lattedb | 75c06748f3d59332a84ec1b5794c215c5974a46f | [
"BSD-3-Clause"
] | null | null | null | from django.apps import AppConfig
| 15.5 | 33 | 0.763441 |
a232ee55bbdd0227f3c92c01f62af655cba96907 | 2,088 | py | Python | project/repository/user.py | tobiasaditya/fastapi-blog | 0f50f4261755f926ce9e951db8237a5f38384dcb | [
"MIT"
] | null | null | null | project/repository/user.py | tobiasaditya/fastapi-blog | 0f50f4261755f926ce9e951db8237a5f38384dcb | [
"MIT"
] | null | null | null | project/repository/user.py | tobiasaditya/fastapi-blog | 0f50f4261755f926ce9e951db8237a5f38384dcb | [
"MIT"
] | null | null | null | from typing import List
from fastapi import APIRouter
from fastapi.params import Depends
from fastapi import HTTPException, status
from sqlalchemy.orm.session import Session
from project import schema, models, database, hashing
router = APIRouter(
prefix="/user",
tags=['Users']
)
# @router.put('/{id}')
# def update_project_id(id:int,request:schema.Project,db:Session = Depends(database.get_db)):
# #Search for projects' id
# selected_project = db.query(models.Project).filter(models.Project.id == id)
# if not selected_project.first():
# raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,detail=f"Project {id} not found.")
# selected_project.update(dict(request))
# return {'status':f'project {id} updated'}
# @router.delete('/{id}')
# def delete_project_id(id:int,db:Session = Depends(database.get_db)):
# selected_project = db.query(models.Project).filter(models.Project.id == id).first()
# if not selected_project:
# raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,detail=f"Project {id} not found.")
# db.delete(selected_project)
# db.commit()
# return {'status':f'delete project_id {id} successful'}
| 33.142857 | 102 | 0.724617 |
a23471f40d09455ca7a0123fbc08ae7b2e5ada89 | 17,643 | py | Python | milking_cowmask/data_sources/imagenet_data_source.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | milking_cowmask/data_sources/imagenet_data_source.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | milking_cowmask/data_sources/imagenet_data_source.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet input pipeline.
"""
import os
import pickle
import jax
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
TRAIN_IMAGES = 1281167
TEST_IMAGES = 50000
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
def random_crop(image,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,):
"""Randomly crop an input image.
Args:
image: The image to be cropped.
min_object_covered: The minimal percentage of the target object that should
be in the final crop.
aspect_ratio_range: The cropped area of the image must have an aspect
ratio = width / height within this range.
area_range: The cropped area of the image must contain a fraction of the
input image within this range.
max_attempts: Number of attempts at generating a cropped region of the image
of the specified constraints. After max_attempts failures,
the original image is returned.
Returns:
A random crop of the supplied image.
"""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop = tf.image.crop_to_bounding_box(image, offset_y, offset_x,
target_height, target_width)
return crop
def center_crop(image, image_size, crop_padding=32):
"""Crop an image in the center while preserving aspect ratio.
Args:
image: The image to be cropped.
image_size: the desired crop size.
crop_padding: minimal distance of the crop from the edge of the image.
Returns:
The center crop of the provided image.
"""
shape = tf.shape(image)
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
((image_size / (image_size + crop_padding)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)),
tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop = tf.image.crop_to_bounding_box(image, offset_height, offset_width,
padded_center_crop_size,
padded_center_crop_size)
return crop
def colour_jitter(image, greyscale_prob=0.0):
"""Colour jitter augmentation.
Args:
image: The image to be augmented
greyscale_prob: probability of greyscale conversion
Returns:
Augmented image
"""
# Make sure it has 3 channels so random_saturation and random_hue don't
# fail on greyscale images
image = image * tf.ones([1, 1, 3], dtype=image.dtype)
if greyscale_prob > 0.0:
p = tf.random.uniform([1])
image = tf.cond(tf.less(p[0], greyscale_prob), f_grey, f_colour)
else:
image = tf.image.random_saturation(image, 0.7, 1.4)
image = tf.image.random_hue(image, 0.1)
image = tf.image.random_contrast(image, 0.7, 1.4)
image = tf.image.random_brightness(image, 0.4)
return image
def preprocess_train_image(image, apply_colour_jitter=False,
greyscale_prob=0.0, image_size=224):
"""Preprocess a raw ImageNet image for training or evaluation.
Args:
image: The image to be preprocessed.
apply_colour_jitter: If True, apply colour jitterring.
greyscale_prob: Probability of converting image to greyscale.
image_size: The target size of the image.
Returns:
The pre-processed image.
"""
image = random_crop(image)
image = tf.image.resize([image],
[image_size, image_size],
method=tf.image.ResizeMethod.BICUBIC
)[0]
# Randomly flip the image horizontally.
image = tf.image.random_flip_left_right(image)
if apply_colour_jitter:
image = colour_jitter(image, greyscale_prob=greyscale_prob)
image = normalize_image(image)
return image
def preprocess_eval_image(image, image_size=224):
"""Preprocess a raw ImageNet image for training or evaluation.
Args:
image: The image to be preprocessed.
image_size: The target size of the image.
Returns:
The pre-processed image.
"""
image = center_crop(image, image_size)
image = tf.image.resize([image],
[image_size, image_size],
method=tf.image.ResizeMethod.BICUBIC
)[0]
image = normalize_image(image)
return image
_JPEG_ENCODED_FEATURE_DESCRIPTION = {
'label': tf.io.FixedLenFeature([], tf.int64, default_value=0),
'image': tf.io.FixedLenFeature([], tf.string),
'file_name': tf.io.FixedLenFeature([], tf.string),
}
def _load_tfds_imagenet(split_name, n_total):
"""Load ImageNet from TFDS."""
split_size = float(n_total) // jax.host_count()
start = split_size * jax.host_id()
end = start + split_size
start_index = int(round(start))
end_index = int(round(end))
split = '{}[{}:{}]'.format(split_name, start_index, end_index)
return tfds.load('imagenet2012:5.*.*', split=split)
def _load_custom_imagenet_split(split_path):
"""Load a custom split of the ImageNet dataset."""
if not tf.io.gfile.exists(split_path):
raise RuntimeError('Cannot find {}'.format(split_path))
shard_filenames = tf.io.gfile.listdir(split_path)
shard_filenames.sort()
if jax.host_count() > 1:
n_hosts = jax.host_count()
host_id = jax.host_id()
shard_filenames = [f for i, f in enumerate(shard_filenames)
if (i % n_hosts) == host_id]
files_in_split = [os.path.join(split_path, f) for f in shard_filenames]
ds = tf.data.TFRecordDataset(files_in_split, buffer_size=128 * 1024 * 1024,
num_parallel_reads=len(files_in_split))
# ds = deserialize_and_decode_image_dataset(ds, batch_size=256)
ds = deserialize_and_decode_image_dataset(ds, batch_size=1)
return ds
_SUP_PATH_PAT = r'{imagenet_subset_dir}/imagenet_{n_sup}_seed{subset_seed}'
_VAL_TVSPLIT_PATH_PAT = r'{imagenet_subset_dir}/imagenet_tv{n_val}s{val_seed}_split.pkl'
_VAL_PATH_PAT = r'{imagenet_subset_dir}/imagenet_tv{n_val}s{val_seed}_val'
_VAL_SUP_PATH_PAT = r'{imagenet_subset_dir}/imagenet_tv{n_val}s{val_seed}_{n_sup}_seed{subset_seed}'
| 36.75625 | 100 | 0.677719 |
a2397ee156e882b19d6dbf902268121905eaf802 | 4,293 | py | Python | utils/image.py | ariel415el/Efficient-GPNN | 05f6588c3cc920e810d71fc9ed001f8915d7fc8a | [
"Apache-2.0"
] | 7 | 2021-11-11T22:57:14.000Z | 2022-03-23T08:47:00.000Z | utils/image.py | ariel415el/Efficient-GPNN | 05f6588c3cc920e810d71fc9ed001f8915d7fc8a | [
"Apache-2.0"
] | null | null | null | utils/image.py | ariel415el/Efficient-GPNN | 05f6588c3cc920e810d71fc9ed001f8915d7fc8a | [
"Apache-2.0"
] | 4 | 2021-11-18T07:24:09.000Z | 2022-03-26T22:35:05.000Z | import os
import cv2
import torch
from torch.nn import functional as F
from torchvision import transforms
import torchvision.utils
def blur(img, pyr_factor):
    """Blur ``img`` by shrinking it by ``pyr_factor`` and rescaling to the original size.

    A ``pyr_factor`` of 1 (or more) leaves the image untouched.
    """
    if pyr_factor >= 1:
        return img
    shrunk = downscale(img, pyr_factor)
    return transforms.Resize(img.shape[-2:], antialias=True)(shrunk)
def match_image_sizes(input, target):
    """Resize and center-crop ``input`` so it has the same size/aspect ratio as ``target``.

    Both tensors are expected to be 4D (batch, channel, height, width).
    The input is first scaled so one dimension matches the target, the excess
    of the other dimension is cropped symmetrically, and a final resize makes
    the spatial size exactly equal to ``target``'s.

    Bug fixed: the original used ``input[..., int(c/2):-int(c/2)]`` which, for a
    1-pixel excess, evaluates to ``[0:-0]`` -> an empty tensor; the two branches
    were also guarded inconsistently (``> 0`` vs ``> 1``). The crop now always
    removes exactly the excess pixels.
    """
    assert(len(input.shape) == len(target.shape) and len(target.shape) == 4)
    input_h, input_w = input.shape[-2:]
    target_h, target_w = target.shape[-2:]

    input_scale_factor = input_h / input_w
    target_scale_factor = target_h / target_w
    if target_scale_factor > input_scale_factor:
        # Target is relatively taller: match heights, then crop excess width.
        input = transforms.Resize((target_h, int(input_w / input_h * target_h)), antialias=True)(input)
        pixels_to_cut = input.shape[-1] - target_w
        if pixels_to_cut > 0:
            left = pixels_to_cut // 2
            input = input[:, :, :, left:left + target_w]
    else:
        # Target is relatively wider: match widths, then crop excess height.
        input = transforms.Resize((int(input_h / input_w * target_w), target_w), antialias=True)(input)
        pixels_to_cut = input.shape[-2] - target_h
        if pixels_to_cut > 0:
            top = pixels_to_cut // 2
            input = input[:, :, top:top + target_h]

    # Final resize guarantees an exact spatial match with the target.
    input = transforms.Resize(target.shape[-2:], antialias=True)(input)
    return input
def extract_patches(src_img, patch_size, stride):
    """Split a (1, 3, H, W) image into overlapping square patches.

    :param src_img: source image tensor of shape (1, 3, H, W).
    :param patch_size: side length of each square patch.
    :param stride: step between neighboring patches.
    :return: tensor of shape (N_patches, 3 * patch_size**2), one flattened
        patch per row (channel-major ordering, as produced by F.unfold).
    """
    n_channels = 3
    # (1, 3*p*p, N_patches) column matrix of flattened patches.
    unfolded = F.unfold(src_img, kernel_size=patch_size, stride=stride)
    # Drop the batch dim and transpose so each row is one patch.
    return unfolded.squeeze(dim=0).t().reshape(-1, n_channels * patch_size ** 2)
def combine_patches(patches, patch_size, stride, img_shape):
    """Combine flattened patches back into an image, averaging overlapping pixels.

    :param patches: pytorch tensor of shape (N_patches, 3*patch_size**2), e.g.
        as produced by splitting an image with the same patch_size and stride.
    :param patch_size: side length of each square patch.
    :param stride: stride that was used when the patches were extracted.
    :param img_shape: shape of an image that, if split into patches with the
        given stride and patch_size, yields the same number of patches N_patches.
    :return: an image tensor of shape img_shape.
    """
    # (N, 3*p*p) -> (1, 3*p*p, N): the column layout F.fold expects.
    patches = patches.permute(1, 0).unsqueeze(0)
    # Sum each patch back into place; overlapping pixels accumulate.
    combined = F.fold(patches, output_size=img_shape[-2:], kernel_size=patch_size, stride=stride)

    # "Normal fold matrix": count, per output pixel, how many patches covered
    # it, so the accumulated sums can be turned into averages.
    input_ones = torch.ones(img_shape, dtype=patches.dtype, device=patches.device)
    divisor = F.unfold(input_ones, kernel_size=patch_size, dilation=(1, 1), stride=stride, padding=(0, 0))
    divisor = F.fold(divisor, output_size=img_shape[-2:], kernel_size=patch_size, stride=stride)

    # Pixels covered by no patch would divide by zero; leave them at their sum.
    divisor[divisor == 0] = 1.0
    return (combined / divisor).squeeze(dim=0).unsqueeze(0)
a23daef3bb54fa9c84f160a660ef817f0e87362d | 499 | py | Python | docs/user/visualization/matplotlib/pythonstyle.py | joelfrederico/mytools | 7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f | [
"MIT"
] | 1 | 2021-03-31T23:27:09.000Z | 2021-03-31T23:27:09.000Z | docs/user/visualization/matplotlib/pythonstyle.py | joelfrederico/mytools | 7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f | [
"MIT"
] | null | null | null | docs/user/visualization/matplotlib/pythonstyle.py | joelfrederico/mytools | 7bf57c49c7dde0a8b0aa337fbd2fbd527ce7a67f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
# Create data to plot
x = np.linspace(0, 10, 100)
y1 = np.sin(x)
y2 = np.cos(x)
# Create a grid
gs = gridspec.GridSpec(1, 2)
# Create a figure
fig = plt.figure(figsize=(16, 6))
# Create axes
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[0, 1])
# Plot data
ax1.plot(x, y1)
ax2.plot(x, y2)
# Rearrange figure to use all space
fig.tight_layout()
# Show figure
plt.show()
| 16.633333 | 38 | 0.695391 |
a23e0e43898b8301125178c7c69d4cccc505d6ca | 21,583 | py | Python | StockAnalysisSystem/ui/Extension/recycled/announcement_downloader.py | SleepySoft/StockAnalysisSystem | 75f95738831614f7946f85d09118e447f7ac6dc7 | [
"Apache-2.0"
] | 138 | 2018-01-03T03:32:49.000Z | 2022-03-12T02:57:46.000Z | StockAnalysisSystem/ui/Extension/recycled/announcement_downloader.py | SleepySoft/StockAnalysisSystem | 75f95738831614f7946f85d09118e447f7ac6dc7 | [
"Apache-2.0"
] | 9 | 2018-01-01T03:16:24.000Z | 2021-05-27T09:57:24.000Z | StockAnalysisSystem/ui/Extension/recycled/announcement_downloader.py | SleepySoft/StockAnalysisSystem | 75f95738831614f7946f85d09118e447f7ac6dc7 | [
"Apache-2.0"
] | 50 | 2019-08-05T01:02:30.000Z | 2022-03-07T00:52:14.000Z | import time
import urllib
import random
import logging
import requests
import datetime
from os import sys, path, makedirs
from PyQt5.QtCore import Qt, QTimer, QDateTime
from PyQt5.QtWidgets import QWidget, QPushButton, QVBoxLayout, QLabel, QComboBox, QDateTimeEdit, QCheckBox, QLineEdit, \
QRadioButton
root_path = path.dirname(path.dirname(path.abspath(__file__)))
from StockAnalysisSystem.core.Utility.common import *
from StockAnalysisSystem.core.Utility.ui_utility import *
from StockAnalysisSystem.core.Utility.task_queue import *
from StockAnalysisSystem.core.Utility.time_utility import *
from StockAnalysisSystem.ui.Utility.ui_context import UiContext
from StockAnalysisSystem.interface.interface import SasInterface as sasIF
from StockAnalysisSystem.core.Utility.securities_selector import SecuritiesSelector
# 20200217: It doesn't work anymore - Move to recycled
# -------------------------------------------- class AnnouncementDownloader --------------------------------------------
# -----------------------------------------------------------
# Get code from : https://github.com/gaodechen/cninfo_process
# -----------------------------------------------------------
User_Agent = [
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0"
]
headers = {'Accept': 'application/json, text/javascript, */*; q=0.01',
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-HK;q=0.6,zh-TW;q=0.5",
'Host': 'www.cninfo.com.cn',
'Origin': 'http://www.cninfo.com.cn',
'Referer': 'http://www.cninfo.com.cn/new/commonUrl?url=disclosure/list/notice',
'X-Requested-With': 'XMLHttpRequest'
}
# ----------------------------------------------------------------------------------------------------------------------
ALL_STOCK_TEXT = ''
DEFAULT_INFO = '''
1.https://github.com/gaodechen/cninfo_process
2.
3.Download/report/
4.
- View->
-
5.BAN
'''
DOWNLOAD_ALL_TIPS = '''
********BAN********
--------------------------
'''
# ----------------------------------- UpdateTask -----------------------------------
# ----------------------------- AnnouncementDownloaderUi -----------------------------
# ----------------------------------------------------------------------------------------------------------------------
def plugin_prob() -> dict:
    """Describe this plugin (id, name, version, tags) to the plugin manager."""
    properties = {}
    properties['plugin_id'] = 'efa60977-65e9-4ecf-9271-7c6e629da399'
    properties['plugin_name'] = 'ReportDownloader'
    properties['plugin_version'] = '0.0.0.1'
    properties['tags'] = ['Announcement', 'Report', 'Finance Report', 'Annual Report', 'Sleepy']
    return properties
def plugin_adapt(method: str) -> bool:
    """Return True when this plugin supports the given integration method."""
    supported_methods = ('widget',)
    return method in supported_methods
def plugin_capacities() -> list:
    """List the capacities this plugin offers (a fresh list on every call)."""
    capacities = []
    capacities.append('widget')
    return capacities
# ----------------------------------------------------------------------------------------------------------------------
sasInterface = None
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    # Install the custom excepthook so uncaught exceptions are surfaced
    # instead of silently disappearing.  NOTE(review): exception_hook, main
    # and traceback are not defined in this fragment — presumably pulled in
    # via the `from ... import *` lines above; confirm against the package.
    sys.excepthook = exception_hook
    try:
        main()
    except Exception as e:
        # Last-resort guard: report the error and full traceback, then exit.
        print('Error =>', e)
        print('Error =>', traceback.format_exc())
        exit()
    finally:
        pass
| 39.099638 | 195 | 0.580874 |
a23e80a2bc9c75ffcdcaee541fdcd296843ceb25 | 1,109 | py | Python | tests/routes/generators/test_random.py | pedrofreitascampospro/locintel | eb9c56cdc308660c31d90abe9fe62bd3634ba273 | [
"MIT"
] | null | null | null | tests/routes/generators/test_random.py | pedrofreitascampospro/locintel | eb9c56cdc308660c31d90abe9fe62bd3634ba273 | [
"MIT"
] | null | null | null | tests/routes/generators/test_random.py | pedrofreitascampospro/locintel | eb9c56cdc308660c31d90abe9fe62bd3634ba273 | [
"MIT"
] | null | null | null | import random
import shapely.geometry as sg
from locintel.quality.generators.random import RandomRoutePlanGenerator, polygons
random.seed(10)
| 35.774194 | 85 | 0.733093 |
a23ebe170e2650bcc75fd785f5c11d3fba8249e1 | 3,878 | py | Python | curtin-rci/local_utils.py | Curtin-Open-Knowledge-Initiative/mag_coverage_report | a75dd1273c44895b5c857ebd498407aa95bd45e5 | [
"Apache-2.0"
] | null | null | null | curtin-rci/local_utils.py | Curtin-Open-Knowledge-Initiative/mag_coverage_report | a75dd1273c44895b5c857ebd498407aa95bd45e5 | [
"Apache-2.0"
] | 2 | 2021-08-30T11:52:25.000Z | 2021-09-02T12:11:05.000Z | curtin-rci/local_utils.py | Curtin-Open-Knowledge-Initiative/mag_coverage_report | a75dd1273c44895b5c857ebd498407aa95bd45e5 | [
"Apache-2.0"
] | 3 | 2021-07-04T07:39:01.000Z | 2021-08-24T15:24:29.000Z | import pandas as pd
import plotly.graph_objects as go
from typing import Union, Optional
from pathlib import Path
DATA_FOLDER = Path('data_files')
MAIN_SCHOOLS = [
'Curtin Law School',
'Curtin Medical School',
'School of Accounting, Economics and Finance',
'School of Allied Health',
'School of Civil and Mechanical Engineering',
'School of Design and the Built Environment',
'School of Earth and Planetary Sciences',
'School of Education',
'School of Elec Eng, Comp and Math Sci',
'School of Management & Marketing',
'School of Media, Creative Arts and Social Inquiry',
'School of Molecular and Life Sciences',
'School of Nursing',
'School of Population Health',
'WASM Minerals, Energy and Chemical Engineering',
'Not Assigned'
]
CITATION_SCHOOLS = [
'Curtin Medical School',
'School of Allied Health',
'School of Civil and Mechanical Engineering',
'School of Earth and Planetary Sciences',
'School of Elec Eng, Comp and Math Sci',
'School of Molecular and Life Sciences',
'School of Nursing',
'School of Population Health',
'WASM Minerals, Energy and Chemical Engineering',
]
FIELD_METRIC_COLUMNS = [ #'magy_rci_group_0', 'magy_rci_group_I',
# 'magy_rci_group_II', 'magy_rci_group_III', 'magy_rci_group_IV',
# 'magy_rci_group_V', 'magy_rci_group_VI',
'magy_centile_1',
'magy_centile_5', 'magy_centile_10', 'magy_centile_25',
'magy_centile_50', 'magy_centile_other']
JOURNAL_METRIC_COLUMNS = ['rci_group_0', 'rci_group_I',
'rci_group_II', 'rci_group_III', 'rci_group_IV',
'rci_group_V', 'rci_group_VI', 'mag_centile_1',
'mag_centile_5', 'mag_centile_10', 'mag_centile_25',
'mag_centile_50', 'mag_centile_other'] | 33.721739 | 95 | 0.57968 |
a23fbcb063477231d30f7934e898ac5453872dde | 2,492 | py | Python | scripts/pa-loaddata.py | kbase/probabilistic_annotation | 2454925ca98c80c73bda327a0eff8aed94c5a48d | [
"MIT"
] | null | null | null | scripts/pa-loaddata.py | kbase/probabilistic_annotation | 2454925ca98c80c73bda327a0eff8aed94c5a48d | [
"MIT"
] | null | null | null | scripts/pa-loaddata.py | kbase/probabilistic_annotation | 2454925ca98c80c73bda327a0eff8aed94c5a48d | [
"MIT"
] | null | null | null | #! /usr/bin/python
import argparse
import os
from biokbase.probabilistic_annotation.DataParser import DataParser
from biokbase.probabilistic_annotation.Helpers import get_config
from biokbase import log
desc1 = '''
NAME
pa-loaddata -- load static database of gene annotations
SYNOPSIS
'''
desc2 = '''
DESCRIPTION
Load the static database of high-quality gene annotations along with
files containing intermediate data. The files are then available for
a probabilistic annotation server on this system. Since downloading
from Shock can take a long time, run this command to load the static
database files before the server is started. The configFilePath argument
specifies the path to the configuration file for the service.
Note that a probabilistic annotation server is unable to service client
requests for the annotate() and calculate() methods while this command is
running and must be restarted to use the new files.
'''
desc3 = '''
EXAMPLES
Load static database files:
> pa-loaddata loaddata.cfg
SEE ALSO
pa-gendata
pa-savedata
AUTHORS
Matt Benedict, Mike Mundy
'''
# Main script function: parse the CLI, then load the static database files
# so a probabilistic annotation server on this system can use them.
if __name__ == "__main__":
    # Parse arguments.  The formatted usage string is embedded into the
    # description so the epilog/description text reads as one man-page.
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, prog='pa-loaddata', epilog=desc3)
    parser.add_argument('configFilePath', help='path to configuration file', action='store', default=None)
    usage = parser.format_usage()
    parser.description = desc1 + ' ' + usage + desc2
    parser.usage = argparse.SUPPRESS
    args = parser.parse_args()

    # Create a log object.  The service name defaults to
    # 'probabilistic_annotation' unless overridden via the environment.
    submod = os.environ.get('KB_SERVICE_NAME', 'probabilistic_annotation')
    mylog = log.log(submod, ip_address=True, authuser=True, module=True, method=True,
        call_id=True, config=args.configFilePath)

    # Get the probabilistic_annotation section from the configuration file.
    config = get_config(args.configFilePath)

    # Create a DataParser object for working with the static database files (the
    # data folder is created if it does not exist).
    dataParser = DataParser(config)

    # Get the static database files. If the files do not exist and they are downloaded
    # from Shock, the command may run for a long time.
    # NOTE(review): dataOption is captured but never used before exit.
    testDataPath = os.path.join(os.environ['KB_TOP'], 'services', submod, 'testdata')
    dataOption = dataParser.getDatabaseFiles(mylog, testDataPath)

    exit(0)
| 34.611111 | 124 | 0.726726 |
a2408683ebb50640f78f65bb066c73360bbad5e1 | 21,441 | py | Python | pippin.py | harlowja/pippin | e101ad867ea9982457374281a2050c30020b10f4 | [
"Apache-2.0"
] | null | null | null | pippin.py | harlowja/pippin | e101ad867ea9982457374281a2050c30020b10f4 | [
"Apache-2.0"
] | null | null | null | pippin.py | harlowja/pippin | e101ad867ea9982457374281a2050c30020b10f4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
try:
from collections import OrderedDict # noqa
except ImportError:
from ordereddict import OrderedDict # noqa
import collections
import contextlib
import hashlib
import json
import logging
import os
import shutil
import sys
import tempfile
# TODO: get rid of this...
from taskflow.types import tree
from distutils import version as dist_version
import argparse
import networkx as nx
from pip import req as pip_req
from pkgtools.pypi import PyPIJson
from pkgtools.pypi import real_name as pypi_real_name
import requests
import six
LOG = logging.getLogger('pippin')
# Default URL downloading/fetching timeout...
TIMEOUT = 5.0
try:
from pip import util as pip_util # noqa
except ImportError:
from pip import utils as pip_util # noqa
def parse_line(line, path=None):
from_where = ''
if path:
from_where = " -> ".join(str(r.req) for r in path)
from_where = from_where.strip()
if not from_where:
from_where = "???"
if line.startswith('-e') or line.startswith('--editable'):
if line.startswith('-e'):
line = line[2:].strip()
else:
line = line[len('--editable'):].strip().lstrip('=')
req = pip_req.InstallRequirement.from_editable(line,
comes_from=from_where)
else:
req = pip_req.InstallRequirement.from_line(line,
comes_from=from_where)
return req
_MatchedRelease = collections.namedtuple('_MatchedRelease',
['string_version',
'parsed_version',
'origin_url',
'origin_filename',
'origin_size'])
def expand(requirements, options):
if not requirements:
return {}
print("Expanding all requirements dependencies (deeply) and"
" finding matching versions that will be installable into a"
" directed graph...")
print("Please wait...")
# Cache it in the scratch dir to avoid recomputing...
buf = six.StringIO()
for (pkg_name, pkg_req) in six.iteritems(requirements):
buf.write(pkg_req.req)
buf.write("\n")
graph_name = hashlib.md5(buf.getvalue().strip()).hexdigest()
graph_name += str(PackageFinder.MAX_VERSIONS)
graph_pickled_filename = os.path.join(
options.scratch, '.graphs', "%s.gpickle" % graph_name)
if os.path.exists(graph_pickled_filename):
print("Loading prior graph from '%s" % graph_pickled_filename)
return nx.read_gpickle(graph_pickled_filename)
else:
finder = PackageFinder(options)
detailer = EggDetailer(options)
graph = DiGraph(name=graph_name)
expander = DeepExpander(finder, detailer, options)
graph = expander.expand_many(list(six.itervalues(requirements)))
nx.write_gpickle(graph, graph_pickled_filename)
return graph
def tree_generator(root, graph, parent=None):
children = list(graph.successors_iter(root))
if parent is None:
parent = tree.Node(root, **graph.node[root])
for child in children:
node = tree.Node(child, **graph.node[child])
parent.add(node)
tree_generator(child, graph, parent=node)
return parent
def resolve(requirements, graph, options):
solutions = OrderedDict()
for pkg_name, pkg_req in six.iteritems(requirements):
LOG.debug("Generating the solution paths for '%s'", pkg_req)
node = tree_generator(pkg_req.req, graph)
solutions[pkg_name] = node
node_paths = []
for sub_node in node:
leaves = []
for n in sub_node.dfs_iter():
if not n.child_count():
leaves.append(n)
paths = []
for n in leaves:
path = []
for p_n in n.path_iter():
if _is_exact(p_n.item):
path.insert(0, p_n.item)
if p_n is sub_node:
break
paths.append(path)
if not paths:
if _is_exact(sub_node.item):
paths.append([sub_node.item])
else:
raise RuntimeError("No solution paths found for '%s'"
% sub_node.item)
LOG.debug("%s solution paths found for '%s' (solution"
" for '%s') found", len(paths), sub_node.item, pkg_req)
for i, path in enumerate(paths):
LOG.debug("Solution path %s:", i)
for p in path:
LOG.debug(" - %s" % p)
node_paths.append(paths)
return {}
def setup_logging(options):
if options.verbose:
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s: @%(name)s : %(message)s',
stream=sys.stdout)
else:
logging.basicConfig(level=logging.INFO,
format='%(levelname)s: @%(name)s : %(message)s',
stream=sys.stdout)
req_logger = logging.getLogger('requests')
req_logger.setLevel(logging.WARNING)
def main():
parser = create_parser()
options = parser.parse_args()
if not options.requirements:
parser.error("At least one requirement file must be provided")
setup_logging(options)
initial = parse_requirements(options)
for d in ['.download', '.versions', '.graphs']:
scratch_path = os.path.join(options.scratch, d)
if not os.path.isdir(scratch_path):
os.makedirs(scratch_path)
print("Initial package set:")
for r in sorted(list(six.itervalues(initial)), cmp=req_cmp):
print(" - %s" % r)
graph = expand(initial, options)
if options.verbose:
print(graph.pformat())
resolved = resolve(initial, graph, options)
print("Resolved package set:")
for r in sorted(list(six.itervalues(resolved)), cmp=req_cmp):
print(" - %s" % r)
if __name__ == "__main__":
main()
| 37.223958 | 79 | 0.54671 |
a2431b76a7fd7273de98b3d8241bb7216ee7d296 | 2,182 | py | Python | python/src/main/python/pygw/query/aggregation_query_builder.py | radiant-maxar/geowave | 2d9f39d32e4621c8f5965a4dffff0623c1c03231 | [
"Apache-2.0"
] | 280 | 2017-06-14T01:26:19.000Z | 2022-03-28T15:45:23.000Z | python/src/main/python/pygw/query/aggregation_query_builder.py | radiant-maxar/geowave | 2d9f39d32e4621c8f5965a4dffff0623c1c03231 | [
"Apache-2.0"
] | 458 | 2017-06-12T20:00:59.000Z | 2022-03-31T04:41:59.000Z | python/src/main/python/pygw/query/aggregation_query_builder.py | radiant-maxar/geowave | 2d9f39d32e4621c8f5965a4dffff0623c1c03231 | [
"Apache-2.0"
] | 135 | 2017-06-12T20:39:34.000Z | 2022-03-15T13:42:30.000Z | #
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
from .base_query_builder import BaseQueryBuilder
from .aggregation_query import AggregationQuery
from ..base.type_conversions import StringArrayType
| 35.193548 | 120 | 0.651696 |
a243a526c6890fd80b3908d73d1ec8bf0226c2b2 | 6,059 | py | Python | tests/test_cells.py | nclarey/pyg-base | a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862 | [
"MIT"
] | null | null | null | tests/test_cells.py | nclarey/pyg-base | a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862 | [
"MIT"
] | null | null | null | tests/test_cells.py | nclarey/pyg-base | a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862 | [
"MIT"
] | null | null | null | from pyg_base import acell, cell, cell_func, dictattr, dt, getargspec, passthru, add_, get_cache
from pyg_base._cell import cell_output, cell_item, cell_inputs, _updated
import pytest
from pyg_base import *
| 29.70098 | 126 | 0.523519 |
a244d716297448851950a6f197be289befd9e237 | 4,379 | py | Python | uwsgi/unacc/poc.py | nobgr/vulhub | b24a89459fbd98ba76881adb6d4e2fb376792863 | [
"MIT"
] | 9,681 | 2017-09-16T12:31:59.000Z | 2022-03-31T23:49:31.000Z | uwsgi/unacc/poc.py | dingafter/vulhub | 67547c4ca153980004ccaeab94f77bcc9952d764 | [
"MIT"
] | 180 | 2017-11-01T08:05:07.000Z | 2022-03-31T05:26:33.000Z | uwsgi/unacc/poc.py | dingafter/vulhub | 67547c4ca153980004ccaeab94f77bcc9952d764 | [
"MIT"
] | 3,399 | 2017-09-16T12:21:54.000Z | 2022-03-31T12:28:48.000Z | #!/usr/bin/python
# coding: utf-8
######################
# Uwsgi RCE Exploit
######################
# Author: wofeiwo@80sec.com
# Created: 2017-7-18
# Last modified: 2018-1-30
# Note: Just for research purpose
import sys
import socket
import argparse
import requests
if __name__ == '__main__':
main() | 30.2 | 106 | 0.570222 |
a2453fb1d06de4864cf98c020579a6af505d8bfa | 4,169 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/dark_lang/views.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/dark_lang/views.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/dark_lang/views.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """
Views file for the Darklang Django App
"""
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.utils.translation import LANGUAGE_SESSION_KEY
from django.utils.translation import ugettext as _
from web_fragments.fragment import Fragment
from openedx.core.djangoapps.dark_lang import DARK_LANGUAGE_KEY
from openedx.core.djangoapps.dark_lang.models import DarkLangConfig
from openedx.core.djangoapps.plugin_api.views import EdxFragmentView
from openedx.core.djangoapps.user_api.preferences.api import delete_user_preference, set_user_preference
from openedx.core.djangoapps.util.user_messages import PageLevelMessages
LANGUAGE_INPUT_FIELD = 'preview_language'
def _set_preview_language(self, request):
"""
Sets the preview language for the current user.
"""
preview_language = request.POST.get(LANGUAGE_INPUT_FIELD, '')
if not preview_language.strip():
PageLevelMessages.register_error_message(request, _('Language not provided'))
return
set_user_preference(request.user, DARK_LANGUAGE_KEY, preview_language)
PageLevelMessages.register_success_message(
request,
_('Language set to {preview_language}').format(
preview_language=preview_language
)
)
def _clear_preview_language(self, request):
"""
Clears the preview language for the current user.
"""
delete_user_preference(request.user, DARK_LANGUAGE_KEY)
if LANGUAGE_SESSION_KEY in request.session:
del request.session[LANGUAGE_SESSION_KEY]
PageLevelMessages.register_success_message(
request,
_('Language reset to the default')
)
| 36.893805 | 135 | 0.688894 |
a2455b7d1f4c59b3f3fc10bc30bcb0f313e3156b | 13,480 | py | Python | pipenv/vendor/vistir/spin.py | erikkemperman/pipenv | 8707fe52571422ff5aa2905a2063fdf5ce14840b | [
"MIT"
] | 3 | 2020-06-04T05:22:33.000Z | 2020-09-23T19:44:02.000Z | pipenv/vendor/vistir/spin.py | erikkemperman/pipenv | 8707fe52571422ff5aa2905a2063fdf5ce14840b | [
"MIT"
] | 9 | 2019-12-05T00:49:12.000Z | 2021-09-08T01:31:25.000Z | pipenv/vendor/vistir/spin.py | erikkemperman/pipenv | 8707fe52571422ff5aa2905a2063fdf5ce14840b | [
"MIT"
] | 1 | 2019-06-04T10:25:26.000Z | 2019-06-04T10:25:26.000Z | # -*- coding=utf-8 -*-
import functools
import os
import signal
import sys
import threading
import time
import colorama
import cursor
import six
from .compat import to_native_string
from .termcolors import COLOR_MAP, COLORS, colored, DISABLE_COLORS
from io import StringIO
try:
import yaspin
except ImportError:
yaspin = None
Spinners = None
else:
from yaspin.spinners import Spinners
handler = None
if yaspin and os.name == "nt":
handler = yaspin.signal_handlers.default_handler
elif yaspin and os.name != "nt":
handler = yaspin.signal_handlers.fancy_handler
CLEAR_LINE = chr(27) + "[K"
base_obj = yaspin.core.Yaspin if yaspin is not None else DummySpinner
| 33.120393 | 105 | 0.602819 |
a24661a46dbbfae17cce472d5d44c7bd7360c84c | 621 | py | Python | book/book/settings.py | ChaosSoong/ScrapyDouban | e6a018a09e76f5f5506934e90b104091dfffe693 | [
"MIT"
] | 1 | 2021-04-12T13:37:48.000Z | 2021-04-12T13:37:48.000Z | book/book/settings.py | ChaosSoong/ScrapyDouban | e6a018a09e76f5f5506934e90b104091dfffe693 | [
"MIT"
] | null | null | null | book/book/settings.py | ChaosSoong/ScrapyDouban | e6a018a09e76f5f5506934e90b104091dfffe693 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
BOT_NAME = 'book'
SPIDER_MODULES = ['book.spiders']
NEWSPIDER_MODULE = 'book.spiders'
IMAGES_STORE = '../storage/book/'
COOKIES_ENABLED = True
COOKIE_DEBUG = True
LOG_LEVEL = 'INFO'
# LOG_LEVEL = 'DEBUG'
CONCURRENT_REQUESTS = 100
CONCURRENT_REQUESTS_PER_DOMAIN = 1000
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, \
like Gecko) Chrome/49.0.2623.87 Safari/537.36"
DEFAULT_REQUEST_HEADERS = {
'Referer': 'https://m.douban.com/book/'
}
ITEM_PIPELINES = {
'book.pipelines.CoverPipeline': 0,
'book.pipelines.BookPipeline': 1,
}
| 20.7 | 79 | 0.705314 |
a246d1c2c2b92da01d8058201ebb138463ac4efe | 105 | py | Python | tests/pyxl_original/test_eof.py | adrienbrunet/mixt | d725ec752ce430d135e993bc988bfdf2b8457c4b | [
"MIT"
] | 27 | 2018-06-04T19:11:42.000Z | 2022-02-23T22:46:39.000Z | tests/pyxl_original/test_eof.py | adrienbrunet/mixt | d725ec752ce430d135e993bc988bfdf2b8457c4b | [
"MIT"
] | 7 | 2018-06-09T15:27:51.000Z | 2021-03-11T20:00:35.000Z | tests/pyxl_original/test_eof.py | adrienbrunet/mixt | d725ec752ce430d135e993bc988bfdf2b8457c4b | [
"MIT"
] | 3 | 2018-07-29T10:20:02.000Z | 2021-11-18T19:55:07.000Z | # coding: mixt
from mixt import html
| 15 | 53 | 0.571429 |
a247922adf11769c636098f78e98f1b9b8df3ed1 | 6,325 | py | Python | text_analysis/analysis_classify/a01_basic_statistics.py | yongzhuo/Text-Analysis | 6f9f79fdb1e6ea1c5559b59558cee641940f85d2 | [
"Apache-2.0"
] | 3 | 2021-11-19T07:02:53.000Z | 2021-12-15T03:15:15.000Z | text_analysis/analysis_classify/a01_basic_statistics.py | yongzhuo/Text-Analysis | 6f9f79fdb1e6ea1c5559b59558cee641940f85d2 | [
"Apache-2.0"
] | null | null | null | text_analysis/analysis_classify/a01_basic_statistics.py | yongzhuo/Text-Analysis | 6f9f79fdb1e6ea1c5559b59558cee641940f85d2 | [
"Apache-2.0"
] | null | null | null | # !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2020/5/27 21:18
# @author : Mo
# @function:
from text_analysis.utils.text_common import txt_read, txt_write, load_json, save_json, get_all_dirs_files
from text_analysis.conf.path_log import logger
from collections import Counter
from typing import List, Dict
import json
import os
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
from pylab import mpl
def counter_length_label(path_file, dir_save, show: str="bar"):
"""
-
:param path_file: str
:param path_save: str
:return:
"""
files = get_all_dirs_files(path_file)
files = [file for file in files if file.endswith(".json")]
tc_data_dev = []
for f in files:
tc_data_dev += txt_read(f)
#
lengths_question = []
label_total = []
for tdd in tc_data_dev:
tdd_json = json.loads(tdd)
question = tdd_json.get("text", "")
label = tdd_json.get("label")
lengths_question.append(len(question))
if type(label) == list:
label_total += label
else:
label_total.append(label)
#
lengths_dict = dict(Counter(lengths_question))
label_dict = dict(Counter(label_total))
#
lengths_dict_sort = sorted(lengths_dict.items(), key=lambda x: x[0], reverse=False)
label_dict_sort = sorted(label_dict.items(), key=lambda x: x[1], reverse=True)
logger.info("length of text is {}".format(lengths_dict_sort))
logger.info("freq of label is {}".format(label_dict_sort))
#
lengths_question.sort()
len_ques = len(lengths_question)
len_99 = lengths_question[int(0.99 * len_ques)]
len_98 = lengths_question[int(0.98 * len_ques)]
len_95 = lengths_question[int(0.95 * len_ques)]
len_90 = lengths_question[int(0.90 * len_ques)]
logger.info("99% length of text is {}".format(len_99))
logger.info("98% length of text is {}".format(len_98))
logger.info("95% length of text is {}".format(len_95))
logger.info("90% length of text is {}".format(len_90))
length_dict = {"len_99": len_99,
"len_98": len_98,
"len_95": len_95,
"len_90": len_90
}
# length/
save_json(length_dict, os.path.join(dir_save, "length.json"))
# length/
draw_picture(lengths_dict_sort, os.path.join(dir_save, "length.png"), show="plot")
# label/
draw_picture(label_dict_sort, os.path.join(dir_save, "label.png"), show)
# length/
draw_box([lengths_question], os.path.join(dir_save, "{}_boxplot.png".format("length")))
def show_chinese(xs: List, ys: List, file: str=None, show: str="bar"):
"""
,
:param xs: list
:param ys: list
:param dir: str
:return: draw picture
"""
mpl.rcParams["font.sans-serif"] = ["SimHei"]
xis = [i for i in range(len(xs))]
if len(ys) >= 32:
plt.xscale('symlog')
plt.yscale('symlog')
plt.subplots_adjust(bottom=0.2)
# plt.figure(dpi=64)
# elif len(ys) >= 128:
# plt.xscale('log')
# plt.yscale('log')
# plt.yticks(xis, ys, size='small', fontsize=13)
if show=="plot": #
# fig, ax = plt.subplots(1, 1)
# ax.xaxis.set_major_locator(ticker.MultipleLocator(64))
# plt.figure(dpi=256)
# from matplotlib.font_manager import FontProperties
# font = FontProperties(fname="C:\Windows\Fonts\simkai.ttf", size=16)
# fontproperites = font
# fontdict={"fontname":"C:\Windows\Fonts\simkai.ttf"}
# plt.xlabel(xs, fontproperites = font)
plt.xticks(xis, ys, size='small', rotation=64, fontsize=13)
plt.plot(xis, xs, 'o-', label=u"") #
elif show=="pie": #
# plt.figure(dpi=256)
plt.xticks(xis, xs, size='small', rotation=64, fontsize=13)
plt.pie(xs, labels=ys, autopct='%1.1f%%', shadow=False, startangle=150)
else: #
#
# fig, ax = plt.subplots(1, 1)
# ax.xaxis.set_major_locator(ticker.MultipleLocator(max(int(len(xs)/16), 128)))
# plt.figure(dpi=128)
# plt.figure(dpi=256)
plt.xticks(xis, ys, size='small', rotation=64, fontsize=13)
plt.bar(xis, xs, 0.8)
# plt.figure(figsize=(min(512, len(xs)), min(256, int(len(xs)/2))), dpi=32)
# plt.figure(dpi=128)
# plt.yticks(xis, ys, size='small', fontsize=13)
# plt.barh(xis, xs, 0.8)
if file: # , saveplt
plt.savefig(file)
else: #
plt.savefig("fig.png")
# plt.show()
plt.close()
def draw_picture(xy_list_tuple, path, show: str="bar"):
"""
-(-)
:param xy_list_tuple: List[tuple]
:param path: str
:return:
"""
length_x = []
length_y = []
for k, v in xy_list_tuple:
length_x.append(k)
length_y.append(v)
show_chinese(length_y, length_x, path, show)
def draw_box(boxs: List, file: str=None):
"""
boxplot()
:param boxs: list
:param file: str
:return:
"""
mpl.rcParams["font.sans-serif"] = ["SimHei"] #
plt.figure(figsize=(10, 5)) #
plt.title("boxplot-length", fontsize=20) #
# notchsym
plt.boxplot(boxs, notch=True, sym="*", vert=False, showmeans=True, patch_artist=True)
# boxprops={'color':'orangered', 'facecolor':'gray'}) #
if file: # , saveplt
plt.savefig(file)
else: #
plt.savefig("boxplot.png")
# plt.show() #
plt.close()
if __name__ == '__main__':
path_in_dir = "../data/corpus/classify"
path_save_dir = "../data/corpus/classify/"
if path_save_dir is None:
path_save_dir = os.path.join(os.path.dirname(path_in_dir), "")
if path_save_dir:
if not os.path.exists(path_save_dir):
os.mkdir(path_save_dir)
counter_length_label(path_in_dir, path_save_dir, show="bar")
# show_x = [i for i in range(32)]
# show_y = [str("") for i in range(32)]
# show_chinese(show_x, show_y, file="xy1.png")
# show_chinese(show_x, show_y, file="xy2.png", show="pie")
# show_chinese(show_x, show_y, file="xy3.png", show="plot")
| 33.115183 | 105 | 0.61502 |
a2480500111770e0985c6d623537477de897c591 | 1,689 | py | Python | components/workstation.py | cqzhao/FooProxy | 5953bcd46388135e0c951ffbcd63dc782ff8bfad | [
"MIT"
] | null | null | null | components/workstation.py | cqzhao/FooProxy | 5953bcd46388135e0c951ffbcd63dc782ff8bfad | [
"MIT"
] | null | null | null | components/workstation.py | cqzhao/FooProxy | 5953bcd46388135e0c951ffbcd63dc782ff8bfad | [
"MIT"
] | null | null | null | #coding:utf-8
"""
@author : linkin
@email : yooleak@outlook.com
@date : 2018-10-04
"""
import logging
from APIserver.apiserver import app
from components.collector import Collector
from components.validator import Validator
from components.detector import Detector
from components.scanner import Scaner
from components.tentacle import Tentacle
from multiprocessing import Pool
from multiprocessing import Manager
from config.config import MODE
from const.settings import RUN_FUNC
logger = logging.getLogger()
| 24.838235 | 70 | 0.625222 |
a2482ec97e97d9e65a4d8d49711236d2566859ca | 30,410 | py | Python | ml/rbms/core.py | torfjelde/ml | 6ae3a5543663a7adfe3b6f1c596093c123fa2b88 | [
"MIT"
] | null | null | null | ml/rbms/core.py | torfjelde/ml | 6ae3a5543663a7adfe3b6f1c596093c123fa2b88 | [
"MIT"
] | null | null | null | ml/rbms/core.py | torfjelde/ml | 6ae3a5543663a7adfe3b6f1c596093c123fa2b88 | [
"MIT"
] | null | null | null | import abc
import logging
from enum import Enum
from tqdm import tqdm
from ml import np
from ml.functions import sigmoid, dot_batch, bernoulli_from_probas
_log = logging.getLogger("ml")
def mean_visible(self, h, beta=1.0):
r"""
Computes :math:`\mathbb{E}[\mathbf{v} \mid \mathbf{h}]`.
It can be shown that this expectation equals: [1]_
- Bernoulli:
.. math::
:nowrap:
\begin{equation}
\mathbb{E}[\mathbf{v} \mid \mathbf{h}] =
p \big( V_{i} = 1 \mid \mathbf{h} \big) = \text{sigmoid}
\Bigg( \beta \bigg( b_{i} + \sum_{\mu=1}^{|\mathcal{H}|} W_{i \mu} \frac{h_{\mu}}{\sigma_{\mu}} \bigg) \Bigg)
\end{equation}
- Gaussian:
.. math::
:nowrap:
\begin{equation*}
\mathbb{E}[\mathbf{v} \mid \mathbf{h}] = b_i + \sigma_i \sum_{\mu=1}^{|\mathcal{H}|} W_{i \mu} \frac{h_{\mu}}{\sigma_{\mu}}
\end{equation*}
where :math:`\sigma_{\mu} = 1` if :math:`H_\mu` is a Bernoulli random variable.
Notes
-----
Observe that the expectation when using Gaussian units is
independent of :math:`\beta`. To see the effect :math:`\beta` has
on the Gaussian case, see :func:`RBM.proba_visible`.
References
----------
.. [1] Fjelde, T. E., Restricted Boltzmann Machines, , (), (2018).
"""
mean = self.v_bias + (self.v_sigma *
np.matmul(h / self.h_sigma, self.W.T))
if self.visible_type == UnitType.BERNOULLI:
return sigmoid(mean * beta)
elif self.visible_type == UnitType.GAUSSIAN:
return mean
def mean_hidden(self, v, beta=1.0):
"Computes conditional expectation E[h | v]."
mean = self.h_bias + self.h_sigma * np.matmul(v / self.v_sigma, self.W)
if self.hidden_type == UnitType.BERNOULLI:
return sigmoid(mean * beta)
elif self.hidden_type == UnitType.GAUSSIAN:
return mean
def contrastive_divergence(self, v_0,
k=1,
h_0=None,
burnin=-1,
beta=1.0):
"""Contrastive Divergence.
Parameters
----------
v_0: array-like
Visible state to initialize the chain from.
k: int
Number of steps to use in CD-k.
h_0: array-like, optional
Visible states to initialize the chain.
If not specified, will sample conditioned on visisble.
Returns
-------
h_0, h, v_0, v: arrays
``h_0`` and ``v_0`` are the initial states for the hidden and
visible units, respectively.
``h`` and ``v`` are the final states for the hidden and
visible units, respectively.
"""
if h_0 is None:
h_0 = self.sample_hidden(v_0, beta=beta)
v = v_0
h = h_0
for t in range(k):
v = self.sample_visible(h, beta=beta)
h = self.sample_hidden(v, beta=beta)
return v_0, h_0, v, h
def step(self, v, k=1, lr=0.1, lmbda=0.0, **sampler_kwargs):
"Performs a single gradient DEscent step on the batch `v`."
# compute gradient for each observed visible configuration
grad = self.grad(v, k=k, **sampler_kwargs)
# update parameters
self._update(grad, lr=lr)
# possibly apply weight-decay
if lmbda > 0.0:
self._apply_weight_decay(lmbda=lmbda)
def fit(self, train_data,
k=1,
learning_rate=0.01,
num_epochs=5,
batch_size=64,
test_data=None,
show_progress=True,
weight_decay=0.0,
early_stopping=-1,
callbacks={},
**sampler_kwargs):
"""
Parameters
----------
train_data: array-like
Data to fit RBM on.
k: int, default=1
Number of sampling steps to perform. Used by CD-k, PCD-k and PT.
learning_rate: float or array, default=0.01
Learning rate used when updating the parameters.
Can also be array of same length as `self.variables`, in
which case the learning rate at index `i` will be used to
to update ``RBM.variables[i]``.
num_epochs: int, default=5
Number of epochs to train.
batch_size: int, default=64
Batch size to within the epochs.
test_data: array-like, default=None
Data similar to ``train_data``, but this will only be used as
validation data, not trained on.
If specified, will compute and print the free energy / negative
log-likelihood on this dataset after each epoch.
show_progress: bool, default=True
If true, will display progress bar for each epoch.
weight_decay: float, default=0.0
If greater than 0.0, weight decay will be applied to the
parameter updates. See :func:`RBM.step` for more information.
early_stopping: int, default=-1
If ``test_data`` is given and ``early_stopping > 0``, training
will terminate after epoch if the free energy of the
``test_data`` did not improve over the fast ``early_stopping``
epochs.
Returns
-------
nlls_train, nlls_test : array-like, array-like
Returns the free energy of both ``train_data`` and ``test_data``
as computed at each epoch.
"""
num_samples = train_data.shape[0]
indices = np.arange(num_samples)
np.random.shuffle(indices)
nlls_train = []
nlls = []
prev_best = None
for epoch in range(1, num_epochs + 1):
if "pre_epoch" in callbacks:
for c in callbacks["pre_epoch"]:
c(self, epoch)
# reset sampler at beginning of epoch
# Used by methods such as PCD to reset the
# initialization value.
self.reset_sampler()
# compute train & test negative log-likelihood
# TODO: compute train- and test-nll in mini-batches
# to avoid numerical problems
nll_train = float(np.mean(self.free_energy(train_data)))
nlls_train.append(nll_train)
_log.info(f"[{epoch:03d} / {num_epochs:03d}] NLL (train):"
f" {nll_train:>20.5f}")
if test_data is not None:
nll = float(np.mean(self.free_energy(test_data)))
_log.info(f"[{epoch:03d} / {num_epochs:03d}] NLL (test):"
f" {nll:>20.5f}")
nlls.append(nll)
# stop early if all `early_stopping` previous
# evaluations on `test_data` did not improve.
if early_stopping > 0:
if epoch > early_stopping and \
np.all([a >= prev_best for a in nlls[epoch - early_stopping:]]):
_log.info("Hasn't improved in {early_stopping} epochs; stopping early")
break
else:
# update `prev_best`
if prev_best is None:
prev_best = nll
elif nll < prev_best:
prev_best = nll
# iterate through dataset in batches
if show_progress:
bar = tqdm(total=num_samples)
for start in range(0, num_samples, batch_size):
# ensure we don't go out-of-bounds
end = min(start + batch_size, num_samples)
# take a gradient-step
self.step(train_data[start: end],
k=k,
lr=learning_rate,
lmbda=weight_decay,
**sampler_kwargs)
if "post_step" in callbacks:
for c in callbacks["post_step"]:
c(self, epoch, end)
# update progress
if show_progress:
bar.update(end - start)
if show_progress:
bar.close()
# shuffle indices for next epoch
np.random.shuffle(indices)
if "post_epoch" in callbacks:
for c in callbacks["post_epoch"]:
c(self, epoch)
# compute train & test negative log-likelihood of final batch
nll_train = float(np.mean(self.free_energy(train_data)))
nlls_train.append(nll_train)
_log.info(f"[{epoch:03d} / {num_epochs:03d}] NLL (train): "
f"{nll_train:>20.5f}")
if test_data is not None:
nll = float(np.mean(self.free_energy(test_data)))
_log.info(f"[{epoch:03d} / {num_epochs:03d}] NLL (test): "
f"{nll:>20.5f}")
nlls.append(nll)
return nlls_train, nlls
| 36.638554 | 135 | 0.529037 |
a248fa91871a4d64d360baf9357e2574f6ec13d4 | 218 | py | Python | Ports.py | bullgom/pysnn2 | dad5ae26b029afd5c5bf76fe141249b0f7b7a36c | [
"MIT"
] | null | null | null | Ports.py | bullgom/pysnn2 | dad5ae26b029afd5c5bf76fe141249b0f7b7a36c | [
"MIT"
] | null | null | null | Ports.py | bullgom/pysnn2 | dad5ae26b029afd5c5bf76fe141249b0f7b7a36c | [
"MIT"
] | null | null | null | AP = "AP"
BP = "BP"
ARRIVE = "ARRIVE"
NEUROMODULATORS = "NEUROMODULATORS"
TARGET = "TARGET"
OBSERVE = "OBSERVE"
SET_FREQUENCY = "SET_FREQUENCY"
DEACTIVATE = "DEACTIVATE"
ENCODE_INFORMATION = "ENCODE_INFORMATION"
| 13.625 | 41 | 0.724771 |
a2490cedb898fffcdd522f5198f098b39d8227c4 | 2,798 | py | Python | src/oolongt/cli/cli.py | schmamps/textteaser | e948ac6c0a4a4a44c7011206d7df236529d7813d | [
"MIT"
] | 2 | 2020-02-18T09:13:13.000Z | 2021-06-12T13:16:13.000Z | src/oolongt/cli/cli.py | schmamps/textteaser | e948ac6c0a4a4a44c7011206d7df236529d7813d | [
"MIT"
] | null | null | null | src/oolongt/cli/cli.py | schmamps/textteaser | e948ac6c0a4a4a44c7011206d7df236529d7813d | [
"MIT"
] | 1 | 2019-05-05T14:43:53.000Z | 2019-05-05T14:43:53.000Z | """Command line interface for OolongT"""
import argparse
import os
import sys
import typing
from textwrap import wrap as wrap_text
from ..constants import DEFAULT_LENGTH
from ..content import Document
from ..files import get_document
from ..string import simplify
from ..typings import OptionalString, StringList
DEFAULT_WRAP = 70
def get_args():
"""Parse command line arguments if invoked directly
Returns:
object -- .img_dir: output directory, .details: get document details
"""
desc = 'A Python-based utility to summarize content.'
limit_help = 'length of summary ({}, {}, [default: {}])'.format(
'< 1: pct. of sentences', '>= 1: total sentences', DEFAULT_LENGTH)
ext_help = 'nominal extension of file [default: {}]'.format(
'txt if local, html if remote')
wrap_help = 'wrap at column number [default: {}]'.format(
DEFAULT_WRAP)
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'path', help='path/URL to file')
parser.add_argument(
'-e', '--ext', help=ext_help, default=None)
parser.add_argument(
'-w', '--wrap', help=wrap_help, default=DEFAULT_WRAP)
parser.add_argument(
'-l', '--limit', help=limit_help, default=DEFAULT_LENGTH)
args = parser.parse_args()
if not args.path.startswith('http') and not os.path.exists(args.path):
sys.stderr.write('File {!r} does not exist.'.format(args.path))
sys.exit(1)
return args
def get_summary(doc: Document, limit: float, wrap: int) -> StringList:
"""Get summary of `doc` as StringList of lines
Arguments:
doc {Document} -- document
limit {float} -- length of summary
wrap {int} -- column wrap
Returns:
StringList -- lines of document
"""
sentences = doc.summarize(limit)
text = ' '.join(sentences)
return [text] if wrap < 1 else wrap_text(text, width=wrap)
def cli():
"""Collect arguments, pass for summary, output to console"""
args = get_args()
limit = float(args.limit)
wrap = int(args.wrap)
for line in get_output_lines(args.path, args.ext, limit, wrap):
print(line)
| 27.98 | 76 | 0.641172 |
a249698e484130d9327ab696efff125ba53413ba | 15,123 | py | Python | chotgun.py | hmatsuya/chotgun | 0cee1b4ae385c57cf094376dee0ad450e308aa0a | [
"MIT"
] | 1 | 2021-11-04T14:26:10.000Z | 2021-11-04T14:26:10.000Z | chotgun.py | hmatsuya/chotgun | 0cee1b4ae385c57cf094376dee0ad450e308aa0a | [
"MIT"
] | 1 | 2020-08-07T06:58:09.000Z | 2020-08-13T06:23:20.000Z | chotgun.py | hmatsuya/chotgun | 0cee1b4ae385c57cf094376dee0ad450e308aa0a | [
"MIT"
] | null | null | null | import sys
import os.path
import threading
import queue
import logging
import random
import copy
from paramiko.client import SSHClient
import paramiko
import re
import time
import os
def infostr(s):
print(f'info string {s}', flush=True)
def main():
chotgun = Chotgun(n_jobs=5)
chotgun.start()
sys.exit()
if __name__ == "__main__":
main()
sys.exit()
| 35.251748 | 101 | 0.511803 |
a2497a32646aebe6dad4bb729f7554cf9a01a99e | 9,051 | py | Python | source/base/utils.py | phygitalism/points2surf | c8e6d47062fc068802e179a37427981c8e10b128 | [
"MIT"
] | 4 | 2021-11-25T19:28:16.000Z | 2022-02-27T19:13:59.000Z | source/base/utils.py | phygitalism/points2surf | c8e6d47062fc068802e179a37427981c8e10b128 | [
"MIT"
] | null | null | null | source/base/utils.py | phygitalism/points2surf | c8e6d47062fc068802e179a37427981c8e10b128 | [
"MIT"
] | 1 | 2020-09-10T01:05:03.000Z | 2020-09-10T01:05:03.000Z | import numpy as np
import os
from source.base import utils_mp
from source.base import file_utils
def batch_quat_to_rotmat(q, out=None):
    """
    Convert a batch of quaternions into rotation matrices.

    Each quaternion a + bi + cj + dk must be given in the form [a, b, c, d].
    Non-unit quaternions are handled by dividing through the squared 2-norm.

    :param q: tensor of shape (B, 4), one quaternion per row
    :param out: optional preallocated (B, 3, 3) tensor to write into
    :return: tensor of shape (B, 3, 3), one rotation matrix per row
    """
    import torch
    n_batch = q.size(0)
    if out is None:
        out = q.new_empty(n_batch, 3, 3)

    # 2 divided by the squared quaternion 2-norm of each row
    scale = 2 / torch.sum(q.pow(2), 1)

    # Pairwise products q_i * q_j (coefficients of the Hamilton
    # product of the quaternion with itself)
    prod = torch.bmm(q.unsqueeze(2), q.unsqueeze(1))

    out[:, 0, 0] = 1 - (prod[:, 2, 2] + prod[:, 3, 3]).mul(scale)
    out[:, 0, 1] = (prod[:, 1, 2] - prod[:, 3, 0]).mul(scale)
    out[:, 0, 2] = (prod[:, 1, 3] + prod[:, 2, 0]).mul(scale)

    out[:, 1, 0] = (prod[:, 1, 2] + prod[:, 3, 0]).mul(scale)
    out[:, 1, 1] = 1 - (prod[:, 1, 1] + prod[:, 3, 3]).mul(scale)
    out[:, 1, 2] = (prod[:, 2, 3] - prod[:, 1, 0]).mul(scale)

    out[:, 2, 0] = (prod[:, 1, 3] - prod[:, 2, 0]).mul(scale)
    out[:, 2, 1] = (prod[:, 2, 3] + prod[:, 1, 0]).mul(scale)
    out[:, 2, 2] = 1 - (prod[:, 1, 1] + prod[:, 2, 2]).mul(scale)

    return out
| 40.226667 | 108 | 0.667772 |
a24a44290243b8973c58ac83bd9c32d62a1b7331 | 194 | py | Python | contact/views.py | rsHalford/xhalford-django | 970875bbcd23782af15f24361ec3bbda0230ee81 | [
"MIT"
] | 2 | 2020-11-02T22:04:01.000Z | 2020-11-14T14:45:45.000Z | contact/views.py | rsHalford/xhalford-django | 970875bbcd23782af15f24361ec3bbda0230ee81 | [
"MIT"
] | null | null | null | contact/views.py | rsHalford/xhalford-django | 970875bbcd23782af15f24361ec3bbda0230ee81 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.views.generic import ListView
from contact.models import Profile
| 24.25 | 41 | 0.78866 |
a24b77db8e7a819628a9ae74f4884a124de6d7df | 24,382 | py | Python | xbbo/surrogate/gaussian_process.py | zhanglei1172/bbobenchmark | 841bffdddc1320ac2676e378d20f8b176a7e6cf7 | [
"MIT"
] | 2 | 2021-09-06T02:06:22.000Z | 2021-12-09T10:46:56.000Z | xbbo/surrogate/gaussian_process.py | zhanglei1172/bbobenchmark | 841bffdddc1320ac2676e378d20f8b176a7e6cf7 | [
"MIT"
] | null | null | null | xbbo/surrogate/gaussian_process.py | zhanglei1172/bbobenchmark | 841bffdddc1320ac2676e378d20f8b176a7e6cf7 | [
"MIT"
] | null | null | null | from typing import List
import typing
from scipy import optimize
import sklearn
# from sklearn.gaussian_process import kernels
from sklearn.gaussian_process.kernels import Kernel, KernelOperator
# import torch
# from scipy.linalg import solve_triangular, cholesky
# from scipy import optimize, stats
import numpy as np
# import GPy
from sklearn import gaussian_process
# from botorch.acquisition import ExpectedImprovement
from xbbo.surrogate.base import Surrogate, BaseGP
from xbbo.surrogate.gp_kernels import HammingKernel, Matern, ConstantKernel, WhiteKernel
from xbbo.surrogate.gp_prior import HorseshoePrior, LognormalPrior, Prior, SoftTopHatPrior, TophatPrior
from xbbo.utils.util import get_types
VERY_SMALL_NUMBER = 1e-10
| 36.014771 | 103 | 0.56029 |
a24baed065a08f05a3618b4b5c209c85239d1882 | 10,112 | py | Python | lib/training/tpu.py | learning-at-home/dalle | acf688eac206a6bcd543d56ddbb9dcf6bb72012b | [
"MIT"
] | null | null | null | lib/training/tpu.py | learning-at-home/dalle | acf688eac206a6bcd543d56ddbb9dcf6bb72012b | [
"MIT"
] | null | null | null | lib/training/tpu.py | learning-at-home/dalle | acf688eac206a6bcd543d56ddbb9dcf6bb72012b | [
"MIT"
] | null | null | null | import ctypes
import threading
from functools import partial
from contextlib import nullcontext
from copy import deepcopy
import multiprocessing as mp
from itertools import zip_longest
from typing import Iterable
import torch
import torch.nn as nn
import torch.utils.data
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp
import torch_xla.distributed.parallel_loader as pl
from hivemind.utils.logging import get_logger
logger = get_logger(__name__)
| 43.586207 | 128 | 0.65714 |
a24d8145f2c40687cee72c78a8cd67399721ce08 | 1,819 | py | Python | code/evaluate.py | xuyangcao/SegWithDistMap | 9638aaacf15dba6c2f907e5e82f8ed37a786bc96 | [
"Apache-2.0"
] | 3 | 2021-01-29T16:03:39.000Z | 2021-12-16T04:40:28.000Z | code/evaluate.py | xuyangcao/SegWithDistMap | 9638aaacf15dba6c2f907e5e82f8ed37a786bc96 | [
"Apache-2.0"
] | null | null | null | code/evaluate.py | xuyangcao/SegWithDistMap | 9638aaacf15dba6c2f907e5e82f8ed37a786bc96 | [
"Apache-2.0"
] | 2 | 2019-12-20T13:15:08.000Z | 2020-01-02T15:49:16.000Z | import numpy as np
import os
import argparse
import tqdm
import pandas as pd
import SimpleITK as sitk
from medpy import metric
if __name__ == '__main__':
    # Entry point when run as a script; no-op on import.
    main()
| 29.33871 | 91 | 0.630566 |
a2513b451ec5004528a7e01bf0d9f3485e85254c | 64 | py | Python | integraph/core/__init__.py | nleguillarme/inteGraph | 65faae4b7c16977094c387f6359980a4e99f94cb | [
"Apache-2.0"
] | null | null | null | integraph/core/__init__.py | nleguillarme/inteGraph | 65faae4b7c16977094c387f6359980a4e99f94cb | [
"Apache-2.0"
] | null | null | null | integraph/core/__init__.py | nleguillarme/inteGraph | 65faae4b7c16977094c387f6359980a4e99f94cb | [
"Apache-2.0"
] | null | null | null | from .taxid import TaxId
from .uri import URIManager, URIMapper
| 21.333333 | 38 | 0.8125 |
a253f668fac9338a8b6bc1ab3d03ebaeb0518c82 | 4,170 | py | Python | unit_tests/test_swift_storage_context.py | coreycb/charm-swift-storage | c31991ab198d7b51b9a4f5744a1fcc1fef0bc1ef | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | unit_tests/test_swift_storage_context.py | coreycb/charm-swift-storage | c31991ab198d7b51b9a4f5744a1fcc1fef0bc1ef | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | unit_tests/test_swift_storage_context.py | coreycb/charm-swift-storage | c31991ab198d7b51b9a4f5744a1fcc1fef0bc1ef | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import MagicMock
from test_utils import CharmTestCase, patch_open
import lib.swift_storage_context as swift_context
TO_PATCH = [
'config',
'log',
'related_units',
'relation_get',
'relation_ids',
'unit_private_ip',
'get_ipv6_addr',
]
| 38.971963 | 76 | 0.664508 |
a2567fe63fe79e43c35228a0d120b319e330a8d1 | 5,956 | py | Python | spiketoolkit/validation/quality_metric_classes/noise_overlap.py | ferchaure/spiketoolkit | 0b1deea724f742797181bb4fe57270fdd84951c1 | [
"MIT"
] | null | null | null | spiketoolkit/validation/quality_metric_classes/noise_overlap.py | ferchaure/spiketoolkit | 0b1deea724f742797181bb4fe57270fdd84951c1 | [
"MIT"
] | null | null | null | spiketoolkit/validation/quality_metric_classes/noise_overlap.py | ferchaure/spiketoolkit | 0b1deea724f742797181bb4fe57270fdd84951c1 | [
"MIT"
] | null | null | null | import numpy as np
from copy import copy
from .utils.thresholdcurator import ThresholdCurator
from .quality_metric import QualityMetric
import spiketoolkit as st
import spikemetrics.metrics as metrics
from spikemetrics.utils import printProgressBar
from collections import OrderedDict
from sklearn.neighbors import NearestNeighbors
from .parameter_dictionaries import update_all_param_dicts_with_kwargs
| 45.121212 | 121 | 0.631632 |
a256bf58e2a1c3f65c6795ace24758ddfe629807 | 1,397 | py | Python | lib/spider/NewsSpider1.py | ardegra/standard.api | 36856acf3820cfc33def26f9737d6a682fba94ee | [
"MIT"
] | null | null | null | lib/spider/NewsSpider1.py | ardegra/standard.api | 36856acf3820cfc33def26f9737d6a682fba94ee | [
"MIT"
] | null | null | null | lib/spider/NewsSpider1.py | ardegra/standard.api | 36856acf3820cfc33def26f9737d6a682fba94ee | [
"MIT"
] | null | null | null | import json
import pymongo
import falcon
from bson import json_util | 32.488372 | 68 | 0.680029 |
a2575cc36e877edd1ee71f8adfedc976cf489a26 | 4,152 | py | Python | core/global_registration.py | MichaelArbel/OT-sync | 0b8308375b0064a9ada3f8741f04551a3ba29b63 | [
"BSD-3-Clause"
] | 2 | 2021-04-04T22:49:06.000Z | 2021-08-09T12:19:30.000Z | core/global_registration.py | hrheydarian/OT-sync | 0b8308375b0064a9ada3f8741f04551a3ba29b63 | [
"BSD-3-Clause"
] | null | null | null | core/global_registration.py | hrheydarian/OT-sync | 0b8308375b0064a9ada3f8741f04551a3ba29b63 | [
"BSD-3-Clause"
] | 1 | 2021-08-09T12:19:03.000Z | 2021-08-09T12:19:03.000Z | # Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/Python/Advanced/global_registration.py
import open3d as o3d
import numpy as np
import copy
if __name__ == "__main__":
    # Downsampling resolution for the point clouds.
    voxel_size = 0.05  # means 5cm for the dataset
    # Load full clouds plus downsampled versions and their FPFH descriptors.
    source, target, source_down, target_down, source_fpfh, target_fpfh = \
        prepare_dataset(voxel_size)
    # Coarse alignment: RANSAC feature matching on the downsampled clouds.
    result_ransac = execute_global_registration(source_down, target_down,
                                                source_fpfh, target_fpfh,
                                                voxel_size)
    print(result_ransac)
    draw_registration_result(source_down, target_down,
                             result_ransac.transformation)
    # Refinement on the full-resolution clouds using the RANSAC result.
    result_icp = refine_registration(source, target, source_fpfh, target_fpfh,
                                     voxel_size)
    print(result_icp)
    draw_registration_result(source, target, result_icp.transformation)
a257f947f9d83091dd668f62bb9fa0c75a8eafcd | 2,698 | py | Python | src/get_test_results.py | williamdjones/deep_protein_binding | 10b00835024702b6d0e73092c777fed267215ca7 | [
"MIT"
] | null | null | null | src/get_test_results.py | williamdjones/deep_protein_binding | 10b00835024702b6d0e73092c777fed267215ca7 | [
"MIT"
] | null | null | null | src/get_test_results.py | williamdjones/deep_protein_binding | 10b00835024702b6d0e73092c777fed267215ca7 | [
"MIT"
] | null | null | null | import os
import argparse
import pandas as pd
import numpy as np
from sklearn.metrics import f1_score, r2_score
from tqdm import tqdm
# Command-line options locating the experiment output tree to scan.
parser = argparse.ArgumentParser()
parser.add_argument("--exp_dir", type=str, help="path to directory containing test results",
                    default="/scratch/wdjo224/deep_protein_binding/experiments")
parser.add_argument("--exp_name", type=str, help="name of the experiment to collect results", default="binding_debug")
parser.add_argument("--exp_type", type=str, help="indicate regression (reg) or classification (class)",
                    default="class")
parser.add_argument("--exp_epoch", type=int, help="which epoch to get results for", default=4)
args = parser.parse_args()
# NOTE(review): test_dict is never used below; looks like leftover scaffolding.
test_dict = {"path": [], "score": []}
test_list = []
print("reading test results...")
# Walk the experiment tree, keeping only directories whose path contains
# "test_results", the requested experiment name, and the requested epoch.
for root, dirs, files in tqdm(os.walk(args.exp_dir), total=len(os.listdir(args.exp_dir))):
    if "test_results" in root and args.exp_name in root and "epoch{}".format(args.exp_epoch) in root:
        # Process label = first "_"-separated token of the leaf directory name.
        process = root.split("/")[-1].split("_")[0]
        test_df = pd.DataFrame({"idx": [], "pred": [], "true": [], "loss": []})
        # Concatenate every per-worker results CSV found in this directory.
        for file in os.listdir(root):
            test_df = pd.concat([test_df, pd.read_csv(root + "/" + file, index_col=0)])
        score = None
        if args.exp_type == "class":
            # Columns hold stringified arrays, e.g. "[0.1 0.9]"; parse and
            # take the argmax as the predicted/true class.
            # NOTE(review): np.fromstring with sep= is deprecated in newer
            # numpy releases — consider np.fromiter/str.split instead.
            y_true = test_df.true.apply(lambda x: np.argmax(np.fromstring(x.strip("[ ]"), sep=" ", dtype=np.float32)))
            y_pred = test_df.pred.apply(lambda x: np.argmax(np.fromstring(x.strip("[ ]"), sep=" ", dtype=np.float32)))
            score = f1_score(y_pred=y_pred, y_true=y_true)
        elif args.exp_type == "reg":
            y_true = test_df.true.apply(lambda x: np.fromstring(x.strip("[ ]"), sep=" ", dtype=np.float32))
            y_pred = test_df.pred.apply(lambda x: np.fromstring(x.strip("[ ]"), sep=" ", dtype=np.float32))
            score = r2_score(y_pred=y_pred, y_true=y_true)
        else:
            raise Exception("not a valid output type")
        test_list.append({"path": root, "score": score, "process": process})
print("finished reading. finding best result")
# Linear scan for the highest-scoring result directory.
best_score = -9999999
best_idx = 0
for idx, test in tqdm(enumerate(test_list)):
    if test["score"] > best_score:
        best_score = test["score"]
        best_idx = idx
best_test = test_list[best_idx]
print("best test results:\n score: {} \t process: {} \t path: {}".format(best_test["score"], best_test["process"],
                                                                         best_test["path"]))
# Persist all scores, best first, as a CSV named after the experiment.
pd.DataFrame(test_list).sort_values(by="score", ascending=False).to_csv(
    "/scratch/wdjo224/deep_protein_binding/"+args.exp_name+"_test_results.csv")
a2595f5495569bfb18a30651ccf4bc3e61dec9b6 | 35 | py | Python | analysis/Leo/scripts/__init__.py | data301-2020-winter2/course-project-group_1039 | 26d661a543ce9dcea61f579f9edbcde88543e7c3 | [
"MIT"
] | 1 | 2021-02-09T02:13:23.000Z | 2021-02-09T02:13:23.000Z | analysis/Leo/scripts/__init__.py | data301-2020-winter2/course-project-group_1039 | 26d661a543ce9dcea61f579f9edbcde88543e7c3 | [
"MIT"
] | 31 | 2021-02-02T17:03:39.000Z | 2021-04-13T03:22:16.000Z | analysis/Leo/scripts/__init__.py | data301-2020-winter2/course-project-group_1039 | 26d661a543ce9dcea61f579f9edbcde88543e7c3 | [
"MIT"
] | 1 | 2021-03-14T05:56:16.000Z | 2021-03-14T05:56:16.000Z | import scripts.project_functions
| 8.75 | 32 | 0.857143 |
a25a29dc91019ce3281b5fcc6f7a268059eba344 | 8,278 | py | Python | align/pnr/write_constraint.py | ALIGN-analoglayout/ALIGN-public | 80c25a2ac282cbfa199bd21ad85277e9376aa45d | [
"BSD-3-Clause"
] | 119 | 2019-05-14T18:44:34.000Z | 2022-03-17T01:01:02.000Z | align/pnr/write_constraint.py | ALIGN-analoglayout/ALIGN-public | 80c25a2ac282cbfa199bd21ad85277e9376aa45d | [
"BSD-3-Clause"
] | 717 | 2019-04-03T15:36:35.000Z | 2022-03-31T21:56:47.000Z | align/pnr/write_constraint.py | ALIGN-analoglayout/ALIGN-public | 80c25a2ac282cbfa199bd21ad85277e9376aa45d | [
"BSD-3-Clause"
] | 34 | 2019-04-01T21:21:27.000Z | 2022-03-21T09:46:57.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 14:50:24 2021
@author: kunal001
"""
import pathlib
import pprint
import json
import logging
from ..schema import constraint
logger = logging.getLogger(__name__)
pp = pprint.PrettyPrinter(indent=4)
| 42.451282 | 105 | 0.439841 |
a25a329785c9f77e159427cefe14e85a15f3128c | 157 | py | Python | ch02/number_eight.py | joy-joy/pcc | 6c7d166a1694a2c3f371307aea6c4bdf340c4c42 | [
"MIT"
] | null | null | null | ch02/number_eight.py | joy-joy/pcc | 6c7d166a1694a2c3f371307aea6c4bdf340c4c42 | [
"MIT"
] | null | null | null | ch02/number_eight.py | joy-joy/pcc | 6c7d166a1694a2c3f371307aea6c4bdf340c4c42 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 9 00:00:43 2018
@author: joy
"""
# Print four arithmetic expressions that each evaluate to 8
# (addition, subtraction, multiplication, floor division).
print(5 + 3)
print(9 - 1)
print(2 * 4)
print(16//2)
a25a47c51ab943aef82605acc3a660cf6ca5f070 | 7,042 | py | Python | tests/test_git_factory.py | kostya0shift/SyncToGit | b3f2ec7e1167a0b032d4d40726de625d31a02354 | [
"MIT"
] | 1 | 2015-03-14T15:33:12.000Z | 2015-03-14T15:33:12.000Z | tests/test_git_factory.py | kostya0shift/SyncToGit | b3f2ec7e1167a0b032d4d40726de625d31a02354 | [
"MIT"
] | null | null | null | tests/test_git_factory.py | kostya0shift/SyncToGit | b3f2ec7e1167a0b032d4d40726de625d31a02354 | [
"MIT"
] | null | null | null | import os
from contextlib import ExitStack
from pathlib import Path
import pytest
from synctogit.git_factory import GitError, git_factory
| 29.965957 | 86 | 0.626527 |
a25ad39526f4933af2df581028f2688cffce6933 | 2,117 | py | Python | pychron/fractional_loss_calculator.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 31 | 2016-03-07T02:38:17.000Z | 2022-02-14T18:23:43.000Z | pychron/fractional_loss_calculator.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 1,626 | 2015-01-07T04:52:35.000Z | 2022-03-25T19:15:59.000Z | pychron/fractional_loss_calculator.py | UIllinoisHALPychron/pychron | f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc | [
"Apache-2.0"
] | 26 | 2015-05-23T00:10:06.000Z | 2022-03-07T16:51:57.000Z | # ===============================================================================
# Copyright 2019 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from numpy import linspace
from traits.api import HasTraits, Int, Float, Instance, on_trait_change
from traitsui.api import View, VGroup, UItem, Item, HGroup
from pychron.graph.graph import Graph
from pychron.processing.argon_calculations import calculate_fractional_loss
if __name__ == "__main__":
f = FractionalLossCalculator()
f.configure_traits()
# ============= EOF =============================================
| 34.145161 | 82 | 0.616911 |
a25bd49134a1f86571250e2c3fa2596b40823392 | 1,043 | py | Python | chatrooms/mixer/thread.py | Dogeek/ChatAggregator | c1cf700e2529d6bb78ce7e4850c532ef55841d85 | [
"MIT"
] | 3 | 2019-11-17T19:31:08.000Z | 2020-12-07T00:47:22.000Z | chatrooms/mixer/thread.py | Dogeek/ChatAggregator | c1cf700e2529d6bb78ce7e4850c532ef55841d85 | [
"MIT"
] | 16 | 2019-11-17T19:48:02.000Z | 2019-11-24T02:49:44.000Z | chatrooms/mixer/thread.py | Dogeek/ChatAggregator | c1cf700e2529d6bb78ce7e4850c532ef55841d85 | [
"MIT"
] | 3 | 2019-11-17T19:31:13.000Z | 2019-11-21T11:59:18.000Z | import asyncio
import threading
from .connection import MixerConnection
from .utils import get_channel_id
from chatrooms import lock
| 30.676471 | 84 | 0.628955 |
a25bec9b2e01804b38b6f619f80dd7f9ad6e8b87 | 44 | py | Python | test/py.py | PhilipDeegan/mkn | 399dd01990e130c4deeb0c2800204836d3875ae9 | [
"BSD-3-Clause"
] | 61 | 2015-02-05T07:43:13.000Z | 2020-05-19T13:26:50.000Z | test/py.py | mkn/mkn | a05b542497270def02200df6620804b89429259b | [
"BSD-3-Clause"
] | 29 | 2016-11-21T03:37:42.000Z | 2020-10-18T12:04:53.000Z | test/py.py | mkn/mkn | a05b542497270def02200df6620804b89429259b | [
"BSD-3-Clause"
] | 12 | 2016-01-05T05:35:29.000Z | 2020-03-15T11:03:37.000Z | #! /usr/bin/python3
print("HELLO PYTHON")
| 8.8 | 21 | 0.659091 |
a25c1f80b839438c40bc8b1ec20e3dcbcc9d3fa1 | 181 | py | Python | proxy_config.py | Nou4r/YandexMail-Account-Creator | b65f24630d23c59dfb8d196f3efe5a222aa3e11a | [
"MIT"
] | 1 | 2021-11-23T05:28:16.000Z | 2021-11-23T05:28:16.000Z | proxy_config.py | Nou4r/YandexMail-Account-Creator | b65f24630d23c59dfb8d196f3efe5a222aa3e11a | [
"MIT"
] | null | null | null | proxy_config.py | Nou4r/YandexMail-Account-Creator | b65f24630d23c59dfb8d196f3efe5a222aa3e11a | [
"MIT"
] | null | null | null | try:
with open('proxies.txt', 'r') as file:
proxy = [ line.rstrip() for line in file.readlines()]
except FileNotFoundError:
raise Exception('Proxies.txt not found.') | 36.2 | 61 | 0.662983 |
a25c2ec82a6c0af9fd73752dd6ceae9477f697d3 | 1,577 | py | Python | src/notifications/middleware.py | MAE776569/project-manager | 986a1a8b84950da81e98125d70ae3ef380e96e54 | [
"Apache-2.0"
] | null | null | null | src/notifications/middleware.py | MAE776569/project-manager | 986a1a8b84950da81e98125d70ae3ef380e96e54 | [
"Apache-2.0"
] | 7 | 2020-03-24T17:08:34.000Z | 2022-02-10T09:50:00.000Z | src/notifications/middleware.py | MAE776569/project-manager | 986a1a8b84950da81e98125d70ae3ef380e96e54 | [
"Apache-2.0"
] | null | null | null | from .models import NotificationManager
from django.utils.deprecation import MiddlewareMixin
| 41.5 | 83 | 0.616994 |
a25c6100f9d37d3d232cbc72e44c946c286a4444 | 5,167 | py | Python | tests/test_prns.py | mfkiwl/laika-gnss | dc38f251dbc7ebb535a3c220de8424634d297248 | [
"MIT"
] | 365 | 2018-12-17T07:43:34.000Z | 2022-03-29T22:23:39.000Z | tests/test_prns.py | mfkiwl/laika-gnss | dc38f251dbc7ebb535a3c220de8424634d297248 | [
"MIT"
] | 36 | 2019-07-24T10:20:45.000Z | 2022-02-14T22:11:24.000Z | tests/test_prns.py | mfkiwl/laika-gnss | dc38f251dbc7ebb535a3c220de8424634d297248 | [
"MIT"
] | 156 | 2018-12-17T05:06:23.000Z | 2022-03-31T12:06:07.000Z | import unittest
from laika.helpers import get_constellation, get_prn_from_nmea_id, \
get_nmea_id_from_prn, NMEA_ID_RANGES
# Test fixtures pairing a PRN string with its expected NMEA satellite id.
# SBAS satellites ('S' prefix) map onto two id ranges (33-64 and 120-158
# per the data below) — presumably following the NMEA 0183 conventions;
# verify against laika.helpers.NMEA_ID_RANGES.
SBAS_DATA = [
    ['S01', 33],
    ['S02', 34],
    ['S10', 42],
    ['S22', 54],
    ['S23', 55],
    ['S32', 64],
    ['S33', 120],
    ['S64', 151],
    ['S65', 152],
    ['S71', 158]
]

# Fixtures for the main constellations as implied by the data: GPS ('G',
# ids 1-32), GLONASS ('R', 65-96), Galileo ('E', 301-336), BeiDou ('C',
# 201-229) and QZSS ('J', 193-196).
MAIN_CONSTELLATIONS = [
    ['G01', 1],
    ['G10', 10],
    ['G32', 32],
    ['R01', 65],
    ['R10', 74],
    ['R23', 87],
    ['R24', 88],
    ['R25', 89],
    ['R32', 96],
    ['E01', 301],
    ['E02', 302],
    ['E36', 336],
    ['C01', 201],
    ['C02', 202],
    ['C29', 229],
    ['J01', 193],
    ['J04', 196]
]
| 31.895062 | 75 | 0.587962 |
a25d0281cfcfe0d0eb9dbdd381ee04036b26239e | 29,969 | py | Python | amt_tools/transcribe.py | cwitkowitz/transcription-models | e8697d6969b074926ac55986bc02fa1aad04b471 | [
"MIT"
] | 4 | 2021-06-15T19:45:26.000Z | 2022-03-31T20:42:26.000Z | amt_tools/transcribe.py | cwitkowitz/transcription-models | e8697d6969b074926ac55986bc02fa1aad04b471 | [
"MIT"
] | null | null | null | amt_tools/transcribe.py | cwitkowitz/transcription-models | e8697d6969b074926ac55986bc02fa1aad04b471 | [
"MIT"
] | 1 | 2021-11-08T02:13:02.000Z | 2021-11-08T02:13:02.000Z | # Author: Frank Cwitkowitz <fcwitkow@ur.rochester.edu>
# My imports
from . import tools
# Regular imports
from abc import abstractmethod
from copy import deepcopy
import numpy as np
import os
def filter_notes_by_duration(pitches, intervals, threshold=0.):
    """
    Discard notes whose duration falls below a threshold.

    Parameters
    ----------
    pitches : ndarray (N)
      Array of pitches corresponding to notes
      N - number of notes
    intervals : ndarray (N x 2)
      Array of onset-offset time pairs corresponding to notes
      N - number of notes
    threshold : float
      Minimum duration (seconds) to keep a note - if set to zero,
      notes must simply have non-zero duration

    Returns
    ----------
    pitches : ndarray (N)
      Array of pitches for the surviving notes
    intervals : ndarray (N x 2)
      Array of onset-offset time pairs for the surviving notes
    """
    # Work in batched form so the notes can be masked with one index
    batched_notes = tools.notes_to_batched_notes(pitches, intervals)

    # Duration of each note (offset minus onset)
    durations = batched_notes[:, 1] - batched_notes[:, 0]

    # Inclusive comparison for a real threshold; strictly positive
    # duration required when the threshold is zero
    keep_mask = durations >= threshold if threshold else durations > threshold
    batched_notes = batched_notes[keep_mask]

    # Back to loose note groups
    pitches, intervals = tools.batched_notes_to_notes(batched_notes)

    return pitches, intervals
def multi_pitch_to_notes(multi_pitch, times, profile, onsets=None, offsets=None):
    """
    Convert a discrete multi pitch activation map into loose MIDI note groups.

    Parameters
    ----------
    multi_pitch : ndarray (F x T)
      Discrete pitch activation map
      F - number of discrete pitches
      T - number of frames
    times : ndarray (N)
      Time in seconds of beginning of each frame
      N - number of time samples (frames)
    profile : InstrumentProfile (instrument.py)
      Instrument profile detailing experimental setup
    onsets : ndarray (F x T) or None (Optional)
      Where to start considering notes "active"
    offsets : ndarray (F x T) or None (Optional)
      Where to stop considering notes "active" - currently unused

    Returns
    ----------
    pitches : ndarray (N)
      Array of pitches corresponding to notes in MIDI format
      N - number of notes
    intervals : ndarray (N x 2)
      Array of onset-offset time pairs corresponding to notes
      N - number of notes
    """
    # Derive onsets from the activation map when none are given
    if onsets is None:
        onsets = tools.multi_pitch_to_onsets(multi_pitch)

    # Every onset must also be reflected as an active pitch
    multi_pitch = np.logical_or(onsets, multi_pitch).astype(tools.FLOAT32)

    # Collapse onset activations down to single-frame impulses
    onsets = tools.multi_pitch_to_onsets(onsets)

    # Total number of frames available
    num_frames = multi_pitch.shape[-1]

    # Append one extra frame time so notes active through the final
    # frame still receive a bounded offset time
    times = np.append(times, times[-1] + tools.estimate_hop_length(times))

    # Accumulate note pitches and their time intervals
    note_pitches, note_intervals = list(), list()

    # Each nonzero onset impulse marks the beginning of a note
    for pitch_idx, onset_frame in zip(*onsets.nonzero()):
        # Scan forward from the frame after the onset until:
        #   1. the frames run out,
        #   2. the pitch activation ends, or
        #   3. a new onset re-articulates the same pitch
        offset_frame = onset_frame + 1
        while (offset_frame < num_frames
               and multi_pitch[pitch_idx, offset_frame]
               and not onsets[pitch_idx, offset_frame]):
            offset_frame += 1

        # Record the note in MIDI pitch along with its time interval
        note_pitches.append(pitch_idx + profile.low)
        note_intervals.append([times[onset_frame], times[offset_frame]])

    # Package the notes as numpy arrays
    note_pitches, note_intervals = np.array(note_pitches), np.array(note_intervals)

    # Order the notes by onset time for neatness
    note_pitches, note_intervals = tools.sort_notes(note_pitches, note_intervals)

    return note_pitches, note_intervals
##################################################
# ESTIMATORS #
##################################################
class StackedNoteTranscriber(Estimator):
    """
    Estimate stacked notes from stacked multi pitch activation maps.

    Each slice of the stack is transcribed independently and keyed by
    its slice index in the resulting dictionary.
    """

    def __init__(self, profile, save_dir=None, inhibition_window=None, minimum_duration=None):
        """
        Initialize parameters for the estimator.

        Parameters
        ----------
        See Estimator class for others...

        inhibition_window : float or None (optional)
          Amount of time after which another note of the same pitch cannot begin
        minimum_duration : float or None (optional)
          Minimum necessary duration to keep a note
        """

        super().__init__(profile, save_dir)

        self.inhibition_window = inhibition_window
        self.minimum_duration = minimum_duration

    def estimate(self, raw_output):
        """
        Estimate notes for each slice of a stacked multi pitch activation map.

        Parameters
        ----------
        raw_output : dict
          Dictionary containing raw output relevant to estimation
          (multi pitch map, frame times, and optionally onsets/offsets)

        Returns
        ----------
        stacked_notes : dict
          Dictionary containing (slice -> (pitches, intervals)) pairs
        """

        # Obtain the multi pitch activation maps to transcribe
        stacked_multi_pitch = tools.unpack_dict(raw_output, tools.KEY_MULTIPITCH)
        # Determine the number of slices in the stacked multi pitch array
        stack_size = stacked_multi_pitch.shape[-3]
        # Obtain the frame times associated with the activation maps
        times = tools.unpack_dict(raw_output, tools.KEY_TIMES)
        # Obtain the onsets and offsets from the raw output if they exist
        stacked_onsets = tools.unpack_dict(raw_output, tools.KEY_ONSETS)
        stacked_offsets = tools.unpack_dict(raw_output, tools.KEY_OFFSETS)
        # If no onsets were provided, prepare a list of None's
        if stacked_onsets is None:
            stacked_onsets = [None] * stack_size
        # If no offsets were provided, prepare a list of None's
        if stacked_offsets is None:
            stacked_offsets = [None] * stack_size
        # Initialize a dictionary to hold the notes
        stacked_notes = dict()
        # Loop through the slices of the stack
        for slc in range(stack_size):
            # Obtain all of the transcription information for this slice
            multi_pitch, onsets, offsets = stacked_multi_pitch[slc], stacked_onsets[slc], stacked_offsets[slc]
            if self.inhibition_window is not None:
                if onsets is None:
                    # Default the onsets if they were not provided
                    onsets = tools.multi_pitch_to_onsets(multi_pitch)
                # Remove trailing onsets within inhibition window of a previous onset
                onsets = tools.inhibit_activations(onsets, times, self.inhibition_window)
            # Transcribe this slice of activations
            # (offsets are forwarded, but multi_pitch_to_notes currently ignores them)
            pitches, intervals = multi_pitch_to_notes(multi_pitch, times, self.profile, onsets, offsets)
            if self.minimum_duration is not None:
                # Filter the notes by duration
                pitches, intervals = filter_notes_by_duration(pitches, intervals, self.minimum_duration)
            # Add the pitch-interval pairs to the stacked notes dictionary under the slice key
            stacked_notes.update(tools.notes_to_stacked_notes(pitches, intervals, slc))
        return stacked_notes

    def write(self, stacked_notes, track):
        """
        Write slice-wise note estimates to respective files
        (one file per slice when more than one slice exists).

        Parameters
        ----------
        stacked_notes : dict
          Dictionary containing (slice -> (pitches, intervals)) pairs
        track : string
          Name of the track being processed
        """

        # Obtain a list of the stacked note keys
        keys = list(stacked_notes.keys())
        # Determine how to name the results
        tag = tools.get_tag(track)
        # Loop through the slices of the stack
        for key in keys:
            # Add another tag for the degree of freedom if more than one
            slice_tag = f'{tag}_{key}' if len(stacked_notes) > 1 else f'{tag}'
            # Construct a path for saving the estimates
            path = os.path.join(self.save_dir, f'{slice_tag}.{tools.TXT_EXT}')
            # Extract the loose note groups from the stack
            pitches, intervals = stacked_notes[key]
            # Write the notes to the path
            tools.write_notes(pitches, intervals, path)
class NoteTranscriber(StackedNoteTranscriber):
    """
    Estimate notes from a multi pitch activation map.

    Implemented as the single-slice special case of StackedNoteTranscriber.
    """

    def __init__(self, profile, save_dir=None, inhibition_window=None, minimum_duration=None):
        """
        Initialize parameters for the estimator.

        Parameters
        ----------
        See StackedNoteTranscriber class...
        """

        super().__init__(profile, save_dir, inhibition_window, minimum_duration)

    def estimate(self, raw_output):
        """
        Estimate notes from a multi pitch activation map.

        Parameters
        ----------
        raw_output : dict
          Dictionary containing raw output relevant to estimation

        Returns
        ----------
        batched_notes : ndarray (N x 3)
          Array of note intervals and pitches by row
          N - number of notes
        """

        # Perform any pre-processing steps
        raw_output = self.pre_proc(raw_output)
        # Obtain the multi pitch activation map to transcribe
        multi_pitch = tools.unpack_dict(raw_output, tools.KEY_MULTIPITCH)
        # Convert the multi pitch array to a stacked multi pitch array
        # (note: this rewrites entries of raw_output in place — presumably
        # safe because pre_proc has already produced a working copy; confirm)
        raw_output[tools.KEY_MULTIPITCH] = tools.multi_pitch_to_stacked_multi_pitch(multi_pitch)
        # Obtain onsets and offsets from output if they exist
        onsets = tools.unpack_dict(raw_output, tools.KEY_ONSETS)
        offsets = tools.unpack_dict(raw_output, tools.KEY_OFFSETS)
        if onsets is not None:
            # Convert onsets to a stacked onset activation map
            raw_output[tools.KEY_ONSETS] = tools.multi_pitch_to_stacked_multi_pitch(onsets)
        if offsets is not None:
            # Convert offsets to a stacked offset activation map
            raw_output[tools.KEY_OFFSETS] = tools.multi_pitch_to_stacked_multi_pitch(offsets)
        # Call the parent class estimate function. Multi pitch is just a special
        # case of stacked multi pitch, where there is only one degree of freedom
        output = super().estimate(raw_output)
        # Collapse the single-slice stack into batched note format
        pitches, intervals = tools.stacked_notes_to_notes(output)
        batched_notes = tools.notes_to_batched_notes(pitches, intervals)
        return batched_notes

    def write(self, batched_notes, track):
        """
        Write note estimates to a file.

        Parameters
        ----------
        batched_notes : ndarray (N x 3)
          Array of note intervals and pitches by row
          N - number of notes
        track : string
          Name of the track being processed
        """

        # Convert the batched notes to loose note groups
        pitches, intervals = tools.batched_notes_to_notes(batched_notes)
        # Stack the loose note groups (single-slice stack)
        stacked_notes = tools.notes_to_stacked_notes(pitches, intervals)
        # Call the parent function
        super().write(stacked_notes, track)
class StackedMultiPitchRefiner(StackedNoteTranscriber):
    """
    Refine stacked multi pitch activation maps, after using them to make note
    predictions, by converting note estimates back into multi pitch activation.
    """

    def __init__(self, profile, save_dir=None, inhibition_window=None, minimum_duration=None):
        """
        Initialize parameters for the estimator.

        Parameters
        ----------
        See StackedNoteTranscriber class...
        """

        super().__init__(profile, save_dir, inhibition_window, minimum_duration)

    def estimate(self, raw_output):
        """
        Refine a stacked multi pitch activation map.

        Parameters
        ----------
        raw_output : dict
          Dictionary containing raw output relevant to estimation

        Returns
        ----------
        stacked_multi_pitch : ndarray (S x F x T)
          Array of multiple discrete pitch activation maps
          S - number of slices in stack
          F - number of discrete pitches
          T - number of frames
        """

        # Attempt to extract pre-existing note estimates
        stacked_notes = tools.unpack_dict(raw_output, tools.KEY_NOTES)
        if stacked_notes is None:
            # Obtain note estimates if they were not provided
            stacked_notes = super().estimate(raw_output)
        # Convert the stacked notes back into stacked multi pitch activation maps
        # NOTE(review): the helper's name suggests a multi-pitch -> onsets
        # conversion, yet it is being fed stacked *notes* here; confirm this
        # should not be something like tools.stacked_notes_to_stacked_multi_pitch(...)
        stacked_multi_pitch = tools.stacked_multi_pitch_to_stacked_onsets(stacked_notes)
        return stacked_multi_pitch

    def write(self, stacked_multi_pitch, track):
        """
        Do nothing. There is no protocol for writing multi pitch activation maps to a file.
        A more appropriate action might be converting them to pitch lists and writing those.

        Parameters
        ----------
        stacked_multi_pitch : ndarray (S x F x T)
          Array of multiple discrete pitch activation maps
          S - number of slices in stack
          F - number of discrete pitches
          T - number of frames
        track : string
          Name of the track being processed
        """

        pass
class MultiPitchRefiner(NoteTranscriber):
    """
    Refine a multi pitch activation map, after using it to make note
    predictions, by converting note estimates back into multi pitch activation.
    """

    def __init__(self, profile, save_dir=None, inhibition_window=None, minimum_duration=None):
        """
        Initialize parameters for the estimator.

        Parameters
        ----------
        See StackedNoteTranscriber class...
        """

        super().__init__(profile, save_dir, inhibition_window, minimum_duration)

    def estimate(self, raw_output):
        """
        Refine a multi pitch activation map.

        Parameters
        ----------
        raw_output : dict
            Dictionary containing raw output relevant to estimation

        Returns
        ----------
        multi_pitch : ndarray (F x T)
            Discrete pitch activation map
            F - number of discrete pitches
            T - number of frames
        """

        # Reuse note estimates when the raw output already contains them
        batched_notes = tools.unpack_dict(raw_output, tools.KEY_NOTES)

        if batched_notes is None:
            # No pre-existing notes - transcribe them from the raw output now
            batched_notes = super().estimate(raw_output)

        # Split the batched representation into pitch/interval pairs
        pitches, intervals = tools.batched_notes_to_notes(batched_notes)

        # Frame times are required to rasterize the notes back onto the grid
        times = tools.unpack_dict(raw_output, tools.KEY_TIMES)

        # Rasterize the note estimates into a multi pitch activation map
        multi_pitch = tools.notes_to_multi_pitch(pitches, intervals, times, self.profile)

        return multi_pitch

    def write(self, multi_pitch, track):
        """
        Do nothing. There is no protocol for writing multi pitch activation maps to a file.
        A more appropriate action might be converting them to pitch lists and writing those.

        Parameters
        ----------
        multi_pitch : ndarray (F x T)
            Discrete pitch activation map
            F - number of discrete pitches
            T - number of frames
        track : string
            Name of the track being processed
        """

        pass
class StackedPitchListWrapper(Estimator):
    """
    Wrapper for converting stacked multi pitch activations to stacked pitch lists.
    """

    def __init__(self, profile, save_dir=None):
        """
        Initialize parameters for the estimator.

        Parameters
        ----------
        See Estimator class...
        """

        super().__init__(profile, save_dir)

    def estimate(self, raw_output):
        """
        Convert stacked multi pitch activations to stacked pitch lists.

        Parameters
        ----------
        raw_output : dict
            Dictionary containing raw output relevant to estimation

        Returns
        ----------
        stacked_pitch_list : dict
            Dictionary containing (slice -> (times, pitch_list)) pairs
        """

        # Pull the stacked activation maps and their frame times out
        # of the raw output, then run the conversion in one step
        stacked_multi_pitch = tools.unpack_dict(raw_output, tools.KEY_MULTIPITCH)
        times = tools.unpack_dict(raw_output, tools.KEY_TIMES)

        return tools.stacked_multi_pitch_to_stacked_pitch_list(stacked_multi_pitch, times, self.profile)

    def write(self, stacked_pitch_list, track):
        """
        Write slice-wise pitch estimates to respective files.

        Parameters
        ----------
        stacked_pitch_list : dict
            Dictionary containing (slice -> (times, pitch_list)) pairs
        track : string
            Name of the track being processed
        """

        # Base name shared by all per-slice result files
        tag = tools.get_tag(track)

        # Slice suffix is only appended when there is more than one slice
        multi_slice = len(stacked_pitch_list) > 1

        for key, (times, pitch_list) in stacked_pitch_list.items():
            # Name the file after the track, disambiguated by slice if needed
            slice_tag = f'{tag}_{key}' if multi_slice else f'{tag}'

            # Save this slice's estimates under the configured directory
            path = os.path.join(self.save_dir, f'{slice_tag}.{tools.TXT_EXT}')
            tools.write_pitch_list(times, pitch_list, path)
class PitchListWrapper(StackedPitchListWrapper):
    """
    Wrapper for converting a multi pitch activation map to a pitch lists.
    """

    def __init__(self, profile, save_dir=None):
        """
        Initialize parameters for the estimator.

        Parameters
        ----------
        See Estimator class...
        """

        super().__init__(profile, save_dir)

    def estimate(self, raw_output):
        """
        Convert a multi pitch activation map to a pitch lists.

        Parameters
        ----------
        raw_output : dict
            Dictionary containing raw output relevant to estimation

        Returns
        ----------
        times : ndarray (N)
            Time in seconds of beginning of each frame
            N - number of time samples (frames)
        pitch_list : list of ndarray (N x [...])
            Array of pitches corresponding to notes
            N - number of pitch observations (frames)
        """

        # Pull the activation map and its frame times out of the raw output
        multi_pitch = tools.unpack_dict(raw_output, tools.KEY_MULTIPITCH)
        times = tools.unpack_dict(raw_output, tools.KEY_TIMES)

        # Collapse the activation map into an observation-wise pitch list
        return times, tools.multi_pitch_to_pitch_list(multi_pitch, self.profile)

    def write(self, pitch_list, track):
        """
        Write pitch estimates to a file.

        Parameters
        ----------
        pitch_list : tuple containing
            times : ndarray (N)
                Time in seconds of beginning of each frame
                N - number of time samples (frames)
            pitch_list : list of ndarray (N x [...])
                Array of pitches corresponding to notes
                N - number of pitch observations (frames)
        track : string
            Name of the track being processed
        """

        # Promote the (times, pitch_list) pair to a single-slice stack and
        # let the parent class handle writing it out
        super().write(tools.pitch_list_to_stacked_pitch_list(*pitch_list), track)
class TablatureWrapper(Estimator):
    """
    Wrapper for converting tablature to multi pitch.
    """

    def __init__(self, profile, save_dir=None, stacked=False):
        """
        Initialize parameters for the estimator.

        Parameters
        ----------
        See Estimator class...

        stacked : bool
            Whether to collapse into a single representation or leave stacked
        """

        super().__init__(profile, save_dir)

        self.stacked = stacked

    def get_key(self):
        """
        Default key for multi pitch activations.
        """

        return tools.KEY_MULTIPITCH

    def estimate(self, raw_output):
        """
        Convert tablature into a single or stacked multi pitch activation map.

        Parameters
        ----------
        raw_output : dict
            Dictionary containing raw output relevant to estimation

        Returns
        ----------
        multi_pitch : ndarray ((S) x F x T)
            Discrete pitch activation map
            S - number of slices in stack - only if stacked=True
            F - number of discrete pitches
            T - number of frames
        """

        # Pull the tablature out of the raw output
        tablature = tools.unpack_dict(raw_output, tools.KEY_TABLATURE)

        # Tablature always converts to a stacked representation first
        multi_pitch = tools.tablature_to_stacked_multi_pitch(tablature, self.profile)

        if not self.stacked:
            # Collapse the stack into a single activation map when requested
            multi_pitch = tools.stacked_multi_pitch_to_multi_pitch(multi_pitch)

        return multi_pitch

    def write(self, multi_pitch, track):
        """
        Do nothing. There is no protocol for writing multi pitch activation maps to a file.
        A more appropriate action might be converting them to pitch lists and writing those.

        Parameters
        ----------
        multi_pitch : ndarray ((S) x F x T)
            Discrete pitch activation map
            S - number of slices in stack - only if stacked=True
            F - number of discrete pitches
            T - number of frames
        track : string
            Name of the track being processed
        """

        pass
| 31.088174 | 118 | 0.622076 |
a25d09e67ac4aff5540ba2b0f11ec21250507d36 | 121 | py | Python | ToDoApp/admin.py | aishabazylzhanova/ToDo | a787e57bf8ace5719d847d8fc4949d05a5d117c5 | [
"MIT"
] | null | null | null | ToDoApp/admin.py | aishabazylzhanova/ToDo | a787e57bf8ace5719d847d8fc4949d05a5d117c5 | [
"MIT"
] | null | null | null | ToDoApp/admin.py | aishabazylzhanova/ToDo | a787e57bf8ace5719d847d8fc4949d05a5d117c5 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Tasks
admin.site.register(Tasks)
# Register your models here.
| 20.166667 | 33 | 0.768595 |
a25efb76b91de6c5a6535d8621723808a44381dd | 8,046 | py | Python | dilami_calendar/constants.py | Jangal/python-deylami-calendar | 65b4a36ea6d9cba71b7086b3c488fd6842ead687 | [
"MIT"
] | 12 | 2019-08-05T19:11:24.000Z | 2021-11-17T03:52:12.000Z | dilami_calendar/constants.py | Jangal/python-dilami-calendar | 65b4a36ea6d9cba71b7086b3c488fd6842ead687 | [
"MIT"
] | 2 | 2019-08-03T05:42:02.000Z | 2021-12-01T07:34:26.000Z | dilami_calendar/constants.py | Jangal/python-dilami-calendar | 65b4a36ea6d9cba71b7086b3c488fd6842ead687 | [
"MIT"
] | null | null | null | DILAMI_WEEKDAY_NAMES = {
0: "",
1: "",
2: "",
3: "",
4: "",
5: "",
6: "",
}
DILAMI_MONTH_NAMES = {
0: "",
1: " ",
2: " ",
3: " ",
4: " ",
5: " ",
6: "",
7: " ",
8: " ",
9: " ",
10: " ",
11: " ",
12: " ",
}
DILAMI_LEAP_YEARS = (
199,
203,
207,
211,
215,
220,
224,
228,
232,
236,
240,
244,
248,
253,
257,
261,
265,
269,
273,
277,
281,
286,
290,
294,
298,
302,
306,
310,
315,
319,
323,
327,
331,
335,
339,
343,
348,
352,
356,
360,
364,
368,
372,
376,
381,
385,
389,
393,
397,
401,
405,
409,
414,
418,
422,
426,
430,
434,
438,
443,
447,
451,
455,
459,
463,
467,
471,
476,
480,
484,
488,
492,
496,
500,
504,
509,
513,
517,
521,
525,
529,
533,
537,
542,
546,
550,
554,
558,
562,
566,
571,
575,
579,
583,
587,
591,
595,
599,
604,
608,
612,
616,
620,
624,
628,
632,
637,
641,
645,
649,
653,
657,
661,
665,
669,
674,
678,
682,
686,
690,
694,
698,
703,
707,
711,
715,
719,
723,
727,
731,
736,
740,
744,
748,
752,
756,
760,
764,
769,
773,
777,
781,
785,
789,
793,
797,
802,
806,
810,
814,
818,
822,
826,
831,
835,
839,
843,
847,
851,
855,
859,
864,
868,
872,
876,
880,
884,
888,
892,
897,
901,
905,
909,
913,
917,
921,
925,
930,
934,
938,
942,
946,
950,
954,
959,
963,
967,
971,
975,
979,
983,
987,
992,
996,
1000,
1004,
1008,
1012,
1016,
1020,
1025,
1029,
1033,
1037,
1041,
1045,
1049,
1053,
1058,
1062,
1066,
1070,
1074,
1078,
1082,
1087,
1091,
1095,
1099,
1103,
1107,
1111,
1115,
1120,
1124,
1128,
1132,
1136,
1140,
1144,
1148,
1153,
1157,
1161,
1165,
1169,
1173,
1177,
1181,
1186,
1190,
1194,
1198,
1202,
1206,
1210,
1215,
1219,
1223,
1227,
1231,
1235,
1239,
1243,
1248,
1252,
1256,
1260,
1264,
1268,
1272,
1276,
1281,
1285,
1289,
1293,
1297,
1301,
1305,
1309,
1314,
1318,
1322,
1326,
1330,
1334,
1338,
1343,
1347,
1351,
1355,
1359,
1363,
1367,
1371,
1376,
1380,
1384,
1388,
1392,
1396,
1400,
1404,
1409,
1413,
1417,
1421,
1425,
1429,
1433,
1437,
1442,
1446,
1450,
1454,
1458,
1462,
1466,
1471,
1475,
1479,
1483,
1487,
1491,
1495,
1499,
1504,
1508,
1512,
1516,
1520,
1524,
1528,
1532,
1537,
1541,
1545,
1549,
1553,
1557,
1561,
1565,
1570,
1574,
1578,
1582,
1586,
1590,
1594,
1599,
1603,
1607,
1611,
1615,
1619,
1623,
1627,
1632,
1636,
1640,
1644,
1648,
1652,
1656,
1660,
1665,
1669,
1673,
1677,
1681,
1685,
1689,
1693,
1698,
1702,
1706,
1710,
1714,
1718,
1722,
1727,
1731,
1735,
1739,
1743,
1747,
1751,
1755,
1760,
1764,
1768,
1772,
1776,
1780,
1784,
1788,
1793,
1797,
1801,
1805,
1809,
1813,
1817,
1821,
1826,
1830,
1834,
1838,
1842,
1846,
1850,
1855,
1859,
1863,
1867,
1871,
1875,
1879,
1883,
1888,
1892,
1896,
1900,
1904,
1908,
1912,
1916,
1921,
1925,
1929,
1933,
1937,
1941,
1945,
1949,
1954,
1958,
1962,
1966,
1970,
1974,
1978,
1983,
1987,
1991,
1995,
1999,
2003,
2007,
2011,
2016,
2020,
2024,
2028,
2032,
2036,
2040,
2044,
2049,
2053,
2057,
2061,
2065,
2069,
2073,
2077,
2082,
2086,
2090,
2094,
2098,
2102,
2106,
2111,
2115,
2119,
2123,
2127,
2131,
2135,
2139,
2144,
2148,
2152,
2156,
2160,
2164,
2168,
2172,
2177,
2181,
2185,
2189,
2193,
2197,
2201,
2205,
2210,
2214,
2218,
2222,
2226,
2230,
2234,
2239,
2243,
2247,
2251,
2255,
2259,
2263,
2267,
2272,
2276,
2280,
2284,
2288,
2292,
2296,
2300,
2305,
2309,
2313,
2317,
2321,
2325,
2329,
2333,
2338,
2342,
2346,
2350,
2354,
2358,
2362,
2367,
2371,
2375,
2379,
2383,
2387,
2391,
2395,
2400,
2404,
2408,
2412,
2416,
2420,
2424,
2428,
2433,
2437,
2441,
2445,
2449,
2453,
2457,
2461,
2466,
2470,
2474,
2478,
2482,
2486,
2490,
2495,
2499,
2503,
2507,
2511,
2515,
2519,
2523,
2528,
2532,
2536,
2540,
2544,
2548,
2552,
2556,
2561,
2565,
2569,
2573,
2577,
2581,
2585,
2589,
2594,
2598,
2602,
2606,
2610,
2614,
2618,
2623,
2627,
2631,
2635,
2639,
2643,
2647,
2651,
2656,
2660,
2664,
2668,
2672,
2676,
2680,
2684,
2689,
2693,
2697,
2701,
2705,
2709,
2713,
2717,
2722,
2726,
2730,
2734,
2738,
2742,
2746,
2751,
2755,
2759,
2763,
2767,
2771,
2775,
2779,
2784,
2788,
2792,
2796,
2800,
2804,
2808,
2812,
2817,
2821,
2825,
2829,
2833,
2837,
2841,
2845,
2850,
2854,
2858,
2862,
2866,
2870,
2874,
2879,
2883,
2887,
2891,
2895,
2899,
2903,
2907,
2912,
2916,
2920,
2924,
2928,
2932,
2936,
2940,
2945,
2949,
2953,
2957,
2961,
2965,
2969,
2973,
2978,
2982,
2986,
2990,
2994,
2998,
3002,
3007,
3011,
3015,
3019,
3023,
3027,
3031,
3035,
3040,
3044,
3048,
3052,
3056,
3060,
3064,
3068,
3073,
3077,
3081,
3085,
3089,
3093,
3097,
3101,
3106,
3110,
3114,
3118,
3122,
3126,
3130,
3135,
3139,
3143,
3147,
3151,
3155,
3159,
3163,
3168,
3172,
3176,
3180,
3184,
3188,
3192,
3196,
3201,
3205,
3209,
3213,
3217,
3221,
3225,
3229,
3234,
3238,
3242,
3246,
3250,
3254,
3258,
3263,
3267,
3271,
3275,
3279,
3283,
3287,
3291,
3296,
3300,
3304,
3308,
3312,
3316,
3320,
3324,
3329,
3333,
3337,
3341,
3345,
3349,
3353,
3357,
3362,
3366,
3370,
)
#: Minimum year supported by the library.
MINYEAR = 195
#: Maximum year supported by the library.
MAXYEAR = 3372
| 10.007463 | 41 | 0.393239 |
a25fceaa81b9a2397bbf59a5c9765ebd1d84a0d6 | 324 | py | Python | inputs/sineClock.py | hongaar/ringctl | 9e2adbdf16e85852019466e42be9d88a9e63cde5 | [
"MIT"
] | null | null | null | inputs/sineClock.py | hongaar/ringctl | 9e2adbdf16e85852019466e42be9d88a9e63cde5 | [
"MIT"
] | null | null | null | inputs/sineClock.py | hongaar/ringctl | 9e2adbdf16e85852019466e42be9d88a9e63cde5 | [
"MIT"
] | null | null | null | import math
from inputs.sine import Sine
from inputs.timeElapsed import TimeElapsed
from utils.number import Number
| 20.25 | 56 | 0.70679 |
a26034218c90d245fe24941c0da299f8ed7dd85c | 667 | py | Python | config/urls.py | erik-sn/tagmap | 8131fac833cf4edd20ac3497377ec2145fa75bcc | [
"MIT"
] | null | null | null | config/urls.py | erik-sn/tagmap | 8131fac833cf4edd20ac3497377ec2145fa75bcc | [
"MIT"
] | null | null | null | config/urls.py | erik-sn/tagmap | 8131fac833cf4edd20ac3497377ec2145fa75bcc | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls import url, include
from django.contrib import admin
from api.views import index
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/', include('api.urls')),
]
# troubleshooting tool
if settings.TOOLBAR:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
"""
If we are serving the base html file through django then
route all non-matching urls to the html file where they
will be processed on the client by the react application
"""
if settings.SERVER_TYPE.upper() == 'DJANGO':
urlpatterns += [url(r'^.*$', index)]
| 25.653846 | 57 | 0.706147 |
a26076e09d7b45380034f14f9bab4f75147d9786 | 86 | py | Python | run.py | tdavislab/mapper-stitching | 09cb6949cea57ebece640b58ef5c449fb177db38 | [
"MIT"
] | 10 | 2019-06-12T01:18:44.000Z | 2021-12-19T16:12:08.000Z | run.py | tdavislab/mapper-stitching | 09cb6949cea57ebece640b58ef5c449fb177db38 | [
"MIT"
] | 7 | 2019-03-20T23:47:49.000Z | 2019-04-10T19:23:41.000Z | run.py | tdavislab/mapper-stitching | 09cb6949cea57ebece640b58ef5c449fb177db38 | [
"MIT"
] | 3 | 2020-10-16T04:30:09.000Z | 2021-03-16T18:45:33.000Z | #!flask/bin/python
from app import app
app.run(host='127.0.0.1',port=8080,debug=True)
| 21.5 | 46 | 0.732558 |
a26126e8b013a4ee9583aa03f98292063e236062 | 2,572 | py | Python | middleware.py | jaylett/django_audited_model | b7d45b2e325512861a0ef23e756a81bfdf3adaf7 | [
"MIT"
] | 1 | 2016-05-06T07:07:18.000Z | 2016-05-06T07:07:18.000Z | middleware.py | jaylett/django_audited_model | b7d45b2e325512861a0ef23e756a81bfdf3adaf7 | [
"MIT"
] | null | null | null | middleware.py | jaylett/django_audited_model | b7d45b2e325512861a0ef23e756a81bfdf3adaf7 | [
"MIT"
] | null | null | null | # Copyright (c) 2009 James Aylett <http://tartarus.org/james/computers/django/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from django.db.models.signals import pre_save
import threading
import datetime
stash = threading.local()
def get_current_user():
    """Return the user tied to the request currently being processed, or None outside a request."""
    # The thread-local stash is populated per-request by the middleware;
    # when no request is active the attribute is absent, so default to None.
    current = getattr(stash, 'current_user', None)
    return current
pre_save.connect(onanymodel_presave)
| 42.866667 | 104 | 0.734059 |
a261c4073b37f990b45a6d0c9e5cc17d54ee8a8f | 24,440 | py | Python | data_attributes.py | prise-3d/Thesis-NoiseDetection-metrics | b37b2a3e0601e8a879df12c9d88289b1ea43bbb1 | [
"MIT"
] | null | null | null | data_attributes.py | prise-3d/Thesis-NoiseDetection-metrics | b37b2a3e0601e8a879df12c9d88289b1ea43bbb1 | [
"MIT"
] | null | null | null | data_attributes.py | prise-3d/Thesis-NoiseDetection-metrics | b37b2a3e0601e8a879df12c9d88289b1ea43bbb1 | [
"MIT"
] | null | null | null | # main imports
import numpy as np
import sys
# image transform imports
from PIL import Image
from skimage import color
from sklearn.decomposition import FastICA
from sklearn.decomposition import IncrementalPCA
from sklearn.decomposition import TruncatedSVD
from numpy.linalg import svd as lin_svd
from scipy.signal import medfilt2d, wiener, cwt
import pywt
import cv2
from ipfml.processing import transform, compression, segmentation
from ipfml.filters import convolution, kernels
from ipfml import utils
# modules and config imports
sys.path.insert(0, '') # trick to enable import of main folder module
import custom_config as cfg
from modules.utils import data as dt
def get_image_features(data_type, block):
"""
Method which returns the data type expected
"""
if data_type == 'lab':
block_file_path = '/tmp/lab_img.png'
block.save(block_file_path)
data = transform.get_LAB_L_SVD_s(Image.open(block_file_path))
if data_type == 'mscn':
img_mscn_revisited = transform.rgb_to_mscn(block)
# save tmp as img
img_output = Image.fromarray(img_mscn_revisited.astype('uint8'), 'L')
mscn_revisited_file_path = '/tmp/mscn_revisited_img.png'
img_output.save(mscn_revisited_file_path)
img_block = Image.open(mscn_revisited_file_path)
# extract from temp image
data = compression.get_SVD_s(img_block)
"""if data_type == 'mscn':
img_gray = np.array(color.rgb2gray(np.asarray(block))*255, 'uint8')
img_mscn = transform.calculate_mscn_coefficients(img_gray, 7)
img_mscn_norm = transform.normalize_2D_arr(img_mscn)
img_mscn_gray = np.array(img_mscn_norm*255, 'uint8')
data = compression.get_SVD_s(img_mscn_gray)
"""
if data_type == 'low_bits_6':
low_bits_6 = transform.rgb_to_LAB_L_low_bits(block, 6)
data = compression.get_SVD_s(low_bits_6)
if data_type == 'low_bits_5':
low_bits_5 = transform.rgb_to_LAB_L_low_bits(block, 5)
data = compression.get_SVD_s(low_bits_5)
if data_type == 'low_bits_4':
low_bits_4 = transform.rgb_to_LAB_L_low_bits(block, 4)
data = compression.get_SVD_s(low_bits_4)
if data_type == 'low_bits_3':
low_bits_3 = transform.rgb_to_LAB_L_low_bits(block, 3)
data = compression.get_SVD_s(low_bits_3)
if data_type == 'low_bits_2':
low_bits_2 = transform.rgb_to_LAB_L_low_bits(block, 2)
data = compression.get_SVD_s(low_bits_2)
if data_type == 'low_bits_4_shifted_2':
data = compression.get_SVD_s(transform.rgb_to_LAB_L_bits(block, (3, 6)))
if data_type == 'sub_blocks_stats':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 4), int(height / 4)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
# get information we want from svd
data.append(np.mean(l_svd_data))
data.append(np.median(l_svd_data))
data.append(np.percentile(l_svd_data, 25))
data.append(np.percentile(l_svd_data, 75))
data.append(np.var(l_svd_data))
area_under_curve = utils.integral_area_trapz(l_svd_data, dx=100)
data.append(area_under_curve)
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'sub_blocks_stats_reduced':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 4), int(height / 4)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
# get information we want from svd
data.append(np.mean(l_svd_data))
data.append(np.median(l_svd_data))
data.append(np.percentile(l_svd_data, 25))
data.append(np.percentile(l_svd_data, 75))
data.append(np.var(l_svd_data))
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'sub_blocks_area':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 8), int(height / 8)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
data.append(area_under_curve)
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'sub_blocks_area_normed':
block = np.asarray(block)
width, height, _= block.shape
sub_width, sub_height = int(width / 8), int(height / 8)
sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height))
data = []
for sub_b in sub_blocks:
# by default use the whole lab L canal
l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
l_svd_data = utils.normalize_arr(l_svd_data)
area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
data.append(area_under_curve)
# convert into numpy array after computing all stats
data = np.asarray(data)
if data_type == 'mscn_var_4':
data = _get_mscn_variance(block, (100, 100))
if data_type == 'mscn_var_16':
data = _get_mscn_variance(block, (50, 50))
if data_type == 'mscn_var_64':
data = _get_mscn_variance(block, (25, 25))
if data_type == 'mscn_var_16_max':
data = _get_mscn_variance(block, (50, 50))
data = np.asarray(data)
size = int(len(data) / 4)
indices = data.argsort()[-size:][::-1]
data = data[indices]
if data_type == 'mscn_var_64_max':
data = _get_mscn_variance(block, (25, 25))
data = np.asarray(data)
size = int(len(data) / 4)
indices = data.argsort()[-size:][::-1]
data = data[indices]
if data_type == 'ica_diff':
current_image = transform.get_LAB_L(block)
ica = FastICA(n_components=50)
ica.fit(current_image)
image_ica = ica.fit_transform(current_image)
image_restored = ica.inverse_transform(image_ica)
final_image = utils.normalize_2D_arr(image_restored)
final_image = np.array(final_image * 255, 'uint8')
sv_values = utils.normalize_arr(compression.get_SVD_s(current_image))
ica_sv_values = utils.normalize_arr(compression.get_SVD_s(final_image))
data = abs(np.array(sv_values) - np.array(ica_sv_values))
if data_type == 'svd_trunc_diff':
current_image = transform.get_LAB_L(block)
svd = TruncatedSVD(n_components=30, n_iter=100, random_state=42)
transformed_image = svd.fit_transform(current_image)
restored_image = svd.inverse_transform(transformed_image)
reduced_image = (current_image - restored_image)
U, s, V = compression.get_SVD(reduced_image)
data = s
if data_type == 'ipca_diff':
current_image = transform.get_LAB_L(block)
transformer = IncrementalPCA(n_components=20, batch_size=25)
transformed_image = transformer.fit_transform(current_image)
restored_image = transformer.inverse_transform(transformed_image)
reduced_image = (current_image - restored_image)
U, s, V = compression.get_SVD(reduced_image)
data = s
if data_type == 'svd_reconstruct':
reconstructed_interval = (90, 200)
begin, end = reconstructed_interval
lab_img = transform.get_LAB_L(block)
lab_img = np.array(lab_img, 'uint8')
U, s, V = lin_svd(lab_img, full_matrices=True)
smat = np.zeros((end-begin, end-begin), dtype=complex)
smat[:, :] = np.diag(s[begin:end])
output_img = np.dot(U[:, begin:end], np.dot(smat, V[begin:end, :]))
output_img = np.array(output_img, 'uint8')
data = compression.get_SVD_s(output_img)
if 'sv_std_filters' in data_type:
# convert into lab by default to apply filters
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
# Apply list of filter on arr
images.append(medfilt2d(arr, [3, 3]))
images.append(medfilt2d(arr, [5, 5]))
images.append(wiener(arr, [3, 3]))
images.append(wiener(arr, [5, 5]))
# By default computation of current block image
s_arr = compression.get_SVD_s(arr)
sv_vector = [s_arr]
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_array = np.array(sv_vector)
_, length = sv_array.shape
sv_std = []
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
data = s_arr[indices]
# with the use of wavelet
if 'wave_sv_std_filters' in data_type:
# convert into lab by default to apply filters
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
# Apply list of filter on arr
images.append(medfilt2d(arr, [3, 3]))
# By default computation of current block image
s_arr = compression.get_SVD_s(arr)
sv_vector = [s_arr]
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_array = np.array(sv_vector)
_, length = sv_array.shape
sv_std = []
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
data = s_arr[indices]
# with the use of wavelet
if 'sv_std_filters_full' in data_type:
# convert into lab by default to apply filters
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
# Apply list of filter on arr
kernel = np.ones((3,3),np.float32)/9
images.append(cv2.filter2D(arr,-1,kernel))
kernel = np.ones((5,5),np.float32)/25
images.append(cv2.filter2D(arr,-1,kernel))
images.append(cv2.GaussianBlur(arr, (3, 3), 0.5))
images.append(cv2.GaussianBlur(arr, (3, 3), 1))
images.append(cv2.GaussianBlur(arr, (3, 3), 1.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 0.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 1))
images.append(cv2.GaussianBlur(arr, (5, 5), 1.5))
images.append(medfilt2d(arr, [3, 3]))
images.append(medfilt2d(arr, [5, 5]))
images.append(wiener(arr, [3, 3]))
images.append(wiener(arr, [5, 5]))
wave = w2d(arr, 'db1', 2)
images.append(np.array(wave, 'float64'))
# By default computation of current block image
s_arr = compression.get_SVD_s(arr)
sv_vector = [s_arr]
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_array = np.array(sv_vector)
_, length = sv_array.shape
sv_std = []
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
data = s_arr[indices]
if 'sv_entropy_std_filters' in data_type:
lab_img = transform.get_LAB_L(block)
arr = np.array(lab_img)
images = []
kernel = np.ones((3,3),np.float32)/9
images.append(cv2.filter2D(arr,-1,kernel))
kernel = np.ones((5,5),np.float32)/25
images.append(cv2.filter2D(arr,-1,kernel))
images.append(cv2.GaussianBlur(arr, (3, 3), 0.5))
images.append(cv2.GaussianBlur(arr, (3, 3), 1))
images.append(cv2.GaussianBlur(arr, (3, 3), 1.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 0.5))
images.append(cv2.GaussianBlur(arr, (5, 5), 1))
images.append(cv2.GaussianBlur(arr, (5, 5), 1.5))
images.append(medfilt2d(arr, [3, 3]))
images.append(medfilt2d(arr, [5, 5]))
images.append(wiener(arr, [3, 3]))
images.append(wiener(arr, [5, 5]))
wave = w2d(arr, 'db1', 2)
images.append(np.array(wave, 'float64'))
sv_vector = []
sv_entropy_list = []
# for each new image apply SVD and get SV
for img in images:
s = compression.get_SVD_s(img)
sv_vector.append(s)
sv_entropy = [utils.get_entropy_contribution_of_i(s, id_sv) for id_sv, sv in enumerate(s)]
sv_entropy_list.append(sv_entropy)
sv_std = []
sv_array = np.array(sv_vector)
_, length = sv_array.shape
# normalize each SV vectors and compute standard deviation for each sub vectors
for i in range(length):
sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
sv_std.append(np.std(sv_array[:, i]))
indices = []
if 'lowest' in data_type:
indices = utils.get_indices_of_lowest_values(sv_std, 200)
if 'highest' in data_type:
indices = utils.get_indices_of_highest_values(sv_std, 200)
# data are arranged following std trend computed
s_arr = compression.get_SVD_s(arr)
data = s_arr[indices]
if 'convolutional_kernels' in data_type:
sub_zones = segmentation.divide_in_blocks(block, (20, 20))
data = []
diff_std_list_3 = []
diff_std_list_5 = []
diff_mean_list_3 = []
diff_mean_list_5 = []
plane_std_list_3 = []
plane_std_list_5 = []
plane_mean_list_3 = []
plane_mean_list_5 = []
plane_max_std_list_3 = []
plane_max_std_list_5 = []
plane_max_mean_list_3 = []
plane_max_mean_list_5 = []
for sub_zone in sub_zones:
l_img = transform.get_LAB_L(sub_zone)
normed_l_img = utils.normalize_2D_arr(l_img)
# bilateral with window of size (3, 3)
normed_diff = convolution.convolution2D(normed_l_img, kernels.min_bilateral_diff, (3, 3))
std_diff = np.std(normed_diff)
mean_diff = np.mean(normed_diff)
diff_std_list_3.append(std_diff)
diff_mean_list_3.append(mean_diff)
# bilateral with window of size (5, 5)
normed_diff = convolution.convolution2D(normed_l_img, kernels.min_bilateral_diff, (5, 5))
std_diff = np.std(normed_diff)
mean_diff = np.mean(normed_diff)
diff_std_list_5.append(std_diff)
diff_mean_list_5.append(mean_diff)
# plane mean with window of size (3, 3)
normed_plane_mean = convolution.convolution2D(normed_l_img, kernels.plane_mean, (3, 3))
std_plane_mean = np.std(normed_plane_mean)
mean_plane_mean = np.mean(normed_plane_mean)
plane_std_list_3.append(std_plane_mean)
plane_mean_list_3.append(mean_plane_mean)
# plane mean with window of size (5, 5)
normed_plane_mean = convolution.convolution2D(normed_l_img, kernels.plane_mean, (5, 5))
std_plane_mean = np.std(normed_plane_mean)
mean_plane_mean = np.mean(normed_plane_mean)
plane_std_list_5.append(std_plane_mean)
plane_mean_list_5.append(mean_plane_mean)
# plane max error with window of size (3, 3)
normed_plane_max = convolution.convolution2D(normed_l_img, kernels.plane_max_error, (3, 3))
std_plane_max = np.std(normed_plane_max)
mean_plane_max = np.mean(normed_plane_max)
plane_max_std_list_3.append(std_plane_max)
plane_max_mean_list_3.append(mean_plane_max)
# plane max error with window of size (5, 5)
normed_plane_max = convolution.convolution2D(normed_l_img, kernels.plane_max_error, (5, 5))
std_plane_max = np.std(normed_plane_max)
mean_plane_max = np.mean(normed_plane_max)
plane_max_std_list_5.append(std_plane_max)
plane_max_mean_list_5.append(mean_plane_max)
diff_std_list_3 = np.array(diff_std_list_3)
diff_std_list_5 = np.array(diff_std_list_5)
diff_mean_list_3 = np.array(diff_mean_list_3)
diff_mean_list_5 = np.array(diff_mean_list_5)
plane_std_list_3 = np.array(plane_std_list_3)
plane_std_list_5 = np.array(plane_std_list_5)
plane_mean_list_3 = np.array(plane_mean_list_3)
plane_mean_list_5 = np.array(plane_mean_list_5)
plane_max_std_list_3 = np.array(plane_max_std_list_3)
plane_max_std_list_5 = np.array(plane_max_std_list_5)
plane_max_mean_list_3 = np.array(plane_max_mean_list_3)
plane_max_mean_list_5 = np.array(plane_max_mean_list_5)
if 'std_max_blocks' in data_type:
data.append(np.std(diff_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(diff_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(diff_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.std(diff_mean_list_5[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_mean_list_5[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_max_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_max_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_max_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.std(plane_max_mean_list_5[0:int(len(sub_zones)/5)]))
if 'mean_max_blocks' in data_type:
data.append(np.mean(diff_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(diff_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(diff_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.mean(diff_mean_list_5[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_mean_list_5[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_max_std_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_max_mean_list_3[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_max_std_list_5[0:int(len(sub_zones)/5)]))
data.append(np.mean(plane_max_mean_list_5[0:int(len(sub_zones)/5)]))
if 'std_normed' in data_type:
data.append(np.std(diff_std_list_3))
data.append(np.std(diff_mean_list_3))
data.append(np.std(diff_std_list_5))
data.append(np.std(diff_mean_list_5))
data.append(np.std(plane_std_list_3))
data.append(np.std(plane_mean_list_3))
data.append(np.std(plane_std_list_5))
data.append(np.std(plane_mean_list_5))
data.append(np.std(plane_max_std_list_3))
data.append(np.std(plane_max_mean_list_3))
data.append(np.std(plane_max_std_list_5))
data.append(np.std(plane_max_mean_list_5))
if 'mean_normed' in data_type:
data.append(np.mean(diff_std_list_3))
data.append(np.mean(diff_mean_list_3))
data.append(np.mean(diff_std_list_5))
data.append(np.mean(diff_mean_list_5))
data.append(np.mean(plane_std_list_3))
data.append(np.mean(plane_mean_list_3))
data.append(np.mean(plane_std_list_5))
data.append(np.mean(plane_mean_list_5))
data.append(np.mean(plane_max_std_list_3))
data.append(np.mean(plane_max_mean_list_3))
data.append(np.mean(plane_max_std_list_5))
data.append(np.mean(plane_max_mean_list_5))
data = np.array(data)
if data_type == 'convolutional_kernel_stats_svd':
l_img = transform.get_LAB_L(block)
normed_l_img = utils.normalize_2D_arr(l_img)
# bilateral with window of size (5, 5)
normed_diff = convolution.convolution2D(normed_l_img, kernels.min_bilateral_diff, (5, 5))
# getting sigma vector from SVD compression
s = compression.get_SVD_s(normed_diff)
data = s
if data_type == 'svd_entropy':
l_img = transform.get_LAB_L(block)
blocks = segmentation.divide_in_blocks(l_img, (20, 20))
values = []
for b in blocks:
sv = compression.get_SVD_s(b)
values.append(utils.get_entropy(sv))
data = np.array(values)
if data_type == 'svd_entropy_20':
l_img = transform.get_LAB_L(block)
blocks = segmentation.divide_in_blocks(l_img, (20, 20))
values = []
for b in blocks:
sv = compression.get_SVD_s(b)
values.append(utils.get_entropy(sv))
data = np.array(values)
if data_type == 'svd_entropy_noise_20':
l_img = transform.get_LAB_L(block)
blocks = segmentation.divide_in_blocks(l_img, (20, 20))
values = []
for b in blocks:
sv = compression.get_SVD_s(b)
sv_size = len(sv)
values.append(utils.get_entropy(sv[int(sv_size / 4):]))
data = np.array(values)
return data
| 32.849462 | 103 | 0.627169 |
a26266a4fdcfcd0c96232392fec99b6244059514 | 2,008 | py | Python | pythonVersion/interpolateMetm.py | oradules/Deconvolution_short_long | 730a55a257a376e2b347c0d2453347c2c463ab17 | [
"BSD-3-Clause"
] | 1 | 2021-05-26T12:41:45.000Z | 2021-05-26T12:41:45.000Z | pythonVersion/interpolateMetm.py | oradules/Deconvolution_short_long | 730a55a257a376e2b347c0d2453347c2c463ab17 | [
"BSD-3-Clause"
] | null | null | null | pythonVersion/interpolateMetm.py | oradules/Deconvolution_short_long | 730a55a257a376e2b347c0d2453347c2c463ab17 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 11 21:36:32 2021
@author: rachel
"""
import numpy as np
from scipy import interpolate
import matplotlib.pyplot as plt | 28.685714 | 63 | 0.564741 |
a263c93196bd64932fb6ed1c2feb12221c53c151 | 3,036 | py | Python | mad_scientist_lab/histogram.py | wusui/squidly_dorkle | 45992db8b2a9f6fa1264549ad88c25a8989af5c0 | [
"MIT"
] | null | null | null | mad_scientist_lab/histogram.py | wusui/squidly_dorkle | 45992db8b2a9f6fa1264549ad88c25a8989af5c0 | [
"MIT"
] | null | null | null | mad_scientist_lab/histogram.py | wusui/squidly_dorkle | 45992db8b2a9f6fa1264549ad88c25a8989af5c0 | [
"MIT"
] | null | null | null | import os
import sys
import codecs
import itertools
# Force UTF-8 output regardless of the console's default encoding.
# FIX: the original spelled the codec name 'utf=8'; that only worked by
# accident because codec-name normalisation maps '=' to '_' ('utf_8').
# Use the canonical spelling.
sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer)
sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer)
# Word list location (hard-coded Windows path; `os` is imported at the top
# of the original file, outside this excerpt).
fname = os.sep.join(["D:", "Users", "Warren", "python3",
                     "squirrels_on_caffeine", "src", "sedecordle",
                     "answers.txt"])
with open(fname, "r", encoding="UTF-8") as rfile:
    glist = rfile.read()
# Letter histograms over every word: `histogram_full` counts each letter
# occurrence, `histogram_once` counts each distinct letter once per word.
# NOTE(review): add_to() is defined elsewhere in this file (not visible in
# this excerpt); presumably it tallies letters into the dict -- confirm.
histogram_full = {}
histogram_once = {}
for word in glist.split():
    f_word = sorted(word)
    o_word = list(set(f_word))
    add_to(f_word, histogram_full)
    add_to(o_word, histogram_once)
print(dict(sorted(histogram_full.items(), key=lambda item: item[1])))
print(dict(sorted(histogram_once.items(), key=lambda item: item[1])))
# Keep only words made of 5 distinct letters, none of which is a rare
# letter (j, q, v, w, x, z).
ok_list = []
for word in glist.split():
    bad = False
    for tlet in 'jqvwxz':
        if tlet in word:
            bad = True
            break
    if bad:
        continue
    if len(list(set(word))) != 5:
        continue
    ok_list.append(word)
print(ok_list)
print(len(ok_list), len(glist))
# Candidate letter pool (the alphabet minus 'a' and the rare letters above);
# enumerate all 9-letter subsets of it.
OKLETS = 'bcdefghiklmnoprstuy'
acombos = list(itertools.combinations(OKLETS, 9))
print(acombos[50000])
lset = list(acombos[50000])
# print(get_2x5_wlist(ok_list, lset, 'a'))
# For each 9-letter subset, look for a pair of words covering it plus 'a',
# then try to cover the remaining letters with a second pair.
# NOTE(review): get_2x5_wlist() is also defined elsewhere (not visible here).
out_str = []
for entry in acombos:
    ret_list = get_2x5_wlist(ok_list, list(entry), 'a')
    if ret_list:
        nstr = ret_list[0][0] + ret_list[0][1]
        # Letters of OKLETS not used by the first pair.
        str2 = []
        for let2 in OKLETS:
            if let2 not in nstr:
                str2.append(let2)
        rlist2 = get_2x5_wlist(ok_list, str2[1:], str2[0])
        if rlist2:
            print(ret_list, " pairs with ", rlist2)
            for p1 in ret_list:
                for p2 in rlist2:
                    out_str += [p1 + p2]
# De-duplicate the solutions (order-insensitively) and write them out.
txtlist = []
for entry in out_str:
    s = ", ".join(sorted(entry))
    txtlist.append(s)
slist = list(set(sorted(txtlist)))
ostr = "\n".join(slist)
with open("wlist20.txt", "w") as wlist:
    wlist.write(ostr)
a265970c825b69a6bcc7be605b442dbeced8128f | 9,491 | py | Python | app/jobHistory/migrations/0003_auto_20190804_1403.py | stephengtuggy/job-history | 5c4931ff7b594494a687da0253262c7fc46f8b13 | [
"MIT"
] | 2 | 2020-01-18T00:39:35.000Z | 2020-01-18T02:03:26.000Z | app/jobHistory/migrations/0003_auto_20190804_1403.py | stephengtuggy/job-history | 5c4931ff7b594494a687da0253262c7fc46f8b13 | [
"MIT"
] | 18 | 2020-08-07T23:22:37.000Z | 2021-06-10T18:38:42.000Z | app/jobHistory/migrations/0003_auto_20190804_1403.py | stephengtuggy/job-history | 5c4931ff7b594494a687da0253262c7fc46f8b13 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.4 on 2019-08-04 21:03
from django.db import migrations, models
import django.db.models.deletion
| 40.387234 | 132 | 0.603624 |
a265a038ab356fbb6e17091c1ee11fb5ec910fe6 | 518 | py | Python | Messaging/Packets/Server/Home/LobbyInfoMessage.py | Kuler2006/BSDS-V40 | 9e9a6e5b36cd5082fe428ebb0279df23d5d9c7b7 | [
"Apache-2.0"
] | 4 | 2021-11-27T16:49:30.000Z | 2021-12-21T13:50:00.000Z | Messaging/Packets/Server/Home/LobbyInfoMessage.py | Kuler2006/BSDS-V40 | 9e9a6e5b36cd5082fe428ebb0279df23d5d9c7b7 | [
"Apache-2.0"
] | null | null | null | Messaging/Packets/Server/Home/LobbyInfoMessage.py | Kuler2006/BSDS-V40 | 9e9a6e5b36cd5082fe428ebb0279df23d5d9c7b7 | [
"Apache-2.0"
] | 1 | 2021-12-21T13:38:20.000Z | 2021-12-21T13:38:20.000Z | from Logic.Data.DataManager import Writer
from Logic.Client.ClientsManager import ClientsManager
| 34.533333 | 133 | 0.694981 |
a265d646f255b96ee6cd63611d22fe0c03ffcd24 | 1,560 | py | Python | article/views.py | TianyongWang/TyBlog | 2d3543a314beafe55762b58ab23d4ef4dc2cbfe9 | [
"MIT"
] | null | null | null | article/views.py | TianyongWang/TyBlog | 2d3543a314beafe55762b58ab23d4ef4dc2cbfe9 | [
"MIT"
] | null | null | null | article/views.py | TianyongWang/TyBlog | 2d3543a314beafe55762b58ab23d4ef4dc2cbfe9 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
from article.models import Article
from datetime import datetime
# Create your views here.
# def home(request):
# return HttpResponse("Hello World, Django,Blog")
# def detail(request, my_args):
# # return HttpResponse("You're looking at my_args %s." % my_args)
# post = Article.objects.all()[int(my_args)]
# str = ("title = %s,category = %s,date_time = %s,content = %s" % (post.title,post.category,post.date_time,post.content))
# return HttpResponse(str)
# def test(request):
# return render(request,'test.html',{'current_time':datetime.now()}) | 32.5 | 125 | 0.663462 |
a26749689fb404e888e2125613c846cdef380811 | 405 | py | Python | PythonExercicios/ex067.py | Luis-Emanuel/Python | 92936dfb005b9755a53425d16c3ff54119eebe78 | [
"MIT"
] | null | null | null | PythonExercicios/ex067.py | Luis-Emanuel/Python | 92936dfb005b9755a53425d16c3ff54119eebe78 | [
"MIT"
] | null | null | null | PythonExercicios/ex067.py | Luis-Emanuel/Python | 92936dfb005b9755a53425d16c3ff54119eebe78 | [
"MIT"
] | null | null | null | #Faa um programa que mostre a tabuada de vrios nmeros, um de cada vez, para cada valor digitado pelo usurio.
#O programa ser interronpido quando o nmero solicitado for negativo.
c = 0
while True:
print(30*'-')
num = int(input('Quer ver a tabuada de qual valor ?'))
print(30*'-')
if num < 0:
break
for c in range(1,11):
print(f'{num} X {c} = {num*c}')
print('FIM') | 33.75 | 112 | 0.637037 |
a26af4c2704297b324a8b326cbf17e3cd4d232f6 | 1,251 | py | Python | examples/src/python/bolt/half_ack_bolt.py | takeratta/heron | 7b7c38594186f009741c62d379364b9b45d82b61 | [
"Apache-2.0"
] | 1 | 2021-06-29T07:00:10.000Z | 2021-06-29T07:00:10.000Z | examples/src/python/bolt/half_ack_bolt.py | kalimfaria/heron | d59bd016b826006e2af22c7a6452342f5e7d637c | [
"Apache-2.0"
] | null | null | null | examples/src/python/bolt/half_ack_bolt.py | kalimfaria/heron | d59bd016b826006e2af22c7a6452342f5e7d637c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# copyright 2016 twitter. all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''half ack bolt'''
from heronpy.api.bolt.bolt import Bolt
| 32.921053 | 75 | 0.694644 |
a26b73d904e11aae41e76e1fb93f09e8f345dc84 | 534 | py | Python | projects/cassava-leaf-disease/code/src/config.py | dric2018/coding-room | ff538ed16d09ab4918d1b0d55aef09fe95b1078a | [
"MIT"
] | 1 | 2021-02-02T08:30:50.000Z | 2021-02-02T08:30:50.000Z | projects/cassava-leaf-disease/code/src/.ipynb_checkpoints/config-checkpoint.py | dric2018/coding-room | ff538ed16d09ab4918d1b0d55aef09fe95b1078a | [
"MIT"
] | null | null | null | projects/cassava-leaf-disease/code/src/.ipynb_checkpoints/config-checkpoint.py | dric2018/coding-room | ff538ed16d09ab4918d1b0d55aef09fe95b1078a | [
"MIT"
] | 1 | 2021-03-09T14:27:00.000Z | 2021-03-09T14:27:00.000Z | import os
| 25.428571 | 66 | 0.629213 |
a26c405342f3cf01116c7589d07a48162ad6f4f5 | 1,265 | py | Python | midburn/migrations/0007_auto_20160116_0902.py | mtr574/projectMidbrunFirstReg | 2569c3f07e1af746bfc1f213632708c76d8fc829 | [
"Apache-2.0"
] | null | null | null | midburn/migrations/0007_auto_20160116_0902.py | mtr574/projectMidbrunFirstReg | 2569c3f07e1af746bfc1f213632708c76d8fc829 | [
"Apache-2.0"
] | 1 | 2016-01-22T09:32:04.000Z | 2016-01-22T12:14:12.000Z | midburn/migrations/0007_auto_20160116_0902.py | mtr574/projectMidbrunFirstReg | 2569c3f07e1af746bfc1f213632708c76d8fc829 | [
"Apache-2.0"
] | 3 | 2016-11-04T12:10:03.000Z | 2017-02-23T08:52:53.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
| 29.418605 | 88 | 0.575494 |
a26ce5cbff56541c401d259eb84396d16a623b3d | 329 | py | Python | win/test_ddg.py | janakhpon/PersonalAssistant | bacd6743d23d139af1199df12c7bf99d092764b1 | [
"MIT"
] | null | null | null | win/test_ddg.py | janakhpon/PersonalAssistant | bacd6743d23d139af1199df12c7bf99d092764b1 | [
"MIT"
] | null | null | null | win/test_ddg.py | janakhpon/PersonalAssistant | bacd6743d23d139af1199df12c7bf99d092764b1 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
text = input("text : ")
text.replace(" ", "+")
params = {"q": text}
content = requests.get("https://duckduckgo.com/?q=", params=params)
soup = BeautifulSoup(content.text, 'html.parser')
res = soup.find_all('div', class_="result__snippet js-result-snippet")
for r in res:
print(r)
| 25.307692 | 70 | 0.702128 |
a26e63d9279b0f8a80c97662c0a07a697eeb4cdf | 2,771 | py | Python | experiments/scripts/third_party/roll_q/roll_q.py | AnonymousALifer/primordium | 0c37d387f2cc5b343c6cbd038ae197fd9a502d76 | [
"MIT"
] | null | null | null | experiments/scripts/third_party/roll_q/roll_q.py | AnonymousALifer/primordium | 0c37d387f2cc5b343c6cbd038ae197fd9a502d76 | [
"MIT"
] | null | null | null | experiments/scripts/third_party/roll_q/roll_q.py | AnonymousALifer/primordium | 0c37d387f2cc5b343c6cbd038ae197fd9a502d76 | [
"MIT"
] | null | null | null | import sys
#### CONFIG OPTIONS
# replicates = 50 #Will be dynamically determined
# Batch-queue "roller": reads a list of SLURM job scripts from
# roll_q_job_array.txt, figures out how many more array tasks fit under the
# queue limit (999), and writes roll_q_submit.sh with the sbatch commands
# for the next jobs.  Progress through the list is persisted in
# roll_q_idx.txt between invocations.
# Usage: roll_q.py <jobs_in_queue> [roll_q_dir] [resub_flag]
roll_q_dir = './'
if len(sys.argv) < 2:
    print('Must pass one argument, the number of jobs in the queue!')
    exit(-1)
jobs_in_queue = int(sys.argv[1])
if len(sys.argv) >= 3:
    roll_q_dir = sys.argv[2]
do_resub = True
# NOTE(review): the condition tests len(sys.argv) > 4 but reads sys.argv[3];
# one of the two looks off by one -- confirm the intended CLI.
if len(sys.argv) > 4:
    do_resub = sys.argv[3].strip() == '1'
# Normalise the directory so simple string concatenation below works.
if roll_q_dir[-1] != '/':
    roll_q_dir += '/'
# NOTE(review): both branches assign the same value, so do_resub currently
# has no effect on open_slots (999 is the scheduler's queue-size cap here).
if do_resub:
    open_slots = 999 - jobs_in_queue
else:
    open_slots = 999 - jobs_in_queue
print(open_slots, 'slots available in queue.')
cur_tasks_to_run = 0
#num_jobs_to_run = open_slots // replicates
# Index of the next unsubmitted job in the job-array file, persisted from
# the previous run.
cur_idx = 0
with open(roll_q_dir + 'roll_q_idx.txt', 'r') as fp:
    cur_idx = int(fp.readline().strip())
print('Current index in job array:', cur_idx)
room_for_all_jobs = False
jobs_to_run = []
with open(roll_q_dir + 'roll_q_job_array.txt', 'r') as fp:
    # Skip the cur_idx entries that were already submitted in earlier runs;
    # hitting a blank line first means the whole list is done.
    all_jobs_finished = False
    for i in range(0, cur_idx):
        line = fp.readline().strip()
        if line == '':
            all_jobs_finished = True
            break
        #print('Skipping:', line)
    if all_jobs_finished:
        print('All jobs already running or done, there\'s nothing to queue!')
        exit(0)
    # Consume job-script paths until the queue is full or the list ends.
    while True:
        #for i in range(0, num_jobs_to_run):
        line = fp.readline().strip()
        #print(line)
        if line == '':
            print('We hit the end of the queue! Submitting the last few jobs...')
            room_for_all_jobs = True
            break
        # Each listed file is a SLURM script; count its array tasks by
        # parsing the "#SBATCH --array=START-END" directive (default 1).
        num_tasks = 1
        with open(line, 'r') as job_fp:
            for job_line in job_fp:
                L = job_line.split()
                if len(L) > 0:
                    if L[0] == '#SBATCH':
                        L2 = L[1].split('=')
                        if L2[0] == '--array':
                            start, end = [int(x) for x in L2[1].split('-')]
                            num_tasks = (end - start) + 1
        # Stop before this job if it would exceed the available slots.
        if cur_tasks_to_run + num_tasks > open_slots:
            break
        cur_tasks_to_run += num_tasks
        jobs_to_run.append(line)
# If jobs remain and resubmission is enabled, instantiate the resubmit
# template so this script gets invoked again later.
if not room_for_all_jobs and do_resub:
    base_script = ''
    with open(roll_q_dir + 'roll_q_resub_base.sb', 'r') as in_fp:
        base_script = in_fp.read()
    print(base_script)
    with open(roll_q_dir + 'roll_q_resub_job.sb', 'w') as out_fp:
        out_fp.write(base_script.replace('<<ROLL_Q_DIR>>', roll_q_dir))
# Emit the submission shell script: one sbatch line per accepted job.
with open(roll_q_dir + 'roll_q_submit.sh', 'w') as out_fp:
    out_fp.write('#!/bin/bash\n')
    for job in jobs_to_run:
        out_fp.write('sbatch ' + job + '\n')
# Persist the new position so the next invocation resumes after these jobs.
with open(roll_q_dir + 'roll_q_idx.txt', 'w') as idx_fp:
    idx_fp.write(str(cur_idx + len(jobs_to_run)))
print('Prepared', len(jobs_to_run), 'jobs, with ' + str(cur_tasks_to_run) + ' tasks, to run!')
| 32.6 | 94 | 0.583544 |
a275677a628b972b4fd284b9ad40ccf51d3ac9ae | 390 | py | Python | prplatform/exercises/migrations/0002_auto_20180508_1200.py | piehei/prplatform | f3248b66019f207bb06a4681a62057e175408b3e | [
"MIT"
] | 3 | 2018-10-07T18:50:01.000Z | 2020-07-29T14:43:51.000Z | prplatform/exercises/migrations/0002_auto_20180508_1200.py | piehei/prplatform | f3248b66019f207bb06a4681a62057e175408b3e | [
"MIT"
] | 9 | 2019-08-26T11:55:00.000Z | 2020-05-04T13:56:06.000Z | prplatform/exercises/migrations/0002_auto_20180508_1200.py | piehei/prplatform | f3248b66019f207bb06a4681a62057e175408b3e | [
"MIT"
] | null | null | null | # Generated by Django 2.0.4 on 2018-05-08 12:00
from django.db import migrations
| 20.526316 | 47 | 0.610256 |
a277d99ca9d564507caf9cea939d843c77111614 | 777 | py | Python | spirit/utils/paginator/infinite_paginator.py | rterehov/Spirit | 515894001da9d499852b7ebde25892d290e26c38 | [
"MIT"
] | null | null | null | spirit/utils/paginator/infinite_paginator.py | rterehov/Spirit | 515894001da9d499852b7ebde25892d290e26c38 | [
"MIT"
] | null | null | null | spirit/utils/paginator/infinite_paginator.py | rterehov/Spirit | 515894001da9d499852b7ebde25892d290e26c38 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import Http404
from infinite_scroll_pagination.paginator import SeekPaginator, EmptyPage
| 24.28125 | 86 | 0.700129 |
a27856f4617a1105202515667ba0f2cfc6adb560 | 10,172 | py | Python | lib/exaproxy/configuration.py | oriolarcas/exaproxy | 5dc732760d811fd4986f83e6dd78d29228927aec | [
"BSD-2-Clause"
] | 124 | 2015-01-03T10:42:17.000Z | 2021-12-24T05:30:25.000Z | lib/exaproxy/configuration.py | oriolarcas/exaproxy | 5dc732760d811fd4986f83e6dd78d29228927aec | [
"BSD-2-Clause"
] | 14 | 2015-02-06T02:21:16.000Z | 2019-01-10T18:22:18.000Z | lib/exaproxy/configuration.py | oriolarcas/exaproxy | 5dc732760d811fd4986f83e6dd78d29228927aec | [
"BSD-2-Clause"
] | 25 | 2015-04-11T04:01:57.000Z | 2021-07-21T07:46:31.000Z | # encoding: utf-8
"""
configuration.py
Created by Thomas Mangin on 2011-11-29.
Copyright (c) 2011-2013 Exa Networks. All rights reserved.
"""
# NOTE: reloading mid-program not possible
import os
import sys
import logging
import pwd
import math
import socket
import struct
# Module-level singleton state: the configuration is parsed once by load()
# and cached here for every later importer of this module.
_application = None
_config = None
_defaults = None
# Syslog level names mapped to the stdlib logging constants, and back.
_syslog_name_value = {
	'CRITICAL' : logging.CRITICAL,
	'ERROR' : logging.ERROR,
	'WARNING' : logging.WARNING,
	'INFO' : logging.INFO,
	'DEBUG' : logging.DEBUG,
}
_syslog_value_name = {
	logging.CRITICAL : 'CRITICAL',
	logging.ERROR : 'ERROR',
	logging.WARNING : 'WARNING',
	logging.INFO : 'INFO',
	logging.DEBUG : 'DEBUG',
}
# Sentinel dict handed to the ConfigParser.get calls in _configuration
# (NoneDict is defined elsewhere in this module, not in this excerpt).
nonedict = NoneDict()
# Absolute, normalised path of the running script; _configuration walks it
# upward to locate the installation's etc/ directory.
home = os.path.normpath(sys.argv[0]) if sys.argv[0].startswith('/') else os.path.normpath(os.path.join(os.getcwd(),sys.argv[0]))
import ConfigParser
def _configuration (conf):
	"""Build the configuration Store for the application.

	Candidate ini files are, in order: the explicit `conf` path, then
	etc/<app>/<app>.conf under the installation prefix, /etc and /usr/etc;
	only the first existing file is read.  For every option declared in
	the module-global _defaults, the value is resolved in priority order:
	environment variable ('app.section.option', dots or underscores),
	then the ini file ('section' or 'app.section' section), then the
	declared default.  Each raw value is passed through the option's
	converter; a TypeError there is reported as a ConfigurationError.

	Note: Store, value.unquote, nonedict and ConfigurationError are
	defined elsewhere in this module (not visible in this excerpt), and
	the `except TypeError,error` syntax makes this Python-2-only code.
	"""
	# Walk up from the script location until we leave a lib/ or bin/
	# directory -- what remains is the installation prefix.
	location = os.path.join(os.sep,*os.path.join(home.split(os.sep)))
	while location and location != '/':
		location, directory = os.path.split(location)
		if directory in ('lib','bin'):
			break
	_conf_paths = []
	if conf:
		_conf_paths.append(os.path.abspath(os.path.normpath(conf)))
	if location:
		_conf_paths.append(os.path.normpath(os.path.join(location,'etc',_application,'%s.conf' % _application)))
	_conf_paths.append(os.path.normpath(os.path.join('/','etc',_application,'%s.conf' % _application)))
	_conf_paths.append(os.path.normpath(os.path.join('/','usr','etc',_application,'%s.conf' % _application)))
	configuration = Store()
	ini = ConfigParser.ConfigParser()
	# Only the first configuration file that exists is honoured.
	ini_files = [path for path in _conf_paths if os.path.exists(path)]
	if ini_files:
		ini.read(ini_files[0])
	for section in _defaults:
		default = _defaults[section]
		for option in default:
			# default[option] is (converter, kind, default-value, help).
			convert = default[option][0]
			try:
				proxy_section = '%s.%s' % (_application,section)
				env_name = '%s.%s' % (proxy_section,option)
				rep_name = env_name.replace('.','_')
				# Environment variables win over the ini file; both the
				# dotted and the underscored spellings are accepted.
				if env_name in os.environ:
					conf = os.environ.get(env_name)
				elif rep_name in os.environ:
					conf = os.environ.get(rep_name)
				else:
					try:
						# raise and set the default
						conf = value.unquote(ini.get(section,option,nonedict))
					except (ConfigParser.NoSectionError,ConfigParser.NoOptionError):
						# raise and set the default
						conf = value.unquote(ini.get(proxy_section,option,nonedict))
					# name without an = or : in the configuration and no value
					if conf is None:
						conf = default[option][2]
			except (ConfigParser.NoSectionError,ConfigParser.NoOptionError):
				conf = default[option][2]
			try:
				configuration.setdefault(section,Store())[option] = convert(conf)
			except TypeError,error:
				raise ConfigurationError('invalid value for %s.%s : %s (%s)' % (section,option,conf,str(error)))
	return configuration
def load (application=None,defaults=None,conf=None):
	"""Parse the configuration once and return the cached Store.

	The first caller (the application's main) must supply the application
	name, the defaults table and a configuration path; every later call
	-- typically from plain module imports -- just gets the already-built
	configuration back.
	"""
	global _application, _defaults, _config
	# Fast path: an earlier call already built the configuration.
	if _config:
		return _config
	if conf is None:
		raise RuntimeError('You can not have an import using load() before main() initialised it')
	# First initialisation: record the application identity, then parse.
	_application = application
	_defaults = defaults
	_config = _configuration(conf)
	return _config
def default ():
	"""Yield one aligned help line per known option with its default value."""
	for section in sorted(_defaults):
		options = _defaults[section]
		for option in sorted(options):
			# Each entry is (converter, kind, default-value, description).
			entry = options[option]
			# String-like kinds are displayed quoted, everything else verbatim.
			if entry[1] in (string.list,string.path,string.quote):
				shown = "'%s'" % entry[2]
			else:
				shown = entry[2]
			padding = ' '*(20-len(section)-len(option))
			yield '%s.%s.%s %s: %s. default (%s)' % (_application,section,option,padding,entry[3],shown)
| 27.197861 | 128 | 0.665946 |
a278b6850520063ea039b2fa761bcc89b24ae7fc | 1,009 | py | Python | timo/exception.py | Minsoo-web/TIMO | 79051cdce4539bc62d01b19e98b4fce6a3f02fae | [
"MIT"
] | null | null | null | timo/exception.py | Minsoo-web/TIMO | 79051cdce4539bc62d01b19e98b4fce6a3f02fae | [
"MIT"
] | null | null | null | timo/exception.py | Minsoo-web/TIMO | 79051cdce4539bc62d01b19e98b4fce6a3f02fae | [
"MIT"
] | 2 | 2020-07-13T00:55:52.000Z | 2020-07-27T04:23:41.000Z | from typing import AnyStr
from typing import NoReturn
| 22.931818 | 83 | 0.654113 |
a27af76ac557d5a5a06d9803200c94099e5080e2 | 301 | py | Python | scikit/Adaboost/example.py | JayMiao/MLAction | fec1c08fa33ed1f5d9b0befecc6dac551cc02302 | [
"MIT"
] | 1 | 2017-02-13T10:25:11.000Z | 2017-02-13T10:25:11.000Z | scikit/Adaboost/example.py | JayMiao/MLAction | fec1c08fa33ed1f5d9b0befecc6dac551cc02302 | [
"MIT"
] | null | null | null | scikit/Adaboost/example.py | JayMiao/MLAction | fec1c08fa33ed1f5d9b0befecc6dac551cc02302 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import AdaBoostClassifier
iris = load_iris()
clf = AdaBoostClassifier(n_estimators=1000)
scores = cross_val_score(clf, iris.data, iris.target)
print scores.mean() | 30.1 | 53 | 0.800664 |
a27d6ad41df2cef9c59436191968c1e6444af6da | 4,720 | py | Python | main.py | jg-fisher/indeed-bot | 601720c3f20f62a99e02ef2f017cfb225a3f770e | [
"MIT"
] | 9 | 2019-11-28T08:54:50.000Z | 2022-02-23T05:12:53.000Z | main.py | jg-fisher/indeed-bot | 601720c3f20f62a99e02ef2f017cfb225a3f770e | [
"MIT"
] | null | null | null | main.py | jg-fisher/indeed-bot | 601720c3f20f62a99e02ef2f017cfb225a3f770e | [
"MIT"
] | 9 | 2019-12-07T08:32:10.000Z | 2022-03-28T17:47:30.000Z | import os
import sys
import time
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
if __name__ == '__main__':
    # Applicant details attached to each application; the resume path is
    # resume.txt in the current working directory (Windows-style separator).
    profile = {
        'name': "John Fisher",
        'email': "jfishersolutions@gmail.com",
        'phone_number': '860-364-3249',
        'resume': os.getcwd() + '\\resume.txt'
    }
    # IndeedBot is defined elsewhere in this file (not in this excerpt).
    id_bot = IndeedBot()
    # keywords, city, state
    id_bot.query('python developer', 'dallas', 'tx')
    # Collect matching postings, then apply to each with the profile above
    # (presumably Indeed's quick-apply flow -- confirm against IndeedBot).
    id_bot.find_express_jobs()
    id_bot.apply_to_express_jobs(profile)
a27f8d0583f96864cbfcef5f30b901f38868d920 | 1,266 | py | Python | biobb_adapters/pycompss/biobb_md/gromacs_extra/append_ligand_pc.py | jfennick/biobb_adapters | a37c1c696476c93225e7d3c661b0d4393af9dfe1 | [
"Apache-2.0"
] | null | null | null | biobb_adapters/pycompss/biobb_md/gromacs_extra/append_ligand_pc.py | jfennick/biobb_adapters | a37c1c696476c93225e7d3c661b0d4393af9dfe1 | [
"Apache-2.0"
] | 4 | 2019-03-04T15:22:06.000Z | 2021-09-24T14:43:48.000Z | biobb_adapters/pycompss/biobb_md/gromacs_extra/append_ligand_pc.py | jfennick/biobb_adapters | a37c1c696476c93225e7d3c661b0d4393af9dfe1 | [
"Apache-2.0"
] | 2 | 2020-09-08T05:26:23.000Z | 2022-03-28T07:09:20.000Z | import traceback
from pycompss.api.task import task
from pycompss.api.constraint import constraint
from pycompss.api.parameter import FILE_IN, FILE_OUT
from biobb_common.tools import file_utils as fu
from biobb_md.gromacs_extra import append_ligand
import os
import sys | 42.2 | 120 | 0.704581 |
a27fb79cf4664c162660d09fef51a46e76ed5c1e | 3,277 | py | Python | sellalert.py | sjmiller609/cvs_scraper | f6a1e5673869a7031c028d38a6bb4b37a3ba5473 | [
"MIT"
] | null | null | null | sellalert.py | sjmiller609/cvs_scraper | f6a1e5673869a7031c028d38a6bb4b37a3ba5473 | [
"MIT"
] | null | null | null | sellalert.py | sjmiller609/cvs_scraper | f6a1e5673869a7031c028d38a6bb4b37a3ba5473 | [
"MIT"
] | null | null | null | import requests
import json
from pprint import pprint
import re
import time
import sys
#getdata = requests.get(geturl)
#pprint (vars(getdata))
from bs4 import BeautifulSoup
from geopy.geocoders import Nominatim
# CVS stock-change watcher: search cvs.com for an item, collect product
# SKUs from the result page, then poll each nearby store's inventory and
# report quantity drops ("sold N of item X").
# (`requests` is imported at the top of the original file, outside this
# excerpt.)
if len(sys.argv) != 4:
    print(sys.argv[0]+" <item> <location> <num items>")
    exit()
#get list of product IDs
item = sys.argv[1].replace(" ","+")
print("searching for items with: "+item)
geturl = "http://www.cvs.com/search/N-0?searchTerm="+item+"&navNum="+sys.argv[3]
print("search url: "+geturl)
#This step is important.Converting QString to Ascii for lxml to process
#archive_links = html.fromstring(str(result.toAscii()))
#print archive_links
response = requests.get(geturl)
print(str(response))
page = str(BeautifulSoup(response.content,"html.parser"))
print(page)
# NOTE(review): this unconditional exit() makes everything below dead code.
# Per the TODO further down, the product grid is now rendered client-side
# by JavaScript, so the static-HTML scrape above stopped yielding URLs.
exit()
urls = []
# getUrls() is defined elsewhere in the original file (not in this
# excerpt); presumably it extracts product links from the page markup.
getUrls(urls,page)
for url in urls:
    print(url)
# Pull the product slug and the 6-digit SKU id out of each product URL,
# de-duplicating on SKU.
itemlist = []
skuidlist = []
for i in range(0,len(urls)):
    m = re.search('/shop/.*/.*/.*/(.*)-skuid-(\d{6})',urls[i])
    if m and m.group(2) not in skuidlist:
        itemlist.append(m.group(1))
        skuidlist.append(m.group(2))
print("items found:")
for item in itemlist:
    print("\t"+item)
#TODO: now the page loads these in js, so we need to interpret js
exit()
# Resolve the user-supplied location to coordinates for the store lookup.
geolocator = Nominatim()
location = geolocator.geocode(sys.argv[2])
print((location.latitude,location.longitude))
posturl = "http://www.cvs.com/rest/bean/cvs/catalog/CvsBohServiceHandler/storeInventoryValues"
# Baseline pass: one {store address -> quantity} dict per SKU.
# NOTE(review): `dict = {}` below shadows the builtin `dict` name.
dicts = []
print('loading initial inventory...')
for i in range(0,len(skuidlist)):
    time.sleep(2)
    productId = skuidlist[i]
    postdata = {'productId': productId, 'productSPUlnd': 'true','favstore':'NULL','geolatitude':str(location.latitude),'geolongitude':str(location.longitude)}
    inv = requests.post(posturl,data=postdata)
    dict = {}
    jsons = inv.json()['atgResponse']
    for j in range(0,len(jsons)):
        temp = jsons[j]
        # An empty quantity string means the store reported none in stock.
        if(temp['Qty'] == ''):
            temp['Qty'] = '0'
        dict[temp['storeAddress']] = temp['Qty']
    dicts.append(dict)
    print(str(100*i/len(skuidlist))+"%")
# Poll forever: re-fetch each SKU's per-store quantities and report any
# change against the baseline, then update the baseline.
while True:
    for j in range(0,len(skuidlist)):
        #delay between requests
        print('3 seconds...')
        time.sleep(3)
        productId = skuidlist[j]
        postdata = {'productId': productId, 'productSPUlnd': 'true','favstore':'NULL','geolatitude':str(location.latitude),'geolongitude':str(location.longitude)}
        inv = requests.post(posturl,data=postdata)
        jsons = inv.json()['atgResponse']
        for i in range(0,len(jsons)):
            temp = jsons[i]
            if(temp['Qty'] == ''):
                temp['Qty'] = '0'
            if(dicts[j][temp['storeAddress']] != temp['Qty']):
                print("was: "+dicts[j][temp['storeAddress']]+" now: "+temp['Qty'])
                # Quantities are compared as strings above but differenced
                # as ints here; a restock shows up as a negative "sold".
                sold = int(dicts[j][temp['storeAddress']]) - int(temp['Qty'])
                print(temp['storeAddress']+" sold "+str(sold) + " of item " +itemlist[j])
                dicts[j][temp['storeAddress']] = temp['Qty']
| 29.522523 | 159 | 0.648764 |
a27fd6c4631670b333af8985d1aba8f26af3183c | 5,670 | py | Python | neucom/utils.py | jacobver/diag_context | ca8d008b745743bf20c4bedcf6faa412a5ad8080 | [
"MIT"
] | null | null | null | neucom/utils.py | jacobver/diag_context | ca8d008b745743bf20c4bedcf6faa412a5ad8080 | [
"MIT"
] | null | null | null | neucom/utils.py | jacobver/diag_context | ca8d008b745743bf20c4bedcf6faa412a5ad8080 | [
"MIT"
] | null | null | null | from __future__ import print_function
import numpy as np
from copy import copy
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn as nn
def pairwise_add(u, v=None, is_batch=False):
    """
    Compute every pairwise sum between the entries of two vectors
    (or of a vector with itself when `v` is omitted).  Batched inputs
    are handled when `is_batch` is True.

    Parameters:
    ----------
    u, v: Tensor (m,) or (b,m)

    Returns:
    ---------
    Tensor (m, n) or (b, m, n)
    """
    if v is None:
        v = u
    u_dims = u.size()
    v_dims = v.size()
    if len(u_dims) > 2 and not is_batch:
        raise ValueError("Expected at most 2D tensor or 3D tensor with batch")
    if len(v_dims) > 2 and not is_batch:
        raise ValueError("Expected at most 2D tensor or 3D tensor with batch")
    # The vector axis is 0 for plain vectors, 1 when a batch axis leads.
    vec_axis = 1 if is_batch else 0
    m = u_dims[vec_axis]
    n = v_dims[vec_axis]
    # Broadcast u down a new trailing axis (a column) and v across a new
    # second-to-last axis (a row); their sum is the full pairwise table.
    col = expand_dims(u, axis=-1)
    col_shape = list(col.size())
    col_shape[-1] = n
    row = expand_dims(v, axis=-2)
    row_shape = list(row.size())
    row_shape[-2] = m
    return col.expand(*col_shape) + row.expand(*row_shape)
def matmal(left, right):
    '''
    Placeholder for a batched matrix multiplication helper.  Currently
    unimplemented: the body is a no-op and the function returns None.

    left is of size (*N, n1,n2), where N is a list
    right is of size(*M, m1,m2), where M is a list
    output is of size
    '''
    pass
def cosine_distance(memory_matrix, cos_keys):
"""
compute the cosine similarity between keys to each of the
memory slot.
Parameters:
----------
memory_matrix: Tensor (batch_size, mem_slot, mem_size)
the memory matrix to lookup in
keys: Tensor (batch_size, mem_size, number_of_keys)
the keys to query the memory with
strengths: Tensor (batch_size, number_of_keys, )
the list of strengths for each lookup key
Returns: Tensor (batch_size, mem_slot, number_of_keys)
The list of lookup weightings for each provided key
"""
memory_norm = torch.norm(memory_matrix, 2, 2, keepdim=True)
keys_norm = torch.norm(cos_keys, 2, 1, keepdim=True)
normalized_mem = torch.div(
memory_matrix, memory_norm.expand_as(memory_matrix) + 1e-8)
normalized_keys = torch.div(cos_keys, keys_norm.expand_as(cos_keys) + 1e-8)
out = torch.bmm(normalized_mem, normalized_keys)
# print(normalized_keys)
# print(out)
# apply_dict(locals())
return out
def softmax(input, axis=1):
"""
Apply softmax on input at certain axis.
Parammeters:
----------
input: Tensor (N*L or rank>2)
axis: the axis to apply softmax
Returns: Tensor with softmax applied on that dimension.
"""
input_size = input.size()
trans_input = input.transpose(axis, len(input_size) - 1)
trans_size = trans_input.size()
input_2d = trans_input.contiguous().view(-1, trans_size[-1])
soft_max_2d = F.softmax(input_2d)
soft_max_nd = soft_max_2d.view(*trans_size)
# apply_dict(locals())
return soft_max_nd.transpose(axis, len(input_size) - 1)
| 28.069307 | 134 | 0.603351 |
a27ff6238bdd6adda0370578acda1918aca05e2f | 776 | py | Python | school/lecture1/isi_cv_02_task.py | kubekbreha/ML-Python-Algorithms | 8058b68a2d98a79a6debcc69abdd188c97420d75 | [
"MIT"
] | null | null | null | school/lecture1/isi_cv_02_task.py | kubekbreha/ML-Python-Algorithms | 8058b68a2d98a79a6debcc69abdd188c97420d75 | [
"MIT"
] | null | null | null | school/lecture1/isi_cv_02_task.py | kubekbreha/ML-Python-Algorithms | 8058b68a2d98a79a6debcc69abdd188c97420d75 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 19 20:41:09 2017
@author: pd
"""
#from IPython import get_ipython
#get_ipython().magic('reset -sf')
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
X, Y = datasets.make_classification(n_samples=1000,
n_features=3, n_redundant=0)
# print(X, Y)
clf = DecisionTreeClassifier()
clf = clf.fit(X*10, Y*10)
x,y,z = clf.predict([[-2, 2, 0],[-131, -123, -435],[-22, 100, 53]])
#### initial visualization
plt.xlim(0.0, 20.0)
plt.ylim(0.0, 20.0)
# plt.scatter(X, Y, color="b", label="fast")
# plt.scatter(x, y, color="r", label="slow")
# plt.legend()
# plt.xlabel("bumpiness")
# plt.ylabel("grade")
plt.show()
| 20.972973 | 67 | 0.640464 |
a280eaab2887649d537621914d70995f7a90e0ab | 327 | py | Python | rotary/rotary/doctype/monthly_report/monthly_report.py | neilLasrado/rotary | 66659b41c6fbd04d22aa368573c786dabe1102e5 | [
"MIT"
] | null | null | null | rotary/rotary/doctype/monthly_report/monthly_report.py | neilLasrado/rotary | 66659b41c6fbd04d22aa368573c786dabe1102e5 | [
"MIT"
] | null | null | null | rotary/rotary/doctype/monthly_report/monthly_report.py | neilLasrado/rotary | 66659b41c6fbd04d22aa368573c786dabe1102e5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Neil Lasrado and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import now
| 25.153846 | 51 | 0.770642 |
a281068a96d517af66fbb0b7cc8c9a41a817af13 | 109 | py | Python | kili/mutations/project_version/fragments.py | ASonay/kili-playground | 9624073703b5e6151cf496f44f17f531576875b7 | [
"Apache-2.0"
] | 214 | 2019-08-05T14:55:01.000Z | 2022-03-28T21:02:22.000Z | kili/mutations/project_version/fragments.py | x213212/kili-playground | dfb94c2d54bedfd7fec452b91f811587a2156c13 | [
"Apache-2.0"
] | 10 | 2020-05-14T10:44:16.000Z | 2022-03-08T09:39:24.000Z | kili/mutations/project_version/fragments.py | x213212/kili-playground | dfb94c2d54bedfd7fec452b91f811587a2156c13 | [
"Apache-2.0"
] | 19 | 2019-11-26T22:41:09.000Z | 2022-01-16T19:17:38.000Z | """
Fragments of project version mutations
"""
PROJECT_VERSION_FRAGMENT = '''
content
id
name
projectId
'''
| 9.909091 | 38 | 0.733945 |
a281c8f1cacd2892e9e276b0c28506e1a7b6dc79 | 6,037 | py | Python | metrics/fid/fid_score.py | vfcosta/coegan-trained | 44174e68909d9c03bf2e4b7e4c7a48237a560183 | [
"MIT"
] | null | null | null | metrics/fid/fid_score.py | vfcosta/coegan-trained | 44174e68909d9c03bf2e4b7e4c7a48237a560183 | [
"MIT"
] | null | null | null | metrics/fid/fid_score.py | vfcosta/coegan-trained | 44174e68909d9c03bf2e4b7e4c7a48237a560183 | [
"MIT"
] | 1 | 2021-06-11T16:52:55.000Z | 2021-06-11T16:52:55.000Z | # Code apapted from https://github.com/mseitzer/pytorch-fid
"""Calculates the Frechet Inception Distance (FID) to evalulate GANs
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.
When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).
The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectively.
See --help to see further details.
Code apapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
of Tensorflow
Copyright 2018 Institute of Bioinformatics, JKU Linz
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import numpy as np
import torch
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d
from util import tools
def get_activations(dataset, model, size=1000, batch_size=50, dims=2048, device='cpu'):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- files : List of image files paths
-- model : Instance of inception model
-- batch_size : Batch size of images for the model to process at once.
Make sure that the number of samples is a multiple of
the batch size, otherwise some samples are ignored. This
behavior is retained to match the original FID score
implementation.
-- dims : Dimensionality of features returned by Inception
-- device : Device to run calculations
Returns:
-- A numpy array of dimension (num images, dims) that contains the
activations of the given tensor when feeding inception with the
query tensor.
"""
model.eval()
if batch_size > size:
print(('Warning: batch size is bigger than the data size. '
'Setting batch size to data size'))
batch_size = size
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, drop_last=False)
pred_arr = np.empty((size, dims))
start_idx = 0
for batch, _ in dataloader:
if batch.shape[1] == 1:
batch = torch.cat((batch, batch, batch), 1)
batch = batch.to(device)
with torch.no_grad():
pred = model(batch)[0]
# If model output is not scalar, apply global spatial average pooling.
# This happens if you choose a dimensionality not equal 2048.
if pred.size(2) != 1 or pred.size(3) != 1:
pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
pred = pred.squeeze(3).squeeze(2).cpu().numpy()
pred_arr[start_idx:start_idx + pred.shape[0]] = pred
start_idx = start_idx + pred.shape[0]
if start_idx >= size:
break
return pred_arr
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations, precalculated on an
representative data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on an
representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, 'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, 'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
start_time = time.time()
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
print("FID: sqrtm --- %s seconds ---" % (time.time() - start_time))
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
# raise ValueError('Imaginary component {}'.format(m))
print('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
| 42.514085 | 108 | 0.68047 |
a281d5a3c0cadb9b0e4f53931b714575ab5662a4 | 169 | py | Python | test/test.py | ttkltll/fisher | 8889705c7bde10304cfde7972b805226ac59d735 | [
"MIT"
] | null | null | null | test/test.py | ttkltll/fisher | 8889705c7bde10304cfde7972b805226ac59d735 | [
"MIT"
] | 3 | 2020-09-15T23:37:18.000Z | 2020-09-16T00:36:55.000Z | test/test.py | ttkltll/fisher | 8889705c7bde10304cfde7972b805226ac59d735 | [
"MIT"
] | 1 | 2020-09-15T02:55:54.000Z | 2020-09-15T02:55:54.000Z | from flask import Flask, current_app, request, Request
app = Flask(__name__)
ctx = app.app_context()
ctx.push()
current_app.static_floder = 'static'
ctx.pop()
app.run
| 16.9 | 54 | 0.751479 |
a28375161ebc70272c07037bc6d8933c4916ada9 | 4,108 | py | Python | augmentation.py | Pandoro/tools | 631c6036cb74dc845668fd912588fd31aae46f8b | [
"MIT"
] | 1 | 2019-04-22T16:38:03.000Z | 2019-04-22T16:38:03.000Z | augmentation.py | afcarl/tools-Pandoro | 631c6036cb74dc845668fd912588fd31aae46f8b | [
"MIT"
] | 2 | 2018-03-13T10:49:48.000Z | 2018-03-13T10:54:01.000Z | augmentation.py | afcarl/tools-Pandoro | 631c6036cb74dc845668fd912588fd31aae46f8b | [
"MIT"
] | 2 | 2018-03-08T19:40:10.000Z | 2018-06-11T14:43:49.000Z | import scipy.ndimage
import cv2
import numpy as np
| 42.791667 | 140 | 0.657741 |
a2840316fe01ccb59fbb68f41487073e6d6d5fcd | 9,653 | py | Python | src/config/utils/db-loader/contrail_db_loader/resources/security_group.py | hamzazafar/contrail-controller | 67df90fa2d9d10263cf507c2751171c4e52f10dd | [
"Apache-2.0"
] | 1 | 2020-04-16T20:34:55.000Z | 2020-04-16T20:34:55.000Z | src/config/utils/db-loader/contrail_db_loader/resources/security_group.py | hamzazafar/contrail-controller | 67df90fa2d9d10263cf507c2751171c4e52f10dd | [
"Apache-2.0"
] | null | null | null | src/config/utils/db-loader/contrail_db_loader/resources/security_group.py | hamzazafar/contrail-controller | 67df90fa2d9d10263cf507c2751171c4e52f10dd | [
"Apache-2.0"
] | 1 | 2020-11-20T06:49:58.000Z | 2020-11-20T06:49:58.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Juniper Networks, Inc. All rights reserved.
#
from __future__ import unicode_literals
from builtins import str
from builtins import range
import logging
from netaddr import IPNetwork
from random import randint, choice
import uuid
from .resource import Resource
from ..utils import timeit
logger = logging.getLogger(__name__)
| 40.389121 | 79 | 0.443489 |
a28511d4313faddcec24e963d4aea4b50f61ce85 | 135 | py | Python | sopy/admin/__init__.py | AlexFrazer/sopython-site | 4ede64cf6d04def596be13feeaa4d84ce8503ef3 | [
"BSD-3-Clause"
] | 81 | 2015-02-17T17:07:27.000Z | 2021-08-15T17:46:13.000Z | sopy/admin/__init__.py | AlexFrazer/sopython-site | 4ede64cf6d04def596be13feeaa4d84ce8503ef3 | [
"BSD-3-Clause"
] | 81 | 2015-02-17T17:04:16.000Z | 2021-02-21T03:52:55.000Z | sopy/admin/__init__.py | AlexFrazer/sopython-site | 4ede64cf6d04def596be13feeaa4d84ce8503ef3 | [
"BSD-3-Clause"
] | 29 | 2015-01-18T18:28:06.000Z | 2022-02-05T03:11:04.000Z | from flask import Blueprint
bp = Blueprint('admin', __name__)
| 15 | 33 | 0.748148 |
a2856ec06ce72f7e0f5fc2a98ea631945b111855 | 1,790 | py | Python | onmt/modules/extensions/fused_layer_norm/setup.py | quanpn90/NMTGMinor | 0e5f989c8bc01c6c8dc3a8c1ce7c05bfd884b796 | [
"MIT"
] | 75 | 2019-05-02T10:37:39.000Z | 2022-02-13T17:53:24.000Z | onmt/modules/extensions/fused_layer_norm/setup.py | quanpn90/NMTGMinor | 0e5f989c8bc01c6c8dc3a8c1ce7c05bfd884b796 | [
"MIT"
] | 11 | 2018-11-08T16:52:51.000Z | 2021-09-23T15:01:14.000Z | onmt/modules/extensions/fused_layer_norm/setup.py | quanpn90/NMTGMinor | 0e5f989c8bc01c6c8dc3a8c1ce7c05bfd884b796 | [
"MIT"
] | 34 | 2018-06-04T14:20:01.000Z | 2022-01-26T08:10:05.000Z | import os
import torch
from torch.utils import cpp_extension
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
cc_flag = []
ext_modules = []
cc_flag.append('-gencode')
cc_flag.append('arch=compute_75,code=sm_75')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_80,cod =sm_80')
cc_flag.append('-gencode')
cc_flag.append('arch=compute_86,code=sm_86')
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
version_ge_1_5 = ['-DVERSION_GE_1_5']
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5
ext_modules.append(
CUDAExtension(name='fused_layer_norm_cuda',
sources=['layer_norm_cuda.cpp',
'layer_norm_cuda_kernel.cu'],
extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
'nvcc':['-maxrregcount=50',
'-O3',
'--use_fast_math'] + version_dependent_macros}))
setup(
name="fused_layer_norm_cuda",
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension},
) | 37.291667 | 102 | 0.640782 |
a285b6ae623657a020499a2ec4ea9b0765d78e0b | 5,708 | py | Python | expenses/migrations/0001_initial.py | inducer/expensely | b88b830e466db63cce5acfcdb0269411c7b39358 | [
"MIT",
"Unlicense"
] | 1 | 2021-07-02T02:03:09.000Z | 2021-07-02T02:03:09.000Z | expenses/migrations/0001_initial.py | inducer/expensely | b88b830e466db63cce5acfcdb0269411c7b39358 | [
"MIT",
"Unlicense"
] | null | null | null | expenses/migrations/0001_initial.py | inducer/expensely | b88b830e466db63cce5acfcdb0269411c7b39358 | [
"MIT",
"Unlicense"
] | 2 | 2016-08-24T05:25:57.000Z | 2018-12-31T01:06:07.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-24 23:01
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
| 43.572519 | 148 | 0.576384 |
a285bdaafcb56b79447200be9f8737064e66fac5 | 800 | py | Python | pages/themes/ParallelProgramming-Lecture4/examples/processQueue/TASK_sharing_variable_by_processes.py | WWWCourses/PythonCourseNetIT-Slides | 78dbb5eb7695cc64042b71a1911d4ef3feddb074 | [
"MIT"
] | null | null | null | pages/themes/ParallelProgramming-Lecture4/examples/processQueue/TASK_sharing_variable_by_processes.py | WWWCourses/PythonCourseNetIT-Slides | 78dbb5eb7695cc64042b71a1911d4ef3feddb074 | [
"MIT"
] | null | null | null | pages/themes/ParallelProgramming-Lecture4/examples/processQueue/TASK_sharing_variable_by_processes.py | WWWCourses/PythonCourseNetIT-Slides | 78dbb5eb7695cc64042b71a1911d4ef3feddb074 | [
"MIT"
] | null | null | null | """:
- ,
x 20.
multiprocessing.Queue() x .
"""
import multiprocessing as mp
if __name__ == "__main__":
x = 0
incr_count = 10
# create and start 2 process which should increment a variable:
pr1 = mp.Process(target=increment, args=(range(incr_count),))
pr2 = mp.Process(target=increment, args=(range(incr_count),))
pr1.start(); pr2.start()
# wait processes to finish
pr1.join();pr2.join()
print(f"x in {mp.current_process().name}: {x}")
#
# x in Main Process: 20 | 23.529412 | 91 | 0.71625 |
a28631f9170fbf0128fb181d7e9585c79cf0e573 | 241 | py | Python | pythonProject/02al28pass_elipsis_placeholders/exercicio_num_int.py | D-Wolter/PycharmProjects | c8d6144efa30261bff72a3e0414a0d80f6730f9b | [
"MIT"
] | null | null | null | pythonProject/02al28pass_elipsis_placeholders/exercicio_num_int.py | D-Wolter/PycharmProjects | c8d6144efa30261bff72a3e0414a0d80f6730f9b | [
"MIT"
] | null | null | null | pythonProject/02al28pass_elipsis_placeholders/exercicio_num_int.py | D-Wolter/PycharmProjects | c8d6144efa30261bff72a3e0414a0d80f6730f9b | [
"MIT"
] | null | null | null | numero_int = input('Digite um numero inteiro')
if numero_int.isdigit():
numero_int = int(numero_int)
if numero_int % 2 == 0:
print('o numero e par')
elif numero_int % 1 == 0:
print('o numero e impar')
else:
| 21.909091 | 46 | 0.605809 |
a286ecdd87da9c3a2db9af7dec80faeeeab6de6c | 327 | py | Python | Ejercicio_DecimalBinario.py | Sofia1306/Python_Clases | 60bfab6425269b572ec738abcb5f96d74fc56f95 | [
"MIT"
] | null | null | null | Ejercicio_DecimalBinario.py | Sofia1306/Python_Clases | 60bfab6425269b572ec738abcb5f96d74fc56f95 | [
"MIT"
] | null | null | null | Ejercicio_DecimalBinario.py | Sofia1306/Python_Clases | 60bfab6425269b572ec738abcb5f96d74fc56f95 | [
"MIT"
] | null | null | null | """Ejercicio Decimal a Binario """
import math
numero = int(input('Ingresa un nmero: \n'))
binario = ''
while (numero > 0):
if (numero%2 == 0):
binario = '0' + binario
else:
binario = '1' + binario
numero = int(math.floor(numero/2))
print(f'El nmero en binario es {binario}')
| 19.235294 | 45 | 0.562691 |
a288d8b6411de0a207c959a000823b29df69e32d | 743 | py | Python | src/server.py | awsassets/superfish | 77d93ec864de22b592bc4b69aa5ab7580aa383ab | [
"MIT"
] | null | null | null | src/server.py | awsassets/superfish | 77d93ec864de22b592bc4b69aa5ab7580aa383ab | [
"MIT"
] | null | null | null | src/server.py | awsassets/superfish | 77d93ec864de22b592bc4b69aa5ab7580aa383ab | [
"MIT"
] | null | null | null | import flask ; from flask import * | 24.766667 | 75 | 0.561238 |
a28a52e59294caa6c7f0ce984c5ca19e80db8e8f | 152 | py | Python | block/admin.py | amirkh75/user_block_chain | f9bdba11c1d8b724787151480cd52155ad8718e4 | [
"MIT"
] | null | null | null | block/admin.py | amirkh75/user_block_chain | f9bdba11c1d8b724787151480cd52155ad8718e4 | [
"MIT"
] | null | null | null | block/admin.py | amirkh75/user_block_chain | f9bdba11c1d8b724787151480cd52155ad8718e4 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Block
| 19 | 36 | 0.763158 |
a28aac04289f6912a4237acfbf9626f8b5f787ef | 593 | py | Python | SOURCE/test_ex01.py | PinkPhayate/Information_Access_Design | a6ae3b055e971708d67fda7129e51cd0d9b16d2f | [
"MIT"
] | null | null | null | SOURCE/test_ex01.py | PinkPhayate/Information_Access_Design | a6ae3b055e971708d67fda7129e51cd0d9b16d2f | [
"MIT"
] | null | null | null | SOURCE/test_ex01.py | PinkPhayate/Information_Access_Design | a6ae3b055e971708d67fda7129e51cd0d9b16d2f | [
"MIT"
] | null | null | null | import re,io,os.path,os
for line in open('./../text_list', "r"):
filename = './../TXT/tragedies/'+line.rstrip()
print filename
f = open("./../TXT/test_"+line.rstrip(),"w")
for line in io.open(filename,"r",encoding="utf-16"):
if remove_tag(line):
# remove signiture
line = re.sub(re.compile("[!-/:-@[-`{-~;?]"),"", line).rstrip()
# print line
f.write(line.encode('utf-8'))
f.close()
| 25.782609 | 75 | 0.53457 |
a28c65ff15ac6df969c2d1a4bd260f0f3974490e | 1,736 | py | Python | lumiml/examples/test_install.py | ovra-peers/lumiml | 8df5f591edacb36c473b6e09f35da8098754b2e8 | [
"BSD-3-Clause"
] | 4 | 2019-01-29T12:02:02.000Z | 2019-12-26T11:12:32.000Z | lumiml/examples/test_install.py | ovra-peers/lumiml | 8df5f591edacb36c473b6e09f35da8098754b2e8 | [
"BSD-3-Clause"
] | null | null | null | lumiml/examples/test_install.py | ovra-peers/lumiml | 8df5f591edacb36c473b6e09f35da8098754b2e8 | [
"BSD-3-Clause"
] | 4 | 2019-12-26T08:22:38.000Z | 2020-10-07T09:37:12.000Z |
if __name__ == '__main__':
try:
TestInstall()
except Exception as e:
print(e);
print('Something is wrong with installation! Please read the error message carefuly to try and resolve it.')
| 24.450704 | 116 | 0.663594 |
a28d85267ddf700f8793d60f25330f1799660aba | 422 | py | Python | urllib/Cookie/CookieServer.py | pengchenyu111/SpiderLearning | d1fca1c7f46bfb22ad23f9396d0f2e2301ec4534 | [
"Apache-2.0"
] | 3 | 2020-11-21T13:13:46.000Z | 2020-12-03T05:43:32.000Z | urllib/Cookie/CookieServer.py | pengchenyu111/SpiderLearning | d1fca1c7f46bfb22ad23f9396d0f2e2301ec4534 | [
"Apache-2.0"
] | null | null | null | urllib/Cookie/CookieServer.py | pengchenyu111/SpiderLearning | d1fca1c7f46bfb22ad23f9396d0f2e2301ec4534 | [
"Apache-2.0"
] | 1 | 2020-12-03T05:43:53.000Z | 2020-12-03T05:43:53.000Z | from flask import Flask
from flask import request
app = Flask(__name__)
if __name__ == '__main__':
app.run()
| 18.347826 | 48 | 0.694313 |
a28eb678ba5f89d1bb90f58b1a3981298261532f | 3,691 | py | Python | Aihan-Liu-Individual-project/Code/demo.py | laihanel/Final-Project-Group3 | e58cd526d8e26ee6b13b5a77af6ebcc1ff7e77ca | [
"MIT"
] | null | null | null | Aihan-Liu-Individual-project/Code/demo.py | laihanel/Final-Project-Group3 | e58cd526d8e26ee6b13b5a77af6ebcc1ff7e77ca | [
"MIT"
] | 8 | 2021-11-11T02:52:41.000Z | 2021-12-05T23:01:05.000Z | Code/demo.py | laihanel/Final-Project-Group3 | e58cd526d8e26ee6b13b5a77af6ebcc1ff7e77ca | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import numpy as np
import cv2
import os
import shutil
from matplotlib import pyplot as plt
from Model_Definition import VC3D
from mypath import NICKNAME, DATA_DIR, PATH
# TODO: Now can display images with plt.show(), need to solve display on cloud instance
OUT_DIR = PATH + os.path.sep + 'Result'
DEMO_DIR = PATH + os.path.sep + 'Demo'
# %%
check_folder_exist(OUT_DIR)
# %%
# %%
if __name__ == '__main__':
main() | 32.955357 | 117 | 0.544568 |
a28f7b4918b94b07856ae26b2413470f943cc99a | 123 | py | Python | remove_punctuations.py | sparemeyoursoliloquy/Python-Exercises | 18f2075327dec0dbc55edd4f50fa3f71258777e1 | [
"MIT"
] | 3 | 2020-07-28T03:31:27.000Z | 2020-07-28T03:31:32.000Z | remove_punctuations.py | sparemeyoursoliloquy/Python-Exercises | 18f2075327dec0dbc55edd4f50fa3f71258777e1 | [
"MIT"
] | null | null | null | remove_punctuations.py | sparemeyoursoliloquy/Python-Exercises | 18f2075327dec0dbc55edd4f50fa3f71258777e1 | [
"MIT"
] | null | null | null | text = input()
punc_remove = [",", ".", "!", "?"]
for i in punc_remove:
text = text.replace(i, "")
print(text.lower()) | 20.5 | 34 | 0.544715 |
a29166d0430486b39f985f973d6999d2da3a0aae | 5,519 | py | Python | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/multisalesforce/views.py | oddbird/sfdo-template | ac128ca5b2db18d3069a1535cb6ac23f83aa987f | [
"BSD-3-Clause"
] | 3 | 2018-08-23T18:59:59.000Z | 2021-05-25T00:05:52.000Z | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/multisalesforce/views.py | oddbird/sfdo-template | ac128ca5b2db18d3069a1535cb6ac23f83aa987f | [
"BSD-3-Clause"
] | 9 | 2018-09-28T21:30:35.000Z | 2020-08-10T20:42:34.000Z | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/multisalesforce/views.py | oddbird/sfdo-template | ac128ca5b2db18d3069a1535cb6ac23f83aa987f | [
"BSD-3-Clause"
] | 2 | 2019-03-28T05:03:08.000Z | 2019-05-05T18:10:30.000Z | import logging
import re
import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2CallbackView,
OAuth2LoginView,
)
from allauth.socialaccount.providers.salesforce.views import (
SalesforceOAuth2Adapter as SalesforceOAuth2BaseAdapter,
)
from allauth.utils import get_request_param
from django.core.exceptions import SuspiciousOperation
from sfdo_template_helpers.crypto import fernet_decrypt, fernet_encrypt
from ..api.constants import ORGANIZATION_DETAILS
from .provider import (
SalesforceCustomProvider,
SalesforceProductionProvider,
SalesforceTestProvider,
)
logger = logging.getLogger(__name__)
ORGID_RE = re.compile(r"^00D[a-zA-Z0-9]{15}$")
CUSTOM_DOMAIN_RE = re.compile(r"^[a-zA-Z0-9.-]+$")
prod_oauth2_login = LoggingOAuth2LoginView.adapter_view(
SalesforceOAuth2ProductionAdapter
)
prod_oauth2_callback = LoggingOAuth2CallbackView.adapter_view(
SalesforceOAuth2ProductionAdapter
)
sandbox_oauth2_login = LoggingOAuth2LoginView.adapter_view(
SalesforceOAuth2SandboxAdapter
)
sandbox_oauth2_callback = LoggingOAuth2CallbackView.adapter_view(
SalesforceOAuth2SandboxAdapter
)
custom_oauth2_login = LoggingOAuth2LoginView.adapter_view(SalesforceOAuth2CustomAdapter)
custom_oauth2_callback = LoggingOAuth2CallbackView.adapter_view(
SalesforceOAuth2CustomAdapter
)
| 34.067901 | 88 | 0.694872 |
a29207dc0a5cb4e063b1e7adbc8c0acc0f001bf3 | 475 | py | Python | 7_testing/autotest/student.py | ProGabe/teals | 7ebf0b6e6f81d8a4c44baa7b5d3a9d95267ec1e3 | [
"MIT"
] | null | null | null | 7_testing/autotest/student.py | ProGabe/teals | 7ebf0b6e6f81d8a4c44baa7b5d3a9d95267ec1e3 | [
"MIT"
] | 9 | 2019-11-21T13:12:47.000Z | 2021-02-02T14:52:52.000Z | 7_testing/autotest/student.py | ProGabe/teals | 7ebf0b6e6f81d8a4c44baa7b5d3a9d95267ec1e3 | [
"MIT"
] | 2 | 2021-01-25T03:38:30.000Z | 2021-03-07T23:54:53.000Z | '''
Student: Dan Grecoe
Assignment: Homework 1
Submission of the first homework assignment. The assignment
was to create a python file with 2 functions
multiply - Takes two parameters x and y and returns the product
of the values provided.
noop - Takes 0 parameters and returns None
'''
| 22.619048 | 67 | 0.669474 |
a292f32feefb9582465a4d958817a596211378a8 | 31,533 | py | Python | nova/tests/unit/virt/ec2/test_ec2.py | platform9/omni-devstack-fixes | bc94150974fe181840ab3c5d618fa5ce3db44805 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/virt/ec2/test_ec2.py | platform9/omni-devstack-fixes | bc94150974fe181840ab3c5d618fa5ce3db44805 | [
"Apache-2.0"
] | 1 | 2020-03-03T13:53:23.000Z | 2020-03-03T13:53:23.000Z | nova/tests/unit/virt/ec2/test_ec2.py | platform9/omni-devstack-fixes | bc94150974fe181840ab3c5d618fa5ce3db44805 | [
"Apache-2.0"
] | 1 | 2020-09-03T20:54:21.000Z | 2020-09-03T20:54:21.000Z | """
Copyright 2016 Platform9 Systems Inc.
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
import base64
import contextlib
import boto3
import mock
from moto import mock_ec2
from oslo_log import log as logging
from oslo_utils import uuidutils
from credsmgrclient.common.exceptions import HTTPBadGateway
from nova.compute import task_states
from nova import context
from nova import exception
from nova.image.glance import GlanceImageServiceV2
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import matchers
from nova.virt.ec2 import EC2Driver
LOG = logging.getLogger(__name__)
keypair_exist_response = {
'KeyPairs': [
{
'KeyName': 'fake_key',
'KeyFingerprint': 'fake_key_data'
},
{
'KeyName': 'fake_key1',
'KeyFingerprint': 'fake_key_data1'
}
]
}
| 43.979079 | 79 | 0.603114 |