hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
739baac2ff5ef50ecd5e6693fbb6afb0bb494d6a | 5,403 | py | Python | samples/sample-2.py | shoriwe/LVaED | 68ca38eed2b4c2b1b7a6a8304c8effbcf2f977f7 | [
"MIT"
] | null | null | null | samples/sample-2.py | shoriwe/LVaED | 68ca38eed2b4c2b1b7a6a8304c8effbcf2f977f7 | [
"MIT"
] | 19 | 2021-02-08T22:14:16.000Z | 2021-03-03T15:13:07.000Z | samples/sample-2.py | shoriwe/LVaED | 68ca38eed2b4c2b1b7a6a8304c8effbcf2f977f7 | [
"MIT"
] | 3 | 2021-08-30T01:06:32.000Z | 2022-02-21T03:22:28.000Z | import io
import os
import re
import zipfile
import flask
import markdown
import blueprints.example
import blueprints.home
import blueprints.presentation
import blueprints.transformations
if __name__ == '__main__':
main()
| 37.006849 | 108 | 0.760689 |
739bd82ee95264fe3d722473cc7aa6319a24720f | 4,420 | py | Python | yexinyang/scripts/main.py | TheSignPainter/MLproject-docknet | 5d5647356f116d34ef57267524851e44595e5e93 | [
"MIT"
] | null | null | null | yexinyang/scripts/main.py | TheSignPainter/MLproject-docknet | 5d5647356f116d34ef57267524851e44595e5e93 | [
"MIT"
] | null | null | null | yexinyang/scripts/main.py | TheSignPainter/MLproject-docknet | 5d5647356f116d34ef57267524851e44595e5e93 | [
"MIT"
] | 4 | 2019-05-29T12:31:51.000Z | 2019-05-30T12:00:12.000Z | import os, time
import numpy as np
import logging
import fire
import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader
from model import *
from dataset import *
if __name__ == '__main__':
fire.Fire({
'train': main,
'test': score,
}) | 30.694444 | 102 | 0.601357 |
739c941ac4971ed7f222b2a59535b53c9bba54d7 | 1,018 | py | Python | myconnectome/utils/download_file.py | poldrack/myconnectome | 201f414b3165894d6fe0be0677c8a58f6d161948 | [
"MIT"
] | 28 | 2015-04-02T16:43:14.000Z | 2020-06-17T20:04:26.000Z | myconnectome/utils/download_file.py | poldrack/myconnectome | 201f414b3165894d6fe0be0677c8a58f6d161948 | [
"MIT"
] | 11 | 2015-05-19T02:57:22.000Z | 2017-03-17T17:36:16.000Z | myconnectome/utils/download_file.py | poldrack/myconnectome | 201f414b3165894d6fe0be0677c8a58f6d161948 | [
"MIT"
] | 10 | 2015-05-21T17:01:26.000Z | 2020-11-11T04:28:08.000Z | # -*- coding: utf-8 -*-
"""
download file using requests
Created on Fri Jul 3 09:13:04 2015
@author: poldrack
"""
import requests
import os
from requests.packages.urllib3.util import Retry
from requests.adapters import HTTPAdapter
from requests import Session, exceptions
# from http://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py
| 30.848485 | 104 | 0.698428 |
739ceade8d1851b8f8c7cabe7fe9035c80fe7143 | 9,388 | py | Python | django-openstack/django_openstack/syspanel/views/instances.py | tylesmit/openstack-dashboard | 8199011a98aa8bc5672e977db014f61eccc4668c | [
"Apache-2.0"
] | 2 | 2015-05-18T13:50:23.000Z | 2015-05-18T14:47:08.000Z | django-openstack/django_openstack/syspanel/views/instances.py | tylesmit/openstack-dashboard | 8199011a98aa8bc5672e977db014f61eccc4668c | [
"Apache-2.0"
] | null | null | null | django-openstack/django_openstack/syspanel/views/instances.py | tylesmit/openstack-dashboard | 8199011a98aa8bc5672e977db014f61eccc4668c | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Fourth Paradigm Development, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from django import http
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.utils.translation import ugettext as _
import datetime
import logging
from django.contrib import messages
from django_openstack import api
from django_openstack import forms
from django_openstack.dash.views import instances as dash_instances
from openstackx.api import exceptions as api_exceptions
TerminateInstance = dash_instances.TerminateInstance
RebootInstance = dash_instances.RebootInstance
LOG = logging.getLogger('django_openstack.syspanel.views.instances')
| 36.96063 | 101 | 0.659139 |
739e11e44ead5664c57ce1862ebd696671d1bb6a | 612 | py | Python | image_png.py | tomasdisk/tommGL-py | 63876cc7211610908f388c2fd9b2b5f4dbd4411c | [
"MIT"
] | 1 | 2018-06-19T21:19:20.000Z | 2018-06-19T21:19:20.000Z | image_png.py | tomasdisk/tommGL-py | 63876cc7211610908f388c2fd9b2b5f4dbd4411c | [
"MIT"
] | null | null | null | image_png.py | tomasdisk/tommGL-py | 63876cc7211610908f388c2fd9b2b5f4dbd4411c | [
"MIT"
] | null | null | null | from datetime import datetime as dt
from bitmap import Bitmap, PilBitmap
h = 500
w = 500
image = Bitmap(w, h, alpha=True)
pil_image = PilBitmap(w, h, alpha=True)
color_red = 0
for i in range(h):
for j in range(w):
image.set_rgba_pixel(j, i, color_red, 0, 0, 150)
pil_image.set_rgba_pixel(j, i, color_red, 0, 0, 150)
color_red += 1
path = "images/im1_" + dt.now().strftime("%Y-%m-%d_%H:%M:%S") + ".png"
print("Image saved: " + path)
image.save_as_png(path)
path = "images/im2_" + dt.now().strftime("%Y-%m-%d_%H:%M:%S") + ".png"
print("Image saved: " + path)
pil_image.save_as_png(path)
| 27.818182 | 70 | 0.643791 |
739e6d0875de7997feffc9f90decf0de25b225f9 | 9,157 | py | Python | src/memberdef.py | alljoyn/devtools-codegen | 388cac15e584dce3040d5090e8f627e5360e5c0f | [
"0BSD"
] | null | null | null | src/memberdef.py | alljoyn/devtools-codegen | 388cac15e584dce3040d5090e8f627e5360e5c0f | [
"0BSD"
] | null | null | null | src/memberdef.py | alljoyn/devtools-codegen | 388cac15e584dce3040d5090e8f627e5360e5c0f | [
"0BSD"
] | null | null | null | # Copyright AllSeen Alliance. All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import validate
import common
def get_indirection_level(signature):
    """Count the leading 'a' (array) markers on a signature.

    Returns the array depth, or 0 when the signature is not an array.
    """
    without_array_prefix = signature.lstrip('a')
    return len(signature) - len(without_array_prefix)
def get_base_signature(signature, index = 0):
    """Strip leading array markers from signature[index:].

    'i', 'ai' and 'aai' all yield 'i'.
    """
    tail = signature[index:]
    return tail.lstrip('a')
def is_array(signature):
    """True when the outermost type is an array (dictionaries count as arrays)."""
    leading = signature[0]
    return leading == "a"
def is_structure(signature):
    """True when, after stripping array markers, the element type is a struct."""
    element = get_base_signature(signature)
    return element[0] == '('
def is_dictionary(signature):
    """True when the signature is an array whose element type is a dict entry."""
    element = get_base_signature(signature)
    return signature[0] == 'a' and element[0] == '{'
def is_dictionary_array(signature):
    """True for a dictionary wrapped in at least one extra array level."""
    if not is_dictionary(signature):
        return False
    return get_indirection_level(signature) > 1
def __find_end_of_type(signature, index = 0):
    """Returns the index of the start of the next type starting at 'index'.
    If there are no more types then return the end of the type signature.
    For example:
    ("ab", 0) returns 1
    ("ab", 1) returns 2
    ("aab", 0) returns 1
    ("aab", 1) returns 1
    ("aab", 2) returns 3
    ("abb", 1) returns 2
    ("abb", 2) returns 3
    ("bqd", 0) returns 1
    ("bqd", 1) returns 2
    ("bqd", 2) returns 3
    ("(bqd)", 0) returns 4
    ("(bqd)", 1) returns 2
    ("(bqd)", 2) returns 3
    ("(bqd)", 3) returns 4
    ("(bqd)", 4) returns 5
    ("(bqd(bad))", 0) returns 9
    ("(bqd(bad))", 1) returns 2
    ("(bqd(bad))", 2) returns 3
    ("(bqd(bad))", 3) returns 4
    ("(bqd(bad))", 4) returns 8
    ("(bqd(bad))", 5) returns 6"""
    assert(index < len(signature))
    c = signature[index]
    if c == '(':
        # Structure: skip ahead to the matching close paren.
        # NOTE(review): __find_container_end is not visible in this extract --
        # presumably it returns the index of/just past the matching delimiter;
        # confirm against the full source.
        end_index = __find_container_end(signature, index, ')')
    elif c == '{':
        # Dictionary entry: skip ahead to the matching close brace.
        end_index = __find_container_end(signature, index, '}')
    elif c == 'a':
        # Array: measure the end of the stripped element type, then add back
        # the starting offset plus the number of 'a' prefixes stripped.
        base = get_base_signature(signature, index)
        end_index = __find_end_of_type(base)
        end_index += index + get_indirection_level(signature, index)
    else:
        # Single-character basic type.
        end_index = index + 1
    return end_index
def is_basic_type(signature):
    """True for single-character primitive signatures.

    Containers ('a', '(', '{') and variants ('v') are excluded here because
    they usually cannot be handled the same way as other types.
    """
    return signature in {'b', 'd', 'g', 'i', 'n', 'o', 'q', 's', 't', 'u', 'x', 'y'}
def get_max_array_dimension(signature):
    """Length of the longest run of consecutive 'a' markers in the signature."""
    depth = 0
    while signature.find('a' * (depth + 1)) >= 0:
        depth += 1
    return depth
def split_signature(sig):
    """Split a container signature such as '(iai(bb))' into its field signatures."""
    fields = []
    pos = 1  # skip the opening container delimiter
    while pos < len(sig) - 1:  # stop before the closing delimiter
        remainder = sig[pos:]
        start = get_indirection_level(remainder)
        end = __find_end_of_type(remainder, start)
        fields.append(remainder[:end])
        pos += end
    return fields
| 33.177536 | 96 | 0.644207 |
739eb239f78d72920cbdfea243f1d357367bd4a8 | 2,187 | py | Python | ddcz/migrations/0010_creativepage_creativepageconcept_creativepagesection.py | Nathaka/graveyard | dcc5ba2fa1679318e65c0078f734cbfeeb287c32 | [
"MIT"
] | 6 | 2018-06-10T09:47:50.000Z | 2022-02-13T12:22:07.000Z | ddcz/migrations/0010_creativepage_creativepageconcept_creativepagesection.py | Nathaka/graveyard | dcc5ba2fa1679318e65c0078f734cbfeeb287c32 | [
"MIT"
] | 268 | 2018-05-30T21:54:50.000Z | 2022-01-08T21:00:03.000Z | ddcz/migrations/0010_creativepage_creativepageconcept_creativepagesection.py | jimmeak/graveyard | 4c0f9d5e8b6c965171d9dc228c765b662f5b7ab4 | [
"MIT"
] | 4 | 2018-09-14T03:50:08.000Z | 2021-04-19T19:36:23.000Z | # Generated by Django 2.0.2 on 2018-06-13 22:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 30.375 | 68 | 0.40695 |
739f4a4af64c366326ef39984c42e5d44fc7cab0 | 8,145 | py | Python | libml/preprocess.py | isabella232/l2p | 4379849b009edd9d5fde71d625cbb9aa1166aa17 | [
"Apache-2.0"
] | 45 | 2021-12-20T19:14:30.000Z | 2022-03-31T14:08:44.000Z | libml/preprocess.py | google-research/l2p | 98b10eaf07d3dd899a324fe4149bf6f01e26c589 | [
"Apache-2.0"
] | 3 | 2021-12-29T03:53:22.000Z | 2022-03-18T01:08:25.000Z | libml/preprocess.py | isabella232/l2p | 4379849b009edd9d5fde71d625cbb9aa1166aa17 | [
"Apache-2.0"
] | 5 | 2021-12-22T01:37:18.000Z | 2022-02-14T23:17:38.000Z | # coding=utf-8
# Copyright 2020 The Learning-to-Prompt Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific Learning-to-Prompt governing permissions and
# limitations under the License.
# ==============================================================================
"""Input preprocesses."""
from typing import Any, Callable, Dict, Optional
import ml_collections
from augment import augment_utils
import tensorflow as tf
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
CIFAR10_MEAN = (0.4914, 0.4822, 0.4465)
CIFAR10_STD = (0.2471, 0.2435, 0.2616)
CIFAR100_MEAN = (0.5071, 0.4867, 0.4408)
CIFAR100_STD = (0.2675, 0.2565, 0.2761)
# Constants for configuring config.<name>
RANDOM_ERASING = "randerasing"
AUGMENT = "augment"
MIX = "mix"
COLORJITTER = "colorjitter"
create_mix_augment = augment_utils.create_mix_augment
def resize_small(image: tf.Tensor,
                 size: int,
                 *,
                 antialias: bool = False) -> tf.Tensor:
    """Scale `image` so its shorter side equals `size`, preserving aspect ratio.

    Args:
      image: Single image as a float32 tensor.
      size: Target length for the shorter side of the output image.
      antialias: Whether to apply an anti-aliasing filter when downsampling.

    Returns:
      The resized image tensor.
    """
    height = tf.shape(image)[0]
    width = tf.shape(image)[1]
    shorter = tf.cast(tf.minimum(height, width), tf.float32)
    scale = tf.cast(size, tf.float32) / shorter
    new_height = tf.cast(tf.round(tf.cast(height, tf.float32) * scale), tf.int32)
    new_width = tf.cast(tf.round(tf.cast(width, tf.float32) * scale), tf.int32)
    return tf.image.resize(image, [new_height, new_width], antialias=antialias)
def central_crop(image: tf.Tensor, size: int) -> tf.Tensor:
    """Crop the centered `size` x `size` region out of `image`."""
    offset_top = (tf.shape(image)[0] - size) // 2
    offset_left = (tf.shape(image)[1] - size) // 2
    return tf.image.crop_to_bounding_box(image, offset_top, offset_left, size, size)
def decode_and_random_resized_crop(image: tf.Tensor, rng,
                                   resize_size: int) -> tf.Tensor:
    """Decodes a JPEG, extracts a stateless random crop, and resizes it.

    Args:
      image: Encoded JPEG bytes tensor.
      rng: Seed for the stateless sampling op (presumably a shape-[2] int
        tensor as required by the stateless TF random ops -- confirm at caller).
      resize_size: Output height and width in pixels.

    Returns:
      A float32 image in [0, 1] of shape (resize_size, resize_size, 3).
    """
    shape = tf.io.extract_jpeg_shape(image)
    # Sample a crop window covering between 5% and 100% of the image area.
    begin, size, _ = tf.image.stateless_sample_distorted_bounding_box(
        shape,
        tf.zeros([0, 0, 4], tf.float32),
        seed=rng,
        area_range=(0.05, 1.0),
        min_object_covered=0,  # Don't enforce a minimum area.
        use_image_if_no_bounding_boxes=True)
    top, left, _ = tf.unstack(begin)
    h, w, _ = tf.unstack(size)
    # Decode only the crop window instead of the full image.
    image = tf.image.decode_and_crop_jpeg(image, [top, left, h, w], channels=3)
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.image.resize(image, (resize_size, resize_size))
    return image
def train_preprocess(features: Dict[str, tf.Tensor],
                     crop_size: int = 224) -> Dict[str, tf.Tensor]:
    """Processes a single example for training: random resized crop + h-flip.

    Expects `features` to carry encoded "image" bytes, a per-example "rng"
    seed, and a "label"; returns only "image" and "label".
    """
    image = features["image"]
    # This PRNGKey is unique to this example. We can use it with the stateless
    # random ops in TF.
    rng = features.pop("rng")
    # Derive independent seeds for the crop and the flip.
    rng, rng_crop, rng_flip = tf.unstack(
        tf.random.experimental.stateless_split(rng, 3))
    image = decode_and_random_resized_crop(image, rng_crop, resize_size=crop_size)
    image = tf.image.stateless_random_flip_left_right(image, rng_flip)
    return {"image": image, "label": features["label"]}
def train_cifar_preprocess(features: Dict[str, tf.Tensor]):
    """Augmentation function for cifar dataset: pad, random crop, random flip.

    Expects encoded "image" bytes, a per-example "rng" seed, and a "label".
    """
    image = tf.io.decode_jpeg(features["image"])
    # Pad 4 pixels on each side so the 32x32 random crop can shift the image.
    image = tf.image.resize_with_crop_or_pad(image, 32 + 4, 32 + 4)
    rng = features.pop("rng")
    # Derive independent seeds for the crop and the flip.
    rng, rng_crop, rng_flip = tf.unstack(
        tf.random.experimental.stateless_split(rng, 3))
    # Randomly crop a [HEIGHT, WIDTH] section of the image.
    image = tf.image.stateless_random_crop(image, [32, 32, 3], rng_crop)
    # Randomly flip the image horizontally
    image = tf.image.stateless_random_flip_left_right(image, rng_flip)
    image = tf.cast(image, tf.float32) / 255.0
    return {"image": image, "label": features["label"]}
def get_augment_preprocess(
    augment_params: ml_collections.ConfigDict,
    *,
    colorjitter_params: Optional[ml_collections.ConfigDict] = None,
    randerasing_params: Optional[ml_collections.ConfigDict] = None,
    mean: Optional[tf.Tensor] = None,
    std: Optional[tf.Tensor] = None,
    basic_process: Callable[[Dict[str, tf.Tensor]],
                            Dict[str, tf.Tensor]] = train_preprocess,
) -> Callable[[Dict[str, tf.Tensor]], Dict[str, tf.Tensor]]:
    """Creates a custom augmented image preprocess.

    NOTE(review): the returned `train_custom_augment_preprocess` closure is not
    visible in this extract -- its definition appears to have been dropped.
    `randerasing_params`, `mean`, `std` and `basic_process` are unused in the
    visible lines and are presumably consumed inside that closure; confirm
    against the full source.
    """
    augmentor = None
    # If augment_params.type is noop/default, we skip.
    if augment_params and augment_params.get(
        "type") and augment_params.type not in ("default", "noop"):
        augmentor = augment_utils.create_augmenter(**augment_params.to_dict())
    jitter = None
    if colorjitter_params and colorjitter_params.type not in ("default", "noop"):
        jitter = augment_utils.create_augmenter(**colorjitter_params.to_dict())
    return train_custom_augment_preprocess
def eval_preprocess(features: Dict[str, tf.Tensor],
                    mean: Optional[tf.Tensor] = None,
                    std: Optional[tf.Tensor] = None,
                    input_size: int = 256,
                    crop_size: int = 224) -> Dict[str, tf.Tensor]:
    """Process a single example for evaluation: resize, center crop, normalize.

    Normalization with (mean, std) is applied only when `mean` is given.
    """
    image = features["image"]
    assert image.dtype == tf.uint8
    image = tf.cast(image, tf.float32) / 255.0
    # image = resize_small(image, size=int(256 / 224 * input_size))
    # image = central_crop(image, size=input_size)
    image = resize_small(image, size=input_size)  # e.g. 256, 448
    image = central_crop(image, size=crop_size)  # e.g. 224, 384
    if mean is not None:
        # NOTE(review): _check_valid_mean_std is not visible in this extract;
        # presumably it validates the mean/std tensors -- confirm in full source.
        _check_valid_mean_std(mean, std)
        image = (image - mean) / std
    return {"image": image, "label": features["label"]}
def cifar_eval_preprocess(
    features: Dict[str, tf.Tensor],
    mean: Optional[tf.Tensor] = None,
    std: Optional[tf.Tensor] = None) -> Dict[str, tf.Tensor]:
    """Processes a single example for evaluation for cifar.

    No resizing or cropping; only rescales to [0, 1] and, when `mean` is
    given, normalizes with (mean, std).
    """
    image = features["image"]
    assert image.dtype == tf.uint8
    image = tf.cast(image, tf.float32) / 255.0
    if mean is not None:
        # NOTE(review): _check_valid_mean_std is not visible in this extract --
        # confirm its contract in the full source.
        _check_valid_mean_std(mean, std)
        image = (image - mean) / std
    return {"image": image, "label": features["label"]}
73a022545603af3f26c0bf2eec8dadb8c4ffd178 | 2,693 | py | Python | glue/viewers/matplotlib/qt/toolbar.py | tiagopereira/glue | 85bf7ce2d252d7bc405e8160b56fc83d46b9cbe4 | [
"BSD-3-Clause"
] | 1 | 2019-12-17T07:58:35.000Z | 2019-12-17T07:58:35.000Z | glue/viewers/matplotlib/qt/toolbar.py | scalet98/glue | ff949ad52e205c20561f48c05f870b2abb39e0b0 | [
"BSD-3-Clause"
] | null | null | null | glue/viewers/matplotlib/qt/toolbar.py | scalet98/glue | ff949ad52e205c20561f48c05f870b2abb39e0b0 | [
"BSD-3-Clause"
] | 1 | 2019-08-04T14:10:12.000Z | 2019-08-04T14:10:12.000Z | from __future__ import absolute_import, division, print_function
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT
from glue.config import viewer_tool
from glue.viewers.common.tool import CheckableTool, Tool
__all__ = ['MatplotlibTool', 'MatplotlibCheckableTool', 'HomeTool', 'SaveTool',
'PanTool', 'ZoomTool']
| 24.935185 | 88 | 0.678797 |
73a0ab5a7274a4ae6d6cb3e1e3d9e17024ee3ea6 | 1,003 | py | Python | 2_4_overfitting_underfitting/utils_overfitting.py | layerwise/training | 21ad2a5684a3712192fb13f8214bc3bb4c975f3e | [
"MIT"
] | null | null | null | 2_4_overfitting_underfitting/utils_overfitting.py | layerwise/training | 21ad2a5684a3712192fb13f8214bc3bb4c975f3e | [
"MIT"
] | null | null | null | 2_4_overfitting_underfitting/utils_overfitting.py | layerwise/training | 21ad2a5684a3712192fb13f8214bc3bb4c975f3e | [
"MIT"
] | 1 | 2021-07-20T11:38:47.000Z | 2021-07-20T11:38:47.000Z | import matplotlib.pyplot as plt
import numpy as np
from ipywidgets import interactive, interactive_output, fixed, HBox, VBox
import ipywidgets as widgets
| 24.463415 | 73 | 0.62014 |
73a4124d5d48a030e18fb459f88816554d8ff126 | 1,036 | py | Python | analyze.py | sveitser/mandarin | 474617971e5eb9120d5ea5454cc2c49bb40b4977 | [
"MIT"
] | null | null | null | analyze.py | sveitser/mandarin | 474617971e5eb9120d5ea5454cc2c49bb40b4977 | [
"MIT"
] | null | null | null | analyze.py | sveitser/mandarin | 474617971e5eb9120d5ea5454cc2c49bb40b4977 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import jieba
import numpy as np
jieba.setLogLevel(60) # quiet
fname = sys.argv[1]
with open(fname) as f:
text = f.read()
tokenizer = jieba.Tokenizer()
tokens = list(tokenizer.cut(text))
occurences = np.array([tokenizer.FREQ[w] for w in tokens if w in tokenizer.FREQ])
difficulties = 1 / (occurences + 1)
max_occurence = np.max(list(tokenizer.FREQ.values()))
min_score = 1 / (max_occurence + 1)
max_score = 1
perc = 75
mean = np.mean(difficulties)
median = np.percentile(difficulties, perc)
normalized_mean = norm(mean)
normalized_median = norm(median)
print(
f"{os.path.basename(fname)}: "
f"mean: {normalized_mean:.6f}, {perc}th percentile: {normalized_median:.6f} "
f"in [0: trivial, 1: hardest]"
)
import matplotlib.pyplot as plt
clipped = difficulties[(difficulties <= 0.01) & (difficulties >= 0.0001)]
plt.hist(clipped, bins=20, density=True)
ax = plt.gca()
ax.set_title(fname)
plt.show()
| 20.313725 | 81 | 0.697876 |
73a548fe78fa2339c064396148e3d2072e173b7a | 2,836 | py | Python | brown_clustering/data.py | helpmefindaname/BrownClustering | 1b9d3e424a58813dec13ef619ca18e3671d75819 | [
"MIT"
] | 7 | 2021-11-30T13:35:46.000Z | 2022-03-31T14:01:04.000Z | brown_clustering/data.py | helpmefindaname/BrownClustering | 1b9d3e424a58813dec13ef619ca18e3671d75819 | [
"MIT"
] | null | null | null | brown_clustering/data.py | helpmefindaname/BrownClustering | 1b9d3e424a58813dec13ef619ca18e3671d75819 | [
"MIT"
] | null | null | null | from itertools import tee
from typing import Dict, Iterator, List, Sequence, Tuple
from brown_clustering.defaultvaluedict import DefaultValueDict
Corpus = Sequence[Sequence[str]]
| 30.494624 | 78 | 0.565233 |
73a58a2a727d6573f018385b2dad3ec0e4b46b5e | 3,299 | py | Python | xs/layers/ops.py | eLeVeNnN/xshinnosuke | 69da91e0ea5042437edfc31c0e6ff9ef394c6cc9 | [
"MIT"
] | 290 | 2020-07-06T02:13:12.000Z | 2021-01-04T14:23:39.000Z | xs/layers/ops.py | E1eveNn/xshinnosuke | 69da91e0ea5042437edfc31c0e6ff9ef394c6cc9 | [
"MIT"
] | 1 | 2020-12-03T11:11:48.000Z | 2020-12-03T11:11:48.000Z | xs/layers/ops.py | E1eveNn/xshinnosuke | 69da91e0ea5042437edfc31c0e6ff9ef394c6cc9 | [
"MIT"
] | 49 | 2020-07-16T00:27:47.000Z | 2020-11-26T03:03:14.000Z | from .base import *
| 39.746988 | 114 | 0.620794 |
73a60122798b5b44ac1b77285ac69b9d5cb78587 | 2,888 | py | Python | fcore/util.py | superwhyun/farmos | 9292f3ba24b7d07002af0549ae510ce4edf09ce5 | [
"BSD-3-Clause"
] | null | null | null | fcore/util.py | superwhyun/farmos | 9292f3ba24b7d07002af0549ae510ce4edf09ce5 | [
"BSD-3-Clause"
] | null | null | null | fcore/util.py | superwhyun/farmos | 9292f3ba24b7d07002af0549ae510ce4edf09ce5 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 JiNong, Inc.
# All right reserved.
#
"""
Utility Functions .
"""
import time
import math
import logging
import logging.handlers
if __name__ == '__main__':
st = SunTime(128.856632, 37.798953)
print("rise", st.getsunrise(), "set", st.getsunset())
| 33.976471 | 158 | 0.621191 |
73a657e874819eb1f55d87b508eba3c94d916b59 | 144 | py | Python | src/lib/__init__.py | gfjiangly/RCNet | ef6860f23943eb8e21fdec565019f2f8eda17673 | [
"MIT"
] | null | null | null | src/lib/__init__.py | gfjiangly/RCNet | ef6860f23943eb8e21fdec565019f2f8eda17673 | [
"MIT"
] | null | null | null | src/lib/__init__.py | gfjiangly/RCNet | ef6860f23943eb8e21fdec565019f2f8eda17673 | [
"MIT"
] | null | null | null | # -*- encoding:utf-8 -*-
# @Time : 2019/10/23 15:45
# @Author : gfjiang
# @Site :
# @File : __init__.py
# @Software: PyCharm
| 18 | 30 | 0.513889 |
73a7196bbf0eb253a97a49fbb8e7cb7ec93df591 | 611 | py | Python | tests/manual/i3wmcommands.py | diegoperezm/screencast-script | ac477c6f44a151cafa88ebfd981d2bbe34f792bd | [
"MIT"
] | null | null | null | tests/manual/i3wmcommands.py | diegoperezm/screencast-script | ac477c6f44a151cafa88ebfd981d2bbe34f792bd | [
"MIT"
] | null | null | null | tests/manual/i3wmcommands.py | diegoperezm/screencast-script | ac477c6f44a151cafa88ebfd981d2bbe34f792bd | [
"MIT"
] | null | null | null | import sys
# for development
sys.path.append('../../src')
from screencastscript import ScreencastScript # noqa: E402
screencast = ScreencastScript()
screencast.sleep(1)
screencast.i3wm_focus_left()
screencast.sleep(1)
screencast.i3wm_zoom_in()
screencast.sleep(1)
screencast.i3wm_zoom_out()
screencast.sleep(1)
screencast.i3wm_focus_right()
screencast.sleep(1)
screencast.i3wm_focus_up()
screencast.sleep(1)
screencast.i3wm_focus_down()
screencast.sleep(1)
screencast.i3wm_toggle_fullscreen()
screencast.sleep(1)
screencast.i3wm_ws_2()
screencast.sleep(1)
screencast.i3wm_ws_1()
screencast.sleep(1)
| 16.972222 | 59 | 0.800327 |
73a7a553c3b396a8049a5ddf4e1a0e97e5a14ea4 | 1,003 | py | Python | hippocampus/scripts/s04_hipp_cortex_fc_mean.py | CNG-LAB/cng-open | b775a8fd554a39ad3b4033e545bd4bf68f7ed46b | [
"MIT"
] | null | null | null | hippocampus/scripts/s04_hipp_cortex_fc_mean.py | CNG-LAB/cng-open | b775a8fd554a39ad3b4033e545bd4bf68f7ed46b | [
"MIT"
] | null | null | null | hippocampus/scripts/s04_hipp_cortex_fc_mean.py | CNG-LAB/cng-open | b775a8fd554a39ad3b4033e545bd4bf68f7ed46b | [
"MIT"
] | null | null | null | """
computes the mean hippocampal-cortical functional connectivity (fc) matrix,
for the left hemisphere subfields
"""
import os
import h5py
import numpy as np
# data dirs
ddir = '../data/'
conndir = '../data/tout_hippoc/'
odir = '../data/tout_group/'
# get HCP - S900 subject list
subjlist = '../data/subjectListS900_QC_gr.txt'
f = open(subjlist); mylist = f.read().split("\n"); f.close()
# Drop the trailing empty entry produced by the file's final newline.
subjlist = joinedlist = mylist[:-1]
print('We have now %i subjects... ' % (len(subjlist)))  # 709
# Accumulate per-subject left-hemisphere FC matrices (4096 x 360), then average.
fc_left = np.zeros((4096, 360))
j = 0
for subjID in subjlist:
    fname = os.path.join(conndir, 'HCP_' + subjID + '_left.h5')
    f = h5py.File(fname, 'r')
    f = np.array(f['HCP_' + subjID])
    fc_left = fc_left + f
    j += 1
fc_left = fc_left / j  # group mean over all accumulated subjects
# Save the group-mean FC matrix for downstream analyses.
h = h5py.File('../data/tout_group/Hmean709_FC_left.h5', 'w')
h.create_dataset('data', data = fc_left)
h.close()
print(fc_left.min(), fc_left.max(), fc_left.shape, j)
# -0.005300521852874321, 0.39153784016161197, (4096, 360), 709
73a85bf483c1c47a0091ad63bb16957bd6c8d4f4 | 3,907 | py | Python | setup.py | sgp79/reptools | 3290b8daab58a0c5f2965fb221f7b480c380966b | [
"MIT"
] | null | null | null | setup.py | sgp79/reptools | 3290b8daab58a0c5f2965fb221f7b480c380966b | [
"MIT"
] | 1 | 2021-12-10T13:09:54.000Z | 2021-12-10T13:09:54.000Z | setup.py | sgp79/reptools | 3290b8daab58a0c5f2965fb221f7b480c380966b | [
"MIT"
] | null | null | null | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
#To install:
# py -3 setup.py sdist
# pip3 install .
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import path
from io import open
#from reptools import __version__
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
#Get the version
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
name='reptools',
version=open("reptools/version.py").readlines()[-1].split()[-1].strip("\"'"),
# https://packagiATR01400 ng.python.org/specifications/core-metadata/#summary
description='Tools for processing Rep-seq data',
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description,
# https://packaging.python.org/specifications/core-metadata/#description-content-type-optional
long_description_content_type='text/markdown',
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
#url='', # Optional
author='Stephen Preston',
author_email='stephen.preston@zoo.ox.ac.uk',
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
'Intended Audience :: Immunologists',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
],
# Note that this is a string of words separated by whitespace, not a list.
#keywords='sample setuptools development', # Optional
#
packages=find_packages(exclude=['contrib', 'docs', 'tests']), # Required
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy','numba'],
python_requires='>=3.7',
#extras_require={ # Optional
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
#package_data={ # Optional
# 'sample': ['package_data.dat'],
#},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])], # Optional
# The following provides a command called `reptools` which
# executes the function `main` from the reptools.cli package when invoked:
entry_points={
'console_scripts': [
'reptools=reptools.cli:main',
],
},
# List additional URLs that are relevant to your project as a dict.
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#project_urls={ # Optional
# 'Bug Reports': 'https://github.com/pypa/sampleproject/issues',
# 'Funding': 'https://donate.pypi.org',
# 'Say Thanks!': 'http://saythanks.io/to/example',
# 'Source': 'https://github.com/pypa/sampleproject/',
#},
)
| 32.831933 | 98 | 0.653187 |
73a9012563f8e544e446267b12c23f24456df159 | 1,563 | py | Python | peeldb/migrations/0033_auto_20171018_1423.py | ashwin31/opensource-job-portal | 2885ea52f8660e893fe0531c986e3bee33d986a2 | [
"MIT"
] | 1 | 2021-09-27T05:01:39.000Z | 2021-09-27T05:01:39.000Z | peeldb/migrations/0033_auto_20171018_1423.py | kiran1415/opensource-job-portal | 2885ea52f8660e893fe0531c986e3bee33d986a2 | [
"MIT"
] | null | null | null | peeldb/migrations/0033_auto_20171018_1423.py | kiran1415/opensource-job-portal | 2885ea52f8660e893fe0531c986e3bee33d986a2 | [
"MIT"
] | 1 | 2022-01-05T09:02:32.000Z | 2022-01-05T09:02:32.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-10-18 14:23
from __future__ import unicode_literals
from django.db import migrations, models
| 40.076923 | 130 | 0.435061 |
73a9cda8e0d2bd2c5fe35622d180c1e9b443a525 | 1,905 | py | Python | application/modules/post/windows-priv-check/wpc/report/issues.py | cys3c/viper-shell | e05a07362b7d1e6d73c302a24d2506846e43502c | [
"PSF-2.0",
"BSD-2-Clause"
] | 2 | 2018-06-30T03:21:30.000Z | 2020-03-22T02:31:02.000Z | application/modules/post/windows-priv-check/wpc/report/issues.py | cys3c/viper-shell | e05a07362b7d1e6d73c302a24d2506846e43502c | [
"PSF-2.0",
"BSD-2-Clause"
] | null | null | null | application/modules/post/windows-priv-check/wpc/report/issues.py | cys3c/viper-shell | e05a07362b7d1e6d73c302a24d2506846e43502c | [
"PSF-2.0",
"BSD-2-Clause"
] | 3 | 2017-11-15T11:08:20.000Z | 2020-03-22T02:31:03.000Z | from wpc.report.issue import issue
import xml.etree.cElementTree as etree
from lxml import etree as letree
from operator import itemgetter, attrgetter, methodcaller
# TODO should this class contain info about the scan? or define a new class called report?
# Version of script
# Date, time of audit
# Who the audit ran as (username, groups, privs)
# ...
| 31.229508 | 95 | 0.632546 |
73aa3640a120523b4d2b177f875511cc1784ef46 | 1,456 | py | Python | util/doxify.py | lanfangping/ravel | 7be759f219828b09696faf0b3eb52e83243998f9 | [
"Apache-2.0"
] | 9 | 2016-03-14T19:19:21.000Z | 2020-03-24T07:04:39.000Z | util/doxify.py | lanfangping/ravel | 7be759f219828b09696faf0b3eb52e83243998f9 | [
"Apache-2.0"
] | null | null | null | util/doxify.py | lanfangping/ravel | 7be759f219828b09696faf0b3eb52e83243998f9 | [
"Apache-2.0"
] | 10 | 2016-05-10T14:47:56.000Z | 2021-11-08T05:47:47.000Z | #!/usr/bin/python
"""
From Mininet 2.2.1: convert simple documentation to epydoc/pydoctor-compatible markup
"""
from sys import stdin, stdout, argv
import os
from tempfile import mkstemp
from subprocess import call
import re
# Pre-compiled patterns used when rewriting plain docstrings into
# epydoc/pydoctor-style markup.
spaces = re.compile(r'\s+')  # runs of whitespace (indentation measurement)
singleLineExp = re.compile(r'\s+"([^"]+)"')  # one-line docstring: "..."
commentStartExp = re.compile(r'\s+"""')  # opening triple-quoted docstring
commentEndExp = re.compile(r'"""$')  # closing triple quote at end of line
returnExp = re.compile(r'\s+(returns:.*)')  # "returns:" line inside a docstring
# Line-to-line parser state -- presumably consumed by the line-rewriting
# driver (not visible in this chunk); TODO confirm.
lastindent = ''
comment = False
def fixParam(line):
    """Rewrite 'foo: bar' as '@param foo bar', then eat one space before each '@'."""
    tagged = re.sub(r'(\w+):', r'@param \1', line)
    return re.sub(r' @', r'@', tagged)
def fixReturns(line):
    """Replace each literal 'returns:' marker in *line* with '@returns'."""
    # The original pattern contains no regex metacharacters, so a plain
    # string replacement is exactly equivalent.
    return line.replace('returns:', '@returns')
if __name__ == '__main__':
    # Usage: doxify.py <file> -- convert <file>'s docstrings and feed the
    # result through the external `doxypy` filter.
    infile = open(argv[1])
    outfid, outname = mkstemp()  # temp file receives the converted source
    # NOTE(review): fixLines is not defined in this chunk -- presumably the
    # per-line rewriting driver; confirm it exists elsewhere in the file.
    fixLines(infile.readlines(), outfid)
    infile.close()
    os.close(outfid)
    call([ 'doxypy', outname ])  # hand the converted file to doxypy
| 23.483871 | 85 | 0.625687 |
73aa48515ec8f415bcd5c491e96baf51080aa39d | 3,924 | py | Python | mysite/stock/views.py | flohh-py/django-tutorial | feecb2b25d88abe0cdccdae4cef87658fa5d8ea7 | [
"MIT"
] | null | null | null | mysite/stock/views.py | flohh-py/django-tutorial | feecb2b25d88abe0cdccdae4cef87658fa5d8ea7 | [
"MIT"
] | null | null | null | mysite/stock/views.py | flohh-py/django-tutorial | feecb2b25d88abe0cdccdae4cef87658fa5d8ea7 | [
"MIT"
] | null | null | null | from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy, reverse
from django.shortcuts import redirect
from .models import StockEntry, StockEntryLine
from .forms import StockEntryForm, StockEntryLineForm, StockEntryLineIF
from main.views import BaseView
| 34.421053 | 79 | 0.690367 |
73aaccfbd257c25514479c0a480ba43ed3380e07 | 2,589 | py | Python | src/sentry/web/frontend/generic.py | erhuabushuo/sentry | 8b3bad10155aaacfdff80910e5972e64304e880c | [
"BSD-3-Clause"
] | null | null | null | src/sentry/web/frontend/generic.py | erhuabushuo/sentry | 8b3bad10155aaacfdff80910e5972e64304e880c | [
"BSD-3-Clause"
] | null | null | null | src/sentry/web/frontend/generic.py | erhuabushuo/sentry | 8b3bad10155aaacfdff80910e5972e64304e880c | [
"BSD-3-Clause"
] | null | null | null | """
sentry.web.frontend.generic
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from sentry.models import Team
from sentry.permissions import can_create_teams
from sentry.plugins import plugins
from sentry.plugins.base import Response
from sentry.web.decorators import login_required
from sentry.web.helpers import render_to_response
def static_media(request, **kwargs):
    """
    Serve static files below a given point in the directory structure.
    """
    from django.contrib.staticfiles.views import serve
    prefix = kwargs.get('module')
    rel_path = kwargs.get('path', '')
    if prefix:
        rel_path = '%s/%s' % (prefix, rel_path)
    return serve(request, rel_path, insecure=True)
def missing_perm(request, perm, **kwargs):
    """
    Returns a generic response if you're missing permission to perform an
    action.
    Plugins may overwrite this with the ``missing_perm_response`` hook.
    """
    # Give plugins first crack at producing a response; ``first`` returns the
    # first non-None hook result, or None if no plugin handled it.
    response = plugins.first('missing_perm_response', request, perm, **kwargs)
    if response:
        if isinstance(response, HttpResponseRedirect):
            # A redirect from a plugin is passed through untouched.
            return response
        if not isinstance(response, Response):
            raise NotImplementedError('Use self.render() when returning responses.')
        return response.respond(request, {
            'perm': perm,
        })
    # No plugin handled it: show a generic error page when the permission has
    # a human-readable label, otherwise bounce back to the main sentry view.
    if perm.label:
        return render_to_response('sentry/generic_error.html', {
            'title': _('Missing Permission'),
            'message': _('You do not have the required permissions to %s.') % (perm.label,)
        }, request)
    return HttpResponseRedirect(reverse('sentry'))
| 31.573171 | 121 | 0.683275 |
73aaee020a07b3d8d2a092fd658dc4eb59eaed84 | 878 | py | Python | setup.py | harsh020/synthetic_metric | acecba0150a37c58613a477918ad407373c4cd5c | [
"MIT"
] | 1 | 2021-11-08T09:19:02.000Z | 2021-11-08T09:19:02.000Z | setup.py | harsh020/synthetic_metric | acecba0150a37c58613a477918ad407373c4cd5c | [
"MIT"
] | 2 | 2021-10-14T11:30:21.000Z | 2021-10-14T11:55:50.000Z | setup.py | harsh020/synthetic_metric | acecba0150a37c58613a477918ad407373c4cd5c | [
"MIT"
] | null | null | null | import setuptools
setuptools.setup(
    # Distribution identity.
    name="synmetric",
    version="0.2.dev1",
    license='MIT',
    author="Harsh Soni",
    author_email="author@example.com",
    description="Metric to evaluate data quality for synthetic data.",
    # Project links.
    url="https://github.com/harsh020/synthetic_metric",
    download_url = 'https://github.com/harsh020/synthetic_metric/archive/v_02dev1.tar.gz',
    project_urls={
        "Bug Tracker": "https://github.com/harsh020/synthetic_metric/issues",
    },
    # PyPI trove classifiers (see https://pypi.org/classifiers/).
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # Ship every package discovered under the project root.
    packages=setuptools.find_packages(),
    python_requires=">=3.6",
    # Runtime dependencies.
    install_requires = [
        'numpy',
        'pandas',
        'scikit-learn',
        'scipy'
    ]
)
| 28.322581 | 90 | 0.624146 |
73ac2455924ff0001809acc001de20f6e6bc1656 | 813 | py | Python | neurokit2/microstates/__init__.py | danibene/NeuroKit | df0ab6696e7418cf8b8dcd3ed82dbf879fa61b3a | [
"MIT"
] | 1 | 2020-12-31T17:48:11.000Z | 2020-12-31T17:48:11.000Z | neurokit2/microstates/__init__.py | danibene/NeuroKit | df0ab6696e7418cf8b8dcd3ed82dbf879fa61b3a | [
"MIT"
] | null | null | null | neurokit2/microstates/__init__.py | danibene/NeuroKit | df0ab6696e7418cf8b8dcd3ed82dbf879fa61b3a | [
"MIT"
] | 2 | 2021-12-25T15:39:49.000Z | 2021-12-25T15:44:16.000Z | """Submodule for NeuroKit."""
from .microstates_clean import microstates_clean
from .microstates_peaks import microstates_peaks
from .microstates_static import microstates_static
from .microstates_dynamic import microstates_dynamic
from .microstates_complexity import microstates_complexity
from .microstates_segment import microstates_segment
from .microstates_classify import microstates_classify
from .microstates_plot import microstates_plot
from .microstates_findnumber import microstates_findnumber
# Public API re-exported at the package level; keep in sync with the
# submodule imports above.
__all__ = ["microstates_clean",
           "microstates_peaks",
           "microstates_static",
           "microstates_dynamic",
           "microstates_complexity",
           "microstates_segment",
           "microstates_classify",
           "microstates_plot",
           "microstates_findnumber"]
| 35.347826 | 58 | 0.771218 |
73ac5bc20db43b168b228169be2bbfd420f16a64 | 2,184 | py | Python | notario/tests/validators/test_hybrid.py | alfredodeza/notario | 036bdc8435778c6f20f059d3789c8eb8242cff92 | [
"MIT"
] | 4 | 2015-08-20T20:14:55.000Z | 2018-06-01T14:39:29.000Z | notario/tests/validators/test_hybrid.py | alfredodeza/notario | 036bdc8435778c6f20f059d3789c8eb8242cff92 | [
"MIT"
] | 9 | 2016-02-04T21:46:12.000Z | 2018-11-14T04:43:10.000Z | notario/tests/validators/test_hybrid.py | alfredodeza/notario | 036bdc8435778c6f20f059d3789c8eb8242cff92 | [
"MIT"
] | 4 | 2015-04-29T20:40:12.000Z | 2018-11-14T04:08:20.000Z | from pytest import raises
from notario.validators import Hybrid
from notario.exceptions import Invalid
from notario.decorators import optional
from notario import validate
| 29.513514 | 68 | 0.588828 |
73ac608fd669eeeca5d58b623c5bbec41cd2e0ea | 346 | py | Python | players/urls.py | OnerInce/nfl-rest_api | 8d66d68ae7f04476a1b9f509e69a9d0dc83bfcca | [
"Apache-2.0"
] | 2 | 2021-06-14T18:14:10.000Z | 2022-01-29T18:45:28.000Z | players/urls.py | OnerInce/nfl-rest_api | 8d66d68ae7f04476a1b9f509e69a9d0dc83bfcca | [
"Apache-2.0"
] | null | null | null | players/urls.py | OnerInce/nfl-rest_api | 8d66d68ae7f04476a1b9f509e69a9d0dc83bfcca | [
"Apache-2.0"
] | 1 | 2022-02-09T14:14:20.000Z | 2022-02-09T14:14:20.000Z | from django.urls import path, include
from . import views
from rest_framework import routers
# SimpleRouter auto-generates list/detail routes for each registered ViewSet.
router = routers.SimpleRouter()
router.register(r'players', views.PlayerView, basename='players')  # /players/
router.register(r'teams', views.TeamView, basename='teams')  # /teams/
urlpatterns = [
path('', views.APIWelcomeView),
path('', include((router.urls))),
] | 28.833333 | 66 | 0.736994 |
73ad356948f61ca0a0905878d21b428c799f6aa2 | 380 | py | Python | watch/migrations/0014_auto_20201101_2304.py | msyoki/Neighborhood | d7eb55ba7772388850d8bcf04a867aba3fa81665 | [
"Unlicense"
] | null | null | null | watch/migrations/0014_auto_20201101_2304.py | msyoki/Neighborhood | d7eb55ba7772388850d8bcf04a867aba3fa81665 | [
"Unlicense"
] | null | null | null | watch/migrations/0014_auto_20201101_2304.py | msyoki/Neighborhood | d7eb55ba7772388850d8bcf04a867aba3fa81665 | [
"Unlicense"
] | 1 | 2021-02-08T10:27:06.000Z | 2021-02-08T10:27:06.000Z | # Generated by Django 2.0.2 on 2020-11-01 20:04
from django.db import migrations, models
| 20 | 62 | 0.584211 |
73aed6f56861e4609809462a9a1cf35c41cc4da9 | 612 | py | Python | torchx/examples/apps/lightning_classy_vision/test/component_test.py | LaudateCorpus1/torchx | 9ee0fdbf63882ba836c00d7522f6850c0c6dc418 | [
"BSD-3-Clause"
] | 101 | 2021-06-12T20:00:09.000Z | 2022-03-31T11:14:35.000Z | torchx/examples/apps/lightning_classy_vision/test/component_test.py | LaudateCorpus1/torchx | 9ee0fdbf63882ba836c00d7522f6850c0c6dc418 | [
"BSD-3-Clause"
] | 340 | 2021-06-14T18:16:12.000Z | 2022-03-31T21:10:28.000Z | torchx/examples/apps/lightning_classy_vision/test/component_test.py | LaudateCorpus1/torchx | 9ee0fdbf63882ba836c00d7522f6850c0c6dc418 | [
"BSD-3-Clause"
] | 19 | 2021-06-13T06:17:21.000Z | 2022-03-28T19:28:00.000Z | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torchx.examples.apps.lightning_classy_vision.component as lightning_classy_vision
from torchx.components.component_test_base import ComponentTestCase
| 36 | 88 | 0.785948 |
73aff3784e37e6b27b43b9c61f5212221ec2b0ef | 1,270 | py | Python | app.py | cykreet/getV | 429833b94fe9c40c594290c9d4b163e8559a4033 | [
"MIT"
] | null | null | null | app.py | cykreet/getV | 429833b94fe9c40c594290c9d4b163e8559a4033 | [
"MIT"
] | null | null | null | app.py | cykreet/getV | 429833b94fe9c40c594290c9d4b163e8559a4033 | [
"MIT"
] | null | null | null | import requests
from sanic import Sanic
from sanic.response import json
from sanic_limiter import Limiter, get_remote_address
from bs4 import BeautifulSoup
app = Sanic()
# NOTE(review): ratelimit_handler is not defined in this chunk -- presumably
# the rate-limit error handler; confirm it exists elsewhere in the file.
app.error_handler.add(Exception, ratelimit_handler)
# Global rate limits keyed on the caller's remote address.
limiter = Limiter(app, global_limits=["1 per 3 seconds", "50 per hour"], key_func=get_remote_address)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=9500) | 36.285714 | 169 | 0.670866 |
73b067acf9b9f460405ab89ad75c34fdcfb06605 | 8,373 | py | Python | third_party/xiuminglib/xiuminglib/vis/video.py | leehsiu/nerfactor | 87f7d3ffa56bdbca925958a4b89e249d35006c80 | [
"Apache-2.0"
] | 183 | 2021-06-04T01:22:57.000Z | 2022-03-31T06:18:20.000Z | third_party/xiuminglib/xiuminglib/vis/video.py | leehsiu/nerfactor | 87f7d3ffa56bdbca925958a4b89e249d35006c80 | [
"Apache-2.0"
] | 40 | 2019-05-05T17:04:10.000Z | 2021-09-06T18:11:19.000Z | third_party/xiuminglib/xiuminglib/vis/video.py | leehsiu/nerfactor | 87f7d3ffa56bdbca925958a4b89e249d35006c80 | [
"Apache-2.0"
] | 26 | 2021-06-04T18:28:11.000Z | 2022-03-22T13:44:19.000Z | from os.path import join, dirname
import numpy as np
from .text import put_text
from .. import const
from ..os import makedirs
from ..imprt import preset_import
from ..log import get_logger
logger = get_logger()
def make_video(
        imgs, fps=24, outpath=None, method='matplotlib', dpi=96, bitrate=-1):
    """Writes a list of images into a grayscale or color video.
    Args:
        imgs (list(numpy.ndarray)): Each image should be of type ``uint8`` or
            ``uint16`` and of shape H-by-W (grayscale) or H-by-W-by-3 (RGB).
        fps (int, optional): Frame rate.
        outpath (str, optional): Where to write the video to (a .mp4 file).
            ``None`` means
            ``os.path.join(const.Dir.tmp, 'make_video.mp4')``.
        method (str, optional): Method to use: ``'matplotlib'``, ``'opencv'``,
            ``'video_api'``.
        dpi (int, optional): Dots per inch when using ``matplotlib``.
        bitrate (int, optional): Bit rate in kilobits per second when using
            ``matplotlib``; reasonable values include 7200.
    Writes
        - A video of the images.
    """
    if outpath is None:
        outpath = join(const.Dir.tmp, 'make_video.mp4')
    makedirs(dirname(outpath))
    # Validate input: at least one frame, unsigned-integer pixels, and a
    # uniform H-by-W across all frames.
    assert imgs, "Frame list is empty"
    for frame in imgs:
        assert np.issubdtype(frame.dtype, np.unsignedinteger), \
            "Image type must be unsigned integer"
    h, w = imgs[0].shape[:2]
    for frame in imgs[1:]:
        assert frame.shape[:2] == (h, w), \
            "All frames must have the same shape"
    if method == 'matplotlib':
        import matplotlib
        matplotlib.use('Agg')  # headless backend, set before importing pyplot
        import matplotlib.pyplot as plt
        from matplotlib import animation
        w_in, h_in = w / dpi, h / dpi
        fig = plt.figure(figsize=(w_in, h_in))
        Writer = animation.writers['ffmpeg'] # may require you to specify path
        writer = Writer(fps=fps, bitrate=bitrate)
        # NOTE(review): img_plt is not defined in this chunk -- presumably a
        # helper that renders one frame as a matplotlib artist; confirm it
        # exists elsewhere in the file.
        anim = animation.ArtistAnimation(fig, [(img_plt(x),) for x in imgs])
        anim.save(outpath, writer=writer)
        # If obscure error like "ValueError: Invalid file object: <_io.Buff..."
        # occurs, consider upgrading matplotlib so that it prints out the real,
        # underlying ffmpeg error
        plt.close('all')
    elif method == 'opencv':
        cv2 = preset_import('cv2', assert_success=True)
        # TODO: debug codecs (see http://www.fourcc.org/codecs.php)
        if outpath.endswith('.mp4'):
            # fourcc = cv2.VideoWriter_fourcc(*'MJPG')
            # fourcc = cv2.VideoWriter_fourcc(*'X264')
            fourcc = cv2.VideoWriter_fourcc(*'H264')
            # fourcc = 0x00000021
        elif outpath.endswith('.avi'):
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
        else:
            raise NotImplementedError("Video type of\n\t%s" % outpath)
        vw = cv2.VideoWriter(outpath, fourcc, fps, (w, h))
        for frame in imgs:
            if frame.ndim == 3:
                frame = frame[:, :, ::-1] # cv2 uses BGR
            vw.write(frame)
        vw.release()
    elif method == 'video_api':
        video_api = preset_import('video_api', assert_success=True)
        assert outpath.endswith('.webm'), "`video_api` requires .webm"
        # NOTE(review): this `h` shadows the frame height computed above.
        with video_api.write(outpath, fps=fps) as h:
            for frame in imgs:
                # Drop an alpha channel if present before writing.
                if frame.ndim == 3 and frame.shape[2] == 4:
                    frame = frame[:, :, :3]
                #frame = frame.astype(np.ubyte)
                h.add_frame(frame)
    else:
        raise ValueError(method)
    logger.debug("Images written as a video to:\n%s", outpath)
def make_comparison_video(
        imgs1, imgs2, bar_width=4, bar_color=(1, 0, 0), sweep_vertically=False,
        sweeps=1, label1='', label2='', font_size=None, font_ttf=None,
        label1_top_left_xy=None, label2_top_left_xy=None, **make_video_kwargs):
    """Writes two lists of images into a comparison video that toggles between
    two videos with a sweeping bar.
    Args:
        imgs? (list(numpy.ndarray)): Each image should be of type ``uint8`` or
            ``uint16`` and of shape H-by-W (grayscale) or H-by-W-by-3 (RGB).
        bar_width (int, optional): Width of the sweeping bar.
        bar_color (tuple(float), optional): Bar and label RGB, normalized to
            :math:`[0,1]`. Defaults to red.
        sweep_vertically (bool, optional): Whether to sweep vertically or
            horizontally.
        sweeps (int, optional): Number of sweeps.
        label? (str, optional): Label for each video.
        font_size (int, optional): Font size.
        font_ttf (str, optional): Path to the .ttf font file. Defaults to Arial.
        label?_top_left_xy (tuple(int), optional): The XY coordinate of the
            label's top left corner.
        make_video_kwargs (dict, optional): Keyword arguments for
            :func:`make_video`.
    Writes
        - A comparison video.
    """
    # Bar is perpendicular to sweep-along
    sweep_along = 0 if sweep_vertically else 1
    bar_along = 1 if sweep_vertically else 0
    # Number of frames
    n_frames = len(imgs1)
    assert n_frames == len(imgs2), \
        "Videos to be compared have different numbers of frames"
    img_shape = imgs1[0].shape
    # Bar color according to image dtype. Scale in float first and cast last:
    # casting the normalized color to an integer dtype *before* scaling (as
    # done previously) truncates any fractional channel (e.g., 0.5) to 0.
    img_dtype = imgs1[0].dtype
    bar_color = np.array(bar_color, dtype=float)
    if np.issubdtype(img_dtype, np.integer):
        bar_color = bar_color * np.iinfo(img_dtype).max
    bar_color = bar_color.astype(img_dtype)
    # Map from frame index to bar location, considering possibly multiple trips
    bar_locs = []
    for i in range(sweeps):
        ind = np.arange(0, img_shape[sweep_along])
        if i % 2 == 1: # reverse every other trip
            ind = ind[::-1]
        bar_locs.append(ind)
    bar_locs = np.hstack(bar_locs) # all possible locations
    ind = np.linspace(0, len(bar_locs) - 1, num=n_frames, endpoint=True)
    bar_locs = [bar_locs[int(x)] for x in ind] # uniformly sampled
    # Label locations
    if label1_top_left_xy is None:
        # Label 1 at top left corner
        label1_top_left_xy = (int(0.1 * img_shape[1]), int(0.05 * img_shape[0]))
    if label2_top_left_xy is None:
        if sweep_vertically:
            # Label 2 at bottom left corner
            label2_top_left_xy = (
                int(0.1 * img_shape[1]), int(0.75 * img_shape[0]))
        else:
            # Label 2 at top right corner
            label2_top_left_xy = (
                int(0.7 * img_shape[1]), int(0.05 * img_shape[0]))
    frames = []
    for i, (img1, img2) in enumerate(zip(imgs1, imgs2)):
        assert img1.shape == img_shape, f"`imgs1[{i}]` has a different shape"
        assert img2.shape == img_shape, f"`imgs2[{i}]` has a different shape"
        assert img1.dtype == img_dtype, f"`imgs1[{i}]` has a different dtype"
        assert img2.dtype == img_dtype, f"`imgs2[{i}]` has a different dtype"
        # Label the two images
        img1 = put_text(
            img1, label1, label_top_left_xy=label1_top_left_xy,
            font_size=font_size, font_color=bar_color, font_ttf=font_ttf)
        img2 = put_text(
            img2, label2, label_top_left_xy=label2_top_left_xy,
            font_size=font_size, font_color=bar_color, font_ttf=font_ttf)
        # Bar start and end
        bar_loc = bar_locs[i]
        bar_width_half = bar_width // 2
        bar_start = max(0, bar_loc - bar_width_half)
        bar_end = min(bar_loc + bar_width_half, img_shape[sweep_along])
        # Up to bar start, we show Image 1; bar end onwards, Image 2
        img1 = np.take(img1, range(bar_start), axis=sweep_along)
        img2 = np.take(
            img2, range(bar_end, img_shape[sweep_along]), axis=sweep_along)
        # Between the two images, we show the bar
        actual_bar_width = img_shape[
            sweep_along] - img1.shape[sweep_along] - img2.shape[sweep_along]
        reps = [1, 1, 1]
        reps[sweep_along] = actual_bar_width
        reps[bar_along] = img_shape[bar_along]
        bar_img = np.tile(bar_color, reps)
        frame = np.concatenate((img1, bar_img, img2), axis=sweep_along)
        frames.append(frame)
    make_video(frames, **make_video_kwargs)
| 37.886878 | 80 | 0.609817 |
73b135f20a4d854cdb5b09c10b76e9756be5c474 | 161 | py | Python | shipfunk_python/__init__.py | vilkasgroup/shipfunk_python | cd8a5414bda7e9670511c52d0b4df2efd11ee5d9 | [
"MIT"
] | null | null | null | shipfunk_python/__init__.py | vilkasgroup/shipfunk_python | cd8a5414bda7e9670511c52d0b4df2efd11ee5d9 | [
"MIT"
] | 2 | 2018-01-16T07:32:18.000Z | 2018-01-17T07:29:41.000Z | shipfunk_python/__init__.py | vilkasgroup/shipfunk_python | cd8a5414bda7e9670511c52d0b4df2efd11ee5d9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for Shipfunk."""
# Package metadata.
# NOTE(review): "Sarajrvi" looks mojibake-damaged (likely "Sarajärvi") --
# confirm the intended spelling before changing, as this is runtime data.
__author__ = """Jaana Sarajrvi"""
__email__ = 'jaana.sarajarvi@vilkas.fi'
__version__ = '0.1.1'
| 20.125 | 39 | 0.652174 |
73b14a8ac2d94f0475d3f40d5181eb41aedadcce | 638 | py | Python | vpc/nos/driver/ovs/ne.py | zhufawuwo/baton | 64c88750bc96b92e268b4903f34a1d5021c686f4 | [
"Apache-2.0"
] | null | null | null | vpc/nos/driver/ovs/ne.py | zhufawuwo/baton | 64c88750bc96b92e268b4903f34a1d5021c686f4 | [
"Apache-2.0"
] | null | null | null | vpc/nos/driver/ovs/ne.py | zhufawuwo/baton | 64c88750bc96b92e268b4903f34a1d5021c686f4 | [
"Apache-2.0"
] | null | null | null | #! python3
# coding: utf-8
from vpc.nos import NetworkElement,NetworkElementEvent,event_t,EventChain
if __name__ == "__main__":
pass | 20.580645 | 73 | 0.653605 |
73b18a00ca497be31f461b8bdce57d8afe3a826f | 1,307 | py | Python | cumulusci/core/config/BaseConfig.py | leboff/CumulusCI | 81edbb1d64f2cc215a951c570052a1e423821cc1 | [
"BSD-3-Clause"
] | 163 | 2018-09-13T18:49:34.000Z | 2022-03-25T08:37:15.000Z | cumulusci/core/config/BaseConfig.py | leboff/CumulusCI | 81edbb1d64f2cc215a951c570052a1e423821cc1 | [
"BSD-3-Clause"
] | 1,280 | 2018-09-11T20:09:37.000Z | 2022-03-31T18:40:21.000Z | cumulusci/core/config/BaseConfig.py | leboff/CumulusCI | 81edbb1d64f2cc215a951c570052a1e423821cc1 | [
"BSD-3-Clause"
] | 93 | 2018-09-13T07:29:22.000Z | 2022-03-26T23:15:48.000Z | import logging
| 28.413043 | 97 | 0.560826 |
73b21fcf6f7c734702d8957b8a9a200636e97246 | 8,995 | py | Python | scikit_algo/All.py | sankar-mukherjee/CoFee | d05b461a6cdd581be0f8084a804f02be3332ccdd | [
"Apache-2.0"
] | null | null | null | scikit_algo/All.py | sankar-mukherjee/CoFee | d05b461a6cdd581be0f8084a804f02be3332ccdd | [
"Apache-2.0"
] | null | null | null | scikit_algo/All.py | sankar-mukherjee/CoFee | d05b461a6cdd581be0f8084a804f02be3332ccdd | [
"Apache-2.0"
] | null | null | null | """
Created on Tue Feb 24 16:08:39 2015
@author: mukherjee
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing, metrics
from sklearn.learning_curve import learning_curve
# read Form data
DATA_FORM_FILE = 'all-merged-cat.csv'
#rawdata = pd.read_csv(DATA_FORM_FILE, usecols=np.r_[3,5:12,13:28,81:87,108])
rawdata = pd.read_csv(DATA_FORM_FILE)
#select features
# NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0; the modern
# equivalent is DataFrame.to_numpy() -- confirm the pinned pandas version.
posfeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[3:12]].astype(float)  # columns 3-11
posfeat_name = rawdata.columns.values[3:12]
lextypefeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[12:14]]  # columns 12-13
lextypefeat_name = rawdata.columns.values[12:14]
lexfeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[14:29]].astype(float)  # columns 14-28
lexfeat_name = rawdata.columns.values[14:29]
phonfeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[29:47]]
accoufeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[47:81]].astype(float)  # columns 47-80
accoufeat_name = rawdata.columns.values[47:81]
# The next two assignments overwrite phonfeat/lextypefeat with narrower slices.
phonfeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[29]].astype(float)
lextypefeat = pd.DataFrame.as_matrix(rawdata)[:,np.r_[13]]
lextypefeat_name = rawdata.columns.values[13:14].astype(object)
# feature name
feat_name = np.concatenate((posfeat_name,accoufeat_name,lexfeat_name),axis=0)
# Transforming categorical feature
le = preprocessing.LabelBinarizer()
le.fit(lextypefeat)
list(le.classes_)  # no-op inspection, likely left over from interactive use
lextypefeat = le.transform(lextypefeat)
#----------------------------------------------------------------------------------------------------
# select feature combination
featN = np.column_stack((posfeat,accoufeat))  # posfeat and accoufeat side by side
#featB = np.column_stack((lexfeat,lextypefeat))
featB = lexfeat
###------------------------------------------- PCA
#from sklearn.decomposition import PCA
#pca = PCA(n_components=4)
#####------------------------------------------- Randomized PCA
##from sklearn.decomposition import RandomizedPCA
##pca = RandomizedPCA(n_components=30, whiten=True)
###
#scale = pca.fit(feat1)
#feat1 = scale.fit_transform(feat1)
feat = np.column_stack((featN,featB))
# Zero out NaN/inf cells so scalers and classifiers downstream do not choke.
feat[np.isnan(feat)] = 0
feat[np.isinf(feat)] = 0
# select test labels
#Ytest = pd.DataFrame.as_matrix(rawdata)[:,20:26].astype(float)
label = pd.DataFrame.as_matrix(rawdata)[:,108]
#remove bad features as there is no label
scale = np.where(label == 'None')  # row indices without a usable label
label = np.delete(label,scale)
feat = np.delete(feat,scale,0)
#----------------------------------------------------------------------------------------------------
# Transforming categorical feature
le = preprocessing.LabelEncoder()
le.fit(label)
list(le.classes_)  # no-op inspection, likely left over from interactive use
label = le.transform(label)
# Create training and test data by partitioning (70/30 split).
nSamples = len(feat)
# Slice indices must be integers: the original float expressions
# (e.g. feat[:.7 * nSamples,:]) raise IndexError on modern NumPy.
nTrain = int(.7 * nSamples)
XtrainPos = feat[:nTrain, :]
YtrainPos = label[:nTrain]
XtestPos = feat[nTrain:, :]
YtestPos = label[nTrain:]
XtrainAll = feat  # full data set, used for cross-validation below
#----------------------------------------------------------------------------------------------------
#normalization of features
# Fit the scaler on the training split only, then apply it to both splits.
scale = preprocessing.StandardScaler().fit(XtrainPos)
XtrainPos = scale.transform(XtrainPos)
XtestPos = scale.transform(XtestPos)
# for whole data set
scaleAll = preprocessing.StandardScaler().fit(XtrainAll)
XtrainAll = scaleAll.transform(XtrainAll)
#scale = preprocessing.MinMaxScaler()
#XtrainPos = scale.fit_transform(XtrainPos)
#XtestPos = scale.transform(XtestPos)
#scaleAll = preprocessing.MinMaxScaler()
#XtrainAll = scaleAll.fit_transform(XtrainAll)
#scale = preprocessing.Normalizer().fit(XtrainPos)
#XtrainPos = scale.transform(XtrainPos)
#XtestPos = scale.transform(XtestPos)
#scaleAll = preprocessing.Normalizer().fit(XtrainAll)
#XtrainAll = scaleAll.transform(XtrainAll)
###------------------------------------------- RandomizedLogisticRegression
#from sklearn.linear_model import RandomizedLogisticRegression
#scale = RandomizedLogisticRegression()
#XtrainPos = scale.fit_transform(XtrainPos,YtrainPos)
#XtestPos = scale.transform(XtestPos)
#XtrainAll = scale.fit_transform(XtrainAll,label)
###------------------------------------------- PCA
#from sklearn.decomposition import PCA
#pca = PCA(n_components=30)
####------------------------------------------- Randomized PCA
#from sklearn.decomposition import RandomizedPCA
#pca = RandomizedPCA(n_components=30, whiten=True)
##
##
#scale = pca.fit(XtrainPos)
#XtrainPos = scale.fit_transform(XtrainPos)
#XtestPos = scale.fit_transform(XtestPos)
#scaleAll = pca.fit(XtrainAll)
#XtrainAll = scaleAll.transform(XtrainAll)
###------------------------------------------- LDA
#from sklearn.lda import LDA
#lda = LDA(n_components=4)
#scale = lda.fit(XtrainPos,YtrainPos)
#XtrainPos = scale.transform(XtrainPos)
#XtestPos = scale.transform(XtestPos)
#scaleAll = lda.fit(XtrainAll,label)
#XtrainAll = scaleAll.transform(XtrainAll)
#--------------------------------------------classification-------------------------------------------
##GradientBoost
#from sklearn.ensemble import GradientBoostingClassifier
#clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
# max_depth=1, random_state=0)
## SVM
#from sklearn import svm
#clf = svm.SVC()
#from sklearn.multiclass import OneVsOneClassifier
#from sklearn.multiclass import OutputCodeClassifier
#clf = OutputCodeClassifier(svm.SVC())
## RandomForest
# Active model: random forest with a minimum of 10 samples per leaf.
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(min_samples_leaf=10)
## SGD
#from sklearn.linear_model import SGDClassifier
#clf = SGDClassifier(loss="log", penalty="l2")
# CART
#from sklearn import tree
#clf = tree.DecisionTreeClassifier()
#
### AdaBoostClassifier
#from sklearn.ensemble import AdaBoostClassifier
#clf = AdaBoostClassifier(n_estimators=100)
# Gaussian Naive Bayes
#from sklearn.naive_bayes import GaussianNB
#clf = GaussianNB()
# KNN
#from sklearn import neighbors
##clf = neighbors.KNeighborsClassifier(n_neighbors=10,weights='distance')
#clf = neighbors.KNeighborsClassifier(n_neighbors=10)
##-------------------------------------------------Traning------------------
clf = clf.fit(XtrainPos, YtrainPos)
# Per-class precision/recall/F1 on the held-out test split.
print(metrics.classification_report(YtestPos, clf.predict(XtestPos)))
##--------------------------Crossvalidation 5 times using different split------------------------------
#from sklearn import cross_validation
#scores = cross_validation.cross_val_score(clf, XtrainAll, label, cv=3, scoring='f1')
#print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
####---------------------------------Check for overfeat-------------------------------------
# Learning curve over 10%..90% of the data, 10-fold CV at each training size.
train_sample_size, train_scores, test_scores = learning_curve(clf,
                            XtrainAll, label,
                            train_sizes=np.arange(0.1,1,0.1), cv=10)
#----------------------------------------Visualization---------------------------------------------
plt.xlabel("# Training sample")
plt.ylabel("Accuracy")
plt.grid();
mean_train_scores = np.mean(train_scores, axis=1)
mean_test_scores = np.mean(test_scores, axis=1)
std_train_scores = np.std(train_scores, axis=1)
std_test_scores = np.std(test_scores, axis=1)
gap = np.abs(mean_test_scores - mean_train_scores)  # train/CV gap, overfitting signal
g = plt.figure(1)
plt.title("Learning curves for %r\n"
          "Best test score: %0.2f - Gap: %0.2f" %
          (clf, mean_test_scores.max(), gap[-1]))
plt.plot(train_sample_size, mean_train_scores, label="Training", color="b")
plt.fill_between(train_sample_size, mean_train_scores - std_train_scores,
                 mean_train_scores + std_train_scores, alpha=0.1, color="b")
plt.plot(train_sample_size, mean_test_scores, label="Cross-validation",
         color="g")
plt.fill_between(train_sample_size, mean_test_scores - std_test_scores,
                 mean_test_scores + std_test_scores, alpha=0.1, color="g")
plt.legend(loc="lower right")
g.show()
## confusion matrix
#from sklearn.metrics import confusion_matrix
#cm = confusion_matrix(YtestPos,clf.predict(XtestPos))
## Show confusion matrix in a separate window
#plt.matshow(cm)
#plt.title('Confusion matrix')
#plt.colorbar()
#plt.ylabel('True label')
#plt.xlabel('Predicted label')
#plt.show()
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5  # bar centers, bottom to top
f = plt.figure(2,figsize=(18, 18))
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, feat_name[sorted_idx])  # label bars with the feature names
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.savefig('feature_importance')
f.show()
| 36.864754 | 104 | 0.642023 |
73b2b67943acda046ca7c7f56efd2e03603a7e68 | 4,140 | py | Python | tests/test_client.py | KazkiMatz/py-googletrans | c1d6d5d27c7386c2a1aa6c78dfe376dbb910f7a5 | [
"MIT"
] | null | null | null | tests/test_client.py | KazkiMatz/py-googletrans | c1d6d5d27c7386c2a1aa6c78dfe376dbb910f7a5 | [
"MIT"
] | 1 | 2020-11-28T18:53:18.000Z | 2020-11-28T18:53:18.000Z | tests/test_client.py | TashinAhmed/googletrans | 9c0014cdcdc22e1f146624279f8dd69c3c62e385 | [
"MIT"
] | null | null | null | from httpcore import TimeoutException
from httpcore._exceptions import ConnectError
from httpx import Timeout, Client, ConnectTimeout
from unittest.mock import patch
from pytest import raises
from googletrans import Translator
| 25.714286 | 73 | 0.68913 |
73b2dbd6e7f9c859fe75e459a5b5509630530b13 | 3,324 | py | Python | Network/class_func.py | Mobad225/S-DCNet | a5fff5da2e04441f1f9133944ad09bdf087896e6 | [
"MIT"
] | 153 | 2019-07-31T07:27:11.000Z | 2022-01-05T08:52:56.000Z | Network/class_func.py | Mobad225/S-DCNet | a5fff5da2e04441f1f9133944ad09bdf087896e6 | [
"MIT"
] | 17 | 2019-09-11T07:45:29.000Z | 2021-04-20T05:10:47.000Z | Network/class_func.py | Mobad225/S-DCNet | a5fff5da2e04441f1f9133944ad09bdf087896e6 | [
"MIT"
] | 30 | 2019-08-20T05:35:20.000Z | 2021-11-07T07:49:19.000Z | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# Func1: change density map into count map
# density map: batch size * 1 * w * h
# Func2: convert count to class (0->c-1)
# Func3: convert class (0->c-1) to count number
def Class2Count(pre_cls,label_indice):
    """Map predicted class labels back to count values.

    Args:
        pre_cls: tensor of class labels in [0, 1, ..., C-1], any shape,
            on CPU or GPU.
        label_indice: 1-D tensor or numpy array of the C-1 interval
            boundary points (0 is implied and not included).

    Returns:
        Tensor of count values with the same shape as ``pre_cls``, placed
        back on the GPU if ``pre_cls`` was on the GPU.
    """
    if isinstance(label_indice, np.ndarray):
        label_indice = torch.from_numpy(label_indice)
    label_indice = label_indice.squeeze()
    # Remember whether the result must be moved back to the GPU at the end.
    ret_on_gpu = (pre_cls.device.type == 'cuda')
    # Transform each interval into a representative count value:
    # class 0 -> 0.0, inner classes -> midpoint of their interval,
    # last class -> the last boundary itself.
    num_boundaries = label_indice.size()[0]
    label2count = [0.0]
    for i in range(num_boundaries):
        if i < num_boundaries - 1:
            label2count.append((label_indice[i] + label_indice[i + 1]) / 2)
        else:
            label2count.append(label_indice[i])
    label2count = torch.tensor(label2count).type(torch.FloatTensor)
    # Look up the count for every predicted class, then restore the shape.
    ori_size = pre_cls.size()
    flat_cls = pre_cls.reshape(-1).cpu().type(torch.LongTensor)
    pre_counts = torch.index_select(label2count, 0, flat_cls)
    pre_counts = pre_counts.reshape(ori_size)
    if ret_on_gpu:
        pre_counts = pre_counts.cuda()
    return pre_counts
if __name__ == '__main__':
    # Smoke test: map class labels to counts and back again.
    # NOTE(review): ``Count2Class`` is not visible in this excerpt -- it is
    # presumably defined elsewhere in this module; confirm before running.
    pre_cls = torch.Tensor([[0,1,2],[3,4,4]])
    label_indice =torch.Tensor([0.5,1,1.5,2])
    pre_counts = Class2Count(pre_cls,label_indice)
    print(pre_cls)
    print(label_indice)
    print(pre_counts)
    # Round-trip back to class labels for visual comparison with pre_cls.
    pre_cls = Count2Class(pre_counts,label_indice)
    print(pre_cls)
73b325d3f7c7dfbcd48251ddfe6b8d3299767cb6 | 540 | py | Python | src/python/pants/backend/codegen/avro/avro_subsystem.py | danxmoran/pants | 7fafd7d789747c9e6a266847a0ccce92c3fa0754 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/codegen/avro/avro_subsystem.py | danxmoran/pants | 7fafd7d789747c9e6a266847a0ccce92c3fa0754 | [
"Apache-2.0"
] | 22 | 2022-01-27T09:59:50.000Z | 2022-03-30T07:06:49.000Z | src/python/pants/backend/codegen/avro/avro_subsystem.py | danxmoran/pants | 7fafd7d789747c9e6a266847a0ccce92c3fa0754 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from pants.option.option_types import BoolOption
from pants.option.subsystem import Subsystem
| 27 | 75 | 0.709259 |
73b51f1631247fbf3daf41c2e06e80f0d22df79c | 11,864 | py | Python | shade/tests/unit/test_shade.py | mail2nsrajesh/shade | 65ce1a22896e52ff59a23a393e3bc4227f55f006 | [
"Apache-2.0"
] | null | null | null | shade/tests/unit/test_shade.py | mail2nsrajesh/shade | 65ce1a22896e52ff59a23a393e3bc4227f55f006 | [
"Apache-2.0"
] | null | null | null | shade/tests/unit/test_shade.py | mail2nsrajesh/shade | 65ce1a22896e52ff59a23a393e3bc4227f55f006 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import uuid
import testtools
import shade
from shade import _utils
from shade import exc
from shade.tests import fakes
from shade.tests.unit import base
RANGE_DATA = [
dict(id=1, key1=1, key2=5),
dict(id=2, key1=1, key2=20),
dict(id=3, key1=2, key2=10),
dict(id=4, key1=2, key2=30),
dict(id=5, key1=3, key2=40),
dict(id=6, key1=3, key2=40),
]
def test_list_servers_all_projects(self):
'''This test verifies that when list_servers is called with
`all_projects=True` that it passes `all_tenants=True` to nova.'''
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail'],
qs_elements=['all_tenants=True']),
complete_qs=True,
json={'servers': []}),
])
self.cloud.list_servers(all_projects=True)
self.assert_calls()
def test__nova_extensions(self):
body = [
{
"updated": "2014-12-03T00:00:00Z",
"name": "Multinic",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "NMN",
"description": "Multiple network support."
},
{
"updated": "2014-12-03T00:00:00Z",
"name": "DiskConfig",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "OS-DCF",
"description": "Disk Management Extension."
},
]
self.register_uris([
dict(method='GET',
uri='{endpoint}/extensions'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json=dict(extensions=body))
])
extensions = self.cloud._nova_extensions()
self.assertEqual(set(['NMN', 'OS-DCF']), extensions)
self.assert_calls()
def test__nova_extensions_fails(self):
self.register_uris([
dict(method='GET',
uri='{endpoint}/extensions'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
status_code=404),
])
with testtools.ExpectedException(
exc.OpenStackCloudURINotFound,
"Error fetching extension list for nova"
):
self.cloud._nova_extensions()
self.assert_calls()
def test__has_nova_extension(self):
body = [
{
"updated": "2014-12-03T00:00:00Z",
"name": "Multinic",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "NMN",
"description": "Multiple network support."
},
{
"updated": "2014-12-03T00:00:00Z",
"name": "DiskConfig",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "OS-DCF",
"description": "Disk Management Extension."
},
]
self.register_uris([
dict(method='GET',
uri='{endpoint}/extensions'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json=dict(extensions=body))
])
self.assertTrue(self.cloud._has_nova_extension('NMN'))
self.assert_calls()
def test__has_nova_extension_missing(self):
body = [
{
"updated": "2014-12-03T00:00:00Z",
"name": "Multinic",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "NMN",
"description": "Multiple network support."
},
{
"updated": "2014-12-03T00:00:00Z",
"name": "DiskConfig",
"links": [],
"namespace": "http://openstack.org/compute/ext/fake_xml",
"alias": "OS-DCF",
"description": "Disk Management Extension."
},
]
self.register_uris([
dict(method='GET',
uri='{endpoint}/extensions'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json=dict(extensions=body))
])
self.assertFalse(self.cloud._has_nova_extension('invalid'))
self.assert_calls()
def test_range_search(self):
filters = {"key1": "min", "key2": "20"}
retval = self.cloud.range_search(RANGE_DATA, filters)
self.assertIsInstance(retval, list)
self.assertEqual(1, len(retval))
self.assertEqual([RANGE_DATA[1]], retval)
def test_range_search_2(self):
filters = {"key1": "<=2", "key2": ">10"}
retval = self.cloud.range_search(RANGE_DATA, filters)
self.assertIsInstance(retval, list)
self.assertEqual(2, len(retval))
self.assertEqual([RANGE_DATA[1], RANGE_DATA[3]], retval)
def test_range_search_3(self):
filters = {"key1": "2", "key2": "min"}
retval = self.cloud.range_search(RANGE_DATA, filters)
self.assertIsInstance(retval, list)
self.assertEqual(0, len(retval))
def test_range_search_4(self):
filters = {"key1": "max", "key2": "min"}
retval = self.cloud.range_search(RANGE_DATA, filters)
self.assertIsInstance(retval, list)
self.assertEqual(0, len(retval))
def test_range_search_5(self):
filters = {"key1": "min", "key2": "min"}
retval = self.cloud.range_search(RANGE_DATA, filters)
self.assertIsInstance(retval, list)
self.assertEqual(1, len(retval))
self.assertEqual([RANGE_DATA[0]], retval)
| 34.99705 | 76 | 0.567515 |
73b53eb4cdb22bcc92d1f7a0efda19417f586729 | 3,780 | py | Python | plots_tournament.py | rradules/opponent_modelling_monfg | eb28546a6024613a76c942a2e53a48e6a8d83233 | [
"MIT"
] | 1 | 2021-03-04T04:40:50.000Z | 2021-03-04T04:40:50.000Z | plots_tournament.py | rradules/opponent_modelling_monfg | eb28546a6024613a76c942a2e53a48e6a8d83233 | [
"MIT"
] | null | null | null | plots_tournament.py | rradules/opponent_modelling_monfg | eb28546a6024613a76c942a2e53a48e6a8d83233 | [
"MIT"
] | null | null | null | import matplotlib
import pandas as pd
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
import seaborn as sns
from utils.utils import mkdir_p
sns.set()
sns.despine()
sns.set_context("paper", rc={"font.size": 18, "axes.labelsize": 18, "xtick.labelsize": 15, "ytick.labelsize": 15,
"legend.fontsize": 16})
sns.set_style('white', {'axes.edgecolor': "0.5", "pdf.fonttype": 42})
plt.gcf().subplots_adjust(bottom=0.15, left=0.14)
if __name__ == "__main__":
experiment = ['Q', 'Q']
info = '0M'
l1 = 1
l2 = 1
episodes = 5000
moocs = ['SER']
games = ['iag', 'iagR', 'iagM', 'iagRNE', 'iagNE'] # ['iagRNE'] # ['iag']['iagM']'iagNE',
for l1 in range(1, 2):
for l2 in range(1, 2):
for mooc in moocs:
for game in games:
path_data = f'results/tour_{experiment}_{game}_l{l1}_{l2}'
plot_results(game, mooc, path_data, experiment)
| 34.678899 | 113 | 0.579101 |
73b6522af809e94b26c9f10e4657b8e31125731b | 3,979 | py | Python | test/test_wrapper.py | bertsky/ocrd_keraslm | da105a8a8b68844389cd3e08307c05c9c6123350 | [
"Apache-2.0"
] | null | null | null | test/test_wrapper.py | bertsky/ocrd_keraslm | da105a8a8b68844389cd3e08307c05c9c6123350 | [
"Apache-2.0"
] | null | null | null | test/test_wrapper.py | bertsky/ocrd_keraslm | da105a8a8b68844389cd3e08307c05c9c6123350 | [
"Apache-2.0"
] | null | null | null | import os, sys
import shutil
from unittest import TestCase, main
from ocrd.resolver import Resolver
from ocrd_models.ocrd_page import to_xml
from ocrd_modelfactory import page_from_file
from ocrd_utils import MIMETYPE_PAGE
from ocrd_tesserocr.recognize import TesserocrRecognize
from ocrd_keraslm.wrapper import KerasRate
WORKSPACE_DIR = '/tmp/pyocrd-test-ocrd_keraslm'
PWD = os.path.dirname(os.path.realpath(__file__))
if __name__ == '__main__':
main()
| 43.25 | 103 | 0.605931 |
73b6bd8f4831b3ecbdd4ef2d6b98086651e18b51 | 16,415 | py | Python | meltingpot/python/configs/substrates/territory_rooms.py | Rohan138/meltingpot | d4e3839225b78babcedbbbf95cf747ff9e0a87b5 | [
"Apache-2.0"
] | null | null | null | meltingpot/python/configs/substrates/territory_rooms.py | Rohan138/meltingpot | d4e3839225b78babcedbbbf95cf747ff9e0a87b5 | [
"Apache-2.0"
] | null | null | null | meltingpot/python/configs/substrates/territory_rooms.py | Rohan138/meltingpot | d4e3839225b78babcedbbbf95cf747ff9e0a87b5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Territory: Rooms.
Example video: https://youtu.be/u0YOiShqzA4
See _Territory: Open_ for the general description of the mechanics at play in
this substrate.
In this substrate, _Territory: Rooms_, individuals start in segregated rooms
that strongly suggest a partition individuals could adhere to. They can break
down the walls of these regions and invade each other's "natural territory", but
the destroyed resources are lost forever. A peaceful partition is possible at
the start of the episode, and the policy to achieve it is easy to implement. But
if any agent gets too greedy and invades, it buys itself a chance of large
rewards, but also chances inflicting significant chaos and deadweight loss on
everyone if its actions spark wider conflict. The reason it can spiral out of
control is that once an agent's neighbor has left their natural territory then
it becomes rational to invade the space, leaving one's own territory undefended,
creating more opportunity for mischief by others.
"""
from typing import Any, Dict
from ml_collections import config_dict
from meltingpot.python.utils.substrates import colors
from meltingpot.python.utils.substrates import game_object_utils
from meltingpot.python.utils.substrates import shapes
from meltingpot.python.utils.substrates import specs
_COMPASS = ["N", "E", "S", "W"]
# This number just needs to be greater than the number of players.
MAX_ALLOWED_NUM_PLAYERS = 10
DEFAULT_ASCII_MAP = """
WRRRRRWWRRRRRWWRRRRRW
R RR RR R
R RR RR R
R P RR P RR P R
R RR RR R
R RR RR R
WRRRRRWWRRRRRWWRRRRRW
WRRRRRWWRRRRRWWRRRRRW
R RR RR R
R RR RR R
R P RR P RR P R
R RR RR R
R RR RR R
WRRRRRWWRRRRRWWRRRRRW
WRRRRRWWRRRRRWWRRRRRW
R RR RR R
R RR RR R
R P RR P RR P R
R RR RR R
R RR RR R
WRRRRRWWRRRRRWWRRRRRW
"""
# `prefab` determines which prefab game object to use for each `char` in the
# ascii map.
CHAR_PREFAB_MAP = {
"P": "spawn_point",
"W": "wall",
"R": {"type": "all", "list": ["resource", "reward_indicator"]},
}
WALL = {
"name": "wall",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "wall",
"stateConfigs": [{
"state": "wall",
"layer": "upperPhysical",
"sprite": "Wall",
}],
}
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["Wall",],
"spriteShapes": [shapes.WALL],
"palettes": [{"*": (95, 95, 95, 255),
"&": (100, 100, 100, 255),
"@": (109, 109, 109, 255),
"#": (152, 152, 152, 255)}],
"noRotates": [True]
}
},
{
"component": "Transform",
"kwargs": {
"position": (0, 0),
"orientation": "N"
}
},
{
"component": "AllBeamBlocker",
"kwargs": {}
},
]
}
SPAWN_POINT = {
"name": "spawn_point",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "playerSpawnPoint",
"stateConfigs": [{
"state": "playerSpawnPoint",
"layer": "logic",
"groups": ["spawnPoints"],
}],
}
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "invisible",
"spriteNames": [],
"spriteRGBColors": []
}
},
{
"component": "Transform",
"kwargs": {
"position": (0, 0),
"orientation": "N"
}
},
]
}
RESOURCE = {
"name": "resource",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "unclaimed",
"stateConfigs": [
{"state": "unclaimed",
"layer": "upperPhysical",
"sprite": "UnclaimedResourceSprite",
"groups": ["unclaimedResources"]},
{"state": "destroyed"},
],
}
},
{
"component": "Appearance",
"kwargs": {
"spriteNames": ["UnclaimedResourceSprite"],
# This color is grey.
"spriteRGBColors": [(64, 64, 64, 255)]
}
},
{
"component": "Transform",
"kwargs": {
"position": (0, 0),
"orientation": "N"
}
},
{
"component": "Resource",
"kwargs": {
"initialHealth": 2,
"destroyedState": "destroyed",
"reward": 1.0,
"rewardRate": 0.01,
"rewardDelay": 100
}
},
]
}
REWARD_INDICATOR = {
"name": "reward_indicator",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "inactive",
"stateConfigs": [
{"state": "active",
"layer": "overlay",
"sprite": "ActivelyRewardingResource"},
{"state": "inactive"},
],
}
},
{
"component": "Appearance",
"kwargs": {
"spriteNames": ["ActivelyRewardingResource",],
"renderMode": "ascii_shape",
"spriteShapes": [shapes.PLUS_IN_BOX],
"palettes": [{"*": (86, 86, 86, 65),
"#": (202, 202, 202, 105),
"@": (128, 128, 128, 135),
"x": (0, 0, 0, 0)}],
"noRotates": [True]
}
},
{
"component": "Transform",
"kwargs": {
"position": (0, 0),
"orientation": "N"
}
},
{
"component": "RewardIndicator",
"kwargs": {
}
},
]
}
# PLAYER_COLOR_PALETTES is a list with each entry specifying the color to use
# for the player at the corresponding index.
PLAYER_COLOR_PALETTES = []
for i in range(MAX_ALLOWED_NUM_PLAYERS):
PLAYER_COLOR_PALETTES.append(shapes.get_palette(colors.palette[i]))
# Set up player-specific settings for resources.
for j, color in enumerate(colors.palette[:MAX_ALLOWED_NUM_PLAYERS]):
sprite_name = "Color" + str(j + 1) + "ResourceSprite"
game_object_utils.get_first_named_component(
RESOURCE,
"StateManager")["kwargs"]["stateConfigs"].append({
"state": "claimed_by_" + str(j + 1),
"layer": "upperPhysical",
"sprite": sprite_name,
"groups": ["claimedResources"]
})
game_object_utils.get_first_named_component(
RESOURCE,
"Appearance")["kwargs"]["spriteNames"].append(sprite_name)
game_object_utils.get_first_named_component(
RESOURCE,
"Appearance")["kwargs"]["spriteRGBColors"].append(color)
# PREFABS is a dictionary mapping names to template game objects that can
# be cloned and placed in multiple locations accoring to an ascii map.
PREFABS = {
"wall": WALL,
"spawn_point": SPAWN_POINT,
"resource": RESOURCE,
"reward_indicator": REWARD_INDICATOR,
}
# Primitive action components.
# pylint: disable=bad-whitespace
# pyformat: disable
NOOP = {"move": 0, "turn": 0, "fireZap": 0, "fireClaim": 0}
FORWARD = {"move": 1, "turn": 0, "fireZap": 0, "fireClaim": 0}
STEP_RIGHT = {"move": 2, "turn": 0, "fireZap": 0, "fireClaim": 0}
BACKWARD = {"move": 3, "turn": 0, "fireZap": 0, "fireClaim": 0}
STEP_LEFT = {"move": 4, "turn": 0, "fireZap": 0, "fireClaim": 0}
TURN_LEFT = {"move": 0, "turn": -1, "fireZap": 0, "fireClaim": 0}
TURN_RIGHT = {"move": 0, "turn": 1, "fireZap": 0, "fireClaim": 0}
FIRE_ZAP = {"move": 0, "turn": 0, "fireZap": 1, "fireClaim": 0}
FIRE_CLAIM = {"move": 0, "turn": 0, "fireZap": 0, "fireClaim": 1}
# pyformat: enable
# pylint: enable=bad-whitespace
ACTION_SET = (
NOOP,
FORWARD,
BACKWARD,
STEP_LEFT,
STEP_RIGHT,
TURN_LEFT,
TURN_RIGHT,
FIRE_ZAP,
FIRE_CLAIM
)
# The Scene object is a non-physical object, its components implement global
# logic.
def create_scene():
  """Creates the global scene (a non-physical object holding global logic)."""
  state_manager = {
      "component": "StateManager",
      "kwargs": {
          "initialState": "scene",
          "stateConfigs": [{
              "state": "scene",
          }],
      }
  }
  transform = {
      "component": "Transform",
      "kwargs": {
          "position": (0, 0),
          "orientation": "N"
      },
  }
  episode_ending = {
      "component": "StochasticIntervalEpisodeEnding",
      "kwargs": {
          "minimumFramesPerEpisode": 1000,
          "intervalLength": 100,  # Set equal to unroll length.
          "probabilityTerminationPerInterval": 0.2
      }
  }
  return {
      "name": "scene",
      "components": [state_manager, transform, episode_ending],
  }
def create_avatar_object(player_idx: int) -> Dict[str, Any]:
  """Create an avatar object that always sees itself as blue."""
  # Lua is 1-indexed.
  lua_index = player_idx + 1
  color_palette = PLAYER_COLOR_PALETTES[player_idx]
  live_state_name = "player{}".format(lua_index)
  avatar_sprite_name = "avatarSprite{}".format(lua_index)

  # Alive state plus a wait state entered once the player is zapped out.
  state_manager = {
      "component": "StateManager",
      "kwargs": {
          "initialState": live_state_name,
          "stateConfigs": [
              # Initial player state.
              {"state": live_state_name,
               "layer": "upperPhysical",
               "sprite": avatar_sprite_name,
               "contact": "avatar",
               "groups": ["players"]},
              # Player wait state used when they have been zapped out.
              {"state": "playerWait",
               "groups": ["playerWaits"]},
          ]
      }
  }
  transform = {
      "component": "Transform",
      "kwargs": {
          "position": (0, 0),
          "orientation": "N"
      }
  }
  appearance = {
      "component": "Appearance",
      "kwargs": {
          "renderMode": "ascii_shape",
          "spriteNames": [avatar_sprite_name],
          "spriteShapes": [shapes.CUTE_AVATAR],
          "palettes": [color_palette],
          "noRotates": [True]
      }
  }
  avatar = {
      "component": "Avatar",
      "kwargs": {
          "index": lua_index,
          "aliveState": live_state_name,
          "waitState": "playerWait",
          "spawnGroup": "spawnPoints",
          "actionOrder": ["move",
                          "turn",
                          "fireZap",
                          "fireClaim"],
          "actionSpec": {
              "move": {"default": 0, "min": 0, "max": len(_COMPASS)},
              "turn": {"default": 0, "min": -1, "max": 1},
              "fireZap": {"default": 0, "min": 0, "max": 1},
              "fireClaim": {"default": 0, "min": 0, "max": 1},
          },
          "view": {
              "left": 5,
              "right": 5,
              "forward": 9,
              "backward": 1,
              "centered": False
          },
      }
  }
  # We do not normally use direction indicators for the MAGI suite, but we
  # do use them for territory because they function to claim any resources
  # they contact.
  direction_indicator = {
      "component": "AvatarDirectionIndicator",
      "kwargs": {"color": (202, 202, 202, 50)}
  }
  zapper = {
      "component": "Zapper",
      "kwargs": {
          "cooldownTime": 2,
          "beamLength": 3,
          "beamRadius": 1,
          "framesTillRespawn": 1e6,  # Effectively never respawn.
          "penaltyForBeingZapped": 0,
          "rewardForZapping": 0,
      }
  }
  resource_claimer = {
      "component": "ResourceClaimer",
      "kwargs": {
          "color": color_palette["*"],
          "playerIndex": lua_index,
          "beamLength": 2,
          "beamRadius": 0,
          "beamWait": 0,
      }
  }
  location_observer = {
      "component": "LocationObserver",
      "kwargs": {
          "objectIsAvatar": True,
          "alsoReportOrientation": True
      }
  }
  taste = {
      "component": "Taste",
      "kwargs": {
          "role": "none",
          "rewardAmount": 1.0,
      }
  }
  return {
      "name": "avatar",
      "components": [
          state_manager,
          transform,
          appearance,
          avatar,
          direction_indicator,
          zapper,
          {"component": "ReadyToShootObservation"},
          resource_claimer,
          location_observer,
          taste,
      ]
  }
def create_avatar_objects(num_players):
  """Returns list of avatar objects of length 'num_players'."""
  return [create_avatar_object(idx) for idx in range(num_players)]
def create_lab2d_settings(num_players: int) -> Dict[str, Any]:
  """Returns the lab2d settings."""
  simulation = {
      "map": DEFAULT_ASCII_MAP,
      "gameObjects": create_avatar_objects(num_players),
      "scene": create_scene(),
      "prefabs": PREFABS,
      "charPrefabMap": CHAR_PREFAB_MAP,
  }
  return {
      "levelName": "territory",
      "levelDirectory": "meltingpot/lua/levels",
      "numPlayers": num_players,
      # Define upper bound of episode length since episodes end stochastically.
      "maxEpisodeLengthFrames": 2000,
      "spriteSize": 8,
      "topology": "TORUS",  # Choose from ["BOUNDED", "TORUS"],
      "simulation": simulation,
  }
def get_config(factory=create_lab2d_settings):
  """Default configuration for training on the territory level.

  Args:
    factory: callable mapping a player count to a lab2d settings dict;
      defaults to ``create_lab2d_settings``.

  Returns:
    A ``config_dict.ConfigDict`` with substrate, action and spec settings.
  """
  config = config_dict.ConfigDict()
  # Basic configuration.
  config.num_players = 9
  # Lua script configuration.
  config.lab2d_settings = factory(config.num_players)
  # Action set configuration.
  config.action_set = ACTION_SET
  # Observation format configuration.
  config.individual_observation_names = [
      "RGB",
      "READY_TO_SHOOT",
      "POSITION",
      "ORIENTATION",
  ]
  config.global_observation_names = [
      "WORLD.RGB",
  ]
  # The specs of the environment (from a single-agent perspective).
  config.action_spec = specs.action(len(ACTION_SET))
  config.timestep_spec = specs.timestep({
      "RGB": specs.OBSERVATION["RGB"],
      "READY_TO_SHOOT": specs.OBSERVATION["READY_TO_SHOOT"],
      "POSITION": specs.OBSERVATION["POSITION"],
      "ORIENTATION": specs.OBSERVATION["ORIENTATION"],
      "WORLD.RGB": specs.rgb(168, 168),
  })
  return config
| 31.266667 | 80 | 0.50003 |
73b6d9825bd3d60f6c8e389a888e756f7df56287 | 5,269 | py | Python | aptitudetech_private/tasks.py | CloudGround/aptitudetech_private | d4d150226bd33ea0c76086264286ae7cae52457f | [
"MIT"
] | null | null | null | aptitudetech_private/tasks.py | CloudGround/aptitudetech_private | d4d150226bd33ea0c76086264286ae7cae52457f | [
"MIT"
] | null | null | null | aptitudetech_private/tasks.py | CloudGround/aptitudetech_private | d4d150226bd33ea0c76086264286ae7cae52457f | [
"MIT"
] | 1 | 2019-05-17T00:04:05.000Z | 2019-05-17T00:04:05.000Z | #-*- coding: utf-8 -*-
import frappe
import boto3
import boto3.session
import rows
import json
import zipfile
import tempfile
import sqlite3
from io import BytesIO
from frappe import _
from frappe.utils import cint, flt, today, getdate, get_first_day, add_to_date
try:
from frappe.utils import file_manager
with_file_manager = True
except ImportError:
with_file_manager = False
from frappe.core.doctype.file.file import create_new_folder
SQLVIEW = """
select lineitemusageaccountid as account,
lineitemproductcode as item_group,
productproductfamily as item_code,
productinstancetype as item_type,
pricingterm as item_term,
pricingunit as item_unit,
strftime('%Y-%m-%d', min(billbillingperiodstartdate)) as start_date,
strftime('%Y-%m-%d', max(billbillingperiodenddate)) as end_date,
sum(lineitemusageamount) as consumed_units,
sum(ifnull(lineitemunblendedcost, 0.0)) / sum(ifnull(lineitemusageamount, 1.0)) as cost_per_unit
from billing_aptech
where lineitemlineitemtype != "Tax"
group by lineitemusageaccountid, lineitemproductcode, productproductfamily, productinstancetype, pricingterm, pricingunit
order by lineitemusageaccountid, lineitemproductcode, productproductfamily, productinstancetype, pricingterm, pricingunit
"""
import_fields = u"""
lineItem/UsageAccountId
lineItem/LineItemType
lineItem/ProductCode
product/productFamily
product/instanceType
pricing/term
pricing/unit
bill/BillingPeriodStartDate
bill/BillingPeriodEndDate
lineItem/UsageAmount
lineItem/UnblendedCost
lineItem/UnblendedRate
""".strip().splitlines()
| 31.933333 | 139 | 0.72974 |
73b7ddfb55e7a791df45923bdbfc93d74e627ca1 | 1,983 | py | Python | udfs/tests/test_run_udfs.py | tslr/bigquery-utils | 67143b87a24bbbde684aa5ff061f80ffc27c71ed | [
"Apache-2.0"
] | null | null | null | udfs/tests/test_run_udfs.py | tslr/bigquery-utils | 67143b87a24bbbde684aa5ff061f80ffc27c71ed | [
"Apache-2.0"
] | null | null | null | udfs/tests/test_run_udfs.py | tslr/bigquery-utils | 67143b87a24bbbde684aa5ff061f80ffc27c71ed | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from parameterized import parameterized
from google.cloud import bigquery
from google.api_core.exceptions import GoogleAPICallError
from utils import Utils
if __name__ == '__main__':
unittest.main()
| 36.054545 | 87 | 0.642965 |
73b8798661011cebe8aed8c67f5ab3688edd6b74 | 1,195 | py | Python | pandas/tests/generic/test_panel.py | EternalLearner42/pandas | a2b414ccaab83e085d46e8217d5302a5d0f874f4 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/generic/test_panel.py | EternalLearner42/pandas | a2b414ccaab83e085d46e8217d5302a5d0f874f4 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/generic/test_panel.py | EternalLearner42/pandas | a2b414ccaab83e085d46e8217d5302a5d0f874f4 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from warnings import catch_warnings, simplefilter
from pandas import Panel
from pandas.util.testing import assert_panel_equal
from .test_generic import Generic
# run all the tests, but wrap each in a warning catcher
for t in ['test_rename', 'test_get_numeric_data',
'test_get_default', 'test_nonzero',
'test_downcast', 'test_constructor_compound_dtypes',
'test_head_tail',
'test_size_compat', 'test_split_compat',
'test_unexpected_keyword',
'test_stat_unexpected_keyword', 'test_api_compat',
'test_stat_non_defaults_args',
'test_truncate_out_of_bounds',
'test_metadata_propagation', 'test_copy_and_deepcopy',
'test_pct_change', 'test_sample']:
setattr(TestPanel, t, f())
| 30.641026 | 77 | 0.659414 |
73b8db5714154072049f41562b46bb8f89e7deee | 1,233 | py | Python | shortest-paths.py | SAUSy-Lab/map-speed-test | 0c9e78056017a247976ff63782c6366c5a724bf4 | [
"MIT"
] | 2 | 2017-03-31T02:16:57.000Z | 2019-07-13T14:31:04.000Z | shortest-paths.py | SAUSy-Lab/map-speed-test | 0c9e78056017a247976ff63782c6366c5a724bf4 | [
"MIT"
] | 10 | 2017-01-07T04:26:41.000Z | 2017-03-07T21:00:27.000Z | shortest-paths.py | SAUSy-Lab/map-speed-test | 0c9e78056017a247976ff63782c6366c5a724bf4 | [
"MIT"
] | null | null | null | # calculate shortest paths between OD pairs
# in the map_speed_od postgis table
# update the shortest path geometry into the table
import requests, json, psycopg2
# get OD pairs from DB
# NOTE(review): credentials are blank placeholders - fill in before running.
conn_string = (
    "host='localhost' dbname='' user='' password=''"
)
connection = psycopg2.connect(conn_string)
connection.autocommit = True
c = connection.cursor()
# Pull each OD pair's id and endpoint coordinates from PostGIS.
c.execute("""
    SELECT
        id,
        ST_X(ST_StartPoint(vector)) AS lon1,
        ST_Y(ST_StartPoint(vector)) AS lat1,
        ST_X(ST_EndPoint(vector)) AS lon2,
        ST_Y(ST_EndPoint(vector)) AS lat2
    FROM map_speed_od
""")
# iterate over DB pairs
for (rid, lon1, lat1, lon2, lat2) in c.fetchall():
    # request a route for these points from the OSRM server
    options = {
        'geometries': 'geojson',
        'overview': 'full',
        'steps': 'false',
        'annotations': 'false'
    }
    response = requests.get(
        ('http://206.167.182.17:5000/route/v1/transit/' +
         str(lon1) + ',' + str(lat1) + ';' + str(lon2) + ',' + str(lat2)),
        params=options,
        timeout=5
    )
    # parse the result
    j = json.loads(response.text)
    # Fix: print() function (the original used the Python 2 print statement,
    # which is a syntax error under Python 3).
    print(json.dumps(j['routes'][0]['geometry']))
    # insert the route geometry back into the table (parameterized query).
    c.execute("""
        UPDATE map_speed_od
        SET shortest_path = ST_SetSRID(ST_GeomFromGeoJSON(%s),4326)
        WHERE id = %s;
    """,
        (json.dumps(j['routes'][0]['geometry']), rid,)
    )
| 25.163265 | 103 | 0.687753 |
73b9218ed262aae642dc0406539a72aa91d888bc | 320 | py | Python | my_tools/tools_for_os/for_file.py | Alex2Yang97/waiting_time_project | 649dbaa4bd45b9b9974a5b71a8ee17fada07bcc9 | [
"MIT"
] | null | null | null | my_tools/tools_for_os/for_file.py | Alex2Yang97/waiting_time_project | 649dbaa4bd45b9b9974a5b71a8ee17fada07bcc9 | [
"MIT"
] | 12 | 2020-11-13T17:16:58.000Z | 2021-04-23T01:25:17.000Z | my_tools/tools_for_os/for_file.py | Alex2Yang97/waiting_time_project | 649dbaa4bd45b9b9974a5b71a8ee17fada07bcc9 | [
"MIT"
] | null | null | null | #-*- coding:utf-8 -*-
# @Time : 2020-02-15 15:49
# @Author : Zhirui(Alex) Yang
# @Function :
import os
| 17.777778 | 46 | 0.61875 |
73be179d5a3f60a254ebcb05e6ce4cdd7d7c207f | 7,842 | py | Python | tcp_tls_tunnel/hyper_http2_adapter.py | DSAdv/tcp-tls-tunnel-py | e9b5271e4cfae1df09b9fab77db4906b7cee8337 | [
"MIT"
] | 1 | 2021-08-30T21:03:41.000Z | 2021-08-30T21:03:41.000Z | tcp_tls_tunnel/hyper_http2_adapter.py | DSAdv/tcp-tls-tunnel-py | e9b5271e4cfae1df09b9fab77db4906b7cee8337 | [
"MIT"
] | 1 | 2022-03-31T12:02:29.000Z | 2022-03-31T12:02:29.000Z | tcp_tls_tunnel/hyper_http2_adapter.py | DSAdv/tcp-tls-tunnel-py | e9b5271e4cfae1df09b9fab77db4906b7cee8337 | [
"MIT"
] | 1 | 2021-08-28T14:35:18.000Z | 2021-08-28T14:35:18.000Z | import ssl
import socket
from typing import Tuple
from hyper.common.util import to_native_string
from urllib.parse import urlparse
from hyper import HTTP11Connection, HTTPConnection
from hyper.common.bufsocket import BufferedSocket
from hyper.common.exceptions import TLSUpgrade
from hyper.contrib import HTTP20Adapter
from hyper.tls import init_context
from tcp_tls_tunnel.utils import generate_basic_header, generate_proxy_url
from tcp_tls_tunnel.dto import ProxyOptions, AdapterOptions, TunnelOptions
from tcp_tls_tunnel.exceptions import ProxyError
def _create_tunnel(tunnel_opts: TunnelOptions,
                   dest_host: str,
                   dest_port: int,
                   server_name: str = None,
                   proxy: ProxyOptions = None,
                   timeout: int = None) -> Tuple[socket.socket, str]:
    """
    Sends CONNECT method to a proxy and returns a socket with established
    connection to the target.

    :param tunnel_opts: tunnel endpoint address, credentials and flags.
    :param dest_host: target host to tunnel to.
    :param dest_port: target port to tunnel to.
    :param server_name: value for the Server-Name header; falls back to
        ``dest_host`` when not given.
    :param proxy: optional upstream proxy, forwarded via the "Proxy" header.
    :param timeout: connection timeout in seconds (None = no timeout).
    :returns: socket, proto
    """
    # Tunnel control headers: basic-auth credentials plus per-connection
    # options ("Secure"/"HTTP2" are sent as "0"/"1" strings).
    headers = {
        "Authorization": generate_basic_header(tunnel_opts.auth_login, tunnel_opts.auth_password),
        "Client": tunnel_opts.client.value,
        "Connection": 'keep-alive',
        "Server-Name": server_name or dest_host,
        "Host": tunnel_opts.host,
        "Secure": str(int(tunnel_opts.secure)),
        "HTTP2": str(int(tunnel_opts.http2)),
    }
    if proxy:
        headers["Proxy"] = generate_proxy_url(proxy=proxy)
    conn = HTTP11Connection(tunnel_opts.host, tunnel_opts.port, timeout=timeout)
    conn.request('CONNECT', '%s:%d' % (dest_host, dest_port),
                 headers=headers)
    resp = conn.get_response()
    # The tunnel reports the negotiated protocol in the "Alpn-Protocol"
    # header; a missing header makes get() return None, and indexing None
    # raises TypeError, in which case we fall back to HTTP/1.1.
    try:
        proto = resp.headers.get("Alpn-Protocol")[0].decode('utf-8')
    except TypeError:
        proto = 'http/1.1'
    if resp.status != 200:
        raise ProxyError(
            "Tunnel connection failed: %d %s" %
            (resp.status, to_native_string(resp.reason)),
            response=resp
        )
    # hyper keeps the established socket on a private attribute; it is
    # returned so the caller can reuse the tunneled connection directly.
    return getattr(conn, "_sock"), proto
| 35.165919 | 98 | 0.561464 |
73bfa3453754f3fe35dd27f3bb51112f146dfd38 | 1,387 | py | Python | get_variances.py | OmnesRes/GRIMMER | 173c99ebdb6a9edb1242d24a791d0c5d778ff643 | [
"MIT"
] | 4 | 2017-02-20T12:03:29.000Z | 2018-10-27T14:06:07.000Z | get_variances.py | OmnesRes/GRIMMER | 173c99ebdb6a9edb1242d24a791d0c5d778ff643 | [
"MIT"
] | 1 | 2019-10-08T17:39:30.000Z | 2019-10-11T20:56:50.000Z | get_variances.py | OmnesRes/GRIMMER | 173c99ebdb6a9edb1242d24a791d0c5d778ff643 | [
"MIT"
] | null | null | null | from itertools import *
import time
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
#my own variance function runs much faster than numpy or the Python 3 ported statistics module
##rounding the means and variances helps to collapse them
precision_ave=16
precision_var=12
##perform runs
#n can probably just be set to 7 or even lower
#code will take a while, you should run copies of this script in parallel
for r in range(5,100):
n=30-r
if n<=7:
n=7
run(n,r)
| 26.169811 | 94 | 0.626532 |
73c1b51a0130489b93a3586b1b8afac1d574b406 | 621 | py | Python | utils/pagenation.py | Andrewpqc/URL-shortener | 74943b9f1f787e243a32e27eec425eb51f84e65e | [
"MIT"
] | 9 | 2018-07-01T11:19:05.000Z | 2021-12-30T03:00:03.000Z | utils/pagenation.py | Andrewpqc/URL-shortener | 74943b9f1f787e243a32e27eec425eb51f84e65e | [
"MIT"
] | 1 | 2020-12-09T23:46:04.000Z | 2020-12-09T23:46:04.000Z | utils/pagenation.py | Andrewpqc/URL-shortener | 74943b9f1f787e243a32e27eec425eb51f84e65e | [
"MIT"
] | 1 | 2018-06-06T15:10:57.000Z | 2018-06-06T15:10:57.000Z | # coding: utf-8
"""
paginate.py
```````````
: api
"""
from flask import url_for
def pagination(lit, page, perpage, endpoint):
    """Paginate *lit* and build navigation URLs.

    :param lit: full list of items to paginate
    :param page: 1-based index of the requested page
    :param perpage: number of items per page
    :param endpoint: Flask endpoint name passed to ``url_for`` for page URLs
    :returns: ``[current_items, (next_page_url, last_page_url)]`` where
        ``next_page_url`` is ``""`` when there is no following page
    """
    # Ceiling division: number of pages needed to hold every item.
    last = -(-len(lit) // perpage)
    current = lit[perpage * (page - 1): perpage * page]
    # Only pages strictly before the last one get a "next" link; the last
    # page (and any out-of-range page) gets an empty string instead.
    next_page = url_for(endpoint, page=page + 1) if page < last else ""
    last_page = url_for(endpoint, page=last)
    return [current, (next_page, last_page)]
| 20.7 | 50 | 0.558776 |
73c3330e453fbfebace232606fc0f58589eb269b | 5,272 | py | Python | app/default/rest_train.py | dbhack-aquila/aquila | 5fd31665fcfdb2a1ba341f5c98d44668e467add8 | [
"MIT"
] | 1 | 2017-12-16T14:51:54.000Z | 2017-12-16T14:51:54.000Z | app/default/rest_train.py | dbhack-aquila/aquila | 5fd31665fcfdb2a1ba341f5c98d44668e467add8 | [
"MIT"
] | null | null | null | app/default/rest_train.py | dbhack-aquila/aquila | 5fd31665fcfdb2a1ba341f5c98d44668e467add8 | [
"MIT"
] | null | null | null | import pandas as pd
from . import default
import wikipedia
import json
from flask import jsonify
import re
import os
import multiprocessing
import requests
import urllib
import hashlib
df = 0
wikipedia.set_lang("de")
def get_wikidata_id(article):
    """Find the Wikidata ID (e.g. ``Q42``) for a given Wikipedia article.

    Queries the German Wikipedia API for the article's ``wikibase_item``
    page property, following redirects.

    :param article: Wikipedia article title
    :returns: the Wikidata item ID string
    :raises KeyError: if the article has no ``wikibase_item`` page property
        (can happen for new or seldomly edited articles) -- TODO: handle
        this case gracefully instead of letting the KeyError escape
    """
    params = urllib.parse.urlencode({"action": "query",
                                     "prop": "pageprops",
                                     "ppprop": "wikibase_item",
                                     "redirects": 1,
                                     "format": "json",
                                     "titles": article})
    query_string = "https://de.wikipedia.org/w/api.php?%s" % params
    ret = requests.get(query_string).json()
    # The response keys pages by numeric page id; with a single title there
    # is exactly one entry.  (Renamed from `id`, which shadowed the builtin.)
    page_id = next(iter(ret["query"]["pages"]))
    return ret["query"]["pages"][page_id]["pageprops"]["wikibase_item"]
def get_wikidata_image(wikidata_id):
    """Return the 64px thumbnail URL of the image (property P18) attached
    to the Wikidata item *wikidata_id*, or None if the item has no image
    (a notice is printed in that case)."""
    entity_url = ("https://www.wikidata.org/wiki/Special:EntityData/%s.json"
                  % wikidata_id)
    entity = json.loads(requests.get(entity_url).text)
    claims = entity["entities"][wikidata_id]["claims"]
    try:
        # P18 is Wikidata's "image" property; Commons file names use
        # underscores instead of spaces.
        file_name = claims["P18"][0]["mainsnak"]["datavalue"]["value"].replace(" ", "_")
    except KeyError:
        print("No image on Wikidata.")
    else:
        # Commons thumbnails live in a two-level directory derived from the
        # md5 hex digest of the file name.
        digest = hashlib.md5(file_name.encode('utf-8')).hexdigest()
        return ("https://upload.wikimedia.org/wikipedia/commons/thumb/%s/%s/%s/64px-%s"
                % (digest[0], digest[:2], file_name, file_name))
def get_wikidata_desc(wikidata_id):
    """Return the German description of the Wikidata item for *wikidata_id*.

    NOTE(review): despite the parameter name, the argument is resolved
    through :func:`get_wikidata_id`, so callers appear to pass a Wikipedia
    article title -- confirm against call sites.
    """
    # Resolve the argument to a Q-id once and reuse it: the wbgetentities
    # response is keyed by the resolved Q-id, so indexing it with the raw
    # argument (as the previous version did) raised KeyError.
    entity_id = get_wikidata_id(wikidata_id)
    dapp = urllib.parse.urlencode({'action': 'wbgetentities',
                                   'ids': entity_id,
                                   'languages': 'de'})
    query_string = "https://www.wikidata.org/w/api.php?" + dapp
    res = requests.get(query_string).text
    print(query_string)
    item = json.loads(res)
    return item["entities"][entity_id]["descriptions"]["de"]["value"]
if __name__ == "__main__":
wid = get_wikidata_id("Limburger Dom")
image_url = get_wikidata_image(wid)
print(image_url) | 33.579618 | 190 | 0.624241 |
73c42c7f51f7b24a02fde60345ef5bd395fee637 | 246 | py | Python | tools/python_api_Easytest/out.py | xutongxin1/UnitAi-project | 226ccc7d73096fd3582a55bf76593756d8033892 | [
"MIT"
] | 5 | 2019-03-23T09:21:14.000Z | 2019-10-18T11:31:10.000Z | tools/python_api_Easytest/out.py | xutongxin1/UnitAi-project | 226ccc7d73096fd3582a55bf76593756d8033892 | [
"MIT"
] | null | null | null | tools/python_api_Easytest/out.py | xutongxin1/UnitAi-project | 226ccc7d73096fd3582a55bf76593756d8033892 | [
"MIT"
] | 2 | 2020-01-12T06:03:44.000Z | 2020-01-17T00:23:20.000Z | import json,requests
print(test) | 22.363636 | 52 | 0.686992 |
73c50231a058cf0ef478478e7a36afc7a3fd3081 | 3,481 | py | Python | src/cam_loop.py | stay-whimsical/screamchess | 4950d480f8f33db2bc3f2d94eea5dc6706ae8087 | [
"MIT"
] | 2 | 2019-06-19T20:25:12.000Z | 2021-06-04T04:43:36.000Z | src/cam_loop.py | pablo-meier/screamchess | 4950d480f8f33db2bc3f2d94eea5dc6706ae8087 | [
"MIT"
] | 8 | 2017-08-19T07:09:55.000Z | 2017-08-20T21:11:11.000Z | src/cam_loop.py | pablo-meier/screamchess | 4950d480f8f33db2bc3f2d94eea5dc6706ae8087 | [
"MIT"
] | 1 | 2020-04-17T00:19:43.000Z | 2020-04-17T00:19:43.000Z | from camera import board_image_processor as bip
from chess.models import *
import cv2
import numpy as np
from media.sound import *
if __name__ == '__main__':
main()
#main_get_color_ranges()
| 33.152381 | 72 | 0.564493 |
73c507797796f3d05c197c7fb4b73550955df8ce | 2,854 | py | Python | __train/preprocessing.py | aiddun/jazzCNN | f2d60d1b0697e71327e1d6d2bb9af6407e1253d1 | [
"MIT"
] | 1 | 2018-03-02T09:59:36.000Z | 2018-03-02T09:59:36.000Z | _evaluate/preprocessing.py | AidDun/jazzCNN | f2d60d1b0697e71327e1d6d2bb9af6407e1253d1 | [
"MIT"
] | 3 | 2020-11-13T17:17:54.000Z | 2022-02-09T23:27:21.000Z | _evaluate/preprocessing.py | AidDun/jazzCNN | f2d60d1b0697e71327e1d6d2bb9af6407e1253d1 | [
"MIT"
] | null | null | null | import numpy as np
from numpy import random
import glob
import scipy.io.wavfile
np.random.seed(4)
| 25.711712 | 131 | 0.501402 |
73c590592e5f6c7d80e9e638ac61992cbf513263 | 49 | py | Python | test/fixtures/python/analysis/main1.py | matsubara0507/semantic | 67899f701abc0f1f0cb4374d8d3c249afc33a272 | [
"MIT"
] | 8,844 | 2019-05-31T15:47:12.000Z | 2022-03-31T18:33:51.000Z | test/fixtures/python/analysis/main1.py | matsubara0507/semantic | 67899f701abc0f1f0cb4374d8d3c249afc33a272 | [
"MIT"
] | 401 | 2019-05-31T18:30:26.000Z | 2022-03-31T16:32:29.000Z | test/fixtures/python/analysis/main1.py | matsubara0507/semantic | 67899f701abc0f1f0cb4374d8d3c249afc33a272 | [
"MIT"
] | 504 | 2019-05-31T17:55:03.000Z | 2022-03-30T04:15:04.000Z | import a as b
import b.c as e
b.foo(1)
e.baz(1)
| 8.166667 | 15 | 0.632653 |
73c5ed5d0d7202bd31940ec8f1e4251cfbaeba8a | 10,054 | py | Python | app/views.py | allthingsclowd/K5_User_Onboarding_Example | 313b0033ceb015cca86574762915e02000d4bbbb | [
"MIT"
] | null | null | null | app/views.py | allthingsclowd/K5_User_Onboarding_Example | 313b0033ceb015cca86574762915e02000d4bbbb | [
"MIT"
] | null | null | null | app/views.py | allthingsclowd/K5_User_Onboarding_Example | 313b0033ceb015cca86574762915e02000d4bbbb | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""Summary - Flask Views Used to Control/Wrap a web UI
around the Add User Python Script
Author: Graham Land
Date: 08/12/16
Twitter: @allthingsclowd
Github: https://github.com/allthingscloud
Blog: https://allthingscloud.eu
"""
from flask import flash, render_template, session, request, redirect, url_for, json, make_response
from app import app
import os,binascii
import AddUserToProjectv3 as K5User
import k5APIwrappersV19 as K5API
from functools import wraps
app.secret_key = os.urandom(24)
JSESSION_ID = binascii.b2a_hex(os.urandom(16))
def login_required(f):
"""Summary - Decorator used to ensure that routes channeled through
this function are authenticated already
Otherwise they're returned to the login screen
"""
return decorated_function
| 41.717842 | 98 | 0.514919 |
73c828b9d6fbfbe855a2020cf66b582e67bedfef | 867 | py | Python | src/users/models.py | gabrielstork/django-to-do-list | 756f636fc531f131bbf0649c14272178ce13d957 | [
"MIT"
] | 6 | 2021-11-15T18:56:44.000Z | 2022-02-15T10:02:24.000Z | src/users/models.py | gabrielstork/django-to-do-list | 756f636fc531f131bbf0649c14272178ce13d957 | [
"MIT"
] | 1 | 2022-02-14T20:28:39.000Z | 2022-02-14T20:28:39.000Z | src/users/models.py | gabrielstork/django-to-do-list | 756f636fc531f131bbf0649c14272178ce13d957 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import AbstractUser
from django.core.validators import MinLengthValidator
from django.utils.translation import gettext_lazy as _
from django.db import models
from . import validators
| 27.967742 | 76 | 0.690888 |
73c9920f5c36cc9f240880ba80cb675e0c7cb5ca | 5,135 | py | Python | readux/books/abbyyocr.py | jpkarlsberg/readux | 50a895dcf7d64b753a07808e9be218cab3682850 | [
"Apache-2.0"
] | null | null | null | readux/books/abbyyocr.py | jpkarlsberg/readux | 50a895dcf7d64b753a07808e9be218cab3682850 | [
"Apache-2.0"
] | null | null | null | readux/books/abbyyocr.py | jpkarlsberg/readux | 50a895dcf7d64b753a07808e9be218cab3682850 | [
"Apache-2.0"
] | null | null | null | '''
:class:`eulxml.xmlmap.XmlObject` classes for working with ABBYY
FineReadux OCR XML.
Currently supports **FineReader6-schema-v1** and
**FineReader8-schema-v2**.
----
'''
from eulxml import xmlmap
def frns(xpath):
    """Expand a plain xpath so it matches every configured ABBYY
    FineReader namespace prefix.

    Example conversions:

    * ``page`` becomes ``f1:page|f2:page``
    * ``text/par`` becomes ``f1:text/f1:par|f2:text/f2:par``

    Prefixes are taken from :attr:`Base.ROOT_NAMESPACES`.
    """
    steps = xpath.split('/')
    alternatives = []
    for prefix in Base.ROOT_NAMESPACES.keys():
        alternatives.append(
            '/'.join('%s:%s' % (prefix, step) for step in steps))
    return '|'.join(alternatives)
| 36.161972 | 84 | 0.651996 |
73c9e7bedf96216a6d9365965c340b5bab6a369e | 742 | py | Python | Aulas/aula14.py | adonaifariasdev/cursoemvideo-python3 | 1fd35e45b24c52013fa3bc98e723971db8e6b7d1 | [
"MIT"
] | null | null | null | Aulas/aula14.py | adonaifariasdev/cursoemvideo-python3 | 1fd35e45b24c52013fa3bc98e723971db8e6b7d1 | [
"MIT"
] | null | null | null | Aulas/aula14.py | adonaifariasdev/cursoemvideo-python3 | 1fd35e45b24c52013fa3bc98e723971db8e6b7d1 | [
"MIT"
] | null | null | null | '''for c in range(1, 10):
print(c)
print('FIM')'''
'''c = 1
while c < 10:
print(c)
c += 1
print('FIM')'''
'''n = 1
while n != 0: #flag ou condio de parada
n = int(input('Digite um valor: '))
print('FIM')'''
'''r = 'S'
while r == 'S':
n = int(input('Digite um valor: '))
r = str(input('Quer continuar? [S/N]')).upper()
print('FIM')'''
# Read integers until the user enters 0, counting how many were even
# (totPar) and how many were odd (totaImpar).
n = 1
totPar = totaImpar = 0
while n != 0:
    n = int(input('Digite um valor: '))
    if n != 0:  # the terminating 0 is a sentinel and is not counted
        if n % 2 ==0:
            totPar += 1
        else:
            totaImpar += 1
print('Voc digitou {} numeros pares e {} numeros impares.'.format(totPar, totaImpar))
# NOTE: the sentinel 0 is deliberately excluded from both counts.
| 22.484848 | 86 | 0.540431 |
73cb0638be23bf0c8d4dd43c1030dd71337f3c61 | 2,330 | py | Python | tests/test_markdown_in_code_cells.py | st--/jupytext | f8e8352859cc22e17b11154d0770fd946c4a430a | [
"MIT"
] | 5,378 | 2018-09-01T22:03:43.000Z | 2022-03-31T06:51:42.000Z | tests/test_markdown_in_code_cells.py | st--/jupytext | f8e8352859cc22e17b11154d0770fd946c4a430a | [
"MIT"
] | 812 | 2018-08-31T08:26:13.000Z | 2022-03-30T18:12:11.000Z | tests/test_markdown_in_code_cells.py | st--/jupytext | f8e8352859cc22e17b11154d0770fd946c4a430a | [
"MIT"
] | 380 | 2018-09-02T01:40:07.000Z | 2022-03-25T13:57:23.000Z | """Issue #712"""
from nbformat.v4.nbbase import new_code_cell, new_notebook
from jupytext import reads, writes
from jupytext.cell_to_text import three_backticks_or_more
from jupytext.compare import compare, compare_notebooks
from .utils import requires_myst
| 17.923077 | 63 | 0.564807 |
73cd6b9d543cd1b702c785eacf0e7b85b40a9737 | 629 | py | Python | amy/workshops/migrations/0152_event_open_ttt_applications.py | code-review-doctor/amy | 268c1a199510457891459f3ddd73fcce7fe2b974 | [
"MIT"
] | 53 | 2015-01-10T17:39:19.000Z | 2019-06-12T17:36:34.000Z | amy/workshops/migrations/0152_event_open_ttt_applications.py | code-review-doctor/amy | 268c1a199510457891459f3ddd73fcce7fe2b974 | [
"MIT"
] | 1,176 | 2015-01-02T06:32:47.000Z | 2019-06-18T11:57:47.000Z | amy/workshops/migrations/0152_event_open_ttt_applications.py | code-review-doctor/amy | 268c1a199510457891459f3ddd73fcce7fe2b974 | [
"MIT"
] | 44 | 2015-01-03T15:08:56.000Z | 2019-06-09T05:33:08.000Z | # Generated by Django 2.1 on 2018-09-02 14:27
from django.db import migrations, models
| 33.105263 | 281 | 0.677266 |
73ced5d59e03f3d885db00b3181a8bf0e4e60e2a | 5,220 | py | Python | example/cifar10/fast_at.py | KuanKuanQAQ/ares | 40dbefc18f6438e1812021fe6d6c3195f22ca295 | [
"MIT"
] | 206 | 2020-12-31T09:43:11.000Z | 2022-03-30T07:02:41.000Z | example/cifar10/fast_at.py | afoolboy/ares | 89610d41fdde194e4ad916d29961aaed73383692 | [
"MIT"
] | 7 | 2021-01-26T06:45:44.000Z | 2022-02-26T05:25:48.000Z | example/cifar10/fast_at.py | afoolboy/ares | 89610d41fdde194e4ad916d29961aaed73383692 | [
"MIT"
] | 61 | 2020-12-29T14:02:41.000Z | 2022-03-26T14:21:10.000Z | ''' This file provides a wrapper class for Fast_AT (https://github.com/locuslab/fast_adversarial) model for CIFAR-10 dataset. '''
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import tensorflow as tf
from ares.model.pytorch_wrapper import pytorch_classifier_with_logits
from ares.utils import get_res_path
MODEL_PATH = get_res_path('./cifar10/cifar_model_weights_30_epochs.pth')
def PreActResNet18():
    """Build a PreActResNet-18: four stages with two pre-activation blocks
    each.  Relies on ``PreActBlock`` and ``PreActResNet`` defined elsewhere
    in this module (not visible in this chunk)."""
    return PreActResNet(PreActBlock, [2,2,2,2])
if __name__ == '__main__':
    # The pretrained checkpoint lives on Google Drive and cannot be fetched
    # automatically, so create the target directory and ask the user to
    # download the file manually if it is missing.
    if not os.path.exists(MODEL_PATH):
        if not os.path.exists(os.path.dirname(MODEL_PATH)):
            os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)
        url = 'https://drive.google.com/file/d/1XM-v4hqi9u8EDrQ2xdCo37XXcM9q-R07/view'
        print('Please download "{}" to "{}".'.format(url, MODEL_PATH))
| 37.021277 | 129 | 0.638314 |
73cf094cf77e18c95fada7abbb805a0feed41fec | 526 | py | Python | auto_pilot/common/registrable.py | farrellsc/zAutoPilot | 652d93690237dcb21c3cbdbdad95f917b7fec6e3 | [
"MIT"
] | 1 | 2018-03-05T08:27:58.000Z | 2018-03-05T08:27:58.000Z | auto_pilot/common/registrable.py | farrellsc/zAutoPilot | 652d93690237dcb21c3cbdbdad95f917b7fec6e3 | [
"MIT"
] | null | null | null | auto_pilot/common/registrable.py | farrellsc/zAutoPilot | 652d93690237dcb21c3cbdbdad95f917b7fec6e3 | [
"MIT"
] | null | null | null | from typing import Callable, TypeVar, List
T = TypeVar('T')
| 23.909091 | 51 | 0.65019 |
73cf528b5a42e68ea53f81fc68bbf5a7a0f2cf10 | 688 | py | Python | noheavenbot/cogs/commands/testing.py | Molanito13/noheaven-bot | ad126d4601321ecabff9d1d214ce7d3f4e258c3e | [
"MIT"
] | 3 | 2018-10-13T14:05:24.000Z | 2018-12-25T21:40:21.000Z | noheavenbot/cogs/commands/testing.py | Molanito13/noheaven-bot | ad126d4601321ecabff9d1d214ce7d3f4e258c3e | [
"MIT"
] | 2 | 2018-10-08T14:33:39.000Z | 2020-03-02T18:00:47.000Z | noheavenbot/cogs/commands/testing.py | Molanito13/noheaven-bot | ad126d4601321ecabff9d1d214ce7d3f4e258c3e | [
"MIT"
] | 5 | 2018-10-08T14:18:58.000Z | 2020-11-01T17:55:51.000Z | from discord.ext.commands import command, Cog
from noheavenbot.utils.constants import TEXTCHANNELS
from discord import Member
from noheavenbot.utils.database_tables.table_users import Users
from noheavenbot.utils.validator import has_role as check_role
| 28.666667 | 112 | 0.709302 |
73cfd3a5b8cd1e7653bb83ccce83e87f0876fda2 | 6,174 | py | Python | mayan/apps/linking/tests/test_smart_link_condition_views.py | atitaya1412/Mayan-EDMS | bda9302ba4b743e7d829ad118b8b836221888172 | [
"Apache-2.0"
] | 343 | 2015-01-05T14:19:35.000Z | 2018-12-10T19:07:48.000Z | mayan/apps/linking/tests/test_smart_link_condition_views.py | atitaya1412/Mayan-EDMS | bda9302ba4b743e7d829ad118b8b836221888172 | [
"Apache-2.0"
] | 191 | 2015-01-03T00:48:19.000Z | 2018-11-30T09:10:25.000Z | mayan/apps/linking/tests/test_smart_link_condition_views.py | atitaya1412/Mayan-EDMS | bda9302ba4b743e7d829ad118b8b836221888172 | [
"Apache-2.0"
] | 257 | 2019-05-14T10:26:37.000Z | 2022-03-30T03:37:36.000Z | from mayan.apps.testing.tests.base import GenericViewTestCase
from ..events import event_smart_link_edited
from ..permissions import permission_smart_link_edit
from .mixins import (
SmartLinkConditionViewTestMixin, SmartLinkTestMixin,
SmartLinkViewTestMixin
)
| 33.737705 | 75 | 0.698737 |
73d0507c967519673d3c90287e9f91022857b10e | 19,105 | py | Python | P1.py | chinmaydas96/CarND-LaneLines-P1 | be8e03257962314d6adea68634d053d5f0550510 | [
"MIT"
] | null | null | null | P1.py | chinmaydas96/CarND-LaneLines-P1 | be8e03257962314d6adea68634d053d5f0550510 | [
"MIT"
] | null | null | null | P1.py | chinmaydas96/CarND-LaneLines-P1 | be8e03257962314d6adea68634d053d5f0550510 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # Self-Driving Car Engineer Nanodegree
#
#
# ## Project: **Finding Lane Lines on the Road**
# ***
# In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below.
#
# Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.
#
# In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.
#
# ---
# Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.
#
# **Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**
#
# ---
# **The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**
#
# ---
#
# <figure>
# <img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
# <figcaption>
# <p></p>
# <p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
# </figcaption>
# </figure>
# <p></p>
# <figure>
# <img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
# <figcaption>
# <p></p>
# <p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
# </figcaption>
# </figure>
# **Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
# ## Import Packages
# In[1]:
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
# ## Read in an Image
# In[2]:
#reading in an image
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
# ## Ideas for Lane Detection Pipeline
# **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
#
# `cv2.inRange()` for color selection
# `cv2.fillPoly()` for regions selection
# `cv2.line()` to draw lines on an image given endpoints
# `cv2.addWeighted()` to coadd / overlay two images
# `cv2.cvtColor()` to grayscale or change color
# `cv2.imwrite()` to output images to file
# `cv2.bitwise_and()` to apply a mask to an image
#
# **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
# ## Helper Functions
# Below are some helper functions to help get you started. They should look familiar from the lesson!
# In[3]:
import math
def grayscale(img):
    """Applies the Grayscale transform.

    This will return an image with only one color channel, but NOTE: to see
    the returned image as grayscale (assuming your grayscaled image is
    called 'gray') you should call plt.imshow(gray, cmap='gray').

    :param img: color image in RGB channel order (as read by
        matplotlib.image.imread)
    :returns: single-channel grayscale image
    """
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Or use BGR2GRAY if you read an image with cv2.imread()
    # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
    """Applies the Canny transform.

    :param img: input image (typically grayscale)
    :param low_threshold: lower hysteresis threshold for edge linking
    :param high_threshold: upper hysteresis threshold for strong edges
    :returns: binary edge map
    """
    return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
    """Applies a Gaussian Noise kernel.

    :param img: input image
    :param kernel_size: side length of the square Gaussian kernel
        (should be odd)
    :returns: blurred image
    """
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
    """Mask *img* so that only the polygon formed by *vertices* remains.

    Pixels inside the polygon are kept; everything outside is set to
    black.  `vertices` should be a numpy array of integer points.
    """
    # Start from an all-black mask with the same shape as the input.
    mask = np.zeros_like(img)

    # The fill value must match the channel count of the input image:
    # a tuple of 255s for color images, a scalar 255 for grayscale.
    if len(img.shape) > 2:
        fill_color = (255,) * img.shape[2]
    else:
        fill_color = 255

    # Paint the polygon white on the mask, then keep only those pixels.
    cv2.fillPoly(mask, vertices, fill_color)
    return cv2.bitwise_and(img, mask)
def draw_lines_new(img, lines, color=[255, 0, 0], thickness=6):
    """Draw one averaged/extrapolated left and right lane line onto *img*.

    Hough segments in *lines* are split into two groups by slope sign, the
    mean slope of each group is used to extrapolate every segment to a
    fixed top y (320) and the image bottom, and the averaged endpoints are
    drawn as two solid lines.  Mutates *img* in place; returns None.

    NOTE(review): ``color=[255, 0, 0]`` is a mutable default argument --
    harmless here because it is never mutated, but worth replacing with a
    tuple.  The positive-slope group is labelled "left" below; in image
    coordinates (y grows downward) positive slope is usually the *right*
    lane -- the labels may be swapped, TODO confirm against the output.
    """
    ## slope of every Hough segment, indexed in lockstep with `lines`
    all_slopes = np.zeros((len(lines)))
    ## create an empty array for left lines
    left_line_slope = []
    ## create an empty array for right lines
    right_line_slope = []
    # compute each segment's slope (raises ZeroDivisionError on vertical
    # segments, x1 == x2 -- not handled here)
    for index,line in enumerate(lines):
        for x1,y1,x2,y2 in line:
            all_slopes[index] = (y2-y1)/(x2-x1)
    # partition slopes by sign; note mean() of an empty selection is nan
    left_line_slope = all_slopes[all_slopes > 0]
    # get all left line slope if it is negetive
    right_line_slope = all_slopes[all_slopes < 0]
    ## mean value of left slope and right slope
    m_l = left_line_slope.mean()
    m_r = right_line_slope.mean()
    # Create empty list for all the left points and right points
    final_x4_l = []
    final_x3_l = []
    final_x4_r = []
    final_x3_r = []
    ## fixed y-coordinates: top of the region of interest and image bottom
    y4 = 320
    y3 = img.shape[0]
    ## For each segment, extrapolate both endpoints to y4 and y3 along the
    ## group's mean slope and average the two resulting x values.
    for index,line in enumerate(lines):
        for x1,y1,x2,y2 in line:
            m = (y2-y1)/(x2-x1)
            if m > 0 :
                final_x4_l.append(int(((x1 + (y4 - y1) / m_l) + (x2 + (y4 - y2) / m_l))/ 2))
                final_x3_l.append(int(((x1 + (y3 - y1) / m_l) + (x2 + (y3 - y2) / m_l))/ 2))
            else:
                final_x4_r.append(int(((x1 + (y4 - y1) / m_r) + (x2 + (y4 - y2) / m_r))/ 2))
                final_x3_r.append(int(((x1 + (y3 - y1) / m_r) + (x2 + (y3 - y2) / m_r))/ 2))
    try :
        ## average the candidate x values for each endpoint
        x4_l = int(sum(final_x4_l)/ len(final_x4_l))
        x4_r = int(sum(final_x4_r)/ len(final_x4_r))
        x3_l = int(sum(final_x3_l)/ len(final_x3_l))
        x3_r = int(sum(final_x3_r)/ len(final_x3_r))
        ## Draw the left line and right line
        cv2.line(img, (x4_l, y4), (x3_l, y3), color, thickness)
        cv2.line(img, (x4_r, y4), (x3_r, y3), color, thickness)
    except:
        # NOTE(review): bare except silently skips the frame when one side
        # has no segments (ZeroDivisionError / ValueError from int(nan)) --
        # intentional best-effort for video, but it also hides real bugs;
        # narrowing to the specific exceptions would be safer.
        pass
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """Run a probabilistic Hough transform on a Canny edge image.

    `img` should be the output of a Canny transform.  Returns a new black
    color image of the same height/width with the detected lane lines
    drawn on it (via ``draw_lines_new``).
    """
    segments = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                               minLineLength=min_line_len,
                               maxLineGap=max_line_gap)
    overlay = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines_new(overlay, segments)
    return overlay
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, alpha=0.8, beta=1., gamma=0.):
    """Blend the line overlay into the original frame.

    `img` is the output of hough_lines() -- an image with lines drawn on
    it (black everywhere else).  `initial_img` should be the image before
    any processing.  The result is computed as:

        initial_img * alpha + img * beta + gamma

    NOTE: initial_img and img must be the same shape!  The original source
    used Greek-letter parameter names that were stripped during text
    extraction, leaving invalid syntax; they are restored here as
    alpha/beta/gamma, matching cv2.addWeighted's argument order.
    """
    return cv2.addWeighted(initial_img, alpha, img, beta, gamma)
# ## Test Images
#
# Build your pipeline to work on the images in the directory "test_images"
# **You should make sure your pipeline works well on these images before you try the videos.**
# In[4]:
import os
os.listdir("test_images/")
# ## Build a Lane Finding Pipeline
#
#
# Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
#
# Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
# In[18]:
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
process_test_images('test_images','test_images_output')
# In[19]:
# In[20]:
os.listdir('test_images')
# In[21]:
# Checking in an image
plt.figure(figsize=(15,8))
plt.subplot(121)
image = mpimg.imread('test_images/solidYellowCurve.jpg')
plt.imshow(image)
plt.title('Original image')
plt.subplot(122)
image = mpimg.imread('test_images_output/whiteCarLaneSwitch.jpg')
plt.imshow(image)
plt.title('Output image')
plt.show()
# ## Test on Videos
#
# You know what's cooler than drawing lanes over images? Drawing lanes over video!
#
# We can test our solution on two provided videos:
#
# `solidWhiteRight.mp4`
#
# `solidYellowLeft.mp4`
#
# **Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
#
# **If you get an error that looks like this:**
# ```
# NeedDownloadError: Need ffmpeg exe.
# You can download it by calling:
# imageio.plugins.ffmpeg.download()
# ```
# **Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**
# In[9]:
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
# In[10]:
# Let's try the one with the solid white lane on the right first ...
# In[11]:
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
white_clip.write_videofile(white_output, audio=False)
# ## Improve the draw_lines() function
#
# **At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".**
#
# **Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**
# Now for the one with the solid yellow lane on the left. This one's more tricky!
# In[13]:
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
yellow_clip.write_videofile(yellow_output, audio=False)
# In[16]:
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
challenge_clip.write_videofile(challenge_output, audio=False)
| 40.997854 | 638 | 0.702277 |
73d14617d94420a3d56d21a483a4a8f9476f65c1 | 170 | py | Python | notebooks/container/__init__.py | DanieleBaranzini/sktime-tutorial-pydata-amsterdam-2020 | eb9d76a8dc7fff29e4123b940200d58eed87147c | [
"BSD-3-Clause"
] | 114 | 2020-06-16T09:29:30.000Z | 2022-03-12T09:06:52.000Z | notebooks/container/__init__.py | DanieleBaranzini/sktime-tutorial-pydata-amsterdam-2020 | eb9d76a8dc7fff29e4123b940200d58eed87147c | [
"BSD-3-Clause"
] | 5 | 2020-11-06T13:02:26.000Z | 2021-06-10T18:34:37.000Z | notebooks/container/__init__.py | DanieleBaranzini/sktime-tutorial-pydata-amsterdam-2020 | eb9d76a8dc7fff29e4123b940200d58eed87147c | [
"BSD-3-Clause"
] | 62 | 2020-06-16T09:25:05.000Z | 2022-03-01T21:02:10.000Z | from container.base import TimeBase
from container.array import TimeArray, TimeDtype
from container.timeseries import TimeSeries
from container.timeframe import TimeFrame | 42.5 | 48 | 0.876471 |
73d30c85b213e414209b78284449266b653e1713 | 558 | py | Python | spiketools/utils/base.py | claire98han/SpikeTools | f1cdffd50e2cbdb75961a716425c4665aa930f54 | [
"Apache-2.0"
] | 1 | 2022-03-09T19:40:37.000Z | 2022-03-09T19:40:37.000Z | spiketools/utils/base.py | claire98han/SpikeTools | f1cdffd50e2cbdb75961a716425c4665aa930f54 | [
"Apache-2.0"
] | 35 | 2021-09-28T15:13:31.000Z | 2021-11-26T04:38:08.000Z | spiketools/utils/base.py | claire98han/SpikeTools | f1cdffd50e2cbdb75961a716425c4665aa930f54 | [
"Apache-2.0"
] | 4 | 2021-09-28T14:56:24.000Z | 2022-03-09T21:00:31.000Z | """Base utility functions, that manipulate basic data structures, etc."""
###################################################################################################
###################################################################################################
def flatten(lst):
"""Flatten a list of lists into a single list.
Parameters
----------
lst : list of list
A list of embedded lists.
Returns
-------
lst
A flattened list.
"""
return [item for sublist in lst for item in sublist]
| 26.571429 | 99 | 0.378136 |
73d374874a532014fc2ba903875cc4289b921e60 | 11,593 | py | Python | zentral/contrib/osquery/forms.py | mikemcdonald/zentral | 4aa03937abfbcea6480aa04bd99f4da7b8dfc923 | [
"Apache-2.0"
] | null | null | null | zentral/contrib/osquery/forms.py | mikemcdonald/zentral | 4aa03937abfbcea6480aa04bd99f4da7b8dfc923 | [
"Apache-2.0"
] | null | null | null | zentral/contrib/osquery/forms.py | mikemcdonald/zentral | 4aa03937abfbcea6480aa04bd99f4da7b8dfc923 | [
"Apache-2.0"
] | 1 | 2020-09-09T19:26:04.000Z | 2020-09-09T19:26:04.000Z | from django import forms
from zentral.core.probes.forms import BaseCreateProbeForm
from zentral.utils.forms import validate_sha256
from .probes import (OsqueryProbe, OsqueryComplianceProbe,
OsqueryDistributedQueryProbe, OsqueryFileCarveProbe,
OsqueryFIMProbe)
# OsqueryProbe
# OsqueryComplianceProbe
KeyFormSet = forms.formset_factory(KeyForm,
formset=BaseKeyFormSet,
min_num=1, max_num=10, extra=0, can_delete=True)
# OsqueryDistributedQueryProbe
# OsqueryFileCarveProbe
# FIM probes
| 35.344512 | 114 | 0.547399 |
73d5dcabb54b57daa8b78e26015c8bd966917221 | 197 | py | Python | src/dataclay/communication/grpc/messages/logicmodule/__init__.py | kpavel/pyclay | 275bc8af5c57301231a20cca1cc88556a9c84c79 | [
"BSD-3-Clause"
] | 1 | 2020-04-16T17:09:15.000Z | 2020-04-16T17:09:15.000Z | src/dataclay/communication/grpc/messages/logicmodule/__init__.py | kpavel/pyclay | 275bc8af5c57301231a20cca1cc88556a9c84c79 | [
"BSD-3-Clause"
] | 35 | 2019-11-06T17:06:16.000Z | 2021-04-12T16:27:20.000Z | src/dataclay/communication/grpc/messages/logicmodule/__init__.py | kpavel/pyclay | 275bc8af5c57301231a20cca1cc88556a9c84c79 | [
"BSD-3-Clause"
] | 1 | 2020-05-06T11:28:16.000Z | 2020-05-06T11:28:16.000Z |
""" Class description goes here. """
"""Package containing gRPC classes."""
__author__ = 'Enrico La Sala <enrico.lasala@bsc.es>'
__copyright__ = '2017 Barcelona Supercomputing Center (BSC-CNS)'
| 24.625 | 64 | 0.725888 |
73db434f1dcc511c2a6170ca3b1d4a1d255f07e3 | 87 | py | Python | src/cms/models/offers/__init__.py | mckinly/cms-django | c9995a3bfab6ee2d02f2406a7f83cf91b7ccfcca | [
"Apache-2.0"
] | 14 | 2020-12-03T07:56:30.000Z | 2021-10-30T13:09:50.000Z | src/cms/models/offers/__init__.py | Integreat/integreat-cms | b3f80964a6182d714f26ac229342b47e1c7c4f29 | [
"Apache-2.0"
] | 367 | 2020-11-20T00:34:20.000Z | 2021-12-14T15:20:42.000Z | src/cms/models/offers/__init__.py | mckinly/cms-django | c9995a3bfab6ee2d02f2406a7f83cf91b7ccfcca | [
"Apache-2.0"
] | 3 | 2021-02-09T18:46:52.000Z | 2021-12-07T10:41:39.000Z | """
This package contains :class:`~cms.models.offers.offer_template.OfferTemplate`
"""
| 21.75 | 78 | 0.758621 |
73dc1ffc39f60e86bf599c00df7b537997fbf251 | 5,150 | py | Python | service/audio_trigger_test.py | nicolas-f/noisesensor | fc007fe5e03b0deca0863d987cb6776be1cd2bef | [
"BSD-3-Clause"
] | 2 | 2020-03-29T21:58:45.000Z | 2021-09-21T12:43:15.000Z | service/audio_trigger_test.py | nicolas-f/noisesensor | fc007fe5e03b0deca0863d987cb6776be1cd2bef | [
"BSD-3-Clause"
] | null | null | null | service/audio_trigger_test.py | nicolas-f/noisesensor | fc007fe5e03b0deca0863d987cb6776be1cd2bef | [
"BSD-3-Clause"
] | 1 | 2019-02-19T14:53:01.000Z | 2019-02-19T14:53:01.000Z | import numpy
from scipy.spatial import distance
import matplotlib.pyplot as plt
import math
import matplotlib.ticker as mtick
freqs = [20, 25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000, 10000, 12500]
# from scipy
# from scipy
# from scipy
trigger = [40.49, 39.14, 34.47, 30.5, 39.54, 31.98, 38.37, 43.84, 36.09, 43.72, 40.55, 39.25, 39.15, 38.36, 38.3, 36.58,
39.9, 47.76, 51.64, 37.2, 44.89, 46.6, 51.08, 37.77, 28, 29.59, 30.25, 23.16, 25.74]
weight = [0.04,0.04,0.04,0.04,0.04,0.04,0.04,0.14,0.14,0.14,0.14,0.14,0.14,0.14,0.14,0.14,0.14,0.14,0.14, 0.24, 0.41,
0.60, 0.80, 0.94, 1.0, 0.94, 0.80, 0.60, 0.41]
ref_spectrum = numpy.genfromtxt('test/test2_far.csv', delimiter=',', skip_header=1, usecols=range(5, 34))
test1_spectrum = numpy.genfromtxt('test/test1_near.csv', delimiter=',', skip_header=1, usecols=range(5, 34))
test2_spectrum = numpy.genfromtxt('test/test2_far_far.csv', delimiter=',', skip_header=1, usecols=range(5, 34))
test3_spectrum = numpy.genfromtxt('test/test_background.csv', delimiter=',', skip_header=1, usecols=range(5, 34))
dist0 = numpy.ones(len(ref_spectrum)) - [distance.cosine(trigger, ref_spectrum[idfreq], w=weight) for idfreq in range(len(ref_spectrum))]
dist1 = numpy.ones(len(ref_spectrum)) - [distance.cosine(trigger, test1_spectrum[idfreq], w=weight) for idfreq in range(len(ref_spectrum))]
dist2 = numpy.ones(len(ref_spectrum)) - [distance.cosine(trigger, test2_spectrum[idfreq], w=weight) for idfreq in range(len(ref_spectrum))]
dist3 = numpy.ones(len(ref_spectrum)) - [distance.cosine(trigger, test3_spectrum[idfreq], w=weight) for idfreq in range(len(ref_spectrum))]
dist0_bis = numpy.ones(len(ref_spectrum)) - [dist_cosine(trigger, ref_spectrum[idfreq], w=weight) for idfreq in range(len(ref_spectrum))]
#print(numpy.around(dist0_bis - dist0, 3))
ref_spectrum = numpy.rot90(ref_spectrum)
test1_spectrum = numpy.rot90(test1_spectrum)
test2_spectrum = numpy.rot90(test2_spectrum)
test3_spectrum = numpy.rot90(test3_spectrum)
fig, axes = plt.subplots(nrows=4, ncols=3, constrained_layout=True)
gs = axes[0, 0].get_gridspec()
axes[0, 1].imshow(ref_spectrum)
autocolor(axes[0, 2].bar(numpy.arange(len(dist0)), dist0))
axes[1, 1].imshow(test1_spectrum)
autocolor(axes[1, 2].bar(numpy.arange(len(dist1)), dist1))
axes[2, 1].imshow(test2_spectrum)
autocolor(axes[2, 2].bar(numpy.arange(len(dist2)), dist2))
axes[3, 1].imshow(test3_spectrum)
axes[3, 2].bar(numpy.arange(len(dist2)), dist3)
for ax in axes[0:, 0]:
ax.remove()
axbig = fig.add_subplot(gs[0:, 0])
axbig.set_title("Spectrum trigger")
axbig.imshow(numpy.rot90([trigger]))
for i in range(len(axes)):
axes[i, 2].set_ylim([0.95, 1.0])
axes[i, 1].set_yticks(range(len(freqs))[::5])
axes[i, 1].set_yticklabels([str(ylab) + " Hz" for ylab in freqs[::5]][::-1])
axes[i, 1].set_xticks(range(len(ref_spectrum[0]))[::20])
axes[i, 1].set_xticklabels([str(xlabel)+" s" % xlabel for xlabel in numpy.arange(0, 10, 0.125)][::20])
axes[i, 2].set_xticks(range(len(ref_spectrum[0]))[::20])
axes[i, 2].set_xticklabels([str(xlabel)+" s" % xlabel for xlabel in numpy.arange(0, 10, 0.125)][::20])
axes[i, 2].set_ylabel("Cosine similarity (%)")
axes[i, 2].yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
axes[i, 1].set_title("Spectrogram "+str(i)+" (dB)")
axbig.set_yticks(range(len(freqs)))
axbig.set_yticklabels([str(ylab) + " Hz" for ylab in freqs][::-1])
axbig.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
plt.show()
| 37.591241 | 162 | 0.665825 |
73ddae6e14c41c647c3dc794212f25f68df13789 | 1,094 | py | Python | Python/6-hc_sr04-sensor.py | matr1xprogrammer/raspberry_pi-iot | 7ff8247fde839a23dd75720c58f3b04d86485ec4 | [
"MIT"
] | 2 | 2017-02-18T12:05:25.000Z | 2017-02-18T12:15:53.000Z | Python/6-hc_sr04-sensor.py | matr1xprogrammer/raspberry_pi-iot | 7ff8247fde839a23dd75720c58f3b04d86485ec4 | [
"MIT"
] | null | null | null | Python/6-hc_sr04-sensor.py | matr1xprogrammer/raspberry_pi-iot | 7ff8247fde839a23dd75720c58f3b04d86485ec4 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# HC-SR04 Ultrasonic ranging sensor
#
import RPi.GPIO as GPIO
import sys, time
try:
GPIO.setmode(GPIO.BCM)
TRIG = 23
ECHO = 24
print "Distance measurement in progress..."
GPIO.setup(TRIG, GPIO.OUT)
GPIO.setup(ECHO, GPIO.IN)
GPIO.output(TRIG, False)
while True:
print "Waiting for sensor to settle"
time.sleep(2)
GPIO.output(TRIG, True)
time.sleep(0.00001)
GPIO.output(TRIG, False)
while GPIO.input(ECHO) == 0:
pulse_start = time.time()
while GPIO.input(ECHO) == 1:
pulse_end = time.time()
pulse_duration = pulse_end - pulse_start
distance = pulse_duration * 17150
distance = round(distance, 2)
print "Distance: ", distance, "cm"
except KeyboardInterrupt:
GPIO.cleanup()
print("<Ctrl+C> pressed... exiting.")
except:
GPIO.cleanup()
print("Error: {0} {1}".format(sys.exc_info()[0], sys.exc_info()[1]))
| 22.791667 | 74 | 0.543876 |
73de45d1436eebf32a4bbacaf18feaafc9502e50 | 10,651 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/arista/eos/plugins/module_utils/network/eos/config/ospfv3/ospfv3.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/arista/eos/plugins/module_utils/network/eos/config/ospfv3/ospfv3.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/arista/eos/plugins/module_utils/network/eos/config/ospfv3/ospfv3.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
"""
The eos_ospfv3 config file.
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to its desired end-state is
created.
"""
import re
from ansible.module_utils.six import iteritems
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
dict_merge,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.resource_module import (
ResourceModule,
)
from ansible_collections.arista.eos.plugins.module_utils.network.eos.facts.facts import (
Facts,
)
from ansible_collections.arista.eos.plugins.module_utils.network.eos.rm_templates.ospfv3 import (
Ospfv3Template,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
get_from_dict,
)
| 36.351536 | 103 | 0.529903 |
73de5fb73d8473474f580b5f20b98adc8660e07b | 1,141 | py | Python | platypush/plugins/logger/__init__.py | BlackLight/runbullet | 8d26c8634d2677b4402f0a21b9ab8244b44640db | [
"MIT"
] | 3 | 2017-11-03T17:03:36.000Z | 2017-11-10T06:38:15.000Z | platypush/plugins/logger/__init__.py | BlackLight/runbullet | 8d26c8634d2677b4402f0a21b9ab8244b44640db | [
"MIT"
] | 14 | 2017-11-04T11:46:37.000Z | 2017-12-11T19:15:27.000Z | platypush/plugins/logger/__init__.py | BlackLight/runbullet | 8d26c8634d2677b4402f0a21b9ab8244b44640db | [
"MIT"
] | null | null | null | from platypush.plugins import Plugin, action
# vim:sw=4:ts=4:et:
| 21.12963 | 57 | 0.531113 |
73de6cd753fb9320e7590a96928403d694712cd8 | 1,632 | py | Python | hc/front/tests/test_add_pdc.py | IfBkg/healthchecks | dcd8a74c6b0bcdb0065e7c27d5b6639823400562 | [
"BSD-3-Clause"
] | 1 | 2020-07-13T15:33:31.000Z | 2020-07-13T15:33:31.000Z | hc/front/tests/test_add_pdc.py | IfBkg/healthchecks | dcd8a74c6b0bcdb0065e7c27d5b6639823400562 | [
"BSD-3-Clause"
] | 53 | 2020-11-27T14:55:01.000Z | 2021-04-22T10:01:13.000Z | hc/front/tests/test_add_pdc.py | IfBkg/healthchecks | dcd8a74c6b0bcdb0065e7c27d5b6639823400562 | [
"BSD-3-Clause"
] | null | null | null | from django.test.utils import override_settings
from hc.api.models import Channel
from hc.test import BaseTestCase
| 34 | 76 | 0.677083 |
73dee1fd408bd1037f09660c2312f58f954869d8 | 994 | py | Python | atcoder/corp/codethxfes2014b_e.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | 1 | 2018-11-12T15:18:55.000Z | 2018-11-12T15:18:55.000Z | atcoder/corp/codethxfes2014b_e.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | atcoder/corp/codethxfes2014b_e.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | import sys
sys.setrecursionlimit(3000)
r, c = map(int, input().split())
table = [[0] * c for _ in range(r)]
rs, cs = map(lambda x:int(x) - 1, input().split())
rg, cg = map(lambda x:int(x) - 1, input().split())
n = int(input())
draw = [list(map(int, input().split())) for _ in range(n)]
for ri, ci, hi, wi in draw:
ri -= 1
ci -= 1
for i in range(ri, ri+hi):
for j in range(ci, ci+wi):
table[i][j] = 1
if table[rs][cs] != 1 or table[rg][cg] != 1:
print('NO')
else:
print('YES' if check(rs, cs) else 'NO')
| 28.4 | 67 | 0.524145 |
73df0b517cdf0b8ebc3a55ea196f1562c83f9f1c | 4,329 | py | Python | tests/test_bullet_train.py | masschallenge/bullet-train-python-client | bcec653c0b4ed65779ab4e1a2f809810c684be00 | [
"BSD-3-Clause"
] | null | null | null | tests/test_bullet_train.py | masschallenge/bullet-train-python-client | bcec653c0b4ed65779ab4e1a2f809810c684be00 | [
"BSD-3-Clause"
] | null | null | null | tests/test_bullet_train.py | masschallenge/bullet-train-python-client | bcec653c0b4ed65779ab4e1a2f809810c684be00 | [
"BSD-3-Clause"
] | null | null | null | import json
import logging
from unittest import mock, TestCase
from bullet_train import BulletTrain
import os
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
TEST_API_URL = 'https://test.bullet-train.io/api'
TEST_IDENTIFIER = 'test-identity'
TEST_FEATURE = 'test-feature'
| 34.632 | 113 | 0.736891 |
73df243fb4b55e390ea6a1111a32c8c6671d261d | 3,105 | py | Python | plim/console.py | spollard/Plim | 7689de85364691063ed5c43a891c433f9ebef5b9 | [
"MIT"
] | 85 | 2015-01-08T20:15:54.000Z | 2022-03-12T21:51:27.000Z | plim/console.py | spollard/Plim | 7689de85364691063ed5c43a891c433f9ebef5b9 | [
"MIT"
] | 18 | 2015-02-27T14:59:08.000Z | 2021-09-24T10:27:19.000Z | plim/console.py | spollard/Plim | 7689de85364691063ed5c43a891c433f9ebef5b9 | [
"MIT"
] | 14 | 2015-02-26T07:20:42.000Z | 2022-02-01T17:52:16.000Z | """
This module contains entry points for command-line utilities provided by Plim package.
"""
import sys
import os
import argparse
import codecs
from pkg_resources import get_distribution
from pkg_resources import EntryPoint
from mako.template import Template
from mako.lookup import TemplateLookup
from .util import PY3K
def plimc(args=None, stdout=None):
"""This is the `plimc` command line utility
:param args: list of command-line arguments. If None, then ``sys.argv[1:]`` will be used.
:type args: list or None
:param stdout: file-like object representing stdout. If None, then ``sys.stdout`` will be used.
Custom stdout is used for testing purposes.
:type stdout: None or a file-like object
"""
# Parse arguments
# ------------------------------------
cli_parser = argparse.ArgumentParser(description='Compile plim source files into mako files.')
cli_parser.add_argument('source', help="path to source plim template")
cli_parser.add_argument('-o', '--output', help="write result to FILE.")
cli_parser.add_argument('-e', '--encoding', default='utf-8', help="content encoding")
cli_parser.add_argument('-p', '--preprocessor', default='plim:preprocessor',
help="Preprocessor instance that will be used for parsing the template")
cli_parser.add_argument('-H', '--html', action='store_true', help="Render HTML output instead of Mako template")
cli_parser.add_argument('-V', '--version', action='version',
version='Plim {}'.format(get_distribution("Plim").version))
if args is None:
args = sys.argv[1:]
args = cli_parser.parse_args(args)
# Get custom preprocessor, if specified
# -------------------------------------
preprocessor_path = args.preprocessor
# Add an empty string path, so modules located at the current working dir
# are reachable and considered in the first place (see issue #32).
sys.path.insert(0, '')
preprocessor = EntryPoint.parse('x={}'.format(preprocessor_path)).load(False)
# Render to html, if requested
# ----------------------------
if args.html:
root_dir = os.path.dirname(os.path.abspath(args.source))
template_file = os.path.basename(args.source)
lookup = TemplateLookup(directories=[root_dir],
input_encoding=args.encoding,
output_encoding=args.encoding,
preprocessor=preprocessor)
content = lookup.get_template(template_file).render_unicode()
else:
with codecs.open(args.source, 'rb', args.encoding) as fd:
content = preprocessor(fd.read())
# Output
# ------------------------------------
if args.output is None:
if stdout is None:
stdout = PY3K and sys.stdout.buffer or sys.stdout
fd = stdout
content = codecs.encode(content, 'utf-8')
else:
fd = codecs.open(args.output, 'wb', args.encoding)
try:
fd.write(content)
finally:
fd.close()
| 40.324675 | 116 | 0.622544 |
73e0868276739ce21107e9b9452274d8030151db | 2,568 | py | Python | devel_notes/test_class_speed.py | mekhub/alphafold | 8d89abf73ea07841b550b968aceae794acb244df | [
"MIT"
] | 3 | 2019-05-15T16:46:20.000Z | 2019-07-19T13:27:45.000Z | devel_notes/test_class_speed.py | mekhub/alphafold | 8d89abf73ea07841b550b968aceae794acb244df | [
"MIT"
] | null | null | null | devel_notes/test_class_speed.py | mekhub/alphafold | 8d89abf73ea07841b550b968aceae794acb244df | [
"MIT"
] | 4 | 2020-02-08T02:43:01.000Z | 2021-08-22T09:23:17.000Z | #!/usr/bin/python
import time
import sys
import os
from copy import deepcopy
sys.path.append(os.path.join(os.getcwd(), '..'))
from alphafold.partition import DynamicProgrammingData as DP
x = [[]]*500
for i in range( 500 ): x[i] = [0.0]*500
dx = deepcopy( x )
xcontrib = [[]]*500
for i in range( 500 ): xcontrib[i] = [[]]*500
xDP = DP( 500 ) # 500x500 object with other stuff in it.
N = 500000
print 'Try for ', N, 'cycles each:'
# Time getting
print 'GETTING'
t0 = time.time()
for i in range( N ): y = x[56][56]
t1 = time.time()
print t1 - t0, 'y = x[56][56]'
t0 = time.time()
for i in range( N ): y = xDP.X[56][56]
t1 = time.time()
print t1 - t0,'y = xDP.X[56][56]'
t0 = time.time()
for i in range( N ): y = getval(xDP,56)
t1 = time.time()
print t1 - t0, 'y = getval(xDP,56)'
t0 = time.time()
for i in range( N ): y = xDP[56][56]
t1 = time.time()
print t1 - t0, 'y = xDP[56][56]'
# Time setting
print 'SETTING'
t0 = time.time()
for i in range( N ): x[56][56] = 20
t1 = time.time()
print t1 - t0, 'x[56][56] = 20'
t0 = time.time()
for i in range( N ): xDP.X[56][56] = 20
t1 = time.time()
print t1 - t0,'xDP.X[56][56] = 20'
t0 = time.time()
for i in range( N ):
val = 20
xDP.X[56][56] = val
t1 = time.time()
print t1 - t0,'val = 20; xDP.X[56][56] = val'
t0 = time.time()
for i in range( N ): xDP[56][56] = 20
t1 = time.time()
print t1 - t0,'xDP[56][56] = 20'
# Time setting, including derivs
print 'SETTING INCLUDE DERIVS'
t0 = time.time()
for i in range( N ):
x[56][56] = 20
dx[56][56] = 0
t1 = time.time()
print t1 - t0, 'x[56][56] = 20, dx[56][56] = 20'
t0 = time.time()
for i in range( N ):
x[56][56] = (20,0)
t1 = time.time()
print t1 - t0, 'x[56][56] = (20,0)'
t0 = time.time()
for i in range( N ):
xDP.X[56][56] = 20
xDP.dX[56][56] = 0
t1 = time.time()
print t1 - t0,'xDP.X[56][56] = 20, xDP.dX[56][56]'
t0 = time.time()
for i in range( N ):
xDP.add(56,56,20)
t1 = time.time()
print t1 - t0,'xDP += 20'
# Time setting, including derivs and contribs
print 'SETTING INCLUDE DERIVS AND CONTRIBS'
t0 = time.time()
for i in range( N ):
x[56][56] = 20
dx[56][56] = 0
xcontrib[56][56].append( [x,56,56,20] )
t1 = time.time()
print t1 - t0, 'x[56][56] = 20'
t0 = time.time()
for i in range( N ):
xDP.X[56][56] = 20
xDP.dX[56][56] = 0
xDP.X_contrib[56][56].append( [x,56,56,20] )
t1 = time.time()
print t1 - t0,'xDP.X[56][56] = 20'
t0 = time.time()
for i in range( N ):
xDP.add(56,56,20)
t1 = time.time()
print t1 - t0,'xDP += 20'
| 20.709677 | 60 | 0.575545 |
73e13d84ff4673d8d1b1b964136674b1bd1ae5ef | 688 | py | Python | testRead.py | BichonCby/BaseBSPython | 411f7f5be5636aa7dc9975fb0ab61daa37e6d40a | [
"MIT"
] | null | null | null | testRead.py | BichonCby/BaseBSPython | 411f7f5be5636aa7dc9975fb0ab61daa37e6d40a | [
"MIT"
] | null | null | null | testRead.py | BichonCby/BaseBSPython | 411f7f5be5636aa7dc9975fb0ab61daa37e6d40a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*-coding:Latin-1 -*
import time
from Definitions import *
#from ev3dev2.motor import OUTPUT_B,LargeMotor
from ev3dev2.sensor import *
from AddSensors import AngleSensor
from ev3dev2.sensor.lego import TouchSensor
import Trace
trace = Trace.Trace()
i=0
toucher = TouchSensor(INPUT_3)
EncoderSensRight = AngleSensor(INPUT_1)
EncoderSensLeft = AngleSensor(INPUT_2)
trace.Log('toto\n')
while i<50:
top = time.time()
i=i+1
#toucher.value()
fic=open('/sys/class/lego-sensor/sensor0/value0','r')
val = fic.read()
fic.close()
duration = (time.time()-top)
trace.Log(val + ': %.2f\n' %(duration*1000))
time.sleep(0.1)
trace.Close()
| 22.193548 | 57 | 0.699128 |
73e339eb2591f2a4b2f2b9553c0b32fcb1202cbf | 2,697 | py | Python | infer.py | vic9527/ViClassifier | fd6c4730e880f35a9429277a6025219315e067cc | [
"MIT"
] | 1 | 2021-11-03T05:05:34.000Z | 2021-11-03T05:05:34.000Z | infer.py | vic9527/viclassifier | fd6c4730e880f35a9429277a6025219315e067cc | [
"MIT"
] | null | null | null | infer.py | vic9527/viclassifier | fd6c4730e880f35a9429277a6025219315e067cc | [
"MIT"
] | null | null | null |
if __name__ == "__main__":
import os, sys
viclassifier_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(viclassifier_dir)
sys.path.append(viclassifier_dir)
model = load_model('D:\\myai\\projects\\tmp\\git\\viclassifier\\tmps\\model.pth')
print(model)
image_path = r'C:\xxx\xxx.jpg'
# ### python###
# d1 = {'a': 1, 'b': 2, 'c': 3}
# #
# d2 = {}
# for key, value in d1.items():
# d2[value] = key
#
# #
# d2 = {k: v for v, k in d1.items()}
#
# # zip
# d2 = dict(zip(d1.value(), d1.key()))
class_to_idx = {'bad': 0, 'good': 1}
idx_to_class = {k: v for v, k in class_to_idx.items()}
predict(model, image_path, idx_to_class, is_show=False, device_type='cuda')
| 32.107143 | 105 | 0.632925 |
73e41b86e4797d0bdf28efbbcf4b63a5d38dc998 | 1,675 | py | Python | compiler/router/tests/10_supply_grid_test.py | bsg-external/OpenRAM | 3c5e13f95c925a204cabf052525c3de07638168f | [
"BSD-3-Clause"
] | 43 | 2016-11-06T20:53:46.000Z | 2021-09-03T18:57:39.000Z | compiler/router/tests/10_supply_grid_test.py | bsg-external/OpenRAM | 3c5e13f95c925a204cabf052525c3de07638168f | [
"BSD-3-Clause"
] | 27 | 2016-11-15T19:28:25.000Z | 2018-02-20T19:23:52.000Z | compiler/router/tests/10_supply_grid_test.py | bsg-external/OpenRAM | 3c5e13f95c925a204cabf052525c3de07638168f | [
"BSD-3-Clause"
] | 30 | 2016-11-09T16:02:45.000Z | 2018-02-23T17:07:59.000Z | # See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
#!/usr/bin/env python3
"Run a regresion test the library cells for DRC"
import unittest
from testutils import header,openram_test
import sys,os
sys.path.append(os.path.join(sys.path[0],".."))
import globals
import debug
OPTS = globals.OPTS
# instantiate a copy of the class to actually run the test
if __name__ == "__main__":
(OPTS, args) = globals.parse_args()
del sys.argv[1:]
header(__file__, OPTS.tech_name)
unittest.main()
| 29.385965 | 79 | 0.63403 |
73e5db5282163558729f472aa4322e2b0c37c1ec | 3,021 | py | Python | sources/decoding/analyse_model.py | int-brain-lab/paper-ephys-atlas | 47a7d52d6d59b5b618826d6f4cb72329dee77e0e | [
"MIT"
] | null | null | null | sources/decoding/analyse_model.py | int-brain-lab/paper-ephys-atlas | 47a7d52d6d59b5b618826d6f4cb72329dee77e0e | [
"MIT"
] | null | null | null | sources/decoding/analyse_model.py | int-brain-lab/paper-ephys-atlas | 47a7d52d6d59b5b618826d6f4cb72329dee77e0e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat May 21 17:05:48 2022
@author: Guido Meijer
"""
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, balanced_accuracy_score, confusion_matrix
from ibllib.atlas import BrainRegions
from joblib import load
from model_functions import load_channel_data, load_trained_model
import matplotlib.pyplot as plt
import seaborn as sns
br = BrainRegions()
# Settings
FEATURES = ['psd_delta', 'psd_theta', 'psd_alpha', 'psd_beta', 'psd_gamma', 'rms_ap', 'rms_lf',
'spike_rate', 'axial_um', 'x', 'y', 'depth']
# Load in data
chan_volt = load_channel_data()
# chan_volt = pd.read_parquet("/home/sebastian/Downloads/FlatIron/tables/channels_voltage_features.pqt")
chan_volt = chan_volt.loc[~chan_volt['rms_ap'].isnull()] # remove NaNs
# 31d8dfb1-71fd-4c53-9229-7cd48bee07e4 64d04585-67e7-4320-baad-8d4589fd18f7
if True:
test = chan_volt.loc[['31d8dfb1-71fd-4c53-9229-7cd48bee07e4', '64d04585-67e7-4320-baad-8d4589fd18f7'], : ]
else:
test = chan_volt
feature_arr = test[FEATURES].to_numpy()
regions = test['cosmos_acronyms'].values
# Load model
clf = load_trained_model('channels', 'cosmos')
# Decode brain regions
print('Decoding brain regions..')
predictions = clf.predict(feature_arr)
probs = clf.predict_proba(feature_arr)
# histogram of response probabilities
certainties = probs.max(1)
plt.hist(certainties)
plt.close()
# plot of calibration, how certain are correct versus incorrect predicitions
plt.hist(certainties[regions == predictions], label='Correct predictions')
plt.hist(certainties[regions != predictions], label='Wrong predictions')
plt.title("Model calibration", size=24)
plt.legend(frameon=False, fontsize=16)
plt.ylabel("Occurences", size=21)
plt.xlabel("Prob for predicted region", size=21)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
sns.despine()
plt.tight_layout()
plt.savefig("/home/sebastian/Pictures/calibration")
plt.close()
# compute accuracy and balanced for our highly imbalanced dataset
acc = accuracy_score(regions, predictions)
bacc = balanced_accuracy_score(regions, predictions)
print(f'Accuracy: {acc*100:.1f}%')
print(f'Balanced accuracy: {bacc*100:.1f}%')
# compute confusion matrix
names = np.unique(np.append(regions, predictions))
cm = confusion_matrix(regions, predictions, labels=names)
cm = cm / cm.sum(1)[:, None]
cm_copy = cm.copy()
# list top n classifications
n = 10
np.max(cm[~np.isnan(cm)])
cm[np.isnan(cm)] = 0
for i in range(n):
ind = np.unravel_index(np.argmax(cm, axis=None), cm.shape)
if ind[0] != ind[1]:
print("Top {} classification, mistake: {} gets classified as {}".format(i+1, names[ind[0]], names[ind[1]]))
else:
print("Top {} classification, success: {} gets classified as {}".format(i+1, names[ind[0]], names[ind[1]]))
cm[ind] = 0
# plot confusion matrix
plt.imshow(cm_copy)
plt.yticks(range(len(names)), names)
plt.xticks(range(len(names)), names, rotation='65')
plt.show() | 32.138298 | 115 | 0.737504 |
73e778dc0ac39e74782e31bce2904aee2683d400 | 3,923 | py | Python | Lab04_82773/ex4_4/ex4_4.py | viniciusbenite/cdb | ccc39e9320b03e26d5479a24f76a209ed2283000 | [
"MIT"
] | null | null | null | Lab04_82773/ex4_4/ex4_4.py | viniciusbenite/cdb | ccc39e9320b03e26d5479a24f76a209ed2283000 | [
"MIT"
] | null | null | null | Lab04_82773/ex4_4/ex4_4.py | viniciusbenite/cdb | ccc39e9320b03e26d5479a24f76a209ed2283000 | [
"MIT"
] | null | null | null | # Vinicius Ribeiro
# Nmec 82773
# Make sure to run pip3 install -r requirements.txt and load the .dump at Neo4j
# https://neo4j.com/docs/operations-manual/current/tools/dump-load/
# Dataset: https://neo4j.com/graphgist/beer-amp-breweries-graphgist#_create_nodes_and_relationships
import sys
from neo4j import GraphDatabase
# Connect to local DB
init_db("bolt://localhost:7687", "neo4j", "12345")
| 42.182796 | 116 | 0.515932 |
73e823c830b6abe9c91c69930849b15b603a17bb | 184 | py | Python | readthedocs/code-tabs/python/tests/test_directory_listing_recursive.py | xenon-middleware/xenon-tutorial | 92e4e4037ab2bc67c8473ac4366ff41326a7a41c | [
"Apache-2.0"
] | 2 | 2016-06-23T09:03:34.000Z | 2018-03-31T12:45:39.000Z | readthedocs/code-tabs/python/tests/test_directory_listing_recursive.py | NLeSC/Xenon-examples | 92e4e4037ab2bc67c8473ac4366ff41326a7a41c | [
"Apache-2.0"
] | 54 | 2015-11-26T16:36:48.000Z | 2017-08-01T12:12:51.000Z | readthedocs/code-tabs/python/tests/test_directory_listing_recursive.py | xenon-middleware/xenon-examples | 92e4e4037ab2bc67c8473ac4366ff41326a7a41c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import pytest
from pyxenon_snippets import directory_listing_recursive
| 16.727273 | 56 | 0.831522 |
73e8d0b6bdf6ce5014c04793aa8b3ccc731b67fb | 764 | py | Python | submissions/past201912-open/i.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 1 | 2021-05-10T01:16:28.000Z | 2021-05-10T01:16:28.000Z | submissions/past201912-open/i.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 3 | 2021-05-11T06:14:15.000Z | 2021-06-19T08:18:36.000Z | submissions/past201912-open/i.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | null | null | null | import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
from itertools import product
n, m = map(int, readline().split())
inf = float('inf')
dp = [inf] * (2 ** n)
dp[0] = 0
for _ in range(m):
s, c = readline().rstrip().decode().split()
c = int(c)
bit = [0] * n
for i, ss in enumerate(s):
if ss == 'Y':
bit[i] = 1
for i, v in enumerate(product([0, 1], repeat=n)):
if dp[i] != inf:
num = 0
for index, (x, y) in enumerate(zip(v[::-1], bit)):
if x == 1 or y == 1:
num += 2 ** index
dp[num] = min(dp[num], dp[i] + c)
print(-1 if dp[-1] == inf else dp[-1])
| 27.285714 | 62 | 0.510471 |
73e8d525fff7a96e23c10924c3bedcf78a0ab5d6 | 55,250 | py | Python | google/cloud/dlp_v2/services/dlp_service/transports/grpc_asyncio.py | LaudateCorpus1/python-dlp | e0a51c9254677016f547647848dcbee85ee1bf29 | [
"Apache-2.0"
] | 32 | 2020-07-11T02:50:13.000Z | 2022-02-10T19:45:59.000Z | google/cloud/dlp_v2/services/dlp_service/transports/grpc_asyncio.py | LaudateCorpus1/python-dlp | e0a51c9254677016f547647848dcbee85ee1bf29 | [
"Apache-2.0"
] | 112 | 2020-02-11T13:24:14.000Z | 2022-03-31T20:59:08.000Z | google/cloud/dlp_v2/services/dlp_service/transports/grpc_asyncio.py | LaudateCorpus1/python-dlp | e0a51c9254677016f547647848dcbee85ee1bf29 | [
"Apache-2.0"
] | 22 | 2020-02-03T18:23:38.000Z | 2022-01-29T08:09:29.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.dlp_v2.types import dlp
from google.protobuf import empty_pb2 # type: ignore
from .base import DlpServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import DlpServiceGrpcTransport
__all__ = ("DlpServiceGrpcAsyncIOTransport",)
| 43.814433 | 102 | 0.639982 |
73e9a8245d7f2b954b01c47bce5f6ddf87248068 | 781 | py | Python | tym.py | tsyogesh40/Finger_recognition-Python- | 4c1597cd246be1248bbfbb6cfc1ce1cbf5c4ecac | [
"MIT"
] | null | null | null | tym.py | tsyogesh40/Finger_recognition-Python- | 4c1597cd246be1248bbfbb6cfc1ce1cbf5c4ecac | [
"MIT"
] | null | null | null | tym.py | tsyogesh40/Finger_recognition-Python- | 4c1597cd246be1248bbfbb6cfc1ce1cbf5c4ecac | [
"MIT"
] | null | null | null | import datetime
t=datetime.datetime.now()
#date format
weekday=t.strftime("%a") # %A for abbr
day=t.strftime("%d")
month=t.strftime("%b") #%B for abbr
month_num=t.strftime("%m")
year=t.strftime("%Y")
date=t.strftime("%Y-%m-%d")
print(date)
#time format
hour_12=t.strftime("%I")
hour_24=t.strftime("%H")
minutes=t.strftime("%H")
seconds=t.strftime("%S")
am_pm=t.strftime("%p")
time_12=t.strftime("%I:%M:%S %p") #12hrs time AM/PM
time_24=t.strftime("%H:%M:%S") #24 Hrs time
print(time_12)
print(time_24)
print(sem_calc(int(month_num)))
print(date())
| 17.75 | 55 | 0.641485 |
73eb8bdab00daf7ae249b9e5cfe3937c7c3470b5 | 92 | py | Python | parameters_8001.py | sanket0211/courier-portal | 6b35aa006813f710db9c3e61da4a718aff20881d | [
"BSD-3-Clause"
] | null | null | null | parameters_8001.py | sanket0211/courier-portal | 6b35aa006813f710db9c3e61da4a718aff20881d | [
"BSD-3-Clause"
] | null | null | null | parameters_8001.py | sanket0211/courier-portal | 6b35aa006813f710db9c3e61da4a718aff20881d | [
"BSD-3-Clause"
] | null | null | null | password="pbkdf2(1000,20,sha512)$8a062c206755a51e$df13c5122a621a9de3a64d39f26460f175076ca0"
| 46 | 91 | 0.891304 |
73ec5cfa22b958735251f6bd136ed85eba9a7172 | 562 | py | Python | TheKinozal/custom_storages/async_s3_video.py | R-Mielamud/TheKinozal | 62cb79faae58b23f0ef0175593ed9b5746229b5b | [
"MIT"
] | 1 | 2020-10-16T19:15:32.000Z | 2020-10-16T19:15:32.000Z | TheKinozal/custom_storages/async_s3_video.py | R-Mielamud/TheKinozal | 62cb79faae58b23f0ef0175593ed9b5746229b5b | [
"MIT"
] | null | null | null | TheKinozal/custom_storages/async_s3_video.py | R-Mielamud/TheKinozal | 62cb79faae58b23f0ef0175593ed9b5746229b5b | [
"MIT"
] | null | null | null | import os
from TheKinozal import settings
from storages.backends.s3boto3 import S3Boto3Storage
from helpers.random_string import generate_random_string
from helpers.chunked_upload import ChunkedS3VideoUploader
| 35.125 | 81 | 0.756228 |
73ed247eb28b6b5d48aa9d6331bcb389807b9a5d | 1,098 | py | Python | bh_tsne/prep_result.py | mr4jay/numerai | a07b2dcafe9f078df8578d150d585f239fe73c51 | [
"MIT"
] | 306 | 2016-09-18T07:32:33.000Z | 2022-03-22T16:30:26.000Z | bh_tsne/prep_result.py | mikekosk/numerai | 2a09c648c66143ee101cd80de4827108aaf218fc | [
"MIT"
] | 2 | 2017-01-04T02:17:20.000Z | 2017-09-18T11:43:59.000Z | bh_tsne/prep_result.py | mikekosk/numerai | 2a09c648c66143ee101cd80de4827108aaf218fc | [
"MIT"
] | 94 | 2016-09-17T03:48:55.000Z | 2022-01-05T11:54:25.000Z | import struct
import numpy as np
import pandas as pd
df_train = pd.read_csv('../data/train_data.csv')
df_valid = pd.read_csv('../data/valid_data.csv')
df_test = pd.read_csv('../data/test_data.csv')
with open('result.dat', 'rb') as f:
N, = struct.unpack('i', f.read(4))
no_dims, = struct.unpack('i', f.read(4))
print(N, no_dims)
mappedX = struct.unpack('{}d'.format(N * no_dims), f.read(8 * N * no_dims))
mappedX = np.array(mappedX).reshape((N, no_dims))
print(mappedX)
tsne_train = mappedX[:len(df_train)]
tsne_valid = mappedX[len(df_train):len(df_train)+len(df_valid)]
tsne_test = mappedX[len(df_train)+len(df_valid):]
assert(len(tsne_train) == len(df_train))
assert(len(tsne_valid) == len(df_valid))
assert(len(tsne_test) == len(df_test))
save_path = '../data/tsne_{}d_30p.npz'.format(no_dims)
np.savez(save_path, train=tsne_train, valid=tsne_valid, test=tsne_test)
print('Saved: {}'.format(save_path))
# landmarks, = struct.unpack('{}i'.format(N), f.read(4 * N))
# costs, = struct.unpack('{}d'.format(N), f.read(8 * N))
| 34.3125 | 79 | 0.653916 |
73eec10a12c7ce55e197ae8c7928050831069eb9 | 623 | py | Python | moca/urls.py | satvikdhandhania/vit-11 | e599f2b82a9194658c67bbd5c7e45f3b50d016da | [
"BSD-3-Clause"
] | 1 | 2016-09-20T20:36:53.000Z | 2016-09-20T20:36:53.000Z | moca/urls.py | satvikdhandhania/vit-11 | e599f2b82a9194658c67bbd5c7e45f3b50d016da | [
"BSD-3-Clause"
] | null | null | null | moca/urls.py | satvikdhandhania/vit-11 | e599f2b82a9194658c67bbd5c7e45f3b50d016da | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls.defaults import patterns, url, include
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routing (legacy Django `patterns(...)` style from django.conf.urls.defaults).
urlpatterns = patterns(
    '',
    (r'^log/', include('requestlog.urls')),
    (r'^admin/', include(admin.site.urls)),
    # Pass anything that doesn't match on to the mrs app
    url(r'^',
        include('moca.mrs.urls')),
)
from django.conf import settings
# Serve static media through Django itself only in DEBUG mode; production
# is expected to serve MEDIA_ROOT via the web server instead.
if settings.DEBUG:
    urlpatterns += patterns(
        '',
        (r'^static/(?P<path>.*)$',
         'django.views.static.serve',
         {'document_root': settings.MEDIA_ROOT}),
    )
| 23.074074 | 60 | 0.632424 |
73eee2fb344cce481c9e4bf622cf22c5054e99f7 | 3,833 | py | Python | tests/template_tests/filter_tests/test_unordered_list.py | DasAllFolks/django | 9f427617e4559012e1c2fd8fce46cbe225d8515d | [
"BSD-3-Clause"
] | 1 | 2015-01-09T08:45:54.000Z | 2015-01-09T08:45:54.000Z | tests/template_tests/filter_tests/test_unordered_list.py | DasAllFolks/django | 9f427617e4559012e1c2fd8fce46cbe225d8515d | [
"BSD-3-Clause"
] | null | null | null | tests/template_tests/filter_tests/test_unordered_list.py | DasAllFolks/django | 9f427617e4559012e1c2fd8fce46cbe225d8515d | [
"BSD-3-Clause"
] | null | null | null | import warnings
from django.test import SimpleTestCase
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.safestring import mark_safe
from ..utils import render, setup
| 50.434211 | 97 | 0.60527 |
73efefef974776a64a4da11b84a452736ff6369e | 5,218 | py | Python | models/train_classifier.py | jcardenas14/Disaster-Response | 303cbbc9098e3e1d163e8a6a7bc4bcdc8f134395 | [
"MIT"
] | null | null | null | models/train_classifier.py | jcardenas14/Disaster-Response | 303cbbc9098e3e1d163e8a6a7bc4bcdc8f134395 | [
"MIT"
] | null | null | null | models/train_classifier.py | jcardenas14/Disaster-Response | 303cbbc9098e3e1d163e8a6a7bc4bcdc8f134395 | [
"MIT"
] | null | null | null | import numpy as np
import nltk
import re
import pandas as pd
import sys
import pickle
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import f1_score, precision_score, recall_score
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from sqlalchemy import create_engine
# download nltk libraries and stopwords
nltk.download(['punkt', 'wordnet','stopwords','averaged_perceptron_tagger'])
stop_words = stopwords.words('english')
# function to load data
def load_data(database_filepath):
    """Load the cleaned disaster-message data from a SQLite database.

    Args:
        database_filepath (str): path to the SQLite database file
            containing the 'disaster_cleaned' table.

    Returns:
        tuple: (X, Y, categories) where X is an array of message strings,
        Y is an array of 0/1 category indicators, and categories is the
        list of category column names.
    """
    db_engine = create_engine('sqlite:///' + database_filepath)
    frame = pd.read_sql_table('disaster_cleaned', con=db_engine)
    # Everything that is not an identifier/text column is a category label.
    non_label_cols = ['id', 'message', 'original', 'genre']
    labels = frame.drop(columns=non_label_cols)
    return frame['message'].values, labels.values, labels.columns
def tokenize(text):
    """Return a list of lower-cased, lemmatized tokens from *text*.

    Stop words and non-alphanumeric tokens are discarded before
    lemmatization.
    """
    lemmatizer = WordNetLemmatizer()

    cleaned = []
    for raw_tok in word_tokenize(text):
        tok = raw_tok.lower()
        # Skip stop words and punctuation/symbol tokens.
        if tok in stop_words or not tok.isalnum():
            continue
        cleaned.append(lemmatizer.lemmatize(tok).strip())

    return cleaned
def build_model():
    """Build a grid-searched multi-output text classification model.

    The pipeline count-vectorizes messages with the custom ``tokenize``
    function, applies a TF-IDF transform, and fits one random forest per
    output category. Hyperparameters are tuned via GridSearchCV.

    Returns:
        GridSearchCV: unfitted grid-search wrapper around the pipeline.
    """
    steps = [
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(RandomForestClassifier())),
    ]

    search_space = {
        'clf__estimator__n_estimators': [50, 100, 200],
        'clf__estimator__min_samples_split': [2, 3, 4],
    }

    return GridSearchCV(Pipeline(steps), param_grid=search_space)
def evaluate_model(model, X_test, Y_test, category_names):
    """Score *model* on the test set, per category.

    Args:
        model: fitted estimator with a ``predict`` method.
        X_test: test messages.
        Y_test: true 0/1 category indicators (2-D array).
        category_names: labels for the category columns.

    Returns:
        DataFrame: macro-averaged f1, precision, and recall, one row per
        category (indexed by ``category_names``).
    """
    predictions = model.predict(X_test)

    scores = {'f1': [], 'precision': [], 'recall': []}
    for col in range(predictions.shape[1]):
        y_true = Y_test[:, col]
        y_hat = predictions[:, col]
        scores['f1'].append(
            f1_score(y_true, y_hat, average='macro', zero_division=0))
        scores['precision'].append(
            precision_score(y_true, y_hat, average='macro', zero_division=0))
        scores['recall'].append(
            recall_score(y_true, y_hat, average='macro'))

    return pd.DataFrame(scores, index=category_names)
def save_model(model, model_filepath):
    """Serialize *model* to *model_filepath* as a pickle file."""
    with open(model_filepath, 'wb') as out_file:
        pickle.dump(model, out_file)
if __name__ == '__main__':
main() | 33.025316 | 101 | 0.675929 |
73f111cc65a7da55125e7eb4f996288413f32c34 | 3,850 | py | Python | getauditrecords.py | muzznak/pyviyatools | 58a99656e0a773370c050de191999fbc98ac5f03 | [
"Apache-2.0"
] | 25 | 2019-04-09T19:52:54.000Z | 2022-03-07T02:11:58.000Z | getauditrecords.py | muzznak/pyviyatools | 58a99656e0a773370c050de191999fbc98ac5f03 | [
"Apache-2.0"
] | 49 | 2018-12-13T15:53:16.000Z | 2022-03-09T15:31:13.000Z | getauditrecords.py | muzznak/pyviyatools | 58a99656e0a773370c050de191999fbc98ac5f03 | [
"Apache-2.0"
] | 25 | 2019-08-23T19:58:29.000Z | 2022-02-24T16:14:03.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# getauditrecords.py January 2020
#
# Extract list of audit records from SAS Infrastructure Data Server using REST API.
#
# Examples:
#
# 1. Return list of audit events from all users and applications
# ./getauditrecords.py
#
# Change History
#
# 10JAN2020 Comments added
#
# Copyright 2018, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use this file except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing permissions and limitations under the License.
#
# Import Python modules
import json
import socket
import argparse, sys
from sharedfunctions import callrestapi,getinputjson,simpleresults,getbaseurl,printresult
# Sample reqval="/audit/entries?filter=and(eq(application,'reports'),eq(state,'success'),ge(timeStamp,'2018-11-20'),le(timeStamp,'2020-11-20T23:59:59.999Z'))&sortBy=timeStamp&limit=1000"
# Parse arguments based on parameters that are passed in on the command line
parser = argparse.ArgumentParser()
parser.add_argument("-a","--application", help="Filter by Application or Service name",default=None)
parser.add_argument("-l","--limit", help="Maximum number of records to display",default='1000')
parser.add_argument("-t","--type", help="Filter by entry Type",default=None)
parser.add_argument("-c","--action", help="Filter by entry Action",default=None)
parser.add_argument("-s","--state", help="Filter by entry State",default=None)
parser.add_argument("-u","--user", help="Filter by Username",default=None)
parser.add_argument("-A","--after", help="Filter entries that are created after the specified timestamp. For example: 2020-01-03 or 2020-01-03T18:15Z",default=None)
parser.add_argument("-B","--before", help="Filter entries that are created before the specified timestamp. For example: 2020-01-03 or 2020-01-03T18:15Z",default=None)
parser.add_argument("-S","--sortby", help="Sort the output ascending by this field",default='timeStamp')
parser.add_argument("-o","--output", help="Output Style", choices=['csv','json','simple','simplejson'],default='csv')
args = parser.parse_args()
# Unpack parsed options into plain locals for readability below.
appname=args.application
output_style=args.output
sort_order=args.sortby
output_limit=args.limit
username=args.user
entry_type=args.type
entry_action=args.action
entry_state=args.state
ts_after=args.after
ts_before=args.before
# Create list for filter conditions
# Each supplied option becomes one eq()/ge()/le() clause of the REST filter.
filtercond=[]
if appname!=None: filtercond.append("eq(application,'"+appname+"')")
if username!=None: filtercond.append("eq(user,'"+username+"')")
if entry_type!=None: filtercond.append("eq(type,'"+entry_type+"')")
if entry_action!=None: filtercond.append("eq(action,'"+entry_action+"')")
if entry_state!=None: filtercond.append("eq(state,'"+entry_state+"')")
if ts_after!=None: filtercond.append("ge(timeStamp,'"+ts_after+"')")
if ts_before!=None: filtercond.append("le(timeStamp,'"+ts_before+"')")
# Construct filter
# All clauses are AND-ed together into a single filter expression.
delimiter = ','
completefilter = 'and('+delimiter.join(filtercond)+')'
# Set request
reqtype = 'get'
reqval = "/audit/entries?filter="+completefilter+"&limit="+output_limit+"&sortBy="+sort_order
# Construct & print endpoint URL
baseurl=getbaseurl()
endpoint=baseurl+reqval
# print("REST endpoint: " +endpoint)
# Make REST API call, and process & print results
files_result_json=callrestapi(reqval,reqtype)
cols=['id','timeStamp','type','action','state','user','remoteAddress','application','description','uri']
printresult(files_result_json,output_style,cols)
| 43.258427 | 189 | 0.751169 |
73f1a91dc045f413a69942d834270e344133624f | 6,345 | py | Python | async_blp/handlers.py | rockscie/async_blp | acb8777ccf2499681bde87d76ca780b61219699c | [
"MIT"
] | 12 | 2019-08-05T16:56:54.000Z | 2021-02-02T11:09:37.000Z | async_blp/handlers.py | lightning-like/async_blp | acb8777ccf2499681bde87d76ca780b61219699c | [
"MIT"
] | null | null | null | async_blp/handlers.py | lightning-like/async_blp | acb8777ccf2499681bde87d76ca780b61219699c | [
"MIT"
] | 5 | 2019-12-08T15:43:13.000Z | 2021-11-14T08:38:07.000Z | """
File contains handler for ReferenceDataRequest
"""
import asyncio
import uuid
from typing import Dict
from typing import List
from .base_handler import HandlerBase
from .base_request import RequestBase
from .requests import Subscription
from .utils.blp_name import RESPONSE_ERROR
from .utils.log import get_logger
# pylint: disable=ungrouped-imports
try:
import blpapi
except ImportError:
from async_blp.utils import env_test as blpapi
LOGGER = get_logger()
# NOTE(review): this `def` takes `self` but sits at module level in this
# extract -- it appears to have been detached from its enclosing handler
# class when the file was dumped; confirm against the original source.
def _response_handler(self, event_: blpapi.Event):
    """
    Process blpapi.Event.RESPONSE events. This is the last event for the
    corresponding requests, therefore after processing all messages
    from the event, None will be send to the corresponding requests.
    """
    # Reuse the partial-response path for the payload itself...
    self._partial_response_handler(event_)
    # ...then signal completion for every request referenced by the event.
    for msg in event_:
        self._close_requests(msg.correlationIds())
class SubscriptionHandler(HandlerBase):
    """
    Handler gets response events from Bloomberg from other thread,
    then puts it to request queue. Each handler opens its own session
    Used for handling subscription requests and responses
    """
    def _subscriber_data_handler(self, event_: blpapi.Event):
        """
        Redirect data to the request queue.

        Each message is routed to the request registered under its
        correlation id.
        """
        for msg in event_:
            for cor_id in msg.correlationIds():
                self._current_requests[cor_id].send_queue_message(msg)
    def _subscriber_status_handler(self, event_: blpapi.Event):
        """
        Raise exception if something goes wrong

        Any status message other than the two expected success statuses
        is treated as an error and forwarded to ``_raise_exception``.
        """
        for msg in event_:
            if msg.asElement().name() not in ("SubscriptionStarted",
                                              "SubscriptionStreamsActivated",
                                              ):
                self._raise_exception(msg)
| 33.571429 | 78 | 0.628684 |
73f2bc3599ec98d3aba14c518c543be223219c33 | 4,759 | py | Python | cytochrome-b6f-nn-np-model-kinetics.py | vstadnyt/cytochrome | 546aa450fa6dc2758b079aba258e3572dd24d60c | [
"MIT"
] | null | null | null | cytochrome-b6f-nn-np-model-kinetics.py | vstadnyt/cytochrome | 546aa450fa6dc2758b079aba258e3572dd24d60c | [
"MIT"
] | null | null | null | cytochrome-b6f-nn-np-model-kinetics.py | vstadnyt/cytochrome | 546aa450fa6dc2758b079aba258e3572dd24d60c | [
"MIT"
] | 1 | 2021-09-28T17:17:48.000Z | 2021-09-28T17:17:48.000Z | import cytochrome_lib #This is a cytochrome library
import matplotlib.pyplot as plt
import numpy as np
version = "Last update: Aug 8, 2017"
desription = "This code calculates population distribution in the cytochrome b6f protein and plots kinetic profiles for two different models: \n'nn' and 'np' models \n The outputs are: \n Figure 1: \n Figure 2: The ppulation distributions for different oxydations states of the cytochrome proteins. \n Figure 3: the resulting absorbance and circular dichroism kinetics for two different models"
print desription
print version
#the eclusions_lst is a list of hemes that are taken into account during calculations (1 - include; 0 - exclude);
#There are 8 values for 4 hemes and 2 dipoles per heme: [Qx_p1, Qy_p1, Qx_n1, Qy_n1, Qx_p2, Qy_p2, Qx_n2, Qy_n2]
##This is a main part of a code
#This part creates two lists of several instances of a cyt class (see cytochrome library) with different input files
exclusions_lst = []
exclusions_lst.append([0,0,0,0,0,0,0,0])
exclusions_lst.append([0,0,1,1,0,0,0,0])
exclusions_lst.append([1,1,1,1,0,0,0,0])
exclusions_lst.append([1,1,1,1,0,0,1,1])
exclusions_lst.append([1,1,1,1,1,1,1,1])
cyt_b6f_np = []
for excl in exclusions_lst:
cyt_b6f_np.append(cytochrome_lib.cyt('cytochrome_b6f.txt',excl))
for i in range(len(exclusions_lst)):
cyt_b6f_np[i].read_structure_file()
cyt_b6f_np[i].Hamiltonian()
cyt_b6f_np[i].D_and_R_strength()
cyt_b6f_np[i].spectra_plot()
exclusions_lst = []
exclusions_lst.append([0,0,0,0,0,0,0,0])
exclusions_lst.append([0,0,1,1,0,0,0,0])
exclusions_lst.append([0,0,1,1,0,0,1,1])
exclusions_lst.append([1,1,1,1,0,0,1,1])
exclusions_lst.append([1,1,1,1,1,1,1,1])
cyt_b6f_nn = []
for excl in exclusions_lst:
cyt_b6f_nn.append(cytochrome_lib.cyt('cytochrome_b6f.txt',excl))
for i in range(len(exclusions_lst)):
cyt_b6f_nn[i].read_structure_file()
cyt_b6f_nn[i].Hamiltonian()
cyt_b6f_nn[i].D_and_R_strength()
cyt_b6f_nn[i].spectra_plot()
x_range_nm = cyt_b6f_nn[0].x_range_nm
plt.figure(1)
plt.ion()
plt.subplot(2,2,1)
for i in range(len(exclusions_lst)):
plt.plot(x_range_nm,np.sum(cyt_b6f_nn[i].specR,axis = 0),linewidth=2)
#plt.plot(x_range_nm,np.sum(specR_full,axis = 0),linewidth=5)
#plt.legend(['n1p1','n1n2','n1p2','p1n2','p1p2','n2p2']);
plt.title('cytochrome b6f np model')
plt.subplot(2,2,2)
for i in range(len(exclusions_lst)):
plt.plot(x_range_nm,np.sum(cyt_b6f_np[i].specR,axis = 0),linewidth=2)
#plt.plot(x_range_nm,np.sum(specR_full,axis = 0),linewidth=5)
plt.title('cytochrome b6f nn model')
plt.subplot(2,2,3)
for i in range(len(exclusions_lst)):
plt.plot(x_range_nm,np.sum(cyt_b6f_nn[i].specD,axis = 0),linewidth=2)
#plt.plot(x_range_nm,np.sum(specR_full,axis = 0),linewidth=5)
plt.subplot(2,2,4)
for i in range(len(exclusions_lst)):
plt.plot(x_range_nm,np.sum(cyt_b6f_np[i].specD,axis = 0),linewidth=2)
plt.show()
length = 10000
population = cytochrome_lib.kinetics_solve(np.array([1,1,1,1,0,0,0]),length)
plt.figure(2)
plt.ion()
for i in range(5):
plt.plot(range(length),population[i,:])
plt.title("Population distribution of proteins in different oxydation states")
plt.legend(['0e- state (fully oxydized)','1e- state','2e- state','3e- state','4e- state(fully reduced)'])
plt.show()
Absorbance_lst_b6f_nn = []
Circular_Dichroism_lst_b6f_nn = []
for i in range(5):
Absorbance_lst_b6f_nn.append(population[i,:]*np.sum(np.sum(cyt_b6f_nn[i].specD,axis = 0)))
Circular_Dichroism_lst_b6f_nn.append(population[i,:]*np.sum(np.abs(np.sum(cyt_b6f_nn[i].specR,axis = 0))))
Absorbance_b6f_nn = np.asarray(Absorbance_lst_b6f_nn)
Circular_Dichroism_b6f_nn = np.asarray(Circular_Dichroism_lst_b6f_nn)
Absorbance_lst_b6f_np = []
Circular_Dichroism_lst_b6f_np = []
for i in range(5):
Absorbance_lst_b6f_np.append(population[i,:]*np.sum(np.sum(cyt_b6f_np[i].specD,axis = 0)))
Circular_Dichroism_lst_b6f_np.append(population[i,:]*np.sum(np.abs(np.sum(cyt_b6f_np[i].specR,axis = 0))))
Absorbance_b6f_np = np.asarray(Absorbance_lst_b6f_np)
Circular_Dichroism_b6f_np = np.asarray(Circular_Dichroism_lst_b6f_np)
plt.figure(3)
plt.ion()
plt.title('cytochrome b6f nn and np models')
plt.plot(range(length),np.sum(Absorbance_b6f_nn, axis = 0)/np.max(np.sum(Absorbance_b6f_nn, axis = 0)))
plt.plot(range(length),np.sum(Absorbance_b6f_np, axis = 0)/np.max(np.sum(Absorbance_b6f_np, axis = 0)))
plt.plot(range(length),np.sum(Circular_Dichroism_b6f_nn, axis = 0)/np.max(np.sum(Circular_Dichroism_b6f_nn, axis = 0)))
plt.plot(range(length),np.sum(Circular_Dichroism_b6f_np, axis = 0)/np.max(np.sum(Circular_Dichroism_b6f_np, axis = 0)))
plt.legend(['OD_nn','OD_np','CD_nn','CD_np'])
plt.show()
print "\nCalculations are finished. Please, see figures 1-3"
| 36.328244 | 394 | 0.741963 |
73f3505bc64c937e900a105ef529d5195af953f8 | 10,062 | py | Python | moderation/models.py | raja-creoit/django-moderation | 627afeeeb272d8d7e8f4893e8418d8942ccb80ba | [
"BSD-3-Clause"
] | null | null | null | moderation/models.py | raja-creoit/django-moderation | 627afeeeb272d8d7e8f4893e8418d8942ccb80ba | [
"BSD-3-Clause"
] | 1 | 2020-01-31T20:37:53.000Z | 2020-01-31T20:37:53.000Z | moderation/models.py | raja-creoit/django-moderation | 627afeeeb272d8d7e8f4893e8418d8942ccb80ba | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
from django.conf import settings
try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError:
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models, transaction
from django.utils.translation import ugettext_lazy as _
from model_utils import Choices
from . import moderation
from .constants import (MODERATION_READY_STATE,
MODERATION_DRAFT_STATE,
MODERATION_STATUS_REJECTED,
MODERATION_STATUS_APPROVED,
MODERATION_STATUS_PENDING)
from .diff import get_changes_between_models
from .fields import SerializedObjectField
from .managers import ModeratedObjectManager
from .signals import post_moderation, pre_moderation
from .utils import django_19
import datetime
# Moderation lifecycle states for a moderated object (model_utils Choices:
# each entry is (db value, code name, human-readable label)).
MODERATION_STATES = Choices(
    (MODERATION_READY_STATE, 'ready', _('Ready for moderation')),
    (MODERATION_DRAFT_STATE, 'draft', _('Draft')),
)
# Outcome of a moderation decision on an object.
STATUS_CHOICES = Choices(
    (MODERATION_STATUS_REJECTED, 'rejected', _("Rejected")),
    (MODERATION_STATUS_APPROVED, 'approved', _("Approved")),
    (MODERATION_STATUS_PENDING, 'pending', _("Pending")),
)
| 38.7 | 93 | 0.633174 |
73f3c138d83e22bb6c02d12e03c089fb61651fa0 | 3,684 | py | Python | hygnd/munge.py | thodson-usgs/hygnd | 04d3596f79350ba19e08851e494c8feb7d68c0e0 | [
"MIT"
] | 2 | 2018-07-27T22:29:27.000Z | 2020-03-04T18:01:47.000Z | hygnd/munge.py | thodson-usgs/hygnd | 04d3596f79350ba19e08851e494c8feb7d68c0e0 | [
"MIT"
] | null | null | null | hygnd/munge.py | thodson-usgs/hygnd | 04d3596f79350ba19e08851e494c8feb7d68c0e0 | [
"MIT"
] | null | null | null | from math import floor
import pandas as pd
def filter_param_cd(df, code):
    """Return a copy of *df* with parameter values masked to NaN unless
    their companion qualifier column contains *code*.

    For every column named ``<param>_cd``, the matching ``<param>`` column
    keeps its value only on rows where the qualifier string contains
    *code* (e.g. ``'A'`` for approved data); other rows become NaN.

    Args:
        df (DataFrame): data with ``<param>`` / ``<param>_cd`` column pairs.
        code (str): qualifier code to keep (regex, via ``str.contains``).

    Returns:
        DataFrame: masked copy; the input frame is not modified.
    """
    approved_df = df.copy()

    # BUG FIX: the original used param.strip('_cd'), which strips the
    # character set {'_', 'c', 'd'} from both ends rather than removing
    # the '_cd' suffix (e.g. 'cond_cd' -> 'on'). Slice the suffix off.
    suffix = '_cd'
    params = [col[:-len(suffix)] for col in df.columns if col.endswith(suffix)]

    for param in params:
        # Keep values only where the qualifier contains the requested code;
        # plain assignment avoids the chained inplace-where write-back issue.
        approved_df[param] = approved_df[param].where(
            approved_df[param + suffix].str.contains(code))

    # drop any rows where all params are nan and return
    #return approved_df.dropna(axis=0, how='all', subset=params)
    return approved_df
def interp_to_freq(df, freq=15, interp_limit=120, fields=None):
    """
    Resample a datetime-indexed frame to a fixed minute frequency,
    time-interpolating across gaps up to ``interp_limit`` minutes.

    WARNING: for now this only works on one site at a time,
    Also must review this function further

    Args:
        df (DataFrame): a dataframe with a datetime index
        freq (int): frequency in minutes
        interp_limit (int): max time to interpolate over, in minutes
        fields: unused; kept for interface compatibility (TODO: remove
            or implement column selection)

    Returns:
        DataFrame: values at ``freq``-minute spacing, index named
        'datetime'
    """
    #XXX assumes no? multiindex
    df = df.copy()
    # Promote a Series input to a single-column frame so merge works.
    if type(df) == pd.core.series.Series:
        df = df.to_frame()
    #df.reset_index(level=0, inplace=True)
    # Max consecutive points to interpolate = gap minutes / step minutes.
    limit = floor(interp_limit/freq)
    freq_str = '{}min'.format(freq)
    start = df.index[0]
    end = df.index[-1]
    # Regular grid spanning the observed record; outer-merge keeps the
    # original (possibly irregular) timestamps alongside the grid.
    new_index = pd.date_range(start=start, end=end, periods=None, freq=freq_str)
    #new_index = new_index.union(df.index)
    new_df = pd.DataFrame(index=new_index)
    new_df = new_df.merge(df, how='outer', left_index=True, right_index=True)
    #new_df = pd.merge(df, new_df, how='outer', left_index=True, right_index=True)
    #this resampling eould be more efficient
    # Time-weighted interpolation, then snap back to the regular grid.
    # NOTE(review): the extra resample('<n>T') below looks redundant with
    # asfreq(freq_str) above -- confirm before simplifying.
    out_df = new_df.interpolate(method='time',limit=limit, limit_direction='both').asfreq(freq_str)
    out_df = out_df.resample('{}T'.format(freq)).asfreq()
    out_df.index.name = 'datetime'
    return out_df
    #out_df.set_index('site_no', append=True, inplace=True)
    #return out_df.reorder_levels(['site_no','datetime'])
def fill_iv_w_dv(iv_df, dv_df, freq='15min', col='00060'):
    """Fill gaps in an instantaneous discharge record with daily average estimates.

    Mutates both inputs: ``dv_df`` is renamed in place and ``iv_df`` is
    updated in place (and also returned).

    Args:
        iv_df (DataFrame): instantaneous discharge record
        dv_df (DataFrame): Average daily discharge record.
        freq (str): frequency of iv record (pandas offset alias,
            default '15min')
        col (str): parameter column to fill (default '00060', discharge)

    Returns:
        DataFrame: filled-in discharge record (same object as ``iv_df``)
    """
    # Align the daily-mean column name with the instantaneous column name.
    #double brackets makes this a dataframe
    dv_df.rename(axis='columns',
                 mapper={'00060_Mean':'00060'},
                 inplace=True)

    # NOTE(review): limit=96 hard-codes "one day" assuming 15-minute
    # spacing; if freq differs from '15min' the fill window is no longer
    # one day -- confirm intended behavior for other frequencies.
    #limit ffill to one day or 96 samples at 15min intervals
    updating_field = dv_df[[col]].asfreq(freq).ffill(limit=96)

    # Only fill gaps: existing instantaneous values are never overwritten.
    iv_df.update(updating_field, overwrite=False)
    #return update_merge(iv_df, updating_field, na_only=True)
    return iv_df
#This function may be deprecated once pandas.update support joins besides left.
def update_merge(left, right, na_only=False, on=None):
    """Outer-merge *right* into *left* on the index and collapse the
    duplicated columns into a single updated column per name.

    Args:
        left (DataFrame): original data
        right (DataFrame): updated data
        na_only (bool): if True, values from *right* only fill NaN gaps
            in *left*; otherwise *right* overwrites wherever it has data
        on: unused; kept for interface compatibility

    Returns:
        DataFrame: merged frame with one column per overlapping name
    """
    merged = left.merge(right, how='outer',
                        left_index=True, right_index=True)

    # Overlapping columns come back suffixed '_x' (from left) and '_y'
    # (from right); resolve each pair into a single column.
    overlapping = [col[:-2] for col in merged.columns if col.endswith('_x')]
    for name in overlapping:
        left_col = name + '_x'
        right_col = name + '_y'
        if na_only:
            merged[name] = merged[left_col].fillna(merged[right_col])
        else:
            merged[left_col].update(merged[right_col])
            merged[name] = merged[left_col]
        merged.drop([left_col, right_col], axis=1, inplace=True)

    return merged
| 32.034783 | 99 | 0.646851 |