hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
60ff5c6f7092666241901b36f6825248e6f4d160 | 360 | py | Python | api/flat/urls.py | SanjarbekSaminjonov/musofirlar.backend | 23b09e90cc4e3d153063ad1768b5ae1c18ff866d | [
"Apache-2.0"
] | 1 | 2021-12-23T12:43:17.000Z | 2021-12-23T12:43:17.000Z | api/flat/urls.py | SanjarbekSaminjonov/musofirlar.backend | 23b09e90cc4e3d153063ad1768b5ae1c18ff866d | [
"Apache-2.0"
] | null | null | null | api/flat/urls.py | SanjarbekSaminjonov/musofirlar.backend | 23b09e90cc4e3d153063ad1768b5ae1c18ff866d | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from . import views
# URL routes for the flat API.  Each path is served by the correspondingly
# named class-based view from this app's views module.
urlpatterns = [
    path('', views.FlatListAPIView.as_view()),  # flat collection listing
    path('create/', views.FlatCreateAPIView.as_view()),  # create a new flat
    path('<int:pk>/', views.FlatDetailAPIView.as_view()),  # single flat by primary key
    path('<int:pk>/update/', views.FlatUpdateAPIView.as_view()),  # update an existing flat
    path('<int:pk>/delete/', views.FlatDeleteAPIView.as_view()),  # delete a flat
]
| 30 | 64 | 0.683333 |
8801787fa421093191e86dccf0ba799d1e648912 | 506 | py | Python | hyssop_aiohttp/component/__init__.py | hsky77/hyssop | 4ab1e82f9e2592de56589c7426a037564bef49a6 | [
"MIT"
] | null | null | null | hyssop_aiohttp/component/__init__.py | hsky77/hyssop | 4ab1e82f9e2592de56589c7426a037564bef49a6 | [
"MIT"
] | null | null | null | hyssop_aiohttp/component/__init__.py | hsky77/hyssop | 4ab1e82f9e2592de56589c7426a037564bef49a6 | [
"MIT"
] | null | null | null | # Copyright (C) 2020-Present the hyssop authors and contributors.
#
# This module is part of hyssop and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
'''
File created: January 1st 2021
Modified By: hsky77
Last Updated: January 7th 2021 15:30:08 pm
'''
from hyssop.project.component import ComponentTypes
from .aio_client import AioClientComponent
| 25.3 | 69 | 0.774704 |
88019b110382885f8543e3444fa6b00a5c38b567 | 3,691 | py | Python | run_clone.py | tGhattas/IMP-seamless-cloning | 2c81e0bd9bc99955afe06ec4eea187a5a42761e3 | [
"MIT"
] | null | null | null | run_clone.py | tGhattas/IMP-seamless-cloning | 2c81e0bd9bc99955afe06ec4eea187a5a42761e3 | [
"MIT"
] | null | null | null | run_clone.py | tGhattas/IMP-seamless-cloning | 2c81e0bd9bc99955afe06ec4eea187a5a42761e3 | [
"MIT"
] | null | null | null | import cv2
import getopt
import sys
from gui import MaskPainter, MaskMover
from clone import seamless_cloning, shepards_seamless_cloning
from utils import read_image, plt
from os import path
if __name__ == '__main__':
    # Parse command line arguments into the `args` dict.
    # Options: -s source image, -t target image, -m precomputed mask file,
    # -x select Shepard's interpolation instead of the Poisson solver,
    # -v gradient-field toggle, -h help.
    args = {}
    try:
        opts, _ = getopt.getopt(sys.argv[1:], "vxhs:t:m:p:")
    except getopt.GetoptError as err:
        # print help information and exit:
        print(err) # will print something like "option -a not recognized"
        print("See help: run_clone.py -h")
        exit(2)
    for o, a in opts:
        # NOTE(review): `o in ("-h")` is a substring test against the string
        # "-h" -- the parentheses do not create a tuple.  It happens to work
        # for these exact option strings, but `o == "-h"` was likely intended.
        if o in ("-h"):
            # `usage()` is defined elsewhere in this module (not visible here).
            usage()
            exit()
        elif o in ("-s"):
            args["source"] = a
        elif o in ("-t"):
            args["target"] = a
        elif o in ("-m"):
            args["mask"] = a
        elif o in ("-x"):
            # NOTE(review): "x" has no ':' in the getopt string, so `a` is
            # always '' here; mode therefore becomes '' (not "poisson"),
            # which routes to the Shepard branch below -- confirm intended.
            args["mode"] = a.lower()
        elif o in ("-v"):
            # NOTE(review): "v" also takes no argument, so this stores ''.
            args["gradient_field_source_only"] = a
        else:
            continue
    # Source and target images are mandatory.
    if ("source" not in args) or ("target" not in args):
        usage()
        exit()
    # Resolve runtime options.
    # set default mode to Possion solver
    mode = "poisson" if ("mode" not in args) else args["mode"]
    # NOTE(review): inverted-looking logic -- this flag is True exactly when
    # -v was NOT passed; verify this matches the intended CLI semantics.
    gradient_field_source_only = ("gradient_field_source_only" not in args)
    source = read_image(args["source"], 2)
    target = read_image(args["target"], 2)
    if source is None or target is None:
        print('Source or target image not exist.')
        exit()
    if source.shape[0] > target.shape[0] or source.shape[1] > target.shape[1]:
        print('Source image cannot be larger than target image.')
        exit()
    # draw the mask interactively, unless one was supplied with -m
    mask_path = ""
    if "mask" not in args:
        print('Please highlight the object to disapparate.\n')
        mp = MaskPainter(args["source"])
        mask_path = mp.paint_mask()
    else:
        mask_path = args["mask"]
    # adjust mask position for target image (interactive drag)
    print('Please move the object to desired location to apparate.\n')
    mm = MaskMover(args["target"], mask_path)
    offset_x, offset_y, target_mask_path = mm.move_mask()
    # blend with the selected cloning backend
    print('Blending ...')
    target_mask = read_image(target_mask_path, 1)
    offset = offset_x, offset_y
    cloning_tool = seamless_cloning if mode == "poisson" else shepards_seamless_cloning
    # Only the Poisson solver accepts gradient_field_source_only.
    kwargs = {"gradient_field_source_only": gradient_field_source_only} if mode == "poisson" else {}
    blend_result = cloning_tool(source, target, target_mask, offset, **kwargs)
    # Save the result next to the source image, then display it.
    cv2.imwrite(path.join(path.dirname(args["source"]), 'target_result.png'),
                blend_result)
    plt.figure("Result"), plt.imshow(blend_result), plt.show()
    print('Done.\n')
'''
running example:
- Possion based solver:
python run_clone.py -s external/blend-1.jpg -t external/main-1.jpg
python run_clone.py -s external/source3.jpg -t external/target3.jpg -v
- Shepard's interpolation:
python run_clone.py -s external/blend-1.jpg -t external/main-1.jpg -x
python run_clone.py -s external/source3.jpg -t external/target3.jpg -x
''' | 33.554545 | 147 | 0.629098 |
8801ff2af63497d7ca9dadd57139f98ae23b3370 | 5,081 | py | Python | punkweb_boards/rest/serializers.py | Punkweb/punkweb-boards | 8934d15fbff2a3ce9191fdb19d58d029eb55ef16 | [
"BSD-3-Clause"
] | 20 | 2018-02-22T11:36:04.000Z | 2022-03-22T11:48:22.000Z | punkweb_boards/rest/serializers.py | imgVOID/punkweb-boards | 8934d15fbff2a3ce9191fdb19d58d029eb55ef16 | [
"BSD-3-Clause"
] | 28 | 2018-02-22T07:11:46.000Z | 2022-02-23T08:05:29.000Z | punkweb_boards/rest/serializers.py | imgVOID/punkweb-boards | 8934d15fbff2a3ce9191fdb19d58d029eb55ef16 | [
"BSD-3-Clause"
] | 5 | 2018-02-25T11:05:19.000Z | 2021-05-27T02:25:31.000Z | from rest_framework import serializers
from punkweb_boards.conf.settings import SHOUTBOX_DISABLED_TAGS
from punkweb_boards.models import (
BoardProfile,
Category,
Subcategory,
Thread,
Post,
Conversation,
Message,
Report,
Shout,
)
| 30.793939 | 81 | 0.657745 |
88031b336437f0a5497f94eace7653d85a0ddb61 | 1,326 | py | Python | runtime/components/Statistic/moving_minimum_time.py | ulise/hetida-designer | a6be8eb45abf950d5498e3ca756ea1d2e46b5c00 | [
"MIT"
] | 41 | 2020-11-18T10:12:29.000Z | 2022-03-28T21:46:41.000Z | runtime/components/Statistic/moving_minimum_time.py | ulise/hetida-designer | a6be8eb45abf950d5498e3ca756ea1d2e46b5c00 | [
"MIT"
] | 4 | 2020-12-08T15:28:15.000Z | 2022-02-01T11:40:17.000Z | runtime/components/Statistic/moving_minimum_time.py | ulise/hetida-designer | a6be8eb45abf950d5498e3ca756ea1d2e46b5c00 | [
"MIT"
] | 14 | 2020-11-18T11:39:17.000Z | 2022-03-21T15:05:11.000Z | from hetdesrun.component.registration import register
from hetdesrun.datatypes import DataType
import pandas as pd
import numpy as np
# ***** DO NOT EDIT LINES BELOW *****
# These lines may be overwritten if input/output changes.
| 28.212766 | 68 | 0.555053 |
88044ce700e39ec36bb7ba44d3c9905b593ae3a4 | 4,460 | py | Python | painter.py | MikhailNakhatovich/rooms_painting | 51b92797c867d4bb1c8d42a58785c0f4dacd4075 | [
"MIT"
] | null | null | null | painter.py | MikhailNakhatovich/rooms_painting | 51b92797c867d4bb1c8d42a58785c0f4dacd4075 | [
"MIT"
] | null | null | null | painter.py | MikhailNakhatovich/rooms_painting | 51b92797c867d4bb1c8d42a58785c0f4dacd4075 | [
"MIT"
] | null | null | null | import cv2
import ezdxf
import numpy as np
# Dispatch table mapping a DXF entity type name to the function that draws
# it.  The draw_* handlers are defined elsewhere in this module (not visible
# in this chunk).
draw_map = {
    'HATCH': draw_hatch,
    'LINE': draw_line,
    'LWPOLYLINE': draw_lwpolyline,
    'ARC': draw_arc,
    'CIRCLE': draw_circle,
    'ELLIPSE': draw_ellipse,
    'POINT': draw_point,
}
| 35.11811 | 99 | 0.578924 |
8804685e3bac745bbfacb5b5cab8b6e032a05238 | 3,064 | py | Python | misago/misago/users/serializers/auth.py | vascoalramos/misago-deployment | 20226072138403108046c0afad9d99eb4163cedc | [
"MIT"
] | 2 | 2021-03-06T21:06:13.000Z | 2021-03-09T15:05:12.000Z | misago/misago/users/serializers/auth.py | vascoalramos/misago-deployment | 20226072138403108046c0afad9d99eb4163cedc | [
"MIT"
] | null | null | null | misago/misago/users/serializers/auth.py | vascoalramos/misago-deployment | 20226072138403108046c0afad9d99eb4163cedc | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import serializers
from ...acl.useracl import serialize_user_acl
from .user import UserSerializer
User = get_user_model()
__all__ = ["AuthenticatedUserSerializer", "AnonymousUserSerializer"]
# Rebuild the authenticated-user serializer without fields that only make
# sense when serializing *another* user's profile (relationship flags,
# moderation locks, signature and presence data).
# NOTE(review): `exclude_fields` is defined on the serializer class declared
# elsewhere in this module (not visible in this chunk); it presumably
# returns a new serializer class minus the listed fields -- confirm.
AuthenticatedUserSerializer = AuthenticatedUserSerializer.exclude_fields(
    "is_avatar_locked",
    "is_blocked",
    "is_followed",
    "is_signature_locked",
    "meta",
    "signature",
    "status",
)
| 31.587629 | 87 | 0.616841 |
8804c3b09c4502328bb0532182f3bbfcec72facf | 2,171 | py | Python | shop/models.py | mohammadanarul/Ecommerce-Django-YT | afecc8f41693925619b81986d979706c64175360 | [
"MIT"
] | null | null | null | shop/models.py | mohammadanarul/Ecommerce-Django-YT | afecc8f41693925619b81986d979706c64175360 | [
"MIT"
] | null | null | null | shop/models.py | mohammadanarul/Ecommerce-Django-YT | afecc8f41693925619b81986d979706c64175360 | [
"MIT"
] | null | null | null | from ctypes.wintypes import CHAR
from distutils.command.upload import upload
from random import choice
from telnetlib import STATUS
from unicodedata import category
from django.db import models
from ckeditor.fields import RichTextField
from taggit.managers import TaggableManager
# Create your models here.
from mptt.models import MPTTModel, TreeForeignKey
| 38.767857 | 109 | 0.740212 |
8805528dd519906fc019a797eb45969b31e9b633 | 7,470 | py | Python | supriya/patterns/NoteEvent.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | supriya/patterns/NoteEvent.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | supriya/patterns/NoteEvent.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | import uuid
import supriya.commands
import supriya.realtime
from supriya.patterns.Event import Event
| 35.571429 | 86 | 0.545382 |
88055aadf736a00daf291c08df0121953d6b59c8 | 443 | py | Python | emoji_utils.py | ApacheAA/LastSeen | 1fe675b3ee3072d56e9fe094d1d80e1f7d876215 | [
"MIT"
] | null | null | null | emoji_utils.py | ApacheAA/LastSeen | 1fe675b3ee3072d56e9fe094d1d80e1f7d876215 | [
"MIT"
] | null | null | null | emoji_utils.py | ApacheAA/LastSeen | 1fe675b3ee3072d56e9fe094d1d80e1f7d876215 | [
"MIT"
] | 1 | 2021-04-04T02:46:10.000Z | 2021-04-04T02:46:10.000Z | # unicode digit emojis
# Keycap-style hour emojis built from the ASCII digits.
# Code point of the character '0'; `zd` is the short alias used below.
zero_digit_code = zd = 48
# Digit values deliberately left out of the keycap list.
excl_digits = [2, 4, 5, 7]
# Suffix turning a plain digit into its keycap emoji:
# VARIATION SELECTOR-16 + COMBINING ENCLOSING KEYCAP.
udkc = '\U0000fe0f\U000020e3'
# Keycap emojis for the non-excluded digits 0-9...
hours_0_9 = []
for offset in range(10):
    if offset not in excl_digits:
        hours_0_9.append(chr(zd + offset) + udkc)
# ...plus the dedicated KEYCAP TEN emoji for hour 10.
hours_0_9.append('\U0001f51f')
# Hours 11-23 as plain decimal strings (presumably mapped to custom emojis
# elsewhere -- not visible in this chunk).
hours_11_23 = [str(hour) for hour in range(11, 24)]
vote = ('PLUS', 'MINUS')
edit = '\U0001F4DD' | 26.058824 | 54 | 0.654628 |
88059d921ab4392734ab0df3051f19d38efd4fa5 | 1,131 | py | Python | TFBertForMaskedLM/main.py | Sniper970119/ExampleForTransformers | 3348525957c38b2a45898d4f4652879933503b25 | [
"Apache-2.0"
] | 3 | 2021-01-24T04:55:46.000Z | 2021-05-12T15:11:35.000Z | TFBertForMaskedLM/main.py | Sniper970119/ExampleForTransformers | 3348525957c38b2a45898d4f4652879933503b25 | [
"Apache-2.0"
] | null | null | null | TFBertForMaskedLM/main.py | Sniper970119/ExampleForTransformers | 3348525957c38b2a45898d4f4652879933503b25 | [
"Apache-2.0"
] | 1 | 2021-01-24T04:55:53.000Z | 2021-01-24T04:55:53.000Z | # -*- coding:utf-8 -*-
"""
BUG
CREATE BY SNIPER
"""
import tensorflow as tf
import numpy as np
# Allow TensorFlow to grow GPU memory on demand instead of pre-allocating
# each device's full memory at start-up.
for gpu in tf.config.experimental.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)
from transformers import BertTokenizer, TFBertForMaskedLM
# Pretrained cased BERT with a masked-language-modeling head, plus its
# matching tokenizer.
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
model = TFBertForMaskedLM.from_pretrained('bert-base-cased', return_dict=True)
# First probe: predict the token behind [MASK].  Index 6 is the [MASK] slot
# in the tokenized sequence ([CLS] The capital of France is [MASK] . [SEP]).
inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
outputs = model(inputs)
logits = outputs.logits
output = np.argmax(logits[0][6])  # vocabulary id with the highest logit
o1 = tokenizer.decode(int(output))  # decoded top prediction (presumably "Paris")
# Second probe: mask a different position; index 4 is the [MASK] slot in
# ([CLS] The capital of [MASK] is BeiJing . [SEP]).
inputs = tokenizer("The capital of [MASK] is BeiJing.", return_tensors="tf")
outputs = model(inputs)
logits = outputs.logits
output = np.argmax(logits[0][4])
o2 = tokenizer.decode(int(output))
# NOTE(review): results are only held in o1/o2; this prints a blank line,
# so the predictions are presumably inspected in a debugger.
print()
| 21.75 | 78 | 0.546419 |
8805a00d3b1fcbc6ac9137bed25cfb76407c9dfe | 663 | py | Python | mirari/TCS/migrations/0042_auto_20190726_0145.py | gcastellan0s/mirariapp | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | [
"MIT"
] | null | null | null | mirari/TCS/migrations/0042_auto_20190726_0145.py | gcastellan0s/mirariapp | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | [
"MIT"
] | 18 | 2019-12-27T19:58:20.000Z | 2022-02-27T08:17:49.000Z | mirari/TCS/migrations/0042_auto_20190726_0145.py | gcastellan0s/mirariapp | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.5 on 2019-07-26 06:45
from django.db import migrations
| 36.833333 | 365 | 0.653092 |
88063bdddf555a3761172dbc965029eec4f02090 | 6,071 | py | Python | kornia/geometry/calibration/undistort.py | belltailjp/kornia | cfa3b6823d55e276893847f1c3f06ddf108c606a | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2022-01-06T00:36:04.000Z | 2022-01-06T00:36:04.000Z | kornia/geometry/calibration/undistort.py | belltailjp/kornia | cfa3b6823d55e276893847f1c3f06ddf108c606a | [
"ECL-2.0",
"Apache-2.0"
] | 12 | 2021-09-26T14:07:49.000Z | 2022-03-20T14:08:08.000Z | kornia/geometry/calibration/undistort.py | belltailjp/kornia | cfa3b6823d55e276893847f1c3f06ddf108c606a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import torch
from kornia.geometry.linalg import transform_points
from kornia.geometry.transform import remap
from kornia.utils import create_meshgrid
from .distort import distort_points, tilt_projection
# Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L384
def undistort_points(points: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
    r"""Compensate a set of 2D image points for lens distortion.

    The full 14-coefficient model is supported: radial
    :math:`(k_1, k_2, k_3, k_4, k_5, k_6)`, tangential :math:`(p_1, p_2)`,
    thin prism :math:`(s_1, s_2, s_3, s_4)` and tilt :math:`(\tau_x, \tau_y)`.

    Args:
        points: Input image points with shape :math:`(*, N, 2)`.
        K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
        dist: Distortion coefficients
            :math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
            a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`.

    Returns:
        Undistorted 2D points with shape :math:`(*, N, 2)`.

    Example:
        >>> _ = torch.manual_seed(0)
        >>> x = torch.rand(1, 4, 2)
        >>> K = torch.eye(3)[None]
        >>> dist = torch.rand(1, 4)
        >>> undistort_points(x, K, dist)
        tensor([[[-0.1513, -0.1165],
                 [ 0.0711,  0.1100],
                 [-0.0697,  0.0228],
                 [-0.1843, -0.1606]]])
    """
    if points.dim() < 2 and points.shape[-1] != 2:
        raise ValueError(f'points shape is invalid. Got {points.shape}.')

    if K.shape[-2:] != (3, 3):
        raise ValueError(f'K matrix shape is invalid. Got {K.shape}.')

    if dist.shape[-1] not in [4, 5, 8, 12, 14]:
        raise ValueError(f"Invalid number of distortion coefficients. Got {dist.shape[-1]}")

    # Zero-pad the coefficient vector so that all 14 model terms exist.
    if dist.shape[-1] < 14:
        dist = torch.nn.functional.pad(dist, [0, 14 - dist.shape[-1]])

    # Switch to normalized camera coordinates (equivalent to K^-1 [u, v, 1]^T).
    fx = K[..., 0:1, 0]  # focal length x (Bx1)
    fy = K[..., 1:2, 1]  # focal length y (Bx1)
    cx = K[..., 0:1, 2]  # principal point x (Bx1)
    cy = K[..., 1:2, 2]  # principal point y (Bx1)
    x = (points[..., 0] - cx) / fx  # BxN
    y = (points[..., 1] - cy) / fy  # BxN

    # Undo tilt distortion first, when any tilt coefficient is non-zero.
    if torch.any(dist[..., 12] != 0) or torch.any(dist[..., 13] != 0):
        inv_tilt = tilt_projection(dist[..., 12], dist[..., 13], True)
        # Transposed untilt points (instead of [x,y,1]^T we feed [x,y,1]).
        x, y = transform_points(inv_tilt, torch.stack([x, y], dim=-1)).unbind(-1)

    # Invert the radial/tangential/thin-prism model with a fixed-point
    # iteration (5 rounds, mirroring OpenCV's implementation).
    x_dist, y_dist = x, y
    for _ in range(5):
        r2 = x * x + y * y
        r4 = r2 * r2
        r6 = r2 ** 3
        rad_correction = (1 + dist[..., 5:6] * r2 + dist[..., 6:7] * r4 + dist[..., 7:8] * r6) / (
            1 + dist[..., 0:1] * r2 + dist[..., 1:2] * r4 + dist[..., 4:5] * r6
        )
        tang_dx = (
            2 * dist[..., 2:3] * x * y
            + dist[..., 3:4] * (r2 + 2 * x * x)
            + dist[..., 8:9] * r2
            + dist[..., 9:10] * r4
        )
        tang_dy = (
            dist[..., 2:3] * (r2 + 2 * y * y)
            + 2 * dist[..., 3:4] * x * y
            + dist[..., 10:11] * r2
            + dist[..., 11:12] * r4
        )
        x = (x_dist - tang_dx) * rad_correction
        y = (y_dist - tang_dy) * rad_correction

    # Back to pixel coordinates.
    x = fx * x + cx
    y = fy * y + cy
    return torch.stack([x, y], -1)
# Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L287
def undistort_image(image: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
    r"""Compensate an image for lens distortion.

    The full 14-coefficient model is supported: radial
    :math:`(k_1, k_2, k_3, k_4, k_5, k_6)`, tangential :math:`(p_1, p_2)`,
    thin prism :math:`(s_1, s_2, s_3, s_4)` and tilt :math:`(\tau_x, \tau_y)`.

    Args:
        image: Input image with shape :math:`(*, C, H, W)`.
        K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
        dist: Distortion coefficients
            :math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
            a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`.

    Returns:
        Undistorted image with shape :math:`(*, C, H, W)`.

    Example:
        >>> img = torch.rand(1, 3, 5, 5)
        >>> K = torch.eye(3)[None]
        >>> dist_coeff = torch.rand(4)
        >>> out = undistort_image(img, K, dist_coeff)
        >>> out.shape
        torch.Size([1, 3, 5, 5])
    """
    if len(image.shape) < 2:
        raise ValueError(f"Image shape is invalid. Got: {image.shape}.")

    if K.shape[-2:] != (3, 3):
        raise ValueError(f'K matrix shape is invalid. Got {K.shape}.')

    if dist.shape[-1] not in [4, 5, 8, 12, 14]:
        raise ValueError(f'Invalid number of distortion coefficients. Got {dist.shape[-1]}.')

    if not image.is_floating_point():
        raise ValueError(f'Invalid input image data type. Input should be float. Got {image.dtype}.')

    B, _, rows, cols = image.shape

    # One (x, y) pixel coordinate per location of the output image.
    grid = create_meshgrid(rows, cols, False, image.device, image.dtype)
    pixel_coords = grid.reshape(-1, 2)  # (rows*cols) x 2

    # Forward-distort the regular grid: sampling the input image at the
    # distorted locations yields the undistorted output.
    distorted = distort_points(pixel_coords, K, dist)  # B x (rows*cols) x 2
    map_x = distorted[..., 0].reshape(B, rows, cols)
    map_y = distorted[..., 1].reshape(B, rows, cols)

    # Resample the input image along the distorted grid.
    return remap(image, map_x, map_y, align_corners=True)
| 39.679739 | 109 | 0.567452 |
8806b7d99f0084120c35f1e69100c53537ba82bc | 422 | py | Python | Tests/Aula_7a.py | o-Ian/Practice-Python | 1e4b2d0788e70006096a53a7cf038db3148ba4b7 | [
"MIT"
] | 4 | 2021-04-23T18:07:58.000Z | 2021-05-12T11:38:14.000Z | Tests/Aula_7a.py | o-Ian/Practice-Python | 1e4b2d0788e70006096a53a7cf038db3148ba4b7 | [
"MIT"
] | null | null | null | Tests/Aula_7a.py | o-Ian/Practice-Python | 1e4b2d0788e70006096a53a7cf038db3148ba4b7 | [
"MIT"
] | null | null | null | n1 = int(input('Digite um valor: '))
n2 = int(input('Digite outro valor: '))
# Messages restored to proper Portuguese: the accented characters (é, ã, ç)
# had been stripped by an encoding error ("subtrao" -> "subtração",
# "diviso" -> "divisão", and the missing "é" between the operands).
print(f'A soma é: {n1 + n2}!')
print(f'A subtração entre {n1} e {n2} é {n1 - n2}!')
print(f'A multiplicação desses valores é {n1 * n2}!')
print(f'A divisão entre {n1} e {n2} é {n1 / n2:.3}')  # 3 significant digits
# Floor division and remainder share one output line (end=' ').
print(f'A divisão sem restos é {n1 // n2}!', end=' ')
print(f'O resto dessa divisão é {n1 % n2}')
| 46.888889 | 63 | 0.601896 |
880784410cfda04eacd518622e54861cdb7a1605 | 6,288 | py | Python | manubot/cite/tests/test_citekey_api.py | shuvro-zz/manubot | 9023b7fbfa0b235c14a4d702516bc0cd6d3101ed | [
"BSD-3-Clause"
] | 1 | 2019-11-11T05:17:28.000Z | 2019-11-11T05:17:28.000Z | manubot/cite/tests/test_citekey_api.py | shuvro-zz/manubot | 9023b7fbfa0b235c14a4d702516bc0cd6d3101ed | [
"BSD-3-Clause"
] | null | null | null | manubot/cite/tests/test_citekey_api.py | shuvro-zz/manubot | 9023b7fbfa0b235c14a4d702516bc0cd6d3101ed | [
"BSD-3-Clause"
] | null | null | null | """Tests API-level functions in manubot.cite. Both functions are found in citekey.py"""
import pytest
from manubot.cite import citekey_to_csl_item, standardize_citekey
def test_citekey_to_csl_item_arxiv():
    """Test CSL item generation for a versioned arXiv citekey."""
    csl_item = citekey_to_csl_item("arxiv:cond-mat/0703470v2")
    expected = {
        "id": "ES92tcdg",
        "URL": "https://arxiv.org/abs/cond-mat/0703470v2",
        "number": "cond-mat/0703470v2",
        "version": "2",
        "type": "report",
        "container-title": "arXiv",
        "title": "Portraits of Complex Networks",
        "DOI": "10.1209/0295-5075/81/68004",
    }
    for field, value in expected.items():
        assert csl_item[field] == value
    # Authors without separately parsed given/family names are exposed as a
    # single "literal" string.
    assert csl_item["author"][0]["literal"] == "J. P. Bagrow"
def test_citekey_to_csl_item_pmc():
    """
    Test CSL item generation for a PubMed Central citekey.

    Expected values come from:
    https://api.ncbi.nlm.nih.gov/lit/ctxp/v1/pmc/?format=csl&id=3041534
    """
    # Plain string literal: the original used an f-string with no
    # placeholders (flake8 F541).
    citekey = "pmcid:PMC3041534"
    csl_item = citekey_to_csl_item(citekey)
    assert csl_item["id"] == "RoOhUFKU"
    assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3041534/"
    assert csl_item["container-title-short"] == "Summit Transl Bioinform"
    assert (
        csl_item["title"]
        == "Secondary Use of EHR: Data Quality Issues and Informatics Opportunities"
    )
    authors = csl_item["author"]
    assert authors[0]["family"] == "Botsis"
    assert csl_item["PMID"] == "21347133"
    assert csl_item["PMCID"] == "PMC3041534"
    # The note field records provenance metadata added by Manubot.
    assert "generated by Manubot" in csl_item["note"]
    assert "standard_id: pmcid:PMC3041534" in csl_item["note"]
def test_citekey_to_csl_item_pubmed_1():
    """
    Test CSL item generation for a PubMed citekey.

    Expected values generated from XML returned by
    https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=21347133&rettype=full
    """
    citekey = "pmid:21347133"
    csl_item = citekey_to_csl_item(citekey)
    # `id` is derived from the standardized citekey (presumably a hash).
    assert csl_item["id"] == "y9ONtSZ9"
    assert csl_item["type"] == "article-journal"
    assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pubmed/21347133"
    assert csl_item["container-title"] == "Summit on translational bioinformatics"
    assert (
        csl_item["title"]
        == "Secondary Use of EHR: Data Quality Issues and Informatics Opportunities."
    )
    assert csl_item["issued"]["date-parts"] == [[2010, 3, 1]]
    authors = csl_item["author"]
    assert authors[0]["given"] == "Taxiarchis"
    assert authors[0]["family"] == "Botsis"
    # Same article as the PMC test; both database identifiers are present.
    assert csl_item["PMID"] == "21347133"
    assert csl_item["PMCID"] == "PMC3041534"
def test_citekey_to_csl_item_pubmed_2():
    """
    Test CSL item generation for a second PubMed citekey.

    Expected values generated from XML returned by
    https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=27094199&rettype=full
    """
    citekey = "pmid:27094199"
    csl_item = citekey_to_csl_item(citekey)
    # Debug aid: pytest shows this output when the test fails (or with -s).
    print(csl_item)
    assert csl_item["id"] == "alaFV9OY"
    assert csl_item["type"] == "article-journal"
    assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pubmed/27094199"
    assert csl_item["container-title"] == "Circulation. Cardiovascular genetics"
    assert csl_item["container-title-short"] == "Circ Cardiovasc Genet"
    assert csl_item["page"] == "179-84"
    assert (
        csl_item["title"]
        == "Genetic Association-Guided Analysis of Gene Networks for the Study of Complex Traits."
    )
    # Year and month only; no day is present in this record.
    assert csl_item["issued"]["date-parts"] == [[2016, 4]]
    authors = csl_item["author"]
    assert authors[0]["given"] == "Casey S"
    assert authors[0]["family"] == "Greene"
    assert csl_item["PMID"] == "27094199"
    assert csl_item["DOI"] == "10.1161/circgenetics.115.001181"
def test_citekey_to_csl_item_pubmed_with_numeric_month():
    """
    Regression test for PubMed records whose publication month is numeric.

    Generated from XML returned by
    https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=29028984&rettype=full

    See https://github.com/manubot/manubot/issues/69
    """
    citekey = "pmid:29028984"
    csl_item = citekey_to_csl_item(citekey)
    # Debug aid: pytest shows this output when the test fails (or with -s).
    print(csl_item)
    # The numeric month must be parsed into the issued date, not dropped.
    assert csl_item["issued"]["date-parts"] == [[2018, 3, 15]]
def test_citekey_to_csl_item_pubmed_book():
    """
    Extracting CSL metadata from books in PubMed is not supported.

    Logic not implemented to parse XML returned by
    https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=29227604&rettype=full
    """
    # Book records should fail loudly rather than yield incomplete metadata.
    with pytest.raises(NotImplementedError):
        citekey_to_csl_item("pmid:29227604")
def test_citekey_to_csl_item_isbn():
    """Test CSL item generation for an ISBN citekey."""
    item = citekey_to_csl_item("isbn:9780387950693")
    assert item["type"] == "book"
    assert item["title"] == "Complex analysis"
| 38.109091 | 98 | 0.667144 |
8808d379a8ce975e29508dea21a42397452fc552 | 2,489 | py | Python | vispy/io/datasets.py | hmaarrfk/vispy | 7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2 | [
"BSD-3-Clause"
] | 2,617 | 2015-01-02T07:52:18.000Z | 2022-03-29T19:31:15.000Z | vispy/io/datasets.py | hmaarrfk/vispy | 7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2 | [
"BSD-3-Clause"
] | 1,674 | 2015-01-01T00:36:08.000Z | 2022-03-31T19:35:56.000Z | vispy/io/datasets.py | hmaarrfk/vispy | 7f3f6f60c8462bb8a3a8fa03344a2e6990b86eb2 | [
"BSD-3-Clause"
] | 719 | 2015-01-10T14:25:00.000Z | 2022-03-02T13:24:56.000Z | # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from os import path as op
from ..util import load_data_file
# This is the package data dir, not the dir for config, etc.
DATA_DIR = op.join(op.dirname(__file__), '_data')
def load_iris():
    """Load the iris dataset.

    Returns
    -------
    iris : NpzFile
        data['data'] : a (150, 4) NumPy array with the iris' features
        data['group'] : a (150,) NumPy array with the iris' group
    """
    # force_download pins a refresh date for the cached data file.
    fname = load_data_file('iris/iris.npz', force_download='2014-09-04')
    return np.load(fname)
def load_crate():
    """Load an image of a crate.

    Returns
    -------
    crate : array
        256x256x3 crate image.
    """
    archive = np.load(load_data_file('orig/crate.npz'))
    return archive['crate']
def pack_unit(value):
    """Pack float values in [0, 1] into 4 unsigned int8 values.

    Each value is expanded into its first four base-256 "digits", i.e. the
    bytes of a fixed-point representation.

    Parameters
    ----------
    value : array
        Float array; assumes values in [0, 1].

    Returns
    -------
    pack : array
        uint8 array with shape ``value.shape + (4,)``.
    """
    packed = np.zeros(value.shape + (4,), dtype=np.ubyte)
    remainder = value
    for byte_index in range(4):
        # modf splits into (fractional, integral): the integral part is the
        # next base-256 digit, the fraction carries over to the next round.
        remainder, packed[..., byte_index] = np.modf(remainder * 256.)
    return packed
def pack_ieee(value):
    """Pack a float array's IEEE 754 binary representation into 4 uint8.

    Parameters
    ----------
    value : array
        Input array; assumes a 4-byte dtype such as float32 (the reshape
        below requires exactly 4 bytes per element).

    Returns
    -------
    pack : array
        uint8 array with shape ``value.shape + (4,)`` holding the raw bytes
        of each element in native byte order.
    """
    # np.fromstring is deprecated for binary data (and removed in newer
    # NumPy releases); np.frombuffer is the supported replacement.
    # frombuffer yields a read-only view over the bytes object, so copy to
    # keep the result writable, as fromstring's output was.
    raw = np.frombuffer(value.tobytes(), np.ubyte)
    return raw.reshape((value.shape + (4,))).copy()
def load_spatial_filters(packed=True):
    """Load the interpolation-kernel lookup table for spatial filtering.

    Parameters
    ----------
    packed : bool
        If True, return the kernel packed into 4 unsigned bytes per value,
        the "packed" representation used in GLSL code.

    Returns
    -------
    kernel : array
        16x1024x4 (packed float in rgba) or 16x1024 (unpacked float):
        16 interpolation kernels of length 1024 each.
    names : tuple of strings
        The matching interpolation names, plus "Nearest", which does not
        require a filter but can still be used.
    """
    kernel = np.load(op.join(DATA_DIR, 'spatial-filters.npy'))
    if packed:
        # Convert to the packed byte representation expected by shaders.
        kernel = pack_unit(kernel)
    names = ("Bilinear", "Hanning", "Hamming", "Hermite",
             "Kaiser", "Quadric", "Bicubic", "CatRom",
             "Mitchell", "Spline16", "Spline36", "Gaussian",
             "Bessel", "Sinc", "Lanczos", "Blackman", "Nearest")
    return kernel, names
| 26.2 | 73 | 0.60225 |
8809a9e20076798a2ad0ec40dc57152d0a032e41 | 13,731 | py | Python | universal_portfolio/knapsack.py | jehung/universal_portfolio | de731a6166ff057c8d6f3f73f80f9aca151805fa | [
"CC-BY-3.0"
] | 14 | 2017-03-01T07:54:17.000Z | 2021-10-10T11:07:56.000Z | universal_portfolio/knapsack.py | jehung/universal_portfolio | de731a6166ff057c8d6f3f73f80f9aca151805fa | [
"CC-BY-3.0"
] | null | null | null | universal_portfolio/knapsack.py | jehung/universal_portfolio | de731a6166ff057c8d6f3f73f80f9aca151805fa | [
"CC-BY-3.0"
] | 3 | 2017-06-27T10:18:03.000Z | 2020-07-03T01:29:56.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
np.random.seed(1335) # for reproducibility
np.set_printoptions(precision=5, suppress=True, linewidth=150)
import os
import pandas as pd
import backtest as twp
from matplotlib import pyplot as plt
from sklearn import metrics, preprocessing
from talib.abstract import *
from sklearn.externals import joblib
import quandl
import random, timeit
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from keras.optimizers import RMSprop, Adam
'''
Name: The Self Learning Quant, Example 3
Author: Daniel Zakrisson
Created: 30/03/2016
Copyright: (c) Daniel Zakrisson 2016
Licence: BSD
Requirements:
Numpy
Pandas
MatplotLib
scikit-learn
TA-Lib, instructions at https://mrjbq7.github.io/ta-lib/install.html
Keras, https://keras.io/
Quandl, https://www.quandl.com/tools/python
backtest.py from the TWP library. Download backtest.py and put in the same folder
/plt create a subfolder in the same directory where plot files will be saved
'''
# Initialize first state, all items are placed deterministically
# Take Action
# Get Reward, the reward is returned at the end of an episode
if __name__ == "__main__":
    # This neural network is the the Q-function, run it like this:
    # model.predict(state.reshape(1,64), batch_size=1)

    # --- hyperparameters ---
    batch_size = 7
    num_features = 2544
    epochs = 3
    gamma = 0.95  # since the reward can be several time steps away, make gamma high
    epsilon = 1  # initial exploration rate (decayed per epoch below)
    batchSize = 100  # experience-replay minibatch size
    buffer = 200  # experience-replay capacity
    replay = []  # stores tuples of (S, A, R, S')
    learning_progress = []

    # --- Q-network: two stacked LSTMs -> 4 linear Q-values (one per action) ---
    model = Sequential()
    model.add(LSTM(64,
                   input_shape=(1, num_features),
                   return_sequences=True,
                   stateful=False))
    model.add(Dropout(0.5))
    model.add(LSTM(64,
                   input_shape=(1, num_features),
                   return_sequences=False,
                   stateful=False))
    model.add(Dropout(0.5))
    model.add(Dense(4, init='lecun_uniform'))
    model.add(Activation('linear'))  # linear output so we can have range of real-valued outputs

    rms = RMSprop()  # NOTE(review): created but unused; adam is the optimizer actually compiled
    adam = Adam()
    model.compile(loss='mse', optimizer=adam)

    start_time = timeit.default_timer()
    # read_convert_data(symbol='XBTEUR') #run once to read indata, resample and convert to pickle
    # all_init_data / take_action / get_reward / evaluate_Q are defined
    # elsewhere in the original file (not visible in this fragment).
    astate, xdata, aprice_data = all_init_data()
    bstate, test_data, test_price_data = all_init_data(test=True)
    '''
    bstate, test_data, test_price_data = all_init_data(test=True)
    print(astate.shape)
    print(bstate.shape)
    print(xdata.shape)
    print(test_data.shape)
    print(price_data.shape)
    print(test_price_data.shape)
    '''

    h = 0  # circular write index into the replay buffer
    # signal = pd.Series(index=market_data.index)
    signal = pd.Series(index=np.arange(len(xdata)))  # trade signal per time step

    for i in range(epochs):
        # NOTE(review): the comment and the calls disagree — the last epoch
        # calls all_init_data() (training data) while every earlier epoch gets
        # test=True; one of the two branches is likely inverted. Confirm intent.
        if i == epochs - 1:  # the last epoch, use test data set
            state, xdata, price_data = all_init_data()
        else:
            state, xdata, price_data = all_init_data(test=True)
        status = 1
        terminal_state = 0
        time_step = 5
        # while game still in progress
        while (status == 1):
            # We are in state S
            # Let's run our Q function on S to get Q values for all possible actions
            print('epoch ' + str(i))
            qval = model.predict(state, batch_size=batch_size)
            if (random.random() < epsilon):  # choose random action
                action = np.random.randint(0, 4)  # assumes 4 different actions
            else:  # choose best action from Q(s,a) values
                action = (np.argmax(qval))
            # Take action, observe new state S'
            new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
            # Observe reward
            reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state)
            print('new_state', new_state)
            print('reward', reward)
            # Experience replay storage
            if (len(replay) < buffer):  # if buffer not filled, add to it
                replay.append((state, action, reward, new_state))
                # print(time_step, reward, terminal_state)
            else:  # if buffer full, overwrite old values
                if (h < (buffer - 1)):
                    h += 1
                else:
                    h = 0
                replay[h] = (state, action, reward, new_state)
                # randomly sample our experience replay memory
                minibatch = random.sample(replay, batchSize)
                X_train = []
                y_train = []
                for memory in minibatch:
                    # Get max_Q(S',a)
                    old_state, action, reward, new_state = memory
                    old_qval = model.predict(old_state, batch_size=batch_size)
                    newQ = model.predict(new_state, batch_size=batch_size)
                    maxQ = np.max(newQ)
                    y = np.zeros((1, 4))
                    y[:] = old_qval[:]
                    if terminal_state == 0:  # non-terminal state: bootstrap from max future Q
                        update = (reward + (gamma * maxQ))
                    else:  # terminal state: target is the raw reward
                        update = reward
                    y[0][action] = update  # only the taken action's target changes
                    X_train.append(old_state)
                    y_train.append(y.reshape(4, ))
                X_train = np.squeeze(np.array(X_train), axis=(1))
                y_train = np.array(y_train)
                model.fit(X_train, y_train, batch_size=batchSize, epochs=100, verbose=0)
            state = new_state
            if terminal_state == 1:  # if reached terminal state, update epoch status
                status = 0
        # evaluate the greedy policy on the held-out data after each epoch
        eval_reward = evaluate_Q(test_data, model, i)
        # eval_reward = value_iter(test_data, epsilon, epochs)
        learning_progress.append(eval_reward)
        print("Epoch #: %s Reward: %f Epsilon: %f" % (i, eval_reward, epsilon))
        if epsilon > 0.1:  # decrement epsilon over time
            epsilon -= (1.0 / epochs)

    elapsed = np.round(timeit.default_timer() - start_time, decimals=2)
    print("Completed in %f" % (elapsed,))

    # --- backtest the final signal and plot/save the results ---
    bt = twp.Backtest(pd.Series(data=[x[0] for x in test_price_data]), signal, signalType='shares')
    bt.data['delta'] = bt.data['shares'].diff().fillna(0)
    print(bt.data)
    bt.data.to_csv('plt/knapsack_data.csv')
    # NOTE(review): on Python 3 `filter` returns an iterator; `v == v` drops
    # NaN values. Confirm np.unique handles the iterator as intended here.
    unique, counts = np.unique(filter(lambda v: v == v, signal.values), return_counts=True)
    print(np.asarray((unique, counts)).T)

    plt.figure()
    plt.subplot(3, 1, 1)
    bt.plotTrades()
    plt.subplot(3, 1, 2)
    bt.pnl.plot(style='x-')
    plt.subplot(3, 1, 3)
    plt.plot(learning_progress)
    print('to plot', learning_progress)
    plt.savefig('plt/knapsack_summary' + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
    plt.show()
| 36.134211 | 145 | 0.610516 |
880a98e6cfdd279e5621d17d6384a4912cab6353 | 7,165 | py | Python | experiments/experiment_01.py | bask0/q10hybrid | 9b18af9dd382c65dd667139f97e7da0241091a2c | [
"Apache-2.0"
] | 2 | 2021-05-05T13:37:58.000Z | 2021-05-05T15:11:07.000Z | experiments/experiment_01.py | bask0/q10hybrid | 9b18af9dd382c65dd667139f97e7da0241091a2c | [
"Apache-2.0"
] | null | null | null | experiments/experiment_01.py | bask0/q10hybrid | 9b18af9dd382c65dd667139f97e7da0241091a2c | [
"Apache-2.0"
] | 1 | 2021-11-23T18:13:08.000Z | 2021-11-23T18:13:08.000Z |
import pytorch_lightning as pl
import optuna
import xarray as xr
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
import os
import shutil
from argparse import ArgumentParser
from datetime import datetime
from project.fluxdata import FluxData
from models.hybrid import Q10Model
# Hardcoded `Trainer` args. Note that these cannot be passed via cli.
TRAINER_ARGS = dict(
max_epochs=100,
log_every_n_steps=1,
weights_summary=None
)
def main(parser: ArgumentParser = None, **kwargs):
    """Build the CLI, set up the Optuna study, and run the grid search.

    Use kwargs to overload argparse args (they are merged with the
    hardcoded TRAINER_ARGS and written onto the parsed namespace).
    """
    # ------------
    # args
    # ------------
    if parser is None:
        parser = ArgumentParser()
    parser = Objective.add_project_specific_args(parser)
    parser = pl.Trainer.add_argparse_args(parser)
    parser = Q10Model.add_model_specific_args(parser)
    parser.add_argument('--create_study', action='store_true', help='create new study (deletes old) and exits')
    parser.add_argument('--single_seed', action='store_true', help='use only one seed instead of (1, ..., 10).')
    args = parser.parse_args()

    # Hardcoded trainer args (and any caller kwargs) override CLI values.
    globargs = TRAINER_ARGS.copy()
    globargs.update(kwargs)
    for k, v in globargs.items():
        setattr(args, k, v)

    # ------------
    # study setup
    # ------------
    # Full cartesian grid explored by Optuna's GridSampler.
    search_space = {
        'q10_init': [0.5, 1.5, 2.5],
        'seed': [0] if args.single_seed else [i for i in range(10)],
        'dropout': [0.0, 0.2, 0.4, 0.6],
        'use_ta': [True, False]
    }

    sql_file = os.path.abspath(os.path.join(args.log_dir, "optuna.db"))
    sql_path = f'sqlite:///{sql_file}'

    # NOTE(review): bitwise `|` on booleans works here but `or` is the
    # idiomatic, short-circuiting operator.
    if args.create_study | (not os.path.isfile(sql_file)):
        if os.path.isdir(args.log_dir):
            shutil.rmtree(args.log_dir)  # wipe stale logs before recreating the study
        os.makedirs(args.log_dir, exist_ok=True)
        study = optuna.create_study(
            study_name="q10hybrid",
            storage=sql_path,
            sampler=optuna.samplers.GridSampler(search_space),
            direction='minimize',
            load_if_exists=False)

    if args.create_study:
        return None

    if not os.path.isdir(args.log_dir):
        os.makedirs(args.log_dir)

    # ------------
    # run study
    # ------------
    # Number of trials = size of the cartesian product of the grid.
    n_trials = 1
    for _, v in search_space.items():
        n_trials *= len(v)

    study = optuna.load_study(
        study_name="q10hybrid",
        storage=sql_path,
        sampler=optuna.samplers.GridSampler(search_space))
    study.optimize(Objective(args), n_trials=n_trials)


if __name__ == '__main__':
    main()
| 32.130045 | 112 | 0.579204 |
880bad578d9944f1ec06e580824fc923f1978b8e | 2,886 | py | Python | main.py | warifp/InstagramPostAndDelete | d22577325eccf42e629cef076ab43f7788587bc4 | [
"MIT"
] | 4 | 2019-06-03T04:00:51.000Z | 2021-11-09T21:34:38.000Z | main.py | nittaya1990/InstagramPostAndDelete | d22577325eccf42e629cef076ab43f7788587bc4 | [
"MIT"
] | null | null | null | main.py | nittaya1990/InstagramPostAndDelete | d22577325eccf42e629cef076ab43f7788587bc4 | [
"MIT"
] | 4 | 2019-10-30T19:44:08.000Z | 2021-09-07T16:30:09.000Z | #! @@Author : WAHYU ARIF PURNOMO
#! @@Create : 18 Januari 2019
#! @@Modify : 19 Januari 2019
#! Gambar dari reddit.
#! Gunakan VPN karena DNS situs reddit sudah di blokir dari negara Indonesia.
import os
import json
import requests
import progressbar
from PIL import Image
from lxml import html
from time import sleep
from ImageDeleter import delete_png
from InstagramAPI import InstagramAPI
# Prompt for credentials and build the API client. This rebinds the imported
# InstagramAPI class name to the instance — shadowing, but it only happens once.
InstagramAPI = InstagramAPI(input("Username: "), input("Password: "))

# Retry login forever; after each failure wait 300 s with a visible countdown.
while True:
    if (InstagramAPI.login()):
        break
    else:
        for x in range(300):
            os.system('cls')  # Windows-only screen clear
            print(300-x)
            sleep(1)

# NOTE(review): `global` at module level is a no-op statement.
global useable
useable = []

os.system('pause')  # Windows-only "press any key to continue" wait

# Main loop: fetch and upload an image, then wait for a keypress.
# get_image() is defined elsewhere in the original file (not visible here).
while True:
    get_image()
    print("Gambar sukses di upload.")  # Indonesian: "Image uploaded successfully."
    sleep(5)
    os.system('pause')
880bba102de2d9226a037a90ff3d98814009f0c2 | 2,549 | py | Python | pyspectator/collection.py | maximilionus/pyspectator-x | 1265f1f39e7ca0534f9e6ffcd7087f2ebced3397 | [
"BSD-3-Clause"
] | 39 | 2017-02-27T15:21:21.000Z | 2021-12-31T03:23:43.000Z | pyspectator/collection.py | maximilionus/pyspectator-x | 1265f1f39e7ca0534f9e6ffcd7087f2ebced3397 | [
"BSD-3-Clause"
] | 18 | 2017-07-09T00:16:28.000Z | 2021-12-03T21:01:38.000Z | pyspectator/collection.py | maximilionus/pyspectator-x | 1265f1f39e7ca0534f9e6ffcd7087f2ebced3397 | [
"BSD-3-Clause"
] | 25 | 2017-03-05T07:59:34.000Z | 2021-12-15T15:22:58.000Z | from collections import MutableMapping, Container
from datetime import datetime, timedelta
from pyvalid import accepts
# NOTE(review): the enclosing class statement (apparently a MutableMapping
# named LimitedTimeTable, per __all__ below) was lost in extraction; these
# two functions read as orphaned methods in this fragment.
def __get_slice(self, start, end):
    # Return the mapping's keys, sorted, restricted to positions [start:end).
    keys = sorted(self.keys())
    return keys[start:end]


def __getitem__(self, item):
    # Delegate item lookup to the name-mangled backing storage;
    # presumably self.__storage is a dict — confirm in the full class.
    return self.__storage.__getitem__(item)


__all__ = ['LimitedTimeTable']
| 27.117021 | 60 | 0.617497 |
880be95fb023fa99a8e4f0737f4b060a1751c3cd | 576 | py | Python | keyboardrow.py | AndySamoil/Elite_Code | 7dc3b7b1b8688c932474f8a10fd2637fd2918bdd | [
"MIT"
] | null | null | null | keyboardrow.py | AndySamoil/Elite_Code | 7dc3b7b1b8688c932474f8a10fd2637fd2918bdd | [
"MIT"
] | null | null | null | keyboardrow.py | AndySamoil/Elite_Code | 7dc3b7b1b8688c932474f8a10fd2637fd2918bdd | [
"MIT"
] | null | null | null | def findWords(self, words: List[str]) -> List[str]:
''' sets and iterate through sets
'''
every = [set("qwertyuiop"), set("asdfghjkl"), set("zxcvbnm")]
ans = []
for word in words:
l = len(word)
for sett in every:
count = 0
for let in word:
if let.lower() in sett:
count += 1
if count == l:
ans.append(word)
return ans | 27.428571 | 69 | 0.362847 |
880bf5d9dd1fda0ba4fc9eafcb000337f1273e4d | 1,673 | py | Python | DFS_Backtracking/31. Next Permutation.py | xli1110/LC | 3c18b8809c5a21a62903060eef659654e0595036 | [
"MIT"
] | 2 | 2021-04-02T11:57:46.000Z | 2021-04-02T11:57:47.000Z | DFS_Backtracking/31. Next Permutation.py | xli1110/LC | 3c18b8809c5a21a62903060eef659654e0595036 | [
"MIT"
] | null | null | null | DFS_Backtracking/31. Next Permutation.py | xli1110/LC | 3c18b8809c5a21a62903060eef659654e0595036 | [
"MIT"
] | null | null | null |
if __name__ == "__main__":
    # Solution is defined earlier in the original file (not visible in this
    # fragment); this variant stores its answer on sol.res rather than only
    # mutating nums in place.
    sol = Solution()
    # nums = [2, 1, 3]
    nums = [1, 5, 1]
    sol.nextPermutation(nums)
    print(sol.res)
| 26.140625 | 89 | 0.499701 |
880c149eaa01b78f766f6b8032706b3698b74fbc | 1,392 | py | Python | plugin/DataExport/extend.py | konradotto/TS | bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e | [
"Apache-2.0"
] | 125 | 2015-01-22T05:43:23.000Z | 2022-03-22T17:15:59.000Z | plugin/DataExport/extend.py | konradotto/TS | bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e | [
"Apache-2.0"
] | 59 | 2015-02-10T09:13:06.000Z | 2021-11-11T02:32:38.000Z | plugin/DataExport/extend.py | konradotto/TS | bf088bd8432b1e3f4b8c8c083650a30d9ef2ae2e | [
"Apache-2.0"
] | 98 | 2015-01-17T01:25:10.000Z | 2022-03-18T17:29:42.000Z | #!/usr/bin/python
# Copyright (C) 2015 Ion Torrent Systems, Inc. All Rights Reserved
import subprocess
import re
pluginName = 'DataExport'  # must match the plugin's registered name
pluginDir = ""  # NOTE(review): left empty here; presumably assigned elsewhere — confirm
# Filesystem types accepted as export targets, split by locality.
networkFS = ["nfs", "cifs"]
localFS = ["ext4", "ext3", "xfs", "ntfs", "exfat", "vboxsf"]
supportedFS = ",".join(localFS + networkFS)  # comma-separated list, local first
| 27.84 | 131 | 0.569684 |
880c1d871834c4fa9a80907f77053c53af975688 | 5,205 | py | Python | boids/biods_object.py | PaulAustin/sb7-pgz | fca3e50132b9d1894fb348b2082e83ce7b937b19 | [
"MIT"
] | 1 | 2022-02-21T15:54:01.000Z | 2022-02-21T15:54:01.000Z | boids/biods_object.py | PaulAustin/sb7-pgz | fca3e50132b9d1894fb348b2082e83ce7b937b19 | [
"MIT"
] | null | null | null | boids/biods_object.py | PaulAustin/sb7-pgz | fca3e50132b9d1894fb348b2082e83ce7b937b19 | [
"MIT"
] | 2 | 2020-11-21T16:34:22.000Z | 2021-01-27T10:30:34.000Z | # Ported from JavaSript version to Python and Pygame Zero
# Designed to work well with mu-editor environment.
#
# The original Javascript version wasdonw by Ben Eater
# at https://github.com/beneater/boids (MIT License)
# No endorsement implied.
#
# Complex numbers are are used as vectors to integrate x and y positions and velocities
# MIT licesense (details in parent directory)
import random
import time
HEIGHT = 500 # window height
WIDTH = 900 # window width
MARGIN = 150 # disstance to start avoid edge
NUM_BOIDS = 75
VISUAL_RANGE = 70 # radius of influence for most algoriths
SPEED_LIMIT_UPPER = 13 # boids canonly fly so fast.
SPEED_LIMIT_LOWER = 3 # boid will fall if flying too slow
SPEED_INIT = 20 # range for random velocity
MIN_DISTANCE = 10 # the distance to stay away from other boids
AVOID_FACTOR = 0.05 # % location change if too close
CENTERING_FACTOR = 0.050 # % location change to pull to center
MATCHING_FACTOR = 0.015 # % velocity change if close
MARGIN_FACTOR = 0.25+0.0j # rate of turning away from edge
HISTORY_LENGTH = 30
BACK_COLOR = (0, 0, 90)
BOID_COLOR = (255, 128, 128)
BOID_SIZE = 8
TRAIL_COLOR = (255, 255, 64)
g_boids = []
init()
| 31.932515 | 88 | 0.602882 |
880d1df9e7fa8cda82be2e587cdbae5ea94afb44 | 4,960 | py | Python | upoutdf/types/recurring/yearly.py | UpOut/UpOutDF | 5d2f87884565d98b77e25c6a26af7dbea266be76 | [
"MIT"
] | null | null | null | upoutdf/types/recurring/yearly.py | UpOut/UpOutDF | 5d2f87884565d98b77e25c6a26af7dbea266be76 | [
"MIT"
] | null | null | null | upoutdf/types/recurring/yearly.py | UpOut/UpOutDF | 5d2f87884565d98b77e25c6a26af7dbea266be76 | [
"MIT"
] | null | null | null | # coding: utf-8
import pytz
from dateutil.relativedelta import relativedelta
from .base import BaseRecurring
from upoutdf.occurences import OccurenceBlock, OccurenceGroup
from upoutdf.constants import YEARLY_TYPE
| 30.060606 | 87 | 0.594153 |
880d94d22915e741e24ad40b49de37d7ad8757e9 | 625 | py | Python | project/urls.py | dbinetti/captable | 29769b2b99a3185fda241b3087ccbe621f8c97a2 | [
"BSD-2-Clause"
] | 18 | 2016-05-12T18:49:09.000Z | 2021-10-05T13:29:09.000Z | project/urls.py | dbinetti/captable | 29769b2b99a3185fda241b3087ccbe621f8c97a2 | [
"BSD-2-Clause"
] | null | null | null | project/urls.py | dbinetti/captable | 29769b2b99a3185fda241b3087ccbe621f8c97a2 | [
"BSD-2-Clause"
] | 5 | 2015-08-28T02:50:30.000Z | 2019-11-14T04:03:05.000Z | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView
# Legacy Django (<1.10) URLconf using patterns() with an empty view prefix.
urlpatterns = patterns(
    '',
    url(r'^$', TemplateView.as_view(template_name='home.html'), name='home'),
    url(r'^about/$', TemplateView.as_view(template_name='about.html'), name='about'),
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', include(admin.site.urls)),
    # Catch-all: must stay last so it does not shadow the routes above.
    url(r'^', include('apps.captable.urls',)),
)
# Serve static files (development convenience).
urlpatterns += staticfiles_urlpatterns()
| 31.25 | 85 | 0.7248 |
880ea7ec7f81ab78d2446766017eac398be3d80f | 9,388 | py | Python | common/evaluators/bert_emotion_evaluator.py | marjanhs/procon20 | c49ad38a77e58fd84ff0409cc9f5081c6de0bf0b | [
"MIT"
] | 5 | 2020-07-12T08:27:47.000Z | 2021-10-16T11:40:48.000Z | common/evaluators/bert_emotion_evaluator.py | marjanhs/procon20 | c49ad38a77e58fd84ff0409cc9f5081c6de0bf0b | [
"MIT"
] | null | null | null | common/evaluators/bert_emotion_evaluator.py | marjanhs/procon20 | c49ad38a77e58fd84ff0409cc9f5081c6de0bf0b | [
"MIT"
] | 1 | 2021-04-12T09:54:37.000Z | 2021-04-12T09:54:37.000Z | import warnings
import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from tqdm import tqdm
from datasets.bert_processors.abstract_processor import convert_examples_to_features_with_emotion, \
convert_examples_to_hierarchical_features
from utils.preprocessing import pad_input_matrix
from utils.tokenization import BertTokenizer
from utils.emotion import Emotion
# Suppress warnings from sklearn.metrics
warnings.filterwarnings('ignore')
| 51.582418 | 166 | 0.678526 |
881189eb3c68f5eb6d4b3bde9fa97065430d1651 | 781 | py | Python | model/mlp1.py | andrearosasco/DistilledReplay | 2a4efa88d22b9afc7016f07549114688f346dbe8 | [
"MIT"
] | 7 | 2021-06-27T16:09:13.000Z | 2022-03-17T20:02:55.000Z | model/mlp1.py | andrew-r96/DistilledReplay | 2a4efa88d22b9afc7016f07549114688f346dbe8 | [
"MIT"
] | null | null | null | model/mlp1.py | andrew-r96/DistilledReplay | 2a4efa88d22b9afc7016f07549114688f346dbe8 | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch.nn.functional as F
| 28.925926 | 50 | 0.516005 |
8811e504a270f2f7246e1ece4241279f011e0643 | 745 | py | Python | netbox/ipam/managers.py | aslafy-z/netbox | a5512dd4c46c005df8752fc330c1382ac22b31ea | [
"Apache-2.0"
] | 1 | 2022-01-25T09:02:56.000Z | 2022-01-25T09:02:56.000Z | netbox/ipam/managers.py | aslafy-z/netbox | a5512dd4c46c005df8752fc330c1382ac22b31ea | [
"Apache-2.0"
] | 4 | 2021-06-08T22:29:06.000Z | 2022-03-12T00:48:51.000Z | netbox/ipam/managers.py | aslafy-z/netbox | a5512dd4c46c005df8752fc330c1382ac22b31ea | [
"Apache-2.0"
] | null | null | null | from django.db import models
from ipam.lookups import Host, Inet
| 41.388889 | 118 | 0.689933 |
881335d234ca66e078e1413e1e2269e82e80ed06 | 5,709 | py | Python | train.py | VArdulov/learning-kis | 2637f08d5e8027a22feff17064be45ea51f738e5 | [
"MIT"
] | null | null | null | train.py | VArdulov/learning-kis | 2637f08d5e8027a22feff17064be45ea51f738e5 | [
"MIT"
] | null | null | null | train.py | VArdulov/learning-kis | 2637f08d5e8027a22feff17064be45ea51f738e5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
""" Learning Koopman Invariant Subspace
(c) Naoya Takeishi, 2017.
takeishi@ailab.t.u-tokyo.ac.jp
"""
import numpy as np
np.random.seed(1234567890)
from argparse import ArgumentParser
from os import path
import time
from lkis import TimeSeriesBatchMaker, KoopmanInvariantSubspaceLearner
from losses import combined_loss
from torch import device, save, manual_seed
from torch.optim import SGD
import matplotlib.pyplot as plt
import seaborn as sns
# -- Parse arguments
t = time.time()
parser = ArgumentParser(description='Learning Koopman Invariant Subspace (Now with PyTorch!)')
parser.add_argument("--name", "-n", type=str, default=f"lkis-{int(time.time())}", help="name of experiment")
parser.add_argument("--data-path", type=str, default="./train.npy", help="time-series data to model")
parser.add_argument("--epochs", "-e", type=int, default=1000, help="number of epochs to train for")
parser.add_argument("--num-batches", "-b", type=int, default=1, help="how many batchs for break the data up into")
parser.add_argument("--gpu", action="store_true", default=False, help="use a GPU or no")
parser.add_argument("--intermediate-observable", "-i", type=int, default=-1, help="intermediate dimensional observation space")
parser.add_argument("--save-model", "-m", action="store_true", default=False, help="whether or not you want the model saved to $name$.torch.mdl")
parser.add_argument("--save-training-plot", "-p", action="store_true", default=False, help="where to save plotting")
parser.add_argument("--max-lag", "-l", type=int, default=-1, help="maximum_lag")
parser.add_argument("--state-space", "-s", type=int, default=1, help="dimensionality of the underlying state space")
parser.add_argument("--alpha", "-a", type=float, default=1.0, help="value to score the reconstruction loss by")
parser.add_argument("--learning-rate", "-r", type=float, default=0.001, help="Optimizer learning rate")
parser.add_argument("--validation-data-path", "-v", type=str, default="")
#ToDo: Implement
parser.add_argument("--dmd", action="store_true", default=False, help="Execute and save the DMD on the training set")
if __name__ == "__main__":
    # grab the command line arguments
    cli_args = parser.parse_args()
    manual_seed(216)  # fixed torch seed for reproducibility

    # find and load the training data (.npy); 1-D series are reshaped to (T, 1)
    data_path = cli_args.data_path
    print(f"Loading training data from {data_path}")
    data_train = np.load(data_path)
    if len(data_train.shape) == 1:
        data_train = data_train.reshape(-1, 1)
    print(f"Loaded a dataset with dimension: {data_train.shape}")

    validate = cli_args.validation_data_path != ""
    data_val = None
    if validate:
        data_path = cli_args.validation_data_path
        print(f"Loading validation data from {data_path}")
        data_val = np.load(data_path)

    # process the delay either set by the user or is set to one 10th of the data
    delay = cli_args.max_lag if cli_args.max_lag > 0 else (data_train.shape[0] // 10)
    # based on the number of batches, delay, and size of the data compute the samples per batch
    samples_per_batch = (data_train.shape[0] - delay) // cli_args.num_batches

    # construct the data preparer
    batch_iterator = TimeSeriesBatchMaker(
        y=data_train,
        batch_size=samples_per_batch,
        max_lag=delay
    )
    if validate:
        val_batch_iterator = TimeSeriesBatchMaker(
            y=data_val,
            max_lag=delay
        )

    # construct the end-to-end model
    lkis = KoopmanInvariantSubspaceLearner(
        observable_dim=data_train.shape[1],
        latent_dim=cli_args.state_space,
        intermediate_observable=cli_args.intermediate_observable,
        delay=delay
    )

    # NOTE(review): this rebinds the imported `device` callable to a device
    # instance, and the model itself is never moved to the GPU.
    if cli_args.gpu:
        device = device("cuda")

    # initialize the optimizer
    optimizer = SGD(lkis.parameters(), lr=cli_args.learning_rate)

    losses = []
    val_losses = []
    for epoch in range(cli_args.epochs):
        loss = 0
        for b in range(cli_args.num_batches):
            optimizer.zero_grad()
            time_delayed_ys, y_true = next(batch_iterator)
            # NOTE(review): Tensor.to() is not in-place — these results are
            # discarded, so the tensors are never actually moved to the GPU.
            if cli_args.gpu:
                time_delayed_ys.to(device)
                y_true.to(device)
            # forward pass: latent observables g and reconstruction y
            g_pred, y_pred = lkis(time_delayed_ys)
            # consecutive pairs (g_t, g_{t+1}) feed the linearity term of the loss
            g_0 = g_pred[:-1]
            g_1 = g_pred[1:]
            batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)
            batch_loss.backward()
            optimizer.step()
            loss += batch_loss.item()
        # display the epoch training loss
        print(f"epoch : {epoch + 1}/{cli_args.epochs}, loss = {loss:.6f}")
        losses.append(loss)

        if validate:
            y_time_delayed_val, y_true = next(val_batch_iterator)
            # NOTE(review): same discarded .to() issue as above.
            if cli_args.gpu:
                y_time_delayed_val.to(device)
                y_true.to(device)
            g_pred, y_pred = lkis(y_time_delayed_val)
            g_0 = g_pred[:-1]
            g_1 = g_pred[1:]
            batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)
            val_loss = batch_loss.item()
            print(f"\tval-loss = {val_loss:.6f}")
            val_losses.append(val_loss)

    if cli_args.save_model:
        save(lkis, f"{cli_args.name}.torch.mdl")

    if cli_args.save_training_plot:
        sns.lineplot(x=list(range(cli_args.epochs)), y=losses, label="training loss")
        if validate:
            sns.lineplot(x=list(range(cli_args.epochs)), y=val_losses, label="validation loss")
        plt.xlabel("Epochs")
        plt.ylabel("Combined Reconstruction and DMD Loss")
        plt.title(f"Training Loss for {cli_args.name}")
        plt.savefig(f"{cli_args.name}-training-loss.png")
8813da3968ae4a879a3ffd1fca43f066e89df5ea | 671 | py | Python | Algorithms/Easy/1200. Minimum Absolute Difference/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | Algorithms/Easy/1200. Minimum Absolute Difference/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | Algorithms/Easy/1200. Minimum Absolute Difference/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | from typing import List
if __name__ == "__main__":
s = Solution()
result = s.minimumAbsDifference([3, 8, -10, 23, 19, -4, -14, 27])
print(result)
| 26.84 | 70 | 0.490313 |
8814231575bbe6e4934834a1434e867f02c0e57d | 2,125 | py | Python | resources/physequations.py | VijayStroup/Physics_Problem_Solver_Basic | fc6944475ed8bcfe91bbd207734c3f9aee31e0fe | [
"MIT"
] | null | null | null | resources/physequations.py | VijayStroup/Physics_Problem_Solver_Basic | fc6944475ed8bcfe91bbd207734c3f9aee31e0fe | [
"MIT"
] | null | null | null | resources/physequations.py | VijayStroup/Physics_Problem_Solver_Basic | fc6944475ed8bcfe91bbd207734c3f9aee31e0fe | [
"MIT"
] | null | null | null | import math
def close(expected, actual, maxerror):
'''checks to see if the actual number is within expected +- maxerror.'''
low = expected - maxerror
high = expected + maxerror
if actual >= low and actual <= high:
return True
else:
return False
def grav_potential_energy(mass, height, gravity=9.81):
    """Return gravitational potential energy in joules.

    mass is in kilograms, height in meters; gravity defaults to Earth's
    9.81 m/s^2 but may be overridden for other bodies.
    """
    return mass * height * gravity
def kin_energy(mass, velocity):
    """Return kinetic energy (0.5 * m * v^2) in joules.

    mass is in kilograms and velocity in meters per second.
    """
    energy = 0.5 * mass * velocity ** 2
    return energy
def work_energy(force, displacement, angle):
    """Return the work done in joules.

    force is in newtons, displacement in meters, and angle (between the
    force and the displacement) in degrees.
    """
    return force * displacement * math.cos(math.radians(angle))
'''=============================================================================
Tests
============================================================================='''
if __name__ == '__main__':
    # Eyeball-check close() (prints True/False).
    print(close(10, 11.1, 1))
    print(close(100, 100.001, .01))
    print(close(-10, -11.01, 1))
    print(close(84756, 84300.2, 500.5))

    # gravitational potential energy tests
    # NOTE(review): check() is not defined in this fragment — presumably lost
    # in extraction; confirm it exists in the full file before running.
    ans = grav_potential_energy(3.00, 7.00)
    # NOTE(review): a 1e-26 tolerance is far below double precision and may
    # fail depending on how check() compares values.
    check('grav_potential_energy', '3.00, 7.00', 206.01, ans, 0.00000000000000000000000001)
    ans = grav_potential_energy(2.00, 5.00)
    check('grav_potential_energy', '2.00, 5.00', 98.1, ans, 0.01)

    # kinetic energy tests
    ans = kin_energy(2, 6.55)
    check('kin_energy', '2, 6.55', 42.90, ans, 0.01)
    ans = kin_energy(5.65, 10)
    check('kin_energy', '5.65, 10', 282.5, ans, 0.1)

    # work energy tests
    ans = work_energy(500, 10, 0)
    check('work_energy', '500, 10, 0', 5000.0, ans, 0.1)
    ans = work_energy(150, 50, 45)
    check('work_energy', '150, 50, 45', 5303.30, ans, 0.01)
| 32.19697 | 88 | 0.631059 |
71467296157c3ad9afffaf380b92ae10d722c419 | 10,659 | py | Python | mvpa2/tests/test_erdataset.py | andycon/PyMVPA | 67f7ee68012e3a1128168c583d6c83303b7a2c27 | [
"MIT"
] | null | null | null | mvpa2/tests/test_erdataset.py | andycon/PyMVPA | 67f7ee68012e3a1128168c583d6c83303b7a2c27 | [
"MIT"
] | null | null | null | mvpa2/tests/test_erdataset.py | andycon/PyMVPA | 67f7ee68012e3a1128168c583d6c83303b7a2c27 | [
"MIT"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
'''Tests for the event-related dataset'''
from mvpa2.testing import *
from mvpa2.datasets import dataset_wizard
from mvpa2.mappers.flatten import FlattenMapper
from mvpa2.mappers.boxcar import BoxcarMapper
from mvpa2.mappers.fx import FxMapper
from mvpa2.datasets.eventrelated import find_events, eventrelated_dataset, \
extract_boxcar_event_samples
from mvpa2.datasets.sources import load_example_fmri_dataset
from mvpa2.mappers.zscore import zscore
| 48.45 | 96 | 0.626888 |
7148d1a57a15a29836e2ab0aae7b7bc5dc398f57 | 1,174 | py | Python | userbot/plugins/delfp.py | aksr-aashish/FIREXUSERBOT | dff0b7bf028cb27779626ce523402346cc990402 | [
"MIT"
] | null | null | null | userbot/plugins/delfp.py | aksr-aashish/FIREXUSERBOT | dff0b7bf028cb27779626ce523402346cc990402 | [
"MIT"
] | 1 | 2022-01-09T11:35:06.000Z | 2022-01-09T11:35:06.000Z | userbot/plugins/delfp.py | aksr-aashish/FIREXUSERBOT | dff0b7bf028cb27779626ce523402346cc990402 | [
"MIT"
] | null | null | null | from telethon.tl.functions.photos import DeletePhotosRequest, GetUserPhotosRequest
from telethon.tl.types import InputPhoto
from userbot.cmdhelp import CmdHelp
from userbot.utils import admin_cmd, edit_or_reply, sudo_cmd
CmdHelp("delfp").add_command("delpfp", None, "delete ur currnt profile picture").add()
| 34.529412 | 86 | 0.67632 |
7149245bb6b3dda015cca0a397d867fb3542c00d | 1,308 | py | Python | amlb/benchmarks/file.py | pplonski/automlbenchmark | f49ddfa2583643173296ed8ab45a8c14c62a6987 | [
"MIT"
] | 282 | 2018-09-19T09:45:46.000Z | 2022-03-30T04:05:51.000Z | amlb/benchmarks/file.py | pplonski/automlbenchmark | f49ddfa2583643173296ed8ab45a8c14c62a6987 | [
"MIT"
] | 267 | 2018-11-02T11:43:11.000Z | 2022-03-31T08:58:16.000Z | amlb/benchmarks/file.py | pplonski/automlbenchmark | f49ddfa2583643173296ed8ab45a8c14c62a6987 | [
"MIT"
] | 104 | 2018-10-17T19:32:36.000Z | 2022-03-19T22:47:59.000Z | import logging
import os
from typing import List, Tuple, Optional
from amlb.utils import config_load, Namespace
log = logging.getLogger(__name__)
def load_file_benchmark(name: str, benchmark_definition_dirs: List[str]) -> Tuple[str, Optional[str], List[Namespace]]:
    """Loads benchmark from a local file.

    :param name: benchmark name used to locate the definition file.
    :param benchmark_definition_dirs: directories searched for the definition.
    :return: (benchmark name, path to the definition file, list of task namespaces)
    """
    definition_file = _find_local_benchmark_definition(name, benchmark_definition_dirs)
    log.info("Loading benchmark definitions from %s.", definition_file)
    task_list = config_load(definition_file)
    stem, _ = os.path.splitext(os.path.basename(definition_file))
    return stem, definition_file, task_list
| 39.636364 | 119 | 0.727829 |
714957e1bb0b1384b108ed8e7921b1c771c5effe | 4,815 | py | Python | pybuspro/devices/control.py | eyesoft/pybuspro | 9a178117be2db40ef1399cc60afdc18e251682bc | [
"MIT"
] | 2 | 2019-03-15T03:47:10.000Z | 2019-10-30T15:34:09.000Z | pybuspro/devices/control.py | eyesoft/pybuspro | 9a178117be2db40ef1399cc60afdc18e251682bc | [
"MIT"
] | null | null | null | pybuspro/devices/control.py | eyesoft/pybuspro | 9a178117be2db40ef1399cc60afdc18e251682bc | [
"MIT"
] | 4 | 2019-01-12T17:50:24.000Z | 2020-01-12T16:56:24.000Z | from ..core.telegram import Telegram
from ..helpers.enums import OperateCode
| 28.660714 | 106 | 0.655867 |
7149b8c5cf18fd7bdd1bfdc804b0918d755edaae | 5,961 | py | Python | appengine/chrome_infra_console_loadtest/main.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | [
"BSD-3-Clause"
] | null | null | null | appengine/chrome_infra_console_loadtest/main.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | [
"BSD-3-Clause"
] | null | null | null | appengine/chrome_infra_console_loadtest/main.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import endpoints
import random
import webapp2
from apiclient import discovery
from google.appengine.ext import ndb
from oauth2client.client import GoogleCredentials
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from components import auth
CONFIG_DATASTORE_KEY = "CONFIG_DATASTORE_KEY"
API_NAME = 'consoleapp'
API_VERSION = 'v1'
DISCOVERY_URL = '%s/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest'
def field_generator(dataparams, index, fields):
if index == len(dataparams):
return [fields]
else:
key = dataparams[index].field_key
return sum((field_generator(
dataparams, index+1, fields+[{'key': key, 'value': value}])
for value in dataparams[index].values), [])
class CronHandler(webapp2.RequestHandler):
backend_handlers = [
('/cron', CronHandler)
]
WEBAPP = webapp2.WSGIApplication(backend_handlers, debug=True)
APPLICATION = endpoints.api_server([LoadTestApi, UIApi])
| 33.301676 | 78 | 0.690824 |
7149cd13d14ac2cce8176e2e198709907cc8c456 | 9,523 | py | Python | src/mitre/securingai/restapi/task_plugin/controller.py | usnistgov/dioptra | 08a08e96c27787915bafc75a483431333e2c70ca | [
"CC-BY-4.0"
] | 14 | 2021-06-17T15:16:12.000Z | 2021-11-08T10:25:37.000Z | src/mitre/securingai/restapi/task_plugin/controller.py | usnistgov/dioptra | 08a08e96c27787915bafc75a483431333e2c70ca | [
"CC-BY-4.0"
] | 7 | 2021-09-20T20:20:26.000Z | 2022-03-30T13:17:43.000Z | src/mitre/securingai/restapi/task_plugin/controller.py | usnistgov/dioptra | 08a08e96c27787915bafc75a483431333e2c70ca | [
"CC-BY-4.0"
] | 4 | 2021-06-29T16:52:42.000Z | 2022-01-21T16:56:45.000Z | # This Software (Dioptra) is being made available as a public service by the
# National Institute of Standards and Technology (NIST), an Agency of the United
# States Department of Commerce. This software was developed in part by employees of
# NIST and in part by NIST contractors. Copyright in portions of this software that
# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
# to Title 17 United States Code Section 105, works of NIST employees are not
# subject to copyright protection in the United States. However, NIST may hold
# international copyright in software created by its employees and domestic
# copyright (or licensing rights) in portions of software that were assigned or
# licensed to NIST. To the extent that NIST holds copyright in this software, it is
# being made available under the Creative Commons Attribution 4.0 International
# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
# of the software developed or licensed by NIST.
#
# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
# https://creativecommons.org/licenses/by/4.0/legalcode
"""The module defining the task plugin endpoints."""
import uuid
from typing import List, Optional
import structlog
from flask import current_app, jsonify
from flask.wrappers import Response
from flask_accepts import accepts, responds
from flask_restx import Namespace, Resource
from injector import inject
from structlog.stdlib import BoundLogger
from mitre.securingai.restapi.utils import as_api_parser
from .errors import TaskPluginDoesNotExistError, TaskPluginUploadError
from .model import TaskPlugin, TaskPluginUploadForm, TaskPluginUploadFormData
from .schema import TaskPluginSchema, TaskPluginUploadSchema
from .service import TaskPluginService
LOGGER: BoundLogger = structlog.stdlib.get_logger()
api: Namespace = Namespace(
"TaskPlugin",
description="Task plugin registry operations",
)
| 37.789683 | 88 | 0.676888 |
714a5d7f1ebf03213e86c878b9d094ccb13ebf53 | 16,181 | py | Python | dulwich/tests/test_lru_cache.py | mjmaenpaa/dulwich | d13a0375f4cc3099ff1c6edacda97f317c28f67a | [
"Apache-2.0"
] | null | null | null | dulwich/tests/test_lru_cache.py | mjmaenpaa/dulwich | d13a0375f4cc3099ff1c6edacda97f317c28f67a | [
"Apache-2.0"
] | null | null | null | dulwich/tests/test_lru_cache.py | mjmaenpaa/dulwich | d13a0375f4cc3099ff1c6edacda97f317c28f67a | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2006, 2008 Canonical Ltd
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Tests for the lru_cache module."""
from dulwich import (
lru_cache,
)
from dulwich.tests import (
TestCase,
)
| 35.798673 | 79 | 0.603115 |
714b8d767c11fadd1e5da33bbf5b7d19a7d70405 | 382 | py | Python | py/2016/5B.py | pedrotari7/advent_of_code | 98d5bc8d903435624a019a5702f5421d7b4ef8c8 | [
"MIT"
] | null | null | null | py/2016/5B.py | pedrotari7/advent_of_code | 98d5bc8d903435624a019a5702f5421d7b4ef8c8 | [
"MIT"
] | null | null | null | py/2016/5B.py | pedrotari7/advent_of_code | 98d5bc8d903435624a019a5702f5421d7b4ef8c8 | [
"MIT"
] | null | null | null | import md5
(i,count) = (0,0)
password = ['']*8
while 1:
key = 'reyedfim' + str(i)
md = md5.new(key).hexdigest()
if md[:5] == '00000':
index = int(md[5],16)
if index < len(password) and password[index]=='':
password[index] = md[6]
count += 1
if count == 8:
break
i+=1
print ''.join(password) | 17.363636 | 58 | 0.465969 |
714cfc19c240490817e3657df9cb9287844afbb6 | 16,391 | py | Python | release/scripts/mgear/shifter_epic_components/EPIC_foot_01/__init__.py | lsica-scopely/mgear4 | 28ed5d66370a9516da05d93d447bfc15f4c0c9f4 | [
"MIT"
] | null | null | null | release/scripts/mgear/shifter_epic_components/EPIC_foot_01/__init__.py | lsica-scopely/mgear4 | 28ed5d66370a9516da05d93d447bfc15f4c0c9f4 | [
"MIT"
] | null | null | null | release/scripts/mgear/shifter_epic_components/EPIC_foot_01/__init__.py | lsica-scopely/mgear4 | 28ed5d66370a9516da05d93d447bfc15f4c0c9f4 | [
"MIT"
] | null | null | null | import pymel.core as pm
import ast
from pymel.core import datatypes
from mgear.shifter import component
from mgear.core import node, applyop, vector
from mgear.core import attribute, transform, primitive
| 33.865702 | 79 | 0.505887 |
714e6f1bdf4058bf187b53f8c773baa127319b6d | 546 | py | Python | streams/blog/migrations/0012_auto_20200928_1212.py | Engerrs/ckan.org | a5a9b63b0ca16cb5aa4f709f7a264b8f6c265158 | [
"BSD-3-Clause"
] | 1 | 2022-03-18T03:20:00.000Z | 2022-03-18T03:20:00.000Z | streams/blog/migrations/0012_auto_20200928_1212.py | Engerrs/ckan.org | a5a9b63b0ca16cb5aa4f709f7a264b8f6c265158 | [
"BSD-3-Clause"
] | 26 | 2021-07-07T08:42:42.000Z | 2022-03-29T14:34:59.000Z | streams/blog/migrations/0012_auto_20200928_1212.py | Engerrs/ckan.org | a5a9b63b0ca16cb5aa4f709f7a264b8f6c265158 | [
"BSD-3-Clause"
] | 3 | 2021-07-07T22:11:03.000Z | 2021-09-15T18:19:10.000Z | # Generated by Django 3.1.1 on 2020-09-28 12:12
import datetime
from django.db import migrations, models
| 22.75 | 82 | 0.600733 |
714e74c6035390e31e82cb8cc61f9783ca761b5f | 58,939 | py | Python | opac/webapp/main/views.py | rafaelpezzuto/opac | 9b54202350e262a27cb9cb756a892185b288df24 | [
"BSD-2-Clause"
] | null | null | null | opac/webapp/main/views.py | rafaelpezzuto/opac | 9b54202350e262a27cb9cb756a892185b288df24 | [
"BSD-2-Clause"
] | null | null | null | opac/webapp/main/views.py | rafaelpezzuto/opac | 9b54202350e262a27cb9cb756a892185b288df24 | [
"BSD-2-Clause"
] | null | null | null | # coding: utf-8
import logging
import requests
import mimetypes
from io import BytesIO
from urllib.parse import urlparse
from datetime import datetime, timedelta
from collections import OrderedDict
from flask_babelex import gettext as _
from flask import (
render_template,
abort,
current_app,
request,
session,
redirect,
jsonify,
url_for,
Response,
send_from_directory,
g,
make_response,
)
from werkzeug.contrib.atom import AtomFeed
from urllib.parse import urljoin
from legendarium.formatter import descriptive_short_format
from . import main
from webapp import babel
from webapp import cache
from webapp import controllers
from webapp.choices import STUDY_AREAS
from webapp.utils import utils
from webapp.utils.caching import cache_key_with_lang, cache_key_with_lang_with_qs
from webapp import forms
from webapp.config.lang_names import display_original_lang_name
from opac_schema.v1.models import Journal, Issue, Article, Collection
from lxml import etree
from packtools import HTMLGenerator
logger = logging.getLogger(__name__)
JOURNAL_UNPUBLISH = _("O peridico est indisponvel por motivo de: ")
ISSUE_UNPUBLISH = _("O nmero est indisponvel por motivo de: ")
ARTICLE_UNPUBLISH = _("O artigo est indisponvel por motivo de: ")
IAHX_LANGS = dict(
p='pt',
e='es',
i='en',
)
def fetch_data(url: str, timeout: float = 2) -> bytes:
try:
response = requests.get(url, timeout=timeout)
except (requests.ConnectionError, requests.Timeout) as exc:
raise RetryableError(exc) from exc
except (requests.InvalidSchema, requests.MissingSchema, requests.InvalidURL) as exc:
raise NonRetryableError(exc) from exc
else:
try:
response.raise_for_status()
except requests.HTTPError as exc:
if 400 <= exc.response.status_code < 500:
raise NonRetryableError(exc) from exc
elif 500 <= exc.response.status_code < 600:
raise RetryableError(exc) from exc
else:
raise
return response.content
def get_lang_from_session():
"""
Tenta retornar o idioma da seo, caso no consiga retorna
BABEL_DEFAULT_LOCALE.
"""
try:
return session['lang']
except KeyError:
return current_app.config.get('BABEL_DEFAULT_LOCALE')
# ##################################Collection###################################
# ###################################Journal#####################################
# ###################################Issue#######################################
def goto_next_or_previous_issue(current_issue, goto_param):
if goto_param not in ["next", "previous"]:
return None
all_issues = list(
controllers.get_issues_by_jid(current_issue.journal.id, is_public=True))
if goto_param == "next":
selected_issue = utils.get_next_issue(all_issues, current_issue)
elif goto_param == "previous":
selected_issue = utils.get_prev_issue(all_issues, current_issue)
if selected_issue in (None, current_issue):
# nao precisa redirecionar
return None
try:
url_seg_issue = selected_issue.url_segment
except AttributeError:
return None
else:
return url_for('main.issue_toc',
url_seg=selected_issue.journal.url_segment,
url_seg_issue=url_seg_issue)
# ##################################Article######################################
def render_html_from_xml(article, lang, gs_abstract=False):
logger.debug("Get XML: %s", article.xml)
if current_app.config["SSM_XML_URL_REWRITE"]:
result = fetch_data(use_ssm_url(article.xml))
else:
result = fetch_data(article.xml)
xml = etree.parse(BytesIO(result))
generator = HTMLGenerator.parse(
xml, valid_only=False, gs_abstract=gs_abstract, output_style="website")
return generator.generate(lang), generator.languages
# TODO: Remover assim que o valor Article.xml estiver consistente na base de
# dados
def use_ssm_url(url):
"""Normaliza a string `url` de acordo com os valores das diretivas de
configurao OPAC_SSM_SCHEME, OPAC_SSM_DOMAIN e OPAC_SSM_PORT.
A normalizao busca obter uma URL absoluta em funo de uma relativa, ou
uma absoluta em funo de uma absoluta, mas com as partes *scheme* e
*authority* trocadas pelas definidas nas diretivas citadas anteriormente.
Este cdigo deve ser removido assim que o valor de Article.xml estiver
consistente, i.e., todos os registros possuirem apenas URLs absolutas.
"""
if url.startswith("http"):
parsed_url = urlparse(url)
return current_app.config["SSM_BASE_URI"] + parsed_url.path
else:
return current_app.config["SSM_BASE_URI"] + url
def get_pdf_content(url):
logger.debug("Get PDF: %s", url)
if current_app.config["SSM_ARTICLE_ASSETS_OR_RENDITIONS_URL_REWRITE"]:
url = use_ssm_url(url)
try:
response = fetch_data(url)
except NonRetryableError:
abort(404, _('PDF no encontrado'))
except RetryableError:
abort(500, _('Erro inesperado'))
else:
mimetype, __ = mimetypes.guess_type(url)
return Response(response, mimetype=mimetype)
# ###############################E-mail share##################################
# ###############################Others########################################
| 34.957888 | 128 | 0.625358 |
714ebaf58f896dbaa65742bb16b60c72d8438768 | 252 | py | Python | create_read_write_1/Writing/to_csv.py | CodeXfull/Pandas | 08b0adc28eedba47f6eb8303ba6a36a37ababb92 | [
"MIT"
] | null | null | null | create_read_write_1/Writing/to_csv.py | CodeXfull/Pandas | 08b0adc28eedba47f6eb8303ba6a36a37ababb92 | [
"MIT"
] | null | null | null | create_read_write_1/Writing/to_csv.py | CodeXfull/Pandas | 08b0adc28eedba47f6eb8303ba6a36a37ababb92 | [
"MIT"
] | null | null | null | """
Converter um DataFrame para CSV
"""
import pandas as pd
dataset = pd.DataFrame({'Frutas': ["Abacaxi", "Mamo"],
"Nomes": ["verton", "Mrcia"]},
index=["Linha 1", "Linha 2"])
dataset.to_csv("dataset.csv") | 25.2 | 55 | 0.543651 |
714ec7d33bab5008ec611874fc87d94cc9deca3c | 9,769 | py | Python | venv/Lib/site-packages/pygsheets/client.py | 13rilliant/Python-CMS | 56c4f3f1cbdd81020aa690ab92d0e26d042458c1 | [
"MIT"
] | 1 | 2019-04-22T14:22:38.000Z | 2019-04-22T14:22:38.000Z | venv/Lib/site-packages/pygsheets/client.py | 13rilliant/Python-Updates-Text-Files-from-Sheets | 56c4f3f1cbdd81020aa690ab92d0e26d042458c1 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pygsheets/client.py | 13rilliant/Python-Updates-Text-Files-from-Sheets | 56c4f3f1cbdd81020aa690ab92d0e26d042458c1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-.
import re
import warnings
import os
import logging
from pygsheets.drive import DriveAPIWrapper
from pygsheets.sheet import SheetAPIWrapper
from pygsheets.spreadsheet import Spreadsheet
from pygsheets.exceptions import SpreadsheetNotFound, NoValidUrlKeyFound
from pygsheets.custom_types import ValueRenderOption, DateTimeRenderOption
from google_auth_httplib2 import AuthorizedHttp
GOOGLE_SHEET_CELL_UPDATES_LIMIT = 50000
_url_key_re_v1 = re.compile(r'key=([^&#]+)')
_url_key_re_v2 = re.compile(r"/spreadsheets/d/([a-zA-Z0-9-_]+)")
_email_patttern = re.compile(r"\"?([-a-zA-Z0-9.`?{}]+@[-a-zA-Z0-9.]+\.\w+)\"?")
# _domain_pattern = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
_deprecated_keyword_mapping = {
'parent_id': 'folder',
}
def spreadsheet_titles(self, query=None):
"""Get a list of all spreadsheet titles present in the Google Drive or TeamDrive accessed."""
return [x['name'] for x in self.drive.spreadsheet_metadata(query)]
def create(self, title, template=None, folder=None, **kwargs):
"""Create a new spreadsheet.
The title will always be set to the given value (even overwriting the templates title). The template
can either be a `spreadsheet resource <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#resource-spreadsheet>`_
or an instance of :class:`~pygsheets.Spreadsheet`. In both cases undefined values will be ignored.
:param title: Title of the new spreadsheet.
:param template: A template to create the new spreadsheet from.
:param folder: The Id of the folder this sheet will be stored in.
:param kwargs: Standard parameters (see reference for details).
:return: :class:`~pygsheets.Spreadsheet`
"""
result = self.sheet.create(title, template=template, **kwargs)
if folder:
self.drive.move_file(result['spreadsheetId'],
old_folder=self.drive.spreadsheet_metadata(query="name = '" + title + "'")[0]['parents'][0],
new_folder=folder)
return self.spreadsheet_cls(self, jsonsheet=result)
def open(self, title):
"""Open a spreadsheet by title.
In a case where there are several sheets with the same title, the first one found is returned.
>>> import pygsheets
>>> c = pygsheets.authorize()
>>> c.open('TestSheet')
:param title: A title of a spreadsheet.
:returns: :class:`~pygsheets.Spreadsheet`
:raises pygsheets.SpreadsheetNotFound: No spreadsheet with the given title was found.
"""
try:
spreadsheet = list(filter(lambda x: x['name'] == title, self.drive.spreadsheet_metadata()))[0]
return self.open_by_key(spreadsheet['id'])
except (KeyError, IndexError):
raise SpreadsheetNotFound('Could not find a spreadsheet with title %s.' % title)
def open_by_key(self, key):
"""Open a spreadsheet by key.
>>> import pygsheets
>>> c = pygsheets.authorize()
>>> c.open_by_key('0BmgG6nO_6dprdS1MN3d3MkdPa142WFRrdnRRUWl1UFE')
:param key: The key of a spreadsheet. (can be found in the sheet URL)
:returns: :class:`~pygsheets.Spreadsheet`
:raises pygsheets.SpreadsheetNotFound: The given spreadsheet ID was not found.
"""
response = self.sheet.get(key,
fields='properties,sheets/properties,spreadsheetId,namedRanges',
includeGridData=False)
return self.spreadsheet_cls(self, response)
def open_by_url(self, url):
"""Open a spreadsheet by URL.
>>> import pygsheets
>>> c = pygsheets.authorize()
>>> c.open_by_url('https://docs.google.com/spreadsheet/ccc?key=0Bm...FE&hl')
:param url: URL of a spreadsheet as it appears in a browser.
:returns: :class:`~pygsheets.Spreadsheet`
:raises pygsheets.SpreadsheetNotFound: No spreadsheet was found with the given URL.
"""
m1 = _url_key_re_v1.search(url)
if m1:
return self.open_by_key(m1.group(1))
else:
m2 = _url_key_re_v2.search(url)
if m2:
return self.open_by_key(m2.group(1))
else:
raise NoValidUrlKeyFound
def open_all(self, query=''):
"""Opens all available spreadsheets.
Result can be filtered when specifying the query parameter. On the details on how to form the query:
`Reference <https://developers.google.com/drive/v3/web/search-parameters>`_
:param query: (Optional) Can be used to filter the returned metadata.
:returns: A list of :class:`~pygsheets.Spreadsheet`.
"""
return [self.open_by_key(key) for key in self.spreadsheet_ids(query=query)]
def open_as_json(self, key):
"""Return a json representation of the spreadsheet.
See `Reference <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet>`__ for details.
"""
return self.sheet.get(key, fields='properties,sheets/properties,sheets/protectedRanges,'
'spreadsheetId,namedRanges',
includeGridData=False)
def get_range(self, spreadsheet_id,
value_range,
major_dimension='ROWS',
value_render_option=ValueRenderOption.FORMATTED_VALUE,
date_time_render_option=DateTimeRenderOption.SERIAL_NUMBER):
"""Returns a range of values from a spreadsheet. The caller must specify the spreadsheet ID and a range.
Reference: `request <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get>`__
:param spreadsheet_id: The ID of the spreadsheet to retrieve data from.
:param value_range: The A1 notation of the values to retrieve.
:param major_dimension: The major dimension that results should use.
For example, if the spreadsheet data is: A1=1,B1=2,A2=3,B2=4, then
requesting range=A1:B2,majorDimension=ROWS will return [[1,2],[3,4]],
whereas requesting range=A1:B2,majorDimension=COLUMNS will return
[[1,3],[2,4]].
:param value_render_option: How values should be represented in the output. The default
render option is `ValueRenderOption.FORMATTED_VALUE`.
:param date_time_render_option: How dates, times, and durations should be represented in the output.
This is ignored if `valueRenderOption` is `FORMATTED_VALUE`. The default
dateTime render option is [`DateTimeRenderOption.SERIAL_NUMBER`].
:return: An array of arrays with the values fetched. Returns an empty array if no
values were fetched. Values are dynamically typed as int, float or string.
"""
result = self.sheet.values_get(spreadsheet_id, value_range, major_dimension, value_render_option,
date_time_render_option)
try:
return result['values']
except KeyError:
return [['']] | 46.519048 | 142 | 0.614085 |
714ecc8f34f21f3f5078c51278dfea154ffd4835 | 1,511 | py | Python | model/group_contact.py | NatalyAristova/Training_python | e95a2b9e25238285d705a880fd94d73f173c3a31 | [
"Apache-2.0"
] | null | null | null | model/group_contact.py | NatalyAristova/Training_python | e95a2b9e25238285d705a880fd94d73f173c3a31 | [
"Apache-2.0"
] | null | null | null | model/group_contact.py | NatalyAristova/Training_python | e95a2b9e25238285d705a880fd94d73f173c3a31 | [
"Apache-2.0"
] | null | null | null | from sys import maxsize
| 36.853659 | 148 | 0.650563 |
714f78bb4bb01676183ee7d2b3639573c3d0ac56 | 712 | py | Python | test/manual/documents/test_iter_documents.py | membranepotential/mendeley-python-sdk | 0336f0164f4d409309e813cbd0140011b5b2ff8f | [
"Apache-2.0"
] | 103 | 2015-01-12T00:40:51.000Z | 2022-03-29T07:02:06.000Z | test/manual/documents/test_iter_documents.py | membranepotential/mendeley-python-sdk | 0336f0164f4d409309e813cbd0140011b5b2ff8f | [
"Apache-2.0"
] | 26 | 2015-01-10T04:08:41.000Z | 2021-02-05T16:31:37.000Z | test/manual/documents/test_iter_documents.py | membranepotential/mendeley-python-sdk | 0336f0164f4d409309e813cbd0140011b5b2ff8f | [
"Apache-2.0"
] | 43 | 2015-03-04T18:11:06.000Z | 2022-03-13T02:33:34.000Z | from itertools import islice
from test import get_user_session, cassette
from test.resources.documents import delete_all_documents, create_document
| 32.363636 | 96 | 0.706461 |
714fe59976a41e4840adb621109e180ee047b25c | 5,567 | py | Python | demo.py | cbsudux/minimal-hand | 893c252e7e818a9a96b279023ea8a78a88fb0a4d | [
"MIT"
] | null | null | null | demo.py | cbsudux/minimal-hand | 893c252e7e818a9a96b279023ea8a78a88fb0a4d | [
"MIT"
] | null | null | null | demo.py | cbsudux/minimal-hand | 893c252e7e818a9a96b279023ea8a78a88fb0a4d | [
"MIT"
] | null | null | null | import argparse
import cv2
import keyboard
import numpy as np
import open3d as o3d
import os
import pygame
from transforms3d.axangles import axangle2mat
import config
from hand_mesh import HandMesh
from kinematics import mpii_to_mano
from utils import OneEuroFilter, imresize
from wrappers import ModelPipeline
from utils import *
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--vid_file', type=str,
help='input video path or youtube link')
args = parser.parse_args()
run(args)
| 33.136905 | 100 | 0.629064 |
715027948c136a1c6e6c296495419c7112dea3be | 1,929 | py | Python | test_project/settings.py | incuna/incuna-groups | 148c181faf66fe73792cb2c5bbf5500ba61aa22d | [
"BSD-2-Clause"
] | 1 | 2017-09-29T23:58:02.000Z | 2017-09-29T23:58:02.000Z | test_project/settings.py | incuna/incuna-groups | 148c181faf66fe73792cb2c5bbf5500ba61aa22d | [
"BSD-2-Clause"
] | 51 | 2015-03-30T08:58:15.000Z | 2022-01-13T00:40:17.000Z | test_project/settings.py | incuna/incuna-groups | 148c181faf66fe73792cb2c5bbf5500ba61aa22d | [
"BSD-2-Clause"
] | null | null | null | import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = True
ALLOWED_HOSTS = []
ROOT_URLCONF = 'groups.tests.urls'
STATIC_URL = '/static/'
SECRET_KEY = 'krc34ji^-fd-=+r6e%p!0u0k9h$9!q*_#l=6)74h#o(jrxsx4p'
PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',)
DATABASES = {
'default': dj_database_url.config(default='postgres://localhost/groups')
}
DEFAULT_FILE_STORAGE = 'inmemorystorage.InMemoryStorage'
INSTALLED_APPS = (
'groups',
'crispy_forms',
'pagination',
'polymorphic',
# Put contenttypes before auth to work around test issue.
# See: https://code.djangoproject.com/ticket/10827#comment:12
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'groups', 'tests', 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
CRISPY_TEMPLATE_PACK = 'bootstrap3'
TEST_RUNNER = 'test_project.test_runner.Runner'
| 28.367647 | 76 | 0.659927 |
7150fca7ddfd290e2618756c7d1c3d98b7e62c0b | 11,824 | py | Python | tests/test_akismet.py | cclauss/akismet | 7b65bc163d6947a3013d01bf9accf1bc6c0781ca | [
"BSD-3-Clause"
] | 9 | 2015-07-21T01:43:05.000Z | 2021-04-01T12:53:32.000Z | tests/test_akismet.py | cclauss/akismet | 7b65bc163d6947a3013d01bf9accf1bc6c0781ca | [
"BSD-3-Clause"
] | 3 | 2015-09-28T09:01:17.000Z | 2021-11-18T08:19:36.000Z | tests/test_akismet.py | cclauss/akismet | 7b65bc163d6947a3013d01bf9accf1bc6c0781ca | [
"BSD-3-Clause"
] | 7 | 2015-09-27T03:14:44.000Z | 2021-12-05T22:48:44.000Z | import datetime
import os
import sys
import unittest
from unittest import mock
import akismet
| 31.87062 | 88 | 0.58931 |
7151993c0f8145d0e1fdf8168c7b895118af0892 | 2,581 | py | Python | experimenting/dataset/datamodule.py | gaurvigoyal/lifting_events_to_3d_hpe | 66d27eb7534f81a95d9f68e17cc534ef2a2c9b1c | [
"Apache-2.0"
] | 19 | 2021-04-16T11:43:34.000Z | 2022-01-07T10:21:42.000Z | experimenting/dataset/datamodule.py | gaurvigoyal/lifting_events_to_3d_hpe | 66d27eb7534f81a95d9f68e17cc534ef2a2c9b1c | [
"Apache-2.0"
] | 4 | 2021-04-16T14:07:38.000Z | 2022-02-12T16:35:22.000Z | experimenting/dataset/datamodule.py | gianscarpe/event-camera | 8bb60a281adb9e2c961b5e12c24c9bbbba1876d5 | [
"Apache-2.0"
] | 5 | 2021-04-23T16:30:37.000Z | 2022-02-12T01:42:14.000Z | import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset
from .core import BaseCore
from .factory import BaseDataFactory
| 28.362637 | 84 | 0.631538 |
7152cc15e7baaacfb5a36373bdeff28f520d9e9f | 2,906 | py | Python | sevn-interface/SEVN/resources/SEVN_walkthrough/running_folder/analysis_3_pandas.py | giulianoiorio/PeTar | f6a849552b3d8e47c5e08fe90fed05bf38bc407d | [
"MIT"
] | null | null | null | sevn-interface/SEVN/resources/SEVN_walkthrough/running_folder/analysis_3_pandas.py | giulianoiorio/PeTar | f6a849552b3d8e47c5e08fe90fed05bf38bc407d | [
"MIT"
] | null | null | null | sevn-interface/SEVN/resources/SEVN_walkthrough/running_folder/analysis_3_pandas.py | giulianoiorio/PeTar | f6a849552b3d8e47c5e08fe90fed05bf38bc407d | [
"MIT"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#Load file
dt=pd.read_csv("sevn_output/output_0.csv")
#Give a look to the columns
print(dt.columns)
#Consider only the final states
dt=dt.drop_duplicates(["ID","name"], keep='last')
#Load evolved file
dte=pd.read_csv("sevn_output/evolved_0.dat",sep='\s+')
#Give a look to the columns
print(dte.columns)
dte=dte.rename(columns={'#ID': 'ID','Mass_0':"Mzams_0", 'Mass_1':"Mzams_1"})
#After change
print(dte.columns)
#Join the two dataset
dt = dt.merge(dte, on=["ID","name"], how="inner", suffixes=("","_ini") )
# - on: column(s, can be a list of columns) to match during the merge of the two tables. The colum(s) has(have) to be present in both the tables
# - how: type of join to use, see documentation here and the next slide
# - suffixes: columns with the same name in the two tables (not used in on) will be renamed adding these suffixes.
#Give a look to the columns
print(dt.columns)
#Create filter indexes
idx0 = (dt.RemnantType_0==6)
idx1 = (dt.RemnantType_1==6)
idxb0 = idx0 & dt.Semimajor.notnull()
idxb1 = idx1 & dt.Semimajor.notnull()
idxm0 = idxb0 & (dt.GWtime + dt.BWorldtime <= 14000)
idxm1 = idxb1 & (dt.GWtime + dt.BWorldtime <= 14000)
#Filter and join masses
AllBH = pd.concat([dt[idx0].Mass_0,dt[idx1].Mass_1])
BoundBH = pd.concat([dt[idxb0].Mass_0,dt[idxb1].Mass_1])
MergingBH = pd.concat([dt[idxm0].Mass_0,dt[idxm1].Mass_1])
#Filter and join initial masses
AllBHzams = pd.concat([dt[idx0].Mzams_0,dt[idx1].Mzams_1])
BoundBHzams = pd.concat([dt[idxb0].Mzams_0,dt[idxb1].Mzams_1])
MergingBHzams = pd.concat([dt[idxm0].Mzams_0,dt[idxm1].Mzams_1])
#Filter and join initial semimajor axis
AllBHa = pd.concat([dt[idx0].a,dt[idx1].a])
BoundBHa = pd.concat([dt[idxb0].a,dt[idxb1].a])
MergingBHa = pd.concat([dt[idxm0].a,dt[idxm1].a])
#Plot
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.scatter(AllBHzams,AllBH,zorder=1,edgecolor="k",s=30,label="All")
plt.scatter(BoundBHzams,BoundBH,zorder=2,edgecolor="k",s=30, label="Bound")
plt.scatter(MergingBHzams,MergingBH,zorder=3,edgecolor="k",s=30, label="Merging")
plt.plot(np.linspace(0,140),np.linspace(0,140),ls="dashed",c="gray")
plt.xscale("log")
plt.yscale("log")
plt.ylabel("BH mass [M$_\odot$]",fontsize=18)
plt.xlabel("$M\mathrm{zams}$ [M$_\odot$]",fontsize=18)
plt.gca().tick_params(axis='both', which='major', labelsize=18)
plt.legend(fontsize=16)
plt.subplot(1,2,2)
plt.scatter(AllBHa,AllBH,zorder=1,edgecolor="k",s=30,label="All")
plt.scatter(BoundBHa,BoundBH,zorder=2,edgecolor="k",s=30,label="Bound")
plt.scatter(MergingBHa,MergingBH,zorder=3,edgecolor="k",s=30,label="Merging")
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Semimajor initial [R$_\odot$]",fontsize=18)
plt.ylabel("BH mass [M$_\odot$]",fontsize=18)
plt.gca().tick_params(axis='both', which='major', labelsize=18)
plt.tight_layout()
plt.savefig("analysis3.png")
plt.show()
| 34.595238 | 144 | 0.719202 |
7152e1ff12041d507f6d8d481cc402ae12c07a3f | 91 | py | Python | apps/tg_bot/apps.py | VladimirLazor/Lohika | a36407feeb2e3ade4f8c689030f343d88ff47a92 | [
"Apache-2.0"
] | null | null | null | apps/tg_bot/apps.py | VladimirLazor/Lohika | a36407feeb2e3ade4f8c689030f343d88ff47a92 | [
"Apache-2.0"
] | 9 | 2021-03-19T15:59:10.000Z | 2022-03-12T00:57:56.000Z | apps/tg_bot/apps.py | VladimirLazor/Lohika | a36407feeb2e3ade4f8c689030f343d88ff47a92 | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
| 15.166667 | 33 | 0.747253 |
71530e1943a52265477429affe05d43b9f82d449 | 2,152 | py | Python | office365/sharepoint/portal/group_site_manager.py | rikeshtailor/Office365-REST-Python-Client | ca7bfa1b22212137bb4e984c0457632163e89a43 | [
"MIT"
] | null | null | null | office365/sharepoint/portal/group_site_manager.py | rikeshtailor/Office365-REST-Python-Client | ca7bfa1b22212137bb4e984c0457632163e89a43 | [
"MIT"
] | null | null | null | office365/sharepoint/portal/group_site_manager.py | rikeshtailor/Office365-REST-Python-Client | ca7bfa1b22212137bb4e984c0457632163e89a43 | [
"MIT"
] | null | null | null | from office365.runtime.client_object import ClientObject
from office365.runtime.client_result import ClientResult
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.resource_path import ResourcePath
from office365.sharepoint.portal.group_creation_params import GroupCreationInformation
from office365.sharepoint.portal.group_site_info import GroupSiteInfo
| 37.754386 | 109 | 0.701208 |
715433d014e2773f3519d53929b4573136138236 | 186 | py | Python | tests/errors/e_tuple_args_T692.py | smok-serwis/cython | e551a3a348888bd89d4aad809916709a634af1fb | [
"Apache-2.0"
] | 2 | 2020-01-29T08:20:22.000Z | 2020-01-29T08:20:25.000Z | tests/errors/e_tuple_args_T692.py | smok-serwis/cython | e551a3a348888bd89d4aad809916709a634af1fb | [
"Apache-2.0"
] | 1 | 2019-09-21T19:58:10.000Z | 2019-09-21T19:58:10.000Z | tests/errors/e_tuple_args_T692.py | smok-serwis/cython | e551a3a348888bd89d4aad809916709a634af1fb | [
"Apache-2.0"
] | 2 | 2017-06-18T04:09:18.000Z | 2018-11-30T20:03:58.000Z | # ticket: 692
# mode: error
_ERRORS = u"""
4:9: Missing argument name
5:11: undeclared name not builtin: a
5:15: undeclared name not builtin: b
"""
| 14.307692 | 36 | 0.645161 |
71548039cb810f86d8a1fe4c36b02cd515b16949 | 558 | py | Python | ble.py | Ladvien/esp32_upython_env | 8b0feab940efd3feff16220473e1b5b27d679a56 | [
"MIT"
] | null | null | null | ble.py | Ladvien/esp32_upython_env | 8b0feab940efd3feff16220473e1b5b27d679a56 | [
"MIT"
] | null | null | null | ble.py | Ladvien/esp32_upython_env | 8b0feab940efd3feff16220473e1b5b27d679a56 | [
"MIT"
] | null | null | null | import bluetooth
import time
bt = bluetooth.BLE() # singleton
bt.active(True) # activate BT stack
UART_UUID = bluetooth.UUID('6E400001-B5A3-F393-E0A9-E50E24DCCA9E')
UART_TX = (bluetooth.UUID('6E400003-B5A3-F393-E0A9-E50E24DCCA9E'), bluetooth.FLAG_READ | bluetooth.FLAG_NOTIFY,)
UART_RX = (bluetooth.UUID('6E400002-B5A3-F393-E0A9-E50E24DCCA9E'), bluetooth.FLAG_WRITE,)
UART_SERVICE = (UART_UUID, (UART_TX, UART_RX,),)
SERVICES = (UART_SERVICE,)
( (tx, rx,), ) = bt.gatts_register_services(SERVICES)
bt.gap_advertise(100) | 50.727273 | 112 | 0.716846 |
715562602b941a7d39f1c3b9c3f9ed3ae5bab180 | 952 | py | Python | examples/custom-generator/customer.py | luxbe/sledo | 26aa2b59b11ea115afc25bb407602578cb342170 | [
"MIT"
] | 4 | 2021-12-13T17:52:52.000Z | 2021-12-28T09:40:52.000Z | examples/custom-generator/customer.py | luxbe/sledo | 26aa2b59b11ea115afc25bb407602578cb342170 | [
"MIT"
] | null | null | null | examples/custom-generator/customer.py | luxbe/sledo | 26aa2b59b11ea115afc25bb407602578cb342170 | [
"MIT"
] | null | null | null | from random import randint
from sledo.generate.field_generators.base import FieldGenerator
# Fixed pool of country names for generated customer records
# (EU members plus a few non-EU countries).
values = ("Austria",
          "Belgium",
          "Bulgaria",
          "Croatia",
          "Cyprus",
          "Czech Republic",
          "Denmark",
          "Estonia",
          "Finland",
          "France",
          "Germany",
          "Greece",
          "Hungary",
          "Ireland",
          "Italy",
          "Latvia",
          "Lithuania",
          "Luxembourg",
          "Malta",
          "Netherlands",
          "Poland",
          "Portugal",
          "Romania",
          "Slovakia",
          "Slovenia",
          "Spain",
          "Sweden",
          "United States",
          "Japan",
          "United Kingdom",
          "Bangladesh",
          "Argentina",
          "China")
# Highest valid index into ``values`` — presumably used with
# randint(0, count), which is inclusive at both ends; TODO confirm caller.
count = len(values) - 1
| 21.636364 | 63 | 0.456933 |
71567463ea68f026c0c3520620d04799ac10631b | 731 | py | Python | status-uncertain/baseline_model.py | crawftv/CRAwTO | 8c6fdb93ed963cbddfe967b041e8beb578d1e94d | [
"BSD-3-Clause"
] | 1 | 2020-04-03T12:43:27.000Z | 2020-04-03T12:43:27.000Z | status-uncertain/baseline_model.py | crawftv/CRAwTO | 8c6fdb93ed963cbddfe967b041e8beb578d1e94d | [
"BSD-3-Clause"
] | 21 | 2020-02-14T04:29:03.000Z | 2020-07-14T02:19:37.000Z | status-uncertain/baseline_model.py | crawftv/CRAwTO | 8c6fdb93ed963cbddfe967b041e8beb578d1e94d | [
"BSD-3-Clause"
] | 1 | 2019-10-25T01:06:58.000Z | 2019-10-25T01:06:58.000Z | #!/usr/bin/env python3
from sklearn.metrics import r2_score
import numpy as np
| 20.305556 | 58 | 0.621067 |
7157c50528da6262c46158a9ce6e62a7c31b48be | 3,229 | py | Python | aligner/grow_diag_final.py | ecalder6/MT-HW2 | 1356aeb374a6e4d0b0ae819684bf314039948c56 | [
"MIT"
] | null | null | null | aligner/grow_diag_final.py | ecalder6/MT-HW2 | 1356aeb374a6e4d0b0ae819684bf314039948c56 | [
"MIT"
] | null | null | null | aligner/grow_diag_final.py | ecalder6/MT-HW2 | 1356aeb374a6e4d0b0ae819684bf314039948c56 | [
"MIT"
] | null | null | null | import optparse
import sys
if __name__ == "__main__":
main()
| 44.232877 | 142 | 0.577888 |
715823dd8a36dcb9c1e16c0545d16a02d319badc | 2,567 | py | Python | tests/test_tbears_db.py | Transcranial-Solutions/t-bears | 4712b8bb425814c444ee75f3220a31df934982aa | [
"Apache-2.0"
] | 35 | 2018-08-24T03:39:35.000Z | 2021-08-21T23:35:57.000Z | tests/test_tbears_db.py | Transcranial-Solutions/t-bears | 4712b8bb425814c444ee75f3220a31df934982aa | [
"Apache-2.0"
] | 40 | 2018-08-24T05:35:54.000Z | 2021-12-15T08:23:38.000Z | tests/test_tbears_db.py | Transcranial-Solutions/t-bears | 4712b8bb425814c444ee75f3220a31df934982aa | [
"Apache-2.0"
] | 22 | 2018-08-28T15:11:46.000Z | 2021-12-01T23:34:45.000Z | # -*- coding: utf-8 -*-
# Copyright 2017-2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
from tbears.block_manager.tbears_db import TbearsDB
DIRECTORY_PATH = os.path.abspath((os.path.dirname(__file__)))
DB_PATH = os.path.join(DIRECTORY_PATH, './.tbears_db')
| 33.776316 | 85 | 0.679782 |
715a02ff047054f60c24cd7d80d0ef426229bc1b | 1,658 | py | Python | src/exabgp/bgp/message/update/attribute/bgpls/link/mplsmask.py | pierky/exabgp | 34be537ae5906c0830b31da1152ae63108ccf911 | [
"BSD-3-Clause"
] | 1,560 | 2015-01-01T08:53:05.000Z | 2022-03-29T20:22:43.000Z | src/exabgp/bgp/message/update/attribute/bgpls/link/mplsmask.py | pierky/exabgp | 34be537ae5906c0830b31da1152ae63108ccf911 | [
"BSD-3-Clause"
] | 818 | 2015-01-01T17:38:40.000Z | 2022-03-30T07:29:24.000Z | src/exabgp/bgp/message/update/attribute/bgpls/link/mplsmask.py | pierky/exabgp | 34be537ae5906c0830b31da1152ae63108ccf911 | [
"BSD-3-Clause"
] | 439 | 2015-01-06T21:20:41.000Z | 2022-03-19T23:24:25.000Z | # encoding: utf-8
"""
mplsmask.py
Created by Evelio Vila on 2016-12-01.
Copyright (c) 2014-2017 Exa Networks. All rights reserved.
"""
from exabgp.bgp.message.notification import Notify
from exabgp.bgp.message.update.attribute.bgpls.linkstate import LinkState
from exabgp.bgp.message.update.attribute.bgpls.linkstate import FlagLS
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |L|R| Reserved |
# +-+-+-+-+-+-+-+-+
# https://tools.ietf.org/html/rfc7752#section-3.3.2.2 MPLS Protocol Mask
#
# +------------+------------------------------------------+-----------+
# | Bit | Description | Reference |
# +------------+------------------------------------------+-----------+
# | 'L' | Label Distribution Protocol (LDP) | [RFC5036] |
# | 'R' | Extension to RSVP for LSP Tunnels | [RFC3209] |
# | | (RSVP-TE) | |
# | 'Reserved' | Reserved for future use | |
# +------------+------------------------------------------+-----------+
# RFC 7752 3.3.2.2. MPLS Protocol Mask TLV
| 41.45 | 77 | 0.390229 |
715d6a83862066d08f507e36bb0ef91281fb5c5f | 4,977 | py | Python | tests/test_cecum.py | hsorby/scaffoldmaker | 5e3b4531665dbc465b53acc1662f8d9bbb9dc1e1 | [
"Apache-2.0"
] | null | null | null | tests/test_cecum.py | hsorby/scaffoldmaker | 5e3b4531665dbc465b53acc1662f8d9bbb9dc1e1 | [
"Apache-2.0"
] | 38 | 2018-04-04T10:40:26.000Z | 2022-03-14T22:02:26.000Z | tests/test_cecum.py | hsorby/scaffoldmaker | 5e3b4531665dbc465b53acc1662f8d9bbb9dc1e1 | [
"Apache-2.0"
] | 28 | 2018-03-11T19:31:35.000Z | 2022-02-03T23:14:21.000Z | import unittest
from opencmiss.utils.zinc.finiteelement import evaluateFieldNodesetRange
from opencmiss.utils.zinc.general import ChangeManager
from opencmiss.zinc.context import Context
from opencmiss.zinc.element import Element
from opencmiss.zinc.field import Field
from opencmiss.zinc.result import RESULT_OK
from scaffoldmaker.meshtypes.meshtype_3d_cecum1 import MeshType_3d_cecum1
from scaffoldmaker.utils.zinc_utils import createFaceMeshGroupExteriorOnFace
from testutils import assertAlmostEqualList
if __name__ == "__main__":
unittest.main()
| 53.516129 | 115 | 0.723327 |
715db019834eea3cecfac08bf5fe333bb00487eb | 3,658 | py | Python | samples/destroy_vm.py | jm66/pyvmomi-community-samples | 5ca4a50b767500e07b9bce9fba70240bfa963a4e | [
"Apache-2.0"
] | 4 | 2016-01-04T06:19:56.000Z | 2018-09-09T01:03:07.000Z | samples/destroy_vm.py | zhangjiahaol/pyvmomi-community-samples | 905ec34edfbd151531832e98b6a0748fa6ff5e0e | [
"Apache-2.0"
] | 12 | 2019-04-17T02:47:25.000Z | 2021-04-02T09:15:37.000Z | samples/destroy_vm.py | zhangjiahaol/pyvmomi-community-samples | 905ec34edfbd151531832e98b6a0748fa6ff5e0e | [
"Apache-2.0"
] | 15 | 2018-04-26T05:18:12.000Z | 2021-11-06T04:44:58.000Z | #!/usr/bin/env python
# Copyright 2015 Michael Rice <michael@michaelrice.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import atexit
from pyVim import connect
from pyVmomi import vim
from tools import cli
from tools import tasks
def setup_args():
    """Build the CLI argument namespace for this script.

    Starts from the shared connection parser (host/user/password/port) and
    adds four mutually-alternative ways to identify the VM to destroy:
    BIOS UUID, DNS name, IP address, or VM display name.  Prompts for the
    password if it was not supplied on the command line.
    """
    parser = cli.build_arg_parser()
    # using j here because -u is used for user
    parser.add_argument('-j', '--uuid',
                        help='BIOS UUID of the VirtualMachine you want '
                        'to destroy.')
    parser.add_argument('-n', '--name',
                        help='DNS Name of the VirtualMachine you want to '
                        'destroy.')
    parser.add_argument('-i', '--ip',
                        help='IP Address of the VirtualMachine you want to '
                        'destroy')
    parser.add_argument('-v', '--vm',
                        help='VM name of the VirtualMachine you want '
                        'to destroy.')
    my_args = parser.parse_args()
    # prompt_for_password fills in args.password interactively when absent.
    return cli.prompt_for_password(my_args)
def get_obj(content, vimtype, name):
    """Find a managed object of the given type in the inventory.

    A container view over the whole inventory (rooted at
    ``content.rootFolder``) is created and scanned.  If *name* is truthy the
    first object whose ``name`` attribute equals it is returned; otherwise
    the first object of the requested type is returned.  Returns ``None``
    when nothing matches.
    """
    view = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True)
    found = None
    for candidate in view.view:
        # No name given -> take the first object of this type.
        if not name or candidate.name == name:
            found = candidate
            break
    view.Destroy()
    return found
ARGS = setup_args()
SI = None
try:
    # Connect without SSL certificate verification; disconnect on exit.
    SI = connect.SmartConnectNoSSL(host=ARGS.host,
                                   user=ARGS.user,
                                   pwd=ARGS.password,
                                   port=ARGS.port)
    atexit.register(connect.Disconnect, SI)
except (IOError, vim.fault.InvalidLogin):
    # Swallowed here; the `if not SI` check below reports the failure.
    pass
if not SI:
    raise SystemExit("Unable to connect to host with supplied credentials.")
# Locate the target VM by whichever identifier the user supplied
# (checked in priority order: --vm, --uuid, --name, --ip).
VM = None
if ARGS.vm:
    VM = get_obj(SI.content, [vim.VirtualMachine], ARGS.vm)
elif ARGS.uuid:
    VM = SI.content.searchIndex.FindByUuid(None, ARGS.uuid,
                                           True,
                                           False)
elif ARGS.name:
    VM = SI.content.searchIndex.FindByDnsName(None, ARGS.name,
                                              True)
elif ARGS.ip:
    VM = SI.content.searchIndex.FindByIp(None, ARGS.ip, True)
if VM is None:
    raise SystemExit(
        "Unable to locate VirtualMachine. Arguments given: "
        "vm - {0} , uuid - {1} , name - {2} , ip - {3}"
        .format(ARGS.vm, ARGS.uuid, ARGS.name, ARGS.ip)
    )
print("Found: {0}".format(VM.name))
print("The current powerState is: {0}".format(VM.runtime.powerState))
# A powered-on VM must be shut down before Destroy_Task will succeed.
if format(VM.runtime.powerState) == "poweredOn":
    print("Attempting to power off {0}".format(VM.name))
    TASK = VM.PowerOffVM_Task()
    tasks.wait_for_tasks(SI, [TASK])
    print("{0}".format(TASK.info.state))
print("Destroying VM from vSphere.")
TASK = VM.Destroy_Task()
tasks.wait_for_tasks(SI, [TASK])
print("Done.")
| 31.264957 | 76 | 0.594587 |
715e64156e2717f5d7270f3da98702a6b8223554 | 253 | py | Python | helpers/Screen.py | 1000monkeys/MastermindRedux | 6b07a341ecbf2ea325949a49c84218cc3632cd33 | [
"Unlicense"
] | null | null | null | helpers/Screen.py | 1000monkeys/MastermindRedux | 6b07a341ecbf2ea325949a49c84218cc3632cd33 | [
"Unlicense"
] | null | null | null | helpers/Screen.py | 1000monkeys/MastermindRedux | 6b07a341ecbf2ea325949a49c84218cc3632cd33 | [
"Unlicense"
] | null | null | null | import sys | 19.461538 | 46 | 0.545455 |
71602e883fba7821b66ac710b8b6c9c76a964d73 | 5,193 | py | Python | VirtualStage/BackgroundMatting/fixed_threshold.py | chris-han/ailab | b77d90f9089fa8003095843aa5de718fe73965a7 | [
"MIT"
] | null | null | null | VirtualStage/BackgroundMatting/fixed_threshold.py | chris-han/ailab | b77d90f9089fa8003095843aa5de718fe73965a7 | [
"MIT"
] | null | null | null | VirtualStage/BackgroundMatting/fixed_threshold.py | chris-han/ailab | b77d90f9089fa8003095843aa5de718fe73965a7 | [
"MIT"
] | null | null | null | import os
| 33.720779 | 116 | 0.48238 |
7160d131d6077709c38251321b7619b34bcdeab7 | 7,041 | py | Python | hn2016_falwa/utilities.py | veredsil/hn2016_falwa | 53035ac838860dd8a8d85619f16cc9785dee8655 | [
"MIT"
] | null | null | null | hn2016_falwa/utilities.py | veredsil/hn2016_falwa | 53035ac838860dd8a8d85619f16cc9785dee8655 | [
"MIT"
] | null | null | null | hn2016_falwa/utilities.py | veredsil/hn2016_falwa | 53035ac838860dd8a8d85619f16cc9785dee8655 | [
"MIT"
] | null | null | null | import numpy as np
from math import pi,exp
def static_stability(height, area, theta, s_et=None, n_et=None):
    r"""
    Compute the vertical gradient (z-derivative) of hemispheric-averaged
    potential temperature, i.e. d\tilde{theta}/dz in the definition of QGPV
    in eq.(3) of Huang and Nakamura (2016), by central differencing.  At the
    boundaries the static stability is estimated by one-sided differencing
    of the two adjacent z-grid points:
        stat_n[0]  = (t0_n[1]-t0_n[0])/(height[1]-height[0])
        stat_n[-1] = (t0_n[-2]-t0_n[-1])/(height[-2]-height[-1])

    Please make inquiries and report issues via Github:
    https://github.com/csyhuang/hn2016_falwa/issues

    Parameters
    ----------
    height : sequence or array_like
        Array of z-coordinate [in meters] with dimension = (kmax), equally spaced
    area : ndarray
        Differential areal element of each grid point;
        dimension = (nlat, nlon) or (nlat).
    theta : ndarray
        Potential temperature [K] with dimension (kmax, nlat, nlon) or (kmax, nlat)
    s_et : int, optional
        Index of the latitude that defines the boundary of the Southern
        hemispheric domain; defaults to nlat//2 if not given.
    n_et : int, optional
        Number of northernmost latitudes included in the Northern hemispheric
        domain; defaults to nlat//2 if not given.

    Returns
    -------
    t0_n : sequence or array_like
        Area-weighted average of potential temperature (\tilde{\theta} in HN16)
        in the Northern hemispheric domain with dimension = (kmax)
    t0_s : sequence or array_like
        Area-weighted average of potential temperature (\tilde{\theta} in HN16)
        in the Southern hemispheric domain with dimension = (kmax)
    stat_n : sequence or array_like
        Static stability (d\tilde{\theta}/dz in HN16) in the Northern
        hemispheric domain with dimension = (kmax)
    stat_s : sequence or array_like
        Static stability (d\tilde{\theta}/dz in HN16) in the Southern
        hemispheric domain with dimension = (kmax)
    """
    nlat = theta.shape[1]
    # Use identity comparison for the None sentinels (PEP 8).
    if s_et is None:
        s_et = nlat // 2
    if n_et is None:
        n_et = nlat // 2

    stat_n = np.zeros(theta.shape[0])
    stat_s = np.zeros(theta.shape[0])

    # Reduce the longitude dimension (if present) to get zonal means.
    if theta.ndim == 3:
        zonal_mean = np.mean(theta, axis=-1)
    elif theta.ndim == 2:
        zonal_mean = theta
    if area.ndim == 2:
        area_zonal_mean = np.mean(area, axis=-1)
    elif area.ndim == 1:
        area_zonal_mean = area

    # Total areal weight of each hemispheric cap.
    csm_n_et = np.sum(area_zonal_mean[-n_et:])
    csm_s_et = np.sum(area_zonal_mean[:s_et])

    # Area-weighted hemispheric means of theta at each level.
    t0_n = np.sum(zonal_mean[:, -n_et:] * area_zonal_mean[np.newaxis, -n_et:], axis=-1) / csm_n_et
    t0_s = np.sum(zonal_mean[:, :s_et] * area_zonal_mean[np.newaxis, :s_et], axis=-1) / csm_s_et

    # Central differences in the interior, one-sided at the boundaries.
    stat_n[1:-1] = (t0_n[2:] - t0_n[:-2]) / (height[2:] - height[:-2])
    stat_s[1:-1] = (t0_s[2:] - t0_s[:-2]) / (height[2:] - height[:-2])
    stat_n[0] = (t0_n[1] - t0_n[0]) / (height[1] - height[0])
    stat_n[-1] = (t0_n[-2] - t0_n[-1]) / (height[-2] - height[-1])
    stat_s[0] = (t0_s[1] - t0_s[0]) / (height[1] - height[0])
    stat_s[-1] = (t0_s[-2] - t0_s[-1]) / (height[-2] - height[-1])

    return t0_n, t0_s, stat_n, stat_s
def compute_qgpv_givenvort(omega, nlat, nlon, kmax, unih, ylat, avort,
                           potential_temp, t0_cn, t0_cs, stat_cn, stat_cs,
                           nlat_s=None, scale_height=7000.):
    r"""
    Compute the quasi-geostrophic potential vorticity from the absolute
    vorticity, potential temperature and static stability given.

    Please make inquiries and report issues via Github:
    https://github.com/csyhuang/hn2016_falwa/issues

    Parameters
    ----------
    omega : float
        Rotation rate of the planet.
    nlat : int
        Latitudinal dimension of the latitude grid.
    nlon : int
        Longitudinal dimension of the longitude grid.
    kmax : int
        Vertical dimension of the height grid.
    unih : sequence or array_like
        Numpy array of height in [meters]; dimension = (kmax)
    ylat : sequence or array_like
        Numpy array of latitudes in [degrees]; dimension = (nlat)
    avort : ndarray
        Absolute vorticity (relative vorticity + 2*Omega*sin(lat)) in [1/s];
        dimension = (kmax x nlat x nlon)
    potential_temp : ndarray
        Potential temperature in [K]; dimension = (kmax x nlat x nlon)
    t0_cn, t0_cs : sequence or array_like
        Area-weighted averages of potential temperature (\tilde{\theta} in
        HN16) in the Northern/Southern hemispheric domains; dimension = (kmax)
    stat_cn, stat_cs : sequence or array_like
        Static stability (d\tilde{\theta}/dz in HN16) in the Northern/Southern
        hemispheric domains; dimension = (kmax)
    nlat_s : int, optional
        Number of latitudes assigned to each hemispheric domain;
        defaults to nlat//2.
    scale_height : float
        Scale height of the atmosphere in [m] with default value 7000.

    Returns
    -------
    QGPV : ndarray
        Quasi-geostrophic potential vorticity; dimension = (kmax x nlat x nlon)
    dzdiv : ndarray
        Stretching term in QGPV; dimension = (kmax x nlat x nlon)
    """
    if nlat_s is None:
        nlat_s = nlat // 2

    # --- Stretching (z-divergence) term ---
    # zdiv = exp(-z/H) * (theta - theta_ref) / static_stability, with the
    # hemisphere-appropriate reference profile for each latitude band.
    zdiv = np.empty_like(potential_temp)
    dzdiv = np.empty_like(potential_temp)
    for kk in range(kmax):
        zdiv[kk, :nlat_s, :] = exp(-unih[kk] / scale_height) * \
            (potential_temp[kk, :nlat_s, :] - t0_cs[kk]) / stat_cs[kk]
        zdiv[kk, -nlat_s:, :] = exp(-unih[kk] / scale_height) * \
            (potential_temp[kk, -nlat_s:, :] - t0_cn[kk]) / stat_cn[kk]

    # d(zdiv)/dz by central differencing in the interior and one-sided
    # differencing at the bottom/top levels, weighted by exp(z/H).
    dzdiv[1:kmax - 1, :, :] = np.exp(unih[1:kmax - 1, np.newaxis, np.newaxis] / scale_height) * \
        (zdiv[2:kmax, :, :] - zdiv[0:kmax - 2, :, :]) \
        / (unih[2:kmax, np.newaxis, np.newaxis] - unih[0:kmax - 2, np.newaxis, np.newaxis])
    dzdiv[0, :, :] = exp(unih[0] / scale_height) * (zdiv[1, :, :] - zdiv[0, :, :]) / \
        (unih[1, np.newaxis, np.newaxis] - unih[0, np.newaxis, np.newaxis])
    dzdiv[kmax - 1, :, :] = exp(unih[kmax - 1] / scale_height) * (zdiv[kmax - 1, :, :] - zdiv[kmax - 2, :, :]) / \
        (unih[kmax - 1, np.newaxis, np.newaxis] - unih[kmax - 2, np.newaxis, np.newaxis])

    # Planetary vorticity f = 2*Omega*sin(lat), broadcast over the grid.
    av1 = np.ones((kmax, nlat, nlon)) * 2 * omega * np.sin(ylat[np.newaxis, :, np.newaxis] * pi / 180.)

    # QGPV = absolute vorticity + f * stretching term.
    qgpv = avort + dzdiv * av1
    return qgpv, dzdiv
| 40.234286 | 111 | 0.656441 |
7160dc5984a5a68781b1f9dc71bfe52a6ee535f4 | 12,570 | py | Python | src/command_modules/azure-cli-iot/azure/cli/command_modules/iot/_params.py | JennyLawrance/azure-cli | cb9ca4b694110806b31803a95f9f315b2fde6410 | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-iot/azure/cli/command_modules/iot/_params.py | JennyLawrance/azure-cli | cb9ca4b694110806b31803a95f9f315b2fde6410 | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-iot/azure/cli/command_modules/iot/_params.py | JennyLawrance/azure-cli | cb9ca4b694110806b31803a95f9f315b2fde6410 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from argcomplete.completers import FilesCompleter
from knack.arguments import CLIArgumentType
from azure.cli.core.commands.parameters import (get_location_type,
file_type,
get_resource_name_completion_list,
get_enum_type,
get_three_state_flag)
from azure.mgmt.iothub.models.iot_hub_client_enums import IotHubSku
from azure.mgmt.iothubprovisioningservices.models.iot_dps_client_enums import (IotDpsSku,
AllocationPolicy,
AccessRightsDescription)
from .custom import KeyType, SimpleAccessRights
from ._validators import validate_policy_permissions
from ._completers import get_device_id_completion_list
hub_name_type = CLIArgumentType(
completer=get_resource_name_completion_list('Microsoft.Devices/IotHubs'),
help='IoT Hub name.')
dps_name_type = CLIArgumentType(
options_list=['--dps-name'],
completer=get_resource_name_completion_list('Microsoft.Devices/ProvisioningServices'),
help='IoT Provisioning Service name')
| 61.019417 | 120 | 0.631344 |
7160eb99604d70299eb40716235e949ffc576a16 | 3,280 | py | Python | metrics-calculator/tests/integration/test_s3.py | nhsconnect/prm-practice-migration-dashboard | 40c8760f409834d05bde4fb015aa5f8765acaa82 | [
"0BSD"
] | null | null | null | metrics-calculator/tests/integration/test_s3.py | nhsconnect/prm-practice-migration-dashboard | 40c8760f409834d05bde4fb015aa5f8765acaa82 | [
"0BSD"
] | null | null | null | metrics-calculator/tests/integration/test_s3.py | nhsconnect/prm-practice-migration-dashboard | 40c8760f409834d05bde4fb015aa5f8765acaa82 | [
"0BSD"
] | null | null | null | import boto3
import gzip
from moto import mock_s3
import pytest
import os
from chalicelib.s3 import read_object_s3, write_object_s3, objects_exist
from tests.builders.file import build_gzip_csv
| 28.521739 | 97 | 0.710366 |
716145a9d2a82e68a98031ac79781824db56e9c8 | 13,528 | py | Python | image_analogy/losses/patch_matcher.py | kaldap/image-analogies | 0867aedfae7dfc0d27c42805a3d07f7b9eb7eaa2 | [
"MIT"
] | 3,722 | 2016-02-28T18:03:51.000Z | 2022-03-29T18:03:30.000Z | image_analogy/losses/patch_matcher.py | germanmad/image-analogies | 066626149ccb96b0a0488ca7ea4fc992aa62b727 | [
"MIT"
] | 58 | 2016-02-28T03:23:43.000Z | 2022-03-11T23:14:08.000Z | image_analogy/losses/patch_matcher.py | germanmad/image-analogies | 066626149ccb96b0a0488ca7ea4fc992aa62b727 | [
"MIT"
] | 351 | 2016-03-05T03:22:48.000Z | 2022-03-01T09:06:33.000Z | import numpy as np
import scipy.interpolate
import scipy.ndimage
from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d
def make_patch_grid(x, patch_size, patch_stride=1):
    '''x shape: (num_channels, rows, cols)

    Returns a dense grid of square patches with shape
    (num_rows, num_cols, num_channels, patch_size, patch_size).
    NOTE(review): patch_stride is only forwarded to _calc_patch_grid_dims;
    extract_patches_2d itself always uses stride 1 — confirm intent.
    '''
    # sklearn's extract_patches_2d expects channels-last (rows, cols, channels).
    x = x.transpose(2, 1, 0)
    patches = extract_patches_2d(x, (patch_size, patch_size))
    x_w, x_h, x_c = x.shape
    num_rows, num_cols = _calc_patch_grid_dims(x.shape, patch_size, patch_stride)
    # Reshape the flat patch list back into its 2-D grid layout.
    patches = patches.reshape((num_rows, num_cols, patch_size, patch_size, x_c))
    # Move channels ahead of the per-patch spatial axes.
    patches = patches.transpose((0, 1, 4, 2, 3))
    #patches = np.rollaxis(patches, -1, 2)
    return patches
def combine_patches_grid(in_patches, out_shape):
    '''Reconstruct an image from these `patches`
    input shape: (rows, cols, channels, patch_row, patch_col)
    '''
    grid_rows, grid_cols = in_patches.shape[:2]
    channels = in_patches.shape[-3]
    side = in_patches.shape[-1]
    # Flatten the 2-D grid into a list of patches:
    # (patches, channels, patch_row, patch_col)
    flat = in_patches.reshape(grid_rows * grid_cols, channels, side, side)
    # sklearn's reconstructor wants channels last: (patches, p, p, channels)
    flat = flat.transpose(0, 2, 3, 1)
    stitched = reconstruct_from_patches_2d(flat, out_shape)
    # Back to channels-first, width/height swapped, as float32.
    return stitched.transpose(2, 1, 0).astype(np.float32)
def congrid(a, newdims, method='linear', centre=False, minusone=False):
    '''Arbitrary resampling of source array to new dimension sizes.
    Currently only supports maintaining the same number of dimensions.
    To use 1-D arrays, first promote them to shape (x,1).

    Uses the same parameters and creates the same co-ordinate lookup points
    as IDL''s congrid routine, which apparently originally came from a VAX/VMS
    routine of the same name.

    method:
    neighbour - closest value from original data
    nearest and linear - uses n x 1-D interpolations using
                         scipy.interpolate.interp1d
    (see Numerical Recipes for validity of use of n 1-D interpolations)
    spline - uses ndimage.map_coordinates

    centre:
    True - interpolation points are at the centres of the bins
    False - points are at the front edge of the bin

    minusone:
    For example- inarray.shape = (i,j) & new dimensions = (x,y)
    False - inarray is resampled by factors of (i/x) * (j/y)
    True - inarray is resampled by(i-1)/(x-1) * (j-1)/(y-1)
    This prevents extrapolation one element beyond bounds of input array.
    '''
    # Work on a float copy (np.cast was removed in modern NumPy).
    if a.dtype not in [np.float64, np.float32]:
        a = a.astype(float)

    m1 = int(minusone)
    ofs = int(centre) * 0.5
    old = np.array(a.shape)
    ndims = len(a.shape)
    if len(newdims) != ndims:
        print("[congrid] dimensions error. "
              "This routine currently only support "
              "rebinning to the same number of dimensions.")
        return None
    newdims = np.asarray(newdims, dtype=float)
    dimlist = []

    if method == 'neighbour':
        for i in range(ndims):
            # np.indices requires integer dimensions.
            base = np.indices(newdims.astype(int))[i]
            dimlist.append((old[i] - m1) / (newdims[i] - m1)
                           * (base + ofs) - ofs)
        cd = np.array(dimlist).round().astype(int)
        # Fancy indexing with a list of arrays was removed; use a tuple.
        newa = a[tuple(cd)]
        return newa

    elif method in ['nearest', 'linear']:
        # calculate new dims
        for i in range(ndims):
            base = np.arange(newdims[i])
            dimlist.append((old[i] - m1) / (newdims[i] - m1)
                           * (base + ofs) - ofs)
        # specify old dims (np.float alias was removed; use builtin float)
        olddims = [np.arange(i, dtype=float) for i in list(a.shape)]

        # first interpolation - for ndims = any
        mint = scipy.interpolate.interp1d(olddims[-1], a, kind=method)
        newa = mint(dimlist[-1])

        # range() is lazy in Python 3; materialize before list concatenation.
        trorder = [ndims - 1] + list(range(ndims - 1))
        for i in range(ndims - 2, -1, -1):
            newa = newa.transpose(trorder)
            mint = scipy.interpolate.interp1d(olddims[i], newa, kind=method)
            newa = mint(dimlist[i])

        if ndims > 1:
            # need one more transpose to return to original dimensions
            newa = newa.transpose(trorder)
        return newa

    elif method in ['spline']:
        oslices = [slice(0, j) for j in old]
        oldcoords = np.ogrid[oslices]
        nslices = [slice(0, j) for j in list(newdims)]
        newcoords = np.mgrid[nslices]

        # np.rank was removed; ndarray.ndim is the replacement.
        newcoords_dims = list(range(newcoords.ndim))
        # make first index last
        newcoords_dims.append(newcoords_dims.pop(0))
        newcoords_tr = newcoords.transpose(newcoords_dims)
        # makes a view that affects newcoords
        newcoords_tr += ofs
        deltas = (np.asarray(old) - m1) / (newdims - m1)
        newcoords_tr *= deltas
        newcoords_tr -= ofs
        newa = scipy.ndimage.map_coordinates(a, newcoords)
        return newa
    else:
        print("Congrid error: Unrecognized interpolation type.\n",
              "Currently only \'neighbour\', \'nearest\',\'linear\',",
              "and \'spline\' are supported.")
        return None
if __name__ == '__main__':
    # Smoke-test driver: exercises PatchMatcher on random features, then on
    # real images supplied on the command line.
    import sys
    import time
    # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2 — this
    # driver requires an old SciPy (or porting to imageio.imwrite).
    from scipy.misc import imsave
    from image_analogy.img_utils import load_image, preprocess_image, deprocess_image
    content_image_path, style_image_path, output_prefix = sys.argv[1:]
    jump_size = 1.0
    num_steps = 7
    patch_size = 1
    patch_stride = 1
    feat_chans = 512
    # Random-feature round trip: match random "input" features against
    # random "style" features for a few iterations.
    feat_style_shape = (feat_chans, 12, 18)
    feat_style = np.random.uniform(0.0, 1.0, feat_style_shape)
    feat_in_shape = (feat_chans, 17, 10)
    feat_in = np.random.uniform(0.0, 1.0, feat_in_shape)
    matcher = PatchMatcher(feat_in_shape[::-1], feat_style, patch_size=patch_size)
    feat_in_normed = matcher.normalize_patches(matcher.get_patches_for(feat_in))
    for i in range(num_steps):
        matcher.update_with_patches(feat_in_normed)
    r = matcher.get_reconstruction()
    # Image round trip: split into patches and reassemble unchanged.
    content_img_img = load_image(content_image_path)
    content_n_channels, content_n_rows, content_n_cols = content_img_img.shape[::-1]
    content_img = preprocess_image(content_img_img, content_n_cols, content_n_rows)[0]#.transpose((2,1,0))
    style_img = load_image(style_image_path)
    style_n_channels, style_n_rows, style_n_cols = content_img_img.shape[::-1]
    style_img = preprocess_image(
        load_image(style_image_path), style_n_cols, style_n_rows)[0]#.transpose((2,1,0))
    pg = make_patch_grid(content_img, patch_size)
    result = combine_patches_grid(pg, content_img.shape[::-1])
    outimg = deprocess_image(result, contrast_percent=0)
    imsave(output_prefix + '_bestre.png', outimg)
    # # #
    # Full matcher run: alternate forward/reverse propagation each step.
    matcher = PatchMatcher((content_n_cols, content_n_rows, content_n_channels), style_img, patch_size=patch_size)
    for i in range(num_steps):
        start = time.time()
        matcher.update(content_img, reverse_propagation=bool(i % 2))
        print(matcher.similarity.min(), matcher.similarity.max(), matcher.similarity.mean())
        end = time.time()
        #print end-start
        start = time.time()
        result = matcher.get_reconstruction(patches=matcher.target_patches)
        print(result.shape)
        end = time.time()
        print(end-start)
        outimg = deprocess_image(result, contrast_percent=0)
        # # imsave takes (rows, cols, channels)
        imsave(output_prefix + '_best.png', outimg)
| 43.922078 | 171 | 0.671348 |
716192be9eb9b6903ed659ac040571121cd26498 | 344 | py | Python | muni_portal/core/migrations/0030_remove_servicerequest_mobile_reference.py | desafinadude/muni-portal-backend | 9ffc447194b8f29619585cd919f67d62062457a3 | [
"MIT"
] | 1 | 2021-01-18T13:01:04.000Z | 2021-01-18T13:01:04.000Z | muni_portal/core/migrations/0030_remove_servicerequest_mobile_reference.py | desafinadude/muni-portal-backend | 9ffc447194b8f29619585cd919f67d62062457a3 | [
"MIT"
] | 42 | 2020-08-29T08:55:53.000Z | 2021-04-14T16:41:29.000Z | muni_portal/core/migrations/0030_remove_servicerequest_mobile_reference.py | desafinadude/muni-portal-backend | 9ffc447194b8f29619585cd919f67d62062457a3 | [
"MIT"
] | 2 | 2020-10-28T16:34:41.000Z | 2022-02-07T10:29:31.000Z | # Generated by Django 2.2.10 on 2021-02-24 09:42
from django.db import migrations
| 19.111111 | 48 | 0.610465 |
7161bb83a934c99f17f3988c15fe48d8592c6f29 | 1,247 | py | Python | rllib/agents/ppo/tests/test_appo.py | noahshpak/ray | edd783bc327760a4892ab89222ee551e42df15b9 | [
"Apache-2.0"
] | 2 | 2020-02-17T17:36:23.000Z | 2020-08-24T19:59:18.000Z | rllib/agents/ppo/tests/test_appo.py | noahshpak/ray | edd783bc327760a4892ab89222ee551e42df15b9 | [
"Apache-2.0"
] | 8 | 2020-11-13T19:02:47.000Z | 2022-03-12T00:44:51.000Z | rllib/agents/ppo/tests/test_appo.py | noahshpak/ray | edd783bc327760a4892ab89222ee551e42df15b9 | [
"Apache-2.0"
] | 1 | 2021-07-26T07:17:06.000Z | 2021-07-26T07:17:06.000Z | import unittest
import ray
import ray.rllib.agents.ppo as ppo
from ray.rllib.utils.test_utils import check_compute_single_action, \
framework_iterator
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| 29 | 76 | 0.630313 |
716227dcc03cade8b73786f23f543f0e5e37ee6c | 2,516 | py | Python | ezeeai/utils/hooks.py | jmarine/ezeeai | 091b4ce3bc5794c534084bff3301b15ba8a9be1a | [
"Apache-2.0"
] | 19 | 2019-06-12T03:14:59.000Z | 2021-05-31T16:02:53.000Z | ezeeai/utils/hooks.py | jmarine/ezeeai | 091b4ce3bc5794c534084bff3301b15ba8a9be1a | [
"Apache-2.0"
] | 29 | 2019-06-27T10:15:38.000Z | 2022-03-11T23:46:36.000Z | ezeeai/utils/hooks.py | jmarine/ezeeai | 091b4ce3bc5794c534084bff3301b15ba8a9be1a | [
"Apache-2.0"
] | 10 | 2019-05-14T17:45:44.000Z | 2020-08-26T13:25:04.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.basic_session_run_hooks import NeverTriggerTimer, SecondOrStepTimer
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.util.tf_export import tf_export
import smtplib
from email.mime.text import MIMEText
| 37 | 113 | 0.68124 |
71643b0981d730fe3a0cca31ee9497698e110f45 | 245 | py | Python | tests/factory_fixtures/dummy_resource.py | whiletrace/dwellinglybackend | e766b3d612b4c92fd337b82498ab8ef68bd95e1f | [
"MIT"
] | 15 | 2020-07-09T20:51:09.000Z | 2021-11-28T21:59:02.000Z | tests/factory_fixtures/dummy_resource.py | codeforpdx/dwellinglybackend | 92fee6d19a68ae00750927b8700eaa7195b57668 | [
"MIT"
] | 148 | 2020-03-28T22:10:30.000Z | 2021-12-19T09:22:59.000Z | tests/factory_fixtures/dummy_resource.py | whiletrace/dwellinglybackend | e766b3d612b4c92fd337b82498ab8ef68bd95e1f | [
"MIT"
] | 30 | 2020-03-12T02:31:27.000Z | 2021-07-29T02:40:36.000Z | from flask import request
from flask_restful import Resource
from utils.gatekeeper import allowed_params
| 20.416667 | 43 | 0.759184 |
7164421f4b7f16666c296653efa901ece81b5485 | 3,999 | py | Python | quizzes/00.organize.me/hackerrank/sorted_set/server2.py | JiniousChoi/encyclopedia-in-code | 77bc551a03a2a3e3808e50016ece14adb5cfbd96 | [
"MIT"
] | 2 | 2018-07-20T10:15:49.000Z | 2018-07-20T10:16:54.000Z | quizzes/00.organize.me/hackerrank/sorted_set/server2.py | JiniousChoi/encyclopedia-in-code | 77bc551a03a2a3e3808e50016ece14adb5cfbd96 | [
"MIT"
] | 2 | 2018-06-26T09:12:44.000Z | 2019-12-18T00:09:14.000Z | quizzes/00.organize.me/hackerrank/sorted_set/server2.py | JiniousChoi/encyclopedia-in-code | 77bc551a03a2a3e3808e50016ece14adb5cfbd96 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import socket, threading
from queue import Queue
import sys, struct
# NOTE: Use this path to create the UDS Server socket
SERVER_SOCKET_PATH = "./socket";
FMT = "!L"
if __name__ == '__main__':
main()
| 24.838509 | 83 | 0.507877 |
7166d9cc0195426344d7d645ff648763fd1d1b77 | 40 | py | Python | vnpy/gateway/rohon/__init__.py | funrunskypalace/vnpy | 2d87aede685fa46278d8d3392432cc127b797926 | [
"MIT"
] | 323 | 2015-11-21T14:45:29.000Z | 2022-03-16T08:54:37.000Z | vnpy/gateway/rohon/__init__.py | funrunskypalace/vnpy | 2d87aede685fa46278d8d3392432cc127b797926 | [
"MIT"
] | 9 | 2017-03-21T08:26:21.000Z | 2021-08-23T06:41:17.000Z | vnpy/gateway/rohon/__init__.py | funrunskypalace/vnpy | 2d87aede685fa46278d8d3392432cc127b797926 | [
"MIT"
] | 148 | 2016-09-26T03:25:39.000Z | 2022-02-06T14:43:48.000Z | from .rohon_gateway import RohonGateway
| 20 | 39 | 0.875 |
7169f3d04044834201fc8a2b35d915d5a016859d | 1,283 | py | Python | dnd/mobile/urls.py | dndtools2/dndtools2 | 6bd794349b84f3018dd0bd12712535924557c166 | [
"MIT"
] | null | null | null | dnd/mobile/urls.py | dndtools2/dndtools2 | 6bd794349b84f3018dd0bd12712535924557c166 | [
"MIT"
] | null | null | null | dnd/mobile/urls.py | dndtools2/dndtools2 | 6bd794349b84f3018dd0bd12712535924557c166 | [
"MIT"
] | null | null | null | from django.conf.urls import patterns, url, include
from .views import force_desktop_version, return_to_mobile_version
app_name = 'mobile'
urlpatterns = [
# force desktop
url(r'^force-desktop-version/$', force_desktop_version, name='force_desktop_version'),
# return to mobile version
url(r'^return-to-mobile-version/$', return_to_mobile_version, name='return_to_mobile_version'),
# index
url(r'^', include('dnd.mobile.index.urls')),
# character classes
url(r'^classes/', include('dnd.mobile.character_classes.urls')),
# feats
url(r'^feats/', include('dnd.mobile.feats.urls')),
# items
url(r'^items/', include('dnd.mobile.items.urls')),
# languages
url(r'^languages/', include('dnd.mobile.languages.urls')),
# monsters
url(r'^monsters/', include('dnd.mobile.monsters.urls')),
# races
url(r'^races/', include('dnd.mobile.races.urls')),
# rulebooks
url(r'^rulebooks/', include('dnd.mobile.rulebooks.urls')),
# rules
url(r'^rules/', include('dnd.mobile.rules.urls')),
# skills
url(r'^skills/', include('dnd.mobile.skills.urls')),
# spells
url(r'^spells/', include('dnd.mobile.spells.urls')),
# deities
url(r'^deities/', include('dnd.mobile.deities.urls')),
]
| 26.183673 | 99 | 0.653936 |
716d93f8130aaab6f0fe666657a995579882463d | 698 | py | Python | ros_aruco.py | esteng/guiding-multi-step | 3f0db0ba70b5851cc83878f4ed48cf82342a2ddf | [
"BSD-2-Clause"
] | 69 | 2019-09-30T13:42:02.000Z | 2022-03-28T08:37:51.000Z | ros_aruco.py | esteng/guiding-multi-step | 3f0db0ba70b5851cc83878f4ed48cf82342a2ddf | [
"BSD-2-Clause"
] | 5 | 2019-10-23T20:03:42.000Z | 2021-07-10T09:43:50.000Z | ros_aruco.py | esteng/guiding-multi-step | 3f0db0ba70b5851cc83878f4ed48cf82342a2ddf | [
"BSD-2-Clause"
] | 18 | 2019-11-17T20:57:46.000Z | 2022-03-15T10:46:25.000Z | """
Calibrate with the ROS package aruco_detect
"""
import rospy
import roslib
from geometry_msgs.msg import Transform
| 24.928571 | 94 | 0.679083 |
716e210884f18d925519c5ee8a6aa1f846b9c04f | 3,977 | py | Python | utils/utils.py | mmalandra-kb4/service-metrics-gatherer | f9a795a43d491ef59a32121ab4ed5c2c62cb968b | [
"Apache-2.0"
] | null | null | null | utils/utils.py | mmalandra-kb4/service-metrics-gatherer | f9a795a43d491ef59a32121ab4ed5c2c62cb968b | [
"Apache-2.0"
] | null | null | null | utils/utils.py | mmalandra-kb4/service-metrics-gatherer | f9a795a43d491ef59a32121ab4ed5c2c62cb968b | [
"Apache-2.0"
] | 2 | 2022-01-28T18:31:21.000Z | 2022-03-03T14:42:48.000Z | """
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import logging
import re
import os
import json
from urllib.parse import urlparse
import datetime
logger = logging.getLogger("metricsGatherer.utils")
def read_json_file(folder, filename, to_json=False):
"""Read fixture from file"""
with open(os.path.join(folder, filename), "r") as file:
return file.read() if not to_json else json.loads(file.read())
def build_url(main_url, url_params):
"""Build url by concating url and url_params"""
return main_url + "/" + "/".join(url_params)
| 33.70339 | 103 | 0.647473 |
716fc75d575164c084b19d0f3c008a98785ed3a6 | 20,287 | py | Python | OSAnalysisHelper.py | nassermarafi/SRCSWArchetypes | 105a5e40ef0ba1951108dc52b382ae0c5457057a | [
"MIT"
] | 7 | 2020-04-29T08:44:12.000Z | 2022-03-05T04:00:11.000Z | OSAnalysisHelper.py | nassermarafi/SRCSWArchetypes | 105a5e40ef0ba1951108dc52b382ae0c5457057a | [
"MIT"
] | null | null | null | OSAnalysisHelper.py | nassermarafi/SRCSWArchetypes | 105a5e40ef0ba1951108dc52b382ae0c5457057a | [
"MIT"
] | 4 | 2019-12-20T04:38:11.000Z | 2021-11-21T18:25:34.000Z | from __future__ import absolute_import
__author__ = 'marafi'
| 63.199377 | 137 | 0.724257 |
717008cf6d0ff4d98caa231046b8d209403318a1 | 6,193 | py | Python | unityparser/commands.py | socialpoint-labs/unity-yaml-parser | 91c175140ed32aed301bc34d4311f370da69a8ba | [
"MIT"
] | 76 | 2019-06-17T13:17:59.000Z | 2022-03-11T19:39:24.000Z | unityparser/commands.py | socialpoint-labs/unity-yaml-parser | 91c175140ed32aed301bc34d4311f370da69a8ba | [
"MIT"
] | 17 | 2019-06-07T09:04:27.000Z | 2022-02-16T19:01:38.000Z | unityparser/commands.py | socialpoint-labs/unity-yaml-parser | 91c175140ed32aed301bc34d4311f370da69a8ba | [
"MIT"
] | 9 | 2019-10-08T16:07:35.000Z | 2021-12-08T15:27:00.000Z | import re
from argparse import ArgumentParser
from multiprocessing import Pool, Manager, Process
from pathlib import Path
from .utils import UnityDocument
YAML_HEADER = '%YAML'
if __name__ == '__main__':
# None is considered successful
code = UnityProjectTester().run() or 0
exit(code)
| 39.698718 | 117 | 0.56467 |
7171d1486ab6a395eb9ff27ecf4115ab48da0237 | 3,767 | py | Python | dokang/harvesters/__init__.py | Polyconseil/dokang | b0ab3e4aabfb97adb2a2e877a42fc1896e5fcf08 | [
"BSD-3-Clause"
] | 6 | 2016-07-04T17:16:42.000Z | 2018-11-13T08:10:21.000Z | dokang/harvesters/__init__.py | Polyconseil/dokang | b0ab3e4aabfb97adb2a2e877a42fc1896e5fcf08 | [
"BSD-3-Clause"
] | 6 | 2016-02-23T15:08:51.000Z | 2017-01-02T11:57:45.000Z | dokang/harvesters/__init__.py | Polyconseil/dokang | b0ab3e4aabfb97adb2a2e877a42fc1896e5fcf08 | [
"BSD-3-Clause"
] | 5 | 2015-04-05T14:07:11.000Z | 2017-04-13T14:08:02.000Z | # -*- coding: utf-8 -*-
# Copyright (c) Polyconseil SAS. All rights reserved.
import hashlib
import json
import logging
import os
import re
from .html import html_config, HtmlHarvester # pylint: disable=unused-import
from .sphinx import ( # pylint: disable=unused-import
sphinx_config, sphinx_rtd_config,
SphinxHarvester, ReadTheDocsSphinxHarvester
)
logger = logging.getLogger(__name__)
| 38.050505 | 104 | 0.617733 |
7171ec803ebbc9d578b8e216bcbe447dfe0af3a6 | 27 | py | Python | __init__.py | semccomas/string-method-gmxapi | fb68dce792d35df739225b1048e0816a4a61d45e | [
"MIT"
] | 6 | 2020-10-15T16:43:19.000Z | 2022-01-21T09:09:13.000Z | __init__.py | semccomas/string-method-gmxapi | fb68dce792d35df739225b1048e0816a4a61d45e | [
"MIT"
] | 9 | 2020-07-01T08:36:49.000Z | 2021-06-23T07:15:53.000Z | __init__.py | semccomas/string-method-gmxapi | fb68dce792d35df739225b1048e0816a4a61d45e | [
"MIT"
] | 5 | 2020-07-15T06:08:00.000Z | 2021-07-02T14:24:59.000Z | __all__ = ["stringmethod"]
| 13.5 | 26 | 0.703704 |
71727855c3b5a49ba770b23fd1b96b453bcf8530 | 855 | py | Python | carPooling/migrations/0018_auto_20190521_1651.py | yangtao4389/pinche | 81463761058f67d47cea980f29a061b1e1b2d08a | [
"Apache-2.0"
] | 1 | 2020-09-30T01:27:57.000Z | 2020-09-30T01:27:57.000Z | carPooling/migrations/0018_auto_20190521_1651.py | yangtao4389/pinche | 81463761058f67d47cea980f29a061b1e1b2d08a | [
"Apache-2.0"
] | 9 | 2020-06-05T19:51:33.000Z | 2022-03-11T23:40:25.000Z | carPooling/migrations/0018_auto_20190521_1651.py | yangtao4389/pinche | 81463761058f67d47cea980f29a061b1e1b2d08a | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.0.4 on 2019-05-21 16:51
from django.db import migrations, models
| 29.482759 | 98 | 0.611696 |
717345e66810546b06a5a6c9cdbe99a57810c275 | 357 | py | Python | src/fuckbot/ticker.py | Zer0-One/fuckbot | 02f5a112988e25a9f04a9a941a55f11cf51c3d8f | [
"BSD-2-Clause"
] | null | null | null | src/fuckbot/ticker.py | Zer0-One/fuckbot | 02f5a112988e25a9f04a9a941a55f11cf51c3d8f | [
"BSD-2-Clause"
] | null | null | null | src/fuckbot/ticker.py | Zer0-One/fuckbot | 02f5a112988e25a9f04a9a941a55f11cf51c3d8f | [
"BSD-2-Clause"
] | 1 | 2022-01-24T21:20:43.000Z | 2022-01-24T21:20:43.000Z | import discord
import logging
TRADING_API_URL='https://cloud.iexapis.com/stable/stock/{0}/quote'
TRADING_API_ICON='https://iextrading.com/favicon.ico'
| 29.75 | 117 | 0.756303 |
7173dccce721752a801b4b3463958745f87a8a0c | 9,769 | py | Python | minos/lib/util/StateSet.py | johny-c/minos | 660e991f44118382f4a3cb7566670c4159d33fe3 | [
"MIT"
] | 1 | 2020-02-18T08:19:32.000Z | 2020-02-18T08:19:32.000Z | minos/lib/util/StateSet.py | johny-c/minos | 660e991f44118382f4a3cb7566670c4159d33fe3 | [
"MIT"
] | 4 | 2019-12-27T12:44:58.000Z | 2021-05-07T17:41:09.000Z | minos/lib/util/StateSet.py | johny-c/minos | 660e991f44118382f4a3cb7566670c4159d33fe3 | [
"MIT"
] | 1 | 2019-10-15T00:28:39.000Z | 2019-10-15T00:28:39.000Z | import bz2
import csv
import collections
import math
from enum import Enum
def main():
import argparse
# Argument processing
parser = argparse.ArgumentParser(description='Load state set')
parser.add_argument('-n', '--limit',
type=int,
help='Number of states per scene')
parser.add_argument('--select',
default=Select.FIRST,
type=Select,
help='Number of states per scene')
parser.add_argument('--field',
default=None,
help='Field to use for selection')
parser.add_argument('--scenes',
type=str,
default=None,
help='Scenes file to load')
parser.add_argument('input',
help='Input file to load')
args = parser.parse_args()
state_set = StateSet(scenes_file=args.scenes,
states_files=args.input,
max_states_per_scene=args.limit,
select_policy=SelectPolicy(args.select, args.field))
for state in state_set.states:
print(state)
if __name__ == "__main__":
main()
| 41.747863 | 117 | 0.522469 |
7174375d0908f71b2864b3f93d7df2286d52caea | 29 | py | Python | pagetags/configuration/development.py | pmatigakis/pagetags | 5e81d01493548edc2677453819c32de3cf75d159 | [
"MIT"
] | null | null | null | pagetags/configuration/development.py | pmatigakis/pagetags | 5e81d01493548edc2677453819c32de3cf75d159 | [
"MIT"
] | null | null | null | pagetags/configuration/development.py | pmatigakis/pagetags | 5e81d01493548edc2677453819c32de3cf75d159 | [
"MIT"
] | null | null | null | DEBUG = True
TESTING = False
| 9.666667 | 15 | 0.724138 |
71757b823cf5dc703ab76426a5d125f92b1f4a70 | 665 | py | Python | hpcrocket/pyfilesystem/factory.py | SvenMarcus/hpc-rocket | b28917e7afe6e2e839d1ae58f2e21fba6e3eb61c | [
"MIT"
] | 7 | 2022-01-03T13:52:40.000Z | 2022-03-10T16:26:04.000Z | hpcrocket/pyfilesystem/factory.py | SvenMarcus/ssh-slurm-runner | 91ea1a052a0362b5b8676b6e429aa3c890359e73 | [
"MIT"
] | 18 | 2021-04-16T15:53:55.000Z | 2021-09-13T17:38:44.000Z | hpcrocket/pyfilesystem/factory.py | SvenMarcus/hpclaunch | 1a0459167bf5d7b26b1d7e46a1b1d073a4a55650 | [
"MIT"
] | null | null | null | from hpcrocket.core.filesystem import Filesystem, FilesystemFactory
from hpcrocket.core.launchoptions import Options
from hpcrocket.pyfilesystem.localfilesystem import LocalFilesystem
from hpcrocket.pyfilesystem.sshfilesystem import SSHFilesystem
| 35 | 67 | 0.77594 |
7175fb970f1844dacf40b20065573654fbebe36d | 4,053 | py | Python | cqlsh_tests/cqlsh_tools.py | vincewhite/cassandra-dtest | a01dce6af73a8656e8740227a811fe63025fb3f4 | [
"Apache-2.0"
] | null | null | null | cqlsh_tests/cqlsh_tools.py | vincewhite/cassandra-dtest | a01dce6af73a8656e8740227a811fe63025fb3f4 | [
"Apache-2.0"
] | null | null | null | cqlsh_tests/cqlsh_tools.py | vincewhite/cassandra-dtest | a01dce6af73a8656e8740227a811fe63025fb3f4 | [
"Apache-2.0"
] | null | null | null | import csv
import random
import cassandra
from cassandra.cluster import ResultSet
from typing import List
def assert_csvs_items_equal(filename1, filename2):
with open(filename1, 'r') as x, open(filename2, 'r') as y:
assert list(x.readlines()) == list(y.readlines())
def random_list(gen=None, n=None):
if gen is None:
if n is None:
else:
return [gen() for _ in range(length())]
def write_rows_to_csv(filename, data):
with open(filename, 'wb') as csvfile:
writer = csv.writer(csvfile)
for row in data:
writer.writerow(row)
csvfile.close
def deserialize_date_fallback_int(byts, protocol_version):
timestamp_ms = cassandra.marshal.int64_unpack(byts)
try:
return cassandra.util.datetime_from_timestamp(timestamp_ms / 1000.0)
except OverflowError:
return timestamp_ms
def monkeypatch_driver():
"""
Monkeypatches the `cassandra` driver module in the same way
that clqsh does. Returns a dictionary containing the original values of
the monkeypatched names.
"""
cache = {'BytesType_deserialize': cassandra.cqltypes.BytesType.deserialize,
'DateType_deserialize': cassandra.cqltypes.DateType.deserialize,
'support_empty_values': cassandra.cqltypes.CassandraType.support_empty_values}
cassandra.cqltypes.BytesType.deserialize = staticmethod(lambda byts, protocol_version: bytearray(byts))
cassandra.cqltypes.DateType.deserialize = staticmethod(deserialize_date_fallback_int)
cassandra.cqltypes.CassandraType.support_empty_values = True
if hasattr(cassandra, 'deserializers'):
cache['DesDateType'] = cassandra.deserializers.DesDateType
del cassandra.deserializers.DesDateType
return cache
def unmonkeypatch_driver(cache):
"""
Given a dictionary that was used to cache parts of `cassandra` for
monkeypatching, restore those values to the `cassandra` module.
"""
cassandra.cqltypes.BytesType.deserialize = staticmethod(cache['BytesType_deserialize'])
cassandra.cqltypes.DateType.deserialize = staticmethod(cache['DateType_deserialize'])
cassandra.cqltypes.CassandraType.support_empty_values = cache['support_empty_values']
if hasattr(cassandra, 'deserializers'):
cassandra.deserializers.DesDateType = cache['DesDateType']
def assert_resultset_contains(got: ResultSet, expected: List[tuple]) -> None:
"""
So this is slow. I would hope a ResultSet has the capability of pulling data by PK or clustering,
however I'm not finding it atm. As such, this method isn't intended for use with large datasets.
:param got: ResultSet, expect schema of [a, b]
:param expected: list of tuples with 2 members corresponding with a/b schema of ResultSet
"""
# Adding a touch of sanity check so people don't mis-use this. n^2 is bad.
assert len(expected) <= 1000, 'This is a slow comparison method. Don\'t use for > 1000 tuples.'
# First quick check: if we have a different count, we can just die.
assert len(got.current_rows) == len(expected)
for t in expected:
assert len(t) == 2, 'Got unexpected tuple len. Expected 2, got tuple: {}'.format(t)
found = False
for row in got.current_rows:
if found:
break
if row.a == t[0] and row.b == t[1]:
found = True
assert found, 'Failed to find expected row: {}'.format(t)
| 33.495868 | 107 | 0.683691 |
71764b0e93fc239b103c34e487ed538048a2ed7d | 5,394 | py | Python | tests/unit/sagemaker/tensorflow/test_estimator_init.py | LastRemote/sagemaker-python-sdk | fddf29d9e4383cd3f939253eef47ee79a464dd37 | [
"Apache-2.0"
] | 1,690 | 2017-11-29T20:13:37.000Z | 2022-03-31T12:58:11.000Z | tests/unit/sagemaker/tensorflow/test_estimator_init.py | LastRemote/sagemaker-python-sdk | fddf29d9e4383cd3f939253eef47ee79a464dd37 | [
"Apache-2.0"
] | 2,762 | 2017-12-04T05:18:03.000Z | 2022-03-31T23:40:11.000Z | tests/unit/sagemaker/tensorflow/test_estimator_init.py | LastRemote/sagemaker-python-sdk | fddf29d9e4383cd3f939253eef47ee79a464dd37 | [
"Apache-2.0"
] | 961 | 2017-11-30T16:44:03.000Z | 2022-03-30T23:12:09.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
from mock import Mock, patch
from packaging import version
import pytest
from sagemaker.tensorflow import TensorFlow
REGION = "us-west-2"
ENV_INPUT = {"env_key1": "env_val1", "env_key2": "env_val2", "env_key3": "env_val3"}
| 32.890244 | 100 | 0.725436 |
71770ce551bdcd43974b0f18b616fb25201796c0 | 827 | py | Python | testing.py | sofwerx/mycroft-articlekeyword-skill | 7cab109db512d3a6465db241b18018e9415f4a9f | [
"Unlicense"
] | null | null | null | testing.py | sofwerx/mycroft-articlekeyword-skill | 7cab109db512d3a6465db241b18018e9415f4a9f | [
"Unlicense"
] | null | null | null | testing.py | sofwerx/mycroft-articlekeyword-skill | 7cab109db512d3a6465db241b18018e9415f4a9f | [
"Unlicense"
] | null | null | null |
import subprocess
proc = subprocess.Popen(['python3', 'articlekeywords.py', 'aih.txt' , '5'], stdout=subprocess.PIPE )
#print(type(proc.communicate()[0]))
# path = '/opt/mycroft/skills/mycroft-bitcoinprice-skill/'
text = proc.stdout.read()
rows = text.splitlines()
#print(text.splitlines())
count = 0
s = ""
for row in rows:
divide = row.split()
wordCount = len(divide)
if wordCount > 1:
count = count + 1
s += str(count)
s += " "
s += str(divide[1])
s += " "
print(s)
# with open(path + 'out.csv', 'r') as content_file:
# text = content_file.read()
# self.speak_dialog("bitcoin.price", data={'price': str(text)})
#file_path = '/opt/mycroft/skills/mycroft-bitcoinprice-skill/out.csv'
#wordCount = 10
#
# text = Path(file_path).read_text()
# #print(exit_code) | 21.205128 | 101 | 0.622733 |
717864c0c5586a731d9e7b34b779d6af81159c7a | 4,509 | py | Python | slcyGeneral.py | mirrorcoloured/slcypi | c47975b3523f770d12a521c82e2dfca181e3f35b | [
"MIT"
] | null | null | null | slcyGeneral.py | mirrorcoloured/slcypi | c47975b3523f770d12a521c82e2dfca181e3f35b | [
"MIT"
] | null | null | null | slcyGeneral.py | mirrorcoloured/slcypi | c47975b3523f770d12a521c82e2dfca181e3f35b | [
"MIT"
] | null | null | null | # Python 2.7.1
import RPi.GPIO as GPIO
from twython import Twython
import time
import sys
import os
import pygame
APP_KEY='zmmlyAJzMDIntLpDYmSH98gbw'
APP_SECRET='ksfSVa2hxvTQKYy4UR9tjpb57CAynMJDsygz9qOyzlH24NVwpW'
OAUTH_TOKEN='794094183841566720-BagrHW91yH8C3Mdh9SOlBfpL6wrSVRW'
OAUTH_TOKEN_SECRET='d0Uucq2dkSHrFHZGLM1X8Hw05d80ajKYGl1zTRxZQSKTm'
applepislcy = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
### GENERAL ###
def Sleep(seconds):
"""Puts the program to sleep"""
time.sleep(seconds)
def Alert(channel):
"""Simple alert function for testing event interrupts"""
print('Alert on channel',channel)
def TimeString():
"""Returns the current time"""
t = time.localtime()
return str(t[0])+'.'+str(t[1])+'.'+str(t[2])+'.'+str(t[3])+'.'+str(t[4])+'.'+str(t[5])
def LoadPins(mapping,inp):
"""Organizes an input into a pin mapping dict
mapping <list>, ['IA','IB']
inp <dict>, <list>, <int> {'IA':1,'IB':2}, [1,2]
"""
if type(inp) is int and len(mapping) == 1:
return {mapping[0]:inp}
elif type(inp) is list and len(mapping) == len(inp):
o = {}
for i in range(len(inp)):
o[mapping[i]] = inp[i]
return o
elif type(inp) is dict:
return inp
else:
print('Invalid input for pins:',inp,type(inp))
print('Expected:',mapping)
return {}
def BoolToSign(inp):
"""Converts boolean bits into signed bits
0 -> -1
1 -> 1"""
return (inp * 2) - 1
def SignToBool(inp):
"""Converts signed bits into boolean bits
-1 -> 0
1 -> 1"""
return (inp + 1) / 2
### PYGAME ###
def WindowSetup(size=(300,50),caption='',text='',background=(0,0,0),foreground=(255,255,255)):
"""Sets up a pygame window to take keyboard input
size <tuple>, width by height
caption <str>, window title bar
text <str>, text to display in window, accepts \n
background <tuple>, foreground <tuple>, (r,g,b) color
"""
pygame.init()
screen = pygame.display.set_mode(size,0,32)
pygame.display.set_caption(caption)
myfont = pygame.font.SysFont('Monospace',15)
labels = []
lines = text.split('\n')
for line in lines:
labels.append(myfont.render(line,1,foreground))
screen.fill(background)
y = 0
for label in labels:
screen.blit(label, (0,y))
y += 15
pygame.display.update()
def InputLoop(eventmap):
"""Begins a pygame loop, mapping key inputs to functions
eventmap <dict>, {pygame.K_t:myfunction}
"""
index = 0
while True:
events = pygame.event.get()
for event in events:
if event.type == pygame.KEYDOWN:
#print("{0}: You pressed {1:c}".format ( index , event.key ))
if event.key in eventmap:
eventmap[event.key]()
elif event.type == pygame.QUIT:
pygame.quit()
sys.exit()
### TWITTER ###
def Tweet(twit,statustext):
"""Tweets a message
twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
statustext <str>, must be <= 140 characters
"""
if len(statustext) > 140:
print('ERROR: Character limit 140 exceeded:',len(statustext))
else:
twit.update_status(status=statustext)
def TweetPicture(twit,file,statustext):
"""Tweets a message with a picture
twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
file <str>, path and filename to picture
statustext <str>, must be <= 140 characters
"""
photo = open(file, 'rb')
response = twitter.upload_media(media=photo)
twit.update_status(status=statustext, media_ids=[response['media_id']])
def TweetVideo(twit,file,statustext):
"""Tweets a message with a video
twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
file <str>, path and filename to video
statustext <str>, must be <= 140 characters
"""
video = open(file, 'rb')
response = twitter.upload_video(media=video, media_type='video/mp4')
twit.update_status(status=statustext, media_ids=[response['media_id']])
| 30.883562 | 94 | 0.635174 |
717a01e3e2c90ae46a5bad6b2a2010bbac8dace6 | 1,856 | py | Python | python/pyarmnn/scripts/generate_docs.py | PetervdPerk-NXP/pyarmnn-release | 2008c270f7c7c84a930842c845138628c8b95713 | [
"MIT"
] | 7 | 2020-02-27T07:45:14.000Z | 2021-01-25T12:07:12.000Z | python/pyarmnn/scripts/generate_docs.py | MitchellTesla/PyArmNN | cbe37a0364b00f32ac2a8ced74eed5d576a0d52c | [
"MIT"
] | 5 | 2020-07-28T15:01:12.000Z | 2022-02-04T18:24:02.000Z | python/pyarmnn/scripts/generate_docs.py | MitchellTesla/PyArmNN | cbe37a0364b00f32ac2a8ced74eed5d576a0d52c | [
"MIT"
] | 3 | 2020-07-31T11:41:24.000Z | 2021-06-06T07:58:39.000Z | # Copyright 2019 Arm Ltd. All rights reserved.
# Copyright 2020 NXP
# SPDX-License-Identifier: MIT
import os
import tarfile
import pyarmnn as ann
import shutil
from typing import List, Union
from pdoc.cli import main
package_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
def __copy_file_to_dir(file_paths: Union[List[str], str], target_dir_path: str):
"""Copies multiple files to a directory.
Args:
file_paths (Union[List(str)]): List of files to copy
target_dir_path (str): Target directory.
Returns:
None
"""
file_paths = [] + file_paths
if not (os.path.exists(target_dir_path) and os.path.isdir(target_dir_path)):
os.makedirs(target_dir_path)
for file_path in file_paths:
if not (os.path.exists(file_path) and os.path.isfile(file_path)):
raise RuntimeError('Not a file: {}'.format(file_path))
file_name = os.path.basename(file_path)
shutil.copyfile(file_path, os.path.join(str(target_dir_path), file_name))
def archive_docs(path: str, version: str):
"""Creates an archive.
Args:
path (str): Path which will be archived.
version (str): Version of Arm NN.
Returns:
None
"""
output_filename = f'pyarmnn_docs-{version}.tar'
with tarfile.open(os.path.join(package_dir, output_filename), "w") as tar:
tar.add(path)
if __name__ == "__main__":
readme_filename = os.path.join(package_dir, '..', '..', 'README.md')
with open(readme_filename, 'r') as readme_file:
top_level_pyarmnn_doc = ''.join(readme_file.readlines())
ann.__doc__ = top_level_pyarmnn_doc
main()
target_path = os.path.join(package_dir, 'docs')
archive_docs(target_path, ann.__version__)
| 27.701493 | 82 | 0.644935 |
717a53af9750d33e9be1b7de3f152d83339bf874 | 969 | py | Python | tests/gejun_sum.py | jeffzhengye/pylearn | a140d0fca8a371faada194cb0126192675cc2045 | [
"Unlicense"
] | 2 | 2016-02-17T06:00:35.000Z | 2020-11-23T13:34:00.000Z | tests/gejun_sum.py | jeffzhengye/pylearn | a140d0fca8a371faada194cb0126192675cc2045 | [
"Unlicense"
] | null | null | null | tests/gejun_sum.py | jeffzhengye/pylearn | a140d0fca8a371faada194cb0126192675cc2045 | [
"Unlicense"
] | null | null | null | __author__ = 'jeffye'
if __name__ == '__main__':
test_li = [-5, -5, 7, 7, 12, 0] # should return [-10,14,12,0]
print sum_consecutives_corrected(test_li)
| 20.617021 | 82 | 0.472652 |
717f58f0b458c75ac48a3f2890221b9b52dcce70 | 88 | py | Python | plerr/__main__.py | b2bs-team/pylint-errors | f1362c8afbe6b7075f805560d7699f63ad35a10b | [
"MIT"
] | 2 | 2020-10-28T23:53:59.000Z | 2020-10-29T03:31:20.000Z | plerr/__main__.py | b2bs-team/pylint-errors | f1362c8afbe6b7075f805560d7699f63ad35a10b | [
"MIT"
] | null | null | null | plerr/__main__.py | b2bs-team/pylint-errors | f1362c8afbe6b7075f805560d7699f63ad35a10b | [
"MIT"
] | 1 | 2020-10-28T23:53:47.000Z | 2020-10-28T23:53:47.000Z | """plerr entrypoint"""
from plerr import cli
if __name__ == '__main__':
cli.main()
| 14.666667 | 26 | 0.659091 |
71801cfc804d913976cbde0f2c680802285aa66d | 817 | py | Python | code/send.py | CamouOkau/messenger_new_years_bot | 38f3c26b6c5b4dae7fe48f8b61680ec903c0deac | [
"MIT"
] | null | null | null | code/send.py | CamouOkau/messenger_new_years_bot | 38f3c26b6c5b4dae7fe48f8b61680ec903c0deac | [
"MIT"
] | null | null | null | code/send.py | CamouOkau/messenger_new_years_bot | 38f3c26b6c5b4dae7fe48f8b61680ec903c0deac | [
"MIT"
] | null | null | null | import sys
import time
from datetime import datetime
from bot import FbMessengerBot
if __name__ == "__main__":
if len(sys.argv) < 3:
print("No email or password provided")
else:
bot = FbMessengerBot(sys.argv[1], sys.argv[2])
with open("users.txt", "r") as file:
users = dict.fromkeys(file.read().split("\n"))
for user in users:
users[user] = bot.uid(user)
with open("message.txt", "r") as file:
message = file.read()
time_now = datetime.now()
send_time = datetime(time_now.year + 1, 1, 1)
wait_time = (send_time - time_now).total_seconds()
print("Waiting...")
time.sleep(wait_time)
for uid in users.values():
bot.send_message(message, uid)
bot.logout()
| 29.178571 | 58 | 0.575275 |
71803fa300d2ccbae9efe9edab91921379251431 | 4,361 | py | Python | senlin_tempest_plugin/api/policies/test_policy_update_negative.py | ghanshyammann/senlin-tempest-plugin | 9f33bbe723eb381f93c2248a6a277efef3d92ec3 | [
"Apache-2.0"
] | null | null | null | senlin_tempest_plugin/api/policies/test_policy_update_negative.py | ghanshyammann/senlin-tempest-plugin | 9f33bbe723eb381f93c2248a6a277efef3d92ec3 | [
"Apache-2.0"
] | null | null | null | senlin_tempest_plugin/api/policies/test_policy_update_negative.py | ghanshyammann/senlin-tempest-plugin | 9f33bbe723eb381f93c2248a6a277efef3d92ec3 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from tempest.lib import exceptions
from senlin_tempest_plugin.api import base
from senlin_tempest_plugin.common import utils
| 40.37963 | 75 | 0.616372 |
718204a2b383cce840b6e0f7101b4542d7502bc6 | 136 | py | Python | boa3_test/test_sc/interop_test/contract/DestroyContract.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 25 | 2020-07-22T19:37:43.000Z | 2022-03-08T03:23:55.000Z | boa3_test/test_sc/interop_test/contract/DestroyContract.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 419 | 2020-04-23T17:48:14.000Z | 2022-03-31T13:17:45.000Z | boa3_test/test_sc/interop_test/contract/DestroyContract.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 15 | 2020-05-21T21:54:24.000Z | 2021-11-18T06:17:24.000Z | from boa3.builtin import public
from boa3.builtin.interop.contract import destroy_contract
| 17 | 58 | 0.794118 |
71820cfe7864a17de8d5ffb455a24ec586958eca | 4,363 | py | Python | tests/test_vmax.py | qinfeng2011/wltp | 317ad38fb96599a29d22e40f69b6aeb4d205611d | [
"Apache-2.0"
] | null | null | null | tests/test_vmax.py | qinfeng2011/wltp | 317ad38fb96599a29d22e40f69b6aeb4d205611d | [
"Apache-2.0"
] | null | null | null | tests/test_vmax.py | qinfeng2011/wltp | 317ad38fb96599a29d22e40f69b6aeb4d205611d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 European Commission (JRC);
# Licensed under the EUPL (the 'Licence');
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl
import functools as fnt
import logging
import random
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from pandas import IndexSlice as _ix
from wltp import engine, vehicle, downscale, vmax
from wltp.io import gear_names, veh_names
from . import vehdb
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
| 32.080882 | 87 | 0.603942 |
718275b3e8d58cfc1c69bd90b16b90b94fc076c8 | 881 | py | Python | util/canonicaljson.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 2,027 | 2019-11-12T18:05:48.000Z | 2022-03-31T22:25:04.000Z | util/canonicaljson.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 496 | 2019-11-12T18:13:37.000Z | 2022-03-31T10:43:45.000Z | util/canonicaljson.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 249 | 2019-11-12T18:02:27.000Z | 2022-03-22T12:19:19.000Z | import collections
def canonicalize(json_obj, preserve_sequence_order=True):
"""
This function canonicalizes a Python object that will be serialized as JSON.
Example usage: json.dumps(canonicalize(my_obj))
Args:
json_obj (object): the Python object that will later be serialized as JSON.
Returns:
object: json_obj now sorted to its canonical form.
"""
if isinstance(json_obj, collections.MutableMapping):
sorted_obj = sorted(
{
key: canonicalize(val, preserve_sequence_order) for key, val in json_obj.items()
}.items()
)
return collections.OrderedDict(sorted_obj)
elif isinstance(json_obj, (list, tuple)):
seq = [canonicalize(val, preserve_sequence_order) for val in json_obj]
return seq if preserve_sequence_order else sorted(seq)
return json_obj
| 32.62963 | 96 | 0.681044 |
71829ce0488364233ac4688992792bd2903978d0 | 1,170 | py | Python | datasette_plugin_geo/inspect.py | russss/datasette-geo | d4cecc020848bbde91e9e17bf352f7c70bc3dccf | [
"Apache-2.0"
] | 9 | 2019-05-02T14:44:57.000Z | 2022-01-19T20:56:50.000Z | datasette_plugin_geo/inspect.py | russss/datasette-geo | d4cecc020848bbde91e9e17bf352f7c70bc3dccf | [
"Apache-2.0"
] | 5 | 2019-04-30T12:22:03.000Z | 2021-05-29T20:08:42.000Z | datasette_plugin_geo/inspect.py | russss/datasette-geo | d4cecc020848bbde91e9e17bf352f7c70bc3dccf | [
"Apache-2.0"
] | 2 | 2019-07-31T19:16:43.000Z | 2021-05-28T20:12:36.000Z | from datasette import hookimpl
from datasette.utils import detect_spatialite
from shapely import wkt
| 26.590909 | 110 | 0.564957 |