hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b8437331efb5465038081e91c134bce49f22a468 | 394 | py | Python | models/losses/MSE.py | johnrachwan123/SNIP-it | a578a0693318f261492331298b6602de225fe21f | [
"MIT"
] | null | null | null | models/losses/MSE.py | johnrachwan123/SNIP-it | a578a0693318f261492331298b6602de225fe21f | [
"MIT"
] | null | null | null | models/losses/MSE.py | johnrachwan123/SNIP-it | a578a0693318f261492331298b6602de225fe21f | [
"MIT"
] | 1 | 2021-11-08T16:34:45.000Z | 2021-11-08T16:34:45.000Z | import torch
from torch import nn
from models.GeneralModel import GeneralModel
| 28.142857 | 81 | 0.700508 |
b845201c7741d5e90f7173c09fe9315087e66057 | 2,046 | py | Python | svca_limix/limix/core/covar/test/test_categorical.py | DenisSch/svca | bd029c120ca8310f43311253e4d7ce19bc08350c | [
"Apache-2.0"
] | 65 | 2015-01-20T20:46:26.000Z | 2021-06-27T14:40:35.000Z | svca_limix/limix/core/covar/test/test_categorical.py | DenisSch/svca | bd029c120ca8310f43311253e4d7ce19bc08350c | [
"Apache-2.0"
] | 29 | 2015-02-01T22:35:17.000Z | 2017-08-07T08:18:23.000Z | svca_limix/limix/core/covar/test/test_categorical.py | DenisSch/svca | bd029c120ca8310f43311253e4d7ce19bc08350c | [
"Apache-2.0"
] | 35 | 2015-02-01T17:26:50.000Z | 2019-09-13T07:06:16.000Z | """LMM testing code"""
import unittest
import scipy as sp
import numpy as np
from limix.core.covar import CategoricalCov
from limix.utils.check_grad import mcheck_grad
if __name__ == '__main__':
unittest.main()
| 26.921053 | 62 | 0.580645 |
b8459a275062134e5f40c7584623582d09c9aa02 | 4,274 | py | Python | code/stephen/005/005.py | Stephen0910/python-practice-for-game-tester | e17b2666d18a51e5bff31ad0355ad4a6775191a4 | [
"MIT"
] | 29 | 2019-03-07T03:03:42.000Z | 2021-12-25T04:55:58.000Z | code/stephen/005/005.py | Stephen0910/python-practice-for-game-tester | e17b2666d18a51e5bff31ad0355ad4a6775191a4 | [
"MIT"
] | null | null | null | code/stephen/005/005.py | Stephen0910/python-practice-for-game-tester | e17b2666d18a51e5bff31ad0355ad4a6775191a4 | [
"MIT"
] | 19 | 2019-03-11T02:40:37.000Z | 2021-09-24T08:57:04.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/3/13 0013 3:44
# @Author : Stephen
# @Site :
# @File : 005.py
# @Purpose :
# @Software : PyCharm
# @Copyright: (c) Stephen 2019
# @Licence : <your licence>
import os, re, time
from pyecharts import Line
"""
"""
package_name = "archery.elite.shooting.free.game.android" #
game_name = ""
p = Phone()
p.graphic(20, 1)
# p.cpu_test()
# z = os.popen("adb shell cat /proc/15402/stat")
# print(z.readlines())
| 30.312057 | 97 | 0.523631 |
b846bfa9679bd871993a1750e2cf6c621e13bfac | 13,470 | py | Python | sdk/opendp/smartnoise/synthesizers/pytorch/nn/dpctgan.py | Tecnarca/whitenoise-system | 9dfc1425bca77f6e30afe1eea253a6b580bfa847 | [
"MIT"
] | 1 | 2021-12-30T15:21:54.000Z | 2021-12-30T15:21:54.000Z | sdk/opendp/smartnoise/synthesizers/pytorch/nn/dpctgan.py | Tecnarca/whitenoise-system | 9dfc1425bca77f6e30afe1eea253a6b580bfa847 | [
"MIT"
] | null | null | null | sdk/opendp/smartnoise/synthesizers/pytorch/nn/dpctgan.py | Tecnarca/whitenoise-system | 9dfc1425bca77f6e30afe1eea253a6b580bfa847 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from torch import optim
from torch.nn import functional
import torch.nn as nn
import torch.utils.data
from torch.nn import BatchNorm1d, Dropout, LeakyReLU, Linear, Module, ReLU, Sequential,Sigmoid
from torch.nn import functional as F
from opendp.smartnoise.synthesizers.base import SDGYMBaseSynthesizer
import ctgan
from ctgan.transformer import DataTransformer
from ctgan.conditional import ConditionalGenerator
from ctgan.models import Generator
from ctgan.sampler import Sampler
from ctgan import CTGANSynthesizer
import opacus
from opacus import autograd_grad_sample
from opacus import PrivacyEngine, utils
# custom for calcuate grad_sample for multiple loss.backward()
def _custom_create_or_extend_grad_sample(
param: torch.Tensor, grad_sample: torch.Tensor, batch_dim: int
) -> None:
"""
Create a 'grad_sample' attribute in the given parameter, or accumulate it
if the 'grad_sample' attribute already exists.
This custom code will not work when using optimizer.virtual_step()
"""
#print ("now this happen")
if hasattr(param, "grad_sample"):
param.grad_sample = param.grad_sample + grad_sample
#param.grad_sample = torch.cat((param.grad_sample, grad_sample), batch_dim)
else:
param.grad_sample = grad_sample
| 36.307278 | 129 | 0.566592 |
b846da72c1b90ad2cd7931c2938c866fd817d9f6 | 1,814 | py | Python | client/client.py | MasonDiGi/chat_server | 2100eb012f8bce359b51e0dc8684a82949ba1c17 | [
"MIT"
] | null | null | null | client/client.py | MasonDiGi/chat_server | 2100eb012f8bce359b51e0dc8684a82949ba1c17 | [
"MIT"
] | null | null | null | client/client.py | MasonDiGi/chat_server | 2100eb012f8bce359b51e0dc8684a82949ba1c17 | [
"MIT"
] | null | null | null | import socket
import threading
import time
# Create constants
HEADER = 64
PORT = 5050
FORMAT = 'utf-8'
DC_MSG = "!DISCONNECT"
SERVER = "localhost"
ADDR = (SERVER, PORT)
# Set up client var and connect to the server
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(ADDR)
erase = '\x1b[1A\x1b[K'
# Handles sending a message to the server
# A thread to handle receiving messages broadcast from the server
# Main thread
try:
# Send initial message to set up username
uname = input("Enter a username: ")
send(uname)
# Start handling received messages
RECVTHREAD = threading.Thread(target=recvThread)
RECVTHREAD.start()
# Handle the prompt and sending messages
while True:
msg = input(f"[{uname}]: ")
send(msg)
print("\x1b[A\x1b[K", end="")
if msg == DC_MSG:
break
# Close everything if ctrl+c is pressed
finally:
send(DC_MSG)
time.sleep(0.5)
client.close()
print("\ngoodbye")
exit()
| 26.676471 | 117 | 0.615215 |
b8481e8c9248a5d340e323d7d3c83d87b3a95b6f | 9,183 | py | Python | src/cogs/ide/dialogs/edit_view.py | osam7a/Jarvide | 9a4424c293ae40b21968b5118f60862860ff5247 | [
"MIT"
] | null | null | null | src/cogs/ide/dialogs/edit_view.py | osam7a/Jarvide | 9a4424c293ae40b21968b5118f60862860ff5247 | [
"MIT"
] | null | null | null | src/cogs/ide/dialogs/edit_view.py | osam7a/Jarvide | 9a4424c293ae40b21968b5118f60862860ff5247 | [
"MIT"
] | null | null | null | from __future__ import annotations
import disnake
from disnake.ext import commands
from typing import TYPE_CHECKING
from src.utils.utils import EmbedFactory, ExitButton, SaveButton, add_lines, get_info
if TYPE_CHECKING:
from src.utils import File
| 35.871094 | 117 | 0.597299 |
b8493d2511af44620ab30010ea879f211db8a17b | 11,878 | py | Python | modules/administrator.py | Gaeta/Delta | c76e149d0c17e025fe2648964e2512440fc0b4c7 | [
"MIT"
] | 1 | 2021-07-04T10:34:11.000Z | 2021-07-04T10:34:11.000Z | modules/administrator.py | Gaeta/Delta | c76e149d0c17e025fe2648964e2512440fc0b4c7 | [
"MIT"
] | null | null | null | modules/administrator.py | Gaeta/Delta | c76e149d0c17e025fe2648964e2512440fc0b4c7 | [
"MIT"
] | null | null | null | import discord, sqlite3, asyncio, utils, re
from discord.ext import commands
from datetime import datetime
TIME_REGEX = re.compile("(?:(\d{1,5})\s?(h|hours|hrs|hour|hr|s|seconds|secs|sec|second|m|mins|minutes|minute|min|d|days|day))+?")
TIME_DICT = {"h": 3600, "s": 1, "m": 60, "d": 86400}
def setup(bot):
bot.add_cog(AdministratorCommands(bot)) | 46.217899 | 272 | 0.594124 |
b84a30c58e64eb7a73321b156d6da42908f33f1f | 23,650 | py | Python | models/feature_extraction/gcn_resnest.py | hoangtuanvu/rad_chestxray | b29c2bf98ae41d85258b21674e8826847a0cc647 | [
"MIT"
] | 2 | 2020-09-07T00:06:41.000Z | 2020-09-29T07:08:24.000Z | models/feature_extraction/gcn_resnest.py | hoangtuanvu/rad_chestxray | b29c2bf98ae41d85258b21674e8826847a0cc647 | [
"MIT"
] | 7 | 2020-09-25T22:12:53.000Z | 2021-08-25T16:06:24.000Z | models/feature_extraction/gcn_resnest.py | hoangtuanvu/rad_chestxray | b29c2bf98ae41d85258b21674e8826847a0cc647 | [
"MIT"
] | null | null | null | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## Email: zhanghang0704@gmail.com
## Copyright (c) 2020
##
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""ResNet variants"""
import os
import math
import torch
import numpy as np
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F
from models.attention_map import SEModule, SpatialCGNL, SAModule
from models.feature_extraction.splat import SplAtConv2d
from models.utils import gen_adj_num, gen_adj
from models.common import conv1x1
_url_format = 'https://hangzh.s3.amazonaws.com/encoding/models/{}-{}.pth'
_model_sha256 = {name: checksum for checksum, name in
[('528c19ca', 'resnest50'), ('22405ba7', 'resnest101'), ('75117900', 'resnest200'),
('0cc87c48', 'resnest269'), ]}
resnest_model_urls = {name: _url_format.format(name, short_hash(name)) for name in
_model_sha256.keys()}
__all__ = ['ResNet', 'Bottleneck']
def gcn_resnest200(cfg=None, **kwargs):
model = ResNet(Bottleneck, [3, 24, 36, 3], radix=2, groups=1, bottleneck_width=64,
deep_stem=True, stem_width=64, avg_down=True, avd=True, avd_first=False,
use_se=cfg.use_se, extract_fields=cfg.extract_fields, agree_rate=cfg.agree_rate,
csv_path=cfg.csv_path, **kwargs)
# model = ResNet(Bottleneck, [3, 24, 36, 3], radix=2, groups=1, bottleneck_width=64,
# deep_stem=True, stem_width=64, avg_down=True, avd=True, avd_first=False,
# use_se=False, extract_fields='0,1,2,3,4,5', agree_rate=0.5,
# csv_path='D:/Dataset/Vinmec/Noise/train_sss.csv', **kwargs)
if cfg.pretrained:
model.load_state_dict(
torch.hub.load_state_dict_from_url(resnest_model_urls['resnest200'], progress=True),
strict=False)
return model
def gcn_resnest101(cfg=None, **kwargs):
model = ResNet(Bottleneck, [3, 4, 23, 3], radix=2, groups=1, bottleneck_width=64,
deep_stem=True, stem_width=64, avg_down=True, avd=True, avd_first=False,
use_se=cfg.use_se, extract_fields=cfg.extract_fields, agree_rate=cfg.agree_rate,
csv_path=cfg.csv_path, **kwargs)
if cfg.pretrained:
model.load_state_dict(
torch.hub.load_state_dict_from_url(resnest_model_urls['resnest101'], progress=True),
strict=False)
return model
def gcn_resnest50(cfg=None, **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3], radix=2, groups=1, bottleneck_width=64, deep_stem=True,
stem_width=32, avg_down=True, avd=True, avd_first=False, use_se=cfg.use_se,
extract_fields=cfg.extract_fields, agree_rate=cfg.agree_rate,
csv_path=cfg.csv_path, **kwargs)
if cfg.pretrained:
model.load_state_dict(
torch.hub.load_state_dict_from_url(resnest_model_urls['resnest50'], progress=True),
strict=False)
return model
def merge_gcn_residual(feature, x, merge_conv):
feature_raw = feature
feature = feature_raw.transpose(1, 2)
feature = feature.transpose(2, 3).contiguous()
feature = feature.view(-1, feature.shape[-1])
reshape_x = x.transpose(0, 1)
feature = torch.matmul(feature, reshape_x)
feature = feature.view(feature_raw.shape[0], feature_raw.shape[2], feature_raw.shape[3], -1)
feature = feature.transpose(2, 3)
feature = feature.transpose(1, 2)
feature = merge_conv(feature)
return feature_raw + feature
if __name__ == "__main__":
import torchsummary
x = torch.randn([2, 3, 224, 224])
model = gcn_resnest200(num_classes=6, word_file='diseases_embeddings.npy')
logits = model(x)
# print(torchsummary.summary(model, input_size=(3, 512, 512), device='cpu'))
print(logits)
# x = torch.randn([2, 2048, 7, 7])
# word = torch.randn([6, 300])
# adj = torch.randn([6, 6]) #
# # gcn = GraphConvolution(in_features=300, out_features=256, bias=True)
# gcn = GraphAttentionLayer(in_features=300, out_features=256, bias=True)
# output = gcn(word, adj)
# print(output)
# feature = torch.randn([2, 128, 56, 56]) # x = torch.randn([11, 128]) # merge_conv = nn.Conv2d(11, 128, kernel_size=1, stride=1, bias=False) # # output = merge_gcn_residual(feature, x, merge_conv) # print(output.size())
| 42.383513 | 229 | 0.602199 |
b84bfe3e24cf3fa88c7b90891f02c84318e2faae | 7,473 | py | Python | nextai_lib/inference.py | jav0927/nextai | 9de0c338a41a3ce0297b95f625290fa814a83344 | [
"Apache-2.0"
] | null | null | null | nextai_lib/inference.py | jav0927/nextai | 9de0c338a41a3ce0297b95f625290fa814a83344 | [
"Apache-2.0"
] | 1 | 2021-09-28T05:33:17.000Z | 2021-09-28T05:33:17.000Z | nextai_lib/inference.py | jav0927/nextai | 9de0c338a41a3ce0297b95f625290fa814a83344 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: 02_inference.ipynb (unless otherwise specified).
__all__ = ['device', 'pad_output', 'get_activ_offsets_mns']
# Cell
#from fastai.vision.all import *
from fastai import *
from typing import *
from torch import tensor, Tensor
import torch
import torchvision # Needed to invoke torchvision.ops.mns function
# Cell
# Automatically sets for GPU or CPU environments
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Cell
# Pad tensors so that they have uniform dimentions: (batch size, no of items in a batch, 4) and (batch size, no of items in a batch, 21)
def pad_output(l_bb:List, l_scr:List, l_idx:List, no_classes:int):
'''Pad tensors so that they have uniform dimentions: (batch size, no of items in a batch, 4) and (batch size, no of items in a batch, 21)
Inputs: l_bb - list of tensors containing individual non-uniform sized bounding boxes
l_scr - list of tensors containing class index values (i.e. 1 - airplane)
l_idx - list of tensors containing class index values (i.e. 1 - airplane)
no_classes - Number of classes, Integer
Outputs: Uniform-sized tensors: bounding box tensor and score tensor with dims: (batch size, no of items in a batch, 4) and (batch size, no of items in a batch, 21)'''
if len([len(img_bb) for img_bb in l_bb]) == 0.:
print(F'Image did not pass the scoring threshold')
return
mx_len = max([len(img_bb) for img_bb in l_bb]) # Calculate maximun lenght of the boxes in the batch
l_b, l_c, l_x, l_cat = [], [], [], []
# Create Bounding Box tensors # zeroed tensor accumulators
for i, ntr in enumerate(zip(l_bb, l_scr, l_idx)):
bbox, cls, idx = ntr[0], ntr[1], ntr[2] # Unpack variables
tsr_len = mx_len - bbox.shape[0] # Calculate the number of zero-based rows to add
m = nn.ConstantPad2d((0, 0, 0, tsr_len), 0.) # Prepare to pad the box tensor with zero entries
l_b.append(m(bbox)) # Add appropriate zero-based box rows and add to list
# Create Category tensors
cat_base = torch.zeros(mx_len-bbox.shape[0], dtype=torch.int32)
img_cat = torch.cat((idx, cat_base), dim=0)
l_cat.append(img_cat)
# Create Score tensors
img_cls = [] # List to construct class vectors
for ix in range(idx.shape[0]): # Construct class vectors of dim(no of classes)
cls_base = torch.zeros(no_classes).to(device) # Base zero-based class vector
cls_base[idx[ix]] = cls[ix] # Add the score in the nth position
img_cls.append(cls_base)
img_stack = torch.stack(img_cls) # Create single tensor per image
img_stack_out = m(img_stack)
l_c.append( img_stack_out ) # Add appropriate zero-based class rows and add to list
return (TensorBBox(torch.stack(l_b,0)), TensorMultiCategory(torch.stack(l_c,0)), TensorMultiCategory(torch.stack(l_cat,0)) )
# Cell
def get_activ_offsets_mns(anchrs:Tensor, activs:Tensor, no_classes:int, threshold:float=0.5):
''' Takes in activations and calculates corresponding anchor box offsets.
It then filters the resulting boxes through MNS
Inputs:
anchrs - Anchors as Tensor
activs - Activations as Tensor
no_classes - Number of classes (categories)
threshold - Coarse filtering. Default = 0.5
Output:
one_batch_boxes, one_batch_scores as Tuple'''
p_bboxes, p_classes = activs # Read p_bboxes: [32, 189,4] Torch.Tensor and p_classes: [32, 189, 21] Torch.Tensor from self.learn.pred
#scores = torch.sigmoid(p_classes) # Calculate the confidence levels, scores, for class predictions [0, 1]
scores = torch.softmax(p_classes, -1) # Calculate the confidence levels, scores, for class predictions [0, 1] - Probabilistic
offset_boxes = activ_decode(p_bboxes, anchrs) # Return anchors + anchor offsets wiith format (batch, No Items in Batch, 4)
# For each item in batch, and for each class in the item, filter the image by passing it through NMS. Keep preds with IOU > thresshold
one_batch_boxes = []; one_batch_scores = []; one_batch_cls_pred = [] # Agregators at the bath level
for i in range(p_classes.shape[0]): # For each image in batch ...
batch_p_boxes = offset_boxes[i] # box preds for the current batch
batch_scores = scores[i] # Keep scores for the current batch
max_scores, cls_idx = torch.max(batch_scores, 1 ) # Keep batch class indexes
bch_th_mask = max_scores > threshold # Threshold mask for batch
bch_keep_boxes = batch_p_boxes[bch_th_mask] # "
bch_keep_scores = batch_scores[bch_th_mask] # "
bch_keep_cls_idx = cls_idx[bch_th_mask]
# Agregators per image in a batch
img_boxes = [] # Bounding boxes per image
img_scores = [] # Scores per image
img_cls_pred = [] # Class predictons per image
for c in range (1,no_classes): # Loop through each class
cls_mask = bch_keep_cls_idx==c # Keep masks for the current class
if cls_mask.sum() == 0: continue # Weed out images with no positive class masks
cls_boxes = bch_keep_boxes[cls_mask] # Keep boxes per image
cls_scores = bch_keep_scores[cls_mask].max(dim=1)[0] # Keep class scores for the current image
nms_keep_idx = torchvision.ops.nms(cls_boxes, cls_scores, iou_threshold=0.5) # Filter images by passing them through NMS
img_boxes += [*cls_boxes[nms_keep_idx]] # Agregate cls_boxes into tensors for all classes
box_stack = torch.stack(img_boxes,0) # Transform individual tensors into a single box tensor
img_scores += [*cls_scores[nms_keep_idx]] # Agregate cls_scores into tensors for all classes
score_stack = torch.stack(img_scores, 0) # Transform individual tensors into a single score tensor
img_cls_pred += [*tensor([c]*len(nms_keep_idx))]
cls_pred_stack = torch.stack(img_cls_pred, 0)
batch_mask = score_stack > threshold # filter final lists tto be greater than threshold
box_stack = box_stack[batch_mask] # "
score_stack = score_stack[batch_mask] # "
cls_pred_stack = cls_pred_stack[batch_mask] # "
if 'box_stack' not in locals(): continue # Failed to find any valid classes
one_batch_boxes.append(box_stack) # Agregate bounding boxes for the batch
one_batch_scores.append(score_stack) # Agregate scores for the batch
one_batch_cls_pred.append(cls_pred_stack)
# Pad individual box and score tensors into uniform-sized box and score tensors of shapes: (batch, no 0f items in batch, 4) and (batch, no 0f items in batch, 21)
one_batch_boxes, one_batch_scores, one_batch_cats = pad_output(one_batch_boxes, one_batch_scores, one_batch_cls_pred, no_classes)
return (one_batch_boxes, one_batch_cats) | 59.784 | 174 | 0.640707 |
b84c1c6e378f4059bee57b13f1d84bcf63b4ae74 | 2,141 | py | Python | code.py | ashweta81/data-wrangling-pandas-code-along-practice | af49250a45c616f46d763990f2321f470d439916 | [
"MIT"
] | null | null | null | code.py | ashweta81/data-wrangling-pandas-code-along-practice | af49250a45c616f46d763990f2321f470d439916 | [
"MIT"
] | null | null | null | code.py | ashweta81/data-wrangling-pandas-code-along-practice | af49250a45c616f46d763990f2321f470d439916 | [
"MIT"
] | null | null | null | # --------------
import pandas as pd
import numpy as np
# Read the data using pandas module.
data=pd.read_csv(path)
# Find the list of unique cities where matches were played
print("The unique cities where matches were played are ", data.city.unique())
print('*'*80)
# Find the columns which contains null values if any ?
print("The columns which contain null values are ", data.columns[data.isnull().any()])
print('*'*80)
# List down top 5 most played venues
print("The top 5 most played venues are", data.venue.value_counts().head(5))
print('*'*80)
# Make a runs count frequency table
print("The frequency table for runs is", data.runs.value_counts())
print('*'*80)
# How many seasons were played and in which year they were played
data['year']=data.date.apply(lambda x : x[:4])
seasons=data.year.unique()
print('The total seasons and years are', seasons)
print('*'*80)
# No. of matches played per season
ss1=data.groupby(['year'])['match_code'].nunique()
print('The total matches played per season are', ss1)
print("*"*80)
# Total runs across the seasons
ss2=data.groupby(['year']).agg({'total':'sum'})
print("Total runs are",ss2)
print("*"*80)
# Teams who have scored more than 200+ runs. Show the top 10 results
w1=data.groupby(['match_code','batting_team']).agg({'total':'sum'}).sort_values(by='total', ascending=False)
w1[w1.total>200].reset_index().head(10)
print("The top 10 results are",w1[w1.total>200].reset_index().head(10))
print("*"*80)
# What are the chances of chasing 200+ target
dt1=data.groupby(['match_code','batting_team','inning'])['total'].sum().reset_index()
dt1.head()
dt1.loc[((dt1.total>200) & (dt1.inning==2)),:].reset_index()
data.match_code.unique().shape[0]
probability=(dt1.loc[((dt1.total>200) & (dt1.inning==2)),:].shape[0])/(data.match_code.unique().shape[0])*100
print("Chances are", probability)
print("*"*80)
# Which team has the highest win count in their respective seasons ?
dt2=data.groupby(['year','winner'])['match_code'].nunique()
dt3=dt2.groupby(level=0,group_keys=False)
dt4=dt3.apply(lambda x: x.sort_values(ascending=False).head(1))
print("The team with the highes win count is", dt4)
| 40.396226 | 109 | 0.712751 |
b84e3b8a7a2a09cb215aab0d692cf00fa2446655 | 794 | py | Python | Trakttv.bundle/Contents/Libraries/Shared/plugin/scrobbler/handlers/playing.py | disrupted/Trakttv.bundle | 24712216c71f3b22fd58cb5dd89dad5bb798ed60 | [
"RSA-MD"
] | 1,346 | 2015-01-01T14:52:24.000Z | 2022-03-28T12:50:48.000Z | Trakttv.bundle/Contents/Libraries/Shared/plugin/scrobbler/handlers/playing.py | alcroito/Plex-Trakt-Scrobbler | 4f83fb0860dcb91f860d7c11bc7df568913c82a6 | [
"RSA-MD"
] | 474 | 2015-01-01T10:27:46.000Z | 2022-03-21T12:26:16.000Z | Trakttv.bundle/Contents/Libraries/Shared/plugin/scrobbler/handlers/playing.py | alcroito/Plex-Trakt-Scrobbler | 4f83fb0860dcb91f860d7c11bc7df568913c82a6 | [
"RSA-MD"
] | 191 | 2015-01-02T18:27:22.000Z | 2022-03-29T10:49:48.000Z | from plugin.scrobbler.core import SessionEngine, SessionHandler
| 31.76 | 91 | 0.61461 |
b84e7cc9d16e3f0b3e8a9ecacf33341e96af47cb | 102 | py | Python | Desafio 46.py | MisaelGuilherme/100_Exercicios_Em_Python | 8c4cdad7e60201abcdd2c4a5646f52aed4e7041e | [
"MIT"
] | null | null | null | Desafio 46.py | MisaelGuilherme/100_Exercicios_Em_Python | 8c4cdad7e60201abcdd2c4a5646f52aed4e7041e | [
"MIT"
] | null | null | null | Desafio 46.py | MisaelGuilherme/100_Exercicios_Em_Python | 8c4cdad7e60201abcdd2c4a5646f52aed4e7041e | [
"MIT"
] | null | null | null | print('====== DESAFIO 46 ======')
import time
for c in range(10,-1,-1):
time.sleep(1)
print(c) | 20.4 | 33 | 0.539216 |
b850754dddf9940614a7ecc4de4bab7929800a85 | 4,329 | py | Python | samples/features/sql-big-data-cluster/security/encryption-at-rest-external-key-provider/kms_plugin_app/custom_akv.py | aguzev/sql-server-samples | 498c47f2ac8e45d052ed61878a2ce11eb32394bf | [
"MIT"
] | 4,474 | 2019-05-06T23:05:37.000Z | 2022-03-31T23:30:31.000Z | samples/features/sql-big-data-cluster/security/encryption-at-rest-external-key-provider/kms_plugin_app/custom_akv.py | aguzev/sql-server-samples | 498c47f2ac8e45d052ed61878a2ce11eb32394bf | [
"MIT"
] | 256 | 2019-05-07T07:07:19.000Z | 2022-03-29T17:11:41.000Z | samples/features/sql-big-data-cluster/security/encryption-at-rest-external-key-provider/kms_plugin_app/custom_akv.py | aguzev/sql-server-samples | 498c47f2ac8e45d052ed61878a2ce11eb32394bf | [
"MIT"
] | 5,075 | 2019-05-07T00:07:21.000Z | 2022-03-31T23:31:15.000Z | # Placeholder for adding logic specific to application
# and backend key store.
#
import os
import json
import sys
from azure.identity import DefaultAzureCredential
from azure.keyvault.keys import KeyClient
from azure.keyvault.keys.crypto import CryptographyClient, EncryptionAlgorithm
# Append the current application path to sys path to be able to resolve local modules.
#
sys.path.append('.')
sys.path.append('./model')
from constants import ConfigurationConstants, Operations, CryptoConstants
import utils
from json_objects import EncryptDecryptRequest, JsonWebKeyResponse, EncryptDecryptResponse
def decrypt(request, json_key_attributes_dict, pin, version):
"""
This method will be called by the application entry point
for decrypting the payload.
request.value has the plaintext payload
request.alg contains the padding algorithm for encryption.
"""
set_env(json_key_attributes_dict, pin)
credential = DefaultAzureCredential()
key_vault_key = get_akv_key(json_key_attributes_dict, credential)
crypto_client = CryptographyClient(key_vault_key, credential=credential)
decrypted_payload = crypto_client.decrypt(EncryptionAlgorithm.rsa_oaep, request.value)
response = EncryptDecryptResponse(decrypted_payload.plaintext)
return response
def encrypt(request, json_key_attributes_dict, pin, version):
"""
This method will be called by the application entry point
for encrypting the payload.
request.value has the plaintext payload
request.alg contains the padding algorithm for encryption.
"""
set_env(json_key_attributes_dict, pin)
credential = DefaultAzureCredential()
key_vault_key = get_akv_key(json_key_attributes_dict, credential)
crypto_client = CryptographyClient(key_vault_key, credential=credential)
encrypted_payload = crypto_client.encrypt(EncryptionAlgorithm.rsa_oaep, request.value)
response = EncryptDecryptResponse(encrypted_payload.ciphertext)
return response
def get_akv_key(json_key_attributes_dict, credential):
"""
Gets the AKV key object.
"""
if "vault_url" in json_key_attributes_dict:
vault_url = json_key_attributes_dict["vault_url"]
else:
raise KeyError('vault_url was expected in the parameters but not found')
if "keyname" in json_key_attributes_dict:
key_name = json_key_attributes_dict["keyname"]
else:
raise KeyError('keyname was expected in the parameters but not found')
if "keyversion" in json_key_attributes_dict:
key_version = json_key_attributes_dict["keyversion"]
else:
raise KeyError('keyversion was expected in the parameters but not found')
key_client = KeyClient(vault_url=vault_url, credential=credential)
key_vault_key = key_client.get_key(key_name, key_version)
return key_vault_key
def set_env(json_key_attributes_dict, pin):
"""
Sets the environment variables for the MS identity credential lookup to work.
"""
if "azure_client_id" in json_key_attributes_dict:
key_version = json_key_attributes_dict["azure_client_id"]
else:
raise KeyError('azure_client_id was expected in the parameters but not found')
if "azure_tenant_id" in json_key_attributes_dict:
key_version = json_key_attributes_dict["azure_tenant_id"]
else:
raise KeyError('azure_tenant_id was expected in the parameters but not found')
os.environ["AZURE_CLIENT_ID"]=json_key_attributes_dict["azure_client_id"]
os.environ["AZURE_TENANT_ID"]=json_key_attributes_dict["azure_tenant_id"]
os.environ["AZURE_CLIENT_SECRET"]=pin
| 40.839623 | 90 | 0.769924 |
b85115da00994686b76087d8e81c839619f86fa0 | 338 | py | Python | scss/setup.py | Jawbone/pyScss | b1f483c253ec4aaceb3b8d4d630ca5528590e9b8 | [
"MIT"
] | null | null | null | scss/setup.py | Jawbone/pyScss | b1f483c253ec4aaceb3b8d4d630ca5528590e9b8 | [
"MIT"
] | null | null | null | scss/setup.py | Jawbone/pyScss | b1f483c253ec4aaceb3b8d4d630ca5528590e9b8 | [
"MIT"
] | null | null | null | from distutils.core import setup, Extension
setup(name='jawbonePyScss',
version='1.1.8',
description='jawbonePyScss',
ext_modules=[
Extension(
'_scss',
sources=['src/_scss.c', 'src/block_locator.c', 'src/scanner.c'],
libraries=['pcre'],
optional=True
)
]
)
| 22.533333 | 76 | 0.553254 |
b851d6b0de112cf236b222d90e4d36785001355b | 18,301 | py | Python | spanner_orm/tests/query_test.py | MetaOfX/python-spanner-orm | 59063eb6989b845d1658118a7a0282eede19d8bf | [
"Apache-2.0"
] | 37 | 2018-11-01T18:29:03.000Z | 2022-03-30T17:24:39.000Z | spanner_orm/tests/query_test.py | MetaOfX/python-spanner-orm | 59063eb6989b845d1658118a7a0282eede19d8bf | [
"Apache-2.0"
] | 48 | 2018-11-05T18:51:23.000Z | 2021-12-17T20:28:11.000Z | spanner_orm/tests/query_test.py | MetaOfX/python-spanner-orm | 59063eb6989b845d1658118a7a0282eede19d8bf | [
"Apache-2.0"
] | 19 | 2019-05-04T06:05:31.000Z | 2021-12-17T20:52:53.000Z | # python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import unittest
from unittest import mock
from absl.testing import parameterized
from spanner_orm import condition
from spanner_orm import error
from spanner_orm import field
from spanner_orm import query
from spanner_orm.tests import models
from google.cloud.spanner_v1.proto import type_pb2
def select(self, *conditions):
return query.SelectQuery(models.UnittestModel, list(conditions))
def test_query_limit(self):
key, value = 'limit0', 2
select_query = self.select(condition.limit(value))
self.assertEndsWith(select_query.sql(), ' LIMIT @{}'.format(key))
self.assertEqual(select_query.parameters(), {key: value})
self.assertEqual(select_query.types(), {key: field.Integer.grpc_type()})
select_query = self.select()
self.assertNotRegex(select_query.sql(), 'LIMIT')
def test_query_limit_offset(self):
limit_key, limit = 'limit0', 2
offset_key, offset = 'offset0', 5
select_query = self.select(condition.limit(limit, offset=offset))
self.assertEndsWith(select_query.sql(),
' LIMIT @{} OFFSET @{}'.format(limit_key, offset_key))
self.assertEqual(select_query.parameters(), {
limit_key: limit,
offset_key: offset
})
self.assertEqual(select_query.types(), {
limit_key: field.Integer.grpc_type(),
offset_key: field.Integer.grpc_type()
})
def test_query_order_by(self):
order = ('int_', condition.OrderType.DESC)
select_query = self.select(condition.order_by(order))
self.assertEndsWith(select_query.sql(), ' ORDER BY table.int_ DESC')
self.assertEmpty(select_query.parameters())
self.assertEmpty(select_query.types())
select_query = self.select()
self.assertNotRegex(select_query.sql(), 'ORDER BY')
def test_query_order_by_with_object(self):
    """Ordering by a field object behaves the same as ordering by name."""
    ordered = self.select(
        condition.order_by(
            (models.UnittestModel.int_, condition.OrderType.DESC)))
    self.assertEndsWith(ordered.sql(), ' ORDER BY table.int_ DESC')
    self.assertEmpty(ordered.parameters())
    self.assertEmpty(ordered.types())
    plain = self.select()
    self.assertNotRegex(plain.sql(), 'ORDER BY')
def test_query_combines_properly(self):
    """Mixed conditions render in WHERE / ORDER BY / LIMIT order."""
    conditions = [
        condition.equal_to('int_', 5),
        condition.not_equal_to('string_array', ['foo', 'bar']),
        condition.limit(2),
        condition.order_by(('string', condition.OrderType.DESC)),
    ]
    select_query = self.select(*conditions)
    expected_sql = ('WHERE table.int_ = @int_0 AND table.string_array != '
                    '@string_array1 ORDER BY table.string DESC LIMIT @limit2')
    self.assertEndsWith(select_query.sql(), expected_sql)
def test_only_one_limit_allowed(self):
    """Passing two limit conditions raises SpannerError."""
    first = condition.limit(2)
    second = condition.limit(2)
    with self.assertRaises(error.SpannerError):
        self.select(first, second)
def test_force_index(self):
    """force_index adds a FORCE_INDEX table hint to the FROM clause."""
    hinted = self.select(condition.force_index('test_index'))
    self.assertEndsWith(hinted.sql(), 'FROM table@{FORCE_INDEX=test_index}')
def test_force_index_with_object(self):
    """Forcing an index via the index object matches forcing it by name."""
    hinted = self.select(
        condition.force_index(models.UnittestModel.test_index))
    self.assertEndsWith(hinted.sql(), 'FROM table@{FORCE_INDEX=test_index}')
def includes(self, relation, *conditions, foreign_key_relation=False):
    """Build a SelectQuery carrying an includes() condition on *relation*.

    When foreign_key_relation is True the query targets
    ForeignKeyTestModel, otherwise RelationshipTestModel.
    """
    include_condition = condition.includes(relation, list(conditions),
                                           foreign_key_relation)
    if foreign_key_relation:
        model = models.ForeignKeyTestModel
    else:
        model = models.RelationshipTestModel
    return query.SelectQuery(model, [include_condition])
def test_includes_subconditions_query(self):
    """Subconditions on an include are joined onto the relation's WHERE."""
    select_query = self.includes('parents',
                                 condition.equal_to('key', 'value'))
    pattern = ('WHERE SmallTestModel.key = RelationshipTestModel.parent_key '
               'AND SmallTestModel.key = @key0')
    self.assertRegex(select_query.sql(), pattern)
def includes_result(self, related=1):
    """Fake a result row for one RelationshipTestModel child.

    The row carries *related* copies of the same parent record appended
    as the final column. Returns (child dict, parent dict, rows).
    """
    child = {'parent_key': 'parent_key', 'child_key': 'child'}
    parent = {'key': 'key', 'value_1': 'value_1', 'value_2': None}
    row = [child[name] for name in models.RelationshipTestModel.columns]
    parent_row = [parent[name] for name in models.SmallTestModel.columns]
    row.append([list(parent_row) for _ in range(related)])
    return child, parent, [row]
def fk_includes_result(self, related=1):
    """Fake a result row for one ForeignKeyTestModel child.

    Mirrors includes_result() but for the foreign-key test model.
    Returns (child dict, parent dict, rows).
    """
    child = {
        'referencing_key_1': 'parent_key',
        'referencing_key_2': 'child',
        'referencing_key_3': 'child',
        'self_referencing_key': 'child'
    }
    parent = {'key': 'key', 'value_1': 'value_1', 'value_2': None}
    row = [child[name] for name in models.ForeignKeyTestModel.columns]
    parent_row = [parent[name] for name in models.SmallTestModel.columns]
    row.append([list(parent_row) for _ in range(related)])
    return child, parent, [row]
def test_includes_subcondition_result(self):
    """Processing rows hydrates the requested number of related parents."""
    select_query = self.includes('parents',
                                 condition.equal_to('key', 'value'))
    child_values, parent_values, rows = self.includes_result(related=2)
    hydrated = select_query.process_results(rows)[0]
    self.assertLen(hydrated.parents, 2)
    for attr, expected in child_values.items():
        self.assertEqual(getattr(hydrated, attr), expected)
    for attr, expected in parent_values.items():
        self.assertEqual(getattr(hydrated.parents[0], attr), expected)
def test_or(self):
    """or_ renders parenthesized alternatives and binds both parameters."""
    select_query = self.select(
        condition.or_([condition.equal_to('int_', 1)],
                      [condition.equal_to('int_', 2)]))
    self.assertEndsWith(select_query.sql(),
                        '((table.int_ = @int_0) OR (table.int_ = @int_1))')
    self.assertEqual(select_query.parameters(), {'int_0': 1, 'int_1': 2})
    int_type = field.Integer.grpc_type()
    self.assertEqual(select_query.types(),
                     {'int_0': int_type, 'int_1': int_type})
if __name__ == '__main__':
    # Install default logging handlers before the unittest runner starts,
    # so warnings emitted by the library under test are visible.
    logging.basicConfig()
    unittest.main()
| 38.855626 | 112 | 0.674335 |
b8521a1784e7669f76ae670720d2483ecddff419 | 518 | py | Python | leetcode/1859_sorting_the_sentence.py | jacquerie/leetcode | a05e6b832eb0e0740aaff7b2eb3109038ad404bf | [
"MIT"
] | 3 | 2018-05-10T09:56:49.000Z | 2020-11-07T18:09:42.000Z | leetcode/1859_sorting_the_sentence.py | jacquerie/leetcode | a05e6b832eb0e0740aaff7b2eb3109038ad404bf | [
"MIT"
] | null | null | null | leetcode/1859_sorting_the_sentence.py | jacquerie/leetcode | a05e6b832eb0e0740aaff7b2eb3109038ad404bf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
if __name__ == '__main__':
solution = Solution()
assert 'This is a sentence' == solution.sortSentence('is2 sentence4 This1 a3')
assert 'Me Myself and I' == solution.sortSentence('Myself2 Me1 I4 and3')
| 25.9 | 82 | 0.579151 |
b8522b1cda4c464e1d7c573371d89f13b40ae37b | 89 | py | Python | todoapi/apps.py | Faysa1/Gestion-Tickets-Taches | eeba92df59c3217d15b02a5bb1ed3c6e673537a4 | [
"Apache-2.0"
] | 51 | 2018-12-12T20:18:31.000Z | 2022-03-11T20:23:35.000Z | todoapi/apps.py | Faysa1/Gestion-Tickets-Taches | eeba92df59c3217d15b02a5bb1ed3c6e673537a4 | [
"Apache-2.0"
] | 11 | 2018-12-17T08:48:07.000Z | 2022-03-02T02:54:38.000Z | todoapi/apps.py | Faysa1/Gestion-Tickets-Taches | eeba92df59c3217d15b02a5bb1ed3c6e673537a4 | [
"Apache-2.0"
] | 29 | 2018-12-12T20:19:00.000Z | 2022-01-18T12:33:21.000Z | from django.apps import AppConfig
| 14.833333 | 33 | 0.752809 |
b85283b049e0e58e8a7c62f87369d905b8440e5f | 3,101 | py | Python | src/flagon/backends/redis_backend.py | ashcrow/flagon | 50e6aa96854468a89399ef08573e4f814a002d26 | [
"MIT"
] | 18 | 2015-08-27T03:49:42.000Z | 2021-05-12T21:48:17.000Z | src/flagon/backends/redis_backend.py | ashcrow/flagon | 50e6aa96854468a89399ef08573e4f814a002d26 | [
"MIT"
] | 2 | 2016-07-18T13:48:46.000Z | 2017-05-20T15:56:03.000Z | src/flagon/backends/redis_backend.py | ashcrow/flagon | 50e6aa96854468a89399ef08573e4f814a002d26 | [
"MIT"
] | 5 | 2015-09-20T08:46:01.000Z | 2021-06-10T03:41:04.000Z | # The MIT License (MIT)
#
# Copyright (c) 2014 Steve Milner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Redis backend.
"""
import redis
from flagon import errors
from flagon.backends import Backend
| 32.642105 | 78 | 0.639471 |
b8578eebc5523ec0f810b0739d30b92505082a9a | 2,365 | py | Python | 6. Ordinary Differential Equations/4. a. Higher Order Ordinary Differential Equation using RK4.py | dmNadim/Numerical-Methods | 2c74312ea4efddd7db65483fef02fea710963dcf | [
"MIT"
] | null | null | null | 6. Ordinary Differential Equations/4. a. Higher Order Ordinary Differential Equation using RK4.py | dmNadim/Numerical-Methods | 2c74312ea4efddd7db65483fef02fea710963dcf | [
"MIT"
] | null | null | null | 6. Ordinary Differential Equations/4. a. Higher Order Ordinary Differential Equation using RK4.py | dmNadim/Numerical-Methods | 2c74312ea4efddd7db65483fef02fea710963dcf | [
"MIT"
] | null | null | null | from math import sin, cos, pi
f = lambda x: 9*pi*cos(x) + 7*sin(x) + 4*x - 5*x*cos(x) # Analytical Solution
df = lambda x: -9*pi*sin(x) + 7*cos(x) + 4 - 5*(cos(x)-x*sin(x))
dy = lambda x,y,u: u # 1st Derivative, y' = u
du = lambda x,y,u: 4*x + 10*sin(x) - y # 2nd Derivative, u' = 4x+10sin(x)-y
x = pi # Lower limit, [
xn = 2*pi # Upper limit, 2]
y = 0 # Initial condition, y() = 0
u = 2 # Initial condition, u() = 2
h = 0.5 # Width of each division, step size
# h = 0.1 # Smaller step size gives less error
n = int((xn-x)/h) # Number of divisions of the domain
print('x \t\ty(RK4) \t\ty\'(RK4) \ty(Exact) \ty\'(Exact)') # Header of Output
print('%f \t%f \t%f \t%f \t%f' % (x, y, u, f(x), df(x))) # Initial x and y
for i in range(n):
L1 = h * du(x,y,u)
K1 = h * dy(x,y,u)
L2 = h * du(x + h/2, y + K1/2, u + L1/2)
K2 = h * dy(x + h/2, y + K1/2, u + L1/2)
L3 = h * du(x + h/2, y + K2/2, u + L2/2)
K3 = h * dy(x + h/2, y + K2/2, u + L2/2)
L4 = h * du(x + h, y + K3, u + L3)
K4 = h * dy(x + h, y + K3, u + L3)
u += 1/6*(L1 + 2*L2 + 2*L3 + L4) # u(x+h) = u(x) + 1/6(L1+2L2+2L3+L4)
y += 1/6*(K1 + 2*K2 + 2*K3 + K4) # y(x+h) = y(x) + 1/6(K1+2K2+2K3+K4)
x += h # x for next step, x = x + h
print('%f \t%f \t%f \t%f \t%f' % (x, y, u, f(x), df(x)))
"""
2nd order ODE y'' = f(x,y,y') should be divided into two first order ODE's
y' = u and u' = f(x,y,u)
The two equations are solved simultaneously using RK4
L1 = h u'(x,y,u)
K1 = h y'(x,y,u)
L2 = h u'(x + h/2, y + K1/2, u + L1/2)
K2 = h y'(x + h/2, y + K1/2, u + L1/2)
L3 = h u'(x + h/2, y + K2/2, u + L2/2)
K3 = h y'(x + h/2, y + K2/2, u + L2/2)
L4 = h u'(x + h, y + K3, u + L3)
K4 = h y'(x + h, y + K3, u + L3)
u(x+h) = u(x) + 1/6 (L1 + 2 L2 + 2 L3 + L4)
y(x+h) = y(x) + 1/6 (K1 + 2 K2 + 2 K3 + K4)
The initial condition is the value of y(x) at initial domain x
Find the numerical solution of the following differential equation
over the domain [,2]: y''+y = 4x+10sin(x), y() = 0, y'() = 2
y' = u, y() = 0
u' = 4x+10sin(x)-y, u() = 2
Analytical Solution: y = 9 cos(x) + 7sin(x) + 4x - 5x cos(x)
"""
| 36.384615 | 78 | 0.4537 |
b85874411e43ac8ab8f40e52f253f84cc102e824 | 1,029 | py | Python | scripts/image_navigation.py | habibmuhammadthariq/iq_gnc | 06752997c103b48db48efb2814923fdc3a0f74b8 | [
"MIT"
] | null | null | null | scripts/image_navigation.py | habibmuhammadthariq/iq_gnc | 06752997c103b48db48efb2814923fdc3a0f74b8 | [
"MIT"
] | null | null | null | scripts/image_navigation.py | habibmuhammadthariq/iq_gnc | 06752997c103b48db48efb2814923fdc3a0f74b8 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
#ros library
#import rospy
#import the API
#from iq_gnc.py_gnc_functions import *
#print the colours
#from iq_gnc.PrintColours import *
# Importing Point message from package geometry_msgs.
#from geometry_msgs.msg import Point
#import opencv library
import cv2
cap = cv2.VideoCapture(0)
while True:
_, img = cap.read()
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#red color
low_red = np.array([161, 155, 84])
high_red = np_array([179, 255, 255])
#blue color
#low_blue = np.array([94, 80, 2])
#high_blue = np.array([126, 255, 255])
#green color
#low_green = np.array([25, 52, 72])
#high_green = np.array([102, 255, 255])
#every color except white
#low = np.array([0, 42, 0])
#high = np.array([179, 255, 255])
red_mask = cv2.inRange(hsv, low_red, high_red)
red = cv2.bitwise_and(image, image, mask=red_mask)
cv2.imshow("Original Image", image)
cv2.imshow("Red Filter", red)
key = cv2.waitKey(1)
if key == 27:
break
| 25.725 | 54 | 0.651118 |
b8588a227beffd14bd3ab5788c323affed1dda08 | 1,083 | py | Python | switchmng/wsgi.py | AnsgarKlein/switchmng | d86ae2dc40ef70f43fec8e3adf49ae5fd796a01d | [
"MIT"
] | null | null | null | switchmng/wsgi.py | AnsgarKlein/switchmng | d86ae2dc40ef70f43fec8e3adf49ae5fd796a01d | [
"MIT"
] | null | null | null | switchmng/wsgi.py | AnsgarKlein/switchmng | d86ae2dc40ef70f43fec8e3adf49ae5fd796a01d | [
"MIT"
] | null | null | null | from switchmng import config
from switchmng.schema.base import Base
from switchmng.database import DatabaseConnection
from switchmng.routes import create_app
def app(*args, **kwargs):
    """
    Entry point for wsgi server like `gunicorn` serving this
    application.

    Translates gunicorn-style keyword parameters into command line
    style arguments, initializes the database, then returns the
    initialized wsgi application (or None on an unknown parameter).
    """
    help_str = ('Possible parameters:\n'
                ' config=FILE Use FILE for configuration file')

    # Convert gunicorn keyword parameters into sys.argv-style flags.
    params = []
    for name, value in kwargs.items():
        if name != 'config':
            print('Unknown parameter "{}"\n\n{}\n'.format(name, help_str))
            return None
        params.extend(['--config', value])

    # Parse given arguments
    config.parse_arguments(params)

    # Initialize the database connection used by the app.
    db = DatabaseConnection(config.DB_TYPE, config.DB_PATH, config.DB_VERBOSE, Base)

    # Return wsgi app
    return create_app(db)
| 29.27027 | 84 | 0.665743 |
b8591745507d3ac646b22cef27786c56c597a729 | 1,598 | py | Python | records_mover/db/postgres/copy_options/date_output_style.py | ellyteitsworth/records-mover | 21cd56efc2d23cfff04ec1fdf582e5229546c418 | [
"Apache-2.0"
] | null | null | null | records_mover/db/postgres/copy_options/date_output_style.py | ellyteitsworth/records-mover | 21cd56efc2d23cfff04ec1fdf582e5229546c418 | [
"Apache-2.0"
] | null | null | null | records_mover/db/postgres/copy_options/date_output_style.py | ellyteitsworth/records-mover | 21cd56efc2d23cfff04ec1fdf582e5229546c418 | [
"Apache-2.0"
] | null | null | null | from records_mover.utils import quiet_remove
from records_mover.records.delimited import cant_handle_hint, ValidatedRecordsHints
from typing import Set, Tuple, Optional
from .types import DateOrderStyle, DateOutputStyle
| 39.95 | 83 | 0.70025 |
b8596ffa290b85166791b3474bb6337caf557e75 | 6,239 | py | Python | tacker/tests/unit/vnfm/infra_drivers/openstack/test_vdu.py | takahashi-tsc/tacker | a0ae01a13dcc51bb374060adcbb4fd484ab37156 | [
"Apache-2.0"
] | null | null | null | tacker/tests/unit/vnfm/infra_drivers/openstack/test_vdu.py | takahashi-tsc/tacker | a0ae01a13dcc51bb374060adcbb4fd484ab37156 | [
"Apache-2.0"
] | null | null | null | tacker/tests/unit/vnfm/infra_drivers/openstack/test_vdu.py | takahashi-tsc/tacker | a0ae01a13dcc51bb374060adcbb4fd484ab37156 | [
"Apache-2.0"
] | 1 | 2020-11-16T02:14:35.000Z | 2020-11-16T02:14:35.000Z | # Copyright 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from tacker import context
from tacker.db.common_services import common_services_db_plugin
from tacker.objects import heal_vnf_request
from tacker.plugins.common import constants
from tacker.tests.unit import base
from tacker.vnfm.infra_drivers.openstack import vdu
vnf_dict = {
'attributes': {
'heat_template': {
'outputs': {
'mgmt_ip-VDU1': {
'value': {
'get_attr': [
'CP1', 'fixed_ips', 0, 'ip_address']
}
}
},
'description': 'Demo example\n',
'parameters': {},
'resources': {
'VDU1': {
'type': 'OS::Nova::Server',
'properties': {
'user_data_format': 'SOFTWARE_CONFIG',
'availability_zone': 'nova',
'image': 'cirros-0.4.0-x86_64-disk',
'config_drive': False,
'flavor': {'get_resource': 'VDU1_flavor'},
'networks': [{'port': {'get_resource': 'CP1'}}]
}
},
'CP1': {
'type': 'OS::Neutron::Port',
'properties': {
'port_security_enabled': False,
'network': 'net_mgmt'
}
},
'VDU1_flavor': {
'type': 'OS::Nova::Flavor',
'properties': {'vcpus': 1, 'disk': 1, 'ram': 512}
}
}
}
},
'status': 'ACTIVE',
'vnfd_id': '576acf48-b9df-491d-a57c-342de660ec78',
'tenant_id': '13d2ca8de70d48b2a2e0dbac2c327c0b',
'vim_id': '3f41faa7-5630-47d2-9d4a-1216953c8887',
'instance_id': 'd1121d3c-368b-4ac2-b39d-835aa3e4ccd8',
'placement_attr': {'vim_name': 'openstack-vim'},
'id': 'a27fc58e-66ae-4031-bba4-efede318c60b',
'name': 'vnf_create_1'
}
| 38.042683 | 79 | 0.599295 |
b85adde254fd21cc8c4987b399dbf5487b008f43 | 445 | py | Python | tests/test_example.py | jlane9/mockerena | a3fd1bd39af6269dc96846967b4bba47759bab41 | [
"MIT"
] | 1 | 2019-09-10T05:12:38.000Z | 2019-09-10T05:12:38.000Z | tests/test_example.py | jlane9/mockerena | a3fd1bd39af6269dc96846967b4bba47759bab41 | [
"MIT"
] | 10 | 2019-09-10T16:14:35.000Z | 2019-12-19T17:13:51.000Z | tests/test_example.py | jlane9/mockerena | a3fd1bd39af6269dc96846967b4bba47759bab41 | [
"MIT"
] | 2 | 2019-09-10T05:11:58.000Z | 2020-04-29T17:59:47.000Z | """test_example
.. codeauthor:: John Lane <john.lane93@gmail.com>
"""
from flask import url_for
from eve import Eve
import pytest
| 19.347826 | 67 | 0.698876 |
b85d1ecfbfe5440d3438acef2b9c37a3da7e6e97 | 1,243 | py | Python | tests/TestPoissonSpikeGeneration.py | VadimLopatkin/AtlasSnnController | 25c87bd7c80cbb5a1163311b2fd87fad5344f978 | [
"Apache-2.0"
] | 2 | 2016-05-22T12:30:41.000Z | 2016-06-03T06:05:21.000Z | tests/TestPoissonSpikeGeneration.py | VadimLopatkin/AtlasSnnController | 25c87bd7c80cbb5a1163311b2fd87fad5344f978 | [
"Apache-2.0"
] | null | null | null | tests/TestPoissonSpikeGeneration.py | VadimLopatkin/AtlasSnnController | 25c87bd7c80cbb5a1163311b2fd87fad5344f978 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from brian2 import *
if __name__ == '__main__':
unittest.main() | 33.594595 | 74 | 0.697506 |
b85d56ed4c33e772f43301cfd59b3662ccd3560a | 6,100 | py | Python | stream.py | Abhishek-Aditya-bs/Streaming-Spark-For-Machine-Learning | 76f9c97e66d6171bc83d1183fadc30bd492422a7 | [
"MIT"
] | 1 | 2021-12-10T13:14:53.000Z | 2021-12-10T13:14:53.000Z | stream.py | iVishalr/SSML-spark-streaming-for-machine-learning | ba95a7d2d6bb15bacfbbf5b3c95317310b36d54f | [
"MIT"
] | null | null | null | stream.py | iVishalr/SSML-spark-streaming-for-machine-learning | ba95a7d2d6bb15bacfbbf5b3c95317310b36d54f | [
"MIT"
] | null | null | null | #! /usr/bin/python3
import time
import json
import pickle
import socket
import argparse
import numpy as np
from tqdm import tqdm
# Command-line interface for the streaming script; defaults stream the
# CIFAR file in batches of 100 every 3 seconds.
parser = argparse.ArgumentParser(
    description='Streams a file to a Spark Streaming Context')
parser.add_argument('--file', '-f', help='File to stream', required=False,type=str, default="cifar")
parser.add_argument('--batch-size', '-b', help='Batch size',required=False, type=int, default=100)
# NOTE(review): argparse's type=bool treats any non-empty string as True,
# so "--endless False" is still truthy -- confirm this is intended.
parser.add_argument('--endless', '-e', help='Enable endless stream',required=False, type=bool, default=False)
parser.add_argument('--split','-s', help="training or test split", required=False, type=str, default='train')
parser.add_argument('--sleep','-t', help="streaming interval", required=False, type=int, default=3)
# Address the consuming Spark Streaming Context connects to.
TCP_IP = "localhost"
TCP_PORT = 6100
if __name__ == '__main__':
    # Parse and echo the CLI arguments.
    args = parser.parse_args()
    print(args)
    input_file = args.file
    batch_size = args.batch_size
    endless = args.endless
    sleep_time = args.sleep
    train_test_split = args.split
    # Dataset is defined elsewhere in this file (not visible in this
    # excerpt); connectTCP() presumably opens the server socket -- verify.
    dataset = Dataset()
    tcp_connection, _ = dataset.connectTCP()
    if input_file == "cifar":
        _function = dataset.streamCIFARDataset
    # NOTE(review): _function is only bound when --file is "cifar"; any
    # other value raises NameError below -- confirm intended behavior.
    if endless:
        # Re-stream the dataset forever (loop never exits normally).
        while True:
            _function(tcp_connection, input_file)
    else:
        # Stream the dataset once, then close the connection.
        _function(tcp_connection, input_file)
    tcp_connection.close()
b85e1207d6e09dc9d3b5821470f14d0eed8e2190 | 394 | py | Python | subcontent/backup/python3_closure_nonlocal.py | fingerkc/fingerkc.github.io | 0bfe5163ea28be3747756c8b6be64ad4f09b2fbf | [
"MIT"
] | 2 | 2019-06-13T07:22:22.000Z | 2019-11-23T03:55:21.000Z | subcontent/backup/python3_closure_nonlocal.py | fingerkc/fingerkc.github.io | 0bfe5163ea28be3747756c8b6be64ad4f09b2fbf | [
"MIT"
] | 1 | 2019-12-15T04:10:59.000Z | 2019-12-15T04:10:59.000Z | subcontent/backup/python3_closure_nonlocal.py | fingerkc/fingerkc.github.io | 0bfe5163ea28be3747756c8b6be64ad4f09b2fbf | [
"MIT"
] | 1 | 2019-06-24T08:17:13.000Z | 2019-06-24T08:17:13.000Z | #!/usr/bin/python3
##python3 nonlocal
#
#(closure)
#clo_B
#nonlocal
| 15.153846 | 50 | 0.670051 |
b85e66013be32836e47b0a35176f156e7e93f2e2 | 352 | py | Python | utils/tracker.py | emarche/Fashion-MNIST | f8183e33ab7c3df673a60de3b16f2c4c979b89bb | [
"MIT"
] | null | null | null | utils/tracker.py | emarche/Fashion-MNIST | f8183e33ab7c3df673a60de3b16f2c4c979b89bb | [
"MIT"
] | null | null | null | utils/tracker.py | emarche/Fashion-MNIST | f8183e33ab7c3df673a60de3b16f2c4c979b89bb | [
"MIT"
] | null | null | null | import os
import numpy as np | 29.333333 | 76 | 0.661932 |
b86107cdd1d04d347d396fb2227d46e8eb33bf64 | 2,663 | py | Python | kittycad/models/cluster.py | KittyCAD/kittycad.py | 7f7460d366dbd55fce50e5faa4a032b62e4baae4 | [
"MIT"
] | 1 | 2022-02-06T05:07:25.000Z | 2022-02-06T05:07:25.000Z | kittycad/models/cluster.py | KittyCAD/kittycad.py | 7f7460d366dbd55fce50e5faa4a032b62e4baae4 | [
"MIT"
] | 7 | 2022-02-04T11:29:25.000Z | 2022-03-07T01:37:26.000Z | kittycad/models/cluster.py | KittyCAD/kittycad.py | 7f7460d366dbd55fce50e5faa4a032b62e4baae4 | [
"MIT"
] | null | null | null | from typing import Any, Dict, List, Type, TypeVar, Union, cast
import attr
from ..types import UNSET, Unset
T = TypeVar("T", bound="Cluster")
| 28.945652 | 77 | 0.605332 |
b8630346a106bda3978051caf8a5f0528b3d18fe | 4,287 | py | Python | Crypto.py | akshatsri89/Cryptogram | 51ea1e658ecf82ea922a967299814e812da73c4e | [
"Apache-2.0"
] | 1 | 2021-08-10T14:09:34.000Z | 2021-08-10T14:09:34.000Z | Crypto.py | akshatsri89/Cryptogram | 51ea1e658ecf82ea922a967299814e812da73c4e | [
"Apache-2.0"
] | null | null | null | Crypto.py | akshatsri89/Cryptogram | 51ea1e658ecf82ea922a967299814e812da73c4e | [
"Apache-2.0"
] | null | null | null | # import tkinter module
from tkinter import *
# import other necessery modules
import random
# Vigenre cipher for encryption and decryption
import base64
# creating root object
root = Tk()
# defining size of window
root.geometry("1200x4000")
# setting up the title of window
root.title("Message Encrypter and Decrypter")
Tops = Frame(root, width=1600, relief=SUNKEN)
Tops.pack(side=TOP)
f1 = Frame(root, width=800, relief=SUNKEN)
f1.pack(side=LEFT)
# ==============================================
lblInfo = Label(Tops, font=('helvetica', 40, 'bold', 'underline'),
text="SECRET MESSAGING",
fg="Black", bd=10, anchor='w')
lblInfo.grid(row=0, column=0)
# Initializing variables
Msg = StringVar()
key = StringVar()
mode = StringVar()
Result = StringVar()
# labels for the message
lblMsg = Label(f1, font=('arial', 16, 'bold'),
text="MESSAGE", bd=16, anchor="w")
lblMsg.grid(row=1, column=0)
# Entry box for the message
txtMsg = Entry(f1, font=('arial', 16, 'bold'),
textvariable=Msg, bd=10, insertwidth=4,
bg="powder blue", justify='right')
txtMsg.grid(row=1, column=1)
# labels for the key
lblkey = Label(f1, font=('arial', 16, 'bold'),
text="KEY (Only Integer)", bd=16, anchor="w")
lblkey.grid(row=2, column=0)
# Entry box for the key
txtkey = Entry(f1, font=('arial', 16, 'bold'),
textvariable=key, bd=10, insertwidth=4,
bg="powder blue", justify='right')
txtkey.grid(row=2, column=1)
# labels for the mode
lblmode = Label(f1, font=('arial', 16, 'bold'),
text="MODE(e for encrypt, d for decrypt)",
bd=16, anchor="w")
lblmode.grid(row=3, column=0)
# Entry box for the mode
txtmode = Entry(f1, font=('arial', 16, 'bold'),
textvariable=mode, bd=10, insertwidth=4,
bg="powder blue", justify='right')
txtmode.grid(row=3, column=1)
# labels for the result
lblResult = Label(f1, font=('arial', 16, 'bold'),
text="The Result-", bd=16, anchor="w")
lblResult.grid(row=2, column=2)
# Entry box for the result
txtResult = Entry(f1, font=('arial', 16, 'bold'),
textvariable=Result, bd=10, insertwidth=4,
bg="powder blue", justify='right')
txtResult.grid(row=2, column=3)
# Vigenre cipher
# Function to encode
# Function to decode
# exit function
# Function to reset the window
# Show message button
btnTotal = Button(f1, padx=16, pady=8, bd=16, fg="black",
font=('arial', 16, 'bold'), width=10,
text="Show Message", bg="yellow",
command=Results).grid(row=7, column=1)
# Reset button
btnReset = Button(f1, padx=16, pady=8, bd=16,
fg="black", font=('arial', 16, 'bold'),
width=10, text="Reset", bg="green",
command=Reset).grid(row=7, column=2)
# Exit button
btnExit = Button(f1, padx=16, pady=8, bd=16,
fg="black", font=('arial', 16, 'bold'),
width=10, text="Exit", bg="red",
command=qExit).grid(row=7, column=3)
# keeps window alive
root.mainloop()
| 24.497143 | 68 | 0.54397 |
b865512ce604a1054ccd890643255a1593208d7a | 224 | py | Python | bootstrap_rmsf/__init__.py | jeeberhardt/bootstrap_rmsf | 1487251ffde91d34b7609aec147c0ff99fc7cded | [
"MIT"
] | 1 | 2021-08-06T02:31:32.000Z | 2021-08-06T02:31:32.000Z | bootstrap_rmsf/__init__.py | jeeberhardt/bootstrap_rmsf | 1487251ffde91d34b7609aec147c0ff99fc7cded | [
"MIT"
] | null | null | null | bootstrap_rmsf/__init__.py | jeeberhardt/bootstrap_rmsf | 1487251ffde91d34b7609aec147c0ff99fc7cded | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Jrme Eberhardt 2018
# Bootstrap RMSF
# Author: Jrme Eberhardt <qksonoe@gmail.com>
#
# License: MIT
from bootstrap_rmsf import Bootstrap_RMSF
from utils import plot_rmsf
| 18.666667 | 46 | 0.736607 |
b8658b22f0fd4c1d9dcc67d9f35f9aa1c9580dfe | 1,221 | py | Python | fixture/application.py | OSavchik/python_training | 8e532c9f0da99e5f342467dd7bcc3a43e667daf6 | [
"Apache-2.0"
] | null | null | null | fixture/application.py | OSavchik/python_training | 8e532c9f0da99e5f342467dd7bcc3a43e667daf6 | [
"Apache-2.0"
] | null | null | null | fixture/application.py | OSavchik/python_training | 8e532c9f0da99e5f342467dd7bcc3a43e667daf6 | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
| 24.918367 | 65 | 0.580672 |
b868a9af47b1de35f84902480574280915282a7c | 7,601 | py | Python | data_managers/data_manager_gatk_picard_index_builder/data_manager/data_manager_gatk_picard_index_builder.py | supernord/tools-iuc | 9a0c41967765d120a8fc519c0c7f09cbe3a6efbe | [
"MIT"
] | 142 | 2015-03-13T18:08:34.000Z | 2022-03-30T23:52:34.000Z | data_managers/data_manager_gatk_picard_index_builder/data_manager/data_manager_gatk_picard_index_builder.py | mtekman/tools-iuc | 95f1ae4ed1cdd56114df76d215f9e1ed549aa4c5 | [
"MIT"
] | 3,402 | 2015-01-05T18:04:20.000Z | 2022-03-30T22:09:36.000Z | data_managers/data_manager_gatk_picard_index_builder/data_manager/data_manager_gatk_picard_index_builder.py | willemdek11/tools-iuc | dc0a0cf275168c2a88ee3dc47652dd7ca1137871 | [
"MIT"
] | 438 | 2015-01-07T20:33:59.000Z | 2022-03-30T04:39:18.000Z | #!/usr/bin/env python
# Dave B.
# Uses fasta sorting functions written by Dan Blankenberg.
import json
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
CHUNK_SIZE = 2**20
DEFAULT_DATA_TABLE_NAME = "fasta_indexes"
if __name__ == "__main__":
main()
| 40.865591 | 157 | 0.675043 |
b86adccb9d42d87933b32bb27aaf25b01696f8a9 | 818 | py | Python | django_for_startups/django_customizations/drf_customizations.py | Alex3917/django_for_startups | 9dda54f5777247f7367a963d668f25e797c9adf1 | [
"MIT"
] | 102 | 2021-02-28T00:58:36.000Z | 2022-03-30T09:29:34.000Z | django_for_startups/django_customizations/drf_customizations.py | Alex3917/django_for_startups | 9dda54f5777247f7367a963d668f25e797c9adf1 | [
"MIT"
] | 1 | 2021-07-11T18:45:29.000Z | 2021-07-11T18:45:29.000Z | django_for_startups/django_customizations/drf_customizations.py | Alex3917/django_for_startups | 9dda54f5777247f7367a963d668f25e797c9adf1 | [
"MIT"
] | 16 | 2021-06-23T18:34:46.000Z | 2022-03-30T09:27:34.000Z | # Standard Library imports
# Core Django imports
# Third-party imports
from rest_framework import permissions
from rest_framework.throttling import UserRateThrottle, AnonRateThrottle
# App imports
| 24.787879 | 94 | 0.734719 |
b86ccfc144647099cbf5ac1e80b91ec536893766 | 171,517 | py | Python | python/mapCells.py | claraya/meTRN | a4e4911b26a295e22d7309d5feda026db3325885 | [
"MIT"
] | 2 | 2019-11-18T22:54:13.000Z | 2019-11-18T22:55:18.000Z | python/mapCells.py | claraya/meTRN | a4e4911b26a295e22d7309d5feda026db3325885 | [
"MIT"
] | null | null | null | python/mapCells.py | claraya/meTRN | a4e4911b26a295e22d7309d5feda026db3325885 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# perform cellular-resolution expression analyses!
import sys
import time
import optparse
import general
import hyper
import numpy
import math
import pickle
import pdb
import metrn
import modencode
import itertools
import os
import re
import datetime
import calendar
#import simplejson as json
from scipy.stats.stats import pearsonr
from runner import *
from scipy import stats
from network import Network
from network import export
print "Command:", " ".join(sys.argv)
print "Timestamp:", time.asctime(time.localtime())
""" define functions of internal use """
""" define a function to recover cells in a time range """
""" define a function to construct a cell-parent relationships, and pedigree cell list """
""" define a function to construct a cell-parent relationships, and pedigree cell list """
""" define a function to generate the underlying tree of a given parent """
""" define a function to generate the list of cells that are parents to a given cell """
""" define a function to generate the list of cells that are progeny to a given parent """
""" define a function to generate the list of cells that are progeny to a given parent (using combinations function) """
""" define a function to generate the list of cells that are progeny to a given parent (using lineage growth) """
""" define a function to generate lists of related-cells from a given set of of cells """
""" define a function to calculate the number of possible subsets """
""" define a function to calculate the number of divisions between two cells """
""" define a function that calculates the lineage distance between two cells """
if __name__ == "__main__":
main()
print "Completed:", time.asctime(time.localtime())
#python mapCells.py --path ~/meTRN --mode import --infile murray_2012_supplemental_dataset_1_per_gene.txt --name murray # Retired!
#python mapCells.py --path ~/meTRN --mode import --infile waterston_avgExpression.csv --name waterston --measure max.expression
#python mapCells.py --path ~/meTRN --mode import --infile waterston_avgExpression.csv --name waterston --measure avg.expression
#python mapCells.py --path ~/meTRN --mode check.status --peaks optimal_standard_factor_sx_rawraw --name waterston --measure avg.expression
#python mapCells.py --path ~/meTRN --mode check.status --peaks optimal_standard_factor_ex_rawraw --name waterston --measure avg.expression
#python mapCells.py --path ~/meTRN/ --mode build.lineages --pedigree waterston_cell_pedigree.csv --expression mapcells_avgExp_waterston_expression_tracked --name waterston.tracked --method builder --lineages tracked --descendants OFF --ascendants OFF --limit 10000
#python mapCells.py --path ~/meTRN/ --mode build.lineages --pedigree waterston_cell_pedigree.csv --expression mapcells_avgExp_waterston_expression_tracked --name waterston.tracked --method builder --lineages complete --descendants OFF --ascendants OFF --limit 10000
#python mapCells.py --path ~/meTRN/ --mode test.lineages --pedigree waterston_cell_pedigree.csv --expression mapcells_avgExp_waterston_expression_tracked --name waterston.tracked --method builder --lineages tracked --descendants OFF --ascendants OFF --limit 10000
#python mapCells.py --path ~/meTRN/ --mode test.lineages --pedigree waterston_cell_pedigree.csv --expression mapcells_avgExp_waterston_expression_assayed --name waterston.assayed --method builder --lineages tracked --descendants OFF --ascendants OFF --limit 10000
#python mapCells.py --path ~/meTRN --organism ce --mode robust --infile waterston_avgExpression.csv | 42.815027 | 384 | 0.692007 |
b86edb269cd9e7e592b4cc82203020de3b8e84a3 | 1,838 | py | Python | gravity/bak/gravity3.py | baijianhua/pymath | a96ebbd8c8ac646c436d8bf33cb01764a948255d | [
"MIT"
] | null | null | null | gravity/bak/gravity3.py | baijianhua/pymath | a96ebbd8c8ac646c436d8bf33cb01764a948255d | [
"MIT"
] | null | null | null | gravity/bak/gravity3.py | baijianhua/pymath | a96ebbd8c8ac646c436d8bf33cb01764a948255d | [
"MIT"
] | null | null | null | # https://stackoverflow.com/questions/47295473/how-to-plot-using-matplotlib-python-colahs-deformed-grid
"""
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
EDGE = 5
STEP = 2 * EDGE + 1
fig, ax = plt.subplots()
ax.set_aspect('equal')
grid_x, grid_y = np.meshgrid(np.linspace(-EDGE, EDGE, STEP), np.linspace(-EDGE, EDGE, STEP))
plot_grid(grid_x, grid_y, ax=ax, color="lightgrey")
distx, disty = f1(grid_x, grid_y)
plot_grid(distx, disty, ax=ax, color="C0")
plt.show()
| 24.506667 | 103 | 0.581066 |
b86fc82da8dc94ff37ad24a384c231a1a48f881c | 7,780 | py | Python | IR_Extraction.py | Kazuhito00/yolo2_onnx | 95c5e2063071d610ec8e98963f3639e0b25efb59 | [
"MIT"
] | 15 | 2018-07-02T19:11:09.000Z | 2022-03-31T07:12:53.000Z | IR_Extraction.py | Kazuhito00/yolo2_onnx | 95c5e2063071d610ec8e98963f3639e0b25efb59 | [
"MIT"
] | null | null | null | IR_Extraction.py | Kazuhito00/yolo2_onnx | 95c5e2063071d610ec8e98963f3639e0b25efb59 | [
"MIT"
] | 9 | 2018-05-08T01:58:53.000Z | 2022-01-28T06:36:02.000Z | from Onnx import make_dir, OnnxImportExport
import subprocess
import pickle
import os
import numpy as np
import time
def generate_svg(modelName, marked_nodes=None):
    """
    Generate an SVG figure from an existing ONNX file.

    Runs net_drawer.py to turn onnx/<modelName>.onnx into a Graphviz .dot
    file, then `dot -Tsvg` to render it.  When ``marked_nodes`` is given,
    those node indices are highlighted and '_marked' is appended to the
    output file names.

    Returns the path of the generated SVG, or None when the ONNX file is
    missing.
    """
    # None default avoids the shared-mutable-default pitfall of the original
    # `marked_nodes=[]`; an absent/empty list means "no marks" either way.
    if not marked_nodes:
        addfilenamestr = ""
        add_command_str = ""
    else:
        addfilenamestr = "_marked"
        marked_str = '_'.join(str(e) for e in marked_nodes)
        add_command_str = " --marked 1 --marked_list {}".format(marked_str)
    onnxfilepath = "onnx/{}.onnx".format(modelName)
    dotfilepath = "dot/{}{}.dot".format(modelName, addfilenamestr)
    svgfilepath = "svg/{}{}.svg".format(modelName, addfilenamestr)
    # Bail out before shelling anything out if the ONNX file is absent.
    if not os.path.isfile(os.path.join(os.getcwd(), onnxfilepath)):
        print('generate_svg Error! Onnx file not exist!')
        return None
    make_dir("dot")
    make_dir("svg")
    # NOTE(review): shell=True with interpolated paths is shell-injection
    # prone if modelName comes from untrusted input; kept for compatibility.
    subprocess.call("python net_drawer.py --input {} --output {} --embed_docstring {}".format(onnxfilepath, dotfilepath, add_command_str), shell=True)  # onnx -> dot
    subprocess.call("dot -Tsvg {} -o {}".format(dotfilepath, svgfilepath), shell=True)  # dot -> svg
    print('generate_svg ..end')
    return svgfilepath
def get_init_shape_dict(rep):
    """
    Collect the shape of every initial input object held by a backend rep.

    Supports both a Caffe2-style rep (``rep.input_dict`` of tensors exposing
    ``.shape``) and a TensorFlow-style rep (``rep.predict_net.tensor_dict``
    whose shapes come from ``shape.as_list()``).  Returns a dict mapping the
    object name to its shape as an integer numpy array, e.g.
    ``{u'2': (64, 3, 3, 3), u'3': (64,)}``.  An unrecognised rep yields an
    empty dict (after printing an error).
    """
    shapes = {}
    if hasattr(rep, 'input_dict'):
        for name, tensor in rep.input_dict.items():
            shapes[name] = np.array(tensor.shape, dtype=int)
    elif hasattr(rep, 'predict_net'):
        for name in rep.predict_net.tensor_dict.keys():
            tensor = rep.predict_net.tensor_dict[name]
            # as_list() may contain None for unknown dims, hence the
            # float detour before casting to int.
            shapes[name] = np.array(tensor.shape.as_list(), dtype=float).astype(int)
    else:
        print ("rep Error! check your onnx version, it might not support IR_Extraction operation!")
    return shapes
def get_output_shape_of_node(node, shape_dict, backend, device = "CPU"):# or "CUDA:0"
    """
    Infer the output shape of a single NODE by running it on random inputs.

    Looks up the shape of each of the node's inputs in ``shape_dict``, feeds
    random float16 tensors of those shapes through ``backend.run_node`` and
    reads the shape of the first output.  If execution fails, falls back to
    the first input's shape (a common-case approximation for element-wise
    ops) and prints a warning.

    Returns ``(out_shape, out_idx)`` where ``out_idx`` is the name of the
    node's first output object.
    """
    out_idx = node.output[0]
    input_list = node.input # e.g. ['1', '2']
    inps = []
    for inp_idx in input_list:
        inp_shape = shape_dict[inp_idx]
        rand_inp = np.random.random(size=inp_shape).astype('float16')
        inps.append(rand_inp)
    try:
        out = backend.run_node(node=node, inputs=inps, device=device)
        out_shape = out[0].shape
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; narrowed to Exception while keeping the fallback.
        out_shape = shape_dict[input_list[0]]
        print("Op: [{}] run_node error! return inp_shape as out_shape".format(node.op_type))
    return out_shape, out_idx
def get_overall_shape_dict(model, init_shape_dict, backend):
    """
    Propagate shapes through every node of a MODEL GRAPH.

    Starting from the initial-input shapes, runs each node in graph order via
    get_output_shape_of_node and records its output shape, so later nodes can
    look up the shapes produced by earlier ones.  Returns the accumulated
    {object name: shape} dict.
    """
    shapes = dict(init_shape_dict)
    for idx, node in enumerate(model.graph.node):
        started = time.time()
        out_shape, out_idx = get_output_shape_of_node(node, shapes, backend)
        shapes[out_idx] = out_shape
        print("out_shape: {} for Obj[{}], node [{}][{}]...{:.2f} sec".format(out_shape, out_idx, idx, node.op_type, time.time() - started))
    return shapes
def get_graph_order(model):
    """
    Find the edges (each link) in a MODEL GRAPH.

    Returns two first-wins maps:
      * node index -> name of its first output entity
      * entity name -> index of the first node that consumes it
    """
    node_to_entity = {}
    entity_to_node = {}
    for node_idx, node in enumerate(model.graph.node):
        # Inputs consumed by this node (keep only the first consumer seen).
        for entity_idx in node.input:
            entity_to_node.setdefault(entity_idx, node_idx)
        # Outputs produced by this node (keep only the first output).
        for entity_idx in node.output:
            node_to_entity.setdefault(node_idx, entity_idx)
    return node_to_entity, entity_to_node
def get_kernel_shape_dict(model, overall_shape_dict):
    """
    Get the input/output/kernel shapes for every Conv node in a MODEL GRAPH.

    Returns {node index: (input name, output name, input shape, output shape,
    kernel shape)} and prints each entry as it is collected.
    Assumes every Conv node carries a 'kernel_shape' attribute (as the ONNX
    Conv schema prescribes) — TODO confirm for exotic exporters.
    """
    conv_info = {}
    for node_idx, node in enumerate(model.graph.node):
        if node.op_type != 'Conv':
            continue
        for attr in node.attribute:
            if attr.name == "kernel_shape":
                kernel_shape = np.array(attr.ints, dtype=int)
                break
        inp_idx = node.input[0]
        out_idx = node.output[0]
        inp_shape = overall_shape_dict[inp_idx]
        out_shape = overall_shape_dict[out_idx]
        conv_info[node_idx] = (inp_idx, out_idx, inp_shape, out_shape, kernel_shape)
        print("for node [{}][{}]:\ninp_shape: {} from obj[{}], \nout_shape: {} from obj[{}], \nkernel_shape: {} \n"
              .format(node_idx, node.op_type, inp_shape, inp_idx, out_shape, out_idx, kernel_shape))
    return conv_info
def calculate_num_param_n_num_flops(conv_d):
    """
    Calculate the total parameter count and FLOP count from conv_d.

    ``conv_d`` maps node index -> (inp_idx, out_idx, inp_shape, out_shape,
    kernel_shape), with shapes in NCHW layout and kernel_shape = (kh, kw).
    Per Conv layer: params = n*(h*w*c + 1) (the +1 is the per-filter bias)
    and flops = H*W*params, where n is the number of output channels and
    H, W the spatial size of the output feature map.
    """
    n_param = 0
    n_flops = 0
    for entry in conv_d.values():
        inp_shape, out_shape, kernel_shape = entry[2], entry[3], entry[4]
        # Bug fix: the original read kernel_shape[1] for BOTH h and w,
        # which is wrong for non-square kernels.
        h, w = kernel_shape[0], kernel_shape[1]
        c = inp_shape[1]               # input channels (NCHW)
        n = out_shape[1]               # output channels / number of filters
        H, W = out_shape[2], out_shape[3]
        n_param += n * (h * w * c + 1)
        n_flops += H * W * n * (h * w * c + 1)
    return n_param, n_flops
def find_sequencial_nodes(model, Node2nextEntity, Entity2nextNode,
                          search_target=('Conv', 'Add', 'Relu', 'MaxPool'), if_print=False):
    """
    Search for nodes that start an op chain matching ``search_target``.

    For each node, walks node -> output entity -> consuming node through the
    two maps, checking the op type at every step.  Returns the indices of the
    nodes where the whole pattern matched.

    NOTE: as in the original, the walk also advances one step past the final
    op, so a chain only matches when its last op has a consumer in the maps.
    """
    found_nodes = []
    for i, node in enumerate(model.graph.node):
        if if_print: print("\nnode[{}] ...".format(i))
        n_idx = i  # cursor walked along the chain
        is_fit = True
        for tar in search_target:
            try:
                # Explicit check instead of `assert` (asserts vanish under
                # `python -O`); KeyError routes it to the same failure path
                # as a missing link in the maps.
                if model.graph.node[n_idx].op_type != tar:
                    raise KeyError(tar)
                if if_print: print("node[{}] fit op_type [{}]".format(n_idx, tar))
                e_idx = Node2nextEntity[n_idx] #find next Entity
                n_idx = Entity2nextNode[e_idx] #find next Node
            except (KeyError, IndexError):
                # Was a bare `except:`; narrowed to the lookup failures the
                # walk can actually produce.
                is_fit = False
                if if_print: print("node[{}] doesn't fit op_type [{}]".format(n_idx, tar))
                break
        if is_fit:
            if if_print: print("node[{}] ...fit!".format(i))
            found_nodes.append(i)
        else:
            if if_print: print("node[{}] ...NOT fit!".format(i))
    if if_print: print("\nNode{} fit the matching pattern".format(found_nodes))
    return found_nodes
def get_permutations(a):
    """
    Return every permutation of every subset of ``a``.

    The result always starts with the empty tuple and is grouped by subset
    size, e.g. [1, 2] -> [(), (1,), (2,), (1, 2), (2, 1)].
    """
    import itertools
    result = []
    for size in range(len(a) + 1):
        for combo in itertools.combinations(a, size):
            result.extend(itertools.permutations(combo))
    return result
def get_list_of_sequencial_nodes(search_head=['Conv'], followings=['Add', 'Relu', 'MaxPool']):
    """
    Build every op-chain search pattern that starts with ``search_head``.

    Each pattern is ``search_head`` followed by one permutation of one subset
    of ``followings`` (including the empty subset), e.g. with the defaults:
    ['Conv'], ['Conv', 'Add'], ['Conv', 'Relu'], ..., up to
    ['Conv', 'MaxPool', 'Relu', 'Add'].
    """
    return [search_head + list(tail) for tail in get_permutations(followings)]
| 35.525114 | 166 | 0.579434 |
b87022120f02d56a10e8caeb021ec987a4c00e77 | 9,961 | py | Python | app.py | rshane7/Sqlalchemy-Challenge | b0cd11388727e1f43453b0e7b0019e304d45eb39 | [
"ADSL"
] | null | null | null | app.py | rshane7/Sqlalchemy-Challenge | b0cd11388727e1f43453b0e7b0019e304d45eb39 | [
"ADSL"
] | null | null | null | app.py | rshane7/Sqlalchemy-Challenge | b0cd11388727e1f43453b0e7b0019e304d45eb39 | [
"ADSL"
] | null | null | null | # Python script uses flask and SQL alchemy to create API requests for weather data from Hawaii.
# Import dependencies.
import numpy as np
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#----------------------------------------------------------------------------------------------------------------------
# Rubric - API SQLite Connection & Landing Page
# The Flask Application does all of the following:
# Correctly generates the engine to the correct sqlite file
# Uses automap_base() and reflects the database schema
# Correctly saves references to the tables in the sqlite file (measurement and station)
# Correctly creates and binds the session between the python app and database
#----------------------------------------------------------------------------------------------------------------------
# Database Setup
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# Reflect an existing database into a new model
Base = automap_base()
# Reflect the tables
Base.prepare(engine, reflect=True)
# Save references to tables
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
# Flask Setup
app = Flask(__name__)
# Flask Routes
#----------------------------------------------------------------------------------------------------------------------
# Rubric - API Static Routes
# The static routes do all of the following:
# Precipitation route
# Returns the jsonified precipitation data for the last year in the database.
# Returns json with the date as the key and the value as the precipitation Stations route.
# Returns jsonified data of all of the stations in the database Tobs route.
# Returns jsonified data for the most active station (USC00519281) for the last year of data.
#----------------------------------------------------------------------------------------------------------------------
# most active station last year of data
#----------------------------------------------------------------------------------------------------------------------
# Rubric - API Dynamic Route
# The dynamic route does all of the following:
# Start route
# Route accepts the start date as a parameter from the URL
# Start/end route
# Route accepts the start and end dates as parameters from the URL
# Returns the min, max, and average temperatures calculated from the given start date to the given end date Returns the min, max, and average temperatures calculated from the given start date to the end of the dataset
#----------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
app.run(debug=True)
session.close()
#----------------------------------------------------------------------------------------------------------------------
# THE END
#----------------------------------------------------------------------------------------------------------------------
| 45.484018 | 229 | 0.635679 |
b870872caf1a9e4c3f638cbe128e60ddb9f7db8d | 5,194 | py | Python | optimalTAD/__main__.py | cosmoskaluga/optimalTAD | eae806878df36a96b3ef8c5dc7eb8ecc9fa622b2 | [
"MIT"
] | null | null | null | optimalTAD/__main__.py | cosmoskaluga/optimalTAD | eae806878df36a96b3ef8c5dc7eb8ecc9fa622b2 | [
"MIT"
] | null | null | null | optimalTAD/__main__.py | cosmoskaluga/optimalTAD | eae806878df36a96b3ef8c5dc7eb8ecc9fa622b2 | [
"MIT"
] | null | null | null | import argparse
import logging
import sys
import time
import glob
import os
from . import logger
from . import config
from . visualization import plot
from . optimization import run
from . optimization import utils
if __name__ == '__main__':
optimalTAD()
| 55.849462 | 167 | 0.649788 |
b870e2ce26d78dfa9746e5e88adb9ed1463fb9fc | 944 | py | Python | communications/migrations/0002_auto_20190902_1759.py | shriekdj/django-social-network | 3654051e334996ee1b0b60f83c4f809a162ddf4a | [
"MIT"
] | 368 | 2019-10-10T18:02:09.000Z | 2022-03-31T14:31:39.000Z | communications/migrations/0002_auto_20190902_1759.py | shriekdj/django-social-network | 3654051e334996ee1b0b60f83c4f809a162ddf4a | [
"MIT"
] | 19 | 2020-05-09T19:10:29.000Z | 2022-03-04T18:22:51.000Z | communications/migrations/0002_auto_20190902_1759.py | shriekdj/django-social-network | 3654051e334996ee1b0b60f83c4f809a162ddf4a | [
"MIT"
] | 140 | 2019-10-10T18:01:59.000Z | 2022-03-14T09:37:39.000Z | # Generated by Django 2.2.4 on 2019-09-02 11:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 32.551724 | 153 | 0.665254 |
b871aaee0feb9ef1cdc6b28c76ed73a977fed9b3 | 1,126 | py | Python | examples/sht2x.py | kungpfui/python-i2cmod | 57d9cc8de372aa38526c3503ceec0d8924665c04 | [
"MIT"
] | null | null | null | examples/sht2x.py | kungpfui/python-i2cmod | 57d9cc8de372aa38526c3503ceec0d8924665c04 | [
"MIT"
] | null | null | null | examples/sht2x.py | kungpfui/python-i2cmod | 57d9cc8de372aa38526c3503ceec0d8924665c04 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Sensirion SHT2x humidity sensor.
Drives SHT20, SHT21 and SHT25 humidity and temperature sensors.
Sensirion `SHT2x Datasheets <https://www.sensirion.com/en/environmental-sensors/humidity-sensors/humidity-temperature-sensor-sht2x-digital-i2c-accurate/>`
"""
from i2cmod import SHT2X
if __name__ == '__main__':
example()
| 34.121212 | 154 | 0.579041 |
b872257816f92142d8b69ab7304685ffe49c0d35 | 52 | py | Python | __init__.py | HarisNaveed17/aws-boxdetector | e71daebbebe9dc847bdad70d2ea2fe859fede587 | [
"Apache-2.0"
] | null | null | null | __init__.py | HarisNaveed17/aws-boxdetector | e71daebbebe9dc847bdad70d2ea2fe859fede587 | [
"Apache-2.0"
] | null | null | null | __init__.py | HarisNaveed17/aws-boxdetector | e71daebbebe9dc847bdad70d2ea2fe859fede587 | [
"Apache-2.0"
] | null | null | null | from pipeline import *
box_detection = BoxDetector() | 26 | 29 | 0.807692 |
b8746c0bb1705159c5c6690183e9699670c24d04 | 217 | bzl | Python | bazel_versions.bzl | pennig/rules_xcodeproj | 109ab85a82954ea38f0529eafc291f5ce6f63483 | [
"MIT"
] | 1 | 2022-03-31T09:13:24.000Z | 2022-03-31T09:13:24.000Z | bazel_versions.bzl | pennig/rules_xcodeproj | 109ab85a82954ea38f0529eafc291f5ce6f63483 | [
"MIT"
] | null | null | null | bazel_versions.bzl | pennig/rules_xcodeproj | 109ab85a82954ea38f0529eafc291f5ce6f63483 | [
"MIT"
] | null | null | null | """Specifies the supported Bazel versions."""
CURRENT_BAZEL_VERSION = "5.0.0"
OTHER_BAZEL_VERSIONS = [
"6.0.0-pre.20220223.1",
]
SUPPORTED_BAZEL_VERSIONS = [
CURRENT_BAZEL_VERSION,
] + OTHER_BAZEL_VERSIONS
| 18.083333 | 45 | 0.728111 |
b874c67d7c0255eb46088b631d745bcaf2f71c70 | 1,372 | py | Python | Pymug/server/game/parse.py | Aitocir/UnfoldingWorld | 70606eec694f006ccd6687912bce7b75d623287e | [
"MIT"
] | 2 | 2019-08-30T08:26:44.000Z | 2021-04-09T14:22:09.000Z | Pymug/server/game/parse.py | Aitocir/UnfoldingWorld | 70606eec694f006ccd6687912bce7b75d623287e | [
"MIT"
] | null | null | null | Pymug/server/game/parse.py | Aitocir/UnfoldingWorld | 70606eec694f006ccd6687912bce7b75d623287e | [
"MIT"
] | null | null | null |
# parse user command text | 26.384615 | 52 | 0.487609 |
b876fc588cb708294748bda2b97c2a9bb2b7cc83 | 539 | py | Python | files/OOP/Encapsulation/Encapsulation 3.py | grzegorzpikus/grzegorzpikus.github.io | 652233e0b98f48a3396583bab2559f5981bac8ad | [
"CC-BY-3.0"
] | null | null | null | files/OOP/Encapsulation/Encapsulation 3.py | grzegorzpikus/grzegorzpikus.github.io | 652233e0b98f48a3396583bab2559f5981bac8ad | [
"CC-BY-3.0"
] | null | null | null | files/OOP/Encapsulation/Encapsulation 3.py | grzegorzpikus/grzegorzpikus.github.io | 652233e0b98f48a3396583bab2559f5981bac8ad | [
"CC-BY-3.0"
] | null | null | null |
my_account = BankAccount()
my_account.set_checking(523.48)
print(my_account.get_checking())
my_account.set_savings(386.15)
print(my_account.get_savings()) | 22.458333 | 54 | 0.747681 |
b877894244ea866fac268797b7d04d857a48c881 | 800 | py | Python | utils/folder_to_list.py | abhatta1234/face_analysis_pytorch | 2abe930c0ca02a1fd819d4710fd9bff392f32f58 | [
"MIT"
] | 27 | 2020-05-19T16:51:42.000Z | 2022-02-28T05:00:16.000Z | utils/folder_to_list.py | abhatta1234/face_analysis_pytorch | 2abe930c0ca02a1fd819d4710fd9bff392f32f58 | [
"MIT"
] | 3 | 2020-04-09T04:46:24.000Z | 2020-10-21T18:57:05.000Z | utils/folder_to_list.py | abhatta1234/face_analysis_pytorch | 2abe930c0ca02a1fd819d4710fd9bff392f32f58 | [
"MIT"
] | 10 | 2020-05-11T19:50:30.000Z | 2022-03-16T11:49:52.000Z | import argparse
from os import listdir, path
import numpy as np
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Folder with classes subfolders to a file to train."
)
parser.add_argument("--folder", "-f", help="Folder to convert.")
parser.add_argument("--output", "-o", help="Output file.")
args = parser.parse_args()
convert(args.folder, args.output)
| 26.666667 | 72 | 0.665 |
b8783b5b039e343b576ed8c99dd5d5e0e166571d | 509 | py | Python | Leetcode/Python Solutions/Strings/ReverseString.py | Mostofa-Najmus-Sakib/Applied-Algorithm | bc656fd655617407856e0ce45b68585fa81c5035 | [
"MIT"
] | 1 | 2020-01-06T02:21:56.000Z | 2020-01-06T02:21:56.000Z | Leetcode/Python Solutions/Strings/ReverseString.py | Mostofa-Najmus-Sakib/Applied-Algorithm | bc656fd655617407856e0ce45b68585fa81c5035 | [
"MIT"
] | null | null | null | Leetcode/Python Solutions/Strings/ReverseString.py | Mostofa-Najmus-Sakib/Applied-Algorithm | bc656fd655617407856e0ce45b68585fa81c5035 | [
"MIT"
] | 3 | 2021-02-22T17:41:01.000Z | 2022-01-13T05:03:19.000Z | """
LeetCode Problem: 344. Reverse String
Link: https://leetcode.com/problems/reverse-string/
Language: Python
Written by: Mostofa Adib Shakib
Time Complexity: O(n)
Space Complexity: O(1)
""" | 23.136364 | 58 | 0.554028 |
b878732e91bebe5ae9b4cd691ecca80c673cb34c | 7,242 | py | Python | packages/verify_layer.py | OpenTrustGroup/scripts | 31ca2ca5bae055113c6f92a2eb75b0c7528902b3 | [
"BSD-3-Clause"
] | null | null | null | packages/verify_layer.py | OpenTrustGroup/scripts | 31ca2ca5bae055113c6f92a2eb75b0c7528902b3 | [
"BSD-3-Clause"
] | null | null | null | packages/verify_layer.py | OpenTrustGroup/scripts | 31ca2ca5bae055113c6f92a2eb75b0c7528902b3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright 2018 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
from common import FUCHSIA_ROOT, get_package_imports, get_product_imports
import json
import os
import subprocess
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Standard names for root packages in a layer.
ROOT_CANONICAL_PACKAGES = [
'buildbot',
'default',
'kitchen_sink',
]
REQUIRED_PRODUCTS = [
'default'
]
# Standard names for packages in a layer.
CANONICAL_PACKAGES = [
'all',
]
# Directories which do not require aggregation.
NO_AGGREGATION_DIRECTORIES = [
'config',
'disabled',
'products',
]
# Non-package files allowed in package directories.
NON_PACKAGE_FILES = [
'README.md',
]
def check_json(packages):
    '''Verifies that all files in the list are JSON files.

    Parses every file (reporting each failure rather than stopping at the
    first) and returns True only if all of them are valid JSON.
    '''
    valid = True
    for path in packages:
        try:
            with open(path, 'r') as handle:
                json.load(handle)
        except ValueError:
            # ValueError also covers json.JSONDecodeError.
            print('Non-JSON file: %s' % path)
            valid = False
    return valid
def check_schema(packages, validator, schema):
    '''Verifies that all files adhere to the schema.

    Runs ``validator schema package`` for every package (not short-circuiting,
    so the validator reports every failure) and returns True only if every
    invocation exits with status 0.
    '''
    exit_codes = [subprocess.call([validator, schema, package]) for package in packages]
    return all(code == 0 for code in exit_codes)
def check_deps_exist(dep_map):
    '''Verifies that all dependencies exist.

    ``dep_map`` maps a package path to the list of file paths it depends on.
    Prints every missing dependency and returns True only if all exist.
    '''
    all_exist = True
    # Bug fix: dict.iteritems() is Python-2 only; .items() works on both.
    for (package, deps) in dep_map.items():
        for dep in deps:
            if not os.path.isfile(dep):
                all_exist = False
                print('Dependency of %s does not exist: %s' % (package, dep))
    return all_exist
def check_all(directory, dep_map, layer, is_root=True):
    '''Verifies that directories contain an "all" package and that this
    package lists all the files in the directory.

    Recurses into sub-directories (skipping NO_AGGREGATION_DIRECTORIES) and
    returns False as soon as any directory is missing its "all" package or
    any contained package fails verification.
    '''
    for dirpath, dirnames, filenames in os.walk(directory):
        # Rebinding (not mutating) dirnames does not prune os.walk's own
        # traversal, but the function returns inside this first iteration,
        # so only the explicit recursion below actually descends.
        dirnames = [d for d in dirnames if d not in NO_AGGREGATION_DIRECTORIES]
        is_clean = True
        for dir in dirnames:
            subdir = os.path.join(dirpath, dir)
            if not check_all(subdir, dep_map, layer, is_root=False):
                is_clean = False
        if not is_clean:
            return False
        # Every aggregated directory must provide an "all" package.
        all_package = os.path.join(dirpath, 'all')
        if not os.path.isfile(all_package):
            print('Directory does not contain an "all" package: %s' % dirpath)
            return False
        # NOTE(review): known_deps is never read below; `verify` is defined
        # elsewhere in the file and presumably uses it -- confirm upstream.
        known_deps = dep_map[all_package]
        has_all_files = True
        for file in filenames:
            # At the root, the canonical root packages and the layer package
            # itself are exempt from aggregation.
            if is_root and (file in ROOT_CANONICAL_PACKAGES or file == layer):
                continue
            if file in CANONICAL_PACKAGES or file in NON_PACKAGE_FILES:
                continue
            package = os.path.join(dirpath, file)
            if not verify(package):
                has_all_files = False
        # Sub-directory "all" packages must be aggregated as well.
        for dir in dirnames:
            package = os.path.join(dirpath, dir, 'all')
            if not verify(package):
                has_all_files = False
        return has_all_files
def check_root(base, layer):
    '''Verifies that all canonical packages are present at the root.

    Checks ROOT_CANONICAL_PACKAGES plus the layer package itself under
    ``base``, printing each missing one, and returns True only if every
    package exists.
    '''
    missing = [name for name in ROOT_CANONICAL_PACKAGES + [layer]
               if not os.path.isfile(os.path.join(base, name))]
    for name in missing:
        print('Missing root package: %s' % name)
    return not missing
def check_product_root(base, layer):
    '''Verified that the default product is present.

    Checks that every REQUIRED_PRODUCTS entry exists under ``base`` and
    prints the missing paths otherwise.  (``layer`` is accepted for signature
    symmetry with check_root but is not used.)
    '''
    missing = [os.path.join(base, product)
               for product in REQUIRED_PRODUCTS
               if not os.path.isfile(os.path.join(base, product))]
    if missing:
        print('Missing products: %s' % missing)
        return False
    return True
if __name__ == '__main__':
return_code = 0
if not main():
print('Errors!')
return_code = 1
sys.exit(return_code)
| 31.081545 | 80 | 0.619994 |
b8796958be709a16a0f0fcd864b552aa54f7203a | 8,064 | py | Python | server/main.py | MrCheka/langidnn | 77788ef1b7c03d3de92cb1f28e6eb7f3a20d262a | [
"MIT"
] | null | null | null | server/main.py | MrCheka/langidnn | 77788ef1b7c03d3de92cb1f28e6eb7f3a20d262a | [
"MIT"
] | 7 | 2020-07-17T01:22:21.000Z | 2022-02-26T10:48:01.000Z | server/main.py | MrCheka/langidnn | 77788ef1b7c03d3de92cb1f28e6eb7f3a20d262a | [
"MIT"
] | null | null | null | import tensorflow as tf
import argparse
import logging
from src.helpers.NNHelper import NNHelper
from src.controller.Controller import Controller
from src.params.Parameters import Parameters
if __name__ == '__main__':
parser = createParser()
namespace = parser.parse_args()
if namespace.mode == 'server':
run_server(namespace)
elif namespace.mode == 'train':
run_train(namespace)
elif namespace.mode == 'test':
run_test(namespace)
else:
logging.error(' . .')
| 48 | 131 | 0.631696 |
b879c0d4ddbe81e9894622e0f700feebdf2b2709 | 1,086 | py | Python | Python/Code/Python3-Base/13_Network/Server/SingleProgressServer.py | hiloWang/notes | 64a637a86f734e4e80975f4aa93ab47e8d7e8b64 | [
"Apache-2.0"
] | 2 | 2020-10-08T13:22:08.000Z | 2021-07-28T14:45:41.000Z | Python/Python3-Base/13_Network/Server/SingleProgressServer.py | flyfire/Programming-Notes-Code | 4b1bdd74c1ba0c007c504834e4508ec39f01cd94 | [
"Apache-2.0"
] | null | null | null | Python/Python3-Base/13_Network/Server/SingleProgressServer.py | flyfire/Programming-Notes-Code | 4b1bdd74c1ba0c007c504834e4508ec39f01cd94 | [
"Apache-2.0"
] | 6 | 2020-08-20T07:19:17.000Z | 2022-03-02T08:16:21.000Z | #########################
#
#########################
"""
connectlisten
receive
closereceive
"""
from socket import *
serSocket = socket(AF_INET, SOCK_STREAM)
# 2MSL
serSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
localAddress = ('', 7788)
serSocket.bind(localAddress)
serSocket.listen(5)
while True:
print('-----------')
newSocket, destinationAddress = serSocket.accept()
print('-----[%s]-----' % str(destinationAddress))
try:
while True:
receiveData = newSocket.recv(1024)
if len(receiveData) > 0:
print('receive[%s]:%s' % (str(destinationAddress), receiveData))
else:
print('[%s]' % str(destinationAddress))
break
finally:
newSocket.close()
# serSocket.close()
| 24.133333 | 80 | 0.64825 |
b87a63d631b48c56ae9ad1ccd48c2c053e4047a5 | 3,098 | py | Python | webapp/kortkatalogen/base/models.py | snickaren/CIPAC | 58455d59734a571e0134d6368d27ee3e65001c9a | [
"Apache-2.0"
] | null | null | null | webapp/kortkatalogen/base/models.py | snickaren/CIPAC | 58455d59734a571e0134d6368d27ee3e65001c9a | [
"Apache-2.0"
] | 2 | 2021-06-01T22:47:10.000Z | 2021-06-10T20:52:49.000Z | webapp/kortkatalogen/base/models.py | snickaren/CIPAC | 58455d59734a571e0134d6368d27ee3e65001c9a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from django.db import models
| 45.558824 | 203 | 0.726598 |
b87adad624fdbc747cbd3966ca19edcc62c0db08 | 2,607 | py | Python | script/raw-word-cloud.py | ranyxr/infoVis | 307c2ffc4c7d6cf87ed000310a1f2b6233bd7a3b | [
"MIT"
] | 2 | 2020-05-27T11:12:41.000Z | 2020-12-17T19:33:41.000Z | script/raw-word-cloud.py | ranyxr/infoVis | 307c2ffc4c7d6cf87ed000310a1f2b6233bd7a3b | [
"MIT"
] | null | null | null | script/raw-word-cloud.py | ranyxr/infoVis | 307c2ffc4c7d6cf87ed000310a1f2b6233bd7a3b | [
"MIT"
] | 3 | 2020-03-18T19:20:24.000Z | 2020-12-17T17:37:24.000Z | import os
import nltk
import spacy
from datetime import datetime
from pyspark.sql import SparkSession
from pyspark.sql.types import StringType, ArrayType
from pyspark.sql.functions import udf, col, explode, collect_list, count
from SYS import COL, MODE, DIR, FILE
nltk.download('stopwords')
os.system("python -m spacy download en_core_web_sm")
nlp = spacy.load("en_core_web_sm")
java8_location = '/Library/Java/JavaVirtualMachines/liberica-jdk-1.8.0_202/Contents/Home'
os.environ['JAVA_HOME'] = java8_location
spark = SparkSession\
.builder\
.appName("A1")\
.getOrCreate()
if __name__ == '__main__':
spark.read.parquet(FILE.word_cloud_data1_uri).groupby(col(COL.token)).count().sort(col(COL.count), ascending=False).show()
# df = get_unprocessed_df()
# df = process_token(df)
# df.write.mode("overwrite").parquet(FILE.word_cloud_data1_uri, compression="gzip")
# if MODE.debug:
# df = df.filter(col(COL.descri).isNotNull())
# df.show()
| 33.857143 | 126 | 0.638282 |
b87d4232f38a23242d6a6192e497347e1e6d8428 | 141 | py | Python | main/ftpServer.py | McUtty/FlowerPlan | b0998835356e8e10fe53cad447bc559df2ac7175 | [
"MIT"
] | null | null | null | main/ftpServer.py | McUtty/FlowerPlan | b0998835356e8e10fe53cad447bc559df2ac7175 | [
"MIT"
] | null | null | null | main/ftpServer.py | McUtty/FlowerPlan | b0998835356e8e10fe53cad447bc559df2ac7175 | [
"MIT"
] | null | null | null | import uftpd
uftpd.stop()
# uftpd.start([port = 21][, verbose = 1])
uftpd.restart()
# Version abfragen
# wenn neuer - Dateien downloaden
| 12.818182 | 41 | 0.687943 |
b87d46b05ec2436786cd95db9a677d1f89cf7d59 | 10,672 | py | Python | Entradas/views.py | ToniIvars/Blog | c2d1674c2c1fdf51749f4b014795b507ed93b45e | [
"MIT"
] | null | null | null | Entradas/views.py | ToniIvars/Blog | c2d1674c2c1fdf51749f4b014795b507ed93b45e | [
"MIT"
] | 4 | 2021-03-30T13:26:38.000Z | 2021-06-10T19:20:56.000Z | Entradas/views.py | ToniIvars/Blog | c2d1674c2c1fdf51749f4b014795b507ed93b45e | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.core.mail import send_mail
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from random import randint
from Entradas.models import entradas_blog, comentarios
from Entradas.forms import FormNuevaEntrada, FormContacto, FormEditarEntrada, SignupForm, LoginForm, FormEditarPerfil
initial_dict_editar={}
initial_dict_crear={}
entrada_a_editar=None
id_entrada=None
username=''
# View 'signup'
codigo_enviado=False
codigo=''
email=''
password=''
# Create your views here.
# @login_required
# @login_required
# @login_required
def signup(request):
    """Register a new user.

    On GET, renders an empty registration form.  On POST, validates the
    submitted data (no spaces in the username, username not already taken,
    matching passwords), creates the account, logs the new user in and
    redirects to 'inicio'.  Any validation failure redirects back to
    'registro' with an error message.
    """
    # NOTE(review): these module-level globals are shared across requests;
    # kept because other views in this module read them.
    global username, codigo_enviado, codigo, email, password
    if request.method=='POST':
        registro=SignupForm(request.POST)
        if registro.is_valid():
            info_registro=registro.cleaned_data
            if ' ' in info_registro['username']:
                messages.error(request, 'El nombre de usuario no puede contener espacios.')
                return redirect('registro')
            else:
                username=info_registro['username']
                password=info_registro['password']
                password2=info_registro['password2']
                email=info_registro['email']
                try:
                    # Raises ObjectDoesNotExist when the username is free.
                    User.objects.get_by_natural_key(username)
                    messages.error(request, 'Este usuario ya existe.')
                    return redirect('registro')
                except ObjectDoesNotExist:
                    if password != password2:
                        # Fixed mojibake in the user-facing message:
                        # 'contraseas' -> 'contraseñas'.
                        messages.error(request, 'Las contraseñas no coinciden.')
                        return redirect('registro')
                    else:
                        user=User.objects.create_user(username, email, password)
                        user.save()
                        login(request, user)
                        messages.success(request, 'El usuario ha sido creado correctamente.')
                        return redirect('inicio')
    else:
        registro=SignupForm()
    # Reached on GET and on an invalid POST (bound form re-rendered with errors).
    return render(request, 'signup.html', {'form':registro})
def login_view(request):
    """Authenticate a user.

    On GET, renders an empty login form.  On POST, authenticates the
    submitted credentials: success logs the user in and redirects to
    'inicio'; failure redirects back to 'iniciar-sesion' with an error.
    """
    global username
    if request.method=='POST':
        inicio_sesion=LoginForm(request.POST)
        if inicio_sesion.is_valid():
            info_inicio=inicio_sesion.cleaned_data
            username=info_inicio['username']
            password=info_inicio['password']
            user=authenticate(request, username=username, password=password)
            if user is not None:
                login(request, user)
                return redirect('inicio')
            else:
                # Fixed mojibake in the user-facing message:
                # 'sesin' -> 'sesión'.
                messages.error(request, 'No ha sido posible iniciar sesión.')
                return redirect('iniciar-sesion')
    else:
        inicio_sesion=LoginForm()
    # Reached on GET and on an invalid POST (bound form re-rendered with errors).
    return render(request, 'login.html', {'form':inicio_sesion})
def logout_view(request):
    """Terminate the current session and send the user to the login page."""
    logout(request)
    return redirect('iniciar-sesion')
# @login_required | 32.048048 | 205 | 0.648801 |
b87f562e23be6f95cf850092c0a407380227775e | 975 | py | Python | setup.py | remiolsen/anglerfish | 5caabebf5864180e5552b3e40de3650fc5fcabd6 | [
"MIT"
] | null | null | null | setup.py | remiolsen/anglerfish | 5caabebf5864180e5552b3e40de3650fc5fcabd6 | [
"MIT"
] | 19 | 2019-10-07T11:14:54.000Z | 2022-03-28T12:36:47.000Z | setup.py | remiolsen/anglerfish | 5caabebf5864180e5552b3e40de3650fc5fcabd6 | [
"MIT"
] | 2 | 2019-05-28T14:15:26.000Z | 2022-03-28T09:28:44.000Z | #!/usr/bin/env python
from setuptools import setup, find_packages
import sys, os
setup(
name='anglerfish',
version='0.4.1',
description='Anglerfish, a tool to demultiplex Illumina libraries from ONT data',
author='Remi-Andre Olsen',
author_email='remi-andre.olsen@scilifelab.se',
url='https://github.com/remiolsen/anglerfish',
license='MIT',
packages = find_packages(),
install_requires=[
'python-levenshtein',
'biopython',
'numpy'
],
scripts=['./anglerfish.py'],
zip_safe=False,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Medical Science Apps."
]
)
| 29.545455 | 85 | 0.645128 |
b87f7b6b8b386385428ae91baa19206d67341ede | 600 | py | Python | files/urls.py | danielchriscarter/part2-django | d4adffa3280431151bb8d1c51f0be2dbffff9dd1 | [
"BSD-2-Clause"
] | null | null | null | files/urls.py | danielchriscarter/part2-django | d4adffa3280431151bb8d1c51f0be2dbffff9dd1 | [
"BSD-2-Clause"
] | null | null | null | files/urls.py | danielchriscarter/part2-django | d4adffa3280431151bb8d1c51f0be2dbffff9dd1 | [
"BSD-2-Clause"
] | null | null | null | from django.urls import path
from . import views
app_name = 'files'
urlpatterns = [
path('', views.index, name='index'),
path('file/<int:file_id>/', views.fileview, name='file'),
path('file/<int:file_id>/edit', views.fileedit, name='fileedit'),
path('dir/<int:dir_id>/', views.dirview, name='directory'),
path('newfile/<int:dir_id>/', views.newfile, name='newfile'),
path('newdir/<int:dir_id>/', views.newdir, name='newdir'),
path('newdir/root/', views.newdir_root, name='newdir_root'),
path('search', views.search, name='search'),
]
| 37.5 | 73 | 0.62 |
b8823356abe70dc72971de117d1caaf078936601 | 1,733 | py | Python | morpheus/algorithms/kmeans.py | amirsh/MorpheusPy | 8eda959e71a3b377c3f6629802bad2bd4f5a5ee6 | [
"Apache-2.0"
] | 12 | 2018-10-04T08:27:33.000Z | 2022-01-11T15:41:29.000Z | morpheus/algorithms/kmeans.py | amirsh/MorpheusPy | 8eda959e71a3b377c3f6629802bad2bd4f5a5ee6 | [
"Apache-2.0"
] | 3 | 2020-09-22T16:18:51.000Z | 2021-12-28T19:01:00.000Z | morpheus/algorithms/kmeans.py | amirsh/MorpheusPy | 8eda959e71a3b377c3f6629802bad2bd4f5a5ee6 | [
"Apache-2.0"
] | 4 | 2019-12-13T17:52:19.000Z | 2021-12-17T12:43:44.000Z | # Copyright 2018 Side Li and Arun Kumar
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy.sparse as sp
from sklearn.base import BaseEstimator, ClusterMixin
| 40.302326 | 104 | 0.660704 |
b8833e3d9f3a2008bcf62eb119ccbf510334b106 | 796 | py | Python | 670/main.py | pauvrepetit/leetcode | 6ad093cf543addc4dfa52d72a8e3c0d05a23b771 | [
"MIT"
] | null | null | null | 670/main.py | pauvrepetit/leetcode | 6ad093cf543addc4dfa52d72a8e3c0d05a23b771 | [
"MIT"
] | null | null | null | 670/main.py | pauvrepetit/leetcode | 6ad093cf543addc4dfa52d72a8e3c0d05a23b771 | [
"MIT"
] | null | null | null | # 670.
#
# 20200905
# huao
print(Solution().maximumSwap(100))
print(Solution().maximumSwap(2736))
print(Solution().maximumSwap(9973))
print(Solution().maximumSwap(98638))
| 22.742857 | 56 | 0.5 |
b8860c8f4169552c8561caf03f121aafce628fa6 | 333 | py | Python | tests/resources/test_codegen/template.py | come2ry/atcoder-tools | d7ecf5c19427848e6c8f0aaa3c1a8af04c467f1b | [
"MIT"
] | 313 | 2016-12-04T13:25:21.000Z | 2022-03-31T09:46:15.000Z | tests/resources/test_codegen/template.py | come2ry/atcoder-tools | d7ecf5c19427848e6c8f0aaa3c1a8af04c467f1b | [
"MIT"
] | 232 | 2016-12-02T22:55:20.000Z | 2022-03-27T06:48:02.000Z | tests/resources/test_codegen/template.py | come2ry/atcoder-tools | d7ecf5c19427848e6c8f0aaa3c1a8af04c467f1b | [
"MIT"
] | 90 | 2017-09-23T15:09:48.000Z | 2022-03-17T03:13:40.000Z | #!/usr/bin/env python3
import sys
def solve(${formal_arguments}):
return
def main():
tokens = iterate_tokens()
${input_part}
solve(${actual_arguments})
if __name__ == '__main__':
main()
| 16.65 | 37 | 0.588589 |
b8862a702744111dde08cc354f165d4d573be5a8 | 21,501 | py | Python | orghtml.py | waynezhang/orgextended | 853ae89f937d302c2dd9dad3ae98aa5c2485faaa | [
"MIT"
] | null | null | null | orghtml.py | waynezhang/orgextended | 853ae89f937d302c2dd9dad3ae98aa5c2485faaa | [
"MIT"
] | null | null | null | orghtml.py | waynezhang/orgextended | 853ae89f937d302c2dd9dad3ae98aa5c2485faaa | [
"MIT"
] | null | null | null | import sublime
import sublime_plugin
import datetime
import re
import regex
from pathlib import Path
import os
import fnmatch
import OrgExtended.orgparse.node as node
from OrgExtended.orgparse.sublimenode import *
import OrgExtended.orgutil.util as util
import OrgExtended.orgutil.navigation as nav
import OrgExtended.orgutil.template as templateEngine
import logging
import sys
import traceback
import OrgExtended.orgfolding as folding
import OrgExtended.orgdb as db
import OrgExtended.asettings as sets
import OrgExtended.orgcapture as capture
import OrgExtended.orgproperties as props
import OrgExtended.orgutil.temp as tf
import OrgExtended.pymitter as evt
import OrgExtended.orgnotifications as notice
import OrgExtended.orgextension as ext
import OrgExtended.orgsourceblock as src
import OrgExtended.orgexporter as exp
import yaml
import sys
import subprocess
import html
log = logging.getLogger(__name__)
# Global properties I AT LEAST want to support.
# Both as a property on the document and in our settings.
#+OPTIONS: num:nil toc:nil
#+REVEAL_TRANS: None/Fade/Slide/Convex/Concave/Zoom
#+REVEAL_THEME: Black/White/League/Sky/Beige/Simple/Serif/Blood/Night/Moon/Solarized
#+Title: Title of Your Talk
#+Author: Your Name
#+Email: Your Email Address or Twitter Handle
RE_CAPTION = regex.compile(r"^\s*[#][+]CAPTION[:]\s*(?P<caption>.*)")
RE_ATTR = regex.compile(r"^\s*[#][+]ATTR_HTML[:](?P<params>\s+[:](?P<name>[a-zA-Z0-9._-]+)\s+(?P<value>([^:]|((?<! )[:]))+))+$")
RE_ATTR_ORG = regex.compile(r"^\s*[#][+]ATTR_ORG[:] ")
RE_SCHEDULING_LINE = re.compile(r"^\s*(SCHEDULED|CLOSED|DEADLINE|CLOCK)[:].*")
RE_DRAWER_LINE = re.compile(r"^\s*[:].+[:]\s*$")
RE_END_DRAWER_LINE = re.compile(r"^\s*[:](END|end)[:]\s*$")
RE_LINK = re.compile(r"\[\[(?P<link>[^\]]+)\](\[(?P<desc>[^\]]+)\])?\]")
RE_UL = re.compile(r"^(?P<indent>\s*)(-|[+])\s+(?P<data>.+)")
RE_STARTQUOTE = re.compile(r"#\+(BEGIN_QUOTE|BEGIN_EXAMPLE|BEGIN_VERSE|BEGIN_CENTER|begin_quote|begin_example|begin_verse|begin_center)")
RE_ENDQUOTE = re.compile(r"#\+(END_QUOTE|END_EXAMPLE|END_VERSE|END_CENTER|end_quote|end_example|end_verse|end_center)")
RE_STARTNOTE = re.compile(r"#\+(BEGIN_NOTES|begin_notes)")
RE_ENDNOTE = re.compile(r"#\+(END_NOTES|end_notes)")
RE_FN_MATCH = re.compile(r"\s*[:]([a-zA-Z0-9-_]+)\s+([^: ]+)?\s*")
RE_STARTSRC = re.compile(r"^\s*#\+(BEGIN_SRC|begin_src|BEGIN:|begin:)\s+(?P<lang>[a-zA-Z0-9]+)")
RE_ENDSRC = re.compile(r"^\s*#\+(END_SRC|end_src|end:|END:)")
RE_RESULTS = re.compile(r"^\s*#\+RESULTS.*")
RE_TABLE_ROW = re.compile(r"^\s*[|]")
RE_TABLE_SEPARATOR = re.compile(r"^\s*[|][-]")
RE_CHECKBOX = re.compile(r"^\[ \] ")
RE_CHECKED_CHECKBOX = re.compile(r"^\[[xX]\] ")
RE_PARTIAL_CHECKBOX = re.compile(r"^\[[-]\] ")
RE_EMPTY_LINE = re.compile(r"^\s*$")
# <!-- multiple_stores height="50%" width="50%" -->
RE_COMMENT_TAG = re.compile(r"^\s*[<][!][-][-]\s+(?P<name>[a-zA-Z0-9_-]+)\s+(?P<props>.*)\s+[-][-][>]")
# Export the entire file using our internal exporter
def sync_up_on_closed():
    """Rebuild today's notification state.

    Delegates to the singleton returned by ``notice.Get()``; presumably
    invoked when a view/file is closed so the agenda stays current --
    TODO confirm against the event wiring elsewhere in this module.
    """
    notice.Get().BuildToday()
| 33.180556 | 188 | 0.612576 |
b887416d23a942756c48311820dd05ec2e0e80d6 | 3,519 | py | Python | qso_toolbox/LBT_MODS_script.py | jtschindler/qso_toolbox | d9864e0f87e0da3952b75949a7b17ae84ba7b839 | [
"MIT"
] | null | null | null | qso_toolbox/LBT_MODS_script.py | jtschindler/qso_toolbox | d9864e0f87e0da3952b75949a7b17ae84ba7b839 | [
"MIT"
] | null | null | null | qso_toolbox/LBT_MODS_script.py | jtschindler/qso_toolbox | d9864e0f87e0da3952b75949a7b17ae84ba7b839 | [
"MIT"
] | null | null | null | import os
import pandas as pd
from qso_toolbox import utils as ut
from qso_toolbox import catalog_tools as ct
targets = pd.read_csv('/Users/schindler/Observations/LBT/MODS/190607-190615/lukas_efficiency_candidates.csv')
offsets = pd.read_csv('')
# query = 'rMeanPSFMag - rMeanApMag < 0.05 and 10 < zMeanPSFMag < 18'
# offsets = ct.get_offset_stars(targets, 'name', 'ps_ra', 'ps_dec', radius=300,
# quality_query=query)
#
# offsets.to_csv('lukas_offsets.csv', index=False)
os.system('modsProject -p LBTB PS1-QSO-LBTMODS')
os.chdir('./PS1-QSO-LBTMODS')
# Create observation and acquisition scripts
coord_list = ut.coord_to_hmsdms(targets['ps_ra'], targets['ps_dec'])
for idx in targets.index:
target_name = targets.loc[idx,'name']
target_mag = targets.loc[idx, 'zmag_AB']
target_priority = targets.loc[idx, 'priority']
# pos_angle =
if target_mag <= 20:
exp_time = 900
else:
exp_time = 1200
make_obs_string = "mkMODSObs -o {} -m red grating -s LS5x60x1.2 -l 1.2 " \
"-rfilter GG495 -e {} -n 1 {}_pr{}".format(target_name,
exp_time,
target_name,
target_priority)
print(make_obs_string)
os.system(make_obs_string)
target_ra_hms = coord_list[idx][0]
target_dec_dms = coord_list[idx][1]
make_acq_string = "mkMODSAcq -o {} -c '{} {}' -g '{} {}' -p {} -m " \
"longslit -a Red -f z_sdss -s LS5x60x1.2 -l 1.2 {}_pr{}".format(target_name,
target_ra_hms,
target_dec_dms,
target_ra_hms,
target_dec_dms,
pos_angle,
target_name,
target_priority)
print(make_acq_string)
os.system(make_acq_string)
# Create the blind offset acquistion scripts
for idx in targets.index:
target_name = targets.loc[idx,'name']
target_priority = targets.loc[idx, 'priority']
acq_filename = '{}_pr{}.acq'.format(target_name, target_priority)
blind_acq_filename = '{}_pr{}_blind.acq'.format(target_name,
target_priority)
target_offsets = offsets.query('target_name=="{}"'.format(target_name))
if target_offsets.shape[0] > 0 :
# Take first offset
dra = target_offsets.loc[target_offsets.index[0], 'dra_offset']
ddec = target_offsets.loc[target_offsets.index[0], 'ddec_offset']
file = open('./{}'.format(acq_filename), 'r')
file_lines = file.readlines()[:-2]
file.close()
new_acq_file = open('./{}'.format(blind_acq_filename), 'w')
for line in file_lines:
new_acq_file.write(line)
new_acq_file.write(" PAUSE\n")
new_acq_file.write(" syncoffset\n")
new_acq_file.write(" PAUSE\n")
new_acq_file.write(" OFFSET {} {} rel\n".format(dra, ddec))
new_acq_file.write(" UPDATEPOINTING\n")
new_acq_file.write(" SlitGO\n")
new_acq_file.write(" PAUSE\n")
new_acq_file.write("\n")
new_acq_file.write("end\n")
new_acq_file.close()
| 37.43617 | 109 | 0.545041 |
b8876be3ac0b9f4743f3b55d348997ace9a6d95d | 1,451 | py | Python | EASTAR/main/migrations/0008_auto_20191005_0012.py | DightMerc/EASTAR | 04a3578932f8b4b842e0898513ef279c2f750f48 | [
"Apache-2.0"
] | 1 | 2020-09-21T16:46:19.000Z | 2020-09-21T16:46:19.000Z | EASTAR/main/migrations/0008_auto_20191005_0012.py | DightMerc/EASTAR | 04a3578932f8b4b842e0898513ef279c2f750f48 | [
"Apache-2.0"
] | null | null | null | EASTAR/main/migrations/0008_auto_20191005_0012.py | DightMerc/EASTAR | 04a3578932f8b4b842e0898513ef279c2f750f48 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.6 on 2019-10-04 19:12
from django.db import migrations, models
| 30.87234 | 89 | 0.583046 |
b887c62ca86e34d3408f8bf9208020ecb0064fd5 | 152 | py | Python | archived-stock-trading-bot-v1/utils/alerts.py | Allcallofduty10/stock-trading-bot | 54e608b3c0b95b87e7753b065307fc23a045e230 | [
"MIT"
] | 101 | 2020-05-20T02:17:45.000Z | 2022-03-31T12:22:09.000Z | archived-stock-trading-bot-v1/utils/alerts.py | Allcallofduty10/stock-trading-bot | 54e608b3c0b95b87e7753b065307fc23a045e230 | [
"MIT"
] | 10 | 2020-09-02T14:55:12.000Z | 2022-02-21T08:50:48.000Z | archived-stock-trading-bot-v1/utils/alerts.py | Allcallofduty10/stock-trading-bot | 54e608b3c0b95b87e7753b065307fc23a045e230 | [
"MIT"
] | 33 | 2021-02-13T15:38:51.000Z | 2022-03-21T10:39:15.000Z | import os
from sys import platform
| 16.888889 | 33 | 0.578947 |
b88943d6ad79d038afe5d44eee2909d9f74948cd | 1,685 | py | Python | tests/test_company_apis.py | elaoshi/my_planet_flask_api_backend_with_mongo | 7795034a14783a15772fae649c4f2c918b4b36f0 | [
"Apache-2.0"
] | null | null | null | tests/test_company_apis.py | elaoshi/my_planet_flask_api_backend_with_mongo | 7795034a14783a15772fae649c4f2c918b4b36f0 | [
"Apache-2.0"
] | 3 | 2020-04-02T23:48:46.000Z | 2021-06-10T22:43:22.000Z | tests/test_company_apis.py | elaoshi/my_planet_flask_api_backend_with_mongo | 7795034a14783a15772fae649c4f2c918b4b36f0 | [
"Apache-2.0"
] | null | null | null |
from starlette.testclient import TestClient
import pytest,os
# from server.app import app
import json
import requests
from faker import Faker
fake = Faker()
# The root url of the flask app
url = 'http://127.0.0.1:5000/employee'
| 23.402778 | 54 | 0.636202 |
b889e8215d671ac9152cd6ccc561184f07b5f430 | 9,491 | py | Python | doc2json/grobid2json/grobid/grobid_client.py | josephcc/s2orc-doc2json | 8a6a21b7a8a3c6ad11cd42bdd0d46ee32a5a990d | [
"Apache-2.0"
] | null | null | null | doc2json/grobid2json/grobid/grobid_client.py | josephcc/s2orc-doc2json | 8a6a21b7a8a3c6ad11cd42bdd0d46ee32a5a990d | [
"Apache-2.0"
] | null | null | null | doc2json/grobid2json/grobid/grobid_client.py | josephcc/s2orc-doc2json | 8a6a21b7a8a3c6ad11cd42bdd0d46ee32a5a990d | [
"Apache-2.0"
] | null | null | null | import os
import io
import json
import argparse
import time
import glob
from doc2json.grobid2json.grobid.client import ApiClient
import ntpath
from typing import List
'''
This version uses the standard ProcessPoolExecutor for parallelizing the concurrent calls to the GROBID services.
Given the limits of ThreadPoolExecutor (input stored in memory, blocking Executor.map until the whole input
is acquired), it works with batches of PDF of a size indicated in the config.json file (default is 1000 entries).
We are moving from first batch to the second one only when the first is entirely processed - which means it is
slightly sub-optimal, but should scale better. However acquiring a list of million of files in directories would
require something scalable too, which is not implemented for the moment.
'''
# GROBID host; overridable via the GROBID_URL environment variable
# (useful when the service runs in a container or on another machine).
SERVER = 'localhost'
if 'GROBID_URL' in os.environ:
    SERVER = os.environ['GROBID_URL']

# Settings used when no --config file is supplied on the command line.
# Keys mirror the config.json schema described in the module docstring.
DEFAULT_GROBID_CONFIG = {
    "grobid_server": SERVER,
    "grobid_port": "8070",
    "batch_size": 1000,  # PDFs processed per batch (see module docstring)
    "sleep_time": 5,
    "generateIDs": False,
    "consolidate_header": False,
    "consolidate_citations": False,
    "include_raw_citations": True,
    "segment_sentences": True,
    "include_coordinates": ['s', 'bib', 'biblStruct', 'ref'],
    "include_raw_affiliations": False,
    "max_workers": 2,
}
if __name__ == "__main__":
    # Command-line front end: parse arguments, run one GROBID service over a
    # directory of PDFs, and report wall-clock runtime.
    parser = argparse.ArgumentParser(description="Client for GROBID services")
    parser.add_argument("service", help="one of [processFulltextDocument, processHeaderDocument, processReferences]")
    parser.add_argument("--input", default=None, help="path to the directory containing PDF to process")
    parser.add_argument("--output", default=None, help="path to the directory where to put the results")
    parser.add_argument("--config", default=None, help="path to the config file, default is ./config.json")
    args = parser.parse_args()

    input_path = args.input
    # Fall back to the built-in defaults when no config file is given.
    config = json.load(open(args.config)) if args.config else DEFAULT_GROBID_CONFIG
    output_path = args.output
    service = args.service
    # GrobidClient is defined elsewhere in this module (outside this chunk).
    client = GrobidClient(config=config)

    start_time = time.time()
    client.process(input_path, output_path, service)
    runtime = round(time.time() - start_time, 3)
    print("runtime: %s seconds " % (runtime))
| 35.02214 | 117 | 0.596249 |
b88a756fba8d93702364d516718c809d4476d07c | 14,247 | py | Python | hm_gerber_ex/rs274x.py | halfmarble/halfmarble-panelizer | 73489a0b5d0d46e6d363f6d14454d91fab62f8e3 | [
"MIT"
] | null | null | null | hm_gerber_ex/rs274x.py | halfmarble/halfmarble-panelizer | 73489a0b5d0d46e6d363f6d14454d91fab62f8e3 | [
"MIT"
] | 5 | 2022-01-15T13:32:54.000Z | 2022-01-30T15:18:15.000Z | hm_gerber_ex/rs274x.py | halfmarble/halfmarble-panelizer | 73489a0b5d0d46e6d363f6d14454d91fab62f8e3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2022 HalfMarble LLC
# Copyright 2019 Hiroshi Murayama <opiopan@gmail.com>
from hm_gerber_tool.cam import FileSettings
import hm_gerber_tool.rs274x
from hm_gerber_tool.gerber_statements import *
from hm_gerber_ex.gerber_statements import AMParamStmt, AMParamStmtEx, ADParamStmtEx
from hm_gerber_ex.utility import rotate
import re
| 41.057637 | 110 | 0.563206 |
b88ad3cd16814edcf01716b7796117d85426c826 | 691 | py | Python | salamander/mktcalendar.py | cclauss/statarb | a59366f70122c355fc93a2391362a3e8818a290e | [
"Apache-2.0"
] | 51 | 2019-02-01T19:43:37.000Z | 2022-03-16T09:07:03.000Z | salamander/mktcalendar.py | cclauss/statarb | a59366f70122c355fc93a2391362a3e8818a290e | [
"Apache-2.0"
] | 2 | 2019-02-23T18:54:22.000Z | 2019-11-09T01:30:32.000Z | salamander/mktcalendar.py | cclauss/statarb | a59366f70122c355fc93a2391362a3e8818a290e | [
"Apache-2.0"
] | 35 | 2019-02-08T02:00:31.000Z | 2022-03-01T23:17:00.000Z | from pandas.tseries.holiday import AbstractHolidayCalendar, Holiday, nearest_workday, \
USMartinLutherKingJr, USPresidentsDay, GoodFriday, USMemorialDay, \
USLaborDay, USThanksgivingDay
from pandas.tseries.offsets import CustomBusinessDay
TDay = CustomBusinessDay(calendar=USTradingCalendar())
| 31.409091 | 88 | 0.691751 |
b88df16653e927e74a8e50a7da42dd7a7bec9732 | 3,063 | py | Python | wall/views.py | pydanny/pinax-wall | 1e3df60dad394292be9024e2ad90a07bf1a0b395 | [
"MIT"
] | 1 | 2019-08-16T20:05:40.000Z | 2019-08-16T20:05:40.000Z | wall/views.py | pydanny/pinax-wall | 1e3df60dad394292be9024e2ad90a07bf1a0b395 | [
"MIT"
] | null | null | null | wall/views.py | pydanny/pinax-wall | 1e3df60dad394292be9024e2ad90a07bf1a0b395 | [
"MIT"
] | null | null | null | """ Sample view for group aware projects """
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from uni_form.helpers import FormHelper, Submit, Reset
from wall.models import Post
from wall.forms import WallForm
| 31.57732 | 91 | 0.601698 |
b88fca2ebe335e0075492e9a81b964d8fd3677ae | 2,595 | py | Python | cdc/src/NoteDeid.py | ZebinKang/cdc | a32fe41892021d29a1d9c534728a92b67f9b6cea | [
"MIT"
] | null | null | null | cdc/src/NoteDeid.py | ZebinKang/cdc | a32fe41892021d29a1d9c534728a92b67f9b6cea | [
"MIT"
] | null | null | null | cdc/src/NoteDeid.py | ZebinKang/cdc | a32fe41892021d29a1d9c534728a92b67f9b6cea | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
The MIT License (MIT)
Copyright (c) 2016 Wei-Hung Weng
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Title : Clinical Document Classification Pipeline: Deidentification module (deid)
Author : Wei-Hung Weng
Created : 10/21/2016
'''
import sys, os, time
import subprocess
import commands
import string
import pandas as pd
| 47.181818 | 391 | 0.665511 |
b8922ccaf7aecd6b398d579f4ddc8b100bfa96aa | 2,540 | py | Python | turbo_transformers/python/tests/qbert_layer_test.py | xcnick/TurboTransformers | 48b6ba09af2219616c6b97cc5c09222408e080c2 | [
"BSD-3-Clause"
] | 1 | 2021-11-04T07:12:46.000Z | 2021-11-04T07:12:46.000Z | turbo_transformers/python/tests/qbert_layer_test.py | xcnick/TurboTransformers | 48b6ba09af2219616c6b97cc5c09222408e080c2 | [
"BSD-3-Clause"
] | null | null | null | turbo_transformers/python/tests/qbert_layer_test.py | xcnick/TurboTransformers | 48b6ba09af2219616c6b97cc5c09222408e080c2 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (C) 2020 THL A29 Limited, a Tencent company.
# All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
# https://opensource.org/licenses/BSD-3-Clause
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# See the AUTHORS file for names of contributors.
import torch
import transformers
import turbo_transformers
from turbo_transformers.layers.utils import convert2tt_tensor, try_convert, convert_returns_as_type, ReturnType
import time
model = transformers.BertModel.from_pretrained('bert-base-uncased')
model.eval()
torch.set_grad_enabled(False)
bertlayer = model.encoder.layer[0]
qbertlayer = turbo_transformers.QBertLayer.from_torch(bertlayer)
torchqbertlayer = torch.quantization.quantize_dynamic(bertlayer)
lens = [40, 60]
loops = 1
for l in lens:
input_tensor = torch.rand((1, l, 768))
attention_mask = torch.ones((1, l))
attention_mask = attention_mask[:, None, None, :]
attention_mask = (1.0 - attention_mask) * -10000.0
print("seq length =", l)
start = time.time()
for i in range(loops):
res = bertlayer(input_tensor, attention_mask, output_attentions=True)
end = time.time()
print("torch fp32 layer QPS =", loops / (end - start))
start = time.time()
for i in range(loops):
res2 = qbertlayer(input_tensor, attention_mask, output_attentions=True)
end = time.time()
print("turbo fp32+int8 layer QPS =", loops / (end - start))
start = time.time()
for i in range(loops):
res3 = torchqbertlayer(input_tensor,
attention_mask,
output_attentions=True)
end = time.time()
print("torch int8 layer QPS =", loops / (end - start))
print(
"max error against torch fp32 =",
max(torch.max(torch.abs(res[0] - res2[0])),
torch.max(torch.abs(res[1] - res2[1]))))
print(
"max error against torch int8 =",
max(torch.max(torch.abs(res3[0] - res2[0])),
torch.max(torch.abs(res3[1] - res2[1]))))
print(
"max error between torch int8 and torch fp32 =",
max(torch.max(torch.abs(res3[0] - res[0])),
torch.max(torch.abs(res3[1] - res[1]))))
| 36.285714 | 111 | 0.686614 |
b892a01e77462df62b5d9db18651eb65be2d4626 | 571 | py | Python | datasets/__init__.py | cogito233/text-autoaugment | cae3cfddaba9da01cf291f975e5cf4f734634b51 | [
"MIT"
] | 1 | 2021-09-08T12:00:11.000Z | 2021-09-08T12:00:11.000Z | datasets/__init__.py | cogito233/text-autoaugment | cae3cfddaba9da01cf291f975e5cf4f734634b51 | [
"MIT"
] | null | null | null | datasets/__init__.py | cogito233/text-autoaugment | cae3cfddaba9da01cf291f975e5cf4f734634b51 | [
"MIT"
] | null | null | null | from .imdb import IMDB
from .sst5 import SST5
from .sst2 import SST2
from .trec import TREC
from .yelp2 import YELP2
from .yelp5 import YELP5
__all__ = ('IMDB', 'SST2', 'SST5', 'TREC', 'YELP2', 'YELP5')
| 24.826087 | 73 | 0.640981 |
b892a5808d0820ec82e081a7e4b50b19b5c795cf | 2,396 | py | Python | src/pypleasant/artifacts.py | weibell/pypleasant | de635994113b6cee7e2c5cfb8a5078921a8805cf | [
"MIT"
] | 3 | 2021-01-09T13:45:23.000Z | 2021-07-07T22:54:28.000Z | src/pypleasant/artifacts.py | weibell/pypleasant | de635994113b6cee7e2c5cfb8a5078921a8805cf | [
"MIT"
] | 2 | 2021-01-10T17:39:16.000Z | 2021-01-19T11:43:12.000Z | src/pypleasant/artifacts.py | weibell/pypleasant | de635994113b6cee7e2c5cfb8a5078921a8805cf | [
"MIT"
] | 3 | 2021-01-10T11:59:51.000Z | 2021-08-15T10:45:24.000Z | import base64
import pathlib
from collections import UserDict
from pypleasant.api import PleasantAPI
| 34.724638 | 101 | 0.680718 |
b894aa5fcf5ee5c3c91a08a010d10cc426cae285 | 857 | py | Python | npc/gui/util.py | Arent128/npc | c8a1e227a1d4d7c540c4f4427b611ffc290535ee | [
"MIT"
] | null | null | null | npc/gui/util.py | Arent128/npc | c8a1e227a1d4d7c540c4f4427b611ffc290535ee | [
"MIT"
] | null | null | null | npc/gui/util.py | Arent128/npc | c8a1e227a1d4d7c540c4f4427b611ffc290535ee | [
"MIT"
] | null | null | null | # Helpers common to the gui
from contextlib import contextmanager
from PyQt5 import QtWidgets
def show_error(title, message, parent):
"""
Helper to show a modal error window
Args:
title (str): Title for the error window
message (str): Message text to display
parent (object): Parent window for the modal. This window will be
disabled while the modal is visible. Defaults to the main window.
"""
QtWidgets.QMessageBox.warning(parent, title, message, QtWidgets.QMessageBox.Ok)
| 27.645161 | 83 | 0.679113 |
b894e0eb0f3f3a5eab5eca43855c560fff5104ea | 2,040 | py | Python | meterbus/wtelegram_header.py | noda/pyMeterBus | a1bb6b6ef9b3db4583dfb2b154e4f65365dee9d9 | [
"BSD-3-Clause"
] | 44 | 2016-12-11T14:43:14.000Z | 2022-03-17T18:31:14.000Z | meterbus/wtelegram_header.py | noda/pyMeterBus | a1bb6b6ef9b3db4583dfb2b154e4f65365dee9d9 | [
"BSD-3-Clause"
] | 13 | 2017-11-29T14:36:34.000Z | 2020-12-20T18:33:35.000Z | meterbus/wtelegram_header.py | noda/pyMeterBus | a1bb6b6ef9b3db4583dfb2b154e4f65365dee9d9 | [
"BSD-3-Clause"
] | 32 | 2015-09-15T12:23:19.000Z | 2022-03-22T08:32:22.000Z | import simplejson as json
from .telegram_field import TelegramField
| 24 | 60 | 0.601471 |
b8957a58c70fbb1e911970ddbd303c74a8951fba | 2,966 | py | Python | clitt/actions.py | Leviosar/tt | f6099ca77736d17f46121c76a0763d587536467e | [
"MIT"
] | null | null | null | clitt/actions.py | Leviosar/tt | f6099ca77736d17f46121c76a0763d587536467e | [
"MIT"
] | null | null | null | clitt/actions.py | Leviosar/tt | f6099ca77736d17f46121c76a0763d587536467e | [
"MIT"
] | null | null | null | import tweepy
from .interface import show_message, show_tweet, show_user
def dm(api: tweepy.API, target: str, content: str):
    """
    Sends a direct message to target user

    Keyword arguments:
    api -- API instance for handling the request
    target -- Target user's screename (e.g. @jake/jake)
    content -- String that will be sent as message
    """
    # Accept both "@jake" and "jake": drop any '@' before resolving the user.
    recipient = api.get_user(target.replace("@", ""))
    api.send_direct_message(recipient.id, content)
def search(api: tweepy.API, query: str, count: int):
    """
    Searches for tweets containing the input string

    Keyword arguments:
    api -- API instance for handling the request
    query -- String passed as search query for the API
    count -- Maximum number of results the API will return
    """
    # Render each matching tweet as it comes back from the API.
    for tweet in api.search(query, count=count):
        show_tweet(tweet)
def user(api: tweepy.API, query: str, count: int):
    """
    Searches for users related to the input string

    Keyword arguments:
    api -- API instance for handling the request
    query -- String passed as search query for the API
    count -- Maximum number of results the API will return
    """
    # Render each matching user profile.
    for match in api.search_users(query, count=count):
        show_user(match)
def post(api: tweepy.API, content: str):
    """
    Update the status for currently logged user (basically, this method tweets)

    Keyword arguments:
    api -- API instance for handling the request
    content -- String that will be posted
    """
    api.update_status(content)
def chat(api: tweepy.API, user: str):
    """
    Search and displays private chat with target user

    Fetches up to 100 recent direct messages, orders them oldest-first,
    and prints only those exchanged between the logged-in account and
    the target user (incoming left-aligned, outgoing reversed).

    Keyword arguments:
    api -- API instance for handling the request
    user -- Target user's screename (e.g. @jake/jake)
    """
    try:
        user = user.replace("@", "")
        user = api.get_user(user)
        me = api.me()
        messages = api.list_direct_messages(count=100)
        # created_timestamp is a string; sort numerically, oldest first.
        for message in sorted(
            messages, key=lambda message: int(message.created_timestamp)
        ):
            # Incoming: sent by the target user (to anyone).
            if int(message.message_create["sender_id"]) == user.id:
                show_message(message, user)
            # Outgoing: sent by me specifically to the target user.
            if (
                int(message.message_create["sender_id"]) == me.id
                and int(message.message_create["target"]["recipient_id"]) == user.id
            ):
                show_message(message, me, reverse=True)
    except tweepy.TweepError:
        # get_user raises TweepError for unknown screen names.
        print("Sorry, user not found")
def read(api: tweepy.API, count: int):
    """
    Read currently logged user's timeline

    Keyword arguments:
    api -- API instance for handling the request
    count -- Maximum number of results the API will return
    """
    # Render each tweet from the home timeline.
    for tweet in api.home_timeline(count=count):
        show_tweet(tweet)
| 28.796117 | 84 | 0.619016 |
b89711b17746d4b4271b066247a24c7b87a987eb | 5,711 | py | Python | test/test_html.py | dominickpastore/pymd4c | 7fac37348b1e2520532c83bcb84b9cfecbcdff0c | [
"MIT"
] | 7 | 2020-04-30T08:27:44.000Z | 2022-02-09T12:23:07.000Z | test/test_html.py | dominickpastore/pymd4c | 7fac37348b1e2520532c83bcb84b9cfecbcdff0c | [
"MIT"
] | 23 | 2020-05-29T14:58:46.000Z | 2021-11-10T23:44:25.000Z | test/test_html.py | dominickpastore/pymd4c | 7fac37348b1e2520532c83bcb84b9cfecbcdff0c | [
"MIT"
] | 2 | 2020-09-17T19:40:44.000Z | 2021-07-13T16:43:18.000Z | # Based on spec_tests.py from
# https://github.com/commonmark/commonmark-spec/blob/master/test/spec_tests.py
# and
# https://github.com/github/cmark-gfm/blob/master/test/spec_tests.py
import sys
import os
import os.path
import re
import md4c
import md4c.domparser
import pytest
from normalize import normalize_html
extension_flags = {
'table': md4c.MD_FLAG_TABLES,
'urlautolink': md4c.MD_FLAG_PERMISSIVEURLAUTOLINKS,
'emailautolink': md4c.MD_FLAG_PERMISSIVEEMAILAUTOLINKS,
'wwwautolink': md4c.MD_FLAG_PERMISSIVEWWWAUTOLINKS,
'tasklist': md4c.MD_FLAG_TASKLISTS,
'strikethrough': md4c.MD_FLAG_STRIKETHROUGH,
'underline': md4c.MD_FLAG_UNDERLINE,
'wikilink': md4c.MD_FLAG_WIKILINKS,
'latexmath': md4c.MD_FLAG_LATEXMATHSPANS,
#TODO Add test cases for the rest of the flags
# (including combination flags)
}
def skip_if_older_version(running_version, test_version):
    """Skip the current test if the running version of MD4C is older than the
    version required for the test.

    No-op when either version is unknown (None).

    :param running_version: Running version of MD4C, e.g. "0.4.8"
    :type running_version: str
    :param test_version: Version of MD4C required for the test
    :type test_version: str
    """
    if running_version is None or test_version is None:
        return
    running = tuple(int(x) for x in running_version.split('.'))
    required = tuple(int(x) for x in test_version.split('.'))
    # Lexicographic tuple comparison fixes the old element-wise loop, which
    # wrongly skipped whenever ANY later component was smaller (e.g. running
    # "1.0" vs required "0.9" hit 0 < 9 and skipped).  Tuple comparison also
    # handles differing lengths: (0, 4) < (0, 4, 1) is True, so a longer
    # required version with a nonzero tail still triggers the skip.
    if running < required:
        pytest.skip("Test requires newer MD4C")
#TODO Test keyword arguments for flags
#TODO Test HTML flags
#TODO Test mixing keyword arguments and traditional flags
| 33.994048 | 78 | 0.615304 |
b897120ea53c19ba1923fce20e96449f3e0b8393 | 1,737 | py | Python | codes/fetch.py | Pregaine/debian | 9f4838d0eb9f38c9b8d3bf035a74f7d713bf8a95 | [
"Linux-OpenIB"
] | null | null | null | codes/fetch.py | Pregaine/debian | 9f4838d0eb9f38c9b8d3bf035a74f7d713bf8a95 | [
"Linux-OpenIB"
] | null | null | null | codes/fetch.py | Pregaine/debian | 9f4838d0eb9f38c9b8d3bf035a74f7d713bf8a95 | [
"Linux-OpenIB"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Usage: Download all stock code info from TWSE
#
# TWSE equities =
# TPEx equities =
#
import csv
from collections import namedtuple
import requests
from lxml import etree
TWSE_EQUITIES_URL = 'http://isin.twse.com.tw/isin/C_public.jsp?strMode=2'
TPEX_EQUITIES_URL = 'http://isin.twse.com.tw/isin/C_public.jsp?strMode=4'
# Record layout shared by TWSE and TPEx equity listings.
ROW = namedtuple('Row', ['type', 'code', 'name', 'ISIN', 'start', 'market', 'group', 'CFI'])


def make_row_tuple(typ, row):
    """Build a ROW record from one scraped table row.

    Column 1 holds "<code>\u3000<name>" joined by an ideographic space
    (U+3000); both halves are stripped of ASCII spaces.  Columns 2..-2
    fill the remaining ROW fields.
    """
    code, name = (part.replace(' ', '') for part in row[1].split('\u3000'))
    return ROW(typ, code, name, *row[2:-1])
if __name__ == '__main__':
    # Dump the listings for both exchanges to CSV files.  to_csv is
    # presumably defined elsewhere in this module (not visible in this
    # excerpt) -- TODO confirm.
    to_csv( TWSE_EQUITIES_URL, 'twse_equities.csv' )
    to_csv( TPEX_EQUITIES_URL, 'tpex_equities.csv' )
| 27.140625 | 96 | 0.580887 |
b898caf8e8371912904209cfea669349d7d43e84 | 453 | py | Python | SimplePyGA/FitnessCalc/__init__.py | UglySoftware/SimplePyGA | 2cc0ef5709800059b323de2be6ea8bf77fb94384 | [
"MIT"
] | 1 | 2019-09-03T17:52:12.000Z | 2019-09-03T17:52:12.000Z | SimplePyGA/FitnessCalc/__init__.py | UglySoftware/SimplePyGA | 2cc0ef5709800059b323de2be6ea8bf77fb94384 | [
"MIT"
] | null | null | null | SimplePyGA/FitnessCalc/__init__.py | UglySoftware/SimplePyGA | 2cc0ef5709800059b323de2be6ea8bf77fb94384 | [
"MIT"
] | 1 | 2019-09-03T17:52:13.000Z | 2019-09-03T17:52:13.000Z | #-----------------------------------------------------------------------
#
# __init__.py (FitnessCalc)
#
# FitnessCalc package init module
#
# Copyright and Distribution
#
# Part of SimplePyGA: Simple Genetic Algorithms in Python
# Copyright (c) 2016 Terry McKiernan (terry@mckiernan.com)
# Released under The MIT License
# See LICENSE file in top-level package folder
#
#----------------------------------------------------------------------- | 32.357143 | 72 | 0.503311 |
b898de5e2e4348a76809bd0da7631a2cc93a7b25 | 3,757 | py | Python | pyaz/billing/invoice/section/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | null | null | null | pyaz/billing/invoice/section/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | null | null | null | pyaz/billing/invoice/section/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | 1 | 2022-02-03T09:12:01.000Z | 2022-02-03T09:12:01.000Z | '''
billing invoice section
'''
from .... pyaz_utils import _call_az
def list(account_name, profile_name):
    '''
    List the invoice sections that a user has access to. The operation is supported only for billing accounts with agreement type Microsoft Customer Agreement.
    Required Parameters:
    - account_name -- The ID that uniquely identifies a billing account.
    - profile_name -- The ID that uniquely identifies a billing profile.
    '''
    # NOTE: the function name mirrors the CLI verb and shadows the builtin
    # `list` inside this module.
    # locals() is exactly the mapping of parameter names to caller-supplied
    # values; _call_az maps it onto "az" CLI arguments.  Do not introduce
    # local variables above this call.
    return _call_az("az billing invoice section list", locals())
def show(account_name, name, profile_name):
    '''
    Get an invoice section by its ID. The operation is supported only for billing accounts with agreement type Microsoft Customer Agreement.
    Required Parameters:
    - account_name -- The ID that uniquely identifies a billing account.
    - name -- The ID that uniquely identifies an invoice section.
    - profile_name -- The ID that uniquely identifies a billing profile.
    '''
    # locals() is exactly the mapping of parameter names to caller-supplied
    # values; _call_az maps it onto "az" CLI arguments.  Do not introduce
    # local variables above this call.
    return _call_az("az billing invoice section show", locals())
def create(account_name, name, profile_name, display_name=None, labels=None, no_wait=None):
    '''
    Creates or updates an invoice section. The operation is supported only for billing accounts with agreement type Microsoft Customer Agreement.
    Required Parameters:
    - account_name -- The ID that uniquely identifies a billing account.
    - name -- The ID that uniquely identifies an invoice section.
    - profile_name -- The ID that uniquely identifies a billing profile.
    Optional Parameters:
    - display_name -- The name of the invoice section.
    - labels -- Dictionary of metadata associated with the invoice section. Expect value: KEY1=VALUE1 KEY2=VALUE2 ...
    - no_wait -- Do not wait for the long-running operation to finish.
    '''
    # locals() is exactly the mapping of parameter names to caller-supplied
    # values; _call_az maps it onto "az" CLI arguments.  Do not introduce
    # local variables above this call.
    return _call_az("az billing invoice section create", locals())
def update(account_name, name, profile_name, display_name=None, labels=None, no_wait=None):
    '''
    Creates or updates an invoice section. The operation is supported only for billing accounts with agreement type Microsoft Customer Agreement.
    Required Parameters:
    - account_name -- The ID that uniquely identifies a billing account.
    - name -- The ID that uniquely identifies an invoice section.
    - profile_name -- The ID that uniquely identifies a billing profile.
    Optional Parameters:
    - display_name -- The name of the invoice section.
    - labels -- Dictionary of metadata associated with the invoice section. Expect value: KEY1=VALUE1 KEY2=VALUE2 ...
    - no_wait -- Do not wait for the long-running operation to finish.
    '''
    # locals() is exactly the mapping of parameter names to caller-supplied
    # values; _call_az maps it onto "az" CLI arguments.  Do not introduce
    # local variables above this call.
    return _call_az("az billing invoice section update", locals())
def wait(account_name, name, profile_name, created=None, custom=None, deleted=None, exists=None, interval=None, timeout=None, updated=None):
    '''
    Place the CLI in a waiting state until a condition of the billing invoice section is met.
    Required Parameters:
    - account_name -- The ID that uniquely identifies a billing account.
    - name -- The ID that uniquely identifies an invoice section.
    - profile_name -- The ID that uniquely identifies a billing profile.
    Optional Parameters:
    - created -- wait until created with 'provisioningState' at 'Succeeded'
    - custom -- Wait until the condition satisfies a custom JMESPath query. E.g. provisioningState!='InProgress', instanceView.statuses[?code=='PowerState/running']
    - deleted -- wait until deleted
    - exists -- wait until the resource exists
    - interval -- polling interval in seconds
    - timeout -- maximum wait in seconds
    - updated -- wait until updated with provisioningState at 'Succeeded'
    '''
    # locals() is exactly the mapping of parameter names to caller-supplied
    # values; _call_az maps it onto "az" CLI arguments.  Do not introduce
    # local variables above this call.
    return _call_az("az billing invoice section wait", locals())
| 45.26506 | 164 | 0.730104 |
b89c391348948a67ee076b201c2356ffbd5b2843 | 418 | py | Python | fifth.py | leephoter/coding-exam | a95fdd6e8477651da811b5b5a93b7214914e9418 | [
"MIT"
] | null | null | null | fifth.py | leephoter/coding-exam | a95fdd6e8477651da811b5b5a93b7214914e9418 | [
"MIT"
] | null | null | null | fifth.py | leephoter/coding-exam | a95fdd6e8477651da811b5b5a93b7214914e9418 | [
"MIT"
] | null | null | null | # abba
# foo bar bar foo
text1 = list(input())
text2 = input().split()
# text1 = set(text1)
print(text1)
print(text2)
for i in range(len(text1)):
if text1[i] == "a":
text1[i] = 1
else:
text1[i] = 0
for i in range(len(text2)):
if text2[i] == "foo":
text2[i] = 1
else:
text2[i] = 0
print(text1)
print(text2)
if (text1 == text2):
print(True)
else:
print(False)
| 14.928571 | 27 | 0.543062 |
b89c5decc2d125e57179ddb7e0fbbf5b7fa1d17a | 864 | py | Python | login_checks.py | mhhoban/basic-blog | 107d6df7c8374ae088097780a15364bb96394664 | [
"MIT"
] | null | null | null | login_checks.py | mhhoban/basic-blog | 107d6df7c8374ae088097780a15364bb96394664 | [
"MIT"
] | null | null | null | login_checks.py | mhhoban/basic-blog | 107d6df7c8374ae088097780a15364bb96394664 | [
"MIT"
] | null | null | null | """
Methods for user login
"""
from cgi import escape
from google.appengine.ext import ndb
def login_fields_complete(post_data):
    """
    Validate that both login fields were filled in.

    :param post_data: mapping of submitted form fields
    :return: {'complete': True, 'user_id': ..., 'password': ...} with the
             values HTML-escaped, or {'complete': False} when either field
             is missing or empty
    """
    # cgi.escape was deprecated in Python 3.2 and removed in 3.8;
    # html.escape is the stdlib replacement (it additionally escapes
    # single quotes, which is strictly safer output).
    from html import escape

    try:
        user_id = escape(post_data['user_id'], quote=True)
    except KeyError:
        user_id = False

    try:
        password = escape(post_data['password'], quote=True)
    except KeyError:
        password = False

    # An empty string escapes to an empty string, so blank fields are
    # rejected here along with missing ones.
    if user_id and password:
        return {'complete': True, 'user_id': user_id, 'password': password}
    else:
        return {'complete': False}
def valid_user_id_check(user_id):
    """
    Check whether a User entity with the given id exists.

    :param user_id: datastore id of the User entity
    :return: True if the user exists, False otherwise
    """
    # Key.get() returns None when no entity is stored under the key, so an
    # identity test is all that is needed (replaces the verbose
    # if/return True/else/return False chain).
    return ndb.Key('User', user_id).get() is not None
| 18 | 75 | 0.611111 |
b89c8ecff52061b4f4230988ae5b6f0af41cff09 | 1,988 | py | Python | models/lstm_hands_enc_dec.py | amjltc295/hand_track_classification | 71fdc980d3150646cd531e28878ff1eb63c7efea | [
"MIT"
] | 6 | 2019-07-08T12:01:17.000Z | 2021-11-01T06:01:28.000Z | models/lstm_hands_enc_dec.py | georkap/hand_track_classification | 962faa1697864e892475989a97fa6ed9c2f1d7b3 | [
"MIT"
] | null | null | null | models/lstm_hands_enc_dec.py | georkap/hand_track_classification | 962faa1697864e892475989a97fa6ed9c2f1d7b3 | [
"MIT"
] | 3 | 2019-07-08T12:25:45.000Z | 2020-06-05T20:27:57.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 16 13:20:51 2018
lstm encoder decoder for hands
@author:
"""
import torch
import torch.nn as nn
from utils.file_utils import print_and_save | 37.509434 | 141 | 0.640845 |
b89dc31dd7495dce4a5df9abb3cb76e616316b5e | 2,201 | py | Python | My_Simple_Chatroom/MyChat_C1.py | WilliamWuLH/Network_Basic_Programming | 284f8d3664340b0270271da5c50d5b8bb7ce8534 | [
"MIT"
] | 1 | 2020-11-29T14:56:22.000Z | 2020-11-29T14:56:22.000Z | My_Simple_Chatroom/MyChat_C1.py | WilliamWuLH/Network_Basic_Programming | 284f8d3664340b0270271da5c50d5b8bb7ce8534 | [
"MIT"
] | null | null | null | My_Simple_Chatroom/MyChat_C1.py | WilliamWuLH/Network_Basic_Programming | 284f8d3664340b0270271da5c50d5b8bb7ce8534 | [
"MIT"
] | null | null | null | import socket
import sys
import os
ip_port = ('127.0.0.1',6666)
sk = socket.socket()
sk.bind(ip_port)
sk.listen(5)
while True:
print('client_1 waiting...')
conn,addr = sk.accept()
client_data = conn.recv(1024).decode()
if '|' in client_data:
FTP_receive(conn, client_data)
else:
print(client_data)
model = input("MODEL: 1:chat 2:send file 3:exit\nYour choice : ")
if model == '1':
massage = input("Client_1 : ").strip()
conn.sendall(("Client_1 : "+massage).encode())
elif model == '2':
FTP_send(conn)
elif model == '3':
conn.close()
break
else:
print('error !')
conn.close()
break
#C:\\Users\\William Wu\\Desktop\\WLH.txt | 26.518072 | 78 | 0.531122 |
b89ddcfaf0bb84573dcaa412350a05a29d779e4c | 557 | py | Python | 2020/day14-1.py | alvaropp/AdventOfCode2017 | 2827dcc18ecb9ad59a1a5fe11e469f31bafb74ad | [
"MIT"
] | null | null | null | 2020/day14-1.py | alvaropp/AdventOfCode2017 | 2827dcc18ecb9ad59a1a5fe11e469f31bafb74ad | [
"MIT"
] | null | null | null | 2020/day14-1.py | alvaropp/AdventOfCode2017 | 2827dcc18ecb9ad59a1a5fe11e469f31bafb74ad | [
"MIT"
] | null | null | null | import re
with open("day14.txt", "r") as f:
data = f.read().splitlines()
memory = {}
for line in data:
if "mask" in line:
mask = line.split(" = ")[-1]
else:
address, value = re.findall("(\d+)", line)
memory[address] = apply_mask(mask, int(value))
print(sum(memory.values()))
| 21.423077 | 56 | 0.59246 |
b89e8a9c6f13124a3751f523a8aebbcf0178bdb6 | 2,429 | py | Python | heppy/modules/host.py | bladeroot/heppy | b597916ff80890ca057b17cdd156e90bbbd9a87a | [
"BSD-3-Clause"
] | null | null | null | heppy/modules/host.py | bladeroot/heppy | b597916ff80890ca057b17cdd156e90bbbd9a87a | [
"BSD-3-Clause"
] | null | null | null | heppy/modules/host.py | bladeroot/heppy | b597916ff80890ca057b17cdd156e90bbbd9a87a | [
"BSD-3-Clause"
] | null | null | null | from ..Module import Module
from ..TagData import TagData
| 33.273973 | 92 | 0.559078 |
b89ead298075031fa1c3f90802815475e6fa1de6 | 594 | py | Python | setup.py | ANich/patois-stopwords | 37e63a0d9df60c7273dd7664a024e02cfcfb04c7 | [
"MIT"
] | null | null | null | setup.py | ANich/patois-stopwords | 37e63a0d9df60c7273dd7664a024e02cfcfb04c7 | [
"MIT"
] | null | null | null | setup.py | ANich/patois-stopwords | 37e63a0d9df60c7273dd7664a024e02cfcfb04c7 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
name='patois-stop-words',
version='0.0.1',
description='A list of patois stop words.',
long_description=open('README.md').read(),
license='MIT',
author='Alexander Nicholson',
author_email='alexj.nich@hotmail.com',
url='https://github.com/ANich/patois-stop-words',
packages=find_packages(),
package_data={
'patois_stop_words': ['words.txt']
},
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
],
keywords='patois'
)
| 27 | 53 | 0.62963 |
b8a056a06ca2c0eb3d9388de7c316004827053f2 | 6,924 | py | Python | ls/joyous/tests/test_manythings.py | tjwalch/ls.joyous | 0ee50d3af71c066bddb2310948b02f74b52ee253 | [
"BSD-3-Clause"
] | 72 | 2018-03-16T16:35:08.000Z | 2022-03-23T08:09:33.000Z | polrev/ls/joyous/tests/test_manythings.py | polrev-github/polrev-django | 99108ace1a5307b14c3eccb424a9f9616e8c02ae | [
"MIT"
] | 41 | 2018-03-25T20:36:52.000Z | 2022-03-10T08:59:27.000Z | polrev/ls/joyous/tests/test_manythings.py | polrev-github/polrev-django | 99108ace1a5307b14c3eccb424a9f9616e8c02ae | [
"MIT"
] | 28 | 2018-08-13T22:36:09.000Z | 2022-03-17T12:24:15.000Z | # ------------------------------------------------------------------------------
# Test Many Things Utilities
# ------------------------------------------------------------------------------
import sys
import datetime as dt
import pytz
from django.test import TestCase
from django.utils import translation
from ls.joyous.utils.manythings import (toOrdinal, toTheOrdinal,
toDaysOffsetStr, hrJoin)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
| 45.254902 | 80 | 0.579434 |
b8a247344049a96e9c957980e4d29c8b38b429af | 21 | py | Python | sgqlc/__init__.py | pberthonneau/sgqlc | 6fb29d381239ba9256589cf177d236eb79b3f8cc | [
"ISC"
] | null | null | null | sgqlc/__init__.py | pberthonneau/sgqlc | 6fb29d381239ba9256589cf177d236eb79b3f8cc | [
"ISC"
] | null | null | null | sgqlc/__init__.py | pberthonneau/sgqlc | 6fb29d381239ba9256589cf177d236eb79b3f8cc | [
"ISC"
] | null | null | null | __version__ = '10.0'
| 10.5 | 20 | 0.666667 |
b8a2fb720f101a2bc0adde648afe4fb533a5d387 | 5,919 | py | Python | brew/migrations/0007_auto_20180307_1842.py | williamlagos/brauerei | 9ba1e22a45ea4f9cb4a58ee02a3149526318e523 | [
"MIT"
] | null | null | null | brew/migrations/0007_auto_20180307_1842.py | williamlagos/brauerei | 9ba1e22a45ea4f9cb4a58ee02a3149526318e523 | [
"MIT"
] | null | null | null | brew/migrations/0007_auto_20180307_1842.py | williamlagos/brauerei | 9ba1e22a45ea4f9cb4a58ee02a3149526318e523 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-03-07 18:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 36.312883 | 164 | 0.573746 |
b8a36c9bf09c46b7b162a8edc1a64e4df507a08f | 1,050 | py | Python | src/box/importer.py | p-ranav/box | 9060343cd4960894da220c4f244535623a54ff98 | [
"MIT"
] | 91 | 2021-07-02T06:00:57.000Z | 2022-03-04T02:51:05.000Z | src/box/importer.py | p-ranav/box | 9060343cd4960894da220c4f244535623a54ff98 | [
"MIT"
] | 1 | 2021-07-07T03:42:32.000Z | 2021-07-07T13:45:00.000Z | src/box/importer.py | p-ranav/box | 9060343cd4960894da220c4f244535623a54ff98 | [
"MIT"
] | 2 | 2021-07-02T06:01:02.000Z | 2021-11-16T21:10:38.000Z | from box.parser import Parser
from box.generator import Generator
import os
| 32.8125 | 79 | 0.60381 |
b8a3792b87c74a7c4d324caa87c2a3a3046ea018 | 319 | py | Python | gargantua/utils/elasticsearch.py | Laisky/laisky-blog | ebe7dadf8fce283ebab0539926ad1be1246e5156 | [
"Apache-2.0"
] | 18 | 2015-05-08T02:06:39.000Z | 2022-03-05T21:36:48.000Z | gargantua/utils/elasticsearch.py | Laisky/laisky-blog | ebe7dadf8fce283ebab0539926ad1be1246e5156 | [
"Apache-2.0"
] | 131 | 2015-01-22T14:54:59.000Z | 2022-02-16T15:14:10.000Z | gargantua/utils/elasticsearch.py | Laisky/laisky-blog | ebe7dadf8fce283ebab0539926ad1be1246e5156 | [
"Apache-2.0"
] | 3 | 2016-01-11T13:52:41.000Z | 2019-06-12T08:54:15.000Z | import json
| 18.764706 | 67 | 0.539185 |
b8a39d8abd6a6f5c91947df5a4f7859aa7716d4d | 957 | py | Python | HackerRank/Python_Learn/03_Strings/13_The_Minion_Game.py | Zubieta/CPP | fb4a3cbf2e4edcc590df15663cd28fb9ecab679c | [
"MIT"
] | 8 | 2017-03-02T07:56:45.000Z | 2021-08-07T20:20:19.000Z | HackerRank/Python_Learn/03_Strings/13_The_Minion_Game.py | zubie7a/Algorithms | fb4a3cbf2e4edcc590df15663cd28fb9ecab679c | [
"MIT"
] | null | null | null | HackerRank/Python_Learn/03_Strings/13_The_Minion_Game.py | zubie7a/Algorithms | fb4a3cbf2e4edcc590df15663cd28fb9ecab679c | [
"MIT"
] | 1 | 2021-08-07T20:20:20.000Z | 2021-08-07T20:20:20.000Z | # https://www.hackerrank.com/challenges/the-minion-game
from collections import Counter
| 36.807692 | 77 | 0.640543 |
b8a3fc4016e36479ead942be25f4a3a83458ff3e | 1,785 | py | Python | Data/girisbolum.py | kemalsanli/wordKontrol | e0de525f0434cb87ef641fd24e23a559a73bd389 | [
"MIT"
] | 1 | 2022-01-25T14:47:32.000Z | 2022-01-25T14:47:32.000Z | Data/girisbolum.py | FehmiDeniz/wordKontrol | e0de525f0434cb87ef641fd24e23a559a73bd389 | [
"MIT"
] | null | null | null | Data/girisbolum.py | FehmiDeniz/wordKontrol | e0de525f0434cb87ef641fd24e23a559a73bd389 | [
"MIT"
] | 3 | 2020-12-19T01:39:09.000Z | 2021-01-21T19:20:28.000Z | import os
from docx import Document
from docx.shared import Inches
from docx import Document
from docx.text.paragraph import Paragraph
| 30.775862 | 114 | 0.610644 |
b8a55b880ef0e7fe5cf28dcff59d6249431111b4 | 7,008 | py | Python | src/damn_at/analyzers/mesh/analyzer_assimp.py | sagar-kohli/peragro-at | 057687d680d4b098c7642db7d406fc0d8df13019 | [
"BSD-3-Clause"
] | 5 | 2016-03-01T01:56:00.000Z | 2021-05-04T03:53:31.000Z | src/damn_at/analyzers/mesh/analyzer_assimp.py | sagar-kohli/peragro-at | 057687d680d4b098c7642db7d406fc0d8df13019 | [
"BSD-3-Clause"
] | 25 | 2016-03-05T07:13:45.000Z | 2017-07-21T16:32:06.000Z | src/damn_at/analyzers/mesh/analyzer_assimp.py | sueastside/damn-at | 057687d680d4b098c7642db7d406fc0d8df13019 | [
"BSD-3-Clause"
] | 12 | 2016-03-05T18:51:09.000Z | 2017-12-09T05:52:37.000Z | """Assimp-based analyzer."""
from __future__ import absolute_import
import os
import logging
import subprocess
import pyassimp
from damn_at import (
mimetypes,
MetaDataType,
MetaDataValue,
FileId,
FileDescription,
AssetDescription,
AssetId
)
from damn_at.pluginmanager import IAnalyzer
from six.moves import map
from io import open
LOG = logging.getLogger(__name__)
def get_assimp_types():
    """Extract all formats supported by the assimp CLI tool and return their
    guessed mime types (entries are None for unknown extensions)."""
    # TODO: not exactly reliable, a lot of unknown mimetypes
    # for those extensions :/
    try:
        pro = subprocess.Popen(
            ['assimp', 'listext'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        out, err = pro.communicate()
        if pro.returncode != 0:
            # Keep out/err as lazy %-args; the original interpolated only
            # the return code but still handed out/err to the logger, which
            # broke log-record formatting.
            LOG.debug("'assimp listext' failed with error code %d! %s %s",
                      pro.returncode, out, err)
            return []
    except OSError as oserror:
        # assimp binary not installed or not on PATH.
        LOG.debug("'assimp listext' failed! %s", oserror)
        return []
    # Popen pipes yield bytes on Python 3; the original str split raised
    # TypeError there.  Decode first and drop any trailing newline.
    if isinstance(out, bytes):
        out = out.decode('utf-8', 'replace')
    extensions = out.strip().split(';')
    mimes = []
    for ext in extensions:
        mime = mimetypes.guess_type('file.' + ext, False)[0]
        LOG.info('Mimetype Info:\n\tExtension: %s\n\tMime: %s', ext, mime)
        mimes.append(mime)
    return mimes
| 33.5311 | 79 | 0.519549 |
b8a5a0dd0bcce0b6f79040d683a76c1d74e9013f | 2,423 | py | Python | src/stk/ea/selection/selectors/remove_batches.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
] | 21 | 2018-04-12T16:25:24.000Z | 2022-02-14T23:05:43.000Z | src/stk/ea/selection/selectors/remove_batches.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
] | 8 | 2019-03-19T12:36:36.000Z | 2020-11-11T12:46:00.000Z | src/stk/ea/selection/selectors/remove_batches.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
] | 5 | 2018-08-07T13:00:16.000Z | 2021-11-01T00:55:10.000Z | """
Remove Batches
==============
"""
from .selector import Selector
| 26.336957 | 68 | 0.529922 |
b8a8d25a2989246934825ecb3bded3322cd894bb | 446 | py | Python | students/migrations/0010_institutionalemail_title_email.py | estudeplus/perfil | 58b847aa226b885ca6a7a128035f09de2322519f | [
"MIT"
] | null | null | null | students/migrations/0010_institutionalemail_title_email.py | estudeplus/perfil | 58b847aa226b885ca6a7a128035f09de2322519f | [
"MIT"
] | 21 | 2019-05-11T18:01:10.000Z | 2022-02-10T11:22:01.000Z | students/migrations/0010_institutionalemail_title_email.py | estudeplus/perfil | 58b847aa226b885ca6a7a128035f09de2322519f | [
"MIT"
] | null | null | null | # Generated by Django 2.2.1 on 2019-06-30 00:31
from django.db import migrations, models
| 23.473684 | 94 | 0.630045 |
b8a9652011ddd210555829c017c928bd04cf38bf | 920 | py | Python | azure_utility_tool/config.py | alextricity25/azure_utility_tool | 2975b5f415e5c64335618e83ed0216b7923c4166 | [
"MIT"
] | 5 | 2020-01-02T03:12:14.000Z | 2020-08-19T02:31:19.000Z | azure_utility_tool/config.py | alextricity25/azure_utility_tool | 2975b5f415e5c64335618e83ed0216b7923c4166 | [
"MIT"
] | null | null | null | azure_utility_tool/config.py | alextricity25/azure_utility_tool | 2975b5f415e5c64335618e83ed0216b7923c4166 | [
"MIT"
] | 2 | 2020-03-16T00:19:06.000Z | 2020-08-20T19:31:10.000Z | """
Author: Miguel Alex Cantu
Email: miguel.can2@gmail.com
Date: 12/21/2019
Description:
Loads Azure Utility Tool configuration file. The configuration
file is a blend of what the Microsoft Authentication Library
requires and some extra directives that the Auzre Utility
Tool requires. It is a JSON file that is required to be
stored in ~/.aut/aut_config.json
"""
import json
import sys
import os
from azure_utility_tool.exceptions import ConfigFileNotFound
| 34.074074 | 71 | 0.702174 |
b8a965e925e8c33d2b6141373da012de99c134f6 | 1,197 | py | Python | ics/structures/secu_avb_settings.py | intrepidcs/python_ics | 7bfa8c2f893763608f9255f9536a2019cfae0c23 | [
"Unlicense"
] | 45 | 2017-10-17T08:42:08.000Z | 2022-02-21T16:26:48.000Z | ics/structures/secu_avb_settings.py | intrepidcs/python_ics | 7bfa8c2f893763608f9255f9536a2019cfae0c23 | [
"Unlicense"
] | 106 | 2017-03-07T21:10:39.000Z | 2022-03-29T15:32:46.000Z | ics/structures/secu_avb_settings.py | intrepidcs/python_ics | 7bfa8c2f893763608f9255f9536a2019cfae0c23 | [
"Unlicense"
] | 17 | 2017-04-04T12:30:22.000Z | 2022-01-28T05:30:25.000Z | # This file was auto generated; Do not modify, if you value your sanity!
import ctypes
import enum
from ics.structures.can_settings import *
from ics.structures.canfd_settings import *
from ics.structures.s_text_api_settings import *
_neoECU_AVBSettings = secu_avb_settings
ECU_AVBSettings = secu_avb_settings
SECU_AVBSettings = secu_avb_settings
| 27.837209 | 72 | 0.670844 |
b8a9e103986b1f6f93cdc5df1a8eef20f43536e8 | 1,531 | py | Python | unaccepted/Substring_with_Concatenation_of_All_Words.py | sheagk/leetcode_solutions | 7571bd13f4274f6b4b622b43a414d56fc26d3be0 | [
"MIT"
] | null | null | null | unaccepted/Substring_with_Concatenation_of_All_Words.py | sheagk/leetcode_solutions | 7571bd13f4274f6b4b622b43a414d56fc26d3be0 | [
"MIT"
] | null | null | null | unaccepted/Substring_with_Concatenation_of_All_Words.py | sheagk/leetcode_solutions | 7571bd13f4274f6b4b622b43a414d56fc26d3be0 | [
"MIT"
] | 1 | 2020-09-03T14:26:00.000Z | 2020-09-03T14:26:00.000Z | ## https://leetcode.com/problems/substring-with-concatenation-of-all-words/submissions/
## this method fails on test case 171 of 173 because it's too slow.
## i'm not sure I see a way to avoid checking every starting position
## in s, and I'm also not sure I see a way to avoid having a loop over
## the words too.
## unfortunately, that means my complexity is O(len(s)*len(words)), which
## is too slow for a case where we have a ton of short words and a very long
## string.
| 31.895833 | 87 | 0.53821 |
b8ab8fab99f75b9332a0131cf9ea65ac9a6bcb59 | 1,848 | py | Python | python_app/supervised_learning/train_data/Data.py | 0xsuu/Project-Mahjong | e82edc67651ff93c8ec158b590cd728f28504be9 | [
"Apache-2.0"
] | 9 | 2018-06-08T00:09:08.000Z | 2021-11-17T11:05:11.000Z | python_app/supervised_learning/train_data/Data.py | 0xsuu/Project-Mahjong | e82edc67651ff93c8ec158b590cd728f28504be9 | [
"Apache-2.0"
] | 1 | 2020-04-25T12:43:26.000Z | 2020-04-25T12:43:26.000Z | python_app/supervised_learning/train_data/Data.py | 0xsuu/Project-Mahjong | e82edc67651ff93c8ec158b590cd728f28504be9 | [
"Apache-2.0"
] | 2 | 2019-05-30T07:18:45.000Z | 2019-11-05T09:15:13.000Z | #!/usr/bin/env python3
'''
The MIT License (MIT)
Copyright (c) 2014 Mark Haines
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
| 36.96 | 82 | 0.682359 |