hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
185f488bf8a799cdaf7e16f96a249d8d9b0d63bc | 1,824 | py | Python | skeema/intermediate/compiler/class_builder.py | HeadHaus/Skeema | fc0faf13afad2c95b8943eaa3bfc2cc23b7de003 | [
"MIT"
] | null | null | null | skeema/intermediate/compiler/class_builder.py | HeadHaus/Skeema | fc0faf13afad2c95b8943eaa3bfc2cc23b7de003 | [
"MIT"
] | null | null | null | skeema/intermediate/compiler/class_builder.py | HeadHaus/Skeema | fc0faf13afad2c95b8943eaa3bfc2cc23b7de003 | [
"MIT"
] | null | null | null | from __future__ import annotations
import sys
from skeema.intermediate.compiler.parser import Parser
from skeema import ModelMeta
from skeema import util
| 28.5 | 111 | 0.627193 |
185f6fd90f53269fc456d4b79fc344aa07fad28a | 1,064 | py | Python | problems/csp/single/LabeledDice.py | xcsp3team/pycsp3 | a11bc370e34cd3fe37faeae9a5df935fcbd7770d | [
"MIT"
] | 28 | 2019-12-14T09:25:52.000Z | 2022-03-24T08:15:13.000Z | problems/csp/single/LabeledDice.py | xcsp3team/pycsp3 | a11bc370e34cd3fe37faeae9a5df935fcbd7770d | [
"MIT"
] | 7 | 2020-04-15T11:02:07.000Z | 2022-01-20T12:48:54.000Z | problems/csp/single/LabeledDice.py | xcsp3team/pycsp3 | a11bc370e34cd3fe37faeae9a5df935fcbd7770d | [
"MIT"
] | 3 | 2020-04-15T08:23:45.000Z | 2021-12-07T14:02:28.000Z | """
From http://jimorlin.wordpress.com/2009/02/17/colored-letters-labeled-dice-a-logic-puzzle/
There are 13 words as follows: buoy, cave, celt, flub, fork, hemp, judy, junk, limn, quip, swag, visa.
There are 24 different letters that appear in the 13 words.
The question is: can one assign the 24 letters to 4 different cubes so that the four letters of each word appears on different cubes.
There is one letter from each word on each cube.
The puzzle was created by Humphrey Dudley.
Execution:
python3 LabeledDice.py
"""
from pycsp3 import *
words = ["buoy", "cave", "celt", "flub", "fork", "hemp", "judy", "junk", "limn", "quip", "swag", "visa"]
# x[i] is the cube where the ith letter of the alphabet is put
x = VarArray(size=26, dom=lambda i: range(1, 5) if i in alphabet_positions("".join(words)) else None)
satisfy(
# the four letters of each word appear on different cubes
[AllDifferent(x[i] for i in alphabet_positions(w)) for w in words],
# each cube is assigned 6 letters
Cardinality(x, occurrences={i: 6 for i in range(1, 5)})
)
| 39.407407 | 133 | 0.710526 |
1860d4a4ba12e96e49b6739a4f21bf910d68cc1a | 4,220 | py | Python | lib/JumpScale/tools/cuisine/solutions/CuisineCockpit.py | Jumpscale/jumpscale_core8 | f80ac9b1ab99b833ee7adb17700dcf4ef35f3734 | [
"Apache-2.0"
] | 8 | 2016-04-14T14:04:57.000Z | 2020-06-09T00:24:34.000Z | lib/JumpScale/tools/cuisine/solutions/CuisineCockpit.py | Jumpscale/jumpscale_core8 | f80ac9b1ab99b833ee7adb17700dcf4ef35f3734 | [
"Apache-2.0"
] | 418 | 2016-01-25T10:30:00.000Z | 2021-09-08T12:29:13.000Z | lib/JumpScale/tools/cuisine/solutions/CuisineCockpit.py | Jumpscale/jumpscale_core8 | f80ac9b1ab99b833ee7adb17700dcf4ef35f3734 | [
"Apache-2.0"
] | 9 | 2016-04-21T07:21:17.000Z | 2022-01-24T10:35:54.000Z | from JumpScale import j
base = j.tools.cuisine._getBaseClass()
| 39.074074 | 131 | 0.632701 |
186207c724d6262ec17c4da5e5a9cf096b45d2c3 | 7,103 | py | Python | examples/finetune-bert/02-BERT-sst2-DeepSpeed.py | ceshine/pytorch-helper-bot | 32c88d41fffa41fe35ba21c278eae83d914f3847 | [
"MIT"
] | 10 | 2019-12-13T23:30:31.000Z | 2021-12-08T14:21:47.000Z | examples/finetune-bert/02-BERT-sst2-DeepSpeed.py | ceshine/pytorch-helper-bot | 32c88d41fffa41fe35ba21c278eae83d914f3847 | [
"MIT"
] | null | null | null | examples/finetune-bert/02-BERT-sst2-DeepSpeed.py | ceshine/pytorch-helper-bot | 32c88d41fffa41fe35ba21c278eae83d914f3847 | [
"MIT"
] | 1 | 2021-11-07T19:00:03.000Z | 2021-11-07T19:00:03.000Z | """ Finetuning BERT using DeepSpeed's ZeRO-Offload
"""
import json
import dataclasses
from pathlib import Path
from functools import partial
import nlp
import torch
import typer
import deepspeed
import numpy as np
from transformers import BertTokenizerFast
from transformers import BertForSequenceClassification
from sklearn.model_selection import train_test_split
from pytorch_helper_bot import (
DeepSpeedBot, MovingAverageStatsTrackerCallback, CheckpointCallback,
LearningRateSchedulerCallback, MultiStageScheduler, Top1Accuracy,
LinearLR, CosineAnnealingScheduler
)
CACHE_DIR = Path("cache/")
CACHE_DIR.mkdir(exist_ok=True)
APP = typer.Typer()
def convert_to_features(tokenizer, example_batch):
# Tokenize contexts and questions (as pairs of inputs)
encodings = tokenizer.batch_encode_plus(
example_batch['sentence'], padding='max_length', max_length=64, truncation=True)
return encodings
if __name__ == "__main__":
APP()
| 32.582569 | 95 | 0.651696 |
18620b84b0e67aed4d98fbdd7983e2e41f67ec2d | 2,118 | py | Python | examples/images/autoencoder.py | jjpalacio/tflearn | e69bc9f341a1d2a90080bb24a686e0e2cf724d63 | [
"MIT"
] | 10,882 | 2016-03-31T16:03:11.000Z | 2022-03-26T03:00:27.000Z | examples/images/autoencoder.py | ciderpark/tflearn | 5c23566de6e614a36252a5828d107d001a0d0482 | [
"MIT"
] | 1,079 | 2016-04-02T06:14:16.000Z | 2022-02-27T10:04:47.000Z | examples/images/autoencoder.py | ciderpark/tflearn | 5c23566de6e614a36252a5828d107d001a0d0482 | [
"MIT"
] | 3,014 | 2016-03-31T16:03:26.000Z | 2022-03-30T20:36:53.000Z | # -*- coding: utf-8 -*-
""" Auto Encoder Example.
Using an auto encoder on MNIST handwritten digits.
References:
Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. "Gradient-based
learning applied to document recognition." Proceedings of the IEEE,
86(11):2278-2324, November 1998.
Links:
[MNIST Dataset] http://yann.lecun.com/exdb/mnist/
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib.pyplot as plt
import tflearn
# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data(one_hot=True)
# Building the encoder
encoder = tflearn.input_data(shape=[None, 784])
encoder = tflearn.fully_connected(encoder, 256)
encoder = tflearn.fully_connected(encoder, 64)
# Building the decoder
decoder = tflearn.fully_connected(encoder, 256)
decoder = tflearn.fully_connected(decoder, 784, activation='sigmoid')
# Regression, with mean square error
net = tflearn.regression(decoder, optimizer='adam', learning_rate=0.001,
loss='mean_square', metric=None)
# Training the auto encoder
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, X, n_epoch=20, validation_set=(testX, testX),
run_id="auto_encoder", batch_size=256)
# Encoding X[0] for test
print("\nTest encoding of X[0]:")
# New model, re-using the same session, for weights sharing
encoding_model = tflearn.DNN(encoder, session=model.session)
print(encoding_model.predict([X[0]]))
# Testing the image reconstruction on new data (test set)
print("\nVisualizing results after being encoded and decoded:")
testX = tflearn.data_utils.shuffle(testX)[0]
# Applying encode and decode over test set
encode_decode = model.predict(testX)
# Compare original images with their reconstructions
f, a = plt.subplots(2, 10, figsize=(10, 2))
for i in range(10):
temp = [[ii, ii, ii] for ii in list(testX[i])]
a[0][i].imshow(np.reshape(temp, (28, 28, 3)))
temp = [[ii, ii, ii] for ii in list(encode_decode[i])]
a[1][i].imshow(np.reshape(temp, (28, 28, 3)))
f.show()
plt.draw()
plt.waitforbuttonpress()
| 32.584615 | 72 | 0.72474 |
18645f94ba67063154674ceff77d5989d4dbd944 | 8,550 | py | Python | secureaws/secureaws.py | paliwalvimal/aws-secure-account | 78447720a17176cc539d62775817026609e67339 | [
"MIT"
] | 1 | 2021-02-11T17:15:18.000Z | 2021-02-11T17:15:18.000Z | secureaws/secureaws.py | paliwalvimal/aws-secure-account | 78447720a17176cc539d62775817026609e67339 | [
"MIT"
] | null | null | null | secureaws/secureaws.py | paliwalvimal/aws-secure-account | 78447720a17176cc539d62775817026609e67339 | [
"MIT"
] | 1 | 2019-12-12T09:01:59.000Z | 2019-12-12T09:01:59.000Z | """
## ## ## ## ##
## ## ##
## ## ##
## ## ## ## ## ##
## ##
## ##
##
AUTHOR = Vimal Paliwal <hello@vimalpaliwal.com>
"""
import sys
import boto3
import click
import threading
from botocore.exceptions import ClientError
from secureaws import checkaws
from secureaws import setupaws
from secureaws import rsautil
# Important Variables - DO NOT change the values
REGION = {
"N_VIRGINIA": "us-east-1",
"OHIO": "us-east-2",
"N_CALIFORNIA": "us-west-1",
"OREGON": "us-west-2",
"MUMBAI": "ap-south-1",
"SEOUL": "ap-northeast-2",
"SINGAPORE": "ap-southeast-1",
"SYDNEY": "ap-southeast-2",
"TOKYO": "ap-northeast-1",
"CANADA": "ca-central-1",
"FRANKFURT": "eu-central-1",
"IRELAND": "eu-west-1",
"LONDON": "eu-west-2",
"PARIS": "eu-west-3",
"SAO_PAULO": "sa-east-1",
"BAHRAIN": "me-south-1",
"STOCKHOLM": "eu-north-1",
"HONG_KONG": "ap-east-1"
}
# Managing CLI
# Map all click groups
sa = click.CommandCollection(sources=[chk_group,setup_group,rsa_group])
if __name__ == '__main__':
sa()
| 34.615385 | 179 | 0.582456 |
1864d86874c3d8b77ca9978c07a999b3a352d135 | 888 | py | Python | python/examples/find_similar.py | yupbank/euclidesdb | c4210b68a79aab20e6911c78940b909b8bede557 | [
"Apache-2.0"
] | null | null | null | python/examples/find_similar.py | yupbank/euclidesdb | c4210b68a79aab20e6911c78940b909b8bede557 | [
"Apache-2.0"
] | null | null | null | python/examples/find_similar.py | yupbank/euclidesdb | c4210b68a79aab20e6911c78940b909b8bede557 | [
"Apache-2.0"
] | null | null | null | import sys
import argparse
import euclides
from PIL import Image
import numpy as np
from torchvision.transforms import functional as F
if __name__ == "__main__":
run_main()
| 26.909091 | 86 | 0.667793 |
18662f52c2055666297ec86901f3368b3430ce9a | 868 | py | Python | gunpowder/nodes/renumber_connected_components.py | trivoldus28/gunpowder | 97e9e64709fb616e2c47567b22d5f11a9234fe48 | [
"MIT"
] | 43 | 2017-05-03T22:27:11.000Z | 2022-02-11T19:07:28.000Z | gunpowder/nodes/renumber_connected_components.py | trivoldus28/gunpowder | 97e9e64709fb616e2c47567b22d5f11a9234fe48 | [
"MIT"
] | 102 | 2017-06-09T10:11:06.000Z | 2022-03-29T13:56:37.000Z | gunpowder/nodes/renumber_connected_components.py | trivoldus28/gunpowder | 97e9e64709fb616e2c47567b22d5f11a9234fe48 | [
"MIT"
] | 43 | 2017-04-25T20:25:17.000Z | 2022-02-11T19:07:34.000Z | from .batch_filter import BatchFilter
from gunpowder.ext import malis
| 29.931034 | 78 | 0.670507 |
18664b760b4ae7d4a23d616670b3152102c11769 | 401 | py | Python | project/main/migrations/0003_auto_20200504_1852.py | Leeoku/MovieCrud | fb9e364895684f0cb1e3c1bc68971f0d4a7df1fc | [
"MIT"
] | null | null | null | project/main/migrations/0003_auto_20200504_1852.py | Leeoku/MovieCrud | fb9e364895684f0cb1e3c1bc68971f0d4a7df1fc | [
"MIT"
] | 6 | 2021-03-19T02:52:05.000Z | 2021-09-22T18:58:44.000Z | project/main/migrations/0003_auto_20200504_1852.py | Leeoku/MovieCrud | fb9e364895684f0cb1e3c1bc68971f0d4a7df1fc | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-05-04 18:52
from django.db import migrations, models
| 21.105263 | 58 | 0.605985 |
1868e8987c751a0abe91a5dd69173ea001090442 | 3,534 | py | Python | LR/lr/model/resource_data_monitor/incoming_copy_handler.py | LearningRegistry/LearningRegistry | d9f0a8117a4adb8fcf6bf101d3d58d799463a2e2 | [
"Apache-2.0"
] | 26 | 2015-04-14T03:11:58.000Z | 2022-01-06T14:31:07.000Z | LR/lr/model/resource_data_monitor/incoming_copy_handler.py | LearningRegistry/LearningRegistry | d9f0a8117a4adb8fcf6bf101d3d58d799463a2e2 | [
"Apache-2.0"
] | 11 | 2015-04-03T21:54:03.000Z | 2017-05-02T17:20:03.000Z | LR/lr/model/resource_data_monitor/incoming_copy_handler.py | LearningRegistry/LearningRegistry | d9f0a8117a4adb8fcf6bf101d3d58d799463a2e2 | [
"Apache-2.0"
] | 16 | 2015-02-11T09:30:18.000Z | 2020-11-20T02:06:24.000Z | import logging
import couchdb
from collections import deque
from threading import Thread
from pylons import config
from lr.lib import SpecValidationException, helpers as h
from lr.lib.couch_change_monitor import BaseChangeHandler
from lr.model import ResourceDataModel
from couchdb import ResourceConflict
from lr.lib.replacement_helper import ResourceDataReplacement
from lr.lib.schema_helper import ResourceDataModelValidator
log = logging.getLogger(__name__)
# this doesn't need to be done... should be handled by pylons.config
# scriptPath = os.path.dirname(os.path.abspath(__file__))
# _PYLONS_CONFIG = os.path.join(scriptPath, '..', '..', '..', 'development.ini')
# _config = ConfigParser.ConfigParser()
# _config.read(_PYLONS_CONFIG)
_RESOURCE_DISTRIBUTABLE_TYPE = "resource_data_distributable"
_RESOURCE_TYPE = "resource_data"
_DOC_TYPE = "doc_type"
_DOC = "doc"
_ID = "id"
_DOCUMENT_UPDATE_THRESHOLD = 100
| 36.061224 | 116 | 0.621958 |
186a448cd375a10732fb3690423f8d8f87976e4a | 1,432 | py | Python | proxyclient/m1n1/fw/asc/base.py | EricRabil/m1n1 | 0a1a9348c32e2e44374720cd9d68cbe81cf696df | [
"MIT"
] | 1 | 2022-02-19T17:47:58.000Z | 2022-02-19T17:47:58.000Z | proxyclient/m1n1/fw/asc/base.py | EricRabil/m1n1 | 0a1a9348c32e2e44374720cd9d68cbe81cf696df | [
"MIT"
] | null | null | null | proxyclient/m1n1/fw/asc/base.py | EricRabil/m1n1 | 0a1a9348c32e2e44374720cd9d68cbe81cf696df | [
"MIT"
] | 2 | 2022-02-01T18:33:16.000Z | 2022-02-19T17:50:25.000Z | # SPDX-License-Identifier: MIT
from ...utils import *
# System endpoints
| 25.122807 | 84 | 0.578212 |
186a5816589e84e463b32b76302f76cecdf63a3d | 710 | py | Python | misc/redirector.py | ktan2020/tooling | 5a22adc2895f5baa98faad7028061219c545a675 | [
"MIT"
] | null | null | null | misc/redirector.py | ktan2020/tooling | 5a22adc2895f5baa98faad7028061219c545a675 | [
"MIT"
] | null | null | null | misc/redirector.py | ktan2020/tooling | 5a22adc2895f5baa98faad7028061219c545a675 | [
"MIT"
] | null | null | null | import SimpleHTTPServer
import SocketServer
import sys
from optparse import OptionParser
p = OptionParser()
p.add_option("--ip", dest="ip")
p.add_option("--port", dest="port", type=int, default=8080)
(o,p) = p.parse_args()
if o.ip == None:
print "XXX FATAL : IP address to redirect to is mandatory! XXX"
sys.exit(1)
handler = SocketServer.TCPServer(("", o.port), myHandler)
print "serving at port %s" % o.port
handler.serve_forever()
| 27.307692 | 67 | 0.685915 |
186a69b010242e5cd6623bba8225f28d59422edb | 943 | py | Python | alert/getinfo/model/configdata.py | xwwwb/genshin_task-resin-expedition_alert | cddaafc2723c5d9eea6fbd1db792ad70427344c8 | [
"MIT"
] | 2 | 2022-03-01T10:39:30.000Z | 2022-03-29T13:40:37.000Z | alert/getinfo/model/configdata.py | xwwwb/genshin_task-resin-expedition_alert | cddaafc2723c5d9eea6fbd1db792ad70427344c8 | [
"MIT"
] | null | null | null | alert/getinfo/model/configdata.py | xwwwb/genshin_task-resin-expedition_alert | cddaafc2723c5d9eea6fbd1db792ad70427344c8 | [
"MIT"
] | null | null | null | from typing import List, Literal
import pydantic
| 23 | 60 | 0.688229 |
186ceed8bf38c2d8c4e7809751f03d8df4473f09 | 6,479 | py | Python | src/cli.py | stefantaubert/tacotron | 9ac37fbf8789b4e7fe1067212a736074181b6fd8 | [
"MIT"
] | null | null | null | src/cli.py | stefantaubert/tacotron | 9ac37fbf8789b4e7fe1067212a736074181b6fd8 | [
"MIT"
] | 1 | 2021-11-11T08:50:32.000Z | 2021-11-19T12:39:06.000Z | src/cli.py | stefantaubert/tacotron | 9ac37fbf8789b4e7fe1067212a736074181b6fd8 | [
"MIT"
] | null | null | null | import os
from argparse import ArgumentParser
from pathlib import Path
from general_utils import split_hparams_string, split_int_set_str
# from tacotron.app.eval_checkpoints import eval_checkpoints
from tacotron.app import (DEFAULT_MAX_DECODER_STEPS, continue_train, infer,
plot_embeddings, train, validate)
from tacotron.app.defaults import (DEFAULT_MCD_NO_OF_COEFFS_PER_FRAME,
DEFAULT_REPETITIONS,
DEFAULT_SAVE_MEL_INFO_COPY_PATH,
DEFAULT_SEED)
BASE_DIR_VAR = "base_dir"
# def init_eval_checkpoints_parser(parser):
# parser.add_argument('--train_name', type=str, required=True)
# parser.add_argument('--custom_hparams', type=str)
# parser.add_argument('--select', type=int)
# parser.add_argument('--min_it', type=int)
# parser.add_argument('--max_it', type=int)
# return eval_checkpoints_main_cli
# def evaeckpoints_main_cli(**args):
# argsl_ch["custom_hparams"] = split_hparams_string(args["custom_hparams"])
# eval_checkpoints(**args)
# def init_restore_parser(parser: ArgumentParser) -> None:
# parser.add_argument('--train_name', type=str, required=True)
# parser.add_argument('--checkpoint_dir', type=Path, required=True)
# return restore_model
if __name__ == "__main__":
main_parser = _init_parser()
received_args = main_parser.parse_args()
_process_args(received_args)
| 39.03012 | 97 | 0.748572 |
186d347af5ccfb1407fd9334ac01a2985ccc1dd2 | 969 | py | Python | apps/hello/uploadHandler.py | tenqaz/tornado_learning | 3ff18039b69c49927452d778098e1a1b7fe7b5da | [
"MIT"
] | 11 | 2019-10-08T07:31:06.000Z | 2021-09-27T01:08:40.000Z | apps/hello/uploadHandler.py | tenqaz/tornado_learning | 3ff18039b69c49927452d778098e1a1b7fe7b5da | [
"MIT"
] | null | null | null | apps/hello/uploadHandler.py | tenqaz/tornado_learning | 3ff18039b69c49927452d778098e1a1b7fe7b5da | [
"MIT"
] | 3 | 2020-04-17T06:29:42.000Z | 2021-09-27T01:08:41.000Z | # -*- coding: utf-8 -*-
"""
@author: Jim
@project: tornado_learning
@time: 2019/8/20 14:48
@desc:
"""
from __future__ import annotations
from tornado_learning.handler import BaseHandler
import os
import uuid
import aiofiles
| 24.225 | 95 | 0.603715 |
18719fea4e335f1ca1128345b7f27750044e6081 | 2,906 | py | Python | mathgrid_app/main.py | logiflo/mathgrid | 9cfff50b66a45a6598651afd2c785560eed78f27 | [
"BSD-2-Clause"
] | null | null | null | mathgrid_app/main.py | logiflo/mathgrid | 9cfff50b66a45a6598651afd2c785560eed78f27 | [
"BSD-2-Clause"
] | null | null | null | mathgrid_app/main.py | logiflo/mathgrid | 9cfff50b66a45a6598651afd2c785560eed78f27 | [
"BSD-2-Clause"
] | null | null | null | """Main module
"""
# Standard library imports
import string
# Third party imports
import numpy as np
import justpy as jp
import pandas as pd
START_INDEX: int = 1
END_INDEX: int = 20
GRID_OPTIONS = """
{
class: 'ag-theme-alpine',
defaultColDef: {
filter: true,
sortable: false,
resizable: true,
headerClass: 'font-bold',
editable: true
},
rowSelection: 'single',
}
"""
def on_input_key(self, msg):
    """Handle a keystroke in the input field.

    Writes the field's current text back into the grid cell that was
    last clicked (tracked via ``self.last_cell``). Does nothing when no
    cell has been selected yet.

    Args:
        msg (object): Event data object carrying the input's ``value``.
    """
    target = self.last_cell
    if target is None:
        return
    row_data = self.grid.options['rowData']
    row_data[target['row']][target['col']] = msg.value
def on_cell_clicked(self, msg):
    """Handle a click on a grid cell.

    Shows the clicked cell's coordinates (e.g. ``A3``) in the label,
    copies the cell's current value into the input field, and remembers
    which cell was clicked so the input handler can write back to it.

    Args:
        msg (object): Event data object exposing ``colId``, ``rowIndex``,
            ``data`` and ``row``.
    """
    col = msg.colId
    row = msg.rowIndex
    self.cell_label.value = col + str(row)
    self.input_field.value = msg.data[col]
    self.input_field.last_cell = {"row": row, "col": col}
    self.last_row = msg.row
def on_cell_value_changed(self, msg):
    """Handle an edit committed directly inside a grid cell.

    Mirrors the cell's new value into the input field so both widgets
    stay in sync.

    Args:
        msg (object): Event data object exposing ``data`` and ``colId``.
    """
    new_value = msg.data[msg.colId]
    self.input_field.value = new_value
def grid_test():
    """Grid test app.

    Builds the spreadsheet-like demo page: a 26-column (A..Z) empty
    pandas frame rendered in an ag-Grid, plus a read-only coordinate
    label and an editable input field wired to the grid's events.
    Returns the assembled justpy WebPage.
    """
    # Column headers A..Z; row labels run START_INDEX..END_INDEX-1.
    headings = list(string.ascii_uppercase)
    index = np.arange(START_INDEX, END_INDEX)
    # Every cell starts out as an empty string.
    data_frame = pd.DataFrame(index=index, columns=headings)
    data_frame = data_frame.fillna('')
    # data = np.array([np.arange(10)]*3).T
    # css_values = """
    # .ag-theme-alpine .ag-ltr .ag-cell {
    #     border-right: 1px solid #aaa;
    # }
    # .ag-theme-balham .ag-ltr .ag-cell {
    #     border-right: 1px solid #aaa;
    # }
    # """
    web_page = jp.WebPage()
    root_div = jp.Div(classes='q-pa-md', a=web_page)
    in_root_div = jp.Div(classes='q-gutter-md', a=root_div)
    # Disabled (read-only) field that displays the clicked cell's
    # coordinates, e.g. "A3".
    cell_label = jp.Input(
        a=in_root_div, style='width: 32px; margin-left: 16px', disabled=True)
    # Editable field mirroring the clicked cell's value.
    input_field = jp.Input(classes=jp.Styles.input_classes,
                           a=in_root_div, width='32px')
    input_field.on("input", on_input_key)
    input_field.last_cell = None  # no cell selected yet
    grid = jp.AgGrid(a=web_page, options=GRID_OPTIONS)
    grid.load_pandas_frame(data_frame)
    grid.options.pagination = True
    grid.options.paginationAutoPageSize = True
    # Cross-references so the event handlers can reach every widget.
    grid.cell_label = cell_label
    grid.input_field = input_field
    grid.on('cellClicked', on_cell_clicked)
    grid.on('cellValueChanged', on_cell_value_changed)
    input_field.grid = grid
    return web_page
def main():
    """Main app.

    Starts the justpy web server with ``grid_test`` as the page factory.
    """
    jp.justpy(grid_test)


if __name__ == "__main__":
    main()
| 23.819672 | 77 | 0.631108 |
1874ca96a1f31b40d52d15b318f020ba7a9562e6 | 811 | py | Python | tests/test_linked_queue.py | dataloudlabs/dloud-ads | d0ad3f169c2384292db4097e00ba7858f37a8198 | [
"MIT"
] | null | null | null | tests/test_linked_queue.py | dataloudlabs/dloud-ads | d0ad3f169c2384292db4097e00ba7858f37a8198 | [
"MIT"
] | null | null | null | tests/test_linked_queue.py | dataloudlabs/dloud-ads | d0ad3f169c2384292db4097e00ba7858f37a8198 | [
"MIT"
] | null | null | null | """ Unit tests for linked_queue.LinkedQueue """
from dloud_ads import linked_queue
def test_dummy():
    """Exercise LinkedQueue: emptiness, FIFO order, and length tracking."""
    q = linked_queue.LinkedQueue()

    # A fresh queue is empty and falsy.
    assert q.is_empty()
    assert not q

    # Single-element round trip.
    q.enqueue(2)
    assert not q.is_empty()
    assert len(q) == 1
    assert q.dequeue() == 2

    # FIFO order over a small batch.
    for item in range(4):
        q.enqueue(item)
    assert len(q) == 4
    assert [q.dequeue() for _ in range(4)] == [0, 1, 2, 3]
    assert not q

    # Two enqueue bursts accumulate; draining preserves insertion order.
    for item in range(9):
        q.enqueue(item)
    assert len(q) == 9
    for item in range(2):
        q.enqueue(item)
    assert len(q) == 11
    assert [q.dequeue() for _ in range(11)] == [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1]
| 26.16129 | 66 | 0.637485 |
1875fb8f105e2c1eaf8a87c9adee8cca7ddd3e65 | 1,831 | py | Python | setup.py | AnacletoLAB/grape | 5ed0a84b7cedf588715919782f37c9492263bd12 | [
"MIT"
] | 6 | 2021-09-22T17:40:01.000Z | 2022-03-24T04:28:00.000Z | setup.py | AnacletoLAB/grape | 5ed0a84b7cedf588715919782f37c9492263bd12 | [
"MIT"
] | 5 | 2021-10-14T10:48:27.000Z | 2022-03-23T11:03:05.000Z | setup.py | AnacletoLAB/grape | 5ed0a84b7cedf588715919782f37c9492263bd12 | [
"MIT"
] | 2 | 2021-09-13T16:24:08.000Z | 2021-09-24T16:23:35.000Z | import os
import re
# To use a consistent encoding
from codecs import open as copen
from os import path
from setuptools import find_packages, setup
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with copen(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
__version__ = find_version("grape", "__version__.py")
test_deps = []
# TODO: Authors add your emails!!!
authors = {
"Luca Cappelletti": "luca.cappelletti1@unimi.it",
"Tommaso Fontana": "tommaso.fontana@mail.polimi.it",
"Vida Ravanmehr": "vida.ravanmehr@jax.org",
"Peter Robinson": "peter.robinson@jax.org",
}
setup(
name='grape',
version=__version__,
description="Rust/Python for high performance Graph Processing and Embedding.",
long_description=long_description,
url="https://github.com/AnacletoLAB/grape",
author=", ".join(list(authors.keys())),
author_email=", ".join(list(authors.values())),
# Choose your license
license='MIT',
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'
],
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
tests_require=test_deps,
install_requires=[
"ensmallen==0.7.0.dev6",
"embiggen==0.10.0.dev2",
]
)
| 27.328358 | 83 | 0.653195 |
187614996f13120eae23f5d092c2a9efde0e80bf | 76,079 | py | Python | pyLMS7002Soapy/LMS7002_BIAS.py | Surfndez/pyLMS7002Soapy | ea230dcb12048007300477e1e2e4decc5414f954 | [
"Apache-2.0"
] | 46 | 2016-11-29T05:10:36.000Z | 2021-10-31T19:27:46.000Z | pyLMS7002M/LMS7002_BIAS.py | myriadrf/pyLMS7002M | b866deea1f05dba44c9ed1a1a4666352b811b66b | [
"Apache-2.0"
] | 2 | 2017-04-15T21:36:01.000Z | 2017-06-08T09:44:26.000Z | pyLMS7002Soapy/LMS7002_BIAS.py | Surfndez/pyLMS7002Soapy | ea230dcb12048007300477e1e2e4decc5414f954 | [
"Apache-2.0"
] | 16 | 2016-11-28T20:47:55.000Z | 2021-04-07T01:48:20.000Z | #***************************************************************
#* Name: LMS7002_BIAS.py
#* Purpose: Class implementing LMS7002 BIAS functions
#* Author: Lime Microsystems ()
#* Created: 2016-11-14
#* Copyright: Lime Microsystems (limemicro.com)
#* License:
#**************************************************************
from LMS7002_base import *
| 27.604862 | 148 | 0.557736 |
1876d8349cdadc13b5b12782011e2506eb566592 | 1,299 | py | Python | NorthernLights/shapes/BaseShape.py | jgillick/coffeetable-programs | 244e3cc9099993a050ed64b1d11e41c763a1cb72 | [
"MIT"
] | null | null | null | NorthernLights/shapes/BaseShape.py | jgillick/coffeetable-programs | 244e3cc9099993a050ed64b1d11e41c763a1cb72 | [
"MIT"
] | null | null | null | NorthernLights/shapes/BaseShape.py | jgillick/coffeetable-programs | 244e3cc9099993a050ed64b1d11e41c763a1cb72 | [
"MIT"
] | null | null | null | import time
# Colors
RED = (1,0,0)
YELLOW = (1,1,0)
GREEN = (0,1,0)
CYAN = (0,1,1)
BLUE = (0,0,1)
PURPLE = (1,0,1)
| 24.055556 | 70 | 0.583526 |
1878e0fb7794287a25d9e67514272eb4ae4e8c3c | 148 | py | Python | WD/Cwiczenia/rzymskie.py | galursa/UWM | b7ab4a275662764a91af6c5bc79da0d98177d0ac | [
"MIT"
] | 1 | 2020-02-29T14:38:33.000Z | 2020-02-29T14:38:33.000Z | WD/Cwiczenia/rzymskie.py | galursa/UWM | b7ab4a275662764a91af6c5bc79da0d98177d0ac | [
"MIT"
] | null | null | null | WD/Cwiczenia/rzymskie.py | galursa/UWM | b7ab4a275662764a91af6c5bc79da0d98177d0ac | [
"MIT"
] | null | null | null | rzymskie={'I':1,'II':2,'III':3,'IV':4,'V':5,'VI':6,'VII':7,'VIII':8}
print(rzymskie)
print('Jeden element slownika: \n')
print(rzymskie['I'])
| 24.666667 | 69 | 0.587838 |
187a7c6d2f82ad82d4fc1c57659cdd525e113835 | 1,791 | py | Python | otoku.py | gitmori/WebTools | 05d10f082875f1ffb0eaa6cb40f4bd028d3bf01f | [
"MIT"
] | null | null | null | otoku.py | gitmori/WebTools | 05d10f082875f1ffb0eaa6cb40f4bd028d3bf01f | [
"MIT"
] | null | null | null | otoku.py | gitmori/WebTools | 05d10f082875f1ffb0eaa6cb40f4bd028d3bf01f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from config.site_url import SiteUrl
from urllib.request import urlopen
from lxml.html import fromstring
from random import randint
from time import sleep
#
# Blog14xPath
for page in range(1, 5):
# BlogURL.gitignore
url = SiteUrl()[4] + str(page) + '/'
res = urlopen(url)
dom = fromstring(res.read())
# 12220
if page == 1:
end = 23
else:
end = 21
for row in range(1, end):
#
date = dom.xpath('//*[@id="main"]/div[2]/div[' + str(row) + ']/div[2]/div/p/text()[1]')
date = conv(date)
#
info = dom.xpath('//*[@id="main"]/div[2]/div[' + str(row) + ']/div[2]/h3/a/text()')
info = conv(info)
# URLxPath/@href
link = dom.xpath('//*[@id="main"]/div[2]/div[' + str(row) + ']/div[2]/h3/a/@href')
link = conv(link)
# xPathj25
for i in range(2, 6):
# hrefxPath/text()
cmnt = dom.xpath('//*[@id="main"]/div[2]/div[' + str(row) +']/div[2]/div/p/a[' + str(i) + ']/text()')
cmnt = conv(cmnt)
# inforow
if '' not in info and '' in cmnt:
print(date)
print(info)
print(link)
print(cmnt)
# 13
if page <= 3:
time = randint(1, 3)
sleep(time)
| 29.360656 | 113 | 0.564489 |
187af0810cbd6c021345784f16958a06b58a35c1 | 1,077 | py | Python | falcon/util/net.py | jopereira/horus-tracer | 03671206f02c5ebea18f5682b346f59884e0a538 | [
"MIT"
] | 21 | 2018-04-18T19:01:09.000Z | 2021-11-24T19:22:33.000Z | falcon/util/net.py | jopereira/horus-tracer | 03671206f02c5ebea18f5682b346f59884e0a538 | [
"MIT"
] | 29 | 2018-04-30T16:39:27.000Z | 2021-04-03T16:04:19.000Z | falcon/util/net.py | jopereira/horus-tracer | 03671206f02c5ebea18f5682b346f59884e0a538 | [
"MIT"
] | 7 | 2018-04-21T13:04:03.000Z | 2021-03-07T08:24:26.000Z | import ctypes
import ctypes.util
libc = ctypes.CDLL(ctypes.util.find_library('c'))
# Get network device's name
# Generate socket id
| 32.636364 | 76 | 0.651811 |
187b4fbe94a221126760180a6b88a7b0450b6264 | 3,677 | py | Python | CKY_Parser/BackupGrammer.py | Deekshantiiitd/NLP-2019 | 36715d6032254bfd684fe4b9dcdebe94c3edaddc | [
"Apache-2.0"
] | null | null | null | CKY_Parser/BackupGrammer.py | Deekshantiiitd/NLP-2019 | 36715d6032254bfd684fe4b9dcdebe94c3edaddc | [
"Apache-2.0"
] | null | null | null | CKY_Parser/BackupGrammer.py | Deekshantiiitd/NLP-2019 | 36715d6032254bfd684fe4b9dcdebe94c3edaddc | [
"Apache-2.0"
] | null | null | null | import nltk,re,codecs
from nltk.tokenize import word_tokenize,sent_tokenize
from backNode import BackNode
from nltk import Tree
lines=data_preprosessing()
grammar=grammer_parse()
parse(lines,grammar)
| 25.894366 | 108 | 0.658689 |
187b747a40ae7c538023582dc3ed2250cb3040ca | 135 | py | Python | mullvad_python/__init__.py | linusg/mullpy | 6f29c33174e30ea2ba360327daae9bafe140c997 | [
"MIT"
] | 12 | 2018-08-02T20:05:54.000Z | 2020-06-24T18:42:53.000Z | mullvad_python/__init__.py | linusg/mullpy | 6f29c33174e30ea2ba360327daae9bafe140c997 | [
"MIT"
] | 3 | 2018-08-04T13:53:01.000Z | 2020-06-24T19:03:42.000Z | mullvad_python/__init__.py | linusg/mullpy | 6f29c33174e30ea2ba360327daae9bafe140c997 | [
"MIT"
] | 2 | 2018-08-05T14:06:39.000Z | 2020-06-24T18:45:47.000Z | """Initialization package."""
from .api import Mullpy
from .banner import banner
__all__ = ['Mullpy', 'banner']
__version__ = '0.3.1'
| 19.285714 | 30 | 0.703704 |
187d64baa4437d9fea0c349cebbb000fe3c38925 | 5,813 | py | Python | tests/init.py | Animenosekai/yuno | bcc48f7ceda022e26392e653c03606d3f5f66806 | [
"MIT"
] | 1 | 2022-02-25T13:39:18.000Z | 2022-02-25T13:39:18.000Z | tests/init.py | Animenosekai/yuno | bcc48f7ceda022e26392e653c03606d3f5f66806 | [
"MIT"
] | null | null | null | tests/init.py | Animenosekai/yuno | bcc48f7ceda022e26392e653c03606d3f5f66806 | [
"MIT"
] | null | null | null | import inspect
import pathlib
import sys
import yuno
# CONSTANTS
TEST_OBJECT = {
"a": 1,
"b": 2,
"c": 3,
"test_dict": {
"a": 1,
"b": 2,
"c": 3
},
"float": 1.1,
"int": 1,
"test_list": [1, 2, 3],
"null": None,
"string": "test",
"boolean": True
}
TEST_LIST = [
"string",
1,
1.1,
None,
[1, 2, 3],
TEST_OBJECT,
True
]
TEST_DOCUMENT = {"_id": "test_document", "hello": "world", "test_list": TEST_LIST, "test_dict": TEST_OBJECT,
"boolean": True, "float": 1.1, "int": 1, "null": None, "string": "test"}
KEPT_DATABASES = {'admin', 'local', 'config'}
REALTIME_TIMEOUT = 5
# UTILITY FUNCTIONS
STEP = f"CI/Testing - v{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
# INITIALIZATION FUNCTIONS
f = pathlib.Path("./MONGO_PORT")
if f.is_file():
MONGO_PORT = int(f.read_text().replace(" ", ""))
else:
MONGO_PORT = 27017
# DECORATORS
| 26.543379 | 141 | 0.608636 |
187dce0fab5d7dab6ce2381189b7af90777ddbc1 | 732 | py | Python | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/cli.py | go-choppy/choppy-cookiecutter-pypackage | b5bfc226089bba7002397c4055199b7b57c773ea | [
"BSD-3-Clause"
] | 2 | 2019-07-09T14:03:02.000Z | 2019-07-09T14:18:55.000Z | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/cli.py | yjcyxky/cookiecutter-pypackage | b5bfc226089bba7002397c4055199b7b57c773ea | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/cli.py | yjcyxky/cookiecutter-pypackage | b5bfc226089bba7002397c4055199b7b57c773ea | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
"""
{{cookiecutter.project_slug}}.cli
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
{{ cookiecutter.project_short_description }}
:copyright: 2019 by the Choppy Team.
:license: AGPLv3+, see LICENSE for more details.
"""
"""Console script for {{cookiecutter.project_slug}}."""
import sys
import click
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this extract — the click entry
    # point it refers to appears to have been stripped from this dump;
    # confirm it exists in the full cookiecutter template.
    sys.exit(main())  # pragma: no cover
| 25.241379 | 68 | 0.64071 |
187e7d05e0f32e5a771a3ba903dffb0254e60e4c | 891 | py | Python | test/classes/test_players.py | teamvolik/teamvolik | 35acc1405d4f0211236631d0c5bbdbf4f948fcb6 | [
"MIT"
] | 6 | 2022-03-27T22:13:35.000Z | 2022-03-31T22:45:02.000Z | test/classes/test_players.py | teamvolik/teamvolik | 35acc1405d4f0211236631d0c5bbdbf4f948fcb6 | [
"MIT"
] | 15 | 2022-03-18T09:47:31.000Z | 2022-03-29T15:26:51.000Z | test/classes/test_players.py | teamvolik/teamvolik | 35acc1405d4f0211236631d0c5bbdbf4f948fcb6 | [
"MIT"
] | null | null | null | import unittest
import src.classes.player as player
if __name__ == "__main__":
    # Discover and run any unittest.TestCase classes defined in this module
    # (the test cases themselves are not visible in this extract).
    unittest.main()
| 40.5 | 178 | 0.654321 |
188087e6c1a4e48475b3e61cabbe3ac47fb2c2ff | 3,745 | py | Python | src/asphalt/core/concurrent.py | agronholm/asphalt | 7b81a71941047770612aeea67e2b3332f92b5c18 | [
"Apache-2.0"
] | 226 | 2015-08-19T16:57:32.000Z | 2022-03-31T22:28:18.000Z | src/asphalt/core/concurrent.py | Asphalt-framework/asphalt | 7b81a71941047770612aeea67e2b3332f92b5c18 | [
"Apache-2.0"
] | 31 | 2015-09-05T11:18:33.000Z | 2019-03-25T10:51:17.000Z | src/asphalt/core/concurrent.py | Asphalt-framework/asphalt | 7b81a71941047770612aeea67e2b3332f92b5c18 | [
"Apache-2.0"
] | 11 | 2015-09-04T21:43:34.000Z | 2017-12-08T19:06:20.000Z | from __future__ import annotations
__all__ = ("executor",)
import inspect
import sys
from asyncio import get_running_loop
from concurrent.futures import Executor
from functools import partial, wraps
from typing import Awaitable, Callable, TypeVar, overload
from asphalt.core import Context
if sys.version_info >= (3, 10):
from typing import Concatenate, ParamSpec
else:
from typing_extensions import Concatenate, ParamSpec
T_Retval = TypeVar("T_Retval")
P = ParamSpec("P")
def executor(
    func_or_executor: Executor | str | Callable[Concatenate[Context, P], T_Retval]
) -> (
    Callable[
        [Callable[Concatenate[Context, P], T_Retval]],
        Callable[Concatenate[Context, P], T_Retval | Awaitable[T_Retval]],
    ]
    | Callable[Concatenate[Context, P], T_Retval | Awaitable[T_Retval]]
):
    """
    Decorate a function to run in an executor.
    If no executor (or ``None``) is given, the current event loop's default executor is
    used. Otherwise, the argument must be a PEP 3148 compliant thread pool executor or
    the name of an :class:`~concurrent.futures.Executor` instance.
    If a decorated callable is called in a worker thread, the executor argument is
    ignored and the wrapped function is called directly.
    Callables wrapped with this decorator must be used with ``await`` when called in the
    event loop thread.
    Example use with the default executor (``None``)::
        @executor
        def this_runs_in_threadpool(ctx):
            return do_something_cpu_intensive()
        async def request_handler(ctx):
            result = await this_runs_in_threadpool(ctx)
    With a named :class:`~concurrent.futures.Executor` resource::
        @executor('special_ops')
        def this_runs_in_threadpool(ctx):
            return do_something_cpu_intensive()
        async def request_handler(ctx):
            result = await this_runs_in_threadpool(ctx)
    :param func_or_executor: either a callable (when used as a decorator), an executor
        instance or the name of an :class:`~concurrent.futures.Executor` resource
    """
    # Holds the executor (instance or resource name) captured for the
    # decorator case; None selects the event loop's default executor.
    executor: Executor | str | None = None
    # NOTE(review): the inner decorator function `outer` returned below is
    # missing from this extract — it appears to have been stripped by the
    # dump; confirm against the full asphalt source before relying on this.
    if isinstance(func_or_executor, (str, Executor)):
        # Called as @executor('name') / @executor(pool): remember the target
        # and return the decorator itself.
        executor = func_or_executor
        return outer
    else:
        # Called as a bare @executor: decorate the function immediately.
        return outer(func_or_executor)
| 31.737288 | 88 | 0.666489 |
18817926b7a114ee1828bddf7e74ff4c0f734e43 | 2,309 | py | Python | src/templates/rsc/year_test.py | bradunov/shkola | 6ef057f5bd483318bf5763392972d48de481d0fb | [
"MIT"
] | 2 | 2019-08-25T09:37:27.000Z | 2021-01-25T20:22:30.000Z | src/templates/rsc/year_test.py | bradunov/shkola | 6ef057f5bd483318bf5763392972d48de481d0fb | [
"MIT"
] | 28 | 2019-07-04T19:53:36.000Z | 2020-10-24T13:27:56.000Z | src/templates/rsc/year_test.py | bradunov/shkola | 6ef057f5bd483318bf5763392972d48de481d0fb | [
"MIT"
] | null | null | null | import jinja2
# Build the template context for the year-selection page, then render it.
page = {}
page['title'] = 'Shkola'
page['item_path'] = '../src/'
page['google_signin_client_id'] = ""
page['google_site_verification'] = ""
# Grid of four answer buttons with alternating colors; buttons 3 and 4
# deliberately reuse the link targets of buttons 1 and 2.
page['button'] = {
    'width': '137px',
    'height': '140px',
    'font_size': '111px',
    'margin': '10px',
    'choices': []
}
_palette = ('#ff6956', '#489cba')
for _index, (_title, _target) in enumerate([('1', '1'), ('2', '2'), ('3', '1'), ('4', '2')]):
    page['button']['choices'].append({
        'title': _title,
        'obj_type': 'A',
        'front_color': _palette[_index % 2],
        'back_color': '#f9f9f9',
        'link': 'href="{}"'.format(_target)
    })
def _topic_submenu(menu_id):
    # Both grades share the same three topic entries.
    return {
        'id': menu_id,
        'options': [
            {'name': 'Brojevi', 'link': '1'},
            {'name': 'Geometrija', 'link': '2'},
            {'name': 'Razlomci', 'link': '3'}
        ]
    }
page['menu'] = [
    {
        'name': 'Zadaci',
        'submenu': {
            'id': 'zadaci',
            'options': [
                {
                    'name': 'Cetvrti',
                    'link': 'C',
                    'submenu': _topic_submenu('cetvrti')
                },
                {
                    'name': 'Treci',
                    'link': 'T',
                    'submenu': _topic_submenu('treci')
                }
            ]
        }
    },
    {
        'name': 'Rezultati',
        'link': 'R'
    }
]
# Render the year page template with the context assembled above.
file_loader = jinja2.FileSystemLoader("..")
env = jinja2.Environment(loader=file_loader)
template = env.get_template("rsc/year.html.j2")
print(template.render(template_params=page))
| 22.201923 | 67 | 0.374188 |
1882943906f0dcab9b6d642fa9c4ad632eb884ac | 19,771 | py | Python | merganser/conflict_prediction.py | ualberta-smr/merganser | 9ce9acc2a187d165c923f4a6461bd82165cda764 | [
"MIT"
] | 6 | 2019-12-04T06:29:52.000Z | 2020-09-28T01:27:17.000Z | merganser/conflict_prediction.py | ualberta-smr/merganser | 9ce9acc2a187d165c923f4a6461bd82165cda764 | [
"MIT"
] | null | null | null | merganser/conflict_prediction.py | ualberta-smr/merganser | 9ce9acc2a187d165c923f4a6461bd82165cda764 | [
"MIT"
] | 4 | 2019-04-25T21:07:20.000Z | 2021-11-22T15:04:04.000Z |
import logging
import json
import glob
import pandas as pd
import multiprocessing
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.model_selection import cross_val_predict
from sklearn.decomposition import IncrementalPCA
from scipy.stats import spearmanr
import config
from util import *
np.random.seed(config.RANDOM_SEED)
repo_lang = Repository_language()
def store_classification_result(model_name, language, model_classification_report, classification_results):
    """
    Persist one classifier's evaluation output: the raw sklearn text report
    and a JSON dump of the structured results.

    :param model_name: classifier name (used in the output file names)
    :param language: programming language (used in the output file names)
    :param model_classification_report: sklearn classification report text
    :param classification_results: JSON-serializable results dict
    """
    raw_path = '{}classification_result_raw_{}_{}.txt'.format(config.PREDICTION_RESULT_PATH, model_name, language)
    json_path = '{}classification_result_json_{}_{}.json'.format(config.PREDICTION_RESULT_PATH, model_name, language)
    # Context managers ensure the handles are flushed and closed — the
    # original used open(...).write(...) and leaked both file objects.
    with open(raw_path, 'w') as raw_file:
        raw_file.write(model_classification_report)
    with open(json_path, 'w') as json_file:
        json.dump(classification_results, json_file)
def data_classification_wo_cv(language, repo, data_train, label_train, data_test, label_test, random_seed=config.RANDOM_SEED, job_num=multiprocessing.cpu_count()):
    """
    Train the four candidate classifiers on a fixed train split and evaluate
    them on a held-out test split (no outer cross-validation).

    Hyper-parameters are tuned with an inner k-fold grid search on the
    training data only.

    :param language: programming language of the repository (reporting only)
    :param repo: repository name (reporting only)
    :param data_train: training feature matrix
    :param label_train: training labels
    :param data_test: held-out test feature matrix
    :param label_test: held-out test labels
    :param random_seed: seed shared by the CV splitter and the estimators
    :param job_num: number of parallel jobs for the grid searches
    :return: list of metric dicts (one per classifier), each tagged with
        'model_name', 'language' and 'repository'
    """
    # Inner CV used by GridSearchCV for hyper-parameter selection.
    # (The unused outer CV splitter from the nested-CV variant was removed.)
    inner_cv = KFold(n_splits=config.FOLD_NUM, shuffle=True, random_state=random_seed)
    # Hyper-parameter grids, all driven by the project config.
    tree_param = {'min_samples_leaf': config.MIN_SAMPLE_LEAVES, 'min_samples_split': config.MIN_SAMPLE_SPLIT,
                  'max_depth': config.TREE_MAX_DEPTH}
    forest_param = {'n_estimators': config.ESTIMATOR_NUM, 'min_samples_leaf': config.MIN_SAMPLE_LEAVES,
                    'min_samples_split': config.MIN_SAMPLE_SPLIT}
    boosting_param = {'n_estimators': config.ESTIMATOR_NUM, 'learning_rate': config.LEARNING_RATE}
    # One grid search per classifier family; all use class-balanced weights.
    grid_searches = [
        GridSearchCV(DecisionTreeClassifier(class_weight='balanced', random_state=random_seed),
                     tree_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION),
        GridSearchCV(RandomForestClassifier(class_weight='balanced', n_jobs=job_num, random_state=random_seed),
                     forest_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION),
        GridSearchCV(ExtraTreesClassifier(n_jobs=job_num, class_weight='balanced', random_state=random_seed),
                     forest_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION),
        GridSearchCV(AdaBoostClassifier(base_estimator=DecisionTreeClassifier(class_weight='balanced',
                                                                              random_state=random_seed,
                                                                              max_depth=2),
                                        algorithm='SAMME.R', random_state=random_seed),
                     boosting_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION)
    ]
    results = []
    for model in grid_searches:
        # Tune on the training split, then evaluate the tuned model once on
        # the held-out test split.
        model.score_sample_weight = True
        model.fit(data_train, label_train)
        # Short class name, e.g. "RandomForestClassifier", from the repr.
        model_name = str(type(model.best_estimator_)).replace('<class \'', '').replace('\'>', '').split('.')[-1]
        predicted_label = model.best_estimator_.predict(data_test)
        metrics_row = get_metrics(label_test, predicted_label)
        metrics_row['model_name'] = model_name
        metrics_row['language'] = language
        metrics_row['repository'] = repo
        results.append(metrics_row)
    return results
def data_classification(language, data, label, random_seed=config.RANDOM_SEED, job_num=multiprocessing.cpu_count()):
    """
    Train and evaluate four tree-based classifiers with nested CV: an inner
    k-fold grid search tunes hyper-parameters, and out-of-fold predictions
    from an outer k-fold give the reported metrics. Prints one
    classification report per model.

    :param language: programming language label (used only for result storage)
    :param data: feature matrix
    :param label: target labels
    :param random_seed: seed shared by the CV splitters and the estimators
    :param job_num: number of parallel jobs for grid search and prediction
    """
    # CV
    inner_cv = KFold(n_splits=config.FOLD_NUM, shuffle=True, random_state=random_seed)
    outer_cv = KFold(n_splits=config.FOLD_NUM, shuffle=True, random_state=random_seed)
    # Hyper-parameters
    tree_param = {'min_samples_leaf': config.MIN_SAMPLE_LEAVES, 'min_samples_split': config.MIN_SAMPLE_SPLIT,
                  'max_depth': config.TREE_MAX_DEPTH}
    forest_param = {'n_estimators': config.ESTIMATOR_NUM, 'min_samples_leaf': config.MIN_SAMPLE_LEAVES,
                    'min_samples_split': config.MIN_SAMPLE_SPLIT}
    boosting_param = {'n_estimators': config.ESTIMATOR_NUM, 'learning_rate': config.LEARNING_RATE}
    # Grid search definition
    grid_searches = [
        GridSearchCV(DecisionTreeClassifier(class_weight='balanced', random_state = random_seed),
                     tree_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION),
        GridSearchCV(RandomForestClassifier(class_weight='balanced', n_jobs=job_num, random_state = random_seed),
                     forest_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION),
        GridSearchCV(ExtraTreesClassifier(n_jobs=job_num, class_weight='balanced', random_state = random_seed),
                     forest_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION),
        GridSearchCV(AdaBoostClassifier(base_estimator=DecisionTreeClassifier(class_weight = 'balanced',
                                                                              random_state = random_seed,
                                                                              max_depth=2),
                                        algorithm='SAMME.R', random_state=random_seed),
                     boosting_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION)
    ]
    # Fitting the classifiers
    classification_results = {}
    for model in grid_searches:
        # Model training/testing
        model.score_sample_weight = True
        model.fit(data, label)
        # Short class name, e.g. "RandomForestClassifier", parsed from the repr.
        model_name = str(type(model.best_estimator_)).replace('<class \'', '').replace('\'>', '').split('.')[-1]
        model_best_param = model.best_params_
        # Out-of-fold predictions over the outer CV evaluate the tuned model.
        predicted_label = cross_val_predict(model.best_estimator_, X=data, y=label, cv=outer_cv, n_jobs=job_num)
        model_accuracy = accuracy_score(label, predicted_label)
        model_confusion_matrix = confusion_matrix(label, predicted_label)
        model_classification_report = classification_report(label, predicted_label)
        classification_results[model_name] = {}
        classification_results[model_name]['best_params'] = model_best_param
        classification_results[model_name]['accuracy'] = model_accuracy
        classification_results[model_name]['confusion_matrix'] = model_confusion_matrix.tolist()
        classification_results[model_name]['classification_report'] = model_classification_report
        print(model_classification_report)
        ## Save the classification result
        #store_classification_result(model_name, language, model_classification_report, classification_results)
def get_best_decision_tree(data, label, random_seed=config.RANDOM_SEED, job_num=multiprocessing.cpu_count()):
    """
    Grid-search a class-balanced DecisionTreeClassifier over the configured
    hyper-parameter grid and return the best fitted estimator.

    :param data: feature matrix
    :param label: target labels
    :param random_seed: seed for the CV shuffling and the tree
    :param job_num: number of parallel grid-search jobs
    :return: the best-scoring fitted DecisionTreeClassifier
    """
    cv_splitter = KFold(n_splits=config.FOLD_NUM, shuffle=True, random_state=random_seed)
    param_grid = {
        'min_samples_leaf': config.MIN_SAMPLE_LEAVES,
        'min_samples_split': config.MIN_SAMPLE_SPLIT,
        'max_depth': config.TREE_MAX_DEPTH,
    }
    search = GridSearchCV(
        DecisionTreeClassifier(class_weight='balanced', random_state=random_seed),
        param_grid,
        cv=cv_splitter,
        n_jobs=job_num,
        scoring=config.SCORING_FUNCTION,
    )
    search.score_sample_weight = True
    search.fit(data, label)
    return search.best_estimator_
def get_feature_importance_by_model(model):
    """
    Return the feature-importance vector of a fitted tree-based model.

    :param model: fitted estimator exposing a ``feature_importances_`` attribute
    :return: the model's ``feature_importances_`` value, unchanged
    """
    return getattr(model, 'feature_importances_')
def get_feature_set(data):
    """
    Split the raw feature matrix into its logical feature sets.

    Multi-column sets are reduced to a single column with IncrementalPCA, so
    every returned feature array has shape (n_samples, 1).

    :param data: numpy array with the fixed 28-column layout documented below
    :return: (feature_set_names, parallel_changes, commit_num, commit_density,
        file_edits, line_edits, dev_num, keywords, message, duration)
    """
    # Data separation of feature sets
    # Column layout assumed for `data` (inferred from the slices below —
    # confirm against the feature-extraction code that produced the CSVs):
    #   0      parallel changes
    #   1      commit count
    #   2      commit density
    #   3-7    file-edit features   -> PCA to 1 component
    #   8-9    line-edit features   -> PCA to 1 component
    #   10     developer count
    #   11-22  keyword features     -> PCA to 1 component
    #   23-26  message features     -> PCA to 1 component
    #   27     duration
    parallel_changes = data[:, 0].reshape(-1, 1)
    commit_num = data[:, 1].reshape(-1, 1)
    commit_density = data[:, 2].reshape(-1, 1)
    file_edits = IncrementalPCA(n_components=1).fit_transform(data[:, 3:8])
    line_edits = IncrementalPCA(n_components=1).fit_transform(data[:, 8:10])
    dev_num = data[:, 10].reshape(-1, 1)
    keywords = IncrementalPCA(n_components=1).fit_transform(data[:, 11:23])
    message = IncrementalPCA(n_components=1).fit_transform(data[:, 23:27])
    duration = data[:, 27].reshape(-1, 1)
    # Names are positionally aligned with the returned feature arrays.
    feature_sets = ['prl_changes', 'commit_num', 'commit_density', 'file_edits', 'line_edits', 'dev_num',
                    'keywords', 'message', 'duration']
    return feature_sets, parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords\
        , message, duration
def save_feature_correlation(language, data, label):
    """
    Append the Spearman correlation (and p-value) of each feature set with
    the label to the per-language feature-correlation report file.

    :param language: programming language (used in the output file name)
    :param data: feature matrix (see get_feature_set for the column layout)
    :param label: target labels
    """
    feature_sets, parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords, message\
        , duration = get_feature_set(data)
    features = [parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords, message
                , duration]
    out_path = '{}feature_correlation_{}.txt'.format(config.PREDICTION_RESULT_PATH, language)
    # One context-managed handle for all rows — the original re-opened the
    # file in append mode once per feature and never closed any handle.
    with open(out_path, 'a') as out_file:
        for i, feature in enumerate(features):
            corr, p_value = spearmanr(feature, label)
            out_file.write('{}:\t\t{} \t {}\n'.format(feature_sets[i], round(corr, 2), round(p_value, 2)))
def save_feature_correlation_dict(data, label):
    """
    Compute the Spearman correlation (and p-value) of each feature set with
    the label and return them as a flat dict.

    Best-effort: if the computation fails part-way through, the correlations
    gathered up to that point are returned.

    :param data: feature matrix (see get_feature_set for the column layout)
    :param label: target labels
    :return: dict mapping '<feature>_corr' / '<feature>_p_value' to floats
    """
    # (The original pre-assigned a literal feature_sets list that was
    # immediately overwritten by the unpacking below; that dead code is gone.)
    feature_sets, parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords, message\
        , duration = get_feature_set(data)
    features = [parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords, message
                , duration]
    correlation = {}
    try:
        for i, feature in enumerate(features):
            corr, p_value = spearmanr(feature, label)
            correlation[feature_sets[i] + '_corr'] = corr
            correlation[feature_sets[i] + '_p_value'] = p_value
    except Exception:
        # Best-effort: keep whatever was computed before the failure.
        # Narrowed from a bare `except:` + `return` in `finally`, which also
        # swallowed SystemExit/KeyboardInterrupt.
        pass
    return correlation
def save_feature_importance(repo_name, data, label):
    """
    Fit the best decision tree on the PCA-reduced feature sets and return
    its feature-importance vector (one importance per feature set).

    :param repo_name: repository name — currently unused by this function;
        kept for signature compatibility with its caller
    :param data: pandas DataFrame of features (converted to a numpy array here)
    :param label: target labels
    :return: importances array from the fitted tree, aligned with the
        feature-set order of get_feature_set
    """
    data = data.values
    feature_sets, parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords, message, duration \
        = get_feature_set(data)
    # Recombine the nine single-column feature sets into one matrix.
    feature_data = np.concatenate((parallel_changes, commit_num, commit_density, file_edits, line_edits,
                                   dev_num, keywords, message, duration), axis=1)
    return get_feature_importance_by_model(get_best_decision_tree(feature_data, label))
def baseline_classification(language, data, label):
    """
    Run the classification pipeline using only the parallel-changed-files
    feature, i.e. the baseline predictor.

    :param language: programming language; suffixed with '__baseline' so the
        stored results are distinguishable from the full-feature runs
    :param data: feature matrix (see get_feature_set for the column layout)
    :param label: target labels
    """
    _, parallel_changes, *_rest = get_feature_set(data)
    data_classification(language + '__baseline', parallel_changes, label)
############################################
############################################
from sklearn import metrics
import autosklearn.classification
from sklearn.svm import SVC
if __name__ == "__main__":
    # Logging
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s in %(threadName)s - %(asctime)s by %(name)-12s : %(message)s',
                        datefmt='%y-%m-%d %H:%M:%S')
    logging.info('Train/test of merge conflict prediction')
    # Data classification
    # Pair each data_* CSV with its label_* CSV and derive the repo name
    # from the file-name convention data_prediction_<lang>_<repo>.csv.
    data_files = glob.glob(config.PREDICTION_CSV_PATH + 'data_*')
    label_files = glob.glob(config.PREDICTION_CSV_PATH + 'label_*')
    repos_set = [files.split('/')[-1].split('_')[3].replace('.csv', '') for files in data_files]
    classification_result = []
    feature_importance = []
    languages = []
    corr = []
    for ind, data_path in enumerate(data_files):
        # Chronological order so the later train/test split is time-based.
        data_tmp = pd.read_csv(data_path).sort_values(by=['merge_commit_date'])
        label_tmp = pd.read_csv(data_path.replace('data_prediction', 'label_prediction')).sort_values(by=['merge_commit_date'])
        data_tmp = data_tmp.drop('merge_commit_date', axis=1)
        label_tmp = label_tmp.drop('merge_commit_date', axis=1)
        # Correlation
        # NOTE(review): bare `except:` silently swallows all failures
        # (including KeyboardInterrupt) for this repository.
        try:
            tmp_corr = save_feature_correlation_dict(data_tmp.to_numpy(), label_tmp.to_numpy())
            if len(tmp_corr) > 0:
                # NOTE(review): 'langugae' key typo is preserved as written —
                # consumers of corr_*.csv may key on the misspelled column.
                tmp_corr['langugae'] = repo_lang.get_lang(repos_set[ind].lower())
                tmp_corr['repository'] = repos_set[ind]
                corr.append(tmp_corr)
        except:
            pass
        # NOTE(review): this unconditional `continue` skips everything below
        # in the loop body — the script is in a "correlations only" mode and
        # the classification path beneath is currently dead code.
        continue
        # Time-based split: first TRAIN_RATE fraction trains, the rest tests.
        train_ind = int(data_tmp.shape[0] * config.TRAIN_RATE)
        data_train = data_tmp.iloc[0:train_ind, :]
        data_test = data_tmp.iloc[train_ind:-1, :]
        label_train = label_tmp.iloc[0:train_ind, :]['is_conflict'].tolist()
        label_test = label_tmp.iloc[train_ind:-1, :]['is_conflict'].tolist()
        # Skip repositories whose data is inconsistent or too small/skewed
        # to evaluate meaningfully.
        if len(label_test) != data_test.shape[0]:
            print('Inconsistent data: {}'.format(repos_set[ind]))
            continue
        if data_test.shape[0] < 50:
            print('Not enough merge scenarios: {}'.format(repos_set[ind]))
            continue
        if len(set(label_test)) != 2 or len(set(label_train)) != 2:
            print('One class is missed: {}'.format(repos_set[ind]))
            continue
        if len([i for i in label_test if i == 1]) < 10:
            print('Nor enough conflicting merge in the test batch for evaluation: {}'.format(repos_set[ind]))
            continue
        # k = k + data_tmp.shape[0]
        try:
            res = data_classification_wo_cv(repo_lang.get_lang(repos_set[ind].lower()), repos_set[ind] ,data_train, label_train, data_test, label_test)
            classification_result = classification_result + res
            feature_importance.append(save_feature_importance(repos_set[ind], data_train, label_train))
            languages.append(repo_lang.get_lang(repos_set[ind].lower()))
        except Exception as e:
            print('Error - {}'.format(e))
            continue
    corr_df = pd.DataFrame(corr)
    corr_df.to_csv(f'corr_{config.RANDOM_SEED}.csv')
    # NOTE(review): exit() here means the feature-importance and
    # classification-result sections below never run in the current state.
    exit()
    # Feature importance
    feature_importance = pd.DataFrame(feature_importance, columns=['prl_changes', 'commit_num', 'commit_density', 'file_edits', 'line_edits', 'dev_num',
                                                               'keywords', 'message', 'duration'])
    feature_importance['language'] = pd.Series(languages)
    feature_importance['repository'] = pd.Series(repos_set)
    # NOTE(review): dropna() returns a new DataFrame; its result is
    # discarded here, so no rows are actually removed.
    feature_importance.dropna()
    feature_importance.to_csv(f'feature_importance_{config.RANDOM_SEED}.csv')
    feature_importance_summery = feature_importance.drop('repository', axis=1).groupby('language').agg('median')
    feature_importance_summery.to_csv(f'feature_importance_summery_{config.RANDOM_SEED}.csv')
    # Classification result
    classification_result_df = pd.DataFrame(classification_result)
    classification_result_df.to_csv(f'res_{config.RANDOM_SEED}.csv')
| 43.452747 | 163 | 0.694401 |
188415bc541aaa91a4194a25f98e0ed82bdb2af2 | 25,321 | py | Python | lib/wx_lib.py | liveonnet/p3_server | 2dab6eab6e98b3ef0d26093eb461c635f5bc07b4 | [
"Apache-2.0"
] | null | null | null | lib/wx_lib.py | liveonnet/p3_server | 2dab6eab6e98b3ef0d26093eb461c635f5bc07b4 | [
"Apache-2.0"
] | null | null | null | lib/wx_lib.py | liveonnet/p3_server | 2dab6eab6e98b3ef0d26093eb461c635f5bc07b4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import time
import json
import random
import string
import asyncio
import aiohttp
from aiohttp.resolver import AsyncResolver
from hashlib import md5
from urllib.parse import quote
#-#from operator import itemgetter
#-#from itertools import chain
#-#from cStringIO import StringIO
if __name__ == '__main__':
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from lib.conf_lib import conf
from lib.WXBizMsgCrypt import WXBizMsgCrypt
from lib.tools_lib import pcformat
from lib.tools_lib import parseXml2Dict
from lib.applog import app_log
info, debug, error = app_log.info, app_log.debug, app_log.error
def createImage(self, nonce, encrypt_type, from_user, to_user, media_id):
    u'''
    Build the WeChat XML reply that sends the image identified by
    ``media_id`` back to ``from_user``, AES-encrypting it when required.

    NOTE(review): this takes ``self`` but appears at module level in this
    extract — it looks like a WXManager method detached from its class;
    confirm against the full source.

    :param nonce: request nonce, needed for AES encryption
    :param encrypt_type: 'aes' to encrypt the reply, anything else for plain
    :param from_user: original sender (becomes ToUserName of the reply)
    :param to_user: original receiver (becomes FromUserName of the reply)
    :param media_id: uploaded media id of the image to send
    :return: the (possibly encrypted) reply XML, or 'success' on failure
    '''
    ret_data = 'success'
    to_xml = WXManager.TPL_RETURN_IMAGE.format(TOUSER=from_user, FROMUSER=to_user, TIME=int(time.time()), MEDIA_ID=media_id)
    if encrypt_type == 'aes':
        encryp_helper = WXBizMsgCrypt(self.TOKEN, self.ENCODINGAESKEY, self.APPID)
        ret, encrypt_xml = encryp_helper.EncryptMsg(to_xml, nonce)
        if not ret:
            # ret == 0 means success; ship the encrypted payload.
            ret_data = encrypt_xml
        else:
            info(' %s %s', ret, encrypt_xml)
    return ret_data
def extractXml(self, nonce, encrypt_type, msg_sign, timestamp, from_xml):
    u'''
    Decrypt (when AES-encrypted) an inbound WeChat message and parse its XML
    into a dict.

    NOTE(review): this takes ``self`` but appears at module level in this
    extract — it looks like a class method detached from its class; confirm
    against the full source.

    :param nonce: request nonce used for decryption
    :param encrypt_type: 'aes' if the payload is encrypted
    :param msg_sign: message signature to verify during decryption
    :param timestamp: request timestamp used for decryption
    :param from_xml: raw (possibly encrypted) XML payload
    :return: parsed dict, or '' when decryption fails or the payload is empty
    '''
    d_data = ''
    #-# info('nonc %s encrypt_type %s msg_sign %s timestamp %s', nonce, encrypt_type, msg_sign, timestamp)
    #-# info('raw data: %s', from_xml)
    if encrypt_type == 'aes':
        decrypt_helper = WXBizMsgCrypt(self.TOKEN, self.ENCODINGAESKEY, self.APPID)
        ret, decryp_xml = decrypt_helper.DecryptMsg(from_xml, msg_sign, timestamp, nonce)
        if not ret:
            # ret == 0 means success; continue with the decrypted XML.
            from_xml = decryp_xml
        else:
            info(' %s %s', ret, decryp_xml)
            return d_data
    # parse to dict
    if from_xml:
        d_data = parseXml2Dict(from_xml)
        #-# info(':\n%s', pcformat(d_data))
    return d_data
if __name__ == '__main__':
    from lib.handler_lib import CommonHandler
    #-# mgr.getSelfMenu()
    # NOTE(review): `test_main` is not defined in this extract — it appears
    # to have been stripped from the dump; confirm against the full file.
    loop = asyncio.get_event_loop()
    try:
        task = asyncio.ensure_future(test_main(loop))
        loop.run_until_complete(task)
    except KeyboardInterrupt:
        info('cancel on KeyboardInterrupt..')
        # Cancel the task, then spin the loop so the cancellation is
        # delivered, and consume the exception to silence the warning.
        task.cancel()
        loop.run_forever()
        task.exception()
    finally:
        # NOTE(review): sys.exit(0) inside `finally` forces a zero exit
        # status on every path, masking failures.
        loop.stop()
        sys.exit(0)
| 39.50234 | 229 | 0.529995 |
43e657cee1737539636db5f58dee3a853afc6290 | 1,565 | py | Python | django_fuzzytest/management/commands/fuzzytest.py | creotiv/django-fuzzytest | 6102ac6e7aee3bf81ff5186fbe5bfb01e688acdc | [
"BSD-3-Clause"
] | 8 | 2015-08-23T19:28:52.000Z | 2021-12-03T06:36:58.000Z | django_fuzzytest/management/commands/fuzzytest.py | creotiv/django-fuzzytest | 6102ac6e7aee3bf81ff5186fbe5bfb01e688acdc | [
"BSD-3-Clause"
] | null | null | null | django_fuzzytest/management/commands/fuzzytest.py | creotiv/django-fuzzytest | 6102ac6e7aee3bf81ff5186fbe5bfb01e688acdc | [
"BSD-3-Clause"
] | 1 | 2021-12-03T06:37:00.000Z | 2021-12-03T06:37:00.000Z | # coding: utf-8
from __future__ import unicode_literals
import time
import logging
import traceback
from optparse import make_option
import json
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django_fuzzytest.runner import FuzzyRunner
logger = logging.getLogger(__file__)
| 30.686275 | 81 | 0.578275 |
43e9c5052f55a709d60fa878953b3e380fa1ce96 | 6,727 | py | Python | save_sim/maker.py | jrbourbeau/composition | f8debd81b0467a6094d5ba56a5f0fc6047369d30 | [
"MIT"
] | null | null | null | save_sim/maker.py | jrbourbeau/composition | f8debd81b0467a6094d5ba56a5f0fc6047369d30 | [
"MIT"
] | null | null | null | save_sim/maker.py | jrbourbeau/composition | f8debd81b0467a6094d5ba56a5f0fc6047369d30 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import glob
import re
import os
import argparse
import time
import getpass
import composition.support_functions.paths as paths
import composition.support_functions.simfunctions as simfunctions
from composition.support_functions.checkdir import checkdir
if __name__ == "__main__":
    # Build and submit an HTCondor DAG that runs save_sim.py in batches per
    # simulation set, followed by one merge job per set.
    # NOTE(review): getjobID, get_argdict, make_submit_script and
    # get_merge_argdict are not visible in this extract — they appear to be
    # defined earlier in the full file; confirm before editing.
    # Setup global path names
    mypaths = paths.Paths()
    checkdir(mypaths.comp_data_dir)
    # Set up condor directory
    condor_dir = '/scratch/{}/condor_composition'.format(getpass.getuser())
    for directory in ['errors', 'logs', 'outs', 'submit_scripts']:
        checkdir(condor_dir + '/' + directory + '/')
    simoutput = simfunctions.getSimOutput()
    default_sim_list = ['7006', '7579', '7241', '7263', '7791',
                        '7242', '7262', '7851', '7007', '7784']
    p = argparse.ArgumentParser(
        description='Runs save_sim.py on cluster en masse',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=simoutput)
    p.add_argument('-s', '--sim', dest='sim', nargs='*',
                   choices=default_sim_list,
                   default=default_sim_list,
                   help='Simulation to run over')
    p.add_argument('-n', '--n', dest='n', type=int,
                   default=800,
                   help='Number of files to run per batch')
    p.add_argument('--test', dest='test', action='store_true',
                   default=False,
                   help='Option for running test off cluster')
    p.add_argument('--maxjobs', dest='maxjobs', type=int,
                   default=3000,
                   help='Maximum number of jobs to run at a given time.')
    p.add_argument('--overwrite', dest='overwrite',
                   default=False, action='store_true',
                   help='Overwrite existing merged files')
    p.add_argument('--remove', dest='remove',
                   default=False, action='store_true',
                   help='Remove unmerged hdf5 files')
    args = p.parse_args()
    cwd = os.getcwd()
    # Submit script for the per-batch save_sim jobs.
    jobID = 'save_sim'
    jobID = getjobID(jobID, condor_dir)
    cmd = '{}/save_sim.py'.format(cwd)
    argdict = get_argdict(mypaths.comp_data_dir, **vars(args))
    condor_script = '{}/submit_scripts/{}.submit'.format(condor_dir, jobID)
    make_submit_script(cmd, jobID, condor_script, condor_dir)
    # Submit script for the per-simulation merge jobs.
    merge_jobID = 'merge_sim'
    merge_jobID = getjobID(merge_jobID, condor_dir)
    merge_cmd = '{}/merge.py'.format(cwd)
    merge_argdict = get_merge_argdict(**vars(args))
    merge_condor_script = '{}/submit_scripts/{}.submit'.format(
        condor_dir, merge_jobID)
    make_submit_script(merge_cmd, merge_jobID, merge_condor_script, condor_dir)
    # Set up dag file
    jobID = 'save_sim_merge'
    jobID = getjobID(jobID, condor_dir)
    dag_file = '{}/submit_scripts/{}.submit'.format(condor_dir, jobID)
    checkdir(dag_file)
    with open(dag_file, 'w') as dag:
        for sim in argdict.keys():
            parent_string = 'Parent '
            if len(argdict[sim]) < 1:
                continue
            # One JOB/VARS pair per batch of files for this simulation; all
            # batches become parents of the single merge job below.
            for i, arg in enumerate(argdict[sim]):
                dag.write('JOB sim_{}_p{} '.format(sim, i) +
                          condor_script + '\n')
                dag.write('VARS sim_{}_p{} '.format(sim, i) +
                          'ARGS="' + arg + '"\n')
                parent_string += 'sim_{}_p{} '.format(sim, i)
            dag.write('JOB merge_{} '.format(
                sim) + merge_condor_script + '\n')
            dag.write('VARS merge_{} '.format(sim) +
                      'ARGS="' + merge_argdict[sim] + '"\n')
            child_string = 'Child merge_{}'.format(sim)
            # Parent <all batches> Child merge_<sim>: merge runs last.
            dag.write(parent_string + child_string + '\n')
    # Submit jobs
    os.system('condor_submit_dag -maxjobs {} {}'.format(args.maxjobs, dag_file))
| 36.166667 | 95 | 0.581388 |
43e9f411bc2778ec1b8d67dbf67237a43e84adad | 7,257 | py | Python | xlsxwriter_tables/xlsxwriter_tables.py | johncmacy/xlsxwriter-tables | 8e4db55d8d4bbc66209e23f0852d7351f40db587 | [
"MIT"
] | null | null | null | xlsxwriter_tables/xlsxwriter_tables.py | johncmacy/xlsxwriter-tables | 8e4db55d8d4bbc66209e23f0852d7351f40db587 | [
"MIT"
] | null | null | null | xlsxwriter_tables/xlsxwriter_tables.py | johncmacy/xlsxwriter-tables | 8e4db55d8d4bbc66209e23f0852d7351f40db587 | [
"MIT"
] | null | null | null | from typing import Union
| 30.2375 | 126 | 0.51564 |
43eab223999e2604b87fae88107217a209d85e53 | 859 | py | Python | teachers_toolkit/grading_system/migrations/0003_auto_20180706_1923.py | luiscberrocal/teachers_toolkit | 078c55c4a9ad9c5a74e1484d80ac34f3b26b69c9 | [
"MIT"
] | null | null | null | teachers_toolkit/grading_system/migrations/0003_auto_20180706_1923.py | luiscberrocal/teachers_toolkit | 078c55c4a9ad9c5a74e1484d80ac34f3b26b69c9 | [
"MIT"
] | null | null | null | teachers_toolkit/grading_system/migrations/0003_auto_20180706_1923.py | luiscberrocal/teachers_toolkit | 078c55c4a9ad9c5a74e1484d80ac34f3b26b69c9 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.7 on 2018-07-06 19:23
from django.db import migrations, models
import django_extensions.db.fields
| 28.633333 | 135 | 0.620489 |
43ebc0969b2793f79841f3adb90ba457341afae3 | 67,834 | py | Python | sdk/python/pulumi_google_native/vmmigration/v1alpha1/outputs.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 44 | 2021-04-18T23:00:48.000Z | 2022-02-14T17:43:15.000Z | sdk/python/pulumi_google_native/vmmigration/v1alpha1/outputs.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 354 | 2021-04-16T16:48:39.000Z | 2022-03-31T17:16:39.000Z | sdk/python/pulumi_google_native/vmmigration/v1alpha1/outputs.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 8 | 2021-04-24T17:46:51.000Z | 2022-01-05T10:40:21.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'AppliedLicenseResponse',
'CloneJobResponse',
'ComputeEngineTargetDefaultsResponse',
'ComputeEngineTargetDetailsResponse',
'ComputeSchedulingResponse',
'CutoverJobResponse',
'NetworkInterfaceResponse',
'ReplicationCycleResponse',
'ReplicationSyncResponse',
'SchedulePolicyResponse',
'SchedulingNodeAffinityResponse',
'StatusResponse',
'VmUtilizationInfoResponse',
'VmUtilizationMetricsResponse',
'VmwareSourceDetailsResponse',
'VmwareVmDetailsResponse',
]
| 38.542045 | 653 | 0.637306 |
43ed227cd2674901d74eb5739cfb902ec959b334 | 6,300 | py | Python | tests/test_utils.py | yoshikyoto/django-filter-0.14 | b5166e93f4c0fec5f5e8a73b6d1e8e0550b3929b | [
"BSD-3-Clause"
] | null | null | null | tests/test_utils.py | yoshikyoto/django-filter-0.14 | b5166e93f4c0fec5f5e8a73b6d1e8e0550b3929b | [
"BSD-3-Clause"
] | 1 | 2016-08-23T18:20:47.000Z | 2016-08-23T19:16:07.000Z | tests/test_utils.py | yoshikyoto/django-filter-0.14 | b5166e93f4c0fec5f5e8a73b6d1e8e0550b3929b | [
"BSD-3-Clause"
] | null | null | null |
import unittest
import django
from django.test import TestCase
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django_filters.utils import get_model_field, resolve_field
from django_filters.exceptions import FieldLookupError
from .models import User
from .models import Article
from .models import Book
from .models import HiredWorker
from .models import Business
def test_invalid_transformed_lookup_expression(self):
model_field = Article._meta.get_field('published')
with self.assertRaises(FieldLookupError) as context:
resolve_field(model_field, 'date__invalid_lookup')
exc = str(context.exception)
self.assertIn(str(model_field), exc)
self.assertIn('date__invalid_lookup', exc)
| 36 | 108 | 0.644762 |
43ee04853e52a2ff347eaf6785c0c115ae6ad8aa | 164 | py | Python | agc/agc007/agc007a.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | 1 | 2019-08-21T00:49:34.000Z | 2019-08-21T00:49:34.000Z | agc/agc007/agc007a.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | agc/agc007/agc007a.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | H, W = map(int, input().split())
A = [input() for _ in range(H)]
if H + W - 1 == sum(a.count('#') for a in A):
print('Possible')
else:
print('Impossible')
| 20.5 | 45 | 0.542683 |
43f06ebbb7637e1e6c0f53bef04ad021c74daf38 | 2,188 | py | Python | relfs/relfs/fuse/mount_root.py | matus-chochlik/various | 2a9f5eddd964213f7d1e1ce8328e2e0b2a8e998b | [
"MIT"
] | 1 | 2020-10-25T12:28:50.000Z | 2020-10-25T12:28:50.000Z | relfs/relfs/fuse/mount_root.py | matus-chochlik/various | 2a9f5eddd964213f7d1e1ce8328e2e0b2a8e998b | [
"MIT"
] | null | null | null | relfs/relfs/fuse/mount_root.py | matus-chochlik/various | 2a9f5eddd964213f7d1e1ce8328e2e0b2a8e998b | [
"MIT"
] | null | null | null | # coding=utf-8
#------------------------------------------------------------------------------#
import os
import time
import fuse
import errno
from .item import RelFuseItem
from .static_dir import StaticDirectory
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
| 35.290323 | 80 | 0.359232 |
43f1172d32150bd985177a2463faa8dd3ab137f9 | 3,935 | py | Python | clip_onnx/clip_converter.py | EmbarkStudios/CLIP-ONNX | 52f4ce4d603722cb934d27b570f7523f26f1ef7f | [
"MIT"
] | null | null | null | clip_onnx/clip_converter.py | EmbarkStudios/CLIP-ONNX | 52f4ce4d603722cb934d27b570f7523f26f1ef7f | [
"MIT"
] | null | null | null | clip_onnx/clip_converter.py | EmbarkStudios/CLIP-ONNX | 52f4ce4d603722cb934d27b570f7523f26f1ef7f | [
"MIT"
] | null | null | null | import torch
import onnx
from torch import nn
from onnxruntime.quantization import quantize_qat, quantize_dynamic, QuantType
from .utils import Textual, DEFAULT_EXPORT
| 41.421053 | 88 | 0.629225 |
43f1186dd806bfa7da9c44b01e37a130943f2f23 | 6,493 | py | Python | electrum/gui/kivy/uix/dialogs/add_token_dialog.py | VIPSTARCOIN-electrum/electrum-vips | ebe93c09717ea44c049fcb9c3f366af64dc87b37 | [
"MIT"
] | 2 | 2019-07-17T23:09:42.000Z | 2019-10-25T05:44:04.000Z | electrum/gui/kivy/uix/dialogs/add_token_dialog.py | VIPSTARCOIN-electrum/electrum-vips | ebe93c09717ea44c049fcb9c3f366af64dc87b37 | [
"MIT"
] | null | null | null | electrum/gui/kivy/uix/dialogs/add_token_dialog.py | VIPSTARCOIN-electrum/electrum-vips | ebe93c09717ea44c049fcb9c3f366af64dc87b37 | [
"MIT"
] | 3 | 2019-08-10T15:14:29.000Z | 2021-05-26T20:02:02.000Z | from datetime import datetime
from kivy.app import App
from kivy.factory import Factory
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.uix.button import Button
from electrum.gui.kivy.i18n import _
from electrum.bitcoin import Token
from electrum.util import parse_token_URI, InvalidTokenURI
from .choice_dialog import ChoiceDialog
Builder.load_string('''
#:import partial functools.partial
#:import _ electrum.gui.kivy.i18n._
<AddTokenDialog>
id: popup
title: _('Add Token')
contract_addr: ''
BoxLayout:
orientation: 'vertical'
BoxLabel:
text: _('Contract Address')
SendReceiveBlueBottom:
size_hint: 1, None
height: self.minimum_height
BlueButton:
text: popup.contract_addr
shorten: True
on_release: Clock.schedule_once(lambda dt: app.show_info(_('Copy and paste the contract address using the Paste button, or use the camera to scan a QR code.')))
BoxLayout:
size_hint: 1, None
height: '48dp'
Button:
text: _('Paste')
on_release: popup.do_paste()
IconButton:
id: qr
size_hint: 0.6, 1
on_release: Clock.schedule_once(lambda dt: app.scan_qr(on_complete=popup.on_qr))
icon: 'atlas://electrum/gui/kivy/theming/light/camera'
AddTokenItem:
my_addr: app.wallet.get_addresses_sort_by_balance()[0]
title: _('My Address:')
description: str(self.my_addr)
action: partial(root.address_select_dialog, self)
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.5
Button:
text: 'Cancel'
size_hint: 0.5, None
height: '48dp'
on_release: popup.dismiss()
Button:
text: 'OK'
size_hint: 0.5, None
height: '48dp'
on_release:
root.add_token()
popup.dismiss()
''')
| 36.273743 | 176 | 0.588788 |
43f27c688e68efd3839a07cc972cfa2dd88cc2cc | 17,625 | py | Python | statey/syms/encoders.py | cfeenstra67/statey | 6d127ed48265e2e072fbb26486458a4b28a333ec | [
"MIT"
] | 4 | 2021-02-16T19:34:38.000Z | 2022-01-31T16:44:14.000Z | statey/syms/encoders.py | cfeenstra67/statey | 6d127ed48265e2e072fbb26486458a4b28a333ec | [
"MIT"
] | null | null | null | statey/syms/encoders.py | cfeenstra67/statey | 6d127ed48265e2e072fbb26486458a4b28a333ec | [
"MIT"
] | null | null | null | import abc
import base64
from datetime import date, datetime
import dataclasses as dc
from typing import Type as PyType, Any, Dict, Optional
import marshmallow as ma
import pickle
import pluggy
import statey as st
from statey.syms import types, utils, Object
def create_encoder_plugin_manager():
    """Build the default pluggy plugin manager used for encoders.

    The manager comes from statey's shared factory and is initialised with
    the encoder hook specifications before being handed back to the caller.
    """
    manager = st.create_plugin_manager()
    manager.add_hookspecs(EncoderHooks)
    return manager
class MarshmallowValueEncoder(MarshmallowEncoder):
"""
Simple marshmallow encoder for value types
"""
base_field: ma.fields.Field
type_cls: PyType[types.Type]
serializable: bool
class DateLikeFuzzyDeserialize:
""""""
ENCODER_CLASSES = [
DefaultEncoder,
IntegerEncoder,
FloatEncoder,
BooleanEncoder,
StringEncoder,
ArrayEncoder,
StructEncoder,
NativeFunctionEncoder,
MapEncoder,
TypeEncoder,
DateEncoder,
DateTimeEncoder,
]
# We'll prefer a better pickling module if we have one.
# Each optional backend is appended to ENCODER_CLASSES only when its import
# succeeds; a missing backend degrades to a RuntimeWarning instead of a
# hard failure, so the core encoders above always remain available.
try:
    import dill
except ImportError:
    import warnings
    warnings.warn("Dill is not installed.", RuntimeWarning)
else:
    ENCODER_CLASSES.append(DillFunctionEncoder)
try:
    import cloudpickle
except ImportError:
    import warnings
    warnings.warn("Cloudpickle is not installed.", RuntimeWarning)
else:
    ENCODER_CLASSES.append(CloudPickleFunctionEncoder)
def register(registry: Optional["Registry"] = None) -> None:
    """Register every class in ENCODER_CLASSES with *registry*.

    Falls back to the global ``st.registry`` when no registry is supplied.
    """
    target = st.registry if registry is None else registry
    for encoder_cls in ENCODER_CLASSES:
        target.register(encoder_cls)
| 29.228856 | 109 | 0.633816 |
43f28356d6bbc800add9ebabe90e54e8e11a08d4 | 13,558 | py | Python | src/data.py | saattrupdan/danish-asr-models | 967e558d0032d67afbe72b625f3cad0eca65cc2a | [
"MIT"
] | 2 | 2022-03-10T10:47:43.000Z | 2022-03-11T09:24:34.000Z | src/data.py | saattrupdan/danish-asr-models | 967e558d0032d67afbe72b625f3cad0eca65cc2a | [
"MIT"
] | null | null | null | src/data.py | saattrupdan/danish-asr-models | 967e558d0032d67afbe72b625f3cad0eca65cc2a | [
"MIT"
] | null | null | null | '''Functions related to the data loading and processing'''
from transformers import (Wav2Vec2CTCTokenizer,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor)
from datasets import (load_dataset as ds_load_dataset,
Dataset,
DatasetDict,
Audio)
from unicodedata import normalize
from typing import Optional, Tuple
from pathlib import Path
import json
import re
def clean_transcription(doc: str) -> str:
'''Cleans the transcription of a document.
Args:
doc (str):
A document to be cleaned.
Returns:
str:
The cleaned document.
'''
# NFKC normalize the transcriptions
doc = normalize('NFKC', doc)
# Remove punctuation
regex = r'[\[\]\{\}\(\)\,\?\.\!\-\\\;\:\"\\'\\%\\\\n\r\\]'
doc = re.sub(regex, '', doc)
# Remove non-vocabulary characters
conversion_dict = {
'aa': '',
'': 'g',
'': 'n',
'': 'n',
'': 'e',
'': 'mikro',
'': ' paragraf ',
'': ' promille ',
'': 'u',
'': 's',
'': 'e',
'': 'a',
'': 'ue',
'': 'e',
'': 'c',
'': '',
'': 'i',
'': 's',
'': 'i',
'': 'e',
'': 'd',
'': 'a',
'': 'o',
'': 'th',
'': 'i',
'': '',
'': 'c',
'': 's',
'(?<![0-9])(18|19|20)([0-9]{2})(?![0-9])': '\1 \2',
'1000': ' tusind ',
'[2-9]000': ' \1 tusind',
'100': ' hundrede ',
'[2-9]00': ' \1 hundrede',
'(?<![0-9])([0-9])([0-9])(?![0-9])': '\2 og \1\0',
'10': ' ti ',
'20': ' tyve ',
'30': ' tredive ',
'40': ' fyrre ',
'50': ' halvtreds ',
'60': ' treds ',
'70': ' halvfjerds ',
'80': ' firs ',
'90': ' halvfems ',
'0': ' nul ',
'1': ' et ',
'2': ' to ',
'3': ' tre ',
'4': ' fire ',
'5': ' fem ',
'6': ' seks ',
'7': ' syv ',
'8': ' otte ',
'9': ' ni ',
}
for key, value in conversion_dict.items():
doc = re.sub(key, value, doc)
# Remove empty whitespace
doc = re.sub(u'\u0301', ' ', doc)
doc = re.sub(u'\u200b', ' ', doc)
# Replace spaces with a pipe, to emphasise the word boundaries
doc = re.sub(r' +', '|', doc)
# Make the transcription lowercase and strip whitespace
doc = doc.lower().strip().strip('|')
return doc
| 34.411168 | 79 | 0.52906 |
43f298d87e261cc2cbf422453d37df22dea68372 | 1,604 | py | Python | etravel/urls.py | zahir1509/project-ap-etravel | 2113a84ae4340be0e8cfa2676f448878c625e3e3 | [
"MIT"
] | 1 | 2020-12-06T17:49:11.000Z | 2020-12-06T17:49:11.000Z | etravel/urls.py | zahir1509/project-ap-etravel | 2113a84ae4340be0e8cfa2676f448878c625e3e3 | [
"MIT"
] | null | null | null | etravel/urls.py | zahir1509/project-ap-etravel | 2113a84ae4340be0e8cfa2676f448878c625e3e3 | [
"MIT"
] | 1 | 2020-12-07T14:20:41.000Z | 2020-12-07T14:20:41.000Z | """etravel URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
from main import views
# URL routing table: each entry maps a path to its view in `main.views`;
# static() appends routes for serving files under MEDIA_ROOT at MEDIA_URL.
urlpatterns = [
    path('admin/', admin.site.urls),
    # Landing page and authentication flows.
    path('', views.homepage, name = 'home'),
    path('login/', views.loginPage, name = 'login'),
    path('logout/', views.logoutUser, name = 'logout'),
    path('signup/', views.signupPage, name = 'signup'),
    # Hotel browsing and account management.
    path('browsehotel/', views.filterhotel, name = 'browsehotel'),
    path('myaccount/', views.accountpage, name='myaccount'),
    path('editprofile/', views.edit_profile, name='editprofile'),
    path('change-password/', views.change_password, name='editpassword'),
    # Booking workflow: create a booking, view one hotel, cancel a booking.
    path('hotel_booking/', views.bookhotel, name='bookhotel'),
    path('hotel/<int:hotel_id>', views.hotelpage, name='hotelpage'),
    path('cancelbooking/', views.cancelbooking, name='cancelbooking'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 42.210526 | 77 | 0.706983 |
43f37d4e6dabec0097acd8b5f0892f346b8200d5 | 4,447 | py | Python | adet/data/video_data/yvos_annot_condinst.py | Tanveer81/BoxVOS | c30aa319f18f3fbee2a25e0ed25cb006a4598300 | [
"BSD-2-Clause"
] | 4 | 2022-02-16T02:48:27.000Z | 2022-03-08T06:54:32.000Z | adet/data/video_data/yvos_annot_condinst.py | Tanveer81/BoxVOS | c30aa319f18f3fbee2a25e0ed25cb006a4598300 | [
"BSD-2-Clause"
] | null | null | null | adet/data/video_data/yvos_annot_condinst.py | Tanveer81/BoxVOS | c30aa319f18f3fbee2a25e0ed25cb006a4598300 | [
"BSD-2-Clause"
] | null | null | null | import json
import time
from glob import glob
from pathlib import Path
from adet.data.video_data.util import *
from PIL import Image, ImageFont, ImageDraw
import os
import random
# Object category names appearing in the annotations handled by this script.
# NOTE(review): labels are presumably encoded elsewhere as indices into this
# list, so the ordering should not be changed -- confirm against callers.
categories = ['airplane', 'ape', 'bear', 'bike', 'bird', 'boat', 'bucket', 'bus', 'camel', 'cat',
              'cow', 'crocodile', 'deer', 'dog', 'dolphin', 'duck', 'eagle', 'earless_seal',
              'elephant', 'fish', 'fox', 'frisbee', 'frog', 'giant_panda', 'giraffe', 'hand',
              'hat', 'hedgehog', 'horse', 'knife', 'leopard', 'lion', 'lizard', 'monkey',
              'motorbike', 'mouse', 'others', 'owl', 'paddle', 'parachute', 'parrot', 'penguin',
              'person', 'plant', 'rabbit', 'raccoon', 'sedan', 'shark', 'sheep', 'sign',
              'skateboard', 'snail', 'snake', 'snowboard', 'squirrel', 'surfboard', 'tennis_racket',
              'tiger', 'toilet', 'train', 'truck', 'turtle', 'umbrella', 'whale', 'zebra']
if __name__ == '__main__':
    main()
| 43.174757 | 106 | 0.527097 |
43f63cbc9ceb8f44b281dc9e30baf482c1545385 | 1,342 | py | Python | lookup_table.py | yishayv/lyacorr | deed114b4cadd4971caec68e2838a5fac39827b1 | [
"MIT"
] | 2 | 2017-03-21T14:18:35.000Z | 2020-03-30T20:51:33.000Z | lookup_table.py | yishayv/lyacorr | deed114b4cadd4971caec68e2838a5fac39827b1 | [
"MIT"
] | null | null | null | lookup_table.py | yishayv/lyacorr | deed114b4cadd4971caec68e2838a5fac39827b1 | [
"MIT"
] | null | null | null | import numpy as np
def fast_linear_interpolate(f, x):
    """Linearly interpolate the evenly spaced samples ``f`` at positions ``x``.

    :param f: array of evenly spaced function values
    :param x: array of fractional positions to sample
    :type f: np.multiarray.ndarray
    :type x: np.multiarray.ndarray
    :rtype: np.multiarray.ndarray
    """
    lower = np.floor(x).astype(int)
    upper = np.add(lower, 1)
    frac = x - lower
    # Clamp the upper neighbour so that sampling exactly at the last grid
    # point does not read past the end of the array.
    safe_upper = np.clip(upper, a_min=0, a_max=f.size - 1)
    return (1.0 - frac) * f[lower] + frac * f[safe_upper]
| 29.822222 | 100 | 0.622206 |
43f6879757f40989d16e1db4126c95e8352e1759 | 492 | py | Python | src/seedwork/domain/rules.py | pgorecki/python-ddd | 0073ccce35c651be263f5d7d3d63f9a49bc0b78a | [
"MIT"
] | 10 | 2022-03-16T19:26:51.000Z | 2022-03-31T23:50:51.000Z | src/seedwork/domain/rules.py | pgorecki/python-ddd | 0073ccce35c651be263f5d7d3d63f9a49bc0b78a | [
"MIT"
] | null | null | null | src/seedwork/domain/rules.py | pgorecki/python-ddd | 0073ccce35c651be263f5d7d3d63f9a49bc0b78a | [
"MIT"
] | 2 | 2022-03-16T19:26:54.000Z | 2022-03-27T13:21:02.000Z | from pydantic import BaseModel
| 23.428571 | 63 | 0.666667 |
43f6f242e391b123212da34e3f976064029b361e | 627 | py | Python | exs/mundo_2/python/067.py | QuatroQuatros/exercicios-CeV | c9b995b717fe1dd2c2eee3557db0161390bc78b0 | [
"MIT"
] | 45 | 2021-01-02T18:36:01.000Z | 2022-03-26T19:46:47.000Z | exs/mundo_2/python/067.py | QuatroQuatros/exercicios-CeV | c9b995b717fe1dd2c2eee3557db0161390bc78b0 | [
"MIT"
] | 24 | 2020-12-31T17:23:16.000Z | 2021-03-11T19:44:36.000Z | exs/mundo_2/python/067.py | QuatroQuatros/exercicios-CeV | c9b995b717fe1dd2c2eee3557db0161390bc78b0 | [
"MIT"
] | 28 | 2020-12-30T15:57:16.000Z | 2022-03-26T19:46:49.000Z | """
Desafio 067
Problema: Faa um programa que mostre a tabuada de vrios nmeros,
um de cada vez, para cada valor digitado pelo usurio.
O programa ser interrompido quando o nmero solicitado
for negativo.
Resoluo do problema:
"""
print('-' * 20)
print(f'{" Tabuada v3.0 ":~^20}')
print('-' * 20)
while True:
tabuada = int(input('Tabuada desejada: '))
print('-' * 20)
if tabuada < 0:
break
for cont in range(0, 11):
print(f'{tabuada} x {cont:2} = {tabuada * cont:2}')
print('-' * 20)
print(f'{" TABUADA FINALIZADA ":~^30}\nFOI UM PRAZER AJUDA-LO!!!')
| 23.222222 | 66 | 0.601276 |
43fbb641614733e9b5376e1fc262a24a13b94350 | 1,492 | py | Python | pyexcel_xlsx/__init__.py | pyexcel/pyexcel-xlsx | 3b3639d12270cc10fff32651280d139ec65bb354 | [
"BSD-3-Clause"
] | 101 | 2016-02-22T03:51:39.000Z | 2022-03-08T02:21:50.000Z | pyexcel_xlsx/__init__.py | pyexcel/pyexcel-xlsx | 3b3639d12270cc10fff32651280d139ec65bb354 | [
"BSD-3-Clause"
] | 46 | 2016-05-09T14:16:31.000Z | 2022-02-25T18:40:57.000Z | pyexcel_xlsx/__init__.py | pyexcel/pyexcel-xlsx | 3b3639d12270cc10fff32651280d139ec65bb354 | [
"BSD-3-Clause"
] | 23 | 2016-01-29T12:26:02.000Z | 2021-12-30T04:32:20.000Z | """
pyexcel_xlsx
~~~~~~~~~~~~~~~~~~~
The lower level xlsx file format handler using openpyxl
:copyright: (c) 2015-2019 by Onni Software Ltd & its contributors
:license: New BSD License
"""
from pyexcel_io.io import get_data as read_data
from pyexcel_io.io import isstream
from pyexcel_io.io import save_data as write_data
from pyexcel_io.plugins import IOPluginInfoChainV2
__FILE_TYPE__ = "xlsx"
# Register this module's plugins with pyexcel-io:
#   * XLSXBook reads .xlsx/.xlsm workbooks from files and in-memory streams,
#   * XLSXBookInContent reads the same formats from raw byte content,
#   * XLSXWriter writes both formats to files and in-memory streams.
# All three operate on binary streams.
IOPluginInfoChainV2(__name__).add_a_reader(
    relative_plugin_class_path="xlsxr.XLSXBook",
    locations=["file", "memory"],
    file_types=[__FILE_TYPE__, "xlsm"],
    stream_type="binary",
).add_a_reader(
    relative_plugin_class_path="xlsxr.XLSXBookInContent",
    locations=["content"],
    file_types=[__FILE_TYPE__, "xlsm"],
    stream_type="binary",
).add_a_writer(
    relative_plugin_class_path="xlsxw.XLSXWriter",
    locations=["file", "memory"],
    file_types=[__FILE_TYPE__, "xlsm"],
    stream_type="binary",
)
def save_data(afile, data, file_type=None, **keywords):
    """Standalone module function for writing module supported file type."""
    # Anonymous streams carry no file extension, so default their type.
    effective_type = file_type
    if isstream(afile) and effective_type is None:
        effective_type = __FILE_TYPE__
    write_data(afile, data, file_type=effective_type, **keywords)
def get_data(afile, file_type=None, **keywords):
    """Standalone module function for reading module supported file type."""
    # Anonymous streams carry no file extension, so default their type.
    effective_type = file_type
    if isstream(afile) and effective_type is None:
        effective_type = __FILE_TYPE__
    return read_data(afile, file_type=effective_type, **keywords)
| 31.744681 | 75 | 0.72185 |
43fc77cfe764566289284319cba58cc6a6b81ffc | 12,775 | py | Python | GeneralTools/graph_funcs/generative_model_metric.py | frhrdr/MMD-GAN | 7522093498b658026344541ddd5c248095763fb6 | [
"Apache-2.0"
] | null | null | null | GeneralTools/graph_funcs/generative_model_metric.py | frhrdr/MMD-GAN | 7522093498b658026344541ddd5c248095763fb6 | [
"Apache-2.0"
] | null | null | null | GeneralTools/graph_funcs/generative_model_metric.py | frhrdr/MMD-GAN | 7522093498b658026344541ddd5c248095763fb6 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import tensorflow as tf
from tensorflow.contrib import gan as tfgan
from GeneralTools.graph_funcs.my_session import MySession
from GeneralTools.math_funcs.graph_func_support import mean_cov_np, trace_sqrt_product_np
from GeneralTools.misc_fun import FLAGS
def sliced_wasserstein_distance(self, x_batch, y_batch, num_batch=128, ckpt_folder=None, ckpt_file=None):
    """ This function calculates the sliced wasserstein distance between real and fake images.
    This function does not work as expected, swd gives nan
    :param x_batch: batch tensor of images, assumed to lie in [-1, 1] -- TODO confirm range with callers
    :param y_batch: batch tensor of images to compare against, same layout as x_batch
    :param num_batch: number of batches to accumulate before computing the distance
    :param ckpt_folder: checkpoint folder forwarded to MySession for restoring model weights
    :param ckpt_file: checkpoint file forwarded to MySession
    :return: list of (distance_real, distance_fake) tuples, one per Laplacian pyramid level
    """
    # Evaluate the two batch tensors num_batch times and collect the results.
    with MySession(load_ckpt=True) as sess:
        batches = sess.run_m_times(
            [x_batch, y_batch],
            ckpt_folder=ckpt_folder, ckpt_file=ckpt_file,
            max_iter=num_batch, trace=True)
    # get x_images and y_images
    # NOTE(review): (x + 1) * 128.5 maps [-1, 1] to [0, 257], not the usual
    # 8-bit range [0, 255] (which would need 127.5) -- confirm this is intended.
    x_images = (tf.constant(np.concatenate([batch[0] for batch in batches], axis=0)) + 1.0) * 128.5
    y_images = (tf.constant(np.concatenate([batch[1] for batch in batches], axis=0)) + 1.0) * 128.5
    # Transpose channels-first data to NHWC before handing it to tfgan.
    if self.image_format in {'channels_first', 'NCHW'}:
        x_images = tf.transpose(x_images, perm=(0, 2, 3, 1))
        y_images = tf.transpose(y_images, perm=(0, 2, 3, 1))
    print('images obtained, shape: {}'.format(x_images.shape))
    # sliced_wasserstein_distance returns a list of tuples (distance_real, distance_fake)
    # for each level of the Laplacian pyramid from the highest resolution to the lowest
    swd = tfgan.eval.sliced_wasserstein_distance(
        x_images, y_images, patches_per_image=64, random_sampling_count=4, use_svd=True)
    # A fresh session (no checkpoint restore) is enough to evaluate the metric graph.
    with MySession() as sess:
        swd = sess.run_once(swd)
    return swd
def ms_ssim(self, x_batch, y_batch, num_batch=128, ckpt_folder=None, ckpt_file=None, image_size=256):
    """ This function calculates the multiscale structural similarity between a pair of images.
    The image is downscaled four times; at each scale, a 11x11 filter is applied to extract patches.
    USE WITH CAUTION !!!
    1. This code was lost once and redone. Need to test on real datasets to verify it.
    2. This code can be improved to calculate pairwise ms-ssim using tf.image.ssim. tf.image.ssim_multicale is just
    tf.image.ssim with pool downsampling.
    :param x_batch: batch tensor of images, assumed to lie in [-1, 1] -- TODO confirm range with callers
    :param y_batch: batch tensor of images to compare against, same layout as x_batch
    :param num_batch: number of batches to accumulate before averaging the scores
    :param ckpt_folder: checkpoint folder forwarded to MySession for restoring model weights
    :param ckpt_file: checkpoint file forwarded to MySession
    :param image_size: ssim is defined on images of size at least 176
    :return: mean MS-SSIM score over all collected image pairs
    """
    # get x_images and y_images
    # NOTE(review): (x + 1) * 128.5 maps [-1, 1] to [0, 257] rather than
    # [0, 255]; 127.5 would give the standard 8-bit range -- confirm.
    x_images = (x_batch + 1.0) * 128.5
    y_images = (y_batch + 1.0) * 128.5
    # tf.image.ssim_multiscale expects NHWC, so channels-first data is transposed.
    if self.image_format in {'channels_first', 'NCHW'}:
        x_images = tf.transpose(x_images, perm=(0, 2, 3, 1))
        y_images = tf.transpose(y_images, perm=(0, 2, 3, 1))
    # NOTE(review): this compares against the literal 256 instead of image_size,
    # so a non-default image_size only takes effect when the height is not 256
    # -- likely should be `!= image_size`; confirm before changing.
    if x_images.get_shape().as_list()[1] != 256:
        x_images = tf.compat.v1.image.resize_bilinear(x_images, [image_size, image_size])
        y_images = tf.compat.v1.image.resize_bilinear(y_images, [image_size, image_size])
    scores = tf.image.ssim_multiscale(x_images, y_images, max_val=255)  # scores in range [0, 1]
    # Evaluate the scoring graph num_batch times, then average everything collected.
    with MySession(load_ckpt=True) as sess:
        scores = sess.run_m_times(
            scores,
            ckpt_folder=ckpt_folder, ckpt_file=ckpt_file,
            max_iter=num_batch, trace=True)
    ssim_score = np.mean(np.concatenate(scores, axis=0), axis=0)
    return ssim_score
| 45.301418 | 133 | 0.636947 |
43fe8ce604f5be764fdbae5dfb8933ec293fcd26 | 187 | py | Python | App/softwares_env/wizard/wsd/main.py | Wizard-collab/wizard | c2ec623fe011626716493c232b895fb0513f68ff | [
"MIT"
] | null | null | null | App/softwares_env/wizard/wsd/main.py | Wizard-collab/wizard | c2ec623fe011626716493c232b895fb0513f68ff | [
"MIT"
] | null | null | null | App/softwares_env/wizard/wsd/main.py | Wizard-collab/wizard | c2ec623fe011626716493c232b895fb0513f68ff | [
"MIT"
] | null | null | null | import yaml | 17 | 33 | 0.652406 |
a1003f2195e718d7338e4e93046ad32eab667f13 | 6,545 | py | Python | loldib/getratings/models/NA/na_rengar/na_rengar_mid.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_rengar/na_rengar_mid.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_rengar/na_rengar_mid.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | from getratings.models.ratings import Ratings
| 15.695444 | 46 | 0.766692 |
a100629a10b0553407de408897d5616acb03768b | 3,372 | py | Python | fixtures_browsers.py | aleksandr-kotlyar/python_tests_and_hacks | 291e3c33b70ef35deb9ba687885e70e6d23fe82f | [
"Apache-2.0"
] | 9 | 2020-02-07T05:15:00.000Z | 2022-01-19T10:19:02.000Z | fixtures_browsers.py | aleksandr-kotlyar/python_tests_and_hacks | 291e3c33b70ef35deb9ba687885e70e6d23fe82f | [
"Apache-2.0"
] | 5 | 2020-05-03T07:34:03.000Z | 2021-03-25T18:18:30.000Z | fixtures_browsers.py | aleksandr-kotlyar/python_tests_and_hacks | 291e3c33b70ef35deb9ba687885e70e6d23fe82f | [
"Apache-2.0"
] | 1 | 2021-07-26T06:24:36.000Z | 2021-07-26T06:24:36.000Z | import logging
import pytest
from selene import Browser, Config
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
def custom_driver(t_browser):
    """Build a locally managed Selene browser for the requested browser name.

    Only ``'chrome'`` is supported; any other value raises ``ValueError``.
    """
    logging.debug('custom driver config start')
    if t_browser != 'chrome':
        raise ValueError('t_browser does not set')
    # webdriver-manager downloads a chromedriver matching the installed Chrome.
    chrome = webdriver.Chrome(executable_path=ChromeDriverManager().install(),
                              options=headless_chrome_options())
    chrome.set_page_load_timeout(10)
    selene_browser = Browser(Config(
        driver=chrome,
        timeout=10,
        window_width=1366,
        window_height=1200,
    ))
    logging.debug('custom driver config finish')
    return selene_browser
def headless_chrome_options():
    """Assemble the Chrome options used for headless, container-friendly runs."""
    logging.info('set chromedriver options start')
    options = Options()
    options.set_capability("pageLoadStrategy", "eager")
    # Stability flags for sandboxed/containerised environments plus a quiet,
    # extension-free headless UI.
    for argument in ("--no-sandbox",
                     "--disable-gpu",
                     "--disable-notifications",
                     "--disable-extensions",
                     "--disable-infobars",
                     "--enable-automation",
                     "--headless",
                     "--disable-dev-shm-usage",
                     "--disable-setuid-sandbox"):
        options.add_argument(argument)
    logging.info('set chromedriver options finish')
    return options
def remote_driver(t_browser, page_load_strategy=None):
    """Build a Selene browser backed by a remote Selenium Grid session.

    :param t_browser: target browser name, 'chrome' or 'firefox'; any other
        value raises ``KeyError`` when the mapping below is indexed.
    :param page_load_strategy: optional page-load strategy ('normal', 'eager'
        or 'none') to request for the remote session.
    :return: a configured Selene ``Browser`` wrapping the remote driver.
    """
    logging.debug('remote driver config start')
    remote_mapping = {
        'chrome': {
            'command_executor': 'http://selenium__standalone-chrome:4444/wd/hub',
            'options': webdriver.ChromeOptions()
        },
        'firefox': {
            'command_executor': 'http://selenium__standalone-firefox:4444/wd/hub',
            'options': webdriver.FirefoxOptions()
        }
    }
    options = remote_mapping[t_browser]['options']
    if page_load_strategy:
        # BUG FIX: the DesiredCapabilities dict previously built here was never
        # passed to webdriver.Remote (and it always hard-coded "eager"); apply
        # the requested strategy to the options actually used for the session.
        options.set_capability("pageLoadStrategy", page_load_strategy)
    driver = webdriver.Remote(command_executor=remote_mapping[t_browser]['command_executor'],
                              options=options)
    driver.set_page_load_timeout(20)
    browser = Browser(Config(
        driver=driver,
        timeout=10,
        window_width=1500,
        window_height=1200,
    ))
    logging.debug('remote driver config finish')
    return browser
| 32.423077 | 93 | 0.695136 |
a101053cd887c912399a70d0a235e2cfdc45a962 | 34 | py | Python | evaluation/__init__.py | Luxios22/Dual_Norm | b404a03b15fc05749e0c648d9e46ffe70f6b2a80 | [
"MIT"
] | null | null | null | evaluation/__init__.py | Luxios22/Dual_Norm | b404a03b15fc05749e0c648d9e46ffe70f6b2a80 | [
"MIT"
] | null | null | null | evaluation/__init__.py | Luxios22/Dual_Norm | b404a03b15fc05749e0c648d9e46ffe70f6b2a80 | [
"MIT"
] | null | null | null | from .evaluation import evaluation | 34 | 34 | 0.882353 |
a1016a14567b8bcc8f6f0d1e157f8a64f32c5aaf | 7,034 | py | Python | utils/file_utils.py | lkrmbhlz/MVSC_3D | 7e32f1b507eb0bc85fae2649da0c8bfa89672064 | [
"MIT"
] | 2 | 2022-01-22T15:09:22.000Z | 2022-01-22T15:09:48.000Z | utils/file_utils.py | lkrmbhlz/MVSC_3D | 7e32f1b507eb0bc85fae2649da0c8bfa89672064 | [
"MIT"
] | null | null | null | utils/file_utils.py | lkrmbhlz/MVSC_3D | 7e32f1b507eb0bc85fae2649da0c8bfa89672064 | [
"MIT"
] | null | null | null | import open3d as o3d
import numpy as np
from pclpy import pcl
from tqdm import tqdm
import os
def o3d_meshes(dataset_name: str, path_to_data_folder='../../data'):
    """Load every mesh of a supported dataset as Open3D triangle meshes.

    Each object class is assumed to live in its own sub-folder of the dataset
    directory; the class folders per dataset are hard-coded below. The public
    datasets are ModelNet (http://modelnet.cs.princeton.edu/) and the McGill
    shape benchmark (http://www.cim.mcgill.ca/~shape/benchMark/).

    Parameters
    ----------
    dataset_name : name of the dataset folder ('modelnet10', 'tali_15',
        'mixed_bones' or 'mc_gill'); anything else raises ValueError.
    path_to_data_folder : root directory containing the dataset folders.

    Returns
    -------
    meshes : list of Open3D mesh representations, concatenated over classes.
    labels : list of int class labels (starting from 0) aligned with `meshes`.
    """
    objects_per_dataset = {
        # http://modelnet.cs.princeton.edu/
        'modelnet10': ['bathtub', 'bed', 'chair', 'desk', 'dresser', 'monitor',
                       'night_stand', 'sofa', 'table', 'toilet'],
        'tali_15': ['Manching', 'Milet'],
        'mixed_bones': ['capra', 'ovis_aries'],
        # http://www.cim.mcgill.ca/~shape/benchMark/
        'mc_gill': ['airplanes_ply', 'dinosaurs_ply', 'fishes_ply'],
    }
    if dataset_name not in objects_per_dataset:
        raise ValueError('Unknown dataset')
    objects = objects_per_dataset[dataset_name]
    meshes = []
    labels = []
    print('Read in %d classes of mesh files...' % len(objects))
    for class_index, obj in enumerate(tqdm(objects)):
        # ModelNet ships with a train/test split; only the test meshes are read.
        folder = path_to_data_folder + '/' + dataset_name + '/' + obj
        if dataset_name == 'modelnet10':
            folder += '/test'
        class_meshes = [o3d.io.read_triangle_mesh(file) for file in list_files(folder)]
        meshes.extend(class_meshes)
        labels.extend([class_index] * len(class_meshes))
    return meshes, labels
| 38.228261 | 259 | 0.65681 |
a101ea954f07ea0e68e1799f7386155f6a1d887a | 9,523 | py | Python | Program.py | aakash-lambton/project | 04a1991fc5e65e0cb8988029adbb1fda03656612 | [
"Apache-2.0"
] | null | null | null | Program.py | aakash-lambton/project | 04a1991fc5e65e0cb8988029adbb1fda03656612 | [
"Apache-2.0"
] | null | null | null | Program.py | aakash-lambton/project | 04a1991fc5e65e0cb8988029adbb1fda03656612 | [
"Apache-2.0"
] | null | null | null | import pymongo
import random
#PRINT ALL USERS
#PRINT SINGLE USER
#READ ALL POSTS
#PRINT SINGLE POST
#PRINT ALL COMMENTS
#PRINT SINGLE COMMENTS
#READ POST DATA
#INSERT NEW USER INTO COLLECTION
#DELETE COMMENT
#UPDATE POST CONTENT
if __name__ == '__main__': #CONNECT TO MONGO ATLAS
    # SECURITY NOTE(review): the connection URI embeds a username and password
    # in plain text; move the credentials to an environment variable or a
    # config file before sharing or deploying this script.
    client = pymongo.MongoClient("mongodb+srv://akash:lambton123@db.di1ed.mongodb.net/db?retryWrites=true&w=majority")
    database = client["feeddb"]
    # Exercise every helper in turn as a smoke-test style demo:
    # reads, a user insert, a comment delete, and a post update.
    create_database(database)
    print("Reading all users")
    read_all_users(database)
    print("Reading single user")
    read_single_users(database)
    print("Reading all posts")
    read_all_post(database)
    print("Reading single post")
    read_single_post(database)
    print("Reading all comments")
    read_all_comments(database)
    print("Reading single comment")
    read_single_comment(database)
    print("Reading all comments of a post")
    read_post_comment(database)
    print("Inserting new user")
    insert_user(database)
    print("Deleting comment")
    delete_comment(database)
    # Re-read comments to show the effect of the deletion above.
    print("Reading all comments")
    read_all_comments(database)
    print("Updating the post")
    update_post_content(database)
    # Re-read posts to show the effect of the update above.
    print("Reading all posts")
    read_all_post(database)
| 26.825352 | 118 | 0.522209 |
a102475986cb4c83a3d10579c02a0bf8df165a0a | 530 | py | Python | Mundo 2/ex053.py | judigunkel/judi-exercicios-python | c61bb75b1ae6141defcf42214194e141a70af15d | [
"MIT"
] | null | null | null | Mundo 2/ex053.py | judigunkel/judi-exercicios-python | c61bb75b1ae6141defcf42214194e141a70af15d | [
"MIT"
] | null | null | null | Mundo 2/ex053.py | judigunkel/judi-exercicios-python | c61bb75b1ae6141defcf42214194e141a70af15d | [
"MIT"
] | 1 | 2021-03-06T02:41:36.000Z | 2021-03-06T02:41:36.000Z | """
Crie um programa que leia um a frase qualquer e diga se ela um palndromo,
desconsiderando os espaos.
ex:
apos a sopa
a sacada da casa
a torre da derrota
o lobo ama o bolo
anotaram a data da maratona
"""
frase = input('Digite uma frase (sem acentos): ').replace(' ', '').upper()
inverso = ''
for c in range(len(frase) - 1, -1, -1):
inverso += frase[c]
print(f'O inverso de {frase} {inverso}')
if frase == inverso:
print('A frase digitada um palndromo.')
else:
print('A frase digitada no um Palndromo')
| 26.5 | 76 | 0.681132 |
a1027c07377717af9273b6289963cf9e75ece183 | 1,546 | py | Python | inferfuzzy/base_set.py | leynier/inferfuzzy | bc9dd3a3d0d59f323c5c573423ff7d20ba771eeb | [
"MIT"
] | 3 | 2020-11-23T21:05:31.000Z | 2020-11-25T17:33:27.000Z | inferfuzzy/base_set.py | leynier/fuzzpy | bc9dd3a3d0d59f323c5c573423ff7d20ba771eeb | [
"MIT"
] | null | null | null | inferfuzzy/base_set.py | leynier/fuzzpy | bc9dd3a3d0d59f323c5c573423ff7d20ba771eeb | [
"MIT"
] | null | null | null | from typing import Any, Callable
import matplotlib.pyplot as plt
from numpy import arange
from .membership import Membership
| 25.766667 | 57 | 0.568564 |
a104d65ea80539f94a6a62d27d42b32939f7ca2a | 9,911 | py | Python | play/play_loop.py | wmloh/ChessAI | b8eafd673ecb8162e464d78fccd32979a0c28126 | [
"MIT"
] | 1 | 2021-09-07T20:40:44.000Z | 2021-09-07T20:40:44.000Z | play/play_loop.py | wmloh/ChessAI | b8eafd673ecb8162e464d78fccd32979a0c28126 | [
"MIT"
] | null | null | null | play/play_loop.py | wmloh/ChessAI | b8eafd673ecb8162e464d78fccd32979a0c28126 | [
"MIT"
] | null | null | null | import numpy as np
import chess
import chess.engine
from tkinter.filedialog import asksaveasfilename
from parsing.math_encode import tensor_encode, tensor_decode
from inference.infer_action import get_action
| 39.486056 | 105 | 0.563112 |
a10591815a24a01b78e2571e754c9c37c5e03b4b | 205 | py | Python | wave/synth/wave/wave/base/curve.py | jedhsu/wave | a05d8f4b0a96722bdc2f5a514646c7a44681982b | [
"Apache-2.0"
] | null | null | null | wave/synth/wave/wave/base/curve.py | jedhsu/wave | a05d8f4b0a96722bdc2f5a514646c7a44681982b | [
"Apache-2.0"
] | null | null | null | wave/synth/wave/wave/base/curve.py | jedhsu/wave | a05d8f4b0a96722bdc2f5a514646c7a44681982b | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from typing import Generic, Mapping, TypeVar
__all__ = ["Curve"]
T = TypeVar("T")
U = TypeVar("U")
| 14.642857 | 44 | 0.692683 |
a10652730ddf79d36acced38c1989dd4d1acb1fa | 877 | py | Python | src/jellyroll/providers/utils/anyetree.py | blturner/jellyroll | 8a3b96e84d6cfbaac478bb8f9e406aabff5a77f3 | [
"BSD-3-Clause"
] | 3 | 2015-03-02T06:34:45.000Z | 2016-11-24T18:53:59.000Z | src/jellyroll/providers/utils/anyetree.py | blturner/jellyroll | 8a3b96e84d6cfbaac478bb8f9e406aabff5a77f3 | [
"BSD-3-Clause"
] | null | null | null | src/jellyroll/providers/utils/anyetree.py | blturner/jellyroll | 8a3b96e84d6cfbaac478bb8f9e406aabff5a77f3 | [
"BSD-3-Clause"
] | null | null | null | """
Get an Etree library. Usage::
>>> from anyetree import etree
Returns some etree library. Looks for (in order of decreasing preference):
* ``lxml.etree`` (http://cheeseshop.python.org/pypi/lxml/)
* ``xml.etree.cElementTree`` (built into Python 2.5)
* ``cElementTree`` (http://effbot.org/zone/celementtree.htm)
* ``xml.etree.ElementTree`` (built into Python 2.5)
* ``elementree.ElementTree (http://effbot.org/zone/element-index.htm)
"""
__all__ = ['etree']
SEARCH_PATHS = [
"lxml.etree",
"xml.etree.cElementTree",
"cElementTree",
"xml.etree.ElementTree",
"elementtree.ElementTree",
]
etree = None
for name in SEARCH_PATHS:
try:
etree = __import__(name, '', '', [''])
break
except ImportError:
continue
if etree is None:
raise ImportError("No suitable ElementTree implementation found.") | 25.057143 | 74 | 0.652223 |
a1081e4aca80f13d81fb5c284f116c973136197c | 608 | py | Python | libs/dispatch/dispatcher.py | eeshakumar/hythe | 52ca795c8370ddfb2aa6fb87ff3f63a85c55f913 | [
"MIT"
] | null | null | null | libs/dispatch/dispatcher.py | eeshakumar/hythe | 52ca795c8370ddfb2aa6fb87ff3f63a85c55f913 | [
"MIT"
] | null | null | null | libs/dispatch/dispatcher.py | eeshakumar/hythe | 52ca795c8370ddfb2aa6fb87ff3f63a85c55f913 | [
"MIT"
] | null | null | null | from abc import abstractmethod, ABC
| 22.518519 | 67 | 0.648026 |
a10a14a640ca1ca76f6da0a67be2551ab7a5efc8 | 766 | py | Python | 3_TT_FLIM.py | swabianinstruments/swabianinstruments-web-demo | 2d59f79958a942ed61f04ea7dd44c98ab2cf17df | [
"MIT"
] | null | null | null | 3_TT_FLIM.py | swabianinstruments/swabianinstruments-web-demo | 2d59f79958a942ed61f04ea7dd44c98ab2cf17df | [
"MIT"
] | null | null | null | 3_TT_FLIM.py | swabianinstruments/swabianinstruments-web-demo | 2d59f79958a942ed61f04ea7dd44c98ab2cf17df | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 25 11:01:41 2020
@author: liu
"""
from time import sleep
import plot_TT
from TimeTagger import createTimeTagger, freeAllTimeTagger, TimeDifferences
# create a Time Tagger instance
tagger = createTimeTagger()
tagger.reset()
# assign channels for measurement
phot_ch = 1
strt_ch = 2
next_ch = 3
sync_ch = 4
# initialize measurement parameters
binwidth = 10000 # 10 ns
bins = 100
n_pix = 100
# measure FLIM
image = TimeDifferences(tagger, phot_ch, strt_ch, next_ch, sync_ch, binwidth, bins, n_pix)
print("\nFLIM measurement is running.")
sleep(10)
xFLIM = image.getIndex()
yFLIM = image.getData()
plot_TT.BarChart2D(xFLIM, yFLIM)
# free the Time Tagger
freeAllTimeTagger()
| 19.15 | 91 | 0.707572 |
a10b1c87fe2ffd2a2fe1dee4b23ec1fe16f8cf15 | 287 | py | Python | electroPyy/io/__init__.py | ludo67100/electroPyy_Dev | 3b940adbfdf005dd8231e7ac61aca708033d5a95 | [
"OML"
] | null | null | null | electroPyy/io/__init__.py | ludo67100/electroPyy_Dev | 3b940adbfdf005dd8231e7ac61aca708033d5a95 | [
"OML"
] | null | null | null | electroPyy/io/__init__.py | ludo67100/electroPyy_Dev | 3b940adbfdf005dd8231e7ac61aca708033d5a95 | [
"OML"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 21 14:54:51 2019
@author: Ludovic.SPAETH
"""
from electroPyy.io.BaseRawIO import BaseRawIO
from electroPyy.io.HdF5IO import HdF5IO
from electroPyy.io.NeuroExIO import NeuroExIO
from electroPyy.io.WinWcpRawIO import WinWcpRawIO
| 23.916667 | 50 | 0.745645 |
a10d6496b80a4c774fdd41dcbb4c0a5e756986a0 | 317 | py | Python | torch_geometric_temporal/signal/__init__.py | tforgaard/pytorch_geometric_temporal | d3a6a55119cb8cc38cb6d941ba8f74879d02c4b8 | [
"MIT"
] | 1,410 | 2020-06-27T03:36:19.000Z | 2022-03-31T23:29:22.000Z | torch_geometric_temporal/signal/__init__.py | tforgaard/pytorch_geometric_temporal | d3a6a55119cb8cc38cb6d941ba8f74879d02c4b8 | [
"MIT"
] | 124 | 2020-07-07T16:11:09.000Z | 2022-03-31T07:21:53.000Z | torch_geometric_temporal/signal/__init__.py | tforgaard/pytorch_geometric_temporal | d3a6a55119cb8cc38cb6d941ba8f74879d02c4b8 | [
"MIT"
] | 230 | 2020-07-27T11:13:52.000Z | 2022-03-31T14:31:29.000Z | from .dynamic_graph_temporal_signal import *
from .dynamic_graph_temporal_signal_batch import *
from .static_graph_temporal_signal import *
from .static_graph_temporal_signal_batch import *
from .dynamic_graph_static_signal import *
from .dynamic_graph_static_signal_batch import *
from .train_test_split import *
| 28.818182 | 50 | 0.858044 |
a10e01e242ade75c580d5f9cde2741f0eeac1fca | 3,605 | py | Python | sdks/python/apache_beam/examples/streaming_wordcount_debugging_test.py | aaltay/incubator-beam | b150ace0884c88bc93da21f6dfe3b7684f886e94 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 9 | 2016-09-28T18:25:24.000Z | 2019-05-09T12:28:29.000Z | sdks/python/apache_beam/examples/streaming_wordcount_debugging_test.py | aaltay/incubator-beam | b150ace0884c88bc93da21f6dfe3b7684f886e94 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 28 | 2020-03-04T22:01:48.000Z | 2022-03-12T00:59:47.000Z | sdks/python/apache_beam/examples/streaming_wordcount_debugging_test.py | aaltay/incubator-beam | b150ace0884c88bc93da21f6dfe3b7684f886e94 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 6 | 2020-12-02T09:51:34.000Z | 2022-03-15T23:09:26.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit test for the streaming wordcount example with debug."""
# pytype: skip-file
import unittest
import mock
import pytest
import apache_beam as beam
from apache_beam.examples import streaming_wordcount_debugging
from apache_beam.testing.test_stream import TestStream
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
# Protect against environments where the PubSub library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from google.cloud import pubsub
except ImportError:
pubsub = None
# pylint: enable=wrong-import-order, wrong-import-position
if __name__ == '__main__':
unittest.main()
| 32.477477 | 76 | 0.691262 |
a10e3d1311566cfbb4eeacef8a5558e6389ab6c2 | 147 | py | Python | rest_framework_bulk/__init__.py | xordoquy/django-rest-framework-bulk | 484df717a790591a7bc58d5fed34f958ae82929a | [
"MIT"
] | 1 | 2019-08-20T02:08:33.000Z | 2019-08-20T02:08:33.000Z | rest_framework_bulk/__init__.py | xordoquy/django-rest-framework-bulk | 484df717a790591a7bc58d5fed34f958ae82929a | [
"MIT"
] | null | null | null | rest_framework_bulk/__init__.py | xordoquy/django-rest-framework-bulk | 484df717a790591a7bc58d5fed34f958ae82929a | [
"MIT"
] | null | null | null | __version__ = '0.1.3'
__author__ = 'Miroslav Shubernetskiy'
try:
from .generics import *
from .mixins import *
except Exception:
pass
| 16.333333 | 37 | 0.687075 |
a10e6a87e856699221521cf8bdbfca12b9ee5a97 | 1,773 | py | Python | random_forest_classifier.py | duongntbk/FashionMNIST | 982f31ac7d857b5deadfde37f979bc6a047fa007 | [
"MIT"
] | null | null | null | random_forest_classifier.py | duongntbk/FashionMNIST | 982f31ac7d857b5deadfde37f979bc6a047fa007 | [
"MIT"
] | 10 | 2020-01-28T22:19:43.000Z | 2022-02-10T00:30:45.000Z | random_forest_classifier.py | duongntbk/FashionMNIST | 982f31ac7d857b5deadfde37f979bc6a047fa007 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pickle
from sklearn.ensemble import RandomForestClassifier
from base_shallow_classifier import BaseShallowClassifier
| 31.105263 | 75 | 0.674563 |
a10f0a0a33562a06ed9b546b2f53186a7237246b | 2,387 | py | Python | setup.py | mehta-lab/recOrder | 67f2edb9ab13114dfe41d57e465ae24f961b0004 | [
"Unlicense"
] | 2 | 2022-01-19T21:13:32.000Z | 2022-02-24T19:40:24.000Z | setup.py | mehta-lab/recOrder | 67f2edb9ab13114dfe41d57e465ae24f961b0004 | [
"Unlicense"
] | 55 | 2021-06-24T18:53:18.000Z | 2022-03-30T21:05:14.000Z | setup.py | mehta-lab/recOrder | 67f2edb9ab13114dfe41d57e465ae24f961b0004 | [
"Unlicense"
] | null | null | null | import os.path as osp
from setuptools import setup, find_packages
# todo: modify as we decide on versions, names, descriptions. readme
MIN_PY_VER = '3.7'
DISTNAME = 'recOrder'
DESCRIPTION = 'computational microscopy toolkit for label-free imaging'
with open("README.md", "r") as fh:
LONG_DESCRIPTION = fh.read()
LONG_DESCRIPTION_content_type = "text/markdown"
LONG_DESCRIPTION = __doc__
LICENSE = 'Chan Zuckerberg Biohub Software License'
INSTALL_REQUIRES = ['numpy', 'scipy', 'matplotlib', 'pycromanager']
REQUIRES = []
# todo: modify for python dependency
CLASSIFIERS = [
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Utilities',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'
]
# populate packages
PACKAGES = [package for package in find_packages()]
# parse requirements
with open(osp.join('requirements', 'default.txt')) as f:
requirements = [line.strip() for line in f
if line and not line.startswith('#')]
# populate requirements
for l in requirements:
sep = l.split(' #')
INSTALL_REQUIRES.append(sep[0].strip())
if len(sep) == 2:
REQUIRES.append(sep[1].strip())
if __name__ == '__main__':
setup(
name=DISTNAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESCRIPTION_content_type,
license=LICENSE,
version="0.0.1",
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
python_requires=f'>={MIN_PY_VER}',
dependency_links=['https://github.com/mehta-lab/waveorder.git#egg=waveorder'],
packages=PACKAGES,
include_package_data=True,
entry_points={
'console_scripts': [
'recOrder.reconstruct = recOrder.cli_module:main',
'recOrder.convert = scripts.convert_tiff_to_zarr:main'
]
}
)
| 33.619718 | 86 | 0.6615 |
a1102b00cc945569015366b5d33e47090c8e92f5 | 6,457 | py | Python | oscrypto/_openssl/_libssl_ctypes.py | frennkie/oscrypto | 24aff3148379b931d9c72ab3b069e537dc2195f8 | [
"MIT"
] | 1 | 2020-05-17T06:44:51.000Z | 2020-05-17T06:44:51.000Z | oscrypto/_openssl/_libssl_ctypes.py | frennkie/oscrypto | 24aff3148379b931d9c72ab3b069e537dc2195f8 | [
"MIT"
] | null | null | null | oscrypto/_openssl/_libssl_ctypes.py | frennkie/oscrypto | 24aff3148379b931d9c72ab3b069e537dc2195f8 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import platform
import sys
from ctypes.util import find_library
from ctypes import CDLL, CFUNCTYPE, POINTER, c_void_p, c_char_p, c_int, c_size_t, c_long
from .. import _backend_config
from .._ffi import FFIEngineError
from ..errors import LibraryNotFoundError
from ._libcrypto import libcrypto_version_info
__all__ = [
'libssl',
]
libssl_path = _backend_config().get('libssl_path')
if libssl_path is None:
libssl_path = find_library('ssl')
# if we are on catalina, we want to strongly version libssl since unversioned libcrypto has a non-stable ABI
if sys.platform == 'darwin' and platform.mac_ver()[0].startswith('10.15') and libssl_path.endswith('libssl.dylib'):
# libssl.44.dylib is in libressl-2.6 which as a OpenSSL 1.0.1-compatible API
libssl_path = libssl_path.replace('libssl.dylib', 'libssl.44.dylib')
if not libssl_path:
raise LibraryNotFoundError('The library libssl could not be found')
libssl = CDLL(libssl_path, use_errno=True)
P_SSL_METHOD = POINTER(c_void_p)
P_SSL_CTX = POINTER(c_void_p)
P_SSL_SESSION = POINTER(c_void_p)
P_SSL = POINTER(c_void_p)
P_BIO_METHOD = POINTER(c_void_p)
P_BIO = POINTER(c_void_p)
X509 = c_void_p
P_X509 = POINTER(X509)
P_X509_STORE = POINTER(c_void_p)
P_X509_STORE_CTX = POINTER(c_void_p)
_STACK = c_void_p
P_STACK = POINTER(_STACK)
try:
if libcrypto_version_info < (1, 1):
libssl.sk_num.argtypes = [P_STACK]
libssl.sk_num.restype = c_int
libssl.sk_value.argtypes = [P_STACK, c_int]
libssl.sk_value.restype = P_X509
libssl.SSL_library_init.argtypes = []
libssl.SSL_library_init.restype = c_int
libssl.OPENSSL_add_all_algorithms_noconf.argtypes = []
libssl.OPENSSL_add_all_algorithms_noconf.restype = None
libssl.SSLv23_method.argtypes = []
libssl.SSLv23_method.restype = P_SSL_METHOD
else:
libssl.OPENSSL_sk_num.argtypes = [P_STACK]
libssl.OPENSSL_sk_num.restype = c_int
libssl.OPENSSL_sk_value.argtypes = [P_STACK, c_int]
libssl.OPENSSL_sk_value.restype = P_X509
libssl.TLS_method.argtypes = []
libssl.TLS_method.restype = P_SSL_METHOD
libssl.BIO_s_mem.argtypes = []
libssl.BIO_s_mem.restype = P_BIO_METHOD
libssl.BIO_new.argtypes = [
P_BIO_METHOD
]
libssl.BIO_new.restype = P_BIO
libssl.BIO_free.argtypes = [
P_BIO
]
libssl.BIO_free.restype = c_int
libssl.BIO_read.argtypes = [
P_BIO,
c_char_p,
c_int
]
libssl.BIO_read.restype = c_int
libssl.BIO_write.argtypes = [
P_BIO,
c_char_p,
c_int
]
libssl.BIO_write.restype = c_int
libssl.BIO_ctrl_pending.argtypes = [
P_BIO
]
libssl.BIO_ctrl_pending.restype = c_size_t
libssl.SSL_CTX_new.argtypes = [
P_SSL_METHOD
]
libssl.SSL_CTX_new.restype = P_SSL_CTX
libssl.SSL_CTX_set_timeout.argtypes = [
P_SSL_CTX,
c_long
]
libssl.SSL_CTX_set_timeout.restype = c_long
verify_callback = CFUNCTYPE(c_int, c_int, P_X509_STORE_CTX)
setattr(libssl, 'verify_callback', verify_callback)
libssl.SSL_CTX_set_verify.argtypes = [
P_SSL_CTX,
c_int,
POINTER(verify_callback)
]
libssl.SSL_CTX_set_verify.restype = None
libssl.SSL_CTX_set_default_verify_paths.argtypes = [
P_SSL_CTX
]
libssl.SSL_CTX_set_default_verify_paths.restype = c_int
libssl.SSL_CTX_load_verify_locations.argtypes = [
P_SSL_CTX,
c_char_p,
c_char_p
]
libssl.SSL_CTX_load_verify_locations.restype = c_int
libssl.SSL_get_verify_result.argtypes = [
P_SSL
]
libssl.SSL_get_verify_result.restype = c_long
libssl.SSL_CTX_get_cert_store.argtypes = [
P_SSL_CTX
]
libssl.SSL_CTX_get_cert_store.restype = P_X509_STORE
libssl.X509_STORE_add_cert.argtypes = [
P_X509_STORE,
P_X509
]
libssl.X509_STORE_add_cert.restype = c_int
libssl.SSL_CTX_set_cipher_list.argtypes = [
P_SSL_CTX,
c_char_p
]
libssl.SSL_CTX_set_cipher_list.restype = c_int
libssl.SSL_CTX_ctrl.arg_types = [
P_SSL_CTX,
c_int,
c_long,
c_void_p
]
libssl.SSL_CTX_ctrl.restype = c_long
libssl.SSL_CTX_free.argtypes = [
P_SSL_CTX
]
libssl.SSL_CTX_free.restype = None
libssl.SSL_new.argtypes = [
P_SSL_CTX
]
libssl.SSL_new.restype = P_SSL
libssl.SSL_free.argtypes = [
P_SSL
]
libssl.SSL_free.restype = None
libssl.SSL_set_bio.argtypes = [
P_SSL,
P_BIO,
P_BIO
]
libssl.SSL_set_bio.restype = None
libssl.SSL_ctrl.arg_types = [
P_SSL,
c_int,
c_long,
c_void_p
]
libssl.SSL_ctrl.restype = c_long
libssl.SSL_get_peer_cert_chain.argtypes = [
P_SSL
]
libssl.SSL_get_peer_cert_chain.restype = P_STACK
libssl.SSL_get1_session.argtypes = [
P_SSL
]
libssl.SSL_get1_session.restype = P_SSL_SESSION
libssl.SSL_set_session.argtypes = [
P_SSL,
P_SSL_SESSION
]
libssl.SSL_set_session.restype = c_int
libssl.SSL_SESSION_free.argtypes = [
P_SSL_SESSION
]
libssl.SSL_SESSION_free.restype = None
libssl.SSL_set_connect_state.argtypes = [
P_SSL
]
libssl.SSL_set_connect_state.restype = None
libssl.SSL_do_handshake.argtypes = [
P_SSL
]
libssl.SSL_do_handshake.restype = c_int
libssl.SSL_get_error.argtypes = [
P_SSL,
c_int
]
libssl.SSL_get_error.restype = c_int
libssl.SSL_get_version.argtypes = [
P_SSL
]
libssl.SSL_get_version.restype = c_char_p
libssl.SSL_read.argtypes = [
P_SSL,
c_char_p,
c_int
]
libssl.SSL_read.restype = c_int
libssl.SSL_write.argtypes = [
P_SSL,
c_char_p,
c_int
]
libssl.SSL_write.restype = c_int
libssl.SSL_pending.argtypes = [
P_SSL
]
libssl.SSL_pending.restype = c_int
libssl.SSL_shutdown.argtypes = [
P_SSL
]
libssl.SSL_shutdown.restype = c_int
except (AttributeError):
raise FFIEngineError('Error initializing ctypes')
setattr(libssl, '_STACK', _STACK)
setattr(libssl, 'X509', X509)
| 24.093284 | 119 | 0.671055 |
a1102cc6df4e46f14ab22665f1a454bf74d422a0 | 382 | py | Python | etl/etl.py | amalshehu/exercism-python | eb469246504fb22463e036a989dc9b44e0a83410 | [
"MIT"
] | 2 | 2016-08-25T10:58:44.000Z | 2017-11-13T12:58:04.000Z | etl/etl.py | amalshehu/exercism-python | eb469246504fb22463e036a989dc9b44e0a83410 | [
"MIT"
] | 1 | 2016-08-25T10:59:23.000Z | 2016-08-25T12:20:19.000Z | etl/etl.py | amalshehu/exercism-python | eb469246504fb22463e036a989dc9b44e0a83410 | [
"MIT"
] | null | null | null | # File: etl.py
# Purpose: To do the `Transform` step of an Extract-Transform-Load.
# Programmer: Amal Shehu
# Course: Exercism
# Date: Thursday 22 September 2016, 03:40 PM
| 27.285714 | 71 | 0.63089 |
a11034c8715f1c4364caa1c40989aaba6b81cecc | 2,983 | py | Python | codango/account/api.py | NdagiStanley/silver-happiness | 67fb6dd4047c603a84276f88a021d4489cf3b41e | [
"MIT"
] | 2 | 2019-10-17T01:03:12.000Z | 2021-11-24T07:43:14.000Z | codango/account/api.py | NdagiStanley/silver-happiness | 67fb6dd4047c603a84276f88a021d4489cf3b41e | [
"MIT"
] | 49 | 2019-09-05T02:48:04.000Z | 2021-06-28T02:29:42.000Z | codango/account/api.py | NdagiStanley/silver-happiness | 67fb6dd4047c603a84276f88a021d4489cf3b41e | [
"MIT"
] | 1 | 2021-11-25T10:19:27.000Z | 2021-11-25T10:19:27.000Z | import psycopg2
from rest_framework import generics, permissions
# from serializers import UserSerializer, UserFollowSerializer, UserSettingsSerializer
from serializers import UserSerializer, UserFollowSerializer, UserSettingsSerializer
from serializers import AllUsersSerializer, UserRegisterSerializer
from userprofile import serializers, models
from django.contrib.auth.models import User
from rest_framework import permissions
| 31.072917 | 86 | 0.706336 |
a1105853736e4203adc6fff03b4073278e494bcb | 3,597 | py | Python | backend/app/apis/v1/resources.py | williamsyb/StockTick | 1dd10101d44fa3a0584f849b022fc8254c2e66c7 | [
"MIT"
] | 2 | 2020-11-23T13:38:49.000Z | 2021-08-17T15:37:04.000Z | backend/app/apis/v1/resources.py | williamsyb/StockTick | 1dd10101d44fa3a0584f849b022fc8254c2e66c7 | [
"MIT"
] | null | null | null | backend/app/apis/v1/resources.py | williamsyb/StockTick | 1dd10101d44fa3a0584f849b022fc8254c2e66c7 | [
"MIT"
] | null | null | null | # -*- coding:UTF-8 -*-
from flask import Blueprint, current_app, request
import pandas as pd
from app.protocol import serialize
from app.utils import Utils
from app.database.crud import db_mgr
from app.cache import redis_mgr
api_v1 = Blueprint('api_v1', __name__)
| 38.677419 | 86 | 0.659438 |
a111862555b1576ad0436f2aab598c4b8d1d29a9 | 708 | py | Python | report/api/hooks.py | Aaron-DH/openstack_sample_project | 711a56311806d52b632e4394743bd4bdbacb103a | [
"Apache-2.0"
] | null | null | null | report/api/hooks.py | Aaron-DH/openstack_sample_project | 711a56311806d52b632e4394743bd4bdbacb103a | [
"Apache-2.0"
] | null | null | null | report/api/hooks.py | Aaron-DH/openstack_sample_project | 711a56311806d52b632e4394743bd4bdbacb103a | [
"Apache-2.0"
] | null | null | null | from oslo_log import log
from oslo_config import cfg
from report import storage
from pecan import hooks
LOG = log.getLogger(__name__)
| 25.285714 | 78 | 0.69209 |
a1119377e73c71b58b46883ef014d640d56156e5 | 117 | py | Python | garageofcode/semantic/main.py | tpi12jwe/garageofcode | 3cfaf01f6d77130bb354887e6ed9921c791db849 | [
"MIT"
] | 2 | 2020-02-11T10:32:06.000Z | 2020-02-11T17:00:47.000Z | garageofcode/semantic/main.py | tpi12jwe/garageofcode | 3cfaf01f6d77130bb354887e6ed9921c791db849 | [
"MIT"
] | null | null | null | garageofcode/semantic/main.py | tpi12jwe/garageofcode | 3cfaf01f6d77130bb354887e6ed9921c791db849 | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
main() | 14.625 | 29 | 0.606838 |
a111d2ca236c2a067c9980e65999cf841b19dd21 | 548 | py | Python | scholariumat/products/migrations/0012_auto_20181125_1221.py | valuehack/scholariumat | 47c13f3429b95b9ad5ca59b45cf971895260bb5c | [
"MIT"
] | null | null | null | scholariumat/products/migrations/0012_auto_20181125_1221.py | valuehack/scholariumat | 47c13f3429b95b9ad5ca59b45cf971895260bb5c | [
"MIT"
] | 232 | 2018-06-30T11:40:52.000Z | 2020-04-29T23:55:41.000Z | scholariumat/products/migrations/0012_auto_20181125_1221.py | valuehack/scholariumat | 47c13f3429b95b9ad5ca59b45cf971895260bb5c | [
"MIT"
] | 3 | 2018-05-31T12:57:03.000Z | 2020-02-27T16:25:44.000Z | # Generated by Django 2.0.9 on 2018-11-25 11:21
from django.db import migrations, models
| 22.833333 | 66 | 0.578467 |
a113c8e85fbfe0a4e5ea8110782dae46220ba93c | 262 | py | Python | setup.py | geickelb/hsip441_neiss_python | 0ad88a664b369ea058b28d79ed98d02ff8418aad | [
"MIT"
] | null | null | null | setup.py | geickelb/hsip441_neiss_python | 0ad88a664b369ea058b28d79ed98d02ff8418aad | [
"MIT"
] | null | null | null | setup.py | geickelb/hsip441_neiss_python | 0ad88a664b369ea058b28d79ed98d02ff8418aad | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.0.1',
description='compiling code for HSIP441 using python to explore the Neiss database',
author='Garrett Eickelberg',
license='MIT',
)
| 23.818182 | 88 | 0.70229 |
a114b71d6021e2552fc945ad4a1ac94774faab77 | 189 | py | Python | test.py | j178/spotlight | 1e65ff35826fee9a9d522b502cd781e86fbed01f | [
"WTFPL"
] | 5 | 2016-12-06T04:03:16.000Z | 2020-09-24T14:08:49.000Z | test.py | j178/spotlight | 1e65ff35826fee9a9d522b502cd781e86fbed01f | [
"WTFPL"
] | 1 | 2020-05-04T02:19:09.000Z | 2020-06-10T08:44:11.000Z | test.py | j178/spotlight | 1e65ff35826fee9a9d522b502cd781e86fbed01f | [
"WTFPL"
] | null | null | null | from weibo import WeiboClient
from weibo.watchyou import fetch_replies
for r in fetch_replies(): # fetch_repliesweibowatchyou,
print(r['text'])
| 31.5 | 91 | 0.793651 |
a114be84d6fa960cedd6c469ba949d63204c8275 | 8,181 | py | Python | tests/unit/test_db_config_options.py | feddovanede/cf-mendix-buildpack-heapdump | 584678bfab90a2839cfbac4126b08d6359885f91 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_db_config_options.py | feddovanede/cf-mendix-buildpack-heapdump | 584678bfab90a2839cfbac4126b08d6359885f91 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_db_config_options.py | feddovanede/cf-mendix-buildpack-heapdump | 584678bfab90a2839cfbac4126b08d6359885f91 | [
"Apache-2.0"
] | null | null | null | import datetime
import json
import os
from unittest import TestCase, mock
from urllib.parse import parse_qs, urlencode, urlparse, urlunparse
from buildpack.infrastructure.database import (
UrlDatabaseConfiguration,
get_config,
)
from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509 import NameAttribute
from cryptography.x509.base import Certificate
from cryptography.x509.oid import NameOID
# Class to generate a test certificate chain
# https://cryptography.io/en/latest/x509/tutorial/
| 33.391837 | 174 | 0.620218 |
a11510f716edaa915f408fd4bc5559303960aa62 | 1,770 | py | Python | Computer & Information Science Core courses/2168/A*/graph.py | Vaporjawn/Temple-University-Computer-Science-Resources | 8d54db3a85a1baa8ba344efc90593b440eb6d585 | [
"MIT"
] | 1 | 2020-07-28T16:18:38.000Z | 2020-07-28T16:18:38.000Z | Computer & Information Science Core courses/2168/A*/graph.py | Vaporjawn/Temple-University-Computer-Science-Resources | 8d54db3a85a1baa8ba344efc90593b440eb6d585 | [
"MIT"
] | 4 | 2020-07-15T06:40:55.000Z | 2020-08-13T16:01:30.000Z | Computer & Information Science Core courses/2168/A*/graph.py | Vaporjawn/Temple-University-Computer-Science-Resources | 8d54db3a85a1baa8ba344efc90593b440eb6d585 | [
"MIT"
] | null | null | null | """Implement the graph to traverse."""
from collections import Counter
| 28.095238 | 77 | 0.564972 |
a115499f10a5a3acf2f24d7e3dd1a76b57b5b137 | 245 | py | Python | Projects/Python_Python2_json/main.py | LiuOcean/luban_examples | 75d5fd7c1b15d79efc0ebbac21a74bf050aed1fb | [
"MIT"
] | 44 | 2021-05-06T06:16:55.000Z | 2022-03-30T06:27:25.000Z | Projects/Python_Python2_json/main.py | HFX-93/luban_examples | 5b90e392d404950d12ff803a186b26bdea5e0292 | [
"MIT"
] | 1 | 2021-07-25T16:35:32.000Z | 2021-08-23T04:59:49.000Z | Projects/Python_Python2_json/main.py | HFX-93/luban_examples | 5b90e392d404950d12ff803a186b26bdea5e0292 | [
"MIT"
] | 14 | 2021-06-09T10:38:59.000Z | 2022-03-30T06:27:24.000Z | import json
import gen.Types
tables = gen.Types.Tables(loader)
print(tables)
r = tables.TbFullTypes.getDataList()[0].__dict__
print(r)
| 18.846154 | 89 | 0.685714 |
a115806c8d50f7e45e72b3d28a59a48fb80d6f6e | 10,255 | py | Python | rplugin/python3/defx/base/kind.py | kazukazuinaina/defx.nvim | 376b2a91703b6bf19283e58bf1e7b5ce5baae5af | [
"MIT"
] | null | null | null | rplugin/python3/defx/base/kind.py | kazukazuinaina/defx.nvim | 376b2a91703b6bf19283e58bf1e7b5ce5baae5af | [
"MIT"
] | null | null | null | rplugin/python3/defx/base/kind.py | kazukazuinaina/defx.nvim | 376b2a91703b6bf19283e58bf1e7b5ce5baae5af | [
"MIT"
] | null | null | null | # ============================================================================
# FILE: kind.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
import json
import typing
from pathlib import Path
from defx.action import ActionAttr
from defx.action import ActionTable
from defx.action import do_action
from defx.context import Context
from defx.defx import Defx
from defx.session import Session
from defx.util import Nvim
from defx.view import View
_action_table: typing.Dict[str, ActionTable] = {}
ACTION_FUNC = typing.Callable[[View, Defx, Context], None]
| 33.295455 | 78 | 0.660263 |
a11589146f3d49dce0f6bfd0ac0a0e58ecd53f6f | 3,659 | py | Python | shopify_listener/dispatcher.py | smallwat3r/shopify-webhook-manager | 1161f070470bc2d2f81c98222b67300bc616121f | [
"MIT"
] | 6 | 2019-08-13T18:12:37.000Z | 2021-05-26T17:55:58.000Z | shopify_listener/dispatcher.py | smallwat3r/shopify-webhook-manager | 1161f070470bc2d2f81c98222b67300bc616121f | [
"MIT"
] | null | null | null | shopify_listener/dispatcher.py | smallwat3r/shopify-webhook-manager | 1161f070470bc2d2f81c98222b67300bc616121f | [
"MIT"
] | 4 | 2019-10-16T06:14:35.000Z | 2021-06-03T06:25:26.000Z | # -*- coding: utf-8 -*-
# @Author: Matthieu Petiteau
# @Date: 2019-04-26 21:01:07
# @Last Modified by: Matthieu Petiteau
# @Last Modified time: 2019-04-26 21:52:46
"""Dispatch webhook event to specific actions."""
import json
| 18.20398 | 70 | 0.622301 |
a115d6f4a8b34eb7bb70f84e6420459fec3a66db | 790 | py | Python | open_spiel/higc/bots/test_bot_fail_after_few_actions.py | higcompetition/tournament | b61688f7fad6d33a6af8097c75cb0bf0bc84faf2 | [
"Apache-2.0"
] | 4 | 2021-07-22T08:01:26.000Z | 2021-12-30T07:07:23.000Z | open_spiel/higc/bots/test_bot_fail_after_few_actions.py | higcompetition/tournament | b61688f7fad6d33a6af8097c75cb0bf0bc84faf2 | [
"Apache-2.0"
] | 1 | 2021-07-22T16:42:31.000Z | 2021-07-23T09:46:22.000Z | open_spiel/higc/bots/test_bot_fail_after_few_actions.py | higcompetition/tournament | b61688f7fad6d33a6af8097c75cb0bf0bc84faf2 | [
"Apache-2.0"
] | 3 | 2021-07-21T19:02:56.000Z | 2021-07-30T17:40:39.000Z | # A bot that picks the first action from the list for the first two rounds,
# and then exists with an exception.
# Used only for tests.
game_name = input()
play_as = int(input())
print("ready")
while True:
print("start")
num_actions = 0
while True:
message = input()
if message == "tournament over":
print("tournament over")
sys.exit(0)
if message.startswith("match over"):
print("match over")
break
public_buf, private_buf, *legal_actions = message.split(" ")
should_act = len(legal_actions) > 0
if should_act:
num_actions += 1
print(legal_actions[-1])
else:
print("ponder")
if num_actions > 2:
raise RuntimeError
| 26.333333 | 75 | 0.572152 |
a116cfc21ab7921ef0308c2ab54fca839bd22800 | 2,027 | py | Python | python/hsfs/util.py | berthoug/feature-store-api | 85c23ae08c7de65acd79a3b528fa72c07e52a272 | [
"Apache-2.0"
] | null | null | null | python/hsfs/util.py | berthoug/feature-store-api | 85c23ae08c7de65acd79a3b528fa72c07e52a272 | [
"Apache-2.0"
] | null | null | null | python/hsfs/util.py | berthoug/feature-store-api | 85c23ae08c7de65acd79a3b528fa72c07e52a272 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2020 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
from pathlib import Path
from hsfs import feature
def get_cert_pw():
"""
Get keystore password from local container
Returns:
Certificate password
"""
hadoop_user_name = "hadoop_user_name"
crypto_material_password = "material_passwd"
material_directory = "MATERIAL_DIRECTORY"
password_suffix = "__cert.key"
pwd_path = Path(crypto_material_password)
if not pwd_path.exists():
username = os.environ[hadoop_user_name]
material_directory = Path(os.environ[material_directory])
pwd_path = material_directory.joinpath(username + password_suffix)
with pwd_path.open() as f:
return f.read()
| 27.026667 | 76 | 0.700543 |
a11724652d428320ddd7198c24a9514a2d3d1923 | 1,720 | py | Python | src/map_generation/map_parser.py | tbvanderwoude/matching-epea-star | 13d8716f932bb98398fe8e190e668ee65bcf0f34 | [
"MIT"
] | 1 | 2021-08-23T18:00:13.000Z | 2021-08-23T18:00:13.000Z | src/map_generation/map_parser.py | tbvanderwoude/matching-epea-star | 13d8716f932bb98398fe8e190e668ee65bcf0f34 | [
"MIT"
] | null | null | null | src/map_generation/map_parser.py | tbvanderwoude/matching-epea-star | 13d8716f932bb98398fe8e190e668ee65bcf0f34 | [
"MIT"
] | 1 | 2021-08-24T08:16:31.000Z | 2021-08-24T08:16:31.000Z | import os.path
from typing import List, Tuple
from mapfmclient import MarkedLocation, Problem
| 31.851852 | 87 | 0.55 |
a118bed580cb119e113df0f842732da313be42d4 | 9,803 | py | Python | library/oci_api_key.py | AndreyAdnreyev/oci-ansible-modules | accd6e482ff1e8c2ddd6e85958dfe12cd6114383 | [
"Apache-2.0"
] | null | null | null | library/oci_api_key.py | AndreyAdnreyev/oci-ansible-modules | accd6e482ff1e8c2ddd6e85958dfe12cd6114383 | [
"Apache-2.0"
] | null | null | null | library/oci_api_key.py | AndreyAdnreyev/oci-ansible-modules | accd6e482ff1e8c2ddd6e85958dfe12cd6114383 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2018, 2019, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_api_key
short_description: Upload and delete API signing key of a user in OCI
description:
- This module allows the user upload and delete API signing keys of a user in OCI. A PEM-format RSA credential for
securing requests to the Oracle Cloud Infrastructure REST API. Also known as an API signing key. Specifically,
this is the public key from the key pair. The private key remains with the user calling the API. For information
about generating a key pair in the required PEM format, see Required Keys and OCIDs.
Note that this is not the SSH key for accessing compute instances.
Each user can have a maximum of three API signing keys.
For more information about user credentials, see
U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
version_added: "2.5"
options:
user_id:
description: The OCID of the user whose API signing key needs to be created or deleted.
required: true
api_signing_key:
description: The public key. Must be an RSA key in PEM format. Required when the API signing key is
uploaded with I(state=present)
required: false
aliases: ['key']
api_key_id:
description: The API signing key's id. The Id must be of the format TENANCY_OCID/USER_OCID/KEY_FINGERPRINT.
required: false
aliases: ['id']
state:
description: The state of the api signing key that must be asserted to. When I(state=present), and the
api key doesn't exist, the api key is created with the provided C(api_signing_key).
When I(state=absent), the api signing key corresponding to the provided C(fingerprint) is deleted.
required: false
default: "present"
choices: ['present', 'absent']
author: "Sivakumar Thyagarajan (@sivakumart)"
extends_documentation_fragment: [ oracle, oracle_creatable_resource, oracle_wait_options ]
"""
EXAMPLES = """
- name: Upload a new api signing key for the specified user
oci_api_key:
user_id: "ocid1.user.oc1..xxxxxEXAMPLExxxxx"
key: "-----BEGIN PUBLIC KEY-----cmdnMIIBIjANBgkqhkiG9w0BAQEFA......mwIDAQAB-----END PUBLIC KEY-----"
- name: Delete an API signing key for the specified user
oci_api_key:
user_id: "ocid1.user.oc1..xxxxxEXAMPLExxxxx"
"id": "ocid1.tenancy.oc1..xxxxxEXAMPLExxxxx/ocid1.user.oc1..xxxxxEXAMPLExxxxx/08:07:a6:7d:06:b4:73:91:e9:2c:da"
state: "absent"
"""
RETURN = """
oci_api_key:
description: Details of the API signing key
returned: On success
type: dict
sample: {
"fingerprint": "08:07:a6:7d:06:b4:73:91:e9:2c:da:42:c8:cb:df:02",
"inactive_status": null,
"key_id": "ocid1.tenancy.oc1..xxxxxEXAMPLExxxxx/ocid1.user.oc1..xxxxxEXAMPLExxxxx/08:07:a6:7d:06:b4:73:91:e9:2c:da",
"key_value": "-----BEGIN PUBLIC KEY-----...urt/fN8jNz2nZwIDAQAB-----END PUBLIC KEY-----",
"lifecycle_state": "ACTIVE",
"time_created": "2018-01-08T09:33:59.705000+00:00",
"user_id": "ocid1.user.oc1..xxxxxEXAMPLExxxxx"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils
try:
import oci
from oci.identity.identity_client import IdentityClient
from oci.identity.models import CreateApiKeyDetails
from oci.util import to_dict
from oci.exceptions import ServiceError, MaximumWaitTimeExceeded
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
logger = None
RESOURCE_NAME = "api_key"
if __name__ == "__main__":
main()
| 36.040441 | 124 | 0.65531 |
a118ceb32497416f45bc3e52e40410e78c21e051 | 836 | py | Python | python_modules/dagster/dagster/core/types/builtin_enum.py | jake-billings/dagster | 7a1548a1f246c48189f3d8109e831b744bceb7d4 | [
"Apache-2.0"
] | 1 | 2019-07-15T17:34:04.000Z | 2019-07-15T17:34:04.000Z | python_modules/dagster/dagster/core/types/builtin_enum.py | jake-billings/dagster | 7a1548a1f246c48189f3d8109e831b744bceb7d4 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/types/builtin_enum.py | jake-billings/dagster | 7a1548a1f246c48189f3d8109e831b744bceb7d4 | [
"Apache-2.0"
] | null | null | null | import sys
if sys.version_info.major >= 3:
import typing
else:
from enum import Enum
| 22.594595 | 70 | 0.551435 |
a11a0df896228fb34c45a26a79b430c991c408ae | 1,173 | py | Python | sallybrowse/extensions/document/__init__.py | XiuyuanLu/browse | ee5ca57e54fe492d5b109b7cae87d1c8a45dbe25 | [
"MIT"
] | null | null | null | sallybrowse/extensions/document/__init__.py | XiuyuanLu/browse | ee5ca57e54fe492d5b109b7cae87d1c8a45dbe25 | [
"MIT"
] | null | null | null | sallybrowse/extensions/document/__init__.py | XiuyuanLu/browse | ee5ca57e54fe492d5b109b7cae87d1c8a45dbe25 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys, os, re, html
from flask import request, Response
from sallybrowse.extensions import BaseExtension
from subprocess import Popen, PIPE
| 18.919355 | 102 | 0.597613 |
a11c3d72105134f3cd78ad0e461a7ff2f92aa01d | 4,713 | py | Python | Tests/testGalaxy.py | elsiehupp/traveller_pyroute | 32a43665910894896b807576125acee56ef02797 | [
"MIT"
] | 12 | 2017-02-09T08:58:16.000Z | 2021-09-04T22:12:57.000Z | Tests/testGalaxy.py | elsiehupp/traveller_pyroute | 32a43665910894896b807576125acee56ef02797 | [
"MIT"
] | 23 | 2017-07-14T05:04:30.000Z | 2022-03-27T02:20:06.000Z | Tests/testGalaxy.py | elsiehupp/traveller_pyroute | 32a43665910894896b807576125acee56ef02797 | [
"MIT"
] | 4 | 2016-12-31T06:23:47.000Z | 2022-03-03T19:36:43.000Z | """
Created on Nov 30, 2021
@author: CyberiaResurrection
"""
import unittest
import re
import sys
sys.path.append('../PyRoute')
from Galaxy import Galaxy
from Galaxy import Sector
| 46.205882 | 136 | 0.70401 |
a11c870ae3ef5f8dd838f6f8d4edc0a12f86fa5e | 188 | py | Python | py_boot/test.py | davidcawork/Investigacion | ed25678cbab26e30370e9e2d07b84029bbad4d0b | [
"Apache-2.0"
] | null | null | null | py_boot/test.py | davidcawork/Investigacion | ed25678cbab26e30370e9e2d07b84029bbad4d0b | [
"Apache-2.0"
] | null | null | null | py_boot/test.py | davidcawork/Investigacion | ed25678cbab26e30370e9e2d07b84029bbad4d0b | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
driver = webdriver.Firefox()
driver.get('https://www.google.com')
time.sleep(60)
driver.close()
| 20.888889 | 47 | 0.787234 |
a11d080c34ade0f2e6de40e4b89c652d910ddf38 | 1,240 | py | Python | tests/test_dlms_state.py | Layty/dlms-cosem | 95b67054a1dfb928e960547b0246b7b6794f0594 | [
"MIT"
] | 1 | 2021-08-20T09:19:07.000Z | 2021-08-20T09:19:07.000Z | tests/test_dlms_state.py | Layty/dlms-cosem | 95b67054a1dfb928e960547b0246b7b6794f0594 | [
"MIT"
] | null | null | null | tests/test_dlms_state.py | Layty/dlms-cosem | 95b67054a1dfb928e960547b0246b7b6794f0594 | [
"MIT"
] | null | null | null | import pytest
from dlms_cosem import enumerations, state
from dlms_cosem.exceptions import LocalDlmsProtocolError
from dlms_cosem.protocol import acse
from dlms_cosem.protocol.acse import UserInformation
from dlms_cosem.protocol.xdlms import Conformance, InitiateRequestApdu
| 32.631579 | 84 | 0.765323 |
a11ebc5157787a925779b80587bf0be3060a8389 | 705 | py | Python | sets-add.py | limeonion/Python-Programming | 90cbbbd7651fc04669e21be2adec02ba655868cf | [
"MIT"
] | null | null | null | sets-add.py | limeonion/Python-Programming | 90cbbbd7651fc04669e21be2adec02ba655868cf | [
"MIT"
] | null | null | null | sets-add.py | limeonion/Python-Programming | 90cbbbd7651fc04669e21be2adec02ba655868cf | [
"MIT"
] | null | null | null | '''
f we want to add a single element to an existing set, we can use the .add() operation.
It adds the element to the set and returns 'None'.
Example
>>> s = set('HackerRank')
>>> s.add('H')
>>> print s
set(['a', 'c', 'e', 'H', 'k', 'n', 'r', 'R'])
>>> print s.add('HackerRank')
None
>>> print s
set(['a', 'c', 'e', 'HackerRank', 'H', 'k', 'n', 'r', 'R'])
The first line contains an integer N, the total number of country stamps.
The next N lines contains the name of the country where the stamp is from.
Output Format
Output the total number of distinct country stamps on a single line.
'''
n = int(input())
countries = set()
for i in range(n):
countries.add(input())
print(len(countries))
| 22.741935 | 87 | 0.635461 |
a120a8bf6158dc27ba03b14f3d39ab89d4fa4e32 | 2,331 | py | Python | lesson_08/lesson_08_06.py | amindmobile/geekbrains-python-002 | 4bc2f7af755d00e73ddc48f1138830cb78e87034 | [
"MIT"
] | null | null | null | lesson_08/lesson_08_06.py | amindmobile/geekbrains-python-002 | 4bc2f7af755d00e73ddc48f1138830cb78e87034 | [
"MIT"
] | null | null | null | lesson_08/lesson_08_06.py | amindmobile/geekbrains-python-002 | 4bc2f7af755d00e73ddc48f1138830cb78e87034 | [
"MIT"
] | null | null | null | # 6. . . ,
# , , .
# : ,
# .
unit_1 = Printer('hp', 2000, 5, 10)
unit_2 = Scanner('Canon', 1200, 5, 10)
unit_3 = Copier('Xerox', 1500, 1, 15)
print(unit_1.reception())
print(unit_2.reception())
print(unit_3.reception())
print(unit_1.to_print())
print(unit_3.to_copier())
| 33.3 | 118 | 0.637066 |
a120f8eceb39d652a13f796940ef296a98d1bfaa | 1,212 | py | Python | epicteller/core/dao/credential.py | KawashiroNitori/epicteller | 264b11e7e6eb58beb0f67ecbbb811d268a533f7a | [
"MIT"
] | null | null | null | epicteller/core/dao/credential.py | KawashiroNitori/epicteller | 264b11e7e6eb58beb0f67ecbbb811d268a533f7a | [
"MIT"
] | null | null | null | epicteller/core/dao/credential.py | KawashiroNitori/epicteller | 264b11e7e6eb58beb0f67ecbbb811d268a533f7a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Optional
from epicteller.core import redis
from epicteller.core.model.credential import Credential
| 32.756757 | 106 | 0.679868 |
a121e58fcc354bb0486144293e6dc4511324fbba | 1,046 | py | Python | option.py | lotress/new-DL | adc9f6f94538088d3d70327d9c7bb089ef7e1638 | [
"MIT"
] | null | null | null | option.py | lotress/new-DL | adc9f6f94538088d3d70327d9c7bb089ef7e1638 | [
"MIT"
] | null | null | null | option.py | lotress/new-DL | adc9f6f94538088d3d70327d9c7bb089ef7e1638 | [
"MIT"
] | null | null | null | from common import *
from model import vocab
option = dict(edim=256, epochs=1.5, maxgrad=1., learningrate=1e-3, sdt_decay_step=1, batchsize=8, vocabsize=vocab, fp16=2, saveInterval=10, logInterval=.4)
option['loss'] = lambda opt, model, y, out, *_, rewards=[]: F.cross_entropy(out.transpose(-1, -2), y, reduction='none')
option['criterion'] = lambda y, out, mask, *_: (out[:,:,1:vocab].max(-1)[1] + 1).ne(y).float() * mask.float()
option['startEnv'] = lambda x, y, l, *args: (x, y, l, *args)
option['stepEnv'] = lambda i, pred, l, *args: (False, 1., None, None) # done episode, fake reward, Null next input, Null length, Null args
option['cumOut'] = False # True to keep trajectory
option['devices'] = [0] if torch.cuda.is_available() else [] # list of GPUs
option['init_method'] = 'file:///tmp/sharedfile' # initial configuration for multiple-GPU training
try:
from qhoptim.pyt import QHAdam
option['newOptimizer'] = lambda opt, params, _: QHAdam(params, lr=opt.learningrate, nus=(.7, .8), betas=(0.995, 0.999))
except ImportError: pass
| 69.733333 | 155 | 0.686424 |
a122487d9193d1e9db5e1e4904c5779cf5ab0b4a | 1,713 | py | Python | Release/cyberbot-micropython/Examples/Terminal_DA_AD.py | parallaxinc/cyberbot | f7c4d355ee0310dcfef81027802cc41ac6ce90e1 | [
"MIT"
] | 4 | 2019-03-18T20:49:41.000Z | 2022-03-24T01:44:36.000Z | Release/cyberbot-micropython/Examples/Terminal_DA_AD.py | parallaxinc/cyberbot | f7c4d355ee0310dcfef81027802cc41ac6ce90e1 | [
"MIT"
] | 5 | 2019-06-07T18:09:27.000Z | 2021-04-08T17:16:55.000Z | Release/cyberbot-micropython/Examples/Terminal_DA_AD.py | parallaxinc/cyberbot | f7c4d355ee0310dcfef81027802cc41ac6ce90e1 | [
"MIT"
] | null | null | null | # Terminal_DA_AD.py
# Circuit
# D/A0---A/D0, D/A1---A/D1,
# pot A---3.3V, potB---GND, pot wiper---A/D2
# Procedure
# Run, then open REPL and then CTRL + D
# Twist pot input while program runs to see ad2 vary
# Notes
# micro:bit ground is 0.4 V below cyber:bot board ground
# micro:bit 3.3 V = 3.245 V WRT cyber:bot board ground
# cyber:bot 3.3 V = 3.326 V WRT cyber:bot board ground
# Output example
# da0 = 0, da1 = 1024, ad0 = 13, ad1 = 623, ad2 = 7
# da0 = 64, da1 = 960, ad0 = 72, ad1 = 998, ad2 = 7
# da0 = 128, da1 = 896, ad0 = 137, ad1 = 934, ad2 = 7
# da0 = 192, da1 = 832, ad0 = 203, ad1 = 871, ad2 = 7
# da0 = 256, da1 = 768, ad0 = 266, ad1 = 805, ad2 = 87
# da0 = 320, da1 = 704, ad0 = 332, ad1 = 744, ad2 = 150
# da0 = 384, da1 = 640, ad0 = 398, ad1 = 680, ad2 = 211
# da0 = 448, da1 = 576, ad0 = 461, ad1 = 617, ad2 = 261
# da0 = 512, da1 = 512, ad0 = 526, ad1 = 554, ad2 = 308
# da0 = 576, da1 = 448, ad0 = 588, ad1 = 490, ad2 = 372
# da0 = 640, da1 = 384, ad0 = 652, ad1 = 425, ad2 = 469
# da0 = 704, da1 = 320, ad0 = 716, ad1 = 360, ad2 = 629
# da0 = 768, da1 = 256, ad0 = 779, ad1 = 295, ad2 = 806
# da0 = 832, da1 = 192, ad0 = 845, ad1 = 231, ad2 = 867
# da0 = 896, da1 = 128, ad0 = 907, ad1 = 165, ad2 = 947
# da0 = 960, da1 = 64, ad0 = 970, ad1 = 100, ad2 = 1023
from cyberbot import *
bot(22).tone(2000, 300)
while True:
for da in range(0, 1024, 64):
bot(20).write_analog(da)
bot(21).write_analog(1024 - da)
sleep(20)
ad0 = pin0.read_analog()
ad1 = pin1.read_analog()
ad2 = pin2.read_analog()
print("da0 = %d, da1 = %d, ad0 = %d, ad1 = %d, ad2 = %d" % (da, 1024 - da, ad0, ad1, ad2))
sleep(150)
print(" ")
sleep(500) | 32.320755 | 92 | 0.565674 |
a122b64cab542d8bb7f50552627ee57f6ed6232b | 4,781 | py | Python | cinebot_mini_render_server/animation_routes.py | cheng-chi/cinebot_mini | 708a7c80d2f203dfe3b52bf84d9cbafac7673d27 | [
"MIT"
] | null | null | null | cinebot_mini_render_server/animation_routes.py | cheng-chi/cinebot_mini | 708a7c80d2f203dfe3b52bf84d9cbafac7673d27 | [
"MIT"
] | null | null | null | cinebot_mini_render_server/animation_routes.py | cheng-chi/cinebot_mini | 708a7c80d2f203dfe3b52bf84d9cbafac7673d27 | [
"MIT"
] | null | null | null | import bpy
from aiohttp import web
import numpy as np
from mathutils import Matrix, Vector
import asyncio
from cinebot_mini_render_server.blender_timer_executor import EXECUTOR
routes = web.RouteTableDef()
| 31.873333 | 113 | 0.665551 |
a124c13c10af7bc999fd4983d83bef5b21b878ff | 64 | py | Python | notebooks/_solutions/13-raster-processing32.py | jorisvandenbossche/DS-python-geospatial | 893a12edc5c203a75815f6dcb5f1e18c577c8cd5 | [
"BSD-3-Clause"
] | 58 | 2020-10-09T10:10:59.000Z | 2022-03-07T14:58:07.000Z | notebooks/_solutions/13-raster-processing32.py | jorisvandenbossche/DS-python-geospatial | 893a12edc5c203a75815f6dcb5f1e18c577c8cd5 | [
"BSD-3-Clause"
] | 24 | 2020-09-30T19:57:14.000Z | 2021-10-05T07:21:09.000Z | notebooks/_solutions/13-raster-processing32.py | jorisvandenbossche/DS-python-geospatial | 893a12edc5c203a75815f6dcb5f1e18c577c8cd5 | [
"BSD-3-Clause"
] | 19 | 2020-10-05T09:32:18.000Z | 2022-03-20T00:09:14.000Z | roads_subset = roads[roads["frc_omschrijving"].isin(road_types)] | 64 | 64 | 0.8125 |