hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7fc62c697596e38c94228733e0069508642f51ad | 198 | py | Python | app/emails/__init__.py | zollf/CITS3200 | 95fb7569dad325c057e441cd7265d3e85735c058 | [
"CC0-1.0"
] | null | null | null | app/emails/__init__.py | zollf/CITS3200 | 95fb7569dad325c057e441cd7265d3e85735c058 | [
"CC0-1.0"
] | null | null | null | app/emails/__init__.py | zollf/CITS3200 | 95fb7569dad325c057e441cd7265d3e85735c058 | [
"CC0-1.0"
] | null | null | null | from django.apps import AppConfig
default_app_config = 'app.emails.EmailAppConfig'
| 22 | 48 | 0.737374 |
7fc685dc97d5c6a0bef64129b54db775abc19da1 | 21,614 | py | Python | polyaxon_schemas/layers/core.py | gzcf/polyaxon-schemas | a381280cd7535f64158d52f0a9eff2afec997d90 | [
"MIT"
] | null | null | null | polyaxon_schemas/layers/core.py | gzcf/polyaxon-schemas | a381280cd7535f64158d52f0a9eff2afec997d90 | [
"MIT"
] | null | null | null | polyaxon_schemas/layers/core.py | gzcf/polyaxon-schemas | a381280cd7535f64158d52f0a9eff2afec997d90 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from marshmallow import fields, post_dump, post_load, validate
from polyaxon_schemas.constraints import ConstraintSchema
from polyaxon_schemas.initializations import (
GlorotNormalInitializerConfig,
InitializerSchema,
ZerosInitializerConfig
)
from polyaxon_schemas.layers.base import BaseLayerConfig, BaseLayerSchema
from polyaxon_schemas.regularizations import RegularizerSchema
from polyaxon_schemas.utils import ACTIVATION_VALUES, DType, StrOrFct
# class LambdaSchema(BaseLayerSchema):
| 28.364829 | 88 | 0.659434 |
7fc71d742b9583424424ab4953dff97d093bc116 | 5,556 | py | Python | tests/unit/models/cardlesscredit/test_create_payment.py | glendaesutanto/xendit-python | f9b131882ff7d045f2e2c6518933d1594efba3e6 | [
"MIT"
] | 10 | 2020-10-31T23:34:34.000Z | 2022-03-08T19:08:55.000Z | tests/unit/models/cardlesscredit/test_create_payment.py | glendaesutanto/xendit-python | f9b131882ff7d045f2e2c6518933d1594efba3e6 | [
"MIT"
] | 22 | 2020-07-30T14:25:07.000Z | 2022-03-31T03:55:46.000Z | tests/unit/models/cardlesscredit/test_create_payment.py | glendaesutanto/xendit-python | f9b131882ff7d045f2e2c6518933d1594efba3e6 | [
"MIT"
] | 11 | 2020-07-28T08:09:40.000Z | 2022-03-18T00:14:02.000Z | import pytest
from ..model_base_test import ModelBaseTest
from tests.sampleresponse.cardless_credit import cardless_credit_payment_response
from xendit.models import CardlessCredit, CardlessCreditType
# fmt: off
def test_raise_xendit_error_on_response_error_and_global_xendit(
self, mocker, mock_error_request_response, default_cardless_credit_data
):
self.run_raises_error_test_on_global_config(mocker, mock_error_request_response, default_cardless_credit_data)
# fmt: on
| 44.095238 | 121 | 0.62203 |
7fc87ac068a828700f0e5927697f90ef933d4e60 | 293 | py | Python | docs/examples/http_proxy/constructor_argument.py | dupontz/libcloud | 419c69441ea10e7bbf37319e5e8d02e82e7e6b40 | [
"Apache-2.0"
] | 4 | 2017-11-14T17:24:12.000Z | 2020-10-30T01:46:02.000Z | docs/examples/http_proxy/constructor_argument.py | dupontz/libcloud | 419c69441ea10e7bbf37319e5e8d02e82e7e6b40 | [
"Apache-2.0"
] | 11 | 2017-01-29T08:59:21.000Z | 2018-07-02T09:17:47.000Z | docs/examples/http_proxy/constructor_argument.py | dupontz/libcloud | 419c69441ea10e7bbf37319e5e8d02e82e7e6b40 | [
"Apache-2.0"
] | 4 | 2016-04-04T08:01:48.000Z | 2018-06-06T08:04:36.000Z | from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
PROXY_URL_NO_AUTH_1 = 'http://<proxy hostname 1>:<proxy port 2>'
cls = get_driver(Provider.RACKSPACE)
driver = cls('username', 'api key', region='ord',
http_proxy=PROXY_URL_NO_AUTH_1)
| 32.555556 | 64 | 0.750853 |
7fc8a85a68b8ccffabd8645da52a646787f3b6c2 | 2,576 | py | Python | cakechat/dialog_model/factory.py | jacswork/cakechat | d46c3ef05be8adfeac5d48ff1cfcefb87ac1eb2e | [
"Apache-2.0"
] | 1 | 2020-03-20T18:38:47.000Z | 2020-03-20T18:38:47.000Z | cakechat/dialog_model/factory.py | jacswork/cakechat | d46c3ef05be8adfeac5d48ff1cfcefb87ac1eb2e | [
"Apache-2.0"
] | 64 | 2019-07-05T06:06:43.000Z | 2021-08-02T05:22:31.000Z | cakechat/dialog_model/factory.py | Spark3757/chatbot | 4e8eae70af2d5b68564d86b7ea0dbec956ae676f | [
"Apache-2.0"
] | 1 | 2020-12-04T15:25:45.000Z | 2020-12-04T15:25:45.000Z | import os
from cakechat.config import BASE_CORPUS_NAME, S3_MODELS_BUCKET_NAME, S3_TOKENS_IDX_REMOTE_DIR, \
S3_NN_MODEL_REMOTE_DIR, S3_CONDITIONS_IDX_REMOTE_DIR
from cakechat.dialog_model.model import get_nn_model
from cakechat.utils.s3 import S3FileResolver
from cakechat.utils.text_processing import get_index_to_token_path, load_index_to_item, get_index_to_condition_path
| 45.192982 | 119 | 0.733696 |
7fc9f53a7aff684d5bb0d1b56fcc2703e86c8f57 | 532 | py | Python | WhileLoop/GraduationPt.2.py | Rohitm619/Softuni-Python-Basic | 03c9d0b44f5652c99db3b0e42014dd5af50205a2 | [
"MIT"
] | 1 | 2020-09-22T13:25:34.000Z | 2020-09-22T13:25:34.000Z | WhileLoop/GraduationPt.2.py | Rohitm619/Softuni-Python-Basic | 03c9d0b44f5652c99db3b0e42014dd5af50205a2 | [
"MIT"
] | null | null | null | WhileLoop/GraduationPt.2.py | Rohitm619/Softuni-Python-Basic | 03c9d0b44f5652c99db3b0e42014dd5af50205a2 | [
"MIT"
] | 1 | 2020-10-17T09:27:46.000Z | 2020-10-17T09:27:46.000Z | name = input()
class_school = 1
sum_of_grades = 0
ejected = False
failed = 0
while True:
grade = float(input())
if grade >= 4.00:
sum_of_grades += grade
if class_school == 12:
break
class_school += 1
else:
failed += 1
if failed == 2:
ejected = True
break
if ejected:
print(f"{name} has been excluded at {class_school} grade")
else:
average = sum_of_grades / class_school
print(f"{name} graduated. Average grade: {average:.2f}")
| 19.703704 | 62 | 0.575188 |
7fc9fa1da3516cccfb91e93a1b16adc0a561f07f | 8,990 | py | Python | NAO/train_cifar.py | yaogood/NAS-tensorflow2 | a3ed9bc3a2a973c8c54d2ea5b7344a31ed86c057 | [
"BSD-3-Clause"
] | null | null | null | NAO/train_cifar.py | yaogood/NAS-tensorflow2 | a3ed9bc3a2a973c8c54d2ea5b7344a31ed86c057 | [
"BSD-3-Clause"
] | null | null | null | NAO/train_cifar.py | yaogood/NAS-tensorflow2 | a3ed9bc3a2a973c8c54d2ea5b7344a31ed86c057 | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
import glob
import time
import copy
import random
import numpy as np
import utils
import logging
import argparse
import tensorflow as tf
import tensorflow.keras as keras
from model import NASNetworkCIFAR
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# Basic model parameters.
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='train',
choices=['train', 'test'])
parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10, cifar100'])
parser.add_argument('--model_dir', type=str, default='models')
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--eval_batch_size', type=int, default=32)
parser.add_argument('--epochs', type=int, default=600)
parser.add_argument('--cells', type=int, default=6)
parser.add_argument('--nodes', type=int, default=5)
parser.add_argument('--channels', type=int, default=36)
parser.add_argument('--cutout_size', type=int, default=8)
parser.add_argument('--grad_bound', type=float, default=10.0)
parser.add_argument('--initial_lr', type=float, default=0.025)
parser.add_argument('--keep_prob', type=float, default=0.6)
parser.add_argument('--drop_path_keep_prob', type=float, default=0.8)
parser.add_argument('--l2_reg', type=float, default=3e-4)
parser.add_argument('--arch', type=str, default=None)
parser.add_argument('--use_aux_head', action='store_true', default=False)
parser.add_argument('--seed', type=int, default=9)
parser.add_argument('--train_from_scratch', type=bool, default=False)
args = parser.parse_args()
utils.create_exp_dir(args.model_dir)
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p')
if __name__ == '__main__':
import time
start_time = time.time()
train_cifar10()
print("--- %s seconds ---" % (time.time() - start_time))
| 44.068627 | 120 | 0.628031 |
7fca526c31f2627682c2720c9612c105d831e507 | 1,585 | py | Python | examples/regression.py | Spotflock/studio-sdk-python | 4831819d2a69755777ff773091afc4330f8a91f6 | [
"MIT"
] | 8 | 2019-03-25T17:21:27.000Z | 2019-03-26T10:34:30.000Z | examples/regression.py | Spotflock/studio-sdk-python | 4831819d2a69755777ff773091afc4330f8a91f6 | [
"MIT"
] | null | null | null | examples/regression.py | Spotflock/studio-sdk-python | 4831819d2a69755777ff773091afc4330f8a91f6 | [
"MIT"
] | null | null | null | import studio
if __name__ == '__main__':
main()
| 40.641026 | 93 | 0.692744 |
7fcb384cb9988d683d28c2f7b5a6810c88a449fa | 2,763 | py | Python | VAE/models/vae_mnist.py | Aroksak/generative-dl | 66b71860266d15736b66b0b17fff37c7e881b142 | [
"MIT"
] | null | null | null | VAE/models/vae_mnist.py | Aroksak/generative-dl | 66b71860266d15736b66b0b17fff37c7e881b142 | [
"MIT"
] | null | null | null | VAE/models/vae_mnist.py | Aroksak/generative-dl | 66b71860266d15736b66b0b17fff37c7e881b142 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
| 32.127907 | 106 | 0.627579 |
7fcd0efe44d52a8f5eb0ccaff5033e799faefab2 | 503 | py | Python | json-read.py | ccoffrin/py-json-examples | c01bf6994e4480470939621ed0b4b7043b38819f | [
"MIT"
] | null | null | null | json-read.py | ccoffrin/py-json-examples | c01bf6994e4480470939621ed0b4b7043b38819f | [
"MIT"
] | null | null | null | json-read.py | ccoffrin/py-json-examples | c01bf6994e4480470939621ed0b4b7043b38819f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import json
data_json = {}
with open('data/json_00.json', 'r') as file:
data_json = json.load(file)
print(data_json)
print(data_json[0])
print(data_json[1])
print(data_json[2])
print(data_json[3])
print(data_json[4])
print(data_json[5])
print(data_json[6])
print(data_json[5][0])
print(data_json[5][1])
print(data_json[5][2])
print(data_json[5][3])
print(data_json[6])
print(data_json[6]["A"])
print(data_json[6]["B"])
print(data_json[6]["C"])
print(data_json[6]["D"])
| 16.766667 | 44 | 0.691849 |
7fce7a6d8d2ce871e7042ada46c6923907411052 | 257 | py | Python | api_python/app/models/classes_basicas/Empregado.py | uninassau-2020-2/proj-grupo5 | ea7ca233004860a432f7301c72bde03fccce5f92 | [
"CC0-1.0"
] | null | null | null | api_python/app/models/classes_basicas/Empregado.py | uninassau-2020-2/proj-grupo5 | ea7ca233004860a432f7301c72bde03fccce5f92 | [
"CC0-1.0"
] | null | null | null | api_python/app/models/classes_basicas/Empregado.py | uninassau-2020-2/proj-grupo5 | ea7ca233004860a432f7301c72bde03fccce5f92 | [
"CC0-1.0"
] | null | null | null | from app.models.classes_basicas.Pessoa import Pessoa | 23.363636 | 52 | 0.723735 |
7fcf8c04bfee9a81a78aefffecb7fb16cd7ee1e5 | 19,028 | py | Python | suiko/createDiff.py | nakamura196/tei | 7aa62bc0603bbff03f96a3dbaad82d8feb6126ba | [
"Apache-2.0"
] | null | null | null | suiko/createDiff.py | nakamura196/tei | 7aa62bc0603bbff03f96a3dbaad82d8feb6126ba | [
"Apache-2.0"
] | null | null | null | suiko/createDiff.py | nakamura196/tei | 7aa62bc0603bbff03f96a3dbaad82d8feb6126ba | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import difflib
import xml.etree.ElementTree as ET
tmp_path = "data/template.xml"
prefix = ".//{http://www.tei-c.org/ns/1.0}"
xml = ".//{http://www.w3.org/XML/1998/namespace}"
tree = ET.parse(tmp_path)
ET.register_namespace('', "http://www.tei-c.org/ns/1.0")
root = tree.getroot()
body = root.find(prefix + "body")
p = ET.Element("{http://www.tei-c.org/ns/1.0}p")
body.append(p)
a = ""
b = ""
id_a = "A006267-002"
id_b = "A006371-002"
title_a = " "
title_b = " "
sourceDesc = root.find(prefix + "sourceDesc")
listWit = ET.Element("{http://www.tei-c.org/ns/1.0}listWit")
sourceDesc.append(listWit)
witness = ET.Element("{http://www.tei-c.org/ns/1.0}witness")
listWit.append(witness)
witness.set("xml:id", id_a)
witness.text = title_a
witness = ET.Element("{http://www.tei-c.org/ns/1.0}witness")
listWit.append(witness)
witness.set("xml:id", id_b)
witness.text = title_b
teiHeader = root.find(prefix + "teiHeader")
encodingDesc = ET.Element("{http://www.tei-c.org/ns/1.0}encodingDesc")
teiHeader.append(encodingDesc)
variantEncoding = ET.Element("{http://www.tei-c.org/ns/1.0}variantEncoding")
encodingDesc.append(variantEncoding)
variantEncoding.set("method", "parallel-segmentation")
variantEncoding.set("location", "internal")
s = difflib.SequenceMatcher(None, a, b)
old_ele = p
for tag, i1, i2, j1, j2 in s.get_opcodes():
if tag == "delete":
app = ET.Element("{http://www.tei-c.org/ns/1.0}app")
p.append(app)
rdg = ET.Element("{http://www.tei-c.org/ns/1.0}rdg")
app.append(rdg)
rdg.set("wit", "#"+id_a)
rdg.text = a[i1:i2]
old_ele = app
elif tag == "insert":
app = ET.Element("{http://www.tei-c.org/ns/1.0}app")
p.append(app)
rdg = ET.Element("{http://www.tei-c.org/ns/1.0}rdg")
app.append(rdg)
rdg.set("wit", "#"+id_b)
rdg.text = b[j1:j2]
old_ele = app
elif tag == "replace":
app = ET.Element("{http://www.tei-c.org/ns/1.0}app")
p.append(app)
rdg = ET.Element("{http://www.tei-c.org/ns/1.0}rdg")
app.append(rdg)
rdg.set("wit", "#"+id_a)
rdg.text = a[i1:i2]
rdg = ET.Element("{http://www.tei-c.org/ns/1.0}rdg")
app.append(rdg)
rdg.set("wit", "#"+id_b)
rdg.text = b[j1:j2]
old_ele = app
elif tag == "equal":
old_ele.tail = a[i1:i2]
else:
print(tag)
tree.write("data/diff.xml", encoding="utf-8") | 184.737864 | 11,603 | 0.947393 |
7fd026487b4ed720e388b3ddeb8812e59526c4f0 | 6,342 | py | Python | tests/python/pants_test/pants_run_integration_test.py | WamBamBoozle/pants | 98cadfa1a5d337146903eb66548cfe955f2627b3 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/pants_run_integration_test.py | WamBamBoozle/pants | 98cadfa1a5d337146903eb66548cfe955f2627b3 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/pants_run_integration_test.py | WamBamBoozle/pants | 98cadfa1a5d337146903eb66548cfe955f2627b3 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import subprocess
import unittest
from collections import namedtuple
from operator import eq, ne
from pants.base.build_environment import get_buildroot
from pants.fs.archive import ZIP
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir, safe_open
PantsResult = namedtuple('PantsResult', ['command', 'returncode', 'stdout_data', 'stderr_data'])
| 41.45098 | 99 | 0.677231 |
7fd3371311cc6675c8548300ec8d2acf6af4b1ea | 2,036 | py | Python | ireporterApp/migrations/0001_initial.py | George-Okumu/IReporter-Django | 5962984ce0069cdf048dbf91686377568a7cf55b | [
"MIT"
] | null | null | null | ireporterApp/migrations/0001_initial.py | George-Okumu/IReporter-Django | 5962984ce0069cdf048dbf91686377568a7cf55b | [
"MIT"
] | 1 | 2021-10-06T20:15:11.000Z | 2021-10-06T20:15:11.000Z | ireporterApp/migrations/0001_initial.py | George-Okumu/IReporter-Django | 5962984ce0069cdf048dbf91686377568a7cf55b | [
"MIT"
] | null | null | null | # Generated by Django 3.2.8 on 2021-10-13 16:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import ireporterApp.models
| 45.244444 | 147 | 0.623281 |
7fd3a16afbbba984f178b48eb62fc0be86afc9a5 | 1,090 | py | Python | ethronsoft/gcspypi/parsers/list.py | JuergenSimon/gcspypi | 80ac843e6702161915db45949c470d749aaabfda | [
"BSD-2-Clause"
] | null | null | null | ethronsoft/gcspypi/parsers/list.py | JuergenSimon/gcspypi | 80ac843e6702161915db45949c470d749aaabfda | [
"BSD-2-Clause"
] | null | null | null | ethronsoft/gcspypi/parsers/list.py | JuergenSimon/gcspypi | 80ac843e6702161915db45949c470d749aaabfda | [
"BSD-2-Clause"
] | null | null | null | from ethronsoft.gcspypi.package.package_manager import PackageManager
from ethronsoft.gcspypi.utilities.console import Console
from ethronsoft.gcspypi.parsers.commons import init_repository | 49.545455 | 108 | 0.665138 |
7fd55e4cd2783cbb99a566e8a1ee6ac0b5a0d931 | 18,880 | py | Python | library/oci_dhcp_options.py | slmjy/oci-ansible-modules | 4713699064f4244b4554b5b2f97b5e5443fa2d6e | [
"Apache-2.0"
] | 106 | 2018-06-29T16:38:56.000Z | 2022-02-16T16:38:56.000Z | library/oci_dhcp_options.py | slmjy/oci-ansible-modules | 4713699064f4244b4554b5b2f97b5e5443fa2d6e | [
"Apache-2.0"
] | 122 | 2018-09-11T12:49:39.000Z | 2021-05-01T04:54:22.000Z | library/oci_dhcp_options.py | slmjy/oci-ansible-modules | 4713699064f4244b4554b5b2f97b5e5443fa2d6e | [
"Apache-2.0"
] | 78 | 2018-07-04T05:48:54.000Z | 2022-03-09T06:33:12.000Z | #!/usr/bin/python
# Copyright (c) 2017, 2018, 2019, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_dhcp_options
short_description: Create,update and delete OCI Dhcp Options
description:
- Creates OCI Dhcp Options
- Update OCI Dhcp Options, if present, with a new display name
- Update OCI Dhcp Options, if present, by appending new options to existing options
- Update OCI Dhcp Options, if present, by purging existing options and replacing them with
specified ones
- Delete OCI Dhcp Options, if present.
version_added: "2.5"
options:
compartment_id:
description: Identifier of the compartment under which this
Dhcp Options would be created. Mandatory for create
operation.Optional for delete and update. Mutually exclusive
with dhcp_id.
required: false
vcn_id:
description: Identifier of the Virtual Cloud Network to which the
Dhcp Options should be attached. Mandatory for create
operation. Optional for delete and update. Mutually exclusive
with dhcp_id.
required: false
dhcp_id:
description: Identifier of the Dhcp Options. Mandatory for delete and update.
required: false
aliases: ['id']
display_name:
description: Name of the Dhcp Options. A user friendly name. Does not have to be unique,
and could be changed. If not specified, a default name would be provided.
required: false
aliases: ['name']
options:
description: A set of DHCP options. Mandatory for create and update.
required: false
suboptions:
type:
description: The specific DHCP option.
required: true
choices: ['DomainNameServer', 'SearchDomain']
server_type:
description: Applicable only for the I(type='DomainNameServer').Describes the
type of the server.
required: true
choices: ['VcnLocalPlusInternet', 'CustomDnsServer']
custom_dns_servers:
description: Applicable only for the I(type='DomainNameServer') and I(server_type='CustomDnsServer').
Maximum three DNS server ips are allowed as part of this option.
required: false
search_domain_names:
description: Applicable only for the I(type='SearchDomain').A single search domain name
according to RFC 952 and RFC 1123. Do not include this option with an empty
list of search domain names, or with an empty string as the value for any search
domain name.
required: true
purge_dhcp_options:
description: Purge existing Dhcp Options which are not present in the provided
Dhcp Options. If I(purge_dhcp_options=no), provided options would be
appended to existing options. I(purge_dhcp_options) and I(delete_dhcp_options)
are mutually exclusive.
required: false
default: 'yes'
type: bool
delete_dhcp_options:
description: Delete existing Dhcp Options which are present in the Dhcp Options provided by
I(options). If I(delete_dhcp_options=yes), options provided by I(options) would be
deleted from existing options, if they are part of existing dhcp options.
If they are not part of existing dhcp options, they will be ignored.
I(delete_dhcp_options) and I(purge_dhcp_options) are mutually exclusive.
required: false
default: 'no'
type: bool
state:
description: Create,update or delete Dhcp Options. For I(state=present), if it
does not exist, it gets created. If it exists, it gets updated.
required: false
default: 'present'
choices: ['present','absent']
author:
- "Debayan Gupta(@debayan_gupta)"
extends_documentation_fragment: [ oracle, oracle_creatable_resource, oracle_wait_options, oracle_tags ]
"""
EXAMPLES = """
#Note: These examples do not set authentication details.
#Create/update Dhcp Options
- name: Create Dhcp options
oci_dhcp_options:
compartment_id: 'ocid1.compartment..xdsc'
name: 'ansible_dhcp_options'
vcn_id: 'ocid1.vcn..aaaa'
options:
- type: 'DomainNameServer'
server_type: 'VcnLocalPlusInternet'
custom_dns_servers: []
- type: 'SearchDomain'
search_domain_names: ['ansibletestvcn.oraclevcn.com']
freeform_tags:
region: 'east'
defined_tags:
features:
capacity: 'medium'
state: 'present'
# Update Dhcp Options by appending new options
- name: Update Dhcp Options by appending new options
oci_dhcp_options:
id: 'ocid1.dhcpoptions.oc1.aaa'
purge_dhcp_options: 'no'
options:
- type: 'DomainNameServer'
server_type: 'CustomDnsServer'
custom_dns_servers: ['10.0.0.8']
- type: 'SearchDomain'
search_domain_names: ['ansibletestvcn.oraclevcn.com']
state: 'present'
# Update Dhcp Options by purging existing options
- name: Update Dhcp Options by purging existing options
oci_dhcp_options:
dhcp_id: 'ocid1.dhcpoptions.oc1.aaa'
options:
- type: 'DomainNameServer'
server_type: 'CustomDnsServer'
custom_dns_servers: ['10.0.0.8', '10.0.0.10', '10.0.0.12']
- type: 'SearchDomain'
search_domain_names: ['ansibletestvcn.oraclevcn.com']
state: 'present'
# Update Dhcp Options by deleting existing options
- name: Update Dhcp Options by deleting existing options
oci_dhcp_options:
dhcp_id: 'ocid1.dhcpoptions.oc1.aaa'
options:
- type: 'DomainNameServer'
server_type: 'CustomDnsServer'
custom_dns_servers: ['10.0.0.8', '10.0.0.10', '10.0.0.12']
delete_dhcp_options: 'yes'
state: 'present'
#Delete Dhcp Options
- name: Delete Dhcp Options
oci_dhcp_options:
dhcp_id: 'ocid1.dhcpoptions..xdsc'
state: 'absent'
"""
RETURN = """
dhcp_options:
description: Attributes of the created/updated Dhcp Options.
For delete, deleted Dhcp Options description will
be returned.
returned: success
type: complex
contains:
compartment_id:
description: The identifier of the compartment containing the Dhcp Options
returned: always
type: string
sample: ocid1.compartment.oc1.xzvf..oifds
display_name:
description: Name assigned to the Dhcp Options during creation
returned: always
type: string
sample: ansible_dhcp_options
id:
description: Identifier of the Dhcp Options
returned: always
type: string
sample: ocid1.dhcpoptions.oc1.axdf
vcn_id:
description: Identifier of the Virtual Cloud Network to which the
Dhcp Options is attached.
returned: always
type: string
sample: ocid1.vcn..ixcd
lifecycle_state:
description: The current state of the Dhcp Options
returned: always
type: string
sample: AVAILABLE
options:
description: A list of dhcp options.
returned: always
type: list
sample: [{"custom_dns_servers": [],"server_type": "CustomDnsServer","type": "DomainNameServer"},
{"search_domain_names": ["myansiblevcn.oraclevcn.com"],"type": "SearchDomain"}]
time_created:
description: Date and time when the Dhcp Options was created, in
the format defined by RFC3339
returned: always
type: datetime
sample: 2016-08-25T21:10:29.600Z
sample: {
"compartment_id":"ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
"freeform_tags":{"region":"east"},
"defined_tags":{"features":{"capacity":"medium"}},
"display_name":"ansible_dhcp_options",
"id":"ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx",
"lifecycle_state":"AVAILABLE",
"options":[
{
"custom_dns_servers":[],
"server_type":"VcnLocalPlusInternet",
"type":"DomainNameServer"
},
{
"search_domain_names":["ansibletestvcn.oraclevcn.com"],
"type":"SearchDomain"
},
{
"custom_dns_servers":["10.0.0.8"],
"server_type":"CustomDnsServer",
"type":"DomainNameServer"
}
],
"time_created":"2017-11-26T16:41:06.996000+00:00",
"vcn_id":"ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils
try:
from oci.core import VirtualNetworkClient
from oci.exceptions import ServiceError, MaximumWaitTimeExceeded, ClientError
from oci.util import to_dict
from oci.core.models import (
CreateDhcpDetails,
DhcpDnsOption,
UpdateDhcpDetails,
DhcpSearchDomainOption,
)
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
if __name__ == "__main__":
main()
| 39.251559 | 117 | 0.610911 |
7fd5f598f64deef95bef0002d5fa94fb5341e2f5 | 4,066 | py | Python | canistream.py | otakucode/canistream | 42682b05eeaf98d6bd13125508c871a5cc5cb885 | [
"MIT"
] | null | null | null | canistream.py | otakucode/canistream | 42682b05eeaf98d6bd13125508c871a5cc5cb885 | [
"MIT"
] | null | null | null | canistream.py | otakucode/canistream | 42682b05eeaf98d6bd13125508c871a5cc5cb885 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
__author__ = 'otakucode'
import argparse
import sys
from urllib import parse
from bs4 import BeautifulSoup
import requests
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Search www.canistream.it for movie availability.')
parser.add_argument('movie', metavar='Title', type=str, help='title to search for')
#parser.add_argument('--tv', help='search for TV show instead of movie')
parser.add_argument('--verbose', '-v', action='store_true')
parser.add_argument('--no-streaming', action='append_const', const='streaming', dest='omits', help='do not search for streaming availability')
parser.add_argument('--no-rental', action='append_const', const='rental', dest='omits', help='do not search for rental availability')
parser.add_argument('--no-purchase', action='append_const', const='purchase', dest='omits', help='do not search for purchase availability')
parser.add_argument('--no-xfinity', action='append_const', const='xfinity', dest='omits', help='do not search for xfinity availability')
args = parser.parse_args()
print("Searching...", end='')
sys.stdout.flush()
movie = get_title('movie', args.movie)
if movie is None:
print("\rNo titles matching '{0}' found.".format(args.movie))
sys.exit()
(movie_id, proper_title) = movie
results = get_availability(movie_id, args.verbose, args.omits)
if len(results) == 0:
print('\r"{0}" is not currently available.'.format(proper_title))
else:
print('\r"{0}" is available via: '.format(proper_title), end='')
print(results)
| 37.302752 | 146 | 0.599361 |
7fd64d5d9687aeafd41778f375063551f567e46f | 67 | py | Python | homeassistant/components/hardware/const.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/hardware/const.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/hardware/const.py | liangleslie/core | cc807b4d597daaaadc92df4a93c6e30da4f570c6 | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Constants for the Hardware integration."""
DOMAIN = "hardware"
| 16.75 | 45 | 0.716418 |
7fd65ccdc6806c6f41c85d9bb13f95232c26ec00 | 2,646 | py | Python | packages/_debug_app/app.py | shellyln/red-agate | 71847872caded631b4783f3baaf5a3e2a0b495a0 | [
"0BSD"
] | 14 | 2017-12-03T15:57:17.000Z | 2021-07-11T12:57:24.000Z | packages/_debug_app/app.py | shellyln/red-agate | 71847872caded631b4783f3baaf5a3e2a0b495a0 | [
"0BSD"
] | 10 | 2020-02-25T08:20:38.000Z | 2020-09-03T22:00:18.000Z | packages/_debug_app/app.py | shellyln/red-agate | 71847872caded631b4783f3baaf5a3e2a0b495a0 | [
"0BSD"
] | 4 | 2018-03-30T16:09:44.000Z | 2022-01-03T19:26:16.000Z | #!/usr/bin/env python3
import json
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../red-agate/')
# pylint: disable=import-error, wrong-import-position
from redagate_lambda import call, LambdaInternalErrorException
# pylint: enable=import-error, wrong-import-position
if __name__ == '__main__':
from flask import Flask, abort, Response
app = Flask(__name__)
port = int(os.environ['PORT']) if os.environ.get('PORT') is not None else None
# To debug with VSCode, set debug=True, use_debugger=False, use_reloader=False.
# debug - whether to enable debug mode and catch exceptions.
# use_debugger - whether to use the internal Flask debugger.
# use_reloader - whether to reload and fork the process on exception.
app.run(debug=True, use_debugger=False, use_reloader=False, port=port)
| 37.8 | 116 | 0.620937 |
7fd69c3d5f382287835cb80d361531b2ea2f11db | 1,290 | py | Python | cmsplugin_cascade/migrations/0009_cascadepage.py | teklager/djangocms-cascade | adc461f7054c6c0f88bc732aefd03b157df2f514 | [
"MIT"
] | 139 | 2015-01-08T22:27:06.000Z | 2021-08-19T03:36:58.000Z | cmsplugin_cascade/migrations/0009_cascadepage.py | teklager/djangocms-cascade | adc461f7054c6c0f88bc732aefd03b157df2f514 | [
"MIT"
] | 286 | 2015-01-02T14:15:14.000Z | 2022-03-22T11:00:12.000Z | cmsplugin_cascade/migrations/0009_cascadepage.py | teklager/djangocms-cascade | adc461f7054c6c0f88bc732aefd03b157df2f514 | [
"MIT"
] | 91 | 2015-01-16T15:06:23.000Z | 2022-03-23T23:36:54.000Z | from django.db import migrations, models
import django.db.models.deletion
| 44.482759 | 199 | 0.628682 |
7fd7f3a7ab836b1162a754535f994bd325636a89 | 1,679 | py | Python | es_import_poet.py | ly3too/chinese-poetry | 47362e5b7bf3976c986765eb8eb9b82e771e0771 | [
"MIT"
] | null | null | null | es_import_poet.py | ly3too/chinese-poetry | 47362e5b7bf3976c986765eb8eb9b82e771e0771 | [
"MIT"
] | null | null | null | es_import_poet.py | ly3too/chinese-poetry | 47362e5b7bf3976c986765eb8eb9b82e771e0771 | [
"MIT"
] | 1 | 2020-11-27T06:49:33.000Z | 2020-11-27T06:49:33.000Z | from elasticsearch_dsl import *
import os
from glob import glob
import json
import re
from . import to_zh_cn
def _index_json_files(pattern, doc_cls, dynasty_patt):
    """Load every JSON file matching *pattern* and index its items as *doc_cls*.

    The dynasty is parsed from the file name (e.g. ``poet.tang.json`` ->
    ``tang``) and attached to every item before it is converted with
    ``to_zh_cn`` and saved to Elasticsearch.
    """
    for path in glob(pattern):
        # Source files contain Chinese text; read them as UTF-8 explicitly
        # so the import does not depend on the platform's locale encoding.
        with open(path, 'r', encoding='utf-8') as f:
            data = json.load(f)
        dynasty = dynasty_patt.findall(os.path.basename(path))[0]
        for item in data:
            item["dynasty"] = dynasty
            doc_cls(**to_zh_cn(item)).save()


def do_es_import():
    """
    Import poem and author data from the ./json directory into Elasticsearch.

    Expects files named like ``poet.<dynasty>.json`` and
    ``authors.<dynasty>.json`` located next to this module.
    """
    Poet.init()
    Author.init()
    # e.g. 'poet.tang.json' -> captures 'tang'
    patt = re.compile(r'^[a-zA-Z]+\.([a-zA-Z]+)\.')
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    _index_json_files("{}/json/poet.*.json".format(cur_dir), Poet, patt)
    _index_json_files("{}/json/authors.*.json".format(cur_dir), Author, patt)
| 27.080645 | 77 | 0.561048 |
7fd89c7d4555eeef4b73bc37f963bc2cf833445b | 5,785 | py | Python | pyenvgui/gui/components/_version_management.py | ulacioh/pyenv-gui | 03c3b102d78b474f103f7e828533a684f3e87ff6 | [
"BSD-3-Clause"
] | 2 | 2020-05-18T04:37:37.000Z | 2020-06-01T03:33:48.000Z | pyenvgui/gui/components/_version_management.py | ulacioh/pyenv-manager | 03c3b102d78b474f103f7e828533a684f3e87ff6 | [
"BSD-3-Clause"
] | null | null | null | pyenvgui/gui/components/_version_management.py | ulacioh/pyenv-manager | 03c3b102d78b474f103f7e828533a684f3e87ff6 | [
"BSD-3-Clause"
] | null | null | null | from threading import Thread
import tkinter as tk
from tkinter import ttk, messagebox
from . import pyenv
from ._custom_widgets import Treeview
| 33.247126 | 94 | 0.498876 |
7fda7ecbf9da0226a54341ecb40e210f62c31957 | 1,951 | py | Python | proj/python/Test/dictStock.py | jumib/BlackTensor | d66a4fb5289dbe86104900072284e4a881f55645 | [
"MIT"
] | null | null | null | proj/python/Test/dictStock.py | jumib/BlackTensor | d66a4fb5289dbe86104900072284e4a881f55645 | [
"MIT"
] | null | null | null | proj/python/Test/dictStock.py | jumib/BlackTensor | d66a4fb5289dbe86104900072284e4a881f55645 | [
"MIT"
] | null | null | null | import requests
# host = 'localhost:8080'
# path = '/member/changeAppId'
# payload = {'UserId' : userId }
# r = requests.get('localhost:8080/member/changeAppId', params=payload)
# import requests
# import json
#
# # GET
# res = requests.get('http://localhost:8080/member/changeAppId')
# print(str(res.status_code) + " | " + res.text)
#
# # POST (JSON)
# headers = {'Content-Type': 'application/json; chearset=utf-8'}
# payload = {'UserId' : 'userId' }
# res = requests.post('http://localhost:8080/member/changeAppId', payload=json.dumps(payload), headers=headers)
# print(str(res.status_code) + " | " + res.text)
#
# class DictStock:
# @app.route('/history/buy')
# def PythonServerResponse(self, itemName, m_date, openPrice, highPrice, lowPrice, currentPrice, volumn, tradingValue):
# print("It's operate")
# # self.myViewController = vc.ViewController()
# json_object = {
# "name": itemName,
# "": m_date,
# "": openPrice,
# "": highPrice,
# "": lowPrice,
# "": currentPrice,
# "": volumn,
# "": tradingValue
# }
# json_string = json.dumps(json_object)
# print(json_string)
# # return jsonify(json_object)
#
# app.run()
# # # data = {
# # # # 'itemName' : itemName,
# # # 'date' : m_date,
# # # 'openPrice' : openPrice
# # # }
# # # json_data = json.dumps(data)
# # # print(json_data)
# #
# #
# # # import json
# # #
# # # json_object = {
# # # "id": 1,
# # # "username": "Bret",
# # # "email": "Sincere@april.biz",
# # # "address": {
# # # "street": "Kulas Light",
# # # "suite": "Apt. 556",
# # # "city": "Gwenborough",
# # # "zipcode": "92998-3874"
# # # },
# # # "admin": False,
# # # "hobbies": None
# # # }
# # #
# # # json_string = json.dumps(json_object)
# # # print(json_string)
| 26.364865 | 123 | 0.538186 |
7fdaa5ddf18fe9267f5687f8511f00b862b0cdb0 | 1,549 | py | Python | screen_scan.py | vjeranc/puzpirobot | 3d6d0014cbd3092add56295aa463e3b31b750733 | [
"MIT"
] | null | null | null | screen_scan.py | vjeranc/puzpirobot | 3d6d0014cbd3092add56295aa463e3b31b750733 | [
"MIT"
] | null | null | null | screen_scan.py | vjeranc/puzpirobot | 3d6d0014cbd3092add56295aa463e3b31b750733 | [
"MIT"
] | null | null | null | from PIL import ImageGrab, Image
import cv2 as cv
import numpy as np
import match.template_matching as tm
import match.bilging as b
from mss import mss
paths = [("A", './images/whiteblue_square.png', True, 0.9),
("B", './images/greenblue_diamond.png', True, 0.9),
("C", './images/lightblue_circle.png', True, 0.9),
("D", './images/lightyellow_circle.png', True, 0.9),
("E", './images/darkblue_square.png', True, 0.9),
("F", './images/lightblue_square.png', True, 0.9),
("G", './images/lightblue_diamond.png', True, 0.9),
("X", './images/puffer.png', False, 0.5),
("Y", './images/crab.png', False, 0.5),
("Z", './images/jellyfish.png', False, 0.5)]
patterns = [tm.build_pattern(p, n, shape=(45, 45), circle_mask=c, threshold=t)
for n, p, c, t in paths]
b.track_board_state(ScreenshotGrabber(), patterns)
| 32.270833 | 140 | 0.615881 |
7fdaeb9d10001a9b68a81dc49605856be1d46461 | 1,917 | py | Python | simulacoes/queue-sim/src/eventoSaidaFilaZero.py | paulosell/ADS29009 | a85bc0fe19993e3e6624c2605a362605b67c2311 | [
"MIT"
] | null | null | null | simulacoes/queue-sim/src/eventoSaidaFilaZero.py | paulosell/ADS29009 | a85bc0fe19993e3e6624c2605a362605b67c2311 | [
"MIT"
] | null | null | null | simulacoes/queue-sim/src/eventoSaidaFilaZero.py | paulosell/ADS29009 | a85bc0fe19993e3e6624c2605a362605b67c2311 | [
"MIT"
] | null | null | null | from src.event import Event
from src.rng import prng
from src.eventoChegadaFilaUm import EventoChegadaFilaUm
from src.eventoChegadaFilaDois import EventoChegadaFilaDois
"""
for ids in simulador.fila_tempos_zero:
if ids[0] == self.id:
dif = self.time - ids[1]
print(dif)
simulador.fila_soma.append(dif) """
| 38.34 | 111 | 0.586333 |
7fdbabe114f7c62834574d41bba0f9f62e53ca0f | 242 | py | Python | examples/camera/simple.py | Hikki12/remio | 17942bffe3c0619d3435b1a12399b116d4c800e3 | [
"Apache-2.0"
] | null | null | null | examples/camera/simple.py | Hikki12/remio | 17942bffe3c0619d3435b1a12399b116d4c800e3 | [
"Apache-2.0"
] | null | null | null | examples/camera/simple.py | Hikki12/remio | 17942bffe3c0619d3435b1a12399b116d4c800e3 | [
"Apache-2.0"
] | null | null | null | """Single simple camera example."""
import time
from remio import Camera
# Initialize Single Camera device
camera = Camera(name="webcam", src=0, size=[400, 400])
camera.start()
while True:
print("Doing some tasks...")
time.sleep(2) | 20.166667 | 54 | 0.698347 |
7fdecf5212432030558339550547b97267095dde | 1,481 | py | Python | write_cluster_wrappers.py | jrbourbeau/cr-composition | e9efb4b713492aaf544b5dd8bb67280d4f108056 | [
"MIT"
] | null | null | null | write_cluster_wrappers.py | jrbourbeau/cr-composition | e9efb4b713492aaf544b5dd8bb67280d4f108056 | [
"MIT"
] | 7 | 2017-08-29T16:20:04.000Z | 2018-06-12T16:58:36.000Z | write_cluster_wrappers.py | jrbourbeau/cr-composition | e9efb4b713492aaf544b5dd8bb67280d4f108056 | [
"MIT"
] | 1 | 2018-04-03T20:56:40.000Z | 2018-04-03T20:56:40.000Z | #!/usr/bin/env python
from __future__ import print_function
import os
import stat
import comptools as comp
here = os.path.abspath(os.path.dirname(__file__))
wrapper_path = os.path.join(here, 'wrapper.sh')
wrapper_virtualenv_path = os.path.join(here, 'wrapper_virtualenv.sh')
wrapper = """#!/bin/bash -e
eval `/cvmfs/icecube.opensciencegrid.org/py2-v3/setup.sh`
{icecube_env_script} \\
{wrapper_virtualenv_path} \\
python $*
"""
virtualenv_wrapper = """#!/bin/sh
source {virtualenv_activate}
$@
"""
icecube_env_script = os.path.join(comp.paths.metaproject,
'build',
'env-shell.sh')
virtualenv_activate = os.path.join(comp.paths.virtualenv_dir,
'bin',
'activate')
print('Writing wrapper script {}...'.format(wrapper_path))
with open(wrapper_path, 'w') as f:
lines = wrapper.format(icecube_env_script=icecube_env_script,
wrapper_virtualenv_path=wrapper_virtualenv_path)
f.write(lines)
print('Writing wrapper script {}...'.format(wrapper_virtualenv_path))
with open(wrapper_virtualenv_path, 'w') as f:
lines = virtualenv_wrapper.format(virtualenv_activate=virtualenv_activate)
f.write(lines)
make_executable(wrapper_path)
make_executable(wrapper_virtualenv_path)
| 27.425926 | 78 | 0.673194 |
7fdf5c3be33fa3dac4e441a667a56bd88641def7 | 3,899 | py | Python | labml_nn/gan/dcgan/__init__.py | BioGeek/annotated_deep_learning_paper_implementations | e2516cc3063cdfdf11cda05f22a10082297aa33e | [
"MIT"
] | 1 | 2021-09-17T18:16:17.000Z | 2021-09-17T18:16:17.000Z | labml_nn/gan/dcgan/__init__.py | BioGeek/annotated_deep_learning_paper_implementations | e2516cc3063cdfdf11cda05f22a10082297aa33e | [
"MIT"
] | null | null | null | labml_nn/gan/dcgan/__init__.py | BioGeek/annotated_deep_learning_paper_implementations | e2516cc3063cdfdf11cda05f22a10082297aa33e | [
"MIT"
] | null | null | null | """
---
title: Deep Convolutional Generative Adversarial Networks (DCGAN)
summary: A simple PyTorch implementation/tutorial of Deep Convolutional Generative Adversarial Networks (DCGAN).
---
# Deep Convolutional Generative Adversarial Networks (DCGAN)
This is a [PyTorch](https://pytorch.org) implementation of paper
[Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks](https://papers.labml.ai/paper/1511.06434).
This implementation is based on the [PyTorch DCGAN Tutorial](https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html).
"""
import torch.nn as nn
from labml import experiment
from labml.configs import calculate
from labml_helpers.module import Module
from labml_nn.gan.original.experiment import Configs
def _weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0)
# We import the [simple gan experiment](../original/experiment.html) and change the
# generator and discriminator networks
calculate(Configs.generator, 'cnn', lambda c: Generator().to(c.device))
calculate(Configs.discriminator, 'cnn', lambda c: Discriminator().to(c.device))
if __name__ == '__main__':
main()
| 32.22314 | 137 | 0.606566 |
7fdfdb7709a1fc9542844590029ad949b49fd04c | 2,846 | py | Python | src/flexible_models/flexible_GPT2.py | AlexDuvalinho/AITextGenerator | 02b96e40612209b5f7599674f9cd0e867f74fc59 | [
"MIT"
] | 36 | 2021-01-13T03:33:49.000Z | 2022-03-31T00:37:16.000Z | src/flexible_models/flexible_GPT2.py | bytjn1416124/AITextGenerator | 01ff72ebf373018d0d708cdd2018229fd386f73b | [
"MIT"
] | 5 | 2021-03-08T15:51:30.000Z | 2021-08-16T11:56:56.000Z | src/flexible_models/flexible_GPT2.py | bytjn1416124/AITextGenerator | 01ff72ebf373018d0d708cdd2018229fd386f73b | [
"MIT"
] | 16 | 2021-02-20T05:04:47.000Z | 2022-03-22T01:56:27.000Z | from .flexible_model import FlexibleModel
from src.utils import GPT2_BLOCK_SIZE
import torch
from src.flexible_models.GPT2_lm_segment_model import GPT2LMSegmentModel
| 35.575 | 112 | 0.757554 |
7fe0f727107b9ce99344df8215be2fd9b8d15fef | 2,091 | py | Python | measurements-plot/udp-plots.py | HaoruiPeng/latencymeasurement | 82a9c5300a7cedd72885780f542982bf76ae49b2 | [
"MIT"
] | null | null | null | measurements-plot/udp-plots.py | HaoruiPeng/latencymeasurement | 82a9c5300a7cedd72885780f542982bf76ae49b2 | [
"MIT"
] | null | null | null | measurements-plot/udp-plots.py | HaoruiPeng/latencymeasurement | 82a9c5300a7cedd72885780f542982bf76ae49b2 | [
"MIT"
] | null | null | null | import os
import sys
import pandas as pd
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
DATADIR = "../data"
cluster_name = ["bbcluster", "erdc"]
figure, axes = plt.subplots(1, 2)
figure.suptitle("UDP")
PING = dict.fromkeys(cluster_name, {})
for i in range(2):
cluster = cluster_name[i]
cluster_dir = os.path.join(DATADIR, cluster)
axes[i].set_title(cluster)
data = []
labels = []
for root, dirs, files in os.walk(cluster_dir, topdown=False):
for file_name in files:
mode, stack = file_name.split("_")
if mode == "udp":
dst = stack.split(".")[0]
file_path = os.path.join(cluster_dir, file_name)
loss_prb, rtt_array = read_udp(file_path)
# print(rtt_array)
length = len(rtt_array)
# rtt_outliar_removal = np.sort(rtt_array)[0: round(length*0.999)]
# rtt_mean = np.mean(rtt_outliar_removal)
# rtt_std = np.sqrt(np.var(rtt_outliar_removal))
# rtt_conf = st.norm.interval(0.95, loc=rtt_mean, scale=rtt_std)
# PING[cluster][dst] = (rtt_mean, rtt_conf)
data.append(rtt_array)
labels.append(dst)
axes[i].boxplot(data, labels=labels, showfliers=True)
plt.savefig("udp-latency-nofilter.png")
| 34.85 | 98 | 0.574845 |
7fe161bbdcf3de8bdcf00c6ffb06a6b7c300dcdc | 10,236 | py | Python | nitorch/core/pyutils.py | wyli/nitorch | 3ecd18944cf45fb9193c4c6ffc32953c4d1c71ac | [
"MIT"
] | 1 | 2021-04-09T21:24:47.000Z | 2021-04-09T21:24:47.000Z | nitorch/core/pyutils.py | wyli/nitorch | 3ecd18944cf45fb9193c4c6ffc32953c4d1c71ac | [
"MIT"
] | null | null | null | nitorch/core/pyutils.py | wyli/nitorch | 3ecd18944cf45fb9193c4c6ffc32953c4d1c71ac | [
"MIT"
] | null | null | null | """Python utilities."""
import os
import functools
from types import GeneratorType as generator
import warnings
from collections import Counter
def file_mod(s, nam='', prefix='', suffix='', odir='', ext=''):
    """Modify a file path.

    Parameters
    ----------
    s : str
        File path.
    nam : str, default=''
        Filename (without extension), if empty string, unchanged.
    prefix : str, default=''
        Filename prefix.
    suffix : str, default=''
        Filename suffix.
    odir : str, default=''
        Output directory, if empty string, unchanged.
    ext : str, default=''
        Extension (including the leading dot), if empty string, unchanged.

    Returns
    ----------
    s : str
        Modified (absolute) file path.
    """
    odir0, nam0 = os.path.split(s)
    # Split at the *first* dot so compound extensions such as '.nii.gz'
    # stay together.
    nam0, dot, ext0 = nam0.partition('.')
    # Bug fix: a name without any extension used to produce ext0 == '.',
    # yielding outputs like 'file.'; keep the extension empty instead.
    ext0 = dot + ext0
    if not odir:
        odir = odir0
    odir = os.path.abspath(odir)  # Get absolute path
    if not nam:
        nam = nam0
    if not ext:
        ext = ext0
    return os.path.join(odir, prefix + nam + suffix + ext)
def make_list(*args, **kwargs) -> list:
    """Ensure that the input is a list, padding/cropping it if necessary.

    Parameters
    ----------
    input : scalar or sequence generator
        Input argument(s).
    n : int, optional
        Target length.
    default : optional
        Value used for padding.
        If not provided, the last value is replicated.

    Returns
    -------
    output : list
        Output arguments.
    """
    sequence = make_sequence(*args, **kwargs)
    return list(sequence)
def make_tuple(*args, **kwargs) -> tuple:
    """Ensure that the input is a tuple, padding/cropping it if necessary.

    Parameters
    ----------
    input : scalar or sequence generator
        Input argument(s).
    n : int, optional
        Target length.
    default : optional
        Value used for padding.
        If not provided, the last value is replicated.

    Returns
    -------
    output : tuple
        Output arguments.
    """
    sequence = make_sequence(*args, **kwargs)
    return tuple(sequence)
def make_set(input) -> set:
    """Ensure that the input is a set.

    Parameters
    ----------
    input : scalar or sequence
        Input argument(s).

    Returns
    -------
    output : set
        Output arguments.
    """
    if isinstance(input, (list, tuple, set, range, generator)):
        return set(input)
    # Scalars (including strings) become a one-element set.
    return {input}
def rep_list(input, n, interleaved=False) -> list:
    """Replicate a list.

    Parameters
    ----------
    input : scalar or sequence or generator
        Input argument(s).
    n : int
        Number of replicates.
    interleaved : bool, default=False
        Interleaved replication.

    Returns
    -------
    output : list
        Replicated list.
        If the input argument is not a list or tuple, the output
        type is `tuple`.
    """
    replicated = rep_sequence(input, n, interleaved)
    return list(replicated)
# backward compatibility: keep the old names callable as plain aliases.
# (The previous assignments `padlist = functools.wraps(make_sequence)`
# produced *decorators*, not wrapped callables, so the old names could
# not actually be called like the functions they were meant to replace.)
padlist = make_list
replist = rep_list
def getargs(kpd, args=None, kwargs=None, consume=False):
    """Read and remove argument from args/kwargs input.

    Parameters
    ----------
    kpd : list of tuple
        List of (key, position, default) tuples with:
        * key (str): argument name
        * position (int): argument position
        * default (optional): default value
    args : sequence, optional
        List of positional arguments
    kwargs : dict, optional
        List of keyword arguments
    consume : bool, default=False
        Consume arguments from args/kwargs

    Returns
    -------
    values : list
        List of values, ordered like the entries of `kpd`.
    """
    args = [] if args is None else args
    kwargs = {} if kwargs is None else kwargs
    # Prepend each entry's original index so the results can be re-sorted
    # into the caller's order after processing.
    # Sort argument by reverse position
    kpd = [(i,) + e for i, e in enumerate(kpd)]
    kpd = sorted(kpd, key=lambda x: x[2], reverse=True)
    values = []
    for elem in kpd:
        i = elem[0]
        key = elem[1]
        position = elem[2]
        # Entries may omit the default; fall back to None.
        default = elem[3] if len(elem) > 3 else None
        value = default
        if len(args) >= position:
            # NOTE(review): this always reads the *last* positional argument,
            # even when len(args) > position. Combined with `consume=True` and
            # the decreasing-position processing order, it pops from the end;
            # for the non-consuming case `args[position - 1]` may have been
            # intended -- verify the calling convention.
            value = args[-1]
            if consume:
                del args[-1]
            if key in kwargs.keys():
                # Argument supplied both positionally and by keyword;
                # `raise_error` (defined elsewhere in this module) presumably
                # raises a duplicate-argument error -- TODO confirm.
                raise_error(key)
        elif key in kwargs.keys():
            value = kwargs[key]
            if consume:
                del kwargs[key]
        values.append((i, value))
    # Restore the original kpd order and strip the temporary index.
    values = [v for _, v in sorted(values)]
    return values
def prod(sequence, inplace=False):
    """Perform the product of a sequence of elements.

    Parameters
    ----------
    sequence : any object that implements `__iter__`
        Sequence of elements for which the `__mul__` operator is defined.
    inplace : bool, default=False
        Perform the product inplace (using `__imul__` instead of `__mul__`).

    Returns
    -------
    product :
        Product of the elements in the sequence, or `None` when the
        sequence is empty.
    """
    iterator = iter(sequence)
    try:
        result = next(iterator)
    except StopIteration:
        # Empty sequence: nothing to multiply.
        return None
    for elem in iterator:
        if inplace:
            result *= elem
        else:
            result = result * elem
    return result
def cumprod(sequence):
    """Perform the cumulative product of a sequence of elements.

    Parameters
    ----------
    sequence : any object that implements `__iter__`
        Sequence of elements for which the `__mul__` operator is defined.

    Returns
    -------
    products : list
        Running products: element *k* is the product of the first *k + 1*
        input elements. Empty input yields an empty list.
    """
    running = []
    for elem in sequence:
        if running:
            elem = running[-1] * elem
        running.append(elem)
    return running
def pop(obj, key=0, *args, **kwargs):
    """Pop an element from a mutable collection.

    Parameters
    ----------
    obj : dict or list
        Collection
    key : str or int
        Key or index
    default : optional
        Default value. Raise error if not provided.

    Returns
    -------
    elem
        Popped element

    Raises
    ------
    KeyError or IndexError or TypeError
        If the key/index is missing (or invalid) and no default was given.
    """
    # Accept the default either positionally (third argument) or as the
    # keyword `default`, mirroring `dict.pop`. (The previous implementation
    # crashed on `pop(d, k, default=x)` for dicts, because `dict.pop` takes
    # no keyword arguments.)
    has_default = bool(args) or 'default' in kwargs
    default = args[0] if args else kwargs.get('default')
    try:
        val = obj[key]
    except (KeyError, IndexError, TypeError):
        # Missing key/index (or unhashable/invalid key): return the default
        # if one was supplied; otherwise propagate the error, as the
        # docstring promises. (The previous implementation used a bare
        # `except:` and silently returned None when no default was given.)
        if has_default:
            return default
        raise
    del obj[key]
    return val
def majority(x):
    """Return majority element in a sequence.

    Parameters
    ----------
    x : sequence
        Input sequence of hashable elements

    Returns
    -------
    elem
        Most frequent element of `x`.
    """
    top = Counter(x).most_common(1)
    return top[0][0]
| 24.965854 | 79 | 0.554807 |
7fe32cec9f3243b49d74a552788df1a4f5765a18 | 2,895 | py | Python | danceschool/prerequisites/handlers.py | benjwrdill/django-danceschool | 9ecb2754502e62d0f49aa23d08ca6de6cae3c99a | [
"BSD-3-Clause"
] | 1 | 2019-02-04T02:11:32.000Z | 2019-02-04T02:11:32.000Z | danceschool/prerequisites/handlers.py | benjwrdill/django-danceschool | 9ecb2754502e62d0f49aa23d08ca6de6cae3c99a | [
"BSD-3-Clause"
] | 2 | 2019-03-26T22:37:49.000Z | 2019-12-02T15:39:35.000Z | danceschool/prerequisites/handlers.py | benjwrdill/django-danceschool | 9ecb2754502e62d0f49aa23d08ca6de6cae3c99a | [
"BSD-3-Clause"
] | 1 | 2019-03-19T22:49:01.000Z | 2019-03-19T22:49:01.000Z | from django.dispatch import receiver
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from danceschool.core.signals import check_student_info
from danceschool.core.models import Customer
from danceschool.core.constants import getConstant
from .models import Requirement
import logging
# Define logger for this file
logger = logging.getLogger(__name__)
| 38.6 | 172 | 0.646287 |
7fe3c41b5d4495299339fa6ac09fd4c855492415 | 1,577 | py | Python | b9_tools.py | eoinnoble/b9-indifference | a8b7f3c2268af162d5269a8ce7180be717bfb3fb | [
"Unlicense"
] | 9 | 2017-11-13T20:31:04.000Z | 2021-11-08T12:30:48.000Z | b9_tools.py | eoinnoble/b9-indifference | a8b7f3c2268af162d5269a8ce7180be717bfb3fb | [
"Unlicense"
] | 1 | 2021-11-30T20:24:26.000Z | 2021-11-30T20:24:26.000Z | b9_tools.py | eoinnoble/b9-indifference | a8b7f3c2268af162d5269a8ce7180be717bfb3fb | [
"Unlicense"
] | 1 | 2017-12-17T09:04:25.000Z | 2017-12-17T09:04:25.000Z | import re
from collections.abc import MutableMapping
from typing import Dict, List
import markovify
import nltk
| 27.666667 | 97 | 0.639822 |
7fe457572d531fc0f3ed15941394935cc6786462 | 3,907 | py | Python | live_visualisation.py | duyanh-y4n/DJITelloPy | 3bfda900a7dc523be4effe21e0e3b83126576750 | [
"MIT"
] | null | null | null | live_visualisation.py | duyanh-y4n/DJITelloPy | 3bfda900a7dc523be4effe21e0e3b83126576750 | [
"MIT"
] | null | null | null | live_visualisation.py | duyanh-y4n/DJITelloPy | 3bfda900a7dc523be4effe21e0e3b83126576750 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : live_visualisation.py
# Author : Duy Anh Pham <duyanh.y4n.pham@gmail.com>
# Date : 10.04.2020
# Last Modified By: Duy Anh Pham <duyanh.y4n.pham@gmail.com>
from djitellopy.realtime_plot.RealtimePlotter import *
import redis
import numpy as np
import traceback
import matplotlib
# define data to get from db
# sensorMeshList = ['baro', 'h', 'tof', 'runtime']
# row = len(sensorMeshList)
data_len = 300
plot_update_interval = 0.005
datasource = redis.StrictRedis(host='localhost', port=6379, db=0)
plt.figure()
baro_axes = plt.subplot(3, 1, 1)
plt.title('tello_edu sensors')
baro_data_list = ['baro', 'runtime']
baro_ylim = [-47, -57]
baro_option = DataplotOption.TIMESTAMP_CUSTOM
baro_dataplot = DataPlot(2, data_len, option=baro_option)
baro_plot = RealtimePlotter(baro_dataplot)
baro_plot.config_plots(baro_axes, y_labels=baro_data_list, ylim=baro_ylim)
baro_plot.axes.set_xlabel('time in ms')
baro_plot.axes.set_ylabel('barometer in cmHg')
tof_axes = plt.subplot(3, 1, 2)
tof_data_list = ['tof', 'runtime']
tof_ylim = [-10, 500]
tof_option = DataplotOption.TIMESTAMP_CUSTOM
tof_dataplot = DataPlot(2, data_len, option=tof_option)
tof_plot = RealtimePlotter(tof_dataplot)
tof_plot.config_plots(tof_axes, y_labels=tof_data_list, ylim=tof_ylim)
tof_plot.axes.set_xlabel('time in ms')
tof_plot.axes.set_ylabel('vertical distance in cm')
h_axes = plt.subplot(3, 1, 3)
h_ylim = [-50, 300]
h_data_list = ['h', 'runtime']
h_option = DataplotOption.TIMESTAMP_CUSTOM
h_dataplot = DataPlot(2, data_len, option=h_option)
h_plot = RealtimePlotter(h_dataplot)
h_plot.config_plots(h_axes, y_labels=h_data_list, ylim=h_ylim)
h_plot.axes.set_xlabel('time in ms')
tof_plot.axes.set_ylabel('height in cm')
if __name__ == "__main__":
while True:
# get new data from database and plot
# baro
baro_plot.dataplot.clear_data_regs()
new_data = []
for sensor in baro_data_list:
new_sensor_data = datasource.lrange(sensor, 0, data_len)
# reverse, bc first element is the newest (not the oldest like deque)
new_sensor_data.reverse()
new_data.append(new_sensor_data)
try:
baro_y = np.array(new_data[:-1], dtype=np.float)
baro_x = np.array(new_data[-1], dtype=np.int64)
baro_plot.dataplot.append(
y=baro_y, x=baro_x, single=False)
baro_plot.plot_data()
except Exception as e:
print(e)
# tof
tof_plot.dataplot.clear_data_regs()
new_data = []
for sensor in tof_data_list:
new_sensor_data = datasource.lrange(sensor, 0, data_len)
# reverse, bc first element is the newest (not the oldest like deque)
new_sensor_data.reverse()
new_data.append(new_sensor_data)
try:
tof_y = np.array(new_data[:-1], dtype=np.float)
tof_x = np.array(new_data[-1], dtype=np.int64)
tof_plot.dataplot.append(
y=tof_y, x=tof_x, single=False)
tof_plot.plot_data()
except Exception as e:
print(e)
# height
h_plot.dataplot.clear_data_regs()
new_data = []
for sensor in h_data_list:
new_sensor_data = datasource.lrange(sensor, 0, data_len)
# reverse, bc first element is the newest (not the oldest like deque)
new_sensor_data.reverse()
new_data.append(new_sensor_data)
try:
h_y = np.array(new_data[:-1], dtype=np.float)
h_x = np.array(new_data[-1], dtype=np.int64)
h_plot.dataplot.append(
y=h_y, x=h_x, single=False)
h_plot.plot_data()
except Exception as e:
print(e)
plt.pause(plot_update_interval)
input("Exit(press any key)?")
| 35.198198 | 81 | 0.652931 |
7fe5f97f042b7d291cc4c77318e4cda78c4dbfcc | 1,187 | py | Python | app/cli.py | dev-johnlopez/Assignably | 056960556dd75dfce064970887f37a44a8c66aec | [
"MIT"
] | 1 | 2021-06-09T02:19:18.000Z | 2021-06-09T02:19:18.000Z | app/cli.py | dev-johnlopez/Assignably | 056960556dd75dfce064970887f37a44a8c66aec | [
"MIT"
] | 1 | 2021-06-01T23:45:06.000Z | 2021-06-01T23:45:06.000Z | app/cli.py | dev-johnlopez/assignably | 056960556dd75dfce064970887f37a44a8c66aec | [
"MIT"
] | null | null | null | import os
import click
from app import app
from flask.cli import with_appcontext
from app.auth.models import Role
| 25.255319 | 83 | 0.566133 |
7fe64ab21ba37642fb9fd48c4a4ae360552314de | 2,918 | py | Python | autobasedoc/tableofcontents.py | NuCOS/autobasedoc | 54135199b966847d822e772f435ddeb0a942fb42 | [
"BSD-3-Clause"
] | 3 | 2017-06-20T06:33:05.000Z | 2021-02-26T19:54:01.000Z | autobasedoc/tableofcontents.py | skidzo/autobasedoc | 54135199b966847d822e772f435ddeb0a942fb42 | [
"BSD-3-Clause"
] | null | null | null | autobasedoc/tableofcontents.py | skidzo/autobasedoc | 54135199b966847d822e772f435ddeb0a942fb42 | [
"BSD-3-Clause"
] | 4 | 2017-09-27T09:18:54.000Z | 2019-07-02T23:58:06.000Z | """
tableofcontents
===============
.. module:: tableofcontents
:platform: Unix, Windows
:synopsis: a tableofcontents that breaks not to the next frame but to the next page
.. moduleauthor:: Johannes Eckstein
"""
from reportlab import rl_config
from reportlab.platypus import Table, Paragraph, PageBreak
from reportlab.platypus.tableofcontents import TableOfContents, drawPageNumbers
| 37.896104 | 123 | 0.614119 |
7fe7a7ef4cedcf3d41ec5da04172536952412a93 | 570 | py | Python | conans/test/model/username_test.py | jbaruch/conan | 263722b5284828c49774ffe18d314b24ee11e178 | [
"MIT"
] | null | null | null | conans/test/model/username_test.py | jbaruch/conan | 263722b5284828c49774ffe18d314b24ee11e178 | [
"MIT"
] | null | null | null | conans/test/model/username_test.py | jbaruch/conan | 263722b5284828c49774ffe18d314b24ee11e178 | [
"MIT"
] | 1 | 2021-03-03T17:15:46.000Z | 2021-03-03T17:15:46.000Z | import unittest
from conans.errors import ConanException
from conans.model.username import Username
| 28.5 | 59 | 0.684211 |
7fe7fde051fa8a3d76d968e9a6574579dd014181 | 152 | py | Python | exercises/01_Primeiros Passos/exe_08.py | MariaTrindade/CursoPython | 2c60dd670747db08011d9dd33e3bbfd5795b06e8 | [
"Apache-2.0"
] | 1 | 2021-05-11T18:30:17.000Z | 2021-05-11T18:30:17.000Z | exercises/01_Primeiros Passos/exe_08.py | MariaTrindade/CursoPython | 2c60dd670747db08011d9dd33e3bbfd5795b06e8 | [
"Apache-2.0"
] | null | null | null | exercises/01_Primeiros Passos/exe_08.py | MariaTrindade/CursoPython | 2c60dd670747db08011d9dd33e3bbfd5795b06e8 | [
"Apache-2.0"
] | null | null | null | """
Faa um Programa que pea a temperatura em graus Fahrenheit, transforme e mostre
a temperatura em graus Celsius.
C = (5 * (F-32) / 9)
"""
| 9.5 | 80 | 0.651316 |
7febb7dfbccc110592c6373855dc121877f1f2c7 | 1,641 | py | Python | throwaway/viz_nav_policy.py | sfpd/rlreloaded | 650c64ec22ad45996c8c577d85b1a4f20aa1c692 | [
"MIT"
] | null | null | null | throwaway/viz_nav_policy.py | sfpd/rlreloaded | 650c64ec22ad45996c8c577d85b1a4f20aa1c692 | [
"MIT"
] | null | null | null | throwaway/viz_nav_policy.py | sfpd/rlreloaded | 650c64ec22ad45996c8c577d85b1a4f20aa1c692 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from control4.algs.save_load_utils import load_agent_and_mdp
from control4.core.rollout import rollout
from tabulate import tabulate
import numpy as np
import pygame
from control3.pygameviewer import PygameViewer, pygame
from collections import namedtuple
from copy import deepcopy
path = []
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("hdf")
parser.add_argument("--load_idx",type=int,default=-1)
parser.add_argument("--max_steps",type=int)
parser.add_argument("--one_traj",action="store_true")
args = parser.parse_args()
agent, mdp, _hdf = load_agent_and_mdp(args.hdf,args.load_idx)
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
fig1,(ax0,ax1)=plt.subplots(2,1)
fig2,(ax3)=plt.subplots(1,1)
h = mdp.halfsize
while True:
path = []
init_arrs, traj_arrs = rollout(mdp,agent,999999,save_arrs=["m","o","a"])
m = np.concatenate([init_arrs["m"]]+traj_arrs["m"],axis=0)
o = np.concatenate([init_arrs["o"]]+traj_arrs["o"],axis=0)
a_na = np.concatenate(traj_arrs["a"])
print "o:"
print o
print "m:"
print m
ax0.cla()
ax0.plot(m)
ax1.cla()
ax1.plot(o)
ax3.cla()
x,y=np.array(init_arrs['x'].path).T
ax3.plot(x,y,'bx-')
ax3.axis([-h,h,-h,h])
for (x,a) in zip(init_arrs['x'].path,a_na):
ax3.add_artist(Ellipse(xy=x+a[0:2], width=2*a[2], height=2*a[3],alpha=0.2))
plt.draw()
plt.pause(0.01)
plt.ginput()
| 28.293103 | 87 | 0.624619 |
7fec6177c282fa0ff0c92470b63745ee8ad16c40 | 1,772 | py | Python | echopype/echodata/convention/attrs.py | b-reyes/echopype | bc8afa190fa2ca4fab5944bac83cd4b20f7abcf6 | [
"Apache-2.0"
] | null | null | null | echopype/echodata/convention/attrs.py | b-reyes/echopype | bc8afa190fa2ca4fab5944bac83cd4b20f7abcf6 | [
"Apache-2.0"
] | 2 | 2019-02-20T16:47:51.000Z | 2021-04-20T20:20:32.000Z | echopype/echodata/convention/attrs.py | b-reyes/echopype | bc8afa190fa2ca4fab5944bac83cd4b20f7abcf6 | [
"Apache-2.0"
] | 2 | 2019-02-20T16:41:56.000Z | 2021-08-05T04:33:07.000Z | """
Define convention-based global, coordinate and variable attributes
in one place for consistent reuse
"""
DEFAULT_BEAM_COORD_ATTRS = {
"frequency": {
"long_name": "Transducer frequency",
"standard_name": "sound_frequency",
"units": "Hz",
"valid_min": 0.0,
},
"ping_time": {
"long_name": "Timestamp of each ping",
"standard_name": "time",
"axis": "T",
},
"range_bin": {"long_name": "Along-range bin (sample) number, base 0"},
}
DEFAULT_PLATFORM_COORD_ATTRS = {
"location_time": {
"axis": "T",
"long_name": "Timestamps for NMEA datagrams",
"standard_name": "time",
}
}
DEFAULT_PLATFORM_VAR_ATTRS = {
"latitude": {
"long_name": "Platform latitude",
"standard_name": "latitude",
"units": "degrees_north",
"valid_range": (-90.0, 90.0),
},
"longitude": {
"long_name": "Platform longitude",
"standard_name": "longitude",
"units": "degrees_east",
"valid_range": (-180.0, 180.0),
},
"pitch": {
"long_name": "Platform pitch",
"standard_name": "platform_pitch_angle",
"units": "arc_degree",
"valid_range": (-90.0, 90.0),
},
"roll": {
"long_name": "Platform roll",
"standard_name": "platform_roll_angle",
"units": "arc_degree",
"valid_range": (-90.0, 90.0),
},
"heave": {
"long_name": "Platform heave",
"standard_name": "platform_heave_angle",
"units": "arc_degree",
"valid_range": (-90.0, 90.0),
},
"water_level": {
"long_name": "z-axis distance from the platform coordinate system "
"origin to the sonar transducer",
"units": "m",
},
}
| 26.848485 | 75 | 0.550226 |
7fecb02664281603ef197605d74e5b00e842bde4 | 2,072 | py | Python | tf_tests.py | MadsJensen/agency_connectivity | b45adbc133573de1ebdcff0edb17e43f1691c577 | [
"BSD-3-Clause"
] | null | null | null | tf_tests.py | MadsJensen/agency_connectivity | b45adbc133573de1ebdcff0edb17e43f1691c577 | [
"BSD-3-Clause"
] | null | null | null | tf_tests.py | MadsJensen/agency_connectivity | b45adbc133573de1ebdcff0edb17e43f1691c577 | [
"BSD-3-Clause"
] | null | null | null | import mne
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import seaborn as sns
from tf_analysis import single_trial_tf
plt.ion()
data_folder = "/home/mje/Projects/agency_connectivity/data/"
epochs = mne.read_epochs(data_folder + "P2_ds_bp_ica-epo.fif")
# single trial morlet tests
frequencies = np.arange(6., 30., 2.)
n_cycles = 5.
times = epochs.times
tfr_vol = single_trial_tf(epochs["voluntary"])
tfr_invol = single_trial_tf(epochs["involuntary"])
pow_vol_Cz = np.asarray([np.mean(np.abs(tfr[37, 4:-2, :])**2, axis=0)
for tfr in tfr_vol])
pow_invol_Cz = np.asarray([np.mean(np.abs(tfr[37, 4:-2, :])**2, axis=0)
for tfr in tfr_invol])
pow_invol_Cz_bs = np.asarray([(10*np.log10(trial / np.mean(trial[:103]))) for
trial in pow_invol_Cz])
pow_vol_Cz_bs = np.asarray([(10*np.log10(trial / np.mean(trial[:103]))) for
trial in pow_vol_Cz])
pow_invol_Cz_mean = pow_invol_Cz_bs[:, 921:1024].mean(axis=1)
pow_vol_Cz_mean = pow_vol_Cz_bs[:, 921:1024].mean(axis=1)
stats.ttest_ind(pow_vol_Cz_mean, pow_invol_Cz_mean)
corr, pval = stats.spearmanr(pow_vol_Cz_mean[-60:], pow_invol_Cz_mean)
print("correlation: %s, pval: %s" % (corr, pval))
sns.regplot(pow_vol_Cz_mean[-60:], pow_invol_Cz_mean)
from sklearn.cluster.spectral import spectral_embedding # noqa
from sklearn.metrics.pairwise import rbf_kernel # noqa
good_pick = 37 # channel with a clear evoked response
bad_pick = 47 # channel with no evoked response
plt.close('all')
mne.viz.plot_epochs_image(epochs["involuntary"], [good_pick, bad_pick],
sigma=0.5, cmap="viridis",
colorbar=True, order=order_func, show=True)
| 32.888889 | 77 | 0.670849 |
7fee0ec8e03805400ba3a871766b2ab0228dc4a4 | 17,645 | py | Python | data/smth.py | roeiherz/AG2Video | a4eb439d7147c91237ddd50ec305add8e1537360 | [
"MIT"
] | 22 | 2020-07-01T07:11:15.000Z | 2022-02-17T13:26:16.000Z | data/smth.py | roeiherz/AG2Video | a4eb439d7147c91237ddd50ec305add8e1537360 | [
"MIT"
] | 5 | 2021-06-16T02:35:14.000Z | 2022-03-12T01:00:27.000Z | data/smth.py | roeiherz/AG2Video | a4eb439d7147c91237ddd50ec305add8e1537360 | [
"MIT"
] | 2 | 2021-08-04T05:22:58.000Z | 2021-12-11T02:15:57.000Z | import json
import os
import pickle as pkl
import random
import math
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
import torchvision.transforms as T
import torch.nn.functional as F
from PIL import Image
from data.SomethingElse.config import action_to_number_of_instances, action_to_num_objects, valid_actions
from data.args import ALIGN_CORNERS
from models import group_transforms
from models.video_transforms import GroupMultiScaleCrop
def clean_boxes_metadata(boxes_metadata):
    """
    Deduplicate box metadata entries by name, keeping the first occurrence.

    :param boxes_metadata: iterable of dicts, each carrying a 'name' key
    :return: list with one entry per unique 'name', in first-seen order
    """
    # A plain "seen" set replaces the original dict-of-counters: the
    # original pre-inserted every name as a key, so its membership test
    # was always true and only the counter value actually mattered.
    seen = set()
    unique_boxes = []
    for box in boxes_metadata:
        name = box['name']
        if name not in seen:
            seen.add(name)
            unique_boxes.append(box)
    return unique_boxes
| 45.127877 | 124 | 0.607708 |
7feeefa1c9cfdfdf846929d05b2027d327b3a9e6 | 60 | py | Python | user_login.py | pieddro/football | d5a021da26a2252dcece752c51818f03d1d3db46 | [
"Apache-2.0"
] | null | null | null | user_login.py | pieddro/football | d5a021da26a2252dcece752c51818f03d1d3db46 | [
"Apache-2.0"
] | null | null | null | user_login.py | pieddro/football | d5a021da26a2252dcece752c51818f03d1d3db46 | [
"Apache-2.0"
] | null | null | null | Meine neuer Code..
Neue Codezeile ....
Tst stash zum 2. mal | 20 | 20 | 0.7 |
7fef83c18d0039ec789a2a57075be970bd25f765 | 1,195 | py | Python | refstack/api/controllers/__init__.py | jovial/refstack | b6f9f8611bc3752acbf0c4275453285e80be85dc | [
"Apache-2.0"
] | null | null | null | refstack/api/controllers/__init__.py | jovial/refstack | b6f9f8611bc3752acbf0c4275453285e80be85dc | [
"Apache-2.0"
] | null | null | null | refstack/api/controllers/__init__.py | jovial/refstack | b6f9f8611bc3752acbf0c4275453285e80be85dc | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""API controllers package."""
from oslo_config import cfg
from refstack.api import constants as const
CTRLS_OPTS = [
cfg.IntOpt('results_per_page',
default=20,
help='Number of results for one page'),
cfg.StrOpt('input_date_format',
default='%Y-%m-%d %H:%M:%S',
help='The format for %(start)s and %(end)s parameters' % {
'start': const.START_DATE,
'end': const.END_DATE
})
]
CONF = cfg.CONF
CONF.register_opts(CTRLS_OPTS, group='api')
| 32.297297 | 78 | 0.646862 |
7ff0e77d9b3db005d3ce70f0c9f81c5bbde228f8 | 4,808 | py | Python | main.py | superwaiwjia/lowRankForSeer | 86041e0a39e1ef2718e8133eb65a63c05d9a441c | [
"MIT"
] | 2 | 2021-11-18T07:01:40.000Z | 2021-11-18T07:01:49.000Z | main.py | superwaiwjia/lowRankForSeer | 86041e0a39e1ef2718e8133eb65a63c05d9a441c | [
"MIT"
] | null | null | null | main.py | superwaiwjia/lowRankForSeer | 86041e0a39e1ef2718e8133eb65a63c05d9a441c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#coding=utf-8
import pickle
import sys, os, re, subprocess, math
reload(sys)
sys.setdefaultencoding("utf-8")
from os.path import abspath, dirname, join
whereami = abspath(dirname(__file__))
sys.path.append(whereami)
from sklearn.metrics import roc_auc_score
import pandas as pd
import numpy as np
from scipy import *
import itertools
from utility import getWfixingA, getSparseWeight, appendDFToCSV_void, stop_critier
from solver import optimize
from data import load_dataset, loadSeer
if __name__ == '__main__':
datasetName = sys.argv[1]
main(datasetName)
| 38.464 | 258 | 0.66015 |
7ff1b8e6fdd883cf61f529bf469c18df4b7174fc | 166 | py | Python | django_gotolong/bhav/apps.py | ParikhKadam/gotolong | 839beb8aa37055a2078eaa289b8ae05b62e8905e | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 15 | 2019-12-06T16:19:45.000Z | 2021-08-20T13:22:22.000Z | django_gotolong/bhav/apps.py | ParikhKadam/gotolong | 839beb8aa37055a2078eaa289b8ae05b62e8905e | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 14 | 2020-12-08T10:45:05.000Z | 2021-09-21T17:23:45.000Z | django_gotolong/bhav/apps.py | ParikhKadam/gotolong | 839beb8aa37055a2078eaa289b8ae05b62e8905e | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 9 | 2020-01-01T03:04:29.000Z | 2021-04-18T08:42:30.000Z | from django.apps import AppConfig
from django_gotolong.bhav.views import start
| 16.6 | 44 | 0.704819 |
7ff2225b3cf1350521968e39323aa03d96333bb2 | 2,130 | py | Python | tethysext/atcore/services/paginate.py | Aquaveo/tethysext-atcore | 7a83ccea24fdbbe806f12154f938554dd6c8015f | [
"BSD-3-Clause"
] | 3 | 2020-11-05T23:50:47.000Z | 2021-02-26T21:43:29.000Z | tethysext/atcore/services/paginate.py | Aquaveo/tethysext-atcore | 7a83ccea24fdbbe806f12154f938554dd6c8015f | [
"BSD-3-Clause"
] | 7 | 2020-10-29T16:53:49.000Z | 2021-05-07T19:46:47.000Z | tethysext/atcore/services/paginate.py | Aquaveo/tethysext-atcore | 7a83ccea24fdbbe806f12154f938554dd6c8015f | [
"BSD-3-Clause"
] | null | null | null | """
********************************************************************************
* Name: pagintate.py
* Author: nswain
* Created On: April 17, 2018
* Copyright: (c) Aquaveo 2018
********************************************************************************
"""
def paginate(objects, results_per_page, page, result_name, sort_by_raw=None, sort_reversed=False):
    """
    Slice a list of objects into a single page and build pagination metadata.

    Args:
        objects(list): objects to paginate.
        results_per_page(int): maximum number of results shown on a page.
        page(int): 1-based page number to view.
        result_name(str): label used when referring to the objects.
        sort_by_raw(str): sort field, if applicable.
        sort_reversed(bool): whether the sort order is reversed.

    Returns:
        list, dict: objects belonging to the requested page, and a dict
        describing the pagination state for the template.
    """
    per_page_choices = [5, 10, 20, 40, 80, 120]
    total = len(objects)

    # Collapse to the first page when everything fits on a single page.
    if total <= results_per_page:
        page = 1

    start = (page - 1) * results_per_page
    end = min(page * results_per_page, total)
    page_objects = objects[start:end]

    has_next = end < total
    has_previous = start > 0

    info = {
        'num_results': total,
        'result_name': result_name,
        'page': page,
        # 1-based index of the first visible item; 0 when the page is empty.
        'min_showing': start + 1 if end > 0 else 0,
        'max_showing': end,
        'next_page': page + 1,
        'previous_page': page - 1,
        'sort_by': sort_by_raw,
        'sort_reversed': sort_reversed,
        'enable_next_button': has_next,
        'enable_previous_button': has_previous,
        'hide_buttons': page == 1 and end == total,
        'hide_header_buttons': len(page_objects) < 20,
        'show': results_per_page,
        'results_per_page_options': [c for c in per_page_choices if c <= total],
        'hide_results_per_page_options': total <= per_page_choices[0],
    }
    return page_objects, info
| 39.444444 | 98 | 0.620188 |
7ff4886052822174f0f2c10e163f3567d0699ee7 | 133 | py | Python | geotweet/tests/integration/twitter/__init__.py | meyersj/geotweet | 1a6b55f98adf34d1b91f172d9187d599616412d9 | [
"MIT"
] | 6 | 2016-03-26T19:29:25.000Z | 2020-07-12T02:18:22.000Z | geotweet/tests/integration/twitter/__init__.py | meyersj/geotweet | 1a6b55f98adf34d1b91f172d9187d599616412d9 | [
"MIT"
] | null | null | null | geotweet/tests/integration/twitter/__init__.py | meyersj/geotweet | 1a6b55f98adf34d1b91f172d9187d599616412d9 | [
"MIT"
] | 1 | 2020-01-06T01:25:05.000Z | 2020-01-06T01:25:05.000Z | import os
from os.path import dirname
import sys
ROOT = dirname(dirname(dirname(os.path.abspath(__file__))))
sys.path.append(ROOT)
| 16.625 | 59 | 0.774436 |
7ff4ceaf754a9a8a176cc343441eb5563e96bf86 | 1,996 | py | Python | main.py | nerdmanPc/arvore-b | f993028f0c8971cff4e4434967c8f9b44a5cc265 | [
"MIT"
] | null | null | null | main.py | nerdmanPc/arvore-b | f993028f0c8971cff4e4434967c8f9b44a5cc265 | [
"MIT"
] | null | null | null | main.py | nerdmanPc/arvore-b | f993028f0c8971cff4e4434967c8f9b44a5cc265 | [
"MIT"
] | null | null | null | # Primeiro Trabalho Pratico de EDAII (UFBA)
# Desenvolvido em dupla:
# Laila Pereira Mota Santos e Pedro Antonhyonih Silva Costa
# Verso Python 3.8.10
#
# OBSERVACAO IMPORTANTE:
# A CONSTANTE GRAUMINIMO ESTA NO ARQUIVO node.py
import os
from struct import Struct
from typing import Optional, Tuple
from enum import Enum
from node import Node
from data_base import DataBase, OpStatus
import sys
FILE_PATH = "tree.bin"
#GRAUMINIMO = 2 # Movido para node.py
#os.remove(FILE_PATH)
# Main loop that processes the one-letter commands read from stdin:
#   i = insert record, c = query record, p = print tree,
#   o = print ordered sequence, t = print occupancy, e = exit
entry = input()
while entry != 'e':
    if(entry == 'i'):
        # Insert: read key, name and age, then delegate to insert_entry().
        num_reg = input()
        name_reg = input()
        age_reg = input()
        insert_entry(int(num_reg), name_reg, int(age_reg))
    elif(entry == 'c'):
        # Query a record by its numeric key.
        num_reg = input()
        query_entry(int(num_reg))
    elif(entry == 'p'):
        print_tree()
    elif(entry == 'o'):
        print_sequence()
    elif(entry == 't'):
        print_occupancy()
    entry = input()
exit_shell()
# End of the main loop
7ff5855819bc7ea53013b0091b066cc505d14375 | 6,134 | py | Python | hcat/backends/spatial_embedding.py | buswinka/hcat | dcfd855904ba51f6e1fa6c9ddc775b3364695e3e | [
"MIT"
] | 4 | 2021-10-14T19:22:57.000Z | 2022-03-29T09:37:43.000Z | hcat/backends/spatial_embedding.py | buswinka/hcat | dcfd855904ba51f6e1fa6c9ddc775b3364695e3e | [
"MIT"
] | null | null | null | hcat/backends/spatial_embedding.py | buswinka/hcat | dcfd855904ba51f6e1fa6c9ddc775b3364695e3e | [
"MIT"
] | null | null | null | import torch
import hcat.lib.functional
from hcat.lib.functional import IntensityCellReject
from hcat.backends.backend import Backend
from hcat.models.r_unet import embed_model as RUnet
from hcat.train.transforms import median_filter, erosion
import hcat.lib.utils
from hcat.lib.utils import graceful_exit
import os.path
import wget
from typing import Dict, Optional
| 34.077778 | 111 | 0.626997 |
7ff58669b09c24b09a4ab1de5e76c0c33e23118d | 6,656 | py | Python | mortar_rdb/tests/test_utility.py | Mortar/mortar_rdb | 576628a299f94ef60324244777766a620556592b | [
"MIT"
] | 1 | 2017-03-24T15:20:40.000Z | 2017-03-24T15:20:40.000Z | mortar_rdb/tests/test_utility.py | Mortar/mortar_rdb | 576628a299f94ef60324244777766a620556592b | [
"MIT"
] | 3 | 2015-12-01T20:06:30.000Z | 2018-02-02T07:05:21.000Z | mortar_rdb/tests/test_utility.py | Mortar/mortar_rdb | 576628a299f94ef60324244777766a620556592b | [
"MIT"
] | 1 | 2019-03-01T08:37:48.000Z | 2019-03-01T08:37:48.000Z | from mortar_rdb import register_session, get_session
from mortar_rdb.interfaces import ISession
from testfixtures.components import TestComponents
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.session import Session
from sqlalchemy.schema import Column
from sqlalchemy.types import Integer, String
from threading import Thread
from testfixtures import (
ShouldRaise,compare,generator,Comparison as C, LogCapture
)
from unittest import TestCase
from zope.component import getSiteManager
from zope.component.interfaces import ComponentLookupError
import transaction
| 32.154589 | 70 | 0.587891 |
7ff6757eb76e6c391780f0171055dc2c8c0944f0 | 2,637 | py | Python | magic_driver_control/scripts/driver_controller.py | flamma7/adv_robotics | da9150de28a5464ee6af1d0859312f4858a6b3d2 | [
"Apache-2.0"
] | null | null | null | magic_driver_control/scripts/driver_controller.py | flamma7/adv_robotics | da9150de28a5464ee6af1d0859312f4858a6b3d2 | [
"Apache-2.0"
] | null | null | null | magic_driver_control/scripts/driver_controller.py | flamma7/adv_robotics | da9150de28a5464ee6af1d0859312f4858a6b3d2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
This node talks directly to the pololu driver
It takes in PID control effort and maps it to commands for the pololu driver
"""
from __future__ import division
import rospy
from std_msgs.msg import Float64, Int8MultiArray
DRIVE_PUB_INDEX = 0
YAW_PUB_INDEX = 1
SIDE_IR_THRESH = 10
FRONT_IR_THRESH = 21
DOORWAY_THRESH = 5
DOORWAY_IGNORES = 250
if __name__ == "__main__":
main()
| 28.663043 | 91 | 0.615851 |
7ff703d79b5264be25b5282ef47dd791ebb22441 | 4,025 | py | Python | GageRnR/application.py | tobyndax/GageRnR | 2dadafe6cd76a963068b7cbbd732f5f8e02d36fb | [
"MIT"
] | null | null | null | GageRnR/application.py | tobyndax/GageRnR | 2dadafe6cd76a963068b7cbbd732f5f8e02d36fb | [
"MIT"
] | null | null | null | GageRnR/application.py | tobyndax/GageRnR | 2dadafe6cd76a963068b7cbbd732f5f8e02d36fb | [
"MIT"
] | null | null | null | """GageRnR.
The input data should be structured
in a 3d array n[i,j,k] where
i = operator, j = part, k = measurement
Stored to file this data would look:
m1 m2 m3
3.29; 3.41; 3.64 # p1 | o1
2.44; 2.32; 2.42 # p2
3.08; 3.25; 3.07 # p1 | o2
2.53; 1.78; 2.32 # p2
3.04; 2.89; 2.85 # p1 | o3
1.62; 1.87; 2.04 # p2
More info: https://github.com/owodunni/GageRnR
Usage:
GageRnR -f FILE -s STRUCTURE [-a <AXES>] [-d <DELIMITER>] [-o <FOLDER>] [-g <PARTS>]
GageRnR -h | --help
GageRnR -v | --version
Examples:
GageRnR -f data.csv -s5,7,11 -o report
GageRnR -f data/data_mXop.csv -s 3,5,11 -o outDir
GageRnR -f data/data_opXm.csv -s 5,7,11 -a 2,1,0 -o outDir
GageRnR -f data/data_demoGRnR.csv -s 3,10,3 -a 0,2,1 -g 40,42,30,43,29,45,27.5,42,26,35 -o outDir
Options:
-f --file=FILE Load input data.
-s --structure=STRUCTURE Data structure.
Order should be operators, parts, measurements.
-a --axes=<AXES> Order of data axes [default: 0,1,2].
-d --delimiter=<DELIMITER> Order of data axes [default: ;].
-o --output=<FOLDER> Report output directory
-g --groundTruth=<PARTS> Ground Truth data for parts
-h --help Show this screen.
-v --version Show version.
"""
from docopt import docopt
import os.path
import GageRnR
from .reportGenerator import ReportGenerator
| 29.166667 | 101 | 0.610683 |
7ff97680a496e4eac114964f67955913e58ace45 | 4,536 | py | Python | final/options.py | annahung31/Advance_MM_homeworks | f6b2d600220442a73d25d478d08898ee796457b6 | [
"MIT"
] | null | null | null | final/options.py | annahung31/Advance_MM_homeworks | f6b2d600220442a73d25d478d08898ee796457b6 | [
"MIT"
] | null | null | null | final/options.py | annahung31/Advance_MM_homeworks | f6b2d600220442a73d25d478d08898ee796457b6 | [
"MIT"
] | null | null | null | import numpy as np
import os
import glob
import torch
import argparse
| 52.744186 | 153 | 0.698633 |
7ffbe9ba3d0ccc12492d20e36da26c44617c81e1 | 6,322 | py | Python | tests/test_pt.py | atti84it/ebook-reader-dict | 6a23b633bb06af7f9ea9d54c837cd78d627a7eb7 | [
"MIT"
] | 83 | 2020-05-21T12:25:07.000Z | 2022-03-25T23:26:42.000Z | tests/test_pt.py | atti84it/ebook-reader-dict | 6a23b633bb06af7f9ea9d54c837cd78d627a7eb7 | [
"MIT"
] | 1,015 | 2020-04-18T12:21:25.000Z | 2022-03-31T16:38:53.000Z | tests/test_pt.py | atti84it/ebook-reader-dict | 6a23b633bb06af7f9ea9d54c837cd78d627a7eb7 | [
"MIT"
] | 16 | 2020-11-05T22:49:31.000Z | 2022-03-31T08:14:05.000Z | import pytest
from wikidict.render import parse_word
from wikidict.utils import process_templates
| 35.122222 | 146 | 0.440683 |
7ffcca638b4a383642444cb66e73358214905bc8 | 10,792 | py | Python | rebalancer.py | papercheck/lndg | 8a0a5c9b2b53dfa2bf790feedac4bc903b4ff5ca | [
"MIT"
] | null | null | null | rebalancer.py | papercheck/lndg | 8a0a5c9b2b53dfa2bf790feedac4bc903b4ff5ca | [
"MIT"
] | null | null | null | rebalancer.py | papercheck/lndg | 8a0a5c9b2b53dfa2bf790feedac4bc903b4ff5ca | [
"MIT"
] | null | null | null | import django, json, datetime
from django.conf import settings
from django.db.models import Sum
from pathlib import Path
from datetime import datetime, timedelta
from gui.lnd_deps import lightning_pb2 as ln
from gui.lnd_deps import lightning_pb2_grpc as lnrpc
from gui.lnd_deps import router_pb2 as lnr
from gui.lnd_deps import router_pb2_grpc as lnrouter
from gui.lnd_deps.lnd_connect import lnd_connect
BASE_DIR = Path(__file__).resolve().parent
settings.configure(
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3'
}
}
)
django.setup()
from lndg import settings
from gui.models import Rebalancer, Channels, LocalSettings, Forwards, Autopilot
if __name__ == '__main__':
main() | 58.972678 | 351 | 0.615456 |
7ffe3a2a54d20351ed2bd85d6e6203ef3341cc49 | 5,112 | py | Python | pale/fields/base.py | Loudr/pale | dc002ee6032c856551143af222ff8f71ed9853fe | [
"MIT"
] | 13 | 2015-06-18T02:35:31.000Z | 2019-03-15T14:39:28.000Z | pale/fields/base.py | Loudr/pale | dc002ee6032c856551143af222ff8f71ed9853fe | [
"MIT"
] | 34 | 2015-05-18T17:13:16.000Z | 2021-03-25T21:40:42.000Z | pale/fields/base.py | Loudr/pale | dc002ee6032c856551143af222ff8f71ed9853fe | [
"MIT"
] | 3 | 2016-06-08T01:05:47.000Z | 2020-02-04T17:50:17.000Z | # -*- coding: utf-8 -*-
import logging
import types
from collections import Iterable
| 32.35443 | 78 | 0.600352 |
7ffeda80306a79591e192335e97b6bc94abc7f4b | 160 | py | Python | DublinBusTest/forms.py | Eimg851/DublinBusApp_ResearchPracticum | 41b2c559dc4608705fd1348480ce729c645d6d5a | [
"BSD-2-Clause"
] | null | null | null | DublinBusTest/forms.py | Eimg851/DublinBusApp_ResearchPracticum | 41b2c559dc4608705fd1348480ce729c645d6d5a | [
"BSD-2-Clause"
] | null | null | null | DublinBusTest/forms.py | Eimg851/DublinBusApp_ResearchPracticum | 41b2c559dc4608705fd1348480ce729c645d6d5a | [
"BSD-2-Clause"
] | 1 | 2020-06-20T09:53:15.000Z | 2020-06-20T09:53:15.000Z | from django import forms
from .models import *
| 20 | 38 | 0.66875 |
3d006479e663873fb437875d9ddb0f2fa1dee350 | 9,802 | py | Python | tests/automated/test_aws_automated.py | hrichardlee/meadowrun | 77a182505209a4d185f111cbd5aa62a46038728a | [
"MIT"
] | null | null | null | tests/automated/test_aws_automated.py | hrichardlee/meadowrun | 77a182505209a4d185f111cbd5aa62a46038728a | [
"MIT"
] | null | null | null | tests/automated/test_aws_automated.py | hrichardlee/meadowrun | 77a182505209a4d185f111cbd5aa62a46038728a | [
"MIT"
] | null | null | null | """
These tests require an AWS account to be set up, but don't require any manual
intervention beyond some initial setup. Also, these tests create instances (which cost
money!). Either `meadowrun-manage install` needs to be set up, or `meadowrun-manage
clean` needs to be run periodically
"""
import asyncio
import datetime
import io
import pprint
import threading
import uuid
import boto3
import fabric
import pytest
import meadowrun.aws_integration.management_lambdas.adjust_ec2_instances as adjust_ec2_instances # noqa: E501
from basics import BasicsSuite, HostProvider, ErrorsSuite, MapSuite
from instance_registrar_suite import (
InstanceRegistrarProvider,
InstanceRegistrarSuite,
TERMINATE_INSTANCES_IF_IDLE_FOR_TEST,
)
from meadowrun.aws_integration.aws_core import _get_default_region_name
from meadowrun.aws_integration.ec2_instance_allocation import EC2InstanceRegistrar
from meadowrun.aws_integration.ec2_pricing import _get_ec2_instance_types
from meadowrun.aws_integration.ec2_ssh_keys import ensure_meadowrun_key_pair
from meadowrun.aws_integration.grid_tasks_sqs import (
_add_tasks,
_complete_task,
_create_queues_for_job,
_get_task,
get_results,
worker_loop,
)
from meadowrun.instance_allocation import InstanceRegistrar
from meadowrun.instance_selection import choose_instance_types_for_job, Resources
from meadowrun.meadowrun_pb2 import ProcessState
from meadowrun.run_job import AllocCloudInstance
from meadowrun.run_job_core import Host, JobCompletion, CloudProviderType
# TODO don't always run tests in us-east-2
REGION = "us-east-2"
class TestEC2InstanceRegistrar(EC2InstanceRegistrarProvider, InstanceRegistrarSuite):
    # Combines the EC2-specific provider mixin with the shared
    # InstanceRegistrarSuite so the suite's tests run against EC2.
    pass
| 31.517685 | 110 | 0.652724 |
3d01213807fe34d6cbaa37ec89c61cfcc0e43948 | 11,536 | py | Python | apps/hosts/views.py | kaustubh-s1/EvalAI | 1884811e7759e0d095f7afb68188a7f010fa65dc | [
"BSD-3-Clause"
] | 1,470 | 2016-10-21T01:21:45.000Z | 2022-03-30T14:08:29.000Z | apps/hosts/views.py | kaustubh-s1/EvalAI | 1884811e7759e0d095f7afb68188a7f010fa65dc | [
"BSD-3-Clause"
] | 2,594 | 2016-11-02T03:36:01.000Z | 2022-03-31T15:30:04.000Z | apps/hosts/views.py | kaustubh-s1/EvalAI | 1884811e7759e0d095f7afb68188a7f010fa65dc | [
"BSD-3-Clause"
] | 865 | 2016-11-09T17:46:32.000Z | 2022-03-30T13:06:52.000Z | from django.contrib.auth.models import User
from rest_framework import permissions, status
from rest_framework.decorators import (
api_view,
authentication_classes,
permission_classes,
throttle_classes,
)
from rest_framework.response import Response
from rest_framework_expiring_authtoken.authentication import (
ExpiringTokenAuthentication,
)
from rest_framework.throttling import UserRateThrottle
from rest_framework_simplejwt.authentication import JWTAuthentication
from accounts.permissions import HasVerifiedEmail
from base.utils import get_model_object, team_paginated_queryset
from .filters import HostTeamsFilter
from .models import ChallengeHost, ChallengeHostTeam
from .serializers import (
ChallengeHostSerializer,
ChallengeHostTeamSerializer,
InviteHostToTeamSerializer,
HostTeamDetailSerializer,
)
from .utils import is_user_part_of_host_team
get_challenge_host_model = get_model_object(ChallengeHost)
| 37.093248 | 78 | 0.684466 |
3d019cb8d3804b67e4c6cc481ba0582e56b8a8a0 | 2,207 | py | Python | trace_for_guess/rescale.py | wtraylor/trace21ka_for_lpjguess | 184f8e213504fdad975eab40cf335bc47810669f | [
"MIT"
] | null | null | null | trace_for_guess/rescale.py | wtraylor/trace21ka_for_lpjguess | 184f8e213504fdad975eab40cf335bc47810669f | [
"MIT"
] | null | null | null | trace_for_guess/rescale.py | wtraylor/trace21ka_for_lpjguess | 184f8e213504fdad975eab40cf335bc47810669f | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2021 Wolfgang Traylor <wolfgang.traylor@senckenberg.de>
#
# SPDX-License-Identifier: MIT
import os
import shutil
import subprocess
from termcolor import cprint
from trace_for_guess.skip import skip
def rescale_file(in_file, out_file, template_file, alg):
    """Regrid a NetCDF file using NCO (i.e. the ncremap command).

    Args:
        in_file: Path of input file.
        out_file: Output file path. It will not be overwritten.
        template_file: Path to a NetCDF file that has the desired grid
            resolution.
        alg: ESMF regrid algorithm. See here:
            http://www.earthsystemmodeling.org/esmf_releases/public/ESMF_6_3_0rp1/ESMF_refdoc/node3.html#SECTION03020000000000000000

    Returns:
        The output file (`out_file`).

    Raises:
        FileNotFoundError: If `in_file` or `template_file` doesn't exist.
        RuntimeError: The `ncremap` command is not in the PATH.
        RuntimeError: The `ncremap` command failed or produced no output
            file.
    """
    if not os.path.isfile(in_file):
        raise FileNotFoundError("Input file doesnt exist: '%s'" % in_file)
    if not os.path.isfile(template_file):
        raise FileNotFoundError("Template file doesnt exist: '%s'" %
                                template_file)
    # skip() decides whether out_file is already up to date w.r.t. its inputs.
    if skip([in_file, template_file], out_file):
        return out_file
    if shutil.which("ncremap") is None:
        raise RuntimeError("Executable `ncremap` not found.")
    cprint("Regridding '%s'..." % in_file, 'yellow')
    try:
        subprocess.run(["ncremap",
                        "--algorithm=%s" % alg,
                        "--template_file=%s" % template_file,
                        "--input_file=%s" % in_file,
                        "--output_file=%s" % out_file], check=True)
    except Exception:
        # Remove a partially written output so a later run doesn't skip it.
        if os.path.isfile(out_file):
            cprint(f"Removing file '{out_file}'.", 'red')
            os.remove(out_file)
        raise
    if not os.path.isfile(out_file):
        raise RuntimeError("Regridding with `ncremap` failed: No output file "
                           "created.")
    cprint(f"Successfully created '{out_file}'.", 'green')
    return out_file
| 36.783333 | 132 | 0.622565 |
3d03e7e9418a784fa6ae34ca818d4e877cfbf8bb | 6,545 | py | Python | loldib/getratings/models/NA/na_khazix/na_khazix_top.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_khazix/na_khazix_top.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_khazix/na_khazix_top.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | from getratings.models.ratings import Ratings
| 15.695444 | 46 | 0.766692 |
3d05562ae792843c99e988fb6a4b5372987caff9 | 616 | py | Python | setup.py | KloudTrader/libkloudtrader | 015e2779f80ba2de93be9fa6fd751412a9d5f492 | [
"Apache-2.0"
] | 11 | 2019-01-16T16:10:09.000Z | 2021-03-02T00:59:17.000Z | setup.py | KloudTrader/kloudtrader | 015e2779f80ba2de93be9fa6fd751412a9d5f492 | [
"Apache-2.0"
] | 425 | 2019-07-10T06:59:49.000Z | 2021-01-12T05:32:14.000Z | setup.py | KloudTrader/kloudtrader | 015e2779f80ba2de93be9fa6fd751412a9d5f492 | [
"Apache-2.0"
] | 6 | 2019-03-15T16:25:06.000Z | 2021-05-03T10:02:13.000Z | from distutils.core import setup
setup(
name='libkloudtrader',
version='1.0.0',
author='KloudTrader',
author_email='admin@kloudtrader.com',
packages=['libkloudtrader'],
url='https://github.com/KloudTrader/kloudtrader',
license='LICENSE',
description="KloudTrader's in-house library that makes it much easier for you to code algorithms that can trade for you.",
long_description_content_type="text/markdown",
long_description='pypi.md',
install_requires=[
"boto3",
"pandas",
"numpy",
"empyrical",
"asyncio",
"ccxt"
],
)
| 26.782609 | 126 | 0.644481 |
3d05d97057af36632d639c5d678cfee0618bd44e | 197 | py | Python | sdk/python/opencannabis/media/__init__.py | CookiesCo/OpenCannabis | a7bb1f71200c6b8f56c509df47039198f0c3bd4c | [
"MIT"
] | 2 | 2020-08-27T00:45:49.000Z | 2021-06-19T08:01:13.000Z | sdk/python/opencannabis/media/__init__.py | CookiesCo/OpenCannabis | a7bb1f71200c6b8f56c509df47039198f0c3bd4c | [
"MIT"
] | 67 | 2020-08-27T03:16:33.000Z | 2022-03-26T14:33:38.000Z | sdk/python/opencannabis/media/__init__.py | CookiesCo/OpenCannabis | a7bb1f71200c6b8f56c509df47039198f0c3bd4c | [
"MIT"
] | 1 | 2020-11-12T04:26:43.000Z | 2020-11-12T04:26:43.000Z | # ~*~ coding: utf-8 ~*~
__doc__ = """
`opencannabis.media`
---------------------------
Records and definitions that structure digital media and related assets.
"""
# `opencannabis.media`
| 16.416667 | 74 | 0.573604 |
3d05dcf32ee27f2f4d2629e65ab5d7e2a5641f27 | 14,581 | py | Python | mlutils/losses.py | DSciLab/mlutils | 352af36f2b34218b6551254f641427b7bbdd0f31 | [
"MIT"
] | null | null | null | mlutils/losses.py | DSciLab/mlutils | 352af36f2b34218b6551254f641427b7bbdd0f31 | [
"MIT"
] | null | null | null | mlutils/losses.py | DSciLab/mlutils | 352af36f2b34218b6551254f641427b7bbdd0f31 | [
"MIT"
] | null | null | null | from typing import Callable, Optional, Union, Tuple, List
import torch
from torch import nn
from cfg import Opts
from torch import Tensor
from torch.nn import functional as F
from mlutils import LogitToPreds
EPS = 1.0e-8
__all__ = ['IOULoss', 'GDiceLoss', 'SoftDiceLoss',
'CrossEntropyLoss', 'BCELossWithLogits',
'GDiceCELoss', 'GDiceBCELoss', 'SoftDiceCELoss',
'SoftDiceBCELoss', 'DeepSupervisedLoss',
'LossPicker']
def flatten(inp: Tensor, with_class: Optional[bool]=False) -> Tensor:
    """
    Flatten the spatial dimensions of a (B, C, *spatial) tensor.

    :param inp: input tensor with shape (B, C, spatial_shape)
    :param with_class: keep the class dimension C separate
    :return: a tensor of shape (B, C, prod(spatial_shape)) when
        ``with_class`` is True, otherwise (B, C * prod(spatial_shape))
    """
    batch = inp.size(0)
    if with_class:
        return inp.view(batch, inp.size(1), -1)
    return inp.view(batch, -1)
def flatten_with_class(inp: Tensor) -> Tensor:
    """
    Move the class dimension to the front and flatten everything else.

    :param inp: input tensor, the expected shape is (B, C, spatial_shape)
    :return: a tensor with shape (C, B * prod(spatial_shape))
    """
    # Swap batch and class axes, leaving any spatial axes in place.
    axes = (1, 0) + tuple(range(2, inp.ndim))
    moved = inp.permute(*axes).contiguous()
    return moved.view(moved.size(0), -1)
def iou_loss(pred: Tensor, gt: Tensor,
             smooth: Optional[float]=0.01,
             ignore_label: Optional[int]=None) -> Tensor:
    """
    Jaccard (IOU) loss computed per batch sample.

    :param pred: after latest activation, the shape is (B, C, spatial_shape)
    :param gt: onehoted gt, the shape is (B, C, spatial_shape)
    :param smooth: additive smoothing term for numerical stability
    :param ignore_label: class index (along dim 1) excluded from the loss
    :return: 1 - IOU, the shape is (B,)
    """
    assert pred.shape == gt.shape
    if ignore_label is not None:
        # Drop the ignored class channel while keeping the batch dim first.
        # (The previous torch.stack over unbind(dim=1) rebuilt the tensor
        # with the class dim in front, silently changing the result shape
        # from (B,) to (C-1,).)
        keep = [c for c in range(pred.size(1)) if c != ignore_label]
        pred = pred[:, keep]
        gt = gt[:, keep]

    pred = flatten(pred)
    gt = flatten(gt)

    tp = (pred * gt).sum(-1)
    fp = (pred * (1 - gt)).sum(-1)
    fn = ((1 - pred) * gt).sum(-1)

    iou = (tp + smooth) / (tp + fp + fn + EPS + smooth)
    return 1.0 - iou
def generalized_dice_loss(pred: Tensor, gt: Tensor,
                          smooth: Optional[float]=0.01,
                          with_weight: Optional[bool]=True,
                          ignore_label: Optional[int]=None) -> Tensor:
    """
    Generalized Dice loss computed per batch sample.

    :param pred: after latest activation, the shape is (B, C, spatial_shape)
    :param gt: onehoted gt, the shape is (B, C, spatial_shape)
    :param smooth: additive smoothing term for numerical stability
    :param with_weight: weight each class by the inverse squared size of
        its ground-truth region
    :param ignore_label: class index (along dim 1) excluded from the loss
    :return: GDice, the shape is (B,)
    """
    assert pred.shape == gt.shape
    if ignore_label is not None:
        # Drop the ignored class channel while keeping the batch dim first.
        # (The previous torch.stack over unbind(dim=1) rebuilt the tensor
        # with the class dim in front, breaking the documented
        # (B, C, spatial_shape) layout.)
        keep = [c for c in range(pred.size(1)) if c != ignore_label]
        pred = pred[:, keep]
        gt = gt[:, keep]

    pred = flatten(pred, with_class=True)
    gt = flatten(gt, with_class=True)

    if with_weight:
        gt_class_flatten = flatten_with_class(gt).sum(-1)
        class_weight = 1.0 / (gt_class_flatten * gt_class_flatten + EPS)
        intersect = (pred * gt).sum(-1) * class_weight.unsqueeze(0)
        intersect = intersect.sum(-1)
    else:
        intersect = (pred * gt).sum([-2, -1])

    # the shape of intersect is (B,)
    # the shape of pred and gt is (B, C, prod(spatial_shape))
    denominator = pred.sum([-2, -1]) + gt.sum([-2, -1])
    assert intersect.shape == denominator.shape, \
        f'{intersect.shape} != {denominator.shape}'
    return 1.0 - (intersect + smooth) / (denominator + EPS + smooth)
def soft_dice_loss(pred: Tensor, gt: Tensor,
                   ignore_label: Optional[int]=None) -> Tensor:
    """
    Soft Dice loss, derived from the IOU via dice = 2 * IOU / (1 + IOU).

    :param pred: after latest activation, the shape is (B, C, spatial_shape)
    :param gt: onehoted gt, the shape is (B, C, spatial_shape)
    :param ignore_label: class index excluded from the loss
    :return: dice loss, the shape is (B,)
    """
    jaccard = iou_loss(pred, gt, ignore_label=ignore_label)
    return (2.0 * jaccard) / (jaccard + 1.0)
# Registry mapping configuration names to loss classes (see __all__).
_loss_dict_ = {
    'IOULoss': IOULoss,
    'GDiceLoss': GDiceLoss,
    # Fixed: 'SoftDiceLoss' previously mapped to SoftDiceBCELoss, so the
    # plain soft-Dice loss could never be selected by name.
    'SoftDiceLoss': SoftDiceLoss,
    'CrossEntropyLoss': CrossEntropyLoss,
    'BCELossWithLogits': BCELossWithLogits,
    'GDiceCELoss': GDiceCELoss,
    'GDiceBCELoss': GDiceBCELoss,
    'SoftDiceCELoss': SoftDiceCELoss,
    'SoftDiceBCELoss': SoftDiceBCELoss
}
| 36.002469 | 78 | 0.56992 |
3d06f699f338062bc96644c815234c6952e6bcf8 | 1,136 | py | Python | libary/yml_wrapper.py | NekoFanatic/kaiji | 7ae8e12d4e821e7d28d78034e1ec044ed75f9536 | [
"MIT"
] | null | null | null | libary/yml_wrapper.py | NekoFanatic/kaiji | 7ae8e12d4e821e7d28d78034e1ec044ed75f9536 | [
"MIT"
] | null | null | null | libary/yml_wrapper.py | NekoFanatic/kaiji | 7ae8e12d4e821e7d28d78034e1ec044ed75f9536 | [
"MIT"
] | null | null | null | from typing import Union
import yaml
| 23.183673 | 52 | 0.49912 |
3d0821d054373cb00fdbdf718c2ebff667597c8c | 14,688 | py | Python | smoothfdr/easy.py | tansey/smoothfdr | c5b693d0a66e83c9387433b33c0eab481bd4a763 | [
"MIT"
] | 6 | 2016-02-26T23:08:57.000Z | 2018-09-13T16:14:47.000Z | smoothfdr/easy.py | tansey/smoothfdr | c5b693d0a66e83c9387433b33c0eab481bd4a763 | [
"MIT"
] | 2 | 2015-09-23T16:59:37.000Z | 2017-09-29T13:19:44.000Z | smoothfdr/easy.py | tansey/smoothfdr | c5b693d0a66e83c9387433b33c0eab481bd4a763 | [
"MIT"
] | 3 | 2017-07-04T12:25:32.000Z | 2021-04-16T00:10:33.000Z | # import itertools
# from functools import partial
# from scipy.stats import norm
# from scipy.sparse import csc_matrix, linalg as sla
# from scipy import sparse
# from scipy.optimize import minimize, minimize_scalar
# from collections import deque, namedtuple
import numpy as np
from networkx import Graph
from pygfl.solver import TrailSolver
from pygfl.trails import decompose_graph, save_chains
from pygfl.utils import chains_to_trails, calc_plateaus, hypercube_edges
from smoothfdr.smoothed_fdr import GaussianKnown
from smoothfdr.normix import *
from smoothfdr.utils import calc_fdr
def smooth_fdr_known_dists(data, fdr_level, null_dist, signal_dist, edges=None, initial_values=None, verbose=0, missing_val=None):
    '''FDR smoothing where the null and alternative distributions are known
    (and not necessarily Gaussian). Both must define the function pdf.

    Parameters
    ----------
    data : ndarray
        Test statistics; may be multi-dimensional.  Flattened internally and
        all results are reshaped back to ``data.shape``.
    fdr_level : float
        Target false discovery rate used to threshold the posteriors.
    null_dist, signal_dist : objects exposing ``pdf(x)``
        The null and alternative densities.
    edges : list of (i, j) index pairs, optional
        Graph edges over the flattened indices.  Defaults to a grid graph
        matching ``data.shape``.
    initial_values : dict, optional
        Warm-start state forwarded to the solution-path solver.
    verbose : int
        Verbosity; decremented by one for the nested solver call.
    missing_val : scalar, optional
        Sentinel marking missing observations; edges touching such points
        are dropped from the graph.

    Returns
    -------
    dict
        Per-lambda traces plus the selected betas, priors, posteriors and
        FDR discoveries, reshaped to ``data.shape``.
    '''
    flat_data = data.flatten()

    if edges is None:
        if verbose:
            print('Using default edge set of a grid of same shape as the data: {0}'.format(data.shape))
        edges = hypercube_edges(data.shape)

    if missing_val is not None:
        if verbose:
            print('Removing all data points whose data value is {0}'.format(missing_val))
        # Drop every edge that touches a missing observation.  Note the
        # solver below is still fed the full flat_data including the missing
        # entries; only the graph is pruned.
        edges = [(e1, e2) for (e1, e2) in edges
                 if flat_data[e1] != missing_val and flat_data[e2] != missing_val]

    # Decompose the graph into trails for the graph-fused-lasso solver.
    g = Graph()
    g.add_edges_from(edges)
    chains = decompose_graph(g, heuristic='greedy')
    ntrails, trails, breakpoints, edges = chains_to_trails(chains)

    if verbose:
        print('Smoothing priors via solution path algorithm')

    solver = TrailSolver()
    solver.set_data(flat_data, edges, ntrails, trails, breakpoints)

    # Bug fix: initial_values was previously accepted but never forwarded,
    # so caller-supplied warm starts were silently ignored.
    results = solution_path_smooth_fdr(flat_data, solver, null_dist, signal_dist,
                                       verbose=max(0, verbose - 1),
                                       initial_values=initial_values)
    results['discoveries'] = calc_fdr(results['posteriors'], fdr_level)
    results['null_dist'] = null_dist
    results['signal_dist'] = signal_dist

    # Reshape everything back to the original data shape
    results['betas'] = results['betas'].reshape(data.shape)
    results['priors'] = results['priors'].reshape(data.shape)
    results['posteriors'] = results['posteriors'].reshape(data.shape)
    results['discoveries'] = results['discoveries'].reshape(data.shape)
    results['beta_iters'] = np.array([x.reshape(data.shape) for x in results['beta_iters']])
    results['prior_iters'] = np.array([x.reshape(data.shape) for x in results['prior_iters']])
    results['posterior_iters'] = np.array([x.reshape(data.shape) for x in results['posterior_iters']])
    return results
def solution_path_smooth_fdr(data, solver, null_dist, signal_dist, min_lambda=0.20, max_lambda=1.5, lambda_bins=30, verbose=0, initial_values=None):
    '''Follows the solution path of the generalized lasso to find the best lambda value.

    A log-spaced grid of ``lambda_bins`` penalty values is swept from
    ``max_lambda`` down to ``min_lambda``.  Each fit is warm-started from the
    previous one and scored with AIC/AICc/BIC; the model with the lowest BIC
    is selected.

    Returns a dict containing the per-lambda traces plus the winning betas,
    priors, posteriors, lambda value and plateau decomposition.
    '''
    lambda_grid = np.exp(np.linspace(np.log(max_lambda), np.log(min_lambda), lambda_bins))
    aic_trace = np.zeros(lambda_grid.shape)   # The AIC score for each lambda value
    aicc_trace = np.zeros(lambda_grid.shape)  # The AICc score (finite-sample corrected AIC)
    bic_trace = np.zeros(lambda_grid.shape)   # The BIC score for each lambda value
    dof_trace = np.zeros(lambda_grid.shape)   # The degrees of freedom of each final solution
    log_likelihood_trace = np.zeros(lambda_grid.shape)
    # (Removed two accumulators, u_trace and results_trace, that were
    # initialized but never written to or read.)
    beta_trace = []
    w_trace = []
    c_trace = []

    best_idx = None
    best_plateaus = None
    for i, _lambda in enumerate(lambda_grid):
        if verbose:
            print('#{0} Lambda = {1}'.format(i, _lambda))

        # Fit to the final values
        results = fixed_penalty_smooth_fdr(data, solver, _lambda, null_dist, signal_dist,
                                           verbose=max(0, verbose - 1),
                                           initial_values=initial_values)

        if verbose:
            print('Calculating degrees of freedom')

        # Degrees of freedom = number of constant-valued plateaus in beta.
        plateaus = calc_plateaus(results['beta'], solver.edges)
        dof_trace[i] = len(plateaus)

        if verbose:
            print('Calculating AIC')

        # Get the negative log-likelihood
        log_likelihood_trace[i] = -_data_negative_log_likelihood(data, results['c'], null_dist, signal_dist)

        # Calculate AIC = 2k - 2ln(L)
        aic_trace[i] = 2. * dof_trace[i] - 2. * log_likelihood_trace[i]
        # Calculate AICc = AIC + 2k * (k+1) / (n - k - 1)
        aicc_trace[i] = aic_trace[i] + 2 * dof_trace[i] * (dof_trace[i] + 1) / (data.shape[0] - dof_trace[i] - 1.)
        # Calculate BIC = -2ln(L) + k * (ln(n) - ln(2pi))
        bic_trace[i] = -2 * log_likelihood_trace[i] + dof_trace[i] * (np.log(len(data)) - np.log(2 * np.pi))

        # Track the best model thus far (selection criterion is BIC).
        if best_idx is None or bic_trace[i] < bic_trace[best_idx]:
            best_idx = i
            best_plateaus = plateaus

        # Save the final run parameters to use for warm-starting the next iteration
        initial_values = results

        # Save the trace of all the resulting parameters
        beta_trace.append(results['beta'])
        w_trace.append(results['w'])
        c_trace.append(results['c'])

        if verbose:
            print('DoF: {0} AIC: {1} AICc: {2} BIC: {3}'.format(dof_trace[i], aic_trace[i], aicc_trace[i], bic_trace[i]))

    if verbose:
        print('Best setting (by BIC): lambda={0} [DoF: {1}, AIC: {2}, AICc: {3} BIC: {4}]'.format(lambda_grid[best_idx], dof_trace[best_idx], aic_trace[best_idx], aicc_trace[best_idx], bic_trace[best_idx]))

    return {'aic': aic_trace,
            'aicc': aicc_trace,
            'bic': bic_trace,
            'dof': dof_trace,
            'loglikelihood': log_likelihood_trace,
            'beta_iters': np.array(beta_trace),
            'posterior_iters': np.array(w_trace),
            'prior_iters': np.array(c_trace),
            'lambda_iters': lambda_grid,
            'best': best_idx,
            'betas': beta_trace[best_idx],
            'priors': c_trace[best_idx],
            'posteriors': w_trace[best_idx],
            'lambda': lambda_grid[best_idx],
            'plateaus': best_plateaus}
def _data_negative_log_likelihood(data, prior_prob, null_dist, signal_dist):
'''Calculate the negative log-likelihood of the data given the weights.'''
signal_weight = prior_prob * signal_dist.pdf(data)
null_weight = (1-prior_prob) * null_dist.pdf(data)
return -np.log(signal_weight + null_weight).sum()
def _e_step(data, prior_prob, null_dist, signal_dist):
'''Calculate the complete-data sufficient statistics (weights vector).'''
signal_weight = prior_prob * signal_dist.pdf(data)
null_weight = (1-prior_prob) * null_dist.pdf(data)
post_prob = signal_weight / (signal_weight + null_weight)
return post_prob
def _m_step(beta, prior_prob, post_prob, _lambda,
            solver, converge, max_steps,
            verbose, initial_values):
    '''
    Alternating Second-order Taylor-series expansion about the current iterate.

    Repeatedly forms a weighted-least-squares (Taylor) approximation of the
    logistic prior log-likelihood around the current ``beta`` and solves the
    resulting fused-lasso problem with ``solver``, until the relative change
    in negative log-likelihood drops below ``converge`` or ``max_steps`` is
    reached.  Returns the final ``beta`` and the warm-start state dict.

    NOTE(review): ``initial_values`` holds references to the solver's
    internal arrays (beta/z/u), so after ``solver.solve(...)`` the line
    ``beta = initial_values['beta']`` picks up the freshly solved
    coefficients through that aliasing.  This relies on the solver mutating
    its arrays in place -- confirm against TrailSolver before refactoring.

    (Removed an unused ``exp_beta = np.exp(beta)`` cache: it was never read
    and could emit overflow warnings for large beta.)
    '''
    prev_nll = _m_log_likelihood(post_prob, beta)
    delta = converge + 1
    cur_step = 0
    while delta > converge and cur_step < max_steps:
        if verbose:
            print('\t\tM-Step iteration #{0}'.format(cur_step))
            print('\t\tTaylor approximation...')

        # Form the parameters for our weighted least squares: weights are
        # the Bernoulli variances, y is the working response of the
        # second-order expansion.
        weights = (prior_prob * (1 - prior_prob))
        y = beta - (prior_prob - post_prob) / weights
        solver.set_values_only(y, weights=weights)

        if initial_values is None:
            initial_values = {'beta': solver.beta, 'z': solver.z, 'u': solver.u}
        else:
            solver.beta = initial_values['beta']
            solver.z = initial_values['z']
            solver.u = initial_values['u']

        solver.solve(_lambda)

        # if np.abs(beta).max() > 20:
        #     beta = np.clip(beta, -20, 20)
        #     u = None
        beta = initial_values['beta']

        # Get the current log-likelihood
        cur_nll = _m_log_likelihood(post_prob, beta)

        # Track the convergence
        delta = np.abs(prev_nll - cur_nll) / (prev_nll + converge)

        if verbose:
            print('\t\tM-step delta: {0}'.format(delta))

        # Increment the step counter
        cur_step += 1

        # Update the negative log-likelihood tracker
        prev_nll = cur_nll

    return beta, initial_values
def _m_log_likelihood(post_prob, beta):
'''Calculate the log-likelihood of the betas given the weights and data.'''
return (np.log(1 + np.exp(beta)) - post_prob * beta).sum()
| 41.027933 | 210 | 0.633715 |
3d09d053089d2dfd866a874b9112340c6aa15645 | 438 | py | Python | code/2cams.py | ctm1098/umucv | ea6cce5d9cfece1e372e05eb9223ef6ddc17b438 | [
"BSD-3-Clause"
] | 12 | 2018-02-15T17:54:57.000Z | 2022-02-25T12:00:49.000Z | code/2cams.py | ctm1098/umucv | ea6cce5d9cfece1e372e05eb9223ef6ddc17b438 | [
"BSD-3-Clause"
] | 8 | 2019-03-06T18:53:43.000Z | 2022-03-18T10:04:40.000Z | code/2cams.py | ctm1098/umucv | ea6cce5d9cfece1e372e05eb9223ef6ddc17b438 | [
"BSD-3-Clause"
] | 22 | 2018-02-06T14:40:03.000Z | 2022-03-17T11:38:48.000Z | #!/usr/bin/env python
# Capture from two cameras simultaneously and display each feed plus a
# 50/50 grayscale blend, until Esc (key code 27) is pressed.
import numpy as np
import cv2 as cv

cap1 = cv.VideoCapture(0)
cap2 = cv.VideoCapture(1)

try:
    while cv.waitKey(1) & 0xFF != 27:
        ok1, frame1 = cap1.read()
        ok2, frame2 = cap2.read()
        if not (ok1 and ok2):
            # A camera stopped delivering frames; bail out instead of
            # passing None frames to imshow/cvtColor.
            break
        cv.imshow('c1', frame1)
        cv.imshow('c2', frame2)
        gray1 = cv.cvtColor(frame1, cv.COLOR_RGB2GRAY)
        gray2 = cv.cvtColor(frame2, cv.COLOR_RGB2GRAY)
        # Integer-divide each uint8 image by 2 before adding to avoid overflow.
        cv.imshow('frame', gray1 // 2 + gray2 // 2)
finally:
    # Bug fix: the capture devices were never released before. Release them
    # and close windows even if the loop raises.
    cap1.release()
    cap2.release()
    cv.destroyAllWindows()
| 19.043478 | 50 | 0.652968 |
3d0a653d9351f079b350d765b5ef2da6e1ece3a5 | 1,109 | py | Python | Summary pie chart/Sum_Indonesia.py | pdeesawat/PSIT58_test_01 | 631946eacd82503e0697680f06290a4fe10f17f2 | [
"Apache-2.0"
] | null | null | null | Summary pie chart/Sum_Indonesia.py | pdeesawat/PSIT58_test_01 | 631946eacd82503e0697680f06290a4fe10f17f2 | [
"Apache-2.0"
] | null | null | null | Summary pie chart/Sum_Indonesia.py | pdeesawat/PSIT58_test_01 | 631946eacd82503e0697680f06290a4fe10f17f2 | [
"Apache-2.0"
] | null | null | null | import plotly.plotly as py
# --- Load the disaster database and split each CSV row into fields. ---
# Bug fix: the file handle was previously opened and never closed.
with open('Real_Final_database_02.csv') as data:
    alldata = data.readlines()
listdata = [row.strip().split(',') for row in alldata]

# --- Count Indonesia's occurrences of each tracked disaster type. ---
# Column 0 is the country name, column 2 the disaster type.
all_disaster = {'Drought': 0, 'Flood': 0, 'Storm': 0, 'Epidemic': 0, 'Earthquake': 0}
for record in listdata:
    if record[0] == 'Indonesia' and record[2] in all_disaster:
        all_disaster[record[2]] += 1

# --- Convert raw counts to percentages of the total, rounded to 2 decimals.
# (Removed an unused `average` list that was never populated or read.) ---
total = sum(all_disaster.values())
for kind in all_disaster:
    all_disaster[kind] = float("%.2f" % ((all_disaster[kind] / total) * 100))
label = [i for i in all_disaster]
value = [all_disaster[j] for j in label]

# --- Donut-chart spec for plotly (hole=0.39 turns the pie into a donut). ---
make_circle = {"data": [{"values": value, "labels": label,
                         "name": "Average", "hoverinfo": "label+percent+name", "hole": 0.39, "type": "pie"}],
               "layout": {"title": "Indonesia's Average Disaster from 2000 to 2014",
                          "annotations": [{"font": {"size": 20},
                                           "showarrow": False, "text": ""}]}}
# Bug fix: the remote chart filename previously read "200o" instead of "2000".
url = py.plot(make_circle, filename='Indonesia\'s Average Disaster from 2000 to 2014')
| 35.774194 | 107 | 0.658251 |
3d0b11a3dec857ffd6e51932557d206c66901849 | 2,515 | py | Python | python/draw_dog.py | event-driven-robotics/study-air-hockey | e933bcf85d77762dae7d468f314c7db6e71fba81 | [
"BSD-3-Clause"
] | null | null | null | python/draw_dog.py | event-driven-robotics/study-air-hockey | e933bcf85d77762dae7d468f314c7db6e71fba81 | [
"BSD-3-Clause"
] | null | null | null | python/draw_dog.py | event-driven-robotics/study-air-hockey | e933bcf85d77762dae7d468f314c7db6e71fba81 | [
"BSD-3-Clause"
] | 1 | 2021-07-29T15:09:37.000Z | 2021-07-29T15:09:37.000Z | import numpy as np
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import cm
from scipy import signal
import matplotlib.image as mpimg

# matplotlib.use('Agg')

# define normalized 2D gaussian
# NOTE(review): this script calls `gaus2d` (and the commented-out code calls
# `rgb2gray`), but neither is defined or imported in the file as shown --
# confirm where those helpers live before running.

# Red reference ellipse, overlaid later on the difference-of-Gaussians panel.
ellipse = Ellipse(xy=(0,0), width=3.6, height=1.8, edgecolor='r', lw=2, facecolor='none')

# Common 101x101 evaluation grid over [0, 10] x [0, 10].
x = np.linspace(0, 10, 101)
y = np.linspace(0, 10, 101)

# First (wider) Gaussian: centre (5, 5), widths 2.7 and 1.35; peak-normalised.
x1, y1 = np.meshgrid(x, y) # get 2D variables instead of 1D
z1 = gaus2d(x1, y1, 5, 5, 2.7, 1.35)
z1_copy = z1.copy()  # keep the un-normalised surface around
z1 = z1/z1.max()

# Second (narrower) Gaussian: same centre, widths 0.9 and 0.45; peak-normalised.
x2, y2 = np.meshgrid(x, y) # get 2D variables instead of 1D
z2 = gaus2d(x2, y2, 5, 5, 0.9, 0.45)
z2_copy = z2.copy()
z2 = z2/z2.max()

# Difference of Gaussians: rescaled to a peak of 1, negative lobe clipped to 0.
dog_not_norm = z1 - z2
dog = (z1 - z2)/np.max(z1-z2)
dog[dog<0] = 0

# path
# path1 = 'image_puck.png'
# img1 = mpimg.imread(path1)
# gray1 = rgb2gray(img1)
# img1 = (np.array(gray1))[0:84, 0:84]
# path2 = 'circle.png'
# img2 = mpimg.imread(path2)
# gray2 = rgb2gray(img2)
# img2 = (np.array(gray1))[0:84, 0:84]
# img_conv = signal.convolve2d(img1, z1)
# # img_product = img1 * img2
#
# # Displaying the image
# fig1 = plt.figure()
#
# plt.imshow(img_conv)
# plt.show()
# fig2 = plt.figure()
# plt.imshow(img)
# plt.show()

# 3x2 figure: positions 1-2 = first Gaussian (2-D image, 3-D contour),
# positions 3-4 = second Gaussian, positions 5-6 = the DoG.
fig = plt.figure()

# DoG panels (bottom row), with the reference ellipse and a colorbar.
ax1 = fig.add_subplot(3,2,5)
ax1.add_artist(ellipse)
im = ax1.imshow(dog, cmap="viridis", extent=(-5, 5, -5, 5))
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax1.title.set_text('dog 2D')
cbar = fig.colorbar(im, ax=ax1)

ax2 = fig.add_subplot(3,2,6,projection='3d')
ax2.contour3D(x, y, dog, 100, cmap=cm.viridis)
ax2.set_xlabel('x')
ax2.set_ylabel('y')
ax2.set_zlabel('z')
ax2.title.set_text('dog 3D')

# First Gaussian panels (top row).
ax3 = fig.add_subplot(3,2,1)
im1 = ax3.imshow(z1, cmap="viridis", extent=(-5, 5, -5, 5))
ax3.set_xlabel('x')
ax3.set_ylabel('y')
ax3.title.set_text('g1 2D')

ax4 = fig.add_subplot(3,2,2,projection='3d')
ax4.contour3D(x, y, z1, 50, cmap=cm.viridis)
ax4.set_xlabel('x')
ax4.set_ylabel('y')
ax4.set_zlabel('z')
ax4.title.set_text('g1 3D')

# Second Gaussian panels (middle row).
ax5 = fig.add_subplot(3,2,3)
im2 = ax5.imshow(z2, cmap="viridis", extent=(-5, 5, -5, 5))
ax5.set_xlabel('x')
ax5.set_ylabel('y')
ax5.title.set_text('g2 2D')

ax6 = fig.add_subplot(3,2,4,projection='3d')
ax6.contour3D(x, y, z2, 50, cmap=cm.viridis)
ax6.set_xlabel('x')
ax6.set_ylabel('y')
ax6.set_zlabel('z')
ax6.title.set_text('g2 3D')

plt.show()
| 25.927835 | 111 | 0.652485 |
3d0b63ff899d9630d5763b8599ddc075bb3c108f | 620 | py | Python | PycharmProjects/PythonValidacao/consome_api.py | FeFSRibeiro/learning-python | 4f642aa7e1c6523f5209f83ece7e974bfb3ef24e | [
"Apache-2.0"
] | null | null | null | PycharmProjects/PythonValidacao/consome_api.py | FeFSRibeiro/learning-python | 4f642aa7e1c6523f5209f83ece7e974bfb3ef24e | [
"Apache-2.0"
] | null | null | null | PycharmProjects/PythonValidacao/consome_api.py | FeFSRibeiro/learning-python | 4f642aa7e1c6523f5209f83ece7e974bfb3ef24e | [
"Apache-2.0"
] | null | null | null | import requests
| 22.142857 | 66 | 0.540323 |
3d0b84039e886dcbf5a0882295390d0af7dd865b | 3,928 | py | Python | tools/convert_lightning2venot.py | ucl-exoplanets/TauREx_public | 28d47f829a2873cf15e3bfb0419b8bc4e5bc03dd | [
"CC-BY-4.0"
] | 18 | 2019-07-22T01:35:24.000Z | 2022-02-10T11:25:42.000Z | tools/convert_lightning2venot.py | ucl-exoplanets/TauREx_public | 28d47f829a2873cf15e3bfb0419b8bc4e5bc03dd | [
"CC-BY-4.0"
] | null | null | null | tools/convert_lightning2venot.py | ucl-exoplanets/TauREx_public | 28d47f829a2873cf15e3bfb0419b8bc4e5bc03dd | [
"CC-BY-4.0"
] | 1 | 2017-10-19T15:14:06.000Z | 2017-10-19T15:14:06.000Z | #! /usr/bin/python
#small script that shits out the venot file format equivalent for the lightening project
# NOTE(review): this is a Python 2 script (print statements, xrange); it also
# depends on pyfits and pylab, both long deprecated.
import numpy as np
import pylab as pl
import pyfits as pf
import glob, os, sys

# Physical constants (SI units).
AMU = 1.660538921e-27
KBOLTZ = 1.380648813e-23
G = 6.67384e-11
RSOL = 6.955e8
RJUP = 6.9911e7
#RJUP = 7.1492e7 # Jo's radius
MJUP = 1.898e27
AU = 1.49e11

# Input location and which lightning-project profile to convert.
DIR = '/Users/ingowaldmann/Dropbox/UCLlocal/REPOS/taurex/Input/lightening'
# FILENAME = 'Earth-Today-Lightning-Full.dat'
FILENAME = 'Modern-Earth-noLightning-Full.dat'

#EARTH
planet_mass = 5.97237e24 #kg
planet_radius = 6.371e6 #m
planet_mu = 28.97 * AMU#kg

# Numeric table: one row per atmospheric layer (header row skipped).
data = np.loadtxt(os.path.join(DIR,FILENAME),skiprows=1)
[nlayers,ncols] = np.shape(data)

# Grab only the first (header) line of the file.
fheader = open(os.path.join(DIR,FILENAME),'r')
header = fheader.readlines()
c=0
for line in header:
    head = line
    break
fheader.close()

#rebuilding header line
# Inserting 'm' after the first two characters turns the leading pressure
# label 'p(bar)' into 'p(mbar)', matching the bar->mbar conversion below;
# an altitude column label is prepended.
newhead = 'alt(km) '+head[:2]+'m'+head[2:]
# Drops the first 62 characters of the header -- presumably everything up to
# the first molecule column; fragile, confirm against the input file.
newhead_small = head[62:]
print head.split()

# Known molecule names and their molecular weights (index-aligned lists).
molnames = ['C_1D','H','N','O','O_1D','O_1S', 'CO', 'H2','HO','N2', 'NO', 'O2', 'O2_D', 'O3', 'CH4',
            'CO2', 'H2O', 'HO2', 'N2O', 'NO2', 'H2O2', 'HNO3', 'CH2O2', 'HCOOH', 'CH3ONO', 'e-',
            'H+', 'O+', 'NO+','O2+', 'C','HN','CNC','H2N','H3N','C+','C-','N+','O-','CO+','HO+','N2+','CHO+',
            'CH3','CHO','HCN','HNO','NO3','C2H2','C2H6','CH2O','HNO2','N2O3','CH3O2','CH3OH','CH4O2','H3O+']
molweights = [14,1,14,16,18,18,28,2,17,28,30,32,34,48,16,44,18,33,44,46,34,63,46,46,61,0,1,16,30,32,12,15,38,16,17,12,12,14,16,28,17,28,29,
              15,29,27,31,62,26,28,30,48,76,47,32,52,19]
# Non-molecule header columns to skip when building the weight list.
badwords = ['p(bar)' ,'T(K)' , 'NH(cm-3)' , 'Kzz(cm2s-1)' , 'Hz(cm)', 'zeta(s-1)']

# One weight per header token: known molecules get their tabulated weight,
# unknown species get a 2.3 placeholder (presumably a mean molecular weight
# -- confirm), and the non-molecule columns are skipped.
mollist = []
for mol in head.split():
    if mol in molnames:
        mollist.append(molweights[molnames.index(mol)])
    elif mol not in badwords:
        mollist.append(2.3)
        print 'FILLED: ',mol
    else:
        print 'OUT: ',mol
#         mollist.append(2.3)
moleweigthstr = ' '.join(str(e) for e in mollist)

#create ranking of most important molecules according to abundance
molabundance =[]
mnamelist =[]
c=0
for mol in head.split():
    mnamelist.append(mol)
    molabundance.append(np.max(data[:,c]))
    c+=1
# Drop the six non-molecule columns, then sort by peak abundance.
mnamelist = np.asarray(mnamelist[6:])
molabundance = np.asarray(molabundance[6:])
midx = np.argsort(molabundance)
print midx[::-1]
print mnamelist[midx][::-1]
print molabundance[midx][::-1]

# Hydrostatic altitude grid: integrate the scale height over the pressure
# levels, updating gravity with altitude at each layer.
pressure_profile_levels = data[:,0] * 1000.0 #converting bar to mbar
temperature_profile = data[:,1]
H = np.zeros(nlayers)
g = np.zeros(nlayers)
z = np.zeros((nlayers,1))
g[0] = (G * planet_mass) / (planet_radius**2) # surface gravity (0th layer)
H[0] = (KBOLTZ*temperature_profile[0])/(planet_mu*g[0]) # scaleheight at the surface (0th layer)
for i in xrange(1, nlayers):
    deltaz = (-1.)*H[i-1]*np.log(pressure_profile_levels[i]/pressure_profile_levels[i-1])
    z[i] = z[i-1] + deltaz # altitude at the i-th layer
    with np.errstate(over='ignore'):
        g[i] = (G * planet_mass) / ((planet_radius + z[i])**2) # gravity at the i-th layer
    with np.errstate(divide='ignore'):
        H[i] = (KBOLTZ*temperature_profile[i])/(planet_mu*g[i])
z /=1e3 #converting m to km

# Output tables: OUT = altitude + full input table; OUT2 = first three
# columns (altitude, pressure, temperature); OUT3 = altitude, pressure and
# the mixing-ratio columns only (input columns 6 onward).
OUT = np.hstack((z,data))
OUT2 = OUT[:,:3]
[s1,s2] = np.shape(data[:,6:])
OUT3 = np.zeros((s1,s2+2))
OUT3[:,0] = z[:,0]
OUT3[:,1] = data[:,0]
OUT3[:,2:] = data[:,6:]

# Write the converted profile, the mixing ratios and the T-P profile.
with open(FILENAME[:-4]+'_conv.dat','wb') as outfile:
    outfile.write(newhead)
    outfile.write(moleweigthstr+'\n')
    np.savetxt(outfile, OUT)
with open(FILENAME[:-4]+'_mixing.dat','wb') as outfile:
    outfile.write(newhead_small)
    outfile.write(moleweigthstr+'\n')
    np.savetxt(outfile, OUT3)
np.savetxt(FILENAME[:-4]+'_tp.dat',OUT2)

# Quick-look plot of the (log) abundance ranking.
pl.figure(1)
pl.plot(np.log(molabundance[midx][::-1]),linewidth=3.0)
pl.gca().xaxis.set_ticks(np.arange(0, len(molabundance), 1.0))
pl.gca().set_xticklabels(mnamelist[midx][::-1])
pl.ylabel('log(mixing ratio)')
pl.show() | 26.90411 | 139 | 0.630855 |
3d0d5a4bdcb6949d58811e00ce041b7deeb69354 | 288 | py | Python | setup.py | NWeis97/ML_Ops_Project | cc4c65fec679b08675e76a24ad7e44de1b5df29a | [
"MIT"
] | null | null | null | setup.py | NWeis97/ML_Ops_Project | cc4c65fec679b08675e76a24ad7e44de1b5df29a | [
"MIT"
] | null | null | null | setup.py | NWeis97/ML_Ops_Project | cc4c65fec679b08675e76a24ad7e44de1b5df29a | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
# Package metadata for the ML Ops final-exercise project ("src" package).
setup(
    name="src",
    packages=find_packages(),
    version="0.1.0",
    description=(
        "This project contains the final exercise of S1, "
        "in which we will continue to build upon"
    ),
    author="Nicolai Weisbjerg",
    license="MIT",
)
| 24 | 66 | 0.677083 |
3d0f1eb7e22b4173d6bb7ae45401f9b8d7518586 | 2,534 | py | Python | webapi.py | rhalkyard/gmdrec | d81f0c714d302f655660f15d1e62d2c3fbe40e33 | [
"BSD-3-Clause"
] | null | null | null | webapi.py | rhalkyard/gmdrec | d81f0c714d302f655660f15d1e62d2c3fbe40e33 | [
"BSD-3-Clause"
] | null | null | null | webapi.py | rhalkyard/gmdrec | d81f0c714d302f655660f15d1e62d2c3fbe40e33 | [
"BSD-3-Clause"
] | null | null | null | # Talking to the music player and sanitizing data.
import datetime
import requests
from requests.exceptions import Timeout
from unihandecode import Unihandecoder
from settings import server_url
| 36.724638 | 103 | 0.670876 |
3d10fbe580e5ebf53db4ece3b62cd88392386b54 | 7,239 | py | Python | puresnmp/aio/api/pythonic.py | badboybeyer/puresnmp | 2f2757e0d064f1017f86e0e07661ac8e3c9f2eca | [
"MIT"
] | null | null | null | puresnmp/aio/api/pythonic.py | badboybeyer/puresnmp | 2f2757e0d064f1017f86e0e07661ac8e3c9f2eca | [
"MIT"
] | null | null | null | puresnmp/aio/api/pythonic.py | badboybeyer/puresnmp | 2f2757e0d064f1017f86e0e07661ac8e3c9f2eca | [
"MIT"
] | null | null | null | """
This module contains the high-level functions to access the library with
asyncio. Care is taken to make this as pythonic as possible and hide as many
of the gory implementations as possible.
This module provides "syntactic sugar" around the lower-level, but almost
identical, module :py:mod:`puresnmp.aio.api.raw`. The "raw" module
returns the variable types unmodified which are all subclasses of
:py:class:`puresnmp.x690.types.Type`.
"""
# TODO (advanced): This module should not make use of its own functions. The
# module exists as an abstraction layer only. If one function uses a
# "sibling" function, valuable information is lost. In general, this module
# is beginning to be too "thick", containing too much business logic for a
# mere abstraction layer.
from __future__ import unicode_literals
import logging
from collections import OrderedDict
from datetime import datetime, timedelta
from typing import TYPE_CHECKING
from . import raw
from ...pdu import VarBind
from ...util import BulkResult
from ...x690.types import Type
from ...x690.util import tablify
if TYPE_CHECKING:  # pragma: no cover
    # pylint: disable=unused-import, invalid-name
    from typing import Any, Callable, Dict, Generator, List, Tuple, Union
    # Static-analysis-only alias for the plain Python types this module
    # converts SNMP values into.
    Pythonized = Union[str, bytes, int, datetime, timedelta]

try:
    # Python 2 provides a ``unicode`` builtin; referencing it raises
    # NameError on Python 3 ...
    unicode  # type: Callable[[Any], str]
except NameError:
    # ... in which case ``str`` fills the same role.
    # pylint: disable=invalid-name
    unicode = str  # type: Callable[[Any], str]

# Keep a handle on the builtin ``set`` type -- presumably the name ``set``
# is shadowed elsewhere in this module (not visible here); confirm before
# removing.
_set = set

LOG = logging.getLogger(__name__)
| 35.485294 | 135 | 0.668739 |
3d11c0e83e935667e3b5fa635f505dab77f68c4f | 471 | py | Python | dataset_models/normalization/aroundZeroNormalizer.py | Zvezdin/blockchain-predictor | df6f939037471dd50b7b9c96673d89b04b646ef2 | [
"MIT"
] | 35 | 2017-10-25T17:10:35.000Z | 2022-03-20T18:12:06.000Z | dataset_models/normalization/aroundZeroNormalizer.py | Zvezdin/blockchain-predictor | df6f939037471dd50b7b9c96673d89b04b646ef2 | [
"MIT"
] | 2 | 2017-09-20T17:39:15.000Z | 2018-04-01T17:20:29.000Z | dataset_models/normalization/aroundZeroNormalizer.py | Zvezdin/blockchain-predictor | df6f939037471dd50b7b9c96673d89b04b646ef2 | [
"MIT"
] | 10 | 2017-12-01T13:47:04.000Z | 2021-12-16T06:53:17.000Z | import numpy as np
from .normalizer import Normalizer
| 23.55 | 68 | 0.694268 |
3d12eb10a495e1684e9f8cb66cfdd0b53f9884df | 1,160 | py | Python | viz/scripts/closest_over_time_with_shading.py | zhicongchen/histwords | 12fb83492fdccca795d266966a8b58c13f81c54c | [
"Apache-2.0"
] | 2 | 2022-01-05T10:32:56.000Z | 2022-02-14T16:45:59.000Z | viz/scripts/closest_over_time_with_shading.py | zhicongchen/histwords | 12fb83492fdccca795d266966a8b58c13f81c54c | [
"Apache-2.0"
] | null | null | null | viz/scripts/closest_over_time_with_shading.py | zhicongchen/histwords | 12fb83492fdccca795d266966a8b58c13f81c54c | [
"Apache-2.0"
] | null | null | null | import helpers
import sys
from representations.sequentialembedding import SequentialEmbedding
"""
Let's examine the closest neighbors for a word over time
"""
import collections
from sklearn.manifold import TSNE
import numpy as np
import matplotlib.pyplot as plt
# Words to visualize, supplied by the project-local helpers module.
WORDS = helpers.get_words()

if __name__ == "__main__":
    embeddings = helpers.load_embeddings()

    for word1 in WORDS:
        # Per-period similarities and embedding lookups for word1
        # (structure defined by helpers.get_time_sims -- confirm there).
        time_sims, lookups, nearests, sims = helpers.get_time_sims(embeddings, word1)
        helpers.clear_figure()

        # we remove word1 from our words because we just want to plot the different
        # related words
        # (keys look like "word|period" strings, hence the split on "|")
        words = filter(lambda word: word.split("|")[0] != word1, lookups.keys())
        words = list(words)
        values = [lookups[word] for word in words]

        # 2-D projection of the neighbour vectors for plotting.
        fitted = helpers.fit_tsne(values)
        if not len(fitted):
            print("Couldn't model word", word1)
            continue

        # Shade each neighbour by similarity and save one figure per word.
        cmap = helpers.get_cmap(len(time_sims))
        annotations = helpers.plot_words(word1, words, fitted, cmap, sims)
        helpers.savefig("%s_shaded" % word1)

        for year, sim in time_sims.items():
            print(year, sim)
| 28.292683 | 85 | 0.667241 |
3d1335d9fc99401818ca88efe979fffcb933a101 | 10,108 | py | Python | Products/LDAPUserFolder/interfaces.py | phgv/Products.LDAPUserFolder | eb9db778916f47a80b3df069a31d0a2100b26423 | [
"ZPL-2.1"
] | null | null | null | Products/LDAPUserFolder/interfaces.py | phgv/Products.LDAPUserFolder | eb9db778916f47a80b3df069a31d0a2100b26423 | [
"ZPL-2.1"
] | null | null | null | Products/LDAPUserFolder/interfaces.py | phgv/Products.LDAPUserFolder | eb9db778916f47a80b3df069a31d0a2100b26423 | [
"ZPL-2.1"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2000-2009 Jens Vagelpohl and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" Interfaces for LDAPUserFolder package classes
"""
from AccessControl.interfaces import IStandardUserFolder
from AccessControl.interfaces import IUser
| 38.580153 | 79 | 0.662347 |
3d162a8de2cf611aacdd649aadbeb0516127e28a | 461 | py | Python | arequests/exceptions.py | fhag/telegram2 | 65a685637b444e40ef47a17c2a3b83c2ddb81459 | [
"BSD-2-Clause"
] | null | null | null | arequests/exceptions.py | fhag/telegram2 | 65a685637b444e40ef47a17c2a3b83c2ddb81459 | [
"BSD-2-Clause"
] | null | null | null | arequests/exceptions.py | fhag/telegram2 | 65a685637b444e40ef47a17c2a3b83c2ddb81459 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Exceptions for Arequests
Created on Tue Nov 13 08:34:14 2018
@author: gfi
"""
| 19.208333 | 56 | 0.694143 |
3d178f904fde1c17f64bf8f943648ae02b442d5e | 5,014 | py | Python | NASA/Python_codes/drivers/02_remove_outliers_n_jumps/01_intersect_remove_jumps_JFD/01_remove_jumps_JFD_intersect.py | HNoorazar/Kirti | fb7108dac1190774bd90a527aaa8a3cb405f127d | [
"MIT"
] | null | null | null | NASA/Python_codes/drivers/02_remove_outliers_n_jumps/01_intersect_remove_jumps_JFD/01_remove_jumps_JFD_intersect.py | HNoorazar/Kirti | fb7108dac1190774bd90a527aaa8a3cb405f127d | [
"MIT"
] | null | null | null | NASA/Python_codes/drivers/02_remove_outliers_n_jumps/01_intersect_remove_jumps_JFD/01_remove_jumps_JFD_intersect.py | HNoorazar/Kirti | fb7108dac1190774bd90a527aaa8a3cb405f127d | [
"MIT"
] | null | null | null | ####
#### Nov 16, 2021
####
"""
Regularize the EVI and NDVI of fields in individual years for training set creation.
"""
# Batch driver: removes large day-to-day jumps from the chosen vegetation
# index time series (EVI or NDVI) for one batch of field IDs and writes the
# cleaned series to CSV.  Intended to run on the Aeolus cluster.
import csv
import numpy as np
import pandas as pd
from math import factorial
import scipy
import scipy.signal
import os, os.path
from datetime import date
import datetime
import time
import sys

start_time = time.time()

# search path for modules
# look @ https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
####################################################################################
###
### Aeolus Core path
###
####################################################################################
sys.path.append('/home/hnoorazar/NASA/')
import NASA_core as nc
import NASA_plot_core as ncp

####################################################################################
###
### Parameters
###
####################################################################################
# argv[1]: vegetation index to clean ("NDVI" or "EVI");
# argv[2]: 1-based batch number selecting which slice of field IDs to process.
indeks = sys.argv[1]
batch_number = int(sys.argv[2])

print ("Terminal Arguments are: ")
print (indeks)
print (batch_number)
print ("__________________________________________")

# NoVI is the *other* index column; it is dropped from the data below.
if indeks == "NDVI":
    NoVI = "EVI"
else:
    NoVI = "NDVI"
IDcolName = "ID"

####################################################################################
###
### Aeolus Directories
###
####################################################################################
data_base = "/data/hydro/users/Hossein/NASA/"
data_dir = data_base + "/02_outliers_removed/"
SF_data_dir = "/data/hydro/users/Hossein/NASA/000_shapefile_data_part/"
output_dir = data_base + "/03_jumps_removed/"
os.makedirs(output_dir, exist_ok=True)

print ("data_dir is: " + data_dir)
print ("output_dir is: " + output_dir)

########################################################################################
###
### process data
###
########################################################################################
# All field IDs, sorted so that batch slicing is deterministic.
SF_data_IDs = pd.read_csv(SF_data_dir + "10_intersect_East_Irr_2008_2018_2cols_data_part.csv")
SF_data_IDs.sort_values(by=['ID'], inplace=True)
SF_data_IDs.reset_index(drop=True, inplace=True)

# there are 69271 rows in total (hard-coded); ceil-split into 40 batches and
# take the slice belonging to this batch_number.
batch_size = int(np.ceil(69271/40))
batch_IDs = SF_data_IDs.loc[(batch_number-1)*batch_size : (batch_number*batch_size-1)]

out_name = output_dir + "NoJump_intersect_" + indeks + "_JFD.csv"

# Outlier-removed inputs, one file per Landsat mission (L5/L7/L8).
common_part = "T1C2L2_inters_2008_2018_EastIrr_2008-01-01_2022-01-01"
f_names = ["noOutlier_" + "L5_" + common_part + "_" + indeks + ".csv",
           "noOutlier_" + "L7_" + common_part + "_" + indeks + ".csv",
           "noOutlier_" + "L8_" + common_part + "_" + indeks + ".csv"]

L5 = pd.read_csv(data_dir + f_names[0], low_memory=False)
L7 = pd.read_csv(data_dir + f_names[1], low_memory=False)
L8 = pd.read_csv(data_dir + f_names[2], low_memory=False)

# Keep only the chosen index column and drop rows where it is missing.
L5.drop([NoVI], axis=1, inplace=True)
L5 = L5[L5[indeks].notna()]
L7.drop([NoVI], axis=1, inplace=True)
L7 = L7[L7[indeks].notna()]
L8.drop([NoVI], axis=1, inplace=True)
L8 = L8[L8[indeks].notna()]

# Stack the three missions and restrict to this batch's field IDs.
L578 = pd.concat([L5, L7, L8])
del(L5, L7, L8)
L578['human_system_start_time'] = pd.to_datetime(L578['human_system_start_time'])
L578["ID"] = L578["ID"].astype(str)
L578 = L578[L578.ID.isin(list(batch_IDs.ID))].copy()

########################################################################################
###
### List of unique polygons
###
IDs = L578[IDcolName].unique()
print(len(IDs))

########################################################################################
###
### initialize output data.
###
output_df = pd.DataFrame(data = None,
                         index = np.arange(L578.shape[0]),
                         columns = L578.columns)
counter = 0
row_pointer = 0
for a_poly in IDs:
    if (counter % 1000 == 0):
        print (counter)
    curr_field = L578[L578[IDcolName]==a_poly].copy()
    ################################################################
    # Sort by DoY (sanitary check)
    curr_field.sort_values(by=['human_system_start_time'], inplace=True)
    curr_field.reset_index(drop=True, inplace=True)
    ################################################################
    # Remove day-to-day jumps larger than maxjump_perDay from the series
    # (semantics defined in NASA_core -- confirm there).
    no_Outlier_TS = nc.correct_big_jumps_1DaySeries_JFD(dataTMS_jumpie = curr_field,
                                                        give_col = indeks,
                                                        maxjump_perDay = 0.018)
    # Write this field's cleaned rows into the preallocated output frame.
    output_df[row_pointer: row_pointer + curr_field.shape[0]] = no_Outlier_TS.values
    counter += 1
    row_pointer += curr_field.shape[0]

####################################################################################
###
### Write the outputs
###
####################################################################################
output_df.drop_duplicates(inplace=True)
output_df.to_csv(out_name, index = False)

end_time = time.time()
print ("it took {:.0f} minutes to run this code.".format((end_time - start_time)/60))
| 31.936306 | 94 | 0.500798 |
3d17f39c53c3cfba5e53d1120441f6ea46dbc0cf | 7,227 | py | Python | pyquante2/ints/one.py | Konjkov/pyquante2 | 4ca0c8c078cafb769d20a4624b9bd907a748b1a2 | [
"BSD-3-Clause"
] | null | null | null | pyquante2/ints/one.py | Konjkov/pyquante2 | 4ca0c8c078cafb769d20a4624b9bd907a748b1a2 | [
"BSD-3-Clause"
] | null | null | null | pyquante2/ints/one.py | Konjkov/pyquante2 | 4ca0c8c078cafb769d20a4624b9bd907a748b1a2 | [
"BSD-3-Clause"
] | null | null | null | """
One electron integrals.
"""
from numpy import pi,exp,floor,array,isclose
from math import factorial
from pyquante2.utils import binomial, fact2, Fgamma, norm2
# Notes:
# The versions S,T,V include the normalization constants
# The version overlap,kinetic,nuclear_attraction do not.
# This is so, for example, the kinetic routines can call the potential routines
# without the normalization constants getting in the way.
def S(a,b):
    """
    Overlap integral <a|b> for primitive or contracted basis functions.

    Contracted functions are expanded into their primitives and the primitive
    overlaps accumulated with the contraction coefficients; the overlap is
    symmetric, so the argument order may be swapped during the expansion.

    >>> from pyquante2 import pgbf,cgbf
    >>> s = pgbf(1)
    >>> isclose(S(s,s),1.0)
    True

    >>> sc = cgbf(exps=[1],coefs=[1])
    >>> isclose(S(sc,sc),1.0)
    True

    >>> sc = cgbf(exps=[1],coefs=[1])
    >>> isclose(S(sc,s),1.0)
    True
    >>> isclose(S(s,sc),1.0)
    True
    """
    # Expand one contracted side at a time via recursion.
    if b.contracted:
        total = 0
        for coef, prim in b:
            total += coef * S(prim, a)
        return total
    if a.contracted:
        total = 0
        for coef, prim in a:
            total += coef * S(b, prim)
        return total
    # Both primitives: delegate to the unnormalized integral and apply norms.
    return a.norm * b.norm * overlap(a.exponent, a.powers, a.origin,
                                     b.exponent, b.powers, b.origin)
def T(a,b):
    """
    Kinetic-energy integral <a|T|b> for primitive or contracted basis functions.

    Contracted functions are expanded into their primitives; the operator is
    Hermitian over real basis functions, so the argument order may be swapped
    during the expansion.

    >>> from pyquante2 import pgbf,cgbf
    >>> from pyquante2.basis.pgbf import pgbf
    >>> s = pgbf(1)
    >>> isclose(T(s,s),1.5)
    True

    >>> sc = cgbf(exps=[1],coefs=[1])
    >>> isclose(T(sc,sc),1.5)
    True

    >>> sc = cgbf(exps=[1],coefs=[1])
    >>> isclose(T(sc,s),1.5)
    True
    >>> isclose(T(s,sc),1.5)
    True
    """
    # Expand one contracted side at a time via recursion.
    if b.contracted:
        total = 0
        for coef, prim in b:
            total += coef * T(prim, a)
        return total
    if a.contracted:
        total = 0
        for coef, prim in a:
            total += coef * T(b, prim)
        return total
    # Both primitives: delegate to the unnormalized integral and apply norms.
    return a.norm * b.norm * kinetic(a.exponent, a.powers, a.origin,
                                     b.exponent, b.powers, b.origin)
def V(a,b,C):
    """
    Nuclear-attraction integral <a|1/|r-C||b> for primitive or contracted
    basis functions, with the nucleus at C.

    Contracted functions are expanded into their primitives; the integral is
    symmetric in the two basis functions, so their order may be swapped
    during the expansion.

    >>> from pyquante2 import pgbf,cgbf
    >>> s = pgbf(1)
    >>> isclose(V(s,s,(0,0,0)),-1.595769)
    True

    >>> sc = cgbf(exps=[1],coefs=[1])
    >>> isclose(V(sc,sc,(0,0,0)),-1.595769)
    True

    >>> sc = cgbf(exps=[1],coefs=[1])
    >>> isclose(V(sc,s,(0,0,0)),-1.595769)
    True
    >>> isclose(V(s,sc,(0,0,0)),-1.595769)
    True
    """
    # Expand one contracted side at a time via recursion.
    if b.contracted:
        total = 0
        for coef, prim in b:
            total += coef * V(prim, a, C)
        return total
    if a.contracted:
        total = 0
        for coef, prim in a:
            total += coef * V(b, prim, C)
        return total
    # Both primitives: delegate to the unnormalized integral and apply norms.
    return a.norm * b.norm * nuclear_attraction(a.exponent, a.powers, a.origin,
                                                b.exponent, b.powers, b.origin, C)
def overlap(alpha1,lmn1,A,alpha2,lmn2,B):
    """
    Full form of the overlap integral. Taken from THO eq. 2.12

    The integral factorizes into a Gaussian-product prefactor times one
    one-dimensional overlap factor per Cartesian axis.

    >>> isclose(overlap(1,(0,0,0),array((0,0,0),'d'),1,(0,0,0),array((0,0,0),'d')),1.968701)
    True
    """
    gamma = alpha1 + alpha2
    P = gaussian_product_center(alpha1, A, alpha2, B)
    rab2 = norm2(A - B)
    # Prefactor from the Gaussian product theorem.
    result = pow(pi / gamma, 1.5) * exp(-alpha1 * alpha2 * rab2 / gamma)
    # Multiply in the 1-D overlap for each Cartesian direction.
    for axis in range(3):
        result *= overlap1d(lmn1[axis], lmn2[axis],
                            P[axis] - A[axis], P[axis] - B[axis], gamma)
    return result
def overlap1d(l1,l2,PAx,PBx,gamma):
    """
    The one-dimensional component of the overlap integral. Taken from THO eq. 2.12

    >>> isclose(overlap1d(0,0,0,0,1),1.0)
    True
    """
    # Sum runs over even powers 2*i up to l1+l2 (odd moments vanish).
    upper = 1 + int(floor(0.5 * (l1 + l2)))
    return sum(
        binomial_prefactor(2 * i, l1, l2, PAx, PBx) * fact2(2 * i - 1) / pow(2 * gamma, i)
        for i in range(upper)
    )
def gaussian_product_center(alpha1,A,alpha2,B):
    """
    The center of the Gaussian resulting from the product of two Gaussians:

    >>> gaussian_product_center(1,array((0,0,0),'d'),1,array((0,0,0),'d'))
    array([ 0.,  0.,  0.])
    """
    # Exponent-weighted average of the two centers.
    total_exponent = alpha1 + alpha2
    weighted_center = alpha1 * A + alpha2 * B
    return weighted_center / total_exponent
def binomial_prefactor(s,ia,ib,xpa,xpb):
    """
    The integral prefactor containing the binomial coefficients from Augspurger and Dykstra.

    >>> binomial_prefactor(0,0,0,0,0)
    1
    """
    # Only terms with s-ia <= t <= ib have a nonzero binomial contribution.
    return sum(
        binomial(ia, s - t) * binomial(ib, t) * pow(xpa, ia - s + t) * pow(xpb, ib - t)
        for t in range(s + 1)
        if s - ia <= t <= ib
    )
def kinetic(alpha1,lmn1,A,alpha2,lmn2,B):
    """
    The full form of the kinetic energy integral, expressed as a sum of
    overlap integrals with shifted angular-momentum quanta on the second
    function (the operator differentiates the second Gaussian twice).

    :param alpha1: exponent of the first primitive Gaussian
    :param lmn1: (l,m,n) angular-momentum powers of the first Gaussian
    :param A: origin (center) of the first Gaussian
    :param alpha2, lmn2, B: same quantities for the second Gaussian
    :return: the unnormalized kinetic-energy integral

    >>> isclose(kinetic(1,(0,0,0),array((0,0,0),'d'),1,(0,0,0),array((0,0,0),'d')),2.953052)
    True
    """
    l1,m1,n1 = lmn1
    l2,m2,n2 = lmn2
    # term0: contribution proportional to the plain overlap.
    term0 = alpha2*(2*(l2+m2+n2)+3)*\
            overlap(alpha1,(l1,m1,n1),A,\
                    alpha2,(l2,m2,n2),B)
    # term1: overlaps with angular momentum raised by 2 on each axis in turn.
    term1 = -2*pow(alpha2,2)*\
            (overlap(alpha1,(l1,m1,n1),A,
                     alpha2,(l2+2,m2,n2),B)
             + overlap(alpha1,(l1,m1,n1),A,
                       alpha2,(l2,m2+2,n2),B)
             + overlap(alpha1,(l1,m1,n1),A,
                       alpha2,(l2,m2,n2+2),B))
    # term2: overlaps with angular momentum lowered by 2; the l2*(l2-1)-style
    # factors vanish automatically when the power is 0 or 1.
    term2 = -0.5*(l2*(l2-1)*overlap(alpha1,(l1,m1,n1),A,
                                    alpha2,(l2-2,m2,n2),B) +
                  m2*(m2-1)*overlap(alpha1,(l1,m1,n1),A,
                                    alpha2,(l2,m2-2,n2),B) +
                  n2*(n2-1)*overlap(alpha1,(l1,m1,n1),A,
                                    alpha2,(l2,m2,n2-2),B))
    return term0+term1+term2
def nuclear_attraction(alpha1,lmn1,A,alpha2,lmn2,B,C):
    """
    Full form of the nuclear attraction integral between two primitive
    Gaussians with a nucleus at C, evaluated via the A-array expansion and
    the incomplete gamma function Fgamma.

    :param alpha1, lmn1, A: exponent, (l,m,n) powers and center of Gaussian 1
    :param alpha2, lmn2, B: exponent, (l,m,n) powers and center of Gaussian 2
    :param C: position of the attracting nucleus
    :return: the unnormalized nuclear-attraction integral

    >>> isclose(nuclear_attraction(1,(0,0,0),array((0,0,0),'d'),1,(0,0,0),array((0,0,0),'d'),array((0,0,0),'d')),-3.141593)
    True
    """
    l1,m1,n1 = lmn1
    l2,m2,n2 = lmn2
    gamma = alpha1+alpha2
    # P is the Gaussian product center; rab2/rcp2 are squared distances.
    P = gaussian_product_center(alpha1,A,alpha2,B)
    rab2 = norm2(A-B)
    rcp2 = norm2(C-P)
    dPA = P-A
    dPB = P-B
    dPC = P-C
    # Per-axis expansion coefficients (THO eq. 3.1).
    Ax = A_array(l1,l2,dPA[0],dPB[0],dPC[0],gamma)
    Ay = A_array(m1,m2,dPA[1],dPB[1],dPC[1],gamma)
    Az = A_array(n1,n2,dPA[2],dPB[2],dPC[2],gamma)
    # Contract the three axis expansions against the Boys function Fgamma.
    total = 0.
    for I in range(l1+l2+1):
        for J in range(m1+m2+1):
            for K in range(n1+n2+1):
                total += Ax[I]*Ay[J]*Az[K]*Fgamma(I+J+K,rcp2*gamma)
    val= -2*pi/gamma*exp(-alpha1*alpha2*rab2/gamma)*total
    return val
def A_term(i,r,u,l1,l2,PAx,PBx,CPx,gamma):
    """
    Single term of the A-array expansion for the nuclear-attraction
    integral. THO eq. 2.18

    :param i, r, u: summation indices of the expansion
    :param l1, l2: angular-momentum powers along this axis
    :param PAx, PBx, CPx: axis components of P-A, P-B and C-P
    :param gamma: combined exponent alpha1+alpha2

    >>> A_term(0,0,0,0,0,0,0,0,1)
    1.0
    >>> A_term(0,0,0,0,1,1,1,1,1)
    1.0
    >>> A_term(1,0,0,0,1,1,1,1,1)
    -1.0
    >>> A_term(0,0,0,1,1,1,1,1,1)
    1.0
    >>> A_term(1,0,0,1,1,1,1,1,1)
    -2.0
    >>> A_term(2,0,0,1,1,1,1,1,1)
    1.0
    >>> A_term(2,0,1,1,1,1,1,1,1)
    -0.5
    >>> A_term(2,1,0,1,1,1,1,1,1)
    0.5
    """
    # Direct transcription of THO eq. 2.18; the factorials implement the
    # multinomial weights of the (i, r, u) index triple.
    return pow(-1,i)*binomial_prefactor(i,l1,l2,PAx,PBx)*\
           pow(-1,u)*factorial(i)*pow(CPx,i-2*r-2*u)*\
           pow(0.25/gamma,r+u)/factorial(r)/factorial(u)/factorial(i-2*r-2*u)
def A_array(l1,l2,PA,PB,CP,g):
    """
    The full A-array of expansion coefficients for one Cartesian axis.
    THO eq. 2.18 and 3.1

    >>> A_array(0,0,0,0,0,1)
    [1.0]
    >>> A_array(0,1,1,1,1,1)
    [1.0, -1.0]
    >>> A_array(1,1,1,1,1,1)
    [1.5, -2.5, 1.0]
    """
    size = l1 + l2 + 1
    coeffs = [0] * size
    # Accumulate every (i, r, u) term into slot i - 2r - u.
    for i in range(size):
        for r in range(i // 2 + 1):
            for u in range((i - 2 * r) // 2 + 1):
                index = i - 2 * r - u
                coeffs[index] += A_term(i, r, u, l1, l2, PA, PB, CP, g)
    return coeffs
# Run the embedded doctests when this module is executed directly.
if __name__ == '__main__':
    import doctest; doctest.testmod()
| 28.908 | 123 | 0.530372 |
3d189022514ffa92e24cccd1441a05b0577b4e2e | 2,169 | py | Python | tests/test_pyhive_runBCFTools_VC.py | elowy01/igsr_analysis | ffea4885227c2299f886a4f41e70b6e1f6bb43da | [
"Apache-2.0"
] | 3 | 2018-04-20T15:04:34.000Z | 2022-03-30T06:36:02.000Z | tests/test_pyhive_runBCFTools_VC.py | elowy01/igsr_analysis | ffea4885227c2299f886a4f41e70b6e1f6bb43da | [
"Apache-2.0"
] | 7 | 2019-06-06T09:22:20.000Z | 2021-11-23T17:41:52.000Z | tests/test_pyhive_runBCFTools_VC.py | elowy01/igsr_analysis | ffea4885227c2299f886a4f41e70b6e1f6bb43da | [
"Apache-2.0"
] | 5 | 2017-11-02T11:17:35.000Z | 2021-12-11T19:34:09.000Z | import os
import subprocess
import glob
import pytest
# test_pyhive_runBCFTools_VC.py
def test_runBCFTools_VC(bcftools_folder, hive_dir, datadir, clean_tmp):
    """
    Test function to run BCFTools mpileup|call on a BAM file

    :param bcftools_folder: fixture providing the folder with the bcftools binary
    :param hive_dir: fixture providing the eHive codebase directory
    :param datadir: fixture providing the test-data directory
    :param clean_tmp: fixture that cleans up temporary files after the test
    """
    bam_file = "{0}/exampleBAM.bam".format(datadir)
    reference = "{0}/exampleFASTA.fasta".format(datadir)
    work_dir = "{0}/outdir/".format(datadir)
    annots = "\"['DP','SP','AD']\""
    command = "perl {0}/scripts/standaloneJob.pl PyHive.VariantCalling.BCFTools_caller -language python3 \
    -outprefix {1} -work_dir {2} -chunk {3} -bam {4} -reference {5} \
    -bcftools_folder {6} -annots {7} -verbose True".format(hive_dir, 'out', work_dir,
                                                           "\"['chr1','10000','30000']\"", bam_file,
                                                           reference, bcftools_folder, annots)
    try:
        subprocess.check_output(command, shell=True)
    except subprocess.CalledProcessError as exc:
        # Fail the test with the captured process output. Previously an
        # unconditional 'assert False' fired first, which made the following
        # 'raise Exception(exc.output)' unreachable and discarded the output.
        raise AssertionError(exc.output) from exc
def test_runBCFTools_VC_woptions(bcftools_folder, hive_dir, datadir, clean_tmp):
    """
    Test function to run BCFTools mpileup|call on a BAM file
    using some options and arguments

    :param bcftools_folder: fixture providing the folder with the bcftools binary
    :param hive_dir: fixture providing the eHive codebase directory
    :param datadir: fixture providing the test-data directory
    :param clean_tmp: fixture that cleans up temporary files after the test
    """
    bam_file = "{0}/exampleBAM.bam".format(datadir)
    reference = "{0}/exampleFASTA.fasta".format(datadir)
    work_dir = "{0}/outdir/".format(datadir)
    annots = "\"['DP','SP','AD']\""
    command = "perl {0}/scripts/standaloneJob.pl PyHive.VariantCalling.BCFTools_caller -language python3 \
    -outprefix {1} -work_dir {2} -chunk {3} -bam {4} -reference {5} \
    -bcftools_folder {6} -annots {7} -E 1 -p 1 -m_pileup 3 -m_call 1 -v 1 " \
              "-F 0.05 -C 25 -verbose True".format(hive_dir, 'out', work_dir,
                                                   "\"['chr1','10000','30000']\"", bam_file,
                                                   reference, bcftools_folder, annots)
    try:
        subprocess.check_output(command, shell=True)
    except subprocess.CalledProcessError as exc:
        # Fail the test with the captured process output. Previously an
        # unconditional 'assert False' fired first, which made the following
        # 'raise Exception(exc.output)' unreachable and discarded the output.
        raise AssertionError(exc.output) from exc
3d192b649d8b6388f0dcd7b9e46896429e77993c | 2,497 | py | Python | src/sentry/api/endpoints/organization_projects.py | seukjung/sentry-custom | c5f6bb2019aef3caff7f3e2b619f7a70f2b9b963 | [
"BSD-3-Clause"
] | 1 | 2021-01-13T15:40:03.000Z | 2021-01-13T15:40:03.000Z | src/sentry/api/endpoints/organization_projects.py | fotinakis/sentry | c5cfa5c5e47475bf5ef41e702548c2dfc7bb8a7c | [
"BSD-3-Clause"
] | 8 | 2019-12-28T23:49:55.000Z | 2022-03-02T04:34:18.000Z | src/sentry/api/endpoints/organization_projects.py | fotinakis/sentry | c5cfa5c5e47475bf5ef41e702548c2dfc7bb8a7c | [
"BSD-3-Clause"
] | 1 | 2017-04-08T04:09:18.000Z | 2017-04-08T04:09:18.000Z | from __future__ import absolute_import
import six
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.api.serializers import serialize
from sentry.models import Project, Team
from sentry.utils.apidocs import scenario, attach_scenarios
| 35.169014 | 87 | 0.60793 |
3d19367388f755b58d5ae7968cf859f7a856e8cf | 3,708 | py | Python | account/models.py | Vicynet/kwiktalk | 198efdd5965cc0cd3ee8dcf5e469d9022330ec25 | [
"bzip2-1.0.6"
] | null | null | null | account/models.py | Vicynet/kwiktalk | 198efdd5965cc0cd3ee8dcf5e469d9022330ec25 | [
"bzip2-1.0.6"
] | null | null | null | account/models.py | Vicynet/kwiktalk | 198efdd5965cc0cd3ee8dcf5e469d9022330ec25 | [
"bzip2-1.0.6"
] | null | null | null | from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.shortcuts import render
from cloudinary.models import CloudinaryField
from .utils import get_random_code
from django.template.defaultfilters import slugify
from django.contrib.auth import get_user_model
from kwikposts.models import KwikPost, Comment, Like
from django.db.models import Q
# Create your models here.
# Django-style choices for a relationship/friend-request status field:
# (stored value, human-readable label).
# NOTE(review): 'send' looks like it may be intended as 'sent' — confirm
# against the model(s) and templates that consume these choices.
STATUS_CHOICES = (
    ('send', 'send'),
    ('accepted', 'accepted')
)
| 35.653846 | 94 | 0.682848 |
3d1a374772b07f26b88bbef32d5d37abe99122f6 | 6,708 | py | Python | senta/data/field_reader/generate_label_field_reader.py | zgzwelldone/Senta | e01986dd17217bed82023c81d06588d63e0e19c7 | [
"Apache-2.0"
] | null | null | null | senta/data/field_reader/generate_label_field_reader.py | zgzwelldone/Senta | e01986dd17217bed82023c81d06588d63e0e19c7 | [
"Apache-2.0"
] | null | null | null | senta/data/field_reader/generate_label_field_reader.py | zgzwelldone/Senta | e01986dd17217bed82023c81d06588d63e0e19c7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*
"""
:py:class:`GenerateLabelFieldReader`
"""
import numpy as np
from senta.common.register import RegisterSet
from senta.common.rule import DataShape, FieldLength, InstanceName
from senta.data.field_reader.base_field_reader import BaseFieldReader
from senta.data.util_helper import generate_pad_batch_data
from senta.modules.token_embedding.custom_fluid_embedding import CustomFluidTokenEmbedding
| 42.455696 | 125 | 0.595707 |
3d1af19d66ed56a399f8f9e67b61d733395f81e4 | 1,270 | py | Python | algorithm/python/alphabet_board_path.py | cocoa-maemae/leetcode | b7724b4d10387797167b18ec36d77e7418a6d85a | [
"MIT"
] | 1 | 2021-09-29T11:22:02.000Z | 2021-09-29T11:22:02.000Z | algorithm/python/alphabet_board_path.py | cocoa-maemae/leetcode | b7724b4d10387797167b18ec36d77e7418a6d85a | [
"MIT"
] | null | null | null | algorithm/python/alphabet_board_path.py | cocoa-maemae/leetcode | b7724b4d10387797167b18ec36d77e7418a6d85a | [
"MIT"
] | null | null | null |
if __name__ == "__main__":
main()
| 23.090909 | 84 | 0.451969 |
3d1b05e27dcbcf9ee33da727db0c1fba95fb1a61 | 20,881 | py | Python | src/virtual-wan/azext_vwan/custom.py | michimune/azure-cli-extensions | 697e2c674e5c0825d44c72d714542fe01331e107 | [
"MIT"
] | 1 | 2022-03-22T15:02:32.000Z | 2022-03-22T15:02:32.000Z | src/virtual-wan/azext_vwan/custom.py | michimune/azure-cli-extensions | 697e2c674e5c0825d44c72d714542fe01331e107 | [
"MIT"
] | 1 | 2021-02-10T22:04:59.000Z | 2021-02-10T22:04:59.000Z | src/virtual-wan/azext_vwan/custom.py | michimune/azure-cli-extensions | 697e2c674e5c0825d44c72d714542fe01331e107 | [
"MIT"
] | 1 | 2021-06-03T19:31:10.000Z | 2021-06-03T19:31:10.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.util import CLIError
from knack.log import get_logger
from azure.cli.core.util import sdk_no_wait
from ._client_factory import network_client_factory, network_client_route_table_factory
from ._util import _get_property
logger = get_logger(__name__)
# region VirtualWAN
# endregion
# region VirtualHubs
# pylint: disable=inconsistent-return-statements
# pylint: disable=inconsistent-return-statements
# pylint: disable=inconsistent-return-statements
# pylint: disable=inconsistent-return-statements
# pylint: disable=inconsistent-return-statements
# endregion
# region VpnGateways
# pylint: disable=inconsistent-return-statements
# pylint: disable=inconsistent-return-statements
# endregion
# region VpnSites
# endregion
| 44.239407 | 118 | 0.696135 |
3d1b0b413997d06798ff0dafc9e1b1d24a206754 | 720 | py | Python | aiogram/contrib/middlewares/environment.py | muhammedfurkan/aiogram | 692c1340b4dda556da640e5f9ea2200848c06840 | [
"MIT"
] | null | null | null | aiogram/contrib/middlewares/environment.py | muhammedfurkan/aiogram | 692c1340b4dda556da640e5f9ea2200848c06840 | [
"MIT"
] | 4 | 2020-11-04T15:55:55.000Z | 2020-11-08T21:36:02.000Z | aiogram/contrib/middlewares/environment.py | muhammedfurkan/aiogram | 692c1340b4dda556da640e5f9ea2200848c06840 | [
"MIT"
] | null | null | null | import asyncio
from aiogram.dispatcher.middlewares import BaseMiddleware
| 28.8 | 71 | 0.6375 |
3d1b7856aab4b6896a8bd50f1e84b7518ab5535b | 21 | py | Python | custom_components/ztm/__init__.py | peetereczek/ztm | 1fd4870720dca16863d085759a360f1ebdd9ab1f | [
"MIT"
] | 4 | 2020-02-23T08:08:12.000Z | 2021-06-26T15:46:27.000Z | custom_components/ztm/__init__.py | peetereczek/ztm | 1fd4870720dca16863d085759a360f1ebdd9ab1f | [
"MIT"
] | 15 | 2020-01-30T09:54:58.000Z | 2022-02-02T11:13:32.000Z | custom_components/ztm/__init__.py | peetereczek/ztm | 1fd4870720dca16863d085759a360f1ebdd9ab1f | [
"MIT"
] | 1 | 2022-01-17T08:51:34.000Z | 2022-01-17T08:51:34.000Z | """
module init
""" | 7 | 12 | 0.47619 |
3d1bc451ecce134829f141f42c2d16c8641046f1 | 899 | py | Python | Aula37/Controller/squad_controller.py | PabloSchumacher/TrabalhosPython | 828edd35eb40442629211bc9f1477f75fb025d74 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | Aula37/Controller/squad_controller.py | PabloSchumacher/TrabalhosPython | 828edd35eb40442629211bc9f1477f75fb025d74 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | Aula37/Controller/squad_controller.py | PabloSchumacher/TrabalhosPython | 828edd35eb40442629211bc9f1477f75fb025d74 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | from Dao.squad_dao import SquadDao
from Model.squad import *
from Controller.backend_controller import BackendController
from Controller.frontend_controller import FrontendController
from Controller.sgbd_controller import SgbdController
| 29.966667 | 67 | 0.716352 |
3d1cffcdcc57d52b339c43b36508e37229c2109b | 1,065 | py | Python | airbyte-integrations/connectors/source-square/source_square/utils.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 6,215 | 2020-09-21T13:45:56.000Z | 2022-03-31T21:21:45.000Z | airbyte-integrations/connectors/source-square/source_square/utils.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 8,448 | 2020-09-21T00:43:50.000Z | 2022-03-31T23:56:06.000Z | airbyte-integrations/connectors/source-square/source_square/utils.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 1,251 | 2020-09-20T05:48:47.000Z | 2022-03-31T10:41:29.000Z | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from typing import Tuple, Union
def separate_by_count(total_length: int, part_count: int) -> Tuple[int, int]:
    """
    Calculates parts needed to separate count by part_count value.

    For example: separate_by_count(total_length=196582, part_count=10000)
    returns (19, 6582) -> 19*10000 + 6582 = 196582

    :param total_length: total number of items to split
    :param part_count: number of items per full part
    :return: tuple of (number of full parts, count left in the last part)
    """
    # divmod yields the quotient and remainder in a single step; this also
    # fixes the previous annotation '(int, int)', which is not valid typing.
    total_parts, last_part = divmod(total_length, part_count)
    return total_parts, last_part
| 29.583333 | 118 | 0.697653 |
3d1ff7cb54534895504bf777de713d7de280d59d | 19,218 | py | Python | hypermapper/plot_pareto.py | adelejjeh/hypermapper | 02bd83b5b1d3feb9907cf1187864ded66ba2c539 | [
"MIT"
] | null | null | null | hypermapper/plot_pareto.py | adelejjeh/hypermapper | 02bd83b5b1d3feb9907cf1187864ded66ba2c539 | [
"MIT"
] | null | null | null | hypermapper/plot_pareto.py | adelejjeh/hypermapper | 02bd83b5b1d3feb9907cf1187864ded66ba2c539 | [
"MIT"
] | null | null | null | """
Plots design space exploration results.
"""
import json
from collections import OrderedDict, defaultdict
import matplotlib
from jsonschema import Draft4Validator
from pkg_resources import resource_stream
matplotlib.use("agg") # noqa
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
import os
import sys
import warnings
# ensure backward compatibility
try:
from hypermapper import space
from hypermapper.utility_functions import (
deal_with_relative_and_absolute_path,
get_next_color,
get_last_dir_and_file_names,
Logger,
extend_with_default,
)
except ImportError:
if os.getenv("HYPERMAPPER_HOME"): # noqa
warnings.warn(
"Found environment variable 'HYPERMAPPER_HOME', used to update the system path. Support might be discontinued in the future. Please make sure your installation is working without this environment variable, e.g., by installing with 'pip install hypermapper'.",
DeprecationWarning,
2,
) # noqa
sys.path.append(os.environ["HYPERMAPPER_HOME"]) # noqa
ppath = os.getenv("PYTHONPATH")
if ppath:
path_items = ppath.split(":")
scripts_path = ["hypermapper/scripts", "hypermapper_dev/scripts"]
if os.getenv("HYPERMAPPER_HOME"):
scripts_path.append(os.path.join(os.getenv("HYPERMAPPER_HOME"), "scripts"))
truncated_items = [
p for p in sys.path if len([q for q in scripts_path if q in p]) == 0
]
if len(truncated_items) < len(sys.path):
warnings.warn(
"Found hypermapper in PYTHONPATH. Usage is deprecated and might break things. "
"Please remove all hypermapper references from PYTHONPATH. Trying to import"
"without hypermapper in PYTHONPATH..."
)
sys.path = truncated_items
sys.path.append(".") # noqa
sys.path = list(OrderedDict.fromkeys(sys.path))
from hypermapper import space
from hypermapper.utility_functions import (
deal_with_relative_and_absolute_path,
get_next_color,
get_last_dir_and_file_names,
Logger,
extend_with_default,
)
debug = False
def plot(parameters_file, list_of_pairs_of_files=None, image_output_file=None):
    """
    Plot the results of the previously run design space exploration.

    :param parameters_file: path to the json configuration file of the run.
    :param list_of_pairs_of_files: optional list of (pareto file, DSE file)
           tuples; when None or empty, the file names are taken from the json
           configuration.
    :param image_output_file: optional path for the output pdf; when None the
           name from the json configuration is used.
    """
    # Avoid the previous mutable default argument ([]): the default pair
    # appended below leaked into subsequent calls of plot(), making later
    # invocations silently reuse stale file names.
    if list_of_pairs_of_files is None:
        list_of_pairs_of_files = []
    try:
        hypermapper_pwd = os.environ["PWD"]
        hypermapper_home = os.environ["HYPERMAPPER_HOME"]
        os.chdir(hypermapper_home)
    except (KeyError, OSError):
        # Environment variables not set (or chdir failed): fall back to cwd.
        hypermapper_home = "."
        hypermapper_pwd = "."
    show_samples = False
    filename, file_extension = os.path.splitext(parameters_file)
    if file_extension != ".json":
        print(
            "Error: invalid file name. \nThe input file has to be a .json file not a %s"
            % file_extension
        )
        exit(1)
    with open(parameters_file, "r") as f:
        config = json.load(f)
    # Validate the configuration against the schema, filling in defaults.
    schema = json.load(resource_stream("hypermapper", "schema.json"))
    DefaultValidatingDraft4Validator = extend_with_default(Draft4Validator)
    DefaultValidatingDraft4Validator(schema).validate(config)
    application_name = config["application_name"]
    optimization_metrics = config["optimization_objectives"]
    feasible_output = config["feasible_output"]
    feasible_output_name = feasible_output["name"]
    run_directory = config["run_directory"]
    if run_directory == ".":
        run_directory = hypermapper_pwd
        config["run_directory"] = run_directory
    xlog = config["output_image"]["image_xlog"]
    ylog = config["output_image"]["image_ylog"]
    if "optimization_objectives_labels_image_pdf" in config["output_image"]:
        optimization_objectives_labels_image_pdf = config["output_image"][
            "optimization_objectives_labels_image_pdf"
        ]
    else:
        optimization_objectives_labels_image_pdf = optimization_metrics
    # Only consider the files in the json file if there are no input files.
    if list_of_pairs_of_files == []:
        output_pareto_file = config["output_pareto_file"]
        if output_pareto_file == "output_pareto.csv":
            output_pareto_file = application_name + "_" + output_pareto_file
        output_data_file = config["output_data_file"]
        if output_data_file == "output_samples.csv":
            output_data_file = application_name + "_" + output_data_file
        list_of_pairs_of_files.append(
            (
                deal_with_relative_and_absolute_path(run_directory, output_pareto_file),
                deal_with_relative_and_absolute_path(run_directory, output_data_file),
            )
        )
    else:
        for idx, (output_pareto_file, output_data_file) in enumerate(
            list_of_pairs_of_files
        ):
            list_of_pairs_of_files[idx] = (
                deal_with_relative_and_absolute_path(run_directory, output_pareto_file),
                deal_with_relative_and_absolute_path(run_directory, output_data_file),
            )
    # Derive the two output pdf names: the second one ("all_<name>") also
    # shows all explored samples, not only the Pareto front.
    if image_output_file is not None:
        output_image_pdf_file = image_output_file
        output_image_pdf_file = deal_with_relative_and_absolute_path(
            run_directory, output_image_pdf_file
        )
        filename = os.path.basename(output_image_pdf_file)
        path = os.path.dirname(output_image_pdf_file)
        if path == "":
            output_image_pdf_file_with_all_samples = "all_" + filename
        else:
            output_image_pdf_file_with_all_samples = path + "/" + "all_" + filename
    else:
        tmp_file_name = config["output_image"]["output_image_pdf_file"]
        if tmp_file_name == "output_pareto.pdf":
            tmp_file_name = application_name + "_" + tmp_file_name
        output_image_pdf_file = deal_with_relative_and_absolute_path(
            run_directory, tmp_file_name
        )
        filename = os.path.basename(output_image_pdf_file)
        path = os.path.dirname(output_image_pdf_file)
        if path == "":
            output_image_pdf_file_with_all_samples = "all_" + filename
        else:
            output_image_pdf_file_with_all_samples = path + "/" + "all_" + filename
    str_files = ""
    for e in list_of_pairs_of_files:
        str_files += str(e[0] + " " + e[1] + " ")
    print("######### plot_pareto.py ##########################")
    print("### Parameters file is %s" % parameters_file)
    print("### The Pareto and DSE data files are: %s" % str_files)
    print("### The first output pdf image is %s" % output_image_pdf_file)
    print(
        "### The second output pdf image is %s" % output_image_pdf_file_with_all_samples
    )
    print("################################################")
    param_space = space.Space(config)
    xelem = optimization_metrics[0]
    yelem = optimization_metrics[1]
    handler_map_for_legend = {}
    xlabel = optimization_objectives_labels_image_pdf[0]
    ylabel = optimization_objectives_labels_image_pdf[1]
    x_max = float("-inf")
    x_min = float("inf")
    y_max = float("-inf")
    y_min = float("inf")
    print_legend = True
    fig = plt.figure()
    ax1 = plt.subplot(1, 1, 1)
    if xlog:
        ax1.set_xscale("log")
    if ylog:
        ax1.set_yscale("log")
    # Optional normalization of the objectives to percentages of a given max.
    objective_1_max = objective_2_max = 1
    objective_1_is_percentage = objective_2_is_percentage = False
    if "objective_1_max" in config["output_image"]:
        objective_1_max = config["output_image"]["objective_1_max"]
        objective_1_is_percentage = True
    if "objective_2_max" in config["output_image"]:
        objective_2_max = config["output_image"]["objective_2_max"]
        objective_2_is_percentage = True
    input_data_array = {}
    fast_addressing_of_data_array = {}
    non_valid_optimization_obj_1 = defaultdict(list)
    non_valid_optimization_obj_2 = defaultdict(list)
    for file_pair in list_of_pairs_of_files:  # file_pair is tuple containing: (pareto file, DSE file)
        next_color = get_next_color()
        #############################################################################
        ###### Load data from files and do preprocessing on the data before plotting.
        #############################################################################
        for file in file_pair:
            print(("Loading data from %s ..." % file))
            (
                input_data_array[file],
                fast_addressing_of_data_array[file],
            ) = param_space.load_data_file(file, debug)
            if input_data_array[file] is None:
                print("Error: no data found in input data file: %s. \n" % file_pair[1])
                exit(1)
            if (xelem not in input_data_array[file]) or (
                yelem not in input_data_array[file]
            ):
                print(
                    "Error: the optimization variables have not been found in input data file %s. \n"
                    % file
                )
                exit(1)
            print(("Parameters are " + str(list(input_data_array[file].keys())) + "\n"))
            # Rescale both objectives by the configured maxima.
            input_data_array[file][xelem] = [
                float(input_data_array[file][xelem][i]) / objective_1_max
                for i in range(len(input_data_array[file][xelem]))
            ]
            input_data_array[file][yelem] = [
                float(input_data_array[file][yelem][i]) / objective_2_max
                for i in range(len(input_data_array[file][yelem]))
            ]
            if objective_1_is_percentage:
                input_data_array[file][xelem] = [
                    input_data_array[file][xelem][i] * 100
                    for i in range(len(input_data_array[file][xelem]))
                ]
            if objective_2_is_percentage:
                input_data_array[file][yelem] = [
                    input_data_array[file][yelem][i] * 100
                    for i in range(len(input_data_array[file][yelem]))
                ]
            x_max, x_min, y_max, y_min = compute_min_max_samples(
                input_data_array[file], x_max, x_min, xelem, y_max, y_min, yelem
            )
            input_data_array_size = len(
                input_data_array[file][list(input_data_array[file].keys())[0]]
            )
            print("Size of the data file %s is %d" % (file, input_data_array_size))
        file_pareto = file_pair[0]  # This is the Pareto file
        file_search = file_pair[1]  # This is the DSE file
        ######################################################################################################
        ###### Compute invalid samples to be plot in a different color (and remove them from the data arrays).
        ######################################################################################################
        if show_samples:
            # NOTE: 'file' here is the last value of the loop above, i.e. the
            # DSE file (file_pair[1]), which matches file_search.
            i = 0
            for ind in range(len(input_data_array[file][yelem])):
                if input_data_array[file][feasible_output_name][i] == False:
                    non_valid_optimization_obj_2[file_search].append(
                        input_data_array[file][yelem][i]
                    )
                    non_valid_optimization_obj_1[file_search].append(
                        input_data_array[file][xelem][i]
                    )
                    for key in list(input_data_array[file].keys()):
                        del input_data_array[file][key][i]
                else:
                    i += 1
        label_is = get_last_dir_and_file_names(file_pareto)
        (all_samples,) = plt.plot(
            input_data_array[file_search][xelem],
            input_data_array[file_search][yelem],
            color=next_color,
            linestyle="None",
            marker=".",
            mew=0.5,
            markersize=3,
            fillstyle="none",
            label=label_is,
        )
        plt.plot(
            input_data_array[file_pareto][xelem],
            input_data_array[file_pareto][yelem],
            linestyle="None",
            marker=".",
            mew=0.5,
            markersize=3,
            fillstyle="none",
        )
        handler_map_for_legend[all_samples] = HandlerLine2D(numpoints=1)
        ################################################################################################################
        ##### Create a straight Pareto plot: we need to add one point for each point of the data in paretoX and paretoY.
        ##### We also need to reorder the points on the x axis first.
        ################################################################################################################
        straight_pareto_x = list()
        straight_pareto_y = list()
        if len(input_data_array[file_pareto][xelem]) != 0:
            data_array_pareto_x, data_array_pareto_y = (
                list(t)
                for t in zip(
                    *sorted(
                        zip(
                            input_data_array[file_pareto][xelem],
                            input_data_array[file_pareto][yelem],
                        )
                    )
                )
            )
            # Duplicate every point so the front is drawn as a step function.
            for j in range(len(data_array_pareto_x)):
                straight_pareto_x.append(data_array_pareto_x[j])
                straight_pareto_x.append(data_array_pareto_x[j])
                straight_pareto_y.append(data_array_pareto_y[j])
                straight_pareto_y.append(data_array_pareto_y[j])
        straight_pareto_x.append(x_max)  # Just insert the max on the x axis
        straight_pareto_y.insert(0, y_max)  # Just insert the max on the y axis
        label_is = "Pareto - " + get_last_dir_and_file_names(file_pareto)
        (pareto_front,) = plt.plot(
            straight_pareto_x,
            straight_pareto_y,
            label=label_is,
            linewidth=1,
            color=next_color,
        )
        handler_map_for_legend[pareto_front] = HandlerLine2D(numpoints=1)
        label_is = "Invalid Samples - " + get_last_dir_and_file_names(file_search)
        if show_samples:
            (non_valid,) = plt.plot(
                non_valid_optimization_obj_1[file_search],
                non_valid_optimization_obj_2[file_search],
                linestyle="None",
                marker=".",
                mew=0.5,
                markersize=3,
                fillstyle="none",
                label=label_is,
            )
            handler_map_for_legend[non_valid] = HandlerLine2D(numpoints=1)
    plt.ylabel(ylabel, fontsize=16)
    plt.xlabel(xlabel, fontsize=16)
    for tick in ax1.xaxis.get_major_ticks():
        tick.label.set_fontsize(
            14
        )  # Set the fontsize of the label on the ticks of the x axis
    for tick in ax1.yaxis.get_major_ticks():
        tick.label.set_fontsize(
            14
        )  # Set the fontsize of the label on the ticks of the y axis
    # Add the legend with some customizations
    if print_legend:
        ax1.legend(
            handler_map=handler_map_for_legend,
            loc="best",
            bbox_to_anchor=(1, 1),
            fancybox=True,
            shadow=True,
            ncol=1,
            prop={"size": 14},
        )  # Display legend.
    font = {"size": 16}
    matplotlib.rc("font", **font)
    # First pdf: unrestricted axes, showing all loaded samples.
    fig.savefig(output_image_pdf_file_with_all_samples, dpi=120, bbox_inches="tight")
    # Second pdf: clip percentage axes to [0, 100] when configured.
    if objective_1_is_percentage:
        plt.xlim(0, 100)
    if objective_2_is_percentage:
        plt.ylim(0, 100)
    fig.savefig(output_image_pdf_file, dpi=120, bbox_inches="tight")
def compute_min_max_samples(input_data_array, x_max, x_min, xelem, y_max, y_min, yelem):
    """
    Update running min/max bounds on both objectives with the given samples.

    :param input_data_array: dict of columns; the max and min are computed on this data.
    :param x_max: input and output variable (running maximum of objective one).
    :param x_min: input and output variable (running minimum of objective one).
    :param xelem: key selecting the objective-one column in input_data_array.
    :param y_max: input and output variable (running maximum of objective two).
    :param y_min: input and output variable (running minimum of objective two).
    :param yelem: key selecting the objective-two column in input_data_array.
    :return: updated (x_max, x_min, y_max, y_min)
    """
    # Only paired entries count (zip truncates to the shorter column).
    paired = list(zip(input_data_array[xelem], input_data_array[yelem]))
    if paired:
        xs, ys = zip(*paired)
        x_max = max(x_max, max(xs))
        x_min = min(x_min, min(xs))
        y_max = max(y_max, max(ys))
        y_min = min(y_min, min(ys))
    # Warn (but do not abort) when a bound was never updated.
    if x_min == float("inf"):
        print("Warning: x_min is infinity. Execution not interrupted.")
    if y_min == float("inf"):
        print("Warning: y_min is infinity. Execution not interrupted.")
    if x_max == float("-inf"):
        print("Warning: x_max is - infinity. Execution not interrupted.")
    if y_max == float("-inf"):
        print("Warning: y_max is - infinity. Execution not interrupted.")
    return x_max, x_min, y_max, y_min
if __name__ == "__main__":
main()
| 39.220408 | 271 | 0.57774 |
3d200274ebad98aa4c72c04b9e6aca07e97be031 | 1,872 | py | Python | foodshering/authapp/forms.py | malfin/silvehanger | c71a936a0c59c5a6fb909861cf2197b72782642d | [
"Apache-2.0"
] | null | null | null | foodshering/authapp/forms.py | malfin/silvehanger | c71a936a0c59c5a6fb909861cf2197b72782642d | [
"Apache-2.0"
] | null | null | null | foodshering/authapp/forms.py | malfin/silvehanger | c71a936a0c59c5a6fb909861cf2197b72782642d | [
"Apache-2.0"
] | null | null | null | from django import forms
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm, PasswordChangeForm, UserChangeForm
from authapp.models import UserProfile, Status
| 31.2 | 110 | 0.608974 |
3d220060f34001abd4191e581365ad915971f136 | 340 | py | Python | devices/parser/serializers.py | City-of-Helsinki/hel-data-pipe | e473237cd00a54a791337ac611e99556dc37ea35 | [
"MIT"
] | 1 | 2021-02-25T14:21:41.000Z | 2021-02-25T14:21:41.000Z | devices/parser/serializers.py | City-of-Helsinki/hel-data-pipe | e473237cd00a54a791337ac611e99556dc37ea35 | [
"MIT"
] | 9 | 2020-11-23T11:56:56.000Z | 2021-02-25T12:20:05.000Z | devices/parser/serializers.py | City-of-Helsinki/hel-data-pipe | e473237cd00a54a791337ac611e99556dc37ea35 | [
"MIT"
] | 1 | 2021-07-25T12:16:53.000Z | 2021-07-25T12:16:53.000Z | from rest_framework import serializers
from .models import Device, SensorType
| 21.25 | 63 | 0.723529 |
3d224cb8121fbd91cf794debf39fda90674c7943 | 82 | py | Python | technews/__init__.py | WisChang005/technews_watcher | 454ef30bab7731c629f0e3b577ce340c48a6cbe7 | [
"MIT"
] | 1 | 2019-03-31T15:34:10.000Z | 2019-03-31T15:34:10.000Z | technews/__init__.py | WisChang005/technews_watcher | 454ef30bab7731c629f0e3b577ce340c48a6cbe7 | [
"MIT"
] | null | null | null | technews/__init__.py | WisChang005/technews_watcher | 454ef30bab7731c629f0e3b577ce340c48a6cbe7 | [
"MIT"
] | null | null | null | from .technews_helper import TechNews
from .mail_helper import EmailContentHelper
| 27.333333 | 43 | 0.878049 |
3d2303695d686e9f8b4033b5136f35315cde3220 | 696 | py | Python | core/migrations/0002_auto_20191102_1734.py | manulangat1/djcommerce | 2cd92631479ef949e0f05a255f2f50feca728802 | [
"MIT"
] | 1 | 2020-02-08T16:29:41.000Z | 2020-02-08T16:29:41.000Z | core/migrations/0002_auto_20191102_1734.py | manulangat1/djcommerce | 2cd92631479ef949e0f05a255f2f50feca728802 | [
"MIT"
] | 15 | 2020-05-04T13:22:32.000Z | 2022-03-12T00:27:28.000Z | core/migrations/0002_auto_20191102_1734.py | manulangat1/djcommerce | 2cd92631479ef949e0f05a255f2f50feca728802 | [
"MIT"
] | 1 | 2020-10-17T08:54:31.000Z | 2020-10-17T08:54:31.000Z | # Generated by Django 2.2.6 on 2019-11-02 17:34
from django.db import migrations, models
| 29 | 140 | 0.556034 |
3d25c2e6e29e6e78df3ddd62294d2447deebe52c | 28 | py | Python | aoc_tools/__init__.py | dannyboywoop/AOC_Tools | b47374ae465c5772d7b4c09f40eb6e69d68cc144 | [
"MIT"
] | null | null | null | aoc_tools/__init__.py | dannyboywoop/AOC_Tools | b47374ae465c5772d7b4c09f40eb6e69d68cc144 | [
"MIT"
] | null | null | null | aoc_tools/__init__.py | dannyboywoop/AOC_Tools | b47374ae465c5772d7b4c09f40eb6e69d68cc144 | [
"MIT"
] | null | null | null | from ._advent_timer import * | 28 | 28 | 0.821429 |
3d2603c4df2972be551558b1de82be8e153176f4 | 915 | py | Python | stamper/migrations/0004_auto_20161208_1658.py | uploadcare/stump | 8070ff42f01972fa86b4a2eaba580dad65482ef2 | [
"MIT"
] | null | null | null | stamper/migrations/0004_auto_20161208_1658.py | uploadcare/stump | 8070ff42f01972fa86b4a2eaba580dad65482ef2 | [
"MIT"
] | null | null | null | stamper/migrations/0004_auto_20161208_1658.py | uploadcare/stump | 8070ff42f01972fa86b4a2eaba580dad65482ef2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-12-08 16:58
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
| 30.5 | 123 | 0.64153 |
3d26af74dac8b1e4a7a1fd6ba44e20f27a15ed52 | 7,493 | py | Python | lemon.py | lab-sigma/learning-to-rationalize | 05678fdf67661651c39c7d754541b239cb1577eb | [
"MIT"
] | null | null | null | lemon.py | lab-sigma/learning-to-rationalize | 05678fdf67661651c39c7d754541b239cb1577eb | [
"MIT"
] | 1 | 2022-02-02T02:27:59.000Z | 2022-02-02T02:28:51.000Z | lemon.py | lab-sigma/learning-to-rationalize | 05678fdf67661651c39c7d754541b239cb1577eb | [
"MIT"
] | null | null | null | import argparse,time,os,pickle
import matplotlib.pyplot as plt
import numpy as np
from player import *
# Use the non-interactive Agg backend so figures can be rendered and saved
# on headless machines (no display server required).
plt.switch_backend('agg')
# Limit printed float precision to 2 decimals for cleaner console output.
np.set_printoptions(precision=2)
def find_latest(prefix, suffix):
    """Return the smallest non-negative index ``i`` such that the path
    f'{prefix}{i}{suffix}' does not yet exist on disk.

    Used to pick the next free slot in a numbered sequence of files
    (e.g. ``run#0.pickle``, ``run#1.pickle``, ...).
    """
    candidate = 0
    while True:
        if not os.path.exists(f'{prefix}{candidate}{suffix}'):
            return candidate
        candidate += 1
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Render ", 0: name0, 1: name1, ..." so the help text lists numeric choices.
    describe = lambda names : ''.join( [', {}: {}'.format(i, n) for i,n in enumerate(names)] )
    parser.add_argument('--std', type=float, default=0, help='noise std. in feedback')
    parser.add_argument('--iterations', type=int, default=100, help='number of rounds to play')
    parser.add_argument('--strategy', type=int, help='player strategy' + describe(strategy_choice_names))
    parser.add_argument('--num_sellers', type=int, help='number of sellers ' )
    parser.add_argument('--num_actions', type=int, help='number of buyers ')
    parser.add_argument('--unit', type=float, default=1, help='discretized unit')
    parser.add_argument('--minx', type=float, default=0, help='min action')
    parser.add_argument('--samples', type=int, default=100, help='number of samples to save' )
    parser.add_argument('--new', default=False, action='store_true', help='whether to generate a new env instance')
    parser.add_argument('--num_repeat', type=int, default=1, help='number of repeated simulation')
    parser.add_argument('--force_env', default=False, action='store_true', help='whether to use a specified env instance')
    args = parser.parse_args()

    std = args.std
    iterations = args.iterations
    strategy = args.strategy
    num_sellers = args.num_sellers
    num_buyers = 1
    num_actions = args.num_actions
    num_players = num_sellers+num_buyers
    unit = args.unit
    minx = args.minx
    samples = args.samples
    env_name = "lemon3"
    strategy_name = strategy_choice_names[strategy]

    j = 0
    while j < args.num_repeat:
        # One results directory per strategy; shared across repeats.
        log_dir = f'results/{env_name}/{strategy_name}'
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
            print("created directory")
        else:
            print("existing directory")
        # Environment pickles are namespaced by the market configuration.
        prefix = f'results/{env_name}/{num_sellers}_{num_buyers}|{std}|{unit}|{minx}#'
        if not args.force_env:
            i = find_latest(prefix, '.pickle')
            if not args.new and i > 0:
                # Reuse the most recently saved environment instance.
                env_dir = prefix + str(i-1) + '.pickle'
                f = open(env_dir, 'rb')
                env = pickle.load(f)
                print("load env at " + env_dir)
                f.close()
            else:
                # Create a fresh environment and persist it for later runs.
                env = lemon(std, num_sellers, num_actions, unit, minx)
                env_dir = prefix + str(i) + '.pickle'
                f = open(env_dir, 'wb')
                pickle.dump(env, f )
                print("save env at "+ env_dir)
                f.close()
        else:
            # Use the environment index pinned for this repeat.
            i = specified_env[j]
            env_dir = prefix + str(i) + '.pickle'
            # BUG FIX: this guard previously tested log_dir, which was created
            # unconditionally a few lines above, so it could never fire. The
            # pickle path env_dir is what must exist before loading.
            if not os.path.exists(env_dir):
                print("env path not found ", env_dir)
                exit()
            f = open(env_dir, 'rb')
            env = pickle.load(f)
            print("load env at " + env_dir)
            f.close()
        player_module = __import__('player')
        if strategy != 4:
            # Buyer acts over num_actions arms; each seller picks between 2 actions.
            players = [getattr(player_module, strategy_name)(num_actions, iterations) ]
            players.extend( [getattr(player_module, strategy_name)(2, iterations) for i in range(num_sellers) ] )
        else:
            # Strategy 4 takes extra hyper-parameters (a, b) per player type.
            a0 = 50
            b0 = 0.5
            a1 = 50
            b1 = 0.5
            players = [getattr(player_module, strategy_name)(num_actions, iterations, a0, b0) ]
            players.extend( [getattr(player_module, strategy_name)(2, iterations, a1, b1) for i in range(num_sellers) ] )
            print(f'beta = {players[0].beta}, b = {players[0].b}, beta = {players[1].beta}, b = {players[1].b}' )
        # Each repeat logs into its own numbered sub-directory.
        i = find_latest(f'{log_dir}/', '.log')
        log_dir = f'{log_dir}/{i}'
        L = logger(log_dir, env, iterations, samples=samples)
        start = time.time()
        L.write("iterations: "+str(iterations) + "\n")
        L.write('Environment:\n\t'+str(env)+'\n')
        actions = np.zeros(num_players, dtype=int)
        action_probs = np.zeros(num_players, dtype=float)
        # Main simulation loop: every player acts, the market clears via
        # env.feedback, and bandit feedback is returned to each player.
        for t in range(1, iterations+1):
            for i, p in enumerate(players):
                actions[i] = p.act()
                # NOTE(review): assumes each player exposes action_prob with
                # at least 2 entries — confirm against the player classes.
                action_probs[i] = p.action_prob[1]
            rewards, supply, price, avg_quality = env.feedback( actions )
            for a, p, r in zip(actions, players, rewards ):
                p.feedback(a, r)
            L.record_round(t, supply, price, avg_quality, action_probs)
        for i, p in enumerate(players):
            L.write(f'Player{i}:\n\t{p}\n')
        L.plot()
        end = time.time()
        print(log_dir, end-start)
        j += 1
| 31.091286 | 119 | 0.68237 |
3d26e189eb8a7096fbff4e3b70771b2698d8bd96 | 1,910 | py | Python | src/osaction.py | ivan-georgiev/urlmonitor | 1280127a1d8c52dcbcd871bba55abaf23a1ca3ce | [
"MIT"
] | null | null | null | src/osaction.py | ivan-georgiev/urlmonitor | 1280127a1d8c52dcbcd871bba55abaf23a1ca3ce | [
"MIT"
] | null | null | null | src/osaction.py | ivan-georgiev/urlmonitor | 1280127a1d8c52dcbcd871bba55abaf23a1ca3ce | [
"MIT"
] | null | null | null | # pylint: disable=too-many-arguments
"""
Observer implemtation doing OS command
"""
from base.iobserver import IObserver
import subprocess
import logging
import os
import sys
# Configure root logging once at import time: the level comes from the
# LOGLEVEL environment variable (defaulting to INFO) and records are
# written to stderr so they do not mix with program output on stdout.
logging.basicConfig(
    format='%(asctime)s %(levelname)s:%(name)s: %(message)s',
    level=os.environ.get('LOGLEVEL', 'INFO').upper(),
    datefmt='%H:%M:%S',
    stream=sys.stderr,
)
# Module-level logger for this component.
logger = logging.getLogger('osaction')
| 28.088235 | 104 | 0.587958 |