hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
58f91a9f5c9302c8e95efa47c83b819f09e32089 | 1,248 | py | Python | conanfile.py | midurk/conan-rapidxml | df93616a87ba41edd9def914f765fd8eae0007c5 | [
"MIT"
] | null | null | null | conanfile.py | midurk/conan-rapidxml | df93616a87ba41edd9def914f765fd8eae0007c5 | [
"MIT"
] | null | null | null | conanfile.py | midurk/conan-rapidxml | df93616a87ba41edd9def914f765fd8eae0007c5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, tools
import os
| 37.818182 | 97 | 0.684295 |
58f92d8b76c80f99fb368b4b75fdb05787830601 | 2,772 | py | Python | src_2d/help/compute_dice.py | xzluo97/MvMM-RegNet | c08d5df14b4a9c4a98c66973ff4950aba7f416e4 | [
"MIT"
] | 19 | 2020-07-14T02:23:58.000Z | 2022-03-15T12:22:49.000Z | src_2d/help/compute_dice.py | xzluo97/MvMM-RegNet | c08d5df14b4a9c4a98c66973ff4950aba7f416e4 | [
"MIT"
] | 4 | 2020-09-25T22:42:40.000Z | 2021-08-25T15:03:29.000Z | src_2d/help/compute_dice.py | xzluo97/MvMM-RegNet | c08d5df14b4a9c4a98c66973ff4950aba7f416e4 | [
"MIT"
] | 7 | 2020-08-29T15:46:13.000Z | 2021-07-16T01:51:28.000Z | """
Compute Dice between test ground truth and predictions from groupwise registration.
"""
import os
import nibabel as nib
import glob
import numpy as np
from core import utils_2d
from core.metrics_2d import OverlapMetrics
# Script entry point: pair each predicted label volume with its ground-truth
# label volume and report per-case and mean Dice scores.
if __name__ == '__main__':
    # Glob patterns for ground-truth and predicted label volumes.
    gt_path = '../../../../../../dataset/C0T2LGE/label_center_data/test/*label.nii.gz'
    pred_path = '../../../../../../results/MSCMR/test_predictions_1.5mm_group3_fusion15/*label.nii.gz'
    pred_names = utils_2d.strsort(glob.glob(pred_path))
    # Keep only ground-truth files whose second '_'-separated token is 'DE'.
    gt_names = utils_2d.strsort([name for name in glob.glob(gt_path) if os.path.basename(name).split('_')[1] == 'DE'])
    # Relies on the two sorted lists lining up one-to-one — TODO confirm.
    pred_gt_names = dict(zip(pred_names, gt_names))
    print(pred_gt_names)
    average_dice = []
    myo_dice = []
    LV_dice = []
    RV_dice = []
    for name in pred_names:
        # NOTE(review): `load_nifty` and `one_hot_label` are not defined or
        # imported in this file as shown — presumably helpers from `core`;
        # confirm the imports.
        pred_label = load_nifty(name)
        one_hot_pred = one_hot_label(pred_label, (0, 200, 500, 600))
        gt_label = load_nifty(pred_gt_names[name])
        # Drop slices (last axis) that miss any of the three foreground
        # labels 200/500/600 before one-hot encoding.
        gt_label = np.concatenate([gt for gt in np.dsplit(gt_label, gt_label.shape[-1])
                                   if np.all([np.sum(gt==i) > 0 for i in [200, 500, 600]])], axis=-1)
        one_hot_gt = one_hot_label(gt_label, (0, 200, 500, 600))
        Dice = OverlapMetrics(n_class=4, mode='np')
        # Class indices follow the one-hot order (0, 200, 500, 600):
        # i=1 myocardium, i=2 LV, i=3 RV — matching the prints below.
        dice = Dice.averaged_foreground_dice(one_hot_gt, one_hot_pred)
        m_dice = Dice.class_specific_dice(one_hot_gt, one_hot_pred, i=1)
        l_dice = Dice.class_specific_dice(one_hot_gt, one_hot_pred, i=2)
        r_dice = Dice.class_specific_dice(one_hot_gt, one_hot_pred, i=3)
        average_dice.append(dice)
        myo_dice.append(m_dice)
        LV_dice.append(l_dice)
        RV_dice.append(r_dice)
        print("Average foreground Dice for %s: %.4f" % (os.path.basename(name), dice))
        print("Myocardium Dice for %s: %.4f" % (os.path.basename(name), m_dice))
        print("LV Dice for %s: %.4f" % (os.path.basename(name), l_dice))
        print("RV Dice for %s: %.4f" % (os.path.basename(name), r_dice))
    # Aggregate means over all evaluated cases.
    print("Average prediction Dice: %.4f" % np.mean(average_dice))
    print("Average myocardium Dice: %.4f" % np.mean(myo_dice))
    print("Average LV Dice: %.4f" % np.mean(LV_dice))
    print("Average RV Dice: %.4f" % np.mean(RV_dice))
| 35.538462 | 119 | 0.626623 |
58f98f05dc6e23f4ee940bfbe966fafa9a03fa4c | 14,359 | py | Python | farmos_ext/farm.py | applecreekacres/farmos.py.ext | 91db8a6d5532661650869d34c2ff94e1fed02f02 | [
"MIT"
] | null | null | null | farmos_ext/farm.py | applecreekacres/farmos.py.ext | 91db8a6d5532661650869d34c2ff94e1fed02f02 | [
"MIT"
] | 46 | 2021-02-21T21:05:07.000Z | 2022-03-15T23:05:25.000Z | farmos_ext/farm.py | applecreekacres/farmos.py.ext | 91db8a6d5532661650869d34c2ff94e1fed02f02 | [
"MIT"
] | null | null | null | """Main farm access."""
from __future__ import annotations
import os
from datetime import datetime
from typing import Dict, Iterable, Iterator, List, Type, Union
from farmos_ext.area import Area
from farmos_ext.asset import Asset, Equipment, Planting
from farmos_ext.log import (Activity, Birth, Harvest, Input, Log, Maintenance,
Medical, Observation, Purchase, Sale, Seeding,
SoilTest, Transplanting)
from farmos_ext.others import Content, Quantity
from farmos_ext.term import Crop, CropFamily, Season, Term, Unit
from farmOS import farmOS # pylint: disable=wrong-import-order
from farmOS.client import BaseAPI # pylint: disable=wrong-import-order
def farm():
    """Return a :class:`Farm` instance for accessing the farm.

    ``Farm`` is constructed with no arguments here, so it presumably picks
    up its farmOS credentials from its own configuration — TODO confirm.
    """
    return Farm()
# pylint: disable=too-many-public-methods
def plantings(self, filters: Dict = None) -> Iterable[Planting]:
    """Return all planting assets known to the farm.

    Args:
        filters: Optional extra filter criteria passed through to
            ``self.assets``. The ``'type'`` key is always forced to
            ``'planting'`` so only planting assets are returned.

    Returns:
        An iterable of :class:`Planting` assets.
    """
    # Build a merged copy instead of writing the 'type' key into the
    # caller's dict (the previous implementation mutated the argument
    # in place as a side effect).
    merged = dict(filters) if filters else {}
    merged['type'] = 'planting'
    return self.assets(merged, Planting)
| 37.393229 | 109 | 0.537503 |
58f9aedfba7b25435acbe41455b6f6873bd36f40 | 2,768 | py | Python | tests/io/v3/base/test_csv_iterator.py | alpesh-te/pyTenable | 4b5381a7757561f7ac1e79c2e2679356dd533540 | [
"MIT"
] | null | null | null | tests/io/v3/base/test_csv_iterator.py | alpesh-te/pyTenable | 4b5381a7757561f7ac1e79c2e2679356dd533540 | [
"MIT"
] | 25 | 2021-11-16T18:41:36.000Z | 2022-03-25T05:43:31.000Z | tests/io/v3/base/test_csv_iterator.py | alpesh-te/pyTenable | 4b5381a7757561f7ac1e79c2e2679356dd533540 | [
"MIT"
] | 2 | 2022-03-02T12:24:40.000Z | 2022-03-29T05:12:04.000Z | '''
Testing the CSV iterators
'''
import responses
from tenable.io.v3.base.iterators.explore_iterator import CSVChunkIterator
USERS_BASE_URL = r'https://cloud.tenable.com/api/v3/assets/search'
CSV_TEXT = (
'created,display_ipv4_address,first_observed,id,'
'ipv4_addresses,ipv6_addresses,is_deleted,is_licensed,'
'is_public,last_observed,name,network.id,network.name,'
'observation_sources,sources,types,updated\n'
'2021-11-24T13:43:56.709Z,192.12.13.7,2021-11-24T13:43:56.442Z,'
'"0142df77-dbc4-4706-8456-b756c06ee8a2",192.12.13.7,,false,'
'false,true,2021-11-24T13:43:56.442Z,192.12.13.7,'
'"00000000-0000-0000-0000-000000000000",Default,'
'"test_v3;2021-11-24T13:43:56.442Z;2021-11-24T13:43:56.442Z",'
'test_v3,host,2021-11-24T13:43:56.709Z\n'
)
CSV_TEXT_2 = (
'created,display_ipv4_address,first_observed,id,ipv4_addresses,'
'ipv6_addresses,is_deleted,is_licensed,is_public,last_observed,'
'name,network.id,network.name,observation_sources,sources,'
'types,updated\ncreated,display_ipv4_address,first_observed,id,'
'ipv4_addresses,ipv6_addresses,is_deleted,is_licensed,'
'is_public,last_observed,name,network.id,network.name,'
'observation_sources,sources,types,updated\n'
'2021-11-24T13:43:56.709Z,192.12.13.7,2021-11-24T13:43:56.442Z,'
'"0142df77-dbc4-4706-8456-b756c06ee8a2",192.12.13.7,,'
'false,false,true,2021-11-24T13:43:56.442Z,192.12.13.7,'
'"00000000-0000-0000-0000-000000000000",Default,'
'"test_v3;2021-11-24T13:43:56.442Z;2021-11-24T13:43:56.442Z",'
'test_v3,host,2021-11-24T13:43:56.709Z\n'
)
CSV_HEADERS = {
'Date': 'Wed, 08 Dec 2021 04:42:28 GMT',
'Content-Type': 'text/csv;charset=UTF-8',
'Content-Length': '508',
'Connection': 'keep-alive',
'Set-Cookie': 'nginx-cloud-site-id=qa-develop; path=/; '
'HttpOnly; SameSite=Strict; Secure',
'X-Request-Uuid': '4d43db5bac4decd79fc198e06a8113bd',
'X-Continuation-Token': 'fasd563456fghfgfFGHFGHRT',
'X-Content-Type-Options': 'nosniff',
'X-Frame-Options': 'DENY',
'X-Xss-Protection': '1; mode=block',
'Cache-Control': 'no-store',
'Strict-Transport-Security': 'max-age=63072000; includeSubDomains',
'X-Gateway-Site-ID': 'nginx-router-jm8uw-us-east-1-eng',
'Pragma': 'no-cache',
'Expect-CT': 'enforce, max-age=86400',
'X-Path-Handler': 'tenable-io',
}
| 35.948052 | 74 | 0.695087 |
58fb3efb05d44bd1aa9c7179a47db0c343140765 | 3,716 | py | Python | startt.py | OnyyMexicanCat/RinkoglionitoBot | 12e48e679b46710bbeaa7e98f02b09a512609031 | [
"MIT"
] | null | null | null | startt.py | OnyyMexicanCat/RinkoglionitoBot | 12e48e679b46710bbeaa7e98f02b09a512609031 | [
"MIT"
] | null | null | null | startt.py | OnyyMexicanCat/RinkoglionitoBot | 12e48e679b46710bbeaa7e98f02b09a512609031 | [
"MIT"
] | null | null | null | from telegram.ext import *
from telegram import *
import time
| 62.983051 | 655 | 0.660388 |
58fb480ae327d41355b8b8179c7dede058c21b5b | 222 | py | Python | server/config.py | nikitinivan/Crypton | 90db77f4066d763e55f55c0fb540dc99aa2495e3 | [
"MIT"
] | null | null | null | server/config.py | nikitinivan/Crypton | 90db77f4066d763e55f55c0fb540dc99aa2495e3 | [
"MIT"
] | null | null | null | server/config.py | nikitinivan/Crypton | 90db77f4066d763e55f55c0fb540dc99aa2495e3 | [
"MIT"
] | null | null | null | import os
| 24.666667 | 74 | 0.725225 |
58fc01c36853b26f8562e022eac13585ff61105f | 69 | py | Python | nbviewerbot/__main__.py | JohnPaton/nbviewerbot | a9564655ba041e53db9a6916fb424e9582704321 | [
"MIT"
] | 7 | 2018-08-06T20:02:13.000Z | 2021-04-12T06:04:46.000Z | nbviewerbot/__main__.py | JohnPaton/nbviewerbot | a9564655ba041e53db9a6916fb424e9582704321 | [
"MIT"
] | 5 | 2018-09-13T20:53:32.000Z | 2021-03-31T18:55:48.000Z | nbviewerbot/__main__.py | JohnPaton/nbviewerbot | a9564655ba041e53db9a6916fb424e9582704321 | [
"MIT"
] | null | null | null | import nbviewerbot
# Support `python -m nbviewerbot` by delegating to the package's CLI
# entry point.
if __name__ == "__main__":
    nbviewerbot.cli()
| 13.8 | 26 | 0.710145 |
58fd05c379e33e35d0b95e61f85e13decd24ff2f | 1,571 | py | Python | fetch_data.py | ASabryMazroua/Arabic-Dialect-Classification | e0e778379a321022d4d05b54b067ab6541793434 | [
"MIT"
] | 1 | 2022-03-19T04:40:27.000Z | 2022-03-19T04:40:27.000Z | fetch_data.py | ASabryMazroua/Arabic-Dialect-Classification | e0e778379a321022d4d05b54b067ab6541793434 | [
"MIT"
] | null | null | null | fetch_data.py | ASabryMazroua/Arabic-Dialect-Classification | e0e778379a321022d4d05b54b067ab6541793434 | [
"MIT"
] | null | null | null | import json
import math
import requests
import pandas as pd
def fetch_data(ids, max_retries=5, retry_delay=1.0):
    '''
    Fetch the text for a list of ids from the AIM Technologies API.

    The API accepts at most 1000 ids per request, so the id list is split
    into chunks of 1000 and requested chunk by chunk.

    Parameters:
        ids (list): A list of ids (strings) to fetch.
        max_retries (int): Maximum attempts per chunk before giving up
            (the previous implementation retried forever with no delay,
            which could hammer the API indefinitely).
        retry_delay (float): Seconds to wait between retries.

    Returns:
        results (dict): A dictionary where keys are the ids and values are
            the fetched text.

    Raises:
        RuntimeError: If a chunk still fails after ``max_retries`` attempts.
    '''
    import time  # local import: keeps this fix self-contained

    results = {}
    # Loop over the ids in chunks of 1000 because of the API limit.
    for i in range(math.ceil(len(ids) / 1000)):
        sub_ids = json.dumps(ids[i * 1000:1000 * (i + 1)])
        for attempt in range(max_retries):
            r = requests.post("https://recruitment.aimtechnologies.co/ai-tasks", sub_ids)
            if r.status_code == 200:
                results.update(json.loads(r.text))
                break
            # Back off briefly instead of busy-looping on failures.
            time.sleep(retry_delay)
        else:
            raise RuntimeError(
                "Failed to fetch chunk %d after %d attempts (last status: %d)"
                % (i, max_retries, r.status_code))
    print(f"We managed to fetch {len(results)} samples of text.")
    return results
# Script entry point: read the source ids, fetch the matching dialect text
# from the API, then merge and persist the full dataset.
if __name__ == '__main__':
    # Read the ids' file, then fetch data, and write the file to a csv
    source_data = pd.read_csv("files/dialect_dataset.csv")
    # Ids are converted to strings before being sent to the API.
    text_dict = fetch_data(list(source_data.loc[:,"id"].astype(str)))
    # We'll make sure that we managed to fetch all the ids
    if len(source_data) == len(text_dict):
        # NOTE(review): assigning dict values relies on the response dicts
        # preserving the request order of the ids (Python 3.7+ dicts keep
        # insertion order) — confirm the API echoes ids back in order.
        source_data.loc[:,"text"] = text_dict.values()
        source_data.to_csv("data/full_dialect_dataset.csv",encoding='utf-8-sig') | 38.317073 | 89 | 0.624443 |
58fd528adc4ec458f11e0462bce2b5ed5cc03175 | 2,305 | py | Python | tests/test.py | daniel-sasu/roadwatch-data-processor | 10317998a7f336bd1a26dc95b54e7bb7785cfd22 | [
"MIT"
] | null | null | null | tests/test.py | daniel-sasu/roadwatch-data-processor | 10317998a7f336bd1a26dc95b54e7bb7785cfd22 | [
"MIT"
] | null | null | null | tests/test.py | daniel-sasu/roadwatch-data-processor | 10317998a7f336bd1a26dc95b54e7bb7785cfd22 | [
"MIT"
] | null | null | null | # tests/test.py
from rw_data_proc.core import *
import unittest
| 39.741379 | 107 | 0.610846 |
58fdec912e446a48a537a766eb98ce253951af60 | 1,496 | py | Python | booking_spaces/booking/forms.py | pvlvnk/booking | 701c8e1d8ceefde03090cd93bf954874d9fe349e | [
"MIT"
] | null | null | null | booking_spaces/booking/forms.py | pvlvnk/booking | 701c8e1d8ceefde03090cd93bf954874d9fe349e | [
"MIT"
] | null | null | null | booking_spaces/booking/forms.py | pvlvnk/booking | 701c8e1d8ceefde03090cd93bf954874d9fe349e | [
"MIT"
] | null | null | null | from booking.models import Schedule, ParkingSpace
from datetime import datetime as dt
from django import forms
| 24.933333 | 73 | 0.68115 |
58ff16ed56b02ccd24c5cca15503e57704dd6fd0 | 20,793 | py | Python | tests/unit/aiplatform/test_matching_engine_index_endpoint.py | kthytang/python-aiplatform | e82c1792293396045a1032df015a3700fc38609b | [
"Apache-2.0"
] | null | null | null | tests/unit/aiplatform/test_matching_engine_index_endpoint.py | kthytang/python-aiplatform | e82c1792293396045a1032df015a3700fc38609b | [
"Apache-2.0"
] | null | null | null | tests/unit/aiplatform/test_matching_engine_index_endpoint.py | kthytang/python-aiplatform | e82c1792293396045a1032df015a3700fc38609b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
from importlib import reload
from unittest import mock
from unittest.mock import patch
from google.api_core import operation
from google.cloud import aiplatform
from google.cloud.aiplatform import base
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform.compat.types import (
matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref,
index_endpoint as gca_index_endpoint,
index as gca_index,
)
from google.cloud.aiplatform.compat.services import (
index_endpoint_service_client,
index_service_client,
)
from google.protobuf import field_mask_pb2
import pytest
# project
_TEST_PROJECT = "test-project"
_TEST_LOCATION = "us-central1"
_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
# index
_TEST_INDEX_ID = "index_id"
_TEST_INDEX_NAME = f"{_TEST_PARENT}/indexes/{_TEST_INDEX_ID}"
_TEST_INDEX_DISPLAY_NAME = "index_display_name"
# index_endpoint
_TEST_INDEX_ENDPOINT_ID = "index_endpoint_id"
_TEST_INDEX_ENDPOINT_NAME = f"{_TEST_PARENT}/indexEndpoints/{_TEST_INDEX_ENDPOINT_ID}"
_TEST_INDEX_ENDPOINT_DISPLAY_NAME = "index_endpoint_display_name"
_TEST_INDEX_ENDPOINT_DESCRIPTION = "index_endpoint_description"
_TEST_INDEX_DESCRIPTION = "index_description"
_TEST_INDEX_ENDPOINT_VPC_NETWORK = "projects/{}/global/networks/{}".format(
"12345", "network"
)
_TEST_LABELS = {"my_key": "my_value"}
_TEST_DISPLAY_NAME_UPDATE = "my new display name"
_TEST_DESCRIPTION_UPDATE = "my description update"
_TEST_LABELS_UPDATE = {"my_key_update": "my_value_update"}
# deployment
_TEST_DEPLOYED_INDEX_ID = "deployed_index_id"
_TEST_DEPLOYED_INDEX_DISPLAY_NAME = "deployed_index_display_name"
_TEST_MIN_REPLICA_COUNT = 2
_TEST_MAX_REPLICA_COUNT = 2
_TEST_ENABLE_ACCESS_LOGGING = False
_TEST_RESERVED_IP_RANGES = ["vertex-ai-ip-range-1", "vertex-ai-ip-range-2"]
_TEST_DEPLOYMENT_GROUP = "prod"
_TEST_AUTH_CONFIG_AUDIENCES = ["a", "b"]
_TEST_AUTH_CONFIG_ALLOWED_ISSUERS = [
"service-account-name-1@project-id.iam.gserviceaccount.com",
"service-account-name-2@project-id.iam.gserviceaccount.com",
]
# deployment_updated
_TEST_MIN_REPLICA_COUNT_UPDATED = 4
_TEST_MAX_REPLICA_COUNT_UPDATED = 4
# request_metadata
_TEST_REQUEST_METADATA = ()
# Lists
_TEST_INDEX_ENDPOINT_LIST = [
gca_index_endpoint.IndexEndpoint(
name=_TEST_INDEX_ENDPOINT_NAME,
display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
),
gca_index_endpoint.IndexEndpoint(
name=_TEST_INDEX_ENDPOINT_NAME,
display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
),
gca_index_endpoint.IndexEndpoint(
name=_TEST_INDEX_ENDPOINT_NAME,
display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
),
]
# Match
_TEST_QUERIES = [
[
-0.11333,
0.48402,
0.090771,
-0.22439,
0.034206,
-0.55831,
0.041849,
-0.53573,
0.18809,
-0.58722,
0.015313,
-0.014555,
0.80842,
-0.038519,
0.75348,
0.70502,
-0.17863,
0.3222,
0.67575,
0.67198,
0.26044,
0.4187,
-0.34122,
0.2286,
-0.53529,
1.2582,
-0.091543,
0.19716,
-0.037454,
-0.3336,
0.31399,
0.36488,
0.71263,
0.1307,
-0.24654,
-0.52445,
-0.036091,
0.55068,
0.10017,
0.48095,
0.71104,
-0.053462,
0.22325,
0.30917,
-0.39926,
0.036634,
-0.35431,
-0.42795,
0.46444,
0.25586,
0.68257,
-0.20821,
0.38433,
0.055773,
-0.2539,
-0.20804,
0.52522,
-0.11399,
-0.3253,
-0.44104,
0.17528,
0.62255,
0.50237,
-0.7607,
-0.071786,
0.0080131,
-0.13286,
0.50097,
0.18824,
-0.54722,
-0.42664,
0.4292,
0.14877,
-0.0072514,
-0.16484,
-0.059798,
0.9895,
-0.61738,
0.054169,
0.48424,
-0.35084,
-0.27053,
0.37829,
0.11503,
-0.39613,
0.24266,
0.39147,
-0.075256,
0.65093,
-0.20822,
-0.17456,
0.53571,
-0.16537,
0.13582,
-0.56016,
0.016964,
0.1277,
0.94071,
-0.22608,
-0.021106,
]
]
_TEST_NUM_NEIGHBOURS = 1
# All index mocks
# All index_endpoint mocks
| 33.537097 | 90 | 0.67994 |
58ff85cb63e954368b68902aa3d1e1f0e1df7bef | 1,440 | py | Python | vrc_log_viewer.py | 27Cobalter/vrc_log_viewer | 00b5d106488fb95c605ef873a75fd26bd7d1d37f | [
"MIT"
] | 2 | 2019-08-29T05:58:22.000Z | 2019-09-09T15:29:16.000Z | vrc_log_viewer.py | 27Cobalter/vrc_log_viewer | 00b5d106488fb95c605ef873a75fd26bd7d1d37f | [
"MIT"
] | 1 | 2022-03-20T08:11:00.000Z | 2022-03-20T08:11:00.000Z | vrc_log_viewer.py | 27Cobalter/vrc_log_viewer | 00b5d106488fb95c605ef873a75fd26bd7d1d37f | [
"MIT"
] | 2 | 2020-02-04T03:19:57.000Z | 2021-02-08T15:17:22.000Z | import glob
import os
import re
import sys
import time
import yaml
# Tail the newest VRChat output log and print lines matching the regex
# patterns listed in config.yml.
if __name__ == "__main__":
    with open("config.yml", "r") as config:
        conf = yaml.load(config, Loader=yaml.SafeLoader)
    print("load config")
    # Pre-compile every configured pattern once, up front.
    reg = []
    for pattern in conf["reg"]:
        print(" " + pattern)
        reg.append(re.compile(pattern))
    # Default log directory used by VRChat on Windows.
    vrcdir = os.environ["USERPROFILE"] + "\\AppData\\LocalLow\\VRChat\\VRChat\\"
    logfile = vrcdir + conf["logfile"]
    # An explicit log file path on the command line wins over the config.
    if len(sys.argv) > 1:
        logfile = sys.argv[1]
    # No file configured: fall back to the most recently created log.
    if logfile == vrcdir:
        logfiles = glob.glob(vrcdir + "output_log_*.txt")
        logfiles.sort(key=os.path.getctime, reverse=True)
        logfile = logfiles[0]
    with open(logfile, "r", encoding="utf-8") as f:
        print("open logfile : ", logfile)
        # NOTE(review): `tail` is not defined or imported in this file as
        # shown — presumably a helper yielding the last conf["past"] lines
        # (and possibly following the file); confirm.
        loglines = tail(f, conf["past"])
        for line in loglines:
            for pattern in reg:
                match = pattern.match(line)
                if not match:
                    continue
                # Join all captured groups into one space-separated message.
                message = ""
                for group in match.groups():
                    message = message + group + " "
                print(message)
| 27.169811 | 80 | 0.532639 |
450019e12b1cc5c40f3f6bff8bbb906c38cceb65 | 9,032 | py | Python | src/intermediate_representation/sem2sql/infer_from_clause.py | brunnurs/proton | 057889e2bcefd2e7e6bc3b0fcdf418a2123767a0 | [
"Apache-2.0"
] | null | null | null | src/intermediate_representation/sem2sql/infer_from_clause.py | brunnurs/proton | 057889e2bcefd2e7e6bc3b0fcdf418a2123767a0 | [
"Apache-2.0"
] | null | null | null | src/intermediate_representation/sem2sql/infer_from_clause.py | brunnurs/proton | 057889e2bcefd2e7e6bc3b0fcdf418a2123767a0 | [
"Apache-2.0"
] | null | null | null |
def _find_and_remove_star_table(columns, join_clause):
"""
Starting from 3 tables we have to deal with the "star-table" effect - a join with a joining table where we only wanna know e.g. the count(*) of the third table.
In that case we don't need to join the third table - we just do a count over the join with the joining table.
In general, the additional join is not an issue - but is seen as incorrect by the spider-evaluation and therefore we have to remove it.
Example:
SELECT T2.concert_name , T2.theme , count(*) FROM singer_in_concert AS T1 JOIN concert AS T2 ON T1.concert_id = T2.concert_id GROUP BY T2.concert_id ---> GOOD
SELECT T1.concert_Name, T1.Theme, count(*) FROM concert AS T1 JOIN singer_in_concert AS T3 JOIN singer AS T2 GROUP BY T1.concert_ID -----> BAD, REMOVE "singer" join.
"""
# unfortunately auto tuple unpacking doesn't work anymore in python 3, therefore this comment: a "column" contains the 3 elements "aggregator, "column name", "table".
star_tables = list(map(lambda column: column[2], filter(lambda column: column[1] == '*', columns)))
# remove duplicates
star_tables = list(set(star_tables))
assert len(star_tables) <= 1, "The case of having multiple star-joins is currently not supported (and not part of the spider-dataset)"
if len(star_tables) == 1:
star_table = star_tables[0]
# we need to make sure the table we try to remove is not used at any other place - e.g. in the SELECT or in the WHERE clause.
# only then we can safely remove it
if len(list(filter(lambda column: column[1] != '*' and column[2] == star_table, columns))) == 0:
# we only remove star-tables if they are the start or end table in the graph.
# remember, an join_clause tuple looks like this: (start, start_alias, end, end_alias, entry_column, exit_column)
start_edge = join_clause[0]
start_edge_from, _, start_edge_to, _, _, _ = start_edge
end_edge = join_clause[len(join_clause) - 1]
end_edge_from, _, end_edge_to, _, _, _ = end_edge
if start_edge_from == star_table:
if second_table_in_edge_is_availabe_elswhere(start_edge_to, join_clause[1:]):
return join_clause[1:]
if end_edge_to == star_table:
if second_table_in_edge_is_availabe_elswhere(end_edge_from, join_clause[:-1]):
return join_clause[:-1]
return join_clause
def second_table_in_edge_is_availabe_elswhere(second_table, remaining_edges):
    """
    Check that a table stays reachable after an edge is removed.

    Removing an edge drops two tables from the join path. In a linear schema
    ("A --> B", "B --> C") that is safe, but in a non-linear schema
    ("A --> B", "A --> C") removing the first edge would lose B completely.
    A real example is "flight_2", where two relations go from "flights" to
    "airports". So before removing an edge we verify that its second table
    still shows up as the start or end of one of the remaining edges.
    """
    return any(
        second_table in (start, end)
        for start, _, end, _, _, _ in remaining_edges
    )
| 45.616162 | 175 | 0.665633 |
450109704aaa9e57ec8952a08e13c1c362e0340c | 21 | py | Python | test.py | AlanFnz/profiles-rest-api | c606999f86235ed74fd98421bd02bc598d5a5463 | [
"MIT"
] | null | null | null | test.py | AlanFnz/profiles-rest-api | c606999f86235ed74fd98421bd02bc598d5a5463 | [
"MIT"
] | null | null | null | test.py | AlanFnz/profiles-rest-api | c606999f86235ed74fd98421bd02bc598d5a5463 | [
"MIT"
] | null | null | null | print('Test script')
| 10.5 | 20 | 0.714286 |
45022a2b63c33a9252e7e2198671dfbcf5309e06 | 2,208 | py | Python | src/markdown_storage/folder.py | stephanpoetschner/markdown_storage | 69005db4484010e0d2282bdeb0d0bcc30a316932 | [
"MIT"
] | null | null | null | src/markdown_storage/folder.py | stephanpoetschner/markdown_storage | 69005db4484010e0d2282bdeb0d0bcc30a316932 | [
"MIT"
] | null | null | null | src/markdown_storage/folder.py | stephanpoetschner/markdown_storage | 69005db4484010e0d2282bdeb0d0bcc30a316932 | [
"MIT"
] | null | null | null | import os
from .exceptions import MarkdownError, MetadataError
from .file import ContentFile
# def __next__(self):
# import ipdb; ipdb.set_trace()
# for file in self._files:
# return file
# raise StopIteration
# def sort(self, key=None, reverse=False):
# return ContentIterator(self._files, key, reverse)
#
# class ContentIterator(object):
# def __init__(self, items, key=None, reverse=False):
# self.items = sorted(items, key=key, reverse=reverse)
# self.i = 0
#
# def __iter__(self):
# return self
#
# def next(self):
# if self.i >= len(self.items):
# raise StopIteration
#
# item = self.items[self.i]
# self.i += 1
#
# return item
#
# def sorted(self, sort_key):
# return ContentIterator(self, self.items, sort_key)
#
#
| 24 | 62 | 0.579257 |
4506dc61f56a8eae8242703dae9838d15d5a49a2 | 2,327 | py | Python | test/test_session.py | Sunmxt/UESTC-EAMS | 760a7387a5d73967e45a0b9d211acb383bb50fe1 | [
"Apache-2.0"
] | 1 | 2020-07-25T13:53:35.000Z | 2020-07-25T13:53:35.000Z | test/test_session.py | Sunmxt/UESTC-EAMS | 760a7387a5d73967e45a0b9d211acb383bb50fe1 | [
"Apache-2.0"
] | null | null | null | test/test_session.py | Sunmxt/UESTC-EAMS | 760a7387a5d73967e45a0b9d211acb383bb50fe1 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python
'''
Test for session module
'''
import unittest
import uestc_eams
from .mock_server import LoginMockServer
from .utils import HookedMethod, MakeResponse
mock_login = LoginMockServer()
| 34.731343 | 77 | 0.65578 |
4507d40889bdeb2efc06f9fd94721d09e699f4c0 | 159 | py | Python | Asignacion2/App/main.py | HarambeGeek/uip-iq-pc3 | 6e9a0678a90c4bfd7499dfb5c71c9a3ea9effe1e | [
"Apache-2.0"
] | null | null | null | Asignacion2/App/main.py | HarambeGeek/uip-iq-pc3 | 6e9a0678a90c4bfd7499dfb5c71c9a3ea9effe1e | [
"Apache-2.0"
] | null | null | null | Asignacion2/App/main.py | HarambeGeek/uip-iq-pc3 | 6e9a0678a90c4bfd7499dfb5c71c9a3ea9effe1e | [
"Apache-2.0"
] | null | null | null | from App.numeros import numeros
if __name__ == "__main__":
x = int(input("Ingrese el nmero que desea evaluar: \n"))
pi = numeros(x)
pi.parImpar() | 26.5 | 61 | 0.660377 |
45085ba62031c2ca173669eca6938ca7aaf578c8 | 6,950 | py | Python | tensorflow/python/distribute/test_util.py | TL-Rubick/tensorflow | 6cf1ccf6060a95aad3ccc84544d0aa166990ec72 | [
"Apache-2.0"
] | 11 | 2018-01-03T15:11:09.000Z | 2021-04-13T05:47:27.000Z | tensorflow/python/distribute/test_util.py | TL-Rubick/tensorflow | 6cf1ccf6060a95aad3ccc84544d0aa166990ec72 | [
"Apache-2.0"
] | 88 | 2020-11-24T08:18:10.000Z | 2022-03-25T20:28:30.000Z | tensorflow/python/distribute/test_util.py | TL-Rubick/tensorflow | 6cf1ccf6060a95aad3ccc84544d0aa166990ec72 | [
"Apache-2.0"
] | 10 | 2018-07-31T10:56:21.000Z | 2019-10-07T08:05:21.000Z | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
from absl import app
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.util import nest
def gather(strategy, value):
  """Collects `value` from all workers into stacked tensors.

  Test-only helper that stands in for an official all-gather API.

  Args:
    strategy: a `tf.distribute.Strategy`.
    value: a nested structure of n-dim `tf.distribute.DistributedValue` of
      `tf.Tensor` (or a plain `tf.Tensor` for single-replica strategies).
      `tf.sparse.SparseTensor` is not supported.

  Returns:
    A structure of (n+1)-dim `tf.Tensor` with the same nesting as `value`.
  """
  # Apply the single-value gather to every leaf of the (possibly nested)
  # structure; the strategy is captured by the closure.
  return nest.map_structure(lambda leaf: _gather(strategy, leaf), value)
def _gather(strategy, value):
  """Gathers a single `DistributedValues` (or tensor) into one stacked tensor.

  Args:
    strategy: a `tf.distribute.Strategy`.
    value: a `tf.distribute.DistributedValues` of n-dim tensors, or a single
      value convertible to a `tf.Tensor`.

  Returns:
    A (n+1)-dim `tf.Tensor` stacking the per-replica components.
  """
  # pylint: disable=protected-access
  # Wrap a bare value so the code below can treat everything as a
  # per-replica container.
  if not isinstance(value, values.DistributedValues):
    value = values.PerReplica([ops.convert_to_tensor(value)])
  # For non-multi-worker strategies every component lives in this process,
  # so a local stack suffices.
  if not isinstance(strategy.extended,
                    collective_all_reduce_strategy.CollectiveAllReduceExtended):
    return array_ops.stack(value._values)
  assert len(strategy.extended.worker_devices) == len(value._values)
  # Give each component a leading axis of size 1, then concatenate along it
  # across workers via the strategy's gather.
  inputs = [array_ops.expand_dims_v2(v, axis=0) for v in value._values]
  return strategy.gather(values.PerReplica(inputs), axis=0)
  # pylint: enable=protected-access
def set_logical_devices_to_at_least(device, num):
  """Ensures at least `num` logical devices of the given kind exist.

  Args:
    device: device type string, e.g. "CPU" or "GPU".
    num: minimum number of logical devices required; must be >= 1.

  Raises:
    ValueError: if `num` is smaller than 1.
    RuntimeError: if no physical device of the requested type is found.
  """
  if num < 1:
    raise ValueError("`num` must be at least 1 not %r" % (num,))
  physical_devices = config.list_physical_devices(device)
  if not physical_devices:
    raise RuntimeError("No {} found".format(device))
  if len(physical_devices) >= num:
    return
  # Each physical device already yields one logical device, so only the
  # shortfall (plus one replacing the device being split) must be created.
  needed = num - len(physical_devices) + 1
  if device.upper() == "GPU":
    # Cap GPU memory per logical device so the splits fit.
    logical_devices = [
        context.LogicalDeviceConfiguration(memory_limit=2048)
        for _ in range(needed)
    ]
  else:
    logical_devices = [
        context.LogicalDeviceConfiguration() for _ in range(needed)
    ]
  # Split the last physical device: the first GPU is often the primary
  # graphics card and may have less memory available.
  config.set_logical_device_configuration(physical_devices[-1], logical_devices)
def main(enable_v2_behavior=True, config_logical_devices=True):
  """All-in-one main function for tf.distribute tests.

  Args:
    enable_v2_behavior: if True enable TF2 behavior, otherwise explicitly
      disable it, before running the tests.
    config_logical_devices: if True, register a post-init hook that
      configures logical devices for the tests.
  """
  if config_logical_devices:
    # NOTE(review): `_set_logical_devices` is not visible in this excerpt —
    # presumably a module-level helper defined elsewhere in the file; confirm.
    app.call_after_init(_set_logical_devices)
  if enable_v2_behavior:
    v2_compat.enable_v2_behavior()
  else:
    v2_compat.disable_v2_behavior()
  # TODO(b/131360402): configure default logical devices.
  multi_process_runner.test_main()
def _op_dependencies(op):
  """Returns every operation `op` depends on (data and control combined)."""
  dependencies = []
  for node in itertools.chain(op.inputs, op.control_inputs):
    # Data inputs are tensors; map each one back to its producing op.
    producer = node.op if isinstance(node, ops.Tensor) else node
    assert isinstance(producer, ops.Operation)
    dependencies.append(producer)
  return dependencies
def topological_sort_operations(operations):
  """Returns a topological numbering of `operations`.

  Edges cover both data and control dependencies, and point from an
  operation to its dependencies.

  Args:
    operations: a list of tf.Operation in the same graph.

  Returns:
    A dict mapping each tf.Operation to its topological order (0-based).
  """
  # Kahn's algorithm: count, for every node, how many edges point at it.
  incoming = {op: 0 for op in operations}
  for op in operations:
    for dep in _op_dependencies(op):
      incoming[dep] = incoming.get(dep, 0) + 1
  # Seed the frontier with nodes nothing points at; process FIFO so the
  # resulting numbering is deterministic.
  frontier = [op for op, degree in incoming.items() if degree == 0]
  order = {}
  while frontier:
    op = frontier.pop(0)
    order[op] = len(order)
    for dep in _op_dependencies(op):
      incoming[dep] -= 1
      if incoming[dep] == 0:
        frontier.append(dep)
  assert len(order) == len(operations)
  return order
def _exists_dependency(start, end):
  """Returns whether there exists a dependency chain from start to end.

  Searches the dependency edges returned by _op_dependencies starting from
  `start`, looking for `end`.
  """
  # Track visited operations so shared (diamond-shaped) dependency subgraphs
  # are expanded only once; the previous unguarded search could re-explore the
  # same subgraph many times, and its `nexts[0], nexts[1:]` queue pattern
  # copied the list on every iteration.
  visited = {start}
  pending = [start]
  while pending:
    op = pending.pop()
    for next_op in _op_dependencies(op):
      if next_op == end:
        return True
      if next_op not in visited:
        visited.add(next_op)
        pending.append(next_op)
  return False
def assert_sequential_execution(order, operations):
  """Asserts there's a deterministic execution order between the operations.

  Args:
    order: a map from a tf.Operation to its topological order.
    operations: a list of operations that should be executed sequentially. It
      can be given in any order.
  """
  # Topological ordering guarantees that every dependency points from a
  # smaller topological order to a larger one. So it suffices to sort the
  # operations by topological order and verify each adjacent pair is linked
  # by a dependency path.
  ordered_ops = sorted(operations, key=order.__getitem__)
  for earlier, later in zip(ordered_ops, ordered_ops[1:]):
    if not _exists_dependency(earlier, later):
      print(earlier.graph.as_graph_def())
      raise AssertionError(
          "No dependency between {} and {}. Graph is dumped to stdout.".format(
              earlier.name, later.name))
| 36.010363 | 80 | 0.726475 |
4508cb0373bf214606e00078a7d58793158e28a1 | 3,928 | py | Python | experiment_data_handler.py | grdddj/Diploma-Thesis---Inverse-Heat-Transfer | 636182fa4c57913002675ed3afca8c1b3dc35e1c | [
"MIT"
] | 2 | 2019-09-09T18:49:14.000Z | 2021-11-15T23:41:00.000Z | experiment_data_handler.py | grdddj/Diploma-Thesis---Inverse-Heat-Transfer | 636182fa4c57913002675ed3afca8c1b3dc35e1c | [
"MIT"
] | null | null | null | experiment_data_handler.py | grdddj/Diploma-Thesis---Inverse-Heat-Transfer | 636182fa4c57913002675ed3afca8c1b3dc35e1c | [
"MIT"
] | null | null | null | import csv
| 38.509804 | 118 | 0.621436 |
450a6b5edd6e30d83bb61609d61f4702dee03bf9 | 23,457 | py | Python | hybrideb/_bineb.py | beckermr/hybrideb | a72d712020943dbbed35cb244f9e7f13fc6b2d4d | [
"BSD-3-Clause"
] | null | null | null | hybrideb/_bineb.py | beckermr/hybrideb | a72d712020943dbbed35cb244f9e7f13fc6b2d4d | [
"BSD-3-Clause"
] | null | null | null | hybrideb/_bineb.py | beckermr/hybrideb | a72d712020943dbbed35cb244f9e7f13fc6b2d4d | [
"BSD-3-Clause"
] | null | null | null | import sys
import numpy as np
import scipy.integrate
import scipy.special
from ._dblquad import dblquad
HAVE_PYGSL = False
try:
import pygsl.integrate
import pygsl.sf
HAVE_PYGSL = True
except ImportError:
pass
| 34.394428 | 87 | 0.336488 |
450a8b0c8c6133dd03a986ca11b5d16bc7850c24 | 9,945 | py | Python | test_fast_ndimage.py | grlee77/skimage_accel_demos | 96606ca27c8c622733958c01620bc55e616319db | [
"BSD-3-Clause"
] | null | null | null | test_fast_ndimage.py | grlee77/skimage_accel_demos | 96606ca27c8c622733958c01620bc55e616319db | [
"BSD-3-Clause"
] | null | null | null | test_fast_ndimage.py | grlee77/skimage_accel_demos | 96606ca27c8c622733958c01620bc55e616319db | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from numpy.testing import assert_allclose, run_module_suite
from fast_ndimage import (
median_filter, sobel, convolve, correlate, gaussian_filter,
gaussian_filter1d, uniform_filter, uniform_filter1d)
# TODO: test threading
if __name__ == "__main__":
run_module_suite()
| 44.2 | 173 | 0.627954 |
450aba433942ebcf2d5698d6bec5bdbf826e634d | 628 | py | Python | RecamanSequence/recaman_sequence.py | urosjevremovic/Recamans-Sequence | ab6a90c363271dc842f26ccd1b69168a9764de9e | [
"MIT"
] | null | null | null | RecamanSequence/recaman_sequence.py | urosjevremovic/Recamans-Sequence | ab6a90c363271dc842f26ccd1b69168a9764de9e | [
"MIT"
] | null | null | null | RecamanSequence/recaman_sequence.py | urosjevremovic/Recamans-Sequence | ab6a90c363271dc842f26ccd1b69168a9764de9e | [
"MIT"
] | null | null | null | import sys
from itertools import count, islice
def write_sequence(num):
    """Write Recaman's sequence to a text file.

    Writes the first ``num`` terms produced by the module's ``sequence()``
    iterable to ``recaman.txt`` in the working directory, one term per line.

    Args:
        num: number of sequence terms to write.
    """
    filename = "recaman.txt"
    with open(filename, mode="wt", encoding="utf-8") as f:
        f.writelines(f"{r}\n" for r in islice(sequence(), num))
if __name__ == '__main__':
    # Term count comes from the first command-line argument; raises if it is
    # missing or not an integer.
    write_sequence(num=int(sys.argv[1]))
| 20.258065 | 63 | 0.565287 |
450b1dc0c660308c26a032b98dc820700aea0504 | 533 | py | Python | 2018/aoc/d8/test.py | lukaselmer/adventofcode | b96ffc9040b63b338bca653830ba4ff7e90a8b2a | [
"MIT"
] | 1 | 2018-12-12T22:59:44.000Z | 2018-12-12T22:59:44.000Z | 2018/aoc/d8/test.py | lukaselmer/adventofcode | b96ffc9040b63b338bca653830ba4ff7e90a8b2a | [
"MIT"
] | null | null | null | 2018/aoc/d8/test.py | lukaselmer/adventofcode | b96ffc9040b63b338bca653830ba4ff7e90a8b2a | [
"MIT"
] | null | null | null | import unittest
from unittest.mock import mock_open, patch
from aoc.d8.main import metadata_sum, supervalue
DATA = "2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2\n"
if __name__ == "__main__":
unittest.main()
| 25.380952 | 63 | 0.682927 |
450b884de08b19e1126451db4abb472fc660d42a | 955 | py | Python | tests/calculators/openbabel/test_obabel_calculators.py | stevenbennett96/stko | ee340af4fc549d5a2c3e9cba8360661335efe0fd | [
"MIT"
] | null | null | null | tests/calculators/openbabel/test_obabel_calculators.py | stevenbennett96/stko | ee340af4fc549d5a2c3e9cba8360661335efe0fd | [
"MIT"
] | null | null | null | tests/calculators/openbabel/test_obabel_calculators.py | stevenbennett96/stko | ee340af4fc549d5a2c3e9cba8360661335efe0fd | [
"MIT"
] | 2 | 2020-05-08T17:51:25.000Z | 2020-05-11T09:03:24.000Z | import stko
import pytest
try:
from openbabel import openbabel
except ImportError:
openbabel = None
| 29.84375 | 62 | 0.709948 |
450c6f3fc4e2b60e9dc2c7675ce23445e63cfa2b | 1,773 | py | Python | psono/restapi/views/membership_decline.py | dirigeant/psono-server | a18c5b3c4d8bbbe4ecf1615b210d99fb77752205 | [
"Apache-2.0",
"CC0-1.0"
] | 48 | 2018-04-19T15:50:58.000Z | 2022-01-23T15:58:11.000Z | psono/restapi/views/membership_decline.py | dirigeant/psono-server | a18c5b3c4d8bbbe4ecf1615b210d99fb77752205 | [
"Apache-2.0",
"CC0-1.0"
] | 9 | 2018-09-13T14:56:18.000Z | 2020-01-17T16:44:33.000Z | psono/restapi/views/membership_decline.py | dirigeant/psono-server | a18c5b3c4d8bbbe4ecf1615b210d99fb77752205 | [
"Apache-2.0",
"CC0-1.0"
] | 11 | 2019-09-20T11:53:47.000Z | 2021-07-18T22:41:31.000Z | from rest_framework import status
from rest_framework.response import Response
from rest_framework.generics import GenericAPIView
from ..permissions import IsAuthenticated
from django.core.cache import cache
from django.conf import settings
from ..authentication import TokenAuthentication
from ..app_settings import (
MembershipDeclineSerializer,
)
| 30.568966 | 106 | 0.693739 |
450c825cc3c91a3ffe9479b87c3868422b01ed4b | 5,350 | py | Python | tests/data/samplers/bucket_batch_sampler_test.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | 11,433 | 2017-06-27T03:08:46.000Z | 2022-03-31T18:14:33.000Z | tests/data/samplers/bucket_batch_sampler_test.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | 4,006 | 2017-06-26T21:45:43.000Z | 2022-03-31T02:11:10.000Z | tests/data/samplers/bucket_batch_sampler_test.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | 2,560 | 2017-06-26T21:16:53.000Z | 2022-03-30T07:55:46.000Z | from allennlp.common import Params
from allennlp.data import Instance, Token, Batch
from allennlp.data.fields import TextField
from allennlp.data.samplers import BucketBatchSampler
from allennlp.data.data_loaders import MultiProcessDataLoader
from .sampler_test import SamplerTest
| 35.90604 | 95 | 0.601121 |
45112f2abb035e911415cb428c007f107a543914 | 3,332 | py | Python | tests/test_sphere.py | dkirkby/batoid | 734dccc289eb7abab77a62cdc14563ed5981753b | [
"BSD-2-Clause"
] | null | null | null | tests/test_sphere.py | dkirkby/batoid | 734dccc289eb7abab77a62cdc14563ed5981753b | [
"BSD-2-Clause"
] | null | null | null | tests/test_sphere.py | dkirkby/batoid | 734dccc289eb7abab77a62cdc14563ed5981753b | [
"BSD-2-Clause"
] | null | null | null | import batoid
import numpy as np
import math
from test_helpers import timer, do_pickle, all_obj_diff
if __name__ == '__main__':
test_properties()
test_sag()
test_intersect()
test_intersect_vectorized()
test_ne()
test_fail()
| 28.237288 | 88 | 0.545618 |
4511821928e83d509f748b6119d6ba8794c26a88 | 2,678 | py | Python | site_stats/middlewares.py | ganlvtech/blueking-django-startup-project | 042aa36b0757c0d3929d88bc23534f54963d333e | [
"MIT"
] | 1 | 2018-11-22T21:13:25.000Z | 2018-11-22T21:13:25.000Z | site_stats/middlewares.py | ganlvtech/blueking-django-startup-project | 042aa36b0757c0d3929d88bc23534f54963d333e | [
"MIT"
] | null | null | null | site_stats/middlewares.py | ganlvtech/blueking-django-startup-project | 042aa36b0757c0d3929d88bc23534f54963d333e | [
"MIT"
] | null | null | null | from django.http.response import HttpResponseForbidden
from .models import Counter, VisitLog
from .utils import get_client_ip
| 34.779221 | 79 | 0.596341 |
4511a28651e9a1abc5a51540a0f550556e34f6c9 | 1,753 | py | Python | gpu_bdb/bdb_tools/q24_utils.py | VibhuJawa/gpu-bdb | 13987b4ef8b92db3b9d2905dec7bd2fd81f42ae9 | [
"Apache-2.0"
] | 62 | 2020-05-14T13:33:02.000Z | 2020-10-29T13:28:26.000Z | gpu_bdb/bdb_tools/q24_utils.py | VibhuJawa/gpu-bdb | 13987b4ef8b92db3b9d2905dec7bd2fd81f42ae9 | [
"Apache-2.0"
] | 104 | 2020-07-01T21:07:42.000Z | 2020-11-13T16:36:04.000Z | gpu_bdb/bdb_tools/q24_utils.py | VibhuJawa/gpu-bdb | 13987b4ef8b92db3b9d2905dec7bd2fd81f42ae9 | [
"Apache-2.0"
] | 21 | 2020-05-14T14:44:40.000Z | 2020-11-07T12:08:28.000Z | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
ws_cols = ["ws_item_sk", "ws_sold_date_sk", "ws_quantity"]
item_cols = ["i_item_sk", "i_current_price"]
imp_cols = [
"imp_item_sk",
"imp_competitor_price",
"imp_start_date",
"imp_end_date",
"imp_sk",
]
ss_cols = ["ss_item_sk", "ss_sold_date_sk", "ss_quantity"]
| 34.372549 | 75 | 0.714775 |
451235b4dc66c44ae6da7b46c7877673b9a0d562 | 8,175 | py | Python | tests/test_compare.py | mys-lang/mys | 070431fdedd7a6bf537f3a30583cd44f644cdbf4 | [
"MIT"
] | 59 | 2021-01-06T14:21:40.000Z | 2022-02-22T21:49:39.000Z | tests/test_compare.py | mys-lang/mys | 070431fdedd7a6bf537f3a30583cd44f644cdbf4 | [
"MIT"
] | 31 | 2021-01-05T00:32:36.000Z | 2022-02-23T13:34:33.000Z | tests/test_compare.py | mys-lang/mys | 070431fdedd7a6bf537f3a30583cd44f644cdbf4 | [
"MIT"
] | 7 | 2021-01-03T11:53:03.000Z | 2022-02-22T17:49:42.000Z | from .utils import TestCase
from .utils import build_and_test_module
from .utils import transpile_source
| 34.493671 | 78 | 0.416147 |
4512ba1f9249887e49626300dacbdb0fac5b7fbe | 170 | py | Python | epidemioptim/environments/cost_functions/costs/__init__.py | goodhamgupta/EpidemiOptim | a4fe3fcfc2d82a10db16a168526982c03ca2c8d3 | [
"MIT"
] | null | null | null | epidemioptim/environments/cost_functions/costs/__init__.py | goodhamgupta/EpidemiOptim | a4fe3fcfc2d82a10db16a168526982c03ca2c8d3 | [
"MIT"
] | null | null | null | epidemioptim/environments/cost_functions/costs/__init__.py | goodhamgupta/EpidemiOptim | a4fe3fcfc2d82a10db16a168526982c03ca2c8d3 | [
"MIT"
] | null | null | null | from epidemioptim.environments.cost_functions.costs.death_toll_cost import DeathToll
from epidemioptim.environments.cost_functions.costs.gdp_recess_cost import GdpRecess
| 56.666667 | 84 | 0.905882 |
451354227c3d203ff804c452ae15b439b4e8924c | 1,587 | py | Python | BFS/70.py | wilbertgeng/LintCode_exercise | e7a343b746e98ca3b4bc7b36655af7291f3150db | [
"MIT"
] | null | null | null | BFS/70.py | wilbertgeng/LintCode_exercise | e7a343b746e98ca3b4bc7b36655af7291f3150db | [
"MIT"
] | null | null | null | BFS/70.py | wilbertgeng/LintCode_exercise | e7a343b746e98ca3b4bc7b36655af7291f3150db | [
"MIT"
] | null | null | null | """70 Binary Tree Level Order Traversal II"""
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
| 24.415385 | 61 | 0.477001 |
451370b67d994c4274e51949234d8fd9c7be285d | 2,554 | py | Python | yuque_py/clients/client.py | kingJiaYouwen/yuque-py | 451ec6b88860a984de9456d48c0af341676513a3 | [
"MIT"
] | null | null | null | yuque_py/clients/client.py | kingJiaYouwen/yuque-py | 451ec6b88860a984de9456d48c0af341676513a3 | [
"MIT"
] | null | null | null | yuque_py/clients/client.py | kingJiaYouwen/yuque-py | 451ec6b88860a984de9456d48c0af341676513a3 | [
"MIT"
] | null | null | null | import typing
import requests
from urllib.parse import urlencode
from .abstract_client import AbstractClient
from yuque_py.exceptions.request_error import RequestError
| 32.74359 | 85 | 0.643696 |
45138db5ed51843c9a5afaaec91f905c3ac8de23 | 671 | py | Python | results/views/sports.py | JukkaKarvonen/sal-kiti | 3dcff71552ab323e3c97eccf502c0d72eb683967 | [
"MIT"
] | 1 | 2021-06-12T08:46:32.000Z | 2021-06-12T08:46:32.000Z | results/views/sports.py | JukkaKarvonen/sal-kiti | 3dcff71552ab323e3c97eccf502c0d72eb683967 | [
"MIT"
] | 8 | 2020-07-01T15:06:52.000Z | 2022-02-20T09:11:23.000Z | results/views/sports.py | JukkaKarvonen/sal-kiti | 3dcff71552ab323e3c97eccf502c0d72eb683967 | [
"MIT"
] | 3 | 2020-03-01T17:02:24.000Z | 2020-07-05T14:37:59.000Z | from dry_rest_permissions.generics import DRYPermissions
from rest_framework import viewsets
from results.models.sports import Sport
from results.serializers.sports import SportSerializer
| 20.96875 | 56 | 0.728763 |
45148079bc72efab4e9fdeacd43d659e9726c7f1 | 1,332 | py | Python | tests/pvsystemprofiler/test_equation_of_time.py | slacgismo/pv-system-profiler | 5ab663cd186511605bbb1e6aa387c8b897e47d83 | [
"BSD-2-Clause"
] | 4 | 2020-08-18T14:28:44.000Z | 2021-10-14T13:17:03.000Z | tests/pvsystemprofiler/test_equation_of_time.py | slacgismo/pv-system-profiler | 5ab663cd186511605bbb1e6aa387c8b897e47d83 | [
"BSD-2-Clause"
] | 10 | 2020-04-14T18:57:03.000Z | 2021-09-14T15:26:24.000Z | tests/pvsystemprofiler/test_equation_of_time.py | slacgismo/pv-system-profiler | 5ab663cd186511605bbb1e6aa387c8b897e47d83 | [
"BSD-2-Clause"
] | null | null | null | import unittest
import os
from pathlib import Path
import numpy as np
path = Path.cwd().parent.parent
os.chdir(path)
from pvsystemprofiler.utilities.equation_of_time import eot_da_rosa, eot_duffie
if __name__ == '__main__':
unittest.main()
| 32.487805 | 101 | 0.722973 |
4514a6b0a130ee0e5e4417b6086e78904e058a13 | 1,942 | py | Python | raiden/tests/unit/transfer/test_node.py | gasparmedina/raiden | 649c43b7233b9e95f13831e61d5db187d583367a | [
"MIT"
] | null | null | null | raiden/tests/unit/transfer/test_node.py | gasparmedina/raiden | 649c43b7233b9e95f13831e61d5db187d583367a | [
"MIT"
] | null | null | null | raiden/tests/unit/transfer/test_node.py | gasparmedina/raiden | 649c43b7233b9e95f13831e61d5db187d583367a | [
"MIT"
] | 4 | 2019-01-24T14:45:06.000Z | 2019-04-01T16:12:40.000Z | from raiden.constants import EMPTY_MERKLE_ROOT
from raiden.tests.utils.factories import HOP1, HOP2, UNIT_SECRETHASH, make_block_hash
from raiden.transfer.events import ContractSendChannelBatchUnlock
from raiden.transfer.node import is_transaction_effect_satisfied
from raiden.transfer.state_change import ContractReceiveChannelBatchUnlock
| 45.162791 | 91 | 0.785273 |
451505779ddfa18b109340abfbe8b097a645a054 | 2,826 | py | Python | tfx/utils/channel_test.py | HassanDayoub/tfx | dc9221abbb8dad991d1ae22fb91876da1290efae | [
"Apache-2.0"
] | 2 | 2019-07-08T20:56:13.000Z | 2020-08-04T17:07:26.000Z | tfx/utils/channel_test.py | HassanDayoub/tfx | dc9221abbb8dad991d1ae22fb91876da1290efae | [
"Apache-2.0"
] | null | null | null | tfx/utils/channel_test.py | HassanDayoub/tfx | dc9221abbb8dad991d1ae22fb91876da1290efae | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.utils.channel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# Standard Imports
import tensorflow as tf
from tfx.utils import channel
from tfx.utils import types
if __name__ == '__main__':
tf.test.main()
| 34.888889 | 74 | 0.754777 |
45154cfb764af63ed99ff0eaf6a51b8393aa6827 | 1,870 | py | Python | lyrics.py | samiraafreen/lyrics-generator | 37f894bdb8986c153985104af83e12ef8d6dac07 | [
"MIT"
] | null | null | null | lyrics.py | samiraafreen/lyrics-generator | 37f894bdb8986c153985104af83e12ef8d6dac07 | [
"MIT"
] | null | null | null | lyrics.py | samiraafreen/lyrics-generator | 37f894bdb8986c153985104af83e12ef8d6dac07 | [
"MIT"
] | null | null | null | import configparser
import requests
from bs4 import BeautifulSoup
token = getAccessToken()
#searchMusicArtist("drake")
#print(getArtistID('drake'))
#print(getTopTenSongs('drake'))
#print(scrapeLyricText('drake')) | 28.333333 | 95 | 0.648128 |
45156df577791ff159e318cbc3c7550a59d3c192 | 2,968 | py | Python | src/slack/slack_json_factories/dialog_json/comment.py | yejingyu/jama-slack-integration | 55d1366d6cde3962e788afebe0001747cbe11fe8 | [
"MIT"
] | 1 | 2019-07-30T01:41:53.000Z | 2019-07-30T01:41:53.000Z | src/slack/slack_json_factories/dialog_json/comment.py | yejingyu/jama-slack-integration | 55d1366d6cde3962e788afebe0001747cbe11fe8 | [
"MIT"
] | 4 | 2018-11-16T05:56:06.000Z | 2018-11-29T05:07:52.000Z | src/slack/slack_json_factories/dialog_json/comment.py | yejingyu/jama-slack-integration | 55d1366d6cde3962e788afebe0001747cbe11fe8 | [
"MIT"
] | 6 | 2018-11-08T03:49:28.000Z | 2019-04-29T19:53:25.000Z | def comment_dialog(data=None):
"""
Function takes in a JSON object, and uses the following format:
https://api.slack.com/dialogs
Returns created JSON object, then is sent back to Slack.
"""
text = ""
state = ""
project_holder = None
item_holder = None
if data is not None:
if data["type"] == "message_action":
text = data["message"]["text"] + "\n"
# get attachment images from the massage
if "attachments" in data["message"]:
text += "Attachments:\n"
for att in data["message"]["attachments"]:
text += att["title"] + ":\n"
if "image_url" in att:
text += att["image_url"] + "\n"
# get files from the massage
if "files" in data["message"]:
text += "Attach files:\n"
for file in data["message"]["files"]:
text += file["title"] + ":\n"
text += file["url_private"] + "\n"
if data["type"] == "interactive_message":
if data["callback_id"] == "bot_project":
label = data["original_message"]["attachments"][0]["fallback"]
project_holder = [
{
"label": label,
"value": data["actions"][0]["value"]
}
]
state = data["actions"][0]["value"]
elif data["callback_id"] == "bot_item":
label = data["original_message"]["attachments"][0]["fallback"]
item_holder = [
{
"label": label,
"value": data["actions"][0]["value"]
}
]
return {
"title": "JamaConnect - Comment",
"submit_label": "Submit",
"callback_id": "comment",
"elements": [
{
"label": "Search Projects:",
"type": "select",
"name": "project",
"optional": "true",
"data_source": "external",
"selected_options": project_holder
},
{
"label": "Project ID:",
"type": "select",
"name": "project_id",
"optional": "true",
"data_source": "external",
"selected_options": project_holder
},
{
"label": "Item ID or Name:",
"type": "select",
"name": "item",
"data_source": "external",
"min_query_length": 0,
"selected_options": item_holder
},
{
"type": "textarea",
"label": "Comment",
"name": "comment",
"value": text
}
],
"state": state
}
| 34.511628 | 78 | 0.412062 |
45168a0a61e3273b57493bda1e9d073423e6c698 | 8,105 | py | Python | tests/hahm/test_config_flow.py | Voxxie/custom_homematic | d199f1fcc565febe42e686198a9eb33ef4d755f6 | [
"MIT"
] | null | null | null | tests/hahm/test_config_flow.py | Voxxie/custom_homematic | d199f1fcc565febe42e686198a9eb33ef4d755f6 | [
"MIT"
] | null | null | null | tests/hahm/test_config_flow.py | Voxxie/custom_homematic | d199f1fcc565febe42e686198a9eb33ef4d755f6 | [
"MIT"
] | null | null | null | """Test the HaHomematic config flow."""
from typing import Any
from unittest.mock import patch
from homeassistant import config_entries
from homeassistant.components.hahm.config_flow import (
ATTR_BICDOS_RF_ENABLED,
ATTR_BICDOS_RF_PORT,
ATTR_HMIP_RF_ENABLED,
ATTR_HOST,
ATTR_HS485D_ENABLED,
ATTR_INSTANCE_NAME,
ATTR_PASSWORD,
ATTR_PORT,
ATTR_TLS,
ATTR_USERNAME,
ATTR_VIRTUAL_DEVICES_ENABLED,
IF_BIDCOS_RF_NAME,
IF_HMIP_RF_NAME,
IF_HS485D_NAME,
IF_VIRTUAL_DEVICES_NAME,
CannotConnect,
InvalidAuth,
)
from homeassistant.components.hahm.const import DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM
TEST_INSTANCE_NAME = "pytest"
TEST_HOST = "1.1.1.1"
TEST_USERNAME = "test-username"
TEST_PASSWORD = "test-password"
| 33.217213 | 84 | 0.664405 |
451695e3856e2d5dd4a42abbf9ad2c012826eaed | 792 | py | Python | komapy/decorators.py | bpptkg/komapy | a33fce5f4fbfacf085fd1f8043a57564be192a8d | [
"MIT"
] | null | null | null | komapy/decorators.py | bpptkg/komapy | a33fce5f4fbfacf085fd1f8043a57564be192a8d | [
"MIT"
] | null | null | null | komapy/decorators.py | bpptkg/komapy | a33fce5f4fbfacf085fd1f8043a57564be192a8d | [
"MIT"
] | null | null | null | from functools import partial
def register_as_decorator(func):
"""
Register extensions, transforms, or addons function as decorator.
"""
return wrapper
| 26.4 | 78 | 0.616162 |
4516fa710b28e684423724f2bca16759c34404c0 | 5,883 | py | Python | Applications/Examples/python/market_price_authentication.py | Refinitiv/websocket-api | 15a5957510d2bb246cbbf65ed999ff0089b3a65d | [
"Apache-2.0"
] | 36 | 2019-01-08T17:43:38.000Z | 2022-03-11T21:59:58.000Z | Applications/Examples/python/market_price_authentication.py | thomsonreuters/websocket-api | 52c940a01d40a6c073d35922d8214d927327caa4 | [
"Apache-2.0"
] | 14 | 2019-12-27T15:58:12.000Z | 2021-11-03T21:39:27.000Z | Applications/Examples/python/market_price_authentication.py | thomsonreuters/websocket-api | 52c940a01d40a6c073d35922d8214d927327caa4 | [
"Apache-2.0"
] | 28 | 2019-01-22T21:43:15.000Z | 2022-03-29T11:43:05.000Z | #|-----------------------------------------------------------------------------
#| This source code is provided under the Apache 2.0 license --
#| and is provided AS IS with no warranty or guarantee of fit for purpose. --
#| See the project's LICENSE.md for details. --
#| Copyright (C) 2017-2020 Refinitiv. All rights reserved. --
#|-----------------------------------------------------------------------------
#!/usr/bin/env python
""" Simple example of outputting Market Price JSON data using Websockets with authentication """
import sys
import time
import getopt
import requests
import socket
import json
import websocket
import threading
from threading import Thread, Event
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Global Default Variables
app_id = '555'
auth_hostname = '127.0.0.1'
auth_port = '8443'
hostname = '127.0.0.1'
password = ''
position = socket.gethostbyname(socket.gethostname())
token = ''
user = ''
port = '15000'
# Global Variables
web_socket_app = None
web_socket_open = False
def process_message(ws, message_json):
    """Dispatch one decoded JSON message based on its Type field."""
    message_type = message_json['Type']
    if message_type == "Refresh":
        # Only Login-domain refreshes are handled; they trigger the item
        # request.
        if message_json.get('Domain') == "Login":
            process_login_response(ws, message_json)
    elif message_type == "Ping":
        # Answer server pings to keep the connection alive.
        pong_json = {'Type': 'Pong'}
        ws.send(json.dumps(pong_json))
        print("SENT:")
        print(json.dumps(pong_json, sort_keys=True, indent=2, separators=(',', ':')))
def process_login_response(ws, message_json):
    """ Send item request """
    # A Login refresh means the session is established; immediately request
    # the market-price item. The login payload itself is not inspected.
    send_market_price_request(ws)
def send_market_price_request(ws):
    """Create and send a simple Market Price request for TRI.N."""
    request = {
        'ID': 2,
        'Key': {
            'Name': 'TRI.N',
        },
    }
    payload = json.dumps(request)
    ws.send(payload)
    print("SENT:")
    print(json.dumps(request, sort_keys=True, indent=2, separators=(',', ':')))
def on_message(ws, message):
    """Websocket callback: decode an incoming frame and process its messages."""
    print("RECEIVED: ")
    decoded = json.loads(message)
    print(json.dumps(decoded, sort_keys=True, indent=2, separators=(',', ':')))
    # A single frame may batch several messages; handle each in turn.
    for single_message in decoded:
        process_message(ws, single_message)
def on_error(ws, error):
    """ Called when websocket error has occurred; prints it to stdout """
    print(error)
def on_close(ws, close_status_code, close_msg):
    """ Called when websocket is closed """
    global web_socket_open
    # Flag the connection as closed so the rest of the script stops using it.
    web_socket_open = False
    print("WebSocket Closed")
def on_open(ws):
    """ Called when handshake is complete and websocket is open, send login """
    print("WebSocket successfully connected!")
    global web_socket_open
    # Mark the connection usable. NOTE(review): despite the docstring, no
    # login message is sent here -- authentication appears to be carried by
    # the cookie supplied at connection time; confirm this is intentional.
    web_socket_open = True
if __name__ == "__main__":
    # Get command line parameters
    try:
        opts, args = getopt.getopt(sys.argv[1:], "", ["help", "hostname=", "port=", "app_id=", "user=", "password=", "position=", "auth_hostname=", "auth_port="])
    except getopt.GetoptError:
        print('Usage: market_price_authentication.py [--hostname hostname] [--port port] [--app_id app_id] [--user user] [--password password] [--position position] [--auth_hostname auth_hostname] [--auth_port auth_port] [--help]')
        sys.exit(2)
    for opt, arg in opts:
        # Each option is matched against a one-element tuple. The previous
        # code wrote `opt in ("--help")`, where ("--help") is just a
        # parenthesized string, so this was a substring test; the trailing
        # comma makes it an exact membership test.
        if opt in ("--help",):
            print('Usage: market_price_authentication.py [--hostname hostname] [--port port] [--app_id app_id] [--user user] [--password password] [--position position] [--auth_hostname auth_hostname] [--auth_port auth_port] [--help]')
            sys.exit(0)
        elif opt in ("--hostname",):
            hostname = arg
        elif opt in ("--port",):
            port = arg
        elif opt in ("--app_id",):
            app_id = arg
        elif opt in ("--user",):
            user = arg
        elif opt in ("--password",):
            password = arg
        elif opt in ("--position",):
            position = arg
        elif opt in ("--auth_hostname",):
            auth_hostname = arg
        elif opt in ("--auth_port",):
            auth_port = arg

    # Send login info for authentication token
    print("Sending authentication request...")
    r = requests.post('https://{}:{}/getToken'.format(auth_hostname, auth_port),
                      data={'username': user, 'password': password},
                      verify=True)
    auth_json = r.json()
    print("RECEIVED:")
    print(json.dumps(auth_json, sort_keys=True, indent=2, separators=(',', ':')))

    if auth_json['success'] is True:
        token = r.cookies['AuthToken']
        print('Authentication Succeeded. Received AuthToken: {}'.format(token))
        # Token, position and application id travel to the websocket server
        # as cookies on the upgrade request.
        cookie = "AuthToken={};AuthPosition={};applicationId={};".format(token, position, app_id)

        # Start websocket handshake
        ws_address = "ws://{}:{}/WebSocket".format(hostname, port)
        print("Connecting to WebSocket " + ws_address + " ...")
        web_socket_app = websocket.WebSocketApp(ws_address, on_message=on_message,
                                                on_error=on_error,
                                                on_close=on_close,
                                                subprotocols=['tr_json2'],
                                                cookie=cookie)
        web_socket_app.on_open = on_open

        # Event loop: the websocket runs in a daemon-less worker thread while
        # the main thread idles, so Ctrl-C can close the socket cleanly.
        wst = threading.Thread(target=web_socket_app.run_forever)
        wst.start()

        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            web_socket_app.close()
    else:
        print('Authentication failed')
4517ac136f86ccb5533a40509e2b215d308bd04d | 571 | py | Python | cardDao.py | Blueredemption/Inventory | 8d61671071f89b51b3e34c5eb673200fc8baffc0 | [
"MIT"
] | null | null | null | cardDao.py | Blueredemption/Inventory | 8d61671071f89b51b3e34c5eb673200fc8baffc0 | [
"MIT"
] | null | null | null | cardDao.py | Blueredemption/Inventory | 8d61671071f89b51b3e34c5eb673200fc8baffc0 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
if __name__ == '__main__':
main() | 16.794118 | 54 | 0.528897 |
451838fb8b3acc8747399824b9d60c1c29d67e5c | 3,416 | py | Python | test_kmethods.py | quinlan-lab/kmertools | 93e90919c26e2fc899a905b77748857404389e13 | [
"MIT"
] | 1 | 2020-08-25T01:35:38.000Z | 2020-08-25T01:35:38.000Z | test_kmethods.py | quinlan-lab/kmertools | 93e90919c26e2fc899a905b77748857404389e13 | [
"MIT"
] | null | null | null | test_kmethods.py | quinlan-lab/kmertools | 93e90919c26e2fc899a905b77748857404389e13 | [
"MIT"
] | 1 | 2021-07-13T23:21:56.000Z | 2021-07-13T23:21:56.000Z | from unittest import TestCase
from eskedit.kmethods import *
| 30.5 | 63 | 0.660422 |
45187c05fe7efccfb6cd2366904c5d7b0e9849c8 | 8,747 | py | Python | distributed_social_network/users/views.py | leevtori/CMPUT404-project | 52214288855ae4b3f05b8d17e67a2686debffb19 | [
"Apache-2.0"
] | null | null | null | distributed_social_network/users/views.py | leevtori/CMPUT404-project | 52214288855ae4b3f05b8d17e67a2686debffb19 | [
"Apache-2.0"
] | 51 | 2019-03-22T00:31:06.000Z | 2021-06-10T21:17:30.000Z | distributed_social_network/users/views.py | leevtori/CMPUT404-project | 52214288855ae4b3f05b8d17e67a2686debffb19 | [
"Apache-2.0"
] | 1 | 2019-08-03T14:41:22.000Z | 2019-08-03T14:41:22.000Z | from django.urls import reverse_lazy, reverse
from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect
from requests.auth import HTTPBasicAuth
from .models import User, Node
from .forms import CustomUserCreationForm, UserCreationForm
from django.views.generic import ListView
from django.views.generic.edit import UpdateView
from django.views import View
from django.views import generic
import requests
from users.serializers import *
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.mixins import LoginRequiredMixin
import json
| 36.598326 | 115 | 0.658169 |
4519b87ff604566c931c34b6c30b719a965b014c | 1,569 | py | Python | util.py | reckenrode/ParanoiaCharGen | 07aa5a5cf1699ae47edab95ee5490b2f1d48c501 | [
"BSD-2-Clause"
] | 1 | 2021-12-20T00:03:34.000Z | 2021-12-20T00:03:34.000Z | util.py | reckenrode/ParanoiaCharGen | 07aa5a5cf1699ae47edab95ee5490b2f1d48c501 | [
"BSD-2-Clause"
] | null | null | null | util.py | reckenrode/ParanoiaCharGen | 07aa5a5cf1699ae47edab95ee5490b2f1d48c501 | [
"BSD-2-Clause"
] | null | null | null | #from weakref import WeakValueDictionary
import random, operator, weakref
def format_service_group(group):
    """Pretty-print a service group as 'name [firm]'.

    If the group has a spy cover, the cover group and its firm are shown
    instead; otherwise, if it is spying on another group, that group and its
    firm are shown; otherwise the group itself and its own firm.
    """
    rstr = '%s [%s]'
    # `is not None` replaces the non-idiomatic `!= None` comparisons.
    if group.cover is not None:  # Spy for IntSec
        return rstr % (group.cover, group.cover.firm)
    elif group.spyon is not None:
        return rstr % (group.spyon, group.spyon.firm)
    else:
        return rstr % (group, group.firm)
def build_skill_table(skill):
    """Make an nx2 table of the skill's specs, where n = len(skill.specs).

    Rows are [spec, value] pairs sorted by spec name. Unless the skill
    contains an 'Energy Weapons' spec, four blank fill-in rows are appended
    so the printed sheet leaves space for handwritten specs.
    """
    table = [[spec, skill[spec]] for spec in skill]
    # Sort by spec name with a key function. The previous Python 2 cmp-style
    # call `table.sort(lambda x, y: cmp(x[0], y[0]))` is invalid in Python 3
    # (cmp was removed and sort() takes no positional comparator); the
    # resulting order is identical.
    table.sort(key=lambda row: row[0])
    if 'Energy Weapons' not in skill:
        table.append(['________________________', '__'])
        table.append(['________________________', '__'])
        table.append(['________________________', '__'])
        table.append(['________________________', '__'])
    return table
451a01d6bf880434d082fec4bb6d94642deb72ee | 2,195 | py | Python | moex/tests/test_service.py | ChanTerelLy/broker-account-analist | a723c83fe9a924905eb0754b4acb1231b31f9c87 | [
"MIT"
] | null | null | null | moex/tests/test_service.py | ChanTerelLy/broker-account-analist | a723c83fe9a924905eb0754b4acb1231b31f9c87 | [
"MIT"
] | 11 | 2021-02-21T19:39:41.000Z | 2021-06-13T16:29:47.000Z | moex/tests/test_service.py | ChanTerelLy/broker-account-analist | a723c83fe9a924905eb0754b4acb1231b31f9c87 | [
"MIT"
] | 2 | 2021-11-16T16:31:37.000Z | 2022-02-11T02:55:37.000Z | import asyncio
import unittest
from moex.service import Cbr, Moex
if __name__ == '__main__':
unittest.main() | 34.296875 | 119 | 0.655125 |
451aa8ccde2d865dd652ad209fefdf68afe0ad46 | 2,820 | py | Python | streamlit_app.py | guim4dev/education-cv | ffd880090de28e36849b4d53c424c2009791aaf5 | [
"MIT"
] | null | null | null | streamlit_app.py | guim4dev/education-cv | ffd880090de28e36849b4d53c424c2009791aaf5 | [
"MIT"
] | null | null | null | streamlit_app.py | guim4dev/education-cv | ffd880090de28e36849b4d53c424c2009791aaf5 | [
"MIT"
] | null | null | null | import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
st.title("Relatrio de Aula")
df = pd.read_csv('data/emocoes.csv')
agg = pd.read_csv('data/agg.csv')
Engajado = df[df['emocao'] == 'Engajado']
Engajado_agg = Engajado.groupby(['emocao', 'pessoa']).size().reset_index(name='size')
Engajado_agg = Engajado_agg.sort_values(by=['size'], ascending=False)
emotions_count = df.value_counts('emocao').reset_index()
login_blocks = generate_login_block()
password = login(login_blocks)
drive_block = st.empty()
google_drive = drive_block.text_input('Link da aula para processamento', '')
id_block = st.empty()
if google_drive != '':
drive_block.empty()
id_block.text("ID da Aula processada: 182916f6-756d-40d6-95fc-3283ba5efdf8")
if is_authenticated(password):
id_block.empty()
drive_block.empty()
clean_blocks(login_blocks)
st.balloons()
graph_columns()
elif password:
st.info("Aula no encontrada. Por favor, insira um ID vlido.") | 32.413793 | 134 | 0.699291 |
451af08e7abf6ac7cd2e6c4f9832ea860419a281 | 4,410 | py | Python | other_nominate/register_other_nominate.py | kondounagi/japanese_movies_dataset | 349f217cd04e07fd44a401ecb2f2dcaea7bc2e5e | [
"MIT"
] | 1 | 2019-08-05T21:43:09.000Z | 2019-08-05T21:43:09.000Z | other_nominate/register_other_nominate.py | kondounagi/japanese_movies_dataset | 349f217cd04e07fd44a401ecb2f2dcaea7bc2e5e | [
"MIT"
] | 3 | 2020-03-31T05:53:37.000Z | 2021-12-13T20:07:39.000Z | other_nominate/register_other_nominate.py | kondounagi/japanese_movies_dataset | 349f217cd04e07fd44a401ecb2f2dcaea7bc2e5e | [
"MIT"
] | null | null | null | import argparse
import json
import os
from os import listdir
from os.path import isfile, join
if __name__ == '__main__':
main()
| 35.28 | 80 | 0.460544 |
451b7210c711e56db4043ee4381d442e6b1a9d25 | 5,477 | py | Python | PC6/table.py | ReneCapella/pythonTinkering | 93a5080e8ef6f67fe8ca0b67eb1fb27370beb26a | [
"MIT"
] | null | null | null | PC6/table.py | ReneCapella/pythonTinkering | 93a5080e8ef6f67fe8ca0b67eb1fb27370beb26a | [
"MIT"
] | null | null | null | PC6/table.py | ReneCapella/pythonTinkering | 93a5080e8ef6f67fe8ca0b67eb1fb27370beb26a | [
"MIT"
] | null | null | null |
col = 0
row = 0
header = ""
headers = []
border_size = -1
while col < 1 or col > 100:
col = input("How many columns do you want (1 to 100)? ")
col = int(col)
while row < 1 or col > 100:
row = input("How many rows do you want (1 to 100)? ")
row = int(row)
while header != "Y" and header != "N":
header = input("Do you want headers? Y/N ")
# If headers are wanted, give them names
if header == "Y":
header = True
for n in range(col):
headers.append(input("Header #" + str(n + 1) + ": "))
else:
header = False
while border_size < 0 or border_size > 5:
border_size = input("Enter a number for border size 1 to 5 ")
border_size = int(border_size)
# DEMOOOOOO
table = Table(col, row, headers, border_size)
table.make_table()
table.add_headers(["1", "2", "4"])
print("Here are your current headers: ")
print(table.get_headers())
print("Here is your current border size: ")
print(table.get_border_size())
table.make_table()
table.delete_cols(3, ["1", "2", "4"])
print("Here are your headers now: ")
print(table.get_headers())
print("Let's check your column count: ")
print(table.get_col_count())
# table.delete_cols(4) # should throw error
table.set_row_count(3)
table.add_rows(5)
print("Row count should be 8 because I just set it to 3 and added 5: ")
print(table.get_row_count())
| 33.601227 | 95 | 0.617126 |
451d32ddace64c14dc2a20c09b0af3249bd93791 | 676 | py | Python | api/db/models/child_datum.py | peuan-testai/opentestdata-api | 9e9b12e73abc30a2031eb49d51d5b9d5412ed6ba | [
"MIT"
] | 15 | 2019-06-27T02:48:02.000Z | 2020-11-29T09:01:29.000Z | api/db/models/child_datum.py | peuan-testai/opentestdata-api | 9e9b12e73abc30a2031eb49d51d5b9d5412ed6ba | [
"MIT"
] | 16 | 2019-07-26T19:51:55.000Z | 2022-03-12T00:00:24.000Z | api/db/models/child_datum.py | peuan-testai/opentestdata-api | 9e9b12e73abc30a2031eb49d51d5b9d5412ed6ba | [
"MIT"
] | 7 | 2019-06-26T11:10:50.000Z | 2020-09-04T08:52:58.000Z | from .. import db
from .base import BaseModel
| 30.727273 | 91 | 0.659763 |
451d5be5b8b8cc7a0af4de177c971df7cd94b93b | 13,474 | py | Python | mate3/devices.py | kodonnell/mate3 | 6c378cc7d5eee59e322075b7fcdc91c49b24265f | [
"MIT"
] | null | null | null | mate3/devices.py | kodonnell/mate3 | 6c378cc7d5eee59e322075b7fcdc91c49b24265f | [
"MIT"
] | null | null | null | mate3/devices.py | kodonnell/mate3 | 6c378cc7d5eee59e322075b7fcdc91c49b24265f | [
"MIT"
] | null | null | null | import dataclasses as dc
from typing import Any, Dict, Iterable, List, Optional
from loguru import logger
from mate3.field_values import FieldValue, ModelValues
from mate3.read import AllModelReads
from mate3.sunspec.fields import IntegerField
from mate3.sunspec.model_base import Model
from mate3.sunspec.models import (
ChargeControllerConfigurationModel,
ChargeControllerModel,
FLEXnetDCConfigurationModel,
FLEXnetDCRealTimeModel,
FXInverterConfigurationModel,
FXInverterRealTimeModel,
OutBackModel,
OutBackSystemControlModel,
RadianInverterConfigurationModel,
SinglePhaseRadianInverterRealTimeModel,
SplitPhaseRadianInverterRealTimeModel,
)
from mate3.sunspec.values import (
ChargeControllerConfigurationValues,
ChargeControllerValues,
FLEXnetDCConfigurationValues,
FLEXnetDCRealTimeValues,
FXInverterConfigurationValues,
FXInverterRealTimeValues,
OPTICSPacketStatisticsValues,
OutBackSystemControlValues,
OutBackValues,
RadianInverterConfigurationValues,
SinglePhaseRadianInverterRealTimeValues,
SplitPhaseRadianInverterRealTimeValues,
)
class DeviceValues:
"""
This is basically a way for storing state (i.e. current values) about all devices. It's the main interface for users
to access values etc.
"""
def _get_single_device(self, name: str) -> ModelValues:
"""
Helper function so that e.g. if there's only one charge controller in self.charge_controllers, you can call
self.charge_controller to get it.
"""
devices = getattr(self, f"{name}s")
if len(devices) != 1:
raise RuntimeError(
(
f"Must be one, and only one, {name} device to be able to use `{name}` attribute - but there are "
f"{len(devices)}"
)
)
return list(devices.values())[0]
def update(self, all_reads: AllModelReads) -> None:
"""
This is the key method, and is used to update the state of the devices with new values.
"""
# Update mate:
self._update_model_and_config(
all_reads=all_reads,
model_class=OutBackModel,
config_class=OutBackSystemControlModel,
config_values_class=OutBackSystemControlValues,
device_values=self.mate3s,
device_class=Mate3DeviceValues,
)
# Charge controller
self._update_model_and_config(
all_reads=all_reads,
model_class=ChargeControllerModel,
config_class=ChargeControllerConfigurationModel,
config_values_class=ChargeControllerConfigurationValues,
device_values=self.charge_controllers,
device_class=ChargeControllerDeviceValues,
)
# FNDCs
self._update_model_and_config(
all_reads=all_reads,
model_class=FLEXnetDCRealTimeModel,
config_class=FLEXnetDCConfigurationModel,
config_values_class=FLEXnetDCConfigurationValues,
device_values=self.fndcs,
device_class=FNDCDeviceValues,
)
# FX inverters
self._update_model_and_config(
all_reads=all_reads,
model_class=FXInverterRealTimeModel,
config_class=FXInverterConfigurationModel,
config_values_class=FXInverterConfigurationValues,
device_values=self.fx_inverters,
device_class=FXInverterDeviceValues,
)
# Single phase radian inverters
self._update_model_and_config(
all_reads=all_reads,
model_class=SinglePhaseRadianInverterRealTimeModel,
config_class=RadianInverterConfigurationModel,
config_values_class=RadianInverterConfigurationValues,
device_values=self.single_phase_radian_inverters,
device_class=SinglePhaseRadianInverterDeviceValues,
)
# Split phase radian inverters
self._update_model_and_config(
all_reads=all_reads,
model_class=SplitPhaseRadianInverterRealTimeModel,
config_class=RadianInverterConfigurationModel,
config_values_class=RadianInverterConfigurationValues,
device_values=self.split_phase_radian_inverters,
device_class=SplitPhaseRadianInverterDeviceValues,
)
def _update_model_and_config(
self,
all_reads: AllModelReads,
model_class: Model,
config_class: Model,
config_values_class: ModelValues,
device_values: Dict[int, ModelValues],
device_class: ModelValues,
) -> None:
model_field_reads_per_port = all_reads.get_reads_per_model_by_port(model_class)
config_field_reads_per_port = all_reads.get_reads_per_model_by_port(config_class)
# OK, there's a few options around whether the above variables contain anything.
# - Both present, then we're good - continue. All devices should have a configuration class.
# - Model isn't present - this means the device itself wasn't detected, so ignore. Note that usually this would
# imply the config class is null (since the config shouldn't be there if the device isn't) except in the case
# of Radian inverters, as the same config class is shared across both single and split phase devices (so that
# if only one type is present, the other will have empty model values and non-empty config).
# - Both are missing - this is covered by the above.
# So, the short summary is we only care about devices where the model field values are present, and in all other
# cases there *should* be config field values too.
if model_field_reads_per_port is None:
return
else:
if config_field_reads_per_port is None:
logger.warning(
(
f"Only model ({model_class}) field values and no config ({config_class}) fields were read. This"
f" is undefined behaviour, so ignoring {model_class}."
)
)
return
# Check model and config have the same ports:
if set(model_field_reads_per_port).symmetric_difference(set(config_field_reads_per_port)):
raise RuntimeError("Config and models have different ports!")
# Create/update any devices for the given ports:
for port in model_field_reads_per_port:
model_reads_this_port = model_field_reads_per_port[port]
config_reads_this_port = config_field_reads_per_port[port]
if port not in device_values:
# OK, it's new - create it:
config_values = self._create_new_model_values(
model=config_class,
values_class=config_values_class,
device_address=config_reads_this_port["did"].address,
)
device_values[port] = self._create_new_model_values(
model=model_class,
values_class=device_class,
device_address=model_reads_this_port["did"].address,
config=config_values,
)
# Either way, update the field values:
for reads, device_val in (
(model_reads_this_port, device_values[port]),
(config_reads_this_port, device_values[port].config),
):
for field_name, field_read in reads.items():
field_value = getattr(device_val, field_name)
field_value._raw_value = field_read.raw_value
field_value._implemented = field_read.implemented
field_value._last_read = field_read.time
# If there are any ports that were used for this device, but are no longer, remove them:
old_device_ports = set(list(device_values.keys())) - set(model_field_reads_per_port.keys())
for port in old_device_ports:
logger.warning(
f"Device(s) of model {model_class} on ports {old_device_ports} have disappeared. These will be ignored."
)
del device_values[port]
| 38.062147 | 120 | 0.656672 |
451de10c0477bdaf31e0d063879d50b5418e6b0b | 490 | py | Python | catkin_ws/src/ros_python/function_ws/srv_sub_pub/src/srv_server.py | min-chuir-Park/ROS_Tutorials | 4c19e7673ec7098019c747833c45f0d32b85dab4 | [
"MIT"
] | 1 | 2019-07-04T04:49:05.000Z | 2019-07-04T04:49:05.000Z | catkin_ws/src/ros_python/function_ws/srv_sub_pub/src/srv_server.py | min-chuir-Park/ROS_Tutorials | 4c19e7673ec7098019c747833c45f0d32b85dab4 | [
"MIT"
] | null | null | null | catkin_ws/src/ros_python/function_ws/srv_sub_pub/src/srv_server.py | min-chuir-Park/ROS_Tutorials | 4c19e7673ec7098019c747833c45f0d32b85dab4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from srv_sub_pub.srv import *
NAME = "add_two_ints_server"
if __name__ == "__main__":
add_two_ints_server()
| 22.272727 | 71 | 0.681633 |
451f9b7ff4174b43f88b83397cc76cc631f10347 | 148 | py | Python | app/captcha/handlers/verify.py | huioo/tornadoWeb | 001efbae9815b30d8a0c0b4ba8819cc711b99dc4 | [
"Apache-2.0"
] | null | null | null | app/captcha/handlers/verify.py | huioo/tornadoWeb | 001efbae9815b30d8a0c0b4ba8819cc711b99dc4 | [
"Apache-2.0"
] | null | null | null | app/captcha/handlers/verify.py | huioo/tornadoWeb | 001efbae9815b30d8a0c0b4ba8819cc711b99dc4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tornado.web
| 18.5 | 42 | 0.662162 |
451fd926ec9ad4d32166fa0f1f6362133bc3d0df | 1,113 | py | Python | simplydomain/src/module_recursion.py | SimplySecurity/SimplyDomain-Old | 101dd55b213009b449a96a1fa8b143d85dcdba88 | [
"BSD-3-Clause"
] | 17 | 2018-08-08T11:51:26.000Z | 2022-03-27T19:43:25.000Z | simplydomain/src/module_recursion.py | SimplySecurity/SimplyDomain-Old | 101dd55b213009b449a96a1fa8b143d85dcdba88 | [
"BSD-3-Clause"
] | 10 | 2018-06-14T21:33:49.000Z | 2020-08-26T18:10:54.000Z | simplydomain/src/module_recursion.py | SimplySecurity/SimplyDomain-Old | 101dd55b213009b449a96a1fa8b143d85dcdba88 | [
"BSD-3-Clause"
] | 6 | 2018-07-20T17:52:03.000Z | 2021-10-18T09:08:33.000Z | import multiprocessing as mp
| 21.403846 | 70 | 0.674753 |
451ff3d3aabbbe325d6f684b5fc8911f70524e81 | 1,691 | py | Python | python/tests/spatial_operator/test_polygon_range.py | Maxar-Corp/GeoSpark | 6248c6773dc88bf3354ea9b223f16ceb064e7627 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-10-19T07:57:29.000Z | 2021-10-19T07:57:29.000Z | python/tests/spatial_operator/test_polygon_range.py | mayankkt9/GeoSpark | 618da90413f7d86c59def92ba765fbd6d9d49761 | [
"Apache-2.0",
"MIT"
] | 3 | 2020-03-24T18:20:35.000Z | 2021-02-02T22:36:37.000Z | python/tests/spatial_operator/test_polygon_range.py | mayankkt9/GeoSpark | 618da90413f7d86c59def92ba765fbd6d9d49761 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-09-26T15:51:22.000Z | 2021-09-26T15:51:22.000Z | import os
from pyspark import StorageLevel
from geospark.core.SpatialRDD import PolygonRDD
from geospark.core.enums import IndexType, FileDataSplitter
from geospark.core.geom.envelope import Envelope
from geospark.core.spatialOperator import RangeQuery
from tests.test_base import TestBase
from tests.tools import tests_path
input_location = os.path.join(tests_path, "resources/primaryroads-polygon.csv")
splitter = FileDataSplitter.CSV
gridType = "rtree"
indexType = "rtree"
| 36.76087 | 97 | 0.707865 |
45202479629fa2ae422e3a2c76ead8cf08a4c08c | 2,004 | py | Python | river/compose/renamer.py | online-ml/creme | 60872844e6052b5ef20e4075aea30f9031377136 | [
"BSD-3-Clause"
] | 1,105 | 2019-01-24T15:15:30.000Z | 2020-11-10T18:27:00.000Z | river/compose/renamer.py | online-ml/creme | 60872844e6052b5ef20e4075aea30f9031377136 | [
"BSD-3-Clause"
] | 328 | 2019-01-25T13:48:43.000Z | 2020-11-11T11:41:44.000Z | river/compose/renamer.py | online-ml/creme | 60872844e6052b5ef20e4075aea30f9031377136 | [
"BSD-3-Clause"
] | 150 | 2019-01-29T19:05:21.000Z | 2020-11-11T11:50:14.000Z | from typing import Dict
from river import base
__all__ = ["Renamer", "Prefixer", "Suffixer"]
| 21.094737 | 120 | 0.560878 |
452450d09a4bf187252d74d278741b2191dfc928 | 4,660 | py | Python | open_publishing/catalog/catalog_types.py | open-publishing/open-publishing-api | 0d1646bb2460c6f35cba610a355941d2e07bfefd | [
"BSD-3-Clause"
] | null | null | null | open_publishing/catalog/catalog_types.py | open-publishing/open-publishing-api | 0d1646bb2460c6f35cba610a355941d2e07bfefd | [
"BSD-3-Clause"
] | null | null | null | open_publishing/catalog/catalog_types.py | open-publishing/open-publishing-api | 0d1646bb2460c6f35cba610a355941d2e07bfefd | [
"BSD-3-Clause"
] | null | null | null | from open_publishing.core import FieldGroup
from open_publishing.core import FieldDescriptor
from open_publishing.core.enums import CatalogType, VLBCategory, AcademicCategory
from open_publishing.core import SimpleField
from open_publishing.extendable_enum_field import ExtendableEnumField
from open_publishing.genre import GenresList
from open_publishing.bisac import BisacList
from .thema import ThemaList
from .subject import SubjectField
from .series import SeriesList
from .institution import InstitutionField
| 41.981982 | 106 | 0.547425 |
4524547bc1c556606b6ef59589378a69ffa68a6d | 2,263 | py | Python | app/request.py | aenshtyn/News-Update | 2a09099cd6468d00e2e1972072a88db3e4b7cb78 | [
"MIT"
] | null | null | null | app/request.py | aenshtyn/News-Update | 2a09099cd6468d00e2e1972072a88db3e4b7cb78 | [
"MIT"
] | null | null | null | app/request.py | aenshtyn/News-Update | 2a09099cd6468d00e2e1972072a88db3e4b7cb78 | [
"MIT"
] | null | null | null | import urllib.request,json
from .models import Source,Article
# Getting Api Key
api_key = None
#Getting the base urls
source_base_url = None
article_base_url = None
def get_sources(category):
'''
Function that gets the json response to our url request
'''
get_sources_url = source_base_url.format(category,api_key)
with urllib.request.urlopen(get_sources_url) as url:
get_sources_data = url.read()
get_sources_response = json.loads(get_sources_data)
source_results = None
if get_sources_response['sources']:
source_results_list = get_sources_response['sources']
source_results = process_results(source_results_list)
return source_results
def process_results(source_list):
'''
Function that processes the source result and transform them to a list of Objects
Args:
source_list: A list of dictionaries that contain source details
Returns :
source_results: A list of source objects
'''
source_results = []
for source_item in source_list:
id = source_item.get('id')
name = source_item.get('name')
description = source_item.get('description')
url = source_item.get('url')
category = source_item.get('category')
language = source_item.get('language')
country = source_item.get('country')
if url:
source_object = Source(id,name,description,url,category,language,country)
source_results.append(source_object)
return source_results
| 30.173333 | 86 | 0.699956 |
4524add159eab216540f8144d587795ca3f57c91 | 5,027 | py | Python | corai_util/finance/src/param_iv.py | Code-Cornelius/python_libraries | 71c388da60e2aeb94369c3813faca93bf6a18ebf | [
"MIT"
] | 1 | 2022-01-01T22:10:04.000Z | 2022-01-01T22:10:04.000Z | corai_util/finance/src/param_iv.py | Code-Cornelius/python_libraries | 71c388da60e2aeb94369c3813faca93bf6a18ebf | [
"MIT"
] | null | null | null | corai_util/finance/src/param_iv.py | Code-Cornelius/python_libraries | 71c388da60e2aeb94369c3813faca93bf6a18ebf | [
"MIT"
] | null | null | null | # normal libraries
import math
import numpy as np
# priv_libraries
from corai_util.finance.src.financials import compute_price, compute_integral
from corai_util.finance.src.implied_vol import implied_volatility_newton, total_implied_vol_newton
phi_heston = lambda xx: (1 - (1 - np.exp(-xx)) / xx) / xx
phi_heston_lambda = lambda xx, lamb: phi_heston(xx * lamb)
phi_heston_curry = lambda lamb: lambda xx: phi_heston_lambda(xx, lamb)
phi_power_law = lambda eta, gamma: lambda theta: eta * theta ** (- gamma)
# section ######################################################################
# #############################################################################
# parametrisation
# section ######################################################################
# #############################################################################
# SSVI
def natural_SVIparam2density(xx_for_density, parameters):
# """ takes natural SVI parameters. """
"""
Semantics:
From
Args:
xx_for_density:
parameters:
Returns:
"""
w = total_implied_vol_ssvi
w_dash = total_implied_vol_ssvi_dash
w_dash_dash = total_implied_vol_ssvi_dash_dash
return total_implied_vol2density_litzenberg(xx_for_density, w, w_dash, w_dash_dash, parameters)
def natural_SVIparameters2price(log_asset_for_density, parameters, log_moneyness):
""" takes natural SVI parameters."""
values_density_of_SVI = natural_SVIparam2density(log_asset_for_density, parameters) * np.exp(-log_asset_for_density)
asset_for_density = np.exp(log_asset_for_density) # density of S_T
s0 = compute_integral(asset_for_density, values_density_of_SVI)
c_k = compute_price(asset_for_density, np.exp(log_moneyness), values_density_of_SVI)
return values_density_of_SVI, c_k, s0
def natural_SVIparameters2TIV(val_density, parameters, log_moneyness):
""" takes natural SVI parameters."""
values_density_of_SVI, c_k, s0 = natural_SVIparameters2price(val_density, parameters, log_moneyness)
sigma = implied_volatility_newton(True, s0, np.exp(log_moneyness), 1, 0, 0, c_k)
total_implied_vol = 1 * sigma * sigma
total_implied_vol = total_implied_vol_newton(True, s0, np.exp(log_moneyness), 0, 0, c_k)
return values_density_of_SVI, c_k, s0, total_implied_vol
| 33.966216 | 120 | 0.63139 |
45266515995c4fa2eef2c47f14074dcb92d42fdb | 687 | py | Python | cracking_the_coding_interview_qs/8.7-8.8/get_all_permutations_of_string_test.py | angelusualle/algorithms | 86286a49db2a755bc57330cb455bcbd8241ea6be | [
"Apache-2.0"
] | null | null | null | cracking_the_coding_interview_qs/8.7-8.8/get_all_permutations_of_string_test.py | angelusualle/algorithms | 86286a49db2a755bc57330cb455bcbd8241ea6be | [
"Apache-2.0"
] | null | null | null | cracking_the_coding_interview_qs/8.7-8.8/get_all_permutations_of_string_test.py | angelusualle/algorithms | 86286a49db2a755bc57330cb455bcbd8241ea6be | [
"Apache-2.0"
] | null | null | null | import unittest
from get_all_permutations_of_string import get_all_permutations_of_string, get_all_permutations_of_string_with_dups | 76.333333 | 176 | 0.764192 |
4526f09b63533011d0dbd7fc3b49ed217cae0f86 | 8,171 | py | Python | third-party/webscalesqlclient/mysql-5.6/xtrabackup/test/kewpie/percona_tests/xtrabackup_disabled/xb_partial_test.py | hkirsman/hhvm_centos7_builds | 2a1fd6de0d2d289c1575f43f10018f3bec23bb13 | [
"PHP-3.01",
"Zend-2.0"
] | 2 | 2018-03-07T08:31:29.000Z | 2019-02-01T10:10:48.000Z | third-party/webscalesqlclient/mysql-5.6/xtrabackup/test/kewpie/percona_tests/xtrabackup_disabled/xb_partial_test.py | hkirsman/hhvm_centos7_builds | 2a1fd6de0d2d289c1575f43f10018f3bec23bb13 | [
"PHP-3.01",
"Zend-2.0"
] | 1 | 2021-02-23T14:52:22.000Z | 2021-02-23T14:52:22.000Z | xtrabackup_main/xb_partial_test.py | isabella232/kewpie | 47d67124fa755719eda3ca5a621a2abf0322d3f9 | [
"Apache-2.0"
] | 1 | 2020-11-13T10:17:28.000Z | 2020-11-13T10:17:28.000Z | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import shutil
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
server_requirements = [['--innodb-file-per-table']]
servers = []
server_manager = None
test_executor = None
# we explicitly use the --no-timestamp option
# here. We will be using a generic / vanilla backup dir
backup_path = None
| 45.648045 | 135 | 0.571778 |
45271fd81da1faed37c0972df122fea04a51747b | 104 | py | Python | filter.py | Gerrydh/MPP-Recursion | de81bb0dcd50f7f66971db9000e6262767168b8f | [
"Apache-2.0"
] | null | null | null | filter.py | Gerrydh/MPP-Recursion | de81bb0dcd50f7f66971db9000e6262767168b8f | [
"Apache-2.0"
] | null | null | null | filter.py | Gerrydh/MPP-Recursion | de81bb0dcd50f7f66971db9000e6262767168b8f | [
"Apache-2.0"
] | null | null | null | print filter((lambda x: (x%2) ==0 ), [1,2,3,4,5,6])
print filter((lambda x: (x%2) !=0 ), [1,2,3,4,5,6]) | 52 | 52 | 0.519231 |
45278aea9c424ae5e3cd32a1bd843d89d29dbea4 | 156 | py | Python | project euler/q2.py | milkmeat/thomas | fbc72af34267488d931a4885d4e19fce22fea582 | [
"MIT"
] | null | null | null | project euler/q2.py | milkmeat/thomas | fbc72af34267488d931a4885d4e19fce22fea582 | [
"MIT"
] | null | null | null | project euler/q2.py | milkmeat/thomas | fbc72af34267488d931a4885d4e19fce22fea582 | [
"MIT"
] | null | null | null | l=[0]*100
l[0]=1
l[1]=2
for x in range (2,100):
l[x]=l[x-1]+l[x-2]
#print l
f=0
for c in l:
if c%2==0 and c<4000000:
f=f+c
print f | 14.181818 | 29 | 0.474359 |
452797680ac9c44f15c014b0a008440ac1ea29cb | 12,809 | py | Python | recipes/LibriSpeech/ASR/CTC/train_with_wav2vec.py | mj-kh/speechbrain | 9351f61cc057ddf3f8a0b7074a9c3c857dec84ed | [
"Apache-2.0"
] | 3,913 | 2021-03-14T13:54:52.000Z | 2022-03-30T05:09:55.000Z | recipes/LibriSpeech/ASR/CTC/train_with_wav2vec.py | mj-kh/speechbrain | 9351f61cc057ddf3f8a0b7074a9c3c857dec84ed | [
"Apache-2.0"
] | 667 | 2021-03-14T20:11:17.000Z | 2022-03-31T04:07:17.000Z | recipes/LibriSpeech/ASR/CTC/train_with_wav2vec.py | mj-kh/speechbrain | 9351f61cc057ddf3f8a0b7074a9c3c857dec84ed | [
"Apache-2.0"
] | 785 | 2021-03-14T13:20:57.000Z | 2022-03-31T03:26:03.000Z | #!/usr/bin/env/python3
"""Recipe for training a wav2vec-based ctc ASR system with librispeech.
The system employs wav2vec as its encoder. Decoding is performed with
ctc greedy decoder.
To run this recipe, do the following:
> python train_with_wav2vec.py hparams/train_with_wav2vec.yaml
The neural network is trained on CTC likelihood target and character units
are used as basic recognition tokens. Training is performed on the full
LibriSpeech dataset (960 h).
Authors
* Sung-Lin Yeh 2021
* Titouan Parcollet 2021
* Ju-Chieh Chou 2020
* Mirco Ravanelli 2020
* Abdel Heba 2020
* Peter Plantinga 2020
* Samuele Cornell 2020
"""
import os
import sys
import torch
import logging
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
from pathlib import Path
logger = logging.getLogger(__name__)
# Define training procedure
def dataio_prepare(hparams):
"""This function prepares the datasets to be used in the brain class.
It also defines the data processing pipeline through user-defined functions."""
data_folder = hparams["data_folder"]
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
)
if hparams["sorting"] == "ascending":
# we sort training data to speed up training and get better results.
train_data = train_data.filtered_sorted(sort_key="duration")
# when sorting do not shuffle in dataloader ! otherwise is pointless
hparams["train_dataloader_opts"]["shuffle"] = False
elif hparams["sorting"] == "descending":
train_data = train_data.filtered_sorted(
sort_key="duration", reverse=True
)
# when sorting do not shuffle in dataloader ! otherwise is pointless
hparams["train_dataloader_opts"]["shuffle"] = False
elif hparams["sorting"] == "random":
pass
else:
raise NotImplementedError(
"sorting must be random, ascending or descending"
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
)
valid_data = valid_data.filtered_sorted(sort_key="duration")
# test is separate
test_datasets = {}
for csv_file in hparams["test_csv"]:
name = Path(csv_file).stem
test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=csv_file, replacements={"data_root": data_folder}
)
test_datasets[name] = test_datasets[name].filtered_sorted(
sort_key="duration"
)
datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()]
# 2. Define audio pipeline:
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
label_encoder = sb.dataio.encoder.CTCTextEncoder()
# 3. Define text pipeline:
sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
special_labels = {
"bos_label": hparams["bos_index"],
"eos_label": hparams["eos_index"],
"blank_label": hparams["blank_index"],
}
label_encoder.load_or_create(
path=lab_enc_file,
from_didatasets=[train_data],
output_key="char_list",
special_labels=special_labels,
sequence_input=True,
)
# 4. Set output:
sb.dataio.dataset.set_output_keys(
datasets,
["id", "sig", "wrd", "char_list", "tokens_bos", "tokens_eos", "tokens"],
)
return train_data, valid_data, test_datasets, label_encoder
if __name__ == "__main__":
# CLI:
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
# If distributed_launch=True then
# create ddp_group with the right communication protocol
sb.utils.distributed.ddp_init_group(run_opts)
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# Create experiment directory
sb.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
# Dataset prep (parsing Librispeech)
from librispeech_prepare import prepare_librispeech # noqa
# multi-gpu (ddp) save data preparation
run_on_main(
prepare_librispeech,
kwargs={
"data_folder": hparams["data_folder"],
"tr_splits": hparams["train_splits"],
"dev_splits": hparams["dev_splits"],
"te_splits": hparams["test_splits"],
"save_folder": hparams["output_folder"],
"merge_lst": hparams["train_splits"],
"merge_name": "train.csv",
"skip_prep": hparams["skip_prep"],
},
)
# here we create the datasets objects as well as tokenization and encoding
train_data, valid_data, test_datasets, label_encoder = dataio_prepare(
hparams
)
# Trainer initialization
asr_brain = ASR(
modules=hparams["modules"],
hparams=hparams,
run_opts=run_opts,
checkpointer=hparams["checkpointer"],
)
# We dynamicaly add the tokenizer to our brain class.
# NB: This tokenizer corresponds to the one used for the LM!!
asr_brain.tokenizer = label_encoder
# Training
asr_brain.fit(
asr_brain.hparams.epoch_counter,
train_data,
valid_data,
train_loader_kwargs=hparams["train_dataloader_opts"],
valid_loader_kwargs=hparams["valid_dataloader_opts"],
)
# Testing
for k in test_datasets.keys(): # keys are test_clean, test_other etc
asr_brain.hparams.wer_file = os.path.join(
hparams["output_folder"], "wer_{}.txt".format(k)
)
asr_brain.evaluate(
test_datasets[k], test_loader_kwargs=hparams["test_dataloader_opts"]
)
| 35.879552 | 89 | 0.640331 |
45284a1d25fe21c81004bcc320ecfac7a3fe05f4 | 907 | py | Python | src/dask_awkward/tests/test_utils.py | douglasdavis/dask-awkward | e8829d32ed080d643c7e4242036ce64aee60eda6 | [
"BSD-3-Clause"
] | 21 | 2021-09-09T19:32:30.000Z | 2022-03-01T15:42:06.000Z | src/dask_awkward/tests/test_utils.py | douglasdavis/dask-awkward | e8829d32ed080d643c7e4242036ce64aee60eda6 | [
"BSD-3-Clause"
] | 14 | 2021-09-23T16:54:10.000Z | 2022-03-23T19:24:53.000Z | src/dask_awkward/tests/test_utils.py | douglasdavis/dask-awkward | e8829d32ed080d643c7e4242036ce64aee60eda6 | [
"BSD-3-Clause"
] | 3 | 2021-09-09T19:32:32.000Z | 2021-11-18T17:27:35.000Z | from __future__ import annotations
from ..utils import normalize_single_outer_inner_index
| 22.675 | 62 | 0.46527 |
45288cac480034ff3c670253791c5dd9e04dcb61 | 16,413 | py | Python | core/client_socket.py | schalekamp/ibapipy | a9e02d604d9f4a2ad87e78089654b29305aa110d | [
"Apache-2.0"
] | 1 | 2020-08-13T05:45:48.000Z | 2020-08-13T05:45:48.000Z | core/client_socket.py | schalekamp/ibapipy | a9e02d604d9f4a2ad87e78089654b29305aa110d | [
"Apache-2.0"
] | null | null | null | core/client_socket.py | schalekamp/ibapipy | a9e02d604d9f4a2ad87e78089654b29305aa110d | [
"Apache-2.0"
] | null | null | null | """Implements the EClientSocket interface for the Interactive Brokers API."""
import threading
import ibapipy.config as config
from ibapipy.core.network_handler import NetworkHandler
def check(value):
    """Map Java sentinel maxima to None; pass every other value through.

    Interactive Brokers transmits Java's Integer.MAX_VALUE /
    Double.MAX_VALUE as "unset" markers. The codec interprets None as an
    EOL, so those sentinels are translated to None here before
    transmission.

    Keyword arguments:
    value -- integer or floating-point value to check
    """
    is_sentinel = is_java_int_max(value) or is_java_double_max(value)
    return None if is_sentinel else value
def is_java_double_max(number):
    """Report whether ``number`` is exactly Java's Double.MAX_VALUE.

    Keyword arguments:
    number -- number to check
    """
    # Strict type() comparison (not isinstance) keeps non-float values out.
    if type(number) != float:
        return False
    return number == config.JAVA_DOUBLE_MAX
def is_java_int_max(number):
    """Report whether ``number`` is exactly Java's Integer.MAX_VALUE.

    Keyword arguments:
    number -- number to check
    """
    # Strict type() comparison: bool and float values do not qualify.
    if type(number) != int:
        return False
    return number == config.JAVA_INT_MAX
def listen(client, in_queue):
    """Dispatch messages from ``in_queue`` to the matching client methods.

    Blocks until a 'stop' message is received. Messages are
    ``(method, parms)`` tuples; a ``None`` method is ignored, a known
    method name is invoked with ``parms``, and anything else is routed to
    ``client.update_unknown`` with the method name prepended.

    Keyword arguments:
    client   -- client
    in_queue -- incoming message queue
    """
    while True:
        name, args = in_queue.get()
        if name == 'stop':
            # Sentinel message: terminate the listener loop.
            return
        if name is None:
            continue
        if hasattr(client, name):
            getattr(client, name)(*args)
        else:
            # No handler for this message type; forward name + args as-is.
            client.update_unknown(name, *args)
| 36.718121 | 79 | 0.628709 |
4529524b72ee8b6f655a486a5542d22fd69041be | 2,234 | py | Python | common.py | shawnau/DataScienceBowl2018 | 3c6f0f26dd86b71aad55fca52314e6432d0b3a82 | [
"MIT"
] | null | null | null | common.py | shawnau/DataScienceBowl2018 | 3c6f0f26dd86b71aad55fca52314e6432d0b3a82 | [
"MIT"
] | null | null | null | common.py | shawnau/DataScienceBowl2018 | 3c6f0f26dd86b71aad55fca52314e6432d0b3a82 | [
"MIT"
] | null | null | null | import os
from datetime import datetime
# Project root (directory containing this file) and a wall-clock timestamp
# used to tag the current run.
PROJECT_PATH = os.path.dirname(os.path.realpath(__file__))
IDENTIFIER = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
#numerical libs
import numpy as np
import random
import matplotlib
# Backend must be selected before matplotlib.pyplot is imported (it is,
# further down in this module).
matplotlib.use('TkAgg')
import cv2
# torch libs
import torch
from torch.utils.data.sampler import *
import torchvision.transforms as transforms
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import *
from torch.nn import init
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
from torch.nn.parallel.data_parallel import data_parallel
# std libs
import collections
import copy
import numbers
import math
import inspect
import shutil
from timeit import default_timer as timer
import csv
import pandas as pd
import pickle
import glob
import sys
from distutils.dir_util import copy_tree
import time
import matplotlib.pyplot as plt
import skimage
import skimage.color
import skimage.morphology
from scipy import ndimage
# Module-import side effects: announce the file, seed all RNGs, and print a
# summary of the CUDA environment.
print('@%s: ' % os.path.basename(__file__))
if 1:
    # Seed every RNG (python, numpy, torch CPU/GPU) from wall-clock time.
    # NOTE: runs are therefore NOT reproducible across invocations, since
    # SEED changes every second.
    SEED = int(time.time())
    random.seed(SEED)
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed_all(SEED)
    print ('\tset random seed')
    print ('\t\tSEED=%d'%SEED)
if 1:
    # uses the inbuilt cudnn auto-tuner to find the fastest convolution algorithms
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.enabled = True
    print('\tset cuda environment')
    print('\t\ttorch.__version__ =', torch.__version__)
    print('\t\ttorch.version.cuda =', torch.version.cuda)
    print('\t\ttorch.backends.cudnn.version() =', torch.backends.cudnn.version())
    try:
        print('\t\tos[\'CUDA_VISIBLE_DEVICES\'] =',os.environ['CUDA_VISIBLE_DEVICES'])
        NUM_CUDA_DEVICES = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
    except Exception:
        # CUDA_VISIBLE_DEVICES not set: fall back to assuming one device.
        print('\t\tos[\'CUDA_VISIBLE_DEVICES\'] =','None')
        NUM_CUDA_DEVICES = 1
    print('\t\ttorch.cuda.device_count() =', torch.cuda.device_count())
    print('\t\ttorch.cuda.current_device() =', torch.cuda.current_device())
print('')
| 26.282353 | 90 | 0.723814 |
452af70b9e1700ac30a7f0af42ce0f50e0812342 | 1,460 | py | Python | NumberExtractor.py | Dikshit15/SolveSudoku | 7a84e64c9b708c730179f65c8cce8a360ff96d7f | [
"MIT"
] | 54 | 2019-01-03T20:05:26.000Z | 2022-02-22T12:46:47.000Z | NumberExtractor.py | Dikshit15/SolveSudoku | 7a84e64c9b708c730179f65c8cce8a360ff96d7f | [
"MIT"
] | 1 | 2021-05-18T07:05:28.000Z | 2021-05-20T04:38:30.000Z | NumberExtractor.py | Dikshit15/SolveSudoku | 7a84e64c9b708c730179f65c8cce8a360ff96d7f | [
"MIT"
] | 29 | 2019-02-28T13:54:45.000Z | 2021-12-17T03:22:33.000Z | import numpy as np
import cv2
import matplotlib.pyplot as plt
import numpy as np
from keras.models import model_from_json
# Load the saved model
# Rebuild the Keras model architecture from its JSON description, then load
# the trained weights from the HDF5 file alongside it.
# NOTE(review): a `with open(...)` block would guarantee the file is closed
# even if read() raises.
json_file = open('models/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("models/model.h5")
print("Loaded saved model from disk.")
# evaluate loaded model on test data
| 31.73913 | 92 | 0.656164 |
452b051b1e4ced509f2b30c049b1e85fd074fa94 | 38,889 | py | Python | code/Rts.py | andreschristen/RTs | d3dceb7d2f518222cfaa940b4ecfc9c7f63a25a9 | [
"CC0-1.0"
] | null | null | null | code/Rts.py | andreschristen/RTs | d3dceb7d2f518222cfaa940b4ecfc9c7f63a25a9 | [
"CC0-1.0"
] | null | null | null | code/Rts.py | andreschristen/RTs | d3dceb7d2f518222cfaa940b4ecfc9c7f63a25a9 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 18:28:54 2020
@author: Dr J A Christen (CIMAT-CONACYT, Mexico) jac at cimat.mx
Instantaneous reproduction numbers calculations.
Rts_P, Implementation of Cori et al (2013)
Rts_AR, new filtering version using an autoregressive linear model of Capistrn, Capella and Christen (2020):
https://arxiv.org/abs/2012.02168, 05DIC2021
01FEB2021: Some buggs were corrected to avoid error when too low counts are used and for prediction when g=1.
Go directly to __main__ for examples.
"""
import os
from datetime import date, timedelta
from pickle import load, dump
from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones
from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt
from numpy import sum as np_sum
from scipy.stats import erlang, gamma, nbinom, uniform, beta
from scipy.stats import t as t_student
from matplotlib.pyplot import subplots, rcParams, close
from matplotlib.dates import drange
from pytwalk import pytwalk
from plotfrozen import PlotFrozenDist
def Rts_P( data, tau=7, n=30, IP_dist=erlang( a=3, scale=8/3),\
          Rt_pr_a=5, Rt_pr_b=5/5, q=None):
    """Calculate R_t as in:

    Anne Cori, Neil M. Ferguson, Christophe Fraser, Simon Cauchemez,
    A New Framework and Software to Estimate Time-Varying Reproduction Numbers
    During Epidemics, American Journal of Epidemiology,
    Volume 178, Issue 9, 1 November 2013, Pages 1505-1512,
    https://doi.org/10.1093/aje/kwt133

    data: array with case incidence.
    tau: use a window tau (default 7) to calculate R_{t,\tau}'s.
    n: calculate n R_{t,\tau}'s for the past n days (default 30).
    IP_dist: 'frozen' infectiousness profile distribution,
        default erlang( a=3, scale=8/3), chosen for covid19.
        Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
    Rt_pr_a, Rt_pr_b: parameters for the gamma prior of R_t.
    q: quantiles (in percent) for the posterior of R_t; None (the default)
        means [10, 25, 50, 75, 90] (a mutable default list is avoided on
        purpose).  If q is a single integer, return a simulation of the
        R_t's of size q per day; the special value q=2 instead returns the
        posterior gamma (shape, scale) parameters per day.

    Returns: a (len(q), n) array with quantiles of the R_{t,\tau}'s
        (or a (q, n) array in simulation mode).  Days with 10 or fewer
        cumulative counts are left as zeros.
    """
    if q is None:
        q = [10, 25, 50, 75, 90]
    if isinstance( q, list): ## Return a list of quantiles
        q = array(q)/100
        rt = zeros(( len(q), n))
        simulate = False
    else: ## q is a single integer: simulation mode (q == 2 -> gamma params)
        rt = zeros(( q, n))
        simulate = True
    m = len(data)
    # Discretized infectiousness profile w_s, reversed so that a tail slice
    # of w lines up with the most recent incidence counts.
    w = diff(IP_dist.cdf( arange( 0, m+1)))
    w /= np_sum(w)
    w = flip(w)
    for t in range(max(m-n,0), m):
        if np_sum(data[:t]) <= 10:# Only for more than 10 counts
            continue
        S1 = 0.0  # sum over the window of total infectiousness Gamma_k
        S2 = 0.0  # sum over the window of incidence
        for k in range(tau):
            I = data[:(t-k)] ## window of reports
            S2 += data[(t-k)]
            S1 += np_sum(I * w[(m-(t-k)):]) #\Gamma_k
        # Conjugate gamma posterior for R_t:
        a_post = Rt_pr_a + S2
        scale_post = 1/(S1 + 1/Rt_pr_b)
        col = t-(m-n)
        if simulate:
            if q == 2: # Return the posterior (shape, scale) parameters
                rt[:,col] = a_post, scale_post
            else:
                rt[:,col] = gamma.rvs( a_post, scale=scale_post, size=q)
        else:
            rt[:,col] = gamma.ppf( q, a_post, scale=scale_post)
    return rt
def PlotRts_P( data_fnam, init_date, trim=0,\
        tau=7, n=30, IP_dist=erlang( a=3, scale=8/3), Rt_pr_a=5, Rt_pr_b=5/5,\
        q=[10,25,50,75,90], csv_fnam=None, color='blue', median_color='red', alpha=0.25, ax=None):
    """Makes a board with the Rt evolution for the past n days (n=30).
    All parameters are passed to function Rts_P.
    data_fnam may be a file name to load (two columns) or an array already loaded.
    csv_fnam is an optional file name to save the Rts info.
    ax is an Axis handle for the plot; if None, one is created and returned.
    NOTE(review): the bar plotting below indexes h[0]..h[4], so it assumes
    exactly five quantiles in q.
    """
    if type(data_fnam) == str:
        data = loadtxt(data_fnam)
    else:
        # Already-loaded array: copy it and use a blank title placeholder.
        data = data_fnam.copy()
        data_fnam = " "
    if trim < 0:
        # Negative trim drops that many days from the end of the series.
        data = data[:trim,:]
    # Column 1 of the data is the incidence (column 0 is deaths; see the
    # comment block above ZMs).
    rts = Rts_P(data=data[:,1],\
          tau=tau, n=n, IP_dist=IP_dist, q=q,\
          Rt_pr_a=Rt_pr_a, Rt_pr_b=Rt_pr_b)
    m = data.shape[0]
    last_date = init_date + timedelta(m)
    if ax == None:
        fig, ax = subplots(figsize=( n/3, 3.5) )
    for i in range(n):
        # h holds the quantiles for day i: outer bar 10-90%, inner 25-75%,
        # median as a horizontal line.
        h = rts[:,i]
        ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=alpha)
        ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=alpha)
        ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color )
    ax.set_title(data_fnam + r", $R_t$, dist. posterior.")
    ax.set_xlabel('')
    ax.set_xticks(range(n))
    ax.set_xticklabels([(last_date-timedelta(n-i)).strftime("%d.%m") for i in range(n)], ha='right')
    ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
    # Reference lines at R_t = 1, 2, 3.
    ax.axhline(y=1, color='green')
    ax.axhline(y=2, color='red')
    ax.axhline(y=3, color='darkred')
    ax.set_ylim((0.5,3.5))
    ax.set_yticks(arange( 0.4, 3.4, step=0.2))
    ax.tick_params( which='major', axis='y', labelsize=10)
    ax.grid(color='grey', linestyle='--', linewidth=0.5)
    #fig.tight_layout()
    if csv_fnam != None:
        days = drange( last_date-timedelta(n), last_date, timedelta(days=1))
        ### To save all the data for the plot,
        ### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
        ###             0      1    2     3     4     5     6     7
        sv = -ones(( len(days), 3+len(q)))
        for i,day in enumerate(days):
            d = date.fromordinal(int(day))
            sv[ i, 0] = d.year
            sv[ i, 1] = d.month
            sv[ i, 2] = d.day
            sv[ i, 3:] = rts[:,i]
        q_str = ', '.join(["q_%02d" % (qunt,) for qunt in q])
        savetxt( csv_fnam, sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
    return ax
"""
def loglikelihood_NB( x, mu, psi):
mu_psi = mu/psi
return -gammaln(x + 1) + gammaln(x + psi) - gammaln(psi)\
-(x + psi)*log(1 + mu_psi) + x*log(mu_psi)
"""
def Rts_NB( data, n=30, tau=7, psi=10, IP_dist=erlang( a=3, scale=8/3),\
           Rt_pr_a=5, Rt_pr_b=5/5, q=None):
    """Calculate R_t using a Negative Binomial likelihood instead of Poisson.

    Here one needs to fix psi = 1/theta (default 10).  Extension (not
    documented) of:

    Anne Cori, Neil M. Ferguson, Christophe Fraser, Simon Cauchemez,
    A New Framework and Software to Estimate Time-Varying Reproduction Numbers
    During Epidemics, American Journal of Epidemiology,
    Volume 178, Issue 9, 1 November 2013, Pages 1505-1512,
    https://doi.org/10.1093/aje/kwt133

    data: array with case incidence.
    n: calculate n R_{t,\tau}'s for the past n days (default 30).
    tau: use a window tau (default 7) to calculate R_{t,\tau}'s.
    psi: negative binomial dispersion parameter (1/theta).
    IP_dist: 'frozen' infectiousness profile distribution; only its cdf is used.
    Rt_pr_a, Rt_pr_b: parameters for the gamma prior of R_t.
    q: quantiles (in percent) for the posterior of R_t; None (the default)
        means [10, 25, 50, 75, 90].  If q is a single integer, a simulated
        R_t value is drawn per day instead.

    Returns: a (len(q), n) array with quantiles of the R_{t,\tau}'s
        (posterior computed on a fixed grid R in [0.1, 3.0]).  Days with
        10 or fewer cumulative counts are left as zeros.
    """
    # Self-contained NB log-likelihood: the module-level loglikelihood_NB is
    # commented out above, so define it here (local scipy.special import).
    from scipy.special import gammaln

    def _loglikelihood_NB(x, mu, disp):
        # Negative binomial log pmf with mean mu and dispersion disp.
        mu_disp = mu / disp
        return (-gammaln(x + 1) + gammaln(x + disp) - gammaln(disp)
                - (x + disp) * log(1 + mu_disp) + x * log(mu_disp))

    if q is None:
        q = [10, 25, 50, 75, 90]
    if isinstance( q, list): ## Return a list of quantiles
        q = array(q)/100
        quantiles = zeros(len(q))
        rt = zeros(( len(q), n))
        simulate = False
    else: ## q is a single integer: return one simulated R_t per day
        rt = zeros(( q, n))
        simulate = True
    m = len(data)
    w = diff(IP_dist.cdf( arange( 0, m+1)))
    w /= np_sum(w)
    w = flip(w)
    # Fixed evaluation grid for the posterior of R_t.
    R = linspace( 0.1, 3.0, num=100)
    DeltaR = R[1]-R[0]
    for t in range(max(m-n,0), m):
        if np_sum(data[:t]) <= 10:# Only for more than 10 counts
            continue
        log_likelihood_I = zeros(R.shape) ## accumulated over the window, per R
        for k in range(tau):
            I = data[:(t-k)] ## window of reports
            Gammak = I @ w[(m-(t-k)):] #\Gamma_k, total infectiousness
            I_k = data[(t-k)]
            log_likelihood_I += _loglikelihood_NB( I_k, R*Gammak, psi)
        log_post = log_likelihood_I + gamma.logpdf( R, Rt_pr_a, scale=1/Rt_pr_b)
        # Normalize numerically on the grid and build the cdf.
        pdf = exp(log_post)
        pdf /= np_sum(pdf)*DeltaR
        cdf = cumsum(pdf)*DeltaR
        col = t-(m-n)
        if simulate:
            u = uniform.rvs()
            idx = where(cdf < u)[0]
            # Guard: if all posterior mass is at/above the draw, use the grid start.
            rt[:,col] = R[idx[-1]] if idx.size else R[0]
        else:
            for i,qua in enumerate(q):
                idx = where(cdf < qua)[0]
                quantiles[i] = R[idx[-1]] if idx.size else R[0]
            rt[:,col] = quantiles
    return rt
def PlotRts_NB( data_fnam, init_date, psi, trim=0,\
        tau=7, n=30, IP_dist=erlang( a=3, scale=8/3), Rt_pr_a=5, Rt_pr_b=5/5,\
        q=[10,25,50,75,90], csv_fnam=None, color='blue', ax=None):
    """Makes a board with the Rt evolution for the past n days (n=30).
    All parameters are passed to function Rts_NB (psi is the NB dispersion).
    csv_fnam is an optional file name to save the Rts info.
    ax is an Axis handle for the plot; if None, one is created and returned.
    NOTE(review): the bar plotting below indexes h[0]..h[4], so it assumes
    exactly five quantiles in q.
    """
    if type(data_fnam) == str:
        data = loadtxt(data_fnam)
    else:
        # Already-loaded array: copy it and use a blank title placeholder.
        data = data_fnam.copy()
        data_fnam = " "
    if trim < 0:
        # Negative trim drops that many days from the end of the series.
        data = data[:trim,:]
    # Column 1 of the data is the incidence.
    rts = Rts_NB(data=data[:,1],\
          tau=tau, psi=psi, n=n, IP_dist=IP_dist, q=q,\
          Rt_pr_a=Rt_pr_a, Rt_pr_b=Rt_pr_b)
    m = data.shape[0]
    last_date = init_date + timedelta(m)
    if ax == None:
        fig, ax = subplots(figsize=( n/3, 3.5) )
    for i in range(n):
        # Outer bar 10-90%, inner bar 25-75%, median as a red line.
        h = rts[:,i]
        ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=0.25)
        ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=0.25)
        ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color='red' )
    ax.set_title(data_fnam + r", $R_t$, dist. posterior.")
    ax.set_xlabel('')
    ax.set_xticks(range(n))
    ax.set_xticklabels([(last_date-timedelta(n-i)).strftime("%d.%m") for i in range(n)], ha='right')
    ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
    # Reference lines at R_t = 1, 2, 3.
    ax.axhline(y=1, color='green')
    ax.axhline(y=2, color='red')
    ax.axhline(y=3, color='darkred')
    ax.set_ylim((0.5,3.5))
    ax.set_yticks(arange( 0.4, 3.4, step=0.2))
    ax.tick_params( which='major', axis='y', labelsize=10)
    ax.grid(color='grey', linestyle='--', linewidth=0.5)
    #fig.tight_layout()
    if csv_fnam != None:
        days = drange( last_date-timedelta(n), last_date, timedelta(days=1))
        ### To save all the data for the plot,
        ### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
        ###             0      1    2     3     4     5     6     7
        sv = -ones(( len(days), 3+len(q)))
        for i,day in enumerate(days):
            d = date.fromordinal(int(day))
            sv[ i, 0] = d.year
            sv[ i, 1] = d.month
            sv[ i, 2] = d.day
            sv[ i, 3:] = rts[:,i]
        q_str = ', '.join(["q_%02d" % (qunt,) for qunt in q])
        savetxt( csv_fnam, sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
    return ax
##### Dictionary with general information for the metro zone or region to be analyzed:
#####  id        Name        (not used)   Population    init date
# Each value is [display name, unused int, population, outbreak start date].
# NOTE(review): "Mrida"/"Culiacn" look like mojibake for "Mérida"/"Culiacán";
# within this file they are used as display names only.
ZMs = { "9-01": ["Mexico city", 2, 21.942666e6, date(2020, 2, 27)],\
       "15-02": ["Toluca", 1, 2.377828e6, date(2020, 3, 7)],\
       "31-01": ["Mrida", 2, 1.237697e6, date(2020, 3, 7)],\
       "17-02": ["Cuernavaca", 1, 1.059521e6, date(2020, 3, 2)],\
       "12-01": ["Acapulco", 2, 0.919726e6, date(2020, 3, 11)],\
       "25-01": ["Culiacn", 2, 0.962871e6, date(2020, 3, 1)],\
       "23-01": ["Cancun", 2, 0.867768e6, date(2020, 3, 1)]}
### The corresponding data files have two columns separated by space, deaths and incidence.
### Each row is one day.
### The file for clave="9-01" (Mexico city) is: ../data/clave.csv etc.
if __name__=='__main__':
    # Example/driver script: builds the figures used in the paper.
    # NOTE(review): uses Rts_AR and (in the dead code below) Rts_NB_psi, which
    # are defined elsewhere in this module and not visible in this excerpt.
    rcParams.update({'font.size': 14})
    close('all')
    #Plot the imputed serial time distribution for covid: erlang( a=3, scale=8/3 )
    fig, ax = subplots( num=30, figsize=( 4.5, 3.5))
    PlotFrozenDist( erlang( a=3, scale=8/3 ), ax=ax)
    ### Plota the erlang( a=5, scale=9/5 ) alternative
    PlotFrozenDist( erlang( a=5, scale=9/5 ), color='grey', ax=ax)
    ax.set_xlim((0,20))
    ax.grid(color='grey', linestyle='--', linewidth=0.5)
    ax.set_ylabel(r"Density")
    ax.set_xlabel("days")
    ax.set_title("")
    fig.tight_layout()
    fig.savefig("../figs/Covid19_SerialTimeDist.png")
    ### Plot the Rt's estimation. Only Merida, '13-01' and Mexico city, '9-01', are in the paper
    claves = ['15-02', '17-02', '23-01', '25-01', '12-01', "31-01", '9-01']
    n=60 ## Number of days to calculate the Rt's
    trim=0 ## Number of days to cut data from the end, negative, e.g. -10, cut 10 days
    x_jump = 7 ## For ploting, put ticks every x_jump days.
    for i,clave in enumerate(claves):
        print(clave)
        ### Open an instance of the Rts_AR class:
        tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=5, n=n)
        tst.CalculateRts() # Most be called before ploting the Rt's
        ### Plot the Rts:
        fig, ax = subplots( num=i+1, figsize=( 8, 3.5))
        ### Plot Cori et al (2013) Poisson model version:
        PlotRts_P( '../data/%s.csv' % (clave,), init_date=ZMs[clave][3]+timedelta(days=4),\
            n=tst.n, trim=trim, ax=ax, color='green', alpha=0.5, median_color='black')
        ### Plot ours:
        tst.PlotRts( ax=ax, x_jump=x_jump, plot_area=[0.4,2.2], csv_fnam=clave)
        ax.set_title("")
        ax.set_ylabel(r"$R_t$")
        ax.set_xlabel("")
        ax.set_title(ZMs[clave][0] + ", Mexico")
        fig.tight_layout()
        fig.savefig("../figs/%s_Rts_AR.png" % (clave,))
        if clave == '9-01':
            # Keep the series length of Mexico city for the comparison figure,
            # and re-save its panel with an x label.
            m_max = tst.m
            ax.set_xlabel("day.month, 2020")
            fig.tight_layout()
            fig.savefig("../figs/%s_Rts_AR.png" % (clave,))
    ### Figure with Cori et al (2013) posterior distributions of '31-01' and '9-01'
    fig1, ax1 = subplots( num=20, nrows=1, ncols=2, figsize=( 10, 3.5))
    color = [ "red", "black", "darkred"]
    for i,clave in enumerate([ '31-01', '9-01']):
        tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n)
        # q=2: get the posterior gamma (shape, scale) parameters per day.
        a, b = Rts_P( tst.data, tau=7, n=30, q=2)
        ax1[0].plot( arange(m_max-tst.m, m_max, 1), tst.data, '.-', color=color[i], label=ZMs[clave][0])
        PlotFrozenDist( gamma( a[-1], scale=b[-1]), ax=ax1[1], color=color[i])
    # Decorate the incidence panel (uses the last loop iteration's tst).
    last_date = tst.init_date + timedelta(tst.m)
    ax1[0].set_xlabel('')
    ax1[0].set_xticks(range(0,tst.m,x_jump*2))
    ax1[0].set_xticklabels([(last_date-timedelta(tst.m-i)).strftime("%d.%m") for i in range(0,tst.m,x_jump*2)], ha='right')
    ax1[0].tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
    ax1[0].set_xlabel("day.month, 2020")
    #ax1[0].set_ylim((0,1.1*max(tst.data[-n:])))
    ax1[0].grid(color='grey', linestyle='--', linewidth=0.5)
    ax1[0].set_ylabel(r"Incidence")
    ax1[0].legend(loc=0, shadow = False)
    ### Add '31-01', with incidence multiplied by 10
    clave = '31-01'
    tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n)
    a, b = Rts_P( tst.data*10, tau=7, n=30, q=2)
    ax1[0].plot( arange(m_max-tst.m, m_max, 1), tst.data*10, '.-', color=color[2])
    PlotFrozenDist( gamma( a[-1], scale=b[-1]), ax=ax1[1], color=color[2])
    ax1[1].set_xticks(arange(0.8,1.4,0.2))
    ax1[1].set_xlabel(r"$R_t$, " + (last_date-timedelta(1)).strftime("%d.%m.%Y"))
    ax1[1].grid(color='grey', linestyle='--', linewidth=0.5)
    fig1.tight_layout()
    fig1.savefig("../figs/Rts_Compare.png")
    ### Comparison of results changing the serial time distribution
    fig, ax = subplots( num=31, figsize=( 4.5, 3.5))
    tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n)
    tst.CalculateRts()
    tst.PlotPostRt( i=n, ax=ax)
    #### Here we change the serial time: Any other positive density could be used.
    tst = Rts_AR( clave, IP_dist=erlang( a=5, scale=9/5), init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n)
    tst.CalculateRts()
    tst.PlotPostRt( i=n, ax=ax, color='grey')
    ax.set_xlim((0.5,2.5))
    ax.set_xlabel(r"$R_t$, " + (last_date-timedelta(1)).strftime("%d.%m.%Y"))
    ax.grid(color='grey', linestyle='--', linewidth=0.5)
    ax.set_title("")
    fig.tight_layout()
    fig.savefig("../figs/%s_Rts_Compare.png" % (clave,))
    # Dead example code kept as a string literal (Rts_NB_psi usage).
    """
    ################# Example of use of Rts_NB_psi and Rts_NB (not documented)
    T=100000
    for clave in claves: #Instance of the object and run the MCMC
        tst = Rts_NB_psi( clave, init_date=ZMs[clave][3], n=n)
        if T > 0:
            tst.RunMCMC(T=T)
        ### Plot the Rts
        close(1)
        fig, ax = subplots( num=1, figsize=( 10, 3.5) )
        tst.PlotRts( ax=ax)
        ax.set_title( ZMs[clave][0] + r", $R_t$ NB_psi.")
        fig.savefig("../figs/%s_Rts_NB_psi.png" % (clave,))
        ### Plot the posterior distribution of \psi
        close(3)
        fig, ax = subplots( num=3, figsize=( 5,5) )
        tst.PlotPostPsi(ax=ax)
        ax.set_title(ZMs[clave][0])
        fig.savefig("../figs/%s_Rts_NB_Post_psi.png" % clave)
        ### Fix \psi with the postrior expeted value and use that for PlotRts_NB
        close(2)
        fig, ax = subplots( num=2, figsize=( 10, 3.5) )
        psi = mean(tst.psi_samples) #Posterior mean of psi
        PlotRts_NB( '../data/%s.csv' % (clave,), init_date=ZMs[clave][3],\
            n=n, psi=psi, ax=ax)
        ax.set_title( ZMs[clave][0] + r", $R_t$ NB, fixed $\psi$.")
        fig.savefig("../figs/%s_Rts.png" % (clave,))
    """
| 44.906467 | 156 | 0.550618 |
452ce291eab1e58321278df273620d4a3c795783 | 678 | py | Python | zombieclusters.py | tnkteja/notthisagain | 85e2b2cbea1298a052986e9dfe5e73d022b537f3 | [
"MIT"
] | null | null | null | zombieclusters.py | tnkteja/notthisagain | 85e2b2cbea1298a052986e9dfe5e73d022b537f3 | [
"MIT"
] | null | null | null | zombieclusters.py | tnkteja/notthisagain | 85e2b2cbea1298a052986e9dfe5e73d022b537f3 | [
"MIT"
] | null | null | null |
def zombieCluster(zombies):
    # Count connected components ("zombie clusters") in a relation matrix of
    # '1'/'0' characters: zombies[i][j] == '1' means i and j are related and
    # their clusters are merged; the final number of clusters is returned.
    # NOTE(review): relies on clusterManager and cluster, defined elsewhere in
    # this module (not visible here), and on Python 2's xrange.
    cm=clusterManager(clusters={i:cluster(members=[i]) for i in xrange(len(zombies))})
    for i,row in enumerate(zombies):
        for j,column in enumerate(row):
            if column == '1':
                cm.merge(i,j)
    return cm.count()
| 26.076923 | 86 | 0.59292 |
452dfafcf95365869f17107edc7e9285e32b7078 | 2,989 | py | Python | CondTools/Ecal/python/EcalO2O_laser_online_cfg.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | CondTools/Ecal/python/EcalO2O_laser_online_cfg.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | CondTools/Ecal/python/EcalO2O_laser_online_cfg.py | SWuchterl/cmssw | 769b4a7ef81796579af7d626da6039dfa0347b8e | [
"Apache-2.0"
] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z | import FWCore.ParameterSet.Config as cms
# CMSSW O2O configuration: copies the last EcalLaserAPDPNRatios payload from
# the online (OMDS) Ecal laser DB into the output conditions database.
process = cms.Process("ProcessOne")
process.load("CondCore.DBCommon.CondDBCommon_cfi")
process.CondDBCommon.DBParameters.authenticationPath = '/nfshome0/popcondev/conddb'
#
# Choose the output database
#
process.CondDBCommon.connect = 'oracle://cms_orcon_prod/CMS_COND_42X_ECAL_LASP'
#process.CondDBCommon.connect = 'sqlite_file:DB.db'
process.MessageLogger = cms.Service("MessageLogger",
    debugModules = cms.untracked.vstring('*'),
    destinations = cms.untracked.vstring('cout')
)
# Single empty IOV: the analyzer below is run exactly once.
process.source = cms.Source("EmptyIOVSource",
    firstValue = cms.uint64(1),
    lastValue = cms.uint64(1),
    timetype = cms.string('runnumber'),
    interval = cms.uint64(1)
)
# Read the current last-payload tag from the conditions DB.
process.PoolDBESSource = cms.ESSource("PoolDBESSource",
    process.CondDBCommon,
    timetype = cms.untracked.string('timestamp'),
    toGet = cms.VPSet(cms.PSet(
        record = cms.string('EcalLaserAPDPNRatiosRcd'),
        tag = cms.string('EcalLaserAPDPNRatios_last')
    ))
)
# Write (append) to the same tag in the output DB.
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
    process.CondDBCommon,
    logconnect = cms.untracked.string('sqlite_file:DBLog.db'),
    timetype = cms.untracked.string('timestamp'),
    toPut = cms.VPSet(cms.PSet(
        record = cms.string('EcalLaserAPDPNRatiosRcd'),
        tag = cms.string('EcalLaserAPDPNRatios_last')
    ))
)
#
# Be sure to comment the following line while testing
#
#process.PoolDBOutputService.logconnect = cms.untracked.string('oracle://cms_orcon_prod/CMS_COND_31X_POPCONLOG')
process.Test1 = cms.EDAnalyzer("ExTestEcalLaserAnalyzer",
    SinceAppendMode = cms.bool(True),
    record = cms.string('EcalLaserAPDPNRatiosRcd'),
    loggingOn = cms.untracked.bool(True),
    Source = cms.PSet(
        # maxtime is mandatory
        # it can be expressed either as an absolute time with format YYYY-MM-DD HH24:MI:SS
        # or as a relative time w.r.t. now, using -N, where N is expressed in units
        # of hours
        # maxtime = cms.string("-40"),
        maxtime = cms.string("2012-12-12 23:59:59"),
        sequences = cms.string("16"),
        OnlineDBUser = cms.string('CMS_ECAL_LASER_COND'),
        # debug must be False for production
        debug = cms.bool(False),
        # if fake is True, no insertion in the db is performed
        fake = cms.bool(True),
        # NOTE(review): credential hard-coded in source; consider moving it to
        # the authenticationPath mechanism used above.
        OnlineDBPassword = cms.string('0r4cms_3c4l_2011'),
        OnlineDBSID = cms.string('CMS_OMDS_LB')
    )
)
process.p = cms.Path(process.Test1)
| 41.513889 | 112 | 0.577451 |
452e242fef5c444f6a84742a55e2adf53a8f64d3 | 9,907 | py | Python | algofi/v1/staking.py | zhengxunWu3/algofi-py-sdk | 8388d71d55eae583ac3579286b5f870aa3db2913 | [
"MIT"
] | null | null | null | algofi/v1/staking.py | zhengxunWu3/algofi-py-sdk | 8388d71d55eae583ac3579286b5f870aa3db2913 | [
"MIT"
] | null | null | null | algofi/v1/staking.py | zhengxunWu3/algofi-py-sdk | 8388d71d55eae583ac3579286b5f870aa3db2913 | [
"MIT"
] | null | null | null | from algosdk import logic
from algosdk.future.transaction import ApplicationOptInTxn, AssetOptInTxn, ApplicationNoOpTxn, PaymentTxn, AssetTransferTxn
from ..contract_strings import algofi_manager_strings as manager_strings
from .prepend import get_init_txns
from ..utils import TransactionGroup, Transactions, randint, int_to_bytes
# Minimum balance (in ALGO; converted to microAlgos at use) paid to a storage
# account so it can hold the application opt-ins created below.
OPT_IN_MIN_BALANCE=0.65
def prepare_staking_contract_optin_transactions(manager_app_id, market_app_id, sender, storage_address, suggested_params):
    """Returns a :class:`TransactionGroup` object representing a staking contract opt in
    group transaction. The sender funds the storage account with the opt-in
    minimum balance; the sender and storage account opt in to the staking
    applications and the storage account is rekeyed to the manager application
    address, rendering it unable to be transacted against by the sender and
    therefore immutable.

    :param manager_app_id: id of the manager application
    :type manager_app_id: int
    :param market_app_id: id of the staking market application the storage account opts in to
    :type market_app_id: int
    :param sender: account address for the sender
    :type sender: string
    :param storage_address: address of the storage account
    :type storage_address: string
    :param suggested_params: suggested transaction params
    :type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
    :return: :class:`TransactionGroup` object representing a manager opt in group transaction
    :rtype: :class:`TransactionGroup`
    """
    # Fund the storage account with the minimum balance it needs.
    txn_payment = PaymentTxn(
        sender=sender,
        sp=suggested_params,
        receiver=storage_address,
        amt=int(OPT_IN_MIN_BALANCE*1e6)
    )
    # Storage account opts in to the market application.
    txn_market = ApplicationOptInTxn(
        sender=storage_address,
        sp=suggested_params,
        index=market_app_id
    )
    # Sender opts in to the manager application.
    txn_user_opt_in_manager = ApplicationOptInTxn(
        sender=sender,
        sp=suggested_params,
        index=manager_app_id
    )
    # Storage account opts in to the manager and rekeys itself to the manager
    # application address, so the sender can no longer sign for it.
    app_address = logic.get_application_address(manager_app_id)
    txn_storage_opt_in_manager = ApplicationOptInTxn(
        sender=storage_address,
        sp=suggested_params,
        index=manager_app_id,
        rekey_to=app_address
    )
    txn_group = TransactionGroup([txn_payment, txn_market, txn_user_opt_in_manager, txn_storage_opt_in_manager])
    return txn_group
def prepare_stake_transactions(sender, suggested_params, storage_account, amount, manager_app_id, market_app_id, market_address, oracle_app_id, asset_id=None):
    """Returns a :class:`TransactionGroup` object representing a stake
    transaction against the algofi protocol. The sender sends assets to the
    staking account and is credited with a stake.

    :param sender: account address for the sender
    :type sender: string
    :param suggested_params: suggested transaction params
    :type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
    :param storage_account: storage account address for sender
    :type storage_account: string
    :param amount: amount of asset to supply for minting collateral
    :type amount: int
    :param manager_app_id: id of the manager application
    :type manager_app_id: int
    :param market_app_id: id of the asset market application
    :type market_app_id: int
    :param market_address: account address for the market application
    :type market_address: string
    :param oracle_app_id: id of the asset market application
    :type oracle_app_id: int
    :param asset_id: asset id of the asset being supplied, defaults to None (algo)
    :type asset_id: int, optional
    :return: :class:`TransactionGroup` object representing a mint to collateral group transaction
    :rtype: :class:`TransactionGroup`
    """
    supported_oracle_app_ids = [oracle_app_id]
    supported_market_app_ids = [market_app_id]
    # Standard preamble transactions for this transaction type.
    prefix_transactions = get_init_txns(
        transaction_type=Transactions.MINT_TO_COLLATERAL,
        sender=sender,
        suggested_params=suggested_params,
        manager_app_id=manager_app_id,
        supported_market_app_ids=supported_market_app_ids,
        supported_oracle_app_ids=supported_oracle_app_ids,
        storage_account=storage_account
    )
    # Manager application call.
    txn0 = ApplicationNoOpTxn(
        sender=sender,
        sp=suggested_params,
        index=manager_app_id,
        app_args=[manager_strings.mint_to_collateral.encode()],
    )
    # Market application call, referencing the manager and storage account.
    txn1 = ApplicationNoOpTxn(
        sender=sender,
        sp=suggested_params,
        index=market_app_id,
        app_args=[manager_strings.mint_to_collateral.encode()],
        foreign_apps=[manager_app_id],
        accounts=[storage_account]
    )
    # Send the staked amount to the market: an ASA transfer when asset_id is
    # given, otherwise a plain ALGO payment.
    if asset_id:
        txn2 = AssetTransferTxn(
            sender=sender,
            sp=suggested_params,
            receiver=market_address,
            amt=amount,
            index=asset_id
        )
    else:
        txn2 = PaymentTxn(
            sender=sender,
            sp=suggested_params,
            receiver=market_address,
            amt=amount
        )
    txn_group = TransactionGroup(prefix_transactions + [txn0, txn1, txn2])
    return txn_group
def prepare_unstake_transactions(sender, suggested_params, storage_account, amount, manager_app_id, market_app_id, oracle_app_id, asset_id=None):
    """Returns a :class:`TransactionGroup` object representing a remove stake
    group transaction against the algofi protocol. The sender requests to
    remove stake from a staking account and, if successful, the stake is
    removed.

    :param sender: account address for the sender
    :type sender: string
    :param suggested_params: suggested transaction params
    :type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
    :param storage_account: storage account address for sender
    :type storage_account: string
    :param amount: amount of collateral to remove from the market
    :type amount: int
    :param manager_app_id: id of the manager application
    :type manager_app_id: int
    :param market_app_id: id of the market application of the collateral
    :type market_app_id: int
    :param oracle_app_id: id of the oracle application of the collateral
    :type oracle_app_id: int
    :param asset_id: asset id of the asset underlying the collateral, defaults to None (algo)
    :type asset_id: int, optional
    :return: :class:`TransactionGroup` object representing a remove stake group transaction
    :rtype: :class:`TransactionGroup`
    """
    supported_market_app_ids = [market_app_id]
    supported_oracle_app_ids = [oracle_app_id]
    # Standard preamble transactions for this transaction type.
    prefix_transactions = get_init_txns(
        transaction_type=Transactions.REMOVE_COLLATERAL_UNDERLYING,
        sender=sender,
        suggested_params=suggested_params,
        manager_app_id=manager_app_id,
        supported_market_app_ids=supported_market_app_ids,
        supported_oracle_app_ids=supported_oracle_app_ids,
        storage_account=storage_account
    )
    # Manager application call carrying the amount to remove.
    txn0 = ApplicationNoOpTxn(
        sender=sender,
        sp=suggested_params,
        index=manager_app_id,
        app_args=[manager_strings.remove_collateral_underlying.encode(), int_to_bytes(amount)]
    )
    # Market application call. The ASA and ALGO cases differ only in the
    # foreign_assets reference, so build the kwargs once.
    market_call_kwargs = dict(
        sender=sender,
        sp=suggested_params,
        index=market_app_id,
        app_args=[manager_strings.remove_collateral_underlying.encode()],
        foreign_apps=[manager_app_id],
        accounts=[storage_account]
    )
    if asset_id:
        market_call_kwargs["foreign_assets"] = [asset_id]
    txn1 = ApplicationNoOpTxn(**market_call_kwargs)
    txn_group = TransactionGroup(prefix_transactions + [txn0, txn1])
    return txn_group
def prepare_claim_staking_rewards_transactions(sender, suggested_params, storage_account, manager_app_id, market_app_id, oracle_app_id, foreign_assets):
    """Returns a :class:`TransactionGroup` object representing a claim staking
    rewards group transaction against the algofi protocol. The sender requests
    that the manager account pay out the rewards accrued by their storage
    account.

    :param sender: account address for the sender
    :type sender: string
    :param suggested_params: suggested transaction params
    :type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
    :param storage_account: storage account address for sender
    :type storage_account: string
    :param manager_app_id: id of the manager application
    :type manager_app_id: int
    :param market_app_id: id of the market application of the collateral
    :type market_app_id: int
    :param oracle_app_id: id of the oracle application
    :type oracle_app_id: int
    :param foreign_assets: list of rewards assets in the staking contract
    :type foreign_assets: list
    :return: :class:`TransactionGroup` object representing a claim rewards transaction
    :rtype: :class:`TransactionGroup`
    """
    # A single market/oracle pair backs this staking contract.
    supported_market_app_ids = [market_app_id]
    supported_oracle_app_ids = [oracle_app_id]
    # Protocol preamble transactions shared by all operations (see get_init_txns).
    prefix_transactions = get_init_txns(
        transaction_type=Transactions.CLAIM_REWARDS,
        sender=sender,
        suggested_params=suggested_params,
        manager_app_id=manager_app_id,
        supported_market_app_ids=supported_market_app_ids,
        supported_oracle_app_ids=supported_oracle_app_ids,
        storage_account=storage_account
    )
    # Manager app call carrying the claim_rewards instruction; foreign_assets
    # presumably lists every reward asset so the call may reference them.
    txn0 = ApplicationNoOpTxn(
        sender=sender,
        sp=suggested_params,
        index=manager_app_id,
        app_args=[manager_strings.claim_rewards.encode()],
        accounts=[storage_account],
        foreign_assets=foreign_assets
    )
    txn_group = TransactionGroup(prefix_transactions + [txn0])
return txn_group | 43.262009 | 159 | 0.731402 |
452e279a6193abc88461babe810211a3f8d434ff | 1,661 | py | Python | main.py | kirantambe/koinex-status-ticker | 487028a00605912e9fee97f4b29f260a2ab4f66f | [
"MIT"
] | null | null | null | main.py | kirantambe/koinex-status-ticker | 487028a00605912e9fee97f4b29f260a2ab4f66f | [
"MIT"
] | 1 | 2021-06-01T21:56:55.000Z | 2021-06-01T21:56:55.000Z | main.py | kirantambe/koinex-status-ticker | 487028a00605912e9fee97f4b29f260a2ab4f66f | [
"MIT"
] | 1 | 2018-01-16T03:51:09.000Z | 2018-01-16T03:51:09.000Z | import rumps
import requests
import json
# Koinex public ticker endpoint polled for prices.
API_URL = 'https://koinex.in/api/ticker'
# Refresh interval used by the app's update timer (name suggests seconds).
UPDATE_INTERVAL = 60
# Display name -> ticker symbol for the tracked coins.
CURRENCIES = {
    'Bitcoin': 'BTC',
    'Ethereum': 'ETH',
    'Ripple': 'XRP',
    'Litecoin': 'LTC',
    'Bitcoin Cash': 'BCH',
}
if __name__ == "__main__":
    # NOTE(review): KoinexStatusBarApp is not visible in this chunk --
    # presumably a rumps.App subclass defined elsewhere in the original file.
    KoinexStatusBarApp().run()
452f2babff6fef2a136326734c3cab066e39250a | 900 | py | Python | app.py | migueljunior/docker | 09effa41a2207294ec9ab8bd34b166c862edea72 | [
"Apache-2.0"
] | null | null | null | app.py | migueljunior/docker | 09effa41a2207294ec9ab8bd34b166c862edea72 | [
"Apache-2.0"
] | null | null | null | app.py | migueljunior/docker | 09effa41a2207294ec9ab8bd34b166c862edea72 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, jsonify
from time import strftime
from socket import gethostname
from socket import gethostbyname
app = Flask(__name__)
if __name__ == '__main__':
    # Listens on all interfaces with debug mode on -- development use only.
    app.run(debug=True , port=8888 , host='0.0.0.0')
452f9d740231a724ca7b77510cb8a67453b7e2aa | 8,605 | py | Python | knn_and_regression/src/free_response.py | WallabyLester/Machine_Learning_From_Scratch | 6042cf421f5de2db61fb570b7c4de64dc03453f3 | [
"MIT"
] | null | null | null | knn_and_regression/src/free_response.py | WallabyLester/Machine_Learning_From_Scratch | 6042cf421f5de2db61fb570b7c4de64dc03453f3 | [
"MIT"
] | null | null | null | knn_and_regression/src/free_response.py | WallabyLester/Machine_Learning_From_Scratch | 6042cf421f5de2db61fb570b7c4de64dc03453f3 | [
"MIT"
] | null | null | null | import numpy as np
from numpy.core.fromnumeric import mean
from numpy.core.numeric import True_
from numpy.testing._private.utils import rand
from polynomial_regression import PolynomialRegression
from generate_regression_data import generate_regression_data
from metrics import mean_squared_error # mse
from math import log # use if scale too large to see error
from k_nearest_neighbor import KNearestNeighbor
try:
import matplotlib.pyplot as plt
except:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Free-response experiments: polynomial regression and kNN on synthetic
    # regression data, for two train/test splits (A: 10/90, B: 50/50).
    # Number 7, split A
    # Generate N noisy samples from a degree-4 polynomial, then shuffle
    # indices once and split 10 training / 90 test points.
    degree = 4
    N = 100
    x, y = generate_regression_data(degree, N, amount_of_noise=0.1)
    rand_sampl = np.random.choice(N, N, replace=False) # do not reselect numbers
    x_training, y_training = x[rand_sampl[:10]], y[rand_sampl[:10]]
    x_test, y_test = x[rand_sampl[10:]], y[rand_sampl[10:]]
    plots = []
    mse_training = []
    mse_test = []
    # Fit models of degree 0..8, save plots, and record train/test MSE.
    # to 9 degrees
    for i in range(9):
        poly = PolynomialRegression(i)
        poly.fit(x_training, y_training)
        poly.visualize(x_training, y_training, path=f"../plots_N7_splitA/training_plot_degree_{i}",
                       title=f"Training Plot Degree {i}")
        # test will be red
        poly.visualize(x_test, y_test, path=f"../plots_N7_splitA/test_plot_degree_{i}",
                       title=f"Test Plot Degree {i}", color='r')
        y_hat_training = poly.predict(x_training) # predicted value
        mse_training.append(mean_squared_error(y_training, y_hat_training))
        y_hat_test = poly.predict(x_test)
        mse_test.append(mean_squared_error(y_test, y_hat_test))
        plots.append(poly)
    plt.clf() # clear figure
    plt.figure()
    # log was needed to scale
    plt.plot(range(9), [log(mse_training[i]) for i in range(9)], label="training error")
    plt.plot(range(9), [log(mse_test[i]) for i in range(9)], label="test error")
    plt.title("Error as a Function of Degree")
    plt.xlabel("degree")
    plt.ylabel("error")
    plt.legend()
    plt.grid(True)
    plt.savefig("../plots_N7_splitA/error_as_a_function_of_degree.png")
    # get the two lowest errors
    low_test_err_degree = mse_test.index(min(mse_test))
    low_training_err_degree = mse_training.index(min(mse_training))
    plt.clf() # clear figure
    plt.figure()
    plt.scatter(x_training, y_training)
    # NOTE(review): .X_training and .f are assumed attributes set by
    # PolynomialRegression during fit -- confirm against its source.
    plt.plot(np.sort(plots[low_training_err_degree].X_training), plots[low_training_err_degree].f, label=f"lowest training error curve with degree = {low_training_err_degree}")
    plt.plot(np.sort(plots[low_test_err_degree].X_training), plots[low_test_err_degree].f, label=f"lowest test error curve with degree = {low_test_err_degree}")
    plt.title("Lowest Training and Test Errors")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.legend()
    plt.grid(True)
    plt.savefig("../plots_N7_splitA/lowest_training_and_test_error.png")
    # Number 10, split A
    # k-nearest-neighbor regression for k in {1,3,5,7,9} on the same split.
    k = {1, 3, 5, 7, 9}
    kplots = []
    mse_training_k = []
    mse_test_k = []
    # Reshape the 1-D samples into 2-column matrices for the kNN API.
    kx_training = np.reshape(x_training, (-1,2))
    ky_training = np.reshape(y_training, (-1,2))
    kx_test = np.reshape(x_test, (-1, 2))
    ky_test = np.reshape(y_test, (-1,2))
    #print(kx_training)
    #print(kx_training.shape)
    # NOTE(review): iterating a set relies on its iteration order matching the
    # sorted k values -- a list/tuple would make the ordering explicit.
    for i in k:
        knn = KNearestNeighbor(i, distance_measure="euclidean", aggregator="mean")
        knn.fit(kx_training, ky_training)
        #print(f"x_training = {x_training.shape}")
        k_training = knn.predict(kx_training)
        mse_training_k.append(mean_squared_error(ky_training, k_training))
        k_test = knn.predict(kx_test)
        mse_test_k.append(mean_squared_error(ky_test, k_test))
        kplots.append(knn)
    plt.clf() # clear figure
    plt.figure()
    # NOTE(review): the x axis here is the index 0..4, not the k values 1..9.
    plt.plot(range(5), [(mse_training_k[i]) for i in range(5)], label="training error")
    plt.plot(range(5), [(mse_test_k[i]) for i in range(5)], label="test error")
    plt.title("Error as a Function of k")
    plt.xlabel("k")
    plt.ylabel("error")
    plt.legend()
    plt.grid(True)
    plt.savefig("../plots_N10_splitA/error_as_a_function_of_k.png")
    low_test_err_k = mse_test_k.index(min(mse_test_k))
    plt.clf() # clear figure
    plt.figure()
    plt.scatter(x_training, y_training)
    # NOTE(review): this passes KNearestNeighbor objects to np.sort/plt.plot;
    # it likely should plot predicted values instead -- verify before reuse.
    plt.plot(np.sort(kplots[low_test_err_k]), kplots[low_test_err_k], label=f"lowest test error curve with k = {low_test_err_k}")
    plt.title("Lowest Test Error")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.legend()
    plt.grid(True)
    plt.savefig("../plots_N10_splitA/lowest_test_error.png")
    # Number 9, split B
    # Same polynomial experiment, now with a 50/50 train/test split.
    rand_sampl = np.random.choice(N, N, replace=False) # do not reselect numbers
    x_training, y_training = x[rand_sampl[:50]], y[rand_sampl[:50]]
    x_test, y_test = x[rand_sampl[50:]], y[rand_sampl[50:]]
    plots = []
    mse_training = []
    mse_test = []
    # to 9 degrees
    for i in range(9):
        poly = PolynomialRegression(i)
        poly.fit(x_training, y_training)
        poly.visualize(x_training, y_training, path=f"../plots_N9_splitB/training_plot_degree_{i}",
                       title=f"Training Plot Degree {i}")
        # test will be red
        poly.visualize(x_test, y_test, path=f"../plots_N9_splitB/test_plot_degree_{i}",
                       title=f"Test Plot Degree {i}", color='r')
        y_hat_training = poly.predict(x_training) # predicted value
        mse_training.append(mean_squared_error(y_training, y_hat_training))
        y_hat_test = poly.predict(x_test)
        mse_test.append(mean_squared_error(y_test, y_hat_test))
        plots.append(poly)
    plt.clf() # clear figure
    plt.figure()
    # log was needed to scale
    plt.plot(range(9), [log(mse_training[i]) for i in range(9)], label="training error")
    plt.plot(range(9), [log(mse_test[i]) for i in range(9)], label="test error")
    plt.title("Error as a Function of Degree")
    plt.xlabel("degree")
    plt.ylabel("error")
    plt.legend()
    plt.grid(True)
    plt.savefig("../plots_N9_splitB/error_as_a_function_of_degree.png")
    # get the two lowest errors
    low_test_err_degree = mse_test.index(min(mse_test))
    low_training_err_degree = mse_training.index(min(mse_training))
    plt.clf() # clear figure
    plt.figure()
    plt.scatter(x_training, y_training)
    plt.plot(np.sort(plots[low_training_err_degree].X_training), plots[low_training_err_degree].f, label=f"lowest training error curve with degree = {low_training_err_degree}")
    plt.plot(np.sort(plots[low_test_err_degree].X_training), plots[low_test_err_degree].f, label=f"lowest test error curve with degree = {low_test_err_degree}")
    plt.title("Lowest Training and Test Errors")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.legend()
    plt.grid(True)
    plt.savefig("../plots_N9_splitB/lowest_training_and_test_error.png")
    # Number 10, split B
    # kNN experiment repeated on the 50/50 split.
    k = {1, 3, 5, 7, 9}
    kplots = []
    mse_training_k = []
    mse_test_k = []
    kx_training = np.reshape(x_training, (-1,2))
    ky_training = np.reshape(y_training, (-1,2))
    kx_test = np.reshape(x_test, (-1, 2))
    ky_test = np.reshape(y_test, (-1,2))
    #print(kx_training)
    #print(kx_training.shape)
    for i in k:
        knn = KNearestNeighbor(i, distance_measure="euclidean", aggregator="mean")
        knn.fit(kx_training, ky_training)
        #print(f"x_training = {x_training.shape}")
        k_training = knn.predict(kx_training)
        mse_training_k.append(mean_squared_error(ky_training, k_training))
        k_test = knn.predict(kx_test)
        mse_test_k.append(mean_squared_error(ky_test, k_test))
        # NOTE(review): this appends the leftover `poly` model from the
        # previous section instead of `knn` -- likely a copy-paste bug
        # (compare the split-A loop above, which appends knn).
        kplots.append(poly)
    plt.clf() # clear figure
    plt.figure()
    # NOTE(review): x axis is the index 0..4, not the k values 1..9.
    plt.plot(range(5), [(mse_training_k[i]) for i in range(5)], label="training error")
    plt.plot(range(5), [(mse_test_k[i]) for i in range(5)], label="test error")
    plt.title("Error as a Function of k")
    plt.xlabel("k")
    plt.ylabel("error")
    plt.legend()
    plt.grid(True)
    plt.savefig("../plots_N10_splitB/error_as_a_function_of_k.png")
    low_test_err_k = mse_test_k.index(min(mse_test_k))
    plt.clf() # clear figure
    plt.figure()
    plt.scatter(x_training, y_training)
    # NOTE(review): because kplots holds polynomial models here, this plots
    # polynomial attributes under a kNN label -- verify intent before reuse.
    plt.plot(np.sort(kplots[low_test_err_k].X_training), kplots[low_test_err_k].f, label=f"lowest test error curve with k = {low_test_err_k}")
    plt.title("Lowest Test Error")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.legend()
    plt.grid(True)
    plt.savefig("../plots_N10_splitB/lowest_test_error.png")
| 38.936652 | 176 | 0.664614 |
45303096a42f87f1631edf145b0ae0b347d69c0b | 753 | py | Python | chapter10/exercises/EG10-20 Twinkle Twinkle classes.py | munnep/begin_to_code_with_python | 3ef14d90785526b6b26d262a7627eee73791d7d0 | [
"MIT"
] | null | null | null | chapter10/exercises/EG10-20 Twinkle Twinkle classes.py | munnep/begin_to_code_with_python | 3ef14d90785526b6b26d262a7627eee73791d7d0 | [
"MIT"
] | null | null | null | chapter10/exercises/EG10-20 Twinkle Twinkle classes.py | munnep/begin_to_code_with_python | 3ef14d90785526b6b26d262a7627eee73791d7d0 | [
"MIT"
] | null | null | null | # EG10-20 Twinkle Twinkle classes
import time
import snaps
# "Twinkle Twinkle Little Star" as a sequence of Note objects
# (pitch index and duration per note).
# NOTE(review): the Note class is not visible in this chunk -- presumably
# defined elsewhere in the original example alongside the snaps helpers.
tune = [Note(note=0, duration=0.4), Note(note=0, duration=0.4),
        Note(note=7, duration=0.4), Note(note=7, duration=0.4),
        Note(note=9, duration=0.4), Note(note=9, duration=0.4),
        Note(note=7, duration=0.8), Note(note=5, duration=0.4),
        Note(note=5, duration=0.4), Note(note=4, duration=0.4),
        Note(note=4, duration=0.4), Note(note=2, duration=0.4),
        Note(note=2, duration=0.4), Note(note=0, duration=0.8)]
# Play the melody one note at a time.
for note in tune:
    note.play()
| 30.12 | 63 | 0.622842 |
453161fcfe76bf1d62b11f9f68c0fa622f378ff1 | 3,523 | py | Python | tests/python/gpu/test_forward.py | xudong-sun/mxnet | fe42d30d5885dd576cb871fd70594c53efce9b42 | [
"Apache-2.0"
] | 31 | 2016-04-29T09:13:44.000Z | 2021-02-16T21:27:00.000Z | tests/python/gpu/test_forward.py | greenpea0104/incubator-mxnet | fc9e70bf2d349ad4c6cb65ff3f0958e23a7410bf | [
"Apache-2.0"
] | 23 | 2018-06-11T20:03:54.000Z | 2018-08-10T03:17:49.000Z | tests/python/gpu/test_forward.py | greenpea0104/incubator-mxnet | fc9e70bf2d349ad4c6cb65ff3f0958e23a7410bf | [
"Apache-2.0"
] | 47 | 2016-04-19T22:46:09.000Z | 2020-09-30T08:09:16.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import os
import numpy as np
import mxnet as mx
from mxnet.test_utils import *
# Resolve this file's directory so the shared unittest helpers can be imported.
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed
from mxnet.gluon import utils
if __name__ == '__main__':
    # NOTE(review): test_consistency is not visible in this chunk -- it comes
    # either from the `mxnet.test_utils` star import or elsewhere in the file.
    test_consistency(False)
| 43.493827 | 93 | 0.676412 |
4532f548d365251f68edc483eadcb7b23a21e639 | 7,706 | py | Python | docs/src/conf.py | rbeucher/LavaVu | 317a234d69ba3eb06a827a1f8658feb031fe358b | [
"CC-BY-4.0"
] | 23 | 2016-01-26T23:06:53.000Z | 2019-06-11T08:31:32.000Z | docs/src/conf.py | rbeucher/LavaVu | 317a234d69ba3eb06a827a1f8658feb031fe358b | [
"CC-BY-4.0"
] | 73 | 2016-03-16T03:02:35.000Z | 2019-07-18T07:29:52.000Z | docs/src/conf.py | rbeucher/LavaVu | 317a234d69ba3eb06a827a1f8658feb031fe358b | [
"CC-BY-4.0"
] | 6 | 2016-03-25T23:22:49.000Z | 2018-01-16T14:38:09.000Z | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
# LavaVu conf based on conf.py from underworld2
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os, sys
from os.path import dirname, join, abspath
sys.path.insert(0, abspath(join(dirname(__file__), '..')))
sys.path.insert(0, abspath(join(dirname(__file__), '..', '..')))
import setup as lsetup
# -- Project information -----------------------------------------------------
project = 'LavaVu'
copyright = '2020, Monash University'
author = 'Owen Kaluza, Monash University'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = lsetup.version
print('BUILDING LAVAVU DOCS FOR VERSION', release)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'sphinx.ext.githubpages',
'sphinx_markdown_tables',
'myst_parser',
# 'nbsphinx',
]
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
napoleon_use_keyword = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
#source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
#html_theme = 'pyramid'
#import sphinx_rtd_theme
#html_theme = "sphinx_rtd_theme"
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
# Set the width of the content area. Defaults to '900px'
'sidebar_width': '300px',
'page_width': '90%',
#'fixed_sidebar': 'true', #Need to scroll for full table of contents
'font_family': 'sans',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
#html_sidebars = {}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'LavaVudoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'LavaVu.tex', 'LavaVu Documentation',
'Owen Kaluza', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'lavavu', 'LavaVu Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'LavaVu', 'LavaVu Documentation',
author, 'LavaVu', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# setup mock classes so no building is required
# generate rst files
import os
import sys
# add current directory for `generate_api_documentation`
# NOTE(review): os.path.dirname(__name__) evaluates to '' (the module *name*
# has no path separator), so these effectively add the current working
# directory and './../../lavavu'; __file__ may have been intended -- confirm.
sys.path.append(os.path.dirname(__name__))
# add top project directory as well
sys.path.insert(0, os.path.join(os.path.dirname(__name__),'../../lavavu'))
try:
    # Use the real modules when the native LavaVu extension is importable.
    import lavavu
    import convert
    import points
    import tracers
    import control
except Exception:
    # The native extension may not be built on doc-build hosts; stub out the
    # binary dependencies so autodoc can still import the pure-Python modules.
    # Fix: the original imported `Mock as MagicMock` but then called the
    # undefined name `Mock()`, raising a NameError on this fallback path.
    # The stdlib unittest.mock replaces the third-party `mock` package.
    from unittest.mock import MagicMock
    MOCK_MODULES = ['scipy', 'numpy', '_LavaVuPython']
    sys.modules.update((mod_name, MagicMock()) for mod_name in MOCK_MODULES)
import generate_api_documentation
import subprocess
# Convert the example notebooks to reST pages before Sphinx builds the docs.
subprocess.call("./run-nb-to-rst.sh", shell=True)
| 30.101563 | 79 | 0.666623 |
453357739358367ed9649135f97753882d4359cd | 25,824 | py | Python | experiments/pamogk_exp.py | tastanlab/pamogk | fdd1a5b3dcd43b91ce9aa9989c7815b71f13e710 | [
"FTL"
] | 6 | 2020-06-18T14:37:01.000Z | 2021-09-12T07:25:47.000Z | experiments/pamogk_exp.py | tastanlab/pamogk | fdd1a5b3dcd43b91ce9aa9989c7815b71f13e710 | [
"FTL"
] | null | null | null | experiments/pamogk_exp.py | tastanlab/pamogk | fdd1a5b3dcd43b91ce9aa9989c7815b71f13e710 | [
"FTL"
] | 5 | 2020-01-02T09:08:36.000Z | 2021-07-17T12:35:37.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import collections
import mkkm_mr
import networkx as nx
from sklearn.cluster import KMeans, SpectralClustering
from snf_simple import SNF
from pamogk import config
from pamogk import label_mapper
from pamogk.data_processor import rnaseq_processor as rp, synapse_rppa_processor as rpp
from pamogk.gene_mapper import uniprot_mapper
from pamogk.kernels.lmkkmeans_train import lmkkmeans_train
from pamogk.kernels.pamogk import kernel
from pamogk.lib.sutils import *
from pamogk.pathway_reader import cx_pathway_reader as cx_pw
# see https://www.mathworks.com/help/matlab/matlab_external/install-the-matlab-engine-for-python.html
from pamogk.result_processor.label_analysis import LabelAnalysis
# import sys
# sys.path.insert(0, '/Users/fma/dev/bilkent/research/snf')
# sys.path.insert(0, '/Users/fma/dev/bilkent/research/mkkm-mr')
parser = argparse.ArgumentParser(description='Run PAMOGK-mut algorithms on pathways')
parser.add_argument('--run-id', '-rid', metavar='run-id', dest='run_id', type=str, help='Unique Run ID')
parser.add_argument('--rs-patient-data', '-rs', metavar='file-path', dest='rnaseq_patient_data', type=str2path,
help='rnaseq pathway ID list',
default=config.DATA_DIR / 'kirc_data/unc.edu_KIRC_IlluminaHiSeq_RNASeqV2.geneExp.whitelist_tumor.txt')
parser.add_argument('--rp-patient-data', '-rp', metavar='file-path', dest='rppa_patient_data', type=str2path,
help='rppa pathway ID list', default=config.DATA_DIR / 'kirc_data/kirc_rppa_data')
parser.add_argument('--som-patient-data', '-s', metavar='file-path', dest='som_patient_data', type=str2path,
help='som mut pathway ID list',
default=config.DATA_DIR / 'kirc_data/kirc_somatic_mutation_data.csv')
parser.add_argument('--label', '-m', metavar='label', dest='label', type=str, default='th196',
help='Label value that will be smoothed')
# used values: [0, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
parser.add_argument('--smoothing-alpha', '-a', metavar='alpha', dest='smoothing_alpha', type=float, default=0.01,
help='Smoothing alpha in range of 0-1')
parser.add_argument('--drop-percent', '-p', metavar='drop-percent', dest='drop_percent', type=int, default=1,
help='Drop percentage in range of 0-100')
parser.add_argument('--threshold', '-t', metavar='threshold', dest='threshold', type=float, default=1.96,
help='Cut off threshold')
parser.add_argument('--continuous', '-c', metavar='bool', dest='continuous', type=str2bool, default=True,
help='Whether to produce continuous values for under/over expressed')
parser.add_argument('--normalize-kernels', '-nk', dest='kernel_normalization', type=str2bool, default=True,
help='Kernel Normalization')
args = {}
def label_som_patient_genes(self, all_pw_map, patients):
    """Labels all patients' pathways with their somatic mutations.
    NOTE(review): takes `self` at module level -- this appears to be a method
    extracted from the experiment class (it uses self.som_pathways_save_valid,
    self.save_som_pathways, self.label).
    Parameters
    ----------
    all_pw_map: :obj:`dict` of :obj:`networkx.classes.graph.Graph`
        a dictionary of all pathways we are using
    patients: :obj:`list`
        list of patients with mutation mappings; each entry is a mapping with
        'pat_id' and 'mutated_nodes' keys
    Returns
    -------
    the pathway map with 'som' labels marked for every patient
    """
    # check if we already stored all over/under expression pathway data if so restore them
    if self.som_pathways_save_valid(all_pw_map):
        return self.restore_som_pathways(all_pw_map)
    num_pat = len(patients)
    # if there are missing ones calculate all of them
    log('Somatic mutation patient pathway labeling')
    for ind, patient in enumerate(patients):
        pid = patient['pat_id']
        genes = patient['mutated_nodes'] # get uniprot gene ids from indices
        # Wrap in a 2-D array as expected by mark_label_on_pathways.
        genes = np.array([genes])
        logr(f'Checking patient for somatic mutation {ind + 1:4}/{num_pat} pid={pid}')
        label_mapper.mark_label_on_pathways('som', pid, all_pw_map, genes, self.label)
    log()
    # Persist so the next run can take the restore shortcut above.
    self.save_som_pathways(all_pw_map)
    return all_pw_map
def cluster_cont(self, kernels, n_clusters):
    """Runs the continuous-kernel clustering pipelines and saves the labels.
    Produces and writes four clusterings under self.result_dir: SNF+KMeans,
    SNF+Spectral, average-kernel kernel-k-means, and MKKM-MR for each
    configured lambda value.
    NOTE(review): takes `self` at module level -- this appears to be a method
    extracted from the experiment class (it uses self.kmeans_cluster,
    self.result_dir, self.log2_lambdas).
    Parameters
    ----------
    kernels:
        stacked patient-similarity kernels; kernels.shape[0] is the number of
        kernels (assumed array-like -- confirm expected dtype/shape)
    n_clusters: int
        number of clusters k
    """
    snf_K = 20 # number of neighbors, usually (10~30)
    snf_t = 20 # number of iterations, usually (10~20)
    # SNF
    # W = snf_compute.snf(*kernels, K=snf_K, t=snf_t)
    W = SNF(kernels, K=snf_K, t=snf_t)
    # KMeans
    labels = self.kmeans_cluster(W, n_clusters)
    np_save_npz(self.result_dir / f'pamogk-snf-kmeans-k={n_clusters}', labels=labels)
    # Spectral
    labels = SpectralClustering(n_clusters, affinity='precomputed').fit_predict(W)
    np_save_npz(self.result_dir / f'pamogk-snf-spectral-k={n_clusters}', labels=labels)
    # Kernel k-means on the uniformly weighted average of the kernels.
    KH = mkkm_mr.lib.kernel_centralize(kernels)
    KH = mkkm_mr.lib.kernel_normalize(KH)
    num_ker = kernels.shape[0]
    gamma0 = np.ones((num_ker, 1)) / num_ker
    avgKer = mkkm_mr.lib.combine_kernels(KH, gamma0)
    H = mkkm_mr.lib.kernel_kmeans_iter(avgKer, n_clusters)
    labels = self.kmeans_cluster(H, n_clusters)
    # NOTE(review): the '.csv' suffix looks inconsistent -- the other outputs
    # carry no extension before np_save_npz; confirm the intended file name.
    np_save_npz(self.result_dir / f'pamogk-kmeans-k={n_clusters}.csv', labels=labels)
    # AAAI - 16 - MKKM-MR
    M = mkkm_mr.lib.calM(KH)
    # lambdas are 2**log2_lambda for each configured exponent.
    lambdas = np.power(2., self.log2_lambdas)
    for log2_lambda, lambda_ in zip(self.log2_lambdas, lambdas):
        log(f'running for n_clusters={n_clusters} log2_lambda={log2_lambda}')
        [H, weights, obj] = mkkm_mr.mkkm_mr(KH, M, n_clusters, lambda_)
        labels = self.kmeans_cluster(H, n_clusters)
        out_file = self.result_dir / f'pamogk-mkkm-k={n_clusters}-log2_lambda={log2_lambda}'
        np_save_npz(out_file, labels=labels, weights=weights, obj=obj)
if __name__ == '__main__':
    # NOTE(review): create_experiment is not visible in this chunk -- it is
    # presumably defined elsewhere in this module and builds from `args`.
    create_experiment().run()
| 44.755633 | 122 | 0.642852 |
45346855166d8c198852fc2c2b74490101e9dbc6 | 1,703 | py | Python | d3network/data/handschriftencensus_scrap.py | GusRiva/GusRiva | 50d63e3bc84f007b10df6edadbab85e23cf15731 | [
"MIT"
] | null | null | null | d3network/data/handschriftencensus_scrap.py | GusRiva/GusRiva | 50d63e3bc84f007b10df6edadbab85e23cf15731 | [
"MIT"
] | null | null | null | d3network/data/handschriftencensus_scrap.py | GusRiva/GusRiva | 50d63e3bc84f007b10df6edadbab85e23cf15731 | [
"MIT"
] | null | null | null | import requests
from lxml import html
from bs4 import BeautifulSoup
import json
import codecs
import re
#In this variable I will store the information as a dictionary with this structure:
# {number : "Name"}
ms_dict = {}
# Node-link structure for d3: one link per (manuscript, work) pair.
links_dict = {"links" : []}
# Crawl every handschriftencensus.de record page by numeric id.
for index in range(1,27000):
    print(index)
    page = requests.get('http://www.handschriftencensus.de/'+ str(index))
    c = page.content
    soup = BeautifulSoup(c, "lxml")
    # th.ort is presumably the manuscript's location/shelfmark header.
    ms_label = soup.find_all("th", class_="ort")
    if len(ms_label) > 0:
        ms_label = ms_label[0].text.rstrip()
        ms_dict[ "h" + str(index)] = ms_label
    # Each a.aw anchor links to a work; its numeric id ends the href.
    inhalt = soup.find_all("a", class_="aw")
    for el in inhalt:
        work_id = re.findall('/\d+$', el['href'])[0][1:]
        links_dict['links'].append( { "source": "h" + str(index), "target": "w" + work_id } )
    # In td id="inhalt" get the href, and only the number. Create the links at the same time
    # work = work[0].text
    # work = work.replace("'","")
    # final_dict[index +1] = {"title":work}
    #
    # signaturen = soup.find_all("ol", class_="signaturen")
    # if len(signaturen) > 0:
    #     final_dict[index+1]["manuscripts"] = []
    #     signaturen = signaturen[0]
    #     for elem in signaturen:
    #         if len(elem) > 1:
    #             manuscript = elem.find_all("a")[0]
    #
    #             final_dict[index+1]["manuscripts"].append(manuscript.text)
    # NOTE(review): the next line is a no-op -- `for` rebinds `index` on each
    # iteration, so the manual increment has no effect.
    index = index + 1
#Save data as json
with codecs.open('manuscripts_ids.json', 'w', 'utf-8') as outfile:
    json.dump(ms_dict,outfile, indent=2)
with codecs.open('links.json', 'w', 'utf-8') as outfile:
    json.dump(links_dict,outfile, indent=2)
#To save the data as a csv
# table = pd.DataFrame.from_dict(final_dict, orient='index')
# table.to_csv("Handschriftencensus_full.csv", encoding="utf-8")
| 27.467742 | 90 | 0.658837 |
4534bb68221abad8193f98fdfa1110b766c99aa2 | 2,590 | py | Python | tests/emukit/quadrature/test_quadrature_acquisitions.py | alexgessner/emukit | 355e26bb30edd772a81af2a1267c569d7f446d42 | [
"Apache-2.0"
] | 6 | 2019-06-02T21:23:27.000Z | 2020-02-17T09:46:30.000Z | tests/emukit/quadrature/test_quadrature_acquisitions.py | Tony-Chiong/emukit | a068c8d5e06b2ae8b038f67bf2e4f66c4d91651a | [
"Apache-2.0"
] | 4 | 2019-05-17T13:30:21.000Z | 2019-06-21T13:49:19.000Z | tests/emukit/quadrature/test_quadrature_acquisitions.py | Tony-Chiong/emukit | a068c8d5e06b2ae8b038f67bf2e4f66c4d91651a | [
"Apache-2.0"
] | 1 | 2020-01-12T19:50:44.000Z | 2020-01-12T19:50:44.000Z | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
import GPy
from math import isclose
from emukit.model_wrappers.gpy_quadrature_wrappers import QuadratureRBF, RBFGPy, BaseGaussianProcessGPy
from emukit.quadrature.methods import VanillaBayesianQuadrature
from emukit.quadrature.acquisitions import MutualInformation, IntegralVarianceReduction
# Tolerances for approximate numeric comparisons in these tests
# (cf. the math.isclose import above).
REL_TOL = 1e-5
ABS_TOL = 1e-4
| 28.777778 | 103 | 0.667568 |
4535c1a7513cb60d8687c9c277406f75c8762e19 | 2,039 | py | Python | tests/test_ProtocolService/test_ProtocolService.py | danilocgsilva/awsinstances | c0ab6ae42b3bfbe94735f7ba4741b3facec271ce | [
"MIT"
] | null | null | null | tests/test_ProtocolService/test_ProtocolService.py | danilocgsilva/awsinstances | c0ab6ae42b3bfbe94735f7ba4741b3facec271ce | [
"MIT"
] | null | null | null | tests/test_ProtocolService/test_ProtocolService.py | danilocgsilva/awsinstances | c0ab6ae42b3bfbe94735f7ba4741b3facec271ce | [
"MIT"
] | null | null | null | import unittest
import sys
# Make the package root importable from the tests directory.
# NOTE(review): inserting at position 2 (not 0) is unusual -- confirm intent.
sys.path.insert(2, "..")
from awsec2instances_includes.ProtocolService import ProtocolService
| 36.410714 | 77 | 0.714076 |
4538624158b0321268253bb048733d15b3730192 | 873 | py | Python | mltoolkit/mldp/utils/helpers/nlp/token_matching.py | mancunian1792/FewSum | c2f9ef0ae7445bdb188b6ceb28e998b3fd12b78e | [
"MIT"
] | 28 | 2020-10-12T19:05:22.000Z | 2022-03-18T01:19:29.000Z | mltoolkit/mldp/utils/helpers/nlp/token_matching.py | mancunian1792/FewSum | c2f9ef0ae7445bdb188b6ceb28e998b3fd12b78e | [
"MIT"
] | 1 | 2022-01-30T01:52:59.000Z | 2022-02-19T08:04:54.000Z | mltoolkit/mldp/utils/helpers/nlp/token_matching.py | mancunian1792/FewSum | c2f9ef0ae7445bdb188b6ceb28e998b3fd12b78e | [
"MIT"
] | 7 | 2020-10-29T14:01:04.000Z | 2022-02-22T18:33:10.000Z | from .constants import SPECIAL_TOKENS
try:
import re2 as re
except ImportError:
import re
def twitter_sentiment_token_matching(token):
    """Map a raw twitter token to its special placeholder token.

    URL, positive/negative emoticon, @user and heart patterns are tried in
    that order; the first matching placeholder from SPECIAL_TOKENS is
    returned. Falls through (returning None) when no pattern matches or
    the corresponding placeholder is not configured.
    """
    pattern_table = (
        ('URL_TOKEN', r'https?:\/\/[^\s]+'),
        ('POS_EM_TOKEN', r':-?(\)|D|p)+'),
        ('NEG_EM_TOKEN', r':-?(\(|\\|/)+'),
        ('USER_TOKEN', r'(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)'),
        ('HEART_TOKEN', r'<3+'),
    )
    for token_name, pattern in pattern_table:
        if token_name in SPECIAL_TOKENS and re.match(pattern, token):
            return SPECIAL_TOKENS[token_name]
45387e1e55f5181cc1ef4691f476f0481b601834 | 1,503 | py | Python | setup.py | geirem/pyconfig | e99693b7bc0acb3fe6b82acd29e8724336f95c43 | [
"CC0-1.0"
] | 1 | 2020-05-15T16:22:36.000Z | 2020-05-15T16:22:36.000Z | setup.py | geirem/pyconfig | e99693b7bc0acb3fe6b82acd29e8724336f95c43 | [
"CC0-1.0"
] | 9 | 2020-05-14T08:31:48.000Z | 2021-04-22T12:35:15.000Z | setup.py | geirem/pyconfig | e99693b7bc0acb3fe6b82acd29e8724336f95c43 | [
"CC0-1.0"
] | null | null | null | """A setuptools based setup module.
See:
https://packaging.python.org/guides/distributing-packages-using-setuptools/
"""
from setuptools import setup, find_packages
from os import path
# Directory containing this setup.py, so data files resolve regardless of
# the caller's current working directory.
here = path.abspath(path.dirname(__file__))
# The long description shown on PyPI is taken verbatim from the README.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='envyconfig',
    version='1.2.1',
    description='YAML reader with ENV interpolation.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/geirem/envyconfig',
    author='https://github.com/geirem',
    author_email='geiremb@gmail.com',
    classifiers=[
        # Trove classifiers; full list: https://pypi.org/classifiers/
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development',
        'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3 :: Only',
    ],
    keywords='configtools development',
    # src-layout: packages live under src/, installed at top level.
    package_dir={
        '': 'src',
    },
    packages=find_packages(where='src'),
    python_requires='>=3.8',
    # Optional extras: `pip install envyconfig[test]` / `[googlesecrets]`.
    extras_require={
        'test': ['pytest'],
        'googlesecrets': ["google-cloud-secret-manager"]
    },
    project_urls={ # Optional
        'Bug Reports': 'https://github.com/geirem/envyconfig/issues',
        'Funding': 'https://donate.pypi.org',
        'Source': 'https://github.com/geirem/envyconfig/',
    },
)
| 31.3125 | 75 | 0.641384 |
45389f146c6eea595dda9a6c6445b4f79a204445 | 3,457 | py | Python | pybamm/solvers/scipy_solver.py | danieljtait/PyBaMM | f9d6143770e4a01099f06e3574142424730f731a | [
"BSD-3-Clause"
] | null | null | null | pybamm/solvers/scipy_solver.py | danieljtait/PyBaMM | f9d6143770e4a01099f06e3574142424730f731a | [
"BSD-3-Clause"
] | null | null | null | pybamm/solvers/scipy_solver.py | danieljtait/PyBaMM | f9d6143770e4a01099f06e3574142424730f731a | [
"BSD-3-Clause"
] | null | null | null | #
# Solver class using Scipy's adaptive time stepper
#
import casadi
import pybamm
import scipy.integrate as it
import numpy as np
| 33.563107 | 84 | 0.565519 |
4539375fe3de0d453832a057381afb182d19ced7 | 5,204 | py | Python | crits/core/fields.py | dutrow/crits | 6b357daa5c3060cf622d3a3b0c7b41a9ca69c049 | [
"MIT"
] | 738 | 2015-01-02T12:39:55.000Z | 2022-03-23T11:05:51.000Z | crits/core/fields.py | deadbits/crits | 154097a1892e9d3960d6faaed4bd2e912a196a47 | [
"MIT"
] | 605 | 2015-01-01T01:03:39.000Z | 2021-11-17T18:51:07.000Z | crits/core/fields.py | deadbits/crits | 154097a1892e9d3960d6faaed4bd2e912a196a47 | [
"MIT"
] | 316 | 2015-01-07T12:35:01.000Z | 2022-03-30T04:44:30.000Z | import datetime
from dateutil.parser import parse
from mongoengine import DateTimeField, FileField
from mongoengine.connection import DEFAULT_CONNECTION_NAME
#from mongoengine.python_support import str_types
from six import string_types as str_types
import io
from django.conf import settings
if settings.FILE_DB == settings.S3:
import crits.core.s3_tools as S3
def getFileField(db_alias=DEFAULT_CONNECTION_NAME, collection_name="fs", **kwargs):
    """
    Pick the file-storage field type matching the configured backend.

    The admin configures binary storage through settings.FILE_DB: GridFS
    yields a mongoengine FileField, S3 yields an S3FileField. Returns None
    when neither backend is selected.
    """
    backend = settings.FILE_DB
    if backend == settings.GRIDFS:
        return FileField(db_alias, collection_name, **kwargs)
    if backend == settings.S3:
        return S3FileField(db_alias, collection_name, **kwargs)
453972bee5e4b38dcaee26d48c6dcec6950939dd | 821 | py | Python | custom_uss/custom_widgets/outlog.py | shuanet/dss | 5daafeb89aac58e4614775f301bec920f4abfa24 | [
"Apache-2.0"
] | 2 | 2022-02-13T19:13:16.000Z | 2022-02-17T14:52:05.000Z | custom_uss/custom_widgets/outlog.py | shuanet/dss | 5daafeb89aac58e4614775f301bec920f4abfa24 | [
"Apache-2.0"
] | null | null | null | custom_uss/custom_widgets/outlog.py | shuanet/dss | 5daafeb89aac58e4614775f301bec920f4abfa24 | [
"Apache-2.0"
] | 1 | 2022-02-16T20:17:38.000Z | 2022-02-16T20:17:38.000Z | import sys
from PySide6 import QtGui
| 25.65625 | 72 | 0.576127 |
453a307689bad4f488cdb3f14eea66f7d9566594 | 5,079 | py | Python | 2020/day07/day07.py | maxschalz/advent_of_code | 537ff10b74fb0faaba4fb7dffcba4a5cf3a999ae | [
"BSD-3-Clause"
] | null | null | null | 2020/day07/day07.py | maxschalz/advent_of_code | 537ff10b74fb0faaba4fb7dffcba4a5cf3a999ae | [
"BSD-3-Clause"
] | null | null | null | 2020/day07/day07.py | maxschalz/advent_of_code | 537ff10b74fb0faaba4fb7dffcba4a5cf3a999ae | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import unittest
COLOR = "shiny gold"
FNAME = "input.txt"
N_ITER = 1e7
TEST_FNAME = "test_input.txt"
def main():
    """Main function.

    Loads the puzzle input, runs and prints both part answers, then hands
    control to unittest.main(), which runs any TestCase classes defined in
    this module (none are visible in this extract -- presumably elided).
    """
    data = load_input(FNAME)
    part1(data)
    part2(data)
    print("\nUnittests")
    unittest.main()
def part1(data):
    """Solution to day 7, part 1.

    Registers one Bag per input rule, then reports how many bag colors can
    contain at least one COLOR ("shiny gold") bag.
    """
    for rule_line in data:
        Bag(rule_line)
    count = Bag.n_bags_containing_specific_bag(COLOR)
    print(f"{count} bags can contain at least one {COLOR} bag.")
    return count
def part2(data):
    """Solution to day 7, part 2.

    Registers one Bag per input rule, then reports how many other bags a
    single COLOR ("shiny gold") bag contains.
    """
    for rule_line in data:
        Bag(rule_line)
    total = Bag.n_bags_inside(COLOR)
    print(f"One {COLOR} bag contains {total} other bags.")
    return total
def load_input(fname):
    """Read in the data, return as a list of newline-stripped lines."""
    with open(fname, "r") as handle:
        return [line.strip("\n") for line in handle]
if __name__=="__main__":
    # Run both puzzle parts, then the unit tests (unittest.main inside main()).
    main()
| 29.189655 | 74 | 0.590471 |
453b632b266da30271e1e4710f1d5bea075bf4fb | 1,937 | py | Python | cluster/image/pro_seafile_7.1/scripts_7.1/start.py | chaosbunker/seafile-docker | 560d982d8cd80a20508bf616abc0dc741d7b5d84 | [
"Apache-2.0"
] | 503 | 2015-11-11T22:07:36.000Z | 2022-03-28T21:29:30.000Z | cluster/image/pro_seafile_7.1/scripts_7.1/start.py | chaosbunker/seafile-docker | 560d982d8cd80a20508bf616abc0dc741d7b5d84 | [
"Apache-2.0"
] | 209 | 2015-07-13T04:49:38.000Z | 2022-03-25T22:06:18.000Z | cluster/image/pro_seafile_7.1/scripts_7.1/start.py | chaosbunker/seafile-docker | 560d982d8cd80a20508bf616abc0dc741d7b5d84 | [
"Apache-2.0"
] | 195 | 2015-07-09T18:11:47.000Z | 2022-03-25T11:56:53.000Z | #!/usr/bin/env python3
#coding: UTF-8
import os
import sys
import time
import json
import argparse
from os.path import join, exists, dirname
from upgrade import check_upgrade
from utils import call, get_conf, get_script, get_command_output, get_install_dir
installdir = get_install_dir()
topdir = dirname(installdir)
if __name__ == "__main__":
    # Parse the single --mode flag and hand control to main().
    # NOTE(review): main() is not defined or imported in the visible module
    # scope -- its definition appears to be elided from this extract; confirm
    # against the upstream source.
    parser = argparse.ArgumentParser(description='Seafile cluster start script')
    parser.add_argument('--mode')
    main(parser.parse_args())
| 29.348485 | 113 | 0.653588 |
453bee7425b707c77a51058c3fca5c10f29a6d05 | 4,774 | py | Python | codit/stats/de_duplication_stat.py | saikat107/OpenNMT-py | 148b0d860e78120de704f7a6671e8eced251801b | [
"MIT"
] | null | null | null | codit/stats/de_duplication_stat.py | saikat107/OpenNMT-py | 148b0d860e78120de704f7a6671e8eced251801b | [
"MIT"
] | null | null | null | codit/stats/de_duplication_stat.py | saikat107/OpenNMT-py | 148b0d860e78120de704f7a6671e8eced251801b | [
"MIT"
] | null | null | null | import sys, os
import nltk
import numpy as np
if __name__ == '__main__':
    # Compare two decode-result files (tree-based vs token-based, judging by
    # the file names) over the same patch list, after de-duplication.
    # NOTE(review): read_patch, de_duplicate_patches and Patch are not visible
    # in this extract; read_patch is assumed to return a list of Patch objects
    # with a boolean `verdict` attribute -- confirm upstream.
    result_base = '/home/sc2nf/codit-clone'
    option = 'token' # 'tree' or 'token' (only used by the commented block below)
    size = 10
    # if option == 'tree':
    #     file_name = 'codit-all-concrete_' + str(size) + '.2_' + str(2*size) + '_decode_res.txt'
    # else:
    #     file_name = 'codit.all.token.top.' + str(size) + '_' + str(size) + '_decode_res.txt'
    file_name_tree = 'codit-all-concrete_' + str(size) + '.2_' + str(2 * size) + '_decode_res.txt'
    file_path_tree = result_base + '/' + file_name_tree
    patches_tree = read_patch(file_path_tree, size)
    # Indices of the patches kept after de-duplication (presumably the first
    # occurrence of each distinct patch -- confirm de_duplicate_patches).
    unique_indices = de_duplicate_patches(patches_tree)
    # unique_patches_tree = patches_tree[unique_indices]
    # unique_count = len(unique_patches_tree)
    file_name_token = 'codit.all.token.top.' + str(size) + '_' + str(size) + '_decode_res.txt'
    file_path_token = result_base + '/' + file_name_token
    patches_token = read_patch(file_path_token, size)
    # unique_patches = patches_token[unique_indices]
    # Merge the two runs: keep only de-duplicated patches and attach the token
    # run's verdict onto the tree patch as `verdict_token`.
    unified_patches = []
    for idx, (p_tree, p_token) in enumerate(zip(patches_tree, patches_token)):
        if idx in unique_indices:
            assert isinstance(p_tree, Patch) and isinstance(p_token, Patch)
            p_tree.verdict_token = p_token.verdict
            unified_patches.append(p_tree)
    # Per-run success counts over the unified set.
    tree_count = np.sum([1 if p.verdict else 0 for p in unified_patches])
    token_count = np.sum([1 if p.verdict_token else 0 for p in unified_patches])
    # Partition the successes: tree-only, token-only, and both.
    tree_indices = set()
    token_indices = set()
    for i, p in enumerate(unified_patches):
        if p.verdict:
            tree_indices.add(i)
        if p.verdict_token:
            token_indices.add(i)
    only_tree = tree_indices.difference(token_indices)
    only_token = token_indices.difference(tree_indices)
    common = tree_indices.intersection(token_indices)
    print(tree_count, token_count, len(only_token), len(only_tree), len(common), len(unified_patches))
    #
    # total_success_tree = np.sum([1 if p.verdict else 0 for p in unique_patches])
    # print(unique_patches, total_success_tree)
    # tree_success_indices_in_unique = set()
    # for idx, p in enumerate(unique_patches):
    #     if p.verdict:
    #         tree_success_indices_in_unique.add(idx)
    #
    #
    #
    # total_success_token = np.sum([1 if p.verdict else 0 for p in unique_patches])
    # print(tree_count, total_success_token)
| 35.626866 | 102 | 0.59845 |
453c01d612c70ef3a56e01d8f48863230c296568 | 4,934 | py | Python | pysnark/qaptools/runqapgen.py | Charterhouse/pysnark | 4d8ae194a918c57a84c9f42f2d9809d66e90f006 | [
"RSA-MD"
] | 65 | 2018-01-12T08:49:18.000Z | 2022-03-16T07:35:40.000Z | pysnark/qaptools/runqapgen.py | Charterhouse/pysnark | 4d8ae194a918c57a84c9f42f2d9809d66e90f006 | [
"RSA-MD"
] | 9 | 2018-01-19T21:14:02.000Z | 2019-10-15T09:48:01.000Z | pysnark/qaptools/runqapgen.py | Charterhouse/pysnark | 4d8ae194a918c57a84c9f42f2d9809d66e90f006 | [
"RSA-MD"
] | 13 | 2018-01-15T20:50:57.000Z | 2022-03-25T05:39:36.000Z | # Copyright (c) 2016-2018 Koninklijke Philips N.V. All rights reserved. A
# copyright license for redistribution and use in source and binary forms,
# with or without modification, is hereby granted for non-commercial,
# experimental and research purposes, provided that the following conditions
# are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimers.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the
# documentation and/or other materials provided with the distribution. If
# you wish to use this software commercially, kindly contact
# info.licensing@philips.com to obtain a commercial license.
#
# This license extends only to copyright and does not include or grant any
# patent license or other license whatsoever.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import subprocess
import sys
import pysnark.options
def run(eksize, pksize, genmk=False):
    """
    Run the qapgen tool
    :param eksize: Desired master evaluation key size
    :param pksize: Desired master public key size
    :param genmk: True if a new master secret key should be generated, False otherwise
    :return: None
    """
    mskfile = pysnark.options.get_mskey_file()
    mkeyfile = pysnark.options.get_mkey_file()
    mpkeyfile = pysnark.options.get_mpkey_file()
    # Enlarging existing key material requires the master secret key on disk;
    # only a fresh generation (genmk=True) can proceed without it.
    if not genmk and not os.path.isfile(mskfile):
        # NOTE(review): "materiak" is a typo for "material" in this message.
        raise IOError("Could not enlarge master key materiak: master secret key missing")
    # Python 2 print-to-stderr syntax (this module targets Python 2).
    print >> sys.stderr, "*** " + ("Generating" if genmk else "Enlarging") + " master key material"
    # Invoke qapgen with the clamped (non-negative) sizes and the three key
    # file paths; any non-zero exit status aborts the whole process.
    if subprocess.call([pysnark.options.get_qaptool_exe("qapgen"), str(max(pksize,eksize,0)), str(max(pksize,0)),
                mskfile, mkeyfile, mpkeyfile]) != 0:
        sys.exit(2)
def get_mekey_size():
    """
    Get the size (maximal exponent) of the current master evaluation key
    :return: Size, or -1 if key does not exist
    """
    try:
        # Python 2 file-iterator API: .next() yields the first line of the
        # key file; the size is its third space-separated token.
        mekf = open(pysnark.options.get_mkey_file())
        curmk = int(mekf.next().strip().split(" ")[2])
        mekf.close()
        return curmk
    except IOError:
        # A missing or unreadable key file is reported as -1, not an error.
        return -1
def get_mpkey_size():
    """
    Get the size (maximal exponent) of the current master public key
    :return: Size, or -1 if key does not exist
    """
    try:
        # Same header format as the master evaluation key: the size is the
        # third space-separated token of the first line (Python 2 .next()).
        mpkf = open(pysnark.options.get_mpkey_file())
        curmpk = int(mpkf.next().strip().split(" ")[2])
        mpkf.close()
        return curmpk
    except IOError:
        # A missing or unreadable key file is reported as -1, not an error.
        return -1
def ensure_mkey(eksize, pksize):
    """
    Ensures that there are master evaluation and public keys of the given sizes.
    If master evaluation/public keys exist but are to small, and there is no
    master secret key, this raises an error.
    If there is no key material at all, a fresh master secret key will be
    generated.
    :param eksize: Minimal evaluation key size (-1 if not needed)
    :param pksize: Minimal public key size (-1 if not needed)
    :return: Actual evaluation key, public key size after key generation
    """
    # Current sizes: -1 when the corresponding key file does not exist.
    curek = get_mekey_size()
    curpk = get_mpkey_size()
    havemsk = os.path.isfile(pysnark.options.get_mskey_file())
    havekeys = os.path.isfile(pysnark.options.get_mpkey_file()) or os.path.isfile(pysnark.options.get_mkey_file())
    if curek < eksize or curpk < pksize:
        if havemsk:
            # Secret key available: enlarge to the max of current/requested.
            run(max(curek, eksize), max(curpk, pksize), False)
            return (max(curek, eksize), max(curpk, pksize))
        elif havekeys:
            # Public key material exists but cannot be enlarged without the
            # master secret key.
            raise IOError("Key material too small ("+str(curek)+","+str(curpk)+
                          ")<("+str(eksize)+","+str(pksize)+") and missing master secret key")
        else:
            # No key material at all: generate a fresh master secret key.
            run(eksize, pksize, True)
            return (eksize,pksize)
    else:
        # Existing key material is already large enough.
        return (curek,curpk)
if __name__ == "__main__":
    # Command-line entry point: expects <eksize> <pksize> (Python 2 script).
    if len(sys.argv)<3:
        print >>sys.stderr, "*** Usage:", sys.argv[0], "<eksize>", "<pksize>"
        sys.exit(2)
    argeksize = int(sys.argv[1])
    argpksize = int(sys.argv[2])
    # Generate a fresh master secret key only when none exists on disk yet.
    run(argeksize, argpksize, not os.path.isfile(pysnark.options.get_mskey_file()))
| 37.378788 | 114 | 0.688285 |
453c20b8c1cf91ca7912ad336c0a4f1a000e5011 | 4,024 | py | Python | tubee/utils/__init__.py | tomy0000000/Tubee | 1bfbd3cde118cd8a31499b8255b311602fde85bc | [
"MIT"
] | 8 | 2020-12-09T13:01:41.000Z | 2022-01-09T10:06:25.000Z | tubee/utils/__init__.py | tomy0000000/Tubee | 1bfbd3cde118cd8a31499b8255b311602fde85bc | [
"MIT"
] | 141 | 2019-08-21T20:23:07.000Z | 2022-03-29T14:02:27.000Z | tubee/utils/__init__.py | tomy0000000/Tubee | 1bfbd3cde118cd8a31499b8255b311602fde85bc | [
"MIT"
] | 7 | 2020-07-28T08:52:06.000Z | 2021-07-26T02:15:36.000Z | """Helper Functions
Some Misc Functions used in this app
"""
import secrets
import string
from functools import wraps
from urllib.parse import urljoin, urlparse
from dateutil import parser
from flask import abort, current_app, request
from flask_login import current_user
from flask_migrate import upgrade
def admin_required_decorator(func):
    """Restrict view function to admin-only
    Arguments:
        func {view function} -- The view function to be restricting
    Returns:
        view function -- The restricted function
    """
    # NOTE(review): `decorated_view_function` is not defined anywhere in the
    # visible module -- the wrapped inner function (presumably a
    # @wraps(func) wrapper checking current_user.admin) appears to have been
    # elided from this extract; confirm against the upstream source.
    return decorated_view_function
def pushover_required(func):
    """Restrict view function to users who have configured Pushover account
    Arguments:
        func {view function} -- The view function to be restricting
    Returns:
        view function -- The restricted function
    """
    # NOTE(review): `decorated_function` is not defined anywhere in the
    # visible module -- the wrapped inner function appears to have been
    # elided from this extract; confirm against the upstream source.
    return decorated_function
def youtube_required(func):
    """Restrict view function to users who have configured YouTube account
    Arguments:
        func {view function} -- The view function to be restricting
    Returns:
        view function -- The restricted function
    """
    # NOTE(review): `decorated_function` is not defined anywhere in the
    # visible module -- the wrapped inner function appears to have been
    # elided from this extract; confirm against the upstream source.
    return decorated_function
def is_safe_url(target):
    """Check an endpoint before redirecting the user to it.

    Arguments:
        target {url} -- a url with complete scheme and domain to be examine

    Returns:
        bool -- True only when the resolved target uses http(s) and stays
                on the same host as the current request
    """
    host = urlparse(request.host_url)
    candidate = urlparse(urljoin(request.host_url, target))
    if candidate.scheme not in ("http", "https"):
        return False
    return host.netloc == candidate.netloc
def notify_admin(initiator, service, **kwargs):
    """Send Notification to all Admin

    A temporary helper that pushes the same notification to every admin
    user and collects the per-user responses.

    Arguments:
        initiator {str} -- Action or reason that trigger this notification
        service {str or notification.Service} -- Service used to send notification
        **kwargs {dict} -- optional arguments passed to notification

    Returns:
        dict -- Response from notification service, keyed by admin username
    """
    from ..models.user import User
    return {
        admin.username: admin.send_notification(initiator, service, **kwargs)
        for admin in User.query.filter_by(admin=True).all()
    }
| 27.006711 | 88 | 0.685885 |
453cbca8f170f8d57f86e5292c872a332ff4738e | 1,094 | py | Python | HourlyCrime/hour.py | pauljrodriguezcs/Chicago_Crime_Analysis | 8f385fdfbb8b770631a458edf03f90836f33b674 | [
"MIT"
] | 1 | 2020-02-12T16:25:23.000Z | 2020-02-12T16:25:23.000Z | HourlyCrime/hour.py | pauljrodriguezcs/Chicago_Crime_Analysis | 8f385fdfbb8b770631a458edf03f90836f33b674 | [
"MIT"
] | null | null | null | HourlyCrime/hour.py | pauljrodriguezcs/Chicago_Crime_Analysis | 8f385fdfbb8b770631a458edf03f90836f33b674 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import scipy.linalg as la
print("loading time series... ")
plt.figure(figsize=(16,9))
timeSeries = np.loadtxt('TheftTS.txt',delimiter=',',dtype=float) # load data
hourly = timeSeries[:,0]
plt.plot(hourly,'r-', label='Theft')
timeSeries = np.loadtxt('BatteryTS.txt',delimiter=',',dtype=float) # load data
hourly = timeSeries[:,0]
plt.plot(hourly,'g-', label='Battery')
timeSeries = np.loadtxt('CriminalDamageTS.txt',delimiter=',',dtype=float) # load data
hourly = timeSeries[:,0]
plt.plot(hourly,'b-', label='Criminal_Damage')
timeSeries = np.loadtxt('TarcoticsTS.txt',delimiter=',',dtype=float) # load data
hourly = timeSeries[:,0]
plt.plot(hourly,'c-', label='Narcotics')
timeSeries = np.loadtxt('AssaultTS.txt',delimiter=',',dtype=float) # load data
hourly = timeSeries[:,0]
plt.plot(hourly,'m-', label='Assault')
plt.xticks(np.arange(0,24,step=1))
plt.grid(True)
plt.legend()
plt.xlabel('Hour')
plt.ylabel('Total Crimes')
plt.title('Crime per Hour')
# plt.show()
plt.savefig('CrimePerHour.png',format='png',dpi=600) | 30.388889 | 85 | 0.71755 |
453cc274659ff78328110cd29e7888f4f4d189f2 | 2,733 | py | Python | scripts/annotation_csv.py | RulerOf/keras-yolo3 | 8d091cf42b2f126626ad8610adf31293225b7daa | [
"MIT"
] | 37 | 2018-10-20T15:50:18.000Z | 2021-06-18T14:31:50.000Z | scripts/annotation_csv.py | RulerOf/keras-yolo3 | 8d091cf42b2f126626ad8610adf31293225b7daa | [
"MIT"
] | 34 | 2019-04-10T18:59:08.000Z | 2021-03-24T11:08:36.000Z | scripts/annotation_csv.py | RulerOf/keras-yolo3 | 8d091cf42b2f126626ad8610adf31293225b7daa | [
"MIT"
] | 13 | 2019-08-29T08:19:05.000Z | 2021-09-20T10:13:31.000Z | """
Creating training file from own custom dataset
>> python annotation_csv.py \
--path_dataset ~/Data/PeopleDetections \
--path_output ../model_data
"""
import os
import sys
import glob
import argparse
import logging
import pandas as pd
import tqdm
sys.path += [os.path.abspath('.'), os.path.abspath('..')]
from keras_yolo3.utils import update_path
IMAGE_EXTENSIONS = ('.png', '.jpg')
ANNOT_COLUMNS = ('xmin', 'ymin', 'xmax', 'ymax', 'class')
if __name__ == '__main__':
    # Entry point: parse CLI options and build the training annotation file.
    # NOTE(review): parse_arguments and _main are not visible in this
    # extract -- their definitions appear to be elided; confirm upstream.
    logging.basicConfig(level=logging.INFO)
    arg_params = parse_arguments()
    _main(**arg_params)
| 33.740741 | 86 | 0.626418 |
453d3dd28ce2e196af7faf59e068a64e46af26e2 | 3,280 | py | Python | Assignment_2/one_vs_one.py | hthuwal/mcs-ml-assignments | 1d2850b82f49ccf31eec3c8f921f09d6260d13a4 | [
"MIT"
] | 3 | 2018-11-23T10:36:36.000Z | 2021-12-16T17:47:22.000Z | Assignment_2/one_vs_one.py | hthuwal/mcs-ml-assignments | 1d2850b82f49ccf31eec3c8f921f09d6260d13a4 | [
"MIT"
] | null | null | null | Assignment_2/one_vs_one.py | hthuwal/mcs-ml-assignments | 1d2850b82f49ccf31eec3c8f921f09d6260d13a4 | [
"MIT"
] | 7 | 2018-11-14T18:14:12.000Z | 2021-12-16T17:47:34.000Z | from pegasos import bgd_pegasos
import numpy as np
import pandas as pd
import pickle
import sys
# When True, train one binary pegasos SVM per unordered class pair
# (one-vs-one scheme); otherwise load previously trained weights from disk.
retrain = False
wandbs = None
if retrain:
    # NOTE(review): read_data and run2 are not visible in this extract --
    # read_data is assumed to return (feature vectors, integer labels);
    # confirm against the upstream module.
    x_train, y_train = read_data("mnist/train.csv")
    num_classes = len(set(y_train))
    # wandbs[i][j] stores the model returned by bgd_pegasos for the i-vs-j
    # classifier; only the upper triangle (i < j) is filled. The name
    # suggests (weights, bias) tuples -- confirm bgd_pegasos's return type.
    wandbs = [[() for j in range(num_classes)] for i in range(num_classes)]
    count = 0
    for i in range(num_classes):
        for j in range(num_classes):
            if(i < j):
                count += 1
                print("\nClassifier %d: %d vs %d\n" % (count, i, j))
                # Restrict the training set to the two classes, relabelled
                # to +1 (class i) and -1 (class j).
                xc, yc = [], []
                for x, y in zip(x_train, y_train):
                    if (y == i):
                        xc.append(x)
                        yc.append(1)
                    elif(y == j):
                        xc.append(x)
                        yc.append(-1)
                wandbs[i][j] = bgd_pegasos(xc, yc, 10e-4, c=1.0)
    # Persist all pairwise classifiers for later reuse.
    with open("models/pegasos.model", "wb") as f:
        pickle.dump(wandbs, f)
else:
    print("\nLoading Model")
    with open("models/pegasos.model", "rb") as f:
        wandbs = pickle.load(f)
# Evaluate on the dataset named on the command line and write predictions.
input_file = sys.argv[1].strip()
output_file = sys.argv[2].strip()
x_set, y_set = read_data(input_file)
print("Predicting")
run2(x_set, y_set, wandbs, output_file)
| 27.107438 | 108 | 0.498171 |
453dfae7aa03af853f997301ef1cbbd1ca05e43a | 1,533 | py | Python | tools/numpy-examples.py | martinahogg/machinelearning | 03b473375e64a0398177194df2fe26a1a89feedf | [
"Apache-2.0"
] | 2 | 2017-08-17T14:38:14.000Z | 2017-08-17T14:40:32.000Z | tools/numpy-examples.py | martinahogg/machinelearning | 03b473375e64a0398177194df2fe26a1a89feedf | [
"Apache-2.0"
] | null | null | null | tools/numpy-examples.py | martinahogg/machinelearning | 03b473375e64a0398177194df2fe26a1a89feedf | [
"Apache-2.0"
] | null | null | null | import numpy as np
# Inner (or dot) product
a = np.array([1,2])
b = np.array([3,4])
np.inner(a, b)
a.dot(b)
# Outer product
a = np.array([1,2])
b = np.array([3,4])
np.outer(a, b)
# Inverse
m = np.array([[1,2], [3,4]])
np.linalg.inv(m)
# Inner (or dot) product
m = np.array([[1,2], [3,4]])
minv = np.linalg.inv(m)
m.dot(minv)
# Diagonal
m = np.array([[1,2], [3,4]])
np.diag(m)
m = np.array([1,2])
np.diag(m)
# Determinant
m = np.array([[1,2], [3,4]])
np.linalg.det(m)
# Trace - sum of elements of the diagonal
m = np.array([[1,2], [3,4]])
np.diag(m)
np.diag(m).sum()
np.trace(m)
# Transpose
m = np.array([ [1,2], [3,4] ])
m.T
# Gaussian distribution
m = np.random.randn(2,3)
m
# Covariance
X = np.random.randn(100,3)
np.cov(X.T)
# Eigen vectors and values
# For symmetric matrix (m == m.T) and hermitian matrix (m = m.H) we use eigh.
m = np.array([
[ 0.89761228, 0.00538701, -0.03229084],
[ 0.00538701, 1.04860676, -0.25001666],
[-0.03229084, -0.25001666, 0.81116126]])
# The first tuple contains three Eigen values.
# The second tuple contains Eigen vectors stored in columns.
np.linalg.eigh(m)
# Solving linear systems
# The admissions fee at a small far is $1.50 for children an $4.00 for adults.
# On a certain day 2,200 people enter the fair and $5050 is collected.
# How many children and how many adults attended.
#
# Let X1 = number of children
# Let X2 = number of adults
# X1 + X2 = 2200
# 1.5X1 + 4X2 = 5050
a = np.array([ [1,1], [1.5,4] ])
b = np.array( [ 2200, 5050] )
np.linalg.solve(a, b) | 20.716216 | 79 | 0.629485 |
453fdaffee2d4ec5ec8223f0fa753fce8c413273 | 14,337 | py | Python | src/relstorage/tests/util.py | lungj/relstorage | e18394b0197f6b70708037f36defbd3fe3ee5137 | [
"ZPL-2.1"
] | null | null | null | src/relstorage/tests/util.py | lungj/relstorage | e18394b0197f6b70708037f36defbd3fe3ee5137 | [
"ZPL-2.1"
] | null | null | null | src/relstorage/tests/util.py | lungj/relstorage | e18394b0197f6b70708037f36defbd3fe3ee5137 | [
"ZPL-2.1"
] | null | null | null | import os
import platform
import unittest
# ZODB >= 3.9. The blob directory can be a private cache.
shared_blob_dir_choices = (False, True)
# CI-environment detection via the providers' well-known env variables.
RUNNING_ON_TRAVIS = os.environ.get('TRAVIS')
RUNNING_ON_APPVEYOR = os.environ.get('APPVEYOR')
RUNNING_ON_CI = RUNNING_ON_TRAVIS or RUNNING_ON_APPVEYOR
# skipOnCI / skipOnAppveyor act like unittest.skip on the matching platform.
# NOTE(review): `_do_not_skip` is not defined in this extract -- presumably a
# no-op decorator factory defined earlier in the file; confirm upstream.
if RUNNING_ON_CI:
    skipOnCI = unittest.skip
else:
    skipOnCI = _do_not_skip
if RUNNING_ON_APPVEYOR:
    skipOnAppveyor = unittest.skip
else:
    skipOnAppveyor = _do_not_skip
# Memcache test configuration; stays None unless a driver can be imported.
CACHE_SERVERS = None
CACHE_MODULE_NAME = None
if RUNNING_ON_TRAVIS:
    # We expect to have access to a local memcache server
    # on travis. Use it if we can import drivers.
    # pylint:disable=unused-import
    try:
        import pylibmc
        CACHE_SERVERS = ["localhost:11211"]
        CACHE_MODULE_NAME = 'relstorage.pylibmc_wrapper'
    except ImportError:
        try:
            import memcache
            CACHE_SERVERS = ["localhost:11211"]
            CACHE_MODULE_NAME = 'memcache'
        except ImportError:
            pass
# Small blobs on CI (slow), macOS (interactive testing), or when RS_SMALL_BLOB
# is set -- unless RS_LARGE_BLOB explicitly overrides.
USE_SMALL_BLOBS = ((RUNNING_ON_CI # slow here
                    or platform.system() == 'Darwin' # interactive testing
                    or os.environ.get("RS_SMALL_BLOB")) # define
                   and not os.environ.get('RS_LARGE_BLOB'))
# mysqlclient (aka MySQLdb) and possibly other things that
# use libmysqlclient.so will try to connect over the
# default Unix socket that was established when that
# library was compiled if no host is given. But that
# server may not be running, or may not be the one we want
# to use for testing, so explicitly ask it to use TCP
# socket by giving an IP address (using 'localhost' will
# still try to use the socket.) (The TCP port can be bound
# by non-root, but the default Unix socket often requires
# root permissions to open.)
STANDARD_DATABASE_SERVER_HOST = '127.0.0.1'
DEFAULT_DATABASE_SERVER_HOST = os.environ.get('RS_DB_HOST',
                                              STANDARD_DATABASE_SERVER_HOST)
# Whether tests for database drivers that are not installed should still run
# (and presumably fail loudly); always disabled on CI.
TEST_UNAVAILABLE_DRIVERS = not bool(os.environ.get('RS_SKIP_UNAVAILABLE_DRIVERS'))
if RUNNING_ON_CI:
    TEST_UNAVAILABLE_DRIVERS = False
| 37.046512 | 92 | 0.604032 |
45409f2dbf3fb01dea755f2b203a25f415411768 | 2,435 | py | Python | sktime/datatypes/_panel/_examples.py | marcio55afr/sktime | 25ba2f470f037366ca6b0e529137d3d0a6191e2e | [
"BSD-3-Clause"
] | 5,349 | 2019-03-21T14:56:50.000Z | 2022-03-31T11:25:30.000Z | sktime/datatypes/_panel/_examples.py | marcio55afr/sktime | 25ba2f470f037366ca6b0e529137d3d0a6191e2e | [
"BSD-3-Clause"
] | 1,803 | 2019-03-26T13:33:53.000Z | 2022-03-31T23:58:10.000Z | sktime/datatypes/_panel/_examples.py | marcio55afr/sktime | 25ba2f470f037366ca6b0e529137d3d0a6191e2e | [
"BSD-3-Clause"
] | 911 | 2019-03-25T01:21:30.000Z | 2022-03-31T04:45:51.000Z | # -*- coding: utf-8 -*-
"""Example generation for testing.
Exports dict of examples, useful for testing as fixtures.
example_dict: dict indexed by triple
1st element = mtype - str
2nd element = considered as this scitype - str
3rd element = int - index of example
elements are data objects, considered examples for the mtype
all examples with same index are considered "same" on scitype content
if None, indicates that representation is not possible
example_lossy: dict of bool indexed by pairs of str
1st element = mtype - str
2nd element = considered as this scitype - str
3rd element = int - index of example
elements are bool, indicate whether representation has information removed
all examples with same index are considered "same" on scitype content
overall, conversions from non-lossy representations to any other ones
should yield the element exactly, identidally (given same index)
"""
import pandas as pd
import numpy as np
# Containers indexed by (mtype, scitype, example-index); see module docstring.
example_dict = dict()
example_dict_lossy = dict()
# --- example 0 as "numpy3D": one 3-D array holding all instances ---
# NOTE(review): axis meaning (instance, variable, timepoint) inferred from
# the equivalent frame representations below -- confirm against the mtype spec.
X = np.array(
    [[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 55, 6]], [[1, 2, 3], [42, 5, 6]]],
    dtype=np.int64,
)
example_dict[("numpy3D", "Panel", 0)] = X
example_dict_lossy[("numpy3D", "Panel", 0)] = False
# --- the same example as "df-list": one DataFrame per instance ---
cols = [f"var_{i}" for i in range(2)]
Xlist = [
    pd.DataFrame([[1, 4], [2, 5], [3, 6]], columns=cols),
    pd.DataFrame([[1, 4], [2, 55], [3, 6]], columns=cols),
    pd.DataFrame([[1, 42], [2, 5], [3, 6]], columns=cols),
]
example_dict[("df-list", "Panel", 0)] = Xlist
example_dict_lossy[("df-list", "Panel", 0)] = False
# --- the same example as "pd-multiindex": one DataFrame with an
# (instances, timepoints) MultiIndex ---
cols = ["instances", "timepoints"] + [f"var_{i}" for i in range(2)]
Xlist = [
    pd.DataFrame([[0, 0, 1, 4], [0, 1, 2, 5], [0, 2, 3, 6]], columns=cols),
    pd.DataFrame([[1, 0, 1, 4], [1, 1, 2, 55], [1, 2, 3, 6]], columns=cols),
    pd.DataFrame([[2, 0, 1, 42], [2, 1, 2, 5], [2, 2, 3, 6]], columns=cols),
]
X = pd.concat(Xlist)
X = X.set_index(["instances", "timepoints"])
example_dict[("pd-multiindex", "Panel", 0)] = X
example_dict_lossy[("pd-multiindex", "Panel", 0)] = False
# --- the same example as "nested_univ": each cell holds a univariate Series ---
cols = [f"var_{i}" for i in range(2)]
X = pd.DataFrame(columns=cols, index=[0, 1, 2])
X["var_0"] = pd.Series(
    [pd.Series([1, 2, 3]), pd.Series([1, 2, 3]), pd.Series([1, 2, 3])]
)
X["var_1"] = pd.Series(
    [pd.Series([4, 5, 6]), pd.Series([4, 55, 6]), pd.Series([42, 5, 6])]
)
example_dict[("nested_univ", "Panel", 0)] = X
example_dict_lossy[("nested_univ", "Panel", 0)] = False
| 31.623377 | 79 | 0.632444 |
4540a0cc0547d5162f147799d9341ebb9bb38b1a | 2,951 | py | Python | ipfnlite/get_los_diaggeom.py | guimarais/AUGlite | 8b6f6fbf57d974eabd7eb4c04c8b18478a38c9de | [
"MIT"
] | null | null | null | ipfnlite/get_los_diaggeom.py | guimarais/AUGlite | 8b6f6fbf57d974eabd7eb4c04c8b18478a38c9de | [
"MIT"
] | null | null | null | ipfnlite/get_los_diaggeom.py | guimarais/AUGlite | 8b6f6fbf57d974eabd7eb4c04c8b18478a38c9de | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 15:34:36 2019
@author: eseliuni
"""
from __future__ import print_function
#from builtins import str
#from builtins import range
import os
def get_coordinate_from_line(coordinate, line):
    """
    Extract the numeric value of a named coordinate from a diaggeom line.

    The line is a comma-separated sequence of "name=value" words. The value
    for "phi" is parsed as-is; for every other coordinate the last character
    of the word (a unit suffix) is dropped before parsing. Returns None when
    the coordinate does not occur in the line.
    """
    marker = str(coordinate) + "="
    for word in line.split(","):
        if marker not in word:
            continue
        start = word.index("=") + 1
        if coordinate == "phi":
            return float(word[start:])
        return float(word[start:-1])
def get_los(full_path):
    """
    Reads the file *.coordinate from diaggeom with line of sight (LOS) of a
    diagnostic. Returns a dictionary with keys:
        name: short name of the diagnostic
        description: full name of the diagnostic
        signals: contains the name of each channel and its LOS
    """
    # Read the whole file and drop surrounding whitespace on each line.
    with open(full_path, "r") as file:  # note: `file` shadows the py2 builtin
        lines = file.readlines()
    lines = [line.strip() for line in lines]
    # The first line carries "SHORTNAME ... (Full description)".
    los_diag = {"name": lines[0].split()[0],
                "description": lines[0][
                    lines[0].index("(")+1:lines[0].index(")")
                ],
                "signals":{}
                }
    # Split the file into per-channel blocks: each block starts at a line
    # containing the marker phrase, and a sentinel index at end-of-file
    # closes the last block.
    phrase = "(Line of sight)" # a phrase that indicates the beginning of a block
    signals_line_idx = [ii for ii in range(len(lines)) if phrase in lines[ii]]
    signals_line_idx.append(len(lines))
    # Consecutive (start, end) pairs; the [:-1] drops the pair that would
    # pair the last real start with the sentinel, which is then re-added
    # explicitly on the next line.
    signal_blocks_idx = [(signals_line_idx[ii], signals_line_idx[ii+1]) for ii in range(len(signals_line_idx)-1)[:-1]]
    signal_blocks_idx.append((signals_line_idx[-2], signals_line_idx[-1]))
    # Obtain R, z and phi for each block.
    for (ii, jj) in signal_blocks_idx:
        los = {}
        phrase = "From"
        block = lines[ii:jj]
        line_idx = [ll for ll in range(len(block)) if phrase in block[ll]]
        for idx in line_idx:
            # Each segment is described by two consecutive lines: the "From"
            # line and the following "To" line; collect both endpoints.
            R = [get_coordinate_from_line("R", block[idx]), get_coordinate_from_line("R", block[idx+1])]
            z = [get_coordinate_from_line("z", block[idx]), get_coordinate_from_line("z", block[idx+1])]
            phi = [get_coordinate_from_line("phi", block[idx]), get_coordinate_from_line("phi", block[idx+1])]
            # A line literally starting with "From" is the unnamed (single)
            # segment, keyed "0"; otherwise the first word names the segment.
            if block[idx].split()[0] == phrase:
                los.update({"0":{"R": R, "z":z, "phi":phi}})
            else:
                los.update({block[idx].split()[0]:{"R": R, "z":z, "phi":phi}})
        # The channel name is everything before the "(Line of sight)" marker.
        los_diag["signals"].update({lines[ii][:lines[ii].index("(")-1]:los})
    # NOTE(review): redundant -- the `with` statement above already closed
    # the file; harmless but could be removed.
    file.close()
    return los_diag
if __name__ == "__main__":
    # Demo entry point: parse the example coordinates file shipped in the
    # repository's files/ directory (path is relative to the CWD, so this
    # only works when run from the expected scripts directory).
    working_dir = os.getcwd()
    examples_dir = "../../files/"
    path = os.path.join(working_dir, examples_dir)
    file_name = 'diaggeom_TS.coords'
    los_diag = get_los(os.path.join(path, file_name))
    print(los_diag)
| 35.554217 | 118 | 0.583192 |