hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
916ad498f5f7937a47cd76bb93a7df7cec38d72f | 5,354 | py | Python | core_tools/utility/plotting/plot_1D.py | peendebak/core_tools | 2e43edf0bbc1d7ceb7042559db499535e8f6a076 | [
"BSD-2-Clause"
] | null | null | null | core_tools/utility/plotting/plot_1D.py | peendebak/core_tools | 2e43edf0bbc1d7ceb7042559db499535e8f6a076 | [
"BSD-2-Clause"
] | null | null | null | core_tools/utility/plotting/plot_1D.py | peendebak/core_tools | 2e43edf0bbc1d7ceb7042559db499535e8f6a076 | [
"BSD-2-Clause"
] | null | null | null | import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import copy
from core_tools.utility.plotting.plot_settings import plot_layout, graph_settings_1D, _1D_raw_plot_data
from core_tools.utility.plotting.plot_general import _data_plotter
# TODO add log scale support !!!
if __name__ == '__main__':
from colors import MATERIAL_COLOR, Red
# global settings
g = graph_settings_1D()
g.color = Red[::-1]
g.linewidth = 1
a = plotter_1D(graph_setings=g)
a[0].set_labels('x_label', 'y_label')
a[0].add_data(np.linspace(0,50,200), np.sin(np.linspace(10,50,200)), w = 'p', alpha = 1, c=Red[5])
a[0].add_data(np.linspace(0,50,200), np.sin(np.linspace(10,50,200)), w = 'l', alpha = 0.3, c=Red[5])
# a.plot()
a.save('test1D_single.svg')
a = plotter_1D(plot_layout(n_plots_x = 1,n_plots_y = 2))
a[0].set_labels('x_label', 'y_label')
a[0].add_data(np.linspace(10,50,50), np.random.random([50]))
a[0,1].set_labels('x_label', 'y_label')
a[0,1].add_data(np.linspace(10,50,50), np.random.random([50]))
a.save('test1D_12.svg')
# a.plot()
a = plotter_1D(plot_layout(n_plots_x = 2,n_plots_y = 2, share_x=True, share_y=True))
a[0].set_labels('x_label', 'y_label')
a[0].add_data(np.linspace(10,50,50), np.random.random([50]), label='test 1')
a[0,1].set_labels('x_label', 'y_label')
a[0,1].add_data(np.linspace(10,50,50), np.random.random([50]), label='test 2')
a[0,1].add_data(np.linspace(10,50,50), np.random.random([50]))
a[1,0].set_labels('x_label', 'y_label')
a[1,0].add_data(np.linspace(10,50,50), np.random.random([50]))
a[1,1].set_labels('x_label', 'y_label')
a[1,1].add_data(np.linspace(10,50,50), np.sin(np.linspace(10,50,50)))
a.save('test1D_22.svg')
# a.plot()
a = plotter_1D(plot_layout((300, 70), n_plots_x = 6,n_plots_y = 1, share_x=False, share_y=True))
a[0].set_labels('time (ns)', 'Spin up probably (%)')
a[0].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
a[1].set_labels('time (ns)', 'Spin up probably (%)')
a[1].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
a[2].set_labels('time (ns)', 'Spin up probably (%)')
a[2].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
a[3].set_labels('time (ns)', 'Spin up probably (%)')
a[3].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
a[4].set_labels('time (ns)', 'Spin up probably (%)')
a[4].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
a[5].set_labels('time (ns)', 'Spin up probably (%)')
a[5].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
print(a)
a.save('test1D_61.svg')
a.plot() | 31.309942 | 111 | 0.686216 |
916bb212bcbe679ba4c75cb54521ee006fb78140 | 5,110 | py | Python | v0.3/achat.py | Forec/lan-ichat | f2ae85ef6a8f2b30126be787e52785971c926d8c | [
"0BSD"
] | 63 | 2016-10-25T06:05:29.000Z | 2021-06-11T01:13:30.000Z | v0.3/achat.py | yyfhust/lan-ichat | f2ae85ef6a8f2b30126be787e52785971c926d8c | [
"0BSD"
] | 1 | 2018-10-16T10:06:19.000Z | 2018-10-16T10:06:19.000Z | v0.3/achat.py | yyfhust/lan-ichat | f2ae85ef6a8f2b30126be787e52785971c926d8c | [
"0BSD"
] | 55 | 2016-10-25T06:05:33.000Z | 2021-12-10T04:58:57.000Z | # last edit date: 2016/11/2
# author: Forec
# LICENSE
# Copyright (c) 2015-2017, Forec <forec@bupt.edu.cn>
# Permission to use, copy, modify, and/or distribute this code for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from socket import *
import threading
import pyaudio
import wave
import sys
import zlib
import struct
import pickle
import time
import numpy as np
CHUNK = 1024  # frames per buffer when reading from the PyAudio stream
FORMAT = pyaudio.paInt16  # 16-bit signed integer samples
CHANNELS = 2  # stereo capture
RATE = 44100  # sample rate in Hz (CD quality)
RECORD_SECONDS = 0.5 | 34.066667 | 77 | 0.541292 |
916c668852524852bcc137172db7eabf2b12d323 | 25 | py | Python | gdb/util.py | dennereed/paleocore | d6da6c39cde96050ee4b9e7213ec1200530cbeee | [
"MIT"
] | 1 | 2021-02-05T19:50:13.000Z | 2021-02-05T19:50:13.000Z | gdb/util.py | dennereed/paleocore | d6da6c39cde96050ee4b9e7213ec1200530cbeee | [
"MIT"
] | 59 | 2020-06-17T22:21:51.000Z | 2022-02-10T05:00:01.000Z | gdb/util.py | dennereed/paleocore | d6da6c39cde96050ee4b9e7213ec1200530cbeee | [
"MIT"
] | 2 | 2020-07-01T14:11:09.000Z | 2020-08-10T17:27:26.000Z | from gdb.models import *
| 12.5 | 24 | 0.76 |
916cb78c9e97224f18aea1ae145aa0983c3481c1 | 274 | py | Python | iwg_blog/blog/views/__init__.py | razortheory/who-iwg-webapp | e2318d286cd9ab87d4d8103bc7b3072cfb99bf76 | [
"MIT"
] | null | null | null | iwg_blog/blog/views/__init__.py | razortheory/who-iwg-webapp | e2318d286cd9ab87d4d8103bc7b3072cfb99bf76 | [
"MIT"
] | null | null | null | iwg_blog/blog/views/__init__.py | razortheory/who-iwg-webapp | e2318d286cd9ab87d4d8103bc7b3072cfb99bf76 | [
"MIT"
] | null | null | null | from .base import ArticleView, ArticlePreviewView, ArticleListView, SearchView, LandingView, \
CategoryView, TagView, SubscribeForUpdates, UnsubscribeFromUpdates
from .ajax import GetArticleSlugAjax, TagsAutocompleteAjax
from .errors import page_not_found, server_error
| 54.8 | 94 | 0.850365 |
916d6d6dc88be47cd9a443a50f8be165dfb36ec7 | 3,167 | py | Python | io_import_rbsp/rbsp/rpak_materials.py | snake-biscuits/io_import_rbsp | 0de47dc70c373cc0417cc222d5d83e6dde72068b | [
"MIT"
] | 7 | 2021-09-30T11:13:00.000Z | 2022-03-25T16:19:19.000Z | io_import_rbsp/rbsp/rpak_materials.py | snake-biscuits/io_import_rbsp | 0de47dc70c373cc0417cc222d5d83e6dde72068b | [
"MIT"
] | 1 | 2021-11-15T18:36:51.000Z | 2021-11-15T18:36:51.000Z | io_import_rbsp/rbsp/rpak_materials.py | snake-biscuits/io_import_rbsp | 0de47dc70c373cc0417cc222d5d83e6dde72068b | [
"MIT"
] | null | null | null | # by MrSteyk & Dogecore
# TODO: extraction instructions & testing
import json
import os.path
from typing import List
import bpy
# Cache of materials that have already been built, to avoid re-creating them.
loaded_materials = {}

# Root directory the extracted material textures are loaded from.
MATERIAL_LOAD_PATH = ""  # put your path here

# Maps a texture-type key to the shader input it should be wired into.
# The "normal" texture is intentionally absent: it has special logic.
MATERIAL_INPUT_LINKING = {
    "color": "Base Color",
    "rough": "Roughness",
    "spec": "Specular",
    "illumm": "Emission",
}
| 38.156627 | 115 | 0.649826 |
916e1ddff0241cef174fcd4e5ccac0206688c76b | 636 | py | Python | initcmds/models.py | alldevic/mtauksync | 1a5d325ca8a7878aba5b292d7835546b24bb554c | [
"MIT"
] | null | null | null | initcmds/models.py | alldevic/mtauksync | 1a5d325ca8a7878aba5b292d7835546b24bb554c | [
"MIT"
] | null | null | null | initcmds/models.py | alldevic/mtauksync | 1a5d325ca8a7878aba5b292d7835546b24bb554c | [
"MIT"
] | null | null | null | from django.db import models
# Lifecycle states for a task, as Django-style choices:
# (stored single-letter value, human-readable label).
TASK_STATUS = (
    ("c", "created"),
    ("p", "progress"),
    ("s", "success"),
    ("f", "failed"),
)
| 27.652174 | 77 | 0.636792 |
916f9138f4bbb1766481eef3ea77cac318445838 | 3,291 | py | Python | aardvark/conf/reaper_conf.py | ttsiouts/aardvark | cbf29f332df86814dd581152faf863c0d29ae41c | [
"Apache-2.0"
] | null | null | null | aardvark/conf/reaper_conf.py | ttsiouts/aardvark | cbf29f332df86814dd581152faf863c0d29ae41c | [
"Apache-2.0"
] | null | null | null | aardvark/conf/reaper_conf.py | ttsiouts/aardvark | cbf29f332df86814dd581152faf863c0d29ae41c | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 European Organization for Nuclear Research.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
reaper_group = cfg.OptGroup(
    'reaper',
    title='Aardvark Service Options',
    help="Configuration options for Aardvark service")

# Options controlling how the reaper frees up resources for preemptible
# instances.  Registered under the [reaper] config group.
reaper_opts = [
    cfg.StrOpt('reaper_driver',
               default='chance_driver',
               help="""
The driver that the reaper will use
Possible choices:
* strict_driver: The purpose of the preemptibles existence is to eliminate the
                 idling resources. This driver gets all the possible offers
                 from the relevant hosts and tries to find the best matching
                 for the requested resources. The best matching offer is the
                 combination of preemptible servers that leave the least
                 possible resources unused.
* chance_driver: A valid host is selected randomly and in a number of
                 preconfigured retries, the driver tries to find the instances
                 that have to be culled in order to have the requested
                 resources available.
"""
               ),
    cfg.IntOpt('alternatives',
               default=1,
               help="""
The number of alternative slots that the reaper will try to free up for
each requested slot.
"""
               ),
    # NOTE: the original help text here was a copy-paste of the
    # 'alternatives' help; replaced with a description of what the option
    # actually bounds.
    cfg.IntOpt('max_attempts',
               default=5,
               help="""
The maximum number of attempts that the reaper will make in order to free
up space for each requested slot before giving up.
"""
               ),
    cfg.ListOpt('watched_aggregates',
                default=[],
                help="""
The list of aggregate names that the reaper will try to make space in.
Each element of the list can be an aggregate or a combination of aggregates.
Combination of aggregates is a single string with a vertical-line-separated
aggregate names.
e.g. watched_aggregates={agg_name1},{agg_name2}|{agg_name3}',....
For each element in the list, a reaper thread will be spawned and the request
will be forwarded to the responsible worker.
If the provided list is empty, only one worker will be spawned, responsible for
the whole system.
"""
                ),
    cfg.StrOpt('job_backend',
               default='redis',
               choices=('redis', 'zookeeper'),
               help="""
The backend to use for distributed task management.
For this purpose the Reaper uses OpenStack Taskflow. The two supported
backends are redis and zookeeper.
"""
               ),
    cfg.StrOpt('backend_host',
               default='localhost',
               help="""
Specifies the host where the job board backend can be found.
"""
               ),
]
| 32.91 | 79 | 0.671832 |
916fbb01e62cdbb436021c5d032e0ff8b5532255 | 3,171 | py | Python | src/Data.py | jhlee93/WNet-cGAN-Keras | 89666be91083735c3259e04907bbfbe1c89fc8f8 | [
"MIT"
] | 7 | 2019-07-09T15:16:52.000Z | 2021-05-13T14:14:48.000Z | src/Data.py | jhlee93/WNet-cGAN-Keras | 89666be91083735c3259e04907bbfbe1c89fc8f8 | [
"MIT"
] | 4 | 2019-07-24T13:35:11.000Z | 2021-04-20T07:59:49.000Z | src/Data.py | jhlee93/WNet-cGAN-Keras | 89666be91083735c3259e04907bbfbe1c89fc8f8 | [
"MIT"
] | 1 | 2021-12-16T13:19:13.000Z | 2021-12-16T13:19:13.000Z | import glob
import numpy as np
| 44.661972 | 198 | 0.571429 |
9170343444c1172d149626528603249b2f63831c | 370 | py | Python | count_files.py | xuannianc/keras-retinanet | d1da39592042927aaf3b3eb905a308c327983bed | [
"Apache-2.0"
] | null | null | null | count_files.py | xuannianc/keras-retinanet | d1da39592042927aaf3b3eb905a308c327983bed | [
"Apache-2.0"
] | null | null | null | count_files.py | xuannianc/keras-retinanet | d1da39592042927aaf3b3eb905a308c327983bed | [
"Apache-2.0"
] | null | null | null | import csv
vat_filenames = set()
train_csv_filename = 'train_annotations.csv'
val_csv_filename = 'val_annotations.csv'
for csv_filename in [train_csv_filename, val_csv_filename]:
for line in csv.reader(open(csv_filename)):
vat_filename = line[0].split('/')[-1]
vat_filenames.add(vat_filename)
print(len(vat_filenames))
vat_filenames.clear()
| 30.833333 | 59 | 0.735135 |
917058eae76c95edb3644d77520d9eb1f3e8a1e9 | 8,908 | py | Python | liberaforms/views/admin.py | ngi-nix/liberaforms | 5882994736292e7ab34c4c9207805b307478a6c7 | [
"MIT"
] | 3 | 2021-09-02T16:45:42.000Z | 2022-02-21T19:06:25.000Z | liberaforms/views/admin.py | ngi-nix/liberaforms | 5882994736292e7ab34c4c9207805b307478a6c7 | [
"MIT"
] | 2 | 2021-08-17T04:13:10.000Z | 2021-09-14T22:48:21.000Z | liberaforms/views/admin.py | ngi-nix/liberaforms | 5882994736292e7ab34c4c9207805b307478a6c7 | [
"MIT"
] | 1 | 2021-08-17T07:13:15.000Z | 2021-08-17T07:13:15.000Z | """
This file is part of LiberaForms.
# SPDX-FileCopyrightText: 2020 LiberaForms.org
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
import os, json
from flask import g, request, render_template, redirect
from flask import session, flash, Blueprint
from flask import send_file, after_this_request
from flask_babel import gettext as _
from liberaforms.models.user import User
from liberaforms.models.form import Form
from liberaforms.models.site import Site
from liberaforms.models.invite import Invite
from liberaforms.utils.wraps import *
from liberaforms.utils import utils
from liberaforms.utils.utils import make_url_for, JsonResponse
from liberaforms.utils.dispatcher import Dispatcher
from liberaforms.utils import wtf
from pprint import pprint
admin_bp = Blueprint('admin_bp', __name__,
template_folder='../templates/admin')
""" User management """
""" Form management """
""" Invitations """
""" Personal Admin preferences """
""" ROOT_USERS functions
"""
| 36.064777 | 84 | 0.635608 |
91705feef5320bb231c5d61b510ee6321361c934 | 29,405 | py | Python | python/zephyr/datasets/score_dataset.py | r-pad/zephyr | c8f45e207c11bfc2b21df169db65a7df892d2848 | [
"MIT"
] | 18 | 2021-05-27T04:40:38.000Z | 2022-02-08T19:46:31.000Z | python/zephyr/datasets/score_dataset.py | r-pad/zephyr | c8f45e207c11bfc2b21df169db65a7df892d2848 | [
"MIT"
] | null | null | null | python/zephyr/datasets/score_dataset.py | r-pad/zephyr | c8f45e207c11bfc2b21df169db65a7df892d2848 | [
"MIT"
] | 2 | 2021-11-07T12:42:00.000Z | 2022-03-01T12:51:54.000Z | import os, copy
import cv2
from functools import partial
import numpy as np
import torch
import torchvision
from torch.utils.data import Dataset
from zephyr.data_util import to_np, vectorize, img2uint8
from zephyr.utils import torch_norm_fast
from zephyr.utils.mask_edge import getRendEdgeScore
from zephyr.utils.edges import generate_distance_image
from zephyr.normals import compute_normals
from zephyr.utils.timer import TorchTimer
try:
from zephyr.datasets.bop_raw_dataset import BopRawDataset
except ImportError:
pass
from zephyr.datasets.prep_dataset import PrepDataset
IMPORTANCE_ORDER = [
28, 27, 32, 33, 36, 35, 29, 16, 26, 22, 13, 4, 26, 21, 22
]
| 44.218045 | 167 | 0.534875 |
91708273d963214e9092983f15d8ef3340677e15 | 814 | py | Python | em Python/Roteiro7/Roteiro7__testes_dijkstra.py | GuilhermeEsdras/Grafos | b6556c3d679496d576f65b798a1a584cd73e40f4 | [
"MIT"
] | null | null | null | em Python/Roteiro7/Roteiro7__testes_dijkstra.py | GuilhermeEsdras/Grafos | b6556c3d679496d576f65b798a1a584cd73e40f4 | [
"MIT"
] | null | null | null | em Python/Roteiro7/Roteiro7__testes_dijkstra.py | GuilhermeEsdras/Grafos | b6556c3d679496d576f65b798a1a584cd73e40f4 | [
"MIT"
] | null | null | null | from Roteiro7.Roteiro7__funcoes import GrafoComPesos
# .:: Arquivo de Testes do Algoritmo de Dijkstra ::. #
# --------------------------------------------------------------------------- #
grafo_aula = GrafoComPesos(
['E', 'A', 'B', 'C', 'D'],
{
'E-A': 1,
'E-C': 10,
'A-B': 2,
'B-C': 4,
'C-D': 3
}
)
print(grafo_aula)
print('Menor caminho por Dijkstra: ', grafo_aula.dijkstra('E', 'D'))
print("-------------------------")
grafo_aula2 = GrafoComPesos(
['A', 'B', 'C', 'D', 'E', 'F', 'G'],
{
'A-B': 1, 'A-F': 3, 'A-G': 2,
'B-F': 1,
'C-B': 2,
'C-D': 5,
'D-E': 2,
'F-D': 4,
'F-G': 2,
'G-E': 7,
}
)
print(grafo_aula2)
print('Menor caminho por Dijkstra: ', grafo_aula2.dijkstra('A', 'E'))
| 22.611111 | 79 | 0.395577 |
9170b4be66538fa8e6767525842e58971759fde7 | 356 | py | Python | QScreenCast/spyder/api.py | awinia-github/QScreenCast | 09d343cae0a1c7f86faf28e08a62bd09976aaf2e | [
"MIT"
] | null | null | null | QScreenCast/spyder/api.py | awinia-github/QScreenCast | 09d343cae0a1c7f86faf28e08a62bd09976aaf2e | [
"MIT"
] | null | null | null | QScreenCast/spyder/api.py | awinia-github/QScreenCast | 09d343cae0a1c7f86faf28e08a62bd09976aaf2e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright Tom Hren
# Licensed under the terms of the MIT License
# ----------------------------------------------------------------------------
"""
Python QtScreenCaster Spyder API.
"""
| 25.428571 | 78 | 0.38764 |
917377628f552efbcce428798dd528e6e5fe7134 | 4,196 | py | Python | setup.py | aaron19950321/ICOM | d5bd0705776c505dd1df0a1c76a07fee2d218394 | [
"PSF-2.0",
"BSD-3-Clause"
] | 5 | 2018-10-09T13:39:31.000Z | 2020-03-26T18:39:49.000Z | setup.py | aaron19950321/ICOM | d5bd0705776c505dd1df0a1c76a07fee2d218394 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | setup.py | aaron19950321/ICOM | d5bd0705776c505dd1df0a1c76a07fee2d218394 | [
"PSF-2.0",
"BSD-3-Clause"
] | 2 | 2018-10-09T13:39:36.000Z | 2018-10-09T23:18:39.000Z | import os, os.path
import subprocess
from distutils.core import setup
from py2exe.build_exe import py2exe
PROGRAM_NAME = 'icom_app'
PROGRAM_DESC = 'simple icom app'
NSIS_SCRIPT_TEMPLATE = r"""
!define py2exeOutputDirectory '{output_dir}\'
!define exe '{program_name}.exe'
; Uses solid LZMA compression. Can be slow, use discretion.
SetCompressor /SOLID lzma
; Sets the title bar text (although NSIS seems to append "Installer")
Caption "{program_desc}"
Name '{program_name}'
OutFile ${{exe}}
Icon '{icon_location}'
; Use XPs styles where appropriate
XPStyle on
; You can opt for a silent install, but if your packaged app takes a long time
; to extract, users might get confused. The method used here is to show a dialog
; box with a progress bar as the installer unpacks the data.
;SilentInstall silent
AutoCloseWindow true
ShowInstDetails nevershow
Section
DetailPrint "Extracting application..."
SetDetailsPrint none
InitPluginsDir
SetOutPath '$PLUGINSDIR'
File /r '${{py2exeOutputDirectory}}\*'
GetTempFileName $0
;DetailPrint $0
Delete $0
StrCpy $0 '$0.bat'
FileOpen $1 $0 'w'
FileWrite $1 '@echo off$\r$\n'
StrCpy $2 $TEMP 2
FileWrite $1 '$2$\r$\n'
FileWrite $1 'cd $PLUGINSDIR$\r$\n'
FileWrite $1 '${{exe}}$\r$\n'
FileClose $1
; Hide the window just before the real app launches. Otherwise you have two
; programs with the same icon hanging around, and it's confusing.
HideWindow
nsExec::Exec $0
Delete $0
SectionEnd
"""
zipfile = r"lib\shardlib"
setup(
    name = 'MyApp',
    description = 'My Application',
    version = '1.0',

    # py2exe expects GUI executable targets under the ``windows`` keyword;
    # the original ``window`` spelling is an unknown kwarg that distutils
    # ignores, so no exe target was ever registered.
    windows = [
        {
            'script': os.path.join('.','ICOM.py'),
            'icon_resources': [(1, os.path.join('.', 'icom.ico'))],
            'dest_base': PROGRAM_NAME,
        },
    ],

    options = {
        'py2exe': {
            # Py2exe options...
            "optimize": 2
        }
    },

    zipfile = zipfile,
    data_files = [],  # etc...
    # build_installer (defined elsewhere in this file) wraps py2exe to emit
    # an NSIS installer from NSIS_SCRIPT_TEMPLATE.
    cmdclass = {"py2exe": build_installer},
)
91762cf01e789ac760eedf4942c7a866b5214252 | 632 | py | Python | src/lingcomp/farm/features.py | CharlottePouw/interpreting-complexity | b9a73c0aff18e4c6b4209a6511d00639494c70da | [
"Apache-2.0"
] | 2 | 2020-12-18T12:26:22.000Z | 2020-12-19T18:47:07.000Z | src/lingcomp/farm/features.py | CharlottePouw/interpreting-complexity | b9a73c0aff18e4c6b4209a6511d00639494c70da | [
"Apache-2.0"
] | null | null | null | src/lingcomp/farm/features.py | CharlottePouw/interpreting-complexity | b9a73c0aff18e4c6b4209a6511d00639494c70da | [
"Apache-2.0"
] | 1 | 2021-05-19T13:39:45.000Z | 2021-05-19T13:39:45.000Z | import torch
from farm.data_handler.samples import Sample
from farm.modeling.prediction_head import RegressionHead
| 31.6 | 88 | 0.724684 |
9176396ea025090d1e564363b18149e19bf37323 | 5,057 | py | Python | manager/tests/api_view_test_classes.py | UN-ICC/icc-digital-id-manager | aca0109b3202b292145326ec5523ee8f24691a83 | [
"Apache-2.0"
] | 3 | 2021-02-03T16:37:19.000Z | 2022-02-07T09:59:03.000Z | manager/tests/api_view_test_classes.py | UN-ICC/icc-digital-id-manager | aca0109b3202b292145326ec5523ee8f24691a83 | [
"Apache-2.0"
] | null | null | null | manager/tests/api_view_test_classes.py | UN-ICC/icc-digital-id-manager | aca0109b3202b292145326ec5523ee8f24691a83 | [
"Apache-2.0"
] | 2 | 2021-02-10T16:03:31.000Z | 2022-02-07T08:50:16.000Z | import pytest
from rest_framework import status
from rest_framework.test import APIClient
def assert_status_code(response, expected_status):
    """Assert that *response* has the given HTTP status code.

    Shared implementation for the ``returns_status_code_http_*`` helpers
    below, which previously repeated the same assertion five times.
    """
    assert response.status_code == expected_status


def returns_status_code_http_200_ok(response):
    """Assert that the response is 200 OK."""
    assert_status_code(response, status.HTTP_200_OK)


def returns_status_code_http_401_unauthorized(response):
    """Assert that the response is 401 Unauthorized."""
    assert_status_code(response, status.HTTP_401_UNAUTHORIZED)


def returns_status_code_http_201_created(response):
    """Assert that the response is 201 Created."""
    assert_status_code(response, status.HTTP_201_CREATED)


def returns_status_code_http_204_no_content(response):
    """Assert that the response is 204 No Content."""
    assert_status_code(response, status.HTTP_204_NO_CONTENT)


def returns_status_code_http_405_not_allowed(response):
    """Assert that the response is 405 Method Not Allowed."""
    assert_status_code(response, status.HTTP_405_METHOD_NOT_ALLOWED)
def response_has_etag(response):
    """Assert that *response* carries a non-empty ``ETag`` header."""
    etag_header = response.get("ETag")
    assert etag_header
| 31.02454 | 84 | 0.721376 |
9176ff87702ba5b114dba78865e902b3d3390b83 | 2,259 | py | Python | dashboard/dashboard.py | TrustyJAID/Toxic-Cogs | 870d92067ba2a99b9ade2f957f945b95fdbc80f7 | [
"MIT"
] | null | null | null | dashboard/dashboard.py | TrustyJAID/Toxic-Cogs | 870d92067ba2a99b9ade2f957f945b95fdbc80f7 | [
"MIT"
] | null | null | null | dashboard/dashboard.py | TrustyJAID/Toxic-Cogs | 870d92067ba2a99b9ade2f957f945b95fdbc80f7 | [
"MIT"
] | null | null | null | from collections import defaultdict
import discord
from redbot.core import Config, checks, commands
from redbot.core.bot import Red
from redbot.core.utils.chat_formatting import box, humanize_list, inline
from abc import ABC
# ABC Mixins
from dashboard.abc.abc import MixinMeta
from dashboard.abc.mixin import DBMixin, dashboard
# Command Mixins
from dashboard.abc.roles import DashboardRolesMixin
from dashboard.abc.webserver import DashboardWebserverMixin
from dashboard.abc.settings import DashboardSettingsMixin
# RPC Mixins
from dashboard.baserpc import HUMANIZED_PERMISSIONS, DashboardRPC
from dashboard.menus import ClientList, ClientMenu
THEME_COLORS = ["red", "primary", "blue", "green", "greener", "yellow"]
# Thanks to Flare for showing how to use group commands across multiple files. If this breaks, its his fault
| 30.945205 | 110 | 0.657371 |
91773a1b99193243fe941616b2fc5339f203eb98 | 410 | py | Python | algorithms/162.Find-Peak-Element/Python/solution_2.py | hopeness/leetcode | 496455fa967f0704d729b4014f92f52b1d69d690 | [
"MIT"
] | null | null | null | algorithms/162.Find-Peak-Element/Python/solution_2.py | hopeness/leetcode | 496455fa967f0704d729b4014f92f52b1d69d690 | [
"MIT"
] | null | null | null | algorithms/162.Find-Peak-Element/Python/solution_2.py | hopeness/leetcode | 496455fa967f0704d729b4014f92f52b1d69d690 | [
"MIT"
] | null | null | null | """
https://leetcode.com/problems/find-peak-element/submissions/
"""
from typing import List
| 22.777778 | 60 | 0.473171 |
9177bf15b6da687a6ae646c46fc3addf65d8004a | 2,684 | py | Python | data_loader.py | vinbigdata-medical/MIDL2021-Xray-Classification | 51359126d07573053059c36e3cd95a7fd7100e0e | [
"MIT"
] | 4 | 2021-04-14T08:04:08.000Z | 2021-08-10T10:15:00.000Z | data_loader.py | vinbigdata-medical/MIDL2021-Xray-Classification | 51359126d07573053059c36e3cd95a7fd7100e0e | [
"MIT"
] | 1 | 2022-01-13T12:51:31.000Z | 2022-01-13T12:51:31.000Z | data_loader.py | vinbigdata-medical/MIDL2021-Xray-Classification | 51359126d07573053059c36e3cd95a7fd7100e0e | [
"MIT"
] | null | null | null | from torchvision.datasets import ImageFolder
from torchvision import transforms
import random
import os
import torch
from torch.utils.data.dataloader import DataLoader
from utils import constants, get_default_device
from image_folder_with_path import ImageFolderWithPaths
def to_device(data, device):
    """Recursively transfer a tensor, or a (possibly nested) list/tuple of
    tensors, to *device*.

    Note: list and tuple containers both come back as plain lists.
    """
    if not isinstance(data, (list, tuple)):
        return data.to(device, non_blocking=True)
    return [to_device(element, device) for element in data]
default_device = get_default_device.default_device
# Training pipeline: light augmentation (flip + small rotation), then resize
# and tensor conversion.
# NOTE(review): random.uniform(5, 10) runs once at import time, so the same
# fixed rotation bound is reused for the whole run -- confirm this is intended
# rather than a per-sample random angle.
train_transforms = transforms.Compose([
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.RandomRotation(degrees=random.uniform(5, 10)),
    transforms.Resize((512, 512)),
    transforms.ToTensor(),
])
# Evaluation pipeline: deterministic, no augmentation.
test_transforms = transforms.Compose([
    transforms.Resize((512, 512)),
    transforms.ToTensor(),
])
# Class names are taken from the training directory layout (one folder per class).
classes = os.listdir(constants.DATA_PATH + constants.TRAIN_PATH)
training_dataset = ImageFolder(constants.DATA_PATH + constants.TRAIN_PATH, transform=train_transforms)
valid_dataset = ImageFolder(constants.DATA_PATH + constants.VAL_PATH, transform=test_transforms)
# testing_dataset = ImageFolder(constants.DATA_PATH + constants.TEST_PATH, transform=test_transforms)
# training_dataset = ImageFolderWithPaths(constants.DATA_PATH + constants.TRAIN_PATH, transform=train_transforms)
# valid_dataset = ImageFolderWithPaths(constants.DATA_PATH + constants.VAL_PATH, transform=test_transforms)
# The test set keeps file paths alongside samples (ImageFolderWithPaths).
testing_dataset = ImageFolderWithPaths(constants.DATA_PATH + constants.TEST_PATH, transform=test_transforms)
# Fix the RNG seed so shuffling/augmentation are reproducible across runs.
torch.manual_seed(constants.RANDOM_SEED)
# Only the training loader shuffles; all loaders pin memory for faster
# host-to-GPU transfer.
train_dl = DataLoader(training_dataset, constants.BATCH_SIZE, shuffle=True, num_workers=8, pin_memory=True)
val_dl = DataLoader(valid_dataset, constants.BATCH_SIZE, num_workers=8, pin_memory=True)
test_dl = DataLoader(testing_dataset, constants.BATCH_SIZE, num_workers=8, pin_memory=True)
"""
Now we can wrap our training and validation data loaders using DeviceDataLoader for automatically transferring batches
of data to GPU (if available), and use to_device to move our model to GPU (if available)
"""
train_dl = DeviceDataLoader(train_dl, default_device)
val_dl = DeviceDataLoader(val_dl, default_device)
test_dl = DeviceDataLoader(test_dl, default_device) | 37.277778 | 118 | 0.770492 |
9177c031d705388dfe8031bad5b727ad1032aa9e | 4,254 | py | Python | calliope/test/test_analysis.py | sjpfenninger/calliope | a4e49c3b7d37f908bafc84543510eec0b4cf5d9f | [
"Apache-2.0"
] | 1 | 2019-11-11T15:50:16.000Z | 2019-11-11T15:50:16.000Z | calliope/test/test_analysis.py | mhdella/calliope | a4e49c3b7d37f908bafc84543510eec0b4cf5d9f | [
"Apache-2.0"
] | null | null | null | calliope/test/test_analysis.py | mhdella/calliope | a4e49c3b7d37f908bafc84543510eec0b4cf5d9f | [
"Apache-2.0"
] | 1 | 2019-11-11T15:50:18.000Z | 2019-11-11T15:50:18.000Z | # import matplotlib
# matplotlib.use('Qt5Agg') # Prevents `Invalid DISPLAY variable` errors
import pytest
import tempfile
from calliope import Model
from calliope.utils import AttrDict
from calliope import analysis
from . import common
from .common import assert_almost_equal, solver, solver_io
import matplotlib.pyplot as plt
plt.switch_backend('agg') # Prevents `Invalid DISPLAY variable` errors
| 36.672414 | 99 | 0.603197 |
917a6b3b8a05d7c695e7c6d3cb38a9324f5ab905 | 302 | py | Python | mol/data/reader.py | TzuTingWei/mol | 9499925443f389d8e960b6d656f2953d21df3e3b | [
"MIT"
] | null | null | null | mol/data/reader.py | TzuTingWei/mol | 9499925443f389d8e960b6d656f2953d21df3e3b | [
"MIT"
] | null | null | null | mol/data/reader.py | TzuTingWei/mol | 9499925443f389d8e960b6d656f2953d21df3e3b | [
"MIT"
] | null | null | null | import os
from mol.util import read_xyz
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, 'look_and_say.dat')
with open(filename, 'r') as handle:
look_and_say = handle.read()
| 25.166667 | 58 | 0.748344 |
917a93c6b5689f031c6779f12176c0d60e186575 | 13,198 | py | Python | cinder/tests/unit/targets/test_spdknvmf.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 3 | 2015-04-02T21:44:36.000Z | 2016-04-29T21:19:04.000Z | cinder/tests/unit/targets/test_spdknvmf.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 3 | 2016-04-29T21:45:26.000Z | 2016-05-04T19:41:23.000Z | cinder/tests/unit/targets/test_spdknvmf.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 4 | 2016-01-27T00:25:52.000Z | 2021-03-25T19:54:08.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
from unittest import mock
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.targets import spdknvmf as spdknvmf_driver
BDEVS = [{
"num_blocks": 4096000,
"name": "Nvme0n1",
"driver_specific": {
"nvme": {
"trid": {
"trtype": "PCIe",
"traddr": "0000:00:04.0"
},
"ns_data": {
"id": 1
},
"pci_address": "0000:00:04.0",
"vs": {
"nvme_version": "1.1"
},
"ctrlr_data": {
"firmware_revision": "1.0",
"serial_number": "deadbeef",
"oacs": {
"ns_manage": 0,
"security": 0,
"firmware": 0,
"format": 0
},
"vendor_id": "0x8086",
"model_number": "QEMU NVMe Ctrl"
},
"csts": {
"rdy": 1,
"cfs": 0
}
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": True,
"unmap": False,
"read": True,
"write_zeroes": False,
"write": True,
"flush": True,
"nvme_io": True
},
"claimed": False,
"block_size": 512,
"product_name": "NVMe disk",
"aliases": ["Nvme0n1"]
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"Nvme0n1p0"
],
"driver_specific": {
"lvol": {
"base_bdev": "Nvme0n1",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Split Disk",
"name": "Nvme0n1p0"
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"Nvme0n1p1"
],
"driver_specific": {
"lvol": {
"base_bdev": "Nvme0n1",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Split Disk",
"name": "Nvme0n1p1"
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"lvs_test/lvol0"
],
"driver_specific": {
"lvol": {
"base_bdev": "Malloc0",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Logical Volume",
"name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967297"
}, {
"num_blocks": 8192,
"uuid": "8dec1964-d533-41df-bea7-40520efdb416",
"aliases": [
"lvs_test/lvol1"
],
"driver_specific": {
"lvol": {
"base_bdev": "Malloc0",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": True
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Logical Volume",
"name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967298"
}]
NVMF_SUBSYSTEMS = [{
"listen_addresses": [],
"subtype": "Discovery",
"nqn": "nqn.2014-08.org.nvmexpress.discovery",
"hosts": [],
"allow_any_host": True
}, {
"listen_addresses": [],
"subtype": "NVMe",
"hosts": [{
"nqn": "nqn.2016-06.io.spdk:init"
}],
"namespaces": [{
"bdev_name": "Nvme0n1p0",
"nsid": 1,
"name": "Nvme0n1p0"
}],
"allow_any_host": False,
"serial_number": "SPDK00000000000001",
"nqn": "nqn.2016-06.io.spdk:cnode1"
}, {
"listen_addresses": [],
"subtype": "NVMe",
"hosts": [],
"namespaces": [{
"bdev_name": "Nvme1n1p0",
"nsid": 1,
"name": "Nvme1n1p0"
}],
"allow_any_host": True,
"serial_number": "SPDK00000000000002",
"nqn": "nqn.2016-06.io.spdk:cnode2"
}]
| 32.268949 | 78 | 0.539855 |
917afbcd55aefac0dcfd4785b8010a4e43b0d1c3 | 4,204 | py | Python | server/algos/euler/transformer.py | yizhang7210/Acre | c98cf8a4fdfb223a1958e8e61df759f889a1b13f | [
"MIT"
] | 2 | 2017-11-27T21:55:21.000Z | 2017-12-30T03:34:40.000Z | server/algos/euler/transformer.py | yizhang7210/Acre | c98cf8a4fdfb223a1958e8e61df759f889a1b13f | [
"MIT"
] | 30 | 2017-09-06T12:00:08.000Z | 2018-06-20T22:47:46.000Z | server/algos/euler/transformer.py | yizhang7210/Acre | c98cf8a4fdfb223a1958e8e61df759f889a1b13f | [
"MIT"
] | 1 | 2021-04-05T13:59:37.000Z | 2021-04-05T13:59:37.000Z | """ This is algos.euler.transformer module.
This module is responsible for transforming raw candle data into training
samples usable to the Euler algorithm.
"""
import datetime
import decimal
from algos.euler.models import training_samples as ts
from core.models import instruments
from datasource.models import candles
TWO_PLACES = decimal.Decimal('0.01')

def extract_features(day_candle):
    """Return the feature vector for one daily candle.

    Each feature is a price quoted relative to the day's open bid,
    converted to pips via the instrument's multiplier, in this order:
    high_bid, low_bid, close_bid, open_ask, high_ask, low_ask, close_ask.

    Args:
        day_candle: candles.Candle object representing a daily candle.

    Returns:
        List of Decimals, each quantized to two decimal places.
    """
    open_bid = day_candle.open_bid
    pip_factor = day_candle.instrument.multiplier
    raw_prices = (
        day_candle.high_bid,
        day_candle.low_bid,
        day_candle.close_bid,
        day_candle.open_ask,
        day_candle.high_ask,
        day_candle.low_ask,
        day_candle.close_ask,
    )
    return [decimal.Decimal(pip_factor * (price - open_bid)).quantize(TWO_PLACES)
            for price in raw_prices]
def get_profitable_change(day_candle):
    """Return the day's exploitable price move, in pips (two decimals).

    Positive when the close bid exceeds the open ask (buying at the open
    would have profited), negative when the close ask falls below the open
    bid (selling at the open would have profited), and zero otherwise.

    Args:
        day_candle: candles.Candle object representing a daily candle.

    Returns:
        Decimal quantized to two decimal places.
    """
    pip_factor = day_candle.instrument.multiplier
    if day_candle.close_bid > day_candle.open_ask:
        # Prices rose enough: profit = sell at close bid what was bought at open ask.
        change = pip_factor * (day_candle.close_bid - day_candle.open_ask)
    elif day_candle.close_ask < day_candle.open_bid:
        # Prices fell enough: profit = buy back at close ask what was sold at open bid.
        change = pip_factor * (day_candle.close_ask - day_candle.open_bid)
    else:
        # Move too small to clear the spread; no profitable trade.
        change = 0
    return decimal.Decimal(change).quantize(TWO_PLACES)
def build_sample_row(candle_previous, candle_next):
    """Create one training sample from two consecutive daily candles.

    The features come from the earlier candle and the target (profitable
    price change) from the later one; the sample is dated the day after
    the second candle's start.

    Args:
        candle_previous: candles.Candle object. Candle of the first day.
        candle_next: candles.Candle object. Candle of the second day.

    Returns:
        TrainingSample object persisted via ts.create_one.
    """
    sample_date = candle_next.start_time.date() + datetime.timedelta(1)
    sample_features = extract_features(candle_previous)
    sample_target = get_profitable_change(candle_next)
    return ts.create_one(
        instrument=candle_next.instrument,
        date=sample_date,
        features=sample_features,
        target=sample_target)
def get_start_time(instrument):
    """Return the datetime from which new candles should be queried.

    Derived from the most recent stored training sample for the
    instrument; falls back to 2005-01-01 when no sample exists yet.

    Args:
        instrument: Instrument object. The given instrument.

    Returns:
        Datetime object marking where candle retrieval should resume.
    """
    newest_sample = ts.get_last(instrument)
    if newest_sample is None:
        # No samples yet: start from the beginning of the data set.
        return datetime.datetime(2005, 1, 1)
    query_date = newest_sample.date - datetime.timedelta(1)
    return datetime.datetime.combine(query_date, datetime.time())
def run():
    """Refresh the training-sample table from the latest candles.

    Intended to be run daily so the training set stays up to date.
    For each instrument, fetch candles newer than the last stored
    sample and turn every consecutive pair into one training sample.
    """
    fresh_samples = []
    for instrument in instruments.get_all():
        ordered_candles = list(candles.get_candles(
            instrument=instrument,
            start=get_start_time(instrument),
            order_by='start_time'))
        # Pair each candle with its successor: features from the first,
        # target from the second.
        for earlier, later in zip(ordered_candles, ordered_candles[1:]):
            fresh_samples.append(build_sample_row(earlier, later))
    ts.insert_many(fresh_samples)
917b4bfe42198de5b3e0fb37cbc4743cf9cac201 | 142 | py | Python | diagrams/outscale/__init__.py | analyticsftw/diagrams | 217af329a323084bb98031ac1768bc2353e6d9b6 | [
"MIT"
] | 17,037 | 2020-02-03T01:30:30.000Z | 2022-03-31T18:09:15.000Z | diagrams/outscale/__init__.py | analyticsftw/diagrams | 217af329a323084bb98031ac1768bc2353e6d9b6 | [
"MIT"
] | 529 | 2020-02-03T10:43:41.000Z | 2022-03-31T17:33:08.000Z | diagrams/outscale/__init__.py | analyticsftw/diagrams | 217af329a323084bb98031ac1768bc2353e6d9b6 | [
"MIT"
] | 1,068 | 2020-02-05T11:54:29.000Z | 2022-03-30T23:28:55.000Z | from diagrams import Node
| 15.777778 | 36 | 0.690141 |
917b8eb1f8726a411ad6e99afecc5eaca421cc08 | 1,793 | py | Python | misc/python/mango/application/main_driver/logstream.py | pymango/pymango | b55f831f0194b214e746b2dfb4d9c6671a1abc38 | [
"BSD-2-Clause"
] | 3 | 2020-05-11T03:23:17.000Z | 2021-03-16T09:01:48.000Z | misc/python/mango/application/main_driver/logstream.py | pymango/pymango | b55f831f0194b214e746b2dfb4d9c6671a1abc38 | [
"BSD-2-Clause"
] | null | null | null | misc/python/mango/application/main_driver/logstream.py | pymango/pymango | b55f831f0194b214e746b2dfb4d9c6671a1abc38 | [
"BSD-2-Clause"
] | 2 | 2017-03-04T11:03:40.000Z | 2020-08-01T10:01:36.000Z | __doc__ = \
"""
=======================================================================================
Main-driver :obj:`LogStream` variables (:mod:`mango.application.main_driver.logstream`)
=======================================================================================
.. currentmodule:: mango.application.main_driver.logstream
Logging objects/attributes for :obj:`mango.application.main_driver.MainDriverFilter` filters.
Classes
=======
.. autosummary::
:toctree: generated/
LogStream - Message logging for :obj:`mango.application.main_driver.MainDriverFilter` filters.
Attributes
==========
.. autodata:: log
.. autodata:: mstLog
.. autodata:: mstOut
.. autodata:: warnLog
.. autodata:: errLog
"""
import mango
import mango.mpi as mpi
import os
import os.path
import sys
if sys.platform.startswith('linux'):
import DLFCN as dl
_flags = sys.getdlopenflags()
sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL)
from . import _mango_main_driver as _mango_main_driver_so
sys.setdlopenflags(_flags)
else:
from . import _mango_main_driver as _mango_main_driver_so
from mango.core import LogStream
#: Messages sent to stdout, prefixed with :samp:`'P<RANK>'`, where :samp:`<RANK>` is MPI process world rank.
log = _mango_main_driver_so._log
#: Messages sent to stdout, prefixed with :samp:`'MST'`, and messages also saved to history-meta-data.
mstLog = _mango_main_driver_so._mstLog
#: Messages sent to stdout, prefixed with :samp:`'OUT'`.
mstOut = _mango_main_driver_so._mstOut
#: Messages sent to stderr, prefixed with :samp:`'WARNING'`.
warnLog = _mango_main_driver_so._warnLog
#: Messages sent to stderr, prefixed with :samp:`'ERROR'`.
errLog = _mango_main_driver_so._errLog
__all__ = [s for s in dir() if not s.startswith('_')]
| 25.985507 | 108 | 0.665365 |
917c31411ccb8a75122b971cca9ce661e5940151 | 9,680 | py | Python | ucdev/cy7c65211/header.py | luftek/python-ucdev | 8d3c46d25551f1237e6a2f7a90d54c24bcb1d4f9 | [
"MIT"
] | 11 | 2015-07-08T01:28:01.000Z | 2022-01-26T14:29:47.000Z | ucdev/cy7c65211/header.py | luftek/python-ucdev | 8d3c46d25551f1237e6a2f7a90d54c24bcb1d4f9 | [
"MIT"
] | 5 | 2017-12-07T15:04:00.000Z | 2021-06-02T14:47:14.000Z | ucdev/cy7c65211/header.py | tai/python-ucdev | 8d3c46d25551f1237e6a2f7a90d54c24bcb1d4f9 | [
"MIT"
] | 4 | 2017-02-18T18:20:13.000Z | 2022-03-23T16:21:20.000Z | # -*- coding: utf-8-unix -*-
import platform
######################################################################
# Platform specific headers
######################################################################
if platform.system() == 'Linux':
src = """
typedef bool BOOL;
"""
######################################################################
# Common headers
######################################################################
src += """
#define CY_STRING_DESCRIPTOR_SIZE 256
#define CY_MAX_DEVICE_INTERFACE 5
#define CY_US_VERSION_MAJOR 1
#define CY_US_VERSION_MINOR 0
#define CY_US_VERSION_PATCH 0
#define CY_US_VERSION 1
#define CY_US_VERSION_BUILD 74
typedef unsigned int UINT32;
typedef unsigned char UINT8;
typedef unsigned short UINT16;
typedef char CHAR;
typedef unsigned char UCHAR;
typedef void* CY_HANDLE;
typedef void (*CY_EVENT_NOTIFICATION_CB_FN)(UINT16 eventsNotified);
typedef struct _CY_VID_PID {
UINT16 vid;
UINT16 pid;
} CY_VID_PID, *PCY_VID_PID;
typedef struct _CY_LIBRARY_VERSION {
UINT8 majorVersion;
UINT8 minorVersion;
UINT16 patch;
UINT8 buildNumber;
} CY_LIBRARY_VERSION, *PCY_LIBRARY_VERSION;
typedef struct _CY_FIRMWARE_VERSION {
UINT8 majorVersion;
UINT8 minorVersion;
UINT16 patchNumber;
UINT32 buildNumber;
} CY_FIRMWARE_VERSION, *PCY_FIRMWARE_VERSION;
typedef enum _CY_DEVICE_CLASS{
CY_CLASS_DISABLED = 0,
CY_CLASS_CDC = 0x02,
CY_CLASS_PHDC = 0x0F,
CY_CLASS_VENDOR = 0xFF
} CY_DEVICE_CLASS;
typedef enum _CY_DEVICE_TYPE {
CY_TYPE_DISABLED = 0,
CY_TYPE_UART,
CY_TYPE_SPI,
CY_TYPE_I2C,
CY_TYPE_JTAG,
CY_TYPE_MFG
} CY_DEVICE_TYPE;
typedef enum _CY_DEVICE_SERIAL_BLOCK
{
SerialBlock_SCB0 = 0,
SerialBlock_SCB1,
SerialBlock_MFG
} CY_DEVICE_SERIAL_BLOCK;
typedef struct _CY_DEVICE_INFO {
CY_VID_PID vidPid;
UCHAR numInterfaces;
UCHAR manufacturerName [256];
UCHAR productName [256];
UCHAR serialNum [256];
UCHAR deviceFriendlyName [256];
CY_DEVICE_TYPE deviceType [5];
CY_DEVICE_CLASS deviceClass [5];
CY_DEVICE_SERIAL_BLOCK deviceBlock;
} CY_DEVICE_INFO,*PCY_DEVICE_INFO;
typedef struct _CY_DATA_BUFFER {
UCHAR *buffer;
UINT32 length;
UINT32 transferCount;
} CY_DATA_BUFFER,*PCY_DATA_BUFFER;
typedef enum _CY_RETURN_STATUS{
CY_SUCCESS = 0,
CY_ERROR_ACCESS_DENIED,
CY_ERROR_DRIVER_INIT_FAILED,
CY_ERROR_DEVICE_INFO_FETCH_FAILED,
CY_ERROR_DRIVER_OPEN_FAILED,
CY_ERROR_INVALID_PARAMETER,
CY_ERROR_REQUEST_FAILED,
CY_ERROR_DOWNLOAD_FAILED,
CY_ERROR_FIRMWARE_INVALID_SIGNATURE,
CY_ERROR_INVALID_FIRMWARE,
CY_ERROR_DEVICE_NOT_FOUND,
CY_ERROR_IO_TIMEOUT,
CY_ERROR_PIPE_HALTED,
CY_ERROR_BUFFER_OVERFLOW,
CY_ERROR_INVALID_HANDLE,
CY_ERROR_ALLOCATION_FAILED,
CY_ERROR_I2C_DEVICE_BUSY,
CY_ERROR_I2C_NAK_ERROR,
CY_ERROR_I2C_ARBITRATION_ERROR,
CY_ERROR_I2C_BUS_ERROR,
CY_ERROR_I2C_BUS_BUSY,
CY_ERROR_I2C_STOP_BIT_SET,
CY_ERROR_STATUS_MONITOR_EXIST
} CY_RETURN_STATUS;
typedef struct _CY_I2C_CONFIG{
UINT32 frequency;
UINT8 slaveAddress;
BOOL isMaster;
BOOL isClockStretch;
} CY_I2C_CONFIG,*PCY_I2C_CONFIG;
typedef struct _CY_I2C_DATA_CONFIG
{
UCHAR slaveAddress;
BOOL isStopBit;
BOOL isNakBit;
} CY_I2C_DATA_CONFIG, *PCY_I2C_DATA_CONFIG;
typedef enum _CY_SPI_PROTOCOL {
CY_SPI_MOTOROLA = 0,
CY_SPI_TI,
CY_SPI_NS
} CY_SPI_PROTOCOL;
typedef struct _CY_SPI_CONFIG
{
UINT32 frequency;
UCHAR dataWidth;
CY_SPI_PROTOCOL protocol ;
BOOL isMsbFirst;
BOOL isMaster;
BOOL isContinuousMode;
BOOL isSelectPrecede;
BOOL isCpha;
BOOL isCpol;
}CY_SPI_CONFIG,*PCY_SPI_CONFIG;
typedef enum _CY_UART_BAUD_RATE
{
CY_UART_BAUD_300 = 300,
CY_UART_BAUD_600 = 600,
CY_UART_BAUD_1200 = 1200,
CY_UART_BAUD_2400 = 2400,
CY_UART_BAUD_4800 = 4800,
CY_UART_BAUD_9600 = 9600,
CY_UART_BAUD_14400 = 14400,
CY_UART_BAUD_19200 = 19200,
CY_UART_BAUD_38400 = 38400,
CY_UART_BAUD_56000 = 56000,
CY_UART_BAUD_57600 = 57600,
CY_UART_BAUD_115200 = 115200,
CY_UART_BAUD_230400 = 230400,
CY_UART_BAUD_460800 = 460800,
CY_UART_BAUD_921600 = 921600,
CY_UART_BAUD_1000000 = 1000000,
CY_UART_BAUD_3000000 = 3000000,
}CY_UART_BAUD_RATE;
typedef enum _CY_UART_PARITY_MODE {
CY_DATA_PARITY_DISABLE = 0,
CY_DATA_PARITY_ODD,
CY_DATA_PARITY_EVEN,
CY_DATA_PARITY_MARK,
CY_DATA_PARITY_SPACE
} CY_UART_PARITY_MODE;
typedef enum _CY_UART_STOP_BIT {
CY_UART_ONE_STOP_BIT = 1,
CY_UART_TWO_STOP_BIT
} CY_UART_STOP_BIT;
typedef enum _CY_FLOW_CONTROL_MODES {
CY_UART_FLOW_CONTROL_DISABLE = 0,
CY_UART_FLOW_CONTROL_DSR,
CY_UART_FLOW_CONTROL_RTS_CTS,
CY_UART_FLOW_CONTROL_ALL
} CY_FLOW_CONTROL_MODES;
typedef struct _CY_UART_CONFIG {
CY_UART_BAUD_RATE baudRate;
UINT8 dataWidth;
CY_UART_STOP_BIT stopBits;
CY_UART_PARITY_MODE parityMode;
BOOL isDropOnRxErrors;
} CY_UART_CONFIG,*PCY_UART_CONFIG;
typedef enum _CY_CALLBACK_EVENTS {
CY_UART_CTS_BIT = 0x01,
CY_UART_DSR_BIT = 0x02,
CY_UART_BREAK_BIT = 0x04,
CY_UART_RING_SIGNAL_BIT = 0x08,
CY_UART_FRAME_ERROR_BIT = 0x10,
CY_UART_PARITY_ERROR_BIT = 0x20,
CY_UART_DATA_OVERRUN_BIT = 0x40,
CY_UART_DCD_BIT = 0x100,
CY_SPI_TX_UNDERFLOW_BIT = 0x200,
CY_SPI_BUS_ERROR_BIT = 0x400,
CY_ERROR_EVENT_FAILED_BIT = 0x800
} CY_CALLBACK_EVENTS;
CY_RETURN_STATUS CyLibraryInit ();
CY_RETURN_STATUS CyLibraryExit ();
CY_RETURN_STATUS CyGetListofDevices (
UINT8* numDevices
);
CY_RETURN_STATUS CyGetDeviceInfo(
UINT8 deviceNumber,
CY_DEVICE_INFO *deviceInfo
);
CY_RETURN_STATUS CyGetDeviceInfoVidPid (
CY_VID_PID vidPid,
UINT8 *deviceIdList,
CY_DEVICE_INFO *deviceInfoList,
UINT8 *deviceCount,
UINT8 infoListLength
);
CY_RETURN_STATUS CyOpen (
UINT8 deviceNumber,
UINT8 interfaceNum,
CY_HANDLE *handle
);
CY_RETURN_STATUS CyClose (
CY_HANDLE handle
);
CY_RETURN_STATUS CyCyclePort (
CY_HANDLE handle
);
CY_RETURN_STATUS CySetGpioValue (
CY_HANDLE handle,
UINT8 gpioNumber,
UINT8 value
);
CY_RETURN_STATUS CyGetGpioValue (
CY_HANDLE handle,
UINT8 gpioNumber,
UINT8 *value
);
CY_RETURN_STATUS CySetEventNotification(
CY_HANDLE handle,
CY_EVENT_NOTIFICATION_CB_FN notificationCbFn
);
CY_RETURN_STATUS CyAbortEventNotification(
CY_HANDLE handle
);
CY_RETURN_STATUS CyGetLibraryVersion (
CY_HANDLE handle,
PCY_LIBRARY_VERSION version
);
CY_RETURN_STATUS CyGetFirmwareVersion (
CY_HANDLE handle,
PCY_FIRMWARE_VERSION firmwareVersion
);
CY_RETURN_STATUS CyResetDevice (
CY_HANDLE handle
);
CY_RETURN_STATUS CyProgUserFlash (
CY_HANDLE handle,
CY_DATA_BUFFER *progBuffer,
UINT32 flashAddress,
UINT32 timeout
);
CY_RETURN_STATUS CyReadUserFlash (
CY_HANDLE handle,
CY_DATA_BUFFER *readBuffer,
UINT32 flashAddress,
UINT32 timeout
);
CY_RETURN_STATUS CyGetSignature (
CY_HANDLE handle,
UCHAR *pSignature
);
CY_RETURN_STATUS CyGetUartConfig (
CY_HANDLE handle,
CY_UART_CONFIG *uartConfig
);
CY_RETURN_STATUS CySetUartConfig (
CY_HANDLE handle,
CY_UART_CONFIG *uartConfig
);
CY_RETURN_STATUS CyUartRead (
CY_HANDLE handle,
CY_DATA_BUFFER* readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyUartWrite (
CY_HANDLE handle,
CY_DATA_BUFFER* writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyUartSetHwFlowControl(
CY_HANDLE handle,
CY_FLOW_CONTROL_MODES mode
);
CY_RETURN_STATUS CyUartGetHwFlowControl(
CY_HANDLE handle,
CY_FLOW_CONTROL_MODES *mode
);
CY_RETURN_STATUS CyUartSetRts(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartClearRts(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartSetDtr(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartClearDtr(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartSetBreak(
CY_HANDLE handle,
UINT16 timeout
);
CY_RETURN_STATUS CyGetI2cConfig (
CY_HANDLE handle,
CY_I2C_CONFIG *i2cConfig
);
CY_RETURN_STATUS CySetI2cConfig (
CY_HANDLE handle,
CY_I2C_CONFIG *i2cConfig
);
CY_RETURN_STATUS CyI2cRead (
CY_HANDLE handle,
CY_I2C_DATA_CONFIG *dataConfig,
CY_DATA_BUFFER *readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyI2cWrite (
CY_HANDLE handle,
CY_I2C_DATA_CONFIG *dataConfig,
CY_DATA_BUFFER *writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyI2cReset(
CY_HANDLE handle,
BOOL resetMode
);
CY_RETURN_STATUS CyGetSpiConfig (
CY_HANDLE handle,
CY_SPI_CONFIG *spiConfig
);
CY_RETURN_STATUS CySetSpiConfig (
CY_HANDLE handle,
CY_SPI_CONFIG *spiConfig
);
CY_RETURN_STATUS CySpiReadWrite (
CY_HANDLE handle,
CY_DATA_BUFFER* readBuffer,
CY_DATA_BUFFER* writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyJtagEnable (
CY_HANDLE handle
);
CY_RETURN_STATUS CyJtagDisable (
CY_HANDLE handle
);
CY_RETURN_STATUS CyJtagWrite (
CY_HANDLE handle,
CY_DATA_BUFFER *writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyJtagRead (
CY_HANDLE handle,
CY_DATA_BUFFER *readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyPhdcClrFeature (
CY_HANDLE handle
);
CY_RETURN_STATUS CyPhdcSetFeature (
CY_HANDLE handle
);
CY_RETURN_STATUS CyPhdcGetStatus (
CY_HANDLE handle,
UINT16 *dataStatus
);
"""
| 25.882353 | 70 | 0.71095 |
917c654b7225932aa925e3dba908d54b0c600e75 | 565 | py | Python | deep_qa/layers/wrappers/output_mask.py | richarajpal/deep_qa | d918335a1bed71b9cfccf1d5743321cee9c61952 | [
"Apache-2.0"
] | 459 | 2017-02-08T13:40:17.000Z | 2021-12-12T12:57:48.000Z | deep_qa/layers/wrappers/output_mask.py | richarajpal/deep_qa | d918335a1bed71b9cfccf1d5743321cee9c61952 | [
"Apache-2.0"
] | 176 | 2017-01-26T01:19:41.000Z | 2018-04-22T19:16:01.000Z | deep_qa/layers/wrappers/output_mask.py | richarajpal/deep_qa | d918335a1bed71b9cfccf1d5743321cee9c61952 | [
"Apache-2.0"
] | 154 | 2017-01-26T01:00:30.000Z | 2021-02-05T10:44:42.000Z | from overrides import overrides
from ..masked_layer import MaskedLayer
| 26.904762 | 98 | 0.695575 |
917d1911394719c31fdc868c9c05aa1015cc7576 | 1,316 | py | Python | ljmc/energy.py | karnesh/Monte-Carlo-LJ | f33f08c247df963ca48b9d9f8456e26c0bb19923 | [
"MIT"
] | null | null | null | ljmc/energy.py | karnesh/Monte-Carlo-LJ | f33f08c247df963ca48b9d9f8456e26c0bb19923 | [
"MIT"
] | null | null | null | ljmc/energy.py | karnesh/Monte-Carlo-LJ | f33f08c247df963ca48b9d9f8456e26c0bb19923 | [
"MIT"
] | null | null | null | """
energy.py
function that computes the inter particle energy
It uses truncated 12-6 Lennard Jones potential
All the variables are in reduced units.
"""
def distance(atom1, atom2):
    """
    Return the squared inter-particle distance.

    The minimum image convention is applied on each axis, so the result
    is the squared separation of the nearest periodic images.

    NOTE(review): relies on module-level globals Lx/Ly/Lz (box lengths)
    and halfLx/halfLy/halfLz (half box lengths) being defined before
    this is called -- confirm they are initialised elsewhere.
    """
    dx = atom1.x - atom2.x
    dy = atom1.y - atom2.y
    dz = atom1.z - atom2.z
    # Fold each component back into [-L/2, L/2] (minimum image convention).
    if dx > halfLx:  # BUG FIX: original line was missing the ':' (syntax error)
        dx -= Lx
    elif dx < -halfLx:
        dx += Lx
    if dy > halfLy:
        dy -= Ly
    elif dy < -halfLy:
        dy += Ly
    if dz > halfLz:
        dz -= Lz
    elif dz < -halfLz:
        dz += Lz
    # Squared distance: avoids sqrt since callers compare against squared cutoffs.
    return dx**2 + dy**2 + dz**2
def energy(atom1, atom2, rc):
    '''
    Return the truncated 12-6 Lennard-Jones pair energy (reduced units).

    atom1/atom2 must carry .eps and .sigma attributes; rc is the cutoff
    radius. Pairs separated by more than rc contribute zero.
    '''
    # Mixing rules: arithmetic mean for epsilon, geometric mean for sigma.
    # NOTE(review): classic Lorentz-Berthelot uses arithmetic sigma and
    # geometric epsilon -- confirm which convention is intended here.
    eps = (atom1.eps + atom2.eps)/2
    sig = (atom1.sigma * atom2.sigma)**0.5
    rcsq = rc**2
    rsq = distance(atom1, atom2)  # squared separation (minimum image)
    if rsq <= rcsq:
        # BUG FIX: original used (sig/rsq)**6, which is dimensionally wrong.
        # In terms of the squared distance the 12-6 potential is
        # 4*eps*((sig^2/r^2)^6 - (sig^2/r^2)^3).
        sr2 = sig * sig / rsq
        pair_energy = 4.0 * eps * (sr2**6.0 - sr2**3.0)
    else:
        pair_energy = 0.0
    # BUG FIX: original computed the energy but never returned it.
    return pair_energy
def writeEnergy(step, energy):
    '''
    Append one "step energy" record to energy.dat in the working directory.
    '''
    with open('energy.dat', 'a') as out_file:
        out_file.write(f'{step} {energy}\n')
| 19.352941 | 101 | 0.544833 |
917d24af3dd098f693a886046f82e8514c7bd83a | 2,628 | py | Python | CEST/Evaluation/lorenzian.py | ludgerradke/bMRI | dcf93749bb2fba3700e6bcfde691355d55090951 | [
"MIT"
] | null | null | null | CEST/Evaluation/lorenzian.py | ludgerradke/bMRI | dcf93749bb2fba3700e6bcfde691355d55090951 | [
"MIT"
] | null | null | null | CEST/Evaluation/lorenzian.py | ludgerradke/bMRI | dcf93749bb2fba3700e6bcfde691355d55090951 | [
"MIT"
] | null | null | null | import numpy as np
import math
from scipy.optimize import curve_fit
| 45.310345 | 117 | 0.459665 |
917ddc860e3cb5987c6d77cf2eda4923d9234d7a | 7,572 | py | Python | components/network_models_LSTU.py | neuralchen/CooGAN | 3155cbb5a283226474356d3a9f01918609ddd4ec | [
"MIT"
] | 12 | 2020-12-09T07:04:12.000Z | 2022-03-01T03:30:46.000Z | components/network_models_LSTU.py | neuralchen/CooGAN | 3155cbb5a283226474356d3a9f01918609ddd4ec | [
"MIT"
] | null | null | null | components/network_models_LSTU.py | neuralchen/CooGAN | 3155cbb5a283226474356d3a9f01918609ddd4ec | [
"MIT"
] | 4 | 2020-12-23T03:57:53.000Z | 2022-03-28T13:56:14.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
#############################################################
# File: network_models_LSTU.py
# Created Date: Tuesday February 25th 2020
# Author: Chen Xuanhong
# Email: chenxuanhongzju@outlook.com
# Last Modified: Tuesday, 25th February 2020 9:57:06 pm
# Modified By: Chen Xuanhong
# Copyright (c) 2020 Shanghai Jiao Tong University
#############################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tflib as tl
# Short, pre-configured aliases used by the network definitions below.
# activation_fn=None keeps the layer outputs linear so activations can be
# applied explicitly where needed.
conv = partial(slim.conv2d, activation_fn=None)
dconv = partial(slim.conv2d_transpose, activation_fn=None)
fc = partial(tl.flatten_fully_connected, activation_fn=None)
relu = tf.nn.relu
lrelu = tf.nn.leaky_relu
sigmoid = tf.nn.sigmoid
tanh = tf.nn.tanh
# Batch norm with a learnable scale (gamma); updates_collections=None makes
# the moving-average updates run in place with the forward pass.
batch_norm = partial(slim.batch_norm, scale=True, updates_collections=None)
instance_norm = slim.instance_norm
# Upper bound on channel width used by the model: 64 * 16 = 1024.
MAX_DIM = 64 * 16
| 39.233161 | 118 | 0.58822 |
917e0cc4efaf369d4d17aeaeb0fc5c964a039793 | 760 | py | Python | slender/tests/list/test_keep_if.py | torokmark/slender | 3bf815e22f7802ba48706f31ba608cf609e23e68 | [
"Apache-2.0"
] | 1 | 2020-01-10T21:51:46.000Z | 2020-01-10T21:51:46.000Z | slender/tests/list/test_keep_if.py | torokmark/slender | 3bf815e22f7802ba48706f31ba608cf609e23e68 | [
"Apache-2.0"
] | null | null | null | slender/tests/list/test_keep_if.py | torokmark/slender | 3bf815e22f7802ba48706f31ba608cf609e23e68 | [
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from expects import expect, equal, raise_error
from slender import List
| 28.148148 | 76 | 0.628947 |
9181932ab3632366f38b401fcbe5e47425259914 | 6,809 | py | Python | test/functional/bchn-txbroadcastinterval.py | 1Crazymoney/bitcoin-cash-node | 8f82823b3c5d4bcb401b0e4e6b464c1228f936e1 | [
"MIT"
] | 1 | 2021-11-24T03:54:05.000Z | 2021-11-24T03:54:05.000Z | test/functional/bchn-txbroadcastinterval.py | 1Crazymoney/bitcoin-cash-node | 8f82823b3c5d4bcb401b0e4e6b464c1228f936e1 | [
"MIT"
] | null | null | null | test/functional/bchn-txbroadcastinterval.py | 1Crazymoney/bitcoin-cash-node | 8f82823b3c5d4bcb401b0e4e6b464c1228f936e1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Cash Node developers
# Author matricz
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test that inv messages are sent according to
an exponential distribution with scale -txbroadcastinterval
The outbound interval should be half of the inbound
"""
import time
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until, connect_nodes, disconnect_nodes
from scipy import stats
# Allow running this functional test directly (outside the test runner).
if __name__ == '__main__':
    TxBroadcastIntervalTest().main()
| 46.958621 | 110 | 0.679101 |
91820c594379b0529582b42b9cc165d4cd520738 | 33,871 | py | Python | tests/compute/test_sampler.py | buaaqt/dgl | 64f6f3c1a8c2c3e08ec0750b902f3e2c63fd2cd7 | [
"Apache-2.0"
] | 1 | 2020-07-21T03:03:15.000Z | 2020-07-21T03:03:15.000Z | tests/compute/test_sampler.py | buaaqt/dgl | 64f6f3c1a8c2c3e08ec0750b902f3e2c63fd2cd7 | [
"Apache-2.0"
] | null | null | null | tests/compute/test_sampler.py | buaaqt/dgl | 64f6f3c1a8c2c3e08ec0750b902f3e2c63fd2cd7 | [
"Apache-2.0"
] | null | null | null | import backend as F
import numpy as np
import scipy as sp
import dgl
from dgl import utils
import unittest
from numpy.testing import assert_array_equal
np.random.seed(42)
# Allow running this test module directly (outside pytest): execute every
# sampler test in sequence. NOTE(review): the test functions themselves are
# defined elsewhere in this file.
if __name__ == '__main__':
    test_create_full()
    test_1neighbor_sampler_all()
    test_10neighbor_sampler_all()
    test_1neighbor_sampler()
    test_10neighbor_sampler()
    test_layer_sampler()
    test_nonuniform_neighbor_sampler()
    test_setseed()
    test_negative_sampler()
| 46.783149 | 103 | 0.575507 |
9183b4d3330e5dc6c4da3188d85901cf1703c4d4 | 3,178 | py | Python | plugins/voila/voila/__init__.py | srinivasreddych/aws-orbit-workbench | 2d154addff58d26f5459a73c06148aaf5e9fad46 | [
"Apache-2.0"
] | 94 | 2021-03-19T19:55:11.000Z | 2022-03-31T19:50:01.000Z | plugins/voila/voila/__init__.py | srinivasreddych/aws-orbit-workbench | 2d154addff58d26f5459a73c06148aaf5e9fad46 | [
"Apache-2.0"
] | 410 | 2021-03-19T18:04:48.000Z | 2022-03-22T13:56:53.000Z | plugins/voila/voila/__init__.py | srinivasreddych/aws-orbit-workbench | 2d154addff58d26f5459a73c06148aaf5e9fad46 | [
"Apache-2.0"
] | 24 | 2021-03-19T23:16:23.000Z | 2022-03-04T01:05:18.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import TYPE_CHECKING, Any, Dict, Optional
import aws_orbit
from aws_orbit.plugins import hooks
from aws_orbit.remote_files import helm
if TYPE_CHECKING:
from aws_orbit.models.context import Context, TeamContext
_logger: logging.Logger = logging.getLogger("aws_orbit")
CHART_PATH = os.path.join(os.path.dirname(__file__))
| 37.833333 | 116 | 0.701385 |
91848acd7c9a76b40212893d24a66f1267e0b221 | 4,316 | py | Python | tools/generate_driver_list.py | aarunsai81/netapp | 8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba | [
"Apache-2.0"
] | 11 | 2015-08-25T13:11:18.000Z | 2020-10-15T11:29:20.000Z | tools/generate_driver_list.py | aarunsai81/netapp | 8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba | [
"Apache-2.0"
] | 5 | 2018-01-25T11:31:56.000Z | 2019-05-06T23:13:35.000Z | tools/generate_driver_list.py | aarunsai81/netapp | 8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba | [
"Apache-2.0"
] | 11 | 2015-02-20T18:48:24.000Z | 2021-01-30T20:26:18.000Z | #! /usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generate list of cinder drivers"""
import argparse
import os
from cinder.interface import util
parser = argparse.ArgumentParser(prog="generate_driver_list")
parser.add_argument("--format", default='str', choices=['str', 'dict'],
help="Output format type")
# Keep backwards compatibilty with the gate-docs test
# The tests pass ['docs'] on the cmdln, but it's never been used.
parser.add_argument("output_list", default=None, nargs='?')
CI_WIKI_ROOT = "https://wiki.openstack.org/wiki/ThirdPartySystems/"
def collect_driver_info(driver):
"""Build the dictionary that describes this driver."""
info = {'name': driver.class_name,
'version': driver.version,
'fqn': driver.class_fqn,
'description': driver.desc,
'ci_wiki_name': driver.ci_wiki_name}
return info
if __name__ == '__main__':
main()
| 30.609929 | 78 | 0.621177 |
9184ffff91bd0e91c571446c2eb2a2d6fb77ed63 | 126 | py | Python | Disp_pythonScript.py | maniegley/python | 0e3a98cbff910cc78b2c0386a9cca6c5bb20eefc | [
"MIT"
] | 1 | 2019-05-04T03:20:44.000Z | 2019-05-04T03:20:44.000Z | Disp_pythonScript.py | maniegley/python | 0e3a98cbff910cc78b2c0386a9cca6c5bb20eefc | [
"MIT"
] | null | null | null | Disp_pythonScript.py | maniegley/python | 0e3a98cbff910cc78b2c0386a9cca6c5bb20eefc | [
"MIT"
] | null | null | null | import sys
f = open("/home/vader/Desktop/test.py", "r")
#read all file
python_script = f.read()
print(python_script)
| 15.75 | 44 | 0.666667 |
9185566c87d7284eaa28e018591be112687ee8a6 | 2,001 | py | Python | email_file.py | grussr/email-file-attachment | afa65b679b3c88b419643e216b9942fdefeaf9fc | [
"MIT"
] | null | null | null | email_file.py | grussr/email-file-attachment | afa65b679b3c88b419643e216b9942fdefeaf9fc | [
"MIT"
] | null | null | null | email_file.py | grussr/email-file-attachment | afa65b679b3c88b419643e216b9942fdefeaf9fc | [
"MIT"
] | null | null | null | import smtplib
import argparse
from os.path import basename
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
import configparser
import json
parser = argparse.ArgumentParser()
parser.add_argument('attachment')
args = parser.parse_args()
attachpath = args.attachment
config = configparser.ConfigParser()
config.read('email_file.ini')
email_from = config['DEFAULT']['From']
email_to_list = json.loads(config['DEFAULT']['To'])
email_subject = config['DEFAULT']['Subject']
email_body = config['DEFAULT']['Body']
email_server = config['DEFAULT']['Server']
email_server_ssl = bool(config['DEFAULT']['Server_SSL'])
email_server_username = config['DEFAULT']['Server_Username']
email_server_password = config['DEFAULT']['Server_Password']
send_mail(email_from, email_to_list, email_subject, email_body, [attachpath], email_server, email_server_ssl, email_server_username, email_server_password)
| 32.274194 | 155 | 0.696152 |
91860dad187d68b19d0b7553594210d867a8ccc4 | 70 | py | Python | logs/constants.py | gonzatorte/sw-utils | 767ec4aa8cbe1e0143f601482024ba1d9b76da64 | [
"MIT"
] | null | null | null | logs/constants.py | gonzatorte/sw-utils | 767ec4aa8cbe1e0143f601482024ba1d9b76da64 | [
"MIT"
] | null | null | null | logs/constants.py | gonzatorte/sw-utils | 767ec4aa8cbe1e0143f601482024ba1d9b76da64 | [
"MIT"
] | null | null | null | import logging
TRACE_LVL = int( (logging.DEBUG + logging.INFO) / 2 )
| 17.5 | 53 | 0.7 |
9186884237c62f08e8e5c91cdb86f2cf165aa0f6 | 173 | py | Python | examples/simple_lakehouse/simple_lakehouse/repo.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 2 | 2021-06-21T17:50:26.000Z | 2021-06-21T19:14:23.000Z | examples/simple_lakehouse/simple_lakehouse/repo.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 1 | 2021-06-21T18:30:02.000Z | 2021-06-25T21:18:39.000Z | examples/simple_lakehouse/simple_lakehouse/repo.py | dbatten5/dagster | d76e50295054ffe5a72f9b292ef57febae499528 | [
"Apache-2.0"
] | 1 | 2021-08-18T17:21:57.000Z | 2021-08-18T17:21:57.000Z | from dagster import repository
from simple_lakehouse.pipelines import simple_lakehouse_pipeline
| 21.625 | 64 | 0.849711 |
9186f6c899c8a19e537fae60a274b21c711b183a | 7,649 | py | Python | demos/odyssey/dodyssey.py | steingabelgaard/reportlab | b9a537e8386fb4b4b80e9ec89e0cdf392dbd6f61 | [
"BSD-3-Clause"
] | 55 | 2019-09-21T02:45:18.000Z | 2021-12-10T13:38:51.000Z | demos/odyssey/dodyssey.py | cnauroth/reportlab | 377d4ff58491dc6de48551e730c3d7f72db783e5 | [
"BSD-3-Clause"
] | 4 | 2019-09-26T03:16:50.000Z | 2021-12-10T13:40:49.000Z | demos/odyssey/dodyssey.py | cnauroth/reportlab | 377d4ff58491dc6de48551e730c3d7f72db783e5 | [
"BSD-3-Clause"
] | 26 | 2019-09-25T03:54:30.000Z | 2022-03-21T14:03:12.000Z | #Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
__version__='3.3.0'
__doc__=''
#REPORTLAB_TEST_SCRIPT
import sys, copy, os
from reportlab.platypus import *
_NEW_PARA=os.environ.get('NEW_PARA','0')[0] in ('y','Y','1')
_REDCAP=int(os.environ.get('REDCAP','0'))
_CALLBACK=os.environ.get('CALLBACK','0')[0] in ('y','Y','1')
if _NEW_PARA:
from reportlab.lib.units import inch
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
import reportlab.rl_config
reportlab.rl_config.invariant = 1
styles = getSampleStyleSheet()
Title = "The Odyssey"
Author = "Homer"
Elements = []
ChapterStyle = copy.deepcopy(styles["Heading1"])
ChapterStyle.alignment = TA_CENTER
ChapterStyle.fontsize = 14
InitialStyle = copy.deepcopy(ChapterStyle)
InitialStyle.fontsize = 16
InitialStyle.leading = 20
PreStyle = styles["Code"]
chNum = 0
ParaStyle = copy.deepcopy(styles["Normal"])
ParaStyle.spaceBefore = 0.1*inch
if 'right' in sys.argv:
ParaStyle.alignment = TA_RIGHT
elif 'left' in sys.argv:
ParaStyle.alignment = TA_LEFT
elif 'justify' in sys.argv:
ParaStyle.alignment = TA_JUSTIFY
elif 'center' in sys.argv or 'centre' in sys.argv:
ParaStyle.alignment = TA_CENTER
else:
ParaStyle.alignment = TA_JUSTIFY
useTwoCol = 'notwocol' not in sys.argv
firstPre = 1
if __name__=='__main__':
if '--prof' in sys.argv:
doProf('dodyssey.prof',run)
else:
run()
| 31.093496 | 104 | 0.587397 |
9187649de93ea28a41bff761a58a3a5d39922848 | 764 | py | Python | tests/test_fred_fred_view.py | Traceabl3/GamestonkTerminal | 922353cade542ce3f62701e10d816852805b9386 | [
"MIT"
] | null | null | null | tests/test_fred_fred_view.py | Traceabl3/GamestonkTerminal | 922353cade542ce3f62701e10d816852805b9386 | [
"MIT"
] | null | null | null | tests/test_fred_fred_view.py | Traceabl3/GamestonkTerminal | 922353cade542ce3f62701e10d816852805b9386 | [
"MIT"
] | null | null | null | """ econ/fred_view.py tests """
import unittest
from unittest import mock
from io import StringIO
import pandas as pd
# pylint: disable=unused-import
from gamestonk_terminal.econ.fred_view import get_fred_data # noqa: F401
fred_data_mock = """
,GDP
2019-01-01,21115.309
2019-04-01,21329.877
2019-07-01,21540.325
2019-10-01,21747.394
2020-01-01,21561.139
2020-04-01,19520.114
2020-07-01,21170.252
2020-10-01,21494.731
"""
| 24.645161 | 80 | 0.747382 |
9187aae337945bbf532915814ef30a4e08766d0c | 10,938 | py | Python | python27/1.0/lib/linux/gevent/pool.py | jt6562/XX-Net | 7b78e4820a3c78c3ba3e75b3917129d17f00e9fc | [
"BSD-2-Clause"
] | 2 | 2017-04-24T03:04:45.000Z | 2017-09-19T03:38:37.000Z | python27/1.0/lib/linux/gevent/pool.py | TDUncle/XX-Net | 24b2af60dc0abc1c26211813064bb14c1e22bac8 | [
"BSD-2-Clause"
] | null | null | null | python27/1.0/lib/linux/gevent/pool.py | TDUncle/XX-Net | 24b2af60dc0abc1c26211813064bb14c1e22bac8 | [
"BSD-2-Clause"
] | 1 | 2019-04-19T09:11:54.000Z | 2019-04-19T09:11:54.000Z | # Copyright (c) 2009-2010 Denis Bilenko. See LICENSE for details.
"""Managing greenlets in a group.
The :class:`Group` class in this module abstracts a group of running greenlets.
When a greenlet dies, it's automatically removed from the group.
The :class:`Pool` which a subclass of :class:`Group` provides a way to limit
concurrency: its :meth:`spawn <Pool.spawn>` method blocks if the number of
greenlets in the pool has already reached the limit, until there is a free slot.
"""
from gevent.hub import GreenletExit, getcurrent
from gevent.greenlet import joinall, Greenlet
from gevent.timeout import Timeout
from gevent.event import Event
from gevent.coros import Semaphore, DummySemaphore
__all__ = ['Group', 'Pool']
def GreenletSet(*args, **kwargs):
import warnings
warnings.warn("gevent.pool.GreenletSet was renamed to gevent.pool.Group since version 0.13.0", DeprecationWarning, stacklevel=2)
return Group(*args, **kwargs)
| 31.162393 | 132 | 0.598555 |
9187b814b570a612e2b93ab230ce46d039efd3f1 | 4,974 | py | Python | lecarb/estimator/lw/lw_tree.py | anshumandutt/AreCELearnedYet | e2286c3621dea8e4961057b6197c1e14e75aea5a | [
"MIT"
] | 34 | 2020-12-14T01:21:29.000Z | 2022-03-29T04:52:46.000Z | lecarb/estimator/lw/lw_tree.py | anshumandutt/AreCELearnedYet | e2286c3621dea8e4961057b6197c1e14e75aea5a | [
"MIT"
] | 5 | 2020-12-28T16:06:22.000Z | 2022-01-19T18:28:53.000Z | lecarb/estimator/lw/lw_tree.py | anshumandutt/AreCELearnedYet | e2286c3621dea8e4961057b6197c1e14e75aea5a | [
"MIT"
] | 12 | 2021-02-08T17:50:13.000Z | 2022-03-28T11:09:06.000Z | import time
import logging
from typing import Dict, Any, Tuple
import pickle
import numpy as np
import xgboost as xgb
from .common import load_lw_dataset, encode_query, decode_label
from ..postgres import Postgres
from ..estimator import Estimator
from ..utils import evaluate, run_test
from ...dataset.dataset import load_table
from ...workload.workload import Query
from ...constants import MODEL_ROOT, NUM_THREADS, PKL_PROTO
L = logging.getLogger(__name__)
def test_lw_tree(dataset: str, version: str, workload: str, params: Dict[str, Any], overwrite: bool) -> None:
"""
params:
model: model file name
use_cache: load processed vectors directly instead of build from queries
"""
# uniform thread number
model_file = MODEL_ROOT / dataset / f"{params['model']}.pkl"
L.info(f"Load model from {model_file} ...")
with open(model_file, 'rb') as f:
state = pickle.load(f)
# load corresonding version of table
table = load_table(dataset, state['version'])
# load model
args = state['args']
model = state['model']
pg_est = Postgres(table, args.bins, state['seed'])
estimator = LWTree(model, params['model'], pg_est, table)
L.info(f"Load and built lw(tree) estimator: {estimator}")
if params['use_cache']:
# test table might has different version with train
test_table = load_table(dataset, version)
lw_dataset = load_lw_dataset(test_table, workload, state['seed'], args.bins)
X, _, gt = lw_dataset['test']
run_test(dataset, version, workload, estimator, overwrite, lw_vec=(X, gt))
else:
run_test(dataset, version, workload, estimator, overwrite)
| 34.783217 | 130 | 0.65963 |
9187ef6ed78f1f18095fecd6ea3ce015376d4dfc | 2,525 | py | Python | fsim/utils.py | yamasampo/fsim | 30100789b03981dd9ea11c5c2e17a3c53910f724 | [
"MIT"
] | null | null | null | fsim/utils.py | yamasampo/fsim | 30100789b03981dd9ea11c5c2e17a3c53910f724 | [
"MIT"
] | null | null | null | fsim/utils.py | yamasampo/fsim | 30100789b03981dd9ea11c5c2e17a3c53910f724 | [
"MIT"
] | null | null | null |
import os
import configparser
from warnings import warn
def write_info_to_file(file_handle, separator, *args, **kw_args):
""" Write arguments or keyword arguments to a file. Values will be
separated by a given separator.
"""
output_lines = []
if len(args) > 0:
output_lines.append(separator.join(args))
if len(kw_args) > 0:
for k, v in kw_args.items():
output_lines.append(f'{k}{separator}{v}')
print('\n'.join(output_lines), file=file_handle)
| 32.371795 | 98 | 0.613861 |
918946b8867e4746cc6439a71e8ab2ad6d7dc6a7 | 2,950 | py | Python | src/pymortests/function.py | mahgadalla/pymor | ee2806b4c93748e716294c42454d611415da7b5e | [
"Unlicense"
] | 1 | 2021-07-26T12:58:50.000Z | 2021-07-26T12:58:50.000Z | src/pymortests/function.py | mahgadalla/pymor | ee2806b4c93748e716294c42454d611415da7b5e | [
"Unlicense"
] | null | null | null | src/pymortests/function.py | mahgadalla/pymor | ee2806b4c93748e716294c42454d611415da7b5e | [
"Unlicense"
] | null | null | null | # This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2017 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
import pytest
from pymor.core.pickle import dumps, loads
from pymor.functions.basic import ConstantFunction, GenericFunction
from pymortests.fixtures.function import function, picklable_function, function_argument
from pymortests.fixtures.parameter import parameters_of_type
from pymortests.pickling import assert_picklable, assert_picklable_without_dumps_function
# monkey np.testing.assert_allclose to behave the same as np.allclose
# for some reason, the default atol of np.testing.assert_allclose is 0
# while it is 1e-8 for np.allclose
real_assert_allclose = np.testing.assert_allclose
np.testing.assert_allclose = monkey_allclose
| 38.815789 | 108 | 0.671186 |
918a293306bf241e1a965c6b6c86f2b524157237 | 4,603 | py | Python | Code/userIDCrawler.py | CarberZ/social-media-mining | 41aee64a41244a0692987b75b30dedbd0552be49 | [
"MIT"
] | 2 | 2018-10-16T23:09:00.000Z | 2018-11-14T04:08:00.000Z | Code/userIDCrawler.py | CarberZ/social-media-mining | 41aee64a41244a0692987b75b30dedbd0552be49 | [
"MIT"
] | 1 | 2018-11-14T04:06:13.000Z | 2018-11-14T04:15:56.000Z | Code/userIDCrawler.py | CarberZ/social-media-mining | 41aee64a41244a0692987b75b30dedbd0552be49 | [
"MIT"
] | 1 | 2018-11-14T04:06:31.000Z | 2018-11-14T04:06:31.000Z |
'''
step 1
get the userID and their locations
put them all into a database
'''
from bs4 import BeautifulSoup
import urllib
import sqlite3
from selenium import webdriver
import time
import re
from urllib import request
import random
import pickle
import os
import pytesseract
url_dog = "https://www.douban.com/group/lovelydog/members?start="
url_cat = "https://www.douban.com/group/cat/members?start="
'''
cat = 1 ~ 336770
dog = 1 ~ 156240
'''
# info_dog = getInfo("dog")
# info_dog.crawler()
info_cat = getInfo("cat")
info_cat.crawler()
'''
create table CatPeople
as
select distinct *
from CatPeople_backup
WHERE not location GLOB '*[A-Za-z]*';
pre-processing to delete locations out of China
'''
| 30.483444 | 138 | 0.550728 |
918a3b0f516ea68dd89954d9a42756ad875c22c6 | 33 | py | Python | src/stoat/core/structure/__init__.py | saarkatz/guppy-struct | b9099353312c365cfd788dbd2d168a9c844765be | [
"Apache-2.0"
] | 1 | 2021-12-07T11:59:11.000Z | 2021-12-07T11:59:11.000Z | src/stoat/core/structure/__init__.py | saarkatz/stoat-struct | b9099353312c365cfd788dbd2d168a9c844765be | [
"Apache-2.0"
] | null | null | null | src/stoat/core/structure/__init__.py | saarkatz/stoat-struct | b9099353312c365cfd788dbd2d168a9c844765be | [
"Apache-2.0"
] | null | null | null | from .structure import Structure
| 16.5 | 32 | 0.848485 |
918a81c6af8725a4b95ff16551cc06a18c633a21 | 709 | py | Python | tbase/network/polices_test.py | iminders/TradeBaselines | 26eb87f2bcd5f6ff479149219b38b17002be6a40 | [
"MIT"
] | 16 | 2020-03-19T15:12:28.000Z | 2021-12-20T06:02:32.000Z | tbase/network/polices_test.py | iminders/TradeBaselines | 26eb87f2bcd5f6ff479149219b38b17002be6a40 | [
"MIT"
] | 14 | 2020-03-23T03:57:00.000Z | 2021-12-20T05:53:33.000Z | tbase/network/polices_test.py | iminders/TradeBaselines | 26eb87f2bcd5f6ff479149219b38b17002be6a40 | [
"MIT"
] | 7 | 2020-03-25T00:30:18.000Z | 2021-01-31T18:45:09.000Z | import unittest
import numpy as np
from tbase.common.cmd_util import set_global_seeds
from tbase.network.polices import RandomPolicy
if __name__ == '__main__':
unittest.main()
| 25.321429 | 65 | 0.671368 |
918a8725328fa6920f55c21e0bb7c5f7406c3135 | 36,887 | py | Python | keystone/tests/unit/core.py | knikolla/keystone | 50f0a50cf4d52d3f61b64713bd4faa7a4626ae53 | [
"Apache-2.0"
] | null | null | null | keystone/tests/unit/core.py | knikolla/keystone | 50f0a50cf4d52d3f61b64713bd4faa7a4626ae53 | [
"Apache-2.0"
] | null | null | null | keystone/tests/unit/core.py | knikolla/keystone | 50f0a50cf4d52d3f61b64713bd4faa7a4626ae53 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import base64
import contextlib
import datetime
import functools
import hashlib
import json
import ldap
import os
import shutil
import socket
import sys
import uuid
import warnings
import fixtures
import flask
from flask import testing as flask_testing
import http.client
from oslo_config import fixture as config_fixture
from oslo_context import context as oslo_context
from oslo_context import fixture as oslo_ctx_fixture
from oslo_log import fixture as log_fixture
from oslo_log import log
from oslo_utils import timeutils
from sqlalchemy import exc
import testtools
from testtools import testcase
import keystone.api
from keystone.common import context
from keystone.common import json_home
from keystone.common import provider_api
from keystone.common import sql
import keystone.conf
from keystone import exception
from keystone.identity.backends.ldap import common as ks_ldap
from keystone import notifications
from keystone.resource.backends import base as resource_base
from keystone.server.flask import application as flask_app
from keystone.server.flask import core as keystone_flask
from keystone.tests.unit import ksfixtures
keystone.conf.configure()
keystone.conf.set_config_defaults()
PID = str(os.getpid())
TESTSDIR = os.path.dirname(os.path.abspath(__file__))
TESTCONF = os.path.join(TESTSDIR, 'config_files')
ROOTDIR = os.path.normpath(os.path.join(TESTSDIR, '..', '..', '..'))
VENDOR = os.path.join(ROOTDIR, 'vendor')
ETCDIR = os.path.join(ROOTDIR, 'etc')
TMPDIR = _calc_tmpdir()
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
log.register_options(CONF)
IN_MEM_DB_CONN_STRING = 'sqlite://'
# Strictly matches ISO 8601 timestamps with subsecond precision like:
# 2016-06-28T20:48:56.000000Z
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
TIME_FORMAT_REGEX = r'^\d{4}-[0-1]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d\.\d{6}Z$'
exception._FATAL_EXCEPTION_FORMAT_ERRORS = True
os.makedirs(TMPDIR)
atexit.register(shutil.rmtree, TMPDIR)
def skip_if_cache_disabled(*sections):
"""Skip a test if caching is disabled, this is a decorator.
Caching can be disabled either globally or for a specific section.
In the code fragment::
@skip_if_cache_is_disabled('assignment', 'token')
def test_method(*args):
...
The method test_method would be skipped if caching is disabled globally via
the `enabled` option in the `cache` section of the configuration or if
the `caching` option is set to false in either `assignment` or `token`
sections of the configuration. This decorator can be used with no
arguments to only check global caching.
If a specified configuration section does not define the `caching` option,
this decorator makes the caching enabled if `enabled` option in the `cache`
section of the configuration is true.
"""
return wrapper
def skip_if_cache_is_enabled(*sections):
return wrapper
def skip_if_no_multiple_domains_support(f):
"""Decorator to skip tests for identity drivers limited to one domain."""
return wrapper
NEEDS_REGION_ID = object()
def new_endpoint_ref_with_region(service_id, region, interface='public',
**kwargs):
"""Define an endpoint_ref having a pre-3.2 form.
Contains the deprecated 'region' instead of 'region_id'.
"""
ref = new_endpoint_ref(service_id, interface, region=region,
region_id='invalid', **kwargs)
del ref['region_id']
return ref
def create_user(api, domain_id, **kwargs):
"""Create a user via the API. Keep the created password.
The password is saved and restored when api.create_user() is called.
Only use this routine if there is a requirement for the user object to
have a valid password after api.create_user() is called.
"""
user = new_user_ref(domain_id=domain_id, **kwargs)
password = user['password']
user = api.create_user(user)
user['password'] = password
return user
def _assert_expected_status(f):
"""Add `expected_status_code` as an argument to the test_client methods.
`expected_status_code` must be passed as a kwarg.
"""
TEAPOT_HTTP_STATUS = 418
_default_expected_responses = {
'get': http.client.OK,
'head': http.client.OK,
'post': http.client.CREATED,
'put': http.client.NO_CONTENT,
'patch': http.client.OK,
'delete': http.client.NO_CONTENT,
}
return inner
| 34.217996 | 79 | 0.619676 |
918dd351f71913e5bfee0b534327c85070c34d0b | 17,327 | py | Python | PyISY/Nodes/__init__.py | sneelco/PyISY | f1f916cd7951b1b6a5235bb36444c695fe3294e1 | [
"Apache-2.0"
] | null | null | null | PyISY/Nodes/__init__.py | sneelco/PyISY | f1f916cd7951b1b6a5235bb36444c695fe3294e1 | [
"Apache-2.0"
] | null | null | null | PyISY/Nodes/__init__.py | sneelco/PyISY | f1f916cd7951b1b6a5235bb36444c695fe3294e1 | [
"Apache-2.0"
] | null | null | null |
from .group import Group
from .node import (Node, parse_xml_properties, ATTR_ID)
from time import sleep
from xml.dom import minidom
| 36.324948 | 100 | 0.48612 |
918e36c7c2d321203012c2cecdfb70b87e94940f | 1,329 | py | Python | easyCore/Utils/Logging.py | easyScience/easyCore | 5d16d5b27803277d0c44886f94dab599f764ae0b | [
"BSD-3-Clause"
] | 2 | 2021-11-02T10:22:45.000Z | 2022-02-18T23:41:19.000Z | easyCore/Utils/Logging.py | easyScience/easyCore | 5d16d5b27803277d0c44886f94dab599f764ae0b | [
"BSD-3-Clause"
] | 114 | 2020-06-30T08:52:27.000Z | 2022-03-30T20:47:56.000Z | easyCore/Utils/Logging.py | easyScience/easyCore | 5d16d5b27803277d0c44886f94dab599f764ae0b | [
"BSD-3-Clause"
] | 1 | 2022-03-04T13:01:09.000Z | 2022-03-04T13:01:09.000Z | # SPDX-FileCopyrightText: 2021 easyCore contributors <core@easyscience.software>
# SPDX-License-Identifier: BSD-3-Clause
# 2021 Contributors to the easyCore project <https://github.com/easyScience/easyCore>
__author__ = 'github.com/wardsimon'
__version__ = '0.1.0'
import logging
| 36.916667 | 91 | 0.645598 |
91903bbb82369647bc8ec6646143a89d378edc88 | 234 | py | Python | iqoptionapi/http/billing.py | mustx1/MYIQ | 3afb597aa8a8abc278b7d70dad46af81789eae3e | [
"MIT"
] | 3 | 2021-06-05T06:58:01.000Z | 2021-11-25T23:52:18.000Z | iqoptionapi/http/billing.py | mustx1/MYIQ | 3afb597aa8a8abc278b7d70dad46af81789eae3e | [
"MIT"
] | 5 | 2022-01-20T00:32:49.000Z | 2022-02-16T23:12:10.000Z | iqoptionapi/http/billing.py | mustx1/MYIQ | 3afb597aa8a8abc278b7d70dad46af81789eae3e | [
"MIT"
] | 2 | 2020-11-10T19:03:38.000Z | 2020-12-07T10:42:36.000Z | """Module for IQ option billing resource."""
from iqoptionapi.http.resource import Resource
| 21.272727 | 47 | 0.709402 |
919092189581e9b39163223362020fad3bbd08e7 | 3,416 | py | Python | defaultsob/core.py | honewatson/defaults | c6a845ec1f25fc82e7645dfee60dd2df1cfa4e81 | [
"0BSD"
] | null | null | null | defaultsob/core.py | honewatson/defaults | c6a845ec1f25fc82e7645dfee60dd2df1cfa4e81 | [
"0BSD"
] | null | null | null | defaultsob/core.py | honewatson/defaults | c6a845ec1f25fc82e7645dfee60dd2df1cfa4e81 | [
"0BSD"
] | null | null | null | # -*- coding: utf-8 -*-
def ordered_set(iter):
"""Creates an ordered set
@param iter: list or tuple
@return: list with unique values
"""
final = []
for i in iter:
if i not in final:
final.append(i)
return final
def class_slots(ob):
"""Get object attributes from child class attributes
@param ob: Defaults object
@type ob: Defaults
@return: Tuple of slots
"""
current_class = type(ob).__mro__[0]
if not getattr(current_class, 'allslots', None) \
and current_class != object:
_allslots = [list(getattr(cls, '__slots__', []))
for cls in type(ob).__mro__]
_fslots = []
for slot in _allslots:
_fslots = _fslots + slot
current_class.allslots = tuple(ordered_set(_fslots))
return current_class.allslots
def usef(attr):
"""Use another value as default
@param attr: the name of the attribute to
use as alternative value
@return: value of alternative attribute
"""
return use_if_none_cls(attr)
use_name_if_none = usef('Name')
def choose_alt(attr, ob, kwargs):
"""If the declared class attribute of ob is callable
then use that callable to get a default ob
instance value if a value is not available in kwargs.
@param attr: ob class attribute name
@param ob: the object instance whose default value needs to be set
@param kwargs: the kwargs values passed to the ob __init__ method
@return: value to be used to set ob instance
"""
result = ob.__class__.__dict__.get(attr, None)
if type(result).__name__ == "member_descriptor":
result = None
elif callable(result):
result = result(attr, ob, kwargs)
return result
| 27.772358 | 77 | 0.624415 |
9190a55060e46f0f4d728a8eb6583235a5fc4dcf | 3,140 | py | Python | tests/bot_test.py | item4/yui | 8628d0d54b94ada3cbe7d1b0f624063258bad10a | [
"MIT"
] | 36 | 2017-06-12T01:09:46.000Z | 2021-01-31T17:57:41.000Z | tests/bot_test.py | item4/yui | 8628d0d54b94ada3cbe7d1b0f624063258bad10a | [
"MIT"
] | 145 | 2017-06-21T13:31:29.000Z | 2021-06-20T01:01:30.000Z | tests/bot_test.py | item4/yui | 8628d0d54b94ada3cbe7d1b0f624063258bad10a | [
"MIT"
] | 21 | 2017-07-24T15:53:19.000Z | 2021-12-23T04:18:31.000Z | import asyncio
from collections import defaultdict
from datetime import timedelta
import pytest
from yui.api import SlackAPI
from yui.bot import Bot
from yui.box import Box
from yui.types.slack.response import APIResponse
from yui.utils import json
from .util import FakeImportLib
| 27.787611 | 75 | 0.605414 |
9190bf228865d048848fd87f601781ac36e5057a | 2,901 | py | Python | scripts/marker_filter.py | CesMak/aruco_detector_ocv | bb45e39664247779cbbbc8d37b89c4556b4984d6 | [
"BSD-3-Clause"
] | 12 | 2019-03-12T08:47:07.000Z | 2022-02-09T03:59:39.000Z | scripts/marker_filter.py | vprooks/simple_aruco_detector | 40cb7354d7da67028c91b4c4652e8c4a1d2abbbb | [
"MIT"
] | 3 | 2020-07-02T04:25:10.000Z | 2021-08-31T15:56:13.000Z | scripts/marker_filter.py | CesMak/aruco_detector_ocv | bb45e39664247779cbbbc8d37b89c4556b4984d6 | [
"BSD-3-Clause"
] | 11 | 2019-10-25T17:36:44.000Z | 2022-02-16T17:12:38.000Z | #!/usr/bin/env python
import numpy as np
import rospy
import geometry_msgs.msg
import tf2_ros
from tf.transformations import quaternion_slerp
if __name__ == '__main__':
rospy.init_node('marker_filter')
alpha = rospy.get_param('~alpha', 0.9)
parent_frame_id = rospy.get_param('~parent_frame_id', 'kinect2_link')
marker_id = rospy.get_param('~marker_id', 'marker_id0')
marker_filtered_id = rospy.get_param(
'~marker_filtered_id', 'marker_id0_filtered')
rate_value = rospy.get_param('~rate_value', 125)
tfBuffer = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(tfBuffer)
br = tf2_ros.TransformBroadcaster()
marker_pose = None
marker_pose0 = None
rate = rospy.Rate(rate_value)
while not rospy.is_shutdown():
marker_pose0 = marker_pose
# Lookup the transform
try:
marker_pose_new = tfBuffer.lookup_transform(
parent_frame_id, marker_id, rospy.Time())
if not marker_pose_new is None:
marker_pose = marker_pose_new
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:
rospy.logwarn(e)
if marker_pose is None:
rate.sleep()
continue
# Apply running average filter to translation and rotation
if not marker_pose0 is None:
rotation0 = quaternion_to_numpy(marker_pose0.transform.rotation)
rotation = quaternion_to_numpy(marker_pose.transform.rotation)
rotation_interpolated = quaternion_slerp(
rotation0, rotation, 1 - alpha)
translation0 = translation_to_numpy(
marker_pose0.transform.translation)
translation = translation_to_numpy(
marker_pose.transform.translation)
translation = alpha * translation0 + (1 - alpha) * translation
# Update pose of the marker
marker_pose.transform.rotation.x = rotation_interpolated[0]
marker_pose.transform.rotation.y = rotation_interpolated[1]
marker_pose.transform.rotation.z = rotation_interpolated[2]
marker_pose.transform.rotation.w = rotation_interpolated[3]
marker_pose.transform.translation.x = translation[0]
marker_pose.transform.translation.y = translation[1]
marker_pose.transform.translation.z = translation[2]
# Create new transform and broadcast it
t = geometry_msgs.msg.TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = parent_frame_id
t.child_frame_id = marker_filtered_id
t.transform = marker_pose.transform
br.sendTransform(t)
rate.sleep()
| 36.2625 | 109 | 0.666322 |
9190f1884667aaeb95f3ee0745ae12dfce3341d8 | 3,713 | py | Python | src/backbone/utils.py | hankyul2/FaceDA | 73006327df3668923d4206f81d4976ca1240329d | [
"Apache-2.0"
] | 20 | 2021-11-26T18:05:30.000Z | 2022-02-15T12:21:10.000Z | src/backbone/utils.py | hankyul2/FaceDA | 73006327df3668923d4206f81d4976ca1240329d | [
"Apache-2.0"
] | null | null | null | src/backbone/utils.py | hankyul2/FaceDA | 73006327df3668923d4206f81d4976ca1240329d | [
"Apache-2.0"
] | 1 | 2022-02-15T12:21:17.000Z | 2022-02-15T12:21:17.000Z | import os
import subprocess
from pathlib import Path
from torch.hub import load_state_dict_from_url
import numpy as np
model_urls = {
# ResNet
'resnet18': 'https://download.pytorch.org/models/resnet18-f37072fd.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-b627a593.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-0676ba61.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-63fe2227.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-394f9c45.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
# MobileNetV2
'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',
# Se ResNet
'seresnet18': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth',
'seresnet34': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth',
'seresnet50': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth',
'seresnet101': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth',
'seresnet152': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth',
'seresnext50_32x4d': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth',
# ViT
'vit_base_patch16_224': 'https://storage.googleapis.com/vit_models/imagenet21k/ViT-B_16.npz',
'vit_base_patch32_224': 'https://storage.googleapis.com/vit_models/imagenet21k/ViT-B_32.npz',
'vit_large_patch16_224': 'https://storage.googleapis.com/vit_models/imagenet21k/ViT-L_16.npz',
'vit_large_patch32_224': 'https://storage.googleapis.com/vit_models/imagenet21k/ViT-L_32.npz',
# Hybrid (resnet50 + ViT)
'r50_vit_base_patch16_224': 'https://storage.googleapis.com/vit_models/imagenet21k/R50+ViT-B_16.npz',
'r50_vit_large_patch32_224': 'https://storage.googleapis.com/vit_models/imagenet21k/R50+ViT-L_32.npz',
}
| 50.175676 | 127 | 0.720442 |
91918e0b4360daa841c2dd658213e7f9249510fa | 702 | py | Python | crawler1.py | pjha1994/Scrape_reddit | 2a00a83854085e09f0cf53aef81969025876039b | [
"Apache-2.0"
] | null | null | null | crawler1.py | pjha1994/Scrape_reddit | 2a00a83854085e09f0cf53aef81969025876039b | [
"Apache-2.0"
] | null | null | null | crawler1.py | pjha1994/Scrape_reddit | 2a00a83854085e09f0cf53aef81969025876039b | [
"Apache-2.0"
] | null | null | null | import requests
from bs4 import BeautifulSoup
links = getLinks("http://www.reddit.com/")
print(links) | 27 | 62 | 0.602564 |
9191a318c08b49c9339f1e4504f721d3f2d1d83b | 2,428 | py | Python | chime2/tests/normal/models/seir_test.py | BrianThomasRoss/CHIME-2 | f084ab552fac5e50841a922293b74d653450790b | [
"BSD-3-Clause"
] | null | null | null | chime2/tests/normal/models/seir_test.py | BrianThomasRoss/CHIME-2 | f084ab552fac5e50841a922293b74d653450790b | [
"BSD-3-Clause"
] | null | null | null | chime2/tests/normal/models/seir_test.py | BrianThomasRoss/CHIME-2 | f084ab552fac5e50841a922293b74d653450790b | [
"BSD-3-Clause"
] | 1 | 2020-11-19T23:08:52.000Z | 2020-11-19T23:08:52.000Z | """Tests for SEIR model in this repo
* Compares conserved quantities
* Compares model against SEIR wo social policies in limit to SIR
"""
from pandas import Series
from pandas.testing import assert_frame_equal, assert_series_equal
from bayes_chime.normal.models import SEIRModel, SIRModel
from pytest import fixture
from tests.normal.models.sir_test import ( # pylint: disable=W0611
fixture_penn_chime_raw_df_no_policy,
fixture_penn_chime_setup,
fixture_sir_data_wo_policy,
)
COLS_TO_COMPARE = [
"susceptible",
"infected",
"recovered",
# Does not compare census as this repo uses the exponential distribution
]
PENN_CHIME_COMMIT = "188c35be9561164bedded4a8071a320cbde0d2bc"
def test_conserved_n(seir_data):
"""Checks if S + E + I + R is conserved for SEIR
"""
x, pars = seir_data
n_total = 0
for key in SEIRModel.compartments:
n_total += pars[f"initial_{key}"]
seir_model = SEIRModel()
predictions = seir_model.propagate_uncertainties(x, pars)
n_computed = predictions[SEIRModel.compartments].sum(axis=1)
n_expected = Series(data=[n_total] * len(n_computed), index=n_computed.index)
assert_series_equal(n_expected, n_computed)
def test_compare_sir_vs_seir(sir_data_wo_policy, seir_data, monkeypatch):
    """Checks if SEIR and SIR return same results if the code enforces
    * alpha = gamma
    * E = 0
    * dI = dE
    """
    x_sir, pars_sir = sir_data_wo_policy
    x_seir, pars_seir = seir_data
    pars_seir["alpha"] = pars_sir["gamma"]  # will be done by hand
    seir_model = SEIRModel()
    # Swap in a step function that collapses SEIR to SIR dynamics.
    # NOTE(review): ``mocked_seir_step`` is not defined in this chunk —
    # presumably defined elsewhere in the module; confirm.
    monkeypatch.setattr(seir_model, "simulation_step", mocked_seir_step)
    sir_model = SIRModel()
    predictions_sir = sir_model.propagate_uncertainties(x_sir, pars_sir)
    predictions_seir = seir_model.propagate_uncertainties(x_seir, pars_seir)
    # Compare only S/I/R columns; census is intentionally excluded
    # (see COLS_TO_COMPARE above).
    assert_frame_equal(
        predictions_sir[COLS_TO_COMPARE], predictions_seir[COLS_TO_COMPARE],
    )
| 28.232558 | 81 | 0.710461 |
91928996da1f5de4298b9395563c76e7f7e3542f | 4,681 | py | Python | Libraries/mattsLibraries/mathOperations.py | mrware91/PhilTransA-TRXS-Limits | 5592c6c66276cd493d10f066aa636aaf600d3a00 | [
"MIT"
] | null | null | null | Libraries/mattsLibraries/mathOperations.py | mrware91/PhilTransA-TRXS-Limits | 5592c6c66276cd493d10f066aa636aaf600d3a00 | [
"MIT"
] | 2 | 2018-06-19T00:01:27.000Z | 2018-10-16T18:33:24.000Z | Libraries/mattsLibraries/mathOperations.py | mrware91/PhilTransA-TRXS-Limits | 5592c6c66276cd493d10f066aa636aaf600d3a00 | [
"MIT"
] | null | null | null | import numpy as np
from scipy.interpolate import interp1d
from pyTools import *
################################################################################
#~~~~~~~~~Log ops
################################################################################
################################################################################
#~~~~~~~~~Symmeterize data
################################################################################
################################################################################
#~~~~~~~~~3D Shapes
################################################################################
################################################################################
#~~~~~~~~~2D Shapes
################################################################################
################################################################################
#~~~~~~~~~Rotations
################################################################################
| 32.734266 | 129 | 0.470626 |
9192d6d1ce77aea0159f3db895468368ec72c08a | 592 | py | Python | setup.py | avryhof/ambient_api | 08194b5d8626801f2c2c7369adacb15eace54802 | [
"MIT"
] | 20 | 2018-12-24T15:40:49.000Z | 2022-01-10T18:58:41.000Z | setup.py | avryhof/ambient_api | 08194b5d8626801f2c2c7369adacb15eace54802 | [
"MIT"
] | 10 | 2018-08-17T02:01:45.000Z | 2021-01-08T23:34:59.000Z | setup.py | avryhof/ambient_api | 08194b5d8626801f2c2c7369adacb15eace54802 | [
"MIT"
] | 14 | 2018-06-13T23:40:12.000Z | 2022-01-05T06:34:13.000Z | from setuptools import setup
# Packaging metadata for the ambient_api distribution (setuptools).
setup(
    name="ambient_api",
    version="1.5.6",
    packages=["ambient_api"],
    url="https://github.com/avryhof/ambient_api",
    license="MIT",
    author="Amos Vryhof",
    author_email="amos@vryhofresearch.com",
    description="A Python class for accessing the Ambient Weather API.",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
    ],
    # Runtime dependencies installed alongside the package.
    install_requires=["requests", "urllib3"],
)
| 29.6 | 72 | 0.640203 |
9192df0712738e90f6f197873c3a465c79101722 | 585 | py | Python | tests/llvm/static/test_main_is_found/test_main_is_found.py | ganeshutah/FPChecker | 53a471429762ace13f69733cb2f8b7227fc15b9f | [
"Apache-2.0"
] | 19 | 2019-09-28T16:15:45.000Z | 2022-02-15T15:11:28.000Z | tests/llvm/static/test_main_is_found/test_main_is_found.py | tanmaytirpankar/FPChecker | d3fe4bd9489c5705df58a67dbbc388ac1ebf56bf | [
"Apache-2.0"
] | 16 | 2020-02-01T18:43:00.000Z | 2021-12-22T14:47:39.000Z | tests/llvm/static/test_main_is_found/test_main_is_found.py | tanmaytirpankar/FPChecker | d3fe4bd9489c5705df58a67dbbc388ac1ebf56bf | [
"Apache-2.0"
] | 5 | 2020-07-27T18:15:36.000Z | 2021-11-01T18:43:34.000Z | #!/usr/bin/env python
import subprocess
import os
| 22.5 | 82 | 0.666667 |
91936b7f0195e57ee35ddf84cdb73c2bef559977 | 745 | py | Python | Dynamic_Programming/1259.Integer Replacement/Solution_BFS.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 12 | 2019-05-04T04:21:27.000Z | 2022-03-02T07:06:57.000Z | Dynamic_Programming/1259.Integer Replacement/Solution_BFS.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 1 | 2019-07-24T18:43:53.000Z | 2019-07-24T18:43:53.000Z | Dynamic_Programming/1259.Integer Replacement/Solution_BFS.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 10 | 2019-07-01T04:03:04.000Z | 2022-03-09T03:57:37.000Z | from collections import deque
| 23.28125 | 47 | 0.436242 |
91941908fbc07382f07b7bc44926ab4220545f9d | 947 | py | Python | src/routes/web.py | enflo/weather-flask | c4d905e1f557b4c9b39d0a578fdbb6fefc839028 | [
"Apache-2.0"
] | null | null | null | src/routes/web.py | enflo/weather-flask | c4d905e1f557b4c9b39d0a578fdbb6fefc839028 | [
"Apache-2.0"
] | null | null | null | src/routes/web.py | enflo/weather-flask | c4d905e1f557b4c9b39d0a578fdbb6fefc839028 | [
"Apache-2.0"
] | null | null | null | from flask import Blueprint, render_template
from gateways.models import getWeatherData
web = Blueprint("web", __name__, template_folder='templates')
#@web.route("/profile", methods=['GET'])
#def profile():
# items = getWeatherData.get_last_item()
# return render_template("profile.html",
# celcius=items["temperature"],
# humidity=items["humidity"],
# pressure=items["pressure"])
#@web.route("/about", methods=['GET'])
#def about():
# return render_template("about.html")
| 32.655172 | 61 | 0.564942 |
9195f3cdb36a82835721ebe4e4fc6cc7220eecc8 | 677 | py | Python | changes/buildsteps/lxc.py | bowlofstew/changes | ebd393520e0fdb07c240a8d4e8747281b6186e28 | [
"Apache-2.0"
] | null | null | null | changes/buildsteps/lxc.py | bowlofstew/changes | ebd393520e0fdb07c240a8d4e8747281b6186e28 | [
"Apache-2.0"
] | null | null | null | changes/buildsteps/lxc.py | bowlofstew/changes | ebd393520e0fdb07c240a8d4e8747281b6186e28 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from changes.buildsteps.default import DefaultBuildStep
| 27.08 | 75 | 0.680945 |
91979003f9cb74dc9f591b8277facbe005dfd825 | 532 | py | Python | swapidemo1.py | anvytran-dev/mycode | 3753c19828f0ecc506a6450bb6b71b4a5d651e5f | [
"MIT"
] | null | null | null | swapidemo1.py | anvytran-dev/mycode | 3753c19828f0ecc506a6450bb6b71b4a5d651e5f | [
"MIT"
] | null | null | null | swapidemo1.py | anvytran-dev/mycode | 3753c19828f0ecc506a6450bb6b71b4a5d651e5f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Star Wars API HTTP response parsing"""
# requests is used to send HTTP requests (get it?)
import requests
URL= "https://swapi.dev/api/people/1"
def main():
"""sending GET request, checking response"""
# SWAPI response is stored in "resp" object
resp= requests.get(URL)
# what kind of python object is "resp"?
print("This object class is:", type(resp), "\n")
# what can we do with it?
print("Methods/Attributes include:", dir(resp))
if __name__ == "__main__":
main()
| 22.166667 | 52 | 0.654135 |
9197f982af32fc988794515b093dd5bf984c98a5 | 4,132 | py | Python | src/biota_models/vegetation/model/constants_json_create.py | Deltares/NBSDynamics | 4710da529d85b588ea249f6e2b4f4cac132bb34f | [
"MIT"
] | 2 | 2022-01-14T05:02:04.000Z | 2022-03-02T10:42:59.000Z | src/biota_models/vegetation/model/constants_json_create.py | Deltares/NBSDynamics | 4710da529d85b588ea249f6e2b4f4cac132bb34f | [
"MIT"
] | 35 | 2021-11-01T08:59:02.000Z | 2021-11-19T16:47:17.000Z | src/biota_models/vegetation/model/constants_json_create.py | Deltares/NBSDynamics | 4710da529d85b588ea249f6e2b4f4cac132bb34f | [
"MIT"
] | 1 | 2022-03-16T07:11:00.000Z | 2022-03-16T07:11:00.000Z | import json
schema = {
"Spartina": {
"ColStart": "2000-04-01",
"ColEnd": "2000-05-31",
"random": 7,
"mud_colonization": [0.0, 0.0],
"fl_dr": 0.005,
"Maximum age": 20,
"Number LifeStages": 2,
"initial root length": 0.05,
"initial shoot length": 0.015,
"initial diameter": 0.003,
"start growth period": "2000-04-01",
"end growth period": "2000-10-31",
"start winter period": "2000-11-30",
"maximum plant height": [0.8, 1.3],
"maximum diameter": [0.003, 0.005],
"maximum root length": [0.2, 1],
"maximum years in LifeStage": [1, 19],
"numStem": [700, 700], # 3.5. number of stems per m2
"iniCol_frac": 0.6, # 3.6. initial colonization fraction (0-1)
"Cd": [1.1, 1.15], # 3.7. drag coefficient
"desMort_thres": [400, 400], # 3.9. dessication mortality threshold
"desMort_slope": [0.75, 0.75], # 3.10. dessication mortality slope
"floMort_thres": [0.4, 0.4], # 3.11. flooding mortality threshold
"floMort_slope": [0.25, 0.25], # 3.12. flooding mortality slope
"vel_thres": [0.15, 0.25], # 3.13. flow velocity threshold
"vel_slope": [3, 3], # 3.14. flow velocity slope
"maxH_winter": [0.4, 0.4], # 3.15 max height during winter time
},
"Salicornia": {
"ColStart": "2000-02-15",
"ColEnd": "2000-04-30",
"random": 20,
"mud_colonization": [0.0, 0.0],
"fl_dr": 0.005,
"Maximum age": 1,
"Number LifeStages": 1,
"initial root length": 0.15,
"initial shoot length": 0.05,
"initial diameter": 0.01,
"start growth period": "2000-02-15",
"end growth period": "2000-10-15",
"start winter period": "2000-11-01",
"maximum plant height": [0.4, 0],
"maximum diameter": [0.015, 0],
"maximum root length": [0.05, 0],
"maximum years in LifeStage": [1, 0],
"numStem": [190, 0], # 3.5. number of stems per m2
"iniCol_frac": 0.2, # 3.6. initial colonization fraction (0-1)
"Cd": [0.7, 0], # 3.7. drag coefficient
"desMort_thres": [400, 1], # 3.9. dessication mortality threshold
"desMort_slope": [0.75, 1], # 3.10. dessication mortality slope
"floMort_thres": [0.5, 1], # 3.11. flooding mortality threshold
"floMort_slope": [0.12, 1], # 3.12. flooding mortality slope
"vel_thres": [0.15, 1], # 3.13. flow velocity threshold
"vel_slope": [3, 1], # 3.14. flow velocity slope
"maxH_winter": [0.0, 0.0], # 3.15 max height during winter time
},
"Puccinellia": {
"ColStart": "2000-03-01",
"ColEnd": "2000-04-30",
"random": 7,
"mud_colonization": [0.0, 0.0],
"fl_dr": 0.005,
"Maximum age": 20,
"Number LifeStages": 2,
"initial root length": 0.02,
"initial shoot length": 0.05,
"initial diameter": 0.004,
"start growth period": "2000-03-01",
"end growth period": "2000-11-15",
"start winter period": "2000-11-30",
"maximum plant height": [0.2, 0.35],
"maximum diameter": [0.004, 0.005],
"maximum root length": [0.15, 0.15],
"maximum years in LifeStage": [1, 19],
"numStem": [6500, 6500], # 3.5. number of stems per m2
"iniCol_frac": 0.3, # 3.6. initial colonization fraction (0-1)
"Cd": [0.7, 0.7], # 3.7. drag coefficient
"desMort_thres": [400, 400], # 3.9. dessication mortality threshold
"desMort_slope": [0.75, 0.75], # 3.10. dessication mortality slope
"floMort_thres": [0.35, 0.35], # 3.11. flooding mortality threshold
"floMort_slope": [0.4, 0.4], # 3.12. flooding mortality slope
"vel_thres": [0.25, 0.5], # 3.13. flow velocity threshold
"vel_slope": [3, 3], # 3.14. flow velocity slope
"maxH_winter": [0.2, 0.2], # 3.15 max height during winter time
},
}
with open("constants_veg.json", "w") as write_file:
json.dump(schema, write_file, indent=4)
| 43.494737 | 76 | 0.547193 |
9198600a03831a59503bb3d3f2827b284d0e1c16 | 2,316 | bzl | Python | format/format.bzl | harshad-deo/TorchVI | f66d1486201368c9906869477ba7ae254d2e7191 | [
"Apache-2.0"
] | null | null | null | format/format.bzl | harshad-deo/TorchVI | f66d1486201368c9906869477ba7ae254d2e7191 | [
"Apache-2.0"
] | null | null | null | format/format.bzl | harshad-deo/TorchVI | f66d1486201368c9906869477ba7ae254d2e7191 | [
"Apache-2.0"
] | null | null | null |
format_py = rule(
implementation = _format_py_impl,
executable = True,
attrs = {
"srcs": attr.label_list(
allow_files = [".py"],
mandatory = True,
),
"_fmt": attr.label(
cfg = "host",
default = "//format:format_py",
executable = True,
),
"_style": attr.label(
allow_single_file = True,
default = ":setup.cfg",
),
},
)
| 30.473684 | 88 | 0.577288 |
91993f87e0ff04f74f7a6f31b278e5b76bf7a8ba | 1,376 | py | Python | Stream-3/Full-Stack-Development/10.Custom-User-And-Email-Authentication/2.Custom-User-Model/auth_demo/accounts/models.py | GunnerJnr/_CodeInstitute | efba0984a3dc71558eef97724c85e274a712798c | [
"MIT"
] | 4 | 2017-10-10T14:00:40.000Z | 2021-01-27T14:08:26.000Z | Stream-3/Full-Stack-Development/10.Custom-User-And-Email-Authentication/2.Custom-User-Model/auth_demo/accounts/models.py | GunnerJnr/_CodeInstitute | efba0984a3dc71558eef97724c85e274a712798c | [
"MIT"
] | 115 | 2019-10-24T11:18:33.000Z | 2022-03-11T23:15:42.000Z | Stream-3/Full-Stack-Development/10.Custom-User-And-Email-Authentication/2.Custom-User-Model/auth_demo/accounts/models.py | GunnerJnr/_CodeInstitute | efba0984a3dc71558eef97724c85e274a712798c | [
"MIT"
] | 5 | 2017-09-22T21:42:39.000Z | 2020-02-07T02:18:11.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AbstractUser, UserManager
from django.db import models
from django.utils import timezone
# Create your models here.
# Create our new user class
| 32 | 95 | 0.641715 |
919b5dc79a4db8bc0c773739c3eacec33d693967 | 11,973 | py | Python | histoGAN.py | mahmoudnafifi/HistoGAN | 50be1482638ace3ec85d733e849dec494ede155b | [
"MIT"
] | 169 | 2020-11-25T07:42:26.000Z | 2022-03-30T03:08:35.000Z | histoGAN.py | mahmoudnafifi/HistoGAN | 50be1482638ace3ec85d733e849dec494ede155b | [
"MIT"
] | 22 | 2020-12-22T13:14:24.000Z | 2022-03-31T08:41:26.000Z | histoGAN.py | mahmoudnafifi/HistoGAN | 50be1482638ace3ec85d733e849dec494ede155b | [
"MIT"
] | 19 | 2020-11-28T17:28:46.000Z | 2022-02-23T06:09:23.000Z | """
If you find this code useful, please cite our paper:
Mahmoud Afifi, Marcus A. Brubaker, and Michael S. Brown. "HistoGAN:
Controlling Colors of GAN-Generated and Real Images via Color Histograms."
In CVPR, 2021.
@inproceedings{afifi2021histogan,
title={Histo{GAN}: Controlling Colors of {GAN}-Generated and Real Images via
Color Histograms},
author={Afifi, Mahmoud and Brubaker, Marcus A. and Brown, Michael S.},
booktitle={CVPR},
year={2021}
}
"""
from tqdm import tqdm
from histoGAN import Trainer, NanException
from histogram_classes.RGBuvHistBlock import RGBuvHistBlock
from datetime import datetime
import torch
import argparse
from retry.api import retry_call
import os
from PIL import Image
from torchvision import transforms
import numpy as np
SCALE = 1 / np.sqrt(2.0)
if __name__ == "__main__":
args = get_args()
torch.cuda.set_device(args.gpu)
train_from_folder(
data=args.data,
results_dir=args.results_dir,
models_dir=args.models_dir,
name=args.name,
new=args.new,
load_from=args.load_from,
image_size=args.image_size,
network_capacity=args.network_capacity,
transparent=args.transparent,
batch_size=args.batch_size,
gradient_accumulate_every=args.gradient_accumulate_every,
num_train_steps=args.num_train_steps,
learning_rate=args.learning_rate,
num_workers=args.num_workers,
save_every=args.save_every,
generate=args.generate,
save_noise_latent=args.save_n_l,
target_noise_file=args.target_n,
target_latent_file=args.target_l,
num_image_tiles=args.num_image_tiles,
trunc_psi=args.trunc_psi,
fp16=args.fp16,
fq_layers=args.fq_layers,
fq_dict_size=args.fq_dict_size,
attn_layers=args.attn_layers,
hist_method=args.hist_method,
hist_resizing=args.hist_resizing,
hist_sigma=args.hist_sigma,
hist_bin=args.hist_bin,
hist_insz=args.hist_insz,
target_hist=args.target_hist,
alpha=args.alpha,
aug_prob=args.aug_prob,
dataset_aug_prob=args.dataset_aug_prob,
aug_types=args.aug_types
)
| 39.127451 | 80 | 0.647791 |
919b5e557651de3e6e934fa6c4b16a3e517ceea9 | 501 | py | Python | apps/careeropportunity/migrations/0003_careeropportunity_deadline.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 32 | 2017-02-22T13:38:38.000Z | 2022-03-31T23:29:54.000Z | apps/careeropportunity/migrations/0003_careeropportunity_deadline.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 694 | 2017-02-15T23:09:52.000Z | 2022-03-31T23:16:07.000Z | apps/careeropportunity/migrations/0003_careeropportunity_deadline.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 35 | 2017-09-02T21:13:09.000Z | 2022-02-21T11:30:30.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-05 18:52
from __future__ import unicode_literals
from django.db import migrations, models
| 26.368421 | 87 | 0.670659 |
919c72f34a550015e3cadb40b602759ce1ee194d | 14,482 | py | Python | benchmark/python/ffi/benchmark_ffi.py | grygielski/incubator-mxnet | 45952e21a35e32a04b7607b121085973369a42db | [
"BSL-1.0",
"Apache-2.0"
] | 211 | 2016-06-06T08:32:36.000Z | 2021-07-03T16:50:16.000Z | benchmark/python/ffi/benchmark_ffi.py | grygielski/incubator-mxnet | 45952e21a35e32a04b7607b121085973369a42db | [
"BSL-1.0",
"Apache-2.0"
] | 42 | 2017-01-05T02:45:13.000Z | 2020-08-11T23:45:27.000Z | benchmark/python/ffi/benchmark_ffi.py | grygielski/incubator-mxnet | 45952e21a35e32a04b7607b121085973369a42db | [
"BSL-1.0",
"Apache-2.0"
] | 58 | 2016-10-27T07:37:08.000Z | 2021-07-03T16:50:17.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import timeit
import itertools
import argparse
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('ffi_type')
parsed = parser.parse_args()
if parsed.ffi_type == "cython":
os.environ['MXNET_ENABLE_CYTHON'] = '1'
os.environ['MXNET_ENFORCE_CYTHON'] = '1'
elif parsed.ffi_type == "ctypes":
os.environ['MXNET_ENABLE_CYTHON'] = '0'
else:
raise ValueError("unknown ffi_type {}",format(parsed.ffi_type))
os.environ["MXNET_ENGINE_TYPE"] = "NaiveEngine"
import mxnet as mx
import numpy as onp
from mxnet import np as dnp
mx.npx.set_np(dtype=False)
packages = {
"onp": {
"module": onp,
"data": lambda arr: arr.asnumpy() if isinstance(arr, dnp.ndarray) else arr
},
"dnp": {
"module": dnp,
"data": lambda arr: arr
}
}
prepare_workloads()
results = run_benchmark(packages)
show_results(results)
| 51.90681 | 116 | 0.646596 |
919e14a6393eda0c7e38c0fd3d5e470f7982030f | 11,038 | py | Python | first-floor.py | levabd/smart-climat-daemon | 8ff273eeb74fb03ea04fda11b0128fa13d35b500 | [
"MIT"
] | null | null | null | first-floor.py | levabd/smart-climat-daemon | 8ff273eeb74fb03ea04fda11b0128fa13d35b500 | [
"MIT"
] | 1 | 2021-06-02T03:55:13.000Z | 2021-06-02T03:55:13.000Z | first-floor.py | levabd/smart-climat-daemon | 8ff273eeb74fb03ea04fda11b0128fa13d35b500 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import json
import argparse
import re
import datetime
import paramiko
import requests
# cmd ['ssh', 'smart',
# 'mkdir -p /home/levabd/smart-home-temp-humidity-monitor;
# cat - > /home/levabd/smart-home-temp-humidity-monitor/lr.json']
from miio import chuangmi_plug
from btlewrap import available_backends, BluepyBackend
from mitemp_bt.mitemp_bt_poller import MiTempBtPoller, \
MI_TEMPERATURE, MI_HUMIDITY, MI_BATTERY
# Load the persisted AC state-machine flags from disk.
state = {}
# Fix: use a context manager so the handle is closed promptly; the original
# left the file object open for the lifetime of the process.
with open('/home/pi/smart-climat-daemon/ac_state.json') as f:
    state = json.load(f)
# Model string for every Xiaomi smart plug client built below.
plug_type = 'chuangmi.plug.m1'
def valid_mitemp_mac(mac, pat=re.compile(r"[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}")):
    """Validate a Bluetooth MAC address for argparse.

    Returns the original string when it matches the AA:BB:CC:DD:EE:FF
    shape (case-insensitive), otherwise raises ArgumentTypeError.
    NOTE(review): the pattern is not anchored, so trailing characters
    after a valid prefix are accepted — confirm whether that is intended.
    """
    normalized = mac.upper()
    if pat.match(normalized) is None:
        message = 'The MAC address "{}" seems to be in the wrong format'.format(mac)
        raise argparse.ArgumentTypeError(message)
    return mac
def turn_on_humidifier():
    """Turn on the humidifier on the first floor.

    Builds a fresh miio client for the Xiaomi smart plug that powers the
    humidifier (IP and token are hard-coded for this installation) and
    switches the plug on.
    """
    # NOTE(review): the device token is a credential committed to source;
    # consider loading it from configuration instead.
    hummidifier_plug = chuangmi_plug.ChuangmiPlug(
        ip='192.168.19.59',
        token='14f5b868a58ef4ffaef6fece61c65b16',
        start_id=0,
        debug=1,
        lazy_discover=True,
        model=plug_type)
    hummidifier_plug.on()
def turn_off_humidifier():
    """Turn off the humidifier on the first floor.

    Same hard-coded plug as turn_on_humidifier(); only the final command
    differs (off instead of on).
    """
    hummidifier_plug = chuangmi_plug.ChuangmiPlug(
        ip='192.168.19.59',
        token='14f5b868a58ef4ffaef6fece61c65b16',
        start_id=0,
        debug=1,
        lazy_discover=True,
        model=plug_type)
    hummidifier_plug.off()
def check_if_ac_off():
    """Check whether the bedroom AC unit reports itself as powered off.

    Queries the smart-home gateway and inspects the JSON status document.

    Returns:
        True if the expected unit responded and its 'boot' flag is 0,
        False if the unit responded but is powered on,
        None if the payload does not describe the expected unit.
    """
    status_url = 'http://smart.levabd.pp.ua:2002/status-bedroom?key=27fbc501b51b47663e77c46816a'
    response = requests.get(status_url, timeout=(20, 30))
    # Parse the body once instead of re-parsing it on every access.
    status = response.json()
    # Bug fix: the original guard used 'and', so a payload missing only one
    # of the two keys slipped through and raised KeyError below.
    # check_if_ac_cool() already uses 'or'; this makes the checks consistent.
    if ('address' not in status) or ('name' not in status):
        return None
    if (status['name'] == "08bc20043df8") and (status['address'] == "192.168.19.54"):
        # 'boot' is 0 when the unit is powered down.
        return status['props']['boot'] == 0
    return None
def check_if_ac_cool():
    """Report whether the bedroom AC is running the automatic cooling preset.

    Returns:
        True when the expected unit is on with runMode '001', target
        temperature 25 and fan level '001'; False when the unit responded
        but is configured differently; None when the payload does not
        describe the expected unit.
    """
    status_url = 'http://smart.levabd.pp.ua:2002/status-bedroom?key=27fbc501b51b47663e77c46816a'
    response = requests.get(status_url, timeout=(20, 30))
    status = response.json()
    if ('address' not in status) or ('name' not in status):
        return None
    if not ((status['name'] == "08bc20043df8") and (status['address'] == "192.168.19.54")):
        return None
    props = status['props']
    # Cooling preset: powered on, cool mode, set-point 25, fan level 1.
    expected = (('boot', 1), ('runMode', '001'), ('wdNumber', 25), ('windLevel', '001'))
    return all(props[key] == value for key, value in expected)
def check_if_ac_heat():
    """Check whether the AC is running the automatic heating preset.

    Returns:
        True when the expected unit is on with runMode '100', target
        temperature 23 and fan level '001'; False when the unit responded
        but is configured differently; None when the payload does not
        describe the expected unit.
    """
    status_url = 'http://smart.levabd.pp.ua:2003/status/key/27fbc501b51b47663e77c46816a'
    response = requests.get(status_url, timeout=(20, 30))
    # Parse the body once instead of re-parsing it on every access.
    status = response.json()
    # Bug fix: the original guard used 'and', so a payload missing only one
    # of the two keys slipped through and raised KeyError below.
    # check_if_ac_cool() already uses 'or'; this makes the checks consistent.
    if ('address' not in status) or ('name' not in status):
        return None
    if (status['name'] == "08bc20043df8") and (status['address'] == "192.168.19.54"):
        props = status['props']
        # Heating preset: powered on, heat mode, set-point 23, fan level 1.
        return (props['boot'] == 1
                and props['runMode'] == '100'
                and props['wdNumber'] == 23
                and props['windLevel'] == '001')
    return None
def turn_on_heat_ac():
    """Turn on the first-floor AC in heating mode if it is not already.

    Two-phase state machine persisted in ``state``/ac_state.json:
    on one pass the heat command is sent and ``triedTurnedHeat`` is set;
    on a later pass, once check_if_ac_heat() confirms heating, the flags
    settle to ``wasTurnedHeat = 1`` and the other modes are cleared.
    """
    # Heating already confirmed and no retry pending: nothing to do.
    if (state['wasTurnedHeat'] == 1) and not state['triedTurnedHeat'] == 1:
        return
    heat_url = 'http://smart.levabd.pp.ua:2003/heat/key/27fbc501b51b47663e77c46816a'
    ac_heat = check_if_ac_heat()
    # None means the gateway did not describe the expected unit; do nothing.
    if ac_heat is not None:
        if not ac_heat:
            # Unit reachable but not heating: persist the attempt, then
            # send the heat command.
            state['triedTurnedHeat'] = 1
            state['wasTurnedHeat'] = 0
            with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
                json.dump(state, file)
            response = requests.get(heat_url)
            print(response.json())
        else:
            if state['triedTurnedHeat'] == 1:
                # Command confirmed: mark heating active, clear other modes.
                state['triedTurnedOff'] = 0
                state['wasTurnedOff'] = 0
                state['triedTurnedCool'] = 0
                state['wasTurnedCool'] = 0
                state['triedTurnedHeat'] = 0
                state['wasTurnedHeat'] = 1
                with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
                    json.dump(state, file)
def turn_on_cool_ac():
    """Turn on the first-floor AC in cooling mode if it is not already.

    Mirrors turn_on_heat_ac(): a two-phase state machine persisted in
    ``state``/ac_state.json — send the cool command and set
    ``triedTurnedCool``; later, once check_if_ac_cool() confirms cooling,
    settle the flags to ``wasTurnedCool = 1``.
    """
    # Cooling already confirmed and no retry pending: nothing to do.
    if (state['wasTurnedCool'] == 1) and not state['triedTurnedCool'] == 1:
        return
    cool_url = 'http://smart.levabd.pp.ua:2003/cool/key/27fbc501b51b47663e77c46816a'
    ac_cool = check_if_ac_cool()
    # None means the gateway did not describe the expected unit; do nothing.
    if ac_cool is not None:
        if not ac_cool:
            # Unit reachable but not cooling: persist the attempt, then
            # send the cool command.
            state['triedTurnedCool'] = 1
            state['wasTurnedCool'] = 0
            with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
                json.dump(state, file)
            response = requests.get(cool_url)
            print(response.json())
        else:
            if state['triedTurnedCool'] == 1:
                # Command confirmed: mark cooling active, clear other modes.
                state['triedTurnedOff'] = 0
                state['wasTurnedOff'] = 0
                state['triedTurnedCool'] = 0
                state['wasTurnedCool'] = 1
                state['triedTurnedHeat'] = 0
                state['wasTurnedHeat'] = 0
                with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
                    json.dump(state, file)
def turn_off_ac():
    """Turn off the first-floor AC if it is not already off.

    Same two-phase state machine as the heat/cool setters: send the
    power-off command and set ``triedTurnedOff``; later, once
    check_if_ac_off() confirms the unit is off, settle the flags to
    ``wasTurnedOff = 1``.
    """
    # Power-off already confirmed and no retry pending: nothing to do.
    if (state['wasTurnedOff'] == 1) and not state['triedTurnedOff'] == 1:
        return
    turn_url = 'http://smart.levabd.pp.ua:2003/power-off/key/27fbc501b51b47663e77c46816a'
    ac_off = check_if_ac_off()
    # None means the gateway did not describe the expected unit; do nothing.
    if ac_off is not None:
        if not ac_off:
            # Unit reachable but still on: persist the attempt, then send
            # the power-off command.
            state['triedTurnedOff'] = 1
            state['wasTurnedOff'] = 0
            with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
                json.dump(state, file)
            response = requests.get(turn_url)
            print(response.json())
        else:
            if state['triedTurnedOff'] == 1:
                # Command confirmed: mark the unit off, clear other modes.
                state['triedTurnedOff'] = 0
                state['wasTurnedOff'] = 1
                state['triedTurnedCool'] = 0
                state['wasTurnedCool'] = 0
                state['triedTurnedHeat'] = 0
                state['wasTurnedHeat'] = 0
                with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
                    json.dump(state, file)
def record_temp_humid(temperature, humidity):
    """Record temperature and humidity for the web-interface monitor.

    Uploads a small JSON document over SFTP to the monitoring host.

    Args:
        temperature: latest sensor temperature reading.
        humidity: latest sensor humidity reading.
    """
    dicty = {
        "temperature": temperature,
        "humidity": humidity
    }
    ssh = paramiko.SSHClient()
    # SECURITY(review): AutoAddPolicy blindly trusts unknown host keys and
    # the password is hard-coded in source — consider key-based auth and a
    # pinned host key.
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect('smart.levabd.pp.ua', port = 2001, username='levabd', password='vapipu280.')
    sftp = ssh.open_sftp()
    # Path is relative to the remote user's home directory.
    with sftp.open('smart-home-temp-humidity-monitor/lr.json', 'w') as outfile:
        json.dump(dicty, outfile)
    ssh.close()
def poll_temp_humidity():
    """Poll the Mi Bluetooth temperature/humidity sensor.

    Returns:
        (today, temperature, humidity): the current datetime plus the
        sensor's temperature and humidity readings.
    """
    today = datetime.datetime.today()
    backend = BluepyBackend
    # Sensor MAC address is hard-coded for this installation.
    poller = MiTempBtPoller('58:2d:34:38:c0:91', backend)
    temperature = poller.parameter_value(MI_TEMPERATURE)
    humidity = poller.parameter_value(MI_HUMIDITY)
    # Diagnostic dump for the daemon log.
    print("Month: {}".format(today.month))
    print("Getting data from Mi Temperature and Humidity Sensor")
    print("FW: {}".format(poller.firmware_version()))
    print("Name: {}".format(poller.name()))
    print("Battery: {}".format(poller.parameter_value(MI_BATTERY)))
    print("Temperature: {}".format(poller.parameter_value(MI_TEMPERATURE)))
    print("Humidity: {}".format(poller.parameter_value(MI_HUMIDITY)))
    return (today, temperature, humidity)
# scan(args):
# """Scan for sensors."""
# backend = _get_backend(args)
# print('Scanning for 10 seconds...')
# devices = mitemp_scanner.scan(backend, 10)
# devices = []
# print('Found {} devices:'.format(len(devices)))
# for device in devices:
# print(' {}'.format(device))
def list_backends(_):
    """Print the name of every available btlewrap backend, one per line."""
    for backend_cls in available_backends():
        print(backend_cls.__name__)
def main():
    """One pass of the climate loop: read the sensor, publish the reading,
    then drive the humidifier and the AC by season and hour.

    Month ranges encode two regimes: months 5-9 (May-September, warm
    season) and months 10-4 (October-April, cold season).
    """
    # check_if_ac_cool()
    (today, temperature, humidity) = poll_temp_humidity()
    # Record temperature and humidity for monitor
    record_temp_humid(temperature, humidity)
    try:
        # Keep relative humidity roughly inside the 31-49 band; the same
        # thresholds are applied in both seasonal branches.
        if (humidity > 49) and (today.month < 10) and (today.month > 4):
            turn_off_humidifier()
        if (humidity < 31) and (today.month < 10) and (today.month > 4):
            turn_on_humidifier()
        if (humidity < 31) and ((today.month > 9) or (today.month < 5)):
            turn_on_humidifier()
        if (humidity > 49) and ((today.month > 9) or (today.month < 5)):
            turn_off_humidifier()
        # Prevent Sleep of Xiaomi Smart Plug: polling its status each pass
        # keeps the plug responsive.
        hummidifier_plug = chuangmi_plug.ChuangmiPlug(
            ip='192.168.19.59',
            token='14f5b868a58ef4ffaef6fece61c65b16',
            start_id=0,
            debug=0,
            lazy_discover=True,
            model='chuangmi.plug.m1')
        print(hummidifier_plug.status())
    except Exception:
        # Best effort: humidifier trouble must not stop the AC logic below.
        print("Can not connect to humidifier")
    # Clear the persisted AC state flags once a day at 04:00.
    if today.hour == 4:
        state['triedTurnedOff'] = 0
        state['wasTurnedOff'] = 0
        state['triedTurnedCool'] = 0
        state['wasTurnedCool'] = 0
        state['triedTurnedHeat'] = 0
        state['wasTurnedHeat'] = 0
        with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
            json.dump(state, file)
    # Night hours (00:00-06:59): AC stays off.
    if (today.hour > -1) and (today.hour < 7):
        turn_off_ac()
    # Daytime cooling thresholds: 26.4 in May and September, 27.3 in
    # June-August.
    if (temperature > 26.4) and (today.month < 6) and (today.month > 4) and (today.hour < 24) and (today.hour > 10):
        turn_on_cool_ac()
    if (temperature > 26.4) and (today.month < 10) and (today.month > 8) and (today.hour < 24) and (today.hour > 10):
        turn_on_cool_ac()
    if (temperature > 27.3) and (today.month < 9) and (today.month > 5) and (today.hour < 24) and (today.hour > 10):
        turn_on_cool_ac()
    # Warm season: stop cooling once it is cool enough.
    if (temperature < 23.5) and (today.month < 10) and (today.month > 4):
        turn_off_ac()
    # _if (temperature < 20) and ((today.month > 9) or (today.month < 5)) and (today.hour < 24) and (today.hour > 9):
    # turn_on_heat_ac()
    # Cold season: heating is disabled above; just make sure the AC is off
    # once the room is warm enough.
    if (temperature > 22) and ((today.month > 9) or (today.month < 5)):
        turn_off_ac()
if __name__ == '__main__':
main()
| 37.80137 | 118 | 0.602102 |
919e36250164a66af6592305ae454fa0dbde1d43 | 642 | py | Python | reservior_classification.py | Optimist-Prime/QML-for-MNIST-classification | 7513b3faa548166dba3df927a248e8c7f1ab2a15 | [
"BSD-3-Clause"
] | 1 | 2020-02-04T12:51:47.000Z | 2020-02-04T12:51:47.000Z | reservior_classification.py | Optimist-Prime/QML-for-MNIST-classification | 7513b3faa548166dba3df927a248e8c7f1ab2a15 | [
"BSD-3-Clause"
] | null | null | null | reservior_classification.py | Optimist-Prime/QML-for-MNIST-classification | 7513b3faa548166dba3df927a248e8c7f1ab2a15 | [
"BSD-3-Clause"
] | null | null | null | import pickle
from sklearn.neural_network import MLPClassifier
# Load precomputed reservoir/PCA features. Indexing below implies each
# pickle holds (features, labels) — presumably; confirm with the producer.
# SECURITY: pickle.load executes arbitrary code from the file; only load
# pickles you generated yourself.
train = pickle.load(open('train_pca_reservoir_output_200samples.pickle','rb'))
test = pickle.load(open('test_pca_reservoir_output_50samples.pickle','rb'))
# Number of labelled samples to use from each split.
train_num = 200
test_num = 50
# Single-hidden-layer MLP (2000 units) trained with plain SGD.
mlp = MLPClassifier(hidden_layer_sizes=(2000,), max_iter=100, alpha=1e-5,
                    solver='sgd', verbose=10, tol=1e-4, random_state=1,
                    learning_rate_init=.1, batch_size= 20)
mlp.fit(train[0], train[1][:train_num])
print("Training set score: %f" % mlp.score(train[0], train[1][:train_num]))
print("Test set score: %f" % mlp.score(test[0], test[1][:test_num]))
| 37.764706 | 78 | 0.700935 |
919f4e67778a5a961b0e58f4deb0ff4d5a7ee8e6 | 4,099 | py | Python | util.py | delmarrerikaine/LPG-PCA | deb631ee2c4c88190ce4204fcbc0765ae5cd8f53 | [
"MIT"
] | 1 | 2021-05-07T01:00:18.000Z | 2021-05-07T01:00:18.000Z | util.py | delmarrerikaine/LPG-PCA | deb631ee2c4c88190ce4204fcbc0765ae5cd8f53 | [
"MIT"
] | null | null | null | util.py | delmarrerikaine/LPG-PCA | deb631ee2c4c88190ce4204fcbc0765ae5cd8f53 | [
"MIT"
] | 2 | 2019-06-29T16:30:32.000Z | 2020-11-18T17:40:47.000Z | import numpy as np
import pandas as pd
from skimage import io
import skimage.measure as measure
import os
from lpg_pca_impl import denoise
| 36.598214 | 141 | 0.658941 |
91a0653094ec563d20865f6d3bbca729f2752582 | 3,178 | py | Python | ui/ui.py | kringen/wingnut | 73be4f8393720ff0932ab069543e5f2d2308296d | [
"MIT"
] | null | null | null | ui/ui.py | kringen/wingnut | 73be4f8393720ff0932ab069543e5f2d2308296d | [
"MIT"
] | null | null | null | ui/ui.py | kringen/wingnut | 73be4f8393720ff0932ab069543e5f2d2308296d | [
"MIT"
] | null | null | null | import redis
from rq import Queue, Connection
from flask import Flask, render_template, Blueprint, jsonify, request
import tasks
import rq_dashboard
from wingnut import Wingnut
# Flask application serving the UI; templates/static live next to this file.
app = Flask(
    __name__,
    template_folder="./templates",
    static_folder="./static",
)
# Pull in rq-dashboard's default config and mount its UI under /rq.
app.config.from_object(rq_dashboard.default_settings)
app.register_blueprint(rq_dashboard.blueprint, url_prefix="/rq")
if __name__ == "__main__":
    # NOTE(review): debug enabled and bound to all interfaces — acceptable
    # on a private LAN appliance, not for public exposure.
    app.run(host="0.0.0.0",debug=1)
| 29.425926 | 77 | 0.608559 |
91a50c39cf3d401ee6a7a290edb9d36a330b0540 | 42 | py | Python | pytaboola/__init__.py | Openmail/pytaboola | ed71b3b9c5fb2e4452d4b6d40aec1ff037dd5436 | [
"MIT"
] | null | null | null | pytaboola/__init__.py | Openmail/pytaboola | ed71b3b9c5fb2e4452d4b6d40aec1ff037dd5436 | [
"MIT"
] | 2 | 2020-04-27T23:41:57.000Z | 2020-07-30T20:48:59.000Z | pytaboola/__init__.py | Openmail/pytaboola | ed71b3b9c5fb2e4452d4b6d40aec1ff037dd5436 | [
"MIT"
] | null | null | null | from pytaboola.client import TaboolaClient | 42 | 42 | 0.904762 |
91a63511fb79b5745ac6428aee3eedeaa5046fe6 | 1,410 | py | Python | omkar/code.py | omi28/ga-learner-dst-repo | 396c35ea56028717a96aed6ca771e39ebf68dc5b | [
"MIT"
] | null | null | null | omkar/code.py | omi28/ga-learner-dst-repo | 396c35ea56028717a96aed6ca771e39ebf68dc5b | [
"MIT"
] | null | null | null | omkar/code.py | omi28/ga-learner-dst-repo | 396c35ea56028717a96aed6ca771e39ebf68dc5b | [
"MIT"
] | null | null | null | # --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#New record
#Reading file
data = np.genfromtxt(path, delimiter=",", skip_header=1)
data.shape
cenus=np.concatenate((new_record,data),axis=0)
cenus.shape
print(cenus)
age=cenus[:,0]
max_age=age.max()
print(max_age)
min_age=age.min()
mean_age=np.mean(age)
age_std=np.std(age)
race=cenus[:,2]
print(race)
race_0=(race==0)
len_0=len(race[race_0])
print(len_0)
race_1=(race==1)
len_1=len(race[race_1])
race_2=(race==2)
race_3=(race==3)
race_4=(race==4)
len_2=len(race[race_2])
len_3=len(race[race_3])
len_4=len(race[race_4])
minority_race=3
print(minority_race)
senior_citizen=(age>60)
working_hour_sum=sum(cenus[:,6][senior_citizen])
print(working_hour_sum)
senior_citizen_len=len(age[senior_citizen])
avg_working_hours=working_hour_sum/senior_citizen_len
avg_working_hours=round(avg_working_hours,2)
education_num=cenus[:,1]
print(education_num)
high=education_num>10
#high=education_num[high]
print(high)
low=education_num<=10
#low=education_num[low]
print(low)
INCOME=cenus[:,7][high]
print(INCOME)
print(np.mean(INCOME))
avg_pay_high=round(np.mean(INCOME),2)
print(avg_pay_high)
LOW_AVG=cenus[:,7][low]
avg_pay_low=round(np.mean(LOW_AVG),2)
print(avg_pay_low)
#Code starts here
| 20.434783 | 57 | 0.719858 |
91a824d6a95f0e9a4a572ff289971a58109b3c3c | 3,887 | py | Python | test/present.py | jchampio/apache-websocket | 18ad4ae2fc99381b8d75785f492a479f789b322b | [
"Apache-2.0"
] | 8 | 2015-09-10T21:49:25.000Z | 2022-02-02T04:39:00.000Z | test/present.py | jchampio/apache-websocket | 18ad4ae2fc99381b8d75785f492a479f789b322b | [
"Apache-2.0"
] | 34 | 2015-09-10T21:40:09.000Z | 2020-09-04T22:16:08.000Z | test/present.py | jchampio/apache-websocket | 18ad4ae2fc99381b8d75785f492a479f789b322b | [
"Apache-2.0"
] | 5 | 2016-01-22T05:16:54.000Z | 2017-10-18T12:28:02.000Z | #! /usr/bin/env python
#
# Presents the results of an Autobahn TestSuite run in TAP format.
#
# Copyright 2015 Jacob Champion
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import StrictVersion
import json
import os.path
import sys
import textwrap
import yamlish
def filter_report(report):
    """Return a copy of *report* restricted to the keys worth displaying.

    Raises KeyError if any of the expected keys is missing from *report*.
    """
    wanted = ('behavior', 'behaviorClose', 'expected',
              'received', 'expectedClose', 'remoteCloseCode')
    filtered = {}
    for key in wanted:
        filtered[key] = report[key]
    return filtered
def prepare_description(report):
    """Construct a one-line description from a test report.

    Takes the 'description' field of *report*, wraps it to at most 80
    characters, and returns only the first wrapped line, with '...'
    appended when the text did not fit on a single line.  An empty or
    whitespace-only description yields '' instead of raising IndexError
    (textwrap.wrap returns an empty list for such input).
    """
    raw = report['description']
    # Wrap to at most 80 characters per line.
    wrapped = textwrap.wrap(raw, 80)
    if not wrapped:
        # Nothing to show: empty/whitespace-only description.
        return ''
    description = wrapped[0]
    if len(wrapped) > 1:
        # If the text is longer than one line, add an ellipsis.
        description += '...'
    return description
#
# MAIN
#
# Read the index.
results_dir = 'test-results'
with open(os.path.join(results_dir, 'index.json'), 'r') as index_file:
    index = json.load(index_file)['AutobahnPython']
# Sort the tests by numeric ID so we print them in a sane order.
test_ids = list(index.keys())
test_ids.sort(key=StrictVersion)
# Print the TAP header.
print('TAP version 13')
# TAP plan line: "1..N" announces how many test points follow.
print('1..{0!s}'.format(len(test_ids)))
count = 0
skipped_count = 0
failed_count = 0
for test_id in test_ids:
    count += 1
    passed = True
    skipped = False
    report = None
    result = index[test_id]
    # Try to get additional information from this test's report file.
    try:
        path = os.path.join(results_dir, result['reportfile'])
        with open(path, 'r') as f:
            report = json.load(f)
        description = prepare_description(report)
    except Exception as e:
        # A missing/corrupt report is not fatal; note it in the description.
        description = '[could not load report file: {0!s}]'.format(e)
    test_result = result['behavior']
    close_result = result['behaviorClose']
    # Interpret the result for this test: OK/INFORMATIONAL behaviors pass,
    # UNIMPLEMENTED becomes a TAP skip, anything else (including a bad
    # close handshake) is a failure.
    if test_result != 'OK' and test_result != 'INFORMATIONAL':
        if test_result == 'UNIMPLEMENTED':
            skipped = True
        else:
            passed = False
    elif close_result != 'OK' and close_result != 'INFORMATIONAL':
        passed = False
    # Print the TAP result.
    print(u'{0} {1} - [{2}] {3}{4}'.format('ok' if passed else 'not ok',
                                           count,
                                           test_id,
                                           description,
                                           ' # SKIP unimplemented' if skipped
                                           else ''))
    # Print a YAMLish diagnostic for failed tests.
    if report and not passed:
        output = filter_report(report)
        diagnostic = yamlish.dumps(output)
        for line in diagnostic.splitlines():
            print('  ' + line)
    if not passed:
        failed_count += 1
    if skipped:
        skipped_count += 1
# Print a final result.
print('# Autobahn|TestSuite {0}'.format('PASSED' if not failed_count else 'FAILED'))
print('# total {0}'.format(count))
print('# passed {0}'.format(count - failed_count - skipped_count))
print('# skipped {0}'.format(skipped_count))
print('# failed {0}'.format(failed_count))
# Exit status mirrors the overall verdict: 0 when nothing failed.
exit(0 if not failed_count else 1)
| 28.792593 | 84 | 0.623874 |
91a977f44ca6b26789c3c66246a46fa0280ee2a7 | 1,143 | py | Python | softwarecollections/scls/migrations/0004_other_repos_default_values.py | WEBZCC/softwarecollections | efee5c3c276033d526a0cdba504d43deff71581e | [
"BSD-3-Clause"
] | 39 | 2016-12-24T02:57:55.000Z | 2022-02-15T09:29:43.000Z | softwarecollections/scls/migrations/0004_other_repos_default_values.py | WEBZCC/softwarecollections | efee5c3c276033d526a0cdba504d43deff71581e | [
"BSD-3-Clause"
] | 32 | 2016-11-21T15:05:07.000Z | 2021-12-06T11:52:32.000Z | softwarecollections/scls/migrations/0004_other_repos_default_values.py | WEBZCC/softwarecollections | efee5c3c276033d526a0cdba504d43deff71581e | [
"BSD-3-Clause"
] | 13 | 2016-12-14T10:42:22.000Z | 2022-01-01T20:35:15.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
| 32.657143 | 185 | 0.582677 |
91aa65150dc0f4a17f1e9ed16821f5753cc86fa6 | 389 | py | Python | python/Excel/enumerateCells.py | davidgjy/arch-lib | b4402b96d2540995a848e6c5f600b2d99847ded6 | [
"Apache-2.0"
] | null | null | null | python/Excel/enumerateCells.py | davidgjy/arch-lib | b4402b96d2540995a848e6c5f600b2d99847ded6 | [
"Apache-2.0"
] | null | null | null | python/Excel/enumerateCells.py | davidgjy/arch-lib | b4402b96d2540995a848e6c5f600b2d99847ded6 | [
"Apache-2.0"
] | null | null | null | import openpyxl
wb = openpyxl.load_workbook('example.xlsx')
sheet = wb.get_sheet_by_name('Sheet1')
rows = sheet.get_highest_row()
cols = sheet.get_highest_column()
for i in range(1, rows + 1):
for j in range(1, cols + 1):
print('%s: %s' % (sheet.cell(row=i, column=j).coordinate, sheet.cell(row=i, column=j).value))
print('---------------------------------------------')
| 32.416667 | 96 | 0.59126 |
91ab6aa12f229c7b9ddab5414461949479dfe028 | 787 | py | Python | plugins/polio/migrations/0029_campaign_country.py | BLSQ/iaso-copy | 85fb17f408c15e8c2d730416d1312f58f8db39b7 | [
"MIT"
] | 29 | 2020-12-26T07:22:19.000Z | 2022-03-07T13:40:09.000Z | plugins/polio/migrations/0029_campaign_country.py | BLSQ/iaso-copy | 85fb17f408c15e8c2d730416d1312f58f8db39b7 | [
"MIT"
] | 150 | 2020-11-09T15:03:27.000Z | 2022-03-07T15:36:07.000Z | plugins/polio/migrations/0029_campaign_country.py | BLSQ/iaso | 95c8087c0182bdd576598eb8cd39c440e58e15d7 | [
"MIT"
] | 4 | 2020-11-09T10:38:13.000Z | 2021-10-04T09:42:47.000Z | # Generated by Django 3.1.13 on 2021-10-04 11:44
from django.db import migrations, models
import django.db.models.deletion
| 28.107143 | 90 | 0.590851 |
91ac9d140e7247cc524f64941c877611ed2cbd70 | 6,257 | py | Python | CurrencyExchange.py | aarana14/CurrencyExchange | e3f35c1481acf19683a74a41509b1dd37ae48594 | [
"MIT"
] | null | null | null | CurrencyExchange.py | aarana14/CurrencyExchange | e3f35c1481acf19683a74a41509b1dd37ae48594 | [
"MIT"
] | null | null | null | CurrencyExchange.py | aarana14/CurrencyExchange | e3f35c1481acf19683a74a41509b1dd37ae48594 | [
"MIT"
] | null | null | null | #import external libraries used in code
import requests, json
import pycountry
print('Currency Exchange')
currencies = []
if __name__ == "__main__":
findCurrency()
help()
currencyAmount, fromCurrency, toCurrency = userData()
rate = realTimeRate(fromCurrency, toCurrency)
completeExchange(rate, currencyAmount, fromCurrency, toCurrency)
| 38.863354 | 189 | 0.61563 |
91ad7c273462430b62373174e1161a8ff1416f63 | 715 | py | Python | atcoder/corp/codethxfes2014a_e.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | 1 | 2018-11-12T15:18:55.000Z | 2018-11-12T15:18:55.000Z | atcoder/corp/codethxfes2014a_e.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | atcoder/corp/codethxfes2014a_e.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | r, c, m = map(int, input().split())
n = int(input())
op = [list(map(lambda x: int(x) - 1, input().split())) for _ in range(n)]
board = [[0 for _ in range(c)] for _ in range(r)]
for ra, rb, ca, cb in op:
for j in range(ra, rb + 1):
for k in range(ca, cb + 1):
board[j][k] += 1
cnt = 0
for i in range(r):
for j in range(c):
board[i][j] %= 4
if board[i][j] == 0:
cnt += 1
for i in range(n):
ra, rb, ca, cb = op[i]
cnti = cnt
for j in range(ra, rb + 1):
for k in range(ca, cb + 1):
if board[j][k] == 0:
cnti -= 1
elif board[j][k] == 1:
cnti += 1
if cnti == m:
print(i + 1)
| 25.535714 | 73 | 0.439161 |
91ad8a5fd94219e90c24839542dbfefd0cc9fc70 | 6,142 | py | Python | scripts/analyse_bse.py | QU-XIAO/yambopy | ff65a4f90c1bfefe642ebc61e490efe781709ff9 | [
"BSD-3-Clause"
] | 21 | 2016-04-07T20:53:29.000Z | 2021-05-14T08:06:02.000Z | scripts/analyse_bse.py | alexmoratalla/yambopy | 8ec0e1e18868ccaadb3eab36c55e6a47021e257d | [
"BSD-3-Clause"
] | 22 | 2016-06-14T22:29:47.000Z | 2021-09-16T15:36:26.000Z | scripts/analyse_bse.py | alexmoratalla/yambopy | 8ec0e1e18868ccaadb3eab36c55e6a47021e257d | [
"BSD-3-Clause"
] | 15 | 2016-06-14T18:40:57.000Z | 2021-08-07T13:17:43.000Z | # Copyright (C) 2018 Alexandre Morlet, Henrique Pereira Coutada Miranda
# All rights reserved.
#
# This file is part of yambopy
#
from __future__ import print_function
from builtins import range
from yambopy import *
from qepy import *
import json
import matplotlib.pyplot as plt
import numpy as np
import sys
import argparse
import operator
def analyse_bse( folder, var, exc_n, exc_int, exc_degen, exc_max_E, pack ):
"""
Using ypp, you can study the convergence of BSE calculations in 2 ways:
Create a .png of all absorption spectra relevant to the variable you study
Look at the eigenvalues of the first n "bright" excitons (given a threshold intensity)
The script reads from <folder> all results from <variable> calculations for processing.
The resulting pictures and data files are saved in the ./analyse_bse/ folder.
By default, the graphical interface is deactivated (assuming you run on a cluster because of ypp calls).
See line 2 inside the script.
"""
# Packing results (o-* files) from the calculations into yambopy-friendly .json files
if pack: # True by default, False if -np used
print('Packing ...')
pack_files_in_folder(folder,mask=var)
pack_files_in_folder(folder,mask='reference')
print('Packing done.')
else:
print('Packing skipped.')
# importing data from .json files in <folder>
print('Importing...')
data = YamboAnalyser(folder)
# extract data according to relevant var
invars = data.get_inputfiles_tag(var)
# Get only files related to the convergence study of the variable,
# ordered to have a smooth plot
keys=[]
sorted_invars = sorted(list(invars.items()), key=operator.itemgetter(1))
for i in range(0,len(sorted_invars)):
key=sorted_invars[i][0]
if key.startswith(var) or key=='reference.json':
keys.append(key)
print('Files detected: ',keys)
# unit of the input value
unit = invars[keys[0]]['variables'][var][1]
######################
# Output-file filename
######################
os.system('mkdir -p analyse_bse')
outname = './analyse_%s/%s_%s'%(folder,folder,var)
# Array that will contain the output
excitons = []
# Loop over all calculations
for key in keys:
jobname=key.replace('.json','')
print(jobname)
# input value
# BndsRn__ is a special case
if var.startswith('BndsRnX'):
# format : [1, nband, ...]
inp = invars[key]['variables'][var][0][1]
else:
inp = invars[key]['variables'][var][0]
print('Preparing JSON file. Calling ypp if necessary.')
### Creating the 'absorptionspectra.json' file
# It will contain the exciton energies
y = YamboOut(folder=folder,save_folder=folder)
# Args : name of job, SAVE folder path, folder where job was run path
a = YamboBSEAbsorptionSpectra(jobname,path=folder)
# Get excitons values (runs ypp once)
a.get_excitons(min_intensity=exc_int,max_energy=exc_max_E,Degen_Step=exc_degen)
# Write .json file with spectra and eigenenergies
a.write_json(filename=outname)
### Loading data from .json file
f = open(outname+'.json')
data = json.load(f)
f.close()
print('JSON file prepared and loaded.')
### Plotting the absorption spectra
# BSE spectra
plt.plot(data['E/ev[1]'], data['EPS-Im[2]'],label=jobname,lw=2)
# # Axes : lines for exciton energies (disabled, would make a mess)
# for n,exciton in enumerate(data['excitons']):
# plt.axvline(exciton['energy'])
### Creating array with exciton values (according to settings)
l = [inp]
for n,exciton in enumerate(data['excitons']):
if n <= exc_n-1:
l.append(exciton['energy'])
excitons.append(l)
if text:
header = 'Columns : '+var+' (in '+unit+') and "bright" excitons eigenenergies in order.'
print(excitons)
np.savetxt(outname+'.dat',excitons,header=header)
#np.savetxt(outname,excitons,header=header,fmt='%1f')
print(outname+'.dat')
else:
print('-nt flag : no text produced.')
if draw:
plt.xlabel('$\omega$ (eV)')
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.legend()
#plt.draw()
#plt.show()
plt.savefig(outname+'.png', bbox_inches='tight')
print(outname+'.png')
else:
print('-nd flag : no plot produced.')
print('Done.')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Study convergence on BS calculations using ypp calls.')
pa = parser.add_argument
pa('folder', help='Folder containing SAVE and convergence runs.' )
pa('variable', help='Variable tested (e.g. FFTGvecs)' )
pa('-ne','--numbexc', help='Number of excitons to read beyond threshold', default=2,type=int)
pa('-ie','--intexc', help='Minimum intensity for excitons to be considered bright', default=0.05,type=float)
pa('-de','--degenexc', help='Energy threshold under which different peaks are merged (eV)', default=0.01,type=float)
pa('-me','--maxexc', help='Energy threshold after which excitons are not read anymore (eV)', default=8.0,type=float)
pa('-np','--nopack', help='Skips packing o- files into .json files', action='store_false')
pa('-nt','--notext', help='Skips writing the .dat file', action='store_false')
pa('-nd','--nodraw', help='Skips drawing (plotting) the abs spectra', action='store_false')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
folder = args.folder
var = args.variable
exc_n = args.numbexc
exc_int = args.intexc
exc_degen = args.degenexc
exc_max_E = args.maxexc
pack = args.nopack
text = args.text
draw = args.draw
analyse_bse( folder, var, exc_n, exc_int, exc_degen, exc_max_E, pack=pack, text=text, draw=draw )
| 36.559524 | 122 | 0.632693 |
91ae1121ab522c5ec74869736cdca27ee08ca053 | 3,080 | py | Python | halmodule.py | richteer/pyfatafl | 1faddcf5d9eb36cbc6952b9a8e8bb899989f7112 | [
"MIT"
] | null | null | null | halmodule.py | richteer/pyfatafl | 1faddcf5d9eb36cbc6952b9a8e8bb899989f7112 | [
"MIT"
] | null | null | null | halmodule.py | richteer/pyfatafl | 1faddcf5d9eb36cbc6952b9a8e8bb899989f7112 | [
"MIT"
] | null | null | null | from module import XMPPModule
import halutils
import pyfatafl
# Commented to avoid loading before its ready
| 29.615385 | 147 | 0.656494 |
91aeb848169969b77dd6c9be3484be7a02c40a1b | 2,218 | py | Python | tools/acetz.py | arkhipenko/AceTime | bc6e6aa530e309b62a204b7574322ba013066b06 | [
"MIT"
] | 1 | 2021-02-23T06:17:36.000Z | 2021-02-23T06:17:36.000Z | tools/acetz.py | arkhipenko/AceTime | bc6e6aa530e309b62a204b7574322ba013066b06 | [
"MIT"
] | null | null | null | tools/acetz.py | arkhipenko/AceTime | bc6e6aa530e309b62a204b7574322ba013066b06 | [
"MIT"
] | null | null | null | from typing import cast, Optional
from datetime import datetime, tzinfo, timedelta
from zonedbpy import zone_infos
from zone_processor.zone_specifier import ZoneSpecifier
from zone_processor.inline_zone_info import ZoneInfo
__version__ = '1.1'
| 34.65625 | 73 | 0.628945 |
91b2c92f668693110e6ccdfb6fa82e177d314e5d | 8,510 | py | Python | z2/part2/interactive/jm/random_fuzzy_arrows_1/554539540.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 1 | 2020-04-16T12:13:47.000Z | 2020-04-16T12:13:47.000Z | z2/part2/interactive/jm/random_fuzzy_arrows_1/554539540.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 18 | 2020-03-06T17:50:15.000Z | 2020-05-19T14:58:30.000Z | z2/part2/interactive/jm/random_fuzzy_arrows_1/554539540.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 18 | 2020-03-06T17:45:13.000Z | 2020-06-09T19:18:31.000Z | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 554539540
"""
"""
random actions, total chaos
"""
board = gamma_new(6, 8, 3, 17)
assert board is not None
assert gamma_move(board, 1, 7, 4) == 0
assert gamma_move(board, 1, 4, 3) == 1
assert gamma_busy_fields(board, 1) == 1
assert gamma_move(board, 2, 5, 1) == 1
assert gamma_move(board, 2, 1, 7) == 1
assert gamma_busy_fields(board, 2) == 2
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 1, 0) == 1
assert gamma_golden_move(board, 3, 3, 4) == 0
assert gamma_busy_fields(board, 2) == 2
assert gamma_move(board, 3, 1, 3) == 1
assert gamma_move(board, 1, 3, 5) == 1
assert gamma_move(board, 1, 2, 3) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 3, 2, 2) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 2, 5, 4) == 1
assert gamma_move(board, 3, 0, 4) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_move(board, 2, 1, 4) == 1
assert gamma_move(board, 2, 1, 6) == 1
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 1, 0, 3) == 1
assert gamma_move(board, 1, 4, 2) == 1
board251673140 = gamma_board(board)
assert board251673140 is not None
assert board251673140 == (".2....\n"
".2....\n"
"...1..\n"
"32...2\n"
"131.1.\n"
"113.1.\n"
".1...2\n"
".3....\n")
del board251673140
board251673140 = None
assert gamma_move(board, 2, 4, 3) == 0
assert gamma_move(board, 2, 5, 1) == 0
assert gamma_move(board, 3, 4, 5) == 1
assert gamma_move(board, 3, 3, 0) == 1
assert gamma_free_fields(board, 3) == 29
assert gamma_move(board, 2, 1, 7) == 0
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 3, 0, 5) == 1
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 1, 0, 7) == 1
board281476409 = gamma_board(board)
assert board281476409 is not None
assert board281476409 == ("12....\n"
".2....\n"
"3..13.\n"
"32...2\n"
"131.1.\n"
"113.1.\n"
"31...2\n"
".3.3..\n")
del board281476409
board281476409 = None
assert gamma_move(board, 2, 5, 1) == 0
assert gamma_move(board, 2, 5, 4) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 7, 3) == 0
assert gamma_move(board, 3, 5, 1) == 0
assert gamma_busy_fields(board, 3) == 8
assert gamma_move(board, 1, 5, 4) == 0
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 2, 6, 3) == 0
assert gamma_move(board, 2, 4, 4) == 1
assert gamma_move(board, 3, 0, 5) == 0
assert gamma_move(board, 3, 0, 1) == 0
assert gamma_free_fields(board, 3) == 24
assert gamma_move(board, 1, 1, 7) == 0
assert gamma_move(board, 1, 2, 1) == 1
board412285252 = gamma_board(board)
assert board412285252 is not None
assert board412285252 == ("12....\n"
".2....\n"
"3..13.\n"
"32..22\n"
"131.1.\n"
"113.1.\n"
"311..2\n"
"13.3..\n")
del board412285252
board412285252 = None
assert gamma_move(board, 2, 1, 6) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_free_fields(board, 3) == 23
assert gamma_golden_move(board, 3, 4, 4) == 1
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 3, 6) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 7, 4) == 0
assert gamma_free_fields(board, 2) == 22
assert gamma_move(board, 3, 5, 5) == 1
assert gamma_move(board, 3, 5, 5) == 0
assert gamma_free_fields(board, 3) == 21
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 1, 5, 7) == 1
assert gamma_move(board, 2, 0, 6) == 1
assert gamma_move(board, 2, 5, 6) == 1
assert gamma_move(board, 3, 2, 2) == 0
assert gamma_move(board, 1, 5, 2) == 1
assert gamma_move(board, 2, 7, 4) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_move(board, 3, 3, 1) == 1
assert gamma_move(board, 1, 5, 1) == 0
assert gamma_free_fields(board, 1) == 16
assert gamma_move(board, 2, 4, 2) == 0
assert gamma_move(board, 3, 4, 1) == 1
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 7, 4) == 0
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 0, 5) == 0
assert gamma_busy_fields(board, 2) == 7
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 1, 5) == 1
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 3, 0, 3) == 0
assert gamma_move(board, 3, 1, 5) == 0
assert gamma_move(board, 1, 2, 4) == 1
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_busy_fields(board, 1) == 16
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 1, 0, 6) == 0
assert gamma_move(board, 2, 5, 5) == 0
assert gamma_golden_move(board, 2, 2, 2) == 1
assert gamma_move(board, 1, 5, 5) == 0
assert gamma_free_fields(board, 1) == 13
assert gamma_move(board, 2, 2, 6) == 1
assert gamma_move(board, 2, 5, 6) == 0
assert gamma_move(board, 3, 4, 3) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_move(board, 2, 2, 0) == 1
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 1, 7, 3) == 0
assert gamma_move(board, 2, 7, 3) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 7, 3) == 0
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_move(board, 1, 3, 3) == 1
assert gamma_move(board, 2, 7, 2) == 0
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 3, 7, 3) == 0
assert gamma_move(board, 3, 5, 1) == 0
assert gamma_move(board, 1, 7, 2) == 0
board481507094 = gamma_board(board)
assert board481507094 is not None
assert board481507094 == ("12...1\n"
"2221.2\n"
"31.133\n"
"321.32\n"
"13111.\n"
"112.11\n"
"311332\n"
"1323..\n")
del board481507094
board481507094 = None
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 2, 5, 4) == 0
assert gamma_busy_fields(board, 2) == 10
assert gamma_move(board, 1, 7, 2) == 0
assert gamma_move(board, 2, 7, 4) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_busy_fields(board, 3) == 11
assert gamma_golden_possible(board, 3) == 0
assert gamma_move(board, 2, 7, 2) == 0
assert gamma_move(board, 2, 1, 4) == 0
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 3, 0, 5) == 0
assert gamma_busy_fields(board, 3) == 11
assert gamma_move(board, 1, 7, 2) == 0
assert gamma_move(board, 1, 1, 6) == 0
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 2, 1, 7) == 0
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 1, 6, 4) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 2, 2, 7) == 1
board984249076 = gamma_board(board)
assert board984249076 is not None
assert board984249076 == ("122..1\n"
"2221.2\n"
"31.133\n"
"321.32\n"
"13111.\n"
"112.11\n"
"311332\n"
"1323..\n")
del board984249076
board984249076 = None
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_golden_possible(board, 1) == 1
board492321582 = gamma_board(board)
assert board492321582 is not None
assert board492321582 == ("122..1\n"
"2221.2\n"
"31.133\n"
"321.32\n"
"13111.\n"
"112.11\n"
"311332\n"
"1323..\n")
del board492321582
board492321582 = None
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_move(board, 1, 7, 3) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 2, 2, 6) == 0
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 2, 3, 2) == 1
assert gamma_move(board, 3, 0, 5) == 0
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 3, 5, 6) == 0
assert gamma_move(board, 3, 2, 1) == 0
gamma_delete(board)
| 30.722022 | 46 | 0.653114 |
91b37c8672721c9195859e7e71caa5db1a857b4d | 25,928 | py | Python | examples/run_chemistry_parser.py | ZhuoyuWei/transformers | 16d0ebd55d17dd5095231566a0544ecebd56bc9c | [
"Apache-2.0"
] | null | null | null | examples/run_chemistry_parser.py | ZhuoyuWei/transformers | 16d0ebd55d17dd5095231566a0544ecebd56bc9c | [
"Apache-2.0"
] | null | null | null | examples/run_chemistry_parser.py | ZhuoyuWei/transformers | 16d0ebd55d17dd5095231566a0544ecebd56bc9c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 The HuggingFace Inc. team.
# Copyright (c) 2019 The HuggingFace Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning seq2seq models for sequence generation."""
import argparse
import functools
import logging
import os
import random
import sys
sys.path.append(r'../')
import numpy as np
from tqdm import tqdm, trange
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import (
AutoTokenizer,
BertForMaskedLM,
BertConfig,
PreTrainedEncoderDecoder,
Model2Models,
)
from utils_summarization import (
CNNDailyMailDataset,
encode_for_summarization,
fit_to_block_size,
build_lm_labels,
build_mask,
compute_token_type_ids,
)
from utils_chemistry import (ChemistryDataset,)
'''
class InputExample(object):
def __init__(self,example_id,question_input,question_varible_output=None,condition_output=None):
self.example_id=example_id
self.question_input=question_input
self.question_varible_output=question_varible_output
self.condition_output=condition_output
'''
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
# ------------
# Load dataset
# ------------
def collate(data, tokenizer, input_block_size, output_block_size):
    """Batch a list of examples into padded tensors, masks and LM labels.

    Each example contributes one encoder input sequence and two decoder
    target sequences (question variables and conditions); an absent target
    is replaced by an empty sequence wrapped in the tokenizer's special
    tokens.  All sequences are padded/truncated to their block size.

    Returns a 5-tuple:
        (inputs, [var_targets, cond_targets], input_mask,
         [var_mask, cond_mask], [var_lm_labels, cond_lm_labels])
    """
    def _encode_target(text):
        # Encode the target text, falling back to an empty special-token
        # sequence when the target is missing from the example.
        if text is None:
            ids = tokenizer.build_inputs_with_special_tokens([])
        else:
            ids = tokenizer.encode(text)
        return fit_to_block_size(ids, output_block_size, tokenizer.pad_token_id)

    encoder_rows = []
    variable_rows = []
    condition_rows = []
    for example in data:
        source_ids = tokenizer.encode(example.question_input)
        encoder_rows.append(
            fit_to_block_size(source_ids, input_block_size, tokenizer.pad_token_id)
        )
        variable_rows.append(_encode_target(example.question_varible_output))
        condition_rows.append(_encode_target(example.condition_output))

    question_inputs = torch.tensor(encoder_rows)
    question_varible_outputs = torch.tensor(variable_rows)
    condition_outputs = torch.tensor(condition_rows)

    # Attention masks: 1 for real tokens, 0 for padding.
    question_inputs_mask = build_mask(question_inputs, tokenizer.pad_token_id)
    question_varible_outputs_mask = build_mask(question_varible_outputs, tokenizer.pad_token_id)
    condition_outputs_mask = build_mask(condition_outputs, tokenizer.pad_token_id)

    # LM labels with padding positions masked out of the loss.
    question_varible_lm_labels = build_lm_labels(question_varible_outputs, tokenizer.pad_token_id)
    condition_lm_labels = build_lm_labels(condition_outputs, tokenizer.pad_token_id)

    return (
        question_inputs,
        [question_varible_outputs, condition_outputs],
        question_inputs_mask,
        [question_varible_outputs_mask, condition_outputs_mask],
        [question_varible_lm_labels, condition_lm_labels],
    )
# ----------
# Optimizers
# ----------
# ------------
# Train
# ------------
def train(args, model, tokenizer):
    """ Fine-tune the pretrained model on the corpus.

    Builds a DataLoader over the training split, derives the total
    optimization step count from either ``args.max_steps`` or
    ``args.num_train_epochs``, and runs the standard gradient-accumulation
    training loop, summing one LM loss per decoder.

    Returns (global_step, average training loss per step).
    """
    set_seed(args)

    # Load the data
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_dataset = load_and_cache_examples(args, tokenizer, "train")
    train_sampler = RandomSampler(train_dataset)
    # Bind tokenizer/block sizes so the DataLoader can call collate(batch).
    model_collate_fn = functools.partial(collate, tokenizer=tokenizer,
                                         input_block_size=args.input_block_size,output_block_size=args.output_block_size)
    train_dataloader = DataLoader(
        train_dataset,
        sampler=train_sampler,
        batch_size=args.train_batch_size,
        collate_fn=model_collate_fn,
    )

    # Training schedule
    if args.max_steps > 0:
        # Fixed step budget: derive the epoch count from it.
        t_total = args.max_steps
        args.num_train_epochs = t_total // (
            len(train_dataloader) // args.gradient_accumulation_steps + 1
        )
    else:
        t_total = (
            len(train_dataloader)
            // args.gradient_accumulation_steps
            * args.num_train_epochs
        )

    # Prepare the optimizer — separate learning rates and warmup schedules
    # for the encoder and the decoder(s).
    #lr = {"encoder": 0.002, "decoder": 0.2}
    lr = {"encoder": args.encoder_lr, "decoder": args.decoder_lr}
    #warmup_steps = {"encoder": 20000, "decoder": 10000}
    warmup_steps = {"encoder": args.encoder_warmup, "decoder": args.decoder_warmup}
    optimizer = BertSumOptimizer(model, lr, warmup_steps)

    # Train
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(
        " Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size
    )
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size * args.gradient_accumulation_steps
        # * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)

    model.zero_grad()
    train_iterator = trange(args.num_train_epochs, desc="Epoch", disable=False)

    global_step = 0
    tr_loss = 0.0
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=False)
        for step, batch in enumerate(epoch_iterator):
            # batch layout matches collate(): the targets, decoder masks
            # and LM labels are lists with one entry per decoder.
            source, target, encoder_mask, decoder_mask, lm_labels = batch
            #print('source: {}'.format(source))
            #print('target: {}'.format(target))
            feed_source=None
            feed_targets=[None]*len(target)
            feed_encoder_mask=None
            feed_decoder_masks=[None]*len(decoder_mask)
            feed_lm_labels=[None]*len(lm_labels)
            # Move every tensor (and each per-decoder tensor) to the
            # training device.
            feed_source = source.to(args.device)
            for i in range(len(target)):
                feed_targets[i] = target[i].to(args.device)
            feed_encoder_mask = encoder_mask.to(args.device)
            for i in range(len(decoder_mask)):
                feed_decoder_masks[i] = decoder_mask[i].to(args.device)
            for i in range(len(lm_labels)):
                feed_lm_labels[i] = lm_labels[i].to(args.device)

            model.train()
            #print('debug by zhuoyu: source = {}'.format(source))
            #print('debug by zhuoyu: target = {}'.format(target))
            #print('debug by zhuoyu, device:')
            #print('feed source {}'.format(feed_source.device))
            #print('feed target {}'.format([str(feed_target.device) for feed_target in feed_targets]))
            #print('feed encoder mask {}'.format(feed_encoder_mask.device))
            #print('feed decoder masks {}'.format([str(feed_decoder_mask.device) for feed_decoder_mask in feed_decoder_masks]))
            #print('feed lm labels {}'.format([str(feed_lm_label.device) for feed_lm_label in feed_lm_labels]))
            outputs = model(
                feed_source,
                feed_targets,
                encoder_attention_mask=feed_encoder_mask,
                decoder_attention_mask=feed_decoder_masks,
                decoder_lm_labels=feed_lm_labels,
            )

            # Total loss is the sum of each decoder's LM loss
            # (outputs[i][0] is the loss of decoder i).
            loss=0
            for i in range(len(model.decoders)):
                #print('outputs[{}][0] type: {}'.format(i,type(outputs[i][0])))
                loss += outputs[i][0]
            #print(loss)
            if args.gradient_accumulation_steps > 1:
                loss /= args.gradient_accumulation_steps

            loss.backward()

            tr_loss += loss.item()
            # Only step the optimizer every gradient_accumulation_steps
            # micro-batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                model.zero_grad()
                global_step += 1

                if args.max_steps > 0 and global_step > args.max_steps:
                    epoch_iterator.close()
                    break

        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break

    # NOTE(review): if training ends before the first optimizer step
    # (global_step == 0), this division raises ZeroDivisionError — confirm
    # callers guarantee at least one step.
    return global_step, tr_loss / global_step
# ------------
# Train
# ------------
if __name__ == "__main__":
main()
| 34.432935 | 127 | 0.612234 |
91b41dc55c2835ad843b049f4f5251bad3abf07e | 676 | py | Python | envergo/geodata/management/commands/import_shapefiles.py | MTES-MCT/envergo | 8bb6e4ffa15a39edda51b39401db6cc12e73ad0a | [
"MIT"
] | null | null | null | envergo/geodata/management/commands/import_shapefiles.py | MTES-MCT/envergo | 8bb6e4ffa15a39edda51b39401db6cc12e73ad0a | [
"MIT"
] | 6 | 2021-07-12T14:33:18.000Z | 2022-02-14T10:36:09.000Z | envergo/geodata/management/commands/import_shapefiles.py | MTES-MCT/envergo | 8bb6e4ffa15a39edda51b39401db6cc12e73ad0a | [
"MIT"
] | null | null | null | from django.contrib.gis.gdal import DataSource
from django.contrib.gis.utils import LayerMapping
from django.core.management.base import BaseCommand
from envergo.geodata.models import Zone
| 32.190476 | 60 | 0.695266 |
91b47f9da5c47dfa6628ace04164ad0d1bc8a057 | 1,710 | py | Python | vimfiles/bundle/ultisnips/test/test_AnonymousExpansion.py | duanqiaobb/vim-for-java | 01b60e4494e65a73c9a9de00f50259d8a7c8d0bb | [
"MIT"
] | null | null | null | vimfiles/bundle/ultisnips/test/test_AnonymousExpansion.py | duanqiaobb/vim-for-java | 01b60e4494e65a73c9a9de00f50259d8a7c8d0bb | [
"MIT"
] | null | null | null | vimfiles/bundle/ultisnips/test/test_AnonymousExpansion.py | duanqiaobb/vim-for-java | 01b60e4494e65a73c9a9de00f50259d8a7c8d0bb | [
"MIT"
] | null | null | null | from test.vim_test_case import VimTestCase as _VimTest
from test.constant import *
# Anonymous Expansion {{{#
# End: Anonymous Expansion #}}}
| 25.147059 | 77 | 0.615789 |
91b495763107bc2ceb225b3984a8b4ffae309299 | 2,914 | py | Python | data_converter/data_converter.py | jkchen2/JshBot-plugins | b5999fecf0df067e34673ff193dcfbf8c7e2fde2 | [
"MIT"
] | 1 | 2021-08-09T19:28:49.000Z | 2021-08-09T19:28:49.000Z | data_converter/data_converter.py | jkchen2/JshBot-plugins | b5999fecf0df067e34673ff193dcfbf8c7e2fde2 | [
"MIT"
] | null | null | null | data_converter/data_converter.py | jkchen2/JshBot-plugins | b5999fecf0df067e34673ff193dcfbf8c7e2fde2 | [
"MIT"
] | 2 | 2017-07-14T00:15:54.000Z | 2019-03-02T09:46:21.000Z | import discord
from jshbot import utilities, data, configurations, plugins, logger
from jshbot.exceptions import BotException, ConfiguredBotException
from jshbot.commands import (
Command, SubCommand, Shortcut, ArgTypes, Attachment, Arg, Opt, MessageTypes, Response)
__version__ = '0.1.0'
CBException = ConfiguredBotException('0.3 to 0.4 plugin')
| 38.853333 | 96 | 0.576527 |
91b5d5a9da8d21cc54215371e88cbf75203f4ad6 | 374 | py | Python | tut2.py | ankit98040/TKINTER-JIS | 8b650138bf8ab2449da83e910ee33c0caee69a8d | [
"Apache-2.0"
] | null | null | null | tut2.py | ankit98040/TKINTER-JIS | 8b650138bf8ab2449da83e910ee33c0caee69a8d | [
"Apache-2.0"
] | null | null | null | tut2.py | ankit98040/TKINTER-JIS | 8b650138bf8ab2449da83e910ee33c0caee69a8d | [
"Apache-2.0"
] | null | null | null | from tkinter import *
from PIL import Image, ImageTk
#python image library
#imagetk supports jpg image
a1 = Tk()
a1.geometry("455x244")
#for png image
#photo = PhotoImage(file="filename.png")
#a2 = Label(image = photo)
#a2.pack()
image = Image.open("PJXlVd.jpg")
photo = ImageTk.PhotoImage(image)
a2 = Label(image = photo)
a2.pack()
a1.mainloop() | 17 | 41 | 0.671123 |
91b62cc1816352d2c7a0ead7b1bf1eabb9a68df6 | 8,113 | py | Python | dataset.py | mintanwei/IPCLs-Net | 04937df683216a090c0749cc90ab7e517dbab0fd | [
"MIT"
] | null | null | null | dataset.py | mintanwei/IPCLs-Net | 04937df683216a090c0749cc90ab7e517dbab0fd | [
"MIT"
] | null | null | null | dataset.py | mintanwei/IPCLs-Net | 04937df683216a090c0749cc90ab7e517dbab0fd | [
"MIT"
] | null | null | null | import os
import torch
from PIL import Image
from read_csv import csv_to_label_and_bbx
import numpy as np
from torch.utils.data import Subset, random_split, ConcatDataset
def split_index(K=5, len=100):
    """Partition the indices 0..len-1 into K contiguous, near-equal chunks.

    Returns a list of K lists; chunk boundaries are computed with integer
    arithmetic so the chunks differ in size by at most one element.
    (Note: the parameter name `len` shadows the builtin; kept for
    backward compatibility with keyword callers.)
    """
    return [
        list(range((i * len) // K, ((i + 1) * len) // K))
        for i in range(K)
    ]
def k_fold_index(K=5, len=100, fold=0):
    """Return (train_indices, val_indices) for one fold of a K-fold split.

    The validation set is the `fold`-th contiguous chunk produced by
    split_index; the training set is every other chunk, concatenated in order.
    """
    chunks = split_index(K, len)
    train = []
    for j, chunk in enumerate(chunks):
        if j != fold:
            train.extend(chunk)
    return train, chunks[fold]
def stat_dataset(dataset):
    """Print a count of class labels in a detection dataset.

    Each dataset item is expected to be (image, target) where
    target['labels'] is an iterable of integer class ids in {1..4}.
    """
    id_to_name = {1: "A", 2: "B1", 3: "B2", 4: "B3"}
    counts = {"A": 0, "B1": 0, "B2": 0, "B3": 0}
    for _, target in dataset:
        for label in target['labels']:
            counts[id_to_name[int(label)]] += 1
    print(counts)
def NBIFiveFoldDataset(transforms):
    """Build five train/val splits of the full NBI dataset for 5-fold CV.

    Returns a list of 5 dicts, each {"train": ConcatDataset, "val": Subset},
    where fold i uses the i-th random subset as validation and the
    concatenation of the other four as training.

    NOTE(review): the split sizes [46, 46, 46, 45, 45] sum to 229, so this
    assumes len(ds) == 229 — confirm against the dataset on disk.
    """
    ds = NBIFullDataset(root="./NBI_full_dataset/", transforms=transforms)
    # Earlier (contiguous) k-fold variant, kept for reference:
    # n = len(ds)
    # for i in range(5):
    #     train_idx, val_idx = k_fold_index(5, n, i)
    #     train_subset = Subset(ds, train_idx)
    #     val_subset = Subset(ds, val_idx)
    #     print("Fold: %d" % i, len(train_subset), len(val_subset))
    #     stat_dataset(train_subset)
    #     stat_dataset(val_subset)
    # Fixed seed so the random 5-way partition is reproducible across runs.
    torch.manual_seed(13)
    all_subsets = random_split(ds, [46, 46, 46, 45, 45])
    fold_i_subsets = []
    for i in range(5):
        val_subset = all_subsets[i]
        train_subset = ConcatDataset([all_subsets[j] for j in range(5) if j != i])
        fold_i_subsets.append({"train": train_subset, "val": val_subset})
        # print("Fold: %d" % i, len(train_subset), len(val_subset))
        # stat_dataset(train_subset)
        # stat_dataset(val_subset)
    return fold_i_subsets
if __name__ == '__main__':
    # ds = NBIFiveFoldDataset(None)
    # NOTE(review): leftover debug code. eval(b"aaa") evaluates the
    # expression `aaa`, which is undefined here, so this raises NameError at
    # runtime; eval on arbitrary data is also a security hazard. Consider
    # removing this block or restoring the commented dataset smoke test.
    di = "aaa".encode("UTF-8")
    result = eval(di)
    print(result)
| 32.322709 | 110 | 0.598053 |
91b7b2d421c1a0795b99655b4fa4a8c0503e4114 | 1,056 | py | Python | design_patterns/chapter5/mymath.py | FeliciaMJ/PythonLearningJourney | ae1bfac872ee29256e69df6e0e8e507321404cba | [
"Apache-2.0"
] | null | null | null | design_patterns/chapter5/mymath.py | FeliciaMJ/PythonLearningJourney | ae1bfac872ee29256e69df6e0e8e507321404cba | [
"Apache-2.0"
] | null | null | null | design_patterns/chapter5/mymath.py | FeliciaMJ/PythonLearningJourney | ae1bfac872ee29256e69df6e0e8e507321404cba | [
"Apache-2.0"
] | 2 | 2021-04-04T00:27:29.000Z | 2021-06-05T03:26:53.000Z | # coding: utf-8
import functools
if __name__ == '__main__':
from timeit import Timer
measure = [{'exec': 'fibonacci(100)', 'import': 'fibonacci',
'func': fibonacci}, {'exec': 'nsum(200)', 'import': 'nsum',
'func': nsum}]
for m in measure:
t = Timer('{}'.format(m['exec']), 'from __main__ import \
{}'.format(m['import']))
print('name: {}, doc: {}, executing: {}, time: \
{}'.format(m['func'].__name__, m['func'].__doc__,
m['exec'], t.timeit()))
| 25.142857 | 75 | 0.507576 |
91b7d7d61842cf27c4aaa82c80b40afa5304b3b0 | 27 | py | Python | transforms/__init__.py | yangyuke001/emotion-expression.shufflenetv2 | d70fd17871fb758eb4fc7d2f9df430cc7e44ad64 | [
"Apache-2.0"
] | 3 | 2019-11-29T01:29:58.000Z | 2020-09-16T12:48:49.000Z | transforms/__init__.py | yangyuke001/emotion-expression.shufflenetv2 | d70fd17871fb758eb4fc7d2f9df430cc7e44ad64 | [
"Apache-2.0"
] | null | null | null | transforms/__init__.py | yangyuke001/emotion-expression.shufflenetv2 | d70fd17871fb758eb4fc7d2f9df430cc7e44ad64 | [
"Apache-2.0"
] | null | null | null |
from .transforms import *
| 9 | 25 | 0.740741 |
91b880c2b2d9577a02c8519251133c3cee61564c | 14,894 | py | Python | codes/elastoplasticity_spectralAnalysis/planeStress/slowWavePlaneStressSigDriven.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | 1 | 2021-06-18T14:52:03.000Z | 2021-06-18T14:52:03.000Z | codes/elastoplasticity_spectralAnalysis/planeStress/slowWavePlaneStressSigDriven.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | 1 | 2019-01-07T13:11:11.000Z | 2019-01-07T13:11:11.000Z | codes/elastoplasticity_spectralAnalysis/planeStress/slowWavePlaneStressSigDriven.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | null | null | null | # !\usr\bin\python
import numpy as np
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
import scipy.optimize
from matplotlib import animation
from scipy.integrate import ode
import pdb
# Material parameters (SI units; steel-like elastoplastic solid with
# linear isotropic hardening)
rho = 7800.    # mass density [kg/m^3]
E = 2.e11      # Young's modulus [Pa]
nu = 0.3       # Poisson's ratio [-]
mu = 0.5*E/(1.+nu)            # shear modulus [Pa]
kappa = E/(3.*(1.-2.*nu))     # bulk modulus [Pa]
lamb = kappa-2.*mu/3.         # first Lame parameter [Pa]
sigy = 100.0e6                # initial yield stress [Pa]
H = 100.08e6                  # linear hardening modulus [Pa]
beta=(6.*mu**2)/(3.*mu+H)     # elastoplastic coupling modulus

from mpl_toolkits.mplot3d import proj3d
# Force an orthographic projection for 3D axes.
# NOTE(review): orthogonal_proj is defined earlier in this file (outside
# this excerpt) — confirm it is still present.
proj3d.persp_transformation = orthogonal_proj

Samples=5
# Sample constant stress component sig22
sig22=np.linspace(0.,sigy,Samples)
#sig22=np.linspace(-sigy/np.sqrt(1-nu+nu**2),sigy/np.sqrt(1-nu+nu**2),Samples)
# Refine the sig11/tau grid: 10x more points than sig22 samples.
Samples*=10
sig=np.zeros((Samples,Samples))
tau=np.zeros((Samples,Samples))

# Indices (from the yield surface) at which loading paths start.
# NOTE: the first assignment is dead code — immediately overwritten below.
frames=[10,20,40]
frames=[5,10,15,20]
col=["r","g","b","y","c","m","k","p"]   # one plot color per frame

# Axis bounds for the stress plots (tauM is overwritten by sigM).
tauM=1.5*sigy/np.sqrt(3.)
sigM=1.5*sigy/np.sqrt(1-nu+nu**2)
tauM=sigM
Niter=1000   # number of integration steps along each loading path
# Storage for the integrated loading paths: (step, frame, sig22-sample).
TAU=np.zeros((Niter,len(frames),len(sig22)))
SIG11=np.zeros((Niter,len(frames),len(sig22)))
SIG22=np.zeros((Niter,len(frames),len(sig22)))
eigsigS=np.zeros((Niter,len(frames),len(sig22),3))  # principal stresses per step
criterionS=np.zeros((Niter,len(frames)))            # yield-function value per step
PsiS=np.zeros((Samples,len(sig22)))
plast_S=np.zeros((Niter,len(frames)))               # equivalent plastic strain per step
LodeAngle_S=np.zeros((Niter,len(frames)))           # Lode angle per step
# Boolean to plot the updated yield surface
updated_criterion=False
# For each fixed sig22 sample, trace the plane-stress von Mises yield
# surface in the (sig11, sig12) plane: sig11^2 - sig11*sig22 + sig22^2
# + 3*sig12^2 = sigy^2.
for k in range(len(sig22)-1):
    s22=sig22[k]
    # Discriminant of the quadratic in sig11 at tau=0; gives the admissible
    # sig11 interval on the yield ellipse for this s22.
    Delta=(4.*sigy**2- 3.*s22**2)
    sigMax=(s22+np.sqrt(Delta))/2.
    sigMin=(s22-np.sqrt(Delta))/2.
    # Sample stress component sig11
    # NOTE: the first linspace is dead code — overwritten to start from 0.
    sig[:,k]=np.linspace(sigMin,sigMax,Samples)
    sig[:,k]=np.linspace(0.,sigMax,Samples)
    # Compute shear stress satisfying the criterion given sig11 and sig22
    for i in range(Samples):
        s11=sig[i,k]
        delta=(s11*s22 -s11**2-s22**2 + sigy**2)/3.
        # Guard against tiny negative round-off before the square root.
        if np.abs(delta)<10. : delta=np.abs(delta)
        tauMax=np.sqrt(delta)
        # NOTE(review): f_vm is rebuilt each iteration but never called here;
        # computeCriterion is defined earlier in this file (outside this excerpt).
        f_vm=lambda x:computeCriterion(s11,s22,x,0.,sigy)
        tau[i,k]=np.sqrt(delta)
## LOADING PATHS PLOTS
for k in range(len(sig22)-1)[1:]:
s22=sig22[k]
sigM=1.25*np.max(sig[:,k])
tauM=1.25*np.max(tau[:,k])
## For each value of sig22 trace the loading paths given by psis from yield surface to an arbitrary shear stress level
approx=np.zeros((len(frames),2))
ordonnees=np.zeros((len(frames),Samples))
abscisses=np.zeros((len(frames),Samples))
radius_S=np.zeros(len(frames))
for s,i in enumerate(frames):
if i==0:
continue
sig0=sig[-1-i,k]
tau0=tau[-1-i,k]
dsig=(sigM-sig0)/Niter
SIG11[:,s,k]=np.linspace(sig0,sigM,Niter)
TAU[0,s,k]=tau0
SIG22[0,s,k]=s22
#rSlow = ode(computePsiSlow).set_integrator('vode',method='bdf')
rSlow = ode(computePsiSlow).set_integrator('vode',method='adams',order=12)
rSlow.set_initial_value(np.array([TAU[0,s,k],SIG22[0,s,k]]),SIG11[0,s,k]).set_f_params(0.,lamb,mu,beta,'planeStress',rho)
sigma = np.matrix([[SIG11[0,s,k],TAU[0,s,k],0.],[TAU[0,s,k],SIG22[0,s,k],0.],[0.,0.,0.]])
eigsig=np.linalg.eig(sigma)[0]
eigsigS[0,s,k,:]=eigsig
LodeAngle_S[0,s]=computeLodeAngle(sigma[0,0],SIG22[0,s,k],sigma[0,1],0.)
p=0.
epsp33=0.
for j in range(Niter-1):
rSlow.set_f_params(np.array([TAU[j,s,k],SIG22[j,s,k]]),0.,lamb,mu,beta,'planeStress',rho)
if not rSlow.successful():
print "Integration issues in slow wave path"
break
rSlow.integrate(rSlow.t+dsig)
TAU[j+1,s,k],SIG22[j+1,s,k]=rSlow.y
sigma = np.array([SIG11[j,s,k],np.sqrt(2.)*TAU[j,s,k],SIG22[j,s,k],0.])
sigman = np.array([SIG11[j+1,s,k],np.sqrt(2.)*TAU[j+1,s,k],SIG22[j+1,s,k],0.])
f_vm=computeCriterion(SIG11[j+1,s,k],SIG22[j+1,s,k],TAU[j+1,s,k],0.,sigy+H*p)
#if f_vm>0. :
#p+=updateEquivalentPlasticStrain(sigma,sigman,H)
#residual=lambda x: plasticResidual(sigma,sigman,p,x,H)
residual=lambda x: computeCriterion(SIG11[j+1,s,k],SIG22[j+1,s,k],TAU[j+1,s,k],0.,sigy+H*x)
p=scipy.optimize.root(residual,p,method='hybr',options={'xtol':1.e-12}).x[0]
criterionS[j+1,s]=computeCriterion(SIG11[j+1,s,k],SIG22[j+1,s,k],TAU[j+1,s,k],0.,sigy+H*p)
plast_S[j+1,s]=p
LodeAngle_S[j+1,s]=computeLodeAngle(sigman[0],sigman[2],sigman[1]/np.sqrt(2.),0.)
# Eigenvalues of sigma (for deviatoric plane plots)
sigma = np.matrix([[SIG11[j+1,s,k],TAU[j+1,s,k],0.],[TAU[j+1,s,k],SIG22[j+1,s,k],0.],[0.,0.,0.]])
eigsigS[j+1,s,k,:]=computeEigenStresses(sigma)
print "Final equivalent plastic strain after slow wave : ",p
radius_S[s]=sigy+H*p
TAU_MAX_S=np.max(ordonnees)
SIG_MAX_S=np.max(abscisses)
### SUBPLOTS SETTINGS
fig = plt.figure()
ax2=plt.subplot2grid((1,2),(0,1),projection='3d')
ax1d1=plt.subplot2grid((1,2),(0,0))
ax1d1.grid()
ax1d1.set_xlabel(r'$\Theta$', fontsize=24)
ax1d1.set_ylabel('p', fontsize=24)
fvm1=ax1d1.twinx()
fvm1.set_ylabel('f',fontsize=18.)
fvm1.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
cylindre=vonMisesYieldSurface(sigy)
ax2.plot_wireframe(cylindre[0,:],cylindre[1,:],cylindre[2,:], color="k")
elevation_Angle_radian=np.arctan(1./np.sqrt(2.0))
angle_degree= 180.*elevation_Angle_radian/np.pi
radius=1.*np.sqrt((2./3.)*sigy**2)
ax2.set_xlim(-1.*radius,1.*radius)
ax2.set_ylim(-1.*radius,1.*radius)
ax2.set_zlim(-1.*radius,1.*radius)
ax2.view_init(angle_degree,45.)
ax2.plot([0.,sigy],[0.,sigy],[0.,sigy],color="k")
ax2.set_xlabel(r'$\sigma_1$',size=24.)
ax2.set_ylabel(r'$\sigma_2$',size=24.)
ax2.set_zlabel(r'$\sigma_3$',size=24.)
for p in range(len(frames)):
if updated_criterion :
cylindre=vonMisesYieldSurface(radius_S[p])
ax2.plot_wireframe(cylindre[0,:],cylindre[1,:],cylindre[2,:], color=col[p],linestyle='--')
## 2D plot of equivalent plastic strain evolution
ax1d1.plot(LodeAngle_S[:Niter/5,p],plast_S[:Niter/5,p],col[p])
#ax1d1_2.plot(LodeAngle_S[:Niter/5,p],SIG33_S[:Niter/5,p,k],col[p],marker='o')
fvm1.plot(LodeAngle_S[:,p],criterionS[:,p],col[p],linestyle='--')
## 3D plots of loading paths (deviatoric plane)
ax2.plot(eigsigS[:,p,k,0],eigsigS[:,p,k,1],eigsigS[:,p,k,2],color=col[p],marker="o")
ax2.plot([-sigy,sigy],[0.,0.],[0.,0.],color="k",linestyle="--",lw=1.)
ax2.plot([0.,0.],[-sigy,sigy],[0.,0.],color="k",linestyle="--",lw=1.)
ax2.plot([-radius,radius],[radius,-radius],[0.,0.],color="k",linestyle="--",lw=1.)
#plt.show()
fig = plt.figure()
ax1=plt.subplot2grid((1,2),(0,0))
ax2=plt.subplot2grid((1,2),(0,1))
ax1.set_xlabel(r'$\sigma_{11}$',size=28.)
ax1.set_ylabel(r'$\sigma_{12}$',size=28.)
#ax1.set_zlabel(r'$\sigma_{22}$',size=28.)
ax2.set_xlabel(r'$\sigma_{22}$',size=28.)
ax2.set_ylabel(r'$\sigma_{12}$',size=28.)
#ax2.set_zlabel(r'$\sigma_{11}$',size=28.)
ax1.grid()
ax2.grid()
#ax2.view_init(-90.,-0.)
#ax1.view_init(-90.,0.)
for s,i in enumerate(frames):
sig0=sig[-1-i,k]
s22max=(sig0+np.sqrt(4*sigy**2-3.*sig0**2))/2.
s22min=(sig0-np.sqrt(4*sigy**2-3.*sig0**2))/2.
s22=np.linspace(s22min,s22max,Samples)
s12=np.sqrt((sigy**2- sig0**2-s22**2+sig0*s22)/3.)
ax2.plot(s22,s12,color=col[s])
ax1.plot(sig[:,k],tau[:,k],'k')
#ax2.plot(sig[:,k],tau[:,k],sig22[k],'k')
for p in range(len(frames)):
ax1.plot(SIG11[:,p,k],TAU[:,p,k],color=col[p])
ax2.plot(SIG22[:,p,k],TAU[:,p,k],color=col[p])
plt.show()
| 37.422111 | 216 | 0.589701 |
91b88e3d926b20d74b8739d087b18e11fc2bf047 | 343 | py | Python | pyhsms/core/connectionstate.py | cherish-web/pyhsms | 83a88b8b45bf1aba30cb7572f44a02478009052b | [
"MIT"
] | 2 | 2021-05-01T12:02:12.000Z | 2021-05-03T14:37:27.000Z | pyhsms/core/connectionstate.py | cherish-web/pyhsms | 83a88b8b45bf1aba30cb7572f44a02478009052b | [
"MIT"
] | null | null | null | pyhsms/core/connectionstate.py | cherish-web/pyhsms | 83a88b8b45bf1aba30cb7572f44a02478009052b | [
"MIT"
] | null | null | null | # _*_ coding: utf-8 _*_
#@Time : 2020/7/29 09:49
#@Author : cherish_peng
#@Email : 1058386071@qq.com
#@File : connectionstate.py
#@Software : PyCharm
from enum import Enum | 21.4375 | 32 | 0.626822 |
91b96455218c552cfb88f8804f7f9440605930b5 | 84,787 | py | Python | lifelines/fitters/coxph_fitter.py | msanpe/lifelines | a73d441f6347332ca870bf2ec32eeeca410dc6de | [
"MIT"
] | null | null | null | lifelines/fitters/coxph_fitter.py | msanpe/lifelines | a73d441f6347332ca870bf2ec32eeeca410dc6de | [
"MIT"
] | null | null | null | lifelines/fitters/coxph_fitter.py | msanpe/lifelines | a73d441f6347332ca870bf2ec32eeeca410dc6de | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import time
from datetime import datetime
import warnings
from textwrap import dedent, fill
import numpy as np
import pandas as pd
from numpy.linalg import norm, inv
from scipy.linalg import solve as spsolve, LinAlgError
from scipy.integrate import trapz
from scipy import stats
from lifelines.fitters import BaseFitter, Printer
from lifelines.plotting import set_kwargs_drawstyle
from lifelines.statistics import chisq_test, proportional_hazard_test, TimeTransformers, StatisticalResult
from lifelines.utils.lowess import lowess
from lifelines.utils.concordance import _concordance_summary_statistics, _concordance_ratio
from lifelines.utils import (
_get_index,
_to_list,
_to_tuple,
_to_1d_array,
inv_normal_cdf,
normalize,
qth_survival_times,
coalesce,
check_for_numeric_dtypes_or_raise,
check_low_var,
check_complete_separation,
check_nans_or_infs,
StatError,
ConvergenceWarning,
StatisticalWarning,
StepSizer,
ConvergenceError,
string_justify,
interpolate_at_times_and_return_pandas,
CensoringType,
interpolate_at_times,
format_p_value,
)
__all__ = ["CoxPHFitter"]
def _partition_by_strata(self, X, T, E, weights, as_dataframes=False):
for stratum, stratified_X in X.groupby(self.strata):
stratified_E, stratified_T, stratified_W = (E.loc[[stratum]], T.loc[[stratum]], weights.loc[[stratum]])
if not as_dataframes:
yield (stratified_X.values, stratified_T.values, stratified_E.values, stratified_W.values), stratum
else:
yield (stratified_X, stratified_T, stratified_E, stratified_W), stratum
def _compute_scaled_schoenfeld(self, X, T, E, weights, index=None):
    r"""
    Compute the scaled Schoenfeld residuals, used in tests of proportional hazards.

    Let s_k be the kth schoenfeld residuals. Then E[s_k] = 0.
    For tests of proportionality, we want to test if \beta_i(t) is \beta_i (constant) or not.

    Let V_k be the contribution to the information matrix at time t_k. A main result from Grambsch and Therneau is that

    \beta(t) = E[s_k*V_k^{-1} + \hat{beta}]

    so define s_k^* = s_k*V_k^{-1} + \hat{beta} as the scaled schoenfeld residuals.

    We can approximate V_k with Hessian/d, so the inverse of Hessian/d is (d * variance_matrix_)

    Notes
    -------
    lifelines does not add the coefficients to the final results, but R does when you call residuals(c, "scaledsch")
    """
    # d = number of observed deaths; V_k^{-1} is approximated by d * variance_matrix_.
    n_deaths = self.event_observed.sum()
    scaled_schoenfeld_resids = n_deaths * self._compute_schoenfeld(X, T, E, weights, index).dot(
        self.variance_matrix_
    )
    # Label the residual columns with the covariate names.
    scaled_schoenfeld_resids.columns = self.params_.index
    return scaled_schoenfeld_resids
def _compute_schoenfeld_within_strata(self, X, T, E, weights):
    """
    Compute (unscaled) Schoenfeld residuals for a single stratum, handling
    tied event times with Efron's method.

    A positive value of the residual shows an X value that is higher than expected at that death time.

    Parameters are raw numpy arrays: X is (n, d); T, E and weights are (n,).
    NOTE(review): the backward cumulative construction of the risk set
    assumes rows are sorted by T ascending — confirm against the caller.

    Returns
    -------
    numpy array of shape (n, d): one residual row per observation (zero rows
    for censored subjects, and all zeros if the stratum has no events).
    """
    # TODO: the diff_against is gross
    # This uses Efron ties.
    n, d = X.shape

    if not np.any(E):
        # sometimes strata have no deaths. This means nothing is returned
        # in the below code.
        return np.zeros((n, d))

    # Init risk and tie sums to zero
    risk_phi, tie_phi = 0, 0
    risk_phi_x, tie_phi_x = np.zeros((1, d)), np.zeros((1, d))

    # Init number of ties and weights
    weight_count = 0.0
    tie_count = 0

    # Per-subject weighted partial-hazard scores: w_i * exp(x_i . beta).
    scores = weights * np.exp(np.dot(X, self.params_))

    diff_against = []

    schoenfeld_residuals = np.empty((0, d))

    # Iterate backwards to utilize recursive relationship
    for i in range(n - 1, -1, -1):
        # Doing it like this to preserve shape
        ti = T[i]
        ei = E[i]
        xi = X[i : i + 1]
        score = scores[i : i + 1]
        w = weights[i]

        # Calculate phi values
        phi_i = score
        phi_x_i = phi_i * xi

        # Calculate sums of Risk set
        risk_phi = risk_phi + phi_i
        risk_phi_x = risk_phi_x + phi_x_i

        # Calculate sums of Ties, if this is an event
        diff_against.append((xi, ei))
        if ei:
            tie_phi = tie_phi + phi_i
            tie_phi_x = tie_phi_x + phi_x_i

            # Keep track of count
            tie_count += 1  # aka death counts
            weight_count += w

        if i > 0 and T[i - 1] == ti:
            # There are more ties/members of the risk set
            continue
        elif tie_count == 0:
            # No event at this time: every queued subject gets a zero residual.
            for _ in diff_against:
                schoenfeld_residuals = np.append(schoenfeld_residuals, np.zeros((1, d)), axis=0)
            diff_against = []
            continue

        # There was atleast one event and no more ties remain. Time to sum.
        # Efron correction: average the weighted covariate mean over the ties.
        weighted_mean = np.zeros((1, d))

        for l in range(tie_count):
            numer = risk_phi_x - l * tie_phi_x / tie_count
            denom = risk_phi - l * tie_phi / tie_count

            weighted_mean += numer / (denom * tie_count)

        # Residual = observed covariate minus expected covariate at this death
        # time (zero for censored subjects, since ei == 0).
        for xi, ei in diff_against:
            schoenfeld_residuals = np.append(schoenfeld_residuals, ei * (xi - weighted_mean), axis=0)

        # reset tie values
        tie_count = 0
        weight_count = 0.0
        tie_phi = 0
        tie_phi_x = np.zeros((1, d))
        diff_against = []

    # Residuals were accumulated in reverse time order; flip back.
    return schoenfeld_residuals[::-1]
def _compute_delta_beta(self, X, T, E, weights, index=None):
    """
    Approximate the change in the fitted coefficients caused by excluding
    each observation in turn (one output row per observation). Useful for
    spotting subjects that influence the model disproportionately — good
    advice: don't drop these outliers, model them.
    """
    d = X.shape[1]
    # Undo the covariate normalization on the variance matrix, column-wise.
    rescaled_variance = self.variance_matrix_ * np.tile(self._norm_std.values, (d, 1)).T
    residuals = self._compute_score(X, T, E, weights, index=index)
    delta_betas = residuals.dot(rescaled_variance)
    delta_betas.columns = self.params_.index
    return delta_betas
def compute_residuals(self, training_dataframe, kind):
"""
Parameters
----------
training_dataframe : pandas DataFrame
the same training DataFrame given in `fit`
kind : string
{'schoenfeld', 'score', 'delta_beta', 'deviance', 'martingale', 'scaled_schoenfeld'}
"""
ALLOWED_RESIDUALS = {"schoenfeld", "score", "delta_beta", "deviance", "martingale", "scaled_schoenfeld"}
assert kind in ALLOWED_RESIDUALS, "kind must be in %s" % ALLOWED_RESIDUALS
warnings.filterwarnings("ignore", category=ConvergenceWarning)
X, T, E, weights, shuffled_original_index, _ = self._preprocess_dataframe(training_dataframe)
resids = getattr(self, "_compute_%s" % kind)(X, T, E, weights, index=shuffled_original_index)
return resids
def print_summary(self, decimals=2, **kwargs):
    """
    Print summary statistics describing the fit, the coefficients, and the error bounds.

    Parameters
    -----------
    decimals: int, optional (default=2)
        specify the number of decimal places to show
    kwargs:
        print additional metadata in the output (useful to provide model names, dataset names, etc.) when comparing
        multiple outputs.
    """

    # Print information about data first
    justify = string_justify(25)
    headers = []

    headers.append(("duration col", "'%s'" % self.duration_col))

    # Optional model configuration, shown only when set at fit time.
    if self.event_col:
        headers.append(("event col", "'%s'" % self.event_col))
    if self.weights_col:
        headers.append(("weights col", "'%s'" % self.weights_col))
    if self.cluster_col:
        headers.append(("cluster col", "'%s'" % self.cluster_col))
    if self.penalizer > 0:
        headers.append(("penalizer", self.penalizer))
    if self.robust or self.cluster_col:
        # clustering implies the robust (sandwich) variance estimator.
        headers.append(("robust variance", True))
    if self.strata:
        headers.append(("strata", self.strata))

    # Weighted counts: with case weights, "observations" is the weight total,
    # not the number of rows.
    headers.extend(
        [
            ("number of observations", "{:g}".format(self.weights.sum())),
            ("number of events observed", "{:g}".format(self.weights[self.event_observed > 0].sum())),
            ("partial log-likelihood", "{:.{prec}f}".format(self.log_likelihood_, prec=decimals)),
            ("time fit was run", self._time_fit_was_called),
        ]
    )

    # Printer handles layout of the headers plus the coefficient table.
    p = Printer(headers, self, justify, decimals, kwargs)
    p.print()
def log_likelihood_ratio_test(self):
    """
    This function computes the likelihood ratio test for the Cox model. We
    compare the existing model (with all the covariates) to the trivial model
    of no covariates.

    Returns
    -------
    StatisticalResult
        holds the test statistic (chi-squared with one degree of freedom per
        covariate) and its p-value.
    """
    # Use the cached null log-likelihood if fit() already computed it.
    if hasattr(self, "_ll_null_"):
        ll_null = self._ll_null_
    else:
        # Recompute the covariate-free ("trivial") log-likelihood, using the
        # same batch/single code path chosen during fitting.
        if self._batch_mode:
            ll_null = self._trivial_log_likelihood_batch(
                self.durations.values, self.event_observed.values, self.weights.values
            )
        else:
            ll_null = self._trivial_log_likelihood_single(
                self.durations.values, self.event_observed.values, self.weights.values
            )

    ll_alt = self.log_likelihood_

    # LR statistic: 2 * (log-likelihood of full model - null model).
    test_stat = 2 * ll_alt - 2 * ll_null
    degrees_freedom = self.params_.shape[0]
    p_value = chisq_test(test_stat, degrees_freedom=degrees_freedom)
    return StatisticalResult(
        p_value,
        test_stat,
        name="log-likelihood ratio test",
        null_distribution="chi squared",
        degrees_freedom=degrees_freedom,
    )
def predict_partial_hazard(self, X):
    r"""
    Compute the partial hazard for each subject, :math:`\exp{(x - mean(x_{train}))'\beta}`.
    "Partial" because the baseline hazard is not included.

    Parameters
    ----------
    X: numpy array or DataFrame
        a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
        can be in any order. If a numpy array, columns must be in the
        same order as the training data.

    Returns
    -------
    partial_hazard: DataFrame

    Notes
    -----
    If X is a DataFrame, the order of the columns do not matter. But
    if X is an array, then the column ordering is assumed to be the
    same as the training dataset.
    """
    log_partial_hazard = self.predict_log_partial_hazard(X)
    return np.exp(log_partial_hazard)
def predict_log_partial_hazard(self, X):
    r"""
    This is equivalent to R's linear.predictors.
    Returns the log of the partial hazard for the individuals, partial since the
    baseline hazard is not included. Equal to :math:`(x - \text{mean}(x_{\text{train}})) \beta`

    Parameters
    ----------
    X: numpy array or DataFrame
        a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
        can be in any order. If a numpy array, columns must be in the
        same order as the training data.

    Returns
    -------
    log_partial_hazard: DataFrame

    Notes
    -----
    If X is a DataFrame, the order of the columns do not matter. But
    if X is an array, then the column ordering is assumed to be the
    same as the training dataset.
    """
    hazard_names = self.params_.index

    # A Series is treated as a single observation: promote it to a one-row
    # DataFrame and recurse. NOTE(review): the `+ 2` case presumably allows a
    # row that still carries the duration and event columns — confirm.
    if isinstance(X, pd.Series) and ((X.shape[0] == len(hazard_names) + 2) or (X.shape[0] == len(hazard_names))):
        X = X.to_frame().T
        return self.predict_log_partial_hazard(X)
    elif isinstance(X, pd.Series):
        # With a single covariate, a Series could also be n observations of
        # that covariate — ambiguous, so it is rejected.
        assert len(hazard_names) == 1, "Series not the correct argument"
        X = X.to_frame().T
        return self.predict_log_partial_hazard(X)

    index = _get_index(X)

    if isinstance(X, pd.DataFrame):
        # Reorder DataFrame columns to match the training covariate order.
        order = hazard_names
        X = X.reindex(order, axis="columns")
        X = X.astype(float)
        X = X.values

    X = X.astype(float)

    # Center by the training means (scale divisor of 1) before the dot product.
    X = normalize(X, self._norm_mean.values, 1)
    return pd.DataFrame(np.dot(X, self.params_), index=index)
def predict_cumulative_hazard(self, X, times=None, conditional_after=None):
    """
    Predict the cumulative hazard for individuals, given their covariates.

    Parameters
    ----------

    X: numpy array or DataFrame
        a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
        can be in any order. If a numpy array, columns must be in the
        same order as the training data.
    times: iterable, optional
        an iterable of increasing times to predict the cumulative hazard at. Default
        is the set of all durations (observed and unobserved). Uses a linear interpolation if
        points in time are not in the index.
    conditional_after: iterable, optional
        Must be equal is size to X.shape[0] (denoted `n` above).  An iterable (array, list, series) of possibly non-zero values that represent how long the
        subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
        :math`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
        The new timeline is the remaining duration of the subject, i.e. reset back to starting at 0.

    Returns
    -------
    cumulative_hazard_ : DataFrame
        the cumulative hazard of individuals over the timeline
    """
    # A Series is a single observation: promote to a one-row DataFrame.
    if isinstance(X, pd.Series):
        return self.predict_cumulative_hazard(X.to_frame().T, times=times, conditional_after=conditional_after)

    n = X.shape[0]

    if times is not None:
        times = np.atleast_1d(times).astype(float)
    if conditional_after is not None:
        # One conditioning time per subject, shaped (n, 1) for broadcasting.
        conditional_after = _to_1d_array(conditional_after).reshape(n, 1)

    if self.strata:
        # Stratified model: each stratum has its own baseline hazard.
        cumulative_hazard_ = pd.DataFrame()
        for stratum, stratified_X in X.groupby(self.strata):
            try:
                strata_c_0 = self.baseline_cumulative_hazard_[[stratum]]
            except KeyError:
                raise StatError(
                    dedent(
                        """The stratum %s was not found in the original training data. For example, try
                        the following on the original dataset, df: `df.groupby(%s).size()`. Expected is that %s is not present in the output."""
                        % (stratum, self.strata, stratum)
                    )
                )
            col = _get_index(stratified_X)
            v = self.predict_partial_hazard(stratified_X)
            times_ = coalesce(times, self.baseline_cumulative_hazard_.index)
            n_ = stratified_X.shape[0]
            if conditional_after is not None:
                # Shift the evaluation grid by each subject's elapsed time, then
                # subtract the hazard already accumulated before conditioning.
                times_to_evaluate_at = np.tile(times_, (n_, 1)) + conditional_after
                c_0_ = interpolate_at_times(strata_c_0, times_to_evaluate_at)
                c_0_conditional_after = interpolate_at_times(strata_c_0, conditional_after)
                # clip guards against small negative values from interpolation.
                c_0_ = np.clip((c_0_ - c_0_conditional_after).T, 0, np.inf)
            else:
                times_to_evaluate_at = np.tile(times_, (n_, 1))
                c_0_ = interpolate_at_times(strata_c_0, times_to_evaluate_at).T

            # Outer-merge on the time index to combine subjects across strata.
            cumulative_hazard_ = cumulative_hazard_.merge(
                pd.DataFrame(c_0_ * v.values[:, 0], columns=col, index=times_),
                how="outer",
                right_index=True,
                left_index=True,
            )
    else:
        # Unstratified: one shared baseline cumulative hazard.
        v = self.predict_partial_hazard(X)
        col = _get_index(v)
        times_ = coalesce(times, self.baseline_cumulative_hazard_.index)

        if conditional_after is not None:
            times_to_evaluate_at = np.tile(times_, (n, 1)) + conditional_after

            c_0 = interpolate_at_times(self.baseline_cumulative_hazard_, times_to_evaluate_at)
            c_0_conditional_after = interpolate_at_times(self.baseline_cumulative_hazard_, conditional_after)
            c_0 = np.clip((c_0 - c_0_conditional_after).T, 0, np.inf)

        else:
            times_to_evaluate_at = np.tile(times_, (n, 1))
            c_0 = interpolate_at_times(self.baseline_cumulative_hazard_, times_to_evaluate_at).T

        # H_i(t) = H_0(t) * exp((x_i - mean)' beta)
        cumulative_hazard_ = pd.DataFrame(c_0 * v.values[:, 0], columns=col, index=times_)

    return cumulative_hazard_
def predict_survival_function(self, X, times=None, conditional_after=None):
    """
    Predict the survival function for individuals, given their covariates,
    as S(t) = exp(-H(t)). This assumes each individual just entered the study
    (we do not condition on how long they have already lived for).

    Parameters
    ----------
    X: numpy array or DataFrame
        a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
        can be in any order. If a numpy array, columns must be in the
        same order as the training data.
    times: iterable, optional
        an iterable of increasing times to evaluate at. Default is the set of
        all durations (observed and unobserved). Uses a linear interpolation
        if points in time are not in the index.
    conditional_after: iterable, optional
        Must be equal in size to X.shape[0]. How long each subject has
        already lived for; the returned timeline is the remaining duration,
        reset to start at 0.

    Returns
    -------
    survival_function : DataFrame
        the survival probabilities of individuals over the timeline
    """
    cumulative_hazard = self.predict_cumulative_hazard(X, times=times, conditional_after=conditional_after)
    return np.exp(-cumulative_hazard)
def predict_percentile(self, X, p=0.5, conditional_after=None):
"""
Returns the median lifetimes for the individuals, by default. If the survival curve of an
individual does not cross 0.5, then the result is infinity.
http://stats.stackexchange.com/questions/102986/percentile-loss-functions
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
p: float, optional (default=0.5)
the percentile, must be between 0 and 1.
conditional_after: iterable, optional
Must be equal is size to X.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the
subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
:math`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
The new timeline is the remaining duration of the subject, i.e. normalized back to starting at 0.
Returns
-------
percentiles: DataFrame
See Also
--------
predict_median
"""
subjects = _get_index(X)
return qth_survival_times(p, self.predict_survival_function(X, conditional_after=conditional_after)[subjects]).T
def predict_median(self, X, conditional_after=None):
"""
Predict the median lifetimes for the individuals. If the survival curve of an
individual does not cross 0.5, then the result is infinity.
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
percentiles: DataFrame
the median lifetimes for the individuals. If the survival curve of an
individual does not cross 0.5, then the result is infinity.
See Also
--------
predict_percentile
"""
return self.predict_percentile(X, 0.5, conditional_after=conditional_after)
def predict_expectation(self, X):
r"""
Compute the expected lifetime, :math:`E[T]`, using covariates X. This algorithm to compute the expectation is
to use the fact that :math:`E[T] = \int_0^\inf P(T > t) dt = \int_0^\inf S(t) dt`. To compute the integral, we use the trapizoidal rule to approximate the integral.
Caution
--------
However, if the survival function doesn't converge to 0, the the expectation is really infinity and the returned
values are meaningless/too large. In that case, using ``predict_median`` or ``predict_percentile`` would be better.
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
expectations : DataFrame
Notes
-----
If X is a DataFrame, the order of the columns do not matter. But
if X is an array, then the column ordering is assumed to be the
same as the training dataset.
See Also
--------
predict_median
predict_percentile
"""
subjects = _get_index(X)
v = self.predict_survival_function(X)[subjects]
return pd.DataFrame(trapz(v.values.T, v.index), index=subjects)
def _compute_baseline_survival(self):
"""
Importantly, this agrees with what the KaplanMeierFitter produces. Ex:
Example
-------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter, KaplanMeierFitter
>>> rossi = load_rossi()
>>> kmf = KaplanMeierFitter()
>>> kmf.fit(rossi['week'], rossi['arrest'])
>>> rossi2 = rossi[['week', 'arrest']].copy()
>>> rossi2['var1'] = np.random.randn(432)
>>> cph = CoxPHFitter()
>>> cph.fit(rossi2, 'week', 'arrest')
>>> ax = cph.baseline_survival_.plot()
>>> kmf.plot(ax=ax)
"""
survival_df = np.exp(-self.baseline_cumulative_hazard_)
if not self.strata:
survival_df = survival_df.rename(columns={"baseline cumulative hazard": "baseline survival"})
return survival_df
    def plot(self, columns=None, hazard_ratios=False, ax=None, **errorbar_kwargs):
        """
        Produces a visual representation of the coefficients (i.e. log hazard ratios), including their standard errors and magnitudes.
        Parameters
        ----------
        columns : list, optional
            specify a subset of the columns to plot
        hazard_ratios: bool, optional
            by default, `plot` will present the log-hazard ratios (the coefficients). However, by turning this flag to True, the hazard ratios are presented instead.
        errorbar_kwargs:
            pass in additional plotting commands to matplotlib errorbar command
        Examples
        ---------
        >>> from lifelines import datasets, CoxPHFitter
        >>> rossi = datasets.load_rossi()
        >>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
        >>> cph.plot(hazard_ratios=True)
        Returns
        -------
        ax: matplotlib axis
            the matplotlib axis that be edited.
        """
        from matplotlib import pyplot as plt
        if ax is None:
            ax = plt.gca()
        # Default styling (black square markers, white faces, capped error bars);
        # setdefault lets any user-supplied kwargs take precedence.
        errorbar_kwargs.setdefault("c", "k")
        errorbar_kwargs.setdefault("fmt", "s")
        errorbar_kwargs.setdefault("markerfacecolor", "white")
        errorbar_kwargs.setdefault("markeredgewidth", 1.25)
        errorbar_kwargs.setdefault("elinewidth", 1.25)
        errorbar_kwargs.setdefault("capsize", 3)
        # z-score for the two-sided (1 - alpha) confidence interval half-width.
        z = inv_normal_cdf(1 - self.alpha / 2)
        user_supplied_columns = True
        if columns is None:
            user_supplied_columns = False
            columns = self.params_.index
        yaxis_locations = list(range(len(columns)))
        log_hazards = self.params_.loc[columns].values.copy()
        # User-supplied columns keep their given order (drawn top-to-bottom);
        # otherwise coefficients are sorted by value for readability.
        order = list(range(len(columns) - 1, -1, -1)) if user_supplied_columns else np.argsort(log_hazards)
        if hazard_ratios:
            # On the hazard-ratio scale the CI is asymmetric after
            # exponentiation, so lower/upper error lengths differ.
            exp_log_hazards = np.exp(log_hazards)
            upper_errors = exp_log_hazards * (np.exp(z * self.standard_errors_[columns].values) - 1)
            lower_errors = exp_log_hazards * (1 - np.exp(-z * self.standard_errors_[columns].values))
            ax.errorbar(
                exp_log_hazards[order],
                yaxis_locations,
                xerr=np.vstack([lower_errors[order], upper_errors[order]]),
                **errorbar_kwargs
            )
            ax.set_xlabel("HR (%g%% CI)" % ((1 - self.alpha) * 100))
        else:
            # On the log scale the normal-approximation CI is symmetric.
            symmetric_errors = z * self.standard_errors_[columns].values
            ax.errorbar(log_hazards[order], yaxis_locations, xerr=symmetric_errors[order], **errorbar_kwargs)
            ax.set_xlabel("log(HR) (%g%% CI)" % ((1 - self.alpha) * 100))
        # Dashed reference line at "no effect" (HR=1 or log(HR)=0), while
        # preserving the y-limits that errorbar established.
        best_ylim = ax.get_ylim()
        ax.vlines(1 if hazard_ratios else 0, -2, len(columns) + 1, linestyles="dashed", linewidths=1, alpha=0.65)
        ax.set_ylim(best_ylim)
        tick_labels = [columns[i] for i in order]
        ax.set_yticks(yaxis_locations)
        ax.set_yticklabels(tick_labels)
        return ax
    def plot_covariate_groups(self, covariates, values, plot_baseline=True, **kwargs):
        """
        Produces a plot comparing the baseline survival curve of the model versus
        what happens when a covariate(s) is varied over values in a group. This is useful to compare
        subjects' survival as we vary covariate(s), all else being held equal. The baseline survival
        curve is equal to the predicted survival curve at all average values in the original dataset.
        Parameters
        ----------
        covariates: string or list
            a string (or list of strings) of the covariate(s) in the original dataset that we wish to vary.
        values: 1d or 2d iterable
            an iterable of the specific values we wish the covariate(s) to take on.
        plot_baseline: bool
            also display the baseline survival, defined as the survival at the mean of the original dataset.
        kwargs:
            pass in additional plotting commands.
        Returns
        -------
        ax: matplotlib axis, or list of axis'
            the matplotlib axis that be edited.
        Examples
        ---------
        >>> from lifelines import datasets, CoxPHFitter
        >>> rossi = datasets.load_rossi()
        >>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
        >>> cph.plot_covariate_groups('prio', values=np.arange(0, 15, 3), cmap='coolwarm')
        .. image:: images/plot_covariate_example1.png
        >>> # multiple variables at once
        >>> cph.plot_covariate_groups(['prio', 'paro'], values=[
        >>>  [0,  0],
        >>>  [5,  0],
        >>>  [10, 0],
        >>>  [0,  1],
        >>>  [5,  1],
        >>>  [10, 1]
        >>> ], cmap='coolwarm')
        .. image:: images/plot_covariate_example2.png
        >>> # if you have categorical variables, you can do the following to see the
        >>> # effect of all the categories on one plot.
        >>> cph.plot_covariate_groups(['dummy1', 'dummy2', 'dummy3'], values=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        >>> # same as:
        >>> cph.plot_covariate_groups(['dummy1', 'dummy2', 'dummy3'], values=np.eye(3))
        """
        from matplotlib import pyplot as plt
        covariates = _to_list(covariates)
        n_covariates = len(covariates)
        values = np.asarray(values)
        # A 1d values iterable means one covariate varied over several values;
        # reshape it to an (n_values, 1) column so the 2d code path applies.
        if len(values.shape) == 1:
            values = values[None, :].T
        if n_covariates != values.shape[1]:
            raise ValueError("The number of covariates must equal to second dimension of the values array.")
        # Fail early if any requested covariate was not part of the fit.
        for covariate in covariates:
            if covariate not in self.params_.index:
                raise KeyError("covariate `%s` is not present in the original dataset" % covariate)
        set_kwargs_drawstyle(kwargs, "steps-post")
        if self.strata is None:
            # Non-stratified model: everything is drawn on a single axis.
            axes = kwargs.pop("ax", None) or plt.figure().add_subplot(111)
            # Start from the (normalized) mean covariate vector, then overwrite
            # only the covariates being varied — "all else held equal".
            x_bar = self._norm_mean.to_frame().T
            X = pd.concat([x_bar] * values.shape[0])
            # Identity-matrix values are the one-hot/dummy-variable case, which
            # gets simpler "name=1" legend labels.
            if np.array_equal(np.eye(n_covariates), values):
                X.index = ["%s=1" % c for c in covariates]
            else:
                X.index = [", ".join("%s=%g" % (c, v) for (c, v) in zip(covariates, row)) for row in values]
            for covariate, value in zip(covariates, values.T):
                X[covariate] = value
            self.predict_survival_function(X).plot(ax=axes, **kwargs)
            if plot_baseline:
                self.baseline_survival_.plot(ax=axes, ls=":", color="k", drawstyle="steps-post")
        else:
            # Stratified model: one figure per stratum, since each stratum has
            # its own baseline survival curve.
            axes = []
            for stratum, baseline_survival_ in self.baseline_survival_.iteritems():
                ax = plt.figure().add_subplot(1, 1, 1)
                x_bar = self._norm_mean.to_frame().T
                # Pin the strata columns to this stratum's values.
                for name, value in zip(_to_list(self.strata), _to_tuple(stratum)):
                    x_bar[name] = value
                X = pd.concat([x_bar] * values.shape[0])
                if np.array_equal(np.eye(len(covariates)), values):
                    X.index = ["%s=1" % c for c in covariates]
                else:
                    X.index = [", ".join("%s=%g" % (c, v) for (c, v) in zip(covariates, row)) for row in values]
                for covariate, value in zip(covariates, values.T):
                    X[covariate] = value
                self.predict_survival_function(X).plot(ax=ax, **kwargs)
                if plot_baseline:
                    baseline_survival_.plot(
                        ax=ax, ls=":", label="stratum %s baseline survival" % str(stratum), drawstyle="steps-post"
                    )
                plt.legend()
                axes.append(ax)
        return axes
    def check_assumptions(
        self, training_df, advice=True, show_plots=False, p_value_threshold=0.01, plot_n_bootstraps=10, columns=None
    ):
        """
        Use this function to test the proportional hazards assumption. See usage example at
        https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html
        Parameters
        -----------
        training_df: DataFrame
            the original DataFrame used in the call to ``fit(...)`` or a sub-sampled version.
        advice: boolean, optional
            display advice as output to the user's screen
        show_plots: boolean, optional
            display plots of the scaled schoenfeld residuals and loess curves. This is an eyeball test for violations.
            This will slow down the function significantly.
        p_value_threshold: float, optional
            the threshold to use to alert the user of violations. See note below.
        plot_n_bootstraps:
            in the plots displayed, also display plot_n_bootstraps bootstrapped loess curves. This will slow down
            the function significantly.
        columns: list, optional
            specify a subset of columns to test.
        Examples
        ----------
        >>> from lifelines.datasets import load_rossi
        >>> from lifelines import CoxPHFitter
        >>>
        >>> rossi = load_rossi()
        >>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
        >>>
        >>> cph.check_assumptions(rossi)
        Notes
        -------
        The ``p_value_threshold`` is arbitrarily set at 0.01. Under the null, some covariates
        will be below the threshold (i.e. by chance). This is compounded when there are many covariates.
        Similarly, when there are lots of observations, even minor deviances from the proportional hazard
        assumption will be flagged.
        With that in mind, it's best to use a combination of statistical tests and eyeball tests to
        determine the most serious violations.
        References
        -----------
        section 5 in https://socialsciences.mcmaster.ca/jfox/Books/Companion/appendices/Appendix-Cox-Regression.pdf,
        http://www.mwsug.org/proceedings/2006/stats/MWSUG-2006-SD08.pdf,
        http://eprints.lse.ac.uk/84988/1/06_ParkHendry2015-ReassessingSchoenfeldTests_Final.pdf
        """
        # The residuals are joined back onto training_df by index below, so a
        # non-unique index would silently duplicate/misalign rows.
        if not training_df.index.is_unique:
            raise IndexError(
                "`training_df` index should be unique for this exercise. Please make it unique or use `.reset_index(drop=True)` to force a unique index"
            )
        # Scaled Schoenfeld residuals are the basis of the proportional-hazards test.
        residuals = self.compute_residuals(training_df, kind="scaled_schoenfeld")
        # Run the test under two time transforms; a covariate is flagged if it
        # fails under either transform.
        test_results = proportional_hazard_test(
            self, training_df, time_transform=["rank", "km"], precomputed_residuals=residuals
        )
        residuals_and_duration = residuals.join(training_df[self.duration_col])
        # counter tracks how many covariates have been flagged so far; it also
        # controls one-time printing of the preamble and summary table.
        counter = 0
        n = residuals_and_duration.shape[0]
        for variable in self.params_.index.intersection(columns or self.params_.index):
            # Use the most pessimistic (smallest) p-value across transforms.
            minumum_observed_p_value = test_results.summary.loc[variable, "p"].min()
            if np.round(minumum_observed_p_value, 2) > p_value_threshold:
                continue
            counter += 1
            if counter == 1:
                # First violation: print the general caveats and the full
                # summary table exactly once.
                if advice:
                    print(
                        fill(
                            """The ``p_value_threshold`` is set at %g. Even under the null hypothesis of no violations, some covariates will be below the threshold by chance. This is compounded when there are many covariates. Similarly, when there are lots of observations, even minor deviances from the proportional hazard assumption will be flagged."""
                            % p_value_threshold,
                            width=100,
                        )
                    )
                    print()
                    print(
                        fill(
                            """With that in mind, it's best to use a combination of statistical tests and visual tests to determine the most serious violations. Produce visual plots using ``check_assumptions(..., show_plots=True)`` and looking for non-constant lines. See link [A] below for a full example.""",
                            width=100,
                        )
                    )
                    print()
                test_results.print_summary()
                print()
            print()
            print(
                "%d. Variable '%s' failed the non-proportional test: p-value is %s."
                % (counter, variable, format_p_value(4)(minumum_observed_p_value)),
                end="\n\n",
            )
            if advice:
                values = training_df[variable]
                value_counts = values.value_counts()
                n_uniques = value_counts.shape[0]
                # Arbitrary chosen 10 and 4 to check for ability to use strata col.
                # This should capture dichotomous / low cardinality values.
                if n_uniques <= 10 and value_counts.min() >= 5:
                    print(
                        fill(
                            "   Advice: with so few unique values (only {0}), you can include `strata=['{1}', ...]` in the call in `.fit`. See documentation in link [E] below.".format(
                                n_uniques, variable
                            ),
                            width=100,
                        )
                    )
                else:
                    print(
                        fill(
                            """   Advice 1: the functional form of the variable '{var}' might be incorrect. That is, there may be non-linear terms missing. The proportional hazard test used is very sensitive to incorrect functional forms. See documentation in link [D] below on how to specify a functional form.""".format(
                                var=variable
                            ),
                            width=100,
                        ),
                        end="\n\n",
                    )
                    print(
                        fill(
                            """   Advice 2: try binning the variable '{var}' using pd.cut, and then specify it in `strata=['{var}', ...]` in the call in `.fit`. See documentation in link [B] below.""".format(
                                var=variable
                            ),
                            width=100,
                        ),
                        end="\n\n",
                    )
                    print(
                        fill(
                            """   Advice 3: try adding an interaction term with your time variable. See documentation in link [C] below.""",
                            width=100,
                        ),
                        end="\n\n",
                    )
            if show_plots:
                from matplotlib import pyplot as plt
                fig = plt.figure()
                # plot variable against all time transformations.
                for i, (transform_name, transformer) in enumerate(TimeTransformers().iter(["rank", "km"]), start=1):
                    p_value = test_results.summary.loc[(variable, transform_name), "p"]
                    ax = fig.add_subplot(1, 2, i)
                    y = residuals_and_duration[variable]
                    # Restrict to observed (uncensored) event times only.
                    tt = transformer(self.durations, self.event_observed, self.weights)[self.event_observed.values]
                    ax.scatter(tt, y, alpha=0.75)
                    # Main loess fit; a flat line suggests proportional hazards hold.
                    y_lowess = lowess(tt.values, y.values)
                    ax.plot(tt, y_lowess, color="k", alpha=1.0, linewidth=2)
                    # bootstrap some possible other lowess lines. This is an approximation of the 100% confidence intervals
                    for _ in range(plot_n_bootstraps):
                        ix = sorted(np.random.choice(n, n))
                        tt_ = tt.values[ix]
                        y_lowess = lowess(tt_, y.values[ix])
                        ax.plot(tt_, y_lowess, color="k", alpha=0.30)
                    # Dashed zero-residual reference line; restore the x-limits
                    # that the scatter established.
                    best_xlim = ax.get_xlim()
                    ax.hlines(0, 0, tt.max(), linestyles="dashed", linewidths=1)
                    ax.set_xlim(best_xlim)
                    ax.set_xlabel("%s-transformed time\n(p=%.4f)" % (transform_name, p_value), fontsize=10)
                fig.suptitle("Scaled Schoenfeld residuals of '%s'" % variable, fontsize=14)
                plt.tight_layout()
                plt.subplots_adjust(top=0.90)
        if advice and counter > 0:
            # Footnote links referenced as [A]-[E] in the advice text above.
            print(
                dedent(
                    r"""
                    ---
                    [A] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html
                    [B] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Bin-variable-and-stratify-on-it
                    [C] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Introduce-time-varying-covariates
                    [D] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Modify-the-functional-form
                    [E] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Stratification
                    """
                )
            )
        if counter == 0:
            print("Proportional hazard assumption looks okay.")
| 40.374762 | 354 | 0.585868 |
91ba64e37706ae1e4223523b060a3928b5d8e678 | 393 | py | Python | nlp_server/config/test/test_config.py | asevans48/NLPServer | 6feb1d89748165f9efea40d0777d355044c48176 | [
"Apache-2.0"
] | null | null | null | nlp_server/config/test/test_config.py | asevans48/NLPServer | 6feb1d89748165f9efea40d0777d355044c48176 | [
"Apache-2.0"
] | null | null | null | nlp_server/config/test/test_config.py | asevans48/NLPServer | 6feb1d89748165f9efea40d0777d355044c48176 | [
"Apache-2.0"
] | null | null | null | """
Test configuration loading
@author aevans
"""
import os
from nlp_server.config import load_config
def test_load_config():
    """
    Test loading a configuration from the JSON fixture bundled with this test.

    The fixture path is resolved relative to *this file* rather than the
    process working directory: the original used ``os.path.curdir`` (the
    literal string ``'.'``), which made the test fail whenever pytest was
    invoked from any directory other than the test's own.
    """
    # Directory containing this test module, independent of the cwd.
    test_dir = os.path.dirname(os.path.abspath(__file__))
    # os.path.join is portable; manual os.path.sep joining is not.
    test_path = os.path.join(test_dir, 'data', 'test_config.json')
    cfg = load_config.load_config(test_path)
    assert cfg is not None
    assert cfg.use_gpu is False
91bc729480a0e69ec82630c25580e01aa1aa5937 | 4,469 | py | Python | frappe/utils/safe_exec.py | ektai/frappe3 | 44aa948b4d5a0d729eacfb3dabdc9c8894ae1799 | [
"MIT"
] | null | null | null | frappe/utils/safe_exec.py | ektai/frappe3 | 44aa948b4d5a0d729eacfb3dabdc9c8894ae1799 | [
"MIT"
] | null | null | null | frappe/utils/safe_exec.py | ektai/frappe3 | 44aa948b4d5a0d729eacfb3dabdc9c8894ae1799 | [
"MIT"
] | null | null | null |
import os, json, inspect
import mimetypes
from html2text import html2text
from RestrictedPython import compile_restricted, safe_globals
import RestrictedPython.Guards
import frappe
import frappe.utils
import frappe.utils.data
from frappe.website.utils import (get_shade, get_toc, get_next_link)
from frappe.modules import scrub
from frappe.www.printview import get_visible_columns
import frappe.exceptions | 29.401316 | 118 | 0.762587 |