hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
430c92ccfdcf3dc35e86f6e05e4602bd002c581a | 6,982 | py | Python | appimagebuilder/orchestrator.py | AppImageCrafters/AppImageBuilder | dd041050e65f8eff28f878a092fd07bcf3ec5a4d | [
"MIT"
] | null | null | null | appimagebuilder/orchestrator.py | AppImageCrafters/AppImageBuilder | dd041050e65f8eff28f878a092fd07bcf3ec5a4d | [
"MIT"
] | 1 | 2019-11-12T03:52:01.000Z | 2019-11-12T03:52:01.000Z | appimagebuilder/orchestrator.py | AppImageCrafters/AppImageBuilder | dd041050e65f8eff28f878a092fd07bcf3ec5a4d | [
"MIT"
] | null | null | null | # Copyright 2021 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import os
import pathlib
from appimagebuilder.utils.finder import Finder
from appimagebuilder.context import AppInfo, Context, BundleInfo
from appimagebuilder.commands.apt_deploy import AptDeployCommand
from appimagebuilder.commands.create_appimage import CreateAppImageCommand
from appimagebuilder.commands.file_deploy import FileDeployCommand
from appimagebuilder.commands.pacman_deploy import PacmanDeployCommand
from appimagebuilder.commands.run_script import RunScriptCommand
from appimagebuilder.commands.run_test import RunTestCommand
from appimagebuilder.commands.setup_app_info import SetupAppInfoCommand
from appimagebuilder.commands.setup_runtime import SetupRuntimeCommand
from appimagebuilder.commands.setup_symlinks import SetupSymlinksCommand
from appimagebuilder.commands.deploy_record import (
WriteDeployRecordCommand,
)
from appimagebuilder.recipe.roamer import Roamer
| 37.138298 | 83 | 0.660556 |
430cfcded466d6e4b55330f3c637f5af1e8d4960 | 2,743 | py | Python | API_Collections/googlemap_geocode.py | Musketeer-Liu/Auto_Coding_Tools_Box | 96ffe3f194eb3571d290086400ef518cef4e0774 | [
"MIT"
] | null | null | null | API_Collections/googlemap_geocode.py | Musketeer-Liu/Auto_Coding_Tools_Box | 96ffe3f194eb3571d290086400ef518cef4e0774 | [
"MIT"
] | null | null | null | API_Collections/googlemap_geocode.py | Musketeer-Liu/Auto_Coding_Tools_Box | 96ffe3f194eb3571d290086400ef518cef4e0774 | [
"MIT"
] | null | null | null | # python3 --> Enter Python Shell
# from geocode import getGeocodeLocation
# getGeocodeLocation("Place you wanto to query")
import httplib2
import json
# san_francisco = getGeocodeLocation("San Francisco, CA")
# response header: {'content-type': 'application/json; charset=UTF-8', 'date': 'Sat, 27 Jan 2018 06:25:35 GMT', 'expires': 'Sun, 28 Jan 2018 06:25:35 GMT', 'cache-control': 'public, max-age=86400', 'vary': 'Accept-Language', 'access-control-allow-origin': '*', 'server': 'mafe', 'content-length': '1749', 'x-xss-protection': '1; mode=block', 'x-frame-options': 'SAMEORIGIN', 'alt-svc': 'hq=":443"; ma=2592000; quic=51303431; quic=51303339; quic=51303338; quic=51303337; quic=51303335,quic=":443"; ma=2592000; v="41,39,38,37,35"', 'status': '200', '-content-encoding': 'gzip', 'content-location': 'https://maps.googleapis.com/maps/api/geocode/json?address=San+Francisco,+CA&key=AIzaSyDZHGnbFkjZcOEgYPpDqlO2YhBHKsNxhnE'}
# san_francisco
# {'results': [{'address_components': [{'long_name': 'San Francisco', 'short_name': 'SF', 'types': ['locality', 'political']}, {'long_name': 'San Francisco County', 'short_name': 'San Francisco County', 'types': ['administrative_area_level_2', 'political']}, {'long_name': 'California', 'short_name': 'CA', 'types': ['administrative_area_level_1', 'political']}, {'long_name': 'United States', 'short_name': 'US', 'types': ['country', 'political']}], 'formatted_address': 'San Francisco, CA, USA', 'geometry': {'bounds': {'northeast': {'lat': 37.9298239, 'lng': -122.28178}, 'southwest': {'lat': 37.6398299, 'lng': -123.173825}}, 'location': {'lat': 37.7749295, 'lng': -122.4194155}, 'location_type': 'APPROXIMATE', 'viewport': {'northeast': {'lat': 37.812,'lng': -122.3482}, 'southwest': {'lat': 37.70339999999999, 'lng': -122.527}}}, 'place_id': 'ChIJIQBpAG2ahYAR_6128GcTUEo', 'types': ['locality', 'political']}], 'status': 'OK'}
# san_francisco.keys()
# dict_keys(['results', 'status'])
# san_francisco['results'][0]['geometry']['location']['lat']
# 37.7749295
# san_francisco['results'][0]['geometry']['location']['lng']
# -122.4194155
| 66.902439 | 932 | 0.679913 |
430da45d8833848dec38a5b05491d18df5c37b6a | 1,717 | py | Python | backend/core/actions/actionGenerator.py | makakken/roseguarden | 9a867f3d5e979b990bf474dcba81e5e9d0814c6a | [
"MIT"
] | null | null | null | backend/core/actions/actionGenerator.py | makakken/roseguarden | 9a867f3d5e979b990bf474dcba81e5e9d0814c6a | [
"MIT"
] | 50 | 2021-03-28T03:06:19.000Z | 2021-10-18T12:36:16.000Z | backend/core/actions/actionGenerator.py | makakken/roseguarden | 9a867f3d5e979b990bf474dcba81e5e9d0814c6a | [
"MIT"
] | 1 | 2021-07-30T07:12:46.000Z | 2021-07-30T07:12:46.000Z | """
The roseguarden project
Copyright (C) 2018-2020 Marcus Drobisch,
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__authors__ = ["Marcus Drobisch"]
__contact__ = "roseguarden@fabba.space"
__credits__ = []
__license__ = "GPLv3"
| 27.253968 | 78 | 0.670355 |
430dcb1829dec99ac70255fe07e1c633f6a84f85 | 5,877 | py | Python | lib/csv/csv.py | arnscott/gcounter | ffb6628f1b1f0e6c70168ff738fd51fa08e0df18 | [
"MIT"
] | null | null | null | lib/csv/csv.py | arnscott/gcounter | ffb6628f1b1f0e6c70168ff738fd51fa08e0df18 | [
"MIT"
] | 1 | 2018-11-30T14:09:40.000Z | 2018-12-03T12:41:01.000Z | lib/csv/csv.py | arnscott/gcounter | ffb6628f1b1f0e6c70168ff738fd51fa08e0df18 | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2018 Aaron Michael Scott
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
import csv
import os
def reader(file_path='', delimiter=','):
    """Return a CSVReader for ``file_path``.

    :param file_path: path of an existing, readable CSV file
    :param delimiter: field delimiter passed through to CSVReader
    :raises FileNotFoundError: if ``file_path`` does not exist
    :raises PermissionError: if the file exists but is not readable
        (both are ``Exception`` subclasses, so existing callers that
        catch ``Exception`` keep working)
    """
    if not os.path.isfile(file_path):
        raise FileNotFoundError('{fname} does not exist'.format(fname=file_path))
    if not os.access(file_path, os.R_OK):
        raise PermissionError('{fname} exists but is not readable.'.format(fname=file_path))
    return CSVReader(file_path, delimiter=delimiter)
def writer(file_path='', headers=None):
    """Return a CSVWriter that will create ``file_path``.

    :param file_path: destination path; must not already exist
    :param headers: list of column headers.  Defaults to an empty list.
        (``None`` replaces the old ``headers=[]`` default, which was a
        shared mutable default argument; behavior is unchanged.)
    :raises FileExistsError: if ``file_path`` already exists
    :raises TypeError: if ``headers`` is not a list
    """
    if headers is None:
        headers = []
    if os.path.isfile(file_path):
        raise FileExistsError('{fname} is already a file. Please write to a new location.'.format(fname=file_path))
    if not isinstance(headers, list):
        raise TypeError('Headers need to be in a list object.')
    return CSVWriter(file_path=file_path, headers=headers)
def the_date():
    """Return today's date formatted as ``MM_DD_YYYY`` (e.g. ``01_31_2024``)."""
    return '{:%m_%d_%Y}'.format(datetime.date.today())
| 34.775148 | 109 | 0.612047 |
430e631a7ab886f89000f1e0dc6f369df4ae43f7 | 1,056 | py | Python | Module01/LearningQGIS_ThirdEdition_Code/Chapter6_code/export_map.py | karant17/Test | e44bf79f597d53de2b891372ffccf7f13c74ede3 | [
"MIT"
] | 7 | 2017-02-16T15:25:47.000Z | 2021-11-08T13:10:15.000Z | Module01/LearningQGIS_ThirdEdition_Code/Chapter6_code/export_map.py | karant17/Test | e44bf79f597d53de2b891372ffccf7f13c74ede3 | [
"MIT"
] | null | null | null | Module01/LearningQGIS_ThirdEdition_Code/Chapter6_code/export_map.py | karant17/Test | e44bf79f597d53de2b891372ffccf7f13c74ede3 | [
"MIT"
] | 7 | 2017-03-06T08:47:27.000Z | 2021-12-11T12:42:43.000Z | from PyQt4.QtGui import QImage, QPainter
from PyQt4.QtCore import QSize
# configure the output image
# 800x600 pixels at 92 dpi; QImage stores resolution in dots per metre,
# hence the dpi / 25.4 * 1000 conversion (dots/inch -> dots/mm -> dots/m).
width = 800
height = 600
dpi = 92
img = QImage(QSize(width, height), QImage.Format_RGB32)
img.setDotsPerMeterX(dpi / 25.4 * 1000)
img.setDotsPerMeterY(dpi / 25.4 * 1000)
# get the map layers and extent
# `iface` is the QGIS interface object implicitly available when this
# script is run from the QGIS Python console.
layers = [ layer.id() for layer in iface.legendInterface().layers() ]
extent = iface.mapCanvas().extent()
# configure map settings for export
mapSettings = QgsMapSettings()
mapSettings.setMapUnits(0)  # NOTE(review): 0 is a unit enum value -- confirm it matches the project's units
mapSettings.setExtent(extent)
mapSettings.setOutputDpi(dpi)
mapSettings.setOutputSize(QSize(width, height))
mapSettings.setLayers(layers)
mapSettings.setFlags(QgsMapSettings.Antialiasing | QgsMapSettings.UseAdvancedEffects | QgsMapSettings.ForceVectorOutput | QgsMapSettings.DrawLabeling)
# configure and run painter
# Render synchronously: start the job and block until it finishes
# before ending the painter.
p = QPainter()
p.begin(img)
mapRenderer = QgsMapRendererCustomPainterJob(mapSettings, p)
mapRenderer.start()
mapRenderer.waitForFinished()
p.end()
# save the result
img.save("C:/temp/custom_export.png","png") | 36.413793 | 151 | 0.773674 |
430f1a041d4b52037c87f1c1a590ae76e5b36f2e | 13,604 | py | Python | tools/generate_cropped_dataset.py | DIVA-DIA/DIVA-DAF | 0ae3b873d04f1852d9053cb4cb2fbc7bda73471c | [
"MIT"
] | 3 | 2022-02-10T17:35:41.000Z | 2022-03-04T10:38:58.000Z | tools/generate_cropped_dataset.py | DIVA-DIA/DIVA-DAF | 0ae3b873d04f1852d9053cb4cb2fbc7bda73471c | [
"MIT"
] | 3 | 2022-02-02T09:12:18.000Z | 2022-02-16T13:42:30.000Z | tools/generate_cropped_dataset.py | DIVA-DIA/DIVA-DAF | 0ae3b873d04f1852d9053cb4cb2fbc7bda73471c | [
"MIT"
] | null | null | null | """
Load a dataset of historic documents by specifying the folder where its located.
"""
import argparse
# Utils
import itertools
import logging
import math
from datetime import datetime
from pathlib import Path
from torchvision.datasets.folder import has_file_allowed_extension, pil_loader
from torchvision.transforms import functional as F
from tqdm import tqdm
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.gif')
JPG_EXTENSIONS = ('.jpg', '.jpeg')
def get_img_paths_uncropped(directory):
    """
    Collect the image files found in the direct subdirectories of ``directory``.

    Parameters
    ----------
    directory: str or Path
        parent directory containing one level of subfolders with images inside

    Returns
    -------
    paths: list of (Path, str) tuples
        the image path and the name (stem) of its parent subfolder
    """
    directory = Path(directory).expanduser()
    if not directory.is_dir():
        # Fail soft: log and return an empty list instead of logging and then
        # crashing on the iterdir() call below (the previous behavior).
        logging.error(f'Directory not found ({directory})')
        return []

    paths = []
    for subdir in sorted(directory.iterdir()):
        if not subdir.is_dir():
            continue
        for img_path in sorted(subdir.iterdir()):
            # Same check torchvision's has_file_allowed_extension performs.
            if str(img_path).lower().endswith(IMG_EXTENSIONS):
                # iterdir() already yields paths prefixed with `subdir`;
                # re-joining (the old `subdir / img_name`) duplicated the
                # prefix for relative input paths.
                paths.append((img_path, str(subdir.stem)))
    return paths
if __name__ == '__main__':
    def _str2bool(value):
        """Parse a CLI boolean; argparse's ``type=bool`` treats any non-empty
        string (including 'False') as True, so parse the text explicitly."""
        return str(value).strip().lower() in ('true', '1', 'yes', 'y')

    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_path',
                        help='Path to the root folder of the dataset (contains train/val/test)',
                        type=Path,
                        required=True)
    parser.add_argument('-o', '--output_path',
                        help='Path to the output folder',
                        type=Path,
                        required=True)
    parser.add_argument('-tr', '--crop_size_train',
                        help='Size of the crops in the training set',
                        type=int,
                        required=True)
    parser.add_argument('-v', '--crop_size_val',
                        help='Size of the crops in the validation set',
                        type=int,
                        required=True)
    parser.add_argument('-te', '--crop_size_test',
                        help='Size of the crops in the test set',
                        type=int,
                        required=True)
    parser.add_argument('-ov', '--overlap',
                        help='Overlap of the different crops (between 0-1)',
                        type=float,
                        default=0.5)
    parser.add_argument('-l', '--leading_zeros_length',
                        help='amount of leading zeros to encode the coordinates',
                        type=int,
                        default=4)
    parser.add_argument('-oe', '--override_existing',
                        help='If true overrides the images ',
                        type=_str2bool,
                        default=False)
    args = parser.parse_args()
    # Argument names deliberately match CroppedDatasetGenerator's keyword
    # parameters so the namespace can be splatted straight into it.
    dataset_generator = CroppedDatasetGenerator(**args.__dict__)
    dataset_generator.write_crops()
# example call arguments
# -i
# /Users/voegtlil/Documents/04_Datasets/003-DataSet/CB55-10-segmentation
# -o
# /Users/voegtlil/Desktop/fun
# -tr
# 300
# -v
# 300
# -te
# 256
# example call arguments
# -i
# /dataset/DIVA-HisDB/segmentation/CB55
# -o
# /net/research-hisdoc/datasets/semantic_segmentation/datasets_cropped/temp-CB55
# -tr
# 300
# -v
# 300
# -te
# 256
# dataset_generator = CroppedDatasetGenerator(
# input_path=Path('/dataset/DIVA-HisDB/segmentation/CB55'),
# output_path=Path('/net/research-hisdoc/datasets/semantic_segmentation/datasets_cropped/CB55'),
# crop_size_train=300,
# crop_size_val=300,
# crop_size_test=256,
# overlap=0.5,
# leading_zeros_length=4,
# override_existing=False)
# dataset_generator.write_crops()
| 39.777778 | 120 | 0.585049 |
430f3dd58c283b4aea777f240b325f4a7f3a3026 | 332 | py | Python | run.py | seanzhangJM/torch_model_demo | 3ab3e841e77cf780198516c1910c906acdd3082d | [
"MIT"
] | null | null | null | run.py | seanzhangJM/torch_model_demo | 3ab3e841e77cf780198516c1910c906acdd3082d | [
"MIT"
] | null | null | null | run.py | seanzhangJM/torch_model_demo | 3ab3e841e77cf780198516c1910c906acdd3082d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @Time : 2021/12/27 14:04
# @Author : zhangjianming
# @Email : YYDSPanda@163.com
# @File : run_task.py
# @Software: PyCharm
import sys
sys.path.extend(["."])
from torch_model_demo.task.run_task import train_fashion_demo
if __name__ == '__main__':
train_fashion_demo()
| 19.529412 | 61 | 0.683735 |
430f4b6111489ee13ac7ae5b12340f3777b684e0 | 11,275 | py | Python | practice/4_tracking/tracker.py | OrangeRedeng/CV-SUMMER-CAMP-2021 | 74a65d0b21e4876e1fc1c3d931af76193f36e617 | [
"Apache-2.0"
] | 13 | 2021-07-05T08:44:33.000Z | 2021-10-13T09:57:58.000Z | practice/4_tracking/tracker.py | OrangeRedeng/CV-SUMMER-CAMP-2021 | 74a65d0b21e4876e1fc1c3d931af76193f36e617 | [
"Apache-2.0"
] | 117 | 2021-07-06T11:21:50.000Z | 2021-10-06T15:48:50.000Z | practice/4_tracking/tracker.py | OrangeRedeng/CV-SUMMER-CAMP-2021 | 74a65d0b21e4876e1fc1c3d931af76193f36e617 | [
"Apache-2.0"
] | 43 | 2021-04-26T07:45:14.000Z | 2021-11-06T11:19:05.000Z | import numpy as np
import math
import logging as log
import sys
from tqdm import tqdm
from common.feature_distance import calc_features_similarity
from common.common_objects import DetectedObject, validate_detected_object, Bbox
from common.common_objects import get_bbox_center, get_dist, calc_bbox_area
from common.find_best_assignment import solve_assignment_problem
from common.annotation import AnnotationObject, AnnotationStorage
def convert_tracks_to_annotation_storage(tracks):
    """Flatten a list of tracks into an AnnotationStorage.

    For every frame a track covers, its interpolated bbox is rounded
    outward to integer pixels and wrapped into an AnnotationObject; the
    result is emitted ordered by frame index, then by track id.
    """
    per_frame = {}
    for track in tqdm(tracks, desc="Converting"):
        tid = track.get_id()
        start = track.objects[0].frame_index
        stop = track.objects[-1].frame_index
        for idx in range(start, stop + 1):
            box = track.get_bbox_for_frame(idx)
            # Round outward so the integer bbox always contains the original.
            rounded = Bbox(math.floor(box.tl_x), math.floor(box.tl_y),
                           math.ceil(box.br_x), math.ceil(box.br_y))
            detection = DetectedObject(frame_index=idx,
                                       bbox=rounded,
                                       appearance_feature=[])
            per_frame.setdefault(idx, {})[tid] = AnnotationObject(detect_obj=detection,
                                                                  track_id=tid)
    # Deterministic output order: frame index first, track id second.
    flat = [per_frame[f][t]
            for f in sorted(per_frame)
            for t in sorted(per_frame[f])]
    return AnnotationStorage.create_annotation_storage_from_list(flat)
| 41.300366 | 123 | 0.655787 |
431041bc3b78a2b35eead16f02c1cdb50d1dd82f | 16,308 | py | Python | gm2m/managers.py | mikewolfd/django-gm2m | a8cecc4d6d56c83e8d9c623888f5d07cb6ad8771 | [
"MIT"
] | null | null | null | gm2m/managers.py | mikewolfd/django-gm2m | a8cecc4d6d56c83e8d9c623888f5d07cb6ad8771 | [
"MIT"
] | null | null | null | gm2m/managers.py | mikewolfd/django-gm2m | a8cecc4d6d56c83e8d9c623888f5d07cb6ad8771 | [
"MIT"
] | null | null | null | from django.db import router
from django.db.models import Q, Manager
from django.db import connections
from .contenttypes import ct, get_content_type
from .query import GM2MTgtQuerySet
def create_gm2m_related_manager(superclass=None, **kwargs):
    """
    Dynamically build a manager class bound to a single instance (source
    or target side of the relation).
    """
    if superclass is None:
        # Generic target model manager: no source manager superclass to mix in.
        bases = (GM2MBaseTgtManager, GM2MBaseManager)
    else:
        # Source model manager: also derive from the provided superclass.
        bases = (GM2MBaseSrcManager, GM2MBaseManager, superclass)
    # Django's Manager constructor sets `model` to None, so stash it under
    # the class attribute '_model'; it is retrieved again in __init__.
    kwargs['_model'] = kwargs.pop('model')
    return type(Manager)('GM2MManager', bases, kwargs)
| 35.841758 | 81 | 0.531886 |
43109599c3c8fc1c990f73e67e01c2d6cb021aa0 | 6,802 | py | Python | rastreador-de-bolso/TwitterListener.py | vitorduarte/RastreadorDeBolso | 5c3bab222fced6f0d7367299b5007a628a408b4f | [
"MIT"
] | 1 | 2020-10-15T21:36:06.000Z | 2020-10-15T21:36:06.000Z | rastreador-de-bolso/TwitterListener.py | vitorduarte/RastreadorDeBolso | 5c3bab222fced6f0d7367299b5007a628a408b4f | [
"MIT"
] | 3 | 2021-06-08T21:38:20.000Z | 2022-01-13T02:46:26.000Z | rastreador-de-bolso/TwitterListener.py | BambataTech/rastreador-de-bolso | 5c3bab222fced6f0d7367299b5007a628a408b4f | [
"MIT"
] | null | null | null | from selenium.webdriver.chrome.options import Options
from selenium import webdriver
import logging
import coloredlogs
import os
import pathlib
import time
import twitter as tt
from utils import retry
from fetch_likes import get_user_likes, login
from conf.settings import USER_ID, USERNAME, PASSWORD
CURR_PATH = pathlib.Path(__file__).parent.absolute()
TWEETS_FOLDER = os.path.join(CURR_PATH, 'screenshots')
LIKED_FOLDER = os.path.join(CURR_PATH, 'screenshots', 'liked')
| 36.569892 | 92 | 0.58718 |
43118cb0eb019b0c97db7741f34ce6ca041f8dc1 | 296 | py | Python | ASR_TransV1/Load_sp_model.py | HariKrishna-Vydana/ASR_Transformer | a37dc7f1add148b14ca1d265d72fc4e9d9dd0fc0 | [
"MIT"
] | 1 | 2020-10-25T00:21:40.000Z | 2020-10-25T00:21:40.000Z | ASR_TransV1/Load_sp_model.py | HariKrishna-Vydana/ASR_Transformer | a37dc7f1add148b14ca1d265d72fc4e9d9dd0fc0 | [
"MIT"
] | null | null | null | ASR_TransV1/Load_sp_model.py | HariKrishna-Vydana/ASR_Transformer | a37dc7f1add148b14ca1d265d72fc4e9d9dd0fc0 | [
"MIT"
] | 1 | 2021-09-08T10:32:55.000Z | 2021-09-08T10:32:55.000Z | #!/usr/bin/python
import sys
import os
from os.path import join, isdir
import sentencepiece as spm
#--------------------------
#--------------------------
| 19.733333 | 49 | 0.581081 |
4312500ffaaa31023ff14a2c64c200a842122fb2 | 2,213 | py | Python | fiepipedesktoplib/gitlabserver/shell/manager.py | leith-bartrich/fiepipe_desktop | 5136141d67a59e9a2afb79f368a6a02f2d61d2da | [
"MIT"
] | null | null | null | fiepipedesktoplib/gitlabserver/shell/manager.py | leith-bartrich/fiepipe_desktop | 5136141d67a59e9a2afb79f368a6a02f2d61d2da | [
"MIT"
] | null | null | null | fiepipedesktoplib/gitlabserver/shell/manager.py | leith-bartrich/fiepipe_desktop | 5136141d67a59e9a2afb79f368a6a02f2d61d2da | [
"MIT"
] | null | null | null | import typing
from fiepipelib.gitlabserver.data.gitlab_server import GitLabServer
from fiepipelib.gitlabserver.routines.manager import GitLabServerManagerInteractiveRoutines
from fiepipedesktoplib.gitlabserver.shell.gitlab_hostname_input_ui import GitLabHostnameInputDefaultShellUI
from fiepipedesktoplib.gitlabserver.shell.gitlab_username_input_ui import GitLabUsernameInputDefaultShellUI
from fiepipedesktoplib.gitlabserver.shell.gitlab_private_token_input_ui import GitLabPrivateTokenInputDefaultShellUI
from fiepipedesktoplib.gitlabserver.shell.gitlabserver import GitLabServerShell
from fiepipedesktoplib.gitlabserver.shell.server_name_var_command import GitLabServerNameVar
from fiepipedesktoplib.locallymanagedtypes.shells.AbstractLocalManagedTypeCommand import LocalManagedTypeCommand
from fiepipedesktoplib.shells.AbstractShell import AbstractShell
from fiepipedesktoplib.shells.variables.fqdn_var_command import FQDNVarCommand
if __name__ == '__main__':
    # Script entry point.
    # NOTE(review): `main` is neither defined nor imported in the visible
    # part of this file -- verify it exists elsewhere in the module.
    main()
| 49.177778 | 129 | 0.770899 |
4312e79aaad5f7fe2f84f838da0893835b628082 | 470 | py | Python | fairseq/models/wav2vec/eteh_model/transformer/repeat.py | gaochangfeng/fairseq | 70a468230b8fb558caa394322b02fface663e17a | [
"MIT"
] | null | null | null | fairseq/models/wav2vec/eteh_model/transformer/repeat.py | gaochangfeng/fairseq | 70a468230b8fb558caa394322b02fface663e17a | [
"MIT"
] | null | null | null | fairseq/models/wav2vec/eteh_model/transformer/repeat.py | gaochangfeng/fairseq | 70a468230b8fb558caa394322b02fface663e17a | [
"MIT"
] | null | null | null | import torch
def repeat(N, fn):
    """Build a MultiSequential from N modules produced by `fn`.

    :param int N: repeat time
    :param function fn: factory called with each module index 0..N-1
    :return: repeated modules
    :rtype: MultiSequential
    """
    modules = [fn(index) for index in range(N)]
    return MultiSequential(*modules)
| 21.363636 | 54 | 0.634043 |
4313dd60cdb94904d246c40eddbdc84286d54a32 | 857 | py | Python | torch_lib/Nets.py | troncosoae/jetson-exp | 0c1a46b969b95bb9c350f78955ae6ca7f41b43b5 | [
"MIT"
] | null | null | null | torch_lib/Nets.py | troncosoae/jetson-exp | 0c1a46b969b95bb9c350f78955ae6ca7f41b43b5 | [
"MIT"
] | null | null | null | torch_lib/Nets.py | troncosoae/jetson-exp | 0c1a46b969b95bb9c350f78955ae6ca7f41b43b5 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
| 31.740741 | 70 | 0.584597 |
4313de468396c7f2ca9e8be49eccd525b21cb61b | 309 | py | Python | test123.py | umousesonic/zinc | 9e170269d3b209a80ac79d5850894ddc1d95c62f | [
"BSD-3-Clause"
] | null | null | null | test123.py | umousesonic/zinc | 9e170269d3b209a80ac79d5850894ddc1d95c62f | [
"BSD-3-Clause"
] | null | null | null | test123.py | umousesonic/zinc | 9e170269d3b209a80ac79d5850894ddc1d95c62f | [
"BSD-3-Clause"
] | null | null | null | from runner import runner
if __name__ == '__main__':
    # Manual smoke test: send a hard-coded Java snippet through `runner`.
    r = runner()
    # One-line Java program assembled from adjacent string literals.
    p = 'public class main{public static void main (String[] args){' \
        'public String StudentAnswer(String myInput){' \
        'return "myOutput"; ' \
        '}System.out.println("hello world!");}}'
print (r.sendCode(p, '')) | 34.333333 | 70 | 0.601942 |
4315cbe9d3768c563f263560ae3ec49245d0ab6e | 8,101 | py | Python | beancount_bot/bot.py | dumbPy/beancount_bot | 388a17f165c22b30e7f6377161eb5bf63578168a | [
"MIT"
] | null | null | null | beancount_bot/bot.py | dumbPy/beancount_bot | 388a17f165c22b30e7f6377161eb5bf63578168a | [
"MIT"
] | null | null | null | beancount_bot/bot.py | dumbPy/beancount_bot | 388a17f165c22b30e7f6377161eb5bf63578168a | [
"MIT"
] | null | null | null | import traceback
import telebot
from telebot import apihelper
from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton, MessageEntity, Message, CallbackQuery
from beancount_bot import transaction
from beancount_bot.config import get_config, load_config
from beancount_bot.dispatcher import Dispatcher
from beancount_bot.i18n import _
from beancount_bot.session import get_session, SESS_AUTH, get_session_for, set_session
from beancount_bot.task import load_task, get_task
from beancount_bot.transaction import get_manager
from beancount_bot.util import logger
# Middleware must be enabled before the TeleBot instance is created;
# presumably the session middleware relies on it -- TODO confirm.
apihelper.ENABLE_MIDDLEWARE = True
# Token is intentionally None here; it is assigned from config in serving().
bot = telebot.TeleBot(token=None, parse_mode=None)
#######
# Authentication #
#######
def check_auth() -> bool:
    """Return whether the current session has passed token authentication."""
    if SESS_AUTH not in bot.session:
        return False
    return bot.session[SESS_AUTH]
def auth_token_handler(message: Message):
    """Treat a message from an unauthenticated chat as a login attempt.

    Compares the message text with the configured auth token and, on a
    match, marks the sender's session as authenticated. Does nothing when
    the session is already logged in.
    """
    if check_auth():
        return
    # Any message from an unauthenticated user is interpreted as a token.
    expected = get_config('bot.auth_token')
    if message.text == expected:
        set_session(message.from_user.id, SESS_AUTH, True)
        bot.reply_to(message, _("Authentic success"))
    else:
        bot.reply_to(message, _("Authentication token error"))
#######
# instruction #
#######
def show_usage_for(message: Message, d: Dispatcher):
    """Reply to `message` with the usage text of a single dispatcher."""
    text = _("help{name}\n\n{usage}").format(
        name=d.get_name(),
        usage=d.get_usage(),
    )
    bot.reply_to(message, text)
#######
# trade #
#######
def serving():
    """Configure the bot from config values and block on long polling."""
    # Inject the real token into the module-level bot instance.
    bot.token = get_config('bot.token')
    # Optional HTTPS proxy for all Telegram API calls.
    proxy = get_config('bot.proxy')
    if proxy is not None:
        apihelper.proxy = {'https': proxy}
    # Blocks until the process is stopped.
    bot.infinity_polling()
| 30.340824 | 178 | 0.641155 |
4317b20c71fc0c90d2e65c623d90563c13f6fda9 | 8,933 | py | Python | test/unit/metrics/test_group_sklearn_wrappers.py | GeGao2014/fairlearn | b0841c8b07ead6a285bdbc0ea61cac2338cbc96e | [
"MIT"
] | 2 | 2019-11-30T09:02:42.000Z | 2019-12-02T10:24:29.000Z | test/unit/metrics/test_group_sklearn_wrappers.py | GeGao2014/fairlearn | b0841c8b07ead6a285bdbc0ea61cac2338cbc96e | [
"MIT"
] | null | null | null | test/unit/metrics/test_group_sklearn_wrappers.py | GeGao2014/fairlearn | b0841c8b07ead6a285bdbc0ea61cac2338cbc96e | [
"MIT"
] | 1 | 2020-03-24T14:42:04.000Z | 2020-03-24T14:42:04.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import pytest
import numpy as np
import sklearn.metrics as skm
import fairlearn.metrics as metrics
# ======================================================
a = "a"
b = "b"
c = "c"
Y_true = [0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
Y_pred = [1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
Y_true_ternary = [a, b, c, c, c, b, b, b, c, c, a, a, a, a, a, b, c, c]
Y_pred_ternary = [b, c, c, c, b, b, b, b, b, c, a, a, c, a, a, b, c, c]
groups = [3, 4, 1, 0, 0, 0, 3, 2, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
weight = [1, 2, 3, 1, 2, 3, 4, 2, 3, 3, 2, 1, 2, 3, 1, 2, 3, 4]
group2 = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# =======================================================
# Define as a dictionary so that the actual name can be seen
# when pytest builds the tests
supported_metrics_weighted = [(skm.accuracy_score, metrics.group_accuracy_score),
(skm.confusion_matrix, metrics.group_confusion_matrix),
(skm.zero_one_loss, metrics.group_zero_one_loss)]
# The following only work with binary data when called with their default arguments
supported_metrics_weighted_binary = [(skm.precision_score, metrics.group_precision_score),
(skm.recall_score, metrics.group_recall_score),
(skm.roc_auc_score, metrics.group_roc_auc_score),
(skm.mean_squared_error, metrics.group_mean_squared_error),
(skm.r2_score, metrics.group_r2_score)]
supported_metrics_weighted_binary = supported_metrics_weighted_binary + supported_metrics_weighted
metrics_no_sample_weights = [(skm.max_error, metrics.group_max_error),
(skm.mean_absolute_error, metrics.group_mean_absolute_error),
(skm.mean_squared_log_error, metrics.group_mean_squared_log_error),
(skm.median_absolute_error, metrics.group_median_absolute_error)]
supported_metrics_unweighted = metrics_no_sample_weights + supported_metrics_weighted_binary
# =======================================================
# ======================================================================================
def test_group_accuracy_score_unnormalized():
    """group_accuracy_score with normalize=False must match sklearn's raw count."""
    result = metrics.group_accuracy_score(Y_true, Y_pred, groups, normalize=False)
    # `normalize` is keyword-only in modern scikit-learn; the old positional
    # call `skm.accuracy_score(Y_true, Y_pred, False)` raises a TypeError.
    expected_overall = skm.accuracy_score(Y_true, Y_pred, normalize=False)
    assert result.overall == expected_overall
# ======================================================================================
# ======================================================================================
# ======================================================================================
# ======================================================================================
# ======================================================================================
# =============================================================================================
# =============================================================================================
# =============================================================================================
| 39.179825 | 98 | 0.636069 |
4317fc5d9fcdfa4c3f22eb8a8bb944e1c61c7e2a | 12,833 | py | Python | deeplearning/tf_util.py | cbschaff/nlimb | f0564b00bab1b3367aaa88163e49bebc88f349bb | [
"MIT"
] | 12 | 2018-10-26T19:33:05.000Z | 2022-01-17T11:47:59.000Z | deeplearning/tf_util.py | cbschaff/nlimb | f0564b00bab1b3367aaa88163e49bebc88f349bb | [
"MIT"
] | 9 | 2020-01-28T22:30:55.000Z | 2022-03-11T23:32:04.000Z | deeplearning/tf_util.py | cbschaff/nlimb | f0564b00bab1b3367aaa88163e49bebc88f349bb | [
"MIT"
] | 3 | 2019-07-09T14:56:01.000Z | 2019-11-18T06:58:41.000Z | """
Adapted from OpenAI Baselines.
"""
import numpy as np
import tensorflow as tf # pylint: ignore-module
import random
import copy
import os
import functools
import collections
import multiprocessing
def switch(condition, then_expression, else_expression):
    """Switches between two operations depending on a scalar value (int or bool).

    Both `then_expression` and `else_expression` must be symbolic tensors
    of the *same shape*.

    # Arguments
        condition: scalar tensor.
        then_expression: TensorFlow operation.
        else_expression: TensorFlow operation.
    """
    # tf.cond can lose the static shape, so capture it and re-apply after.
    original_shape = copy.copy(then_expression.get_shape())
    result = tf.cond(tf.cast(condition, 'bool'),
                     lambda: then_expression,
                     lambda: else_expression)
    result.set_shape(original_shape)
    return result
# ================================================================
# Extras
# ================================================================
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
    """Huber loss: quadratic for |x| < delta, linear beyond.

    Reference: https://en.wikipedia.org/wiki/Huber_loss
    """
    abs_x = tf.abs(x)
    quadratic = tf.square(x) * 0.5
    linear = delta * (abs_x - 0.5 * delta)
    return tf.where(abs_x < delta, quadratic, linear)
# ================================================================
# Global session
# ================================================================
def make_session(num_cpu=None, make_default=False):
    """Return a TF session restricted to `num_cpu` CPU threads.

    When `num_cpu` is None the thread count comes from the RCALL_NUM_CPU
    environment variable, falling back to the machine's CPU count.
    `make_default=True` yields an InteractiveSession instead.
    """
    if num_cpu is None:
        num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
    config = tf.ConfigProto(
        inter_op_parallelism_threads=num_cpu,
        intra_op_parallelism_threads=num_cpu)
    config.gpu_options.allocator_type = 'BFC'
    session_cls = tf.InteractiveSession if make_default else tf.Session
    return session_cls(config=config)
def single_threaded_session():
    """Return a TF session pinned to a single CPU thread (see `make_session`)."""
    return make_session(num_cpu=1)
ALREADY_INITIALIZED = set()
def initialize():
    """Run the initializer for every global variable not yet initialized.

    Variables already handled in a previous call (tracked in the
    module-level ALREADY_INITIALIZED set) are skipped.
    """
    pending = set(tf.global_variables()).difference(ALREADY_INITIALIZED)
    session = tf.get_default_session()
    session.run(tf.variables_initializer(pending))
    ALREADY_INITIALIZED.update(pending)
# ================================================================
# Saving variables and setting up experiment directories
# ================================================================
# ================================================================
# Model components
# ================================================================
def batch_to_seq(h, nbatch, nsteps, flat=False):
    """Split a time-major batch tensor into a list of per-step tensors.

    Assumes time-major data: `h` is reshaped to [nsteps, nbatch] (flat) or
    [nsteps, nbatch, -1], then split along the time axis into `nsteps`
    tensors with the leading time dimension squeezed away.
    """
    target_shape = [nsteps, nbatch] if flat else [nsteps, nbatch, -1]
    stacked = tf.reshape(h, target_shape)
    steps = tf.split(axis=0, num_or_size_splits=nsteps, value=stacked)
    return [tf.squeeze(step, [0]) for step in steps]
def seq_to_batch(h, flat = False):
    """Merge a list of per-step tensors back into one batch tensor.

    Inverse of `batch_to_seq` (time-major data): the caller can reshape the
    result back to [nsteps, nbatch, *obs_shape].
    """
    step_shape = h[0].get_shape().as_list()
    if flat:
        return tf.reshape(tf.stack(values=h, axis=0), [-1])
    assert len(step_shape) > 1
    feat_dim = h[0].get_shape()[-1].value
    return tf.reshape(tf.concat(axis=0, values=h), [-1, feat_dim])
def ortho_init(scale=1.0):
    # NOTE(review): `_ortho_init` is not defined anywhere in this excerpt --
    # upstream this is normally a nested initializer closure over `scale`;
    # confirm against the full file before relying on this function.
    return _ortho_init
def normc_initializer(std=1.0):
    # NOTE(review): `_initializer` is not defined anywhere in this excerpt --
    # upstream this is normally a nested normalized-column initializer
    # closure over `std`; confirm against the full file.
    return _initializer
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
    """Unrolled LSTM over the step list `xs` with per-step masks `ms`.

    xs: list of [nbatch, nin] input tensors, one per timestep
        (mutated in place: each entry is replaced by that step's hidden state).
    ms: list of per-step mask tensors; c and h are multiplied by (1 - m),
        i.e. zeroed wherever m == 1 before each step.
    s:  [nbatch, 2*nh] packed state, split into cell state c and hidden h.
    Returns the (mutated) xs list and the final packed state.
    """
    nbatch, nin = [v.value for v in xs[0].get_shape()]
    nsteps = len(xs)
    with tf.variable_scope(scope):
        # Input, recurrent and bias parameters for all four gates at once.
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        # Reset state where the mask is set (presumably episode starts --
        # TODO confirm the mask convention with the caller).
        c = c*(1-m)
        h = h*(1-m)
        # One fused matmul produces all gate pre-activations.
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u
        h = o*tf.tanh(c)
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
    return xs, s
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
    """Just like Theano function. Take a bunch of tensorflow placeholders and expressions
    computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
    values to be fed to the input's placeholders and produces the values of the expressions
    in outputs.
    Input values can be passed in the same order as inputs or can be provided as kwargs based
    on placeholder name (passed to constructor or accessible via placeholder.op.name).
    Example:
        x = tf.placeholder(tf.int32, (), name="x")
        y = tf.placeholder(tf.int32, (), name="y")
        z = 3 * x + 2 * y
        lin = function([x, y], z, givens={y: 0})
        with single_threaded_session():
            initialize()
            assert lin(2) == 6
            assert lin(x=3) == 9
            assert lin(2, 2) == 10
            assert lin(x=2, y=3) == 12
    Parameters
    ----------
    inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
        list of input arguments
    outputs: [tf.Variable] or tf.Variable
        list of outputs or a single output to be returned from function. Returned
        value will also have the same shape.
    """
    # Dispatch on the container type of `outputs`; `_Function` (defined
    # elsewhere in this module, not visible in this excerpt) does the actual
    # feed/run work.
    if isinstance(outputs, list):
        return _Function(inputs, outputs, updates, givens=givens)
    elif isinstance(outputs, (dict, collections.OrderedDict)):
        # Preserve the mapping type and key order of `outputs`.
        f = _Function(inputs, outputs.values(), updates, givens=givens)
        return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
    else:
        # Single tensor: unwrap the one-element result list.
        f = _Function(inputs, [outputs], updates, givens=givens)
        return lambda *args, **kwargs: f(*args, **kwargs)[0]
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
    """Return the static shape of tensor `x` as a list of ints.

    The shape must be fully known (no None dimensions).
    """
    dims = x.get_shape().as_list()
    assert all(isinstance(dim, int) for dim in dims), \
        "shape function assumes that shape is fully known"
    return dims
def numel(x):
    # Total number of elements in tensor `x` (product of its static dims).
    return intprod(var_shape(x))
def intprod(x):
    """Product of the entries of `x` as a Python int (empty input gives 1)."""
    total = np.prod(x)
    return int(total)
def flatgrad(loss, var_list, clip_norm=None):
    # Gradients of `loss` w.r.t. `var_list`, flattened into one 1-D tensor.
    # Variables with no gradient contribute zeros so the layout stays stable.
    grads = tf.gradients(loss, var_list)
    if clip_norm is not None:
        # Global-norm clipping across all gradients before flattening.
        grads, _ = tf.clip_by_global_norm(grads, clip_norm=clip_norm)
    return tf.concat(axis=0, values=[
        tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
        for (v, grad) in zip(var_list, grads)
    ])
def flattenallbut0(x):
    # Reshape `x` to 2-D, keeping dim 0 (batch) and flattening the rest.
    return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
def reset():
    # Forget which variables were initialized and clear the default graph,
    # so `initialize()` starts from scratch after this call.
    global ALREADY_INITIALIZED
    ALREADY_INITIALIZED = set()
    tf.reset_default_graph()
"""
Random Seeds
"""
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
| 34.590296 | 116 | 0.580924 |
431830fa7f1548920feb149a4d5dc17216d7a063 | 1,695 | py | Python | Util/constant.py | RoboCupULaval/StrategyAI | ccddde144f2c0a67113d2e5ffe7c75ed9d4a3d19 | [
"MIT"
] | 13 | 2018-03-14T10:20:10.000Z | 2021-12-10T05:36:47.000Z | Util/constant.py | RoboCupULaval/StrategyIA | ccddde144f2c0a67113d2e5ffe7c75ed9d4a3d19 | [
"MIT"
] | 200 | 2016-04-29T23:13:01.000Z | 2018-03-13T14:36:39.000Z | Util/constant.py | RoboCupULaval/StrategyIA | ccddde144f2c0a67113d2e5ffe7c75ed9d4a3d19 | [
"MIT"
] | 45 | 2015-07-04T18:57:39.000Z | 2018-01-11T16:11:13.000Z | # Under MIT License, see LICENSE.txt
""" Module dfinissant des constantes de programmations python pour l'IA """
from enum import Enum
ROBOT_RADIUS = 90
ROBOT_DIAMETER = ROBOT_RADIUS * 2
ROBOT_CENTER_TO_KICKER = 60
BALL_RADIUS = 21
MAX_PLAYER_ON_FIELD_PER_TEAM = 6
BALL_OUTSIDE_FIELD_BUFFER = 200
# Radius and angles for tactics
DISTANCE_BEHIND = ROBOT_RADIUS + 30 # in millimeters
ANGLE_TO_GRAB_BALL = 1 # in radians; must be large in case ball moves fast
RADIUS_TO_GRAB_BALL = ROBOT_RADIUS + 30
ANGLE_TO_HALT = 0.05 # 3 degrees
RADIUS_TO_HALT = ROBOT_RADIUS + BALL_RADIUS
REASONABLE_OFFSET = 50 # To take into account the camera precision and other things
# Rules
KEEPOUT_DISTANCE_FROM_BALL = 500 + ROBOT_RADIUS + REASONABLE_OFFSET
KEEPOUT_DISTANCE_FROM_GOAL = ROBOT_RADIUS + REASONABLE_OFFSET
PADDING_DEFENSE_AREA = 100
# Rule 5.2: Minimum movement before a ball is "in play"
IN_PLAY_MIN_DISTANCE = 50
# Rule 8.2.1: Distance from the opposing team defending zone
INDIRECT_KICK_OFFSET = 200
# Deadzones
POSITION_DEADZONE = ROBOT_RADIUS * 0.1
# Orientation abs_tol
ORIENTATION_ABSOLUTE_TOLERANCE = 0.01 # 0.5 degree
# TeamColor
| 22.6 | 84 | 0.728614 |
4318e19519ef3b4ec8fbfd551e4ad75ec635df69 | 9,102 | py | Python | src/transbigdata/CoordinatesConverter.py | cirno1w/transport | f088b4111992dd5ec6371db71cf1d26689cf8c26 | [
"BSD-3-Clause"
] | 1 | 2022-03-06T00:15:19.000Z | 2022-03-06T00:15:19.000Z | src/transbigdata/CoordinatesConverter.py | anitagraser/transbigdata | 0eb972c78f9154c0a3f780f197ef9af406b2bb71 | [
"BSD-3-Clause"
] | null | null | null | src/transbigdata/CoordinatesConverter.py | anitagraser/transbigdata | 0eb972c78f9154c0a3f780f197ef9af406b2bb71 | [
"BSD-3-Clause"
] | null | null | null |
import numpy as np
# Scale factor used by the BD-09 offset formulas (pi * 3000 / 180).
x_pi = 3.14159265358979324 * 3000.0 / 180.0
# Plain pi, written out as a literal.
pi = 3.1415926535897932384626
# Ellipsoid parameters used by the GCJ-02 correction: semi-major axis in
# meters and first eccentricity squared (Krasovsky 1940 values).
a = 6378245.0
ee = 0.00669342162296594323
def gcj02tobd09(lng, lat):
    """
    Convert coordinates from GCJ02 to BD09
    Parameters
    -------
    lng : Series or number
        Longitude
    lat : Series or number
        Latitude
    return
    -------
    lng : Series or number
        Longitude (Converted)
    lat : Series or number
        Latitude (Converted)
    """
    # Accept either pandas Series (vectorised) or plain numbers.
    try:
        lng, lat = lng.astype(float), lat.astype(float)
    except:
        lng, lat = float(lng), float(lat)
    # Polar form of the BD-09 perturbation: tweak radius and angle, then add
    # the fixed Baidu offsets.
    radius = np.sqrt(lng * lng + lat * lat) + 0.00002 * np.sin(lat * x_pi)
    angle = np.arctan2(lat, lng) + 0.000003 * np.cos(lng * x_pi)
    return radius * np.cos(angle) + 0.0065, radius * np.sin(angle) + 0.006
def bd09togcj02(bd_lon, bd_lat):
    """
    Convert coordinates from BD09 to GCJ02
    Parameters
    -------
    lng : Series or number
        Longitude
    lat : Series or number
        Latitude
    return
    -------
    lng : Series or number
        Longitude (Converted)
    lat : Series or number
        Latitude (Converted)
    """
    # Accept either pandas Series (vectorised) or plain numbers.
    try:
        bd_lon, bd_lat = bd_lon.astype(float), bd_lat.astype(float)
    except:
        bd_lon, bd_lat = float(bd_lon), float(bd_lat)
    # Undo the fixed Baidu offsets, then invert the polar perturbation.
    x = bd_lon - 0.0065
    y = bd_lat - 0.006
    radius = np.sqrt(x * x + y * y) - 0.00002 * np.sin(y * x_pi)
    angle = np.arctan2(y, x) - 0.000003 * np.cos(x * x_pi)
    return radius * np.cos(angle), radius * np.sin(angle)
def wgs84togcj02(lng, lat):
    """
    Convert coordinates from WGS84 to GCJ02
    Parameters
    -------
    lng : Series or number
        Longitude
    lat : Series or number
        Latitude
    return
    -------
    lng : Series or number
        Longitude (Converted)
    lat : Series or number
        Latitude (Converted)
    """
    # Accept either pandas Series (vectorised) or plain numbers.
    try:
        lng = lng.astype(float)
        lat = lat.astype(float)
    except:
        lng = float(lng)
        lat = float(lat)
    # Offset polynomials centred on (105E, 35N); `transformlat` and
    # `transformlng` are defined elsewhere in this module (not visible here).
    dlat = transformlat(lng - 105.0, lat - 35.0)
    dlng = transformlng(lng - 105.0, lat - 35.0)
    radlat = lat / 180.0 * pi
    magic = np.sin(radlat)
    magic = 1 - ee * magic * magic
    sqrtmagic = np.sqrt(magic)
    # Scale the offsets by the local ellipsoid radii of curvature.
    dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
    dlng = (dlng * 180.0) / (a / sqrtmagic * np.cos(radlat) * pi)
    mglat = lat + dlat
    mglng = lng + dlng
    return mglng, mglat
def gcj02towgs84(lng, lat):
    """
    Convert coordinates from GCJ02 to WGS84
    Parameters
    -------
    lng : Series or number
        Longitude
    lat : Series or number
        Latitude
    return
    -------
    lng : Series or number
        Longitude (Converted)
    lat : Series or number
        Latitude (Converted)
    """
    # Accept either pandas Series (vectorised) or plain numbers.
    try:
        lng = lng.astype(float)
        lat = lat.astype(float)
    except:
        lng = float(lng)
        lat = float(lat)
    # Compute the same forward offset as wgs84togcj02 (`transformlat` /
    # `transformlng` are defined elsewhere in this module, not visible here)...
    dlat = transformlat(lng - 105.0, lat - 35.0)
    dlng = transformlng(lng - 105.0, lat - 35.0)
    radlat = lat / 180.0 * pi
    magic = np.sin(radlat)
    magic = 1 - ee * magic * magic
    sqrtmagic = np.sqrt(magic)
    dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
    dlng = (dlng * 180.0) / (a / sqrtmagic * np.cos(radlat) * pi)
    mglat = lat + dlat
    mglng = lng + dlng
    # ...then subtract it: 2*orig - (orig + offset) = orig - offset, a
    # first-order approximation of the inverse transform.
    return lng * 2 - mglng, lat * 2 - mglat
def wgs84tobd09(lon,lat):
    """
    Convert coordinates from WGS84 to BD09 (via the intermediate GCJ02 frame).
    Parameters
    -------
    lng : Series or number
        Longitude
    lat : Series or number
        Latitude
    return
    -------
    lng : Series or number
        Longitude (Converted)
    lat : Series or number
        Latitude (Converted)
    """
    # Accept either pandas Series (vectorised) or plain numbers.
    try:
        lon, lat = lon.astype(float), lat.astype(float)
    except:
        lon, lat = float(lon), float(lat)
    gcj_lon, gcj_lat = wgs84togcj02(lon, lat)
    return gcj02tobd09(gcj_lon, gcj_lat)
def bd09towgs84(lon,lat):
    """
    Convert coordinates from BD09 to WGS84 (via the intermediate GCJ02 frame).
    Parameters
    -------
    lng : Series or number
        Longitude
    lat : Series or number
        Latitude
    return
    -------
    lng : Series or number
        Longitude (Converted)
    lat : Series or number
        Latitude (Converted)
    """
    # Accept either pandas Series (vectorised) or plain numbers.
    try:
        lon, lat = lon.astype(float), lat.astype(float)
    except:
        lon, lat = float(lon), float(lat)
    gcj_lon, gcj_lat = bd09togcj02(lon, lat)
    return gcj02towgs84(gcj_lon, gcj_lat)
def bd09mctobd09(x, y):
    """
    Convert coordinates from BD09MC (Baidu Mercator, meters) to BD09 (degrees)
    Parameters
    -------
    x : Series, array or number
        x coordinates
    y : Series, array or number
        y coordinates
    return
    -------
    lng : Series, array or number
        Longitude (Converted)
    lat : Series, array or number
        Latitude (Converted)
    Raises
    -------
    ValueError
        If |y| does not fall in any supported latitude band.
    """
    # Latitude bands (in Mercator meters) and the matching polynomial
    # coefficient rows of Baidu's piecewise inverse projection.
    MCBAND = [12890594.86, 8362377.87, 5591021, 3481989.83, 1678043.12, 0]
    MC2LL = [
        [1.410526172116255e-8, 0.00000898305509648872, -1.9939833816331, 200.9824383106796, -187.2403703815547, 91.6087516669843, -23.38765649603339, 2.57121317296198, -0.03801003308653, 17337981.2],
        [-7.435856389565537e-9, 0.000008983055097726239, -0.78625201886289, 96.32687599759846, -1.85204757529826, -59.36935905485877, 47.40033549296737, -16.50741931063887, 2.28786674699375, 10260144.86],
        [-3.030883460898826e-8, 0.00000898305509983578, 0.30071316287616, 59.74293618442277, 7.357984074871, -25.38371002664745, 13.45380521110908, -3.29883767235584, 0.32710905363475, 6856817.37],
        [-1.981981304930552e-8, 0.000008983055099779535, 0.03278182852591, 40.31678527705744, 0.65659298677277, -4.44255534477492, 0.85341911805263, 0.12923347998204, -0.04625736007561, 4482777.06],
        [3.09191371068437e-9, 0.000008983055096812155, 0.00006995724062, 23.10934304144901, -0.00023663490511, -0.6321817810242, -0.00663494467273, 0.03430082397953, -0.00466043876332, 2555164.4],
        [2.890871144776878e-9, 0.000008983055095805407, -3.068298e-8, 7.47137025468032, -0.00000353937994, -0.02145144861037, -0.00001234426596, 0.00010322952773, -0.00000323890364, 826088.5]
    ]
    # Pick the coefficient row from one representative |y| value.  The
    # original only accepted pandas Series (`y.iloc[0]`) despite documenting
    # "Series or number"; plain numbers and arrays now work too, and a
    # negative y no longer leaves the row undefined (UnboundLocalError).
    if hasattr(y, 'iloc'):
        y_repr = float(y.iloc[0])
    else:
        y_repr = float(np.asarray(y).flat[0])
    coeffs = None
    for bound, row in zip(MCBAND, MC2LL):
        if abs(y_repr) >= bound:
            coeffs = row
            break
    if coeffs is None:
        raise ValueError('y=%r is outside the supported BD09MC range' % y_repr)
    lng = coeffs[0] + coeffs[1] * np.abs(x)
    scaled = np.abs(y) / coeffs[9]
    # Degree-6 polynomial in scaled |y| gives the latitude.
    lat = sum(c * scaled ** k for k, c in enumerate(coeffs[2:9]))
    return lng, lat
def getdistance(lon1, lat1, lon2, lat2):
    '''
    Great-circle (haversine) distance between two points given as
    [lon1, lat1, lon2, lat2] in decimal degrees. The output is in meters.
    Parameters
    -------
    lon1 : Series or number
        Start longitude
    lat1 : Series or number
        Start latitude
    lon2 : Series or number
        End longitude
    lat2 : Series or number
        End latitude
    return
    -------
    distance : Series or number
        The distance
    '''
    # Accept either pandas Series (vectorised) or plain numbers.
    try:
        lon1 = lon1.astype(float)
        lat1 = lat1.astype(float)
        lon2 = lon2.astype(float)
        lat2 = lat2.astype(float)
    except:
        lon1 = float(lon1)
        lat1 = float(lat1)
        lon2 = float(lon2)
        lat2 = float(lat2)
    # Degrees -> radians.
    lon1, lat1, lon2, lat2 = (v * np.pi / 180 for v in (lon1, lat1, lon2, lat2))
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    # Haversine formula with a mean Earth radius of 6371 km.
    hav = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2
    arc = 2 * np.arcsin(hav ** 0.5)
    return arc * 6371 * 1000
def transform_shape(gdf, method):
    '''
    Apply a coordinate-converting function to every geometry in a
    GeoDataFrame and return the converted copy (the input is not modified).
    Parameters
    -------
    gdf : GeoDataFrame
        Geographic elements
    method : function
        The coordinate converting function
    return
    -------
    gdf : GeoDataFrame
        The result of converting
    '''
    from shapely.ops import transform
    converted = gdf.copy()
    converted['geometry'] = converted['geometry'].apply(
        lambda geom: transform(method, geom))
    return converted
431a5970e46f202baf11c026a61fe4077fcce70d | 8,343 | py | Python | cloudify_rest_client/exceptions.py | aleixsanchis/cloudify-rest-client | 6acaadee8286ab647465824d3c8e13d4c43ca9ba | [
"Apache-2.0"
] | null | null | null | cloudify_rest_client/exceptions.py | aleixsanchis/cloudify-rest-client | 6acaadee8286ab647465824d3c8e13d4c43ca9ba | [
"Apache-2.0"
] | null | null | null | cloudify_rest_client/exceptions.py | aleixsanchis/cloudify-rest-client | 6acaadee8286ab647465824d3c8e13d4c43ca9ba | [
"Apache-2.0"
] | null | null | null | ########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# Map API error-code strings to their exception classes, so a server-side
# error can be rehydrated into the matching client exception.
# NOTE(review): the exception classes listed below are defined earlier in
# this module (not visible in this excerpt); each exposes an ERROR_CODE
# attribute used as the lookup key.
ERROR_MAPPING = dict([
    (error.ERROR_CODE, error)
    for error in [
        DeploymentEnvironmentCreationInProgressError,
        DeploymentEnvironmentCreationPendingError,
        IllegalExecutionParametersError,
        NoSuchIncludeFieldError,
        MissingRequiredDeploymentInputError,
        UnknownDeploymentInputError,
        UnknownDeploymentSecretError,
        UnsupportedDeploymentGetSecretError,
        FunctionsEvaluationError,
        UnknownModificationStageError,
        ExistingStartedDeploymentModificationError,
        DeploymentModificationAlreadyEndedError,
        UserUnauthorizedError,
        ForbiddenError,
        MaintenanceModeActiveError,
        MaintenanceModeActivatingError,
        NotModifiedError,
        InvalidExecutionUpdateStatus,
        PluginInUseError,
        PluginInstallationError,
        PluginInstallationTimeout,
        NotClusterMaster,
        RemovedFromCluster,
        DeploymentPluginNotFound]])
| 31.364662 | 79 | 0.737744 |
431a5a90835e15a36f13f4092d02d4382895d659 | 1,570 | py | Python | sample-demo-lambda-app/lambda_function.py | sriharshams-aws/aws-codeguru-profiler-python-demo-application | 36e63bc6364871e6a7b29437c1fb68243d2c54f4 | [
"Apache-2.0"
] | 6 | 2020-12-04T00:08:02.000Z | 2021-06-12T05:23:25.000Z | sample-demo-lambda-app/lambda_function.py | sriharshams-aws/aws-codeguru-profiler-python-demo-application | 36e63bc6364871e6a7b29437c1fb68243d2c54f4 | [
"Apache-2.0"
] | 6 | 2020-12-09T11:40:01.000Z | 2021-09-23T09:03:18.000Z | sample-demo-lambda-app/lambda_function.py | sriharshams-aws/aws-codeguru-profiler-python-demo-application | 36e63bc6364871e6a7b29437c1fb68243d2c54f4 | [
"Apache-2.0"
] | 21 | 2020-12-09T01:35:48.000Z | 2022-01-28T09:18:55.000Z | import boto3
import logging
import os
from random import randrange
from urllib.request import urlopen
# It is not recommended to enable DEBUG logs in production,
# this is just to show an example of a recommendation
# by Amazon CodeGuru Profiler.
logging.getLogger('botocore').setLevel(logging.DEBUG)
SITE = 'http://www.python.org/'
CW_NAMESPACE = 'ProfilerPythonDemo'
S3_BUCKET = os.environ['S3_BUCKET']
| 27.54386 | 81 | 0.659236 |
431a7feaee1aa406c7c2670e03999a74240a7466 | 475 | py | Python | api/error_handler.py | chuo06/palindrome | 57660301390d7b2b05780e1f6ab0343e43726619 | [
"MIT"
] | null | null | null | api/error_handler.py | chuo06/palindrome | 57660301390d7b2b05780e1f6ab0343e43726619 | [
"MIT"
] | 1 | 2015-10-22T16:56:55.000Z | 2015-10-22T16:56:55.000Z | api/error_handler.py | chuo06/palindrome | 57660301390d7b2b05780e1f6ab0343e43726619 | [
"MIT"
] | null | null | null | from functools import wraps
from werkzeug.exceptions import HTTPException
from api.exceptions import MessageNotFound
| 25 | 45 | 0.650526 |
431a878ee70ba62b9e15ce81300906f432dc9b82 | 406 | py | Python | src/nile/core/run.py | kootsZhin/nile | 5b685158c06418a126229cfbcaeaaf78a38cd8a0 | [
"MIT"
] | 121 | 2021-10-30T08:42:44.000Z | 2022-03-31T13:17:58.000Z | src/nile/core/run.py | kootsZhin/nile | 5b685158c06418a126229cfbcaeaaf78a38cd8a0 | [
"MIT"
] | 56 | 2021-10-31T16:45:06.000Z | 2022-03-31T04:41:08.000Z | src/nile/core/run.py | kootsZhin/nile | 5b685158c06418a126229cfbcaeaaf78a38cd8a0 | [
"MIT"
] | 22 | 2021-11-18T11:24:56.000Z | 2022-03-30T08:15:18.000Z | """Command to run Nile scripts."""
import logging
from importlib.machinery import SourceFileLoader
from nile.nre import NileRuntimeEnvironment
def run(path, network):
    """Run a Nile script, passing it the NRE object.

    Parameters
    ----------
    path : str
        Path to a Python script defining a ``run(nre)`` entry point.
    network : str
        Network name forwarded to ``NileRuntimeEnvironment``.
    """
    import importlib.util

    # Silence the root logger while user scripts execute.
    logger = logging.getLogger()
    logger.disabled = True
    # ``SourceFileLoader.load_module`` is deprecated (and removed in
    # Python 3.12); load the script through importlib.util instead.
    loader = SourceFileLoader("script", path)
    spec = importlib.util.spec_from_loader("script", loader)
    script = importlib.util.module_from_spec(spec)
    loader.exec_module(script)
    nre = NileRuntimeEnvironment(network)
    script.run(nre)
| 27.066667 | 59 | 0.73399 |
431ad1cf3cfa9d05b69ae287dc97e25b7fff4c83 | 548 | py | Python | Python/Basic Data Types/Lists/Solution.py | PawarAditi/HackerRank | fcd9d1450ee293372ce5f1d4a3b7284ecf472657 | [
"MIT"
] | 219 | 2018-06-17T19:47:22.000Z | 2022-03-27T15:28:56.000Z | Python/Basic Data Types/Lists/Solution.py | PawarAditi/HackerRank | fcd9d1450ee293372ce5f1d4a3b7284ecf472657 | [
"MIT"
] | 2 | 2020-08-12T16:47:41.000Z | 2020-12-15T17:05:57.000Z | Python/Basic Data Types/Lists/Solution.py | PawarAditi/HackerRank | fcd9d1450ee293372ce5f1d4a3b7284ecf472657 | [
"MIT"
] | 182 | 2018-12-12T21:36:50.000Z | 2022-03-26T17:49:51.000Z | array = []
for _ in range(int(input())):
command = input().strip().split(" ")
cmd_type = command[0]
if (cmd_type == "print"):
print(array)
elif (cmd_type == "sort"):
array.sort()
elif (cmd_type == "reverse"):
array.reverse()
elif (cmd_type == "pop"):
array.pop()
elif (cmd_type == "remove"):
array.remove(int(command[1]))
elif (cmd_type == "append"):
array.append(int(command[1]))
elif (cmd_type == "insert"):
array.insert(int(command[1]), int(command[2])) | 30.444444 | 54 | 0.541971 |
431afd38b43ccf5ad48d645a4d0327a638eb0852 | 441 | py | Python | dbestclient/ml/density.py | horeapinca/DBEstClient | 6ccbb24853c31f2a8cc567e03c09ca7aa31e2d26 | [
"BSD-2-Clause"
] | null | null | null | dbestclient/ml/density.py | horeapinca/DBEstClient | 6ccbb24853c31f2a8cc567e03c09ca7aa31e2d26 | [
"BSD-2-Clause"
] | null | null | null | dbestclient/ml/density.py | horeapinca/DBEstClient | 6ccbb24853c31f2a8cc567e03c09ca7aa31e2d26 | [
"BSD-2-Clause"
] | 1 | 2020-09-28T14:22:54.000Z | 2020-09-28T14:22:54.000Z | # Created by Qingzhi Ma at 2019-07-23
# All right reserved
# Department of Computer Science
# the University of Warwick
# Q.Ma.2@warwick.ac.uk
from sklearn.neighbors import KernelDensity
| 24.5 | 59 | 0.671202 |
431b587034ff91b11e453596c7cd2a1cc508eb0c | 920 | py | Python | setup.py | panchambanerjee/access_spotify | d1c50d1553718755d58d034e8d2049f986ef5f84 | [
"MIT"
] | 4 | 2020-07-26T20:41:03.000Z | 2020-08-04T05:36:32.000Z | setup.py | panchambanerjee/access_spotify | d1c50d1553718755d58d034e8d2049f986ef5f84 | [
"MIT"
] | null | null | null | setup.py | panchambanerjee/access_spotify | d1c50d1553718755d58d034e8d2049f986ef5f84 | [
"MIT"
] | 1 | 2020-08-04T05:36:34.000Z | 2020-08-04T05:36:34.000Z | #!/usr/bin/env python
import setuptools
from setuptools import setup
from os import path
# Read the package requirements
with open("requirements.txt", "r") as f:
requirements = [line.rstrip("\n") for line in f if line != "\n"]
# Read the contents of the README file
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='access-spotify',
version="1.1",
author="pancham_banerjee",
author_email="panchajanya.banerjee@gmail.com",
packages=setuptools.find_packages(),
scripts=["./bin/access_script.py"],
install_requires=requirements,
license="MIT",
description="A package to get all album and track info for an artist by querying the Spotify API",
long_description=long_description,
long_description_content_type='text/markdown'
)
| 31.724138 | 104 | 0.706522 |
431c1fde6c2d30474384ca5aeeb0ee0eb0db4a71 | 5,096 | py | Python | mundiapi/models/update_plan_request.py | hugocpolos/MundiAPI-PYTHON | 164545cc58bf18c946d5456e9ba4d55a378a339a | [
"MIT"
] | 10 | 2017-08-30T15:53:00.000Z | 2021-02-11T18:06:56.000Z | mundiapi/models/update_plan_request.py | hugocpolos/MundiAPI-PYTHON | 164545cc58bf18c946d5456e9ba4d55a378a339a | [
"MIT"
] | 4 | 2018-05-05T15:15:09.000Z | 2021-12-22T00:52:41.000Z | mundiapi/models/update_plan_request.py | hugocpolos/MundiAPI-PYTHON | 164545cc58bf18c946d5456e9ba4d55a378a339a | [
"MIT"
] | 7 | 2017-04-27T13:46:52.000Z | 2021-04-14T13:44:23.000Z | # -*- coding: utf-8 -*-
"""
mundiapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
| 35.144828 | 84 | 0.577119 |
431c4388fab05fa311c4c60aa774db64074aff3d | 528 | py | Python | hearthstone/hslog/utils.py | bertokhoury/python-hearthstone | 635a8a14b85f468c1ab1d0bc9d0bcffaa00fda43 | [
"MIT"
] | 1 | 2021-01-29T04:54:23.000Z | 2021-01-29T04:54:23.000Z | hearthstone/hslog/utils.py | bertokhoury/python-hearthstone | 635a8a14b85f468c1ab1d0bc9d0bcffaa00fda43 | [
"MIT"
] | null | null | null | hearthstone/hslog/utils.py | bertokhoury/python-hearthstone | 635a8a14b85f468c1ab1d0bc9d0bcffaa00fda43 | [
"MIT"
] | null | null | null | from hearthstone.enums import GameTag, TAG_TYPES
| 22.956522 | 74 | 0.702652 |
431e555f5efee68273402bccef7dcb0a30ea9d0c | 2,364 | py | Python | ejemplo_clase_00.py | ernestoarzabala/Curso-Python-Utch | ed5cd89ed85a1021d78fd17d495b3b3ec0203c77 | [
"Unlicense"
] | null | null | null | ejemplo_clase_00.py | ernestoarzabala/Curso-Python-Utch | ed5cd89ed85a1021d78fd17d495b3b3ec0203c77 | [
"Unlicense"
] | null | null | null | ejemplo_clase_00.py | ernestoarzabala/Curso-Python-Utch | ed5cd89ed85a1021d78fd17d495b3b3ec0203c77 | [
"Unlicense"
] | null | null | null | # Archivo ejemplo 00 de creacion de clases en Python
from math import gcd # greatest common denominator = Maximo Comun Divisor (MCD)
if __name__ == "__main__":
a = Fraccion(5,12)
print(a)
b = Fraccion(3,5)
c = a*b
c_real = c.a_numero_real()
print("Multiplicar la fraccion {} por la fraccion {} da como resultado la fraccion {} que es equivalente a {}".format(a,b,c,c_real))# Escribe tu cdigo aqu :-)
a = Fraccion(1,2)
print(a)
b = Fraccion(1,4)
c = a+b
c_real = c.a_numero_real()
print("Sumar la fraccion {} con la fraccion {} da como resultado la fraccion {} que es equivalente a {}".format(a,b,c,c_real))# Escribe tu cdigo aqu :-)
| 38.754098 | 164 | 0.651861 |
431f67abd21ada1dae45fd70ed84a4c58f410719 | 65 | py | Python | addons14/base_rest/__init__.py | odoochain/addons_oca | 55d456d798aebe16e49b4a6070765f206a8885ca | [
"MIT"
] | 1 | 2021-06-10T14:59:13.000Z | 2021-06-10T14:59:13.000Z | addons14/base_rest/__init__.py | odoochain/addons_oca | 55d456d798aebe16e49b4a6070765f206a8885ca | [
"MIT"
] | null | null | null | addons14/base_rest/__init__.py | odoochain/addons_oca | 55d456d798aebe16e49b4a6070765f206a8885ca | [
"MIT"
] | 1 | 2021-04-09T09:44:44.000Z | 2021-04-09T09:44:44.000Z | from . import models
from . import components
from . import http
| 16.25 | 24 | 0.769231 |
43204edf29ab75f14a0b24a7c9fd04d677528ff0 | 732 | py | Python | recs/live_project_popularity_recommender.py | WingCode/live-project | 977dfbcaade35d8173dbb6ace102fe8998f1cdf4 | [
"MIT"
] | null | null | null | recs/live_project_popularity_recommender.py | WingCode/live-project | 977dfbcaade35d8173dbb6ace102fe8998f1cdf4 | [
"MIT"
] | 8 | 2021-01-05T00:06:26.000Z | 2022-03-12T01:05:06.000Z | recs/live_project_popularity_recommender.py | WingCode/live-project | 977dfbcaade35d8173dbb6ace102fe8998f1cdf4 | [
"MIT"
] | 4 | 2021-01-04T07:23:17.000Z | 2022-03-18T12:29:37.000Z | import os
import pandas as pd
| 25.241379 | 101 | 0.545082 |
43233962745ef76d4115b7625720cc7b8baedc4d | 178 | py | Python | resource/pypi/cffi-1.9.1/testing/cffi0/snippets/distutils_module/setup.py | hipnusleo/Laserjet | f53e0b740f48f2feb0c0bb285ec6728b313b4ccc | [
"Apache-2.0"
] | null | null | null | resource/pypi/cffi-1.9.1/testing/cffi0/snippets/distutils_module/setup.py | hipnusleo/Laserjet | f53e0b740f48f2feb0c0bb285ec6728b313b4ccc | [
"Apache-2.0"
] | null | null | null | resource/pypi/cffi-1.9.1/testing/cffi0/snippets/distutils_module/setup.py | hipnusleo/Laserjet | f53e0b740f48f2feb0c0bb285ec6728b313b4ccc | [
"Apache-2.0"
] | null | null | null |
from distutils.core import setup
import snip_basic_verify
# Ship the snippet module plus the C extension produced by its CFFI verifier.
# NOTE(review): distutils is deprecated and removed in Python 3.12; this
# test snippet predates that.
setup(
    py_modules=['snip_basic_verify'],
    ext_modules=[snip_basic_verify.ffi.verifier.get_extension()])
| 22.25 | 66 | 0.758427 |
43245976f12a77315f00f3cf0db335fcb32e0255 | 647 | py | Python | pce/src/testing/test_pce.py | elise-baumgartner/onramp | beb3c807264fcb70d8069ff2e3990b0ce3f59912 | [
"BSD-3-Clause"
] | 2 | 2016-09-09T04:19:01.000Z | 2019-02-15T20:28:13.000Z | pce/src/testing/test_pce.py | elise-baumgartner/onramp | beb3c807264fcb70d8069ff2e3990b0ce3f59912 | [
"BSD-3-Clause"
] | 67 | 2016-06-02T19:37:56.000Z | 2018-02-22T05:23:45.000Z | pce/src/testing/test_pce.py | elise-baumgartner/onramp | beb3c807264fcb70d8069ff2e3990b0ce3f59912 | [
"BSD-3-Clause"
] | 9 | 2015-06-22T22:10:22.000Z | 2016-04-26T15:35:45.000Z | #!../env/bin/python
"""A simple test script for the PCE portion of OnRamp.
Usage: ./test_pce.py
This script is only intended to be run in a fresh install of the repository. It
has side-effects that could corrupt module and user data if run in a production
setting.
Prior to running this script, ensure that onramp/pce/bin/onramp_pce_install.py
has been called and that the server is running. Also Ensure
./test_pce_config.cfg contains the proper settings.
"""
import nose
import sys
if __name__ == '__main__':
    print(__doc__)
    # `raw_input` exists only on Python 2; fall back to `input` on Python 3
    # (the original crashed with NameError under Python 3).
    try:
        prompt = raw_input
    except NameError:
        prompt = input
    response = prompt('(C)ontinue or (A)bort? ')
    if response != 'C':
        sys.exit(0)
    nose.main()
| 26.958333 | 79 | 0.723338 |
4326718464c0594d943bc8eb250db3e42117293d | 58,893 | py | Python | tobac/plotting.py | w-herbst/tobac | 9f3b9812e9a13a26373e42d356f7d571366bb967 | [
"BSD-3-Clause"
] | 36 | 2018-11-12T10:42:22.000Z | 2022-03-08T04:29:58.000Z | tobac/plotting.py | w-herbst/tobac | 9f3b9812e9a13a26373e42d356f7d571366bb967 | [
"BSD-3-Clause"
] | 71 | 2018-12-04T13:11:54.000Z | 2022-03-30T23:15:26.000Z | tobac/plotting.py | w-herbst/tobac | 9f3b9812e9a13a26373e42d356f7d571366bb967 | [
"BSD-3-Clause"
] | 28 | 2018-11-19T07:51:02.000Z | 2022-02-17T16:26:40.000Z | import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import logging
from .analysis import lifetime_histogram
from .analysis import histogram_cellwise,histogram_featurewise
import numpy as np
def plot_mask_cell_track_follow(cell,track, cog, features, mask_total,
                    field_contour, field_filled,
                    width=10000,
                    name= 'test', plotdir='./',
                    file_format=['png'],figsize=(10/2.54, 10/2.54),dpi=300,
                    **kwargs):
    '''Save one figure per timestep of a cell's track, each centred on the
    cell, combining a filled background field with a contoured one.

    cell: id of the tracked cell to follow.
    track: DataFrame with columns 'cell', 'time', 'time_cell',
        'projection_x_coordinate', 'projection_y_coordinate'.
    cog: centre-of-gravity DataFrame, or None to skip.
    features: detected-features DataFrame, or None.
    mask_total: iris cube of the segmentation mask (cell ids, 0 = no cell).
    field_contour, field_filled: iris cubes for the contoured / filled
        background, or None.
    width: half-width of the window around the cell, in projection-coordinate
        units (presumably metres -- TODO confirm).
    name, plotdir, file_format, figsize, dpi: output naming/format options.
        NOTE(review): file_format uses a mutable default list; it is only
        read here, but confirm no caller mutates it.
    Extra kwargs are forwarded to plot_mask_cell_individual_follow.
    Output: files written to <plotdir>/<name>/<name>_<timestamp>.{png,pdf}.
    '''
    from iris import Constraint
    from numpy import unique
    import os
    track_cell=track[track['cell']==cell]
    for i_row,row in track_cell.iterrows():
        # Restrict all cubes to this timestep and a window of +/- width
        # around the cell's current position.
        constraint_time = Constraint(time=row['time'])
        constraint_x = Constraint(projection_x_coordinate = lambda cell: row['projection_x_coordinate']-width < cell < row['projection_x_coordinate']+width)
        constraint_y = Constraint(projection_y_coordinate = lambda cell: row['projection_y_coordinate']-width < cell < row['projection_y_coordinate']+width)
        constraint = constraint_time & constraint_x & constraint_y
        mask_total_i=mask_total.extract(constraint)
        if field_contour is None:
            field_contour_i=None
        else:
            field_contour_i=field_contour.extract(constraint)
        if field_filled is None:
            field_filled_i=None
        else:
            field_filled_i=field_filled.extract(constraint)
        # All cell ids visible in the window; always include the followed
        # cell and drop the 0 "no cell" marker.
        cells=list(unique(mask_total_i.core_data()))
        if cell not in cells:
            cells.append(cell)
        if 0 in cells:
            cells.remove(0)
        track_i=track[track['cell'].isin(cells)]
        track_i=track_i[track_i['time']==row['time']]
        if cog is None:
            cog_i=None
        else:
            cog_i=cog[cog['cell'].isin(cells)]
            cog_i=cog_i[cog_i['time']==row['time']]
        if features is None:
            features_i=None
        else:
            features_i=features[features['time']==row['time']]
        fig1, ax1 = plt.subplots(ncols=1, nrows=1, figsize=figsize)
        fig1.subplots_adjust(left=0.2, bottom=0.15, right=0.85, top=0.80)
        # Title: absolute time plus the cell's age formatted as HH:MM:SS.
        datestring_stamp = row['time'].strftime('%Y-%m-%d %H:%M:%S')
        celltime_stamp = "%02d:%02d:%02d" % (row['time_cell'].dt.total_seconds() // 3600,(row['time_cell'].dt.total_seconds() % 3600) // 60, row['time_cell'].dt.total_seconds() % 60 )
        title=datestring_stamp + ' , ' + celltime_stamp
        datestring_file = row['time'].strftime('%Y-%m-%d_%H%M%S')
        ax1=plot_mask_cell_individual_follow(cell_i=cell,track=track_i, cog=cog_i,features=features_i,
                                             mask_total=mask_total_i,
                                             field_contour=field_contour_i, field_filled=field_filled_i,
                                             width=width,
                                             axes=ax1,title=title,
                                             **kwargs)
        out_dir = os.path.join(plotdir, name)
        os.makedirs(out_dir, exist_ok=True)
        if 'png' in file_format:
            savepath_png = os.path.join(out_dir, name + '_' + datestring_file + '.png')
            fig1.savefig(savepath_png, dpi=dpi)
            logging.debug('field_contour field_filled Mask plot saved to ' + savepath_png)
        if 'pdf' in file_format:
            savepath_pdf = os.path.join(out_dir, name + '_' + datestring_file + '.pdf')
            fig1.savefig(savepath_pdf, dpi=dpi)
            logging.debug('field_contour field_filled Mask plot saved to ' + savepath_pdf)
        plt.close()
        plt.clf()
def plot_mask_cell_individual_follow(cell_i,track, cog,features, mask_total,
                                   field_contour, field_filled,
                                   axes=None,width=10000,
                                   label_field_contour=None, cmap_field_contour='Blues',norm_field_contour=None,
                                   linewidths_contour=0.8,contour_labels=False,
                                   vmin_field_contour=0,vmax_field_contour=50,levels_field_contour=None,nlevels_field_contour=10,
                                   label_field_filled=None,cmap_field_filled='summer',norm_field_filled=None,
                                   vmin_field_filled=0,vmax_field_filled=100,levels_field_filled=None,nlevels_field_filled=10,
                                   title=None
                                   ):
    '''Make an individual plot for one timestep, centred on the cell of interest,
    with one background field as filled contours and one as contour lines.

    All coordinates are plotted relative to the position of ``cell_i`` and
    converted from metres to kilometres. The cell of interest is drawn in dark
    red, all other cells in dark orange, and features (if given) as purple
    crosses.

    Parameters
    ----------
    cell_i : int
        Id of the cell to centre the plot on.
    track : pandas.DataFrame
        Track rows for this timestep; needs 'cell',
        'projection_x_coordinate' and 'projection_y_coordinate' columns.
    cog : pandas.DataFrame or None
        Centre-of-gravity positions ('cell', 'x_M', 'y_M'); skipped if None.
    features : pandas.DataFrame or None
        Detected feature positions; skipped if None.
    mask_total : iris.cube.Cube
        2D or 3D segmentation mask containing cell ids.
    field_contour, field_filled : iris.cube.Cube or None
        Background fields for the line / filled contours; skipped if None.
    axes : matplotlib.axes.Axes
        Axes to draw into.
    width : float
        Half-width of the plotted window around the cell (metres).
    title : str or None
        Axes title.

    Returns
    -------
    matplotlib.axes.Axes
        The axes the plot was drawn into.
    '''
    import numpy as np
    from .utils import mask_cell_surface
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    from matplotlib.colors import Normalize

    divider = make_axes_locatable(axes)

    # Position of the cell of interest; everything is plotted relative to it.
    x_pos=track[track['cell']==cell_i]['projection_x_coordinate'].item()
    y_pos=track[track['cell']==cell_i]['projection_y_coordinate'].item()
    if field_filled is not None:
        if levels_field_filled is None:
            levels_field_filled=np.linspace(vmin_field_filled,vmax_field_filled, nlevels_field_filled)
        plot_field_filled = axes.contourf((field_filled.coord('projection_x_coordinate').points-x_pos)/1000,
                                          (field_filled.coord('projection_y_coordinate').points-y_pos)/1000,
                                          field_filled.data,
                                          cmap=cmap_field_filled,norm=norm_field_filled,
                                          levels=levels_field_filled,vmin=vmin_field_filled, vmax=vmax_field_filled)

        cax_filled = divider.append_axes("right", size="5%", pad=0.1)

        # Separate ScalarMappable so the colorbar always spans vmin..vmax,
        # independent of the data range actually plotted.
        norm_filled= Normalize(vmin=vmin_field_filled, vmax=vmax_field_filled)
        sm_filled= plt.cm.ScalarMappable(norm=norm_filled, cmap = plot_field_filled.cmap)
        sm_filled.set_array([])

        cbar_field_filled = plt.colorbar(sm_filled, orientation='vertical',cax=cax_filled)
        cbar_field_filled.ax.set_ylabel(label_field_filled)
        cbar_field_filled.set_clim(vmin_field_filled, vmax_field_filled)

    if field_contour is not None:
        if levels_field_contour is None:
            levels_field_contour=np.linspace(vmin_field_contour, vmax_field_contour, nlevels_field_contour)
        if norm_field_contour:
            # An explicit norm takes precedence over vmin/vmax.
            # BUGFIX: these two assignments previously ended with trailing
            # commas, binding the tuple (None,) instead of None.
            vmin_field_contour=None
            vmax_field_contour=None
        plot_field_contour = axes.contour((field_contour.coord('projection_x_coordinate').points-x_pos)/1000,
                                          (field_contour.coord('projection_y_coordinate').points-y_pos)/1000,
                                          field_contour.data,
                                          cmap=cmap_field_contour,norm=norm_field_contour,
                                          levels=levels_field_contour,vmin=vmin_field_contour, vmax=vmax_field_contour,
                                          linewidths=linewidths_contour)

        if contour_labels:
            axes.clabel(plot_field_contour, fontsize=10)

        cax_contour = divider.append_axes("bottom", size="5%", pad=0.1)
        if norm_field_contour:
            vmin_field_contour=None
            vmax_field_contour=None
            norm_contour=norm_field_contour
        else:
            norm_contour= Normalize(vmin=vmin_field_contour, vmax=vmax_field_contour)

        sm_contour= plt.cm.ScalarMappable(norm=norm_contour, cmap = plot_field_contour.cmap)
        sm_contour.set_array([])

        cbar_field_contour = plt.colorbar(sm_contour, orientation='horizontal',ticks=levels_field_contour,cax=cax_contour)
        cbar_field_contour.ax.set_xlabel(label_field_contour)
        cbar_field_contour.set_clim(vmin_field_contour, vmax_field_contour)

    for i_row, row in track.iterrows():
        cell = int(row['cell'])
        # Highlight the cell of interest; other cells get a secondary colour.
        if cell==cell_i:
            color='darkred'
        else:
            color='darkorange'

        cell_string=' '+str(int(row['cell']))
        axes.text((row['projection_x_coordinate']-x_pos)/1000,
                  (row['projection_y_coordinate']-y_pos)/1000,
                  cell_string,color=color,fontsize=6, clip_on=True)

        # Plot marker for tracked cell centre as a cross
        axes.plot((row['projection_x_coordinate']-x_pos)/1000,
                  (row['projection_y_coordinate']-y_pos)/1000,
                  'x', color=color,markersize=4)

        # Create surface projection of mask for the respective cell and plot
        # its outline in the matching colour.
        z_coord = 'model_level_number'
        if len(mask_total.shape)==3:
            mask_total_i_surface = mask_cell_surface(mask_total, cell, track, masked=False, z_coord=z_coord)
        elif len(mask_total.shape)==2:
            mask_total_i_surface=mask_total
        # BUGFIX: contour takes 'linewidths' (plural); 'linewidth' was invalid.
        axes.contour((mask_total_i_surface.coord('projection_x_coordinate').points-x_pos)/1000,
                     (mask_total_i_surface.coord('projection_y_coordinate').points-y_pos)/1000,
                     mask_total_i_surface.data,
                     levels=[0, cell], colors=color, linestyles=':',linewidths=1)

    if cog is not None:
        for i_row, row in cog.iterrows():
            cell = row['cell']
            if cell==cell_i:
                color='darkred'
            else:
                color='darkorange'
            # plot marker for centre of gravity as a circle
            axes.plot((row['x_M']-x_pos)/1000, (row['y_M']-y_pos)/1000,
                      'o', markeredgecolor=color, markerfacecolor='None',markersize=4)

    if features is not None:
        for i_row, row in features.iterrows():
            color='purple'
            axes.plot((row['projection_x_coordinate']-x_pos)/1000,
                      (row['projection_y_coordinate']-y_pos)/1000,
                      '+', color=color,markersize=3)

    axes.set_xlabel('x (km)')
    axes.set_ylabel('y (km)')
    axes.set_xlim([-1*width/1000, width/1000])
    axes.set_ylim([-1*width/1000, width/1000])
    axes.xaxis.set_label_position('top')
    axes.xaxis.set_ticks_position('top')
    axes.set_title(title,pad=35,fontsize=10,horizontalalignment='left',loc='left')
    return axes
def plot_mask_cell_track_static(cell,track, cog, features, mask_total,
                                   field_contour, field_filled,
                                   width=10000,n_extend=1,
                                   name= 'test', plotdir='./',
                                   file_format=['png'],figsize=(10/2.54, 10/2.54),dpi=300,
                                   **kwargs):
    '''Save one figure per timestep for a single cell, in a fixed frame.

    The frame is the bounding box of the cell's full life cycle extended by
    ``width`` on each side, with one background field drawn as filled contours
    and one as contour lines (see plot_mask_cell_individual_static). Figures
    are written to ``<plotdir>/<name>/<name>_<timestamp>.<ext>``.

    Parameters
    ----------
    cell : int
        Id of the cell to plot.
    track : pandas.DataFrame
        Track information for all cells.
    cog : pandas.DataFrame or None
        Centre-of-gravity positions; skipped if None.
    features : pandas.DataFrame or None
        Detected feature positions; skipped if None.
    mask_total : iris.cube.Cube
        Segmentation mask with a 'time' coordinate.
    field_contour, field_filled : iris.cube.Cube or None
        Background fields; skipped if None.
    width : float
        Margin (metres) added around the cell-track bounding box.
    n_extend : int
        Extra timesteps to include before/after the cell track.
    name : str
        Base name for the output subdirectory and files.
    plotdir : str
        Output directory.
    file_format : list of str
        Output formats ('png' and/or 'pdf').
    figsize : tuple
        Figure size in inches.
    dpi : int
        Output resolution.
    **kwargs
        Passed through to plot_mask_cell_individual_static.

    Returns
    -------
    None
        Figures are written to disk.
    '''
    from iris import Constraint
    from numpy import unique
    import os
    # Fixed frame: bounding box of the entire cell track, plus margin.
    track_cell=track[track['cell']==cell]
    x_min=track_cell['projection_x_coordinate'].min()-width
    x_max=track_cell['projection_x_coordinate'].max()+width
    y_min=track_cell['projection_y_coordinate'].min()-width
    y_max=track_cell['projection_y_coordinate'].max()+width
    #set up looping over time based on mask's time coordinate to allow for one timestep before and after the track
    time_coord=mask_total.coord('time')
    time=time_coord.units.num2date(time_coord.points)
    i_start=max(0,np.where(time==track_cell['time'].values[0])[0][0]-n_extend)
    # NOTE(review): capping at len(time)-1 means the last mask timestep can
    # never be included by the half-open slice below — confirm intended.
    i_end=min(len(time)-1,np.where(time==track_cell['time'].values[-1])[0][0]+n_extend+1)
    time_cell=time[slice(i_start,i_end)]
    for time_i in time_cell:
        # for i_row,row in track_cell.iterrows():
        # time_i=row['time']
        # constraint_time = Constraint(time=row['time'])
        # Restrict all cubes to this timestep and the fixed spatial frame.
        constraint_time = Constraint(time=time_i)
        constraint_x = Constraint(projection_x_coordinate = lambda cell: x_min < cell < x_max)
        constraint_y = Constraint(projection_y_coordinate = lambda cell: y_min < cell < y_max)
        constraint = constraint_time & constraint_x & constraint_y
        mask_total_i=mask_total.extract(constraint)
        if field_contour is None:
            field_contour_i=None
        else:
            field_contour_i=field_contour.extract(constraint)
        if field_filled is None:
            field_filled_i=None
        else:
            field_filled_i=field_filled.extract(constraint)
        track_i=track[track['time']==time_i]
        # Plot every cell present in the mask or located inside the frame,
        # always including the cell of interest and dropping the 0 background.
        cells_mask=list(unique(mask_total_i.core_data()))
        track_cells=track_i.loc[(track_i['projection_x_coordinate'] > x_min) & (track_i['projection_x_coordinate'] < x_max) & (track_i['projection_y_coordinate'] > y_min) & (track_i['projection_y_coordinate'] < y_max)]
        cells_track=list(track_cells['cell'].values)
        cells=list(set( cells_mask + cells_track ))
        if cell not in cells:
            cells.append(cell)
        if 0 in cells:
            cells.remove(0)
        track_i=track_i[track_i['cell'].isin(cells)]
        if cog is None:
            cog_i=None
        else:
            cog_i=cog[cog['cell'].isin(cells)]
            cog_i=cog_i[cog_i['time']==time_i]
        if features is None:
            features_i=None
        else:
            features_i=features[features['time']==time_i]
        fig1, ax1 = plt.subplots(ncols=1, nrows=1, figsize=figsize)
        fig1.subplots_adjust(left=0.2, bottom=0.15, right=0.80, top=0.85)
        datestring_stamp = time_i.strftime('%Y-%m-%d %H:%M:%S')
        # Cell age as HH:MM:SS; placeholder when this timestep is outside the track.
        if time_i in track_cell['time'].values:
            time_cell_i=track_cell[track_cell['time'].values==time_i]['time_cell']
            celltime_stamp = "%02d:%02d:%02d" % (time_cell_i.dt.total_seconds() // 3600,
                                                 (time_cell_i.dt.total_seconds() % 3600) // 60,
                                                 time_cell_i.dt.total_seconds() % 60 )
        else:
            celltime_stamp=' - '
        title=datestring_stamp + ' , ' + celltime_stamp
        datestring_file = time_i.strftime('%Y-%m-%d_%H%M%S')
        ax1=plot_mask_cell_individual_static(cell_i=cell,
                                             track=track_i, cog=cog_i,features=features_i,
                                             mask_total=mask_total_i,
                                             field_contour=field_contour_i, field_filled=field_filled_i,
                                             xlim=[x_min/1000,x_max/1000],ylim=[y_min/1000,y_max/1000],
                                             axes=ax1,title=title,**kwargs)
        out_dir = os.path.join(plotdir, name)
        os.makedirs(out_dir, exist_ok=True)
        if 'png' in file_format:
            savepath_png = os.path.join(out_dir, name + '_' + datestring_file + '.png')
            fig1.savefig(savepath_png, dpi=dpi)
            logging.debug('Mask static plot saved to ' + savepath_png)
        if 'pdf' in file_format:
            savepath_pdf = os.path.join(out_dir, name + '_' + datestring_file + '.pdf')
            fig1.savefig(savepath_pdf, dpi=dpi)
            logging.debug('Mask static plot saved to ' + savepath_pdf)
        plt.close()
        plt.clf()
def plot_mask_cell_individual_static(cell_i,track, cog, features, mask_total,
                                   field_contour, field_filled,
                                   axes=None,xlim=None,ylim=None,
                                   label_field_contour=None, cmap_field_contour='Blues',norm_field_contour=None,
                                   linewidths_contour=0.8,contour_labels=False,
                                   vmin_field_contour=0,vmax_field_contour=50,levels_field_contour=None,nlevels_field_contour=10,
                                   label_field_filled=None,cmap_field_filled='summer',norm_field_filled=None,
                                   vmin_field_filled=0,vmax_field_filled=100,levels_field_filled=None,nlevels_field_filled=10,
                                   title=None,feature_number=False
                                   ):
    '''Make a plot for one timestep of a cell in a fixed frame, with one
    background field as filled contours and one as contour lines.

    The cell of interest is drawn in dark red, other tracked cells in dark
    orange, untracked features (cell is NaN) in gray, and detected features
    (if given) as purple crosses. Coordinates are converted to kilometres.

    Parameters
    ----------
    cell_i : int
        Id of the cell of interest.
    track : pandas.DataFrame
        Track rows for this timestep; needs 'cell', 'feature',
        'projection_x_coordinate' and 'projection_y_coordinate' columns.
    cog : pandas.DataFrame or None
        Centre-of-gravity positions ('cell', 'x_M', 'y_M'); skipped if None.
    features : pandas.DataFrame or None
        Detected feature positions; skipped if None.
    mask_total : iris.cube.Cube
        2D or 3D segmentation mask containing feature ids.
    field_contour, field_filled : iris.cube.Cube or None
        Background fields; skipped if None.
    axes : matplotlib.axes.Axes
        Axes to draw into.
    xlim, ylim : list or None
        Axis limits in kilometres.
    title : str or None
        Axes title.
    feature_number : bool
        If True, annotate each cell with its feature id in parentheses.

    Returns
    -------
    matplotlib.axes.Axes
        The axes the plot was drawn into.
    '''
    import numpy as np
    from .utils import mask_features,mask_features_surface
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    from matplotlib.colors import Normalize

    divider = make_axes_locatable(axes)

    if field_filled is not None:
        if levels_field_filled is None:
            # BUGFIX: the level count was hard-coded to 10, ignoring the
            # nlevels_field_filled parameter (default 10, so defaults unchanged).
            levels_field_filled=np.linspace(vmin_field_filled,vmax_field_filled, nlevels_field_filled)
        plot_field_filled = axes.contourf(field_filled.coord('projection_x_coordinate').points/1000,
                                          field_filled.coord('projection_y_coordinate').points/1000,
                                          field_filled.data,
                                          levels=levels_field_filled, norm=norm_field_filled,
                                          cmap=cmap_field_filled, vmin=vmin_field_filled, vmax=vmax_field_filled)

        cax_filled = divider.append_axes("right", size="5%", pad=0.1)

        # Separate ScalarMappable so the colorbar always spans vmin..vmax.
        norm_filled= Normalize(vmin=vmin_field_filled, vmax=vmax_field_filled)
        sm1= plt.cm.ScalarMappable(norm=norm_filled, cmap = plot_field_filled.cmap)
        sm1.set_array([])

        cbar_field_filled = plt.colorbar(sm1, orientation='vertical',cax=cax_filled)
        cbar_field_filled.ax.set_ylabel(label_field_filled)
        cbar_field_filled.set_clim(vmin_field_filled, vmax_field_filled)

    if field_contour is not None:
        if levels_field_contour is None:
            # NOTE(review): hard-coded 5 levels here ignores
            # nlevels_field_contour (default 10); kept as-is since honouring
            # the parameter would change the default plot output.
            levels_field_contour=np.linspace(vmin_field_contour, vmax_field_contour, 5)
        plot_field_contour = axes.contour(field_contour.coord('projection_x_coordinate').points/1000,
                                          field_contour.coord('projection_y_coordinate').points/1000,
                                          field_contour.data,
                                          cmap=cmap_field_contour,norm=norm_field_contour,
                                          levels=levels_field_contour,vmin=vmin_field_contour, vmax=vmax_field_contour,
                                          linewidths=linewidths_contour)

        if contour_labels:
            axes.clabel(plot_field_contour, fontsize=10)

        cax_contour = divider.append_axes("bottom", size="5%", pad=0.1)
        if norm_field_contour:
            # An explicit norm takes precedence over vmin/vmax.
            vmin_field_contour=None
            vmax_field_contour=None
            norm_contour=norm_field_contour
        else:
            norm_contour= Normalize(vmin=vmin_field_contour, vmax=vmax_field_contour)

        sm_contour= plt.cm.ScalarMappable(norm=norm_contour, cmap = plot_field_contour.cmap)
        sm_contour.set_array([])

        cbar_field_contour = plt.colorbar(sm_contour, orientation='horizontal',ticks=levels_field_contour,cax=cax_contour)
        cbar_field_contour.ax.set_xlabel(label_field_contour)
        cbar_field_contour.set_clim(vmin_field_contour, vmax_field_contour)

    for i_row, row in track.iterrows():
        cell = row['cell']
        feature = row['feature']
        # logging.debug("cell: "+ str(row['cell']))
        # logging.debug("feature: "+ str(row['feature']))

        # Colour/label scheme: cell of interest dark red, untracked features
        # (cell is NaN) gray, all other cells dark orange.
        if cell==cell_i:
            color='darkred'
            if feature_number:
                cell_string=' '+str(int(cell))+' ('+str(int(feature))+')'
            else:
                cell_string=' '+str(int(cell))
        elif np.isnan(cell):
            color='gray'
            if feature_number:
                cell_string=' '+'('+str(int(feature))+')'
            else:
                cell_string=' '
        else:
            color='darkorange'
            if feature_number:
                cell_string=' '+str(int(cell))+' ('+str(int(feature))+')'
            else:
                cell_string=' '+str(int(cell))

        axes.text(row['projection_x_coordinate']/1000,
                  row['projection_y_coordinate']/1000,
                  cell_string,color=color,fontsize=6, clip_on=True)

        # Plot marker for tracked cell centre as a cross
        axes.plot(row['projection_x_coordinate']/1000,
                  row['projection_y_coordinate']/1000,
                  'x', color=color,markersize=4)

        # Create surface projection of mask for the respective cell and plot
        # its outline in the matching colour.
        z_coord = 'model_level_number'
        if len(mask_total.shape)==3:
            mask_total_i_surface = mask_features_surface(mask_total, feature, masked=False, z_coord=z_coord)
        elif len(mask_total.shape)==2:
            mask_total_i_surface=mask_features(mask_total, feature, masked=False, z_coord=z_coord)
        # BUGFIX: contour takes 'linewidths' (plural); 'linewidth' was invalid.
        axes.contour(mask_total_i_surface.coord('projection_x_coordinate').points/1000,
                     mask_total_i_surface.coord('projection_y_coordinate').points/1000,
                     mask_total_i_surface.data,
                     levels=[0, feature], colors=color, linestyles=':',linewidths=1)

    if cog is not None:
        for i_row, row in cog.iterrows():
            cell = row['cell']
            if cell==cell_i:
                color='darkred'
            else:
                color='darkorange'
            # plot marker for centre of gravity as a circle
            axes.plot(row['x_M']/1000, row['y_M']/1000,
                      'o', markeredgecolor=color, markerfacecolor='None',markersize=4)

    if features is not None:
        for i_row, row in features.iterrows():
            color='purple'
            axes.plot(row['projection_x_coordinate']/1000,
                      row['projection_y_coordinate']/1000,
                      '+', color=color,markersize=3)

    axes.set_xlabel('x (km)')
    axes.set_ylabel('y (km)')
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    axes.xaxis.set_label_position('top')
    axes.xaxis.set_ticks_position('top')
    axes.set_title(title,pad=35,fontsize=10,horizontalalignment='left',loc='left')
    return axes
def plot_mask_cell_track_2D3Dstatic(cell,track, cog, features, mask_total,
                                   field_contour, field_filled,
                                   width=10000,n_extend=1,
                                   name= 'test', plotdir='./',
                                   file_format=['png'],figsize=(10/2.54, 10/2.54),dpi=300,
                                   ele=10,azim=30,
                                   **kwargs):
    '''Save one two-panel figure (2D map + 3D view) per timestep for a single
    cell, in a fixed frame covering the cell's entire life cycle.

    The left panel is produced by plot_mask_cell_individual_static, the right
    one by plot_mask_cell_individual_3Dstatic. Figures are written to
    ``<plotdir>/<name>/<name>_<timestamp>.<ext>``.

    Parameters
    ----------
    cell : int
        Id of the cell to plot.
    track, cog, features : pandas.DataFrame or None
        Track / centre-of-gravity / feature data (cog and features optional).
    mask_total : iris.cube.Cube
        Segmentation mask with a 'time' coordinate.
    field_contour, field_filled : iris.cube.Cube or None
        Background fields; skipped if None.
    width : float
        Margin (metres) around the cell-track bounding box.
    n_extend : int
        Extra timesteps before/after the cell track.
    name, plotdir, file_format, figsize, dpi
        Output naming/format options (figsize is unused here; the figure size
        is fixed for the two-panel layout).
    ele, azim : float
        Elevation and azimuth viewing angles for the 3D panel.
    **kwargs
        Passed through to both individual plotting functions.

    Returns
    -------
    None
        Figures are written to disk.
    '''
    from iris import Constraint
    from numpy import unique
    import os
    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.gridspec as gridspec
    # Fixed frame: bounding box of the entire cell track, plus margin.
    track_cell=track[track['cell']==cell]
    x_min=track_cell['projection_x_coordinate'].min()-width
    x_max=track_cell['projection_x_coordinate'].max()+width
    y_min=track_cell['projection_y_coordinate'].min()-width
    y_max=track_cell['projection_y_coordinate'].max()+width
    #set up looping over time based on mask's time coordinate to allow for one timestep before and after the track
    time_coord=mask_total.coord('time')
    time=time_coord.units.num2date(time_coord.points)
    i_start=max(0,np.where(time==track_cell['time'].values[0])[0][0]-n_extend)
    # NOTE(review): capping at len(time)-1 excludes the last mask timestep
    # from the half-open slice below — confirm intended.
    i_end=min(len(time)-1,np.where(time==track_cell['time'].values[-1])[0][0]+n_extend+1)
    time_cell=time[slice(i_start,i_end)]
    for time_i in time_cell:
        # for i_row,row in track_cell.iterrows():
        # time_i=row['time']
        # constraint_time = Constraint(time=row['time'])
        # Restrict all cubes to this timestep and the fixed spatial frame.
        constraint_time = Constraint(time=time_i)
        constraint_x = Constraint(projection_x_coordinate = lambda cell: x_min < cell < x_max)
        constraint_y = Constraint(projection_y_coordinate = lambda cell: y_min < cell < y_max)
        constraint = constraint_time & constraint_x & constraint_y
        mask_total_i=mask_total.extract(constraint)
        if field_contour is None:
            field_contour_i=None
        else:
            field_contour_i=field_contour.extract(constraint)
        if field_filled is None:
            field_filled_i=None
        else:
            field_filled_i=field_filled.extract(constraint)
        track_i=track[track['time']==time_i]
        # Plot every cell present in the mask or located inside the frame,
        # always including the cell of interest and dropping the 0 background.
        cells_mask=list(unique(mask_total_i.core_data()))
        track_cells=track_i.loc[(track_i['projection_x_coordinate'] > x_min) & (track_i['projection_x_coordinate'] < x_max) & (track_i['projection_y_coordinate'] > y_min) & (track_i['projection_y_coordinate'] < y_max)]
        cells_track=list(track_cells['cell'].values)
        cells=list(set( cells_mask + cells_track ))
        if cell not in cells:
            cells.append(cell)
        if 0 in cells:
            cells.remove(0)
        track_i=track_i[track_i['cell'].isin(cells)]
        if cog is None:
            cog_i=None
        else:
            cog_i=cog[cog['cell'].isin(cells)]
            cog_i=cog_i[cog_i['time']==time_i]
        if features is None:
            features_i=None
        else:
            features_i=features[features['time']==time_i]
        fig1=plt.figure(figsize=(20 / 2.54, 10 / 2.54))
        fig1.subplots_adjust(left=0.1, bottom=0.15, right=0.9, top=0.9,wspace=0.3, hspace=0.25)
        # make two subplots for figure: 2D map on the left, 3D view on the right
        gs1 = gridspec.GridSpec(1, 2,width_ratios=[1,1.2])
        fig1.add_subplot(gs1[0])
        fig1.add_subplot(gs1[1], projection='3d')
        ax1 = fig1.get_axes()
        datestring_stamp = time_i.strftime('%Y-%m-%d %H:%M:%S')
        # Cell age as HH:MM:SS; placeholder when this timestep is outside the track.
        if time_i in track_cell['time'].values:
            time_cell_i=track_cell[track_cell['time'].values==time_i]['time_cell']
            celltime_stamp = "%02d:%02d:%02d" % (time_cell_i.dt.total_seconds() // 3600,
                                                 (time_cell_i.dt.total_seconds() % 3600) // 60,
                                                 time_cell_i.dt.total_seconds() % 60 )
        else:
            celltime_stamp=' - '
        title=datestring_stamp + ' , ' + celltime_stamp
        datestring_file = time_i.strftime('%Y-%m-%d_%H%M%S')
        ax1[0]=plot_mask_cell_individual_static(cell_i=cell,
                                             track=track_i, cog=cog_i,features=features_i,
                                             mask_total=mask_total_i,
                                             field_contour=field_contour_i, field_filled=field_filled_i,
                                             xlim=[x_min/1000,x_max/1000],ylim=[y_min/1000,y_max/1000],
                                             axes=ax1[0],title=title,**kwargs)
        ax1[1]=plot_mask_cell_individual_3Dstatic(cell_i=cell,
                                             track=track_i, cog=cog_i,features=features_i,
                                             mask_total=mask_total_i,
                                             field_contour=field_contour_i, field_filled=field_filled_i,
                                             xlim=[x_min/1000,x_max/1000],ylim=[y_min/1000,y_max/1000],
                                             axes=ax1[1],title=title,
                                             ele=ele,azim=azim,
                                             **kwargs)
        out_dir = os.path.join(plotdir, name)
        os.makedirs(out_dir, exist_ok=True)
        if 'png' in file_format:
            savepath_png = os.path.join(out_dir, name + '_' + datestring_file + '.png')
            fig1.savefig(savepath_png, dpi=dpi)
            logging.debug('Mask static 2d/3D plot saved to ' + savepath_png)
        if 'pdf' in file_format:
            savepath_pdf = os.path.join(out_dir, name + '_' + datestring_file + '.pdf')
            fig1.savefig(savepath_pdf, dpi=dpi)
            logging.debug('Mask static 2d/3D plot saved to ' + savepath_pdf)
        plt.close()
        plt.clf()
def plot_mask_cell_track_3Dstatic(cell,track, cog, features, mask_total,
                                   field_contour, field_filled,
                                   width=10000,n_extend=1,
                                   name= 'test', plotdir='./',
                                   file_format=['png'],figsize=(10/2.54, 10/2.54),dpi=300,
                                   **kwargs):
    '''Save one 3D figure per timestep for a single cell, in a fixed frame
    covering the cell's entire life cycle.

    Each figure is produced by plot_mask_cell_individual_3Dstatic and written
    to ``<plotdir>/<name>/<name>_<timestamp>.<ext>``.

    Parameters
    ----------
    cell : int
        Id of the cell to plot.
    track, cog, features : pandas.DataFrame or None
        Track / centre-of-gravity / feature data (cog and features optional).
    mask_total : iris.cube.Cube
        Segmentation mask with a 'time' coordinate.
    field_contour, field_filled : iris.cube.Cube or None
        Background fields; skipped if None.
    width : float
        Margin (metres) around the cell-track bounding box.
    n_extend : int
        Extra timesteps before/after the cell track.
    name, plotdir, file_format, figsize, dpi
        Output naming/format options (figsize is unused; the 3D figure size
        is fixed below).
    **kwargs
        Passed through to plot_mask_cell_individual_3Dstatic.

    Returns
    -------
    None
        Figures are written to disk.
    '''
    from iris import Constraint
    from numpy import unique
    import os
    from mpl_toolkits.mplot3d import Axes3D
    # Fixed frame: bounding box of the entire cell track, plus margin.
    track_cell=track[track['cell']==cell]
    x_min=track_cell['projection_x_coordinate'].min()-width
    x_max=track_cell['projection_x_coordinate'].max()+width
    y_min=track_cell['projection_y_coordinate'].min()-width
    y_max=track_cell['projection_y_coordinate'].max()+width
    #set up looping over time based on mask's time coordinate to allow for one timestep before and after the track
    time_coord=mask_total.coord('time')
    time=time_coord.units.num2date(time_coord.points)
    i_start=max(0,np.where(time==track_cell['time'].values[0])[0][0]-n_extend)
    # NOTE(review): capping at len(time)-1 excludes the last mask timestep
    # from the half-open slice below — confirm intended.
    i_end=min(len(time)-1,np.where(time==track_cell['time'].values[-1])[0][0]+n_extend+1)
    time_cell=time[slice(i_start,i_end)]
    for time_i in time_cell:
        # for i_row,row in track_cell.iterrows():
        # time_i=row['time']
        # constraint_time = Constraint(time=row['time'])
        # Restrict all cubes to this timestep and the fixed spatial frame.
        constraint_time = Constraint(time=time_i)
        constraint_x = Constraint(projection_x_coordinate = lambda cell: x_min < cell < x_max)
        constraint_y = Constraint(projection_y_coordinate = lambda cell: y_min < cell < y_max)
        constraint = constraint_time & constraint_x & constraint_y
        mask_total_i=mask_total.extract(constraint)
        if field_contour is None:
            field_contour_i=None
        else:
            field_contour_i=field_contour.extract(constraint)
        if field_filled is None:
            field_filled_i=None
        else:
            field_filled_i=field_filled.extract(constraint)
        track_i=track[track['time']==time_i]
        # Plot every cell present in the mask or located inside the frame,
        # always including the cell of interest and dropping the 0 background.
        cells_mask=list(unique(mask_total_i.core_data()))
        track_cells=track_i.loc[(track_i['projection_x_coordinate'] > x_min) & (track_i['projection_x_coordinate'] < x_max) & (track_i['projection_y_coordinate'] > y_min) & (track_i['projection_y_coordinate'] < y_max)]
        cells_track=list(track_cells['cell'].values)
        cells=list(set( cells_mask + cells_track ))
        if cell not in cells:
            cells.append(cell)
        if 0 in cells:
            cells.remove(0)
        track_i=track_i[track_i['cell'].isin(cells)]
        if cog is None:
            cog_i=None
        else:
            cog_i=cog[cog['cell'].isin(cells)]
            cog_i=cog_i[cog_i['time']==time_i]
        if features is None:
            features_i=None
        else:
            features_i=features[features['time']==time_i]
        # fig1, ax1 = plt.subplots(ncols=1, nrows=1, figsize=figsize)
        # fig1.subplots_adjust(left=0.2, bottom=0.15, right=0.80, top=0.85)
        fig1, ax1 = plt.subplots(ncols=1, nrows=1, figsize=(10/2.54, 10/2.54), subplot_kw={'projection': '3d'})
        datestring_stamp = time_i.strftime('%Y-%m-%d %H:%M:%S')
        # Cell age as HH:MM:SS; placeholder when this timestep is outside the track.
        if time_i in track_cell['time'].values:
            time_cell_i=track_cell[track_cell['time'].values==time_i]['time_cell']
            celltime_stamp = "%02d:%02d:%02d" % (time_cell_i.dt.total_seconds() // 3600,
                                                 (time_cell_i.dt.total_seconds() % 3600) // 60,
                                                 time_cell_i.dt.total_seconds() % 60 )
        else:
            celltime_stamp=' - '
        title=datestring_stamp + ' , ' + celltime_stamp
        datestring_file = time_i.strftime('%Y-%m-%d_%H%M%S')
        ax1=plot_mask_cell_individual_3Dstatic(cell_i=cell,
                                             track=track_i, cog=cog_i,features=features_i,
                                             mask_total=mask_total_i,
                                             field_contour=field_contour_i, field_filled=field_filled_i,
                                             xlim=[x_min/1000,x_max/1000],ylim=[y_min/1000,y_max/1000],
                                             axes=ax1,title=title,**kwargs)
        out_dir = os.path.join(plotdir, name)
        os.makedirs(out_dir, exist_ok=True)
        if 'png' in file_format:
            savepath_png = os.path.join(out_dir, name + '_' + datestring_file + '.png')
            fig1.savefig(savepath_png, dpi=dpi)
            logging.debug('Mask static plot saved to ' + savepath_png)
        if 'pdf' in file_format:
            savepath_pdf = os.path.join(out_dir, name + '_' + datestring_file + '.pdf')
            fig1.savefig(savepath_pdf, dpi=dpi)
            logging.debug('Mask static plot saved to ' + savepath_pdf)
        plt.close()
        plt.clf()
def plot_mask_cell_individual_3Dstatic(cell_i,track, cog, features, mask_total,
                                   field_contour, field_filled,
                                   axes=None,xlim=None,ylim=None,
                                   label_field_contour=None, cmap_field_contour='Blues',norm_field_contour=None,
                                   linewidths_contour=0.8,contour_labels=False,
                                   vmin_field_contour=0,vmax_field_contour=50,levels_field_contour=None,nlevels_field_contour=10,
                                   label_field_filled=None,cmap_field_filled='summer',norm_field_filled=None,
                                   vmin_field_filled=0,vmax_field_filled=100,levels_field_filled=None,nlevels_field_filled=10,
                                   title=None,feature_number=False,
                                   ele=10.,azim=210.
                                   ):
    '''Make a 3D scatter plot for one timestep of a cell in a fixed frame.

    Each feature's 3D mask voxels are drawn as a coloured scatter cloud
    (cell of interest dark red, untracked features gray, other cells dark
    orange). The background-field, text-label and 2D-outline drawing present
    in the 2D variant is currently disabled (kept below as commented-out
    code). The vertical axis is the model level number.

    Parameters
    ----------
    cell_i : int
        Id of the cell of interest.
    track : pandas.DataFrame
        Track rows for this timestep; needs 'cell' and 'feature' columns.
    cog, features : pandas.DataFrame or None
        Accepted for interface symmetry with the 2D variant; not used by the
        active code below.
    mask_total : iris.cube.Cube
        3D segmentation mask containing feature ids, with
        'projection_x_coordinate', 'projection_y_coordinate' and
        'model_level_number' coordinates.
    field_contour, field_filled : iris.cube.Cube or None
        Accepted for interface symmetry; not used by the active code below.
    axes : mpl_toolkits.mplot3d.Axes3D
        3D axes to draw into.
    xlim, ylim : list or None
        Horizontal axis limits in kilometres.
    title : str or None
        Axes title.
    feature_number : bool
        If True, include the feature id in the scatter legend label.
    ele, azim : float
        Elevation and azimuth viewing angles.

    Returns
    -------
    mpl_toolkits.mplot3d.Axes3D
        The axes the plot was drawn into.
    '''
    import numpy as np
    from .utils import mask_features,mask_features_surface
    # from mpl_toolkits.axes_grid1 import make_axes_locatable
    # from matplotlib.colors import Normalize
    from mpl_toolkits.mplot3d import Axes3D
    axes.view_init(elev=ele, azim=azim)
    # NOTE(review): Axes.grid's 'b' keyword was renamed/removed in newer
    # matplotlib — this call may need updating there; confirm target version.
    axes.grid(b=False)
    axes.set_frame_on(False)

    # make the panes transparent
    axes.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    axes.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    axes.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    # make the grid lines transparent
    axes.xaxis._axinfo["grid"]['color'] = (1,1,1,0)
    axes.yaxis._axinfo["grid"]['color'] = (1,1,1,0)
    axes.zaxis._axinfo["grid"]['color'] = (1,1,1,0)
    if title is not None:
        axes.set_title(title,horizontalalignment='left',loc='left')

    # colors_mask = ['pink','darkred', 'orange', 'darkred', 'red', 'darkorange']

    # Full 3D coordinate grids matching the mask's data layout ('ij' indexing
    # gives (z, y, x) order to match the cube's dimensions).
    x = mask_total.coord('projection_x_coordinate').points
    y = mask_total.coord('projection_y_coordinate').points
    z = mask_total.coord('model_level_number').points

    # z = mask_total.coord('geopotential_height').points
    zz, yy, xx = np.meshgrid(z, y, x, indexing='ij')
    # z_alt = mask_total.coord('geopotential_height').points

    # divider = make_axes_locatable(axes)
    # if field_filled is not None:
    #     if levels_field_filled is None:
    #         levels_field_filled=np.linspace(vmin_field_filled,vmax_field_filled, 10)
    #     plot_field_filled = axes.contourf(field_filled.coord('projection_x_coordinate').points/1000,
    #                              field_filled.coord('projection_y_coordinate').points/1000,
    #                              field_filled.data,
    #                              levels=levels_field_filled, norm=norm_field_filled,
    #                              cmap=cmap_field_filled, vmin=vmin_field_filled, vmax=vmax_field_filled)

    #     cax_filled = divider.append_axes("right", size="5%", pad=0.1)

    #     norm_filled= Normalize(vmin=vmin_field_filled, vmax=vmax_field_filled)
    #     sm1= plt.cm.ScalarMappable(norm=norm_filled, cmap = plot_field_filled.cmap)
    #     sm1.set_array([])

    #     cbar_field_filled = plt.colorbar(sm1, orientation='vertical',cax=cax_filled)
    #     cbar_field_filled.ax.set_ylabel(label_field_filled)
    #     cbar_field_filled.set_clim(vmin_field_filled, vmax_field_filled)

    # if field_contour is not None:
    #     if levels_field_contour is None:
    #         levels_field_contour=np.linspace(vmin_field_contour, vmax_field_contour, 5)
    #     plot_field_contour = axes.contour(field_contour.coord('projection_x_coordinate').points/1000,
    #                              field_contour.coord('projection_y_coordinate').points/1000,
    #                              field_contour.data,
    #                              cmap=cmap_field_contour,norm=norm_field_contour,
    #                              levels=levels_field_contour,vmin=vmin_field_contour, vmax=vmax_field_contour,
    #                              linewidths=linewidths_contour)

    #     if contour_labels:
    #         axes.clabel(plot_field_contour, fontsize=10)

    #     cax_contour = divider.append_axes("bottom", size="5%", pad=0.1)
    #     if norm_field_contour:
    #         vmin_field_contour=None
    #         vmax_field_contour=None
    #         norm_contour=norm_field_contour
    #     else:
    #         norm_contour= Normalize(vmin=vmin_field_contour, vmax=vmax_field_contour)
    #
    #     sm_contour= plt.cm.ScalarMappable(norm=norm_contour, cmap = plot_field_contour.cmap)
    #     sm_contour.set_array([])
    #
    #     cbar_field_contour = plt.colorbar(sm_contour, orientation='horizontal',ticks=levels_field_contour,cax=cax_contour)
    #     cbar_field_contour.ax.set_xlabel(label_field_contour)
    #     cbar_field_contour.set_clim(vmin_field_contour, vmax_field_contour)
    #
    for i_row, row in track.iterrows():
        cell = row['cell']
        feature = row['feature']
        # logging.debug("cell: "+ str(row['cell']))
        # logging.debug("feature: "+ str(row['feature']))

        # Colour/label scheme: cell of interest dark red, untracked features
        # (cell is NaN) gray, all other cells dark orange.
        if cell==cell_i:
            color='darkred'
            if feature_number:
                cell_string=' '+str(int(cell))+' ('+str(int(feature))+')'
            else:
                cell_string=' '+str(int(cell))
        elif np.isnan(cell):
            color='gray'
            if feature_number:
                cell_string=' '+'('+str(int(feature))+')'
            else:
                cell_string=' '
        else:
            color='darkorange'
            if feature_number:
                cell_string=' '+str(int(cell))+' ('+str(int(feature))+')'
            else:
                cell_string=' '+str(int(cell))

        # axes.text(row['projection_x_coordinate']/1000,
        #           row['projection_y_coordinate']/1000,
        #           0,
        #           cell_string,color=color,fontsize=6, clip_on=True)

        # # Plot marker for tracked cell centre as a cross
        # axes.plot(row['projection_x_coordinate']/1000,
        #           row['projection_y_coordinate']/1000,
        #           0,
        #           'x', color=color,markersize=4)

        #Create surface projection of mask for the respective cell and plot it in the right color
        # z_coord = 'model_level_number'
        # if len(mask_total.shape)==3:
        #     mask_total_i_surface = mask_features_surface(mask_total, feature, masked=False, z_coord=z_coord)
        # elif len(mask_total.shape)==2:
        #     mask_total_i_surface=mask_features(mask_total, feature, masked=False, z_coord=z_coord)
        # axes.contour(mask_total_i_surface.coord('projection_x_coordinate').points/1000,
        #              mask_total_i_surface.coord('projection_y_coordinate').points/1000,
        #              0,
        #              mask_total_i_surface.data,
        #              levels=[0, feature], colors=color, linestyles=':',linewidth=1)

        # Scatter every voxel belonging to this feature; x/y in km, z as raw
        # model level number.
        mask_feature = mask_total.data == feature
        axes.scatter(
            # xx[mask_feature]/1000, yy[mask_feature]/1000, zz[mask_feature]/1000,
            xx[mask_feature]/1000, yy[mask_feature]/1000, zz[mask_feature],
            c=color, marker=',',
            s=5,#60000.0 * TWC_i[Mask_particle],
            alpha=0.3, cmap='inferno', label=cell_string,rasterized=True)

    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    axes.set_zlim([0, 100])

    # axes.set_zlim([0, 20])
    # axes.set_zticks([0, 5,10,15, 20])

    axes.set_xlabel('x (km)')
    axes.set_ylabel('y (km)')
    axes.zaxis.set_rotate_label(False)  # disable automatic rotation
    # axes.set_zlabel('z (km)', rotation=90)
    axes.set_zlabel('model level', rotation=90)
    return axes
def plot_mask_cell_track_static_timeseries(cell,track, cog, features, mask_total,
                                    field_contour, field_filled,
                                    track_variable=None,variable=None,variable_ylabel=None,variable_label=None,variable_legend=False,variable_color=None,
                                    width=10000,n_extend=1,
                                    name= 'test', plotdir='./',
                                    file_format=None,figsize=(20/2.54, 10/2.54),dpi=300,
                                    **kwargs):
    '''Plot one figure per time step for a single cell, with a fixed frame
    covering the entire development of the cell.

    The left panel shows the cell mask on top of one background field plotted
    as filled contours and another as contour lines (delegated to
    plot_mask_cell_individual_static); the right panel shows an evolving time
    series of one or more track variables against cell lifetime, with the
    current time step highlighted.

    Parameters
    ----------
    cell : cell id to plot.
    track : pandas.DataFrame with all cell tracks (used to find neighbouring
        cells inside the frame).
    cog : pandas.DataFrame with centre-of-gravity data, or None to skip.
    features : pandas.DataFrame with detected features, or None to skip.
    mask_total : iris cube with the segmentation mask for all cells.
    field_contour, field_filled : iris cubes for the background fields
        (contour lines / filled contours), or None.
    track_variable : pandas.DataFrame with per-cell time series containing
        the column(s) named in `variable`.
    variable : str or list of str, column name(s) to plot on the right panel.
    variable_ylabel : y-axis label; defaults to `variable` if None.
    variable_label : list of legend labels, one per variable
        (default: [None], reproducing the historical default).
    variable_legend : bool, draw a legend in the time-series panel.
    variable_color : single colour or dict mapping variable -> colour
        (default 'navy').
    width : margin in metres added around the cell's track extent.
    n_extend : number of extra time steps plotted before/after the lifetime.
    name : basename for output files and the output subdirectory.
    plotdir : base output directory.
    file_format : list containing 'png' and/or 'pdf' (default: ['png']).
    figsize, dpi : figure size (inches) and resolution.
    **kwargs : passed through to plot_mask_cell_individual_static.
    '''
    from iris import Constraint
    from numpy import unique
    import os
    import pandas as pd

    # Fix: avoid mutable default arguments while keeping the historical
    # defaults for callers that rely on them.
    if variable_label is None:
        variable_label = [None]
    if file_format is None:
        file_format = ['png']

    # Fixed plotting frame: the cell's full track extent plus a margin.
    track_cell = track[track['cell'] == cell]
    x_min = track_cell['projection_x_coordinate'].min() - width
    x_max = track_cell['projection_x_coordinate'].max() + width
    y_min = track_cell['projection_y_coordinate'].min() - width
    y_max = track_cell['projection_y_coordinate'].max() + width
    time_min = track_cell['time'].min()
    # time_max=track_cell['time'].max()

    # Copy the cell's time series so the type conversion below does not
    # mutate the caller's frame / trigger pandas' SettingWithCopyWarning.
    track_variable_cell = track_variable[track_variable['cell'] == cell].copy()
    track_variable_cell['time_cell'] = pd.to_timedelta(track_variable_cell['time_cell'])
    # track_variable_cell=track_variable_cell[(track_variable_cell['time']>=time_min) & (track_variable_cell['time']<=time_max)]

    # Loop over the mask's own time coordinate so the plots can extend
    # n_extend time steps before and after the cell's lifetime.
    time_coord = mask_total.coord('time')
    time = time_coord.units.num2date(time_coord.points)
    i_start = max(0, np.where(time == track_cell['time'].values[0])[0][0] - n_extend)
    i_end = min(len(time) - 1, np.where(time == track_cell['time'].values[-1])[0][0] + n_extend + 1)
    time_cell = time[slice(i_start, i_end)]

    for time_i in time_cell:
        # Subset all fields to the current time and the fixed spatial frame.
        # Note: lambda parameter renamed from `cell` to `value` so it no
        # longer shadows the function's `cell` argument.
        constraint_time = Constraint(time=time_i)
        constraint_x = Constraint(projection_x_coordinate=lambda value: x_min < value < x_max)
        constraint_y = Constraint(projection_y_coordinate=lambda value: y_min < value < y_max)
        constraint = constraint_time & constraint_x & constraint_y

        mask_total_i = mask_total.extract(constraint)
        if field_contour is None:
            field_contour_i = None
        else:
            field_contour_i = field_contour.extract(constraint)
        if field_filled is None:
            field_filled_i = None
        else:
            field_filled_i = field_filled.extract(constraint)

        # Collect every cell visible in the frame at this time, from both
        # the mask values and the track positions.
        track_i = track[track['time'] == time_i]
        cells_mask = list(unique(mask_total_i.core_data()))
        track_cells = track_i.loc[(track_i['projection_x_coordinate'] > x_min) & (track_i['projection_x_coordinate'] < x_max) & (track_i['projection_y_coordinate'] > y_min) & (track_i['projection_y_coordinate'] < y_max)]
        cells_track = list(track_cells['cell'].values)
        cells = list(set(cells_mask + cells_track))
        if cell not in cells:
            cells.append(cell)
        if 0 in cells:
            cells.remove(0)  # 0 is the background label, not a cell
        track_i = track_i[track_i['cell'].isin(cells)]

        if cog is None:
            cog_i = None
        else:
            cog_i = cog[cog['cell'].isin(cells)]
            cog_i = cog_i[cog_i['time'] == time_i]

        if features is None:
            features_i = None
        else:
            features_i = features[features['time'] == time_i]

        fig1, ax1 = plt.subplots(ncols=2, nrows=1, figsize=figsize)
        fig1.subplots_adjust(left=0.1, bottom=0.15, right=0.90, top=0.85, wspace=0.3)

        # Title combines time into the cell's life (hh:mm:ss) and the
        # absolute timestamp; outside the lifetime only the date is valid.
        datestring_stamp = time_i.strftime('%Y-%m-%d %H:%M:%S')
        if time_i in track_cell['time'].values:
            time_cell_i = track_cell[track_cell['time'].values == time_i]['time_cell']
            celltime_stamp = "%02d:%02d:%02d" % (time_cell_i.dt.total_seconds() // 3600,
                                                 (time_cell_i.dt.total_seconds() % 3600) // 60,
                                                 time_cell_i.dt.total_seconds() % 60)
        else:
            celltime_stamp = ' - '
        title = celltime_stamp + ' , ' + datestring_stamp
        datestring_file = time_i.strftime('%Y-%m-%d_%H%M%S')

        # Left panel: static map of the mask plus background fields.
        ax1[0] = plot_mask_cell_individual_static(cell_i=cell,
                                                  track=track_i, cog=cog_i, features=features_i,
                                                  mask_total=mask_total_i,
                                                  field_contour=field_contour_i, field_filled=field_filled_i,
                                                  xlim=[x_min / 1000, x_max / 1000], ylim=[y_min / 1000, y_max / 1000],
                                                  axes=ax1[0], title=title, **kwargs)

        # Right panel: time series drawn up to the current time, with the
        # current value highlighted by a marker.
        track_variable_past = track_variable_cell[(track_variable_cell['time'] >= time_min) & (track_variable_cell['time'] <= time_i)]
        track_variable_current = track_variable_cell[track_variable_cell['time'] == time_i]

        if variable_color is None:
            variable_color = 'navy'
        # Normalise a single variable name (and colour) to list/dict form;
        # after the first loop iteration these already have the final form.
        if type(variable) is str:
            # logging.debug('variable: '+str(variable))
            if type(variable_color) is str:
                variable_color = {variable: variable_color}
            variable = [variable]

        for i_variable, variable_i in enumerate(variable):
            color = variable_color[variable_i]
            ax1[1].plot(track_variable_past['time_cell'].dt.total_seconds() / 60., track_variable_past[variable_i].values, color=color, linestyle='-', label=variable_label[i_variable])
            ax1[1].plot(track_variable_current['time_cell'].dt.total_seconds() / 60., track_variable_current[variable_i].values, color=color, marker='o', markersize=4, fillstyle='full')
        ax1[1].yaxis.tick_right()
        ax1[1].yaxis.set_label_position("right")
        ax1[1].set_xlim([0, 2 * 60])
        ax1[1].set_xticks(np.arange(0, 120, 15))
        ax1[1].set_ylim([0, max(10, 1.25 * track_variable_cell[variable].max().max())])
        ax1[1].set_xlabel('cell lifetime (min)')
        if variable_ylabel is None:  # fix: was compared with '==' before
            variable_ylabel = variable
        ax1[1].set_ylabel(variable_ylabel)
        ax1[1].set_title(title)

        # Optional legend; switch to two columns for many entries.
        if variable_legend:
            if (len(variable_label) < 5):
                ncol = 1
            else:
                ncol = 2
            ax1[1].legend(loc='upper right', bbox_to_anchor=(1, 1), ncol=ncol, fontsize=8)

        out_dir = os.path.join(plotdir, name)
        os.makedirs(out_dir, exist_ok=True)
        if 'png' in file_format:
            savepath_png = os.path.join(out_dir, name + '_' + datestring_file + '.png')
            fig1.savefig(savepath_png, dpi=dpi)
            logging.debug('Mask static plot saved to ' + savepath_png)
        if 'pdf' in file_format:
            savepath_pdf = os.path.join(out_dir, name + '_' + datestring_file + '.pdf')
            fig1.savefig(savepath_pdf, dpi=dpi)
            logging.debug('Mask static plot saved to ' + savepath_pdf)
        plt.close()
        plt.clf()
| 46.154389 | 219 | 0.614708 |
43278d398c31ca35a7dadee17fca420abdd89662 | 608 | py | Python | api/urls.py | nf1s/covid-backend | 5529cccad2b0b596d8a720fd6211035e6376820f | [
"MIT"
] | null | null | null | api/urls.py | nf1s/covid-backend | 5529cccad2b0b596d8a720fd6211035e6376820f | [
"MIT"
] | 1 | 2020-03-21T16:20:28.000Z | 2020-03-21T16:20:28.000Z | api/urls.py | ahmednafies/covid-backend | 5529cccad2b0b596d8a720fd6211035e6376820f | [
"MIT"
] | null | null | null | from sanic import Blueprint
from sanic_transmute import add_route
from .views import (
get_all,
get_status_by_country_id,
get_status_by_country_name,
get_deaths,
get_active_cases,
get_recovered_cases,
get_confirmed_cases,
list_countries,
)
# Blueprint grouping all case-related endpoints under the /cases URL prefix.
cases = Blueprint("cases", url_prefix="/cases")

# Register each view on the blueprint via sanic_transmute; the concrete
# paths/methods are declared on the view functions themselves.
add_route(cases, get_all)
add_route(cases, get_status_by_country_id)
add_route(cases, get_status_by_country_name)
add_route(cases, get_deaths)
add_route(cases, get_active_cases)
add_route(cases, get_recovered_cases)
add_route(cases, get_confirmed_cases)
add_route(cases, list_countries)
| 26.434783 | 47 | 0.804276 |
4327e63a016b0fdf98132c5f404968581fab3fee | 1,860 | py | Python | scribdl/test/test_download.py | fatshotty/scribd-downloader | d07e301c0a7781cf0b8cf38846061e043e8b86e9 | [
"MIT"
] | 182 | 2019-09-25T18:48:09.000Z | 2022-03-22T01:22:21.000Z | scribdl/test/test_download.py | fatshotty/scribd-downloader | d07e301c0a7781cf0b8cf38846061e043e8b86e9 | [
"MIT"
] | 38 | 2019-09-11T00:51:35.000Z | 2022-03-30T12:05:19.000Z | scribdl/test/test_download.py | fatshotty/scribd-downloader | d07e301c0a7781cf0b8cf38846061e043e8b86e9 | [
"MIT"
] | 83 | 2019-10-11T12:07:29.000Z | 2022-03-31T05:06:47.000Z | from ..downloader import Downloader
import os
import pytest
| 38.75 | 119 | 0.768817 |
432938f7572380d6dce4bd872cd6f38e7889cce7 | 863 | py | Python | app/migrations/0005_auto_20210619_2310.py | hungitptit/boecdjango | a1125bd292b5fd3a0610eda6e592017f8268c96c | [
"MIT"
] | null | null | null | app/migrations/0005_auto_20210619_2310.py | hungitptit/boecdjango | a1125bd292b5fd3a0610eda6e592017f8268c96c | [
"MIT"
] | null | null | null | app/migrations/0005_auto_20210619_2310.py | hungitptit/boecdjango | a1125bd292b5fd3a0610eda6e592017f8268c96c | [
"MIT"
] | null | null | null | # Generated by Django 3.2.4 on 2021-06-19 16:10
from django.db import migrations, models
import django.utils.timezone
| 27.83871 | 116 | 0.602549 |
432a6247ae50ed5ff0d32ef0b60b3d2a095bea22 | 1,441 | py | Python | vision_datasets/common/dataset_registry.py | shonohs/vision-datasets | bdd0ebf5c0c0561486ebb0b96600196b2b89f77c | [
"MIT"
] | null | null | null | vision_datasets/common/dataset_registry.py | shonohs/vision-datasets | bdd0ebf5c0c0561486ebb0b96600196b2b89f77c | [
"MIT"
] | null | null | null | vision_datasets/common/dataset_registry.py | shonohs/vision-datasets | bdd0ebf5c0c0561486ebb0b96600196b2b89f77c | [
"MIT"
] | null | null | null | import copy
import json
from .dataset_info import DatasetInfoFactory
| 36.948718 | 129 | 0.679389 |
432a6cd43a1645c5ef69788411b16a04cd68ac58 | 20,941 | py | Python | yasql/apps/sqlorders/views.py | Fanduzi/YaSQL | bc6366a9b1c1e9ed84fd24ea2b4a21f8f99d0af5 | [
"Apache-2.0"
] | 443 | 2018-02-08T02:53:48.000Z | 2020-10-13T10:01:55.000Z | yasql/apps/sqlorders/views.py | Fanduzi/YaSQL | bc6366a9b1c1e9ed84fd24ea2b4a21f8f99d0af5 | [
"Apache-2.0"
] | 27 | 2020-10-14T10:01:52.000Z | 2022-03-12T00:49:47.000Z | yasql/apps/sqlorders/views.py | Fanduzi/YaSQL | bc6366a9b1c1e9ed84fd24ea2b4a21f8f99d0af5 | [
"Apache-2.0"
] | 148 | 2018-03-15T06:07:25.000Z | 2020-08-17T14:58:45.000Z | # -*- coding:utf-8 -*-
# edit by fuzongfei
import base64
import datetime
# Create your views here.
import json
from django.http import Http404, HttpResponse
from django.utils import timezone
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
from rest_framework.exceptions import PermissionDenied
from rest_framework.generics import ListAPIView, GenericAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView
from rest_framework.views import APIView
from rest_framework.viewsets import ViewSet
from libs import permissions
from libs.Pagination import Pagination
from libs.RenderColumns import render_dynamic_columns
from libs.response import JsonResponseV1
from sqlorders import models, serializers
from sqlorders.filters import SqlOrderListFilter, GetTasksListFilter
| 41.715139 | 111 | 0.633351 |
432ad11a5c271d697e37438e64317a7886323133 | 1,489 | py | Python | perp_adj.py | shmakn99/Knowledge-Graph-VG | ce2b0d6e16199357f1afc4aa7e58f74aae35e023 | [
"MIT"
] | null | null | null | perp_adj.py | shmakn99/Knowledge-Graph-VG | ce2b0d6e16199357f1afc4aa7e58f74aae35e023 | [
"MIT"
] | null | null | null | perp_adj.py | shmakn99/Knowledge-Graph-VG | ce2b0d6e16199357f1afc4aa7e58f74aae35e023 | [
"MIT"
] | null | null | null | import glove_util as gut
import numpy as np
from sklearn.decomposition import TruncatedSVD
import json
# Load the predicate frequency counts and the Visual Genome relationships.
with open('freq_count_pred.json') as f:
    freq_count_pred = json.load(f)
with open('relationships.json') as f:
    relationships = json.load(f)

# First pass: embed every predicate with a frequency-weighted average
# (weighted_avg is defined elsewhere in this file; presumably a smoothed
# average of 300-d GloVe vectors with smoothing parameter 0.001 — TODO confirm).
predicate_embedding = {}
sentences = []
i = 0
for image in relationships:
    i+=1
    if i%1000 == 0:
        print (i)  # progress indicator, every 1000 images
    for relation in image['relationships']:
        w_avg = weighted_avg(relation['predicate'],0.001,300)
        sentences.append(w_avg)
        predicate_embedding[relation['relationship_id']] = w_avg

# Second pass: remove the first principal component of all embeddings
# (get_pc is defined elsewhere; this mirrors the SIF sentence-embedding
# post-processing of Arora et al. — NOTE(review): confirm).
pc = get_pc(np.array(sentences))[0]
projection_space = np.outer(pc,pc)
i = 0
for image in relationships:
    i+=1
    if i%1000 == 0:
        print (i)  # progress indicator, every 1000 images
    for relation in image['relationships']:
        predicate_embedding[relation['relationship_id']] = predicate_embedding[relation['relationship_id']] - np.matmul(projection_space,predicate_embedding[relation['relationship_id']])

# Persist the final per-relationship embeddings.
# NOTE(review): json.dump requires the values to be JSON-serialisable;
# if weighted_avg returns numpy arrays this would raise — verify.
with open('predicate_embedding_300.json','w') as f:
    json.dump(predicate_embedding,f)
| 22.560606 | 181 | 0.725319 |
432b745399b0d0440cefd7ae239847b77b6d7688 | 3,009 | py | Python | crypt.py | ElyTgy/VaultDB | 9eef6f7298d26bd9a18d403971e1c3c6e7a2bf8a | [
"MIT"
] | 2 | 2021-09-27T07:40:21.000Z | 2021-10-04T17:32:40.000Z | crypt.py | ElyTgy/VaultDB | 9eef6f7298d26bd9a18d403971e1c3c6e7a2bf8a | [
"MIT"
] | 3 | 2021-10-01T17:47:20.000Z | 2021-10-21T07:57:13.000Z | crypt.py | ElyTgy/VaultDB | 9eef6f7298d26bd9a18d403971e1c3c6e7a2bf8a | [
"MIT"
] | 3 | 2021-09-26T13:26:05.000Z | 2021-10-22T02:53:20.000Z | # Importing Fernet class
from cryptography.fernet import Fernet
# Importing dump and load function
from pickle import dump,load
# To generate a strong pw
# To get master pw from the file
# To get key from the file
# To store master pw in the file
# Checking if user is running program for first time
# Function to copy pw to clipboard
# Encrypting the text
# Decrypting the text
| 30.393939 | 114 | 0.613825 |
432d72d5f01ae5a38ba02b41cf1e7cf13ab1b0ea | 1,107 | py | Python | oecp/executor/null.py | openeuler-mirror/oecp | 967ed6b9e53f2da5f795f49bb5b5fc0423372863 | [
"MulanPSL-1.0"
] | null | null | null | oecp/executor/null.py | openeuler-mirror/oecp | 967ed6b9e53f2da5f795f49bb5b5fc0423372863 | [
"MulanPSL-1.0"
] | null | null | null | oecp/executor/null.py | openeuler-mirror/oecp | 967ed6b9e53f2da5f795f49bb5b5fc0423372863 | [
"MulanPSL-1.0"
] | null | null | null | # -*- encoding=utf-8 -*-
"""
# **********************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# [oecp] is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
# **********************************************************************************
"""
from oecp.executor.base import CompareExecutor
| 38.172414 | 98 | 0.588076 |
432e74ae233189ec17dd1f03b1127352c4327439 | 1,518 | py | Python | courses/models.py | Biswa5812/CaramelIT-Django-Backend | 1f896cb75295d17345a862b99837f0bdf60868b4 | [
"MIT"
] | 1 | 2021-08-06T08:36:40.000Z | 2021-08-06T08:36:40.000Z | courses/models.py | Biswa5812/CaramelIT-Django-Backend | 1f896cb75295d17345a862b99837f0bdf60868b4 | [
"MIT"
] | 7 | 2021-04-08T21:58:03.000Z | 2022-01-13T03:09:17.000Z | courses/models.py | Biswa5812/CaramelIT-Django-Backend | 1f896cb75295d17345a862b99837f0bdf60868b4 | [
"MIT"
] | 3 | 2020-07-21T07:01:31.000Z | 2021-01-16T10:47:30.000Z | from django.db import models
from django.utils import timezone
# Course Category
# Course Subcategory
# Course
# Course resources
| 42.166667 | 81 | 0.78722 |
432f6dd85dd7a23f729a99a79b5f40586fb8f07f | 2,732 | py | Python | dino/validation/events/message/limit_msg_length.py | thenetcircle/dino | 1047c3458e91a1b4189e9f48f1393b3a68a935b3 | [
"Apache-2.0"
] | 150 | 2016-10-05T11:09:36.000Z | 2022-03-06T16:24:41.000Z | dino/validation/events/message/limit_msg_length.py | thenetcircle/dino | 1047c3458e91a1b4189e9f48f1393b3a68a935b3 | [
"Apache-2.0"
] | 27 | 2017-03-02T03:37:02.000Z | 2022-02-10T04:59:54.000Z | dino/validation/events/message/limit_msg_length.py | thenetcircle/dino | 1047c3458e91a1b4189e9f48f1393b3a68a935b3 | [
"Apache-2.0"
] | 21 | 2016-11-11T07:51:48.000Z | 2020-04-26T21:38:33.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
from yapsy.IPlugin import IPlugin
from activitystreams.models.activity import Activity
from dino import utils
from dino.config import ErrorCodes
from dino.config import ConfigKeys
from dino.environ import GNEnvironment
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
__author__ = 'Oscar Eriksson <oscar.eriks@gmail.com>'
| 36.918919 | 114 | 0.688507 |
432f8e360fc047fc0c5026f477fadfd50ec95d5c | 4,779 | py | Python | zabbix/prom2zabbix.py | tldr-devops/telegraf-monitoring-agent-setup | 1f0b0f658acf9e685c121ffaee658bbe3fbad022 | [
"MIT"
] | null | null | null | zabbix/prom2zabbix.py | tldr-devops/telegraf-monitoring-agent-setup | 1f0b0f658acf9e685c121ffaee658bbe3fbad022 | [
"MIT"
] | null | null | null | zabbix/prom2zabbix.py | tldr-devops/telegraf-monitoring-agent-setup | 1f0b0f658acf9e685c121ffaee658bbe3fbad022 | [
"MIT"
] | 1 | 2022-03-31T20:26:21.000Z | 2022-03-31T20:26:21.000Z | #!/usr/bin/env python
# Script for parsing prometheus metrics format and send it into zabbix server
# MIT License
# https://github.com/Friz-zy/telegraf-monitoring-agent-setup
import re
import os
import sys
import time
import json
import socket
import optparse
try:
from urllib.request import urlopen
except:
from urllib import urlopen
# Per-metric-prefix configuration: 'sort_labels' lists the label names used
# to order/group series. Keys other than 'default' are matched against
# metric name prefixes (e.g. telegraf docker_container_* metrics).
METRICS = {
    'default': {
        'sort_labels': ['name', 'id', 'host', 'path', 'device', 'source', 'cpu'],
    },
    'docker_container_': {
        'sort_labels': ['host', 'source', 'device', 'cpu'],
    },
}

# Script entry point; main() is defined elsewhere in this file.
if __name__ == "__main__":
    main()
| 32.958621 | 122 | 0.514124 |
4331808474e580c548bdad9e356ef4402fccebc7 | 6,239 | py | Python | NAS/run_NAS.py | gatech-sysml/CompOFA | baf561f14a561547ff51933e45f90ddf00cbb3cf | [
"Apache-2.0"
] | 20 | 2021-04-18T09:13:06.000Z | 2022-03-29T03:54:23.000Z | NAS/run_NAS.py | compofa-blind-review/compofa-iclr21 | a97b726f17519e666c6fcdb4ec0b90cfa64d8d9f | [
"Apache-2.0"
] | 2 | 2021-07-02T16:08:17.000Z | 2022-02-16T09:20:47.000Z | NAS/run_NAS.py | compofa-blind-review/compofa-iclr21 | a97b726f17519e666c6fcdb4ec0b90cfa64d8d9f | [
"Apache-2.0"
] | 2 | 2021-09-06T06:48:20.000Z | 2021-12-02T12:11:30.000Z | # CompOFA Compound Once-For-All Networks for Faster Multi-Platform Deployment
# Under blind review at ICLR 2021: https://openreview.net/forum?id=IgIk8RRT-Z
#
# Implementation based on:
# Once for All: Train One Network and Specialize it for Efficient Deployment
# Han Cai, Chuang Gan, Tianzhe Wang, Zhekai Zhang, Song Han
# International Conference on Learning Representations (ICLR), 2020.
import os
import sys
import torch
import time
import math
import copy
import random
import argparse
import torch.nn as nn
import numpy as np
import pandas as pd
from torchvision import transforms, datasets
from matplotlib import pyplot as plt
sys.path.append("..")
from ofa.model_zoo import ofa_net
from ofa.utils import download_url
from accuracy_predictor import AccuracyPredictor
from flops_table import FLOPsTable
from latency_table import LatencyTable
from evolution_finder import EvolutionFinder
from imagenet_eval_helper import evaluate_ofa_subnet, evaluate_ofa_specialized
# Command-line interface: which OFA variant to search, which hardware to
# target, and where ImageNet lives on disk.
parser = argparse.ArgumentParser()
parser.add_argument(
    '-n',
    '--net',
    metavar='OFANET',
    help='OFA network',
    required=True)
parser.add_argument(
    '-t',
    '--target-hardware',
    metavar='TARGET_HARDWARE',
    help='Target Hardware',
    required=True)
parser.add_argument(
    '--imagenet-path',
    metavar='IMAGENET_PATH',
    help='The path of ImageNet',
    type=str,
    required=True)
args = parser.parse_args()

# Map network name -> (architecture family, checkpoint file name).
arch = {'compofa' : ('compofa', 'model_best_compofa_simple.pth.tar'),
        'compofa-elastic' : ('compofa-elastic', 'model_best_compofa_simple_elastic.pth.tar'),
        'ofa_mbv3_d234_e346_k357_w1.0' : ('ofa', 'ofa_mbv3_d234_e346_k357_w1.0'),
       }
# Latency constraints (ms) swept during the search, per target device.
hardware_latency = {'note10' : [15, 20, 25, 30],
                    'gpu' : [15, 25, 35, 45],
                    'cpu' : [12, 15, 18, 21]}
MODEL_DIR = '../ofa/checkpoints/%s' % (arch[args.net][1])
imagenet_data_path = args.imagenet_path
# imagenet_data_path = '/srv/data/datasets/ImageNet/'

# set random seed (Python, NumPy and PyTorch) for reproducible search results
random_seed = 3
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
print('Successfully imported all packages and configured random seed to %d!'%random_seed)

# Pin to GPU 0 and seed CUDA when a GPU is available.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
cuda_available = torch.cuda.is_available()
if cuda_available:
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.cuda.manual_seed(random_seed)
    print('Using GPU.')
else:
    print('Using CPU.')

# Initialize the OFA Network from the selected checkpoint; keep it on CPU
# only when the target hardware itself is 'cpu'.
ofa_network = ofa_net(args.net, model_dir=MODEL_DIR, pretrained=True)
if args.target_hardware == 'cpu':
    ofa_network = ofa_network.cpu()
else:
    ofa_network = ofa_network.cuda()
print('The OFA Network is ready.')
# Carry out data transforms and build the ImageNet validation loader.
# build_val_transform is defined elsewhere in this file. Evaluation is
# skipped entirely when no GPU is present.
if cuda_available:
    data_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(
            root=os.path.join(imagenet_data_path, 'val'),
            transform=build_val_transform(224)
        ),
        batch_size=250, # test batch size
        shuffle=True,
        num_workers=16, # number of workers for the data loader
        pin_memory=True,
        drop_last=False,
    )
    print('The ImageNet dataloader is ready.')
else:
    data_loader = None
    print('Since GPU is not found in the environment, we skip all scripts related to ImageNet evaluation.')

# set up the accuracy predictor (pretrained surrogate model used by the
# evolutionary search instead of evaluating every candidate on ImageNet)
accuracy_predictor = AccuracyPredictor(
    pretrained=True,
    device='cuda:0' if cuda_available else 'cpu'
)
print('The accuracy predictor is ready!')
print(accuracy_predictor.model)

# set up the latency table; only the note10 target has a precomputed table
target_hardware = args.target_hardware
use_latency_table = True if target_hardware == 'note10' else False
latency_table = LatencyTable(device=target_hardware,
                             use_latency_table=use_latency_table,
                             network=args.net)

""" Hyper-parameters for the evolutionary search process
    You can modify these hyper-parameters to see how they influence the final ImageNet accuracy of the search sub-net.
"""
latency_constraint = hardware_latency[args.target_hardware][0] # ms
P = 100 # The size of population in each generation
N = 500 # How many generations of population to be searched
r = 0.25 # The ratio of networks that are used as parents for next generation
params = {
    'constraint_type': target_hardware, # Let's do FLOPs-constrained search
    'efficiency_constraint': latency_constraint,
    'mutate_prob': 0.1, # The probability of mutation in evolutionary search
    'mutation_ratio': 0.5, # The ratio of networks that are generated through mutation in generation n >= 2.
    'efficiency_predictor': latency_table, # To use a predefined efficiency predictor.
    'accuracy_predictor': accuracy_predictor, # To use a predefined accuracy_predictor predictor.
    'population_size': P,
    'max_time_budget': N,
    'parent_ratio': r,
    'arch' : arch[args.net][0],
}

# initialize the evolution finder and run NAS once per latency constraint,
# keeping the best architecture found for each constraint
finder = EvolutionFinder(**params)
result_lis = []
for latency in hardware_latency[args.target_hardware]:
    finder.set_efficiency_constraint(latency)
    best_valids, best_info = finder.run_evolution_search()
    result_lis.append(best_info)
print("NAS Completed!")

# evaluate the searched model on ImageNet (GPU only) and collect
# (config, top-1 accuracy, latency) triples
models = []
if cuda_available:
    for result in result_lis:
        _, net_config, latency = result
        print('Evaluating the sub-network with latency = %.1f ms on %s' % (latency, target_hardware))
        top1 = evaluate_ofa_subnet(
            ofa_network,
            imagenet_data_path,
            net_config,
            data_loader,
            batch_size=250,
            device='cuda:0' if cuda_available else 'cpu')
        models.append([net_config, top1, latency])
# Persist the results (empty table when evaluation was skipped).
df = pd.DataFrame(models, columns=['Model', 'Accuracy', 'Latency'])
df.to_csv('NAS_results.csv')
print('NAS results saved to NAS_results.csv')
| 34.28022 | 118 | 0.703478 |
4331f36c8fbfd4af3f45057825bf7f902a91aa4d | 2,911 | py | Python | application/model/radar_score_20180117/score_calculate.py | ace-gabriel/chrome-extension | be0b7d7278f56f8218be7f734b3fb1e05a4f3eb9 | [
"MIT"
] | 4 | 2018-09-11T22:27:55.000Z | 2018-11-16T22:54:14.000Z | application/model/radar_score_20180117/score_calculate.py | ace-gabriel/chrome-extension | be0b7d7278f56f8218be7f734b3fb1e05a4f3eb9 | [
"MIT"
] | null | null | null | application/model/radar_score_20180117/score_calculate.py | ace-gabriel/chrome-extension | be0b7d7278f56f8218be7f734b3fb1e05a4f3eb9 | [
"MIT"
] | null | null | null | # coding: utf-8
import pickle
# import json
# import types
# Base directory for the model artefacts loaded by this module.
path = 'application/model/radar_score_20180117/'


if __name__ == '__main__':
    # README
    # NOTE(review): Python 2 print statement — this file will not run under
    # Python 3 as-is.
    print "This is a program calculating house's 5 scores:" \
          "Anti Drop Score," \
          "House Appreciation," \
          "Possess Cost," \
          "Long-term Income" \
          "Short-term Income"
| 27.72381 | 103 | 0.564411 |
43335b3cc2cb4c21d4856a039a41d9b440f02982 | 951 | py | Python | Dominant_cell.py | xi6th/Python_Algorithm | 05852b6fe133df2d83ae464b779b0818b173919d | [
"MIT"
] | null | null | null | Dominant_cell.py | xi6th/Python_Algorithm | 05852b6fe133df2d83ae464b779b0818b173919d | [
"MIT"
] | null | null | null | Dominant_cell.py | xi6th/Python_Algorithm | 05852b6fe133df2d83ae464b779b0818b173919d | [
"MIT"
] | null | null | null | #!/bin/python3
import math
import os
import random
import re
import sys
from typing import Counter
#
# Complete the 'numCells' function below.
#
# The function is expected to return an INTEGER.
# The function accepts 2D_INTEGER_ARRAY grid as parameter.
#
# Sample input for numCells (defined elsewhere in this file; presumably
# counts "dominant" cells greater than all neighbours — TODO confirm).
grid = [[1, 2, 7], [4, 5, 6], [8, 8, 9]]
print(numCells(grid))
# if __name__ == '__main__':
# fptr = open(os.environ['OUTPUT_PATH'], 'w')
# grid_rows = int(input().strip())
# grid_columns = int(input().strip())
# grid = []
# for _ in range(grid_rows):
# grid.append(list(map(int, input().rstrip().split())))
# result = numCells(grid)
# fptr.write(str(result) + '\n')
# fptr.close()
| 19.8125 | 63 | 0.602524 |
43338fccc231cf2b75bc14f3df4523f468ef4c58 | 347 | py | Python | evetool/urls.py | Sult/evetool | 155db9f3b0ecc273fe3c75daf8f9c6f37cb3e47f | [
"MIT"
] | null | null | null | evetool/urls.py | Sult/evetool | 155db9f3b0ecc273fe3c75daf8f9c6f37cb3e47f | [
"MIT"
] | null | null | null | evetool/urls.py | Sult/evetool | 155db9f3b0ecc273fe3c75daf8f9c6f37cb3e47f | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
# Root URL configuration: delegate everything to the users and apis apps,
# and serve static files from STATIC_ROOT (development-style serving).
urlpatterns = [
    # Examples:
    # url(r'^$', 'evetool.views.home', name='home'),
    url(r'^', include('users.urls')),
    url(r'^', include('apis.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 31.545455 | 67 | 0.691643 |
433408402a1699c513f68c745b4d958c3d3e01cc | 375 | py | Python | actvenv.py | lastone9182/console-keep | 250b49653be9d370a1bb0f1c39c5f853c2eaa47e | [
"MIT"
] | null | null | null | actvenv.py | lastone9182/console-keep | 250b49653be9d370a1bb0f1c39c5f853c2eaa47e | [
"MIT"
] | null | null | null | actvenv.py | lastone9182/console-keep | 250b49653be9d370a1bb0f1c39c5f853c2eaa47e | [
"MIT"
] | null | null | null | import os
# virtualenv
SCRIPTDIR = os.path.realpath(os.path.dirname(__file__))
venv_name = '_ck'
osdir = 'Scripts' if os.name is 'nt' else 'bin'
venv = os.path.join(venv_name, osdir, 'activate_this.py')
activate_this = (os.path.join(SCRIPTDIR, venv))
# Python 3: exec(open(...).read()), Python 2: execfile(...)
exec(open(activate_this).read(), dict(__file__=activate_this)) | 34.090909 | 62 | 0.714667 |
43342d0254660446a56231ce55513c2e38b5ae8e | 1,036 | py | Python | testing/scripts/checklicenses.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | testing/scripts/checklicenses.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | testing/scripts/checklicenses.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import common
# Dispatch table consumed by the recipe runner: 'run' executes the check,
# 'compile_targets' reports build targets. main_run/main_compile_targets and
# the common helper are defined elsewhere in this file / package.
if __name__ == '__main__':
  funcs = {
      'run': main_run,
      'compile_targets': main_compile_targets,
  }
  sys.exit(common.run_script(sys.argv[1:], funcs))
| 22.042553 | 72 | 0.655405 |
43352b59b8e176e10113ef95c3a83be9ee114213 | 2,139 | py | Python | autoPyTorch/utils/benchmarking/benchmark_pipeline/for_autonet_config.py | gaohuan2015/Auto-PyTorch | 3c6bf7e051b32284d2655cc484aee1a8c982c04e | [
"Apache-2.0"
] | 1 | 2019-11-19T12:22:46.000Z | 2019-11-19T12:22:46.000Z | autoPyTorch/utils/benchmarking/benchmark_pipeline/for_autonet_config.py | gaohuan2015/Auto-PyTorch | 3c6bf7e051b32284d2655cc484aee1a8c982c04e | [
"Apache-2.0"
] | null | null | null | autoPyTorch/utils/benchmarking/benchmark_pipeline/for_autonet_config.py | gaohuan2015/Auto-PyTorch | 3c6bf7e051b32284d2655cc484aee1a8c982c04e | [
"Apache-2.0"
] | null | null | null |
from autoPyTorch.utils.config.config_option import ConfigOption
from autoPyTorch.pipeline.base.sub_pipeline_node import SubPipelineNode
import traceback | 41.941176 | 102 | 0.622721 |
43355f2d68e669881638faa623ef2c93af39b15e | 913 | py | Python | csv/query_csv.py | RobustPerception/python_examples | c79e8f4745fe255fc327e31e96a2065dedca23c1 | [
"Apache-2.0"
] | 31 | 2016-03-14T09:48:02.000Z | 2020-08-12T18:23:47.000Z | csv/query_csv.py | RobustPerception/python_examples | c79e8f4745fe255fc327e31e96a2065dedca23c1 | [
"Apache-2.0"
] | 2 | 2018-05-24T11:18:58.000Z | 2021-10-03T09:57:37.000Z | csv/query_csv.py | RobustPerception/python_examples | c79e8f4745fe255fc327e31e96a2065dedca23c1 | [
"Apache-2.0"
] | 27 | 2016-04-14T17:46:48.000Z | 2021-10-03T08:51:11.000Z | import csv
import requests
import sys
"""
A simple program to print the result of a Prometheus query as CSV.
"""
# Usage check: argv[1] is the Prometheus base URL, argv[2] the PromQL query.
if len(sys.argv) != 3:
    print('Usage: {0} http://prometheus:9090 a_query'.format(sys.argv[0]))
    sys.exit(1)

# Run an instant query; 'result' holds one entry per series, each with a
# 'metric' label map and a 'value' pair of [timestamp, value].
response = requests.get('{0}/api/v1/query'.format(sys.argv[1]),
                        params={'query': sys.argv[2]})
results = response.json()['data']['result']

# Build a list of all labelnames used.
labelnames = set()
for result in results:
    labelnames.update(result['metric'].keys())

# Canonicalize: drop the metric name (printed as its own column) and fix
# the column order.
labelnames.discard('__name__')
labelnames = sorted(labelnames)

writer = csv.writer(sys.stdout)

# Write the header,
writer.writerow(['name', 'timestamp', 'value'] + labelnames)

# Write the samples; missing labels become empty cells.
for result in results:
    l = [result['metric'].get('__name__', '')] + result['value']
    for label in labelnames:
        l.append(result['metric'].get(label, ''))
    writer.writerow(l)
43369a6ebfc0d1acdeab1dc4fb9b48324cf2ec3d | 4,696 | py | Python | vehicle/tests.py | COS301-SE-2020/ctrlintelligencecapstone | ddfc92408ed296c6bf64b2dd071b948a1446ede8 | [
"MIT"
] | null | null | null | vehicle/tests.py | COS301-SE-2020/ctrlintelligencecapstone | ddfc92408ed296c6bf64b2dd071b948a1446ede8 | [
"MIT"
] | null | null | null | vehicle/tests.py | COS301-SE-2020/ctrlintelligencecapstone | ddfc92408ed296c6bf64b2dd071b948a1446ede8 | [
"MIT"
] | 1 | 2021-05-18T02:53:10.000Z | 2021-05-18T02:53:10.000Z | from rest_framework.test import APITestCase
from rest_framework.test import APIRequestFactory
import requests
import pytest
import json
from django.core.management import call_command
from django.db.models.signals import pre_save, post_save, pre_delete, post_delete, m2m_changed
from rest_framework.test import APIClient
# Create your tests here.
# @pytest.fixture(autouse=True)
# def django_db_setup(django_db_setup, django_db_blocker):
# signals = [pre_save, post_save, pre_delete, post_delete, m2m_changed]
# restore = {}
# with django_db_blocker.unblock():
# call_command("loaddata", "test_stuff.json")
| 23.48 | 118 | 0.5773 |
4336c7b257868aa7e53dc95e1f352acf6bc002a4 | 175 | py | Python | simple_exercises/lanesexercises/py_functions2/rep_ex3.py | ilante/programming_immanuela_englander | 45d51c99b09ae335a67e03ac5ea79fc775bdf0bd | [
"MIT"
] | null | null | null | simple_exercises/lanesexercises/py_functions2/rep_ex3.py | ilante/programming_immanuela_englander | 45d51c99b09ae335a67e03ac5ea79fc775bdf0bd | [
"MIT"
] | null | null | null | simple_exercises/lanesexercises/py_functions2/rep_ex3.py | ilante/programming_immanuela_englander | 45d51c99b09ae335a67e03ac5ea79fc775bdf0bd | [
"MIT"
] | null | null | null |
# 3. Define a function to check whether a number is even
print(even(4))
print(even(-5))
| 15.909091 | 56 | 0.6 |
4337ba6700b6f7409e4f2ff2a13fe2038bd8af6e | 4,229 | py | Python | book_figures/chapter5/fig_posterior_cauchy.py | aragilar/astroML | d3f6279eb632957662338761cb559a1dcd541fb0 | [
"BSD-2-Clause"
] | 3 | 2017-02-23T07:59:15.000Z | 2021-01-16T18:49:32.000Z | book_figures/chapter5/fig_posterior_cauchy.py | aragilar/astroML | d3f6279eb632957662338761cb559a1dcd541fb0 | [
"BSD-2-Clause"
] | null | null | null | book_figures/chapter5/fig_posterior_cauchy.py | aragilar/astroML | d3f6279eb632957662338761cb559a1dcd541fb0 | [
"BSD-2-Clause"
] | 1 | 2021-01-16T18:49:36.000Z | 2021-01-16T18:49:36.000Z | """
Posterior for Cauchy Distribution
---------------------------------
Figure 5.11
The solid lines show the posterior pdf :math:`p(\mu|{x_i},I)` (top-left panel)
and the posterior pdf :math:`p(\gamma|{x_i},I)` (top-right panel) for the
two-dimensional pdf from figure 5.10. The dashed lines show the distribution
of approximate estimates of :math:`\mu` and :math:`\gamma` based on the median
and interquartile range. The bottom panels show the corresponding cumulative
distributions.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import cauchy
from astroML.stats import median_sigmaG
from astroML.resample import bootstrap
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def cauchy_logL(x, gamma, mu):
"""Equation 5.74: cauchy likelihood"""
x = np.asarray(x)
n = x.size
# expand x for broadcasting
shape = np.broadcast(gamma, mu).shape
x = x.reshape(x.shape + tuple([1 for s in shape]))
return ((n - 1) * np.log(gamma)
- np.sum(np.log(gamma ** 2 + (x - mu) ** 2), 0))
def estimate_mu_gamma(xi, axis=None):
"""Equation 3.54: Cauchy point estimates"""
q25, q50, q75 = np.percentile(xi, [25, 50, 75], axis=axis)
return q50, 0.5 * (q75 - q25)
#------------------------------------------------------------
# Draw a random sample from the cauchy distribution, and compute
# marginalized posteriors of mu and gamma
np.random.seed(44)
n = 10
mu_0 = 0
gamma_0 = 2
xi = cauchy(mu_0, gamma_0).rvs(n)
gamma = np.linspace(0.01, 5, 70)
dgamma = gamma[1] - gamma[0]
mu = np.linspace(-3, 3, 70)
dmu = mu[1] - mu[0]
likelihood = np.exp(cauchy_logL(xi, gamma[:, np.newaxis], mu))
pmu = likelihood.sum(0)
pmu /= pmu.sum() * dmu
pgamma = likelihood.sum(1)
pgamma /= pgamma.sum() * dgamma
#------------------------------------------------------------
# bootstrap estimate
mu_bins = np.linspace(-3, 3, 21)
gamma_bins = np.linspace(0, 5, 17)
mu_bootstrap, gamma_bootstrap = bootstrap(xi, 20000, estimate_mu_gamma,
kwargs=dict(axis=1), random_state=0)
#------------------------------------------------------------
# Plot results
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(wspace=0.35, right=0.95,
hspace=0.2, top=0.95)
# first axes: mu posterior
ax1 = fig.add_subplot(221)
ax1.plot(mu, pmu, '-k')
ax1.hist(mu_bootstrap, mu_bins, normed=True,
histtype='step', color='b', linestyle='dashed')
ax1.set_xlabel(r'$\mu$')
ax1.set_ylabel(r'$p(\mu|x,I)$')
# second axes: mu cumulative posterior
ax2 = fig.add_subplot(223, sharex=ax1)
ax2.plot(mu, pmu.cumsum() * dmu, '-k')
ax2.hist(mu_bootstrap, mu_bins, normed=True, cumulative=True,
histtype='step', color='b', linestyle='dashed')
ax2.set_xlabel(r'$\mu$')
ax2.set_ylabel(r'$P(<\mu|x,I)$')
ax2.set_xlim(-3, 3)
# third axes: gamma posterior
ax3 = fig.add_subplot(222, sharey=ax1)
ax3.plot(gamma, pgamma, '-k')
ax3.hist(gamma_bootstrap, gamma_bins, normed=True,
histtype='step', color='b', linestyle='dashed')
ax3.set_xlabel(r'$\gamma$')
ax3.set_ylabel(r'$p(\gamma|x,I)$')
ax3.set_ylim(-0.05, 1.1)
# fourth axes: gamma cumulative posterior
ax4 = fig.add_subplot(224, sharex=ax3, sharey=ax2)
ax4.plot(gamma, pgamma.cumsum() * dgamma, '-k')
ax4.hist(gamma_bootstrap, gamma_bins, normed=True, cumulative=True,
histtype='step', color='b', linestyle='dashed')
ax4.set_xlabel(r'$\gamma$')
ax4.set_ylabel(r'$P(<\gamma|x,I)$')
ax4.set_ylim(-0.05, 1.1)
ax4.set_xlim(0, 4)
plt.show()
| 32.782946 | 79 | 0.64105 |
4337eb54a2cf6f8bdc85fd9f00b9444d1da0bf1a | 9,090 | py | Python | plaso/formatters/file_system.py | SamuelePilleri/plaso | f5687f12a89c7309797ccc285da78e855c120579 | [
"Apache-2.0"
] | null | null | null | plaso/formatters/file_system.py | SamuelePilleri/plaso | f5687f12a89c7309797ccc285da78e855c120579 | [
"Apache-2.0"
] | null | null | null | plaso/formatters/file_system.py | SamuelePilleri/plaso | f5687f12a89c7309797ccc285da78e855c120579 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""The file system stat event formatter."""
from __future__ import unicode_literals
from dfvfs.lib import definitions as dfvfs_definitions
from plaso.formatters import interface
from plaso.formatters import manager
from plaso.lib import errors
manager.FormattersManager.RegisterFormatters([
FileStatEventFormatter, NTFSFileStatEventFormatter,
NTFSUSNChangeEventFormatter])
| 33.791822 | 79 | 0.706931 |
433a593c55202319269a697379cad0ea0390e623 | 555 | py | Python | applications/serializers.py | junlegend/back-landing-career | cfc01b439629e48ff058fa1693af8d5a3a37949a | [
"MIT"
] | null | null | null | applications/serializers.py | junlegend/back-landing-career | cfc01b439629e48ff058fa1693af8d5a3a37949a | [
"MIT"
] | null | null | null | applications/serializers.py | junlegend/back-landing-career | cfc01b439629e48ff058fa1693af8d5a3a37949a | [
"MIT"
] | null | null | null | from rest_framework import serializers
from applications.models import Application | 32.647059 | 86 | 0.736937 |
433b76089cf8c989828e437cbbad09a9205ff737 | 8,440 | py | Python | qualtrics_iat/qualtrics_tools.py | ycui1/QualtricsIAT | c81b12e2669e1e58b4653e85c0d22ac5a821b174 | [
"MIT"
] | null | null | null | qualtrics_iat/qualtrics_tools.py | ycui1/QualtricsIAT | c81b12e2669e1e58b4653e85c0d22ac5a821b174 | [
"MIT"
] | null | null | null | qualtrics_iat/qualtrics_tools.py | ycui1/QualtricsIAT | c81b12e2669e1e58b4653e85c0d22ac5a821b174 | [
"MIT"
] | null | null | null | from pathlib import Path
import requests
from requests_toolbelt.multipart.encoder import MultipartEncoder
# api_token = "iNKzBVNVAoTMhwnT2amhZRAP4dTBjkEVw9AbpRWg"
# brand_center = "mdanderson.co1"
# data_center = "iad1"
# headers = {"x-api-token": api_token}
def upload_images_web(self,
image_files,
library_id,
creating_full_url,
qualtrics_folder,
image_type):
"""Upload images from the web app to the Qualtrics server
:param image_files: Bytes, the uploaded bytes data from the web app
:param library_id: str, Qualtrics library ID number
:param creating_full_url: bool, whether returns the IDs only or the full URLs
:param qualtrics_folder: str, the Qualtrics Graphics folder for the uploaded images
:param image_type: str, the image file type
:return list[str], the list of image IDs or URLs
"""
image_urls = list()
upload_url = f"{self.api_base_url}/libraries/{library_id}/graphics"
file_count_digit = len(str(len(image_files)))
for file_i, file in enumerate(image_files, start=1):
encoded_fields = {'file': (f"image{file_i:0>{file_count_digit}}.{image_type}", file, f'image/{image_type}')}
image_url_id = self._upload_image(encoded_fields, qualtrics_folder, upload_url, file, creating_full_url)
image_urls.append(image_url_id)
return image_urls
def delete_images(self, library_id, image_url_ids):
"""Delete images from the specified library
:param library_id: str, the library ID number
:param image_url_ids: list[str], the image IDs or full URLs
:return dict, the deletion report"""
report = dict()
for image_url_id in image_url_ids:
if image_url_id.find("=") > 0:
image_url_id = image_url_id[image_url_id.index("=") + 1:]
url = f'{self.api_base_url}/libraries/{library_id}/graphics/{image_url_id}'
delete_response = requests.delete(url, headers=self.api_headers)
try:
http_status = delete_response.json()['meta']['httpStatus']
except KeyError:
raise Exception(f"Failed to delete image: {image_url_id}")
else:
report[image_url_id] = "Deleted" if http_status.startswith('200') else "Error"
return report
def create_survey(self, template_json):
"""Create the survey using the JSON template
:param template_json: str in the JSON format, the JSON file for the qsf file
:return str, the created Survey ID number
"""
upload_url = f"{self.api_base_url}/survey-definitions"
creation_response = requests.post(
upload_url,
json=template_json,
headers={**self.api_headers, "content-type": "application/json"}
)
try:
survey_id = creation_response.json()['result']['SurveyID']
except KeyError:
raise Exception("Couldn't create the survey. Please check the params.")
return survey_id
def delete_survey(self, survey_id):
"""Delete the survey
:param survey_id: str, the survey ID number
:return dict, the deletion report
"""
report = dict()
delete_url = f"{self.api_base_url}/survey-definitions/{survey_id}"
delete_response = requests.delete(delete_url, headers=self.api_headers)
try:
http_status = delete_response.json()['meta']['httpStatus']
except KeyError:
raise Exception(f"Failed to delete survey: {survey_id}")
else:
report[survey_id] = "Deleted" if http_status.startswith('200') else "Error"
return report
def export_responses(self, survey_id, file_format="csv", data_folder=None):
"""Export responses from the Qualtrics survey"""
download_url = f"{self.api_base_url}/surveys/{survey_id}/export-responses/"
download_payload = f'{{"format": "{file_format}"}}'
download_response = requests.post(
download_url,
data=download_payload,
headers={**self.api_headers, "content-type": "application/json"}
)
try:
progress_id = download_response.json()["result"]["progressId"]
file_id = self._monitor_progress(download_url, progress_id)
file_content = self._download_file(download_url, file_id)
except KeyError:
raise Exception("Can't download the responses. Please check the params.")
return file_content
| 45.621622 | 120 | 0.632464 |
433c71e69aaf2d22844233c421ede8abdf861e77 | 241 | py | Python | linter.py | dndrsn/SublimeLinter-contrib-cspell | ba2335a9282335e52282ee93f3bb2a55f9536984 | [
"MIT"
] | null | null | null | linter.py | dndrsn/SublimeLinter-contrib-cspell | ba2335a9282335e52282ee93f3bb2a55f9536984 | [
"MIT"
] | null | null | null | linter.py | dndrsn/SublimeLinter-contrib-cspell | ba2335a9282335e52282ee93f3bb2a55f9536984 | [
"MIT"
] | null | null | null | from SublimeLinter.lint import Linter, STREAM_STDOUT
| 26.777778 | 67 | 0.618257 |
433e58236c454031e14219f73017c1003e0c9d8b | 238 | py | Python | metal/gdb/__init__.py | cHemingway/test | 7fcbd56ad6fe5368b927ea146363bf3d69cd7617 | [
"Apache-2.0"
] | 24 | 2020-07-24T18:33:58.000Z | 2022-03-23T21:00:19.000Z | metal/gdb/__init__.py | cHemingway/test | 7fcbd56ad6fe5368b927ea146363bf3d69cd7617 | [
"Apache-2.0"
] | 4 | 2020-07-27T05:44:26.000Z | 2021-09-02T16:05:47.000Z | metal/gdb/__init__.py | cHemingway/test | 7fcbd56ad6fe5368b927ea146363bf3d69cd7617 | [
"Apache-2.0"
] | 1 | 2020-07-25T15:13:20.000Z | 2020-07-25T15:13:20.000Z | from metal.gdb.metal_break import Breakpoint, MetalBreakpoint
from metal.gdb.exitcode import ExitBreakpoint
from metal.gdb.timeout import Timeout
from metal.gdb.newlib import NewlibBreakpoints
from metal.gdb.argv import ArgvBreakpoint
| 26.444444 | 61 | 0.852941 |
433fe053f9b13b1595ca272851794d156b8d5378 | 11,693 | py | Python | portfolio/gui/tabresults/righttable.py | timeerr/portfolio | 256032eb638048f3cd3c824f2bb4976a8ec320b1 | [
"MIT"
] | null | null | null | portfolio/gui/tabresults/righttable.py | timeerr/portfolio | 256032eb638048f3cd3c824f2bb4976a8ec320b1 | [
"MIT"
] | null | null | null | portfolio/gui/tabresults/righttable.py | timeerr/portfolio | 256032eb638048f3cd3c824f2bb4976a8ec320b1 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from datetime import datetime
from PyQt5.QtWidgets import QTableWidgetItem, QTableWidget, QAbstractItemView, QMenu, QMessageBox
from PyQt5.QtGui import QCursor
from PyQt5.QtCore import Qt, pyqtSignal, QObject
from portfolio.db.fdbhandler import results, strategies, balances
def updatingdata(func):
"""
Decorator to flag self.updatingdata_flag whenever a function
that edits data without user intervention is being run
"""
return wrapper
| 39.107023 | 120 | 0.584281 |
434140c6bb3287e6ed3f82da31b35ca3a7bbad65 | 451 | py | Python | setup.py | NikolaiT/proxychecker | cd6a024668826c415f91e909c98e4110ffc8c10d | [
"BSD-3-Clause"
] | 1 | 2015-02-24T06:30:12.000Z | 2015-02-24T06:30:12.000Z | setup.py | NikolaiT/proxychecker | cd6a024668826c415f91e909c98e4110ffc8c10d | [
"BSD-3-Clause"
] | null | null | null | setup.py | NikolaiT/proxychecker | cd6a024668826c415f91e909c98e4110ffc8c10d | [
"BSD-3-Clause"
] | 2 | 2015-03-19T11:30:49.000Z | 2020-03-29T12:08:01.000Z | #!/usr/bin/env python
from distutils.core import setup
VERSION = "0.0.1"
setup(
author='Nikolai Tschacher',
name = "proxychecker",
version = VERSION,
description = "A Python proxychecker module that makes use of socks",
url = "http://incolumitas.com",
license = "BSD",
author_email = "admin@incolumitas.com",
keywords = ["socks", "proxy", "proxychecker"],
py_modules = ['proxychecker', 'sockshandler', 'socks']
)
| 26.529412 | 73 | 0.656319 |
43468039289e0d25ecbf534436703bc05e6e79e6 | 5,156 | py | Python | python/app/plugins/http/Struts2/S2_052.py | taomujian/linbing | fe772a58f41e3b046b51a866bdb7e4655abaf51a | [
"MIT"
] | 351 | 2020-02-26T05:23:26.000Z | 2022-03-26T12:39:19.000Z | python/app/plugins/http/Struts2/S2_052.py | taomujian/linbing | fe772a58f41e3b046b51a866bdb7e4655abaf51a | [
"MIT"
] | 15 | 2020-03-26T07:31:49.000Z | 2022-03-09T02:12:17.000Z | python/app/plugins/http/Struts2/S2_052.py | taomujian/linbing | fe772a58f41e3b046b51a866bdb7e4655abaf51a | [
"MIT"
] | 99 | 2020-02-28T07:30:46.000Z | 2022-03-16T16:41:09.000Z | #!/usr/bin/env python3
from app.lib.utils.request import request
from app.lib.utils.encode import base64encode
from app.lib.utils.common import get_capta, get_useragent
if __name__ == "__main__":
S2_052 = S2_052_BaseVerify('http://127.0.0.1:8088/struts2_rest_showcase_war_exploded/orders/3') | 48.641509 | 138 | 0.413693 |
4346e00af4df20f2f609af7be11fe806991cbce3 | 905 | py | Python | UPD/extension/utils.py | RIDCorix/UPD | 8694d119181a4afffafbfbab510f697399c1ea13 | [
"MIT"
] | null | null | null | UPD/extension/utils.py | RIDCorix/UPD | 8694d119181a4afffafbfbab510f697399c1ea13 | [
"MIT"
] | null | null | null | UPD/extension/utils.py | RIDCorix/UPD | 8694d119181a4afffafbfbab510f697399c1ea13 | [
"MIT"
] | null | null | null | import sys
# def get_tools():
# manager = PluginManager()
# manager.setPluginPlaces(["plugins/file_cabinet"])
# manager.collectPlugins()
# return [plugin.plugin_object for plugin in manager.getAllPlugins()]
| 34.807692 | 85 | 0.654144 |
4346fdc0a3d3d41ed572ed723800bf5f1dc198ab | 1,574 | py | Python | sbin/preload_findit_coverage_2.py | cariaso/metapub | bfa361dd6e5de8ee0859e596d490fb478f7dcfba | [
"Apache-2.0"
] | 28 | 2019-09-09T08:12:31.000Z | 2021-12-17T00:09:14.000Z | sbin/preload_findit_coverage_2.py | cariaso/metapub | bfa361dd6e5de8ee0859e596d490fb478f7dcfba | [
"Apache-2.0"
] | 33 | 2019-11-07T05:36:04.000Z | 2022-01-29T01:14:57.000Z | sbin/preload_findit_coverage_2.py | cariaso/metapub | bfa361dd6e5de8ee0859e596d490fb478f7dcfba | [
"Apache-2.0"
] | 10 | 2019-09-09T10:04:05.000Z | 2021-06-08T16:00:14.000Z | from __future__ import absolute_import, print_function, unicode_literals
# "preload" for FindIt #2: iterate over same journal list, but actually
# load a PubMedArticle object on each PMID. (no list output created)
from metapub import FindIt, PubMedFetcher
from metapub.findit.dances import the_doi_2step
from config import JOURNAL_ISOABBR_LIST_FILENAME
fetch = PubMedFetcher()
if __name__ == '__main__':
main()
| 30.269231 | 96 | 0.628971 |
434716a29a916c0a3138b2d8297566e972c6c138 | 7,640 | py | Python | sgcache/control.py | vfxetc/sgcache | 670bfac2904373e19c2dac7504d2d7f87018833d | [
"BSD-3-Clause"
] | 13 | 2017-09-06T21:48:57.000Z | 2022-02-08T20:50:52.000Z | sgcache/control.py | vfxetc/sgcache | 670bfac2904373e19c2dac7504d2d7f87018833d | [
"BSD-3-Clause"
] | 1 | 2021-04-04T18:07:04.000Z | 2021-04-04T18:07:04.000Z | sgcache/control.py | vfxetc/sgcache | 670bfac2904373e19c2dac7504d2d7f87018833d | [
"BSD-3-Clause"
] | 1 | 2019-07-19T01:23:19.000Z | 2019-07-19T01:23:19.000Z | from __future__ import absolute_import
from select import select
import errno
import functools
import itertools
import json
import logging
import os
import socket
import threading
import time
import traceback
log = logging.getLogger(__name__)
from .utils import makedirs, unlink
base_handlers = {
'ping': lambda control, msg: {'type': 'pong', 'pid': os.getpid()}
}
| 27.383513 | 109 | 0.534817 |
434721dba4ee0af8165b368cf20f7e199d6dcfdf | 502 | py | Python | lantz/drivers/tektronix/tds1002b.py | mtsolmn/lantz-drivers | f48caf9000ddd08f2abb837d832e341410af4788 | [
"BSD-3-Clause"
] | 4 | 2019-05-04T00:10:53.000Z | 2020-10-22T18:08:40.000Z | lantz/drivers/tektronix/tds1002b.py | mtsolmn/lantz-drivers | f48caf9000ddd08f2abb837d832e341410af4788 | [
"BSD-3-Clause"
] | 3 | 2019-07-12T13:44:17.000Z | 2020-10-22T19:32:08.000Z | lantz/drivers/tektronix/tds1002b.py | mtsolmn/lantz-drivers | f48caf9000ddd08f2abb837d832e341410af4788 | [
"BSD-3-Clause"
] | 9 | 2019-04-03T17:07:03.000Z | 2021-02-15T21:53:55.000Z | # -*- coding: utf-8 -*-
"""
lantz.drivers.tektronix.tds1012
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements the drivers to control an oscilloscope.
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from lantz.core import Feat, MessageBasedDriver
| 22.818182 | 68 | 0.633466 |
4348293155a11622c60c701da79d91d559f0de88 | 48,209 | py | Python | specs/dxgi.py | linkmauve/apitrace | a22dda1ac2f27cd014ac7a16e7b7b6ebc9f14ae1 | [
"MIT"
] | 1 | 2020-06-09T18:54:09.000Z | 2020-06-09T18:54:09.000Z | specs/dxgi.py | linkmauve/apitrace | a22dda1ac2f27cd014ac7a16e7b7b6ebc9f14ae1 | [
"MIT"
] | 2 | 2020-06-09T18:54:32.000Z | 2021-01-22T21:05:43.000Z | specs/dxgi.py | linkmauve/apitrace | a22dda1ac2f27cd014ac7a16e7b7b6ebc9f14ae1 | [
"MIT"
] | 1 | 2020-11-07T20:55:34.000Z | 2020-11-07T20:55:34.000Z | ##########################################################################
#
# Copyright 2014 VMware, Inc
# Copyright 2011 Jose Fonseca
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
from .winapi import *
DXGI_FORMAT = Enum("DXGI_FORMAT", [
"DXGI_FORMAT_UNKNOWN",
"DXGI_FORMAT_R32G32B32A32_TYPELESS",
"DXGI_FORMAT_R32G32B32A32_FLOAT",
"DXGI_FORMAT_R32G32B32A32_UINT",
"DXGI_FORMAT_R32G32B32A32_SINT",
"DXGI_FORMAT_R32G32B32_TYPELESS",
"DXGI_FORMAT_R32G32B32_FLOAT",
"DXGI_FORMAT_R32G32B32_UINT",
"DXGI_FORMAT_R32G32B32_SINT",
"DXGI_FORMAT_R16G16B16A16_TYPELESS",
"DXGI_FORMAT_R16G16B16A16_FLOAT",
"DXGI_FORMAT_R16G16B16A16_UNORM",
"DXGI_FORMAT_R16G16B16A16_UINT",
"DXGI_FORMAT_R16G16B16A16_SNORM",
"DXGI_FORMAT_R16G16B16A16_SINT",
"DXGI_FORMAT_R32G32_TYPELESS",
"DXGI_FORMAT_R32G32_FLOAT",
"DXGI_FORMAT_R32G32_UINT",
"DXGI_FORMAT_R32G32_SINT",
"DXGI_FORMAT_R32G8X24_TYPELESS",
"DXGI_FORMAT_D32_FLOAT_S8X24_UINT",
"DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS",
"DXGI_FORMAT_X32_TYPELESS_G8X24_UINT",
"DXGI_FORMAT_R10G10B10A2_TYPELESS",
"DXGI_FORMAT_R10G10B10A2_UNORM",
"DXGI_FORMAT_R10G10B10A2_UINT",
"DXGI_FORMAT_R11G11B10_FLOAT",
"DXGI_FORMAT_R8G8B8A8_TYPELESS",
"DXGI_FORMAT_R8G8B8A8_UNORM",
"DXGI_FORMAT_R8G8B8A8_UNORM_SRGB",
"DXGI_FORMAT_R8G8B8A8_UINT",
"DXGI_FORMAT_R8G8B8A8_SNORM",
"DXGI_FORMAT_R8G8B8A8_SINT",
"DXGI_FORMAT_R16G16_TYPELESS",
"DXGI_FORMAT_R16G16_FLOAT",
"DXGI_FORMAT_R16G16_UNORM",
"DXGI_FORMAT_R16G16_UINT",
"DXGI_FORMAT_R16G16_SNORM",
"DXGI_FORMAT_R16G16_SINT",
"DXGI_FORMAT_R32_TYPELESS",
"DXGI_FORMAT_D32_FLOAT",
"DXGI_FORMAT_R32_FLOAT",
"DXGI_FORMAT_R32_UINT",
"DXGI_FORMAT_R32_SINT",
"DXGI_FORMAT_R24G8_TYPELESS",
"DXGI_FORMAT_D24_UNORM_S8_UINT",
"DXGI_FORMAT_R24_UNORM_X8_TYPELESS",
"DXGI_FORMAT_X24_TYPELESS_G8_UINT",
"DXGI_FORMAT_R8G8_TYPELESS",
"DXGI_FORMAT_R8G8_UNORM",
"DXGI_FORMAT_R8G8_UINT",
"DXGI_FORMAT_R8G8_SNORM",
"DXGI_FORMAT_R8G8_SINT",
"DXGI_FORMAT_R16_TYPELESS",
"DXGI_FORMAT_R16_FLOAT",
"DXGI_FORMAT_D16_UNORM",
"DXGI_FORMAT_R16_UNORM",
"DXGI_FORMAT_R16_UINT",
"DXGI_FORMAT_R16_SNORM",
"DXGI_FORMAT_R16_SINT",
"DXGI_FORMAT_R8_TYPELESS",
"DXGI_FORMAT_R8_UNORM",
"DXGI_FORMAT_R8_UINT",
"DXGI_FORMAT_R8_SNORM",
"DXGI_FORMAT_R8_SINT",
"DXGI_FORMAT_A8_UNORM",
"DXGI_FORMAT_R1_UNORM",
"DXGI_FORMAT_R9G9B9E5_SHAREDEXP",
"DXGI_FORMAT_R8G8_B8G8_UNORM",
"DXGI_FORMAT_G8R8_G8B8_UNORM",
"DXGI_FORMAT_BC1_TYPELESS",
"DXGI_FORMAT_BC1_UNORM",
"DXGI_FORMAT_BC1_UNORM_SRGB",
"DXGI_FORMAT_BC2_TYPELESS",
"DXGI_FORMAT_BC2_UNORM",
"DXGI_FORMAT_BC2_UNORM_SRGB",
"DXGI_FORMAT_BC3_TYPELESS",
"DXGI_FORMAT_BC3_UNORM",
"DXGI_FORMAT_BC3_UNORM_SRGB",
"DXGI_FORMAT_BC4_TYPELESS",
"DXGI_FORMAT_BC4_UNORM",
"DXGI_FORMAT_BC4_SNORM",
"DXGI_FORMAT_BC5_TYPELESS",
"DXGI_FORMAT_BC5_UNORM",
"DXGI_FORMAT_BC5_SNORM",
"DXGI_FORMAT_B5G6R5_UNORM",
"DXGI_FORMAT_B5G5R5A1_UNORM",
"DXGI_FORMAT_B8G8R8A8_UNORM",
"DXGI_FORMAT_B8G8R8X8_UNORM",
"DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM",
"DXGI_FORMAT_B8G8R8A8_TYPELESS",
"DXGI_FORMAT_B8G8R8A8_UNORM_SRGB",
"DXGI_FORMAT_B8G8R8X8_TYPELESS",
"DXGI_FORMAT_B8G8R8X8_UNORM_SRGB",
"DXGI_FORMAT_BC6H_TYPELESS",
"DXGI_FORMAT_BC6H_UF16",
"DXGI_FORMAT_BC6H_SF16",
"DXGI_FORMAT_BC7_TYPELESS",
"DXGI_FORMAT_BC7_UNORM",
"DXGI_FORMAT_BC7_UNORM_SRGB",
"DXGI_FORMAT_AYUV",
"DXGI_FORMAT_Y410",
"DXGI_FORMAT_Y416",
"DXGI_FORMAT_NV12",
"DXGI_FORMAT_P010",
"DXGI_FORMAT_P016",
"DXGI_FORMAT_420_OPAQUE",
"DXGI_FORMAT_YUY2",
"DXGI_FORMAT_Y210",
"DXGI_FORMAT_Y216",
"DXGI_FORMAT_NV11",
"DXGI_FORMAT_AI44",
"DXGI_FORMAT_IA44",
"DXGI_FORMAT_P8",
"DXGI_FORMAT_A8P8",
"DXGI_FORMAT_B4G4R4A4_UNORM",
])
HRESULT = MAKE_HRESULT([
"DXGI_STATUS_OCCLUDED",
"DXGI_STATUS_CLIPPED",
"DXGI_STATUS_NO_REDIRECTION",
"DXGI_STATUS_NO_DESKTOP_ACCESS",
"DXGI_STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE",
"DXGI_STATUS_MODE_CHANGED",
"DXGI_STATUS_MODE_CHANGE_IN_PROGRESS",
"DXGI_ERROR_INVALID_CALL",
"DXGI_ERROR_NOT_FOUND",
"DXGI_ERROR_MORE_DATA",
"DXGI_ERROR_UNSUPPORTED",
"DXGI_ERROR_DEVICE_REMOVED",
"DXGI_ERROR_DEVICE_HUNG",
"DXGI_ERROR_DEVICE_RESET",
"DXGI_ERROR_WAS_STILL_DRAWING",
"DXGI_ERROR_FRAME_STATISTICS_DISJOINT",
"DXGI_ERROR_GRAPHICS_VIDPN_SOURCE_IN_USE",
"DXGI_ERROR_DRIVER_INTERNAL_ERROR",
"DXGI_ERROR_NONEXCLUSIVE",
"DXGI_ERROR_NOT_CURRENTLY_AVAILABLE",
"DXGI_ERROR_REMOTE_CLIENT_DISCONNECTED",
"DXGI_ERROR_REMOTE_OUTOFMEMORY",
# IDXGIKeyedMutex::AcquireSync
"WAIT_ABANDONED",
"WAIT_TIMEOUT",
])
DXGI_RGB = Struct("DXGI_RGB", [
(Float, "Red"),
(Float, "Green"),
(Float, "Blue"),
])
DXGI_GAMMA_CONTROL = Struct("DXGI_GAMMA_CONTROL", [
(DXGI_RGB, "Scale"),
(DXGI_RGB, "Offset"),
(Array(DXGI_RGB, 1025), "GammaCurve"),
])
DXGI_GAMMA_CONTROL_CAPABILITIES = Struct("DXGI_GAMMA_CONTROL_CAPABILITIES", [
(BOOL, "ScaleAndOffsetSupported"),
(Float, "MaxConvertedValue"),
(Float, "MinConvertedValue"),
(UINT, "NumGammaControlPoints"),
(Array(Float, "{self}.NumGammaControlPoints"), "ControlPointPositions"),
])
DXGI_RATIONAL = Struct("DXGI_RATIONAL", [
(UINT, "Numerator"),
(UINT, "Denominator"),
])
DXGI_MODE_SCANLINE_ORDER = Enum("DXGI_MODE_SCANLINE_ORDER", [
"DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED",
"DXGI_MODE_SCANLINE_ORDER_PROGRESSIVE",
"DXGI_MODE_SCANLINE_ORDER_UPPER_FIELD_FIRST",
"DXGI_MODE_SCANLINE_ORDER_LOWER_FIELD_FIRST",
])
DXGI_MODE_SCALING = Enum("DXGI_MODE_SCALING", [
"DXGI_MODE_SCALING_UNSPECIFIED",
"DXGI_MODE_SCALING_CENTERED",
"DXGI_MODE_SCALING_STRETCHED",
])
DXGI_MODE_ROTATION = Enum("DXGI_MODE_ROTATION", [
"DXGI_MODE_ROTATION_UNSPECIFIED",
"DXGI_MODE_ROTATION_IDENTITY",
"DXGI_MODE_ROTATION_ROTATE90",
"DXGI_MODE_ROTATION_ROTATE180",
"DXGI_MODE_ROTATION_ROTATE270",
])
DXGI_MODE_DESC = Struct("DXGI_MODE_DESC", [
(UINT, "Width"),
(UINT, "Height"),
(DXGI_RATIONAL, "RefreshRate"),
(DXGI_FORMAT, "Format"),
(DXGI_MODE_SCANLINE_ORDER, "ScanlineOrdering"),
(DXGI_MODE_SCALING, "Scaling"),
])
DXGI_QUALITY_LEVEL = FakeEnum(UINT, [
"DXGI_STANDARD_MULTISAMPLE_QUALITY_PATTERN",
"DXGI_CENTER_MULTISAMPLE_QUALITY_PATTERN",
])
DXGI_SAMPLE_DESC = Struct("DXGI_SAMPLE_DESC", [
(UINT, "Count"),
(DXGI_QUALITY_LEVEL, "Quality"),
])
DXGI_RGBA = Struct("DXGI_RGBA", [
(Float, "r"),
(Float, "g"),
(Float, "b"),
(Float, "a"),
])
IDXGIObject = Interface("IDXGIObject", IUnknown)
IDXGIDeviceSubObject = Interface("IDXGIDeviceSubObject", IDXGIObject)
IDXGIResource = Interface("IDXGIResource", IDXGIDeviceSubObject)
IDXGIKeyedMutex = Interface("IDXGIKeyedMutex", IDXGIDeviceSubObject)
IDXGISurface = Interface("IDXGISurface", IDXGIDeviceSubObject)
IDXGISurface1 = Interface("IDXGISurface1", IDXGISurface)
IDXGIAdapter = Interface("IDXGIAdapter", IDXGIObject)
IDXGIOutput = Interface("IDXGIOutput", IDXGIObject)
IDXGISwapChain = Interface("IDXGISwapChain", IDXGIDeviceSubObject)
IDXGIFactory = Interface("IDXGIFactory", IDXGIObject)
IDXGIDevice = Interface("IDXGIDevice", IDXGIObject)
IDXGIFactory1 = Interface("IDXGIFactory1", IDXGIFactory)
IDXGIAdapter1 = Interface("IDXGIAdapter1", IDXGIAdapter)
IDXGIDevice1 = Interface("IDXGIDevice1", IDXGIDevice)
DXGI_USAGE = Flags(UINT, [
"DXGI_CPU_ACCESS_NONE", # 0
"DXGI_CPU_ACCESS_SCRATCH", # 3
"DXGI_CPU_ACCESS_DYNAMIC", # 1
"DXGI_CPU_ACCESS_READ_WRITE", # 2
"DXGI_USAGE_SHADER_INPUT",
"DXGI_USAGE_RENDER_TARGET_OUTPUT",
"DXGI_USAGE_BACK_BUFFER",
"DXGI_USAGE_SHARED",
"DXGI_USAGE_READ_ONLY",
"DXGI_USAGE_DISCARD_ON_PRESENT",
"DXGI_USAGE_UNORDERED_ACCESS",
])
DXGI_FRAME_STATISTICS = Struct("DXGI_FRAME_STATISTICS", [
(UINT, "PresentCount"),
(UINT, "PresentRefreshCount"),
(UINT, "SyncRefreshCount"),
(LARGE_INTEGER, "SyncQPCTime"),
(LARGE_INTEGER, "SyncGPUTime"),
])
DXGI_MAPPED_RECT = Struct("DXGI_MAPPED_RECT", [
(INT, "Pitch"),
(LinearPointer(BYTE, "_MappedSize"), "pBits"),
])
DXGI_ADAPTER_DESC = Struct("DXGI_ADAPTER_DESC", [
(WString, "Description"),
(UINT, "VendorId"),
(UINT, "DeviceId"),
(UINT, "SubSysId"),
(UINT, "Revision"),
(SIZE_T, "DedicatedVideoMemory"),
(SIZE_T, "DedicatedSystemMemory"),
(SIZE_T, "SharedSystemMemory"),
(LUID, "AdapterLuid"),
])
DXGI_OUTPUT_DESC = Struct("DXGI_OUTPUT_DESC", [
(WString, "DeviceName"),
(RECT, "DesktopCoordinates"),
(BOOL, "AttachedToDesktop"),
(DXGI_MODE_ROTATION, "Rotation"),
(HMONITOR, "Monitor"),
])
DXGI_SHARED_RESOURCE = Struct("DXGI_SHARED_RESOURCE", [
(HANDLE, "Handle"),
])
DXGI_RESOURCE_PRIORITY = FakeEnum(UINT, [
"DXGI_RESOURCE_PRIORITY_MINIMUM",
"DXGI_RESOURCE_PRIORITY_LOW",
"DXGI_RESOURCE_PRIORITY_NORMAL",
"DXGI_RESOURCE_PRIORITY_HIGH",
"DXGI_RESOURCE_PRIORITY_MAXIMUM",
])
DXGI_RESIDENCY = Enum("DXGI_RESIDENCY", [
"DXGI_RESIDENCY_FULLY_RESIDENT",
"DXGI_RESIDENCY_RESIDENT_IN_SHARED_MEMORY",
"DXGI_RESIDENCY_EVICTED_TO_DISK",
])
DXGI_SURFACE_DESC = Struct("DXGI_SURFACE_DESC", [
(UINT, "Width"),
(UINT, "Height"),
(DXGI_FORMAT, "Format"),
(DXGI_SAMPLE_DESC, "SampleDesc"),
])
DXGI_SWAP_EFFECT = Enum("DXGI_SWAP_EFFECT", [
"DXGI_SWAP_EFFECT_DISCARD",
"DXGI_SWAP_EFFECT_SEQUENTIAL",
"DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL",
"DXGI_SWAP_EFFECT_FLIP_DISCARD",
])
DXGI_SWAP_CHAIN_FLAG = Flags(UINT, [
"DXGI_SWAP_CHAIN_FLAG_NONPREROTATED",
"DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH",
"DXGI_SWAP_CHAIN_FLAG_GDI_COMPATIBLE",
"DXGI_SWAP_CHAIN_FLAG_RESTRICTED_CONTENT",
"DXGI_SWAP_CHAIN_FLAG_RESTRICT_SHARED_RESOURCE_DRIVER",
"DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY",
"DXGI_SWAP_CHAIN_FLAG_FRAME_LATENCY_WAITABLE_OBJECT",
"DXGI_SWAP_CHAIN_FLAG_FOREGROUND_LAYER",
"DXGI_SWAP_CHAIN_FLAG_FULLSCREEN_VIDEO",
"DXGI_SWAP_CHAIN_FLAG_YUV_VIDEO",
"DXGI_SWAP_CHAIN_FLAG_HW_PROTECTED",
"DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING",
#"DXGI_SWAP_CHAIN_FLAG_RESTRICTED_TO_ALL_HOLOGRAPHIC_DISPLAYS", # DXGI 1.6
])
DXGI_SWAP_CHAIN_DESC = Struct("DXGI_SWAP_CHAIN_DESC", [
(DXGI_MODE_DESC, "BufferDesc"),
(DXGI_SAMPLE_DESC, "SampleDesc"),
(DXGI_USAGE, "BufferUsage"),
(UINT, "BufferCount"),
(HWND, "OutputWindow"),
(BOOL, "Windowed"),
(DXGI_SWAP_EFFECT, "SwapEffect"),
(DXGI_SWAP_CHAIN_FLAG, "Flags"),
])
IDXGIObject.methods += [
StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "Name"), (UINT, "DataSize"), (OpaqueBlob(Const(Void), "DataSize"), "pData")], sideeffects=False),
StdMethod(HRESULT, "SetPrivateDataInterface", [(REFGUID, "Name"), (OpaquePointer(Const(IUnknown)), "pUnknown")], sideeffects=False),
StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "Name"), InOut(Pointer(UINT), "pDataSize"), Out(OpaquePointer(Void), "pData")], sideeffects=False),
StdMethod(HRESULT, "GetParent", [(REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppParent")]),
]
IDXGIDeviceSubObject.methods += [
StdMethod(HRESULT, "GetDevice", [(REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppDevice")]),
]
SHARED_HANDLE = Handle("shared_handle", RAW_HANDLE)
IDXGIResource.methods += [
StdMethod(HRESULT, "GetSharedHandle", [Out(Pointer(SHARED_HANDLE), "pSharedHandle")]),
StdMethod(HRESULT, "GetUsage", [Out(Pointer(DXGI_USAGE), "pUsage")], sideeffects=False),
StdMethod(HRESULT, "SetEvictionPriority", [(DXGI_RESOURCE_PRIORITY, "EvictionPriority")]),
StdMethod(HRESULT, "GetEvictionPriority", [Out(Pointer(DXGI_RESOURCE_PRIORITY), "pEvictionPriority")], sideeffects=False),
]
DWORD_TIMEOUT = FakeEnum(DWORD, [
"INFINITE",
])
IDXGIKeyedMutex.methods += [
StdMethod(HRESULT, "AcquireSync", [(UINT64, "Key"), (DWORD_TIMEOUT, "dwMilliseconds")], sideeffects=False),
StdMethod(HRESULT, "ReleaseSync", [(UINT64, "Key")]),
]
DXGI_MAP = Flags(UINT, [
"DXGI_MAP_READ",
"DXGI_MAP_WRITE",
"DXGI_MAP_DISCARD",
])
IDXGISurface.methods += [
StdMethod(HRESULT, "GetDesc", [Out(Pointer(DXGI_SURFACE_DESC), "pDesc")], sideeffects=False),
StdMethod(HRESULT, "Map", [Out(Pointer(DXGI_MAPPED_RECT), "pLockedRect"), (DXGI_MAP, "MapFlags")]),
StdMethod(HRESULT, "Unmap", []),
]
IDXGISurface1.methods += [
StdMethod(HRESULT, "GetDC", [(BOOL, "Discard"), Out(Pointer(HDC), "phdc")]),
StdMethod(HRESULT, "ReleaseDC", [(Pointer(RECT), "pDirtyRect")]),
]
IDXGIAdapter.methods += [
StdMethod(HRESULT, "EnumOutputs", [(UINT, "Output"), Out(Pointer(ObjPointer(IDXGIOutput)), "ppOutput")]),
StdMethod(HRESULT, "GetDesc", [Out(Pointer(DXGI_ADAPTER_DESC), "pDesc")], sideeffects=False),
StdMethod(HRESULT, "CheckInterfaceSupport", [(REFGUID, "InterfaceName"), Out(Pointer(LARGE_INTEGER), "pUMDVersion")], sideeffects=False),
]
# Flags controlling mode enumeration in IDXGIOutput::GetDisplayModeList.
DXGI_ENUM_MODES = Flags(UINT, [
    "DXGI_ENUM_MODES_INTERLACED",
    "DXGI_ENUM_MODES_SCALING",
    "DXGI_ENUM_MODES_STEREO",
    "DXGI_ENUM_MODES_DISABLED_STEREO",
])
# IDXGIOutput methods.
# NOTE(review): the order of entries presumably mirrors the COM vtable
# layout of the interface — do not reorder; confirm against dxgi.h.
IDXGIOutput.methods += [
    StdMethod(HRESULT, "GetDesc", [Out(Pointer(DXGI_OUTPUT_DESC), "pDesc")], sideeffects=False),
    StdMethod(HRESULT, "GetDisplayModeList", [(DXGI_FORMAT, "EnumFormat"), (DXGI_ENUM_MODES, "Flags"), InOut(Pointer(UINT), "pNumModes"), Out(Array(DXGI_MODE_DESC, "*pNumModes"), "pDesc")], sideeffects=False),
    StdMethod(HRESULT, "FindClosestMatchingMode", [(Pointer(Const(DXGI_MODE_DESC)), "pModeToMatch"), Out(Pointer(DXGI_MODE_DESC), "pClosestMatch"), (ObjPointer(IUnknown), "pConcernedDevice")], sideeffects=False),
    StdMethod(HRESULT, "WaitForVBlank", []),
    StdMethod(HRESULT, "TakeOwnership", [(ObjPointer(IUnknown), "pDevice"), (BOOL, "Exclusive")]),
    StdMethod(Void, "ReleaseOwnership", []),
    StdMethod(HRESULT, "GetGammaControlCapabilities", [Out(Pointer(DXGI_GAMMA_CONTROL_CAPABILITIES), "pGammaCaps")], sideeffects=False),
    StdMethod(HRESULT, "SetGammaControl", [(Pointer(Const(DXGI_GAMMA_CONTROL)), "pArray")], sideeffects=False), # Avoid NumGammaControlPoints mismatch
    StdMethod(HRESULT, "GetGammaControl", [Out(Pointer(DXGI_GAMMA_CONTROL), "pArray")], sideeffects=False),
    StdMethod(HRESULT, "SetDisplaySurface", [(ObjPointer(IDXGISurface), "pScanoutSurface")]),
    StdMethod(HRESULT, "GetDisplaySurfaceData", [(ObjPointer(IDXGISurface), "pDestination")]),
    StdMethod(HRESULT, "GetFrameStatistics", [Out(Pointer(DXGI_FRAME_STATISTICS), "pStats")], sideeffects=False),
]
# Flags passed to IDXGISwapChain::Present (and Present1 below).
DXGI_PRESENT = Flags(UINT, [
    "DXGI_PRESENT_TEST",
    "DXGI_PRESENT_DO_NOT_SEQUENCE",
    "DXGI_PRESENT_RESTART",
    "DXGI_PRESENT_DO_NOT_WAIT",
    "DXGI_PRESENT_STEREO_PREFER_RIGHT",
    "DXGI_PRESENT_STEREO_TEMPORARY_MONO",
    "DXGI_PRESENT_RESTRICT_TO_OUTPUT",
    "DXGI_PRESENT_USE_DURATION",
])
# IDXGISwapChain methods (DXGI 1.0).
IDXGISwapChain.methods += [
    StdMethod(HRESULT, "Present", [(UINT, "SyncInterval"), (DXGI_PRESENT, "Flags")]),
    StdMethod(HRESULT, "GetBuffer", [(UINT, "Buffer"), (REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppSurface")]),
    StdMethod(HRESULT, "SetFullscreenState", [(BOOL, "Fullscreen"), (ObjPointer(IDXGIOutput), "pTarget")]),
    StdMethod(HRESULT, "GetFullscreenState", [Out(Pointer(BOOL), "pFullscreen"), Out(Pointer(ObjPointer(IDXGIOutput)), "ppTarget")]),
    StdMethod(HRESULT, "GetDesc", [Out(Pointer(DXGI_SWAP_CHAIN_DESC), "pDesc")], sideeffects=False),
    StdMethod(HRESULT, "ResizeBuffers", [(UINT, "BufferCount"), (UINT, "Width"), (UINT, "Height"), (DXGI_FORMAT, "NewFormat"), (DXGI_SWAP_CHAIN_FLAG, "SwapChainFlags")]),
    StdMethod(HRESULT, "ResizeTarget", [(Pointer(Const(DXGI_MODE_DESC)), "pNewTargetParameters")]),
    StdMethod(HRESULT, "GetContainingOutput", [Out(Pointer(ObjPointer(IDXGIOutput)), "ppOutput")]),
    StdMethod(HRESULT, "GetFrameStatistics", [Out(Pointer(DXGI_FRAME_STATISTICS), "pStats")], sideeffects=False),
    StdMethod(HRESULT, "GetLastPresentCount", [Out(Pointer(UINT), "pLastPresentCount")], sideeffects=False),
]
# Flags for IDXGIFactory::MakeWindowAssociation.
DXGI_MWA = Flags(UINT, [
    "DXGI_MWA_NO_WINDOW_CHANGES",
    "DXGI_MWA_NO_ALT_ENTER",
    "DXGI_MWA_NO_PRINT_SCREEN",
    "DXGI_MWA_VALID",
])
IDXGIFactory.methods += [
    StdMethod(HRESULT, "EnumAdapters", [(UINT, "Adapter"), Out(Pointer(ObjPointer(IDXGIAdapter)), "ppAdapter")]),
    StdMethod(HRESULT, "MakeWindowAssociation", [(HWND, "WindowHandle"), (DXGI_MWA, "Flags")], sideeffects=False),
    StdMethod(HRESULT, "GetWindowAssociation", [Out(Pointer(HWND), "pWindowHandle")], sideeffects=False),
    StdMethod(HRESULT, "CreateSwapChain", [(ObjPointer(IUnknown), "pDevice"), (Pointer(DXGI_SWAP_CHAIN_DESC), "pDesc"), Out(Pointer(ObjPointer(IDXGISwapChain)), "ppSwapChain")]),
    StdMethod(HRESULT, "CreateSoftwareAdapter", [(HMODULE, "Module"), Out(Pointer(ObjPointer(IDXGIAdapter)), "ppAdapter")]),
]
IDXGIDevice.methods += [
    StdMethod(HRESULT, "GetAdapter", [Out(Pointer(ObjPointer(IDXGIAdapter)), "pAdapter")]),
    StdMethod(HRESULT, "CreateSurface", [(Pointer(Const(DXGI_SURFACE_DESC)), "pDesc"), (UINT, "NumSurfaces"), (DXGI_USAGE, "Usage"), (Pointer(Const(DXGI_SHARED_RESOURCE)), "pSharedResource"), Out(Pointer(ObjPointer(IDXGISurface)), "ppSurface")]),
    StdMethod(HRESULT, "QueryResourceResidency", [(Array(Const(ObjPointer(IUnknown)), "NumResources"), "ppResources"), Out(Array(DXGI_RESIDENCY, "NumResources"), "pResidencyStatus"), (UINT, "NumResources")], sideeffects=False),
    StdMethod(HRESULT, "SetGPUThreadPriority", [(INT, "Priority")]),
    StdMethod(HRESULT, "GetGPUThreadPriority", [Out(Pointer(INT), "pPriority")], sideeffects=False),
]
# DXGI 1.1 adapter-kind flags (none/remote/software), as reported in
# adapter descriptors.
DXGI_ADAPTER_FLAG = FakeEnum(UINT, [
    "DXGI_ADAPTER_FLAG_NONE",
    "DXGI_ADAPTER_FLAG_REMOTE",
    "DXGI_ADAPTER_FLAG_SOFTWARE",
])
# Adapter descriptor returned by IDXGIAdapter1::GetDesc1.
# The Flags member carries DXGI_ADAPTER_FLAG values (remote/software) per
# the DXGI documentation — it was previously mis-declared here as
# DXGI_SWAP_CHAIN_FLAG, which would mislabel the decoded flag names.
# (DXGI_ADAPTER_DESC2 below already uses DXGI_ADAPTER_FLAG correctly.)
DXGI_ADAPTER_DESC1 = Struct("DXGI_ADAPTER_DESC1", [
    (WString, "Description"),
    (UINT, "VendorId"),
    (UINT, "DeviceId"),
    (UINT, "SubSysId"),
    (UINT, "Revision"),
    (SIZE_T, "DedicatedVideoMemory"),
    (SIZE_T, "DedicatedSystemMemory"),
    (SIZE_T, "SharedSystemMemory"),
    (LUID, "AdapterLuid"),
    (DXGI_ADAPTER_FLAG, "Flags"),
])
DXGI_DISPLAY_COLOR_SPACE = Struct("DXGI_DISPLAY_COLOR_SPACE", [
    (Array(Array(FLOAT, 8), 2), "PrimaryCoordinates"),
    (Array(Array(FLOAT, 16), 2), "WhitePoints"),
])
# DXGI 1.1 interface additions.
IDXGIFactory1.methods += [
    StdMethod(HRESULT, "EnumAdapters1", [(UINT, "Adapter"), Out(Pointer(ObjPointer(IDXGIAdapter1)), "ppAdapter")]),
    StdMethod(BOOL, "IsCurrent", [], sideeffects=False),
]
IDXGIAdapter1.methods += [
    StdMethod(HRESULT, "GetDesc1", [Out(Pointer(DXGI_ADAPTER_DESC1), "pDesc")], sideeffects=False),
]
IDXGIDevice1.methods += [
    StdMethod(HRESULT, "SetMaximumFrameLatency", [(UINT, "MaxLatency")]),
    StdMethod(HRESULT, "GetMaximumFrameLatency", [Out(Pointer(UINT), "pMaxLatency")], sideeffects=False),
]
# Module definition: register the traced interfaces and exported functions
# of dxgi.dll (DXGI 1.0/1.1 entry points).
dxgi = Module('dxgi')
dxgi.addInterfaces([
    IDXGIKeyedMutex,
    IDXGIFactory1,
    IDXGIDevice1,
    IDXGIAdapter1,
    IDXGIResource,
])
dxgi.addFunctions([
    StdFunction(HRESULT, "CreateDXGIFactory", [(REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppFactory")]),
    StdFunction(HRESULT, "CreateDXGIFactory1", [(REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppFactory")]),
    StdFunction(HRESULT, "DXGID3D10CreateDevice", [(HMODULE, "hModule"), (ObjPointer(IDXGIFactory), "pFactory"), (ObjPointer(IDXGIAdapter), "pAdapter"), (UINT, "Flags"), (OpaquePointer(Const(IUnknown)), "pUnknown"), Out(Pointer(ObjPointer(Void)), "ppDevice")], internal=True),
    # NOTE(review): these are bare (UINT) types, not (type, "name") tuples
    # like every other argument list here — verify the spec machinery
    # accepts unnamed arguments, or supply placeholder names.
    StdFunction(HRESULT, "DXGID3D10CreateLayeredDevice", [(UINT), (UINT), (UINT), (UINT), (UINT)], internal=True),
    StdFunction(SIZE_T, "DXGID3D10GetLayeredDeviceSize", [(OpaqueArray(Const(Void), "NumLayers"), "pLayers"), (UINT, "NumLayers")], internal=True),
    StdFunction(HRESULT, "DXGID3D10RegisterLayers", [(OpaqueArray(Const(Void), "NumLayers"), "pLayers"), (UINT, "NumLayers")], internal=True),
])
#
# DXGI 1.2
#
IDXGIDisplayControl = Interface("IDXGIDisplayControl", IUnknown)
IDXGIDisplayControl.methods += [
    StdMethod(BOOL, "IsStereoEnabled", [], sideeffects=False),
    StdMethod(Void, "SetStereoEnabled", [(BOOL, "enabled")]),
]
# Structures used by the desktop-duplication API (IDXGIOutputDuplication).
DXGI_OUTDUPL_MOVE_RECT = Struct("DXGI_OUTDUPL_MOVE_RECT", [
    (POINT, "SourcePoint"),
    (RECT, "DestinationRect"),
])
DXGI_OUTDUPL_DESC = Struct("DXGI_OUTDUPL_DESC", [
    (DXGI_MODE_DESC, "ModeDesc"),
    (DXGI_MODE_ROTATION, "Rotation"),
    (BOOL, "DesktopImageInSystemMemory"),
])
DXGI_OUTDUPL_POINTER_POSITION = Struct("DXGI_OUTDUPL_POINTER_POSITION", [
    (POINT, "Position"),
    (BOOL, "Visible"),
])
DXGI_OUTDUPL_POINTER_SHAPE_TYPE = Enum("DXGI_OUTDUPL_POINTER_SHAPE_TYPE", [
    "DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MONOCHROME",
    "DXGI_OUTDUPL_POINTER_SHAPE_TYPE_COLOR",
    "DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MASKED_COLOR",
])
DXGI_OUTDUPL_POINTER_SHAPE_INFO = Struct("DXGI_OUTDUPL_POINTER_SHAPE_INFO", [
    (UINT, "Type"),
    (UINT, "Width"),
    (UINT, "Height"),
    (UINT, "Pitch"),
    (POINT, "HotSpot"),
])
DXGI_OUTDUPL_FRAME_INFO = Struct("DXGI_OUTDUPL_FRAME_INFO", [
    (LARGE_INTEGER, "LastPresentTime"),
    (LARGE_INTEGER, "LastMouseUpdateTime"),
    (UINT, "AccumulatedFrames"),
    (BOOL, "RectsCoalesced"),
    (BOOL, "ProtectedContentMaskedOut"),
    (DXGI_OUTDUPL_POINTER_POSITION, "PointerPosition"),
    (UINT, "TotalMetadataBufferSize"),
    (UINT, "PointerShapeBufferSize"),
])
IDXGIOutputDuplication = Interface("IDXGIOutputDuplication", IDXGIObject)
IDXGIOutputDuplication.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(DXGI_OUTDUPL_DESC), "pDesc")], sideeffects=False),
    StdMethod(HRESULT, "AcquireNextFrame", [(UINT, "TimeoutInMilliseconds"), Out(Pointer(DXGI_OUTDUPL_FRAME_INFO), "pFrameInfo"), Out(Pointer(ObjPointer(IDXGIResource)), "ppDesktopResource")]),
    StdMethod(HRESULT, "GetFrameDirtyRects", [(UINT, "DirtyRectsBufferSize"), Out(Array(RECT, "DirtyRectsBufferSize"), "pDirtyRectsBuffer"), Out(Pointer(UINT), "pDirtyRectsBufferSizeRequired")], sideeffects=False),
    StdMethod(HRESULT, "GetFrameMoveRects", [(UINT, "MoveRectsBufferSize"), Out(Array(DXGI_OUTDUPL_MOVE_RECT, "MoveRectsBufferSize"), "pMoveRectBuffer"), Out(Pointer(UINT), "pMoveRectsBufferSizeRequired")], sideeffects=False),
    StdMethod(HRESULT, "GetFramePointerShape", [(UINT, "PointerShapeBufferSize"), Out(OpaqueBlob(Void, "PointerShapeBufferSize"), "pPointerShapeBuffer"), Out(Pointer(UINT), "pPointerShapeBufferSizeRequired"), Out(Pointer(DXGI_OUTDUPL_POINTER_SHAPE_INFO), "pPointerShapeInfo")], sideeffects=False),
    StdMethod(HRESULT, "MapDesktopSurface", [Out(Pointer(DXGI_MAPPED_RECT), "pLockedRect")], sideeffects=False),
    StdMethod(HRESULT, "UnMapDesktopSurface", [], sideeffects=False),
    StdMethod(HRESULT, "ReleaseFrame", []),
]
DXGI_ALPHA_MODE = Enum("DXGI_ALPHA_MODE", [
    "DXGI_ALPHA_MODE_UNSPECIFIED",
    "DXGI_ALPHA_MODE_PREMULTIPLIED",
    "DXGI_ALPHA_MODE_STRAIGHT",
    "DXGI_ALPHA_MODE_IGNORE",
])
IDXGISurface2 = Interface("IDXGISurface2", IDXGISurface1)
IDXGISurface2.methods += [
    StdMethod(HRESULT, "GetResource", [(REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppParentResource"), Out(Pointer(UINT), "pSubresourceIndex")]),
]
# Access flags for IDXGIResource1::CreateSharedHandle.
DXGI_SHARED_RESOURCE_FLAG = Flags(DWORD, [
    "DXGI_SHARED_RESOURCE_READ",
    "DXGI_SHARED_RESOURCE_WRITE",
])
IDXGIResource1 = Interface("IDXGIResource1", IDXGIResource)
IDXGIResource1.methods += [
    StdMethod(HRESULT, "CreateSubresourceSurface", [(UINT, "index"), Out(Pointer(ObjPointer(IDXGISurface2)), "ppSurface")]),
    StdMethod(HRESULT, "CreateSharedHandle", [(Pointer(Const(SECURITY_ATTRIBUTES)), "pAttributes"), (DXGI_SHARED_RESOURCE_FLAG, "dwAccess"), (LPCWSTR, "lpName"), Out(Pointer(HANDLE), "pHandle")]),
]
DXGI_OFFER_RESOURCE_PRIORITY = Enum("DXGI_OFFER_RESOURCE_PRIORITY", [
    "DXGI_OFFER_RESOURCE_PRIORITY_LOW",
    "DXGI_OFFER_RESOURCE_PRIORITY_NORMAL",
    "DXGI_OFFER_RESOURCE_PRIORITY_HIGH",
])
IDXGIDevice2 = Interface("IDXGIDevice2", IDXGIDevice1)
IDXGIDevice2.methods += [
    StdMethod(HRESULT, "OfferResources", [(UINT, "NumResources"), (Array(Const(ObjPointer(IDXGIResource)), "NumResources"), "ppResources"), (DXGI_OFFER_RESOURCE_PRIORITY, "Priority")]),
    StdMethod(HRESULT, "ReclaimResources", [(UINT, "NumResources"), (Array(Const(ObjPointer(IDXGIResource)), "NumResources"), "ppResources"), Out(Pointer(BOOL), "pDiscarded")]),
    StdMethod(HRESULT, "EnqueueSetEvent", [(HANDLE, "hEvent")], sideeffects=False),
]
DXGI_MODE_DESC1 = Struct("DXGI_MODE_DESC1", [
    (UINT, "Width"),
    (UINT, "Height"),
    (DXGI_RATIONAL, "RefreshRate"),
    (DXGI_FORMAT, "Format"),
    (DXGI_MODE_SCANLINE_ORDER, "ScanlineOrdering"),
    (DXGI_MODE_SCALING, "Scaling"),
    (BOOL, "Stereo"),
])
DXGI_SCALING = Enum("DXGI_SCALING", [
    "DXGI_SCALING_STRETCH",
    "DXGI_SCALING_NONE",
    "DXGI_SCALING_ASPECT_RATIO_STRETCH",
])
DXGI_SWAP_CHAIN_DESC1 = Struct("DXGI_SWAP_CHAIN_DESC1", [
    (UINT, "Width"),
    (UINT, "Height"),
    (DXGI_FORMAT, "Format"),
    (BOOL, "Stereo"),
    (DXGI_SAMPLE_DESC, "SampleDesc"),
    (DXGI_USAGE, "BufferUsage"),
    (UINT, "BufferCount"),
    (DXGI_SCALING, "Scaling"),
    (DXGI_SWAP_EFFECT, "SwapEffect"),
    (DXGI_ALPHA_MODE, "AlphaMode"),
    (DXGI_SWAP_CHAIN_FLAG, "Flags"),
])
DXGI_SWAP_CHAIN_FULLSCREEN_DESC = Struct("DXGI_SWAP_CHAIN_FULLSCREEN_DESC", [
    (DXGI_RATIONAL, "RefreshRate"),
    (DXGI_MODE_SCANLINE_ORDER, "ScanlineOrdering"),
    (DXGI_MODE_SCALING, "Scaling"),
    (BOOL, "Windowed"),
])
DXGI_PRESENT_PARAMETERS = Struct("DXGI_PRESENT_PARAMETERS", [
    (UINT, "DirtyRectsCount"),
    # "{self}" expands to the enclosing struct instance, so the array
    # length is taken from the DirtyRectsCount member above.
    (Array(RECT, "{self}.DirtyRectsCount"), "pDirtyRects"),
    (Pointer(RECT), "pScrollRect"),
    (Pointer(POINT), "pScrollOffset"),
])
IDXGISwapChain1 = Interface("IDXGISwapChain1", IDXGISwapChain)
IDXGISwapChain1.methods += [
    StdMethod(HRESULT, "GetDesc1", [(Out(Pointer(DXGI_SWAP_CHAIN_DESC1), "pDesc"))], sideeffects=False),
    StdMethod(HRESULT, "GetFullscreenDesc", [(Out(Pointer(DXGI_SWAP_CHAIN_FULLSCREEN_DESC), "pDesc"))], sideeffects=False),
    StdMethod(HRESULT, "GetHwnd", [(Out(Pointer(HWND), "pHwnd"))], sideeffects=False),
    StdMethod(HRESULT, "GetCoreWindow", [(REFIID, "riid"), (Out(Pointer(ObjPointer(Void)), "ppUnk"))]),
    StdMethod(HRESULT, "Present1", [(UINT, "SyncInterval"), (DXGI_PRESENT, "Flags"), (Pointer(Const(DXGI_PRESENT_PARAMETERS)), "pPresentParameters")]),
    StdMethod(BOOL, "IsTemporaryMonoSupported", [], sideeffects=False),
    StdMethod(HRESULT, "GetRestrictToOutput", [(Out(Pointer(ObjPointer(IDXGIOutput)), "ppRestrictToOutput"))]),
    StdMethod(HRESULT, "SetBackgroundColor", [(Pointer(Const(DXGI_RGBA)), "pColor")]),
    StdMethod(HRESULT, "GetBackgroundColor", [(Out(Pointer(DXGI_RGBA), "pColor"))], sideeffects=False),
    StdMethod(HRESULT, "SetRotation", [(DXGI_MODE_ROTATION, "Rotation")]),
    StdMethod(HRESULT, "GetRotation", [(Out(Pointer(DXGI_MODE_ROTATION), "pRotation"))], sideeffects=False),
]
IDXGIFactory2 = Interface("IDXGIFactory2", IDXGIFactory1)
IDXGIFactory2.methods += [
    StdMethod(BOOL, "IsWindowedStereoEnabled", [], sideeffects=False),
    StdMethod(HRESULT, "CreateSwapChainForHwnd", [(ObjPointer(IUnknown), "pDevice"), (HWND, "hWnd"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC1)), "pDesc"), (Pointer(Const(DXGI_SWAP_CHAIN_FULLSCREEN_DESC)), "pFullscreenDesc"), (ObjPointer(IDXGIOutput), "pRestrictToOutput"), Out(Pointer(ObjPointer(IDXGISwapChain1)), "ppSwapChain")]),
    StdMethod(HRESULT, "CreateSwapChainForCoreWindow", [(ObjPointer(IUnknown), "pDevice"), (ObjPointer(IUnknown), "pWindow"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC1)), "pDesc"), (ObjPointer(IDXGIOutput), "pRestrictToOutput"), Out(Pointer(ObjPointer(IDXGISwapChain1)), "ppSwapChain")]),
    StdMethod(HRESULT, "GetSharedResourceAdapterLuid", [(HANDLE, "hResource"), Out(Pointer(LUID), "pLuid")], sideeffects=False),
    StdMethod(HRESULT, "RegisterStereoStatusWindow", [(HWND, "WindowHandle"), (UINT, "wMsg"), Out(Pointer(DWORD), "pdwCookie")], sideeffects=False),
    StdMethod(HRESULT, "RegisterStereoStatusEvent", [(HANDLE, "hEvent"), Out(Pointer(DWORD), "pdwCookie")], sideeffects=False),
    StdMethod(Void, "UnregisterStereoStatus", [(DWORD, "dwCookie")], sideeffects=False),
    StdMethod(HRESULT, "RegisterOcclusionStatusWindow", [(HWND, "WindowHandle"), (UINT, "wMsg"), Out(Pointer(DWORD), "pdwCookie")], sideeffects=False),
    StdMethod(HRESULT, "RegisterOcclusionStatusEvent", [(HANDLE, "hEvent"), Out(Pointer(DWORD), "pdwCookie")], sideeffects=False),
    StdMethod(Void, "UnregisterOcclusionStatus", [(DWORD, "dwCookie")], sideeffects=False),
    StdMethod(HRESULT, "CreateSwapChainForComposition", [(ObjPointer(IUnknown), "pDevice"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC1)), "pDesc"), (ObjPointer(IDXGIOutput), "pRestrictToOutput"), Out(Pointer(ObjPointer(IDXGISwapChain1)), "ppSwapChain")]),
]
# Preemption granularity enums reported in DXGI_ADAPTER_DESC2/DESC3.
DXGI_GRAPHICS_PREEMPTION_GRANULARITY = Enum("DXGI_GRAPHICS_PREEMPTION_GRANULARITY", [
    "DXGI_GRAPHICS_PREEMPTION_DMA_BUFFER_BOUNDARY",
    "DXGI_GRAPHICS_PREEMPTION_PRIMITIVE_BOUNDARY",
    "DXGI_GRAPHICS_PREEMPTION_TRIANGLE_BOUNDARY",
    "DXGI_GRAPHICS_PREEMPTION_PIXEL_BOUNDARY",
    "DXGI_GRAPHICS_PREEMPTION_INSTRUCTION_BOUNDARY",
])
DXGI_COMPUTE_PREEMPTION_GRANULARITY = Enum("DXGI_COMPUTE_PREEMPTION_GRANULARITY", [
    "DXGI_COMPUTE_PREEMPTION_DMA_BUFFER_BOUNDARY",
    "DXGI_COMPUTE_PREEMPTION_DISPATCH_BOUNDARY",
    "DXGI_COMPUTE_PREEMPTION_THREAD_GROUP_BOUNDARY",
    "DXGI_COMPUTE_PREEMPTION_THREAD_BOUNDARY",
    "DXGI_COMPUTE_PREEMPTION_INSTRUCTION_BOUNDARY",
])
DXGI_ADAPTER_DESC2 = Struct("DXGI_ADAPTER_DESC2", [
    (WString, "Description"),
    (UINT, "VendorId"),
    (UINT, "DeviceId"),
    (UINT, "SubSysId"),
    (UINT, "Revision"),
    (SIZE_T, "DedicatedVideoMemory"),
    (SIZE_T, "DedicatedSystemMemory"),
    (SIZE_T, "SharedSystemMemory"),
    (LUID, "AdapterLuid"),
    (DXGI_ADAPTER_FLAG, "Flags"),
    (DXGI_GRAPHICS_PREEMPTION_GRANULARITY, "GraphicsPreemptionGranularity"),
    (DXGI_COMPUTE_PREEMPTION_GRANULARITY, "ComputePreemptionGranularity"),
])
IDXGIAdapter2 = Interface("IDXGIAdapter2", IDXGIAdapter1)
IDXGIAdapter2.methods += [
    StdMethod(HRESULT, "GetDesc2", [Out(Pointer(DXGI_ADAPTER_DESC2), "pDesc")], sideeffects=False),
]
IDXGIOutput1 = Interface("IDXGIOutput1", IDXGIOutput)
IDXGIOutput1.methods += [
    StdMethod(HRESULT, "GetDisplayModeList1", [(DXGI_FORMAT, "EnumFormat"), (DXGI_ENUM_MODES, "Flags"), InOut(Pointer(UINT), "pNumModes"), Out(Array(DXGI_MODE_DESC1, "*pNumModes"), "pDesc")], sideeffects=False),
    StdMethod(HRESULT, "FindClosestMatchingMode1", [(Pointer(Const(DXGI_MODE_DESC1)), "pModeToMatch"), Out(Pointer(DXGI_MODE_DESC1), "pClosestMatch"), (ObjPointer(IUnknown), "pConcernedDevice")], sideeffects=False),
    StdMethod(HRESULT, "GetDisplaySurfaceData1", [(ObjPointer(IDXGIResource), "pDestination")]),
    StdMethod(HRESULT, "DuplicateOutput", [(ObjPointer(IUnknown), "pDevice"), Out(Pointer(ObjPointer(IDXGIOutputDuplication)), "ppOutputDuplication")]),
]
# Register the DXGI 1.2 interfaces with the module.
dxgi.addInterfaces([
    IDXGIDisplayControl,
    IDXGIDevice2,
    IDXGISwapChain1,
    IDXGIFactory2,
    IDXGIResource1,
    IDXGIAdapter2,
    IDXGIOutput1,
])
#
# DXGI 1.3
#
DXGI_CREATE_FACTORY_FLAGS = Flags(UINT, [
    "DXGI_CREATE_FACTORY_DEBUG",
])
dxgi.addFunctions([
    StdFunction(HRESULT, "CreateDXGIFactory2", [(DXGI_CREATE_FACTORY_FLAGS, "Flags"), (REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppFactory")]),
])
IDXGIDevice3 = Interface("IDXGIDevice3", IDXGIDevice2)
IDXGIDevice3.methods += [
    StdMethod(Void, "Trim", []),
]
DXGI_MATRIX_3X2_F = Struct("DXGI_MATRIX_3X2_F", [
    (FLOAT, "_11"),
    (FLOAT, "_12"),
    (FLOAT, "_21"),
    (FLOAT, "_22"),
    (FLOAT, "_31"),
    (FLOAT, "_32"),
])
IDXGISwapChain2 = Interface("IDXGISwapChain2", IDXGISwapChain1)
IDXGISwapChain2.methods += [
    StdMethod(HRESULT, "SetSourceSize", [(UINT, "Width"), (UINT, "Height")]),
    StdMethod(HRESULT, "GetSourceSize", [Out(Pointer(UINT), "pWidth"), Out(Pointer(UINT), "pHeight")], sideeffects=False),
    StdMethod(HRESULT, "SetMaximumFrameLatency", [(UINT, "MaxLatency")]),
    StdMethod(HRESULT, "GetMaximumFrameLatency", [Out(Pointer(UINT), "pMaxLatency")], sideeffects=False),
    StdMethod(HANDLE, "GetFrameLatencyWaitableObject", [], sideeffects=False),
    StdMethod(HRESULT, "SetMatrixTransform", [(Pointer(Const(DXGI_MATRIX_3X2_F)), "pMatrix")]),
    StdMethod(HRESULT, "GetMatrixTransform", [Out(Pointer(DXGI_MATRIX_3X2_F), "pMatrix")], sideeffects=False),
]
IDXGIOutput2 = Interface("IDXGIOutput2", IDXGIOutput1)
IDXGIOutput2.methods += [
    StdMethod(BOOL, "SupportsOverlays", [], sideeffects=False),
]
IDXGIFactory3 = Interface("IDXGIFactory3", IDXGIFactory2)
IDXGIFactory3.methods += [
    StdMethod(DXGI_CREATE_FACTORY_FLAGS, "GetCreationFlags", [], sideeffects=False),
]
DXGI_DECODE_SWAP_CHAIN_DESC = Struct("DXGI_DECODE_SWAP_CHAIN_DESC", [
    (UINT, "Flags"),
])
# XXX: Flags
DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS = Enum("DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS", [
    "DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_NOMINAL_RANGE",
    "DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_BT709",
    "DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_xvYCC",
])
IDXGIDecodeSwapChain = Interface("IDXGIDecodeSwapChain", IUnknown)
IDXGIDecodeSwapChain.methods += [
    StdMethod(HRESULT, "PresentBuffer", [(UINT, "BufferToPresent"), (UINT, "SyncInterval"), (DXGI_PRESENT, "Flags")]),
    StdMethod(HRESULT, "SetSourceRect", [(Pointer(Const(RECT)), "pRect")]),
    StdMethod(HRESULT, "SetTargetRect", [(Pointer(Const(RECT)), "pRect")]),
    StdMethod(HRESULT, "SetDestSize", [(UINT, "Width"), (UINT, "Height")]),
    StdMethod(HRESULT, "GetSourceRect", [Out(Pointer(RECT), "pRect")], sideeffects=False),
    StdMethod(HRESULT, "GetTargetRect", [Out(Pointer(RECT), "pRect")], sideeffects=False),
    StdMethod(HRESULT, "GetDestSize", [Out(Pointer(UINT), "pWidth"), Out(Pointer(UINT), "pHeight")], sideeffects=False),
    StdMethod(HRESULT, "SetColorSpace", [(DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS, "ColorSpace")]),
    StdMethod(DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS, "GetColorSpace", [], sideeffects=False),
]
IDXGIFactoryMedia = Interface("IDXGIFactoryMedia", IUnknown)
IDXGIFactoryMedia.methods += [
    StdMethod(HRESULT, "CreateSwapChainForCompositionSurfaceHandle", [(ObjPointer(IUnknown), "pDevice"), (HANDLE, "hSurface"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC1)), "pDesc"), (ObjPointer(IDXGIOutput), "pRestrictToOutput"), Out(Pointer(ObjPointer(IDXGISwapChain1)), "ppSwapChain")]),
    StdMethod(HRESULT, "CreateDecodeSwapChainForCompositionSurfaceHandle", [(ObjPointer(IUnknown), "pDevice"), (HANDLE, "hSurface"), (Pointer(DXGI_DECODE_SWAP_CHAIN_DESC), "pDesc"), (ObjPointer(IDXGIResource), "pYuvDecodeBuffers"), (ObjPointer(IDXGIOutput), "pRestrictToOutput"), Out(Pointer(ObjPointer(IDXGIDecodeSwapChain)), "ppSwapChain")]),
]
DXGI_FRAME_PRESENTATION_MODE = Enum("DXGI_FRAME_PRESENTATION_MODE", [
    "DXGI_FRAME_PRESENTATION_MODE_COMPOSED",
    "DXGI_FRAME_PRESENTATION_MODE_OVERLAY",
    "DXGI_FRAME_PRESENTATION_MODE_NONE",
])
DXGI_FRAME_STATISTICS_MEDIA = Struct("DXGI_FRAME_STATISTICS_MEDIA", [
    (UINT, "PresentCount"),
    (UINT, "PresentRefreshCount"),
    (UINT, "SyncRefreshCount"),
    (LARGE_INTEGER, "SyncQPCTime"),
    (LARGE_INTEGER, "SyncGPUTime"),
    (DXGI_FRAME_PRESENTATION_MODE, "CompositionMode"),
    (UINT, "ApprovedPresentDuration"),
])
IDXGISwapChainMedia = Interface("IDXGISwapChainMedia", IUnknown)
IDXGISwapChainMedia.methods += [
    StdMethod(HRESULT, "GetFrameStatisticsMedia", [Out(Pointer(DXGI_FRAME_STATISTICS_MEDIA), "pStats")], sideeffects=False),
    StdMethod(HRESULT, "SetPresentDuration", [(UINT, "Duration")]),
    StdMethod(HRESULT, "CheckPresentDurationSupport", [(UINT, "DesiredPresentDuration"), Out(Pointer(UINT), "pClosestSmallerPresentDuration"), Out(Pointer(UINT), "pClosestLargerPresentDuration")], sideeffects=False),
]
DXGI_OVERLAY_SUPPORT_FLAG = FakeEnum(UINT, [
    "DXGI_OVERLAY_SUPPORT_FLAG_DIRECT",
    "DXGI_OVERLAY_SUPPORT_FLAG_SCALING",
])
IDXGIOutput3 = Interface("IDXGIOutput3", IDXGIOutput2)
IDXGIOutput3.methods += [
    StdMethod(HRESULT, "CheckOverlaySupport", [(DXGI_FORMAT, "EnumFormat"), (ObjPointer(IUnknown), "pConcernedDevice"), Out(Pointer(DXGI_OVERLAY_SUPPORT_FLAG), "pFlags")], sideeffects=False),
]
# Register the DXGI 1.3 interfaces with the module.
dxgi.addInterfaces([
    IDXGIDevice3,
    IDXGISwapChain2,
    IDXGISwapChainMedia,
    IDXGIOutput3,
    IDXGIFactory3,
    IDXGIFactoryMedia,
])
#
# Undocumented interfaces
#
# Private interfaces used by the Desktop Window Manager; method layout
# reconstructed from third-party reverse engineering (see link below).
IDXGIFactoryDWM = Interface("IDXGIFactoryDWM", IUnknown)
IDXGISwapChainDWM = Interface("IDXGISwapChainDWM", IDXGIDeviceSubObject)
IDXGIFactoryDWM.methods += [
    StdMethod(HRESULT, "CreateSwapChain", [(ObjPointer(IUnknown), "pDevice"), (Pointer(DXGI_SWAP_CHAIN_DESC), "pDesc"), (ObjPointer(IDXGIOutput), "pOutput"), Out(Pointer(ObjPointer(IDXGISwapChainDWM)), "ppSwapChain")]),
]
# http://shchetinin.blogspot.co.uk/2012/04/dwm-graphics-directx-win8win7.html
IDXGISwapChainDWM.methods += [
    StdMethod(HRESULT, "Present", [(UINT, "SyncInterval"), (DXGI_PRESENT, "Flags")]),
    StdMethod(HRESULT, "GetBuffer", [(UINT, "Buffer"), (REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppSurface")]),
    StdMethod(HRESULT, "GetDesc", [Out(Pointer(DXGI_SWAP_CHAIN_DESC), "pDesc")], sideeffects=False),
    StdMethod(HRESULT, "ResizeBuffers", [(UINT, "BufferCount"), (UINT, "Width"), (UINT, "Height"), (DXGI_FORMAT, "NewFormat"), (DXGI_SWAP_CHAIN_FLAG, "SwapChainFlags")]),
    StdMethod(HRESULT, "ResizeTarget", [(Pointer(Const(DXGI_MODE_DESC)), "pNewTargetParameters")]),
    StdMethod(HRESULT, "GetContainingOutput", [Out(Pointer(ObjPointer(IDXGIOutput)), "ppOutput")]),
    StdMethod(HRESULT, "GetFrameStatistics", [Out(Pointer(DXGI_FRAME_STATISTICS), "pStats")], sideeffects=False),
    StdMethod(HRESULT, "GetLastPresentCount", [Out(Pointer(UINT), "pLastPresentCount")], sideeffects=False),
]
dxgi.addInterfaces([
    IDXGIFactoryDWM,
])
#
# DXGI 1.4
#
DXGI_COLOR_SPACE_TYPE = Enum('DXGI_COLOR_SPACE_TYPE', [
    'DXGI_COLOR_SPACE_RGB_FULL_G22_NONE_P709',
    'DXGI_COLOR_SPACE_RGB_FULL_G10_NONE_P709',
    'DXGI_COLOR_SPACE_RGB_STUDIO_G22_NONE_P709',
    'DXGI_COLOR_SPACE_RGB_STUDIO_G22_NONE_P2020',
    'DXGI_COLOR_SPACE_RESERVED',
    'DXGI_COLOR_SPACE_YCBCR_FULL_G22_NONE_P709_X601',
    'DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P601',
    'DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P601',
    'DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P709',
    'DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P709',
    'DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P2020',
    'DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P2020',
    'DXGI_COLOR_SPACE_CUSTOM',
])
DXGI_SWAP_CHAIN_COLOR_SPACE_SUPPORT_FLAG = Enum('DXGI_SWAP_CHAIN_COLOR_SPACE_SUPPORT_FLAG', [
    'DXGI_SWAP_CHAIN_COLOR_SPACE_SUPPORT_FLAG_PRESENT',
    'DXGI_SWAP_CHAIN_COLOR_SPACE_SUPPORT_FLAG_OVERLAY_PRESENT',
])
DXGI_OVERLAY_COLOR_SPACE_SUPPORT_FLAG = Enum('DXGI_OVERLAY_COLOR_SPACE_SUPPORT_FLAG', [
    'DXGI_OVERLAY_COLOR_SPACE_SUPPORT_FLAG_PRESENT',
])
DXGI_MEMORY_SEGMENT_GROUP = Enum('DXGI_MEMORY_SEGMENT_GROUP', [
    'DXGI_MEMORY_SEGMENT_GROUP_LOCAL',
    'DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL',
])
DXGI_QUERY_VIDEO_MEMORY_INFO = Struct('DXGI_QUERY_VIDEO_MEMORY_INFO', [
    (UINT64, 'Budget'),
    (UINT64, 'CurrentUsage'),
    (UINT64, 'AvailableForReservation'),
    (UINT64, 'CurrentReservation'),
])
# DXGI 1.4 interfaces, declared up front so method lists can refer to them.
IDXGISwapChain3 = Interface('IDXGISwapChain3', IDXGISwapChain2)
IDXGIOutput4 = Interface('IDXGIOutput4', IDXGIOutput3)
IDXGIFactory4 = Interface('IDXGIFactory4', IDXGIFactory3)
IDXGIAdapter3 = Interface('IDXGIAdapter3', IDXGIAdapter2)
IDXGISwapChain3.methods += [
    StdMethod(UINT, 'GetCurrentBackBufferIndex', []),
    StdMethod(HRESULT, 'CheckColorSpaceSupport', [(DXGI_COLOR_SPACE_TYPE, 'ColorSpace'), Out(Pointer(UINT), 'pColorSpaceSupport')], sideeffects=False),
    StdMethod(HRESULT, 'SetColorSpace1', [(DXGI_COLOR_SPACE_TYPE, 'ColorSpace')]),
    StdMethod(HRESULT, 'ResizeBuffers1', [(UINT, 'BufferCount'), (UINT, 'Width'), (UINT, 'Height'), (DXGI_FORMAT, 'Format'), (DXGI_SWAP_CHAIN_FLAG, 'SwapChainFlags'), (Pointer(Const(UINT)), 'pCreationNodeMask'), (Array(Const(ObjPointer(IUnknown)), 'BufferCount'), 'ppPresentQueue')]),
]
IDXGIOutput4.methods += [
    StdMethod(HRESULT, 'CheckOverlayColorSpaceSupport', [(DXGI_FORMAT, 'Format'), (DXGI_COLOR_SPACE_TYPE, 'ColorSpace'), (ObjPointer(IUnknown), 'pConcernedDevice'), Out(Pointer(UINT), 'pFlags')], sideeffects=False),
]
IDXGIFactory4.methods += [
    StdMethod(HRESULT, 'EnumAdapterByLuid', [(LUID, 'AdapterLuid'), (REFIID, 'riid'), Out(Pointer(ObjPointer(Void)), 'ppvAdapter')]),
    StdMethod(HRESULT, 'EnumWarpAdapter', [(REFIID, 'riid'), Out(Pointer(ObjPointer(Void)), 'ppvAdapter')]),
]
IDXGIAdapter3.methods += [
    StdMethod(HRESULT, 'RegisterHardwareContentProtectionTeardownStatusEvent', [(HANDLE, 'hEvent'), Out(Pointer(DWORD), 'pdwCookie')], sideeffects=False),
    StdMethod(Void, 'UnregisterHardwareContentProtectionTeardownStatus', [(DWORD, 'dwCookie')], sideeffects=False),
    StdMethod(HRESULT, 'QueryVideoMemoryInfo', [(UINT, 'NodeIndex'), (DXGI_MEMORY_SEGMENT_GROUP, 'MemorySegmentGroup'), Out(Pointer(DXGI_QUERY_VIDEO_MEMORY_INFO), 'pVideoMemoryInfo')], sideeffects=False),
    StdMethod(HRESULT, 'SetVideoMemoryReservation', [(UINT, 'NodeIndex'), (DXGI_MEMORY_SEGMENT_GROUP, 'MemorySegmentGroup'), (UINT64, 'Reservation')]),
    StdMethod(HRESULT, 'RegisterVideoMemoryBudgetChangeNotificationEvent', [(HANDLE, 'hEvent'), Out(Pointer(DWORD), 'pdwCookie')], sideeffects=False),
    StdMethod(Void, 'UnregisterVideoMemoryBudgetChangeNotification', [(DWORD, 'dwCookie')], sideeffects=False),
]
# Register the DXGI 1.4 interfaces with the module.
dxgi.addInterfaces([
    IDXGISwapChain3,
    IDXGIOutput4,
    IDXGIFactory4,
    IDXGIAdapter3,
])
#
# DXGI 1.5
#
DXGI_HDR_METADATA_TYPE = Enum('DXGI_HDR_METADATA_TYPE', [
    'DXGI_HDR_METADATA_TYPE_NONE',
    'DXGI_HDR_METADATA_TYPE_HDR10',
])
DXGI_HDR_METADATA_HDR10 = Struct('DXGI_HDR_METADATA_HDR10', [
    (Array(UINT16, 2), 'RedPrimary'),
    (Array(UINT16, 2), 'GreenPrimary'),
    (Array(UINT16, 2), 'BluePrimary'),
    (Array(UINT16, 2), 'WhitePoint'),
    (UINT, 'MaxMasteringLuminance'),
    (UINT, 'MinMasteringLuminance'),
    (UINT16, 'MaxContentLightLevel'),
    (UINT16, 'MaxFrameAverageLightLevel'),
])
DXGI_OFFER_RESOURCE_FLAGS = FakeEnum(UINT, [
    'DXGI_OFFER_RESOURCE_FLAG_ALLOW_DECOMMIT',
])
DXGI_RECLAIM_RESOURCE_RESULTS = Enum('DXGI_RECLAIM_RESOURCE_RESULTS', [
    'DXGI_RECLAIM_RESOURCE_RESULT_OK',
    'DXGI_RECLAIM_RESOURCE_RESULT_DISCARDED',
    'DXGI_RECLAIM_RESOURCE_RESULT_NOT_COMMITTED',
])
# Polymorphic feature enum for IDXGIFactory5::CheckFeatureSupport: each
# feature value is paired with the type its support-data blob decodes to.
DXGI_FEATURE, DXGI_FEATURE_DATA = EnumPolymorphic('DXGI_FEATURE', 'Feature', [
    ('DXGI_FEATURE_PRESENT_ALLOW_TEARING', Pointer(BOOL)),
], Blob(Void, "FeatureSupportDataSize"), False)
IDXGIOutput5 = Interface('IDXGIOutput5', IDXGIOutput4)
IDXGISwapChain4 = Interface('IDXGISwapChain4', IDXGISwapChain3)
IDXGIDevice4 = Interface('IDXGIDevice4', IDXGIDevice3)
IDXGIFactory5 = Interface('IDXGIFactory5', IDXGIFactory4)
IDXGIOutput5.methods += [
    StdMethod(HRESULT, 'DuplicateOutput1', [(ObjPointer(IUnknown), 'pDevice'), (UINT, 'Flags'), (UINT, 'SupportedFormatsCount'), (Array(Const(DXGI_FORMAT), 'SupportedFormatsCount'), 'pSupportedFormats'), Out(Pointer(ObjPointer(IDXGIOutputDuplication)), 'ppOutputDuplication')]),
]
IDXGISwapChain4.methods += [
    StdMethod(HRESULT, 'SetHDRMetaData', [(DXGI_HDR_METADATA_TYPE, 'Type'), (UINT, 'Size'), (Blob(Void, 'Size'), 'pMetaData')]),
]
IDXGIDevice4.methods += [
    StdMethod(HRESULT, 'OfferResources1', [(UINT, 'NumResources'), (Array(Const(ObjPointer(IDXGIResource)), 'NumResources'), 'ppResources'), (DXGI_OFFER_RESOURCE_PRIORITY, 'Priority'), (DXGI_OFFER_RESOURCE_FLAGS, 'Flags')]),
    StdMethod(HRESULT, 'ReclaimResources1', [(UINT, 'NumResources'), (Array(Const(ObjPointer(IDXGIResource)), 'NumResources'), 'ppResources'), Out(Array(DXGI_RECLAIM_RESOURCE_RESULTS, 'NumResources'), 'pResults')]),
]
IDXGIFactory5.methods += [
    StdMethod(HRESULT, 'CheckFeatureSupport', [(DXGI_FEATURE, 'Feature'), Out(DXGI_FEATURE_DATA, 'pFeatureSupportData'), (UINT, 'FeatureSupportDataSize')], sideeffects=False),
]
# Register the DXGI 1.5 interfaces with the module.
dxgi.addInterfaces([
    IDXGIOutput5,
    IDXGISwapChain4,
    IDXGIDevice4,
    IDXGIFactory5,
])
#
# DXGI 1.6
#
DXGI_ADAPTER_FLAG3 = Enum('DXGI_ADAPTER_FLAG3', [
    'DXGI_ADAPTER_FLAG3_NONE',
    'DXGI_ADAPTER_FLAG3_REMOTE',
    'DXGI_ADAPTER_FLAG3_SOFTWARE',
    'DXGI_ADAPTER_FLAG3_ACG_COMPATIBLE',
    'DXGI_ADAPTER_FLAG3_FORCE_DWORD',
    'DXGI_ADAPTER_FLAG3_SUPPORT_MONITORED_FENCES',
    'DXGI_ADAPTER_FLAG3_SUPPORT_NON_MONITORED_FENCES',
    'DXGI_ADAPTER_FLAG3_KEYED_MUTEX_CONFORMANCE',
])
DXGI_ADAPTER_DESC3 = Struct('DXGI_ADAPTER_DESC3', [
    (WString, 'Description'),
    (UINT, 'VendorId'),
    (UINT, 'DeviceId'),
    (UINT, 'SubSysId'),
    (UINT, 'Revision'),
    (SIZE_T, 'DedicatedVideoMemory'),
    (SIZE_T, 'DedicatedSystemMemory'),
    (SIZE_T, 'SharedSystemMemory'),
    (LUID, 'AdapterLuid'),
    (DXGI_ADAPTER_FLAG3, 'Flags'),
    (DXGI_GRAPHICS_PREEMPTION_GRANULARITY, 'GraphicsPreemptionGranularity'),
    (DXGI_COMPUTE_PREEMPTION_GRANULARITY, 'ComputePreemptionGranularity'),
])
DXGI_OUTPUT_DESC1 = Struct('DXGI_OUTPUT_DESC1', [
    (WString, 'DeviceName'),
    (RECT, 'DesktopCoordinates'),
    (BOOL, 'AttachedToDesktop'),
    (DXGI_MODE_ROTATION, 'Rotation'),
    (HMONITOR, 'Monitor'),
    (UINT, 'BitsPerColor'),
    (DXGI_COLOR_SPACE_TYPE, 'ColorSpace'),
    (Array(FLOAT, 2), 'RedPrimary'),
    (Array(FLOAT, 2), 'GreenPrimary'),
    (Array(FLOAT, 2), 'BluePrimary'),
    (Array(FLOAT, 2), 'WhitePoint'),
    (FLOAT, 'MinLuminance'),
    (FLOAT, 'MaxLuminance'),
    (FLOAT, 'MaxFullFrameLuminance'),
])
DXGI_HARDWARE_COMPOSITION_SUPPORT_FLAGS = Flags(UINT, [
    'DXGI_HARDWARE_COMPOSITION_SUPPORT_FLAG_FULLSCREEN',
    'DXGI_HARDWARE_COMPOSITION_SUPPORT_FLAG_WINDOWED',
    'DXGI_HARDWARE_COMPOSITION_SUPPORT_FLAG_CURSOR_STRETCHED',
])
DXGI_GPU_PREFERENCE = Enum('DXGI_GPU_PREFERENCE', [
    'DXGI_GPU_PREFERENCE_UNSPECIFIED',
    'DXGI_GPU_PREFERENCE_MINIMUM_POWER',
    'DXGI_GPU_PREFERENCE_HIGH_PERFORMANCE',
])
IDXGIFactory6 = Interface('IDXGIFactory6', IDXGIFactory5)
IDXGIAdapter4 = Interface('IDXGIAdapter4', IDXGIAdapter3)
IDXGIOutput6 = Interface('IDXGIOutput6', IDXGIOutput5)
IDXGIAdapter4.methods += [
    StdMethod(HRESULT, 'GetDesc3', [Out(Pointer(DXGI_ADAPTER_DESC3), 'pDesc')], sideeffects=False),
]
IDXGIOutput6.methods += [
    StdMethod(HRESULT, 'GetDesc1', [Out(Pointer(DXGI_OUTPUT_DESC1), 'pDesc')], sideeffects=False),
    StdMethod(HRESULT, 'CheckHardwareCompositionSupport', [Out(Pointer(DXGI_HARDWARE_COMPOSITION_SUPPORT_FLAGS), 'pFlags')], sideeffects=False),
]
IDXGIFactory6.methods += [
    StdMethod(HRESULT, 'EnumAdapterByGpuPreference', [(UINT, 'Adapter'), (DXGI_GPU_PREFERENCE, 'GpuPreference'), (REFIID, 'riid'), Out(Pointer(ObjPointer(Void)), 'ppvAdapter')]),
]
# Register the DXGI 1.6 interfaces and the remaining exported function.
dxgi.addInterfaces([
    IDXGIFactory6,
    IDXGIAdapter4,
    IDXGIOutput6,
])
dxgi.addFunctions([
    StdFunction(HRESULT, "DXGIDeclareAdapterRemovalSupport", [], sideeffects=False),
])
| 41.523686 | 344 | 0.733245 |
4a2897501c4860742a9eedfd04efe05fea8a41e0 | 1,465 | py | Python | code/camera_calib.py | nitchith/CarND-Advanced-Lane-Lines | 8e9e4d369f95f2076aa3b99c9015ac95c20037fc | [
"MIT"
] | null | null | null | code/camera_calib.py | nitchith/CarND-Advanced-Lane-Lines | 8e9e4d369f95f2076aa3b99c9015ac95c20037fc | [
"MIT"
] | null | null | null | code/camera_calib.py | nitchith/CarND-Advanced-Lane-Lines | 8e9e4d369f95f2076aa3b99c9015ac95c20037fc | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt | 34.069767 | 83 | 0.610239 |
4a2aaff818a2a9076ae44aab87a87c809613d1d6 | 34 | py | Python | python-jenkins/yaml_read_config/custom_log.py | MathiasStadler/docker-jenkins-scripted | 3f908987ab0428dd2239b524150ff3b65c71104c | [
"Apache-2.0"
] | null | null | null | python-jenkins/yaml_read_config/custom_log.py | MathiasStadler/docker-jenkins-scripted | 3f908987ab0428dd2239b524150ff3b65c71104c | [
"Apache-2.0"
] | null | null | null | python-jenkins/yaml_read_config/custom_log.py | MathiasStadler/docker-jenkins-scripted | 3f908987ab0428dd2239b524150ff3b65c71104c | [
"Apache-2.0"
] | 1 | 2020-02-11T04:42:40.000Z | 2020-02-11T04:42:40.000Z | """ module logging"""
# logging
| 6.8 | 21 | 0.588235 |
4a2c8215b731a53474eb2fa6ab29c369314e2b86 | 22,135 | py | Python | src/stratis_cli/_actions/_pool.py | stratis-storage/stratis-cli | 16efcfe50558785ff44a1570ca554edb2006f8d2 | [
"Apache-2.0"
] | 94 | 2017-02-06T11:01:02.000Z | 2022-03-19T16:20:50.000Z | src/stratis_cli/_actions/_pool.py | stratis-storage/stratis-cli | 16efcfe50558785ff44a1570ca554edb2006f8d2 | [
"Apache-2.0"
] | 564 | 2016-08-30T16:23:46.000Z | 2022-03-31T01:41:16.000Z | src/stratis_cli/_actions/_pool.py | stratis-storage/stratis-cli | 16efcfe50558785ff44a1570ca554edb2006f8d2 | [
"Apache-2.0"
] | 41 | 2016-09-13T12:31:42.000Z | 2022-03-23T09:42:04.000Z | # Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pool actions.
"""
# isort: STDLIB
import os
from collections import defaultdict
# isort: THIRDPARTY
from justbytes import Range
from .._constants import PoolMaintenanceErrorCode
from .._errors import (
StratisCliAggregateError,
StratisCliEngineError,
StratisCliIncoherenceError,
StratisCliInUseOtherTierError,
StratisCliInUseSameTierError,
StratisCliNameConflictError,
StratisCliNoChangeError,
StratisCliPartialChangeError,
StratisCliPartialFailureError,
)
from .._stratisd_constants import BlockDevTiers, PoolActionAvailability, StratisdErrors
from ._connection import get_object
from ._constants import TOP_OBJECT
from ._formatting import get_property, print_table, size_triple, to_hyphenated
from ._utils import get_clevis_info
def _generate_pools_to_blockdevs(managed_objects, to_be_added, tier):
    """
    Generate a map of pools to which block devices they own

    :param managed_objects: the result of a GetManagedObjects call
    :type managed_objects: dict of str * dict
    :param to_be_added: the blockdevs to be added
    :type to_be_added: frozenset of str
    :param tier: tier to search for blockdevs to be added
    :type tier: _stratisd_constants.BlockDevTiers
    :returns: a map of pool names to sets of strings containing blockdevs they own
    :rtype: dict of str * frozenset of str
    """
    # pylint: disable=import-outside-toplevel
    from ._data import MODev, MOPool, devs, pools

    # Map each pool object path to its human-readable name.
    pool_names_by_path = {}
    for (object_path, info) in pools().search(managed_objects):
        pool_names_by_path[object_path] = str(MOPool(info).Name())

    # Collect, per owning pool, the requested devnodes found in this tier.
    pools_to_blockdevs = defaultdict(list)
    for (_, info) in devs(props={"Tier": tier}).search(managed_objects):
        modev = MODev(info)
        devnode = str(modev.Devnode())
        if devnode in to_be_added:
            pools_to_blockdevs[pool_names_by_path[modev.Pool()]].append(devnode)

    return {
        pool: frozenset(blockdevs)
        for (pool, blockdevs) in pools_to_blockdevs.items()
    }
def _check_opposite_tier(managed_objects, to_be_added, other_tier):
    """
    Check whether specified blockdevs are already in the other tier.

    :param managed_objects: the result of a GetManagedObjects call
    :type managed_objects: dict of str * dict
    :param to_be_added: the blockdevs to be added
    :type to_be_added: frozenset of str
    :param other_tier: the other tier, not the one requested
    :type other_tier: _stratisd_constants.BlockDevTiers
    :raises StratisCliInUseOtherTierError: if blockdevs are used by other tier
    """
    owners = _generate_pools_to_blockdevs(managed_objects, to_be_added, other_tier)

    if owners:
        # The tier the user actually asked for is the opposite of other_tier.
        if other_tier == BlockDevTiers.CACHE:
            requested_tier = BlockDevTiers.DATA
        else:
            requested_tier = BlockDevTiers.CACHE
        raise StratisCliInUseOtherTierError(owners, requested_tier)
def _check_same_tier(pool_name, managed_objects, to_be_added, this_tier):
    """
    Check whether specified blockdevs are already in the tier to which they
    are to be added.

    :param pool_name: the name of the pool the devices are being added to
    :type pool_name: str
    :param managed_objects: the result of a GetManagedObjects call
    :type managed_objects: dict of str * dict
    :param to_be_added: the blockdevs to be added
    :type to_be_added: frozenset of str
    :param this_tier: the tier requested
    :type this_tier: _stratisd_constants.BlockDevTiers
    :raises StratisCliPartialChangeError: if blockdevs are used by this tier
    :raises StratisCliInUseSameTierError: if blockdevs are used by this tier in another pool
    """
    pools_to_blockdevs = _generate_pools_to_blockdevs(
        managed_objects, to_be_added, this_tier
    )

    owned_by_current_pool = frozenset(pools_to_blockdevs.get(pool_name, []))
    # Devices claimed by this tier in any pool other than pool_name.
    owned_by_other_pools = {
        pool: devnodes
        for pool, devnodes in pools_to_blockdevs.items()
        if pool_name != pool
    }

    if owned_by_current_pool:
        raise StratisCliPartialChangeError(
            "add to cache" if this_tier == BlockDevTiers.CACHE else "add to data",
            to_be_added.difference(owned_by_current_pool),
            to_be_added.intersection(owned_by_current_pool),
        )
    if owned_by_other_pools:
        raise StratisCliInUseSameTierError(owned_by_other_pools, this_tier)
def _fetch_locked_pools_property(proxy):
    """
    Fetch the LockedPools property from stratisd.

    :param proxy: proxy to the top object in stratisd
    :return: a representation of unlocked devices
    :rtype: dict
    :raises StratisCliEngineError:
    """
    # pylint: disable=import-outside-toplevel
    from ._data import Manager

    locked_pools = Manager.Properties.LockedPools.Get(proxy)
    return locked_pools
| 36.830283 | 92 | 0.614954 |
4a2ca921ee39571f1f1e9f4e267400d5739cf49c | 1,474 | py | Python | synchrobot/chat_user.py | Esenin/telegram_vk_pipe_bot | db92fe062a121beebbc386975660d5a76f1b396c | [
"MIT"
] | 2 | 2016-09-20T19:41:40.000Z | 2016-10-05T12:49:18.000Z | synchrobot/chat_user.py | Esenin/telegram_vk_pipe_bot | db92fe062a121beebbc386975660d5a76f1b396c | [
"MIT"
] | null | null | null | synchrobot/chat_user.py | Esenin/telegram_vk_pipe_bot | db92fe062a121beebbc386975660d5a76f1b396c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Author: Ivan Senin
import calendar
import time
import datetime as dt
import json
| 26.321429 | 102 | 0.708955 |
4a2d85e10d7ec8df4402f2b20294f47dcb467eb8 | 16,294 | py | Python | backend/src/contaxy/schema/auth.py | Felipe-Renck/contaxy | 532d1f01aad1ea8155bc10216acedca601d37889 | [
"MIT"
] | null | null | null | backend/src/contaxy/schema/auth.py | Felipe-Renck/contaxy | 532d1f01aad1ea8155bc10216acedca601d37889 | [
"MIT"
] | null | null | null | backend/src/contaxy/schema/auth.py | Felipe-Renck/contaxy | 532d1f01aad1ea8155bc10216acedca601d37889 | [
"MIT"
] | null | null | null | from datetime import datetime, timezone
from enum import Enum
from typing import Dict, List, Optional
import pydantic
from fastapi import HTTPException, Path, status
from pydantic import BaseModel, EmailStr, Field
from contaxy.schema.exceptions import ClientValueError
from contaxy.schema.shared import MAX_DESCRIPTION_LENGTH
from contaxy.utils.fastapi_utils import as_form
USERS_KIND = "users"
ADMIN_ROLE = "roles/admin"
USER_ROLE = "roles/user"
contaxy_token_purposes = {purpose for purpose in TokenPurpose}
# Oauth Specific Code
# TODO: Replaced with pydantic class
# class OAuth2TokenRequestForm:
# """OAuth2 Token Endpoint Request Form."""
# def __init__(
# self,
# grant_type: OAuth2TokenGrantTypes = Form(
# ...,
# description="Grant type. Determines the mechanism used to authorize the creation of the tokens.",
# ),
# username: Optional[str] = Form(
# None, description="Required for `password` grant type. The users username."
# ),
# password: Optional[str] = Form(
# None, description="Required for `password` grant type. The users password."
# ),
# scope: Optional[str] = Form(
# None,
# description="Scopes that the client wants to be included in the access token. List of space-delimited, case-sensitive strings",
# ),
# client_id: Optional[str] = Form(
# None,
# description="The client identifier issued to the client during the registration process",
# ),
# client_secret: Optional[str] = Form(
# None,
# description=" The client secret. The client MAY omit the parameter if the client secret is an empty string.",
# ),
# code: Optional[str] = Form(
# None,
# description="Required for `authorization_code` grant type. The value is what was returned from the authorization endpoint.",
# ),
# redirect_uri: Optional[str] = Form(
# None,
# description="Required for `authorization_code` grant type. Specifies the callback location where the authorization was sent. This value must match the `redirect_uri` used to generate the original authorization_code.",
# ),
# refresh_token: Optional[str] = Form(
# None,
# description="Required for `refresh_token` grant type. The refresh token previously issued to the client.",
# ),
# state: Optional[str] = Form(
# None,
# description="An opaque value used by the client to maintain state between the request and callback. The parameter SHOULD be used for preventing cross-site request forgery.",
# ),
# set_as_cookie: Optional[bool] = Form(
# False,
# description="If `true`, the access (and refresh) token will be set as cookie instead of the response body.",
# ),
# ):
# self.grant_type = grant_type
# self.username = username
# self.password = password
# self.scopes = []
# if scope:
# self.scopes = str(scope).split()
# self.client_id = client_id
# self.client_secret = client_secret
# self.code = code
# self.redirect_uri = redirect_uri
# self.refresh_token = refresh_token
# self.state = state
# self.set_as_cookie = set_as_cookie
# TODO: Not used right now
# class OAuth2AuthorizeRequestForm:
# """OAuth2 Authorize Endpoint Request Form."""
# def __init__(
# self,
# response_type: AuthorizeResponseType = Form(
# ...,
# description="Either code for requesting an authorization code or token for requesting an access token (implicit grant).",
# ),
# client_id: Optional[str] = Form(
# None, description="The public identifier of the client."
# ),
# redirect_uri: Optional[str] = Form(None, description="Redirection URL."),
# scope: Optional[str] = Form(
# None, description="The scope of the access request."
# ),
# state: Optional[str] = Form(
# None,
# description="An opaque value used by the client to maintain state between the request and callback. The parameter SHOULD be used for preventing cross-site request forgery",
# ),
# nonce: Optional[str] = Form(None),
# ):
# self.response_type = response_type
# self.client_id = client_id
# self.redirect_uri = redirect_uri
# self.scope = scope
# self.state = state
# self.nonce = nonce
USER_ID_PARAM = Path(
...,
title="User ID",
description="A valid user ID.",
# TODO: add length restriction
)
# User Models
| 37.457471 | 266 | 0.651774 |
4a2f072d42921424ab487e97b0b0a7b2ce429f4d | 1,193 | py | Python | setup.py | richarddwang/hugdatafast | 714ebac89cb6c616a53ec5da50d0c1c50c6f2a3e | [
"Apache-2.0"
] | 19 | 2020-08-28T08:35:21.000Z | 2021-03-08T18:42:46.000Z | setup.py | richarddwang/hugdatafast | 714ebac89cb6c616a53ec5da50d0c1c50c6f2a3e | [
"Apache-2.0"
] | 3 | 2020-08-31T15:57:55.000Z | 2020-09-05T09:34:09.000Z | setup.py | richarddwang/hugdatafast | 714ebac89cb6c616a53ec5da50d0c1c50c6f2a3e | [
"Apache-2.0"
] | null | null | null | import setuptools
from hugdatafast.__init__ import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
REQUIRED_PKGS = [
'fastai>=2.0.8',
'fastscore>=1.0.1', # change of store_attr api
'datasets',
]
setuptools.setup(
name="hugdatafast",
version=__version__,
author="Richard Wang",
author_email="richardyy1188@gmail.com",
description="The elegant bridge between hugginface data and fastai",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/richarddwang/hugdatafast",
license='Apache 2.0',
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
python_requires='>=3.6',
install_requires=REQUIRED_PKGS,
keywords='datasets machine learning datasets metrics fastai huggingface',
) | 33.138889 | 77 | 0.68399 |
4a2f0deb5d53bf6a5794ae465d5f56eb3c3bd2c5 | 3,591 | py | Python | tests/scripts/test_repository_actor_definition.py | drehak/leapp | 062c76859e6b4a68592c6a387e44a2c1d36949ff | [
"Apache-2.0"
] | null | null | null | tests/scripts/test_repository_actor_definition.py | drehak/leapp | 062c76859e6b4a68592c6a387e44a2c1d36949ff | [
"Apache-2.0"
] | 3 | 2022-01-31T10:24:53.000Z | 2022-03-29T12:30:04.000Z | tests/scripts/test_repository_actor_definition.py | drehak/leapp | 062c76859e6b4a68592c6a387e44a2c1d36949ff | [
"Apache-2.0"
] | null | null | null | import pytest
from leapp.repository.actor_definition import ActorDefinition, ActorInspectionFailedError, MultipleActorsError
from leapp.exceptions import UnsupportedDefinitionKindError
from leapp.repository import DefinitionKind
from helpers import repository_dir
import logging
import mock
# Canned actor metadata used by the tests in this module as a stand-in for
# the result of inspecting a real actor (fields mirror what discovery would
# return: description, class/actor names, path, and empty capability tuples).
_FAKE_META_DATA = {
    'description': 'Fake Description',
    'class_name': 'FakeActor',
    'name': 'fake-actor',
    'path': 'actors/test',
    'tags': (),
    'consumes': (),
    'produces': (),
    'dialogs': (),
}
| 49.875 | 118 | 0.601225 |
4a2f4eecfe75a9c91356c84f877db3d3e9fc53fc | 2,139 | py | Python | iHome/house/models.py | yeyuning1/iHome | aceb87d786ab66cf74ff47f549ec73388d21c9e3 | [
"MIT"
] | 2 | 2019-08-13T07:34:35.000Z | 2019-08-13T08:11:46.000Z | iHome/house/models.py | yeyuning1/iHome | aceb87d786ab66cf74ff47f549ec73388d21c9e3 | [
"MIT"
] | null | null | null | iHome/house/models.py | yeyuning1/iHome | aceb87d786ab66cf74ff47f549ec73388d21c9e3 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
from utils.models import BaseModel
| 41.941176 | 104 | 0.697522 |
4a31433e8acb3aa3c417194791048caf8fdb3d24 | 15,863 | py | Python | cltwit/main.py | Psycojoker/cltwit | 3164f263df60d608da124ceb7d1e56bbdde7c930 | [
"WTFPL",
"Unlicense"
] | null | null | null | cltwit/main.py | Psycojoker/cltwit | 3164f263df60d608da124ceb7d1e56bbdde7c930 | [
"WTFPL",
"Unlicense"
] | null | null | null | cltwit/main.py | Psycojoker/cltwit | 3164f263df60d608da124ceb7d1e56bbdde7c930 | [
"WTFPL",
"Unlicense"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Cltwit is a command line twitter utility
Author : Jrme Launay
Date : 2013
"""
import os
import sys
import re
import getopt
import gettext
import sqlite3
import webbrowser
import ConfigParser
from sqlite2csv import sqlite2csv
from cltwitdb import cltwitdb
from utils import LocalTimezone
from cltwitreport import TweetsReport
APP_NAME = 'cltwit'
LOC_PATH = os.path.dirname(__file__) + '/locale'
gettext.find(APP_NAME, LOC_PATH)
gettext.install(APP_NAME, LOC_PATH, True)
try:
import tweepy
except ImportError:
print(_("Veuillez installer tweetpy https://github.com/tweepy/tweepy"))
sys.exit()
# Rpertoire pour conf et bdd
__cltwitdir__ = os.path.expanduser("~/.config/cltwit")
# Fichier de configuration
__configfile__ = __cltwitdir__ + "/cltwit.conf"
# base de donnes et table sqlite
__dblocation__ = __cltwitdir__ + '/data.db'
__tablename__ = 'twitter'
__Local__ = LocalTimezone()
# gestion des couleurs sur le terminal
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
def has_colours(stream):
    """Return True if *stream* is a TTY whose terminal supports >2 colours.

    Any failure while probing terminfo (no curses, no terminal database,
    dumb terminal...) is treated as "no colour support".
    """
    if not hasattr(stream, "isatty"):
        return False
    if not stream.isatty():
        return False  # colours only make sense on a TTY
    try:
        import curses
        curses.setupterm()
        return curses.tigetnum("colors") > 2
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.  On any probing error assume False.
        return False
__has_colours__ = has_colours(sys.stdout)
def printout(text, colour=WHITE):
    """Write *text* to stdout, wrapped in ANSI colour codes when supported.

    This is Python 2 code: both branches now encode to UTF-8 bytes before
    writing.  Previously only the non-colour branch encoded, so coloured
    output of accented text could raise UnicodeEncodeError on non-UTF-8
    terminals.
    """
    if __has_colours__:
        seq = "\x1b[1;%dm" % (30 + colour) + text + "\x1b[0m"
        sys.stdout.write(seq.encode("Utf-8"))
    else:
        sys.stdout.write(text.encode("Utf-8"))
def checkdb():
    """Abort the program unless the sqlite tweet database already exists."""
    if not os.path.exists(__dblocation__):
        printout(_(u"Vous devez d'abord lancer la commande --database create \
pour crer une base de donnes de vos tweets."), RED)
        sys.exit()
def checkconfig():
    """Load the stored OAuth credentials, or create them interactively.

    Reads the config file; when no token/password pair is stored, runs the
    OAuth browser dance, extracts the credentials and persists them.

    :return: a tweepy.OAuthHandler ready for API use
    """
    # Read the existing configuration file, if any.
    config = ConfigParser.RawConfigParser()
    try:
        config.read(__configfile__)
        if config.has_option('twitterapi', 'access_token'):
            access_token = config.get('twitterapi', 'access_token')
        if config.has_option('twitterapi', 'access_password'):
            access_password = config.get('twitterapi', 'access_password')
    except (ConfigParser.Error, IOError):
        # Best effort: a missing or corrupt file simply triggers the
        # interactive OAuth flow below.  (Narrowed from a bare ``except:``.)
        pass
    auth = tweepy.OAuthHandler("Jus1rnqM6S0WojJfOH1kQ",
                               "AHQ5sTC8YYArHilXmqnsstOivY6ygQ2N27L1zBwk")
    # No stored credentials: authorize the Twitter connection via OAuth.
    if not (config.has_option('twitterapi', 'access_token') and
            config.has_option('twitterapi', 'access_password')):
        # Open the web browser so the user can fetch the authorization token.
        while True:
            try:
                webbrowser.open(auth.get_authorization_url())
                var = raw_input(_("Entrez le token !\n"))
                auth.get_access_token(var)
            except Exception as e:
                print(str(e))
                continue
            break
        var = auth.access_token
        # Extract the token and the password from the "a=b&c=d" string.
        access_password = str(var).split("&")[0].split("=")[1]
        access_token = str(var).split("&")[1].split("=")[1]
        # Persist the freshly obtained credentials.
        cfgfile = None
        try:
            cfgfile = open(__configfile__, 'w')
            if not config.has_section('twitterapi'):
                config.add_section('twitterapi')
            config.set('twitterapi', 'access_token', access_token)
            config.set('twitterapi', 'access_password', access_password)
            config.write(cfgfile)
        except IOError:
            pass
        finally:
            # Guard the close: previously ``cfgfile.close()`` raised
            # NameError when open() itself failed.
            if cfgfile is not None:
                cfgfile.close()
    else:  # A configuration file with credentials already existed.
        auth.set_access_token(access_token, access_password)
    return auth
def login():
    """Connect to the Twitter API via tweepy.

    :return: (api, auth, twittername).  ``twittername`` falls back to
        "offline_mode" whenever the API cannot confirm the user.
    """
    auth = checkconfig()
    api = tweepy.API(auth)
    # Verify the API connection by fetching the authenticated user's name.
    try:
        twittername = api.me().screen_name
    except Exception as e:
        # Previously only the 'Unable to get username' message assigned a
        # fallback name; any other failure left ``twittername`` unbound and
        # crashed below with NameError.  Fall back for every failure.
        if 'Unable to get username' not in str(e):
            print(str(e))
        printout(_(u"Impossible de s'authentifier avec l'api Twitter.\
Fonctionne en mode dconnect"), RED)
        print("\n")
        twittername = "offline_mode"
    printout(_(u"Authentifi avec le user twitter {0}").format(twittername.decode('utf-8')), GREEN)
    print("\n")
    return api, auth, twittername
def get_friends_followers(api):
    """Return two lists of user ids: (friends, followers).

    :param api: an authenticated tweepy API object
    """
    friend_id = []
    follower_id = []
    printout(_(u"Rcupration des Followers..."), YELLOW)
    print("\n")
    for follower in tweepy.Cursor(api.followers).items():
        follower_id.append(follower.id)
    # Wrapped in _() so this message is translated like the one above
    # (the gettext wrapper was previously missing here).
    printout(_(u"Rcupration des Friends..."), YELLOW)
    print("\n")
    for friend in tweepy.Cursor(api.friends).items():
        friend_id.append(friend.id)
    return friend_id, follower_id
def get_diff(liste1, liste2):
    """Return the items of liste1 that do not appear in liste2."""
    exclusions = set(liste2)
    return [item for item in set(liste1) if item not in exclusions]
def follow_users(api, user):
    """Start following *user* and print a confirmation message."""
    try:
        api.create_friendship(user)
        screen_name = api.get_user(user).screen_name
        printout(_(u"Vous suivez maintenant {0}").format(screen_name.decode('utf-8')), GREEN)
    except Exception as e:
        print(e)
def unfollow_user(api, user):
    """Stop following *user* and print a confirmation message."""
    try:
        api.destroy_friendship(user)
        screen_name = api.get_user(user).screen_name
        printout(_(u"Vous ne suivez plus {0}").format(screen_name.decode('utf-8')), GREEN)
    except Exception as e:
        print(e)
def main(argv=None):
    """Command-line entry point: parse the options and run each action.

    Handles -a/--api (rate-limit info), -r/--report (PDF report),
    -d/--database (create/update the sqlite cache), -o/--output (csv export),
    -s/--search, -u/--unfollow, -f/--follow, -t/--tweet and -h/--help.
    """
    # Create the config/database directory if it does not exist yet.
    if not os.path.exists(__cltwitdir__):
        os.makedirs(__cltwitdir__)
    #~ twittername = "offline_mode"
    # Argument handling.
    if argv is None:
        argv = sys.argv
    if len(argv) == 1:
        # NOTE(review): ``help()`` — presumably a usage function defined
        # elsewhere in this module; otherwise this calls the builtin — confirm.
        help()
    try:
        opts, args = getopt.getopt(sys.argv[1:], "r:ahfut:o:s:d:",
                                   ["report", "api", "help", "follow", "unfollow", "tweet=", "output=", "search=", "database="])
    except getopt.GetoptError, err:
        print(err)
        help()
        sys.exit()
    # Option processing.
    for option, value in opts:
        if option in ('-a', '--api'):
            api, auth, twittername = login()
            res = api.rate_limit_status()
            rtime = res['reset_time']
            rhits = res['remaining_hits']
            hlimit = res['hourly_limit']
            from dateutil.parser import parse
            drtime = parse(rtime)
            printout(_("Informations sur l'utilisation de l'api Twitter"), YELLOW)
            print("\n")
            # Convert the reset time reported by the Twitter API to the
            # local timezone.
            rlocaltime = drtime.astimezone(__Local__)
            printout(_("Maximum d'appels par heure: "), BLUE)
            print hlimit
            printout(_("Nombre d'appels restants: "), BLUE)
            print rhits
            printout(_("Heure du prochain reset: "), BLUE)
            print rlocaltime.strftime("%H:%M %Y-%m-%d")
        if option in ('-r', '--report'):
            api, auth, twittername = login()
            checkdb()
            conn = sqlite3.connect(__dblocation__)
            c = conn.cursor()
            # First and last years present in the tweet table.
            c.execute("select substr(date, 1,4) from twitter order by date asc limit 1")
            dmois = c.fetchone()[0]
            c.execute("select substr(date, 1,4) from twitter order by date desc limit 1")
            fmois = c.fetchone()[0]
            # Per-year list of 12 monthly tweet counts.
            dd = dict()
            for a in range(int(dmois), int(fmois) + 1):
                result = []
                for m in range(1, 13):
                    mois = ('{num:02d}'.format(num=m))
                    c.execute("select count(*) from twitter where substr(date, 1,4) = '{0}' and substr(date, 6,2) = '{1}'".format(a, mois))
                    result.append(c.fetchone()[0])
                dd[a] = result
            c.close()
            conn.close()
            treport = TweetsReport(value)
            # twittername = "offline"
            treport.ecrireTitre(twittername)
            # Three charts per page; ``saut`` is the vertical offset.
            nb = 0
            for annee, donnees in dd.items():
                nb += 1
                if nb == 4:
                    treport.NextPage()
                    nb = 1
                saut = 0
                if nb == 1:
                    saut = 0
                if nb == 2:
                    saut = 200
                if nb == 3:
                    saut = 400
                treport.ecrireLegende(saut, annee, donnees)
                treport.addPie(saut, donnees)
            treport.save()
            printout(_(u"Report {0} cr !").format(value), GREEN)
            print("\n")
            sys.exit(0)
        if option in ('-d', '--database'):
            if value in ('u', 'update'):
                # Connect to the Twitter API.
                api, auth, twittername = login()
                # Update the database.
                db = cltwitdb(__dblocation__, __tablename__)
                printout(_(u"Mise jour de la base de donnes de {0}").format(twittername.decode('utf-8')), YELLOW)
                print("\n")
                nb = db.update(api, twittername)
                printout(_(u"Ajout de {0} tweet(s) dans la base de donnes.").format(nb), GREEN)
            if value in ('c', 'create'):
                # Connect to the Twitter API.
                api, auth, twittername = login()
                # Create the database.
                db = cltwitdb(__dblocation__, __tablename__)
                printout(_(u"Cration de la liste des tweets de ") + twittername.decode('utf-8'), YELLOW)
                db.create(api, twittername)
                printout(_(u"Base de donnes cre"), GREEN)
                sys.exit()
                #~ database_create(api,twittername)
        if option in ("-o", "--output"):
            # Export the tweets to csv.
            checkdb()
            conn = sqlite3.connect(__dblocation__)
            c = conn.cursor()
            # Query the data to export.
            c.execute('select date, tweet, url from {0} order by date desc'.format(__tablename__))
            # The sqlite2csv class does the export work.
            export = sqlite2csv(open(value, "wb"))
            # csv header row.
            export.writerow(["Date", "Tweet", "URL"])
            # csv data rows.
            export.writerows(c)
            # Close the sqlite cursor and connection.
            c.close()
            conn.close()
            printout(_(u"Fichier csv {0} cr.").format(value.decode('utf-8')), GREEN)
            sys.exit()
        if option in ("-s", "--search"):
            # Search a pattern in the stored tweets.
            checkdb()
            printout(_(u"Recherche de {0} dans vos anciens tweets...")
                     .format(value.decode('utf-8')), YELLOW)
            print("\n")
            # The search method returns the rows whose field contains
            # the pattern.
            db = cltwitdb(__dblocation__, __tablename__)
            results = db.search(value, "tweet")
            for result in results:
                print((u"{0} -> {1}\n{2}\n\n").format(result[1].decode('utf-8'), result[4].decode('utf-8'), result[2].decode('utf-8')))
        if option in ("-u", "--unfollow"):
            # Connect to the Twitter API.
            api, auth, twittername = login()
            # Build the friend and follower lists (by id).
            friend_id, follower_id = get_friends_followers(api)
            # Compute the follow and unfollow candidate lists.
            follow_liste = get_diff(follower_id, friend_id)
            unfollow_liste = get_diff(friend_id, follower_id)
            # Un-follow, confirming each user interactively.
            printout(_("Vous suivez {0} personnes qui ne vous suivent pas.")
                     .format(len(unfollow_liste)), YELLOW)
            print("\n")
            printout(_("Voulez changer cela ? (o/N)"), BLUE)
            print("\n")
            reponse = raw_input("> ")
            if (reponse.lower() == 'o' or reponse.lower() == 'y'):
                for user in unfollow_liste:
                    printout(_("Voulez-vous cesser de suivre {0} ? (o/N)")
                             .format(api.get_user(user).screen_name), BLUE)
                    print("\n")
                    reponse = raw_input("> ")
                    if (reponse.lower() == 'o' or reponse.lower() == 'y'):
                        unfollow_user(api, user)
        if option in ("-f", "--follow"):
            # Connect to the Twitter API.
            api, auth, twittername = login()
            # Build the friend and follower lists (by id).
            friend_id, follower_id = get_friends_followers(api)
            # Compute the follow and unfollow candidate lists.
            follow_liste = get_diff(follower_id, friend_id)
            unfollow_liste = get_diff(friend_id, follower_id)
            # Follow, confirming each user interactively.
            printout(_("{0} personnes vous suivent alors que vous ne les suivez pas.")
                     .format(len(follow_liste)), YELLOW)
            print("\n")
            printout(_("Voulez changer cela ? (o/N)"), BLUE)
            print("\n")
            reponse = raw_input("> ")
            if (reponse.lower() == 'o' or reponse.lower() == 'y'):
                for user in follow_liste:
                    printout(_("Voulez-vous suivre {0} ? (o/N)"
                             .format(api.get_user(user).screen_name)), BLUE)
                    print("\n")
                    reponse = raw_input("> ")
                    if (reponse.lower() == 'o' or reponse.lower() == 'y'):
                        follow_users(api, user)
        if option in ("-t", "--tweet"):
            # Connect to the Twitter API.
            api, auth, twittername = login()
            # Send a tweet; URLs count as t.co-shortened lengths (22/23).
            tweet_size = len(re.sub("https://\S*", "X"*23, re.sub("http://\S*", "X"*22, value)))
            if tweet_size < 141:
                api.update_status(value)
                print("\n")
                printout(_(u"Tweet envoy !"), GREEN)
            else:
                printout(_(u"La limite pour un tweet est de 140 caractres, votre message \
fait {0} caractres de trop").format(str(tweet_size - 140).decode('utf-8')), RED)
            sys.exit()
        if option in ("-h", "--help"):
            help()
if __name__ == "__main__":
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        # Exit politely on Ctrl-C.
        print("\n")
        print(_(u"Merci d'avoir utilis clitwit !"))
| 36.635104 | 140 | 0.566034 |
4a32ad81cfcc28f835805b24183250a1a290fdeb | 235 | py | Python | weibo_image_spider/exceptions.py | lonsty/weibo-pic-spider-hd | c7dae38b51209296cc8e71aa6fb80f094d549198 | [
"MIT"
] | null | null | null | weibo_image_spider/exceptions.py | lonsty/weibo-pic-spider-hd | c7dae38b51209296cc8e71aa6fb80f094d549198 | [
"MIT"
] | null | null | null | weibo_image_spider/exceptions.py | lonsty/weibo-pic-spider-hd | c7dae38b51209296cc8e71aa6fb80f094d549198 | [
"MIT"
] | null | null | null | # @AUTHOR : lonsty
# @DATE : 2020/3/28 18:01
| 12.368421 | 41 | 0.719149 |
4a33a995384ea8c9d2b8647bf4341ccfb7cc9243 | 1,135 | py | Python | WebHtmlExample/WebHtmlExample.py | lilei644/python-learning-example | 71910a32bc8b3b8f23ba13babb583af453405bbe | [
"MIT"
] | 2 | 2018-01-20T02:24:20.000Z | 2018-06-07T18:16:59.000Z | WebHtmlExample/WebHtmlExample.py | lilei644/python-learning-example | 71910a32bc8b3b8f23ba13babb583af453405bbe | [
"MIT"
] | null | null | null | WebHtmlExample/WebHtmlExample.py | lilei644/python-learning-example | 71910a32bc8b3b8f23ba13babb583af453405bbe | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
import re
#
# User-Agent
headers = {
"User-Agent": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}
#
#
if __name__ == '__main__':
get_weather()
get_bar()
| 31.527778 | 142 | 0.65022 |
4a3450cf5aa9171992cee7901efa3fe712343d3d | 1,158 | py | Python | Codi/diode.py | JosepFanals/HELM | feb579f37eb0850ba2a7acef18f8d3d78b9e599c | [
"MIT"
] | 1 | 2020-09-03T14:46:35.000Z | 2020-09-03T14:46:35.000Z | Codi/diode.py | JosepFanals/HELM | feb579f37eb0850ba2a7acef18f8d3d78b9e599c | [
"MIT"
] | 1 | 2021-09-09T12:54:09.000Z | 2021-09-14T07:47:58.000Z | Codi/diode.py | JosepFanals/HELM | feb579f37eb0850ba2a7acef18f8d3d78b9e599c | [
"MIT"
] | null | null | null | import numpy as np
import math
import matplotlib.pyplot as plt
U = 5 # equival a l'E
R = 2 # equival a R1
R2 = 3
P = 1.2
Vt = 0.026
Is = 0.000005
n = 200 # profunditat
Vd = np.zeros(n) # sries
Vl = np.zeros(n)
I1 = np.zeros(n)
I1[0] = U / R # inicialitzaci de les sries
Vd[0] = Vt * math.log(1 + I1[0] / Is)
Vl[0] = P / I1[0]
for i in range(1, n): # clcul dels coeficients
I1[i] = (1 / R + 1 / R2) * (-Vd[i - 1] - Vl[i - 1])
Vd[i] = (i * Vt * I1[i] - convVd(Vd, I1, i)) / (i * (Is + I1[0]))
Vl[i] = -convVlI(Vl, I1, i) / I1[0]
If = sum(I1)
Vdf = sum(Vd)
Vlf = sum(Vl)
print('I1: ' + str(If))
print('Vd: ' + str(Vdf))
print('Vl: ' + str(Vlf))
print('P: ' + str(Vlf * If))
Vdfinal = np.zeros(n) # per tal de veure com evoluciona la tensi del dode
for j in range(n):
Vdfinal[j] = np.sum([Vd[:(j+1)]])
print(Vdfinal)
| 19.3 | 76 | 0.541451 |
4a35a45ae37a4457776d2a6b8d99ec59f3f7c227 | 614 | py | Python | proxybroker/errors.py | aljeshishe/ProxyBroker | 195c050162275f63ebe033be765abec90601e3e1 | [
"Apache-2.0"
] | null | null | null | proxybroker/errors.py | aljeshishe/ProxyBroker | 195c050162275f63ebe033be765abec90601e3e1 | [
"Apache-2.0"
] | null | null | null | proxybroker/errors.py | aljeshishe/ProxyBroker | 195c050162275f63ebe033be765abec90601e3e1 | [
"Apache-2.0"
] | 1 | 2020-04-30T09:25:25.000Z | 2020-04-30T09:25:25.000Z | """Errors."""
| 12.28 | 56 | 0.732899 |
4a36242f8ee5ebc5d59f9cbb0e67fddbadbb4a7c | 729 | py | Python | questionanswering/models/pooling.py | lvying1991/KBQA-System | 55e69c8320df3f7b199860afc76e8a0ab66f540e | [
"Apache-2.0"
] | 2 | 2019-09-10T13:20:27.000Z | 2019-11-14T12:58:40.000Z | questionanswering/models/pooling.py | lvying1991/KBQA-System | 55e69c8320df3f7b199860afc76e8a0ab66f540e | [
"Apache-2.0"
] | null | null | null | questionanswering/models/pooling.py | lvying1991/KBQA-System | 55e69c8320df3f7b199860afc76e8a0ab66f540e | [
"Apache-2.0"
] | null | null | null | import torch
from torch import nn as nn
from torch import autograd
| 25.137931 | 92 | 0.650206 |
4a37446fd29ea2b6044d47c4ec0b0027825d51e4 | 2,623 | py | Python | tests/unit/app/test_session.py | bernease/whylogs-python | cfd2a2f71280537aae584cbd40a752fbe7da647b | [
"Apache-2.0"
] | null | null | null | tests/unit/app/test_session.py | bernease/whylogs-python | cfd2a2f71280537aae584cbd40a752fbe7da647b | [
"Apache-2.0"
] | null | null | null | tests/unit/app/test_session.py | bernease/whylogs-python | cfd2a2f71280537aae584cbd40a752fbe7da647b | [
"Apache-2.0"
] | null | null | null |
import pytest
from whylogs.app.session import get_or_create_session, get_session, get_logger, reset_default_session, session_from_config
from whylogs.app.config import SessionConfig
from whylogs.app.session import Session
from pandas import util
| 26.23 | 122 | 0.716737 |
4a379f8a8c2abcf1cc5791849c692674276f7e20 | 851 | py | Python | Packages/constants.py | Bemesko/Intelligence-of-Home-GUI | 4580d2d2a6b5f3509e2e0897fd0c9952711ccd2b | [
"MIT"
] | null | null | null | Packages/constants.py | Bemesko/Intelligence-of-Home-GUI | 4580d2d2a6b5f3509e2e0897fd0c9952711ccd2b | [
"MIT"
] | null | null | null | Packages/constants.py | Bemesko/Intelligence-of-Home-GUI | 4580d2d2a6b5f3509e2e0897fd0c9952711ccd2b | [
"MIT"
] | null | null | null | import enum
BASELINE = "baseline"
ENERGY = "energy"
MAX_PRICE = "max_price"
START_PRICE = "starting_price"
INCREMENT = "increment"
MIN_PRICE = "min_price"
MAX_LOT_SIZE = "max_lot_size_wh"
NAMESERVER_AGENT_AMOUNT = 3
ATTRIBUTE_LIST_LENGTH = 50
NEXT_ENERGY_CONSUMPTION = "next_energy_consumption"
NEXT_ENERGY_GENERATION = "next_energy_generation"
ENERGY_DIFFERENCE = "energy_difference"
ENERGY_MARKET_PRICE = "energy_market_price"
WANTED_ENERGY = "wanted_energy"
ENERGY_BUY_MAX_PRICE = "energy_buy_max_price"
ENERGY_BUY_STARTING_PRICE = "energy_buy_starting_price"
ENERGY_BUY_PRICE_INCREMENT = "energy_buy_price_increment"
ENERGY_SELL_MIN_PRICE = "energy_sell_min_price"
| 24.314286 | 57 | 0.788484 |
4a37bdd049a40072735c67bea9e8cc13a3a7a335 | 1,553 | py | Python | target/tests.py | groundupnews/gu | c7179ee3d058c8749d250d681032a76dc8d599d5 | [
"BSD-3-Clause"
] | 19 | 2018-01-28T14:35:40.000Z | 2020-12-04T03:04:02.000Z | target/tests.py | groundupnews/gu | c7179ee3d058c8749d250d681032a76dc8d599d5 | [
"BSD-3-Clause"
] | 8 | 2018-06-02T14:28:28.000Z | 2021-08-06T10:22:37.000Z | target/tests.py | groundupnews/gu | c7179ee3d058c8749d250d681032a76dc8d599d5 | [
"BSD-3-Clause"
] | 21 | 2018-02-25T14:07:48.000Z | 2020-05-28T23:10:52.000Z | from django.contrib.auth.models import User
from django.test import TestCase
from django.test import Client
from django.urls import reverse
from target import models
from django.utils import timezone
# Create your tests here.
| 33.76087 | 78 | 0.63812 |
4a38f4cdb8c158390444f36146a5ad23b2ae9c67 | 4,998 | py | Python | jenkinsapi/view.py | julienduchesne/jenkinsapi | 369dc54a8d5bb1f4e985c647378b9e1e62c26961 | [
"MIT"
] | null | null | null | jenkinsapi/view.py | julienduchesne/jenkinsapi | 369dc54a8d5bb1f4e985c647378b9e1e62c26961 | [
"MIT"
] | 52 | 2019-06-25T12:47:14.000Z | 2021-04-12T12:24:08.000Z | jenkinsapi/view.py | klauern/jenkinsapi | 605ad22a0109d3f51452c7abd23b0376a44682da | [
"MIT"
] | null | null | null | """
Module for jenkinsapi views
"""
import six
import logging
from jenkinsapi.jenkinsbase import JenkinsBase
from jenkinsapi.job import Job
from jenkinsapi.custom_exceptions import NotFound
log = logging.getLogger(__name__)
| 30.290909 | 79 | 0.580232 |
4a39a497868bd170b5a86c4ae6d32db864cbebc8 | 7,240 | py | Python | core/vision/collection.py | jmarangola/cv-chess | c1bf1754b622e76bc2bc92276b96760c321a8bd9 | [
"MIT"
] | null | null | null | core/vision/collection.py | jmarangola/cv-chess | c1bf1754b622e76bc2bc92276b96760c321a8bd9 | [
"MIT"
] | null | null | null | core/vision/collection.py | jmarangola/cv-chess | c1bf1754b622e76bc2bc92276b96760c321a8bd9 | [
"MIT"
] | null | null | null | """
Autonomous dataset collection of data for jetson nano
John Marangola - marangol@bc.edu
"""
import datasets
import json
from datasets import Board, ChessPiece, PieceColor, PieceType
#from realsense_utils import RealSenseCamera
import preprocessing as pr
import cv2
import pandas as pd
import os
from os.path import isfile, join
import uuid
import numpy as np
import uuid
from PIL import Image
from PIL.ExifTags import TAGS
RUN_CALIBRATION = False # Run calibration sequence or use preexisting board four corners data from config/setup.txt
BOARD_SAVE_DEST= r"board_metadata.jpeg" # Where the debug metadata board visualization image is saved (to ensure we properly setup the metadata)
TMP_DEST = "/home/spark/cv-chess/core/vision/tmp/" # Where images are temporarily saved before being uploaded to drive in a batch
LOCAL_MD_FILENAME = "local_meta.json"
LOCAL_METADATA_JSON_PATH = TMP_DEST + LOCAL_MD_FILENAME
TL = [250, 115]
BL = [250, 687]
TR = [825, 115]
BR = [825, 687]
def get_sorted_time_saved(images):
"""
Given a list of image filenames, return a dictionary of image filename : time written to disk pairs.
Purpose: for debugging dataset
Args:
images (list): List of image filenames
Returns:
dict: dict of image filenames
"""
image_dat = []
for image in images:
imtmp = Image.open(image)
tmp = imtmp.getexif()
image_dat.append(tmp)
dt = {}
for exifdata in image_dat:
idx = image_dat.index(exifdata)
# iterating over all EXIF data fields
for tag_id in exifdata:
tag = TAGS.get(tag_id, tag_id)
data = exifdata.get(tag_id)
# decode bytes
if isinstance(data, bytes):
data = data.decode()
# Add datetime field
if tag == "DateTime":
dt[images[idx]] = data
print(f"{tag:25}: {data}")
output = sorted(dt.items(), key=lambda eta: eta[1], reverse=False)
print(output)
dt = {}
for item in output:
dt[item[0]] = item[1]
with open(TMP_DEST + "datetimes.json", "w") as wr: # dump to json
json.dump(output, wr)
return output
if __name__ == "__main__":
# Initialize camera
realsense = RealSenseCamera()
"""
# Check if calibration sequence must be run
if RUN_CALIBRATION:
realsense.calibrate_board_pos()
if realsense.get_board_corners() is None:
print("Failed to run calibration. Exiting...")
exit()
"""
"""
board_meta = Board()
# Add pieces to metadata csv
board_meta.add_pieces({
"A1":ChessPiece(PieceType.KNIGHT, PieceColor.BLUE), "A2":ChessPiece(PieceType.PAWN, PieceColor.BLUE), "A3":ChessPiece(PieceType.PAWN, PieceColor.ORANGE)
})
board_meta.display_board(dest=BOARD_SAVE_DEST)
print(f"Verify board is correct output dest={BOARD_SAVE_DEST}.\nContine [Y] or Exit [E]?")
validate = input()
if validate.upper() == "E" or validate.upper() == "N":
print("Exiting...")
realsense.stop_pipeline()
exit()
files = []
files = [f for f in os.listdir(TMP_DEST) if isfile(os.path.join(TMP_DEST, f))]
# Check to see if there is pre-existing .csv metadata to add to
if LOCAL_MD_FILENAME in files:
try:
total_metadata = pd.read_csv(LOCAL_METADATA_JSON_PATH)
except:
total_metadata = pd.DataFrame()
else:
total_metadata = pd.DataFrame()
# Loop through input
while input() != "exit":
img = realsense.capture_rgb_image() # Capture the image
img = img[105:690, 348:940, :]
img = rotate_image(img, 1.5)
files = pr.board_to_64_files(img, base_directory=TMP_DEST) # Break image up into 64 files
piece_types, piece_colors = [], []
batch_id = uuid.uuid1()
for tile in sorted(files.keys()):
temp = board_meta.get_chess_piece(tile)
if temp is None:
piece_types.append(None)
piece_colors.append(None)
else:
piece_types.append(temp.piece_type.name)
piece_colors.append(temp.piece_color.name)
tmp_meta = pd.DataFrame({
"File" : [files[file] for file in files.keys()],
"Position" : [file for file in files.keys()],
"Piece Type" : piece_types,
"Piece Color" : piece_colors,
"Batch ID" : [batch_id for i in range(len(files.keys()))]
})
frames = [total_metadata, tmp_meta]
total_metadata = pd.concat(frames) # Concatenate dataframes
print(total_metadata)
total_metadata.to_csv(path_or_buf=LOCAL_METADATA_JSON_PATH)
"""
#pr.delete_board2_64_output(base_directory=TMP_DEST)
FEN = "5P1R/1Q1RP1P1/3R1P2/QQPPK1R1/1B1K1N2/B1R2N1B/1N2B3R/2B1BN2".upper()
last_input = None
df = pd.DataFrame()
while input() != "end":
resp = input("[n] for new fen, [anything key to take an image] >")
if resp == "new":
fen = input("Enter a FEN:").upper()
img = realsense.capture_rgb_image() # Capture the image
print("Captured image")
img = img[105:690, 348:940, :]
img = rotate_image(img, 1.5)
cv2.imwrite("original.jpg", img)
# Get dict of positions
temp_dict = fen_to_dict(FEN)
tiles = pr.board_to_64_files(img, temp_dict, base_directory=TMP_DEST) # Break image up into 64 files
data_frame = pd.DataFrame(tiles)
data_frame = data_frame.transpose()
frames = [df, data_frame]
df = pd.concat(frames) # Concatenate dataframe
csv_file = df.to_csv(TMP_DEST + 'my_csv.csv', header=False, index=False)
# Close streams and end pipeline
realsense.stop_pipeline()
| 31.754386 | 180 | 0.604144 |
4a3a7096be78dd2d3c57cba31752bc3f172e277d | 3,475 | py | Python | tests/test_sbfc.py | htwangtw/sbfc | 5119017a643b82efbfaaf373a26f191a51f8283a | [
"BSD-3-Clause"
] | null | null | null | tests/test_sbfc.py | htwangtw/sbfc | 5119017a643b82efbfaaf373a26f191a51f8283a | [
"BSD-3-Clause"
] | 13 | 2021-04-29T16:11:18.000Z | 2022-02-22T18:10:36.000Z | tests/test_sbfc.py | htwangtw/sbfc | 5119017a643b82efbfaaf373a26f191a51f8283a | [
"BSD-3-Clause"
] | null | null | null | import os
import numpy as np
import pandas as pd
from nilearn import datasets
from sbfc.parser import seed_base_connectivity
seed = os.path.dirname(__file__) + "/data/difumo64_pcc.nii.gz"
| 29.700855 | 82 | 0.639424 |
4a3cf72d3d9f4ab9e1a082a0ec19d609ba13facf | 528 | py | Python | final_project/machinetranslation/tests/test.py | ChrisOmeh/xzceb-flask_eng_fr | 6ce4a79539b8ace4bce999c32a9f58aa73827e5c | [
"Apache-2.0"
] | null | null | null | final_project/machinetranslation/tests/test.py | ChrisOmeh/xzceb-flask_eng_fr | 6ce4a79539b8ace4bce999c32a9f58aa73827e5c | [
"Apache-2.0"
] | null | null | null | final_project/machinetranslation/tests/test.py | ChrisOmeh/xzceb-flask_eng_fr | 6ce4a79539b8ace4bce999c32a9f58aa73827e5c | [
"Apache-2.0"
] | null | null | null | import unittest
from translator import english_to_french, french_to_english
if __name__ == "__main__":
unittest.main() | 35.2 | 68 | 0.727273 |
4a3d8daa44bdf458c650e19786cc3f1f2403777e | 3,553 | py | Python | tests/ut/python/parallel/test_auto_parallel_transformer.py | huxian123/mindspore | ec5ba10c82bbd6eccafe32d3a1149add90105bc8 | [
"Apache-2.0"
] | 2 | 2021-04-22T07:00:59.000Z | 2021-11-08T02:49:09.000Z | tests/ut/python/parallel/test_auto_parallel_transformer.py | ReIadnSan/mindspore | c3d1f54c7f6d6f514e5748430d24b16a4f9ee9e5 | [
"Apache-2.0"
] | 1 | 2020-12-29T06:46:38.000Z | 2020-12-29T06:46:38.000Z | tests/ut/python/parallel/test_auto_parallel_transformer.py | ReIadnSan/mindspore | c3d1f54c7f6d6f514e5748430d24b16a4f9ee9e5 | [
"Apache-2.0"
] | 1 | 2021-05-10T03:30:36.000Z | 2021-05-10T03:30:36.000Z | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore import context
from mindspore.common.api import _executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from tests.ut.python.ops.test_math_ops import VirtualLoss
grad_all = C.GradOperation(get_all=True)
def test_dmnet_train_step():
size = 8
context.set_auto_parallel_context(device_num=size, global_rank=0)
input_ = Tensor(np.ones([4096, 4096]).astype(np.float32) * 0.01)
net = GradWrap(NetWithLoss(MultiTransformer()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
_executor.compile(net, input_)
| 30.62931 | 114 | 0.665072 |
4a3dd5e26114808a45a3424f7c019a215fa96e04 | 6,227 | py | Python | cloudcafe/compute/events/models/common.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | null | null | null | cloudcafe/compute/events/models/common.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | null | null | null | cloudcafe/compute/events/models/common.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | null | null | null | """
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cloudcafe.compute.events.models.base import (
EventBaseModel, EventBaseListModel)
| 30.826733 | 78 | 0.605107 |
4a3e2e6cca24d36e7e6072a43d4a7616c515981f | 1,446 | py | Python | openpyxl/drawing/tests/test_shapes.py | sekcheong/openpyxl | e1ba037f171efa348f75431c35a50de5ca277b78 | [
"MIT"
] | null | null | null | openpyxl/drawing/tests/test_shapes.py | sekcheong/openpyxl | e1ba037f171efa348f75431c35a50de5ca277b78 | [
"MIT"
] | null | null | null | openpyxl/drawing/tests/test_shapes.py | sekcheong/openpyxl | e1ba037f171efa348f75431c35a50de5ca277b78 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
# Copyright (c) 2010-2017 openpyxl
import pytest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
| 23.322581 | 55 | 0.64177 |
4a4054b106f4552f95f762ef5c1bcfd72acaebe7 | 19,509 | py | Python | raysect/core/math/function/float/function3d/interpolate/tests/scripts/generate_3d_splines.py | raysect/source | 11f03089d0379fc7fb4d23c6f60c3d255673cec9 | [
"BSD-3-Clause"
] | 71 | 2015-10-25T16:50:18.000Z | 2022-03-02T03:46:19.000Z | raysect/core/math/function/float/function3d/interpolate/tests/scripts/generate_3d_splines.py | raysect/source | 11f03089d0379fc7fb4d23c6f60c3d255673cec9 | [
"BSD-3-Clause"
] | 336 | 2015-02-11T22:39:54.000Z | 2022-02-22T18:42:32.000Z | raysect/core/math/function/float/function3d/interpolate/tests/scripts/generate_3d_splines.py | raysect/source | 11f03089d0379fc7fb4d23c6f60c3d255673cec9 | [
"BSD-3-Clause"
] | 24 | 2016-09-11T17:12:10.000Z | 2022-02-24T22:57:09.000Z |
# Copyright (c) 2014-2021, Dr Alex Meakins, Raysect Project
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Raysect Project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from raysect.core.math.function.float.function3d.interpolate.interpolator3darray import Interpolator3DArray
from matplotlib.colors import SymLogNorm, Normalize
import scipy
import sys
from raysect.core.math.function.float.function3d.interpolate.tests.data.interpolator3d_test_data import \
TestInterpolatorLoadBigValues, TestInterpolatorLoadNormalValues, TestInterpolatorLoadSmallValues,\
TestInterpolatorLoadBigValuesUneven, TestInterpolatorLoadNormalValuesUneven, TestInterpolatorLoadSmallValuesUneven
from raysect.core.math.function.float.function3d.interpolate.tests.test_interpolator_3d import X_LOWER, X_UPPER,\
NB_XSAMPLES, NB_X, X_EXTRAP_DELTA_MAX, PRECISION, Y_LOWER, Y_UPPER, NB_YSAMPLES, NB_Y, \
Y_EXTRAP_DELTA_MAX, EXTRAPOLATION_RANGE, large_extrapolation_range, Z_LOWER, Z_UPPER, \
NB_ZSAMPLES, NB_Z, Z_EXTRAP_DELTA_MAX, N_EXTRAPOLATION, uneven_linspace
# Force scientific format to get the right number of significant figures
np.set_printoptions(30000, linewidth=100, formatter={'float': lambda x_str: format(x_str, '.'+str(PRECISION)+'E')},
threshold=sys.maxsize)
# Overwrite imported values here.
VISUAL_NOT_TESTS = False
if VISUAL_NOT_TESTS:
NB_X = 51
NB_Y = 51
NB_Z = 51
NB_XSAMPLES = 101
NB_YSAMPLES = 101
NB_ZSAMPLES = 101
X_EXTRAP_DELTA_MIN = 0.04
Y_EXTRAP_DELTA_MIN = 0.04
Z_EXTRAP_DELTA_MIN = 0.04
BIG_VALUE_FACTOR = 20.
SMALL_VALUE_FACTOR = -20.
def docstring_test():
"""
.. code-block:: python
>>> from raysect.core.math.function.float.function3d.interpolate.interpolator3darray import Interpolator3DArray
>>>
>>> x = np.linspace(-1., 1., 20)
>>> y = np.linspace(-1., 1., 20)
>>> z = np.linspace(-1., 1., 20)
>>> x_array, y_array, z_array = np.meshgrid(x, y, z, indexing='ij')
>>> f = np.exp(-(x_array**2 + y_array**2 + z_array**2))
>>> interpolator3D = Interpolator3DArray(x, y, z, f, 'cubic', 'nearest', 1.0, 1.0, 1.0)
>>> # Interpolation
>>> interpolator3D(1.0, 1.0, 0.2)
0.1300281183136766
>>> # Extrapolation
>>> interpolator3D(1.0, 1.0, 1.1)
0.0497870683678659
>>> # Extrapolation out of bounds
>>> interpolator3D(1.0, 1.0, 2.1)
ValueError: The specified value (z=2.1) is outside of extrapolation range.
"""
pass
if __name__ == '__main__':
# Calculate for big values, small values, or normal values
big_values = False
small_values = True
log_scale = False
uneven_spacing = False
use_saved_datastore_spline_knots = True
verbose_options = [False, True, False, False]
if VISUAL_NOT_TESTS:
index_x_in = 40
else:
index_x_in = 4
index_y_in = 0
index_z_in = 0
index_y_plot = 0
index_z_plot = 0
print('Using scipy version', scipy.__version__)
# Find the function values to be used
if big_values:
factor = np.power(10., BIG_VALUE_FACTOR)
elif small_values:
factor = np.power(10., SMALL_VALUE_FACTOR)
else:
factor = 1.
if uneven_spacing:
x_in = uneven_linspace(X_LOWER, X_UPPER, NB_X, offset_fraction=1./3.)
y_in = uneven_linspace(Y_LOWER, Y_UPPER, NB_Y, offset_fraction=1./3.)
z_in = uneven_linspace(Z_LOWER, Z_UPPER, NB_Z, offset_fraction=1./3.)
else:
x_in = np.linspace(X_LOWER, X_UPPER, NB_X)
y_in = np.linspace(Y_LOWER, Y_UPPER, NB_Y)
z_in = np.linspace(Z_LOWER, Z_UPPER, NB_Z)
x_in_full, y_in_full, z_in_full = np.meshgrid(x_in, y_in, z_in, indexing='ij')
f_in = function_to_spline(x_in_full, y_in_full, z_in_full, factor)
if use_saved_datastore_spline_knots:
if uneven_spacing:
if big_values:
reference_loaded_values = TestInterpolatorLoadBigValuesUneven()
elif small_values:
reference_loaded_values = TestInterpolatorLoadSmallValuesUneven()
else:
reference_loaded_values = TestInterpolatorLoadNormalValuesUneven()
else:
if big_values:
reference_loaded_values = TestInterpolatorLoadBigValues()
elif small_values:
reference_loaded_values = TestInterpolatorLoadSmallValues()
else:
reference_loaded_values = TestInterpolatorLoadNormalValues()
f_in = reference_loaded_values.data
if verbose_options[0]:
print('Save this to self.data in test_interpolator:\n', repr(f_in))
xsamples = np.linspace(X_LOWER, X_UPPER, NB_XSAMPLES)
ysamples = np.linspace(Y_LOWER, Y_UPPER, NB_YSAMPLES)
zsamples = np.linspace(Z_LOWER, Z_UPPER, NB_ZSAMPLES)
xsamples_extrapolation, ysamples_extrapolation, zsamples_extrapolation = large_extrapolation_range(
xsamples, ysamples, zsamples, EXTRAPOLATION_RANGE, N_EXTRAPOLATION
)
# # Extrapolation x and y values
xsamples_out_of_bounds, ysamples_out_of_bounds, zsamples_out_of_bounds, xsamples_in_bounds, ysamples_in_bounds, \
zsamples_in_bounds = get_extrapolation_input_values(
X_LOWER, X_UPPER, Y_LOWER, Y_UPPER, Z_LOWER, Z_UPPER, X_EXTRAP_DELTA_MAX, Y_EXTRAP_DELTA_MAX,
Z_EXTRAP_DELTA_MAX, X_EXTRAP_DELTA_MIN, Y_EXTRAP_DELTA_MIN, Z_EXTRAP_DELTA_MIN
)
interpolator3D = Interpolator3DArray(x_in, y_in, z_in, f_in, 'linear', 'linear', extrapolation_range_x=2.0,
extrapolation_range_y=2.0, extrapolation_range_z=2.0)
if VISUAL_NOT_TESTS:
n_lower_upper_interp = 51
else:
n_lower_upper_interp = 19
n_lower = 50
lower_p = 0.9
xsamples_lower_and_upper = np.linspace(X_LOWER, X_UPPER, n_lower_upper_interp)
ysamples_lower_and_upper = np.linspace(Y_LOWER, Y_UPPER, n_lower_upper_interp)
zsamples_lower_and_upper = np.linspace(Z_LOWER, Z_UPPER, n_lower_upper_interp)
xsamples_lower_and_upper = np.concatenate((np.linspace(X_LOWER - (X_UPPER - X_LOWER) * lower_p, X_LOWER, n_lower)[
:-1], xsamples_lower_and_upper,
np.linspace(X_UPPER, X_UPPER + (X_UPPER - X_LOWER) * lower_p, n_lower)[
1:]))
ysamples_lower_and_upper = np.concatenate((np.linspace(Y_LOWER - (Y_UPPER - Y_LOWER) * lower_p, Y_LOWER, n_lower)[
:-1], ysamples_lower_and_upper,
np.linspace(Y_UPPER, Y_UPPER + (Y_UPPER - Y_LOWER) * lower_p, n_lower)[
1:]))
zsamples_lower_and_upper = np.concatenate((np.linspace(Z_LOWER - (Z_UPPER - Z_LOWER) * lower_p, Z_LOWER, n_lower)[
:-1], zsamples_lower_and_upper,
np.linspace(Z_UPPER, Z_UPPER + (Z_UPPER - Z_LOWER) * lower_p, n_lower)[
1:]))
index_ysamples_lower_upper = np.where(x_in[index_y_in] == ysamples_lower_and_upper)[0].item()
# extrapolation to save
f_extrapolation_output = np.zeros((len(xsamples_extrapolation), ))
for i in range(len(xsamples_extrapolation)):
f_extrapolation_output[i] = interpolator3D(
xsamples_extrapolation[i], ysamples_extrapolation[i], zsamples_extrapolation[i]
)
if verbose_options[1]:
print('Output of extrapolation to be saved:\n', repr(f_extrapolation_output))
check_plot = True
if check_plot:
import matplotlib.pyplot as plt
from matplotlib import cm
# Install mayavi and pyQt5
main_plots_on = True
if main_plots_on:
fig, ax = plt.subplots(1, 4)
fig1, ax1 = plt.subplots(1, 2)
if not (x_in[index_x_in] == xsamples).any():
raise ValueError(
f'To compare a slice, NB_XSAMPLES={NB_XSAMPLES}-1, NB_YSAMPLES={NB_YSAMPLES}-1, NB_ZSAMPLES='
f'{NB_ZSAMPLES}-1 must be divisible by NB_X={NB_X}-1, NB_Y={NB_Y}-1, NB_Z={NB_Z}-1'
)
if not (y_in[index_y_in] == ysamples_lower_and_upper).any():
raise ValueError(
f'To compare a slice, NB_XSAMPLES={NB_XSAMPLES}-1, NB_YSAMPLES={NB_YSAMPLES}-1, NB_ZSAMPLES='
f'{NB_ZSAMPLES}-1 must be divisible by NB_X={NB_X}-1, NB_Y={NB_Y}-1, NB_Z={NB_Z}-1'
)
index_xsamples = np.where(x_in[index_x_in] == xsamples)[0].item()
index_ysamples_lower_upper = np.where(y_in[index_y_in] == ysamples_lower_and_upper)[0].item()
# index_ysamples_lower_upper = 0
# index_zsamples_lower_upper = 0
index_zsamples_lower_upper = np.where(z_in[index_z_in] == zsamples_lower_and_upper)[0].item()
f_plot_x = f_in[index_x_in, :, :]
y_corners_x = pcolourmesh_corners(y_in)
z_corners_x = pcolourmesh_corners(z_in)
min_colourmap = np.min(f_in)
max_colourmap = np.max(f_in)
if log_scale:
c_norm = SymLogNorm(vmin=min_colourmap, vmax=max_colourmap, linthresh=0.03)
else:
c_norm = Normalize(vmin=min_colourmap, vmax=max_colourmap)
colourmap = cm.get_cmap('viridis', 512)
ax[0].pcolormesh(y_corners_x, z_corners_x, f_plot_x, norm=c_norm, cmap='viridis')
# ax[0].pcolormesh(y_in, z_in, f_plot_x)
ax[0].set_aspect('equal')
f_out = np.zeros((len(xsamples), len(ysamples), len(zsamples)))
for i in range(len(xsamples)):
for j in range(len(ysamples)):
for k in range(len(zsamples)):
f_out[i, j, k] = interpolator3D(xsamples[i], ysamples[j], zsamples[k])
if verbose_options[2]:
print('Test interpolation:\n', repr(f_out))
f_out_lower_and_upper = np.zeros((len(xsamples_lower_and_upper), len(ysamples_lower_and_upper),
len(zsamples_lower_and_upper)))
for i in range(len(xsamples_lower_and_upper)):
for j in range(len(ysamples_lower_and_upper)):
for k in range(len(zsamples_lower_and_upper)):
f_out_lower_and_upper[i, j, k] = interpolator3D(
xsamples_lower_and_upper[i], ysamples_lower_and_upper[j], zsamples_lower_and_upper[k]
)
f_out_extrapolation = np.zeros((len(xsamples_extrapolation), ))
for i in range(len(xsamples_extrapolation)):
f_out_extrapolation[i] = interpolator3D(
xsamples_extrapolation[i], ysamples_extrapolation[i], zsamples_extrapolation[i]
)
if verbose_options[3]:
print('New output of extrapolation to be saved:\n', repr(f_out_extrapolation))
index_xsamples_extrap = np.where(x_in[index_x_in] == xsamples_extrapolation)
f_out_x_extrapolation = f_out_extrapolation[index_xsamples_extrap]
im = ax[3].scatter(
ysamples_extrapolation[index_xsamples_extrap], zsamples_extrapolation[index_xsamples_extrap],
c=f_out_x_extrapolation, norm=c_norm, cmap='viridis', s=10
)
ax[3].set_aspect('equal')
f_out_x = f_out[index_xsamples, :, :]
ysamples_mesh, zsamples_mesh = np.meshgrid(ysamples, zsamples)
ax[0].scatter(
ysamples_mesh.ravel(), zsamples_mesh.ravel(), c=f_out_x.ravel(), norm=c_norm, cmap='viridis', s=10
)
index_y_print = -1
index_z_print = 0
index_ysamples_print = np.where(y_in[index_y_print] == ysamples)[0].item()
index_zsamples_print = np.where(z_in[index_z_print] == zsamples)[0].item()
ax[0].set_title('Slice of x', size=20)
ax[1].set_title(f'Interpolated points \nin slice of x={x_in[index_x_in]}', size=20)
y_corners_xsamples = pcolourmesh_corners(ysamples)
z_corners_xsamples = pcolourmesh_corners(zsamples)
im2 = ax[1].pcolormesh(y_corners_xsamples, z_corners_xsamples, f_out_x, norm=c_norm, cmap='viridis')
ax[1].set_aspect('equal')
if not (x_in[index_x_in] == xsamples_lower_and_upper).any():
raise ValueError(
f'To compare a slice, n_lower_upper={n_lower}-1, must be divisible by NB_X={NB_X}-1, NB_Y={NB_Y}-1,'
f' NB_Z={NB_Z}-1'
)
index_xsamples_lower_and_upper = np.where(x_in[index_x_in] == xsamples_lower_and_upper)[0].item()
y_corners_xsamples_lower_and_upper = pcolourmesh_corners(ysamples_lower_and_upper)
z_corners_xsamples_lower_and_upper = pcolourmesh_corners(zsamples_lower_and_upper)
f_out_lower_and_upper_x = f_out_lower_and_upper[index_xsamples_lower_and_upper, :, :]
im3 = ax[2].pcolormesh(
y_corners_xsamples_lower_and_upper, z_corners_xsamples_lower_and_upper, f_out_lower_and_upper_x,
norm=c_norm, cmap='viridis'
)
check_array_z = np.zeros(len(zsamples_lower_and_upper))
check_array_y = np.zeros(len(ysamples_lower_and_upper))
for i in range(len(zsamples_lower_and_upper)):
check_array_z[i] = interpolator3D(
x_in[index_x_in], ysamples_lower_and_upper[index_ysamples_lower_upper], zsamples_lower_and_upper[i]
)
check_array_y[i] = interpolator3D(
x_in[index_x_in], ysamples_lower_and_upper[i], zsamples_lower_and_upper[index_zsamples_lower_upper]
)
ax1[0].plot(zsamples_lower_and_upper, f_out_lower_and_upper_x[index_ysamples_lower_upper, :])
ax1[0].plot(z_in, f_in[index_x_in, index_y_in, :], 'bo')
ax1[0].plot(zsamples_lower_and_upper, check_array_z, 'gx')
ax1[1].plot(ysamples_lower_and_upper, check_array_y)
# ax1[1].plot(ysamples_lower_and_upper, f_out_lower_and_upper_x[:, index_z_plot])
ax1[0].axvline(z_in[0], color='r', linestyle='--')
ax1[0].axvline(z_in[-1], color='r', linestyle='--')
ax1[1].axvline(y_in[0], color='r', linestyle='--')
ax1[1].axvline(y_in[-1], color='r', linestyle='--')
fig.colorbar(im, ax=ax[0])
fig.colorbar(im2, ax=ax[1])
fig.colorbar(im3, ax=ax[2])
ax[2].set_aspect('equal')
plt.show()
| 49.767857 | 120 | 0.65703 |
4a41ae80cb8630870b8a540d9da1afa369fa489a | 2,875 | py | Python | supertokens_python/recipe_module.py | girish946/supertokens-python | ce0e7f6035941b3a8d3d1f7ae867224fd9c41c3c | [
"Apache-2.0"
] | 36 | 2021-10-05T17:06:07.000Z | 2022-03-29T14:11:39.000Z | supertokens_python/recipe_module.py | girish946/supertokens-python | ce0e7f6035941b3a8d3d1f7ae867224fd9c41c3c | [
"Apache-2.0"
] | 56 | 2021-09-02T08:24:29.000Z | 2022-03-30T07:29:07.000Z | supertokens_python/recipe_module.py | girish946/supertokens-python | ce0e7f6035941b3a8d3d1f7ae867224fd9c41c3c | [
"Apache-2.0"
] | 8 | 2022-01-28T14:49:55.000Z | 2022-03-26T01:28:38.000Z | # Copyright (c) 2021, VRAI Labs and/or its affiliates. All rights reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License") as published by the Apache Software Foundation.
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import abc
from typing import Union, List, TYPE_CHECKING
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from .framework.response import BaseResponse
if TYPE_CHECKING:
from supertokens_python.framework.request import BaseRequest
from .supertokens import AppInfo
from .normalised_url_path import NormalisedURLPath
from .exceptions import SuperTokensError
| 34.638554 | 119 | 0.718261 |
4a428a5645724e361b7bbf5d6b4f839753d082e4 | 58 | py | Python | tests/__init__.py | mihaidumitrescu/flake8-html | d5b62c05fb220a5cd6c777feacd69cb726a42e9a | [
"Apache-2.0"
] | 36 | 2017-03-05T13:12:28.000Z | 2021-02-03T15:05:34.000Z | tests/__init__.py | mihaidumitrescu/flake8-html | d5b62c05fb220a5cd6c777feacd69cb726a42e9a | [
"Apache-2.0"
] | 23 | 2017-03-01T19:40:10.000Z | 2022-03-31T17:13:17.000Z | tests/__init__.py | mihaidumitrescu/flake8-html | d5b62c05fb220a5cd6c777feacd69cb726a42e9a | [
"Apache-2.0"
] | 15 | 2017-03-05T13:12:39.000Z | 2022-03-25T14:46:28.000Z | # -*- coding: utf-8 -*-
"""Tests go in this directory."""
| 19.333333 | 33 | 0.551724 |
4a42d347c7abb078f1060ffec9bcd3fae7f3044c | 46 | py | Python | datajoint-workflow/{{cookiecutter.github_repo}}/src/{{cookiecutter.__pkg_import_name}}/version.py | Yambottle/dj-workflow-template | a47a354af2f9303c898ef403491e69cfc396d196 | [
"MIT"
] | null | null | null | datajoint-workflow/{{cookiecutter.github_repo}}/src/{{cookiecutter.__pkg_import_name}}/version.py | Yambottle/dj-workflow-template | a47a354af2f9303c898ef403491e69cfc396d196 | [
"MIT"
] | null | null | null | datajoint-workflow/{{cookiecutter.github_repo}}/src/{{cookiecutter.__pkg_import_name}}/version.py | Yambottle/dj-workflow-template | a47a354af2f9303c898ef403491e69cfc396d196 | [
"MIT"
] | 6 | 2022-02-18T20:19:04.000Z | 2022-03-05T05:29:23.000Z | __version__ = "{{cookiecutter._pkg_version}}"
| 23 | 45 | 0.76087 |
4a42eafd975ea0137426e4612231c34ec1b242ab | 4,041 | py | Python | examples/benchmarking/benchmark_bm25.py | shibing624/similarities | f573ae158b0e2a908c1ef549784bd88e23cbd9c6 | [
"Apache-2.0"
] | 16 | 2022-02-23T11:46:18.000Z | 2022-03-29T07:35:33.000Z | examples/benchmarking/benchmark_bm25.py | shibing624/similarities | f573ae158b0e2a908c1ef549784bd88e23cbd9c6 | [
"Apache-2.0"
] | 1 | 2022-03-15T13:51:36.000Z | 2022-03-16T02:56:15.000Z | examples/benchmarking/benchmark_bm25.py | shibing624/similarities | f573ae158b0e2a908c1ef549784bd88e23cbd9c6 | [
"Apache-2.0"
] | 3 | 2022-02-24T02:06:05.000Z | 2022-03-13T11:31:16.000Z | # -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
"""
import datetime
import os
import pathlib
import random
import sys
from loguru import logger
sys.path.append('../..')
from similarities import BM25Similarity
from similarities.utils import http_get
from similarities.data_loader import SearchDataLoader
from similarities.evaluation import evaluate
random.seed(42)
pwd_path = os.path.dirname(os.path.realpath(__file__))
data_path = get_scifact()
#### Loading test queries and corpus in DBPedia
corpus, queries, qrels = SearchDataLoader(data_path).load(split="test")
corpus_ids, query_ids = list(corpus), list(queries)
logger.info(f"corpus: {len(corpus)}, queries: {len(queries)}")
#### Randomly sample 1M pairs from Original Corpus (4.63M pairs)
#### First include all relevant documents (i.e. present in qrels)
corpus_set = set()
for query_id in qrels:
corpus_set.update(list(qrels[query_id].keys()))
corpus_new = {corpus_id: corpus[corpus_id] for corpus_id in corpus_set}
#### Remove already seen k relevant documents and sample (1M - k) docs randomly
remaining_corpus = list(set(corpus_ids) - corpus_set)
sample = min(1000000 - len(corpus_set), len(remaining_corpus))
# sample = 10
for corpus_id in random.sample(remaining_corpus, sample):
corpus_new[corpus_id] = corpus[corpus_id]
corpus_docs = {corpus_id: corpus_new[corpus_id]['title'] + corpus_new[corpus_id]['text'] for corpus_id, corpus in
corpus_new.items()}
#### Index 1M passages into the index (seperately)
model = BM25Similarity(corpus_docs)
#### Saving benchmark times
time_taken_all = {}
for query_id in query_ids:
query = {query_id: queries[query_id]}
#### Measure time to retrieve top-10 BM25 documents using single query latency
start = datetime.datetime.now()
q_res = model.most_similar(query, topn=10)
end = datetime.datetime.now()
# print(q_res)
#### Measuring time taken in ms (milliseconds)
time_taken = (end - start)
time_taken = time_taken.total_seconds() * 1000
time_taken_all[query_id] = time_taken
# logger.info("query: {}: {} {:.2f}ms".format(query_id, query, time_taken))
# logger.info("\tsearch result: {}".format(results[:2]))
time_taken = list(time_taken_all.values())
logger.info("Average time taken: {:.2f}ms".format(sum(time_taken) / len(time_taken_all)))
#### Saving benchmark times with batch
# queries = [queries[query_id] for query_id in query_ids]
start = datetime.datetime.now()
results = model.most_similar(queries, topn=10)
end = datetime.datetime.now()
#### Measuring time taken in ms (milliseconds)
time_taken = (end - start)
time_taken = time_taken.total_seconds() * 1000
logger.info("All, Spend {:.2f}ms".format(time_taken))
logger.info("Average time taken: {:.2f}ms".format(time_taken / len(queries)))
logger.info(f"Results size: {len(results)}")
#### Evaluate your retrieval using NDCG@k, MAP@K ...
ndcg, _map, recall, precision = evaluate(qrels, results)
logger.info(f"MAP: {_map}")
| 35.761062 | 113 | 0.717644 |
4a43a63b067e2c9d49aadc213c2c322feea2bc14 | 14,531 | py | Python | tb/test_arp_64.py | sergachev/verilog-ethernet | cef6b47bb3b969120cabce3b89b0c98bb47ca6a9 | [
"MIT"
] | 2 | 2020-01-09T05:58:04.000Z | 2022-01-04T03:29:00.000Z | tb/test_arp_64.py | zslwyuan/verilog-ethernet | cd6b87e984ff7cbeaf11f9468124019f5e654bdb | [
"MIT"
] | null | null | null | tb/test_arp_64.py | zslwyuan/verilog-ethernet | cd6b87e984ff7cbeaf11f9468124019f5e654bdb | [
"MIT"
] | 1 | 2021-09-25T05:45:18.000Z | 2021-09-25T05:45:18.000Z | #!/usr/bin/env python
"""
Copyright (c) 2014-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
import eth_ep
import arp_ep
module = 'arp_64'
testbench = 'test_%s' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../rtl/lfsr.v")
srcs.append("../rtl/arp_cache.v")
srcs.append("../rtl/arp_eth_rx_64.v")
srcs.append("../rtl/arp_eth_tx_64.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
if __name__ == '__main__':
print("Running test...")
test_bench()
| 31.727074 | 77 | 0.671874 |