hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3f042a0420967f88675a79d4f9cf3ecb5cca91b8 | 1,947 | py | Python | vega/trainer/callbacks/horovod.py | zjzh/vega | aa6e7b8c69024262fc483ee06113b4d1bd5156d8 | [
"Apache-2.0"
] | null | null | null | vega/trainer/callbacks/horovod.py | zjzh/vega | aa6e7b8c69024262fc483ee06113b4d1bd5156d8 | [
"Apache-2.0"
] | null | null | null | vega/trainer/callbacks/horovod.py | zjzh/vega | aa6e7b8c69024262fc483ee06113b4d1bd5156d8 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data parallel callback."""
import logging
import vega
from vega.common import ClassFactory, ClassType
from .callback import Callback
logger = logging.getLogger(__name__)
| 33.568966 | 78 | 0.698511 |
3f0440a332e725d1be2b9f4d8bf41ca99082b5e6 | 5,580 | py | Python | parse_doc.py | nprapps/idp-georgia | 316eba6195b7f410567a7e11eb4811ff7cba54cc | [
"Unlicense"
] | 1 | 2017-04-15T01:48:27.000Z | 2017-04-15T01:48:27.000Z | parse_doc.py | nprapps/idp-georgia | 316eba6195b7f410567a7e11eb4811ff7cba54cc | [
"Unlicense"
] | 153 | 2017-04-14T18:06:26.000Z | 2017-06-02T13:08:09.000Z | parse_doc.py | nprapps/idp-georgia | 316eba6195b7f410567a7e11eb4811ff7cba54cc | [
"Unlicense"
] | 1 | 2021-02-18T11:15:52.000Z | 2021-02-18T11:15:52.000Z | # _*_ coding:utf-8 _*_
import logging
import re
import app_config
from bs4 import BeautifulSoup
from shortcode import process_shortcode
# Module logger configured from the application settings.
logging.basicConfig(format=app_config.LOG_FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(app_config.LOG_LEVEL)
# Python 2 unicode regexes (ur'' literals) describing the document's
# structural markers.
# "END" alone on a line terminates the document.
end_doc_regex = re.compile(ur'^\s*[Ee][Nn][Dd]\s*$',
                           re.UNICODE)
# A run of 50+ '+' characters starts a new section.
new_section_marker_regex = re.compile(ur'^\s*\+{50,}\s*$',
                                      re.UNICODE)
# A run of 50+ '-' characters ends a section.
section_end_marker_regex = re.compile(ur'^\s*-{50,}\s*$',
                                      re.UNICODE)
# Exactly three dashes delimit the front-matter (metadata) region.
frontmatter_marker_regex = re.compile(ur'^\s*-{3}\s*$',
                                      re.UNICODE)
# Splits a "key: value" metadata line into its two halves.
extract_metadata_regex = re.compile(ur'^(.*?):(.*)$',
                                    re.UNICODE)
# Matches a "[% ... %]" shortcode on a line of its own.
shortcode_regex = re.compile(ur'^\s*\[%\s*.*\s*%\]\s*$', re.UNICODE)
def is_section_marker(tag):
    """Return True if *tag*'s text marks the start of a new section.

    A start marker is a line of 50+ consecutive '+' characters
    (see ``new_section_marker_regex``).
    """
    return bool(new_section_marker_regex.match(tag.get_text()))
def is_section_end_marker(tag):
    """Return True if *tag*'s text marks the end of a section.

    An end marker is a line of 50+ consecutive '-' characters
    (see ``section_end_marker_regex``).
    """
    return bool(section_end_marker_regex.match(tag.get_text()))
def process_section_contents(contents):
    """Render the tags of one section into a single HTML string.

    Tags whose text matches ``shortcode_regex`` are expanded through
    ``process_shortcode``; every other tag is serialized verbatim.
    """
    logger.debug('--process_post_contents start--')
    rendered = [
        process_shortcode(tag)
        if shortcode_regex.match(tag.get_text())
        else unicode(tag)
        for tag in contents
    ]
    return ''.join(rendered)
def parse_raw_sections(raw_sections):
    """
    parse raw episodes into an array of section objects

    Each raw section (a list of tags) is split into three buckets by
    the ``---`` front-matter markers: headline tags (before the first
    marker), metadata tags (between the markers) and content tags
    (after the second marker).
    """
    # Divide each episode into its subparts
    # - Headline
    # - FrontMatter
    # - Contents
    sections = []
    for raw_section in raw_sections:
        section = {}
        # Number of front-matter markers seen so far; selects which
        # bucket the current tag belongs to.
        marker_counter = 0
        section_raw_headline = []
        section_raw_metadata = []
        section_raw_contents = []
        for tag in raw_section:
            text = tag.get_text()
            m = frontmatter_marker_regex.match(text)
            if m:
                marker_counter += 1
            else:
                if (marker_counter == 0):
                    section_raw_headline.append(tag)
                elif (marker_counter == 1):
                    section_raw_metadata.append(tag)
                else:
                    section_raw_contents.append(tag)
        # NOTE(review): process_headline and process_metadata are not
        # defined in this excerpt -- presumably provided elsewhere in
        # the module; confirm against the full file.
        section[u'headline'] = process_headline(section_raw_headline)
        metadata = process_metadata(section_raw_metadata)
        # Python 2 dict iteration (this module uses ur''/u'' literals).
        for k, v in metadata.iteritems():
            section[k] = v
        section[u'contents'] = process_section_contents(section_raw_contents)
        sections.append(section)
    return sections
def split_sections(doc):
    """Split the raw document body into a list of raw sections.

    Tags appearing before the first section-start marker, or after a
    section-end marker, are orphan text and are discarded.  While
    inside a section, an end marker flushes the tags collected so far
    as one raw section.
    """
    logger.debug('--split_sections start--')
    raw_sections = []
    current_tags = []
    ignoring_orphans = True
    for child in doc.soup.body.children:
        if is_section_marker(child):
            # First start marker ends the orphan region.
            ignoring_orphans = False
        elif ignoring_orphans:
            continue
        elif is_section_end_marker(child):
            ignoring_orphans = True
            raw_sections.append(current_tags)
            current_tags = []
        else:
            current_tags.append(child)
    return raw_sections
def find_section_id(sections, id):
    """Return the index of the first section whose 'id' equals *id*.

    Sections without an 'id' key are skipped; returns None when no
    section matches.
    """
    matches = (
        position
        for position, section in enumerate(sections)
        if 'id' in section and section['id'] == id
    )
    return next(matches, None)
def process_extracted_contents(inline_intro):
    """Return the pre-rendered 'contents' markup of an inline intro."""
    contents = inline_intro['contents']
    return contents
def parse(doc):
    """Parse a google doc and extract its sections with markup.

    Returns a dict with a single 'sections' key holding the parsed
    section objects.
    """
    try:
        parsed_document = {}
        logger.info('-------------start------------')
        sections = parse_raw_sections(split_sections(doc))
        logger.info('Number of sections: %s' % len(sections))
        parsed_document['sections'] = sections
    finally:
        logger.info('-------------end------------')
    return parsed_document
| 28.040201 | 78 | 0.58405 |
3f04bc07d2d8f73a71534912779c419ef2aa5148 | 2,162 | py | Python | 01_irc_bot/bot.py | pymug/ARJ_SpoonfeedingSockets_APR2021 | ba741d4fbde11f8ab4ddda704340ab5892c19478 | [
"MIT"
] | null | null | null | 01_irc_bot/bot.py | pymug/ARJ_SpoonfeedingSockets_APR2021 | ba741d4fbde11f8ab4ddda704340ab5892c19478 | [
"MIT"
] | null | null | null | 01_irc_bot/bot.py | pymug/ARJ_SpoonfeedingSockets_APR2021 | ba741d4fbde11f8ab4ddda704340ab5892c19478 | [
"MIT"
] | null | null | null | """
Abdur-Rahmaan Janhangeer
Skeleton of https://github.com/pyhoneybot/honeybot/
"""
import time
import os
import socket
# Where the session log is written.
directory = "irc"
if not os.path.exists(directory):
    os.makedirs(directory)
target = open(os.path.join(directory, "log.txt"), "w")

# Connection settings for the freenode IRC network.
BOT_IRC_SERVER = "chat.freenode.net"
BOT_IRC_CHANNEL = "##bottestingmu"
# BOT_IRC_CHANNEL = "#python"
BOT_IRC_PORT = 6667
BOT_NICKNAME = "appinventormuBot"
# BOT_PASSWORD = ''

# NOTE(review): ping_checker() and message_checker() are not defined in
# this excerpt -- presumably they answer server PINGs and dispatch chat
# messages; confirm against the full module.
irc = socket.socket()
irc.connect((BOT_IRC_SERVER, BOT_IRC_PORT))
irc.recv(4096)  # drain the server greeting
irc.send(bytes("NICK " + BOT_NICKNAME + "\r\n", "utf8"))
ping_checker(irc.recv(4096))
irc.send(
    bytes(
        "USER appinventormuBot appinventormuBot appinventormuBot : appinventormuBot IRC\r\n",
        "utf8",
    )
)
ping_checker(irc.recv(4096))
# irc.send(bytes('msg NickServ identify ' + BOT_PASSWORD + " \r\n" ,'utf8') )
# ping_checker(irc.recv(4096))
# irc.send(bytes('NICKSERV identify ' + BOT_NICKNAME+' '+BOT_PASSWORD+ '\r\n','utf8' ) )
# ping_checker(irc.recv(4096))
time.sleep(3)  # give the server a moment before joining
irc.send(bytes("JOIN " + BOT_IRC_CHANNEL + "\r\n", "utf8"))

# Main receive loop: print, ping-check, dispatch messages and log.
while 1:
    pass  # no-op first statement, kept from the original
    line = irc.recv(4096)
    print(line)
    ping_checker(line)
    if (
        line.find(bytes("PRIVMSG", "utf8")) != -1
        or line.find(bytes("NOTICE", "utf8")) != -1
    ):
        message_checker(line)
    target.write(str(line))
    target.flush()
3f05790f911b335d2d94be5f242d22af72e43329 | 5,494 | py | Python | xenia_python_client_library/models/attachments_list.py | DutchAnalytics/xenia-python-client-library | 60dc3e21094086124b552ff5bed5895fee826b57 | [
"Apache-2.0"
] | null | null | null | xenia_python_client_library/models/attachments_list.py | DutchAnalytics/xenia-python-client-library | 60dc3e21094086124b552ff5bed5895fee826b57 | [
"Apache-2.0"
] | null | null | null | xenia_python_client_library/models/attachments_list.py | DutchAnalytics/xenia-python-client-library | 60dc3e21094086124b552ff5bed5895fee826b57 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Xenia Python Client Library
Python Client Library to interact with the Xenia API. # noqa: E501
The version of the OpenAPI document: v2.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from xenia_python_client_library.configuration import Configuration
def to_str(self):
    """Returns the string representation of the model"""
    # Pretty-print the dict form produced by to_dict() (defined on the
    # generated model class, outside this excerpt).
    return pprint.pformat(self.to_dict())
def __repr__(self):
    """For `print` and `pprint`"""
    # Delegate to to_str() so repr() and str() render identically.
    return self.to_str()
def __eq__(self, other):
    """Equal when *other* is an AttachmentsList with the same dict form."""
    if isinstance(other, AttachmentsList):
        return self.to_dict() == other.to_dict()
    return False
def __ne__(self, other):
    """Inverse of __eq__: unequal unless both dict forms match."""
    if isinstance(other, AttachmentsList):
        return self.to_dict() != other.to_dict()
    return True
| 27.60804 | 132 | 0.59028 |
3f05ec3f00a5d7d90f5ef0232521b059bc84d999 | 672 | py | Python | src/AuShadha/registry/icd10/aushadha.py | GosthMan/AuShadha | 3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e | [
"PostgreSQL"
] | 46 | 2015-03-04T14:19:47.000Z | 2021-12-09T02:58:46.000Z | src/AuShadha/registry/icd10/aushadha.py | aytida23/AuShadha | 3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e | [
"PostgreSQL"
] | 2 | 2015-06-05T10:29:04.000Z | 2015-12-06T16:54:10.000Z | src/AuShadha/registry/icd10/aushadha.py | aytida23/AuShadha | 3ab48825a0dba19bf880b6ac6141ab7a6adf1f3e | [
"PostgreSQL"
] | 24 | 2015-03-23T01:38:11.000Z | 2022-01-24T16:23:42.000Z | ################################################################################
# Create a Registration with the UI for a Role.
# Each module's aushadha.py is screened for this
#
# Each Class is registered for a Role in UI
# These can be used to generate Role based UI elements later.
#
# As of now string base role assignement is done.
# This can be later extended to class based role
################################################################################
from .models import Chapter, Section,Diagnosis
from AuShadha.apps.ui.ui import ui as UI
# Register the ICD-10 Chapter model with the UI under several roles.
# NOTE(review): Section and Diagnosis are imported at the top of this
# module but never registered here -- confirm whether that is intended.
UI.register('RegistryApp',Chapter )
UI.register('DiseaseCodes',Chapter)
UI.register('ReferenceApp',Chapter)
| 37.333333 | 80 | 0.577381 |
3f075c7ec34c5ad02a052b425ce2675ad65347ca | 973 | py | Python | Etap 2/Logia03/Zad1.py | aszokalski/Logia | 5e29745b01623df8a2f162f143656a76056af407 | [
"MIT"
] | null | null | null | Etap 2/Logia03/Zad1.py | aszokalski/Logia | 5e29745b01623df8a2f162f143656a76056af407 | [
"MIT"
] | null | null | null | Etap 2/Logia03/Zad1.py | aszokalski/Logia | 5e29745b01623df8a2f162f143656a76056af407 | [
"MIT"
] | null | null | null | from turtle import *
| 20.702128 | 45 | 0.42446 |
3f07c6d2135990949504b1e72bbaec00f43feafb | 616 | py | Python | server/src/models/movie.py | Rubilmax/netflux | 9e79063b81e3dc78055fc683c230de511827f030 | [
"MIT"
] | 2 | 2019-06-17T08:28:03.000Z | 2019-06-17T08:28:32.000Z | server/src/models/movie.py | Rubilmax/netflux | 9e79063b81e3dc78055fc683c230de511827f030 | [
"MIT"
] | 3 | 2020-09-05T00:54:20.000Z | 2021-05-07T15:34:58.000Z | server/src/models/movie.py | Rubilmax/netflux | 9e79063b81e3dc78055fc683c230de511827f030 | [
"MIT"
] | null | null | null | """
Define the Movie model
"""
from . import db
from .abc import BaseModel, MetaBaseModel
| 25.666667 | 62 | 0.657468 |
3f07dc93b37cf1bf8c17deb226c77fdb8cc21bba | 17,963 | py | Python | wmt-shared-task/segment-level/segment_level_prism.py | chryssa-zrv/UA_COMET | 527e7c86bd0a0d8ff90efda58e820108a5666b92 | [
"Apache-2.0"
] | null | null | null | wmt-shared-task/segment-level/segment_level_prism.py | chryssa-zrv/UA_COMET | 527e7c86bd0a0d8ff90efda58e820108a5666b92 | [
"Apache-2.0"
] | null | null | null | wmt-shared-task/segment-level/segment_level_prism.py | chryssa-zrv/UA_COMET | 527e7c86bd0a0d8ff90efda58e820108a5666b92 | [
"Apache-2.0"
] | null | null | null | f"""
Shell script tho reproduce results for BERTScores in data from WMT18/19 Metrics Shared task.
"""
import argparse
import hashlib
import logging
import os
import sys
from typing import Any, Dict, Iterator, List
import numpy as np
import pandas as pd
import sentencepiece as spm
import torch
from tqdm import tqdm
from fairseq import utils
from fairseq import checkpoint_utils
from fairseq.data import LanguagePairDataset
#!/usr/bin/env python3
# Module logger for the Prism metric wrapper.
logger = logging.getLogger('prism')
logger.setLevel(logging.INFO)

# Known model checkpoints, keyed by the MD5 hash of the checkpoint file.
MODELS = {
    '8412b2044da4b9b2c0a8ce87b305d0d1': {
        'name': 'm39v1',
        'path': 'todo',
        'date': '2020-04-30',
        'description': 'model released with arXiv paper April 2020',
        # Languages supported by the multilingual m39v1 checkpoint.
        'langs': ['ar', 'bg', 'bn', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es', 'et', 'eo', 'fi', 'fr', 'he',
                  'hr', 'hu', 'id', 'it', 'ja', 'kk', 'lt', 'lv', 'mk', 'nl', 'no', 'pl', 'pt', 'ro', 'ru',
                  'sk', 'sl', 'sq', 'sr', 'sv', 'tr', 'uk', 'vi', 'zh'],
    }
}

# NOTE(review): the string below indicates a copy of fairseq's
# SequenceScorer followed here in the original file; that code is not
# present in this excerpt.
"""
Copy of https://github.com/pytorch/fairseq/blob/master/fairseq/sequence_scorer.py
with softmax temperature control added
"""
def compute_kendall(
    hyp1_scores: list, hyp2_scores: list, dataframe: pd.DataFrame
) -> float:
    """Compute the official WMT19 shared-task Kendall correlation score.

    Each row of *dataframe* is a relative-ranking judgement in which
    ``hyp1`` was preferred by humans; a pair is concordant when the
    metric also scores hyp1 higher, and discordant otherwise.

    :param hyp1_scores: metric scores for the preferred hypotheses.
    :param hyp2_scores: metric scores for the dispreferred hypotheses.
    :param dataframe: the judgements table (only its length/index is used).
    :return: (concordant - discordant) / (concordant + discordant).
    """
    # Fix: the body previously read the module-level ``data`` variable
    # instead of the ``dataframe`` argument, so the function only worked
    # when a global happened to alias the right table.  (The original
    # return annotation ``(int, list)`` was also wrong: a float ratio
    # is returned.)
    assert len(hyp1_scores) == len(hyp2_scores) == len(dataframe)
    conc, disc = 0, 0
    # NOTE: assumes a default RangeIndex so ``i`` aligns with the list
    # positions of the score lists.
    for i, _row in tqdm(dataframe.iterrows(), total=len(dataframe),
                        desc="Kendall eval..."):
        if hyp1_scores[i] > hyp2_scores[i]:
            conc += 1
        else:
            disc += 1
    return (conc - disc) / (conc + disc)
def run_prism(mt: list, ref: list, language=False, temperature=1.0) -> list:
    """Score MT hypotheses against references with a Prism model.

    NOTE(review): ``Prism`` is not defined in this excerpt; presumably
    it is the wrapper class for the m39v1 checkpoint listed in
    ``MODELS`` -- confirm against the full module.

    :param mt: list of hypothesis strings.
    :param ref: list of reference strings, parallel to *mt*.
    :param language: target-language code passed to the model.
    :param temperature: softmax temperature used when scoring.
    :return: list of per-segment scores.
    """
    prism = Prism(model_dir="m39v1", lang=language, temperature=temperature)
    scores = prism.score(cand=mt, ref=ref, segment_scores=True)
    return list(scores)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        # Fix: help text said "BERTScores" (copied from a sibling
        # script) although this script evaluates Prism.
        description="Evaluates Prism scores against relative preferences."
    )
    parser.add_argument(
        "--test_path",
        default="wmt-metrics/wmt19/de-en/relative-ranks.csv",
        help="Path to the test dataframe with relative preferences.",
        type=str,
    )
    parser.add_argument(
        "--language", default="en", help="Target language of the testset.", type=str,
    )
    parser.add_argument(
        '--temperature',
        type=float,
        default=1.0,
        help='Softmax temperature: values >1.0 produce more uniform samples and values <1.0 produce sharper samples')
    parser.add_argument(
        "--run_wmt18",
        default=False,
        help="Runs entire WMT18 evaluation.",
        action="store_true",
    )
    parser.add_argument(
        "--run_wmt19",
        default=False,
        help="Runs entire WMT19 evaluation.",
        action="store_true",
    )
    args = parser.parse_args()

    if args.run_wmt18:
        # All WMT18 language pairs with relative-ranking judgements.
        lps = [
            "en-cs", "en-de", "en-et", "en-fi", "en-ru", "en-tr", "en-zh",
            "cs-en", "de-en", "et-en", "fi-en", "ru-en", "tr-en", "zh-en",
        ]
        kendall_scores = {}
        for lp in lps:
            data = pd.read_csv(f"wmt-metrics/wmt18/{lp}/relative-ranks.csv")
            tgt_lang = lp.split("-")[1]  # target side of the pair
            hyp1_scores = run_prism(
                [str(s) for s in data.hyp1], list(data.ref),
                language=tgt_lang, temperature=args.temperature)
            hyp2_scores = run_prism(
                [str(s) for s in data.hyp2], list(data.ref),
                language=tgt_lang, temperature=args.temperature)
            kendall = compute_kendall(hyp1_scores, hyp2_scores, data)
            print("Results for {}: {}".format(lp, kendall))
            kendall_scores[lp] = kendall
        print(kendall_scores)

    elif args.run_wmt19:
        lps = [
            "en-cs", "en-de", "en-fi", "en-gu", "en-kk", "en-lt", "en-ru",
            "en-zh", "de-en", "fi-en", "gu-en", "kk-en", "lt-en", "ru-en",
            "zh-en", "de-cs", "de-fr", "fr-de",
        ]
        kendall_scores = {}
        for lp in lps:
            data = pd.read_csv(f"wmt-metrics/wmt19/{lp}/relative-ranks.csv")
            tgt_lang = lp.split("-")[1]
            hyp1_scores = run_prism(
                [str(s) for s in data.hyp1], list(data.ref),
                language=tgt_lang, temperature=args.temperature)
            hyp2_scores = run_prism(
                [str(s) for s in data.hyp2], list(data.ref),
                language=tgt_lang, temperature=args.temperature)
            kendall = compute_kendall(hyp1_scores, hyp2_scores, data)
            print("Results for {}: {}".format(lp, kendall))
            kendall_scores[lp] = kendall
        print(kendall_scores)

    else:
        # Single-testset mode.
        # Fix: this branch previously referenced the undefined loop
        # variable ``lp`` (NameError at runtime); use the --language
        # flag for the target language and the test path as the key.
        data = pd.read_csv(args.test_path)
        kendall_scores = {}
        hyp1_scores = run_prism(
            [str(s) for s in data.hyp1], list(data.ref),
            language=args.language, temperature=args.temperature)
        hyp2_scores = run_prism(
            [str(s) for s in data.hyp2], list(data.ref),
            language=args.language, temperature=args.temperature)
        kendall = compute_kendall(hyp1_scores, hyp2_scores, data)
        print("Results for {}: {}".format(args.test_path, kendall))
        kendall_scores[args.test_path] = kendall
        print(kendall_scores)
| 40.006682 | 137 | 0.571341 |
3f090c825452547dfa25b58d3c0bf2f6280faf90 | 826 | py | Python | source_code/3-2-download.py | VickyMin1994/easy-scraping-tutorial | 75b7ffc79da397afa95342022c29cd72520f155f | [
"MIT"
] | 708 | 2017-12-29T05:32:34.000Z | 2022-03-25T14:29:05.000Z | source_code/3-2-download.py | VickyMin1994/easy-scraping-tutorial | 75b7ffc79da397afa95342022c29cd72520f155f | [
"MIT"
] | 6 | 2018-01-06T07:58:31.000Z | 2020-10-26T15:57:46.000Z | source_code/3-2-download.py | VickyMin1994/easy-scraping-tutorial | 75b7ffc79da397afa95342022c29cd72520f155f | [
"MIT"
] | 609 | 2017-12-29T10:04:20.000Z | 2022-03-23T18:32:37.000Z | import os
# Demo: download the same image in three different ways.
# NOTE(review): urllib_download/request_download/chunk_download are not
# defined in this excerpt; presumably they read IMAGE_URL from module
# scope -- confirm against the full file.
os.makedirs('./img/', exist_ok=True)  # ensure the output directory exists
IMAGE_URL = "https://mofanpy.com/static/img/description/learning_step_flowchart.png"
urllib_download()
print('download image1')
request_download()
print('download image2')
chunk_download()
print('download image3')
| 23.6 | 84 | 0.670702 |
3f09b543086a1b61bb8cf4a38db61dcd67d88667 | 5,787 | py | Python | flare_classifier/cnn.py | Wingham1/hessidf | 18e63e25f9989565f1f361458f7ff8e53f4579e9 | [
"Unlicense"
] | null | null | null | flare_classifier/cnn.py | Wingham1/hessidf | 18e63e25f9989565f1f361458f7ff8e53f4579e9 | [
"Unlicense"
] | 14 | 2020-01-28T23:15:48.000Z | 2022-03-12T00:12:36.000Z | flare_classifier/cnn.py | Wingham1/hessidf | 18e63e25f9989565f1f361458f7ff8e53f4579e9 | [
"Unlicense"
] | null | null | null | from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, Flatten, Dense, Dropout
import tensorflow.keras as keras
import os
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
def data_prep(path, img_rows, img_cols, color):
    """
    A function to preprocess the input data for a CNN.

    The images are resized, normalised to have pixel values between 0-1,
    converted into greyscale if required and put into a numpy array.
    Each class label is turned into a one hot pixel array, kept in the
    same order as the images.  The data is shuffled so each batch is
    representative of the overall data during training.
    This function requires that the images for each class are in a
    separate directory.

    param:
        - path, a string of the path to the directory containing the images
        - img_rows, an integer for the number of rows the resized image should have
        - img_cols, an integer for the number of columns the resized image should have
        - color, a boolean that is set to true for RGB colour space, false for greyscale
    return:
        - images, a numpy array of images with pixel values normalised to be
          between 0 and 1, shaped [n_images, img_rows, img_cols, channels]
        - labels, a numpy array of one-hot labels, ordered like images
    """
    # Fix: the original called os.listdir(path) once per image (an O(k)
    # .index() lookup inside the inner loop) and again at the end.  List
    # the class directories exactly once so the label indices cannot
    # drift if the directory contents change mid-run.
    class_names = os.listdir(path)
    images = []
    labels = []
    for class_index, image_class in enumerate(class_names):
        print('image_class =', image_class)
        path_to_class_directory = os.path.join(path, image_class)
        for img_name in os.listdir(path_to_class_directory):
            true_path = os.path.join(path_to_class_directory, img_name)
            # imread flag 1 = colour, 0 = greyscale; normalise to [0, 1].
            imread_flag = 1 if color else 0
            images.append(cv2.imread(true_path, imread_flag) / 255.0)
            labels.append(class_index)

    # Shuffle images and labels together so batches stay representative.
    data = list(zip(images, labels))
    np.random.shuffle(data)
    images, labels = zip(*data)

    # Fix: cv2.resize's third positional argument is ``dst``, not the
    # interpolation flag; pass it by keyword so INTER_AREA is honoured.
    images = [cv2.resize(img, (img_rows, img_cols),
                         interpolation=cv2.INTER_AREA) for img in images]
    channels = 3 if color else 1
    images = np.array(images).reshape(len(images), img_rows, img_cols, channels)
    labels = keras.utils.to_categorical(labels, num_classes=len(class_names))
    return images, labels
def decode_labels(coded, class_names):
    """Map one-hot (or score) vectors back to class-name strings.

    The highest-valued entry of each row in *coded* selects the class.

    Param:
        - coded, an iterable of per-class score vectors
        - class_names, the class names in the order they were encoded
    Return:
        - numpy array with the class name for each row of *coded*
    """
    winning_indices = [int(np.argmax(row)) for row in coded]
    return np.array(class_names)[winning_indices]
def calc_accuracy(pred, real):
    """Return the fraction of positions where *pred* equals *real*.

    Param:
        - pred, a numpy array of predicted classes
        - real, a numpy array of the true classes
    Return:
        - accuracy as a float in [0, 1]
    """
    n_correct = (pred == real).sum()
    return n_correct / len(pred)
if __name__ == '__main__':
    # Training configuration.
    path = 'data'  # dataset root: one sub-directory per class
    img_rows = 150
    img_cols = 150
    is_color = True  # RGB input; False would mean greyscale
    model_filename = 'flare_cnn'
    print('\nloading training data\n')
    num_classes = len(os.listdir(path))
    x, y = data_prep(path, img_rows, img_cols, color=is_color)
    x_train, x_test, y_train, y_test = train_test_split(x, y)
    print('\nbuilding model\n')
    # NOTE(review): build_CNN is not defined in this excerpt -- confirm
    # it is provided elsewhere in the module.
    cnn = build_CNN(img_rows, img_cols, color=is_color)
    print('\ntraining model\n')
    cnn.fit(x_train, y_train, batch_size=50, epochs=1, validation_split=0.2)
    print('\nsaving model\n')
    # Encode the colour mode in the saved-model filename.
    if is_color:
        model_filename = model_filename + '_RGB' + '.h5'
    else:
        model_filename = model_filename + '_grey' + '.h5'
    cnn.save(model_filename)
    print('\nsaved model to file {}\n'.format(model_filename))
    print('\nloading model\n')
    # Round-trip through disk to verify the saved model loads cleanly.
    loaded_cnn = keras.models.load_model(model_filename)
    print('\ngenerating predictions\n')
    predictions = loaded_cnn.predict(x_test)
    dec_preds = decode_labels(predictions, os.listdir(path))
    dec_ytest = decode_labels(y_test, os.listdir(path))
    # F1 score would probably be a better metric due to skew of training expample (num B > num C)
    print('\naccuracy =', calc_accuracy(dec_preds, dec_ytest))
3f0acb5cf9be9113370cabc267dfa5dafd6e50f5 | 895 | py | Python | survol/sources_types/oracle/library/__init__.py | AugustinMascarelli/survol | 7a822900e82d1e6f016dba014af5741558b78f15 | [
"BSD-3-Clause"
] | null | null | null | survol/sources_types/oracle/library/__init__.py | AugustinMascarelli/survol | 7a822900e82d1e6f016dba014af5741558b78f15 | [
"BSD-3-Clause"
] | null | null | null | survol/sources_types/oracle/library/__init__.py | AugustinMascarelli/survol | 7a822900e82d1e6f016dba014af5741558b78f15 | [
"BSD-3-Clause"
] | null | null | null | """
Oracle library
"""
import lib_common
from lib_properties import pc
# Ambiguity with tables, oracle or normal users.
| 28.870968 | 128 | 0.750838 |
3f0adc8f234944eb3b185c95906a510034084c0d | 4,104 | py | Python | src/train.py | rnagumo/dgm_vae | ea9e1a39f0018c9ed55f13f0b88f4afc4657d7e4 | [
"MIT"
] | 5 | 2020-05-27T02:28:32.000Z | 2021-03-27T08:07:50.000Z | src/train.py | rnagumo/dgmvae | ea9e1a39f0018c9ed55f13f0b88f4afc4657d7e4 | [
"MIT"
] | null | null | null | src/train.py | rnagumo/dgmvae | ea9e1a39f0018c9ed55f13f0b88f4afc4657d7e4 | [
"MIT"
] | null | null | null |
"""Training method"""
import argparse
import json
import os
import pathlib
from typing import Union
import numpy as np
import torch
from torch.backends import cudnn
import pytorch_lightning as pl
import dgmvae.models as dvm
from experiment import VAEUpdater
def export_model(model: Union[torch.nn.Module, torch.jit.ScriptModule],
path: Union[str, pathlib.Path],
input_shape: tuple = (1, 3, 64, 64),
use_script_module: bool = True
) -> Union[str, pathlib.Path]:
"""Exports model.
Args:
model (torch.nn.Module or torch.jit.ScriptModule): Saved model.
path (str or pathlib.Path): Path to file.
input_shape (tuple, optional): Tuple of input data shape.
use_script_module (bool, optional): Boolean flag for using script
module.
Returns:
path (str or pathlib.Path): Path to saved file.
"""
model = model.cpu().eval()
if isinstance(model, torch.jit.ScriptModule):
assert use_script_module, \
"Provided model is a ScriptModule, set use_script_module to True."
if use_script_module:
if not isinstance(model, torch.jit.ScriptModule):
assert input_shape is not None
traced_model = torch.jit.trace(model, torch.zeros(*input_shape))
else:
traced_model = model
torch.jit.save(traced_model, path)
else:
torch.save(model, path) # saves model as a nn.Module
return path
if __name__ == "__main__":
main()
| 29.52518 | 79 | 0.576754 |
3f0db4e9c999e9ae4b627b4d2fef5914dc26a29e | 17,193 | py | Python | kea/axi_lite_registers/_registers.py | SmartAcoustics/Kea | 5790f18dafccfc01fe9dbe98de5bb1a5ce584c56 | [
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 3 | 2020-02-28T13:03:59.000Z | 2020-09-20T06:33:04.000Z | kea/axi_lite_registers/_registers.py | SmartAcoustics/Kea | 5790f18dafccfc01fe9dbe98de5bb1a5ce584c56 | [
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null | kea/axi_lite_registers/_registers.py | SmartAcoustics/Kea | 5790f18dafccfc01fe9dbe98de5bb1a5ce584c56 | [
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 3 | 2018-12-17T16:33:08.000Z | 2020-01-21T14:10:25.000Z | from myhdl import Signal, intbv, block, always_comb, ConcatSignal
import myhdl
from collections import OrderedDict
import keyword
def _is_valid_name(ident: str) -> bool:
'''Determine if ident is a valid register or bitfield name.
'''
if not isinstance(ident, str):
raise TypeError("expected str, but got {!r}".format(type(ident)))
if not ident.isidentifier():
return False
if keyword.iskeyword(ident):
return False
return True
| 36.89485 | 79 | 0.571163 |
3f0e2d51a2df3a348d377cd1a32d06c17973e189 | 1,429 | py | Python | tools/clear_from_n.py | ubercomrade/MultiDeNA | 128f2963cf0a49f94c85744c5eaaf5c41f0e161c | [
"MIT"
] | null | null | null | tools/clear_from_n.py | ubercomrade/MultiDeNA | 128f2963cf0a49f94c85744c5eaaf5c41f0e161c | [
"MIT"
] | null | null | null | tools/clear_from_n.py | ubercomrade/MultiDeNA | 128f2963cf0a49f94c85744c5eaaf5c41f0e161c | [
"MIT"
] | null | null | null | import random
| 28.019608 | 86 | 0.501749 |
3f109ba5a82b80a619a2cca61182b7519ce6df9d | 2,217 | py | Python | tensorbank/tf/slices.py | pshved/tensorbank | 6a1497b58cfac5e7218ec42c04dd62e17b7bb88c | [
"MIT"
] | 1 | 2020-07-07T09:00:28.000Z | 2020-07-07T09:00:28.000Z | tensorbank/tf/slices.py | pshved/tensorbank | 6a1497b58cfac5e7218ec42c04dd62e17b7bb88c | [
"MIT"
] | null | null | null | tensorbank/tf/slices.py | pshved/tensorbank | 6a1497b58cfac5e7218ec42c04dd62e17b7bb88c | [
"MIT"
] | null | null | null | """Advanced Tensor slicing
==========================
Utilities for advanced tensor slicing and batching operations.
Reference
---------
"""
import tensorflow as tf
def slice_within_stride(x, stride, si=0, ei=None, keepdims=True):
    """Select ``x[..., (i * stride + si):(i * stride + ei)]`` for each i.

    The tensor returned will have the last dimension shrunk by a factor of
    ``(ei-si)/stride``.

    As a natural special case, ``tb.multiple_within_stride(x, N)`` is
    equivalent to adding a dimension of ``N`` at the end, as in
    ``tf.expand_dims(x, (..., -1, N))``.

    Example:

        When predicting anchor positions in SSD, ``num_classes +
        num_offsets`` are predicted for each anchor.  To get only the
        class confidence, this would be used::

            logits = model(input)
            class_logits = tb.slice_within_stride(
                logits,
                num_classes + num_offsets,
                0,
                num_classes)
            loss = softmax_cross_entropy_with_logits(
                class_preds, class_logits)

    Args:
        x (tf.Tensor): value to modify
        stride (int): stride for the last dimension
        si (int): starting index within stride. Negative indices are
            supported. Defaults to 0.
        ei (int): end index (1 element after the last) within stride.
            Negative indices are supported. Defaults to ``None``, which
            means "until the last element".
        keepdims (bool): if False, adds another dimension that
            iterates over each stride. This dimension will be of size
            ``ei-si``. Defaults to True.

    Returns:
        tf.Tensor: modified ``x`` with the last dimension sliced.
    """
    # Collapse everything onto the stride axis, then slice within it.
    strided = tf.reshape(x, (-1, stride))
    sliced = strided[..., si:ei]

    # Rebuild the original leading shape; the last axis is inferred.
    new_shape = list(x.shape)
    new_shape[-1] = -1
    if not keepdims:
        if ei is None:
            ei = stride
        # Size of the slice, honouring negative indices.  O(stride),
        # which is small.
        last_dim_len = len(range(stride)[si:ei])
        new_shape.append(last_dim_len)
    # Fix: removed a leftover debug print of the target shape; the
    # docstring example above was also corrected -- the original passed
    # the arguments in the wrong order (stride=0).
    return tf.reshape(sliced, new_shape)
| 31.225352 | 74 | 0.592242 |
3f1168ed05032f188730bcd06823c66a0ec28d77 | 5,168 | py | Python | testfixtures/tests/test_roundcomparison.py | Alexhuszagh/XLDiscoverer | 60937b1f7f2e23af4219eb26519d6b83fb4232d6 | [
"Apache-2.0",
"MIT"
] | null | null | null | testfixtures/tests/test_roundcomparison.py | Alexhuszagh/XLDiscoverer | 60937b1f7f2e23af4219eb26519d6b83fb4232d6 | [
"Apache-2.0",
"MIT"
] | null | null | null | testfixtures/tests/test_roundcomparison.py | Alexhuszagh/XLDiscoverer | 60937b1f7f2e23af4219eb26519d6b83fb4232d6 | [
"Apache-2.0",
"MIT"
] | null | null | null | # Copyright (c) 2014 Simplistix Ltd
# See license.txt for license details.
from decimal import Decimal
from testfixtures import RoundComparison as R, compare, ShouldRaise
from unittest import TestCase
from ..compat import PY2, PY3
| 33.128205 | 79 | 0.629257 |
3f11b3d9455edd6883b563bf0cbd4035db741ccc | 23,628 | py | Python | config/usb_device_cdc.py | newbs/usb | 5aeafc26849673a357a6110713524387f2f5f84d | [
"0BSD"
] | null | null | null | config/usb_device_cdc.py | newbs/usb | 5aeafc26849673a357a6110713524387f2f5f84d | [
"0BSD"
] | null | null | null | config/usb_device_cdc.py | newbs/usb | 5aeafc26849673a357a6110713524387f2f5f84d | [
"0BSD"
] | null | null | null | """*****************************************************************************
* Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
# Module-level state for the USB CDC function-driver configuration UI.
# Transfer-queue size defaults; a handler below runs when the user edits
# the corresponding queue-size symbols.
currentQSizeRead = 1
currentQSizeWrite = 1
currentQSizeSerialStateNotification = 1
# Fixed layout constants for one CDC function: interface count, bytes of
# configuration descriptor, and endpoint counts (SAM parts use one more
# endpoint than PIC32 parts, per the two values below).
cdcInterfacesNumber = 2
cdcDescriptorSize = 58
cdcEndpointsPic32 = 2
cdcEndpointsSAM = 3
# Per-instance symbol handles; presumably populated when a CDC function
# instance is created/configured -- confirm against the instantiate hooks.
indexFunction = None
configValue = None
startInterfaceNumber = None
numberOfInterfaces = None
useIad = None
epNumberInterrupt = None
epNumberBulkOut = None
epNumberBulkIn = None
cdcEndpointNumber = None
# This function is called when user modifies the CDC Queue Size.
| 46.60355 | 210 | 0.752878 |
3f147cdd3b7dfdb59f469f69eb27289609a80ec7 | 169 | py | Python | quant_test/__init__.py | rgkimball/quant_test | efa74de02f6a65c2d61029d6e8a1c0b5ac34b0c2 | [
"Apache-2.0"
] | null | null | null | quant_test/__init__.py | rgkimball/quant_test | efa74de02f6a65c2d61029d6e8a1c0b5ac34b0c2 | [
"Apache-2.0"
] | 1 | 2021-02-02T23:10:35.000Z | 2021-02-02T23:10:35.000Z | quant_test/__init__.py | rgkimball/quant_test | efa74de02f6a65c2d61029d6e8a1c0b5ac34b0c2 | [
"Apache-2.0"
] | null | null | null | """
quant_test
~~~~~~
The quant_test package - a Python package template project that is intended
to be used as a cookie-cutter for developing new Python packages.
"""
| 21.125 | 75 | 0.745562 |
3f14a246aafc9d9fb1bbbb14593c493646a1817d | 5,189 | py | Python | django_sql_dashboard/extensions/ExtendedParameter.py | ipamo/django-sql-dashboard | c976bb59db70df200bdc44f1598aab31a25d3930 | [
"Apache-2.0"
] | null | null | null | django_sql_dashboard/extensions/ExtendedParameter.py | ipamo/django-sql-dashboard | c976bb59db70df200bdc44f1598aab31a25d3930 | [
"Apache-2.0"
] | null | null | null | django_sql_dashboard/extensions/ExtendedParameter.py | ipamo/django-sql-dashboard | c976bb59db70df200bdc44f1598aab31a25d3930 | [
"Apache-2.0"
] | null | null | null | import re
from django.utils.html import escape
from django.utils.safestring import mark_safe
from ..utils import Parameter
| 50.378641 | 192 | 0.586626 |
3f159df489050cc9cb8053b59296d74b1792277e | 3,644 | py | Python | jiotc/models/bilstm_model.py | JHP4911/JioTC | be82159bdb0f2f10b1ac85966659626b5e8a7304 | [
"MIT"
] | 4 | 2020-06-17T03:32:23.000Z | 2021-07-02T06:46:26.000Z | jiotc/models/bilstm_model.py | dongrixinyu/JioTC | be82159bdb0f2f10b1ac85966659626b5e8a7304 | [
"MIT"
] | null | null | null | jiotc/models/bilstm_model.py | dongrixinyu/JioTC | be82159bdb0f2f10b1ac85966659626b5e8a7304 | [
"MIT"
] | null | null | null | # -*- coding=utf-8 -*-
# author: dongrixinyu
# contact: dongrixinyu.89@163.com
# blog: https://github.com/dongrixinyu/
# file: bare_embedding.py
# time: 2020-06-12 11:27
import os
import pdb
import logging
from typing import Union, Optional, Dict, Any, Tuple
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from jiotc.embeddings.base_embedding import BaseEmbedding
from .base_model import BaseModel
# Bidirectional LSTM neural network (many-to-one)
| 33.431193 | 128 | 0.602634 |
3f15b4889cdf171226bf2916a6b9994712b58560 | 56,576 | py | Python | tests/learning/test_prediction_error_delta_function.py | mihaic/psyneulink | 3d2fc3117c82bccc92fc585add330b0f9b35c830 | [
"Apache-2.0"
] | null | null | null | tests/learning/test_prediction_error_delta_function.py | mihaic/psyneulink | 3d2fc3117c82bccc92fc585add330b0f9b35c830 | [
"Apache-2.0"
] | null | null | null | tests/learning/test_prediction_error_delta_function.py | mihaic/psyneulink | 3d2fc3117c82bccc92fc585add330b0f9b35c830 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from psyneulink import PredictionErrorDeltaFunction
np.set_printoptions(suppress=True)
| 72.255428 | 80 | 0.352128 |
3f18a598378fac5606353de6db627c25234fa321 | 22,823 | py | Python | jts/backend/jobapps/views.py | goupaz/babylon | 4e638d02705469061e563fec349676d8faa9f648 | [
"MIT"
] | 1 | 2019-08-08T09:03:17.000Z | 2019-08-08T09:03:17.000Z | backend/jobapps/views.py | goupaz/website | ce1bc8b6c52ee0815a7b98842ec3bde0c20e0add | [
"Apache-2.0"
] | 2 | 2020-10-09T19:16:09.000Z | 2020-10-10T20:40:41.000Z | jts/backend/jobapps/views.py | goupaz/babylon-hackathon | 4e638d02705469061e563fec349676d8faa9f648 | [
"MIT"
] | 1 | 2019-07-21T01:42:21.000Z | 2019-07-21T01:42:21.000Z | from datetime import datetime as dt
from django.utils import timezone
import uuid
from django.contrib.auth import get_user_model
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import api_view
from company.utils import get_or_create_company
from position.utils import get_or_insert_position
from utils import utils
from utils.error_codes import ResponseCodes
from utils.generic_json_creator import create_response
from .models import JobApplication, Contact, ApplicationStatus, StatusHistory
from .models import JobApplicationNote, JobApplicationFile
from .models import Source
from alumni.serializers import AlumniSerializer
from .serializers import ApplicationStatusSerializer
from .serializers import JobApplicationNoteSerializer, JobApplicationFileSerializer
from .serializers import JobApplicationSerializer, ContactSerializer
from .serializers import SourceSerializer
from .serializers import StatusHistorySerializer
User = get_user_model()
| 50.605322 | 131 | 0.623056 |
3f18ab10027c8065766c8a8c8fb7ac830007c2ab | 127 | py | Python | reassign.py | Ca2Patton/PythonStuff | 9d13f340296bcea41dfca87a4b36e445821703de | [
"Apache-2.0"
] | null | null | null | reassign.py | Ca2Patton/PythonStuff | 9d13f340296bcea41dfca87a4b36e445821703de | [
"Apache-2.0"
] | null | null | null | reassign.py | Ca2Patton/PythonStuff | 9d13f340296bcea41dfca87a4b36e445821703de | [
"Apache-2.0"
] | null | null | null | #!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
x=5
print x
reassign(x)
print x
| 12.7 | 62 | 0.740157 |
3f1926f6984e1a663e867e004da2e2a9429fe1d9 | 6,632 | py | Python | python_modules/dagster/dagster/core/meta/config_types.py | Ramshackle-Jamathon/dagster | 959037ab8d8fb7ed49fbc2daff9fa566f71766f2 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/meta/config_types.py | Ramshackle-Jamathon/dagster | 959037ab8d8fb7ed49fbc2daff9fa566f71766f2 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/meta/config_types.py | Ramshackle-Jamathon/dagster | 959037ab8d8fb7ed49fbc2daff9fa566f71766f2 | [
"Apache-2.0"
] | null | null | null | from collections import namedtuple
from dagster import check
from dagster.config.config_type import ConfigType, ConfigTypeKind
from dagster.config.field import Field
from dagster.core.serdes import whitelist_for_serdes
# This function is used by the recursive descent
# through all the inner types. This does *not*
# recursively descend through the type parameters
# of generic types. It just gets the next level of
# types. Either the direct type parameters of a
# generic type. Or the type refs of all the fields
# if it is a type with fields.
def _get_next_level_refs(ref):
    """Return the immediate child type refs of ``ref``, if any.

    Closed generic types yield their type parameters; field-bearing types
    yield the type refs of their fields. Permissive types may carry no
    fields, in which case nothing is returned (implicit ``None``).
    """
    kind = ref.kind
    if ConfigTypeKind.is_closed_generic(kind):
        return ref.type_param_refs
    # Permissive types can have fields == None, so check both conditions.
    if ConfigTypeKind.has_fields(kind) and ref.fields:
        return [fm.type_ref for fm in ref.fields]
# A type reference in these serializable data structures are one of two things
# 1) A closed generic type (e.g. List[Int] of Optional[Set[str]])
# 2) Or a reference to a non-generic type, such as Dict, Selector, or a Scalar.
# Upon deserialization and when hydrated back to the graphql query, it will
# be the responsibility of that module to maintain a dictionary of the
# non-generic types and then do lookups into the dictionary in order to
# to explode the entire type hierarchy requested by the client
TypeRef = (ConfigTypeMeta, NonGenericTypeRefMeta)
def meta_from_field(name, field):
    """Build a serializable ``ConfigFieldMeta`` for the config ``field``
    registered under ``name``."""
    check.str_param(name, 'name')
    check.inst_param(field, 'field', Field)
    has_default = field.default_provided
    # Only stringify the default when one was actually provided.
    default_as_str = field.default_value_as_str if has_default else None
    return ConfigFieldMeta(
        name=name,
        type_ref=type_ref_of(field.config_type),
        is_required=field.is_required,
        default_provided=has_default,
        default_value_as_str=default_as_str,
        description=field.description,
    )
def type_ref_of(config_type):
    """Return the serializable reference for ``config_type``.

    Closed generic types are expanded into a full ``ConfigTypeMeta``;
    everything else becomes a key-only ref the consumer resolves later.
    """
    check.inst_param(config_type, 'config_type', ConfigType)
    if not ConfigTypeKind.is_closed_generic(config_type.kind):
        return NonGenericTypeRefMeta(key=config_type.key)
    return meta_from_config_type(config_type)
def type_refs_of(type_list):
    """Map ``type_ref_of`` over ``type_list``; a ``None`` list passes through."""
    if type_list is None:
        return None
    return [type_ref_of(config_type) for config_type in type_list]
def meta_from_config_type(config_type):
    """Build the serializable ``ConfigTypeMeta`` for ``config_type``.

    Enum values are captured only for ENUM kinds; field metas only for
    field-bearing kinds (permissive types included).
    """
    check.inst_param(config_type, 'config_type', ConfigType)
    kind = config_type.kind

    if kind == ConfigTypeKind.ENUM:
        enum_values = [
            ConfigEnumValueMeta(ev.config_value, ev.description)
            for ev in config_type.enum_values
        ]
    else:
        enum_values = None

    if ConfigTypeKind.has_fields(kind):
        fields = [
            meta_from_field(name, field) for name, field in config_type.fields.items()
        ]
    else:
        fields = None

    return ConfigTypeMeta(
        key=config_type.key,
        given_name=config_type.given_name,
        kind=kind,
        description=config_type.description,
        type_param_refs=type_refs_of(config_type.type_params),
        enum_values=enum_values,
        fields=fields,
    )
| 37.258427 | 99 | 0.692853 |
3f197e7a784ea8a0684cc88fb9aeb9e0486240f7 | 624 | py | Python | database/migrations/2017_06_14_205530_create_users_table.py | emirbek/cope | be72b71e8045d1fe16d7ac6c680fc9f274af6c50 | [
"MIT"
] | 2 | 2017-06-21T09:26:51.000Z | 2020-10-15T19:45:20.000Z | database/migrations/2017_06_14_205530_create_users_table.py | emirbek/cope | be72b71e8045d1fe16d7ac6c680fc9f274af6c50 | [
"MIT"
] | 11 | 2017-06-18T21:16:58.000Z | 2021-06-12T18:34:20.000Z | database/migrations/2017_06_14_205530_create_users_table.py | emirbek/cope | be72b71e8045d1fe16d7ac6c680fc9f274af6c50 | [
"MIT"
] | 2 | 2017-10-27T06:53:57.000Z | 2021-09-26T10:26:31.000Z | from orator.migrations import Migration
| 24 | 51 | 0.525641 |
3f1a06109933032a2467ac3c5a49cf17e45b67a0 | 387 | py | Python | make_json.py | jfalcou/infra | 97e05039a3f4f3d69b7c50233aed5e5d60a59605 | [
"BSD-2-Clause"
] | 135 | 2017-01-12T04:39:08.000Z | 2020-05-08T17:08:52.000Z | make_json.py | jfalcou/infra | 97e05039a3f4f3d69b7c50233aed5e5d60a59605 | [
"BSD-2-Clause"
] | 229 | 2017-01-23T12:45:44.000Z | 2020-05-13T17:36:57.000Z | make_json.py | jfalcou/infra | 97e05039a3f4f3d69b7c50233aed5e5d60a59605 | [
"BSD-2-Clause"
] | 106 | 2017-04-18T14:42:34.000Z | 2020-05-07T14:24:34.000Z | from configparser import ConfigParser
import os
import json
obj = {}
config = ConfigParser()
config.read(os.path.join(os.getenv("HOME"), ".aws", "credentials"))
obj["MY_ACCESS_KEY"] = config.get("default", "aws_access_key_id", fallback="")
obj["MY_SECRET_KEY"] = config.get("default", "aws_secret_access_key", fallback="")
with open("config.json", "w") as out:
json.dump(obj, out)
| 29.769231 | 82 | 0.710594 |
3f1cf61b4a31d4bea3fa0897656382d3014a7dec | 1,051 | py | Python | ztest-type1.py | tochiji/ztest-type1 | ca141d13a74708846cba414f2051200d162302a0 | [
"MIT"
] | null | null | null | ztest-type1.py | tochiji/ztest-type1 | ca141d13a74708846cba414f2051200d162302a0 | [
"MIT"
] | null | null | null | ztest-type1.py | tochiji/ztest-type1 | ca141d13a74708846cba414f2051200d162302a0 | [
"MIT"
] | null | null | null | #########################################################
# 1
#########################################################
import sys
import math
# 4
if len(sys.argv[1:]) != 4:
error_usage()
n1,p1,n2,p2 = map(float, sys.argv[1:])
p = ((n1*p1) + (n2*p2))/(n1+n2)
# n30
if (n1 < 30) or (n2 < 30):
error_usage()
# 01
if not (0 <= p1 <= 1) or not (0 <= p2 <= 1):
error_usage()
T = math.fabs(p1 - p2) / math.sqrt((p * (1-p)) * ((1/n1) + (1/n2)))
if T >= 2.58:
print("1% (:" + str(T) + "")
elif T >= 1.96:
print("5% (:" + str(T) + "")
elif T >= 1.65:
print("10% (:" + str(T) + "")
else:
print(" (:" + str(T) + "")
| 24.44186 | 67 | 0.488107 |
3f1d2166206051864985cc1f8d2162c4a056737f | 13,796 | py | Python | flask_demo/main.py | yzj2019/database_learning | a9260799f96010674bb4077180ee45a51481e832 | [
"MIT"
] | null | null | null | flask_demo/main.py | yzj2019/database_learning | a9260799f96010674bb4077180ee45a51481e832 | [
"MIT"
] | null | null | null | flask_demo/main.py | yzj2019/database_learning | a9260799f96010674bb4077180ee45a51481e832 | [
"MIT"
] | null | null | null | # coding=utf-8
import functools
from flask import Flask, session
from flask import redirect
from flask import request, make_response
from flask import render_template
from flask import url_for
from flask_bootstrap import Bootstrap
#
from db import *
# json
import json
# app
# Flask application setup; instance_relative_config lets config live in the
# instance folder.
app = Flask(__name__, instance_relative_config=True)
bootstrap=Bootstrap(app)
# Secret key enables signed sessions.
# NOTE(review): a hard-coded secret ('lab3') is acceptable for a lab
# exercise but must not ship to production.
app.secret_key = 'lab3'
# Route handlers (defined elsewhere in this file) map URLs such as
# host/<table> to rendered HTML pages; the original inline comments here
# were non-ASCII and were lost during extraction.
if __name__ == "__main__":
    # Listen on all interfaces with the debug reloader on (development only).
    app.run(host = "0.0.0.0", debug=True)
3f1e42b52ec11496ab90f620e8e049e8cb9d426e | 1,462 | py | Python | tests/test_env.py | dmitrvk/mymusichere-app | 02a6d5f60a72197e08c98da59b0ef7e7168dcf4b | [
"MIT"
] | null | null | null | tests/test_env.py | dmitrvk/mymusichere-app | 02a6d5f60a72197e08c98da59b0ef7e7168dcf4b | [
"MIT"
] | 14 | 2020-06-06T19:08:03.000Z | 2020-12-03T12:07:04.000Z | tests/test_env.py | dmitrvk/mymusichere-app | 02a6d5f60a72197e08c98da59b0ef7e7168dcf4b | [
"MIT"
] | null | null | null | # Licensed under the MIT License
from mymusichere import env
| 35.658537 | 76 | 0.678523 |
3f1f9aba8ecf3aa6254017a10062ec1345e2b069 | 2,943 | py | Python | tests/factorys.py | 2h4dl/pymilvus | 6af6d4922242ae48d90ed5a1afb891d9e4d1540e | [
"Apache-2.0"
] | null | null | null | tests/factorys.py | 2h4dl/pymilvus | 6af6d4922242ae48d90ed5a1afb891d9e4d1540e | [
"Apache-2.0"
] | null | null | null | tests/factorys.py | 2h4dl/pymilvus | 6af6d4922242ae48d90ed5a1afb891d9e4d1540e | [
"Apache-2.0"
] | null | null | null | # STL imports
import random
import logging
import string
import time
import datetime
import random
import struct
import sys
from functools import wraps
# Third party imports
import numpy as np
import faker
from faker.providers import BaseProvider
# Silence faker's chatty logger during test runs.
logging.getLogger('faker').setLevel(logging.ERROR)
# Make sibling modules importable when running from the repo root.
sys.path.append('.')
# grpc
from milvus.grpc_gen import milvus_pb2
fake = faker.Faker()
# NOTE(review): FakerProvider is expected to be a BaseProvider subclass
# defined elsewhere in this module -- confirm it is in scope here.
fake.add_provider(FakerProvider)
| 23.357143 | 115 | 0.675841 |
3f201da335b43cb8e7b8ff1ba5bda41dec4c38c6 | 524 | py | Python | HACKERRANK_Regrex&Parsing/Matrix_Script.py | StefaniaSferragatta/ADM2020-HW1 | 8f85ac1c8dd4bff52c5c17987c9e96b209a93830 | [
"MIT"
] | null | null | null | HACKERRANK_Regrex&Parsing/Matrix_Script.py | StefaniaSferragatta/ADM2020-HW1 | 8f85ac1c8dd4bff52c5c17987c9e96b209a93830 | [
"MIT"
] | null | null | null | HACKERRANK_Regrex&Parsing/Matrix_Script.py | StefaniaSferragatta/ADM2020-HW1 | 8f85ac1c8dd4bff52c5c17987c9e96b209a93830 | [
"MIT"
] | null | null | null | import math
import os
import random
import re
import sys
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
m = int(first_multiple_input[1])
matrix = []
if (n>0 and m>0 and n<100 and m< 100):
for _ in range(n):
matrix_item = input()
matrix.append(matrix_item)
for _ in range(m):
string = ""
for cols in range (m):
for rows in range (n):
string += matrix[rows][cols]
output = re.sub(r"\b[!@#$%& ]+\b"," ", string)
print(output)
| 21.833333 | 50 | 0.59542 |
3f203a9e4f2175047e23a90b2ce6f785f3b752e7 | 4,495 | py | Python | smartsheet/models/filter.py | Funtimes-Smarts/Python-import-Smart | ffb99887d03e31d10da553c9ee8c7be1238816fc | [
"Apache-2.0"
] | null | null | null | smartsheet/models/filter.py | Funtimes-Smarts/Python-import-Smart | ffb99887d03e31d10da553c9ee8c7be1238816fc | [
"Apache-2.0"
] | null | null | null | smartsheet/models/filter.py | Funtimes-Smarts/Python-import-Smart | ffb99887d03e31d10da553c9ee8c7be1238816fc | [
"Apache-2.0"
] | null | null | null | # pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2016 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from .criteria import Criteria
from ..types import TypedList
from ..util import prep
from datetime import datetime
import json
import logging
import six
| 30.787671 | 75 | 0.588654 |
3f20478583f74a50977bf5b718f080efb96af674 | 5,524 | py | Python | utils/train.py | danilonumeroso/MEG | 86f2a664e22082b0ff5d01c8e0ad9618b64e9065 | [
"Apache-2.0"
] | 6 | 2020-10-26T13:53:01.000Z | 2021-03-12T14:26:43.000Z | utils/train.py | danilonumeroso/Explainer | e133c150738f09998d0350e58dece4824ee58a76 | [
"Apache-2.0"
] | null | null | null | utils/train.py | danilonumeroso/Explainer | e133c150738f09998d0350e58dece4824ee58a76 | [
"Apache-2.0"
] | 1 | 2021-03-13T01:08:12.000Z | 2021-03-13T01:08:12.000Z | import torch
import torch.nn.functional as F
import os.path as osp
import json
from torch_geometric.utils import precision, recall
from torch_geometric.utils import f1_score, accuracy
from torch.utils.tensorboard import SummaryWriter
| 33.889571 | 101 | 0.589609 |
3f2291365a0ddde1dace00a736bbde9087e031ac | 9,716 | py | Python | python-sdk/nuimages/scripts/render_images.py | bjajoh/nuscenes-devkit | 5bc5627801c3867de395a500a1905c24171cec7d | [
"Apache-2.0"
] | 1,284 | 2018-09-12T14:08:06.000Z | 2022-03-31T08:28:20.000Z | python-sdk/nuimages/scripts/render_images.py | bjajoh/nuscenes-devkit | 5bc5627801c3867de395a500a1905c24171cec7d | [
"Apache-2.0"
] | 518 | 2018-10-20T08:34:15.000Z | 2022-03-31T08:16:08.000Z | python-sdk/nuimages/scripts/render_images.py | bjajoh/nuscenes-devkit | 5bc5627801c3867de395a500a1905c24171cec7d | [
"Apache-2.0"
] | 487 | 2018-09-13T20:03:21.000Z | 2022-03-31T04:41:17.000Z | # nuScenes dev-kit.
# Code written by Holger Caesar, 2020.
import argparse
import gc
import os
import random
from typing import List
from collections import defaultdict
import cv2
import tqdm
from nuimages.nuimages import NuImages
def render_images(nuim: NuImages,
                  mode: str = 'all',
                  cam_name: str = None,
                  log_name: str = None,
                  sample_limit: int = 50,
                  filter_categories: List[str] = None,
                  out_type: str = 'image',
                  out_dir: str = '~/Downloads/nuImages',
                  cleanup: bool = True) -> None:
    """
    Render a random selection of images and save them to disk.
    Note: The images rendered here are keyframes only.
    :param nuim: NuImages instance.
    :param mode: What to render:
        "image" for the image without annotations,
        "annotated" for the image with annotations,
        "trajectory" for a rendering of the trajectory of the vehicle,
        "all" to render all of the above separately.
    :param cam_name: Only render images from a particular camera, e.g. "CAM_BACK'.
    :param log_name: Only render images from a particular log, e.g. "n013-2018-09-04-13-30-50+0800".
    :param sample_limit: Maximum number of samples (images) to render. Note that the mini split only includes 50 images.
    :param filter_categories: Specify a list of object_ann category names. Every sample that is rendered must
        contain annotations of any of those categories.
    :param out_type: The output type as one of the following:
        'image': Renders a single image for the image keyframe of each sample.
        'video': Renders a video for all images/pcls in the clip associated with each sample.
    :param out_dir: Folder to render the images to.
    :param cleanup: Whether to delete images after rendering the video. Not relevant for out_type == 'image'.
    """
    # Check and convert inputs.
    assert out_type in ['image', 'video'], ' Error: Unknown out_type %s!' % out_type
    all_modes = ['image', 'annotated', 'trajectory']
    assert mode in all_modes + ['all'], 'Error: Unknown mode %s!' % mode
    assert not (out_type == 'video' and mode == 'trajectory'), 'Error: Cannot render "trajectory" for videos!'
    # Expand 'all' into the concrete mode list; videos only support 'image'.
    if mode == 'all':
        if out_type == 'image':
            modes = all_modes
        elif out_type == 'video':
            modes = [m for m in all_modes if m not in ['annotated', 'trajectory']]
        else:
            # NOTE(review): unreachable -- out_type was asserted above; the
            # message string also contains a stray '"' where ':' belongs.
            raise Exception('Error" Unknown mode %s!' % mode)
    else:
        modes = [mode]
    # Validate the requested filter categories before doing any work.
    if filter_categories is not None:
        category_names = [c['name'] for c in nuim.category]
        for category_name in filter_categories:
            assert category_name in category_names, 'Error: Invalid object_ann category %s!' % category_name
    # Create output folder.
    out_dir = os.path.expanduser(out_dir)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    # Filter by camera.
    sample_tokens = [s['token'] for s in nuim.sample]
    if cam_name is not None:
        sample_tokens_cam = []
        for sample_token in sample_tokens:
            sample = nuim.get('sample', sample_token)
            key_camera_token = sample['key_camera_token']
            sensor = nuim.shortcut('sample_data', 'sensor', key_camera_token)
            if sensor['channel'] == cam_name:
                sample_tokens_cam.append(sample_token)
        sample_tokens = sample_tokens_cam
    # Filter by log.
    if log_name is not None:
        sample_tokens_cleaned = []
        for sample_token in sample_tokens:
            sample = nuim.get('sample', sample_token)
            log = nuim.get('log', sample['log_token'])
            if log['logfile'] == log_name:
                sample_tokens_cleaned.append(sample_token)
        sample_tokens = sample_tokens_cleaned
    # Filter samples by category.
    if filter_categories is not None:
        # Get categories in each sample.
        sd_to_object_cat_names = defaultdict(lambda: set())
        for object_ann in nuim.object_ann:
            category = nuim.get('category', object_ann['category_token'])
            sd_to_object_cat_names[object_ann['sample_data_token']].add(category['name'])
        # Filter samples: keep those containing at least one wanted category.
        sample_tokens_cleaned = []
        for sample_token in sample_tokens:
            sample = nuim.get('sample', sample_token)
            key_camera_token = sample['key_camera_token']
            category_names = sd_to_object_cat_names[key_camera_token]
            if any([c in category_names for c in filter_categories]):
                sample_tokens_cleaned.append(sample_token)
        sample_tokens = sample_tokens_cleaned
    # Get a random selection of samples.
    random.shuffle(sample_tokens)
    # Limit number of samples.
    sample_tokens = sample_tokens[:sample_limit]
    print('Rendering %s for mode %s to folder %s...' % (out_type, mode, out_dir))
    for sample_token in tqdm.tqdm(sample_tokens):
        sample = nuim.get('sample', sample_token)
        log = nuim.get('log', sample['log_token'])
        log_name = log['logfile']
        key_camera_token = sample['key_camera_token']
        sensor = nuim.shortcut('sample_data', 'sensor', key_camera_token)
        sample_cam_name = sensor['channel']
        sd_tokens = nuim.get_sample_content(sample_token)
        # We cannot render a video if there are missing camera sample_datas.
        if len(sd_tokens) < 13 and out_type == 'video':
            print('Warning: Skipping video for sample token %s, as not all 13 frames exist!' % sample_token)
            continue
        for mode in modes:
            out_path_prefix = os.path.join(out_dir, '%s_%s_%s_%s' % (log_name, sample_token, sample_cam_name, mode))
            if out_type == 'image':
                write_image(nuim, key_camera_token, mode, '%s.jpg' % out_path_prefix)
            elif out_type == 'video':
                write_video(nuim, sd_tokens, mode, out_path_prefix, cleanup=cleanup)
def write_video(nuim: NuImages,
                sd_tokens: List[str],
                mode: str,
                out_path_prefix: str,
                cleanup: bool = True) -> None:
    """
    Render a video by combining all the images of type mode for each sample_data.
    :param nuim: NuImages instance.
    :param sd_tokens: All sample_data tokens in chronological order.
    :param mode: The mode - see render_images().
    :param out_path_prefix: The file prefix used for the images and video.
    :param cleanup: Whether to delete images after rendering the video.
    """
    # First render one still image per frame.
    frame_paths = []
    frame_index = 0
    for sd_token in sd_tokens:
        frame_path = '%s_%d.jpg' % (out_path_prefix, frame_index)
        write_image(nuim, sd_token, mode, frame_path)
        frame_paths.append(frame_path)
        frame_index += 1

    # Open the video writer using the first frame's dimensions (w, h).
    reference_im = cv2.imread(frame_paths[0])
    playback_hz = 2  # Display frequency (Hz).
    writer = cv2.VideoWriter('%s.avi' % out_path_prefix,
                             cv2.VideoWriter_fourcc(*'MJPG'),
                             playback_hz,
                             reference_im.shape[1::-1])

    # Append each frame, optionally deleting the temporary still afterwards.
    for frame_path in frame_paths:
        writer.write(cv2.imread(frame_path))
        if cleanup:
            os.remove(frame_path)

    # Finalize video.
    writer.release()
def write_image(nuim: NuImages, sd_token: str, mode: str, out_path: str) -> None:
    """
    Render a single image of type mode for the given sample_data.
    :param nuim: NuImages instance.
    :param sd_token: The sample_data token.
    :param mode: The mode - see render_images().
    :param out_path: The file to write the image to.
    """
    # 'annotated' and 'image' differ only in the annotation_type argument.
    annotation_type_by_mode = {'annotated': 'all', 'image': 'none'}
    if mode in annotation_type_by_mode:
        nuim.render_image(sd_token, annotation_type=annotation_type_by_mode[mode],
                          out_path=out_path)
    elif mode == 'trajectory':
        sample_data = nuim.get('sample_data', sd_token)
        nuim.render_trajectory(sample_data['sample_token'], out_path=out_path)
    else:
        raise Exception('Error: Unknown mode %s!' % mode)

    # Trigger garbage collection to avoid memory overflow from the render functions.
    gc.collect()
if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser(description='Render a random selection of images and save them to disk.')
    # String-valued options: (flag, default).
    str_options = [
        ('--version', 'v1.0-mini'),
        ('--dataroot', '/data/sets/nuimages'),
        ('--mode', 'all'),
        ('--cam_name', None),
        ('--log_name', None),
        ('--out_type', 'image'),
        ('--out_dir', '~/Downloads/nuImages'),
    ]
    for flag, default in str_options:
        arg_parser.add_argument(flag, type=str, default=default)
    arg_parser.add_argument('--seed', type=int, default=42)  # Set to 0 to disable.
    arg_parser.add_argument('--verbose', type=int, default=1)
    arg_parser.add_argument('--sample_limit', type=int, default=50)
    arg_parser.add_argument('--filter_categories', action='append')
    args = arg_parser.parse_args()

    # Set random seed for reproducible image selection (0 disables seeding).
    if args.seed != 0:
        random.seed(args.seed)

    # Initialize NuImages class.
    nuim_ = NuImages(version=args.version, dataroot=args.dataroot, verbose=bool(args.verbose), lazy=False)

    # Render images.
    render_images(nuim_,
                  mode=args.mode,
                  cam_name=args.cam_name,
                  log_name=args.log_name,
                  sample_limit=args.sample_limit,
                  filter_categories=args.filter_categories,
                  out_type=args.out_type,
                  out_dir=args.out_dir)
| 42.614035 | 120 | 0.656546 |
3f23c6741b5a4eb8f1708037600b9e1ee26ac16e | 10,868 | py | Python | version_info.py | sairam4123/GodotReleaseScriptPython | 2fd2644b0301f20b89b6772a0c93cec6d012f080 | [
"MIT"
] | null | null | null | version_info.py | sairam4123/GodotReleaseScriptPython | 2fd2644b0301f20b89b6772a0c93cec6d012f080 | [
"MIT"
] | null | null | null | version_info.py | sairam4123/GodotReleaseScriptPython | 2fd2644b0301f20b89b6772a0c93cec6d012f080 | [
"MIT"
] | null | null | null | import re
from configparser import ConfigParser
from constants import PROJECT_FOLDER, RELEASE_LEVEL_DICT
from release_type import ReleaseLevel, ReleaseType, value_from_key
if __name__ == '__main__':  # Test script: walk a release sequence and print each state.
    # Each entry is one (release_level, release_type) increment, applied in
    # order -- identical to the original hand-unrolled sequence.
    increments = (
        [(ReleaseLevel.alpha, ReleaseType.minor)] * 2
        + [(ReleaseLevel.beta, ReleaseType.minor)] * 4
        + [(ReleaseLevel.release_candidate, ReleaseType.minor)] * 5
        + [(ReleaseLevel.public, ReleaseType.minor)]
        + [(ReleaseLevel.public, ReleaseType.bugfix)] * 2
        + [(ReleaseLevel.public, ReleaseType.hotfix)]
        + [(ReleaseLevel.alpha, ReleaseType.minor)]
        + [(ReleaseLevel.release_candidate, ReleaseType.minor)]
        + [(ReleaseLevel.public, ReleaseType.minor)]
        + [(ReleaseLevel.alpha, ReleaseType.major)] * 4
        + [(ReleaseLevel.beta, ReleaseType.major)] * 6
        + [(ReleaseLevel.release_candidate, ReleaseType.major)] * 4
        + [(ReleaseLevel.public, ReleaseType.major)]
        + [(ReleaseLevel.public, ReleaseType.hotfix)] * 2
        + [(ReleaseLevel.alpha, ReleaseType.minor)]
    )

    index = 0
    version_info = VersionInfo(1, 0, 0, 0, ReleaseLevel.public, None, ReleaseType.major)
    print(index, version_info)
    for level, rel_type in increments:
        index += 1
        version_info.increment(level, release_type=rel_type)
        print(index, version_info)
    index += 1

    # Round-trip the final state through the Godot export format and parse
    # it back with the same regex as before.
    _version = version_info.convert_to_godot_format()
    print(_version)
    _pattern: re.Pattern = re.compile(r"(\d)\.(\d)\.?(\d)?\.?(\d)?\.?([a-z]{1,2})?(\d{1,3})?")
    _match: re.Match = _pattern.match(_version.replace('"', ''))
    print(index, VersionInfo(*_match.groups()))
| 39.234657 | 118 | 0.656054 |
3f2514948f103576dc7043e1528909e26cdfc7f7 | 2,302 | py | Python | test/test_create_json_items_from_embark_xml.py | ndlib/mellon-search | 30f7eb267e35d77ee6d126789866d44d825c3e0c | [
"Apache-2.0"
] | null | null | null | test/test_create_json_items_from_embark_xml.py | ndlib/mellon-search | 30f7eb267e35d77ee6d126789866d44d825c3e0c | [
"Apache-2.0"
] | null | null | null | test/test_create_json_items_from_embark_xml.py | ndlib/mellon-search | 30f7eb267e35d77ee6d126789866d44d825c3e0c | [
"Apache-2.0"
] | null | null | null | # test_create_json_items_from_embark_xml.py 2/18/19 sm
""" test create_json_items_from_embark_xml.py """
import sys
import json
import unittest
import csv
from xml.etree.ElementTree import ElementTree, tostring
# add parent directory to path
import os
import inspect
CURRENTDIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
PARENTDIR = os.path.dirname(CURRENTDIR)
sys.path.insert(0, PARENTDIR)
import create_json_items_from_embark_xml
def suite():
    """Collect the tests from the module-level ``Test`` case into a suite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(Test)
if __name__ == '__main__':
suite()
unittest.main()
| 36.539683 | 113 | 0.682884 |
3f272482b04c8aa1d417b0e37326c6eff1cef597 | 3,000 | py | Python | plot/laikago/plot_task.py | MaxxWilson/ASE389Project | 13c3c72887e27fbed2eef63c1e27b4a185036a39 | [
"MIT"
] | 17 | 2021-05-31T10:55:48.000Z | 2022-03-30T10:09:37.000Z | plot/laikago/plot_task.py | MaxxWilson/ASE389Project | 13c3c72887e27fbed2eef63c1e27b4a185036a39 | [
"MIT"
] | 2 | 2021-10-01T22:11:43.000Z | 2021-12-06T02:34:33.000Z | plot/laikago/plot_task.py | MaxxWilson/ASE389Project | 13c3c72887e27fbed2eef63c1e27b4a185036a39 | [
"MIT"
] | 3 | 2021-08-24T00:53:18.000Z | 2022-03-31T17:29:07.000Z | import os
import sys
cwd = os.getcwd()
sys.path.append(cwd)
import pickle
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from plot.helper import plot_task, plot_weights, plot_rf_z_max, plot_rf_quad, plot_vector_traj
tasks = [
'com_pos', 'com_vel', 'chassis_quat', 'chassis_ang_vel', 'toeFL_pos',
'toeFL_vel', 'toeFR_pos', 'toeFR_vel', 'toeRR_pos', 'toeRR_vel',
'toeRL_pos', 'toeRL_vel'
]
weights = [
'w_com', 'w_chassis_ori', 'w_toeFL', 'w_toeFR', 'w_toeRR', 'w_toeRL'
]
rf_z = ['rf_z_max_toeFL', 'rf_z_max_toeFR', 'rf_z_max_toeRR', 'rf_z_max_toeRL']
time = []
phase = []
rf_cmd = []
des, act = dict(), dict()
for topic in tasks:
des[topic] = []
act[topic] = []
w = dict()
for topic in weights:
w[topic] = []
rf_z_max = dict()
for topic in rf_z:
rf_z_max[topic] = []
with open('data/pnc.pkl', 'rb') as file:
while True:
try:
d = pickle.load(file)
time.append(d['time'])
phase.append(d['phase'])
for topic in tasks:
des[topic].append(d[topic + '_des'])
act[topic].append(d[topic])
for topic in weights:
w[topic].append(d[topic])
for topic in rf_z:
rf_z_max[topic].append(d[topic])
rf_cmd.append(d['rf_cmd'])
except EOFError:
break
for k, v in des.items():
des[k] = np.stack(v, axis=0)
for k, v in act.items():
act[k] = np.stack(v, axis=0)
rf_cmd = np.stack(rf_cmd, axis=0)
phase = np.stack(phase, axis=0)
## =============================================================================
## Plot Task
## =============================================================================
plot_task(time, des['com_pos'], act['com_pos'], des['com_vel'], act['com_vel'],
phase, 'com lin')
plot_task(time, des['chassis_quat'], act['chassis_quat'],
des['chassis_ang_vel'], act['chassis_ang_vel'], phase, 'pelvis ori')
plot_task(time, des['toeFL_pos'], act['toeFL_pos'], des['toeFL_vel'],
act['toeFL_vel'], phase, 'left foot lin')
plot_task(time, des['toeFR_pos'], act['toeFR_pos'], des['toeFR_vel'],
act['toeFR_vel'], phase, 'left foot ori')
plot_task(time, des['toeRR_pos'], act['toeRR_pos'], des['toeRR_vel'],
act['toeRR_vel'], phase, 'right foot lin')
plot_task(time, des['toeRL_pos'], act['toeRL_pos'], des['toeRL_vel'],
act['toeRL_vel'], phase, 'right foot ori')
## =============================================================================
## Plot WBC Solutions
## =============================================================================
plot_rf_quad(time, rf_cmd, phase)
## =============================================================================
## Plot Weights and Max Reaction Force Z
## =============================================================================
plot_weights(time, w, phase)
plot_rf_z_max(time, rf_z_max, phase)
plt.show()
| 29.411765 | 94 | 0.515333 |
3f272ad913a6368c2dd0e9360ea0f0c8243524c5 | 3,504 | py | Python | h/views/api/users.py | bibliotechie/h | 16e275f79ef7d1086971bd30ef403501c6b93beb | [
"BSD-2-Clause"
] | null | null | null | h/views/api/users.py | bibliotechie/h | 16e275f79ef7d1086971bd30ef403501c6b93beb | [
"BSD-2-Clause"
] | null | null | null | h/views/api/users.py | bibliotechie/h | 16e275f79ef7d1086971bd30ef403501c6b93beb | [
"BSD-2-Clause"
] | null | null | null | from pyramid.httpexceptions import HTTPConflict
from h.auth.util import client_authority
from h.presenters import TrustedUserJSONPresenter
from h.schemas import ValidationError
from h.schemas.api.user import CreateUserAPISchema, UpdateUserAPISchema
from h.services.user_unique import DuplicateUserError
from h.views.api.config import api_config
from h.views.api.exceptions import PayloadError
def _json_payload(request):
try:
return request.json_body
except ValueError as err:
raise PayloadError() from err
| 31.567568 | 81 | 0.710616 |
3f2894b54d3e8597c52938f696795d8309755127 | 239 | py | Python | controllers/social_auth/kivyauth/__init__.py | richierh/SalesKivyMD | f445adc701946ff38865b4a1a00a03529142613e | [
"MIT"
] | 126 | 2020-06-12T15:02:19.000Z | 2022-03-31T10:13:29.000Z | controllers/social_auth/kivyauth/__init__.py | richierh/SalesKivyMD | f445adc701946ff38865b4a1a00a03529142613e | [
"MIT"
] | 13 | 2020-07-01T01:03:26.000Z | 2022-02-21T02:21:24.000Z | controllers/social_auth/kivyauth/__init__.py | richierh/SalesKivyMD | f445adc701946ff38865b4a1a00a03529142613e | [
"MIT"
] | 22 | 2020-06-12T22:24:27.000Z | 2022-03-10T13:24:33.000Z | from kivy.logger import Logger
from kivy.utils import platform
__version__ = "2.3.2"
_log_message = "KivyAuth:" + f" {__version__}" + f' (installed at "{__file__}")'
__all__ = ("login_providers", "auto_login")
Logger.info(_log_message)
| 23.9 | 80 | 0.723849 |
3f28d1e2f76100adc00945a0759d254a0a1638b4 | 20 | py | Python | RDS/circle3_central_services/research_manager/src/api/User/__init__.py | Sciebo-RDS/Sciebo-RDS | d71cf449ed045a2a7a049e2cb77c99fd5a9195bd | [
"MIT"
] | 10 | 2020-06-24T08:22:24.000Z | 2022-01-13T16:17:36.000Z | RDS/circle3_central_services/research_manager/src/api/User/__init__.py | Sciebo-RDS/Sciebo-RDS | d71cf449ed045a2a7a049e2cb77c99fd5a9195bd | [
"MIT"
] | 78 | 2020-01-23T14:32:06.000Z | 2022-03-07T14:11:16.000Z | RDS/circle3_central_services/research_manager/src/api/User/__init__.py | Sciebo-RDS/Sciebo-RDS | d71cf449ed045a2a7a049e2cb77c99fd5a9195bd | [
"MIT"
] | 1 | 2020-06-24T08:33:48.000Z | 2020-06-24T08:33:48.000Z | from .user import * | 20 | 20 | 0.7 |
3f2a269b65ae0fe9d318d9013769d0a87c6d1a66 | 2,475 | py | Python | cesium_app/app_server.py | yaowenxi/cesium | b87c8bcafc8a7707877f8b9e9b111a2a99b5aeee | [
"BSD-3-Clause"
] | 41 | 2016-10-10T23:14:54.000Z | 2021-07-08T19:44:14.000Z | cesium_app/app_server.py | cesium-ml/cesium_web | 6dd9977ff037982d50f740bfb62012b508eebd29 | [
"BSD-3-Clause"
] | 200 | 2016-06-22T19:55:38.000Z | 2022-03-22T18:42:19.000Z | cesium_app/app_server.py | yaowenxi/cesium | b87c8bcafc8a7707877f8b9e9b111a2a99b5aeee | [
"BSD-3-Clause"
] | 26 | 2016-04-21T00:50:03.000Z | 2019-11-04T20:19:53.000Z | import tornado.web
import os
import sys
import pathlib
from baselayer.app.config import Config
from . import models
from baselayer.app import model_util
# This provides `login`, `complete`, and `disconnect` endpoints
from social_tornado.routes import SOCIAL_AUTH_ROUTES
from .handlers import (
ProjectHandler,
DatasetHandler,
FeatureHandler,
PrecomputedFeaturesHandler,
ModelHandler,
PredictionHandler,
FeatureListHandler,
SklearnModelsHandler,
PlotFeaturesHandler,
PredictRawDataHandler
)
def make_app(cfg, baselayer_handlers, baselayer_settings):
    """Create and return a `tornado.web.Application` object with specified
    handlers and settings.
    Parameters
    ----------
    cfg : Config
        Loaded configuration. Can be specified with '--config'
        (multiple uses allowed).
    baselayer_handlers : list
        Tornado handlers needed for baselayer to function.
    baselayer_settings : cfg
        Settings needed for baselayer to function.
    Returns
    -------
    tornado.web.Application
        The configured application, with the database connection
        initialized and tables created.
    """
    # Warn loudly when the default (insecure) cookie secret is still in use.
    if baselayer_settings['cookie_secret'] == 'abc01234':
        print('!' * 80)
        print(' Your server is insecure. Please update the secret string ')
        print(' in the configuration file!')
        print('!' * 80)
    # Ensure every configured data/output directory exists before startup.
    for path_name, path in cfg['paths'].items():
        if not os.path.exists(path):
            print("Creating %s" % path)
            try:
                os.makedirs(path)
            except Exception as e:
                # Best-effort: report the failure but keep going.
                print(e)
    # REST routes: resources accept an optional trailing id segment, and
    # some also expose a '(download)' action as a second capture group.
    handlers = baselayer_handlers + [
        (r'/project(/.*)?', ProjectHandler),
        (r'/dataset(/.*)?', DatasetHandler),
        (r'/features(/[0-9]+)?', FeatureHandler),
        (r'/features/([0-9]+)/(download)', FeatureHandler),
        (r'/precomputed_features(/.*)?', PrecomputedFeaturesHandler),
        (r'/models(/[0-9]+)?', ModelHandler),
        (r'/models/([0-9]+)/(download)', ModelHandler),
        (r'/predictions(/[0-9]+)?', PredictionHandler),
        (r'/predictions/([0-9]+)/(download)', PredictionHandler),
        (r'/predict_raw_data', PredictRawDataHandler),
        (r'/features_list', FeatureListHandler),
        (r'/sklearn_models', SklearnModelsHandler),
        (r'/plot_features/(.*)', PlotFeaturesHandler)
    ]
    settings = baselayer_settings
    # settings.update({})  # Specify additional settings here
    app = tornado.web.Application(handlers, **settings)
    # Initialize the DB connection and create any missing tables at startup.
    models.init_db(**cfg['database'])
    model_util.create_tables()
    return app
| 30.555556 | 76 | 0.641212 |
3f2ad7646cc054828d1f34022e2ec7ed31b8f6a0 | 903 | py | Python | tests/test_building.py | sietekk/elevator | 5058d36df323cc31a078c7016c57cc7f4488fcdc | [
"MIT"
] | null | null | null | tests/test_building.py | sietekk/elevator | 5058d36df323cc31a078c7016c57cc7f4488fcdc | [
"MIT"
] | null | null | null | tests/test_building.py | sietekk/elevator | 5058d36df323cc31a078c7016c57cc7f4488fcdc | [
"MIT"
] | null | null | null | #
# Copyright (c) 2016 Michael Conroy
#
from elevator.building import (
Building,
Floor,
DEFAULT_FLOOR_QTY,
DEFAULT_ELEVATOR_QTY,
)
from elevator.elevator import Elevator
| 24.405405 | 66 | 0.660022 |
3f2bc27a1667776823d6302c9923d489d7a4ce6b | 707 | py | Python | Modulo_3/semana 2/miercoles/main.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | [
"MIT"
] | null | null | null | Modulo_3/semana 2/miercoles/main.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | [
"MIT"
] | null | null | null | Modulo_3/semana 2/miercoles/main.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | [
"MIT"
] | 1 | 2022-03-04T00:57:18.000Z | 2022-03-04T00:57:18.000Z | import tkinter as tk
from presentacion.formulario import FormularioPersona
# Build and run the main Tk window; any failure during start-up is
# reported to the console instead of crashing with a traceback.
try:
    ventana=tk.Tk()
    # NOTE(review): `centrar_ventana` is not imported in this file, so this
    # call presumably raises NameError (caught below) — confirm where the
    # helper lives and add the missing import.
    centrar_ventana(ventana)
    ventana.title("Formulario")
    form = FormularioPersona(ventana)
    ventana.mainloop()
except Exception as e:
    # User-facing message is Spanish: "There is an error".
    print("Existe un error : ", e)
| 30.73913 | 77 | 0.663366 |
3f2bff3972bb90e7ea59576e6eccf7d56961ec7e | 196 | py | Python | Voting/urls.py | Poornartha/Odonata | 71e8dfc4e8d93c6ecc1a3a155459b7e43bd28cdb | [
"MIT"
] | null | null | null | Voting/urls.py | Poornartha/Odonata | 71e8dfc4e8d93c6ecc1a3a155459b7e43bd28cdb | [
"MIT"
] | null | null | null | Voting/urls.py | Poornartha/Odonata | 71e8dfc4e8d93c6ecc1a3a155459b7e43bd28cdb | [
"MIT"
] | null | null | null | from django.urls import path
from .views import teams_all, team_vote
urlpatterns = [
path('teams/all', teams_all, name="teams_all"),
path('teams/<int:pk>', team_vote, name="team_vote"),
] | 28 | 56 | 0.704082 |
3f2fbc4cbbd9085f9e4653f26ebfd93f8e5ea745 | 5,594 | py | Python | models/3-Whats goin on/train_code/resnext50/train.py | cns-iu/HuBMAP---Hacking-the-Kidney | 1a41c887f8edb0b52f5afade384a17dc3d3efec4 | [
"MIT"
] | null | null | null | models/3-Whats goin on/train_code/resnext50/train.py | cns-iu/HuBMAP---Hacking-the-Kidney | 1a41c887f8edb0b52f5afade384a17dc3d3efec4 | [
"MIT"
] | null | null | null | models/3-Whats goin on/train_code/resnext50/train.py | cns-iu/HuBMAP---Hacking-the-Kidney | 1a41c887f8edb0b52f5afade384a17dc3d3efec4 | [
"MIT"
] | null | null | null | from Dataset import *
from Network import *
from Functions import *
import os
from fastai.distributed import *
import argparse
import torch
try:
#from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
from apex.multi_tensor_apply import multi_tensor_applier
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
from tqdm import tqdm
opts=get_args()
#set up gpu
os.environ["CUDA_VISIBLE_DEVICES"] = opts.gpu_id
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
os.system('mkdir models')
os.system('mkdir logs')
#dice = Dice_th_pred(np.arange(0.2,0.7,0.01))
#datasets and dataloaders
dataset = HuBMAPDataset(path=opts.path, fold=opts.fold, nfolds=opts.nfolds, train=True, tfms=get_aug())
val_dataset = HuBMAPDataset(path=opts.path, fold=opts.fold, nfolds=opts.nfolds, train=False)
dataloader = DataLoader(dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.workers, drop_last=True)
val_dataloader = DataLoader(val_dataset, batch_size=opts.batch_size, shuffle=False, num_workers=opts.workers, drop_last=True)
#model and optimizer
model = UneXt50().cuda()
#optimizer = Ranger(model.parameters(), lr=opts.lr, weight_decay=opts.weight_decay)
optimizer = torch.optim.Adam(model.parameters(), lr=opts.lr, weight_decay=opts.weight_decay)
# scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.1, div_factor=1e3,
# max_lr=1e-3, epochs=opts.epochs, steps_per_epoch=len(dataloader))
criterion=nn.BCEWithLogitsLoss()
opt_level = 'O1'
model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level)
model = nn.DataParallel(model)
####### Transfer learning #######
# Optionally warm-start the model from a previously trained checkpoint.
# NOTE(review): the checkpoint path is hard-coded to fold4 regardless of
# `opts.fold` — confirm this is intentional before reusing for other folds.
if opts.transfer == 1:
    best_model_path = "models_scratch/fold4.pth"
    state_dict = torch.load(best_model_path)
    model.load_state_dict(state_dict)
#some more things
logger=CSVLogger(['epoch','train_loss','val_loss','dice_coef'],f"logs/log_fold{opts.fold}.csv")
metric=Dice_soft()
best_metric=0
#training
scheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.2, div_factor=1e2, max_lr=1e-4, epochs=opts.epochs, steps_per_epoch=len(dataloader))
# ---- Training / validation loop ----------------------------------------
# Each epoch: one mixed-precision (apex amp) pass over the training data,
# then a no-grad validation pass that crops `opts.expansion` border pixels
# before scoring, and a checkpoint save whenever the metric improves.
for epoch in range(opts.epochs):
    train_loss = 0
    model.train(True)
    for data in tqdm(dataloader):
        img = data['img'].to(device)
        mask = data['mask'].to(device)
        img = cutout(img)  # cutout augmentation on the input batch
        output = model(img)
        loss = criterion(output, mask)
        # amp scales the loss to avoid fp16 gradient underflow.
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
        train_loss += loss.item()
    train_loss /= len(dataloader)
    print(f"### validating for epoch {epoch} ###")
    val_loss = 0
    model.eval()
    metric.reset()
    with torch.no_grad():
        for data in tqdm(val_dataloader):
            img = data['img'].to(device)
            mask = data['mask'].to(device)
            # Drop the last sample of an odd-sized batch.
            # Bug fix: this check previously ran *before* img/mask were
            # loaded for the current batch, so it truncated the previous
            # batch's stale tensors and was then overwritten — a no-op.
            if img.shape[0] % 2 != 0:
                img = img[:-1]
                mask = mask[:-1]
            # Crop the padded border before computing loss/metric.
            lo = opts.expansion // 2
            hi = -opts.expansion // 2
            output = model(img)[:, :, lo:hi, lo:hi]
            output[output != output] = 0  # zero out NaNs (NaN != NaN)
            mask = mask[:, :, lo:hi, lo:hi]
            metric.accumulate(output.detach(), mask)
            loss = criterion(output, mask)
            val_loss += loss.item()
    val_loss /= len(val_dataloader)
    metric_this_epoch = metric.value
    logger.log([epoch + 1, train_loss, val_loss, metric_this_epoch])
    # Checkpoint only when the validation metric improves.
    if metric_this_epoch > best_metric:
        torch.save(model.state_dict(), f'models/fold{opts.fold}.pth')
        best_metric = metric_this_epoch
3f302d5ab9b39b181ba1fa9fc5436b10d4114686 | 6,220 | py | Python | models/node/node.py | InfoCoV/Multi-Cro-CoV-cseBERT | 5edc8e6d9c7de285c8fbb537b72f8f8b081d531d | [
"MIT"
] | null | null | null | models/node/node.py | InfoCoV/Multi-Cro-CoV-cseBERT | 5edc8e6d9c7de285c8fbb537b72f8f8b081d531d | [
"MIT"
] | null | null | null | models/node/node.py | InfoCoV/Multi-Cro-CoV-cseBERT | 5edc8e6d9c7de285c8fbb537b72f8f8b081d531d | [
"MIT"
] | 1 | 2022-02-17T14:32:13.000Z | 2022-02-17T14:32:13.000Z | """
NODE model definition and experiment setup.
Neural Oblivious Decision Ensembles for Deep Learning on Tabular Data
https://arxiv.org/abs/1909.06312
Model details:
https://pytorch-tabular.readthedocs.io/en/latest/models/#nodemodel
"""
import logging
import os.path
import shutil
from sklearn.metrics import classification_report
from omegaconf import OmegaConf
import optuna
from optuna.samplers import TPESampler
from pytorch_tabular import TabularModel
from pytorch_tabular.models import NodeConfig
from pytorch_tabular.config import (
DataConfig, OptimizerConfig, TrainerConfig, ExperimentConfig)
from pytorch_tabular.utils import get_class_weighted_cross_entropy
from optuna_utils import OptunaExperiments, run_experiments
LOGGER = logging.getLogger(__name__)
LABEL_COL = "retweet_label"
# updated by train.py before running
config = OmegaConf.create(
{"max_epochs": 50,
"lr_exp_min": -4,
"lr_exp_max": -3,
"alpha_exp_min": -4,
"alpha_exp_max": -3,
"batch_exp_min": 7,
"batch_exp_max": 8,
"num_trees_min": 512,
"num_trees_max": 2560,
"num_trees_step": 512,
"depth_min": 4,
"depth_max": 6,
"categorical_cols": [
"entities.urls", "entities.media", "user_in_net",
"has_covid_keyword", "user.followers_isna",
"users_mention_isna", "following_users_isna",
"users_reply_isna"],
"exp_log_freq": 100,
"seed": 1,
"num_workers": 24,
"embed_categorical": True}
)
| 31.414141 | 78 | 0.646302 |
3f30899107200b08356b7f18f040b82026d98005 | 8,570 | py | Python | dtf/packages/models.py | WebPowerLabs/django-trainings | 97f7a96c0fbeb85a001201c74713f7944cb77236 | [
"BSD-3-Clause"
] | null | null | null | dtf/packages/models.py | WebPowerLabs/django-trainings | 97f7a96c0fbeb85a001201c74713f7944cb77236 | [
"BSD-3-Clause"
] | null | null | null | dtf/packages/models.py | WebPowerLabs/django-trainings | 97f7a96c0fbeb85a001201c74713f7944cb77236 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from django.core.urlresolvers import reverse
from djnfusion import server, key
from django.conf import settings
from jsonfield import JSONField
# TODO: change to this. Currently doesnt work. may have something to do with
# the server not in __init__
# from packages.providers.infusionsoft import server, key
from .managers import InfusionsoftTagManager, PackagePurchaseManager
from packages.managers import PackageManager
| 38.430493 | 96 | 0.626604 |
3f3094c51e91fa31dccfedc07336034240d0cf3e | 1,438 | py | Python | chap 2/2_1.py | hmhuy2000/Reinforcement-Learning-SuttonBartoI | 97ca9dc11c4cb4fda74b144e658c3eac756131ff | [
"MIT"
] | null | null | null | chap 2/2_1.py | hmhuy2000/Reinforcement-Learning-SuttonBartoI | 97ca9dc11c4cb4fda74b144e658c3eac756131ff | [
"MIT"
] | null | null | null | chap 2/2_1.py | hmhuy2000/Reinforcement-Learning-SuttonBartoI | 97ca9dc11c4cb4fda74b144e658c3eac756131ff | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from tqdm import trange
def get_result(e):
    """Run one epsilon-greedy pass over ``CFG.t`` bandit steps.

    Parameters
    ----------
    e : float
        Exploration probability: with probability ``e`` a random arm is
        pulled, otherwise the arm with the highest estimated mean.

    Returns
    -------
    list of float
        Reward obtained at each of the ``CFG.t`` steps.
    """
    # Fresh set of CFG.n arms with true means drawn from N(0, 1).
    bandits = [bandit(np.random.randn(), CFG.variance) for _ in range(CFG.n)]
    res = []
    for _ in range(CFG.t):
        if np.random.random() < e:
            # Explore: pick a random arm.
            choose = np.random.choice(CFG.n)
        else:
            # Exploit: pick the arm with the best estimated mean.
            choose = np.argmax([ban.mean for ban in bandits])
        val = bandits[choose].get_reward()
        res.append(val)
        bandits[choose].update(val)
    return res
# Figure 2.1: average reward vs. step, one curve per epsilon setting.
plt.figure(figsize=(20, 10))
for e in CFG.esp:
    res = np.zeros(CFG.t)
    # Average the reward trace over CFG.n_try independent runs.
    for tr in trange(CFG.n_try):
        res += get_result(e)
    print(res.shape)
    res /= CFG.n_try
    # print(res)
    plt.plot(res, label = e)
    print(f'done {e}')
plt.xlabel('step')
plt.ylabel('average reward')
plt.legend()
plt.savefig('figure_2_1.png')
plt.show()
| 21.147059 | 76 | 0.553547 |
3f312f416c35a4ef754ae001c14b305991e498d6 | 2,080 | py | Python | jetavator_databricks_client/setup.py | jetavator/jetavator_databricks | 719c934b6391f6f41ca34b4d4df8c697c1a25283 | [
"Apache-2.0"
] | null | null | null | jetavator_databricks_client/setup.py | jetavator/jetavator_databricks | 719c934b6391f6f41ca34b4d4df8c697c1a25283 | [
"Apache-2.0"
] | null | null | null | jetavator_databricks_client/setup.py | jetavator/jetavator_databricks | 719c934b6391f6f41ca34b4d4df8c697c1a25283 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import io
import os
from setuptools import setup, find_packages
# Package metadata
# ----------------
SHORT_NAME = 'databricks_client'
NAME = 'jetavator_databricks_client'
DESCRIPTION = (
'Databricks support for the Jetavator engine '
'to be installed on the client system'
)
URL = 'https://github.com/jetavator/jetavator'
EMAIL = 'joetaylorconsulting@gmail.com'
AUTHOR = 'Joe Taylor'
REQUIRES_PYTHON = '>=3.7.0'
VERSION = None
# What packages are required for this module to be executed?
REQUIRED = [
    'jetavator>=0.1.5',
    'lazy-property>=0.0.1,<1',
    'databricks-cli>=0.14.1,<0.15',
    # Bug fix: was 'nbformat>=5.0.8>,<6' — the stray '>' made the version
    # specifier invalid per PEP 440, so `pip install` would reject it.
    'nbformat>=5.0.8,<6',
    'azure-storage-queue>=12.1.5,<13',
    'azure-storage-blob>=12.7.1,<13'
]
# What packages are optional?
EXTRAS = {
# 'some-feature': ['requirement'],
}
# Package setup
# -------------
# Import the README and use it as the long description
here = os.path.abspath(os.path.dirname(__file__))
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Import the LICENSE
with open(os.path.join(here, 'LICENSE')) as f:
license_text = f.read()
# Load the package's __version__.py module as a dictionary
about = {}
if not VERSION:
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests', 'docs')),
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license=license_text,
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7'
],
entry_points={'jetavator.plugins': f'{SHORT_NAME} = {NAME}'}
)
| 24.186047 | 73 | 0.662019 |
3f31a6445fe5a4545fbddede8dd570fd945d12b3 | 596 | py | Python | web-interface/app/application/misc/pages/misc_options/purposes_sampling.py | horvathi94/seqmeta | 94f2f04c372181c93a6f68b6efe15b141ef02779 | [
"MIT"
] | null | null | null | web-interface/app/application/misc/pages/misc_options/purposes_sampling.py | horvathi94/seqmeta | 94f2f04c372181c93a6f68b6efe15b141ef02779 | [
"MIT"
] | null | null | null | web-interface/app/application/misc/pages/misc_options/purposes_sampling.py | horvathi94/seqmeta | 94f2f04c372181c93a6f68b6efe15b141ef02779 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from .base import _MiscOptionBase
from application.src.misc.sampling import PurposesOfSampling
| 24.833333 | 60 | 0.718121 |
3f327b3abcfcf0bb203bb9dab1e2f88e290b8007 | 1,816 | py | Python | src/python3/sdp/scripts/FWR_Postprocess/nstx_singlechannel_analysis.py | LeiShi/Synthetic-Diagnostics-Platform | 870120d3fd14b2a3c89c6e6e85625d1e9109a2de | [
"BSD-3-Clause"
] | 5 | 2019-08-16T22:08:19.000Z | 2021-02-24T02:47:05.000Z | src/python3/sdp/scripts/FWR_Postprocess/nstx_singlechannel_analysis.py | justthepython/Synthetic-Diagnostics-Platform | 5f1cb5c29d182490acbd4f3c167f0e09ec211236 | [
"BSD-3-Clause"
] | 1 | 2016-05-11T12:58:00.000Z | 2016-05-11T17:18:36.000Z | src/python3/sdp/scripts/FWR_Postprocess/nstx_singlechannel_analysis.py | justthepython/Synthetic-Diagnostics-Platform | 5f1cb5c29d182490acbd4f3c167f0e09ec211236 | [
"BSD-3-Clause"
] | 5 | 2018-04-29T12:35:59.000Z | 2020-01-10T03:38:30.000Z | import sdp.scripts.load_nstx_exp_ref as nstx_exp
#import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp
import pickle
import numpy as np
with open('/p/gkp/lshi/XGC1_NSTX_Case/FullF_XGC_ti191_output/ref_pos.pck','r') as f:
ref_pos = pickle.load(f)
channel = 9
nt = 50
llim = 1e-7
ulim = 1e-4
time_array = np.linspace(llim,ulim,nt)
cs_mean = np.zeros((nt))
cs_median = np.zeros((nt))
cs_std = np.zeros((nt))
| 26.705882 | 102 | 0.669053 |
3f33208b87772b3914f2f7e4b5518f6f944741b9 | 1,236 | py | Python | tests/integration/suites/default/reboot.py | bularcasergiu/Anjay | a76399199dc9569d58aebc4bf18c494ca2127292 | [
"Apache-2.0"
] | null | null | null | tests/integration/suites/default/reboot.py | bularcasergiu/Anjay | a76399199dc9569d58aebc4bf18c494ca2127292 | [
"Apache-2.0"
] | null | null | null | tests/integration/suites/default/reboot.py | bularcasergiu/Anjay | a76399199dc9569d58aebc4bf18c494ca2127292 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2017-2020 AVSystem <avsystem@avsystem.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from framework.lwm2m_test import *
| 34.333333 | 75 | 0.719256 |
3f33ca3b9b3ca0aa03c1d9c5a2a2a55f778438b0 | 22,381 | py | Python | astrodet/scarlet.py | lyf1436/astrodet | 72d69fe38c9d744620866682e36a03c668c284f2 | [
"MIT"
] | null | null | null | astrodet/scarlet.py | lyf1436/astrodet | 72d69fe38c9d744620866682e36a03c668c284f2 | [
"MIT"
] | 2 | 2021-04-15T03:03:24.000Z | 2021-04-26T19:41:57.000Z | astrodet/scarlet.py | lyf1436/astrodet | 72d69fe38c9d744620866682e36a03c668c284f2 | [
"MIT"
] | 3 | 2021-03-18T14:08:12.000Z | 2021-10-08T04:26:14.000Z | import sys, os
import numpy as np
import scarlet
import sep
from astropy.io import ascii
import astropy.io.fits as fits
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from astropy.wcs import WCS
def write_scarlet_results(datas, observation, starlet_sources, model_frame, catalog_deblended,
                          segmentation_masks, dirpath, filters, s):
    """
    Save the scarlet deblending results to FITS files.

    For every filter this writes one image file (the full observed image)
    and one model file whose extensions hold the rendered model of each
    detected source; optionally a single segmentation-mask file (one
    extension per source) is written for the whole image.

    Parameters
    ----------
    datas: array
        array of Data objects
    observation: scarlet function
        Scarlet observation objects
    starlet_sources: list
        List of ScarletSource objects
    model_frame: scarlet function
        Image frame of source model
    catalog_deblended: list
        Deblended source detection catalog
    segmentation_masks: list
        List of segmentation mask of each object in image; may be None
    dirpath : str
        Path to HSC image file directory
    filters : list
        A list of filters for your images. Default is ['g', 'r', 'i'].
    s : str
        File basename string

    Returns
    -------
    filenames : dict
        Dictionary of all paths to the saved scarlet files for this
        dataset: image and model files per filter, plus one total
        segmentation mask file for all filters.
    """

    def _make_hdr(starlet_source, cat):
        """
        Build a FITS header holding the bounding box, area and ellipse
        parameters of one deblended source.

        Parameters
        ----------
        starlet_source: starlet_source
            starlet_source object for source k
        cat: dict
            catalog row for source k (must contain 'a', 'b', 'theta')

        Returns
        -------
        model_hdr : astropy.io.fits.Header
            FITS header for source k with catalog metadata
        """
        # Bounding box (x0, y0, w, h) in absolute floating pixel coordinates.
        # bbox.shape is (channels, height, width); bbox.origin is (c, y, x).
        bbox_h = starlet_source.bbox.shape[1]
        bbox_w = starlet_source.bbox.shape[2]
        # Bug fix: the y-center previously used bbox_w/2; the vertical
        # offset must be half the box *height*.
        bbox_y = starlet_source.bbox.origin[1] + int(np.floor(bbox_h/2))  # y-coord of the source's center
        bbox_x = starlet_source.bbox.origin[2] + int(np.floor(bbox_w/2))  # x-coord of the source's center
        # Ellipse parameters (a, b, theta) from the deblend catalog.
        ell_parm = np.concatenate((cat['a'], cat['b'], cat['theta']))
        model_hdr = fits.Header()
        model_hdr['bbox'] = ','.join(map(str, [bbox_x, bbox_y, bbox_w, bbox_h]))
        model_hdr['area'] = bbox_w * bbox_h
        model_hdr['ell_parm'] = ','.join(map(str, list(ell_parm)))
        # TODO: set category_id based on whether the source is extended
        model_hdr['cat_id'] = 1  # Category ID
        return model_hdr

    segmask_hdul = []
    model_hdul = []
    filenames = {}
    # Save the full image and the per-source models for each filter/channel.
    for i, f in enumerate(filters):
        f = f.upper()
        # Primary HDU is the full image in this band.
        img_hdu = fits.PrimaryHDU(data=datas[i])
        # One ImageHDU per deblended source, with catalog metadata header.
        for k, (src, cat) in enumerate(zip(starlet_sources, catalog_deblended)):
            # Render source k's model onto the observation frame and crop
            # it to the source's own bounding box.
            model = starlet_sources[k].get_model(frame=model_frame)
            model = observation.render(model)
            model = src.bbox.extract_from(model)
            model_hdr = _make_hdr(starlet_sources[k], cat)
            model_hdu = fits.ImageHDU(data=model[i], header=model_hdr)
            model_primary = fits.PrimaryHDU()
            # NOTE(review): model_hdul is never reset between filters, so
            # the file written for filter i also contains the HDUs of all
            # earlier filters — confirm whether this accumulation is
            # intentional before changing it.
            model_hdul.append(model_hdu)
        save_img_hdul = fits.HDUList([img_hdu])
        save_model_hdul = fits.HDUList([model_primary, *model_hdul])
        # Record the output paths for this band and write the files.
        filenames[f'img_{f}'] = os.path.join(dirpath, f'{f}-{s}_scarlet_img.fits')
        save_img_hdul.writeto(filenames[f'img_{f}'], overwrite=True)
        filenames[f'model_{f}'] = os.path.join(dirpath, f'{f}-{s}_scarlet_model.fits')
        save_model_hdul.writeto(filenames[f'model_{f}'], overwrite=True)
    # If we have segmentation mask data, save them as a separate fits file
    # (a single file for all filters; one extension per source).
    if segmentation_masks is not None:
        for k, (src, cat) in enumerate(zip(starlet_sources, catalog_deblended)):
            segmask_hdr = _make_hdr(starlet_sources[k], cat)
            segmask_hdu = fits.ImageHDU(data=segmentation_masks[k], header=segmask_hdr)
            segmask_primary = fits.PrimaryHDU()
            segmask_hdul.append(segmask_hdu)
        save_segmask_hdul = fits.HDUList([segmask_primary, *segmask_hdul])
        # NOTE(review): `f` here is whatever the last filter in the loop
        # was, so the segmask filename carries that band's prefix.
        filenames['segmask'] = os.path.join(dirpath, f'{f}-{s}_scarlet_segmask.fits')
        save_segmask_hdul.writeto(filenames['segmask'], overwrite=True)
    return filenames
def plot_stretch_Q(datas, stretches=(0.01, 0.1, 0.5, 1), Qs=(1, 10, 5, 100)):
    """
    Plot a grid of asinh normalizations of the image over all (stretch, Q)
    parameter combinations, to help pick an optimal display normalization.

    Parameters
    ----------
    stretches : sequence of float
        Stretch parameters to try for the image normalization.
        Default is (0.01, 0.1, 0.5, 1).
    Qs : sequence of float
        Q parameters to try for the image normalization.
        Default is (1, 10, 5, 100).

    Code adapted from:
        https://pmelchior.github.io/scarlet/tutorials/display.html

    Returns
    -------
    fig : Figure object
    """
    # Defaults are immutable tuples rather than lists, avoiding the
    # shared-mutable-default-argument pitfall; behavior is unchanged.
    fig, ax = plt.subplots(len(stretches), len(Qs), figsize=(9, 9))
    for i, stretch in enumerate(stretches):
        for j, Q in enumerate(Qs):
            asinh = scarlet.display.AsinhMapping(minimum=0, stretch=stretch, Q=Q)
            # Scale the RGB channels for the image
            img_rgb = scarlet.display.img_to_rgb(datas, norm=asinh)
            ax[i][j].imshow(img_rgb)
            ax[i][j].set_title("Stretch {}, Q {}".format(stretch, Q))
            ax[i][j].axis('off')
    return fig
def make_catalog(datas, lvl=4, wave=True, segmentation_map=False, maskthresh=10.0, object_limit=100000):
    """
    Creates a detection catalog by combining low and high resolution data

    Parameters
    ----------
    datas: array or pair
        Either a single np.ndarray image cube, or a (low-res, high-res) pair
        of Data objects that are interpolated and combined into one
        detection image
    lvl: int
        detection lvl (threshold passed to sep.extract)
    wave: Bool
        set to True to use wavelet decomposition of images before combination
    segmentation_map : Bool
        Whether to run sep segmentation map
    maskthresh : float
        Mask threshold for sep segmentation
    object_limit : int
        Limit on number of objects to detect in image

    Code adapted from https://pmelchior.github.io/scarlet/tutorials/wavelet_model.html

    Returns
    -------
    catalog: sextractor catalog
        catalog of detected sources (use 'catalog.dtype.names' for info)
    bkg_rms: array or list
        MAD-wavelet background estimate for each data set (always computed;
        there is no subtract_background switch in this function)
    """
    if type(datas) is np.ndarray:
        # Single cube: normalize each channel by its total flux
        hr_images = datas / np.sum(datas, axis=(1, 2))[:, None, None]
        # Detection image as the sum over all images
        detect_image = np.sum(hr_images, axis=0)
    else:
        data_lr, data_hr = datas
        # Create observations for each image
        # Interpolate low resolution to high resolution
        interp = interpolate(data_lr, data_hr)
        # Normalization of the interpolate low res images
        interp = interp / np.sum(interp, axis=(1, 2))[:, None, None]
        # Normalisation of the high res data
        hr_images = data_hr.images / np.sum(data_hr.images, axis=(1, 2))[:, None, None]
        # Detection image as the sum over all images
        detect_image = np.sum(interp, axis=0) + np.sum(hr_images, axis=0)
        # Rescale back to the flux level of the high-res data
        detect_image *= np.sum(data_hr.images)
    # NOTE(review): np.size(tuple) counts the tuple's elements, so this
    # branch fires only for a 4-D detection image -- which the sums above do
    # not appear to produce; confirm this branch is reachable.
    if np.size(detect_image.shape) == 4:
        if wave:
            # Wavelet detection in the first three levels
            wave_detect = scarlet.Starlet(detect_image.mean(axis=0), lvl=5).coefficients
            # Zero out the coarsest scale before reconstructing
            wave_detect[:, -1, :, :] = 0
            detect = scarlet.Starlet(coefficients=wave_detect).image
        else:
            # Direct detection
            detect = detect_image.mean(axis=0)
    else:
        if wave:
            # Sum the three finest wavelet scales as the detection image
            wave_detect = scarlet.Starlet(detect_image).coefficients
            detect = wave_detect[0][0] + wave_detect[0][1] + wave_detect[0][2]
        else:
            detect = detect_image
    bkg = sep.Background(detect)
    # Set the limit on the number of sub-objects when deblending.
    sep.set_sub_object_limit(object_limit)
    # Extract detection catalog with segmentation maps!
    # Can use this to retrieve ellipse params
    catalog = sep.extract(detect, lvl, err=bkg.globalrms, segmentation_map=segmentation_map, maskthresh=maskthresh)
    # Estimate background
    if type(datas) is np.ndarray:
        bkg_rms = scarlet.wavelet.mad_wavelet(datas)
    else:
        bkg_rms = []
        for data in datas:
            bkg_rms.append(scarlet.wavelet.mad_wavelet(data.images))
    return catalog, bkg_rms
def fit_scarlet_blend(starlet_sources, observation, max_iters=15, e_rel=1e-4, plot_likelihood=True):
    """
    Fit a scarlet Blend model to the given sources and observation.

    Parameters
    ----------
    starlet_sources : list
        scarlet source models to fit
    observation :
        scarlet Observation the blend is matched against
    max_iters : int
        maximum number of optimization iterations; the fit ends early if the
        likelihood and constraints converge
    e_rel : float
        relative error tolerance for convergence
    plot_likelihood : bool
        if True, plot the log-likelihood as a function of iteration

    Returns
    -------
    starlet_blend : scarlet.Blend
        the fitted blend model
    logL : float
        final log-likelihood of the fit

    Raises
    ------
    AssertionError
        re-raised from scarlet when the blend cannot be fit (e.g. no
        detections).
    """
    print("Fitting Blend model.")
    try:
        starlet_blend = scarlet.Blend(starlet_sources, observation)
        it, logL = starlet_blend.fit(max_iters, e_rel=e_rel)
        print(f"Scarlet ran for {it} iterations to logL = {logL}")
    except AssertionError as e1:
        # Bug fix: the old handler printed len(catalog) -- a name undefined in
        # this scope -- and then fell through to `return` with starlet_blend
        # and logL unbound. Report the failure and re-raise instead.
        print(f"Scarlet blend fit failed: {e1}")
        raise
    if plot_likelihood:
        scarlet.display.show_likelihood(starlet_blend)
        plt.show()
    return starlet_blend, logL
def _plot_wavelet(datas):
    """
    Plot diagnostic figures for the scarlet starlet (wavelet) transform:
    the coefficients at each scale, then the original image, its inverse
    transform, and their absolute difference.

    Parameters
    ----------
    datas: array
        array of Data objects

    Returns
    -------
    """
    # Run the starlet transform on the input images
    starlet_transform = scarlet.Starlet(datas, lvl=5, direct=True)
    # The transform coefficients as an array
    coeffs = starlet_transform.coefficients
    # Inverse transform, used below to check the original image is recovered
    reconstructed = starlet_transform.image

    # TODO: Clean this code up using plt.subplots()
    # One panel per wavelet scale for the first slice of images
    n_scales = coeffs.shape[1]
    plt.figure(figsize=(n_scales*5+5,5))
    plt.suptitle('Wavelet coefficients')
    for scale_idx in range(n_scales):
        plt.subplot(1, n_scales, scale_idx+1)
        plt.title('scale' + str(scale_idx+1))
        plt.imshow(coeffs[0,scale_idx], cmap='inferno')
        plt.colorbar()
    plt.show()

    # Compare the original image against its starlet reconstruction
    plt.figure(figsize=(30,10))
    plt.subplot(131)
    plt.title('Original image', fontsize=20)
    plt.imshow(datas[0], cmap='inferno')
    plt.colorbar()
    plt.subplot(132)
    plt.title('Starlet-reconstructed image', fontsize=20)
    plt.imshow(reconstructed[0], cmap='inferno')
    plt.colorbar()
    plt.subplot(133)
    plt.title('Absolute difference', fontsize=20)
    plt.imshow((np.abs(reconstructed[0]-datas[0])), cmap='inferno')
    plt.colorbar()
    plt.show()
    return
def _plot_scene(starlet_sources, observation, norm, catalog, show_model=True, show_rendered=True,
               show_observed=True, show_residual=True, add_labels=True, add_boxes=True,
               add_ellipses=True):
    """
    Helper function to plot scene with scarlet

    Parameters
    ----------
    starlet_sources: List
        List of ScarletSource objects
    observation:
        Scarlet observation objects
    norm:
        Scarlet normalization for plotting
    catalog: list
        Source detection catalog
    show_model: bool
        Whether to show model
    show_rendered: bool
        Whether to show rendered model
    show_observed: bool
        Whether to show observed
    show_residual: bool
        Whether to show residual
    add_labels: bool
        Whether to add labels
    add_boxes: bool
        Whether to add bounding boxes to each panel
    add_ellipses: bool
        Whether to add ellipses to each panel

    Returns
    -------
    fig : matplotlib Figure
        Figure object
    """
    fig = scarlet.display.show_scene(starlet_sources, observation=observation, norm=norm,
                                     show_model=show_model, show_rendered=show_rendered,
                                     show_observed=show_observed, show_residual=show_residual,
                                     add_labels=add_labels, add_boxes=add_boxes)
    for panel in fig.axes:
        if add_ellipses:
            # Overlay a sep ellipse for every detection in the catalog.
            # See https://sextractor.readthedocs.io/en/latest/Position.html
            for src_row in catalog:
                ell = Ellipse(xy=(src_row['x'], src_row['y']),
                              width=6*src_row['a'],
                              height=6*src_row['b'],
                              angle=np.rad2deg(src_row['theta']))
                ell.set_facecolor('none')
                ell.set_edgecolor('white')
                panel.add_artist(ell)
        panel.axis('off')
    fig.subplots_adjust(wspace=0.01)
    plt.show()
    return fig
def run_scarlet(datas, filters, stretch=0.1, Q=5, sigma_model=1, sigma_obs=5,
               subtract_background=False, max_chi2=5000, max_iters=15, morph_thresh=0.1,
               starlet_thresh=0.1, lvl=5, lvl_segmask=2, maskthresh=0.025,
               segmentation_map=True, plot_wavelet=False, plot_likelihood=True,
               plot_scene=False, plot_sources=False, add_ellipses=True,
               add_labels=False, add_boxes=False):
    """ Run P. Melchior's scarlet (https://github.com/pmelchior/scarlet) implementation
    for source separation. This function will create diagnostic plots, a source detection catalog,
    and fit a model for all sources in the observation scene (image).

    Parameters
    ----------
    datas : array
        multichannel image data to deblend
    filters : list
        channel (filter) names matching the first axis of `datas`
    stretch, Q : float
        asinh display normalization parameters
    sigma_model, sigma_obs : float
        Gaussian PSF sigmas for the model frame and the observation
    subtract_background : boolean
        Whether to weight pixels by the inverse background variance (often
        the background is already subtracted). Default is False
    max_chi2 : float
        chi^2 threshold above which a source is re-fit as a StarletSource
    max_iters : int
        maximum number of blend-fit iterations
    morph_thresh, starlet_thresh : float
        morphology / starlet thresholds passed to the scarlet source models
    lvl : int
        detection level for the initial source catalog
    lvl_segmask : int
        detection level used when re-detecting each deblended model
    maskthresh : float
        threshold used to build the per-source segmentation masks
    segmentation_map : boolean
        Whether to build per-source segmentation masks. Default is True
    plot_wavelet : boolean
        Plot starlet wavelet transform and inverse transform at different scales.
        NOTE: Not really useful at large image sizes (> ~few hundred pixels length/height)
        Default is False
    plot_likelihood : boolean
        Plot likelihood as function of iterations from Blend fit function. Default is True
    plot_scene : boolean
        Plot full scene with the rendered model, observation, and residual. Default is False.
    plot_sources : boolean
        Plot the model, rendered model, observation, and spectrum across channels for each object.
        WARNING: dumb to do this with a large image with many sources! Default is False
    add_ellipses, add_labels, add_boxes : boolean
        plot annotations forwarded to the scene/source plots

    Returns
    -------
    observation, starlet_sources, model_frame, catalog, catalog_deblended, segmentation_masks
    """
    norm = scarlet.display.AsinhMapping(minimum=0, stretch=stretch, Q=Q)
    # Generate source catalog using wavelets
    catalog, bg_rms_hsc = make_catalog(datas, lvl, wave=True)
    # If image is already background subtracted, weights are set to 1
    if subtract_background:
        weights = np.ones_like(datas) / (bg_rms_hsc**2)[:, None, None]
    else:
        weights = np.ones_like(datas)
    print("Source catalog found ", len(catalog), "objects")

    # Plot wavelet transform at different scales
    if plot_wavelet:
        _plot_wavelet(datas)

    # Define model frame and observations:
    model_psf = scarlet.GaussianPSF(sigma=sigma_model) #, boxsize=100)
    model_frame = scarlet.Frame(datas.shape, psf=model_psf, channels=filters)
    observation_psf = scarlet.GaussianPSF(sigma=sigma_obs)
    observation = scarlet.Observation(datas, psf=observation_psf, weights=weights, channels=filters).match(model_frame)

    # Initialize starlet sources to be fit. Assume extended sources for all because
    # we are not looking at all detections in each image
    # TODO: Plot chi2 vs. binned size and mag. Implement conidition if chi2 > xxx then
    # add another component until larger sources are modeled well
    print("Initializing starlet sources to be fit.")
    # Compute radii and spread of sources
    Rs = np.sqrt(catalog['a']**2 + catalog['b']**2)
    spread = Rs/sigma_obs
    # Array of chi^2 residuals computed after fit on each model
    chi2s = np.zeros(len(catalog))

    # Loop through detections in catalog
    starlet_sources = []
    for k, src in enumerate(catalog):
        # Is the source compact relative to the PSF?
        if spread[k] < 1:
            compact = True
        else:
            compact = False
        # Try modeling each source as a single ExtendedSource first
        new_source = scarlet.ExtendedSource(model_frame, (src['y'], src['x']), observation,
                                            K=1, thresh=morph_thresh, compact=compact)
        starlet_sources.append(new_source)

    # Fit scarlet blend
    starlet_blend, logL = fit_scarlet_blend(starlet_sources, observation, max_iters=max_iters, plot_likelihood=plot_likelihood)
    print("Computing residuals.")
    # Compute reduced chi^2 for each rendered sources
    for k, src in enumerate(starlet_sources):
        model = src.get_model(frame=model_frame)
        model = observation.render(model)
        res = datas - model
        # Compute in bbox only
        res = src.bbox.extract_from(res)
        chi2s[k] = np.sum(res**2)
        # Replace models with poor fits with StarletSource models
        if chi2s[k] > max_chi2:
            starlet_sources[k] = scarlet.StarletSource(model_frame,
                                                       (catalog["y"][k], catalog["x"][k]), observation,
                                                       thresh=morph_thresh, starlet_thresh=starlet_thresh,
                                                       full=False)

    # If any chi2 residuals are flagged, re-fit the blend with a more complex model
    if np.any(chi2s > max_chi2):
        print("Re-fitting with Starlet models for poorly-fit sources.")
        starlet_blend, logL = fit_scarlet_blend(starlet_sources, observation, max_iters=max_iters, plot_likelihood=plot_likelihood)

    # Extract the deblended catalog and update the chi2 residuals
    print('Extracting deblended catalog.')
    catalog_deblended = []
    segmentation_masks = []
    for k, src in enumerate(starlet_sources):
        model = src.get_model(frame=model_frame)
        model = observation.render(model)
        # Compute in bbox only
        model = src.bbox.extract_from(model)
        # Run sep
        try:
            cat, _ = make_catalog(model, lvl_segmask, wave=False, segmentation_map=False, maskthresh=maskthresh)
        except Exception:
            # Bug fix: this was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Only real errors are caught now.
            print(f'Exception with source {k}')
            cat = []
        #if segmentation_map == True:
        #    cat, mask = cat
        # If more than 1 source is detected for some reason (e.g. artifacts)
        if len(cat) > 1:
            # keep the brightest
            idx = np.argmax([c['cflux'] for c in cat])
            cat = cat[idx]
        #    if segmentation_map == True:
        #        mask = mask[idx]
        # If failed to detect model source
        if len(cat) == 0:
            # Fill with nan
            cat = [np.full(catalog[0].shape, np.nan, dtype=catalog.dtype)]
        # Append to full catalog
        if segmentation_map:
            # For some reason sep doesn't like these images, so do the segmask ourselves for now
            model_det = np.array(model[0,:,:])
            mask = np.zeros_like(model_det)
            mask[model_det>maskthresh] = 1
            segmentation_masks.append(mask)
            #plt.imshow(mask)
            #plt.show()
        catalog_deblended.append(cat)
    # Combine catalog named array
    catalog_deblended = np.vstack(catalog_deblended)

    # Plot scene: rendered model, observations, and residuals
    if plot_scene:
        _plot_scene(starlet_sources, observation, norm, catalog, show_model=False, show_rendered=True,
                    show_observed=True, show_residual=True, add_labels=add_labels, add_boxes=add_boxes, add_ellipses=add_ellipses)
    # Plot each for each source
    if plot_sources:
        scarlet.display.show_sources(starlet_sources, observation, norm=norm,
                                     show_rendered=True, show_observed=True,
                                     add_boxes=add_boxes)
        plt.show()
    return observation, starlet_sources, model_frame, catalog, catalog_deblended, segmentation_masks
3f348185cd12109292cb8c384d3bec9afb87b02b | 193 | py | Python | serve.py | haiyoumeiyou/cherrybrigde | f00a0592240b60cc42b895ad194b0273485956d0 | [
"BSD-3-Clause"
] | null | null | null | serve.py | haiyoumeiyou/cherrybrigde | f00a0592240b60cc42b895ad194b0273485956d0 | [
"BSD-3-Clause"
] | null | null | null | serve.py | haiyoumeiyou/cherrybrigde | f00a0592240b60cc42b895ad194b0273485956d0 | [
"BSD-3-Clause"
] | null | null | null | from application import bootstrap
# Initialize the application (wires up whatever `application.bootstrap` does
# before the server starts).
bootstrap()
# When run directly, start the CherryPy engine and block until shutdown.
if __name__=='__main__':
    import cherrypy
    # Hook OS signal handlers, start the engine, and keep the main thread
    # alive until the engine stops.
    cherrypy.engine.signals.subscribe()
    cherrypy.engine.start()
    cherrypy.engine.block()
| 19.3 | 39 | 0.720207 |
3f3537a4e2a9c606bd390358c783d299bde031c0 | 2,125 | py | Python | ooobuild/lo/smarttags/x_range_based_smart_tag_recognizer.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/smarttags/x_range_based_smart_tag_recognizer.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/smarttags/x_range_based_smart_tag_recognizer.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.smarttags
import typing
from abc import abstractmethod
from ..lang.x_initialization import XInitialization as XInitialization_d46c0cca
if typing.TYPE_CHECKING:
from ..frame.x_controller import XController as XController_b00e0b8f
from .smart_tag_recognizer_mode import SmartTagRecognizerMode as SmartTagRecognizerMode_9179119e
from ..text.x_text_markup import XTextMarkup as XTextMarkup_a5d60b3a
from ..text.x_text_range import XTextRange as XTextRange_9a910ab7
__all__ = ['XRangeBasedSmartTagRecognizer']
| 42.5 | 215 | 0.777412 |
3f3653bf5b35e045e2b4c2aeff6f681433eea55f | 924 | py | Python | apprest/plugins/icat/views/ICAT.py | acampsm/calipsoplus-backend | b66690124bd2f2541318ddb83b18e082b5df5676 | [
"MIT"
] | 4 | 2018-12-04T15:08:27.000Z | 2019-04-11T09:49:41.000Z | apprest/plugins/icat/views/ICAT.py | acampsm/calipsoplus-backend | b66690124bd2f2541318ddb83b18e082b5df5676 | [
"MIT"
] | 63 | 2018-11-22T13:07:56.000Z | 2021-06-10T20:55:58.000Z | apprest/plugins/icat/views/ICAT.py | AlexRogalskiy/calipsoplus-backend | 3f6b034f16668bc154b0f4b759ed62b055f41647 | [
"MIT"
] | 10 | 2018-11-23T08:17:28.000Z | 2022-01-15T23:41:59.000Z | from rest_framework import status
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from apprest.plugins.icat.helpers.complex_encoder import JsonResponse
from apprest.plugins.icat.services.ICAT import ICATService
| 34.222222 | 100 | 0.787879 |
3f374d2a724eacf4543f9a4bee934b7b700f04f6 | 396 | py | Python | python/phevaluator/table_tests/test_hashtable8.py | StTronn/PokerHandEvaluator | 3611a7072c2a62844d6aca32d798aafa59e4606d | [
"Apache-2.0"
] | 1 | 2020-11-12T14:35:02.000Z | 2020-11-12T14:35:02.000Z | python/phevaluator/table_tests/test_hashtable8.py | StTronn/PokerHandEvaluator | 3611a7072c2a62844d6aca32d798aafa59e4606d | [
"Apache-2.0"
] | null | null | null | python/phevaluator/table_tests/test_hashtable8.py | StTronn/PokerHandEvaluator | 3611a7072c2a62844d6aca32d798aafa59e4606d | [
"Apache-2.0"
] | null | null | null | import unittest
from table_tests.utils import BaseTestNoFlushTable
from evaluator.hashtable8 import NO_FLUSH_8
# Run the hash-table test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 23.294118 | 52 | 0.765152 |
3f37c1453442f813e72c82a4ee9d8d0dd3dbc36c | 921 | py | Python | Marcelina_Skoczylas_praca_domowa_3.py | marcelinaskoczylas/python_wprowadzenie_warsztaty_2021 | 9614d791b253a15a117960deadb7375c681e8a27 | [
"MIT"
] | 1 | 2021-11-12T10:17:02.000Z | 2021-11-12T10:17:02.000Z | Marcelina_Skoczylas_praca_domowa_3.py | marcelinaskoczylas/python_wprowadzenie_warsztaty_2021 | 9614d791b253a15a117960deadb7375c681e8a27 | [
"MIT"
] | 3 | 2021-11-07T12:27:46.000Z | 2021-12-11T18:20:58.000Z | Marcelina_Skoczylas_praca_domowa_3.py | marcelinaskoczylas/python_wprowadzenie_warsztaty_2021 | 9614d791b253a15a117960deadb7375c681e8a27 | [
"MIT"
] | 23 | 2021-11-07T12:16:58.000Z | 2021-11-26T21:03:29.000Z | #zadanie 1
# Build the first 50 Fibonacci numbers: each new term is the sum of the
# two most recent terms already in the list.
ciag = [1, 1]
while len(ciag) < 50:
    ciag.append(ciag[-1] + ciag[-2])
print(ciag)
#zadanie 2
wpisane=str(input("Prosz wpisa dowolne sowa po przecinku "))
zmienne=wpisane.split(",")
def funkcja(*args):
    '''Check the length of each word against a user-supplied threshold.

    Returns a tuple of (percentage of words longer than the threshold,
    list of all words, list of words longer than the threshold).
    '''
    lista = []
    lista2 = []
    wartosc = int(input("Prosz wpisa jak warto "))
    for arg in args:
        lista.append(arg)
        if len(arg) > wartosc:
            lista2.append(arg)
    procenty = (len(lista2) / len(lista)) * 100
    return procenty, lista, lista2
# Bug fix: unpack the word list so each word is a separate argument.
# Previously the whole list was passed as one argument, so the function
# measured the length of the list itself instead of each word.
print(funkcja(*zmienne))
#zadanie 3
# Read comma-separated values and drop consecutive duplicates.
# Bug fixes: the input is now split on commas (previously list() split it
# into single characters, commas included); each element is compared to the
# last value kept (previously a stale value captured before the loop was
# appended, and unikalna_lista was indexed out of range).
liczby = input("Prosz wpisa liczby po przecinku: ").split(",")
unikalna_lista = [liczby[0]]
for element in liczby[1:]:
    if element != unikalna_lista[-1]:
        unikalna_lista.append(element)
3f3831fca3eb8519b2004ca6b866229be692631e | 91 | py | Python | rh_pathfinding/src/rh_pathfinding/engine/geometry/obstacle/lineFinder/__init__.py | RhinohawkUAV/rh_ros | e13077060bdfcc231adee9731ebfddadcd8d6b4a | [
"MIT"
] | 4 | 2020-05-13T19:34:27.000Z | 2021-09-20T09:01:10.000Z | rh_pathfinding/src/rh_pathfinding/engine/geometry/obstacle/lineFinder/__init__.py | RhinohawkUAV/rh_ros | e13077060bdfcc231adee9731ebfddadcd8d6b4a | [
"MIT"
] | null | null | null | rh_pathfinding/src/rh_pathfinding/engine/geometry/obstacle/lineFinder/__init__.py | RhinohawkUAV/rh_ros | e13077060bdfcc231adee9731ebfddadcd8d6b4a | [
"MIT"
] | 2 | 2019-09-14T14:45:09.000Z | 2020-11-22T01:46:59.000Z | from linePathSegment import LinePathSegment
from lineSegmentFinder import LineSegmentFinder | 45.5 | 47 | 0.923077 |
3f39db2a6e3725e4d6d3a964e14a0df2e6772218 | 655 | py | Python | days/day01/part2.py | jaredbancroft/aoc2021 | 4eaf339cc0c8566da2af13f7cb9cf6fe87355aac | [
"MIT"
] | null | null | null | days/day01/part2.py | jaredbancroft/aoc2021 | 4eaf339cc0c8566da2af13f7cb9cf6fe87355aac | [
"MIT"
] | null | null | null | days/day01/part2.py | jaredbancroft/aoc2021 | 4eaf339cc0c8566da2af13f7cb9cf6fe87355aac | [
"MIT"
] | null | null | null | from helpers import inputs
| 31.190476 | 76 | 0.51145 |
3f3a04716997d73eaef4e151bd98036259ad059e | 1,183 | py | Python | src/unicon/plugins/nxos/n5k/service_statements.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | 18 | 2019-11-23T23:14:53.000Z | 2022-01-10T01:17:08.000Z | src/unicon/plugins/nxos/n5k/service_statements.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | 12 | 2020-11-09T20:39:25.000Z | 2022-03-22T12:46:59.000Z | src/unicon/plugins/nxos/n5k/service_statements.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | 32 | 2020-02-12T15:42:22.000Z | 2022-03-15T16:42:10.000Z | from unicon.eal.dialogs import Statement
from .service_patterns import NxosN5kReloadPatterns
from unicon.plugins.nxos.service_statements import (login_stmt, password_stmt,
enable_vdc, admin_password)
from unicon.plugins.generic.service_statements import (save_env,
auto_provision, auto_install_dialog,
setup_dialog, confirm_reset,
press_enter, confirm_config, module_reload, save_module_cfg,
secure_passwd_std, )
# for nxos n5k single rp reload
pat = NxosN5kReloadPatterns()
# Dialog statement that answers 'y' to the N5k reload confirmation prompt
# and lets the dialog continue.
reload_confirm_nxos = Statement(pattern=pat.reload_confirm_nxos,
                                action='sendline(y)',
                                loop_continue=True,
                                continue_timer=False)
# reload statement list for nxos n5k single-rp
# Ordered dialog statements handling the prompts seen during a single-RP
# N5k reload: save/confirm prompts, the reload confirmation above, login,
# and the post-boot setup/provisioning dialogs.
nxos_reload_statement_list = [save_env, confirm_reset, reload_confirm_nxos,
                              press_enter, login_stmt, password_stmt,
                              confirm_config, setup_dialog,
                              auto_install_dialog, module_reload,
                              save_module_cfg, secure_passwd_std,
                              admin_password, auto_provision, enable_vdc]
| 43.814815 | 78 | 0.658495 |
3f3a4efcbbb562167b72ebf68516d5cfd976b799 | 11,321 | py | Python | src/clic/cloud.py | NathanRVance/clic | e28f7f2686f5ac6689b384474e3fdfa4d207f6ec | [
"MIT"
] | 2 | 2017-12-13T03:41:07.000Z | 2019-03-12T14:08:42.000Z | src/clic/cloud.py | NathanRVance/clic | e28f7f2686f5ac6689b384474e3fdfa4d207f6ec | [
"MIT"
] | null | null | null | src/clic/cloud.py | NathanRVance/clic | e28f7f2686f5ac6689b384474e3fdfa4d207f6ec | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from clic import nodes
import time
import os
import logging as loggingmod
# Module-level logger for this plugin (note: the name `logging` deliberately
# shadows the stdlib module, which is imported above as `loggingmod`).
logging = loggingmod.getLogger('cloud')
logging.setLevel(loggingmod.WARNING)
def main():
    """Parse command-line arguments and dispatch cloud API commands."""
    import argparse
    from clic import version
    arg_parser = argparse.ArgumentParser(description='Execute cloud API commands')
    arg_parser.add_argument('-v', '--version', action='version', version=version.__version__)
    image_group = arg_parser.add_argument_group()
    image_group.add_argument('--image', metavar='NAME', nargs=1, help='Create an image from NAME')
    image_group.add_argument('--recreate', action='store_true', help='Recreate NAME after creating an image')
    parsed = arg_parser.parse_args()
    if parsed.image:
        # --image NAME supplied: build an image, optionally recreating NAME
        getCloud().makeImage(parsed.image[0], parsed.recreate)
| 45.103586 | 192 | 0.569473 |
3f3cb556d7979e79091d66cbe322dfcac371f91c | 3,185 | py | Python | HyperUnmixing/visualization.py | mdbresh/HyperUnmixing | 9ed1be74da48ff80298099497194efa2e97b7fbe | [
"MIT"
] | 1 | 2020-06-03T21:43:29.000Z | 2020-06-03T21:43:29.000Z | HyperUnmixing/visualization.py | mdbresh/HyperUnmixing | 9ed1be74da48ff80298099497194efa2e97b7fbe | [
"MIT"
] | 8 | 2020-04-13T22:12:03.000Z | 2020-05-01T21:37:16.000Z | HyperUnmixing/visualization.py | mdbresh/HyperUnmixing | 9ed1be74da48ff80298099497194efa2e97b7fbe | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import ipywidgets as widgets
import matplotlib.pyplot as plt
from skimage.measure import label, regionprops, regionprops_table
from skimage.color import label2rgb
def Wav_2_Im(im, wn):
    '''
    Min-max normalize every wavenumber slice of a 3-D datacube and return
    the normalized 2-D slice at one wavenumber.

    Parameters
    ----------
    im : array-like image.
        Input datacube with shape (rows, cols, wavenumbers).
    wn : integer.
        Wavenumber value; the slice index used is ``wn - 750`` (the cube is
        assumed to start at wavenumber 750).

    Returns
    ----------
    slice : ndarray.
        The 2-D normalized slice at the requested wavenumber.
    '''
    im = np.asarray(im, dtype=np.float32)
    # Per-slice minima/maxima over the spatial axes, shape (n_wavenumbers,)
    mins = im.min(axis=(0, 1))
    maxs = im.max(axis=(0, 1))
    # Min-max normalize each slice independently.
    # Bug fixes vs. the original: every slice is normalized (the old
    # linspace loop skipped the last slice and shifted the stored slices by
    # one index), and the np.int alias removed in NumPy 1.24 is gone.
    img_norm = (im - mins) / (maxs - mins)
    return img_norm[:, :, wn - 750]
def AreaFraction(im, norm_im, image_size):
    '''
    Input test image, normalized NMF coefficients image, and image size.
    Outputs a dictionary of computed properties for regions of interest,
    a multidimensional array containing threshold masks, and a list of
    computed area fractions for the areas of interest in each threshold mask.

    Parameters
    ----------
    im : array-like image.
        Image slice to measure (used as the intensity image for region props).
    norm_im : 2-D array-like image
        Image of normalized NMF coefficients.
    image_size : integer.
        Size of the (square) image; area fractions are relative to
        image_size**2 pixels.

    Returns
    ---------
    regions : dict.
        Dictionary mapping threshold index -> DataFrame of region properties.
    mask : multidimensional array-like image.
        Array of shape (rows, cols, n_thresholds) with one binary mask per
        threshold.
    area_frac : list.
        List of computed area fractions of the regions in each mask.
    '''
    # Thresholds 0.50, 0.55, ..., 0.95 (rounded to avoid float drift)
    percents = np.round(np.arange(0.5, 1.0, 0.05), 2)
    # Binary masks: 1 where the coefficient meets the threshold, 0 otherwise.
    # Vectorized over all thresholds at once -- the original triple Python
    # loop over every pixel and threshold produced the same result, and the
    # hardcoded channel count of 10 now follows len(percents).
    mask = (norm_im[:, :, None] >= percents).astype(float)

    # Compute region properties of labeled images
    regions = {}
    props = ('area', 'major_axis_length', 'minor_axis_length', 'mean_intensity')
    for i in range(mask.shape[2]):
        labels = label(mask[:, :, i])
        regions[i] = pd.DataFrame(regionprops_table(labels, im, props))

    # Area fraction of each labeled region relative to the full image
    area_frac = []
    for i in range(len(regions.keys())):
        area_frac.append(regions[i]['area'].values / image_size**2)

    return regions, mask, area_frac
def interactive_hyperimage(image, w=(750,1877,1)):
    '''
    Build an ipywidgets slider for browsing a hyperspectral image by
    wavenumber.

    input:
        image: 3D Hyperspectral image
        w: wavenumber slider range as (starting wavenumber, ending
            wavenumber, step). Default is (750, 1877, 1).
    output:
        interactive 2D image of hyperspectral image at desired wavenumber

    NOTE(review): `image` is not used in this body and `update` is not
    defined in the code visible here -- presumably `update` is a display
    callback defined alongside this function that closes over the image;
    confirm before relying on this.
    '''
    return widgets.interact(update, a=w)
| 25.277778 | 107 | 0.690424 |
3f3d41979c4cb0e2058dd2cdc43c80be671cc4fb | 2,758 | py | Python | velocileptors/Utils/loginterp.py | kokron/velocileptors | 50016dd66ec9a2d33effecc248a48ca7ea7322bf | [
"MIT"
] | null | null | null | velocileptors/Utils/loginterp.py | kokron/velocileptors | 50016dd66ec9a2d33effecc248a48ca7ea7322bf | [
"MIT"
] | null | null | null | velocileptors/Utils/loginterp.py | kokron/velocileptors | 50016dd66ec9a2d33effecc248a48ca7ea7322bf | [
"MIT"
] | null | null | null | import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as interpolate
from scipy.misc import derivative
import inspect
def loginterp(x, y, yint = None, side = "both", lorder = 9, rorder = 9, lp = 1, rp = -2,
              ldx = 1e-6, rdx = 1e-6,\
              interp_min = -12, interp_max = 12, Nint = 10**5, verbose=False, option='B'):
    '''
    Extrapolate function by evaluating a log-index of left & right side.

    Fits effective power laws y ~ x**n at both ends of the tabulated (x, y)
    and returns a callable that interpolates inside [x[l], x[r]] and follows
    those power laws outside.

    From Chirag Modi's CLEFT code at
    https://github.com/modichirag/CLEFT/blob/master/qfuncpool.py

    The warning for divergent power laws on both ends is turned off. To turn back on uncomment lines 26-33.

    Parameters (key ones)
    ---------------------
    x, y : arrays
        tabulated abscissae and values
    yint : callable or None
        spline of (x, y); built here (k=5) if not supplied
    side : str
        "l", "r", or "both"/"lr" -- which side(s) to extrapolate (option 'A' only)
    lp, rp : int
        indices of the left/right anchor points for the slope fit
    lorder/rorder, ldx/rdx : int, float
        order and relative step of the numerical derivative at each anchor
    option : str
        'A' densely samples the power-law tails and returns one spline;
        anything else ('B', the default) returns a lambda that switches
        between the two power laws and an interior spline.

    Returns
    -------
    yint2 : callable
        function of xx implementing interpolation + power-law extrapolation

    NOTE(review): scipy.misc.derivative (imported at module level) is
    deprecated and removed in recent SciPy releases -- confirm the pinned
    SciPy version before reusing this.
    '''
    if yint is None:
        yint = interpolate(x, y, k = 5)
    if side == "both":
        side = "lr"
    # Make sure there is no zero crossing between the edge points
    # If so assume there can't be another crossing nearby
    if np.sign(y[lp]) == np.sign(y[lp-1]) and np.sign(y[lp]) == np.sign(y[lp+1]):
        l = lp
    else:
        l = lp + 2
    if np.sign(y[rp]) == np.sign(y[rp-1]) and np.sign(y[rp]) == np.sign(y[rp+1]):
        r = rp
    else:
        r = rp - 2
    # Effective log-log slopes n = dlny/dlnx at the left/right anchor points
    lneff = derivative(yint, x[l], dx = x[l]*ldx, order = lorder)*x[l]/y[l]
    rneff = derivative(yint, x[r], dx = x[r]*rdx, order = rorder)*x[r]/y[r]
    #print(lneff, rneff)

    # uncomment if you like warnings.
    #if verbose:
    #    if lneff < 0:
    #        print( 'In function - ', inspect.getouterframes( inspect.currentframe() )[2][3])
    #        print('WARNING: Runaway index on left side, bad interpolation. Left index = %0.3e at %0.3e'%(lneff, x[l]))
    #    if rneff > 0:
    #        print( 'In function - ', inspect.getouterframes( inspect.currentframe() )[2][3])
    #        print('WARNING: Runaway index on right side, bad interpolation. Reft index = %0.3e at %0.3e'%(rneff, x[r]))

    if option == 'A':
        # Densely sample the power-law tails in log space, splice them onto
        # the tabulated interior, and fit one spline over the whole range.
        xl = np.logspace(interp_min, np.log10(x[l]), Nint)
        xr = np.logspace(np.log10(x[r]), interp_max, Nint)
        yl = y[l]*(xl/x[l])**lneff
        yr = y[r]*(xr/x[r])**rneff
        #print(xr/x[r])
        xint = x[l+1:r].copy()
        yint = y[l+1:r].copy()
        if side.find("l") > -1:
            xint = np.concatenate((xl, xint))
            yint = np.concatenate((yl, yint))
        if side.find("r") > -1:
            xint = np.concatenate((xint, xr))
            yint = np.concatenate((yint, yr))
        yint2 = interpolate(xint, yint, k = 5, ext=3)
    else:
        # Piecewise callable: left power law, right power law, and the
        # spline of the tabulated values in between.
        yint2 = lambda xx: (xx <= x[l]) * y[l]*(xx/x[l])**lneff \
                           + (xx >= x[r]) * y[r]*(xx/x[r])**rneff \
                           + (xx > x[l]) * (xx < x[r]) * interpolate(x, y, k = 5, ext=3)(xx)
    return yint2
| 37.27027 | 120 | 0.536983 |
3f3d5388905e53963d743e54574d98fe526396ec | 4,458 | py | Python | src/modeling/calc_target_scale.py | pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution | 0bc51075db31a747eeebb7f4775a3cd26ad5f870 | [
"MIT"
] | 44 | 2020-12-09T06:15:15.000Z | 2022-03-31T02:37:47.000Z | src/modeling/calc_target_scale.py | pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution | 0bc51075db31a747eeebb7f4775a3cd26ad5f870 | [
"MIT"
] | null | null | null | src/modeling/calc_target_scale.py | pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution | 0bc51075db31a747eeebb7f4775a3cd26ad5f870 | [
"MIT"
] | 7 | 2020-12-09T10:08:32.000Z | 2021-08-17T01:53:51.000Z | from typing import Tuple
import dataclasses
import numpy as np
import torch
from pathlib import Path
from l5kit.data import LocalDataManager, ChunkedDataset
import sys
import os
from tqdm import tqdm
sys.path.append(os.pardir)
sys.path.append(os.path.join(os.pardir, os.pardir))
from lib.evaluation.mask import load_mask_chopped
from lib.rasterization.rasterizer_builder import build_custom_rasterizer
from lib.dataset.faster_agent_dataset import FasterAgentDataset
from lib.utils.yaml_utils import save_yaml, load_yaml
from modeling.load_flag import load_flags, Flags
# Script entry point: load config/flags, open the Lyft l5kit training
# dataset, compute per-timestep target-scale statistics over a sample of
# agents, and cache them as .npz files next to the zarr dataset.
if __name__ == '__main__':
    mode = ""
    flags: Flags = load_flags(mode=mode)
    flags_dict = dataclasses.asdict(flags)
    cfg = load_yaml(flags.cfg_filepath)
    out_dir = Path(flags.out_dir)
    print(f"cfg {cfg}")
    os.makedirs(str(out_dir), exist_ok=True)
    print(f"flags: {flags_dict}")
    # Persist the exact flags/config used for this run alongside the outputs
    save_yaml(out_dir / 'flags.yaml', flags_dict)
    save_yaml(out_dir / 'cfg.yaml', cfg)
    debug = flags.debug
    # set env variable for data
    os.environ["L5KIT_DATA_FOLDER"] = flags.l5kit_data_folder
    dm = LocalDataManager(None)
    print("init dataset")
    train_cfg = cfg["train_data_loader"]
    valid_cfg = cfg["valid_data_loader"]
    # Build StubRasterizer for fast dataset access
    cfg["raster_params"]["map_type"] = "stub_debug"
    rasterizer = build_custom_rasterizer(cfg, dm)
    print("rasterizer", rasterizer)
    # In debug mode use the small sample scene instead of the full dataset
    train_path = "scenes/sample.zarr" if debug else train_cfg["key"]
    train_agents_mask = None
    if flags.validation_chopped:
        # Use chopped dataset to calc statistics...
        num_frames_to_chop = 100
        th_agent_prob = cfg["raster_params"]["filter_agents_threshold"]
        min_frame_future = 1
        num_frames_to_copy = num_frames_to_chop
        train_agents_mask = load_mask_chopped(
            dm.require(train_path), th_agent_prob, num_frames_to_copy, min_frame_future)
        print("train_path", train_path, "train_agents_mask", train_agents_mask.shape)
    train_zarr = ChunkedDataset(dm.require(train_path)).open(cached=False)
    print("train_zarr", type(train_zarr))
    print(f"Open Dataset {flags.pred_mode}...")
    train_agent_dataset = FasterAgentDataset(
        cfg, train_zarr, rasterizer, min_frame_history=flags.min_frame_history,
        min_frame_future=flags.min_frame_future, agents_mask=train_agents_mask
    )
    print("train_agent_dataset", len(train_agent_dataset))
    n_sample = 1_000_000 # Take 1M sample.
    # NOTE(review): calc_target_scale is not defined in this excerpt --
    # presumably defined earlier in this file; returns three per-timestep
    # statistics given how they are saved below.
    target_scale_abs_mean, target_scale_abs_max, target_scale_std = calc_target_scale(train_agent_dataset, n_sample)
    # Cache filenames encode the filtering parameters so different settings
    # do not collide.
    chopped_str = "_chopped" if flags.validation_chopped else ""
    agent_prob = cfg["raster_params"]["filter_agents_threshold"]
    filename = f"target_scale_abs_mean_{agent_prob}_{flags.min_frame_history}_{flags.min_frame_future}{chopped_str}.npz"
    cache_path = Path(train_zarr.path) / filename
    np.savez_compressed(cache_path, target_scale=target_scale_abs_mean)
    print("Saving to ", cache_path)
    filename = f"target_scale_abs_max_{agent_prob}_{flags.min_frame_history}_{flags.min_frame_future}{chopped_str}.npz"
    cache_path = Path(train_zarr.path) / filename
    np.savez_compressed(cache_path, target_scale=target_scale_abs_max)
    print("Saving to ", cache_path)
    filename = f"target_scale_std_{agent_prob}_{flags.min_frame_history}_{flags.min_frame_future}{chopped_str}.npz"
    cache_path = Path(train_zarr.path) / filename
    np.savez_compressed(cache_path, target_scale=target_scale_std)
    print("Saving to ", cache_path)
    print("target_scale_abs_mean", target_scale_abs_mean)
    print("target_scale_abs_max", target_scale_abs_max)
    print("target_scale_std", target_scale_std)
    # Drop into an interactive shell for inspection of the results
    import IPython; IPython.embed()
| 40.527273 | 120 | 0.746074 |
3f3d9f5e7b52389bd248948394c8302b4b2c0b67 | 3,679 | py | Python | examples/solvers using low level utilities/interior_laplace_neumann_panel_polygon.py | dbstein/pybie2d | 1c2d6c05f6dbb4f1ab4476d3824f4dde20f90d58 | [
"Apache-2.0"
] | 11 | 2018-10-26T17:34:29.000Z | 2020-04-27T21:21:33.000Z | examples/solvers using low level utilities/interior_laplace_neumann_panel_polygon.py | dbstein/pybie2d | 1c2d6c05f6dbb4f1ab4476d3824f4dde20f90d58 | [
"Apache-2.0"
] | null | null | null | examples/solvers using low level utilities/interior_laplace_neumann_panel_polygon.py | dbstein/pybie2d | 1c2d6c05f6dbb4f1ab4476d3824f4dde20f90d58 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import scipy as sp
import scipy.sparse
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.path
plt.ion()
import pybie2d
"""o solve an interior Modified Helmholtz problem
On a complicated domain using a global quadr
Demonstrate how to use the pybie2d package tature
This example demonstrates how to do this entirely using low-level routines,
To demonstrate both how to use these low level routines
And to give you an idea what is going on under the hood in the
higher level routines
"""
NG = 1000     # number of grid points per dimension on the evaluation grid
h_max = 0.01  # maximum panel size on the boundary
# extract some functions for easy calling
squish = pybie2d.misc.curve_descriptions.squished_circle
PPB = pybie2d.boundaries.panel_polygon_boundary.panel_polygon_boundary.Panel_Polygon_Boundary
Grid = pybie2d.grid.Grid
PointSet = pybie2d.point_set.PointSet
Laplace_Layer_Form = pybie2d.kernels.high_level.laplace.Laplace_Layer_Form
Laplace_Layer_Singular_Form = pybie2d.kernels.high_level.laplace.Laplace_Layer_Singular_Form
Laplace_Layer_Apply = pybie2d.kernels.high_level.laplace.Laplace_Layer_Apply
################################################################################
# define problem
# boundary: the unit square, discretized with dyadically refined panels
boundary = PPB([0,1,1,0], [0,0,1,1], [h_max]*4, [True]*4, dyadic_levels=20, dyadic_base=3)
# solution: the harmonic function u(x, y) = 2x + y
solution_func = lambda x, y: 2*x + y
bc = solution_func(boundary.x, boundary.y)  # Dirichlet data (unused below; the Neumann data bcn is used)
bcx = lambda x, y: 2.0*np.ones_like(x)  # du/dx
bcy = lambda x, y: 1.0*np.ones_like(x)  # du/dy
bcn = lambda x, y, nx, ny: bcx(x, y)*nx + bcy(x, y)*ny  # normal derivative du/dn
################################################################################
##### solve problem the hard way ###############################################
################################################################################
################################################################################
# find physical region
# (this implements a fast way to tell if points are in or out of the boundary)
# (and of course, for the squish boundary, we could easily figure out something
# faster, but this illustrates a general purpose routine)
# NOTE(review): the comment above mentions the 'squish' boundary, but this
# example uses a panel polygon; presumably copied from a sibling example.
gridp = Grid([0,1], NG, [0,1], NG, x_endpoints=[False,False], y_endpoints=[False,False])
################################################################################
# solve for the density
# singular double-layer potential operator on the boundary (ifdipole=True)
DLP = Laplace_Layer_Singular_Form(boundary, ifdipole=True)
# presumably the adjoint (D') form needed for the Neumann problem — confirm
SLPp = (DLP/boundary.weights).T*boundary.weights
A = 0.5*np.eye(boundary.N) + SLPp
tau = np.linalg.solve(A, bcn(boundary.x, boundary.y, boundary.normal_x, boundary.normal_y))
# fix the mean
# (the interior Neumann solution is only determined up to a constant; pin it
#  by matching the known solution value 2*0.5 + 0.5 at the point (0.5, 0.5))
target = PointSet(x=np.array((0.5)),y=np.array((0.5)))
good_eval = Laplace_Layer_Apply(boundary, target=target, charge=tau)
correction = (2*0.5 + 0.5) - good_eval
################################################################################
# naive evaluation
u = Laplace_Layer_Apply(boundary, gridp, charge=tau)
u = gridp.reshape(u)
u += correction
# NOTE(review): err_plot is not defined in this excerpt — confirm it exists in
# the full file before running.
err_plot(u)
################################################################################
# oversampled
hmax = gridp.xg[1,0] - gridp.xg[0,0]
fbdy, IMAT = boundary.prepare_oversampling(hmax/6.0)
IMAT = sp.sparse.csr_matrix(IMAT)
ftau = IMAT.dot(tau)  # interpolate the density onto the finer boundary
u = Laplace_Layer_Apply(fbdy, gridp, charge=ftau)
u = gridp.reshape(u)
u += correction
err_plot(u)
ua = 2*gridp.xg + gridp.yg  # analytic solution on the grid, for comparison
| 34.383178 | 93 | 0.618918 |
3f3e92f1f4a6224cee72e432180bdaba79cbc4b7 | 576 | py | Python | controllers/rcj_soccer_referee_supervisor/rcj_soccer_referee_supervisor.py | dbscoach/webots-soccer-sim-playground | 464f9052834d0c6896e6a960113720e8ca4e21df | [
"Apache-2.0"
] | null | null | null | controllers/rcj_soccer_referee_supervisor/rcj_soccer_referee_supervisor.py | dbscoach/webots-soccer-sim-playground | 464f9052834d0c6896e6a960113720e8ca4e21df | [
"Apache-2.0"
] | null | null | null | controllers/rcj_soccer_referee_supervisor/rcj_soccer_referee_supervisor.py | dbscoach/webots-soccer-sim-playground | 464f9052834d0c6896e6a960113720e8ca4e21df | [
"Apache-2.0"
] | null | null | null | from math import ceil
from referee.consts import MATCH_TIME, TIME_STEP
from referee.referee import RCJSoccerReferee
# Build the referee/supervisor. The progress-check windows are given in
# seconds and converted to a number of simulation steps via TIME_STEP (ms).
supervisor = RCJSoccerReferee(
    match_time=MATCH_TIME,
    progress_check_steps=ceil(15 / (TIME_STEP / 1000.0)),
    progress_check_threshold=0.5,
    ball_progress_check_steps=ceil(10 / (TIME_STEP / 1000.0)),
    ball_progress_check_threshold=0.5,
)
# Main loop: advance the simulation one step at a time, broadcast positions,
# and stop as soon as the referee reports the match is over.
while supervisor.step(TIME_STEP) != -1:
    supervisor.emit_positions()
    if not supervisor.tick():
        break
# When the match ends, pause the simulator immediately.
supervisor.simulationSetMode(supervisor.SIMULATION_MODE_PAUSE)
| 27.428571 | 58 | 0.770833 |
3f3edf95fac5cc6b31cb7effd1e2b59006a53ab6 | 4,675 | py | Python | backend/app.py | CMU-IDS-2020/fp-profiler | 45edb7c5f5dfcf34854057476558793bc877f031 | [
"BSD-3-Clause"
] | null | null | null | backend/app.py | CMU-IDS-2020/fp-profiler | 45edb7c5f5dfcf34854057476558793bc877f031 | [
"BSD-3-Clause"
] | null | null | null | backend/app.py | CMU-IDS-2020/fp-profiler | 45edb7c5f5dfcf34854057476558793bc877f031 | [
"BSD-3-Clause"
] | 1 | 2020-11-20T02:56:20.000Z | 2020-11-20T02:56:20.000Z | from flask import Flask, request
import os
from subprocess import Popen, PIPE
import json
from prof_file_util import load_source, load_line_profile, load_graph_profile
from linewise_barchart import linewise_barchart
from valgrind import extract_valgrind_result
from mem_issue_visualize import mem_issue_visualize
# WSGI application object; route handlers (defined later in the original
# file, not visible in this excerpt) attach to this instance.
app = Flask(__name__)
| 31.802721 | 106 | 0.620535 |
3f3f001f639e3ff68f19c91e138db8007658913f | 998 | py | Python | py/book/ShortestSubarrayLength.py | danyfang/SourceCode | 8168f6058648f2a330a7354daf3a73a4d8a4e730 | [
"MIT"
] | null | null | null | py/book/ShortestSubarrayLength.py | danyfang/SourceCode | 8168f6058648f2a330a7354daf3a73a4d8a4e730 | [
"MIT"
] | null | null | null | py/book/ShortestSubarrayLength.py | danyfang/SourceCode | 8168f6058648f2a330a7354daf3a73a4d8a4e730 | [
"MIT"
] | null | null | null | '''
Leetcode problem No 862 Shortest Subarray with Sum at Least K
Solution written by Xuqiang Fang on 1 July, 2018
'''
import collections
# Script entry point; main() is defined elsewhere in the original file
# (not visible in this excerpt) — confirm before running standalone.
main()
| 28.514286 | 69 | 0.490982 |
3f3f1b80d7db0ba49872fa346d7180fa077d1cab | 2,692 | py | Python | djangocms_baseplugins/contact/models.py | benzkji/djangocms-baseplugins | 7f041a030ed93dcdec70e4ca777b841846b8f2f2 | [
"MIT"
] | 2 | 2019-04-14T01:31:22.000Z | 2020-03-05T13:06:57.000Z | djangocms_baseplugins/contact/models.py | benzkji/djangocms-baseplugins | 7f041a030ed93dcdec70e4ca777b841846b8f2f2 | [
"MIT"
] | 32 | 2017-04-04T09:28:06.000Z | 2021-08-18T16:23:02.000Z | djangocms_baseplugins/contact/models.py | bnzk/djangocms-baseplugins | 7f041a030ed93dcdec70e4ca777b841846b8f2f2 | [
"MIT"
] | null | null | null | import time
from ckeditor.fields import RichTextField
from django.db import models
from django.utils.translation import ugettext_lazy as _
from requests import ConnectionError
from djangocms_baseplugins.baseplugin.models import AbstractBasePlugin
from djangocms_baseplugins.baseplugin.utils import check_migration_modules_needed
# Runs at import time; presumably validates that migration modules are
# configured for the 'contact' plugin — confirm in baseplugin.utils.
check_migration_modules_needed('contact')
| 34.075949 | 87 | 0.593239 |
3f3fcc2c16b2bfd7c2cf31951c3290a8d5c5992d | 355 | py | Python | Level1/Lessons76501/minari-76501.py | StudyForCoding/ProgrammersLevel | dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25 | [
"MIT"
] | null | null | null | Level1/Lessons76501/minari-76501.py | StudyForCoding/ProgrammersLevel | dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25 | [
"MIT"
] | null | null | null | Level1/Lessons76501/minari-76501.py | StudyForCoding/ProgrammersLevel | dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25 | [
"MIT"
] | 1 | 2021-04-05T07:35:59.000Z | 2021-04-05T07:35:59.000Z |
#1. for (len(absolutes)), if signs[i] is true: answer += absolutes[i], else: answer -= absolutes[i]
#2. sum(absolutes) | 29.583333 | 100 | 0.571831 |
3f40172291607ab0c848f7f1917399766b9b515c | 1,082 | py | Python | pyexcel/__init__.py | quis/pyexcel | e02f5ff871ba69184d3fb85fa8960da4e883ebdc | [
"BSD-3-Clause"
] | null | null | null | pyexcel/__init__.py | quis/pyexcel | e02f5ff871ba69184d3fb85fa8960da4e883ebdc | [
"BSD-3-Clause"
] | null | null | null | pyexcel/__init__.py | quis/pyexcel | e02f5ff871ba69184d3fb85fa8960da4e883ebdc | [
"BSD-3-Clause"
] | null | null | null | """
pyexcel
~~~~~~~~~~~~~~~~~~~
**pyexcel** is a wrapper library to read, manipulate and
write data in different excel formats: csv, ods, xls, xlsx
and xlsm. It does not support formulas, styles and charts.
:copyright: (c) 2014-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
# flake8: noqa
from .cookbook import (
merge_csv_to_a_book,
merge_all_to_a_book,
split_a_book,
extract_a_sheet_from_a_book,
)
from .core import (
get_array,
iget_array,
get_dict,
get_records,
iget_records,
get_book_dict,
get_sheet,
get_book,
iget_book,
save_as,
isave_as,
save_book_as,
isave_book_as,
)
from .book import Book
from .sheet import Sheet
from .internal.garbagecollector import free_resources
from .deprecated import (
load_book,
load_book_from_memory,
load,
load_from_memory,
load_from_dict,
load_from_records,
Reader,
SeriesReader,
ColumnSeriesReader,
BookReader,
)
from .__version__ import __version__, __author__
| 21.64 | 62 | 0.686691 |
3f4174ff9c8ef5d53e5df7cf324c378ca2b1ce02 | 2,729 | py | Python | tests/resources/selenium/test_nfc.py | Avi-Labs/taurus | 3aa9bc294778d99be545575467fb5897dc815330 | [
"Apache-2.0"
] | 1,743 | 2015-03-30T20:56:03.000Z | 2022-03-31T09:08:37.000Z | tests/resources/selenium/test_nfc.py | Avi-Labs/taurus | 3aa9bc294778d99be545575467fb5897dc815330 | [
"Apache-2.0"
] | 1,159 | 2015-04-01T08:25:53.000Z | 2022-03-29T08:15:31.000Z | tests/resources/selenium/test_nfc.py | Avi-Labs/taurus | 3aa9bc294778d99be545575467fb5897dc815330 | [
"Apache-2.0"
] | 497 | 2015-03-31T21:05:18.000Z | 2022-03-17T12:45:21.000Z | # coding=utf-8
import logging
import random
import string
import sys
import unittest
from time import time, sleep
import apiritif
import os
import re
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as econd
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from bzt.resources.selenium_extras import waiter, get_locator
| 32.488095 | 91 | 0.681568 |
3f418694dc8e68bdf0bfc91861f5c5eb0502eab0 | 5,495 | py | Python | src/onegov/search/dsl.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/search/dsl.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/search/dsl.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | from elasticsearch_dsl import Search as BaseSearch
from elasticsearch_dsl.response import Hit as BaseHit
from elasticsearch_dsl.response import Response as BaseResponse
def explanation_value(explanation, text):
    """Depth-first search of an explanation tree.

    Returns a dict with 'description' and 'value' keys for the first node
    whose description starts with ``text``, or None when nothing matches.
    """
    description = explanation.description
    if description.startswith(text):
        return {
            'value': explanation.value,
            'description': description
        }
    # Leaf nodes may not carry a 'details' attribute at all.
    for child in getattr(explanation, 'details', []):
        found = explanation_value(child, text)
        if found:
            return found
| 28.471503 | 79 | 0.589263 |
3f4197885d65ac6c21aa8108e7b1eaac4d9a1a2e | 3,862 | py | Python | DistributedStorageBenchmarkTool/EchoHandler.py | shadoobie/dbench | 0cca504048ba918a1502482b7d06a866cda9ab6e | [
"MIT"
] | null | null | null | DistributedStorageBenchmarkTool/EchoHandler.py | shadoobie/dbench | 0cca504048ba918a1502482b7d06a866cda9ab6e | [
"MIT"
] | null | null | null | DistributedStorageBenchmarkTool/EchoHandler.py | shadoobie/dbench | 0cca504048ba918a1502482b7d06a866cda9ab6e | [
"MIT"
] | null | null | null | from SocketServer import BaseRequestHandler, TCPServer
from DistributedStorageBenchmarkTool.StampyMcGetTheLog import StampyMcGetTheLog
# from sets import Set
import re
if __name__ == '__main__':
serv = TCPServer(('', 20000), EchoHandler)
serv.serve_forever() | 39.408163 | 144 | 0.676075 |
3f42306d062bc9168cc3334b385fbe62bb7498d6 | 14,054 | py | Python | bitten/queue.py | SpamExperts/bitten | 924ae157c876eeff7957074b0c51ed4685d4f304 | [
"BSD-3-Clause"
] | null | null | null | bitten/queue.py | SpamExperts/bitten | 924ae157c876eeff7957074b0c51ed4685d4f304 | [
"BSD-3-Clause"
] | 1 | 2020-09-24T05:28:44.000Z | 2020-09-28T05:34:19.000Z | bitten/queue.py | SpamExperts/bitten | 924ae157c876eeff7957074b0c51ed4685d4f304 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2010 Edgewall Software
# Copyright (C) 2005-2007 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://bitten.edgewall.org/wiki/License.
"""Implements the scheduling of builds for a project.
This module provides the functionality for scheduling builds for a specific
Trac environment. It is used by both the build master and the web interface to
get the list of required builds (revisions not built yet).
Furthermore, the `BuildQueue` class is used by the build master to determine
the next pending build, and to match build slaves against configured target
platforms.
"""
from itertools import ifilter
import re
import time
from trac.util.datefmt import to_timestamp
from trac.util import pretty_timedelta, format_datetime
from trac.attachment import Attachment
from bitten.model import BuildConfig, TargetPlatform, Build, BuildStep
from bitten.util.repository import get_repos
__docformat__ = 'restructuredtext en'
| 40.973761 | 85 | 0.570656 |
3f4449fb416b741bfe8100121589dabfd4bff616 | 434 | py | Python | dynamic-programming/Python/0120-triangle.py | lemonnader/LeetCode-Solution-Well-Formed | baabdb1990fd49ab82a712e121f49c4f68b29459 | [
"Apache-2.0"
] | 1 | 2020-04-02T13:31:31.000Z | 2020-04-02T13:31:31.000Z | dynamic-programming/Python/0120-triangle.py | lemonnader/LeetCode-Solution-Well-Formed | baabdb1990fd49ab82a712e121f49c4f68b29459 | [
"Apache-2.0"
] | null | null | null | dynamic-programming/Python/0120-triangle.py | lemonnader/LeetCode-Solution-Well-Formed | baabdb1990fd49ab82a712e121f49c4f68b29459 | [
"Apache-2.0"
] | null | null | null | from typing import List
| 27.125 | 62 | 0.47235 |
3f4460e3255eb428d7e5749918a9b4a6ef898fc7 | 5,063 | py | Python | regparser/tree/xml_parser/tree_utils.py | pkfec/regulations-parser | ff6b29dcce0449a133e7b93dd462ab3110f80a5d | [
"CC0-1.0"
] | 26 | 2016-06-04T20:48:09.000Z | 2021-07-28T18:13:30.000Z | regparser/tree/xml_parser/tree_utils.py | pkfec/regulations-parser | ff6b29dcce0449a133e7b93dd462ab3110f80a5d | [
"CC0-1.0"
] | 146 | 2016-04-06T19:07:54.000Z | 2022-01-02T20:09:53.000Z | regparser/tree/xml_parser/tree_utils.py | pkfec/regulations-parser | ff6b29dcce0449a133e7b93dd462ab3110f80a5d | [
"CC0-1.0"
] | 28 | 2016-04-09T20:40:48.000Z | 2021-05-08T17:52:59.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from copy import deepcopy
from functools import wraps
from itertools import chain
from lxml import etree
from six.moves.html_parser import HTMLParser
from regparser.tree.priority_stack import PriorityStack
def prepend_parts(parts_prefix, n):
    """Recursively prepend ``parts_prefix`` to the label of node ``n`` and of
    every node in its subtree, then return ``n``. A label is the list of
    markers locating a position in the regulation text."""
    n.label = parts_prefix + n.label
    for child in n.children:
        prepend_parts(parts_prefix, child)
    return n
def split_text(text, tokens):
    """Slice ``text`` at the first occurrence of each token.

    Each piece starts at a token's position (the leading piece before the
    first token is kept as well). Tokens are assumed to occur in order."""
    starts = [text.find(token) for token in tokens]
    # Make sure the very beginning of the text is covered.
    if not starts or starts[0] != 0:
        starts.insert(0, 0)
    pieces = []
    for begin, end in zip(starts, starts[1:]):
        pieces.append(text[begin:end])
    pieces.append(text[starts[-1]:])
    return pieces
def _combine_with_space(prev_text, next_text, add_space_if_needed):
"""Logic to determine where to add spaces to XML. Generally this is just
as matter of checking for space characters, but there are some
outliers"""
prev_text, next_text = prev_text or "", next_text or ""
prev_char, next_char = prev_text[-1:], next_text[:1]
needs_space = (not prev_char.isspace() and
not next_char.isspace() and
next_char and
prev_char not in u'([/<-' and
next_char not in u').;,]>/-')
if add_space_if_needed and needs_space:
return prev_text + " " + next_text
else:
return prev_text + next_text
def replace_xml_node_with_text(node, text):
    """Replace ``node`` in its tree with plain ``text``.

    lxml attaches inter-element text in two different places: when the node
    has a previous sibling the text belongs on that sibling's ``tail``,
    otherwise it belongs on the parent's ``text``. Handle both, then detach
    the node from its parent."""
    parent = node.getparent()
    sibling = node.getprevious()
    if sibling is None:
        parent.text = (parent.text or '') + text
    else:
        sibling.tail = (sibling.tail or '') + text
    parent.remove(node)
def replace_xpath(xpath):
    """Decorator to convert all elements matching the provided xpath in to
    plain text. This'll convert the wrapped function into a new function which
    will search for the provided xpath and replace all matches"""
    # NOTE(review): the nested ``decorator`` function is missing from this
    # excerpt (apparently lost in extraction); as written, ``decorator`` is an
    # unresolved name and calling replace_xpath raises NameError. Restore the
    # inner definition from the original source before relying on this.
    return decorator
def get_node_text(node, add_spaces=False):
    """Return the concatenated text of an XML node and its children.

    Operates on a deep copy so the caller's tree is untouched. Sub- and
    superscripts and footnotes are first flattened to plain text (via the
    module-level helpers). When ``add_spaces`` is set, fragments are joined
    with _combine_with_space so words don't run together."""
    node = deepcopy(node)
    subscript_to_plaintext(node, add_spaces)
    superscript_to_plaintext(node, add_spaces)
    footnotes_to_plaintext(node, add_spaces)
    fragments = [node.text]
    for child in node.getchildren():
        fragments.append(child.text)
        fragments.append(child.tail)
    text = ''
    for fragment in fragments:
        if fragment:
            text = _combine_with_space(text, fragment, add_spaces)
    return text.strip()
# Tags stripped wholesale by get_node_text_tags_preserved below; PRTPAGE
# appears to be a page-break marker in the source XML — confirm.
_tag_black_list = ('PRTPAGE', )
def get_node_text_tags_preserved(xml_node):
    """Return the body of an XML node as a string with child tags kept
    inline, after dropping blacklisted tags and unescaping HTML entities.
    The wrapping tag itself is not part of the result."""
    xml_node = deepcopy(xml_node)
    etree.strip_tags(xml_node, *_tag_black_list)
    # Serialize the children in place, but leave off the wrapping tag.
    pieces = [xml_node.text or '']
    for child in xml_node:
        pieces.append(etree.tounicode(child))
    return HTMLParser().unescape(''.join(pieces))
| 33.309211 | 78 | 0.661268 |
3f450a61b8e2b1852d0f1a4d826ca4c04fcbb6db | 10,638 | py | Python | aiida/orm/implementation/querybuilder.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | [
"BSD-2-Clause"
] | 1 | 2019-07-31T04:08:13.000Z | 2019-07-31T04:08:13.000Z | aiida/orm/implementation/querybuilder.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | [
"BSD-2-Clause"
] | null | null | null | aiida/orm/implementation/querybuilder.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Backend query implementation classes"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import abc
import six
from aiida.common import exceptions
from aiida.common.lang import abstractclassmethod, type_check
from aiida.common.exceptions import InputValidationError
__all__ = ('BackendQueryBuilder',)
| 36.682759 | 153 | 0.601805 |
3f45b952c9fbaad033d9a0d8b00c659fec74f672 | 675 | py | Python | qiskit/util.py | alejomonbar/qiskit-terra | 207fe593f6f616b0d55b43afe4451dcaa672871a | [
"Apache-2.0"
] | null | null | null | qiskit/util.py | alejomonbar/qiskit-terra | 207fe593f6f616b0d55b43afe4451dcaa672871a | [
"Apache-2.0"
] | null | null | null | qiskit/util.py | alejomonbar/qiskit-terra | 207fe593f6f616b0d55b43afe4451dcaa672871a | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=wildcard-import,unused-wildcard-import
"""Common utilities for Qiskit."""
# Deprecated: for backwards compatibility to be removed in a future release
from qiskit.utils import *
| 35.526316 | 77 | 0.764444 |
3f46011535915198f4025241d624f246d85211f4 | 1,049 | py | Python | examples/plot_spirals.py | zblz/gammapy | 49539f25886433abeedc8852387ab4cd73977006 | [
"BSD-3-Clause"
] | null | null | null | examples/plot_spirals.py | zblz/gammapy | 49539f25886433abeedc8852387ab4cd73977006 | [
"BSD-3-Clause"
] | null | null | null | examples/plot_spirals.py | zblz/gammapy | 49539f25886433abeedc8852387ab4cd73977006 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Plot Milky Way spiral arm models.
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from gammapy.astro.population.spatial import ValleeSpiral, FaucherSpiral
# Instantiate the two spiral-arm models to compare.
vallee = ValleeSpiral()
faucher = FaucherSpiral()
# Galactocentric radii (kpc) at which to sample each arm.
radius = np.arange(2.1, 20, 0.1)
for arm_idx in range(4):
    # Vallee model arm
    vx, vy = vallee.xy_position(radius=radius, spiralarm_index=arm_idx)
    plt.plot(vx, vy, label=vallee.spiralarms[arm_idx])
    # Faucher model arm (dash-dot, prefixed label)
    fx, fy = faucher.xy_position(radius=radius, spiralarm_index=arm_idx)
    plt.plot(fx, fy, ls='-.', label='Faucher ' + faucher.spiralarms[arm_idx])
# Central bar from the Vallee model.
plt.plot(vallee.bar['x'], vallee.bar['y'])
plt.xlim(-10, 10)
plt.ylim(-10, 10)
plt.legend(ncol=2)
filename = 'valee_spiral.pdf'
print('Writing {0}'.format(filename))
plt.savefig(filename)
| 28.351351 | 85 | 0.737846 |
3f464d492cb70cfcafd85a5cef1d4df43430ab0b | 7,462 | py | Python | pytc/fitters/bayesian.py | jharman25/pytc | d9ccde3f04e35a3d821ff37a4ad42e62a048d4ac | [
"Unlicense"
] | 20 | 2017-04-27T16:30:03.000Z | 2021-08-12T19:42:05.000Z | pytc/fitters/bayesian.py | jharman25/pytc | d9ccde3f04e35a3d821ff37a4ad42e62a048d4ac | [
"Unlicense"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/pytc/fitters/bayesian.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 6 | 2016-06-23T00:54:21.000Z | 2020-05-19T05:24:20.000Z | __description__ = \
"""
Fitter subclass for performing bayesian (MCMC) fits.
"""
__author__ = "Michael J. Harms"
__date__ = "2017-05-10"
from .base import Fitter
import emcee, corner
import numpy as np
import scipy.optimize as optimize
import multiprocessing
| 32.163793 | 90 | 0.598767 |
3f473e7173cd4e6d679a1656ee0296fc204724d2 | 166 | py | Python | code/taskB/models.py | nft-appraiser/nft-appraiser-api | 6d6495049851afd3d9bfc6969d0e1c9bc430dc81 | [
"MIT"
] | null | null | null | code/taskB/models.py | nft-appraiser/nft-appraiser-api | 6d6495049851afd3d9bfc6969d0e1c9bc430dc81 | [
"MIT"
] | null | null | null | code/taskB/models.py | nft-appraiser/nft-appraiser-api | 6d6495049851afd3d9bfc6969d0e1c9bc430dc81 | [
"MIT"
] | null | null | null | from django.db import models
| 23.714286 | 63 | 0.728916 |
3f47e5cac2344784ba9a8fd0999bd621214986ec | 669 | py | Python | DesignPatterns/FactoryPattern/SimpleFactory/autoFactory.py | Py-Himanshu-Patel/Learn-Python | 47a50a934cabcce3b1cbdd4c88141a51f21d3a05 | [
"MIT"
] | null | null | null | DesignPatterns/FactoryPattern/SimpleFactory/autoFactory.py | Py-Himanshu-Patel/Learn-Python | 47a50a934cabcce3b1cbdd4c88141a51f21d3a05 | [
"MIT"
] | null | null | null | DesignPatterns/FactoryPattern/SimpleFactory/autoFactory.py | Py-Himanshu-Patel/Learn-Python | 47a50a934cabcce3b1cbdd4c88141a51f21d3a05 | [
"MIT"
] | null | null | null | from inspect import isclass, isabstract, getmembers
import autos
| 25.730769 | 72 | 0.647235 |
3f48698c2248b56650a5e482a06629cf79f5bbbd | 9,023 | py | Python | Costa Rican Household Poverty Level Prediction/tens.py | hautan/train_tf | 0946c7a497703f13c156de9f0135296fd91127ee | [
"Apache-2.0"
] | null | null | null | Costa Rican Household Poverty Level Prediction/tens.py | hautan/train_tf | 0946c7a497703f13c156de9f0135296fd91127ee | [
"Apache-2.0"
] | null | null | null | Costa Rican Household Poverty Level Prediction/tens.py | hautan/train_tf | 0946c7a497703f13c156de9f0135296fd91127ee | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# We must always import the relevant libraries for our problem at hand. NumPy and TensorFlow are required for this example.
# https://www.kaggle.com/c/costa-rican-household-poverty-prediction/data#_=_
import numpy as np
np.set_printoptions(threshold='nan')
import matplotlib.pyplot as plt
import tensorflow as tf
import pandas as pd
costa_rica_household = pd.read_csv('data/train.csv')
#x1 =
costa_rica_household.describe()
#x1["v2a1"]
costa_rica_household.head()
list(costa_rica_household.dtypes)
#costa_rica_household = costa_rica_household.fillna(0)
costa_rica_household = costa_rica_household.fillna(costa_rica_household.mean())
#costa_rica_household["idhogar"] = costa_rica_household["idhogar"].apply(lambda x: int(x, 16))
#costa_rica_household["dependency"] = costa_rica_household["dependency"].apply(lambda x: toInt(x))
#costa_rica_household["edjefe"] = costa_rica_household["edjefe"].apply(lambda x: toInt(x))//edjefa
#costa_rica_household.loc[costa_rica_household['dependency'] == "'<='"]
#v1 = costa_rica_household[costa_rica_household['dependency'].apply(lambda x: type(x) == str)]['dependency']
#col_name = costa_rica_household.columns
#print(list(col_name))
#costa_rica_household[["age", "SQBage", "agesq", "r4h1", "r4h2"]]
cols_to_norm = ['v2a1', 'hacdor', 'rooms', 'hacapo', 'v14a', 'refrig', 'v18q', 'v18q1',
'tamhog', 'tamviv', 'escolari', 'rez_esc', 'hhsize', 'paredblolad', 'paredzocalo', 'paredpreb', 'pareddes', 'paredmad', 'paredzinc', 'paredfibras',
'paredother', 'pisomoscer', 'pisocemento', 'pisoother', 'pisonatur', 'pisonotiene', 'pisomadera', 'techozinc', 'techoentrepiso', 'techocane', 'techootro',
'cielorazo', 'abastaguadentro', 'abastaguafuera', 'abastaguano', 'public', 'planpri', 'noelec', 'coopele', 'sanitario1', 'sanitario2', 'sanitario3',
'sanitario5', 'sanitario6', 'energcocinar1', 'energcocinar2', 'energcocinar3', 'energcocinar4', 'elimbasu1', 'elimbasu2', 'elimbasu3', 'elimbasu4',
'elimbasu5', 'elimbasu6', 'epared1', 'epared2', 'epared3', 'etecho1', 'etecho2', 'etecho3', 'eviv1', 'eviv2', 'eviv3', 'dis', 'male', 'female',
'estadocivil1', 'estadocivil2', 'estadocivil3', 'estadocivil4', 'estadocivil5', 'estadocivil6', 'estadocivil7', 'parentesco1', 'parentesco2',
'parentesco3', 'parentesco4', 'parentesco5', 'parentesco6', 'parentesco7', 'parentesco8', 'parentesco9', 'parentesco10', 'parentesco11',
'parentesco12', 'hogar_nin', 'hogar_adul', 'hogar_mayor', 'hogar_total', 'meaneduc', 'instlevel1',
'instlevel2', 'instlevel3', 'instlevel4', 'instlevel5', 'instlevel6', 'instlevel7', 'instlevel8', 'instlevel9', 'bedrooms', 'overcrowding', 'tipovivi1',
'tipovivi2', 'tipovivi3', 'tipovivi4', 'tipovivi5', 'computer', 'television', 'mobilephone', 'qmobilephone', 'lugar1', 'lugar2', 'lugar3', 'lugar4',
'lugar5', 'lugar6', 'area1', 'area2', 'SQBescolari', 'SQBhogar_total', 'SQBedjefe', 'SQBhogar_nin', 'SQBovercrowding', 'SQBdependency',
'SQBmeaned', 'agesq']
cat_cols_to_norm = ['r4h1', 'r4h2', 'r4h3', 'r4m1', 'r4m2', 'r4m3', 'r4t1', 'r4t2', 'r4t3']
cols_of_interest = ['v2a1', 'hacdor', 'rooms', 'hacapo', 'v14a', 'refrig', 'v18q', 'v18q1', 'r4h1', 'r4h2', 'r4h3', 'r4m1', 'r4m2', 'r4m3', 'r4t1', 'r4t2', 'r4t3',
'tamhog', 'tamviv', 'escolari', 'rez_esc', 'hhsize', 'paredblolad', 'paredzocalo', 'paredpreb', 'pareddes', 'paredmad', 'paredzinc', 'paredfibras',
'paredother', 'pisomoscer', 'pisocemento', 'pisoother', 'pisonatur', 'pisonotiene', 'pisomadera', 'techozinc', 'techoentrepiso', 'techocane', 'techootro',
'cielorazo', 'abastaguadentro', 'abastaguafuera', 'abastaguano', 'public', 'planpri', 'noelec', 'coopele', 'sanitario1', 'sanitario2', 'sanitario3',
'sanitario5', 'sanitario6', 'energcocinar1', 'energcocinar2', 'energcocinar3', 'energcocinar4', 'elimbasu1', 'elimbasu2', 'elimbasu3', 'elimbasu4',
'elimbasu5', 'elimbasu6', 'epared1', 'epared2', 'epared3', 'etecho1', 'etecho2', 'etecho3', 'eviv1', 'eviv2', 'eviv3', 'dis', 'male', 'female',
'estadocivil1', 'estadocivil2', 'estadocivil3', 'estadocivil4', 'estadocivil5', 'estadocivil6', 'estadocivil7', 'parentesco1', 'parentesco2',
'parentesco3', 'parentesco4', 'parentesco5', 'parentesco6', 'parentesco7', 'parentesco8', 'parentesco9', 'parentesco10', 'parentesco11',
'parentesco12', 'hogar_nin', 'hogar_adul', 'hogar_mayor', 'hogar_total', 'meaneduc', 'instlevel1',
'instlevel2', 'instlevel3', 'instlevel4', 'instlevel5', 'instlevel6', 'instlevel7', 'instlevel8', 'instlevel9', 'bedrooms', 'overcrowding', 'tipovivi1',
'tipovivi2', 'tipovivi3', 'tipovivi4', 'tipovivi5', 'computer', 'television', 'mobilephone', 'qmobilephone', 'lugar1', 'lugar2', 'lugar3', 'lugar4',
'lugar5', 'lugar6', 'area1', 'area2', 'SQBescolari', 'SQBhogar_total', 'SQBedjefe', 'SQBhogar_nin', 'SQBovercrowding', 'SQBdependency',
'SQBmeaned', 'agesq']
#costa_rica_household[cols_to_norm] = costa_rica_household[cols_to_norm].apply(lambda x: (x - x.min())/(x.max() - x.min()))
#costa_rica_household[cat_cols_to_norm] = costa_rica_household[cat_cols_to_norm].apply(lambda x: (x - x.min())/(x.max() - x.min()))
costa_rica_household[cols_of_interest] = costa_rica_household[cols_of_interest].apply(lambda x: (x - x.min())/(x.max() - x.min()))
feat_cols = []
for col_name in cols_to_norm:
col_name = tf.feature_column.numeric_column(col_name)
feat_cols.append(col_name)
age_range_count = [1,2,3,4,5,7]
r4h1_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4h1'), boundaries=age_range_count)
r4h2_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4h2'), boundaries=age_range_count)
r4h3_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4h3'), boundaries=age_range_count)
crossed_r4h = tf.feature_column.crossed_column([r4h1_bucket, r4h2_bucket, r4h3_bucket], 100)
#fc = [r4h1_bucket, r4h2_bucket, r4h3_bucket, crossed_r4h]
r4m1_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4m1'), boundaries=age_range_count)
r4m2_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4m2'), boundaries=age_range_count)
r4m3_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4m3'), boundaries=age_range_count)
crossed_r4m = tf.feature_column.crossed_column([r4m1_bucket, r4m2_bucket, r4m3_bucket], 100)
r4t1_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4t1'), boundaries=age_range_count)
r4t2_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4t2'), boundaries=age_range_count)
r4t3_bucket = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('r4t3'), boundaries=age_range_count)
crossed_r4t = tf.feature_column.crossed_column([r4t1_bucket, r4t2_bucket, r4t3_bucket], 100)
feat_cols.extend([r4h1_bucket, r4h2_bucket, r4h3_bucket, crossed_r4h, r4m1_bucket, r4m2_bucket, r4m3_bucket, crossed_r4m, r4t1_bucket, r4t2_bucket, r4t3_bucket, crossed_r4t])
len(feat_cols)
feat_cols[138]
estimator = tf.estimator.LinearClassifier(feature_columns=feat_cols, n_classes=4)
#costa_rica_household[(costa_rica_household.Target == 4)]
x_data = costa_rica_household.drop('Id', axis=1).drop('edjefa', axis=1).drop('idhogar', axis=1).drop('dependency', axis=1).drop('Target', axis=1)
#x_data['idhogar']
#x_data.describe()
#x_data.head()
labels = costa_rica_household['Target']
labels.head()
from sklearn.model_selection import train_test_split
X_train, X_eval, y_train, y_eval = train_test_split(x_data, labels, test_size=0.3, random_state=101)
print(X_train.shape, y_eval.shape)
input_func = tf.estimator.inputs.pandas_input_fn(x=X_train, y=y_train, batch_size=10, num_epochs=100, shuffle=True)
estimator.train(input_fn=input_func,steps=1000)
eval_input_func = tf.estimator.inputs.pandas_input_fn(x=X_eval, y=y_eval, batch_size=10, num_epochs=1, shuffle=False)
eval_metrics = estimator.evaluate(input_fn=eval_input_func)
print('Eval metrics')
print(eval_metrics)
pred_input_func = tf.estimator.inputs.pandas_input_fn(x=X_eval, shuffle=False)
predictions = []
for predict in estimator.predict(input_fn=pred_input_func):
predictions.append(predict)
predictions
#categorical_columun_voc = tf.feature_column.embedding_column(categorical_columun_voc, 4)
dnn_classifier = tf.estimator.DNNClassifier(hidden_units=[10, 10, 10], feature_columns=feat_cols, n_classes=2)
dnn_classifier.train(input_fn=input_func,steps=1000)
dnn_eval_metrics = dnn_classifier.evaluate(input_fn=eval_input_func)
dnn_eval_metrics
| 62.659722 | 174 | 0.715283 |
3f4b0ed4eea9580bec7a5e2d579164110301a866 | 4,095 | py | Python | DTL_tests/unittests/test_api.py | rocktavious/DevToolsLib | 117200c91a3361e04f7c8e07d2ed4999bbcfc469 | [
"MIT"
] | 1 | 2015-03-23T18:52:12.000Z | 2015-03-23T18:52:12.000Z | DTL_tests/unittests/test_api.py | rocktavious/DevToolsLib | 117200c91a3361e04f7c8e07d2ed4999bbcfc469 | [
"MIT"
] | null | null | null | DTL_tests/unittests/test_api.py | rocktavious/DevToolsLib | 117200c91a3361e04f7c8e07d2ed4999bbcfc469 | [
"MIT"
] | 2 | 2017-05-21T12:50:41.000Z | 2021-10-17T03:32:45.000Z | import os
import time
import unittest
from DTL.api import *
def main():
    """Discover and run this module's unit tests with verbose (level 2) output."""
    unittest.main(verbosity=2)
if __name__ == '__main__':
main() | 35.301724 | 155 | 0.616361 |
3f4bd6114512c0dce72c018cd5c68157e1b63e0a | 2,840 | py | Python | src/yellow_ball/src/ball.py | AndyHUI711/ELEC3210-Group7 | 08e5d9a7566447349a33ef577499ac2edbb9d6c3 | [
"IJG"
] | 1 | 2021-12-16T09:57:44.000Z | 2021-12-16T09:57:44.000Z | src/yellow_ball/src/ball.py | AndyHUI711/ELEC3210-Group7 | 08e5d9a7566447349a33ef577499ac2edbb9d6c3 | [
"IJG"
] | null | null | null | src/yellow_ball/src/ball.py | AndyHUI711/ELEC3210-Group7 | 08e5d9a7566447349a33ef577499ac2edbb9d6c3 | [
"IJG"
] | null | null | null | #!/usr/bin/env python
import numpy as np
import cv2
import math
import rospy
from cv_bridge import CvBridge, CvBridgeError
from std_msgs.msg import Bool
from sensor_msgs.msg import Image
from geometry_msgs.msg import Twist
bridge = CvBridge()  # shared converter between ROS image messages and OpenCV arrays
laser_scan_on = True  # module-level flag; presumably toggled by a subscriber callback -- TODO confirm
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this chunk of the file; it is
    # presumably defined earlier in the full source -- confirm before running.
    main()
| 28.979592 | 107 | 0.560915 |
3f4d396e7dff26260074f0fb74d95a3f3b759b61 | 7,358 | py | Python | dlkit/json_/authentication/queries.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 2 | 2018-02-23T12:16:11.000Z | 2020-10-08T17:54:24.000Z | dlkit/json_/authentication/queries.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 87 | 2017-04-21T18:57:15.000Z | 2021-12-13T19:43:57.000Z | dlkit/json_/authentication/queries.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 1 | 2018-03-01T16:44:25.000Z | 2018-03-01T16:44:25.000Z | """JSON implementations of authentication queries."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package json package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from .. import utilities
from ..osid import queries as osid_queries
from ..primitives import Id
from ..utilities import get_registry
from dlkit.abstract_osid.authentication import queries as abc_authentication_queries
from dlkit.abstract_osid.osid import errors
| 36.425743 | 98 | 0.671786 |
3f4d7a70b7445e8fd4a01a87b193501aed45d294 | 2,944 | py | Python | PyStellar/stellar/Git/service/git_commit_service.py | psgstellar/Stellar | 947d4b3d9d6b9c74d4c9ebd29683793a8d86fad2 | [
"Apache-2.0"
] | 3 | 2021-01-24T17:07:55.000Z | 2021-02-20T20:11:13.000Z | PyStellar/stellar/Git/service/git_commit_service.py | psgstellar/Stellar | 947d4b3d9d6b9c74d4c9ebd29683793a8d86fad2 | [
"Apache-2.0"
] | 61 | 2021-01-10T12:59:01.000Z | 2021-06-24T09:19:20.000Z | PyStellar/stellar/Git/service/git_commit_service.py | psgstellar/Stellar | 947d4b3d9d6b9c74d4c9ebd29683793a8d86fad2 | [
"Apache-2.0"
] | 1 | 2021-01-14T05:23:32.000Z | 2021-01-14T05:23:32.000Z | import requests
import dateutil.parser
import pytz
from Git.dao.git_dao import GitOwnerRepo
| 43.940299 | 176 | 0.52038 |
3f4ed1f83045dc59c913f12ff649d24264b9e68d | 10,199 | py | Python | apps/jetbrains/jetbrains.py | HansKlokkenspel/knausj_talon | af9254f9b5be73187573f113a42c905146c0aabd | [
"Unlicense"
] | null | null | null | apps/jetbrains/jetbrains.py | HansKlokkenspel/knausj_talon | af9254f9b5be73187573f113a42c905146c0aabd | [
"Unlicense"
] | null | null | null | apps/jetbrains/jetbrains.py | HansKlokkenspel/knausj_talon | af9254f9b5be73187573f113a42c905146c0aabd | [
"Unlicense"
] | null | null | null | import os
import os.path
import requests
import time
from pathlib import Path
from talon import ctrl, ui, Module, Context, actions, clip
import tempfile
# Courtesy of https://github.com/anonfunc/talon-user/blob/master/apps/jetbrains.py
# Commands queued for the repeat/extend mechanism; presumably consumed further
# down in the full file -- confirm.
extendCommands = []
# Each IDE gets its own port, as otherwise you wouldn't be able
# to run two at the same time and switch between them.
# Note that MPS and IntelliJ ultimate will conflict...
# Maps app bundle ids / executable names / window titles to the local HTTP
# port of that IDE's command-server plugin.
port_mapping = {
    "com.google.android.studio": 8652,
    "com.jetbrains.AppCode": 8655,
    "com.jetbrains.CLion": 8657,
    "com.jetbrains.datagrip": 8664,
    "com.jetbrains.goland-EAP": 8659,
    "com.jetbrains.goland": 8659,
    "com.jetbrains.intellij-EAP": 8653,
    "com.jetbrains.intellij.ce": 8654,
    "com.jetbrains.intellij": 8653,
    "com.jetbrains.PhpStorm": 8662,
    "com.jetbrains.pycharm": 8658,
    "com.jetbrains.rider": 8660,
    "com.jetbrains.rubymine": 8661,
    "com.jetbrains.WebStorm": 8663,
    "google-android-studio": 8652,
    "idea64.exe": 8653,
    "IntelliJ IDEA": 8653,
    "jetbrains-appcode": 8655,
    "jetbrains-clion": 8657,
    "jetbrains-datagrip": 8664,
    "jetbrains-goland-eap": 8659,
    "jetbrains-goland": 8659,
    "jetbrains-idea-ce": 8654,
    "jetbrains-idea-eap": 8653,
    "jetbrains-idea": 8653,
    "jetbrains-phpstorm": 8662,
    "jetbrains-pycharm-ce": 8658,
    "jetbrains-pycharm": 8658,
    "jetbrains-rider": 8660,
    "jetbrains-rubymine": 8661,
    "jetbrains-studio": 8652,
    "jetbrains-webstorm": 8663,
    "PyCharm": 8658,
    "pycharm64.exe": 8658,
    "webstorm64.exe": 8663,
}
# Spoken verb -> IDE action(s) to run when the verb is applied to a selection.
select_verbs_map = {
    "clear": ["action EditorBackSpace"],
    "collapse": ["action CollapseRegion"],
    "comment": ["action CommentByLineComment"],
    "copy": ["action EditorCopy"],
    "cut": ["action EditorCut"],
    "drag down": ["action MoveLineDown"],
    "drag up": ["action MoveLineUp"],
    "expand": ["action ExpandRegion"],
    "indent": ["action EditorIndentLineOrSelection"],
    "refactor": ["action Refactorings.QuickListPopupAction"],
    "rename": ["action RenameElement"],
    "replace": ["action EditorPaste"],
    "select": [],
    "unindent": ["action EditorUnindentSelection"],
}
# Spoken verb -> IDE action(s) to run after moving the caret.
movement_verbs_map = {
    "fix": ["action ShowIntentionActions"],
    "go": [],
    "paste": ["action EditorPaste"],
}
ctx = Context()
mod = Module()
mod.list("select_verbs", desc="Verbs for selecting in the IDE")
mod.list("movement_verbs", desc="Verbs for navigating the IDE")
# Restrict this context to JetBrains-family applications.
ctx.matches = r"""
app: /jetbrains/
app: IntelliJ IDEA
app: idea64.exe
app: PyCharm
app: PyCharm64.exe
app: pycharm64.exe
app: webstorm64.exe
"""
# NOTE(review): the keys below ("user.selection_verbs"/"user.navigation_verbs")
# do not match the list names declared via mod.list above
# ("select_verbs"/"movement_verbs"); one side looks stale -- verify against the
# accompanying .talon grammar files before changing either.
ctx.lists["user.selection_verbs"] = select_verbs_map.keys()
ctx.lists["user.navigation_verbs"] = movement_verbs_map.keys()
| 31.772586 | 90 | 0.654574 |
3f4f261effbec9ffc0f629f4f48d599f4fe3ee02 | 752 | py | Python | be/model/db_conn.py | CharlesDDDD/bookstore | 4052a06f5162100f14c4b762f058204792ceb3c3 | [
"Apache-2.0"
] | null | null | null | be/model/db_conn.py | CharlesDDDD/bookstore | 4052a06f5162100f14c4b762f058204792ceb3c3 | [
"Apache-2.0"
] | null | null | null | be/model/db_conn.py | CharlesDDDD/bookstore | 4052a06f5162100f14c4b762f058204792ceb3c3 | [
"Apache-2.0"
] | null | null | null | from be.table.user import User
from be.table.user_store import User_Store
from be.table.store import Store
| 26.857143 | 94 | 0.610372 |
3f4f714fbc65f277fd1dc4334716ace380650956 | 22,334 | py | Python | lib/roi_data_rel/fast_rcnn_rel.py | champon1020/TRACE | 8ed0aed87e153af66f02502887a4de0d39867209 | [
"MIT"
] | 34 | 2021-08-19T05:59:58.000Z | 2022-03-26T09:26:54.000Z | lib/roi_data_rel/fast_rcnn_rel.py | champon1020/TRACE | 8ed0aed87e153af66f02502887a4de0d39867209 | [
"MIT"
] | 8 | 2021-09-15T05:27:23.000Z | 2022-02-27T12:38:03.000Z | lib/roi_data_rel/fast_rcnn_rel.py | champon1020/TRACE | 8ed0aed87e153af66f02502887a4de0d39867209 | [
"MIT"
] | 6 | 2021-09-16T10:51:38.000Z | 2022-03-05T22:48:54.000Z | # Adapted by Ji Zhang, 2019
#
# Based on Detectron.pytorch/lib/roi_data/fast_rcnn.py
# Original license text:
# --------------------------------------------------------
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Construct minibatches for Fast R-CNN training. Handles the minibatch blobs
that are specific to Fast R-CNN. Other blobs that are generic to RPN, etc.
are handled by their respecitive roi_data modules.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import numpy.random as npr
import logging
from core.config import cfg
import utils_rel.boxes_rel as box_utils_rel
import utils.blob as blob_utils
import utils.fpn as fpn_utils
logger = logging.getLogger(__name__)
def add_rel_blobs(blobs, im_scales, roidb):
    """Add blobs needed for training Fast R-CNN style models."""
    # Sample relation pairs image by image and collect them under each blob key.
    for image_idx, entry in enumerate(roidb):
        sampled_blobs = _sample_pairs(entry, im_scales[image_idx], image_idx)
        for key, value in sampled_blobs.items():
            blobs[key].append(value)
    # Fuse every per-image list of arrays into one stacked tensor.
    for key, value in blobs.items():
        if isinstance(value, list) and value:
            blobs[key] = np.concatenate(value)
    # With FPN enabled, the RoIs must additionally be spread over the levels.
    if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:
        _add_rel_multilevel_rois(blobs)
    return True
def _sample_pairs(roidb, im_scale, batch_idx):
    """Generate a random sample of RoIs comprising foreground and background
    examples.

    Args:
        roidb: roidb entry for one image, holding candidate subject/object
            boxes, their overlaps with the ground truth and the GT classes.
        im_scale: scale factor applied to this image's boxes.
        batch_idx: index of this image inside the minibatch; prepended to each
            roi row, giving (batch_idx, x1, y1, x2, y2).

    Returns:
        dict mapping blob names to arrays for this image (rois, labels, and
        optional spatial-feature / contrastive-loss blobs).
    """
    fg_pairs_per_image = cfg.TRAIN.FG_REL_SIZE_PER_IM
    pairs_per_image = int(cfg.TRAIN.FG_REL_SIZE_PER_IM / cfg.TRAIN.FG_REL_FRACTION)  # need much more pairs since it's quadratic
    max_pair_overlaps = roidb['max_pair_overlaps']
    if cfg.MODEL.MULTI_RELATION:
        # Multi-label sampling: every (pair, predicate-class) cell of the
        # overlap matrix is a candidate, addressed by the hash
        # pair_ind * prd_class_num + class.
        prd_gt_overlaps = roidb['prd_gt_overlaps'].toarray()
        prd_class_num = prd_gt_overlaps.shape[1]
        gt_pair_inds, gt_pair_class = np.where(prd_gt_overlaps > 1.0 - 1e-4)
        fg_pair_inds, fg_pair_class = np.where((prd_gt_overlaps >= cfg.TRAIN.FG_THRESH) &
                                               (prd_gt_overlaps <= 1.0 - 1e-4))
        hash_gt_pair_inds = prd_class_num * gt_pair_inds + gt_pair_class
        hash_fg_pair_inds = prd_class_num * fg_pair_inds + fg_pair_class
        fg_pairs_per_this_image = np.minimum(fg_pairs_per_image, hash_gt_pair_inds.size + hash_fg_pair_inds.size)
        # Sample foreground cells without replacement; GT cells are always kept.
        if hash_fg_pair_inds.size > 0 and fg_pairs_per_this_image > hash_gt_pair_inds.size:
            hash_fg_pair_inds = npr.choice(
                hash_fg_pair_inds, size=(fg_pairs_per_this_image - hash_gt_pair_inds.size), replace=False)
            hash_fg_pair_inds = np.append(hash_fg_pair_inds, hash_gt_pair_inds)
        elif fg_pairs_per_this_image <= hash_gt_pair_inds.size:
            hash_gt_pair_inds = npr.choice(
                hash_gt_pair_inds, size=fg_pairs_per_this_image, replace=False)
            hash_fg_pair_inds = hash_gt_pair_inds
        else:
            hash_fg_pair_inds = hash_gt_pair_inds
        blob_dict = {}
        if cfg.MODEL.USE_BG:
            bg_pair_inds, bg_pair_class_inds = np.where((prd_gt_overlaps < cfg.TRAIN.BG_THRESH_HI))
            hash_bg_pair_inds = prd_class_num * bg_pair_inds + bg_pair_class_inds
            bg_pairs_per_this_image = pairs_per_image - fg_pairs_per_this_image
            bg_pairs_per_this_image = np.minimum(bg_pairs_per_this_image, hash_bg_pair_inds.size)
            if hash_bg_pair_inds.size > 0:
                hash_bg_pair_inds = npr.choice(
                    hash_bg_pair_inds, size=bg_pairs_per_this_image, replace=False)
            hash_keep_pair_inds = np.append(hash_fg_pair_inds, hash_bg_pair_inds)
            # 1 marks a positive (pair, class) cell, 0 a background cell.
            multi_prd_labels = np.zeros(hash_keep_pair_inds.size, dtype=np.int32)
            multi_prd_labels[:hash_fg_pair_inds.size] = 1.0
            # Decode the hashed indices back into (pair index, predicate class).
            keep_pair_inds = np.append(hash_fg_pair_inds // prd_class_num, hash_bg_pair_inds // prd_class_num)
            keep_pair_class = np.append(hash_fg_pair_inds % prd_class_num, hash_bg_pair_inds % prd_class_num)
        else:
            # BUGFIX: this branch previously referenced the undefined name
            # `fg_multi_prd_labels` (NameError) and called np.append() with a
            # single positional argument (TypeError).  Without background
            # sampling every kept cell is foreground, so the labels are all
            # ones and the kept indices are simply the decoded fg hashes.
            multi_prd_labels = np.ones(hash_fg_pair_inds.size, dtype=np.int32)
            keep_pair_inds = hash_fg_pair_inds // prd_class_num
            keep_pair_class = hash_fg_pair_inds % prd_class_num
        blob_dict['multi_prd_labels_int32'] = multi_prd_labels.astype(np.int32, copy=False)
        blob_dict['keep_pair_class_int32'] = keep_pair_class.astype(np.int32, copy=False)
        blob_dict['fg_size'] = np.array([hash_fg_pair_inds.size], dtype=np.int32)
    else:
        # Single-label sampling over whole pairs (original RelDN scheme).
        gt_pair_inds = np.where(max_pair_overlaps > 1.0 - 1e-4)[0]
        fg_pair_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH) &
                                (max_pair_overlaps <= 1.0 - 1e-4))[0]
        fg_pairs_per_this_image = np.minimum(fg_pairs_per_image, gt_pair_inds.size + fg_pair_inds.size)
        # Sample foreground regions without replacement
        if fg_pair_inds.size > 0 and fg_pairs_per_this_image > gt_pair_inds.size:
            fg_pair_inds = npr.choice(
                fg_pair_inds, size=(fg_pairs_per_this_image - gt_pair_inds.size), replace=False)
            fg_pair_inds = np.append(fg_pair_inds, gt_pair_inds)
        elif fg_pairs_per_this_image <= gt_pair_inds.size:
            gt_pair_inds = npr.choice(
                gt_pair_inds, size=fg_pairs_per_this_image, replace=False)
            fg_pair_inds = gt_pair_inds
        else:
            fg_pair_inds = gt_pair_inds
        # Label is the class each RoI has max overlap with
        fg_prd_labels = roidb['max_prd_classes'][fg_pair_inds]
        blob_dict = dict(
            fg_prd_labels_int32=fg_prd_labels.astype(np.int32, copy=False))
        if cfg.MODEL.USE_BG:
            bg_pair_inds = np.where((max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
            # Compute number of background RoIs to take from this image (guarding
            # against there being fewer than desired)
            bg_pairs_per_this_image = pairs_per_image - fg_pairs_per_this_image
            bg_pairs_per_this_image = np.minimum(bg_pairs_per_this_image, bg_pair_inds.size)
            # Sample background regions without replacement
            if bg_pair_inds.size > 0:
                bg_pair_inds = npr.choice(
                    bg_pair_inds, size=bg_pairs_per_this_image, replace=False)
            keep_pair_inds = np.append(fg_pair_inds, bg_pair_inds)
            all_prd_labels = np.zeros(keep_pair_inds.size, dtype=np.int32)
            all_prd_labels[:fg_pair_inds.size] = fg_prd_labels + 1  # class should start from 1
        else:
            keep_pair_inds = fg_pair_inds
            all_prd_labels = fg_prd_labels
        blob_dict['all_prd_labels_int32'] = all_prd_labels.astype(np.int32, copy=False)
        blob_dict['fg_size'] = np.array([fg_pair_inds.size], dtype=np.int32)  # this is used to check if there is at least one fg to learn
    sampled_sbj_boxes = roidb['sbj_boxes'][keep_pair_inds]
    sampled_obj_boxes = roidb['obj_boxes'][keep_pair_inds]
    sampled_all_boxes = roidb['all_boxes']
    det_labels = roidb['det_labels']
    sampled_sbj_inds = roidb['sbj_id'][keep_pair_inds]
    sampled_obj_inds = roidb['obj_id'][keep_pair_inds]
    # Scale rois and format as (batch_idx, x1, y1, x2, y2)
    sampled_sbj_rois = sampled_sbj_boxes * im_scale
    sampled_obj_rois = sampled_obj_boxes * im_scale
    sampled_all_rois = sampled_all_boxes * im_scale
    repeated_batch_idx = batch_idx * blob_utils.ones((keep_pair_inds.shape[0], 1))
    all_boxes_repeated_batch_idx = batch_idx * blob_utils.ones((sampled_all_boxes.shape[0], 1))
    sampled_sbj_rois = np.hstack((repeated_batch_idx, sampled_sbj_rois))
    sampled_obj_rois = np.hstack((repeated_batch_idx, sampled_obj_rois))
    sampled_all_rois = np.hstack((all_boxes_repeated_batch_idx, sampled_all_rois))
    # (An unused `int_repeated_batch_idx` local, built with the deprecated
    # np.int alias, was removed here.)
    blob_dict['sbj_inds'] = np.hstack((repeated_batch_idx, sampled_sbj_inds.reshape(-1, 1)))
    blob_dict['obj_inds'] = np.hstack((repeated_batch_idx, sampled_obj_inds.reshape(-1, 1)))
    blob_dict['sbj_rois'] = sampled_sbj_rois
    blob_dict['obj_rois'] = sampled_obj_rois
    blob_dict['det_rois'] = sampled_all_rois
    blob_dict['det_labels'] = det_labels
    sampled_rel_rois = box_utils_rel.rois_union(sampled_sbj_rois, sampled_obj_rois)
    blob_dict['rel_rois'] = sampled_rel_rois
    if cfg.MODEL.USE_SPATIAL_FEAT:
        sampled_spt_feat = box_utils_rel.get_spt_features(
            sampled_sbj_boxes, sampled_obj_boxes, roidb['width'], roidb['height'])
        blob_dict['spt_feat'] = sampled_spt_feat
    if cfg.MODEL.USE_FREQ_BIAS:
        sbj_labels = roidb['max_sbj_classes'][keep_pair_inds]
        obj_labels = roidb['max_obj_classes'][keep_pair_inds]
        blob_dict['all_sbj_labels_int32'] = sbj_labels.astype(np.int32, copy=False)
        blob_dict['all_obj_labels_int32'] = obj_labels.astype(np.int32, copy=False)
    if cfg.MODEL.USE_NODE_CONTRASTIVE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_SO_AWARE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_P_AWARE_LOSS:
        nodes_per_image = cfg.MODEL.NODE_SAMPLE_SIZE
        max_sbj_overlaps = roidb['max_sbj_overlaps']
        max_obj_overlaps = roidb['max_obj_overlaps']
        # sbj
        # Here a naturally existing assumption is, each positive sbj should have at least one positive obj
        sbj_pos_pair_pos_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH))[0]
        sbj_pos_obj_pos_pair_neg_inds = np.where((max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) &
                                                 (max_obj_overlaps >= cfg.TRAIN.FG_THRESH) &
                                                 (max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
        sbj_pos_obj_neg_pair_neg_inds = np.where((max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) &
                                                 (max_obj_overlaps < cfg.TRAIN.FG_THRESH) &
                                                 (max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
        if sbj_pos_pair_pos_inds.size > 0:
            sbj_pos_pair_pos_inds = npr.choice(
                sbj_pos_pair_pos_inds,
                size=int(min(nodes_per_image, sbj_pos_pair_pos_inds.size)),
                replace=False)
        if sbj_pos_obj_pos_pair_neg_inds.size > 0:
            sbj_pos_obj_pos_pair_neg_inds = npr.choice(
                sbj_pos_obj_pos_pair_neg_inds,
                size=int(min(nodes_per_image, sbj_pos_obj_pos_pair_neg_inds.size)),
                replace=False)
        sbj_pos_pair_neg_inds = sbj_pos_obj_pos_pair_neg_inds
        if nodes_per_image - sbj_pos_obj_pos_pair_neg_inds.size > 0 and sbj_pos_obj_neg_pair_neg_inds.size > 0:
            sbj_pos_obj_neg_pair_neg_inds = npr.choice(
                sbj_pos_obj_neg_pair_neg_inds,
                size=int(min(nodes_per_image - sbj_pos_obj_pos_pair_neg_inds.size, sbj_pos_obj_neg_pair_neg_inds.size)),
                replace=False)
            sbj_pos_pair_neg_inds = np.append(sbj_pos_pair_neg_inds, sbj_pos_obj_neg_pair_neg_inds)
        sbj_pos_inds = np.append(sbj_pos_pair_pos_inds, sbj_pos_pair_neg_inds)
        binary_labels_sbj_pos = np.zeros(sbj_pos_inds.size, dtype=np.int32)
        binary_labels_sbj_pos[:sbj_pos_pair_pos_inds.size] = 1
        blob_dict['binary_labels_sbj_pos_int32'] = binary_labels_sbj_pos.astype(np.int32, copy=False)
        prd_pos_labels_sbj_pos = roidb['max_prd_classes'][sbj_pos_pair_pos_inds]
        prd_labels_sbj_pos = np.zeros(sbj_pos_inds.size, dtype=np.int32)
        prd_labels_sbj_pos[:sbj_pos_pair_pos_inds.size] = prd_pos_labels_sbj_pos + 1
        blob_dict['prd_labels_sbj_pos_int32'] = prd_labels_sbj_pos.astype(np.int32, copy=False)
        sbj_labels_sbj_pos = roidb['max_sbj_classes'][sbj_pos_inds] + 1
        # 1. set all obj labels > 0
        obj_labels_sbj_pos = roidb['max_obj_classes'][sbj_pos_inds] + 1
        # 2. find those negative obj
        max_obj_overlaps_sbj_pos = roidb['max_obj_overlaps'][sbj_pos_inds]
        obj_neg_inds_sbj_pos = np.where(max_obj_overlaps_sbj_pos < cfg.TRAIN.FG_THRESH)[0]
        obj_labels_sbj_pos[obj_neg_inds_sbj_pos] = 0
        blob_dict['sbj_labels_sbj_pos_int32'] = sbj_labels_sbj_pos.astype(np.int32, copy=False)
        blob_dict['obj_labels_sbj_pos_int32'] = obj_labels_sbj_pos.astype(np.int32, copy=False)
        # this is for freq bias in RelDN
        blob_dict['sbj_labels_sbj_pos_fg_int32'] = roidb['max_sbj_classes'][sbj_pos_inds].astype(np.int32, copy=False)
        blob_dict['obj_labels_sbj_pos_fg_int32'] = roidb['max_obj_classes'][sbj_pos_inds].astype(np.int32, copy=False)
        sampled_sbj_boxes_sbj_pos = roidb['sbj_boxes'][sbj_pos_inds]
        sampled_obj_boxes_sbj_pos = roidb['obj_boxes'][sbj_pos_inds]
        # Scale rois and format as (batch_idx, x1, y1, x2, y2)
        sampled_sbj_rois_sbj_pos = sampled_sbj_boxes_sbj_pos * im_scale
        sampled_obj_rois_sbj_pos = sampled_obj_boxes_sbj_pos * im_scale
        repeated_batch_idx = batch_idx * blob_utils.ones((sbj_pos_inds.shape[0], 1))
        sampled_sbj_rois_sbj_pos = np.hstack((repeated_batch_idx, sampled_sbj_rois_sbj_pos))
        sampled_obj_rois_sbj_pos = np.hstack((repeated_batch_idx, sampled_obj_rois_sbj_pos))
        blob_dict['sbj_rois_sbj_pos'] = sampled_sbj_rois_sbj_pos
        blob_dict['obj_rois_sbj_pos'] = sampled_obj_rois_sbj_pos
        sampled_rel_rois_sbj_pos = box_utils_rel.rois_union(sampled_sbj_rois_sbj_pos, sampled_obj_rois_sbj_pos)
        blob_dict['rel_rois_sbj_pos'] = sampled_rel_rois_sbj_pos
        _, inds_unique_sbj_pos, inds_reverse_sbj_pos = np.unique(
            sampled_sbj_rois_sbj_pos, return_index=True, return_inverse=True, axis=0)
        assert inds_reverse_sbj_pos.shape[0] == sampled_sbj_rois_sbj_pos.shape[0]
        blob_dict['inds_unique_sbj_pos'] = inds_unique_sbj_pos
        blob_dict['inds_reverse_sbj_pos'] = inds_reverse_sbj_pos
        if cfg.MODEL.USE_SPATIAL_FEAT:
            sampled_spt_feat_sbj_pos = box_utils_rel.get_spt_features(
                sampled_sbj_boxes_sbj_pos, sampled_obj_boxes_sbj_pos, roidb['width'], roidb['height'])
            blob_dict['spt_feat_sbj_pos'] = sampled_spt_feat_sbj_pos
        # obj
        # Here a naturally existing assumption is, each positive obj should have at least one positive sbj
        obj_pos_pair_pos_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH))[0]
        obj_pos_sbj_pos_pair_neg_inds = np.where((max_obj_overlaps >= cfg.TRAIN.FG_THRESH) &
                                                 (max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) &
                                                 (max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
        obj_pos_sbj_neg_pair_neg_inds = np.where((max_obj_overlaps >= cfg.TRAIN.FG_THRESH) &
                                                 (max_sbj_overlaps < cfg.TRAIN.FG_THRESH) &
                                                 (max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
        if obj_pos_pair_pos_inds.size > 0:
            obj_pos_pair_pos_inds = npr.choice(
                obj_pos_pair_pos_inds,
                size=int(min(nodes_per_image, obj_pos_pair_pos_inds.size)),
                replace=False)
        if obj_pos_sbj_pos_pair_neg_inds.size > 0:
            obj_pos_sbj_pos_pair_neg_inds = npr.choice(
                obj_pos_sbj_pos_pair_neg_inds,
                size=int(min(nodes_per_image, obj_pos_sbj_pos_pair_neg_inds.size)),
                replace=False)
        obj_pos_pair_neg_inds = obj_pos_sbj_pos_pair_neg_inds
        if nodes_per_image - obj_pos_sbj_pos_pair_neg_inds.size > 0 and obj_pos_sbj_neg_pair_neg_inds.size:
            obj_pos_sbj_neg_pair_neg_inds = npr.choice(
                obj_pos_sbj_neg_pair_neg_inds,
                size=int(min(nodes_per_image - obj_pos_sbj_pos_pair_neg_inds.size, obj_pos_sbj_neg_pair_neg_inds.size)),
                replace=False)
            obj_pos_pair_neg_inds = np.append(obj_pos_pair_neg_inds, obj_pos_sbj_neg_pair_neg_inds)
        obj_pos_inds = np.append(obj_pos_pair_pos_inds, obj_pos_pair_neg_inds)
        binary_labels_obj_pos = np.zeros(obj_pos_inds.size, dtype=np.int32)
        binary_labels_obj_pos[:obj_pos_pair_pos_inds.size] = 1
        blob_dict['binary_labels_obj_pos_int32'] = binary_labels_obj_pos.astype(np.int32, copy=False)
        prd_pos_labels_obj_pos = roidb['max_prd_classes'][obj_pos_pair_pos_inds]
        prd_labels_obj_pos = np.zeros(obj_pos_inds.size, dtype=np.int32)
        prd_labels_obj_pos[:obj_pos_pair_pos_inds.size] = prd_pos_labels_obj_pos + 1
        blob_dict['prd_labels_obj_pos_int32'] = prd_labels_obj_pos.astype(np.int32, copy=False)
        obj_labels_obj_pos = roidb['max_obj_classes'][obj_pos_inds] + 1
        # 1. set all sbj labels > 0
        sbj_labels_obj_pos = roidb['max_sbj_classes'][obj_pos_inds] + 1
        # 2. find those negative sbj
        max_sbj_overlaps_obj_pos = roidb['max_sbj_overlaps'][obj_pos_inds]
        sbj_neg_inds_obj_pos = np.where(max_sbj_overlaps_obj_pos < cfg.TRAIN.FG_THRESH)[0]
        sbj_labels_obj_pos[sbj_neg_inds_obj_pos] = 0
        blob_dict['sbj_labels_obj_pos_int32'] = sbj_labels_obj_pos.astype(np.int32, copy=False)
        blob_dict['obj_labels_obj_pos_int32'] = obj_labels_obj_pos.astype(np.int32, copy=False)
        # this is for freq bias in RelDN
        blob_dict['sbj_labels_obj_pos_fg_int32'] = roidb['max_sbj_classes'][obj_pos_inds].astype(np.int32, copy=False)
        blob_dict['obj_labels_obj_pos_fg_int32'] = roidb['max_obj_classes'][obj_pos_inds].astype(np.int32, copy=False)
        sampled_sbj_boxes_obj_pos = roidb['sbj_boxes'][obj_pos_inds]
        sampled_obj_boxes_obj_pos = roidb['obj_boxes'][obj_pos_inds]
        # Scale rois and format as (batch_idx, x1, y1, x2, y2)
        sampled_sbj_rois_obj_pos = sampled_sbj_boxes_obj_pos * im_scale
        sampled_obj_rois_obj_pos = sampled_obj_boxes_obj_pos * im_scale
        repeated_batch_idx = batch_idx * blob_utils.ones((obj_pos_inds.shape[0], 1))
        sampled_sbj_rois_obj_pos = np.hstack((repeated_batch_idx, sampled_sbj_rois_obj_pos))
        sampled_obj_rois_obj_pos = np.hstack((repeated_batch_idx, sampled_obj_rois_obj_pos))
        blob_dict['sbj_rois_obj_pos'] = sampled_sbj_rois_obj_pos
        blob_dict['obj_rois_obj_pos'] = sampled_obj_rois_obj_pos
        sampled_rel_rois_obj_pos = box_utils_rel.rois_union(sampled_sbj_rois_obj_pos, sampled_obj_rois_obj_pos)
        blob_dict['rel_rois_obj_pos'] = sampled_rel_rois_obj_pos
        _, inds_unique_obj_pos, inds_reverse_obj_pos = np.unique(
            sampled_obj_rois_obj_pos, return_index=True, return_inverse=True, axis=0)
        assert inds_reverse_obj_pos.shape[0] == sampled_obj_rois_obj_pos.shape[0]
        blob_dict['inds_unique_obj_pos'] = inds_unique_obj_pos
        blob_dict['inds_reverse_obj_pos'] = inds_reverse_obj_pos
        if cfg.MODEL.USE_SPATIAL_FEAT:
            sampled_spt_feat_obj_pos = box_utils_rel.get_spt_features(
                sampled_sbj_boxes_obj_pos, sampled_obj_boxes_obj_pos, roidb['width'], roidb['height'])
            blob_dict['spt_feat_obj_pos'] = sampled_spt_feat_obj_pos
    return blob_dict
def _add_rel_multilevel_rois(blobs):
    """By default training RoIs are added for a single feature map level only.
    When using FPN, the RoIs must be distributed over different FPN levels
    according the level assignment heuristic (see: modeling.FPN.
    map_rois_to_fpn_levels).
    """
    min_level = cfg.FPN.ROI_MIN_LEVEL
    max_level = cfg.FPN.ROI_MAX_LEVEL

    def _distribute(blob_names):
        """Distribute the named roi blobs over the FPN levels, assigning every
        blob in the group the lowest target level found among them."""
        shared_lvls = None
        for blob_name in blob_names:
            # Rois are (batch_idx, x1, y1, x2, y2); coordinates sit in 1:5.
            lvls = fpn_utils.map_rois_to_fpn_levels(
                blobs[blob_name][:, 1:5], min_level, max_level)
            shared_lvls = lvls if shared_lvls is None else np.minimum(shared_lvls, lvls)
        for blob_name in blob_names:
            # Adds per-level blobs named like: <blob_name>_fpn<lvl>
            fpn_utils.add_multilevel_roi_blobs(
                blobs, blob_name, blobs[blob_name], shared_lvls, min_level,
                max_level)

    for roi_name in ('sbj_rois', 'obj_rois', 'rel_rois', 'det_rois'):
        _distribute([roi_name])
    if cfg.MODEL.USE_NODE_CONTRASTIVE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_SO_AWARE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_P_AWARE_LOSS:
        for roi_name in ('sbj_rois_sbj_pos', 'obj_rois_sbj_pos', 'rel_rois_sbj_pos',
                         'sbj_rois_obj_pos', 'obj_rois_obj_pos', 'rel_rois_obj_pos'):
            _distribute([roi_name])
3f4fa826c96bbe11c4299dd6bc861d0f137a3106 | 5,957 | py | Python | src/mysql/tables.py | katerina7479/sooty-shearwater | 1319a6f55443a73e50d265286746edd722404649 | [
"MIT"
] | null | null | null | src/mysql/tables.py | katerina7479/sooty-shearwater | 1319a6f55443a73e50d265286746edd722404649 | [
"MIT"
] | null | null | null | src/mysql/tables.py | katerina7479/sooty-shearwater | 1319a6f55443a73e50d265286746edd722404649 | [
"MIT"
] | null | null | null | import time
import re
from src.core.tables import Table, MigrationTable
from src.core.constraints import Index
def get_column_definition(self, column_name):
'''Get the sql column definition
Selects the column type, and YES or NO from the column, IS NULLABLE.
That's enough information to re-create the column.
'''
sql = self.commands.column_definition(self.db.name, self.name, column_name)
ans = self.execute(sql)[0]
if ans[1] == 'NO':
return '{} NOT NULL'.format(ans[0])
else:
return ans[0]
def rename_column(self, old_name, new_name):
'''Rename a column'''
self.execute(self.commands.rename_column(
self.name,
old_name,
new_name,
self.get_column_definition(old_name))
)
| 35.041176 | 108 | 0.583347 |
3f508ed942c873013d3f6f30b02515c9088ebbfe | 2,418 | py | Python | CvZoneCompetition.py | MoranLeven/CvZomeCompetition | f28dc81ad4139902d831c34649ff3996106a2496 | [
"Apache-2.0"
] | null | null | null | CvZoneCompetition.py | MoranLeven/CvZomeCompetition | f28dc81ad4139902d831c34649ff3996106a2496 | [
"Apache-2.0"
] | null | null | null | CvZoneCompetition.py | MoranLeven/CvZomeCompetition | f28dc81ad4139902d831c34649ff3996106a2496 | [
"Apache-2.0"
] | null | null | null | import cv2
import numpy as np
from time import sleep
import random
# Vehicle-counting demo: MOG background subtraction + contour filtering on a
# drone surveillance video, counting blobs that cross a horizontal line.
# NOTE(review): leading indentation was lost in this copy of the file and has
# been reconstructed; verify the loop nesting against the original source.
length_min = 80 # Minimum length of retangle
height_min = 80 # Minimum height of the angle
offset = 6 #Error allowed between pixel
pos_linha = 550  # y coordinate of the counting line
delay = 60 #FPS of video
detect = []  # centers of candidate vehicles awaiting a line crossing
cars = 0  # running vehicle count
cap = cv2.VideoCapture ("DRONE-SURVEILLANCE-CONTEST-VIDEO.mp4")
cap.set (3,500)  # requested frame width
cap.set (4,500)  # requested frame height
subtractor = cv2.bgsegm.createBackgroundSubtractorMOG ()
while True:
    ret, frame1 = cap.read ()
    time = float(1 / delay)  # seconds per frame
    sleep(time)
    gray = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (3,3), 10)
    img_sub = subtractor.apply(blur)
    dilate = cv2.dilate(img_sub, np.ones ((5,5)))
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    # Two closing passes to fill holes inside the foreground blobs.
    dilated = cv2.morphologyEx(dilate, cv2. MORPH_CLOSE, kernel)
    dilated = cv2.morphologyEx(dilated, cv2. MORPH_CLOSE, kernel)
    contour, h = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.line(frame1, (25, pos_linha), (1900, pos_linha), (255,0,0), 3)
    for (i, c) in enumerate(contour):
        (x, y, w, h) = cv2.boundingRect(c)
        validate_contour = (w >= length_min) and (h >= height_min)
        if not validate_contour:
            continue
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0,255,0), 2)
        # NOTE(review): paste_center is not defined in this chunk -- presumably
        # a helper defined above that returns the bounding-box center; confirm.
        center = paste_center (x, y, w, h)
        detect.append(center)
        # NOTE(review): (0, 0.255) looks like a typo for the BGR red (0, 0, 255).
        cv2.circle(frame1, center, 4, (0, 0.255), -1)
        cv2.putText(frame1,str(random.randint(1,200)),(x,y),cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2)
    for (x, y) in detect:
        if y <(pos_linha + offset) and y> (pos_linha-offset):
            cars += 1
            cv2.line(frame1, (25, pos_linha), (1200, pos_linha), (0,127,255), 3)
            cv2.putText(frame1, str (random.randint (1,200)), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,255), 2)
            # NOTE(review): removing from `detect` while iterating over it can
            # skip elements; a future fix should iterate over a copy.
            detect.remove((x, y))
            print("car is detected:" + str (cars))
    cv2.putText(frame1, "Moran 11", (850, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 5)
    cv2.putText(frame1, str(cars), (1700, 100), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 5)
    cv2.imshow("Surveillance Video", frame1)
    if cv2.waitKey (10) == 27:  # ESC quits
        break
cv2.destroyAllWindows ()
cap.release ()
| 33.123288 | 118 | 0.611249 |
3f520d921ea722830523cfc9c9d0a7a9e7da7bf2 | 1,048 | py | Python | setup.py | jiinus/django-db-prefix | 2fae11b30dc339f3d4318a97900cf7cc4ff2cd51 | [
"BSD-3-Clause"
] | 11 | 2016-12-17T20:01:43.000Z | 2022-02-24T16:35:36.000Z | setup.py | jiinus/django-db-prefix | 2fae11b30dc339f3d4318a97900cf7cc4ff2cd51 | [
"BSD-3-Clause"
] | 4 | 2018-09-08T23:44:01.000Z | 2021-09-22T06:33:16.000Z | setup.py | jiinus/django-db-prefix | 2fae11b30dc339f3d4318a97900cf7cc4ff2cd51 | [
"BSD-3-Clause"
] | 11 | 2015-11-13T09:26:58.000Z | 2021-12-20T11:51:44.000Z | # -*- coding: utf-8 -*-
import os.path
from distutils.core import setup
# Packaging metadata for django-db-prefix (distutils-based setup.py).
# NOTE(review): distutils is deprecated (PEP 632) and removed in Python 3.12;
# consider migrating to setuptools.setup when touching this file.
setup(
name='django-db-prefix',
version='1.0',
keywords='django database',
author=u'Ben Slavin <benjamin.slavin@gmail.com>, Denilson S <denilsonsa@gmail.com>',
packages=['django_db_prefix'],
url='https://github.com/denilsonsa/django-db-prefix',
license='BSD licence, see LICENCE',
description='Allow specification of a global, per-app or per-model database table name prefix.',
# NOTE(review): read() is not defined in this fragment -- presumably a small
# helper that returns the file's contents; confirm it exists upstream.
long_description=read('README.md'),
requires=[
'Django',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Database',
]
)
| 29.942857 | 100 | 0.625954 |
3f52461202339d3dfdcf90633c36a5d40fb4c967 | 2,104 | py | Python | pyleecan/Methods/Slot/HoleUD/build_geometry.py | mjfwest/pyleecan | 6946c863bea62d13f100def2d3f905c9de8721d0 | [
"Apache-2.0"
] | 1 | 2020-10-19T09:01:00.000Z | 2020-10-19T09:01:00.000Z | pyleecan/Methods/Slot/HoleUD/build_geometry.py | mjfwest/pyleecan | 6946c863bea62d13f100def2d3f905c9de8721d0 | [
"Apache-2.0"
] | null | null | null | pyleecan/Methods/Slot/HoleUD/build_geometry.py | mjfwest/pyleecan | 6946c863bea62d13f100def2d3f905c9de8721d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from numpy import arcsin, arctan, cos, exp, array, angle, pi
from numpy import imag as np_imag
from scipy.optimize import fsolve
from ....Classes.Segment import Segment
from ....Classes.SurfLine import SurfLine
from ....Classes.Arc1 import Arc1
from ....Methods import ParentMissingError
from ....Functions.labels import HOLEV_LAB, HOLEM_LAB
def build_geometry(self, alpha=0, delta=0, is_simplified=False):
    """Build the list of surfaces (SurfLine) describing the Hole.

    Each surface of ``self.surf_list`` is relabeled in place (magnet vs.
    ventilation), then a rotated/translated copy of every surface is
    returned.  The ending point of a curve is the starting point of the
    next curve in each surface.

    Parameters
    ----------
    self : HoleUD
        A HoleUD object
    alpha : float
        Rotation angle to apply to every surface [rad] (Default value = 0)
    delta : complex
        Complex translation to apply to every surface (Default value = 0)
    is_simplified : bool
        True to avoid line superposition (kept for API compatibility, unused)

    Returns
    -------
    surf_list : list
        List of SurfLine needed to draw the Hole
    """
    surfaces = self.surf_list

    # Build the label prefixes from the parent lamination and hole rank
    lam_label = self.parent.get_label()
    R_id, surf_type = self.get_R_id()
    vent_label = f"{lam_label}_{surf_type}_R{R_id}-T"
    mag_label = f"{lam_label}_{HOLEM_LAB}_R{R_id}-T"

    # Relabel in place: a magnet surface keeps a magnet label only when the
    # matching magnet_dict entry is defined; otherwise it is relabeled as a
    # ventilation hole, like any plain ventilation surface.
    next_hole = 0  # index of the next ventilation surface
    next_mag = 0  # index of the next magnet surface
    for surface in surfaces:
        is_magnet_surf = HOLEM_LAB in surface.label
        if is_magnet_surf and self.magnet_dict.get(f"magnet_{next_mag}") is not None:
            surface.label = f"{mag_label}{next_mag}-S0"
            next_mag += 1
        elif is_magnet_surf or HOLEV_LAB in surface.label:
            # Magnet disabled/undefined, or a plain ventilation hole
            surface.label = f"{vent_label}{next_hole}-S0"
            next_hole += 1

    # Return transformed copies so the surfaces stored on self keep their
    # original position.
    transformed = []
    for surface in surfaces:
        new_surf = surface.copy()
        new_surf.rotate(alpha)
        new_surf.translate(delta)
        transformed.append(new_surf)
    return transformed
| 30.941176 | 77 | 0.620722 |
3f52ca8d87119aa7ada69b18dd59026206c97a21 | 2,861 | py | Python | tardis/tardis_portal/auth/localdb_auth.py | nrmay/mytardis | 34a460cde6a847c66a6ec3725182c09dc9167bd5 | [
"Apache-2.0"
] | null | null | null | tardis/tardis_portal/auth/localdb_auth.py | nrmay/mytardis | 34a460cde6a847c66a6ec3725182c09dc9167bd5 | [
"Apache-2.0"
] | null | null | null | tardis/tardis_portal/auth/localdb_auth.py | nrmay/mytardis | 34a460cde6a847c66a6ec3725182c09dc9167bd5 | [
"Apache-2.0"
] | null | null | null | '''
Local DB Authentication module.
.. moduleauthor:: Gerson Galang <gerson.galang@versi.edu.au>
'''
import logging
from django.contrib.auth.models import User, Group
from django.contrib.auth.backends import ModelBackend
from tardis.tardis_portal.auth.interfaces import AuthProvider, GroupProvider, UserProvider
logger = logging.getLogger(__name__)
auth_key = u'localdb'
auth_display_name = u'Local DB'
_modelBackend = ModelBackend()
django_user = DjangoUserProvider.name
django_group = DjangoGroupProvider.name
| 26.009091 | 90 | 0.595246 |
3f53cdf77e0b0d349cd123391fe47e0189614f36 | 29,318 | py | Python | src/app.py | hubmapconsortium/search-api | 21900c9ba5d353ab075d4b2cc217085b85d555b1 | [
"MIT"
] | null | null | null | src/app.py | hubmapconsortium/search-api | 21900c9ba5d353ab075d4b2cc217085b85d555b1 | [
"MIT"
] | 248 | 2020-02-27T20:45:25.000Z | 2022-03-30T19:12:58.000Z | src/app.py | sennetconsortium/search-api | 01a5c0ab8ec6abd147e5b04477ba10f80fedfdc3 | [
"MIT"
] | 1 | 2022-02-03T19:49:55.000Z | 2022-02-03T19:49:55.000Z | import os
import time
from pathlib import Path
from flask import Flask, jsonify, abort, request, Response, Request
import concurrent.futures
import threading
import requests
import logging
import ast
from urllib.parse import urlparse
from flask import current_app as app
from urllib3.exceptions import InsecureRequestWarning
from yaml import safe_load
# Local modules
from elasticsearch.indexer import Indexer
from libs.assay_type import AssayType
# HuBMAP commons
from hubmap_commons.hm_auth import AuthHelper
# Set logging fromat and level (default is warning)
# All the API logging is forwarded to the uWSGI server and gets written into the log file `uwsgo-entity-api.log`
# Log rotation is handled via logrotate on the host system with a configuration file
# Do NOT handle log file and rotation via the Python logging to avoid issues with multi-worker processes
logging.basicConfig(format='[%(asctime)s] %(levelname)s in %(module)s:%(lineno)d: %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
# Specify the absolute path of the instance folder and use the config file relative to the instance path
app = Flask(__name__, instance_path=os.path.join(os.path.abspath(os.path.dirname(__file__)), 'instance'), instance_relative_config=True)
app.config.from_pyfile('app.cfg')
# load the index configurations and set the default
INDICES = safe_load((Path(__file__).absolute().parent / 'instance/search-config.yaml').read_text())
DEFAULT_INDEX_WITHOUT_PREFIX = INDICES['default_index']
logger.debug("############ INDICES config LOADED")
logger.debug(INDICES)
# Remove trailing slash / from URL base to avoid "//" caused by config with trailing slash
DEFAULT_ELASTICSEARCH_URL = INDICES['indices'][DEFAULT_INDEX_WITHOUT_PREFIX]['elasticsearch']['url'].strip('/')
DEFAULT_ENTITY_API_URL = INDICES['indices'][DEFAULT_INDEX_WITHOUT_PREFIX]['document_source_endpoint'].strip('/')
# Suppress InsecureRequestWarning warning when requesting status on https with ssl cert verify disabled
requests.packages.urllib3.disable_warnings(category = InsecureRequestWarning)
####################################################################################################
## Register error handlers
####################################################################################################
# Error handler for 400 Bad Request with custom error message
# Error handler for 401 Unauthorized with custom error message
# Error handler for 403 Forbidden with custom error message
# Error handler for 500 Internal Server Error with custom error message
####################################################################################################
## AuthHelper initialization
####################################################################################################
# Initialize AuthHelper class and ensure singleton
# Initialize the AuthHelper singleton used for Globus token/group checks.
# NOTE(review): module-level side effect -- this runs at import time.
try:
# Create the singleton only once; reuse the existing instance otherwise.
if AuthHelper.isInitialized() == False:
auth_helper_instance = AuthHelper.create(app.config['APP_CLIENT_ID'],
app.config['APP_CLIENT_SECRET'])
logger.info("Initialized AuthHelper class successfully :)")
else:
auth_helper_instance = AuthHelper.instance()
except Exception:
msg = "Failed to initialize the AuthHelper class"
# Log the full stack trace, prepend a line with our message
logger.exception(msg)
####################################################################################################
## Default route
####################################################################################################
####################################################################################################
## Assay type API
####################################################################################################
####################################################################################################
## API
####################################################################################################
# Both HTTP GET and HTTP POST can be used to execute search with body against ElasticSearch REST API.
# general search uses the DEFAULT_INDEX
# Both HTTP GET and HTTP POST can be used to execute search with body against ElasticSearch REST API.
# Note: the index in the URL is not the real index in Elasticsearch; it's that index without the prefix
# HTTP GET can be used to execute search with body against ElasticSearch REST API.
# HTTP GET can be used to execute search with body against ElasticSearch REST API.
# Note: the index in the URL is not the real index in Elasticsearch; it's that index without the prefix
# Get a list of indices
# Get the status of Elasticsearch cluster by calling the health API
# This shows the connection status and the cluster health status (if connected)
# This reindex function will also reindex Collection and Upload
# in addition to the Dataset, Donor, Sample entities
# Live reindex without first deleting and recreating the indices
# This just deletes the old document and add the latest document of each entity (if still available)
####################################################################################################
## Internal Functions Used By API
####################################################################################################
# Throws error for 400 Bad Reqeust with message
def bad_request_error(err_msg):
    """Terminate the current request with HTTP 400 (Bad Request).

    Parameters
    ----------
    err_msg : str
        Human-readable message placed in the error response description.
    """
    abort(400, description=err_msg)
# Throws error for 401 Unauthorized with message
def unauthorized_error(err_msg):
    """Terminate the current request with HTTP 401 (Unauthorized).

    Parameters
    ----------
    err_msg : str
        Human-readable message placed in the error response description.
    """
    abort(401, description=err_msg)
# Throws error for 403 Forbidden with message
# Throws error for 500 Internal Server Error with message
# Get user information dict based on the HTTP request (headers)
# `group_required` is a boolean, when True, 'hmgroupids' is in the output
"""
Parase the token from Authorization header
Parameters
----------
request_headers: request.headers
The http request headers
admin_access_required : bool
If the token is required to belong to the HuBMAP-Data-Admin group, default to False
Returns
-------
str
The token string if valid
"""
"""
Check if the user with token belongs to the HuBMAP-Data-Admin group
Parameters
----------
request : falsk.request
The flask http request object that containing the Authorization header
with a valid Globus nexus token for checking group information
Returns
-------
bool
True if the user belongs to HuBMAP-Data-Admin group, otherwise False
"""
"""
Get user infomation dict based on the http request(headers)
The result will be used by the trigger methods
Parameters
----------
request : Flask request object
The Flask request passed from the API endpoint
Returns
-------
dict
A dict containing all the user info
{
"scope": "urn:globus:auth:scope:nexus.api.globus.org:groups",
"name": "First Last",
"iss": "https://auth.globus.org",
"client_id": "21f293b0-5fa5-4ee1-9e0e-3cf88bd70114",
"active": True,
"nbf": 1603761442,
"token_type": "Bearer",
"aud": ["nexus.api.globus.org", "21f293b0-5fa5-4ee1-9e0e-3cf88bd70114"],
"iat": 1603761442,
"dependent_tokens_cache_id": "af2d5979090a97536619e8fbad1ebd0afa875c880a0d8058cddf510fc288555c",
"exp": 1603934242,
"sub": "c0f8907a-ec78-48a7-9c85-7da995b05446",
"email": "email@pitt.edu",
"username": "username@pitt.edu",
"hmscopes": ["urn:globus:auth:scope:nexus.api.globus.org:groups"],
}
"""
# Always expect a json body
# We'll need to verify the requested index in URL is valid
# Determine the target real index in Elasticsearch based on the request header and given index (without prefix)
# The Authorization header with globus token is optional
# Case #1: Authorization header is missing, default to use the `hm_public_<index_without_prefix>`.
# Case #2: Authorization header with valid token, but the member doesn't belong to the HuBMAP-Read group, direct the call to `hm_public_<index_without_prefix>`.
# Case #3: Authorization header presents but with invalid or expired token, return 401 (if someone is sending a token, they might be expecting more than public stuff).
# Case #4: Authorization header presents with a valid token that has the group access, direct the call to `hm_consortium_<index_without_prefix>`.
# Make a call to Elasticsearch
# Get the query string from the original request
# Get a list of entity uuids via entity-api for a given entity type:
# Collection, Donor, Sample, Dataset, Submission. Case-insensitive.
# Create a dict with HTTP Authorization header with Bearer token
# Gets a list of actually public and private indice names
# Get a list of filtered Elasticsearch indices to expose to end users without the prefix
# For local development/testing
# Dev-only entry point; in deployment the app is served by uWSGI instead.
if __name__ == "__main__":
try:
# 0.0.0.0 listens on all interfaces; app.run accepts the port as a string.
app.run(host='0.0.0.0', port="5005")
except Exception as e:
print("Error during starting debug server.")
print(str(e))
# Full traceback goes to the log file; stdout only gets a short notice.
logger.error(e, exc_info=True)
print("Error during startup check the log file for further information")
| 38.934927 | 167 | 0.653728 |
3f55d1d3db5efaf77627369621529da7de9da985 | 149 | py | Python | wordpress/apps.py | 2e2a/django-wordpress | 5417d98128ea6ad4308b250fdee65226e7deb628 | [
"BSD-3-Clause"
] | 1 | 2021-12-03T19:55:27.000Z | 2021-12-03T19:55:27.000Z | wordpress/apps.py | 2e2a/django-wordpress | 5417d98128ea6ad4308b250fdee65226e7deb628 | [
"BSD-3-Clause"
] | null | null | null | wordpress/apps.py | 2e2a/django-wordpress | 5417d98128ea6ad4308b250fdee65226e7deb628 | [
"BSD-3-Clause"
] | null | null | null | from django.apps import AppConfig
| 24.833333 | 53 | 0.771812 |
3f5944acb466684ca6235f591cf5e26e8e10c295 | 2,613 | py | Python | libs/libgmp/libgmp.py | wrobelda/craft-blueprints-kde | 366f460cecd5baebdf3a695696767c8c0e5e7c7e | [
"BSD-2-Clause"
] | 14 | 2017-09-04T09:01:03.000Z | 2022-01-04T20:09:00.000Z | libs/libgmp/libgmp.py | wrobelda/craft-blueprints-kde | 366f460cecd5baebdf3a695696767c8c0e5e7c7e | [
"BSD-2-Clause"
] | 14 | 2017-12-15T08:11:22.000Z | 2020-12-29T19:11:13.000Z | libs/libgmp/libgmp.py | wrobelda/craft-blueprints-kde | 366f460cecd5baebdf3a695696767c8c0e5e7c7e | [
"BSD-2-Clause"
] | 19 | 2017-09-05T19:16:21.000Z | 2020-10-18T12:46:06.000Z | # -*- coding: utf-8 -*-
# Copyright 2018 ukasz Wojniowicz <lukasz.wojnilowicz@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import info
from Package.AutoToolsPackageBase import *
from Package.VirtualPackageBase import *
if not CraftCore.compiler.isMSVC():
else:
| 42.836066 | 140 | 0.738615 |