blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f280c39f7214cc27bd841b0e53dbfb11c472c4c1 | a9672f0eb530d0d550070b48fe9d324063ace40b | /dataset/extend_existing_dataset.py | 483a1e4831792d7f6b9b1a2af81868d98beb345d | [
"BSD-3-Clause"
] | permissive | SandUhrGucker/Voice-Cloning-App | 7e025e5493ec0db723e057478e4a11080ed327a3 | 58488aa5690fcb94c778fb6f4d4d909b9f223c72 | refs/heads/main | 2023-07-31T13:10:53.383959 | 2021-09-20T18:53:59 | 2021-09-20T18:53:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,352 | py | import argparse
import logging
import os
from os.path import dirname, abspath
import sys
sys.path.append(dirname(dirname(abspath(__file__))))
from dataset.audio_processing import convert_audio
from dataset.clip_generator import extend_dataset, MIN_LENGTH, MAX_LENGTH
from dataset.analysis import save_dataset_info
def extend_existing_dataset(
    text_path,
    audio_path,
    transcription_model,
    forced_alignment_path,
    output_path,
    label_path,
    suffix,
    info_path,
    logging=logging,
    min_length=MIN_LENGTH,
    max_length=MAX_LENGTH,
    min_confidence=0.85,
    combine_clips=True,
):
    """
    Extends an existing dataset.
    Converts audio to required format, generates clips & produces required files.

    Parameters
    ----------
    text_path : str
        Path to source text
    audio_path : str
        Path to source audio
    transcription_model : TranscriptionModel
        Transcription model
    forced_alignment_path : str
        Path to save alignment JSON to
    output_path : str
        Path to save audio clips to
    label_path : str
        Path to save label file to
    suffix : str
        String suffix to append to filenames
    info_path : str
        Path to save info JSON to
    logging : logging (optional)
        Logging object to write logs to
    min_length : float (optional)
        Minimum clip length accepted by clip generation
        (presumably seconds — confirm against clip_generator)
    max_length : float (optional)
        Maximum clip length accepted by clip generation
    min_confidence : float (optional)
        Minimum confidence score to generate a clip for
    combine_clips : bool (optional)
        Whether clip generation may combine neighbouring clips

    Raises
    -------
    AssertionError
        If given paths are invalid or clips could not be produced
    """
    # The clips folder and metadata file must already exist: we are extending,
    # not creating, a dataset.
    assert os.path.isdir(output_path), "Missing existing dataset clips folder"
    assert os.path.isfile(label_path), "Missing existing dataset metadata file"
    logging.info(f"Converting {audio_path}...")
    converted_audio = convert_audio(audio_path)
    extend_dataset(
        converted_audio,
        text_path,
        transcription_model,
        forced_alignment_path,
        output_path,
        label_path,
        suffix,
        logging=logging,
        min_length=min_length,
        max_length=max_length,
        min_confidence=min_confidence,
        combine_clips=combine_clips,
    )
    logging.info("Getting dataset info...")
    # Do not pass clip lengths from extend_dataset as we need to get size of entire dataset (not just new clips)
    save_dataset_info(label_path, output_path, info_path)
if __name__ == "__main__":
    """Extend existing dataset"""
    # Command-line entry point: collect dataset paths/options and run the
    # extension step.
    parser = argparse.ArgumentParser(description="Extend existing dataset")
    parser.add_argument("-t", "--text_path", help="Path to text file", type=str, required=True)
    parser.add_argument("-a", "--audio_path", help="Path to audio file", type=str, required=True)
    parser.add_argument(
        "-f", "--forced_alignment_path", help="Path to forced alignment JSON", type=str, default="align.json"
    )
    parser.add_argument("-o", "--output_path", help="Path to save snippets", type=str, default="wavs")
    parser.add_argument(
        "-l", "--label_path", help="Path to save snippet labelling text file", type=str, default="metadata.csv"
    )
    parser.add_argument("-s", "--suffix", help="String suffix for added files", type=str, required=True)
    parser.add_argument("-i", "--info_path", help="Path to save info file", type=str, default="info.json")
    args = parser.parse_args()
    # NOTE(review): extend_existing_dataset() has a required `transcription_model`
    # parameter, but no corresponding CLI flag is defined above, so this call
    # should raise TypeError — confirm how the transcription model is meant to
    # be supplied when running this module as a script.
    extend_existing_dataset(**vars(args))
| [
"bandrew01@qub.ac.uk"
] | bandrew01@qub.ac.uk |
c8266c779bd15012980580dab2a2b0f598c212e9 | 38ba13df9ea6e53c7b924cad1f3bea2de59c7a6a | /nibbler/trading/collectors/AlgoTrader/utils/__init__.py | 35df5938672fc9ea34ff2f1b55ef71e5816f2d1b | [] | no_license | JizzFactoryEmployee/nibblerppman | 0fbc1ce662cf8b4868b41a97291250fae29dc41d | 160e557578a3e8a614450354f6ade233d32b052f | refs/heads/master | 2022-11-14T01:10:31.743000 | 2020-07-04T01:21:52 | 2020-07-04T01:21:52 | 273,835,770 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | time_frames = {
    '1m': 60*1000,  # 1 minute in milliseconds
    '5m': 60*1000,  # NOTE(review): same value as '1m' — looks like it should be 5*60*1000; confirm
    '15m': 60*1000,  # NOTE(review): looks like it should be 15*60*1000; confirm
    '1h': 60*60*1000,  # 1 hour
    '2h': 2*60*60*1000,
    '4h': 4*60*60*1000,
    '12h': 12*60*60*1000,
    'd': 24*60*60*1000,  # 1 day
    'w': 7*24*60*60*1000,  # 1 week
    'M': 30*24*60*60*1000,  # 1 month, approximated as 30 days
}
from .function_time_frame_multiplier import (
time_frame_mex, time_frame_multiplier
) | [
"52958901+JizzFactoryEmployee@users.noreply.github.com"
] | 52958901+JizzFactoryEmployee@users.noreply.github.com |
c7486e10f1985261033d2b69fb7b594037405208 | 8d3dddecd11126f51440a4aebe8913d90b6d4e0e | /attractions_qunar/attractions_qunar/pipelines.py | d8ef0ed2a585de97f717b11bada2a590c8da4982 | [] | no_license | ivanliu1989/routes-scraper | 108168c4225df70172df4a41869e650efd0ff0dc | 251e03a68d09fd311f0e49545001b777eb8460df | refs/heads/master | 2020-04-22T16:05:11.328359 | 2019-03-03T08:24:38 | 2019-03-03T08:24:38 | 170,497,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class AttractionsQunarPipeline(object):
    """Pass-through item pipeline for the attractions_qunar project.

    Scrapy invokes ``process_item`` for every item yielded by a spider;
    this implementation applies no cleaning, validation or filtering and
    simply hands each item on unchanged.
    """

    def process_item(self, item, spider):
        # Nothing to transform yet — forward the item to the next stage.
        return item
| [
"ivan.liuyanfeng@gmail.com"
] | ivan.liuyanfeng@gmail.com |
647ee5c0365253201ebc228f53866ed68e2dac87 | f55d730de1f9740aa8cc56b5d404b454dc560963 | /todo_app/todo/apps.py | 26ae8991b56b54f8890c2dd09e5b7dc38b2cd723 | [] | no_license | momchilantonov/ToDoApp | 4857e5d1c7f9d5ae8b2051f0114d1e59666d9a54 | 546032b977658ef1b5767abc049e4cced1840def | refs/heads/main | 2023-06-01T03:02:37.347426 | 2021-06-23T10:07:11 | 2021-06-23T10:07:11 | 374,223,703 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | from django.apps import AppConfig
class TodoConfig(AppConfig):
    """Django application configuration for the todo app."""

    # Use 64-bit auto-incrementing primary keys for models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    # Dotted path of the application package.
    name = 'todo_app.todo'
| [
"eng.antonov@gmail.com"
] | eng.antonov@gmail.com |
866d59ff1f0d2711412caf59b1629bc830a0a8ba | 7486b3af4d4413a96b3e0bf76f776cd8605d7c05 | /WonyJeong/programmers/level2/124.py | 3c76022eaa94e98f79a36ece320078fa5e57430b | [] | no_license | WonyJeong/algorithm-study | 7146e18ec9a3d7f46910e31890768b2e37f8b9b4 | dd659bf75c902800bed226d392d144b691d8e059 | refs/heads/main | 2023-03-31T14:38:47.365622 | 2021-04-02T01:35:36 | 2021-04-02T01:35:36 | 334,309,434 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | # def solution(n):
# answer = ""
# arr = ["4", "1", "2"]
# while n:
# answer = arr[n % 3] + answer
# n = n // 3 - (n % 3 == 0)
# return answer
# for i in range(1, 15):
# print(i, " : ", solution(i))
# 9494
import sys
input = sys.stdin.readline
if __name__ == "__main__":
    # Repeatedly read a block count N from stdin (N == 0 terminates).
    # For each block: read N lines, record the length of the first
    # whitespace-separated token on each line, then print the list of
    # lengths followed by (longest length + 1).
    # Note: `input` is rebound above to sys.stdin.readline, hence the
    # explicit .strip() calls.
    N = int(input().strip())
    while N != 0:
        text = []
        for _ in range(N):
            # Only the first token's length matters; the rest of the line is ignored.
            text.append(len(input().strip().split()[0]))
        print(text)
        print(max(text) + 1)
        N = int(input().strip())
"59948675+WonyJeong@users.noreply.github.com"
] | 59948675+WonyJeong@users.noreply.github.com |
5454e587a38bd0c25fc6e81b25d9de677ba6d29e | ef3a7391b0a5c5d8e276355e97cbe4de621d500c | /venv/Lib/site-packages/wasabi/tables.py | a6c71603a2adcb97ef07d9afa29efc2944a607c1 | [
"Apache-2.0",
"MIT"
] | permissive | countBMB/BenjiRepo | 143f6da5d198ea6f06404b4559e1f4528b71b3eb | 79d882263baaf2a11654ca67d2e5593074d36dfa | refs/heads/master | 2022-12-11T07:37:04.807143 | 2019-12-25T11:26:29 | 2019-12-25T11:26:29 | 230,090,428 | 1 | 1 | Apache-2.0 | 2022-12-08T03:21:09 | 2019-12-25T11:05:59 | Python | UTF-8 | Python | false | false | 3,941 | py | # coding: utf8
from __future__ import unicode_literals, print_function
from .util import to_string, zip_longest, basestring_
ALIGN_MAP = {"l": "<", "r": ">", "c": "^"}
def table(
    data,
    header=None,
    footer=None,
    divider=False,
    widths="auto",
    max_col=30,
    spacing=3,
    aligns=None,
    multiline=False,
    indent=0,
):
    """Format tabular data.

    data (iterable / dict): The data to render. Either a list of lists (one per
        row) or a dict for two-column tables.
    header (iterable): The header columns.
    footer (iterable): The footer columns.
    divider (bool): Show a divider line between header/footer and body.
    widths (iterable or 'auto'): Column widths in order. If "auto", widths
        will be calculated automatically based on the largest value.
    max_col (int): Maximum column width.
    spacing (int): Spacing between columns, in spaces.
    aligns (iterable / unicode): Column alignments in order. 'l' (left,
        default), 'r' (right) or 'c' (center). If a string, value is used
        for all columns.
    multiline (bool): If a cell value is a list of a tuple, render it on
        multiple lines, with one value per line.
    indent (int): Number of spaces to use for indentation.
    RETURNS (unicode): The formatted table.
    """
    # A dict renders as a two-column table: keys left, values right.
    if isinstance(data, dict):
        data = list(data.items())
    if multiline:
        zipped_data = []
        for i, item in enumerate(data):
            # Wrap scalar cells in a one-element list so every cell is a
            # sequence, then transpose: each position becomes its own row,
            # padding short cells with empty strings.
            vals = [v if isinstance(v, (list, tuple)) else [v] for v in item]
            zipped_data.extend(list(zip_longest(*vals, fillvalue="")))
            # Blank spacer row between logical rows (but not after the last).
            if i < len(data) - 1:
                zipped_data.append(["" for i in item])
        data = zipped_data
    if widths == "auto":
        widths = _get_max_widths(data, header, footer, max_col)
    # Shared keyword arguments passed to row() for every rendered line.
    settings = {
        "widths": widths,
        "spacing": spacing,
        "aligns": aligns,
        "indent": indent,
    }
    divider_row = row(["-" * width for width in widths], **settings)
    rows = []
    if header:
        rows.append(row(header, **settings))
        if divider:
            rows.append(divider_row)
    for i, item in enumerate(data):
        rows.append(row(item, **settings))
    if footer:
        if divider:
            rows.append(divider_row)
        rows.append(row(footer, **settings))
    # Surround the table with blank lines.
    return "\n{}\n".format("\n".join(rows))
def row(data, widths="auto", spacing=3, aligns=None, indent=0):
    """Format data as a table row.

    data (iterable): The individual columns to format.
    widths (iterable, int or 'auto'): Column widths, either one integer for all
        columns or an iterable of values. If "auto", widths will be calculated
        automatically based on the largest value.
    spacing (int): Spacing between columns, in spaces.
    aligns (iterable / unicode): Column alignments in order. 'l' (left,
        default), 'r' (right) or 'c' (center). If a string, value is used
        for all columns.
    indent (int): Number of spaces to use for indentation.
    RETURNS (unicode): The formatted row.
    """
    # A single alignment string applies to every column.
    if isinstance(aligns, basestring_):
        aligns = [aligns] * len(list(data))
    # A single (non-iterable) width applies to every column; the string
    # "auto" is iterable and therefore passes through untouched.
    if not hasattr(widths, "__iter__"):
        widths = [widths] * len(data)
    formatted = []
    for idx, cell in enumerate(data):
        if aligns and idx < len(aligns):
            align_char = ALIGN_MAP.get(aligns[idx])
        else:
            align_char = ALIGN_MAP.get("l")
        cell_width = len(cell) if widths == "auto" else widths[idx]
        template = "{:%s%d}" % (align_char, cell_width)
        formatted.append(template.format(to_string(cell)))
    return " " * indent + (" " * spacing).join(formatted)
def _get_max_widths(data, header, footer, max_col):
    """Compute the width of each column (widest cell, capped at max_col)."""
    rows = list(data)
    # Header and footer participate in width calculation when present.
    rows.extend(extra for extra in (header, footer) if extra)
    cell_widths = [[len(to_string(cell)) for cell in r] for r in rows]
    # Transpose so each entry is one column's widths, then cap at max_col.
    return [min(max(column), max_col) for column in zip(*cell_widths)]
| [
"bengmen92@gmail.com"
] | bengmen92@gmail.com |
c890957d28cadac134e3484f1a486d85c08e3454 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_buffets.py | 235a40b7167bfe4459bba376990f1586441cf568 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.verbs._buffet import _BUFFET
# class header
class _BUFFETS(_BUFFET, ):
    """Word entry for the plural verb form "buffets", derived from _BUFFET."""

    def __init__(self,):
        _BUFFET.__init__(self)
        self.name = "BUFFETS"  # canonical (upper-case) word form
        self.specie = 'verbs'  # part-of-speech bucket this entry belongs to
        self.basic = "buffet"  # base/lemma form of the word
        self.jsondata = {}  # extra metadata; empty for this derived entry
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
1be6d2a87f5e4bc6b2fb71d1525a1f78a6dadd41 | e04dbc32247accf073e3089ed4013427ad182c7c | /sumitb2019/C.py | 3653705f97554a609bfbf09a858d5d307f6c71a6 | [] | no_license | twobooks/atcoder_training | 9deb237aed7d9de573c1134a858e96243fb73ca0 | aa81799ec87cc9c9d76de85c55e99ad5fa7676b5 | refs/heads/master | 2021-10-28T06:33:19.459975 | 2021-10-20T14:16:57 | 2021-10-20T14:16:57 | 233,233,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | # from math import factorial,sqrt,ceil,gcd
# from itertools import permutations as permus
from collections import deque,Counter
# import re
# from functools import lru_cache # 簡単メモ化 @lru_cache(maxsize=1000)
# from decimal import Decimal, getcontext
# # getcontext().prec = 1000
# # eps = Decimal(10) ** (-100)
# import numpy as np
# import networkx as nx
# from scipy.sparse.csgraph import shortest_path, dijkstra, floyd_warshall, bellman_ford, johnson
# from scipy.sparse import csr_matrix
# from scipy.special import comb
# slist = "abcdefghijklmnopqrstuvwxyz"
X = int(input())
# dp marks every total price attainable by buying any number of items
# priced 100..105 each; the six single-item totals seed the search.
dp = {100:1,101:1,102:1,103:1,104:1,105:1}
lis = [100,101,102,103,104,105]
que = deque([100,101,102,103,104,105])
# BFS over sums: expand each reachable total by adding one more item.
while len(que)>0:
    num = que.popleft()
    for i in lis:
        dp[num+i] = 1
        # Only expand totals up to the problem bound (100000) and avoid
        # queueing a value that is already waiting to be processed.
        # NOTE(review): `in que` on a deque is O(n) per check — acceptable
        # for this bound, but a visited-set would be the usual choice.
        if num+i <= 100000 and not(num+i in que):
            que.append(num + i)
# Answer is 1 if X is an attainable total, else 0.
if X in dp:
    ans = 1
else:
    ans = 0
print(ans)
# print(*ans) # unpackして出力。間にスペースが入る
# for row in board:
# print(*row,sep="") #unpackして間にスペース入れずに出力する
# print("{:.10f}".format(ans))
# print("{:0=10d}".format(ans))
| [
"twobookscom@gmail.com"
] | twobookscom@gmail.com |
9be1a32eae5acfc9bd5b8570c0052eb586a1891e | 956fd28ea7a7ec83b62cd85691c512e735e60b3a | /bin/azure/mgmt/datamigration/models/project_task_properties_py3.py | cbd010514ded3c5031944e6c904da5df706b5e3a | [
"MIT"
] | permissive | zdmc23/bash-lambda-layer | 5517a27809d33801c65504c11f867d0d511b2e1c | e762df0189cfb894dab2d96bae1655b8857d5efb | refs/heads/master | 2021-01-05T02:32:20.765963 | 2020-02-16T09:41:47 | 2020-02-16T09:41:47 | 240,846,840 | 0 | 0 | MIT | 2020-02-16T06:59:55 | 2020-02-16T06:59:54 | null | UTF-8 | Python | false | false | 2,820 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ProjectTaskProperties(Model):
    """Base class for all types of DMS task properties. If task is not supported
    by current client, this object is returned.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: ValidateMigrationInputSqlServerSqlMITaskProperties,
    MigrateSqlServerSqlDbTaskProperties, MigrateSqlServerSqlMITaskProperties,
    GetUserTablesSqlTaskProperties, ConnectToTargetSqlDbTaskProperties,
    ConnectToTargetSqlMITaskProperties, ConnectToSourceSqlServerTaskProperties

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar errors: Array of errors. This is ignored if submitted.
    :vartype errors: list[~azure.mgmt.datamigration.models.ODataError]
    :ivar state: The state of the task. This is ignored if submitted. Possible
     values include: 'Unknown', 'Queued', 'Running', 'Canceled', 'Succeeded',
     'Failed', 'FailedInputValidation', 'Faulted'
    :vartype state: str or ~azure.mgmt.datamigration.models.TaskState
    :param task_type: Required. Constant filled by server.
    :type task_type: str
    """

    # msrest validation rules: server-populated fields are read-only; the
    # task_type discriminator must always be present.
    _validation = {
        'errors': {'readonly': True},
        'state': {'readonly': True},
        'task_type': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'errors': {'key': 'errors', 'type': '[ODataError]'},
        'state': {'key': 'state', 'type': 'str'},
        'task_type': {'key': 'taskType', 'type': 'str'},
    }

    # Maps each 'taskType' discriminator value sent by the service to the
    # concrete subclass msrest should deserialize the payload into.
    _subtype_map = {
        'task_type': {'ValidateMigrationInput.SqlServer.AzureSqlDbMI': 'ValidateMigrationInputSqlServerSqlMITaskProperties', 'Migrate.SqlServer.SqlDb': 'MigrateSqlServerSqlDbTaskProperties', 'Migrate.SqlServer.AzureSqlDbMI': 'MigrateSqlServerSqlMITaskProperties', 'GetUserTables.Sql': 'GetUserTablesSqlTaskProperties', 'ConnectToTarget.SqlDb': 'ConnectToTargetSqlDbTaskProperties', 'ConnectToTarget.AzureSqlDbMI': 'ConnectToTargetSqlMITaskProperties', 'ConnectToSource.SqlServer': 'ConnectToSourceSqlServerTaskProperties'}
    }

    def __init__(self, **kwargs) -> None:
        super(ProjectTaskProperties, self).__init__(**kwargs)
        # Server-populated fields start as None until deserialization fills them.
        self.errors = None
        self.state = None
        self.task_type = None
| [
"191707+zdmc23@users.noreply.github.com"
] | 191707+zdmc23@users.noreply.github.com |
381379527748a48ff699ba2f1009df2440fa6a78 | 906b969c383a440940af12f0e1cc01daedc475aa | /data_store/mongo_paginator.py | 07c4ecb439f02eb6a36d85a57a4e659afb6101f2 | [] | no_license | ok-water-survey/api | 6f11a1ac2bb0f4b6822c26fae684447a726bc24b | 3e39910ae9c09d208ce2a855a8920d659ed7049b | refs/heads/master | 2021-01-22T03:54:27.868634 | 2015-01-14T16:53:16 | 2015-01-14T16:53:16 | 25,709,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,432 | py | __author__ = 'mstacy'
import ast
import math
import collections
from ordereddict import OrderedDict
from rest_framework.templatetags.rest_framework import replace_query_param
def MongoDataPagination(DB_MongoClient, database, collection, query=None, page=1, nPerPage=None, uri=''):
    """Run an (optionally filtered) MongoDB find and return one page of results.

    NOTE: this function uses Python 2 ``print`` statements (debug output).

    DB_MongoClient: an open pymongo MongoClient.
    database / collection: names used to address the collection.
    query: optional string that literal-evals to a dict of pymongo find()
        kwargs containing at least a 'spec' filter.
    page: 1-based page number to return.
    nPerPage: page size; 0 means "return everything as a single page".
        NOTE(review): the default None will raise TypeError in the
        float(count) / nPerPage division — callers apparently always pass
        a value; confirm.
    uri: request URI used to build 'next'/'previous' page links.

    Returns an OrderedDict with keys: count, meta (page/page_size/pages),
    next, previous, results.
    """
    db = DB_MongoClient
    # The filtered and unfiltered paths duplicate the same pagination logic;
    # they differ only in the find() arguments.
    if query:
        # query arrives as a string; literal_eval turns it into kwargs.
        query = ast.literal_eval(query)
        q = [(k, v) for k, v in query['spec'].items()]
        query['spec'] = dict(q)
        print query
        count = db[database][collection].find(**query).count()
        print count
        if nPerPage == 0:
            # Page size 0: everything on one page (limit(0) means no limit).
            page=1
            offset=0
            max_page=1
        else:
            max_page = math.ceil(float(count) / nPerPage)
            # Page min is 1
            if page < 1:
                page = 1
            #Change page to last page with data
            if page * nPerPage > count:
                page = int(max_page)
            #Cover count =0
            if page < 1:
                page = 1
            offset = (page - 1) * nPerPage
        data = [row for row in db[database][collection].find(**query).skip(offset).limit(nPerPage)]
        #replace_query_param(uri, 'page', page)
    else:
        count = db[database][collection].find().count()
        if nPerPage == 0:
            page=1
            offset=0
            max_page=1
        else:
            max_page = math.ceil(float(count) / nPerPage)
            print max_page
            # Page min is 1
            if page < 1:
                page = 1
            #Change page to last page with data
            if page * nPerPage > count:
                page = int(max_page)
            #Cover count =0
            if page < 1:
                page = 1
            offset = (page - 1) * nPerPage
        data = [row for row in db[database][collection].find().skip(offset).limit(nPerPage)]
    # Build next/previous page links only where pages exist on that side.
    if page < max_page:
        next = replace_query_param(uri, 'page', page + 1)
    else:
        next = None
    if page > 1:
        previous = replace_query_param(uri, 'page', page - 1)
    else:
        previous = None
    result = {'count': count, 'meta': {'page': page, 'page_size': nPerPage, 'pages': int(max_page)}, 'next': next,
              'previous': previous, 'results': data}
    try:
        od = collections.OrderedDict(sorted(result.items()))
    except:
        # older python versions < 2.7
        od = OrderedDict(sorted(result.items()))
return od | [
"mbstacy@gmail.com"
] | mbstacy@gmail.com |
805c58d57c9fad852f9e5bb34ff321d60b1010a5 | 11f5853044bdfe25c85951b5c540bf759478c7d0 | /test/test_sequence_context.py | 76aa96db3e0a72c7c035b926cc88ed29fd55fb41 | [
"Apache-2.0"
] | permissive | alexanderwhatley/pepnet | 2dbe894d31cfeef4b7404092ad6034640a33e791 | 82a3087262917d4780ed8facbd49b766f2ff9200 | refs/heads/master | 2020-04-21T06:03:12.297328 | 2019-02-06T20:55:50 | 2019-02-06T20:55:54 | 169,356,393 | 0 | 0 | Apache-2.0 | 2019-02-06T04:49:07 | 2019-02-06T04:49:06 | null | UTF-8 | Python | false | false | 745 | py | from pepnet import Predictor, SequenceInput, Output
import numpy as np
def test_model_with_fixed_length_context():
    """End-to-end check: a Predictor mixing fixed- and variable-length inputs
    can fit four labelled samples and reproduce the labels after thresholding.
    """
    # Two single-residue fixed-length context inputs plus one variable-length
    # peptide input, feeding a single sigmoid output.
    model = Predictor(
        inputs={
            "upstream": SequenceInput(length=1, variable_length=False),
            "downstream": SequenceInput(length=1, variable_length=False),
            "peptide": SequenceInput(length=3, variable_length=True)},
        outputs=Output(1, activation="sigmoid"))
    # Four training samples with alternating boolean labels.
    Y = np.array([True, False, True, False])
    input_dict = {
        "upstream": ["Q", "A", "L", "I"],
        "downstream": ["S"] * 4,
        "peptide": ["SYF", "QQ", "C", "GLL"]
    }
    model.fit(input_dict, Y, epochs=20)
    Y_pred = model.predict(input_dict)
    # Predictions thresholded at 0.5 must match the training labels exactly.
    assert (Y == (Y_pred > 0.5)).all(), (Y, Y_pred)
| [
"alex.rubinsteyn@gmail.com"
] | alex.rubinsteyn@gmail.com |
6830ac1d3b3ee6a74480064ecb8f6c97a4c32311 | 653eaef652627b155569b5fe9ab9bb3607fc1e78 | /alg/counterfactual_recurrent_network/CRN_model.py | 50092cc6d21a77d201ad5cdb8d3df7da96bc1d1c | [
"BSD-3-Clause",
"MIT"
] | permissive | IlyaTrofimov/mlforhealthlabpub | 11ab86a83bd2ffd2574364a956b322b0c62406ae | 190cbad2faae9e559ffe7a68143df7f747d70adc | refs/heads/main | 2023-04-16T03:58:38.423288 | 2021-04-21T10:22:43 | 2021-04-21T10:22:43 | 358,528,623 | 0 | 0 | NOASSERTION | 2021-04-16T08:25:26 | 2021-04-16T08:25:25 | null | UTF-8 | Python | false | false | 23,932 | py | '''
Title: Estimating counterfactual treatment outcomes over time through adversarially balanced representations
Authors: Ioana Bica, Ahmed M. Alaa, James Jordon, Mihaela van der Schaar
International Conference on Learning Representations (ICLR) 2020
Last Updated Date: January 15th 2020
Code Author: Ioana Bica (ioana.bica95@gmail.com)
'''
import tensorflow as tf
from tensorflow.contrib.rnn import LSTMCell, DropoutWrapper
from tensorflow.python.ops import rnn
from utils.flip_gradient import flip_gradient
import numpy as np
import os
import logging
class CRN_Model:
    def __init__(self, params, hyperparams, b_train_decoder=False):
        """Store dimensions/hyperparameters and build the TF1.x graph placeholders.

        params: dict with dataset dimensions ('num_treatments', 'num_covariates',
            'num_outputs', 'max_sequence_length') and 'num_epochs'.
        hyperparams: dict with network sizes and training settings ('br_size',
            'rnn_hidden_units', 'fc_hidden_units', 'batch_size', 'rnn_keep_prob',
            'learning_rate').
        b_train_decoder: when True, the model acts as a decoder and an extra
            placeholder for its initial RNN state is created.
        """
        self.num_treatments = params['num_treatments']
        self.num_covariates = params['num_covariates']
        self.num_outputs = params['num_outputs']
        self.max_sequence_length = params['max_sequence_length']
        self.num_epochs = params['num_epochs']
        self.br_size = hyperparams['br_size']
        self.rnn_hidden_units = hyperparams['rnn_hidden_units']
        self.fc_hidden_units = hyperparams['fc_hidden_units']
        self.batch_size = hyperparams['batch_size']
        self.rnn_keep_prob = hyperparams['rnn_keep_prob']
        self.learning_rate = hyperparams['learning_rate']
        self.b_train_decoder = b_train_decoder
        # Clear any previously built graph (TF 1.x global-graph style).
        tf.reset_default_graph()
        # All sequence placeholders are [batch, max_sequence_length, features].
        self.current_covariates = tf.placeholder(tf.float32, [None, self.max_sequence_length, self.num_covariates])
        # Initial previous treatment needs to consist of zeros (this is done when building the feed dictionary)
        self.previous_treatments = tf.placeholder(tf.float32, [None, self.max_sequence_length, self.num_treatments])
        self.current_treatments = tf.placeholder(tf.float32, [None, self.max_sequence_length, self.num_treatments])
        self.outputs = tf.placeholder(tf.float32, [None, self.max_sequence_length, self.num_outputs])
        self.active_entries = tf.placeholder(tf.float32, [None, self.max_sequence_length, self.num_outputs])
        # Decoder-only: placeholder for the initial RNN state handed over
        # from the encoder.
        self.init_state = None
        if (self.b_train_decoder):
            self.init_state = tf.placeholder(tf.float32, [None, self.rnn_hidden_units])
        self.alpha = tf.placeholder(tf.float32, [])  # Gradient reversal scalar
    def build_balancing_representation(self):
        """Encode covariate + previous-treatment sequences into balancing
        representations.

        Returns a tensor of shape [batch * max_sequence_length, br_size]: the
        RNN output is flattened over time so one dense layer is shared across
        all time steps.
        """
        self.rnn_input = tf.concat([self.current_covariates, self.previous_treatments], axis=-1)
        # Per-sample true sequence lengths (helper defined elsewhere in class).
        self.sequence_length = self.compute_sequence_length(self.rnn_input)
        # variational_recurrent=True keeps the same dropout mask across
        # time steps (variational RNN dropout).
        rnn_cell = DropoutWrapper(LSTMCell(self.rnn_hidden_units, state_is_tuple=False),
                                  output_keep_prob=self.rnn_keep_prob,
                                  state_keep_prob=self.rnn_keep_prob,
                                  variational_recurrent=True,
                                  dtype=tf.float32)
        decoder_init_state = None
        if (self.b_train_decoder):
            # state_is_tuple=False expects cell state and hidden state
            # concatenated into one tensor; both halves are seeded from
            # the encoder-provided init_state.
            decoder_init_state = tf.concat([self.init_state, self.init_state], axis=-1)
        rnn_output, _ = rnn.dynamic_rnn(
            rnn_cell,
            self.rnn_input,
            initial_state=decoder_init_state,
            dtype=tf.float32,
            sequence_length=self.sequence_length)
        # Flatten to apply same weights to all time steps.
        rnn_output = tf.reshape(rnn_output, [-1, self.rnn_hidden_units])
        balancing_representation = tf.layers.dense(rnn_output, self.br_size, activation=tf.nn.elu)
        return balancing_representation
    def build_treatment_assignments_one_hot(self, balancing_representation):
        """Treatment-classifier head trained through a gradient-reversal layer.

        flip_gradient reverses the gradients (scaled by self.alpha) flowing
        back into the balancing representation, adversarially pushing it to
        be non-predictive of the assigned treatment.
        Returns per-timestep treatment probabilities (softmax over treatments).
        """
        balancing_representation_gr = flip_gradient(balancing_representation, self.alpha)
        treatments_network_layer = tf.layers.dense(balancing_representation_gr, self.fc_hidden_units,
                                                   activation=tf.nn.elu)
        treatment_logit_predictions = tf.layers.dense(treatments_network_layer, self.num_treatments, activation=None)
        treatment_prob_predictions = tf.nn.softmax(treatment_logit_predictions)
        return treatment_prob_predictions
    def build_outcomes(self, balancing_representation):
        """Outcome-prediction head.

        Concatenates the (flattened) balancing representation with the
        current treatment assignment and regresses the outcome through one
        hidden layer. Returns [batch * max_sequence_length, num_outputs].
        """
        current_treatments_reshape = tf.reshape(self.current_treatments, [-1, self.num_treatments])
        outcome_network_input = tf.concat([balancing_representation, current_treatments_reshape], axis=-1)
        outcome_network_layer = tf.layers.dense(outcome_network_input, self.fc_hidden_units,
                                                activation=tf.nn.elu)
        outcome_predictions = tf.layers.dense(outcome_network_layer, self.num_outputs, activation=None)
        return outcome_predictions
    def train(self, dataset_train, dataset_val, model_name, model_folder):
        """Build the graph, train for num_epochs, and save a final checkpoint.

        The combined loss is outcome loss + adversarial treatment loss; the
        gradient-reversal strength alpha is annealed from 0 towards 1 over
        training (standard DANN schedule).
        """
        self.balancing_representation = self.build_balancing_representation()
        self.treatment_prob_predictions = self.build_treatment_assignments_one_hot(self.balancing_representation)
        self.predictions = self.build_outcomes(self.balancing_representation)
        self.loss_treatments = self.compute_loss_treatments_one_hot(target_treatments=self.current_treatments,
                                                                    treatment_predictions=self.treatment_prob_predictions,
                                                                    active_entries=self.active_entries)
        self.loss_outcomes = self.compute_loss_predictions(self.outputs, self.predictions, self.active_entries)
        # Joint objective: predict outcomes well while (via gradient reversal)
        # making the representation non-predictive of the treatment.
        self.loss = self.loss_outcomes + self.loss_treatments
        optimizer = self.get_optimizer()
        # Setup tensorflow
        # NOTE(review): device selection is hard-coded to 'gpu' here.
        tf_device = 'gpu'
        if tf_device == "cpu":
            tf_config = tf.ConfigProto(log_device_placement=False, device_count={'GPU': 0})
        else:
            tf_config = tf.ConfigProto(log_device_placement=False, device_count={'GPU': 1})
            tf_config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=tf_config)
        self.sess.run(tf.global_variables_initializer())
        self.sess.run(tf.local_variables_initializer())
        for epoch in range(self.num_epochs):
            # Anneal gradient-reversal strength: 2/(1+exp(-10p)) - 1 goes
            # from 0 at p=0 towards 1 as training progresses.
            p = float(epoch) / float(self.num_epochs)
            alpha_current = 2. / (1. + np.exp(-10. * p)) - 1
            iteration = 0
            for (batch_current_covariates, batch_previous_treatments, batch_current_treatments, batch_init_state,
                 batch_outputs, batch_active_entries) in self.gen_epoch(dataset_train, batch_size=self.batch_size):
                feed_dict = self.build_feed_dictionary(batch_current_covariates, batch_previous_treatments,
                                                       batch_current_treatments, batch_init_state, batch_outputs,
                                                       batch_active_entries,
                                                       alpha_current)
                _, training_loss, training_loss_outcomes, training_loss_treatments = self.sess.run(
                    [optimizer, self.loss, self.loss_outcomes, self.loss_treatments],
                    feed_dict=feed_dict)
                iteration += 1
            # Logged losses are those of the last mini-batch of the epoch.
            logging.info(
                "Epoch {} out of {} | total loss = {} | outcome loss = {} | "
                "treatment loss = {} | current alpha = {} ".format(epoch + 1, self.num_epochs, training_loss,
                                                                   training_loss_outcomes,
                                                                   training_loss_treatments, alpha_current))
            # Validation loss
            validation_loss, validation_loss_outcomes, \
            validation_loss_treatments = self.compute_validation_loss(dataset_val)
            validation_mse, _ = self.evaluate_predictions(dataset_val)
            logging.info(
                "Epoch {} Summary| Validation total loss = {} | Validation outcome loss = {} | Validation treatment loss {} | Validation mse = {}".format(
                    epoch, validation_loss, validation_loss_outcomes, validation_loss_treatments, validation_mse))
        checkpoint_name = model_name + "_final"
        self.save_network(self.sess, model_folder, checkpoint_name)
    def load_model(self, model_name, model_folder):
        """Rebuild the inference graph and restore weights from the final
        checkpoint saved by train() ("<model_name>_final")."""
        self.balancing_representation = self.build_balancing_representation()
        self.treatment_prob_predictions = self.build_treatment_assignments_one_hot(self.balancing_representation)
        self.predictions = self.build_outcomes(self.balancing_representation)
        # NOTE(review): device selection is hard-coded to 'gpu', same as train().
        tf_device = 'gpu'
        if tf_device == "cpu":
            tf_config = tf.ConfigProto(log_device_placement=False, device_count={'GPU': 0})
        else:
            tf_config = tf.ConfigProto(log_device_placement=False, device_count={'GPU': 1})
            tf_config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=tf_config)
        self.sess.run(tf.global_variables_initializer())
        checkpoint_name = model_name + "_final"
        self.load_network(self.sess, model_folder, checkpoint_name)
def build_feed_dictionary(self, batch_current_covariates, batch_previous_treatments,
batch_current_treatments, batch_init_state,
batch_outputs=None, batch_active_entries=None,
alpha_current=1.0, lr_current=0.01, training_mode=True):
batch_size = batch_previous_treatments.shape[0]
zero_init_treatment = np.zeros(shape=[batch_size, 1, self.num_treatments])
new_batch_previous_treatments = np.concatenate([zero_init_treatment, batch_previous_treatments], axis=1)
if training_mode:
if self.b_train_decoder:
feed_dict = {self.current_covariates: batch_current_covariates,
self.previous_treatments: batch_previous_treatments,
self.current_treatments: batch_current_treatments,
self.init_state: batch_init_state,
self.outputs: batch_outputs,
self.active_entries: batch_active_entries,
self.alpha: alpha_current}
else:
feed_dict = {self.current_covariates: batch_current_covariates,
self.previous_treatments: new_batch_previous_treatments,
self.current_treatments: batch_current_treatments,
self.outputs: batch_outputs,
self.active_entries: batch_active_entries,
self.alpha: alpha_current}
else:
if self.b_train_decoder:
feed_dict = {self.current_covariates: batch_current_covariates,
self.previous_treatments: batch_previous_treatments,
self.current_treatments: batch_current_treatments,
self.init_state: batch_init_state,
self.alpha: alpha_current}
else:
feed_dict = {self.current_covariates: batch_current_covariates,
self.previous_treatments: new_batch_previous_treatments,
self.current_treatments: batch_current_treatments,
self.alpha: alpha_current}
return feed_dict
    def gen_epoch(self, dataset, batch_size, training_mode=True):
        """Yield mini-batches of the dataset for one epoch.

        In training mode each batch is a 6-tuple including targets and
        active-entry masks; otherwise a 4-tuple of inputs only. init_state
        is included only when b_train_decoder is True, else None.

        NOTE(review): num_batches = size // batch_size + 1 and the final
        batch is always the last `batch_size` samples, so trailing samples
        can appear twice per epoch (and batch_size > dataset size would
        produce a negative range start) — confirm this is intended.
        """
        dataset_size = dataset['current_covariates'].shape[0]
        num_batches = int(dataset_size / batch_size) + 1
        for i in range(num_batches):
            if (i == num_batches - 1):
                # Last batch: take the final batch_size samples (may overlap
                # with the previous batch).
                batch_samples = range(dataset_size - batch_size, dataset_size)
            else:
                batch_samples = range(i * batch_size, (i + 1) * batch_size)
            if training_mode:
                batch_current_covariates = dataset['current_covariates'][batch_samples, :, :]
                batch_previous_treatments = dataset['previous_treatments'][batch_samples, :, :]
                batch_current_treatments = dataset['current_treatments'][batch_samples, :, :]
                batch_outputs = dataset['outputs'][batch_samples, :, :]
                batch_active_entries = dataset['active_entries'][batch_samples, :, :]
                batch_init_state = None
                if self.b_train_decoder:
                    batch_init_state = dataset['init_state'][batch_samples, :]
                yield (batch_current_covariates, batch_previous_treatments, batch_current_treatments, batch_init_state,
                       batch_outputs, batch_active_entries)
            else:
                # Inference mode: inputs only, no targets/masks.
                batch_current_covariates = dataset['current_covariates'][batch_samples, :, :]
                batch_previous_treatments = dataset['previous_treatments'][batch_samples, :, :]
                batch_current_treatments = dataset['current_treatments'][batch_samples, :, :]
                batch_init_state = None
                if self.b_train_decoder:
                    batch_init_state = dataset['init_state'][batch_samples, :]
                yield (batch_current_covariates, batch_previous_treatments, batch_current_treatments, batch_init_state)
    def compute_validation_loss(self, dataset):
        """Average the total / outcome / treatment losses over ``dataset``.

        Runs the graph's loss ops batch-by-batch and returns the mean of the
        per-batch values (a close approximation of the dataset loss given
        gen_epoch's overlapping last batch).

        Returns:
            Tuple (total loss, outcome loss, treatment loss) as scalars.
        """
        validation_losses = []
        validation_losses_outcomes = []
        validation_losses_treatments = []
        dataset_size = dataset['current_covariates'].shape[0]
        # Cap the batch size so very large validation sets fit in memory.
        if (dataset_size > 10000):
            batch_size = 10000
        else:
            batch_size = dataset_size
        for (batch_current_covariates, batch_previous_treatments, batch_current_treatments, batch_init_state,
             batch_outputs, batch_active_entries) in self.gen_epoch(dataset, batch_size=batch_size):
            feed_dict = self.build_feed_dictionary(batch_current_covariates, batch_previous_treatments,
                                                   batch_current_treatments, batch_init_state, batch_outputs,
                                                   batch_active_entries)
            validation_loss, validation_loss_outcomes, validation_loss_treatments = self.sess.run(
                [self.loss, self.loss_outcomes, self.loss_treatments],
                feed_dict=feed_dict)
            validation_losses.append(validation_loss)
            validation_losses_outcomes.append(validation_loss_outcomes)
            validation_losses_treatments.append(validation_loss_treatments)
        validation_loss = np.mean(np.array(validation_losses))
        validation_loss_outcomes = np.mean(np.array(validation_losses_outcomes))
        validation_loss_treatments = np.mean(np.array(validation_losses_treatments))
        return validation_loss, validation_loss_outcomes, validation_loss_treatments
    def get_balancing_reps(self, dataset):
        """Compute balancing representations for every sample in ``dataset``.

        Runs the network in inference mode and averages ``num_samples``
        stochastic forward passes (MC dropout) per batch.  Batch indexing
        mirrors gen_epoch: the last batch covers the final ``batch_size``
        rows, so any overlapping rows are overwritten with identical values.

        Returns:
            Array of shape (dataset_size, max_sequence_length, br_size).
        """
        logging.info("Computing balancing representations.")
        dataset_size = dataset['current_covariates'].shape[0]
        balancing_reps = np.zeros(
            shape=(dataset_size, self.max_sequence_length, self.br_size))
        dataset_size = dataset['current_covariates'].shape[0]
        if (dataset_size > 10000):  # Does not fit into memory
            batch_size = 10000
        else:
            batch_size = dataset_size
        num_batches = int(dataset_size / batch_size) + 1
        batch_id = 0
        num_samples = 50  # MC dropout samples averaged per representation
        for (batch_current_covariates, batch_previous_treatments,
             batch_current_treatments, batch_init_state) in self.gen_epoch(dataset, batch_size=batch_size,
                                                                           training_mode=False):
            feed_dict = self.build_feed_dictionary(batch_current_covariates, batch_previous_treatments,
                                                   batch_current_treatments, batch_init_state, training_mode=False)
            # Dropout samples
            total_predictions = np.zeros(
                shape=(batch_size, self.max_sequence_length, self.br_size))
            for sample in range(num_samples):
                br_outputs = self.sess.run(self.balancing_representation, feed_dict=feed_dict)
                br_outputs = np.reshape(br_outputs,
                                        newshape=(-1, self.max_sequence_length, self.br_size))
                total_predictions += br_outputs
            total_predictions /= num_samples
            # Mirror gen_epoch's batching so results land on the right rows.
            if (batch_id == num_batches - 1):
                batch_samples = range(dataset_size - batch_size, dataset_size)
            else:
                batch_samples = range(batch_id * batch_size, (batch_id + 1) * batch_size)
            batch_id += 1
            balancing_reps[batch_samples] = total_predictions
        return balancing_reps
def get_predictions(self, dataset):
logging.info("Performing one-step-ahed prediction.")
dataset_size = dataset['current_covariates'].shape[0]
predictions = np.zeros(
shape=(dataset_size, self.max_sequence_length, self.num_outputs))
dataset_size = dataset['current_covariates'].shape[0]
if (dataset_size > 10000):
batch_size = 10000
else:
batch_size = dataset_size
num_batches = int(dataset_size / batch_size) + 1
batch_id = 0
num_samples = 50
for (batch_current_covariates, batch_previous_treatments,
batch_current_treatments, batch_init_state) in self.gen_epoch(dataset, batch_size=batch_size,
training_mode=False):
feed_dict = self.build_feed_dictionary(batch_current_covariates, batch_previous_treatments,
batch_current_treatments, batch_init_state, training_mode=False)
# Dropout samples
total_predictions = np.zeros(
shape=(batch_size, self.max_sequence_length, self.num_outputs))
for sample in range(num_samples):
predicted_outputs = self.sess.run(self.predictions, feed_dict=feed_dict)
predicted_outputs = np.reshape(predicted_outputs,
newshape=(-1, self.max_sequence_length, self.num_outputs))
total_predictions += predicted_outputs
total_predictions /= num_samples
if (batch_id == num_batches - 1):
batch_samples = range(dataset_size - batch_size, dataset_size)
else:
batch_samples = range(batch_id * batch_size, (batch_id + 1) * batch_size)
batch_id += 1
predictions[batch_samples] = total_predictions
return predictions
def get_autoregressive_sequence_predictions(self, test_data, data_map, encoder_states, encoder_outputs,
projection_horizon):
logging.info("Performing multi-step ahead prediction.")
current_treatments = data_map['current_treatments']
previous_treatments = data_map['previous_treatments']
sequence_lengths = test_data['sequence_lengths'] - 1
num_patient_points = current_treatments.shape[0]
current_dataset = dict()
current_dataset['current_covariates'] = np.zeros(shape=(num_patient_points, projection_horizon,
test_data['current_covariates'].shape[-1]))
current_dataset['previous_treatments'] = np.zeros(shape=(num_patient_points, projection_horizon,
test_data['previous_treatments'].shape[-1]))
current_dataset['current_treatments'] = np.zeros(shape=(num_patient_points, projection_horizon,
test_data['current_treatments'].shape[-1]))
current_dataset['init_state'] = np.zeros((num_patient_points, encoder_states.shape[-1]))
predicted_outputs = np.zeros(shape=(num_patient_points, projection_horizon,
test_data['outputs'].shape[-1]))
for i in range(num_patient_points):
seq_length = int(sequence_lengths[i])
current_dataset['init_state'][i] = encoder_states[i, seq_length - 1]
current_dataset['current_covariates'][i, 0, 0] = encoder_outputs[i, seq_length - 1]
current_dataset['previous_treatments'][i] = previous_treatments[i,
seq_length - 1:seq_length + projection_horizon - 1, :]
current_dataset['current_treatments'][i] = current_treatments[i, seq_length:seq_length + projection_horizon,
:]
for t in range(0, projection_horizon):
print(t)
predictions = self.get_predictions(current_dataset)
for i in range(num_patient_points):
predicted_outputs[i, t] = predictions[i, t]
if (t < projection_horizon - 1):
current_dataset['current_covariates'][i, t + 1, 0] = predictions[i, t, 0]
test_data['predicted_outcomes'] = predicted_outputs
return predicted_outputs
    def compute_loss_treatments_one_hot(self, target_treatments, treatment_predictions, active_entries):
        """Masked cross-entropy between one-hot treatments and predictions.

        ``active_entries`` zeroes out padded timesteps and the loss is
        normalised by the number of active entries.  The 1e-8 guards
        tf.log against zero-probability predictions.
        """
        treatment_predictions = tf.reshape(treatment_predictions, [-1, self.max_sequence_length, self.num_treatments])
        cross_entropy_loss = tf.reduce_sum(
            (- target_treatments * tf.log(treatment_predictions + 1e-8)) * active_entries) \
                             / tf.reduce_sum(active_entries)
        return cross_entropy_loss
def compute_loss_predictions(self, outputs, predictions, active_entries):
predictions = tf.reshape(predictions, [-1, self.max_sequence_length, self.num_outputs])
mse_loss = tf.reduce_sum(tf.square(outputs - predictions) * active_entries) \
/ tf.reduce_sum(active_entries)
return mse_loss
    def evaluate_predictions(self, dataset):
        """Compute the unscaled MSE of one-step-ahead predictions.

        Predictions are de-normalised with the dataset's 'output_stds' /
        'output_means' before comparison against 'unscaled_outputs'.

        Returns:
            Tuple of (mean MSE over follow-up times, per-timestep MSE array).
        """
        predictions = self.get_predictions(dataset)
        unscaled_predictions = predictions * dataset['output_stds'] \
                               + dataset['output_means']
        unscaled_predictions = np.reshape(unscaled_predictions,
                                          newshape=(-1, self.max_sequence_length, self.num_outputs))
        unscaled_outputs = dataset['unscaled_outputs']
        active_entries = dataset['active_entries']
        mse = self.get_mse_at_follow_up_time(unscaled_predictions, unscaled_outputs, active_entries)
        mean_mse = np.mean(mse)
        return mean_mse, mse
def get_mse_at_follow_up_time(self, prediction, output, active_entires):
mses = np.sum(np.sum((prediction - output) ** 2 * active_entires, axis=-1), axis=0) \
/ active_entires.sum(axis=0).sum(axis=-1)
return mses
    def get_optimizer(self):
        """Return an Adam training op minimising ``self.loss`` at
        ``self.learning_rate``."""
        optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
        return optimizer
def compute_sequence_length(self, sequence):
used = tf.sign(tf.reduce_max(tf.abs(sequence), axis=2))
length = tf.reduce_sum(used, axis=1)
length = tf.cast(length, tf.int32)
return length
def save_network(self, tf_session, model_dir, checkpoint_name):
saver = tf.train.Saver(max_to_keep=100000)
vars = 0
for v in tf.global_variables():
vars += np.prod(v.get_shape().as_list())
save_path = saver.save(tf_session, os.path.join(model_dir, "{0}.ckpt".format(checkpoint_name)))
logging.info("Model saved to: {0}".format(save_path))
def load_network(self, tf_session, model_dir, checkpoint_name):
load_path = os.path.join(model_dir, "{0}.ckpt".format(checkpoint_name))
logging.info('Restoring model from {0}'.format(load_path))
saver = tf.train.Saver()
saver.restore(tf_session, load_path)
| [
"e.s.saveliev@gmail.com"
] | e.s.saveliev@gmail.com |
c2cd7ebdf774bed98d83547aca4237ab5a6368de | e76fda1fba459456c4bc105e7a6dcc6277a1a26c | /django_cv/blog/views.py | 8c57056ca229de5098c6a2de02c156e4b20facf7 | [] | no_license | lafabo/i-love-tutorials | 6bb2a684a201975ab523d9721b02761a6269853c | eafcd47fd62e770107c7e1f08e0d6d60a539f1ec | refs/heads/master | 2021-01-21T04:46:56.365199 | 2016-07-20T17:38:03 | 2016-07-20T17:38:03 | 47,709,568 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from .models import Post
# Create your views here.
def post_list(request):
    """Render all posts published up to now, oldest first."""
    published_posts = Post.objects.filter(
        published_date__lte=timezone.now()
    ).order_by('published_date')
    return render(request, 'blog/post_list.html', {'posts': published_posts})
def post_detail(request, pk):
    """Render a single post, returning 404 when ``pk`` does not exist."""
    requested_post = get_object_or_404(Post, pk=pk)
    return render(request, 'blog/post_detail.html', {'post': requested_post})
| [
"lazyfatboy@ya.ru"
] | lazyfatboy@ya.ru |
f71826cc4a17768511a502866746130a64bd50c5 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/145.py | fdf3695e27e276ca2a4e819ce28982205a858fd0 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,398 | py | """ imports """
from __future__ import division
import glob, pickle, os, time, sys, argparse
from copy import copy
from numpy import array, sin, cos
import numpy as np
from pylab import *
from pprint import pprint
""" global variables """
""" classes """
""" functions """
def solve(C, F, X):
    """Minimum time to accumulate X cookies (Code Jam 'Cookie Clicker Alpha').

    Starting rate is 2 cookies/s; a farm costs C cookies and adds F
    cookies/s.  Greedy rule: keep buying farms while finishing with one more
    farm is faster than finishing at the current rate.  Returns the optimal
    time formatted to 7 decimal places.
    """
    rate = 2.
    # Time already spent accumulating the C cookies a farm would cost.
    elapsed = C / rate
    while True:
        finish_now = (X - C) / rate
        finish_after_farm = X / (rate + F)
        if finish_now < finish_after_farm:
            elapsed += finish_now
            return "{:.7f}".format(elapsed)
        # Buying the farm pays off: wait for the next C cookies at the
        # boosted rate, then reconsider.
        elapsed += C / (rate + F)
        rate += F
""" parse input """
## parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("filename", default="default.in", nargs='?')
parser.add_argument("-t", "--test", action="store_true")
parser.add_argument("-l", "--lazytest", action="store_true")
args = parser.parse_args()
output = ""
TIC = time.time()
## read input lines
input_lines = open(args.filename).readlines()
def read_line():
return input_lines.pop(0).strip()
def read_ints():
return [int(x) for x in read_line().split(' ')]
def read_floats():
return [float(x) for x in read_line().split(' ')]
(numquestions,) = read_ints()
for questionindex in xrange(numquestions):
### parse input ###
C, F, X = read_floats()
### calculate answer ###
answer = solve(C, F, X)
assert answer != None
### output ###
#print "Calculating case #{}...".format(questionindex+1)
answer_str = "Case #{}: {}".format(questionindex+1, answer)
output += answer_str + '\n'
print answer_str
## write output
ofile = open('output', 'w').write(output)
TOC = time.time()
#print "done in {} s".format(TOC-TIC)
""" test """
# Optional self-check (-t): compare generated output against the expected
# "<filename-without-extension>.out" file; -l ignores the "Case #N" numbers.
if args.test:
    def filter_extension(filename):
        # Drop the final ".ext" component, if any.
        filename_parts = filename.split('.')
        if len(filename_parts) > 1:
            filename_parts = filename_parts[:-1]
        return '.'.join(filename_parts)
    print
    print "== TESTING VALIDITY =="
    try:
        # check if all input was used
        assert not len([l for l in input_lines if l.strip()]), "Not all input was used"
        # filter extension of filename
        filename_without_extension = filter_extension(args.filename)
        # get calculated and correct lines
        calculated_lines = [l.strip() for l in output.split('\n') if l.strip()]
        correct_lines = [l.strip() for l in open("{}.out".format(filename_without_extension)).readlines() if l.strip()]
        # check if number of lines match
        assert len(correct_lines) == len(calculated_lines), "calculated {} lines but expected {}".format(len(calculated_lines), \
            len(correct_lines))
        # apply lazytest: filter away test numer
        unfiltered_calculated_lines = calculated_lines
        unfiltered_correct_lines = correct_lines
        if args.lazytest:
            def filter_test_number(l):
                # Strip the number from "Case #N: ..." so only the payload
                # after ':' is compared.
                if l.startswith("Case #"):
                    parts = l.split('#')
                    parts[1] = parts[1][parts[1].index(':'):]
                    return '#'.join(parts)
                else:
                    return l
            calculated_lines = [filter_test_number(l) for l in calculated_lines]
            correct_lines = [filter_test_number(l) for l in correct_lines]
        # get lines that don't match
        incorrect_line_numbers = []
        for line_number, (correct_line, calculated_line) in enumerate(zip(correct_lines, calculated_lines)):
            if correct_line != calculated_line:
                incorrect_line_numbers.append(line_number)
        if len(incorrect_line_numbers):
            error_msg = "\n"
            for line_number in incorrect_line_numbers:
                error_msg += ' "{}" should be "{}"\n'.format(unfiltered_calculated_lines[line_number],
                                                            unfiltered_correct_lines[line_number])
            raise AssertionError(error_msg)
        print "SUCCESS"
    except AssertionError as e:
        print "\nFAILED:"
        print str(e)
        print
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
fd357ce22203e6ff0b435b0b66e4f227c52cbb08 | 4c0cfe74b972b6f758d479920118185f07b3ae66 | /lab/lab01/tests/q3_1_2.py | fbb2f30f45921e7bc06228e82c457a74816bf068 | [
"BSD-3-Clause"
] | permissive | ds-modules/Colab-data-8 | 20a72aee6b7d051d2aff50a49f02c89891201971 | cccaff13633f8a5ec697cd4aeca9087f2feec2e4 | refs/heads/main | 2023-05-29T04:05:47.976935 | 2021-06-02T23:15:06 | 2021-06-02T23:15:06 | 333,593,562 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,355 | py | test = { 'name': 'q3_1_2',
'points': 1,
'suites': [ { 'cases': [ { 'code': ">>> #It looks like you didn't give anything the name;\n"
">>> # seconds_in_a_decade. Maybe there's a typo, or maybe you ;\n"
'>>> # just need to run the cell below Question 3.2 where you defined ;\n'
'>>> # seconds_in_a_decade. Click that cell and then click the "run;\n'
'>>> # cell" button in the menu bar above.);\n'
">>> 'seconds_in_a_decade' in vars()\n"
'True',
'hidden': False,
'locked': False},
{ 'code': ">>> # It looks like you didn't change the cell to define;\n"
'>>> # seconds_in_a_decade appropriately. It should be a number,;\n'
">>> # computed using Python's arithmetic. For example, this is;\n"
'>>> # almost right:;\n'
'>>> # seconds_in_a_decade = 10*365*24*60*60;\n'
'>>> seconds_in_a_decade != ...\n'
'True',
'hidden': False,
'locked': False},
{ 'code': ">>> # It looks like you didn't account for leap years.;\n"
'>>> # There were 2 leap years and 8 non-leap years in this period.;\n'
'>>> # Leap years have 366 days instead of 365.;\n'
'>>> seconds_in_a_decade != 315360000\n'
'True',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| [
"cheungclj108@berkeley.edu"
] | cheungclj108@berkeley.edu |
8b7d0a3296d50691cf8a0cffe573214b9f553a5d | f68afe06e4bbf3d523584852063e767e53441b2b | /Toontown/toontown/coghq/CogHQExterior.py | 07ead6fdae6591c90c12eaf8a283850ce41ffca2 | [] | no_license | DankMickey/Toontown-Offline-Squirting-Flower-Modded- | eb18908e7a35a5f7fc95871814207858b94e2600 | 384754c6d97950468bb62ddd8961c564097673a9 | refs/heads/master | 2021-01-19T17:53:36.591832 | 2017-01-15T02:00:04 | 2017-01-15T02:00:04 | 34,639,744 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,228 | py | from direct.directnotify import DirectNotifyGlobal
from toontown.battle import BattlePlace
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.toonbase import ToontownGlobals
from pandac.PandaModules import *
from otp.distributed.TelemetryLimiter import RotationLimitToH, TLGatherAllAvs
from otp.nametag import NametagGlobals
class CogHQExterior(BattlePlace.BattlePlace):
    """Place handler for the outdoor area of a Cog headquarters.

    Drives a ClassicFSM of avatar states (walking, battles, doors, tunnels,
    teleports, getting squished, ...) layered on BattlePlace.  Written for
    Python 2 / Panda3D (uses dict.has_key and global builtins such as
    ``base``, ``render``, ``hidden``, ``messenger`` and ``taskMgr``).
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('CogHQExterior')
    def __init__(self, loader, parentFSM, doneEvent):
        BattlePlace.BattlePlace.__init__(self, loader, doneEvent)
        self.parentFSM = parentFSM
        # State graph: each State lists the states it may transition into.
        self.fsm = ClassicFSM.ClassicFSM('CogHQExterior', [State.State('start', self.enterStart, self.exitStart, ['walk',
         'tunnelIn',
         'teleportIn',
         'doorIn']),
         State.State('walk', self.enterWalk, self.exitWalk, ['stickerBook',
         'teleportOut',
         'tunnelOut',
         'DFA',
         'doorOut',
         'died',
         'stopped',
         'WaitForBattle',
         'battle',
         'squished',
         'stopped']),
         State.State('stopped', self.enterStopped, self.exitStopped, ['walk', 'teleportOut', 'stickerBook']),
         State.State('doorIn', self.enterDoorIn, self.exitDoorIn, ['walk', 'stopped']),
         State.State('doorOut', self.enterDoorOut, self.exitDoorOut, ['walk', 'stopped']),
         State.State('stickerBook', self.enterStickerBook, self.exitStickerBook, ['walk',
         'DFA',
         'WaitForBattle',
         'battle',
         'tunnelOut',
         'doorOut',
         'squished',
         'died']),
         State.State('WaitForBattle', self.enterWaitForBattle, self.exitWaitForBattle, ['battle', 'walk']),
         State.State('battle', self.enterBattle, self.exitBattle, ['walk', 'teleportOut', 'died']),
         State.State('DFA', self.enterDFA, self.exitDFA, ['DFAReject', 'teleportOut', 'tunnelOut']),
         State.State('DFAReject', self.enterDFAReject, self.exitDFAReject, ['walk']),
         State.State('squished', self.enterSquished, self.exitSquished, ['walk', 'died', 'teleportOut']),
         State.State('teleportIn', self.enterTeleportIn, self.exitTeleportIn, ['walk', 'WaitForBattle', 'battle']),
         State.State('teleportOut', self.enterTeleportOut, self.exitTeleportOut, ['teleportIn', 'final', 'WaitForBattle']),
         State.State('died', self.enterDied, self.exitDied, ['quietZone']),
         State.State('tunnelIn', self.enterTunnelIn, self.exitTunnelIn, ['walk', 'WaitForBattle', 'battle']),
         State.State('tunnelOut', self.enterTunnelOut, self.exitTunnelOut, ['final']),
         State.State('final', self.enterFinal, self.exitFinal, ['start'])], 'start', 'final')
    def load(self):
        # Attach our FSM under the parent's 'cogHQExterior' state.
        self.parentFSM.getStateNamed('cogHQExterior').addChild(self.fsm)
        BattlePlace.BattlePlace.load(self)
    def unload(self):
        self.parentFSM.getStateNamed('cogHQExterior').removeChild(self.fsm)
        del self.fsm
        BattlePlace.BattlePlace.unload(self)
    def enter(self, requestStatus):
        # Activate the place: show geometry, start music, hook up doors and
        # link tunnels, then jump to the entry state given by 'how'.
        self.zoneId = requestStatus['zoneId']
        BattlePlace.BattlePlace.enter(self)
        self.fsm.enterInitialState()
        base.playMusic(self.loader.music, looping=1, volume=0.8)
        self.loader.geom.reparentTo(render)
        self.nodeList = [self.loader.geom]
        self._telemLimiter = TLGatherAllAvs('CogHQExterior', RotationLimitToH)
        self.accept('doorDoneEvent', self.handleDoorDoneEvent)
        self.accept('DistributedDoor_doorTrigger', self.handleDoorTrigger)
        NametagGlobals.setMasterArrowsOn(1)
        self.tunnelOriginList = base.cr.hoodMgr.addLinkTunnelHooks(self, self.nodeList, self.zoneId)
        how = requestStatus['how']
        self.fsm.request(how, [requestStatus])
    def exit(self):
        # Tear down everything enter() created, in reverse order.
        self.fsm.requestFinalState()
        self._telemLimiter.destroy()
        del self._telemLimiter
        self.loader.music.stop()
        for node in self.tunnelOriginList:
            node.removeNode()
        del self.tunnelOriginList
        if self.loader.geom:
            self.loader.geom.reparentTo(hidden)
        self.ignoreAll()
        BattlePlace.BattlePlace.exit(self)
    def enterTunnelOut(self, requestStatus):
        # Round the zone down to its hundred to name the origin link tunnel.
        fromZoneId = self.zoneId - self.zoneId % 100
        tunnelName = base.cr.hoodMgr.makeLinkTunnelName(self.loader.hood.id, fromZoneId)
        requestStatus['tunnelName'] = tunnelName
        BattlePlace.BattlePlace.enterTunnelOut(self, requestStatus)
    def enterTeleportIn(self, requestStatus):
        # Drop the avatar at the playground center before the usual entry.
        x, y, z, h, p, r = base.cr.hoodMgr.getPlaygroundCenterFromId(self.loader.hood.id)
        base.localAvatar.setPosHpr(render, x, y, z, h, p, r)
        BattlePlace.BattlePlace.enterTeleportIn(self, requestStatus)
    def enterTeleportOut(self, requestStatus, callback = None):
        # Skip the teleport-out animation when leaving from a battle.
        if requestStatus.has_key('battle'):
            self.__teleportOutDone(requestStatus)
        else:
            BattlePlace.BattlePlace.enterTeleportOut(self, requestStatus, self.__teleportOutDone)
    def __teleportOutDone(self, requestStatus):
        # Route the finished teleport: same hood -> teleport back in,
        # estate -> go home, anything else -> hand off via doneEvent.
        hoodId = requestStatus['hoodId']
        zoneId = requestStatus['zoneId']
        avId = requestStatus['avId']
        shardId = requestStatus['shardId']
        if hoodId == self.loader.hood.hoodId and zoneId == self.loader.hood.hoodId and shardId == None:
            self.fsm.request('teleportIn', [requestStatus])
        elif hoodId == ToontownGlobals.MyEstate:
            self.getEstateZoneAndGoHome(requestStatus)
        else:
            self.doneStatus = requestStatus
            messenger.send(self.doneEvent)
        return
    def exitTeleportOut(self):
        BattlePlace.BattlePlace.exitTeleportOut(self)
    def enterSquished(self):
        # Play the squish animation, then recover via handleSquishDone.
        base.localAvatar.laffMeter.start()
        base.localAvatar.b_setAnimState('Squish')
        taskMgr.doMethodLater(2.0, self.handleSquishDone, base.localAvatar.uniqueName('finishSquishTask'))
    def handleSquishDone(self, extraArgs = []):
        # NOTE(review): mutable default argument kept for signature
        # compatibility; the list is never mutated here.
        base.cr.playGame.getPlace().setState('walk')
    def exitSquished(self):
        taskMgr.remove(base.localAvatar.uniqueName('finishSquishTask'))
        base.localAvatar.laffMeter.stop()
| [
"jareddarty96@gmail.com"
] | jareddarty96@gmail.com |
31fdae76ee89f60e923d6125f8b0c0979a77f28c | d5b48163d236ca770be8e687f92192e2971397e8 | /d2.py | f4cf2b96e37d9a200f82ba75ba05811c122464ae | [] | no_license | Kunal352000/python_program | 191f5d9c82980eb706e11457c2b5af54b0d2ae95 | 7a1c645f9eab87cc45a593955dcb61b35e2ce434 | refs/heads/main | 2023-07-12T19:06:19.121741 | 2021-08-21T11:58:41 | 2021-08-21T11:58:41 | 376,606,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | str=input("Enter your string: ").casefold()
# Count how often each character occurs in the (case-folded) input string
# `str` read on the line above, then print one "char : count" line per
# distinct character in first-appearance order.
freq = {}
for ch in str:
    freq[ch] = freq.get(ch, 0) + 1
for ch in freq:
    print(ch, ":", freq[ch])
| [
"noreply@github.com"
] | Kunal352000.noreply@github.com |
8ab606702f52dcff5be842c17d706af92de48878 | bd2c549edd6dc38d7d9aaae0781d5e613e28b2a2 | /guvi79.py | 92681e422696b5ff56ce8e268740c63400738848 | [] | no_license | pavanimallem/pavs1 | 18843530de9f3166332fd077f88320e4dba7d760 | ce766a781dbb46594e319c086ff4f1648ca21d25 | refs/heads/master | 2020-03-24T07:14:15.466577 | 2018-09-11T11:26:07 | 2018-09-11T11:26:07 | 142,557,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | N,M=map(int,raw_input().split())
K=N*M
for i in range(K+1):
if K==i*i:
print("yes")
break
else:
print "no"
| [
"noreply@github.com"
] | pavanimallem.noreply@github.com |
4983254baacc2fcd9bba2d3cd421902e7fafd6a7 | 986a791e0702d354aa1fc7bb0a678bf5d5282564 | /ax/models/torch/botorch_defaults.py | a64651634f8969ba24bc9ba7cb4400dc822c4bb1 | [
"MIT"
] | permissive | jshuadvd/Ax | ab7892979eb22f5dd3488f5aa9a81a110c7c1120 | e4d7826fd9839ec411c08e129acabd3c29154103 | refs/heads/master | 2020-07-28T16:42:59.404248 | 2019-09-19T02:02:27 | 2019-09-19T02:03:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,983 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.objective import ConstrainedMCObjective, LinearMCObjective
from botorch.acquisition.utils import get_acquisition_function, get_infeasible_cost
from botorch.exceptions.errors import UnsupportedError
from botorch.fit import fit_gpytorch_model
from botorch.models.fidelity.gp_regression_fidelity import (
SingleTaskGPLTKernel,
SingleTaskMultiFidelityGP,
)
from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP
from botorch.models.gpytorch import GPyTorchModel
from botorch.models.model import Model
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.multitask import FixedNoiseMultiTaskGP, MultiTaskGP
from botorch.optim.optimize import joint_optimize, sequential_optimize
from botorch.utils import (
get_objective_weights_transform,
get_outcome_constraint_transforms,
)
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from gpytorch.mlls.sum_marginal_log_likelihood import SumMarginalLogLikelihood
from torch import Tensor
# Floor applied to observed noise variances so GP fitting stays numerically
# stable when outcomes are reported with (near-)zero noise.
MIN_OBSERVED_NOISE_LEVEL = 1e-7
# Multi-fidelity GP classes selectable by index via `fidelity_model_id`:
# 0 -> linear truncated kernel, 1 -> exponential decay kernel.
model_list = [SingleTaskGPLTKernel, SingleTaskMultiFidelityGP]
def get_and_fit_model(
    Xs: List[Tensor],
    Ys: List[Tensor],
    Yvars: List[Tensor],
    task_features: List[int],
    fidelity_features: List[int],
    refit_model: bool = True,
    state_dict: Optional[Dict[str, Tensor]] = None,
    fidelity_model_id: Optional[int] = None,
    **kwargs: Any,
) -> GPyTorchModel:
    r"""Instantiates and fits a botorch ModelListGP using the given data.

    Args:
        Xs: List of X data, one tensor per outcome
        Ys: List of Y data, one tensor per outcome
        Yvars: List of observed variance of Ys.
        task_features: List of columns of X that are tasks.
        fidelity_features: List of columns of X that are fidelity parameters.
        refit_model: Flag for refitting model.
        state_dict: If provided, will set model parameters to this state
            dictionary. Otherwise, will fit the model.
        fidelity_model_id: set this if you want to use GP models from `model_list`
            defined above. The `SingleTaskGPLTKernel` model uses linear truncated
            kernel; the `SingleTaskMultiFidelityGP` model uses exponential decay
            kernel.

    Returns:
        A fitted ModelListGP.
    """
    # Multi-fidelity models are mutually exclusive with task features and
    # support at most one fidelity parameter.
    if fidelity_model_id is not None and len(task_features) > 0:
        raise NotImplementedError(
            "Currently do not support MF-GP models with task_features!"
        )
    if fidelity_model_id is not None and len(fidelity_features) > 1:
        raise UnsupportedError(
            "Fidelity MF-GP models currently support only one fidelity parameter!"
        )
    model = None
    if len(task_features) > 1:
        raise ValueError(
            f"This model only supports 1 task feature (got {task_features})"
        )
    elif len(task_features) == 1:
        task_feature = task_features[0]
    else:
        task_feature = None
    if task_feature is None:
        if len(Xs) == 1:
            # Use single output, single task GP
            model = _get_model(
                X=Xs[0],
                Y=Ys[0],
                Yvar=Yvars[0],
                task_feature=task_feature,
                fidelity_features=fidelity_features,
                fidelity_model_id=fidelity_model_id,
            )
        elif all(torch.equal(Xs[0], X) for X in Xs[1:]):
            # Use batched multioutput, single task GP
            Y = torch.cat(Ys, dim=-1)
            Yvar = torch.cat(Yvars, dim=-1)
            model = _get_model(
                X=Xs[0],
                Y=Y,
                Yvar=Yvar,
                task_feature=task_feature,
                fidelity_features=fidelity_features,
                fidelity_model_id=fidelity_model_id,
            )
    if model is None:
        # Use model list
        # (one GP per outcome; note the fidelity arguments are not forwarded
        # on this path)
        models = [
            _get_model(X=X, Y=Y, Yvar=Yvar, task_feature=task_feature)
            for X, Y, Yvar in zip(Xs, Ys, Yvars)
        ]
        model = ModelListGP(*models)
    # Move the model to the dtype/device of the training data.
    model.to(Xs[0])
    if state_dict is not None:
        model.load_state_dict(state_dict)
    # Fit from scratch when no state dict was given, or refit on request.
    if state_dict is None or refit_model:
        # TODO: Add bounds for optimization stability - requires revamp upstream
        bounds = {}
        if isinstance(model, ModelListGP):
            mll = SumMarginalLogLikelihood(model.likelihood, model)
        else:
            # pyre-ignore: [16]
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
        mll = fit_gpytorch_model(mll, bounds=bounds)
    return model
def predict_from_model(model: Model, X: Tensor) -> Tuple[Tensor, Tensor]:
    r"""Predicts outcomes given a model and input tensor.

    Args:
        model: A botorch Model.
        X: A `n x d` tensor of input parameters.

    Returns:
        Tensor: The predicted posterior mean as an `n x o`-dim tensor.
        Tensor: The predicted posterior covariance as a `n x o x o`-dim tensor.
    """
    with torch.no_grad():
        posterior = model.posterior(X)
    mean_prediction = posterior.mean.cpu().detach()
    # TODO: Allow Posterior to (optionally) return the full covariance matrix
    # Only marginal variances are available, so each point's variance vector
    # is expanded into a diagonal o x o covariance matrix.
    variance_prediction = posterior.variance.cpu().detach()
    num_outputs = variance_prediction.shape[-1]
    identity = torch.eye(num_outputs, dtype=variance_prediction.dtype)
    covariance = variance_prediction.unsqueeze(-1) * identity
    return mean_prediction, covariance
def get_NEI(
    model: Model,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
    X_pending: Optional[Tensor] = None,
    **kwargs: Any,
) -> AcquisitionFunction:
    r"""Instantiates a qNoisyExpectedImprovement acquisition function.

    Args:
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        X_observed: A tensor containing points observed for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        X_pending: A tensor containing points whose evaluation is pending (i.e.
            that have been submitted for evaluation) present for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        mc_samples: The number of MC samples to use (default: 512).
        qmc: If True, use qMC instead of MC (default: True).

    Returns:
        qNoisyExpectedImprovement: The instantiated acquisition function.
    """
    if X_observed is None:
        raise ValueError("There are no feasible observed points.")
    # construct Objective module
    if outcome_constraints is None:
        objective = LinearMCObjective(weights=objective_weights)
    else:
        # With constraints: feasibility-weighted objective where infeasible
        # points are penalized by the inferred infeasible cost.
        obj_tf = get_objective_weights_transform(objective_weights)
        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        X_observed = torch.as_tensor(X_observed)
        inf_cost = get_infeasible_cost(X=X_observed, model=model, objective=obj_tf)
        objective = ConstrainedMCObjective(
            objective=obj_tf, constraints=con_tfs or [], infeasible_cost=inf_cost
        )
    return get_acquisition_function(
        acquisition_function_name="qNEI",
        model=model,
        objective=objective,
        X_observed=X_observed,
        X_pending=X_pending,
        mc_samples=kwargs.get("mc_samples", 512),
        qmc=kwargs.get("qmc", True),
        # Fresh random seed for the (q)MC sampler on every call.
        seed=torch.randint(1, 10000, (1,)).item(),
    )
def scipy_optimizer(
    acq_function: AcquisitionFunction,
    bounds: Tensor,
    n: int,
    inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
    **kwargs: Any,
) -> Tensor:
    r"""Optimizer using scipy's minimize module on a numpy-adpator.

    Args:
        acq_function: A botorch AcquisitionFunction.
        bounds: A `2 x d`-dim tensor, where `bounds[0]` (`bounds[1]`) are the
            lower (upper) bounds of the feasible hyperrectangle.
        n: The number of candidates to generate.
        inequality constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an inequality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`
        fixed_features: A map {feature_index: value} for features that should
            be fixed to a particular value during generation.
        rounding_func: A function that rounds an optimization result
            appropriately (i.e., according to `round-trip` transformations).

    Returns:
        A two-element tuple with the following elements:
        Tensor: A `n x d`-dim tensor of generated candidates.
        Tensor: In the case of joint optimization, a scalar tensor containing
            the joint acquisition value of the `n` points. In the case of
            sequential optimization, a `n`-dim tensor of conditional acquisition
            values, where `i`-th element is the expected acquisition value
            conditional on having observed candidates `0,1,...,i-1`.
    """
    num_restarts: int = kwargs.get("num_restarts", 20)
    raw_samples: int = kwargs.get("num_raw_samples", 50 * num_restarts)
    if kwargs.get("joint_optimization", False):
        optimize = joint_optimize
    else:
        optimize = sequential_optimize
    # use SLSQP by default for small problems since it yields faster wall times
    if "method" not in kwargs:
        kwargs["method"] = "SLSQP"
    # Note: the remaining kwargs are forwarded verbatim as solver options.
    X = optimize(
        acq_function=acq_function,
        bounds=bounds,
        q=n,
        num_restarts=num_restarts,
        raw_samples=raw_samples,
        options=kwargs,
        inequality_constraints=inequality_constraints,
        fixed_features=fixed_features,
        post_processing_func=rounding_func,
    )
    # TODO: Un-hack this once botorch #234 is part of a stable release
    if isinstance(X, tuple):
        X, _ = X  # pragma: no cover
    return X
def _get_model(
    X: Tensor,
    Y: Tensor,
    Yvar: Tensor,
    task_feature: Optional[int],
    fidelity_features: Optional[List[int]] = None,
    fidelity_model_id: Optional[int] = None,
) -> GPyTorchModel:
    """Instantiate a model of type depending on the input data.

    Args:
        X: Training features, one row per observation.
        Y: Training observations.
        Yvar: Observation noise variances. Entries may be NaN, meaning
            "variance unknown"; all-NaN selects an inferred-noise model.
        task_feature: Column index of the task identifier in ``X``, or
            ``None`` for a single-task model.
        fidelity_features: Column indices of fidelity parameters, if any.
        fidelity_model_id: Index into ``model_list`` (defined elsewhere in
            this module) selecting a fidelity-aware model class.

    Returns:
        A GPyTorch model: SingleTaskGP / FixedNoiseGP / MultiTaskGP /
        FixedNoiseMultiTaskGP, or a fidelity-aware model from ``model_list``.

    Raises:
        ValueError: If variances are a mix of known and NaN values.
    """
    # NOTE: in-place clamp also mutates the caller's Yvar tensor.
    Yvar = Yvar.clamp_min_(MIN_OBSERVED_NOISE_LEVEL)  # pyre-ignore: [16]
    is_nan = torch.isnan(Yvar)
    any_nan_Yvar = torch.any(is_nan)
    all_nan_Yvar = torch.all(is_nan)
    # Either every variance is provided or none is; a mixture indicates
    # a buggy evaluation function upstream.
    if any_nan_Yvar and not all_nan_Yvar:
        raise ValueError(
            "Mix of known and unknown variances indicates "
            "valuation function errors. Variances should all be specified, or "
            "none should be."
        )
    if fidelity_features is None:
        fidelity_features = []
    if fidelity_model_id is None or len(fidelity_features) == 0:
        # Non-fidelity case: pick single/multi-task x inferred/fixed noise.
        if task_feature is None and all_nan_Yvar:
            gp = SingleTaskGP(train_X=X, train_Y=Y)
        elif task_feature is None:
            gp = FixedNoiseGP(train_X=X, train_Y=Y, train_Yvar=Yvar)
        elif all_nan_Yvar:
            gp = MultiTaskGP(train_X=X, train_Y=Y, task_feature=task_feature)
        else:
            # Multi-task fixed-noise models expect flattened targets.
            gp = FixedNoiseMultiTaskGP(
                train_X=X,
                train_Y=Y.view(-1),
                train_Yvar=Yvar.view(-1),
                task_feature=task_feature,
            )
    else:
        # Fidelity-aware path: look up the model class by id.
        gp_model = model_list[fidelity_model_id]
        # pyre-ignore [29]
        gp = gp_model(train_X=X, train_Y=Y, train_data_fidelity=False)
    return gp
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
d8030447090aa7eec30c37b4ae3f45b0fd8aeb50 | 60c4255fb0cf7ed817ff09d8113bf404cde8e12b | /env/lib/python2.7/site-packages/django/conf/locale/sv/formats.py | 98efdf170c00c4b092105bb360b3f70f4e8332e7 | [] | no_license | adamjberg/finna-be-octo-ninja | 83aba13f619d4fbfb5308e48336917f0ada0459d | cf16bfcb3d7bb4e878ba0b99ad701b5cda8be34c | refs/heads/master | 2021-01-10T20:19:20.849476 | 2014-01-11T05:42:23 | 2014-01-11T05:42:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,428 | py | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'Y-m-d'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
    '%Y-%m-%d',  # '2006-10-25'
    '%m/%d/%Y',  # '10/25/2006'
    '%m/%d/%y',  # '10/25/06'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S',  # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M',  # '2006-10-25 14:30'
    '%Y-%m-%d',  # '2006-10-25'
    '%m/%d/%Y %H:%M:%S',  # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M',  # '10/25/2006 14:30'
    '%m/%d/%Y',  # '10/25/2006'
    '%m/%d/%y %H:%M:%S',  # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M',  # '10/25/06 14:30'
    '%m/%d/%y',  # '10/25/06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
| [
"ilikecattle@gmail.com"
] | ilikecattle@gmail.com |
327ba90dccd54195e75cd71f17b040212a27108f | f5863cf378bce80d3aa459941dff79ea3c8adf5d | /Leetcode/80.Remove_Duplicates_from_Sorted_Array_II.py | ed2977219a1a145be0cf2b19d26cfb295afc768f | [] | no_license | Taeg92/Problem_solving | 815c13ae7895708948482eeb05411322be00ac12 | 15c0fe0eda4f77d974451777cb01d10882d8aaa9 | refs/heads/master | 2021-11-18T22:03:21.727840 | 2021-09-06T14:21:09 | 2021-09-06T14:21:09 | 235,335,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
idx = 0
for num in nums:
if idx < 2 or num != nums[idx-2]:
nums[idx] = num
idx = idx + 1
return idx
if __name__ == "__main__":
    # Demo run; expected output: 5 (compacted prefix [1, 1, 2, 2, 3]).
    # Guarded so importing this module no longer triggers the demo print.
    nums = [1, 1, 1, 2, 2, 3]
    sol = Solution()
    print(sol.removeDuplicates(nums))
| [
"gtg92t@gmail.com"
] | gtg92t@gmail.com |
f567892b375898d2f9e2c1370a515b0984a11f34 | f0987e17aea6668158cd334c1fbacfe6286d3c77 | /NITA/lib/jnpr/toby/tmp/RLI/RLI-27K/RLI-27608/Apps/fw_ms_filter_length.py | f3ef23b3af736df13fa3dd6599bba08b7ddaf50c | [] | no_license | fengyun4623/file | 00bf21f952ea3f95ffc9fe18448b244b26b7fadb | 3966c63d48557b0b94303896eed7a767593a4832 | refs/heads/master | 2023-04-02T05:01:25.066052 | 2020-07-29T16:15:31 | 2020-07-29T16:15:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,259 | py | import sys,time,os,argparse
from grpc.beta import implementations
import firewall_service_pb2,jnx_addr_pb2,authentication_service_pb2
from firewall_service_pb2 import *
from jnx_addr_pb2 import *
from authentication_service_pb2 import *
from grpc.framework.interfaces.face.face import *
# Command-line interface: target device and the interface (IFL) to bind
# the test filter to.
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--device', help='Input host name', required=True)
parser.add_argument('-ifl', '--iflname', help='Input interface name', required=True)
args = parser.parse_args()
device1 = args.device
# NOTE(review): credentials are hard-coded lab defaults -- do not reuse
# outside an isolated test environment.
APP_USER = 'regress'
APP_PASSWORD = 'MaRtInI'
port = 9999       # gRPC port of the device's service endpoint
client_id = '101'  # client identifier sent with the LoginRequest
def pause():
    """Block until the operator presses Enter (interactive step gate).

    Uses the Python 2 ``raw_input`` builtin, consistent with the rest of
    this (Python 2) script. The previous unused local variable holding
    the return value has been dropped.
    """
    raw_input("Enter to continue...")
# Interactive test: exercise firewall filter-name length/character limits
# over the device's gRPC firewall service (Python 2 script).
print "Executing Python app"
pause()
try:
    # Open an insecure gRPC channel to the device and authenticate.
    channel = implementations.insecure_channel(host=device1, port=port)
    stub = authentication_service_pb2.beta_create_Login_stub(channel)
    login_response = stub.LoginCheck(
        authentication_service_pb2.LoginRequest(user_name=APP_USER, password=APP_PASSWORD,
                                                client_id=client_id), 100)
    if (login_response.result == 1):
        print "Login to ", device1, "successful"
    else:
        print "Login to ", device1, "failed"
        raise SystemExit()
    fw = firewall_service_pb2.beta_create_AclService_stub(channel)
    flag = 0  # count of failed RPC steps (non-zero => TEST FAILED)
    res = []  # per-step human-readable results, printed at the end
    # Filter names covering boundary cases: 20 chars, special characters,
    # short, all-digits, "-1", 128 chars, and the empty string.
    fname = [
        '12345678901234567890',
        '!@!@!', 'f1', '0000000000', '-1',
        '12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678',
        '']
    for filtname in fname:
        # Build a one-term multiservice filter matching 10.1.1.2/32 that
        # accepts, counts, logs, syslogs and samples matching traffic.
        IP1 = IpAddress(addr_string='10.1.1.2')
        matchIP1 = AclMatchIpAddress(addr=IP1, prefix_len=32, match_op=ACL_MATCH_OP_EQUAL)
        term1match1 = AclEntryMatchMultiService(match_addrs=[matchIP1])
        t = AclEntryMultiServiceTerminatingAction(action_accept=1)
        nt = AclEntryMultiServiceNonTerminatingAction(action_count=AclActionCounter(counter_name="Match1"), action_syslog=1, action_log=1, action_sample=1)
        term1Action1 = AclEntryMultiServiceAction(action_t=t, actions_nt=nt)
        adj = AclAdjacency(type=ACL_ADJACENCY_AFTER)
        term1 = AclMultiServiceEntry(ace_name="t1", ace_op=ACL_ENTRY_OPERATION_ADD, adjacency=adj, matches=term1match1, actions=term1Action1)
        tlist1 = AclEntry(mservice_entry=term1)
        # NOTE: 'filter' shadows the builtin of the same name (kept as-is).
        filter = AccessList(acl_name = filtname, acl_type = ACL_TYPE_CLASSIC, acl_family = ACL_FAMILY_MULTISERVICE, acl_flag = ACL_FLAGS_NONE, ace_list=[tlist1])
        print filter
        # Step 1: add the access list.
        result = fw.AccessListAdd(filter, 10)
        print 'Invoking fw.AccessListAdd \nreturn = ', result
        if result.status is ACL_STATUS_EOK:
            print "AccessListAdd RPC Passed with filter name : %s" % filtname
            res.append("AccessListAdd RPC Passed with filter name : %s and returned %s" % (filtname, result))
        else:
            print "AccessListAdd RPC Failed with filter name : %s" % filtname
            res.append("AccessListAdd RPC Failed with filter name : %s and returned %s" % (filtname, result))
            flag += 1
        pause()
        # Step 2: bind the filter to the interface (input direction).
        bind = AccessListObjBind(acl=filter, obj_type=ACL_BIND_OBJ_TYPE_INTERFACE, bind_object=AccessListBindObjPoint(intf=args.iflname + '.0'), bind_direction=ACL_BIND_DIRECTION_INPUT, bind_family=ACL_FAMILY_MULTISERVICE)
        print bind
        bindaddresult = fw.AccessListBindAdd(bind, 10)
        print 'Invoking fw.AccessListBindAdd \nreturn = ', bindaddresult
        if bindaddresult.status is ACL_STATUS_EOK:
            print "AccessListBindAdd RPC Passed with filter name : %s" % filtname
            res.append("AccessListBindAdd RPC Passed with filter name : %s and returned %s" % (filtname, bindaddresult))
        else:
            print "AccessListBindAdd RPC Failed with filter name : %s" % filtname
            res.append("AccessListBindAdd RPC Failed with filter name : %s and returned %s" % (filtname, bindaddresult))
            flag += 1
        pause()
        # Step 3: unbind the filter from the interface.
        binddelresult = fw.AccessListBindDelete(bind, 10)
        print 'Invoking fw.AccessListBindDelete \nreturn = ', binddelresult
        if binddelresult.status is ACL_STATUS_EOK:
            print "AccessListBindDelete RPC Passed with filter name : %s" % filtname
            res.append("AccessListBindDelete RPC Passed with filter name : %s and returned %s" % (filtname, binddelresult))
        else:
            print "AccessListBindDelete RPC Failed with filter name : %s" % filtname
            res.append("AccessListBindDelete RPC Failed with filter name : %s and returned %s" % (filtname, binddelresult))
            flag += 1
        pause()
        # Step 4: delete the access list itself.
        filter = AccessList(acl_name=filtname, acl_family = ACL_FAMILY_MULTISERVICE)
        print filter
        acldelresult = fw.AccessListDelete(filter, 10)
        print 'Invoking fw.AccessListDelete \nreturn = ', acldelresult
        if acldelresult.status is ACL_STATUS_EOK:
            print "AccessListDelete RPC Passed with filter name : %s" % filtname
            res.append("AccessListDelete RPC Passed with filter name : %s and returned %s" % (filtname, acldelresult))
        else:
            print "AccessListDelete RPC Failed with filter name : %s" % filtname
            res.append("AccessListDelete RPC Failed with filter name : %s and returned %s" % (filtname, acldelresult))
            flag += 1
        pause()
    # Summary: dump every step result, then an overall pass/fail verdict.
    print "FINAL RESULT : \n"
    for i in res:
        print i
    if flag > 0:
        print "TEST FAILED"
    else:
        print "TEST PASSED"
except AbortionError as e:
    # gRPC-level failure: report the status code and details.
    print "code is ", e.code
    print "details is ", e.details
# Force the process to terminate; lingering gRPC worker threads would
# otherwise keep the interpreter alive after the script is done.
while True:
    import signal
    os.kill(os.getpid(), signal.SIGTERM)
| [
"srigupta@juniper.net"
] | srigupta@juniper.net |
8ae65a6c0f57089c39e0396a9ff9bfa5bed30b79 | 16daab90ef28ada0e3efc4d38f8c4d62df30c43f | /Speaker-Verification/dataloader.py | 590e3aacc8790eed435482c97ef137b2577d717b | [] | no_license | AntoniaLovjer/adversarial_robustness_audio | 7ba60a0bdf50ed428eecdbc3a2adbbf583129877 | 34ffecaf73332698fccb0d90d2932dde09cc77a5 | refs/heads/master | 2020-09-22T12:16:54.420697 | 2019-12-02T14:22:38 | 2019-12-02T14:22:38 | 225,189,551 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | import os
import re
from glob import glob
import torch
import pandas as pd
# Output label set: the 10 command words plus "silence" and "unknown".
WORDS = 'yes no up down left right on off stop go silence unknown'.split()
id2name = {i: name for i, name in enumerate(WORDS)}  # class id -> label
name2id = {name: i for i, name in id2name.items()}   # label -> class id


def load_data(data_dir):
    """Index the training wav files under ``data_dir``.

    Scans ``<data_dir>/train/audio/<label>/<uid>_*.wav`` and returns a
    ``pandas.DataFrame`` with one row per matching file and columns:

    * ``uid``      -- speaker id (file-name prefix before the first ``_``),
    * ``label_id`` -- integer class id (see ``name2id``); files under
      ``_background_noise_`` map to ``silence``, directories outside
      ``WORDS`` map to ``unknown``,
    * ``path``     -- path to the wav file.

    Files whose path does not match the expected layout are skipped.
    (Fixes vs. the original: the docstring no longer claims a train/val
    list pair is returned, the unused ``bl_true`` local is gone, the regex
    is a raw string without the invalid ``\\/`` escapes, and an empty scan
    now yields an empty frame instead of crashing on column assignment.)
    """
    # prefix / label-directory / "<uid>_...wav".
    # NOTE(review): assumes '/' separators, i.e. POSIX-style paths as
    # produced by glob on Linux/macOS -- confirm if Windows support is needed.
    pattern = re.compile(r"(.+/)?(\w+)/([^_]+)_.+wav")
    all_files = glob(os.path.join(data_dir, 'train/audio/*/*wav'))
    possible = set(WORDS)
    data = []
    for entry in all_files:
        match = re.match(pattern, entry)
        if match:
            label, uid = match.group(2), match.group(3)
            if label == '_background_noise_':
                label = 'silence'
            if label not in possible:
                label = 'unknown'
            data.append((uid, name2id[label], entry))
    # Explicit columns keep the schema even when no files were found.
    return pd.DataFrame(data, columns=['uid', 'label_id', 'path'])
"google-dl-platform@googlegroups.com"
] | google-dl-platform@googlegroups.com |
cc1d375be7815c3471c6daf7cbea65c02c7d0482 | 3b786d3854e830a4b46ee55851ca186becbfa650 | /SystemTesting/pylib/vmware/linux/cli/linux_switch_impl.py | 0b223e09d9aa54b664e17ddd52aef79123d7ea00 | [] | no_license | Cloudxtreme/MyProject | d81f8d38684333c22084b88141b712c78b140777 | 5b55817c050b637e2747084290f6206d2e622938 | refs/heads/master | 2021-05-31T10:26:42.951835 | 2015-12-10T09:57:04 | 2015-12-10T09:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,789 | py | import vmware.common.utilities as utilities
import vmware.interfaces.switch_interface as switch_interface
import vmware.parsers.horizontal_table_parser as horizontal_table_parser
import vmware.schema.switch.arp_table_schema as arp_table_schema
import vmware.schema.switch.logical_switch_schema as logical_switch_schema
import vmware.schema.switch.mac_table_schema as mac_table_schema
import vmware.schema.switch.vtep_table_schema as vtep_table_schema
# XXX(Salman): How will we differentiate between the overlay logical switches
# and VLAN backed logical switches?
class LinuxSwitchImpl(switch_interface.SwitchInterface):
    """
    Class for implementing query operations for overlay logical switches on
    the hosts.

    Note: This implementation is specific to logical switches backed by OVS.
    The output of the commands are documented at: http://goo.gl/SdI2oK

    All commands are executed remotely via ``client_object.connection`` and
    parsed into schema objects; no state is kept on this class.
    """
    DEFAULT_SOCKET_DIR = '/var/run/openvswitch'  # OVS runtime directory
    CLI = 'ovs-appctl'                           # command used for all queries
    HORIZONTAL_PARSER_TYPE = 'raw/horizontalTable'
    IP_ADDRESS = 'IP Address'                    # key in vtep/ip output
    REPLICATION_MODE = 'replication_mode'        # key in replication-mode output

    @classmethod
    def _get_nsxa_socket(cls, client_object):
        # Locate the nsx agent control socket (nsxa*ctl) under the OVS
        # run directory on the remote host.
        cmd = 'ls %s/nsxa*ctl' % cls.DEFAULT_SOCKET_DIR
        nsxa_socket = client_object.connection.request(
            cmd).response_data.strip()
        if nsxa_socket == '':
            raise AssertionError('Unable to locate the nsx agent socket file '
                                 'in: %r' % cls.DEFAULT_SOCKET_DIR)
        return nsxa_socket

    @classmethod
    def get_arp_table(cls, client_object, switch_vni=None):
        """
        Fetches the ARP table for the logical switch.

        @param switch_vni: VNI to identify the logical switch.
        @type switch_vni: int
        @return: Returns the ARPTableSchema object.
        @rtype: arp_table_schema.ARPTableSchema
        """
        attribute_map = {'mac address': 'adapter_mac',
                         'ip address': 'adapter_ip'}
        nsxa_socket = cls._get_nsxa_socket(client_object)
        cmd = ('%s -t %s vni/arp-table %s' %
               (cls.CLI, nsxa_socket, switch_vni))
        out = client_object.connection.request(cmd).response_data.split('\n')
        # Skip the VNI number in the output.
        raw_table_data = '\n'.join(out[1:])
        header_keys = ["IP Address", "Mac Address"]
        parser = horizontal_table_parser.HorizontalTableParser()
        parsed_data = parser.get_parsed_data(raw_table_data,
                                             header_keys=header_keys)
        mapped_pydict = utilities.map_attributes(attribute_map, parsed_data)
        return arp_table_schema.ARPTableSchema(py_dict=mapped_pydict)

    @classmethod
    def get_vtep_ip_by_label(cls, client_object, label=None):
        """
        Fetches VTEP IP provided the label of that VTEP.
        """
        nsxa_socket = cls._get_nsxa_socket(client_object)
        # '%%s' survives the first interpolation; the label is filled in
        # when the command is issued below.
        cmd = '%s -t %s vtep/ip %%s' % (cls.CLI, nsxa_socket)
        out = client_object.connection.request(
            cmd % label).response_data.strip()
        return utilities.parse_one_line_output(
            out, record_delim=',', key_val_delim=':')[cls.IP_ADDRESS]

    @classmethod
    def get_mac_table(cls, client_object, switch_vni=None):
        """
        Fetches the MAC table for the logical switch.

        Each MAC's VTEP label is resolved to the VTEP's IP address via a
        separate vtep/ip query (one extra round-trip per entry).

        @param switch_vni: VNI to identify the logical switch.
        @type switch_vni: int
        @return: Returns the MACTableSchema object.
        @rtype: mac_table_schema.MACTableSchema
        """
        header_keys = ['Mac Address', 'VTEP Label']
        nsxa_socket = cls._get_nsxa_socket(client_object)
        cmd = ('%s -t %s vni/mac-vtep-label %s' %
               (cls.CLI, nsxa_socket, switch_vni))
        out = client_object.connection.request(cmd).response_data
        horizontal_parser = horizontal_table_parser.HorizontalTableParser()
        # Skip the VNI number in the output.
        mac_to_vtep = horizontal_parser.get_parsed_data(
            out, header_keys=header_keys, skip_head=1)['table']
        py_dicts = []
        for mac_vtep in mac_to_vtep:
            py_dict = {}
            vm_mac = mac_vtep['mac address']
            vtep_label = mac_vtep['vtep label']
            vtep_ip = cls.get_vtep_ip_by_label(client_object, label=vtep_label)
            py_dict['adapter_mac'] = vm_mac
            py_dict['adapter_ip'] = vtep_ip
            py_dicts.append(py_dict)
        py_dict = {'table': py_dicts}
        return mac_table_schema.MACTableSchema(py_dict=py_dict)

    @classmethod
    def get_replication_mode(cls, client_object, switch_vni=None):
        """
        Fetches the replication mode of the switch.

        @param switch_vni: VNI to identify the logical switch.
        @type switch_vni: int
        @return: Returns the replication mode in use.
        @rtype: str
        """
        nsxa_socket = cls._get_nsxa_socket(client_object)
        cmd = ('%s -t %s vni/replication-mode %s' %
               (cls.CLI, nsxa_socket, switch_vni))
        out = client_object.connection.request(cmd).response_data.strip()
        return utilities.parse_one_line_output(
            out, record_delim=',', key_val_delim=':')[cls.REPLICATION_MODE]

    @classmethod
    def get_logical_switch(cls, client_object, get_logical_switch=None):
        """
        Fetches logical switch information.

        Lists all VNIs on the host, augments each entry with its
        replication mode and lower-cases all values before mapping them
        into a LogicalSwitchSchema.
        """
        _ = get_logical_switch
        header_keys = ['VNI', 'Controller IP Address', 'Link Status']
        attribute_map = {'vni': 'switch_vni',
                         'controller ip address': 'controller_ip',
                         'link status': 'controller_status'}
        nsxa_socket = cls._get_nsxa_socket(client_object)
        cmd = ('%s -t %s vni/list ' % (cls.CLI, nsxa_socket))
        out = client_object.connection.request(cmd).response_data
        horizontal_parser = horizontal_table_parser.HorizontalTableParser()
        switch_dicts = horizontal_parser.get_parsed_data(
            out, header_keys=header_keys)['table']
        for switch_dict in switch_dicts:
            replication_mode = cls.get_replication_mode(
                client_object, switch_vni=switch_dict['vni'])
            switch_dict['replication_mode'] = replication_mode
            # Normalize values for case-insensitive comparisons downstream.
            for dict_key in switch_dict.keys():
                switch_dict[dict_key] = switch_dict[dict_key].lower()
        mapped_pydict = utilities.map_attributes(
            attribute_map, {'table': switch_dicts})
        return logical_switch_schema.LogicalSwitchSchema(py_dict=mapped_pydict)

    @classmethod
    def get_vtep_label(cls, client_object, switch_vni=None):
        """
        Fetches the VTEP labels for a given VNI.
        """
        header_keys = ['VTEP Label']
        nsxa_socket = cls._get_nsxa_socket(client_object)
        cmd = ('%s -t %s vni/vtep_list %s' %
               (cls.CLI, nsxa_socket, switch_vni))
        out = client_object.connection.request(cmd).response_data
        horizontal_parser = horizontal_table_parser.HorizontalTableParser()
        # Skip the VNI number in the output.
        return horizontal_parser.get_parsed_data(
            out, header_keys=header_keys, skip_head=1)

    @classmethod
    def get_vtep_table(cls, client_object, switch_vni=None,
                       host_switch_name=None):
        """
        Fetches the VTEP table i.e. the IP addresses of the VTEPs in this
        logical switch/VNI.

        ``host_switch_name`` is accepted for interface compatibility but
        unused here.
        """
        vtep_labels = cls.get_vtep_label(
            client_object, switch_vni=switch_vni)
        attribute_map = {'vtep label': 'adapter_ip'}
        # Replace each label in place with the VTEP's resolved IP address.
        for vtep_label in vtep_labels['table']:
            label = vtep_label['vtep label']
            vtep_label['vtep label'] = cls.get_vtep_ip_by_label(
                client_object, label=label)
        mapped_pydict = utilities.map_attributes(attribute_map, vtep_labels)
        return vtep_table_schema.VtepTableSchema(py_dict=mapped_pydict)

    @classmethod
    def get_vni_table(cls, client_object, switch_vni=None):
        raise NotImplementedError('Only Controller nodes are expected to have '
                                  'VNI table')

    @classmethod
    def get_ports(cls, client_object, switch_vni):
        raise NotImplementedError('STUB')

    @classmethod
    def is_master_for_vni(cls, client_object, switch_vni=None):
        raise NotImplementedError('Only CCP is expected to have knowledge '
                                  'about master controller of a VNI')

    @classmethod
    def list_portgroup(cls, client_object):
        raise NotImplementedError('Port groups do not exist on linux.')

    @classmethod
    def configure_uplinks(cls, client_object, uplinks=None):
        raise NotImplementedError('Can not configure uplinks on logical '
                                  'switches')
| [
"bpei@vmware.com"
] | bpei@vmware.com |
8c757e728d9b1d70c2a7f43d7d50e4cad5895c90 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /dockerized-gists/7485132/snippet.py | eecd6e2d90eb2489e1c6d00d27db4c3719557144 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 1,920 | py | """
The Pool module provides some classes for managing a fixed-size thread-safe pool of functionally identical objects. One use for this is database connections, which tend to take awhile to create.
Pool
class that manages the pool of objects.
Constructor
class used to create new instances of items in the Pool.
For more details, use pydoc or read the docstrings in the code.
Credits : Andy Dustman
(Note: I just extracted the code from the Wayback Machine in order to find it more easily, but I didn't touch that code, all the credits goes to Andy Dustman !)
Version : 0.0.0, aka "Probably the first, last, and only release."
Released: 2002-06-30 00:00 UTC
Stability: Perfect in every way
Original source : http://web.archive.org/web/20070610080245/http://dustman.net/andy/python/Pool/0.0.0/Pool.py
"""
from Queue import Queue, Full, Empty
class Pool(Queue):
    """Manage a fixed-size pool of reusable, identical objects.

    Objects are produced by a zero-argument ``constructor`` callable;
    ``get`` hands out a pooled object (or builds a fresh one when the pool
    is empty) and ``put`` returns one (silently dropping it when the pool
    is already full).
    """

    def __init__(self, constructor, poolsize=5):
        """``constructor``: zero-argument callable producing pool items."""
        Queue.__init__(self, poolsize)
        self.constructor = constructor

    def get(self, block=1):
        """Get an object from the pool or a new one if empty.

        Bug fix: the original used the ``cond and a or b`` idiom, which
        fell through to a blocking queue read whenever the constructor
        returned a *falsy* object (e.g. 0, '', empty container).
        """
        try:
            if self.empty():
                return self.constructor()
            return Queue.get(self, block)
        except Empty:
            # Raced with another consumer between empty() and get().
            return self.constructor()

    def put(self, obj, block=1):
        """Put an object into the pool if it is not full; otherwise the
        object is simply dropped. The caller must not use the object after
        this.

        Bug fix: the original ``cond and None or ...`` form still invoked
        the (blocking) ``Queue.put`` when the pool was full instead of
        discarding the object.
        """
        try:
            if not self.full():
                Queue.put(self, obj, block)
        except Full:
            pass
class Constructor:
    """Returns a constructor that returns ``function(*args, **kwargs)``
    when called.

    Fix: the deprecated Python 2 builtin ``apply`` has been replaced by a
    direct star-call, which is equivalent and works on Python 2 and 3.
    """

    def __init__(self, function, *args, **kwargs):
        self.f = function
        self.args = args
        self.kwargs = kwargs

    def __call__(self):
        return self.f(*self.args, **self.kwargs)
"gistshub@gmail.com"
] | gistshub@gmail.com |
cd337e0d2b894589f48c3ec1e73609dc98c7cf3a | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayCommerceKidsRelationQueryResponse.py | 2e7bc0d8e1af57db2e86b5335fa2c3b97ab35b64 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,091 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.UserInfoVO import UserInfoVO
class AlipayCommerceKidsRelationQueryResponse(AlipayResponse):
    """Response wrapper for the alipay.commerce.kids.relation.query API."""

    def __init__(self):
        super(AlipayCommerceKidsRelationQueryResponse, self).__init__()
        self._relation_list = None

    @property
    def relation_list(self):
        # List of UserInfoVO entries, or None before parsing.
        return self._relation_list

    @relation_list.setter
    def relation_list(self, value):
        # Accept ready-made UserInfoVO objects as well as raw dicts,
        # converting the latter via from_alipay_dict.
        if isinstance(value, list):
            self._relation_list = [
                item if isinstance(item, UserInfoVO)
                else UserInfoVO.from_alipay_dict(item)
                for item in value
            ]

    def parse_response_content(self, response_content):
        response = super(AlipayCommerceKidsRelationQueryResponse,
                         self).parse_response_content(response_content)
        if 'relation_list' in response:
            self.relation_list = response['relation_list']
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
e87c9851e2a257f3a6f30c0d4aaa4feb2ddab3f8 | eab72229ae04d1160704cbf90a08a582802a739c | /network.py | 871f969fcafec392143f3dd51e648f4afb7a1b5b | [
"MIT"
] | permissive | megatazm/Crowd-Counting | 444d39b0e3d6e98995f53badf4c073829038b6b7 | 647a055baccee2c3b6b780f38930e2ffd14d1664 | refs/heads/master | 2022-04-01T04:49:16.409675 | 2020-01-31T21:24:02 | 2020-01-31T21:24:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,957 | py | import numpy as np
import cv2
#import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import glob
import os
import random
import math
from matplotlib import pyplot as plt
#from matplotlib.pyplot import cm
from PIL import Image
#from keras.models import load_model
#import sys
#from heatmap import *
class MCNN:
    """Multi-column CNN crowd-density estimator (TensorFlow 1.x graph mode).

    Three parallel CNN columns with different receptive fields are fused
    into a single density map whose sum approximates the head count.
    Checkpoints live in ``model<dataset>/model.ckpt`` and data follows the
    ShanghaiTech directory layout used by the loaders below.
    """

    def __init__(self, dataset):
        # dataset: ShanghaiTech part identifier ('A'/'B'); selects data
        # directories and the checkpoint directory 'model<dataset>'.
        self.dataset = dataset
        self.LEARNING_RATE = 1e-4
        # NHWC grayscale image and ground-truth density-map placeholders
        # (spatial dims left dynamic so any image size can be fed).
        self.x = tf.placeholder(tf.float32, [None, None, None, 1])
        self.y_act = tf.placeholder(tf.float32, [None, None, None, 1])
        self.y_pre = self.inf(self.x)
        # Loss - Euclidean Distance
        self.loss = tf.sqrt(tf.reduce_mean(tf.square(self.y_act - self.y_pre)))
        self.act_sum = tf.reduce_sum(self.y_act)
        self.pre_sum = tf.reduce_sum(self.y_pre)
        # Mean Absolute Error
        self.MAE = tf.abs(self.act_sum - self.pre_sum)
        self.train_step = tf.train.AdamOptimizer(self.LEARNING_RATE).minimize(self.loss)

    # Procuring data and densities.
    def data_pre_train(self, kind, dataset):
        """Load 'train'/'val' patches and 4x-downsampled density maps.

        Returns a list of [image, density_quarter] pairs. Images are
        grayscale, normalised to roughly [-1, 1); each density map is
        summed over 4x4 cells so its resolution matches the network output.
        """
        img_path = '.\\data\\formatted_trainval\\shanghaitech_part_' + dataset + '_patches_9\\' + kind + '\\'
        den_path = '.\\data\\formatted_trainval\\shanghaitech_part_' + dataset + '_patches_9\\' + kind + '_den\\'
        print(den_path)
        print('Loading', kind, 'data from dataset', dataset, '...')
        img_names = os.listdir(img_path)
        img_num = len(img_names)
        data = []
        for i in range(1, img_num + 1):
            if i % 100 == 0:
                print(i, '/', img_num)
            name = img_names[i - 1]
            img = cv2.imread(img_path + name, 0)
            img = np.array(img)
            img = (img - 127.5) / 128
            den = np.loadtxt(open(den_path + name[:-4] + '.csv'), delimiter = ",")
            den_quarter = np.zeros((int(den.shape[0] / 4), int(den.shape[1] / 4)))
            # NOTE(review): the inner loop reuses the outer loop variable
            # ``i``; harmless because ``for`` rebinds it each iteration,
            # but worth renaming if this code is ever touched.
            for i in range(len(den_quarter)):
                for j in range(len(den_quarter[0])):
                    for p in range(4):
                        for q in range(4):
                            den_quarter[i][j] += den[i * 4 + p][j * 4 + q]
            data.append([img, den_quarter])
        print('Loading', kind, 'data from dataset', dataset, 'finished')
        return data

    def data_pre_test(self, dataset):
        """Load full-size test images and their ground-truth head counts.

        Returns a list of [image, total_count] pairs, where total_count is
        the sum over the ground-truth density map.
        """
        img_path = '.\\data\\original\\shanghaitech\\part_'+ dataset +'\\test_data\\images\\'
        den_path = '.\\data\\original\\shanghaitech\\part_'+ dataset +'\\test_data\\ground-truth-csv\\'
        #img_path = './data/original/shanghaitech/part_' + dataset + '_final/test_data/images/'
        #den_path = './data/original/shanghaitech/part_' + dataset + '_final/test_data/ground_truth_csv/'
        print('loading test data from dataset', dataset, '...')
        img_names = os.listdir(img_path)
        img_num = len(img_names)
        data = []
        for i in range(1, img_num + 1):
            if i % 50 == 0:
                print(i, '/', img_num)
            # Test files are assumed to be named IMG_1.jpg ... IMG_N.jpg.
            name = 'IMG_' + str(i) + '.jpg'
            img = cv2.imread(img_path + name, 0)
            img = np.array(img)
            img = (img - 127.5) / 128
            den = np.loadtxt(open(den_path + name[:-4] + '.csv'), delimiter = ",")
            den_sum = np.sum(den)
            data.append([img, den_sum])
            #if i <= 2:
                #heatmap(den, i, dataset, 'act')
        print('load test data from dataset', dataset, 'finished')
        return data

    def conv2d(self, x, w):
        """Same-padding 2-D convolution with stride 1."""
        return tf.nn.conv2d(x, w, strides = [1, 1, 1, 1], padding = 'SAME')

    def max_pool_2x2(self, x):
        """2x2 max pooling with stride 2 (halves each spatial dimension)."""
        return tf.nn.max_pool(x, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')

    def inf(self, x):
        """Build the three-column inference graph; returns the density map.

        Columns s/m/l use progressively larger kernels (first-layer sizes
        5/7/9) to capture heads at different scales. Each column pools
        twice (4x downsampling overall); a final 1x1 convolution fuses the
        channel-concatenated columns into a single-channel density map.
        """
        #tf.reset_default_graph()
        # s net ###########################################################
        w_conv1_1 = tf.get_variable('w_conv1_1', [5, 5, 1, 24])
        b_conv1_1 = tf.get_variable('b_conv1_1', [24])
        h_conv1_1 = tf.nn.relu(self.conv2d(x, w_conv1_1) + b_conv1_1)
        h_pool1_1 = self.max_pool_2x2(h_conv1_1)
        w_conv2_1 = tf.get_variable('w_conv2_1', [3, 3, 24, 48])
        b_conv2_1 = tf.get_variable('b_conv2_1', [48])
        h_conv2_1 = tf.nn.relu(self.conv2d(h_pool1_1, w_conv2_1) + b_conv2_1)
        h_pool2_1 = self.max_pool_2x2(h_conv2_1)
        w_conv3_1 = tf.get_variable('w_conv3_1', [3, 3, 48, 24])
        b_conv3_1 = tf.get_variable('b_conv3_1', [24])
        h_conv3_1 = tf.nn.relu(self.conv2d(h_pool2_1, w_conv3_1) + b_conv3_1)
        w_conv4_1 = tf.get_variable('w_conv4_1', [3, 3, 24, 12])
        b_conv4_1 = tf.get_variable('b_conv4_1', [12])
        h_conv4_1 = tf.nn.relu(self.conv2d(h_conv3_1, w_conv4_1) + b_conv4_1)
        # m net ###########################################################
        w_conv1_2 = tf.get_variable('w_conv1_2', [7, 7, 1, 20])
        b_conv1_2 = tf.get_variable('b_conv1_2', [20])
        h_conv1_2 = tf.nn.relu(self.conv2d(x, w_conv1_2) + b_conv1_2)
        h_pool1_2 = self.max_pool_2x2(h_conv1_2)
        w_conv2_2 = tf.get_variable('w_conv2_2', [5, 5, 20, 40])
        b_conv2_2 = tf.get_variable('b_conv2_2', [40])
        h_conv2_2 = tf.nn.relu(self.conv2d(h_pool1_2, w_conv2_2) + b_conv2_2)
        h_pool2_2 = self.max_pool_2x2(h_conv2_2)
        w_conv3_2 = tf.get_variable('w_conv3_2', [5, 5, 40, 20])
        b_conv3_2 = tf.get_variable('b_conv3_2', [20])
        h_conv3_2 = tf.nn.relu(self.conv2d(h_pool2_2, w_conv3_2) + b_conv3_2)
        w_conv4_2 = tf.get_variable('w_conv4_2', [5, 5, 20, 10])
        b_conv4_2 = tf.get_variable('b_conv4_2', [10])
        h_conv4_2 = tf.nn.relu(self.conv2d(h_conv3_2, w_conv4_2) + b_conv4_2)
        # l net ###########################################################
        w_conv1_3 = tf.get_variable('w_conv1_3', [9, 9, 1, 16])
        b_conv1_3 = tf.get_variable('b_conv1_3', [16])
        h_conv1_3 = tf.nn.relu(self.conv2d(x, w_conv1_3) + b_conv1_3)
        h_pool1_3 = self.max_pool_2x2(h_conv1_3)
        w_conv2_3 = tf.get_variable('w_conv2_3', [7, 7, 16, 32])
        b_conv2_3 = tf.get_variable('b_conv2_3', [32])
        h_conv2_3 = tf.nn.relu(self.conv2d(h_pool1_3, w_conv2_3) + b_conv2_3)
        h_pool2_3 = self.max_pool_2x2(h_conv2_3)
        w_conv3_3 = tf.get_variable('w_conv3_3', [7, 7, 32, 16])
        b_conv3_3 = tf.get_variable('b_conv3_3', [16])
        h_conv3_3 = tf.nn.relu(self.conv2d(h_pool2_3, w_conv3_3) + b_conv3_3)
        w_conv4_3 = tf.get_variable('w_conv4_3', [7, 7, 16, 8])
        b_conv4_3 = tf.get_variable('b_conv4_3', [8])
        h_conv4_3 = tf.nn.relu(self.conv2d(h_conv3_3, w_conv4_3) + b_conv4_3)
        # merge ###########################################################
        h_conv4_merge = tf.concat([h_conv4_1, h_conv4_2, h_conv4_3], 3)
        w_conv5 = tf.get_variable('w_conv5', [1, 1, 30, 1])
        b_conv5 = tf.get_variable('b_conv5', [1])
        # Linear 1x1 fusion layer (no ReLU) producing the density map.
        h_conv5 = self.conv2d(h_conv4_merge, w_conv5) + b_conv5
        y_pre = h_conv5
        return y_pre

    def train(self, max_epoch):
        """Train for ``max_epoch`` epochs, checkpointing on best val MAE."""
        with tf.Session() as sess:
            # Start fresh unless a checkpoint directory already exists.
            if not os.path.exists('./model' + self.dataset):
                sess.run(tf.global_variables_initializer())
            else:
                saver = tf.train.Saver()
                saver.restore(sess, 'model' + self.dataset + '/model.ckpt')
            data_train = self.data_pre_train('train', self.dataset)
            data_val = self.data_pre_train('val', self.dataset)
            best_mae = 10000
            for epoch in range(max_epoch):
                #training process
                epoch_mae = 0
                random.shuffle(data_train)
                # Batch size is effectively 1: one patch per step.
                for i in range(len(data_train)):
                    data = data_train[i]
                    x_in = np.reshape(data[0], (1, data[0].shape[0], data[0].shape[1], 1))
                    y_ground = np.reshape(data[1], (1, data[1].shape[0], data[1].shape[1], 1))
                    #print(x_in + " "+ y_ground)
                    _, l, y_a, y_p, act_s, pre_s, m = sess.run( \
                        [self.train_step, self.loss, self.y_act, self.y_pre, \
                        self.act_sum, self.pre_sum, self.MAE], \
                        feed_dict = {self.x: x_in, self.y_act: y_ground})
                    if i % 500 == 0:
                        print('epoch', epoch, 'step', i, 'mae:', m)
                    epoch_mae += m
                    #print(epoch_mae)
                ''' Chal raha hai! '''
                epoch_mae /= len(data_train)
                print('epoch', epoch + 1, 'train_mae:', epoch_mae)
                #validation process
                val_mae = 0
                val_mse = 0
                for i in range(len(data_val)):
                    data = data_val[i]
                    x_in = np.reshape(data[0], (1, data[0].shape[0], data[0].shape[1], 1))
                    y_ground = np.reshape(data[1], (1, data[1].shape[0], data[1].shape[1], 1))
                    act_s, pre_s, m = sess.run( \
                        [self.act_sum, self.pre_sum, self.MAE], \
                        feed_dict = {self.x: x_in, self.y_act: y_ground})
                    val_mae += m
                    val_mse += (act_s - pre_s) * (act_s - pre_s)
                val_mae /= len(data_val)
                val_mse = math.sqrt(val_mse / len(data_val))
                print('epoch', epoch, 'valid_mae:', val_mae, 'valid_mse:', val_mse)
                # Keep only the checkpoint with the best validation MAE.
                if val_mae < best_mae:
                    best_mae = val_mae
                    print('best mae so far, saving model.')
                    saver = tf.train.Saver()
                    saver.save(sess, 'model' + self.dataset + '/model.ckpt')
                else:
                    print('best mae:', best_mae)
                print('**************************')

    def test(self):
        """Evaluate MAE/MSE and a percentage 'accuracy' on the test split."""
        with tf.Session() as sess:
            saver = tf.train.Saver()
            saver.restore(sess, 'model' + self.dataset + '/model.ckpt')
            data = self.data_pre_test(self.dataset)
            # NOTE: ``re`` here is a plain accumulator (mean predicted/actual
            # ratio in percent), not the regex module.
            re=0
            mae = 0
            mse = 0
            for i in range(1, len(data) + 1):
                # for i in range(1,21):
                if i % 20 == 0:
                    print(i, '/', len(data))
                d = data[i - 1]
                x_in = d[0]
                y_a = d[1]
                x_in = np.reshape(d[0], (1, d[0].shape[0], d[0].shape[1], 1))
                y_p_den = sess.run(self.y_pre, feed_dict = {self.x: x_in})
                #print('y_p_den : ', y_p_den)
                # Predicted count = sum over the predicted density map.
                y_p = np.sum(y_p_den)
                print('I : ', i)
                print('y_p : ', y_p)
                print('y_a: ',y_a)
                #if i <= 2:
                    #y_p_den = np.reshape(y_p_den, (y_p_den.shape[1], y_p_den.shape[2]))
                    #heatmap(y_p_den, i, self.dataset, 'pre')
                mae += abs(y_a - y_p)
                re+=( y_p/y_a)*100
                mse += (y_a - y_p) * (y_a - y_p)
            mae /= len(data)
            mse = math.sqrt(mse / len(data))
            re=(re/len(data))
            if(re>100):
                # Fold an overshoot back so e.g. 110% reads as 90% accuracy.
                # NOTE(review): grouping of these two lines reconstructed
                # from intent -- confirm against the upstream source.
                re=re-100
                re=100-re
            print('Accuracy: ',re)
            print('mae: ', mae)
            print('mse: ', mse)

    def predict(self, path):
        """Run inference on every '*_crop_y.png' below ``path``.

        Writes one "<image>,<count>" line per file to ``output.csv`` and
        saves a density heat-map next to each image as '<name>_pos.png'.
        """
        with tf.Session() as sess:
            saver = tf.train.Saver()
            saver.restore(sess, 'model' + self.dataset + '/model.ckpt')
            # Collect images one directory level below ``path``.
            dirs = [f for f in glob.glob(path + '/*/')]
            images = []
            for x in dirs:
                images.append([f for f in glob.glob(x + '/*_crop_y.png')])
            images.sort()
            images = [item for sublist in images for item in sublist]
            # NOTE(review): ``file`` shadows the (Py2) builtin and is never
            # closed; output.csv stays open until interpreter exit.
            file = open("output.csv", "w")
            for img in images:
                #img_path = '.\\data\\original\\shanghaitech\\part_'+ self.dataset +'\\test_data\\images\\'
                data = []
                #name = 'teste_1.jpg'
                #name = 'centraliens.jpg'
                #img = cv2.imread(img_path + name, 0)
                img_ = cv2.imread(img, 0)
                xres, yres = img_.shape
                img_ = np.array(img_)
                img_ = (img_ - 127.5) / 128
                data.append([img_])
                d = data[0]
                x_in = d[0]
                x_in = np.reshape(d[0], (1, d[0].shape[0], d[0].shape[1], 1))
                y_p_den = sess.run(self.y_pre, feed_dict = {self.x: x_in})
                a, b, c, d = y_p_den.shape
                den = y_p_den.reshape((b, c))
                # Sum only positive densities for the predicted count.
                y_p = 0
                for i in range(b):
                    for j in range(c):
                        if den[i][j] > 0:
                            y_p += den[i][j]
                #y_p = np.sum(den)
                print("Image ", img, " predicted : ", y_p)
                print("******** ", images.index(img)/len(images), images.index(img), " of ", len(images), "*********")
                file.write(img + "," + str(y_p) + '\n')
                #den = np.rot90(den, 3)
                #den = cv2.resize(den, (xres, yres))
                #color_img = cv2.cvtColor(den, cv2.COLOR_GRAY2RGB)
                #Image.fromarray(den_pos, mode='L').save(img[:-4] + '_pos.png')
                #Image.fromarray(den, mode='RGB').save(img[:-4] + '_pos.png')
                #plt.imsave(img[:-4] + '_pos.png', den, cmap='gray')
                #im = Image.fromarray(den)
                #print(den)
                #cv2.imwrite(img[:-4] + '_pos.png', den)
                #im.save(img[:-4] + '_pos.png', format='png')
                # Make the plot
                #print(den.shape)
                plt.imshow(den)
                #plt.axis([0, 970, 0, 1790])
                plt.axis('off')
                plt.savefig(img[:-4] + '_pos.png', bbox_inches='tight', pad_inches=0.0, dpi=600)
                #plt.show()
"gpsunicamp016@gmail.com"
] | gpsunicamp016@gmail.com |
ba3b6c4fcd09c39307eb920d7fc4fae0708ca11c | 5f6b05b4a953a4cf4692b1164391bb57ad097e9d | /graph_nets-master_tf2/graph_nets/tests/modules_test.py | a4365baa76e1fd6002f1df6408823ab861801fe4 | [
"Apache-2.0"
] | permissive | 2877992943/somelib | 0972fea9b7b22435d3086a71690102388d642a3a | 83a3c40501a9fa667564391b57b1e1bbc63a2290 | refs/heads/master | 2021-01-03T17:06:22.607657 | 2020-02-13T02:53:06 | 2020-02-13T02:53:06 | 240,163,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,995 | py | # Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for modules.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import parameterized
from graph_nets import blocks
from graph_nets import graphs
from graph_nets import modules
from graph_nets import utils_np
from graph_nets import utils_tf
import numpy as np
import sonnet as snt
import tensorflow as tf
# Hand-built graph fixtures shared by the tests below.  Each dict follows the
# `graphs.GraphsTuple` data-dict convention: 3 global features, 3 nodes with
# 2 features each, edges with 4 features each, and `senders`/`receivers`
# holding the node index at each end of every edge.
SMALL_GRAPH_1 = {
    "globals": [1.1, 1.2, 1.3],
    "nodes": [[10.1, 10.2], [20.1, 20.2], [30.1, 30.2]],
    "edges": [[101., 102., 103., 104.], [201., 202., 203., 204.]],
    "senders": [0, 1],
    "receivers": [1, 2],
}
# Negated feature values and a single edge.
SMALL_GRAPH_2 = {
    "globals": [-1.1, -1.2, -1.3],
    "nodes": [[-10.1, -10.2], [-20.1, -20.2], [-30.1, -30.2]],
    "edges": [[-101., -102., -103., -104.]],
    "senders": [1,],
    "receivers": [2,],
}
# Same features as SMALL_GRAPH_1, different connectivity (node 1 sends twice).
SMALL_GRAPH_3 = {
    "globals": [1.1, 1.2, 1.3],
    "nodes": [[10.1, 10.2], [20.1, 20.2], [30.1, 30.2]],
    "edges": [[101., 102., 103., 104.], [201., 202., 203., 204.]],
    "senders": [1, 1],
    "receivers": [0, 2],
}
# Same features as SMALL_GRAPH_1, different connectivity (node 1 receives twice).
SMALL_GRAPH_4 = {
    "globals": [1.1, 1.2, 1.3],
    "nodes": [[10.1, 10.2], [20.1, 20.2], [30.1, 30.2]],
    "edges": [[101., 102., 103., 104.], [201., 202., 203., 204.]],
    "senders": [0, 2],
    "receivers": [1, 1],
}
def _mask_leading_dimension(tensor):
  """Returns a placeholder defaulting to `tensor` with an unknown batch dim."""
  trailing_shape = tensor.get_shape().as_list()[1:]
  return tf.compat.v1.placeholder_with_default(tensor, [None] + trailing_shape)
class GraphModuleTest(tf.test.TestCase, parameterized.TestCase):
  """Shared fixtures and helpers for all the module tests in this file."""

  def setUp(self):
    super(GraphModuleTest, self).setUp()
    # A fixed seed keeps variable initialisation deterministic across runs.
    tf.compat.v1.set_random_seed(0)

  def _get_input_graph(self, none_field=None):
    """Returns the four-graph fixture, optionally clearing one field to None."""
    graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(
        [SMALL_GRAPH_1, SMALL_GRAPH_2, SMALL_GRAPH_3, SMALL_GRAPH_4])
    if not none_field:
      return graphs_tuple
    return graphs_tuple.replace(**{none_field: None})

  def _get_shaped_input_graph(self):
    """Returns a two-graph batch whose fields are rank-4 (image-like) tensors."""
    return graphs.GraphsTuple(
        nodes=tf.zeros([3, 4, 5, 11], dtype=tf.float32),
        edges=tf.zeros([5, 4, 5, 12], dtype=tf.float32),
        globals=tf.zeros([2, 4, 5, 13], dtype=tf.float32),
        receivers=tf.range(5, dtype=tf.int32) // 3,
        senders=tf.range(5, dtype=tf.int32) % 3,
        n_node=tf.constant([2, 1], dtype=tf.int32),
        n_edge=tf.constant([3, 2], dtype=tf.int32),
    )

  def _get_shaped_model_fns(self):
    """Returns Conv2D constructors for the edge, node and global models."""
    return tuple(
        functools.partial(
            snt.Conv2D, output_channels=channels, kernel_shape=[3, 3])
        for channels in (10, 8, 7))

  def _assert_build_and_run(self, network, input_graph):
    """Asserts `network` both connects to `input_graph` and executes."""
    # Connecting the module must not raise...
    outputs = network(input_graph)
    # ...and neither must evaluating the resulting tensors.
    with self.test_session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      sess.run(outputs)
class GraphIndependentTest(GraphModuleTest):
  """Tests for `modules.GraphIndependent` (independent per-field models)."""

  def _get_model(self, name=None):
    """Returns a GraphIndependent with single-layer MLP models per field."""
    kwargs = {
        "edge_model_fn": functools.partial(snt.nets.MLP, output_sizes=[5]),
        "node_model_fn": functools.partial(snt.nets.MLP, output_sizes=[10]),
        "global_model_fn": functools.partial(snt.nets.MLP, output_sizes=[15]),
    }
    if name:
      kwargs["name"] = name
    return modules.GraphIndependent(**kwargs)

  def test_same_as_subblocks(self):
    """Compares the output to explicit subblocks output."""
    input_graph = self._get_input_graph()
    model = self._get_model()
    output_graph = model(input_graph)
    # Each field should equal its own model applied to that field alone.
    expected_output_edges = model._edge_model(input_graph.edges)
    expected_output_nodes = model._node_model(input_graph.nodes)
    expected_output_globals = model._global_model(input_graph.globals)
    with self.test_session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      (output_graph_out,
       expected_edges_out, expected_nodes_out, expected_globals_out) = sess.run(
           (output_graph,
            expected_output_edges,
            expected_output_nodes,
            expected_output_globals))
    self.assertAllEqual(expected_edges_out, output_graph_out.edges)
    self.assertAllEqual(expected_nodes_out, output_graph_out.nodes)
    self.assertAllEqual(expected_globals_out, output_graph_out.globals)

  @parameterized.named_parameters(
      ("default name", None), ("custom name", "custom_name"))
  def test_created_variables(self, name=None):
    """Verifies variable names and shapes created by a GraphIndependent."""
    name = name if name is not None else "graph_independent"
    # Weight input sizes follow the fixture: edges=4, nodes=2, globals=3.
    expected_var_shapes_dict = {
        name + "/edge_model/mlp/linear_0/b:0": [5],
        name + "/edge_model/mlp/linear_0/w:0": [4, 5],
        name + "/node_model/mlp/linear_0/b:0": [10],
        name + "/node_model/mlp/linear_0/w:0": [2, 10],
        name + "/global_model/mlp/linear_0/b:0": [15],
        name + "/global_model/mlp/linear_0/w:0": [3, 15],
    }
    input_graph = self._get_input_graph()
    model = self._get_model(name=name)
    model(input_graph)
    variables = model.get_variables()
    var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
    self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)

  def test_gradient_flow(self):
    """Verifies that gradients only flow within the same field."""
    input_graph = self._get_input_graph()
    model = self._get_model()
    output_graph = model(input_graph)
    for input_field in ["nodes", "edges", "globals"]:
      input_tensor = getattr(input_graph, input_field)
      for output_field in ["nodes", "edges", "globals"]:
        output_tensor = getattr(output_graph, output_field)
        gradients = tf.gradients(ys=output_tensor, xs=input_tensor)
        if input_field == output_field:
          self.assertNotEqual(None, gradients[0])
        else:
          # Fields are processed independently, so no cross-field gradient.
          self.assertListEqual([None], gradients)

  @parameterized.named_parameters(
      ("differently shaped edges", "edges"),
      ("differently shaped nodes", "nodes"),
      ("differently shaped globals", "globals"),)
  def test_incompatible_higher_rank_inputs_no_raise(self, field_to_reshape):
    """A GraphIndependent does not make assumptions on its inputs shapes."""
    input_graph = self._get_shaped_input_graph()
    edge_model_fn, node_model_fn, global_model_fn = self._get_shaped_model_fns()
    input_graph = input_graph.map(
        lambda v: tf.transpose(a=v, perm=[0, 2, 1, 3]), [field_to_reshape])
    network = modules.GraphIndependent(
        edge_model_fn, node_model_fn, global_model_fn)
    self._assert_build_and_run(network, input_graph)
class GraphNetworkTest(GraphModuleTest):
  """Tests for the full `modules.GraphNetwork` (edge -> node -> global)."""

  def _get_model(self):
    """Returns a GraphNetwork with small linear edge/node/global models."""
    edge_model_fn = functools.partial(snt.Linear, output_size=5)
    node_model_fn = functools.partial(snt.Linear, output_size=10)
    global_model_fn = functools.partial(snt.Linear, output_size=15)
    return modules.GraphNetwork(
        edge_model_fn=edge_model_fn,
        node_model_fn=node_model_fn,
        global_model_fn=global_model_fn)

  @parameterized.named_parameters(
      ("default name", None), ("custom name", "custom_name"))
  def test_created_variables(self, name=None):
    """Verifies variable names and shapes created by a GraphNetwork."""
    name = name if name is not None else "graph_network"
    # Input sizes are spelled out as sums of the concatenated feature sizes
    # each block receives (e.g. edge block: edges 4 + two nodes 4 + globals 3).
    expected_var_shapes_dict = {
        name + "/edge_block/mlp/linear_0/b:0": [5],
        name + "/edge_block/mlp/linear_0/w:0": [4 + 4 + 3, 5],
        name + "/node_block/mlp/linear_0/b:0": [10],
        name + "/node_block/mlp/linear_0/w:0": [5 + 2 + 3, 10],
        name + "/global_block/mlp/linear_0/b:0": [15],
        name + "/global_block/mlp/linear_0/w:0": [10 + 5 + 3, 15],
    }
    input_graph = self._get_input_graph()
    extra_kwargs = {"name": name} if name else {}
    model = modules.GraphNetwork(
        edge_model_fn=functools.partial(snt.nets.MLP, output_sizes=[5]),
        node_model_fn=functools.partial(snt.nets.MLP, output_sizes=[10]),
        global_model_fn=functools.partial(snt.nets.MLP, output_sizes=[15]),
        **extra_kwargs)
    model(input_graph)
    variables = model.get_variables()
    var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
    self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)

  @parameterized.named_parameters(
      ("reduce sum reduction", tf.math.unsorted_segment_sum,),
      ("reduce max or zero reduction", blocks.unsorted_segment_max_or_zero,),)
  def test_same_as_subblocks(self, reducer):
    """Compares the output to explicit subblocks output.

    Args:
      reducer: The reducer used in the `NodeBlock` and `GlobalBlock`.
    """
    input_graph = self._get_input_graph()
    edge_model_fn = functools.partial(snt.Linear, output_size=5)
    node_model_fn = functools.partial(snt.Linear, output_size=10)
    global_model_fn = functools.partial(snt.Linear, output_size=15)
    graph_network = modules.GraphNetwork(
        edge_model_fn=edge_model_fn,
        node_model_fn=node_model_fn,
        global_model_fn=global_model_fn,
        reducer=reducer)
    output_graph = graph_network(input_graph)
    # Rebuild the three blocks reusing the GraphNetwork's own models so that
    # weights are shared with the module under test.
    edge_block = blocks.EdgeBlock(
        edge_model_fn=lambda: graph_network._edge_block._edge_model,
        use_sender_nodes=True,
        use_edges=True,
        use_receiver_nodes=True,
        use_globals=True)
    node_block = blocks.NodeBlock(
        node_model_fn=lambda: graph_network._node_block._node_model,
        use_nodes=True,
        use_sent_edges=False,
        use_received_edges=True,
        use_globals=True,
        received_edges_reducer=reducer)
    global_block = blocks.GlobalBlock(
        global_model_fn=lambda: graph_network._global_block._global_model,
        use_nodes=True,
        use_edges=True,
        use_globals=True,
        edges_reducer=reducer,
        nodes_reducer=reducer)
    expected_output_edge_block = edge_block(input_graph)
    expected_output_node_block = node_block(expected_output_edge_block)
    expected_output_global_block = global_block(expected_output_node_block)
    expected_edges = expected_output_edge_block.edges
    expected_nodes = expected_output_node_block.nodes
    expected_globals = expected_output_global_block.globals
    with self.test_session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      (output_graph_out,
       expected_edges_out, expected_nodes_out, expected_globals_out) = sess.run(
           (output_graph, expected_edges, expected_nodes, expected_globals))
    self.assertAllEqual(expected_edges_out, output_graph_out.edges)
    self.assertAllEqual(expected_nodes_out, output_graph_out.nodes)
    self.assertAllEqual(expected_globals_out, output_graph_out.globals)

  def test_dynamic_batch_sizes(self):
    """Checks that all batch sizes are as expected through a GraphNetwork."""
    input_graph = self._get_input_graph()
    # Placeholders with an unknown leading dimension in every field.
    placeholders = input_graph.map(_mask_leading_dimension, graphs.ALL_FIELDS)
    model = self._get_model()
    output = model(placeholders)
    with self.test_session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      # Feed a differently sized batch (2 graphs instead of 4).
      other_input_graph = utils_np.data_dicts_to_graphs_tuple(
          [SMALL_GRAPH_1, SMALL_GRAPH_2])
      actual = sess.run(output, {placeholders: other_input_graph})
    for k, v in other_input_graph._asdict().items():
      self.assertEqual(v.shape[0], getattr(actual, k).shape[0])

  @parameterized.named_parameters(
      ("float64 data", tf.float64, tf.int32),
      ("int64 indices", tf.float32, tf.int64),)
  def test_dtypes(self, data_dtype, indices_dtype):
    """Checks that all the output types are as expected in a GraphNetwork."""
    input_graph = self._get_input_graph()
    input_graph = input_graph.map(lambda v: tf.cast(v, data_dtype),
                                  ["nodes", "globals", "edges"])
    input_graph = input_graph.map(lambda v: tf.cast(v, indices_dtype),
                                  ["senders", "receivers"])
    model = self._get_model()
    output = model(input_graph)
    for field in ["nodes", "globals", "edges"]:
      self.assertEqual(data_dtype, getattr(output, field).dtype)
    for field in ["receivers", "senders"]:
      self.assertEqual(indices_dtype, getattr(output, field).dtype)

  @parameterized.named_parameters(
      ("edges only", True, False, False, False),
      ("receivers only", False, True, False, False),
      ("senders only", False, False, True, False),
      ("globals only", False, False, False, True),)
  def test_edge_block_options(self,
                              use_edges,
                              use_receiver_nodes,
                              use_sender_nodes,
                              use_globals):
    """Test for configuring the EdgeBlock options."""
    reducer = tf.math.unsorted_segment_sum
    input_graph = self._get_input_graph()
    edge_model_fn = functools.partial(snt.Linear, output_size=10)
    edge_block_opt = {"use_edges": use_edges,
                      "use_receiver_nodes": use_receiver_nodes,
                      "use_sender_nodes": use_sender_nodes,
                      "use_globals": use_globals}
    # Identity node model
    node_model_fn = lambda: tf.identity
    node_block_opt = {"use_received_edges": False,
                      "use_sent_edges": False,
                      "use_nodes": True,
                      "use_globals": False}
    # Identity global model
    global_model_fn = lambda: tf.identity
    global_block_opt = {"use_globals": True,
                        "use_nodes": False,
                        "use_edges": False}
    graph_network = modules.GraphNetwork(
        edge_model_fn=edge_model_fn,
        edge_block_opt=edge_block_opt,
        node_model_fn=node_model_fn,
        node_block_opt=node_block_opt,
        global_model_fn=global_model_fn,
        global_block_opt=global_block_opt,
        reducer=reducer)
    output_graph = graph_network(input_graph)
    # Only the edge block transforms anything, so the expected output is a
    # standalone EdgeBlock sharing the network's edge model.
    edge_block = blocks.EdgeBlock(
        edge_model_fn=lambda: graph_network._edge_block._edge_model,
        use_edges=use_edges,
        use_receiver_nodes=use_receiver_nodes,
        use_sender_nodes=use_sender_nodes,
        use_globals=use_globals)
    expected_output_edge_block = edge_block(input_graph)
    expected_output_node_block = expected_output_edge_block
    expected_output_global_block = expected_output_node_block
    expected_edges = expected_output_edge_block.edges
    expected_nodes = expected_output_node_block.nodes
    expected_globals = expected_output_global_block.globals
    with self.test_session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      (output_graph_out,
       expected_edges_out, expected_nodes_out, expected_globals_out) = sess.run(
           (output_graph, expected_edges, expected_nodes, expected_globals))
    self.assertAllEqual(expected_edges_out, output_graph_out.edges)
    self.assertAllEqual(expected_nodes_out, output_graph_out.nodes)
    self.assertAllEqual(expected_globals_out, output_graph_out.globals)

  @parameterized.named_parameters(
      ("received edges only", True, False, False, False, None, None),
      ("received edges, max reduction",
       True, False, False, False, tf.math.unsorted_segment_max, None),
      ("sent edges only", False, True, False, False, None, None),
      ("sent edges, max reduction",
       False, True, False, False, None, tf.math.unsorted_segment_max),
      ("nodes only", False, False, True, False, None, None),
      ("globals only", False, False, False, True, None, None),
  )
  def test_node_block_options(self,
                              use_received_edges,
                              use_sent_edges,
                              use_nodes,
                              use_globals,
                              received_edges_reducer,
                              sent_edges_reducer):
    """Test for configuring the NodeBlock options."""
    input_graph = self._get_input_graph()
    # Default the reducers only when the corresponding edges are used.
    if use_received_edges:
      received_edges_reducer = received_edges_reducer or tf.math.unsorted_segment_sum
    if use_sent_edges:
      sent_edges_reducer = sent_edges_reducer or tf.math.unsorted_segment_sum
    # Identity edge model.
    edge_model_fn = lambda: tf.identity
    edge_block_opt = {"use_edges": True,
                      "use_receiver_nodes": False,
                      "use_sender_nodes": False,
                      "use_globals": False}
    node_model_fn = functools.partial(snt.Linear, output_size=10)
    node_block_opt = {"use_received_edges": use_received_edges,
                      "use_sent_edges": use_sent_edges,
                      "use_nodes": use_nodes,
                      "use_globals": use_globals,
                      "received_edges_reducer": received_edges_reducer,
                      "sent_edges_reducer": sent_edges_reducer}
    # Identity global model
    global_model_fn = lambda: tf.identity
    global_block_opt = {"use_globals": True,
                        "use_nodes": False,
                        "use_edges": False}
    graph_network = modules.GraphNetwork(
        edge_model_fn=edge_model_fn,
        edge_block_opt=edge_block_opt,
        node_model_fn=node_model_fn,
        node_block_opt=node_block_opt,
        global_model_fn=global_model_fn,
        global_block_opt=global_block_opt)
    output_graph = graph_network(input_graph)
    # Only the node block transforms anything; compare against a standalone
    # NodeBlock sharing the network's node model.
    node_block = blocks.NodeBlock(
        node_model_fn=lambda: graph_network._node_block._node_model,
        use_nodes=use_nodes,
        use_sent_edges=use_sent_edges,
        use_received_edges=use_received_edges,
        use_globals=use_globals,
        received_edges_reducer=received_edges_reducer,
        sent_edges_reducer=sent_edges_reducer)
    expected_output_edge_block = input_graph
    expected_output_node_block = node_block(input_graph)
    expected_output_global_block = expected_output_node_block
    expected_edges = expected_output_edge_block.edges
    expected_nodes = expected_output_node_block.nodes
    expected_globals = expected_output_global_block.globals
    with self.test_session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      (output_graph_out,
       expected_edges_out, expected_nodes_out, expected_globals_out) = sess.run(
           (output_graph, expected_edges, expected_nodes, expected_globals))
    self.assertAllEqual(expected_edges_out, output_graph_out.edges)
    self.assertAllEqual(expected_nodes_out, output_graph_out.nodes)
    self.assertAllEqual(expected_globals_out, output_graph_out.globals)

  @parameterized.named_parameters(
      ("edges only", True, False, False, None, None),
      ("edges only, max", True, False, False, tf.math.unsorted_segment_max, None),
      ("nodes only", False, True, False, None, None),
      ("nodes only, max", False, True, False, None, tf.math.unsorted_segment_max),
      ("globals only", False, False, True, None, None),
  )
  def test_global_block_options(self,
                                use_edges,
                                use_nodes,
                                use_globals,
                                edges_reducer,
                                nodes_reducer):
    """Test for configuring the GlobalBlock options."""
    input_graph = self._get_input_graph()
    # Default the reducers only when the corresponding inputs are used.
    if use_edges:
      edges_reducer = edges_reducer or tf.math.unsorted_segment_sum
    if use_nodes:
      nodes_reducer = nodes_reducer or tf.math.unsorted_segment_sum
    # Identity edge model.
    edge_model_fn = lambda: tf.identity
    edge_block_opt = {"use_edges": True,
                      "use_receiver_nodes": False,
                      "use_sender_nodes": False,
                      "use_globals": False}
    # Identity node model
    node_model_fn = lambda: tf.identity
    node_block_opt = {"use_received_edges": False,
                      "use_sent_edges": False,
                      "use_nodes": True,
                      "use_globals": False}
    global_model_fn = functools.partial(snt.Linear, output_size=10)
    global_block_opt = {"use_globals": use_globals,
                        "use_nodes": use_nodes,
                        "use_edges": use_edges,
                        "edges_reducer": edges_reducer,
                        "nodes_reducer": nodes_reducer}
    graph_network = modules.GraphNetwork(
        edge_model_fn=edge_model_fn,
        edge_block_opt=edge_block_opt,
        node_model_fn=node_model_fn,
        node_block_opt=node_block_opt,
        global_model_fn=global_model_fn,
        global_block_opt=global_block_opt)
    output_graph = graph_network(input_graph)
    # Only the global block transforms anything; compare against a standalone
    # GlobalBlock sharing the network's global model.
    global_block = blocks.GlobalBlock(
        global_model_fn=lambda: graph_network._global_block._global_model,
        use_edges=use_edges,
        use_nodes=use_nodes,
        use_globals=use_globals,
        edges_reducer=edges_reducer,
        nodes_reducer=nodes_reducer)
    expected_output_edge_block = input_graph
    expected_output_node_block = expected_output_edge_block
    expected_output_global_block = global_block(expected_output_node_block)
    expected_edges = expected_output_edge_block.edges
    expected_nodes = expected_output_node_block.nodes
    expected_globals = expected_output_global_block.globals
    with self.test_session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      (output_graph_out,
       expected_edges_out, expected_nodes_out, expected_globals_out) = sess.run(
           (output_graph, expected_edges, expected_nodes, expected_globals))
    self.assertAllEqual(expected_edges_out, output_graph_out.edges)
    self.assertAllEqual(expected_nodes_out, output_graph_out.nodes)
    self.assertAllEqual(expected_globals_out, output_graph_out.globals)

  def test_higher_rank_outputs(self):
    """Tests that a graph net can be built with higher rank inputs/outputs."""
    input_graph = self._get_shaped_input_graph()
    network = modules.GraphNetwork(*self._get_shaped_model_fns())
    self._assert_build_and_run(network, input_graph)

  @parameterized.named_parameters(
      ("wrongly shaped edges", "edges"),
      ("wrongly shaped nodes", "nodes"),
      ("wrongly shaped globals", "globals"),)
  def test_incompatible_higher_rank_inputs_raises(self, field_to_reshape):
    """An exception should be raised if the inputs have incompatible shapes."""
    input_graph = self._get_shaped_input_graph()
    edge_model_fn, node_model_fn, global_model_fn = self._get_shaped_model_fns()
    # Transposing one field makes it impossible to concatenate with the rest.
    input_graph = input_graph.map(
        lambda v: tf.transpose(a=v, perm=[0, 2, 1, 3]), [field_to_reshape])
    graph_network = modules.GraphNetwork(
        edge_model_fn, node_model_fn, global_model_fn)
    with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"):
      graph_network(input_graph)

  def test_incompatible_higher_rank_partial_outputs_raises(self):
    """An error should be raised if partial outputs have incompatible shapes."""
    input_graph = self._get_shaped_input_graph()
    edge_model_fn, node_model_fn, global_model_fn = self._get_shaped_model_fns()
    # A strided edge model produces outputs the node block cannot concatenate.
    edge_model_fn_2 = functools.partial(
        snt.Conv2D, output_channels=10, kernel_shape=[3, 3], stride=[1, 2])
    graph_network = modules.GraphNetwork(
        edge_model_fn_2, node_model_fn, global_model_fn)
    with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"):
      graph_network(input_graph)
    # Same failure mode for a strided node model feeding the global block.
    node_model_fn_2 = functools.partial(
        snt.Conv2D, output_channels=10, kernel_shape=[3, 3], stride=[1, 2])
    graph_network = modules.GraphNetwork(
        edge_model_fn, node_model_fn_2, global_model_fn)
    with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"):
      graph_network(input_graph)
class InteractionNetworkTest(GraphModuleTest):
  """Tests for `modules.InteractionNetwork` (globals pass through unchanged)."""

  def _get_model(self, reducer=None, name=None):
    """Returns an InteractionNetwork, forwarding optional kwargs only if set."""
    kwargs = {
        "edge_model_fn": functools.partial(snt.Linear, output_size=5),
        "node_model_fn": functools.partial(snt.Linear, output_size=10)
    }
    if reducer:
      kwargs["reducer"] = reducer
    if name:
      kwargs["name"] = name
    return modules.InteractionNetwork(**kwargs)

  @parameterized.named_parameters(
      ("default name", None), ("custom name", "custom_name"))
  def test_created_variables(self, name=None):
    """Verifies variable names and shapes created by an InteractionNetwork."""
    name = name if name is not None else "interaction_network"
    # Edge block input: sender nodes 2 + receiver nodes 2 + edges 4.
    expected_var_shapes_dict = {
        name + "/edge_block/linear/b:0": [5],
        name + "/edge_block/linear/w:0": [2 + 2 + 4, 5],
        name + "/node_block/linear/b:0": [10],
        name + "/node_block/linear/w:0": [5 + 2, 10],
    }
    input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
    model = self._get_model(name=name)
    model(input_graph)
    variables = model.get_variables()
    var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
    self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)

  @parameterized.named_parameters(
      ("default", tf.math.unsorted_segment_sum,),
      ("max or zero reduction", blocks.unsorted_segment_max_or_zero,),
      ("no globals", tf.math.unsorted_segment_sum, "globals"),
  )
  def test_same_as_subblocks(self, reducer, none_field=None):
    """Compares the output to explicit subblocks output.

    Args:
      reducer: The reducer used in the `NodeBlock`s.
      none_field: (string, default=None) If not None, the corresponding field
        is removed from the input graph.
    """
    input_graph = self._get_input_graph(none_field)
    interaction_network = self._get_model(reducer)
    output_graph = interaction_network(input_graph)
    edges_out = output_graph.edges
    nodes_out = output_graph.nodes
    # An interaction network never touches the globals.
    self.assertAllEqual(input_graph.globals, output_graph.globals)
    # Rebuild the two blocks sharing the network's models (no globals used).
    edge_block = blocks.EdgeBlock(
        edge_model_fn=lambda: interaction_network._edge_block._edge_model,
        use_sender_nodes=True,
        use_edges=True,
        use_receiver_nodes=True,
        use_globals=False)
    node_block = blocks.NodeBlock(
        node_model_fn=lambda: interaction_network._node_block._node_model,
        use_nodes=True,
        use_sent_edges=False,
        use_received_edges=True,
        use_globals=False,
        received_edges_reducer=reducer)
    expected_output_edge_block = edge_block(input_graph)
    expected_output_node_block = node_block(expected_output_edge_block)
    expected_edges = expected_output_edge_block.edges
    expected_nodes = expected_output_node_block.nodes
    with self.test_session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      (actual_edges_out, actual_nodes_out,
       expected_edges_out, expected_nodes_out) = sess.run(
           [edges_out, nodes_out, expected_edges, expected_nodes])
    self.assertAllEqual(expected_edges_out, actual_edges_out)
    self.assertAllEqual(expected_nodes_out, actual_nodes_out)

  @parameterized.named_parameters(
      ("no nodes", ["nodes"],),
      ("no edge data", ["edges"],),
      ("no edges", ["edges", "receivers", "senders"],),
  )
  def test_field_must_not_be_none(self, none_fields):
    """Tests that the model cannot be built if required fields are missing."""
    input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
    input_graph = input_graph.map(lambda _: None, none_fields)
    interaction_network = self._get_model()
    with self.assertRaises(ValueError):
      interaction_network(input_graph)

  def test_higher_rank_outputs(self):
    """Tests that an IN can be built with higher rank inputs/outputs."""
    input_graph = self._get_shaped_input_graph()
    edge_model_fn, node_model_fn, _ = self._get_shaped_model_fns()
    graph_network = modules.InteractionNetwork(edge_model_fn, node_model_fn)
    self._assert_build_and_run(graph_network, input_graph)

  @parameterized.named_parameters(
      ("wrongly shaped edges", "edges"),
      ("wrongly shaped nodes", "nodes"),)
  def test_incompatible_higher_rank_inputs_raises(self, field_to_reshape):
    """An exception should be raised if the inputs have incompatible shapes."""
    input_graph = self._get_shaped_input_graph()
    edge_model_fn, node_model_fn, _ = self._get_shaped_model_fns()
    input_graph = input_graph.map(
        lambda v: tf.transpose(a=v, perm=[0, 2, 1, 3]), [field_to_reshape])
    graph_network = modules.InteractionNetwork(edge_model_fn, node_model_fn)
    with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"):
      graph_network(input_graph)

  def test_incompatible_higher_rank_inputs_no_raise(self):
    """The globals can have an arbitrary shape in the input."""
    input_graph = self._get_shaped_input_graph()
    edge_model_fn, node_model_fn, _ = self._get_shaped_model_fns()
    # Globals are unused by an IN, so reshaping them must be harmless.
    input_graph = input_graph.replace(
        globals=tf.transpose(a=input_graph.globals, perm=[0, 2, 1, 3]))
    graph_network = modules.InteractionNetwork(edge_model_fn, node_model_fn)
    self._assert_build_and_run(graph_network, input_graph)
class RelationNetworkTest(GraphModuleTest):
  """Tests for `modules.RelationNetwork` (edges reduced into a global only)."""

  def _get_model(self, reducer=tf.math.unsorted_segment_sum, name=None):
    """Returns a RelationNetwork, forwarding optional kwargs only if set."""
    kwargs = {
        "edge_model_fn": functools.partial(snt.Linear, output_size=5),
        "global_model_fn": functools.partial(snt.Linear, output_size=15)
    }
    if reducer:
      kwargs["reducer"] = reducer
    if name:
      kwargs["name"] = name
    return modules.RelationNetwork(**kwargs)

  @parameterized.named_parameters(
      ("default name", None), ("custom name", "custom_name"))
  def test_created_variables(self, name=None):
    """Verifies variable names and shapes created by a RelationNetwork."""
    name = name if name is not None else "relation_network"
    # Edge block input: sender nodes 2 + receiver nodes 2 (edges unused).
    expected_var_shapes_dict = {
        name + "/edge_block/linear/b:0": [5],
        name + "/edge_block/linear/w:0": [2 + 2, 5],
        name + "/global_block/linear/b:0": [15],
        name + "/global_block/linear/w:0": [5, 15],
    }
    input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
    model = self._get_model(name=name)
    model(input_graph)
    variables = model.get_variables()
    var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
    self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)

  @parameterized.named_parameters(
      ("default", tf.math.unsorted_segment_sum, None),
      ("max or zero reduction", blocks.unsorted_segment_max_or_zero, None),
      ("no edges", tf.math.unsorted_segment_sum, "edges"),
      ("no globals", tf.math.unsorted_segment_sum, "globals"),
  )
  def test_same_as_subblocks(self, reducer, none_field=None):
    """Compares the output to explicit subblocks output.

    Args:
      reducer: The reducer used in the `GlobalBlock`.
      none_field: (string, default=None) If not None, the corresponding field
        is removed from the input graph.
    """
    input_graph = self._get_input_graph(none_field)
    relation_network = self._get_model(reducer)
    output_graph = relation_network(input_graph)
    # Rebuild the two blocks sharing the network's models.
    edge_block = blocks.EdgeBlock(
        edge_model_fn=lambda: relation_network._edge_block._edge_model,
        use_edges=False,
        use_receiver_nodes=True,
        use_sender_nodes=True,
        use_globals=False)
    global_block = blocks.GlobalBlock(
        global_model_fn=lambda: relation_network._global_block._global_model,
        use_edges=True,
        use_nodes=False,
        use_globals=False,
        edges_reducer=reducer,
        nodes_reducer=reducer)
    expected_output_edge_block = edge_block(input_graph)
    expected_output_global_block = global_block(expected_output_edge_block)
    # Edges and nodes must pass through untouched (same tensor objects).
    self.assertEqual(input_graph.edges, output_graph.edges)
    self.assertEqual(input_graph.nodes, output_graph.nodes)
    with self.test_session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      (actual_globals_out, expected_globals_out) = sess.run(
          (output_graph.globals, expected_output_global_block.globals))
    self.assertAllEqual(expected_globals_out, actual_globals_out)

  @parameterized.named_parameters(
      ("no nodes", ["nodes"],), ("no edges", ["edges", "receivers", "senders"],)
  )
  def test_field_must_not_be_none(self, none_fields):
    """Tests that the model cannot be built if required fields are missing."""
    input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
    input_graph = input_graph.map(lambda _: None, none_fields)
    relation_network = self._get_model()
    with self.assertRaises(ValueError):
      relation_network(input_graph)

  @parameterized.named_parameters(
      ("differently shaped edges", "edges"),
      ("differently shaped nodes", "nodes"),
      ("differently shaped globals", "globals"),)
  def test_incompatible_higher_rank_inputs_no_raise(self, field_to_reshape):
    """A RelationNetwork does not make assumptions on its inputs shapes."""
    input_graph = self._get_shaped_input_graph()
    edge_model_fn, _, global_model_fn = self._get_shaped_model_fns()
    input_graph = input_graph.map(
        lambda v: tf.transpose(a=v, perm=[0, 2, 1, 3]), [field_to_reshape])
    network = modules.RelationNetwork(edge_model_fn, global_model_fn)
    self._assert_build_and_run(network, input_graph)
class DeepSetsTest(GraphModuleTest):
def _get_model(self, reducer=None, name=None):
  """Builds a DeepSets module with fixed-size linear node/global models."""
  constructor_kwargs = {
      "node_model_fn": functools.partial(snt.Linear, output_size=5),
      "global_model_fn": functools.partial(snt.Linear, output_size=15),
  }
  # Only forward the optional arguments that were actually provided.
  for key, value in (("reducer", reducer), ("name", name)):
    if value:
      constructor_kwargs[key] = value
  return modules.DeepSets(**constructor_kwargs)
@parameterized.named_parameters(
    ("default name", None), ("custom name", "custom_name"))
def test_created_variables(self, name=None):
  """Verifies variable names and shapes created by a DeepSets network."""
  name = name if name is not None else "deep_sets"
  # Node block input: node features 2 + broadcast globals 3.
  expected_var_shapes_dict = {
      name + "/node_block/linear/b:0": [5],
      name + "/node_block/linear/w:0": [2 + 3, 5],
      name + "/global_block/linear/b:0": [15],
      name + "/global_block/linear/w:0": [5, 15],
  }
  input_graph = self._get_input_graph()
  model = self._get_model(name=name)
  model(input_graph)
  variables = model.get_variables()
  var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
  self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("default", tf.math.unsorted_segment_sum, []),
("no edge data", tf.math.unsorted_segment_sum, ["edges"]),
("no edges", tf.math.unsorted_segment_sum, ["edges", "receivers", "senders"]),
("max or zero reduction", blocks.unsorted_segment_max_or_zero, []),
)
def test_same_as_subblocks(self, reducer, none_fields):
"""Compares the output to explicit subblocks output.
Args:
reducer: The reducer used in the NodeBlock.
none_fields: (list of strings) The corresponding fields are removed from
the input graph.
"""
input_graph = self._get_input_graph()
input_graph = input_graph.map(lambda _: None, none_fields)
deep_sets = self._get_model(reducer)
output_graph = deep_sets(input_graph)
output_nodes = output_graph.nodes
output_globals = output_graph.globals
node_block = blocks.NodeBlock(
node_model_fn=lambda: deep_sets._node_block._node_model,
use_received_edges=False,
use_sent_edges=False,
use_nodes=True,
use_globals=True)
global_block = blocks.GlobalBlock(
global_model_fn=lambda: deep_sets._global_block._global_model,
use_edges=False,
use_nodes=True,
use_globals=False,
nodes_reducer=reducer)
node_block_out = node_block(input_graph)
expected_nodes = node_block_out.nodes
expected_globals = global_block(node_block_out).globals
self.assertAllEqual(input_graph.edges, output_graph.edges)
self.assertAllEqual(input_graph.receivers, output_graph.receivers)
self.assertAllEqual(input_graph.senders, output_graph.senders)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
(output_nodes_, output_globals_, expected_nodes_,
expected_globals_) = sess.run(
[output_nodes, output_globals, expected_nodes, expected_globals])
self.assertAllEqual(expected_nodes_, output_nodes_)
self.assertAllEqual(expected_globals_, output_globals_)
@parameterized.parameters(
("nodes",), ("globals",),
)
def test_field_must_not_be_none(self, none_field):
"""Tests that the model cannot be built if required fields are missing."""
input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
input_graph = input_graph.replace(**{none_field: None})
deep_sets = self._get_model()
with self.assertRaises(ValueError):
deep_sets(input_graph)
def test_incompatible_higher_rank_inputs_raises(self):
"""A exception should be raised if the inputs have incompatible shapes."""
input_graph = self._get_shaped_input_graph()
_, node_model_fn, global_model_fn = self._get_shaped_model_fns()
input_graph = input_graph.replace(
nodes=tf.transpose(a=input_graph.nodes, perm=[0, 2, 1, 3]))
graph_network = modules.DeepSets(node_model_fn, global_model_fn)
with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"):
graph_network(input_graph)
def test_incompatible_higher_rank_partial_outputs_no_raise(self):
"""There is no constraint on the size of the partial outputs."""
input_graph = self._get_shaped_input_graph()
node_model_fn = functools.partial(
snt.Conv2D, output_channels=10, kernel_shape=[3, 3], stride=[1, 2])
global_model_fn = functools.partial(
snt.Conv2D, output_channels=10, kernel_shape=[3, 3])
network = modules.DeepSets(node_model_fn, global_model_fn)
self._assert_build_and_run(network, input_graph)
def test_incompatible_higher_rank_inputs_no_raise(self):
"""A DeepSets does not make assumptions on the shape if its input edges."""
input_graph = self._get_shaped_input_graph()
_, node_model_fn, global_model_fn = self._get_shaped_model_fns()
input_graph = input_graph.replace(
edges=tf.transpose(a=input_graph.edges, perm=[0, 2, 1, 3]))
network = modules.DeepSets(node_model_fn, global_model_fn)
self._assert_build_and_run(network, input_graph)
class CommNetTest(GraphModuleTest):
def _get_model(self, reducer=None, name=None):
kwargs = {
"edge_model_fn": functools.partial(snt.Linear, output_size=15),
"node_encoder_model_fn": functools.partial(snt.Linear, output_size=8),
"node_model_fn": functools.partial(snt.Linear, output_size=5),
}
if reducer is not None:
kwargs["reducer"] = reducer
if name:
kwargs["name"] = name
return modules.CommNet(**kwargs)
@parameterized.named_parameters(
("default name", None), ("custom name", "custom_name"))
def test_created_variables(self, name=None):
"""Verifies variable names and shapes created by a DeepSets network."""
name = name if name is not None else "comm_net"
expected_var_shapes_dict = {
name + "/edge_block/linear/b:0": [15],
name + "/edge_block/linear/w:0": [2, 15],
name + "/node_encoder_block/linear/b:0": [8],
name + "/node_encoder_block/linear/w:0": [2, 8],
name + "/node_block/linear/b:0": [5],
name + "/node_block/linear/w:0": [15 + 8, 5],
}
input_graph = self._get_input_graph()
model = self._get_model(name=name)
model(input_graph)
variables = model.get_variables()
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("default", tf.math.unsorted_segment_sum,),
("no edges", tf.math.unsorted_segment_sum, "edges"),
("no globals", tf.math.unsorted_segment_sum, "globals"),
("max or zero reduction", blocks.unsorted_segment_max_or_zero,),
)
def test_same_as_subblocks(self, reducer, none_field=None):
"""Compares the output to explicit subblocks output.
Args:
reducer: The reducer used in the `NodeBlock`s.
none_field: (string, default=None) If not None, the corresponding field
is removed from the input graph.
"""
input_graph = self._get_input_graph(none_field)
comm_net = self._get_model(reducer)
output_graph = comm_net(input_graph)
output_nodes = output_graph.nodes
edge_subblock = blocks.EdgeBlock(
edge_model_fn=lambda: comm_net._edge_block._edge_model,
use_edges=False,
use_receiver_nodes=False,
use_sender_nodes=True,
use_globals=False)
node_encoder_subblock = blocks.NodeBlock(
node_model_fn=lambda: comm_net._node_encoder_block._node_model,
use_received_edges=False,
use_sent_edges=False,
use_nodes=True,
use_globals=False,
received_edges_reducer=reducer)
node_subblock = blocks.NodeBlock(
node_model_fn=lambda: comm_net._node_block._node_model,
use_received_edges=True,
use_sent_edges=False,
use_nodes=True,
use_globals=False,
received_edges_reducer=reducer)
edge_block_out = edge_subblock(input_graph)
encoded_nodes = node_encoder_subblock(input_graph).nodes
node_input_graph = input_graph.replace(
edges=edge_block_out.edges, nodes=encoded_nodes)
node_block_out = node_subblock(node_input_graph)
expected_nodes = node_block_out.nodes
self.assertAllEqual(input_graph.globals, output_graph.globals)
self.assertAllEqual(input_graph.edges, output_graph.edges)
self.assertAllEqual(input_graph.receivers, output_graph.receivers,)
self.assertAllEqual(input_graph.senders, output_graph.senders)
with self.test_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
actual_nodes_output, expected_nodes_output = sess.run(
[output_nodes, expected_nodes])
self.assertAllEqual(expected_nodes_output, actual_nodes_output)
@parameterized.named_parameters(
("no nodes", ["nodes"],), ("no edges", ["edges", "receivers", "senders"],)
)
def test_field_must_not_be_none(self, none_fields):
"""Tests that the model cannot be built if required fields are missing."""
input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
input_graph = input_graph.map(lambda _: None, none_fields)
comm_net = self._get_model()
with self.assertRaises(ValueError):
comm_net(input_graph)
def test_higher_rank_outputs(self):
"""Tests that a CommNet can be build with higher rank inputs/outputs."""
input_graph = self._get_shaped_input_graph()
graph_network = modules.CommNet(*self._get_shaped_model_fns())
self._assert_build_and_run(graph_network, input_graph)
class SelfAttentionTest(GraphModuleTest):
def _get_model(self, reducer=None, name=None):
kwargs = {
"edge_model_fn": functools.partial(snt.Linear, output_size=15),
"node_encoder_model_fn": functools.partial(snt.Linear, output_size=8),
"node_model_fn": functools.partial(snt.Linear, output_size=5),
}
if reducer is not None:
kwargs["reducer"] = reducer
if name:
kwargs["name"] = name
return modules.CommNet(**kwargs)
LOGITS_1D = [np.log(2), np.log(2), np.log(2), 0., 0., 0.]
SOFTMAX_1D = [1., 2/3, 0.5, 0.25, 0.25, 1/3]
LOGITS_2D = [[np.log(2), 1.], [np.log(2), 1.], [np.log(2), 1.],
[0., 1.], [0., 1.], [0., 1.]]
SOFTMAX_2D = [[1., 1.], [2/3, 0.5], [1/2, 1/3],
[1/4, 1/3], [1/4, 1/3], [1/3, 0.5]]
SENDERS = [0, 2, 2, 3, 4, 3]
RECEIVERS = [1, 5, 6, 6, 6, 5]
N_NODE = [2, 5]
N_EDGE = [1, 5]
@parameterized.named_parameters(
("one dimensional", LOGITS_1D, SOFTMAX_1D),
("two dimensional", LOGITS_2D, SOFTMAX_2D),)
def test_unsorted_segment_softmax(self, data, expected_softmax):
"""Verifies variable names and shapes created by a DeepSets network."""
data = tf.constant(data, dtype=tf.float32)
segment_ids = tf.constant(self.RECEIVERS, dtype=tf.int32)
num_segments = tf.constant(sum(self.N_NODE), dtype=tf.int32)
actual_softmax = modules._unsorted_segment_softmax(
data, segment_ids, num_segments)
with self.test_session() as sess:
actual_softmax_output = sess.run(actual_softmax)
self.assertAllClose(expected_softmax, actual_softmax_output)
@parameterized.named_parameters(
("one dimensional", LOGITS_1D, SOFTMAX_1D,
modules._unsorted_segment_softmax),
("two dimensional", LOGITS_2D, SOFTMAX_2D,
modules._unsorted_segment_softmax),)
def test_received_edges_normalizer(self, logits,
expected_normalized, normalizer):
graph = graphs.GraphsTuple(
nodes=None,
edges=logits,
globals=None,
receivers=tf.constant(self.RECEIVERS, dtype=tf.int32),
senders=tf.constant(self.SENDERS, dtype=tf.int32),
n_node=tf.constant(self.N_NODE, dtype=tf.int32),
n_edge=tf.constant(self.N_EDGE, dtype=tf.int32),
)
actual_normalized_edges = modules._received_edges_normalizer(
graph, normalizer)
with self.test_session() as sess:
actual_normalized_edges_output = sess.run(actual_normalized_edges)
self.assertAllClose(expected_normalized, actual_normalized_edges_output)
def test_self_attention(self):
# Just one feature per node.
values_np = np.arange(sum(self.N_NODE)) + 1.
# Multiple heads, one positive values, one negative values.
values_np = np.stack([values_np, values_np*-1.], axis=-1)
# Multiple features per node, per head, at different scales.
values_np = np.stack([values_np, values_np*0.1], axis=-1)
values = tf.constant(values_np, dtype=tf.float32)
keys_np = [
[[0.3, 0.4]]*2, # Irrelevant (only sender to one node)
[[0.1, 0.5]]*2, # Not used (is not a sender)
[[1, 0], [0, 1]],
[[0, 1], [1, 0]],
[[1, 1], [1, 1]],
[[0.4, 0.3]]*2, # Not used (is not a sender)
[[0.3, 0.2]]*2] # Not used (is not a sender)
keys = tf.constant(keys_np, dtype=tf.float32)
queries_np = [
[[0.2, 0.7]]*2, # Not used (is not a receiver)
[[0.3, 0.2]]*2, # Irrelevant (only receives from one node)
[[0.2, 0.8]]*2, # Not used (is not a receiver)
[[0.2, 0.4]]*2, # Not used (is not a receiver)
[[0.3, 0.9]]*2, # Not used (is not a receiver)
[[0, np.log(2)], [np.log(3), 0]],
[[np.log(2), 0], [0, np.log(3)]]]
queries = tf.constant(queries_np, dtype=tf.float32)
attention_graph = graphs.GraphsTuple(
nodes=None,
edges=None,
globals=None,
receivers=tf.constant(self.RECEIVERS, dtype=tf.int32),
senders=tf.constant(self.SENDERS, dtype=tf.int32),
n_node=tf.constant(self.N_NODE, dtype=tf.int32),
n_edge=tf.constant(self.N_EDGE, dtype=tf.int32),)
self_attention = modules.SelfAttention()
output_graph = self_attention(values, keys, queries, attention_graph)
mixed_nodes = output_graph.nodes
with self.test_session() as sess:
mixed_nodes_output = sess.run(mixed_nodes)
expected_mixed_nodes = [
[[0., 0.], [0., 0.]], # Does not receive any edges
[[1., 0.1], [-1., -0.1]], # Only receives from n0.
[[0., 0.], [0., 0.]], # Does not receive any edges
[[0., 0.], [0., 0.]], # Does not receive any edges
[[0., 0.], [0., 0.]], # Does not receive any edges
[[11/3, 11/3*0.1], # Head one, receives from n2(1/3) n3(2/3)
[-15/4, -15/4*0.1]], # Head two, receives from n2(1/4) n3(3/4)
[[20/5, 20/5*0.1], # Head one, receives from n2(2/5) n3(1/5) n4(2/5)
[-28/7, -28/7*0.1]], # Head two, receives from n2(3/7) n3(1/7) n4(3/7)
]
self.assertAllClose(expected_mixed_nodes, mixed_nodes_output)
if __name__ == "__main__":
tf.test.main()
| [
"2877992943@qq.com"
] | 2877992943@qq.com |
6d1f6f15112b1fc0a2a3b86385e1f8c6d9ccc2c6 | c94914207f9a3918d0cd09807a101e81a8e7d9a8 | /Finance/Project Alpha/ProjectAlpha/risk.py | d757e8ef74c1c99631a212d1c47fd4a172fc535c | [] | no_license | matthewmercuri/codedeposit | eb8265d6219284ee75cba2e1abf50f1b0ef17a09 | f51d3b1bdacec96af699bc8feb993f6ed60f772c | refs/heads/main | 2023-06-10T21:27:42.550236 | 2021-07-08T20:57:53 | 2021-07-08T20:57:53 | 373,534,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,442 | py | import numpy as np
''' Ideally, we would want to put these metrics in a df
or database with the return series of the portfolio. How
would this translate to when we use these metrics to backtest
a portfolio that trades?
- want to figure out a way where we can just pass the history
to any one method and have a result outputted
- start with simpler methods like alpha, beta, etc.
- we want to start this class by being applied to the return
df (which would be the return series of the port)
- finally, we want a method that prints out ALL of the metrics
'''
class Risk:
def __init__(self):
pass
def _metrics(self, history):
''' Calls all of the individual methods
- Perhaps this can aggregrate and format too?
'''
def sharpe_ratio(self, returns, rf_rate=0):
returns = np.array(returns)
returns_std = np.std(returns)
sr = (np.mean(returns) - rf_rate) / returns_std
return sr
def treynor_measure(self, history):
pass
def sortino(self):
pass
def max_drawdown(self):
pass
def var(self):
pass
def cvar(self):
pass
def cagr(self):
pass
def roi(self):
pass
def pandl(self):
pass
def alpha(self):
pass
def beta(self):
pass
def std(self):
pass
def r_squared(self):
pass
def corr_matrix(self):
pass
| [
"mercurimatthew@gmail.com"
] | mercurimatthew@gmail.com |
a5cfaddec32dede10839f846563eaef3853c4601 | 4e5141121d8b4015db233cbc71946ec3cfbe5fe6 | /samples/basic/crud/gnmi/models/cisco-ios-xr/Cisco-IOS-XR-ifmgr-cfg/gn-create-xr-ifmgr-cfg-34-ydk.py | a3abe62056bc1ac1ba680e56c1d574ac69b5d763 | [
"Apache-2.0"
] | permissive | itbj/ydk-py-samples | 898c6c9bad9d6f8072892300d42633d82ec38368 | c5834091da0ebedbb11af7bbf780f268aad7040b | refs/heads/master | 2022-11-20T17:44:58.844428 | 2020-07-25T06:18:02 | 2020-07-25T06:18:02 | 282,382,442 | 1 | 0 | null | 2020-07-25T06:04:51 | 2020-07-25T06:04:50 | null | UTF-8 | Python | false | false | 3,721 | py | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Create configuration for model Cisco-IOS-XR-ifmgr-cfg.
usage: gn-create-xr-ifmgr-cfg-34-ydk.py [-h] [-v] device
positional arguments:
device gNMI device (http://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.path import Repository
from ydk.services import CRUDService
from ydk.gnmi.providers import gNMIServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ifmgr_cfg \
as xr_ifmgr_cfg
import os
import logging
YDK_REPO_DIR = os.path.expanduser("~/.ydk/")
def config_interface_configurations(interface_configurations):
"""Add config data to interface_configurations object."""
# configure IPv4 interface
interface_configuration = interface_configurations.InterfaceConfiguration()
interface_configuration.active = "act"
interface_configuration.interface_name = "GigabitEthernet0/0/0/0"
interface_configuration.description = "CONNECTS TO LSR1 (g0/0/0/1)"
mtu = interface_configuration.mtus.Mtu()
mtu.owner = "GigabitEthernet"
mtu.mtu = 9192
interface_configuration.mtus.mtu.append(mtu)
primary = interface_configuration.ipv4_network.addresses.Primary()
primary.address = "172.16.1.0"
primary.netmask = "255.255.255.254"
interface_configuration.ipv4_network.addresses.primary = primary
interface_configuration.statistics.load_interval = 30
interface_configurations.interface_configuration.append(interface_configuration)
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="gNMI device (http://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create gNMI provider
repository = Repository(YDK_REPO_DIR+device.hostname)
provider = gNMIServiceProvider(repo=repository,
address=device.hostname,
port=device.port,
username=device.username,
password=device.password)
# create CRUD service
crud = CRUDService()
interface_configurations = xr_ifmgr_cfg.InterfaceConfigurations() # create object
config_interface_configurations(interface_configurations) # add object configuration
# create configuration on gNMI device
crud.create(provider, interface_configurations)
exit()
# End of script
| [
"deom119@gmail.com"
] | deom119@gmail.com |
bff56bbdcdcd2052423680ef460434500e1e3f6d | 64660f7d708569135777d3ae429feed513f5d87f | /notebooks/_solutions/pandas_06_data_cleaning2.py | 63584f4c83d54a6bb5b985d4237a721a10f351d6 | [
"BSD-3-Clause"
] | permissive | jorisvandenbossche/DS-python-data-analysis | ea8fd46e9160d00be8550aa8d87ea33146161b54 | be5d5030e891590990f9044ac66b116799d83fe5 | refs/heads/main | 2022-12-13T03:53:52.365280 | 2022-12-04T18:54:39 | 2022-12-04T18:54:39 | 73,628,771 | 87 | 67 | BSD-3-Clause | 2022-12-12T15:00:28 | 2016-11-13T16:39:51 | Jupyter Notebook | UTF-8 | Python | false | false | 78 | py | casualties = casualties_nl.rename(columns=clean_column_name)
casualties.head() | [
"noreply@github.com"
] | jorisvandenbossche.noreply@github.com |
44d1bb63f6fb5cac8e01d44a916c0761f1f59078 | d58bc2475a41e7c36e22947565c099908f84cfd6 | /samples/openapi3/client/petstore/python-experimental/petstore_api/paths/user_logout/get.py | c8a52c7734766188dde2478208cb0fdeff83f8cb | [
"Apache-2.0"
] | permissive | yaronius/openapi-generator | d8390dc2cfd9330d3f05a1f517612d793e332ead | 9f3fac53c1689b82bf4c99b664e10e4a5decfb8e | refs/heads/master | 2022-11-03T02:27:44.670087 | 2022-08-17T12:17:30 | 2022-08-17T12:17:30 | 175,407,506 | 0 | 0 | Apache-2.0 | 2023-09-04T20:41:29 | 2019-03-13T11:30:05 | Java | UTF-8 | Python | false | false | 4,404 | py | # coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import re # noqa: F401
import sys # noqa: F401
import typing
import urllib3
import functools # noqa: F401
from petstore_api import api_client, exceptions
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from petstore_api.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
UUIDSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
Configuration,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
Int32Base,
Int64Base,
Float32Base,
Float64Base,
NumberBase,
UUIDBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
NoneClass,
BoolClass,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
from . import path
@dataclass
class ApiResponseForDefault(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: Unset = unset
headers: Unset = unset
_response_for_default = api_client.OpenApiResponse(
response_cls=ApiResponseForDefault,
)
_status_code_to_response = {
'default': _response_for_default,
}
class BaseApi(api_client.Api):
def _logout_user(
self: api_client.Api,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseForDefault,
api_client.ApiResponseWithoutDeserialization
]:
"""
Logs out current logged in user session
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
used_path = path.value
# TODO add cookie handling
response = self.api_client.call_api(
resource_path=used_path,
method='get'.upper(),
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
default_response = _status_code_to_response.get('default')
if default_response:
api_response = default_response.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class LogoutUser(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
def logout_user(
self: BaseApi,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseForDefault,
api_client.ApiResponseWithoutDeserialization
]:
return self._logout_user(
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForget(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
def get(
self: BaseApi,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
ApiResponseForDefault,
api_client.ApiResponseWithoutDeserialization
]:
return self._logout_user(
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
| [
"noreply@github.com"
] | yaronius.noreply@github.com |
6136bbbbebc5bb771fd00b992c25ef41855cd34d | c3d5dcf3c18e0e652d81cdf2edb87bdc0e3f2c72 | /user/models.py | 1978c2d4f31c9d846418bad20d246153683fb04f | [] | no_license | webclinic017/easy_ledger | bc6743e4826d6d67d2d2b7f38476760077b2c7c3 | 9e85a726000cc54fc77d368a48a828a716664c07 | refs/heads/main | 2023-04-10T09:11:15.530056 | 2021-04-23T09:35:14 | 2021-04-23T09:35:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
class User(AbstractUser):
username = models.CharField(max_length=200, blank=True, null=True)
email = models.EmailField(_('email address'), unique=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
def __str__(self):
return "{}".format(self.email)
class UserProfile(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='profile')
dob = models.DateField(blank=True, null=True)
phone = models.CharField(max_length=20, blank=True, null=True)
address = models.CharField(max_length=255, blank=True, null=True)
country = models.CharField(max_length=50, blank=True, null=True)
city = models.CharField(max_length=50, blank=True, null=True)
zip = models.CharField(max_length=5, blank=True, null=True) | [
"ppark9553@gmail.com"
] | ppark9553@gmail.com |
91fc770e5872a64b5c1af1537736fd0d19afceae | 28024c936d258d56e3c02bfda44204fab8f32964 | /client/dvaclient/constants.py | a156927b6d8fd9bd73732cbd03f6105d087656ee | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | skgone/DeepVideoAnalytics | c1db730af253a2ec3acb9c7e6cce82d3264b72f2 | 6897dd92feed23af974dbcb7b5f0d2a9377f66b2 | refs/heads/master | 2021-07-01T01:36:07.086632 | 2020-09-20T10:07:26 | 2020-09-20T10:07:26 | 137,233,795 | 0 | 0 | null | 2018-06-13T15:19:50 | 2018-06-13T15:19:49 | null | UTF-8 | Python | false | false | 443 | py | SCHEDULE = 'S'
PROCESS = 'V'
QUERY = 'Q'
DETECTION = 'D'
INDEXING = 'I'
TRAINAPPROX = 'A'
CLASSIFICATION = 'C'
IMAGES = 'I'
VIDEOS = 'V'
INDEX = 'X'
TENSORFLOW = 'T'
CAFFE = 'C'
PYTORCH = 'P'
OPENCV = 'O'
MXNET = 'M'
INDEXER = 'I'
APPROXIMATOR = 'P'
DETECTOR = 'D'
ANALYZER = 'A'
SEGMENTER = 'S'
YOLO = "Y"
TFD = "T"
EXACT = 'E'
LOPQ = 'L'
ANNOTATION = 'A'
SEGMENTATION = 'S'
TRANSFORM = 'T'
POLYGON = 'P'
VIDEO_EXPORT = 'V'
MODEL_EXPORT = 'M' | [
"akshayubhat@gmail.com"
] | akshayubhat@gmail.com |
a6c4d1ea49aea7407ef9c59364ed2480517c29ca | b170bd640f7259d641008a974a59ae7111788ca1 | /dcgan.py | 461d9841337d1f2a6656215a573991491876b815 | [] | no_license | kant/chainer-dcgan | b75522fd09a3e4936d68e175318dfedb6e574480 | 20062e3e6c1156857363ea0ed3cfe4abd63b2a8c | refs/heads/master | 2020-04-26T05:37:14.165650 | 2017-09-14T09:17:01 | 2017-09-14T09:17:01 | 173,340,366 | 0 | 0 | null | 2019-03-01T17:14:22 | 2019-03-01T17:14:21 | null | UTF-8 | Python | false | false | 4,667 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import chainer
from chainer import training
from chainer.training import extensions
from net import Discriminator
from net import Generator
from updater import DCGANUpdater
from visualize import out_generated_image
def main():
parser = argparse.ArgumentParser(description='DCGAN')
parser.add_argument('--batchsize', '-b', type=int, default=64,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=500,
help='Number of sweeps over the dataset to train')
parser.add_argument('--gpu', '-g', type=int, default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--dataset', '-i', default='',
help='Directory of image files.')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result image')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--gennum','-v', default=10,
help='visualize image rows and columns number')
parser.add_argument('--n_hidden', '-n', type=int, default=100,
help='Number of hidden units (z)')
parser.add_argument('--seed', type=int, default=0,
help='Random seed of z at visualization stage')
parser.add_argument('--snapshot_interval', type=int, default=1000,
help='Interval of snapshot')
parser.add_argument('--display_interval', type=int, default=100,
help='Interval of displaying log to console')
args = parser.parse_args()
print('GPU: {}'.format(args.gpu))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# n_hidden: {}'.format(args.n_hidden))
print('# epoch: {}'.format(args.epoch))
print('')
#学習モデルの作成
gen = Generator(n_hidden=args.n_hidden)
dis = Discriminator()
if args.gpu >= 0:
#modelをGPU用に変換
chainer.cuda.get_device(args.gpu).use()
gen.to_gpu()
dis.to_gpu()
#oputimizerのsetup
def make_optimizer(model, alpha=0.0002, beta1=0.5):
optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001), 'hook_dec')
return optimizer
opt_gen = make_optimizer(gen)
opt_dis = make_optimizer(dis)
if args.dataset == '':
#データセットの読み込み defaultはcifar10
train, _ = chainer.datasets.get_cifar10(withlabel=False, scale=255.)
else:
all_files = os.listdir(args.dataset)
image_files = [f for f in all_files if ('png' in f or 'jpg' in f)]
print('{} contains {} image files'
.format(args.dataset, len(image_files)))
train = chainer.datasets\
.ImageDataset(paths=image_files, root=args.dataset)
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
#trainerのセットアップ
updater = DCGANUpdater(
models=(gen, dis),
iterator=train_iter,
optimizer={
'gen': opt_gen, 'dis': opt_dis},
device=args.gpu)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
snapshot_interval = (args.snapshot_interval, 'iteration')
display_interval = (args.display_interval, 'iteration')
trainer.extend(
extensions.snapshot(filename='snapshot_iter_{.updater.epoch}.npz'),
trigger=(100,'epoch'))
trainer.extend(extensions.snapshot_object(
gen, 'gen_iter_{.updater.epoch}.npz'), trigger=(100,'epoch'))
trainer.extend(extensions.snapshot_object(
dis, 'dis_iter_{.updater.epoch}.npz'), trigger=(100,'epoch'))
trainer.extend(extensions.LogReport(trigger=display_interval))
trainer.extend(extensions.PrintReport([
'epoch', 'iteration', 'gen/loss', 'dis/loss',
]), trigger=display_interval)
trainer.extend(extensions.ProgressBar(update_interval=10))
trainer.extend(
out_generated_image(
gen, dis,
int(args.gennum),int(args.gennum), args.seed, args.out),
trigger=snapshot_interval)
if args.resume:
#学習済みmodelの読み込み
chainer.serializers.load_npz(args.resume, trainer)
#学習の実行
trainer.run()
chainer.serializers.save_npz("genmodel{0}".format(args.datasets), trainer)
if __name__ == '__main__':
main()
| [
"ytkmkw@gmail.com"
] | ytkmkw@gmail.com |
8c89741248027e94946cf868971e08a4eb9cc57c | 5bd4206a94086d75b2808c4929063fb6b50108f5 | /loopy/transform/iname.py | 4c3cd0a6988b269139eceb7028423407765282f6 | [
"MIT"
] | permissive | christophsk/loopy | 7c10f30810be223c56ba99d64061b781b1cd62b0 | baef6e7603b2bba683327fd43cb006864c225aa6 | refs/heads/master | 2021-06-01T08:17:54.948840 | 2016-07-14T02:16:16 | 2016-07-14T02:16:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,451 | py | from __future__ import division, absolute_import
__copyright__ = "Copyright (C) 2012 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import six
from six.moves import zip
import islpy as isl
from islpy import dim_type
from loopy.symbolic import (
RuleAwareIdentityMapper, RuleAwareSubstitutionMapper,
SubstitutionRuleMappingContext)
from loopy.diagnostic import LoopyError
__doc__ = """
.. currentmodule:: loopy
.. autofunction:: split_iname
.. autofunction:: chunk_iname
.. autofunction:: join_inames
.. autofunction:: tag_inames
.. autofunction:: duplicate_inames
.. autofunction:: rename_iname
.. autofunction:: remove_unused_inames
.. autofunction:: set_loop_priority
.. autofunction:: split_reduction_inward
.. autofunction:: split_reduction_outward
.. autofunction:: affine_map_inames
.. autofunction:: realize_ilp
.. autofunction:: find_unused_axis_tag
.. autofunction:: make_reduction_inames_unique
"""
# {{{ set loop priority
def set_loop_priority(kernel, loop_priority):
    """Return a copy of *kernel* with its loop priority set.

    The priority describes the textual order in which loops should be
    entered in the generated code.  It has an advisory role only: if the
    kernel logically requires a different nesting, the priority is
    ignored, and it is consulted only when the loop nesting is otherwise
    ambiguous.

    :arg loop_priority: an iterable of inames or, for brevity, a single
        comma-separated string of inames
    """
    if isinstance(loop_priority, str):
        # Accept the "i, j, k" shorthand and normalize it to a list.
        loop_priority = [part.strip() for part in loop_priority.split(",")]

    return kernel.copy(loop_priority=loop_priority)
# }}}
# {{{ split/chunk inames
# {{{ backend
class _InameSplitter(RuleAwareIdentityMapper):
    """Rewrites uses of a split iname into its inner/outer replacements.

    Reductions over the split iname have it replaced by the two new
    inames; plain variable references are replaced by
    *replacement_index*.  Rewriting only happens where *within* matches
    and where the iname is not shadowed by a substitution-rule argument.
    """

    def __init__(self, rule_mapping_context, within,
            split_iname, outer_iname, inner_iname, replacement_index):
        super(_InameSplitter, self).__init__(rule_mapping_context)

        self.within = within

        self.split_iname = split_iname
        self.outer_iname = outer_iname
        self.inner_iname = inner_iname

        self.replacement_index = replacement_index

    def map_reduction(self, expr, expn_state):
        applies = (
                self.split_iname in expr.inames
                and self.split_iname not in expn_state.arg_context
                and self.within(
                    expn_state.kernel,
                    expn_state.instruction,
                    expn_state.stack))

        if not applies:
            return super(_InameSplitter, self).map_reduction(expr, expn_state)

        # Swap the split iname for the two new ones in the reduction's
        # iname list.
        replacement_inames = list(expr.inames)
        replacement_inames.remove(self.split_iname)
        replacement_inames.extend([self.outer_iname, self.inner_iname])

        from loopy.symbolic import Reduction
        return Reduction(expr.operation, tuple(replacement_inames),
                self.rec(expr.expr, expn_state),
                expr.allow_simultaneous)

    def map_variable(self, expr, expn_state):
        applies = (
                expr.name == self.split_iname
                and self.split_iname not in expn_state.arg_context
                and self.within(
                    expn_state.kernel,
                    expn_state.instruction,
                    expn_state.stack))

        if applies:
            return self.replacement_index

        return super(_InameSplitter, self).map_variable(expr, expn_state)
def _split_iname_backend(kernel, split_iname,
        fixed_length, fixed_length_is_inner,
        make_new_loop_index,
        outer_iname=None, inner_iname=None,
        outer_tag=None, inner_tag=None,
        slabs=(0, 0), do_tagged_check=True,
        within=None):
    """Shared implementation of :func:`split_iname` and :func:`chunk_iname`.

    Replaces *split_iname* by two new inames: one of constant length
    *fixed_length* (the inner one if *fixed_length_is_inner*, otherwise the
    outer one), the other variable-length.  Uses of *split_iname* in
    expressions are replaced by ``make_new_loop_index(inner_var, outer_var)``.

    :arg within: a stack match as understood by
        :func:`loopy.match.parse_stack_match`.
    """

    existing_tag = kernel.iname_to_tag.get(split_iname)
    from loopy.kernel.data import ForceSequentialTag
    # Tagged inames cannot be split (the tag's meaning would be ambiguous);
    # ForceSequentialTag is exempt and is re-applied to both halves below.
    if do_tagged_check and (
            existing_tag is not None
            and not isinstance(existing_tag, ForceSequentialTag)):
        raise LoopyError("cannot split already tagged iname '%s'" % split_iname)

    if split_iname not in kernel.all_inames():
        raise ValueError("cannot split loop for unknown variable '%s'" % split_iname)

    applied_iname_rewrites = kernel.applied_iname_rewrites[:]

    vng = kernel.get_var_name_generator()

    if outer_iname is None:
        outer_iname = vng(split_iname+"_outer")
    if inner_iname is None:
        inner_iname = vng(split_iname+"_inner")

    def process_set(s):
        # Rewrite one domain: add the two new dims, constrain them so that
        # split_iname == fixed_iname + fixed_length*var_length_iname, and
        # (unless a 'within' restricts the split) project out the old iname.
        var_dict = s.get_var_dict()

        if split_iname not in var_dict:
            return s

        orig_dim_type, _ = var_dict[split_iname]

        outer_var_nr = s.dim(orig_dim_type)
        inner_var_nr = s.dim(orig_dim_type)+1

        s = s.add_dims(orig_dim_type, 2)
        s = s.set_dim_name(orig_dim_type, outer_var_nr, outer_iname)
        s = s.set_dim_name(orig_dim_type, inner_var_nr, inner_iname)

        from loopy.isl_helpers import make_slab

        if fixed_length_is_inner:
            fixed_iname, var_length_iname = inner_iname, outer_iname
        else:
            fixed_iname, var_length_iname = outer_iname, inner_iname

        space = s.get_space()
        fixed_constraint_set = (
                make_slab(space, fixed_iname, 0, fixed_length)
                # name = fixed_iname + fixed_length*var_length_iname
                .add_constraint(isl.Constraint.eq_from_names(
                    space, {
                        split_iname: 1,
                        fixed_iname: -1,
                        var_length_iname: -fixed_length})))

        name_dim_type, name_idx = space.get_var_dict()[split_iname]
        s = s.intersect(fixed_constraint_set)

        # If the split only applies 'within' some instructions, the old
        # iname must survive for the untouched uses.
        if within is None:
            s = s.project_out(name_dim_type, name_idx, 1)

        return s

    new_domains = [process_set(dom) for dom in kernel.domains]

    from pymbolic import var
    inner = var(inner_iname)
    outer = var(outer_iname)
    new_loop_index = make_new_loop_index(inner, outer)

    subst_map = {var(split_iname): new_loop_index}
    applied_iname_rewrites.append(subst_map)

    # {{{ update forced_iname deps

    # Instructions that explicitly depended on the old iname now depend on
    # both of its replacements.
    new_insns = []
    for insn in kernel.instructions:
        if split_iname in insn.forced_iname_deps:
            new_forced_iname_deps = (
                    (insn.forced_iname_deps.copy()
                    - frozenset([split_iname]))
                    | frozenset([outer_iname, inner_iname]))
        else:
            new_forced_iname_deps = insn.forced_iname_deps

        insn = insn.copy(
                forced_iname_deps=new_forced_iname_deps)

        new_insns.append(insn)

    # }}}

    iname_slab_increments = kernel.iname_slab_increments.copy()
    iname_slab_increments[outer_iname] = slabs

    # The old iname's spot in the loop priority is taken by (outer, inner),
    # in that order.
    new_loop_priority = []
    for prio_iname in kernel.loop_priority:
        if prio_iname == split_iname:
            new_loop_priority.append(outer_iname)
            new_loop_priority.append(inner_iname)
        else:
            new_loop_priority.append(prio_iname)

    kernel = kernel.copy(
            domains=new_domains,
            iname_slab_increments=iname_slab_increments,
            instructions=new_insns,
            applied_iname_rewrites=applied_iname_rewrites,
            loop_priority=new_loop_priority)

    from loopy.match import parse_stack_match
    within = parse_stack_match(within)

    # Rewrite expression-level uses of the old iname.
    rule_mapping_context = SubstitutionRuleMappingContext(
            kernel.substitutions, kernel.get_var_name_generator())
    ins = _InameSplitter(rule_mapping_context, within,
            split_iname, outer_iname, inner_iname, new_loop_index)

    kernel = ins.map_kernel(kernel)
    kernel = rule_mapping_context.finish_kernel(kernel)

    if existing_tag is not None:
        kernel = tag_inames(kernel,
                {outer_iname: existing_tag, inner_iname: existing_tag})

    return tag_inames(kernel, {outer_iname: outer_tag, inner_iname: inner_tag})
# }}}
# {{{ split iname
def split_iname(kernel, split_iname, inner_length,
        outer_iname=None, inner_iname=None,
        outer_tag=None, inner_tag=None,
        slabs=(0, 0), do_tagged_check=True,
        within=None):
    """Split *split_iname* into two inames (an 'inner' one and an 'outer' one)
    so that ``split_iname == inner + outer*inner_length`` and *inner* is of
    constant length *inner_length*.

    :arg inner_length: a positive integer, the fixed trip count of the
        inner loop
    :arg outer_iname: The new iname to use for the 'outer' (variable-length)
        loop. Defaults to a name derived from ``split_iname + "_outer"``
    :arg inner_iname: The new iname to use for the 'inner' (fixed-length)
        loop. Defaults to a name derived from ``split_iname + "_inner"``
    :arg slabs:
        A tuple ``(head_it_count, tail_it_count)`` indicating the
        number of leading/trailing iterations of *outer_iname*
        for which separate code should be generated.
    :arg outer_tag: The iname tag (see :ref:`iname-tags`) to apply to
        *outer_iname*.
    :arg inner_tag: The iname tag (see :ref:`iname-tags`) to apply to
        *inner_iname*.
    :arg within: a stack match as understood by
        :func:`loopy.match.parse_stack_match`.
    """
    # Occurrences of the old iname become inner + outer*inner_length.
    return _split_iname_backend(kernel, split_iname,
            fixed_length=inner_length, fixed_length_is_inner=True,
            make_new_loop_index=(
                lambda inner, outer: inner + outer*inner_length),
            outer_iname=outer_iname, inner_iname=inner_iname,
            outer_tag=outer_tag, inner_tag=inner_tag,
            slabs=slabs, do_tagged_check=do_tagged_check,
            within=within)
# }}}
# {{{ chunk iname
def chunk_iname(kernel, split_iname, num_chunks,
        outer_iname=None, inner_iname=None,
        outer_tag=None, inner_tag=None,
        slabs=(0, 0), do_tagged_check=True,
        within=None):
    """
    Split *split_iname* into two inames (an 'inner' one and an 'outer' one)
    so that ``split_iname == inner + outer*chunk_length`` and *outer* is of
    fixed length *num_chunks*.

    Unlike :func:`split_iname`, this fixes the number of outer iterations
    rather than the inner trip count; chunk sizes may differ by one when
    the extent is not evenly divisible.

    :arg within: a stack match as understood by
        :func:`loopy.match.parse_stack_match`.

    .. versionadded:: 2016.2
    """

    # Chunk sizes: ceil(size/num_chunks) for the first (size mod num_chunks)
    # chunks, floor(size/num_chunks) for the rest.
    size = kernel.get_iname_bounds(split_iname).size
    k0 = isl.Aff.zero_on_domain(size.domain().space)
    chunk_ceil = size.div(k0+num_chunks).ceil()
    chunk_floor = size.div(k0+num_chunks).floor()
    chunk_diff = chunk_ceil - chunk_floor
    chunk_mod = size.mod_val(num_chunks)

    from loopy.symbolic import pw_aff_to_expr
    from pymbolic.primitives import Min

    def make_new_loop_index(inner, outer):
        # These two expressions are equivalent. Benchmarking between the
        # two was inconclusive, although one is shorter.

        if 0:
            # Triggers isl issues in check pass.
            return (
                    inner +
                    pw_aff_to_expr(chunk_floor) * outer
                    +
                    pw_aff_to_expr(chunk_diff) * Min(
                        (outer, pw_aff_to_expr(chunk_mod))))
        else:
            return (
                    inner +
                    pw_aff_to_expr(chunk_ceil) * Min(
                        (outer, pw_aff_to_expr(chunk_mod)))
                    +
                    pw_aff_to_expr(chunk_floor) * (
                        outer - Min((outer, pw_aff_to_expr(chunk_mod)))))

    # {{{ check that iname is a box iname

    # Since the linearization used in the constraint used to map the domain
    # does not match the linearization in make_new_loop_index, we can't really
    # tolerate if the iname in question has constraints that make it non-boxy,
    # since these sub-indices would end up in the wrong spot.

    for dom in kernel.domains:
        var_dict = dom.get_var_dict()
        if split_iname not in var_dict:
            continue

        dt, idx = var_dict[split_iname]
        assert dt == dim_type.set

        # Build the "box" version of the domain: the iname eliminated,
        # then re-added with only 0 <= iname < size, and require that it
        # equals the original domain.
        aff_zero = isl.Aff.zero_on_domain(dom.space)
        aff_split_iname = aff_zero.set_coefficient_val(dim_type.in_, idx, 1)
        aligned_size = isl.align_spaces(size, aff_zero)
        box_dom = (
                dom
                .eliminate(dt, idx, 1)
                & aff_zero.le_set(aff_split_iname)
                & aff_split_iname.lt_set(aligned_size)
                )

        if not (
                box_dom <= dom
                and
                dom <= box_dom):
            raise LoopyError("domain '%s' is not box-shape about iname "
                    "'%s', cannot use chunk_iname()"
                    % (dom, split_iname))

    # }}}

    return _split_iname_backend(kernel, split_iname,
            fixed_length=num_chunks, fixed_length_is_inner=False,
            make_new_loop_index=make_new_loop_index,
            outer_iname=outer_iname, inner_iname=inner_iname,
            outer_tag=outer_tag, inner_tag=inner_tag,
            slabs=slabs, do_tagged_check=do_tagged_check,
            within=within)
# }}}
# }}}
# {{{ join inames
class _InameJoiner(RuleAwareSubstitutionMapper):
    """Substitution mapper used by :func:`join_inames`.

    Besides applying the substitution (old inames expressed in terms of
    the new joined iname), this rewrites reductions so that a reduction
    over all of the joined inames becomes a reduction over the single
    new iname.
    """

    def __init__(self, rule_mapping_context, within, subst_func,
            joined_inames, new_iname):
        super(_InameJoiner, self).__init__(rule_mapping_context,
                subst_func, within)

        self.joined_inames = set(joined_inames)
        self.new_iname = new_iname

    def map_reduction(self, expr, expn_state):
        expr_inames = set(expr.inames)
        # BUG FIX: this previously read 'self.join_inames', a nonexistent
        # attribute (always raising AttributeError); the attribute set in
        # __init__ is 'self.joined_inames'. Rule-argument names shadow
        # inames and are excluded from the overlap.
        overlap = (self.joined_inames
                & (expr_inames - set(expn_state.arg_context)))
        if overlap and self.within(
                expn_state.kernel,
                expn_state.instruction,
                expn_state.stack):
            if overlap != expr_inames:
                raise LoopyError(
                        "Cannot join inames '%s' if there is a reduction "
                        "that does not use all of the inames being joined. "
                        "(Found one with just '%s'.)"
                        % (
                            ", ".join(self.joined_inames),
                            ", ".join(expr_inames)))

            new_inames = expr_inames - self.joined_inames
            new_inames.add(self.new_iname)

            from loopy.symbolic import Reduction
            return Reduction(expr.operation, tuple(new_inames),
                    self.rec(expr.expr, expn_state),
                    expr.allow_simultaneous)
        else:
            return super(_InameJoiner, self).map_reduction(expr, expn_state)
def join_inames(kernel, inames, new_iname=None, tag=None, within=None):
    """Join the inames in *inames* into a single new iname whose extent is
    the product of the individual (constant) extents.

    :arg inames: fastest varying last
    :arg new_iname: name for the joined iname; defaults to the old names
        joined with ``_and_``
    :arg tag: an iname tag (see :ref:`iname-tags`) to apply to the new iname
    :arg within: a stack match as understood by
        :func:`loopy.match.parse_stack_match`.
    """

    # now fastest varying first
    inames = inames[::-1]

    if new_iname is None:
        new_iname = kernel.get_var_name_generator()("_and_".join(inames))

    from loopy.kernel.tools import DomainChanger
    domch = DomainChanger(kernel, frozenset(inames))
    for iname in inames:
        if kernel.get_home_domain_index(iname) != domch.leaf_domain_index:
            raise LoopyError("iname '%s' is not 'at home' in the "
                    "join's leaf domain" % iname)

    new_domain = domch.domain
    new_dim_idx = new_domain.dim(dim_type.set)
    new_domain = new_domain.add_dims(dim_type.set, 1)
    new_domain = new_domain.set_dim_name(dim_type.set, new_dim_idx, new_iname)

    joint_aff = zero = isl.Aff.zero_on_domain(new_domain.space)
    subst_dict = {}
    base_divisor = 1

    from pymbolic import var

    # Build new_iname = sum_i base_divisor_i * (iname_i - lower_bound_i)
    # and, inversely, each old iname as div/mod of the new one.
    for i, iname in enumerate(inames):
        iname_dt, iname_idx = zero.get_space().get_var_dict()[iname]
        iname_aff = zero.add_coefficient_val(iname_dt, iname_idx, 1)

        joint_aff = joint_aff + base_divisor*iname_aff

        bounds = kernel.get_iname_bounds(iname, constants_only=True)

        from loopy.isl_helpers import (
                static_max_of_pw_aff, static_value_of_pw_aff)
        from loopy.symbolic import pw_aff_to_expr

        length = int(pw_aff_to_expr(
            static_max_of_pw_aff(bounds.size, constants_only=True)))

        try:
            lower_bound_aff = static_value_of_pw_aff(
                    bounds.lower_bound_pw_aff.coalesce(),
                    constants_only=False)
        except Exception as e:
            # BUG FIX: the original error text used to be dropped here,
            # leaving only the "while finding..." prefix.
            raise type(e)("while finding lower bound of '%s': %s"
                    % (iname, str(e)))

        my_val = var(new_iname) // base_divisor
        if i+1 < len(inames):
            # The last (slowest-varying) iname needs no mod.
            my_val %= length
        my_val += pw_aff_to_expr(lower_bound_aff)
        subst_dict[iname] = my_val

        base_divisor *= length

    from loopy.isl_helpers import iname_rel_aff
    new_domain = new_domain.add_constraint(
            isl.Constraint.equality_from_aff(
                iname_rel_aff(new_domain.get_space(), new_iname, "==", joint_aff)))

    for i, iname in enumerate(inames):
        iname_to_dim = new_domain.get_space().get_var_dict()
        iname_dt, iname_idx = iname_to_dim[iname]

        # Keep the old inames around if a 'within' restricts the join.
        if within is None:
            new_domain = new_domain.project_out(iname_dt, iname_idx, 1)

    def subst_forced_iname_deps(fid):
        # Replace any joined iname in the explicit dependencies by the
        # new joined iname.
        result = set()
        for iname in fid:
            if iname in inames:
                result.add(new_iname)
            else:
                result.add(iname)

        return frozenset(result)

    new_insns = [
            insn.copy(
                forced_iname_deps=subst_forced_iname_deps(insn.forced_iname_deps))
            for insn in kernel.instructions]

    kernel = (kernel
            .copy(
                instructions=new_insns,
                domains=domch.get_domains_with(new_domain),
                applied_iname_rewrites=kernel.applied_iname_rewrites + [subst_dict]
                ))

    from loopy.match import parse_stack_match
    within = parse_stack_match(within)

    from pymbolic.mapper.substitutor import make_subst_func
    rule_mapping_context = SubstitutionRuleMappingContext(
            kernel.substitutions, kernel.get_var_name_generator())
    ijoin = _InameJoiner(rule_mapping_context, within,
            make_subst_func(subst_dict),
            inames, new_iname)

    kernel = rule_mapping_context.finish_kernel(
            ijoin.map_kernel(kernel))

    if tag is not None:
        kernel = tag_inames(kernel, {new_iname: tag})

    return kernel
# }}}
# {{{ tag inames
def tag_inames(kernel, iname_to_tag, force=False, ignore_nonexistent=False):
    """Tag an iname

    :arg iname_to_tag: a list of tuples ``(iname, new_tag)``. *new_tag* is given
        as an instance of a subclass of :class:`loopy.kernel.data.IndexTag` or
        as a string as shown in :ref:`iname-tags`. May also be a dictionary
        for backwards compatibility, or a comma-separated string of
        ``iname:tag`` pairs.
    :arg force: allow retagging an iname that already carries a tag
    :arg ignore_nonexistent: silently skip inames that do not occur in
        the kernel instead of raising
    """
    if isinstance(iname_to_tag, dict):
        iname_to_tag = list(six.iteritems(iname_to_tag))
    elif isinstance(iname_to_tag, str):
        def parse_kv(s):
            colon_index = s.find(":")
            if colon_index == -1:
                raise ValueError("tag decl '%s' has no colon" % s)

            return (s[:colon_index].strip(), s[colon_index+1:].strip())

        iname_to_tag = [
                parse_kv(s) for s in iname_to_tag.split(",")
                if s.strip()]

    from loopy.kernel.data import parse_tag as inner_parse_tag

    def parse_tag(tag):
        # Handle the special string forms before delegating to the
        # generic parser.
        if isinstance(tag, str):
            if tag.startswith("like."):
                # copy the tag of another iname
                return kernel.iname_to_tag.get(tag[5:])
            elif tag == "unused.g":
                return find_unused_axis_tag(kernel, "g")
            elif tag == "unused.l":
                return find_unused_axis_tag(kernel, "l")

        return inner_parse_tag(tag)

    iname_to_tag = [(iname, parse_tag(tag)) for iname, tag in iname_to_tag]

    from loopy.kernel.data import (ParallelTag, AutoLocalIndexTagBase,
            ForceSequentialTag)

    new_iname_to_tag = kernel.iname_to_tag.copy()
    for iname, new_tag in iname_to_tag:
        if iname not in kernel.all_inames():
            if ignore_nonexistent:
                continue
            else:
                raise LoopyError("iname '%s' does not exist" % iname)

        old_tag = kernel.iname_to_tag.get(iname)

        retag_ok = False

        # Auto-assigned local tags and forced-sequential tags may always
        # be overwritten.
        if isinstance(old_tag, (AutoLocalIndexTagBase, ForceSequentialTag)):
            retag_ok = True

        if not retag_ok and old_tag is not None and new_tag is None:
            raise ValueError("cannot untag iname '%s'" % iname)

        # (A second, unreachable existence check used to sit here;
        # nonexistent inames are already handled at the top of the loop.)

        if isinstance(new_tag, ParallelTag) \
                and isinstance(old_tag, ForceSequentialTag):
            raise ValueError("cannot tag '%s' as parallel--"
                    "iname requires sequential execution" % iname)

        if isinstance(new_tag, ForceSequentialTag) \
                and isinstance(old_tag, ParallelTag):
            raise ValueError("'%s' is already tagged as parallel, "
                    "but is now prohibited from being parallel "
                    "(likely because of participation in a precompute or "
                    "a reduction)" % iname)

        if (not retag_ok) and (not force) \
                and old_tag is not None and (old_tag != new_tag):
            raise LoopyError("'%s' is already tagged '%s'--cannot retag"
                    % (iname, old_tag))

        new_iname_to_tag[iname] = new_tag

    return kernel.copy(iname_to_tag=new_iname_to_tag)
# }}}
# {{{ duplicate inames
class _InameDuplicator(RuleAwareIdentityMapper):
    """Renames inames according to *old_to_new* wherever the *within*
    stack match applies, in reductions, variable references and
    instruction-level iname dependencies.
    """

    def __init__(self, rule_mapping_context,
            old_to_new, within):
        super(_InameDuplicator, self).__init__(rule_mapping_context)

        self.old_to_new = old_to_new
        self.old_inames_set = set(six.iterkeys(old_to_new))
        self.within = within

    def map_reduction(self, expr, expn_state):
        applicable = (
                set(expr.inames) & self.old_inames_set
                and self.within(
                    expn_state.kernel,
                    expn_state.instruction,
                    expn_state.stack))
        if not applicable:
            return super(_InameDuplicator, self).map_reduction(expr, expn_state)

        # Rename reduction inames, except those shadowed by rule arguments.
        renamed_inames = tuple(
                self.old_to_new.get(iname, iname)
                if iname not in expn_state.arg_context
                else iname
                for iname in expr.inames)

        from loopy.symbolic import Reduction
        return Reduction(expr.operation, renamed_inames,
                self.rec(expr.expr, expn_state),
                expr.allow_simultaneous)

    def map_variable(self, expr, expn_state):
        replacement = self.old_to_new.get(expr.name)

        applicable = (
                replacement is not None
                and expr.name not in expn_state.arg_context
                and self.within(
                    expn_state.kernel,
                    expn_state.instruction,
                    expn_state.stack))
        if not applicable:
            return super(_InameDuplicator, self).map_variable(expr, expn_state)

        from pymbolic import var
        return var(replacement)

    def map_instruction(self, kernel, insn):
        if not self.within(kernel, insn, ()):
            return insn

        renamed_deps = frozenset(
                self.old_to_new.get(iname, iname)
                for iname in insn.forced_iname_deps)
        return insn.copy(forced_iname_deps=renamed_deps)
def duplicate_inames(knl, inames, within, new_inames=None, suffix=None,
        tags=None):
    """Duplicate the inames in *inames*, replacing their uses (where
    *within* matches) by the new copies.

    :arg within: a stack match as understood by
        :func:`loopy.match.parse_stack_match`.
    :arg new_inames: names for the duplicated inames; defaults to names
        derived from the old ones (with *suffix* appended, if given)
    :arg suffix: appended to each old iname to form the new name when
        *new_inames* is not given
    :arg tags: a mapping from old iname to the tag to apply to its copy
    """

    # BUG FIX: 'tags' used to default to a mutable {}, shared across calls.
    if tags is None:
        tags = {}

    # {{{ normalize arguments, find unique new_inames

    if isinstance(inames, str):
        inames = [iname.strip() for iname in inames.split(",")]

    if isinstance(new_inames, str):
        new_inames = [iname.strip() for iname in new_inames.split(",")]

    from loopy.match import parse_stack_match
    within = parse_stack_match(within)

    if new_inames is None:
        new_inames = [None] * len(inames)
    else:
        # Copy so that the caller's list is not mutated below.
        new_inames = list(new_inames)

    if len(new_inames) != len(inames):
        raise ValueError("new_inames must have the same number of entries as inames")

    name_gen = knl.get_var_name_generator()

    for i, iname in enumerate(inames):
        new_iname = new_inames[i]

        if new_iname is None:
            new_iname = iname

            if suffix is not None:
                new_iname += suffix

            new_iname = name_gen(new_iname)

        else:
            if name_gen.is_name_conflicting(new_iname):
                raise ValueError("new iname '%s' conflicts with existing names"
                        % new_iname)

            name_gen.add_name(new_iname)

        new_inames[i] = new_iname

    # }}}

    # {{{ duplicate the inames

    for old_iname, new_iname in zip(inames, new_inames):
        from loopy.kernel.tools import DomainChanger
        domch = DomainChanger(knl, frozenset([old_iname]))

        from loopy.isl_helpers import duplicate_axes
        knl = knl.copy(
                domains=domch.get_domains_with(
                    duplicate_axes(domch.domain, [old_iname], [new_iname])))

    # }}}

    # {{{ change the inames in the code

    rule_mapping_context = SubstitutionRuleMappingContext(
            knl.substitutions, name_gen)
    indup = _InameDuplicator(rule_mapping_context,
            old_to_new=dict(list(zip(inames, new_inames))),
            within=within)

    knl = rule_mapping_context.finish_kernel(
            indup.map_kernel(knl))

    # }}}

    # {{{ realize tags

    for old_iname, new_iname in zip(inames, new_inames):
        new_tag = tags.get(old_iname)
        if new_tag is not None:
            knl = tag_inames(knl, {new_iname: new_tag})

    # }}}

    return knl
# }}}
# {{{ rename_inames
def rename_iname(knl, old_iname, new_iname, existing_ok=False, within=None):
    """Rename *old_iname* to *new_iname*.

    If *new_iname* already exists (and *existing_ok* is set), the two
    inames are merged: the domains must agree, uses of *old_iname* are
    substituted by *new_iname*, and *old_iname* is removed.  Otherwise
    the rename is implemented as duplicate-then-remove.

    :arg within: a stack match as understood by
        :func:`loopy.match.parse_stack_match`.
    :arg existing_ok: execute even if *new_iname* already exists
    """

    var_name_gen = knl.get_var_name_generator()

    does_exist = var_name_gen.is_name_conflicting(new_iname)

    if old_iname not in knl.all_inames():
        raise LoopyError("old iname '%s' does not exist" % old_iname)

    if does_exist and not existing_ok:
        raise LoopyError("iname '%s' conflicts with an existing identifier"
                "--cannot rename" % new_iname)

    if does_exist:
        # {{{ check that the domains match up

        dom = knl.get_inames_domain(frozenset((old_iname, new_iname)))

        var_dict = dom.get_var_dict()
        _, old_idx = var_dict[old_iname]
        _, new_idx = var_dict[new_iname]

        # Project each of the two inames out in turn (by temporarily
        # parametrizing the other) and require the results to be equal,
        # i.e. both inames iterate over the same set.
        par_idx = dom.dim(dim_type.param)
        dom_old = dom.move_dims(
                dim_type.param, par_idx, dim_type.set, old_idx, 1)
        dom_old = dom_old.move_dims(
                dim_type.set, dom_old.dim(dim_type.set), dim_type.param, par_idx, 1)
        dom_old = dom_old.project_out(
                dim_type.set, new_idx if new_idx < old_idx else new_idx - 1, 1)

        par_idx = dom.dim(dim_type.param)
        dom_new = dom.move_dims(
                dim_type.param, par_idx, dim_type.set, new_idx, 1)
        dom_new = dom_new.move_dims(
                dim_type.set, dom_new.dim(dim_type.set), dim_type.param, par_idx, 1)
        dom_new = dom_new.project_out(
                dim_type.set, old_idx if old_idx < new_idx else old_idx - 1, 1)

        if not (dom_old <= dom_new and dom_new <= dom_old):
            raise LoopyError(
                    "inames {old} and {new} do not iterate over the same domain"
                    .format(old=old_iname, new=new_iname))

        # }}}

        from pymbolic import var
        subst_dict = {old_iname: var(new_iname)}

        from loopy.match import parse_stack_match
        within = parse_stack_match(within)

        from pymbolic.mapper.substitutor import make_subst_func
        rule_mapping_context = SubstitutionRuleMappingContext(
                knl.substitutions, var_name_gen)
        smap = RuleAwareSubstitutionMapper(rule_mapping_context,
                make_subst_func(subst_dict), within)

        knl = rule_mapping_context.finish_kernel(
                smap.map_kernel(knl))

        # Redirect explicit iname dependencies from the old to the new iname.
        new_instructions = []
        for insn in knl.instructions:
            if (old_iname in insn.forced_iname_deps
                    and within(knl, insn, ())):
                insn = insn.copy(
                        forced_iname_deps=(
                            (insn.forced_iname_deps - frozenset([old_iname]))
                            | frozenset([new_iname])))

            new_instructions.append(insn)

        knl = knl.copy(instructions=new_instructions)

    else:
        knl = duplicate_inames(
                knl, [old_iname], within=within, new_inames=[new_iname])

    knl = remove_unused_inames(knl, [old_iname])

    return knl
# }}}
# {{{ remove unused inames
def remove_unused_inames(knl, inames=None):
    """Delete those among *inames* that are unused, i.e. project them
    out of the domain. If these inames pose implicit restrictions on
    other inames, these restrictions will persist as existentially
    quantified variables.

    :arg inames: may be an iterable of inames or a string of comma-separated
        inames; defaults to all inames of *knl*
    """

    # {{{ normalize arguments

    if inames is None:
        inames = knl.all_inames()
    elif isinstance(inames, str):
        # Strip whitespace so "i, j" works, consistently with the other
        # transforms in this module.
        inames = [iname.strip() for iname in inames.split(",")]

    # }}}

    # {{{ check which inames are unused

    import loopy as lp
    exp_knl = lp.expand_subst(knl)

    inames = set(inames)
    used_inames = set()
    for insn in exp_knl.instructions:
        used_inames.update(
                exp_knl.insn_inames(insn.id)
                | insn.reduction_inames())

    unused_inames = inames - used_inames

    # }}}

    # {{{ remove them

    from loopy.kernel.tools import DomainChanger

    for iname in unused_inames:
        domch = DomainChanger(knl, (iname,))

        dom = domch.domain
        dt, idx = dom.get_var_dict()[iname]
        dom = dom.project_out(dt, idx, 1)

        knl = knl.copy(domains=domch.get_domains_with(dom))

    # }}}

    return knl
# }}}
# {{{ split_reduction
class _ReductionSplitter(RuleAwareIdentityMapper):
    """Splits reductions over a superset of *inames* into two nested
    reductions, with *inames* forming the inner (``direction == "in"``)
    or outer (``direction == "out"``) reduction.
    """

    def __init__(self, rule_mapping_context, within, inames, direction):
        super(_ReductionSplitter, self).__init__(
                rule_mapping_context)

        self.within = within
        self.inames = inames
        self.direction = direction

    def map_reduction(self, expr, expn_state):
        if set(expr.inames) & set(expn_state.arg_context):
            # FIXME
            raise NotImplementedError()

        if (self.inames <= set(expr.inames)
                and self.within(
                    expn_state.kernel,
                    expn_state.instruction,
                    expn_state.stack)):
            leftover_inames = set(expr.inames) - self.inames

            from loopy.symbolic import Reduction
            if self.direction == "in":
                return Reduction(expr.operation, tuple(leftover_inames),
                        Reduction(expr.operation, tuple(self.inames),
                            self.rec(expr.expr, expn_state),
                            expr.allow_simultaneous),
                        expr.allow_simultaneous)
            elif self.direction == "out":
                # BUG FIX: the outer Reduction previously did not receive
                # allow_simultaneous, inconsistently with the "in" branch.
                return Reduction(expr.operation, tuple(self.inames),
                        Reduction(expr.operation, tuple(leftover_inames),
                            self.rec(expr.expr, expn_state),
                            expr.allow_simultaneous),
                        expr.allow_simultaneous)
            else:
                assert False
        else:
            return super(_ReductionSplitter, self).map_reduction(expr, expn_state)
def _split_reduction(kernel, inames, direction, within=None):
    """Common implementation of :func:`split_reduction_inward` and
    :func:`split_reduction_outward`.

    :arg direction: ``"in"`` or ``"out"``
    """
    if direction not in ["in", "out"]:
        raise ValueError("invalid value for 'direction': %s" % direction)

    if isinstance(inames, str):
        # Strip whitespace so "i, j" works, consistently with the other
        # transforms in this module.
        inames = [iname.strip() for iname in inames.split(",")]
    inames = set(inames)

    from loopy.match import parse_stack_match
    within = parse_stack_match(within)

    rule_mapping_context = SubstitutionRuleMappingContext(
            kernel.substitutions, kernel.get_var_name_generator())
    rsplit = _ReductionSplitter(rule_mapping_context,
            within, inames, direction)
    return rule_mapping_context.finish_kernel(
            rsplit.map_kernel(kernel))
def split_reduction_inward(kernel, inames, within=None):
    """Takes a reduction of the form::

        sum([i,j,k], ...)

    and splits it into two nested reductions::

        sum([j,k], sum([i], ...))

    In this case, *inames* would have been ``"i"`` indicating that
    the iname ``i`` should be made the iname governing the inner reduction.

    :arg inames: A list of inames, or a comma-separated string that can
        be parsed into those
    """
    # Delegate to the shared splitter with the "in" direction.
    return _split_reduction(kernel, inames, direction="in", within=within)
def split_reduction_outward(kernel, inames, within=None):
    """Takes a reduction of the form::

        sum([i,j,k], ...)

    and splits it into two nested reductions::

        sum([i], sum([j,k], ...))

    In this case, *inames* would have been ``"i"`` indicating that
    the iname ``i`` should be made the iname governing the outer reduction.

    :arg inames: A list of inames, or a comma-separated string that can
        be parsed into those
    """
    # Delegate to the shared splitter with the "out" direction.
    return _split_reduction(kernel, inames, direction="out", within=within)
# }}}
# {{{ affine map inames
def affine_map_inames(kernel, old_inames, new_inames, equations):
    """Return a new *kernel* where the affine transform
    specified by *equations* has been applied to the inames.

    :arg old_inames: A list of inames to be replaced by affine transforms
        of their values.
        May also be a string of comma-separated inames.

    :arg new_inames: A list of new inames that are not yet used in *kernel*,
        but have their values established in terms of *old_inames* by
        *equations*.
        May also be a string of comma-separated inames.

    :arg equations: A list of equations establishing a relationship
        between *old_inames* and *new_inames*. Each equation may be
        a tuple ``(lhs, rhs)`` of expressions or a string, with left and
        right hand side of the equation separated by ``=``.
    """

    # {{{ check and parse arguments

    if isinstance(new_inames, str):
        new_inames = new_inames.split(",")
        new_inames = [iname.strip() for iname in new_inames]
    if isinstance(old_inames, str):
        old_inames = old_inames.split(",")
        old_inames = [iname.strip() for iname in old_inames]
    if isinstance(equations, str):
        equations = [equations]

    import re
    eqn_re = re.compile(r"^([^=]+)=([^=]+)$")

    def parse_equation(eqn):
        # Accept "lhs = rhs" strings as well as (lhs, rhs) expression tuples.
        if isinstance(eqn, str):
            eqn_match = eqn_re.match(eqn)
            if not eqn_match:
                raise ValueError("invalid equation: %s" % eqn)

            from loopy.symbolic import parse
            lhs = parse(eqn_match.group(1))
            rhs = parse(eqn_match.group(2))
            return (lhs, rhs)
        elif isinstance(eqn, tuple):
            if len(eqn) != 2:
                raise ValueError("unexpected length of equation tuple, "
                        "got %d, should be 2" % len(eqn))
            return eqn
        else:
            # BUG FIX: this message used "%d" with a string (raising a
            # TypeError instead of the intended ValueError) and was missing
            # a separator between the two message fragments.
            raise ValueError("unexpected type of equation, "
                    "got '%s', should be string or tuple"
                    % type(eqn).__name__)

    equations = [parse_equation(eqn) for eqn in equations]

    all_vars = kernel.all_variable_names()
    for iname in new_inames:
        if iname in all_vars:
            raise LoopyError("new iname '%s' is already used in kernel"
                    % iname)

    for iname in old_inames:
        if iname not in kernel.all_inames():
            raise LoopyError("old iname '%s' not known" % iname)

    # }}}

    # {{{ substitute iname use

    from pymbolic.algorithm import solve_affine_equations_for
    old_inames_to_expr = solve_affine_equations_for(old_inames, equations)

    subst_dict = dict(
            (v.name, expr)
            for v, expr in old_inames_to_expr.items())

    var_name_gen = kernel.get_var_name_generator()

    from pymbolic.mapper.substitutor import make_subst_func
    from loopy.match import parse_stack_match

    rule_mapping_context = SubstitutionRuleMappingContext(
            kernel.substitutions, var_name_gen)
    old_to_new = RuleAwareSubstitutionMapper(rule_mapping_context,
            make_subst_func(subst_dict), within=parse_stack_match(None))

    kernel = (
            rule_mapping_context.finish_kernel(
                old_to_new.map_kernel(kernel))
            .copy(
                applied_iname_rewrites=kernel.applied_iname_rewrites + [subst_dict]
                ))

    # }}}

    # {{{ change domains

    new_inames_set = frozenset(new_inames)
    old_inames_set = frozenset(old_inames)

    new_domains = []
    for idom, dom in enumerate(kernel.domains):
        dom_var_dict = dom.get_var_dict()
        old_iname_overlap = [
                iname
                for iname in old_inames
                if iname in dom_var_dict]

        if not old_iname_overlap:
            new_domains.append(dom)
            continue

        from loopy.symbolic import get_dependencies
        dom_new_inames = set()
        dom_old_inames = set()

        # mapping for new inames to dim_types
        new_iname_dim_types = {}

        dom_equations = []
        for iname in old_iname_overlap:
            for ieqn, (lhs, rhs) in enumerate(equations):
                eqn_deps = get_dependencies(lhs) | get_dependencies(rhs)
                if iname in eqn_deps:
                    dom_new_inames.update(eqn_deps & new_inames_set)
                    dom_old_inames.update(eqn_deps & old_inames_set)

                if dom_old_inames:
                    dom_equations.append((lhs, rhs))

                this_eqn_old_iname_dim_types = set(
                        dom_var_dict[old_iname][0]
                        for old_iname in eqn_deps & old_inames_set)

                if this_eqn_old_iname_dim_types:
                    if len(this_eqn_old_iname_dim_types) > 1:
                        raise ValueError("inames '%s' (from equation %d (0-based)) "
                                "in domain %d (0-based) are not "
                                "of a uniform dim_type"
                                % (", ".join(eqn_deps & old_inames_set), ieqn,
                                    idom))

                    this_eqn_new_iname_dim_type, = this_eqn_old_iname_dim_types

                    for new_iname in eqn_deps & new_inames_set:
                        if new_iname in new_iname_dim_types:
                            if (this_eqn_new_iname_dim_type
                                    != new_iname_dim_types[new_iname]):
                                raise ValueError("dim_type disagreement for "
                                        "iname '%s' (from equation %d (0-based)) "
                                        "in domain %d (0-based)"
                                        % (new_iname, ieqn, idom))
                        else:
                            new_iname_dim_types[new_iname] = \
                                    this_eqn_new_iname_dim_type

        if not dom_old_inames <= set(dom_var_dict):
            raise ValueError("domain %d (0-based) does not know about "
                    "all old inames (specifically '%s') needed to define new inames"
                    % (idom, ", ".join(dom_old_inames - set(dom_var_dict))))

        # add inames to domain with correct dim_types
        dom_new_inames = list(dom_new_inames)
        for iname in dom_new_inames:
            dt = new_iname_dim_types[iname]
            iname_idx = dom.dim(dt)

            dom = dom.add_dims(dt, 1)
            dom = dom.set_dim_name(dt, iname_idx, iname)

        # add equations
        from loopy.symbolic import aff_from_expr
        for lhs, rhs in dom_equations:
            dom = dom.add_constraint(
                    isl.Constraint.equality_from_aff(
                        aff_from_expr(dom.space, rhs - lhs)))

        # project out old inames
        for iname in dom_old_inames:
            dt, idx = dom.get_var_dict()[iname]
            dom = dom.project_out(dt, idx, 1)

        new_domains.append(dom)

    # }}}

    # {{{ switch iname refs in instructions

    def fix_iname_set(insn_id, inames):
        # Instructions must use either all or none of the old inames.
        if old_inames_set <= inames:
            return (inames - old_inames_set) | new_inames_set
        elif old_inames_set & inames:
            raise LoopyError("instruction '%s' uses only a part (%s), not all, "
                    "of the old inames"
                    % (insn_id, ", ".join(old_inames_set & inames)))
        else:
            return inames

    new_instructions = [
            insn.copy(forced_iname_deps=fix_iname_set(
                insn.id, insn.forced_iname_deps))
            for insn in kernel.instructions]

    # }}}

    return kernel.copy(domains=new_domains, instructions=new_instructions)
# }}}
# {{{ find unused axes
def find_unused_axis_tag(kernel, kind, insn_match=None):
    """For one of the hardware-parallel execution tags, find an unused
    axis.
    :arg insn_match: An instruction match as understood by
        :func:`loopy.match.parse_match`.
    :arg kind: may be "l" or "g", or the corresponding tag class name
    :returns: an :class:`GroupIndexTag` or :class:`LocalIndexTag`
        that is not being used within the instructions matched by
        *insn_match*.
    """
    used_axes = set()
    # fix: module is 'loopy', not 'looopy' (ImportError before)
    from loopy.kernel.data import GroupIndexTag, LocalIndexTag
    if isinstance(kind, str):
        found = False
        for cls in [GroupIndexTag, LocalIndexTag]:
            if kind == cls.print_name:
                kind = cls
                found = True
                break
        if not found:
            raise LoopyError("invalid tag kind: %s" % kind)
    from loopy.match import parse_match
    match = parse_match(insn_match)
    insns = [insn for insn in kernel.instructions if match(kernel, insn)]
    for insn in insns:
        for iname in kernel.insn_inames(insn):
            dim_tag = kernel.iname_to_tag.get(iname)
            if isinstance(dim_tag, kind):
                # fix: record the axis of the tag *instance*; 'kind' is
                # the tag class here, so 'kind.axis' could never reflect
                # the axis actually in use.
                used_axes.add(dim_tag.axis)
    # Smallest non-negative axis index not yet used.
    i = 0
    while i in used_axes:
        i += 1
    return kind(i)
# }}}
# {{{ separate_loop_head_tail_slab
# undocumented, because not super-useful
def separate_loop_head_tail_slab(kernel, iname, head_it_count, tail_it_count):
    """Mark *iname* so that the separate code is generated for
    the lower *head_it_count* and the upper *tail_it_count*
    iterations of the loop on *iname*.
    """
    # Work on a copy so the increments mapping on the input kernel is
    # never mutated; kernels are treated as immutable.
    updated_increments = kernel.iname_slab_increments.copy()
    updated_increments[iname] = (head_it_count, tail_it_count)
    return kernel.copy(iname_slab_increments=updated_increments)
# }}}
# {{{ make_reduction_inames_unique
class _ReductionInameUniquifier(RuleAwareIdentityMapper):
    """Expression mapper that renames reduction inames so that no
    non-simultaneous reduction iname is used by more than one reduction.
    Renamed (old, new) pairs are collected in :attr:`old_to_new` so the
    caller can duplicate the corresponding domain axes afterwards.
    """
    def __init__(self, rule_mapping_context, inames, within):
        super(_ReductionInameUniquifier, self).__init__(rule_mapping_context)
        # inames: restrict renaming to this set, or None for "all inames"
        self.inames = inames
        # (old_iname, new_iname) pairs recorded as renames happen
        self.old_to_new = []
        # stack match callable limiting which instructions are rewritten
        self.within = within
        # usage counters, keyed by iname; a rename only happens once an
        # iname has been seen in more than one reduction
        self.iname_to_red_count = {}
        self.iname_to_nonsimultaneous_red_count = {}
    def map_reduction(self, expr, expn_state):
        within = self.within(
            expn_state.kernel,
            expn_state.instruction,
            expn_state.stack)
        # Count every sighting first, even outside the match, so the
        # "used more than once" test below sees global usage.
        for iname in expr.inames:
            self.iname_to_red_count[iname] = (
                self.iname_to_red_count.get(iname, 0) + 1)
            if not expr.allow_simultaneous:
                self.iname_to_nonsimultaneous_red_count[iname] = (
                    self.iname_to_nonsimultaneous_red_count.get(iname, 0) + 1)
        if within and not expr.allow_simultaneous:
            subst_dict = {}
            from pymbolic import var
            new_inames = []
            for iname in expr.inames:
                # Keep the iname as-is when it is outside the requested
                # subset or has only been seen once so far.
                if (
                        not (self.inames is None or iname in self.inames)
                        or
                        self.iname_to_red_count[iname] <= 1):
                    new_inames.append(iname)
                    continue
                # Otherwise mint a fresh name and remember the mapping.
                new_iname = self.rule_mapping_context.make_unique_var_name(iname)
                subst_dict[iname] = var(new_iname)
                self.old_to_new.append((iname, new_iname))
                new_inames.append(new_iname)
            from loopy.symbolic import SubstitutionMapper
            from pymbolic.mapper.substitutor import make_subst_func
            from loopy.symbolic import Reduction
            # Rebuild the reduction over the (possibly renamed) inames,
            # substituting the new names into the body and recursing into
            # nested reductions.
            return Reduction(expr.operation, tuple(new_inames),
                    self.rec(
                        SubstitutionMapper(make_subst_func(subst_dict))(
                            expr.expr),
                        expn_state),
                    expr.allow_simultaneous)
        else:
            return super(_ReductionInameUniquifier, self).map_reduction(
                expr, expn_state)
def make_reduction_inames_unique(kernel, inames=None, within=None):
    """
    :arg inames: if not *None*, only apply to these inames
    :arg within: a stack match as understood by
        :func:`loopy.match.parse_stack_match`.
    .. versionadded:: 2016.2
    """
    from loopy.match import parse_stack_match
    name_generator = kernel.get_var_name_generator()
    stack_match = parse_stack_match(within)
    # Rewrite expressions so every non-simultaneous reduction gets
    # uniquely named inames; the uniquifier records what it renamed.
    rule_mapping_context = SubstitutionRuleMappingContext(
        kernel.substitutions, name_generator)
    uniquifier = _ReductionInameUniquifier(
        rule_mapping_context, inames, within=stack_match)
    kernel = rule_mapping_context.finish_kernel(
        uniquifier.map_kernel(kernel))
    # For each rename, duplicate the old iname's domain axis under the
    # new name so the renamed reduction still has a domain to range over.
    from loopy.kernel.tools import DomainChanger
    from loopy.isl_helpers import duplicate_axes
    for old_iname, new_iname in uniquifier.old_to_new:
        domain_changer = DomainChanger(kernel, frozenset([old_iname]))
        kernel = kernel.copy(
            domains=domain_changer.get_domains_with(
                duplicate_axes(
                    domain_changer.domain, [old_iname], [new_iname])))
    return kernel
# }}}
# vim: foldmethod=marker
| [
"inform@tiker.net"
] | inform@tiker.net |
fe77dea5d4d477345bc22432f871ea702b083913 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03106/s992142201.py | 9c7161f9a8277d4dc2ac8882018659d1ef28d852 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | a,b,k=map(int,input().split())
# Collect every common divisor of a and b, largest first.  (a, b, k are
# read from stdin on the preceding line; assumes the k-th common divisor
# exists -- presumably guaranteed by the problem input, TODO confirm.)
Min=min(a,b)
l=[]
for i in range(Min,0,-1):
    if a%i==0 and b%i==0:
        l.append(i)
#print(l)
# Print the k-th largest common divisor (k is 1-based).
print(l[k-1])
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e25a8b7a032e5d796003b9abe21a15a6d1a72cc6 | 3141eceaae7509301a125e10c5ba4ec4c24b6412 | /pydruid/db/sqlalchemy.py | 9ac51f822a4441fd3639443b990874982ba159e5 | [
"Apache-2.0"
] | permissive | isabella232/pydruid_DEPRECATED | a6959112709845632d51976661bb7358efc227c7 | 471bc21308f98a7f9d245e2ea76e54892bd0b805 | refs/heads/lyft | 2023-03-11T08:29:49.961290 | 2020-10-09T19:17:20 | 2020-10-09T19:17:20 | 315,906,418 | 0 | 0 | NOASSERTION | 2021-02-23T18:56:22 | 2020-11-25T10:36:33 | null | UTF-8 | Python | false | false | 7,412 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from sqlalchemy.engine import default
from sqlalchemy.sql import compiler
from sqlalchemy import types
import pydruid.db
from pydruid.db import exceptions
# Schemas hidden from get_schema_names() below.
RESERVED_SCHEMAS = ['INFORMATION_SCHEMA']
# Maps the lower-cased DATA_TYPE strings reported by Druid's
# INFORMATION_SCHEMA.COLUMNS to SQLAlchemy column types (used by
# DruidDialect.get_columns()).
type_map = {
    'char': types.String,
    'varchar': types.String,
    'float': types.Float,
    'decimal': types.Float,
    'real': types.Float,
    'double': types.Float,
    'boolean': types.Boolean,
    'tinyint': types.BigInteger,
    'smallint': types.BigInteger,
    'integer': types.BigInteger,
    'bigint': types.BigInteger,
    'timestamp': types.TIMESTAMP,
    'date': types.DATE,
    'other': types.BLOB,
}
class UniversalSet(object):
    """A pseudo-set that reports *every* candidate as a member.

    Used below as DruidIdentifierPreparer.reserved_words so that any
    identifier tests as reserved.
    """

    def __contains__(self, candidate):
        # Membership always succeeds, regardless of the candidate.
        return True
class DruidIdentifierPreparer(compiler.IdentifierPreparer):
    # Every identifier tests as a reserved word (UniversalSet's
    # __contains__ is always True) -- presumably so the preparer quotes
    # all identifiers; confirm against SQLAlchemy's quoting rules.
    reserved_words = UniversalSet()
class DruidCompiler(compiler.SQLCompiler):
    # No Druid-specific SQL rendering overrides needed; the generic
    # SQLAlchemy statement compiler is used as-is.
    pass
class DruidTypeCompiler(compiler.GenericTypeCompiler):
    """Renders SQLAlchemy types as Druid SQL type names.

    Character types become STRING, REAL becomes DOUBLE, and the numeric /
    temporal family collapses onto LONG.  Binary, LOB and time-of-day
    types have no Druid representation and are rejected.
    """

    def _reject(self, name):
        # Central raise for types Druid cannot represent at all.
        raise exceptions.NotSupportedError('Type %s is not supported' % name)

    def visit_REAL(self, type_, **kwargs):
        return "DOUBLE"

    def visit_NUMERIC(self, type_, **kwargs):
        return "LONG"

    def visit_DECIMAL(self, type_, **kwargs):
        return self.visit_NUMERIC(type_, **kwargs)

    def visit_INTEGER(self, type_, **kwargs):
        return self.visit_NUMERIC(type_, **kwargs)

    def visit_SMALLINT(self, type_, **kwargs):
        return self.visit_NUMERIC(type_, **kwargs)

    def visit_BIGINT(self, type_, **kwargs):
        return self.visit_NUMERIC(type_, **kwargs)

    def visit_BOOLEAN(self, type_, **kwargs):
        return self.visit_NUMERIC(type_, **kwargs)

    def visit_TIMESTAMP(self, type_, **kwargs):
        return self.visit_NUMERIC(type_, **kwargs)

    def visit_DATE(self, type_, **kwargs):
        return self.visit_NUMERIC(type_, **kwargs)

    def visit_CHAR(self, type_, **kwargs):
        return "STRING"

    def visit_NCHAR(self, type_, **kwargs):
        return self.visit_CHAR(type_, **kwargs)

    def visit_VARCHAR(self, type_, **kwargs):
        return self.visit_CHAR(type_, **kwargs)

    def visit_NVARCHAR(self, type_, **kwargs):
        return self.visit_CHAR(type_, **kwargs)

    def visit_TEXT(self, type_, **kwargs):
        return self.visit_CHAR(type_, **kwargs)

    def visit_DATETIME(self, type_, **kwargs):
        self._reject('DATETIME')

    def visit_TIME(self, type_, **kwargs):
        self._reject('TIME')

    def visit_BINARY(self, type_, **kwargs):
        self._reject('BINARY')

    def visit_VARBINARY(self, type_, **kwargs):
        self._reject('VARBINARY')

    def visit_BLOB(self, type_, **kwargs):
        self._reject('BLOB')

    def visit_CLOB(self, type_, **kwargs):
        # Message text kept verbatim from the original ('CBLOB').
        self._reject('CBLOB')

    def visit_NCLOB(self, type_, **kwargs):
        # Message text kept verbatim from the original ('NCBLOB').
        self._reject('NCBLOB')
class DruidDialect(default.DefaultDialect):
    """SQLAlchemy dialect for Apache Druid over its HTTP/REST SQL API.

    Druid exposes no alterable schema, primary keys, indexes or foreign
    keys, so the reflection hooks below return empty results and the
    corresponding capability flags are switched off.
    """
    name = 'druid'
    scheme = 'http'
    driver = 'rest'
    user = None
    password = None
    preparer = DruidIdentifierPreparer
    statement_compiler = DruidCompiler
    type_compiler = DruidTypeCompiler
    supports_alter = False
    supports_pk_autoincrement = False
    supports_default_values = False
    supports_empty_insert = False
    supports_unicode_statements = True
    supports_unicode_binds = True
    returns_unicode_strings = True
    description_encoding = None
    supports_native_boolean = True
    def __init__(self, context=None, *args, **kwargs):
        # context: Druid query-context dict forwarded on each connection.
        super(DruidDialect, self).__init__(*args, **kwargs)
        self.context = context or {}
    @classmethod
    def dbapi(cls):
        """Return the DB-API 2.0 module backing this dialect."""
        return pydruid.db
    def create_connect_args(self, url):
        """Translate a SQLAlchemy URL into pydruid.db connect() kwargs.

        Defaults to port 8082; 'header=true' in the query string is
        passed through as a boolean.
        """
        kwargs = {
            'host': url.host,
            'port': url.port or 8082,
            'user': url.username or None,
            'password': url.password or None,
            'path': url.database,
            'scheme': self.scheme,
            'context': self.context,
            'header': url.query.get('header') == 'true',
        }
        return ([], kwargs)
    def get_schema_names(self, connection, **kwargs):
        # Each Druid datasource appears as a table in the "druid" schema. This
        # is also the default schema, so Druid datasources can be referenced as
        # either druid.dataSourceName or simply dataSourceName.
        result = connection.execute(
            'SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA')
        return [
            row.SCHEMA_NAME for row in result
            if row.SCHEMA_NAME not in RESERVED_SCHEMAS
        ]
    def has_table(self, connection, table_name, schema=None):
        """Check table existence via INFORMATION_SCHEMA.

        NOTE(review): table_name is interpolated directly into the SQL
        string -- safe only for trusted identifiers.
        """
        query = """
            SELECT COUNT(*) > 0 AS exists_
            FROM INFORMATION_SCHEMA.TABLES
            WHERE TABLE_NAME = '{table_name}'
        """.format(table_name=table_name)
        result = connection.execute(query)
        return result.fetchone().exists_
    def get_table_names(self, connection, schema=None, **kwargs):
        """List table names, optionally filtered to one schema."""
        query = "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES"
        if schema:
            query = "{query} WHERE TABLE_SCHEMA = '{schema}'".format(
                query=query, schema=schema)
        result = connection.execute(query)
        return [row.TABLE_NAME for row in result]
    def get_view_names(self, connection, schema=None, **kwargs):
        # Druid has no views to reflect.
        return []
    def get_table_options(self, connection, table_name, schema=None, **kwargs):
        return {}
    def get_columns(self, connection, table_name, schema=None, **kwargs):
        """Reflect column metadata, mapping Druid DATA_TYPE strings
        through the module-level type_map."""
        query = """
            SELECT COLUMN_NAME,
                   DATA_TYPE,
                   IS_NULLABLE,
                   COLUMN_DEFAULT
            FROM INFORMATION_SCHEMA.COLUMNS
            WHERE TABLE_NAME = '{table_name}'
        """.format(table_name=table_name)
        if schema:
            query = "{query} AND TABLE_SCHEMA = '{schema}'".format(
                query=query, schema=schema)
        result = connection.execute(query)
        return [
            {
                'name': row.COLUMN_NAME,
                'type': type_map[row.DATA_TYPE.lower()],
                'nullable': get_is_nullable(row.IS_NULLABLE),
                'default': get_default(row.COLUMN_DEFAULT),
            }
            for row in result
        ]
    def get_pk_constraint(self, connection, table_name, schema=None, **kwargs):
        # No primary keys in Druid.
        return {'constrained_columns': [], 'name': None}
    def get_foreign_keys(self, connection, table_name, schema=None, **kwargs):
        return []
    def get_check_constraints(
        self,
        connection,
        table_name,
        schema=None,
        **kwargs
    ):
        return []
    def get_table_comment(self, connection, table_name, schema=None, **kwargs):
        return {'text': ''}
    def get_indexes(self, connection, table_name, schema=None, **kwargs):
        return []
    def get_unique_constraints(
        self,
        connection,
        table_name,
        schema=None,
        **kwargs
    ):
        return []
    def get_view_definition(
        self,
        connection,
        view_name,
        schema=None,
        **kwargs
    ):
        pass
    def do_rollback(self, dbapi_connection):
        # Druid has no transactions; rollback is a no-op.
        pass
    def _check_unicode_returns(self, connection, additional_tests=None):
        return True
    def _check_unicode_description(self, connection):
        return True
# Scheme-specific entry points registered with SQLAlchemy: plain HTTP is
# the default dialect; HTTPS only overrides the URL scheme.
DruidHTTPDialect = DruidDialect
class DruidHTTPSDialect(DruidDialect):
    scheme = 'https'
def get_is_nullable(druid_is_nullable):
    """Interpret Druid's IS_NULLABLE column ('YES'/'NO'); anything that
    is not 'yes' (case-insensitively) counts as non-nullable."""
    normalized = druid_is_nullable.lower()
    return normalized == 'yes'
def get_default(druid_column_default):
    """Normalize Druid's COLUMN_DEFAULT value.

    An empty string means "no default" and maps to None; anything else is
    stringified.  (Currently COLUMN_DEFAULT is always the empty string.)
    """
    if druid_column_default == '':
        return None
    return str(druid_column_default)
| [
"maximebeauchemin@gmail.com"
] | maximebeauchemin@gmail.com |
21641b139d3431097cf789c444bfd4c467e6ebe1 | 20d1bf9505929948f3fc50040e1471baadbe0351 | /keras_frcnn/muscima_annotation_generator.py | 708bb5917465720f3738e2e50da596878874f90b | [
"MIT"
] | permissive | JuanuMusic/MusicObjectDetector | 1d1c57508196fe66f8a3a4acb4ef3ea28f652510 | 0382c36fa4b4ac29d80980204b0710a5af3c0bad | refs/heads/master | 2021-09-02T07:27:06.277597 | 2017-12-31T12:54:44 | 2017-12-31T12:54:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,787 | py | import os
from itertools import groupby
from lxml import etree
from typing import List, Tuple
from lxml.etree import Element, SubElement
def create_annotations_in_plain_format(exported_annotations_file_path: str, objects_appearing_in_cropped_image: List[
    Tuple[str, str, Tuple[int, int, int, int]]]):
    """Append one CSV line per detected object to the annotations file.

    Each entry of *objects_appearing_in_cropped_image* is a
    (file_name, class_name, (top, left, bottom, right)) tuple; the line
    written is "file_name,left,top,right,bottom,class_name".
    """
    with open(exported_annotations_file_path, "a") as annotations_file:
        for file_name, class_name, (top, left, bottom, right) in objects_appearing_in_cropped_image:
            annotations_file.write("{0},{1},{2},{3},{4},{5}\n".format(file_name,
                                                                      left,
                                                                      top,
                                                                      right,
                                                                      bottom,
                                                                      class_name))
def create_annotations_in_pascal_voc_format(annotations_folder: str,
                                            file_name: str,
                                            objects_appearing_in_cropped_image: List[
                                                Tuple[str, str, Tuple[int, int, int, int]]],
                                            image_width: int,
                                            image_height: int,
                                            image_depth: int):
    """Write one Pascal VOC XML annotation file for a cropped image.

    The file is named after *file_name* with an .xml extension and placed
    in *annotations_folder* (created on demand).  Each entry of
    *objects_appearing_in_cropped_image* is a
    (file_name, class_name, (ymin, xmin, ymax, xmax)) tuple.
    """

    def append_text_node(parent, tag, text):
        # Helper: create <tag>text</tag> under *parent* and return it.
        node = SubElement(parent, tag)
        node.text = text
        return node

    os.makedirs(annotations_folder, exist_ok=True)

    annotation = Element("annotation")
    append_text_node(annotation, "folder", "muscima_pp_cropped_images")
    append_text_node(annotation, "filename", file_name)

    source = SubElement(annotation, "source")
    append_text_node(source, "database", "MUSCIMA++")
    append_text_node(source, "annotation", "MUSCIMA++ (v0.9.1)")
    append_text_node(source, "image", "CVC-MUSCIMA")

    size = SubElement(annotation, "size")
    append_text_node(size, "width", str(image_width))
    append_text_node(size, "height", str(image_height))
    append_text_node(size, "depth", str(image_depth))

    # One <object> element per detected symbol.
    for _, class_name, bounding_box in objects_appearing_in_cropped_image:
        ymin, xmin, ymax, xmax = bounding_box
        detected = SubElement(annotation, "object")
        append_text_node(detected, "name", class_name)
        append_text_node(detected, "pose", "Unspecified")
        append_text_node(detected, "truncated", "0")
        append_text_node(detected, "difficult", "0")
        bndbox = SubElement(detected, "bndbox")
        append_text_node(bndbox, "xmin", str(xmin))
        append_text_node(bndbox, "ymin", str(ymin))
        append_text_node(bndbox, "xmax", str(xmax))
        append_text_node(bndbox, "ymax", str(ymax))

    xml_file_path = os.path.join(annotations_folder, os.path.splitext(file_name)[0] + ".xml")
    with open(xml_file_path, "wb") as xml_file:
        xml_file.write(etree.tostring(annotation, pretty_print=True))
| [
"alexander.pacha@gmail.com"
] | alexander.pacha@gmail.com |
f22791718a15c54fba750f4dd80a0d6972ecc52f | 077f29021738c3b577c7c3d9ef5851d76e93cbed | /demo/assignments/days_between.py | bff4dffec1b0f26912c3685b986474e7419edc8c | [] | no_license | srikanthpragada/PYTHON_10_JULY_2020 | fb410d87260eb290ebcc5ac6a88b6d6b01ee15b5 | b7a586cbcd49934d36facb4dd748c54038838334 | refs/heads/master | 2022-12-05T09:05:33.192365 | 2020-08-26T14:27:09 | 2020-08-26T14:27:09 | 279,319,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | from datetime import datetime, date
file = open("dates.txt", "rt")
dates = []
for line in file:
parts = line.strip().split(",")
try:
if len(parts) == 2:
fd = datetime.strptime(parts[0], "%d-%m-%Y")
sd = datetime.strptime(parts[1], "%d-%m-%Y")
elif len(parts) == 1:
fd = datetime.strptime(parts[0], "%d-%m-%Y")
sd = datetime.now()
days = (sd - fd).days
dates.append((fd, sd, days)) # Add tuple with data to list
except:
# print("Invalid line :", line)
pass
for t in sorted(dates, key=lambda t: t[2]):
print(f"{t[0].strftime('%d-%m-%Y')} - {t[1].strftime('%d-%m-%Y')} - {t[2]:4}")
| [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
32369f6c433f82f5061430fab8e40f62b23a08e0 | 7deda84f7a280f5a0ee69b98c6a6e7a2225dab24 | /KBL/migrations/0009_auto_20200514_1542.py | 3b32eeb87339f7dfb830c0485a45742ab4d6c936 | [] | no_license | Cornex-Inc/Coffee | 476e30f29412373fb847b2d518331e6c6b9fdbbf | fcd86f20152e2b0905f223ff0e40b1881db634cf | refs/heads/master | 2023-01-13T01:56:52.755527 | 2020-06-08T02:59:18 | 2020-06-08T02:59:18 | 240,187,025 | 0 | 0 | null | 2023-01-05T23:58:52 | 2020-02-13T05:47:41 | Python | UTF-8 | Python | false | false | 444 | py | # Generated by Django 2.1.15 on 2020-05-14 15:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('KBL', '0008_customer_company_date_establishment'),
]
operations = [
migrations.AlterField(
model_name='customer_company',
name='date_establishment',
field=models.CharField(default='0000-00-00', max_length=10),
),
]
| [
"khm4321@naver.com"
] | khm4321@naver.com |
01de42efe5caf477e9a65304c3a50ec28a367993 | 9b8b2b867379d04bdd2568bfd211f456401ce702 | /LogisticandSVM.py | ceaef98d4cbe1b3622d090e09ed77873dda733fb | [] | no_license | bhatnagaranshika02/Machine-Learning-Practice | fb246ba5bac20aae18c58a7f58b529c63fd09bdd | 7790bb624d467c221749c7a16f4e2486668e6dbe | refs/heads/master | 2022-11-19T09:27:52.848116 | 2020-07-20T21:08:16 | 2020-07-20T21:08:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | from sklearn import datasets
# Compare logistic regression and SVM classifiers on the scikit-learn
# digits dataset, reporting train and test accuracy for each model.
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC

digits = datasets.load_digits()

# Fix: use one consistent naming scheme for the split -- the original
# assigned X_train/x_test/Y_train/y_test but then referenced the
# undefined names y_train and X_test (NameError at runtime).  The
# missing model/splitter imports are added above for the same reason.
X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target)

# Apply logistic regression and print scores
lr = LogisticRegression()
lr.fit(X_train, y_train)
print(lr.score(X_train, y_train))
print(lr.score(X_test, y_test))

# Apply SVM and print scores
svm = SVC()
svm.fit(X_train, y_train)
print(svm.score(X_train, y_train))
print(svm.score(X_test, y_test))
print(svm.score(X_test, y_test))
| [
"bhatnagaranshika02@gmail.com"
] | bhatnagaranshika02@gmail.com |
6c730c4a0976bf8d2ad2c79816467ea1a0eff151 | 090324db0c04d8c30ad6688547cfea47858bf3af | /soko/struct/glue.py | 821c9fd3e606fc467d0a40edc857959287d960d6 | [] | no_license | fidlej/sokobot | b82c4c36d73e224d0d0e1635021ca04485da589e | d3d04753a5043e6a22dafd132fa633d8bc66b9ea | refs/heads/master | 2021-01-21T13:14:29.523501 | 2011-06-12T07:34:14 | 2011-06-12T07:34:14 | 32,650,745 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py |
from soko.env.env import Env
from soko.struct.expanders.pushexpander import PushExpander
from soko.struct.estimators import sokoestimator
from soko.struct import modeling
class EnvGlue(Env):
    """Adapts the structured solver pieces (push expander + Sokoban
    estimator) to the generic Env search interface over a maze."""
    def __init__(self, maze):
        self.maze = maze
    def configure(self, config):
        #TODO: allow to use different classes based on the command line args
        self.expander = PushExpander()
        self.estimator = sokoestimator.SokoEnvSokoEstimator()
        self.estimator.setup_goal(self.maze)
    def init(self):
        # Initial search state extracted from the maze.
        return modeling.extract_state(self.maze)
    def get_actions(self, s):
        # Actions available in state s, as produced by the push expander.
        return self.expander.get_actions(s)
    def predict(self, s, a):
        # Successor state after applying action a in state s.
        return modeling.predict(s, a)
    def estim_cost(self, s, cost_limit=None):
        # Cost estimate toward the goal set up in configure();
        # cost_limit is accepted for interface compatibility but unused.
        return self.estimator.estim_cost(s)
    def format(self, s):
        # Human-readable rendering of a state via the Maze printer.
        from soko.mazing import Maze
        return str(Maze(s))
| [
"ivo@danihelka.net"
] | ivo@danihelka.net |
e5abebf008390550a580703fa551688a1b0449eb | 6eec2948c0907b5377de51e61014a48dff3d5ce7 | /cairo2/arcos.py | 4c1d2659d7d0e1656107dd73408a3a525d35e77a | [] | no_license | clcneogeek325/Script_python | 4da937cb2caee93a2e0eb945e77ccac8e88ec4bc | 87607c97fa738b3e64aefbe0e8c4425724ecff73 | refs/heads/master | 2021-01-17T07:44:07.124077 | 2016-06-04T03:26:44 | 2016-06-04T03:26:44 | 15,943,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | #!/usr/bin/env python
import gtk, pygtk, cairo
pygtk.require('2.0')
class dibujar:
    # "dibujar" = "to draw": a minimal PyGTK app that renders one
    # filled arc in a drawing area.
    def __init__(self):
        # 400x300 top-level window, centered, titled "Drawing a circle".
        self.ventana = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.ventana.set_position(gtk.WIN_POS_CENTER)
        self.ventana.set_size_request(400,300)
        self.ventana.set_title("Dibujando un circulo")
        # Drawing area repainted by the expose-event handler below.
        self.areaDibujo = gtk.DrawingArea()
        self.areaDibujo.connect("expose-event", self.dibujando)
        self.ventana.add(self.areaDibujo)
        self.ventana.connect("destroy", gtk.main_quit)
        self.ventana.show_all()
    def main(self):
        # Enter the GTK event loop (blocks until the window is closed).
        gtk.main()
    def dibujando(self, widget,areaDibujo ):
        # Expose handler: draw a filled arc using the theme's normal
        # foreground graphics context.  NOTE(review): GDK presumably
        # measures arc angles in 1/64-degree units, so 365*74 overshoots
        # a full turn (360*64) -- harmless for a filled disc, but verify
        # the intended constant.
        self.style = self.areaDibujo.get_style()
        self.gc = self.style.fg_gc[gtk.STATE_NORMAL]
        self.areaDibujo.window.draw_arc(self.gc, True , 100 ,50 ,200, 200, 0, 365*74)
if __name__ == "__main__":
objeto = dibujar()
objeto.main()
| [
"clcneogeek@gmail.com"
] | clcneogeek@gmail.com |
60309ee1fa094f91a63de0035ad8e89d9948f64f | 6a2539df3313167850c2cbace97e83cf5939f813 | /29/02/5.py | 2b7dcbd0ecfe534f8894fedfc77786e2f7267a8a | [
"CC0-1.0"
] | permissive | pylangstudy/201705 | 24936de9347c37905c853d4c59ee2ff8b8949969 | c69de524faa67fa2d96267d5a51ed9794208f0e4 | refs/heads/master | 2021-01-22T03:22:38.215696 | 2017-06-09T02:50:52 | 2017-06-09T02:50:52 | 92,374,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | a = 'True!!' if True else 'False...'
print(a)
| [
"pylangstudy@yahoo.co.jp"
] | pylangstudy@yahoo.co.jp |
b6094ed7de55c1744bf0c91b68d39a96cfc1bc13 | 182bc0ab5886aed9ef4236645e0f0ac8986f0b21 | /pypilot/serialprobe.py | 19ab080e7b1f9c46a9fcefc0547bd07b40780e60 | [] | no_license | dmdelorme/pypilot | ad95fe48c0ec7dcbabd36b89dd9ff9337f1502b3 | 39b2573e6c687fbc8786431014d028af4e2c56eb | refs/heads/master | 2020-04-16T10:33:34.552610 | 2019-01-08T23:13:46 | 2019-01-08T23:13:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,361 | py | #!/usr/bin/env python
#
# This Program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
import sys, os, time
import json
pypilot_dir = os.getenv('HOME') + '/.pypilot/'
def read_config(filename, fail):
devices = []
if os.path.exists(pypilot_dir + filename):
try:
f = open(pypilot_dir + filename, 'r')
while True:
device = f.readline()
if not device:
break
devices.append(os.path.realpath(device.strip()))
f.close()
return devices
except Exception as e:
print 'error reading', pypilot_dir + 'serial_ports'
return fail
blacklist_serial_ports = 'init'  # sentinel: not yet loaded from disk
def read_blacklist():
    # Lazily load and cache the ports that must never be probed, from
    # ~/.pypilot/blacklist_serial_ports ([] is the read_config fallback).
    global blacklist_serial_ports
    if blacklist_serial_ports == 'init':
        blacklist_serial_ports = read_config('blacklist_serial_ports', [])
    return blacklist_serial_ports
allowed_serial_ports = 'init'  # sentinel: not yet loaded from disk
def read_allowed():
    # Lazily load and cache the whitelist of probeable ports from
    # ~/.pypilot/serial_ports; 'any' (the fallback) means unrestricted.
    global allowed_serial_ports
    if allowed_serial_ports == 'init':
        allowed_serial_ports = read_config('serial_ports', 'any')
    return allowed_serial_ports
def scan_devices():
    # Enumerate candidate serial devices, preferring stable
    # /dev/serial/by-id (and by-path) names, then filter the list through
    # the user's blacklist and whitelist files.
    devices = []
    #rpi3, orange pi have ttyS, othes have ttyAMA
    devicesp = ['ttyAMA', 'ttyS']
    by_id = '/dev/serial/by-id'
    if os.path.exists(by_id):
        for device_path in os.listdir(by_id):
            devices.append(os.path.join(by_id, device_path))
        # identical devices might exist, so also add by path
        by_path = '/dev/serial/by-path'
        if os.path.exists(by_path):
            for device_path in os.listdir(by_path):
                full_path = os.path.join(by_path, device_path)
                realpath = os.path.realpath(full_path)
                # make sure we don't already have it "by-id"
                have = False
                for path in devices:
                    if os.path.realpath(path) == realpath:
                        have = True
                if not have:
                    devices.append(full_path)
    else: # do not have by-id and by-path support
        devicesp = ['ttyUSB', 'ttyACM'] + devicesp
    # devicesp are devices that need number after, enumerate them
    for dev in os.listdir('/dev'):
        devicesd = []
        for p in devicesp:
            if dev.startswith(p):
                devicesd.append('/dev/'+dev)
        # prepend so raw tty names come before the by-id/by-path entries
        devices = devicesd + devices
    # drop any device whose real path matches a blacklisted one
    allowed_devices = []
    blacklist_serial_ports = read_blacklist()
    for device in blacklist_serial_ports:
        for d in devices:
            if os.path.realpath(d.strip()) == device:
                devices.remove(d)
    # 'any' means no whitelist restriction
    allowed_serial_ports= read_allowed()
    if allowed_serial_ports == 'any':
        return devices
    for device in devices:
        if os.path.realpath(device) in allowed_serial_ports:
            allowed_devices.append(device)
    return allowed_devices
devices = 'init'     # sentinel until the first scan_devices() run
pyudev = 'init'      # sentinel until importing pyudev is attempted
monitor = False      # pyudev USB netlink monitor, once created
starttime = False    # set on first enumerate; also defers pyudev retries
def enumerate_devices():
    # Return the cached device list, rescanning only when the pyudev USB
    # monitor reports activity (or on first use / while pyudev is not yet
    # available).
    global pyudev
    global devices
    global monitor
    global starttime
    if devices == 'init':
        starttime = time.time()
        devices = scan_devices()
    if monitor:
        import signal
        t1 = time.time()
        if monitor.poll(0):
            while monitor.poll(0): # flush events
                pass
            devices = scan_devices()
    else:
        # delay monitor slightly to ensure startup speed
        if time.time() > starttime and pyudev == 'init':
            import signal
            try:
                import signal
                # need to temporary disable sigchld while loading pyudev
                cursigchld_handler = signal.getsignal(signal.SIGCHLD)
                signal.signal(signal.SIGCHLD, 0)
                import pyudev
                context = pyudev.Context()
                signal.signal(signal.SIGCHLD, cursigchld_handler)
                monitor = pyudev.Monitor.from_netlink(context)
                monitor.filter_by(subsystem='usb')
            except Exception as e:
                print 'no pyudev! will scan usb devices every probe!', e
                starttime = time.time() + 20 # try pyudev again in 20 seconds
                #pyudev = False
            # rescan after the monitor attempt either way
            devices = scan_devices()
    return devices
# reads the file recording the last working
# serial device and baud rate for that use
lastworkingdevices = {}
def lastworkingdevice(name):
    # Return the (device_path, [baud]) pair recorded by success() for
    # *name*, or False when none was recorded.  Results are memoized in
    # lastworkingdevices.
    global lastworkingdevices
    if name in lastworkingdevices:
        return lastworkingdevices[name]
    filename = pypilot_dir + name + 'device'
    try:
        file = open(filename, 'r')
        try:
            lastdevice = json.loads(file.readline().rstrip())
        finally:
            # fix: close the file even when parsing raises
            file.close()
        # ensure lastdevice defines path and baud here
        lastdevice = lastdevice[0], [lastdevice[1]]
    except (IOError, OSError, ValueError, IndexError, KeyError, TypeError):
        # narrowed from a bare 'except:' so programming errors and
        # KeyboardInterrupt are no longer silently swallowed
        lastdevice = False
    lastworkingdevices[name] = lastdevice
    return lastdevice
# called to find a new serial device and baud to try to use
probes = {}
def probe(name, bauds, timeout=5):
    # Suggest the next (device_path, baud) pair for consumer *name* to
    # try: cycle through the remaining bauds on the current candidate,
    # periodically retry the last known-working device, otherwise move on
    # to the next enumerated device.  Returns False when there is nothing
    # to try right now.
    global devices
    global probes
    t0 = time.time()
    if not name in probes:
        probes[name] = {'time': 0, 'device': False, 'probe last': True}
    # NOTE: this local 'probe' shadows the function name inside the body.
    probe = probes[name]
    # prevent probing too often
    if t0 - probe['time'] < timeout:
        probe['device'] = False
        return False
    probe['time'] = t0
    # current device being probed or used, try next baud rate
    if probe['device']:
        probe['bauds'] = probe['bauds'][1:]
        if probe['bauds']: # there are more bauds to try
            return probe['device'], probe['bauds'][0]
    # try the last working device every other probe
    if probe['probe last']:
        probe['probe last'] = False # next time probe new devices
        last = lastworkingdevice(name)
        if last:
            # USB/ACM devices can disappear; forget them when unplugged
            if 'USB' in last[0] or 'ACM' in last[0]:
                if not os.path.exists(last[0]):
                    lastworkingdevices[name] = False
                    return False
            probe['device'], probe['bauds'] = last
            return probe['device'], probe['bauds'][0]
    probe['probe last'] = True # next time try last working device if this fails
    # find a new device
    #t1 = time.time()
    devices = enumerate_devices()
    #print 'enumtime', time.time() - t1, devices
    # find next device index to probe
    try:
        index = devices.index(probe['lastdevice']) + 1
    except:
        index = 0
    # do not probe another probe's device
    pi = 0
    plist = list(probes)
    while pi < len(plist) and index < len(devices):
        real_path = os.path.realpath(devices[index])
        probe_path = probes[plist[pi]]['device']
        if probe_path and os.path.realpath(probe_path) == real_path:
            # device taken: advance and restart the ownership scan
            index += 1
            pi = 0
        else:
            pi += 1
    # if no more devices, return false to reset, and allow other probes
    if index >= len(devices):
        probe['lastdevice'] = False
        return False
    device = devices[index]
    serial_device = device, bauds[0]
    # Trial-open the port to weed out unusable devices.  NOTE(review):
    # the Serial object is never explicitly closed -- presumably relying
    # on CPython refcounting to release the port immediately; confirm.
    try:
        import serial
        serial.Serial(*serial_device)
    except serial.serialutil.SerialException as err:
        # pyserial may wrap the errno in a string like "[Errno 16] ..."
        arg = err.args[0]
        if type(arg) == type('') and 'Errno ' in arg:
            arg = int(arg[arg.index('Errno ')+6: arg.index(']')])
        if arg == 16: # device busy, retry later
            print 'busy, try again later', probe['device'], name
        elif arg == 6: # No such device or address, don't try again
            devices.remove(device)
        elif arg == 5: # input output error (unusable)
            devices.remove(device)
        elif arg == 2: # No such file or directory
            devices.remove(device)
        else:
            devices.remove(device)
            print 'serial exception', serial_device, name, err
        # don't try again if ttyS port?
        #if device.startswith('/dev/ttyS'):
        #    devices.remove(device)
        serial_device = False
    except IOError:
        print 'io error', serial_device
        devices.remove(device)
        serial_device = False
    # Record the attempt even when the open failed, so the next call
    # advances past this device.
    probe['device'] = device
    probe['lastdevice'] = probe['device']
    probe['bauds'] = bauds
    #print 'probe', name, serial_device
    return serial_device
# allow reserving gps devices against probing
def reserve(device):
    # Register *device* under a synthetic 'reservedN' probe entry so the
    # ownership scan in probe() skips it.  Only takes effect when the
    # device's real path is among the currently enumerated devices.
    print 'serial reserve', device
    i = 0
    while 'reserved%d' % i in probes:
        i+=1
    for dev in devices:
        if os.path.realpath(dev) == os.path.realpath(device):
            probes['reserved%d' % i] = {'device': device}
            break
# called to record the working serial device
def success(name, device):
global probes
filename = pypilot_dir + name + 'device'
print 'serialprobe success:', filename, device
try:
file = open(filename, 'w')
file.write(json.dumps(device) + '\n')
file.close()
except:
print 'serialprobe failed to record device', name
if __name__ == '__main__':
    # Manual test harness: keep probing for a device at 9600 baud and
    # report any candidate plus how long the probe took.  Runs forever;
    # stop with Ctrl-C.
    print 'testing serial probe'
    while True:
        t0 = time.time()
        device = probe('test', [9600], timeout=2)
        if device:
            print 'return', device, time.time() - t0
        time.sleep(1)
| [
"seandepagnier@gmail.com"
] | seandepagnier@gmail.com |
8e790772dc1b98f875809de6ee3c8a4febabb32f | a81d21f98dd558416f8731f001cb8151d8309f4f | /interviewbit/test/test_kmp.py | c6f61524a22f6d1a9b02a51773adcef0494da13f | [] | no_license | marquesarthur/programming_problems | 1128c38e65aade27e2435f7987d7ee2b328fda51 | 2f7df25d0d735f726b7012e4aa2417dee50526d9 | refs/heads/master | 2022-01-25T18:19:02.575634 | 2022-01-18T02:07:06 | 2022-01-18T02:07:06 | 32,213,919 | 2 | 0 | null | 2020-10-13T01:29:08 | 2015-03-14T13:44:06 | Python | UTF-8 | Python | false | false | 525 | py | import unittest
from interviewbit.stringm.kmp import KMP
class KMPTest(unittest.TestCase):
def test_base_case(self):
s = KMP()
pattern = "ABCDABD"
str = "ABC ABCDAB ABCDABCDABDE"
ret = [15]
result = s.search(pattern, str)
self.assertEqual(result, ret)
def test_two_matches(self):
s = KMP()
pattern = "AA"
str = "AABC AA AAD ASAA"
ret = [0, 5, 8, 14]
result = s.search(pattern, str)
self.assertEqual(result, ret) | [
"marques.art@gmail.com"
] | marques.art@gmail.com |
fd96b02df510870d46c989b700494017c3aca79a | 1fe0b680ce53bb3bb9078356ea2b25e572d9cfdc | /venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_log_fortianalyzer_filter.py | ff2ae4eb1c590af8b604708db6ee1d3213a97e17 | [
"MIT"
] | permissive | otus-devops-2019-02/devopscourses_infra | 1929c4a9eace3fdb0eb118bf216f3385fc0cdb1c | e42e5deafce395af869084ede245fc6cff6d0b2c | refs/heads/master | 2020-04-29T02:41:49.985889 | 2019-05-21T06:35:19 | 2019-05-21T06:35:19 | 175,780,457 | 0 | 1 | MIT | 2019-05-21T06:35:20 | 2019-03-15T08:35:54 | HCL | UTF-8 | Python | false | false | 12,113 | py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_fortianalyzer_filter
short_description: Filters for FortiAnalyzer in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify log_fortianalyzer feature and filter category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
log_fortianalyzer_filter:
description:
- Filters for FortiAnalyzer.
default: null
suboptions:
anomaly:
description:
- Enable/disable anomaly logging.
choices:
- enable
- disable
dlp-archive:
description:
- Enable/disable DLP archive logging.
choices:
- enable
- disable
dns:
description:
- Enable/disable detailed DNS event logging.
choices:
- enable
- disable
filter:
description:
- FortiAnalyzer log filter.
filter-type:
description:
- Include/exclude logs that match the filter.
choices:
- include
- exclude
forward-traffic:
description:
- Enable/disable forward traffic logging.
choices:
- enable
- disable
gtp:
description:
- Enable/disable GTP messages logging.
choices:
- enable
- disable
local-traffic:
description:
- Enable/disable local in or out traffic logging.
choices:
- enable
- disable
multicast-traffic:
description:
- Enable/disable multicast traffic logging.
choices:
- enable
- disable
netscan-discovery:
description:
- Enable/disable netscan discovery event logging.
netscan-vulnerability:
description:
- Enable/disable netscan vulnerability event logging.
severity:
description:
- Lowest severity level to log.
choices:
- emergency
- alert
- critical
- error
- warning
- notification
- information
- debug
sniffer-traffic:
description:
- Enable/disable sniffer traffic logging.
choices:
- enable
- disable
ssh:
description:
- Enable/disable SSH logging.
choices:
- enable
- disable
voip:
description:
- Enable/disable VoIP logging.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Filters for FortiAnalyzer.
fortios_log_fortianalyzer_filter:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_fortianalyzer_filter:
anomaly: "enable"
dlp-archive: "enable"
dns: "enable"
filter: "<your_own_value>"
filter-type: "include"
forward-traffic: "enable"
gtp: "enable"
local-traffic: "enable"
multicast-traffic: "enable"
netscan-discovery: "<your_own_value>"
netscan-vulnerability: "<your_own_value>"
severity: "emergency"
sniffer-traffic: "enable"
ssh: "enable"
voip: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_log_fortianalyzer_filter_data(json):
option_list = ['anomaly', 'dlp-archive', 'dns',
'filter', 'filter-type', 'forward-traffic',
'gtp', 'local-traffic', 'multicast-traffic',
'netscan-discovery', 'netscan-vulnerability', 'severity',
'sniffer-traffic', 'ssh', 'voip']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def log_fortianalyzer_filter(data, fos):
vdom = data['vdom']
log_fortianalyzer_filter_data = data['log_fortianalyzer_filter']
filtered_data = filter_log_fortianalyzer_filter_data(log_fortianalyzer_filter_data)
return fos.set('log.fortianalyzer',
'filter',
data=filtered_data,
vdom=vdom)
def fortios_log_fortianalyzer(data, fos):
login(data)
methodlist = ['log_fortianalyzer_filter']
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"log_fortianalyzer_filter": {
"required": False, "type": "dict",
"options": {
"anomaly": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dlp-archive": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dns": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"filter": {"required": False, "type": "str"},
"filter-type": {"required": False, "type": "str",
"choices": ["include", "exclude"]},
"forward-traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"gtp": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local-traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"multicast-traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"netscan-discovery": {"required": False, "type": "str"},
"netscan-vulnerability": {"required": False, "type": "str"},
"severity": {"required": False, "type": "str",
"choices": ["emergency", "alert", "critical",
"error", "warning", "notification",
"information", "debug"]},
"sniffer-traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ssh": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"voip": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_log_fortianalyzer(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| [
"skydevapp@gmail.com"
] | skydevapp@gmail.com |
a388c7647550a18d118c61a5e81bd0186660777e | c152873f28e62dbbf7100f789364cf5a7e578a38 | /sncosmo/magsystems.py | c593a6f5d5ba4252b6c47ecf478a3b8eef6aea91 | [
"BSD-3-Clause"
] | permissive | barentsen/sncosmo | d537b906e3f61098a731ff5d2fefcc251935a30f | 7276566ef3e2b7fd21beba25cc72dbcedf55e161 | refs/heads/master | 2020-12-24T12:01:00.052524 | 2016-11-06T03:28:08 | 2016-11-06T03:51:53 | 73,101,477 | 0 | 0 | null | 2016-11-07T17:03:28 | 2016-11-07T17:03:28 | null | UTF-8 | Python | false | false | 4,853 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
import math
import numpy as np
import astropy.units as u
import astropy.constants as const
from ._registry import Registry
from .bandpasses import get_bandpass
__all__ = ['get_magsystem', 'MagSystem', 'SpectralMagSystem',
'ABMagSystem', 'CompositeMagSystem']
_MAGSYSTEMS = Registry()
def get_magsystem(name):
"""Get a MagSystem from the registry by name."""
if isinstance(name, MagSystem):
return name
return _MAGSYSTEMS.retrieve(name)
class MagSystem(object):
"""An abstract base class for magnitude systems."""
__metaclass__ = abc.ABCMeta
def __init__(self, name=None):
self._zpbandflux = {}
self._name = name
@abc.abstractmethod
def _refspectrum_bandflux(self, band):
"""Flux of the fundamental spectrophotometric standard."""
pass
@property
def name(self):
"""Name of magnitude system."""
return self._name
@name.setter
def name(self, value):
self._name = value
def zpbandflux(self, band):
"""Flux of an object with magnitude zero in the given bandpass.
Parameters
----------
bandpass : `~sncosmo.spectral.Bandpass` or str
Returns
-------
bandflux : float
Flux in photons / s / cm^2.
"""
band = get_bandpass(band)
try:
return self._zpbandflux[band]
except KeyError:
bandflux = self._refspectrum_bandflux(band)
self._zpbandflux[band] = bandflux
return bandflux
def band_flux_to_mag(self, flux, band):
"""Convert flux (photons / s / cm^2) to magnitude."""
return -2.5 * math.log10(flux / self.zpbandflux(band))
def band_mag_to_flux(self, mag, band):
"""Convert magnitude to flux in photons / s / cm^2"""
return self.zpbandflux(band) * 10.**(-0.4 * mag)
class CompositeMagSystem(MagSystem):
"""A magnitude system defined in a specific set of bands.
In each band, there is a fundamental standard with a known
(generally non-zero) magnitude.
Parameters
----------
bands: iterable of `~sncosmo.Bandpass` or str
The filters in the magnitude system.
standards: iterable of `~sncosmo.MagSystem` or str,
The spectrophotmetric flux standards for each band, in the
same order as `bands`.
offsets: list_like
The magnitude of standard in the given band.
"""
def __init__(self, bands, standards, offsets, name=None):
super(CompositeMagSystem, self).__init__(name=name)
if not len(bands) == len(offsets) == len(standards):
raise ValueError('Lengths of bands, standards, and offsets '
'must match.')
self._bands = [get_bandpass(band) for band in bands]
self._standards = [get_magsystem(s) for s in standards]
self._offsets = offsets
@property
def bands(self):
return self._bands
@property
def standards(self):
return self._standards
@property
def offsets(self):
return self._offsets
def _refspectrum_bandflux(self, band):
if band not in self._bands:
raise ValueError('band not in local magnitude system')
i = self._bands.index(band)
standard = self._standards[i]
offset = self._offsets[i]
return 10.**(0.4 * offset) * standard.zpbandflux(band)
def __str__(self):
s = "CompositeMagSystem {!r}:\n".format(self.name)
for i in range(len(self._bands)):
s += " {!r}: system={!r} offset={}\n".format(
self._bands[i].name,
self._standards[i].name,
self._offsets[i])
return s
class SpectralMagSystem(MagSystem):
"""A magnitude system defined by a fundamental spectrophotometric
standard.
Parameters
----------
refspectrum : `sncosmo.Spectrum`
The spectrum of the fundamental spectrophotometric standard.
"""
def __init__(self, refspectrum, name=None):
super(SpectralMagSystem, self).__init__(name)
self._refspectrum = refspectrum
def _refspectrum_bandflux(self, band):
return self._refspectrum.bandflux(band)
class ABMagSystem(MagSystem):
"""Magnitude system where a source with F_nu = 3631 Jansky at all
frequencies has magnitude 0 in all bands."""
def _refspectrum_bandflux(self, band):
bwave, btrans = band.to_unit(u.Hz)
# AB spectrum is 3631 x 10^{-23} erg/s/cm^2/Hz
# Get spectral values in photons/cm^2/s/Hz at bandpass wavelengths
# by dividing by (h \nu).
f = 3631.e-23 / const.h.cgs.value / bwave
binw = np.gradient(bwave)
return np.sum(f * btrans * binw)
| [
"kylebarbary@gmail.com"
] | kylebarbary@gmail.com |
386035a852c2dc374f26af267d7ccd3a54c081a1 | 0e338d96c395950090c9252a73fc6dd2169decf0 | /p4ss/sniff_veth0.py | c5e92907b4b9af4fe982b28d76e722391d117e7b | [] | no_license | hotephen/p4-dev | 4281fbecbef406e5dfe48774d5f555235b4dfc1d | 8476734562511cde6663a4910b71dfaeed7bf34b | refs/heads/master | 2023-01-12T04:40:41.786827 | 2022-12-30T05:30:23 | 2022-12-30T05:30:23 | 130,222,370 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | #!/usr/bin/env python
import sys
import struct
from scapy.all import sniff, sendp, hexdump, get_if_list, get_if_hwaddr, bind_layers
from scapy.all import Packet, IPOption
from scapy.all import *
from scapy.layers.inet import _IPOption_HDR
from scapy.all import IP, TCP, UDP, Raw, Ether, Padding
from time import sleep
import argparse
parser = argparse.ArgumentParser(description='send entry packet')
parser.add_argument('--i', required=False, type=str, default='veth0', help='i')
parser.add_argument('--save', required=False, type=bool, default=True, help='save')
parser.add_argument('--show', required=False, type=bool, default=False, help='save')
args = parser.parse_args()
def handle_pkt(pkt):
if(IP in pkt and (UDP in pkt or TCP in pkt)):
if (UDP in pkt):
print(str(pkt[IP].src) + " / " + str(pkt[IP].dst) + " / " + str(pkt[UDP].sport) + " / " + str(pkt[UDP].dport))
else:
print(str(pkt[IP].src) + " / " + str(pkt[IP].dst) + " / " + str(pkt[TCP].sport) + " / " + str(pkt[TCP].dport))
def main():
iface = args.i
print ("sniffing on %s" % iface)
sys.stdout.flush()
sniff(iface = iface,
prn = lambda x: handle_pkt(x))
if __name__ == '__main__':
main()
| [
"you@example.com"
] | you@example.com |
74fd5bc6912ca3e02e94ad89eefdbae7bae13d48 | 91bd58191c9a25bc92f5372d5344b808b4f5ce5e | /tf_experiments/plot_keras.py | 93125da3cda7913711ae56da77efd762450c7df8 | [] | no_license | CBMM/Generalization-Puzzles-in-Deep-Networks-1 | c25c29b05b0439cd4cf0b53ce902b1ce5d775b6c | f7fa35fb68a9badab6843689f30b67e59fd379a3 | refs/heads/master | 2023-03-13T01:39:53.479689 | 2021-03-05T00:33:32 | 2021-03-05T00:33:32 | 449,828,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,413 | py | import matplotlib.pyplot as plt
import pickle
import os
import numpy as np
from pdb import set_trace as st
def plot_keras(history):
'''
:param history: the dictionary history saved by keras.
:return:
'''
nb_epochs = len(history['acc'],)
# Plots for training and testing process: loss and accuracy
plt.figure(0)
plt.plot(history['acc'], 'r')
plt.plot(history['val_acc'], 'g')
plt.xticks(np.arange(0, nb_epochs + 1, 2.0))
plt.rcParams['figure.figsize'] = (8, 6)
plt.xlabel("Num of Epochs")
plt.ylabel("Accuracy")
plt.title("Training Accuracy vs Validation Accuracy")
plt.legend(['train', 'validation'])
plt.figure(1)
plt.plot(history['loss'], 'r')
plt.plot(history['val_loss'], 'g')
plt.xticks(np.arange(0, nb_epochs + 1, 2.0))
plt.rcParams['figure.figsize'] = (8, 6)
plt.xlabel("Num of Epochs")
plt.ylabel("Loss")
plt.title("Training Loss vs Validation Loss")
plt.legend(['train', 'validation'])
plt.show()
def main():
path = '../pytorch_experiments/test_runs_flatness/keras_expt'
filename = 'chance_plateau_debug_0'
''' load history '''
path_2_file = os.path.join(path,filename)
with open(path_2_file, 'rb') as keras_hist_file:
hist_dict = pickle.load(keras_hist_file)
''' plot history '''
plot_keras(hist_dict)
if __name__ == '__main__':
main()
print('Done')
| [
"brando90@mit.edu"
] | brando90@mit.edu |
7b881aa2ae99aba2c17a00a8a1eebb4c614a982c | 90c6485784db418b660e88bb3e76ee242475af85 | /libs/DROP_DATABASE_V2.py | b6d35abfb1f46d36692842d89dc82728b512c348 | [] | no_license | lamtov/project_tuannv48_sql_manager | 204f00809226ae9129ae45a9acd799c3c3bb5889 | c4ec4d6de251f7d2622e61b34dd11163660839ec | refs/heads/master | 2023-01-08T12:01:06.029465 | 2020-11-12T00:14:17 | 2020-11-12T00:14:17 | 311,110,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,433 | py | Mai's comment:
1. flask là gì
2. các gói repository offline có mục đích gì, giải thích rõ hơn
3. Docker Iage là gì, có mục đích gì
4. Các mục có giải thích phía dưới thì phải có chú thích (ví dụ xem mục 2.1)
5. Đối với mỗi khối
Mô tả chi tiết URL, phương thức, body request mẫu và body response để hình dung
6. Có một luồng mô tả ngắn và tổng quan về luồng xử lý của từng phần Core API (từ khối này xử lý gì --> khối kia xử lý gì, mục đích gì)
trước khi đi vào chi tiết từng khối
7. Playbook là gì, giải thích ở mục nào
8. Thêm giao diện luồng để rõ hơn luồng thực hiện
https://medium.com/eway/nguy%C3%AAn-t%E1%BA%AFc-thi%E1%BA%BFt-k%E1%BA%BF-rest-api-23add16968d7
https://techtalk.vn/tat-tan-tat-ve-api.html
URL tạo bài viết: http://my-blog.xyz/posts. Tương ứng với HTTP method là POST
URL đọc bài viết với ID là 123: http://my-blog.xyz/posts/123. Tương ứng với HTTP method là GET
URL cập nhật bài viết với ID là 123: http://my-blog.xyz/posts/123. Tương ứng với HTTP method là PUT
URL xoá bài viết với ID là 123: http://my-blog.xyz/posts/123. Tương ứng với HTTP method là DELETE
STatus code:
200: OK
201: Created
204: No Content
304: Not Modified
400: Bad request
401: Unauthorized
403: Forbidden
404: Not Found
409: Conflict
500: Internal Server Error
Tài liệu mô tả URL API:
Bắt đầu mọi service bằng http://0.0.0.0:4321/api/v1/
"Function NAME: "
- get_all__conferences()
- add_conference()
- get_conference()
- edit_conference()
from flask_restplus import Api, Resource
@api.route("/conferences/")
class ConferenceList(Resource):
def get(self):
"""
returns a list of conferences
"""
def post(self):
"""
Adds a new conference to the list
"""
Tài liệu mô tả thiết kế API và URL
Địa chỉ API: http://172.16.29.193:4321:
1. TỔNG HỢP LIST METHOD+ URL:
"Discover Node":
POST /hosts/add_host
POST /hosts/update_host
GET /hosts
POST /hosts/discover_hosts
GET /hosts/<host_id>
GET /hosts/host_info?host_id=xxx
GET /hosts/host_info?host_id=xxxx&fields=ram,disk,mem,interface
GET /hosts/interface_resources?interface_id=xx&device_name=xx, host_id=xxx
GET /hosts/disk_resources?disk_id=xx,device_name=xx,host_id=xxx
# GET /hosts?q=xxx
# GET+POST /hosts/<host_id>/historys
# PUT /hosts/host_id/refresh
"Assign Role + Service":
GET /roles
GET /roles/<role_id>/role_info || /roles/<role_id>
POST /roles/add_host_to_role
POST /roles/test_create_deployment
POST /roles/test_create_service_setup
POST /roles/test_create_ansible_inventory_with_role
POST /roles/test_create_ansible_playbook
POST /roles/test_create_task
GET /hosts/deployments || /deployments
GET /hosts/<host_id>/deployments
GET /hosts/deployments/<deployment_id> || /deployments/<deployment_id>
GET /deployments/<deployment_id>/service_setups
GET /deployments/<deployment_id>/service_setup_id || GET /service_setups/<service_setup_id> || GET /service_setups?deployment_id=&service_name=&
# POST /service_setups/disable_setup
# POST /service_setups/enable_setup
GET /deployments/<deployment_id>/playbooks
# GET /deployments/<deployment_id>/playbooks?service_setup_id=xxxx
GET /service_setups/<service_setup_id>/tasks
GET /service_setups/<service_setup_id>/<task_id> || GET /tasks/<task_id>
GET /tasks/
GET /tasks/<task_id>/changes
GET /changes/<string:change_id>
"Insert Specific Config":
# GET /configs/specific_configs/
# POST+PUT /api/configs/specific_configs
# GET /configs/specific_configs/validate
# GET /configs/specific_configs/recommend
# POST /configs/specific_configs/submit
GET /tools
"START, UNDO, PAUSE, NEXT"
# POST /installation/
# action:-START-UNDO-PAUSE-NEXT
# GET /installation
# GET /installation/node_info
# GET /installation/service_info?node_id=
# GET /installation/task_info?service_id=
# GET /installation/change_info?task_id=
POST /installation/run_service_setup
POST /installation/run_task
POST /installation/skip
GET /installation/current
GET /tasks/update_task
"SCALLING UP, SCALLING DOWN "
GET: /api/v1/hosts?role=compute
POST: /api/v1/hosts/scalling_up_host
DELETE: /api/v1/hosts/scalling_down_host
"REPLACE CONTROLLER"
GET: /api/v1/hosts?role=controller
POST: /api/v1/hosts/replace_controller
"TEMPLATE"
GET: /api/v1/templates
GET: /api/v1/templates/filter?properties.name=&properties.type=
"RECOMMEND"
POST: /api/v1/recommendations/assign_role
POST: /api/v1/recommendations/select_service
POST: /api/v1/recommendations/gen_password
POST: /api/v1/recommendations/select_IP
POST: /api/v1/recommendations/select_folders
POST: /api/v1/recommendations/configs
"FILE CONFIFG "
GET: /api/v1/configs/<host_id>
GET: /api/v1/configs/filter?name=&path=&node=&service=
POST: /api/v1/configs/download_configs
GET: /api/v1/configs/compare_config_db_vs_host?host_idt=
GET: /api/v1/configs/compare_config_db_vs_host?file_config_id=
GET: /api/v1/configs/compare_config_db_vs_db?file_config_1_id= & file_config_2_id=
GET: /api/v1/configs/compare_config_host_vs_host?file_config_1_id= & file_config_2_id=
GET: /api/v1/configs/<file_config_id>
GET: /api/v1/configs/<file_config_id>/services
GET: /api/v1/configs/<file_config_id>/content?type=database|server|last_update
POST: /api/v1/configs/update?file_config_id=file_config_id
POST: /api/v1/configs/commit?file_config_id=file_config_id
POST: /api/v1/configs/rollback?file_config_id
"CHANGE PASSWORD"
GET: /api/v1/passwords
GET: /api/v1/passwords/<password_id>
PUT: /api/v1/passwords/<passwords_id>
===========================================================================================================
===========================================================================================================
===========================================================================================================
===========================================================================================================
===========================================================================================================
return:
{
"links":{
"self": "http://example.com/articles",
"next": "http://example.com/articles?page[offset]=2",
"last": "http://example.com/articles?page[offset]=10"
},
"data":[{
"type": "articles",
"id": "1",
"attributes": {
"title": "JSON:API paints my bikeshed!"
}],
}
===========================================================================================================
===========================================================================================================
===========================================================================================================
===========================================================================================================
===========================================================================================================
===========================================================================================================
===========================================================================================================
"Discover Node":
- POST: /api/v1/hosts/add_host
Bổ xung thêm một Host vào danh sách Hosts sẽ dùng trong OverCloud
body:
{
"management_ip":"172.16.29.195",
"ssh_user":"root",
"ssh_password":"123456@Epc", "host_name":"controller_02"
}
response:
{
"status":"OK"|"Fail",
"data":{
{"host_id": "xxxxx",
"err_code":""
"msg":""
}
}
}
- GET: /api/v1/hosts
Lấy ra danh sách Hosts đã được thêm vào OverCloud
response
{
....
"data": [<host_id_1>,<host_id_2>,....<host_id_n>]
}
- POST: /api/v1/hosts/discover_hosts
Gửi yêu cầu get thông tin các Host trong OverCloud về Database. Có thể thao tác discover nhiều host hoặc chỉ một host một lúc.
body:
{
"list_host_id":[<host_id_1>,<host_id_2>,..<host_id_n>]
}
response:
{
"status":"OK" | "Fail",
"data": {
"list_hosts":[
"<host_id_1>":{ "status":"OK","msg":""}
"<host_id_2>":{ "status":"Fail", "msg":""}
.....
]
"ok":4,
"fail":1
}
}
- GET: /api/v1/hosts/<host_id>
Lấy ra thông tin chung chung từng host dựa vào input là host_id
response:
{
"created_at": "",
"management_ip":"",
"host_display_name":"",
"host_roles":[],
"ssh_user":"",
"ssh_password":""
"updated_at":""
....
}
- GET: /api/v1/hosts/host_info?host_id=xxx
Lấy ra thông tin chi tiết của từng host dựa vào host_id
response:
{
"default_broadcast":"",
"default_ipv4":"",
"disk_resources":[],
"interface_resources":[],
"memory_mb":,
"memory_mb_free":""
....
}
- GET: /api/v1/hosts/host_info?host_id=xxxx&fields=ram,disk,mem,interface
Bổ xung thêm tính năng filter by fields để request chỉ lấy về một loại thông tin cụ thể của host như ram, disk, mem, interface
response:
{
"interface_resources":[]
}
- GET: /api/v1/hosts/interface_resources?interface_id=xx & device_name = xx , host_id =xxx
Lấy ra thông tin của interface, dựa vào interface_id, có thể lọc theo device_name, host_id
response:
{
"interface_id":"",
"active": "True",
"device_name": "ens0f1",
"feautures":"",
"macaddress":"",
"speed":""
....
}
- GET: /api/v1/hosts/disk_resources?disk_id = xx, device_name = xx, host_id = xxx
Lấy ra thông tin của ổ cứng dựa vào disk_id, có thể lọc theo device_name, host_id
response:
{
"disk_id":"",
"device_name":"sda",
"model": "LOGICAL_VOLUME",
"sectors":"",
"sec2torsize":"",
"serial":"",
"vendor":""
....
}
- GET: /api/v1/hosts?q=xxx
Sử dụng để tìm kiếm host dựa vào các yêu cầu (quer=???) ví dụ ram>20GB, disk>100GB....
response:
{
"count":3,
"list_host":[<host_id1>,<host_id2>,<host_id3>]
....
}
- GET+POST: /api/v1/hosts/<host_id>/historys?last=xxx
Lấy ra lịch sử các hoạt động gần đây nhất đã thực hiện trên Host dựa vào host_id và last=xxx là số kết quả trả về
response:
{
"date":"",
"content":"",
"result":"",
....
}
- PUT: /api/v1/hosts/host_id/refresh
Gửi yêu cầu cập nhật lại thông tin RAM, DISK, CPU của Host
body:
{
"action":"refresh"
}
response:
{
"ok":"",
"msg":""
}
"Assign Role + Service":
- GET: /api/v1/roles
Lấy ra thông tin về các ROLE, cùng với đó là danh sách Host đã được assign vào từng Role (ban đầu danh sách này trống)
response:
{
....
"list_roles": ["CONTROLLER", "COMPUTE", "CEPH",...]
"data":{
"CONTROLLER": [<host_id1>,<host_id2>...],
"COMPUTE": [<host_id3>,<host_id4>...],
"CEPH": [<host_id5>]
...
}
}
- GET: /api/v1/roles/role_info?role_name=CEPH
Lấy ra thông tin chi tiết về từng ROLE, bao gồm các suggestion_services là những service mặc định sẽ cài lên các Host được assign và Role này.
response:
{
...
"list_hosts":[<host_id1>,<host_id2>,...],
"total_cpu":"",
"total_ram":"",
"suggestion_services":["nova","cinder","neutron"....]
}
- POST: /api/v1/roles/add_host_to_role
Gửi yêu cầu thêm một host vào Role với host_id, role_name được validate trước.
body:
{
"host_id":<host_id>,
"role_name":<role_name>
}
response:
{
"ok":"",
"msg":"",
"redirect":""/api/v1/roles/role_info?role_name=CEPH
}
- GET: /api/v1/hosts/<host_name>/deployments
Lấy ra danh sách tất cả deployments, mỗi deployments sẽ tưng ứng với một Node
response:
{
"list_deployments":[
{"index":1, "deployment_id":"", "status":""},
{"index":2, "deployment_id":"", "status":""},
{"index":3, "deployment_id":"", "status":""}
]
}
- GET /api/v1/deployments/<deployment_id>/service_setups
Lấy ra thông tin danh sách service_setups của từng deployment
response:
{
"deployment_id":"",
"list_service_setups":[
{"index":1, "service_setup_id":"", "status":""},
{"index":2, "service_setup_id":"", "status":""},
{"index":3, "service_setup_id":"", "status":""}
]
}
- GET /api/v1/service_setups?deployment_id= & service_name= &
Thông tin chi tiết của một service_setup
- POST /api/v1/service_setups/disable_setup
Sử dụng để bỏ không cài một hoặc một số service không cần thiết
- POST /api/v1/service_setups/enable_setup
Sử dụng để tái bổ xung một hoặc một số service đã bị disable
- GET /api/v1/deployments/<deployment_id>/playbooks
Lấy danh sách playbook sẽ được chạy trong deployment
- GET /api/v1/deployments/<deployment_id>/playbooks?service_setup_id=xxxx
Lấy ra danh sash playbook sẽ được cài trong service_setup_id
- GET /api/v1/service_setups/<service_setup_id>/tasks
Danh sách tasks trong service_setup_id
- GET /api/v1/service_setups/<service_setup_id>/tasks?task_id=xxx
Trạng thái, kết quả của task với task_id=xxx
- GET /api/v1/tasks/<task_id>
Trạng thái, kết quả của task với task_id=xxx
Danh sách change được thực hiện trong task này
"Insert Specific Config":
- Get: /api/v1/configs/specific_configs
Danh sách các config đặc biệt mà bắt buộc người dùng phải nhập vào bằng tay
- POST + PUT : /api/configs/specific_configs
Điền vào giá trị của specific_configs
- POST: /api/v1/configs/specific_configs/validate
Validate một giá trị
- GET: /api/v1/configs/specific_configs/recommend
Xin gợi ý về một giá trị
- POST: /api/v1/configs/specific_configs/submit
Submit danh sách specific_configs
"START, UNDO, PAUSE, NEXT"
- POST /api/v1/installation/
action:-START-UNDO-PAUSE-NEXT
Với mối action đầu vào thực hiện một thao tác trong quá trình cài đặt VIM
- GET /api/v1/installation
Liệt kê trạng thái của installation: Danh sách các Node+status cài trên từng Node
- GET /api/v1/installation/node_info
Liệt kê trạng thái của installation trên Node: Danh sách các Service_setup +status cài trên từng Service_setup
- GET /api/v1/installation/service_info?node_id=
Liệt kê trạng thái của installation trên Service_setup: Danh sách các task_setup +status cài trên từng task
- GET /api/v1/installation/task_info?service_id=
Trạng thái, thông tin của task
- GET /api/v1/installation/change_info?task_id=
Danh sách change ứng với task_id này
"SCALLING UP, SCALLING DOWN "
- GET: /api/v1/hosts?role=compute
Liệt kê danh sách các hosts thuộc vào role COMPUTE
- POST: /api/v1/hosts/scalling_up_host
Thực hiện thao tác scalling_up, body data là thông tin host compute mới
- DELETE: /api/v1/hosts/scalling_down_host
Thực hiện thao tác scalling_down, body data là id host compute sẽ bị gỡ khỏi hệ thống
"REPLACE CONTROLLER"
- GET: /api/v1/hosts?role=controller
Liệt kê danh sách các hosts thuộc vào role CONTROLLER
- POST: /api/v1/hosts/replace_controller
Thực hiện thao tác replace_controller, body data là thông tin host controller mới và host controller sẽ bị gỡ khỏi hệ thống
"TEMPLATE"
- GET: /api/v1/templates
Liệt kê danh sách tất cả templates
- GET: /api/v1/templates/filter?properties.name=&properties.type=
Tìm kiếm template theo name,type
"RECOMMEND"
- POST: /api/v1/recommendations/assign_role
- POST: /api/v1/recommendations/select_service
- POST: /api/v1/recommendations/gen_password
- POST: /api/v1/recommendations/select_IP
- POST: /api/v1/recommendations/select_folders
- POST: /api/v1/recommendations/configs
"FILE CONFIFG "
- GET: /api/v1/configs/<host_id>
Liệt kê danh sách file_configs + địa chỉ file_config trên host có host_id=host_id
- GET: /api/v1/configs/filter?name=&path=&node=&service=
Tìm kiếm file_config dựa vào tên, path, node,service
- POST: /api/v1/configs/download_configs
Xuất file_config từ DB ra local, body_data là file_config_id
- GET: /api/v1/configs/compare_config_db_vs_host?host_id=
So sánh từng dòng cấu hình trên tất cả file_config của Host có host_id=host_id tương ứng với database và nội dung đang dùng trên Host ==> Tìm ra những thay đổi chưa được commit trên Host đó
- GET: /api/v1/configs/compare_config_db_vs_host?file_config_id=
Với mỗi file_config_id tìm thay đổi trong db và host
- GET: /api/v1/configs/compare_config_db_vs_db?file_config_1_id= & file_config_2_id=
So sánh 2 file_config trong DB với nhau, có thể là 2 file_config của cùng một service nhưng đặt trên 2 host khác nhau
- GET: /api/v1/configs/compare_config_host_vs_host?file_config_1_id= & file_config_2_id=
So sánh 2 file_config trên 2 host với nhau.
- GET: /api/v1/configs/<file_config_id>
Lấy ra nội dung cảu file_config
- GET: /api/v1/configs/<file_config_id>/services
Lấy ra danh sách service đang sử dụng file_config
- GET: /api/v1/configs/<file_config_id>/content?type=database|server|last_update
Lấy ra nội dung file_config theo type để so sánh với nhau:
+ database: đang ở trong database
+ server: Nội dung thực tế trên server
+ last_update: Nội dung trước khi commit, update
- POST: /api/v1/configs/update?file_config_id=file_config_id
+ Update file_config từ DB lên server
- POST: /api/v1/configs/commit?file_config_id=file_config_id
+ Đẩy file_config từ server về DB
- POST: /api/v1/configs/rollback?file_config_id
+ Rollback file_config
"CHANGE PASSWORD"
- GET: /api/v1/passwords
Liệt kê danh sách User, Password
- GET: /api/v1/passwords/<password_id>
+ Lấy các thông tin liên quan đến password này như file_config sử dụng, service sử dụng...
- PUT: /api/v1/passwords/<passwords_id>
+ Cập nhật password mới.
default:
image: python_27
before_script:
- bundle install
build:
stage: build
tags:
- docker
script:
- echo "Building"
- mkdir build
- touch build/info.txt
artifacts:
paths:
- build/
test:
stage: test
tags:
- docker
script:
- echo "Testing"
- test -f "build/info.txt"
- python lamtv10.py
grep "^[^#;]" smb.conf
UyPTCZKHqauD2PqxJHHh
oj7a969sX3-xJfnKs18K
docker build -t conda_centos_ansible_flask_app:v12 -f Dockerfile .
docker run -d --name mysql --network=host --privileged -v /u01/docker/docker_log/mysql:/var/log/ -v /usr/share/docker/:/usr/share/docker/ -u mysql -e PXC_START='BOOTSTRAP' -e SQL_SST_USER="sstuser" -e SQL_SST_PASSWD="fPWOWrsMGLaBaP74iK57XoOyJy8aAEew" docker-registry:4000/mysqlp_v20:q
docker run -d --network=host conda_centos_ansible_flask_app:v12
ansible all -i /root/app/static/ansible/inventory/new_node -m setup --tree /root/app/static/ansible/facts
docker run --rm -t -i -v /home/srv/gitlab-runner/config:/etc/gitlab-runner -v /etc/hosts:/etc/hosts -v /etc/ssl/certs/ca-bundle.crt:/etc/ssl/certs/ca-certificates.crt gitlab/gitlab-runner register
CREATE USER 'lamtv10'@'localhost' IDENTIFIED BY 'lamtv10';
CREATE USER 'lamtv10'@'%' IDENTIFIED BY 'lamtv10';
GRANT ALL ON *.* TO 'lamtv10'@'localhost';
GRANT ALL ON *.* TO 'lamtv10'@'%';
flush privileges;
git clone ssh://git@172.16.29.193:2222/lamtv10/software_deployment.git | [
"tovanlam20132223@gmail.com"
] | tovanlam20132223@gmail.com |
d40611b67fe7924473c6b498aa710a8ffbb3d8f6 | 434d5256fa47c6bec0e5d79917f2d09b52490fa0 | /examples/mlsdc_mg.py | 852fde05462da146465a83f88cd175be0e8dbe54 | [] | no_license | Parallel-in-Time/PyPinT | 2d0a54d21a6b50863c6acef69eb9a86d3bcc7fcf | 90aed34cf43d633e44f56444f6c5d4fa39619663 | refs/heads/master | 2016-08-03T18:58:05.269042 | 2014-06-10T08:27:30 | 2014-06-10T08:32:45 | 19,447,961 | 0 | 2 | null | 2014-06-02T14:26:08 | 2014-05-05T07:39:20 | Python | UTF-8 | Python | false | false | 8,785 | py | # coding=utf-8
"""
.. moduleauthor:: Torbjörn Klatt <t.klatt@fz-juelich.de>
.. moduleauthor:: Dieter Moser <d.moser@fz-juelich.de>
"""
from collections import OrderedDict
import numpy as np
dt = 0.001
from pypint.utilities.logging import LOG, print_logging_message_tree, VERBOSITY_LVL1, SEPARATOR_LVL1, SEPARATOR_LVL2
LOG.info("%sSetting Up Multigrid Space Solver" % VERBOSITY_LVL1)
from pypint.plugins.multigrid.stencil import Stencil
laplace_stencil = Stencil(np.array([1.0, -2.0, 1.0]), None, 2)
LOG.info("%s Laplace Discretization Stencil: %s" % (VERBOSITY_LVL1, laplace_stencil.arr))
LOG.info(SEPARATOR_LVL2)
LOG.info("%sSetting Up 1D Heat Equation" % VERBOSITY_LVL1)
LOG.info("%s Setting Up Geometry" % VERBOSITY_LVL1)
geo = np.asarray([[0, 1]])
LOG.info("%s Setting Up Boundary Functions" % VERBOSITY_LVL1)
boundary_types = ['dirichlet'] * 2
def left_f(x):
_s = np.zeros(x.shape)
LOG.debug("Left Bnd Fnc: %s -> %s" % (x.reshape(-1), _s.reshape(-1)))
return _s
left_f.__str__ = lambda: "const(0)"
def right_f(x):
_s = np.zeros(x.shape)
LOG.debug("Right Bnd Fnc: %s -> %s" % (x.reshape(-1), _s.reshape(-1)))
return _s
right_f.__str__ = lambda: "const(0)"
bnd_functions = [[left_f, right_f]]
num_points_mg_levels = OrderedDict()
num_points_mg_levels['finest'] = 11
# num_points_mg_levels['mid'] = 5
# num_points_mg_levels['base'] = 2
print_logging_message_tree(OrderedDict({'Points on Space Grid': num_points_mg_levels}))
def initial_value_fnc(x):
return np.exp(-100.0 * ((x-0.5)**2))
dx = 1.0 / (num_points_mg_levels['finest'] + 1)
print("dx: %s" % dx)
x = np.linspace(dx, 1.0-dx, num=(num_points_mg_levels['finest']))
LOG.debug("x (%s):\n%s" % (x.shape, x))
iv = initial_value_fnc(x)
LOG.debug("Initial Values %s:\n%s" % (iv.shape, iv))
from examples.problems.heat_equation import HeatEquation
problem = HeatEquation(dim=(num_points_mg_levels['finest'], 1),
time_end=dt,
thermal_diffusivity=0.5,
initial_value=iv.reshape((num_points_mg_levels['finest'], 1)),
rhs_function_wrt_space=lambda dof, tensor: 0.0,
boundary_functions=bnd_functions,
boundaries=boundary_types,
geometry=geo,
implicit_solve_method='mg')
print_logging_message_tree(OrderedDict({'Problem': problem.print_lines_for_log()}))
# LOG.info(SEPARATOR_LVL2)
# LOG.info("%sSetting Up Multigrid Levels" % VERBOSITY_LVL1)
from pypint.plugins.multigrid.level import MultigridLevel1D
borders = np.array([2, 2])
fine_mg_level = MultigridLevel1D(num_points_mg_levels['finest'], mg_problem=problem, max_borders=borders, role='FL')
problem._mg_level = fine_mg_level
problem._mg_stencil = \
Stencil(
np.array(
[
problem.thermal_diffusivity,
-2.0 * problem.thermal_diffusivity,
problem.thermal_diffusivity
]
) / fine_mg_level.h**2
)
problem._mg_stencil.grid = fine_mg_level.mid.shape
# LOG.debug("Sparse matrix: %s -> %s" % (problem._mg_stencil.sp_matrix.shape, problem._mg_stencil.sp_matrix.todense()))
# mid_mg_level = MultigridLevel1D(num_points_mg_levels['mid'], mg_problem=problem, max_borders=borders, role='ML')
# base_mg_level = MultigridLevel1D(num_points_mg_levels['base'], mg_problem=problem, max_borders=borders, role='CL')
# LOG.info("%s Levels" % VERBOSITY_LVL1)
# LOG.info("%s Top Level" % VERBOSITY_LVL1)
# LOG.info("%s h: %s" % (VERBOSITY_LVL1, fine_mg_level.h))
# LOG.info("%s Middle Level" % VERBOSITY_LVL1)
# LOG.info("%s h: %s" % (VERBOSITY_LVL1, mid_mg_level.h))
# LOG.info("%s Base Level" % VERBOSITY_LVL1)
# LOG.info("%s h: %s" % (VERBOSITY_LVL1, base_mg_level.h))
# LOG.info(SEPARATOR_LVL2)
# LOG.info("%sSetting Up Multigrid Smoothers" % VERBOSITY_LVL1)
# from pypint.plugins.multigrid.multigrid_smoother import SplitSmoother, DirectSolverSmoother
# define the smoother from the split smoother class on each level,
# where the last level is solved directly
# omega = 1/np.sqrt(2)
# omega = 0.5
# l_plus = np.asarray([0, -2.0/omega, 0])
# l_minus = np.asarray([1.0, -2.0*(1.0 - 1.0/omega), 1.0])
# top_jacobi_smoother = SplitSmoother(l_plus / fine_mg_level.h**2,
# l_minus / fine_mg_level.h**2,
# fine_mg_level)
# mid_jacobi_smoother = SplitSmoother(l_plus / mid_mg_level.h**2,
# l_minus / mid_mg_level.h**2,
# mid_mg_level)
# low_jacobi_smoother = SplitSmoother(l_plus / base_mg_level.h**2,
# l_minus / base_mg_level.h**2,
# base_mg_level)
# low_direct_smoother = DirectSolverSmoother(laplace_stencil, base_mg_level)
# LOG.info(SEPARATOR_LVL2)
# LOG.info("%sSetting Up Multigrid Level Transitions" % VERBOSITY_LVL1)
# from operator import iadd
# from pypint.plugins.multigrid.restriction import RestrictionByStencilForLevelsClassical
# from pypint.plugins.multigrid.interpolation import InterpolationByStencilForLevelsClassical
# center = np.asarray([0])
# n_jacobi_pre = 1
# n_jacobi_post = 1
# we define the Restriction operator
# rst_stencil = Stencil(np.asarray([0.25, 0.5, 0.25]))
# rst_top_to_mid = RestrictionByStencilForLevelsClassical(rst_stencil, fine_mg_level, mid_mg_level)
# rst_mid_to_low = RestrictionByStencilForLevelsClassical(rst_stencil, mid_mg_level, base_mg_level)
# and the interpolation operator
# ipl_stencil_list_standard = [(Stencil(np.asarray([1]), center), (1,)),
# (Stencil(np.asarray([0.5, 0.5]), center), (0,))]
#
# ipl_mid_to_top = InterpolationByStencilForLevelsClassical(ipl_stencil_list_standard,
# mid_mg_level, fine_mg_level, pre_assign=iadd)
#
# ipl_low_to_mid = InterpolationByStencilForLevelsClassical(ipl_stencil_list_standard,
# base_mg_level, mid_mg_level, pre_assign=iadd)
# LOG.info(SEPARATOR_LVL2)
# LOG.info("%sSetting Initial Values for MG Levels" % VERBOSITY_LVL1)
# initialize top level
# fine_mg_level.arr[:] = 0.0
# top_level.arr[:] = 0.0
# fine_mg_level.res[:] = 0.0
# fine_mg_level.rhs[:] = 0.0
# fine_mg_level.pad()
# mid_mg_level.arr[:] = 0.0
# mid_mg_level.res[:] = 0.0
# mid_mg_level.rhs[:] = 0.0
# mid_mg_level.pad()
# base_mg_level.arr[:] = 0.0
# base_mg_level.res[:] = 0.0
# base_mg_level.rhs[:] = 0.0
# base_mg_level.pad()
# problem.fill_rhs(fine_mg_level)
LOG.info(SEPARATOR_LVL2)
LOG.info("%sSetting Up MLSDC Solver" % VERBOSITY_LVL1)
from pypint.multi_level_providers.multi_time_level_provider import MultiTimeLevelProvider
from pypint.multi_level_providers.level_transition_providers.time_transition_provider import TimeTransitionProvider
from pypint.integrators.sdc_integrator import SdcIntegrator
base_mlsdc_level = SdcIntegrator()
base_mlsdc_level.init(num_nodes=5)
fine_mlsdc_level = SdcIntegrator()
fine_mlsdc_level.init(num_nodes=7)
transitioner = TimeTransitionProvider(fine_nodes=fine_mlsdc_level.nodes, coarse_nodes=base_mlsdc_level.nodes)
ml_provider = MultiTimeLevelProvider()
ml_provider.add_coarse_level(fine_mlsdc_level)
ml_provider.add_coarse_level(base_mlsdc_level)
ml_provider.add_level_transition(transitioner, 0, 1)
from pypint.communicators import ForwardSendingMessaging
comm = ForwardSendingMessaging()
from pypint.solvers.ml_sdc import MlSdc
mlsdc = MlSdc(communicator=comm)
comm.link_solvers(previous=comm, next=comm)
comm.write_buffer(tag=(ml_provider.num_levels - 1), value=problem.initial_value, time_point=problem.time_start)
from pypint.utilities.threshold_check import ThresholdCheck
thresh = ThresholdCheck(max_threshold=3, min_threshold=1e-7,
conditions=('solution reduction', 'residual', 'iterations'))
mlsdc.init(problem=problem, threshold=thresh, ml_provider=ml_provider)
# LOG.info(SEPARATOR_LVL1)
# LOG.info("%sInitialize Direct Space Solvers for Time Levels" % VERBOSITY_LVL1)
# for time_level in range(0, ml_provider.num_levels):
# _integrator = ml_provider.integrator(time_level)
# for time_node in range(0, _integrator.num_nodes - 1):
# problem.initialize_direct_space_solver(time_level,
# (_integrator.nodes[time_node + 1] - _integrator.nodes[time_node]),
# fine_mg_level)
LOG.info(SEPARATOR_LVL1)
LOG.info("%sLaunching MLSDC with MG" % VERBOSITY_LVL1)
from pypint.solvers.cores import SemiImplicitMlSdcCore, ExplicitMlSdcCore
mlsdc.run(SemiImplicitMlSdcCore, dt=dt)
print("RHS Evaluations: %d" % problem.rhs_evaluations)
| [
"t.klatt@fz-juelich.de"
] | t.klatt@fz-juelich.de |
990899041c7f51cc4a3c9a3c649736772d803b5f | 0d949e3373deb48b715080fce4ea397d656cd701 | /garfield/deterrence/tests/test_admin.py | 353d770887788d87516d26d2b46256e75be499f0 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | RobSpectre/garfield | f291d6022024a6238839db4a2dee47be1e1501c5 | ab806b7ad9221bd1b17c92daadd0a53a4f261cbe | refs/heads/master | 2022-07-27T05:40:07.533290 | 2021-06-10T17:38:46 | 2021-06-10T17:38:46 | 106,447,706 | 3 | 1 | MIT | 2022-04-22T20:51:54 | 2017-10-10T17:14:21 | Python | UTF-8 | Python | false | false | 723 | py | from django.test import RequestFactory
from django.test import TestCase
from deterrence.admin import DeterrenceMessageInline
class DeterrenceInlineTestCase(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.request = self.factory.get("/stuff")
def test_deterrence_message_inline_get_extra(self):
test = DeterrenceMessageInline.get_extra(None, self.request)
self.assertEqual(test, 1)
def test_deterrence_message_inline_get_extra_obj_exists(self):
test = DeterrenceMessageInline.get_extra(None,
self.request,
obj=True)
self.assertEqual(test, 0)
| [
"rob@brooklynhacker.com"
] | rob@brooklynhacker.com |
2a33a9ba5550d1e34ea409bff8c0c42eb0988458 | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_05_01/aio/operations/_inbound_nat_rules_operations.py | 4cf4860cf8f810796c6773d6e6b8b8e59f35b58f | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 22,430 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class InboundNatRulesOperations:
"""InboundNatRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> AsyncIterable["_models.InboundNatRuleListResult"]:
"""Gets all the inbound nat rules in a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InboundNatRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.InboundNatRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('InboundNatRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.InboundNatRule":
"""Gets the specified load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: InboundNatRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.InboundNatRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
inbound_nat_rule_parameters: "_models.InboundNatRule",
**kwargs: Any
) -> "_models.InboundNatRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(inbound_nat_rule_parameters, 'InboundNatRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
load_balancer_name: str,
inbound_nat_rule_name: str,
inbound_nat_rule_parameters: "_models.InboundNatRule",
**kwargs: Any
) -> AsyncLROPoller["_models.InboundNatRule"]:
"""Creates or updates a load balancer inbound nat rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param inbound_nat_rule_name: The name of the inbound nat rule.
:type inbound_nat_rule_name: str
:param inbound_nat_rule_parameters: Parameters supplied to the create or update inbound nat
rule operation.
:type inbound_nat_rule_parameters: ~azure.mgmt.network.v2020_05_01.models.InboundNatRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either InboundNatRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.InboundNatRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundNatRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
inbound_nat_rule_name=inbound_nat_rule_name,
inbound_nat_rule_parameters=inbound_nat_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('InboundNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'} # type: ignore
| [
"noreply@github.com"
] | catchsrinivas.noreply@github.com |
ad9deeab0d64712cb21fa7a68f0bae20d525d236 | 7ab85ba79a6553659f0b324ecebb4bb39f8a8a1c | /class_method2.py | 03efa5297b7caf919e9d4354c4ec872da162c672 | [] | no_license | subinmun1997/my_python | b75db77a035fa8f531d9872bf33a1818a002206a | 634acc948e7758f5d26084536c506e7da45cd53c | refs/heads/master | 2022-12-28T21:11:40.173378 | 2020-10-16T08:02:18 | 2020-10-16T08:02:18 | 292,875,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | class Simple:
count = 0
def __init__(self):
Simple.count += 1
@classmethod
def get_count(cls):
return cls.count # cls에 전달되는 것은 Simple 클래스
def main():
print(Simple.get_count())
s = Simple()
print(Simple.get_count())
main() | [
"qzxy812@gmail.com"
] | qzxy812@gmail.com |
a6dcf6ed3c58a620fd57e409629611ca0ccf68e2 | 45a61af9028a1805c08b6f7638c7aebe8140bd2d | /Hydrology/old/Correlaciones_regresiones_v2.15.0.py | 839246f267e100849c82e0b1f5bc154c068a6d5a | [] | no_license | gumilar19/Personal | 1c1fac036af3a4b9d4d425b7c8cb604271b94fd2 | c666b07c5184006aca8e6ad946cc98ef72dfe9fe | refs/heads/master | 2023-08-14T20:39:07.164849 | 2021-09-29T11:19:10 | 2021-09-29T11:19:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,996 | py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
Author: CCCM
"""
#%% Preambulo
def limpiar_kernel():
try:
from IPython import get_ipython
get_ipython().magic('clear')
get_ipython().magic('reset -f')
except:
pass
limpiar_kernel()
import pandas as pd
import statsmodels.api as sm
import numpy as np
import matplotlib.pyplot as plt
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
import gc
#funciones
def regresion(x_,y_):
X = sm.add_constant(x_)
resultados_fit = sm.OLS(y_,X,missing='drop').fit()
N = resultados_fit.params[0]
M = resultados_fit.params[1]
R2 = resultados_fit.rsquared
return [M,N,R2]
def mejoresCorrelaciones(df, col, Nestaciones):
ordenados = df.sort_values(by=col, ascending = False)
return ordenados.index[:Nestaciones]
#%%
def main():
#%%
ruta_GitHub = r'D:\GitHub'
# ruta_GitHub = r'C:\Users\ccalvo\Documents\GitHub'
# ruta_Q = ruta_GitHub+r'\Analisis-Oferta-Hidrica\DGA\datosDGA\Q\Maule\Q_Maule_1900-2020_v0.csv'
# ruta_Q = ruta_GitHub+r'\Analisis-Oferta-Hidrica\DGA\datosDGA\Q\Maipo\RIO MAIPO_Q_diario.csv'
ruta_Q = ruta_GitHub+r'\Analisis-Oferta-Hidrica\Hidrología\Caudales\Validacion\cr2_Maipo_Q.xlsx'
Q_daily = pd.read_excel(ruta_Q, index_col = 0)
# Q_daily = pd.read_csv(ruta_Q, index_col = 0)
Q_daily.index = pd.to_datetime(Q_daily.index)
# ver entrada gráfica
# Q_daily.plot()
#meses
meses = [4,5,6,7,8,9,10,11,12,1,2,3]
#fechas
# inicio = pd.to_datetime('2000-12-31',format='%Y-%m-%d')
inicio = pd.to_datetime('1949-12-31',format='%Y-%m-%d')
fin = pd.to_datetime('2002-01-01',format='%Y-%m-%d')
# fin = pd.to_datetime('2020-01-01',format='%Y-%m-%d')
Q_daily = pd.DataFrame(Q_daily[Q_daily.index <= fin ], index = pd.date_range(inicio, fin, freq='D', closed='right'))
#minimo de años con datos
minYr = 1
#%%Crear indice de fechas
#Convertir años a int y calcular frecuencia de datos para mapa de colores
data = Q_daily.notnull().astype('int')
data = data.groupby(Q_daily.index.year)
data_anual = data.aggregate(np.sum)
data_anual = data_anual/(365*0.8)
data_anual = data_anual.apply(lambda x: [y if y < 1 else 1 for y in x])
data_anual = data_anual.transpose()
data_anual = data_anual.sort_index()
estaciones_minimas = pd.DataFrame(data_anual.sum(axis=1), columns = ['registro'])
estaciones_minimas = estaciones_minimas[estaciones_minimas['registro']>= minYr]
# estaciones_minimas = ['05710001-K', '05701001-0', '05701002-9', '05702001-6', '05704002-5', '05705001-2', '05706001-8', '05707002-1', '05721001-K',
# '05722001-5', '05722002-3', '05716001-2', '05735001-6', '05737002-5','05741001-9', '05746001-6', '05748001-7']
# Q_daily_filtradas = Q_daily[estaciones_minimas]
Q_daily_filtradas = Q_daily[estaciones_minimas.index]
# Q_month_mean = Q_daily_filtradas.groupby(Q_daily.index.month).mean()
Q_month_mean = Q_daily_filtradas.groupby(Q_daily.index.month).apply(lambda x: x.mode().iloc[0])
Q_month_std = Q_daily_filtradas.groupby(Q_daily.index.month).std()
Q_month_mean = (Q_month_mean.fillna(method='ffill') + Q_month_mean.fillna(method='bfill'))/2
Q_month_std = (Q_month_std.fillna(method='ffill') + Q_month_std.fillna(method='bfill'))/2
#%% Relleno con OLR
coef_m_mensuales = pd.DataFrame( index = meses, columns = Q_daily_filtradas.columns)
coef_n_mensuales = pd.DataFrame( index = meses, columns = Q_daily_filtradas.columns)
coef_r2_mensuales = pd.DataFrame( index = meses, columns = Q_daily_filtradas.columns)
# Est con mejor correlación diaria
correl = Q_daily_filtradas.corr()
correl = correl.replace(1,-9999)
idx = correl.idxmax()
# idx_2 = correl[col].nlargest(3)
# r = correl.max()
Q_daily_mon = Q_daily_filtradas.groupby(Q_daily_filtradas.index.month)
for indice in idx.index:
print(indice)
for mes in meses:
y = Q_daily_mon[indice].apply(list).loc[mes] #mes 1
est_indep = idx.loc[indice]
x = Q_daily_mon[est_indep].apply(list).loc[mes] #mes 1
try:
M, N, R2 = regresion(x,y)
coef_m_mensuales.loc[mes][indice] = M
coef_n_mensuales.loc[mes][indice] = N
coef_r2_mensuales.loc[mes][indice] = R2
except:
print('No hay datos para el mes '+str(mes))
Q_daily_rellenas = Q_daily_filtradas.copy()
coeficientes = pd.DataFrame(index=Q_daily_rellenas.index, columns = ['m','n'])
nticks = 4
plt.close("all")
fig = plt.figure()
for ind,col in enumerate(Q_daily_rellenas.columns):
print(col)
missingData = Q_daily_filtradas[col].isna()
coeficientes['m'] = coef_m_mensuales.loc[Q_daily_filtradas.index.month][col].to_list()
coeficientes['n'] = coef_n_mensuales.loc[Q_daily_filtradas.index.month][col].to_list()
Q_x = Q_daily_filtradas[idx.loc[col]]
Q_daily_rellenas.loc[missingData,col] = Q_x.loc[missingData]*coeficientes['m'].loc[missingData]+ coeficientes['n'].loc[missingData]
Q_daily_rellenas.loc[:,col][Q_daily_rellenas.loc[:,col] < 0] = 0
fig.add_subplot(9,6,ind+1)
ax1 = Q_daily_rellenas[col].plot()
Q_daily_filtradas[col].plot(ax = ax1)
ticks = ax1.xaxis.get_ticklocs()[::nticks]
fig.canvas.draw()
ticklabels = [l.get_text() for l in ax1.xaxis.get_ticklabels()][::nticks]
ax1.xaxis.set_ticks(ticks)
ax1.xaxis.set_ticklabels(ticklabels)
ax1.figure.show()
plt.legend(['Rellenas','Originales'],bbox_to_anchor=(1.05, 1), loc='upper left')
#%% Multivariable
Q_daily_MLR = Q_daily_filtradas.copy()
n_multivariables = 20
stdOutliers = 3
for ind,col in enumerate(Q_daily_filtradas.columns):
print(col)
for mes in meses:
Q_daily_mes = Q_daily_filtradas.loc[Q_daily_filtradas.index.month == mes].copy()
y = Q_daily_mes[col].copy()
if y.count() < 1:
continue
correl = Q_daily_mes.corr()
est_indep = mejoresCorrelaciones(correl, col, n_multivariables)
x = Q_daily_mes.loc[Q_daily_mes.index.month == mes][est_indep.to_list()]
imp = IterativeImputer(max_iter=1, random_state=0, min_value = 0,max_value = y.mean()+stdOutliers*y.std(), sample_posterior = True)
Q_daily_MLR_mes = x[x[x.count().idxmax()].notna()]
# IterativeImputer()
imp.fit(Q_daily_MLR_mes.values.T)
A = imp.transform(Q_daily_MLR_mes.values.T.tolist()).T
Q_daily_MLR_mes = pd.DataFrame(A, columns = Q_daily_MLR_mes.columns, index = Q_daily_MLR_mes.index )
Q_daily_MLR_mes = Q_daily_MLR_mes.dropna()
# Y = pd.DataFrame(Q_daily_MLR_mes[col])
Q_daily_MLR.loc[Q_daily_MLR_mes.index,col] = Q_daily_MLR_mes[col]
# Q_daily_MLR.loc[Q_daily_MLR_mes.index,Q_daily_MLR_mes.columns] = Q_daily_MLR_mes[Q_daily_MLR_mes.columns]
# Q_daily_MLR.loc[Q_daily_mes.index,col] = Q_daily_MLR.loc[Q_daily_mes.index,col].fillna(Q_daily_MLR.loc[Q_daily_mes.index,col].median())
# Q_daily_MLR.loc[Q_daily_mes.index,col] = Q_daily_MLR.loc[Q_daily_mes.index,col].fillna(Q_daily_MLR.loc[Q_daily_mes.index,col].rolling(60).mean())
# Q_daily_MLR.loc[Q_daily_mes.index,col] = Q_daily_MLR.loc[Q_daily_mes.index,col].fillna(Q_daily_MLR.loc[Q_daily_mes.index,col].median())
Q_daily_MLR.loc[Q_daily_mes.index,col] = Q_daily_MLR.loc[Q_daily_mes.index,col].fillna(Q_daily_MLR.loc[Q_daily_mes.index,col].mode())
# learnt_month[mes] = Q_daily_MLR_mes.columns.to_list()
gc.collect()
del imp
del A
#Graficar
nticks = 2
plt.close("all")
fig = plt.figure()
for ind,col in enumerate(Q_daily_filtradas.columns):
fig.add_subplot(8,4,ind+1)
ax1 = Q_daily_MLR[col].plot(linewidth = 3)
Q_daily_filtradas[col].plot(ax = ax1, linewidth = 1)
ticks = ax1.xaxis.get_ticklocs()[::nticks]
fig.canvas.draw()
ticklabels = [l.get_text() for l in ax1.xaxis.get_ticklabels()][::nticks]
ax1.xaxis.set_ticks(ticks)
ax1.xaxis.set_ticklabels(ticklabels)
ax1.figure.show()
plt.ylabel('Q $m^3/s$')
plt.title('Estación '+col)
plt.legend(['Predictor','Original'],bbox_to_anchor=(1.05, 1), loc='upper left')
Q_daily_MLR.to_csv('Q_relleno_MLR_Maipo_1950-2001_outlier_correction_median_mode_general.csv')
| [
"52244870+ccalvocm@users.noreply.github.com"
] | 52244870+ccalvocm@users.noreply.github.com |
67ae26888983b46d5e6b9c5b7eaff0f630ef0073 | 1ca4f7d65ecc85f3607f4e56216875b094219e0d | /144.py | c8e7a5409b60bc2ce3e2667cb4a399e17940ef33 | [] | no_license | ZhangRui111/MyLeetCode | 4c1ac6b309b6c497956a46b1054201c06813c563 | 168fb5a720847721aad426a48a09999b59285794 | refs/heads/master | 2023-06-08T03:09:26.594980 | 2021-07-05T00:30:13 | 2021-07-05T00:30:13 | 381,576,497 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | """
tag: 栈;树
144. 二叉树的前序遍历
https://leetcode-cn.com/problems/binary-tree-preorder-traversal/
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution1:
""" Recursion """
def preorderTraversal(self, root: TreeNode) -> List[int]:
def preorder(root: TreeNode): # 嵌套函数
if root is None:
return
res.append(root.val)
preorder(root.left)
preorder(root.right)
res = list()
preorder(root)
return res
class Solution2:
""" Iteration """
def preorderTraversal(self, root: TreeNode) -> List[int]:
res = list()
if root is None:
return res
stack = []
node = root
while stack or node:
while node:
res.append(node.val)
stack.append(node)
node = node.left
node = stack.pop()
node = node.right
return res
| [
"zhangruisg111@163.com"
] | zhangruisg111@163.com |
6515831fad7e7e60012980b3868f8fcee9c1ee01 | 3e50ed55208122b2f8b34e7f26f33c9ef70efce5 | /python/pygame_home/full_screen.py | 15f4ea3d3d06f15105b62e1d69837b5d38a8c3fe | [] | no_license | brady-wang/mac_home | b8343da428a4e6696b89d0e6a53ff0dfc87ffd21 | c56a739c31d3c0f62d26d8512fe1a90c036a1f96 | refs/heads/master | 2023-01-14T11:42:02.544322 | 2019-10-02T11:47:27 | 2019-10-02T11:47:27 | 193,177,718 | 0 | 0 | null | 2023-01-04T13:55:31 | 2019-06-22T01:27:10 | PHP | UTF-8 | Python | false | false | 754 | py | # *_*coding:utf-8 *_*
import pygame
from pygame.locals import *
from sys import exit
background_image = 'images/bk.jpg'
pygame.init()
screen = pygame.display.set_mode((640, 480), 0, 32)
background = pygame.image.load(background_image).convert()
Fullscreen = False
while True:
for event in pygame.event.get():
if event.type == QUIT:
exit()
if event.type == KEYDOWN:
if event.key == K_f:
Fullscreen = not Fullscreen
if Fullscreen:
screen = pygame.display.set_mode((640, 480), FULLSCREEN, 32)
else:
screen = pygame.display.set_mode((640, 480), 0, 32)
screen.blit(background, (0, 0))
pygame.display.update()
| [
"brady.wang@qq.com"
] | brady.wang@qq.com |
b58ad3496928f3809ec4c67f94811bbc30c644b9 | 3f53e38076713ab49fd03a54c7c9d3e21de5eb14 | /Pyrado/scripts/training/bob-d_dql.py | 421de7b102c0d48ccc9e952111e4b2879f71b26f | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | arlene-kuehn/SimuRLacra | 4510473789d1c8927c8d5969a9606238523d5dd7 | 15901f70f0538bce19acdda2a0018984f67cc0fe | refs/heads/master | 2023-01-28T13:10:05.607575 | 2020-12-04T14:47:01 | 2020-12-04T14:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,718 | py | # Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Train an agent to solve the discrete Ball-on-Beam environment using Deep Q-Leaning.
.. note::
The hyper-parameters are not tuned at all!
"""
import torch as to
import pyrado
from pyrado.algorithms.step_based.dql import DQL
from pyrado.environments.pysim.ball_on_beam import BallOnBeamDiscSim
from pyrado.logger.experiment import setup_experiment, save_list_of_dicts_to_yaml
from pyrado.policies.feed_forward.fnn import DiscreteActQValPolicy, FNN
from pyrado.utils.argparser import get_argparser
if __name__ == '__main__':
# Parse command line arguments
args = get_argparser().parse_args()
# Experiment
ex_dir = setup_experiment(BallOnBeamDiscSim.name, f'{DQL.name}_{DiscreteActQValPolicy.name}')
# Set seed if desired
pyrado.set_seed(args.seed, verbose=True)
# Environment
env_hparams = dict(dt=1/100., max_steps=500)
env = BallOnBeamDiscSim(**env_hparams)
# Policy
policy_hparam = dict(
hidden_sizes=[32, 32],
hidden_nonlin=to.tanh
)
net = FNN(
input_size=DiscreteActQValPolicy.get_qfcn_input_size(env.spec),
output_size=DiscreteActQValPolicy.get_qfcn_output_size(),
**policy_hparam
)
policy = DiscreteActQValPolicy(spec=env.spec, net=net)
# Algorithm
algo_hparam = dict(
max_iter=5000,
memory_size=10*env.max_steps,
eps_init=0.1286,
eps_schedule_gamma=0.9955,
gamma=0.998,
target_update_intvl=5,
num_batch_updates=20,
max_grad_norm=0.5,
min_steps=10,
batch_size=256,
num_workers=4,
lr=7e-4,
)
algo = DQL(ex_dir, env, policy, **algo_hparam)
# Save the hyper-parameters
save_list_of_dicts_to_yaml([
dict(env=env_hparams, seed=args.seed),
dict(policy=policy_hparam),
dict(algo=algo_hparam, algo_name=algo.name)],
ex_dir
)
# Jeeeha
algo.train(snapshot_mode='best', seed=args.seed)
| [
"fabio.muratore@famura.net"
] | fabio.muratore@famura.net |
f1ff9eb781befbc411938d6dca7c4e91b57ac891 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.0_rd=0.65_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=94/params.py | 50a976b4643a17024e6aad70bfb0d7da1bcc731c | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.056548',
'max_util': '3.0',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.65',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'GSN-EDF',
'trial': 94,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
c3ec0afea7ee4fa6e33481c3719572532fe791c2 | c7f43c4cc0ee84a5fe246b67f51e30b8d726ebd5 | /keras/keras46_MC_1_fashion.py | 1a2eaf22ee6fd437d4d6962fe544b82cd65f86a1 | [] | no_license | 89Mansions/AI_STUDY | d9f8bdf206f14ba41845a082e731ea844d3d9007 | d87c93355c949c462f96e85e8d0e186b0ce49c76 | refs/heads/master | 2023-07-21T19:11:23.539693 | 2021-08-30T08:18:59 | 2021-08-30T08:18:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,978 | py | # CNN
# fashion_mnist
# from tensorflow.keras.callbacks import ModelCheckpoint
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import fashion_mnist
#1. DATA
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
print(x_train.shape, y_train.shape) # (60000, 28, 28)--> 흑백 1 생략 가능 (60000,)
print(x_test.shape, y_test.shape) # (10000, 28, 28) (10000,)
# print(x_train[0])
# print("y_train[0] : " , y_train[0]) # 9
# print(x_train[0].shape) # (28, 28)
# plt.imshow(x_train[0], 'gray') # 0 : black, ~255 : white (가로 세로 색깔)
# # plt.imshow(x_train[0]) # 색깔 지정 안해도 나오긴 함
# plt.show()
# x > preprocessing
# print(np.min(x_train),np.max(x_train)) # 0 ~ 255
x_train = x_train.reshape(x_train.shape[0],x_train.shape[1],x_train.shape[2],1)/255.
x_test = x_test.reshape(x_test.shape[0],x_test.shape[1],x_test.shape[2],1)/255.
print(x_train.shape) # (60000, 28, 28, 1)
print(x_test.shape) # (10000, 28, 28, 1)
print(np.min(x_train),np.max(x_train)) # 0.0 ~ 1.0
# y > preprocessing
# print(y_train[:20]) # 0 ~ 9
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print(y_train.shape) # (60000, 10)
print(y_test.shape) # (10000, 10)
#2. Modeling
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPool2D, Dropout
model = Sequential()
model.add(Conv2D(filters=112, kernel_size=(2,2),padding='same',strides=1,input_shape=(x_train.shape[1],x_train.shape[2],x_train.shape[3])))
model.add(MaxPool2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=84,kernel_size=(2,2)))
model.add(MaxPool2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=28,kernel_size=(2,2)))
model.add(Conv2D(filters=28,kernel_size=(2,2)))
model.add(Flatten())
model.add(Dense(20, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(10,activation='softmax'))
# model.summary()
#3. Compile, Train
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
# 체크포인트의 가중치를 저장할 파일경로 지정
modelpath='../data/modelcheckpoint/k46_1_fashion_{epoch:02d}-{val_loss:.4f}.hdf5'
# 02d : 정수 두 자리만 적겠다. / .4f : 소수점 아래 4째자리까지 적겠다.
# 저장 예시) k45_mnist_37-0.0100.hdf5
# 저장된 파일 중에 가장 마지막에 생성된게 가장 좋은 것이 됨
es = EarlyStopping(monitor='val_loss', patience=5, mode='max')
cp = ModelCheckpoint(filepath=modelpath,monitor='val_loss', save_best_only=True, mode='auto')
# filepath : 최저점이 찍힐 때마다 가중치가 세이브된 파일이 생성된다.
# 궁극의 목적 : 최적의 weight를 구하기 위해서
# predict할 때 혹은 evaluate 할 때 이 weight를 넣기만 하면된다.
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
hist = model.fit(x_train, y_train, epochs=15, batch_size=32, validation_split=0.2, callbacks=[es, cp])
#4. Evaluate, predict
loss, acc = model.evaluate(x_test, y_test, batch_size=32)
print("loss : ", loss)
print("acc : ", acc)
print("y_test : ", np.argmax(y_test[-5:-1],axis=1))
y_pred = model.predict(x_test[-5:-1])
print("y_pred : ", np.argmax(y_pred,axis=1))
# 시각화
# import matplotlib.pyplot as plt
# plt.figure(figsize=(10,6)) # 판 사이즈 (가로 10, 세로 6)
# plt.subplot(2, 1, 1) # plot : 도화지 하나에 그림을 그리겠다.
# # 2행 1열 중 첫 번째
# # 만약 (3, 1, 1) 이라면 세 개의 plot이 있어야 한다. (3, 1, 1) (3, 1, 2) (3, 1, 3)
# plt.plot(hist.history['loss'], marker='.', c='red', label='loss')
# plt.plot(hist.history['val_loss'], marker='.', c='blue', label='val_loss')
# plt.grid()
# # plt.title('손실비용') # 과제 : 한글 깨짐 오류 해결할 것
# plt.title('Cost Loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(loc='upper right')
# plt.subplot(2, 1, 2) # 2행 1열 중 두 번째
# plt.plot(hist.history['accuracy'], marker='.', c='red', label='accuracy')
# plt.plot(hist.history['val_accuracy'], marker='.', c='blue', label='val_accuracy')
# plt.grid() # 모눈종이 격자위에 그리겠다.
# # plt.title('정확도') # 과제 : 한글 깨짐 오류 해결할 것
# plt.title('Accuracy')
# plt.ylabel('accuracy')
# plt.xlabel('epoch')
# plt.legend(loc='upper right')
# plt.show()
# CNN
# loss : 0.3053586483001709
# acc : 0.8960000276565552
# y_test : [9 1 8 1]
# y_pred : [9 1 8 1]
# ModelCheckPoint
# loss : 0.32927361130714417
# acc : 0.8809999823570251
# y_test : [9 1 8 1]
# y_pred : [9 1 8 1] | [
"hwangkei0212@gmail.com"
] | hwangkei0212@gmail.com |
66cba8cea9deee015eb75c5dcb0821fc6757469a | 1ef536d93c6616f9793e57a9ebc6b44248d50202 | /move_product_out_to_in/customer_code/models/res_partner.py | 7b6b7f6d481a13d3ea17555ec3913e2e3a386849 | [] | no_license | mohamed4185/Express | 157f21f8eba2b76042f4dbe09e4071e4411342ac | 604aa39a68bfb41165549d605d40a27b9251d742 | refs/heads/master | 2022-04-12T17:04:05.407820 | 2020-03-09T14:02:17 | 2020-03-09T14:02:17 | 246,014,712 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | # -*- coding: utf-8 -*-
from odoo import api, fields ,models
from odoo.exceptions import ValidationError
import logging
_logger = logging.getLogger(__name__)
class ResPartner(models.Model):
_inherit="res.partner"
customer_code=fields.Char('Code',size=10)
| [
"mohamed.abdelrahman@businessborderlines.com"
] | mohamed.abdelrahman@businessborderlines.com |
4a42cd4a8c1653a170f6d0bcdd745e2fc8287620 | 7137161629a1003583744cc3bd0e5d3498e0a924 | /airflow/sensors/external_task.py | c4510015138e01d416c907cfacebc4d48c0d7c31 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | jbampton/airflow | 3fca85975854eb916f16143b659a9119af143963 | dcfa14d60dade3fdefa001d10013466fe4d77f0d | refs/heads/master | 2023-05-25T22:31:49.104069 | 2021-09-18T19:18:32 | 2021-09-18T19:18:32 | 247,645,744 | 3 | 0 | Apache-2.0 | 2020-03-16T08:12:58 | 2020-03-16T08:12:57 | null | UTF-8 | Python | false | false | 14,226 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import os
from typing import Any, Callable, FrozenSet, Iterable, Optional, Union
from sqlalchemy import func
from airflow.exceptions import AirflowException
from airflow.models import BaseOperatorLink, DagBag, DagModel, DagRun, TaskInstance
from airflow.operators.dummy import DummyOperator
from airflow.sensors.base import BaseSensorOperator
from airflow.utils.helpers import build_airflow_url_with_query
from airflow.utils.session import provide_session
from airflow.utils.state import State
class ExternalTaskSensorLink(BaseOperatorLink):
"""
Operator link for ExternalTaskSensor. It allows users to access
DAG waited with ExternalTaskSensor.
"""
name = 'External DAG'
def get_link(self, operator, dttm):
query = {"dag_id": operator.external_dag_id, "execution_date": dttm.isoformat()}
return build_airflow_url_with_query(query)
class ExternalTaskSensor(BaseSensorOperator):
"""
Waits for a different DAG or a task in a different DAG to complete for a
specific execution_date
:param external_dag_id: The dag_id that contains the task you want to
wait for
:type external_dag_id: str
:param external_task_id: The task_id that contains the task you want to
wait for. If ``None`` (default value) the sensor waits for the DAG
:type external_task_id: str or None
:param external_task_ids: The list of task_ids that you want to wait for.
If ``None`` (default value) the sensor waits for the DAG. Either
external_task_id or external_task_ids can be passed to
ExternalTaskSensor, but not both.
:type external_task_ids: Iterable of task_ids or None, default is None
:param allowed_states: Iterable of allowed states, default is ``['success']``
:type allowed_states: Iterable
:param failed_states: Iterable of failed or dis-allowed states, default is ``None``
:type failed_states: Iterable
:param execution_delta: time difference with the previous execution to
look at, the default is the same execution_date as the current task or DAG.
For yesterday, use [positive!] datetime.timedelta(days=1). Either
execution_delta or execution_date_fn can be passed to
ExternalTaskSensor, but not both.
:type execution_delta: Optional[datetime.timedelta]
:param execution_date_fn: function that receives the current execution date as the first
positional argument and optionally any number of keyword arguments available in the
context dictionary, and returns the desired execution dates to query.
Either execution_delta or execution_date_fn can be passed to ExternalTaskSensor,
but not both.
:type execution_date_fn: Optional[Callable]
:param check_existence: Set to `True` to check if the external task exists (when
external_task_id is not None) or check if the DAG to wait for exists (when
external_task_id is None), and immediately cease waiting if the external task
or DAG does not exist (default value: False).
:type check_existence: bool
"""
template_fields = ['external_dag_id', 'external_task_id']
ui_color = '#19647e'
@property
def operator_extra_links(self):
"""Return operator extra links"""
return [ExternalTaskSensorLink()]
def __init__(
self,
*,
external_dag_id: str,
external_task_id: Optional[str] = None,
external_task_ids: Optional[Iterable[str]] = None,
allowed_states: Optional[Iterable[str]] = None,
failed_states: Optional[Iterable[str]] = None,
execution_delta: Optional[datetime.timedelta] = None,
execution_date_fn: Optional[Callable] = None,
check_existence: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.allowed_states = list(allowed_states) if allowed_states else [State.SUCCESS]
self.failed_states = list(failed_states) if failed_states else []
total_states = self.allowed_states + self.failed_states
total_states = set(total_states)
if set(self.failed_states).intersection(set(self.allowed_states)):
raise AirflowException(
"Duplicate values provided as allowed "
"`{}` and failed states `{}`".format(self.allowed_states, self.failed_states)
)
if external_task_id is not None and external_task_ids is not None:
raise ValueError(
'Only one of `external_task_id` or `external_task_ids` may '
'be provided to ExternalTaskSensor; not both.'
)
if external_task_id is not None:
external_task_ids = [external_task_id]
if external_task_ids:
if not total_states <= set(State.task_states):
raise ValueError(
f'Valid values for `allowed_states` and `failed_states` '
f'when `external_task_id` or `external_task_ids` is not `None`: {State.task_states}'
)
if len(external_task_ids) > len(set(external_task_ids)):
raise ValueError('Duplicate task_ids passed in external_task_ids parameter')
elif not total_states <= set(State.dag_states):
raise ValueError(
f'Valid values for `allowed_states` and `failed_states` '
f'when `external_task_id` is `None`: {State.dag_states}'
)
if execution_delta is not None and execution_date_fn is not None:
raise ValueError(
'Only one of `execution_delta` or `execution_date_fn` may '
'be provided to ExternalTaskSensor; not both.'
)
self.execution_delta = execution_delta
self.execution_date_fn = execution_date_fn
self.external_dag_id = external_dag_id
self.external_task_id = external_task_id
self.external_task_ids = external_task_ids
self.check_existence = check_existence
self._has_checked_existence = False
@provide_session
def poke(self, context, session=None):
if self.execution_delta:
dttm = context['execution_date'] - self.execution_delta
elif self.execution_date_fn:
dttm = self._handle_execution_date_fn(context=context)
else:
dttm = context['execution_date']
dttm_filter = dttm if isinstance(dttm, list) else [dttm]
serialized_dttm_filter = ','.join(dt.isoformat() for dt in dttm_filter)
self.log.info(
'Poking for tasks %s in dag %s on %s ... ',
self.external_task_ids,
self.external_dag_id,
serialized_dttm_filter,
)
# In poke mode this will check dag existence only once
if self.check_existence and not self._has_checked_existence:
self._check_for_existence(session=session)
count_allowed = self.get_count(dttm_filter, session, self.allowed_states)
count_failed = -1
if self.failed_states:
count_failed = self.get_count(dttm_filter, session, self.failed_states)
if count_failed == len(dttm_filter):
if self.external_task_ids:
raise AirflowException(
f'Some of the external tasks {self.external_task_ids} '
f'in DAG {self.external_dag_id} failed.'
)
else:
raise AirflowException(f'The external DAG {self.external_dag_id} failed.')
return count_allowed == len(dttm_filter)
def _check_for_existence(self, session) -> None:
dag_to_wait = session.query(DagModel).filter(DagModel.dag_id == self.external_dag_id).first()
if not dag_to_wait:
raise AirflowException(f'The external DAG {self.external_dag_id} does not exist.')
if not os.path.exists(dag_to_wait.fileloc):
raise AirflowException(f'The external DAG {self.external_dag_id} was deleted.')
if self.external_task_ids:
refreshed_dag_info = DagBag(dag_to_wait.fileloc).get_dag(self.external_dag_id)
for external_task_id in self.external_task_ids:
if not refreshed_dag_info.has_task(external_task_id):
raise AirflowException(
f'The external task {external_task_id} in '
f'DAG {self.external_dag_id} does not exist.'
)
self._has_checked_existence = True
def get_count(self, dttm_filter, session, states) -> int:
"""
Get the count of records against dttm filter and states
:param dttm_filter: date time filter for execution date
:type dttm_filter: list
:param session: airflow session object
:type session: SASession
:param states: task or dag states
:type states: list
:return: count of record against the filters
"""
TI = TaskInstance
DR = DagRun
if self.external_task_ids:
count = (
session.query(func.count()) # .count() is inefficient
.filter(
TI.dag_id == self.external_dag_id,
TI.task_id.in_(self.external_task_ids),
TI.state.in_(states),
TI.execution_date.in_(dttm_filter),
)
.scalar()
)
count = count / len(self.external_task_ids)
else:
count = (
session.query(func.count())
.filter(
DR.dag_id == self.external_dag_id,
DR.state.in_(states),
DR.execution_date.in_(dttm_filter),
)
.scalar()
)
return count
def _handle_execution_date_fn(self, context) -> Any:
"""
This function is to handle backwards compatibility with how this operator was
previously where it only passes the execution date, but also allow for the newer
implementation to pass all context variables as keyword arguments, to allow
for more sophisticated returns of dates to return.
"""
from airflow.utils.operator_helpers import make_kwargs_callable
# Remove "execution_date" because it is already a mandatory positional argument
execution_date = context["execution_date"]
kwargs = {k: v for k, v in context.items() if k != "execution_date"}
# Add "context" in the kwargs for backward compatibility (because context used to be
# an acceptable argument of execution_date_fn)
kwargs["context"] = context
kwargs_callable = make_kwargs_callable(self.execution_date_fn)
return kwargs_callable(execution_date, **kwargs)
class ExternalTaskMarker(DummyOperator):
"""
Use this operator to indicate that a task on a different DAG depends on this task.
When this task is cleared with "Recursive" selected, Airflow will clear the task on
the other DAG and its downstream tasks recursively. Transitive dependencies are followed
until the recursion_depth is reached.
:param external_dag_id: The dag_id that contains the dependent task that needs to be cleared.
:type external_dag_id: str
:param external_task_id: The task_id of the dependent task that needs to be cleared.
:type external_task_id: str
:param execution_date: The execution_date of the dependent task that needs to be cleared.
:type execution_date: str or datetime.datetime
:param recursion_depth: The maximum level of transitive dependencies allowed. Default is 10.
This is mostly used for preventing cyclic dependencies. It is fine to increase
this number if necessary. However, too many levels of transitive dependencies will make
it slower to clear tasks in the web UI.
"""
template_fields = ['external_dag_id', 'external_task_id', 'execution_date']
ui_color = '#19647e'
# The _serialized_fields are lazily loaded when get_serialized_fields() method is called
__serialized_fields: Optional[FrozenSet[str]] = None
def __init__(
self,
*,
external_dag_id: str,
external_task_id: str,
execution_date: Optional[Union[str, datetime.datetime]] = "{{ execution_date.isoformat() }}",
recursion_depth: int = 10,
**kwargs,
):
super().__init__(**kwargs)
self.external_dag_id = external_dag_id
self.external_task_id = external_task_id
if isinstance(execution_date, datetime.datetime):
self.execution_date = execution_date.isoformat()
elif isinstance(execution_date, str):
self.execution_date = execution_date
else:
raise TypeError(
f'Expected str or datetime.datetime type for execution_date. Got {type(execution_date)}'
)
if recursion_depth <= 0:
raise ValueError("recursion_depth should be a positive integer")
self.recursion_depth = recursion_depth
@classmethod
def get_serialized_fields(cls):
"""Serialized ExternalTaskMarker contain exactly these fields + templated_fields ."""
if not cls.__serialized_fields:
cls.__serialized_fields = frozenset(super().get_serialized_fields() | {"recursion_depth"})
return cls.__serialized_fields
| [
"noreply@github.com"
] | jbampton.noreply@github.com |
7178d7fb0f535f9f31617612709df1b85386f6ef | 8c92787a518bea3d528641311939137f7f37b56c | /grab/spider/error.py | 056eaeef85e057b70d0019b7e1da520f8fcb7d46 | [
"MIT"
] | permissive | brabadu/grab | 8d973d5052bc60d06d67e1ea82814a939dea6877 | 92b1d68ceeece3087e053064520261a7aef3bd02 | refs/heads/master | 2021-01-17T22:16:16.923189 | 2013-10-02T20:28:44 | 2013-10-02T20:28:44 | 13,282,380 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | __all__ = ('SpiderError', 'SpiderMisuseError', 'FatalError',
'StopTaskProcessing', 'SpiderInternalError',
'NoTaskHandler', 'NoDataHandler')
class SpiderError(Exception):
"Base class for Spider exceptions"
class SpiderMisuseError(SpiderError):
"Improper usage of Spider framework"
class FatalError(SpiderError):
"Fatal error which should stop parsing process"
class StopTaskProcessing(SpiderError):
"""
Used in middlewares to stop task processing
"""
class SpiderInternalError(SpiderError):
"""
Used to indicate error in some internal spider services
like spider class discovering, CLI error
"""
class NoTaskHandler(SpiderError):
"""
Used then it is not possible to find which
handler should be used to process network response.
"""
class NoDataHandler(SpiderError):
"""
Used then it is not possible to find which
handler should be used to process Data object.
"""
| [
"lorien@lorien.name"
] | lorien@lorien.name |
9668b5f349cdc67a5166ee2ddb3178f990d70225 | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/Keyboard-Layout-Editor/__init__.py | 3eaf1adc8f98d6f2aa0df9bec87cf4e5246ec300 | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,510 | py | # addon details
bl_info = {
"name": "Import: KLE Raw JSON format (.json)",
"author": "/u/kdem007 /u/jacopods",
"version": (2, 4),
"blender": (2, 79, 0),
"location": "File > Import-Export > Keyboard Layout Editor Raw (.json) ",
"description": "Import Keyboard Layouts",
"warning": "",
"category": "Learnbgame",
}
import bpy
# main addon class
class JSONImporter(bpy.types.Operator):
"""Load Keyboard Layout data"""
bl_idname = "import_mesh.json"
bl_label = "Import KLE Raw JSON"
bl_options = {'UNDO'}
filepath = bpy.props.StringProperty(
subtype='FILE_PATH',
)
filter_glob = bpy.props.StringProperty(
default="*.json", options={'HIDDEN'})
def execute(self, context):
from . import import_keyboard
import_keyboard.read(self.filepath)
return {'FINISHED'}
def invoke(self, context, event):
wm = context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
# add to import menu
def menu_import(self, context):
self.layout.operator(JSONImporter.bl_idname, text="KLE Raw Data (.json)")
# register addon
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_import.append(menu_import)
# unregister addon
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_import.remove(menu_import)
if __name__ == "__main__":
register()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
353adba7584ba5ce57a5bfcec27a0bac66b5c7da | 496da9f56598029d9f34b4b5eb7689750f476707 | /analysis/pmf.py | 7adfe8a19822fe31486c268a6bb27c4028aa169f | [] | no_license | wangyp36/llcsim | e22d56b6cdb5ca0f643e3134b3b24ac549bbfb0e | 8ffae44e5ff2f7fe0e6f51f32d0899acd98ec61d | refs/heads/master | 2021-10-11T08:37:55.231820 | 2019-01-23T22:56:28 | 2019-01-23T22:56:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,157 | py | #!/usr/bin/env python
import mdtraj as md
import numpy as np
import pymbar
import argparse
from llcsim.setup.residue_topology import Residue
from llcsim.analysis import Atom_props
import matplotlib.pyplot as plt
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def initialize():
parser = argparse.ArgumentParser(description='Run Cylindricity script')
parser.add_argument('-f', '--basename', default='umbrella', type=str, help='Name of umbrella simulations where'
'i is the state number indexed starting from 0. Assume that all files follow suit. i.e. in the'
' default case, there exists umbrella_i_pullx.xvg and umbrella_i.gro')
parser.add_argument('-i', '--initial', default='em', type=str, help='Basename corresponding to initial '
'configurations used to run umbrella simulations. em_i.gro')
parser.add_argument('-N', '--nstates', default=12, type=int, help='Number of umbrella simulations')
parser.add_argument('-r', '--residue', default='ETH', type=str, help='Name of residue whose center of mass was '
'constrained')
parser.add_argument('-c', '--centers', default=None, help='File containing the locations where the center of mass'
'was orginally restrained in a column. If None, this script will generate the list for you '
'using the initial .gro files')
parser.add_argument('-k', '--spring_constant', default=1000, type=float, help='spring constant for harmonic'
'restraint (kJ mol^-1 nm^-2)')
parser.add_argument('-T', '--temperature', default=300, type=int, help='Temperature of simulation (K)')
parser.add_argument('-b', '--bins', default=50, type=int, help='Number of bins for calculating PMF')
args = parser.parse_args()
return args
# Not needed functionality. Delete once everything is working.
# def center_of_mass(positions, res):
# """
# :param positions: position of each atom
# :param res: residue object generated from residue_topology.Residue()
# :return: center of mass of residue in coordinate file
# """
#
# if positions.shape[0] != res.natoms:
# print('Number of positions does not equal number of atoms in residue. Check your work.')
# exit()
#
# com = np.zeros([3])
# for i in range(res.natoms):
# com += positions[i, :] * res.masses[i] # weight each position by the atom's atomic weight
#
# com /= res.mw
#
# return com
# if not centers:
# print('No file with was given with initial COM positions, so I will generate one for you ...',
# flush=True, end='')
# t = md.load('%s_0.gro' % self.basename) # assumes basename_0.gro exists
# self.res = Residue(residue)
# self.res_ndx = [a.index for a in t.topology.atoms if a.residue.name == residue]
# self.nres = len(self.res_ndx) // self.res.natoms
# self.calculate_initial_com(initial)
# print('Done. You are welcome. Next time run this script with -c centers.dat unless you want me to do this '
# 'calculation again. Even if this was quick, I prefer that I only have to do this once because I am a '
# 'lazy computer')
# else:
# with open(centers, 'r') as f:
# c = []
# for line in f:
# c.append(line)
#
# self.nres = len(c[0].split())
# self.centers = np.zeros([self.nres, self.K])
# for i in range(self.K):
# self.centers[:, i] = c[i].split()
# def calculate_initial_com(self, initial):
#
# self.centers = np.zeros([self.nres, self.K]) # calculate COM for each residue separately
# for i in range(self.K):
# pos = md.load('%s_%d.gro' % (initial, i)).xyz[0, self.res_ndx, :]
# for j in range(self.nres):
# self.centers[j, i] = center_of_mass(pos[j*self.res.natoms:(j+1)*self.res.natoms, :], self.res)[2]
#
# with open('centers.dat', 'w') as f:
# for i in range(self.K):
# c = [x for x in self.centers[:, i]]
# f.write('{:1.3f} {:1.3f} {:1.3f} {:1.3f}\n'.format(c[0], c[1], c[2], c[3]))
# ndx = []
# with open('index.ndx', 'r') as f:
# for line in f:
# ndx.append(line)
#
# membrane = 0
# while ndx[membrane].count('[ membrane ]') == 0:
# membrane += 1
# membrane += 1
#
# indices = []
# while ndx[membrane] != '\n':
# data = ndx[membrane].split()
# for i in data:
# indices.append(int(i))
# membrane += 1
#
# refcom = np.zeros([12, 1001])
# for i in range(12):
#
# t = md.load('umbrella_%d.trr' % i, top='umbrella_%d.gro' % i)
# pos = t.xyz[:, indices, 2] # z positions of all atoms within reference com index group
# w = np.array([Atom_props.mass[a.name] for a in t.topology.atoms if a.index in indices]) # weights for com measure
# refcom[i, :] = np.sum((pos * w), axis=1) / sum(w)
#
# with open('refcom.dat', 'w') as f:
# for i in range(1001):
# data = refcom[:, i]
# for d in data:
# f.write('%1.3f ' % d)
# f.write('\n')
class Umbrellas(object):
def __init__(self, K, basename, residue, k, T, dim='z', centers=None, initial=None):
"""
:param K: number of umbrella simulations
:param basename: Name of umbrella simulations where i is the state number indexed starting from 0. Assumes that
all files follow suit. i.e. there exists umbrella_i_pullx.xvg and umbrella_i.gro
:param residue: Name of residue whose center of mass was constrained
:param k : spring constant for harmonic restraint
"""
kB = 1.381e-23 * 6.022e23 / 1000.0 # Boltzmann constant in kJ/mol/K
self.beta = 1 / (kB * T)
self.spring_constant = k # (kJ mol^-1 nm^-2)
self.K = K
self.centers = np.zeros([self.K])
self.basename = basename
self.uncorrelated_samples = []
self.com_bin = []
self.histo = []
self.bin_centers = [] # for plotting
self.results = None
self.dimension = []
for d in 'xyz':
if d in dim:
self.dimension.append(d)
# read pullx files and store com location with each frame
# do the first one outside of the loop so we can intialize com_dist array to be the right size
pullx = []
with open('%s_1_pullx.xvg' % basename, 'r') as f:
for line in f:
if line[0] != '#' and line[0] != '@':
pullx.append(line)
# first column is time. Two columns per pull group (absolute distance, z-component distance)
self.nres = (len(pullx[0].split()) - 1) // 2
self.com_dist = np.zeros([self.K, self.nres, len(pullx)]) # initialize
c = []
with open('centers.txt', 'r') as f:
for line in f:
c.append(line)
initial_com = np.zeros([self.K, self.nres])
for i in range(1, self.K + 1):
initial_com[i - 1, :] = c[i].split()
# ref = ['C', 'C1', 'C2', 'C3', 'C4', 'C5']
# t = md.load('long_1.trr', top='long_1.gro')
# ref_atoms = [a.index for a in t.topology.atoms if a.name in ref and 1369 <= a.index < 2055]
# eth = [a.index for a in t.topology.atoms if a.residue.name == 'ETH']
# eth_names = [a.name for a in t.topology.atoms if a.residue.name == 'ETH']
# w = [Atom_props.mass[a] for a in eth_names]
# eth = eth[:9]
# w = w[:9]
#
# ref_com = np.zeros([t.n_frames])
# eth_com = np.zeros([t.n_frames])
# for i in range(t.n_frames):
# ref_com[i] = np.mean(t.xyz[i, ref_atoms, 2])
# for j in range(9):
# eth_com[i] += w[j]*t.xyz[i, eth[j], 2]
# eth_com[i] /= sum(w)
#
# plt.hist(np.abs(eth_com - ref_com), bins=25)
# plt.show()
#
# exit()
print('Reading pullx files...', end='', flush=True)
for i in range(self.K):
if i != 0:
pullx = []
with open('%s_%d_pullx.xvg' % (basename, i + 1), 'r') as f:
for line in f:
if line[0] != '#' and line[0] != '@':
pullx.append(line)
for j in range(self.com_dist.shape[2]):
self.com_dist[i, :, j] = pullx[j].split()[2::2] # extract COM dZ
self.com_dist += initial_com[:, :, None] # add initial com location to properly space apart histograms
colors = ['aqua', 'blue', 'coral', 'crimson', 'darkgreen', 'gold', 'lavender', 'magenta', 'orangered', 'plum',
'teal', 'violet']
self.centers = self.com_dist[:, :, 0]
for i in range(self.K):
plt.hist(self.com_dist[i, 0, :], bins=50, color=colors[i])
plt.plot([self.centers[i, 0], self.centers[i, 0]], [0, 10000], '--', color=colors[i])
# print(np.mean(self.com_dist[i, 0, :]))
# print(np.std(self.com_dist[i, 0, :]))
plt.show()
exit()
self.N = np.zeros([self.K, self.nres], dtype=int) # number of uncorrelated frames for each trajectory
self.u_kn = np.zeros_like(self.com_dist)
self.u_kln = np.zeros([self.nres, self.K, self.K, len(pullx)])
def extract_uncorrelated_samples(self, t=None):
"""
:param t: correlation time. Set the correlation time manually (in terms of frames between uncorrelated samples)
or let pymbar figure it out with the timeseries module by default (None)
"""
g = t
print('Extracting uncorrelated samples...', end='', flush=True)
for u in range(self.K):
for r in range(self.nres):
if not t:
g = pymbar.timeseries.statisticalInefficiency(self.com_dist[u, r, :], fft=True)
indices = pymbar.timeseries.subsampleCorrelatedData(self.com_dist[u, r, :], g=g)
self.N[u, r] = len(indices)
self.com_dist[u, r, :len(indices)] = self.com_dist[u, r, indices]
print('Done!')
def calculate_PMF(self, nbins):
#dz = self.com_dist - self.centers[..., np.newaxis] # deviation from restrained position
self.com_bin = np.zeros_like(self.com_dist, dtype=int) # [umbrella, residue, config]
self.histo = np.zeros([self.nres, self.K, nbins], dtype=int)
self.bin_centers = np.zeros([self.nres, nbins])
self.results = []
for i in range(self.nres):
print('Calculating PMF for Residue %d' % i, flush=True)
# left edge of bins
maxi = np.amax(self.com_dist[:, i, :])
mini = np.amin(self.com_dist[:, i, :])
delta = (maxi - mini) / nbins
bins = np.linspace(mini, maxi - delta, nbins) # some reasonable bounds
self.bin_centers[i, :] = bins + 0.5*delta
bins += delta # for proper output from np.digitize
for k in range(self.K):
for n in range(self.N[k, i]):
dz = self.com_dist[k, i, n] - self.centers[:, i]
self.u_kln[i, k, :, n] = self.u_kn[k, i, n] + 0.5 * self.beta * self.spring_constant * dz**2
self.com_bin[k, i, n] = np.digitize(self.com_dist[k, i, n], bins)
self.histo[i, k, :] = [np.sum(self.com_bin[k, i, :self.N[k, i]] == a) for a in range(nbins)]
mbar = pymbar.MBAR(self.u_kln[i, ...], self.N[:, i])
self.results.append(mbar.computePMF(self.u_kn[:, i, :], self.com_bin[:, i, :], nbins))
def plot_histograms(self, show=True, save=False, savename='histograms.png'):
for i in range(self.nres):
plt.figure(i + 1)
for k in range(self.K):
plt.plot(self.bin_centers[i, :], self.histo[i, k, :])
plt.tight_layout()
if save:
plt.savefig(savename)
if show:
plt.show()
def plot_PMF(self, show=True, save=False, savename='pmf.png'):
n = int(np.ceil(self.nres ** 0.5))
fig, ax = plt.subplots(n, n)
for i in range(n):
for j in range(n):
conf = i * n + j
if conf < self.nres:
ax[i, j].errorbar(self.bin_centers[conf, :], self.results[conf]['f_i'], yerr=self.results[conf]['df_i'])
ax[i, j].set_title('Residue %d' % conf)
ax[i, j].set_ylabel('Free Energy (kJ/mol)')
ax[i, j].set_xlabel('Distance from COM reference (nm)')
# plt.tight_layout()
if save:
plt.savefig(savename)
if show:
plt.show()
if __name__ == "__main__":
args = initialize()
# initialize umbrella data
u = Umbrellas(args.nstates, args.basename, args.residue, args.spring_constant, args.temperature,
initial=args.initial, centers=args.centers)
u.extract_uncorrelated_samples(t = 20)
u.calculate_PMF(args.bins)
# u.plot_histograms()
u.plot_PMF()
| [
"benjamin.coscia@colorado.edu"
] | benjamin.coscia@colorado.edu |
9a4bc2cd59a6a8bf06f6eca10852f04a5b9a2621 | dde8b97eee29cd6af17082cf84773d50bea7ca42 | /WHAnalysis/Configuration/python/skimming_ett_cff.py | 9894ce4a59888484a2e68d200d588496a4f1e0a8 | [] | no_license | calabria/WHAnalysis | 557cee96fe1dfe221a3a76f99b92f59c0800a8eb | 6cdcc0b73d94261f5ff7822b8bf5e48bc08268ae | refs/heads/master | 2021-01-23T13:36:11.593683 | 2014-04-12T10:39:44 | 2014-04-12T10:39:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | import FWCore.ParameterSet.Config as cms
from electronHistos_ett_cff import *
skimmedElectrons = cms.EDFilter("PATElectronSelector",
src = cms.InputTag("electronVariables"),
cut = cms.string('pt > 24. && abs(eta) < 2.1 && ((isEB && userFloat("PFRelIsoDB04") < 0.15) || (isEE && userFloat("PFRelIsoDB04") < 0.1))'),
filter = cms.bool(True)
)
skimmedTaus = cms.EDFilter("PATTauSelector",
src = cms.InputTag("tauVariables"),
cut = cms.string('pt > 30.0 && abs(eta) < 2.3 && tauID("decayModeFinding") > 0.5 && tauID("byLooseCombinedIsolationDeltaBetaCorr") > 0.5 && tauID("againstMuonLoose") > 0.5 && tauID("againstElectronLoose") > 0.5'),
filter = cms.bool(True)
)
skimmingSequence = cms.Sequence(
skimmedElectrons *
skimmedTaus
)
| [
"cesare.calabria23@gmail.com"
] | cesare.calabria23@gmail.com |
6bc04cbe9bef936e8b03f30257dd980642088635 | e70b678712a355a0b51632728c7781b0bdcf29f4 | /Algorithms/Python/Next-Permutation.py | 25484560a873d416844a871ab406890796b55883 | [] | no_license | keyi/Leetcode_Solutions | b3e3c6835ed335d7d4ad53a1b37e59ac15fcf3af | 69e4e969b435ff2796bd7c4b5dad9284a853ab54 | refs/heads/master | 2020-05-21T23:36:20.450053 | 2018-11-11T03:45:28 | 2018-11-11T03:45:28 | 33,714,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | class Solution(object):
def nextPermutation(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
left, right = -1, -1
for i in range(len(nums) - 1):
if nums[i] < nums[i + 1]:
left = i
if left == -1:
nums.reverse()
return
for i in range(left + 1, len(nums)):
if nums[i] > nums[left]:
right = i
nums[left], nums[right] = nums[right], nums[left]
nums[left + 1:] = nums[:left: -1]
| [
"yike921012@gmail.com"
] | yike921012@gmail.com |
2c46f60d2fd89cab8c491fad1b4dd269924164bf | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_118/977.py | 29a7741a218216554f1a711e65d07a2ace5e8d80 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | import math
def isPal(n):
if n%1 != 0: return False
sn = str(long(n))
for i in xrange(0, len(sn)):
if (sn[i] != sn[-i-1]): return False
return True
if __name__ == "__main__":
T = int(raw_input())
for c in xrange(1,T+1):
[A, B] = map(lambda x: long(x), raw_input().split())
cnt = 0
for i in xrange(A,B+1L):
if (isPal(i) and isPal(math.sqrt(i))): cnt+=1
print 'Case #%d: %d' % (c, cnt)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
2da7102a1a8c84e25261aa94efbfe23b159ec9aa | f769e0a8e80f604502a63ae0073b8e95c771bad8 | /blog_app/urls.py | 75de32e6b39b93bb047cf5261111293302901881 | [] | no_license | Justprince234/blog | 6c318f005346086b2df28741e689a032646d1c9f | 25004f3731045384dd423f8e6375a5e609fd5548 | refs/heads/master | 2023-01-23T14:07:36.119180 | 2020-08-06T00:57:07 | 2020-08-06T00:57:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | from django.urls import path
from blog_app import views
app_name = 'blog_app'
urlpatterns = [
path('category/<slug:slug>', views.blog_category, name='blog_category'),
path('search/', views.search, name='search'),
path('<slug:slug>/', views.post_detail, name='post_detail'),
path('', views.post_list, name='home'),
] | [
"princewilljackson@ymail.com"
] | princewilljackson@ymail.com |
79ace01feaaf47a4d80a5259ed902b36c4c7207c | 9028b6983685a3ace074049fccf2b8c503b77de8 | /PyStationB/libraries/GlobalPenalisation/gp/base/chain_rule.py | c6d02e92c609b00f5467ad8833e4b3110a301889 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | mebristo/station-b-libraries | 7f5517e5e77e6cdc54c03355804b8c0a4fcae65b | 40bab526af6562653c42dbb32b174524c44ce2ba | refs/heads/main | 2023-09-03T03:54:53.181082 | 2021-10-01T03:21:11 | 2021-10-01T03:21:11 | 412,871,835 | 0 | 0 | MIT | 2021-10-02T17:53:07 | 2021-10-02T17:53:06 | null | UTF-8 | Python | false | false | 3,144 | py | # -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
import numpy as np
def chain_rule_means_vars(d_acq_dy: np.ndarray, dy_dx: np.ndarray) -> np.ndarray:
"""Implements the chain rule with respect to the means/standard deviation vectors for candidate points.
Args:
d_acq_dy: gradient of acquisition with respect to one-dimensional variable (e.g. mean, var or std).
Shape (n_candidates, 1)
dy_dx: gradient of the variable with respect to the inputs. Shape (n_candidates, n_inputs)
Returns:
d_acq_dx, shape (n_candidates, n_inputs). Note that it's not the whole expression, if ``acq`` depends on other
variable than ``y`` as well
"""
return d_acq_dy * dy_dx
def chain_rule_cross_covariance(d_acq_d_cov: np.ndarray, d_cov_dx: np.ndarray) -> np.ndarray:
"""Implements the chain rule with respect to the cross-covariance matrix between candidate points and selected points.
Args:
d_acq_d_cov: gradient of acquisition with respect to covariance matrix between candidates and selected points.
Shape (n_candidates, n_selected)
d_cov_dx: gradient of covariance matrix between candidates and selected points with respect to the inputs.
Shape (n_candidates, n_selected, n_inputs)
Returns:
d_acq_dx, shape (n_candidates, n_selected).
Note that it's not the whole expression, if ``acq`` depends on other variable than ``cov`` as well
"""
return np.einsum("ij,ijk -> ik", d_acq_d_cov, d_cov_dx)
def chain_rule_means_from_predict_joint(d_acq_d_means: np.ndarray, d_means_dx: np.ndarray) -> np.ndarray:
"""
Args:
d_acq_d_means: gradient of acquisition with respect to the means vector. Shape (n_points, 1).
d_means_dx: gradient of the means vector with respect to the inputs vector.
Shape (n_points, n_points, input_dim)
Returns:
part of d_acq_dx, which can be calculated from the chain rule with respect to the means vector.
Shape (n_points, input_dim)
"""
d_acq_d_means = d_acq_d_means.ravel()
return np.einsum("i,ikl", d_acq_d_means, d_means_dx)
def chain_rule_covariance(d_acq_d_covariance: np.ndarray, d_covariance_dx: np.ndarray) -> np.ndarray:
"""Chain rule for the gradient with respect to covariance.
Args:
d_acq_d_covariance: gradients of the acquisition function with respect to the covariance.
Shape (n_points, n_points)
d_covariance_dx: gradients of the covariance matrix entries with respect to the inputs.
Shape (n_points, n_points, n_points, input_dim)
Returns:
part of d_acq_dx, which can be calculated from the chain rule with respect to covariance.
Shape (n_points, input_dim)
"""
return np.einsum("ij,ijkl", d_acq_d_covariance, d_covariance_dx)
| [
"noreply@github.com"
] | mebristo.noreply@github.com |
78d810e3b5a2e9540be39cafd4404716991161ff | 119f87ff16278614dce6571a451c54b839a4bead | /catalyst/utils/meters/ppv_tpr_f1_meter.py | 3fb52f541fa221d5341855724b7597f9e2967f65 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | valyukov/catalyst | 17e6243cd2b0f9c790645647b7946ef05c1b57fa | a1d2638f22ff7bede74410baeb8bc6a7aff036df | refs/heads/master | 2020-08-04T17:01:27.565749 | 2020-02-15T13:33:44 | 2020-02-15T13:33:44 | 212,212,412 | 0 | 0 | Apache-2.0 | 2019-10-01T22:39:05 | 2019-10-01T22:39:05 | null | UTF-8 | Python | false | false | 3,662 | py | from collections import defaultdict
import torch
from . import meter
def f1score(precision_value, recall_value, eps=1e-5):
"""
Calculating F1-score from precision and recall to reduce computation
redundancy.
Args:
precision_value: precision (0-1)
recall_value: recall (0-1)
Returns:
F1 score (0-1)
"""
numerator = 2 * (precision_value * recall_value)
denominator = precision_value + recall_value + eps
return numerator / denominator
def precision(tp, fp, eps=1e-5):
"""
Calculates precision (a.k.a. positive predictive value) for binary
classification and segmentation.
Args:
tp: number of true positives
fp: number of false positives
Returns:
precision value (0-1)
"""
# originally precision is: ppv = tp / (tp + fp + eps)
# but when both masks are empty this gives: tp=0 and fp=0 => ppv=0
# so here precision is defined as ppv := 1 - fdr (false discovery rate)
return 1 - fp / (tp + fp + eps)
def recall(tp, fn, eps=1e-5):
"""
Calculates recall (a.k.a. true positive rate) for binary classification and
segmentation
Args:
tp: number of true positives
fn: number of false negatives
Returns:
recall value (0-1)
"""
# originally reacall is: tpr := tp / (tp + fn + eps)
# but when both masks are empty this gives: tp=0 and fn=0 => tpr=0
# so here recall is defined as tpr := 1 - fnr (false negative rate)
return 1 - fn / (fn + tp + eps)
class PrecisionRecallF1ScoreMeter(meter.Meter):
"""
Keeps track of global true positives, false positives, and false negatives
for each epoch and calculates precision, recall, and F1-score based on
those metrics. Currently, this meter works for binary cases only, please
use multiple instances of this class for multi-label cases.
"""
def __init__(self, threshold=0.5):
super(PrecisionRecallF1ScoreMeter, self).__init__()
self.threshold = threshold
self.reset()
def reset(self):
"""
Resets true positive, false positive and false negative counts to 0.
"""
self.tp_fp_fn_counts = defaultdict(int)
def add(self, output, target):
"""
Thresholds predictions and calculates the true positives,
false positives, and false negatives in comparison to the target.
Args:
output (torch.Tensor):
prediction after activation function
shape should be (batch_size, ...), but works with any shape
target (torch.Tensor):
label (binary)
shape should be the same as output's shape
Returns:
None
"""
output = (output > self.threshold).float()
tp = torch.sum(target * output)
fp = torch.sum(output) - tp
fn = torch.sum(target) - tp
self.tp_fp_fn_counts["tp"] += tp
self.tp_fp_fn_counts["fp"] += fp
self.tp_fp_fn_counts["fn"] += fn
def value(self):
"""
Calculates precision/recall/f1 based on the current stored
tp/fp/fn counts.
Args:
None
Returns:
tuple of floats: (precision, recall, f1)
"""
precision_value = precision(self.tp_fp_fn_counts["tp"],
self.tp_fp_fn_counts["fp"])
recall_value = recall(self.tp_fp_fn_counts["tp"],
self.tp_fp_fn_counts["fn"])
f1_value = f1score(precision_value, recall_value)
return (float(precision_value), float(recall_value), float(f1_value))
| [
"scitator@gmail.com"
] | scitator@gmail.com |
59fd2d2296999f253b25fe78c0624d986b164c3f | 7911da973079f325a515cd2ee66f7590a9f32e48 | /great_divice.py | 5edc06a2eac8745bdbb62b1d2d89b3b76c151283 | [] | no_license | Ponkiruthika112/Guvi | 5d2ff3dcf55d6c52c0f09a1e577d8b11632c7a92 | 319e5b4dab5654fabc25ef15c1d528f76d833c15 | refs/heads/master | 2020-04-21T06:05:03.581658 | 2018-08-02T05:53:48 | 2018-08-02T05:53:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | n1=int(input("N1 value:"))
n2=int(input("N2 value:"))
maximum=1
for x in range(1,min(n1,n2)+1):
if n1%x==0 and n2%x==0:
if x>maximum:
maximum=x
print("Ans is:",maximum)
| [
"noreply@github.com"
] | Ponkiruthika112.noreply@github.com |
905ef18e702d20a1fcff2e1cadc2674abfa4e3af | 6dae31f10260e39feae9d268e3ebe6d23146575a | /galaxy/bin_eBOSS_ELG/create_stack_list_ELG_all.py | 55cc2990fa24df1a48b2ead2e16fef481eaded1d | [
"CC0-1.0"
] | permissive | JohanComparat/pySU | e55eba92f0660e733468bce618595a03dc25a3d2 | 4169e11414be661dc0c01c774e64fb8ce6242825 | refs/heads/master | 2021-12-25T11:06:04.315554 | 2021-10-11T12:03:22 | 2021-10-11T12:03:22 | 44,340,565 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,530 | py | #! /usr/bin/env python
"""
This script produces the stacks for emission line luminosity limited samples.
"""
import sys
import os
from os.path import join
import glob
import numpy as n
import astropy.io.fits as fits
import SpectraStackingEBOSS as sse
from scipy.interpolate import interp1d
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115)#, Ob0=0.048206)
# create all input files :
#path_2_cat = join(os.environ['HOME'],"SDSS/lss/catalogs/3", "inputs/ELG.v5_10_10.all.fits")
path_2_cat = join(os.environ['HOME'],"SDSS/lss/catalogs/4", "inputs/ELG.v5_11_0.rrv2.all.fits")
cat = fits.open(path_2_cat)[1].data
Ngal = len(cat)
N_in_stack = 200000
N_factor = 4
#bins_2nd = n.arange(N_in_stack, N_in_stack*N_factor, N_in_stack)
print(Ngal)
#print(bins_2nd)
NNN,BBB=n.histogram(cat['Z'], bins=n.arange(0,4,0.001))
N_CM = n.cumsum(NNN)
N_bins = n.arange(N_in_stack*N_factor, N_CM.max(), N_in_stack*N_factor)
itp = interp1d(N_CM, BBB[:-1])
z_mins = itp(N_bins)[:-1]
z_maxs = itp(N_bins)[1:]
# CREATES A few stacks as a function of [OII] EW
z0,z1 = 0.2, 1.5
selection = (cat['rr_Z']>z0) & (cat['rr_Z']<z1) & (cat['rr_ZWARN']<=4)
ids_sort = n.argsort(cat['rr_Z'][selection])
DATA = n.transpose([ cat['plate'], cat['MJD'], cat['FIBERID'], cat['rr_Z'] ]) [selection][ids_sort]
path_2_input = join(os.environ['HOME'],"SDSS/stacks", "eboss-elg_"+str(z0)+"_z_"+str(z1)+".asc")
print(path_2_input)
print(len(DATA))
n.savetxt(path_2_input, DATA)
| [
"johan.comparat@gmail.com"
] | johan.comparat@gmail.com |
da2c73030f131e6a6657b73707e086a447727784 | 558e979b7c6d5dc2599453392ed624265a831d0d | /glamazer/favorites/models.py | cdc9a8d82dafb91981adcda84c1f1522fd8fdf14 | [] | no_license | SpeeDly/partytask | af08ca089d518bc0d09dda61e68ce3c1d117ab2b | c9a813bc130c41995140adaa4a19344791b89968 | refs/heads/master | 2021-01-10T15:42:41.761160 | 2016-02-15T13:48:13 | 2016-02-15T13:48:13 | 51,758,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | from django.db import models
from django.contrib.auth.models import User
from glamazer.listings.models import Listing
class Favorite(models.Model):
user = models.ForeignKey(User)
listing = models.ForeignKey(Listing)
date = models.DateTimeField(auto_now_add=True) | [
"zhuhov@gmail.com"
] | zhuhov@gmail.com |
5e03fc0c1a8453bd3880c448edf424c8704ddbc0 | 54bd004dd18f23b46fd75288823977a93d6c7c9d | /Python_basics/p38.py | d7497f9f0ff4cade758fa4609d61848746daea7f | [] | no_license | Gagangithub1988/Python | 13f914a200f6f4750c1b7da1467ca7e3f48814d0 | 8c9ba1902ac45841fd3145d49b08547420f15f2d | refs/heads/master | 2022-11-03T22:12:51.799829 | 2020-06-20T06:46:45 | 2020-06-20T06:46:45 | 273,642,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | def fun(x,y):
result = (x+y)**2
return result
print(fun(3,4)) | [
"noreply@github.com"
] | Gagangithub1988.noreply@github.com |
b109cb8b121bd51d0db13746d6f51a2a11d5ce4e | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa2/sample/class_def_attr-107.py | 7f478ef28ce91a5201adf88cbe7f4d1ad105b948 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | class A(object):
x:int = 1
class B(A):
def __init__(self: "B"):
pass
class C(B):
z:bool = True
a:A = None
b:B = None
c:C = None
a = A()
b = B()
c = C()
$Statement
b.x = a.x
c.z = a.x == b.x
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
361eb11c018dc172bfabcdcd14403d0fe324b70b | 4717e299b70d658f607becacd5f202cdba904181 | /scripts/mount-image-partitions.sh | f17ca7525822a6916e1d6b07b2ee4de504e51ea7 | [
"MIT"
] | permissive | deets/yocto-pi-lap-timer | 8ea4840320675cb3c22a73fc25199fb96d379f9b | 857ea7e27ed3df979fbf22129da428430a40426c | refs/heads/master | 2023-01-24T06:36:28.649809 | 2023-01-01T18:13:37 | 2023-01-01T18:13:37 | 245,871,535 | 0 | 0 | MIT | 2023-01-20T22:30:26 | 2020-03-08T19:04:35 | C++ | UTF-8 | Python | false | false | 3,685 | sh | #!/usr/bin/env python3
# -*- mode: python -*-
# Copyright: 2020, Diez B. Roggisch, Berlin . All rights reserved.
import argparse
import json
import logging
import os
import subprocess
import sys
USAGE = """Images containing several partitions are
cumbersome to inspect. You need to figure out partition block ranges,
and mount these using the loopback device.
This script encapsulates this in a convenient package and prints out
the needed umount commands."""
def collect_partitions_dims(image):
p = subprocess.run(
[
"fdisk",
"-usectors",
"-l", image,
],
check=True,
stdout=subprocess.PIPE,
)
def safe_int(value):
try:
return int(value)
except ValueError:
return None
partitions = [
line for line in p.stdout.decode("ascii").split("\n")
if line.startswith(image)
]
dims = [
[safe_int(v) for v in p.split() if safe_int(v) is not None][0:3:2]
for p in partitions
]
return dims
def parse_args():
parser = argparse.ArgumentParser(usage=USAGE)
parser.add_argument("image", help="The disk image to mount")
parser.add_argument(
"--json",
action="store_true",
help="When given, write JSON data with mountpoints instead of umount commands",
)
parser.add_argument(
"--prefix",
help="Where to mount the partitions - defaults to /media",
default="/media",
)
parser.add_argument(
"--name",
help="Name of the partitions (will be appended with p0, p1, ..)."
" Defaults to the basename of the image.",
)
parser.add_argument(
"--sudo",
action="store_true",
help="Prepend 'sudo' to the mount commands executed by this script",
)
parser.add_argument(
"-n", "--dry-run", action="store_true", help="Don't actually mount, only pretend"
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="More verbose output."
)
return parser.parse_args()
def main():
opts = parse_args()
logging.basicConfig(
level=logging.DEBUG if opts.verbose else logging.INFO,
stream=sys.stderr,
)
if opts.name is None:
opts.name = os.path.splitext(os.path.basename(opts.image))[0]
partition_dims = collect_partitions_dims(opts.image)
mountpoints = []
pending_error = None
for i, (offset, size) in enumerate(partition_dims):
logging.debug(f"partition {i}-> offset: {offset}, size: {size}")
mountpoint = "{}/{}p{}".format(opts.prefix, opts.name, i)
if not os.path.exists(mountpoint) and not opts.dry_run:
os.mkdir(mountpoint)
cmd = [
"mount",
"-o",
"rw,loop,offset={},sizelimit={}".format(offset * 512, size * 512),
"-t",
"auto",
opts.image,
mountpoint,
]
if opts.sudo:
cmd.insert(0, "sudo")
try:
if not opts.dry_run:
subprocess.run(cmd, check=True)
mountpoints.append(mountpoint)
else:
logging.debug(" ".join(cmd))
except subprocess.CalledProcessError as e:
logging.exception("Failed to mount partition p%d", i)
pending_error = e
if opts.json:
print(json.dumps(mountpoints))
else:
for x in mountpoints:
print(f"{'sudo' if opts.sudo else ''} umount {x}")
if pending_error:
logging.error("One or more partitions failed to mount")
sys.exit(1)
if __name__ == "__main__":
main()
| [
"deets@web.de"
] | deets@web.de |
ed149966b9509c17ce4cb3de3841e639cb2c9a4b | 88ae8695987ada722184307301e221e1ba3cc2fa | /chrome/browser/ui/webui/side_panel/read_anything/DEPS | 0d08619c1aa0d97e9806133820d5e14f04bdfaf3 | [
"BSD-3-Clause"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 230 | include_rules = [
"+chrome/common/accessibility",
]
specific_include_rules = {
"read_anything_page_handler\.*": [
"+chrome/browser/ui/views/side_panel/read_anything",
"+chrome/browser/ui/views/frame/browser_view.h",
],
}
| [
"jengelh@inai.de"
] | jengelh@inai.de | |
54f7efa4be54b7bfa4d320fc695d8ebaee3721de | d191a04a3ded41175ea84ae88ebddb4f262b7fb1 | /tree/leaf-similar_trees.py | f3e2de3f3491a207853a3eb9634c2e7fd4401330 | [] | no_license | YLyeliang/now_leet_code_practice | ae4aea945bae72ec08b11e57a8f8a3e81e704a54 | 204d770e095aec43800a9771fe88dd553463d2f7 | refs/heads/master | 2022-06-13T20:22:51.266813 | 2022-05-24T05:29:32 | 2022-05-24T05:29:32 | 205,753,056 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,193 | py | # Consider all the leaves of a binary tree. From left to right order, the values of those leaves form a leaf value sequence.
#
# 3
# 5 1
# 6 2 9 8
# 7 4
# For example, in the given tree above, the leaf value sequence is (6, 7, 4, 9, 8).
#
# Two binary trees are considered leaf-similar if their leaf value sequence is the same.
#
# Return true if and only if the two given trees with head nodes root1 and root2 are leaf-similar.
#
#
#
# Constraints:
#
# Both of the given trees will have between 1 and 200 nodes.
# Both of the given trees will have values between 0 and 200
# 分析:这个问题需要解决的是给定两个二叉树,每个二叉树从左到右的叶节点构成一个数组。问两个二叉树的数组是否一致。
# 最直接的解决方法就是利用DFS遍历两棵树,并将叶节点的值保存到两个数组中,对比两个数组是否一致,该方法需要完全遍历两棵树,需要O(2N)time,O(2k)space,k为叶结点数。
# 优化:在遍历过程中,如果有任意一个有序的结点值不相等,则返回false.且可以不使用数组进行保存。考虑到两颗二叉树的叶节点位置不一定相同,找到叶节点需要迭代的次数也不一致。
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def leafSimilar(self, root1: TreeNode, root2: TreeNode) -> bool:
res1=[]
res2=[]
def dfs(root,res):
if not root:return None
if not root.left and not root.right:res.append(root.val)
dfs(root.left,res)
dfs(root.right,res)
dfs(root1,res1)
dfs(root2,res2)
return True if res1==res2 else False
# 优化,time:最坏情况下O(2n); space:O(1)
class Solution:
def leafSimilar(self, root1: TreeNode, root2: TreeNode) -> bool:
def dfs(root):
if not root:return []
if not (root.left or root.right):return [root.val]
return dfs(root.left)+dfs(root.right)
return dfs(root1)==dfs(root2)
| [
"k87974@163.com"
] | k87974@163.com |
832551fed04256accf01a92952917bc2b13db83a | 772b0df2635b95644ea3eb370103174804024167 | /scripts/exonU.py | ae8f5252c8e3b2a19529d53f4cea5dae88284b92 | [
"MIT"
] | permissive | 4dn-dcic/clodius | ec909bda90a9df13fa1b85472951f6cf149213a5 | aa31b3d90a5a9fec883c20cab31ad4d347cd52cd | refs/heads/develop | 2020-04-17T23:31:32.114043 | 2019-04-02T14:01:46 | 2019-04-02T14:01:46 | 167,038,915 | 0 | 0 | MIT | 2019-03-28T20:10:46 | 2019-01-22T17:43:32 | Python | UTF-8 | Python | false | false | 4,095 | py | from __future__ import print_function
__author__ = "Alaleh Azhir,Peter Kerpedjiev"
#!/usr/bin/python
import collections as col
import sys
import argparse
class GeneInfo:
def __init__(self):
pass
def merge_gene_info(gene_infos, gene_info):
'''
Add a new gene_info. If it's txStart and txEnd overlap with a previous entry for this
gene, combine them.
'''
merged = False
for existing_gene_info in gene_infos[gene_info.geneId]:
if (existing_gene_info.chrName == gene_info.chrName and
existing_gene_info.txEnd > gene_info.txStart and
gene_info.txEnd > existing_gene_info.txStart):
# overlapping genes, merge the exons of the second into the first
existing_gene_info.txStart = min(existing_gene_info.txStart,
gene_info.txStart)
existing_gene_info.txEnd = max(existing_gene_info.txEnd,
gene_info.txEnd)
for (exon_start, exon_end) in gene_info.exonUnions:
existing_gene_info.exonUnions.add((exon_start, exon_end))
merged = True
if not merged:
gene_infos[gene_info.geneId].append(gene_info)
return gene_infos
def main():
parser = argparse.ArgumentParser(description="""
python ExonUnion.py Calculate the union of the exons of a list
of transcript.
chr10 27035524 27150016 ABI1 76 - NM_001178120 10006 protein-coding abl-interactor 1 27037498 27149792 10 27035524,27040526,27047990,27054146,27057780,27059173,27060003,27065993,27112066,27149675, 27037674,27040712,27048164,27054247,27057921,27059274,27060018,27066170,27112234,27150016,
""")
parser.add_argument('transcript_bed')
#parser.add_argument('-o', '--options', default='yo',
# help="Some option", type='str')
#parser.add_argument('-u', '--useless', action='store_true',
# help='Another useless option')
args = parser.parse_args()
inputFile = open(args.transcript_bed, 'r')
gene_infos = col.defaultdict(list)
for line in inputFile:
words = line.strip().split("\t")
gene_info = GeneInfo()
try:
gene_info.chrName = words[0]
gene_info.txStart = words[1]
gene_info.txEnd = words[2]
gene_info.geneName = words[3]
gene_info.score = words[4]
gene_info.strand = words[5]
gene_info.refseqId = words[6]
gene_info.geneId = words[7]
gene_info.geneType = words[8]
gene_info.geneDesc = words[9]
gene_info.cdsStart = words[10]
gene_info.cdsEnd = words[11]
gene_info.exonStarts = words[12]
gene_info.exonEnds = words[13]
except:
print("ERROR: line:", line, file=sys.stderr)
continue
# for some reason, exon starts and ends have trailing commas
gene_info.exonStartParts = gene_info.exonStarts.strip(",").split(',')
gene_info.exonEndParts = gene_info.exonEnds.strip(",").split(',')
gene_info.exonUnions = set([(int(s), int(e)) for (s,e) in zip(gene_info.exonStartParts, gene_info.exonEndParts)])
# add this gene info by checking whether it overlaps with any existing ones
gene_infos = merge_gene_info(gene_infos, gene_info)
for gene_id in gene_infos:
for contig in gene_infos[gene_id]:
output = "\t".join(map(str, [contig.chrName, contig.txStart, contig.txEnd,
contig.geneName, contig.score, contig.strand,
'union_' + gene_id, gene_id, contig.geneType, contig.geneDesc,
contig.cdsStart, contig.cdsEnd,
",".join([str(e[0]) for e in sorted(contig.exonUnions)]),
",".join([str(e[1]) for e in sorted(contig.exonUnions)])]))
print(output)
if __name__ == '__main__':
main()
| [
"pkerpedjiev@gmail.com"
] | pkerpedjiev@gmail.com |
acfa4053f94dbce50a7b89103402ace6cd401a3d | 65329299fca8dcf2e204132624d9b0f8f8f39af7 | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/interfaces/interface/timers/__init__.py | 104d9b2637c313bdee6b3a7271d1e67c2fbb956f | [
"Apache-2.0"
] | permissive | darylturner/napalm-yang | bf30420e22d8926efdc0705165ed0441545cdacf | b14946b884ad2019b896ee151285900c89653f44 | refs/heads/master | 2021-05-14T12:17:37.424659 | 2017-11-17T07:32:49 | 2017-11-17T07:32:49 | 116,404,171 | 0 | 0 | null | 2018-01-05T16:21:37 | 2018-01-05T16:21:36 | null | UTF-8 | Python | false | false | 14,642 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import config
import state
class timers(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/timers. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container describes ISIS interface timers configuration
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__config','__state',)
_yang_name = 'timers'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'interfaces', u'interface', u'timers']
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/timers/config (container)
YANG Description: Configuration parameters relating to interface
timers for IS-IS
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/timers/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to interface
timers for IS-IS
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
})
self.__config = t
if hasattr(self, '_set'):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/timers/state (container)
YANG Description: This container defines state information for ISIS interface timers.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/timers/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: This container defines state information for ISIS interface timers.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """state must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
})
self.__state = t
if hasattr(self, '_set'):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = {'config': config, 'state': state, }
import config
import state
class timers(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/timers. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container describes ISIS interface timers configuration
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__config','__state',)
_yang_name = 'timers'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'interfaces', u'interface', u'timers']
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/timers/config (container)
YANG Description: Configuration parameters relating to interface
timers for IS-IS
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/timers/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to interface
timers for IS-IS
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
})
self.__config = t
if hasattr(self, '_set'):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/timers/state (container)
YANG Description: This container defines state information for ISIS interface timers.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/timers/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: This container defines state information for ISIS interface timers.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """state must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
})
self.__state = t
if hasattr(self, '_set'):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = {'config': config, 'state': state, }
| [
"dbarrosop@dravetech.com"
] | dbarrosop@dravetech.com |
6d26e7d8a39dea94a32fcdd63dc99c2a597f95bb | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2821/60651/234231.py | f4edee98f21804eb117fe388b3c8b273936dceec | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | n=int(input())
list1=input().split()
list1=[int(x) for x in list1]
p1=0
p2=0
while(len(list1)>1):
if list1[0]>=list1[len(list1)-1]:
p1+=list1[0]
del(list1[0])
else:
p1+=list1[len(list1)]
del(list1[len(list1)])
if list1[0]>=list1[len(list1)-1]:
p2+=list1[0]
del(list1[0])
else:
p2+=list1[len(list1)]
del(list1[len(list1)])
if len(list1)==1:
p1+=list1[0]
print(str(p1)+" "+str(p2))
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
75a398246bbdcce760ca60cf2878fa345bbdca0c | 099256b28df65fb7c90c077b060dca16b8655235 | /reinforcement_learning/0x00-q_learning/2-epsilon_greedy.py | 2430cee4b9e0d6d3beea190f0e178793a8c3ec3b | [] | no_license | Immaannn2222/holbertonschool-machine_learning | 1cebb9a889b363669bed7645d102dc56ab943c08 | 80bf8d3354702f7fb9f79bbb5ed7e00fc19f788d | refs/heads/master | 2023-08-01T05:35:00.180472 | 2021-09-22T20:28:17 | 2021-09-22T20:28:17 | 317,624,526 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | #!/usr/bin/env python3
"""Q-Learning"""
import gym
import numpy as np
def epsilon_greedy(Q, state, epsilon):
"""uses epsilon-greedy to determine the next action"""
p = np.random.uniform(0, 1)
if p < epsilon:
action = np.random.randint(Q.shape[1])
else:
action = np.argmax(Q[state, :])
return action
| [
"imennaayari@gmail.com"
] | imennaayari@gmail.com |
03677753672e492a352d7591d8b920b07ca19949 | b96f1bad8a74d31d8ff79bc955813bfcd17d7b26 | /560. Subarray Sum Equals K.py | 7388bfa4045464b95f7ef4a1835a360ec064851a | [] | no_license | brianhu0716/LeetCode-Solution | e7177af15e84e833ce8ab05027683ed4ac489643 | 158a4359c90b723545b22c4898047274cc1b80a6 | refs/heads/main | 2023-07-11T05:29:56.783795 | 2021-08-28T12:53:14 | 2021-08-28T12:53:14 | 374,991,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,527 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 4 18:21:15 2021
@author: Brian
"""
'''
前綴和(Prefix Sum)的概念,前綴和是指由index = 0至index = i(i < len(nums))所累加成的數字組成的數列,此種概念非常適合解
這類序列中有正有負,又要求必須要連續的題型,以此題為例,目標是要找到和為k的子序列個數,因此我們先建立一個由前綴和為key值,
出現次數為value值的表,每當我們計算一次前綴和後我們同時可以查表中是否有出現當前prefixSum - k的key出現,有的話代表prefixSum - k
對應的次數即為出現再prefixSum之前且與prefixSum相差k的連續字序列的個數,答案就可以再不斷更新prefixSum的同時累加prefixSum - k對應
的value後得到。如果希望看到prefixSum - k以及prefixSum出現位置可以參照註解部分的程式碼,再更新完prefixSum的字典後,依次查詢key與
key - k是否成對存在,如果都存在,檢驗key中的idx_b是否有大於key - k中的idx_f,有的話加1;這樣的寫法在初始化字典時先給出{0 : [-1]}
的值,代表在位置-1時前綴和為0
*** 關於初始化值的問題可以參考這的範例nums = [3,...],k = 3,如果不先初始化前綴和為0的位置或次數,答案一定會少算一個因為在index
為0的時候,predixSum為3,對應的prefixSum - k 等於0,如果不先初始化就查無鍵值,直接少加1次正確答案
'''
nums = [1,-1,0]
k = 0
nums = [1,1,1]
k = 2
nums = [1,2,3]
k = 3
class Solution:
def subarraySum(self, nums, k: int) -> int:
occur,prefixSum,ans = {0 : 1},0,0 # {0 : 1}的意思是再index = 0之前的前綴和 = 0,出現一次
for num in nums:
prefixSum += num
ans += occur.get(prefixSum - k,0) # 一定要先計算prefixSum - k的個數,避免k = prefixSum = 0的狀況會出現錯誤
occur[prefixSum] = occur.get(prefixSum,0) + 1
return ans
'''
occur,prefixSum,ans = {0 : [-1]},0,0
for i in range(len(nums)):
prefixSum += nums[i]
if prefixSum not in occur.keys():
occur[prefixSum] = [i]
else:
occur[prefixSum].append(i)
for key in occur.keys():
if key - k in occur.keys():
for idx_b in occur[key]:
for idx_f in occur[key - k]:
if idx_b > idx_f:
ans += 1
return ans
'''
| [
"85205343+brianhu0716@users.noreply.github.com"
] | 85205343+brianhu0716@users.noreply.github.com |
c2a8f3da8dd2bd5019bd6fc5e761b3e7657d292d | a439ca43178d38cfe6daaee50ea134ca6c52b502 | /thaniya_server_upload/src/thaniya_server_upload/__init__.py | bc23281f002d4aaf2bd5c044627665570e87f050 | [
"Apache-2.0"
] | permissive | jkpubsrc/Thaniya | 37ca727abdc6f9f605257813889fe3a033995bba | 4ebdf2854e3d7888af7396adffa22628b4ab2267 | refs/heads/master | 2023-03-05T20:58:59.528746 | 2021-02-15T19:31:06 | 2021-02-15T19:31:06 | 331,318,787 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py |
__version__ = "0.2021.1.20"
from .UploadHttpdCfg import UploadHttpdCfg
from .IAppRuntimeUserMgr import IAppRuntimeUserMgr
from .AppRuntimeUploadHttpd import AppRuntimeUploadHttpd
| [
"pubsrc@binary-overflow.de"
] | pubsrc@binary-overflow.de |
a9ffb5601e405aada062d39d769a9c49544fc474 | 092056c026f3ef162c31bca004a596bbe78948e9 | /w261/wk3/reducer_hw32.py | 10741e7585b2d326ecaeb8047f54c062d1939548 | [] | no_license | sayantansatpati/ml | 4138bbafd216a8ad848a56e4818163649a28b6a9 | 9f1765b716f39a1ef159db98b2813761bbc14b60 | refs/heads/master | 2021-01-19T03:19:42.734130 | 2019-03-12T15:44:15 | 2019-03-12T15:44:15 | 36,243,314 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,235 | py | #!/usr/bin/python
import sys
import re
import heapq
itemset_1_cnt = 0
itemset_2_cnt = 0
itemset_1_last = None
itemset_2_last = None
'''
a1,* 1
a1,* 1
a1,b1 1
a1,b1 1
a1,b2 1
a1,b2 1
a2,* 1
'''
THRESHOLD = 100
# Store Itemsets 2
dict = {}
for line in sys.stdin:
# Remove leading & trailing chars
line = line.strip()
# Split the line by <TAB> delimeter
tokens = re.split(r'\s+', line)
# Split the key by <COMMA> delimeter
items = tokens[0].split(",")
i1 = items[0]
i2 = items[1]
if not itemset_1_last:
itemset_1_last = i1
if itemset_1_last != i1:
'''
if itemset_1_cnt >= THRESHOLD:
confidence = (itemset_2_cnt * 1.0) / itemset_1_cnt
print '[%d,%d]%s\t%f' %(itemset_1_cnt, itemset_2_cnt, tokens[0], confidence)
dict[tokens[0]] = confidence
'''
# Reset
itemset_1_last = i1
itemset_1_cnt = int(tokens[1])
itemset_2_last = None
itemset_2_cnt = 0
else:
if i2 == '*':
itemset_1_cnt += int(tokens[1])
else:
if itemset_2_last != tokens[0]:
if itemset_1_cnt >= THRESHOLD and itemset_2_cnt >= THRESHOLD:
confidence = (itemset_2_cnt * 1.0) / itemset_1_cnt
#print '[%d,%d]%s\t%f' %(itemset_1_cnt, itemset_2_cnt, itemset_2_last, confidence)
dict[itemset_2_last] = confidence
itemset_2_last = tokens[0]
itemset_2_cnt = int(tokens[1])
else:
itemset_2_cnt += int(tokens[1])
# Last Set of Counts
if itemset_1_cnt >= THRESHOLD and itemset_2_cnt >= THRESHOLD:
confidence = (itemset_2_cnt * 1.0) / itemset_1_cnt
#print '[%d,%d]%s\t%f' %(itemset_1_cnt, itemset_2_cnt, itemset_2_last, confidence)
dict[itemset_2_last] = confidence
print '=== Top 5 Confidence ==='
sorted_dict = sorted(dict.items(), key=lambda x:(-x[1], x[0]))
for j,k in sorted_dict[:5]:
print '%s\t%f' %(j,k)
| [
"sayantan.satpati.sfbay@gmail.com"
] | sayantan.satpati.sfbay@gmail.com |
f2771a5178bd4266291d576c422b0f411c8ebd69 | c5ac7d9c4e4ad2f7809777c4e54f56b99eba1188 | /Elastix/Elastix.py | a886293f63dcbd5628cf9d44c9f8f024a9b16d38 | [] | no_license | mirabelarusu/SlicerElastix | e4843c401097b6c4835a49916657d9fb22e68b7b | 7d3deb3985d63344eb501892ae04a05e8bbbd253 | refs/heads/master | 2020-03-26T12:21:54.733526 | 2018-05-30T19:39:15 | 2018-05-30T19:39:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,884 | py | import os
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
#
# Elastix
#
class Elastix(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "General Registration (Elastix)"
self.parent.categories = ["Registration"]
self.parent.dependencies = []
self.parent.contributors = ["Andras Lasso (PerkLab - Queen's University)"]
self.parent.helpText = """Align volumes based on image content using <a href="http://elastix.isi.uu.nl/">Elastix medical image registration toolbox</a>.
<p>Registration troubleshooting: check "Keep temporary files" option before starting regsitration and click on "Show temp folder" to open the folder that contains detailed logs.
<p>Edit registration parameters: open Advanced section, click "Show database folder", and edit presets. To add a new preset or modify registration phases, modify ElastixParameterSetDatabase.xml.
See <a href="http://elastix.bigr.nl/wiki/index.php/Parameter_file_database">registration parameter set database</a> and <a href="http://elastix.isi.uu.nl/doxygen/index.html">Elastix documentation</a> for more details."""
#self.parent.helpText += self.getDefaultModuleDocumentationLink()
self.parent.acknowledgementText = """
This module was originally developed by Andras Lasso (Queen's University, PerkLab)
to serve as a frontend to Elastix medical image registration toolbox.
If you use this module, please cite the following articles:
<ul><li>S. Klein, M. Staring, K. Murphy, M.A. Viergever, J.P.W. Pluim, "<a href="http://elastix.isi.uu.nl/marius/publications/2010_j_TMI.php">elastix: a toolbox for intensity based medical image registration</a>", IEEE Transactions on Medical Imaging, vol. 29, no. 1, pp. 196 - 205, January 2010.</li>
<li>D.P. Shamonin, E.E. Bron, B.P.F. Lelieveldt, M. Smits, S. Klein and M. Staring, "<a href="http://elastix.isi.uu.nl/marius/publications/2014_j_FNI.php">Fast Parallel Image Registration on CPU and GPU for Diagnostic Classification of Alzheimer's Disease</a>", Frontiers in Neuroinformatics, vol. 7, no. 50, pp. 1-15, January 2014.</li></ul>
See more information about Elastix medical image registration toolbox at <a href="http://elastix.isi.uu.nl/">http://elastix.isi.uu.nl/</a>.
"""
#
# ElastixWidget
#
class ElastixWidget(ScriptedLoadableModuleWidget):
  """GUI widget for the Elastix registration module.

  Uses ScriptedLoadableModuleWidget base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """

  def setup(self):
    """Build the module panel and connect the widgets to the logic."""
    ScriptedLoadableModuleWidget.setup(self)

    self.logic = ElastixLogic()
    self.logic.logCallback = self.addLog
    self.registrationInProgress = False

    # Instantiate and connect widgets ...

    # Parameter sets
    defaultinputParametersCollapsibleButton = ctk.ctkCollapsibleButton()
    defaultinputParametersCollapsibleButton.text = "Parameter set"
    defaultinputParametersCollapsibleButton.collapsed = True
    self.layout.addWidget(defaultinputParametersCollapsibleButton)
    defaultParametersLayout = qt.QFormLayout(defaultinputParametersCollapsibleButton)

    self.parameterNodeSelector = slicer.qMRMLNodeComboBox()
    self.parameterNodeSelector.nodeTypes = ["vtkMRMLScriptedModuleNode"]
    self.parameterNodeSelector.addAttribute( "vtkMRMLScriptedModuleNode", "ModuleName", "Elastix" )
    self.parameterNodeSelector.selectNodeUponCreation = True
    self.parameterNodeSelector.addEnabled = True
    self.parameterNodeSelector.renameEnabled = True
    self.parameterNodeSelector.removeEnabled = True
    self.parameterNodeSelector.noneEnabled = False
    self.parameterNodeSelector.showHidden = True
    self.parameterNodeSelector.showChildNodeTypes = False
    self.parameterNodeSelector.baseName = "General Registration (Elastix)"
    self.parameterNodeSelector.setMRMLScene( slicer.mrmlScene )
    self.parameterNodeSelector.setToolTip( "Pick parameter set" )
    defaultParametersLayout.addRow("Parameter set: ", self.parameterNodeSelector)

    #
    # Inputs
    #
    inputParametersCollapsibleButton = ctk.ctkCollapsibleButton()
    inputParametersCollapsibleButton.text = "Inputs"
    self.layout.addWidget(inputParametersCollapsibleButton)

    # Layout within the dummy collapsible button
    inputParametersFormLayout = qt.QFormLayout(inputParametersCollapsibleButton)

    #
    # fixed volume selector
    #
    self.fixedVolumeSelector = slicer.qMRMLNodeComboBox()
    self.fixedVolumeSelector.nodeTypes = ["vtkMRMLScalarVolumeNode"]
    self.fixedVolumeSelector.selectNodeUponCreation = True
    self.fixedVolumeSelector.addEnabled = False
    self.fixedVolumeSelector.removeEnabled = False
    self.fixedVolumeSelector.noneEnabled = False
    self.fixedVolumeSelector.showHidden = False
    self.fixedVolumeSelector.showChildNodeTypes = False
    self.fixedVolumeSelector.setMRMLScene( slicer.mrmlScene )
    self.fixedVolumeSelector.setToolTip( "The moving volume will be transformed into this image space." )
    inputParametersFormLayout.addRow("Fixed volume: ", self.fixedVolumeSelector)

    #
    # moving volume selector
    #
    self.movingVolumeSelector = slicer.qMRMLNodeComboBox()
    self.movingVolumeSelector.nodeTypes = ["vtkMRMLScalarVolumeNode"]
    self.movingVolumeSelector.selectNodeUponCreation = True
    self.movingVolumeSelector.addEnabled = False
    self.movingVolumeSelector.removeEnabled = False
    self.movingVolumeSelector.noneEnabled = False
    self.movingVolumeSelector.showHidden = False
    self.movingVolumeSelector.showChildNodeTypes = False
    self.movingVolumeSelector.setMRMLScene( slicer.mrmlScene )
    self.movingVolumeSelector.setToolTip( "This volume will be transformed into the fixed image space" )
    inputParametersFormLayout.addRow("Moving volume: ", self.movingVolumeSelector)

    # Registration preset selector, populated from the parameter set database
    self.registrationPresetSelector = qt.QComboBox()
    for preset in self.logic.getRegistrationPresets():
      self.registrationPresetSelector.addItem("{0} ({1})".format(preset[RegistrationPresets_Modality], preset[RegistrationPresets_Content]))
    inputParametersFormLayout.addRow("Preset: ", self.registrationPresetSelector)

    #
    # Masking
    #
    maskingParametersCollapsibleButton = ctk.ctkCollapsibleButton()
    maskingParametersCollapsibleButton.text = "Masking"
    maskingParametersCollapsibleButton.collapsed = True
    self.layout.addWidget(maskingParametersCollapsibleButton)

    # Layout within the dummy collapsible button
    maskingParametersFormLayout = qt.QFormLayout(maskingParametersCollapsibleButton)

    #
    # fixed volume mask selector
    #
    self.fixedVolumeMaskSelector = slicer.qMRMLNodeComboBox()
    self.fixedVolumeMaskSelector.nodeTypes = ["vtkMRMLLabelMapVolumeNode"]
    self.fixedVolumeMaskSelector.addEnabled = False
    self.fixedVolumeMaskSelector.removeEnabled = False
    self.fixedVolumeMaskSelector.noneEnabled = True
    self.fixedVolumeMaskSelector.showHidden = False
    self.fixedVolumeMaskSelector.showChildNodeTypes = False
    self.fixedVolumeMaskSelector.setMRMLScene( slicer.mrmlScene )
    self.fixedVolumeMaskSelector.setToolTip("Areas of the fixed volume where mask label is 0 will be ignored in the registration.")
    maskingParametersFormLayout.addRow("Fixed volume mask: ", self.fixedVolumeMaskSelector)

    #
    # moving volume mask selector
    #
    self.movingVolumeMaskSelector = slicer.qMRMLNodeComboBox()
    self.movingVolumeMaskSelector.nodeTypes = ["vtkMRMLLabelMapVolumeNode"]
    self.movingVolumeMaskSelector.selectNodeUponCreation = True
    self.movingVolumeMaskSelector.addEnabled = False
    self.movingVolumeMaskSelector.removeEnabled = False
    self.movingVolumeMaskSelector.noneEnabled = True
    self.movingVolumeMaskSelector.showHidden = False
    self.movingVolumeMaskSelector.showChildNodeTypes = False
    self.movingVolumeMaskSelector.setMRMLScene( slicer.mrmlScene )
    self.movingVolumeMaskSelector.setToolTip("Areas of the moving volume where mask label is 0 will be ignored in the registration")
    maskingParametersFormLayout.addRow("Moving volume mask: ", self.movingVolumeMaskSelector)

    #
    # Outputs
    #
    outputParametersCollapsibleButton = ctk.ctkCollapsibleButton()
    outputParametersCollapsibleButton.text = "Outputs"
    self.layout.addWidget(outputParametersCollapsibleButton)

    # Layout within the dummy collapsible button
    outputParametersFormLayout = qt.QFormLayout(outputParametersCollapsibleButton)

    #
    # output volume selector
    #
    self.outputVolumeSelector = slicer.qMRMLNodeComboBox()
    self.outputVolumeSelector.nodeTypes = ["vtkMRMLScalarVolumeNode"]
    self.outputVolumeSelector.selectNodeUponCreation = True
    self.outputVolumeSelector.addEnabled = True
    self.outputVolumeSelector.renameEnabled = True
    self.outputVolumeSelector.removeEnabled = True
    self.outputVolumeSelector.noneEnabled = True
    self.outputVolumeSelector.showHidden = False
    self.outputVolumeSelector.showChildNodeTypes = False
    self.outputVolumeSelector.setMRMLScene( slicer.mrmlScene )
    self.outputVolumeSelector.setToolTip( "(optional) The moving image warped to the fixed image space. NOTE: You must set at least one output object (transform and/or output volume)" )
    outputParametersFormLayout.addRow("Output volume: ", self.outputVolumeSelector)

    #
    # output transform selector
    #
    self.outputTransformSelector = slicer.qMRMLNodeComboBox()
    self.outputTransformSelector.nodeTypes = ["vtkMRMLTransformNode"]
    self.outputTransformSelector.selectNodeUponCreation = True
    self.outputTransformSelector.addEnabled = True
    self.outputTransformSelector.renameEnabled = True
    self.outputTransformSelector.removeEnabled = True
    self.outputTransformSelector.noneEnabled = True
    self.outputTransformSelector.showHidden = False
    self.outputTransformSelector.showChildNodeTypes = False
    self.outputTransformSelector.setMRMLScene( slicer.mrmlScene )
    self.outputTransformSelector.setToolTip( "(optional) Computed displacement field that transform nodes from moving volume space to fixed volume space. NOTE: You must set at least one output object (transform and/or output volume)." )
    outputParametersFormLayout.addRow("Output transform: ", self.outputTransformSelector)

    #
    # Advanced area
    #
    self.advancedCollapsibleButton = ctk.ctkCollapsibleButton()
    self.advancedCollapsibleButton.text = "Advanced"
    self.advancedCollapsibleButton.collapsed = True
    self.layout.addWidget(self.advancedCollapsibleButton)
    advancedFormLayout = qt.QFormLayout(self.advancedCollapsibleButton)

    self.showDetailedLogDuringExecutionCheckBox = qt.QCheckBox(" ")
    self.showDetailedLogDuringExecutionCheckBox.checked = False
    self.showDetailedLogDuringExecutionCheckBox.setToolTip("Show detailed log during registration.")
    advancedFormLayout.addRow("Show detailed log during registration:", self.showDetailedLogDuringExecutionCheckBox)

    self.keepTemporaryFilesCheckBox = qt.QCheckBox(" ")
    self.keepTemporaryFilesCheckBox.checked = False
    self.keepTemporaryFilesCheckBox.setToolTip("Keep temporary files (inputs, computed outputs, logs) after the registration is completed.")

    self.showTemporaryFilesFolderButton = qt.QPushButton("Show temp folder")
    self.showTemporaryFilesFolderButton.toolTip = "Open the folder where temporary files are stored."
    self.showTemporaryFilesFolderButton.setSizePolicy(qt.QSizePolicy.MinimumExpanding, qt.QSizePolicy.Preferred)

    hbox = qt.QHBoxLayout()
    hbox.addWidget(self.keepTemporaryFilesCheckBox)
    hbox.addWidget(self.showTemporaryFilesFolderButton)
    advancedFormLayout.addRow("Keep temporary files:", hbox)

    self.showRegistrationParametersDatabaseFolderButton = qt.QPushButton("Show database folder")
    self.showRegistrationParametersDatabaseFolderButton.toolTip = "Open the folder where temporary files are stored."
    self.showRegistrationParametersDatabaseFolderButton.setSizePolicy(qt.QSizePolicy.MinimumExpanding, qt.QSizePolicy.Preferred)
    advancedFormLayout.addRow("Registration presets:", self.showRegistrationParametersDatabaseFolderButton)

    customElastixBinDir = self.logic.getCustomElastixBinDir()
    self.customElastixBinDirSelector = ctk.ctkPathLineEdit()
    self.customElastixBinDirSelector.filters = ctk.ctkPathLineEdit.Dirs
    self.customElastixBinDirSelector.setCurrentPath(customElastixBinDir)
    self.customElastixBinDirSelector.setSizePolicy(qt.QSizePolicy.MinimumExpanding, qt.QSizePolicy.Preferred)
    self.customElastixBinDirSelector.setToolTip("Set bin directory of an Elastix installation (where elastix executable is located). "
      "If value is empty then default elastix (bundled with SlicerElastix extension) will be used.")
    advancedFormLayout.addRow("Custom Elastix toolbox location:", self.customElastixBinDirSelector)

    #
    # Apply Button
    #
    self.applyButton = qt.QPushButton("Apply")
    self.applyButton.toolTip = "Run the algorithm."
    self.applyButton.enabled = False
    self.layout.addWidget(self.applyButton)

    self.statusLabel = qt.QPlainTextEdit()
    self.statusLabel.setTextInteractionFlags(qt.Qt.TextSelectableByMouse)
    self.statusLabel.setCenterOnScroll(True)
    self.layout.addWidget(self.statusLabel)

    # connections
    self.applyButton.connect('clicked(bool)', self.onApplyButton)
    self.showTemporaryFilesFolderButton.connect('clicked(bool)', self.onShowTemporaryFilesFolder)
    self.showRegistrationParametersDatabaseFolderButton.connect('clicked(bool)', self.onShowRegistrationParametersDatabaseFolder)
    self.fixedVolumeSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
    self.movingVolumeSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
    self.outputVolumeSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
    self.outputTransformSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)

    # Immediately update deleteTemporaryFiles in the logic to make it possible to decide to
    # keep the temporary file while the registration is running
    self.keepTemporaryFilesCheckBox.connect("toggled(bool)", self.onKeepTemporaryFilesToggled)

    # Add vertical spacer
    self.layout.addStretch(1)

    # Refresh Apply button state
    self.onSelect()

  def cleanup(self):
    pass

  def onSelect(self):
    """Enable/disable the Apply button depending on the input/output selection."""
    if not self.fixedVolumeSelector.currentNode() or not self.movingVolumeSelector.currentNode():
      self.applyButton.text = "Select fixed and moving volumes"
      self.applyButton.enabled = False
    elif self.fixedVolumeSelector.currentNode() == self.movingVolumeSelector.currentNode():
      self.applyButton.text = "Fixed and moving volume must not be the same"
      self.applyButton.enabled = False
    elif not self.outputVolumeSelector.currentNode() and not self.outputTransformSelector.currentNode():
      self.applyButton.text = "Select an output volume and/or output transform"
      self.applyButton.enabled = False
    else:
      self.applyButton.text = "Apply"
      self.applyButton.enabled = True

  def onShowTemporaryFilesFolder(self):
    qt.QDesktopServices().openUrl(qt.QUrl("file:///" + self.logic.getTempDirectoryBase(), qt.QUrl.TolerantMode))

  def onShowRegistrationParametersDatabaseFolder(self):
    qt.QDesktopServices().openUrl(qt.QUrl("file:///" + self.logic.registrationParameterFilesDir, qt.QUrl.TolerantMode))

  def onKeepTemporaryFilesToggled(self, toggle):
    self.logic.deleteTemporaryFiles = toggle

  def onApplyButton(self):
    """Start a registration, or request cancellation if one is already running."""
    if self.registrationInProgress:
      # Second click while running acts as a cancel request.
      self.registrationInProgress = False
      self.logic.abortRequested = True
      self.applyButton.text = "Cancelling..."
      self.applyButton.enabled = False
      return

    self.registrationInProgress = True
    self.applyButton.text = "Cancel"
    self.statusLabel.plainText = ''
    slicer.app.setOverrideCursor(qt.Qt.WaitCursor)
    try:
      self.logic.setCustomElastixBinDir(self.customElastixBinDirSelector.currentPath)
      self.logic.deleteTemporaryFiles = not self.keepTemporaryFilesCheckBox.checked
      self.logic.logStandardOutput = self.showDetailedLogDuringExecutionCheckBox.checked
      parameterFilenames = self.logic.getRegistrationPresets()[self.registrationPresetSelector.currentIndex][RegistrationPresets_ParameterFilenames]
      self.logic.registerVolumes(self.fixedVolumeSelector.currentNode(), self.movingVolumeSelector.currentNode(),
        parameterFilenames = parameterFilenames,
        outputVolumeNode = self.outputVolumeSelector.currentNode(),
        outputTransformNode = self.outputTransformSelector.currentNode(),
        fixedVolumeMaskNode = self.fixedVolumeMaskSelector.currentNode(),
        movingVolumeMaskNode = self.movingVolumeMaskSelector.currentNode())
    except Exception as e:
      # Fixed: "print e" is Python 2-only syntax and "e.message" does not
      # exist in Python 3; str(e) via format() works on both.
      self.addLog("Error: {0}".format(e))
      import traceback
      traceback.print_exc()
    finally:
      slicer.app.restoreOverrideCursor()
      self.registrationInProgress = False
      self.onSelect() # restores default Apply button state

  def addLog(self, text):
    """Append text to log window
    """
    self.statusLabel.appendPlainText(text)
    slicer.app.processEvents()  # force update
#
# ElastixLogic
#
class ElastixLogic(ScriptedLoadableModuleLogic):
  """Runs Elastix/Transformix registrations on MRML volume nodes.

  This class should implement all the actual computation done by your
  module. The interface should be such that other python code can import
  this class and make use of the functionality without requiring an
  instance of the Widget.
  Uses ScriptedLoadableModuleLogic base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """

  def __init__(self):
    ScriptedLoadableModuleLogic.__init__(self)
    self.logCallback = None            # optional callable(text); mirrors the log to the GUI
    self.abortRequested = False
    self.deleteTemporaryFiles = True
    self.logStandardOutput = False
    self.registrationPresets = None    # lazily populated by getRegistrationPresets()
    self.customElastixBinDirSettingsKey = 'Elastix/CustomElastixPath'
    import os
    self.scriptPath = os.path.dirname(os.path.abspath(__file__))
    self.registrationParameterFilesDir = os.path.abspath(os.path.join(self.scriptPath, 'Resources', 'RegistrationParameters'))
    self.elastixBinDir = None # this will be determined dynamically
    import platform
    executableExt = '.exe' if platform.system() == 'Windows' else ''
    self.elastixFilename = 'elastix' + executableExt
    self.transformixFilename = 'transformix' + executableExt

  def addLog(self, text):
    """Write to the application log and to the GUI log window (if attached)."""
    logging.info(text)
    if self.logCallback:
      self.logCallback(text)

  def getElastixBinDir(self):
    """Return the directory containing the elastix executable, caching the result.

    Checks the user-configured custom location first, then known install/build
    tree locations relative to this script. Raises ValueError if not found.
    """
    if self.elastixBinDir:
      return self.elastixBinDir

    self.elastixBinDir = self.getCustomElastixBinDir()
    if self.elastixBinDir:
      return self.elastixBinDir

    elastixBinDirCandidates = [
      # install tree
      os.path.join(self.scriptPath, '..'),
      os.path.join(self.scriptPath, '../../../bin'),
      # build tree
      os.path.join(self.scriptPath, '../../../../bin'),
      os.path.join(self.scriptPath, '../../../../bin/Release'),
      os.path.join(self.scriptPath, '../../../../bin/Debug'),
      os.path.join(self.scriptPath, '../../../../bin/RelWithDebInfo'),
      os.path.join(self.scriptPath, '../../../../bin/MinSizeRel') ]

    for elastixBinDirCandidate in elastixBinDirCandidates:
      if os.path.isfile(os.path.join(elastixBinDirCandidate, self.elastixFilename)):
        # elastix found
        self.elastixBinDir = os.path.abspath(elastixBinDirCandidate)
        return self.elastixBinDir

    raise ValueError('Elastix not found')

  def getCustomElastixBinDir(self):
    """Return the user-configured elastix bin directory ('' if not set)."""
    settings = qt.QSettings()
    if settings.contains(self.customElastixBinDirSettingsKey):
      return slicer.util.toVTKString(settings.value(self.customElastixBinDirSettingsKey))
    return ''

  def setCustomElastixBinDir(self, customPath):
    """Persist a custom elastix bin directory and re-resolve the executable path."""
    # don't save it if already saved
    settings = qt.QSettings()
    if settings.contains(self.customElastixBinDirSettingsKey):
      if customPath == settings.value(self.customElastixBinDirSettingsKey):
        return
    settings.setValue(self.customElastixBinDirSettingsKey, customPath)
    # Update elastix bin dir
    self.elastixBinDir = None
    self.getElastixBinDir()

  def getElastixEnv(self):
    """Create an environment for elastix where executables are added to the path"""
    elastixBinDir = self.getElastixBinDir()
    elastixEnv = os.environ.copy()
    elastixEnv["PATH"] = elastixBinDir + os.pathsep + elastixEnv["PATH"] if elastixEnv.get("PATH") else elastixBinDir
    import platform
    if platform.system() != 'Windows':
      # elastix's shared libraries live next to the bin directory on Linux/macOS
      elastixLibDir = os.path.abspath(os.path.join(elastixBinDir, '../lib'))
      elastixEnv["LD_LIBRARY_PATH"] = elastixLibDir + os.pathsep + elastixEnv["LD_LIBRARY_PATH"] if elastixEnv.get("LD_LIBRARY_PATH") else elastixLibDir
    return elastixEnv

  def getRegistrationPresets(self):
    """Return the preset list parsed from ElastixParameterSetDatabase.xml.

    Each entry is [id, modality, content, description, publications,
    parameterFiles]; index entries with the RegistrationPresets_* constants.
    """
    if self.registrationPresets:
      return self.registrationPresets

    # Read database from XML file
    elastixParameterSetDatabasePath = os.path.join(self.scriptPath, 'Resources', 'RegistrationParameters', 'ElastixParameterSetDatabase.xml')
    if not os.path.isfile(elastixParameterSetDatabasePath):
      raise ValueError("Failed to open parameter set database: "+elastixParameterSetDatabasePath)
    elastixParameterSetDatabaseXml = vtk.vtkXMLUtilities.ReadElementFromFile(elastixParameterSetDatabasePath)
    elastixParameterSetDatabaseXml.UnRegister(None)

    # Create python list from XML for convenience
    self.registrationPresets = []
    for parameterSetIndex in range(elastixParameterSetDatabaseXml.GetNumberOfNestedElements()):
      parameterSetXml = elastixParameterSetDatabaseXml.GetNestedElement(parameterSetIndex)
      parameterFilesXml = parameterSetXml.FindNestedElementWithName('ParameterFiles')
      parameterFiles = []
      for parameterFileIndex in range(parameterFilesXml.GetNumberOfNestedElements()):
        parameterFiles.append(parameterFilesXml.GetNestedElement(parameterFileIndex).GetAttribute('Name'))
      self.registrationPresets.append([parameterSetXml.GetAttribute('id'), parameterSetXml.GetAttribute('modality'),
        parameterSetXml.GetAttribute('content'), parameterSetXml.GetAttribute('description'),
        parameterSetXml.GetAttribute('publications'), parameterFiles])

    return self.registrationPresets

  def getStartupInfo(self):
    """Return a STARTUPINFO hiding the console window on Windows, else None."""
    import platform
    if platform.system() != 'Windows':
      return None

    # Hide console window (only needed on Windows)
    import subprocess
    info = subprocess.STARTUPINFO()
    info.dwFlags = 1
    info.wShowWindow = 0
    return info

  def startElastix(self, cmdLineArguments):
    """Start the elastix executable asynchronously; return the Popen object."""
    self.addLog("Register volumes...")
    import subprocess
    executableFilePath = os.path.join(self.getElastixBinDir(), self.elastixFilename)
    logging.info("Register volumes using: "+executableFilePath+": "+repr(cmdLineArguments))
    # Fixed: subprocess.mswindows was Python 2-only and was removed in Python 3.
    # getStartupInfo() already returns None on non-Windows platforms, where the
    # startupinfo argument is ignored, so a single Popen call covers both cases.
    return subprocess.Popen([executableFilePath] + cmdLineArguments, env=self.getElastixEnv(),
                            stdout=subprocess.PIPE, universal_newlines=True,
                            startupinfo=self.getStartupInfo())

  def startTransformix(self, cmdLineArguments):
    """Start the transformix executable asynchronously; return the Popen object."""
    self.addLog("Generate output...")
    import subprocess
    executableFilePath = os.path.join(self.getElastixBinDir(), self.transformixFilename)
    logging.info("Generate output using: " + executableFilePath + ": " + repr(cmdLineArguments))
    # Same platform unification as startElastix (see comment there).
    return subprocess.Popen([executableFilePath] + cmdLineArguments, env=self.getElastixEnv(),
                            stdout=subprocess.PIPE, universal_newlines=True,
                            startupinfo=self.getStartupInfo())

  def logProcessOutput(self, process):
    """Stream a child process' stdout to the log; raise on failure or user abort."""
    # save process output (if not logged) so that it can be displayed in case of an error
    processOutput = ''
    import subprocess
    for stdout_line in iter(process.stdout.readline, ""):
      if self.logStandardOutput:
        self.addLog(stdout_line.rstrip())
      else:
        processOutput += stdout_line.rstrip() + '\n'
      slicer.app.processEvents()  # give a chance to click Cancel button
      if self.abortRequested:
        process.kill()
    process.stdout.close()
    return_code = process.wait()
    if return_code:
      if self.abortRequested:
        raise ValueError("User requested cancel.")
      else:
        if processOutput:
          self.addLog(processOutput)
        raise subprocess.CalledProcessError(return_code, "elastix")

  def getTempDirectoryBase(self):
    """Return (and create if needed) the base folder for temporary files."""
    tempDir = qt.QDir(slicer.app.temporaryPath)
    fileInfo = qt.QFileInfo(qt.QDir(tempDir), "Elastix")
    dirPath = fileInfo.absoluteFilePath()
    qt.QDir().mkpath(dirPath)
    return dirPath

  def createTempDirectory(self):
    """Create and return a unique, timestamped temporary working directory."""
    import qt, slicer
    tempDir = qt.QDir(self.getTempDirectoryBase())
    tempDirName = qt.QDateTime().currentDateTime().toString("yyyyMMdd_hhmmss_zzz")
    fileInfo = qt.QFileInfo(qt.QDir(tempDir), tempDirName)
    dirPath = fileInfo.absoluteFilePath()
    qt.QDir().mkpath(dirPath)
    return dirPath

  def registerVolumes(self, fixedVolumeNode, movingVolumeNode, parameterFilenames = None, outputVolumeNode = None, outputTransformNode = None,
    fixedVolumeMaskNode = None, movingVolumeMaskNode = None):
    """Register movingVolumeNode to fixedVolumeNode.

    Writes the inputs to a temporary folder, runs elastix (and transformix to
    resample / export the displacement field), then loads the requested
    outputs back into the given nodes. At least one of outputVolumeNode /
    outputTransformNode should be provided by the caller.
    """
    self.abortRequested = False
    tempDir = self.createTempDirectory()
    self.addLog('Volume registration is started in working directory: '+tempDir)

    # Write inputs
    inputDir = os.path.join(tempDir, 'input')
    qt.QDir().mkpath(inputDir)

    inputParamsElastix = []

    # Add input volumes
    inputVolumes = []
    inputVolumes.append([fixedVolumeNode, 'fixed.mha', '-f'])
    inputVolumes.append([movingVolumeNode, 'moving.mha', '-m'])
    inputVolumes.append([fixedVolumeMaskNode, 'fixedMask.mha', '-fMask'])
    inputVolumes.append([movingVolumeMaskNode, 'movingMask.mha', '-mMask'])
    for [volumeNode, filename, paramName] in inputVolumes:
      if not volumeNode:
        continue
      filePath = os.path.join(inputDir, filename)
      slicer.util.saveNode(volumeNode, filePath, {"useCompression": False})
      inputParamsElastix.append(paramName)
      inputParamsElastix.append(filePath)

    # Specify output location
    resultTransformDir = os.path.join(tempDir, 'result-transform')
    qt.QDir().mkpath(resultTransformDir)
    inputParamsElastix += ['-out', resultTransformDir]

    # Specify parameter files (fixed: "== None" -> "is None")
    if parameterFilenames is None:
      parameterFilenames = self.getRegistrationPresets()[0][RegistrationPresets_ParameterFilenames]
    for parameterFilename in parameterFilenames:
      inputParamsElastix.append('-p')
      parameterFilePath = os.path.abspath(os.path.join(self.registrationParameterFilesDir, parameterFilename))
      inputParamsElastix.append(parameterFilePath)

    # Run registration
    ep = self.startElastix(inputParamsElastix)
    self.logProcessOutput(ep)

    # Resample
    if not self.abortRequested:
      resultResampleDir = os.path.join(tempDir, 'result-resample')
      qt.QDir().mkpath(resultResampleDir)
      inputParamsTransformix = ['-in', os.path.join(inputDir, 'moving.mha'), '-out', resultResampleDir]
      if outputTransformNode:
        inputParamsTransformix += ['-def', 'all']
      if outputVolumeNode:
        # use the transform produced by the last registration phase
        inputParamsTransformix += ['-tp', resultTransformDir+'/TransformParameters.'+str(len(parameterFilenames)-1)+'.txt']
      tp = self.startTransformix(inputParamsTransformix)
      self.logProcessOutput(tp)

    # Write results
    if not self.abortRequested:

      if outputVolumeNode:
        outputVolumePath = os.path.join(resultResampleDir, "result.mhd")
        [success, loadedOutputVolumeNode] = slicer.util.loadVolume(outputVolumePath, returnNode = True)
        if success:
          # Copy image data and geometry into the caller's node, then discard
          # the temporary node created by the loader.
          outputVolumeNode.SetAndObserveImageData(loadedOutputVolumeNode.GetImageData())
          ijkToRas = vtk.vtkMatrix4x4()
          loadedOutputVolumeNode.GetIJKToRASMatrix(ijkToRas)
          outputVolumeNode.SetIJKToRASMatrix(ijkToRas)
          slicer.mrmlScene.RemoveNode(loadedOutputVolumeNode)
        else:
          self.addLog("Failed to load output volume from "+outputVolumePath)

      if outputTransformNode:
        outputTransformPath = os.path.join(resultResampleDir, "deformationField.mhd")
        [success, loadedOutputTransformNode] = slicer.util.loadTransform(outputTransformPath, returnNode = True)
        if success:
          if loadedOutputTransformNode.GetReadAsTransformToParent():
            outputTransformNode.SetAndObserveTransformToParent(loadedOutputTransformNode.GetTransformToParent())
          else:
            outputTransformNode.SetAndObserveTransformFromParent(loadedOutputTransformNode.GetTransformFromParent())
          slicer.mrmlScene.RemoveNode(loadedOutputTransformNode)
        else:
          self.addLog("Failed to load output transform from "+outputTransformPath)

    # Clean up
    if self.deleteTemporaryFiles:
      import shutil
      shutil.rmtree(tempDir)

    self.addLog("Registration is completed")
class ElastixTest(ScriptedLoadableModuleTest):
  """
  This is the test case for your scripted module.
  Uses ScriptedLoadableModuleTest base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """

  def setUp(self):
    """ Do whatever is needed to reset the state - typically a scene clear will be enough.
    """
    slicer.mrmlScene.Clear(0)

  def runTest(self):
    """Run as few or as many tests as needed here.
    """
    self.setUp()
    self.test_Elastix1()

  def test_Elastix1(self):
    """ Ideally you should have several levels of tests. At the lowest level
    tests should exercise the functionality of the logic with different inputs
    (both valid and invalid). At higher levels your tests should emulate the
    way the user would interact with your code and confirm that it still works
    the way you intended.
    One of the most important features of the tests is that it should alert other
    developers when their changes will have an impact on the behavior of your
    module. For example, if a developer removes a feature that you depend on,
    your test should break so they know that the feature is needed.
    """
    self.delayDisplay("Starting the test")

    #
    # first, get some data
    #
    # Downloads two MR brain tumor sample volumes and registers one to the other
    # using the first preset in the database.
    import SampleData
    sampleDataLogic = SampleData.SampleDataLogic()
    tumor1 = sampleDataLogic.downloadMRBrainTumor1()
    tumor2 = sampleDataLogic.downloadMRBrainTumor2()

    outputVolume = slicer.vtkMRMLScalarVolumeNode()
    slicer.mrmlScene.AddNode(outputVolume)
    outputVolume.CreateDefaultDisplayNodes()

    logic = ElastixLogic()
    parameterFilenames = logic.getRegistrationPresets()[0][RegistrationPresets_ParameterFilenames]
    logic.registerVolumes(tumor1, tumor2, parameterFilenames = parameterFilenames, outputVolumeNode = outputVolume)

    self.delayDisplay('Test passed!')
# Index constants for the per-preset lists returned by
# ElastixLogic.getRegistrationPresets(); each preset entry is
# [id, modality, content, description, publications, parameterFilenames].
RegistrationPresets_Id = 0
RegistrationPresets_Modality = 1
RegistrationPresets_Content = 2
RegistrationPresets_Description = 3
RegistrationPresets_Publications = 4
RegistrationPresets_ParameterFilenames = 5
| [
"lasso@queensu.ca"
] | lasso@queensu.ca |
dd9892018dc6b2d6514d2081c8fca5562e57d115 | 65fe8e97656d41074e25219268c7b0a78fafb398 | /camera_infer.py | 7e74deaef16d9506c93f9ad054a23474383cb7d8 | [
"Apache-2.0"
] | permissive | Wblossom/Tensorflow-FaceRecognition | c48923f9ed8695380f251b5a81bcccafae33f44b | bc567fb53dc11554bfaf612f3a21045f7ab24876 | refs/heads/master | 2022-12-07T10:07:18.290096 | 2020-08-27T02:35:25 | 2020-08-27T02:35:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,491 | py | import cv2
import numpy as np
import sklearn
import config
from utils import face_preprocess
from PIL import ImageFont, ImageDraw, Image
from utils.utils import feature_compare, load_mtcnn, load_faces, load_mobilefacenet, add_faces
# Face-verification threshold: similarity above this value counts as a match.
VERIFICATION_THRESHOLD = config.VERIFICATION_THRESHOLD
# MTCNN face-detection model
mtcnn_detector = load_mtcnn()
# Load the MobileFaceNet face-recognition model
face_sess, inputs_placeholder, embeddings = load_mobilefacenet()
# Register any new face images found on disk
add_faces(mtcnn_detector)
# Load the database of already-registered face embeddings
faces_db = load_faces(face_sess, inputs_placeholder, embeddings)
# 注册人脸
def face_register():
    """Capture one webcam frame and register the single detected face.

    Press 'y' to take the photo; the aligned 112x112 face crop is written
    to face_db/<name>.png. Registration is rejected unless exactly one
    high-confidence face is present in the frame.
    """
    print("点击y确认拍照!")
    cap = cv2.VideoCapture(0)
    try:
        while True:
            ret, frame = cap.read()
            if ret:
                cv2.imshow('image', frame)
                if cv2.waitKey(1) & 0xFF == ord('y'):
                    faces, landmarks = mtcnn_detector.detect(frame)
                    # Fixed: was "faces.shape[0] is not 0" -- identity
                    # comparison with an int literal is unreliable; use !=.
                    if faces.shape[0] != 0:
                        faces_sum = 0
                        bbox = []
                        points = []
                        for i, face in enumerate(faces):
                            # keep only confident detections
                            if round(faces[i, 4], 6) > 0.95:
                                bbox = faces[i, 0:4]
                                points = landmarks[i, :].reshape((5, 2))
                                faces_sum += 1
                        if faces_sum == 1:
                            nimg = face_preprocess.preprocess(frame, bbox, points, image_size='112,112')
                            user_name = input("请输入注册名:")
                            cv2.imencode('.png', nimg)[1].tofile('face_db/%s.png' % user_name)
                            print("注册成功!")
                        else:
                            print('注册图片有错,图片中有且只有一个人脸')
                    else:
                        print('注册图片有错,图片中有且只有一个人脸')
                    break
    finally:
        # release the camera and close the preview window even on error
        # (the original leaked the capture device)
        cap.release()
        cv2.destroyAllWindows()
# 人脸识别
def face_recognition():
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
if ret:
faces, landmarks = mtcnn_detector.detect(frame)
if faces.shape[0] is not 0:
faces_sum = 0
for i, face in enumerate(faces):
if round(faces[i, 4], 6) > 0.95:
faces_sum += 1
if faces_sum == 0:
continue
# 人脸信息
info_location = np.zeros(faces_sum)
info_location[0] = 1
info_name = []
probs = []
# 提取图像中的人脸
input_images = np.zeros((faces.shape[0], 112, 112, 3))
for i, face in enumerate(faces):
if round(faces[i, 4], 6) > 0.95:
bbox = faces[i, 0:4]
points = landmarks[i, :].reshape((5, 2))
nimg = face_preprocess.preprocess(frame, bbox, points, image_size='112,112')
nimg = nimg - 127.5
nimg = nimg * 0.0078125
input_images[i, :] = nimg
# 进行人脸识别
feed_dict = {inputs_placeholder: input_images}
emb_arrays = face_sess.run(embeddings, feed_dict=feed_dict)
emb_arrays = sklearn.preprocessing.normalize(emb_arrays)
for i, embedding in enumerate(emb_arrays):
embedding = embedding.flatten()
temp_dict = {}
# 比较已经存在的人脸数据库
for com_face in faces_db:
ret, sim = feature_compare(embedding, com_face["feature"], 0.70)
temp_dict[com_face["name"]] = sim
dict = sorted(temp_dict.items(), key=lambda d: d[1], reverse=True)
if dict[0][1] > VERIFICATION_THRESHOLD:
name = dict[0][0]
probs.append(dict[0][1])
info_name.append(name)
else:
probs.append(dict[0][1])
info_name.append("unknown")
for k in range(faces_sum):
# 写上人脸信息
x1, y1, x2, y2 = faces[k][0], faces[k][1], faces[k][2], faces[k][3]
x1 = max(int(x1), 0)
y1 = max(int(y1), 0)
x2 = min(int(x2), frame.shape[1])
y2 = min(int(y2), frame.shape[0])
prob = '%.2f' % probs[k]
label = "{}, {}".format(info_name[k], prob)
cv2img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
pilimg = Image.fromarray(cv2img)
draw = ImageDraw.Draw(pilimg)
font = ImageFont.truetype('font/simfang.ttf', 18, encoding="utf-8")
draw.text((x1, y1 - 18), label, (255, 0, 0), font=font)
frame = cv2.cvtColor(np.array(pilimg), cv2.COLOR_RGB2BGR)
cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
cv2.imshow('image', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if __name__ == '__main__':
    # Simple console menu: 1 registers a new face, 2 runs live recognition.
    i = int(input("请选择功能,1为注册人脸,2为识别人脸:"))
    if i == 1:
        face_register()
    elif i == 2:
        face_recognition()
    else:
        print("功能选择错误")
| [
"yeyupiaoling@foxmail.com"
] | yeyupiaoling@foxmail.com |
a5c4298eb129e99af9224110f4761b4b8ed3bd22 | 67117705720a3e3d81253ba48c1826d36737b126 | /Wk10_STRANDS/get_valid1.py | df448f30ab90cf1b517fb5526efd7d00ee5ea597 | [] | no_license | pyliut/Rokos2021 | 41f0f96bc396b6e8a5e268e31a38a4a4b288c370 | 70753ab29afc45766eb502f91b65cc455e6055e1 | refs/heads/main | 2023-08-13T17:29:30.013829 | 2021-09-26T19:01:35 | 2021-09-26T19:01:35 | 382,092,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 10 13:24:32 2021
@author: pyliu
"""
import pandas as pd
def get_valid1(df, adjacent):
    """Filter a journey DataFrame down to rows whose endpoints are known nodes.

    Keeps only rows where both the ``origin`` and ``target`` values appear
    as keys of ``adjacent``, projects the columns used downstream, and
    renumbers the index so the result is contiguously indexed from 0.

    :param df: pandas DataFrame; must contain all of the columns listed below.
    :param adjacent: mapping whose keys are the valid node names
                     (presumably an adjacency dict of the topological map).
    :return: the filtered, re-indexed DataFrame.
    """
    # Both endpoints must be nodes present in the adjacency map.
    valid_nodes = adjacent.keys()
    mask = df["origin"].isin(valid_nodes) & df["target"].isin(valid_nodes)
    df = df.loc[mask, :]
    # Keep only the columns consumed by later processing steps.
    df = df[["_id", "status", "origin", "target", "edge_id", "date_finished",
             "date_at_node", "date_started", "_meta", "time_to_waypoint",
             "operation_time", "final_node"]]
    # Old index has gaps after filtering; drop it for a clean 0..n-1 index.
    df = df.reset_index(drop=True)
    return df
"noreply@github.com"
] | pyliut.noreply@github.com |
bb57a1105696946aa3421b5e5bf5cdc028143bb2 | 7cf8eb48e36e1aabf78f8fc4f9d5cfb0cfbc936b | /chapter3/odd_numbers.py | d8b26985e7ce50401bfd78fd04de691b15df0715 | [
"Apache-2.0"
] | permissive | AbdallahAhmed1999/WDMM-1402 | 47c6775e83ba01f7914451a181746c7f8acbff8b | 1d34a3c4bbedb6e2fcd1f45cc81e6aae5adad7d0 | refs/heads/master | 2020-04-23T23:14:15.570154 | 2018-12-24T11:58:49 | 2018-12-24T11:58:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | # print odd numbers from 1 to 10 (1, 3, 5, ..., 9)
# Step directly through the odd values 1, 3, 5, 7, 9 instead of testing parity.
for odd in range(1, 11, 2):
    print(odd)
| [
"motaz.saad@gmail.com"
] | motaz.saad@gmail.com |
2993c86ec6a80448c6afc224530dff735ad239be | 81d635211686b1bc87af5892bd9e0fb95cc2ddb8 | /adwords api/googleads-python-lib-master/examples/dfp/v201502/custom_targeting_service/get_custom_targeting_keys_by_statement.py | 0812eb4235de6320b09005b1154ac6fbaf2ee011 | [
"Apache-2.0"
] | permissive | analyticsbot/Python-Code---Part-2 | de2f0581258b6c8b8808b4ef2884fe7e323876f0 | 12bdcfdef4472bcedc77ae61707c25a4a09cba8a | refs/heads/master | 2021-06-04T05:10:33.185766 | 2016-08-31T13:45:45 | 2016-08-31T13:45:45 | 66,679,512 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,953 | py | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all predefined custom targeting keys.
To create custom targeting keys, run create_custom_targeting_keys_and_values.py.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
  """Fetch and print every PREDEFINED custom targeting key, page by page.

  Args:
    client: an initialized googleads dfp.DfpClient instance.
  """
  # Initialize appropriate service.
  custom_targeting_service = client.GetService(
      'CustomTargetingService', version='v201502')
  # Bind the :type PQL placeholder so only predefined keys are returned.
  values = [{
      'key': 'type',
      'value': {
          'xsi_type': 'TextValue',
          'value': 'PREDEFINED'
      }
  }]
  query = 'WHERE type = :type'
  statement = dfp.FilterStatement(query, values)
  # Page through the results; an empty page ends the loop.
  while True:
    response = custom_targeting_service.getCustomTargetingKeysByStatement(
        statement.ToStatement())
    if 'results' in response:
      # Display results.
      for key in response['results']:
        print ('Custom targeting key with id \'%s\', name \'%s\', display name '
               '\'%s\', and type \'%s\' was found.'
               % (key['id'], key['name'], key['displayName'], key['type']))
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break
  # Parenthesized form prints identically under Python 2 and parses under 3
  # (the bare `print` statement here was Python-2-only syntax).
  print ('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
  # Build the client from stored credentials and run the example.
  main(dfp.DfpClient.LoadFromStorage())
| [
"ravi.shankar1788@gmail.com"
] | ravi.shankar1788@gmail.com |
52438bbc1d7accc32306a2504e0b0ac1b2143bf5 | 505343f6ace00d22f8753c1a943a5794a619e698 | /katas/Python/5 kyu/Eight ways to iterate over table 5af5c18786d075cd5e00008b.py | 24fec5621cd28f0bfe275dbea42300fdad8dd66c | [] | no_license | bullet1337/codewars | 7652e50bf768bc47976a9124dd98b93602d4d458 | ba7f13ddd766158b41e036dae5d6b15f7f08761a | refs/heads/master | 2020-03-27T05:04:03.751302 | 2019-04-30T17:45:39 | 2019-04-30T17:45:39 | 145,991,995 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | # https://www.codewars.com/kata/5af5c18786d075cd5e00008b
DIRECTION_UP, DIRECTION_LEFT, DIRECTION_DOWN, DIRECTION_RIGHT = range(1, 5)

class Table:
    """A 2D grid whose cells can be visited in any of eight scan orders."""

    def __init__(self, data):
        self.data = data

    def get_range(self, dir):
        """Index sequence for one axis: DOWN/RIGHT ascend, UP/LEFT descend."""
        if dir == DIRECTION_DOWN:
            return range(len(self.data))
        if dir == DIRECTION_UP:
            return range(len(self.data) - 1, -1, -1)
        if dir == DIRECTION_RIGHT:
            return range(len(self.data[0]))
        if dir == DIRECTION_LEFT:
            return range(len(self.data[0]) - 1, -1, -1)

    def walk(self, dir0, dir1):
        """Yield cells, moving along dir0 innermost and dir1 outermost."""
        # Odd direction codes (UP/DOWN) make the inner index select the row.
        rows_inner = bool(dir0 % 2)
        for outer in self.get_range(dir1):
            for inner in self.get_range(dir0):
                if rows_inner:
                    yield self.data[inner][outer]
                else:
                    yield self.data[outer][inner]
| [
"alichek95@mail.ru"
] | alichek95@mail.ru |
92aacf961b4b95d0e6211c1e212a46717c305669 | a145974f133dbbce352a2c001f3b386b04a3b1e7 | /network_manage_api/apps/networks_manage/apps.py | c6b4a0cf2a88ed6a05076f662513e6b86e1d4069 | [] | no_license | 571451370/network_manage | fa04db513e8da82cb610c28e918bb7e896b73350 | 1efcb4f79dc3f7be45e3789e461f1a4ccb7e8dec | refs/heads/master | 2023-03-18T09:52:01.928312 | 2020-10-29T07:31:38 | 2020-10-29T07:31:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | from django.apps import AppConfig
class NetworksManageConfig(AppConfig):
    """Django AppConfig for the networks_manage application."""

    # Python path of the app module; Django derives the app label from it.
    name = 'networks_manage'
| [
"l"
] | l |
10901ab549fe64751337168cade0014abf98999b | c85b91bfdd7eb2fa5a7d6c6a9b722c8548c83105 | /vscode/extensions/ms-python.python-2020.3.69010/languageServer.0.5.31/Typeshed/stdlib/2/multiprocessing/util.pyi | 76f9424a6774726392f442e22dae101e2e626220 | [
"MIT",
"Apache-2.0"
] | permissive | ryangniadek/.dotfiles | ddf52cece49c33664b56f01b17d476cf0f1fafb1 | be272baf6fb7d7cd4f4db1f6812b710196511ffe | refs/heads/master | 2021-01-14T07:43:12.516127 | 2020-03-22T20:27:22 | 2020-03-22T20:27:22 | 242,632,623 | 0 | 0 | MIT | 2020-09-12T17:28:01 | 2020-02-24T02:50:06 | Python | UTF-8 | Python | false | false | 741 | pyi | from typing import Any
import threading
# Extra log levels used by multiprocessing's internal logger
# (values not visible in this stub; see multiprocessing/util.py).
SUBDEBUG = ... # type: Any
SUBWARNING = ... # type: Any
# Module-level logging helper stubs (bodies elided in this .pyi file).
def sub_debug(msg, *args): ...
def debug(msg, *args): ...
def info(msg, *args): ...
def sub_warning(msg, *args): ...
def get_logger(): ...
def log_to_stderr(level=None): ...
# NOTE(review): presumably returns a per-process temp directory — confirm
# against the multiprocessing/util.py implementation.
def get_temp_dir(): ...
# Register func to be run on obj in child processes after a fork.
def register_after_fork(obj, func): ...
class Finalize:
    """Callable finalizer stub; runs `callback` when invoked.

    NOTE(review): the `wr` parameter suggests it is also usable as a
    weakref callback — confirm against multiprocessing/util.py.
    """
    def __init__(self, obj, callback, args=..., kwargs=None, exitpriority=None): ...
    def __call__(self, wr=None): ...
    def cancel(self): ...
    def still_active(self): ...
# NOTE(review): presumably reports interpreter/process shutdown state —
# confirm against multiprocessing/util.py.
def is_exiting(): ...
# Lock wrapper stub; the name indicates fork-related reinitialization.
class ForkAwareThreadLock:
    def __init__(self): ...
# threading.local subclass stub; __reduce__ implies custom pickling support.
class ForkAwareLocal(threading.local):
    def __init__(self): ...
    def __reduce__(self): ...
| [
"ryan@gniadek.net"
] | ryan@gniadek.net |
646938fdb988b0da4f7455fce4fddf228f6bd0b0 | 254e35ed13abb5670eb664c1b17cb77d6b2d6289 | /LeetCode/python/_229.MajorityElementII.py | f84f099e6907b3b8184ac96ff90db0bcae53f8a1 | [] | no_license | bobby20180331/Algorithms | 475f7b29efcab829bc97b18a088600d406850fc7 | c56967e292b34162438f86bfc4c76925329105dd | refs/heads/master | 2023-04-23T04:36:26.977179 | 2021-02-04T06:47:41 | 2021-02-04T06:47:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,544 | py | #难点在于 linear time and in O(1)
#没思路,查了下,好像用Boyer-Moore Majority Vote algorithm
#这个算法是解决这样一个问题:从一个数组中找出出现半数以上的元素。 这道题是求n/3
'''
每次都找出一对不同的元素,从数组中删掉,直到数组为空或只有一种元素。
不难证明,如果存在元素e出现频率超过半数,那么数组中最后剩下的就只有e。
'''
class Solution(object):
    def majorityElement(self, nums):
        """Return every element occurring more than len(nums) // 3 times.

        Uses the extended Boyer-Moore majority vote: at most two distinct
        values can exceed the n/3 threshold, so two candidate/counter pairs
        suffice. O(n) time, O(1) extra space for the voting pass.

        :type nums: List[int]
        :rtype: List[int]
        """
        if not nums:  # empty input -> no majority elements
            return []
        # Two distinct initial candidates; counts start at zero so the
        # first unmatched elements simply claim the free slots.
        count1, count2, candidate1, candidate2 = 0, 0, 0, 1
        for n in nums:
            if n == candidate1:
                count1 += 1
            elif n == candidate2:
                count2 += 1
            elif count1 == 0:
                # Slot 1 is free: adopt n as the new first candidate.
                candidate1, count1 = n, 1
            elif count2 == 0:
                candidate2, count2 = n, 1
            else:
                # n matches neither candidate: cancel one vote from each.
                count1, count2 = count1 - 1, count2 - 1
        # Only the surviving candidates can exceed n/3, but survival alone
        # does not prove it, so verify each with an exact count. The two
        # candidates are always distinct (a slot is only re-assigned to a
        # value that differs from the other candidate), so no duplicates.
        return [n for n in (candidate1, candidate2)
                if nums.count(n) > len(nums) // 3]
| [
"noreply@github.com"
] | bobby20180331.noreply@github.com |
4ca4ef13e503c3348a0195e98d2a84a5902c7db3 | 83ecabbeea8b17a3fd9b8142062f09c76198e232 | /test/test_document_2.py | 34e6255c4f1a7ac910d2259eec0bfd0bc5f5172c | [] | no_license | junetigerlee/python-wso2-apim-publisherclient | 387f581bb48645b35f256159cce0031babd493f0 | 5e1cadeab4eb37ebc93e46b45d6d1f98f4fdfde9 | refs/heads/master | 2021-01-01T16:11:45.362270 | 2017-07-25T06:20:46 | 2017-07-25T06:20:46 | 97,783,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | # coding: utf-8
"""
WSO2 API Manager - Publisher API
This specifies a **RESTful API** for WSO2 **API Manager** - Publisher. Please see [full swagger definition](https://raw.githubusercontent.com/wso2/carbon-apimgt/v6.0.4/components/apimgt/org.wso2.carbon.apimgt.rest.api.publisher/src/main/resources/publisher-api.yaml) of the API which is written using [swagger 2.0](http://swagger.io/) specification.
OpenAPI spec version: 0.11.0
Contact: architecture@wso2.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import wso2_apim_publisherclient
from wso2_apim_publisherclient.rest import ApiException
from wso2_apim_publisherclient.models.document_2 import Document2
class TestDocument2(unittest.TestCase):
    """ Document2 unit test stubs """
    # Generated scaffold (swagger-codegen): no fixtures are needed yet.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testDocument2(self):
        """
        Test Document2
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = wso2_apim_publisherclient.models.document_2.Document2()
        # Placeholder until Document2 construction above is filled in.
        pass
if __name__ == '__main__':
    # Allow running this test module directly via the unittest runner.
    unittest.main()
| [
"junetigerlee@gmail.com"
] | junetigerlee@gmail.com |
cf7d9faca9205cada39610e551a3816b38472e19 | f92ffbbaf2783dbd566e2d11d065fc02fb6255f7 | /.c9/metadata/workspace/My_Study/myform/urls.py | 85aaf9fc439fe708fa5892ec6fb677f2ce0b8139 | [] | no_license | chelseashin/django_practice | 20551810f31ffea1c7eb1ea301ffdbb2890a6830 | 6c28e44aa6c8b646d98e32307827c5ced5ba2c59 | refs/heads/master | 2020-06-04T09:19:26.404723 | 2019-06-14T15:10:36 | 2019-06-14T15:10:36 | 191,962,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | {"filter":false,"title":"urls.py","tooltip":"/My_Study/myform/urls.py","undoManager":{"mark":-1,"position":-1,"stack":[]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":15,"column":32},"end":{"row":15,"column":32},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1555323377182,"hash":"dcc85db0be5d0e5cc8b9e812c502247e1d487e00"} | [
"chaewonshin95@gmail.com"
] | chaewonshin95@gmail.com |
639d0089dc164b752a30ef726417d9d4557180ea | 4fcad69a9b2aec97fa29e0010d82f0f085cdc446 | /tsampi/pypy/lib_python-bak/hypothesis/utils/__init__.py | 44d438c8e378b5f27ebbb6ac0833bbf585e8de04 | [] | no_license | tsampi/tsampi-0 | b64d4457f58314343630b04232c6ecc74c7bfda1 | 5e0183e80718d5668b4b5b96631853942e344b64 | refs/heads/master | 2021-01-19T04:35:05.640785 | 2016-09-12T18:34:25 | 2016-09-12T18:34:25 | 49,612,767 | 1 | 3 | null | 2016-03-25T10:35:41 | 2016-01-14T01:02:18 | Python | UTF-8 | Python | false | false | 800 | py | # coding=utf-8
#
# This file is part of Hypothesis (https://github.com/DRMacIver/hypothesis)
#
# Most of this work is copyright (C) 2013-2015 David R. MacIver
# (david@drmaciver.com), but it contains contributions by others. See
# https://github.com/DRMacIver/hypothesis/blob/master/CONTRIBUTING.rst for a
# full list of people who may hold copyright, and consult the git log if you
# need to determine who owns an individual contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# END HEADER
"""hypothesis.utils is a package for things that you can consider part of the
semi-public Hypothesis API but aren't really the core point.
"""
| [
"tim@readevalprint.com"
] | tim@readevalprint.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.