Dataset schema (one row per column; ⌀ marks nullable columns):

| column | dtype | range / values |
|---|---|---|
| hexsha | stringlengths | 40 .. 40 |
| size | int64 | 3 .. 1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3 .. 972 |
| max_stars_repo_name | stringlengths | 6 .. 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 .. 78 |
| max_stars_repo_licenses | listlengths | 1 .. 10 |
| max_stars_count | int64 | 1 .. 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 .. 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 .. 24 ⌀ |
| max_issues_repo_path | stringlengths | 3 .. 972 |
| max_issues_repo_name | stringlengths | 6 .. 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 .. 78 |
| max_issues_repo_licenses | listlengths | 1 .. 10 |
| max_issues_count | int64 | 1 .. 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 .. 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 .. 24 ⌀ |
| max_forks_repo_path | stringlengths | 3 .. 972 |
| max_forks_repo_name | stringlengths | 6 .. 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 .. 78 |
| max_forks_repo_licenses | listlengths | 1 .. 10 |
| max_forks_count | int64 | 1 .. 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 .. 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 .. 24 ⌀ |
| content | stringlengths | 3 .. 1.03M |
| avg_line_length | float64 | 1.13 .. 941k |
| max_line_length | int64 | 2 .. 941k |
| alphanum_fraction | float64 | 0 .. 1 |

| hexsha: a46995e40c568d9fab0090a1e0fdb13667f1d1ad | size: 3,253 | ext: py | lang: Python |
| max_stars: data/p2DJ/New/program/qiskit/noisy/startQiskit_noisy378.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | licenses: ["BSD-3-Clause"] | count: null | events: null .. null |
| max_issues: data/p2DJ/New/program/qiskit/noisy/startQiskit_noisy378.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | licenses: ["BSD-3-Clause"] | count: null | events: null .. null |
| max_forks: data/p2DJ/New/program/qiskit/noisy/startQiskit_noisy378.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | licenses: ["BSD-3-Clause"] | count: null | events: null .. null |
| content: |
# qubit number=2
# total number=13
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.x(input_qubit[1]) # number=8
prog.rx(-0.14765485471872042,input_qubit[1]) # number=9
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.swap(input_qubit[1],input_qubit[0]) # number=2
prog.swap(input_qubit[1],input_qubit[0]) # number=3
prog.x(input_qubit[1]) # number=5
prog.h(input_qubit[1]) # number=10
prog.cz(input_qubit[0],input_qubit[1]) # number=11
prog.h(input_qubit[1]) # number=12
prog.rx(-2.73004401596953,input_qubit[1]) # number=6
prog.z(input_qubit[1]) # number=4
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = FakeVigo()
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit_noisy378.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| avg_line_length: 28.787611 | max_line_length: 82 | alphanum_fraction: 0.631417 |

| hexsha: fc4bfecd2d5cf6ac9070193a6f82c0c3ab9e210d | size: 527 | ext: py | lang: Python |
| max_stars: questions/migrations/0004_auto_20191127_1949.py | kemalayhan/kinda-stackoverflow | 99052b78b18cbc562388c4fea87b5ec15cb47067 | licenses: ["MIT"] | count: 7 | events: 2021-02-08T19:40:57.000Z .. 2021-10-01T08:09:21.000Z |
| max_issues: questions/migrations/0004_auto_20191127_1949.py | kemalayhan/kinda-stackoverflow | 99052b78b18cbc562388c4fea87b5ec15cb47067 | licenses: ["MIT"] | count: 7 | events: 2021-04-08T19:57:02.000Z .. 2022-03-12T00:48:47.000Z |
| max_forks: questions/migrations/0004_auto_20191127_1949.py | kemalayhan/kinda-stackoverflow | 99052b78b18cbc562388c4fea87b5ec15cb47067 | licenses: ["MIT"] | count: 1 | events: 2020-12-21T19:01:52.000Z .. 2020-12-21T19:01:52.000Z |
| content: |
# Generated by Django 2.2.7 on 2019-11-27 19:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tags', '0001_initial'),
('questions', '0003_auto_20191121_1134'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='tag',
),
migrations.AddField(
model_name='question',
name='tag',
field=models.ManyToManyField(to='tags.Tag'),
),
]
| avg_line_length: 21.958333 | max_line_length: 56 | alphanum_fraction: 0.559772 |

| hexsha: 7ee99d8de55bf14419641acc843a3ccb8ec89200 | size: 3,303 | ext: py | lang: Python |
| max_stars: trending.py | Lissy93/gh-trending-no-cors | c0f3234568e3e9421cb56bef49c92bd7a8e883a8 | licenses: ["MIT"] | count: 1 | events: 2021-11-27T00:17:11.000Z .. 2021-11-27T00:17:11.000Z |
| max_issues: trending.py | Lissy93/gh-trending-no-cors | c0f3234568e3e9421cb56bef49c92bd7a8e883a8 | licenses: ["MIT"] | count: null | events: null .. null |
| max_forks: trending.py | Lissy93/gh-trending-no-cors | c0f3234568e3e9421cb56bef49c92bd7a8e883a8 | licenses: ["MIT"] | count: 5 | events: 2021-11-17T09:41:52.000Z .. 2021-11-24T01:04:04.000Z |
| content: |
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from lxml import etree
GITHUB_URL = 'https://github.com/'
REPOSITORY = GITHUB_URL + 'trending/'
DEVELOPER = REPOSITORY + 'developers/'
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 ' \
'Safari/537.36 '
HEADER = {'User-Agent': USER_AGENT}
TIMEOUT = 15
NO_RESULT = {
'count': 0,
'msg': 'Unavailable',
'items': [],
}
async def get_trending(url: str, params: dict = None) -> dict:
html = await get_html(url, params)
if html:
is_blank = await has_trending(html)
if not is_blank:
if url.endswith(DEVELOPER):
return await parse_developer(html)
else:
return await parse_repo(html)
else:
return NO_RESULT
else:
return NO_RESULT
async def parse_repo(html) -> dict:
items = []
articles = html.xpath('//article')
for article in articles:
item = {'repo': article.xpath('./h1/a/@href')[0][1:]}
item['repo_link'] = GITHUB_URL + item['repo']
tmp = article.xpath('./p/text()')
item['desc'] = tmp[0].replace('\n', '').strip() if len(tmp) > 0 else ''
tmp = article.xpath('./div[last()]/span[1]/span[2]/text()')
item['lang'] = tmp[0].replace('\n', '').strip() if len(tmp) > 0 else ''
tmp = article.xpath('./div[last()]/a[1]/text()')
item['stars'] = "".join(tmp).replace(' ', '').replace('\n', '')
tmp = article.xpath('./div[last()]/a[2]/text()')
item['forks'] = "".join(tmp).replace(' ', '').replace('\n', '')
tmp = article.xpath('./div[last()]/span[3]/text()')
item['added_stars'] = "".join(tmp).replace('\n', '').strip()
item['avatars'] = article.xpath('./div[last()]/span[2]/a/img/@src')
items.append(item)
return {
'count': len(items),
'msg': 'suc',
'items': items
}
async def parse_developer(html) -> dict:
items = []
articles = html.xpath('//article')
for article in articles:
item = {'user': article.xpath('./div[2]/div[1]/h1/a/@href')[0][1:]}
item['user_link'] = GITHUB_URL + item['user']
item['full_name'] = article.xpath('./div[2]/div[1]/h1/a/text()')[0][1:]
item['developer_avatar'] = article.xpath('./div[1]/a/img/@src')[0]
items.append(item)
return {
'count': len(items),
'msg': 'suc',
'items': items
}
async def has_trending(html):
blank = html.xpath('//div[contains(@class,"blankslate")]')
    if blank:
return html.xpath('string(//div[contains(@class,"blankslate")]/h3)') \
.replace('\n', '').strip()
else:
return None
async def get_html(url: str, params: dict = None):
try:
if params is not None:
url = "{0}?since={1}".format(url, params.get('since'))
req = HTTPRequest(url, headers=HEADER, request_timeout=TIMEOUT)
response = await AsyncHTTPClient().fetch(req)
except Exception:
return None
else:
return etree.HTML(response.body)
async def get_all_language():
html = await get_html(url=REPOSITORY)
return html.xpath('//div[@class="select-menu-list"]/div/a/span/text()')
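# Hypothetical usage sketch (not part of the original file): the coroutines above can
# be driven from asyncio directly, assuming a recent Tornado release that runs on the
# asyncio event loop.
if __name__ == '__main__':
    import asyncio

    async def _demo():
        # Fetch today's trending repositories and print a short summary.
        result = await get_trending(REPOSITORY, {'since': 'daily'})
        print(result['msg'], '-', result['count'], 'repositories')

    asyncio.run(_demo())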
| avg_line_length: 33.704082 | max_line_length: 108 | alphanum_fraction: 0.565546 |

| hexsha: 2eb771325a875f92b36c9d781d7422332007ae5b | size: 353 | ext: py | lang: Python |
| max_stars: backintime/timeframes.py | akim-mukhtarov/backtesting | 2d0491b919885eeddd62c4079c9c7292381cb4f9 | licenses: ["MIT"] | count: null | events: null .. null |
| max_issues: backintime/timeframes.py | akim-mukhtarov/backtesting | 2d0491b919885eeddd62c4079c9c7292381cb4f9 | licenses: ["MIT"] | count: null | events: null .. null |
| max_forks: backintime/timeframes.py | akim-mukhtarov/backtesting | 2d0491b919885eeddd62c4079c9c7292381cb4f9 | licenses: ["MIT"] | count: null | events: null .. null |
| content: |
from enum import Enum
class Timeframes(Enum):
# seconds
S1 = 1
S5 = 5
S15 = 15
S30 = 30
# minutes
M1 = 60
M3 = 180
M5 = 300
M15 = 900
M30 = 1800
M45 = 2700
# hours
H1 = 3600
H2 = 7200
H3 = 10800
H4 = 14400
# day
D1 = 86400
# week
W1 = 604800
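# Illustrative note (not part of the original file): since every member is the
# timeframe length in seconds, conversions are plain integer arithmetic, e.g.
# Timeframes.H4.value // Timeframes.M15.value == 16 M15 candles per H4 candle.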
| avg_line_length: 13.576923 | max_line_length: 24 | alphanum_fraction: 0.436261 |

| hexsha: bf9628f988b986fae9d8ccc5fb20ae7b2a8d33de | size: 7,944 | ext: py | lang: Python |
| max_stars: Apache-Spark-Programming-with-Databricks/ASP 3 - Functions/Labs/ASP 3.4L - Abandoned Carts Lab.py | sjuratov/apache-spark-programming-with-databricks | 8059b5b8e754d4702c712646282a7592e8b4d53b | licenses: ["CC0-1.0"] | count: 1 | events: 2022-03-20T05:23:50.000Z .. 2022-03-20T05:23:50.000Z |
| max_issues: Apache-Spark-Programming-with-Databricks/ASP 3 - Functions/Labs/ASP 3.4L - Abandoned Carts Lab.py | sjuratov/apache-spark-programming-with-databricks | 8059b5b8e754d4702c712646282a7592e8b4d53b | licenses: ["CC0-1.0"] | count: null | events: null .. null |
| max_forks: Apache-Spark-Programming-with-Databricks/ASP 3 - Functions/Labs/ASP 3.4L - Abandoned Carts Lab.py | sjuratov/apache-spark-programming-with-databricks | 8059b5b8e754d4702c712646282a7592e8b4d53b | licenses: ["CC0-1.0"] | count: null | events: null .. null |
| content: |
# Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Abandoned Carts Lab
# MAGIC Get abandoned cart items for emails without purchases.
# MAGIC 1. Get emails of converted users from transactions
# MAGIC 2. Join emails with user IDs
# MAGIC 3. Get cart item history for each user
# MAGIC 4. Join cart item history with emails
# MAGIC 5. Filter for emails with abandoned cart items
# MAGIC
# MAGIC ##### Methods
# MAGIC - <a href="https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.DataFrame.html" target="_blank">DataFrame</a>: `join`
# MAGIC - <a href="https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql.html?#functions" target="_blank">Built-In Functions</a>: `collect_set`, `explode`, `lit`
# MAGIC - <a href="https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.DataFrameNaFunctions.html" target="_blank">DataFrameNaFunctions</a>: `fill`
# COMMAND ----------
# MAGIC %md
# MAGIC ### Setup
# MAGIC Run the cells below to create DataFrames **`sales_df`**, **`users_df`**, and **`events_df`**.
# COMMAND ----------
# MAGIC %run ../../Includes/Classroom-Setup
# COMMAND ----------
# sale transactions at BedBricks
sales_df = spark.read.format("delta").load(sales_path)
display(sales_df)
# COMMAND ----------
# user IDs and emails at BedBricks
users_df = spark.read.format("delta").load(users_path)
display(users_df)
# COMMAND ----------
# events logged on the BedBricks website
events_df = spark.read.format("delta").load(events_path)
display(events_df)
# COMMAND ----------
# MAGIC %md ### 1: Get emails of converted users from transactions
# MAGIC - Select the **`email`** column in **`sales_df`** and remove duplicates
# MAGIC - Add a new column **`converted`** with the value **`True`** for all rows
# MAGIC
# MAGIC Save the result as **`converted_users_df`**.
# COMMAND ----------
# TODO
from pyspark.sql.functions import *
converted_users_df = (sales_df.FILL_IN
)
display(converted_users_df)
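# One possible approach, as an illustrative sketch (not the official answer key):
# converted_users_df = (sales_df
#                       .select("email")
#                       .distinct()
#                       .withColumn("converted", lit(True)))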
# COMMAND ----------
# MAGIC %md #### 1.1: Check Your Work
# MAGIC
# MAGIC Run the following cell to verify that your solution works:
# COMMAND ----------
expected_columns = ["email", "converted"]
expected_count = 210370
assert converted_users_df.columns == expected_columns, "converted_users_df does not have the correct columns"
assert converted_users_df.count() == expected_count, "converted_users_df does not have the correct number of rows"
assert converted_users_df.select(col("converted")).first()[0] == True, "converted column not correct"
# COMMAND ----------
# MAGIC %md
# MAGIC ### 2: Join emails with user IDs
# MAGIC - Perform an outer join on **`converted_users_df`** and **`users_df`** with the **`email`** field
# MAGIC - Filter for users where **`email`** is not null
# MAGIC - Fill null values in **`converted`** as **`False`**
# MAGIC
# MAGIC Save the result as **`conversions_df`**.
# COMMAND ----------
# TODO
conversions_df = (users_df.FILL_IN
)
display(conversions_df)
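# One possible approach, as an illustrative sketch (not the official answer key):
# conversions_df = (users_df
#                   .join(converted_users_df, "email", "outer")
#                   .filter(col("email").isNotNull())
#                   .na.fill(False, ["converted"]))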
# COMMAND ----------
# MAGIC %md #### 2.1: Check Your Work
# MAGIC
# MAGIC Run the following cell to verify that your solution works:
# COMMAND ----------
expected_columns = ["email", "user_id", "user_first_touch_timestamp", "converted"]
expected_count = 782749
expected_false_count = 572379
assert conversions_df.columns == expected_columns, "Columns are not correct"
assert conversions_df.filter(col("email").isNull()).count() == 0, "Email column contains null"
assert conversions_df.count() == expected_count, "There is an incorrect number of rows"
assert conversions_df.filter(col("converted") == False).count() == expected_false_count, "There is an incorrect number of false entries in converted column"
# COMMAND ----------
# MAGIC %md
# MAGIC ### 3: Get cart item history for each user
# MAGIC - Explode the **`items`** field in **`events_df`** with the results replacing the existing **`items`** field
# MAGIC - Group by **`user_id`**
# MAGIC - Collect a set of all **`items.item_id`** objects for each user and alias the column to "cart"
# MAGIC
# MAGIC Save the result as **`carts_df`**.
# COMMAND ----------
# TODO
carts_df = (events_df.FILL_IN
)
display(carts_df)
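# One possible approach, as an illustrative sketch (not the official answer key):
# carts_df = (events_df
#             .withColumn("items", explode("items"))
#             .groupBy("user_id")
#             .agg(collect_set("items.item_id").alias("cart")))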
# COMMAND ----------
# MAGIC %md #### 3.1: Check Your Work
# MAGIC
# MAGIC Run the following cell to verify that your solution works:
# COMMAND ----------
expected_columns = ["user_id", "cart"]
expected_count = 488403
assert carts_df.columns == expected_columns, "Incorrect columns"
assert carts_df.count() == expected_count, "Incorrect number of rows"
assert carts_df.select(col("user_id")).drop_duplicates().count() == expected_count, "Duplicate user_ids present"
# COMMAND ----------
# MAGIC %md
# MAGIC ### 4: Join cart item history with emails
# MAGIC - Perform a left join on **`conversions_df`** and **`carts_df`** on the **`user_id`** field
# MAGIC
# MAGIC Save result as **`email_carts_df`**.
# COMMAND ----------
# TODO
email_carts_df = conversions_df.FILL_IN
display(email_carts_df)
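# One possible approach, as an illustrative sketch (not the official answer key):
# email_carts_df = conversions_df.join(carts_df, "user_id", "left")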
# COMMAND ----------
# MAGIC %md #### 4.1: Check Your Work
# MAGIC
# MAGIC Run the following cell to verify that your solution works:
# COMMAND ----------
expected_columns = ["user_id", "email", "user_first_touch_timestamp", "converted", "cart"]
expected_count = 782749
expected_cart_null_count = 397799
assert email_carts_df.columns == expected_columns, "Columns do not match"
assert email_carts_df.count() == expected_count, "Counts do not match"
assert email_carts_df.filter(col("cart").isNull()).count() == expected_cart_null_count, "Cart null counts incorrect from join"
# COMMAND ----------
# MAGIC %md
# MAGIC ### 5: Filter for emails with abandoned cart items
# MAGIC - Filter **`email_carts_df`** for users where **`converted`** is False
# MAGIC - Filter for users with non-null carts
# MAGIC
# MAGIC Save result as **`abandoned_carts_df`**.
# COMMAND ----------
# TODO
abandoned_carts_df = (email_carts_df.FILL_IN
)
display(abandoned_carts_df)
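# One possible approach, as an illustrative sketch (not the official answer key):
# abandoned_carts_df = (email_carts_df
#                       .filter(col("converted") == False)
#                       .filter(col("cart").isNotNull()))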
# COMMAND ----------
# MAGIC %md #### 5.1: Check Your Work
# MAGIC
# MAGIC Run the following cell to verify that your solution works:
# COMMAND ----------
expected_columns = ["user_id", "email", "user_first_touch_timestamp", "converted", "cart"]
expected_count = 204272
assert abandoned_carts_df.columns == expected_columns, "Columns do not match"
assert abandoned_carts_df.count() == expected_count, "Counts do not match"
# COMMAND ----------
# MAGIC %md
# MAGIC ### 6: Bonus Activity
# MAGIC Plot number of abandoned cart items by product
# COMMAND ----------
# TODO
abandoned_items_df = (abandoned_carts_df.FILL_IN
)
display(abandoned_items_df)
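# One possible approach, as an illustrative sketch (not the official answer key):
# abandoned_items_df = (abandoned_carts_df
#                       .withColumn("items", explode("cart"))
#                       .groupBy("items")
#                       .count()
#                       .sort("items"))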
# COMMAND ----------
# MAGIC %md #### 6.1: Check Your Work
# MAGIC
# MAGIC Run the following cell to verify that your solution works:
# COMMAND ----------
abandoned_items_df.count()
# COMMAND ----------
expected_columns = ["items", "count"]
expected_count = 12
assert abandoned_items_df.count() == expected_count, "Counts do not match"
assert abandoned_items_df.columns == expected_columns, "Columns do not match"
# COMMAND ----------
# MAGIC %md ### Clean up classroom
# COMMAND ----------
classroom_cleanup()
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| avg_line_length: 28.887273 | max_line_length: 192 | alphanum_fraction: 0.68857 |

| hexsha: 97e791cacee4deda9b69e647552f0426007d4227 | size: 4,619 | ext: py | lang: Python |
| max_stars: examples/apiary-scenario/asset_request_workflow.py | cloudblue/connect-python-sdk | 82963ee62d8040804e6568af778d3fd0997af162 | licenses: ["Apache-2.0"] | count: 13 | events: 2019-12-21T05:03:43.000Z .. 2022-02-07T14:17:14.000Z |
| max_issues: examples/apiary-scenario/asset_request_workflow.py | JaviCerveraIngram/connect-python-sdk | 202e9ee8e9c2a0e77ff9343321f6fd946bc49ed6 | licenses: ["Apache-2.0"] | count: 76 | events: 2019-02-14T14:28:04.000Z .. 2019-11-29T11:02:01.000Z |
| max_forks: examples/apiary-scenario/asset_request_workflow.py | ingrammicro/connect-python-sdk | 656d653e4065637e2cc5768d7d554de17d0120eb | licenses: ["Apache-2.0"] | count: 8 | events: 2019-02-11T08:12:02.000Z .. 2019-11-19T10:32:08.000Z |
| content: |
# -*- coding: utf-8 -*-
# This file is part of the Ingram Micro Cloud Blue Connect SDK.
# Copyright (c) 2019-2020 Ingram Micro. All Rights Reserved.
""" This is part of an example of the integration between Connect and
a Vendor System API.
The details of this scenario are documented in the documentation portal:
https://connect.cloudblue.com/community/sdk/
This microservice searches for all purchase requests in pending status in Connect
and creates the corresponding request in the vendor system using the Vendor System API.
"""
import warnings
import requests
from connect.config import Config
from connect.logger import logger
from connect.models import AssetRequest
from connect.resources.automation_engine import AutomationEngine
from connect.resources.fulfillment import Fulfillment
# URL of the Vendor API, in this case the apiary.io scenario
VENDOR_API_URL = 'https://SET_YOUR_OWN_SAMPLE.apiary-mock.com/'
# Enable processing of deprecation warnings
warnings.simplefilter('default')
# Set logger level / default level ERROR
logger.setLevel('DEBUG')
# If we remove this line, it is done implicitly
Config(file='examples/apiary-scenario/config.json')
class AssetRequest(AutomationEngine):
resource = 'requests'
model_class = AssetRequest
def __init__(self, config=None):
super().__init__(config=config)
self._fulfillment = Fulfillment(config)
def dispatch(self, request):
return self.process_request(request)
def process_request(self, request):
if (request.type == 'purchase'):
if (len(request.asset.items) == 1):
tenant_param_id = ''
for param in request.asset.params:
if (param.name == 'tenantId'):
tenant_param_id = param.value
if (tenant_param_id == ''):
self.create_request(request)
else:
logger.info('Skip process')
else:
logger.info('Request malformed, too many items')
else:
            logger.info('This processor does not handle this type of request')
return False
def create_request(self, request):
for item in request.asset.items:
mpn = item.mpn
quantity = item.quantity
break
url = VENDOR_API_URL + "tenant?externalId=" + mpn
response = requests.get(url, data='').json()
first_name = request.asset.tiers.customer.contact_info.contact.first_name
last_name = request.asset.tiers.customer.contact_info.contact.last_name
address = request.asset.tiers.customer.contact_info.address_line1
postal_code = request.asset.tiers.customer.contact_info.postal_code
country = request.asset.tiers.customer.contact_info.country
email = request.asset.tiers.customer.contact_info.contact.email
account_phone = request.asset.tiers.customer.contact_info.contact.phone_number.phone_number
if response['externalId'] != request.asset.id:
url = VENDOR_API_URL + 'tenant'
payload = {
'Attributes': {
'product': {
'item': mpn,
'quantity': quantity
},
'account': {
'accountFirstName': first_name,
'accountLastName': last_name,
'accountCompany': request.asset.tiers.customer.name,
'accountAddress': address,
'accountCity': request.asset.tiers.customer.contact_info.city,
'accountState': request.asset.tiers.customer.contact_info.state,
'accountPostalCode': postal_code,
'accountCountry': country,
'accountEmail': email,
'accountPhone': account_phone
}
}
}
response = requests.post(url, data=payload).json()
if (response['tenantId'] != ''):
data = {
"params": [{
"id": "tenantId",
"value": response['tenantId']
}]
}
self._fulfillment.update_param_asset_request(request.id, data, 'vendor system Id')
return response
else:
logger.info('Error in Vendor System')
return False
if __name__ == '__main__':
asset_request_example = AssetRequest()
asset_request_example.process()
| avg_line_length: 39.144068 | max_line_length: 99 | alphanum_fraction: 0.599047 |

| hexsha: b1b302b70789fd34acbf973ac2df2c9b2b4314aa | size: 5,195 | ext: py | lang: Python |
| max_stars: src/posts/views.py | vikasvmads/Post-With-Django | 8c088c847cc82f42a0d477166a3d5a0c7512b688 | licenses: ["MIT"] | count: 1 | events: 2019-04-30T09:07:56.000Z .. 2019-04-30T09:07:56.000Z |
| max_issues: src/posts/views.py | vikasvmads/Post-With-Django | 8c088c847cc82f42a0d477166a3d5a0c7512b688 | licenses: ["MIT"] | count: null | events: null .. null |
| max_forks: src/posts/views.py | vikasvmads/Post-With-Django | 8c088c847cc82f42a0d477166a3d5a0c7512b688 | licenses: ["MIT"] | count: null | events: null .. null |
| content: |
try:
from urllib import quote_plus #python 2
except:
pass
try:
from urllib.parse import quote_plus #python 3
except:
pass
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from comments.models import comments
from comments.forms import CommentForm
from .forms import PostForm
from .models import Post
def post_create(request):
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
form = PostForm(request.POST or None, request.FILES or None)
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
# message success
messages.success(request, "Successfully Created")
return HttpResponseRedirect(instance.get_absolute_url())
context = {
"form": form,
}
return render(request, "post_form.html", context)
from django.views.generic import DetailView
class PostDetailView(DetailView):
template_name = 'post_detail.html'
def get_object(self, *args, **kwargs):
slug = self.kwargs.get("slug")
instance = get_object_or_404(Post, slug=slug)
if instance.publish > timezone.now().date() or instance.draft:
if not self.request.user.is_staff or not self.request.user.is_superuser:
raise Http404
return instance
def get_context_data(self, *args, **kwargs):
context = super(PostDetailView, self).get_context_data(*args, **kwargs)
instance = context['object']
context['share_string'] = quote_plus(instance.content)
return context
# in urls.py --> PostDetailView.as_view() instead of post_detail
def post_detail(request, slug=None):
instance = get_object_or_404(Post, slug=slug)
if instance.publish > timezone.now().date() or instance.draft:
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
share_string = quote_plus(instance.content)
instance_data = {
'content_type' : instance.get_content_type,
'object_id' : instance.id,
}
form = CommentForm(request.POST or None , initial=instance_data)
if form.is_valid():
c_type = form.cleaned_data.get('content_type')
obj_id = form.cleaned_data.get('object_id')
content_type = ContentType.objects.get(model=c_type)
content_data = form.cleaned_data.get('content')
parent_obj = None
try:
parent_id= int(request.POST.get("parent_id"))
except:
parent_id= None
if parent_id:
qs = comments.objects.filter(id=parent_id)
if qs.exists():
parent_obj = qs.first()
new_comment , created = comments.objects.get_or_create(
user = request.user,
content_type = content_type,
object_id = obj_id ,
content = content_data,
parent = parent_obj
)
return HttpResponseRedirect(new_comment.content_object.get_absolute_url())
Comments = comments.objects.filter_by_instance(instance)
context = {
"title": instance.title,
"instance": instance,
"share_string": share_string,
"comment_form":form,
"comments":Comments
}
return render(request, "post_detail.html", context)
def post_list(request):
today = timezone.now().date()
queryset_list = Post.objects.active() #.order_by("-timestamp")
if request.user.is_staff or request.user.is_superuser:
queryset_list = Post.objects.all()
query = request.GET.get("q")
if query:
queryset_list = queryset_list.filter(
Q(title__icontains=query)|
Q(content__icontains=query)|
Q(user__first_name__icontains=query) |
Q(user__last_name__icontains=query)
).distinct()
paginator = Paginator(queryset_list, 8) # Show 25 contacts per page
page_request_var = "page"
page = request.GET.get(page_request_var)
try:
queryset = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
queryset = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
queryset = paginator.page(paginator.num_pages)
context = {
"object_list": queryset,
"title": "List",
"page_request_var": page_request_var,
"today": today,
}
return render(request, "post_list.html", context)
def post_update(request, slug=None):
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
instance = get_object_or_404(Post, slug=slug)
form = PostForm(request.POST or None, request.FILES or None, instance=instance)
if form.is_valid():
instance = form.save(commit=False)
instance.save()
messages.success(request, "<a href='#'>Item</a> Saved", extra_tags='html_safe')
return HttpResponseRedirect(instance.get_absolute_url())
context = {
"title": instance.title,
"instance": instance,
"form":form,
}
return render(request, "post_form.html", context)
def post_delete(request, slug=None):
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
instance = get_object_or_404(Post, slug=slug)
instance.delete()
messages.success(request, "Successfully deleted")
return redirect("posts:list")
| avg_line_length: 29.856322 | max_line_length: 81 | alphanum_fraction: 0.744177 |

| hexsha: 9d6633cf565c433921fb4878e1c06b22fc99c4d3 | size: 2,380 | ext: py | lang: Python |
| max_stars: Machine-Learning-with-Python- From-LM-to-DL/Unit 1.Linear Classifiers and Generalizations/my_functions.py | andresdelarosa1887/Public-Projects | db8d8e0c0f5f0f7326346462fcdfe21ce8142a12 | licenses: ["Unlicense"] | count: 1 | events: 2020-09-29T17:29:34.000Z .. 2020-09-29T17:29:34.000Z |
| max_issues: Machine-Learning-with-Python- From-LM-to-DL/Unit 1.Linear Classifiers and Generalizations/my_functions.py | andresdelarosa1887/Public-Projects | db8d8e0c0f5f0f7326346462fcdfe21ce8142a12 | licenses: ["Unlicense"] | count: null | events: null .. null |
| max_forks: Machine-Learning-with-Python- From-LM-to-DL/Unit 1.Linear Classifiers and Generalizations/my_functions.py | andresdelarosa1887/Public-Projects | db8d8e0c0f5f0f7326346462fcdfe21ce8142a12 | licenses: ["Unlicense"] | count: null | events: null .. null |
| content: |
msg= "hello_world"
import numpy as np
## Classifier: using Pegasos
from string import punctuation, digits
import numpy as np
import random
from matplotlib import pyplot as plt
def get_order(n_samples):
try:
with open(str(n_samples) + '.txt') as fp:
line = fp.readline()
return list(map(int, line.split(',')))
except FileNotFoundError:
random.seed(1)
indices = list(range(n_samples))
random.shuffle(indices)
return indices
def pegasos_single_step_update(
feature_vector,
label,
L,
eta,
current_theta,
current_theta_0):
if label*(feature_vector@current_theta + current_theta_0) <= 1:
current_theta = (1 - eta*L)*current_theta + eta*label*feature_vector
current_theta_0 = current_theta_0 + eta*label
else:
current_theta = (1 - eta*L)*current_theta
return (current_theta, current_theta_0)
def classifier(feature_matrix, labels, T, L):
pegasos_theta = np.zeros(len(feature_matrix[0]))
pegasos_theta_0 = 0
update_counter = 0
    # run T passes of Pegasos single-step updates over the data
for t in range(T):
for i in get_order(feature_matrix.shape[0]):
update_counter += 1
eta = 1/(np.sqrt(update_counter))
pegasos_theta, pegasos_theta_0 = pegasos_single_step_update(feature_matrix[i],
labels[i],
L,
eta,
pegasos_theta,
pegasos_theta_0)
return (pegasos_theta, pegasos_theta_0)
##Classification Function##
def classify(feature_matrix, theta, theta_0):
result= []
for i, x in enumerate(feature_matrix):
if (np.dot(feature_matrix[i], theta) + theta_0) >0:
classification= 1
else:
classification= -1
result.append(classification)
return(np.array(result))
def accuracy(preds, targets):
"""
Given length-N vectors containing predicted and target labels,
    returns the fraction of predictions that are correct.
"""
return (preds == targets).mean()
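# Illustrative usage sketch on synthetic data (not part of the original file):
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 5))
    true_theta = np.array([1.0, -2.0, 0.5, 0.0, 1.5])
    y = np.where(X @ true_theta > 0, 1, -1)
    theta, theta_0 = classifier(X, y, T=10, L=0.01)
    print("training accuracy:", accuracy(classify(X, theta, theta_0), y))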
| avg_line_length: 33.521127 | max_line_length: 90 | alphanum_fraction: 0.555882 |

| hexsha: b404a70e7dc272f2ee71a5c99f4226879351975c | size: 1,702 | ext: py | lang: Python |
| max_stars: test/functional/feature_uacomment.py | PeterL73/veil | 2825d735275cd592b1fd5207b0dfdca2d4e3e78c | licenses: ["MIT"] | count: 124 | events: 2018-12-25T00:01:18.000Z .. 2021-12-26T19:38:43.000Z |
| max_issues: test/functional/feature_uacomment.py | PeterL73/veil | 2825d735275cd592b1fd5207b0dfdca2d4e3e78c | licenses: ["MIT"] | count: 702 | events: 2018-12-16T18:07:18.000Z .. 2022-03-18T16:52:14.000Z |
| max_forks: test/functional/feature_uacomment.py | PeterL73/veil | 2825d735275cd592b1fd5207b0dfdca2d4e3e78c | licenses: ["MIT"] | count: 151 | events: 2018-12-13T07:33:34.000Z .. 2022-01-29T11:35:23.000Z |
| content: |
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -uacomment option."""
import re
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import assert_equal
class UacommentTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
self.log.info("test multiple -uacomment")
test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1]
assert_equal(test_uacomment, "(testnode0)")
self.restart_node(0, ["-uacomment=foo"])
foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1]
assert_equal(foo_uacomment, "(testnode0; foo)")
self.log.info("test -uacomment max length")
self.stop_node(0)
        expected = r"Error: Total length of network version string \([0-9]+\) exceeds maximum length \(256\). Reduce the number or size of uacomments."
self.nodes[0].assert_start_raises_init_error(["-uacomment=" + 'a' * 256], expected, match=ErrorMatch.FULL_REGEX)
self.log.info("test -uacomment unsafe characters")
for unsafe_char in ['/', ':', '(', ')']:
            expected = r"Error: User Agent comment \(" + re.escape(unsafe_char) + r"\) contains unsafe characters."
self.nodes[0].assert_start_raises_init_error(["-uacomment=" + unsafe_char], expected, match=ErrorMatch.FULL_REGEX)
if __name__ == '__main__':
UacommentTest().main()
| avg_line_length: 41.512195 | max_line_length: 150 | alphanum_fraction: 0.692714 |

| hexsha: 6bcd9345c3167dada174c7d901d05f38caabf0c6 | size: 115 | ext: py | lang: Python |
| max_stars: src/tools/inspector/resetOnTargetDestroyed.py | t3kt/raytk | e0e2b3643b2f536d597c5db64f02d17f7e8f23ac | licenses: ["CC-BY-4.0"] | count: 108 | events: 2020-11-23T01:22:37.000Z .. 2022-03-29T09:27:32.000Z |
| max_issues: src/tools/inspector/resetOnTargetDestroyed.py | t3kt/raytk | e0e2b3643b2f536d597c5db64f02d17f7e8f23ac | licenses: ["CC-BY-4.0"] | count: 794 | events: 2020-11-21T22:27:37.000Z .. 2022-03-24T06:41:19.000Z |
| max_forks: src/tools/inspector/resetOnTargetDestroyed.py | t3kt/raytk | e0e2b3643b2f536d597c5db64f02d17f7e8f23ac | licenses: ["CC-BY-4.0"] | count: 3 | events: 2021-06-19T00:57:54.000Z .. 2021-11-01T11:55:07.000Z |
| content: |
def onDestroy():
ext.inspector.Reset()
def onNameChange(changeOp):
return
def onPathChange(changeOp):
return
| avg_line_length: 12.777778 | max_line_length: 27 | alphanum_fraction: 0.756522 |

| hexsha: d61b525b3f398dacd0049fded9924e079ff583a9 | size: 814 | ext: py | lang: Python |
| max_stars: licenses/migrations/0010_auto_20201105_1404.py | ayushmankumar7/cc-licenses | 51f0dd26c17c7fcff0008630452cc75505977408 | licenses: ["MIT"] | count: null | events: null .. null |
| max_issues: licenses/migrations/0010_auto_20201105_1404.py | ayushmankumar7/cc-licenses | 51f0dd26c17c7fcff0008630452cc75505977408 | licenses: ["MIT"] | count: null | events: null .. null |
| max_forks: licenses/migrations/0010_auto_20201105_1404.py | ayushmankumar7/cc-licenses | 51f0dd26c17c7fcff0008630452cc75505977408 | licenses: ["MIT"] | count: null | events: null .. null |
| content: |
# Generated by Django 2.2.13 on 2020-11-05 19:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("licenses", "0009_auto_20201029_1453"),
]
operations = [
migrations.AddField(
model_name="legalcode",
name="deed_url",
field=models.URLField(default=None),
preserve_default=False,
),
migrations.AddField(
model_name="legalcode",
name="license_url",
field=models.URLField(default=None),
preserve_default=False,
),
migrations.AddField(
model_name="legalcode",
name="plain_text_url",
field=models.URLField(default=None),
preserve_default=False,
),
]
| avg_line_length: 25.4375 | max_line_length: 48 | alphanum_fraction: 0.570025 |

| hexsha: 40c28d9bee34c66cb0a749e7e883ac7d7820799f | size: 1,503 | ext: py | lang: Python |
| max_stars: dev/potential/ThreeBodyPotential/three_body_utils.py | eragasa/pypospack | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | licenses: ["MIT"] | count: 4 | events: 2018-01-18T19:59:56.000Z .. 2020-08-25T11:56:52.000Z |
| max_issues: dev/potential/ThreeBodyPotential/three_body_utils.py | eragasa/pypospack | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | licenses: ["MIT"] | count: 1 | events: 2018-04-22T23:02:13.000Z .. 2018-04-22T23:02:13.000Z |
| max_forks: dev/potential/ThreeBodyPotential/three_body_utils.py | eragasa/pypospack | 21cdecaf3b05c87acc532d992be2c04d85bfbc22 | licenses: ["MIT"] | count: 1 | events: 2019-09-14T07:04:42.000Z .. 2019-09-14T07:04:42.000Z |
| content: |
import pytest
def get_2body_pairs(symbols):
pair = []
for i,s_i in enumerate(symbols):
for j,s_j in enumerate(symbols):
if i<=j:
pair.append("{}{}".format(s_i,s_j))
return pair
def get_3body_triples(symbols):
triples = []
for i,s_i in enumerate(symbols):
for j,s_j in enumerate(symbols):
for k,s_k in enumerate(symbols):
triples.append("{}{}{}".format(s_i,s_j,s_k))
return triples
def dev__get_2body_pairs__1_element():
symbols = ['Si']
pairs = get_2body_pairs(symbols)
print(pairs)
def dev__get_2body_pairs__2_element():
symbols = ['Si','Ge']
pairs = get_2body_pairs(symbols)
print(pairs)
def dev__get_2body_pairs__3_element():
symbols = ['Si','Ge','Sn']
pairs = get_2body_pairs(symbols)
print(pairs)
def dev__get_3body_triples__1_element():
symbols = ['Si']
triples = get_3body_triples(symbols)
print(triples)
def dev__get_3body_triples__2_element():
symbols = ['Si','Ge']
triples = get_3body_triples(symbols)
print(triples)
def dev__get_3body_triples__3_element():
symbols = ['Si','Ge','Sn']
triples = get_3body_triples(symbols)
print(triples)
if __name__ == "__main__":
dev__get_2body_pairs__1_element()
dev__get_2body_pairs__2_element()
dev__get_2body_pairs__3_element()
dev__get_3body_triples__1_element()
dev__get_3body_triples__2_element()
dev__get_3body_triples__3_element()
| avg_line_length: 21.471429 | max_line_length: 60 | alphanum_fraction: 0.667332 |

| hexsha: 77a915d9c9619650b756c00ed070590d72b8be7d | size: 612 | ext: py | lang: Python |
| max_stars: netbox/utilities/exceptions.py | esljaz/netbox | e7f64334c06748b4b85c54d881f5e2b03b9463b5 | licenses: ["Apache-2.0"] | count: 2 | events: 2021-06-02T03:00:05.000Z .. 2021-07-30T18:52:32.000Z |
| max_issues: netbox/utilities/exceptions.py | emersonfelipesp/netbox | fecca5ad83fb6b48a2f15982dfd3242653f105f9 | licenses: ["Apache-2.0"] | count: 25 | events: 2019-09-17T19:40:50.000Z .. 2022-03-11T04:01:55.000Z |
| max_forks: netbox/utilities/exceptions.py | emersonfelipesp/netbox | fecca5ad83fb6b48a2f15982dfd3242653f105f9 | licenses: ["Apache-2.0"] | count: 1 | events: 2020-09-16T11:31:25.000Z .. 2020-09-16T11:31:25.000Z |
| content: |
from rest_framework import status
from rest_framework.exceptions import APIException
class AbortTransaction(Exception):
"""
A dummy exception used to trigger a database transaction rollback.
"""
pass
class RQWorkerNotRunningException(APIException):
"""
Indicates the temporary inability to enqueue a new task (e.g. custom script execution) because no RQ worker
processes are currently running.
"""
status_code = status.HTTP_503_SERVICE_UNAVAILABLE
default_detail = 'Unable to process request: RQ worker process not running.'
default_code = 'rq_worker_not_running'
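# Hypothetical usage sketch (not part of this module): AbortTransaction is meant to
# be raised inside an atomic block so that a dry run rolls every change back.
#
#   from django.db import transaction
#
#   try:
#       with transaction.atomic():
#           ...  # apply changes
#           raise AbortTransaction()
#   except AbortTransaction:
#       pass  # all changes above were rolled back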
| avg_line_length: 30.6 | max_line_length: 111 | alphanum_fraction: 0.75817 |

| hexsha: 8ac483c2426701e883ef8115677e7214048bcf53 | size: 68,561 | ext: py | lang: Python |
| max_stars: fiftyone/utils/coco.py | Pascal0902/fiftyone | 94ce6bebac6df848547228869a7825a9e8e67595 | licenses: ["Apache-2.0"] | count: null | events: null .. null |
| max_issues: fiftyone/utils/coco.py | Pascal0902/fiftyone | 94ce6bebac6df848547228869a7825a9e8e67595 | licenses: ["Apache-2.0"] | count: 1 | events: 2022-03-25T19:27:53.000Z .. 2022-03-25T19:27:53.000Z |
| max_forks: fiftyone/utils/coco.py | cnheider/fiftyone | 9fa8d64f913b0cdf59efd10f72d8c6d37864ce33 | licenses: ["Apache-2.0"] | count: null | events: null .. null |
| content: |
"""
Utilities for working with datasets in
`COCO format <https://cocodataset.org/#format-data>`_.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from collections import defaultdict
import csv
from datetime import datetime
from itertools import groupby
import logging
import multiprocessing
import os
import random
import shutil
import warnings
import numpy as np
from skimage import measure
import eta.core.image as etai
import eta.core.serial as etas
import eta.core.utils as etau
import eta.core.web as etaw
import fiftyone.core.labels as fol
import fiftyone.core.metadata as fom
import fiftyone.core.utils as fou
import fiftyone.utils.data as foud
mask_utils = fou.lazy_import(
"pycocotools.mask", callback=lambda: fou.ensure_import("pycocotools")
)
logger = logging.getLogger(__name__)
def add_coco_labels(
sample_collection,
label_field,
labels_or_path,
label_type="detections",
coco_id_field="coco_id",
classes=None,
extra_attrs=None,
use_polylines=False,
tolerance=None,
):
"""Adds the given COCO labels to the collection.
The ``labels_or_path`` argument can be either a list of COCO annotations in
the format below, or the path to a JSON file containing such data on disk.
When ``label_type="detections"``, the labels should have format::
[
{
"id": 1,
"image_id": 1,
"category_id": 2,
"bbox": [260, 177, 231, 199],
# optional
"score": 0.95,
"area": 45969,
"iscrowd": 0,
# extra attrs
...
},
...
]
When ``label_type="segmentations"``, the labels should have format::
[
{
"id": 1,
"image_id": 1,
"category_id": 2,
"bbox": [260, 177, 231, 199],
"segmentation": [...],
# optional
"score": 0.95,
"area": 45969,
"iscrowd": 0,
# extra attrs
...
},
...
]
When ``label_type="keypoints"``, the labels should have format::
[
{
"id": 1,
"image_id": 1,
"category_id": 2,
"keypoints": [224, 226, 2, ...],
"num_keypoints": 10,
# extra attrs
...
},
...
]
See `this page <https://cocodataset.org/#format-data>`_ for more
information about the COCO data format.
Args:
sample_collection: a
:class:`fiftyone.core.collections.SampleCollection`
label_field: the label field in which to store the labels. The field
will be created if necessary
labels_or_path: a list of COCO annotations or the path to a JSON file
containing such data on disk
label_type ("detections"): the type of labels to load. Supported values
are ``("detections", "segmentations", "keypoints")``
coco_id_field ("coco_id"): the field of ``sample_collection``
containing the COCO IDs for the samples
classes (None): the list of class label strings. If not provided, these
must be available from
:meth:`classes <fiftyone.core.collections.SampleCollection.classes>` or
:meth:`default_classes <fiftyone.core.collections.SampleCollection.default_classes>`
extra_attrs (None): whether to load extra annotation attributes onto
the imported labels. Supported values are:
- ``None``/``False``: do not load extra attributes
- ``True``: load all extra attributes found
- a name or list of names of specific attributes to load
use_polylines (False): whether to represent segmentations as
:class:`fiftyone.core.labels.Polylines` instances rather than
:class:`fiftyone.core.labels.Detections` with dense masks
tolerance (None): a tolerance, in pixels, when generating approximate
polylines for instance masks. Typical values are 1-3 pixels
"""
if classes is None:
if label_field in sample_collection.classes:
classes = sample_collection.classes[label_field]
elif sample_collection.default_classes:
classes = sample_collection.default_classes
if not classes:
raise ValueError(
"You must provide `classes` in order to load COCO labels"
)
if etau.is_str(labels_or_path):
labels = etas.load_json(labels_or_path)
else:
labels = labels_or_path
coco_objects_map = defaultdict(list)
for d in labels:
coco_obj = COCOObject.from_anno_dict(d, extra_attrs=extra_attrs)
coco_objects_map[coco_obj.image_id].append(coco_obj)
id_map = {
k: v for k, v in zip(*sample_collection.values([coco_id_field, "id"]))
}
coco_ids = sorted(coco_objects_map.keys())
bad_ids = set(coco_ids) - set(id_map.keys())
if bad_ids:
coco_ids = [_id for _id in coco_ids if _id not in bad_ids]
logger.warning(
"Ignoring labels with %d nonexistent COCO IDs: %s",
len(bad_ids),
sorted(bad_ids),
)
view = sample_collection.select(
[id_map[coco_id] for coco_id in coco_ids], ordered=True
)
view.compute_metadata()
heights, widths = view.values(["metadata.height", "metadata.width"])
labels = []
for coco_id, height, width in zip(coco_ids, heights, widths):
coco_objects = coco_objects_map[coco_id]
frame_size = (height, width)
if label_type == "detections":
_labels = _coco_objects_to_detections(
coco_objects, frame_size, classes, None, False
)
elif label_type == "segmentations":
if use_polylines:
_labels = _coco_objects_to_polylines(
coco_objects, frame_size, classes, None, tolerance
)
else:
_labels = _coco_objects_to_detections(
coco_objects, frame_size, classes, None, True
)
elif label_type == "keypoints":
_labels = _coco_objects_to_keypoints(
coco_objects, frame_size, classes
)
else:
raise ValueError(
"Unsupported label_type='%s'. Supported values are %s"
% (label_type, ("detections", "segmentations", "keypoints"))
)
labels.append(_labels)
view.set_values(label_field, labels)
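# Hypothetical usage sketch (dataset name, label field, and the annotations path are
# placeholders, not part of this module):
#
#   import fiftyone as fo
#   import fiftyone.utils.coco as fouc
#
#   dataset = fo.load_dataset("my-dataset")  # samples must carry a `coco_id` field
#   fouc.add_coco_labels(
#       dataset,
#       "predictions",
#       "/path/to/coco_predictions.json",
#       label_type="detections",
#       classes=dataset.default_classes,
#   )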
class COCODetectionDatasetImporter(
foud.LabeledImageDatasetImporter, foud.ImportPathsMixin
):
"""Importer for COCO detection datasets stored on disk.
See :class:`fiftyone.types.dataset_types.COCODetectionDataset` for format
details.
Args:
dataset_dir (None): the dataset directory
data_path (None): an optional parameter that enables explicit control
over the location of the media. Can be any of the following:
- a folder name like ``"data"`` or ``"data/"`` specifying a
subfolder of ``dataset_dir`` where the media files reside
- an absolute directory path where the media files reside. In
this case, the ``dataset_dir`` has no effect on the location of
the data
- a filename like ``"data.json"`` specifying the filename of the
JSON data manifest file in ``dataset_dir``
- an absolute filepath specifying the location of the JSON data
manifest. In this case, ``dataset_dir`` has no effect on the
location of the data
If None, this parameter will default to whichever of ``data/`` or
``data.json`` exists in the dataset directory
labels_path (None): an optional parameter that enables explicit control
over the location of the labels. Can be any of the following:
- a filename like ``"labels.json"`` specifying the location of
the labels in ``dataset_dir``
- an absolute filepath to the labels. In this case,
``dataset_dir`` has no effect on the location of the labels
If None, the parameter will default to ``labels.json``
label_types (None): a label type or list of label types to load. The
supported values are
``("detections", "segmentations", "keypoints")``. By default, only
"detections" are loaded
classes (None): a string or list of strings specifying required classes
to load. Only samples containing at least one instance of a
specified class will be loaded
image_ids (None): an optional list of specific image IDs to load. Can
be provided in any of the following formats:
- a list of ``<image-id>`` ints or strings
- a list of ``<split>/<image-id>`` strings
- the path to a text (newline-separated), JSON, or CSV file
containing the list of image IDs to load in either of the first
two formats
include_id (False): whether to include the COCO ID of each sample in the
loaded labels
extra_attrs (None): whether to load extra annotation attributes onto
the imported labels. Supported values are:
- ``None``/``False``: do not load extra attributes
- ``True``: load all extra attributes found
- a name or list of names of specific attributes to load
only_matching (False): whether to only load labels that match the
``classes`` requirement that you provide (True), or to load all
labels for samples that match the requirements (False)
use_polylines (False): whether to represent segmentations as
:class:`fiftyone.core.labels.Polylines` instances rather than
:class:`fiftyone.core.labels.Detections` with dense masks
tolerance (None): a tolerance, in pixels, when generating approximate
polylines for instance masks. Typical values are 1-3 pixels
shuffle (False): whether to randomly shuffle the order in which the
samples are imported
seed (None): a random seed to use when shuffling
max_samples (None): a maximum number of samples to load. If
``label_types`` and/or ``classes`` are also specified, first
priority will be given to samples that contain all of the specified
label types and/or classes, followed by samples that contain at
least one of the specified labels types or classes. The actual
number of samples loaded may be less than this maximum value if the
dataset does not contain sufficient samples matching your
requirements. By default, all matching samples are loaded
"""
def __init__(
self,
dataset_dir=None,
data_path=None,
labels_path=None,
label_types=None,
classes=None,
image_ids=None,
include_id=False,
extra_attrs=None,
only_matching=False,
use_polylines=False,
tolerance=None,
shuffle=False,
seed=None,
max_samples=None,
):
data_path = self._parse_data_path(
dataset_dir=dataset_dir, data_path=data_path, default="data/",
)
labels_path = self._parse_labels_path(
dataset_dir=dataset_dir,
labels_path=labels_path,
default="labels.json",
)
label_types = _parse_label_types(label_types)
if include_id:
label_types.append("coco_id")
super().__init__(
dataset_dir=dataset_dir,
shuffle=shuffle,
seed=seed,
max_samples=max_samples,
)
self.data_path = data_path
self.labels_path = labels_path
self.label_types = label_types
self.classes = classes
self.image_ids = image_ids
self.extra_attrs = extra_attrs
self.only_matching = only_matching
self.use_polylines = use_polylines
self.tolerance = tolerance
self._info = None
self._classes = None
self._supercategory_map = None
self._image_paths_map = None
self._image_dicts_map = None
self._annotations = None
self._filenames = None
self._iter_filenames = None
def __iter__(self):
self._iter_filenames = iter(self._filenames)
return self
def __len__(self):
return len(self._filenames)
def __next__(self):
filename = next(self._iter_filenames)
if os.path.isabs(filename):
image_path = filename
else:
image_path = self._image_paths_map[filename]
image_dict = self._image_dicts_map.get(filename, None)
if image_dict is None:
image_metadata = fom.ImageMetadata.build_for(image_path)
return image_path, image_metadata, None
image_id = image_dict["id"]
width = image_dict["width"]
height = image_dict["height"]
image_metadata = fom.ImageMetadata(width=width, height=height)
if self._annotations is None:
return image_path, image_metadata, None
coco_objects = self._annotations.get(image_id, [])
frame_size = (width, height)
if self.classes is not None and self.only_matching:
coco_objects = _get_matching_objects(
coco_objects, self.classes, self._classes
)
label = {}
if "detections" in self.label_types:
detections = _coco_objects_to_detections(
coco_objects,
frame_size,
self._classes,
self._supercategory_map,
False, # no segmentations
)
if detections is not None:
label["detections"] = detections
if "segmentations" in self.label_types:
if self.use_polylines:
segmentations = _coco_objects_to_polylines(
coco_objects,
frame_size,
self._classes,
self._supercategory_map,
self.tolerance,
)
else:
segmentations = _coco_objects_to_detections(
coco_objects,
frame_size,
self._classes,
self._supercategory_map,
True, # load segmentations
)
if segmentations is not None:
label["segmentations"] = segmentations
if "keypoints" in self.label_types:
keypoints = _coco_objects_to_keypoints(
coco_objects, frame_size, self._classes
)
if keypoints is not None:
label["keypoints"] = keypoints
if "coco_id" in self.label_types:
label["coco_id"] = image_id
if self._has_scalar_labels:
label = next(iter(label.values())) if label else None
return image_path, image_metadata, label
@property
def has_dataset_info(self):
return True
@property
def has_image_metadata(self):
return True
@property
def _has_scalar_labels(self):
return len(self.label_types) == 1
@property
def label_cls(self):
seg_type = fol.Polylines if self.use_polylines else fol.Detections
types = {
"detections": fol.Detections,
"segmentations": seg_type,
"keypoints": fol.Keypoints,
"coco_id": int,
}
if self._has_scalar_labels:
return types[self.label_types[0]]
return {k: v for k, v in types.items() if k in self.label_types}
def setup(self):
self._image_paths_map = self._load_data_map(self.data_path)
if self.labels_path is not None and os.path.isfile(self.labels_path):
(
info,
classes,
supercategory_map,
images,
annotations,
) = load_coco_detection_annotations(
self.labels_path, extra_attrs=self.extra_attrs
)
if classes is not None:
info["classes"] = classes
image_ids = _get_matching_image_ids(
classes,
images,
annotations,
image_ids=self.image_ids,
classes=self.classes,
shuffle=self.shuffle,
seed=self.seed,
max_samples=self.max_samples,
)
filenames = [images[_id]["file_name"] for _id in image_ids]
_image_ids = set(image_ids)
image_dicts_map = {
i["file_name"]: i
for _id, i in images.items()
if _id in _image_ids
}
else:
info = {}
classes = None
supercategory_map = None
image_dicts_map = {}
annotations = None
filenames = []
self._info = info
self._classes = classes
self._supercategory_map = supercategory_map
self._image_dicts_map = image_dicts_map
self._annotations = annotations
self._filenames = filenames
def get_dataset_info(self):
return self._info
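# Hypothetical usage sketch (paths are placeholders, not part of this module):
#
#   import fiftyone as fo
#
#   importer = COCODetectionDatasetImporter(
#       dataset_dir="/path/to/coco-dataset",
#       label_types=["detections", "segmentations"],
#       include_id=True,
#       max_samples=100,
#   )
#   dataset = fo.Dataset.from_importer(importer, label_field="ground_truth")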
class COCODetectionDatasetExporter(
foud.LabeledImageDatasetExporter, foud.ExportPathsMixin
):
"""Exporter that writes COCO detection datasets to disk.
This class currently only supports exporting detections and instance
segmentations.
See :class:`fiftyone.types.dataset_types.COCODetectionDataset` for format
details.
Args:
export_dir (None): the directory to write the export. This has no
effect if ``data_path`` and ``labels_path`` are absolute paths
data_path (None): an optional parameter that enables explicit control
over the location of the exported media. Can be any of the
following:
- a folder name like ``"data"`` or ``"data/"`` specifying a
subfolder of ``export_dir`` in which to export the media
- an absolute directory path in which to export the media. In
this case, the ``export_dir`` has no effect on the location of
the data
- a JSON filename like ``"data.json"`` specifying the filename of
the manifest file in ``export_dir`` generated when
``export_media`` is ``"manifest"``
- an absolute filepath specifying the location to write the JSON
manifest file when ``export_media`` is ``"manifest"``. In this
case, ``export_dir`` has no effect on the location of the data
If None, the default value of this parameter will be chosen based
on the value of the ``export_media`` parameter
labels_path (None): an optional parameter that enables explicit control
over the location of the exported labels. Can be any of the
following:
- a filename like ``"labels.json"`` specifying the location in
``export_dir`` in which to export the labels
- an absolute filepath to which to export the labels. In this
case, the ``export_dir`` has no effect on the location of the
labels
If None, the labels will be exported into ``export_dir`` using the
default filename
export_media (None): controls how to export the raw media. The
supported values are:
- ``True``: copy all media files into the output directory
- ``False``: don't export media
- ``"move"``: move all media files into the output directory
- ``"symlink"``: create symlinks to the media files in the output
directory
- ``"manifest"``: create a ``data.json`` in the output directory
that maps UUIDs used in the labels files to the filepaths of
the source media, rather than exporting the actual media
If None, the default value of this parameter will be chosen based
on the value of the ``data_path`` parameter
image_format (None): the image format to use when writing in-memory
images to disk. By default, ``fiftyone.config.default_image_ext``
is used
classes (None): the list of possible class labels. If not provided,
this list will be extracted when :meth:`log_collection` is called,
if possible
info (None): a dict of info as returned by
:meth:`load_coco_detection_annotations`. If not provided, this info
will be extracted when :meth:`log_collection` is called, if
possible
extra_attrs (None): an optional field name or list of field names of
extra label attributes to include in the exported annotations
iscrowd ("iscrowd"): the name of a detection attribute that indicates
whether an object is a crowd (only used if present)
num_decimals (None): an optional number of decimal places at which to
round bounding box pixel coordinates. By default, no rounding is
done
tolerance (None): a tolerance, in pixels, when generating approximate
polylines for instance masks. Typical values are 1-3 pixels
"""
def __init__(
self,
export_dir=None,
data_path=None,
labels_path=None,
export_media=None,
image_format=None,
classes=None,
info=None,
extra_attrs=None,
iscrowd="iscrowd",
num_decimals=None,
tolerance=None,
):
data_path, export_media = self._parse_data_path(
export_dir=export_dir,
data_path=data_path,
export_media=export_media,
default="data/",
)
labels_path = self._parse_labels_path(
export_dir=export_dir,
labels_path=labels_path,
default="labels.json",
)
if etau.is_str(extra_attrs):
extra_attrs = [extra_attrs]
super().__init__(export_dir=export_dir)
self.data_path = data_path
self.labels_path = labels_path
self.export_media = export_media
self.image_format = image_format
self.classes = classes
self.info = info
self.extra_attrs = extra_attrs
self.iscrowd = iscrowd
self.num_decimals = num_decimals
self.tolerance = tolerance
self._labels_map_rev = None
self._image_id = None
self._anno_id = None
self._images = None
self._annotations = None
self._classes = None
self._has_labels = None
self._media_exporter = None
@property
def requires_image_metadata(self):
return True
@property
def label_cls(self):
return fol.Detections
def setup(self):
self._image_id = 0
self._anno_id = 0
self._images = []
self._annotations = []
self._classes = set()
self._has_labels = False
self._parse_classes()
self._media_exporter = foud.ImageExporter(
self.export_media,
export_path=self.data_path,
default_ext=self.image_format,
)
self._media_exporter.setup()
def log_collection(self, sample_collection):
if self.classes is None:
if sample_collection.default_classes:
self.classes = sample_collection.default_classes
self._parse_classes()
elif sample_collection.classes:
self.classes = next(iter(sample_collection.classes.values()))
self._parse_classes()
elif "classes" in sample_collection.info:
self.classes = sample_collection.info["classes"]
self._parse_classes()
if self.info is None:
self.info = sample_collection.info
def export_sample(self, image_or_path, detections, metadata=None):
out_image_path, _ = self._media_exporter.export(image_or_path)
if metadata is None:
metadata = fom.ImageMetadata.build_for(out_image_path)
self._image_id += 1
self._images.append(
{
"id": self._image_id,
"file_name": os.path.basename(out_image_path),
"height": metadata.height,
"width": metadata.width,
"license": None,
"coco_url": None,
}
)
if detections is None:
return
self._has_labels = True
for detection in detections.detections:
self._anno_id += 1
self._classes.add(detection.label)
obj = COCOObject.from_detection(
detection,
metadata,
labels_map_rev=self._labels_map_rev,
extra_attrs=self.extra_attrs,
iscrowd=self.iscrowd,
num_decimals=self.num_decimals,
tolerance=self.tolerance,
)
obj.id = self._anno_id
obj.image_id = self._image_id
self._annotations.append(obj.to_anno_dict())
def close(self, *args):
if self.classes is None:
classes = sorted(self._classes)
labels_map_rev = _to_labels_map_rev(classes)
for anno in self._annotations:
anno["category_id"] = labels_map_rev[anno["category_id"]]
else:
classes = self.classes
date_created = datetime.now().replace(microsecond=0).isoformat()
        info = {
            "year": self.info.get("year", ""),
            "version": self.info.get("version", ""),
            "description": self.info.get(
                "description", "Exported from FiftyOne"
            ),
            "contributor": self.info.get("contributor", ""),
            "url": self.info.get("url", "https://voxel51.com/fiftyone"),
            "date_created": self.info.get("date_created", date_created),
        }
licenses = self.info.get("licenses", [])
categories = self.info.get("categories", None)
if categories is None:
categories = [
{"id": i, "name": l, "supercategory": None}
for i, l in enumerate(classes)
]
labels = {
"info": info,
"licenses": licenses,
"categories": categories,
"images": self._images,
}
if self._has_labels:
labels["annotations"] = self._annotations
etas.write_json(labels, self.labels_path)
self._media_exporter.close()
def _parse_classes(self):
if self.classes is not None:
self._labels_map_rev = _to_labels_map_rev(self.classes)
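# Example usage (hedged sketch, not part of the original source): this exporter
# is typically driven through FiftyOne's standard export API. The dataset name,
# output directory, and label field below are illustrative assumptions.
#
#   import fiftyone as fo
#
#   dataset = fo.load_dataset("my-dataset")
#   dataset.export(
#       export_dir="/tmp/coco-export",
#       dataset_type=fo.types.COCODetectionDataset,
#       label_field="ground_truth",
#   )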
class COCOObject(object):
"""An object in COCO detection format.
Args:
id (None): the ID of the annotation
image_id (None): the ID of the image in which the annotation appears
category_id (None): the category ID of the object
bbox (None): a bounding box for the object in
``[xmin, ymin, width, height]`` format
segmentation (None): the segmentation data for the object
keypoints (None): the keypoints data for the object
score (None): a confidence score for the object
area (None): the area of the bounding box, in pixels
iscrowd (None): whether the detection is a crowd
**attributes: additional custom attributes
"""
def __init__(
self,
id=None,
image_id=None,
category_id=None,
bbox=None,
segmentation=None,
keypoints=None,
score=None,
area=None,
iscrowd=None,
**attributes,
):
self.id = id
self.image_id = image_id
self.category_id = category_id
self.bbox = bbox
self.segmentation = segmentation
self.keypoints = keypoints
self.score = score
self.area = area
self.iscrowd = iscrowd
self.attributes = attributes
def to_polyline(
self, frame_size, classes=None, supercategory_map=None, tolerance=None
):
"""Returns a :class:`fiftyone.core.labels.Polyline` representation of
the object.
Args:
frame_size: the ``(width, height)`` of the image
classes (None): the list of classes
supercategory_map (None): a dict mapping class names to category
dicts
tolerance (None): a tolerance, in pixels, when generating
approximate polylines for instance masks. Typical values are
1-3 pixels
Returns:
a :class:`fiftyone.core.labels.Polyline`, or None if no
segmentation data is available
"""
if self.segmentation is None:
return None
label, attributes = self._get_object_label_and_attributes(
classes, supercategory_map
)
attributes.update(self.attributes)
points = _get_polygons_for_segmentation(
self.segmentation, frame_size, tolerance
)
return fol.Polyline(
label=label,
points=points,
confidence=self.score,
closed=False,
filled=True,
**attributes,
)
def to_keypoints(self, frame_size, classes=None):
"""Returns a :class:`fiftyone.core.labels.Keypoint` representation of
the object.
Args:
frame_size: the ``(width, height)`` of the image
classes (None): the list of classes
Returns:
a :class:`fiftyone.core.labels.Keypoint`, or None if no keypoints
data is available
"""
if self.keypoints is None:
return None
width, height = frame_size
label = self._get_label(classes)
points = []
for x, y, v in fou.iter_batches(self.keypoints, 3):
if v == 0:
continue
points.append((x / width, y / height))
return fol.Keypoint(
label=label,
points=points,
confidence=self.score,
**self.attributes,
)
def to_detection(
self,
frame_size,
classes=None,
supercategory_map=None,
load_segmentation=False,
):
"""Returns a :class:`fiftyone.core.labels.Detection` representation of
the object.
Args:
frame_size: the ``(width, height)`` of the image
classes (None): the list of classes
supercategory_map (None): a dict mapping class names to category
dicts
load_segmentation (False): whether to load the segmentation mask
for the object, if available
Returns:
a :class:`fiftyone.core.labels.Detection`
"""
label, attributes = self._get_object_label_and_attributes(
classes, supercategory_map
)
attributes.update(self.attributes)
width, height = frame_size
x, y, w, h = self.bbox
bounding_box = [x / width, y / height, w / width, h / height]
mask = None
if load_segmentation and self.segmentation is not None:
mask = _coco_segmentation_to_mask(
self.segmentation, self.bbox, frame_size
)
return fol.Detection(
label=label,
bounding_box=bounding_box,
mask=mask,
confidence=self.score,
**attributes,
)
def to_anno_dict(self):
"""Returns a COCO annotation dictionary representation of the object.
Returns:
a COCO annotation dict
"""
d = {
"id": self.id,
"image_id": self.image_id,
"category_id": self.category_id,
}
if self.bbox is not None:
d["bbox"] = self.bbox
if self.keypoints is not None:
d["keypoints"] = self.keypoints
d["num_keypoints"] = len(self.keypoints) // 3
if self.segmentation is not None:
d["segmentation"] = self.segmentation
if self.score is not None:
d["score"] = self.score
if self.area is not None:
d["area"] = self.area
if self.iscrowd is not None:
d["iscrowd"] = self.iscrowd
if self.attributes:
d.update(self.attributes)
return d
@classmethod
def from_detection(
cls,
detection,
metadata,
keypoint=None,
labels_map_rev=None,
extra_attrs=None,
iscrowd="iscrowd",
num_decimals=None,
tolerance=None,
):
"""Creates a :class:`COCOObject` from a
:class:`fiftyone.core.labels.Detection`.
Args:
detection: a :class:`fiftyone.core.labels.Detection`
metadata: a :class:`fiftyone.core.metadata.ImageMetadata` for the
image
keypoint (None): an optional :class:`fiftyone.core.labels.Keypoint`
containing keypoints to include for the object
labels_map_rev (None): an optional dict mapping labels to category
IDs
extra_attrs (None): an optional list of extra attributes to include
iscrowd ("iscrowd"): the name of the crowd attribute (used if
present)
num_decimals (None): an optional number of decimal places at which
to round bounding box pixel coordinates. By default, no
rounding is done
tolerance (None): a tolerance, in pixels, when generating
approximate polylines for instance masks. Typical values are
1-3 pixels
Returns:
a :class:`COCOObject`
"""
if labels_map_rev:
category_id = labels_map_rev[detection.label]
else:
category_id = detection.label
width = metadata.width
height = metadata.height
x, y, w, h = detection.bounding_box
bbox = [x * width, y * height, w * width, h * height]
if num_decimals is not None:
bbox = [round(p, num_decimals) for p in bbox]
area = bbox[2] * bbox[3]
try:
_iscrowd = int(detection[iscrowd])
except KeyError:
# @todo remove Attribute usage
if detection.has_attribute(iscrowd):
_iscrowd = int(detection.get_attribute_value(iscrowd))
else:
_iscrowd = None
frame_size = (width, height)
segmentation = _make_coco_segmentation(
detection, frame_size, _iscrowd, tolerance
)
keypoints = _make_coco_keypoints(keypoint, frame_size)
if extra_attrs:
attributes = {f: getattr(detection, f, None) for f in extra_attrs}
else:
attributes = {}
return cls(
id=None,
image_id=None,
category_id=category_id,
bbox=bbox,
segmentation=segmentation,
keypoints=keypoints,
score=detection.confidence,
area=area,
iscrowd=_iscrowd,
**attributes,
)
@classmethod
def from_anno_dict(cls, d, extra_attrs=None):
"""Creates a :class:`COCOObject` from a COCO annotation dict.
Args:
d: a COCO annotation dict
extra_attrs (None): whether to load extra annotation attributes.
Supported values are:
- ``None``/``False``: do not load extra attributes
- ``True``: load all extra attributes
- a name or list of names of specific attributes to load
Returns:
a :class:`COCOObject`
"""
if extra_attrs is True:
return cls(**d)
if etau.is_str(extra_attrs):
extra_attrs = [extra_attrs]
if extra_attrs:
attributes = {f: d.get(f, None) for f in extra_attrs}
else:
attributes = {}
return cls(
id=d.get("id", None),
image_id=d.get("image_id", None),
category_id=d.get("category_id", None),
bbox=d.get("bbox", None),
segmentation=d.get("segmentation", None),
keypoints=d.get("keypoints", None),
score=d.get("score", None),
area=d.get("area", None),
iscrowd=d.get("iscrowd", None),
**attributes,
)
def _get_label(self, classes):
if classes:
return classes[self.category_id]
return str(self.category_id)
def _get_object_label_and_attributes(self, classes, supercategory_map):
if classes:
label = classes[self.category_id]
else:
label = str(self.category_id)
attributes = {}
if supercategory_map is not None and label in supercategory_map:
supercategory = supercategory_map[label].get("supercategory", None)
else:
supercategory = None
if supercategory is not None:
attributes["supercategory"] = supercategory
if self.iscrowd is not None:
attributes["iscrowd"] = self.iscrowd
return label, attributes
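# Example usage (hedged sketch): round-tripping a raw COCO annotation dict
# through ``COCOObject``. The annotation values and class list below are
# illustrative assumptions.
#
#   anno = {
#       "id": 1,
#       "image_id": 7,
#       "category_id": 2,
#       "bbox": [10, 20, 30, 40],  # [xmin, ymin, width, height] in pixels
#       "iscrowd": 0,
#   }
#   obj = COCOObject.from_anno_dict(anno)
#   detection = obj.to_detection((640, 480), classes=["person", "dog", "cat"])
#   # detection.label == "cat"; detection.bounding_box is in relative
#   # [x, y, w, h] coordinates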
def load_coco_detection_annotations(json_path, extra_attrs=None):
"""Loads the COCO annotations from the given JSON file.
See :class:`fiftyone.types.dataset_types.COCODetectionDataset` for format
details.
Args:
json_path: the path to the annotations JSON file
extra_attrs (None): whether to load extra annotation attributes.
Supported values are:
- ``None``/``False``: do not load extra attributes
- ``True``: load all extra attributes found
- a name or list of names of specific attributes to load
Returns:
a tuple of
- info: a dict of dataset info
- classes: a list of classes
- supercategory_map: a dict mapping class labels to category dicts
- images: a dict mapping image IDs to image dicts
- annotations: a dict mapping image IDs to list of
:class:`COCOObject` instances, or ``None`` for unlabeled datasets
"""
d = etas.load_json(json_path)
# Load info
info = d.get("info", {})
licenses = d.get("licenses", None)
categories = d.get("categories", None)
if licenses is not None:
info["licenses"] = licenses
if categories is not None:
info["categories"] = categories
# Load classes
if categories is not None:
classes, supercategory_map = parse_coco_categories(categories)
else:
classes = None
supercategory_map = None
# Load image metadata
images = {i["id"]: i for i in d.get("images", [])}
# Load annotations
_annotations = d.get("annotations", None)
if _annotations is not None:
annotations = defaultdict(list)
for a in _annotations:
annotations[a["image_id"]].append(
COCOObject.from_anno_dict(a, extra_attrs=extra_attrs)
)
annotations = dict(annotations)
else:
annotations = None
return info, classes, supercategory_map, images, annotations
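# Example usage (hedged sketch): loading a COCO labels file and iterating over
# its per-image annotations. The path below is an illustrative assumption.
#
#   info, classes, supercategory_map, images, annotations = (
#       load_coco_detection_annotations("/path/to/labels.json")
#   )
#   for image_id, coco_objects in (annotations or {}).items():
#       filename = images[image_id]["file_name"]
#       # ... convert each ``COCOObject`` in ``coco_objects`` as needed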
def parse_coco_categories(categories):
"""Parses the COCO categories list.
    The returned ``classes`` list contains a label for every class ID in
    ``[0, max_id]``, inclusive; IDs missing from ``categories`` are assigned
    their stringified ID as a placeholder label.
Args:
        categories: a list of dicts of the form::
[
...
{
"id": 2,
"name": "cat",
"supercategory": "animal",
"keypoints": ["nose", "head", ...],
"skeleton": [[12, 14], [14, 16], ...]
},
...
]
Returns:
a tuple of
- classes: a list of classes
- supercategory_map: a dict mapping class labels to category dicts
"""
cat_map = {c["id"]: c for c in categories}
classes = []
supercategory_map = {}
for cat_id in range(max(cat_map) + 1):
category = cat_map.get(cat_id, None)
        try:
            name = category["name"]
        except (TypeError, KeyError):
            # missing ID (``category`` is None) or malformed category dict
            name = str(cat_id)
classes.append(name)
if category is not None:
supercategory_map[name] = category
return classes, supercategory_map
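# Worked example (hedged sketch): IDs missing from ``categories`` are filled
# with their stringified ID, so the two categories below yield three classes.
#
#   categories = [
#       {"id": 0, "name": "person", "supercategory": "person"},
#       {"id": 2, "name": "cat", "supercategory": "animal"},
#   ]
#   classes, supercategory_map = parse_coco_categories(categories)
#   # classes == ["person", "1", "cat"]
#   # supercategory_map == {"person": {...}, "cat": {...}}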
def is_download_required(
dataset_dir,
split,
year="2017",
label_types=None,
classes=None,
image_ids=None,
max_samples=None,
raw_dir=None,
):
"""Checks whether :meth:`download_coco_dataset_split` must be called in
order for the given directory to contain enough samples to satisfy the
given requirements.
See :class:`fiftyone.types.dataset_types.COCODetectionDataset` for the
format in which ``dataset_dir`` must be arranged.
Args:
dataset_dir: the directory to download the dataset
split: the split to download. Supported values are
``("train", "validation", "test")``
year ("2017"): the dataset year to download. Supported values are
``("2014", "2017")``
label_types (None): a label type or list of label types to load. The
supported values are ``("detections", "segmentations")``. By
default, only "detections" are loaded
classes (None): a string or list of strings specifying required classes
to load. Only samples containing at least one instance of a
specified class will be loaded
image_ids (None): an optional list of specific image IDs to load. Can
be provided in any of the following formats:
- a list of ``<image-id>`` ints or strings
- a list of ``<split>/<image-id>`` strings
- the path to a text (newline-separated), JSON, or CSV file
containing the list of image IDs to load in either of the first
two formats
max_samples (None): the maximum number of samples desired
raw_dir (None): a directory in which full annotations files may be
stored to avoid re-downloads in the future
Returns:
True/False
"""
logging.disable(logging.CRITICAL)
try:
_download_coco_dataset_split(
dataset_dir,
split,
year=year,
label_types=label_types,
classes=classes,
image_ids=image_ids,
max_samples=max_samples,
raw_dir=raw_dir,
dry_run=True,
)
return False # everything was downloaded
except:
return True # something needs to be downloaded
finally:
logging.disable(logging.NOTSET)
def download_coco_dataset_split(
dataset_dir,
split,
year="2017",
label_types=None,
classes=None,
image_ids=None,
num_workers=None,
shuffle=None,
seed=None,
max_samples=None,
raw_dir=None,
scratch_dir=None,
):
"""Utility that downloads full or partial splits of the
`COCO dataset <https://cocodataset.org>`_.
See :class:`fiftyone.types.dataset_types.COCODetectionDataset` for the
format in which ``dataset_dir`` will be arranged.
Any existing files are not re-downloaded.
Args:
dataset_dir: the directory to download the dataset
split: the split to download. Supported values are
``("train", "validation", "test")``
year ("2017"): the dataset year to download. Supported values are
``("2014", "2017")``
label_types (None): a label type or list of label types to load. The
supported values are ``("detections", "segmentations")``. By
default, only "detections" are loaded
classes (None): a string or list of strings specifying required classes
to load. Only samples containing at least one instance of a
specified class will be loaded
image_ids (None): an optional list of specific image IDs to load. Can
be provided in any of the following formats:
- a list of ``<image-id>`` ints or strings
- a list of ``<split>/<image-id>`` strings
- the path to a text (newline-separated), JSON, or CSV file
containing the list of image IDs to load in either of the first
two formats
num_workers (None): the number of processes to use when downloading
individual images. By default, ``multiprocessing.cpu_count()`` is
used
shuffle (False): whether to randomly shuffle the order in which samples
are chosen for partial downloads
seed (None): a random seed to use when shuffling
max_samples (None): a maximum number of samples to load. If
``label_types`` and/or ``classes`` are also specified, first
priority will be given to samples that contain all of the specified
label types and/or classes, followed by samples that contain at
least one of the specified labels types or classes. The actual
number of samples loaded may be less than this maximum value if the
dataset does not contain sufficient samples matching your
requirements. By default, all matching samples are loaded
raw_dir (None): a directory in which full annotations files may be
stored to avoid re-downloads in the future
scratch_dir (None): a scratch directory to use to download any
necessary temporary files
Returns:
a tuple of:
- num_samples: the total number of downloaded images
- classes: the list of all classes
"""
return _download_coco_dataset_split(
dataset_dir,
split,
year=year,
label_types=label_types,
classes=classes,
image_ids=image_ids,
num_workers=num_workers,
shuffle=shuffle,
seed=seed,
max_samples=max_samples,
raw_dir=raw_dir,
scratch_dir=scratch_dir,
dry_run=False,
)
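# Example usage (hedged sketch): downloading a small, class-filtered slice of
# the 2017 validation split. The destination directory is an illustrative
# assumption.
#
#   num_samples, all_classes = download_coco_dataset_split(
#       "/tmp/coco-2017-validation",
#       "validation",
#       year="2017",
#       label_types="detections",
#       classes=["cat", "dog"],
#       max_samples=25,
#   )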
def _download_coco_dataset_split(
dataset_dir,
split,
year="2017",
label_types=None,
classes=None,
image_ids=None,
num_workers=None,
shuffle=None,
seed=None,
max_samples=None,
raw_dir=None,
scratch_dir=None,
dry_run=False,
):
if year not in _IMAGE_DOWNLOAD_LINKS:
raise ValueError(
"Unsupported year '%s'; supported values are %s"
% (year, tuple(_IMAGE_DOWNLOAD_LINKS.keys()))
)
if split not in _IMAGE_DOWNLOAD_LINKS[year]:
raise ValueError(
"Unsupported split '%s'; supported values are %s"
            % (split, tuple(_IMAGE_DOWNLOAD_LINKS[year].keys()))
)
if classes is not None and split == "test":
logger.warning("Test split is unlabeled; ignoring `classes`")
classes = None
if scratch_dir is None:
scratch_dir = os.path.join(dataset_dir, "scratch")
anno_path = os.path.join(dataset_dir, "labels.json")
images_dir = os.path.join(dataset_dir, "data")
split_size = _SPLIT_SIZES[year][split]
etau.ensure_dir(images_dir)
#
# Download annotations to `raw_dir`, if necessary
#
if raw_dir is None:
raw_dir = os.path.join(dataset_dir, "raw")
etau.ensure_dir(raw_dir)
if split != "test":
src_path = _ANNOTATION_DOWNLOAD_LINKS[year]
rel_path = _ANNOTATION_PATHS[year][split]
subdir = "trainval"
anno_type = "annotations"
else:
src_path = _TEST_INFO_DOWNLOAD_LINKS[year]
rel_path = _TEST_INFO_PATHS[year]
subdir = "test"
anno_type = "test info"
zip_path = os.path.join(scratch_dir, os.path.basename(src_path))
unzip_dir = os.path.join(scratch_dir, subdir)
content_dir = os.path.join(unzip_dir, os.path.dirname(rel_path))
full_anno_path = os.path.join(raw_dir, os.path.basename(rel_path))
if not os.path.isfile(full_anno_path):
if dry_run:
raise ValueError("%s is not downloaded" % src_path)
logger.info("Downloading %s to '%s'", anno_type, zip_path)
etaw.download_file(src_path, path=zip_path)
logger.info("Extracting %s to '%s'", anno_type, full_anno_path)
etau.extract_zip(zip_path, outdir=unzip_dir, delete_zip=False)
_merge_dir(content_dir, raw_dir)
else:
logger.info("Found %s at '%s'", anno_type, full_anno_path)
(
_,
all_classes,
_,
images,
annotations,
) = load_coco_detection_annotations(full_anno_path, extra_attrs=True)
#
# Download images to `images_dir`, if necessary
#
images_src_path = _IMAGE_DOWNLOAD_LINKS[year][split]
images_zip_path = os.path.join(
scratch_dir, os.path.basename(images_src_path)
)
unzip_images_dir = os.path.splitext(images_zip_path)[0]
if classes is None and image_ids is None and max_samples is None:
# Full image download
num_downloaded = len(etau.list_files(images_dir))
if num_downloaded < split_size:
if dry_run:
raise ValueError("%s is not downloaded" % images_src_path)
if num_downloaded > 0:
logger.info(
"Found %d (< %d) downloaded images; must download full "
"image zip",
num_downloaded,
split_size,
)
logger.info("Downloading images to '%s'", images_zip_path)
etaw.download_file(images_src_path, path=images_zip_path)
logger.info("Extracting images to '%s'", images_dir)
etau.extract_zip(images_zip_path, delete_zip=False)
etau.move_dir(unzip_images_dir, images_dir)
else:
logger.info("Images already downloaded")
else:
# Partial image download
if image_ids is not None:
# Start with specific images
image_ids = _parse_image_ids(image_ids, images, split=split)
else:
# Start with all images
image_ids = list(images.keys())
if classes is not None:
# Filter by specified classes
all_ids, any_ids = _get_images_with_classes(
image_ids, annotations, classes, all_classes
)
else:
all_ids = image_ids
any_ids = []
all_ids = sorted(all_ids)
any_ids = sorted(any_ids)
if shuffle:
if seed is not None:
random.seed(seed)
random.shuffle(all_ids)
random.shuffle(any_ids)
image_ids = all_ids + any_ids
# Determine IDs to download
existing_ids, downloadable_ids = _get_existing_ids(
images_dir, images, image_ids
)
if max_samples is not None:
num_existing = len(existing_ids)
num_downloadable = len(downloadable_ids)
num_available = num_existing + num_downloadable
if num_available < max_samples:
logger.warning(
"Only found %d (<%d) samples matching your "
"requirements",
num_available,
max_samples,
)
if max_samples > num_existing:
num_download = max_samples - num_existing
download_ids = downloadable_ids[:num_download]
else:
download_ids = []
else:
download_ids = downloadable_ids
# Download necessary images
num_existing = len(existing_ids)
num_download = len(download_ids)
if num_existing > 0:
if num_download > 0:
logger.info(
"%d images found; downloading the remaining %d",
num_existing,
num_download,
)
else:
logger.info("Sufficient images already downloaded")
elif num_download > 0:
logger.info("Downloading %d images", num_download)
if num_download > 0:
if dry_run:
raise ValueError("%d images must be downloaded" % num_download)
_download_images(images_dir, download_ids, images, num_workers)
if dry_run:
return None, None
#
# Write usable annotations file to `anno_path`
#
downloaded_filenames = etau.list_files(images_dir)
num_samples = len(downloaded_filenames) # total downloaded
if num_samples >= split_size:
logger.info("Writing annotations to '%s'", anno_path)
etau.copy_file(full_anno_path, anno_path)
else:
logger.info(
"Writing annotations for %d downloaded samples to '%s'",
num_samples,
anno_path,
)
_write_partial_annotations(
full_anno_path, anno_path, split, downloaded_filenames
)
return num_samples, all_classes
def _merge_dir(indir, outdir):
etau.ensure_dir(outdir)
for filename in os.listdir(indir):
inpath = os.path.join(indir, filename)
outpath = os.path.join(outdir, filename)
shutil.move(inpath, outpath)
def _write_partial_annotations(inpath, outpath, split, filenames):
d = etas.load_json(inpath)
id_map = {i["file_name"]: i["id"] for i in d["images"]}
filenames = set(filenames)
image_ids = {id_map[f] for f in filenames}
d["images"] = [i for i in d["images"] if i["file_name"] in filenames]
if split != "test":
d["annotations"] = [
a for a in d["annotations"] if a["image_id"] in image_ids
]
else:
d.pop("annotations", None)
etas.write_json(d, outpath)
def _parse_label_types(label_types):
if label_types is None:
return ["detections"]
if etau.is_str(label_types):
label_types = [label_types]
else:
label_types = list(label_types)
bad_types = [l for l in label_types if l not in _SUPPORTED_LABEL_TYPES]
if len(bad_types) == 1:
raise ValueError(
"Unsupported label type '%s'. Supported types are %s"
% (bad_types[0], _SUPPORTED_LABEL_TYPES)
)
if len(bad_types) > 1:
raise ValueError(
"Unsupported label types %s. Supported types are %s"
% (bad_types, _SUPPORTED_LABEL_TYPES)
)
return label_types
def _get_matching_image_ids(
all_classes,
images,
annotations,
image_ids=None,
classes=None,
shuffle=False,
seed=None,
max_samples=None,
):
if image_ids is not None:
image_ids = _parse_image_ids(image_ids, images)
else:
image_ids = list(images.keys())
if classes is not None:
all_ids, any_ids = _get_images_with_classes(
image_ids, annotations, classes, all_classes
)
else:
all_ids = image_ids
any_ids = []
all_ids = sorted(all_ids)
any_ids = sorted(any_ids)
if shuffle:
if seed is not None:
random.seed(seed)
random.shuffle(all_ids)
random.shuffle(any_ids)
image_ids = all_ids + any_ids
if max_samples is not None:
return image_ids[:max_samples]
return image_ids
def _get_existing_ids(images_dir, images, image_ids):
filenames = set(etau.list_files(images_dir))
existing_ids = []
downloadable_ids = []
for _id in image_ids:
if images[_id]["file_name"] in filenames:
existing_ids.append(_id)
else:
downloadable_ids.append(_id)
return existing_ids, downloadable_ids
def _download_images(images_dir, image_ids, images, num_workers):
if num_workers is None or num_workers < 1:
num_workers = multiprocessing.cpu_count()
tasks = []
for image_id in image_ids:
image_dict = images[image_id]
url = image_dict["coco_url"]
path = os.path.join(images_dir, image_dict["file_name"])
tasks.append((url, path))
if not tasks:
return
if num_workers == 1:
with fou.ProgressBar() as pb:
for task in pb(tasks):
_do_download(task)
else:
with fou.ProgressBar(total=len(tasks)) as pb:
with multiprocessing.Pool(num_workers) as pool:
for _ in pool.imap_unordered(_do_download, tasks):
pb.update()
def _do_download(args):
url, path = args
etaw.download_file(url, path=path, quiet=True)
def _get_images_with_classes(
image_ids, annotations, target_classes, all_classes
):
if etau.is_str(target_classes):
target_classes = [target_classes]
bad_classes = [c for c in target_classes if c not in all_classes]
if bad_classes:
raise ValueError("Unsupported classes: %s" % bad_classes)
labels_map_rev = _to_labels_map_rev(all_classes)
class_ids = {labels_map_rev[c] for c in target_classes}
all_ids = []
any_ids = []
for image_id in image_ids:
coco_objects = annotations.get(image_id, None)
if not coco_objects:
continue
oids = set(o.category_id for o in coco_objects)
if class_ids.issubset(oids):
all_ids.append(image_id)
elif class_ids & oids:
any_ids.append(image_id)
return all_ids, any_ids
def _parse_image_ids(raw_image_ids, images, split=None):
# Load IDs from file
if etau.is_str(raw_image_ids):
image_ids_path = raw_image_ids
ext = os.path.splitext(image_ids_path)[-1]
if ext == ".txt":
raw_image_ids = _load_image_ids_txt(image_ids_path)
elif ext == ".json":
raw_image_ids = _load_image_ids_json(image_ids_path)
elif ext == ".csv":
raw_image_ids = _load_image_ids_csv(image_ids_path)
else:
raise ValueError(
"Invalid image ID file '%s'. Supported formats are .txt, "
".csv, and .json" % ext
)
image_ids = []
for raw_id in raw_image_ids:
if etau.is_str(raw_id):
if "/" in raw_id:
_split, raw_id = raw_id.split("/")
if split and _split != split:
continue
raw_id = int(raw_id.strip())
image_ids.append(raw_id)
# Validate that IDs exist
invalid_ids = [_id for _id in image_ids if _id not in images]
if invalid_ids:
raise ValueError(
"Found %d invalid IDs, ex: %s" % (len(invalid_ids), invalid_ids[0])
)
return image_ids
def _load_image_ids_txt(txt_path):
with open(txt_path, "r") as f:
return [l.strip() for l in f.readlines()]
def _load_image_ids_csv(csv_path):
with open(csv_path, "r", newline="") as f:
dialect = csv.Sniffer().sniff(f.read(10240))
f.seek(0)
if dialect.delimiter in _CSV_DELIMITERS:
reader = csv.reader(f, dialect)
else:
reader = csv.reader(f)
image_ids = [row for row in reader]
if isinstance(image_ids[0], list):
# Flatten list
image_ids = [_id for ids in image_ids for _id in ids]
return image_ids
def _load_image_ids_json(json_path):
return [_id for _id in etas.load_json(json_path)]
def _make_images_list(images_dir):
logger.info("Computing image metadata for '%s'", images_dir)
image_paths = foud.parse_images_dir(images_dir)
images = []
with fou.ProgressBar() as pb:
for idx, image_path in pb(enumerate(image_paths)):
metadata = fom.ImageMetadata.build_for(image_path)
images.append(
{
"id": idx,
"file_name": os.path.basename(image_path),
"height": metadata.height,
"width": metadata.width,
"license": None,
"coco_url": None,
}
)
return images
def _to_labels_map_rev(classes):
return {c: i for i, c in enumerate(classes)}
def _get_matching_objects(coco_objects, target_classes, all_classes):
if etau.is_str(target_classes):
target_classes = [target_classes]
labels_map_rev = _to_labels_map_rev(all_classes)
class_ids = {labels_map_rev[c] for c in target_classes}
return [obj for obj in coco_objects if obj.category_id in class_ids]
def _coco_objects_to_polylines(
coco_objects, frame_size, classes, supercategory_map, tolerance
):
polylines = []
for coco_obj in coco_objects:
polyline = coco_obj.to_polyline(
frame_size,
classes=classes,
supercategory_map=supercategory_map,
tolerance=tolerance,
)
if polyline is not None:
polylines.append(polyline)
else:
msg = "Skipping object with no segmentation mask"
warnings.warn(msg)
return fol.Polylines(polylines=polylines)
def _coco_objects_to_detections(
coco_objects, frame_size, classes, supercategory_map, load_segmentations
):
detections = []
for coco_obj in coco_objects:
detection = coco_obj.to_detection(
frame_size,
classes=classes,
supercategory_map=supercategory_map,
load_segmentation=load_segmentations,
)
if load_segmentations and detection.mask is None:
msg = "Skipping object with no segmentation mask"
warnings.warn(msg)
else:
detections.append(detection)
return fol.Detections(detections=detections)
def _coco_objects_to_keypoints(coco_objects, frame_size, classes):
keypoints = []
for coco_obj in coco_objects:
keypoints.append(coco_obj.to_keypoints(frame_size, classes=classes))
return fol.Keypoints(keypoints=keypoints)
#
# The methods below are taken, in part, from:
# https://github.com/waspinator/pycococreator/blob/207b4fa8bbaae22ebcdeb3bbf00b724498e026a7/pycococreatortools/pycococreatortools.py
#
def _get_polygons_for_segmentation(segmentation, frame_size, tolerance):
width, height = frame_size
# Convert to [[x1, y1, x2, y2, ...]] polygons
if isinstance(segmentation, list):
abs_points = segmentation
else:
if isinstance(segmentation["counts"], list):
# Uncompressed RLE
rle = mask_utils.frPyObjects(segmentation, height, width)
else:
# RLE
rle = segmentation
mask = mask_utils.decode(rle)
abs_points = _mask_to_polygons(mask, tolerance)
# Convert to [[(x1, y1), (x2, y2), ...]] in relative coordinates
rel_points = []
for apoints in abs_points:
rel_points.append(
            [(x / width, y / height) for x, y in _pairwise(apoints)]
)
return rel_points
def _pairwise(x):
y = iter(x)
return zip(y, y)
def _coco_segmentation_to_mask(segmentation, bbox, frame_size):
if segmentation is None:
return None
x, y, w, h = bbox
width, height = frame_size
if isinstance(segmentation, list):
# Polygon -- a single object might consist of multiple parts, so merge
# all parts into one mask RLE code
rle = mask_utils.merge(
mask_utils.frPyObjects(segmentation, height, width)
)
elif isinstance(segmentation["counts"], list):
# Uncompressed RLE
rle = mask_utils.frPyObjects(segmentation, height, width)
else:
# RLE
rle = segmentation
mask = mask_utils.decode(rle).astype(bool)
return mask[
int(round(y)) : int(round(y + h)), int(round(x)) : int(round(x + w)),
]
def _make_coco_segmentation(detection, frame_size, iscrowd, tolerance):
if detection.mask is None:
return None
dobj = detection.to_detected_object()
mask = etai.render_instance_image(dobj.mask, dobj.bounding_box, frame_size)
if iscrowd:
return _mask_to_rle(mask)
return _mask_to_polygons(mask, tolerance)
def _make_coco_keypoints(keypoint, frame_size):
if keypoint is None:
return None
width, height = frame_size
# @todo true COCO format would set v = 1/2 based on whether the keypoints
# lie within the object's segmentation, but we'll be lazy for now
keypoints = []
for x, y in keypoint.points:
keypoints.extend((int(x * width), int(y * height), 2))
return keypoints
def _mask_to_rle(mask):
counts = []
for i, (value, elements) in enumerate(groupby(mask.ravel(order="F"))):
if i == 0 and value == 1:
counts.append(0)
counts.append(len(list(elements)))
return {"counts": counts, "size": list(mask.shape)}
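# Worked example (hedged sketch): the RLE counts are computed in column-major
# (Fortran) order, with a leading 0 only when the mask starts with a 1.
#
#   mask = np.array([[0, 1],
#                    [1, 1]], dtype=np.uint8)
#   _mask_to_rle(mask)
#   # == {"counts": [1, 3], "size": [2, 2]}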
def _mask_to_polygons(mask, tolerance):
if tolerance is None:
tolerance = 2
# Pad mask to close contours of shapes which start and end at an edge
padded_mask = np.pad(mask, pad_width=1, mode="constant", constant_values=0)
contours = measure.find_contours(padded_mask, 0.5)
contours = np.subtract(contours, 1) # undo padding
polygons = []
for contour in contours:
contour = _close_contour(contour)
contour = measure.approximate_polygon(contour, tolerance)
if len(contour) < 3:
continue
contour = np.flip(contour, axis=1)
segmentation = contour.ravel().tolist()
# After padding and subtracting 1 there may be -0.5 points
segmentation = [0 if i < 0 else i for i in segmentation]
polygons.append(segmentation)
return polygons
def _close_contour(contour):
if not np.array_equal(contour[0], contour[-1]):
contour = np.vstack((contour, contour[0]))
return contour
_IMAGE_DOWNLOAD_LINKS = {
"2014": {
"train": "http://images.cocodataset.org/zips/train2014.zip",
"validation": "http://images.cocodataset.org/zips/val2014.zip",
"test": "http://images.cocodataset.org/zips/test2014.zip",
},
"2017": {
"train": "http://images.cocodataset.org/zips/train2017.zip",
"validation": "http://images.cocodataset.org/zips/val2017.zip",
"test": "http://images.cocodataset.org/zips/test2017.zip",
},
}
_SPLIT_SIZES = {
"2014": {"train": 82783, "test": 40775, "validation": 40504},
"2017": {"train": 118287, "test": 40670, "validation": 5000},
}
_ANNOTATION_DOWNLOAD_LINKS = {
"2014": "http://images.cocodataset.org/annotations/annotations_trainval2014.zip",
"2017": "http://images.cocodataset.org/annotations/annotations_trainval2017.zip",
}
_ANNOTATION_PATHS = {
"2014": {
"train": "annotations/instances_train2014.json",
"validation": "annotations/instances_val2014.json",
},
"2017": {
"train": "annotations/instances_train2017.json",
"validation": "annotations/instances_val2017.json",
},
}
_KEYPOINTS_PATHS = {
"2014": {
"train": "annotations/person_keypoints_train2014.json",
"validation": "annotations/person_keypoints_val2014.json",
},
"2017": {
"train": "annotations/person_keypoints_train2017.json",
"validation": "annotations/person_keypoints_val2017.json",
},
}
_TEST_INFO_DOWNLOAD_LINKS = {
"2014": "http://images.cocodataset.org/annotations/image_info_test2014.zip",
"2017": "http://images.cocodataset.org/annotations/image_info_test2017.zip",
}
_TEST_INFO_PATHS = {
"2014": "annotations/image_info_test2014.json",
"2017": "annotations/image_info_test2017.json",
}
_SUPPORTED_LABEL_TYPES = ["detections", "segmentations", "keypoints"]
_SUPPORTED_SPLITS = ["train", "validation", "test"]
_CSV_DELIMITERS = [",", ";", ":", " ", "\t", "\n"]
| 32.203382
| 132
| 0.597643
|
c80ea624772fc2018213361af067895999948f09
| 15,076
|
py
|
Python
|
detectron2/structures/boxes.py
|
hhy-ee/PedestrianDetection-NohNMS
|
482078a6bd0ff8cf03fbf7f6988e475f75c56e57
|
[
"Apache-2.0"
] | null | null | null |
detectron2/structures/boxes.py
|
hhy-ee/PedestrianDetection-NohNMS
|
482078a6bd0ff8cf03fbf7f6988e475f75c56e57
|
[
"Apache-2.0"
] | null | null | null |
detectron2/structures/boxes.py
|
hhy-ee/PedestrianDetection-NohNMS
|
482078a6bd0ff8cf03fbf7f6988e475f75c56e57
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
import numpy as np
from enum import IntEnum, unique
from typing import Iterator, List, Tuple, Union
import torch
from detectron2.layers import cat
_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray]
@unique
class BoxMode(IntEnum):
"""
Enum of different ways to represent a box.
Attributes:
XYXY_ABS: (x0, y0, x1, y1) in absolute floating points coordinates.
The coordinates in range [0, width or height].
XYWH_ABS: (x0, y0, w, h) in absolute floating points coordinates.
XYXY_REL: (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.
XYWH_REL: (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.
XYWHA_ABS: (xc, yc, w, h, a) in absolute floating points coordinates.
(xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.
"""
XYXY_ABS = 0
XYWH_ABS = 1
XYXY_REL = 2
XYWH_REL = 3
XYWHA_ABS = 4
@staticmethod
def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType:
"""
Args:
box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5
from_mode, to_mode (BoxMode)
Returns:
The converted box of the same type.
"""
if from_mode == to_mode:
return box
original_type = type(box)
is_numpy = isinstance(box, np.ndarray)
single_box = isinstance(box, (list, tuple))
if single_box:
assert len(box) == 4 or len(box) == 5, (
"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,"
" where k == 4 or 5"
)
arr = torch.tensor(box)[None, :]
else:
# avoid modifying the input box
if is_numpy:
arr = torch.from_numpy(np.asarray(box)).clone()
else:
arr = box.clone()
assert to_mode.value not in [
BoxMode.XYXY_REL,
BoxMode.XYWH_REL,
] and from_mode.value not in [
BoxMode.XYXY_REL,
BoxMode.XYWH_REL,
], "Relative mode not yet supported!"
if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:
assert (
arr.shape[-1] == 5
), "The last dimension of input shape must be 5 for XYWHA format"
original_dtype = arr.dtype
arr = arr.double()
w = arr[:, 2]
h = arr[:, 3]
a = arr[:, 4]
c = torch.abs(torch.cos(a * math.pi / 180.0))
s = torch.abs(torch.sin(a * math.pi / 180.0))
# This basically computes the horizontal bounding rectangle of the rotated box
new_w = c * w + s * h
new_h = c * h + s * w
# convert center to top-left corner
arr[:, 0] -= new_w / 2.0
arr[:, 1] -= new_h / 2.0
# bottom-right corner
arr[:, 2] = arr[:, 0] + new_w
arr[:, 3] = arr[:, 1] + new_h
arr = arr[:, :4].to(dtype=original_dtype)
elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:
original_dtype = arr.dtype
arr = arr.double()
arr[:, 0] += arr[:, 2] / 2.0
arr[:, 1] += arr[:, 3] / 2.0
angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)
arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)
else:
if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:
arr[:, 2] += arr[:, 0]
arr[:, 3] += arr[:, 1]
elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:
arr[:, 2] -= arr[:, 0]
arr[:, 3] -= arr[:, 1]
else:
raise NotImplementedError(
"Conversion from BoxMode {} to {} is not supported yet".format(
from_mode, to_mode
)
)
if single_box:
return original_type(arr.flatten())
if is_numpy:
return arr.numpy()
else:
return arr
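# Worked example (hedged sketch): converting a single XYWH_ABS box to XYXY_ABS
# adds the width/height to the top-left corner, so (x0, y0, w, h) =
# (10, 20, 30, 40) becomes (x0, y0, x1, y1) = (10, 20, 40, 60).
#
#   box_xyxy = BoxMode.convert(
#       [10.0, 20.0, 30.0, 40.0], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS
#   )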
class Boxes:
"""
This structure stores a list of boxes as a Nx4 torch.Tensor.
It supports some common methods about boxes
(`area`, `clip`, `nonempty`, etc),
and also behaves like a Tensor
(support indexing, `to(device)`, `.device`, and iteration over all boxes)
Attributes:
tensor (torch.Tensor): float matrix of Nx4.
"""
BoxSizeType = Union[List[int], Tuple[int, int]]
def __init__(self, tensor: torch.Tensor):
"""
Args:
tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
if tensor.numel() == 0:
# Use reshape, so we don't end up creating a new tensor that does not depend on
# the inputs (and consequently confuses jit)
tensor = tensor.reshape((0, 4)).to(dtype=torch.float32, device=device)
assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()
self.tensor = tensor
def clone(self) -> "Boxes":
"""
Clone the Boxes.
Returns:
Boxes
"""
return Boxes(self.tensor.clone())
def to(self, device: str) -> "Boxes":
return Boxes(self.tensor.to(device))
def area(self) -> torch.Tensor:
"""
Computes the area of all the boxes.
Returns:
torch.Tensor: a vector with areas of each box.
"""
box = self.tensor
area = (box[:, 2] - box[:, 0] + 1) * (box[:, 3] - box[:, 1] + 1)
return area
def clip(self, box_size: BoxSizeType) -> None:
"""
Clip (in place) the boxes by limiting x coordinates to the range [0, width]
and y coordinates to the range [0, height].
Args:
box_size (height, width): The clipping box's size.
"""
assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!"
h, w = box_size
self.tensor[:, 0].clamp_(min=0, max=w)
self.tensor[:, 1].clamp_(min=0, max=h)
self.tensor[:, 2].clamp_(min=0, max=w)
self.tensor[:, 3].clamp_(min=0, max=h)
def nonempty(self, threshold: int = 0) -> torch.Tensor:
"""
Find boxes that are non-empty.
        A box is considered empty if either of its sides is no larger than the
        threshold.
Returns:
Tensor:
a binary vector which represents whether each box is empty
(False) or non-empty (True).
"""
box = self.tensor
widths = box[:, 2] - box[:, 0]
heights = box[:, 3] - box[:, 1]
keep = (widths > threshold) & (heights > threshold)
return keep
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Boxes":
"""
Returns:
Boxes: Create a new :class:`Boxes` by indexing.
        The following usages are allowed:
1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.
2. `new_boxes = boxes[2:10]`: return a slice of boxes.
3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor
with `length = len(boxes)`. Nonzero elements in the vector will be selected.
Note that the returned Boxes might share storage with this Boxes,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return Boxes(self.tensor[item].view(1, -1))
b = self.tensor[item]
assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item)
return Boxes(b)
def __len__(self) -> int:
return self.tensor.shape[0]
def __repr__(self) -> str:
return "Boxes(" + str(self.tensor) + ")"
def inside_box(self, box_size: BoxSizeType, boundary_threshold: int = 0) -> torch.Tensor:
"""
Args:
box_size (height, width): Size of the reference box.
boundary_threshold (int): Boxes that extend beyond the reference box
boundary by more than boundary_threshold are considered "outside".
Returns:
a binary vector, indicating whether each box is inside the reference box.
"""
height, width = box_size
inds_inside = (
(self.tensor[..., 0] >= -boundary_threshold)
& (self.tensor[..., 1] >= -boundary_threshold)
& (self.tensor[..., 2] < width + boundary_threshold)
& (self.tensor[..., 3] < height + boundary_threshold)
)
return inds_inside
def get_centers(self) -> torch.Tensor:
"""
Returns:
The box centers in a Nx2 array of (x, y).
"""
return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2
def scale(self, scale_x: float, scale_y: float) -> None:
"""
Scale the box with horizontal and vertical scaling factors
"""
self.tensor[:, 0::2] *= scale_x
self.tensor[:, 1::2] *= scale_y
@staticmethod
def cat(boxes_list: List["Boxes"]) -> "Boxes":
"""
Concatenates a list of Boxes into a single Boxes
Arguments:
boxes_list (list[Boxes])
Returns:
Boxes: the concatenated Boxes
"""
assert isinstance(boxes_list, (list, tuple))
assert len(boxes_list) > 0
assert all(isinstance(box, Boxes) for box in boxes_list)
cat_boxes = type(boxes_list[0])(cat([b.tensor for b in boxes_list], dim=0))
return cat_boxes
@property
def device(self) -> torch.device:
return self.tensor.device
def __iter__(self) -> Iterator[torch.Tensor]:
"""
Yield a box as a Tensor of shape (4,) at a time.
"""
yield from self.tensor
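# Example usage (hedged sketch): note that this fork computes ``area()`` with
# the inclusive-pixel convention (width + 1) * (height + 1).
#
#   boxes = Boxes(torch.tensor([[0.0, 0.0, 10.0, 10.0]]))
#   boxes.area()            # tensor([121.]) under the +1 convention
#   boxes.clip((480, 640))  # clip in place to a (height, width) = (480, 640) image
#   boxes.nonempty()        # tensor([True])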
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
area1 = boxes1.area()
area2 = boxes2.area()
boxes1, boxes2 = boxes1.tensor, boxes2.tensor
width_height = (
torch.min(boxes1[:, None, 2:], boxes2[:, 2:])
- torch.max(boxes1[:, None, :2], boxes2[:, :2])
+ 1
) # [N,M,2]
width_height.clamp_(min=0) # [N,M,2]
inter = width_height.prod(dim=2) # [N,M]
del width_height
# handle empty boxes
iou = torch.where(
inter > 0,
inter / (area1[:, None] + area2 - inter),
torch.zeros(1, dtype=inter.dtype, device=inter.device),
)
return iou
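# Worked example (hedged sketch): identical boxes have IoU 1 and disjoint boxes
# have IoU 0; the +1 inclusive-pixel convention above is used for all areas.
#
#   a = Boxes(torch.tensor([[0.0, 0.0, 10.0, 10.0]]))
#   b = Boxes(torch.tensor([[0.0, 0.0, 10.0, 10.0], [20.0, 20.0, 30.0, 30.0]]))
#   pairwise_iou(a, b)  # tensor([[1., 0.]])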
def pairwise_ioa(boxes1: Boxes, anchor: Boxes) -> torch.Tensor:
    """
    Given two lists of boxes of size N and M,
    compute the IoA (intersection over the anchor box's area)
    between __all__ N x M pairs of boxes.
    The box order must be (xmin, ymin, xmax, ymax).
    Args:
        boxes1,anchor (Boxes): two `Boxes`. Contains N & M boxes, respectively.
    Returns:
        Tensor: IoA, sized [N,M].
    """
area_anchor = anchor.area()
boxes1, anchor = boxes1.tensor, anchor.tensor
width_height = (
torch.min(boxes1[:, None, 2:], anchor[:, 2:])
- torch.max(boxes1[:, None, :2], anchor[:, :2])
+ 1
) # [N,M,2]
width_height.clamp_(min=0) # [N,M,2]
inter = width_height.prod(dim=2) # [N,M]
del width_height
# handle empty boxes
iou = torch.where(
inter > 0,
inter / area_anchor[None, :],
torch.zeros(1, dtype=inter.dtype, device=inter.device),
)
return iou
def calculate_iou(boxes1: torch.Tensor, boxes2: torch.Tensor) -> torch.Tensor:
"""
Given two lists of boxes of size N and N,
compute the IoU (intersection over union)
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Tensor): two `Boxes`. Both contains N boxes.
Returns:
Tensor: IoU, sized [N].
"""
area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
width_height = torch.min(boxes1[:, 2:], boxes2[:, 2:]) - torch.max(
boxes1[:, :2], boxes2[:, :2]
) # [N,2]
width_height.clamp_(min=0) # [N,2]
inter = width_height.prod(dim=1) # [N]
del width_height
# handle empty boxes
iou = torch.where(
inter > 0,
inter / (area1 + area2 - inter),
torch.zeros(1, dtype=inter.dtype, device=inter.device),
)
return iou
def calculate_iog(proposals: torch.Tensor, gts: torch.Tensor) -> torch.Tensor:
    """
    Given two lists of boxes of size N and N,
    compute the IoG (intersection over the ground-truth box's area)
    between matched pairs of boxes.
    The box order must be (xmin, ymin, xmax, ymax).
    Args:
        proposals,gts (Tensor): two box tensors, each containing N boxes.
    Returns:
        Tensor: IoG, sized [N].
    """
gt_area = (gts[:, 2] - gts[:, 0]) * (gts[:, 3] - gts[:, 1])
boxes1, boxes2 = proposals, gts
width_height = torch.min(boxes1[:, 2:], boxes2[:, 2:]) - torch.max(
boxes1[:, :2], boxes2[:, :2]
    )  # [N,2]
    width_height.clamp_(min=0)  # [N,2]
    inter = width_height.prod(dim=1)  # [N]
del width_height
# handle empty boxes
iog = torch.where(
inter > 0, inter / (gt_area), torch.zeros(1, dtype=inter.dtype, device=inter.device)
)
return iog
def matched_boxlist_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:
"""
Compute pairwise intersection over union (IOU) of two sets of matched
boxes. The box order must be (xmin, ymin, xmax, ymax).
Similar to boxlist_iou, but computes only diagonal elements of the matrix
Arguments:
boxes1: (Boxes) bounding boxes, sized [N,4].
boxes2: (Boxes) bounding boxes, sized [N,4].
Returns:
(tensor) iou, sized [N].
"""
    assert len(boxes1) == len(boxes2), (
        "boxlists should have the same number of entries, got {}, {}".format(
            len(boxes1), len(boxes2)
        )
    )
area1 = boxes1.area() # [N]
area2 = boxes2.area() # [N]
box1, box2 = boxes1.tensor, boxes2.tensor
lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2]
rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2]
wh = (rb - lt).clamp(min=0) # [N,2]
inter = wh[:, 0] * wh[:, 1] # [N]
iou = inter / (area1 + area2 - inter) # [N]
return iou
| 32.989059
| 96
| 0.559432
|
d7e94f5a48e5e61ce9acf99db81a56fbef0a7330
| 984
|
py
|
Python
|
hello.py
|
maxq210/googlePython
|
f25337b7dc55bb9c9ac4d936dfff555647d59ec2
|
[
"Apache-2.0"
] | null | null | null |
hello.py
|
maxq210/googlePython
|
f25337b7dc55bb9c9ac4d936dfff555647d59ec2
|
[
"Apache-2.0"
] | null | null | null |
hello.py
|
maxq210/googlePython
|
f25337b7dc55bb9c9ac4d936dfff555647d59ec2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""A tiny Python program to check that Python is working.
Try running this program from the command line like this:
python hello.py
python hello.py Alice
That should print:
Hello World -or- Hello Alice
Try changing the 'Hello' to 'Howdy' and run again.
Once you have that working, you're ready for class -- you can edit
and run Python code; now you just need to learn Python!
"""
import sys
# Define a main() function that prints a little greeting.
def main():
# Get the name from the command line, using 'World' as a fallback.
if len(sys.argv) >= 2:
name = sys.argv[1]
else:
name = 'World'
print 'Howdy', name
print 'yay'
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
| 28.114286
| 68
| 0.714431
|
ca39526b747dec3d6817303f52997bc5a76a14b1
| 4,699
|
py
|
Python
|
aiormq/base.py
|
vknrk/aiormq
|
735594b3291db005ed59bd17d91ddaecc42f6762
|
[
"Apache-2.0"
] | null | null | null |
aiormq/base.py
|
vknrk/aiormq
|
735594b3291db005ed59bd17d91ddaecc42f6762
|
[
"Apache-2.0"
] | null | null | null |
aiormq/base.py
|
vknrk/aiormq
|
735594b3291db005ed59bd17d91ddaecc42f6762
|
[
"Apache-2.0"
] | null | null | null |
import abc
import asyncio
from contextlib import suppress
from functools import wraps
from typing import Any, Callable, Coroutine, Optional, Set, TypeVar, Union
from weakref import WeakSet
from .abc import (
AbstractBase, AbstractFutureStore, CoroutineType, ExceptionType, TaskType,
TaskWrapper,
)
from .tools import shield
T = TypeVar("T")
class FutureStore(AbstractFutureStore):
__slots__ = "futures", "loop", "parent"
futures: Set[Union[asyncio.Future, TaskType]]
weak_futures: WeakSet
loop: asyncio.AbstractEventLoop
def __init__(self, loop: asyncio.AbstractEventLoop):
self.futures = set()
self.loop = loop
self.parent: Optional[FutureStore] = None
def __on_task_done(
self, future: Union[asyncio.Future, TaskWrapper],
) -> Callable[..., Any]:
def remover(*_: Any) -> None:
nonlocal future
if future in self.futures:
self.futures.remove(future)
return remover
def add(self, future: Union[asyncio.Future, TaskWrapper]) -> None:
self.futures.add(future)
future.add_done_callback(self.__on_task_done(future))
if self.parent:
self.parent.add(future)
@shield
async def reject_all(self, exception: Optional[ExceptionType]) -> None:
tasks = []
while self.futures:
future: Union[TaskType, asyncio.Future] = self.futures.pop()
if future.done():
continue
if isinstance(future, TaskWrapper):
future.throw(exception or Exception)
tasks.append(future)
elif isinstance(future, asyncio.Future):
future.set_exception(exception or Exception)
if tasks:
await asyncio.gather(*tasks, return_exceptions=True)
def create_task(self, coro: CoroutineType) -> TaskType:
task: TaskWrapper = TaskWrapper(self.loop.create_task(coro))
self.add(task)
return task
def create_future(self, weak: bool = False) -> asyncio.Future:
future = self.loop.create_future()
self.add(future)
return future
def get_child(self) -> "FutureStore":
store = FutureStore(self.loop)
store.parent = self
return store
class Base(AbstractBase):
__slots__ = "loop", "__future_store", "closing"
def __init__(
self, *, loop: asyncio.AbstractEventLoop,
parent: Optional[AbstractBase] = None
):
self.loop: asyncio.AbstractEventLoop = loop
if parent:
self.__future_store = parent._future_store_child()
else:
self.__future_store = FutureStore(loop=self.loop)
self.closing = self._create_closing_future()
def _create_closing_future(self) -> asyncio.Future:
future = self.__future_store.create_future()
future.add_done_callback(lambda x: x.exception())
return future
def _cancel_tasks(
self, exc: ExceptionType = None,
) -> Coroutine[Any, Any, None]:
return self.__future_store.reject_all(exc)
def _future_store_child(self) -> AbstractFutureStore:
return self.__future_store.get_child()
def create_task(self, coro: CoroutineType) -> TaskType:
return self.__future_store.create_task(coro)
def create_future(self) -> asyncio.Future:
return self.__future_store.create_future()
@abc.abstractmethod
async def _on_close(
self, exc: Optional[ExceptionType] = None
) -> None: # pragma: no cover
return
async def __closer(self, exc: Optional[ExceptionType]) -> None:
if self.is_closed: # pragma: no cover
return
with suppress(Exception):
await self._on_close(exc)
with suppress(Exception):
await self._cancel_tasks(exc)
async def close(
self, exc: Optional[ExceptionType] = asyncio.CancelledError
) -> None:
if self.is_closed:
return None
await self.loop.create_task(self.__closer(exc))
def __repr__(self) -> str:
cls_name = self.__class__.__name__
return '<{0}: "{1}" at 0x{2:02x}>'.format(
cls_name, str(self), id(self),
)
@abc.abstractmethod
def __str__(self) -> str: # pragma: no cover
raise NotImplementedError
@property
def is_closed(self) -> bool:
return self.closing.done()
TaskFunctionType = Callable[..., T]
def task(func: TaskFunctionType) -> TaskFunctionType:
@wraps(func)
async def wrap(self: Base, *args: Any, **kwargs: Any) -> Any:
return await self.create_task(func(self, *args, **kwargs))
return wrap
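# Example usage (hedged sketch): a minimal subclass that routes its coroutines
# through the shared ``FutureStore`` via the ``@task`` decorator. The class and
# method names below are illustrative assumptions, not part of aiormq's API.
#
#   class Pinger(Base):
#       def __str__(self) -> str:
#           return "pinger"
#
#       async def _on_close(self, exc=None) -> None:
#           pass
#
#       @task
#       async def ping(self) -> str:
#           await asyncio.sleep(0)
#           return "pong"
#
#   # Awaiting ``Pinger(loop=loop).ping()`` runs the coroutine as a tracked
#   # task that is rejected/cancelled when ``close()`` is called.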
| 28.478788
| 78
| 0.635242
|
6b1d1c880a6ace4ac95a72fb37515c8abba9a44b
| 745
|
py
|
Python
|
examples/Decimal2HexConversion.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | null | null | null |
examples/Decimal2HexConversion.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | 4
|
2019-11-07T12:32:19.000Z
|
2020-07-19T14:04:44.000Z
|
examples/Decimal2HexConversion.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | 5
|
2019-12-04T15:56:55.000Z
|
2022-01-14T06:19:18.000Z
|
# Convert a decimal to a hex as a string
def decimalToHex(decimalValue):
    if decimalValue == 0:
        return "0"
    hex = ""
    while decimalValue != 0:
        hexValue = decimalValue % 16
        hex = toHexChar(hexValue) + hex
        decimalValue = decimalValue // 16
    return hex
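# Worked example (hedged sketch): repeatedly taking the remainder mod 16 builds
# the hex digits right to left, e.g. 1234 = 4*256 + 13*16 + 2, so:
#
#   decimalToHex(1234)  # returns "4D2"
#   decimalToHex(0)     # returns "0"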
# Convert an integer to a single hex digit in a character
def toHexChar(hexValue):
if 0 <= hexValue <= 9:
return chr(hexValue + ord('0'))
else: # 10 <= hexValue <= 15
return chr(hexValue - 10 + ord('A'))
def main():
# Prompt the user to enter a decimal integer
    decimalValue = int(input("Enter a decimal number: "))
print("The hex number for decimal",
decimalValue, "is", decimalToHex(decimalValue))
main() # Call the main function
| 27.592593
| 58
| 0.624161
|
8b82118f9bc0984705a095fe1e9caa181039e33b
| 128
|
py
|
Python
|
Aula13/ex04.py
|
danicon/MD2-Curso_Python
|
77a2eb2d123eb1359dd7c84360c83bf3b3033ab4
|
[
"MIT"
] | 1
|
2020-11-28T14:48:03.000Z
|
2020-11-28T14:48:03.000Z
|
Aula13/ex04.py
|
danicon/MD2-Curso_Python
|
77a2eb2d123eb1359dd7c84360c83bf3b3033ab4
|
[
"MIT"
] | null | null | null |
Aula13/ex04.py
|
danicon/MD2-Curso_Python
|
77a2eb2d123eb1359dd7c84360c83bf3b3033ab4
|
[
"MIT"
] | null | null | null |
s = 0
for c in range(0, 4):
    n = int(input('Enter a value: '))
s += n
print(f'The sum of all the values was {s}')
| 25.6
| 49
| 0.585938
|
6bd37de6c078abdd5398202ad2825a8be301e750
| 4,053
|
gyp
|
Python
|
Xcode/node-sdl2_mixer.gyp
|
flyover/node-sdl2_mixer
|
e886ce7057c780374fd3e1eeeebb33f3601a8540
|
[
"MIT"
] | null | null | null |
Xcode/node-sdl2_mixer.gyp
|
flyover/node-sdl2_mixer
|
e886ce7057c780374fd3e1eeeebb33f3601a8540
|
[
"MIT"
] | null | null | null |
Xcode/node-sdl2_mixer.gyp
|
flyover/node-sdl2_mixer
|
e886ce7057c780374fd3e1eeeebb33f3601a8540
|
[
"MIT"
] | null | null | null |
{
'variables': {
'OS%': 'ios',
'NODE_PATH': '../../../node-v0.x-archive',
'SDL2_PATH': '../../../SDL',
'SDL2_MIXER_PATH': '../../../SDL_mixer',
},
'xcode_settings': {
'ALWAYS_SEARCH_USER_PATHS': 'NO',
'USE_HEADERMAP': 'NO',
},
'conditions': [
[ 'OS=="ios"', {
'xcode_settings': {
'SDKROOT': 'iphoneos',
'ARCHS': '$(ARCHS_STANDARD)',
'TARGETED_DEVICE_FAMILY': '1,2',
'CODE_SIGN_IDENTITY': 'iPhone Developer',
}
}],
[ 'OS=="osx"', {
'xcode_settings': {
'SDKROOT': 'macosx',
'ARCHS': '$(ARCHS_STANDARD_32_64_BIT)',
}
}],
],
'target_defaults': {
'defines': [
'__POSIX__',
'_LARGEFILE_SOURCE',
'_LARGEFILE64_SOURCE',
'_FILE_OFFSET_BITS=64',
'_DARWIN_USE_64_BIT_INODE=1',
],
'configurations': {
'Debug': {
'defines': [ '_DEBUG', 'DEBUG=1' ],
'cflags': [
'-g',
'-O0',
          '-fno-strict-aliasing',
          '-fwrapv',
],
},
'Release': {
'defines': [ 'NDEBUG=1' ],
'cflags': [
'-O3',
'-fstrict-aliasing',
'-fomit-frame-pointer',
'-fdata-sections',
'-ffunction-sections',
],
},
},
'xcode_settings': {
'GCC_C_LANGUAGE_STANDARD': 'c99', # -std=c99
'GCC_CW_ASM_SYNTAX': 'NO', # No -fasm-blocks
'GCC_DYNAMIC_NO_PIC': 'NO', # No -mdynamic-no-pic (Equivalent to -fPIC)
'GCC_ENABLE_CPP_EXCEPTIONS': 'NO', # -fno-exceptions
'GCC_ENABLE_CPP_RTTI': 'NO', # -fno-rtti
'GCC_ENABLE_PASCAL_STRINGS': 'NO', # No -mpascal-strings
'GCC_INLINES_ARE_PRIVATE_EXTERN': 'YES', # -fvisibility-inlines-hidden
'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES', # -fvisibility=hidden
'GCC_THREADSAFE_STATICS': 'NO', # -fno-threadsafe-statics
'GCC_WARN_NON_VIRTUAL_DESTRUCTOR': 'YES', # -Wnon-virtual-dtor
'PREBINDING': 'NO', # No -Wl,-prebind
'WARNING_CFLAGS': [
'-Wall',
'-Wendif-labels',
'-W',
'-Wno-unused-parameter',
],
'OTHER_CFLAGS[arch=armv7]': [ '-marm' ],
'OTHER_CFLAGS[arch=armv7s]': [ '-marm' ],
'OTHER_CFLAGS[arch=arm64]': [ '-marm' ],
},
},
'targets': [
{
'target_name': 'libnode-sdl2_mixer-<(OS)',
'type': 'static_library',
'defines': [
'NODE_WANT_INTERNALS=1',
],
'include_dirs': [
'.',
'<!(node -e "require(\'@flyover/node-sdl2/include\')")',
'<!(node -e "require(\'nan\')")',
'<(NODE_PATH)/src',
'<(NODE_PATH)/deps/uv/include',
'<(NODE_PATH)/deps/v8/include',
'<(NODE_PATH)/deps/debugger-agent/include',
'<(NODE_PATH)/deps/cares/include',
'<(SDL2_PATH)/include',
'<(SDL2_MIXER_PATH)',
],
'direct_dependent_settings': {
'include_dirs': [
'.',
'<!(node -e "require(\'@flyover/node-sdl2/include\')")',
'<!(node -e "require(\'nan\')")',
'<(SDL2_PATH)/include',
'<(SDL2_MIXER_PATH)',
]
},
'dependencies': [
],
'sources': [
'node-sdl2_mixer.h',
'node-sdl2_mixer.cc',
],
},
],
}
| 34.641026
| 97
| 0.399457
|
124c4fa79dd56065e728e4fe323d8227f930d210
| 1,576
|
py
|
Python
|
keras/integration_test/vectorized_map_test.py
|
Halo9Pan/dive-keras
|
7d4c5572fa3a9fc2542a1314d06c555f67575cb0
|
[
"Apache-2.0"
] | 11
|
2015-11-27T18:33:56.000Z
|
2020-08-12T22:51:57.000Z
|
tensorflow/python/keras/integration_test/vectorized_map_test.py
|
CaptainGizzy21/tensorflow
|
3457a2b122e50b4d44ceaaed5a663d635e5c22df
|
[
"Apache-2.0"
] | 1,056
|
2019-12-15T01:20:31.000Z
|
2022-02-10T02:06:28.000Z
|
tensorflow/python/keras/integration_test/vectorized_map_test.py
|
CaptainGizzy21/tensorflow
|
3457a2b122e50b4d44ceaaed5a663d635e5c22df
|
[
"Apache-2.0"
] | 6
|
2016-09-07T04:00:15.000Z
|
2022-01-12T01:47:38.000Z
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
class VectorizedMapTest(tf.test.TestCase):
def test_vectorized_map(self):
batch_size = 10
num_features = 32
layer = tf.keras.layers.Dense(1)
def model_fn(arg):
with tf.GradientTape() as g:
inp, label = arg
inp = tf.expand_dims(inp, 0)
label = tf.expand_dims(label, 0)
prediction = layer(inp)
loss = tf.nn.l2_loss(label - prediction)
return g.gradient(loss, (layer.kernel, layer.bias))
inputs = tf.random.uniform([batch_size, num_features])
labels = tf.random.uniform([batch_size, 1])
per_example_gradients = tf.vectorized_map(model_fn, (inputs, labels))
self.assertEqual(per_example_gradients[0].shape,
(batch_size, num_features, 1))
self.assertEqual(per_example_gradients[1].shape, (batch_size, 1))
if __name__ == "__main__":
tf.test.main()
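# Editorial note (not part of the original test): tf.vectorized_map is
# semantically equivalent to calling model_fn on each (input, label) pair in
# a Python loop and stacking the per-example results along a new leading
# batch dimension; it vectorizes that computation instead of tracing
# model_fn once per example, which is why the asserted shapes carry the
# extra batch_size dimension.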
| 35.022222
| 80
| 0.668147
|
43a6496451937e9f118507bc9d1d771e0f3123e1
| 7,088
|
py
|
Python
|
python/pyarrow/tests/conftest.py
|
stspyder/arrow
|
16b2a44be2b71bc1a7c95df70795664b4d450b6d
|
[
"Apache-2.0"
] | 2
|
2021-09-28T01:36:21.000Z
|
2021-12-22T08:24:17.000Z
|
python/pyarrow/tests/conftest.py
|
stspyder/arrow
|
16b2a44be2b71bc1a7c95df70795664b4d450b6d
|
[
"Apache-2.0"
] | 6
|
2020-07-01T20:18:37.000Z
|
2021-01-07T16:22:13.000Z
|
python/pyarrow/tests/conftest.py
|
stspyder/arrow
|
16b2a44be2b71bc1a7c95df70795664b4d450b6d
|
[
"Apache-2.0"
] | 1
|
2020-12-08T10:36:30.000Z
|
2020-12-08T10:36:30.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pathlib
import subprocess
from tempfile import TemporaryDirectory
import pytest
import hypothesis as h
from pyarrow.util import find_free_port
# setup hypothesis profiles
h.settings.register_profile('ci', max_examples=1000)
h.settings.register_profile('dev', max_examples=10)
h.settings.register_profile('debug', max_examples=10,
verbosity=h.Verbosity.verbose)
# load default hypothesis profile, either set HYPOTHESIS_PROFILE environment
# variable or pass --hypothesis-profile option to pytest, to see the generated
# examples try:
# pytest pyarrow -sv --enable-hypothesis --hypothesis-profile=debug
h.settings.load_profile(os.environ.get('HYPOTHESIS_PROFILE', 'dev'))
groups = [
'cython',
'dataset',
'hypothesis',
'fastparquet',
'gandiva',
'hdfs',
'large_memory',
'memory_leak',
'nopandas',
'orc',
'pandas',
'parquet',
'plasma',
's3',
'tensorflow',
'flight',
'slow',
'requires_testing_data',
]
defaults = {
'cython': False,
'dataset': False,
'fastparquet': False,
'hypothesis': False,
'gandiva': False,
'hdfs': False,
'large_memory': False,
'memory_leak': False,
'orc': False,
'nopandas': False,
'pandas': False,
'parquet': False,
'plasma': False,
's3': False,
'tensorflow': False,
'flight': False,
'slow': False,
'requires_testing_data': True,
}
try:
import cython # noqa
defaults['cython'] = True
except ImportError:
pass
try:
import fastparquet # noqa
defaults['fastparquet'] = True
except ImportError:
pass
try:
import pyarrow.gandiva # noqa
defaults['gandiva'] = True
except ImportError:
pass
try:
import pyarrow.dataset # noqa
defaults['dataset'] = True
except ImportError:
pass
try:
import pyarrow.orc # noqa
defaults['orc'] = True
except ImportError:
pass
try:
import pandas # noqa
defaults['pandas'] = True
except ImportError:
defaults['nopandas'] = True
try:
import pyarrow.parquet # noqa
defaults['parquet'] = True
except ImportError:
pass
try:
import pyarrow.plasma # noqa
defaults['plasma'] = True
except ImportError:
pass
try:
import tensorflow # noqa
defaults['tensorflow'] = True
except ImportError:
pass
try:
import pyarrow.flight # noqa
defaults['flight'] = True
except ImportError:
pass
try:
from pyarrow.fs import S3FileSystem # noqa
defaults['s3'] = True
except ImportError:
pass
try:
from pyarrow.fs import HadoopFileSystem # noqa
defaults['hdfs'] = True
except ImportError:
pass
def pytest_addoption(parser):
# Create options to selectively enable test groups
def bool_env(name, default=None):
value = os.environ.get(name.upper())
if value is None:
return default
value = value.lower()
if value in {'1', 'true', 'on', 'yes', 'y'}:
return True
elif value in {'0', 'false', 'off', 'no', 'n'}:
return False
else:
raise ValueError('{}={} is not parsable as boolean'
.format(name.upper(), value))
for group in groups:
default = bool_env('PYARROW_TEST_{}'.format(group), defaults[group])
parser.addoption('--enable-{}'.format(group),
action='store_true', default=default,
help=('Enable the {} test group'.format(group)))
parser.addoption('--disable-{}'.format(group),
action='store_true', default=False,
help=('Disable the {} test group'.format(group)))
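# Editorial note (not part of the original conftest): the options and
# environment variables generated above follow the group names, so a test
# group can be toggled either way, e.g. (paths illustrative):
#   pytest python/pyarrow/tests --enable-parquet --disable-s3
#   PYARROW_TEST_PARQUET=1 pytest python/pyarrow/tests
# bool_env() accepts 1/true/on/yes/y and 0/false/off/no/n in any casing.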
class PyArrowConfig:
def __init__(self):
self.is_enabled = {}
def apply_mark(self, mark):
group = mark.name
if group in groups:
self.requires(group)
def requires(self, group):
if not self.is_enabled[group]:
pytest.skip('{} NOT enabled'.format(group))
def pytest_configure(config):
# Apply command-line options to initialize PyArrow-specific config object
config.pyarrow = PyArrowConfig()
for mark in groups:
config.addinivalue_line(
"markers", mark,
)
enable_flag = '--enable-{}'.format(mark)
disable_flag = '--disable-{}'.format(mark)
is_enabled = (config.getoption(enable_flag) and not
config.getoption(disable_flag))
config.pyarrow.is_enabled[mark] = is_enabled
def pytest_runtest_setup(item):
# Apply test markers to skip tests selectively
for mark in item.iter_markers():
item.config.pyarrow.apply_mark(mark)
@pytest.fixture
def tempdir(tmpdir):
# convert pytest's LocalPath to pathlib.Path
return pathlib.Path(tmpdir.strpath)
@pytest.fixture(scope='session')
def datadir():
return pathlib.Path(__file__).parent / 'data'
# TODO(kszucs): move the following fixtures to test_fs.py once the previous
# parquet dataset implementation and hdfs implementation are removed.
@pytest.mark.hdfs
@pytest.fixture(scope='session')
def hdfs_connection():
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'default')
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
user = os.environ.get('ARROW_HDFS_TEST_USER', 'hdfs')
return host, port, user
@pytest.mark.s3
@pytest.fixture(scope='session')
def s3_connection():
host, port = 'localhost', find_free_port()
access_key, secret_key = 'arrow', 'apachearrow'
return host, port, access_key, secret_key
@pytest.fixture(scope='session')
def s3_server(s3_connection):
host, port, access_key, secret_key = s3_connection
address = '{}:{}'.format(host, port)
env = os.environ.copy()
env.update({
'MINIO_ACCESS_KEY': access_key,
'MINIO_SECRET_KEY': secret_key
})
with TemporaryDirectory() as tempdir:
args = ['minio', '--compat', 'server', '--quiet', '--address',
address, tempdir]
proc = None
try:
proc = subprocess.Popen(args, env=env)
except OSError:
pytest.skip('`minio` command cannot be located')
else:
yield proc
finally:
if proc is not None:
proc.kill()
| 25.96337
| 78
| 0.645316
|
77c3ae0408d83382e76637d7cef3a4d06d21618b
| 2,745
|
py
|
Python
|
utest/py3270/test_command.py
|
MichaelSeeburger/Robot-Framework-Mainframe-3270-Library
|
76b589d58c55a39f96c027a8ae28c41fa37ed445
|
[
"MIT"
] | 3
|
2018-10-02T14:32:06.000Z
|
2018-10-02T14:33:32.000Z
|
utest/py3270/test_command.py
|
MichaelSeeburger/Robot-Framework-Mainframe-3270-Library
|
76b589d58c55a39f96c027a8ae28c41fa37ed445
|
[
"MIT"
] | null | null | null |
utest/py3270/test_command.py
|
MichaelSeeburger/Robot-Framework-Mainframe-3270-Library
|
76b589d58c55a39f96c027a8ae28c41fa37ed445
|
[
"MIT"
] | null | null | null |
import warnings
import pytest
from pytest_mock import MockerFixture
from Mainframe3270.py3270 import Command, CommandError, ExecutableAppLinux
def test_command_default(mocker: MockerFixture):
mocker.patch("subprocess.Popen")
app = ExecutableAppLinux()
under_test = Command(app, b"abc")
assert under_test.app == app
assert under_test.cmdstr == b"abc"
assert under_test.status_line is None
assert under_test.data == []
def test_command_with_text_type(mocker: MockerFixture):
mocker.patch("subprocess.Popen")
mocker.patch("warnings.warn")
app = ExecutableAppLinux()
under_test = Command(app, "abc")
warnings.warn.assert_called_with("Commands should be byte strings", stacklevel=3)
assert isinstance(under_test.cmdstr, bytes)
def test_execute(mocker: MockerFixture):
mocker.patch("subprocess.Popen")
mocker.patch(
"Mainframe3270.py3270.ExecutableAppLinux.readline",
side_effect=[
b"data: abc",
b"U U U C(pub400.com) C 4 43 80 4 24 0x0 0.000",
b"ok",
],
)
app = ExecutableAppLinux()
under_test = Command(app, b"abc")
under_test.execute()
assert under_test.data == [b"abc"]
def test_handle_result_quit(mocker: MockerFixture):
mocker.patch("subprocess.Popen")
mocker.patch("Mainframe3270.py3270.ExecutableAppLinux.readline", return_value=b"")
app = ExecutableAppLinux()
under_test = Command(app, b"Quit")
under_test.execute()
def test_handle_result_error(mocker: MockerFixture):
mocker.patch("subprocess.Popen")
mocker.patch(
"Mainframe3270.py3270.ExecutableAppLinux.readline", return_value=b"error"
)
app = ExecutableAppLinux()
under_test = Command(app, b"abc")
with pytest.raises(CommandError, match="[no data message]"):
under_test.execute()
def test_handle_result_with_data(mocker: MockerFixture):
mocker.patch("subprocess.Popen")
mocker.patch(
"Mainframe3270.py3270.ExecutableAppLinux.readline",
side_effect=[
b"data: abc",
b"U U U C(pub400.com) C 4 43 80 4 24 0x0 0.000",
b"error",
],
)
app = ExecutableAppLinux()
under_test = Command(app, b"abc")
with pytest.raises(CommandError, match="abc"):
under_test.execute()
def test_handle_result_not_ok_or_error(mocker: MockerFixture):
mocker.patch("subprocess.Popen")
mocker.patch(
"Mainframe3270.py3270.ExecutableAppLinux.readline", return_value=b"abc"
)
app = ExecutableAppLinux()
under_test = Command(app, b"abc")
with pytest.raises(
ValueError, match='expected "ok" or "error" result, but received: abc'
):
under_test.execute()
| 27.45
| 86
| 0.680146
|
b20f2c523e4a6c47ec8daeffa4345ae166a1099c
| 39,111
|
py
|
Python
|
core/domain/stats_services.py
|
aadilmehdis/oppia
|
afa9e55c1de88f6898fd31e8ed452f4a9157050b
|
[
"Apache-2.0"
] | null | null | null |
core/domain/stats_services.py
|
aadilmehdis/oppia
|
afa9e55c1de88f6898fd31e8ed452f4a9157050b
|
[
"Apache-2.0"
] | null | null | null |
core/domain/stats_services.py
|
aadilmehdis/oppia
|
afa9e55c1de88f6898fd31e8ed452f4a9157050b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Services for exploration-related statistics."""
import collections
import copy
import itertools
from core.domain import exp_domain
from core.domain import interaction_registry
from core.domain import stats_domain
from core.platform import models
import feconf
(stats_models,) = models.Registry.import_models([models.NAMES.statistics])
transaction_services = models.Registry.import_transaction_services()
# Counts contributions from all versions.
VERSION_ALL = 'all'
def _migrate_to_latest_issue_schema(exp_issue_dict):
"""Holds the responsibility of performing a step-by-step sequential update
of an exploration issue dict based on its schema version. If the current
issue schema version changes (stats_models.CURRENT_ISSUE_SCHEMA_VERSION), a
new conversion function must be added and some code appended to this
function to account for that new version.
Args:
exp_issue_dict: dict. Dict representing the exploration issue.
Raises:
Exception. The issue_schema_version is invalid.
"""
issue_schema_version = exp_issue_dict['schema_version']
if issue_schema_version is None or issue_schema_version < 1:
issue_schema_version = 0
if not (0 <= issue_schema_version
<= stats_models.CURRENT_ISSUE_SCHEMA_VERSION):
raise Exception(
            'Sorry, we can only process v1-v%d and unversioned issue schemas '
            'at present.' %
stats_models.CURRENT_ISSUE_SCHEMA_VERSION)
while issue_schema_version < stats_models.CURRENT_ISSUE_SCHEMA_VERSION:
stats_domain.ExplorationIssue.update_exp_issue_from_model(
exp_issue_dict)
issue_schema_version += 1
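# Editorial note (not part of the original module): the loop above applies
# update_exp_issue_from_model() once per missing schema version, so an issue
# dict at schema_version 1 with CURRENT_ISSUE_SCHEMA_VERSION == 3 is upgraded
# in two steps (1 -> 2 -> 3), mutating exp_issue_dict in place.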
def _migrate_to_latest_action_schema(learner_action_dict):
"""Holds the responsibility of performing a step-by-step sequential update
    of a learner action dict based on its schema version. If the current action
schema version changes (stats_models.CURRENT_ACTION_SCHEMA_VERSION), a new
conversion function must be added and some code appended to this function to
account for that new version.
Args:
learner_action_dict: dict. Dict representing the learner action.
Raises:
Exception. The action_schema_version is invalid.
"""
action_schema_version = learner_action_dict['schema_version']
if action_schema_version is None or action_schema_version < 1:
action_schema_version = 0
if not (0 <= action_schema_version
<= stats_models.CURRENT_ACTION_SCHEMA_VERSION):
raise Exception(
'Sorry, we can only process v1-v%d and unversioned action schemas '
'at present.' %
stats_models.CURRENT_ACTION_SCHEMA_VERSION)
while action_schema_version < stats_models.CURRENT_ACTION_SCHEMA_VERSION:
stats_domain.LearnerAction.update_learner_action_from_model(
learner_action_dict)
action_schema_version += 1
def get_exploration_stats(exp_id, exp_version):
"""Retrieves the ExplorationStats domain instance.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
Returns:
ExplorationStats. The exploration stats domain object.
"""
exploration_stats = get_exploration_stats_by_id(exp_id, exp_version)
if exploration_stats is None:
exploration_stats = stats_domain.ExplorationStats.create_default(
exp_id, exp_version, {})
return exploration_stats
def update_stats(exp_id, exp_version, aggregated_stats):
"""Updates ExplorationStatsModel according to the dict containing aggregated
stats.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
aggregated_stats: dict. Dict representing an ExplorationStatsModel
instance with stats aggregated in the frontend.
"""
exploration_stats = get_exploration_stats_by_id(
exp_id, exp_version)
exploration_stats.num_starts_v2 += aggregated_stats['num_starts']
exploration_stats.num_completions_v2 += aggregated_stats['num_completions']
exploration_stats.num_actual_starts_v2 += aggregated_stats[
'num_actual_starts']
for state_name in aggregated_stats['state_stats_mapping']:
exploration_stats.state_stats_mapping[
state_name].total_answers_count_v2 += aggregated_stats[
'state_stats_mapping'][state_name]['total_answers_count']
exploration_stats.state_stats_mapping[
state_name].useful_feedback_count_v2 += aggregated_stats[
'state_stats_mapping'][state_name]['useful_feedback_count']
exploration_stats.state_stats_mapping[
state_name].total_hit_count_v2 += aggregated_stats[
'state_stats_mapping'][state_name]['total_hit_count']
exploration_stats.state_stats_mapping[
state_name].first_hit_count_v2 += aggregated_stats[
'state_stats_mapping'][state_name]['first_hit_count']
exploration_stats.state_stats_mapping[
state_name].num_times_solution_viewed_v2 += aggregated_stats[
'state_stats_mapping'][state_name]['num_times_solution_viewed']
exploration_stats.state_stats_mapping[
state_name].num_completions_v2 += aggregated_stats[
'state_stats_mapping'][state_name]['num_completions']
save_stats_model_transactional(exploration_stats)
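# Editorial sketch (not part of the original module): judging by the keys
# read above, `aggregated_stats` is expected to look roughly like the
# following, where the state name 'Introduction' and the counts are purely
# illustrative values:
#   {
#       'num_starts': 1,
#       'num_actual_starts': 1,
#       'num_completions': 0,
#       'state_stats_mapping': {
#           'Introduction': {
#               'total_answers_count': 3,
#               'useful_feedback_count': 1,
#               'total_hit_count': 1,
#               'first_hit_count': 1,
#               'num_times_solution_viewed': 0,
#               'num_completions': 0,
#           },
#       },
#   }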
def handle_stats_creation_for_new_exploration(exp_id, exp_version, state_names):
"""Creates ExplorationStatsModel for the freshly created exploration and
sets all initial values to zero.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
state_names: list(str). State names of the exploration.
"""
state_stats_mapping = {
state_name: stats_domain.StateStats.create_default()
for state_name in state_names
}
exploration_stats = stats_domain.ExplorationStats.create_default(
exp_id, exp_version, state_stats_mapping)
create_stats_model(exploration_stats)
def handle_stats_creation_for_new_exp_version(
exp_id, exp_version, state_names, change_list):
"""Retrieves the ExplorationStatsModel for the old exp_version and makes
any required changes to the structure of the model. Then, a new
ExplorationStatsModel is created for the new exp_version.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
state_names: list(str). State names of the exploration.
change_list: list(ExplorationChange). A list of changes introduced in
this commit.
"""
old_exp_version = exp_version - 1
new_exp_version = exp_version
exploration_stats = get_exploration_stats_by_id(
exp_id, old_exp_version)
if exploration_stats is None:
handle_stats_creation_for_new_exploration(
exp_id, new_exp_version, state_names)
return
# Handling state additions, deletions and renames.
for change in change_list:
if change.cmd == exp_domain.CMD_ADD_STATE:
exploration_stats.state_stats_mapping[
change.state_name
] = stats_domain.StateStats.create_default()
elif change.cmd == exp_domain.CMD_DELETE_STATE:
exploration_stats.state_stats_mapping.pop(change.state_name)
elif change.cmd == exp_domain.CMD_RENAME_STATE:
exploration_stats.state_stats_mapping[
change.new_state_name
] = exploration_stats.state_stats_mapping.pop(change.old_state_name)
exploration_stats.exp_version = new_exp_version
# Create new statistics model.
create_stats_model(exploration_stats)
def create_exp_issues_for_new_exploration(exp_id, exp_version):
"""Creates the ExplorationIssuesModel instance for the exploration.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
"""
stats_models.ExplorationIssuesModel.create(exp_id, exp_version, [])
def _handle_exp_issues_after_state_deletion(
state_name, exp_issue, deleted_state_names):
"""Checks if the exploration issue's concerned state is a deleted state and
    invalidates the exploration issue accordingly.
Args:
        state_name: str. The issue's concerned state name.
exp_issue: ExplorationIssue. The exploration issue domain object.
deleted_state_names: list(str). The list of deleted state names in this
commit.
Returns:
ExplorationIssue. The exploration issue domain object.
"""
if state_name in deleted_state_names:
exp_issue.is_valid = False
return exp_issue
def _handle_exp_issues_after_state_rename(
state_name, exp_issue, old_to_new_state_names,
playthrough_ids_by_state_name):
"""Checks if the exploration issue's concerned state is a renamed state and
    modifies the exploration issue accordingly.
Args:
        state_name: str. The issue's concerned state name.
exp_issue: ExplorationIssue. The exploration issue domain object.
old_to_new_state_names: dict. The dict mapping state names to their
renamed versions. This mapping contains state names only if it is
actually renamed.
        playthrough_ids_by_state_name: dict. The dict mapping old state names
            to the lists of playthrough IDs associated with them.
Returns:
ExplorationIssue. The exploration issue domain object.
"""
if state_name not in old_to_new_state_names:
return exp_issue, playthrough_ids_by_state_name
old_state_name = state_name
new_state_name = old_to_new_state_names[old_state_name]
if stats_models.ISSUE_TYPE_KEYNAME_MAPPING[
exp_issue.issue_type] == 'state_names':
state_names = exp_issue.issue_customization_args['state_names'][
'value']
exp_issue.issue_customization_args['state_names']['value'] = [
new_state_name
if state_name == old_state_name else state_name
for state_name in state_names]
else:
exp_issue.issue_customization_args['state_name']['value'] = (
new_state_name)
playthrough_ids_by_state_name[old_state_name].extend(
exp_issue.playthrough_ids)
return exp_issue, playthrough_ids_by_state_name
def update_exp_issues_for_new_exp_version(
exploration, exp_versions_diff, revert_to_version):
"""Retrieves the ExplorationIssuesModel for the old exp_version and makes
any required changes to the structure of the model.
Args:
exploration: Exploration. Domain object for the exploration.
exp_versions_diff: ExplorationVersionsDiff|None. The domain object for
the exploration versions difference, None if it is a revert.
revert_to_version: int|None. If the change is a revert, the version.
Otherwise, None.
"""
exp_issues = get_exp_issues(exploration.id, exploration.version - 1)
if exp_issues is None:
create_exp_issues_for_new_exploration(
exploration.id, exploration.version - 1)
return
# Handling reverts.
if revert_to_version:
old_exp_issues = get_exp_issues(exploration.id, revert_to_version)
# If the old exploration issues model doesn't exist, the current model
# is carried over (this is a fallback case for some tests, and can
# never happen in production.)
if old_exp_issues:
exp_issues.unresolved_issues = old_exp_issues.unresolved_issues
exp_issues.exp_version = exploration.version + 1
create_exp_issues_model(exp_issues)
return
playthrough_ids_by_state_name = collections.defaultdict(list)
for i_idx, exp_issue in enumerate(exp_issues.unresolved_issues):
keyname = stats_models.ISSUE_TYPE_KEYNAME_MAPPING[exp_issue.issue_type]
if keyname == 'state_names':
state_names = exp_issue.issue_customization_args[keyname]['value']
for state_name in state_names:
# Handle exp issues changes for deleted states.
exp_issues.unresolved_issues[i_idx] = (
_handle_exp_issues_after_state_deletion(
state_name, exp_issue,
exp_versions_diff.deleted_state_names))
# Handle exp issues changes for renamed states.
exp_issues.unresolved_issues[
i_idx], playthrough_ids_by_state_name = (
_handle_exp_issues_after_state_rename(
state_name, exp_issue,
exp_versions_diff.old_to_new_state_names,
playthrough_ids_by_state_name))
else:
state_name = exp_issue.issue_customization_args[keyname]['value']
# Handle exp issues changes for deleted states.
exp_issues.unresolved_issues[i_idx] = (
_handle_exp_issues_after_state_deletion(
state_name, exp_issue,
exp_versions_diff.deleted_state_names))
# Handle exp issues changes for renamed states.
exp_issues.unresolved_issues[
i_idx], playthrough_ids_by_state_name = (
_handle_exp_issues_after_state_rename(
state_name, exp_issue,
exp_versions_diff.old_to_new_state_names,
playthrough_ids_by_state_name))
# Handling changes to playthrough instances.
all_playthrough_ids = []
all_playthroughs = []
for old_state_name in playthrough_ids_by_state_name:
new_state_name = exp_versions_diff.old_to_new_state_names[
old_state_name]
playthrough_ids = playthrough_ids_by_state_name[old_state_name]
playthroughs = get_playthroughs_multi(playthrough_ids)
for p_idx, playthrough in enumerate(playthroughs):
if stats_models.ISSUE_TYPE_KEYNAME_MAPPING[
playthrough.issue_type] == 'state_names':
state_names = playthrough.issue_customization_args[
'state_names']['value']
playthrough.issue_customization_args['state_names']['value'] = [
new_state_name
if state_name == old_state_name else state_name
for state_name in state_names]
else:
playthrough.issue_customization_args['state_name']['value'] = (
new_state_name)
for a_idx, action in enumerate(playthrough.actions):
if action.action_customization_args['state_name']['value'] == (
old_state_name):
playthroughs[p_idx].actions[
a_idx].action_customization_args['state_name'][
'value'] = new_state_name
all_playthrough_ids.extend(playthrough_ids)
all_playthroughs.extend(playthroughs)
update_playthroughs_multi(all_playthrough_ids, all_playthroughs)
exp_issues.exp_version += 1
create_exp_issues_model(exp_issues)
def get_exp_issues(exp_id, exp_version):
"""Retrieves the ExplorationIssues domain object.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
Returns:
ExplorationIssues|None: The domain object for exploration issues or
None if the exp_id is invalid.
"""
exp_issues = None
exp_issues_model = stats_models.ExplorationIssuesModel.get_model(
exp_id, exp_version)
if exp_issues_model is not None:
exp_issues = get_exp_issues_from_model(exp_issues_model)
return exp_issues
def get_playthrough_by_id(playthrough_id):
"""Retrieves the Playthrough domain object.
Args:
playthrough_id: str. ID of the playthrough.
Returns:
Playthrough|None: The domain object for the playthrough or None if the
playthrough_id is invalid.
"""
playthrough = None
playthrough_model = stats_models.PlaythroughModel.get(
playthrough_id, strict=False)
if playthrough_model is not None:
playthrough = get_playthrough_from_model(playthrough_model)
return playthrough
def get_playthroughs_multi(playthrough_ids):
"""Retrieves multiple Playthrough domain objects.
Args:
playthrough_ids: list(str). List of playthrough IDs.
Returns:
list(Playthrough). List of playthrough domain objects.
"""
playthrough_instances = stats_models.PlaythroughModel.get_multi(
playthrough_ids)
playthroughs = [
get_playthrough_from_model(playthrough_instance)
for playthrough_instance in playthrough_instances]
return playthroughs
def update_playthroughs_multi(playthrough_ids, playthroughs):
"""Updates the playthrough instances.
Args:
playthrough_ids: list(str). List of playthrough IDs.
playthroughs: list(Playthrough). List of playthrough domain objects.
"""
playthrough_instances = stats_models.PlaythroughModel.get_multi(
playthrough_ids)
updated_instances = []
for idx, playthrough_instance in enumerate(playthrough_instances):
playthrough_dict = playthroughs[idx].to_dict()
playthrough_instance.issue_type = playthrough_dict['issue_type']
playthrough_instance.issue_customization_args = (
playthrough_dict['issue_customization_args'])
playthrough_instance.actions = playthrough_dict['actions']
updated_instances.append(playthrough_instance)
stats_models.PlaythroughModel.put_multi(updated_instances)
def get_exploration_stats_by_id(exp_id, exp_version):
"""Retrieves the ExplorationStats domain object.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
Returns:
ExplorationStats. The domain object for exploration statistics.
Raises:
Exception: Entity for class ExplorationStatsModel with id not found.
"""
exploration_stats = None
exploration_stats_model = stats_models.ExplorationStatsModel.get_model(
exp_id, exp_version)
if exploration_stats_model is not None:
exploration_stats = get_exploration_stats_from_model(
exploration_stats_model)
return exploration_stats
def get_multiple_exploration_stats_by_version(exp_id, version_numbers):
"""Returns a list of ExplorationStats domain objects corresponding to the
specified versions.
Args:
exp_id: str. ID of the exploration.
version_numbers: list(int). List of version numbers.
Returns:
list(ExplorationStats|None). List of ExplorationStats domain class
instances.
"""
exploration_stats = []
exploration_stats_models = (
stats_models.ExplorationStatsModel.get_multi_versions(
exp_id, version_numbers))
for exploration_stats_model in exploration_stats_models:
if exploration_stats_model is None:
exploration_stats.append(None)
else:
exploration_stats.append(get_exploration_stats_from_model(
exploration_stats_model))
return exploration_stats
def get_exp_issues_from_model(exp_issues_model):
"""Gets an ExplorationIssues domain object from an ExplorationIssuesModel
instance.
Args:
exp_issues_model: ExplorationIssuesModel. Exploration issues model in
datastore.
Returns:
ExplorationIssues. The domain object for exploration issues.
"""
unresolved_issues = []
for unresolved_issue_dict in exp_issues_model.unresolved_issues:
_migrate_to_latest_issue_schema(copy.deepcopy(unresolved_issue_dict))
unresolved_issues.append(
stats_domain.ExplorationIssue.from_dict(unresolved_issue_dict))
return stats_domain.ExplorationIssues(
exp_issues_model.exp_id, exp_issues_model.exp_version,
unresolved_issues)
def get_exploration_stats_from_model(exploration_stats_model):
"""Gets an ExplorationStats domain object from an ExplorationStatsModel
instance.
Args:
exploration_stats_model: ExplorationStatsModel. Exploration statistics
model in datastore.
Returns:
ExplorationStats. The domain object for exploration statistics.
"""
new_state_stats_mapping = {
state_name: stats_domain.StateStats.from_dict(
exploration_stats_model.state_stats_mapping[state_name])
for state_name in exploration_stats_model.state_stats_mapping
}
return stats_domain.ExplorationStats(
exploration_stats_model.exp_id,
exploration_stats_model.exp_version,
exploration_stats_model.num_starts_v1,
exploration_stats_model.num_starts_v2,
exploration_stats_model.num_actual_starts_v1,
exploration_stats_model.num_actual_starts_v2,
exploration_stats_model.num_completions_v1,
exploration_stats_model.num_completions_v2,
new_state_stats_mapping)
def get_playthrough_from_model(playthrough_model):
"""Gets a PlaythroughModel domain object from a PlaythroughModel instance.
Args:
playthrough_model: PlaythroughModel. Playthrough model in datastore.
Returns:
Playthrough. The domain object for a playthrough.
"""
actions = []
for action_dict in playthrough_model.actions:
_migrate_to_latest_action_schema(action_dict)
actions.append(stats_domain.LearnerAction.from_dict(action_dict))
return stats_domain.Playthrough(
playthrough_model.exp_id, playthrough_model.exp_version,
playthrough_model.issue_type,
playthrough_model.issue_customization_args, actions)
def create_stats_model(exploration_stats):
"""Creates an ExplorationStatsModel in datastore given an ExplorationStats
domain object.
Args:
exploration_stats: ExplorationStats. The domain object for exploration
statistics.
Returns:
str. ID of the datastore instance for ExplorationStatsModel.
"""
new_state_stats_mapping = {
state_name: exploration_stats.state_stats_mapping[state_name].to_dict()
for state_name in exploration_stats.state_stats_mapping
}
instance_id = stats_models.ExplorationStatsModel.create(
exploration_stats.exp_id,
exploration_stats.exp_version,
exploration_stats.num_starts_v1,
exploration_stats.num_starts_v2,
exploration_stats.num_actual_starts_v1,
exploration_stats.num_actual_starts_v2,
exploration_stats.num_completions_v1,
exploration_stats.num_completions_v2,
new_state_stats_mapping
)
return instance_id
def _save_stats_model(exploration_stats):
"""Updates the ExplorationStatsModel datastore instance with the passed
ExplorationStats domain object.
Args:
        exploration_stats: ExplorationStats. The exploration statistics domain
object.
"""
new_state_stats_mapping = {
state_name: exploration_stats.state_stats_mapping[state_name].to_dict()
for state_name in exploration_stats.state_stats_mapping
}
exploration_stats_model = stats_models.ExplorationStatsModel.get_model(
exploration_stats.exp_id, exploration_stats.exp_version)
exploration_stats_model.num_starts_v1 = exploration_stats.num_starts_v1
exploration_stats_model.num_starts_v2 = exploration_stats.num_starts_v2
exploration_stats_model.num_actual_starts_v1 = (
exploration_stats.num_actual_starts_v1)
exploration_stats_model.num_actual_starts_v2 = (
exploration_stats.num_actual_starts_v2)
exploration_stats_model.num_completions_v1 = (
exploration_stats.num_completions_v1)
exploration_stats_model.num_completions_v2 = (
exploration_stats.num_completions_v2)
exploration_stats_model.state_stats_mapping = new_state_stats_mapping
exploration_stats_model.put()
def save_stats_model_transactional(exploration_stats):
"""Updates the ExplorationStatsModel datastore instance with the passed
ExplorationStats domain object in a transaction.
Args:
        exploration_stats: ExplorationStats. The exploration statistics domain
object.
"""
transaction_services.run_in_transaction(
_save_stats_model, exploration_stats)
def create_exp_issues_model(exp_issues):
"""Creates a new ExplorationIssuesModel in the datastore.
Args:
exp_issues: ExplorationIssues. The exploration issues domain object.
"""
unresolved_issues_dicts = [
unresolved_issue.to_dict()
for unresolved_issue in exp_issues.unresolved_issues]
stats_models.ExplorationIssuesModel.create(
exp_issues.exp_id, exp_issues.exp_version, unresolved_issues_dicts)
def _save_exp_issues_model(exp_issues):
"""Updates the ExplorationIssuesModel datastore instance with the passed
ExplorationIssues domain object.
Args:
exp_issues: ExplorationIssues. The exploration issues domain
object.
"""
unresolved_issues_dicts = [
unresolved_issue.to_dict()
for unresolved_issue in exp_issues.unresolved_issues]
exp_issues_model = stats_models.ExplorationIssuesModel.get_model(
exp_issues.exp_id, exp_issues.exp_version)
exp_issues_model.exp_version = exp_issues.exp_version
exp_issues_model.unresolved_issues = unresolved_issues_dicts
exp_issues_model.put()
def save_exp_issues_model_transactional(exp_issues):
"""Updates the ExplorationIssuesModel datastore instance with the passed
ExplorationIssues domain object in a transaction.
Args:
exp_issues: ExplorationIssues. The exploration issues domain
object.
"""
transaction_services.run_in_transaction(
_save_exp_issues_model, exp_issues)
def get_exploration_stats_multi(exp_version_references):
"""Retrieves the exploration stats for the given explorations.
Args:
exp_version_references: list(ExpVersionReference). List of exploration
version reference domain objects.
Returns:
list(ExplorationStats). The list of exploration stats domain objects.
"""
exploration_stats_models = (
stats_models.ExplorationStatsModel.get_multi_stats_models(
exp_version_references))
exploration_stats_list = []
for index, exploration_stats_model in enumerate(exploration_stats_models):
if exploration_stats_model is None:
exploration_stats_list.append(
stats_domain.ExplorationStats.create_default(
exp_version_references[index].exp_id,
exp_version_references[index].version,
{}))
else:
exploration_stats_list.append(
get_exploration_stats_from_model(exploration_stats_model))
return exploration_stats_list
def delete_playthroughs_multi(playthrough_ids):
"""Deletes multiple playthrough instances.
Args:
playthrough_ids: list(str). List of playthrough IDs to be deleted.
"""
stats_models.PlaythroughModel.delete_playthroughs_multi(playthrough_ids)
def get_visualizations_info(exp_id, state_name, interaction_id):
"""Returns a list of visualization info. Each item in the list is a dict
with keys 'data' and 'options'.
Args:
exp_id: str. The ID of the exploration.
state_name: str. Name of the state.
interaction_id: str. The interaction type.
Returns:
list(dict). Each item in the list is a dict with keys representing
- 'id': str. The visualization ID.
- 'data': list(dict). A list of answer/frequency dicts.
- 'options': dict. The visualization options.
An example of the returned value may be:
[{'options': {'y_axis_label': 'Count', 'x_axis_label': 'Answer'},
'id': 'BarChart',
'data': [{u'frequency': 1, u'answer': 0}]}]
"""
if interaction_id is None:
return []
visualizations = interaction_registry.Registry.get_interaction_by_id(
interaction_id).answer_visualizations
calculation_ids = set([
visualization.calculation_id for visualization in visualizations])
calculation_ids_to_outputs = {}
for calculation_id in calculation_ids:
        # Don't show top unresolved answers calculation output in stats of
# exploration.
if calculation_id == 'TopNUnresolvedAnswersByFrequency':
continue
# This is None if the calculation job has not yet been run for this
# state.
calc_output_domain_object = _get_calc_output(
exp_id, state_name, calculation_id)
# If the calculation job has not yet been run for this state, we simply
# exclude the corresponding visualization results.
if calc_output_domain_object is None:
continue
# If the output was associated with a different interaction ID, skip the
# results. This filtering step is needed since the same calculation_id
# can be shared across multiple interaction types.
if calc_output_domain_object.interaction_id != interaction_id:
continue
calculation_ids_to_outputs[calculation_id] = (
calc_output_domain_object.calculation_output.to_raw_type())
return [{
'id': visualization.id,
'data': calculation_ids_to_outputs[visualization.calculation_id],
'options': visualization.options,
'addressed_info_is_supported': (
visualization.addressed_info_is_supported),
} for visualization in visualizations
if visualization.calculation_id in calculation_ids_to_outputs]
def record_answer(
exploration_id, exploration_version, state_name, interaction_id,
submitted_answer):
"""Record an answer by storing it to the corresponding StateAnswers entity.
Args:
exploration_id: str. The exploration ID.
exploration_version: int. The version of the exploration.
state_name: str. The name of the state.
interaction_id: str. The ID of the interaction.
submitted_answer: SubmittedAnswer. The submitted answer.
"""
record_answers(
exploration_id, exploration_version, state_name, interaction_id,
[submitted_answer])
def record_answers(
exploration_id, exploration_version, state_name, interaction_id,
submitted_answer_list):
"""Optimally record a group of answers using an already loaded exploration..
The submitted_answer_list is a list of SubmittedAnswer domain objects.
Args:
exploration_id: str. The exploration ID.
exploration_version: int. The version of the exploration.
state_name: str. The name of the state.
interaction_id: str. The ID of the interaction.
submitted_answer_list: list(SubmittedAnswer). The list of answers to be
recorded.
"""
state_answers = stats_domain.StateAnswers(
exploration_id, exploration_version, state_name, interaction_id,
submitted_answer_list)
for submitted_answer in submitted_answer_list:
submitted_answer.validate()
stats_models.StateAnswersModel.insert_submitted_answers(
state_answers.exploration_id, state_answers.exploration_version,
state_answers.state_name, state_answers.interaction_id,
state_answers.get_submitted_answer_dict_list())
def get_state_answers(exploration_id, exploration_version, state_name):
"""Returns a StateAnswers object containing all answers associated with the
specified exploration state, or None if no such answers have yet been
submitted.
Args:
exploration_id: str. The exploration ID.
exploration_version: int. The version of the exploration to fetch
answers for.
state_name: str. The name of the state to fetch answers for.
Returns:
StateAnswers or None. A StateAnswers object containing all answers
associated with the state, or None if no such answers exist.
"""
state_answers_models = stats_models.StateAnswersModel.get_all_models(
exploration_id, exploration_version, state_name)
if state_answers_models:
main_state_answers_model = state_answers_models[0]
submitted_answer_dict_list = itertools.chain.from_iterable([
state_answers_model.submitted_answer_list
for state_answers_model in state_answers_models])
return stats_domain.StateAnswers(
exploration_id, exploration_version, state_name,
main_state_answers_model.interaction_id,
[stats_domain.SubmittedAnswer.from_dict(submitted_answer_dict)
for submitted_answer_dict in submitted_answer_dict_list],
schema_version=main_state_answers_model.schema_version)
else:
return None
def get_sample_answers(exploration_id, exploration_version, state_name):
"""Fetches a list of sample answers that were submitted to the specified
exploration state (at the given version of the exploration).
Args:
exploration_id: str. The exploration ID.
exploration_version: int. The version of the exploration to fetch
answers for.
state_name: str. The name of the state to fetch answers for.
Returns:
list(*). A list of some sample raw answers. At most 100 answers are
returned.
"""
answers_model = stats_models.StateAnswersModel.get_master_model(
exploration_id, exploration_version, state_name)
if answers_model is None:
return []
# Return at most 100 answers, and only answers from the initial shard (If
# we needed to use subsequent shards then the answers are probably too big
# anyway).
sample_answers = answers_model.submitted_answer_list[:100]
return [
stats_domain.SubmittedAnswer.from_dict(submitted_answer_dict).answer
for submitted_answer_dict in sample_answers]
def get_top_state_answer_stats(exploration_id, state_name):
"""Fetches the top (at most) 10 answers from the given state_name in the
corresponding exploration. Only answers that occur with frequency >=
STATE_ANSWER_STATS_MIN_FREQUENCY are returned.
Args:
exploration_id: str. The exploration ID.
state_name: str. The name of the state to fetch answers for.
Returns:
list(*). A list of the top 10 answers, sorted by decreasing frequency.
"""
calc_output = (
_get_calc_output(exploration_id, state_name, 'Top10AnswerFrequencies'))
raw_calc_output = (
[] if calc_output is None else
calc_output.calculation_output.to_raw_type())
return [
{'answer': output['answer'], 'frequency': output['frequency']}
for output in raw_calc_output
if output['frequency'] >= feconf.STATE_ANSWER_STATS_MIN_FREQUENCY
]
def get_top_state_unresolved_answers(exploration_id, state_name):
"""Fetches the top unresolved answers for the given state_name in the
corresponding exploration. Only answers that occur with frequency >=
STATE_ANSWER_STATS_MIN_FREQUENCY are returned.
Args:
exploration_id: str. The exploration ID.
state_name: str. The name of the state to fetch answers for.
Returns:
list(*). A list of the top 10 answers, sorted by decreasing frequency.
"""
calculation_output = (
_get_calc_output(
exploration_id, state_name, 'TopNUnresolvedAnswersByFrequency')
.calculation_output.to_raw_type())
return [
{'answer': output['answer'], 'frequency': output['frequency']}
for output in calculation_output
if output['frequency'] >= feconf.STATE_ANSWER_STATS_MIN_FREQUENCY
]
def get_top_state_answer_stats_multi(exploration_id, state_names):
"""Fetches the top (at most) 10 answers from each given state_name in the
corresponding exploration. Only answers that occur with frequency >=
STATE_ANSWER_STATS_MIN_FREQUENCY are returned.
Args:
exploration_id: str. The exploration ID.
        state_names: list(str). The names of the states to fetch answers for.
Returns:
dict(str: list(*)). Dict mapping each state name to the list of its top
(at most) 10 answers, sorted by decreasing frequency.
"""
return {
state_name: get_top_state_answer_stats(exploration_id, state_name)
for state_name in state_names
}
def _get_calc_output(exploration_id, state_name, calculation_id):
"""Get state answers calculation output domain object obtained from
StateAnswersCalcOutputModel instance stored in the data store. The
calculation ID comes from the name of the calculation class used to compute
aggregate data from submitted user answers. This returns aggregated output
for all versions of the specified state and exploration.
Args:
exploration_id: str. ID of the exploration.
state_name: str. Name of the state.
calculation_id: str. Name of the calculation class.
Returns:
StateAnswersCalcOutput|None. The state answers calculation output
domain object or None.
"""
calc_output_model = stats_models.StateAnswersCalcOutputModel.get_model(
exploration_id, VERSION_ALL, state_name, calculation_id)
if calc_output_model:
calculation_output = None
if (calc_output_model.calculation_output_type ==
stats_domain.CALC_OUTPUT_TYPE_ANSWER_FREQUENCY_LIST):
calculation_output = (
stats_domain.AnswerFrequencyList.from_raw_type(
calc_output_model.calculation_output))
elif (calc_output_model.calculation_output_type ==
stats_domain.CALC_OUTPUT_TYPE_CATEGORIZED_ANSWER_FREQUENCY_LISTS):
calculation_output = (
stats_domain.CategorizedAnswerFrequencyLists.from_raw_type(
calc_output_model.calculation_output))
return stats_domain.StateAnswersCalcOutput(
exploration_id, VERSION_ALL, state_name,
calc_output_model.interaction_id, calculation_id,
calculation_output)
else:
return None
| 38.955179
| 80
| 0.71292
|
132cfac5f59e599e9c94a50f7c18af239737e0a3
| 21,740
|
py
|
Python
|
hug/routing.py
|
Asteur/hug_api_maker
|
7da43cee1d1298a25417b12770b382464a8fe389
|
[
"MIT"
] | null | null | null |
hug/routing.py
|
Asteur/hug_api_maker
|
7da43cee1d1298a25417b12770b382464a8fe389
|
[
"MIT"
] | null | null | null |
hug/routing.py
|
Asteur/hug_api_maker
|
7da43cee1d1298a25417b12770b382464a8fe389
|
[
"MIT"
] | null | null | null |
"""hug/routing.py
Defines the chainable classes responsible for defining the routing of Python functions for use with Falcon
and CLIs
Copyright (C) 2016 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import os
import re
from collections import OrderedDict
from functools import wraps
from urllib.parse import urljoin
import falcon
import hug.api
import hug.interface
import hug.output_format
from falcon import HTTP_METHODS
from hug import introspect
from hug.exceptions import InvalidTypeData
class Router(object):
"""The base chainable router object"""
__slots__ = ('route', )
def __init__(self, transform=None, output=None, validate=None, api=None, requires=(), **kwargs):
self.route = {}
if transform is not None:
self.route['transform'] = transform
if output:
self.route['output'] = output
if validate:
self.route['validate'] = validate
if api:
self.route['api'] = api
if requires:
self.route['requires'] = (requires, ) if not isinstance(requires, (tuple, list)) else requires
def output(self, formatter, **overrides):
"""Sets the output formatter that should be used to render this route"""
return self.where(output=formatter, **overrides)
def transform(self, function, **overrides):
"""Sets the function that should be used to transform the returned Python structure into something
        serializable by the specified output format
"""
return self.where(transform=function, **overrides)
def validate(self, validation_function, **overrides):
"""Sets the secondary validation fucntion to use for this handler"""
return self.where(validate=validation_function, **overrides)
def api(self, api, **overrides):
"""Sets the API that should contain this route"""
return self.where(api=api, **overrides)
def requires(self, requirements, **overrides):
"""Adds additional requirements to the specified route"""
return self.where(requires=tuple(self.route.get('requires', ())) + tuple(requirements), **overrides)
def doesnt_require(self, requirements, **overrides):
"""Removes individual requirements while keeping all other defined ones within a route"""
return self.where(requires=tuple(set(self.route.get('requires', ())).difference(requirements if
type(requirements) in (list, tuple) else (requirements, ))))
def where(self, **overrides):
"""Creates a new route, based on the current route, with the specified overrided values"""
route_data = self.route.copy()
route_data.update(overrides)
return self.__class__(**route_data)
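# Editorial note (not part of the original module): every chainable helper on
# Router is sugar over where(); for example, Router(output=fmt).api(my_api)
# builds the same route dict as Router(output=fmt).where(api=my_api), where
# `fmt` and `my_api` are placeholder names used only for illustration.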
class CLIRouter(Router):
"""The CLIRouter provides a chainable router that can be used to route a CLI command to a Python function"""
__slots__ = ()
def __init__(self, name=None, version=None, doc=None, **kwargs):
super().__init__(**kwargs)
if name is not None:
self.route['name'] = name
if version:
self.route['version'] = version
if doc:
self.route['doc'] = doc
def name(self, name, **overrides):
"""Sets the name for the CLI interface"""
return self.where(name=name, **overrides)
def version(self, version, **overrides):
"""Sets the version for the CLI interface"""
return self.where(version=version, **overrides)
def doc(self, documentation, **overrides):
"""Sets the documentation for the CLI interface"""
return self.where(doc=documentation, **overrides)
def __call__(self, api_function):
"""Enables exposing a Hug compatible function as a Command Line Interface"""
hug.interface.CLI(self.route, api_function)
return api_function
class InternalValidation(Router):
"""Defines the base route for interfaces that define their own internal validation"""
__slots__ = ()
def __init__(self, raise_on_invalid=False, on_invalid=None, output_invalid=None, **kwargs):
super().__init__(**kwargs)
if raise_on_invalid:
self.route['raise_on_invalid'] = raise_on_invalid
if on_invalid is not None:
self.route['on_invalid'] = on_invalid
if output_invalid is not None:
self.route['output_invalid'] = output_invalid
def raise_on_invalid(self, setting=True, **overrides):
"""Sets the route to raise validation errors instead of catching them"""
return self.where(raise_on_invalid=setting, **overrides)
def on_invalid(self, function, **overrides):
"""Sets a function to use to transform data on validation errors.
        Defaults to the transform function if one is set; set to `False` to
        ensure no special handling occurs for invalid data.
"""
return self.where(on_invalid=function, **overrides)
def output_invalid(self, output_handler, **overrides):
"""Sets an output handler to be used when handler validation fails.
Defaults to the output formatter set globally for the route.
"""
return self.where(output_invalid=output_handler, **overrides)
class LocalRouter(InternalValidation):
"""The LocalRouter defines how interfaces should be handled when accessed locally from within Python code"""
__slots__ = ()
def __init__(self, directives=True, validate=True, version=None, **kwargs):
super().__init__(**kwargs)
if version is not None:
self.route['version'] = version
if not directives:
self.route['skip_directives'] = True
if not validate:
self.route['skip_validation'] = True
def directives(self, use=True, **kwargs):
return self.where(directives=use)
def validate(self, enforce=True, **kwargs):
return self.where(validate=enforce)
def version(self, supported, **kwargs):
return self.where(version=supported)
def __call__(self, api_function):
"""Enables exposing a hug compatible function locally"""
return hug.interface.Local(self.route, api_function)
class HTTPRouter(InternalValidation):
"""The HTTPRouter provides the base concept of a router from an HTTPRequest to a Python function"""
__slots__ = ()
def __init__(self, versions=None, parse_body=False, parameters=None, defaults={}, status=None,
response_headers=None, private=False, inputs=None, **kwargs):
super().__init__(**kwargs)
self.route['versions'] = (versions, ) if isinstance(versions, (int, float, None.__class__)) else versions
if parse_body:
self.route['parse_body'] = parse_body
if parameters:
self.route['parameters'] = parameters
if defaults:
self.route['defaults'] = defaults
if status:
self.route['status'] = status
if response_headers:
self.route['response_headers'] = response_headers
if private:
self.route['private'] = private
if inputs:
self.route['inputs'] = inputs
def versions(self, supported, **overrides):
"""Sets the versions that this route should be compatiable with"""
return self.where(versions=supported, **overrides)
def parse_body(self, automatic=True, **overrides):
"""Tells hug to automatically parse the input body if it matches a registered input format"""
return self.where(parse_body=automatic, **overrides)
def set_status(self, status, **overrides):
"""Sets the status that will be returned by default"""
return self.where(status=status, **overrides)
def parameters(self, parameters, **overrides):
"""Sets the custom parameters that will be used instead of those found introspecting the decorated function"""
return self.where(parameters=parameters, **overrides)
def defaults(self, defaults, **overrides):
"""Sets the custom defaults that will be used for custom parameters"""
return self.where(defaults=defaults, **overrides)
def _create_interface(self, api, api_function, catch_exceptions=True):
interface = hug.interface.HTTP(self.route, api_function, catch_exceptions)
return (interface, api_function)
def response_headers(self, headers, **overrides):
"""Sets the response headers automatically injected by the router"""
return self.where(response_headers=headers, **overrides)
def add_response_headers(self, headers, **overrides):
"""Adds the specified response headers while keeping existing ones in-tact"""
response_headers = self.route.get('response_headers', {}).copy()
response_headers.update(headers)
return self.where(response_headers=response_headers, **overrides)
def cache(self, private=False, max_age=31536000, s_maxage=None, no_cache=False, no_store=False,
must_revalidate=False, **overrides):
"""Convience method for quickly adding cache header to route"""
parts = ('private' if private else 'public', 'max-age={0}'.format(max_age),
's-maxage={0}'.format(s_maxage) if s_maxage is not None else None, no_cache and 'no-cache',
no_store and 'no-store', must_revalidate and 'must-revalidate')
return self.add_response_headers({'cache-control': ', '.join(filter(bool, parts))}, **overrides)
def allow_origins(self, *origins, methods=None, **overrides):
"""Convience method for quickly allowing other resources to access this one"""
headers = {'Access-Control-Allow-Origin': ', '.join(origins) if origins else '*'}
if methods:
headers['Access-Control-Allow-Methods'] = ', '.join(methods)
return self.add_response_headers(headers, **overrides)
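# Editorial note (not part of the original module): because the routers are
# chainable, cache() and allow_origins() simply layer response headers; for
# example, HTTPRouter().cache(max_age=3600).allow_origins('https://example.com',
# methods=('GET',)) yields a route carrying 'cache-control: public, max-age=3600'
# plus the matching Access-Control-Allow-Origin/-Methods headers (the origin
# URL here is illustrative).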
class NotFoundRouter(HTTPRouter):
"""Provides a chainable router that can be used to route 404'd request to a Python function"""
__slots__ = ()
def __init__(self, output=None, versions=None, status=falcon.HTTP_NOT_FOUND, **kwargs):
super().__init__(output=output, versions=versions, status=status, **kwargs)
def __call__(self, api_function):
api = self.route.get('api', hug.api.from_object(api_function))
(interface, callable_method) = self._create_interface(api, api_function)
for version in self.route['versions']:
api.http.set_not_found_handler(interface, version)
return callable_method
class SinkRouter(HTTPRouter):
"""Provides a chainable router that can be used to route all routes pass a certain base URL (essentially route/*)"""
__slots__ = ()
def __init__(self, urls=None, output=None, **kwargs):
super().__init__(output=output, **kwargs)
if urls:
self.route['urls'] = (urls, ) if isinstance(urls, str) else urls
def __call__(self, api_function):
api = self.route.get('api', hug.api.from_object(api_function))
(interface, callable_method) = self._create_interface(api, api_function)
for base_url in self.route.get('urls', ("/{0}".format(api_function.__name__), )):
api.http.add_sink(interface, base_url)
return callable_method
class StaticRouter(SinkRouter):
"""Provides a chainable router that can be used to return static files automatically from a set of directories"""
__slots__ = ('route', )
def __init__(self, urls=None, output=hug.output_format.file, cache=False, **kwargs):
super().__init__(urls=urls, output=output, **kwargs)
if cache is True:
self.cache()
elif cache is not False:
self.cache(**cache)
def __call__(self, api_function):
directories = []
for directory in api_function():
path = os.path.abspath(
directory
)
directories.append(path)
api = self.route.get('api', hug.api.from_object(api_function))
for base_url in self.route.get('urls', ("/{0}".format(api_function.__name__), )):
def read_file(request=None, path=""):
filename = path.lstrip("/")
for directory in directories:
path = os.path.abspath(os.path.join(directory, filename))
if not path.startswith(directory):
hug.redirect.not_found()
if os.path.isdir(path):
new_path = os.path.join(path, "index.html")
if os.path.exists(new_path) and os.path.isfile(new_path):
path = new_path
if os.path.exists(path) and os.path.isfile(path):
return path
hug.redirect.not_found()
api.http.add_sink(self._create_interface(api, read_file)[0], base_url)
return api_function
class ExceptionRouter(HTTPRouter):
"""Provides a chainable router that can be used to route exceptions thrown during request handling"""
__slots__ = ()
def __init__(self, exceptions=(Exception, ), exclude=(), output=None, **kwargs):
super().__init__(output=output, **kwargs)
self.route['exceptions'] = (exceptions, ) if not isinstance(exceptions, (list, tuple)) else exceptions
self.route['exclude'] = (exclude, ) if not isinstance(exclude, (list, tuple)) else exclude
def __call__(self, api_function):
api = self.route.get('api', hug.api.from_object(api_function))
(interface, callable_method) = self._create_interface(api, api_function, catch_exceptions=False)
for version in self.route['versions']:
for exception in self.route['exceptions']:
api.http.add_exception_handler(exception, interface, version)
return callable_method
def _create_interface(self, api, api_function, catch_exceptions=False):
interface = hug.interface.ExceptionRaised(self.route, api_function, catch_exceptions)
return (interface, api_function)
class URLRouter(HTTPRouter):
"""Provides a chainable router that can be used to route a URL to a Python function"""
__slots__ = ()
def __init__(self, urls=None, accept=HTTP_METHODS, output=None, examples=(), versions=None,
suffixes=(), prefixes=(), response_headers=None, parse_body=True, **kwargs):
super().__init__(output=output, versions=versions, parse_body=parse_body, response_headers=response_headers,
**kwargs)
if urls is not None:
self.route['urls'] = (urls, ) if isinstance(urls, str) else urls
if accept:
self.route['accept'] = (accept, ) if isinstance(accept, str) else accept
if examples:
self.route['examples'] = (examples, ) if isinstance(examples, str) else examples
if suffixes:
self.route['suffixes'] = (suffixes, ) if isinstance(suffixes, str) else suffixes
if prefixes:
self.route['prefixes'] = (prefixes, ) if isinstance(prefixes, str) else prefixes
def __call__(self, api_function):
api = self.route.get('api', hug.api.from_object(api_function))
api.http.routes.setdefault(api.http.base_url, OrderedDict())
(interface, callable_method) = self._create_interface(api, api_function)
use_examples = self.route.get('examples', ())
if not interface.required and not use_examples:
use_examples = (True, )
for base_url in self.route.get('urls', ("/{0}".format(api_function.__name__), )):
expose = [base_url, ]
for suffix in self.route.get('suffixes', ()):
if suffix.startswith('/'):
expose.append(os.path.join(base_url, suffix.lstrip('/')))
else:
expose.append(base_url + suffix)
for prefix in self.route.get('prefixes', ()):
expose.append(prefix + base_url)
for url in expose:
handlers = api.http.routes[api.http.base_url].setdefault(url, {})
for method in self.route.get('accept', ()):
version_mapping = handlers.setdefault(method.upper(), {})
for version in self.route['versions']:
version_mapping[version] = interface
api.http.versioned.setdefault(version, {})[callable_method.__name__] = callable_method
interface.examples = use_examples
return callable_method
def urls(self, *urls, **overrides):
"""Sets the URLs that will map to this API call"""
return self.where(urls=urls, **overrides)
def accept(self, *accept, **overrides):
"""Sets a list of HTTP methods this router should accept"""
return self.where(accept=accept, **overrides)
def get(self, urls=None, **overrides):
"""Sets the acceptable HTTP method to a GET"""
if urls is not None:
overrides['urls'] = urls
return self.where(accept='GET', **overrides)
def delete(self, urls=None, **overrides):
"""Sets the acceptable HTTP method to DELETE"""
if urls is not None:
overrides['urls'] = urls
return self.where(accept='DELETE', **overrides)
def post(self, urls=None, **overrides):
"""Sets the acceptable HTTP method to POST"""
if urls is not None:
overrides['urls'] = urls
return self.where(accept='POST', **overrides)
def put(self, urls=None, **overrides):
"""Sets the acceptable HTTP method to PUT"""
if urls is not None:
overrides['urls'] = urls
return self.where(accept='PUT', **overrides)
def trace(self, urls=None, **overrides):
"""Sets the acceptable HTTP method to TRACE"""
if urls is not None:
overrides['urls'] = urls
return self.where(accept='TRACE', **overrides)
def patch(self, urls=None, **overrides):
"""Sets the acceptable HTTP method to PATCH"""
if urls is not None:
overrides['urls'] = urls
return self.where(accept='PATCH', **overrides)
def options(self, urls=None, **overrides):
"""Sets the acceptable HTTP method to OPTIONS"""
if urls is not None:
overrides['urls'] = urls
return self.where(accept='OPTIONS', **overrides)
def head(self, urls=None, **overrides):
"""Sets the acceptable HTTP method to HEAD"""
if urls is not None:
overrides['urls'] = urls
return self.where(accept='HEAD', **overrides)
def connect(self, urls=None, **overrides):
"""Sets the acceptable HTTP method to CONNECT"""
if urls is not None:
overrides['urls'] = urls
return self.where(accept='CONNECT', **overrides)
def call(self, **overrides):
"""Sets the acceptable HTTP method to all known"""
return self.where(accept=HTTP_METHODS, **overrides)
def http(self, **overrides):
"""Sets the acceptable HTTP method to all known"""
return self.where(accept=HTTP_METHODS, **overrides)
def get_post(self, **overrides):
"""Exposes a Python method externally under both the HTTP POST and GET methods"""
return self.where(accept=('GET', 'POST'), **overrides)
def put_post(self, **overrides):
"""Exposes a Python method externally under both the HTTP POST and PUT methods"""
return self.where(accept=('PUT', 'POST'), **overrides)
def examples(self, *examples, **overrides):
"""Sets the examples that the route should use"""
return self.where(examples=examples, **overrides)
def suffixes(self, *suffixes, **overrides):
"""Sets the suffixes supported by the route"""
return self.where(suffixes=suffixes, **overrides)
def prefixes(self, *prefixes, **overrides):
"""Sets the prefixes supported by the route"""
return self.where(prefixes=prefixes, **overrides)
def where(self, **overrides):
if 'urls' in overrides:
existing_urls = self.route.get('urls', ())
use_urls = []
for url in (overrides['urls'], ) if isinstance(overrides['urls'], str) else overrides['urls']:
if url.startswith('/') or not existing_urls:
use_urls.append(url)
else:
for existing in existing_urls:
use_urls.append(urljoin(existing.rstrip('/') + '/', url))
overrides['urls'] = tuple(use_urls)
return super().where(**overrides)
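# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# How the chainable routers above are typically composed; the URL and handler
# below are hypothetical:
#
#   router = URLRouter(urls='/items', versions=1)
#   cached_get = router.get().cache(max_age=600).allow_origins('https://example.com')
#
#   @cached_get
#   def list_items():
#       return ['a', 'b']
#
# Each chained call goes through where() and returns a new router, so the base
# `router` object can be reused to register further routes.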
| 43.393214
| 120
| 0.645814
|
69f81adbb32343db474b1b0c44099404db6431dd
| 1,017
|
py
|
Python
|
designing-restful-apis/Lesson_4/13_BargainMart/Starter Code/hungryclient.py
|
robinl3680/udacity-course
|
308daf62479f9bf6f4256eb19313631f1bb4c5da
|
[
"MIT"
] | 68
|
2016-07-28T07:24:57.000Z
|
2021-10-09T19:28:48.000Z
|
designing-restful-apis/Lesson_4/13_BargainMart/Starter Code/hungryclient.py
|
robinl3680/udacity-course
|
308daf62479f9bf6f4256eb19313631f1bb4c5da
|
[
"MIT"
] | 1
|
2022-03-12T01:01:42.000Z
|
2022-03-12T01:01:42.000Z
|
designing-restful-apis/Lesson_4/13_BargainMart/Starter Code/hungryclient.py
|
robinl3680/udacity-course
|
308daf62479f9bf6f4256eb19313631f1bb4c5da
|
[
"MIT"
] | 105
|
2016-10-19T03:56:33.000Z
|
2022-03-15T02:12:08.000Z
|
from __future__ import division
from time import sleep
import json
import httplib2
h = httplib2.Http()
url = raw_input("Please enter the uri you want to access, \n If left blank the connection will be set to 'http://localhost:5000/catalog': ")
if url == '':
url = 'http://localhost:5000/catalog'
req_per_minute = float(raw_input("Please specify the number of requests per minute: ") )
interval = (60.0 / req_per_minute)
def SendRequests(url, req_per_minute):
requests = 0
while requests < req_per_minute:
result = json.loads(h.request(url,'GET')[1])
#result = h.request(url,'GET')[1]
#print result
if result.get('error') is not None:
print "Error #%s : %s" %(result.get('error'), result.get('data'))
print "Hit rate limit. Waiting 5 seconds and trying again..."
sleep(5)
SendRequests(url, req_per_minute)
else:
print "Number of Requests: ", requests+1
print result
requests = requests + 1
sleep(interval)
print "Sending Requests..."
SendRequests(url, req_per_minute)
| 27.486486
| 142
| 0.701082
|
48686733444b8b26da69768b70182d397dd5f102
| 329
|
py
|
Python
|
xd/build/core/data/num.py
|
esben/xd-build-core
|
b1f805d4a50167f4c54003ffa8f33d224d4c819d
|
[
"MIT"
] | 1
|
2020-11-27T23:34:53.000Z
|
2020-11-27T23:34:53.000Z
|
xd/build/core/data/num.py
|
esben/xd-build-core
|
b1f805d4a50167f4c54003ffa8f33d224d4c819d
|
[
"MIT"
] | 6
|
2015-10-30T12:22:56.000Z
|
2016-08-25T09:38:48.000Z
|
xd/build/core/data/num.py
|
XD-embedded/xd-build-core
|
357e4d78d35456d6906aa30151ddc989781227ab
|
[
"MIT"
] | null | null | null |
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
from .var import *
__all__ = ['Bool', 'Int', 'Float']
class Bool(Variable):
__slots__ = []
basetype = bool
class Int(Variable):
__slots__ = []
basetype = int
class Float(Variable):
__slots__ = []
basetype = float
| 10.612903
| 34
| 0.632219
|
9d8773c7a917fd3a30407a178f62eaf15bb45aab
| 1,574
|
py
|
Python
|
setup.py
|
Galarzaa90/GuildWatcher
|
2ec08ebb7a500a332962e6d78b8d175b842959a2
|
[
"MIT"
] | 5
|
2017-05-01T20:37:10.000Z
|
2020-06-04T20:41:38.000Z
|
setup.py
|
Galarzaa90/GuildWatcher
|
2ec08ebb7a500a332962e6d78b8d175b842959a2
|
[
"MIT"
] | 7
|
2018-05-08T20:56:49.000Z
|
2021-02-22T20:38:06.000Z
|
setup.py
|
Galarzaa90/GuildWatcher
|
2ec08ebb7a500a332962e6d78b8d175b842959a2
|
[
"MIT"
] | 4
|
2017-09-02T05:10:02.000Z
|
2018-02-22T01:41:24.000Z
|
import sys
from setuptools import setup
if sys.version_info < (3, 6):
sys.exit('Sorry, Python < 3.6 is not supported')
with open('requirements.txt') as f:
requirements = f.read().splitlines()
with open('README.md') as f:
readme = f.read()
setup(
name='guildwatcher',
version='2.0.0',
author='Allan Galarza',
author_email="allan.galarza@gmail.com",
description='A discord webhook to track Tibia guild changes.',
long_description=readme,
long_description_content_type="text/markdown",
license="MIT",
url='https://github.com/Galarzaa90/GuildWatcher',
py_modules=['guildwatcher'],
install_requires=requirements,
entry_points='''
[console_scripts]
guildwatcher=guildwatcher:scan_guilds
''',
project_urls={
"Coverage: Codecov": "https://codecov.io/gh/Galarzaa90/GuildWatcher/",
},
python_requires=">=3.6",
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Communications :: Chat',
'Topic :: Games/Entertainment',
'Topic :: Games/Entertainment :: Role-Playing'
]
)
| 31.48
| 78
| 0.628971
|
a6c60ee2e988ec4e7f6c0eb606c5bab98f0814dc
| 15,803
|
py
|
Python
|
mathics/builtin/attributes.py
|
ptjb/Mathics
|
c44678e73ce753e5ce3959cdca18ec983f05716d
|
[
"Apache-2.0"
] | null | null | null |
mathics/builtin/attributes.py
|
ptjb/Mathics
|
c44678e73ce753e5ce3959cdca18ec983f05716d
|
[
"Apache-2.0"
] | null | null | null |
mathics/builtin/attributes.py
|
ptjb/Mathics
|
c44678e73ce753e5ce3959cdca18ec983f05716d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
Attributes
There are several builtin-attributes which have a predefined meaning in \Mathics.
However, you can set any symbol as an attribute, in contrast to \Mathematica.
"""
from mathics.builtin.base import Predefined, Builtin
from mathics.builtin.evaluation import Sequence
from mathics.core.expression import Expression, Symbol, SymbolNull, String
from mathics.builtin.assignment import get_symbol_list
class Attributes(Builtin):
"""
<dl>
<dt>'Attributes'[$symbol$]
<dd>returns the attributes of $symbol$.
<dt>'Attributes'[$symbol$] = {$attr1$, $attr2$}
<dd>sets the attributes of $symbol$, replacing any existing attributes.
</dl>
>> Attributes[Plus]
= {Flat, Listable, NumericFunction, OneIdentity, Orderless, Protected}
'Attributes' always considers the head of an expression:
>> Attributes[a + b + c]
= {Flat, Listable, NumericFunction, OneIdentity, Orderless, Protected}
You can assign values to 'Attributes' to set attributes:
>> Attributes[f] = {Flat, Orderless}
= {Flat, Orderless}
>> f[b, f[a, c]]
= f[a, b, c]
Attributes must be symbols:
>> Attributes[f] := {a + b}
: Argument a + b at position 1 is expected to be a symbol.
= $Failed
Use 'Symbol' to convert strings to symbols:
>> Attributes[f] = Symbol["Listable"]
= Listable
>> Attributes[f]
= {Listable}
"""
attributes = ("HoldAll", "Listable")
def apply(self, expr, evaluation):
"Attributes[expr_]"
name = expr.get_lookup_name()
attributes = list(evaluation.definitions.get_attributes(name))
attributes.sort()
attr = [Symbol(attribute) for attribute in attributes]
return Expression("List", *attr)
class SetAttributes(Builtin):
"""
<dl>
<dt>'SetAttributes'[$symbol$, $attrib$]
<dd>adds $attrib$ to the list of $symbol$'s attributes.
</dl>
>> SetAttributes[f, Flat]
>> Attributes[f]
= {Flat}
Multiple attributes can be set at the same time using lists:
>> SetAttributes[{f, g}, {Flat, Orderless}]
>> Attributes[g]
= {Flat, Orderless}
"""
attributes = ("HoldFirst",)
def apply(self, symbols, attributes, evaluation):
"SetAttributes[symbols_, attributes_]"
symbols = get_symbol_list(
symbols, lambda item: evaluation.message("SetAttributes", "sym", item, 1)
)
if symbols is None:
return
values = get_symbol_list(
attributes, lambda item: evaluation.message("SetAttributes", "sym", item, 2)
)
if values is None:
return
for symbol in symbols:
if "System`Locked" in evaluation.definitions.get_attributes(symbol):
evaluation.message("SetAttributes", "locked", Symbol(symbol))
else:
for value in values:
evaluation.definitions.set_attribute(symbol, value)
return SymbolNull
class ClearAttributes(Builtin):
"""
<dl>
<dt>'ClearAttributes'[$symbol$, $attrib$]
<dd>removes $attrib$ from $symbol$'s attributes.
</dl>
>> SetAttributes[f, Flat]
>> Attributes[f]
= {Flat}
>> ClearAttributes[f, Flat]
>> Attributes[f]
= {}
Attributes that are not even set are simply ignored:
>> ClearAttributes[{f}, {Flat}]
>> Attributes[f]
= {}
"""
attributes = ("HoldFirst",)
def apply(self, symbols, attributes, evaluation):
"ClearAttributes[symbols_, attributes_]"
symbols = get_symbol_list(
symbols, lambda item: evaluation.message("ClearAttributes", "sym", item, 1)
)
if symbols is None:
return
values = get_symbol_list(
attributes,
lambda item: evaluation.message("ClearAttributes", "sym", item, 2),
)
if values is None:
return
for symbol in symbols:
if "System`Locked" in evaluation.definitions.get_attributes(symbol):
evaluation.message("ClearAttributes", "locked", Symbol(symbol))
else:
for value in values:
evaluation.definitions.clear_attribute(symbol, value)
return SymbolNull
class Protect(Builtin):
"""
<dl>
<dt>'Protect'[$s1$, $s2$, ...]
<dd>sets the attribute 'Protected' for the symbols $si$.
<dt>'Protect'[$str1$, $str2$, ...]
<dd>protects all symbols whose names textually match $stri$.
</dl>
>> A = {1, 2, 3};
>> Protect[A]
>> A[[2]] = 4;
: Symbol A is Protected.
>> A
= {1, 2, 3}
"""
attributes = ("HoldAll",)
messages = {
"ssym": "`1` is not a symbol or a string.",
}
def apply(self, symbols, evaluation):
"Protect[symbols___]"
protected = Symbol("System`Protected")
items = []
if isinstance(symbols, Symbol):
symbols = [symbols]
elif isinstance(symbols, String):
symbols = [symbols]
elif isinstance(symbols, Expression):
if symbols.get_head_name() in ("System`Sequence", "System`List"):
symbols = symbols.get_leaves()
else:
evaluation.message("Protect", "ssym", symbols)
return SymbolNull
for symbol in symbols:
if isinstance(symbol, Symbol):
items.append(symbol)
else:
pattern = symbol.get_string_value()
if not pattern or pattern == "":
evaluation.message("Protect", "ssym", symbol)
continue
if pattern[0] == "`":
pattern = evaluation.definitions.get_current_context() + pattern[1:]
names = evaluation.definitions.get_matching_names(pattern)
for defn in names:
symbol = Symbol(defn)
if not "System`Locked" in evaluation.definitions.get_attributes(
defn
):
items.append(symbol)
Expression("SetAttributes", Expression("List", *items), protected).evaluate(
evaluation
)
return SymbolNull
class Unprotect(Builtin):
"""
<dl>
<dt>'Unprotect'[$s1$, $s2$, ...]
<dd>removes the attribute 'Protected' for the symbols $si$.
<dt>'Unprotect'[$str$]
<dd>unprotects symbols whose names textually match $str$.
</dl>
"""
attributes = ("HoldAll",)
messages = {
"ssym": "`1` is not a symbol or a string.",
}
def apply(self, symbols, evaluation):
"Unprotect[symbols___]"
protected = Symbol("System`Protected")
items = []
if isinstance(symbols, Symbol):
symbols = [symbols]
elif isinstance(symbols, Expression):
symbols = symbols.get_leaves()
elif isinstance(symbols, String):
symbols = [symbols]
else:
symbols = symbols.get_sequence()
for symbol in symbols:
if isinstance(symbol, Symbol):
items.append(symbol)
else:
pattern = symbol.get_string_value()
if not pattern or pattern == "":
evaluation.message("Unprotect", "ssym", symbol)
continue
if pattern[0] == "`":
pattern = evaluation.definitions.get_current_context() + pattern[1:]
names = evaluation.definitions.get_matching_names(pattern)
for defn in names:
symbol = Symbol(defn)
if not "System`Locked" in evaluation.definitions.get_attributes(
defn
):
items.append(symbol)
Expression("ClearAttributes", Expression("List", *items), protected).evaluate(
evaluation
)
return SymbolNull
class Protected(Predefined):
"""
<dl>
<dt>'Protected'
<dd>is an attribute that prevents values on a symbol from
being modified.
</dl>
Values of 'Protected' symbols cannot be modified:
>> Attributes[p] = {Protected};
>> p = 2;
: Symbol p is Protected.
>> f[p] ^= 3;
: Tag p in f[p] is Protected.
>> Format[p] = "text";
: Symbol p is Protected.
However, attributes might still be set:
>> SetAttributes[p, Flat]
>> Attributes[p]
= {Flat, Protected}
Thus, you can easily remove the attribute 'Protected':
>> Attributes[p] = {};
>> p = 2
= 2
You can also use 'Protect' or 'Unprotect', resp.
>> Protect[p]
>> Attributes[p]
= {Protected}
>> Unprotect[p]
If a symbol is 'Protected' and 'Locked', it can never be changed again:
>> SetAttributes[p, {Protected, Locked}]
>> p = 2
: Symbol p is Protected.
= 2
>> Unprotect[p]
: Symbol p is locked.
"""
class ReadProtected(Predefined):
"""
<dl>
<dt>'ReadProtected'
<dd>is an attribute that prevents values on a symbol from
being read.
</dl>
Values associated with 'ReadProtected' symbols cannot be seen in
'Definition':
>> ClearAll[p]
>> p = 3;
>> Definition[p]
= p = 3
>> SetAttributes[p, ReadProtected]
>> Definition[p]
= Attributes[p] = {ReadProtected}
"""
class Locked(Predefined):
"""
<dl>
<dt>'Locked'
<dd>is an attribute that prevents attributes on a symbol from
being modified.
</dl>
The attributes of 'Locked' symbols cannot be modified:
>> Attributes[lock] = {Flat, Locked};
>> SetAttributes[lock, {}]
: Symbol lock is locked.
>> ClearAttributes[lock, Flat]
: Symbol lock is locked.
>> Attributes[lock] = {}
: Symbol lock is locked.
= {}
>> Attributes[lock]
= {Flat, Locked}
However, their values might be modified (as long as they are not 'Protected' too):
>> lock = 3
= 3
"""
class Flat(Predefined):
"""
<dl>
<dt>'Flat'
<dd>is an attribute that specifies that nested occurrences of
a function should be automatically flattened.
</dl>
A symbol with the 'Flat' attribute represents an associative
mathematical operation:
>> SetAttributes[f, Flat]
>> f[a, f[b, c]]
= f[a, b, c]
'Flat' is taken into account in pattern matching:
>> f[a, b, c] /. f[a, b] -> d
= f[d, c]
#> SetAttributes[{u, v}, Flat]
#> u[x_] := {x}
#> u[]
= u[]
#> u[a]
= {a}
#> u[a, b]
: Iteration limit of 1000 exceeded.
= $Aborted
#> u[a, b, c]
: Iteration limit of 1000 exceeded.
= $Aborted
#> v[x_] := x
#> v[]
= v[]
#> v[a]
= a
#> v[a, b] (* in Mathematica: Iteration limit of 4096 exceeded. *)
= v[a, b]
#> v[a, b, c] (* in Mathematica: Iteration limit of 4096 exceeded. *)
: Iteration limit of 1000 exceeded.
= $Aborted
"""
class Orderless(Predefined):
"""<dl>
<dt>'Orderless'
<dd>is an attribute that can be assigned to a symbol $f$ to
indicate that the elements $ei$ in expressions of the form
$f$[$e1$, $e2$, ...] should automatically be sorted into
canonical order. This property is accounted for in pattern
matching.
</dl>
The leaves of an 'Orderless' function are automatically sorted:
>> SetAttributes[f, Orderless]
>> f[c, a, b, a + b, 3, 1.0]
= f[1., 3, a, b, c, a + b]
A symbol with the 'Orderless' attribute represents a commutative
mathematical operation.
>> f[a, b] == f[b, a]
= True
'Orderless' affects pattern matching:
>> SetAttributes[f, Flat]
>> f[a, b, c] /. f[a, c] -> d
= f[b, d]
"""
class OneIdentity(Predefined):
"""
<dl>
<dt>'OneIdentity'
<dd>is an attribute specifying that '$f$[$x$]' should be treated
as equivalent to $x$ in pattern matching.
</dl>
'OneIdentity' affects pattern matching:
>> SetAttributes[f, OneIdentity]
>> a /. f[args___] -> {args}
= {a}
It does not affect evaluation:
>> f[a]
= f[a]
"""
class SequenceHold(Predefined):
"""
<dl>
<dt>'SequenceHold'
<dd>is an attribute that prevents 'Sequence' objects from being
spliced into a function's arguments.
</dl>
Normally, 'Sequence' will be spliced into a function:
>> f[Sequence[a, b]]
= f[a, b]
It does not for 'SequenceHold' functions:
>> SetAttributes[f, SequenceHold]
>> f[Sequence[a, b]]
= f[Sequence[a, b]]
E.g., 'Set' has attribute 'SequenceHold' to allow assignment of sequences to variables:
>> s = Sequence[a, b];
>> s
= Sequence[a, b]
>> Plus[s]
= a + b
"""
class HoldFirst(Predefined):
"""
<dl>
<dt>'HoldFirst'
<dd>is an attribute specifying that the first argument of a
function should be left unevaluated.
</dl>
>> Attributes[Set]
= {HoldFirst, Protected, SequenceHold}
"""
class HoldRest(Predefined):
"""
<dl>
<dt>'HoldRest'
<dd>is an attribute specifying that all but the first argument
of a function should be left unevaluated.
</dl>
>> Attributes[If]
= {HoldRest, Protected}
"""
class HoldAll(Predefined):
"""
<dl>
<dt>'HoldAll'
<dd>is an attribute specifying that all arguments of a
function should be left unevaluated.
</dl>
>> Attributes[Function]
= {HoldAll, Protected}
"""
class HoldAllComplete(Predefined):
"""
<dl>
<dt>'HoldAllComplete'
<dd>is an attribute that includes the effects of 'HoldAll' and
'SequenceHold', and also protects the function from being
affected by the upvalues of any arguments.
</dl>
'HoldAllComplete' even prevents upvalues from being used, and
includes 'SequenceHold'.
>> SetAttributes[f, HoldAllComplete]
>> f[a] ^= 3;
>> f[a]
= f[a]
>> f[Sequence[a, b]]
= f[Sequence[a, b]]
"""
class NHoldAll(Predefined):
"""
<dl>
<dt>'NHoldAll'
<dd>is an attribute that protects all arguments of a
function from numeric evaluation.
</dl>
>> N[f[2, 3]]
= f[2., 3.]
>> SetAttributes[f, NHoldAll]
>> N[f[2, 3]]
= f[2, 3]
"""
class NHoldFirst(Predefined):
"""
<dl>
<dt>'NHoldFirst'
<dd>is an attribute that protects the first argument of a
function from numeric evaluation.
</dl>
"""
class NHoldRest(Predefined):
"""
<dl>
<dt>'NHoldRest'
<dd>is an attribute that protects all but the first argument
of a function from numeric evaluation.
</dl>
"""
class Listable(Predefined):
"""
<dl>
<dt>'Listable'
<dd>is an attribute specifying that a function should be
automatically applied to each element of a list.
</dl>
>> SetAttributes[f, Listable]
>> f[{1, 2, 3}, {4, 5, 6}]
= {f[1, 4], f[2, 5], f[3, 6]}
>> f[{1, 2, 3}, 4]
= {f[1, 4], f[2, 4], f[3, 4]}
>> {{1, 2}, {3, 4}} + {5, 6}
= {{6, 7}, {9, 10}}
"""
class Constant(Predefined):
"""
<dl>
<dt>'Constant'
<dd>is an attribute that indicates that a symbol is a constant.
</dl>
Mathematical constants like 'E' have attribute 'Constant':
>> Attributes[E]
= {Constant, Protected, ReadProtected}
Constant symbols cannot be used as variables in 'Solve' and
related functions:
>> Solve[x + E == 0, E]
: E is not a valid variable.
= Solve[E + x == 0, E]
"""
| 26.559664
| 91
| 0.563627
|
fc60677ffde2308d0c6acb29de3b5a3bd690cf71
| 4,235
|
py
|
Python
|
malaya/text/rake.py
|
DevconX/Malaya
|
a2e7030f0911d65c9c1c72d38bc3e7c53b8e06fc
|
[
"MIT"
] | 39
|
2018-03-12T04:26:42.000Z
|
2018-12-05T03:53:45.000Z
|
malaya/text/rake.py
|
DevconX/Malaya
|
a2e7030f0911d65c9c1c72d38bc3e7c53b8e06fc
|
[
"MIT"
] | 12
|
2018-10-01T07:28:23.000Z
|
2018-12-10T01:59:25.000Z
|
malaya/text/rake.py
|
DevconX/Malaya
|
a2e7030f0911d65c9c1c72d38bc3e7c53b8e06fc
|
[
"MIT"
] | 16
|
2018-03-16T05:46:12.000Z
|
2018-12-10T04:15:07.000Z
|
# Implementation of RAKE - Rapid Automatic Keyword Extraction algorithm
# as described in:
# Rose, S., D. Engel, N. Cramer, and W. Cowley (2010).
# Automatic keyword extraction from individual documents.
# In M. W. Berry and J. Kogan (Eds.), Text Mining: Applications and
# Theory.unknown: John Wiley and Sons, Ltd.
import re
debug = False
test = True
def is_number(s):
try:
float(s) if '.' in s else int(s)
return True
except ValueError:
return False
def load_stop_words(stop_word_file):
"""
Utility function to load stop words from a file and return as a list of words
@param stop_word_file Path and file name of a file containing stop words.
@return list A list of stop words.
"""
stop_words = []
for line in open(stop_word_file):
if line.strip()[0:1] != '#':
for word in line.split(): # in case more than one per line
stop_words.append(word)
return stop_words
def separate_words(text, min_word_return_size):
"""
    Utility function to return a list of all words that have a length greater than a specified number of characters.
    @param text The text that must be split into words.
    @param min_word_return_size The minimum number of characters a word must have to be included.
"""
splitter = re.compile('[^a-zA-Z0-9_\\+\\-/]')
words = []
for single_word in splitter.split(text):
current_word = single_word.strip()
# leave numbers in phrase, but don't count as words, since they tend to
# invalidate scores of their phrases
if (
len(current_word) > min_word_return_size
and current_word != ''
and not is_number(current_word)
):
words.append(current_word)
return words
def split_sentences(text):
"""
Utility function to return a list of sentences.
@param text The text that must be split in to sentences.
"""
sentence_delimiters = re.compile(
u'[.!?,;:\t\\\\"\\(\\)\\\'\u2019\u2013]|\\s\\-\\s'
)
sentences = sentence_delimiters.split(text)
return sentences
def build_stop_word_regex(stop_word_file_path):
stop_word_list = load_stop_words(stop_word_file_path)
stop_word_regex_list = []
for word in stop_word_list:
word_regex = r'\b' + word + r'(?![\w-])'
stop_word_regex_list.append(word_regex)
stop_word_pattern = re.compile(
'|'.join(stop_word_regex_list), re.IGNORECASE
)
return stop_word_pattern
def generate_candidate_keywords(sentence_list, stopword_pattern):
phrase_list = []
for s in sentence_list:
tmp = re.sub(stopword_pattern, '|', s.strip())
phrases = tmp.split('|')
for phrase in phrases:
phrase = phrase.strip()
if phrase != '':
phrase_list.append(phrase)
return phrase_list
def calculate_word_scores(phraseList, attentions=None):
word_frequency = {}
word_degree = {}
for phrase in phraseList:
word_list = separate_words(phrase, 0)
word_list_length = len(word_list)
word_list_degree = word_list_length - 1
for word in word_list:
word_frequency.setdefault(word, 0)
if attentions:
score = attentions.get(word, 0)
else:
score = 1
word_frequency[word] += score
word_degree.setdefault(word, 0)
word_degree[word] += word_list_degree
for item in word_frequency:
word_degree[item] = word_degree[item] + word_frequency[item]
    # Calculate word scores = deg(w) / freq(w)
word_score = {}
for item in word_frequency:
word_score.setdefault(item, 0)
word_score[item] = word_degree[item] / (word_frequency[item] * 1.0)
return word_score
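# Hedged worked example (added for illustration): for the single phrase
# "deep learning", each word has frequency 1 and co-occurrence degree 1, so
# word_degree becomes 1 + 1 = 2 and both words score deg/freq = 2.0:
#
#   calculate_word_scores(["deep learning"])
#   # -> {'deep': 2.0, 'learning': 2.0}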
def generate_candidate_keyword_scores(phrase_list, word_score):
keyword_candidates = {}
for phrase in phrase_list:
keyword_candidates.setdefault(phrase, 0)
word_list = separate_words(phrase, 0)
candidate_score = 0
for word in word_list:
candidate_score += word_score[word]
keyword_candidates[phrase] = candidate_score
return keyword_candidates
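# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Minimal end-to-end run of the functions above. The stop-word pattern is built
# inline instead of via build_stop_word_regex(), which needs a stop-word file;
# the sample text and stop-word list are made up for this example.
if __name__ == '__main__':
    sample_text = ('Compatibility of systems of linear constraints over the set of '
                   'natural numbers. Criteria of compatibility are considered.')
    sample_stop_words = ['of', 'the', 'over', 'are']
    stop_pattern = re.compile(
        '|'.join(r'\b' + w + r'(?![\w-])' for w in sample_stop_words), re.IGNORECASE
    )
    sentences = split_sentences(sample_text)
    phrases = generate_candidate_keywords(sentences, stop_pattern)
    word_scores = calculate_word_scores(phrases)
    keyword_scores = generate_candidate_keyword_scores(phrases, word_scores)
    for phrase, score in sorted(keyword_scores.items(), key=lambda item: -item[1]):
        print(phrase, round(score, 2))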
| 32.576923
| 120
| 0.644392
|
417425e6e95b15315e52c1840f79246098008dc7
| 3,631
|
py
|
Python
|
examples/tf/rl2_ppo_metaworld_ml10.py
|
michahu/garage
|
c045a1e5e5088a18828ec48bfee0addb1943bfd4
|
[
"MIT"
] | null | null | null |
examples/tf/rl2_ppo_metaworld_ml10.py
|
michahu/garage
|
c045a1e5e5088a18828ec48bfee0addb1943bfd4
|
[
"MIT"
] | null | null | null |
examples/tf/rl2_ppo_metaworld_ml10.py
|
michahu/garage
|
c045a1e5e5088a18828ec48bfee0addb1943bfd4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Example script to run RL2 in ML10."""
# pylint: disable=no-value-for-parameter
# yapf: disable
import click
import metaworld
from garage import wrap_experiment
from garage.envs import MetaWorldSetTaskEnv
from garage.experiment import (MetaEvaluator, MetaWorldTaskSampler,
SetTaskSampler)
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import LocalSampler
from garage.tf.algos import RL2PPO
from garage.tf.algos.rl2 import RL2Env, RL2Worker
from garage.tf.policies import GaussianGRUPolicy
from garage.trainer import TFTrainer
# yapf: enable
@click.command()
@click.option('--seed', default=1)
@click.option('--meta_batch_size', default=10)
@click.option('--n_epochs', default=10)
@click.option('--episode_per_task', default=10)
@wrap_experiment
def rl2_ppo_metaworld_ml10(ctxt, seed, meta_batch_size, n_epochs,
episode_per_task):
"""Train RL2 PPO with ML10 environment.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by Trainer to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
meta_batch_size (int): Meta batch size.
n_epochs (int): Total number of epochs for training.
        episode_per_task (int): Number of training episodes per task.
"""
set_seed(seed)
with TFTrainer(snapshot_config=ctxt) as trainer:
ml10 = metaworld.ML10()
tasks = MetaWorldTaskSampler(ml10, 'train', lambda env, _: RL2Env(env))
test_task_sampler = SetTaskSampler(MetaWorldSetTaskEnv,
env=MetaWorldSetTaskEnv(
ml10, 'test'),
wrapper=lambda env, _: RL2Env(env))
meta_evaluator = MetaEvaluator(test_task_sampler=test_task_sampler)
env_updates = tasks.sample(10)
env = env_updates[0]()
env_spec = env.spec
policy = GaussianGRUPolicy(name='policy',
hidden_dim=64,
env_spec=env_spec,
state_include_action=False)
baseline = LinearFeatureBaseline(env_spec=env_spec)
algo = RL2PPO(meta_batch_size=meta_batch_size,
task_sampler=tasks,
env_spec=env_spec,
policy=policy,
baseline=baseline,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
optimizer_args=dict(batch_size=32,
max_optimization_epochs=10),
stop_entropy_gradient=True,
entropy_method='max',
policy_ent_coeff=0.02,
center_adv=False,
meta_evaluator=meta_evaluator,
episodes_per_trial=episode_per_task)
trainer.setup(algo,
tasks.sample(meta_batch_size),
sampler_cls=LocalSampler,
n_workers=meta_batch_size,
worker_class=RL2Worker,
worker_args=dict(n_episodes_per_trial=episode_per_task))
trainer.train(n_epochs=n_epochs,
batch_size=episode_per_task *
env_spec.max_episode_length * meta_batch_size)
rl2_ppo_metaworld_ml10()
| 38.62766
| 79
| 0.595153
|
5b6072163a85260bd478e300d75a5ebf011af9cf
| 256
|
py
|
Python
|
source/cornish/version.py
|
demitri/cornish
|
b1f37feac38edaab5543bcb8ccbd0aa26b87456d
|
[
"MIT"
] | 1
|
2020-11-10T18:59:00.000Z
|
2020-11-10T18:59:00.000Z
|
source/cornish/version.py
|
demitri/cornish
|
b1f37feac38edaab5543bcb8ccbd0aa26b87456d
|
[
"MIT"
] | 2
|
2020-08-03T22:52:46.000Z
|
2020-08-13T19:09:48.000Z
|
source/cornish/version.py
|
demitri/cornish
|
b1f37feac38edaab5543bcb8ccbd0aa26b87456d
|
[
"MIT"
] | 1
|
2020-02-23T19:17:20.000Z
|
2020-02-23T19:17:20.000Z
|
# Store the version here so:
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module
__version__ = '1.1'
# Ref: https://stackoverflow.com/a/16084844/2712652
| 32
| 60
| 0.730469
|
9c5078ea6e6f0469763693f508e041144a64fbae
| 4,208
|
py
|
Python
|
users/views.py
|
oss2019/crisp.ai
|
95fe0f0e09386b97037db6d790623394c8a81dc8
|
[
"MIT"
] | 7
|
2019-05-07T17:31:57.000Z
|
2021-07-06T15:08:14.000Z
|
users/views.py
|
oss2019/crisp.ai
|
95fe0f0e09386b97037db6d790623394c8a81dc8
|
[
"MIT"
] | 60
|
2019-05-04T08:52:37.000Z
|
2022-03-11T23:53:25.000Z
|
users/views.py
|
oss2019/crisp.ai
|
95fe0f0e09386b97037db6d790623394c8a81dc8
|
[
"MIT"
] | 21
|
2019-04-12T14:31:54.000Z
|
2019-09-29T09:51:20.000Z
|
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login
from django.core.mail import send_mail
from django.forms import ValidationError
from django.contrib.auth.models import User
from django.contrib.sites.shortcuts import get_current_site
from django.template.loader import render_to_string
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.utils.encoding import force_bytes, force_text
from .tokens import account_activation_token
from crispy_ai.settings import EMAIL_HOST_USER
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
from validate_email import validate_email
from django.contrib import messages
from admin.models import CourseModel, LectureModel
@login_required(login_url="/users/login/")
def home(request):
if request.user.is_superuser:
return redirect('admin_home')
courses = CourseModel.objects.all()
return render(request, './users/home.html', {'courses': courses, 'user': request.user})
def register_user(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
email = form.cleaned_data.get('email')
user = form.save(commit=False)
user.is_active = False
user.save()
current_site = get_current_site(request)
subject = 'Activate Your Crispy AI Account'
message = render_to_string('registration/account_activation_email.html', {
'user': user,
'domain': current_site.domain,
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'token': account_activation_token.make_token(user),
})
from_mail = EMAIL_HOST_USER
to_mail = [user.email]
send_mail(subject, message, from_mail, to_mail, fail_silently=False)
return render(request, 'registration/email_sent.html')
else:
form = UserRegisterForm()
return render(request, 'registration/register.html', {'form': form})
# When the user clicks the link in the activation email, this function runs.
def activate(request, uidb64, token):
try:
uid = force_text(urlsafe_base64_decode(uidb64))
user = User.objects.get(pk=uid)
except (TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
if user is not None and account_activation_token.check_token(user, token):
user.is_active = True
user.save()
return render(request, 'registration/account_activated.html')
else:
return render(request, 'registration/account_activation_invalid.html')
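# --- Hedged illustration (added; not part of the original views module) ---
# The activation email template is expected to build a link that resolves to
# activate() above. A urls.py entry along these lines would supply the
# uidb64/token arguments; the path and name used here are assumptions:
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('activate/<uidb64>/<token>/', views.activate, name='activate'),
#   ]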
@login_required(login_url="/users/login/")
def profile(request):
if request.method == "POST":
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profilemodel)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(request, f'Profile updated successfully!')
return redirect('profile')
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profilemodel)
return render(request, 'users/profile.html',
{'u_form': u_form, 'p_form': p_form, 'profile_pic_url': request.user.profilemodel.profile_image.url})
@login_required(login_url="/users/login/")
def course_display(request):
course = CourseModel.objects.first()
if request.method == "POST":
course_id = request.POST.get('course_id')
course = CourseModel.objects.get(pk=course_id)
lectures = LectureModel.objects.filter(course=course)
return render(request, './users/course.html', {'course': course, 'lectures': lectures})
def lecture_display(request):
lecture = LectureModel.objects.first()
if request.method == "POST":
lecture_id = request.POST.get('lecture_id')
lecture = LectureModel.objects.get(pk=lecture_id)
return render(request, './users/lecture.html', {'lecture': lecture})
| 40.461538
| 119
| 0.700095
|
49da520b3217dcc17773d251759759bdc542648a
| 669
|
py
|
Python
|
Symbol in Matrix.py
|
Lyubomir-Dakov/Python-Advanced
|
3b3b7181cc2bafc6f60329d6e42873d0f78b972f
|
[
"MIT"
] | null | null | null |
Symbol in Matrix.py
|
Lyubomir-Dakov/Python-Advanced
|
3b3b7181cc2bafc6f60329d6e42873d0f78b972f
|
[
"MIT"
] | null | null | null |
Symbol in Matrix.py
|
Lyubomir-Dakov/Python-Advanced
|
3b3b7181cc2bafc6f60329d6e42873d0f78b972f
|
[
"MIT"
] | null | null | null |
# import sys
# from io import StringIO
#
# input_1 = """3
# ABC
# DEF
# X!@
# !
# """
# input_2 = """4
# asdd
# xczc
# qwee
# qefw
# 4
# """
#
# sys.stdin = StringIO(input_2)
square_size = int(input())
matrix = []
for _ in range(square_size):
row = input()
matrix.append(row)
symbol = input()
found_symbol = False
row = 0
for i in matrix:
col = 0
for j in i:
if matrix[row][col] == symbol:
print(f"({row}, {col})")
found_symbol = True
col += 1
if found_symbol:
break
row += 1
if not found_symbol:
print(f"{symbol} does not occur in the matrix")
| 15.204545
| 52
| 0.5142
|
560bdbf688bc946ada679732db81a14fce0669ea
| 2,461
|
py
|
Python
|
pingo/intel/intel.py
|
pingo-io/pingo-py
|
5d7081f99ff13973404dc6361560f30ce8f7009c
|
[
"MIT"
] | 116
|
2015-05-06T17:49:22.000Z
|
2021-11-16T12:59:35.000Z
|
pingo/intel/intel.py
|
pingo-io/pingo-py
|
5d7081f99ff13973404dc6361560f30ce8f7009c
|
[
"MIT"
] | 49
|
2015-05-08T23:18:05.000Z
|
2017-07-12T17:11:48.000Z
|
pingo/intel/intel.py
|
pingo-io/pingo-py
|
5d7081f99ff13973404dc6361560f30ce8f7009c
|
[
"MIT"
] | 47
|
2015-05-04T07:42:04.000Z
|
2021-08-04T20:49:54.000Z
|
import pingo
mraa = None
class BaseMraa(pingo.Board, pingo.AnalogInputCapable, pingo.PwmOutputCapable):
_import_error_msg = 'pingo.intel.BaseMraa requires mraa installed'
def __init__(self):
global mraa
try:
import mraa as mraa
except ImportError:
raise ImportError(self._import_error_msg)
        super(BaseMraa, self).__init__()
self.PIN_MODES = {
pingo.IN: mraa.DIR_IN,
pingo.OUT: mraa.DIR_OUT,
}
self.PIN_STATES = {
pingo.HIGH: 1,
pingo.LOW: 0,
}
pwm_pin_numbers = [3, 5, 6, 9, 10, 11, 13]
digital_pin_numbers = [1, 2, 4, 7, 8, 12]
self._add_pins(
[pingo.PwmPin(self, location)
for location in pwm_pin_numbers] +
[pingo.DigitalPin(self, location)
for location in digital_pin_numbers] +
[pingo.AnalogPin(self, 'A' + location, 12)
for location in '012345']
)
self.mraa_pins, self.mraa_analogs, self.mraa_pwms = {}, {}, {}
def _set_digital_mode(self, pin, mode):
if pin.mode == pingo.PWM:
self.mraa_pwms[pin.location].enable(False)
self.mraa_pins[pin.location] = mraa.Gpio(pin.location)
self.mraa_pins[pin.location].dir(self.PIN_MODES[mode])
def _set_analog_mode(self, pin, mode):
mraa_id = int(pin.location[1])
self.mraa_analogs[pin.location] = mraa.Aio(mraa_id)
def _set_pwm_mode(self, pin, mode):
if pin.mode == pingo.IN:
self.mraa_pins[pin.location].dir(mraa.DIR_OUT)
self.mraa_pwms[pin.location] = mraa.Pwm(pin.location)
self.mraa_pwms[pin.location].enable(True)
def _set_pin_state(self, pin, state):
self.mraa_pins[pin.location].write(self.PIN_STATES[state])
def _get_pin_state(self, pin):
value = self.mraa_pins[pin.location].read()
return pingo.HIGH if value == 1 else pingo.LOW
def _get_pin_value(self, pin):
return self.mraa_analogs[pin.location].read()
def _set_pwm_duty_cycle(self, pin, value):
self.mraa_pwms[pin.location].write(value)
def _set_pwm_frequency(self, pin, value):
raise NotImplementedError
class Galileo2(BaseMraa):
_import_error_msg = 'pingo.intel.Galileo2 requires mraa installed'
class Edison(BaseMraa):
_import_error_msg = 'pingo.intel.Edison requires mraa installed'
| 29.297619
| 78
| 0.624137
|
8f25b3c9bfa38a3a07c72c28248c700fbdc95ada
| 30,285
|
py
|
Python
|
services/connectors/fronius-modbus-connector/source/connector/main.py
|
fzi-forschungszentrum-informatik/BEMCom
|
0a0c359d889c6d5975e4d4d3b17c24adb5bf883b
|
[
"MIT"
] | 4
|
2021-09-10T09:46:18.000Z
|
2021-12-05T17:55:14.000Z
|
services/connectors/fronius-modbus-connector/source/connector/main.py
|
fzi-forschungszentrum-informatik/BEMCom
|
0a0c359d889c6d5975e4d4d3b17c24adb5bf883b
|
[
"MIT"
] | null | null | null |
services/connectors/fronius-modbus-connector/source/connector/main.py
|
fzi-forschungszentrum-informatik/BEMCom
|
0a0c359d889c6d5975e4d4d3b17c24adb5bf883b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
__version__="0.1.0"
import os
import json
import struct
import logging
from time import sleep
from fastnumbers import fast_real
from dotenv import load_dotenv, find_dotenv
from pymodbus.client.sync import ModbusTcpClient
from pymodbus.exceptions import ModbusException
from pymodbus.payload import BinaryPayloadBuilder
from pymodbus.constants import Endian
from pyconnector_template.pyconnector_template import SensorFlow as SFTemplate
from pyconnector_template.pyconnector_template import ActuatorFlow as AFTemplate
from pyconnector_template.pyconnector_template import Connector as CTemplate
from pyconnector_template.dispatch import DispatchInInterval
logger = logging.getLogger("pyconnector")
class SensorFlow(SFTemplate):
"""
Bundles all functionality to handle sensor messages.
This is a template for a SensorFlow class, i.e. one that holds all
functions that are necessary to handle messages from the device(s)
towards the message broker. The methods could also be implemented
    into the Connector class, but are separated to support clarity.
Overload these functions
------------------------
In order to transform this class into operational code you need
to inherit from it and overload the following methods:
- receive_raw_msg
- parse_raw_msg
Connector Methods
-----------------
The connector must provide the following methods to allow correct
operation of the methods in this class:
- _update_available_datapoints
Connector Attributes
--------------------
The following attributes must be set up by the connector to
allow these methods to run correctly:
mqtt_client : class instance.
Initialized Mqtt client library with signature of paho mqtt.
SEND_RAW_MESSAGE_TO_DB : string
if SEND_RAW_MESSAGE_TO_DB == "TRUE" will send raw message
to designated DB via MQTT.
MQTT_TOPIC_RAW_MESSAGE_TO_DB : string
The topic which on which the raw messages will be published.
datapoint_map : dict of dict.
Mapping from datapoint key to topic. Is generated by the AdminUI.
        Looks e.g. like this:
datapoint_map = {
"sensor": {
"Channel__P__value__0": "example-connector/msgs/0001",
"Channel__P__unit__0": "example-connector/msgs/0002",
},
"actuator": {
"example-connector/msgs/0003": "Channel__P__setpoint__0",
}
}
    Note thereby that the keys "sensor" and "actuator" must always be
present, even if the child dicts are empty.
"""
def receive_raw_msg(self, raw_data=None):
"""
        Receive raw data from the Modbus master device using ModbusTCP.
Poll the Modbus master device for the data specified in MODBUS_CONFIG.
Parameters
----------
raw_data : TYPE, optional
Not used, here as we poll for data. Kept for consistency.
Returns
-------
msg : dict
The message object containing the raw unprocessed data.
            Should be formatted like this:
msg = {
"payload": {
"raw_message": <the raw data>
}
}
E.g.
msg = {
"payload": {
"raw_message": "device_1:{sensor_1:2.12,sensor_2:3.12}"
}
}
"""
if not hasattr(self, "modbus_connection"):
# Establish connection to modbus master device.
logger.debug(
"Connecting to Modbus master %s:%s",
*(self.modbus_master_ip, self.modbus_master_port)
)
self.modbus_connection = ModbusTcpClient(
host=self.modbus_master_ip, port=self.modbus_master_port
)
if not self.modbus_connection.connect():
raise RuntimeError("Could not connect to Modbus master.")
# Read all data requested in configuration.
raw_message = {k: {} for k in self.modbus_read_method_names}
for read_method_name in self.modbus_read_method_names:
read_method = getattr(self.modbus_connection, read_method_name)
requested_ranges = self.modbus_config[read_method_name]
# requested_range is an entry like:
# {
# "address": 19000,
# "count": 20,
# "unit": 1,
# "datatypes": ">ffffffffff",
# },
for i, requested_range in enumerate(requested_ranges):
logger.debug(
"Using method %s to request data from address %s with "
"count %s from unit %s.",
*(
read_method_name,
requested_range["address"],
requested_range["count"],
requested_range["unit"],
)
)
retry = 0
while True:
response = read_method(
address=requested_range["address"],
count=requested_range["count"],
unit=requested_range["unit"],
)
if isinstance(response, BaseException) or response.isError():
                        # This branch handles a failed read: we wait a bit and
                        # retry a few times before we finally give up. If we
                        # have retried too often we raise an exception and
                        # exit.
logger.info(
"Reading from modbus device failed with function "
" %s for address %s. Retrying in %s seconds. "
"Error was: %s",
*(
read_method_name,
requested_range["address"],
self.retry_wait,
str(response),
)
)
retry += 1
if retry >= self.max_retries:
raise RuntimeError(
"Max number of retries exceeded.")
sleep(self.retry_wait)
continue
# Pack the registers/coils into the raw message.
elif "_registers" in read_method_name:
raw_message[read_method_name][i] = response.registers
else:
raw_message[read_method_name][i] = response.bits
break
# Maybe wait a bit before next request.
sleep(self.poll_break)
if self.disconnect_between_polls:
logger.debug("Disconnecting from Modbus Master.")
self.modbus_connection.close()
# This is required so we create a new connection next poll.
delattr(self, "modbus_connection")
msg = {
"payload": {
"raw_message": raw_message
}
}
return msg
def parse_raw_msg(self, raw_msg):
"""
        Parse a raw message received from the device.
        Transforms the raw data into the format expected by run_sensor_flow.
        If the device/gateway uses some protocol that pushes data, the raw
        data should be passed as the raw_data argument to the function.
Be aware: All keys in the output message should be strings. All values
        should be converted to strings, too.
Parameters
----------
raw_msg : dict.
            Raw msg with data from device/gateway. Should be formatted like:
msg = {
"payload": {
"raw_message": <the raw data>,
"timestamp": <milliseconds since epoch>
}
}
Returns
-------
msg : dict
            The message object containing the parsed data as a python
            dict-of-dicts structure.
            Should be formatted like this:
msg = {
"payload": {
"parsed_message": <the parsed data as object>,
"timestamp": <milliseconds since epoch>
}
}
E.g:
msg = {
"payload": {
"parsed_message": {
"device_1": {
"sensor_1": "2.12",
"sensor_2": "3.12"
}
},
"timestamp": 1573680749000
}
}
"""
raw_message = raw_msg["payload"]["raw_message"]
parsed_message = {}
for read_method_name in raw_message:
parsed_message[read_method_name] = {}
modbus_config_for_method = self.modbus_config[read_method_name]
mbas_for_method = self.modbus_addresses[read_method_name]
for i in raw_message[read_method_name]:
# Load modbus addresses as strings as other stuff is loaded
# from JSON and expects strings too.
mbas = [str(m) for m in mbas_for_method[i]]
if "_registers" in read_method_name:
registers = raw_message[read_method_name][i]
datatypes = modbus_config_for_method[i]["datatypes"]
                    # Now we are going to encode the registers (which are
# currently represented as 16bit int values) to bytes so
# we can decode the data back with the correct datatype.
# This approach is taken from the pymodbus code, which
# does the same but doesn't allow to decode all of the
# values at once.
values_b = b''.join(struct.pack('!H', x)
for x in registers)
try:
values = struct.unpack(datatypes, values_b)
except struct.error:
logger.error(
"Unpacking binary data with struct failed. "
"The Modbus request returned %s registers, aka "
"%s bytes. However, you may have configured "
"datatypes in MODBUS_CONFIG corresponding to "
"a different number of bytes, see struct.error "
"below.",
*(len(registers), len(registers)*2)
)
raise
else:
values = []
for value in raw_message[read_method_name][i]:
if value:
values.append("1")
else:
values.append("0")
# Store each value under it's Modbus address.
# This may overwrite values if overlapping address
# ranges have been specified by the user.
# Also apply scaling factors while we are here, but only
# to registers. Scaling bits doesn't make sense, even if the
# user would request it.
sfs = {}
if (
"scaling_factors" in modbus_config_for_method[i] and
"_registers" in read_method_name
):
sfs = modbus_config_for_method[i]["scaling_factors"]
for mba, value in zip(mbas, values):
if mba in sfs:
try:
scaled_value = float(value) * sfs[mba]
value = str(scaled_value)
except ValueError:
pass
# All values are handled as strings in BEMCom.
parsed_message[read_method_name][mba] = str(value)
msg = {
"payload": {
"parsed_message": parsed_message,
"timestamp": raw_msg["payload"]["timestamp"],
}
}
return msg
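# --- Hedged illustration (added; not part of the original module) ---
# parse_raw_msg() above packs the 16-bit register values back into bytes and
# lets struct decode them with the format string from MODBUS_CONFIG. The same
# round trip, standalone, for two registers holding one big-endian float
# (the register values are made up for this example):
#
#   import struct
#   registers = [16457, 4059]                       # as returned by pymodbus
#   raw = b''.join(struct.pack('!H', r) for r in registers)
#   value = struct.unpack('>f', raw)[0]             # -> approximately 3.14159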
class ActuatorFlow(AFTemplate):
"""
Bundles all functionality to handle actuator messages.
This is a template for a ActuatorFlow class, i.e. one that holds all
functions that are necessary to handle messages from the message
broker towards the devices/gateway. The methods could also be implemented
    into the Connector class, but are separated to support clarity.
Overload these functions
------------------------
In order to transform this class into operational code you need
to inherit from it and overload the following methods:
- send_command
Connector Attributes
--------------------
The following attributes must be set up by the connector to
allow these methods to run correctly:
datapoint_map : dict of dict.
Mapping from datapoint key to topic. Is generated by the AdminUI.
        Looks e.g. like this:
datapoint_map = {
"sensor": {
"Channel__P__value__0": "example-connector/msgs/0001",
"Channel__P__unit__0": "example-connector/msgs/0002",
},
"actuator": {
"example-connector/msgs/0003": "Channel__P__setpoint__0",
}
}
    Note thereby that the keys "sensor" and "actuator" must always be
present, even if the child dicts are empty.
"""
def send_command(self, datapoint_key, datapoint_value):
"""
Send message to target device, via gateway if applicable.
TODO: Extend for writing stuff.
FC15: write_coils
FC16: write_registers
Parameters
----------
datapoint_key : string.
The internal key that is used by device/gateway to identify
the datapoint.
value : string.
The value that should be sent to the datapoint.
"""
        # Example: datapoint_key = "write_coil__10__1" (method__address__unit)
if not hasattr(self, "modbus_connection"):
# Establish connection to modbus master device.
logger.debug(
"Connecting to Modbus master %s:%s",
*(self.modbus_master_ip, self.modbus_master_port)
)
self.modbus_connection = ModbusTcpClient(
host=self.modbus_master_ip, port=self.modbus_master_port
)
if not self.modbus_connection.connect():
raise RuntimeError("Could not connect to Modbus master.")
# Parse write_method_name and coil/register number from datapoint_key
modbus_datatype = ""
modbus_method = datapoint_key.split("__")[0]
modbus_reg = int(datapoint_key.split("__")[1])
modbus_unit = int(datapoint_key.split("__")[2])
if "register" in modbus_method:
modbus_datatype = self.method_range[modbus_method][str(modbus_reg)]["datatypes"]
modbus_scaling_factor = self.method_range[modbus_method][str(modbus_reg)]["scaling_factor"]
# Apply scaling_factor if provided.
        # Parse datatype for registers and pack value as binary.
# Parse coil values ("0" and "1") as Bools (True/False)
if "coil" in modbus_method:
values = False if int(datapoint_value) == 0 else True
else:
builder = BinaryPayloadBuilder(byteorder=modbus_datatype[0], wordorder=Endian.Big)
builder.reset()
p_string = builder._pack_words(modbus_datatype[1:], fast_real(datapoint_value)*modbus_scaling_factor)
builder._payload.append(p_string)
values = builder.to_registers()[0]
# Call write_method with parsed (aka. decoded)
write_method = getattr(self.modbus_connection, modbus_method)
try:
response = write_method(
address=modbus_reg,
value=values,
unit=modbus_unit,
)
logger.debug(
"Sent %s to register %s in unit %s",
*(values, modbus_reg, modbus_unit )
)
logger.debug("Received response for send_command: %s", response)
register_wmaxlim_pct_env = os.getenv('MODBUS_W_MAX_LIM_PCT')
if register_wmaxlim_pct_env:
register_wmaxlimpct = int(register_wmaxlim_pct_env)
else:
register_wmaxlimpct = 40242
register_wmaxlim_ena_env = os.getenv('MODBUS_W_MAX_LIM_ENA')
if register_wmaxlim_ena_env:
register_wmaxlim_ena = int(register_wmaxlim_ena_env)
else:
register_wmaxlim_ena = 40246
if modbus_reg == register_wmaxlimpct:
logger.info("Power limit to change (WMaxLimPct). Writing additional register WMaxLim_Ena to ensure power control via Modbus is enabled.")
builder = BinaryPayloadBuilder(byteorder=modbus_datatype[0], wordorder=Endian.Big)
builder.reset()
p_string = builder._pack_words(modbus_datatype[1:], 1)
builder._payload.append(p_string)
values = builder.to_registers()[0]
response = write_method(
address=register_wmaxlim_ena,
value=values,
unit=modbus_unit,
)
logger.debug(
"Sent %s to register %s in unit %s",
*(values, modbus_reg, modbus_unit )
)
logger.debug("Received response for send_command: %s", response)
finally:
# Maybe wait a bit before next request.
sleep(self.poll_break)
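# --- Hedged illustration (added; not part of the original module) ---
# send_command() above derives everything from the datapoint key, which
# compute_actuator_datapoints() builds as "<write_method>__<address>__<unit>":
#
#   "write_register__40242__1" -> write_register(address=40242, ..., unit=1)
#   "write_coil__10__1"        -> coil 10 on unit 1; "0"/"1" become False/True
#
# Register values are scaled and packed via BinaryPayloadBuilder using the
# "datatypes" format configured for that register; coils are written as bools.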
class Connector(CTemplate, SensorFlow, ActuatorFlow):
"""
The generic logic of the connector.
It should not be necessary to overload any of these methods nor
to call any of those apart from __init__() and run().
Configuration Attributes
------------------------
    Configuration will be populated from environment variables on init.
CONNECTOR_NAME : string
The name of the connector instance as seen by the AdminUI.
MQTT_TOPIC_LOGS : string
The topics used by the log handler to publish log messages on.
MQTT_TOPIC_HEARTBEAT : string
The topics used by the connector to publish heartbeats on.
MQTT_TOPIC_AVAILABLE_DATAPOINTS : string
The topic on which the available datapoints will be published.
MQTT_TOPIC_DATAPOINT_MAP : string
The topic the connector will listen on for datapoint maps
SEND_RAW_MESSAGE_TO_DB : string
if SEND_RAW_MESSAGE_TO_DB == "TRUE" will send raw message
to designated DB via MQTT. This is a string and not a bool as
environment variables are always strings.
MQTT_TOPIC_RAW_MESSAGE_TO_DB : string
The topic which on which the raw messages will be published.
DEBUG : string
if DEBUG == "TRUE" will log debug message to, elso loglevel is info.
MODBUS_MASTER_IP : string
        The IP address or DNS name of the Modbus master device which we want
to connect to.
MODBUS_MASTER_PORT : string (as env variables are always strings)
The port on which the master device awaits Modbus communication.
Computed Attributes
-------------------
    These attributes are created by init and are then dynamically used
by the Connector.
mqtt_client : class instance.
Initialized Mqtt client library with signature of paho mqtt.
available_datapoints : dict of dict.
Lists all datapoints known to the connector and is sent to the
AdminUI. Actuator datapoints must be specified manually. Sensor
datapoints are additionally automatically added once a value for
        a new datapoint is received. The object maps the connector's
        internal key to a sample value and looks e.g. like this:
available_datapoints = {
"sensor": {
"Channel__P__value__0": 0.122,
"Channel__P__unit__0": "kW",
},
"actuator": {
"Channel__P__setpoint__0": 0.4,
}
}
datapoint_map : dict of dict.
Mapping from datapoint key to topic. Is generated by the AdminUI.
        Looks e.g. like this:
datapoint_map = {
"sensor": {
"Channel__P__value__0": "example-connector/msgs/0001",
"Channel__P__unit__0": "example-connector/msgs/0002",
},
"actuator": {
"example-connector/msgs/0003": "Channel__P__setpoint__0",
}
}
    Note thereby that the keys "sensor" and "actuator" must always be
present, even if the child dicts are empty.
"""
def __init__(self, *args, **kwargs):
"""
Init the inherited code from python_connector_template and add
function to parse the special environment variable args to configure
this connector.
TODO: Add support for swapping ordering of bits? Seems like our code
works if byte order and bit are identical, i.e. both Big endian or
both little endian. Is there a use case for other scenarios?
"""
# dotenv allows us to load env variables from .env files which is
        # convenient for developing. If you set override to True tests
# may fail as the tests assume that the existing environ variables
        # take priority over the ones defined in the .env file.
load_dotenv(find_dotenv(), verbose=True, override=False)
# We need to specify a dispatcher that triggers the connection with
# the device or gateway. Here we want to poll the device with the
# interval set in the POLL_SECONDS environment variable.
kwargs["DeviceDispatcher"] = DispatchInInterval
kwargs["device_dispatcher_kwargs"] = {
"call_interval": float(os.getenv("POLL_SECONDS"))
}
# CTemplate.__init__(self, *args, **kwargs)
self.modbus_master_ip = os.getenv("MODBUS_MASTER_IP")
self.modbus_master_port = int(os.getenv("MODBUS_MASTER_PORT"))
self.modbus_config = self.parse_modbus_config(
config_json_str=os.getenv("MODBUS_CONFIG")
)
self.modbus_addresses = self.compute_addresses(
modbus_config=self.modbus_config
)
self.modbus_read_method_names = [
k for k in self.modbus_config if "read_" in k
]
# add write methods to connector
self.modbus_write_method_names = [
k for k in self.modbus_config if "write_" in k
]
self.method_range = self.compute_method_ranges()
kwargs["available_datapoints"] = {
"sensor": {},
"actuator": self.compute_actuator_datapoints()
}
CTemplate.__init__(self, *args, **kwargs)
self.max_retries = int(os.getenv("MODBUS_MAX_RETRIES") or 3)
self.retry_wait = int(os.getenv("MODBUS_RETRY_WAIT_SECONDS") or 15)
self.poll_break = float(os.getenv("MODBUS_POLL_BREAK") or 0)
self.disconnect_between_polls = False
if os.getenv("MODBUS_DISCONNECT_BETWEEN_POLLS") == "TRUE":
self.disconnect_between_polls = True
def compute_actuator_datapoints(self):
actuator_temp = {}
for write_method in self.modbus_write_method_names:
for requested_range in self.modbus_config[write_method]:
if "_coil" in write_method:
datapoint = write_method + "__" + \
str(requested_range["address"]) + \
"__" + str(requested_range["unit"])
actuator_temp.update({datapoint: "0"})
elif "_register" in write_method:
datapoint = write_method + "__" + \
str(requested_range["address"]) + "__" + str(requested_range["unit"])
actuator_temp.update({datapoint: "0"})
return actuator_temp
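        # Note (illustrative values): for a "write_register" entry like
        # {"address": 40242, "unit": 1}, the datapoint key built above is
        # "write_register__40242__1", i.e. <write method>__<address>__<unit>.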
def compute_method_ranges(self):
method_range_temp = {}
for write_method in self.modbus_write_method_names:
method_range_temp.update({write_method: {}})
for requested_range in self.modbus_config[write_method]:
if "_register" in write_method:
method_range_temp[write_method].update({str(requested_range["address"]): {}})
method_range_temp[write_method][str(requested_range["address"])].update(
{ "unit": requested_range["unit"],
"scaling_factor": requested_range["scaling_factor"],
"datatypes": requested_range["datatypes"]
}
)
elif "_coil" in write_method:
method_range_temp[write_method].update({str(requested_range["address"]): {}})
method_range_temp[write_method][str(requested_range["address"])].update(
{
"unit": requested_range["unit"]
}
)
return method_range_temp
@staticmethod
def parse_modbus_config(config_json_str):
"""
Parse and verify the configuration JSON string.
This also removes unexpected components of the config.
        Arguments:
-----------
config_json_str : string
The MODBUS_CONFIG JSON string as defined in the Readme.
Returns:
--------
config : dict
The parsed version of the input.
"""
logger.info("Parsing MODBUS_CONFIG.")
expected_config_keys = [
"read_coils",
"read_discrete_inputs",
"read_holding_registers",
"read_input_registers",
"write_register",
"write_coil"
]
config = json.loads(config_json_str)
for config_key in list(config.keys()):
if config_key not in expected_config_keys:
logger.warning(
"Found unexpected key in MODBUS_CONFIG: %s"
"The corresponding values are:\n%s",
*(config_key, json.dumps(config[config_key], indent=2))
)
del config[config_key]
return config
@staticmethod
def compute_addresses(modbus_config):
"""
Compute the corresponding register and coil addresses to the address
ranges specified by the user in MODBUS_CONFIG.
Arguments:
----------
modbus_config : dict
As returned by parse_modbus_config.
"""
# These are the Modbus functions (supported by the connector)
# that interact with registers.
method_names = [
"read_coils",
"read_discrete_inputs",
"read_holding_registers",
"read_input_registers"
]
        # This is the mapping from the struct format characters to the Modbus
# register count, that is how many registers are filled with that
# variable. See also:
# https://docs.python.org/3/library/struct.html#format-characters
char_register_size = {
"c": 1,
"b": 1,
"B": 1,
"?": 1,
"h": 1,
"H": 1,
"i": 2,
"I": 2,
"l": 2,
"L": 2,
"q": 4,
"Q": 4,
"e": 1,
"f": 2,
"d": 4,
}
addresses = {}
for method_name in method_names:
if method_name not in modbus_config:
continue
addresses[method_name] = {}
requested_ranges = modbus_config[method_name]
if "register" in method_name:
for i, requested_range in enumerate(requested_ranges):
# The first value starts at the start of the
# address range.
range_addresses = []
current_address = requested_range["address"]
for datatype_char in requested_range["datatypes"]:
if datatype_char not in char_register_size:
                            # Ignore chars defining endianness or padding.
continue
                        # Append the address at which this value starts and add its
# length so we get the starting address of the next
# value.
range_addresses.append(current_address)
current_address += char_register_size[datatype_char]
# Finally store the addresses of this range under the
# index the range has in the config.
addresses[method_name][i] = range_addresses
else:
# Handling for read_discrete_inputs and read_coils methods,
                # is actually quite simple as every value is exactly one bit
# long :)
for i, requested_range in enumerate(requested_ranges):
start = requested_range["address"]
count = requested_range["count"]
addresses[method_name][i] = list(range(start, start+count))
return addresses
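# ---------------------------------------------------------------------------
# Hedged example (not part of the original module): a minimal MODBUS_CONFIG
# value of the shape parse_modbus_config() and compute_addresses() expect.
# Only the fields used by the helpers in this module are shown, and the
# concrete addresses, datatypes and scaling factors are invented for
# illustration; the authoritative schema is the one documented in the Readme.
_EXAMPLE_MODBUS_CONFIG = """
{
    "read_input_registers": [
        {"address": 100, "unit": 1, "datatypes": ">hf", "scaling_factor": 1}
    ],
    "write_register": [
        {"address": 40242, "unit": 1, "datatypes": ">h", "scaling_factor": 1}
    ]
}
"""
# With the range above, compute_addresses() walks the struct format ">hf":
# ">" only sets byte order, "h" occupies one register starting at 100 and "f"
# occupies two registers starting at 101, so
# addresses["read_input_registers"][0] == [100, 101].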
if __name__ == "__main__":
connector = Connector(version=__version__)
connector.run()
| 40.542169
| 153
| 0.55473
|
3be00669e611d58e8f5dd89176fbc8e94b07bb3e
| 11,317
|
py
|
Python
|
docs/build/lib/generativepoetry/poemgen.py
|
coreybobco/generativepoetrypy
|
b4a5f117157d55c6314309ff6dfe9dbeb27a5ebf
|
[
"MIT"
] | 25
|
2019-07-23T03:05:59.000Z
|
2022-03-21T20:56:16.000Z
|
docs/build/lib/generativepoetry/poemgen.py
|
coreybobco/generativepoetrypy
|
b4a5f117157d55c6314309ff6dfe9dbeb27a5ebf
|
[
"MIT"
] | 3
|
2019-07-30T00:57:48.000Z
|
2020-04-08T17:17:37.000Z
|
docs/build/lib/generativepoetry/poemgen.py
|
coreybobco/procedural-poetry
|
b4a5f117157d55c6314309ff6dfe9dbeb27a5ebf
|
[
"MIT"
] | 3
|
2020-10-10T18:52:36.000Z
|
2021-08-16T02:11:45.000Z
|
import itertools
from typing import List, Optional
from .lexigen import *
from .markov import StochasticJolasticWordGenerator
from .utils import too_similar
class Poem:
def __init__(self, input_words, words_for_sampling):
self.input_words = input_words
self.words_for_sampling = words_for_sampling
self.title = "'".join(input_words)
self.lines: List[str] = []
def __str__(self):
return self.raw_text
def update(self):
self.raw_text = '\n'.join(self.lines)
@property
def previous_line(self):
if len(self.lines):
return self.lines[-1]
return ''
class PoemGenerator:
def __init__(self):
self.default_connectors = [' ', ' ', '... ', random.choice([' & ', ' and ']), ' or ', ' or ']
self.line_enders = ['.', ', ', '!', '?', '', ' or', '...']
self.markov_line_enders = ['', '', ',', ',', '!', '.', '?']
self.line_indents = ['', ' ', ' ']
self.poem = None
def poem_line_from_markov(self, starting_word: str, num_words: int = 4, rhyme_with: Optional[str] = None,
words_for_sampling: List[str] = [], max_line_length: Optional[int] = 35) -> str:
"""Generate a line of poetry using a markov chain that optionally tries to make a line rhyme with the last one
Different algorithms handle the last word and all the other words: both algorithms use a mix of random
probability and process stopwords differently to keep the generated text interesting and non-repetitive.
:param starting_word: the input word for the Markov algorithm, which hence is also the poem line's first word
:param num_words: the number of words to write in the poem line
:param rhyme_with: an optional word to try to make the poem line rhyme with. The algorithm will try something
else if this word is a common stopword or if it can't find a rhyme though.
:param words_for_sampling: a list of other words to throw in to the poem. If you don't know what to pass here,
phonetically related words to the starting word probably adds some sonority.
:param max_line_length: an upper limit in characters for the line -- important for PDF generation to keep
everything on the page.
"""
output_words, previous_word = [starting_word], starting_word
markovgen = StochasticJolasticWordGenerator(previous_lines=self.poem.lines)
for i in range(num_words - 1):
if (i == num_words - 2) or (max_line_length and (max_line_length > 14 and
len(' '.join(output_words)) >= max_line_length - 14)):
                # Check if it's the last word -- the limit can be determined by either word count or character count
max_word_length = 12 if max_line_length else None
word = markovgen.last_word_of_markov_line(output_words, rhyme_with=rhyme_with,
max_length=max_word_length)
output_words.append(word)
break
else:
word = markovgen.nonlast_word_of_markov_line(output_words, words_for_sampling=words_for_sampling)
output_words.append(word)
correct_a_vs_an(output_words)
return " ".join(output_words)
def poem_from_markov(self, input_words, num_lines=10, min_line_words: int = 5, max_line_words: int = 9,
max_line_length: Optional[int] = 35) -> str:
"""Generate a line of poetry using a markov chain that optionally tries to make a line rhyme with the last one
Different algorithms handle the last word and all the other words: both algorithms use a mix of random
probability and process stopwords differently to keep the generated text interesting and non-repetitive.
        :param input_words: the user-provided words to try making a poem from
:param num_lines: the number of lines the poem will have
:param max_line_words: the maximum number of words a line may have
        :param min_line_words: the minimum number of words a line may have
:param max_line_length: an upper limit in characters for the line -- important for PDF generation to keep
everything on the page.
"""
self.poem = None
words_for_sampling = input_words + phonetically_related_words(input_words, limit_results_per_input_word=20)
# Check for undesirable similarity overlap in the words for sampling list
similarity_checks = list(itertools.combinations(words_for_sampling, 2))
words_removed = []
for word_pair in similarity_checks:
if not(word_pair[0] in words_removed or word_pair[1] in words_removed) and \
too_similar(word_pair[0], word_pair[1]):
words_removed.append(random.choice([word_pair[0], word_pair[1]]))
words_for_sampling.remove(words_removed[-1])
self.poem = Poem(input_words, words_for_sampling)
last_line_last_word = ''
random.shuffle(words_for_sampling)
line_enders = []
print("\n")
for i in range(num_lines):
rhyme_with = last_line_last_word if i % 2 == 1 else None
            # 60% chance the line starts with an input word or something related, 40% with a common word
line_starter = words_for_sampling.pop() if random.random() > .4 else \
random.choice(StochasticJolasticWordGenerator.common_words)
while i >= 1 and too_similar(line_starter, self.poem.lines[i - 1].split(' ')[0]):
# while statement prevents repetition of line starters
line_starter = words_for_sampling.pop() if random.random() > .4 else \
random.choice(StochasticJolasticWordGenerator.common_words)
line = self.poem_line_from_markov(line_starter, words_for_sampling=words_for_sampling,
num_words=random.randint(min_line_words, max_line_words),
rhyme_with=rhyme_with, max_line_length=max_line_length)
self.poem.lines.append(line)
last_line_last_word = line.split(' ')[-1]
# Directly adding line ender to line now will screw up rhyme pairs so save it & add it in another iteration
line_enders.append(random.choice(self.markov_line_enders))
print(line + line_enders[-1])
for i, line in enumerate(self.poem.lines):
self.poem.lines[i] += line_enders[i]
poem = self.poem
return poem
def poem_line_from_word_list(self, word_list: List[str], max_line_length=35, connectors: List[str] = []) -> str:
"""Generate a line of a visual poem from a list of words by gluing them together with random connectors
(whitespace, conjunctions, punctuation, and symbols).
:param word_list: the words that will be used (in order, not randomly) that will form a visual poem
:param max_line_length: upper limit on the length of the return value in characters
:param connectors (list): list of glue strings
"""
connectors = connectors if len(connectors) else self.default_connectors
output, last_word = word_list[0], word_list[0]
last_connector = ''
for word in word_list[1:]:
if random.random() < (
.2 + len(output) / 100): # Increasing probability of line termination as line gets longer
break
if too_similar(last_word, word):
continue
connector = random.choice(connectors)
while connector == last_connector:
connector = random.choice(connectors)
if len(output + connector + word) <= max_line_length:
output += connector + word
last_word = word
last_connector = connector
return output
def poem_from_word_list(self, input_word_list: List[str], num_lines: int = 6, max_line_length: int = 35,
connectors: List[str] = [], limit_line_to_one_input_word: bool = False):
"""Generate a visual poem from a list of words by taking a given input word list, adding the phonetically
related words to that word list, and then using those words to create a visual/concrete poem.
:param input_word_list: the list of user-provided words that will be used, along with phonetically related
words, to generate a poem
:param max_line_length: upper limit on the length of the return value in characters
:param max_line_length: upper limit on length of poem lines (excluding line ending punctuation) in characters
:param connectors: list of glue strings
:param limit_line_to_one_input_word: If true, when generating a line of poetry, only use words that are
phonetically related to one input word.
"""
connectors = self.default_connectors if not len(connectors) else connectors
output, line_indent = '', ''
if limit_line_to_one_input_word:
for i in range(num_lines - 1):
linked_word = random.choice(input_word_list)
output += self.poem_line_from_word_list(phonetically_related_words(linked_word), connectors=connectors,
max_line_length=max_line_length)
line_indent = random.choice(self.line_indents) if line_indent == '' else \
random.choice([li for li in self.line_indents if li is not line_indent]) # Don't repeat the same indent 2x
output += random.choice(self.line_enders) + '\n' + line_indent
else:
word_list = input_word_list.copy()
for word in input_word_list:
word_list.extend(phonetically_related_words(word))
for i in range(num_lines - 1):
random.shuffle(word_list)
output += self.poem_line_from_word_list(word_list, connectors=connectors,
max_line_length=max_line_length)
# Don't repeat the same indent 2x
line_indent = random.choice(self.line_indents) if line_indent == '' else \
random.choice([li for li in self.line_indents if li is not line_indent])
output += random.choice(self.line_enders) + '\n' + line_indent
output += random.choice(input_word_list[:-1]) + ' ' + input_word_list[-1]
return output
def print_poem(poem: str):
"""Print the poem with a newline before and after so it's easy to take a screenshot of its 'recipe' and the poem
in your terminal and share it. :)
:param poem: the poem, as a string, to be printed
"""
print('\n')
print(poem)
print('\n')
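# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): how the pieces above
# are typically wired together. Running it needs the word/rhyme resources used
# by lexigen and the Markov generator, so treat it as an illustration rather
# than a guaranteed-working script; the seed words are invented.
if __name__ == '__main__':
    generator = PoemGenerator()
    # Visual/concrete poem glued together from phonetically related words.
    print_poem(generator.poem_from_word_list(['river', 'lantern'], num_lines=4))
    # Markov-chain poem; poem_from_markov returns a Poem object, and update()
    # refreshes its raw_text from the generated lines.
    markov_poem = generator.poem_from_markov(input_words=['river', 'lantern'], num_lines=6)
    markov_poem.update()
    print_poem(str(markov_poem))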
| 57.156566
| 127
| 0.625873
|
02556791f4b2dd6aef7a9a5ab2a8387a25107f84
| 20,408
|
py
|
Python
|
ax/models/torch/tests/test_surrogate.py
|
lyhyl/Ax
|
44384a0cb1a622c9e395c95f683cfee25c7b61f6
|
[
"MIT"
] | null | null | null |
ax/models/torch/tests/test_surrogate.py
|
lyhyl/Ax
|
44384a0cb1a622c9e395c95f683cfee25c7b61f6
|
[
"MIT"
] | null | null | null |
ax/models/torch/tests/test_surrogate.py
|
lyhyl/Ax
|
44384a0cb1a622c9e395c95f683cfee25c7b61f6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from unittest.mock import patch, MagicMock
import torch
from ax.core.search_space import SearchSpaceDigest
from ax.models.torch.botorch_modular.acquisition import Acquisition
from ax.models.torch.botorch_modular.surrogate import Surrogate
from ax.utils.common.constants import Keys
from ax.utils.common.testutils import TestCase
from ax.utils.testing.torch_stubs import get_torch_test_data
from botorch.acquisition.monte_carlo import qSimpleRegret
from botorch.models import SaasFullyBayesianSingleTaskGP, SingleTaskGP
from botorch.models.model import Model
from botorch.sampling.samplers import SobolQMCNormalSampler
from botorch.utils.containers import TrainingData
from gpytorch.constraints import Interval
from gpytorch.kernels import Kernel, RBFKernel, ScaleKernel # noqa: F401
from gpytorch.likelihoods import ( # noqa: F401
FixedNoiseGaussianLikelihood,
GaussianLikelihood,
Likelihood,
)
from gpytorch.mlls import ExactMarginalLogLikelihood, LeaveOneOutPseudoLikelihood
from torch import Tensor
ACQUISITION_PATH = f"{Acquisition.__module__}"
CURRENT_PATH = f"{__name__}"
SURROGATE_PATH = f"{Surrogate.__module__}"
class SingleTaskGPWithDifferentConstructor(SingleTaskGP):
def __init__(self, train_X: Tensor, train_Y: Tensor):
super().__init__(train_X=train_X, train_Y=train_Y)
class SurrogateTest(TestCase):
def setUp(self):
self.device = torch.device("cpu")
self.dtype = torch.float
self.Xs, self.Ys, self.Yvars, self.bounds, _, _, _ = get_torch_test_data(
dtype=self.dtype
)
self.training_data = TrainingData.from_block_design(
X=self.Xs[0], Y=self.Ys[0], Yvar=self.Yvars[0]
)
self.mll_class = ExactMarginalLogLikelihood
self.search_space_digest = SearchSpaceDigest(
feature_names=["x1", "x2"],
bounds=self.bounds,
target_fidelities={1: 1.0},
)
self.metric_names = ["y"]
self.fixed_features = {1: 2.0}
self.refit = True
self.objective_weights = torch.tensor(
[-1.0, 1.0], dtype=self.dtype, device=self.device
)
self.outcome_constraints = (torch.tensor([[1.0]]), torch.tensor([[0.5]]))
self.linear_constraints = (
torch.tensor([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
torch.tensor([[0.5], [1.0]]),
)
self.options = {}
def _get_surrogate(self, botorch_model_class):
surrogate = Surrogate(
botorch_model_class=botorch_model_class, mll_class=self.mll_class
)
surrogate_kwargs = botorch_model_class.construct_inputs(self.training_data)
return surrogate, surrogate_kwargs
@patch(f"{CURRENT_PATH}.Kernel")
@patch(f"{CURRENT_PATH}.Likelihood")
def test_init(self, mock_Likelihood, mock_Kernel):
for botorch_model_class in [SaasFullyBayesianSingleTaskGP, SingleTaskGP]:
surrogate, _ = self._get_surrogate(botorch_model_class=botorch_model_class)
self.assertEqual(surrogate.botorch_model_class, botorch_model_class)
self.assertEqual(surrogate.mll_class, self.mll_class)
@patch(f"{SURROGATE_PATH}.fit_gpytorch_model")
def test_mll_options(self, _):
mock_mll = MagicMock(self.mll_class)
surrogate = Surrogate(
botorch_model_class=SingleTaskGP,
mll_class=mock_mll,
mll_options={"some_option": "some_value"},
)
surrogate.fit(
training_data=self.training_data,
search_space_digest=self.search_space_digest,
metric_names=self.metric_names,
refit=self.refit,
)
self.assertEqual(mock_mll.call_args[1]["some_option"], "some_value")
def test_model_property(self):
for botorch_model_class in [SaasFullyBayesianSingleTaskGP, SingleTaskGP]:
surrogate, _ = self._get_surrogate(botorch_model_class=botorch_model_class)
with self.assertRaisesRegex(
ValueError, "BoTorch `Model` has not yet been constructed."
):
surrogate.model
def test_training_data_property(self):
for botorch_model_class in [SaasFullyBayesianSingleTaskGP, SingleTaskGP]:
surrogate, _ = self._get_surrogate(botorch_model_class=botorch_model_class)
with self.assertRaisesRegex(
ValueError,
"Underlying BoTorch `Model` has not yet received its training_data.",
):
surrogate.training_data
def test_dtype_property(self):
for botorch_model_class in [SaasFullyBayesianSingleTaskGP, SingleTaskGP]:
surrogate, _ = self._get_surrogate(botorch_model_class=botorch_model_class)
surrogate.construct(
training_data=self.training_data,
fidelity_features=self.search_space_digest.fidelity_features,
)
self.assertEqual(self.dtype, surrogate.dtype)
def test_device_property(self):
for botorch_model_class in [SaasFullyBayesianSingleTaskGP, SingleTaskGP]:
surrogate, _ = self._get_surrogate(botorch_model_class=botorch_model_class)
surrogate.construct(
training_data=self.training_data,
fidelity_features=self.search_space_digest.fidelity_features,
)
self.assertEqual(self.device, surrogate.device)
def test_from_botorch(self):
for botorch_model_class in [SaasFullyBayesianSingleTaskGP, SingleTaskGP]:
surrogate_kwargs = botorch_model_class.construct_inputs(self.training_data)
surrogate = Surrogate.from_botorch(botorch_model_class(**surrogate_kwargs))
self.assertIsInstance(surrogate.model, botorch_model_class)
self.assertTrue(surrogate._constructed_manually)
@patch(f"{CURRENT_PATH}.SaasFullyBayesianSingleTaskGP.__init__", return_value=None)
@patch(f"{CURRENT_PATH}.SingleTaskGP.__init__", return_value=None)
def test_construct(self, mock_GP, mock_SAAS):
mock_GPs = [mock_SAAS, mock_GP]
for i, botorch_model_class in enumerate(
[SaasFullyBayesianSingleTaskGP, SingleTaskGP]
):
surrogate, _ = self._get_surrogate(botorch_model_class=botorch_model_class)
with self.assertRaises(NotImplementedError):
# Base `Model` does not implement `construct_inputs`.
Surrogate(botorch_model_class=Model).construct(
training_data=self.training_data,
fidelity_features=self.search_space_digest.fidelity_features,
)
surrogate.construct(
training_data=self.training_data,
fidelity_features=self.search_space_digest.fidelity_features,
)
mock_GPs[i].assert_called_once()
call_kwargs = mock_GPs[i].call_args[1]
self.assertTrue(torch.equal(call_kwargs["train_X"], self.Xs[0]))
self.assertTrue(torch.equal(call_kwargs["train_Y"], self.Ys[0]))
self.assertFalse(surrogate._constructed_manually)
# Check that `model_options` passed to the `Surrogate` constructor are
# properly propagated.
with patch.object(
botorch_model_class,
"construct_inputs",
wraps=botorch_model_class.construct_inputs,
) as mock_construct_inputs:
surrogate = Surrogate(
botorch_model_class=botorch_model_class,
mll_class=self.mll_class,
model_options={"some_option": "some_value"},
)
surrogate.construct(self.training_data)
mock_construct_inputs.assert_called_with(
training_data=self.training_data, some_option="some_value"
)
def test_construct_custom_model(self):
# Make sure covar_module and likelihood are filtered for a model that doesn't
# support them.
surrogate = Surrogate(
botorch_model_class=SingleTaskGPWithDifferentConstructor,
mll_class=self.mll_class,
covar_module_class=RBFKernel,
likelihood_class=FixedNoiseGaussianLikelihood,
)
surrogate.construct(self.training_data)
self.assertEqual(type(surrogate._model.covar_module), ScaleKernel)
self.assertEqual(type(surrogate._model.likelihood), GaussianLikelihood)
# Pass custom options to a SingleTaskGP and make sure they are used
noise_constraint = Interval(1e-6, 1e-1)
surrogate = Surrogate(
botorch_model_class=SingleTaskGP,
mll_class=LeaveOneOutPseudoLikelihood,
covar_module_class=RBFKernel,
covar_module_options={"ard_num_dims": 1},
likelihood_class=GaussianLikelihood,
likelihood_options={"noise_constraint": noise_constraint},
)
surrogate.construct(self.training_data)
self.assertEqual(type(surrogate._model.likelihood), GaussianLikelihood)
self.assertEqual(
surrogate._model.likelihood.noise_covar.raw_noise_constraint,
noise_constraint,
)
self.assertEqual(surrogate.mll_class, LeaveOneOutPseudoLikelihood)
self.assertEqual(type(surrogate._model.covar_module), RBFKernel)
self.assertEqual(surrogate._model.covar_module.ard_num_dims, 1)
@patch(f"{CURRENT_PATH}.SingleTaskGP.load_state_dict", return_value=None)
@patch(f"{SURROGATE_PATH}.fit_fully_bayesian_model_nuts")
@patch(f"{SURROGATE_PATH}.fit_gpytorch_model")
@patch(f"{CURRENT_PATH}.ExactMarginalLogLikelihood")
def test_fit(self, mock_MLL, mock_fit_gpytorch, mock_fit_saas, mock_state_dict):
for mock_fit, botorch_model_class in zip(
[mock_fit_saas, mock_fit_gpytorch],
[SaasFullyBayesianSingleTaskGP, SingleTaskGP],
):
surrogate, surrogate_kwargs = self._get_surrogate(
botorch_model_class=botorch_model_class
)
# Checking that model is None before `fit` (and `construct`) calls.
self.assertIsNone(surrogate._model)
# Should instantiate mll and `fit_gpytorch_model` when `state_dict`
# is `None`.
surrogate.fit(
training_data=self.training_data,
search_space_digest=self.search_space_digest,
metric_names=self.metric_names,
refit=self.refit,
)
# Check that training data is correctly passed through to the
# BoTorch `Model`.
self.assertTrue(
torch.equal(
surrogate.model.train_inputs[0],
surrogate_kwargs.get("train_X"),
)
)
self.assertTrue(
torch.equal(
surrogate.model.train_targets,
surrogate_kwargs.get("train_Y").squeeze(1),
)
)
mock_state_dict.assert_not_called()
mock_fit.assert_called_once()
mock_state_dict.reset_mock()
mock_MLL.reset_mock()
mock_fit.reset_mock()
# Should `load_state_dict` when `state_dict` is not `None`
# and `refit` is `False`.
state_dict = {"state_attribute": "value"}
surrogate.fit(
training_data=self.training_data,
search_space_digest=self.search_space_digest,
metric_names=self.metric_names,
refit=False,
state_dict=state_dict,
)
mock_state_dict.assert_called_once()
mock_MLL.assert_not_called()
mock_fit.assert_not_called()
mock_state_dict.reset_mock()
mock_MLL.reset_mock()
@patch(f"{SURROGATE_PATH}.predict_from_model")
def test_predict(self, mock_predict):
for botorch_model_class in [SaasFullyBayesianSingleTaskGP, SingleTaskGP]:
surrogate, _ = self._get_surrogate(botorch_model_class=botorch_model_class)
surrogate.construct(
training_data=self.training_data,
fidelity_features=self.search_space_digest.fidelity_features,
)
surrogate.predict(X=self.Xs[0])
mock_predict.assert_called_with(model=surrogate.model, X=self.Xs[0])
def test_best_in_sample_point(self):
for botorch_model_class in [SaasFullyBayesianSingleTaskGP, SingleTaskGP]:
surrogate, _ = self._get_surrogate(botorch_model_class=botorch_model_class)
surrogate.construct(
training_data=self.training_data,
fidelity_features=self.search_space_digest.fidelity_features,
)
# `best_in_sample_point` requires `objective_weights`
with patch(
f"{SURROGATE_PATH}.best_in_sample_point", return_value=None
) as mock_best_in_sample:
with self.assertRaisesRegex(ValueError, "Could not obtain"):
surrogate.best_in_sample_point(
search_space_digest=self.search_space_digest,
objective_weights=None,
)
with patch(
f"{SURROGATE_PATH}.best_in_sample_point", return_value=(self.Xs[0], 0.0)
) as mock_best_in_sample:
best_point, observed_value = surrogate.best_in_sample_point(
search_space_digest=self.search_space_digest,
objective_weights=self.objective_weights,
outcome_constraints=self.outcome_constraints,
linear_constraints=self.linear_constraints,
fixed_features=self.fixed_features,
options=self.options,
)
mock_best_in_sample.assert_called_with(
Xs=[self.training_data.X],
model=surrogate,
bounds=self.search_space_digest.bounds,
objective_weights=self.objective_weights,
outcome_constraints=self.outcome_constraints,
linear_constraints=self.linear_constraints,
fixed_features=self.fixed_features,
options=self.options,
)
@patch(f"{ACQUISITION_PATH}.Acquisition.__init__", return_value=None)
@patch(
f"{ACQUISITION_PATH}.Acquisition.optimize",
return_value=([torch.tensor([0.0])], [torch.tensor([1.0])]),
)
@patch(
f"{SURROGATE_PATH}.pick_best_out_of_sample_point_acqf_class",
return_value=(qSimpleRegret, {Keys.SAMPLER: SobolQMCNormalSampler}),
)
def test_best_out_of_sample_point(
self, mock_best_point_util, mock_acqf_optimize, mock_acqf_init
):
for botorch_model_class in [SaasFullyBayesianSingleTaskGP, SingleTaskGP]:
surrogate, _ = self._get_surrogate(botorch_model_class=botorch_model_class)
surrogate.construct(
training_data=self.training_data,
fidelity_features=self.search_space_digest.fidelity_features,
)
# currently cannot use function with fixed features
with self.assertRaisesRegex(NotImplementedError, "Fixed features"):
surrogate.best_out_of_sample_point(
search_space_digest=self.search_space_digest,
objective_weights=self.objective_weights,
fixed_features=self.fixed_features,
)
candidate, acqf_value = surrogate.best_out_of_sample_point(
search_space_digest=self.search_space_digest,
objective_weights=self.objective_weights,
outcome_constraints=self.outcome_constraints,
linear_constraints=self.linear_constraints,
options=self.options,
)
mock_acqf_init.assert_called_with(
surrogate=surrogate,
botorch_acqf_class=qSimpleRegret,
search_space_digest=self.search_space_digest,
objective_weights=self.objective_weights,
outcome_constraints=self.outcome_constraints,
linear_constraints=self.linear_constraints,
fixed_features=None,
options={Keys.SAMPLER: SobolQMCNormalSampler},
)
self.assertTrue(torch.equal(candidate, torch.tensor([0.0])))
self.assertTrue(torch.equal(acqf_value, torch.tensor([1.0])))
@patch(f"{CURRENT_PATH}.SingleTaskGP.load_state_dict", return_value=None)
@patch(f"{SURROGATE_PATH}.fit_fully_bayesian_model_nuts")
@patch(f"{SURROGATE_PATH}.fit_gpytorch_model")
@patch(f"{CURRENT_PATH}.ExactMarginalLogLikelihood")
def test_update(self, mock_MLL, mock_fit_gpytorch, mock_fit_saas, mock_state_dict):
for botorch_model_class in [SaasFullyBayesianSingleTaskGP, SingleTaskGP]:
surrogate, surrogate_kwargs = self._get_surrogate(
botorch_model_class=botorch_model_class
)
surrogate.construct(
training_data=self.training_data,
fidelity_features=self.search_space_digest.fidelity_features,
)
# Check that correct arguments are passed to `fit`.
with patch(f"{SURROGATE_PATH}.Surrogate.fit") as mock_fit:
# Call `fit` by default
surrogate.update(
training_data=self.training_data,
search_space_digest=self.search_space_digest,
metric_names=self.metric_names,
refit=self.refit,
state_dict={"key": "val"},
)
mock_fit.assert_called_with(
training_data=self.training_data,
search_space_digest=self.search_space_digest,
metric_names=self.metric_names,
candidate_metadata=None,
refit=self.refit,
state_dict={"key": "val"},
)
# Check that the training data is correctly passed through to the
# BoTorch `Model`.
Xs, Ys, Yvars, bounds, _, _, _ = get_torch_test_data(
dtype=self.dtype, offset=1.0
)
training_data = TrainingData.from_block_design(
X=Xs[0], Y=Ys[0], Yvar=Yvars[0]
)
surrogate_kwargs = botorch_model_class.construct_inputs(training_data)
surrogate.update(
training_data=training_data,
search_space_digest=self.search_space_digest,
metric_names=self.metric_names,
refit=self.refit,
state_dict={"key": "val"},
)
self.assertTrue(
torch.equal(
surrogate.model.train_inputs[0],
surrogate_kwargs.get("train_X"),
)
)
self.assertTrue(
torch.equal(
surrogate.model.train_targets,
surrogate_kwargs.get("train_Y").squeeze(1),
)
)
# If should not be reconstructed, check that error is raised.
surrogate._constructed_manually = True
with self.assertRaisesRegex(NotImplementedError, ".* constructed manually"):
surrogate.update(
training_data=self.training_data,
search_space_digest=self.search_space_digest,
metric_names=self.metric_names,
refit=self.refit,
)
def test_serialize_attributes_as_kwargs(self):
for botorch_model_class in [SaasFullyBayesianSingleTaskGP, SingleTaskGP]:
surrogate, _ = self._get_surrogate(botorch_model_class=botorch_model_class)
expected = surrogate.__dict__
self.assertEqual(surrogate._serialize_attributes_as_kwargs(), expected)
| 46.06772
| 88
| 0.636858
|
380358b87b6bad885697ed0ea4e7e3629b47e9c2
| 2,585
|
py
|
Python
|
examples/source_separation/utils/dist_utils.py
|
zkneupper/audio
|
1f136671b84071a2fe1d5b762df64f3a76310c31
|
[
"BSD-2-Clause"
] | 4
|
2022-03-16T15:35:35.000Z
|
2022-03-22T23:55:41.000Z
|
examples/source_separation/utils/dist_utils.py
|
zkneupper/audio
|
1f136671b84071a2fe1d5b762df64f3a76310c31
|
[
"BSD-2-Clause"
] | 6
|
2020-09-22T22:19:09.000Z
|
2021-06-21T17:37:32.000Z
|
examples/source_separation/utils/dist_utils.py
|
zkneupper/audio
|
1f136671b84071a2fe1d5b762df64f3a76310c31
|
[
"BSD-2-Clause"
] | 1
|
2022-03-16T00:40:40.000Z
|
2022-03-16T00:40:40.000Z
|
import os
import csv
import types
import logging
import torch
import torch.distributed as dist
def _info_on_master(self, *args, **kwargs):
if dist.get_rank() == 0:
self.info(*args, **kwargs)
def getLogger(name):
"""Get logging.Logger module with additional ``info_on_master`` method."""
logger = logging.getLogger(name)
logger.info_on_master = types.MethodType(_info_on_master, logger)
return logger
_LG = getLogger(__name__)
def setup_distributed(
world_size, rank, local_rank, backend="nccl", init_method="env://"
):
"""Perform env setup and initialization for distributed training"""
if init_method == "env://":
_set_env_vars(world_size, rank, local_rank)
if world_size > 1 and "OMP_NUM_THREADS" not in os.environ:
_LG.info("Setting OMP_NUM_THREADS == 1")
os.environ["OMP_NUM_THREADS"] = "1"
params = {
"backend": backend,
"init_method": init_method,
"world_size": world_size,
"rank": rank,
}
_LG.info("Initializing distributed process group with %s", params)
dist.init_process_group(**params)
_LG.info("Initialized distributed process group.")
def _set_env_vars(world_size, rank, local_rank):
for key, default in [("MASTER_ADDR", "127.0.0.1"), ("MASTER_PORT", "29500")]:
if key not in os.environ:
os.environ[key] = default
os.environ["WORLD_SIZE"] = str(world_size)
os.environ["RANK"] = str(rank)
os.environ["LOCAL_RANK"] = str(local_rank)
def save_on_master(path, obj):
if dist.get_rank() == 0:
_LG.info("Saving %s", path)
torch.save(obj, path)
def write_csv_on_master(path, *rows):
if dist.get_rank() == 0:
with open(path, "a", newline="") as fileobj:
writer = csv.writer(fileobj)
for row in rows:
writer.writerow(row)
def synchronize_params(path, device, *modules):
if dist.get_world_size() < 2:
return
rank = dist.get_rank()
if rank == 0:
_LG.info("[Parameter Sync]: Saving parameters to a temp file...")
torch.save({f"{i}": m.state_dict() for i, m in enumerate(modules)}, path)
dist.barrier()
if rank != 0:
_LG.info("[Parameter Sync]: Loading parameters...")
data = torch.load(path, map_location=device)
for i, m in enumerate(modules):
m.load_state_dict(data[f"{i}"])
dist.barrier()
if rank == 0:
_LG.info("[Parameter Sync]: Removing the temp file...")
os.remove(path)
_LG.info_on_master("[Parameter Sync]: Complete.")
| 29.712644
| 81
| 0.631721
|
05c30891dac6cc7720760ab683d3dadfed43ecc1
| 1,038
|
py
|
Python
|
make_pseudo_label.py
|
phamduyhk/signate_student_cup_2020
|
19e158b08a86f2df8e4ee45445169ae396c91409
|
[
"MIT"
] | null | null | null |
make_pseudo_label.py
|
phamduyhk/signate_student_cup_2020
|
19e158b08a86f2df8e4ee45445169ae396c91409
|
[
"MIT"
] | null | null | null |
make_pseudo_label.py
|
phamduyhk/signate_student_cup_2020
|
19e158b08a86f2df8e4ee45445169ae396c91409
|
[
"MIT"
] | null | null | null |
import pandas as pd
train = pd.read_csv("./data/data_augmentation_using_language_translation.csv")
test = pd.read_csv("./data/test.csv")
trained_output_file = "./output/pseudo_bert-large-cased_0-192_5cv_4ep.csv"
trained_output = pd.read_csv(trained_output_file, header=None)
pseudo_label = trained_output.iloc[:,1]
test["jobflag"] = pseudo_label
submit = pd.read_csv("./data/submit_sample.csv", header=None)
all_prob = None
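# Pseudo-labelling: column 2 of the trained output holds the per-class
# probabilities for each test row; whenever a class probability exceeds 0.8,
# the corresponding test row (with its predicted jobflag from column 1) is
# appended to the augmented training set below.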
for i, item in enumerate(trained_output.iloc[:,2]):
arr = eval(item)
for v in arr:
if v>0.8:
row = test.loc[i]
row["jobflag"] = trained_output.iloc[i,1]
# print(row)
train.loc[len(train)+1]= row
print(train)
train.to_csv("data/pseudo_train.csv", index=False)
# trained_output_file ="submission_gru_50ep.csv"
# trained_output = pd.read_csv(trained_output_file, header=None)
# pseudo_label = trained_output.iloc[:,1]
# test["jobflag"] = pseudo_label
# pseudo = pd.concat([train,test])
# pseudo.to_csv("data/pseudo_train_gru.csv", index=False)
| 29.657143
| 78
| 0.705202
|
53628893ba7d1c3644178d0a037d63e138d0bc49
| 670
|
py
|
Python
|
LeetCodeSolutions/python/42_Trapping_Rain_Water.py
|
ChuanleiGuo/AlgorithmsPlayground
|
90b6287b742c8bfd3797540c408d679be2821a40
|
[
"MIT"
] | 1
|
2017-03-27T13:38:37.000Z
|
2017-03-27T13:38:37.000Z
|
LeetCodeSolutions/python/42_Trapping_Rain_Water.py
|
ChuanleiGuo/AlgorithmsPlayground
|
90b6287b742c8bfd3797540c408d679be2821a40
|
[
"MIT"
] | null | null | null |
LeetCodeSolutions/python/42_Trapping_Rain_Water.py
|
ChuanleiGuo/AlgorithmsPlayground
|
90b6287b742c8bfd3797540c408d679be2821a40
|
[
"MIT"
] | null | null | null |
class Solution(object):
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
area = 0
sec_height = 0
left, right = 0, len(height) - 1
while left < right:
if height[left] < height[right]:
sec_height = max(height[left], sec_height)
area += sec_height - height[left]
left += 1
else:
sec_height = max(height[right], sec_height)
area += sec_height - height[right]
right -= 1
return area
height = [0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]
print(Solution().trap(height))
| 29.130435
| 59
| 0.471642
|
c580578d7f263ab79d26173e2dece5e2671bd134
| 1,714
|
py
|
Python
|
test/functional/feature_uacomment.py
|
patrykwnosuch/machinecoin-core
|
b6783c857f43f7f077de594d1e03d156f5295b9c
|
[
"MIT"
] | 1
|
2019-05-27T11:12:53.000Z
|
2019-05-27T11:12:53.000Z
|
test/functional/feature_uacomment.py
|
patrykwnosuch/machinecoin-core
|
b6783c857f43f7f077de594d1e03d156f5295b9c
|
[
"MIT"
] | null | null | null |
test/functional/feature_uacomment.py
|
patrykwnosuch/machinecoin-core
|
b6783c857f43f7f077de594d1e03d156f5295b9c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Machinecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -uacomment option."""
import re
from test_framework.test_framework import MachinecoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import assert_equal
class UacommentTest(MachinecoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
self.log.info("test multiple -uacomment")
test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1]
assert_equal(test_uacomment, "(testnode0)")
self.restart_node(0, ["-uacomment=foo"])
foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1]
assert_equal(foo_uacomment, "(testnode0; foo)")
self.log.info("test -uacomment max length")
self.stop_node(0)
expected = "Error: Total length of network version string \([0-9]+\) exceeds maximum length \(256\). Reduce the number or size of uacomments."
self.nodes[0].assert_start_raises_init_error(["-uacomment=" + 'a' * 256], expected, match=ErrorMatch.FULL_REGEX)
self.log.info("test -uacomment unsafe characters")
for unsafe_char in ['/', ':', '(', ')']:
expected = "Error: User Agent comment \(" + re.escape(unsafe_char) + "\) contains unsafe characters."
self.nodes[0].assert_start_raises_init_error(["-uacomment=" + unsafe_char], expected, match=ErrorMatch.FULL_REGEX)
if __name__ == '__main__':
UacommentTest().main()
| 41.804878
| 150
| 0.694866
|
5461fe612fc1d33d67bb4e1889f5f40786662632
| 1,347
|
py
|
Python
|
fm/models/enumeration.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
fm/models/enumeration.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
fm/models/enumeration.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ---------------------------------------------------------------------
# Enumeration model
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Third-party modules
from mongoengine.document import Document
from mongoengine.fields import StringField, DictField, UUIDField
# Python modules
from noc.core.text import quote_safe_path
from noc.core.prettyjson import to_json
class Enumeration(Document):
meta = {
"collection": "noc.enumerations",
"strict": False,
"auto_create_index": False,
"json_collection": "fm.enumerations",
"json_unique_fields": ["name"],
}
name = StringField(unique=True)
uuid = UUIDField(binary=True)
values = DictField() # value -> [possible combinations]
def __str__(self):
return self.name
def get_json_path(self):
return "%s.json" % quote_safe_path(self.name)
def to_json(self):
return to_json(
{
"name": self.name,
"$collection": self._meta["json_collection"],
"uuid": self.uuid,
"values": self.values,
},
order=["name", "$collection", "uuid"],
)
| 29.282609
| 71
| 0.512992
|
4e736966fe4b4f3f84ee1d2f40f9848eebf8457a
| 24,872
|
py
|
Python
|
Skydipper/utils.py
|
Skydipper/LMIPy
|
4e3a5e474706df7272ffc11ef4e0bcc5d46ed36a
|
[
"MIT"
] | null | null | null |
Skydipper/utils.py
|
Skydipper/LMIPy
|
4e3a5e474706df7272ffc11ef4e0bcc5d46ed36a
|
[
"MIT"
] | 8
|
2019-12-20T12:32:27.000Z
|
2020-06-15T17:41:25.000Z
|
Skydipper/utils.py
|
Skydipper/LMIPy
|
4e3a5e474706df7272ffc11ef4e0bcc5d46ed36a
|
[
"MIT"
] | null | null | null |
import json
import math
import ee
from time import sleep
from google.cloud import storage
def html_box(item):
"""Returns an HTML block with template strings filled-in based on item attributes."""
is_layer = str(type(item)) == "<class 'Skydipper.layer.Layer'>"
is_dataset = str(type(item)) == "<class 'Skydipper.dataset.Dataset'>"
is_widget = str(type(item)) == "<class 'Skydipper.Skydipper.Widget'>"
is_geometry = str(type(item)) == "<class 'Skydipper.geometry.Geometry'>"
is_image = str(type(item)) == "<class 'Skydipper.image.Image'>"
site_link = "<a href='https://skydipper.com/' target='_blank'>"
site_logo = "<img class='itemThumbnail' src='https://skydipper.com/images/logo.png'>"
if is_layer:
kind_of_item = 'Layer'
url_link = f'{item.server}/v1/layer/{item.id}?includes=metadata'
elif is_dataset:
kind_of_item = 'Dataset'
url_link = f'{item.server}/v1/dataset/{item.id}?includes=metadata,layer'
elif is_image:
if item.type in ['Classified Image', 'Composite Image']:
instrument = item.type
else:
instrument = item.instrument
html_string = ("<div class='item_container' style='height: auto; overflow: hidden; border: 1px solid #2BA4A0;"
"border-radius: 5px; background: #2BA4A0; line-height: 1.21429em; padding: 10px;''>"
"<div class='item_left' style='width: 100px; height: 100px; float: left;''>"
f"<a href='{item.thumb_url}' target='_blank'>"
f"<img class='itemThumbnail' src='{item.thumb_url}'>"
"</a></div><div class='item_right' style='float: none; width: auto; hidden;padding-left: 10px; overflow: hidden;''>"
f"<b>Image Source</b>: {instrument} </br>"
f"<b>Datetime</b>: {item.date_time} </br>"
f"<b>Cloud score </b>: {item.cloud_score} </br>"
" </div> </div>")
return html_string
elif is_geometry:
kind_of_item = 'Geometry'
url_link = f'{item.server}/v1/geostore/{item.id}'
html_string = ("<div class='item_container' style='height: auto; overflow: hidden; border: 1px solid #2bA4A0;"
"border-radius: 5px; background: #2bA4A0; line-height: 1.21429em; padding: 10px;''>"
"<div class='item_left' style='width: 210px; float: left;''>"
f"{site_link}"
f"{site_logo}"
"</a></div><div class='item_right' style='float: none; width: auto; hidden;padding-left: 10px; overflow: hidden;''>"
f"<b>Geometry id</b>: <a href={url_link} target='_blank'>{item.id}</a></br>")
for k,v in item.attributes.get('info').items():
if v and k != 'simplifyThresh':
html_string += f"<br><i>{k}: {v}</i>"
html_string += (""
" </div> </div>")
return html_string
else:
kind_of_item = 'Unknown'
url_link = None
table_statement = f"Data source {item.attributes.get('provider')}"
if item.attributes.get('connectorUrl') and item.attributes.get('provider') == "cartodb":
table_statement = (f"Carto table: <a href={item.attributes.get('connectorUrl')}"
" target='_blank'>"
f"{item.attributes.get('tableName')}"
"</a>"
)
if item.attributes.get('provider') == 'gee':
table_statement = (f"GEE asset: <a href='https://code.earthengine.google.com/asset='"
f"{item.attributes.get('tableName')} target='_blank'>"
f"{item.attributes.get('tableName')}"
"</a>"
)
html = ("<div class='item_container' style='height: auto; overflow: hidden; border: 1px solid #2BA4A0;"
"border-radius: 2px; background: #2BA4A0; line-height: 1.21429em; padding: 10px;''>"
"<div class='item_left' style='width: 210px; float: left;''>"
f"{site_link}"
f"{site_logo}"
"</a></div><div class='item_right' style='float: none; width: auto; hidden;padding-left: 10px; overflow: hidden;''>"
f"<a href={url_link} target='_blank'>"
f"<b>{item.attributes.get('name')}</b>"
"</a>"
f"<br> {table_statement} | {kind_of_item} in {', '.join(item.attributes.get('application')).upper()}."
f"<br>Last Modified: {item.attributes.get('updatedAt')}"
f"<br><a href='{item.server}/v1/fields/{item.id}' target='_blank'>Fields</a>"
f" Connector: {item.attributes.get('provider')}"
f" | Published: {item.attributes.get('published')}"
" </div> </div>")
return html
def show_image_collection(item, i):
html_string = ("<div class='item_container' style='height: auto; overflow: hidden; border: 1px solid #2BA4A0;"
"border-radius: 2px; background: #2BA4A0; line-height: 1.21429em; padding: 10px;''>"
"<div class='item_left' style='width: 100px; height: 100px; hidden;padding-left: 10px; float: left''>"
f"<a href='{item.get('thumb_url')}' target='_blank'>"
f"<img class='itemThumbnail' src='{item.get('thumb_url')}'>"
"</a></div><div class='item_right' style='float: none; hidden;padding-left: 10px; width: auto; overflow: hidden;''>"
f"<b>Image Source</b>: {item.get('instrument')} </br>"
f"<b>Datetime</b>: {item.get('date_time')} </br>"
f"<b>Cloud score </b>: {item.get('cloud_score')} </br>"
" </div> </div>")
return html_string
def show(item, i):
"""Returns an HTML block with template strings filled-in based on item attributes."""
is_layer = item.get('type') == 'Layer'
is_dataset = item.get('type') == 'Dataset'
server = item.get('server', "https://api.skydipper.com")
item_id = item.get('id', None)
attributes = item.get('attributes', None)
if is_layer:
kind_of_item = 'Layer'
url_link = f'{server}/v1/layer/{item_id}?includes=metadata'
elif is_dataset:
kind_of_item = 'Dataset'
url_link = f'{server}/v1/dataset/{item_id}?includes=metadata,layer'
else:
kind_of_item = 'Unknown'
url_link = None
table_statement = f"Data source {attributes.get('provider')}"
if attributes.get('connectorUrl') and attributes.get('provider') == "cartodb":
table_statement = (f"Carto table: <a href={attributes.get('connectorUrl')}"
" target='_blank'>"
f"{attributes.get('tableName')}"
"</a>"
)
if attributes.get('connectorUrl') and attributes.get('provider') == "csv":
table_statement = (f"CSV Table: <a href={attributes.get('connectorUrl')}"
" target='_blank'>"
f"{attributes.get('tableName')}"
"</a>"
)
if attributes.get('provider') == 'gee':
table_statement = (f"GEE asset: <a href='https://code.earthengine.google.com/asset='"
f"{attributes.get('tableName')} target='_blank'>"
f"{attributes.get('tableName')}"
"</a>"
)
site_link = "<a href='https://skydipper.com/' target='_blank'>"
site_logo = "<img class='itemThumbnail' src='https://skydipper.com/images/logo.png'>"
html = ("<div class='item_container' style='height: auto; overflow: hidden; border: 1px solid #2BA4A0;"
"border-radius: 2px; background: #2BA4A0; line-height: 1.21429em; padding: 10px;''>"
"<div class='item_left' style='width: 210px; float: left;''>"
f"{site_link}"
f"{site_logo}"
"</a></div><div class='item_right' style='float: none; width: auto; hidden;padding-left: 10px; overflow: hidden;''>"
f"<b>{i}. </b>"
f"<a href={url_link} target='_blank'>"
f"<b>{attributes.get('name')}</b>"
"</a>"
f"<br> {table_statement} | {kind_of_item} in {', '.join(attributes.get('application')).upper()}."
f"<br>Last Modified: {attributes.get('updatedAt')}"
f"<br><a href='{server}/v1/fields/{item_id}' target='_blank'>Fields</a>"
f" | Connector: {attributes.get('provider')}"
f" | Published: {attributes.get('published')}"
" </div> </div>")
return html
def create_class(item):
from .dataset import Dataset
from .layer import Layer
from .Skydipper import Widget
from .image import Image
if item['type'] == 'metadata':
return Dataset(id_hash=item.get('attributes').get('dataset'), server = item.get('server'))
if item['type'] == 'Dataset' :
return Dataset(id_hash = item.get('id'), server = item.get('server'))
elif item['type'] == 'Layer':
return Layer(id_hash = item.get('id'), server = item.get('server'))
elif item['type'] == 'Widget':
return Widget(id_hash = item.get('id'), attributes=item.get('attributes'), server = item.get('server'))
elif item['type'] == 'Image':
return Image(**item)
def flatten_list(nested_list):
if len(nested_list) > 0:
return [item for sublist in nested_list for item in sublist]
else:
return []
def get_geojson_string(geom):
coords = geom.get('coordinates', None)
if coords and not any(isinstance(i, list) for i in coords[0]):
geom['coordinates'] = [coords]
feat_col = {"type": "FeatureCollection", "features": [{"type": "Feature", "properties": {}, "geometry": geom}]}
return json.dumps(feat_col)
def parse_filters(filter_objects):
filter_whitelist = ['connectorType', 'provider', 'status', 'published', 'protected', 'geoInfo']
return_string = ''
error_string = ''
if filter_objects:
for k, v in filter_objects.items():
if k in filter_whitelist:
return_string += f'{k}={v}&'
else:
error_string += f' {k},'
if error_string:
print(f'Unable to filter by{error_string[:-1]}.')
return return_string
return ''
def sldDump(sldObj):
"""
Creates valid SldStyle string from an object.
"""
sld_type = sldObj.get('type', None)
extended = str(sldObj.get('extended', 'false')).lower()
items = sldObj.get('items', None)
sld_attr = {'color': None, 'label': None, 'quantity': None, 'opacity': None}
if sld_type not in ['linear', 'ramp', 'gradient', 'intervals', 'values']:
print('Unable to create SldStyle. Type must be in "linear", "ramp", "gradient", "intervals", "values".')
return None
if items:
sld_str = f'<RasterSymbolizer> <ColorMap type="{sld_type}" extended="{extended}"> '
for item in items:
sld_str += f'<ColorMapEntry '
for k in sld_attr.keys():
if item.get(k, None): sld_str += f'{k}="{item[k]}" '
sld_str += '/> + '
return sld_str + "</ColorMap> </RasterSymbolizer>"
def sldParse(sld_str):
"""
Builds a dictionary from an SldStyle string.
"""
sld_str = sld_str.replace("'", '"').replace('\"', '"')
keys = ['color', 'label', 'quantity', 'opacity']
items = [el.strip() for el in sld_str.split('ColorMapEntry') if '<RasterSymbolizer>' not in el]
sld_items = []
for i in items:
tmp = {}
for k in keys:
v = find_between(i, f'{k}="', '"')
if v: tmp[k] = v
sld_items.append(tmp)
return {
'type': find_between(sld_str, 'type="', '"'),
'extended': find_between(sld_str, 'extended="', '"'),
'items': sld_items
}
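# ---------------------------------------------------------------------------
# Hedged example (not part of the original module): the kind of object
# sldDump() expects and sldParse() produces. The colours and quantities are
# invented for illustration only.
_EXAMPLE_SLD_OBJ = {
    'type': 'ramp',
    'extended': 'false',
    'items': [
        {'color': '#2BA4A0', 'quantity': '0', 'opacity': '1'},
        {'color': '#FFFFFF', 'quantity': '100', 'opacity': '1'},
    ],
}
# sldDump(_EXAMPLE_SLD_OBJ) returns a '<RasterSymbolizer> <ColorMap ...>'
# string with one <ColorMapEntry .../> per item, and sldParse() recovers a
# dictionary of the same shape from such a string.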
def find_between(s, first, last):
try:
start = s.index(first) + len(first)
end = s.index(last, start)
return s[start:end]
except ValueError:
return ""
def nested_set(dic, keys, value):
for key in keys[:-1]:
dic = dic.setdefault(key, {})
dic[keys[-1]] = value
def server_uses_widgets(server):
"""
Does the server currently set use Widget objects? Response gives True if it does, false if not.
"""
uses_widgets = ['https://api.resourcewatch.org','https://staging-api.globalforestwatch.org']
if any(server in s for s in uses_widgets):
return True
else:
return False
def tile_url(image, viz_params=None):
"""Create a target url for tiles from an EE image asset.
e.g.
im = ee.Image("LE7_TOA_1YEAR/" + year).select("B3","B2","B1")
viz = {'opacity': 1, 'gain':3.5, 'bias':4, 'gamma':1.5}
    url = tile_url(image=im, viz_params=viz)
"""
if viz_params:
d = image.getMapId(viz_params)
else:
d = image.getMapId()
base_url = 'https://earthengine.googleapis.com'
url = (f"https://earthengine.googleapis.com/v1alpha/{d['mapid']}/tiles/{{z}}/{{x}}/{{y}}")
return url
class EE_TILE_CALCS(object):
"""
Copyright (c) 2018 Gennadii Donchyts. All rights reserved.
This work is licensed under the terms of the MIT license.
For a copy, see <https://opensource.org/licenses/MIT>.
Refactored to Python, Vizzuality, 2020.
This code will help you calculate tile bounds, intersected with
a given geom, at a specified z-level.
"""
def __init__(self, tileSize=256):
ee.Initialize()
self.tileSize = tileSize
self.equatorial_circumference = 40075016.686
self.origin = 2 * math.pi * 6378137 / 2.0
def zoomToScale(self, zoom):
tileWidth = self.equatorial_circumference / math.pow(2, zoom)
pixelWidth = tileWidth / self.tileSize
return pixelWidth
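    # Worked example for zoomToScale(): at zoom 3 the tile width is
    # 40075016.686 / 2**3 ~= 5,009,377 m, so with a 256 px tile the pixel
    # width is ~19,567.9 m per pixel.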
def scaleToZoom(self, scale):
tileWidth = scale * self.tileSize
zoom = math.log(self.equatorial_circumference / tileWidth) / math.log(2)
return math.ceil(zoom)
def pixelsToMeters(self, px, py, zoom):
resolution = self.zoomToScale(zoom)
x = px * resolution - self.origin
y = py * resolution - self.origin
return [x, y]
def metersToPixels(self, x, y, zoom):
        resolution = self.zoomToScale(zoom)
px = (x + self.origin) / resolution
py = (y + self.origin) / resolution
return px, py
def degreesToTiles(self, lon, lat, zoom):
tx = math.floor((lon + 180) / 360 * math.pow(2, zoom))
ty = math.floor((1 - math.log(math.tan(self.toRadians(lat)) + 1 / math.cos(self.toRadians(lat))) / math.pi) / 2 * math.pow(2, zoom))
return [tx, ty]
@staticmethod
def tilesToDegrees(tx, ty, zoom):
lon = tx / math.pow(2, zoom) * 360 - 180
n = math.pi - 2 * math.pi * ty / math.pow(2, zoom)
        lat = EE_TILE_CALCS.toDegrees(math.atan(0.5 * (math.exp(n) - math.exp(-n))))
return [lon, lat]
def getTilesForGeometry(self, geometry, zoom):
bounds = ee.List(geometry.bounds().coordinates().get(0))
ll = bounds.get(0).getInfo() # <-- Look at making this happen server-side
ur = bounds.get(2).getInfo() # <-- Look at making this happen server-side
tmin = self.degreesToTiles(ll[0], ll[1], zoom)
tmax = self.degreesToTiles(ur[0], ur[1], zoom)
tiles = []
for tx in range(tmin[0], tmax[0] + 1):
for ty in range(tmax[1], tmin[1] + 1):
bounds = self.getTileBounds(tx, ty, zoom)
rect = ee.Geometry.Rectangle(bounds, 'EPSG:3857', False)
tiles.append(ee.Feature(rect).set({'tx': tx, 'ty': ty, 'zoom': zoom }))
return ee.FeatureCollection(tiles).filterBounds(geometry)
def getTilesList(self, geometry, zoom):
"""Returns a list of individual features, where each feature element is a tile footprint."""
bounds = ee.List(geometry.bounds().coordinates().get(0))
ll = bounds.get(0).getInfo() # <-- Look at making this happen server-side
ur = bounds.get(2).getInfo() # <-- Look at making this happen server-side
tmin = self.degreesToTiles(ll[0], ll[1], zoom)
tmax = self.degreesToTiles(ur[0], ur[1], zoom)
tiles = []
for tx in range(tmin[0], tmax[0] + 1):
for ty in range(tmax[1], tmin[1] + 1):
bounds = self.getTileBounds(tx, ty, zoom)
rect = ee.Geometry.Rectangle(bounds, 'EPSG:3857', False)
tiles.append(ee.Feature(rect).set({'tx': tx, 'ty': ty, 'zoom': zoom }))
return tiles
def getTileBounds(self, tx, ty, zoom, tileSize=256):
"""Returns a FeatureCollection object, where each feature is a tile footprint"""
ty = math.pow(2, zoom) - ty - 1 # TMS -> XYZ, flip y index
tmp_min = self.pixelsToMeters(tx * tileSize, ty * tileSize, zoom)
tmp_max = self.pixelsToMeters((tx + 1) * tileSize, (ty + 1) * tileSize, zoom)
return [tmp_min, tmp_max]
@staticmethod
def toRadians(degrees):
return degrees * math.pi / 180
@staticmethod
def toDegrees(radians):
return radians * 180 / math.pi
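# Illustrative usage sketch (not part of the original module; an assumption-laden
# example). EE_TILE_CALCS.__init__ calls ee.Initialize(), so Earth Engine
# credentials must already be configured. Coordinates and zoom are placeholders.
#
#     tiler = EE_TILE_CALCS()
#     tx, ty = tiler.degreesToTiles(lon=-3.7, lat=40.4, zoom=5)   # tile index containing Madrid at z=5
#     bounds = tiler.getTileBounds(tx, ty, zoom=5)                # [[xmin, ymin], [xmax, ymax]] in metres (EPSG:3857)
#     scale = tiler.zoomToScale(5)                                # metres per pixel at z=5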
class MovieMaker(object):
"""
Create movie tiles for a list of bounds, to go into a GCS bucket
Parameters
----------
area: ee.Geometry.Polygon()
A polygon that covers the area which you want to generate tiles within.
zlist: list
A list of integer values of z-levels to process, e.g. z=[3] or z=[3,4,5]
privatekey_path: string
A string specifying the direction of a json keyfile on your local filesystem
e.g. "/Users/me/.privateKeys/key_with_bucket_permissions.json"
bucket_name: string
A string specifying a GCS bucket (to which your private key should have access)
e.g. 'skydipper_materials'
folder_path: string
A string specifying a folder name to create within the target bucket.
e.g. 'movie-tiles/DTEST'
report_status: bool
Set to true if you want the program to report on the files it is generating.
Beware it can get long for high z-levels.
"""
def __init__(self, privatekey_path, bucket_name, folder_path,
area=None, zlist=None, ic=None, report_status=False):
self.storage_client = storage.Client.from_service_account_json(privatekey_path)
self.privatekey_path = privatekey_path
self.bucket = self.storage_client.get_bucket(bucket_name)
self.bucket_name = bucket_name
self.folder_path = folder_path
self.tiler = EE_TILE_CALCS()
self.area = area
self.zlist = zlist
self.ic = ic
self.report_status = report_status
ee.Initialize()
def run(self):
"""Main worker method"""
assert type(self.zlist) == list, "the zlist must be a list to run, e.g. zlist=[2]"
assert type(self.area) == ee.geometry.Geometry, "An area of type ee.geometry.Geometry must be provided to run"
for zlevel in self.zlist:
print(f"🧨 Calculating Z-level {zlevel}")
tileset = self.tiler.getTilesList(self.area, zlevel)
d = self.initial_dic_creation(tileset=tileset) # Starting dic of whatever has been burned to the bucket
to_do = self.get_items_by_state(d, 'WAITING')
for unprocessed in to_do:
z=unprocessed[0].split('/')[-3]
x=unprocessed[0].split('/')[-2]
y=unprocessed[0].split('/')[-1].split('.mp4')[0]
if self.report_status: print(f'{z}/{x}/{y}')
try:
self.movie_maker(tile=unprocessed[1].get('tile'), z=z, x=x, y=y)
except (ee.EEException) as err:
                    sleep(60 * 5) # Simple - wait 5 mins and try assigning tasks again (assumes the failure was a transient EE error, e.g. a rate limit)
self.movie_maker(tile=unprocessed[1].get('tile'), z=z, x=x, y=y)
print("Program ended normally. Note that after the files have been generated you should run MovieMaker().reNamer()")
self.reNamer()
return
def movie_maker(self, tile, z, x, y):
"""Generates a single movie tile"""
g = tile.geometry()
filtered = self.ic.filterBounds(g)
#print(f"🗺 Exporting movie-tile to {self.bucket_name}/{self.folder_path}/{z}/{x}/{y}.mp4")
exp_task = ee.batch.Export.video.toCloudStorage(
collection = filtered,
description = f'{z}_{x}_{y}',
bucket= self.bucket_name,
fileNamePrefix = f"{self.folder_path}/{z}/{x}/{y}",
dimensions = [256,256],
framesPerSecond = 2,
region = g)
exp_task.start()
def reNamer(self):
"""Renames source files to a clean target that removes jank added by EE."""
blob_gen = self.bucket.list_blobs(prefix=self.folder_path)
blobs = [blob for blob in blob_gen]
print(f'Scanning target {self.bucket_name}/{self.folder_path} for files that require renaming...')
for blob in blobs:
tmp_name = blob.name
if tmp_name[-4:] == '.mp4' and ("ee-export-video" in tmp_name):
target_name = f"{tmp_name.split('ee-export-video')[0]}.mp4"
if self.report_status: print(f'renaming:{tmp_name}-->{target_name}')
_ = self.bucket.rename_blob(blob, target_name)
def getDoneFileList(self):
"""Returns list of file names in a bucket/path that have already been created"""
blob_gen = self.bucket.list_blobs(prefix=self.folder_path)
blobs = [blob for blob in blob_gen]
completed_files = []
for blob in blobs:
completed_files.append(blob.name)
return completed_files
def getFullTargetList(self, z):
"""
Return a list of all files we intend to create for a specified z (int) target to cover a given ee.Geometry input
area, with a folder path (string) direction appended.
"""
target_list = []
tmp_fc = self.tiler.getTilesForGeometry(self.area, z)
        tmp_info = tmp_fc.getInfo() # <-- this is the point where I have the properties; I should propagate them rather than calling getInfo again
target_list = [f"{self.folder_path}/{item.get('properties').get('zoom')}/{item.get('properties').get('tx')}/{item.get('properties').get('ty')}.mp4"
for item in tmp_info.get('features')]
return sorted(target_list)
def get_current_status(self):
"""Consult the current EE Task list to see what's what"""
batch_jobs = ee.batch.Task.list()
processing_list = []
processing_status = []
badness_list = []
for job in batch_jobs:
state = job.state
try:
tmp = job.config['description'].split("_")
tmp_fname = f"{self.folder_path}/{tmp[0]}/{tmp[1]}/{tmp[2]}.mp4"
processing_list.append(tmp_fname)
processing_status.append(state)
except:
badness_list.append(job)
return {'processing':processing_list,
'status': processing_status,
'badness': badness_list}
def get_objective_list(self, tileset):
"""Returns a list of target files 1:1 for the target tiles"""
tmp_list = []
for tile in tileset:
tmp_str = tile.__str__()
zoom = json.loads(tmp_str[11:-1]).get('arguments').get('value')
ty = json.loads(tmp_str[11:-1]).get('arguments').get('object').get('arguments').get('value')
tx = json.loads(tmp_str[11:-1]).get('arguments').get('object').get('arguments').get('object').get('arguments').get('value')
tmp_list.append(f"{self.folder_path}/{zoom}/{tx}/{ty}.mp4")
return tmp_list
@staticmethod
def generate_master_dic(objectives, tileset):
d = {}
for obj, tile in zip(objectives, tileset):
d[obj] = {'tile': tile, 'status': "WAITING"}
return d
@staticmethod
def get_items_by_state(d, state="WAITING"):
result = []
for i in d.items():
if (i[1].get('status') == state):
result.append(i)
return result
def initial_dic_creation(self, tileset):
objectives = self.get_objective_list(tileset) # <--- calculate target files to keep track
d = self.generate_master_dic(objectives=objectives, tileset=tileset)
self.reNamer()
done = self.getDoneFileList() # Consult the bucket and see what files have been completed
for item in done:
if d.get(item, None):
d[item]['status']='COMPLETED'
return d
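# Illustrative usage sketch (placeholders throughout: the key path, bucket, folder
# and image collection are assumptions, not values from this module). The
# collection passed as `ic` should already be visualized so Export.video can
# render it.
#
#     area = ee.Geometry.Polygon([[[-10, 35], [5, 35], [5, 45], [-10, 45], [-10, 35]]])
#     mm = MovieMaker(privatekey_path='/path/to/service-account-key.json',
#                     bucket_name='my-bucket',
#                     folder_path='movie-tiles/DEMO',
#                     area=area,
#                     zlist=[3, 4],
#                     ic=my_visualized_collection,
#                     report_status=True)
#     mm.run()   # exports one movie tile per footprint, then calls reNamer()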
| 46.059259
| 158
| 0.581296
|
bccbd751c2c4606ce707a9cc4299b8b83453bcfd
| 64,371
|
py
|
Python
|
salt/cloud/clouds/azurearm.py
|
wedge-jarrad/salt
|
7897d7922c70b039db8284976dcbf812eb3f2acf
|
[
"Apache-2.0"
] | null | null | null |
salt/cloud/clouds/azurearm.py
|
wedge-jarrad/salt
|
7897d7922c70b039db8284976dcbf812eb3f2acf
|
[
"Apache-2.0"
] | null | null | null |
salt/cloud/clouds/azurearm.py
|
wedge-jarrad/salt
|
7897d7922c70b039db8284976dcbf812eb3f2acf
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Azure ARM Cloud Module
======================
.. versionadded:: 2016.11.0
.. versionchanged:: Fluorine
The Azure ARM cloud module is used to control access to Microsoft Azure Resource Manager
:depends:
* `Microsoft Azure SDK for Python <https://pypi.python.org/pypi/azure>`_ >= 2.0rc6
* `Microsoft Azure Storage SDK for Python <https://pypi.python.org/pypi/azure-storage>`_ >= 0.32
* `AutoRest swagger generator Python client runtime (Azure-specific module) <https://pypi.python.org/pypi/msrestazure>`_ >= 0.4
:configuration:
Required provider parameters:
if using username and password:
* ``subscription_id``
* ``username``
* ``password``
if using a service principal:
* ``subscription_id``
* ``tenant``
* ``client_id``
* ``secret``
Optional provider parameters:
**cloud_environment**: Used to point the cloud driver to different API endpoints, such as Azure GovCloud. Possible values:
* ``AZURE_PUBLIC_CLOUD`` (default)
* ``AZURE_CHINA_CLOUD``
* ``AZURE_US_GOV_CLOUD``
* ``AZURE_GERMAN_CLOUD``
* HTTP base URL for a custom endpoint, such as Azure Stack. The ``/metadata/endpoints`` path will be added to the URL.
**userdata** and **userdata_file**:
Azure Resource Manager uses a separate VirtualMachineExtension object to pass userdata scripts to the virtual
machine. Arbitrary shell commands can be passed via the ``userdata`` parameter, or via a file local to the Salt
Cloud system using the ``userdata_file`` parameter. Note that the local file is not treated as a script by the
extension, so "one-liners" probably work best. If greater functionality is desired, a web-hosted script file can
be specified via ``userdata_file: https://raw.githubusercontent.com/account/repo/master/azure-script.py``, which
will be executed on the system after VM creation. For Windows systems, script files ending in ``.ps1`` will be
executed with ``powershell.exe``. The ``userdata`` parameter takes precedence over the ``userdata_file`` parameter
when creating the custom script extension.
**win_installer**:
This parameter, which holds the local path to the Salt Minion installer package, is used to determine if the
virtual machine type will be "Windows". Only set this parameter on profiles which install Windows operating systems.
Example ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/azure.conf`` configuration:
.. code-block:: yaml
my-azure-config with username and password:
driver: azurearm
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
username: larry
password: 123pass
Or my-azure-config with service principal:
driver: azurearm
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
tenant: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
client_id: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
secret: XXXXXXXXXXXXXXXXXXXXXXXX
cloud_environment: AZURE_US_GOV_CLOUD
The Service Principal can be created with the new Azure CLI (https://github.com/Azure/azure-cli) with:
az ad sp create-for-rbac -n "http://<yourappname>" --role <role> --scopes <scope>
For example, this creates a service principal with 'owner' role for the whole subscription:
az ad sp create-for-rbac -n "http://mysaltapp" --role owner --scopes /subscriptions/3287abc8-f98a-c678-3bde-326766fd3617
    Note: review the details of Service Principals. The Owner role is more than you normally need, and you can
    restrict the scope to a resource group or to individual resources.
'''
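# Illustrative profile sketch (names, size, image and network values below are
# placeholders -- substitute ones that exist in your subscription). The image
# string follows the 'publisher|offer|sku|version' format parsed by
# request_instance further down.
#
#     azure-ubuntu:
#       provider: my-azure-config
#       image: Canonical|UbuntuServer|18.04-LTS|latest
#       size: Standard_D1_v2
#       location: eastus
#       resource_group: my-group
#       network: my-vnet
#       subnet: default
#       allocate_public_ip: True
#       ssh_username: azureuser
#       ssh_password: NotARealPassword123!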
# pylint: disable=wrong-import-position,wrong-import-order
from __future__ import absolute_import, print_function, unicode_literals
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
import importlib
import logging
import os
import os.path
import pprint
import string
import time
# Salt libs
import salt.cache
import salt.config as config
import salt.loader
import salt.utils
import salt.utils.cloud
import salt.utils.yaml
import salt.ext.six as six
import salt.version
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout,
)
# Import 3rd-party libs
HAS_LIBS = False
try:
import azure.mgmt.compute.models as compute_models
import azure.mgmt.network.models as network_models
from azure.storage.blob.blockblobservice import BlockBlobService
from msrestazure.azure_exceptions import CloudError
HAS_LIBS = True
except ImportError:
pass
__opts__ = salt.config.minion_config('/etc/salt/minion')
__utils__ = salt.loader.utils(__opts__)
__virtualname__ = 'azurearm'
if HAS_LIBS is True:
_get_client = __utils__['azurearm.get_client'] # pylint: disable=invalid-name
_log_cloud_error = __utils__['azurearm.log_cloud_error'] # pylint: disable=invalid-name
_paged_object_to_list = __utils__['azurearm.paged_object_to_list'] # pylint: disable=invalid-name
log = logging.getLogger(__name__)
def __virtual__():
'''
Check for Azure configurations.
'''
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return (
False,
'The following dependencies are required to use the AzureARM driver: '
'Microsoft Azure SDK for Python >= 2.0rc6, '
'Microsoft Azure Storage SDK for Python >= 0.32, '
'MS REST Azure (msrestazure) >= 0.4'
)
return __virtualname__
def get_api_versions(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Get a resource type api versions
'''
if kwargs is None:
kwargs = {}
if 'resource_provider' not in kwargs:
raise SaltCloudSystemExit(
'A resource_provider must be specified'
)
if 'resource_type' not in kwargs:
raise SaltCloudSystemExit(
'A resource_type must be specified'
)
api_versions = []
try:
resconn = get_conn(client_type='resource')
provider_query = resconn.providers.get(
resource_provider_namespace=kwargs['resource_provider']
)
for resource in provider_query.resource_types:
if six.text_type(resource.resource_type) == kwargs['resource_type']:
resource_dict = resource.as_dict()
api_versions = resource_dict['api_versions']
except CloudError as exc:
_log_cloud_error('resource', exc.message)
return api_versions
def get_resource_by_id(resource_id, api_version, extract_value=None):
'''
Get an AzureARM resource by id
'''
ret = {}
try:
resconn = get_conn(client_type='resource')
resource_query = resconn.resources.get_by_id(
resource_id=resource_id,
api_version=api_version
)
resource_dict = resource_query.as_dict()
if extract_value is not None:
ret = resource_dict[extract_value]
else:
ret = resource_dict
except CloudError as exc:
_log_cloud_error('resource', exc.message)
ret = {'Error': exc.message}
return ret
def get_configured_provider():
'''
Return the first configured provider instance.
'''
def __is_provider_configured(opts, provider, required_keys=()):
'''
Check if the provider is configured.
'''
if ':' in provider:
alias, driver = provider.split(':')
if alias not in opts['providers']:
return False
if driver not in opts['providers'][alias]:
return False
for key in required_keys:
if opts['providers'][alias][driver].get(key, None) is None:
return False
return opts['providers'][alias][driver]
for alias, drivers in six.iteritems(opts['providers']):
for driver, provider_details in six.iteritems(drivers):
if driver != provider:
continue
skip_provider = False
for key in required_keys:
if provider_details.get(key, None) is None:
# This provider does not include all necessary keys,
# continue to next one.
skip_provider = True
break
if skip_provider:
continue
return provider_details
return False
provider = __is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('subscription_id', 'tenant', 'client_id', 'secret')
)
if provider is False:
provider = __is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('subscription_id', 'username', 'password')
)
return provider
def get_dependencies():
'''
Warn if dependencies aren't met.
'''
return config.check_driver_dependencies(
__virtualname__,
{'azurearm': HAS_LIBS}
)
def get_conn(client_type):
'''
Return a connection object for a client type.
'''
conn_kwargs = {}
conn_kwargs['subscription_id'] = config.get_cloud_config_value(
'subscription_id',
get_configured_provider(), __opts__, search_global=False
)
cloud_env = config.get_cloud_config_value(
'cloud_environment',
get_configured_provider(), __opts__, search_global=False
)
if cloud_env is not None:
conn_kwargs['cloud_environment'] = cloud_env
tenant = config.get_cloud_config_value(
'tenant',
get_configured_provider(), __opts__, search_global=False
)
if tenant is not None:
client_id = config.get_cloud_config_value(
'client_id',
get_configured_provider(), __opts__, search_global=False
)
secret = config.get_cloud_config_value(
'secret',
get_configured_provider(), __opts__, search_global=False
)
conn_kwargs.update({'client_id': client_id, 'secret': secret,
'tenant': tenant})
else:
username = config.get_cloud_config_value(
'username',
get_configured_provider(), __opts__, search_global=False
)
password = config.get_cloud_config_value(
'password',
get_configured_provider(), __opts__, search_global=False
)
conn_kwargs.update({'username': username, 'password': password})
client = _get_client(
client_type=client_type, **conn_kwargs
)
return client
def get_location(call=None): # pylint: disable=unused-argument
'''
Return the location that is configured for this provider
'''
return config.get_cloud_config_value(
'location',
get_configured_provider(), __opts__, search_global=False
)
def avail_locations(call=None):
'''
Return a dict of all available regions.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
ret = {}
ret['locations'] = []
try:
resconn = get_conn(client_type='resource')
provider_query = resconn.providers.get(
resource_provider_namespace='Microsoft.Compute'
)
locations = []
for resource in provider_query.resource_types:
if six.text_type(resource.resource_type) == 'virtualMachines':
resource_dict = resource.as_dict()
locations = resource_dict['locations']
for location in locations:
lowercase = location.lower().replace(' ', '')
ret['locations'].append(lowercase)
except CloudError as exc:
_log_cloud_error('resource', exc.message)
ret = {'Error': exc.message}
return ret
def avail_images(call=None):
'''
Return a dict of all available images on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
compconn = get_conn(client_type='compute')
region = get_location()
publishers = []
ret = {}
def _get_publisher_images(publisher):
'''
Get all images from a specific publisher
'''
data = {}
try:
offers = compconn.virtual_machine_images.list_offers(
location=region,
publisher_name=publisher,
)
for offer_obj in offers:
offer = offer_obj.as_dict()
skus = compconn.virtual_machine_images.list_skus(
location=region,
publisher_name=publisher,
offer=offer['name'],
)
for sku_obj in skus:
sku = sku_obj.as_dict()
results = compconn.virtual_machine_images.list(
location=region,
publisher_name=publisher,
offer=offer['name'],
skus=sku['name'],
)
for version_obj in results:
version = version_obj.as_dict()
name = '|'.join((
publisher,
offer['name'],
sku['name'],
version['name'],
))
data[name] = {
'publisher': publisher,
'offer': offer['name'],
'sku': sku['name'],
'version': version['name'],
}
except CloudError as exc:
_log_cloud_error('compute', exc.message)
data = {publisher: exc.message}
return data
try:
publishers_query = compconn.virtual_machine_images.list_publishers(
location=region
)
for publisher_obj in publishers_query:
publisher = publisher_obj.as_dict()
publishers.append(publisher['name'])
except CloudError as exc:
_log_cloud_error('compute', exc.message)
pool = ThreadPool(cpu_count() * 6)
results = pool.map_async(_get_publisher_images, publishers)
results.wait()
ret = {k: v for result in results.get() for k, v in six.iteritems(result)}
return ret
def avail_sizes(call=None):
'''
Return a list of sizes available from the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
compconn = get_conn(client_type='compute')
ret = {}
location = get_location()
try:
sizes = compconn.virtual_machine_sizes.list(
location=location
)
for size_obj in sizes:
size = size_obj.as_dict()
ret[size['name']] = size
except CloudError as exc:
_log_cloud_error('compute', exc.message)
ret = {'Error': exc.message}
return ret
def list_nodes(call=None):
'''
List VMs on this Azure account
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
ret = {}
nodes = list_nodes_full()
for node in nodes:
ret[node] = {'name': node}
for prop in ('id', 'image', 'size', 'state', 'private_ips', 'public_ips'):
ret[node][prop] = nodes[node].get(prop)
return ret
def list_nodes_full(call=None):
'''
List all VMs on the subscription with full information
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
netapi_versions = get_api_versions(kwargs={
'resource_provider': 'Microsoft.Network',
'resource_type': 'networkInterfaces'
}
)
netapi_version = netapi_versions[0]
compconn = get_conn(client_type='compute')
ret = {}
def _get_node_info(node):
'''
Get node info.
'''
node_ret = {}
node['id'] = node['vm_id']
node['size'] = node['hardware_profile']['vm_size']
node['state'] = node['provisioning_state']
node['public_ips'] = []
node['private_ips'] = []
node_ret[node['name']] = node
try:
image_ref = node['storage_profile']['image_reference']
node['image'] = '|'.join([
image_ref['publisher'],
image_ref['offer'],
image_ref['sku'],
image_ref['version'],
])
except TypeError:
try:
node['image'] = node['storage_profile']['os_disk']['image']['uri']
except TypeError:
node['image'] = None
try:
netifaces = node['network_profile']['network_interfaces']
for index, netiface in enumerate(netifaces):
netiface_name = get_resource_by_id(
netiface['id'],
netapi_version,
'name'
)
netiface, pubips, privips = _get_network_interface(
netiface_name,
node['resource_group']
)
node['network_profile']['network_interfaces'][index].update(netiface)
node['public_ips'].extend(pubips)
node['private_ips'].extend(privips)
except Exception:
pass
node_ret[node['name']] = node
return node_ret
for group in list_resource_groups():
nodes = []
nodes_query = compconn.virtual_machines.list(
resource_group_name=group
)
for node_obj in nodes_query:
node = node_obj.as_dict()
node['resource_group'] = group
nodes.append(node)
pool = ThreadPool(cpu_count() * 6)
results = pool.map_async(_get_node_info, nodes)
results.wait()
group_ret = {k: v for result in results.get() for k, v in six.iteritems(result)}
ret.update(group_ret)
return ret
def list_resource_groups(call=None):
'''
List resource groups associated with the subscription
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_hosted_services function must be called with '
'-f or --function'
)
resconn = get_conn(client_type='resource')
ret = {}
try:
groups = resconn.resource_groups.list()
for group_obj in groups:
group = group_obj.as_dict()
ret[group['name']] = group
except CloudError as exc:
_log_cloud_error('resource', exc.message)
ret = {'Error': exc.message}
return ret
def show_instance(name, call=None):
'''
Show the details from AzureARM concerning an instance
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_instance action must be called with -a or --action.'
)
try:
node = list_nodes_full('function')[name]
except KeyError:
log.debug('Failed to get data for node \'%s\'', name)
node = {}
__utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
return node
def delete_interface(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Delete a network interface.
'''
if kwargs is None:
kwargs = {}
netconn = get_conn(client_type='network')
if kwargs.get('resource_group') is None:
kwargs['resource_group'] = config.get_cloud_config_value(
'resource_group', {}, __opts__, search_global=True
)
ips = []
iface = netconn.network_interfaces.get(
kwargs['resource_group'],
kwargs['iface_name'],
)
iface_name = iface.name
for ip_ in iface.ip_configurations:
ips.append(ip_.name)
poller = netconn.network_interfaces.delete(
kwargs['resource_group'],
kwargs['iface_name'],
)
poller.wait()
for ip_ in ips:
poller = netconn.public_ip_addresses.delete(kwargs['resource_group'], ip_)
poller.wait()
return {iface_name: ips}
def _get_public_ip(name, resource_group):
'''
Get the public ip address details by name.
'''
netconn = get_conn(client_type='network')
try:
pubip_query = netconn.public_ip_addresses.get(
resource_group_name=resource_group,
public_ip_address_name=name
)
pubip = pubip_query.as_dict()
except CloudError as exc:
_log_cloud_error('network', exc.message)
pubip = {'error': exc.message}
return pubip
def _get_network_interface(name, resource_group):
'''
Get a network interface.
'''
public_ips = []
private_ips = []
netapi_versions = get_api_versions(kwargs={
'resource_provider': 'Microsoft.Network',
'resource_type': 'publicIPAddresses'
}
)
netapi_version = netapi_versions[0]
netconn = get_conn(client_type='network')
netiface_query = netconn.network_interfaces.get(
resource_group_name=resource_group,
network_interface_name=name
)
netiface = netiface_query.as_dict()
for index, ip_config in enumerate(netiface['ip_configurations']):
if ip_config.get('private_ip_address') is not None:
private_ips.append(ip_config['private_ip_address'])
if 'id' in ip_config.get('public_ip_address', {}):
public_ip_name = get_resource_by_id(
ip_config['public_ip_address']['id'],
netapi_version,
'name'
)
public_ip = _get_public_ip(public_ip_name, resource_group)
public_ips.append(public_ip['ip_address'])
netiface['ip_configurations'][index]['public_ip_address'].update(public_ip)
return netiface, public_ips, private_ips
def create_network_interface(call=None, kwargs=None):
'''
Create a network interface.
'''
if call != 'action':
raise SaltCloudSystemExit(
'The create_network_interface action must be called with -a or --action.'
)
# pylint: disable=invalid-name
IPAllocationMethod = getattr(
network_models,
'IPAllocationMethod'
)
# pylint: disable=invalid-name
NetworkInterface = getattr(
network_models,
'NetworkInterface'
)
# pylint: disable=invalid-name
NetworkInterfaceIPConfiguration = getattr(
network_models,
'NetworkInterfaceIPConfiguration'
)
# pylint: disable=invalid-name
PublicIPAddress = getattr(
network_models,
'PublicIPAddress'
)
if not isinstance(kwargs, dict):
kwargs = {}
vm_ = kwargs
netconn = get_conn(client_type='network')
if kwargs.get('location') is None:
kwargs['location'] = get_location()
if kwargs.get('network') is None:
kwargs['network'] = config.get_cloud_config_value(
'network', vm_, __opts__, search_global=False
)
if kwargs.get('subnet') is None:
kwargs['subnet'] = config.get_cloud_config_value(
'subnet', vm_, __opts__, search_global=False
)
if kwargs.get('network_resource_group') is None:
kwargs['resource_group'] = config.get_cloud_config_value(
'resource_group', vm_, __opts__, search_global=False
)
else:
kwargs['resource_group'] = kwargs['network_resource_group']
if kwargs.get('iface_name') is None:
kwargs['iface_name'] = '{0}-iface0'.format(vm_['name'])
subnet_obj = netconn.subnets.get(
resource_group_name=kwargs['resource_group'],
virtual_network_name=kwargs['network'],
subnet_name=kwargs['subnet'],
)
ip_kwargs = {}
ip_configurations = None
if 'private_ip_address' in kwargs.keys():
ip_kwargs['private_ip_address'] = kwargs['private_ip_address']
ip_kwargs['private_ip_allocation_method'] = IPAllocationMethod.static
else:
ip_kwargs['private_ip_allocation_method'] = IPAllocationMethod.dynamic
if kwargs.get('allocate_public_ip') is True:
pub_ip_name = '{0}-ip'.format(kwargs['iface_name'])
poller = netconn.public_ip_addresses.create_or_update(
resource_group_name=kwargs['resource_group'],
public_ip_address_name=pub_ip_name,
parameters=PublicIPAddress(
location=kwargs['location'],
public_ip_allocation_method=IPAllocationMethod.static,
),
)
count = 0
poller.wait()
while True:
try:
pub_ip_data = netconn.public_ip_addresses.get(
kwargs['resource_group'],
pub_ip_name,
)
if pub_ip_data.ip_address: # pylint: disable=no-member
ip_kwargs['public_ip_address'] = PublicIPAddress(
six.text_type(pub_ip_data.id), # pylint: disable=no-member
)
ip_configurations = [
NetworkInterfaceIPConfiguration(
name='{0}-ip'.format(kwargs['iface_name']),
subnet=subnet_obj,
**ip_kwargs
)
]
break
except CloudError as exc:
log.error('There was a cloud error: {0}'.format(exc))
count += 1
if count > 120:
raise ValueError('Timed out waiting for public IP Address.')
time.sleep(5)
else:
priv_ip_name = '{0}-ip'.format(kwargs['iface_name'])
ip_configurations = [
NetworkInterfaceIPConfiguration(
name=priv_ip_name,
subnet=subnet_obj,
**ip_kwargs
)
]
network_security_group = None
if kwargs.get('security_group') is not None:
network_security_group = netconn.network_security_groups.get(
resource_group_name=kwargs['resource_group'],
network_security_group_name=kwargs['security_group'],
)
iface_params = NetworkInterface(
location=kwargs['location'],
network_security_group=network_security_group,
ip_configurations=ip_configurations,
)
poller = netconn.network_interfaces.create_or_update(
kwargs['resource_group'], kwargs['iface_name'], iface_params
)
try:
poller.wait()
except Exception as exc:
        log.warning('Network interface creation could not be polled. '
                    'It is likely that we are reusing an existing interface. (%s)', exc)
count = 0
while True:
try:
return _get_network_interface(kwargs['iface_name'], kwargs['resource_group'])
except CloudError:
count += 1
if count > 120:
raise ValueError('Timed out waiting for operation to complete.')
time.sleep(5)
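# Illustrative kwargs for create_network_interface (a sketch only -- the resource
# group, virtual network and subnet names are placeholders that must already exist):
#
#     {'name': 'myvm', 'resource_group': 'my-group', 'location': 'eastus',
#      'network': 'my-vnet', 'subnet': 'default', 'allocate_public_ip': True}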
def request_instance(vm_):
'''
Request a VM from Azure.
'''
compconn = get_conn(client_type='compute')
# pylint: disable=invalid-name
CachingTypes = getattr(
compute_models, 'CachingTypes'
)
# pylint: disable=invalid-name
DataDisk = getattr(
compute_models, 'DataDisk'
)
# pylint: disable=invalid-name
DiskCreateOptionTypes = getattr(
compute_models, 'DiskCreateOptionTypes'
)
# pylint: disable=invalid-name
HardwareProfile = getattr(
compute_models, 'HardwareProfile'
)
# pylint: disable=invalid-name
ImageReference = getattr(
compute_models, 'ImageReference'
)
# pylint: disable=invalid-name
LinuxConfiguration = getattr(
compute_models, 'LinuxConfiguration'
)
# pylint: disable=invalid-name
SshConfiguration = getattr(
compute_models, 'SshConfiguration'
)
# pylint: disable=invalid-name
SshPublicKey = getattr(
compute_models, 'SshPublicKey'
)
# pylint: disable=invalid-name
NetworkInterfaceReference = getattr(
compute_models, 'NetworkInterfaceReference'
)
# pylint: disable=invalid-name
NetworkProfile = getattr(
compute_models, 'NetworkProfile'
)
# pylint: disable=invalid-name
OSDisk = getattr(
compute_models, 'OSDisk'
)
# pylint: disable=invalid-name
OSProfile = getattr(
compute_models, 'OSProfile'
)
# pylint: disable=invalid-name
StorageProfile = getattr(
compute_models, 'StorageProfile'
)
# pylint: disable=invalid-name
VirtualHardDisk = getattr(
compute_models, 'VirtualHardDisk'
)
# pylint: disable=invalid-name
VirtualMachine = getattr(
compute_models, 'VirtualMachine'
)
# pylint: disable=invalid-name
VirtualMachineSizeTypes = getattr(
compute_models, 'VirtualMachineSizeTypes'
)
if vm_.get('driver') is None:
vm_['driver'] = 'azurearm'
if vm_.get('location') is None:
vm_['location'] = get_location()
if vm_.get('resource_group') is None:
vm_['resource_group'] = config.get_cloud_config_value(
'resource_group', vm_, __opts__, search_global=True
)
if vm_.get('name') is None:
vm_['name'] = config.get_cloud_config_value(
'name', vm_, __opts__, search_global=True
)
# pylint: disable=unused-variable
iface_data, public_ips, private_ips = create_network_interface(
call='action',
kwargs=vm_
)
vm_['iface_id'] = iface_data['id']
disk_name = '{0}-vol0'.format(vm_['name'])
vm_username = config.get_cloud_config_value(
'ssh_username', vm_, __opts__, search_global=True,
default=config.get_cloud_config_value(
'win_username', vm_, __opts__, search_global=True
)
)
ssh_publickeyfile_contents = None
ssh_publickeyfile = config.get_cloud_config_value(
'ssh_publickeyfile',
vm_,
__opts__,
search_global=False,
default=None
)
if ssh_publickeyfile is not None:
try:
with salt.utils.fopen(ssh_publickeyfile, 'r') as spkc_:
ssh_publickeyfile_contents = spkc_.read()
except Exception as exc:
raise SaltCloudConfigError(
"Failed to read ssh publickey file '{0}': "
"{1}".format(ssh_publickeyfile,
exc.args[-1])
)
disable_password_authentication = config.get_cloud_config_value(
'disable_password_authentication',
vm_,
__opts__,
search_global=False,
default=False
)
vm_password = config.get_cloud_config_value(
'ssh_password', vm_, __opts__, search_global=True,
default=config.get_cloud_config_value(
'win_password', vm_, __opts__, search_global=True
)
)
os_kwargs = {}
win_installer = config.get_cloud_config_value(
'win_installer', vm_, __opts__, search_global=True
)
if not win_installer and ssh_publickeyfile_contents is not None:
sshpublickey = SshPublicKey(
key_data=ssh_publickeyfile_contents,
path='/home/{0}/.ssh/authorized_keys'.format(vm_username),
)
sshconfiguration = SshConfiguration(
public_keys=[sshpublickey],
)
linuxconfiguration = LinuxConfiguration(
disable_password_authentication=disable_password_authentication,
ssh=sshconfiguration,
)
os_kwargs['linux_configuration'] = linuxconfiguration
if win_installer or (vm_password is not None and not disable_password_authentication):
        if not isinstance(vm_password, six.string_types):
raise SaltCloudSystemExit(
'The admin password must be a string.'
)
if len(vm_password) < 8 or len(vm_password) > 123:
raise SaltCloudSystemExit(
'The admin password must be between 8-123 characters long.'
)
complexity = 0
if any(char.isdigit() for char in vm_password):
complexity += 1
if any(char.isupper() for char in vm_password):
complexity += 1
if any(char.islower() for char in vm_password):
complexity += 1
if any(char in string.punctuation for char in vm_password):
complexity += 1
if complexity < 3:
raise SaltCloudSystemExit(
'The admin password must contain at least 3 of the following types: '
'upper, lower, digits, special characters'
)
os_kwargs['admin_password'] = vm_password
cloud_env = _get_cloud_environment()
storage_endpoint_suffix = cloud_env.suffixes.storage_endpoint
if isinstance(vm_.get('volumes'), six.string_types):
volumes = salt.utils.yaml.safe_load(vm_['volumes'])
else:
volumes = vm_.get('volumes')
data_disks = None
if isinstance(volumes, list):
data_disks = []
else:
volumes = []
lun = 0
luns = []
for volume in volumes:
if isinstance(volume, six.string_types):
volume = {'name': volume}
        volume.setdefault(
            'name',
            '{0}-datadisk{1}'.format(vm_['name'], six.text_type(lun))
        )
volume.setdefault(
'disk_size_gb',
volume.get(
'logical_disk_size_in_gb',
volume.get('size', 100)
)
)
# Old kwarg was host_caching, new name is caching
volume.setdefault('caching', volume.get('host_caching', 'ReadOnly'))
while lun in luns:
lun += 1
if lun > 15:
log.error('Maximum lun count has been reached')
break
volume.setdefault('lun', lun)
lun += 1
# The default vhd is {vm_name}-datadisk{lun}.vhd
if 'media_link' in volume:
volume['vhd'] = VirtualHardDisk(volume['media_link'])
del volume['media_link']
elif volume.get('vhd') == 'unmanaged':
volume['vhd'] = VirtualHardDisk(
'https://{0}.blob.{1}/vhds/{2}-datadisk{3}.vhd'.format(
vm_['storage_account'],
storage_endpoint_suffix,
vm_['name'],
volume['lun'],
),
)
elif 'vhd' in volume:
volume['vhd'] = VirtualHardDisk(volume['vhd'])
if 'image' in volume:
volume['create_option'] = DiskCreateOptionTypes.from_image
elif 'attach' in volume:
volume['create_option'] = DiskCreateOptionTypes.attach
else:
volume['create_option'] = DiskCreateOptionTypes.empty
data_disks.append(DataDisk(**volume))
if vm_['image'].startswith('http') or vm_.get('vhd') == 'unmanaged':
if vm_['image'].startswith('http'):
source_image = VirtualHardDisk(vm_['image'])
img_ref = None
else:
source_image = None
img_pub, img_off, img_sku, img_ver = vm_['image'].split('|')
img_ref = ImageReference(
publisher=img_pub,
offer=img_off,
sku=img_sku,
version=img_ver,
)
if win_installer:
os_type = 'Windows'
else:
os_type = 'Linux'
os_disk = OSDisk(
caching=CachingTypes.none,
create_option=DiskCreateOptionTypes.from_image,
name=disk_name,
vhd=VirtualHardDisk(
'https://{0}.blob.{1}/vhds/{2}.vhd'.format(
vm_['storage_account'],
storage_endpoint_suffix,
disk_name,
),
),
os_type=os_type,
image=source_image,
disk_size_gb=vm_.get('os_disk_size_gb')
)
else:
img_pub, img_off, img_sku, img_ver = vm_['image'].split('|')
source_image = None
os_type = None
os_disk = None
img_ref = ImageReference(
publisher=img_pub,
offer=img_off,
sku=img_sku,
version=img_ver,
)
userdata_file = config.get_cloud_config_value(
'userdata_file', vm_, __opts__, search_global=False, default=None
)
userdata = config.get_cloud_config_value(
'userdata', vm_, __opts__, search_global=False, default=None
)
userdata_template = config.get_cloud_config_value(
'userdata_template', vm_, __opts__, search_global=False, default=None
)
if userdata_file:
if os.path.exists(userdata_file):
with salt.utils.fopen(userdata_file, 'r') as fh_:
userdata = fh_.read()
if userdata and userdata_template:
userdata = salt.utils.cloud.userdata_template(__opts__, vm_, userdata)
custom_extension = None
if userdata is not None or userdata_file is not None:
try:
if win_installer:
publisher = 'Microsoft.Compute'
virtual_machine_extension_type = 'CustomScriptExtension'
type_handler_version = '1.8'
if userdata_file and userdata_file.endswith('.ps1'):
command_prefix = 'powershell -ExecutionPolicy Unrestricted -File '
else:
command_prefix = ''
else:
publisher = 'Microsoft.Azure.Extensions'
virtual_machine_extension_type = 'CustomScript'
type_handler_version = '2.0'
command_prefix = ''
settings = {}
if userdata:
settings['commandToExecute'] = userdata
elif userdata_file.startswith('http'):
settings['fileUris'] = [userdata_file]
settings['commandToExecute'] = command_prefix + './' + userdata_file[userdata_file.rfind('/')+1:]
custom_extension = {
'resource_group': vm_['resource_group'],
'virtual_machine_name': vm_['name'],
'extension_name': vm_['name'] + '_custom_userdata_script',
'location': vm_['location'],
'publisher': publisher,
'virtual_machine_extension_type': virtual_machine_extension_type,
'type_handler_version': type_handler_version,
'auto_upgrade_minor_version': True,
'settings': settings,
'protected_settings': None
}
except Exception as exc:
log.exception('Failed to encode userdata: %s', exc)
params = VirtualMachine(
location=vm_['location'],
plan=None,
hardware_profile=HardwareProfile(
vm_size=getattr(
VirtualMachineSizeTypes, vm_['size'].lower()
),
),
storage_profile=StorageProfile(
os_disk=os_disk,
data_disks=data_disks,
image_reference=img_ref,
),
os_profile=OSProfile(
admin_username=vm_username,
computer_name=vm_['name'],
**os_kwargs
),
network_profile=NetworkProfile(
network_interfaces=[
NetworkInterfaceReference(vm_['iface_id']),
],
),
)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args=__utils__['cloud.filter_event'](
'requesting',
vm_,
['name', 'profile', 'provider', 'driver']
),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
try:
vm_create = compconn.virtual_machines.create_or_update(
resource_group_name=vm_['resource_group'],
vm_name=vm_['name'],
parameters=params
)
vm_create.wait()
if custom_extension:
create_or_update_vmextension(kwargs=custom_extension)
except CloudError as exc:
_log_cloud_error('compute', exc.message)
try:
return show_instance(vm_['name'], call='action')
except CloudError:
return {}
def create(vm_):
'''
Create a single VM from a data dict.
'''
try:
if vm_['profile'] and config.is_profile_configured(
__opts__,
__active_provider_name__ or 'azurearm',
vm_['profile'],
vm_=vm_
) is False:
return False
except AttributeError:
pass
if vm_.get('bootstrap_interface') is None:
vm_['bootstrap_interface'] = 'public'
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event'](
'creating', vm_, ['name', 'profile', 'provider', 'driver']
),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'azurearm', vm_['driver']
)
location = get_location(vm_)
vm_['location'] = location
log.info('Creating Cloud VM %s in %s', vm_['name'], location)
request_instance(vm_=vm_)
def _query_node_data(name, bootstrap_interface):
'''
Query node data.
'''
data = show_instance(name, call='action')
ip_address = None
if len(data.keys()) == 0:
return False
if bootstrap_interface == 'public':
ip_address = data['public_ips'][0]
if bootstrap_interface == 'private':
ip_address = data['private_ips'][0]
if ip_address is None:
return False
return ip_address
try:
data = salt.utils.cloud.wait_for_ip(
_query_node_data,
update_args=(vm_['name'], vm_['bootstrap_interface'],),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=10),
interval_multiplier=config.get_cloud_config_value(
'wait_for_ip_interval_multiplier', vm_, __opts__, default=1),
)
except (
SaltCloudExecutionTimeout,
SaltCloudExecutionFailure,
SaltCloudSystemExit
) as exc:
try:
log.warning(exc)
finally:
raise SaltCloudSystemExit(six.text_type(exc))
vm_['ssh_host'] = data
if not vm_.get('ssh_username'):
vm_['ssh_username'] = config.get_cloud_config_value(
'ssh_username', vm_, __opts__
)
vm_['password'] = config.get_cloud_config_value(
'ssh_password', vm_, __opts__
)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
data = show_instance(vm_['name'], call='action')
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'%s\' VM creation details:\n%s',
vm_['name'],
pprint.pformat(data)
)
ret.update(data)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event'](
'created',
vm_, ['name', 'profile', 'provider', 'driver']
),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def destroy(name, call=None, kwargs=None): # pylint: disable=unused-argument
'''
Destroy a VM.
CLI Examples:
.. code-block:: bash
salt-cloud -d myminion
salt-cloud -a destroy myminion service_name=myservice
'''
if kwargs is None:
kwargs = {}
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
compconn = get_conn(client_type='compute')
node_data = show_instance(name, call='action')
if node_data['storage_profile']['os_disk'].get('managed_disk'):
vhd = node_data['storage_profile']['os_disk']['managed_disk']['id']
else:
vhd = node_data['storage_profile']['os_disk']['vhd']['uri']
ret = {name: {}}
log.debug('Deleting VM')
result = compconn.virtual_machines.delete(node_data['resource_group'], name)
result.wait()
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](
name,
__active_provider_name__.split(':')[0],
__opts__
)
cleanup_disks = config.get_cloud_config_value(
'cleanup_disks',
get_configured_provider(),
__opts__,
search_global=False,
default=False,
)
if cleanup_disks:
cleanup_vhds = kwargs.get(
'delete_vhd',
config.get_cloud_config_value(
'cleanup_vhds',
get_configured_provider(),
__opts__,
search_global=False,
default=False
)
)
if cleanup_vhds:
log.debug('Deleting vhd')
comps = vhd.split('/')
container = comps[-2]
blob = comps[-1]
ret[name]['delete_disk'] = {
'delete_disks': cleanup_disks,
'delete_vhd': cleanup_vhds,
'container': container,
'blob': blob,
}
if vhd.startswith('http'):
ret[name]['data'] = delete_blob(
kwargs={'container': container, 'blob': blob},
call='function'
)
else:
ret[name]['data'] = delete_managed_disk(
kwargs={'resource_group': node_data['resource_group'],
'container': container,
'blob': blob},
call='function'
)
cleanup_data_disks = kwargs.get(
'delete_data_disks',
config.get_cloud_config_value(
'cleanup_data_disks',
get_configured_provider(),
__opts__,
search_global=False,
default=False
)
)
if cleanup_data_disks:
log.debug('Deleting data_disks')
ret[name]['data_disks'] = {}
for disk in node_data['storage_profile']['data_disks']:
datavhd = disk.get('managed_disk', {}).get('id') or disk.get('vhd', {}).get('uri')
comps = datavhd.split('/')
container = comps[-2]
blob = comps[-1]
ret[name]['data_disks'][disk['name']] = {
'delete_disks': cleanup_disks,
'delete_vhd': cleanup_vhds,
'container': container,
'blob': blob,
}
if datavhd.startswith('http'):
ret[name]['data'] = delete_blob(
kwargs={'container': container, 'blob': blob},
call='function'
)
else:
ret[name]['data'] = delete_managed_disk(
kwargs={'resource_group': node_data['resource_group'],
'container': container,
'blob': blob},
call='function'
)
cleanup_interfaces = config.get_cloud_config_value(
'cleanup_interfaces',
get_configured_provider(),
__opts__,
search_global=False,
default=False
)
if cleanup_interfaces:
ret[name]['cleanup_network'] = {
'cleanup_interfaces': cleanup_interfaces,
'resource_group': node_data['resource_group'],
'data': [],
}
ifaces = node_data['network_profile']['network_interfaces']
for iface in ifaces:
resource_group = iface['id'].split('/')[4]
ret[name]['cleanup_network']['data'].append(
delete_interface(
kwargs={
'resource_group': resource_group,
'iface_name': iface['name'],
},
call='function',
)
)
return ret
def list_storage_accounts(call=None):
'''
List storage accounts within the subscription.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_storage_accounts function must be called with '
'-f or --function'
)
storconn = get_conn(client_type='storage')
ret = {}
try:
accounts_query = storconn.storage_accounts.list()
accounts = _paged_object_to_list(accounts_query)
for account in accounts:
ret[account['name']] = account
except CloudError as exc:
_log_cloud_error('storage', exc.message)
ret = {'Error': exc.message}
return ret
def _get_cloud_environment():
'''
Get the cloud environment object.
'''
cloud_environment = config.get_cloud_config_value(
'cloud_environment',
get_configured_provider(), __opts__, search_global=False
)
try:
cloud_env_module = importlib.import_module('msrestazure.azure_cloud')
cloud_env = getattr(cloud_env_module, cloud_environment or 'AZURE_PUBLIC_CLOUD')
except (AttributeError, ImportError):
raise SaltCloudSystemExit(
'The azure {0} cloud environment is not available.'.format(cloud_environment)
)
return cloud_env
def _get_block_blob_service(kwargs=None):
'''
Get the block blob storage service.
'''
resource_group = kwargs.get('resource_group') or config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
sas_token = kwargs.get('sas_token') or config.get_cloud_config_value(
'sas_token',
get_configured_provider(), __opts__, search_global=False
)
storage_account = kwargs.get('storage_account') or config.get_cloud_config_value(
'storage_account',
get_configured_provider(), __opts__, search_global=False
)
storage_key = kwargs.get('storage_key') or config.get_cloud_config_value(
'storage_key',
get_configured_provider(), __opts__, search_global=False
)
if not resource_group:
raise SaltCloudSystemExit(
'A resource group must be specified'
)
if not storage_account:
raise SaltCloudSystemExit(
'A storage account must be specified'
)
if not storage_key:
storconn = get_conn(client_type='storage')
storage_keys = storconn.storage_accounts.list_keys(resource_group, storage_account)
storage_keys = {v.key_name: v.value for v in storage_keys.keys}
storage_key = next(six.itervalues(storage_keys))
cloud_env = _get_cloud_environment()
endpoint_suffix = cloud_env.suffixes.storage_endpoint
return BlockBlobService(storage_account, storage_key,
sas_token=sas_token,
endpoint_suffix=endpoint_suffix)
def list_blobs(call=None, kwargs=None): # pylint: disable=unused-argument
'''
List blobs.
'''
if kwargs is None:
kwargs = {}
if 'container' not in kwargs:
raise SaltCloudSystemExit(
'A container must be specified'
)
storageservice = _get_block_blob_service(kwargs)
ret = {}
try:
for blob in storageservice.list_blobs(kwargs['container']).items:
ret[blob.name] = {
'blob_type': blob.properties.blob_type,
'last_modified': blob.properties.last_modified.isoformat(),
'server_encrypted': blob.properties.server_encrypted,
}
except Exception as exc:
        log.warning(six.text_type(exc))
return ret
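# Illustrative CLI call (the provider alias and container name are placeholders):
#
#     salt-cloud -f list_blobs my-azure-config container=vhds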
def delete_blob(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Delete a blob from a container.
'''
if kwargs is None:
kwargs = {}
if 'container' not in kwargs:
raise SaltCloudSystemExit(
'A container must be specified'
)
if 'blob' not in kwargs:
raise SaltCloudSystemExit(
'A blob must be specified'
)
storageservice = _get_block_blob_service(kwargs)
storageservice.delete_blob(kwargs['container'], kwargs['blob'])
return True
def delete_managed_disk(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Delete a managed disk from a resource group.
'''
compconn = get_conn(client_type='compute')
try:
compconn.disks.delete(kwargs['resource_group'], kwargs['blob'])
except Exception as exc:
log.error('Error deleting managed disk %s - %s', kwargs.get('blob'), six.text_type(exc))
return False
return True
def list_virtual_networks(call=None, kwargs=None):
'''
List virtual networks.
'''
if kwargs is None:
kwargs = {}
if call == 'action':
raise SaltCloudSystemExit(
            'The list_virtual_networks function must be called with '
'-f or --function'
)
netconn = get_conn(client_type='network')
resource_groups = list_resource_groups()
ret = {}
for group in resource_groups:
try:
networks = netconn.virtual_networks.list(
resource_group_name=group
)
except CloudError:
networks = {}
for network_obj in networks:
network = network_obj.as_dict()
ret[network['name']] = network
ret[network['name']]['subnets'] = list_subnets(
kwargs={'resource_group': group, 'network': network['name']}
)
return ret
def list_subnets(call=None, kwargs=None):
'''
List subnets in a virtual network.
'''
if kwargs is None:
kwargs = {}
if call == 'action':
raise SaltCloudSystemExit(
            'The list_subnets function must be called with '
'-f or --function'
)
netconn = get_conn(client_type='network')
resource_group = kwargs.get('resource_group') or config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
if not resource_group and 'group' in kwargs and 'resource_group' not in kwargs:
resource_group = kwargs['group']
if not resource_group:
raise SaltCloudSystemExit(
'A resource group must be specified'
)
if kwargs.get('network') is None:
kwargs['network'] = config.get_cloud_config_value(
'network', get_configured_provider(), __opts__, search_global=False
)
if 'network' not in kwargs or kwargs['network'] is None:
raise SaltCloudSystemExit(
'A "network" must be specified'
)
ret = {}
subnets = netconn.subnets.list(resource_group, kwargs['network'])
for subnet in subnets:
ret[subnet.name] = subnet.as_dict()
ret[subnet.name]['ip_configurations'] = {}
for ip_ in subnet.ip_configurations:
comps = ip_.id.split('/')
name = comps[-1]
ret[subnet.name]['ip_configurations'][name] = ip_.as_dict()
ret[subnet.name]['ip_configurations'][name]['subnet'] = subnet.name
ret[subnet.name]['resource_group'] = resource_group
return ret
def create_or_update_vmextension(call=None, kwargs=None): # pylint: disable=unused-argument
'''
.. versionadded:: Fluorine
Create or update a VM extension object "inside" of a VM object.
required kwargs:
.. code-block:: yaml
extension_name: myvmextension
virtual_machine_name: myvm
settings: {"commandToExecute": "hostname"}
optional kwargs:
.. code-block:: yaml
resource_group: < inferred from cloud configs >
location: < inferred from cloud configs >
publisher: < default: Microsoft.Azure.Extensions >
virtual_machine_extension_type: < default: CustomScript >
type_handler_version: < default: 2.0 >
auto_upgrade_minor_version: < default: True >
protected_settings: < default: None >
'''
if kwargs is None:
kwargs = {}
if 'extension_name' not in kwargs:
raise SaltCloudSystemExit(
'An extension name must be specified'
)
if 'virtual_machine_name' not in kwargs:
raise SaltCloudSystemExit(
'A virtual machine name must be specified'
)
compconn = get_conn(client_type='compute')
# pylint: disable=invalid-name
VirtualMachineExtension = getattr(
compute_models, 'VirtualMachineExtension'
)
resource_group = kwargs.get('resource_group') or config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
if not resource_group:
raise SaltCloudSystemExit(
'A resource group must be specified'
)
location = kwargs.get('location') or get_location()
if not location:
raise SaltCloudSystemExit(
'A location must be specified'
)
publisher = kwargs.get('publisher', 'Microsoft.Azure.Extensions')
virtual_machine_extension_type = kwargs.get('virtual_machine_extension_type', 'CustomScript')
type_handler_version = kwargs.get('type_handler_version', '2.0')
auto_upgrade_minor_version = kwargs.get('auto_upgrade_minor_version', True)
settings = kwargs.get('settings', {})
protected_settings = kwargs.get('protected_settings')
if not isinstance(settings, dict):
raise SaltCloudSystemExit(
'VM extension settings are not valid'
)
elif 'commandToExecute' not in settings and 'script' not in settings:
raise SaltCloudSystemExit(
'VM extension settings are not valid. Either commandToExecute or script must be specified.'
)
log.info('Creating VM extension %s', kwargs['extension_name'])
ret = {}
try:
params = VirtualMachineExtension(
location=location,
publisher=publisher,
virtual_machine_extension_type=virtual_machine_extension_type,
type_handler_version=type_handler_version,
auto_upgrade_minor_version=auto_upgrade_minor_version,
settings=settings,
protected_settings=protected_settings
)
poller = compconn.virtual_machine_extensions.create_or_update(
resource_group,
kwargs['virtual_machine_name'],
kwargs['extension_name'],
params
)
ret = poller.result()
ret = ret.as_dict()
except CloudError as exc:
_log_cloud_error('compute', 'Error attempting to create the VM extension: {0}'.format(exc.message))
ret = {'error': exc.message}
return ret
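# Illustrative CLI call (the provider alias and VM/extension names are placeholders):
#
#     salt-cloud -f create_or_update_vmextension my-azure-config \
#         virtual_machine_name=myvm extension_name=myvmextension \
#         settings='{"commandToExecute": "hostname"}'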
def stop(name, call=None):
'''
.. versionadded:: Fluorine
Stop (deallocate) a VM
CLI Examples:
.. code-block:: bash
salt-cloud -a stop myminion
'''
if call == 'function':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
compconn = get_conn(client_type='compute')
resource_group = config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
ret = {}
if not resource_group:
groups = list_resource_groups()
for group in groups:
try:
instance = compconn.virtual_machines.deallocate(
vm_name=name,
resource_group_name=group
)
instance.wait()
vm_result = instance.result()
ret = vm_result.as_dict()
break
except CloudError as exc:
if 'was not found' in exc.message:
continue
else:
ret = {'error': exc.message}
if not ret:
_log_cloud_error('compute', 'Unable to find virtual machine with name: {0}'.format(name))
ret = {'error': 'Unable to find virtual machine with name: {0}'.format(name)}
else:
try:
instance = compconn.virtual_machines.deallocate(
vm_name=name,
resource_group_name=resource_group
)
instance.wait()
vm_result = instance.result()
ret = vm_result.as_dict()
except CloudError as exc:
_log_cloud_error('compute', 'Error attempting to stop {0}: {1}'.format(name, exc.message))
ret = {'error': exc.message}
return ret
def start(name, call=None):
'''
.. versionadded:: Fluorine
Start a VM
CLI Examples:
.. code-block:: bash
salt-cloud -a start myminion
'''
if call == 'function':
raise SaltCloudSystemExit(
'The start action must be called with -a or --action.'
)
compconn = get_conn(client_type='compute')
resource_group = config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
ret = {}
if not resource_group:
groups = list_resource_groups()
for group in groups:
try:
instance = compconn.virtual_machines.start(
vm_name=name,
resource_group_name=group
)
instance.wait()
vm_result = instance.result()
ret = vm_result.as_dict()
break
except CloudError as exc:
if 'was not found' in exc.message:
continue
else:
ret = {'error': exc.message}
if not ret:
_log_cloud_error('compute', 'Unable to find virtual machine with name: {0}'.format(name))
ret = {'error': 'Unable to find virtual machine with name: {0}'.format(name)}
else:
try:
instance = compconn.virtual_machines.start(
vm_name=name,
resource_group_name=resource_group
)
instance.wait()
vm_result = instance.result()
ret = vm_result.as_dict()
except CloudError as exc:
_log_cloud_error('compute', 'Error attempting to start {0}: {1}'.format(name, exc.message))
ret = {'error': exc.message}
return ret
| 31.914229
| 131
| 0.585403
|
0c08e9bd195aba4c06a7aed6eed7031cc932355e
| 1,856
|
py
|
Python
|
tests/test_persona.py
|
manuelfel12/MiProyectoUniAndes
|
187efb581fe6c5bb229110dad8f87b01b910476d
|
[
"MIT"
] | null | null | null |
tests/test_persona.py
|
manuelfel12/MiProyectoUniAndes
|
187efb581fe6c5bb229110dad8f87b01b910476d
|
[
"MIT"
] | 1
|
2021-03-13T13:13:37.000Z
|
2021-03-13T13:13:37.000Z
|
tests/test_persona.py
|
manuelfel12/MiProyectoUniAndes
|
187efb581fe6c5bb229110dad8f87b01b910476d
|
[
"MIT"
] | null | null | null |
import unittest
import datetime
from src.mi_proyecto.persona import Persona
class PersonaTestCase(unittest.TestCase):
def setUp(self):
self.persona1 = Persona(nombre='Alejandra', edad=25)
self.persona2 = Persona(nombre='Diego', edad=22)
self.persona3 = Persona(nombre='Alejandra', edad=25)
self.persona4 = Persona(nombre='Diana', edad=25)
#Print()
self.grupo = [self.persona1, self.persona2, self.persona3]
def test_constructor(self):
self.assertEqual(self.persona1.dar_nombre(), 'Alejandra')
self.assertEqual(self.persona1.dar_edad(), 25)
def test_anio_nacimiento(self):
self.assertEqual(self.persona1.calcular_anio_nacimiento(True), datetime.datetime.now().year - 25)
self.assertNotEqual(self.persona1.calcular_anio_nacimiento(False), datetime.datetime.now().year - 25)
self.assertEqual(self.persona1.calcular_anio_nacimiento(False), datetime.datetime.now().year - 25 + 1)
self.assertNotEqual(self.persona1.calcular_anio_nacimiento(True), datetime.datetime.now().year - 25 + 1)
def test_asingacion(self):
self.persona2.asignar_edad(28)
self.persona2.asignar_nombre("Felipe")
self.assertFalse(self.persona2.dar_nombre()=='Diego')
self.assertFalse(self.persona2.dar_edad()==22)
self.assertTrue(self.persona2.dar_nombre()=='Felipe')
self.assertTrue(self.persona2.dar_edad()==28)
def test_objetos_iguales(self):
persona_nueva = self.persona1
self.assertIsNot(self.persona1, self.persona3)
self.assertIs(self.persona1, persona_nueva)
def test_elemento_en_conjunto(self):
self.assertIn(self.persona3, self.grupo)
self.assertNotIn(self.persona4, self.grupo)
def test_instancia_clase(self):
self.assertIsInstance(self.persona1, Persona)
self.assertNotIsInstance(self.grupo, Persona)
| 41.244444
| 110
| 0.727371
|
330ce4edecd87038aefb13070645974d118c26c7
| 18,618
|
py
|
Python
|
jeditor/main.py
|
Yadkee/orphans
|
1208a9cbd95ca7608da9855b27d717beaff1711e
|
[
"MIT"
] | null | null | null |
jeditor/main.py
|
Yadkee/orphans
|
1208a9cbd95ca7608da9855b27d717beaff1711e
|
[
"MIT"
] | null | null | null |
jeditor/main.py
|
Yadkee/orphans
|
1208a9cbd95ca7608da9855b27d717beaff1711e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tkinter as tk
from tkinter import ttk
import widgets
from struct import unpack_from, pack
from collections import OrderedDict
MONOSPACE = ("Consolas", 9)
ARIAL = ("Arial", 8)
OP_CODES = {}
with open("opcodes") as f:
for i in f.read().splitlines():
data = i.split(";")
OP_CODES[data[0].upper()] = data[1:]
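# A plausible line from the "opcodes" data file, inferred from how it is parsed and
# used below (the real file ships with the repo and may differ):
#   <hex opcode>;<mnemonic>;<operand count>:<operand names>;<short description>;<long description>
# e.g. "B6;invokevirtual;2:indexbyte1 indexbyte2;invoke instance method;Calls a method on an object..."
# would give OP_CODES["B6"] == ["invokevirtual", "2:indexbyte1 indexbyte2", ...].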
def fancy_newlines(s, maxPerLine=128):
s += " "
out = []
while s:
line, rest = s[:maxPerLine].rsplit(" ", 1)
out.append(line)
s = rest + s[maxPerLine:]
return "\n".join(out)
class App(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master, border=5)
w, h = 700, 500
master.geometry("%dx%d+100+100" % (w, h))
master.minsize(w, h)
self.entryVariable = tk.StringVar()
self.current = [0, 0]
self.filebutton = tk.Menubutton(self, text="File", bd=0, relief="flat",
activebackground="blue", activeforeground="white")
self.filemenu = tk.Menu(self.filebutton, tearoff=0)
self.notebook = ttk.Notebook(self)
self.constantPool = tk.Frame(self)
self.cpList = widgets.ScrolledListbox(
self.constantPool, font=MONOSPACE)
self.cpXscrollbar = widgets.AutoScrollbar(
self.constantPool, command=self.cpList.xview, orient="horizontal")
self.methods = tk.Frame(self)
self.methodFrame = tk.Frame(self.methods)
self.methodList = widgets.ScrolledListbox(
self.methodFrame, width=26, height=5)
self.methodLabelFrame = ttk.Labelframe(
self.methodFrame, text="Selected instruction")
self.methodEntry = tk.Entry(
self.methodLabelFrame, width=16, font=MONOSPACE, textvariable=self.entryVariable)
self.methodLabel1 = tk.Label(
self.methodLabelFrame, anchor="w", font=ARIAL)
self.methodLabel2 = tk.Label(
self.methodLabelFrame, anchor="w", font=ARIAL, justify="left")
kw = {"width": 8, "font": MONOSPACE, "activestyle": "dotbox"}
self.methodIndexes = tk.Listbox(self.methods, **kw)
self.methodCode = tk.Listbox(self.methods, **kw)
self.methodExplanation = tk.Listbox(self.methods, **kw)
self.methodXscrollbar = widgets.AutoScrollbar(
self.methods, command=self.methodExplanation.xview, orient="horizontal")
self.methodYscrollbar = widgets.AutoScrollbar(
self.methods, command=self.yview_methods)
self.filebutton.config(menu=self.filemenu)
self.filemenu.add_command(label="Open")
self.filemenu.add_command(label="Save")
self.notebook.add(self.constantPool, text="Constant Pool")
self.notebook.add(self.methods, text="Methods")
self.cpList.config(xscrollcommand=self.cpXscrollbar.set)
self.methodIndexes.config(yscrollcommand=self.yscroll_methods)
self.methodCode.config(yscrollcommand=self.yscroll_methods)
self.methodExplanation.config(
xscrollcommand=self.methodXscrollbar.set, yscrollcommand=self.yscroll_methods)
# app
self.columnconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
self.filebutton.grid(column=0, row=0, sticky="NSW")
self.notebook.grid(column=0, row=1, sticky="NSEW")
# cp
self.constantPool.columnconfigure(0, weight=1)
self.constantPool.rowconfigure(0, weight=1)
self.cpList.grid(column=0, row=0, sticky="NSEW")
self.cpXscrollbar.grid(column=0, row=1, sticky="NSEW")
# methods
self.methods.columnconfigure(2, weight=1)
self.methods.rowconfigure(1, weight=1)
self.methodFrame.columnconfigure(1, weight=1)
self.methodLabelFrame.columnconfigure(1, weight=1)
self.methodFrame.grid(column=0, columnspan=3, row=0, sticky="NSEW")
self.methodList.grid(column=0, row=0, sticky="NSEW")
self.methodLabelFrame.grid(column=1, row=0, sticky="NSEW")
self.methodEntry.grid(column=0, row=0, sticky="NSW")
self.methodLabel1.grid(column=1, row=0, sticky="NSEW")
self.methodLabel2.grid(column=0, columnspan=2, row=1, sticky="NSEW")
self.methodIndexes.grid(column=0, row=1, sticky="NSEW", padx=2, pady=5)
self.methodCode.grid(column=1, row=1, sticky="NSEW", padx=2, pady=5)
self.methodExplanation.grid(
column=2, row=1, sticky="NSEW", padx=2, pady=5)
self.methodXscrollbar.grid(
column=0, columnspan=3, row=2, sticky="NSEW")
self.methodYscrollbar.grid(column=3, row=0, rowspan=3, sticky="NSEW")
self.methodList.bind("<<ListboxSelect>>", self.select_method)
self.methodIndexes.bind("<<ListboxSelect>>", self.select_index)
self.methodCode.bind("<<ListboxSelect>>", self.select_index)
self.methodExplanation.bind("<<ListboxSelect>>", self.select_index)
self.methodEntry.bind("<Return>", self.update_code)
# To open a file do:
# self.f = File("CvcMinigame.xclass")
# self.refresh()
def yview_methods(self, *arg):
self.methodIndexes.yview(*arg)
self.methodCode.yview(*arg)
self.methodExplanation.yview(*arg)
def yscroll_methods(self, *arg):
self.methodIndexes.yview_moveto(arg[0])
self.methodCode.yview_moveto(arg[0])
self.methodExplanation.yview_moveto(arg[0])
self.methodYscrollbar.set(*arg)
def refresh(self):
self.code = []
self.cpList.delete(0, "end")
for a, i in enumerate(self.f.constantPool):
if a == 0:
continue
short_name = i[0][9:]
s = "{0:04X};{1}: {2}".format(a, short_name, self.f.format(a))
self.cpList.insert("end", s)
for method in self.f.methods:
name = self.f.cp(method[1])[1]
for attribute in method[4]:
if len(attribute) == 5:
code = parse_code(attribute[3])
break
self.methodList.insert("end", name)
self.code.append(code)
def select_method(self, _):
self.methodIndexes.delete(0, "end")
self.methodCode.delete(0, "end")
self.methodExplanation.delete(0, "end")
if self.methodList.curselection():
self.current[0] = self.methodList.curselection()[0]
a = 0
for line in self.code[self.current[0]]:
self.methodIndexes.insert("end", "%08X" % a)
self.methodCode.insert("end", " ".join(line))
op = OP_CODES[line[0]]
explanation = op[0].ljust(20, " ")
if "indexbyte" in op[1]:
explanation += " %s" % self.f.format(
int(line[1] + line[2], 16))
elif "index" in op[1]:
explanation += " %s" % self.f.format(int(line[1], 16))
elif "branchbyte" in op[1]:
explanation += " pos.%08X" % (a + int(line[1] + line[2], 16))
self.methodExplanation.insert("end", explanation)
a += len(line)
def select_index(self, e):
if e and e.widget.curselection():
self.current[1] = e.widget.curselection()[0]
self.methodIndexes.selection_set(self.current[1])
line = self.code[self.current[0]][self.current[1]]
op = OP_CODES[line[0]]
self.entryVariable.set(" ".join(line))
self.methodLabel1.config(text="[%s] %s" % (op[0], op[2]))
self.methodLabel2.config(text=fancy_newlines(op[3], 100))
def update_code(self, _):
code = []
for a, line in enumerate(self.code[self.current[0]]):
if a == self.current[1]:
line = self.methodEntry.get().split(" ")
code.extend(line)
raw = bytes.fromhex("".join(code))
for attribute in self.f.methods[self.current[0]][4]:
if len(attribute) == 5:
attribute[3] = raw
self.code[self.current[0]] = parse_code(raw)
self.select_method(None)
self.select_index(None)
class File():
def __init__(self, path):
self.path = path
self.info = OrderedDict()
with open(path, "rb") as f:
self.data = f.read()
self.parse()
self.unparse()
print(self.data == self.newData)
def cp(self, number_or_index):
try:
return "#%d" % number_or_index
except TypeError:
return self.constantPool[int(number_or_index[1:])]
def format(self, a):
constant = self.constantPool[a]
if constant[0] == "CONSTANT_String":
return "'%s'" % self.cp(constant[1])[1]
out = []
for j in constant[1:]:
if type(j) is str and j.startswith("#"):
j = self.format(int(j[1:]))
out.append(str(j))
return ", ".join(out).replace("/", ".")
def parse(self):
d = self.data
i = 0
self.info["magic_number"], self.info["minor_version"], self.info["major_version"], \
self.info["constant_pool_count"] = unpack_from(">IHHH", d, i)
i += 10
# ===========================================================================
self.constantPool = [None]
n = 1
while n < self.info["constant_pool_count"]:
tag = d[i]
i += 1
            if tag == 1:
                length = unpack_from(">H", d, i)[0]
                i += 2
                text = d[i:i + length]
                i += length
                self.constantPool.append(("CONSTANT_Utf8", text.decode()))
elif 3 <= tag <= 4:
name = ("CONSTANT_Integer", "CONSTANT_Float")[tag - 3]
value = unpack_from(">I", d, i)[0]
i += 4
self.constantPool.append((name, value))
elif 5 <= tag <= 6:
n += 1
name = ("CONSTANT_Long", "CONSTANT_Double")[tag - 5]
value = unpack_from(">Q", d, i)[0]
i += 8
self.constantPool.append((name, value))
elif tag == 7:
index = unpack_from(">H", d, i)[0]
i += 2
self.constantPool.append(("CONSTANT_Class", self.cp(index)))
elif tag == 8:
string_index = unpack_from(">H", d, i)[0]
i += 2
self.constantPool.append(
("CONSTANT_String", self.cp(string_index)))
elif 9 <= tag <= 11:
name = ("CONSTANT_Fieldref", "CONSTANT_Methodref",
"CONSTANT_InterfaceMethodref")[tag - 9]
class_index, name_and_type_index = unpack_from(">HH", d, i)
i += 4
self.constantPool.append(
(name, self.cp(class_index), self.cp(name_and_type_index)))
elif tag == 12:
name_index, descriptor_index = unpack_from(">HH", d, i)
i += 4
self.constantPool.append(
("CONSTANT_NameAndType", self.cp(name_index), self.cp(descriptor_index)))
elif tag == 15:
reference_kind = d[i]
i += 1
reference_index = unpack_from(">H", d, i)[0]
i += 2
self.constantPool.append(
("CONSTANT_MethodHandle", reference_kind, self.cp(reference_index)))
elif tag == 16:
descriptor_index = unpack_from(">H", d, i)[0]
i += 2
self.constantPool.append(
("CONSTANT_MethodType", self.cp(descriptor_index)))
elif tag == 18:
                bootstrap_method_attr_index, name_and_type_index = unpack_from(
                    ">HH", d, i)
i += 4
self.constantPool.append(("CONSTANT_InvokeDynamic", self.cp(
bootstrap_method_attr_index), self.cp(name_and_type_index)))
else:
raise Exception("!cp error [%d]" % tag)
n += 1
# ===========================================================================
self.info["access_flags"], self.info["this_class"], self.info["super_class"], \
self.info["interfaces_count"] = unpack_from(">HHHH", d, i)
i += 8
self.interfaces = []
for _ in range(self.info["interfaces_count"]):
self.interfaces.append(unpack_from(">H", d, i)[0])
i += 2
self.info["fields_count"] = unpack_from(">H", d, i)[0]
i += 2
self.fields, i = self.parse_fields(d, i, self.info["fields_count"])
self.info["methods_count"] = unpack_from(">H", d, i)[0]
i += 2
self.methods, i = self.parse_fields(d, i, self.info["methods_count"])
self.info["attributes_count"] = unpack_from(">H", d, i)[0]
i += 2
self.attributes, i = self.parse_attributes(
d, i, self.info["attributes_count"])
def parse_fields(self, d, i, count):
fields = []
for _ in range(count):
access_flags, name_index, descriptor_index, attributes_count = unpack_from(
">HHHH", d, i)
i += 8
attributes, i = self.parse_attributes(d, i, attributes_count)
fields.append((access_flags, self.cp(name_index), self.cp(
descriptor_index), attributes_count, attributes))
return fields, i
def parse_attributes(self, d, i, count):
attributes = []
for _ in range(count):
attribute_name_index = unpack_from(">H", d, i)[0]
i += 2
attribute_length = unpack_from(">I", d, i)[0]
i += 4
info = d[i:i + attribute_length]
if self.constantPool[attribute_name_index][1] == "Code":
max_stack, max_locals, code_length = unpack_from(">HHI", d, i)
code = d[i + 8:i + 8 + code_length]
attributes.append([self.cp(attribute_name_index), attribute_length,
(max_stack, max_locals), code, info[8 + code_length:]])
else:
attributes.append(
[self.cp(attribute_name_index), attribute_length, info])
i += attribute_length
return attributes, i
def unparse(self):
d = []
d.append(pack(">IHHH", self.info["magic_number"], self.info["minor_version"],
self.info["major_version"], self.info["constant_pool_count"]))
d.append(unparse_constant_pool(self.constantPool))
d.append(pack(">HHHH", self.info["access_flags"], self.info["this_class"],
self.info["super_class"], self.info["interfaces_count"]))
for i in self.interfaces:
d.append(pack(">H", i))
d.append(pack(">H", self.info["fields_count"]))
d.append(unparse_fields(self.fields))
d.append(pack(">H", self.info["methods_count"]))
d.append(unparse_fields(self.methods))
d.append(pack(">H", self.info["attributes_count"]))
d.append(unparse_attributes(self.attributes))
self.newData = b"".join(d)
def unparse_constant_pool(constantPool):
d = []
for constant in constantPool:
if constant is None:
continue
name = constant[0]
if name == "CONSTANT_Utf8":
d.append(b"\x01")
d.append(pack(">H", len(constant[1])))
d.append(constant[1].encode())
elif name == "CONSTANT_Integer":
d.append(b"\x03")
d.append(pack(">I", constant[1]))
elif name == "CONSTANT_Float":
d.append(b"\x04")
d.append(pack(">I", constant[1]))
elif name == "CONSTANT_Long":
d.append(b"\x05")
d.append(pack(">Q", constant[1]))
elif name == "CONSTANT_Double":
d.append(b"\x06")
d.append(pack(">Q", constant[1]))
elif name == "CONSTANT_Class":
d.append(b"\x07")
d.append(pack(">H", int(constant[1][1:])))
elif name == "CONSTANT_String":
d.append(b"\x08")
d.append(pack(">H", int(constant[1][1:])))
elif name == "CONSTANT_Fieldref":
d.append(b"\x09")
d.append(pack(">HH", int(constant[1][1:]), int(constant[2][1:])))
elif name == "CONSTANT_Methodref":
d.append(b"\x0A")
d.append(pack(">HH", int(constant[1][1:]), int(constant[2][1:])))
elif name == "CONSTANT_InterfaceMethodref":
d.append(b"\x0B")
d.append(pack(">HH", int(constant[1][1:]), int(constant[2][1:])))
elif name == "CONSTANT_NameAndType":
d.append(b"\x0C")
d.append(pack(">HH", int(constant[1][1:]), int(constant[2][1:])))
elif name == "CONSTANT_MethodHandle":
d.append(b"\x0F")
d.append(bytes([constant[1]]))
d.append(pack(">H", int(constant[2][1:])))
elif name == "CONSTANT_MethodType":
d.append(b"\x10")
d.append(pack(">H", int(constant[1][1:])))
elif name == "CONSTANT_InvokeDynamic":
d.append(b"\x12")
d.append(pack(">HH", int(constant[1][1:]), int(constant[2][1:])))
return b"".join(d)
def unparse_attributes(attributes):
d = []
for attribute in attributes:
d.append(pack(">H", int(attribute[0][1:])))
if len(attribute) == 5:
d.append(pack(">I", 8 + len(attribute[3]) + len(attribute[4])))
d.append(pack(">HHI", attribute[2][0],
attribute[2][1], len(attribute[3])))
d.append(attribute[3])
d.append(attribute[4])
else:
d.append(pack(">I", attribute[1]))
d.append(attribute[2])
return b"".join(d)
def unparse_fields(fields):
d = []
for field in fields:
d.append(pack(">HHHH", field[0], int(
field[1][1:]), int(field[2][1:]), field[3]))
d.append(unparse_attributes(field[4]))
return b"".join(d)
def parse_code(d):
lines = []
i = 0
while i < len(d):
h = "%02X" % d[i]
i += 1
opc = OP_CODES[h]
args = int(opc[1].split(":")[0])
lines.append([h] + list("%02X" % d[i + j] for j in range(args)))
i += args
return lines
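# Sketch of the output shape (hypothetical, since operand counts come from the
# opcodes file): parse_code(b"\x2a\xb6\x00\x01") would return something like
# [["2A"], ["B6", "00", "01"]] if 0x2A takes no operands and 0xB6 takes two.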
if __name__ == "__main__":
root = tk.Tk()
root.title("Bytecode editor")
app = App(root)
app.pack(expand=True, fill="both")
root.mainloop()
| 40.650655
| 93
| 0.544903
|
dfb317554b6dd1a25819504f9792e43e357aa661
| 578
|
py
|
Python
|
arch_2.py
|
sushantjha78/complexity_and_performance
|
cc8a26241f462c671c352f33998f0d3facaf8a75
|
[
"MIT"
] | null | null | null |
arch_2.py
|
sushantjha78/complexity_and_performance
|
cc8a26241f462c671c352f33998f0d3facaf8a75
|
[
"MIT"
] | null | null | null |
arch_2.py
|
sushantjha78/complexity_and_performance
|
cc8a26241f462c671c352f33998f0d3facaf8a75
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
class model(nn.Module):
def __init__(self):
super(model, self).__init__()
self.end = nn.Sequential(
nn.LeakyReLU(inplace = True),
nn.LazyLinear(32),
nn.Dropout(0.5),
nn.LeakyReLU(inplace = True),
nn.LazyLinear(10))
    # num_blocks is accepted but not used in this variant: the same head block is
    # applied once rather than being repeated/concatenated num_blocks times.
    def forward(self, X, num_blocks=1):
h = X.view(X.shape[0], -1)
h = self.end(h)
h = nn.functional.log_softmax(h, dim=1)
return h
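# Minimal usage sketch (illustrative, not part of the original file):
#   import torch
#   net = model()
#   out = net(torch.randn(4, 3, 32, 32))  # LazyLinear infers in_features on the first call
#   print(out.shape)                      # torch.Size([4, 10]) of log-probabilities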
| 27.52381
| 48
| 0.512111
|
a93d2fbf8fb42add76c8b490c6e3d7a9a8532325
| 182
|
py
|
Python
|
Class 11/11-Programs/Palindrome.py
|
edwardmasih/Python-School-Level
|
545e8fcd87f540be2bbf01d3493bd84dd5504739
|
[
"MIT"
] | null | null | null |
Class 11/11-Programs/Palindrome.py
|
edwardmasih/Python-School-Level
|
545e8fcd87f540be2bbf01d3493bd84dd5504739
|
[
"MIT"
] | null | null | null |
Class 11/11-Programs/Palindrome.py
|
edwardmasih/Python-School-Level
|
545e8fcd87f540be2bbf01d3493bd84dd5504739
|
[
"MIT"
] | null | null | null |
n=int(input("Enter a three digit Number :- "))
o=n%10
t=(n//10)%10
h=n//100
if o==h:
print ("Your number is Palindrome")
else:
print("Your number is NOT Palindrome")
| 20.222222
| 47
| 0.60989
|
215c1cdddf0ed5a746191607ecbfaa986c2a7cc4
| 4,576
|
py
|
Python
|
datahub/company/admin/utils.py
|
reupen/data-hub-api
|
d854188f4c45da0e89075add132a15bb1227ff79
|
[
"MIT"
] | null | null | null |
datahub/company/admin/utils.py
|
reupen/data-hub-api
|
d854188f4c45da0e89075add132a15bb1227ff79
|
[
"MIT"
] | 16
|
2020-04-01T15:25:35.000Z
|
2020-04-14T14:07:30.000Z
|
datahub/company/admin/utils.py
|
cgsunkel/data-hub-api
|
a92faabf73fb93b5bfd94fd465eafc3e29aa6d8e
|
[
"MIT"
] | null | null | null |
import functools
from django.contrib import messages as django_messages
from django.http import HttpResponseRedirect
from datahub.metadata.models import Country
def format_company_diff(dh_company, dnb_company):
"""
Format the Datahub and D&B companies for templates.
"""
def get_field(name):
return dh_company._meta.get_field(name)
def get_country(address):
country = address.get('country')
return None if country is None else Country.objects.get(id=country)
address = dnb_company.get('address') or {}
registered_address = dnb_company.get('registered_address') or {}
return {
get_field('name'): (
dh_company.name,
dnb_company.get('name'),
),
get_field('address_1'): (
dh_company.address_1,
address.get('line_1'),
),
get_field('address_2'): (
dh_company.address_2,
address.get('line_2'),
),
get_field('address_town'): (
dh_company.address_town,
address.get('town'),
),
get_field('address_county'): (
dh_company.address_county,
address.get('county'),
),
get_field('address_postcode'): (
dh_company.address_postcode,
address.get('postcode'),
),
get_field('address_country'): (
dh_company.address_country,
get_country(address),
),
get_field('registered_address_1'): (
dh_company.registered_address_1,
registered_address.get('line_1'),
),
get_field('registered_address_2'): (
dh_company.registered_address_2,
registered_address.get('line_2'),
),
get_field('registered_address_town'): (
dh_company.registered_address_town,
registered_address.get('town'),
),
get_field('registered_address_county'): (
dh_company.registered_address_county,
registered_address.get('county'),
),
get_field('registered_address_postcode'): (
dh_company.registered_address_postcode,
registered_address.get('postcode'),
),
get_field('registered_address_country'): (
dh_company.registered_address_country,
get_country(registered_address),
),
get_field('company_number'): (
dh_company.company_number,
dnb_company.get('company_number'),
),
get_field('trading_names'): (
', '.join(dh_company.trading_names),
', '.join(dnb_company.get('trading_names', [])),
),
get_field('website'): (
dh_company.website,
dnb_company.get('website'),
),
get_field('number_of_employees'): (
dh_company.number_of_employees,
dnb_company.get('number_of_employees'),
),
get_field('is_number_of_employees_estimated'): (
dh_company.is_number_of_employees_estimated,
dnb_company.get('is_number_of_employees_estimated'),
),
get_field('turnover'): (
dh_company.turnover,
dnb_company.get('turnover'),
),
get_field('is_turnover_estimated'): (
dh_company.is_turnover_estimated,
dnb_company.get('is_turnover_estimated'),
),
get_field('global_ultimate_duns_number'): (
dh_company.global_ultimate_duns_number,
dnb_company.get('global_ultimate_duns_number'),
),
}
def redirect_with_messages(func):
"""
Decorator that redirects to a given URL with one or more messages for the user in case of an
error.
"""
@functools.wraps(func)
def wrapper(model_admin, request, *args, **kwargs):
try:
return func(model_admin, request, *args, **kwargs)
except AdminException as exc:
messages = exc.messages
redirect_url = exc.redirect_url
for message in messages:
django_messages.add_message(request, django_messages.ERROR, message)
return HttpResponseRedirect(redirect_url)
return wrapper
class AdminException(Exception):
"""
Exception in an admin view. Contains the message to be displayed to the user and the
redirect_url.
"""
def __init__(self, messages, redirect_url):
"""
Initialise the AdminException.
"""
self.messages = messages
self.redirect_url = redirect_url
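# Illustrative use of the pieces above (hypothetical admin action, not part of this module):
#
#   @redirect_with_messages
#   def update_from_dnb(model_admin, request, object_id):
#       if dnb_lookup_failed:
#           raise AdminException(
#               ['Update failed.'],
#               redirect_url=f'/admin/company/company/{object_id}/change/',
#           )
#
# The decorator turns the exception into user-facing error messages plus a redirect.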
| 32.453901
| 96
| 0.60118
|
7c4e61fc011730797c0c59a1ed42afe60dab5476
| 14,942
|
py
|
Python
|
sdb/commands/stacks.py
|
shartse/sdb
|
5ad91838ae869b7a72b6fc15c0b8ec2a0d0c6c77
|
[
"Apache-2.0"
] | null | null | null |
sdb/commands/stacks.py
|
shartse/sdb
|
5ad91838ae869b7a72b6fc15c0b8ec2a0d0c6c77
|
[
"Apache-2.0"
] | null | null | null |
sdb/commands/stacks.py
|
shartse/sdb
|
5ad91838ae869b7a72b6fc15c0b8ec2a0d0c6c77
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2019 Delphix
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=missing-docstring
import argparse
from typing import Any, Dict, Iterable, List, Tuple
from collections import defaultdict
import drgn
from drgn.helpers.linux.list import list_for_each_entry
from drgn.helpers.linux.pid import for_each_task
import sdb
#
# Note: This is a rudimentary version of what the command could/should be.
#
# On the high-level, it could be a `Locator`, or something similar, where
# objects could be passed as input from the pipeline dispatching different
# methods depending on the type of object. E.g. input could be a namespace
# object and we print all the task structs within it, or it could be just
# a list of task structs passed from a previous command for filtering.
# Another option would be to decouple the stack listing, filtering, and
# pretty-printing functionality to independent SDB commands.
#
# There are also other lower-level usability improvements like supporting
# filtering by `function+offset` with the `-c` option, or by namespace ID
# using `-n <ID>`.
#
# Finally, the command lacks any support for userland targets.
#
# SDB is still in its early stages and hasn't been used enough for us to
# be clear which use cases really matter. In the meantime if we don't have
# anything that provides this functionality it won't be easy to do this
# exploration. The version below is a good enough for the time being
# providing some basic functionality and being our tracer bullet for
# future iterations.
#
class Stacks(sdb.Command):
"""
Print the stack traces for the active tasks / threads
By default, the command will aggregate similar call stacks
printing them in descending order of frequency. The output
includes the `struct task_struct` address, thread state, and
aggregation count.
Optionally, the command can filter stacks, displaying only
those that match a given thread state, containing a given
function, or belonging to a given kernel module.
EXAMPLES
Print the call stacks for all tasks
sdb> stacks
TASK_STRUCT STATE COUNT
==========================================
0xffff9521bb3c3b80 IDLE 394
__schedule+0x24e
schedule+0x2c
worker_thread+0xba
kthread+0x121
ret_from_fork+0x35
0xffff9521bb3cbb80 INTERRUPTIBLE 384
__schedule+0x24e
schedule+0x2c
smpboot_thread_fn+0x166
kthread+0x121
ret_from_fork+0x35
...
Print stacks containing functions from the zfs module
sdb> stacks -m zfs
TASK_STRUCT STATE COUNT
==========================================
0xffff952130515940 INTERRUPTIBLE 1
__schedule+0x24e
schedule+0x2c
cv_wait_common+0x11f
__cv_wait_sig+0x15
zthr_procedure+0x51
thread_generic_wrapper+0x74
kthread+0x121
ret_from_fork+0x35
...
Print stacks containing the l2arc_feed_thread function
sdb> stacks -c l2arc_feed_thread
TASK_STRUCT STATE COUNT
==========================================
0xffff9521b3f43b80 INTERRUPTIBLE 1
__schedule+0x24e
schedule+0x2c
schedule_timeout+0x15d
__cv_timedwait_common+0xdf
__cv_timedwait_sig+0x16
l2arc_feed_thread+0x66
thread_generic_wrapper+0x74
kthread+0x121
ret_from_fork+0x35
Print stacks of threads in the RUNNING state
sdb> stacks -t RUNNING
TASK_STRUCT STATE COUNT
==========================================
0xffff95214ff31dc0 RUNNING 1
"""
names = ["stacks"]
@classmethod
def _init_parser(cls, name: str) -> argparse.ArgumentParser:
parser = super()._init_parser(name)
parser.add_argument(
"-a",
"--all",
action="store_true",
help="list all threads for each unique stack trace" +
" instead of printing a single representative thread")
parser.add_argument(
"-c",
"--function",
help="only print threads whose stacks contains FUNCTION")
parser.add_argument(
"-m",
"--module",
help="only print threads whose stacks contain functions from MODULE"
)
parser.add_argument(
"-t",
"--tstate",
help="only print threads which are in TSTATE thread state")
parser.epilog = "TSTATE := [{:s}]".format(", ".join(
Stacks.TASK_STATES.values()))
return parser
#
# See include/linux/sched.h
#
TASK_STATES = {
0x00: "RUNNING",
0x01: "INTERRUPTIBLE",
0x02: "UNINTERRUPTIBLE",
0x04: "STOPPED",
0x08: "TRACED",
0x10: "DEAD",
0x20: "ZOMBIE",
0x40: "PARKED",
0x402: "IDLE",
}
#
# See man page of ps(1)
#
TASK_STATE_SHORTCUTS = {
"R": 0x00,
"S": 0x01,
"D": 0x02,
"T": 0x04,
"t": 0x08,
"X": 0x10,
"Z": 0x20,
}
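    # For example, `stacks -t R` behaves like `stacks -t RUNNING`; the shortcut is
    # resolved in _call() before filtering.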
@staticmethod
def task_struct_get_state(task: drgn.Object) -> str:
state = task.state.value_()
if state == 0x402:
return "IDLE"
exit_state = task.exit_state.value_()
return Stacks.TASK_STATES[(state | exit_state) & 0x7f]
#
# Unfortunately the drgn Symbol API does not specify the namelist
# that a symbol came from. As a result, we created the following
# function to implement the `-m` functionality. Whenever we filter
# by module name, we find the segment in memory where this module
# resides and do the matching based on the address of the function
# of the current frame.
#
@staticmethod
def find_module_memory_segment(mod_name: str) -> Tuple[int, int]:
"""
Looks for the segment in memory where `mod_name` is
loaded.
Returns:
(<base_offset>, <size>) if `mod_name` is found.
(-1, 0) otherwise.
"""
for mod in list_for_each_entry('struct module',
sdb.get_object('modules').address_of_(),
'list'):
if mod.name.string_().decode("utf-8") == mod_name:
return (mod.core_layout.base.value_(),
mod.core_layout.size.value_())
return (-1, 0)
def validate_args(self, args: argparse.Namespace) -> None:
if args.function:
try:
func = sdb.get_object(args.function)
except KeyError:
raise sdb.CommandError(
self.name,
"symbol '{:s}' does not exist".format(args.function))
if func.type_.kind != drgn.TypeKind.FUNCTION:
raise sdb.CommandError(
self.name, "'{:s}' is not a function".format(args.function))
task_states = Stacks.TASK_STATES.values()
task_states_lowercase = list(map(lambda x: x.lower(), task_states))
state_shortcuts = Stacks.TASK_STATE_SHORTCUTS
if args.tstate and not args.tstate.lower(
) in task_states_lowercase and not args.tstate in state_shortcuts:
raise sdb.CommandError(
self.name,
"'{:s}' is not a valid task state (acceptable states: {:s})".
format(args.tstate, ", ".join(task_states)))
if args.module and Stacks.find_module_memory_segment(
args.module)[0] == -1:
raise sdb.CommandError(
self.name,
"module '{:s}' doesn't exist or isn't currently loaded".format(
args.module))
def _call(self, objs: Iterable[drgn.Object]) -> Iterable[drgn.Object]:
# pylint: disable=too-many-locals
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
#
# As the exception explains the code that follows this statement
# only works for linux kernel targets (crash dumps or live systems).
# When support for userland is added we can factor the kernel code
# that follows into its own function and switch to the correct
# codepath depending on the target.
#
if not sdb.get_target_flags() & drgn.ProgramFlags.IS_LINUX_KERNEL:
raise sdb.CommandError(self.name,
"userland targets are not supported yet")
self.validate_args(self.args)
#
# Resolve TSTATE shortcut and/or sanitize it to standard uppercase
# notation if it exists.
#
if self.args.tstate:
if self.args.tstate in Stacks.TASK_STATE_SHORTCUTS:
self.args.tstate = Stacks.TASK_STATES[
Stacks.TASK_STATE_SHORTCUTS[self.args.tstate]]
else:
self.args.tstate = self.args.tstate.upper()
mod_start, mod_end = -1, -1
if self.args.module:
mod_start, mod_size = Stacks.find_module_memory_segment(
self.args.module)
assert mod_start != -1
mod_end = mod_start + mod_size
header = "{:<18} {:<16s}".format("TASK_STRUCT", "STATE")
if not self.args.all:
header += " {:>6s}".format("COUNT")
print(header)
print("=" * 42)
#
# We inspect and group the tasks by recording their state and
# stack frames once in the following loop. We do this because
# on live systems state can change under us, thus running
# something like sdb.get_prog().stack_trace(task) twice (once for
# grouping and once for printing) could yield different stack
# traces resulting into misleading output.
#
stack_aggr: Dict[Any, List[drgn.Object]] = defaultdict(list)
for task in for_each_task(sdb.get_prog()):
stack_key = [Stacks.task_struct_get_state(task)]
try:
for frame in sdb.get_prog().stack_trace(task):
stack_key.append(frame.pc)
except ValueError:
#
# Unwinding the stack of a running/runnable task will
# result in an exception. Since we expect some tasks to
# be running, we silently ignore this case, and move on.
#
# Unfortunately, the exception thrown in this case is a
# generic "ValueError" exception, so we may wind up
# masking other "ValueError" exceptions that are not due
# to unwinding the stack of a running task.
#
# We can't check the state of the task here, and verify
# it's in the "R" state, since that state can change in
# between the point where the "ValueError" exception was
# originally raised, and here where we'd verify the
# state of the task; i.e. it could have concurrently
# transitioned from running to some other state.
#
pass
stack_aggr[tuple(stack_key)].append(task)
for stack_key, tasks in sorted(stack_aggr.items(),
key=lambda x: len(x[1]),
reverse=True):
task_state = stack_key[0]
if self.args.tstate and self.args.tstate != task_state:
continue
stacktrace_info = ""
if self.args.all:
for task in tasks:
stacktrace_info += "{:<18s} {:<16s}\n".format(
hex(task.value_()), task_state)
else:
stacktrace_info += "{:<18s} {:<16s} {:6d}\n".format(
hex(tasks[0].value_()), task_state, len(tasks))
mod_match, func_match = False, False
#
# Note on the type-check being ignored:
# The original `stack_key` type is a list where the first
# element is a string and the rest of them are integers
# but this is not easily expressed in mypy, thus we ignore
# the assignment error below.
#
frame_pcs: List[int] = stack_key[1:] #type: ignore[assignment]
for frame_pc in frame_pcs:
if mod_start != -1 and mod_start <= frame_pc < mod_end:
mod_match = True
try:
sym = sdb.get_symbol(frame_pc)
func, offset = sym.name, frame_pc - sym.address
if self.args.function and self.args.function == func:
func_match = True
except LookupError:
func, offset = hex(frame_pc), 0x0
#
# As a potential future item, we may want to print
# the frame with the module where the pc/function
# belongs to. For example:
# txg_sync_thread+0x15e [zfs]
#
stacktrace_info += "{:18s}{}+{}\n".format("", func, hex(offset))
if mod_start != -1 and not mod_match:
continue
if self.args.function and not func_match:
continue
print(stacktrace_info)
return []
| 39.633952
| 80
| 0.545643
|
9d9384d58037ab02899807e2babceb56c1c5c3e6
| 10,815
|
py
|
Python
|
exps/centernet_pandaset.py
|
joonielee832/mmdet
|
552c09c6f82cab41427754102c7b8d1c36929552
|
[
"Apache-2.0"
] | null | null | null |
exps/centernet_pandaset.py
|
joonielee832/mmdet
|
552c09c6f82cab41427754102c7b8d1c36929552
|
[
"Apache-2.0"
] | null | null | null |
exps/centernet_pandaset.py
|
joonielee832/mmdet
|
552c09c6f82cab41427754102c7b8d1c36929552
|
[
"Apache-2.0"
] | null | null | null |
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.001,
step=[18, 24])
runner = dict(type='EpochBasedRunner', max_epochs=5)
checkpoint_config = dict(interval=5)
log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = 'checkpoints/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth'
resume_from = None
workflow = [('train', 1)]
opencv_num_threads = 0
mp_start_method = 'fork'
model = dict(
type='CenterNet',
backbone=dict(
type='ResNet',
depth=18,
norm_eval=False,
norm_cfg=dict(type='BN'),
init_cfg=dict(
type='Pretrained',
checkpoint='checkpoints/resnet18-f37072fd.pth')),
neck=dict(
type='CTResNetNeck',
in_channel=512,
num_deconv_filters=(256, 128, 64),
num_deconv_kernels=(4, 4, 4),
use_dcn=True),
bbox_head=dict(
type='CenterNetHead',
num_classes=27,
in_channel=64,
feat_channel=64,
loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0),
loss_wh=dict(type='L1Loss', loss_weight=0.1),
loss_offset=dict(type='L1Loss', loss_weight=1.0)),
train_cfg=None,
test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True, color_type='color'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(512, 512),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_pad_mode=None),
dict(type='Resize', img_scale=(512, 512), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'flip_direction',
'img_norm_cfg', 'border'),
keys=['img'])
])
]
dataset_type = 'PandaDataset'
data_root = '../data/'
data = dict(
samples_per_gpu=8,
workers_per_gpu=2,
train=dict(
type='PandaDataset',
classes=('Animals - Bird', 'Semi-truck', 'Personal Mobility Device',
'Temporary Construction Barriers', 'Car', 'Signs',
'Emergency Vehicle', 'Other Vehicle - Pedicab',
'Pedestrian with Object', 'Train', 'Other Vehicle - Uncommon',
'Cones', 'Rolling Containers', 'Construction Signs',
'Other Vehicle - Construction Vehicle', 'Tram / Subway',
'Towed Object', 'Animals - Other', 'Bus', 'Pedestrian',
'Motorcycle', 'Bicycle', 'Road Barriers', 'Pickup Truck',
'Medium-sized Truck', 'Motorized Scooter', 'Pylons'),
data_root='../data/',
ann_file='train/labels.json',
img_prefix='train/images',
pipeline=[
dict(
type='LoadImageFromFile', to_float32=True, color_type='color'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(512, 512),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_pad_mode=None),
dict(type='Resize', img_scale=(512, 512), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]),
val=dict(
type='PandaDataset',
classes=('Animals - Bird', 'Semi-truck', 'Personal Mobility Device',
'Temporary Construction Barriers', 'Car', 'Signs',
'Emergency Vehicle', 'Other Vehicle - Pedicab',
'Pedestrian with Object', 'Train', 'Other Vehicle - Uncommon',
'Cones', 'Rolling Containers', 'Construction Signs',
'Other Vehicle - Construction Vehicle', 'Tram / Subway',
'Towed Object', 'Animals - Other', 'Bus', 'Pedestrian',
'Motorcycle', 'Bicycle', 'Road Barriers', 'Pickup Truck',
'Medium-sized Truck', 'Motorized Scooter', 'Pylons'),
data_root='../data/',
ann_file='val/labels.json',
img_prefix='val/images',
pipeline=[
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
meta_keys=('filename', 'ori_shape', 'img_shape',
'pad_shape', 'scale_factor', 'flip',
'flip_direction', 'img_norm_cfg', 'border'),
keys=['img'])
])
]),
test=dict(
type='PandaDataset',
classes=('Animals - Bird', 'Semi-truck', 'Personal Mobility Device',
'Temporary Construction Barriers', 'Car', 'Signs',
'Emergency Vehicle', 'Other Vehicle - Pedicab',
'Pedestrian with Object', 'Train', 'Other Vehicle - Uncommon',
'Cones', 'Rolling Containers', 'Construction Signs',
'Other Vehicle - Construction Vehicle', 'Tram / Subway',
'Towed Object', 'Animals - Other', 'Bus', 'Pedestrian',
'Motorcycle', 'Bicycle', 'Road Barriers', 'Pickup Truck',
'Medium-sized Truck', 'Motorized Scooter', 'Pylons'),
data_root='../data/',
ann_file='val/labels.json',
img_prefix='val/images',
pipeline=[
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
meta_keys=('filename', 'ori_shape', 'img_shape',
'pad_shape', 'scale_factor', 'flip',
'flip_direction', 'img_norm_cfg', 'border'),
keys=['img'])
])
]))
custom_imports = dict(
imports=['mmdet.datasets.pandaset'], allow_failed_imports=False)
classes = ('Animals - Bird', 'Semi-truck', 'Personal Mobility Device',
'Temporary Construction Barriers', 'Car', 'Signs',
'Emergency Vehicle', 'Other Vehicle - Pedicab',
'Pedestrian with Object', 'Train', 'Other Vehicle - Uncommon',
'Cones', 'Rolling Containers', 'Construction Signs',
'Other Vehicle - Construction Vehicle', 'Tram / Subway',
'Towed Object', 'Animals - Other', 'Bus', 'Pedestrian',
'Motorcycle', 'Bicycle', 'Road Barriers', 'Pickup Truck',
'Medium-sized Truck', 'Motorized Scooter', 'Pylons')
evaluation = dict(metric='mAP')
work_dir = './exps'
seed = 0
auto_resume = False
gpu_ids = [0]
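# This config is typically consumed by MMDetection's standard entry points, e.g.
# (assuming a stock MMDetection checkout):
#   python tools/train.py exps/centernet_pandaset.py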
| 40.811321
| 89
| 0.503282
|
3a406b9675782cbd0e2f704d383ef244fc25ad9b
| 1,566
|
py
|
Python
|
find.py
|
caspii/domainfinder
|
b4d74c6f9e2017d2856f1ab5437db5a6f84d0596
|
[
"Apache-2.0"
] | 10
|
2015-09-16T19:12:40.000Z
|
2021-01-20T12:27:24.000Z
|
find.py
|
caspii/domainfinder
|
b4d74c6f9e2017d2856f1ab5437db5a6f84d0596
|
[
"Apache-2.0"
] | null | null | null |
find.py
|
caspii/domainfinder
|
b4d74c6f9e2017d2856f1ab5437db5a6f84d0596
|
[
"Apache-2.0"
] | 8
|
2015-07-28T13:33:18.000Z
|
2020-10-27T15:03:39.000Z
|
#!/usr/bin/env python
from time import sleep
import sys
try:
import whois
except ImportError:
print("ERROR: This script requires the python-whois module to run.")
print(" You can install it via 'pip install python-whois'")
sys.exit(0)
# Change top-level domain to check here
TLD = '.com'
# 1. Get prefixes and suffixes from input.txt
suffixes = []
prefixes = []
readingPrefixes = False
f = open('input.txt')
for l in f:
line = l.strip()
if line == '--prefixes':
readingPrefixes = True
continue
elif line == '--suffixes':
readingPrefixes = False
continue
elif not line:
continue # Ignore empty lines
if readingPrefixes:
prefixes.append(line)
else:
suffixes.append(line)
f.close()
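# Example input.txt layout (hypothetical contents, for illustration only):
#   --prefixes
#   cloud
#   data
#   --suffixes
#   hub
#   forge
# With TLD = '.com' this yields cloudhub.com, cloudforge.com, datahub.com and dataforge.com.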
# 2. create list of domains from prefixes and suffixes
domains = []
for pre in prefixes:
    for suff in suffixes:
        domains.append(pre + suff + TLD)
# 3. Get the list of domains already found to be free and remove them
checkeddomains = [line.strip() for line in open('free-domains.txt')]  # Strip out newlines too
for remove in checkeddomains:
try:
domains.remove(remove)
except ValueError:
pass # Ignore exceptions
# 4. Check list of domains and write to file
for domain in domains:
sleep(0.5) # Too many requests lead to incorrect responses
    print(' Checking: ' + domain, end='')  # end='' keeps the result on the same line
try:
w = whois.whois(domain)
print('\tTAKEN')
except whois.parser.PywhoisError:
# Exception means that the domain is free
print('\tFREE')
f = open('free-domains.txt', 'a')
f.write(domain + '\n')
f.close()
print("DONE!")
| 24.857143
| 92
| 0.709451
|
ea261b28eb1a028ec373a89221c0c296962e57ba
| 2,520
|
py
|
Python
|
saleor/graphql/middleware.py
|
sunilsrikumar/saleor
|
c9d147dba24ddef1def889852c229027bab767df
|
[
"BSD-3-Clause"
] | 1
|
2020-06-16T19:49:54.000Z
|
2020-06-16T19:49:54.000Z
|
saleor/graphql/middleware.py
|
gurupratap-matharu/saleor
|
c9d147dba24ddef1def889852c229027bab767df
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/graphql/middleware.py
|
gurupratap-matharu/saleor
|
c9d147dba24ddef1def889852c229027bab767df
|
[
"CC-BY-4.0"
] | 1
|
2019-09-15T02:19:10.000Z
|
2019-09-15T02:19:10.000Z
|
from typing import Optional
import opentracing as ot
import opentracing.tags as ot_tags
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.utils.functional import SimpleLazyObject
from graphql import ResolveInfo
from graphql_jwt.middleware import JSONWebTokenMiddleware
from ..app.models import App
from ..core.tracing import should_trace
from .views import API_PATH, GraphQLView
class JWTMiddleware(JSONWebTokenMiddleware):
def resolve(self, next, root, info, **kwargs):
request = info.context
if not hasattr(request, "user"):
request.user = AnonymousUser()
return super().resolve(next, root, info, **kwargs)
class OpentracingGrapheneMiddleware:
@staticmethod
def resolve(next_, root, info: ResolveInfo, **kwargs):
if not should_trace(info):
return next_(root, info, **kwargs)
operation = f"{info.parent_type.name}.{info.field_name}"
with ot.global_tracer().start_active_span(operation_name=operation) as scope:
span = scope.span
span.set_tag(ot_tags.COMPONENT, "graphql")
span.set_tag("graphql.parent_type", info.parent_type.name)
span.set_tag("graphql.field_name", info.field_name)
return next_(root, info, **kwargs)
def get_app(auth_token) -> Optional[App]:
qs = App.objects.filter(tokens__auth_token=auth_token, is_active=True)
return qs.first()
def app_middleware(next, root, info, **kwargs):
app_auth_header = "HTTP_AUTHORIZATION"
prefix = "bearer"
request = info.context
if request.path == API_PATH:
if not hasattr(request, "app"):
request.app = None
auth = request.META.get(app_auth_header, "").split()
if len(auth) == 2:
auth_prefix, auth_token = auth
if auth_prefix.lower() == prefix:
request.app = SimpleLazyObject(lambda: get_app(auth_token))
return next(root, info, **kwargs)
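# Example of the header shape the middleware above recognises (illustrative):
#   Authorization: Bearer <app auth token>
# Anything else (missing header, different prefix, extra parts) leaves request.app as None,
# and the lookup only happens for requests hitting API_PATH.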
def process_view(self, request, view_func, *args):
if hasattr(view_func, "view_class") and issubclass(
view_func.view_class, GraphQLView
):
request._graphql_view = True
if settings.ENABLE_DEBUG_TOOLBAR:
import warnings
try:
from graphiql_debug_toolbar.middleware import DebugToolbarMiddleware
except ImportError:
warnings.warn("The graphiql debug toolbar was not installed.")
else:
DebugToolbarMiddleware.process_view = process_view
| 32.727273
| 85
| 0.687302
|
51909c57cfc39e4485846a7835d0fd8a85621f6e
| 1,106
|
py
|
Python
|
azure/mgmt/network/v2016_12_01/models/application_gateway_ssl_policy.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1
|
2022-01-25T22:52:58.000Z
|
2022-01-25T22:52:58.000Z
|
azure/mgmt/network/v2016_12_01/models/application_gateway_ssl_policy.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
azure/mgmt/network/v2016_12_01/models/application_gateway_ssl_policy.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationGatewaySslPolicy(Model):
"""Application gateway SSL policy.
:param disabled_ssl_protocols: SSL protocols to be disabled on application
gateway. Possible values are: 'TLSv1_0', 'TLSv1_1', and 'TLSv1_2'.
:type disabled_ssl_protocols: list[str or
~azure.mgmt.network.v2016_12_01.models.ApplicationGatewaySslProtocol]
"""
_attribute_map = {
'disabled_ssl_protocols': {'key': 'disabledSslProtocols', 'type': '[str]'},
}
def __init__(self, disabled_ssl_protocols=None):
self.disabled_ssl_protocols = disabled_ssl_protocols
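# Illustrative construction (protocol names taken from the docstring above):
#   policy = ApplicationGatewaySslPolicy(disabled_ssl_protocols=['TLSv1_0', 'TLSv1_1'])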
| 36.866667
| 83
| 0.640145
|
452798a28c1de698467dde21b543e9dd2fd1282c
| 6,668
|
py
|
Python
|
software/scripts/programmer.py
|
eeproto/PiAVRProg
|
33914b3e30636d55c2985b684ae16c0c8e9bd2f4
|
[
"CC0-1.0"
] | null | null | null |
software/scripts/programmer.py
|
eeproto/PiAVRProg
|
33914b3e30636d55c2985b684ae16c0c8e9bd2f4
|
[
"CC0-1.0"
] | null | null | null |
software/scripts/programmer.py
|
eeproto/PiAVRProg
|
33914b3e30636d55c2985b684ae16c0c8e9bd2f4
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the PiAVRProg project
https://github.com/eeproto/PiAVRProg
This work is licensed under a Creative Commons Attribution-ShareAlike 4.0
International License http://creativecommons.org/licenses/by-sa/4.0/
Copyright (c) 2019 EE Proto LLC https://www.eeproto.com
Author: christian@eeproto.com
"""
import RPi.GPIO as GPIO
import time
import json
import os
import logging
import traceback
import signal
import sys
import led
import button
import avrdude
DEVICE_FILE_PATH = '/media/usb/device.json'
FLASH_FILE_PATH = '/media/usb/firmware.hex'
DEVICE_KEYS = ['H', 'L', 'E', 'type']
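# Example device.json matching DEVICE_KEYS (hypothetical values -- the fuse bytes and
# their exact string format depend on the target MCU and on the local avrdude wrapper):
#   {"type": "atmega328p", "L": "0xFF", "H": "0xDE", "E": "0x05"}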
SPI_DEVICE = '/dev/spidev0.0'
PROGRAMMER = 'linuxspi'
BAUDRATE_SLOW = '19200'
BITRATE_SLOW = '10'
BAUDRATE_FAST = '200000'
BITCLOCK_FAST = '1'
LOG_LEVEL = logging.INFO
VERSION = '0.2'
class PiProgrammer(object):
def __init__(self):
self.init_gpio()
        self.leds = {}
        self.x = 0  # rotating index used by led_cycle()
self.led_ready = led.LED(6)
self.leds['programming'] = led.LED(22)
self.leds['flash_pass'] = led.LED(17)
self.leds['flash_fail'] = led.LED(18)
self.leds['fuse_pass'] = led.LED(23)
self.leds['fuse_fail'] = led.LED(24)
self.buffer_switch = led.LED(5)
self.go_button = button.Button(27, self.button_pressed)
self.clear_leds()
self._alive = True
def init_gpio(self):
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
def led_cycle(self, *args):
l = list(self.leds.keys())
l.sort()
self.leds[l[self.x]].off()
self.x = (self.x + 1) % len(self.leds)
self.leds[l[self.x]].on()
def clear_leds(self):
for l in self.leds.values():
l.off()
self.led_ready.off()
def load_firmware(self):
device = {}
try:
logging.debug('reading device configuration from %s', DEVICE_FILE_PATH)
with open(DEVICE_FILE_PATH, 'r') as f:
device = json.load(f)
device_ok = (len(set(device.keys()) & set(DEVICE_KEYS)) == len(DEVICE_KEYS))
logging.debug('found %s keys in device file %s', len(DEVICE_KEYS), DEVICE_FILE_PATH)
except Exception as ex:
device_ok = False
logging.error('error loading device file: %s', ex)
try:
logging.debug('opening flash file %s', FLASH_FILE_PATH)
flash_size = os.path.getsize(FLASH_FILE_PATH)
logging.debug('found %s byte flash file %s', flash_size, FLASH_FILE_PATH)
flash_ok = (flash_size > 0)
except Exception as ex:
flash_ok = False
logging.error('error loading flash file: %s', ex)
return device, device_ok, flash_ok
def button_pressed(self, *args):
self.clear_leds()
if self._ready_to_go:
try:
logging.info('begin programming')
self.leds['programming'].on()
self.buffer_switch.on()
self.run_programming()
except avrdude.AvrDeviceNotRespondingError as ex:
logging.error('device not ready %s %s', type(ex), ex.args)
logging.debug(traceback.format_exc())
self.leds['fuse_fail'].blink(500, 500)
except avrdude.SignatureReadError as ex:
logging.error('can not read sig %s %s', type(ex), ex.args)
logging.debug(traceback.format_exc())
self.leds['flash_fail'].blink(500, 500)
except avrdude.SignatureDoesNotMatchError as ex:
logging.error('signature mismatch %s %s', type(ex), ex.args)
logging.debug(traceback.format_exc())
self.leds['fuse_pass'].blink(500, 500)
except Exception as ex:
logging.error('programming error %s %s', type(ex), ex.args)
logging.debug(traceback.format_exc())
finally:
self.buffer_switch.off()
self.leds['programming'].off()
def run_programming(self):
dude = avrdude.AvrDude(
self.device['type'],
SPI_DEVICE, PROGRAMMER, BAUDRATE_SLOW, BITRATE_SLOW, BAUDRATE_FAST, BITCLOCK_FAST)
read_signature = dude.verify_signature_and_fuses()
logging.info('device signature verified as %s', read_signature)
time.sleep(0.1)
read_fuses = dude.read_fuses()
fuses_same = (
self.device['E'] == read_fuses['E'] and
self.device['H'] == read_fuses['H'] and
self.device['L'] == read_fuses['L']
)
logging.info('device fuses read %s, same as device settings? %s', read_fuses, fuses_same)
if not fuses_same:
time.sleep(0.1)
fuses_ok = dude.write_fuses(
E=self.device['E'],
H=self.device['H'],
L=self.device['L'],
)
logging.info('fuse write done')
if fuses_ok:
self.leds['fuse_pass'].on()
self.leds['fuse_fail'].off()
else:
self.leds['fuse_pass'].off()
self.leds['fuse_fail'].on()
time.sleep(0.1)
flash_ok = dude.write_flash(FLASH_FILE_PATH)
logging.info('flash write result %s', flash_ok)
if flash_ok:
self.leds['flash_pass'].on()
self.leds['flash_fail'].off()
else:
self.leds['flash_pass'].off()
self.leds['flash_fail'].on()
def run(self):
while self._alive:
self.device, device_file_ok, flash_file_ok = self.load_firmware()
self._ready_to_go = device_file_ok and flash_file_ok
if self._ready_to_go:
self.led_ready.on()
else:
self.led_ready.blink(500, 500)
time.sleep(2)
logging.info('shut down')
def shutdown(self):
self._alive = False
self.clear_leds()
sys.exit(0)
def signal_received_handler(signum, frame):
logging.info("signal %s received in frame %s", signum, frame)
if signum in (signal.SIGINT, signal.SIGTERM):
global prog
prog.shutdown()
def main():
signal.signal(signal.SIGINT, signal_received_handler)
signal.signal(signal.SIGTERM, signal_received_handler)
signal.signal(signal.SIGHUP, signal_received_handler)
signal.signal(signal.SIGTSTP, signal_received_handler)
logging.basicConfig(level=LOG_LEVEL)
logging.info('PiAVR programmer version %s', VERSION)
global prog
prog = PiProgrammer()
prog.run()
if __name__ == '__main__':
main()
| 33.34
| 97
| 0.591032
|
2acf7a4ec5b6adb1955c8ab1566f03cf3f35e7d0
| 2,755
|
py
|
Python
|
yatube/posts/tests/test_urls.py
|
DeffronMax/hw05_test
|
f3c271dfcb60a2a96fa8ab98ee3e88fa351ca8b5
|
[
"MIT"
] | null | null | null |
yatube/posts/tests/test_urls.py
|
DeffronMax/hw05_test
|
f3c271dfcb60a2a96fa8ab98ee3e88fa351ca8b5
|
[
"MIT"
] | null | null | null |
yatube/posts/tests/test_urls.py
|
DeffronMax/hw05_test
|
f3c271dfcb60a2a96fa8ab98ee3e88fa351ca8b5
|
[
"MIT"
] | null | null | null |
from http import HTTPStatus
from django.contrib.auth import get_user_model
from django.test import TestCase, Client
from django.urls import reverse
from ..models import Post, Group
User = get_user_model()
class URLTests(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.author = User.objects.create_user(username="Author")
cls.not_author = User.objects.create_user(username="NotAuthor")
cls.group = Group.objects.create(title="Group", slug='slug')
cls.text = "test text"
cls.post = Post.objects.create(text=cls.text, author=cls.author)
cls.templates_url_names = {
'base.html': '/',
'posts/group_list.html': f'/group/{cls.group.slug}/',
'posts/create_post.html': '/create/',
'posts/profile.html': f'/profile/{cls.author.username}/',
'posts/post_detail.html': f'/posts/{cls.post.id}/'
}
cls.group_name = Group.objects.create(
title="test_group"
)
def setUp(self):
self.guest_client = Client()
self.authorized_client = Client()
self.authorized_client.force_login(self.author)
self.not_author_client = Client()
self.not_author_client.force_login(self.not_author)
def test_anon_user(self):
"""Гость ошибка редактирования"""
for template, adress in self.templates_url_names.items():
with self.subTest(adress=adress):
if adress == reverse('posts:post_create'):
response = self.guest_client.get(adress)
self.assertEqual(response.status_code, HTTPStatus.FOUND)
else:
response = self.guest_client.get(adress)
self.assertEqual(response.status_code, HTTPStatus.OK)
response = self.guest_client.get(f'/posts/{self.post.id}/edit/')
self.assertEqual(response.status_code, HTTPStatus.FOUND)
def test_author_user(self):
"""автор редактирование"""
for template, adress in self.templates_url_names.items():
with self.subTest(adress=adress):
response = self.authorized_client.get(adress)
self.assertEqual(response.status_code, HTTPStatus.OK)
response = self.authorized_client.get(f'/posts/{self.post.id}/edit/')
self.assertEqual(response.status_code, HTTPStatus.OK)
def test_wrong_url_returns_404(self):
response = self.client.get('/non-exist-page/')
self.assertEqual(response.status_code, 404)
def test_404_page_right_template(self):
response = self.client.get('/non-exist-page/')
template = 'core/404.html'
self.assertTemplateUsed(response, template)
| 39.927536
| 77
| 0.641379
|
694e86b5292a70226ebf67994825acec91064f3f
| 38,401
|
py
|
Python
|
plot/chi_msd.py
|
yketa/UBC---Spring-2018---code
|
b065544639a483dda48cda89bcbb11c1772232aa
|
[
"MIT"
] | 1
|
2021-12-15T13:38:13.000Z
|
2021-12-15T13:38:13.000Z
|
plot/chi_msd.py
|
yketa/UBC---Spring-2018---code
|
b065544639a483dda48cda89bcbb11c1772232aa
|
[
"MIT"
] | 1
|
2019-05-25T20:00:17.000Z
|
2019-05-25T20:00:17.000Z
|
plot/chi_msd.py
|
yketa/UBC---Spring-2018---code
|
b065544639a483dda48cda89bcbb11c1772232aa
|
[
"MIT"
] | 1
|
2020-01-22T17:05:18.000Z
|
2020-01-22T17:05:18.000Z
|
"""
Module chi_msd plots cooperativities, maximum cooperativities, times of
maximum cooperativities and mean square displacements for either different
persistence times at fixed self-propelling velocity or different
self-propelling velocities at fixed persistence time.
Simulation directories must follow the active_particles.naming.AHB2D naming
standard and input files in simulation directories must follow either the
active_particles.naming.Cuu standard (for cooperativities from displacement
correlations), or active_particles.naming.Cww standard (for cooperativities
from displacement relative to centre of mass displacement correlations), or
active_particles.naming.Cdd standard (for cooperativities from displacement
norm correlations), or the active_particles.naming.Cee standard (for
cooperativities from displacement direction correlations), and the
active_particles.naming.Msd standard (for mean square displacements).
Environment modes
-----------------
VARIABLE : string
Plot of maximum cooperativities and times of maximum cooperativity
x-coordinate variable.
_____________________________________________________________________
| Mode | Variable | x-coordinate if not(PECLET) |
|_________|_____________________________|_____________________________|
| 'dr' | Rotation diffusion constant | \\tau = 1/dr |
|_________|_____________________________|_____________________________|
| 'vzero' | self-propelling velocity | vzero |
|_________|_____________________________|_____________________________|
DEFAULT: dr
PECLET : bool
Plot maximum cooperativities and times of maximum cooperativity as
functions of the Péclet number Pe = vzero/dr.
DEFAULT: True
CORRELATION : string
Correlations from which to calculate cooperativities.
_____________________________________________________________
| Mode | Correlations |
|______|______________________________________________________|
| Cuu | displacement |
|______|______________________________________________________|
| Cww | displacement relative to centre of mass displacement |
|______|______________________________________________________|
| Cdd | displacement norm |
|______|______________________________________________________|
| Cee | displacement direction |
|______|______________________________________________________|
DEFAULT: Cuu
DRDT : bool
Use the product of the rotation diffusion constant and lag time rather
than the bare lag time.
DEFAULT: True
MSDDT : bool
Divide mean square displacement by lag time.
DEFAULT: True
FIT : bool
Fit maximum cooperativity and time of maximum cooperativity as power law of
    their x-coordinates, on both sides of the variable transition value VAR_C.
DEFAULT: False
Environment parameters
----------------------
DATA_DIRECTORY : string
Data directory.
DEFAULT: active_particles.naming.sim_directory
EXCLUDE : string
Simulation directories in DATA_DIRECTORY to exclude from the plots.
DEFAULT:
PARAMETERS_FILE : string
Simulation parameters file name.
DEFAULT: active_particles.naming.parameters_file
DENSITY : float
Packing fraction of particles.
DEFAULT: active_particles.plot.chi_msd._density
N : int
Number of particles.
DEFAULT: active_particles.plot.chi_msd._N
VZERO ['dr' mode] : float
Self-propulsion velocity.
DEFAULT: active_particles.plot.chi_msd._vzero
DR_MIN ['dr' mode] : float
Minimum rotation diffusion constant.
    NOTE: Rotation diffusion constants less than DR_MIN will appear in the
mean square displacement figure but not in the maximum cooperativity
figure.
DEFAULT: active_particles.plot.chi_msd._dr_min
DR_MAX ['dr' mode] : float
Maximum rotation diffusion constant.
NOTE: Rotation diffusion constants greater than DR_MAX will appear in the
mean square displacement figure but not in the maximum cooperativity
figure.
DEFAULT: active_particles.plot.chi_msd._dr_max
DR_C ['dr' and FIT mode] : float
Transition rotation diffusion constant.
DEFAULT: active_particles.plot.chi_msd._dr_c
DR ['vzero' mode] : float
Rotation diffusion constant.
DEFAULT: active_particles.plot.chi_msd._dr
VZERO_MIN ['vzero' mode] : float
Minimum self-propulsion velocity.
    NOTE: Self-propelling velocities less than VZERO_MIN will appear in the
mean square displacement figure but not in the maximum cooperativity
figure.
DEFAULT: active_particles.plot.chi_msd._vzero_min
VZERO_MAX ['vzero' mode] : float
Maximum self-propulsion velocity.
    NOTE: Self-propelling velocities greater than VZERO_MAX will appear in the
mean square displacement figure but not in the maximum cooperativity
figure.
DEFAULT: active_particles.plot.chi_msd._vzero_max
VZERO_C ['vzero' and FIT mode] : float
Transition self-propelling velocity.
DEFAULT: active_particles.plot.chi_msd._vzero_c
BOX_SIZE : float
Size of the square box which was considered.
DEFAULT: simulation box size
X_ZERO : float
1st coordinate of the centre of the square box to consider.
DEFAULT: 0
Y_ZERO : float
2nd coordinate of the centre of the square box to consider.
DEFAULT: 0
INITIAL_FRAME_COR : int
Frame to consider as initial for correlations.
DEFAULT: active_particles.plot.chi_msd._init_frame_cor
INTERVAL_MAXIMUM_COR : int
Maximum number of intervals of length dt considered for correlations.
DEFAULT: active_particles.plot.chi_msd._int_max_cor
N_CASES : int
Number of boxes in each direction with which the displacement grid was
computed.
DEFAULT: active_particles.plot.chi_msd._Ncases_cor
INITIAL_FRAME_MSD : int separated by ':'
Display only mean square displacements calculated with initial frame from
this list except if this list is empty.
DEFAULT: []
INTERVAL_MAXIMUM_MSD : int
Maximum number of intervals of length dt considered for mean square
displacements calculation.
DEFAULT: active_particles.plot.chi_msd._int_max_msd
INTERVAL_PERIOD : int
Period of dumps for which mean square displacements were calculated.
DEFAULT: active_particles.plot.chi_msd._int_period_msd
R_MIN : float
Minimum radius for correlations integration.
DEFAULT: active_particles.plot.chi_msd._r_min
R_MAX : float
Maximum radius for correlations integration.
DEFAULT: active_particles.plot.chi_msd._r_max
FONT_SIZE : int
Plot font size.
DEFAULT: active_particles.plot.chi_msd._font_size
MARKER_SIZE : int
Plot marker size.
DEFAULT: active_particles.plot.chi_msd._marker_size
COLORMAP : string
Plot colormap.
DEFAULT: active_particles.plot.chi_msd._colormap
COLORMAP0 : string
Plot colormap for variables out of variable window.
DEFAULT: active_particles.plot.chi_msd._colormap0
RATIO_LEGEND : float
Width ratio between legend and figure.
DEFAULT: active_particles.plot.chi_msd._ratio_legend
NCOL_LEGEND : int
Number of columns for the legend.
DEFAULT: active_particles.plot.chi_msd._ncol_legend
WSPACE : float
Plots width space.
DEFAULT: active_particles.plot.chi_msd._wspace
HSPACE : float
Plots height space.
DEFAULT: active_particles.plot.chi_msd._hspace
CHI_XS : string
Cooperativity plot x-scale.
DEFAULT: active_particles.plot.chi_msd._chi_xs
CHI_YS : string
Cooperativity plot y-scale.
DEFAULT: active_particles.plot.chi_msd._chi_ys
X_SCALE : string
Maximum cooperativity and time of maximum cooperativity plots x-scale.
DEFAULT: active_particles.plot.chi_msd._x_scale
DTMAX_YS : string
Time of maximum cooperativity plot y-scale.
DEFAULT: active_particles.plot.chi_msd._dtmax_ys
CHIMAX_YS : string
Maximum cooperativity plot y-scale.
DEFAULT: active_particles.plot.chi_msd._chimax_ys
MSD_XS : string
Mean square displacement plot x-scale.
DEFAULT: active_particles.plot.chi_msd._msd_xs
MSD_YS : string
Mean square displacement plot y-scale.
DEFAULT: active_particles.plot.chi_msd._msd_ys
"""
import active_particles.naming as naming
from active_particles.init import get_env, get_env_list, dir_list
from os import environ as envvar
if __name__ == '__main__': envvar['SHOW'] = 'True'
from os.path import join as joinpath
from active_particles.plot.plot import list_colormap, list_markers,\
list_linestyles
from active_particles.analysis.cuu import c1Dtochi
from collections import OrderedDict
import pickle
import numpy as np
from scipy import stats as st
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.lines import Line2D
# DEFAULT VARIABLES
_dr = 3e-4 # default rotation diffusion constant
_dr_min = 1e-5 # default minimum diffusion rotation constant
_dr_max = 1e-2 # default maximum diffusion rotation constant
_dr_c = 3e-4 # default transition diffusion rotation constant
_vzero = 1e-2 # default self-propelling velocity
_vzero_min = 1e-2 # default minimum self-propelling velocity
_vzero_max = 1e-1 # default maximum self-propelling velocity
_vzero_c = 5e-1 # default transition self-propelling velocity
_density = 0.8 # default packing fraction of particles
_N = int(1e5) # default number of particles
_init_frame_cor = 0 # default frame to consider as initial for correlations
_int_max_cor = 1 # default maximum number of intervals of length dt considered for correlations
_Ncases_cor = 500 # default number of boxes in each direction with which the displacement grid is computed
_int_max_msd = 1 # default maximum number of intervals of length dt considered for mean square displacements
_int_period_msd = 1 # default period of dumps for which mean square displacements were calculated
_r_min = 1 # default minimum radius for correlations integration
_r_max = 20 # default maximum radius for correlations integration
_font_size = 15 # default font size for the plot
_marker_size = 5 # default plot marker size
_colormap = 'jet' # default plot colormap
_colormap0 = 'Greys' # default plot colormap for variables out of variable window
_ncol_legend = 2 # default number of legend columns
_ratio_legend = 10 # default width ratio between graph and legend
_wspace = 0.4 # default plots width space
_hspace = 0.05 # default plots height space
_chi_xs = 'log' # default cooperativity plot x-scale
_chi_ys = 'log' # default cooperativity plot y-scale
_x_scale = 'log' # default x-scale
_dtmax_ys = 'log' # default time of maximum cooperativity plot y-scale
_chimax_ys = 'log' # default maximum cooperativity plot y-scale
_msd_xs = 'log' # default mean square displacement plot x-scale
_msd_ys = 'log' # default mean square displacement plot y-scale
# FUNCTIONS AND CLASSES
class ChiMsd:
"""
Search and read correlations files, calculate cooperativities.
Also search and read mean square displacement files.
"""
def __init__(self, data_dir, dir_standard, dir_attributes, parameters_file,
var, var_min, var_max, excluded_dir=''):
"""
Create list of directories to consider and compute plot variable values
associated to them.
Parameters
----------
data_dir : string
Data directory.
dir_standard : active_particles.naming._File standard
Simulation directory naming object.
dir_attributes : hash table
Attributes to be displayed in directory names.
parameters_file : string
Simulations parameters file name.
var : string
Plot variable name.
var_min : float
Minimum plot variable value.
var_max : float
Maximum plot variable value.
excluded_dir : string
Names of directories to be ignored. (default: '')
"""
self.data_dir = data_dir
self.dir_standard = dir_standard
self.dir_attributes = dir_attributes
self.excluded_dir = excluded_dir
self.parameters_file = parameters_file
self.var = var
self.var_min = var_min
self.var_max = var_max
(self.dirs, self.var_hash, self.var_list, self.var0_list,
self.isinvarinterval) = dir_list(
self.data_dir, self.dir_standard, self.dir_attributes,
self.var, self.var_min, self.var_max,
self.parameters_file, excluded_dir=self.excluded_dir,
include_out=True)
self.calculate_msd = False # calculate mean square displacements with cooperativities
def calculate(self, cor_standard, cor_attributes, r_min, r_max,
box_size=None, multiply_with_dr=True):
"""
Calculate cooperativities, maximum cooperativities and times of
        maximum cooperativities.
Also calculates mean square displacements.
Parameters
----------
cor_standard : active_particles.naming._File standard
Correlation files naming object.
cor_attributes : hash table
Attributes to be displayed in correlation file names.
r_min : float
Minimum radius for correlations integration.
r_max : float
Maximum radius for correlations integration.
box_size : float or None
Size of the square box which was considered. (default: None)
NOTE: if None, then the size is taken as the simulation box size.
multiply_with_dr : bool
Consider the product of rotation diffusion constant and lag time
rather than just lag time. (default: True)
"""
self.cor_standard = cor_standard
self.cor_attributes = cor_attributes
self.r_min = r_min
self.r_max = r_max
self.box_size = box_size
self.multiply_with_dr = multiply_with_dr
self.time_step = {} # hash table of directories' simulation time step
self.chi = {} # hash table of list of lag times and corresponding cooperativities
self.dtmax = {} # hash table of time of maximum cooperativities
self.chimax = {} # hash table of maximum cooperativities
self.islocalmax = {} # hash table of booleans indicating if the time of maximum cooperativity is a local maximum
for dir in self.dirs:
# COOPERATIVITY
with open(
joinpath(self.data_dir, dir, self.parameters_file), 'rb')\
as param_file:
parameters = pickle.load(param_file) # simulation parameters hash table
            if self.box_size is None: L = parameters['box_size']
else: L = self.box_size # box size
pdts = parameters['period_dump']*parameters['time_step'] # time corresponding to one dump length of time
if self.multiply_with_dr: pdts *= parameters['dr'] # plot dr*dt rather than dt
chidir = [] # list of lag times and corresponding cooperativities for current directory
for cor_filename in self.cor_standard.get_files(
directory=joinpath(self.data_dir, dir), **self.cor_attributes): # loop over correlations files in directory
with open(joinpath(self.data_dir, dir, cor_filename), 'rb')\
as cor_file:
c1D = pickle.load(cor_file)[1] # 1D correlation
chidir += [[
pdts*self.cor_standard.get_data(cor_filename, 'dt')[0],
c1Dtochi(c1D, L, r_min=self.r_min, r_max=self.r_max)]] # cooperativity
if not(chidir): continue # no cooperativities files with requested attributes
self.time_step[dir] = parameters['time_step'] # simulation time step
self.dtmax[dir], self.chimax[dir] = max(chidir,
key=lambda el: el[1]) # time of maximum cooperativity and maximum cooperativity
self.chi[dir] = np.transpose(sorted(chidir,
key=lambda el: el[0])) # cooperativity
            self.islocalmax[dir] =\
                self.dtmax[dir] > min(self.chi[dir][0])\
                and self.dtmax[dir] < max(self.chi[dir][0]) # is dtmax a local maximum (strictly inside the range of lag times)
if not(self.calculate_msd): continue # do not calculate mean square displacements
# MEAN SQUARE DISPLACEMENT
for msd_filename in self.msd_standard.get_files(
directory=joinpath(self.data_dir, dir), **self.msd_attributes): # loop over mean square displacements files in directory
init_frame = self.msd_standard.get_data(
msd_filename, 'init_frame')[0] # initial frame
if not(self.init_frame_msd)\
or init_frame in self.init_frame_msd:
self.msd[(dir, init_frame)] = np.genfromtxt(
fname=joinpath(self.data_dir, dir, msd_filename),
delimiter=',', skip_header=True) # mean square displacement
if self.divide_by_dt:
self.msd[(dir, init_frame)][:, 1] /=\
self.msd[(dir, init_frame)][:, 0]
self.msd[(dir, init_frame)][:, 2] /=\
self.msd[(dir, init_frame)][:, 0]
if self.multiply_with_dr:
self.msd[(dir, init_frame)][:, 0] *= parameters['dr']
self.time_step_list = sorted(OrderedDict.fromkeys(
self.time_step.values())) # list of time steps
def calculate_with_msd(self, cor_standard, cor_attributes, r_min, r_max,
msd_standard, msd_attributes, init_frame_msd, box_size=None,
multiply_with_dr=True, divide_by_dt=True):
"""
Calculate cooperativities, maximum cooperativities and times of
        maximum cooperativities.
Also calculates mean square displacements.
Parameters
----------
cor_standard : active_particles.naming._File standard
Correlation files naming object.
cor_attributes : hash table
Attributes to be displayed in correlation file names.
r_min : float
Minimum radius for correlations integration.
r_max : float
Maximum radius for correlations integration.
msd_standard : active_particles.naming._File standard
Mean square displacement files naming object.
msd_attributes : hash table
Attributes to be displayed in mean square displacement file names.
init_frame_msd : list of int
Calculate only mean square displacements calculated with initial
frame from this list except if this list is empty.
box_size : float or None
Size of the square box which was considered. (default: None)
NOTE: if None, then the size is taken as the simulation box size.
multiply_with_dr : bool
Consider the product of rotation diffusion constant and lag time
rather than just lag time. (default: True)
divide_by_dt : bool
Divide mean square displacement by lag time. (default: True)
"""
self.calculate_msd = True
self.msd_standard = msd_standard
self.msd_attributes = msd_attributes
self.init_frame_msd = init_frame_msd
self.divide_by_dt = divide_by_dt
self.msd = {} # hash table of lists of lag times and corresponding mean square displacement and associated standard error
self.calculate(cor_standard, cor_attributes, r_min, r_max,
box_size=box_size, multiply_with_dr=multiply_with_dr)
self.init_frame_list = sorted(OrderedDict.fromkeys(
[init_frame for dir, init_frame in self.msd])) # list of mean square displacements initial frames
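# --- Editor's sketch (not in the original module) ---
# The cooperativity used throughout is, per the axis labels in the plotting
# section below,
#   chi(dt) = (1/L^2) * integral from r_min to r_max of 2*pi*r * C(r, dt) dr,
# evaluated from the 1D correlation by active_particles.analysis.cuu.c1Dtochi.
# The helper below is a hypothetical stand-in for that integral using a
# trapezoidal rule, assuming c1D is an array of (r, C(r)) rows; it is not the
# library routine itself.
def _example_chi_from_c1D(c1D, L, r_min=1, r_max=20):
    c1D = np.array(c1D)
    window = (c1D[:, 0] >= r_min)*(c1D[:, 0] <= r_max) # keep r_min <= r <= r_max
    r, c = c1D[window, 0], c1D[window, 1]
    return np.trapz(2*np.pi*r*c, r)/(L**2) # (1/L^2) int dr 2 pi r C(r)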
# SCRIPT
if __name__ == '__main__': # executing as script
# VARIABLES DEFINITIONS
mode = get_env('VARIABLE', default='dr') # plotting variable
peclet = get_env('PECLET', default=True, vartype=bool) # display Péclet number rather than mode variable
if mode == 'dr':
vzero = get_env('VZERO', default=_vzero, vartype=float) # self-propelling velocity
attributes = {'vzero': vzero} # attributes displayed in filenames
var = 'dr' # plot variable
var_min = get_env('DR_MIN', default=_dr_min, vartype=float) # minimum rotation diffusion constant
var_max = get_env('DR_MAX', default=_dr_max, vartype=float) # maximum rotation diffusion constant
var_c = get_env('DR_C', default=_dr_c, vartype=float) # transition rotation diffusion constant
var_label = r'$\tilde{\nu}_r$' # variable label
if peclet:
x_func = lambda x: vzero/x # x-coordinate as function of plot variable
else:
x_label = r'$\tau_r \equiv \tilde{\nu}_r^{-1}$' # x-coordinate label
x_func = lambda x: 1/x # x-coordinate as function of plot variable
elif mode == 'vzero':
dr = get_env('DR', default=_dr, vartype=float) # rotation diffusion constant
attributes = {'dr': dr} # attributes displayed in filenames
var = 'vzero' # plot variable
var_min = get_env('VZERO_MIN', default=_vzero_min, vartype=float) # minimum self-propelling velocity
var_max = get_env('VZERO_MAX', default=_vzero_max, vartype=float) # maximum self-propelling velocity
var_c = get_env('VZERO_C', default=_vzero_c, vartype=float) # transition self-propelling velocity
var_label = r'$\tilde{v}$' # variable label
if peclet:
x_func = lambda x: x/dr # x-coordinate as function of plot variable
else:
x_label = r'$\tilde{v}$' # x-coordinate label
x_func = lambda x: x # x-coordinate as function of plot variable
else: raise ValueError('Mode %s is not known.' % mode) # mode is not known
if peclet: x_label = r'$Pe$' # x-coordinate label
cor = get_env('CORRELATION', default='Cuu') # correlation variable
    if cor == 'Cuu': # cooperativities from displacement correlations
naming_cor = naming.Cuu() # correlations naming object
cor_name = 'C_{uu}' # correlations name
elif cor == 'Cww':
naming_cor = naming.Cww() # correlations naming object
cor_name = 'C_{\\delta u \\delta u}' # correlations name
elif cor == 'Cdd':
naming_cor = naming.Cdd() # correlations naming object
cor_name = 'C_{|u||u|}' # correlations name
elif cor == 'Cee':
naming_cor = naming.Cee() # correlations naming object
cor_name = 'C_{\\hat{u}\\hat{u}}' # correlations name
else: raise ValueError('Correlation %s is not known.' % cor) # correlation is not known
data_dir = get_env('DATA_DIRECTORY', default=naming.sim_directory) # data directory
excluded_directories = get_env('EXCLUDE', default='') # directories to exclude
parameters_file = get_env('PARAMETERS_FILE',
default=naming.parameters_file) # simulations parameters file name
density = get_env('DENSITY', default=_density, vartype=float) # packing fraction of particles
N = get_env('N', default=_N, vartype=int) # number of particles
box_size = get_env('BOX_SIZE', vartype=float) # size of the square box which was considered
centre = (get_env('X_ZERO', default=0, vartype=float),
get_env('Y_ZERO', default=0, vartype=float)) # centre of the box
init_frame_cor = get_env('INITIAL_FRAME_COR',
default=_init_frame_cor, vartype=int) # frame to consider as initial for correlations
int_max_cor = get_env('INTERVAL_MAXIMUM_COR',
default=_int_max_cor, vartype=int) # maximum number of intervals of length dt considered for correlations
Ncases_cor = get_env('N_CASES', default=_Ncases_cor, vartype=int) # number of boxes in each direction with which the displacement grid is computed
init_frame_msd = get_env_list('INITIAL_FRAME_MSD',
delimiter=':', vartype=int) # display only mean square displacements calculated with initial frame from this list except if this list is empty
int_max_msd = get_env('INTERVAL_MAXIMUM_MSD',
default=_int_max_msd, vartype=int) # maximum number of intervals of length dt considered for mean square displacements calculation
int_period_msd = get_env('INTERVAL_PERIOD',
default=_int_period_msd, vartype=int) # period of dumps for which mean square displacements were calculated
r_min = get_env('R_MIN', default=_r_min, vartype=float) # minimum radius for correlations integration
r_max = get_env('R_MAX', default=_r_max, vartype=float) # maximum radius for correlations integration
# NAMING
common_attributes = {**attributes, 'density': density, 'N': N} # attributes to be displayed in file names
attributes_cor = {**common_attributes, 'init_frame': init_frame_cor,
'int_max': int_max_cor, 'Ncases': Ncases_cor, 'box_size': box_size,
'x_zero': centre[0], 'y_zero': centre[1]} # attributes displayed in file names specifically for correlations
attributes_msd = {**common_attributes, 'int_max': int_max_msd,
'int_period': int_period_msd} # attributes displayed in file names specifically for mean square displacements
naming_msd = naming.Msd() # mean square displacements naming object
naming_simdir = naming.AHB2D() # simulation directory naming object
# PLOT PARAMETERS
font_size = get_env('FONT_SIZE', default=_font_size, vartype=float) # plot font size
marker_size = get_env('MARKER_SIZE', default=_marker_size, vartype=int) # plot marker size
mpl.rcParams.update({'font.size': font_size,
'lines.markersize': marker_size})
colormap = get_env('COLORMAP', default=_colormap) # plot colormap
colormap0 = get_env('COLORMAP0', default=_colormap0) # plot colormap for variables out of variable window
ratio_legend = get_env('RATIO_LEGEND',
default=_ratio_legend, vartype=float) # width ratio between graph and legend
ncol_legend = get_env('NCOL_LEGEND', default=_ncol_legend, vartype=int) # number of legend columns
wspace = get_env('WSPACE', default=_wspace, vartype=float) # plots width space
hspace = get_env('HSPACE', default=_hspace, vartype=float) # plots height space
chi_xs = get_env('CHI_XS', default=_chi_xs) # cooperativity plot x-scale
chi_ys = get_env('CHI_YS', default=_chi_ys) # cooperativity plot y-scale
x_scale = get_env('X_SCALE', default=_x_scale) # maximum cooperativity and time of maximum cooperativity plots x-scale
dtmax_ys = get_env('DTMAX_YS', default=_dtmax_ys) # time of maximum cooperativity plot y-scale
chimax_ys = get_env('CHIMAX_YS', default=_chimax_ys) # maximum cooperativity plot y-scale
msd_xs = get_env('MSD_XS', default=_msd_xs) # mean square displacement plot x-scale
msd_ys = get_env('MSD_YS', default=_msd_ys) # mean square displacement plot y-scale
multiply_with_dr = get_env('DRDT', default=True, vartype=bool) # plot dr*dt rather than dt
if multiply_with_dr:
dt_label = '\\tilde{\\nu}_r \\Delta t' # dt label
else: dt_label = '\\Delta t' # dt label
divide_by_dt = get_env('MSDDT', default=True, vartype=bool) # divide mean square displacement by lag time
msd_label = r'$<|\Delta \vec{r}(t)|^2>$' # mean square displacement plot label
if divide_by_dt: msd_label += r'$/\Delta t$'
fit_max = get_env('FIT', default=False, vartype=bool) # fit maximum cooperativity and time of maximum cooperativity as power law of their x-coordinates on both sides of the variable transition value
# CALCULATION
chimsd = ChiMsd(data_dir, naming_simdir, common_attributes,
parameters_file, var, var_min, var_max,
excluded_dir=excluded_directories) # cooperativities and mean square displacements calculator
chimsd.calculate_with_msd(naming_cor, attributes_cor, r_min, r_max,
naming_msd, attributes_msd, init_frame_msd, box_size=box_size,
multiply_with_dr=multiply_with_dr, divide_by_dt=divide_by_dt) # calculate cooperativities and mean square displacements
# PLOT
colors = {**list_colormap(chimsd.var_list, colormap=colormap),
**list_colormap(chimsd.var0_list, colormap=colormap0)} # plot colors hash table
markers = list_markers(chimsd.time_step_list) # plot markers hash table
linestyles = list_linestyles(chimsd.init_frame_list) # plot linestyles hash table
# CHI, CHIMAX, DTMAX
fig_chi = plt.figure()
fig_chi.subplots_adjust(wspace=wspace, hspace=hspace)
fig_chi.suptitle(
r'$N=%.1e, \phi=%1.2f,$' % (N, density)
+ (r'$\tilde{v} = %.2e$' % vzero if mode == 'dr' else
r'$\tilde{\nu}_r = %.2e$' % dr)
+ r'$, r_{min} = %.2e, r_{max} = %.2e$' % (r_min, r_max))
gs = GridSpec(2, 3, width_ratios=[1, 1, 2/ratio_legend])
ax_chi = plt.subplot(gs[:, 0])
ax_chi.set_xlabel(r'$%s$' % dt_label)
ax_chi.set_xscale(chi_xs)
ax_chi.set_ylabel(r'$\chi(\Delta t) = \frac{1}{L^2}$'
+ r'$\int_{r=r_{min}}^{r=r_{max}} dr 2 \pi r %s(r, \Delta t)$'
% cor_name)
ax_chi.set_yscale(chi_ys)
ax_chi.set_title(
r'$S_{init} = %.1e, S_{max} = %.1e,$' % (init_frame_cor, int_max_cor)
+ r'$N_{cases} = %.1e$' % Ncases_cor)
ax_dtmax = plt.subplot(gs[0, 1])
ax_dtmax.set_xscale(x_scale)
ax_dtmax.set_ylabel(r'$%s^*$' % dt_label)
ax_dtmax.set_yscale(dtmax_ys)
ax_chimax = plt.subplot(gs[1, 1], sharex=ax_dtmax)
plt.setp(ax_dtmax.get_xticklabels(), visible=False)
ax_chimax.set_xlabel(x_label)
ax_chimax.set_xscale(x_scale)
ax_chimax.set_ylabel(r'$\chi(\Delta t^*) = \frac{1}{L^2}$'
+ r'$\int_{r=r_{min}}^{r=r_{max}} dr 2 \pi r %s(r, \Delta t^*)$'
% cor_name)
ax_chimax.set_yscale(chimax_ys)
ax_legend = plt.subplot(gs[:, 2])
ax_legend.axis('off')
lines_fig_chi = list(map(
lambda var_value: Line2D([0], [0], color=colors[var_value], lw=2,
label='%s = %.0e' % (var_label, var_value)),
chimsd.var_list))
lines_fig_chi += [Line2D([0], [0], lw=0, label='')]
lines_fig_chi += list(map(
lambda time_step: Line2D([0], [0], marker=markers[time_step],
color='black', label=r'$dt=%.0e$' % time_step),
chimsd.time_step_list))
ax_legend.legend(handles=lines_fig_chi, loc='center', ncol=ncol_legend)
for dir in chimsd.chi:
if chimsd.isinvarinterval[dir]:
time_step_value = chimsd.time_step[dir]
var_value = chimsd.var_hash[dir]
plot_parameters = {'color': colors[var_value],
'marker': markers[time_step_value]}
ax_chi.plot(*chimsd.chi[dir], **plot_parameters)
ax_dtmax.plot(x_func(var_value), chimsd.dtmax[dir],
**plot_parameters)
ax_chimax.plot(x_func(var_value), chimsd.chimax[dir],
**plot_parameters)
if fit_max: # fit maximum cooperativity and time of maximum cooperativity as power law of dt or dr*dt
# VALUES
dtmax_lt_varc, dtmax_gt_varc = [], []
chimax_lt_varc, chimax_gt_varc = [], []
for dir in chimsd.chi:
if chimsd.isinvarinterval[dir]:
var_value = chimsd.var_hash[dir]
if var_value < var_c:
dtmax_lt_varc += [[np.log(x_func(var_value)),
np.log(chimsd.dtmax[dir])]]
chimax_lt_varc += [[np.log(x_func(var_value)),
np.log(chimsd.chimax[dir])]]
elif var_value > var_c:
dtmax_gt_varc += [[np.log(x_func(var_value)),
np.log(chimsd.dtmax[dir])]]
chimax_gt_varc += [[np.log(x_func(var_value)),
np.log(chimsd.chimax[dir])]]
dtmax_lt_varc = np.transpose(dtmax_lt_varc)
dtmax_gt_varc = np.transpose(dtmax_gt_varc)
chimax_lt_varc = np.transpose(chimax_lt_varc)
chimax_gt_varc = np.transpose(chimax_gt_varc)
# FIT
dtmax_lt_varc_slo, dtmax_lt_varc_int, _, _, dtmax_lt_varc_std =\
st.linregress(*dtmax_lt_varc)
dtmax_gt_varc_slo, dtmax_gt_varc_int, _, _, dtmax_gt_varc_std =\
st.linregress(*dtmax_gt_varc)
chimax_lt_varc_slo, chimax_lt_varc_int, _, _, chimax_lt_varc_std =\
st.linregress(*chimax_lt_varc)
chimax_gt_varc_slo, chimax_gt_varc_int, _, _, chimax_gt_varc_std =\
st.linregress(*chimax_gt_varc)
# SORT FOR PLOTS
dtmax_lt_varc[0].sort()
dtmax_gt_varc[0].sort()
chimax_lt_varc[0].sort()
chimax_gt_varc[0].sort()
# PLOT
ax_dtmax.plot(np.exp(dtmax_lt_varc[0]),
np.exp(dtmax_lt_varc_int)
*(np.exp(dtmax_lt_varc[0])**dtmax_lt_varc_slo),
color='black', linestyle='-.',
label=r'$%s \propto (%s)^{%.1e \pm %.1e}$'
% (ax_dtmax.get_ylabel().replace('$', ''),
x_label.replace('$', ''),
dtmax_lt_varc_slo, dtmax_lt_varc_std))
ax_dtmax.plot(np.exp(dtmax_gt_varc[0]),
np.exp(dtmax_gt_varc_int)
*(np.exp(dtmax_gt_varc[0])**dtmax_gt_varc_slo),
color='black', linestyle='--',
label=r'$%s \propto (%s)^{%.1e \pm %.1e}$'
% (ax_dtmax.get_ylabel().replace('$', ''),
x_label.replace('$', ''),
dtmax_gt_varc_slo, dtmax_gt_varc_std))
ax_chimax.plot(np.exp(chimax_lt_varc[0]),
np.exp(chimax_lt_varc_int)
*(np.exp(chimax_lt_varc[0])**chimax_lt_varc_slo),
color='black', linestyle='-.',
label=r'$%s \propto (%s)^{%.1e \pm %.1e}$'
% ('\\chi(\\Delta t^*)', x_label.replace('$', ''),
chimax_lt_varc_slo, chimax_lt_varc_std))
ax_chimax.plot(np.exp(chimax_gt_varc[0]),
np.exp(chimax_gt_varc_int)
*(np.exp(chimax_gt_varc[0])**chimax_gt_varc_slo),
color='black', linestyle='--',
label=r'$%s \propto (%s)^{%.1e \pm %.1e}$'
% ('\\chi(\\Delta t^*)', x_label.replace('$', ''),
chimax_gt_varc_slo, chimax_gt_varc_std))
# LABEL
ax_dtmax.legend()
ax_chimax.legend()
# CHI, MSD
if chimsd.msd: # if there are mean square displacements to plot
fig_msd = plt.figure()
fig_msd.subplots_adjust(wspace=wspace, hspace=hspace)
fig_msd.suptitle(
r'$N=%.1e, \phi=%1.2f,$' % (N, density)
+ (r'$\tilde{v} = %.2e$' % vzero if mode == 'dr' else
r'$\tilde{\nu}_r = %.2e$' % dr)
+ r'$, r_{min} = %.2e, r_{max} = %.2e$' % (r_min, r_max))
gs = GridSpec(1, 3, width_ratios=[1, 1, 2/ratio_legend])
ax_msd_chi = plt.subplot(gs[0])
ax_msd_chi.set_xlabel(r'$%s$' % dt_label)
ax_msd_chi.set_xscale(chi_xs)
ax_msd_chi.set_ylabel(r'$\chi(\Delta t) = \frac{1}{L^2}$'
+ r'$\int_{r=r_{min}}^{r=r_{max}} dr 2 \pi r %s(r, \Delta t)$'
% cor_name)
ax_msd_chi.set_yscale(chi_ys)
ax_msd_chi.set_title(
r'$S_{init} = %.1e$' % init_frame_cor
+ r'$, S_{max} = %.1e,$' % int_max_cor
+ r'$N_{cases} = %.1e$' % Ncases_cor)
lines_ax_msd_chi = list(map(
lambda time_step: Line2D([0], [0], marker=markers[time_step],
color='black', label=r'$dt=%.0e$' % time_step),
chimsd.time_step_list))
ax_msd_chi.legend(handles=lines_ax_msd_chi, loc='best')
ax_msd = plt.subplot(gs[1])
ax_msd.set_xlabel(r'$%s$' % dt_label)
ax_msd.set_xscale(msd_xs)
ax_msd.set_ylabel(msd_label)
ax_msd.set_yscale(msd_ys)
ax_msd.set_title(
r'$S_{max} = %.1e$' % int_max_msd
+ r'$, S_{period} = %.1e$' % int_period_msd)
lines_ax_msd = list(map(
lambda init_frame: Line2D([0], [0],
linestyle=linestyles[init_frame],
color='black', label=r'$S_{init}=%.1e$' % init_frame),
chimsd.init_frame_list))
ax_msd.legend(handles=lines_ax_msd, loc='best')
ax_legend_msd = plt.subplot(gs[2])
ax_legend_msd.axis('off')
lines_fig_msd = list(map(
lambda var_value: Line2D([0], [0], color=colors[var_value], lw=2,
label='%s = %.0e' % (var_label, var_value)),
sorted(chimsd.var_list + chimsd.var0_list)))
ax_legend_msd.legend(handles=lines_fig_msd,
loc='center', ncol=ncol_legend)
for dir in chimsd.chi:
ax_msd_chi.plot(*chimsd.chi[dir],
color=colors[chimsd.var_hash[dir]],
marker=markers[chimsd.time_step[dir]])
for dir, init_frame in chimsd.msd:
ax_msd.errorbar(
chimsd.msd[(dir, init_frame)][:, 0],
chimsd.msd[(dir, init_frame)][:, 1],
yerr=chimsd.msd[(dir, init_frame)][:, 2],
color=colors[chimsd.var_hash[dir]],
linestyle=linestyles[init_frame])
# SHOW
plt.show()
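# --- Editor's note (not part of the original script) ---
# From the np.genfromtxt call and the errorbar plot above, each mean square
# displacement file is assumed to be comma-separated with a header line and
# three columns: lag time, mean square displacement, standard error. A
# hypothetical loader mirroring that assumption:
def _example_load_msd(filename):
    data = np.genfromtxt(fname=filename, delimiter=',', skip_header=True)
    return data[:, 0], data[:, 1], data[:, 2] # lag time, MSD, standard error on the MSD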
| 44.75641
| 204
| 0.646988
|
8ead5d3c0fafa18f55f4f5107bc62847c162c836
| 7,324
|
py
|
Python
|
sdk/python/pulumi_azure_native/avs/v20210101preview/global_reach_connection.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/avs/v20210101preview/global_reach_connection.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/avs/v20210101preview/global_reach_connection.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['GlobalReachConnection']
class GlobalReachConnection(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
global_reach_connection_name: Optional[pulumi.Input[str]] = None,
peer_express_route_circuit: Optional[pulumi.Input[str]] = None,
private_cloud_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A global reach connection resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authorization_key: Authorization key from the peer express route used for the global reach connection
:param pulumi.Input[str] global_reach_connection_name: Name of the global reach connection in the private cloud
:param pulumi.Input[str] peer_express_route_circuit: Identifier of the ExpressRoute Circuit to peer with in the global reach connection
:param pulumi.Input[str] private_cloud_name: The name of the private cloud.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['authorization_key'] = authorization_key
__props__['global_reach_connection_name'] = global_reach_connection_name
__props__['peer_express_route_circuit'] = peer_express_route_circuit
if private_cloud_name is None and not opts.urn:
raise TypeError("Missing required property 'private_cloud_name'")
__props__['private_cloud_name'] = private_cloud_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['address_prefix'] = None
__props__['circuit_connection_status'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:avs/v20210101preview:GlobalReachConnection"), pulumi.Alias(type_="azure-native:avs:GlobalReachConnection"), pulumi.Alias(type_="azure-nextgen:avs:GlobalReachConnection"), pulumi.Alias(type_="azure-native:avs/v20200717preview:GlobalReachConnection"), pulumi.Alias(type_="azure-nextgen:avs/v20200717preview:GlobalReachConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(GlobalReachConnection, __self__).__init__(
'azure-native:avs/v20210101preview:GlobalReachConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'GlobalReachConnection':
"""
Get an existing GlobalReachConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["address_prefix"] = None
__props__["authorization_key"] = None
__props__["circuit_connection_status"] = None
__props__["name"] = None
__props__["peer_express_route_circuit"] = None
__props__["provisioning_state"] = None
__props__["type"] = None
return GlobalReachConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> pulumi.Output[str]:
"""
The network used for global reach carved out from the original network block provided for the private cloud
"""
return pulumi.get(self, "address_prefix")
@property
@pulumi.getter(name="authorizationKey")
def authorization_key(self) -> pulumi.Output[Optional[str]]:
"""
Authorization key from the peer express route used for the global reach connection
"""
return pulumi.get(self, "authorization_key")
@property
@pulumi.getter(name="circuitConnectionStatus")
def circuit_connection_status(self) -> pulumi.Output[str]:
"""
The connection status of the global reach connection
"""
return pulumi.get(self, "circuit_connection_status")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peerExpressRouteCircuit")
def peer_express_route_circuit(self) -> pulumi.Output[Optional[str]]:
"""
Identifier of the ExpressRoute Circuit to peer with in the global reach connection
"""
return pulumi.get(self, "peer_express_route_circuit")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The state of the ExpressRoute Circuit Authorization provisioning
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
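# --- Editor's sketch (not part of the generated SDK file) ---
# A minimal, hypothetical Pulumi program declaring the resource defined above.
# All names and identifiers are placeholders, not values from a real
# deployment, and the call only succeeds inside a running Pulumi program.
def _example_usage():
    return GlobalReachConnection(
        "exampleGlobalReachConnection", # Pulumi resource name (placeholder)
        resource_group_name="example-rg", # required
        private_cloud_name="example-private-cloud", # required
        global_reach_connection_name="example-connection", # name inside the private cloud
        authorization_key="example-authorization-key", # from the peer ExpressRoute circuit
        peer_express_route_circuit="example-peer-circuit-resource-id") # peer circuit identifier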
| 44.932515
| 417
| 0.669306
|
02b7313efdffc3a160fba7abffbe993d5474b0f3
| 1,903
|
py
|
Python
|
actions/post_message.py
|
StackStorm-Exchange/stackstorm-irc
|
719f2b20952a97f44e237e2d3c96bf11ae602cad
|
[
"Apache-2.0"
] | null | null | null |
actions/post_message.py
|
StackStorm-Exchange/stackstorm-irc
|
719f2b20952a97f44e237e2d3c96bf11ae602cad
|
[
"Apache-2.0"
] | 4
|
2017-03-28T15:17:04.000Z
|
2021-05-09T05:11:44.000Z
|
actions/post_message.py
|
StackStorm-Exchange/stackstorm-irc
|
719f2b20952a97f44e237e2d3c96bf11ae602cad
|
[
"Apache-2.0"
] | 1
|
2021-01-28T17:44:50.000Z
|
2021-01-28T17:44:50.000Z
|
import random
import eventlet
from irc.bot import SingleServerIRCBot
from st2common.runners.base_action import Action
eventlet.monkey_patch(
os=True,
select=True,
socket=True,
thread=True,
time=True)
__all__ = [
'PostMessageAction'
]
class StackStormActionIRCBot(SingleServerIRCBot):
def __init__(self, server_host, server_port, nickname, channel, message):
server_list = [(server_host, server_port)]
super(StackStormActionIRCBot, self).__init__(server_list=server_list,
nickname=nickname,
realname=nickname)
self._channel = channel
self._message = message
def on_welcome(self, connection, event):
try:
connection.join(self._channel)
self.connection.privmsg(self._channel, self._message) # pylint: disable=no-member
finally:
self.die(msg='Disconnecting') # pylint: disable=no-member
def on_nicknameinuse(self, connection, event):
new_nickname = '%s-%s' % (connection.get_nickname(), random.randint(1, 1000))
connection.nick(new_nickname)
class PostMessageAction(Action):
def run(self, channel, message):
bot = self._get_bot(channel=channel, message=message)
bot.start() # pylint: disable=no-member
return True
def _get_bot(self, channel, message):
split = self.config['server'].split(':')
server_host = split[0]
server_port = int(split[1])
nickname = self.config['nickname']
bot = StackStormActionIRCBot(server_host=server_host,
server_port=server_port,
nickname=nickname,
channel=channel,
message=message)
return bot
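# --- Editor's sketch (not in the original action) ---
# StackStorm normally instantiates PostMessageAction with the pack's config.
# The helper below is a standalone, hypothetical illustration of the expected
# config shape ("server" as host:port plus "nickname") and of calling run();
# the server, channel and message are made-up values.
def _example_run():
    config = {'server': 'irc.example.net:6667', 'nickname': 'st2-bot'}
    action = PostMessageAction(config=config)
    return action.run(channel='#ops', message='Deployment finished')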
| 32.254237
| 94
| 0.593274
|
6a5eb6db117c948da70f5ff978206e73602d1698
| 5,595
|
py
|
Python
|
src/robotide/utils/eventhandler.py
|
chandrapola/RIDE
|
08e01b98026b618359e47689cb5e65a0a67795f5
|
[
"ECL-2.0",
"Apache-2.0"
] | 775
|
2015-01-12T06:54:09.000Z
|
2022-03-25T05:18:05.000Z
|
src/robotide/utils/eventhandler.py
|
chandrapola/RIDE
|
08e01b98026b618359e47689cb5e65a0a67795f5
|
[
"ECL-2.0",
"Apache-2.0"
] | 2,191
|
2015-05-19T16:49:09.000Z
|
2022-03-28T21:38:34.000Z
|
src/robotide/utils/eventhandler.py
|
chandrapola/RIDE
|
08e01b98026b618359e47689cb5e65a0a67795f5
|
[
"ECL-2.0",
"Apache-2.0"
] | 382
|
2015-01-24T08:41:44.000Z
|
2022-03-13T10:14:20.000Z
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import wx
class _RideFSWatcherHandler:
def __init__(self):
self._fs_watcher = None
self._is_workspace_dirty = False
self._initial_watched_path = None
self._watched_path = set()
def create_fs_watcher(self, path):
if self._fs_watcher:
return
self._initial_watched_path = path
self._fs_watcher = wx.FileSystemWatcher()
self._fs_watcher.Bind(wx.EVT_FSWATCHER, self._on_fs_event)
# print(f"DEBUG: FileSystemWatcher create_fs_watcher CREATED")
def start_listening(self, path):
self.stop_listening()
if os.path.isdir(path):
# only watch folders
            # MSW does not support watching a single file
path = os.path.join(path, '')
result = self._fs_watcher.AddTree(path) #, filter="*.r*o*")
# print(f"DEBUG: FileSystemWatcher start_listening DIR result is ={result}")
# Add all files to the monitoring list
from wx import FileSystem
fs = FileSystem()
fs.ChangePathTo(path, True)
file_search = fs.FindFirst("*")
while file_search:
if self._is_valid_file_format(file_search):
# print(f"DEBUG: FileSystemWatcher start_listening file_search={file_search}")
self._watched_path.add(fs.URLToFileName(file_search))
file_search = fs.FindNext()
self._watched_path.add(path)
else:
self._watched_path.add(path) # Here we add the file path
path = os.path.join(os.path.dirname(path), '')
result = self._fs_watcher.Add(path) # Here we only add the file parent directory
# print(f"DEBUG: FileSystemWatcher start_listening FILE result is ={result}")
# print(f"DEBUG: FileSystemWatcher start_listening self._watched_path={self._watched_path}")
def stop_listening(self):
# print(f"DEBUG: FileSystemWatcher stop_listening")
self._is_workspace_dirty = False
self._fs_watcher.RemoveAll()
self._watched_path = set()
def is_workspace_dirty(self):
# print(f"DEBUG: is_workspace_dirty self._watched_path = {self._watched_path}")
if self._watched_path:
return self._is_workspace_dirty
else:
return False
def is_watcher_created(self):
return self._fs_watcher is not None
def get_workspace_new_path(self):
return self._initial_watched_path # self._watched_path.pop() # Returning file or directory name
def _on_fs_event(self, event):
if self._is_mark_dirty_needed(event):
self._is_workspace_dirty = True
def _is_mark_dirty_needed(self, event):
new_path = event.GetNewPath()
previous_path = event.GetPath()
change_type = event.GetChangeType()
if change_type == wx.FSW_EVENT_MODIFY: # DEBUG or change_type == wx.FSW_EVENT_ACCESS
"""
paths = list()
count = self._fs_watcher.GetWatchedPaths(paths) # DEBUG This is always empty
print(f"DEBUG: FSW_EVENT_MODIFY count={count} paths={paths}")
for file in paths:
# print(f"DEBUG: FileSystemWatcher count files {count} event wx.FSW_EVENT_MODIFY file = {file}")
if file == previous_path:
return True
"""
if previous_path in self._watched_path:
return True
return False
if change_type == wx.FSW_EVENT_CREATE:
if os.path.isdir(previous_path):
return True
elif os.path.isfile(previous_path):
return self._is_valid_file_format(previous_path)
elif change_type == wx.FSW_EVENT_DELETE:
if previous_path in self._watched_path:
# workspace root folder / suite file is deleted
self._watched_path.remove(previous_path)
return True
if previous_path.endswith(os.sep):
return True
else:
return self._is_valid_file_format(previous_path)
elif change_type == wx.FSW_EVENT_RENAME:
if previous_path in self._watched_path:
# workspace root folder / suite file is renamed
self._watched_path.remove(previous_path)
self._watched_path.add(new_path)
return True
if os.path.isdir(new_path):
return True
elif os.path.isfile(new_path):
return self._is_valid_file_format(new_path)
else:
return False
@staticmethod
def _is_valid_file_format(file_path):
# only watch files with certain extensions
suffixes = ('.robot', '.txt', '.resource', '.tsv')
return os.path.splitext(file_path)[-1].lower() in suffixes
RideFSWatcherHandler = _RideFSWatcherHandler()
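# --- Editor's sketch (not in the original module) ---
# A minimal illustration of the intended call sequence for the singleton above:
# create the watcher once, point it at a workspace, then poll the dirty flag.
# The path is a placeholder, wx.FileSystemWatcher needs a running wx.App, and
# RIDE itself does not call this helper.
def _example_watch(workspace_path='/path/to/suite'):
    RideFSWatcherHandler.create_fs_watcher(workspace_path)
    RideFSWatcherHandler.start_listening(workspace_path)
    # ... later, e.g. on a timer or when the window regains focus:
    if RideFSWatcherHandler.is_workspace_dirty():
        return RideFSWatcherHandler.get_workspace_new_path()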
| 39.680851
| 112
| 0.63092
|
16803ba4965525e0d1574560910fcbb179078581
| 189
|
py
|
Python
|
Instagram/admin.py
|
IsaacMurage-dev/Insta-lookalike
|
0b394a3e60c669dfb78e8d538e869cebbcec70b8
|
[
"MIT"
] | null | null | null |
Instagram/admin.py
|
IsaacMurage-dev/Insta-lookalike
|
0b394a3e60c669dfb78e8d538e869cebbcec70b8
|
[
"MIT"
] | null | null | null |
Instagram/admin.py
|
IsaacMurage-dev/Insta-lookalike
|
0b394a3e60c669dfb78e8d538e869cebbcec70b8
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Profile,Image,Comment
# Register your models here.
admin.site.register(Profile)
admin.site.register(Image)
admin.site.register(Comment)
| 27
| 41
| 0.820106
|
a4257e9fca84a4a5cfe3f469e0b80cc613fd2042
| 16,165
|
py
|
Python
|
osmtm/views/task.py
|
loggingroads/osm-tasking-manager2
|
cb482d667273adffbe27a905308aec579ed71cec
|
[
"BSD-2-Clause"
] | null | null | null |
osmtm/views/task.py
|
loggingroads/osm-tasking-manager2
|
cb482d667273adffbe27a905308aec579ed71cec
|
[
"BSD-2-Clause"
] | null | null | null |
osmtm/views/task.py
|
loggingroads/osm-tasking-manager2
|
cb482d667273adffbe27a905308aec579ed71cec
|
[
"BSD-2-Clause"
] | null | null | null |
from pyramid.view import view_config
from pyramid.httpexceptions import (
HTTPNotFound,
HTTPBadRequest,
HTTPUnauthorized,
HTTPForbidden,
)
from ..models import (
DBSession,
Task,
TaskState,
TaskLock,
TaskComment,
PriorityArea,
Project,
User,
Message,
)
from geoalchemy2 import (
shape,
)
from sqlalchemy.orm import (
joinedload,
)
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.exc import OperationalError
from sqlalchemy.sql.expression import (
not_,
and_,
or_,
)
from pyramid.security import authenticated_userid
import datetime
import random
import re
import transaction
from ..models import EXPIRATION_DELTA, ST_SetSRID
from user import username_to_userid
import logging
log = logging.getLogger(__name__)
def __get_user(request, allow_none=False):
user_id = authenticated_userid(request)
if not user_id:
if allow_none:
return None
raise HTTPUnauthorized()
user = DBSession.query(User).get(user_id)
if not allow_none and not user: # pragma: no cover
raise HTTPUnauthorized()
return user
def __get_task(request, lock_for_update=False):
task_id = request.matchdict['task']
project_id = request.matchdict['project']
filter = and_(Task.project_id == project_id, Task.id == task_id)
query = DBSession.query(Task) \
.options(joinedload(Task.cur_lock)) \
.filter(filter)
if lock_for_update:
query = query.with_for_update(nowait=True, of=Task)
_ = request.translate
try:
task = query.one()
except NoResultFound:
task = None
except OperationalError: # pragma: no cover
raise HTTPBadRequest(_("Cannot update task. Record lock for update."))
if not task or \
task.cur_state and task.cur_state.state == TaskState.state_removed:
# FIXME return translated text via JSON
raise HTTPNotFound(_("This task doesn't exist."))
return task
def __ensure_task_locked(request, task, user):
_ = request.translate
locked_task = get_locked_task(task.project_id, user)
if locked_task != task:
raise HTTPForbidden(_("You need to lock the task first."))
@view_config(route_name='task_xhr', renderer='task.mako', http_cache=0)
def task_xhr(request):
user = __get_user(request, allow_none=True)
task = __get_task(request)
locked_task = get_locked_task(task.project_id, user)
task_id = request.matchdict['task']
project_id = request.matchdict['project']
filter = and_(TaskState.task_id == task_id,
TaskState.project_id == project_id)
states = DBSession.query(TaskState).filter(filter) \
.order_by(TaskState.date).all()
# remove the first state (task creation with state==ready)
states.pop(0)
filter = and_(TaskLock.task_id == task_id,
TaskLock.project_id == project_id)
locks = DBSession.query(TaskLock).filter(filter) \
.order_by(TaskLock.date).all()
# remove the first lock (task creation)
locks.pop(0)
filter = and_(TaskComment.task_id == task_id,
TaskComment.project_id == project_id)
comments = DBSession.query(TaskComment).filter(filter) \
.order_by(TaskComment.date).all()
history = states + locks + comments
history = sorted(history, key=lambda step: step.date, reverse=True)
return dict(task=task,
user=user,
locked_task=locked_task,
history=history)
@view_config(route_name='task_empty', renderer='task.empty.mako', http_cache=0)
def task_empty(request):
user = __get_user(request, allow_none=True)
project_id = request.matchdict['project']
locked_task = get_locked_task(project_id, user)
assigned_tasks = get_assigned_tasks(project_id, user)
return dict(locked_task=locked_task, project_id=project_id,
assigned_tasks=assigned_tasks, user=user)
@view_config(route_name='task_done', renderer='json')
def done(request):
user = __get_user(request)
task = __get_task(request, lock_for_update=True)
__ensure_task_locked(request, task, user)
add_comment(request, task, user)
task.states.append(TaskState(user=user, state=TaskState.state_done))
task.locks.append(TaskLock(user=None, lock=False))
DBSession.add(task)
DBSession.flush()
_ = request.translate
return dict(success=True,
msg=_("Task marked as done. Thanks for your contribution"))
@view_config(route_name='task_lock', renderer="json")
def lock(request):
_ = request.translate
user = __get_user(request)
task = __get_task(request, lock_for_update=True)
locked_task = get_locked_task(task.project_id, user)
if locked_task is not None:
raise HTTPBadRequest
if task.cur_lock and task.cur_lock.lock:
# FIXME use http errors
return dict(success=False,
task=dict(id=task.id),
error_msg=_("Task already locked."))
if task.assigned_to is not None and task.assigned_to != user:
request.response.status = 400
return dict(success=False,
task=dict(id=task.id),
error_msg=_("Task assigned to someone else."))
task.locks.append(TaskLock(user=user, lock=True))
DBSession.add(task)
return dict(success=True, task=dict(id=task.id),
msg=_("Task locked. You can start mapping."))
@view_config(route_name='task_unlock', renderer="json")
def unlock(request):
user = __get_user(request)
task = __get_task(request, lock_for_update=True)
__ensure_task_locked(request, task, user)
add_comment(request, task, user)
task.locks.append(TaskLock(user=None, lock=False))
DBSession.add(task)
DBSession.flush()
_ = request.translate
return dict(success=True, task=dict(id=task.id),
msg=_("Task unlocked."))
@view_config(route_name='task_comment', renderer="json")
def comment(request):
user = __get_user(request)
task = __get_task(request)
add_comment(request, task, user)
_ = request.translate
return dict(success=True, task=dict(id=task.id),
msg=_("Comment added."))
def add_comment(request, task, user):
if 'comment' in request.params and request.params.get('comment') != '':
comment = request.params['comment']
# check for mentions in the comment
p = re.compile(ur'((?<=@)\w+|\[.+?\])')
def repl(var):
username = var.group()
username = re.sub('(\[|\])', '', username)
return username_to_userid(username)
# put ids instead of usernames in comment
comment = re.sub(p, repl, comment)
p = re.compile(ur'((?<=@)\d+)')
for userid in p.findall(comment):
to = DBSession.query(User).get(userid)
if to:
mention_user(request, user, to, comment)
task.comments.append(TaskComment(comment, user))
DBSession.add(task)
DBSession.flush()
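# --- Editor's sketch (not in the original module) ---
# add_comment() rewrites "@username" and "@[user name]" mentions into numeric
# user ids before storing the comment, so mention_user() can notify each user.
# The helper below only illustrates that rewriting step with a stubbed lookup;
# the real lookup is username_to_userid from the user views module.
def _example_mention_rewrite(comment, lookup=None):
    lookup = lookup or {'alice': '42', 'John Doe': '7'} # stubbed username -> id table
    def repl(match):
        username = re.sub(r'(\[|\])', '', match.group())
        return lookup.get(username, username)
    return re.sub(r'((?<=@)\w+|\[.+?\])', repl, comment)
# _example_mention_rewrite('@alice please recheck @[John Doe]')
# returns '@42 please recheck @7'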
def mention_user(request, from_, to, comment):
_ = request.translate
project_id = request.matchdict['project']
task_id = request.matchdict['task']
href = request.route_path('project', project=project_id)
href = href + '#task/%s' % task_id
link = '<a href="%s">#%s</a>' % (href, task_id)
subject = _('You were mentioned in a comment - Task ${link}',
mapping={'link': link})
send_message(subject, from_, to, comment)
def send_message(subject, from_, to_, msg):
DBSession.add(Message(subject, from_, to_, msg))
def send_invalidation_message(request, task, user):
"""Sends message to contributors of invalidated tasks."""
comment = request.params.get('comment', '')
states = sorted(task.states, key=lambda state: state.date, reverse=True)
recipients = set()
for state in states:
if (state.state == TaskState.state_validated or
state.state == TaskState.state_done):
recipients.add(state.user)
from_ = user
while recipients:
to = recipients.pop()
if from_ != to:
_ = request.translate
href = request.route_path('project', project=task.project_id)
href = href + '#task/%s' % task.id
link = '<a href="%s">#%d</a>' % (href, task.id)
subject = _('Task ${link} invalidated', mapping={'link': link})
send_message(subject, from_, to, comment)
@view_config(route_name='task_validate', renderer="json")
def validate(request):
user = __get_user(request)
task = __get_task(request, lock_for_update=True)
__ensure_task_locked(request, task, user)
task.user = None
_ = request.translate
if 'validate' in request.params:
state = TaskState.state_validated
msg = _("Task validated.")
else:
state = TaskState.state_invalidated
msg = _("Task invalidated.")
send_invalidation_message(request, task, user)
add_comment(request, task, user)
task.states.append(TaskState(user=user, state=state))
task.locks.append(TaskLock(user=None, lock=False))
DBSession.add(task)
DBSession.flush()
return dict(success=True, msg=msg)
@view_config(route_name='task_split', renderer='json')
def split(request):
user = __get_user(request)
task = __get_task(request, lock_for_update=True)
__ensure_task_locked(request, task, user)
if task.zoom is None or (task.zoom - task.project.zoom) > 1:
raise HTTPBadRequest()
for i in range(0, 2):
for j in range(0, 2):
t = Task(int(task.x) * 2 + i,
int(task.y) * 2 + j,
int(task.zoom) + 1)
t.project = task.project
t.update = datetime.datetime.utcnow()
task.states.append(TaskState(user=user, state=TaskState.state_removed))
task.locks.append(TaskLock(user=None, lock=False))
DBSession.add(task)
return dict()
def get_locked_task(project_id, user):
if user is None:
return None
try:
query = DBSession.query(Task).options(joinedload(Task.cur_lock)) \
.filter(and_(Task.cur_lock.has(lock=True),
Task.project_id == project_id,
Task.cur_lock.has(user_id=user.id)))
return query.one()
except NoResultFound:
return None
def get_assigned_tasks(project_id, user):
if user is None:
return None
query = DBSession.query(Task) \
.filter(Task.project_id == project_id, Task.assigned_to == user) \
.order_by(Task.assigned_date.desc())
return query.all()
def find_matching_task(project_id, filter):
state_filter = or_(Task.cur_state.has(state=TaskState.state_ready),
Task.cur_state.has(state=TaskState.state_invalidated))
query = DBSession.query(Task) \
.filter_by(project_id=project_id) \
.filter(state_filter) \
.filter(not_(Task.cur_lock.has(lock=True)))
query = query.filter(filter)
count = query.count()
if count != 0: # pragma: no cover
atask = query.offset(random.randint(0, count - 1)).first()
return atask
return None
@view_config(route_name='task_random', http_cache=0, renderer='json')
def random_task(request):
"""Gets a random not-done task. First it tries to get one that does not
border any in-progress tasks."""
project_id = request.matchdict['project']
# filter for tasks not bordering busy tasks
locked = DBSession.query(Task.geometry.ST_Union()) \
.filter_by(project_id=project_id) \
.filter(Task.cur_lock.has(lock=True)) \
.scalar()
locked_filter = None
if locked is not None:
locked_filter = Task.geometry.ST_Disjoint(ST_SetSRID(locked, 4326))
# filter for tasks within priority areas
priority = DBSession.query(PriorityArea.geometry.ST_Union()) \
.join(Project.priority_areas) \
.filter(Project.id == project_id) \
.scalar()
priority_filter = None
if priority is not None:
priority_filter = Task.geometry.ST_Intersects(
ST_SetSRID(priority, 4326)
)
# search attempts
filters = []
if priority_filter is not None and locked_filter is not None:
# tasks in priority areas and not bordering busy tasks
filters.append(and_(locked_filter, priority_filter))
if priority_filter is not None:
# tasks in priority areas
filters.append(priority_filter)
if locked_filter is not None:
# tasks not bordering busy tasks
filters.append(locked_filter)
# any other available task
filters.append(True)
for filter in filters:
atask = find_matching_task(project_id, filter)
if atask:
return dict(success=True, task=dict(id=atask.id))
_ = request.translate
return dict(success=False,
error_msg=_("Random task... none available! Sorry."))
@view_config(route_name='task_assign', renderer='json',
permission='project_edit')
def task_assign(request):
"""Assigns a taks to a given user"""
task = __get_task(request)
_ = request.translate
if task.cur_lock and task.cur_lock.lock:
request.response.status = 400
        return dict(success=False,
                    msg=_("You cannot assign an already locked task"))
username = request.matchdict['user']
user = DBSession.query(User).filter(User.username == username).one()
task.assigned_to_id = user.id
task.assigned_date = datetime.datetime.utcnow()
DBSession.add(task)
return dict(success=True,
msg=_("Task assigned."))
@view_config(route_name='task_assign_delete', renderer='json',
permission='project_edit')
def task_assign_delete(request):
"""Remove assignment"""
task = __get_task(request)
task.assigned_to_id = None
task.assigned_date = None
_ = request.translate
return dict(success=True,
msg=_("Task assignment removed"))
@view_config(route_name='task_gpx', renderer='task.gpx.mako')
def task_gpx(request):
task = __get_task(request)
request.response.headerlist.append(('Access-Control-Allow-Origin',
'http://www.openstreetmap.org'))
return dict(multipolygon=shape.to_shape(task.geometry),
project_id=task.project_id)
@view_config(route_name='task_osm', renderer='task.osm.mako')
def task_osm(request):
task = __get_task(request)
request.response.headerlist.append(('Access-Control-Allow-Origin',
'http://www.openstreetmap.org'))
return dict(multipolygon=shape.to_shape(task.geometry),
project_id=task.project_id)
@view_config(route_name='task_difficulty', renderer='json',
permission='project_edit')
def task_difficulty(request):
"""Change task difficulty"""
task = __get_task(request)
difficulty = request.matchdict['difficulty']
task.difficulty = difficulty
_ = request.translate
return dict(success=True,
msg=_("Task difficulty changed."))
@view_config(route_name='task_difficulty_delete', renderer='json',
permission='project_edit')
def task_difficulty_delete(request):
"""Remove assignment"""
task = __get_task(request)
task.difficulty = None
_ = request.translate
return dict(success=True,
msg=_("Task difficulty removed"))
# unlock any expired task
def check_task_expiration(): # pragma: no cover
query = DBSession.query(Task).filter(
and_(
Task.lock_date.__ne__(None),
Task.lock_date < datetime.datetime.utcnow() - EXPIRATION_DELTA))
with transaction.manager:
for task in query:
new_lock = TaskLock()
new_lock.task_id = task.id
new_lock.project_id = task.project_id
DBSession.add(new_lock)
log.debug("found one task")
transaction.commit()
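# (Hedged sketch, not part of the original views: the ordered-fallback idea that
# random_task() uses, shown with plain predicates instead of SQLAlchemy filters.
# All names below are illustrative only.)
def _pick_first_match(candidates, predicates):
    # try the strictest predicate first, fall back to progressively looser ones
    for predicate in predicates:
        matching = [c for c in candidates if predicate(c)]
        if matching:
            return matching[0]
    return None

_demo_tasks = [{"id": 1, "priority": False}, {"id": 2, "priority": True}]
_demo_predicates = [lambda t: t["priority"], lambda t: True]  # priority areas first, then any task
assert _pick_first_match(_demo_tasks, _demo_predicates)["id"] == 2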
| 29.935185
| 79
| 0.648995
|
3aaafe4e05bafd956771739d4e02826d833a9281
| 218
|
py
|
Python
|
classical_control_theory/steady_state_final_value_theorem.py
|
andreamunafo/automatic_control
|
dd1d89f732bfd8d95b0ebef6fe99df29b18a1fc2
|
[
"Apache-2.0"
] | null | null | null |
classical_control_theory/steady_state_final_value_theorem.py
|
andreamunafo/automatic_control
|
dd1d89f732bfd8d95b0ebef6fe99df29b18a1fc2
|
[
"Apache-2.0"
] | null | null | null |
classical_control_theory/steady_state_final_value_theorem.py
|
andreamunafo/automatic_control
|
dd1d89f732bfd8d95b0ebef6fe99df29b18a1fc2
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: 07_Final_value_theorem_and_steady_state_error.ipynb (unless otherwise specified).
__all__ = []
# Cell
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
| 27.25
| 125
| 0.802752
|
6ee7b2f3e1db8d3c5577bc083bf0a909608f987d
| 4,212
|
py
|
Python
|
paint.py
|
naemazam/Paint
|
9346cbbfd13e1556922f2e4377f08439ea227239
|
[
"MIT"
] | null | null | null |
paint.py
|
naemazam/Paint
|
9346cbbfd13e1556922f2e4377f08439ea227239
|
[
"MIT"
] | null | null | null |
paint.py
|
naemazam/Paint
|
9346cbbfd13e1556922f2e4377f08439ea227239
|
[
"MIT"
] | null | null | null |
########################################################################
## Naem Azam
# YOUTUBE: (The Terminal Boy)
# WEBSITE: naemazam.github.io
########################################################################
from tkinter import *
from tkinter.colorchooser import askcolor
from PIL import ImageTk, Image
class Paint(object):
DEFAULT_PEN_SIZE = 5.0
DEFAULT_COLOR = 'black'
def __init__(self):
self.root = Tk()
self.root.title('NA Paint')
self.root.geometry('500x300')
self.root.maxsize(500,300)
self.root.minsize(500,300)
self.paint_tools = Frame(self.root,width=100,height=300,relief=RIDGE,borderwidth=2)
self.paint_tools.place(x=0,y=0)
self.pen_logo = ImageTk.PhotoImage(Image.open('pen.png'))
self.p = Label(self.paint_tools, text="pen",borderwidth=0,font=('verdana',10,'bold'))
self.p.place(x=5,y=11)
self.pen_button = Button(self.paint_tools,padx=6,image=self.pen_logo,borderwidth=2,command=self.use_pen)
self.pen_button.place(x=60,y=10)
self.brush_logo = ImageTk.PhotoImage(Image.open('brush.png'))
self.b = Label(self.paint_tools,borderwidth=0,text='brush',font=('verdana',10,'bold'))
self.b.place(x=5,y=40)
self.brush_button = Button(self.paint_tools,image = self.brush_logo,borderwidth=2,command=self.use_brush)
self.brush_button.place(x=60,y=40)
self.color_logo = ImageTk.PhotoImage(Image.open('color.png'))
self.cl = Label(self.paint_tools, text='color',font=('verdana',10,'bold'))
self.cl.place(x=5,y=70)
self.color_button = Button(self.paint_tools,image = self.color_logo,borderwidth=2,command=self.choose_color)
self.color_button.place(x=60,y=70)
self.eraser_logo = ImageTk.PhotoImage(Image.open('eraser.png'))
self.e = Label(self.paint_tools, text='eraser',font=('verdana',10,'bold'))
self.e.place(x=5,y=100)
self.eraser_button = Button(self.paint_tools,image = self.eraser_logo,borderwidth=2,command=self.use_eraser)
self.eraser_button.place(x=60,y=100)
self.pen_size = Label(self.paint_tools,text="Pen Size",font=('verdana',10,'bold'))
self.pen_size.place(x=15,y=250)
self.choose_size_button = Scale(self.paint_tools, from_=1, to=10, orient=VERTICAL)
self.choose_size_button.place(x=20,y=150)
self.c = Canvas(self.root, bg='white', width=600, height=600,relief=RIDGE,borderwidth=0)
self.c.place(x=100,y=0)
self.setup()
self.root.mainloop()
def setup(self):
self.old_x = None
self.old_y = None
self.line_width = self.choose_size_button.get()
self.color = self.DEFAULT_COLOR
self.eraser_on = False
self.active_button = self.pen_button
self.c.bind('<B1-Motion>', self.paint)
self.c.bind('<ButtonRelease-1>', self.reset)
def use_pen(self):
self.activate_button(self.pen_button)
def use_brush(self):
self.activate_button(self.brush_button)
def choose_color(self):
self.eraser_on = False
self.color = askcolor(color=self.color)[1]
def use_eraser(self):
self.activate_button(self.eraser_button, eraser_mode=True)
def activate_button(self, some_button, eraser_mode=False):
self.active_button.config(relief=RAISED)
some_button.config(relief=SUNKEN)
self.active_button = some_button
self.eraser_on = eraser_mode
def paint(self, event):
self.line_width = self.choose_size_button.get()
paint_color = 'white' if self.eraser_on else self.color
if self.old_x and self.old_y:
self.c.create_line(self.old_x, self.old_y, event.x, event.y,
width=self.line_width, fill=paint_color,
capstyle=ROUND, smooth=TRUE, splinesteps=36)
self.old_x = event.x
self.old_y = event.y
def reset(self, event):
self.old_x, self.old_y = None, None
if __name__ == '__main__':
Paint()
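# (Hedged illustration, separate from the Tk app above: the same "remember the
# last point, draw a segment to the current point" idea that paint()/reset()
# implement, without needing a display. Names are illustrative only.)
def _collect_segments(points):
    segments, last = [], None
    for point in points:
        if last is not None:
            segments.append((last, point))  # mirrors Canvas.create_line(old, new)
        last = point
    return segments

assert _collect_segments([(0, 0), (1, 1), (2, 3)]) == [((0, 0), (1, 1)), ((1, 1), (2, 3))]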
| 39
| 117
| 0.609212
|
0750ba5cb5381ebd85f27d00a103f5726833b1af
| 2,188
|
py
|
Python
|
tests/test_enum.py
|
bannsec/py010parser
|
cba61cab345f9b7c19628904d7667153dbddf326
|
[
"BSD-3-Clause"
] | 54
|
2015-04-08T22:54:17.000Z
|
2022-02-16T10:28:43.000Z
|
tests/test_enum.py
|
bannsec/py010parser
|
cba61cab345f9b7c19628904d7667153dbddf326
|
[
"BSD-3-Clause"
] | 27
|
2015-08-16T11:45:21.000Z
|
2020-09-07T20:28:22.000Z
|
tests/test_enum.py
|
bannsec/py010parser
|
cba61cab345f9b7c19628904d7667153dbddf326
|
[
"BSD-3-Clause"
] | 11
|
2015-04-12T04:24:44.000Z
|
2021-01-08T04:53:07.000Z
|
#!/usr/bin/env python
# encoding: utf-8
import os
import sys
import unittest
sys.path.insert(0, "..")
from py010parser import parse_file, parse_string, c_ast
class TestEnum(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_enum_types(self):
# note that there have been problems using a built-in
# type (int/float/etc) vs the typedefd ones, TYPEID vs
res = parse_string("""
enum <ulong> COLORS {
WHITE = 1
} var1;
enum <int> COLORS {
WHITE = 1
} var1;
enum IFD_dirtype {
IFD_TYPE_EXIF = 1,
IFD_TYPE_GEOTAG,
IFD_TYPE_CASIO_QV_R62,
};
enum {
TEST,
TEST2
} blah;
""", optimize=True)
def test_untypedefd_enum_as_typeid(self):
res = parse_string("""
enum <ulong> BLAH {
BLAH1, BLAH2, BLAH3
};
local BLAH x;
""", optimize=True)
def test_c_keywords_in_enum(self):
res = parse_string("""
enum <int> BLAH {
goto,
register,
extern,
goto,
volatile,
static
};
local BLAH x;
""", optimize=True)
if __name__ == "__main__":
unittest.main()
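# (Hedged usage sketch, not part of the original tests: parsing a small 010
# template with the same helper the tests use. It only assumes the
# parse_string() import already present above.)
_demo_ast = parse_string("""
    enum <int> COLORS {
        RED = 1,
        GREEN
    } palette;
""", optimize=True)
# parse_string returns a c_ast node tree; type inspection is enough here
print(type(_demo_ast))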
| 21.45098
| 63
| 0.481718
|
df81eaef5adf4e020f49d11b770b26c14b844ade
| 7,818
|
py
|
Python
|
utils/exporters/blender/addons/io_three/exporter/api/material.py
|
lchl7890987/WebGL
|
f73593b00e3b2c927bb1a3236240cea5597166ec
|
[
"MIT"
] | 10
|
2015-07-09T06:08:03.000Z
|
2021-11-08T13:00:01.000Z
|
utils/exporters/blender/addons/io_three/exporter/api/material.py
|
lchl7890987/WebGL
|
f73593b00e3b2c927bb1a3236240cea5597166ec
|
[
"MIT"
] | 1
|
2015-06-25T16:42:04.000Z
|
2021-01-12T19:22:16.000Z
|
utils/exporters/blender/addons/io_three/exporter/api/material.py
|
lchl7890987/WebGL
|
f73593b00e3b2c927bb1a3236240cea5597166ec
|
[
"MIT"
] | 3
|
2015-05-24T13:57:14.000Z
|
2018-01-17T09:49:08.000Z
|
from bpy import data, types
from .. import constants, logger
from .constants import MULTIPLY, WIRE, IMAGE
def _material(func):
"""
:param func:
"""
def inner(name, *args, **kwargs):
"""
:param name:
:param *args:
:param **kwargs:
"""
if isinstance(name, types.Material):
material = name
else:
material = data.materials[name]
return func(material, *args, **kwargs)
return inner
@_material
def ambient_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.ambient_color(%s)", material)
diffuse = diffuse_color(material)
return (material.ambient * diffuse[0],
material.ambient * diffuse[1],
material.ambient * diffuse[2])
@_material
def blending(material):
"""
:param material:
:return: THREE_blending_type value
"""
logger.debug("material.blending(%s)", material)
try:
blend = material.THREE_blending_type
except AttributeError:
logger.debug("No THREE_blending_type attribute found")
blend = constants.NORMAL_BLENDING
return blend
@_material
def bump_map(material):
"""
:param material:
:return: texture node for bump
"""
logger.debug("material.bump_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_normal and not \
texture.texture.use_normal_map:
return texture.texture
@_material
def bump_scale(material):
"""
:param material:
:rtype: float
"""
return normal_scale(material)
@_material
def depth_test(material):
"""
:param material:
:return: THREE_depth_test value
:rtype: bool
"""
logger.debug("material.depth_test(%s)", material)
try:
test = material.THREE_depth_test
except AttributeError:
logger.debug("No THREE_depth_test attribute found")
test = True
return test
@_material
def depth_write(material):
"""
:param material:
:return: THREE_depth_write value
:rtype: bool
"""
logger.debug("material.depth_write(%s)", material)
try:
write = material.THREE_depth_write
except AttributeError:
logger.debug("No THREE_depth_write attribute found")
write = True
return write
@_material
def diffuse_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.diffuse_color(%s)", material)
return (material.diffuse_intensity * material.diffuse_color[0],
material.diffuse_intensity * material.diffuse_color[1],
material.diffuse_intensity * material.diffuse_color[2])
@_material
def diffuse_map(material):
"""
:param material:
:return: texture node for map
"""
logger.debug("material.diffuse_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_color_diffuse and not \
texture.blend_type == MULTIPLY:
return texture.texture
@_material
def emissive_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.emissive_color(%s)", material)
diffuse = diffuse_color(material)
return (material.emit * diffuse[0],
material.emit * diffuse[1],
material.emit * diffuse[2])
@_material
def light_map(material):
"""
:param material:
:return: texture node for light maps
"""
logger.debug("material.light_map(%s)", material)
for texture in _valid_textures(material, strict_use=False):
if texture.use_map_color_diffuse and \
texture.blend_type == MULTIPLY:
return texture.texture
@_material
def normal_scale(material):
"""
:param material:
:rtype: float
"""
logger.debug("material.normal_scale(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_normal:
return texture.normal_factor
@_material
def normal_map(material):
"""
:param material:
:return: texture node for normals
"""
logger.debug("material.normal_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_normal and \
texture.texture.use_normal_map:
return texture.texture
@_material
def opacity(material):
"""
:param material:
:rtype: float
"""
logger.debug("material.opacity(%s)", material)
return round(material.alpha, 2)
@_material
def shading(material):
"""
:param material:
:return: shading type (phong or lambert)
"""
logger.debug("material.shading(%s)", material)
dispatch = {
True: constants.PHONG,
False: constants.LAMBERT
}
return dispatch[material.specular_intensity > 0.0]
@_material
def specular_coef(material):
"""
:param material:
:rtype: float
"""
logger.debug("material.specular_coef(%s)", material)
return material.specular_hardness
@_material
def specular_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.specular_color(%s)", material)
return (material.specular_intensity * material.specular_color[0],
material.specular_intensity * material.specular_color[1],
material.specular_intensity * material.specular_color[2])
@_material
def specular_map(material):
"""
:param material:
:return: texture node for specular
"""
logger.debug("material.specular_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_specular:
return texture.texture
@_material
def transparent(material):
"""
:param material:
:rtype: bool
"""
logger.debug("material.transparent(%s)", material)
return material.use_transparency
@_material
def type(material):
"""
:param material:
:return: THREE compatible shader type
"""
logger.debug("material.type(%s)", material)
if material.diffuse_shader != 'LAMBERT':
material_type = constants.BASIC
elif material.specular_intensity > 0:
material_type = constants.PHONG
else:
material_type = constants.LAMBERT
return material_type
@_material
def use_vertex_colors(material):
"""
:param material:
:rtype: bool
"""
logger.debug("material.use_vertex_colors(%s)", material)
return material.use_vertex_color_paint
def used_materials():
"""
:return: list of materials that are in use
:rtype: generator
"""
logger.debug("material.used_materials()")
for material in data.materials:
if material.users > 0:
yield material.name
@_material
def visible(material):
"""
:param material:
:return: THREE_visible value
:rtype: bool
"""
logger.debug("material.visible(%s)", material)
try:
vis = material.THREE_visible
except AttributeError:
logger.debug("No THREE_visible attribute found")
vis = True
return vis
@_material
def wireframe(material):
"""
:param material:
:rtype: bool
"""
logger.debug("material.wireframe(%s)", material)
return material.type == WIRE
def _valid_textures(material, strict_use=True):
"""
:param material:
:rtype: generator
"""
for texture in material.texture_slots:
if not texture:
continue
if strict_use:
in_use = texture.use
else:
in_use = True
if texture.texture.type != IMAGE or not in_use:
continue
logger.debug("Valid texture found %s", texture)
yield texture
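# (Hedged illustration, separate from the exporter API above: a minimal,
# self-contained version of the name-or-object lookup that the _material
# decorator performs, using a plain dict instead of bpy.data.materials.
# Everything below is illustrative only.)
def _lookup(func):
    registry = {"demo_mat": {"alpha": 0.5}}

    def inner(name, *args, **kwargs):
        material = name if isinstance(name, dict) else registry[name]
        return func(material, *args, **kwargs)

    return inner


@_lookup
def _demo_opacity(material):
    return round(material["alpha"], 2)


assert _demo_opacity("demo_mat") == _demo_opacity({"alpha": 0.5}) == 0.5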
| 19.994885
| 69
| 0.634433
|
8d2e40ebdd6e5ae328594aa62d2168e2efb83cd3
| 455
|
py
|
Python
|
Symbol Patterns/symbolpattern135.py
|
vaidehisinha1/Python-PatternHouse
|
49f71bcc5319a838592e69b0e49ef1edba32bf7c
|
[
"MIT"
] | null | null | null |
Symbol Patterns/symbolpattern135.py
|
vaidehisinha1/Python-PatternHouse
|
49f71bcc5319a838592e69b0e49ef1edba32bf7c
|
[
"MIT"
] | 471
|
2022-01-15T07:07:18.000Z
|
2022-02-28T16:01:42.000Z
|
Symbol Patterns/symbolpattern135.py
|
vaidehisinha1/Python-PatternHouse
|
49f71bcc5319a838592e69b0e49ef1edba32bf7c
|
[
"MIT"
] | 2
|
2022-01-17T09:43:16.000Z
|
2022-01-29T15:15:47.000Z
|
height = int(input())
for i in range(1,height+1):
for j in range(1,height+1):
if(i == 1 or j == 1 or i == height-j+1):
print("*",end=" ")
elif(i == j and i >= height//2 and j > height//2):
print("*",end=" ")
else:
print(end=" ")
print()
# Sample Input :- 7
# Output :-
# * * * * * * *
# * *
# * *
# * *
# * * *
# * * *
# * *
| 16.851852
| 58
| 0.32967
|
94bf1d9726bb8cb73141750666bd2999eda26087
| 1,798
|
py
|
Python
|
api/src/repository/SampleRepository.py
|
SamuelJansen/FeatureManager
|
6b259aeb073ff705273e7d1f283232272fd629c9
|
[
"MIT"
] | 1
|
2021-11-21T21:18:32.000Z
|
2021-11-21T21:18:32.000Z
|
api/src/repository/SampleRepository.py
|
SamuelJansen/FeatureManager
|
6b259aeb073ff705273e7d1f283232272fd629c9
|
[
"MIT"
] | null | null | null |
api/src/repository/SampleRepository.py
|
SamuelJansen/FeatureManager
|
6b259aeb073ff705273e7d1f283232272fd629c9
|
[
"MIT"
] | null | null | null |
from python_framework import Repository
from Sample import Sample
@Repository(model = Sample)
class SampleRepository:
def findAll(self) :
return self.repository.findAllAndCommit(self.model)
def existsByKey(self,key) :
return self.repository.existsByKeyAndCommit(key, self.model)
def findByKey(self,key) :
if self.existsByKey(key) :
return self.repository.findByKeyAndCommit(key, self.model)
def notExistsByKey(self,key) :
return not self.existsByKey(key)
def save(self,model) :
return self.repository.saveAndCommit(model)
def deleteByKey(self,key):
self.repository.deleteByKeyAndCommit(key, self.model)
def findAllByFeatureKeyIn(self, featureKeyList):
sampleList = []
for sample in self.repository.session.query(self.model) :
keepIt = False
for key in featureKeyList :
for featureData in sample.featureDataList :
if featureData.feature.key == key :
keepIt = True
sampleList.append(sample)
break
if keepIt :
break
return sampleList
def findAllByAllFeatureKeyIn(self, featureKeyList):
sampleList = []
for sample in self.repository.session.query(self.model) :
keepIt = True
for key in featureKeyList :
keepIt = False
for featureData in sample.featureDataList :
if featureData.feature.key == key :
keepIt = True
break
if not keepIt :
break
if keepIt :
sampleList.append(sample)
return sampleList
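# (Hedged sketch, not part of the repository class: the "any key matches" vs
# "all keys match" filtering that the two finders above implement, shown on
# plain dicts and sets. All data below is illustrative only.)
_demo_samples = [
    {"name": "s1", "features": {"a", "b"}},
    {"name": "s2", "features": {"b"}},
]
_demo_keys = {"a", "b"}
_any_match = [s["name"] for s in _demo_samples if s["features"] & _demo_keys]   # findAllByFeatureKeyIn
_all_match = [s["name"] for s in _demo_samples if _demo_keys <= s["features"]]  # findAllByAllFeatureKeyIn
assert _any_match == ["s1", "s2"] and _all_match == ["s1"]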
| 32.107143
| 70
| 0.576752
|
f1b4c7820b46b1625a2f9106deaa0d899cf386c6
| 13,526
|
py
|
Python
|
jumpcutter.py
|
marc-philipp-knechtle/jumpcutter
|
26bfe561e73e311ac160baebd362a337098c2853
|
[
"MIT"
] | null | null | null |
jumpcutter.py
|
marc-philipp-knechtle/jumpcutter
|
26bfe561e73e311ac160baebd362a337098c2853
|
[
"MIT"
] | null | null | null |
jumpcutter.py
|
marc-philipp-knechtle/jumpcutter
|
26bfe561e73e311ac160baebd362a337098c2853
|
[
"MIT"
] | null | null | null |
import argparse
import math
import os
import re
import subprocess
from shutil import copyfile, rmtree
from time import sleep
import numpy as np
from audiotsm import phasevocoder
from audiotsm.io.wav import WavReader, WavWriter
from loguru import logger
from pytube import YouTube
from scipy.io import wavfile
def download_file(url):
name = YouTube(url).streams.first().download()
newname = name.replace(' ', '_')
os.rename(name, newname)
return newname
def get_max_volume(s):
maxv = float(np.max(s))
minv = float(np.min(s))
return max(maxv, -minv)
def copy_frame(input_frame, output_frame, temp_folder):
# src = TEMP_FOLDER + "/frame{:06d}".format(inputFrame + 1) + ".jpg"
frame_input: str = "frame{:06d}".format(input_frame + 1) + ".jpg"
src = os.path.join(temp_folder, frame_input)
# dst = TEMP_FOLDER + "/newFrame{:06d}".format(outputFrame + 1) + ".jpg"
frame_output: str = "newFrame{:06d}".format(output_frame + 1) + ".jpg"
dst = os.path.join(temp_folder, frame_output)
# if not os.path.isfile(src):
# return False
try:
copyfile(src, dst)
except FileNotFoundError:
raise FileNotFoundError("Special case for last frame in video!")
if output_frame % 20 == 19:
print(str(output_frame + 1) + " time-altered frames saved.")
def input_to_output_filename(filename):
dot_index = filename.rfind(".")
return filename[:dot_index] + "_ALTERED" + filename[dot_index:]
def create_path(s):
# assert (not os.path.exists(s)), "The filepath "+s+" already exists. Don't want to overwrite it. Aborting."
try:
logger.info("attempting to create dir with path: " + s)
os.mkdir(s)
except OSError:
assert False, "Creation of the directory %s failed. " \
"(The TEMP folder may already exist. Delete or rename it, and try again.)"
def delete_path(s):
try:
rmtree(s, ignore_errors=False)
    except OSError as error:
        print("Deletion of the directory %s failed" % s)
        print(error)
def parse_arguments() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description='Modifies a video file to play at different speeds when there is sound vs. silence.')
parser.add_argument('--input_file', type=str, help='the video file you want modified')
parser.add_argument('--url', type=str, help='A youtube url to download and process')
parser.add_argument('--output_file', type=str, default="",
help="the output file. (optional. if not included, it'll just modify the input file name)")
parser.add_argument('--silent_threshold', type=float, default=0.03,
help="the volume amount that frames' audio needs to surpass to be consider \"sounded\". "
"It ranges from 0 (silence) to 1 (max volume)")
parser.add_argument('--sounded_speed', type=float, default=1.00,
help="the speed that sounded (spoken) frames should be played at. Typically 1.")
parser.add_argument('--silent_speed', type=float, default=5.00,
help="the speed that silent frames should be played at. 999999 for jumpcutting.")
parser.add_argument('--frame_margin', type=float, default=1,
help="some silent frames adjacent to sounded frames are included to provide context. "
"How many frames on either the side of speech should be included? That's this variable.")
parser.add_argument('--sample_rate', type=float, default=44100, help="sample rate of the input and output videos")
parser.add_argument('--frame_rate', type=float, default=30,
help="frame rate of the input and output videos. optional... "
"I try to find it out myself, but it doesn't always work.")
parser.add_argument('--frame_quality', type=int, default=3,
help="quality of frames to be extracted from input video. "
"1 is highest, 31 is lowest, 3 is the default.")
parser.add_argument('--tmp_working_dir', type=str, default="",
help="Please specify a directory where all generated files will be temporarily stored. "
"This may be be helpful considering the large storage space this script needs to run on.")
parser.add_argument('--folder_watcher_mode', type=bool, default=False,
help="Mark true if you want to run this script in watcher mode. "
"This mode will process all files in the specified --watched_dir directory.")
parser.add_argument('--watched_dir', type=str, help='The directory to listen for new files to process.')
parser.add_argument('--processed_output_dir', type=str, default="",
help="The processed files will be placed in this directory if specfied. "
"It's otherwise not possible to specify the output directory. "
"The default output location is the scripts location.")
return parser.parse_args()
def create_frames(input_file, frame_quality, temp_folder):
command = "ffmpeg -i " + input_file + " -qscale:v " + str(
frame_quality) + " \"" + temp_folder + "/frame%06d.jpg\" -hide_banner"
logger.info("Executing: " + command)
subprocess.call(command, shell=True)
def create_audio(input_file, sample_rate, temp_folder):
command = "ffmpeg -i " + input_file + " -ab 160k -ac 2 -ar " + str(
sample_rate) + " -vn \"" + temp_folder + "/audio.wav\""
logger.info("Executing: " + command)
subprocess.call(command, shell=True)
def set_input_file(arguments: argparse.Namespace) -> str:
if arguments.url is not None:
return download_file(arguments.url)
else:
return arguments.input_file
def set_output_file(arguments: argparse.Namespace) -> str:
if len(arguments.output_file) >= 1:
return arguments.output_file
elif arguments.folder_watcher_mode:
return ""
else:
return input_to_output_filename(arguments.input_file)
def create_params(temp_folder):
command = "ffmpeg -i " + temp_folder + "/input.mp4 2>&1"
file = open(temp_folder + "/params.txt", "w")
subprocess.call(command, shell=True, stdout=file)
def write_to_file(temp_folder, frame_rate, output_file):
"""
outputFrame = math.ceil(outputPointer/samplesPerFrame)
for endGap in range(outputFrame,audioFrameCount):
copyFrame(int(audioSampleCount/samplesPerFrame)-1,endGap)
"""
logger.info("Writing processed file to: " + output_file)
command_local = "ffmpeg -framerate " + str(
frame_rate) + " -i " + temp_folder + "/newFrame%06d.jpg -i " + temp_folder + "/audioNew.wav -strict -2 " \
+ output_file
subprocess.call(command_local, shell=True)
def create_jumpcutted_video(frame_rate, temp_folder: str, silent_threshold, frame_spreadage, sample_rate, new_speed,
audio_fade_envelope_size):
global output_audio_data
sample_rate, audio_data = wavfile.read(os.path.join(temp_folder, "audio.wav"))
audio_sample_count = audio_data.shape[0]
max_audio_volume = get_max_volume(audio_data)
f = open(temp_folder + "/params.txt", 'r+')
pre_params = f.read()
f.close()
params = pre_params.split('\n')
for line in params:
m = re.search('Stream #.*Video.* ([0-9]*) fps', line)
if m is not None:
frame_rate = float(m.group(1))
samples_per_frame = sample_rate / frame_rate
audio_frame_count = int(math.ceil(audio_sample_count / samples_per_frame))
has_loud_audio = np.zeros(audio_frame_count)
for i in range(audio_frame_count):
start = int(i * samples_per_frame)
end = min(int((i + 1) * samples_per_frame), audio_sample_count)
audiochunks = audio_data[start:end]
maxchunks_volume = float(get_max_volume(audiochunks)) / max_audio_volume
if maxchunks_volume >= silent_threshold:
has_loud_audio[i] = 1
chunks = [[0, 0, 0]]
should_include_frame = np.zeros(audio_frame_count)
for i in range(audio_frame_count):
start = int(max(0, i - frame_spreadage))
end = int(min(audio_frame_count, i + 1 + frame_spreadage))
should_include_frame[i] = np.max(has_loud_audio[start:end])
if i >= 1 and should_include_frame[i] != should_include_frame[i - 1]: # Did we flip?
chunks.append([chunks[-1][1], i, should_include_frame[i - 1]])
chunks.append([chunks[-1][1], audio_frame_count, should_include_frame[i - 1]])
chunks = chunks[1:]
output_audio_data = np.zeros((0, audio_data.shape[1]))
output_pointer = 0
last_existing_frame = None
for chunk in chunks:
audio_chunk = audio_data[int(chunk[0] * samples_per_frame):int(chunk[1] * samples_per_frame)]
s_file = temp_folder + "/tempStart.wav"
e_file = temp_folder + "/tempEnd.wav"
wavfile.write(s_file, sample_rate, audio_chunk)
with WavReader(s_file) as reader:
with WavWriter(e_file, reader.channels, reader.samplerate) as writer:
tsm = phasevocoder(reader.channels, speed=new_speed[int(chunk[2])])
tsm.run(reader, writer)
_, altered_audio_data = wavfile.read(e_file)
leng = altered_audio_data.shape[0]
end_pointer = output_pointer + leng
output_audio_data = np.concatenate((output_audio_data, altered_audio_data / max_audio_volume))
# outputAudioData[output_pointer:end_pointer] = altered_audio_data/max_audio_volume
        # smooth out transition's audio by quickly fading in/out
if leng < audio_fade_envelope_size:
output_audio_data[output_pointer:end_pointer] = 0 # audio is less than 0.01 sec, let's just remove it.
else:
premask = np.arange(audio_fade_envelope_size) / audio_fade_envelope_size
mask = np.repeat(premask[:, np.newaxis], 2, axis=1) # make the fade-envelope mask stereo
output_audio_data[output_pointer:output_pointer + audio_fade_envelope_size] *= mask
output_audio_data[end_pointer - audio_fade_envelope_size:end_pointer] *= 1 - mask
start_output_frame = int(math.ceil(output_pointer / samples_per_frame))
end_output_frame = int(math.ceil(end_pointer / samples_per_frame))
for outputFrame in range(start_output_frame, end_output_frame):
input_frame = int(chunk[0] + new_speed[int(chunk[2])] * (outputFrame - start_output_frame))
try:
copy_frame(input_frame, outputFrame, temp_folder)
last_existing_frame = input_frame
except FileNotFoundError:
copy_frame(last_existing_frame, outputFrame, temp_folder)
output_pointer = end_pointer
def process_single_file(audio_fade_envelope_size, frame_quality, frame_rate, frame_spreadage, input_file, new_speed,
output_file, sample_rate, silent_threshold, temp_folder):
create_path(temp_folder)
create_frames(input_file, frame_quality, temp_folder)
create_audio(input_file, sample_rate, temp_folder)
create_params(temp_folder)
create_jumpcutted_video(frame_rate, temp_folder, silent_threshold, frame_spreadage, sample_rate, new_speed,
audio_fade_envelope_size)
wavfile.write(temp_folder + "/audioNew.wav", sample_rate, output_audio_data)
write_to_file(temp_folder, frame_rate, output_file)
delete_path(temp_folder)
def main():
args = parse_arguments()
frame_rate = args.frame_rate
sample_rate = args.sample_rate
silent_threshold = args.silent_threshold
frame_spreadage = args.frame_margin
new_speed = [args.silent_speed, args.sounded_speed]
watcher_mode: bool = args.folder_watcher_mode
watched_dir: str = args.watched_dir
tmp_working_dir: str = args.tmp_working_dir
processed_output_dir: str = args.processed_output_dir
input_file = set_input_file(args)
output_file = set_output_file(args)
url = args.url
frame_quality = args.frame_quality
temp_folder = os.path.join(tmp_working_dir, "tmp")
# smooth out transition's audio by quickly fading in/out (arbitrary magic number whatever)
audio_fade_envelope_size = 400
if not watcher_mode:
process_single_file(audio_fade_envelope_size, frame_quality, frame_rate, frame_spreadage, input_file, new_speed,
output_file, sample_rate, silent_threshold, temp_folder)
else:
logger.info("Started folder watcher mode on folder: " + watched_dir)
try:
while True:
for filename in os.listdir(watched_dir):
logger.info("Received file with name: " + filename)
filepath = watched_dir + "/" + filename
output_filename = input_to_output_filename(filename)
process_single_file(audio_fade_envelope_size, frame_quality, frame_rate, frame_spreadage, filepath,
new_speed, os.path.join(processed_output_dir, output_filename), sample_rate,
silent_threshold, temp_folder)
os.remove(filepath)
logger.info("Saved processed file as: " + output_filename)
sleep(2)
except KeyboardInterrupt:
exit(0)
main()
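# (Hedged usage note, not part of the original script. Example invocations,
# assuming ffmpeg is available on PATH; all file and directory names are
# placeholders.)
#
#   python jumpcutter.py --input_file lecture.mp4 --silent_speed 8 \
#       --sounded_speed 1 --frame_margin 2 --tmp_working_dir /tmp
#
#   python jumpcutter.py --folder_watcher_mode True --watched_dir ./inbox \
#       --processed_output_dir ./done --tmp_working_dir /tmp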
| 46.321918
| 120
| 0.662724
|
494317434b33db4f523d57b81be2d7964a681d93
| 17,837
|
py
|
Python
|
pytorch3d/ops/subdivide_meshes.py
|
hangg7/pytorcg3d
|
f7f363eeb8efeba0927f674c83ab927ad8ce3e32
|
[
"BSD-3-Clause"
] | 1
|
2020-07-13T12:40:42.000Z
|
2020-07-13T12:40:42.000Z
|
pytorch3d/ops/subdivide_meshes.py
|
hangg7/pytorch3d
|
f7f363eeb8efeba0927f674c83ab927ad8ce3e32
|
[
"BSD-3-Clause"
] | null | null | null |
pytorch3d/ops/subdivide_meshes.py
|
hangg7/pytorch3d
|
f7f363eeb8efeba0927f674c83ab927ad8ce3e32
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import torch
import torch.nn as nn
from pytorch3d.structures import Meshes
class SubdivideMeshes(nn.Module):
"""
Subdivide a triangle mesh by adding a new vertex at the center of each edge
and dividing each face into four new faces. Vectors of vertex
attributes can also be subdivided by averaging the values of the attributes
at the two vertices which form each edge. This implementation
preserves face orientation - if the vertices of a face are all ordered
counter-clockwise, then the faces in the subdivided meshes will also have
their vertices ordered counter-clockwise.
If meshes is provided as an input, the initializer performs the relatively
expensive computation of determining the new face indices. This one-time
computation can be reused for all meshes with the same face topology
but different vertex positions.
"""
def __init__(self, meshes=None):
"""
Args:
meshes: Meshes object or None. If a meshes object is provided,
the first mesh is used to compute the new faces of the
subdivided topology which can be reused for meshes with
the same input topology.
"""
super(SubdivideMeshes, self).__init__()
self.precomputed = False
self._N = -1
if meshes is not None:
# This computation is on indices, so gradients do not need to be
# tracked.
mesh = meshes[0]
with torch.no_grad():
subdivided_faces = self.subdivide_faces(mesh)
if subdivided_faces.shape[1] != 3:
raise ValueError('faces can only have three vertices')
self.register_buffer('_subdivided_faces', subdivided_faces)
self.precomputed = True
def subdivide_faces(self, meshes):
r"""
Args:
meshes: a Meshes object.
Returns:
subdivided_faces_packed: (4*sum(F_n), 3) shape LongTensor of
original and new faces.
Refer to pytorch3d.structures.meshes.py for more details on packed
representations of faces.
Each face is split into 4 faces e.g. Input face
::
v0
/\
/ \
/ \
e1 / \ e0
/ \
/ \
/ \
/______________\
v2 e2 v1
faces_packed = [[0, 1, 2]]
faces_packed_to_edges_packed = [[2, 1, 0]]
`faces_packed_to_edges_packed` is used to represent all the new
vertex indices corresponding to the mid-points of edges in the mesh.
The actual vertex coordinates will be computed in the forward function.
To get the indices of the new vertices, offset
`faces_packed_to_edges_packed` by the total number of vertices.
::
faces_packed_to_edges_packed = [[2, 1, 0]] + 3 = [[5, 4, 3]]
e.g. subdivided face
::
v0
/\
/ \
/ f0 \
v4 /______\ v3
/\ /\
/ \ f3 / \
/ f2 \ / f1 \
/______\/______\
v2 v5 v1
f0 = [0, 3, 4]
f1 = [1, 5, 3]
f2 = [2, 4, 5]
f3 = [5, 4, 3]
"""
verts_packed = meshes.verts_packed()
with torch.no_grad():
faces_packed = meshes.faces_packed()
faces_packed_to_edges_packed = meshes.faces_packed_to_edges_packed()
faces_packed_to_edges_packed += verts_packed.shape[0]
f0 = torch.stack(
[
faces_packed[:, 0],
faces_packed_to_edges_packed[:, 2],
faces_packed_to_edges_packed[:, 1],
],
dim=1,
)
f1 = torch.stack(
[
faces_packed[:, 1],
faces_packed_to_edges_packed[:, 0],
faces_packed_to_edges_packed[:, 2],
],
dim=1,
)
f2 = torch.stack(
[
faces_packed[:, 2],
faces_packed_to_edges_packed[:, 1],
faces_packed_to_edges_packed[:, 0],
],
dim=1,
)
f3 = faces_packed_to_edges_packed
subdivided_faces_packed = torch.cat(
[f0, f1, f2, f3], dim=0
) # (4*sum(F_n), 3)
return subdivided_faces_packed
def forward(self, meshes, feats=None):
"""
Subdivide a batch of meshes by adding a new vertex on each edge, and
dividing each face into four new faces. New meshes contains two types
of vertices:
1) Vertices that appear in the input meshes.
Data for these vertices are copied from the input meshes.
2) New vertices at the midpoint of each edge.
Data for these vertices is the average of the data for the two
vertices that make up the edge.
Args:
meshes: Meshes object representing a batch of meshes.
feats: Per-vertex features to be subdivided along with the verts.
Should be parallel to the packed vert representation of the
input meshes; so it should have shape (V, D) where V is the
total number of verts in the input meshes. Default: None.
Returns:
2-element tuple containing
- **new_meshes**: Meshes object of a batch of subdivided meshes.
- **new_feats**: (optional) Tensor of subdivided feats, parallel to the
(packed) vertices of the subdivided meshes. Only returned
if feats is not None.
"""
self._N = len(meshes)
if self.precomputed:
return self.subdivide_homogeneous(meshes, feats)
else:
return self.subdivide_heterogenerous(meshes, feats)
def subdivide_homogeneous(self, meshes, feats=None):
"""
Subdivide verts (and optionally features) of a batch of meshes
where each mesh has the same topology of faces. The subdivided faces
are precomputed in the initializer.
Args:
meshes: Meshes object representing a batch of meshes.
feats: Per-vertex features to be subdivided along with the verts.
Returns:
2-element tuple containing
- **new_meshes**: Meshes object of a batch of subdivided meshes.
- **new_feats**: (optional) Tensor of subdivided feats, parallel to the
(packed) vertices of the subdivided meshes. Only returned
if feats is not None.
"""
verts = meshes.verts_padded() # (N, V, D)
edges = meshes[0].edges_packed()
# The set of faces is the same across the different meshes.
new_faces = self._subdivided_faces.view(1, -1, 3).expand(
self._N, -1, -1
)
# Add one new vertex at the midpoint of each edge by taking the average
# of the vertices that form each edge.
new_verts = verts[:, edges].mean(dim=2)
new_verts = torch.cat(
[verts, new_verts], dim=1
) # (sum(V_n)+sum(E_n), 3)
new_feats = None
# Calculate features for new vertices.
if feats is not None:
if feats.dim() == 2:
# feats is in packed format, transform it from packed to
# padded, i.e. (N*V, D) to (N, V, D).
feats = feats.view(verts.size(0), verts.size(1), feats.size(1))
if feats.dim() != 3:
raise ValueError(
'features need to be of shape (N, V, D) or (N*V, D)'
)
# Take average of the features at the vertices that form each edge.
new_feats = feats[:, edges].mean(dim=2)
new_feats = torch.cat(
[feats, new_feats], dim=1
) # (sum(V_n)+sum(E_n), 3)
new_meshes = Meshes(verts=new_verts, faces=new_faces)
if feats is None:
return new_meshes
else:
return new_meshes, new_feats
def subdivide_heterogenerous(self, meshes, feats=None):
"""
Subdivide faces, verts (and optionally features) of a batch of meshes
where each mesh can have different face topologies.
Args:
meshes: Meshes object representing a batch of meshes.
feats: Per-vertex features to be subdivided along with the verts.
Returns:
2-element tuple containing
- **new_meshes**: Meshes object of a batch of subdivided meshes.
- **new_feats**: (optional) Tensor of subdivided feats, parallel to the
(packed) vertices of the subdivided meshes. Only returned
if feats is not None.
"""
# The computation of new faces is on face indices, so gradients do not
# need to be tracked.
verts = meshes.verts_packed()
with torch.no_grad():
new_faces = self.subdivide_faces(meshes)
edges = meshes.edges_packed()
face_to_mesh_idx = meshes.faces_packed_to_mesh_idx()
edge_to_mesh_idx = meshes.edges_packed_to_mesh_idx()
num_edges_per_mesh = edge_to_mesh_idx.bincount(minlength=self._N)
num_verts_per_mesh = meshes.num_verts_per_mesh()
num_faces_per_mesh = meshes.num_faces_per_mesh()
# Add one new vertex at the midpoint of each edge.
new_verts_per_mesh = num_verts_per_mesh + num_edges_per_mesh # (N,)
new_face_to_mesh_idx = torch.cat([face_to_mesh_idx] * 4, dim=0)
# Calculate the indices needed to group the new and existing verts
# for each mesh.
verts_sort_idx = create_verts_index(
num_verts_per_mesh, num_edges_per_mesh, meshes.device
) # (sum(V_n)+sum(E_n),)
verts_ordered_idx_init = torch.zeros(
new_verts_per_mesh.sum(),
dtype=torch.int64,
device=meshes.device,
) # (sum(V_n)+sum(E_n),)
# Reassign vertex indices so that existing and new vertices for each
# mesh are sequential.
verts_ordered_idx = verts_ordered_idx_init.scatter_add(
0,
verts_sort_idx,
torch.arange(new_verts_per_mesh.sum(), device=meshes.device),
)
# Retrieve vertex indices for each face.
new_faces = verts_ordered_idx[new_faces]
# Calculate the indices needed to group the existing and new faces
# for each mesh.
face_sort_idx = create_faces_index(
num_faces_per_mesh, device=meshes.device
)
# Reorder the faces to sequentially group existing and new faces
# for each mesh.
new_faces = new_faces[face_sort_idx]
new_face_to_mesh_idx = new_face_to_mesh_idx[face_sort_idx]
new_faces_per_mesh = new_face_to_mesh_idx.bincount(
minlength=self._N
) # (sum(F_n)*4)
# Add one new vertex at the midpoint of each edge by taking the average
# of the verts that form each edge.
new_verts = verts[edges].mean(dim=1)
new_verts = torch.cat([verts, new_verts], dim=0)
# Reorder the verts to sequentially group existing and new verts for
# each mesh.
new_verts = new_verts[verts_sort_idx]
if feats is not None:
new_feats = feats[edges].mean(dim=1)
new_feats = torch.cat([feats, new_feats], dim=0)
new_feats = new_feats[verts_sort_idx]
verts_list = list(new_verts.split(new_verts_per_mesh.tolist(), 0))
faces_list = list(new_faces.split(new_faces_per_mesh.tolist(), 0))
new_verts_per_mesh_cumsum = torch.cat(
[
new_verts_per_mesh.new_full(size=(1,), fill_value=0.0),
new_verts_per_mesh.cumsum(0)[:-1],
],
dim=0,
)
faces_list = [
faces_list[n] - new_verts_per_mesh_cumsum[n] for n in range(self._N)
]
if feats is not None:
feats_list = new_feats.split(new_verts_per_mesh.tolist(), 0)
new_meshes = Meshes(verts=verts_list, faces=faces_list)
if feats is None:
return new_meshes
else:
new_feats = torch.cat(feats_list, dim=0)
return new_meshes, new_feats
def create_verts_index(verts_per_mesh, edges_per_mesh, device=None):
"""
Helper function to group the vertex indices for each mesh. New vertices are
stacked at the end of the original verts tensor, so in order to have
sequential packing, the verts tensor needs to be reordered so that the
vertices corresponding to each mesh are grouped together.
Args:
verts_per_mesh: Tensor of shape (N,) giving the number of vertices
in each mesh in the batch where N is the batch size.
edges_per_mesh: Tensor of shape (N,) giving the number of edges
in each mesh in the batch
Returns:
verts_idx: A tensor with vert indices for each mesh ordered sequentially
by mesh index.
"""
# e.g. verts_per_mesh = (4, 5, 6)
# e.g. edges_per_mesh = (5, 7, 9)
V = verts_per_mesh.sum() # e.g. 15
E = edges_per_mesh.sum() # e.g. 21
verts_per_mesh_cumsum = verts_per_mesh.cumsum(dim=0) # (N,) e.g. (4, 9, 15)
edges_per_mesh_cumsum = edges_per_mesh.cumsum(
dim=0
) # (N,) e.g. (5, 12, 21)
v_to_e_idx = verts_per_mesh_cumsum.clone()
# vertex to edge index.
v_to_e_idx[1:] += edges_per_mesh_cumsum[
:-1
] # e.g. (4, 9, 15) + (0, 5, 12) = (4, 14, 27)
# vertex to edge offset.
v_to_e_offset = (
V - verts_per_mesh_cumsum
) # e.g. 15 - (4, 9, 15) = (11, 6, 0)
v_to_e_offset[1:] += edges_per_mesh_cumsum[
:-1
] # e.g. (11, 6, 0) + (0, 5, 12) = (11, 11, 12)
e_to_v_idx = (
verts_per_mesh_cumsum[:-1] + edges_per_mesh_cumsum[:-1]
) # (4, 9) + (5, 12) = (9, 21)
e_to_v_offset = (
verts_per_mesh_cumsum[:-1] - edges_per_mesh_cumsum[:-1] - V
) # (4, 9) - (5, 12) - 15 = (-16, -18)
# Add one new vertex per edge.
idx_diffs = torch.ones(V + E, device=device, dtype=torch.int64) # (36,)
idx_diffs[v_to_e_idx] += v_to_e_offset
idx_diffs[e_to_v_idx] += e_to_v_offset
# e.g.
# [
# 1, 1, 1, 1, 12, 1, 1, 1, 1,
# -15, 1, 1, 1, 1, 12, 1, 1, 1, 1, 1, 1,
# -17, 1, 1, 1, 1, 1, 13, 1, 1, 1, 1, 1, 1, 1
# ]
verts_idx = idx_diffs.cumsum(dim=0) - 1
# e.g.
# [
# 0, 1, 2, 3, 15, 16, 17, 18, 19, --> mesh 0
# 4, 5, 6, 7, 8, 20, 21, 22, 23, 24, 25, 26, --> mesh 1
# 9, 10, 11, 12, 13, 14, 27, 28, 29, 30, 31, 32, 33, 34, 35 --> mesh 2
# ]
# where for mesh 0, [0, 1, 2, 3] are the indices of the existing verts, and
# [15, 16, 17, 18, 19] are the indices of the new verts after subdivision.
return verts_idx
def create_faces_index(faces_per_mesh, device=None):
"""
Helper function to group the faces indices for each mesh. New faces are
stacked at the end of the original faces tensor, so in order to have
sequential packing, the faces tensor needs to be reordered to that faces
corresponding to each mesh are grouped together.
Args:
faces_per_mesh: Tensor of shape (N,) giving the number of faces
in each mesh in the batch where N is the batch size.
Returns:
faces_idx: A tensor with face indices for each mesh ordered sequentially
by mesh index.
"""
# e.g. faces_per_mesh = [2, 5, 3]
F = faces_per_mesh.sum() # e.g. 10
faces_per_mesh_cumsum = faces_per_mesh.cumsum(dim=0) # (N,) e.g. (2, 7, 10)
switch1_idx = faces_per_mesh_cumsum.clone()
switch1_idx[1:] += (
3 * faces_per_mesh_cumsum[:-1]
) # e.g. (2, 7, 10) + (0, 6, 21) = (2, 13, 31)
switch2_idx = 2 * faces_per_mesh_cumsum # e.g. (4, 14, 20)
switch2_idx[1:] += (
2 * faces_per_mesh_cumsum[:-1]
) # e.g. (4, 14, 20) + (0, 4, 14) = (4, 18, 34)
switch3_idx = 3 * faces_per_mesh_cumsum # e.g. (6, 21, 30)
switch3_idx[1:] += faces_per_mesh_cumsum[
:-1
] # e.g. (6, 21, 30) + (0, 2, 7) = (6, 23, 37)
switch4_idx = 4 * faces_per_mesh_cumsum[:-1] # e.g. (8, 28)
switch123_offset = F - faces_per_mesh # e.g. (8, 5, 7)
idx_diffs = torch.ones(4 * F, device=device, dtype=torch.int64)
idx_diffs[switch1_idx] += switch123_offset
idx_diffs[switch2_idx] += switch123_offset
idx_diffs[switch3_idx] += switch123_offset
idx_diffs[switch4_idx] -= 3 * F
# e.g
# [
# 1, 1, 9, 1, 9, 1, 9, 1, -> mesh 0
# -29, 1, 1, 1, 1, 6, 1, 1, 1, 1, 6, 1, 1, 1, 1, 6, 1, 1, 1, 1, -> mesh 1
# -29, 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1 -> mesh 2
# ]
faces_idx = idx_diffs.cumsum(dim=0) - 1
# e.g.
# [
# 0, 1, 10, 11, 20, 21, 30, 31,
# 2, 3, 4, 5, 6, 12, 13, 14, 15, 16, 22, 23, 24, 25, 26, 32, 33, 34, 35, 36,
# 7, 8, 9, 17, 18, 19, 27, 28, 29, 37, 38, 39
# ]
# where for mesh 0, [0, 1] are the indices of the existing faces, and
# [10, 11, 20, 21, 30, 31] are the indices of the new faces after subdivision.
return faces_idx
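# (Hedged usage sketch, not part of the original module: subdividing a single
# triangle with the class defined above. The tensors below are illustrative.)
def _demo_subdivide_single_triangle():
    verts = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    faces = torch.tensor([[0, 1, 2]])
    mesh = Meshes(verts=[verts], faces=[faces])
    new_mesh = SubdivideMeshes()(mesh)
    # one face becomes four, and one midpoint vertex is added per edge (3 edges)
    assert new_mesh.verts_packed().shape[0] == 6
    assert new_mesh.faces_packed().shape[0] == 4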
| 37.3159
| 83
| 0.565734
|
6b8e21d87a95ad4cf3c8e97460c91ed05e1e383e
| 365
|
py
|
Python
|
library/library/migrations/0005_auto_20220505_1020.py
|
buckldav/merit-api
|
8a3851a9703f5a57549fd858c3e94d136083a15a
|
[
"MIT"
] | null | null | null |
library/library/migrations/0005_auto_20220505_1020.py
|
buckldav/merit-api
|
8a3851a9703f5a57549fd858c3e94d136083a15a
|
[
"MIT"
] | 1
|
2021-11-23T17:56:07.000Z
|
2021-11-23T17:56:07.000Z
|
library/library/migrations/0005_auto_20220505_1020.py
|
buckldav/merit-api
|
8a3851a9703f5a57549fd858c3e94d136083a15a
|
[
"MIT"
] | 2
|
2022-03-03T15:43:50.000Z
|
2022-03-31T15:08:29.000Z
|
# Generated by Django 3.1.13 on 2022-05-05 16:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('library', '0004_checkout_checkin_time'),
]
operations = [
migrations.RenameField(
model_name='checkout',
old_name='book',
new_name='isbn',
),
]
| 19.210526
| 50
| 0.586301
|
f61c8e02e89b10dc8003c8403fe81b340de7a758
| 246
|
py
|
Python
|
a3a_logster/utility/meta.py
|
Giddius/A3A_Logster_repo
|
d2417c783e6c5e5e0b07504b3d3d7ce5534673e6
|
[
"MIT"
] | null | null | null |
a3a_logster/utility/meta.py
|
Giddius/A3A_Logster_repo
|
d2417c783e6c5e5e0b07504b3d3d7ce5534673e6
|
[
"MIT"
] | null | null | null |
a3a_logster/utility/meta.py
|
Giddius/A3A_Logster_repo
|
d2417c783e6c5e5e0b07504b3d3d7ce5534673e6
|
[
"MIT"
] | null | null | null |
class SingletonMeta(type):
_instance = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instance:
cls._instance[cls] = super(SingletonMeta, cls).__call__(*args, **kwargs)
return cls._instance[cls]
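# (Hedged usage sketch, not part of the original module: every instantiation of
# a class using SingletonMeta returns the same shared instance.)
class _DemoConfig(metaclass=SingletonMeta):
    def __init__(self):
        self.values = {}


assert _DemoConfig() is _DemoConfig()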
| 22.363636
| 84
| 0.617886
|
29f64523710c7beea13b964cf82d8712ad9586c7
| 6,854
|
py
|
Python
|
teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/api/ci/views/ci_task_flow_view.py
|
zhangyin2088/Teamcat
|
be9be8d7c1e58c8d2d22ab78d25783d9aee4de71
|
[
"Apache-2.0"
] | 6
|
2018-11-26T08:42:52.000Z
|
2020-06-01T08:33:48.000Z
|
teamcat_service/doraemon/doraemon/api/ci/views/ci_task_flow_view.py
|
zhangyin2088/Teamcat
|
be9be8d7c1e58c8d2d22ab78d25783d9aee4de71
|
[
"Apache-2.0"
] | null | null | null |
teamcat_service/doraemon/doraemon/api/ci/views/ci_task_flow_view.py
|
zhangyin2088/Teamcat
|
be9be8d7c1e58c8d2d22ab78d25783d9aee4de71
|
[
"Apache-2.0"
] | 1
|
2019-01-22T06:45:36.000Z
|
2019-01-22T06:45:36.000Z
|
#coding=utf-8
# coding=utf-8
'''
Created on 2014-1-5
@author: ETHAN
'''
from rest_framework import generics,status
from doraemon.api.ci.serializer import ci_taskflow_serializer
from rest_framework.permissions import AllowAny
from doraemon.ci.models import CITaskFlow,CITaskFlowSection
from doraemon.api.ci.viewmodel.api_ci_taskflow import ApiCITaskFlow
from rest_framework.response import Response
from business.ci.ci_taskflow_service import CITaskFlowService
from business.ci.ci_taskflow_section_service import CITaskFlowSectionService
from doraemon.api.ci.filters.ci_pagination import CIPagination
from doraemon.api.ci.filters.ci_taskflow_filter import CITaskFlowFilterSet
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from doraemon.api.project.views.CsrfExemptSessionAuthentication import CsrfExemptSessionAuthentication
from gatesidelib.common.simplelogger import SimpleLogger
class CITaskFlowView(generics.RetrieveUpdateDestroyAPIView):
"""
path:/api/ci/task_flow/<id>/
id:taskid
"""
serializer_class = ci_taskflow_serializer.CITaskFlowSerializer
permission_classes=[AllowAny]
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def get_object(self):
taskflow_id = int(self.kwargs['id'])
print(taskflow_id)
task_flow=CITaskFlow.objects.get(taskflow_id)
temp=ApiCITaskFlow(task_flow)
task_flow.Sections=temp.Sections
return task_flow
def delete(self,request, *args, **kwargs):
flow_id =int(kwargs['id'])
task_flow=CITaskFlow.objects.get(flow_id)
result = '任务流 ['+ task_flow.FlowName +'] 删除失败,请联系管理员或者重试!'
try:
result = CITaskFlowService.delete_taskflow(request.user,flow_id)
except Exception as ex:
SimpleLogger.exception(ex)
return Response({'message': result})
class CITaskFlowOperationView(generics.RetrieveAPIView):
"""
path:/api/ci/task_flow/<id>/<operation>
id:taskflow id
operation:start,copy
"""
serializer_class = ci_taskflow_serializer.CITaskFlowSerializer
permission_classes=[AllowAny]
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def get_object(self):
flow_id = int(self.kwargs['id'])
task_flow=CITaskFlow.objects.get(flow_id)
return task_flow
def get(self,request, *args, **kwargs):
flow_id =int(kwargs['id'])
task_flow=CITaskFlow.objects.get(flow_id)
operation = kwargs['operation'].strip()
result = '任务流 ['+ task_flow.FlowName +'] 执行指令下发失败,请联系管理员或者重试!'
try:
if operation == 'start':
result = CITaskFlowService.start_taskflow(request,flow_id)
if operation == 'copy':
result = CITaskFlowService.copy_taskflow(request.user,flow_id)
except Exception as ex:
SimpleLogger.exception(ex)
return Response({'message': result})
class CITaskFlowListView(generics.ListCreateAPIView):
"""
/api/ci/task_flow/list
get all ci taskflow and create new ci task
FilterSet: id, Project
FilterOperation:=,__in,__gt,__contains,__icontains,Range__in,__lt,!=,__isnull
"""
serializer_class = ci_taskflow_serializer.CITaskFlowListSerializer
permission_classes=[AllowAny]
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
pagination_class = CIPagination
def get_queryset(self):
qs = CITaskFlow.objects.all().filter(IsActive=1)
return CITaskFlowFilterSet(data=self.request.GET, queryset=qs).filter()
def create(self, request, *args, **kwargs):
task_flow = CITaskFlowService.create_taskflow(request.data,request.user)
serializer = ci_taskflow_serializer.CITaskFlowListSerializer(instance=task_flow,data = request.data)
serializer.is_valid(raise_exception=True)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class CITaskFlowMyListView(generics.ListAPIView):
"""
    /api/ci/task_flow/my
    get all my ci taskflow
    FilterSet: id, Project
    FilterOperation:=,__in,__gt,__contains,__icontains,Range__in,__lt,!=
"""
serializer_class = ci_taskflow_serializer.CITaskFlowListSerializer
permission_classes=[AllowAny]
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
pagination_class = CIPagination
def get_queryset(self):
qs = CITaskFlowService.get_my_taskflows(self.request,'all')
return CITaskFlowFilterSet(data=self.request.GET, queryset=qs).filter()
class CITaskFlowSectionView(generics.RetrieveUpdateDestroyAPIView):
"""
/api/ci/task_flow/section/id
get,update,delete section with section id
"""
serializer_class = ci_taskflow_serializer.CITaskFlowSectionSerializer
permission_classes=[AllowAny]
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
pagination_class = CIPagination
def get_object(self):
section_id = int(self.kwargs.get('id', 0))
section = CITaskFlowSection.objects.get(section_id)
return section
class CIFlowSectionOperationView(generics.RetrieveAPIView):
"""
path:/api/ci/flow_section/<id>/<operation>
id:section id
operation:start
"""
serializer_class = ci_taskflow_serializer.CITaskFlowSectionSerializer
permission_classes=[AllowAny]
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
pagination_class = CIPagination
def get_object(self):
section_id = int(self.kwargs.get('id', 0))
section = CITaskFlowSection.objects.get(section_id)
return section
def get(self,request, *args, **kwargs):
section = self.get_object()
operation = kwargs['operation'].strip()
        result = 'Task flow section [' + section.SectionName + '] failed to dispatch the execution command, please contact an administrator or try again!'
try:
if operation == 'start':
result = CITaskFlowSectionService.start_flowsection(request,section.id)
except Exception as ex:
SimpleLogger.exception(ex)
return Response({'message': result})
class CITaskFlowSectionListView(generics.ListCreateAPIView):
"""
    list and create sections for a task flow,
    looked up by flow_id and ordered by SectionOrder
"""
serializer_class = ci_taskflow_serializer.CITaskFlowSectionSerializer
permission_classes=[AllowAny]
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
pagination_class = CIPagination
def get_queryset(self):
flow_id = int(self.kwargs.get('flow_id', 0))
sections = CITaskFlowSection.objects.flow_sections(flow_id).order_by('SectionOrder')
return sections
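# (Hedged usage note, not part of the original views. Illustrative query-string
# examples for the filter syntax documented in the docstrings above; the exact
# route prefixes depend on the project's URL configuration.)
#
#   GET /api/ci/task_flow/list?Project=3        -> task flows of project 3
#   GET /api/ci/task_flow/list?id__in=1,2,5     -> task flows with these ids
#   GET /api/ci/task_flow/<id>/start            -> dispatch a "start" command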
| 36.073684
| 137
| 0.730522
|
aa51c8350ed9222859efd6ec91a2b5aed98658c3
| 3,334
|
py
|
Python
|
create_segm_labels.py
|
Fkaneko/kaggle-hpa-single-cell-image-classification
|
52000cbf5c7eec6ace29274d9e85b5b24fac281b
|
[
"MIT"
] | 1
|
2022-01-12T08:44:55.000Z
|
2022-01-12T08:44:55.000Z
|
create_segm_labels.py
|
Fkaneko/kaggle-hpa-single-cell-image-classification
|
52000cbf5c7eec6ace29274d9e85b5b24fac281b
|
[
"MIT"
] | null | null | null |
create_segm_labels.py
|
Fkaneko/kaggle-hpa-single-cell-image-classification
|
52000cbf5c7eec6ace29274d9e85b5b24fac281b
|
[
"MIT"
] | null | null | null |
import argparse
import copy
import glob
import os
from typing import Tuple
import numpy as np
import pandas as pd
import pycocotools.mask as coco_mask
from tqdm import tqdm
from run_cam_infer import decode_ascii_mask
from src.dataset.utils import ErrorSample, save_segm_label
from src.utils.util import print_argparse_arguments
def convert_to_train_format(pred_df: pd.DataFrame) -> Tuple[dict, list]:
samples = {}
error_samples = []
for i, row_ in enumerate(tqdm(pred_df.itertuples(), total=len(pred_df))):
try:
pred = row_.PredictionString.split(" ")
except AttributeError as e:
print(i, row_.ID, e)
error_samples.append(ErrorSample(ID=row_.ID, csv_idx=i))
            continue  # skip this row; otherwise `pred` would be undefined below
w_size, h_size = row_.ImageWidth, row_.ImageHeight
input_id = row_.ID
class_ids = np.array(pred[0::3], dtype=np.int32)
confs = np.array(pred[1::3], dtype=np.float32)
rle_asciis = pred[2::3]
last_ascii = ""
rles: list = []
rles_idxs = []
rles_idx = -1
for ins_id, rle_ascii in enumerate(rle_asciis):
if last_ascii == rle_ascii:
rles.append(rles[-1].copy())
rles_idxs.append(rles_idx)
continue
else:
rles_idx += 1
rles_idxs.append(rles_idx)
last_ascii = copy.deepcopy(rle_ascii)
mask_dict = decode_ascii_mask(rle_ascii, w_size, h_size)
rles.append(mask_dict["rle"])
bboxes = coco_mask.toBbox(rles)
bboxes[:, 2] += bboxes[:, 0]
bboxes[:, 3] += bboxes[:, 1]
input_sample = {
"filename": input_id,
"width": w_size,
"height": h_size,
"ann": {
"bboxes": np.array(bboxes, dtype=np.float32),
"labels": class_ids,
"confs": confs,
"masks": rles,
"mask_idxs": rles_idxs,
},
}
samples[input_id] = input_sample
return samples, error_samples
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--save_folder",
default="./save_uni/version_15/",
type=str,
help="the path to save the sub-category label",
)
parser.add_argument(
"--sub_csv",
type=str,
default="./save_uni/version_15/submission_full_v15_0407.csv",
help="submission style csv, or a directory which contains csvs",
)
args = parser.parse_args()
print_argparse_arguments(args)
if os.path.isdir(args.sub_csv):
sub_csvs = glob.glob(os.path.join(args.sub_csv, "*.csv"))
dfs = []
for path_ in sub_csvs:
dfs.append(pd.read_csv(path_))
print(f"load: {path_} \t len: {len(dfs[-1])}")
pred_df = pd.concat(dfs, axis=0)
print("csvs are merged:", len(pred_df))
assert pred_df.duplicated(["ID"]).sum() == 0
elif os.path.isfile(args.sub_csv):
pred_df = pd.read_csv(args.sub_csv)
else:
raise NotImplementedError
print(pred_df.head())
samples, error_samples = convert_to_train_format(pred_df)
save_segm_label(
samples=samples, error_samples=error_samples, save_folder=args.save_folder
)
| 30.309091
| 82
| 0.589682
|
83012fd0373aa81ea02c81d79acdd2328f7fc856
| 16,325
|
py
|
Python
|
napari/components/viewer_model.py
|
zeroth/napari
|
c8d755b13716d1a60003e88f9d75dd6af8a346f9
|
[
"BSD-3-Clause"
] | null | null | null |
napari/components/viewer_model.py
|
zeroth/napari
|
c8d755b13716d1a60003e88f9d75dd6af8a346f9
|
[
"BSD-3-Clause"
] | null | null | null |
napari/components/viewer_model.py
|
zeroth/napari
|
c8d755b13716d1a60003e88f9d75dd6af8a346f9
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from ..utils.events import EmitterGroup, Event
from ..utils.key_bindings import KeymapHandler, KeymapProvider
from ..utils.theme import palettes
from ._viewer_mouse_bindings import dims_scroll
from .add_layers_mixin import AddLayersMixin
from .dims import Dims
from .layerlist import LayerList
class ViewerModel(AddLayersMixin, KeymapHandler, KeymapProvider):
"""Viewer containing the rendered scene, layers, and controlling elements
including dimension sliders, and control bars for color limits.
Parameters
----------
title : string
The title of the viewer window.
ndisplay : {2, 3}
Number of displayed dimensions.
order : tuple of int
Order in which dimensions are displayed where the last two or last
three dimensions correspond to row x column or plane x row x column if
ndisplay is 2 or 3.
    axis_labels : list of str
Dimension names.
Attributes
----------
window : Window
Parent window.
layers : LayerList
List of contained layers.
dims : Dimensions
Contains axes, indices, dimensions and sliders.
themes : dict of str: dict of str: str
Preset color palettes.
"""
themes = palettes
def __init__(
self, title='napari', ndisplay=2, order=None, axis_labels=None
):
super().__init__()
self.events = EmitterGroup(
source=self,
auto_connect=True,
status=Event,
help=Event,
title=Event,
interactive=Event,
cursor=Event,
reset_view=Event,
active_layer=Event,
palette=Event,
grid=Event,
layers_change=Event,
)
self.dims = Dims(
ndim=None, ndisplay=ndisplay, order=order, axis_labels=axis_labels
)
self.layers = LayerList()
self._status = 'Ready'
self._help = ''
self._title = title
self._cursor = 'standard'
self._cursor_size = None
self._interactive = True
self._active_layer = None
self._grid_size = (1, 1)
self.grid_stride = 1
self._palette = None
self.theme = 'dark'
self.dims.events.camera.connect(self.reset_view)
self.dims.events.ndisplay.connect(self._update_layers)
self.dims.events.order.connect(self._update_layers)
self.dims.events.axis.connect(self._update_layers)
self.layers.events.changed.connect(self._update_active_layer)
self.layers.events.changed.connect(self._update_grid)
self.layers.events.changed.connect(self._on_layers_change)
self.keymap_providers = [self]
# Hold callbacks for when mouse moves with nothing pressed
self.mouse_move_callbacks = []
# Hold callbacks for when mouse is pressed, dragged, and released
self.mouse_drag_callbacks = []
# Hold callbacks for when mouse wheel is scrolled
self.mouse_wheel_callbacks = [dims_scroll]
self._persisted_mouse_event = {}
self._mouse_drag_gen = {}
self._mouse_wheel_gen = {}
@property
def palette(self):
"""dict of str: str : Color palette with which to style the viewer.
"""
return self._palette
@palette.setter
def palette(self, palette):
if palette == self.palette:
return
self._palette = palette
self.events.palette()
@property
def theme(self):
"""string or None : Preset color palette.
"""
for theme, palette in self.themes.items():
if palette == self.palette:
return theme
@theme.setter
def theme(self, theme):
if theme == self.theme:
return
try:
self.palette = self.themes[theme]
except KeyError:
raise ValueError(
f"Theme '{theme}' not found; "
f"options are {list(self.themes)}."
)
@property
def grid_size(self):
"""tuple: Size of grid
"""
return self._grid_size
@grid_size.setter
def grid_size(self, grid_size):
if np.all(self.grid_size == grid_size):
return
self._grid_size = grid_size
self.reset_view()
self.events.grid()
@property
def status(self):
"""string: Status string
"""
return self._status
@status.setter
def status(self, status):
if status == self.status:
return
self._status = status
self.events.status(text=self._status)
@property
def help(self):
"""string: String that can be displayed to the
user in the status bar with helpful usage tips.
"""
return self._help
@help.setter
def help(self, help):
if help == self.help:
return
self._help = help
self.events.help(text=self._help)
@property
def title(self):
"""string: String that is displayed in window title.
"""
return self._title
@title.setter
def title(self, title):
if title == self.title:
return
self._title = title
self.events.title(text=self._title)
@property
def interactive(self):
"""bool: Determines if canvas pan/zoom interactivity is enabled or not.
"""
return self._interactive
@interactive.setter
def interactive(self, interactive):
if interactive == self.interactive:
return
self._interactive = interactive
self.events.interactive()
@property
def cursor(self):
"""string: String identifying cursor displayed over canvas.
"""
return self._cursor
@cursor.setter
def cursor(self, cursor):
if cursor == self.cursor:
return
self._cursor = cursor
self.events.cursor()
@property
def cursor_size(self):
"""int | None: Size of cursor if custom. None is yields default size
"""
return self._cursor_size
@cursor_size.setter
def cursor_size(self, cursor_size):
if cursor_size == self.cursor_size:
return
self._cursor_size = cursor_size
self.events.cursor()
@property
def active_layer(self):
"""int: index of active_layer
"""
return self._active_layer
@active_layer.setter
def active_layer(self, active_layer):
if active_layer == self.active_layer:
return
if self._active_layer is not None:
self.keymap_providers.remove(self._active_layer)
self._active_layer = active_layer
if active_layer is not None:
self.keymap_providers.insert(0, active_layer)
self.events.active_layer(item=self._active_layer)
@property
def _sliced_extent_world(self) -> np.ndarray:
"""Extent of layers in world coordinates after slicing.
D is either 2 or 3 depending on if the displayed data is 2D or 3D.
Returns
-------
sliced_extent_world : array, shape (2, D)
"""
if len(self.layers) == 0 and self.dims.ndim != 2:
# If no data is present and dims model has not been reset to 0
            # then someone has passed more than two axis labels which are
# being saved and so default values are used.
return np.vstack(
[np.zeros(self.dims.ndim), np.repeat(512, self.dims.ndim)]
)
else:
return self.layers._extent_world[:, self.dims.displayed]
def reset_view(self, event=None):
"""Resets the camera's view using `event.rect` a 4-tuple of the x, y
corner position followed by width and height of the camera
"""
extent = self._sliced_extent_world
scene_size = extent[1] - extent[0]
corner = extent[0]
grid_size = list(self.grid_size)
if len(scene_size) > len(grid_size):
grid_size = [1] * (len(scene_size) - len(grid_size)) + grid_size
size = np.multiply(scene_size, grid_size)
centroid = np.add(corner, np.divide(size, 2))
if self.dims.ndisplay == 2:
# For a PanZoomCamera emit a 4-tuple of the rect
corner = np.subtract(corner, np.multiply(0.05, size))[::-1]
size = np.multiply(1.1, size)[::-1]
rect = tuple(corner) + tuple(size)
self.events.reset_view(rect=rect)
else:
# For an ArcballCamera emit the center and scale_factor
center = centroid[::-1]
scale_factor = 1.1 * np.max(size[-2:])
# set initial camera angle so that it matches top layer of 2D view
# when transitioning to 3D view
quaternion = [np.pi / 2, 1, 0, 0]
self.events.reset_view(
center=center, scale_factor=scale_factor, quaternion=quaternion
)
def _new_labels(self):
"""Create new labels layer filling full world coordinates space."""
extent = self.layers._extent_world
scale = self.layers._step_size
scene_size = extent[1] - extent[0]
corner = extent[0]
shape = [
np.round(s / sc).astype('int') if s > 0 else 1
for s, sc in zip(scene_size, scale)
]
empty_labels = np.zeros(shape, dtype=int)
self.add_labels(empty_labels, translate=np.array(corner), scale=scale)
def _update_layers(self, event=None, layers=None):
"""Updates the contained layers.
Parameters
----------
layers : list of napari.layers.Layer, optional
List of layers to update. If none provided updates all.
"""
layers = layers or self.layers
for layer in layers:
# adjust the order of the global dims based on the number of
# dimensions that a layer has - for example a global order of
# [2, 1, 0, 3] -> [0, 1] for a layer that only has two dimensions
# or -> [1, 0, 2] for a layer with three as that corresponds to
# the relative order of the last two and three dimensions
# respectively
offset = self.dims.ndim - layer.dims.ndim
order = np.array(self.dims.order)
if offset <= 0:
order = list(range(-offset)) + list(order - offset)
else:
order = list(order[order >= offset] - offset)
layer.dims.order = order
layer.dims.ndisplay = self.dims.ndisplay
# Update the point values of the layers for the dimensions that
# the layer has
for axis in range(layer.dims.ndim):
point = self.dims.point[axis + offset]
layer.dims.set_point(axis, point)
def _toggle_theme(self):
"""Switch to next theme in list of themes
"""
theme_names = list(self.themes.keys())
cur_theme = theme_names.index(self.theme)
self.theme = theme_names[(cur_theme + 1) % len(theme_names)]
def _update_active_layer(self, event):
"""Set the active layer by iterating over the layers list and
finding the first selected layer. If multiple layers are selected the
iteration stops and the active layer is set to be None
Parameters
----------
event : Event
No Event parameters are used
"""
# iteration goes backwards to find top most selected layer if any
# if multiple layers are selected sets the active layer to None
active_layer = None
for layer in self.layers:
if active_layer is None and layer.selected:
active_layer = layer
elif active_layer is not None and layer.selected:
active_layer = None
break
if active_layer is None:
self.status = 'Ready'
self.help = ''
self.cursor = 'standard'
self.interactive = True
self.active_layer = None
else:
self.status = active_layer.status
self.help = active_layer.help
self.cursor = active_layer.cursor
self.interactive = active_layer.interactive
self.active_layer = active_layer
def _on_layers_change(self, event):
if len(self.layers) == 0:
self.dims.ndim = 2
self.dims.reset()
else:
extent = self.layers._extent_world
ss = self.layers._step_size
ndim = extent.shape[1]
self.dims.ndim = ndim
for i in range(ndim):
self.dims.set_range(i, (extent[0, i], extent[1, i], ss[i]))
self.events.layers_change()
def _update_status(self, event):
"""Set the viewer status with the `event.status` string."""
self.status = event.status
def _update_help(self, event):
"""Set the viewer help with the `event.help` string."""
self.help = event.help
def _update_interactive(self, event):
"""Set the viewer interactivity with the `event.interactive` bool."""
self.interactive = event.interactive
def _update_cursor(self, event):
"""Set the viewer cursor with the `event.cursor` string."""
self.cursor = event.cursor
def _update_cursor_size(self, event):
"""Set the viewer cursor_size with the `event.cursor_size` int."""
self.cursor_size = event.cursor_size
def grid_view(self, n_row=None, n_column=None, stride=1):
"""Arrange the current layers is a 2D grid.
Default behaviour is to make a square 2D grid.
Parameters
----------
n_row : int, optional
Number of rows in the grid.
n_column : int, optional
            Number of columns in the grid.
stride : int, optional
Number of layers to place in each grid square before moving on to
the next square. The default ordering is to place the most visible
layer in the top left corner of the grid. A negative stride will
cause the order in which the layers are placed in the grid to be
reversed.
"""
n_grid_squares = np.ceil(len(self.layers) / abs(stride)).astype(int)
if n_row is None and n_column is None:
n_row = np.ceil(np.sqrt(n_grid_squares)).astype(int)
n_column = n_row
elif n_row is None:
n_row = np.ceil(n_grid_squares / n_column).astype(int)
elif n_column is None:
n_column = np.ceil(n_grid_squares / n_row).astype(int)
n_row = max(1, n_row)
n_column = max(1, n_column)
self.grid_size = (n_row, n_column)
self.grid_stride = stride
for i, layer in enumerate(self.layers):
if stride > 0:
adj_i = len(self.layers) - i - 1
else:
adj_i = i
adj_i = adj_i // abs(stride)
adj_i = adj_i % (n_row * n_column)
i_row = adj_i // n_column
i_column = adj_i % n_column
self._subplot(layer, (i_row, i_column))
def stack_view(self):
"""Arrange the current layers is a stack.
"""
self.grid_view(n_row=1, n_column=1, stride=1)
def _update_grid(self, event=None):
"""Update grid with current grid values.
"""
self.grid_view(
n_row=self.grid_size[0],
n_column=self.grid_size[1],
stride=self.grid_stride,
)
def _subplot(self, layer, position):
"""Shift a layer to a specified position in a 2D grid.
Parameters
----------
layer : napari.layers.Layer
Layer that is to be moved.
position : 2-tuple of int
New position of layer in grid.
size : 2-tuple of int
Size of the grid that is being used.
"""
extent = self._sliced_extent_world
scene_size = extent[1] - extent[0]
translate_2d = np.multiply(scene_size[-2:], position)
translate = [0] * layer.ndim
translate[-2:] = translate_2d
layer.translate_grid = translate
| 32.913306
| 79
| 0.588668
|
1068297e50830302ed944f8bd43a501591bf5d9d
| 3,942
|
py
|
Python
|
testproject/testproject/app/tests/test_connection.py
|
maria-grigorieva/django_cassandra_engine
|
70918eeb6edd26c50a394a1ddcf6521b92ec429a
|
[
"BSD-2-Clause"
] | null | null | null |
testproject/testproject/app/tests/test_connection.py
|
maria-grigorieva/django_cassandra_engine
|
70918eeb6edd26c50a394a1ddcf6521b92ec429a
|
[
"BSD-2-Clause"
] | null | null | null |
testproject/testproject/app/tests/test_connection.py
|
maria-grigorieva/django_cassandra_engine
|
70918eeb6edd26c50a394a1ddcf6521b92ec429a
|
[
"BSD-2-Clause"
] | null | null | null |
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from mock import patch
from django.test import TestCase
from django_cassandra_engine.connection import CassandraConnection, Cursor
from django_cassandra_engine.utils import get_cassandra_connection
class CassandraConnectionTestCase(TestCase):
def setUp(self):
self.cassandra_connection = get_cassandra_connection()
self.connection = CassandraConnection(
**self.cassandra_connection.settings_dict)
def test_cursor(self):
self.assertIsInstance(self.connection.cursor(), Cursor)
self.assertEqual(self.connection.cursor().connection, self.connection)
def test_connected_to_db(self):
from cqlengine import connection as cql_connection
self.assertIsInstance(cql_connection.cluster, Cluster)
self.assertIsNotNone(cql_connection.session)
def test_session_property(self):
from cqlengine import connection as cql_connection
self.assertEqual(self.connection.session, cql_connection.session)
def test_cluster_property(self):
from cqlengine import connection as cql_connection
self.assertEqual(self.connection.cluster, cql_connection.cluster)
def test_connection_options(self):
connection_options = \
self.cassandra_connection.settings_dict['OPTIONS']['connection']
self.assertEqual(
self.connection.connection_options, connection_options)
@patch("cqlengine.connection")
def test_connection_setup_called_first_time_with_proper_options(
self, connection_mock):
settings = self.cassandra_connection.settings_dict
connection_mock.cluster = None
connection = CassandraConnection(**settings)
connection_mock.setup.assert_called_once_with(
connection.hosts, connection.keyspace,
**settings['OPTIONS']['connection'])
@patch("django_cassandra_engine.connection.connection")
def test_connection_setup_called_second_time(
self, connection_mock):
settings = self.cassandra_connection.settings_dict
connection_mock.cluster = Cluster()
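        # a cluster already exists on the mocked connection module, so setup() must not be called a second time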
CassandraConnection(**settings)
self.assertFalse(connection_mock.setup.called)
def test_connection_auth_provider_added_to_connection_options(self):
settings = self.cassandra_connection.settings_dict
settings['USER'] = 'user'
settings['PASSWORD'] = 'pass'
connection = CassandraConnection(**settings)
self.assertIsInstance(connection.connection_options['auth_provider'],
PlainTextAuthProvider)
def test_connection_auth_provider_not_changed(self):
settings = self.cassandra_connection.settings_dict
settings['USER'] = 'user'
settings['PASSWORD'] = 'pass'
settings['OPTIONS']['connection'] = {}
settings['OPTIONS']['connection']['auth_provider'] = 'sth'
connection = CassandraConnection(**settings)
self.assertEqual(connection.connection_options['auth_provider'],
settings['OPTIONS']['connection']['auth_provider'])
def test_connection_session_options_default_timeout(self):
session_opts = \
self.cassandra_connection.settings_dict['OPTIONS']['session']
self.assertEqual(self.connection.session_options, session_opts)
self.assertEqual(self.connection.session.default_timeout,
session_opts.get('default_timeout'))
def test_raw_cql_cursor_queries(self):
cursor = self.connection.cursor()
self.assertEqual(
cursor.execute("SELECT count(*) from example_model")[0]['count'], 0
)
cursor.execute("INSERT INTO example_model (id) VALUES (1)")
self.assertEqual(
cursor.execute("SELECT count(*) from example_model")[0]['count'], 1
)
| 35.513514
| 79
| 0.703957
|
952b6f650a35a1769a46d9337b0752082101bf29
| 241
|
py
|
Python
|
debug/testmondo.py
|
merlinxcy/iptcFirewallOS
|
5427cd9104aefcef9071b17f54ca6574837932d6
|
[
"MIT"
] | null | null | null |
debug/testmondo.py
|
merlinxcy/iptcFirewallOS
|
5427cd9104aefcef9071b17f54ca6574837932d6
|
[
"MIT"
] | null | null | null |
debug/testmondo.py
|
merlinxcy/iptcFirewallOS
|
5427cd9104aefcef9071b17f54ca6574837932d6
|
[
"MIT"
] | null | null | null |
import mongolib
class a():
def aa(self):
a=mongolib.mongodb()
a.log_collect(msg='1gaejiusfuadaifuagusuifhiau afdu gaudf uisg uagsi gaug asyaigasydg aug iug ')
a.log_collect(msg='2')
a.log_input()
a.log_output()
aaaa=a()
aaaa.aa()
| 21.909091
| 98
| 0.721992
|
425c6c9d0129f41044428190f3b7cdda95e1d73d
| 82
|
py
|
Python
|
yc238/994-2.py
|
c-yan/yukicoder
|
cdbbd65402177225dd989df7fe01f67908484a69
|
[
"MIT"
] | null | null | null |
yc238/994-2.py
|
c-yan/yukicoder
|
cdbbd65402177225dd989df7fe01f67908484a69
|
[
"MIT"
] | null | null | null |
yc238/994-2.py
|
c-yan/yukicoder
|
cdbbd65402177225dd989df7fe01f67908484a69
|
[
"MIT"
] | null | null | null |
N, K = map(int, input().split())
if N >= K:
print(K - 1)
else:
print(-1)
| 11.714286
| 32
| 0.47561
|
dd6156d4fc4d4e17db1881fbb6b14aa4c4eec0a3
| 738
|
py
|
Python
|
setup.py
|
755/python_ndms2_client
|
b6b0c8642979faed8f6623359e4637b05134f838
|
[
"MIT"
] | null | null | null |
setup.py
|
755/python_ndms2_client
|
b6b0c8642979faed8f6623359e4637b05134f838
|
[
"MIT"
] | null | null | null |
setup.py
|
755/python_ndms2_client
|
b6b0c8642979faed8f6623359e4637b05134f838
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="ndms2_client",
version="0.0.8",
author="Andrey F. Kupreychik",
author_email="foxel@quickfox.ru",
description="Keenetic NDMS2 client",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/foxel/python_ndms2_client",
packages=setuptools.find_packages(exclude=['tests']),
classifiers=(
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
)
| 30.75
| 57
| 0.655827
|
40eb2babe15b5b4b6e0b25e28017501b2418f088
| 1,753
|
py
|
Python
|
PSET3/P1 & P2/matstri2.py
|
MonitSharma/Computational-Methods-in-Physics
|
e3b2db36c37dd5f64b9a37ba39e9bb267ba27d85
|
[
"MIT"
] | null | null | null |
PSET3/P1 & P2/matstri2.py
|
MonitSharma/Computational-Methods-in-Physics
|
e3b2db36c37dd5f64b9a37ba39e9bb267ba27d85
|
[
"MIT"
] | null | null | null |
PSET3/P1 & P2/matstri2.py
|
MonitSharma/Computational-Methods-in-Physics
|
e3b2db36c37dd5f64b9a37ba39e9bb267ba27d85
|
[
"MIT"
] | null | null | null |
# EqStringAnimateMat.py: Animated leapfrog solution Vibrating string using MatPlotLib
from numpy import *
import numpy as np, matplotlib.pyplot as plt, matplotlib.animation as animation
rho = 0.01; ten = 40.; c = sqrt(ten/rho) # density, tension
c1 = c; ratio = c*c/(c1*c1) # CFL criterium = 1
xi = np.zeros((101,3), float) # Declaration
k = range(0,101)
def Initialize(): # Initial conditions
for i in range(0, 81): xi[i, 0] = 0.00125*i
for i in range (81, 101): xi[i, 0] = 0.1 - 0.005*(i - 80) # second part of string
def animate(num):
for i in range(1, 100):
xi[i,2] = 2.*xi[i,1]-xi[i,0]+ratio*(xi[i+1,1]+xi[i-1,1]-2*xi[i,1])
line.set_data(k,xi[k,2]) # Data to plot ,x,y
for m in range (0,101):
xi[m, 0] = xi[m, 1] # Recycle array
xi[m, 1] = xi[m, 2]
return line
Initialize() # Plot initial string
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(0, 101), ylim=(-0.15, 0.15))
ax.grid() # Plot grid
plt.title("Vibrating String")
line, = ax.plot(k, xi[k,0], lw=2)
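# first time step: leapfrog needs two time levels, so xi[:,1] is built from the initial shape
# with a half-step formula (initial velocity assumed zero)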
for i in range(1,100):
xi[i,1] = xi[i,0] + 0.5*ratio*(xi[i+1,0] + xi[i-1,0] -2*xi[i,0])
ani = animation.FuncAnimation(fig, animate,1) # Dummy argument: 1
plt.show()
print("finished")
| 51.558824
| 102
| 0.436395
|
4f6c77703d1f233fb07892ff042aa9ff2a4365fd
| 418
|
py
|
Python
|
spacenet/schemas/events.py
|
space-logistics-org/spacenet
|
fd004437ed7b27dd6dc41a374e1dedfcea92e37d
|
[
"MIT"
] | 1
|
2022-02-17T18:01:41.000Z
|
2022-02-17T18:01:41.000Z
|
spacenet/schemas/events.py
|
space-logistics-org/spacenet
|
fd004437ed7b27dd6dc41a374e1dedfcea92e37d
|
[
"MIT"
] | 2
|
2021-06-19T19:41:15.000Z
|
2021-07-21T17:07:48.000Z
|
spacenet/schemas/events.py
|
space-logistics-org/spacenet
|
fd004437ed7b27dd6dc41a374e1dedfcea92e37d
|
[
"MIT"
] | 3
|
2021-06-16T16:31:12.000Z
|
2022-02-17T18:02:57.000Z
|
"""
A module re-exporting all events from one file
so that `from spacenet.schemas.events import *` is usable.
"""
from .bases import *
from .flight_transport import *
from .space_transport import *
from .surface_transport import *
from .propulsive_burn import *
from .element_events import *
from .transfer_resources import *
from .consume_resource import *
from .crewed_eva import *
from .crewed_exploration import *
| 27.866667
| 58
| 0.782297
|
3a44644bb3ea9fa59bfd5048376a7e03bc9281f1
| 389
|
py
|
Python
|
shorty/asgi.py
|
malakbenr/Shorty_URL-Shortner
|
1babc411acfc72e3d559a1a1b1aba3582d713c80
|
[
"MIT"
] | null | null | null |
shorty/asgi.py
|
malakbenr/Shorty_URL-Shortner
|
1babc411acfc72e3d559a1a1b1aba3582d713c80
|
[
"MIT"
] | null | null | null |
shorty/asgi.py
|
malakbenr/Shorty_URL-Shortner
|
1babc411acfc72e3d559a1a1b1aba3582d713c80
|
[
"MIT"
] | null | null | null |
"""
ASGI config for shorty project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shorty.settings')
application = get_asgi_application()
| 22.882353
| 78
| 0.784062
|
04a998e0af08a9aa1a96ea80490d4302de174e59
| 4,727
|
py
|
Python
|
pycls/core/plotting.py
|
feymanpriv/pymetric
|
f7c4f354f87969142263c87e1fb33499b7b2d62a
|
[
"MIT"
] | 62
|
2020-08-26T11:06:37.000Z
|
2022-03-29T03:26:00.000Z
|
pycls/core/plotting.py
|
ym547559398/pycls
|
f7c4f354f87969142263c87e1fb33499b7b2d62a
|
[
"MIT"
] | 2
|
2021-06-02T10:19:53.000Z
|
2021-12-06T05:41:23.000Z
|
pycls/core/plotting.py
|
ym547559398/pycls
|
f7c4f354f87969142263c87e1fb33499b7b2d62a
|
[
"MIT"
] | 11
|
2020-09-14T12:26:17.000Z
|
2021-10-04T06:29:35.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Plotting functions."""
import colorlover as cl
import matplotlib.pyplot as plt
import plotly.graph_objs as go
import plotly.offline as offline
import pycls.core.logging as logging
def get_plot_colors(max_colors, color_format="pyplot"):
"""Generate colors for plotting."""
colors = cl.scales["11"]["qual"]["Paired"]
if max_colors > len(colors):
colors = cl.to_rgb(cl.interp(colors, max_colors))
if color_format == "pyplot":
return [[j / 255.0 for j in c] for c in cl.to_numeric(colors)]
return colors
def prepare_plot_data(log_files, names, metric="top1_err"):
"""Load logs and extract data for plotting error curves."""
plot_data = []
for file, name in zip(log_files, names):
d, data = {}, logging.sort_log_data(logging.load_log_data(file))
for phase in ["train", "test"]:
x = data[phase + "_epoch"]["epoch"]
y = data[phase + "_epoch"][metric]
x = [int(e.split("/")[0]) for e in x]
d["x_" + phase], d["y_" + phase] = x, y
d[phase + "_label"] = "[{:5.2f}] ".format(min(y) if y else 0) + name
plot_data.append(d)
assert len(plot_data) > 0, "No data to plot"
return plot_data
def plot_error_curves_plotly(log_files, names, filename, metric="top1_err"):
"""Plot error curves using plotly and save to file."""
plot_data = prepare_plot_data(log_files, names, metric)
colors = get_plot_colors(len(plot_data), "plotly")
# Prepare data for plots (3 sets, train duplicated w and w/o legend)
data = []
for i, d in enumerate(plot_data):
s = str(i)
line_train = {"color": colors[i], "dash": "dashdot", "width": 1.5}
line_test = {"color": colors[i], "dash": "solid", "width": 1.5}
data.append(
go.Scatter(
x=d["x_train"],
y=d["y_train"],
mode="lines",
name=d["train_label"],
line=line_train,
legendgroup=s,
visible=True,
showlegend=False,
)
)
data.append(
go.Scatter(
x=d["x_test"],
y=d["y_test"],
mode="lines",
name=d["test_label"],
line=line_test,
legendgroup=s,
visible=True,
showlegend=True,
)
)
data.append(
go.Scatter(
x=d["x_train"],
y=d["y_train"],
mode="lines",
name=d["train_label"],
line=line_train,
legendgroup=s,
visible=False,
showlegend=True,
)
)
# Prepare layout w ability to toggle 'all', 'train', 'test'
titlefont = {"size": 18, "color": "#7f7f7f"}
vis = [[True, True, False], [False, False, True], [False, True, False]]
buttons = zip(["all", "train", "test"], [[{"visible": v}] for v in vis])
buttons = [{"label": b, "args": v, "method": "update"} for b, v in buttons]
layout = go.Layout(
title=metric + " vs. epoch<br>[dash=train, solid=test]",
xaxis={"title": "epoch", "titlefont": titlefont},
yaxis={"title": metric, "titlefont": titlefont},
showlegend=True,
hoverlabel={"namelength": -1},
updatemenus=[
{
"buttons": buttons,
"direction": "down",
"showactive": True,
"x": 1.02,
"xanchor": "left",
"y": 1.08,
"yanchor": "top",
}
],
)
# Create plotly plot
offline.plot({"data": data, "layout": layout}, filename=filename)
def plot_error_curves_pyplot(log_files, names, filename=None, metric="top1_err"):
"""Plot error curves using matplotlib.pyplot and save to file."""
plot_data = prepare_plot_data(log_files, names, metric)
colors = get_plot_colors(len(names))
for ind, d in enumerate(plot_data):
c, lbl = colors[ind], d["test_label"]
plt.plot(d["x_train"], d["y_train"], "--", c=c, alpha=0.8)
plt.plot(d["x_test"], d["y_test"], "-", c=c, alpha=0.8, label=lbl)
plt.title(metric + " vs. epoch\n[dash=train, solid=test]", fontsize=14)
plt.xlabel("epoch", fontsize=14)
plt.ylabel(metric, fontsize=14)
plt.grid(alpha=0.4)
plt.legend()
if filename:
plt.savefig(filename)
plt.clf()
else:
plt.show()
| 35.276119
| 81
| 0.542204
|
20fb7b5b24de209d168958ec2035bb0ed7e7d948
| 12,386
|
py
|
Python
|
intersight/model/meta_display_name_definition.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/model/meta_display_name_definition.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/model/meta_display_name_definition.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.meta_display_name_definition_all_of import MetaDisplayNameDefinitionAllOf
from intersight.model.mo_base_complex_type import MoBaseComplexType
globals()['MetaDisplayNameDefinitionAllOf'] = MetaDisplayNameDefinitionAllOf
globals()['MoBaseComplexType'] = MoBaseComplexType
class MetaDisplayNameDefinition(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'META.DISPLAYNAMEDEFINITION': "meta.DisplayNameDefinition",
},
('object_type',): {
'META.DISPLAYNAMEDEFINITION': "meta.DisplayNameDefinition",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = True
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
'format': (str,), # noqa: E501
'include_ancestor': (bool,), # noqa: E501
'name': (str,), # noqa: E501
}
@cached_property
def discriminator():
val = {
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
'format': 'Format', # noqa: E501
'include_ancestor': 'IncludeAncestor', # noqa: E501
'name': 'Name', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""MetaDisplayNameDefinition - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "meta.DisplayNameDefinition", must be one of ["meta.DisplayNameDefinition", ] # noqa: E501
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "meta.DisplayNameDefinition", must be one of ["meta.DisplayNameDefinition", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
format (str): A specification for constructing the displayname from the MO's properties.. [optional] # noqa: E501
include_ancestor (bool): An indication of whether the displayname should be contructed 'recursively' including the displayname of the first ancestor with a similarly named displayname.. [optional] # noqa: E501
name (str): The name of the displayname used as a key in the DisplayName map which is returned as part of an MO for a Rest request.. [optional] # noqa: E501
"""
class_id = kwargs.get('class_id', "meta.DisplayNameDefinition")
object_type = kwargs.get('object_type', "meta.DisplayNameDefinition")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'class_id': class_id,
'object_type': object_type,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
MetaDisplayNameDefinitionAllOf,
MoBaseComplexType,
],
'oneOf': [
],
}
| 49.150794
| 1,678
| 0.637817
|
95872d38ad4d9304260e50509ad9311374158a46
| 551
|
py
|
Python
|
setup.py
|
wishful-project/tests
|
fc628bee6b6c495cff55ba3891ff9aa9ce969c98
|
[
"MIT"
] | null | null | null |
setup.py
|
wishful-project/tests
|
fc628bee6b6c495cff55ba3891ff9aa9ce969c98
|
[
"MIT"
] | null | null | null |
setup.py
|
wishful-project/tests
|
fc628bee6b6c495cff55ba3891ff9aa9ce969c98
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
def readme():
with open('README.md') as f:
return f.read()
setup(
name='wishful_testing_framework',
version='0.1.0',
packages=find_packages(),
url='http://www.wishful-project.eu/software',
license='',
author='Piotr Gawlowicz',
author_email='gawlowicz@tkn.tu-berlin.de',
description='WiSHFUL Testing Framework',
long_description='Test of WiSHFUL Control Framework',
keywords='testing',
install_requires=['pytest', 'sh', 'PyRIC', 'pyzmq', 'gevent']
)
| 27.55
| 65
| 0.673321
|
96e13148e96be3c7afef36adf635ed6ce6f53302
| 401
|
py
|
Python
|
interiorshop/wsgi.py
|
wilfex81/INTERIORSHOP
|
0b1859e240b47b956f366e8eab53cecdc2786bcd
|
[
"MIT"
] | null | null | null |
interiorshop/wsgi.py
|
wilfex81/INTERIORSHOP
|
0b1859e240b47b956f366e8eab53cecdc2786bcd
|
[
"MIT"
] | 1
|
2022-02-24T12:47:49.000Z
|
2022-02-24T12:47:49.000Z
|
interiorshop/wsgi.py
|
wilfex81/INTERIORSHOP
|
0b1859e240b47b956f366e8eab53cecdc2786bcd
|
[
"MIT"
] | null | null | null |
"""
WSGI config for interiorshop project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'interiorshop.settings')
application = get_wsgi_application()
| 23.588235
| 78
| 0.790524
|
2f1de2da16fa30175c565c03eb9949989f6a6942
| 4,475
|
py
|
Python
|
tests/conftest.py
|
alvistack/ionrock-cachecontrol
|
09992a140edca2602cf8230370fc8b28566a069b
|
[
"Apache-2.0"
] | 353
|
2015-01-03T11:07:43.000Z
|
2022-03-01T10:24:29.000Z
|
tests/conftest.py
|
alvistack/ionrock-cachecontrol
|
09992a140edca2602cf8230370fc8b28566a069b
|
[
"Apache-2.0"
] | 190
|
2015-01-07T10:00:49.000Z
|
2022-02-23T19:04:03.000Z
|
tests/conftest.py
|
alvistack/ionrock-cachecontrol
|
09992a140edca2602cf8230370fc8b28566a069b
|
[
"Apache-2.0"
] | 108
|
2015-01-05T19:17:33.000Z
|
2022-02-27T20:02:18.000Z
|
# SPDX-FileCopyrightText: 2015 Eric Larson
#
# SPDX-License-Identifier: Apache-2.0
from pprint import pformat
import os
import socket
import pytest
import cherrypy
class SimpleApp(object):
def __init__(self):
self.etag_count = 0
self.update_etag_string()
def dispatch(self, env):
path = env["PATH_INFO"][1:].split("/")
segment = path.pop(0)
if segment and hasattr(self, segment):
return getattr(self, segment)
return None
def optional_cacheable_request(self, env, start_response):
"""A request with no hints as to whether it should be
cached. Yet, we might still choose to cache it via a
heuristic."""
headers = [
("server", "nginx/1.2.6 (Ubuntu)"),
("last-modified", "Mon, 21 Jul 2014 17:45:39 GMT"),
("content-type", "text/html"),
]
start_response("200 OK", headers)
return [pformat(env).encode("utf8")]
def vary_accept(self, env, start_response):
response = pformat(env).encode("utf8")
headers = [
("Cache-Control", "max-age=5000"),
("Content-Type", "text/plain"),
("Vary", "Accept-Encoding, Accept"),
]
start_response("200 OK", headers)
return [response]
def update_etag_string(self):
self.etag_count += 1
self.etag_string = '"ETAG-{}"'.format(self.etag_count)
def update_etag(self, env, start_response):
self.update_etag_string()
headers = [("Cache-Control", "max-age=5000"), ("Content-Type", "text/plain")]
start_response("200 OK", headers)
return [pformat(env).encode("utf8")]
def conditional_get(self, env, start_response):
return start_response("304 Not Modified", [])
def etag(self, env, start_response):
headers = [("Etag", self.etag_string)]
if env.get("HTTP_IF_NONE_MATCH") == self.etag_string:
start_response("304 Not Modified", headers)
return []
else:
start_response("200 OK", headers)
return [pformat(env).encode("utf8")]
def cache_60(self, env, start_response):
headers = [("Cache-Control", "public, max-age=60")]
start_response("200 OK", headers)
return [pformat(env).encode("utf8")]
def no_cache(self, env, start_response):
headers = [("Cache-Control", "no-cache")]
start_response("200 OK", headers)
return [pformat(env).encode("utf8")]
def permanent_redirect(self, env, start_response):
headers = [("Location", "/permalink")]
start_response("301 Moved Permanently", headers)
return ["See: /permalink".encode("utf-8")]
def permalink(self, env, start_response):
start_response("200 OK", [("Content-Type", "text/plain")])
return ["The permanent resource".encode("utf-8")]
def multiple_choices(self, env, start_response):
headers = [("Link", "/permalink")]
start_response("300 Multiple Choices", headers)
return ["See: /permalink".encode("utf-8")]
def stream(self, env, start_response):
headers = [("Content-Type", "text/plain"), ("Cache-Control", "max-age=5000")]
start_response("200 OK", headers)
for i in range(10):
yield pformat(i).encode("utf8")
def __call__(self, env, start_response):
func = self.dispatch(env)
if func:
return func(env, start_response)
headers = [("Cache-Control", "max-age=5000"), ("Content-Type", "text/plain")]
start_response("200 OK", headers)
return [pformat(env).encode("utf8")]
@pytest.fixture(scope="session")
def server():
return cherrypy.server
@pytest.fixture()
def url(server):
return "http://%s:%s/" % server.bind_addr
def get_free_port():
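    # bind to port 0 so the OS picks an unused port, then close the socket and hand the port to CherryPy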
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
ip, port = s.getsockname()
s.close()
ip = os.environ.get("WEBTEST_SERVER_BIND", "127.0.0.1")
return ip, port
def pytest_configure(config):
cherrypy.tree.graft(SimpleApp(), "/")
ip, port = get_free_port()
cherrypy.config.update({"server.socket_host": ip, "server.socket_port": port})
# turn off logging
logger = cherrypy.log.access_log
logger.removeHandler(logger.handlers[0])
cherrypy.server.start()
def pytest_unconfigure(config):
try:
cherrypy.server.stop()
except:
pass
| 28.685897
| 85
| 0.608268
|
6a5ec02889d3e5098dc3073d5c220a9693f85494
| 8,403
|
py
|
Python
|
dashboard/apps/app1.py
|
gamyers/solar-697
|
90ca38072456af385c98b1bdf3c3d563e2c71f15
|
[
"MIT"
] | 1
|
2021-08-24T00:00:23.000Z
|
2021-08-24T00:00:23.000Z
|
dashboard/apps/app1.py
|
gamyers/solar-697
|
90ca38072456af385c98b1bdf3c3d563e2c71f15
|
[
"MIT"
] | null | null | null |
dashboard/apps/app1.py
|
gamyers/solar-697
|
90ca38072456af385c98b1bdf3c3d563e2c71f15
|
[
"MIT"
] | 2
|
2021-08-30T20:36:36.000Z
|
2021-11-02T19:13:33.000Z
|
import sqlite3
import sys
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import logzero
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import yaml
from app import app
from dash.dependencies import Input, Output
from dash_table import DataTable
from logzero import logger
sys.path.append("../source")
import queries
import plot_tools
import ts_tools
# open and retrieve configuration data
try:
with open("../source/config.yml", "r") as config_in:
cfg = yaml.load(config_in, Loader=yaml.SafeLoader)
logger.info(f"{cfg}\n")
except:
logger.error(f"config file open failure.")
exit(1)
db_path = cfg["file_paths"]["db_path"]
db_files = ts_tools.get_db_files(db_path)
logger.info(f"DB Path: {db_path}\n{db_files}\n")
# --------------------------begin layout--------------------------#
layout_app1 = html.Div(
[
# Dropdown row 0
dbc.Row(
[
dbc.Col(
dcc.Dropdown(
id="dd-db-selection",
options=[{"label": db, "value": db} for db in db_files],
value=cfg["file_names"]["default_db"],
placeholder="Select a database",
persistence=True,
persistence_type="session",
),
width={"size": 2, "offset": 0},
),
dbc.Col(
[
dcc.Dropdown(
id="dd-zipcode-selection",
placeholder="Select a Zip Code",
persistence=True,
),
html.H5(id="dd-zipcode-selection-locale"),
],
width={"size": 2, "offset": 1},
),
],
),
# Plots row 1
dbc.Row(
[
dbc.Col(
[
html.H6(
"Data View",
style={"display": "inline-block", "textAlign": "center"},
),
dcc.Graph(id="graph-data-view"),
],
width={"size": 6},
),
dbc.Col(
[
html.H6(
"Distribution View",
style={"display": "inline-block", "textAlign": "center"},
),
dcc.Graph(id="graph-dist-view"),
],
width={"size": 5},
),
],
),
# Plots row 2
dbc.Row(
[
dbc.Col(
[
html.H6(
"Meteorological View",
style={"display": "inline-block", "textAlign": "center"},
),
dcc.Graph(
id="graph-meteoro-view",
),
],
width={"size": 6, "offset": 0},
),
dbc.Col(
[
html.H6(
"Desciptive Statistics",
style={
"display": "inline-block",
"textAlign": "center",
},
),
DataTable(
id="table-desc-stats",
style_table={
"height": "395px",
},
style_cell={
"backgroundColor": "black",
"forgroundColor": "white",
},
style_header={
"backgroundColor": "black",
"forgroundColor": "white",
"fontWeight": "bold",
"fontColor": "gold",
},
),
],
width={"size": 5},
),
],
),
],
)
# --------------------------begin callbacks--------------------------#
@app.callback(
Output("dd-zipcode-selection", "options"),
Input("dd-db-selection", "value"),
)
def get_zipcodes(file_name):
logger.info(f"get_zipcodes callback: {file_name}")
conn = ts_tools.get_db_connection(db_path, file_name)
zipcodes = ts_tools.get_db_zipcodes(conn)
conn.close()
logger.info(f"app1 zipcodes retrieved\n{zipcodes}")
# return the list object to properly populate the dropdown!
return [{"label": zipcode, "value": zipcode} for zipcode in zipcodes]
# -------------------------------------------------------------------#
# @app.callback(
# Output("dd-zipcode-selection", "value"),
# [
# Input("dd-zipcode-selection", "options"),
# ],
# )
# def set_zipcode_value(options):
# logger.info(f"app1 zipcode selected: {options[0]['value']}")
# return options[0]["value"]
# -------------------------------------------------------------------#
@app.callback(
# [
Output("graph-data-view", "figure"),
Output("graph-dist-view", "figure"),
Output("graph-meteoro-view", "figure"),
Output("table-desc-stats", "data"),
Output("table-desc-stats", "columns"),
Output("dd-zipcode-selection-locale", "children"),
# -------------------------------------
Input("dd-db-selection", "value"),
Input("dd-zipcode-selection", "value"),
)
def graph_output(db_filename, zipcode):
cntx = dash.callback_context
context = cntx.triggered[0]["prop_id"].split(".")[0]
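    # dash.callback_context reveals which input (database dropdown, zipcode dropdown, or initial load) fired this callback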
logger.info(f"app1 graph_output #1 Context = {context}\n")
# print(f"app1 graph_output #1 Context: {context}")
if context == "dd-db-selection":
conn = ts_tools.get_db_connection(db_path, db_filename)
# zipcodes = ts_tools.get_db_zipcodes(conn)
# zipcode = zipcodes[0]
locale_data = ts_tools.get_locale_data(conn, zipcode)
df = ts_tools.get_irr_data(conn, zipcode)
logger.info(f"app1 Made if: {db_filename}, {zipcode}")
elif context == "dd-zipcode-selection":
conn = ts_tools.get_db_connection(db_path, db_filename)
locale_data = ts_tools.get_locale_data(conn, zipcode)
df = ts_tools.get_irr_data(conn, zipcode)
logger.info(f"app1 Made elif: {db_filename}, {zipcode}")
else:
db_filename = cfg["file_names"]["default_db"]
conn = ts_tools.get_db_connection(db_path, db_filename)
zipcodes = ts_tools.get_db_zipcodes(conn)
if not zipcode:
zipcode = zipcodes[0]
locale_data = ts_tools.get_locale_data(conn, zipcode)
df = ts_tools.get_irr_data(conn, zipcode)
logger.info(f"app1 Made else: {db_filename}, {zipcode}")
logger.info(f"app1 passed if/elif/else")
df_desc = df.describe().transpose().round(decimals=2).reset_index(drop=False)
df_desc.rename(columns={"index": "feature"}, inplace=True)
df_desc.insert(loc=1, column="unit", value=[value for value in cfg["data_units"].values()])
desc_columns = [{"id": col, "name": col} for col in df_desc.columns]
logger.info(f"app1 passed df_desc")
title1 = "Irradiance Data"
fig1 = plot_tools.plot_irradiance(
df, title=title1, zipcode=zipcode, irr_columns=cfg["irradiance_columns"], locale=locale_data
)
logger.info(f"app1 passed {title1}")
title2 = "Data Distributions"
fig2 = plot_tools.plot_histograms(
df,
title=title2,
zipcode=zipcode,
)
logger.info(f"app1 passed {title2}")
title3 = "Meteorological Conditions"
fig3 = plot_tools.plot_multi_line(
df,
title=title3,
locale=locale_data,
columns=cfg["meteorological_fields"],
)
logger.info(f"app1 passed {title3}")
return (
fig1,
fig2,
fig3,
df_desc.to_dict("records"),
desc_columns,
f"{locale_data[0]}, {locale_data[2]}",
)
| 32.952941
| 100
| 0.471855
|
9f159ed4cbe2e34fae811c65f4ab2181e34c476a
| 1,377
|
py
|
Python
|
bloomfilter.py
|
jetz/algorithms
|
1cf3a0864b4de06442457ab4a171f2eabfc2bb66
|
[
"MIT"
] | null | null | null |
bloomfilter.py
|
jetz/algorithms
|
1cf3a0864b4de06442457ab4a171f2eabfc2bb66
|
[
"MIT"
] | null | null | null |
bloomfilter.py
|
jetz/algorithms
|
1cf3a0864b4de06442457ab4a171f2eabfc2bb66
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
from BitVector import BitVector
class BloomFilter(object):
"""
    Bloom filter.
"""
def __init__(self, n):
super(BloomFilter, self).__init__()
self.n = n
self.bits = BitVector(size=n)
def __hashcodes(self, val):
"""
        Hash functions used by the Bloom filter.
        seed: hash seed
        val : the string being hashed
"""
results = []
seeds = [3, 5, 7, 11, 13, 31, 37, 61]
for seed in seeds:
result = 0
for v in val:
result = seed * result + ord(v)
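            # note: (self.n - 1) & result equals result % n only when n is a power of two;
            # for other n the index still stays below n, but some bit positions can never be hit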
results.append((self.n - 1) & result)
return results
def has(self, val):
"""
        Check val against the filter and return True or False.
        Uses 8 hash functions built from different seeds.
"""
hash_pos = self.__hashcodes(val)
has = True
for i in hash_pos:
if not self.bits[i]:
self.bits[i] = 1
has = False
return has
if __name__ == '__main__':
bloomFilter = BloomFilter(1000000)
examples = ['http://www.baidu.com',
'http://s1.bdstatic.com/r/www/img/i-1.0.0.png',
'http://s1.bdstatic.com/r/www/img/i-1.0.0.png',
'http://www.baidu.com/gaoji/preferences.html',
'http://news.baidu.com',
'http://news.baidu.com']
for e in examples:
        print(bloomFilter.has(e))
| 25.981132
| 63
| 0.499637
|
306c5e21b421427000a6e1be2af450f7b02b3e8c
| 43,900
|
py
|
Python
|
evennia/evennia/utils/evmenu.py
|
MarsZone/DreamLand
|
87455f421c1ba09cb6efd5fc0882fbc2a29ea1a5
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
evennia/evennia/utils/evmenu.py
|
MarsZone/DreamLand
|
87455f421c1ba09cb6efd5fc0882fbc2a29ea1a5
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
evennia/evennia/utils/evmenu.py
|
MarsZone/DreamLand
|
87455f421c1ba09cb6efd5fc0882fbc2a29ea1a5
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
"""
EvMenu
This implements a full menu system for Evennia. It is considerably
more flexible than the older contrib/menusystem.py and also uses
menu plugin modules.
To start the menu, just import the EvMenu class from this module.
Example usage:
```python
from evennia.utils.evmenu import EvMenu
EvMenu(caller, menu_module_path,
startnode="node1",
cmdset_mergetype="Replace", cmdset_priority=1,
auto_quit=True, cmd_on_exit="look", persistent=True)
```
Where `caller` is the Object to use the menu on - it will get a new
cmdset while using the Menu. The menu_module_path is the python path
to a python module containing function definitions. By adjusting the
keyword options of the Menu() initialization call you can start the
menu at different places in the menu definition file, adjust if the
menu command should overload the normal commands or not, etc.
The `persistent` keyword will make the menu survive a server reboot.
It is `False` by default. Note that if using persistent mode, every
node and callback in the menu must be possible to be *pickled*, this
excludes e.g. callables that are class methods or functions defined
dynamically or as part of another function. In non-persistent mode
no such restrictions exist.
The menu is defined in a module (this can be the same module as the
command definition too) with function definitions:
```python
def node1(caller):
# (this is the start node if called like above)
# code
return text, options
def node_with_other_name(caller, input_string):
# code
return text, options
```
Where caller is the object using the menu and input_string is the
command entered by the user on the *previous* node (the command
entered to get to this node). The node function code will only be
executed once per node-visit and the system will accept nodes with
both one or two arguments interchangeably.
The menu tree itself is available on the caller as
`caller.ndb._menutree`. This makes it a convenient place to store
temporary state variables between nodes, since this NAttribute is
deleted when the menu is exited.
The return values must be given in the above order, but each can be
returned as None as well. If the options are returned as None, the
menu is immediately exited and the default "look" command is called.
text (str, tuple or None): Text shown at this node. If a tuple, the
second element in the tuple is a help text to display at this
node when the user enters the menu help command there.
options (tuple, dict or None): (
{'key': name, # can also be a list of aliases. A special key is
# "_default", which marks this option as the default
# fallback when no other option matches the user input.
'desc': description, # optional description
'goto': nodekey, # node to go to when chosen
'exec': nodekey}, # node or callback to trigger as callback when chosen. This
# will execute *before* going to the next node. Both node
# and the explicit callback will be called as normal nodes
# (with caller and/or raw_string args). If the callable/node
# returns a single string (only), this will replace the current
# goto location string in-place. Note that relying to
# much on letting exec assign the goto location can make it
# hard to debug your menu logic.
{...}, ...)
If key is not given, the option will automatically be identified by
its number 1..N.
Example:
```python
# in menu_module.py
def node1(caller):
text = ("This is a node text",
"This is help text for this node")
options = ({"key": "testing",
"desc": "Select this to go to node 2",
"goto": "node2",
"exec": "callback1"},
{"desc": "Go to node 3.",
"goto": "node3"})
return text, options
def callback1(caller):
# this is called when choosing the "testing" option in node1
# (before going to node2). If it returned a string, say 'node3',
# then the next node would be node3 instead of node2 as specified
# by the normal 'goto' option key above.
caller.msg("Callback called!")
def node2(caller):
text = '''
This is node 2. It only allows you to go back
to the original node1. This extra indent will
be stripped. We don't include a help text.
'''
options = {"goto": "node1"}
return text, options
def node3(caller):
text = "This ends the menu since there are no options."
return text, None
```
When starting this menu with `Menu(caller, "path.to.menu_module")`,
the first node will look something like this:
This is a node text
______________________________________
testing: Select this to go to node 2
2: Go to node 3
Where you can both enter "testing" and "1" to select the first option.
If the client supports MXP, they may also mouse-click on "testing" to
do the same. When making this selection, a function "callback1" in the
same module will be called as a callback before you are moved on to node2.
Using `help` will show the help text, otherwise a list of
available commands while in menu mode.
The menu tree is exited either by using the in-menu quit command or by
reaching a node without any options.
For a menu demo, import CmdTestMenu from this module and add it to
your default cmdset. Run it with this module, like `testmenu
evennia.utils.evmenu`.
"""
from __future__ import print_function
from builtins import object, range
from textwrap import dedent
from inspect import isfunction, getargspec
from django.conf import settings
from evennia import Command, CmdSet
from evennia.utils import logger
from evennia.utils.evtable import EvTable
from evennia.utils.ansi import strip_ansi
from evennia.utils.utils import mod_import, make_iter, pad, m_len
from evennia.commands import cmdhandler
# read from protocol NAWS later?
_MAX_TEXT_WIDTH = settings.CLIENT_DEFAULT_WIDTH
# we use cmdhandler instead of evennia.syscmdkeys to
# avoid some cases of loading before evennia init'd
_CMD_NOMATCH = cmdhandler.CMD_NOMATCH
_CMD_NOINPUT = cmdhandler.CMD_NOINPUT
# Return messages
# i18n
from django.utils.translation import ugettext as _
_ERR_NOT_IMPLEMENTED = _("Menu node '{nodename}' is not implemented. Make another choice.")
_ERR_GENERAL = _("Error in menu node '{nodename}'.")
_ERR_NO_OPTION_DESC = _("No description.")
_HELP_FULL = _("Commands: <menu option>, help, quit")
_HELP_NO_QUIT = _("Commands: <menu option>, help")
_HELP_NO_OPTIONS = _("Commands: help, quit")
_HELP_NO_OPTIONS_NO_QUIT = _("Commands: help")
_HELP_NO_OPTION_MATCH = _("Choose an option or try 'help'.")
_ERROR_PERSISTENT_SAVING = \
"""
{error}
|rThe menu state could not be saved for persistent mode. Switching
to non-persistent mode (which means the menu session won't survive
an eventual server reload).|n
"""
_TRACE_PERSISTENT_SAVING = \
"EvMenu persistent-mode error. Commonly, this is because one or " \
"more of the EvEditor callbacks could not be pickled, for example " \
"because it's a class method or is defined inside another function."
class EvMenuError(RuntimeError):
"""
Error raised by menu when facing internal errors.
"""
pass
#------------------------------------------------------------
#
# Menu command and command set
#
#------------------------------------------------------------
class CmdEvMenuNode(Command):
"""
Menu options.
"""
key = _CMD_NOINPUT
aliases = [_CMD_NOMATCH]
locks = "cmd:all()"
help_category = "Menu"
def func(self):
"""
Implement all menu commands.
"""
def _restore(caller):
# check if there is a saved menu available.
# this will re-start a completely new evmenu call.
saved_options = caller.attributes.get("_menutree_saved")
if saved_options:
startnode_tuple = caller.attributes.get("_menutree_saved_startnode")
try:
startnode, startnode_input = startnode_tuple
except ValueError: # old form of startnode storage
startnode, startnode_input = startnode_tuple, ""
if startnode:
saved_options[1]["startnode"] = startnode
saved_options[1]["startnode_input"] = startnode_input
# this will create a completely new menu call
EvMenu(caller, *saved_options[0], **saved_options[1])
return True
caller = self.caller
# we store Session on the menu since this can be hard to
# get in multisession environments if caller is a Player.
menu = caller.ndb._menutree
if not menu:
if _restore(caller):
return
orig_caller = caller
caller = caller.player if hasattr(caller, "player") else None
menu = caller.ndb._menutree if caller else None
if not menu:
if caller and _restore(caller):
return
caller = self.session
menu = caller.ndb._menutree
if not menu:
# can't restore from a session
err = "Menu object not found as %s.ndb._menutree!" % (orig_caller)
orig_caller.msg(err) # don't give the session as a kwarg here, direct to original
raise EvMenuError(err)
# we must do this after the caller with the menu has been correctly identified since it
# can be either Player, Object or Session (in the latter case this info will be superfluous).
caller.ndb._menutree._session = self.session
# we have a menu, use it.
menu._input_parser(menu, self.raw_string, caller)
class EvMenuCmdSet(CmdSet):
"""
The Menu cmdset replaces the current cmdset.
"""
key = "menu_cmdset"
priority = 1
mergetype = "Replace"
no_objs = True
no_exits = True
no_channels = False
def at_cmdset_creation(self):
"""
Called when creating the set.
"""
self.add(CmdEvMenuNode())
# These are default node formatters
def dedent_strip_nodetext_formatter(nodetext, has_options, caller=None):
"""
Simple dedent formatter that also strips text
"""
return dedent(nodetext).strip()
def dedent_nodetext_formatter(nodetext, has_options, caller=None):
"""
Just dedent text.
"""
return dedent(nodetext)
def evtable_options_formatter(optionlist, caller=None):
"""
Formats the option list display.
"""
if not optionlist:
return ""
# column separation distance
colsep = 4
nlist = len(optionlist)
# get the widest option line in the table.
table_width_max = -1
table = []
for key, desc in optionlist:
if not (key or desc):
continue
table_width_max = max(table_width_max,
max(m_len(p) for p in key.split("\n")) +
max(m_len(p) for p in desc.split("\n")) + colsep)
raw_key = strip_ansi(key)
if raw_key != key:
# already decorations in key definition
table.append(" |lc%s|lt%s|le: %s" % (raw_key, key, desc))
else:
# add a default white color to key
table.append(" |lc%s|lt|w%s|n|le: %s" % (raw_key, raw_key, desc))
ncols = (_MAX_TEXT_WIDTH // table_width_max) + 1 # number of ncols
nlastcol = nlist % ncols # number of elements left in last row
# get the amount of rows needed (start with 4 rows)
nrows = 4
while nrows * ncols < nlist:
nrows += 1
ncols = nlist // nrows # number of full columns
nlastcol = nlist % nrows # number of elements in last column
# get the final column count
ncols = ncols + 1 if nlastcol > 0 else ncols
if ncols > 1:
# only extend if longer than one column
table.extend([" " for i in range(nrows - nlastcol)])
# build the actual table grid
table = [table[icol * nrows : (icol * nrows) + nrows] for icol in range(0, ncols)]
# adjust the width of each column
for icol in range(len(table)):
col_width = max(max(m_len(p) for p in part.split("\n")) for part in table[icol]) + colsep
table[icol] = [pad(part, width=col_width + colsep, align="l") for part in table[icol]]
# format the table into columns
return unicode(EvTable(table=table, border="none"))
def underline_node_formatter(nodetext, optionstext, caller=None):
"""
Draws a node with underlines '_____' around it.
"""
nodetext_width_max = max(m_len(line) for line in nodetext.split("\n"))
options_width_max = max(m_len(line) for line in optionstext.split("\n"))
total_width = max(options_width_max, nodetext_width_max)
separator1 = "_" * total_width + "\n\n" if nodetext_width_max else ""
separator2 = "\n" + "_" * total_width + "\n\n" if total_width else ""
return separator1 + "|n" + nodetext + "|n" + separator2 + "|n" + optionstext
def null_node_formatter(nodetext, optionstext, caller=None):
"""
A minimalistic node formatter, no lines or frames.
"""
return nodetext + "\n\n" + optionstext
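# An illustrative custom formatter (not part of the original module): any
# callable with the signature (nodetext, optionstext, caller=None) can be
# passed to EvMenu as `node_formatter`. This sketch frames the node with '=' lines.
def example_equals_node_formatter(nodetext, optionstext, caller=None):
    """
    Sketch of a custom node formatter that frames the node with '=' lines.
    """
    width = max(m_len(line) for line in (nodetext + "\n" + optionstext).split("\n"))
    sep = "=" * width
    return sep + "\n" + nodetext + "\n" + sep + "\n" + optionstext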
def evtable_parse_input(menuobject, raw_string, caller):
"""
Processes the user's node inputs.
Args:
menuobject (EvMenu): The EvMenu instance
raw_string (str): The incoming raw_string from the menu
command.
caller (Object, Player or Session): The entity using
the menu.
"""
cmd = raw_string.strip().lower()
if cmd in menuobject.options:
# this will take precedence over the default commands
# below
goto, callback = menuobject.options[cmd]
menuobject.callback_goto(callback, goto, raw_string)
elif menuobject.auto_look and cmd in ("look", "l"):
menuobject.display_nodetext()
elif menuobject.auto_help and cmd in ("help", "h"):
menuobject.display_helptext()
elif menuobject.auto_quit and cmd in ("quit", "q", "exit"):
menuobject.close_menu()
elif menuobject.default:
goto, callback = menuobject.default
menuobject.callback_goto(callback, goto, raw_string)
else:
caller.msg(_HELP_NO_OPTION_MATCH, session=menuobject._session)
if not (menuobject.options or menuobject.default):
# no options - we are at the end of the menu.
menuobject.close_menu()
#------------------------------------------------------------
#
# Menu main class
#
#------------------------------------------------------------
class EvMenu(object):
"""
This object represents an operational menu. It is initialized from
a menufile.py instruction.
"""
def __init__(self, caller, menudata, startnode="start",
cmdset_mergetype="Replace", cmdset_priority=1,
auto_quit=True, auto_look=True, auto_help=True,
cmd_on_exit="look",
nodetext_formatter=dedent_strip_nodetext_formatter,
options_formatter=evtable_options_formatter,
node_formatter=underline_node_formatter,
input_parser=evtable_parse_input,
persistent=False, startnode_input="", session=None,
**kwargs):
"""
Initialize the menu tree and start the caller onto the first node.
Args:
caller (Object, Player or Session): The user of the menu.
menudata (str, module or dict): The full or relative path to the module
holding the menu tree data. All global functions in this module
whose name doesn't start with '_' will be parsed as menu nodes.
Also the module itself is accepted as input. Finally, a dictionary
menu tree can be given directly. This must then be a mapping
`{"nodekey":callable,...}` where `callable` must be called as
and return the data expected of a menu node. This allows for
dynamic menu creation.
startnode (str, optional): The starting node name in the menufile.
cmdset_mergetype (str, optional): 'Replace' (default) means the menu
commands will be exclusive - no other normal commands will
be usable while the user is in the menu. 'Union' means the
menu commands will be integrated with the existing commands
(it will merge with `merge_priority`), if so, make sure that
the menu's command names don't collide with existing commands
in an unexpected way. Also the CMD_NOMATCH and CMD_NOINPUT will
be overloaded by the menu cmdset. Other cmdset mergetypes
have little purpose for the menu.
cmdset_priority (int, optional): The merge priority for the
menu command set. The default (1) is usually enough for most
types of menus.
auto_quit (bool, optional): Allow user to use "q", "quit" or
"exit" to leave the menu at any point. Recommended during
development!
auto_look (bool, optional): Automatically make "look" or "l"
re-show the last node. Turning this off means you have to handle
re-showing nodes yourself, but may be useful if you need to
use "l" for some other purpose.
auto_help (bool, optional): Automatically make "help" or "h" show
the current help entry for the node. If turned off, eventual
help must be handled manually, but it may be useful if you
need 'h' for some other purpose, for example.
cmd_on_exit (callable, str or None, optional): When exiting the menu
(either by reaching a node with no options or by using the
in-built quit command, activated with `auto_quit`), this
callback function or command string will be executed.
The callback function takes two parameters, the caller then the
EvMenu object. This is called after cleanup is complete.
Set to None to not call any command.
nodetext_formatter (callable, optional): This callable should be on
the form `function(nodetext, has_options, caller=None)`, where `nodetext` is the
node text string and `has_options` a boolean specifying if there
are options associated with this node. It must return a formatted
string. `caller` is optionally a reference to the user of the menu.
options_formatter (callable, optional): This callable should be on
the form `function(optionlist, caller=None)`, where `optionlist` is a list
of option dictionaries, like
[{"key":..., "desc",..., "goto": ..., "exec",...}, ...]
Each dictionary describes each possible option. Note that this
will also be called if there are no options, and so should be
able to handle an empty list. This should
be formatted into an options list and returned as a string,
including the required separator to use between the node text
and the options. If not given the default EvMenu style will be used.
`caller` is optionally a reference to the user of the menu.
node_formatter (callable, optional): This callable should be on the
form `func(nodetext, optionstext, caller=None)` where the arguments are strings
representing the node text and options respectively (possibly prepared
by `nodetext_formatter`/`options_formatter` or by the default styles).
It should return a string representing the final look of the node. This
can e.g. be used to create line separators that take into account the
dynamic width of the parts. `caller` is optionally a reference to the
user of the menu.
input_parser (callable, optional): This callable is responsible for parsing the
options dict from a node and has the form `func(menuobject, raw_string, caller)`,
where `menuobject` is the active `EvMenu` instance, `raw_string` is the
incoming text from the caller and `caller` is the user of the menu.
It should use the helper method of the menuobject to goto new nodes, show
help texts etc. See the default `evtable_parse_input` function for help
with parsing.
persistent (bool, optional): Make the Menu persistent (i.e. it will
survive a reload). This will make the Menu cmdset persistent. Use
with caution - if your menu is buggy you may end up in a state
you can't get out of! Also note that persistent mode requires
that all formatters, menu nodes and callables are possible to
*pickle*. When the server is reloaded, the latest node shown will be completely
re-run with the same input arguments - so be careful if you are counting
up some persistent counter or similar - the counter may be run twice if
reload happens on the node that does that.
startnode_input (str, optional): Send an input text to `startnode` as if
a user input text from a fictional previous node. When the server reloads,
the latest visited node will be re-run using this kwarg.
session (Session, optional): This is useful when calling EvMenu from a player
in multisession mode > 2. Note that this session is only really relevant
for the very first display of the first node - after that, EvMenu itself
will keep the session updated from the command input. So a persistent
menu will *not* be using this same session anymore after a reload.
Kwargs:
any (any): All kwargs will become initialization variables on `caller.ndb._menutree`,
to be available at run.
Raises:
EvMenuError: If the start/end node is not found in menu tree.
Notes:
While running, the menu is stored on the caller as `caller.ndb._menutree`. Also
the current Session (from the Command, so this is still valid in multisession
environments) is available through `caller.ndb._menutree._session`. The `_menutree`
property is a good one for storing intermediary data on between nodes since it
will be automatically deleted when the menu closes.
In persistent mode, all nodes, formatters and callbacks in the menu must be
possible to be *pickled*, this excludes e.g. callables that are class methods
or functions defined dynamically or as part of another function. In
non-persistent mode no such restrictions exist.
"""
self._startnode = startnode
self._menutree = self._parse_menudata(menudata)
self._nodetext_formatter = nodetext_formatter
self._options_formatter = options_formatter
self._node_formatter = node_formatter
self._input_parser = input_parser
self._persistent = persistent
if startnode not in self._menutree:
raise EvMenuError("Start node '%s' not in menu tree!" % startnode)
# public variables made available to the command
self.caller = caller
self.auto_quit = auto_quit
self.auto_look = auto_look
self.auto_help = auto_help
self._session = session
if isinstance(cmd_on_exit, str):
# At this point menu._session will have been replaced by the
# menu command to the actual session calling.
self.cmd_on_exit = lambda caller, menu: caller.execute_cmd(cmd_on_exit, session=menu._session)
elif callable(cmd_on_exit):
self.cmd_on_exit = cmd_on_exit
else:
self.cmd_on_exit = None
self.default = None
self.nodetext = None
self.helptext = None
self.options = None
# assign kwargs as initialization vars on ourselves.
if set(("_startnode", "_menutree", "_nodetext_formatter", "_options_formatter",
"node_formatter", "_input_parser", "_peristent", "cmd_on_exit", "default",
"nodetext", "helptext", "options")).intersection(set(kwargs.keys())):
raise RuntimeError("One or more of the EvMenu `**kwargs` is reserved by EvMenu for internal use.")
for key, val in kwargs.iteritems():
setattr(self, key, val)
# store ourself on the object
self.caller.ndb._menutree = self
if persistent:
# save the menu to the database
try:
caller.attributes.add("_menutree_saved",
((menudata, ),
{"startnode": startnode,
"cmdset_mergetype": cmdset_mergetype,
"cmdset_priority": cmdset_priority,
"auto_quit": auto_quit, "auto_look": auto_look, "auto_help": auto_help,
"cmd_on_exit": cmd_on_exit,
"nodetext_formatter": nodetext_formatter, "options_formatter": options_formatter,
"node_formatter": node_formatter, "input_parser": input_parser,
"persistent": persistent,}))
caller.attributes.add("_menutree_saved_startnode", (startnode, startnode_input))
except Exception as err:
caller.msg(_ERROR_PERSISTENT_SAVING.format(error=err), session=self._session)
logger.log_trace(_TRACE_PERSISTENT_SAVING)
persistent = False
# set up the menu command on the caller
menu_cmdset = EvMenuCmdSet()
menu_cmdset.mergetype = str(cmdset_mergetype).lower().capitalize() or "Replace"
menu_cmdset.priority = int(cmdset_priority)
self.caller.cmdset.add(menu_cmdset, permanent=persistent)
# start the menu
self.goto(self._startnode, startnode_input)
def _parse_menudata(self, menudata):
"""
Parse a menufile for node functions and store in dictionary
map. Alternatively, accept a pre-made mapping dictionary of
node functions.
Args:
menudata (str, module or dict): The python.path to the menufile,
or the python module itself. If a dict, this should be a
mapping nodename:callable, where the callable must match
the criteria for a menu node.
Returns:
menutree (dict): A {nodekey: func}
"""
if isinstance(menudata, dict):
# This is assumed to be a pre-loaded menu tree.
return menudata
else:
# a python path of a module
module = mod_import(menudata)
return dict((key, func) for key, func in module.__dict__.items()
if isfunction(func) and not key.startswith("_"))
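    # Note (illustrative, not part of the original code): because a plain dict
    # is accepted, a menu tree can also be built at runtime, e.g.
    #   EvMenu(caller, {"start": node1, "other": node2}, startnode="start")
    # which is what enables dynamic menu creation without a module file.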
def _format_node(self, nodetext, optionlist):
"""
Format the node text + option section
Args:
nodetext (str): The node text
optionlist (list): List of (key, desc) pairs.
Returns:
string (str): The options section, including
all needed spaces.
Notes:
This will adjust the columns of the options, first to use
a maximum of 4 rows (expanding in columns), then gradually
growing to make use of the screen space.
"""
# handle the node text
nodetext = self._nodetext_formatter(nodetext, len(optionlist), self.caller)
# handle the options
optionstext = self._options_formatter(optionlist, self.caller)
# format the entire node
return self._node_formatter(nodetext, optionstext, self.caller)
def _execute_node(self, nodename, raw_string):
"""
Execute a node.
Args:
nodename (str): Name of node.
raw_string (str): The raw default string entered on the
previous node (only used if the node accepts it as an
argument)
Returns:
nodetext, options (tuple): The node text (a string or a
tuple) and the options tuple, if any.
"""
try:
node = self._menutree[nodename]
except KeyError:
self.caller.msg(_ERR_NOT_IMPLEMENTED.format(nodename=nodename), session=self._session)
raise EvMenuError
try:
# the node should return data as (text, options)
if len(getargspec(node).args) > 1:
# a node accepting raw_string
nodetext, options = node(self.caller, raw_string)
else:
# a normal node, only accepting caller
nodetext, options = node(self.caller)
except KeyError:
self.caller.msg(_ERR_NOT_IMPLEMENTED.format(nodename=nodename), session=self._session)
raise EvMenuError
except Exception:
self.caller.msg(_ERR_GENERAL.format(nodename=nodename), session=self._session)
raise
return nodetext, options
def display_nodetext(self):
self.caller.msg(self.nodetext, session=self._session)
def display_helptext(self):
self.caller.msg(self.helptext, session=self._session)
def callback_goto(self, callback, goto, raw_string):
"""
Call callback and goto in sequence.
Args:
callback (callable or str): Callback to run before goto. If
the callback returns a string, this is used to replace
the `goto` string before going to the next node.
goto (str): The target node to go to next (unless replaced
by the `callback` return).
raw_string (str): The original user input.
"""
if callback:
# replace goto only if callback returns
goto = self.callback(callback, raw_string) or goto
if goto:
self.goto(goto, raw_string)
def callback(self, nodename, raw_string):
"""
Run a function or node as a callback (with the 'exec' option key).
Args:
nodename (callable or str): A callable to run as
`callable(caller, raw_string)`, or the Name of an existing
node to run as a callable. This may or may not return
a string.
raw_string (str): The raw default string entered on the
previous node (only used if the node accepts it as an
argument)
Returns:
new_goto (str or None): A replacement goto location string or
None (no replacement).
Notes:
Relying on exec callbacks to set the goto location is
very powerful but will easily lead to spaghetti structure and
hard-to-trace paths through the menu logic. So be careful with
relying on this.
"""
if callable(nodename):
# this is a direct callable - execute it directly
try:
if len(getargspec(nodename).args) > 1:
# callable accepting raw_string
ret = nodename(self.caller, raw_string)
else:
# normal callable, only the caller as arg
ret = nodename(self.caller)
except Exception:
self.caller.msg(_ERR_GENERAL.format(nodename=nodename), self._session)
raise
else:
# nodename is a string; lookup as node
try:
# execute the node
ret = self._execute_node(nodename, raw_string)
except EvMenuError:
return
if isinstance(ret, basestring):
# only return a value if a string (a goto target), ignore all other returns
return ret
def goto(self, nodename, raw_string):
"""
Run a node by name
Args:
nodename (str): Name of node.
raw_string (str): The raw default string entered on the
previous node (only used if the node accepts it as an
argument)
"""
try:
# execute the node, make use of the returns.
nodetext, options = self._execute_node(nodename, raw_string)
except EvMenuError:
return
if self._persistent:
self.caller.attributes.add("_menutree_saved_startnode", (nodename, raw_string))
# validation of the node return values
helptext = ""
if hasattr(nodetext, "__iter__"):
if len(nodetext) > 1:
nodetext, helptext = nodetext[:2]
else:
nodetext = nodetext[0]
nodetext = "" if nodetext is None else str(nodetext)
options = [options] if isinstance(options, dict) else options
# this will be displayed in the given order
display_options = []
# this is used for lookup
self.options = {}
self.default = None
if options:
for inum, dic in enumerate(options):
# fix up the option dicts
keys = make_iter(dic.get("key"))
if "_default" in keys:
keys = [key for key in keys if key != "_default"]
desc = dic.get("desc", dic.get("text", _ERR_NO_OPTION_DESC).strip())
goto, execute = dic.get("goto", None), dic.get("exec", None)
self.default = (goto, execute)
else:
keys = list(make_iter(dic.get("key", str(inum+1).strip())))
desc = dic.get("desc", dic.get("text", _ERR_NO_OPTION_DESC).strip())
goto, execute = dic.get("goto", None), dic.get("exec", None)
if keys:
display_options.append((keys[0], desc))
for key in keys:
if goto or execute:
self.options[strip_ansi(key).strip().lower()] = (goto, execute)
self.nodetext = self._format_node(nodetext, display_options)
# handle the helptext
if helptext:
self.helptext = helptext
elif options:
self.helptext = _HELP_FULL if self.auto_quit else _HELP_NO_QUIT
else:
self.helptext = _HELP_NO_OPTIONS if self.auto_quit else _HELP_NO_OPTIONS_NO_QUIT
self.display_nodetext()
def close_menu(self):
"""
Shutdown menu; occurs when reaching the end node or using the quit command.
"""
self.caller.cmdset.remove(EvMenuCmdSet)
del self.caller.ndb._menutree
if self._persistent:
self.caller.attributes.remove("_menutree_saved")
self.caller.attributes.remove("_menutree_saved_startnode")
if self.cmd_on_exit is not None:
self.cmd_on_exit(self.caller, self)
# -------------------------------------------------------------------------------------------------
#
# Simple input shortcuts
#
# -------------------------------------------------------------------------------------------------
class CmdGetInput(Command):
"""
Enter your data and press return.
"""
key = _CMD_NOMATCH
aliases = _CMD_NOINPUT
def func(self):
"This is called when user enters anything."
caller = self.caller
callback = caller.ndb._getinput._callback
if not callback:
# this can happen if called from a player-command when IC
caller = self.player
callback = caller.ndb._getinput._callback
if not callback:
raise RuntimeError("No input callback found.")
caller.ndb._getinput._session = self.session
prompt = caller.ndb._getinput._prompt
result = self.raw_string.strip() # we strip the ending line break caused by sending
ok = not callback(caller, prompt, result)
if ok:
# only clear the state if the callback does not return
# anything
del caller.ndb._getinput
caller.cmdset.remove(InputCmdSet)
class InputCmdSet(CmdSet):
"""
This stores the input command
"""
key = "input_cmdset"
priority = 1
mergetype = "Replace"
no_objs = True
no_exits = True
no_channels = False
def at_cmdset_creation(self):
"called once at creation"
self.add(CmdGetInput())
class _Prompt(object):
"Dummy holder"
pass
def get_input(caller, prompt, callback, session=None):
"""
This is a helper function for easily request input from
the caller.
Args:
caller (Player or Object): The entity being asked
the question. This should usually be an object
controlled by a user.
prompt (str): This text will be shown to the user,
in order to let them know their input is needed.
callback (callable): A function that will be called
when the user enters a reply. It must take three
arguments: the `caller`, the `prompt` text and the
`result` of the input given by the user. If the
callback doesn't return anything or return False,
the input prompt will be cleaned up and exited. If
returning True, the prompt will remain and continue to
accept input.
session (Session, optional): This allows to specify the
session to send the prompt to. It's usually only
needed if `caller` is a Player in multisession modes
greater than 2. The session is then updated by the
command and is available (for example in callbacks)
through `caller.ndb._getinput._session`.
Raises:
RuntimeError: If the given callback is not callable.
Notes:
The result value sent to the callback is raw and not
processed in any way. This means that you will get
the ending line return character from most types of
client inputs. So make sure to strip that before
doing a comparison.
When the prompt is running, a temporary object
`caller.ndb._getinput` is stored; this will be removed
when the prompt finishes.
If you need the specific Session of the caller (which
may not be easy to get if caller is a player in higher
multisession modes), then it is available in the
callback through `caller.ndb._getinput._session`.
"""
if not callable(callback):
raise RuntimeError("get_input: input callback is not callable.")
caller.ndb._getinput = _Prompt()
caller.ndb._getinput._callback = callback
caller.ndb._getinput._prompt = prompt
caller.ndb._getinput._session = session
caller.cmdset.add(InputCmdSet)
caller.msg(prompt, session=session)
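# Illustrative usage sketch (not part of the original module); the function
# names and prompt text below are made up for the example.
def _example_input_callback(caller, prompt, result):
    """Handle one reply from get_input; return True to keep prompting."""
    if result.strip().lower() in ("yes", "y"):
        caller.msg("Understood.")
        return False  # falsy return -> the prompt state is cleaned up
    caller.msg("Please answer 'yes' to continue.")
    return True  # truthy return -> the prompt stays active

def _example_ask(caller):
    """Show how get_input is typically wired up."""
    get_input(caller, "Are you ready (yes/no)?", _example_input_callback)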
#------------------------------------------------------------
#
# test menu structure and testing command
#
#------------------------------------------------------------
def test_start_node(caller):
menu = caller.ndb._menutree
text = """
This is an example menu.
If you enter anything except the valid options, your input will be
recorded and you will be brought to a menu entry showing your
input.
Select options or use 'quit' to exit the menu.
The menu was initialized with two variables: %s and %s.
""" % (menu.testval, menu.testval2)
options = ({"key": ("{yS{net", "s"),
"desc": "Set an attribute on yourself.",
"exec": lambda caller: caller.attributes.add("menuattrtest", "Test value"),
"goto": "test_set_node"},
{"key": ("{yL{nook", "l"),
"desc": "Look and see a custom message.",
"goto": "test_look_node"},
{"key": ("{yV{niew", "v"),
"desc": "View your own name",
"goto": "test_view_node"},
{"key": ("{yQ{nuit", "quit", "q", "Q"),
"desc": "Quit this menu example.",
"goto": "test_end_node"},
{"key": "_default",
"goto": "test_displayinput_node"})
return text, options
def test_look_node(caller):
text = ""
options = {"key": ("{yL{nook", "l"),
"desc": "Go back to the previous menu.",
"goto": "test_start_node"}
return text, options
def test_set_node(caller):
text = ("""
The attribute 'menuattrtest' was set to
{w%s{n
(check it with examine after quitting the menu).
This node has only one option, and one of its key aliases is the
string "_default", meaning it will catch any input, in this case
to return to the main menu. So you can e.g. press <return> to go
back now.
""" % caller.db.menuattrtest,
# optional help text for this node
"""
This is the help entry for this node. It is created by returning
the node text as a tuple - the second string in that tuple will be
used as the help text.
""")
options = {"key": ("back (default)", "_default"),
"desc": "back to main",
"goto": "test_start_node"}
return text, options
def test_view_node(caller):
text = """
Your name is {g%s{n!
click |lclook|lthere|le to trigger a look command under MXP.
This node's option has no explicit key (nor the "_default" key
set), and so gets assigned a number automatically. You can in fact
-always- use numbers (1...N) to refer to listed options even if you
don't see a string option key (try it!).
""" % caller.key
options = {"desc": "back to main",
"goto": "test_start_node"}
return text, options
def test_displayinput_node(caller, raw_string):
text = """
You entered the text:
"{w%s{n"
... which could now be handled or stored here in some way if this
was not just an example.
This node has an option with a single alias "_default", which
makes it hidden from view. It catches all input (except the
in-menu help/quit commands) and will, in this case, bring you back
to the start node.
""" % raw_string
options = {"key": "_default",
"goto": "test_start_node"}
return text, options
def test_end_node(caller):
text = """
This is the end of the menu and since it has no options the menu
will exit here, followed by a call of the "look" command.
"""
return text, None
class CmdTestMenu(Command):
"""
Test menu
Usage:
testmenu <menumodule>
Starts a demo menu from a menu node definition module.
"""
key = "testmenu"
def func(self):
if not self.args:
self.caller.msg("Usage: testmenu menumodule")
return
# start menu
EvMenu(self.caller, self.args.strip(), startnode="test_start_node", persistent=True, cmdset_mergetype="Replace",
testval="val", testval2="val2")
| 39.65673
| 120
| 0.611435
|
ebee1b0a7566e5214889d60be92f3600253130b4
| 7,951
|
py
|
Python
|
src/parsec/configs-mesi-two-level/run_parsec_mesi_two_level.py
|
my569/gem5-resources
|
5788f1394a1894efec8e4784d37f473a743aa9f6
|
[
"MIT"
] | 1
|
2022-01-04T03:41:23.000Z
|
2022-01-04T03:41:23.000Z
|
src/parsec/configs-mesi-two-level/run_parsec_mesi_two_level.py
|
my569/gem5-resources
|
5788f1394a1894efec8e4784d37f473a743aa9f6
|
[
"MIT"
] | null | null | null |
src/parsec/configs-mesi-two-level/run_parsec_mesi_two_level.py
|
my569/gem5-resources
|
5788f1394a1894efec8e4784d37f473a743aa9f6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 The Regents of the University of California.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
""" Script to run PARSEC benchmarks with gem5. The memory model used
in the experiments is Ruby and uses the MESI_Two_Level protocol.
The script expects kernel, diskimage, cpu (kvm or timing),
benchmark, benchmark size, and number of cpu cores as arguments.
This script is best used if your disk-image has workloads that have
ROI annotations compliant with m5 utility. You can use the script in
../disk-images/parsec/ with the parsec-benchmark repo at
https://github.com/darchr/parsec-benchmark.git to create a working
disk-image for this script.
"""
import errno
import os
import sys
import time
import m5
import m5.ticks
from m5.objects import *
sys.path.append('gem5/configs/common/') # For the next line...
import SimpleOpts
from system import *
def writeBenchScript(dir, bench, size, num_cpus):
"""
This method creates a script in dir which will be eventually
passed to the simulated system (to run a specific benchmark
at bootup).
"""
file_name = '{}/run_{}'.format(dir, bench)
bench_file = open(file_name, 'w+')
bench_file.write('cd /home/gem5/parsec-benchmark\n')
bench_file.write('source env.sh\n')
bench_file.write('parsecmgmt -a run -p \
{} -c gcc-hooks -i {} -n {}\n'.format(bench, size, num_cpus))
# sleeping for some time makes sure
# that the benchmark's output has been
# printed to the console
bench_file.write('sleep 5 \n')
bench_file.write('m5 exit \n')
bench_file.close()
return file_name
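# For illustration only (hypothetical benchmark/size values): calling
# writeBenchScript(outdir, 'blackscholes', 'simsmall', 4) writes a file
# named run_blackscholes containing roughly:
#
#   cd /home/gem5/parsec-benchmark
#   source env.sh
#   parsecmgmt -a run -p blackscholes -c gcc-hooks -i simsmall -n 4
#   sleep 5
#   m5 exit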
if __name__ == "__m5_main__":
(opts, args) = SimpleOpts.parse_args()
kernel, disk, cpu, benchmark, size, num_cpus = args
if cpu not in ['kvm', 'timing']:
m5.fatal("cpu not supported")
# create the system we are going to simulate
system = MyRubySystem(kernel, disk, int(num_cpus), opts)
# Exit from guest on workbegin/workend
system.exit_on_work_items = True
# Create and pass a script to the simulated system to run the required
# benchmark
system.readfile = writeBenchScript(m5.options.outdir, benchmark, size, num_cpus)
# set up the root SimObject and start the simulation
root = Root(full_system = True, system = system)
if system.getHostParallel():
# Required for running kvm on multiple host cores.
# Uses gem5's parallel event queue feature
# Note: The simulator is quite picky about this number!
root.sim_quantum = int(1e9) # 1 ms
#needed for long running jobs
m5.disableAllListeners()
# instantiate all of the objects we've created above
m5.instantiate()
globalStart = time.time()
print("Running the simulation")
print("Using cpu: {}".format(cpu))
start_tick = m5.curTick()
end_tick = m5.curTick()
start_insts = system.totalInsts()
end_insts = system.totalInsts()
m5.stats.reset()
exit_event = m5.simulate()
if exit_event.getCause() == "workbegin":
# Reached the start of ROI
# start of ROI is marked by an
# m5_work_begin() call
print("Resetting stats at the start of ROI!")
m5.stats.reset()
start_tick = m5.curTick()
start_insts = system.totalInsts()
# switching to timing cpu if argument cpu == timing
if cpu == 'timing':
system.switchCpus(system.cpu, system.timingCpu)
else:
print("Unexpected termination of simulation!")
print()
m5.stats.dump()
end_tick = m5.curTick()
end_insts = system.totalInsts()
m5.stats.reset()
print("Performance statistics:")
print("Simulated time: %.2fs" % ((end_tick-start_tick)/1e12))
print("Instructions executed: %d" % ((end_insts-start_insts)))
print("Ran a total of", m5.curTick()/1e12, "simulated seconds")
print("Total wallclock time: %.2fs, %.2f min" % \
(time.time()-globalStart, (time.time()-globalStart)/60))
exit()
# Simulate the ROI
exit_event = m5.simulate()
# Reached the end of ROI
# Finish executing the benchmark with kvm cpu
if exit_event.getCause() == "workend":
# Reached the end of ROI
# end of ROI is marked by an
# m5_work_end() call
print("Dump stats at the end of the ROI!")
m5.stats.dump()
end_tick = m5.curTick()
end_insts = system.totalInsts()
m5.stats.reset()
# switching to timing cpu if argument cpu == timing
if cpu == 'timing':
# This line is commented due to an unimplemented
# flush request in MESI_Two_Level that results in
# the crashing of simulation. There will be a patch
# fixing this issue but the line is commented out
# for now.
# system.switchCpus(system.timingCpu, system.cpu)
pass
print("Performance statistics:")
print("Simulated time: %.2fs" % ((end_tick-start_tick)/1e12))
print("Instructions executed: %d" % ((end_insts-start_insts)))
print("Ran a total of", m5.curTick()/1e12, "simulated seconds")
print("Total wallclock time: %.2fs, %.2f min" % \
(time.time()-globalStart, (time.time()-globalStart)/60))
exit()
else:
print("Unexpected termination of simulation!")
print()
m5.stats.dump()
end_tick = m5.curTick()
end_insts = system.totalInsts()
m5.stats.reset()
print("Performance statistics:")
print("Simulated time: %.2fs" % ((end_tick-start_tick)/1e12))
print("Instructions executed: %d" % ((end_insts-start_insts)))
print("Ran a total of", m5.curTick()/1e12, "simulated seconds")
print("Total wallclock time: %.2fs, %.2f min" % \
(time.time()-globalStart, (time.time()-globalStart)/60))
exit()
# Simulate the remaining part of the benchmark
exit_event = m5.simulate()
print("Done with the simulation")
print()
print("Performance statistics:")
print("Simulated time in ROI: %.2fs" % ((end_tick-start_tick)/1e12))
print("Instructions executed in ROI: %d" % ((end_insts-start_insts)))
print("Ran a total of", m5.curTick()/1e12, "simulated seconds")
print("Total wallclock time: %.2fs, %.2f min" % \
(time.time()-globalStart, (time.time()-globalStart)/60))
| 38.97549
| 84
| 0.664319
|
9aab08fbd44542df04653a9aaebf08ab7e755cf4
| 666
|
py
|
Python
|
tests/test_ekey.py
|
shatgupt/django-encrypted-id
|
4fcbc66cd2b7d3489e73357efbf3db61feaf37a2
|
[
"BSD-2-Clause"
] | 31
|
2016-05-30T19:28:25.000Z
|
2021-11-10T01:33:12.000Z
|
tests/test_ekey.py
|
shatgupt/django-encrypted-id
|
4fcbc66cd2b7d3489e73357efbf3db61feaf37a2
|
[
"BSD-2-Clause"
] | 21
|
2016-05-30T11:44:24.000Z
|
2020-10-22T08:12:21.000Z
|
tests/test_ekey.py
|
wittfabian/django-encrypted-id
|
9c15b114a0d1459f6d68de3815e5f372e197078b
|
[
"BSD-2-Clause"
] | 18
|
2016-05-11T17:57:35.000Z
|
2022-03-23T14:53:10.000Z
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import pytest
from django.http import Http404
from encrypted_id import ekey, get_object_or_404
from tapp.models import Foo
def test_ekey(db):
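    # the `db` fixture (provided by pytest-django) enables database access;
    # the no-op assert below simply marks the fixture as deliberately used.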
assert db is db
foo = Foo.objects.create(text="asd")
assert ekey(foo) == foo.ekey
assert foo == get_object_or_404(Foo, foo.ekey)
def test_allow_none_ekey(db):
assert db is db
with pytest.raises(Http404):
get_object_or_404(Foo, None)
with pytest.raises(Foo.DoesNotExist):
Foo.objects.get(ekey=None)
| 22.2
| 50
| 0.738739
|
20abaf05368fe7f54f1946da0911b5baed5a7122
| 2,295
|
py
|
Python
|
tests/test_examples.py
|
brentyi/jaxlie
|
4dbe16f3c1d1cfda30e0418ef5d1e1772cf9f537
|
[
"MIT"
] | 128
|
2020-11-28T19:43:31.000Z
|
2022-03-14T11:48:12.000Z
|
tests/test_examples.py
|
brentyi/jaxlie
|
4dbe16f3c1d1cfda30e0418ef5d1e1772cf9f537
|
[
"MIT"
] | 4
|
2021-06-27T09:04:54.000Z
|
2022-01-07T07:23:52.000Z
|
tests/test_examples.py
|
brentyi/jaxlie
|
4dbe16f3c1d1cfda30e0418ef5d1e1772cf9f537
|
[
"MIT"
] | 7
|
2021-01-17T10:04:39.000Z
|
2022-01-06T21:22:15.000Z
|
"""Tests with explicit examples.
"""
import numpy as onp
from hypothesis import given, settings
from hypothesis import strategies as st
from utils import assert_arrays_close, assert_transforms_close, sample_transform
import jaxlie
@settings(deadline=None)
@given(_random_module=st.random_module())
def test_se2_translation(_random_module):
"""Simple test for SE(2) translation terms."""
translation = onp.random.randn(2)
T = jaxlie.SE2.from_xy_theta(*translation, theta=0.0)
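    # With theta=0 the rotation is the identity, so T @ t = R @ t + t = 2 * t.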
assert_arrays_close(T @ translation, translation * 2)
@settings(deadline=None)
@given(_random_module=st.random_module())
def test_se3_translation(_random_module):
"""Simple test for SE(3) translation terms."""
translation = onp.random.randn(3)
T = jaxlie.SE3.from_rotation_and_translation(
rotation=jaxlie.SO3.identity(),
translation=translation,
)
assert_arrays_close(T @ translation, translation * 2)
def test_se2_rotation():
"""Simple test for SE(2) rotation terms."""
T_w_b = jaxlie.SE2.from_rotation_and_translation(
rotation=jaxlie.SO2.from_radians(onp.pi / 2.0),
translation=onp.zeros(2),
)
p_b = onp.array([1.0, 0.0])
p_w = onp.array([0.0, 1.0])
assert_arrays_close(T_w_b @ p_b, p_w)
def test_se3_rotation():
"""Simple test for SE(3) rotation terms."""
T_w_b = jaxlie.SE3.from_rotation_and_translation(
rotation=jaxlie.SO3.from_rpy_radians(onp.pi / 2.0, 0.0, 0.0),
translation=onp.zeros(3),
)
p_b = onp.array([0.0, 1.0, 0.0])
p_w = onp.array([0.0, 0.0, 1.0])
assert_arrays_close(T_w_b @ p_b, p_w)
def test_so3_xyzw_basic():
"""Check that we can create an SO3 object from an xyzw quaternion."""
assert_transforms_close(
jaxlie.SO3.from_quaternion_xyzw(onp.array([0, 0, 0, 1])),
jaxlie.SO3.identity(),
)
@settings(deadline=None)
@given(_random_module=st.random_module())
def test_se3_compose(_random_module):
"""Compare SE3 composition in matrix form vs compact form."""
T1 = sample_transform(jaxlie.SE3)
T2 = sample_transform(jaxlie.SE3)
assert_arrays_close(T1.as_matrix() @ T2.as_matrix(), (T1 @ T2).as_matrix())
assert_transforms_close(
jaxlie.SE3.from_matrix(T1.as_matrix() @ T2.as_matrix()), T1 @ T2
)
| 31.013514
| 80
| 0.695425
|
0b644d1d60f7b06b5e5b3416bfc00a3e0dacde3d
| 495
|
py
|
Python
|
Python/calendar_fun.py
|
ArmstrongYang/StudyShare
|
6dffcfba6811865589a8e11748ffc7e71a656f50
|
[
"Apache-2.0"
] | 2
|
2017-08-10T13:41:19.000Z
|
2017-11-30T09:00:33.000Z
|
Python/calendar_fun.py
|
ArmstrongYang/StudyShare
|
6dffcfba6811865589a8e11748ffc7e71a656f50
|
[
"Apache-2.0"
] | null | null | null |
Python/calendar_fun.py
|
ArmstrongYang/StudyShare
|
6dffcfba6811865589a8e11748ffc7e71a656f50
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
#! /usr/bin/python
# coding=utf-8
import calendar
"""
Return the calendar for a given month.
The return value is a string.
"""
cal = calendar.month(2011, 11)
"""
Return the calendar for a whole year.
"""
cal = calendar.calendar(2011)
cal = calendar.HTMLCalendar(calendar.MONDAY)
"""
Print the calendar for one month.
"""
cal.formatmonth(2011, 11)
"""
Print the calendar for a whole year.
formatyearpage generates the complete HTML page markup.
"""
print (cal.formatyear(2017))
cal.formatyearpage(2017)
"""
By default the first day of the week is Monday; change it to Sunday here.
"""
calendar.setfirstweekday(calendar.SUNDAY)
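# Illustrative extras (not in the original script): two more calendar helpers.
print(calendar.isleap(2016)) # True - leap-year check
print(calendar.weekday(2017, 1, 1)) # 6 - Sunday (Monday is 0)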
if __name__=='__main__':
print(__file__)
exit(0)
| 12.375
| 44
| 0.711111
|
ec783959fa263d5603428b8ed2e86b9f71992fdb
| 45,218
|
py
|
Python
|
xnu-2782.1.97/tools/lldbmacros/ipc.py
|
LeeWongSnail/SourceCode_ReadingNote
|
b6bbf99a5fef6a087e053f6dfcc7f12691961ba6
|
[
"MIT"
] | null | null | null |
xnu-2782.1.97/tools/lldbmacros/ipc.py
|
LeeWongSnail/SourceCode_ReadingNote
|
b6bbf99a5fef6a087e053f6dfcc7f12691961ba6
|
[
"MIT"
] | null | null | null |
xnu-2782.1.97/tools/lldbmacros/ipc.py
|
LeeWongSnail/SourceCode_ReadingNote
|
b6bbf99a5fef6a087e053f6dfcc7f12691961ba6
|
[
"MIT"
] | 1
|
2021-03-28T02:56:16.000Z
|
2021-03-28T02:56:16.000Z
|
""" Please make sure you read the README file COMPLETELY BEFORE reading anything below.
It is very critical that you read coding guidelines in Section E in README file.
"""
from xnu import *
import sys, shlex
from utils import *
from process import *
from atm import *
from bank import *
import xnudefines
@header("{0: <20s} {1: <6s} {2: <6s} {3: <10s} {4: <15s}".format("task", "pid", '#acts', "tablesize", "command"))
def GetTaskIPCSummary(task):
""" Display a task's ipc summary.
params:
task : core.value representing a Task in kernel
returns
str - string of ipc info for the task
"""
out_string = ''
format_string = "{0: <#020x} {1: <6d} {2: <6d} {3: <10d} {4: <15s}"
pval = Cast(task.bsd_info, 'proc *')
table_size = int(task.itk_space.is_table_size)
proc_name = str(pval.p_comm)
out_string += format_string.format(task, pval.p_pid, task.thread_count, table_size, proc_name)
return out_string
@header("{0: <20s} {1: <28s} {2: <12s} {3: <6s} {4: <4s} {5: <20s} {6: <4s}\n".format(
"port", "mqueue", "recvname", "flags", "refs", "recvname", "dest"))
def GetPortSummary(port, show_kmsg_summary=True, prefix=""):
""" Display a port's summary
params:
port : core.value representing a port in the kernel
returns
str : string of ipc info for the given port
"""
out_string = ""
portp = Cast(port, 'struct ipc_port *')
destspacep = kern.GetValueFromAddress(0, 'struct ipc_space *')
spacep = portp.data.receiver
format_string = "{0: #019x} {1: #019x} {2: <8s} {3: #011x} {4: <5s} {5: #05x} {6: #019x} {7: <16s}\n"
if portp.ip_object.io_bits & 0x80000000:
out_string += prefix + format_string.format(
unsigned(portp), addressof(portp.ip_messages), ' '*8,
unsigned(portp.ip_messages.data.port.receiver_name),
"APort", portp.ip_object.io_references,
unsigned(portp.ip_messages.data.port.receiver_name),
GetPortDestProc(portp))
else:
out_string += prefix + format_string.format(
unsigned(portp), addressof(portp.ip_messages), ' '*8,
unsigned(portp.ip_messages.data.port.receiver_name),
"DPort", portp.ip_object.io_references, unsigned(portp),
"inactive-port")
if show_kmsg_summary:
kmsgp = Cast(portp.ip_messages.data.port.messages.ikmq_base, 'ipc_kmsg_t')
out_string += prefix + GetKMsgSummary.header + prefix + GetKMsgSummary(kmsgp)
kmsgheadp = kmsgp
kmsgp = kmsgp.ikm_next
while (kmsgp) != (kmsgheadp):
out_string += prefix + GetKMsgSummary(kmsgp)
kmsgp = kmsgp.ikm_next
return out_string
def GetPortDestProc(portp):
""" Display the name and pid of a given port's receiver
params:
portp : core.value representing a pointer to a port in the kernel
returns:
str : string containing receiver's name and pid
"""
spacep = portp.data.receiver
out_str = "Not found"
for tsk in kern.tasks:
if tsk.itk_space == spacep:
if tsk.bsd_info:
destprocp = Cast(tsk.bsd_info, 'struct proc *')
out_str = "{0:s}({1: <d})".format(destprocp.p_comm, destprocp.p_pid)
else:
out_str = "unknown"
break
return out_str
@header("{0: <20s} {1: <28s} {2: <12s} {3: <6s} {4: <6s} {5: <19s} {6: <26s} {7: <26s}\n".format(
"dest-port", "kmsg", "msgid", "disp", "size", "reply-port", "source", "destination"))
def GetKMsgSummary(kmsgp):
""" Display a summary for type ipc_kmsg_t
params:
kmsgp : core.value representing the given ipc_kmsg_t struct
returns:
str : string of summary info for the given ipc_kmsg_t instance
"""
kmsghp = kmsgp.ikm_header
kmsgh = dereference(kmsghp)
out_string = ""
out_string += "{0: <19s} {1: <#019x} {2: <8s} {3: <#011x} ".format(
' '*19, unsigned(kmsgp), ' '*8, kmsgh.msgh_id)
if (kmsgh.msgh_bits & 0xff) == 17:
out_string += "{0: <2s}".format("rS")
else:
out_string += "{0: <2s}".format("rO")
if (kmsgh.msgh_bits & 0xff00) == (17 << 8):
out_string += "{0: <2s}".format("lS")
else:
if (kmsgh.msgh_bits & 0xff00) == (18 << 8):
out_string += "{0: <2s}".format("lO")
else:
out_string += "{0: <2s}".format("l-")
if kmsgh.msgh_bits & 0xf0000000:
out_string += "{0: <2s}".format("c")
else:
out_string += "{0: <2s}".format("s")
dest_proc_name = ""
if kmsgp.ikm_header.msgh_remote_port:
dest_proc_name = GetDestinationProcessFromPort(kmsgp.ikm_header.msgh_remote_port)
out_string += "{0: ^6d} {1: <#019x} {2: <26s} {3: <26s}\n".format(
unsigned(kmsgh.msgh_size), unsigned(kmsgh.msgh_local_port),
GetKMsgSrc(kmsgp), dest_proc_name)
return out_string
def GetKMsgSrc(kmsgp):
Routine that returns a kmsg's source process and pid details
params:
kmsgp : core.value representing the given ipc_kmsg_t struct
returns:
str : string containing the name and pid of the kmsg's source proc
"""
kmsgsrchp = Cast(kmsgp, 'ipc_kmsg_t').ikm_header
kmsgpid = int(Cast(kern.GetValueFromAddress(unsigned(kmsgsrchp) + kmsgsrchp.msgh_size, 'uint *')[10], 'pid_t'))
return "{0:s} ({1:d})".format(GetProcNameForPid(kmsgpid), kmsgpid)
@header("{0: <20s} {1: <28s} {2: <12s} {3: <6s} {4: <6s} {5: <20s} {6: <7s}\n".format(
"portset", "waitqueue", "recvname", "flags", "refs", "recvname", "process"))
def GetPortSetSummary(pset):
""" Display summary for a given struct ipc_pset *
params:
pset : core.value representing a pset in the kernel
returns:
str : string of summary information for the given pset
"""
out_str = ""
if pset.ips_object.io_bits & 0x80000000:
out_str += "{0: #019x} {1: #019x} {2: <7s} {3: #011x} {4: <4s} {5: >6d} {6: #019x} ".format(
unsigned(pset), addressof(pset.ips_messages), ' '*7,
pset.ips_messages.data.pset.local_name, "ASet",
pset.ips_object.io_references,
pset.ips_messages.data.pset.local_name)
else:
out_str += "{0: #019x} {1: #019x} {2: <7s} {3: #011x} {4: <4s} {5: >6d} {6: #019x} ".format(
unsigned(pset), addressof(pset.ips_messages), ' '*7,
pset.ips_messages.data.pset.local_name, "DSet",
pset.ips_object.io_references,
pset.ips_messages.data.pset.local_name)
once = True
setlinksp = addressof(pset.ips_messages.data.pset.set_queue.wqs_setlinks)
wql = Cast(pset.ips_messages.data.pset.set_queue.wqs_setlinks.next, 'WaitQueueLink *')
portoff = getfieldoffset('struct ipc_port', 'ip_messages')
prefix_str = "{0:<21s}".format(' '*21)
while unsigned(wql) != unsigned(Cast(setlinksp, 'void *')):
portp = kern.GetValueFromAddress(unsigned(wql.wql_element.wqe_queue) - portoff, 'ipc_port *')
if once:
once = False
out_str += "{0:s}\n{1:s}{2:s}".format(GetPortDestProc(portp), prefix_str, GetPortSummary.header)
out_str += GetPortSummary(portp, False, prefix_str)
wql = Cast(wql.wql_setlinks.next, 'WaitQueueLink *')
return out_str
# Macro: showipc
@lldb_command('showipc')
def ShowIPC(cmd_args=None):
""" Routine to print data for the given IPC space
Usage: showipc <address of ipc space>
"""
if not cmd_args:
print "No arguments passed"
print ShowIPC.__doc__
return False
ipc = kern.GetValueFromAddress(cmd_args[0], 'ipc_space *')
if not ipc:
print "unknown arguments:", str(cmd_args)
return False
print GetIPCInformation.header
print GetIPCInformation(ipc, False, False)
# EndMacro: showipc
# Macro: showtaskipc
@lldb_command('showtaskipc')
def ShowTaskIPC(cmd_args=None):
""" Routine to print IPC summary of given task
Usage: showtaskipc <address of task>
"""
if not cmd_args:
print "No arguments passed"
print ShowTaskIPC.__doc__
return False
tval = kern.GetValueFromAddress(cmd_args[0], 'task *')
if not tval:
print "unknown arguments:", str(cmd_args)
return False
print GetTaskSummary.header + " " + GetProcSummary.header
pval = Cast(tval.bsd_info, 'proc *')
print GetTaskSummary(tval) + " " + GetProcSummary(pval)
print GetTaskIPCSummary.header
print GetTaskIPCSummary(tval)
# EndMacro: showtaskipc
# Macro: showallipc
@lldb_command('showallipc')
def ShowAllIPC(cmd_args=None):
""" Routine to print IPC summary of all tasks
Usage: showallipc
"""
for t in kern.tasks:
print GetTaskSummary.header + " " + GetProcSummary.header
pval = Cast(t.bsd_info, 'proc *')
print GetTaskSummary(t) + " " + GetProcSummary(pval)
print GetIPCInformation.header
print GetIPCInformation(t.itk_space, False, False) + "\n\n"
# EndMacro: showallipc
@lldb_command('showipcsummary')
def ShowIPCSummary(cmd_args=None):
""" Summarizes the IPC state of all tasks.
This is a convenient way to dump some basic clues about IPC messaging. You can use the output to determine
tasks that are candidates for further investigation.
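Usage: showipcsummary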
"""
print GetTaskIPCSummary.header
for t in kern.tasks:
print GetTaskIPCSummary(t)
return
def GetKObjectFromPort(portval):
""" Get Kobject description from the port.
params: portval - core.value representation of 'ipc_port *' object
returns: str - string of kobject information
"""
kobject_str = "{0: <#020x}".format(portval.kdata.kobject)
io_bits = unsigned(portval.ip_object.io_bits)
objtype_index = io_bits & 0xfff
if objtype_index < len(xnudefines.kobject_types) :
desc_str = "kobject({0:s})".format(xnudefines.kobject_types[objtype_index])
if xnudefines.kobject_types[objtype_index] in ('TASK_RESUME', 'TASK'):
desc_str += " " + GetProcNameForTask(Cast(portval.kdata.kobject, 'task *'))
else:
desc_str = "kobject(UNKNOWN) {:d}".format(objtype_index)
return kobject_str + " " + desc_str
@static_var('destcache', {})
def GetDestinationProcessFromPort(port):
"""
params: port - core.value representation of 'ipc_port *' object
returns: str - name of process
"""
out_str = ''
dest_space = port.data.receiver
found_dest = False
#update destcache if data is not found
if hex(dest_space) not in GetDestinationProcessFromPort.destcache:
for t in kern.tasks:
if hex(t.itk_space) == hex(dest_space):
pval = Cast(t.bsd_info, 'proc *')
GetDestinationProcessFromPort.destcache[hex(dest_space)] = (t, pval)
found_dest = True
break
#end of for loop
else: found_dest = True
if found_dest:
(ftask , fproc) = GetDestinationProcessFromPort.destcache[hex(dest_space)]
if fproc:
out_str = "{0:s}({1:d})".format(fproc.p_comm, fproc.p_pid )
else:
out_str = "task {0: <#020x}".format(ftask)
return out_str
@header("{0: <20s} {1: <20s}".format("destname", "destination") )
def GetPortDestinationSummary(port):
""" Get destination information for a port.
params: port - core.value representation of 'ipc_port *' object
returns: str - string of info about ports destination
"""
out_str = ''
format_string = "{0: <20s} {1: <20s}"
destname_str = ''
destination_str = ''
ipc_space_kernel = unsigned(kern.globals.ipc_space_kernel)
target_spaceval = port.data.receiver
if unsigned(target_spaceval) == ipc_space_kernel :
destname_str = GetKObjectFromPort(port)
else:
if int(port.ip_object.io_bits) & 0x80000000 :
destname_str = "{0: <#020x}".format(port.ip_messages.data.port.receiver_name)
destination_str = GetDestinationProcessFromPort(port)
else:
destname_str = "{0: <#020x}".format(port)
destination_str = "inactive-port"
out_str += format_string.format(destname_str, destination_str)
return out_str
@lldb_type_summary(['ipc_entry_t'])
@header("{0: <20s} {1: <20s} {2: <8s} {3: <8s} {4: <20s} {5: <20s}".format("object", "name","rite", "urefs", "destname", "destination"))
def GetIPCEntrySummary(entry, ipc_name=''):
""" Get summary of a ipc entry.
params:
entry - core.value representing ipc_entry_t in the kernel
ipc_name - str of format '0x0123' for display in summary.
returns:
str - string of ipc entry related information
"""
out_str = ''
entry_ptr = int(hex(entry), 16)
format_string = "{0: <#020x} {1: <12s} {2: <8s} {3: <8d} {4: <20s} {5: <20s}"
right_str = ''
destname_str = ''
destination_str = ''
ie_object = entry.ie_object
ie_bits = int(entry.ie_bits)
urefs = int(ie_bits & 0xffff)
if ie_bits & 0x00100000 :
right_str = 'Dead'
elif ie_bits & 0x00080000:
right_str = 'Set'
else:
if ie_bits & 0x00010000 :
if ie_bits & 0x00020000 :
right_str = 'SR'
else:
right_str = 'S'
elif ie_bits & 0x00020000:
right_str = 'R'
elif ie_bits & 0x00040000 :
right_str = 'O'
portval = Cast(ie_object, 'ipc_port_t')
if int(entry.index.request) != 0:
requestsval = portval.ip_requests
sorightval = requestsval[int(entry.index.request)].notify.port
soright_ptr = unsigned(sorightval)
if soright_ptr != 0:
if soright_ptr & 0x1 : right_str +='s'
elif soright_ptr & 0x2 : right_str +='d'
else : right_str +='n'
if ie_bits & 0x00800000 : right_str +='c'
if portval.ip_nsrequest != 0: right_str +='x'
# now show the port destination part
destname_str = GetPortDestinationSummary(Cast(ie_object, 'ipc_port_t'))
out_str = format_string.format(ie_object, ipc_name, right_str, urefs, destname_str, destination_str)
return out_str
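# Illustrative, standalone sketch (not part of the xnu macro set): the ie_bits
# rights decoding used by GetIPCEntrySummary above, pulled out so it can be
# exercised without an lldb session. The bit masks mirror the checks above; the
# function name is a hypothetical example only.
def _example_decode_ie_bits(ie_bits):
    """ Return the rights string ('Dead', 'Set', 'S', 'R', 'SR' or 'O') for a raw ie_bits value. """
    if ie_bits & 0x00100000:
        return 'Dead'
    if ie_bits & 0x00080000:
        return 'Set'
    right_str = ''
    if ie_bits & 0x00010000:
        right_str = 'SR' if ie_bits & 0x00020000 else 'S'
    elif ie_bits & 0x00020000:
        right_str = 'R'
    elif ie_bits & 0x00040000:
        right_str = 'O'
    return right_str
# e.g. _example_decode_ie_bits(0x00030000) == 'SR' and _example_decode_ie_bits(0x00100000) == 'Dead'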
@header("{0: >20s}".format("user bt") )
def GetPortUserStack(port, task):
""" Get UserStack information for the given port & task.
params: port - core.value representation of 'ipc_port *' object
task - value representing 'task *' object
returns: str - string information on port's userstack
"""
out_str = ''
ie_port_callstack = port.ip_callstack
ie_port_spares = port.ip_spares[0]
proc_val = Cast(task.bsd_info, 'proc *')
if ie_port_callstack[0]:
out_str += "{: <10x}".format(ie_port_callstack[0])
count = 1
while count < 16 and ie_port_callstack[count]:
out_str += ": <10x".format(ie_port_callstack[count])
count = count + 1
if ie_port_spares != proc_val.p_pid:
out_str += " ({:<10d})".format(ie_port_spares)
out_str += '\n'
return out_str
@lldb_type_summary(['ipc_space *'])
@header("{0: <20s} {1: <20s} {2: <20s} {3: <8s} {4: <10s} {5: <16s} {6: <10s} {7: <7s}".format('ipc_space', 'is_task', 'is_table', 'flags', 'ports', 'table_next', 'low_mod', 'high_mod'))
def GetIPCInformation(space, show_entries=False, show_userstack=False):
""" Provide a summary of the ipc space
"""
out_str = ''
format_string = "{0: <#020x} {1: <#020x} {2: <#020x} {3: <8s} {4: <10d} {5: <#01x} {6: >10d} {7: >10d}"
is_tableval = space.is_table
ports = int(space.is_table_size)
flags =''
is_bits = int(space.is_bits)
if (is_bits & 0x40000000) == 0: flags +='A'
else: flags += ' '
if (is_bits & 0x20000000) != 0: flags +='G'
out_str += format_string.format(space, space.is_task, space.is_table, flags, space.is_table_size, space.is_table_next, space.is_low_mod, space.is_high_mod)
    # show each individual entry if asked.
if show_entries == True:
out_str += "\n\t" + GetIPCEntrySummary.header + "\n"
num_entries = ports
index = 0
while index < num_entries:
entryval = GetObjectAtIndexFromArray(is_tableval, index)
entry_ie_bits = unsigned(entryval.ie_bits)
if (int(entry_ie_bits) & 0x001f0000 ) != 0:
entry_name = "{0: <#020x}".format( (index <<8 | entry_ie_bits >> 24) )
out_str += "\t" + GetIPCEntrySummary(entryval, entry_name) + "\n"
if show_userstack == True:
entryport = Cast(entryval.ie_object, 'ipc_port *')
if entryval.ie_object and (int(entry_ie_bits) & 0x00070000) and entryport.ip_callstack[0]:
out_str += GetPortUserStack.header
out_str += GetPortUserStack(entryport, space.is_task)
index +=1
#done with showing entries
return out_str
# Macro: showrights
@lldb_command('showrights')
def ShowRights(cmd_args=None):
""" Routine to print rights information for the given IPC space
Usage: showrights <address of ipc space>
"""
if not cmd_args:
print "No arguments passed"
print ShowRights.__doc__
return False
ipc = kern.GetValueFromAddress(cmd_args[0], 'ipc_space *')
if not ipc:
print "unknown arguments:", str(cmd_args)
return False
print GetIPCInformation.header
print GetIPCInformation(ipc, True, False)
# EndMacro: showrights
@lldb_command('showtaskrights')
def ShowTaskRights(cmd_args=None):
""" Routine to ipc rights information for a task
Usage: showtaskrights <task address>
"""
if cmd_args == None:
print "No arguments passed"
        print ShowTaskRights.__doc__
return False
tval = kern.GetValueFromAddress(cmd_args[0], 'task *')
if not tval:
print "unknown arguments:", str(cmd_args)
return False
print GetTaskSummary.header + " " + GetProcSummary.header
pval = Cast(tval.bsd_info, 'proc *')
print GetTaskSummary(tval) + " " + GetProcSummary(pval)
print GetIPCInformation.header
print GetIPCInformation(tval.itk_space, True, False)
# Macro: showtaskrightsbt
@lldb_command('showtaskrightsbt')
def ShowTaskRightsBt(cmd_args=None):
""" Routine to ipc rights information with userstacks for a task
Usage: showtaskrightsbt <task address>
"""
if cmd_args == None:
print "No arguments passed"
print ShowTaskRightsBt.__doc__
return False
tval = kern.GetValueFromAddress(cmd_args[0], 'task *')
if not tval:
print "unknown arguments:", str(cmd_args)
return False
print GetTaskSummary.header + " " + GetProcSummary.header
pval = Cast(tval.bsd_info, 'proc *')
print GetTaskSummary(tval) + " " + GetProcSummary(pval)
print GetIPCInformation.header
print GetIPCInformation(tval.itk_space, True, True)
# EndMacro: showtaskrightsbt
# Macro: showallrights
@lldb_command('showallrights')
def ShowAllRights(cmd_args=None):
""" Routine to print rights information for IPC space of all tasks
Usage: showallrights
"""
for t in kern.tasks:
print GetTaskSummary.header + " " + GetProcSummary.header
pval = Cast(t.bsd_info, 'proc *')
print GetTaskSummary(t) + " " + GetProcSummary(pval)
try:
print GetIPCInformation.header
print GetIPCInformation(t.itk_space, True, False) + "\n\n"
except (KeyboardInterrupt, SystemExit):
raise
except:
print "Failed to get IPC information. Do individual showtaskrights <task> to find the error. \n\n"
# EndMacro: showallrights
# Macro: showpipestats
@lldb_command('showpipestats')
def ShowPipeStats(cmd_args=None):
""" Display pipes usage information in the kernel
"""
print "Number of pipes: {: d}".format(kern.globals.amountpipes)
print "Memory used by pipes: {:s}".format(sizeof_fmt(int(kern.globals.amountpipekva)))
print "Max memory allowed for pipes: {:s}".format(sizeof_fmt(int(kern.globals.maxpipekva)))
# EndMacro: showpipestats
# Macro: showtaskbusyports
@lldb_command('showtaskbusyports')
def ShowTaskBusyPorts(cmd_args=None):
""" Routine to print information about receive rights belonging to this task that
        have enqueued messages. This is often a sign of a blocked or hung process.
Usage: showtaskbusyports <task address>
"""
if not cmd_args:
print "No arguments passed. Please pass in the address of a task"
print ShowTaskBusyPorts.__doc__
return
task = kern.GetValueFromAddress(cmd_args[0], 'task_t')
print GetTaskBusyPorts(task)
return
def GetTaskBusyPorts(task):
""" Prints all busy ports for a given task. ie. all receive rights belonging
to this task that have enqueued messages.
params:
task : core.value representing a task in kernel
returns:
str : String containing information about the given task's busy ports
"""
isp = task.itk_space
i = 0
out_string = ""
while i < isp.is_table_size:
iep = addressof(isp.is_table[i])
if iep.ie_bits & 0x00020000:
port = Cast(iep.ie_object, 'ipc_port_t')
if port.ip_messages.data.port.msgcount > 0:
out_string += GetPortSummary.header + GetPortSummary(port)
i = i + 1
return out_string
# EndMacro: showtaskbusyports
# Macro: showallbusyports
@lldb_command('showallbusyports')
def ShowAllBusyPorts(cmd_args=None):
""" Routine to print information about all receive rights on the system that
have enqueued messages.
"""
task_queue_head = kern.globals.tasks
for tsk in kern.tasks:
print GetTaskBusyPorts(tsk)
return
# EndMacro: showallbusyports
# Macro: showmqueue:
@lldb_command('showmqueue')
def ShowMQueue(cmd_args=None):
""" Routine that lists details about a given mqueue
Syntax: (lldb) showmqueue 0xaddr
"""
if not cmd_args:
print "Please specify the address of the ipc_mqueue whose details you want to print"
print ShowMQueue.__doc__
return
mqueue = kern.GetValueFromAddress(cmd_args[0], 'struct ipc_mqueue *')
wq_type = mqueue.data.pset.set_queue.wqs_wait_queue.wq_type
if int(wq_type) == 3:
psetoff = getfieldoffset('struct ipc_pset', 'ips_messages')
pset = unsigned(ArgumentStringToInt(cmd_args[0])) - unsigned(psetoff)
print GetPortSetSummary.header + GetPortSetSummary(kern.GetValueFromAddress(pset, 'struct ipc_pset *'))
if int(wq_type) == 2:
portoff = getfieldoffset('struct ipc_port', 'ip_messages')
port = unsigned(ArgumentStringToInt(cmd_args[0])) - unsigned(portoff)
print GetPortSummary.header + GetPortSummary(kern.GetValueFromAddress(port, 'struct ipc_port *'))
# EndMacro: showmqueue
# Macro: showkmsg:
@lldb_command('showkmsg')
def ShowKMSG(cmd_args=[]):
""" Show detail information about a <ipc_kmsg_t> structure
Usage: (lldb) showkmsg <ipc_kmsg_t>
"""
if not cmd_args:
raise ArgumentError('Invalid arguments')
kmsg = kern.GetValueFromAddress(cmd_args[0], 'ipc_kmsg_t')
print GetKMsgSummary.header
print GetKMsgSummary(kmsg)
# EndMacro: showkmsg
# Macro: showpset
@lldb_command('showpset')
def ShowPSet(cmd_args=None):
""" Routine that prints details for a given ipc_pset *
Syntax: (lldb) showpset 0xaddr
"""
if not cmd_args:
print "Please specify the address of the pset whose details you want to print"
print ShowPSet.__doc__
return
print GetPortSetSummary.header + GetPortSetSummary(kern.GetValueFromAddress(cmd_args[0], 'ipc_pset *'))
# EndMacro: showpset
# IPC importance inheritance related macros.
@lldb_command('showalliits')
def ShowAllIITs(cmd_args=[], cmd_options={}):
""" Development only macro. Show list of all iits allocated in the system. """
try:
iit_queue = kern.globals.global_iit_alloc_queue
except ValueError:
print "This debug macro is only available in development or debug kernels"
return
print GetIPCImportantTaskSummary.header
for iit in IterateQueue(iit_queue, 'struct ipc_importance_task *', 'iit_allocation'):
print GetIPCImportantTaskSummary(iit)
return
@header("{: <18s} {: <3s} {: <18s} {: <20s} {: <18s} {: <8s}".format("ipc_imp_inherit", "don", "to_task", "proc_name", "from_elem", "depth"))
@lldb_type_summary(['ipc_importance_inherit *', 'ipc_importance_inherit_t'])
def GetIPCImportanceInheritSummary(iii):
""" describes iii object of type ipc_importance_inherit_t * """
out_str = ""
fmt = "{o: <#018x} {don: <3s} {o.iii_to_task.iit_task: <#018x} {task_name: <20s} {o.iii_from_elem: <#018x} {o.iii_depth: <#08x}"
donating_str = ""
if unsigned(iii.iii_donating):
donating_str = "DON"
taskname = GetProcNameForTask(iii.iii_to_task.iit_task)
if hasattr(iii.iii_to_task, 'iit_bsd_pid'):
taskname = "({:d}) {:s}".format(iii.iii_to_task.iit_bsd_pid, iii.iii_to_task.iit_procname)
out_str += fmt.format(o=iii, task_name = taskname, don=donating_str)
return out_str
@static_var('recursion_count', 0)
@header("{: <18s} {: <4s} {: <8s} {: <8s} {: <18s} {: <18s}".format("iie", "type", "refs", "made", "#kmsgs", "#inherits"))
@lldb_type_summary(['ipc_importance_elem *'])
def GetIPCImportanceElemSummary(iie):
""" describes an ipc_importance_elem * object """
if GetIPCImportanceElemSummary.recursion_count > 500:
GetIPCImportanceElemSummary.recursion_count = 0
return "Recursion of 500 reached"
out_str = ''
fmt = "{: <#018x} {: <4s} {: <8d} {: <8d} {: <#018x} {: <#018x}"
type_str = 'TASK'
if unsigned(iie.iie_bits) & 0x80000000:
type_str = "INH"
refs = unsigned(iie.iie_bits) & 0x7fffffff
made_refs = unsigned(iie.iie_made)
kmsg_count = sum(1 for i in IterateQueue(iie.iie_kmsgs, 'struct ipc_kmsg *', 'ikm_inheritance'))
inherit_count = sum(1 for i in IterateQueue(iie.iie_inherits, 'struct ipc_importance_inherit *', 'iii_inheritance'))
out_str += fmt.format(iie, type_str, refs, made_refs, kmsg_count, inherit_count)
if config['verbosity'] > vHUMAN:
if kmsg_count > 0:
out_str += "\n\t"+ GetKMsgSummary.header
for k in IterateQueue(iie.iie_kmsgs, 'struct ipc_kmsg *', 'ikm_inheritance'):
out_str += "\t" + "{: <#018x}".format(k.ikm_header.msgh_remote_port) + ' ' + GetKMsgSummary(k).lstrip()
out_str += "\n"
if inherit_count > 0:
out_str += "\n\t" + GetIPCImportanceInheritSummary.header + "\n"
for i in IterateQueue(iie.iie_inherits, 'struct ipc_importance_inherit *', 'iii_inheritance'):
out_str += "\t" + GetIPCImportanceInheritSummary(i) + "\n"
out_str += "\n"
if type_str == "INH":
iii = Cast(iie, 'struct ipc_importance_inherit *')
out_str += "Inherit from: " + GetIPCImportanceElemSummary(iii.iii_from_elem)
return out_str
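# Standalone sketch of the iie_bits decoding above (illustrative only, not part
# of the macro set): the top bit selects INH vs TASK and the low 31 bits carry
# the reference count. The helper name is a hypothetical example.
def _example_decode_iie_bits(iie_bits):
    elem_type = "INH" if iie_bits & 0x80000000 else "TASK"
    refs = iie_bits & 0x7fffffff
    return (elem_type, refs)
# e.g. _example_decode_iie_bits(0x80000003) == ('INH', 3)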
@header("{: <18s} {: <18s} {: <20s}".format("iit", "task", "name"))
@lldb_type_summary(['ipc_importance_task *'])
def GetIPCImportantTaskSummary(iit):
""" iit is a ipc_importance_task value object.
"""
fmt = "{: <#018x} {: <#018x} {: <20s}"
out_str=''
pname = GetProcNameForTask(iit.iit_task)
if hasattr(iit, 'iit_bsd_pid'):
pname = "({:d}) {:s}".format(iit.iit_bsd_pid, iit.iit_procname)
out_str += fmt.format(iit, iit.iit_task, pname)
return out_str
@lldb_command('showallimportancetasks')
def ShowIPCImportanceTasks(cmd_args=[], cmd_options={}):
""" display a list of all tasks with ipc importance information.
Usage: (lldb) showallimportancetasks
Tip: add "-v" to see detailed information on each kmsg or inherit elems
"""
print ' ' + GetIPCImportantTaskSummary.header + ' ' + GetIPCImportanceElemSummary.header
for t in kern.tasks:
s = ""
if unsigned(t.task_imp_base):
s += ' ' + GetIPCImportantTaskSummary(t.task_imp_base)
s += ' ' + GetIPCImportanceElemSummary(addressof(t.task_imp_base.iit_elem))
print s
@lldb_command('showipcimportance', '')
def ShowIPCImportance(cmd_args=[], cmd_options={}):
""" Describe an importance from <ipc_importance_elem_t> argument.
Usage: (lldb) showimportance <ipc_importance_elem_t>
"""
if not cmd_args:
raise ArgumentError("Please provide valid argument")
elem = kern.GetValueFromAddress(cmd_args[0], 'ipc_importance_elem_t')
print GetIPCImportanceElemSummary.header
print GetIPCImportanceElemSummary(elem)
@header("{: <18s} {: <10s} {: <18s} {: <18s} {: <8s} {: <5s} {: <5s} {: <5s}".format("ivac", "refs", "port", "tbl", "tblsize", "index", "Grow", "freelist"))
@lldb_type_summary(['ipc_voucher_attr_control *', 'ipc_voucher_attr_control_t'])
def GetIPCVoucherAttrControlSummary(ivac):
""" describes a voucher attribute control settings """
out_str = ""
fmt = "{c: <#018x} {c.ivac_refs: <10d} {c.ivac_port: <#018x} {c.ivac_table: <#018x} {c.ivac_table_size: <8d} {c.ivac_key_index: <5d} {growing: <5s} {c.ivac_freelist: <5d}"
growing_str = ""
if unsigned(ivac) == 0:
return "{: <#018x}".format(ivac)
if unsigned(ivac.ivac_is_growing):
growing_str = "Y"
out_str += fmt.format(c=ivac, growing = growing_str)
return out_str
@lldb_command('showivac','')
def ShowIPCVoucherAttributeControl(cmd_args=[], cmd_options={}):
""" Show summary of voucher attribute contols.
Usage: (lldb) showivac <ipc_voucher_attr_control_t>
"""
if not cmd_args:
raise ArgumentError("Please provide correct arguments.")
ivac = kern.GetValueFromAddress(cmd_args[0], 'ipc_voucher_attr_control_t')
print GetIPCVoucherAttrControlSummary.header
print GetIPCVoucherAttrControlSummary(ivac)
if config['verbosity'] > vHUMAN:
cur_entry_index = 0
last_entry_index = unsigned(ivac.ivac_table_size)
print "index " + GetIPCVoucherAttributeEntrySummary.header
while cur_entry_index < last_entry_index:
print "{: <5d} ".format(cur_entry_index) + GetIPCVoucherAttributeEntrySummary(addressof(ivac.ivac_table[cur_entry_index]))
cur_entry_index += 1
@header("{: <18s} {: <30s} {: <30s} {: <30s} {: <30s} {: <30s}".format("ivam", "get_value_fn", "extract_fn", "release_value_fn", "command_fn", "release_fn"))
@lldb_type_summary(['ipc_voucher_attr_manager *', 'ipc_voucher_attr_manager_t'])
def GetIPCVoucherAttrManagerSummary(ivam):
""" describes a voucher attribute manager settings """
out_str = ""
fmt = "{: <#018x} {: <30s} {: <30s} {: <30s} {: <30s} {: <30s}"
if unsigned(ivam) == 0 :
return "{: <#018x}".format(ivam)
get_value_fn = kern.Symbolicate(unsigned(ivam.ivam_get_value))
extract_fn = kern.Symbolicate(unsigned(ivam.ivam_extract_content))
release_value_fn = kern.Symbolicate(unsigned(ivam.ivam_release_value))
command_fn = kern.Symbolicate(unsigned(ivam.ivam_command))
release_fn = kern.Symbolicate(unsigned(ivam.ivam_release))
out_str += fmt.format(ivam, get_value_fn, extract_fn, release_value_fn, command_fn, release_fn)
return out_str
@header("{: <18s} {: <10s} {:s} {:s}".format("ivgte", "key", GetIPCVoucherAttrControlSummary.header.strip(), GetIPCVoucherAttrManagerSummary.header.strip()))
@lldb_type_summary(['ipc_voucher_global_table_element *', 'ipc_voucher_global_table_element_t'])
def GetIPCVoucherGlobalTableElementSummary(ivgte):
""" describes a ipc_voucher_global_table_element object """
out_str = ""
fmt = "{g: <#018x} {g.ivgte_key: <10d} {ctrl_s:s} {mgr_s:s}"
out_str += fmt.format(g=ivgte, ctrl_s=GetIPCVoucherAttrControlSummary(ivgte.ivgte_control), mgr_s=GetIPCVoucherAttrManagerSummary(ivgte.ivgte_manager))
return out_str
@lldb_command('showglobalvouchertable', '')
def ShowGlobalVoucherTable(cmd_args=[], cmd_options={}):
""" show detailed information of all voucher attribute managers registered with vouchers system
Usage: (lldb) showglobalvouchertable
"""
entry_size = sizeof(kern.globals.iv_global_table[0])
elems = sizeof(kern.globals.iv_global_table) / entry_size
print GetIPCVoucherGlobalTableElementSummary.header
for i in range(elems):
elt = addressof(kern.globals.iv_global_table[i])
print GetIPCVoucherGlobalTableElementSummary(elt)
# Type summaries for Bag of Bits.
@lldb_type_summary(['user_data_value_element', 'user_data_element_t'])
@header("{0: <20s} {1: <16s} {2: <20s} {3: <20s} {4: <16s} {5: <20s}".format("user_data_ve", "maderefs", "checksum", "hash value", "size", "data"))
def GetBagofBitsElementSummary(data_element):
""" Summarizes the Bag of Bits element
params: data_element = value of the object of type user_data_value_element_t
returns: String with summary of the type.
"""
format_str = "{0: <#020x} {1: <16d} {2: <#020x} {3: <#020x} {4: <16d}"
out_string = format_str.format(data_element, unsigned(data_element.e_made), data_element.e_sum, data_element.e_hash, unsigned(data_element.e_size))
out_string += " 0x"
for i in range(0, (unsigned(data_element.e_size) - 1)):
out_string += "{:02x}".format(int(data_element.e_data[i]))
return out_string
def GetIPCHandleSummary(handle_ptr):
""" converts a handle value inside a voucher attribute table to ipc element and returns appropriate summary.
params: handle_ptr - uint64 number stored in handle of voucher.
returns: str - string summary of the element held in internal structure
"""
elem = kern.GetValueFromAddress(handle_ptr, 'ipc_importance_elem_t')
if elem.iie_bits & 0x80000000 :
iie = Cast(elem, 'struct ipc_importance_inherit *')
return GetIPCImportanceInheritSummary(iie)
else:
iit = Cast(elem, 'struct ipc_importance_task *')
return GetIPCImportantTaskSummary(iit)
def GetATMHandleSummary(handle_ptr):
""" Convert a handle value to atm value and returns corresponding summary of its fields.
params: handle_ptr - uint64 number stored in handle of voucher
returns: str - summary of atm value
"""
elem = kern.GetValueFromAddress(handle_ptr, 'atm_value *')
return GetATMValueSummary(elem)
def GetBankHandleSummary(handle_ptr):
""" converts a handle value inside a voucher attribute table to bank element and returns appropriate summary.
params: handle_ptr - uint64 number stored in handle of voucher.
returns: str - summary of bank element
"""
elem = kern.GetValueFromAddress(handle_ptr, 'bank_element_t')
if elem.be_type & 1 :
ba = Cast(elem, 'struct bank_account *')
return GetBankAccountSummary(ba)
else:
bt = Cast(elem, 'struct bank_task *')
return GetBankTaskSummary(bt)
def GetBagofBitsHandleSummary(handle_ptr):
""" Convert a handle value to bag of bits value and returns corresponding summary of its fields.
params: handle_ptr - uint64 number stored in handle of voucher
returns: str - summary of bag of bits element
"""
elem = kern.GetValueFromAddress(handle_ptr, 'user_data_element_t')
return GetBagofBitsElementSummary(elem)
@static_var('attr_managers',{1: GetATMHandleSummary, 2: GetIPCHandleSummary, 3: GetBankHandleSummary, 7: GetBagofBitsHandleSummary})
def GetHandleSummaryForKey(handle_ptr, key_num):
""" Get a summary of handle pointer from the voucher attribute manager.
For example key 1 -> ATM and it puts atm_value_t in the handle. So summary of it would be atm value and refs etc.
key 2 -> ipc and it puts either ipc_importance_inherit_t or ipc_important_task_t.
key 3 -> Bank and it puts either bank_task_t or bank_account_t.
key 7 -> Bag of Bits and it puts user_data_element_t in handle. So summary of it would be Bag of Bits content and refs etc.
"""
key_num = int(key_num)
if key_num not in GetHandleSummaryForKey.attr_managers:
return "Unknown key %d" % key_num
return GetHandleSummaryForKey.attr_managers[key_num](handle_ptr)
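# Minimal, self-contained sketch of the dispatch pattern GetHandleSummaryForKey
# relies on: a decorator hangs a dict off the function object and the key number
# picks the handler. Everything below is a hypothetical illustration; the real
# key-to-handler table is the attr_managers dict attached above.
def _example_static_var(var_name, initial_value):
    def _decorate(func):
        setattr(func, var_name, initial_value)
        return func
    return _decorate

@_example_static_var('handlers', {1: lambda h: "atm({:#x})".format(h),
                                  2: lambda h: "ipc({:#x})".format(h)})
def _example_summarize_handle(handle, key):
    handler = _example_summarize_handle.handlers.get(int(key))
    if handler is None:
        return "Unknown key {:d}".format(int(key))
    return handler(handle)
# e.g. _example_summarize_handle(0x1234, 2) == 'ipc(0x1234)'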
@header("{: <18s} {: <18s} {: <10s} {: <4s} {: <18s} {: <18s}".format("ivace", "value_handle", "#refs", "rel?", "maderefs", "next_layer"))
@lldb_type_summary(['ivac_entry *', 'ivac_entry_t'])
def GetIPCVoucherAttributeEntrySummary(ivace, manager_key_num = 0):
""" Get summary for voucher attribute entry.
"""
out_str = ""
fmt = "{e: <#018x} {e.ivace_value: <#018x} {e.ivace_refs: <10d} {release: <4s} {made_refs: <18s} {next_layer: <18s}"
release_str = ""
free_str = ""
made_refs = ""
next_layer = ""
if unsigned(ivace.ivace_releasing):
release_str = "Y"
if unsigned(ivace.ivace_free):
free_str = 'F'
if unsigned(ivace.ivace_layered):
next_layer = "{: <#018x}".format(ivace.ivace_u.ivaceu_layer)
else:
made_refs = "{: <18d}".format(ivace.ivace_u.ivaceu_made)
out_str += fmt.format(e=ivace, release=release_str, made_refs=made_refs, next_layer=next_layer)
if config['verbosity'] > vHUMAN and manager_key_num > 0:
out_str += " " + GetHandleSummaryForKey(unsigned(ivace.ivace_value), manager_key_num)
if config['verbosity'] > vHUMAN :
out_str += ' {: <2s} {: <4d} {: <4d}'.format(free_str, ivace.ivace_next, ivace.ivace_index)
return out_str
@lldb_command('showivacfreelist','')
def ShowIVACFreeList(cmd_args=[], cmd_options={}):
""" Walk the free list and print every entry in the list.
usage: (lldb) showivacfreelist <ipc_voucher_attr_control_t>
"""
if not cmd_args:
raise ArgumentError('Please provide <ipc_voucher_attr_control_t>')
ivac = kern.GetValueFromAddress(cmd_args[0], 'ipc_voucher_attr_control_t')
print GetIPCVoucherAttrControlSummary.header
print GetIPCVoucherAttrControlSummary(ivac)
if unsigned(ivac.ivac_freelist) == 0:
print "ivac table is full"
return
print "index " + GetIPCVoucherAttributeEntrySummary.header
next_free = unsigned(ivac.ivac_freelist)
while next_free != 0:
print "{: <5d} ".format(next_free) + GetIPCVoucherAttributeEntrySummary(addressof(ivac.ivac_table[next_free]))
next_free = unsigned(ivac.ivac_table[next_free].ivace_next)
@header('{: <18s} {: <8s} {: <18s} {: <18s} {: <18s} {: <18s} {: <18s}'.format("ipc_voucher", "refs", "checksum", "hash", "tbl_size", "table", "voucher_port"))
@lldb_type_summary(['ipc_voucher *', 'ipc_voucher_t'])
def GetIPCVoucherSummary(voucher, show_entries=False):
""" describe a voucher from its ipc_voucher * object """
out_str = ""
fmt = "{v: <#018x} {v.iv_refs: <8d} {v.iv_sum: <#018x} {v.iv_hash: <#018x} {v.iv_table_size: <#018x} {v.iv_table: <#018x} {v.iv_port: <#018x}"
out_str += fmt.format(v = voucher)
entries_str = ''
if show_entries or config['verbosity'] > vHUMAN:
elems = unsigned(voucher.iv_table_size)
entries_header_str = "\n\t" + "{: <5s} {: <3s} {: <16s} {: <30s}".format("index", "key", "value_index", "manager") + " " + GetIPCVoucherAttributeEntrySummary.header
fmt = "{: <5d} {: <3d} {: <16d} {: <30s}"
for i in range(elems):
voucher_entry_index = unsigned(voucher.iv_inline_table[i])
if voucher_entry_index:
s = fmt.format(i, GetVoucherManagerKeyForIndex(i), voucher_entry_index, GetVoucherAttributeManagerNameForIndex(i))
e = GetVoucherValueHandleFromVoucherForIndex(voucher, i)
if e is not None:
s += " " + GetIPCVoucherAttributeEntrySummary(addressof(e), GetVoucherManagerKeyForIndex(i) )
if entries_header_str :
entries_str = entries_header_str
entries_header_str = ''
entries_str += "\n\t" + s
if not entries_header_str:
entries_str += "\n\t"
out_str += entries_str
return out_str
def GetVoucherManagerKeyForIndex(idx):
""" Returns key number for index based on global table. Will raise index error if value is incorrect
"""
return unsigned(kern.globals.iv_global_table[idx].ivgte_key)
def GetVoucherAttributeManagerForKey(k):
""" Walks through the iv_global_table and finds the attribute manager name
params: k - int key number of the manager
return: cvalue - the attribute manager object.
None - if not found
"""
retval = None
entry_size = sizeof(kern.globals.iv_global_table[0])
elems = sizeof(kern.globals.iv_global_table) / entry_size
for i in range(elems):
elt = addressof(kern.globals.iv_global_table[i])
if k == unsigned(elt.ivgte_key):
retval = elt.ivgte_manager
break
return retval
def GetVoucherAttributeControllerForKey(k):
""" Walks through the iv_global_table and finds the attribute controller
params: k - int key number of the manager
return: cvalue - the attribute controller object.
None - if not found
"""
retval = None
entry_size = sizeof(kern.globals.iv_global_table[0])
elems = sizeof(kern.globals.iv_global_table) / entry_size
for i in range(elems):
elt = addressof(kern.globals.iv_global_table[i])
if k == unsigned(elt.ivgte_key):
retval = elt.ivgte_control
break
return retval
def GetVoucherAttributeManagerName(ivam):
""" find the name of the ivam object
param: ivam - cvalue object of type ipc_voucher_attr_manager_t
returns: str - name of the manager
"""
return kern.Symbolicate(unsigned(ivam))
def GetVoucherAttributeManagerNameForIndex(idx):
""" get voucher attribute manager name for index
return: str - name of the attribute manager object
"""
return GetVoucherAttributeManagerName(GetVoucherAttributeManagerForKey(GetVoucherManagerKeyForIndex(idx)))
def GetVoucherValueHandleFromVoucherForIndex(voucher, idx):
""" traverse the voucher attrs and get value_handle in the voucher attr controls table
params:
voucher - cvalue object of type ipc_voucher_t
            idx - int index of the entry for which you wish to get the actual handle
returns: cvalue object of type ivac_entry_t
None if no handle found.
"""
manager_key = GetVoucherManagerKeyForIndex(idx)
voucher_num_elems = unsigned(voucher.iv_table_size)
if idx >= voucher_num_elems:
debuglog("idx %d is out of range max: %d" % (idx, voucher_num_elems))
return None
voucher_entry_value = unsigned(voucher.iv_inline_table[idx])
debuglog("manager_key %d" % manager_key)
ivac = GetVoucherAttributeControllerForKey(manager_key)
if ivac is None or unsigned(ivac) == 0:
debuglog("No voucher attribute controller for idx %d" % idx)
return None
ivac = kern.GetValueFromAddress(unsigned(ivac), 'ipc_voucher_attr_control_t') # ??? No idea why lldb does not addressof directly
ivace_table = ivac.ivac_table
if voucher_entry_value >= unsigned(ivac.ivac_table_size):
print "Failed to get ivace for value %d in table of size %d" % (voucher_entry_value, unsigned(ivac.ivac_table_size))
return None
return ivace_table[voucher_entry_value]
@lldb_command('showallvouchers')
def ShowAllVouchers(cmd_args=[], cmd_options={}):
""" Display a list of all vouchers in the global voucher hash table
Usage: (lldb) showallvouchers
"""
iv_hash_table = kern.globals.ivht_bucket
num_buckets = sizeof(kern.globals.ivht_bucket) / sizeof(kern.globals.ivht_bucket[0])
print GetIPCVoucherSummary.header
for i in range(num_buckets):
for v in IterateQueue(iv_hash_table[i], 'ipc_voucher_t', 'iv_hash_link'):
print GetIPCVoucherSummary(v)
@lldb_command('showvoucher', '')
def ShowVoucher(cmd_args=[], cmd_options={}):
""" Describe a voucher from <ipc_voucher_t> argument.
Usage: (lldb) showvoucher <ipc_voucher_t>
"""
if not cmd_args:
raise ArgumentError("Please provide valid argument")
voucher = kern.GetValueFromAddress(cmd_args[0], 'ipc_voucher_t')
print GetIPCVoucherSummary.header
print GetIPCVoucherSummary(voucher, show_entries=True)
| 42.618285
| 186
| 0.648835
|
be015dcb455504cf55a34eb613263c8aaf4a86f5
| 56,787
|
py
|
Python
|
jp.atcoder/abc011/abc011_4/16937650.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc011/abc011_4/16937650.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc011/abc011_4/16937650.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
import math
import string
import sys
from bisect import bisect_left as bi_l
from bisect import bisect_right as bi_r
from collections import Counter, defaultdict, deque
from heapq import heappop, heappush
from itertools import accumulate, combinations, product
# from numba import jit
import networkx as nx
import numpy as np
from scipy import optimize
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import maximum_flow, shortest_path
inf = float("inf")
from functools import lru_cache, reduce
sys.setrecursionlimit(10**6)
MOD = 10**9 + 7
# MOD = 998244353
class NumberTheory:
def __init__(self, n=2 * 10**6, numpy=True):
self.n = n
self.np_flg = numpy
self.is_prime_number, self.prime_numbers = self.sieve_of_eratosthenes(
n
)
def sieve_of_eratosthenes(self, n):
if self.np_flg:
sieve = np.ones(n + 1, dtype=np.int64)
sieve[:2] = 0
for i in range(2, int(n**0.5) + 1):
if sieve[i]:
sieve[i * 2 :: i] = 0
prime_numbers = np.flatnonzero(sieve)
else:
sieve = [1] * (n + 1)
sieve[0] = sieve[1] = 0
for i in range(2, int(n**0.5) + 1):
if not sieve[i]:
continue
for j in range(i * 2, n + 1, i):
sieve[j] = 0
prime_numbers = [i for i in range(2, n + 1) if sieve[i]]
return sieve, prime_numbers
def prime_factorize(self, n):
res = dict()
if n < 2:
return res
border = int(n**0.5)
for p in self.prime_numbers:
if p > border:
break
while n % p == 0:
res[p] = res.get(p, 0) + 1
n //= p
if n == 1:
return res
res[n] = 1
return res
def prime_factorize_factorial(self, n):
res = dict()
for i in range(2, n + 1):
for p, c in self.prime_factorize(i).items():
res[p] = res.get(p, 0) + c
return res
@classmethod
def gcd(cls, a, b):
return cls.gcd(b, a % b) if b else abs(a)
@classmethod
def lcm(cls, a, b):
return abs(a // cls.gcd(a, b) * b)
@staticmethod
def find_divisors(n):
divisors = []
for i in range(1, int(n**0.5) + 1):
if n % i:
continue
divisors.append(i)
j = n // i
if j != i:
divisors.append(j)
return divisors
@staticmethod
def base_convert(n, b):
if not n:
return [0]
res = []
while n:
n, r = divmod(n, b)
if r < 0:
n += 1
r -= b
res.append(r)
return res
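# Illustrative usage sketch for the NumberTheory helper above. The function is
# defined but never called, so it does not affect the solutions below; the
# asserted values are what the calls return.
def _example_number_theory_usage():
    nt = NumberTheory(n=100, numpy=False)
    assert nt.prime_numbers[:5] == [2, 3, 5, 7, 11]
    assert nt.prime_factorize(360) == {2: 3, 3: 2, 5: 1}
    assert NumberTheory.gcd(12, 18) == 6
    assert NumberTheory.lcm(4, 6) == 12
    # base_convert returns digits least-significant first
    assert NumberTheory.base_convert(10, 2) == [0, 1, 0, 1]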
class UnionFind:
def __init__(self, n=10**6):
self.root = list(range(n))
self.height = [0] * n
self.size = [1] * n
def find_root(self, u):
if self.root[u] == u:
return u
self.root[u] = self.find_root(self.root[u])
return self.root[u]
def unite(self, u, v):
ru = self.find_root(u)
rv = self.find_root(v)
if ru == rv:
return
hu = self.height[ru]
hv = self.height[rv]
if hu >= hv:
self.root[rv] = ru
self.size[ru] += self.size[rv]
self.height[ru] = max(hu, hv + 1)
else:
self.root[ru] = rv
self.size[rv] += self.size[ru]
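# Illustrative usage sketch for UnionFind above (defined but never called, so it
# does not affect any solution below):
def _example_union_find_usage():
    uf = UnionFind(n=5)
    uf.unite(0, 1)
    uf.unite(1, 2)
    assert uf.find_root(0) == uf.find_root(2)
    assert uf.size[uf.find_root(0)] == 3
    assert uf.find_root(3) != uf.find_root(0)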
class Combinatorics:
def __init__(self, N=10**9, n=10**6, mod=10**9 + 7, numpy=True):
self.mod = mod
self.nCr = dict()
self.np_flg = numpy
self.make_mod_tables(N, n)
sys.setrecursionlimit(10**6)
def choose(self, n, r, mod=None): # no mod, or mod ≠ prime
if r > n or r < 0:
return 0
if r == 0:
return 1
if (n, r) in self.nCr:
return self.nCr[(n, r)]
if not mod:
self.nCr[(n, r)] = self.choose(n - 1, r) + self.choose(
n - 1, r - 1
)
else:
self.nCr[(n, r)] = (
self.choose(n - 1, r, mod) + self.choose(n - 1, r - 1, mod)
) % mod
return self.nCr[(n, r)]
def cumprod(self, a):
p = self.mod
l = len(a)
sql = int(np.sqrt(l) + 1)
a = np.resize(a, sql**2).reshape(sql, sql)
for i in range(sql - 1):
a[:, i + 1] *= a[:, i]
a[:, i + 1] %= p
for i in range(sql - 1):
a[i + 1] *= a[i, -1]
a[i + 1] %= p
return np.ravel(a)[:l]
def make_mod_tables(self, N, n):
p = self.mod
if self.np_flg:
fac = np.arange(n + 1)
fac[0] = 1
fac = self.cumprod(fac)
ifac = np.arange(n + 1, 0, -1)
ifac[0] = pow(int(fac[-1]), p - 2, p)
ifac = self.cumprod(ifac)[n::-1]
n_choose = np.arange(N + 1, N - n, -1)
n_choose[0] = 1
n_choose[1:] = self.cumprod(n_choose[1:]) * ifac[1 : n + 1] % p
else:
fac = [None] * (n + 1)
fac[0] = 1
for i in range(n):
fac[i + 1] = fac[i] * (i + 1) % p
ifac = [None] * (n + 1)
ifac[n] = pow(fac[n], p - 2, p)
for i in range(n, 0, -1):
ifac[i - 1] = ifac[i] * i % p
n_choose = [None] * (n + 1)
n_choose[0] = 1
for i in range(n):
n_choose[i + 1] = n_choose[i] * (N - i) % p
for i in range(n + 1):
n_choose[i] = n_choose[i] * ifac[i] % p
self.fac, self.ifac, self.mod_n_choose = fac, ifac, n_choose
def mod_choose(self, n, r):
return (
self.fac[n] * self.ifac[r] % self.mod * self.ifac[n - r] % self.mod
)
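# Illustrative usage sketch for Combinatorics above (defined but never called;
# the small table sizes are assumptions chosen just for the example):
def _example_combinatorics_usage():
    comb = Combinatorics(N=10**5, n=10**3, mod=10**9 + 7, numpy=True)
    assert comb.mod_choose(10, 3) == 120   # 10 choose 3 via the factorial tables
    assert comb.choose(5, 2) == 10         # memoized Pascal recursion, no mod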
def z_algorithm(s):
n = len(s)
a = [0] * n
a[0] = n
l = r = -1
for i in range(1, n):
if r >= i:
a[i] = min(a[i - l], r - i)
while i + a[i] < n and s[i + a[i]] == s[a[i]]:
a[i] += 1
if i + a[i] >= r:
l, r = i, i + a[i]
return a
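# Illustrative usage sketch for z_algorithm above (defined but never called):
# a[i] is the length of the longest common prefix of s and s[i:], with a[0] = len(s).
def _example_z_algorithm_usage():
    assert z_algorithm("aabxaab") == [7, 1, 0, 0, 3, 1, 0]
    assert z_algorithm("aaaa") == [4, 3, 2, 1]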
class GeometryTopology:
class GraphTheory:
class MaximumFlow:
@staticmethod
def dinic(graph, s, t): # s: source, t: sink
def bfs():
level = defaultdict(int)
level[s] = 0
q = deque([s])
while q:
u = q.popleft()
for v, cap in graph[u].items():
if cap == 0 or v in level:
continue
level[v] = level[u] + 1
q.append(v)
return level
def flow_to_sink(u, flow_in):
if u == t:
return flow_in
flow = 0
for v, cap in graph[u].items():
if cap == 0 or level[v] <= level[u]:
continue
f = flow_to_sink(v, min(flow_in, cap))
if not f:
continue
graph[u][v] -= f
graph[v][u] += f
flow_in -= f
flow += f
return flow
flow = 0
while True:
level = bfs()
if not t in level:
return flow
flow += flow_to_sink(s, float("inf"))
@staticmethod
def ford_fulkerson():
pass
@staticmethod
def push_relabel():
pass
class ShortestPath:
pass
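# Illustrative usage sketch for the Dinic max-flow helper above (defined but
# never called). The graph layout mirrors how ABC010.d_3 below builds it: nested
# dicts of residual capacities, with every reverse edge pre-seeded (capacity 0
# here) so the residual updates inside dinic() never hit a missing key. The tiny
# 3-node network is an assumption chosen just for the example.
def _example_dinic_usage():
    graph = defaultdict(dict)
    for u, v, cap in [(0, 1, 2), (1, 2, 1), (0, 2, 1)]:
        graph[u][v] = cap
        graph[v].setdefault(u, 0)
    max_flow = GeometryTopology.GraphTheory.MaximumFlow.dinic(graph, 0, 2)
    assert max_flow == 2  # one unit via 0->2 directly, one via 0->1->2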
class AtCoder:
class ABC001:
@staticmethod
def a():
h1, h2 = map(int, sys.stdin.read().split())
print(h1 - h2)
class ABC002:
@staticmethod
def a():
print(max(map(int, sys.stdin.readline().split())))
@staticmethod
def b():
vowels = set("aeiou")
print(
"".join(
[
c
for c in sys.stdin.readline().rstrip()
if c not in vowels
]
)
)
@staticmethod
def c():
def triangle_area(x0, y0, x1, y1, x2, y2):
x1 -= x0
x2 -= x0
y1 -= y0
y2 -= y0
return abs(x1 * y2 - x2 * y1) / 2
print(triangle_area(*map(int, sys.stdin.readline().split())))
@staticmethod
def d():
n, m = map(int, sys.stdin.readline().split())
edges = set(
(x - 1, y - 1)
for x, y in zip(*[map(int, sys.stdin.read().split())] * 2)
)
print(
max(
len(s)
for i in range(1, 1 << n)
for s in [[j for j in range(n) if i >> j & 1]]
if all((x, y) in edges for x, y in combinations(s, 2))
)
)
@staticmethod
def d_2():
n, m = map(int, sys.stdin.readline().split())
relations = [1 << i for i in range(n)]
for x, y in zip(*[map(int, sys.stdin.read().split())] * 2):
x -= 1
y -= 1
relations[x] |= 1 << y
relations[y] |= 1 << x
res = 0
for i in range(1 << n):
cnt = 0
s = 0
t = (1 << n) - 1
for j in range(n):
if i >> j & 1:
s |= 1 << j
t &= relations[j]
cnt += 1
if t & s == s:
res = max(res, cnt)
print(res)
class ABC003:
@staticmethod
def a():
print((int(sys.stdin.readline().rstrip()) + 1) * 5000)
@staticmethod
def b():
atcoder = set("atcoder")
s, t = sys.stdin.read().split()
print(
all(
s[i] == t[i]
or s[i] == "@"
and t[i] in atcoder
or t[i] == "@"
and s[i] in atcoder
for i in range(len(s))
)
and "You can win"
or "You will lose"
)
@staticmethod
def c():
n, k, *r = map(int, sys.stdin.read().split())
print(reduce(lambda x, y: (x + y) / 2, sorted(r)[-k:], 0))
class ABC004:
@staticmethod
def a():
print(int(sys.stdin.readline().rstrip()) * 2)
@staticmethod
def b():
for l in [sys.stdin.readline().rstrip() for _ in range(4)][::-1]:
print(l[::-1])
@staticmethod
def c():
n = int(sys.stdin.readline().rstrip()) % 30
res = list(range(1, 7))
for i in range(n):
i %= 5
res[i], res[i + 1] = res[i + 1], res[i]
print(*res, sep="")
class ABC005:
@staticmethod
def a():
x, y = map(int, sys.stdin.readline().split())
print(y // x)
@staticmethod
def b():
n, *t = map(int, sys.stdin.read().split())
print(min(t))
@staticmethod
def c():
t = int(sys.stdin.readline().rstrip())
n = int(sys.stdin.readline().rstrip())
a = [int(x) for x in sys.stdin.readline().split()]
m = int(sys.stdin.readline().rstrip())
b = [int(x) for x in sys.stdin.readline().split()]
i = 0
for p in b:
if i == n:
print("no")
return
while p - a[i] > t:
i += 1
if i == n:
print("no")
return
if a[i] > p:
print("no")
return
i += 1
print("yes")
@staticmethod
def d():
n = int(sys.stdin.readline().rstrip())
d = np.array(
[sys.stdin.readline().split() for _ in range(n)], np.int64
)
s = d.cumsum(axis=0).cumsum(axis=1)
s = np.pad(s, 1)
max_del = np.zeros((n + 1, n + 1), dtype=np.int64)
for y in range(1, n + 1):
for x in range(1, n + 1):
max_del[y, x] = np.amax(
s[y : n + 1, x : n + 1]
- s[0 : n - y + 1, x : n + 1]
- s[y : n + 1, 0 : n - x + 1]
+ s[0 : n - y + 1, 0 : n - x + 1]
)
res = np.arange(n**2 + 1)[:, None]
i = np.arange(1, n + 1)
res = max_del[i, np.minimum(res // i, n)].max(axis=1)
q = int(sys.stdin.readline().rstrip())
p = np.array(sys.stdin.read().split(), dtype=np.int64)
print(*res[p], sep="\n")
class ABC006:
@staticmethod
def a():
n = sys.stdin.readline().rstrip()
if "3" in n:
print("YES")
elif int(n) % 3 == 0:
print("YES")
else:
print("NO")
@staticmethod
def b():
mod = 10007
t = [0, 0, 1]
for _ in range(1001001):
t.append(t[-1] + t[-2] + t[-3])
t[-1] %= mod
n = int(sys.stdin.readline().rstrip())
print(t[n - 1])
@staticmethod
def c():
n, m = map(int, sys.stdin.readline().split())
cnt = [0, 0, 0]
if m == 1:
cnt = [-1, -1, -1]
else:
if m & 1:
m -= 3
cnt[1] += 1
n -= 1
cnt[2] = m // 2 - n
cnt[0] = n - cnt[2]
if cnt[0] < 0 or cnt[1] < 0 or cnt[2] < 0:
print(-1, -1, -1)
else:
print(*cnt, sep=" ")
@staticmethod
def d():
n, *c = map(int, sys.stdin.read().split())
lis = [inf] * n
for x in c:
lis[bi_l(lis, x)] = x
print(n - bi_l(lis, inf))
class ABC007:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
print(n - 1)
@staticmethod
def b():
s = sys.stdin.readline().rstrip()
if s == "a":
print(-1)
else:
print("a")
@staticmethod
def c():
r, c = map(int, sys.stdin.readline().split())
sy, sx = map(int, sys.stdin.readline().split())
gy, gx = map(int, sys.stdin.readline().split())
sy -= 1
sx -= 1
gy -= 1
gx -= 1
maze = [sys.stdin.readline().rstrip() for _ in range(r)]
queue = deque([(sy, sx)])
dist = np.full((r, c), np.inf)
dist[sy, sx] = 0
while queue:
y, x = queue.popleft()
for i, j in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
i += y
j += x
if maze[i][j] == "#" or dist[i, j] != np.inf:
continue
dist[i, j] = dist[y, x] + 1
queue.append((i, j))
print(int(dist[gy, gx]))
@staticmethod
def d():
ng = set([4, 9])
def count(d):
return d if d <= 4 else d - 1
def f(n):
x = [int(d) for d in str(n)]
flg = True
dp = 0
for d in x:
dp = dp * 8 + flg * count(d)
if d in ng:
flg = False
return n - (dp + flg)
a, b = map(int, sys.stdin.readline().split())
print(f(b) - f(a - 1))
class ABC008:
@staticmethod
def a():
s, t = map(int, sys.stdin.readline().split())
print(t - s + 1)
@staticmethod
def b():
n, *s = sys.stdin.read().split()
res = defaultdict(int)
for name in s:
res[name] += 1
print(sorted(res.items(), key=lambda x: x[1])[-1][0])
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
a = np.array(a)
c = n - np.count_nonzero(a[:, None] % a, axis=1)
print(np.sum((c + 1) // 2 / c))
@staticmethod
def d():
w, h, n, *xy = map(int, sys.stdin.read().split())
(*xy,) = zip(*([iter(xy)] * 2))
@lru_cache(maxsize=None)
def count(x1, y1, x2, y2):
res = 0
for x, y in xy:
if not (x1 <= x <= x2 and y1 <= y <= y2):
continue
cnt = (x2 - x1) + (y2 - y1) + 1
cnt += count(x1, y1, x - 1, y - 1)
cnt += count(x1, y + 1, x - 1, y2)
cnt += count(x + 1, y1, x2, y - 1)
cnt += count(x + 1, y + 1, x2, y2)
res = max(res, cnt)
return res
print(count(1, 1, w, h))
class ABC009:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
print((n + 1) // 2)
@staticmethod
def b():
n, *a = map(int, sys.stdin.read().split())
print(sorted(set(a))[-2])
@staticmethod
def c():
n, k = map(int, sys.stdin.readline().split())
s = list(sys.stdin.readline().rstrip())
cost = [1] * n
r = k
for i in range(n - 1):
q = []
for j in range(i + 1, n):
if s[j] < s[i] and cost[i] + cost[j] <= r:
heappush(q, (s[j], cost[i] + cost[j], -j))
if not q:
continue
_, c, j = heappop(q)
j = -j
s[i], s[j] = s[j], s[i]
r -= c
cost[i] = cost[j] = 0
print("".join(s))
@staticmethod
def d():
k, m = map(int, sys.stdin.readline().split())
a = np.array([int(x) for x in sys.stdin.readline().split()])
c = np.array([int(x) for x in sys.stdin.readline().split()])
mask = (1 << 32) - 1
d = np.eye(k, k, -1, dtype=np.uint32) * mask
d[0] = c
def bitwise_dot(a, b):
return np.bitwise_xor.reduce(
a[:, None, :] & b.T[None, :, :], axis=-1
)
def bitwise_mat_pow(a, n):
if n == 0:
return np.eye(k, dtype=np.uint32) * mask
res = bitwise_mat_pow(a, n // 2)
res = bitwise_dot(res, res)
return bitwise_dot(res, a) if n & 1 else res
if m <= k:
print(a[m - 1])
return
print(
bitwise_dot(bitwise_mat_pow(d, m - k), a[::-1].reshape(-1, 1))[
0
].item()
)
class ABC010:
@staticmethod
def a():
print(sys.stdin.readline().rstrip() + "pp")
@staticmethod
def b():
n, *a = map(int, sys.stdin.read().split())
tot = 0
for x in a:
c = 0
while x % 2 == 0 or x % 3 == 2:
x -= 1
c += 1
tot += c
print(tot)
@staticmethod
def c():
sx, sy, gx, gy, t, v, n, *xy = map(int, sys.stdin.read().split())
x, y = np.array(xy).reshape(-1, 2).T
def dist(x1, y1, x2, y2):
return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
ans = (
"YES"
if (dist(sx, sy, x, y) + dist(x, y, gx, gy) <= v * t).any()
else "NO"
)
print(ans)
@staticmethod
def d_1():
n, g, e = map(int, sys.stdin.readline().split())
p = [int(x) for x in sys.stdin.readline().split()]
x, y = [], []
for _ in range(e):
a, b = map(int, sys.stdin.readline().split())
x.append(a)
y.append(b)
x.append(b)
y.append(a)
for a in p:
x.append(a)
y.append(n)
if not x:
print(0)
return
c = [1] * len(x)
min_cut = maximum_flow(
csr_matrix((c, (x, y)), (n + 1, n + 1)), source=0, sink=n
).flow_value
print(min_cut)
@staticmethod
def d_2():
n, g, e = map(int, sys.stdin.readline().split())
if g + e == 0:
print(0)
return
graph = nx.DiGraph()
graph.add_nodes_from(range(n + 1))
for p in [int(x) for x in sys.stdin.readline().split()]:
graph.add_edge(p, n, capacity=1)
for _ in range(e):
a, b = map(int, sys.stdin.readline().split())
graph.add_edge(a, b, capacity=1)
graph.add_edge(b, a, capacity=1)
print(nx.minimum_cut_value(graph, 0, n))
@staticmethod
def d_3():
n, g, e = map(int, sys.stdin.readline().split())
if g + e == 0:
print(0)
return
graph = defaultdict(dict)
for p in [int(x) for x in sys.stdin.readline().split()]:
graph[p][n] = 1
graph[n][p] = 0
for a, b in zip(*[map(int, sys.stdin.read().split())] * 2):
graph[a][b] = 1
graph[b][a] = 1
print(GeometryTopology.GraphTheory.MaximumFlow.dinic(graph, 0, n))
class ABC011:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
print(n % 12 + 1)
@staticmethod
def b():
s = sys.stdin.readline().rstrip()
print(s[0].upper() + s[1:].lower())
@staticmethod
def c():
n, *ng = map(int, sys.stdin.read().split())
ng = set(ng)
if n in ng:
print("NO")
else:
r = 100
while n > 0:
if r == 0:
print("NO")
return
for i in range(3, 0, -1):
if (n - i) in ng:
continue
n -= i
r -= 1
break
else:
print("NO")
return
print("YES")
@staticmethod
def d():
n, d, x, y = map(int, sys.stdin.read().split())
x, y = abs(x), abs(y)
if x % d or y % d:
print(0)
return
x, y = x // d, y // d
r = n - (x + y)
if r < 0 or r & 1:
print(0)
return
combinatorics = Combinatorics()
choose = combinatorics.choose
res = 0
half_p = pow(1 / 2, n)
for d in range(r // 2 + 1): # 0 <= d <= r//2, south
south, north = d, y + d
west = (r - 2 * d) // 2
res += (
half_p
* choose(n, south)
* choose(n - south, north)
* choose(n - south - north, west)
* half_p
)
print(res)
class ABC032:
@staticmethod
def a():
a, b, n = map(int, sys.stdin.read().split())
l = NumberTheory.lcm(a, b)
print((n + l - 1) // l * l)
@staticmethod
def b():
s, k = sys.stdin.read().split()
k = int(k)
res = set()
for i in range(len(s) - k + 1):
res.add(s[i : i + k])
print(len(res))
@staticmethod
def c():
n, k, *s = map(int, sys.stdin.read().split())
if 0 in s:
print(n)
return
s += [inf]
res = 0
l = r = 0
tmp = 1
while r <= n:
tmp *= s[r]
while tmp > k:
res = max(res, r - l)
tmp //= s[l]
l += 1
r += 1
print(res)
class ABC033:
@staticmethod
def a():
n = set(sys.stdin.readline().rstrip())
print("SAME" if len(n) == 1 else "DIFFERENT")
@staticmethod
def b():
n = int(sys.stdin.readline().rstrip())
res = dict()
for _ in range(n):
s, p = sys.stdin.readline().split()
p = int(p)
res[s] = p
tot = sum(res.values())
for s, p in res.items():
if p > tot / 2:
print(s)
return
print("atcoder")
@staticmethod
def c():
s = sys.stdin.readline().rstrip()
res = sum(not "0" in f for f in s.split("+"))
print(res)
class ABC034:
@staticmethod
def a():
x, y = map(int, sys.stdin.readline().split())
print("Better" if y > x else "Worse")
@staticmethod
def b():
n = int(sys.stdin.readline().rstrip())
print(n + 1 if n & 1 else n - 1)
@staticmethod
def c():
h, w = map(int, sys.stdin.read().split())
combinatorics = Combinatorics(n=2 * 10**5, numpy=True, mod=MOD)
print(combinatorics.mod_choose(h + w - 2, h - 1))
@staticmethod
def d():
n, k, *wp = map(int, sys.stdin.read().split())
w, p = np.array(wp).reshape(-1, 2).T
def f(x):
return np.sort(w * (p - x))[-k:].sum()
print(optimize.bisect(f, 0, 100))
class ABC035:
@staticmethod
def a():
w, h = map(int, sys.stdin.readline().split())
print("4:3" if 4 * h == 3 * w else "16:9")
@staticmethod
def b():
s, t = sys.stdin.read().split()
y = 0
x = 0
z = 0
for c in s:
if c == "?":
z += 1
elif c == "L":
x -= 1
elif c == "R":
x += 1
elif c == "D":
y -= 1
elif c == "U":
y += 1
d = abs(y) + abs(x)
if t == "1":
print(d + z)
else:
print(max(d - z, (d - z) & 1))
@staticmethod
def c():
n, q, *lr = map(int, sys.stdin.read().split())
l, r = np.array(lr).reshape(q, 2).T
res = np.zeros(n + 1, dtype=int)
np.add.at(res, l - 1, 1)
np.subtract.at(res, r, 1)
np.cumsum(res, out=res)
res = res & 1
print("".join(map(str, res[:-1])))
@staticmethod
def d():
n, m, t = map(int, sys.stdin.readline().split())
point = np.array(sys.stdin.readline().split(), dtype=int)
a, b, c = (
np.array(sys.stdin.read().split(), dtype=np.int64)
.reshape(m, 3)
.T
)
a -= 1
b -= 1
d_1 = shortest_path(
csr_matrix((c, (a, b)), (n, n)),
method="D",
directed=True,
indices=0,
)
d_2 = shortest_path(
csr_matrix((c, (b, a)), (n, n)),
method="D",
directed=True,
indices=0,
)
print(int(np.amax((t - (d_1 + d_2)) * point)))
class ABC036:
@staticmethod
def a():
a, b = map(int, sys.stdin.readline().split())
print((b + a - 1) // a)
@staticmethod
def b():
n, *s = sys.stdin.read().split()
n = int(n)
for j in range(n):
row = ""
for i in range(n - 1, -1, -1):
row += s[i][j]
print(row)
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
b = [None] * n
prev = None
j = -1
for i, x in sorted(enumerate(a), key=lambda x: x[1]):
if x != prev:
j += 1
b[i] = j
prev = x
print(*b, sep="\n")
@staticmethod
def d():
n, *ab = map(int, sys.stdin.read().split())
edges = [[] for _ in range(n)]
for a, b in zip(*[iter(ab)] * 2):
a -= 1
b -= 1
edges[a].append(b)
edges[b].append(a)
parent = [None] * n
def count(u):
black, white = 1, 1
for v in edges[u]:
if v == parent[u]:
continue
parent[v] = u
b, w = count(v)
black *= w
black %= MOD
white *= (b + w) % MOD
white %= MOD
return black, white
print(sum(count(0)) % MOD)
class ABC037:
@staticmethod
def a():
a, b, c = map(int, sys.stdin.readline().split())
print(c // min(a, b))
@staticmethod
def b():
n, q, *lrt = map(int, sys.stdin.read().split())
a = np.zeros(n, dtype=int)
for l, r, t in zip(*[iter(lrt)] * 3):
a[l - 1 : r] = t
print(*a, sep="\n")
@staticmethod
def c():
n, k, *a = map(int, sys.stdin.read().split())
a = np.array([0] + a)
np.cumsum(a, out=a)
s = (a[k:] - a[:-k]).sum()
print(s)
@staticmethod
def d():
h, w = map(int, sys.stdin.readline().split())
a = [
[int(x) for x in sys.stdin.readline().split()]
for _ in range(h)
]
dyx = [(-1, 0), (0, -1), (1, 0), (0, 1)]
path = [[None] * w for _ in range(h)]
def paths(i, j):
if path[i][j]:
return path[i][j]
val = a[i][j]
cnt = 1
for dy, dx in dyx:
y = i + dy
x = j + dx
if 0 <= y < h and 0 <= x < w and a[y][x] < val:
cnt += paths(y, x)
cnt %= MOD
path[i][j] = cnt
return cnt
tot = 0
for i in range(h):
for j in range(w):
tot += paths(i, j)
tot %= MOD
print(tot)
class ABC038:
@staticmethod
def a():
s = sys.stdin.readline().rstrip()
print("YES" if s[-1] == "T" else "NO")
@staticmethod
def b():
a, b, c, d = map(int, sys.stdin.read().split())
print("YES" if a == c or b == c or a == d or b == d else "NO")
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
a += [-1]
cnt = n
tmp = 1
for i in range(n):
if a[i + 1] > a[i]:
tmp += 1
else:
cnt += tmp * (tmp - 1) // 2
tmp = 1
print(cnt)
@staticmethod
def d():
n, *wh = map(int, sys.stdin.read().split())
wh = sorted(zip(*[iter(wh)] * 2), key=lambda x: (-x[0], x[1]))
w = [x[1] for x in wh][::-1]
res = [inf] * n
for x in w:
res[bi_l(res, x)] = x
print(bi_l(res, inf))
class ABC039:
@staticmethod
def a():
a, b, c = map(int, sys.stdin.readline().split())
print((a * b + b * c + c * a) * 2)
@staticmethod
def b():
x = int(sys.stdin.readline().rstrip())
for n in range(1, int(x**0.5) + 1):
if pow(n, 4) == x:
print(n)
return
@staticmethod
def c():
board = "WBWBWWBWBWBW" * 3
convert = "Do, *, Re, *, Mi, Fa, *, So, *, La, *, Si".split(", ")
s = sys.stdin.readline().rstrip()
print(convert[board.index(s)])
@staticmethod
def d():
h, w = map(int, sys.stdin.readline().split())
s = sys.stdin.read().split()
dyx = list(product((-1, 0, 1), repeat=2))
black_certain = set()
black_before = set()
for i in range(h):
for j in range(w):
black_cand = set()
for dy, dx in dyx:
y = i + dy
x = j + dx
if y < 0 or y >= h or x < 0 or x >= w:
continue
if s[y][x] == ".":
break
black_cand.add((y, x))
else:
black_before.add((i, j))
black_certain |= black_cand
for i in range(h):
for j in range(w):
if s[i][j] == "#" and not (i, j) in black_certain:
print("impossible")
return
print("possible")
for i in range(h):
row = ""
for j in range(w):
row += "#" if (i, j) in black_before else "."
print("".join(row))
class ABC040:
@staticmethod
def a():
n, x = map(int, sys.stdin.readline().split())
print(min(x - 1, n - x))
@staticmethod
def b():
n = int(sys.stdin.readline().rstrip())
res = inf
for i in range(1, int(n**0.5) + 1):
res = min(res, n // i - i + n % i)
print(res)
@staticmethod
def c():
n, *h = map(int, sys.stdin.read().split())
h = [h[0]] + h
cost = [None] * (n + 1)
cost[0] = cost[1] = 0
for i in range(2, n + 1):
cost[i] = min(
cost[i - 2] + abs(h[i] - h[i - 2]),
cost[i - 1] + abs(h[i] - h[i - 1]),
)
print(cost[n])
@staticmethod
def d():
n, m = map(int, sys.stdin.readline().split())
uf = UnionFind(n=n)
queue = []
for _ in range(m):
a, b, y = map(int, sys.stdin.readline().split())
heappush(queue, (-(2 * y), a - 1, b - 1))
q = int(sys.stdin.readline().rstrip())
for i in range(q):
v, y = map(int, sys.stdin.readline().split())
heappush(queue, (-(2 * y + 1), v - 1, i))
res = [None] * q
while queue:
y, i, j = heappop(queue)
if y & 1:
res[j] = uf.size[uf.find_root(i)]
else:
uf.unite(i, j)
print(*res, sep="\n")
class ABC041:
@staticmethod
def a():
s, i = sys.stdin.read().split()
i = int(i)
print(s[i - 1])
@staticmethod
def b():
a, b, c = map(int, sys.stdin.readline().split())
ans = a * b % MOD * c % MOD
print(ans)
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
for i, h in sorted(enumerate(a), key=lambda x: -x[1]):
print(i + 1)
@staticmethod
def d():
n, m, *xy = map(int, sys.stdin.read().split())
(*xy,) = zip(*[iter(xy)] * 2)
edges = [0] * n
for x, y in xy:
x -= 1
y -= 1
edges[x] |= 1 << y
comb = [None] * (1 << n)
comb[0] = 1
def count(edges, bit):
if comb[bit] is not None:
return comb[bit]
comb[bit] = 0
for i in range(n):
if (bit >> i) & 1 and not edges[i]:
nxt_bit = bit & ~(1 << i)
nxt_edges = edges.copy()
for j in range(n):
nxt_edges[j] &= ~(1 << i)
cnt = count(nxt_edges, nxt_bit)
comb[bit] += cnt
return comb[bit]
print(count(edges, (1 << n) - 1))
class ABC042:
@staticmethod
def a():
a = [int(x) for x in sys.stdin.readline().split()]
c = Counter(a)
print("YES" if c[5] == 2 and c[7] == 1 else "NO")
@staticmethod
def b():
n, l, *s = sys.stdin.read().split()
print("".join(sorted(s)))
@staticmethod
def c():
n, k, *d = sys.stdin.read().split()
l = len(n)
ok = sorted(set(string.digits) - set(d))
cand = [int("".join(p)) for p in product(ok, repeat=l)] + [
int(min(x for x in ok if x > "0") + min(ok) * l)
]
print(cand[bi_l(cand, int(n))])
@staticmethod
def d():
h, w, a, b = map(int, sys.stdin.read().split())
combinatorics = Combinatorics(n=2 * 10**5, mod=MOD, numpy=True)
tot = combinatorics.mod_choose(h + w - 2, h - 1)
i = np.arange(h - a, h)
ng = np.sum(
combinatorics.mod_choose(i + b - 1, i)
* combinatorics.mod_choose(h - i + w - b - 2, h - 1 - i)
% MOD
)
tot -= ng
tot %= MOD
print(tot)
class ABC043:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
print((1 + n) * n // 2)
@staticmethod
def b():
s = sys.stdin.readline().rstrip()
t = ""
for c in s:
if c == "B":
t = t[:-1]
else:
t += c
print(t)
@staticmethod
def c():
n, *a = map(int, sys.stdin.read().split())
a = np.array(a)
x = np.around(a.sum() / n).astype(int)
print(np.sum((a - x) ** 2))
@staticmethod
def d():
s = sys.stdin.readline().rstrip()
n = len(s)
for i in range(n - 1):
if s[i] == s[i + 1]:
print(i + 1, i + 2)
return
for i in range(n - 2):
if s[i] == s[i + 2]:
print(i + 1, i + 3)
return
print(-1, -1)
class ABC170:
@staticmethod
def a():
x = [int(x) for x in sys.stdin.readline().split()]
for i in range(5):
if x[i] != i + 1:
print(i + 1)
break
@staticmethod
def b():
x, y = map(int, sys.stdin.readline().split())
print("Yes" if 2 * x <= y <= 4 * x and y % 2 == 0 else "No")
@staticmethod
def c():
x, n, *p = map(int, sys.stdin.read().split())
a = list(set(range(102)) - set(p))
a = [(abs(y - x), y) for y in a]
print(sorted(a)[0][1])
@staticmethod
def d():
n, *a = map(int, sys.stdin.read().split())
cand = set(a)
cnt = 0
for x, c in sorted(Counter(a).items()):
cnt += c == 1 and x in cand
cand -= set(range(x * 2, 10**6 + 1, x))
print(cnt)
@staticmethod
def e():
n, q = map(int, sys.stdin.readline().split())
queue = []
m = 2 * 10**5
infants = [[] for _ in range(m)]
highest_rate = [None] * m
where = [None] * n
rate = [None] * n
def entry(i, k):
where[i] = k
while infants[k]:
r, j = heappop(infants[k])
if where[j] != k or j == i:
continue
if rate[i] >= -r:
highest_rate[k] = rate[i]
heappush(queue, (rate[i], k, i))
heappush(infants[k], (r, j))
break
else:
highest_rate[k] = rate[i]
heappush(queue, (rate[i], k, i))
heappush(infants[k], (-rate[i], i))
def transfer(i, k):
now = where[i]
while infants[now]:
r, j = heappop(infants[now])
if where[j] != now or j == i:
continue
if highest_rate[now] != -r:
highest_rate[now] = -r
heappush(queue, (-r, now, j))
heappush(infants[now], (r, j))
break
else:
highest_rate[now] = None
entry(i, k)
def inquire():
while True:
r, k, i = heappop(queue)
if where[i] != k or r != highest_rate[k]:
continue
heappush(queue, (r, k, i))
return r
for i in range(n):
a, b = map(int, sys.stdin.readline().split())
rate[i] = a
entry(i, b - 1)
for _ in range(q):
c, d = map(int, sys.stdin.readline().split())
transfer(c - 1, d - 1)
print(inquire())
class ABC171:
@staticmethod
def a():
c = sys.stdin.readline().rstrip()
print("A" if c < "a" else "a")
@staticmethod
def b():
n, k, *p = map(int, sys.stdin.read().split())
print(sum(sorted(p)[:k]))
@staticmethod
def c():
n = int(sys.stdin.readline().rstrip())
n -= 1
l = 1
while True:
if n < pow(26, l):
break
n -= pow(26, l)
l += 1
res = "".join(
[chr(ord("a") + d) for d in NumberTheory.base_convert(n, 26)][
::-1
]
)
res = "a" * (l - len(res)) + res
print(res)
@staticmethod
def d():
n = int(sys.stdin.readline().rstrip())
a = [int(x) for x in sys.stdin.readline().split()]
s = sum(a)
cnt = Counter(a)
q = int(sys.stdin.readline().rstrip())
for _ in range(q):
b, c = map(int, sys.stdin.readline().split())
s += (c - b) * cnt[b]
print(s)
cnt[c] += cnt[b]
cnt[b] = 0
@staticmethod
def e():
n, *a = map(int, sys.stdin.read().split())
s = 0
for x in a:
s ^= x
b = map(lambda x: x ^ s, a)
print(*b, sep=" ")
class ABC172:
@staticmethod
def a():
a = int(sys.stdin.readline().rstrip())
print(a * (1 + a + a**2))
@staticmethod
def b():
s, t = sys.stdin.read().split()
print(sum(s[i] != t[i] for i in range(len(s))))
@staticmethod
def c():
n, m, k = map(int, sys.stdin.readline().split())
a = [0] + [int(x) for x in sys.stdin.readline().split()]
b = [int(x) for x in sys.stdin.readline().split()]
(*sa,) = accumulate(a)
(*sb,) = accumulate(b)
res = 0
for i in range(n + 1):
r = k - sa[i]
if r < 0:
break
res = max(res, i + bi_r(sb, r))
print(res)
@staticmethod
def d():
n = int(sys.stdin.readline().rstrip())
f = np.zeros(n + 1, dtype=np.int64)
for i in range(1, n + 1):
f[i::i] += 1
print((np.arange(1, n + 1) * f[1:]).sum())
class ABC173:
@staticmethod
def a():
n = int(sys.stdin.readline().rstrip())
charge = (n + 999) // 1000 * 1000 - n
print(charge)
@staticmethod
def b():
n, *s = sys.stdin.read().split()
c = Counter(s)
for v in "AC, WA, TLE, RE".split(", "):
print(f"{v} x {c[v]}")
@staticmethod
def c():
h, w, k = map(int, sys.stdin.readline().split())
c = [sys.stdin.readline().rstrip() for _ in range(h)]
tot = 0
for i in range(1 << h):
for j in range(1 << w):
cnt = 0
for y in range(h):
for x in range(w):
if i >> y & 1 or j >> x & 1:
continue
cnt += c[y][x] == "#"
tot += cnt == k
print(tot)
@staticmethod
def d():
n, *a = map(int, sys.stdin.read().split())
a.sort(reverse=True)
res = (
a[0]
+ sum(a[1 : 1 + (n - 2) // 2]) * 2
+ a[1 + (n - 2) // 2] * (n & 1)
)
print(res)
@staticmethod
def e():
MOD = 10**9 + 7
n, k, *a = map(int, sys.stdin.read().split())
minus = [x for x in a if x < 0]
plus = [x for x in a if x > 0]
if len(plus) + len(minus) // 2 * 2 >= k: # plus
(*minus,) = map(abs, minus)
minus.sort(reverse=True)
plus.sort(reverse=True)
cand = []
if len(minus) & 1:
minus = minus[:-1]
for i in range(0, len(minus) - 1, 2):
cand.append(minus[i] * minus[i + 1] % MOD)
if k & 1:
res = plus[0]
plus = plus[1:]
else:
res = 1
if len(plus) & 1:
plus = plus[:-1]
for i in range(0, len(plus) - 1, 2):
cand.append(plus[i] * plus[i + 1] % MOD)
cand.sort(reverse=True)
for x in cand[: k // 2]:
res *= x
res %= MOD
print(res)
elif 0 in a:
print(0)
else:
cand = sorted(map(abs, a))
res = 1
for i in range(k):
res *= cand[i]
res %= MOD
res = MOD - res
print(res)
pass
class ABC174:
@staticmethod
def a():
print("Yes" if int(sys.stdin.readline().rstrip()) >= 30 else "No")
class ACL001:
@staticmethod
def a():
n, *xy = map(int, sys.stdin.read().split())
(*xy,) = zip(*[iter(xy)] * 2)
print(xy)
pass
class MSolutions2020:
@staticmethod
def a():
x = int(sys.stdin.readline().rstrip())
x -= 400
print(8 - x // 200)
@staticmethod
def b():
r, g, b, k = map(int, sys.stdin.read().split())
while k and g <= r:
g *= 2
k -= 1
while k and b <= g:
b *= 2
k -= 1
print("Yes" if r < g < b else "No")
@staticmethod
def c():
n, k, *a = map(int, sys.stdin.read().split())
for i in range(k, n):
print("Yes" if a[i] > a[i - k] else "No")
@staticmethod
def d():
n, *a = map(int, sys.stdin.read().split())
a += [-1]
m = 1000
s = 0
for i in range(n):
if a[i + 1] == a[i]:
continue
elif a[i + 1] > a[i]:
cnt = m // a[i]
m -= a[i] * cnt
s += cnt
else:
m += a[i] * s
s = 0
print(m)
class Codeforces:
pass
class ProjectEuler:
@staticmethod
def p1():
def f(n, x):
return (x + n // x * x) * (n // x) // 2
n = 1000
ans = f(n - 1, 3) + f(n - 1, 5) - f(n - 1, 15)
print(ans)
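    # Added note: f(n, x) is the sum of the multiples of x up to n. With k = n // x
    # the multiples are x, 2x, ..., kx, an arithmetic series summing to
    # (x + k*x) * k // 2, which is the expression used above; multiples of 15 are
    # subtracted once since they are counted by both the 3- and 5-terms.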
@staticmethod
def p2():
fib = [1, 2]
while fib[-1] < 4 * 10**6:
fib.append(fib[-1] + fib[-2])
print(sum(fib[1:-1:3]))
@staticmethod
def p3():
number_theory = NumberTheory()
res = number_theory.prime_factorize(600851475143)
print(max(res.keys()))
@staticmethod
def p4():
def is_palindrome(n):
n = str(n)
return n == n[::-1]
cand = []
for a in range(100, 1000):
for b in range(a, 1000):
n = a * b
if is_palindrome(n):
cand.append(n)
print(max(cand))
@staticmethod
def p5():
number_theory = NumberTheory()
res = defaultdict(int)
for i in range(1, 21):
for p, c in number_theory.prime_factorize(i).items():
res[p] = max(res[p], c)
ans = 1
for p, c in res.items():
ans *= pow(p, c)
print(ans)
@staticmethod
def p6():
a = np.arange(101)
b = np.cumsum(a**2)
a = a.cumsum()
print(a[100] ** 2 - b[100])
@staticmethod
def p7():
number_theory = NumberTheory()
print(sorted(number_theory.prime_numbers)[10000])
@staticmethod
def p8():
n = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"
n = [int(d) for d in list(n)]
res = 0
for i in range(988):
x = 1
for j in range(13):
x *= n[i + j]
res = max(res, x)
print(res)
@staticmethod
def p9():
for a in range(1, 997):
for b in range(a, 998 - a):
c = 1000 - a - b
if a**2 + b**2 == c**2:
print(a * b * c)
return
@staticmethod
def p10():
number_theory = NumberTheory(2 * 10**6 - 1)
print(sum(number_theory.prime_numbers))
@staticmethod
def p11():
grid = "08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48"
        # Largest product of four adjacent numbers in any direction (row, column or
        # diagonal) of the 20x20 grid.
        grid = np.array(grid.split(), dtype=np.int64).reshape(20, 20)
        res = 0
        for i in range(20):
            for j in range(20):
                if j + 4 <= 20:
                    res = max(res, grid[i, j:j + 4].prod())  # horizontal
                if i + 4 <= 20:
                    res = max(res, grid[i:i + 4, j].prod())  # vertical
                if i + 4 <= 20 and j + 4 <= 20:
                    sub = grid[i:i + 4, j:j + 4]
                    res = max(res, np.diag(sub).prod())  # diagonal down-right
                    res = max(res, np.diag(np.fliplr(sub)).prod())  # diagonal down-left
        print(res)
class Yukicoder:
pass
if __name__ == "__main__":
AtCoder.ABC011.d()
| 32.137521
| 1,217
| 0.374293
|
a438ef5c97e48703427243c06af3a37a0d3e6373
| 738
|
py
|
Python
|
src/infrastructure/database/connection.py
|
YegorMedvedev/python-onion-scaffold
|
16ca5eebe58b3aee7dd304cc9f7856de6309b5cc
|
[
"MIT"
] | 1
|
2022-02-23T05:07:09.000Z
|
2022-02-23T05:07:09.000Z
|
src/infrastructure/database/connection.py
|
YegorMedvedev/python-onion-scaffold
|
16ca5eebe58b3aee7dd304cc9f7856de6309b5cc
|
[
"MIT"
] | null | null | null |
src/infrastructure/database/connection.py
|
YegorMedvedev/python-onion-scaffold
|
16ca5eebe58b3aee7dd304cc9f7856de6309b5cc
|
[
"MIT"
] | null | null | null |
from os import getenv
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
from sqlalchemy.orm import sessionmaker, Session as SqlSession
from infrastructure.utils.utils import Singleton
class DatabaseConnection(metaclass=Singleton):
engine: Engine
session: SqlSession
base: DeclarativeMeta = declarative_base()
def establish_connection(self):
postgresql_url = getenv("POSTGRESQL_URL")
self.engine = create_engine(postgresql_url)
def generate_database_schema(self):
self.base.metadata.create_all(self.engine)
def create_session(self):
self.session = sessionmaker(bind=self.engine)
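# Usage sketch (added note; nothing below is defined in this module): thanks to the
# Singleton metaclass every instantiation returns the same connection object.
#   db = DatabaseConnection()
#   db.establish_connection()        # reads POSTGRESQL_URL from the environment
#   db.generate_database_schema()    # creates tables for models declared on db.base
#   db.create_session()
#   session = db.session()           # sessionmaker(...) stored above is a factory,
#                                    # so it is called to obtain an actual Session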
| 29.52
| 72
| 0.776423
|
10ffe8bd1c76095040531ba2a0758192085b9081
| 384
|
py
|
Python
|
bebras/2012/2012-IT-04/genImgs.py
|
davidnarum/bebras-tasks
|
57deb129451c0b95947e4ac0cac5022759a4095e
|
[
"MIT"
] | 1
|
2021-05-20T12:01:21.000Z
|
2021-05-20T12:01:21.000Z
|
bebras/2012/2012-IT-04/genImgs.py
|
davidnarum/bebras-tasks
|
57deb129451c0b95947e4ac0cac5022759a4095e
|
[
"MIT"
] | 1
|
2019-09-21T02:22:24.000Z
|
2021-05-29T04:57:27.000Z
|
bebras/2012/2012-IT-04/genImgs.py
|
davidnarum/bebras-tasks
|
57deb129451c0b95947e4ac0cac5022759a4095e
|
[
"MIT"
] | 5
|
2017-08-14T21:03:48.000Z
|
2020-11-01T14:29:59.000Z
|
#! /usr/bin/python3 -B
conv = {}
conv['2012-IT-04_carte_bouee-original.png'] = '-quality 50 -colors 50'
conv['2012-IT-04_carte_bouee_V2-original.png'] = '-quality 50 -colors 50'
conv['2012-IT-04_solution-original.png'] = '-quality 50 -colors 50'
if __name__ == '__main__':
import sys, os
sys.path.append(os.path.join('..', 'scripts'))
from common import *
execute(conv)
| 29.538462
| 73
| 0.677083
|
4be6a81e32af429a9352eb6eb972c5c180fe0996
| 39,116
|
py
|
Python
|
yolov5/utils/datasets.py
|
kungfumas/American-Sign-Language
|
393ba1ba066a3e3e4a60076415fba902649121b3
|
[
"MIT"
] | null | null | null |
yolov5/utils/datasets.py
|
kungfumas/American-Sign-Language
|
393ba1ba066a3e3e4a60076415fba902649121b3
|
[
"MIT"
] | null | null | null |
yolov5/utils/datasets.py
|
kungfumas/American-Sign-Language
|
393ba1ba066a3e3e4a60076415fba902649121b3
|
[
"MIT"
] | null | null | null |
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import xyxy2xywh, xywh2xyxy, torch_distributed_zero_first
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=8):
    # Make sure only the first process in DDP processes the dataset first, so that the following ones can use the cache.
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
rank=rank)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
dataloader = InfiniteDataLoader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn) # torch.utils.data.DataLoader()
return dataloader, dataset
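# Usage sketch (added note; paths and values are illustrative only): `opt` only needs
# a `.single_cls` attribute here and `hyp` is the augmentation hyperparameter dict.
#   loader, dataset = create_dataloader('data/images/train', 640, 16, 32, opt,
#                                       hyp=hyp, augment=True, cache=False, rank=-1)
#   for imgs, targets, paths, shapes in loader:
#       ...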
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers.
Uses same syntax as vanilla DataLoader.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever.
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
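# Added note: InfiniteDataLoader swaps its batch_sampler for a _RepeatSampler, so the
# worker processes are created once and reused across epochs instead of being torn
# down after every epoch; __len__ still reports one epoch's worth of batches, so a
# plain `for batch in loader` loop covers exactly one epoch.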
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception('ERROR: %s does not exist' % p)
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(p, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=640):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
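    # Added note: each source gets its own daemon thread running update(); the thread
    # grabs every frame but decodes only every 4th into self.imgs[i], so __next__
    # always sees a reasonably fresh frame without paying full decode cost per stream.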
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = str(Path(p)) # os-agnostic
parent = str(Path(p).parent) + os.sep
if os.path.isfile(p): # file
with open(p, 'r') as t:
t = t.read().splitlines()
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
elif os.path.isdir(p): # folder
f += glob.iglob(p + os.sep + '*.*')
else:
raise Exception('%s does not exist' % p)
self.img_files = sorted(
[x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats])
except Exception as e:
raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
# Define labels
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
self.label_files = [x.replace(sa, sb, 1).replace(os.path.splitext(x)[-1], '.txt') for x in self.img_files]
# Check cache
cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels
if os.path.isfile(cache_path):
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
cache = self.cache_labels(cache_path) # re-cache
else:
cache = self.cache_labels(cache_path) # cache
# Get labels
labels, shapes = zip(*[cache[x] for x in self.img_files])
self.shapes = np.array(shapes, dtype=np.float64)
self.labels = list(labels)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# Cache labels
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
pbar = enumerate(self.label_files)
if rank in [-1, 0]:
pbar = tqdm(pbar)
for i, file in pbar:
l = self.labels[i] # label
if l is not None and l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
if rank in [-1, 0]:
pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
cache_path, nf, nm, ne, nd, n)
if nf == 0:
s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
print(s)
assert not augment, '%s. Can not train without labels.' % s
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
def cache_labels(self, path='labels.cache'):
# Cache dataset labels, check images and read shapes
x = {} # dict
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for (img, label) in pbar:
try:
l = []
image = Image.open(img)
image.verify() # PIL verify
# _ = io.imread(img) # skimage verify (from skimage import io)
shape = exif_size(image) # image size
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
if os.path.isfile(label):
with open(label, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
if len(l) == 0:
l = np.zeros((0, 5), dtype=np.float32)
x[img] = [l, shape]
except Exception as e:
x[img] = [None, None]
print('WARNING: %s: %s' % (img, e))
x['hash'] = get_hash(self.label_files + self.img_files)
torch.save(x, path) # save for next time
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
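# Added note: the three gains above are drawn uniformly from [1 - hgain, 1 + hgain]
# (and likewise for saturation and value) and applied through 256-entry lookup
# tables; hue is wrapped modulo 180 to stay inside OpenCV's 8-bit hue range.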
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
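# Added note: with mosaic_border = [-img_size // 2] * 2, the centre (xc, yc) above is
# drawn uniformly from [s/2, 3s/2] on the 2s x 2s canvas, so every tile contributes
# some visible image; random_perspective() is then called with border=mosaic_border,
# which crops the canvas back down to the s x s training size.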
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
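# Worked example (added note): a 720x1280 (h, w) image with new_shape=640 gives
# r = min(640/720, 640/1280) = 0.5, so new_unpad = (640, 360) and dh = 280; with
# auto=True the padding is reduced modulo 64 to 24 px, split 12 px top and bottom,
# yielding a 384x640 letterboxed image whose sides remain multiples of 32.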
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
if perspective:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
else: # affine
xy = xy[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) # candidates
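# Worked example (added note): a 100x50 box that shrinks to 30x4 after augmentation
# passes the size check (both sides > 2 px) and the aspect-ratio check (7.5 < 20),
# but fails the area test because 30*4 / (100*50) = 0.024 < 0.1, so it is discarded.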
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def reduce_img_size(path='path/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
def recursive_dataset2bmp(dataset='path/dataset_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='path/images.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
| 41.480382
| 238
| 0.531138
|