blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
445a0485ed0661d1bcc771ce4a5393dd09284f34 | ef1f62cf4e53f856bf763ac0dee73f054518530d | /Week_08/231.Power_of_Two.py | 3f3ec4663f705a473f8c713c5b03eefb4fc9e285 | [] | no_license | ZHHJemotion/algorithm008-class01 | 3338af3619d8e1754a62af6a852f517b47298d95 | 5bb7d2b74110df0b5788b94c69582552d711563a | refs/heads/master | 2022-11-12T09:26:24.941738 | 2020-06-30T15:29:20 | 2020-06-30T15:29:20 | 255,102,230 | 0 | 0 | null | 2020-04-12T14:39:17 | 2020-04-12T14:39:17 | null | UTF-8 | Python | false | false | 564 | py | # Given an integer, write a function to determine if it is a power of two.
#
# Example 1:
#
#
# Input: 1
# Output: true
# Explanation: 2**0 = 1
#
#
# Example 2:
#
#
# Input: 16
# Output: true
# Explanation: 2**4 = 16
#
# Example 3:
#
#
# Input: 218
# Output: false
# Related Topics Math Bit Manipulation
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
    def isPowerOfTwo(self, n: int) -> bool:
        """Return True when n is a positive power of two.

        A power of two has exactly one set bit, so clearing the lowest
        set bit with ``n & (n - 1)`` must leave zero.
        """
        return n > 0 and not (n & (n - 1))
# leetcode submit region end(Prohibit modification and deletion)
| [
"zhhjemotion@hotmail.com"
] | zhhjemotion@hotmail.com |
f9d5fd9ac3b57c62d986603e4fc8602020d3b07a | 77f07d6f08a3c401f528a4aa1fa8308e12598f44 | /urls.py | 18ac98ada7b430af52f4637aac387395a81b56aa | [] | no_license | sgammon/AppEngine-Toolkit-Skeleton | 7de4b9184d501865e1aae35a7c8f7b2a398b859a | df7a97333fcea8915c038de67c6836e7756a3961 | refs/heads/master | 2016-09-11T06:59:11.412514 | 2012-01-03T06:01:05 | 2012-01-03T06:01:05 | 2,559,656 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | # -*- coding: utf-8 -*-
"""
urls
~~~~
URL definitions.
:copyright: 2009 by tipfy.org.
:license: BSD, see LICENSE.txt for more details.
"""
from config import config
from webapp2 import import_string
def get_rules():
    """Collect the URL rules for the application.

    Walks the ``apps_installed`` list from the ``webapp2`` config section
    and aggregates the rules exported by each app's ``routing`` module.
    Apps that don't ship a routing module are skipped silently.

    :return:
        A list of rule instances aggregated from all installed apps.
    """
    collected = []
    for app_name in config.get('webapp2')['apps_installed']:
        try:
            # Load the app's routing module and merge its rules.
            routing = import_string('%s.routing' % app_name)
            collected.extend(routing.get_rules())
        except ImportError:
            # No routing module for this app -- nothing to add.
            pass
    return collected
"sgammon@bluestatedigital.com"
] | sgammon@bluestatedigital.com |
d5588920e633f2f3b60e6e906e76fbd21802c787 | 463c053bcf3f4a7337b634890720ea9467f14c87 | /dashboard/modules/dashboard_sdk.py | 900ddc042fa5543ae5a46dc8aa8ca557608e35f6 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | pdames/ray | e8faddc4440976211a6bcead8f8b6e62c1dcda01 | 918d3601c6519d333f10910dc75eb549cbb82afa | refs/heads/master | 2023-01-23T06:11:11.723212 | 2022-05-06T22:55:59 | 2022-05-06T22:55:59 | 245,515,407 | 1 | 1 | Apache-2.0 | 2023-01-14T08:02:21 | 2020-03-06T20:59:04 | Python | UTF-8 | Python | false | false | 11,982 | py | import dataclasses
import importlib
import logging
import json
import yaml
from pathlib import Path
import tempfile
from typing import Any, Dict, List, Optional
from pkg_resources import packaging
try:
import aiohttp
import requests
except ImportError:
aiohttp = None
requests = None
from ray._private.runtime_env.packaging import (
create_package,
get_uri_for_directory,
get_uri_for_package,
)
from ray._private.runtime_env.py_modules import upload_py_modules_if_needed
from ray._private.runtime_env.working_dir import upload_working_dir_if_needed
from ray.dashboard.modules.job.common import uri_to_http_components
from ray.ray_constants import DEFAULT_DASHBOARD_PORT
from ray.util.annotations import PublicAPI
from ray.client_builder import _split_address
from ray.autoscaler._private.cli_logger import cli_logger
# Module-level logger for the dashboard SDK.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def parse_runtime_env_args(
    runtime_env: Optional[str] = None,
    runtime_env_json: Optional[str] = None,
    working_dir: Optional[str] = None,
):
    """
    Build a runtime_env dictionary from the `runtime_env`,
    `runtime_env_json`, and `working_dir` CLI options.

    `runtime_env` (a YAML file path) and `runtime_env_json` (an inline
    JSON string) are mutually exclusive.  A `working_dir` option always
    overrides any `working_dir` found in the parsed runtime_env.
    """
    if runtime_env is not None and runtime_env_json is not None:
        raise ValueError(
            "Only one of --runtime_env and --runtime-env-json can be provided."
        )

    if runtime_env is not None:
        with open(runtime_env, "r") as yaml_file:
            parsed = yaml.safe_load(yaml_file)
    elif runtime_env_json is not None:
        parsed = json.loads(runtime_env_json)
    else:
        parsed = {}

    if working_dir is not None:
        if "working_dir" in parsed:
            cli_logger.warning(
                "Overriding runtime_env working_dir with --working-dir option"
            )
        parsed["working_dir"] = working_dir

    return parsed
@dataclasses.dataclass
class ClusterInfo:
    """Connection details for a Ray dashboard server.

    Produced by ``get_job_submission_client_cluster_info`` /
    ``parse_cluster_info`` and consumed by ``SubmissionClient``.
    """

    address: str  # Full HTTP(S) address, e.g. "http://host:8265".
    cookies: Optional[Dict[str, Any]] = None  # Cookies sent with every request.
    metadata: Optional[Dict[str, Any]] = None  # Stored as SubmissionClient._default_metadata.
    headers: Optional[Dict[str, Any]] = None  # Per-request headers, e.g. auth for remote clusters.
# TODO (shrekris-anyscale): renaming breaks compatibility, do NOT rename
def get_job_submission_client_cluster_info(
    address: str,
    # For backwards compatibility
    *,
    # only used in importlib case in parse_cluster_info, but needed
    # in function signature.
    create_cluster_if_needed: Optional[bool] = False,
    cookies: Optional[Dict[str, Any]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    headers: Optional[Dict[str, Any]] = None,
    _use_tls: Optional[bool] = False,
) -> ClusterInfo:
    """Build the ClusterInfo that SubmissionClient uses for ``address``.

    ``address`` is a bare ``host`` or ``host:port`` string (no scheme
    prefix); when the port is omitted, the Ray dashboard default is used.

    Args:
        address (str): Address without the module prefix that is passed
            to SubmissionClient.
        create_cluster_if_needed (bool): Indicates whether the cluster
            of the address returned needs to be running. Ray doesn't
            start a cluster before interacting with jobs, but other
            implementations may do so.

    Returns:
        ClusterInfo carrying the resolved HTTP(S) address plus the given
        cookies, metadata, and headers.
    """
    scheme = "https" if _use_tls else "http"

    parts = address.split(":")
    if len(parts) > 2:
        raise ValueError(f"Invalid address: {address}.")
    host = parts[0]
    port = int(parts[1]) if len(parts) == 2 else DEFAULT_DASHBOARD_PORT

    return ClusterInfo(
        address=f"{scheme}://{host}:{port}",
        cookies=cookies,
        metadata=metadata,
        headers=headers,
    )
def parse_cluster_info(
    address: str,
    create_cluster_if_needed: bool = False,
    cookies: Optional[Dict[str, Any]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    headers: Optional[Dict[str, Any]] = None,
) -> ClusterInfo:
    """Resolve ``address`` into the ClusterInfo used by SubmissionClient.

    Supported address forms:
      * ``http://...`` / ``https://...`` -- handled directly by
        ``get_job_submission_client_cluster_info``.
      * ``<module>://...`` -- ``<module>`` is imported dynamically and must
        expose its own ``get_job_submission_client_cluster_info``.
      * ``ray://...`` -- rejected: dashboard submission needs the HTTP(S)
        dashboard address, not a Ray client address.

    Raises:
        ValueError: for a ``ray://`` address.
        RuntimeError: if the module prefix can't be imported or doesn't
            expose ``get_job_submission_client_cluster_info``.
    """
    module_string, inner_address = _split_address(address)

    # If user passes in ray://, raise error. Dashboard submission should
    # not use a Ray client address.
    if module_string == "ray":
        raise ValueError(
            f'Got an unexpected Ray client address "{address}" while trying '
            "to connect to the Ray dashboard. The dashboard SDK requires the "
            "Ray dashboard server's HTTP(S) address (which should start with "
            '"http://" or "https://", not "ray://"). If this address '
            "wasn't passed explicitly, it may be set in the RAY_ADDRESS "
            "environment variable."
        )

    # If user passes http(s)://, go through normal parsing.
    if module_string in {"http", "https"}:
        return get_job_submission_client_cluster_info(
            inner_address,
            create_cluster_if_needed=create_cluster_if_needed,
            cookies=cookies,
            metadata=metadata,
            headers=headers,
            _use_tls=module_string == "https",
        )

    # Otherwise, try to dynamically import the module providing the
    # cluster info.
    try:
        module = importlib.import_module(module_string)
    except Exception:
        raise RuntimeError(
            f"Module: {module_string} does not exist.\n"
            f"This module was parsed from Address: {address}"
        ) from None

    # Explicit check instead of `assert`: asserts are stripped under
    # `python -O`, which would turn a bad module into a confusing
    # AttributeError at the call below.
    if not hasattr(module, "get_job_submission_client_cluster_info"):
        raise RuntimeError(
            f"Module: {module_string} does "
            "not have `get_job_submission_client_cluster_info`."
        )

    return module.get_job_submission_client_cluster_info(
        inner_address,
        create_cluster_if_needed=create_cluster_if_needed,
        cookies=cookies,
        metadata=metadata,
        headers=headers,
    )
class SubmissionClient:
    """Base HTTP client for the Ray dashboard's REST endpoints.

    Resolves ``address`` into connection details via ``parse_cluster_info``
    and attaches the resulting cookies/headers to every request it sends.
    """

    def __init__(
        self,
        address: str,
        create_cluster_if_needed=False,
        cookies: Optional[Dict[str, Any]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        headers: Optional[Dict[str, Any]] = None,
    ):
        cluster_info = parse_cluster_info(
            address, create_cluster_if_needed, cookies, metadata, headers
        )
        self._address = cluster_info.address
        self._cookies = cluster_info.cookies
        self._default_metadata = cluster_info.metadata or {}
        # Headers used for all requests sent to job server, optional and only
        # needed for cases like authentication to remote cluster.
        self._headers = cluster_info.headers

    def _check_connection_and_version(
        self, min_version: str = "1.9", version_error_message: Optional[str] = None
    ):
        """Raise unless the server is reachable and runs Ray >= ``min_version``.

        Raises:
            RuntimeError: if ``/api/version`` is missing (404, i.e. the
                server predates the endpoint) or reports a version older
                than ``min_version``.
            ConnectionError: if the server cannot be reached at all.
        """
        if version_error_message is None:
            version_error_message = (
                f"Please ensure the cluster is running Ray {min_version} or higher."
            )

        try:
            r = self._do_request("GET", "/api/version")
            if r.status_code == 404:
                # A 404 means the endpoint doesn't exist on this server.
                raise RuntimeError(version_error_message)
            r.raise_for_status()

            running_ray_version = r.json()["ray_version"]
            if packaging.version.parse(running_ray_version) < packaging.version.parse(
                min_version
            ):
                raise RuntimeError(version_error_message)
            # TODO(edoakes): check the version if/when we break compatibility.
        except requests.exceptions.ConnectionError:
            raise ConnectionError(
                f"Failed to connect to Ray at address: {self._address}."
            )

    def _raise_error(self, r: "requests.Response"):
        # Uniform error raised for any unexpected HTTP response.
        raise RuntimeError(
            f"Request failed with status code {r.status_code}: {r.text}."
        )

    def _do_request(
        self,
        method: str,
        endpoint: str,
        *,
        data: Optional[bytes] = None,
        json_data: Optional[dict] = None,
    ) -> "requests.Response":
        """Send one HTTP request to ``endpoint`` with this client's cookies
        and headers attached, and return the raw response."""
        url = self._address + endpoint
        logger.debug(f"Sending request to {url} with json data: {json_data or {}}.")
        return requests.request(
            method,
            url,
            cookies=self._cookies,
            data=data,
            json=json_data,
            headers=self._headers,
        )

    def _package_exists(
        self,
        package_uri: str,
    ) -> bool:
        """Return True if the server already stores ``package_uri``.

        Raises via ``_raise_error`` for any status other than 200/404.
        """
        protocol, package_name = uri_to_http_components(package_uri)
        r = self._do_request("GET", f"/api/packages/{protocol}/{package_name}")

        if r.status_code == 200:
            logger.debug(f"Package {package_uri} already exists.")
            return True
        elif r.status_code == 404:
            logger.debug(f"Package {package_uri} does not exist.")
            return False
        else:
            self._raise_error(r)

    def _upload_package(
        self,
        package_uri: str,
        package_path: str,
        include_parent_dir: Optional[bool] = False,
        excludes: Optional[List[str]] = None,
        is_file: bool = False,
    ) -> None:
        """Upload the file or directory at ``package_path`` as ``package_uri``.

        When ``is_file`` is False, the directory is first zipped into a
        temporary package which is deleted after the upload; a
        user-supplied file (``is_file=True``) is never deleted.
        """
        logger.info(f"Uploading package {package_uri}.")
        with tempfile.TemporaryDirectory() as tmp_dir:
            protocol, package_name = uri_to_http_components(package_uri)
            if is_file:
                package_file = Path(package_path)
            else:
                package_file = Path(tmp_dir) / package_name
                create_package(
                    package_path,
                    package_file,
                    include_parent_dir=include_parent_dir,
                    excludes=excludes,
                )
            try:
                r = self._do_request(
                    "PUT",
                    f"/api/packages/{protocol}/{package_name}",
                    data=package_file.read_bytes(),
                )
                if r.status_code != 200:
                    self._raise_error(r)
            finally:
                # If the package is a user's existing file, don't delete it.
                if not is_file:
                    package_file.unlink()

    def _upload_package_if_needed(
        self,
        package_path: str,
        include_parent_dir: bool = False,
        excludes: Optional[List[str]] = None,
        is_file: bool = False,
    ) -> str:
        """Upload ``package_path`` unless the server already has it.

        Returns:
            The content-addressed package URI in either case.
        """
        if is_file:
            package_uri = get_uri_for_package(Path(package_path))
        else:
            package_uri = get_uri_for_directory(package_path, excludes=excludes)

        if not self._package_exists(package_uri):
            self._upload_package(
                package_uri,
                package_path,
                include_parent_dir=include_parent_dir,
                excludes=excludes,
                is_file=is_file,
            )
        else:
            logger.info(f"Package {package_uri} already exists, skipping upload.")

        return package_uri

    def _upload_working_dir_if_needed(self, runtime_env: Dict[str, Any]):
        """Upload the runtime_env's working_dir (if any) via this client."""
        def _upload_fn(working_dir, excludes, is_file=False):
            self._upload_package_if_needed(
                working_dir,
                include_parent_dir=False,
                excludes=excludes,
                is_file=is_file,
            )

        upload_working_dir_if_needed(runtime_env, upload_fn=_upload_fn)

    def _upload_py_modules_if_needed(self, runtime_env: Dict[str, Any]):
        """Upload the runtime_env's py_modules entries via this client."""
        def _upload_fn(module_path, excludes, is_file=False):
            self._upload_package_if_needed(
                module_path, include_parent_dir=True, excludes=excludes, is_file=is_file
            )

        upload_py_modules_if_needed(runtime_env, upload_fn=_upload_fn)

    @PublicAPI(stability="beta")
    def get_version(self) -> str:
        """Return the dashboard server's reported version string."""
        r = self._do_request("GET", "/api/version")
        if r.status_code == 200:
            return r.json().get("version")
        else:
            self._raise_error(r)
| [
"noreply@github.com"
] | pdames.noreply@github.com |
20eb4f23b767b4473d56ae05a400d01d40f44b95 | 8e352bddc79e22604cdc23bf2e33d3d36dd30502 | /linux_rest_api/filesystem/modes.py | ce03c36aced81c91a7b74f35753ef53c03f2fb36 | [] | no_license | kissgyorgy/linux-rest-api | 330950ba7e23932dd2cf3a1b026f83f587773110 | 3f46031f88e75c0a317f96cdb123fe5980877bcb | refs/heads/master | 2023-02-16T12:47:54.960902 | 2018-09-20T05:05:14 | 2020-09-23T11:56:33 | 143,931,513 | 7 | 2 | null | 2023-02-02T11:47:12 | 2018-08-07T22:06:32 | Python | UTF-8 | Python | false | false | 391 | py | import stat
def octal_mode(st_mode: int) -> str:
    """Return the permission bits of ``st_mode`` as an octal string (no prefix)."""
    permission_bits = stat.S_IMODE(st_mode)
    return format(permission_bits, "o")
def symbolic_mode(st_mode: int) -> str:
    """Return chmod-style symbolic permissions, e.g. ``"u=rw,g=r,o=r"``."""
    bits = long_mode(st_mode)  # 9-char rwx string, e.g. "rw-r--r--"
    user, group, other = (bits[i:i + 3].replace("-", "") for i in (0, 3, 6))
    return f"u={user},g={group},o={other}"
def long_mode(st_mode: int) -> str:
    """Return the 9-character ``ls -l`` permission string (file-type char dropped)."""
    full = stat.filemode(st_mode)
    return full[1:]
| [
"kissgyorgy@me.com"
] | kissgyorgy@me.com |
5753358a6c496e08c5026fc69e200f9fea67ff63 | 06f7ffdae684ac3cc258c45c3daabce98243f64f | /vsts/vsts/git/v4_1/models/git_commit_changes.py | 9d82a62347a190cdf6e2bb739ac857cde0364cd7 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | kenkuo/azure-devops-python-api | 7dbfb35f1c9637c9db10207824dd535c4d6861e8 | 9ac38a97a06ee9e0ee56530de170154f6ed39c98 | refs/heads/master | 2020-04-03T17:47:29.526104 | 2018-10-25T17:46:09 | 2018-10-25T17:46:09 | 155,459,045 | 0 | 0 | MIT | 2018-10-30T21:32:43 | 2018-10-30T21:32:42 | null | UTF-8 | Python | false | false | 1,142 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class GitCommitChanges(Model):
    """GitCommitChanges.

    :param change_counts:
    :type change_counts: dict
    :param changes:
    :type changes: list of :class:`object <git.v4_1.models.object>`
    """

    # msrest (de)serialization map: wire field name and type for each
    # attribute; '{int}' means a dict with int values, '[object]' a list.
    _attribute_map = {
        'change_counts': {'key': 'changeCounts', 'type': '{int}'},
        'changes': {'key': 'changes', 'type': '[object]'}
    }

    def __init__(self, change_counts=None, changes=None):
        super(GitCommitChanges, self).__init__()
        self.change_counts = change_counts
        self.changes = changes
| [
"tedchamb@microsoft.com"
] | tedchamb@microsoft.com |
acb8c308b81cccc7646965c8b7c9207a7d2d4b91 | 7a7a094f77a178aba06fae2176919f926119c356 | /data_structures_and_algorithms/adjacency_matrix.py | 651829845865a5c336bc7f66a83ea2dd84abd223 | [] | no_license | vlad-bezden/data_structures_and_algorithms | 9fd0c67a16ff1893d830ae68f43cabb75f5d6a99 | 3ba9b904ed5955de24053cb5941a7c5a71600106 | refs/heads/master | 2021-07-03T02:12:37.207767 | 2020-09-12T12:58:13 | 2020-09-12T12:58:13 | 165,452,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | """
Converts graph presented as dict to matrix
{'A': ['B', 'C'],
'B': ['A', 'C', 'E'],
'C': ['A', 'B', 'E', 'F'],
'E': ['B', 'C'],
'F': ['C']}
[[0, 1, 1, 0, 0],
[1, 0, 1, 1, 0],
[1, 1, 0, 1, 1],
[0, 1, 1, 0, 0],
[0, 0, 1, 0, 0]]
"""
from pprint import pprint
from typing import Dict, List
Row = List[int]  # one adjacency-matrix row of 0/1 flags
Matrix = List[Row]  # full adjacency matrix
Graph = Dict[str, List[str]]  # vertex name -> neighbour list
def graph_to_matrix(graph: Graph) -> Matrix:
    """Build the adjacency matrix of ``graph``.

    Vertices are ordered by their sorted keys; cell [i][j] is 1 when the
    j-th vertex appears in the i-th vertex's neighbour list, else 0.
    """
    nodes = sorted(graph)
    return [
        [1 if dst in graph[src] else 0 for dst in nodes]
        for src in nodes
    ]
def main():
    """Demo: print a sample graph and its adjacency matrix."""
    graph = {
        "A": ["B", "C"],
        "B": ["A", "C", "E"],
        "C": ["A", "B", "E", "F"],
        "E": ["B", "C"],
        "F": ["C"],
    }
    pprint(graph)
    pprint(graph_to_matrix(graph))


if __name__ == "__main__":
    main()
| [
"vlad.bezden@gmail.com"
] | vlad.bezden@gmail.com |
a32848f32d5ce3a38b936d720983ea66cef5905c | 81539aba88c22cf75bd2e14f5e0e92f2bf54e962 | /DarkMatterMap2017/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV_madgraph_mcatnlo_pythia8/TTbarDMJets_Dilepton_pseudoscalar_LO_Mchi-20_Mphi-100_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV_madgraph_mcatnlo_pythia8_240000_3_cff.py | 44dfb28c4e034a9ba5da9f22db66bb4acc808144 | [] | no_license | nistefan/RandomizedParametersSeparator | ad35b48b95e9745814c0bf9d8d8b6eb8aa479177 | 66a0e291b59113c6b5301768f1c10e36cf23d3c3 | refs/heads/master | 2021-01-03T00:41:17.415005 | 2020-02-19T13:30:54 | 2020-02-19T13:30:54 | 239,838,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,340 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, lumisToProcess = cms.untracked.VLuminosityBlockRange(*('1:13096', '1:11582', '1:17956', '1:17355', '1:17456', '1:17827', '1:22867', '1:23024', '1:20788', '1:23181', '1:11217', '1:12681', '1:12703', '1:22540', '1:22572', '1:22719', '1:11095', '1:11112', '1:11514', '1:11665', '1:13465', '1:13924', '1:13248', '1:21437', '1:21438', '1:11775', '1:11808', '1:11848', '1:13190', '1:21508', '1:21526', '1:21326', '1:24039', '1:18731', '1:13388', '1:13994', '1:21762', '1:24337', '1:12050', '1:18932', '1:18753', '1:14247', '1:16589', '1:24065', '1:24230', '1:24356', '1:24174', '1:24340', '1:24918', '1:24790', '1:12013', '1:17670', '1:22069', '1:17036', '1:17063', '1:13879', '1:22523', '1:12310', '1:22147', '1:22587', '1:11357', '1:11479', '1:11796', '1:11746', '1:13927', '1:11279', '1:17187', '1:23423', '1:13544', '1:13875', '1:13911', '1:17305', '1:15013', '1:22456', '1:22533', '1:22594', '1:14987', '1:21358', '1:23675', '1:23878', ))
)
readFiles.extend( ['/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/AA555549-5912-EA11-8B41-3417EBE7063F.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/0E098A33-3703-EA11-871C-0CC47A4D7650.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/7C71D678-5A04-EA11-8EE7-0CC47A4D7626.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/96A81BD1-2810-EA11-96F8-0CC47A7C34A6.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/48005C4A-5912-EA11-A211-002590495B2C.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/8C290DDD-3DFF-E911-9EE2-24BE05C49891.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/2E76A192-7DFF-E911-9EC9-008CFA110C70.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/98E4FECD-5812-EA11-A767-00259029E81A.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/42071C23-1F03-EA11-9133-48FD8EE73AEF.root', 
'/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/240000/9E09CF11-7101-EA11-A096-0242AC130002.root']); | [
"Nicole.Stefanov@cern.ch"
] | Nicole.Stefanov@cern.ch |
19bab16feffcbe12224818535825db475ac0c04c | 12a8cc08189cbaf84f4a3fd3a54595097a03ef3c | /app/main/forms.py | 3a10472078aa8b8e9aab50947a6c0a1c07018340 | [] | no_license | kepha-okari/watchlist-2 | 723acc9a616f10b1caab4c245763856b5c055c54 | 0cadf9f905d8788dc0999d4addd506d03949d33c | refs/heads/master | 2022-03-27T21:34:43.102901 | 2017-12-16T08:58:45 | 2017-12-16T08:58:45 | 114,447,275 | 0 | 1 | null | 2020-01-28T18:48:26 | 2017-12-16T08:57:20 | Python | UTF-8 | Python | false | false | 387 | py | from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField
from wtforms.validators import Required
class ReviewForm(FlaskForm):
    """WTForms form for submitting a movie review.

    Fields:
        title: short review title (required).
        review: the review body text (required).
        submit: submission button.
    """

    title = StringField('Review title', validators=[Required()])
    review = TextAreaField('Movie review', validators=[Required()])
    submit = SubmitField('Submit')
| [
"kephaokari@gmail.com"
] | kephaokari@gmail.com |
b94406750358432b3d6cb7ea425f4f5ff477df4d | 45b64f620e474ac6d6b2c04fbad2730f67a62b8e | /Varsity-Final-Project-by-Django-master/.history/project/quiz/admin_20210424134332.py | ed0af28881c12976366a229f411022f9ab0e15c5 | [] | no_license | ashimmitra/Final-Project | 99de00b691960e25b1ad05c2c680015a439277e0 | a3e1d3c9d377e7b95b3eaf4dbf757a84a3858003 | refs/heads/master | 2023-04-11T06:12:35.123255 | 2021-04-26T15:41:52 | 2021-04-26T15:41:52 | 361,796,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | from django.contrib import admin
from quiz.models import Quiz
from quiz.models import ICT
from quiz.models import Ban
from quiz.models import Math
from quiz.models import Science
from quiz.models import GK
# Admin configurations: each subject's changelist shows only the question text.


class QuizAdmin(admin.ModelAdmin):
    list_display = ('question',)


class ICTAdmin(admin.ModelAdmin):
    list_display = ('question',)


class BanAdmin(admin.ModelAdmin):
    list_display = ('question',)


class MathAdmin(admin.ModelAdmin):
    list_display = ('question',)


class ScienceAdmin(admin.ModelAdmin):
    list_display = ('question',)


class GKAdmin(admin.ModelAdmin):
    list_display = ('question',)
# Register each quiz model with its admin configuration.
admin.site.register(Quiz, QuizAdmin)
admin.site.register(ICT, ICTAdmin)
admin.site.register(Ban, BanAdmin)
admin.site.register(Math, MathAdmin)
admin.site.register(Science, ScienceAdmin)
admin.site.register(GK, GKAdmin)
# Removed `admin.site.register(MA, MAAdmin)`: neither `MA` nor `MAAdmin`
# is defined or imported anywhere in this module, so that line raised
# NameError as soon as the admin module was imported.
| [
"34328617+ashimmitra@users.noreply.github.com"
] | 34328617+ashimmitra@users.noreply.github.com |
0ef419920e487fcf0cd41aee9cd790d9ebac5369 | 9073e836f6f66110af04110b0d0117ab224fede4 | /eg_26.py | ef0f7c0bf58fe077236400e5018daf4a69b8d24b | [] | no_license | striveman1379/python100_examples | ec860d65aeff62b7e31b30798e7ca38db1297eec | c701b4444469a03efac3436f2a65199615f9e3cb | refs/heads/master | 2020-04-18T00:57:15.114970 | 2018-11-06T09:39:01 | 2018-11-06T09:39:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | # -*- coding:utf-8 -*-
# Use recursion to compute 5! (factorial).
def num(n):
    """Return n! for a non-negative integer n, computed recursively.

    Raises:
        ValueError: if n is negative (the previous version recursed
            forever on negative input, dying with RecursionError).
    """
    if n < 0:
        raise ValueError('factorial is undefined for negative numbers')
    if n == 0:
        return 1
    return n * num(n - 1)
# Print the worked example: 5! computed by the recursive helper above.
result = num(5)
print('5!= %d' % result)
"you@example.com"
] | you@example.com |
61aca2f7a8eb22d3acbc9d35bbd3bcf742e3de7f | 1b60c5833acfb2669b1b51dc2a3616b6017986b6 | /question_answering/utils/utils.py | 48dbbe97f9cd7fb282bcfbc7fdfbe2e354cea37a | [] | no_license | akoshel/QuestionAnswering | 1a61a53c1b3fadde6ae6361a5d628b57625da39b | c33ac39945947df880f0d390fddfd0e0daf9dda8 | refs/heads/main | 2023-07-27T19:12:29.988477 | 2021-09-14T10:28:54 | 2021-09-14T10:28:54 | 385,997,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | from loguru import logger
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
def train_epoch(model: nn.Module, iterator: DataLoader, criterion, optimizer, device, writer):
    """Run one training epoch over ``iterator`` and return the mean loss.

    Each batch must be a tuple of
    ``(features, attention_mask, start_token, end_token)``; the model must
    return ``(start_logits, end_logits)`` for ``(features, attention_mask)``.
    The batch loss is the average of the start- and end-position losses.
    """
    model.train()
    epoch_loss = 0.0
    logger.info("train epoch started")
    for i, batch in enumerate(iterator):
        features, attention_mask, start_token, end_token = batch
        # Clear gradients from the previous step; the original omitted
        # this, so gradients silently accumulated across every batch.
        optimizer.zero_grad()
        start_logits, end_logits = model(features.to(device), attention_mask.to(device))
        start_loss = criterion(start_logits, start_token.to(device))
        end_loss = criterion(end_logits, end_token.to(device))
        total_loss = (start_loss + end_loss) / 2
        total_loss.backward()
        optimizer.step()
        # Accumulate a detached Python float so each batch's autograd
        # graph can be freed immediately instead of being retained.
        epoch_loss += total_loss.item()
        if i % 100 == 0:
            logger.info("iteration {i} loss {l}", i=i, l=total_loss)
            writer.add_scalar('train loss', total_loss.item())
    return epoch_loss / len(iterator)
def validate(model: nn.Module, iterator: DataLoader, criterion, device, writer):
    """Compute the mean validation loss over ``iterator`` (no weight updates)."""
    model.eval()
    running_loss = 0
    logger.info("Eval started")
    with torch.no_grad():
        for step, batch in enumerate(iterator):
            features, attention_mask, start_token, end_token = batch
            start_logits, end_logits = model(features.to(device), attention_mask.to(device))
            loss_start = criterion(start_logits, start_token.to(device))
            loss_end = criterion(end_logits, end_token.to(device))
            # Batch loss is the mean of the two position losses.
            batch_loss = (loss_start + loss_end) / 2
            running_loss += batch_loss
            writer.add_scalar('validation loss', batch_loss.item())
    return running_loss / len(iterator)
| [
"johndoe@example.com"
] | johndoe@example.com |
a57086090d53b07ccbeee482c97989fabc0d8994 | 3523676133fe91bd69d87b60757af041cc8b603b | /offset/time.py | 3705ffc2cf60ca09f6874880f35622a4fa76db9a | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | cautonwong/offset | 247377c0756970b7f6fef37fc4d37c192510abc7 | b8561635a4cb44a9f47d086163f4d0b58bb8fd74 | refs/heads/master | 2021-01-15T22:25:07.504578 | 2014-08-10T18:59:58 | 2014-08-10T18:59:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,407 | py | # -*- coding: utf-8 -
#
# This file is part of offset. See the NOTICE for more information.
from .core.util import nanotime, from_nanotime
from .core import timer
from .core.chan import makechan, select
# Duration units, expressed in nanoseconds (the base unit of the runtime).
NANOSECOND = 1
MICROSECOND = 1000 * NANOSECOND
MILLISECOND = 1000 * MICROSECOND
SECOND = 1000 * MILLISECOND
MINUTE = 60 * SECOND
HOUR = 60 * MINUTE

# Convenience aliases re-exported from the core runtime.
nano = nanotime
sleep = timer.sleep
def _sendtime(now, t, c):
    # Timer callback: convert ``now`` (nanoseconds) to a time value and
    # send it on channel ``c`` via select.
    # NOTE(review): ``t`` is unused here; presumably the runtime timer
    # passes the timer object to its callback -- confirm against core.timer.
    select(c.if_send(from_nanotime(now)))
class Timer(object):
    """ The Timer instance represents a single event.
    When the timer expires, the current time will be sent on c """

    def __init__(self, interval):
        # Capacity-1 channel so the single expiry can be buffered even if
        # no receiver is waiting yet.
        self.c = makechan(1)
        # Runtime timer that fires once after `interval`, delivering the
        # time on self.c through _sendtime.
        self.t = timer.Timer(_sendtime, interval, args=(self.c,))
        self.t.start()

    def reset(self, interval):
        """ reset the timer interval """
        # Compute the new absolute deadline before stopping, then restart
        # the underlying runtime timer with that deadline.
        w = nanotime() + interval
        self.t.stop()
        self.t.when = w
        self.t.start()

    def stop(self):
        # Cancel the pending expiry and close the channel.
        self.t.stop()
        self.c.close()
def After(interval):
    """Return a channel that receives the current time once ``interval``
    has elapsed.

    Equivalent to ``Timer(interval).c``.
    """
    t = Timer(interval)
    return t.c
def AfterFunc(interval, func, args=None, kwargs=None):
    """Schedule ``func`` to run once after ``interval`` elapses.

    Returns the underlying timer; its ``stop`` method cancels the call.
    """
    pending = timer.Timer(func, interval, args=args, kwargs=kwargs)
    pending.start()
    return pending
class Ticker(object):
    """ returns a new Ticker containing a channel that will send the
    time with a period specified by the duration argument.
    It adjusts the intervals or drops ticks to make up for slow receivers.
    The duration d must be greater than zero.
    """

    def __init__(self, interval):
        # Reject zero as well as negative intervals: both the error
        # message and the docstring promise "non-positive" rejection, but
        # the original check (`interval < 0`) let 0 slip through.
        if interval <= 0:
            raise ValueError("non-positive interval")

        self.c = makechan(1)

        # Repeating runtime timer: fires every `interval`, delivering the
        # time on self.c through _sendtime.
        self.t = timer.Timer(_sendtime, interval, interval, args=(self.c,))
        self.t.start()

    def stop(self):
        """Stop tick delivery and close the channel."""
        self.c.close()
        self.t.stop()
def Tick(interval):
    """Convenience wrapper around ``Ticker`` that exposes only its channel.

    Useful for clients that never need to shut the ticker down.  Returns
    ``None`` when ``interval`` is not positive.
    """
    if interval <= 0:
        return None
    return Ticker(interval).c
| [
"bchesneau@gmail.com"
] | bchesneau@gmail.com |
f82d6a6bf94f54d656ce7cc54a735785cc4eb61f | 305b5459c319688a7a7184c959fc335d464a0e0c | /test_project/test_app/tests/crawler_tests.py | 1f9c29c6a6217e4e666c3c85ba5567b605e0efa2 | [] | no_license | ericholscher/django-crawler | 7595052a3a374c62c3fa6063e091aba4ab5ddb11 | 716f75a5a23b1befa28bbf2da41df4335eb0bf5c | refs/heads/master | 2021-01-01T16:13:29.020267 | 2015-06-29T17:53:42 | 2015-06-29T17:53:42 | 902,366 | 18 | 11 | null | 2015-06-29T17:54:12 | 2010-09-10T21:27:11 | Python | UTF-8 | Python | false | false | 2,643 | py | """
This file tests testmaker: it runs the crawler over the polls app while testmaker records output. Hopefully this will provide a sane way to test testmaker.
"""
from django.test.testcases import TestCase
from crawler.base import Crawler
import logging
import os
class CrawlerTests(TestCase):
    """
    Tests to test the Crawler API
    """
    urls = "test_project.polls.urls"
    fixtures = ['polls_testmaker.json']

    def setUp(self):
        # Route the crawler's logger into a dedicated file so each test
        # can assert on the logged output.
        self.log = logging.getLogger('crawler')
        [self.log.removeHandler(h) for h in self.log.handlers]
        self.log.setLevel(logging.DEBUG)
        handler = logging.FileHandler('crawler_log', 'a')
        handler.setFormatter(logging.Formatter('%(message)s'))
        self.log.addHandler(handler)

    def tearDown(self):
        os.remove('crawler_log')

    def _read_log(self):
        # Read the crawl log, closing the handle promptly.  The original
        # left every `open()` handle dangling, which leaks descriptors and
        # breaks tearDown's os.remove on platforms that lock open files.
        with open('crawler_log') as logs:
            return logs.read()

    def test_basic_crawling(self):
        c = Crawler('/')
        c.run()
        self.assertEqual(c.crawled, {'/': True, u'/1': True, u'/2': True})

    def test_relative_crawling(self):
        c = Crawler('/1')
        c.run()
        self.assertEqual(c.crawled, {u'/1': True})

    def test_url_plugin(self):
        conf_urls = {'this_wont_be_crawled': True}
        c = Crawler('/', conf_urls=conf_urls)
        c.run()
        output = self._read_log()
        self.assertIn('These patterns were not matched during the crawl: this_wont_be_crawled', output)

    def test_time_plugin(self):
        #This isn't testing much, but I can't know how long the time will take
        c = Crawler('/')
        c.run()
        output = self._read_log()
        self.assertIn('Time taken:', output)

    def test_memory_plugin(self):
        from crawler.plugins.memory_plugin import Memory
        Memory.active = True
        c = Crawler('/')
        c.run()
        output = self._read_log()
        self.assertIn('Memory consumed:', output)

    #Guppy makes the tests take a lot longer, uncomment this if you want to
    #test it.
    """
    def test_guppy_plugin(self):
        #This isn't testing much, but I can't know how long the time will take
        from crawler.plugins.guppy_plugin import ACTIVE, Heap
        if ACTIVE:
            Heap.active = True
            c = Crawler('/')
            c.run()
            logs = open('crawler_log')
            output = logs.read()
            import ipdb; ipdb.set_trace()
            self.assertTrue(output.find('heap') != -1)
        else:
            print "Skipping memory test, as guppy isn't installed"
    """
| [
"eric@ericholscher.com"
] | eric@ericholscher.com |
83557ba6614ffd8a233757c86d0f87a2b54d2e95 | 4eddf6a34715752dc652571b1ab274f51ceb5da0 | /Bayes Classification/.history/Bayes_main_20210428125730.py | e07e1b20e37d7581a86711c9e9dc3c7244e9fcf3 | [] | no_license | Suelt/Hust-SE-introduction-to-ML | 649aba0e5b41363ceac03330ef02982982a0615d | a66785c3085da573f5748d13608eabf02e616321 | refs/heads/master | 2023-05-27T13:13:41.058545 | 2021-06-10T05:44:02 | 2021-06-10T05:44:02 | 375,582,438 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import model_selection
# NOTE(review): this fragment appears truncated — `pd` is used below but
# the `import pandas as pd` line is not visible in this chunk.
credit = pd.read_csv("C:\\pyproject\\Bayes Classification\\transformed.csv")
# Target column and feature columns (all columns from 'status' through
# 'foreign_worker').
y = credit['credit_risk']
X = credit.loc[:,'status':'foreign_worker']
# 70/30 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.3, random_state=1)
cols = ['status','duration','credit_history', 'purpose','amount','savings', 'employment_duration','installment_rate', 'personal_status_sex', 'other_debtors',
'present_residence','property','age','other_installment_plans','housing','number_credits','job','people_liable','telephone','foreign_worker']
# Per-feature value-count tables for each class, filled elsewhere.
dict_main_true={}
dict_main_false={}
# Recover the training rows by index, then split them by class label.
# NOTE(review): these mask `credit` with a boolean series built from
# `train` — presumably intended to select within `train`; verify.
train=credit.loc[y_train.index]
train_true=credit[train['credit_risk'].isin(['good'])]
train_bad=credit[train['credit_risk'].isin(['bad'])]
print(train_true.shape[0])
for col in cols:
    dict_main_true[col]={}
    dict_main_false[col]={}
| [
"2552925383@qq.com"
] | 2552925383@qq.com |
5e5a528d5d631b39d41e9f5499a4225b499ab82a | f8e9a88eba9e9d83166c64722fc94c5636ebfc1d | /torch/backends/_nnapi/serializer.py | a0d3f8030e36d27a5e2b5245e00c99fa0734eeff | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | Cat11438/pytorch | 1c2f9d9f5dda62a2594db24fa2306fd6ddf1ae16 | 9b908ab0d0a947d89ac3137f8c4a05a87c35f568 | refs/heads/master | 2023-06-19T22:35:33.400237 | 2021-07-09T11:33:13 | 2021-07-09T12:28:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73,754 | py | import sys
import enum
import struct
import array
import logging
import functools
from typing import (
Tuple,
NamedTuple,
List,
Optional,
)
import torch
# TODO: Add type annotations
# TODO: Check tensor types for ops
LOG = logging.getLogger("nnapi_serialize")
class NNAPI_OperandCode(object):
    """Operand type codes mirroring the NNAPI `OperandCode` enum.

    These integer values are part of the Android NNAPI ABI and are
    written verbatim into the serialized model, so they must not change.
    """
    FLOAT32 = 0
    INT32 = 1
    UINT32 = 2
    TENSOR_FLOAT32 = 3
    TENSOR_INT32 = 4
    TENSOR_QUANT8_ASYMM = 5
    BOOL = 6
    TENSOR_QUANT16_SYMM = 7
    TENSOR_FLOAT16 = 8
    TENSOR_BOOL8 = 9
    FLOAT16 = 10
    TENSOR_QUANT8_SYMM_PER_CHANNEL = 11
    TENSOR_QUANT16_ASYMM = 12
class NNAPI_OperationCode(object):
    """Operation codes mirroring the NNAPI `OperationCode` enum.

    These integer values are part of the Android NNAPI ABI and are
    written verbatim into the serialized model, so they must not change.
    """
    ADD = 0
    AVERAGE_POOL_2D = 1
    CONCATENATION = 2
    CONV_2D = 3
    DEPTHWISE_CONV_2D = 4
    DEPTH_TO_SPACE = 5
    DEQUANTIZE = 6
    EMBEDDING_LOOKUP = 7
    FLOOR = 8
    FULLY_CONNECTED = 9
    HASHTABLE_LOOKUP = 10
    L2_NORMALIZATION = 11
    L2_POOL_2D = 12
    LOCAL_RESPONSE_NORMALIZATION = 13
    LOGISTIC = 14
    LSH_PROJECTION = 15
    LSTM = 16
    MAX_POOL_2D = 17
    MUL = 18
    RELU = 19
    RELU1 = 20
    RELU6 = 21
    RESHAPE = 22
    RESIZE_BILINEAR = 23
    RNN = 24
    SOFTMAX = 25
    SPACE_TO_DEPTH = 26
    SVDF = 27
    TANH = 28
    BATCH_TO_SPACE_ND = 29
    DIV = 30
    MEAN = 31
    PAD = 32
    SPACE_TO_BATCH_ND = 33
    SQUEEZE = 34
    STRIDED_SLICE = 35
    SUB = 36
    TRANSPOSE = 37
    ABS = 38
    ARGMAX = 39
    ARGMIN = 40
    AXIS_ALIGNED_BBOX_TRANSFORM = 41
    BIDIRECTIONAL_SEQUENCE_LSTM = 42
    BIDIRECTIONAL_SEQUENCE_RNN = 43
    BOX_WITH_NMS_LIMIT = 44
    CAST = 45
    CHANNEL_SHUFFLE = 46
    DETECTION_POSTPROCESSING = 47
    EQUAL = 48
    EXP = 49
    EXPAND_DIMS = 50
    GATHER = 51
    GENERATE_PROPOSALS = 52
    GREATER = 53
    GREATER_EQUAL = 54
    GROUPED_CONV_2D = 55
    HEATMAP_MAX_KEYPOINT = 56
    INSTANCE_NORMALIZATION = 57
    LESS = 58
    LESS_EQUAL = 59
    LOG = 60
    LOGICAL_AND = 61
    LOGICAL_NOT = 62
    LOGICAL_OR = 63
    LOG_SOFTMAX = 64
    MAXIMUM = 65
    MINIMUM = 66
    NEG = 67
    NOT_EQUAL = 68
    PAD_V2 = 69
    POW = 70
    PRELU = 71
    QUANTIZE = 72
    QUANTIZED_16BIT_LSTM = 73
    RANDOM_MULTINOMIAL = 74
    REDUCE_ALL = 75
    REDUCE_ANY = 76
    REDUCE_MAX = 77
    REDUCE_MIN = 78
    REDUCE_PROD = 79
    REDUCE_SUM = 80
    ROI_ALIGN = 81
    ROI_POOLING = 82
    RSQRT = 83
    SELECT = 84
    SIN = 85
    SLICE = 86
    SPLIT = 87
    SQRT = 88
    TILE = 89
    TOPK_V2 = 90
    TRANSPOSE_CONV_2D = 91
    UNIDIRECTIONAL_SEQUENCE_LSTM = 92
    UNIDIRECTIONAL_SEQUENCE_RNN = 93
    RESIZE_NEAREST_NEIGHBOR = 94
class NNAPI_FuseCode(object):
    """Fused-activation codes mirroring the NNAPI `FuseCode` enum."""
    FUSED_NONE = 0
    FUSED_RELU = 1
    FUSED_RELU1 = 2
    FUSED_RELU6 = 3
class OperandValueSourceType(object):
    """Where an operand's constant data lives in the serialized model.

    IMMEDIATE values are stored inline; NUMBERED_BUFFER values refer to
    a weight buffer passed alongside the model (see
    add_tensor_operand_for_weight).
    """
    IMMEDIATE = 0
    NUMBERED_BUFFER = 2
    NUMBERED_MEMORY = 3
NUMBERED_MEMORY = 3
# Scalar types that appear explicitly in models.
# These must be kept in sync with
# AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS.
# TODO: Expose these directly to Python to avoid maintaining this list.
class TorchScalarTypes(enum.Enum):
    # Value must match torch.quint8's position in the ScalarType enum
    # (see the AT_FORALL_SCALAR_TYPES... note above).
    QUINT8 = 13
def approx_equal(lhs, rhs, tolerance=1e-6):
    """Return True if lhs and rhs are equal within a relative tolerance.

    The bound is scaled by the larger magnitude of the two values so the
    check is symmetric and works for negative inputs. (The previous
    version scaled by min(lhs, rhs), which made the bound negative for
    negative inputs, so e.g. approx_equal(-1.0, -1.0) was False.)
    """
    return abs(lhs - rhs) <= tolerance * max(abs(lhs), abs(rhs))
def tensor_size(op_type, dims):
    """Return the size in bytes of a tensor with the given NNAPI type and dims.

    Only types that can appear as weight/constant tensors are listed;
    anything else raises KeyError.
    """
    ITEM_SIZES = {
        NNAPI_OperandCode.TENSOR_FLOAT32: 4,
        NNAPI_OperandCode.TENSOR_INT32: 4,
        NNAPI_OperandCode.TENSOR_QUANT8_ASYMM: 1,
        NNAPI_OperandCode.TENSOR_QUANT16_SYMM: 2,
    }
    # element size times the product of all extents
    return functools.reduce(lambda acc, extent: acc * extent, dims, ITEM_SIZES[op_type])
def change_element(tup, index, value):
    """Return a copy of tuple *tup* with the element at *index* replaced.

    Negative indices are supported, same as list indexing.
    """
    items = list(tup)
    items[index] = value
    return tuple(items)
class ConvPoolArgs2d(NamedTuple):
    """Configuration arguments for a convolution."""
    # Kernel extents.
    kernel_h: int
    kernel_w: int
    # Strides.
    stride_h: int
    stride_w: int
    # NNAPI-style explicit padding: top, bottom, left, right.
    pad_t: int
    pad_b: int
    pad_l: int
    pad_r: int
    # Dilations (only 1 is currently supported downstream).
    dilation_h: int
    dilation_w: int
    # Number of groups (None for pooling).
    group: int
class DimOrder(enum.Enum):
    """How an operand's NNAPI shape relates to its PyTorch (NCHW) shape.

    See Operand.dim_order for the meaning of each member.
    """
    PRESUMED_CONTIGUOUS = 0
    CHANNELS_LAST = 1
    # Scalars and 1-D vectors (e.g. immediate constants).
    SCALAR_OR_VECTOR = 2
    # Constant weights whose layout is resolved when they are consumed.
    UNKNOWN_CONSTANT = 999
class Operand(NamedTuple):
    """Representation of an NNAPI operand."""

    # NNAPI operand type. One of NNAPI_OperandCode.
    # TODO: Make this an enum.
    # NOTE(review): field order is relied on by positional unpacking in
    # serialize_model, so it must not change.
    op_type: int

    # This is always the PyTorch shape, which is NCHW for feature maps.
    # The actual NNAPI operand might have a transposed shape.
    shape: Tuple[int, ...]

    # Specifies how the shape of the operand that we define in NNAPI
    # relates to the shape we track above.
    # - PRESUMED_CONTIGUOUS: physical NNAPI operand will exactly match
    #   the shape of the PyTorch tensor.
    # - CHANNELS_LAST: The PyTorch tensor is expected to be NCHW, and
    #   the NNAPI operand will be represented explicitly as NHWC.
    dim_order: DimOrder

    # Quantization params
    scale: float
    zero_point: int

    def use_nchw(self):
        # True when the physical layout matches PyTorch's NCHW layout.
        if self.dim_order is DimOrder.PRESUMED_CONTIGUOUS:
            return True
        if self.dim_order is DimOrder.CHANNELS_LAST:
            return False
        raise Exception("Unknown dim order")
def broadcast_shapes(shape1, shape2):
    """Compute the numpy-style broadcast of two equal-rank shapes.

    Each dimension pair must be equal or contain a 1; the result takes
    the larger extent. Ranks must match.
    """
    assert len(shape1) > 0
    assert len(shape2) > 0

    s1 = list(shape1)
    s2 = list(shape2)
    # TODO: Support non-equal-rank broadcast where semantics match.
    # This can be tricky for NHWC tensors because dimension orders
    # don't match between PT and NNAPI, even though semantics match.
    if len(s1) != len(s2):
        raise Exception("Non-equal-rank broadcast is not supported yet.")

    result = []
    for d1, d2 in zip(s1, s2):
        if d1 == d2 or d2 == 1:
            result.append(d1)
        elif d1 == 1:
            result.append(d2)
        else:
            raise Exception("Cannot broadcast shapes: {} and {}".format(shape1, shape2))
    return tuple(result)
def get_conv_pool_shape(image_shape, args, out_ch, transpose):
    """Compute the output shape (N, C, H, W) of a 2d conv/pool.

    image_shape: input shape as (batch, channels, height, width).
    args: an object with ConvPoolArgs2d's fields.
    out_ch: number of output channels.
    transpose: True for transposed convolution sizing.

    A zero input height/width (flexible size) yields a zero output dim.
    """
    batch, in_c, in_h, in_w = image_shape

    # TODO: Handle dilation
    if args.dilation_h != 1 or args.dilation_w != 1:
        raise Exception("Dilation not supported yet.")

    if transpose:
        out_h = (in_h - 1) * args.stride_h + args.kernel_h - args.pad_t - args.pad_b
        # BUGFIX: width previously subtracted pad_l twice; it must
        # subtract pad_l and pad_r.
        out_w = (in_w - 1) * args.stride_w + args.kernel_w - args.pad_l - args.pad_r
    else:
        out_h = (in_h - args.kernel_h + args.pad_t + args.pad_b) // args.stride_h + 1
        out_w = (in_w - args.kernel_w + args.pad_l + args.pad_r) // args.stride_w + 1

    # Handle variable-sized tensors.
    if in_h == 0:
        out_h = 0
    if in_w == 0:
        out_w = 0

    out_shape = (batch, out_ch, out_h, out_w)
    return out_shape
def fix_shape(shape, dim_order):
    """Translate a PyTorch shape into the physical NNAPI operand shape.

    This is where the "always NCHW" PyTorch shape becomes an explicit
    NHWC shape for CHANNELS_LAST operands; other orders pass through.
    """
    if dim_order is DimOrder.CHANNELS_LAST:
        # NCHW -> NHWC
        return tuple([shape[0]] + list(shape[2:]) + [shape[1]])
    if dim_order is DimOrder.SCALAR_OR_VECTOR:
        assert len(shape) == 0 or len(shape) == 1
        return shape
    if dim_order is DimOrder.PRESUMED_CONTIGUOUS:
        return shape
    if dim_order is DimOrder.UNKNOWN_CONSTANT:
        # XXX think this through
        return shape
    raise Exception(f"Bad dim_order: {dim_order!r}.")
def reverse_map_dim(dim_order, d):
    """Map an NNAPI dimension index back to the PyTorch dimension index.

    reverse_map_dim(PRESUMED_CONTIGUOUS, x) == x
    reverse_map_dim(CHANNELS_LAST, 3) == 1
    """
    if dim_order is DimOrder.PRESUMED_CONTIGUOUS:
        return d
    assert dim_order is DimOrder.CHANNELS_LAST
    # NHWC position -> NCHW position
    return (0, 2, 3, 1)[d]
def flex_name(op_id, dim):
    """Name of the local variable holding the computed flexible size
    for a given operand and dimension."""
    return "s_{}_{}".format(op_id, dim)
class _NnapiSerializer(object):
def __init__(self, config):
self.operands = []
self.values = []
self.operations = []
self.value_data = []
self.operation_args = []
self.inputs = []
self.outputs = []
self.flexible_shape_computation_lines = []
self.modules = {}
self.constants = {}
self.tensor_sequences = {}
self.jitval_operand_map = {}
self.cached_immediates = {}
self.used_weights = []
self.weight_offset = 0
if config is None:
config = {}
def get_next_operand_id(self):
return len(self.operands)
# Add a tensor operand corresponding to a JIT Value.
# Returns the NNAPI operand ID. Can be looked up later with
# get_tensor_operand_by_jitval.
def add_tensor_operand(self, jitval, oper):
    """Register *oper* as the NNAPI operand backing JIT value *jitval*.

    Returns the new operand's ID; it can be looked up later with
    get_tensor_operand_by_jitval. Each JIT value may be registered once.
    """
    assert isinstance(oper, Operand)
    if jitval in self.jitval_operand_map:
        raise Exception("Duplicate tensor: %r" % jitval)

    new_id = self.get_next_operand_id()
    self.operands.append(oper)
    self.jitval_operand_map[jitval] = new_id
    return new_id
# Add a tensor operand that does not correspond to a JIT Value.
# Useful for cases where multiple NNAPI operands are required
# to implement one JIT IR node. Returns the NNAPI operand ID.
def add_anonymous_tensor_operand(self, oper):
    """Add an operand that backs no JIT value.

    Useful when one JIT node needs several NNAPI operands (e.g. an
    intermediate transpose result). Returns the new operand's ID.
    """
    assert isinstance(oper, Operand)
    new_id = self.get_next_operand_id()
    self.operands.append(oper)
    return new_id
@staticmethod
def torch_tensor_to_operand(tensor, dim_order):
    """Build an Operand describing *tensor*'s dtype, shape, and quant params.

    Supported dtypes: float32, int32, quint8, qint32. Quantized dtypes
    carry their scale/zero_point into the operand; qint32 maps onto
    TENSOR_INT32 (no dedicated code exists) and requires zero_point 0.
    """
    dtype = str(tensor.dtype).replace("torch.", "")
    scale, zero_point = 0.0, 0

    if dtype == "float32":
        op_type = NNAPI_OperandCode.TENSOR_FLOAT32
    elif dtype == "int32":
        op_type = NNAPI_OperandCode.TENSOR_INT32
    elif dtype == "quint8":
        op_type = NNAPI_OperandCode.TENSOR_QUANT8_ASYMM
        scale = tensor.q_scale()
        zero_point = tensor.q_zero_point()
    elif dtype == "qint32":
        op_type = NNAPI_OperandCode.TENSOR_INT32
        scale = tensor.q_scale()
        zero_point = tensor.q_zero_point()
        assert zero_point == 0
    else:
        raise Exception(f"Can't handle input with dtype '{tensor.dtype}'")

    return Operand(
        shape=tuple(tensor.shape),
        op_type=op_type,
        dim_order=dim_order,
        scale=scale,
        zero_point=zero_point,
    )
def add_tensor_operand_for_input(self, arg_idx, jitval, tensor):
    """Register a model input operand for argument *arg_idx*.

    Tensors tagged with `nnapi_nhwc` get CHANNELS_LAST layout. Any
    zero-sized dimension is flexible: its runtime size is read from the
    actual argument's shape at load time.
    """
    use_nhwc = getattr(tensor, "nnapi_nhwc", False)
    dim_order = DimOrder.CHANNELS_LAST if use_nhwc else DimOrder.PRESUMED_CONTIGUOUS
    toper = self.torch_tensor_to_operand(tensor, dim_order)
    operand_id = self.add_tensor_operand(jitval, toper)
    self.inputs.append(operand_id)
    for dim, size in enumerate(tensor.shape):
        if size == 0:
            self.compute_operand_shape(operand_id, dim, f"args[{arg_idx}].shape[{dim}]")
    return operand_id
def add_tensor_operand_for_weight(self, tensor):
    """Register *tensor* as a constant weight in a numbered buffer.

    The operand's value record is a (buffer_number, offset, length)
    triple; the tensor itself is appended to self.used_weights so the
    caller can pass the data alongside the serialized model.
    Returns the new operand's ID.

    Note: a `psize` local (length padded to a 4-byte multiple) was
    computed here but never used; padding is handled in
    serialize_values, so it has been removed.
    """
    toper = self.torch_tensor_to_operand(tensor, DimOrder.UNKNOWN_CONSTANT)
    operand_id = len(self.operands)
    self.operands.append(toper)
    tsize = tensor_size(toper.op_type, toper.shape)
    self.values.append((operand_id, OperandValueSourceType.NUMBERED_BUFFER))
    buf_num = len(self.used_weights)
    offset = 0
    # (buffer_number, offset, length) locating the weight's bytes.
    self.value_data.append(struct.pack(
        "iii",
        buf_num,
        offset,
        tsize))
    self.used_weights.append(tensor)
    return operand_id
def add_immediate_operand(self, code, value, dims):
    """Add (or reuse) a constant operand whose bytes are stored inline.

    Identical (code, value) pairs share a single operand via a cache,
    so repeated constants don't bloat the model.
    """
    assert isinstance(dims, tuple)
    key = (code, value)
    if key not in self.cached_immediates:
        new_id = len(self.operands)
        self.operands.append(Operand(code, dims, DimOrder.SCALAR_OR_VECTOR, 0.0, 0))
        self.values.append((new_id, OperandValueSourceType.IMMEDIATE))
        self.value_data.append(value)
        self.cached_immediates[key] = new_id
    return self.cached_immediates[key]
def add_immediate_int_scalar(self, value):
    """Immediate scalar INT32 operand."""
    packed = struct.pack("i", value)
    return self.add_immediate_operand(NNAPI_OperandCode.INT32, packed, ())
def add_immediate_float_scalar(self, value):
    """Immediate scalar FLOAT32 operand."""
    packed = struct.pack("f", value)
    return self.add_immediate_operand(NNAPI_OperandCode.FLOAT32, packed, ())
def add_immediate_bool_scalar(self, value):
    """Immediate scalar BOOL operand (a single 0/1 byte)."""
    data = b"\x01" if value else b"\x00"
    return self.add_immediate_operand(NNAPI_OperandCode.BOOL, data, ())
def add_immediate_int_vector(self, value):
    """Immediate 1-D TENSOR_INT32 operand holding *value*."""
    packed = array.array("i", value).tobytes()
    return self.add_immediate_operand(NNAPI_OperandCode.TENSOR_INT32, packed, (len(value),))
def get_tensor_operand_by_jitval(self, jitval):
operand_id = self.jitval_operand_map[jitval]
return (operand_id, self.operands[operand_id])
def get_tensor_operand_by_jitval_fixed_size(self, jitval):
op_id, oper = self.get_tensor_operand_by_jitval(jitval)
for s in oper.shape:
if s <= 0:
# TODO: Improve this error message, possibly after converting
# many callsites to support flexible size.
raise Exception("Flexible size is not supported for this operand.")
return op_id, oper
def get_tensor_operand_or_constant(self, jitval):
    """Resolve *jitval* to an operand, materializing constants as weights.

    If the value has no registered operand, it must be a recorded
    TensorType constant, which is added as a weight operand.
    """
    op_id = self.jitval_operand_map.get(jitval)
    if op_id is None:
        _, const_tensor = self.get_constant_value(jitval, "TensorType")
        op_id = self.add_tensor_operand_for_weight(const_tensor)
    return (op_id, self.operands[op_id])
def get_tensor_operand_for_weight(self, jitval):
    """Materialize the TensorType constant behind *jitval* as a weight operand."""
    _, const_tensor = self.get_constant_value(jitval, "TensorType")
    op_id = self.add_tensor_operand_for_weight(const_tensor)
    return (op_id, self.operands[op_id])
def add_operation(self, opcode, inputs, outputs):
self.operations.append((opcode, len(inputs), len(outputs)))
self.operation_args.extend(inputs + outputs)
def add_tensor_sequence(self, jitval, values):
assert jitval not in self.tensor_sequences
self.tensor_sequences[jitval] = values
def add_constant_value(self, jitval, ctype, value):
assert jitval not in self.constants
self.constants[jitval] = (ctype, value)
def get_constant_value(self, jitval, typekind=None):
record = self.constants.get(jitval)
if record is None:
raise Exception(f"Could not find constant value for '{jitval!r}'.")
ctype, _ = record
if typekind is not None and ctype.kind() != typekind:
raise Exception(
f"Expected constant value of type {typekind}, but got {ctype.kind()} for value '{jitval!r}'")
return record
@staticmethod
def operand_to_template_torchscript(op_id, oper):
    """Return a TorchScript expression to build a template for a given operand."""
    dims = []
    for d, s in enumerate(oper.shape):
        # Fixed dims are written literally; flexible dims (size <= 0)
        # refer to the size variable computed earlier for this op/dim.
        dims.append(str(s) if s > 0 else flex_name(op_id, d))
    shape_code = "(" + "".join(dim + "," for dim in dims) + ")"

    if oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32:
        return f"torch.zeros({shape_code}, dtype=torch.float32)"
    if oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM:
        return (
            f"torch.quantize_per_tensor("
            f"torch.zeros(1), scale={oper.scale}, zero_point={oper.zero_point}, dtype=torch.quint8)"
            f".expand({shape_code}).contiguous()"
        )
    raise Exception(f"Unsupported output operand type: {oper.op_type}")
def forward_operand_shape(self, out_op_id, out_dim, in_op_id, in_dim):
    # Make an output's flexible dim track an input's flexible dim by
    # emitting an assignment between their size variables.
    self.compute_operand_shape(out_op_id, out_dim, flex_name(in_op_id, in_dim))
def compute_operand_shape(self, op_id, dim, expr):
    # Emit a TorchScript line assigning *expr* to this op/dim's size
    # variable; these lines run at model load time.
    self.flexible_shape_computation_lines.append(f"{flex_name(op_id, dim)} = {expr}")
def transpose_to_nhwc(self, in_id, oper):
    """Insert an NNAPI TRANSPOSE converting *oper* to CHANNELS_LAST.

    Only supported when H and W are both 1 (the permutation is then a
    pure relayout). Returns (new_operand_id, new_operand).
    """
    if oper.shape[2:] != (1, 1):
        raise Exception("Automatic transpose only supported for H,W == 1,1")

    out_oper = oper._replace(dim_order=DimOrder.CHANNELS_LAST)
    # NCHW -> NHWC permutation. Note: the permutation operand must be
    # created before the output operand to preserve operand-ID order.
    perm_id = self.add_immediate_int_vector([0, 2, 3, 1])
    out_id = self.add_anonymous_tensor_operand(out_oper)
    self.add_operation(NNAPI_OperationCode.TRANSPOSE, [in_id, perm_id], [out_id])
    return out_id, out_oper
# Transpose inputs as necessary to allow broadcasting.
def transpose_for_broadcast(self, in0_id, in0_oper, in1_id, in1_oper):
    """Transpose inputs as necessary so both share one dim_order.

    Returns the possibly-updated (in0_id, in0_oper, in1_id, in1_oper).
    """
    if in0_oper.dim_order == in1_oper.dim_order:
        return in0_id, in0_oper, in1_id, in1_oper

    # Assume NHWC is preferred if there is a mismatch.
    pair = (in0_oper.dim_order, in1_oper.dim_order)
    if pair == (DimOrder.PRESUMED_CONTIGUOUS, DimOrder.CHANNELS_LAST):
        return self.transpose_to_nhwc(in0_id, in0_oper) + (in1_id, in1_oper)
    if pair == (DimOrder.CHANNELS_LAST, DimOrder.PRESUMED_CONTIGUOUS):
        return (in0_id, in0_oper) + self.transpose_to_nhwc(in1_id, in1_oper)

    raise Exception(
        "Automatic transpose not supported for dim_orders: %r, %r" %
        (in0_oper.dim_order, in1_oper.dim_order))
def get_size_arg(self, jitval):
    """Return a constant int-list argument (e.g. stride or padding)."""
    ctype, value = self.get_constant_value(jitval)
    if ctype.kind() != "ListType":
        raise Exception(f"Can't handle size arg of type '{ctype!r}' for '{jitval!r}'")
    assert ctype.getElementType().kind() == "IntType"
    return value
def get_conv_pool_args_2d_from_pack(self, kernel_size, packed_config):
    """Decode a quantized conv's packed config into ConvPoolArgs2d.

    packed_config layout (each element a 0-d tensor):
      [0] spatial rank (must be 2), [1:3] strides, [3:5] paddings,
      [5:7] dilations, [7:9] output padding, [9] groups, [10] transpose.
    Output padding and transpose are unsupported and must be zero.
    """
    pc = [i.item() for i in packed_config]
    assert pc[0] == 2
    strides = [pc[1], pc[2]]
    paddings = [pc[3], pc[4]]
    dilations = [pc[5], pc[6]]
    output_padding = [pc[7], pc[8]]
    group_num = pc[9]
    transpose = pc[10]
    assert len(pc) == 11
    assert output_padding == [0, 0]
    assert transpose == 0
    return self.get_conv_pool_args_2d_common(kernel_size, strides, paddings, dilations, group_num)
def get_conv_pool_args_2d_from_jit(self, kernel_size, stride, padding, dilation=None, group=None):
    """Build ConvPoolArgs2d from individual JIT constant arguments.

    dilation defaults to [1, 1] when absent (e.g. pooling); group may
    be None for ops without a group count.
    """
    strides = self.get_size_arg(stride)
    paddings = self.get_size_arg(padding)
    dilations = [1, 1] if dilation is None else self.get_size_arg(dilation)
    group_num = None
    if group is not None:
        _, group_num = self.get_constant_value(group, "IntType")
    return self.get_conv_pool_args_2d_common(kernel_size, strides, paddings, dilations, group_num)
def get_conv_pool_args_2d_common(self, kernel_size, strides, paddings, dilations, group_num):
    """Normalize kernel/stride/pad/dilation lists into a ConvPoolArgs2d.

    NNAPI wants four explicit padding values, so PyTorch's symmetric
    (ph, pw) pair is expanded to (ph, ph, pw, pw).
    """
    kernels = list(kernel_size)
    for seq in (kernels, strides, paddings, dilations):
        assert len(seq) == 2
    ph, pw = paddings
    return ConvPoolArgs2d(*(kernels + strides + [ph, ph, pw, pw] + dilations + [group_num]))
def serialize_model(self, model, inputs):
    """Serialize a TorchScript module into the NNAPI flatbuffer-like format.

    model: a scripted module whose graph will be walked node by node.
    inputs: example input tensors matching the graph's arguments.

    Returns (serialized ints, weight tensors, input dim orders, output
    dim orders, TorchScript lines computing flexible shapes and the
    output templates, retval_count). retval_count is -1 for a single
    tensor return, or the tuple length for a tuple return.
    """
    # Pre-register the shared immediate booleans so their operand IDs
    # are stable regardless of which ops need them.
    self.add_immediate_bool_scalar(False)
    self.add_immediate_bool_scalar(True)

    inp_dim_orders = []
    out_dim_orders = []

    # The first graph input is `self` (the module); record it as a
    # constant so prim::GetAttr nodes can resolve attributes on it.
    self_jitval = next(model.graph.inputs())
    self.add_constant_value(self_jitval, self_jitval.type(), model)

    # Register the real inputs, pairing graph values with example tensors.
    for arg_idx, (input_value, input_tensor) in enumerate(zip(list(model.graph.inputs())[1:], inputs)):
        op_id = self.add_tensor_operand_for_input(arg_idx, input_value, input_tensor)
        inp_dim_orders.append(self.operands[op_id].dim_order.value)

    for idx, node in enumerate(model.graph.nodes()):
        LOG.debug("Processing node #%d: %r", idx, node)
        self.add_node(node)

    retn = model.graph.return_node()
    assert retn.inputsSize() == 1
    assert retn.outputsSize() == 0
    retn_input = retn.inputsAt(0)
    template_return_lines = ["return ["]
    if retn_input.type().kind() == "TensorType":
        return_values = [retn_input]
        retval_count = -1
    elif retn_input.type().kind() == "TupleType":
        return_values = self.tensor_sequences[retn_input]
        retval_count = len(return_values)
    else:
        raise Exception(f"Unsupported return type: {retn_input.type()}")

    # Record output operands and build TorchScript expressions that
    # allocate correctly-shaped output templates at load time.
    for v in return_values:
        op_id = self.jitval_operand_map[v]
        self.outputs.append(op_id)
        out_dim_orders.append(self.operands[op_id].dim_order.value)
        template_return_lines.append(self.operand_to_template_torchscript(op_id, self.operands[op_id]) + ",")
    template_return_lines.append("]")

    model = []

    # Fixed-size header: version plus section counts.
    version = 1
    header = struct.pack(
        "iiiiii",
        version,
        len(self.operands),
        len(self.values),
        len(self.operations),
        len(self.inputs),
        len(self.outputs),
    )
    model.append(header)

    serialized_values, serialized_value_data = self.serialize_values()

    # Operand records: type, rank, scale, zero_point (shapes follow later).
    model.extend(struct.pack("iifi", t, len(d), s, z) for (t, d, _m, s, z) in self.operands)
    model.extend(serialized_values)
    model.extend(struct.pack("iii", *x) for x in self.operations)

    # Compact the model so we can get its length so far.
    model = [b"".join(model)]
    model_offset = len(model[0])
    # Model offset is the index into the model (in 32-bit words, not bytes)
    # of the next dimension we're about to serialize.  If it's 0,
    # generate code to mutate it before passing to NNAPI.
    assert model_offset % 4 == 0
    model_offset = int(model_offset / 4)

    # Serialize each operand's physical (NNAPI-layout) shape.  Flexible
    # dims (size 0) get a load-time patch line writing the computed size
    # directly into the serialized buffer at the recorded word offset.
    for (op_id, (_, dims, dim_order, _, _)) in enumerate(self.operands):
        shape = fix_shape(dims, dim_order)
        for d, s in enumerate(shape):
            if s == 0:
                pt_d = reverse_map_dim(dim_order, d)
                self.flexible_shape_computation_lines.append(
                    f"ser_model[{model_offset}] = {flex_name(op_id, pt_d)}")
            model_offset += 1
        model.append(self.serialize_ints(shape))

    model.extend(serialized_value_data)
    model.append(self.serialize_ints(self.operation_args))
    model.append(self.serialize_ints(self.inputs))
    model.append(self.serialize_ints(self.outputs))

    self.flexible_shape_computation_lines.extend(template_return_lines)

    return (
        array.array("i", b"".join(model)),
        self.used_weights,
        inp_dim_orders,
        out_dim_orders,
        self.flexible_shape_computation_lines,
        retval_count,
    )
def serialize_values(self):
serialized_values = []
serialized_value_data = []
assert len(self.values) == len(self.value_data)
for ((op_index, source_type), data) in zip(self.values, self.value_data):
source_length = len(data)
# Pad with 0 bytes out to a multiple of 4 for alignment.
physical_length = ((source_length - 1) | 0x3) + 1
padded_data = data + (b"\0" * (physical_length - source_length))
serialized_values.append(struct.pack("iii", op_index, source_type, source_length))
serialized_value_data.append(padded_data)
return serialized_values, serialized_value_data
@staticmethod
def serialize_ints(ints):
return array.array("i", ints).tobytes()
# Dispatch table mapping JIT node kinds to serializer handler methods.
# Entries are lambdas rather than direct method references because the
# methods they name are defined later in the class body.
ADDER_MAP = {
    "prim::GetAttr": lambda self, node:
        self.add_getattr(node),
    "prim::Constant": lambda self, node:
        self.add_constant_node(node),
    "prim::ListConstruct": lambda self, node:
        self.add_list_construct(node),
    "prim::TupleConstruct": lambda self, node:
        self.add_tuple_construct(node),
    "aten::unsqueeze": lambda self, node:
        self.add_unsqueeze(node),
    "aten::to": lambda self, node:
        self.add_to(node),
    "aten::detach": lambda self, node:
        self._identity(node),
    "aten::reshape": lambda self, node:
        self.add_reshape(node),
    "aten::flatten": lambda self, node:
        self.add_flatten(node),
    "aten::slice": lambda self, node:
        self.add_slice(node),
    "aten::size": lambda self, node:
        self.add_size(node),
    "aten::cat": lambda self, node:
        self.add_cat(node),
    "aten::mean": lambda self, node:
        self.add_mean(node),
    "aten::quantize_per_tensor": lambda self, node:
        self.add_quantize(node),
    "aten::dequantize": lambda self, node:
        self.add_dequantize(node),
    "aten::add": lambda self, node:
        self.add_add_sub_op(node, NNAPI_OperationCode.ADD, NNAPI_FuseCode.FUSED_NONE),
    "aten::sub": lambda self, node:
        self.add_add_sub_op(node, NNAPI_OperationCode.SUB, NNAPI_FuseCode.FUSED_NONE),
    "aten::mul": lambda self, node:
        self.add_pointwise_simple_binary_broadcast_op(node, NNAPI_OperationCode.MUL, NNAPI_FuseCode.FUSED_NONE),
    "aten::div": lambda self, node:
        self.add_pointwise_simple_binary_broadcast_op(node, NNAPI_OperationCode.DIV, NNAPI_FuseCode.FUSED_NONE),
    "aten::relu": lambda self, node:
        self.add_pointwise_simple_unary_op(node, NNAPI_OperationCode.RELU),
    "aten::sigmoid": lambda self, node:
        self.add_pointwise_simple_unary_op(node, NNAPI_OperationCode.LOGISTIC),
    "aten::softmax": lambda self, node:
        self.add_softmax(node),
    "aten::hardtanh": lambda self, node:
        self.add_hardtanh(node),
    "aten::avg_pool2d": lambda self, node:
        self.add_avg_pool2d(node),
    "aten::max_pool2d": lambda self, node:
        self.add_pool2d_node(node, NNAPI_OperationCode.MAX_POOL_2D),
    "aten::adaptive_avg_pool2d": lambda self, node:
        self.add_adaptive_avg_pool2d(node),
    "aten::upsample_nearest2d": lambda self, node:
        self.add_upsample_nearest2d(node),
    "aten::prelu": lambda self, node:
        self.add_prelu_op(node),
    "aten::addmm": lambda self, node:
        self.add_addmm(node),
    "aten::linear": lambda self, node:
        self.add_linear(node),
    "aten::_convolution": lambda self, node:
        self.add_conv_underscore(node),
    "aten::conv2d": lambda self, node:
        self.add_conv2d(node),
    "aten::log_softmax": lambda self, node:
        self.add_log_softmax(node),
    "quantized::linear": lambda self, node:
        self.add_qlinear(node),
    "quantized::conv2d": lambda self, node:
        self.add_qconv2d(node, NNAPI_FuseCode.FUSED_NONE),
    "quantized::conv2d_relu": lambda self, node:
        self.add_qconv2d(node, NNAPI_FuseCode.FUSED_RELU),
    "quantized::add": lambda self, node:
        self.add_qadd(node, NNAPI_OperationCode.ADD, NNAPI_FuseCode.FUSED_NONE),
    "quantized::add_relu": lambda self, node:
        self.add_qadd(node, NNAPI_OperationCode.ADD, NNAPI_FuseCode.FUSED_RELU),
}
def add_node(self, node):
adder = self.ADDER_MAP.get(node.kind())
if not adder:
raise Exception("Unsupported node kind (%r) in node %r" % (node.kind(), node))
adder(self, node)
def _identity(self, node):
in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0))
jitval = node.outputsAt(0)
self.jitval_operand_map[jitval] = in_id
def add_getattr(self, node):
    """Record the result of a prim::GetAttr on a module as a constant."""
    assert node.inputsSize() == 1
    assert node.outputsSize() == 1
    obj_ctype, obj = self.get_constant_value(node.inputsAt(0))
    # GetAttr only makes sense on scripted-module objects.
    assert str(obj_ctype).startswith("__torch__.")
    output = node.outputsAt(0)
    self.add_constant_value(output, output.type(), getattr(obj, node.s("name")))
def add_constant_node(self, node):
    """Record a prim::Constant's IValue for later lookups."""
    assert node.inputsSize() == 0
    assert node.outputsSize() == 1
    output = node.outputsAt(0)
    self.add_constant_value(output, output.type(), output.toIValue())
def add_list_construct(self, node):
    """Handle prim::ListConstruct.

    Tracks the list in up to two forms simultaneously: as a Python
    constant (when every element is a recorded constant) and as a
    tensor sequence (when every element is a TensorType value). At
    least one form must apply.
    """
    assert node.outputsSize() == 1
    output = node.outputsAt(0)
    ctype = output.type()
    const_vals: Optional[List] = []
    tensors: Optional[List] = []
    for inp in node.inputs():
        if const_vals is not None and inp in self.constants:
            _, val = self.get_constant_value(inp)
            const_vals.append(val)
        else:
            # One non-constant element disqualifies the constant form.
            const_vals = None
        if tensors is not None and inp.type().kind() == "TensorType":
            tensors.append(inp)
        else:
            # One non-tensor element disqualifies the sequence form.
            tensors = None
    if const_vals is not None:
        # NOTE: Now that TorchScript supports list constants,
        # this code path might not be used anymore.
        self.add_constant_value(output, ctype, const_vals)
    if tensors is not None:
        self.add_tensor_sequence(output, tensors)
    if const_vals is None and tensors is None:
        raise Exception(
            "Unable to handle ListConstruct node."
            " Neither all constants nor all tensors. %r" % node)
def add_tuple_construct(self, node):
assert node.outputsSize() == 1
output = node.outputsAt(0)
values = []
for inp in node.inputs():
values.append(inp)
self.add_tensor_sequence(output, values)
def add_unsqueeze(self, node):
    """Implement aten::unsqueeze via NNAPI EXPAND_DIMS."""
    assert node.inputsSize() == 2
    assert node.outputsSize() == 1
    in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0))
    _, dim = self.get_constant_value(node.inputsAt(1), "IntType")
    assert in_oper.dim_order == DimOrder.PRESUMED_CONTIGUOUS

    # Negative dims index against the output rank (input rank + 1).
    real_dim = dim if dim >= 0 else dim + len(in_oper.shape) + 1
    new_shape = list(in_oper.shape)
    new_shape.insert(real_dim, 1)
    out_oper = in_oper._replace(shape=tuple(new_shape))

    # Keep operand creation order: the immediate dim scalar precedes
    # the output operand (operand IDs are order-sensitive).
    dim_id = self.add_immediate_int_scalar(dim)
    out_id = self.add_tensor_operand(node.outputsAt(0), out_oper)
    self.add_operation(NNAPI_OperationCode.EXPAND_DIMS, [in_id, dim_id], [out_id])
def add_to(self, node):
    # Handle to("cpu") / to("gpu") case: a device move is a no-op for
    # serialization, so just alias the output to the input operand.
    self._identity(node)
def add_reshape(self, node):
    """Implement aten::reshape via NNAPI RESHAPE.

    The target shape must be a compile-time constant int list. NHWC
    inputs are only supported for the trivial [X, -1] flattening case;
    the output is always tracked as PRESUMED_CONTIGUOUS.
    """
    assert node.inputsSize() == 2
    assert node.outputsSize() == 1

    in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0))

    shape_ctype, shape = self.get_constant_value(node.inputsAt(1))
    assert shape_ctype.kind() == "ListType"
    assert shape_ctype.getElementType().kind() == "IntType"
    is_trivial_reshape = len(shape) == 2 and shape[1] == -1

    if in_oper.dim_order != DimOrder.PRESUMED_CONTIGUOUS and not is_trivial_reshape:
        raise Exception(
            "Currently, reshape is only supported on NHWC tensors if the target size is [X, -1].")

    # Bit of a hack here. Use a real tensor to infer the output shape.
    out_shape = torch.zeros(1).expand(in_oper.shape).reshape(shape).shape
    out_oper = in_oper._replace(shape=out_shape, dim_order=DimOrder.PRESUMED_CONTIGUOUS)

    inputs = [None] * 2
    inputs[0] = in_id
    inputs[1] = self.add_immediate_int_vector(shape)

    outputs = [None] * 1
    outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper)

    self.add_operation(NNAPI_OperationCode.RESHAPE, inputs, outputs)
def add_flatten(self, node):
    """Implement aten::flatten(start_dim, end_dim) via NNAPI RESHAPE.

    The flattened span [start_dim, end_dim] is collapsed into one
    dimension whose extent is the product of the spanned extents.
    NHWC inputs are not supported.
    """
    assert node.inputsSize() == 3
    assert node.outputsSize() == 1
    in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0))

    start_ctype, start_dim = self.get_constant_value(node.inputsAt(1), "IntType")
    end_ctype, end_dim = self.get_constant_value(node.inputsAt(2), "IntType")

    if in_oper.dim_order != DimOrder.PRESUMED_CONTIGUOUS:
        raise Exception(
            "Currently, reshape is not supported on NHWC tensors")

    # Normalize negative dim arguments.
    if start_dim < 0:
        start_dim += len(in_oper.shape)
    if end_dim < 0:
        end_dim += len(in_oper.shape)

    out_shape = (
        in_oper.shape[: start_dim] +
        (functools.reduce(
            lambda x, y: x * y, in_oper.shape[start_dim: end_dim + 1]),) +
        in_oper.shape[end_dim + 1:]
    )

    # TODO(axit): To add support for runtime
    # if any(dim == 0 for dim in in_oper.shape[start_dim: end_dim + 1]):
    #     raise Exception("Flattened dims can't be flexible")
    # non_flattened_dims = in_oper.shape[: start_dim] + in_oper.shape[end_dim + 1:]
    # if non_flattened_dims.count(0) > 1:
    #     raise Exception("Only 1 dim can be flexible")
    # out_shape = tuple(
    #     dim if dim != 0 else -1
    #     for dim in out_shape
    # )

    out_oper = in_oper._replace(shape=out_shape)
    out_id = self.add_tensor_operand(node.outputsAt(0), out_oper)

    inputs = [None] * 2
    inputs[0] = in_id
    inputs[1] = self.add_immediate_int_vector(out_shape)

    outputs = [None] * 1
    outputs[0] = out_id

    self.add_operation(NNAPI_OperationCode.RESHAPE, inputs, outputs)
def add_slice(self, node):
    """Implement aten::slice via NNAPI STRIDED_SLICE.

    Only compile-time constant dim/start/stop/step are supported. A
    full slice (start 0, stop unbounded) degenerates to an identity
    alias. Flexible dims in the output are forwarded from the input
    via the end_mask mechanism.
    """
    assert node.inputsSize() == 5
    assert node.outputsSize() == 1

    in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0))
    _, dim_value = self.get_constant_value(node.inputsAt(1))
    _, start_value = self.get_constant_value(node.inputsAt(2))
    _, stop_value = self.get_constant_value(node.inputsAt(3))
    _, step_value = self.get_constant_value(node.inputsAt(4))

    # Normalize missing / sentinel bounds.
    if start_value is None:
        start_value = 0
    if stop_value is None:
        stop_value = sys.maxsize

    if start_value < 0:
        start_value += in_oper.shape[dim_value]
    elif start_value == sys.maxsize:
        start_value = 0

    # A full-range slice needs no NNAPI op at all.
    if start_value == 0 and stop_value == sys.maxsize:
        self._identity(node)
        return

    if in_oper.shape[dim_value] == 0:
        raise Exception("Unable to slice with flexible shape")

    if stop_value < 0:
        stop_value += in_oper.shape[dim_value]
    elif stop_value == sys.maxsize:
        stop_value = in_oper.shape[dim_value]

    if start_value >= stop_value:
        raise Exception("Slice start value should be less than stop value")

    out_len = (stop_value - start_value) // step_value
    out_shape = tuple(out_len if i == dim_value else dim for i, dim in enumerate(in_oper.shape))
    out_id = self.add_tensor_operand(node.outputsAt(0), in_oper._replace(shape=out_shape))

    # flex inputs: flexible output dims reuse the input's size variable
    # and are marked in end_mask so NNAPI takes the full extent.
    end_mask = 0
    for idx, dim in enumerate(out_shape):
        if dim == 0:
            self.forward_operand_shape(out_id, idx, in_id, idx)
            end_mask |= (1 << idx)

    inputs = [None] * 7
    inputs[0] = in_id
    inputs[1] = self.add_immediate_int_vector(
        [start_value if i == dim_value else 0 for i in range(len(in_oper.shape))])
    inputs[2] = self.add_immediate_int_vector(
        [stop_value if i == dim_value else dim for i, dim in enumerate(in_oper.shape)])
    inputs[3] = self.add_immediate_int_vector(
        [step_value if i == dim_value else 1 for i in range(len(in_oper.shape))])
    inputs[4] = self.add_immediate_int_scalar(0)  # begin mask
    inputs[5] = self.add_immediate_int_scalar(end_mask)
    inputs[6] = self.add_immediate_int_scalar(0)  # shrink axis mask

    outputs = [None] * 1
    outputs[0] = out_id

    self.add_operation(NNAPI_OperationCode.STRIDED_SLICE, inputs, outputs)
def add_size(self, node):
    """Lower aten::size to a compile-time constant.

    The input tensor must have a fixed size along the queried dimension,
    so no NNAPI operation is emitted; the result is recorded as a
    constant value for later nodes to consume.
    """
    assert node.inputsSize() == 2
    assert node.outputsSize() == 1
    _, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0))
    _, dim = self.constants[node.inputsAt(1)]
    size_along_dim = in_oper.shape[dim]
    out_jitval = node.outputsAt(0)
    self.add_constant_value(out_jitval, out_jitval.type(), size_along_dim)
def add_cat(self, node):
    """Lower aten::cat to NNAPI CONCATENATION.

    All inputs must have fixed sizes and matching shapes except along the
    concatenation dimension; the output's size along that dimension is the
    sum of the input sizes.
    """
    assert node.inputsSize() == 2
    assert node.outputsSize() == 1
    tensors = self.tensor_sequences[node.inputsAt(0)]
    _, dim = self.get_constant_value(node.inputsAt(1), "IntType")

    assert len(tensors) > 0
    in_ids = []
    out_oper = None
    out_dim_size = 0
    for inp in tensors:
        in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(inp)
        if out_oper is None:
            # Seed the output operand from the first input; the cat
            # dimension is marked -1 until the total size is known.
            out_shape = change_element(in_oper.shape, dim, -1)
            out_oper = in_oper._replace(shape=out_shape)
        assert in_oper.op_type == out_oper.op_type
        assert in_oper.dim_order == out_oper.dim_order
        # Every dimension except the cat dim must match the first input.
        assert change_element(in_oper.shape, dim, -1) == change_element(out_oper.shape, dim, -1)
        # TODO: Possibly check scale and zero point.
        in_ids.append(in_id)
        # TODO: Possibly support variable-sized inputs.
        out_dim_size += in_oper.shape[dim]

    assert out_oper is not None
    out_oper = out_oper._replace(shape=change_element(out_oper.shape, dim, out_dim_size))

    if in_oper.dim_order == DimOrder.CHANNELS_LAST:
        # Operands are physically NHWC; translate the PyTorch (NCHW)
        # dim index into the corresponding NNAPI axis.
        assert len(out_oper.shape) == 4
        nnapi_dim = [0, 3, 1, 2][dim]
    else:
        nnapi_dim = dim

    inputs = in_ids + [self.add_immediate_int_scalar(nnapi_dim)]

    outputs = [None] * 1
    outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper)

    self.add_operation(NNAPI_OperationCode.CONCATENATION, inputs, outputs)
def add_mean(self, node):
    """Lower aten::mean(input, dim, keepdim, dtype) to NNAPI MEAN.

    ``dim`` must be a constant int list and ``dtype`` must be None.
    For channels-last inputs without keepdim, only full spatial
    reduction (dims {2, 3}) is supported, and the result reverts to
    contiguous dim order.
    """
    assert node.inputsSize() == 4
    assert node.outputsSize() == 1
    in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0))
    dim_ctype, dim = self.get_constant_value(node.inputsAt(1))
    assert dim_ctype.kind() == "ListType"
    assert dim_ctype.getElementType().kind() == "IntType"
    _, keep_dim = self.get_constant_value(node.inputsAt(2), "BoolType")
    # Expect None for dtype
    self.get_constant_value(node.inputsAt(3), "NoneType")

    if in_oper.dim_order == DimOrder.CHANNELS_LAST:
        assert len(in_oper.shape) == 4
        # Translate PyTorch (NCHW) dim indices to NNAPI (NHWC) axes.
        # NOTE(review): a negative d indexes this permutation list from the
        # end; presumably dims are non-negative here — verify.
        nnapi_dim = [[0, 3, 1, 2][d] for d in dim]
    else:
        nnapi_dim = dim

    # Normalize negative dims before deciding which axes collapse.
    collapsed_dims = set()
    for d in dim:
        if d < 0:
            d += len(in_oper.shape)
        collapsed_dims.add(d)

    if in_oper.dim_order == DimOrder.CHANNELS_LAST and not keep_dim:
        assert collapsed_dims.issuperset({2, 3})
        out_dim_order = DimOrder.PRESUMED_CONTIGUOUS
    else:
        out_dim_order = in_oper.dim_order

    out_shape = []
    for i, s in enumerate(in_oper.shape):
        if i not in collapsed_dims:
            out_shape.append(s)
        elif keep_dim:
            out_shape.append(1)

    # Store the shape as a tuple for consistency with every other operand.
    # Shape comparisons elsewhere (e.g. add_cat) compare against tuples,
    # and a list never compares equal to a tuple.
    out_oper = in_oper._replace(shape=tuple(out_shape), dim_order=out_dim_order)

    inputs = [None] * 3
    inputs[0] = in_id
    inputs[1] = self.add_immediate_int_vector(nnapi_dim)
    inputs[2] = self.add_immediate_int_scalar(keep_dim)

    outputs = [None] * 1
    outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper)

    self.add_operation(NNAPI_OperationCode.MEAN, inputs, outputs)
def add_quantize(self, node):
    """Lower aten::quantize_per_tensor to NNAPI QUANTIZE.

    The input must already be in channels-last dim order, and the
    requested dtype must be quint8.
    """
    assert node.inputsSize() == 4
    assert node.outputsSize() == 1
    in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0))
    if in_oper.dim_order != DimOrder.CHANNELS_LAST:
        raise Exception(
            "Most hardware backends prefer NHWC quantized tensors. "
            "Try setting `t.nnapi_nhwc = True` on your tensor inputs. ")
    _, scale = self.get_constant_value(node.inputsAt(1), "FloatType")
    _, zero_point = self.get_constant_value(node.inputsAt(2), "IntType")
    _, scalar_type = self.get_constant_value(node.inputsAt(3), "IntType")
    if scalar_type != TorchScalarTypes.QUINT8.value:
        raise Exception(
            "PyTorch NNAPI export only supports quantized tensors "
            "with the quint8 dtype.")

    # Same shape/dim order as the input, but asymmetric-quantized.
    quantized_oper = in_oper._replace(
        op_type=NNAPI_OperandCode.TENSOR_QUANT8_ASYMM,
        scale=scale,
        zero_point=zero_point,
    )
    out_id = self.add_tensor_operand(node.outputsAt(0), quantized_oper)
    self.add_operation(NNAPI_OperationCode.QUANTIZE, [in_id], [out_id])
def add_dequantize(self, node):
    """Lower aten::dequantize to NNAPI DEQUANTIZE (float32 output)."""
    assert node.inputsSize() == 1
    assert node.outputsSize() == 1
    in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0))

    # Output keeps the input's shape but becomes plain float32 with
    # neutral quantization parameters.
    float_oper = in_oper._replace(
        op_type=NNAPI_OperandCode.TENSOR_FLOAT32,
        scale=0.0,
        zero_point=0,
    )
    out_id = self.add_tensor_operand(node.outputsAt(0), float_oper)
    self.add_operation(NNAPI_OperationCode.DEQUANTIZE, [in_id], [out_id])
def add_pointwise_simple_unary_op(self, node, opcode):
    """Lower a one-input, one-output elementwise op.

    The output operand mirrors the input's shape, type, and
    quantization; flexible (size-0) dimensions are forwarded.
    """
    assert node.inputsSize() == 1
    assert node.outputsSize() == 1

    in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0))
    out_id = self.add_tensor_operand(node.outputsAt(0), in_oper)

    for axis, extent in enumerate(in_oper.shape):
        if extent == 0:
            self.forward_operand_shape(out_id, axis, in_id, axis)

    self.add_operation(opcode, [in_id], [out_id])
def _do_add_binary(self, node, opcode, fuse_code, *, qparams=None):
    """Helper for pointwise binary broadcast ops with superfluous extra args.

    Emits `opcode` over the node's first two tensor inputs with the given
    fuse code.  Shapes are broadcast; `qparams` (scale, zero_point), if
    given, override the output operand's quantization parameters.
    """
    assert node.outputsSize() == 1
    assert node.inputsAt(0).type().kind() == "TensorType"
    assert node.inputsAt(1).type().kind() == "TensorType"

    # TODO: Should support constant as either operand.
    in0_id, in0_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0))
    in1_id, in1_oper = self.get_tensor_operand_by_jitval(node.inputsAt(1))

    assert in0_oper.op_type == in1_oper.op_type
    in0_id, in0_oper, in1_id, in1_oper = self.transpose_for_broadcast(
        in0_id, in0_oper, in1_id, in1_oper)
    # NOTE: PyTorch and NNAPI have the same broadcast semantics.
    out_shape = broadcast_shapes(in0_oper.shape, in1_oper.shape)
    out_oper = in0_oper._replace(shape=out_shape)
    if qparams is not None:
        scale, zp = qparams
        out_oper = out_oper._replace(scale=scale, zero_point=zp)

    out_id = self.add_tensor_operand(node.outputsAt(0), out_oper)
    # Forward flexible (size-0) dims: a fixed size of 1 broadcasts
    # against a flexible dim, so the output tracks the flexible side;
    # when both sides are flexible they must agree at runtime.
    for idx, (d0, d1) in enumerate(zip(in0_oper.shape, in1_oper.shape)):
        if d0 == 1 and d1 == 0:
            self.forward_operand_shape(out_id, idx, in1_id, idx)
        elif d0 == 0 and d1 == 1:
            self.forward_operand_shape(out_id, idx, in0_id, idx)
        elif d0 == 0 and d1 == 0:
            self.flexible_shape_computation_lines.append(
                f"assert {flex_name(in0_id, idx)} == {flex_name(in1_id, idx)}"
            )
            self.forward_operand_shape(out_id, idx, in0_id, idx)

    inputs = [None] * 3
    inputs[0] = in0_id
    inputs[1] = in1_id
    inputs[2] = self.add_immediate_int_scalar(fuse_code)

    outputs = [None] * 1
    outputs[0] = out_id

    self.add_operation(opcode, inputs, outputs)
def add_pointwise_simple_binary_broadcast_op(self, node, opcode, fuse_code):
    """Lower a plain two-input elementwise op (e.g. mul) with broadcasting."""
    assert node.inputsSize() == 2
    self._do_add_binary(node, opcode, fuse_code)
def add_add_sub_op(self, node, opcode, fuse_code):
    """Lower aten::add/aten::sub; the trailing alpha argument must be 1."""
    assert node.inputsSize() == 3

    _, alpha_value = self.get_constant_value(node.inputsAt(2), "IntType")
    if alpha_value != 1:
        raise Exception("NNAPI does not support add/sub with alpha.")

    self._do_add_binary(node, opcode, fuse_code)
def add_qadd(self, node, opcode, fuse_code):
    """Lower a quantized binary add; inputs 2-3 carry the output qparams."""
    assert node.inputsSize() == 4

    _, out_scale = self.get_constant_value(node.inputsAt(2), "FloatType")
    _, out_zero_point = self.get_constant_value(node.inputsAt(3), "IntType")

    self._do_add_binary(node, opcode, fuse_code, qparams=(out_scale, out_zero_point))
def add_softmax(self, node):
    """Lower aten::softmax to NNAPI SOFTMAX with beta fixed at 1.0."""
    assert node.inputsSize() == 3
    in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0))
    _, softmax_dim = self.get_constant_value(node.inputsAt(1), "IntType")

    out_id = self.add_tensor_operand(node.outputsAt(0), in_oper)
    # Forward flexible (size-0) dimensions through to the output.
    for axis, extent in enumerate(in_oper.shape):
        if extent == 0:
            self.forward_operand_shape(out_id, axis, in_id, axis)

    beta_id = self.add_immediate_float_scalar(1.0)  # exponent scaling factor
    dim_id = self.add_immediate_int_scalar(softmax_dim)
    self.add_operation(NNAPI_OperationCode.SOFTMAX, [in_id, beta_id, dim_id], [out_id])
def add_hardtanh(self, node):
    """Lower aten::hardtanh.

    NNAPI has no general clamp, so only the two special cases with
    dedicated opcodes are supported: (-1, 1) -> RELU1 and (0, 6) -> RELU6.
    """
    assert node.inputsSize() == 3
    assert node.outputsSize() == 1
    in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0))
    _, min_val = self.get_constant_value(node.inputsAt(1), "FloatType")
    _, max_val = self.get_constant_value(node.inputsAt(2), "FloatType")

    if (min_val, max_val) == (-1, 1):
        opcode = NNAPI_OperationCode.RELU1
    elif (min_val, max_val) == (0, 6):
        opcode = NNAPI_OperationCode.RELU6
    else:
        raise Exception("NNAPI only supports hardtanh with args (-1, 1) or (0, 6).")

    out_id = self.add_tensor_operand(node.outputsAt(0), in_oper)
    self.add_operation(opcode, [in_id], [out_id])
def add_prelu_op(self, node):
    """Lower aten::prelu to NNAPI PRELU.

    The weight must be a 1-D constant tensor; per-channel weights are
    only supported for channels-last inputs.
    """
    assert node.inputsSize() == 2
    assert node.outputsSize() == 1
    assert node.inputsAt(0).type().kind() == "TensorType"
    assert node.inputsAt(1).type().kind() == "TensorType"

    in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0))
    w_id, w_oper = self.get_tensor_operand_for_weight(node.inputsAt(1))
    assert len(w_oper.shape) == 1
    assert w_oper.shape[0] > 0
    if w_oper.shape[0] > 1:
        if in_oper.use_nchw():
            # TODO: Support this by adding trailing 1 dims.
            raise Exception("Per-channel PReLU only supports channels_last right now.")

    out_id = self.add_tensor_operand(node.outputsAt(0), in_oper)
    for dim, size in enumerate(in_oper.shape):
        if size > 0:
            pass  # fixed size: nothing to forward
        elif dim <= 1:
            # Batch and channel dims must be known up front.
            raise Exception("PReLU requires fixed size for dim 0 and dim 1.")
        else:
            # Flexible spatial dims are forwarded from the input.
            self.forward_operand_shape(out_id, dim, in_id, dim)

    inputs = [None] * 2
    inputs[0] = in_id
    inputs[1] = w_id

    outputs = [None] * 1
    outputs[0] = out_id

    self.add_operation(NNAPI_OperationCode.PRELU, inputs, outputs)
def add_pool2d_node(self, node, opcode):
    """Lower a 6-input pooling op (e.g. aten::max_pool2d) to `opcode`.

    Dilation must be 1 and the input must have fixed sizes.
    """
    assert node.inputsSize() == 6
    assert node.outputsSize() == 1
    image, kernel, stride, padding, dilation, ceil_mode = node.inputs()

    # Fall back to the kernel size when stride is absent.
    # NOTE(review): `stride` is a jitval here — presumably a falsy/None
    # value signals an omitted stride; verify against the JIT frontend.
    stride = stride or kernel

    # TODO: Validate ceil_mode semantics.

    args = self.get_conv_pool_args_2d_from_jit(self.get_size_arg(kernel), stride, padding, dilation)
    if args.dilation_h != 1 or args.dilation_w != 1:
        raise Exception("NNAPI does not support dilated pooling.")

    image_id, image_oper = self.get_tensor_operand_by_jitval_fixed_size(image)
    assert len(image_oper.shape) == 4

    out_shape = get_conv_pool_shape(image_oper.shape, args, image_oper.shape[1], False)
    use_nchw = image_oper.use_nchw()

    inputs = [None] * 11
    inputs[0] = image_id
    inputs[1] = self.add_immediate_int_scalar(args.pad_l)
    inputs[2] = self.add_immediate_int_scalar(args.pad_r)
    inputs[3] = self.add_immediate_int_scalar(args.pad_t)
    inputs[4] = self.add_immediate_int_scalar(args.pad_b)
    inputs[5] = self.add_immediate_int_scalar(args.stride_w)
    inputs[6] = self.add_immediate_int_scalar(args.stride_h)
    inputs[7] = self.add_immediate_int_scalar(args.kernel_w)
    inputs[8] = self.add_immediate_int_scalar(args.kernel_h)
    inputs[9] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE)
    inputs[10] = self.add_immediate_bool_scalar(use_nchw)

    outputs = [None] * 1
    outputs[0] = self.add_tensor_operand(node.outputsAt(0), image_oper._replace(shape=out_shape))

    self.add_operation(opcode, inputs, outputs)
def add_avg_pool2d(self, node):
    """Lower aten::avg_pool2d to NNAPI AVERAGE_POOL_2D.

    count_include_pad must be True and divisor_override must be unset;
    flexible H/W input sizes are handled.
    """
    assert node.inputsSize() == 7
    assert node.outputsSize() == 1
    image, kernel, stride, padding, ceil_mode, count_include_pad, divisor_override = node.inputs()

    _, count_include_pad_value = self.get_constant_value(count_include_pad)
    _, divisor_override_value = self.get_constant_value(divisor_override)
    if not count_include_pad_value or divisor_override_value:
        raise Exception("NNAPI doesn't support count_include_pad=False or divisor_override")

    args = self.get_conv_pool_args_2d_from_jit(self.get_size_arg(kernel), stride, padding)

    image_id, image_oper = self.get_tensor_operand_by_jitval(image)
    assert len(image_oper.shape) == 4

    out_shape = get_conv_pool_shape(image_oper.shape, args, image_oper.shape[1], False)
    use_nchw = image_oper.use_nchw()

    inputs = [None] * 11
    inputs[0] = image_id
    inputs[1] = self.add_immediate_int_scalar(args.pad_l)
    inputs[2] = self.add_immediate_int_scalar(args.pad_r)
    inputs[3] = self.add_immediate_int_scalar(args.pad_t)
    inputs[4] = self.add_immediate_int_scalar(args.pad_b)
    inputs[5] = self.add_immediate_int_scalar(args.stride_w)
    inputs[6] = self.add_immediate_int_scalar(args.stride_h)
    inputs[7] = self.add_immediate_int_scalar(args.kernel_w)
    inputs[8] = self.add_immediate_int_scalar(args.kernel_h)
    inputs[9] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE)
    inputs[10] = self.add_immediate_bool_scalar(use_nchw)

    outputs = [None] * 1
    out_id = self.add_tensor_operand(node.outputsAt(0), image_oper._replace(shape=out_shape))
    # Emit runtime shape computation for any flexible input dims.
    self._handle_conv_pool_flexible_input(out_id, image, args, False)
    outputs[0] = out_id

    self.add_operation(NNAPI_OperationCode.AVERAGE_POOL_2D, inputs, outputs)
def add_adaptive_avg_pool2d(self, node):
    """Lower aten::adaptive_avg_pool2d to AVERAGE_POOL_2D.

    Only output size (1, 1) is supported; it is implemented as a single
    pooling window covering the input's full spatial extent.
    """
    assert node.inputsSize() == 2
    assert node.outputsSize() == 1

    image_id, image_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0))
    assert len(image_oper.shape) == 4

    size_ctype, size_arg = self.get_constant_value(node.inputsAt(1))
    assert size_ctype.kind() == "ListType"
    assert size_ctype.getElementType().kind() == "IntType"
    if size_arg != [1, 1]:
        raise Exception("NNAPI only supports adaptive_avg_pool2d with output size (1, 1).")

    out_shape = image_oper.shape[0:2] + tuple(size_arg)
    use_nchw = image_oper.use_nchw()

    inputs = [None] * 11
    inputs[0] = image_id
    inputs[1] = self.add_immediate_int_scalar(0)  # pad_l
    inputs[2] = self.add_immediate_int_scalar(0)  # pad_r
    inputs[3] = self.add_immediate_int_scalar(0)  # pad_t
    inputs[4] = self.add_immediate_int_scalar(0)  # pad_b
    inputs[5] = self.add_immediate_int_scalar(1)  # stride_w
    inputs[6] = self.add_immediate_int_scalar(1)  # stride_h
    inputs[7] = self.add_immediate_int_scalar(image_oper.shape[3])  # kernel_w = input W
    inputs[8] = self.add_immediate_int_scalar(image_oper.shape[2])  # kernel_h = input H
    inputs[9] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE)
    inputs[10] = self.add_immediate_bool_scalar(use_nchw)

    outputs = [None] * 1
    outputs[0] = self.add_tensor_operand(node.outputsAt(0), image_oper._replace(shape=out_shape))

    self.add_operation(NNAPI_OperationCode.AVERAGE_POOL_2D, inputs, outputs)
def add_upsample_nearest2d(self, node):
    """Lower aten::upsample_nearest2d to NNAPI RESIZE_NEAREST_NEIGHBOR.

    Exactly one of the size argument (int list) or the scale argument
    (float list) must be given; batch and channel dims must be fixed,
    while H/W may be flexible.
    """
    assert node.inputsSize() == 3
    assert node.outputsSize() == 1
    image, size_jit, scale_jit = node.inputs()
    size_ctype, size_arg = self.get_constant_value(size_jit)
    scale_ctype, scale_arg = self.get_constant_value(scale_jit)

    image_id, image_oper = self.get_tensor_operand_by_jitval(image)
    assert len(image_oper.shape) == 4

    if size_ctype.kind() != "NoneType" and scale_ctype.kind() != "NoneType":
        raise Exception("Size and scale cannot both be non-None.")
    elif size_ctype.kind() != "NoneType":
        # Explicit output size: pass H/W as int scalars.
        assert size_ctype.kind() == "ListType"
        assert size_ctype.getElementType().kind() == "IntType"
        assert scale_ctype.kind() == "NoneType"
        assert scale_arg is None
        assert isinstance(size_arg, list)
        assert size_arg
        assert all(isinstance(val, int) for val in size_arg)
        if len(size_arg) == 1:
            # A single value applies to both H and W.
            size_arg = size_arg * 2
        assert len(size_arg) == 2
        out_h = size_arg[0]
        out_w = size_arg[1]
        arg_h = self.add_immediate_int_scalar(out_h)
        arg_w = self.add_immediate_int_scalar(out_w)
    elif scale_ctype.kind() != "NoneType":
        # Scale factors: pass H/W scales as float scalars.
        assert scale_ctype.kind() == "ListType"
        assert scale_ctype.getElementType().kind() == "FloatType"
        assert size_ctype.kind() == "NoneType"
        assert size_arg is None
        assert isinstance(scale_arg, list)
        assert scale_arg
        assert all(isinstance(val, float) for val in scale_arg)
        if len(scale_arg) == 1:
            # A single value applies to both H and W.
            scale_arg = scale_arg * 2
        assert len(scale_arg) == 2
        out_h = int(scale_arg[0] * image_oper.shape[2])
        out_w = int(scale_arg[1] * image_oper.shape[3])
        arg_h = self.add_immediate_float_scalar(scale_arg[0])
        arg_w = self.add_immediate_float_scalar(scale_arg[1])
    else:
        raise Exception("Size and scale cannot both be None.")

    out_shape = (image_oper.shape[0], image_oper.shape[1], out_h, out_w)
    use_nchw = image_oper.use_nchw()
    out_id = self.add_tensor_operand(node.outputsAt(0), image_oper._replace(shape=out_shape))

    if image_oper.shape[0] == 0 or image_oper.shape[1] == 0:
        raise Exception("Flexible batch or channels not supported")

    # Handle variable input size
    for dim in (2, 3):  # h, w indices
        if image_oper.shape[dim] == 0:
            if size_ctype.kind() != "NoneType":
                self.compute_operand_shape(out_id, dim, size_arg[dim - 2])
            elif scale_ctype.kind() != "NoneType":
                self.compute_operand_shape(out_id, dim, f"int({scale_arg[dim - 2]} * {flex_name(image_id, dim)})")
            else:
                raise Exception("Size and scale cannot both be None.")

    inputs = [None] * 4
    inputs[0] = image_id
    inputs[1] = arg_w
    inputs[2] = arg_h
    inputs[3] = self.add_immediate_bool_scalar(use_nchw)

    outputs = [None] * 1
    outputs[0] = out_id

    self.add_operation(NNAPI_OperationCode.RESIZE_NEAREST_NEIGHBOR, inputs, outputs)
def add_addmm(self, node):
    """Lower aten::addmm(bias, input, weight, beta, alpha) to FULLY_CONNECTED.

    NNAPI's fully-connected op has no alpha/beta scaling, so both must
    be the constant 1.
    """
    assert node.inputsSize() == 5
    assert node.outputsSize() == 1
    jit_bias, jit_input, jit_weight, jit_beta, jit_alpha = node.inputs()

    for scale_jitval in (jit_beta, jit_alpha):
        scale_ctype, scale_value = self.get_constant_value(scale_jitval)
        assert scale_ctype.kind() in ("IntType", "FloatType")
        if scale_value != 1:
            raise Exception("NNAPI Fully-Connected does not support alpha and beta.")

    # addmm stores the weight transposed relative to FULLY_CONNECTED.
    self.add_addmm_or_linear(node, True, jit_input, jit_weight, jit_bias)
def add_linear(self, node):
    """Lower aten::linear to FULLY_CONNECTED (weight used as stored)."""
    assert node.inputsSize() == 3
    assert node.outputsSize() == 1
    jit_input, jit_weight, jit_bias = node.inputs()

    self.add_addmm_or_linear(node, False, jit_input, jit_weight, jit_bias)
def add_addmm_or_linear(self, node, transpose_weight, jit_input, jit_weight, jit_bias):
    """Shared lowering for addmm/linear to NNAPI FULLY_CONNECTED.

    transpose_weight: True for addmm (the stored weight is transposed
    before use); False for linear.  Input must be 2-D, bias 1-D.
    """
    input_id, input_oper = self.get_tensor_operand_by_jitval_fixed_size(jit_input)
    bias_id, bias_oper = self.get_tensor_operand_for_weight(jit_bias)

    assert len(input_oper.shape) == 2
    assert len(bias_oper.shape) == 1

    # TODO: Transform at load time to share weights with CPU model.
    _, weight_tensor = self.get_constant_value(jit_weight, "TensorType")
    assert len(weight_tensor.shape) == 2
    if transpose_weight:
        nnapi_weight_tensor = weight_tensor.t().contiguous()
    else:
        nnapi_weight_tensor = weight_tensor.contiguous()
    weight_id = self.add_tensor_operand_for_weight(nnapi_weight_tensor)
    weight_oper = self.operands[weight_id]

    # Output: (batch, num_units), where num_units is the weight's dim 0
    # after any transpose above.
    out_shape = (input_oper.shape[0], weight_oper.shape[0])

    inputs = [None] * 4
    inputs[0] = input_id
    inputs[1] = weight_id
    inputs[2] = bias_id
    inputs[3] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE)

    outputs = [None] * 1
    outputs[0] = self.add_tensor_operand(node.outputsAt(0), input_oper._replace(shape=out_shape))

    self.add_operation(NNAPI_OperationCode.FULLY_CONNECTED, inputs, outputs)
def add_qlinear(self, node):
    """Lower quantized::linear to NNAPI FULLY_CONNECTED.

    Unpacks the prepacked weight/bias, converts qint8 weights to quint8
    (NNAPI's asymmetric format), quantizes the bias to qint32 with scale
    input_scale * weight_scale, and requires the requantization
    multiplier to be < 1 (a hardware-backend constraint).
    """
    assert node.inputsSize() == 4
    assert node.outputsSize() == 1
    (
        jit_input,
        jit_packed_weight,
        jit_scale,
        jit_zero_point,
    ) = node.inputs()

    input_id, input_oper = self.get_tensor_operand_by_jitval_fixed_size(jit_input)
    # TODO: Support automatic reshape
    assert len(input_oper.shape) == 2

    _, out_scale = self.get_constant_value(jit_scale, "FloatType")
    _, out_zero_point = self.get_constant_value(jit_zero_point, "IntType")
    weight_ctype, packed_weight = self.get_constant_value(jit_packed_weight)
    assert weight_ctype.name() == "LinearPackedParamsBase"
    raw_weight, raw_bias = packed_weight.__getstate__()[0]
    assert raw_bias is not None

    assert len(raw_weight.shape) == 2
    assert len(raw_bias.shape) == 1
    assert raw_bias.shape[0] == raw_weight.shape[0]
    assert raw_weight.shape[1] == input_oper.shape[1]

    assert raw_weight.qscheme() == torch.per_tensor_affine
    if raw_weight.dtype == torch.quint8:
        unsigned_weight = raw_weight
    else:
        assert raw_weight.dtype == torch.qint8
        # Shift qint8 into quint8 range: add 128 to both the integer
        # representation and the zero point (same real values).
        unsigned_weight = torch._make_per_tensor_quantized_tensor(
            (raw_weight.int_repr().int() + 128).to(torch.uint8),
            scale=raw_weight.q_scale(),
            zero_point=raw_weight.q_zero_point() + 128)
    weight_scale = unsigned_weight.q_scale()
    bias_scale = input_oper.scale * weight_scale
    int_bias = torch.quantize_per_tensor(raw_bias, bias_scale, 0, torch.qint32)
    bias_id = self.add_tensor_operand_for_weight(int_bias)

    multiplier = input_oper.scale * weight_scale / out_scale
    assert multiplier > 0
    if multiplier >= 1:
        # Message fixed: this is the fully-connected path (the old text
        # said "convolution"), and the guard also triggers at exactly 1.
        raise Exception(
            "Quantized fully-connected multiplier is greater than or equal to 1. "
            "This is supported by NNAPI, but not by most hardware backends. "
            "Try training a model without quantization-aware training. ")

    # TODO: Transform at load time to share weights with CPU model.
    nnapi_weight_tensor = unsigned_weight.contiguous()
    weight_id = self.add_tensor_operand_for_weight(nnapi_weight_tensor)
    weight_oper = self.operands[weight_id]

    out_shape = (input_oper.shape[0], weight_oper.shape[0])
    out_oper = input_oper._replace(
        shape=out_shape,
        scale=out_scale,
        zero_point=out_zero_point,
    )

    inputs = [None] * 4
    inputs[0] = input_id
    inputs[1] = weight_id
    inputs[2] = bias_id
    inputs[3] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE)

    outputs = [None] * 1
    outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper)

    self.add_operation(NNAPI_OperationCode.FULLY_CONNECTED, inputs, outputs)
def get_optional_bias(self, jit_bias, weight_tensor):
    """Return (operand_id, operand) for a conv/linear bias.

    When jit_bias is the None constant, synthesize a zero bias whose
    length matches the weight's output-channel count (dim 0) and whose
    dtype matches the weight.
    """
    ctype, _ = self.get_constant_value(jit_bias)
    if ctype.kind() != "NoneType":
        return self.get_tensor_operand_for_weight(jit_bias)

    zero_bias = torch.zeros(weight_tensor.size()[0], dtype=weight_tensor.dtype)
    zero_bias_id = self.add_tensor_operand_for_weight(zero_bias)
    return zero_bias_id, self.operands[zero_bias_id]
def add_conv2d(self, node):
    """Lower aten::conv2d (7-input signature) to an NNAPI convolution."""
    assert node.inputsSize() == 7
    assert node.outputsSize() == 1

    (
        jit_image,
        jit_weight,
        jit_bias,
        jit_stride,
        jit_pad,
        jit_dilation,
        jit_groups,
    ) = node.inputs()

    _, weight_tensor = self.get_constant_value(jit_weight, "TensorType")
    # A missing bias is replaced with zeros matching the weight.
    bias_id, bias_oper = self.get_optional_bias(jit_bias, weight_tensor)
    args = self.get_conv_pool_args_2d_from_jit(
        weight_tensor.shape[2:4], jit_stride, jit_pad, jit_dilation, jit_groups)

    return self.add_conv2d_common(
        node.outputsAt(0),
        0.0,  # out_scale (float path: unquantized)
        0,  # out_zero_point
        jit_image,
        weight_tensor,
        bias_id,
        args,
        False,  # transpose
        NNAPI_FuseCode.FUSED_NONE,
    )
def add_conv_underscore(self, node):
    """Lower aten::_convolution (13-input signature) to an NNAPI convolution.

    Only the standard (non-transposed) path is supported; the trailing
    benchmark/deterministic/cudnn flags are ignored.
    """
    assert node.inputsSize() == 13
    assert node.outputsSize() == 1

    (
        jit_image,
        jit_weight,
        jit_bias,
        jit_stride,
        jit_pad,
        jit_dilation,
        jit_transpose,
        _,
        jit_groups,
        _,
        _,
        _,
        _,
    ) = node.inputs()

    # Previously jit_transpose was silently ignored ("XXX check
    # jit_transpose"), so a transposed convolution would have been
    # lowered as a regular one.  Fail loudly instead.
    _, transpose = self.get_constant_value(jit_transpose, "BoolType")
    if transpose:
        raise Exception("NNAPI does not support transposed convolution via aten::_convolution.")

    _, weight_tensor = self.get_constant_value(jit_weight, "TensorType")
    bias_id, bias_oper = self.get_optional_bias(jit_bias, weight_tensor)
    args = self.get_conv_pool_args_2d_from_jit(
        weight_tensor.shape[2:4], jit_stride, jit_pad, jit_dilation, jit_groups)

    return self.add_conv2d_common(
        node.outputsAt(0),
        0.0,  # out_scale (float path: unquantized)
        0,  # out_zero_point
        jit_image,
        weight_tensor,
        bias_id,
        args,
        False,  # transpose
        NNAPI_FuseCode.FUSED_NONE,
    )
def add_log_softmax(self, node):
    """Lower aten::log_softmax to NNAPI LOG_SOFTMAX (beta fixed at 1)."""
    assert node.inputsSize() == 3
    assert node.outputsSize() == 1

    (
        jit_input,
        jit_dim,
        jit_half_to_float
    ) = node.inputs()
    input_id, input_oper = self.get_tensor_operand_by_jitval_fixed_size(jit_input)
    _, dim = self.get_constant_value(jit_dim, "IntType")

    # NOTE(review): jit_half_to_float is accepted but never validated here.
    out_shape = input_oper.shape

    inputs = [None] * 3
    inputs[0] = input_id
    # specifying 1 as the scaling factor for the exponent, beta
    inputs[1] = self.add_immediate_float_scalar(1)
    inputs[2] = self.add_immediate_int_scalar(dim)

    outputs = [None] * 1
    outputs[0] = self.add_tensor_operand(node.outputsAt(0), input_oper._replace(shape=out_shape))
    self.add_operation(NNAPI_OperationCode.LOG_SOFTMAX, inputs, outputs)
def add_qconv2d(self, node, fuse_code):
    """Lower quantized::conv2d (and fused relu variants) to NNAPI.

    Unpacks the Conv2dPackedParamsBase, converts qint8 weights to quint8,
    quantizes the bias to qint32, checks the requantization multiplier,
    and delegates to add_conv2d_common.
    """
    assert node.inputsSize() == 4
    assert node.outputsSize() == 1

    (
        jit_image,
        jit_packed_weight,
        jit_scale,
        jit_zero_point,
    ) = node.inputs()

    _, out_scale = self.get_constant_value(jit_scale, "FloatType")
    _, out_zero_point = self.get_constant_value(jit_zero_point, "IntType")
    weight_ctype, packed_weight = self.get_constant_value(jit_packed_weight)
    assert weight_ctype.name() == "Conv2dPackedParamsBase"
    (
        pack_version,
        tensors,
        opt_tensors,
    ) = packed_weight.__getstate__()[0]
    assert pack_version == "2"
    packed_config, raw_weight = tensors
    raw_bias, = opt_tensors
    assert raw_bias is not None
    args = self.get_conv_pool_args_2d_from_pack(raw_weight.shape[2:4], packed_config)

    assert raw_weight.qscheme() == torch.per_tensor_affine
    if raw_weight.dtype == torch.quint8:
        unsigned_weight = raw_weight
    else:
        assert raw_weight.dtype == torch.qint8
        # Shift qint8 into quint8 range: add 128 to both the integer
        # representation and the zero point (same real values).
        unsigned_weight = torch._make_per_tensor_quantized_tensor(
            (raw_weight.int_repr().int() + 128).to(torch.uint8),
            scale=raw_weight.q_scale(),
            zero_point=raw_weight.q_zero_point() + 128)
    weight_scale = unsigned_weight.q_scale()
    _, image_oper = self.get_tensor_operand_by_jitval(jit_image)
    # NNAPI requires bias scale == input_scale * weight_scale.
    bias_scale = image_oper.scale * weight_scale
    int_bias = torch.quantize_per_tensor(raw_bias, bias_scale, 0, torch.qint32)
    bias_id = self.add_tensor_operand_for_weight(int_bias)

    multiplier = image_oper.scale * weight_scale / out_scale
    assert multiplier > 0
    if multiplier >= 1:
        raise Exception(
            "Quantized convolution multiplier is greater than 1. "
            "This is supported by NNAPI, but not by most hardware backends. "
            "Try training a model without quantization-aware training. ")

    return self.add_conv2d_common(
        node.outputsAt(0),
        out_scale,
        out_zero_point,
        jit_image,
        unsigned_weight,
        bias_id,
        args,
        False,  # transpose
        fuse_code,
    )
def add_conv2d_common(
        self,
        jit_out,
        out_scale,
        out_zero_point,
        jit_image,
        weight_tensor,
        bias_id,
        args,
        transpose,
        fuse_code):
    """Shared lowering for all 2-D convolutions.

    Chooses CONV_2D, DEPTHWISE_CONV_2D (groups == in_channels), or
    TRANSPOSE_CONV_2D, permutes the weight into NNAPI layout, validates
    operand types/quantization, and emits the operation.  Group
    convolution (1 < groups < in_channels) is not supported.
    """
    image_id, image_oper = self.get_tensor_operand_by_jitval(jit_image)
    in_c = image_oper.shape[1]

    if args.group == 1:
        # Full convolution
        depthwise = False
        # NNAPI conv weight layout: (out_c, kern_h, kern_w, in_c).
        weight_permutation = (0, 2, 3, 1)
    elif args.group == in_c:
        # Depthwise convolution
        depthwise = True
        # NNAPI depthwise weight layout: (1, kern_h, kern_w, out_c).
        weight_permutation = (1, 2, 3, 0)
    else:
        raise Exception("Group convolution not supported yet.")

    # TODO: Transform at load time to share weights with CPU model.
    nnapi_weight_tensor = weight_tensor.permute(*weight_permutation).contiguous()
    weight_id = self.add_tensor_operand_for_weight(nnapi_weight_tensor)
    weight_oper = self.operands[weight_id]

    bias_oper = self.operands[bias_id]

    if image_oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32:
        assert weight_oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32
        assert bias_oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32
    elif image_oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM:
        assert weight_oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM
        assert bias_oper.op_type == NNAPI_OperandCode.TENSOR_INT32
        # NNAPI requires bias_scale == input_scale * weight_scale.
        assert approx_equal(image_oper.scale * weight_oper.scale, bias_oper.scale)
        assert bias_oper.zero_point == 0
    else:
        raise Exception(
            "Unsupported input type for conv2d: {}"
            .format(image_oper.op_type))

    assert len(image_oper.shape) == 4
    assert len(weight_oper.shape) == 4
    assert len(bias_oper.shape) == 1

    if depthwise:
        # Depthwise convolution
        one, kern_h, kern_w, out_c = weight_oper.shape
        assert one == 1
        assert out_c % in_c == 0
        channel_multiplier = out_c // in_c
        assert channel_multiplier == 1  # Don't support multiplier
        assert out_c == in_c
    else:
        # Full convolution
        kern_nf, kern_h, kern_w, kern_d = weight_oper.shape
        out_c = kern_nf
        assert kern_d == in_c

    assert out_c == bias_oper.shape[0]

    use_nchw = image_oper.use_nchw()

    if depthwise:
        num_args = 12
        opcode = NNAPI_OperationCode.DEPTHWISE_CONV_2D
    else:
        num_args = 11
        if transpose:
            opcode = NNAPI_OperationCode.TRANSPOSE_CONV_2D
        else:
            opcode = NNAPI_OperationCode.CONV_2D

    inputs = [None] * num_args
    inputs[0] = image_id
    inputs[1] = weight_id
    inputs[2] = bias_id
    inputs[3] = self.add_immediate_int_scalar(args.pad_l)
    inputs[4] = self.add_immediate_int_scalar(args.pad_r)
    inputs[5] = self.add_immediate_int_scalar(args.pad_t)
    inputs[6] = self.add_immediate_int_scalar(args.pad_b)
    inputs[7] = self.add_immediate_int_scalar(args.stride_w)
    inputs[8] = self.add_immediate_int_scalar(args.stride_h)
    if depthwise:
        # Depthwise takes an extra "depth multiplier" argument (fixed at 1).
        inputs[9] = self.add_immediate_int_scalar(1)
        inputs[10] = self.add_immediate_int_scalar(fuse_code)
        inputs[11] = self.add_immediate_bool_scalar(use_nchw)
    else:
        inputs[9] = self.add_immediate_int_scalar(fuse_code)
        inputs[10] = self.add_immediate_bool_scalar(use_nchw)

    outputs = [None] * 1
    out_shape = get_conv_pool_shape(image_oper.shape, args, out_c, transpose)
    out_oper = image_oper._replace(
        shape=out_shape,
        scale=out_scale,
        zero_point=out_zero_point,
    )
    out_id = self.add_tensor_operand(jit_out, out_oper)
    # Emit runtime shape computation for any flexible H/W input dims.
    self._handle_conv_pool_flexible_input(out_id, jit_image, args, transpose)

    outputs[0] = out_id
    self.add_operation(opcode, inputs, outputs)
def _handle_conv_pool_flexible_input(self, out_id, jit_image, args, transpose):
    """Emit runtime shape formulas for flexible conv/pool inputs.

    Batch and channel dims must be fixed; for each flexible (size-0)
    H/W dim, record a Python expression computing the output size from
    the input size using the standard (or transposed) conv arithmetic.
    """
    image_id, image_oper = self.get_tensor_operand_by_jitval(jit_image)
    batch, in_ch, in_h, in_w = image_oper.shape
    if batch == 0 or in_ch == 0:
        raise Exception("Only H & W can be flexible")

    # H & W
    if transpose:
        # Transposed conv: out = (in - 1) * stride + kernel - pad_total.
        if in_h == 0:
            self.compute_operand_shape(
                out_id,
                2,
                f"({flex_name(image_id, 2)} - 1) * {args.stride_h} + {args.kernel_h} - {args.pad_t} - {args.pad_b}"
            )
        if in_w == 0:
            self.compute_operand_shape(
                out_id,
                3,
                f"({flex_name(image_id, 3)} - 1) * {args.stride_w} + {args.kernel_w} - {args.pad_l} - {args.pad_r}"
            )
    else:
        # Regular conv/pool: out = (in - kernel + pad_total) // stride + 1.
        if in_h == 0:
            self.compute_operand_shape(
                out_id,
                2,
                f"({flex_name(image_id, 2)} - {args.kernel_h} + {args.pad_t} + {args.pad_b}) // {args.stride_h} + 1"
            )
        if in_w == 0:
            self.compute_operand_shape(
                out_id,
                3,
                f"({flex_name(image_id, 3)} - {args.kernel_w} + {args.pad_l} + {args.pad_r}) // {args.stride_w} + 1"
            )
def serialize_model(module, inputs, config=None):
    """Serialize a TorchScript module to the NNAPI format.

    Convenience wrapper that constructs a _NnapiSerializer with the given
    optional config and runs it over the module and example inputs.
    """
    serializer = _NnapiSerializer(config)
    return serializer.serialize_model(module, inputs)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
93b28ca4463dc909e11f966c914b21a0d0b546f4 | 9a63e1b1f026dcde05d7ee1a00b836a6c34e5d43 | /tests/appointment/test_metrics.py | 08b1eda65fe4eb9f43f5f6c33de4203767d1c277 | [
"BSD-3-Clause"
] | permissive | databill86/poli-sci-kit | 201770634c05463fe4ef00b20a47de95d276b6cd | a3f308ccd914cf18105de89218e23fe95a0b1de7 | refs/heads/main | 2023-06-26T17:12:51.939600 | 2021-07-29T15:03:05 | 2021-07-29T15:03:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,831 | py | """Appointment metric tests"""
from poli_sci_kit.appointment import metrics
def test_ideal_share(share, total_shares, seats):
    """ideal_share should reproduce the known reference value."""
    result = metrics.ideal_share(
        share=share, total_shares=total_shares, total_alloc=seats
    )
    assert round(result, 4) == 6.9222
def test_alloc_to_share_ratio(share, total_shares, allocation, seats):
    """alloc_to_share_ratio should reproduce the known reference value."""
    result = metrics.alloc_to_share_ratio(
        share=share,
        total_shares=total_shares,
        allocation=allocation,
        total_alloc=seats,
    )
    assert round(result, 4) == 1.0112
def test_square_alloc_to_share_ratio(share, total_shares, allocation, seats):
    """sqr_alloc_to_share_error should reproduce the known reference value.

    NOTE(review): the test name says "ratio" but the metric under test
    is the squared error — consider renaming for clarity.
    """
    result = metrics.sqr_alloc_to_share_error(
        share=share,
        total_shares=total_shares,
        allocation=allocation,
        total_alloc=seats,
    )
    assert round(result, 6) == 0.000126
def test_total_alloc_to_share_error(tie_votes_list, allocations):
    """total_alloc_to_share_error (proportional) should match the reference value."""
    result = metrics.total_alloc_to_share_error(
        shares=tie_votes_list, allocations=allocations, proportional=True
    )
    assert round(result, 6) == 0.006835
def test_rep_weight(share, allocation):
    """rep_weight should reproduce the known reference value."""
    result = metrics.rep_weight(share=share, allocation=allocation)
    assert round(result, 4) == 274082.5714
def test_sqr_rep_weight_error(share, total_shares, allocation, seats):
    """sqr_rep_weight_error should reproduce the known reference value."""
    result = metrics.sqr_rep_weight_error(
        share=share,
        total_shares=total_shares,
        allocation=allocation,
        total_alloc=seats,
    )
    assert round(result, 4) == 9480416.9437
def test_total_rep_weight_error(tie_votes_list, allocations):
    """total_rep_weight_error (proportional) should match the reference value."""
    result = metrics.total_rep_weight_error(
        shares=tie_votes_list, allocations=allocations, proportional=True
    )
    assert round(result, 4) == 594037282.4765
def test_div_not_0(short_votes_list, q, div_index_metrics):
    """Every diversity-index variant should yield a nonzero value."""
    value = metrics.div_index(
        shares=short_votes_list, q=q, metric_type=div_index_metrics
    )
    assert value != 0
def test_dispr_not_0(short_votes_list, allocations, dispr_index_metrics):
    """Every disproportionality-index variant should yield a nonzero value."""
    value = metrics.dispr_index(
        shares=short_votes_list,
        allocations=allocations,
        metric_type=dispr_index_metrics,
    )
    assert value != 0
def test_effective_number_of_groups_not_0(short_votes_list, effective_group_metrics):
    """Every effective-number-of-groups variant should yield a nonzero value."""
    value = metrics.effective_number_of_groups(
        shares=short_votes_list, metric_type=effective_group_metrics
    )
    assert value != 0
| [
"andrew.t.mcallister@gmail.com"
] | andrew.t.mcallister@gmail.com |
ebf1aff1bbdf5a219a46dbeed92232bba9a0fad0 | b57b0a14df5c6841f04cccb7b02ad04afbca18f8 | /azure_iot_edge/tests/test_check.py | 2b89dda24289d6d675f0c94de7469395f4b1e350 | [
"AFL-3.0",
"BSD-3-Clause-Modification",
"LGPL-3.0-only",
"Unlicense",
"LGPL-2.1-only",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | zeroc0d3/integrations-core | d9c99803c049668b7f9f9c796d338e343d3d46ee | 634d567f3c38d32aabb3f4c16b50bcfa8a4ae0fb | refs/heads/master | 2021-09-28T18:37:00.650406 | 2021-09-13T11:59:45 | 2021-09-13T11:59:45 | 199,758,958 | 0 | 0 | BSD-3-Clause | 2019-07-31T02:01:25 | 2019-07-31T02:01:24 | null | UTF-8 | Python | false | false | 3,884 | py | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import copy
import pytest
import requests
from datadog_checks.azure_iot_edge import AzureIoTEdgeCheck
from datadog_checks.base.stubs.aggregator import AggregatorStub
from datadog_checks.base.stubs.datadog_agent import DatadogAgentStub
from datadog_checks.dev.utils import get_metadata_metrics
from . import common
@pytest.mark.usefixtures("mock_server")
def test_check(aggregator, mock_instance):
    # type: (AggregatorStub, dict) -> None
    """
    Under normal conditions, metrics and service checks are collected as expected.
    """
    check = AzureIoTEdgeCheck('azure_iot_edge', {}, [mock_instance])
    check.check(mock_instance)

    # Hub metrics carry complex cross-product tags, so only verify that the
    # common tag subset is present on the first sample.
    for hub_metric, hub_type in common.HUB_METRICS:
        aggregator.assert_metric(hub_metric, metric_type=hub_type)
        first_sample = aggregator._metrics[hub_metric][0]
        assert set(common.TAGS) <= set(first_sample.tags)

    for agent_metric, agent_type, extra_tags in common.AGENT_METRICS:
        aggregator.assert_metric(
            agent_metric, metric_type=agent_type, count=1, tags=common.TAGS + extra_tags
        )

    for module_metric, module_type in common.MODULE_METRICS:
        for module_name in common.MODULES:
            module_tags = common.TAGS + ['module_name:{}'.format(module_name)]
            aggregator.assert_metric(
                module_metric, metric_type=module_type, count=1, tags=module_tags
            )

    # Both Prometheus endpoints should report healthy.
    for health_check, endpoint in (
        ('azure.iot_edge.edge_hub.prometheus.health', common.MOCK_EDGE_HUB_PROMETHEUS_URL),
        ('azure.iot_edge.edge_agent.prometheus.health', common.MOCK_EDGE_AGENT_PROMETHEUS_URL),
    ):
        aggregator.assert_service_check(
            health_check,
            AzureIoTEdgeCheck.OK,
            count=1,
            tags=common.CUSTOM_TAGS + ['endpoint:{}'.format(endpoint)],
        )

    aggregator.assert_all_metrics_covered()
    aggregator.assert_metrics_using_metadata(get_metadata_metrics())
@pytest.mark.usefixtures("mock_server")
def test_version_metadata(datadog_agent, mock_instance):
    # type: (DatadogAgentStub, dict) -> None
    """The edge agent version is submitted as check metadata."""
    check = AzureIoTEdgeCheck('azure_iot_edge', {}, [mock_instance])
    check.check_id = 'test:123'
    check.run()

    major, minor, patch, raw = common.MOCK_EDGE_AGENT_VERSION
    expected_metadata = {
        'version.scheme': 'semver',
        'version.major': major,
        'version.minor': minor,
        'version.patch': patch,
        'version.raw': raw,
    }
    datadog_agent.assert_metadata('test:123', expected_metadata)
@pytest.mark.usefixtures("mock_server")
@pytest.mark.parametrize(
    "option, url, service_check",
    [
        pytest.param(
            "edge_agent_prometheus_url",
            common.MOCK_EDGE_AGENT_PROMETHEUS_URL,
            "azure.iot_edge.edge_agent.prometheus.health",
            id="edge-agent",
        ),
        pytest.param(
            "edge_hub_prometheus_url",
            common.MOCK_EDGE_HUB_PROMETHEUS_URL,
            "azure.iot_edge.edge_hub.prometheus.health",
            id="edge-hub",
        ),
    ],
)
def test_prometheus_endpoint_down(aggregator, mock_instance, option, url, service_check):
    # type: (AggregatorStub, dict, str, str, str) -> None
    """
    When a Prometheus endpoint is unreachable, service check reports as CRITICAL.
    """
    instance = copy.deepcopy(mock_instance)
    unreachable_port = common.MOCK_SERVER_PORT + 1  # Nothing listens here.
    instance[option] = url.replace(str(common.MOCK_SERVER_PORT), str(unreachable_port))
    check = AzureIoTEdgeCheck('azure_iot_edge', {}, [instance])

    with pytest.raises(requests.ConnectionError):
        check.check(instance)

    aggregator.assert_service_check(service_check, AzureIoTEdgeCheck.CRITICAL)
| [
"noreply@github.com"
] | zeroc0d3.noreply@github.com |
08eed4de6183e86e72ce27b336edb9ca690983f7 | 13f6c4aac5af08e4e568d7069b579846c711d2f5 | /pw_console/py/pw_console/search_toolbar.py | 84691d998455a63028fe000ee1baf95c2ef1d6eb | [
"Apache-2.0"
] | permissive | Ray-Go/pigweed | 103fa8a61ffef7c7cb6dfe62351fc6a2980b00f3 | 4dda54715b8e01540c0fec758b77acc16ed17061 | refs/heads/main | 2023-07-20T13:24:27.315335 | 2021-08-17T21:06:23 | 2021-08-17T23:42:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,178 | py | # Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""SearchToolbar class used by LogPanes."""
from __future__ import annotations

import functools
from typing import TYPE_CHECKING

from prompt_toolkit.buffer import Buffer
from prompt_toolkit.filters import (
    Condition, )
from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
from prompt_toolkit.layout import (
    ConditionalContainer,
    FormattedTextControl,
    HSplit,
    Window,
    WindowAlign,
)
from prompt_toolkit.widgets import TextArea
from prompt_toolkit.validation import DynamicValidator

from pw_console.log_view import RegexValidator, SearchMatcher
# SearchToolbar references pw_console.widgets.checkbox attributes at runtime
# (to_keybind_indicator / to_checkbox_with_keybind_indicator), so the
# submodule must be imported explicitly here rather than relying on another
# module having imported it first.
import pw_console.widgets.checkbox
import pw_console.widgets.mouse_handlers

if TYPE_CHECKING:
    from pw_console.log_pane import LogPane
class SearchToolbar(ConditionalContainer):
    """One line toolbar for entering search text."""
    # Two help/settings rows plus the one-line input field.
    TOOLBAR_HEIGHT = 3
    def focus_self(self):
        """Move keyboard focus to this toolbar."""
        self.log_pane.application.application.layout.focus(self)
    def close_search_bar(self):
        """Close search bar."""
        # Reset invert setting for the next search
        self._search_invert = False
        # Hide the search bar
        self.log_pane.search_bar_active = False
        # Focus on the log_pane.
        self.log_pane.application.focus_on_container(self.log_pane)
        self.log_pane.redraw_ui()
    def _start_search(self):
        # Triggers validation and then _search_accept_handler on success.
        self.input_field.buffer.validate_and_handle()
    def _invert_search(self):
        """Toggle inverted (non-matching) search for the next run."""
        self._search_invert = not self._search_invert
    def _next_field(self):
        """Cycle the searched column; None means search all columns."""
        fields = self.log_pane.log_view.log_store.table.all_column_names()
        fields.append(None)
        current_index = fields.index(self._search_field)
        next_index = (current_index + 1) % len(fields)
        self._search_field = fields[next_index]
    def create_filter(self):
        """Run the current search and, if it succeeds, turn it into a filter."""
        self._start_search()
        if self._search_successful:
            self.log_pane.log_view.apply_filter()
    def get_search_help_fragments(self):
        """Return FormattedText with search general help keybinds."""
        # NOTE(review): this method depends on the pw_console.widgets.checkbox
        # submodule being imported somewhere before it runs; verify it is.
        focus = functools.partial(pw_console.widgets.mouse_handlers.on_click,
                                  self.focus_self)
        start_search = functools.partial(
            pw_console.widgets.mouse_handlers.on_click, self._start_search)
        add_filter = functools.partial(
            pw_console.widgets.mouse_handlers.on_click, self.create_filter)
        clear_filters = functools.partial(
            pw_console.widgets.mouse_handlers.on_click,
            self.log_pane.log_view.clear_filters)
        close_search = functools.partial(
            pw_console.widgets.mouse_handlers.on_click, self.close_search_bar)
        separator_text = [('', ' ', focus)]
        # Empty text matching the width of the search bar title.
        fragments = [
            ('', ' ', focus),
        ]
        fragments.extend(separator_text)
        fragments.extend(
            pw_console.widgets.checkbox.to_keybind_indicator(
                'Enter', 'Search', start_search))
        fragments.extend(separator_text)
        fragments.extend(
            pw_console.widgets.checkbox.to_keybind_indicator(
                'Ctrl-Alt-f', 'Add Filter', add_filter))
        fragments.extend(separator_text)
        fragments.extend(
            pw_console.widgets.checkbox.to_keybind_indicator(
                'Ctrl-Alt-r', 'Clear Filters', clear_filters))
        fragments.extend(separator_text)
        fragments.extend(
            pw_console.widgets.checkbox.to_keybind_indicator(
                'Ctrl-c', 'Close', close_search))
        fragments.extend(separator_text)
        return fragments
    def get_search_settings_fragments(self):
        """Return FormattedText with current search settings and keybinds."""
        focus = functools.partial(pw_console.widgets.mouse_handlers.on_click,
                                  self.focus_self)
        next_field = functools.partial(
            pw_console.widgets.mouse_handlers.on_click, self._next_field)
        toggle_invert = functools.partial(
            pw_console.widgets.mouse_handlers.on_click, self._invert_search)
        next_matcher = functools.partial(
            pw_console.widgets.mouse_handlers.on_click,
            self.log_pane.log_view.select_next_search_matcher)
        separator_text = [('', ' ', focus)]
        fragments = [
            # Title
            ('class:search-bar-title', ' Search ', focus),
        ]
        fragments.extend(separator_text)
        # Column selector: which log column the search applies to.
        fragments.extend(
            pw_console.widgets.checkbox.to_keybind_indicator(
                'Ctrl-t', 'Column:', next_field))
        fragments.extend([
            ('class:search-bar-setting',
             (self._search_field.title() if self._search_field else 'All'),
             next_field),
        ])
        fragments.extend(separator_text)
        # Invert toggle checkbox.
        fragments.extend(
            pw_console.widgets.checkbox.to_checkbox_with_keybind_indicator(
                self._search_invert, 'Ctrl-v', 'Invert', toggle_invert))
        fragments.extend(separator_text)
        # Matching Method
        fragments.extend(
            pw_console.widgets.checkbox.to_keybind_indicator(
                'Ctrl-n', 'Matcher:', next_matcher))
        fragments.extend([
            ('class:search-bar-setting',
             str(self.log_pane.log_view.search_matcher.name), next_matcher),
        ])
        fragments.extend(separator_text)
        return fragments
    def get_search_matcher(self):
        # Only regex searches need input validation; any other matcher
        # accepts arbitrary text (False disables the DynamicValidator).
        if self.log_pane.log_view.search_matcher == SearchMatcher.REGEX:
            return self.log_pane.log_view.search_validator
        return False
    def __init__(self, log_pane: 'LogPane'):
        """Build the three-row toolbar (help row, settings row, input field)."""
        self.log_pane = log_pane
        self.search_validator = RegexValidator()
        # Set by _search_accept_handler after each search attempt.
        self._search_successful = False
        self._search_invert = False
        self._search_field = None
        # FormattedText of the search column headers.
        self.input_field = TextArea(
            prompt=[
                ('class:search-bar-setting', '/',
                 functools.partial(pw_console.widgets.mouse_handlers.on_click,
                                   self.focus_self))
            ],
            focusable=True,
            focus_on_click=True,
            scrollbar=False,
            multiline=False,
            height=1,
            dont_extend_height=True,
            dont_extend_width=False,
            accept_handler=self._search_accept_handler,
            validator=DynamicValidator(self.get_search_matcher),
            history=self.log_pane.application.search_history,
        )
        search_help_bar_control = FormattedTextControl(
            self.get_search_help_fragments)
        search_help_bar_window = Window(content=search_help_bar_control,
                                        height=1,
                                        align=WindowAlign.LEFT,
                                        dont_extend_width=False)
        search_settings_bar_control = FormattedTextControl(
            self.get_search_settings_fragments)
        search_settings_bar_window = Window(
            content=search_settings_bar_control,
            height=1,
            align=WindowAlign.LEFT,
            dont_extend_width=False)
        # Additional keybindings for the text area.
        key_bindings = KeyBindings()
        @key_bindings.add('escape')
        @key_bindings.add('c-c')
        @key_bindings.add('c-d')
        def _close_search_bar(_event: KeyPressEvent) -> None:
            """Close search bar."""
            self.close_search_bar()
        @key_bindings.add('c-n')
        def _select_next_search_matcher(_event: KeyPressEvent) -> None:
            """Select the next search matcher."""
            self.log_pane.log_view.select_next_search_matcher()
        @key_bindings.add('escape', 'c-f')  # Alt-Ctrl-f
        def _create_filter(_event: KeyPressEvent) -> None:
            """Create a filter."""
            self.create_filter()
        @key_bindings.add('c-v')
        def _toggle_search_invert(_event: KeyPressEvent) -> None:
            """Toggle inverted search matching."""
            self._invert_search()
        @key_bindings.add('c-t')
        def _select_next_field(_event: KeyPressEvent) -> None:
            """Select next search field/column."""
            self._next_field()
        # Clear filter keybind is handled by the parent log_pane.
        self.input_field.control.key_bindings = key_bindings
        super().__init__(
            HSplit(
                [
                    search_help_bar_window,
                    search_settings_bar_window,
                    self.input_field,
                ],
                height=SearchToolbar.TOOLBAR_HEIGHT,
                style='class:search-bar',
            ),
            filter=Condition(lambda: log_pane.search_bar_active),
        )
    def _search_accept_handler(self, buff: Buffer) -> bool:
        """Function run when hitting Enter in the search bar."""
        self._search_successful = False
        if len(buff.text) == 0:
            self.close_search_bar()
            # Don't apply an empty search.
            return False
        if self.log_pane.log_view.new_search(buff.text,
                                             invert=self._search_invert,
                                             field=self._search_field):
            self._search_successful = True
            self.close_search_bar()
            # Erase existing search text.
            return False
        # Keep existing text if regex error
        return True
| [
"pigweed-scoped@luci-project-accounts.iam.gserviceaccount.com"
] | pigweed-scoped@luci-project-accounts.iam.gserviceaccount.com |
64c9f98a0759a0df0b44851c167e3a1d53498e0c | 885a722e3e5814ae4942ac5e8cf8d0091e734b4c | /BAEKJOON/11000~/11655_ROT13_python/CodingTest.py | ba92d5f2015a4ebedf5f1c18c4985c26f96f09ed | [] | no_license | ledpear/algorithm | 52f3ea25842eee20b3bbd48e51825b9df4942e03 | 4922c6fe5ca0b98a90dee218b756006e7ba05d82 | refs/heads/master | 2023-06-09T17:47:45.674244 | 2023-06-03T13:47:11 | 2023-06-03T13:47:11 | 133,370,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | from string import ascii_uppercase
from string import ascii_lowercase
def ROT13(c):
    """Return the ROT13 rotation of an ASCII letter.

    Non-letters are returned unchanged. The original implementation rebuilt
    both alphabet lists and did linear scans on every call; simple character
    arithmetic does the same thing in O(1).
    """
    if 'A' <= c <= 'Z':
        return chr((ord(c) - ord('A') + 13) % 26 + ord('A'))
    if 'a' <= c <= 'z':
        return chr((ord(c) - ord('a') + 13) % 26 + ord('a'))
    return c
# Read one line from stdin, ROT13 every character, and print the result.
# (The original rebound the builtin name `input` to a list; use a distinct
# variable instead.)
text = input()
print(''.join(ROT13(ch) for ch in text))
"tjsrb75@gmail.com"
] | tjsrb75@gmail.com |
1f713eff37f69e1eb1b584fea35ebe8ac07f8c25 | 184ba93339a2af7d375bf0e4c9b787ec2a0c3c34 | /hack/picoctf.org/General Skills/PW Crack 4/level4.py | 526e2d62ae8d52e77fa7241cf5d4e59494727f96 | [] | no_license | pchaos/others | 4436dc3ab134d5ed7868df22d4098c93078aae1f | ff1d7229b075a1bb10dbbae8fc5b2bfe8ea43987 | refs/heads/master | 2023-07-14T19:33:36.238932 | 2023-06-21T09:09:04 | 2023-06-21T09:09:04 | 107,945,559 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,476 | py | import hashlib
### THIS FUNCTION WILL NOT HELP YOU FIND THE FLAG --LT ########################
def str_xor(secret, key):
    """XOR each character of ``secret`` with ``key`` repeated cyclically."""
    # Stretch the key so it is at least as long as the secret; zip() then
    # truncates the pair stream to the secret's length.
    stretched_key = key * (len(secret) // len(key) + 1)
    return "".join(
        chr(ord(secret_char) ^ ord(key_char))
        for secret_char, key_char in zip(secret, stretched_key)
    )
###############################################################################
# Load the encrypted flag and the expected password hash. Use context
# managers so the file handles are closed deterministically (the original
# left them to the garbage collector).
with open("level4.flag.txt.enc", "rb") as _flag_file:
    flag_enc = _flag_file.read()
with open("level4.hash.bin", "rb") as _hash_file:
    correct_pw_hash = _hash_file.read()
def hash_pw(pw_str):
    """Return the MD5 digest (raw bytes) of the UTF-8 encoding of pw_str."""
    digest = hashlib.md5(pw_str.encode())
    return digest.digest()
def level_4_pw_check():
    """Prompt for the password; on a hash match, decrypt and print the flag."""
    guess = input("Please enter correct password for flag: ")
    if hash_pw(guess) == correct_pw_hash:
        print("Welcome back... your flag, user:")
        print(str_xor(flag_enc.decode(), guess))
        return
    print("That password is incorrect")
level_4_pw_check()  # Prompt once as soon as the script runs.
# The strings below are 100 possibilities for the correct password.
# (Only 1 is correct)
pos_pw_list = [
    "158f", "1655", "d21e", "4966", "ed69", "1010", "dded", "844c", "40ab", "a948",
    "156c", "ab7f", "4a5f", "e38c", "ba12", "f7fd", "d780", "4f4d", "5ba1", "96c5",
    "55b9", "8a67", "d32b", "aa7a", "514b", "e4e1", "1230", "cd19", "d6dd", "b01f",
    "fd2f", "7587", "86c2", "d7b8", "55a2", "b77c", "7ffe", "4420", "e0ee", "d8fb",
    "d748", "b0fe", "2a37", "a638", "52db", "51b7", "5526", "40ed", "5356", "6ad4",
    "2ddd", "177d", "84ae", "cf88", "97a3", "17ad", "7124", "eff2", "e373", "c974",
    "7689", "b8b2", "e899", "d042", "47d9", "cca9", "ab2a", "de77", "4654", "9ecb",
    "ab6e", "bb8e", "b76b", "d661", "63f8", "7095", "567e", "b837", "2b80", "ad4f",
    "c514", "ffa4", "fc37", "7254", "b48b", "d38b", "a02b", "ec6c", "eacc", "8b70",
    "b03e", "1b36", "81ff", "77e4", "dbe6", "59d9", "fd6a", "5653", "8b95", "d0e5",
]
| [
"drifthua@gmail.com"
] | drifthua@gmail.com |
0b7cd367e8fb6f11318b27a150bd97bc031d5441 | 2b15168bc67ee935446f51c46045f73346369c5a | /model/resnet50.py | 1a54e0fc47e9fea59fb465a498f7c2c608b9c85f | [] | no_license | jason9075/tf2_arcface | 6c37500c9c14170ea6731f6a0d79a19f088c32d3 | 6fabcdf9c3c9a12603456476fc8052de2830684d | refs/heads/master | 2023-04-30T11:42:52.845549 | 2021-04-01T06:59:39 | 2021-04-01T06:59:39 | 311,858,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,769 | py | import tensorflow as tf
def create_resnet50(input_node, embedding_size, layers=None, is_train=False):
    """Build a ResNet-50-style face-embedding network on top of ``input_node``.

    Args:
        input_node: Input image tensor (NHWC).
        embedding_size: Length of the output embedding vector.
        layers: Number of basic blocks per stage. Defaults to the standard
            ResNet-50 stage depths [3, 4, 6, 3]; the original code raised
            TypeError when this argument was omitted.
        is_train: If True, apply dropout (rate 0.4) before the embedding layer.

    Returns:
        The embedding tensor of shape (batch, embedding_size).
    """
    if layers is None:
        layers = [3, 4, 6, 3]
    expansion = 1
    net = tf.keras.layers.ZeroPadding2D(padding=1, name='first_padding')(input_node)
    net = tf.keras.layers.Conv2D(64,
                                 3,
                                 strides=1,
                                 use_bias=False,
                                 name='conv1__conv')(net)
    net = tf.keras.layers.BatchNormalization(epsilon=1e-5, name='bn1__bn')(net)
    net = tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25),
                                shared_axes=[1, 2], name='prelu__prelu')(net)
    # Four residual stages, each halving the spatial resolution.
    net = make_layer(net, 64, layers[0], stride=2, expansion=expansion, prefix='layer1')
    net = make_layer(net, 128, layers[1], stride=2, expansion=expansion, prefix='layer2')
    net = make_layer(net, 256, layers[2], stride=2, expansion=expansion, prefix='layer3')
    net = make_layer(net, 512, layers[3], stride=2, expansion=expansion, prefix='layer4')
    net = tf.keras.layers.BatchNormalization(epsilon=1e-5, name='bn2__bn')(net)
    # Because in pytorch is channel first and it start from index 1, so the pytorch order is NCHW.
    # And here we have to switch the tensorflow order from NHWC to NCHW
    net = tf.transpose(net, [0, 3, 1, 2])
    net = tf.keras.layers.Flatten()(net)
    if is_train:
        net = tf.keras.layers.Dropout(0.4)(net)
    net = tf.keras.layers.Dense(embedding_size, name='fc__fc')(net)
    net = tf.keras.layers.BatchNormalization(epsilon=1e-5, name='features__bn')(net)
    return net
def make_layer(net, out_ch, num_layer, stride=1, expansion=1, prefix='layer'):
    """Stack ``num_layer`` basic blocks; only the first one downsamples."""
    out = basic_block(net, 64, out_ch, stride=stride, expansion=expansion,
                      downsample=True, prefix='{}.0'.format(prefix))
    for block_idx in range(1, num_layer):
        out = basic_block(out, 64, out_ch, stride=1, expansion=expansion,
                          prefix='{}.{}'.format(prefix, block_idx))
    return out
def basic_block(net, in_ch,
                out_ch,
                stride=1,
                groups=1,
                expansion=1,
                downsample=False,
                prefix='basic_block'):
    """Pre-activation residual basic block (BN -> conv -> BN -> PReLU -> conv -> BN).

    Returns the residual sum of the transformed branch and the (possibly
    downsampled) shortcut. Layer names use ``prefix`` so weights can be mapped
    from a PyTorch checkpoint.
    """
    out = tf.keras.layers.BatchNormalization(epsilon=1e-5, name=f'{prefix}.bn1__bn')(net)
    out = tf.keras.layers.ZeroPadding2D(padding=1, name=f'{prefix}.padding1')(out)
    out = tf.keras.layers.Conv2D(out_ch,
                                 3,
                                 strides=1,
                                 use_bias=False,
                                 name=f'{prefix}.conv1__conv')(out)
    out = tf.keras.layers.BatchNormalization(epsilon=1e-5, name=f'{prefix}.bn2__bn')(out)
    out = tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25),
                                shared_axes=[1, 2],
                                name=f'{prefix}.prelu__prelu')(out)
    out = tf.keras.layers.ZeroPadding2D(padding=1, name=f'{prefix}.padding2')(out)
    # The second conv carries the stride, so downsampling happens here.
    out = tf.keras.layers.Conv2D(out_ch,
                                 3,
                                 strides=stride,
                                 use_bias=False,
                                 name=f'{prefix}.conv2__conv')(out)
    out = tf.keras.layers.BatchNormalization(epsilon=1e-5, name=f'{prefix}.bn3__bn')(out)
    # Project the shortcut with a 1x1 conv when the shapes would not match.
    if downsample and (stride != 1 or in_ch != out_ch * expansion):
        net = tf.keras.layers.Conv2D(out_ch * expansion, 1,
                                     strides=stride,
                                     use_bias=False,
                                     groups=groups,
                                     name=f'{prefix}.downsample.0__conv')(net)
        net = tf.keras.layers.BatchNormalization(epsilon=1e-5, name=f'{prefix}.downsample.1__bn')(net)
    return out + net
| [
"jason9075@gmail.com"
] | jason9075@gmail.com |
9c86f4be2dd731af2ce6d5d7908eeaa50af4d197 | ef1bf421aca35681574c03014e0c2b92da1e7dca | /test/test_modes/test_filewatcher.py | c9bb04ff63acd71e5503e30f6f2dda651615edff | [
"MIT"
] | permissive | pyQode/pyqode.core | 74e67f038455ea8cde2bbc5bd628652c35aff6eb | 0ffabebe4f0397d53429024f6f44db3fe97b0828 | refs/heads/master | 2020-04-12T06:36:33.483459 | 2020-01-18T14:16:08 | 2020-01-18T14:16:08 | 7,739,074 | 24 | 25 | MIT | 2020-01-18T14:16:10 | 2013-01-21T19:46:41 | Python | UTF-8 | Python | false | false | 2,194 | py | import os
import pytest
from pyqode.qt import QtCore
from pyqode.qt import QtWidgets
from pyqode.qt.QtTest import QTest
import datetime
from pyqode.core import modes
from test.helpers import editor_open, preserve_settings
# Fixture file observed by the FileWatcherMode under test.
file_path = os.path.join(
    os.getcwd(), 'test', 'test_modes', 'file_to_watch.txt')
def setup_module():
    """Create the watched fixture file before any test in this module runs."""
    with open(file_path, 'w') as fixture_file:
        fixture_file.write("test file initial")
def teardown_module():
    # Delete the watched fixture file once the whole module has finished.
    os.remove(file_path)
def get_mode(editor):
    # Return the editor's FileWatcherMode instance.
    return editor.modes.get(modes.FileWatcherMode)
@editor_open(file_path)
def test_enabled(editor):
    """Mode starts enabled and survives a disable/enable round trip."""
    mode = get_mode(editor)
    assert mode.enabled
    mode.enabled = False
    mode.enabled = True
def accept_mbox():
    """Press Space on any open QMessageBox to accept it."""
    for widget in QtWidgets.QApplication.instance().topLevelWidgets():
        if isinstance(widget, QtWidgets.QMessageBox):
            QTest.keyPress(widget, QtCore.Qt.Key_Space)
def reject_mbox():
    """Press Escape on any open QMessageBox to dismiss it."""
    for widget in QtWidgets.QApplication.instance().topLevelWidgets():
        if isinstance(widget, QtWidgets.QMessageBox):
            QTest.keyPress(widget, QtCore.Qt.Key_Escape)
@editor_open(file_path)
def test_modif_autoreload(editor):
    """Rewriting the watched file with auto_reload on must not raise."""
    mode = get_mode(editor)
    mode.auto_reload = False
    # NOTE(review): the mode is fetched twice in a row; the second lookup
    # appears redundant — confirm it is intentional.
    mode = get_mode(editor)
    mode.auto_reload = True
    # Rewrite the file's contents so the watcher sees a modification.
    with open(file_path, 'r') as f:
        with open(file_path, 'w') as f2:
            f2.write("test file %s" % datetime.datetime.now())
    QTest.qWait(1000)
@editor_open(file_path)
def test_delete(editor):
    """Deleting the watched file must not crash; recreate it afterwards."""
    mode = get_mode(editor)
    mode.auto_reload = False
    os.remove(file_path)
    # Give the watcher time to notice the deletion.
    QTest.qWait(1000)
    # Restore the fixture file and reopen it for subsequent tests.
    with open(file_path, 'w') as f:
        f.write("test file initial")
    editor.file.open(file_path)
@editor_open(file_path)
def test_none_filepath(editor):
    """_update_mtime must tolerate a document whose file path is None."""
    mode = get_mode(editor)
    mode.auto_reload = False  # was assigned twice in a row; once is enough
    saved_path = editor.file.path
    editor.file._path = None
    try:
        mode._update_mtime()
    finally:
        # Always restore the real path so later tests are not affected,
        # even if _update_mtime raises.
        editor.file._path = saved_path
@editor_open(file_path)
def test_non_existing_file_path(editor):
    """_update_mtime must tolerate a path that does not exist on disk."""
    mode = get_mode(editor)
    mode.auto_reload = False
    saved_path = editor.file.path
    editor.file._path = '/usr/blah/foo/bar.txt'
    mode._update_mtime()
    editor.file._path = saved_path
| [
"colin.duquesnoy@gmail.com"
] | colin.duquesnoy@gmail.com |
a433723d25e214eee3dab87dbdbb2c3b88ba3cb4 | 79799898b833178f0af59d00e6b8d96d44d129d4 | /backend/crm/apps/finances/models/transaction.py | 0a9f9f458bdbdff8b559a40703e33f4c404c808f | [] | no_license | edzen12/min_crm | 924edeede9250bc3fabfb45a3f0e01a3768f11b3 | 4b979b347b67a2507d3c26c91852b300cdd20975 | refs/heads/master | 2023-08-20T13:25:39.914325 | 2021-10-12T08:38:28 | 2021-10-12T08:38:28 | 416,251,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,424 | py | import datetime
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models import Q
from django.utils import timezone
from apps.branches.models import Branch
from apps.courses.models import Course
from apps.finances import choices
from apps.finances.models.expense_tags import ExpenseTag
from apps.finances.models.wallet import Wallet
from apps.students.models import Student
from utils import generators
User = get_user_model()
class Transaction(models.Model):
    """A single financial transaction (income or expense) on a wallet."""
    # Creation timestamp; defaults to "now" but remains editable.
    created_date = models.DateTimeField(
        default=timezone.now,
        verbose_name='Дата создания'
    )
    # Human-readable transaction name.
    title = models.CharField(
        max_length=255,
        verbose_name="Наименование транзакции",
        db_index=True
    )
    # Auto-generated in save() as "trn-<YYYYMMDDHHMMSS>"; must be unique.
    transaction_id = models.CharField(
        max_length=255,
        db_index=True,
        blank=True,
        unique=True,
        verbose_name='ID транзакции'
    )
    amount = models.DecimalField(
        max_digits=100,
        decimal_places=2,
        default=0.00,
        verbose_name='Сумма'
    )
    # Optional photo/scan of the receipt.
    confirmation = models.FileField(
        upload_to=generators.generate_document_filename,
        null=True, blank=True,
        verbose_name="Прикрепите фото или скан чека"
    )
    comment = models.TextField(
        blank=True, null=True,
        verbose_name='Комментарии'
    )
    # Creator: restricted to administrators, staff members, or superusers.
    user = models.ForeignKey(
        User,
        related_name='transactions',
        on_delete=models.SET_NULL,
        null=True,
        verbose_name='Кто создал',
        limit_choices_to=(
            Q(is_administrator=True) |
            Q(is_staff_member=True) |
            Q(is_superuser=True)
        )
    )
    student = models.ForeignKey(
        Student,
        verbose_name='Студент',
        related_name='transactions',
        on_delete=models.SET_NULL,
        null=True, blank=True
    )
    course = models.ForeignKey(
        Course,
        verbose_name='Курс',
        related_name='transactions',
        on_delete=models.SET_NULL,
        null=True, blank=True
    )
    wallet = models.ForeignKey(
        Wallet,
        verbose_name='Кошелек',
        related_name='transactions',
        on_delete=models.SET_NULL,
        null=True
    )
    # Payment method (cash, transfer, ...) from choices.METHOD_CHOICES.
    method = models.CharField(
        'Метод',
        max_length=255,
        choices=choices.METHOD_CHOICES,
        blank=True, null=True
    )
    # Expense categories; only meaningful for expense-type transactions.
    categories = models.ManyToManyField(
        ExpenseTag,
        verbose_name='Категории расхода',
        related_name='transactions',
        blank=True,
    )
    # Income vs expense, from choices.TRANSACTION_CHOICES.
    transaction_type = models.CharField(
        'Тип транзакции',
        max_length=10,
        choices=choices.TRANSACTION_CHOICES
    )
    branch = models.ForeignKey(
        Branch,
        on_delete=models.SET_NULL,
        verbose_name='Филиал',
        related_name='transactions',
        null=True
    )
    class Meta:
        verbose_name = 'Транзакция'
        verbose_name_plural = 'Транзакции'
        ordering = ['-id']
    def __str__(self):
        return self.transaction_id
    def save(self, *args, **kwargs):
        # NOTE(review): transaction_id is regenerated on *every* save, so
        # updating an existing row rewrites its ID; and with one-second
        # timestamp resolution, two transactions saved in the same second
        # will collide on the unique constraint. Confirm this is intended.
        trn_id = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        self.transaction_id = f'trn-{trn_id}'
        super(Transaction, self).save(*args, **kwargs)
| [
"oichiev.edzen@gmail.com"
] | oichiev.edzen@gmail.com |
b74670afae7e3e55ac606a2102310e4ab4eb2d37 | 37b3b5d71b121a667522604483254c237cb08d99 | /Read-Search-Ask/Python/数据结构与算法/1-概念/3-列表和字典.py | 59ff7056262039628143422e62b16ae1a670b2e6 | [] | no_license | chanwanxiang/isMe | 949a4b2c10f6c908e7fa529918445e9449aba259 | 9586c7a4d5045bd371bbe15991f42e7be68697c3 | refs/heads/master | 2023-07-28T16:34:12.035814 | 2021-09-10T04:00:10 | 2021-09-10T04:00:10 | 118,423,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,375 | py | # python内置类型性能分析
# timeit模块
# 可以用来测试一小段Python代码执行速度
# class timeit.Timer(stmt='pass',setup='pass',timer=<timer function>)
# Timer是测试小段代码执行速度的类
# stmt参数是要测试的代码语句(statment)
# setup参数是运行代码是需要的设置
# timer参数是一个定时器函数,和平台有关
# timeit.Timer.timeit(number=1000)
# Timer类中测试语句执行速度的对象方法,number参数是测试代码时的测试次数,默认为1000000次.方法返回执行代码的平均时耗,返回一个float类型的秒数
# from timeit import Timer
# def test1():
# ls = []
# for i in range(10000):
# ls.append(i)
# def test2():
# ls = []
# for i in range(10000):
# ls = ls + [i]
# def test3():
# ls = [i for i in range(10000)]
# def test4():
# ls = list(range(10000))
# def test5():
# ls = []
# for i in range(10000):
# ls.extend([i])
# timer1 = Timer('test1()','from __main__ import test1')
# print('append:',timer1.timeit(1000))
# timer2 = Timer('test2()','from __main__ import test2')
# print('ls.add:',timer2.timeit(1000))
# timer3 = Timer('test3()','from __main__ import test3')
# print('列表推导:',timer3.timeit(1000))
# timer4 = Timer('test4()','from __main__ import test4')
# print('list(range(number)):',timer4.timeit(1000))
# timer5 = Timer('test5()','from __main__ import test5')
# print('extent:',timer5.timeit(1000))
# 运行结果
# append: 0.6040402
# ls.add: 218.9780432000000001
# 列表推导: 0.3399182999999999
# list(range(number)): 0.20767249999999993
# extent: 1.2539079000000002
# def test6():
# ls = []
# for i in range(10000):
# ls.append(i)
# def test7():
# ls = []
# for i in range(10000):
# ls.insert(0,i)
# timer6 = Timer('test6()','from __main__ import test6')
# print('append:',timer6.timeit(1000))
# timer7 = Timer('test7()','from __main__ import test7')
# print('insert:',timer7.timeit(1000))
# 运行结果
# append: 0.5967673
# insert: 27.2837601
# list内置操作的时间复杂度
# Operation Big-O Efficiency Remarks
# index[] O(1) 索引取值
# index assignment O(1) 索引赋值
# append O(1)
# pop O(1) 移除元素(默认最后一个)
# pop(i) O(n) 移除索引值的元素
# insert(i,item) O(n) 指定对象插入列表指定位置
# del operator O(n)
# iteration O(n)
# contains(in) O(n) 判断是否包含某个对象
# get Slice[x,y] O(k) 切片
# del Slice O(n)
# set Slice O(n+k)
# reverse O(n)
# concatenate O(k) 列表拼接
# sort O(nlogn)
# multiply O(nk) 列表相乘
# dict内置操作的时间复杂度
# Operation Big-O Efficiency Remarks
# copy O(n)
# get item O(1)
# set item O(1)
# del item O(1)
# contains(in) O(1)
# iteration O(n)
| [
"595366700@qq.com"
] | 595366700@qq.com |
ed9898838e80bb23adee24591f6c77d0e80e4c01 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/5/lpt.py | f87af3be5c939cd3b171e63f31a6878d93ada80d | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Python 2 script. Prints the tokens between a leading and trailing '"'
    # token, space-joined; an "empty string" (just the two quote tokens)
    # prints a blank line.
    # NOTE(review): this compares whole whitespace-split tokens to '"', so it
    # only handles quotes that stand alone as tokens — confirm the input
    # format guarantees that.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
    # Interpret each line of the input file: lines starting with the 'lPT'
    # keyword are print statements; anything else aborts with ERROR.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'lPT':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
    main(sys.argv[1])  # First CLI argument is the program file to interpret.
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
eba84832f15bc60938f5bfa87daa1b50577b5e75 | 0e083f405af00029c9ec31849f0f7f81c56844b5 | /configs/mmagic/super-resolution/super-resolution_snpe_static-256x256.py | 2d1291646f4d3bf2c2ee81bb233a40b458259426 | [
"Apache-2.0"
] | permissive | open-mmlab/mmdeploy | 39b9e7b611caab2c76a6142fcb99f0bf1d92ad24 | 5479c8774f5b88d7ed9d399d4e305cb42cc2e73a | refs/heads/main | 2023-09-01T21:29:25.315371 | 2023-08-31T09:59:29 | 2023-08-31T09:59:29 | 441,467,833 | 2,164 | 605 | Apache-2.0 | 2023-09-14T10:39:04 | 2021-12-24T13:04:44 | Python | UTF-8 | Python | false | false | 118 | py | _base_ = ['./super-resolution_static.py', '../../_base_/backends/snpe.py']
onnx_config = dict(input_shape=[256, 256])
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
417a0dfe50596e35b866a46bcc90a21946287f62 | e887ee52dae696ae067020e60df8f4733a0c0e96 | /ceasiompy/__version__.py | 52e3d7b5f60b0ba4877f70a932c6634ca2b8a748 | [
"Apache-2.0"
] | permissive | VivTrif/CEASIOMpy | 2e05970deab7d036701968d3ba326ebf4dae7ad4 | dc324a7c6bf93a9e3a383d70935c8a2983f25024 | refs/heads/master | 2021-01-08T16:09:59.945961 | 2020-09-15T11:38:22 | 2020-09-15T11:38:22 | 242,075,602 | 0 | 0 | Apache-2.0 | 2020-09-15T11:38:24 | 2020-02-21T07:03:17 | Python | UTF-8 | Python | false | false | 89 | py | """
CEASIONpy version
"""
VERSION = (0, 0, 1)
__version__ = '.'.join(map(str, VERSION))
| [
"dettmann@kth.se"
] | dettmann@kth.se |
2eeaf4e5a95badaf713fea9e6985db5ab31f02aa | 0c1ec32d0f08872ef3ca54830e33a2fdbe585f78 | /DeepForest/h5_generator.py | f78c6a5f120f7f8d40208fbc1c43f1c08a05bf0c | [] | no_license | jtpils/DeepForest | fa35bfab1e6bec13d843841aabd84f8fbc1af8bf | 932a84d604c941a084efdc0f729ae7868baf0309 | refs/heads/master | 2020-05-31T21:28:14.461505 | 2019-05-20T17:03:17 | 2019-05-20T17:03:17 | 190,498,747 | 1 | 0 | null | 2019-06-06T02:14:42 | 2019-06-06T02:14:41 | null | UTF-8 | Python | false | false | 7,350 | py | """
On the fly generator. Crop out portions of a large image, and pass boxes and annotations. This follows the csv_generator template. Satifies the format in generator.py
"""
import pandas as pd
import h5py
from keras_retinanet.preprocessing.generator import Generator
from keras_retinanet.utils.image import read_image_bgr
from keras_retinanet.utils.visualization import draw_annotations
import numpy as np
from PIL import Image
from six import raise_from
import random
import csv
import sys
import os.path
import cv2
import slidingwindow as sw
import itertools
from DeepForest import Lidar
from DeepForest.utils import image_utils
class H5Generator(Generator):
    """ Generate data for a custom h5 dataset.

    Serves pre-cropped windows of large tiles from per-tile .h5 files, with
    box annotations read from a matching .csv in the same directory.
    """
    def __init__(
        self,
        data,
        DeepForest_config,
        group_method="none",
        name=None,
        **kwargs
    ):
        """ Initialize a data generator.

        Args:
            data: pandas DataFrame of windows; rows carry at least the
                'tile', 'site' and 'window' columns read below.
            DeepForest_config: config dict; DeepForest_config[site]["h5"]
                is the directory holding that site's .h5/.csv pairs.
            group_method: grouping strategy label (stored for callers).
            name: human-readable generator name used in log output.
        """
        self.image_names = []
        self.image_data = {}
        self.name = name
        self.windowdf = data
        self.DeepForest_config = DeepForest_config
        #Holder for the group order, after shuffling we can still recover loss -> window
        self.group_order = {}
        self.group_method=group_method
        #Holder for image path, keep from reloading same image to save time.
        self.previous_image_path=None
        #Turn off lidar checking during prediction for training sets.
        self.with_lidar=False
        #Read classes
        self.classes={"Tree": 0}
        #Create label dict
        self.labels = {}
        for key, value in self.classes.items():
            self.labels[value] = key
        #Set groups at first order.
        # NOTE(review): define_groups() returns (image_data, image_names) but
        # the result is discarded here -- confirm the base class rebuilds them.
        self.define_groups(shuffle=False)
        #report total number of annotations
        self.total_trees = self.total_annotations()
        super(H5Generator, self).__init__(**kwargs)

    def __len__(self):
        """Number of batches for generator"""
        # NOTE(review): self.groups is not assigned in this class; presumably
        # set up by the keras-retinanet Generator base class -- TODO confirm.
        return len(self.groups)

    def size(self):
        """ Size of the dataset (number of windows in the window frame).
        """
        image_data= self.windowdf.to_dict("index")
        image_names = list(image_data.keys())
        return len(image_names)

    def num_classes(self):
        """ Number of classes in the dataset.
        """
        return max(self.classes.values()) + 1

    def name_to_label(self, name):
        """ Map name to label.
        """
        return self.classes[name]

    def label_to_name(self, label):
        """ Map label to name.
        """
        return self.labels[label]

    def total_annotations(self):
        """ Find the total number of annotations for the dataset.

        Reads each distinct tile's .csv of boxes and counts only the rows
        whose (tile, window) pairs appear in this generator's window frame.
        """
        #Find matching annotations
        tiles = self.windowdf[["tile","site"]].drop_duplicates()
        total_annotations = 0
        #Select annotations
        #Optionally multiple h5 dirs
        for index, row in tiles.iterrows():
            h5_dir = self.DeepForest_config[row["site"]]["h5"]
            tilename = row["tile"]
            csv_name = os.path.join(h5_dir, os.path.splitext(tilename)[0]+'.csv')
            try:
                annotations = pd.read_csv(csv_name)
            except Exception as e:
                # Missing/unreadable csv: report and keep counting the rest.
                print(e)
                print("The csv named {} from tilename {} encountered an error when counting annotations".format(csv_name, tilename))
                continue
            # Inner join on shared columns restricts to this generator's windows.
            selected_annotations = pd.merge(self.windowdf, annotations)
            total_annotations += len(selected_annotations)
        print("There are a total of {} tree annotations in the {} generator".format(total_annotations, self.name))
        return(total_annotations)

    def define_groups(self, shuffle=False):
        '''
        Define image data and names based on grouping of tiles for computational efficiency

        Returns:
            (image_data, image_names): dict of window rows keyed by row
            position, and the list of those keys, grouped by tile (and
            shuffled within/across tiles when *shuffle* is true).
        '''
        #group by tile
        groups = [df for _, df in self.windowdf.groupby('tile')]
        if shuffle:
            #Shuffle order of windows within a tile
            groups = [x.sample(frac=1) for x in groups]
            #Shuffle order of tiles
            random.shuffle(groups)
        #Bring pandas frame back together
        newdf = pd.concat(groups).reset_index(drop=True)
        image_data = newdf.to_dict("index")
        image_names = list(image_data.keys())
        return(image_data, image_names)

    def load_image(self, image_index):
        """ Load an image at the image_index.

        Caches the open h5 file and its csv between calls so consecutive
        windows of the same tile avoid re-opening the files.
        """
        #Select sliding window and tile
        try:
            image_name = self.image_names[image_index]
        except Exception as e:
            # NOTE(review): the failure is only printed, not re-raised, so
            # image_name would be unbound on the next line -- confirm intent.
            print("Failed on image index {}".format(image_index))
            print("There are {} names in the image names object".format(len(self.image_names)))
        self.row = self.image_data[image_name]
        #Open image to crop
        ##Check if tile the is same as previous draw from generator, this will save time.
        if not self.row["tile"] == self.previous_image_path:
            print("Loading new h5: %s" % (self.row["tile"]))
            #Set directory based on site
            h5_dir = self.DeepForest_config[self.row["site"]]["h5"]
            #tilename for h5 and csv files
            tilename = os.path.split(self.row["tile"])[-1]
            tilename = os.path.splitext(tilename)[0]
            h5_name = os.path.join(h5_dir, tilename+'.h5')
            csv_name = os.path.join(h5_dir, tilename+'.csv')
            #Read h5
            self.hf = h5py.File(h5_name, 'r')
            #Read corresponding csv labels
            self.annotations = pd.read_csv(csv_name)
        #read image from h5
        window = self.row["window"]
        image = self.hf["train_imgs"][window,...]
        #Save image path for next evaluation to check
        self.previous_image_path = self.row["tile"]
        return image

    def load_annotations(self, image_index):
        '''
        Load annotations from csv file.

        Returns the boxes for this window as a numpy array of the csv
        columns "0".."4" (relies on self.annotations cached by load_image).
        '''
        #Select sliding window and tile
        image_name = self.image_names[image_index]
        self.row = self.image_data[image_name]
        #Find annotations
        annotations = self.annotations.loc[(self.annotations["tile"] == self.row["tile"]) & (self.annotations["window"] == self.row["window"])]
        return annotations[["0","1","2","3","4"]].values

    def compute_windows(self):
        ''''
        Create a sliding window object for reference
        '''
        #Load tile
        # NOTE(review): self.annotation_list is never assigned in this class;
        # confirm this method is still reachable/used.
        site = self.annotation_list.site.unique()[0]
        base_dir = self.DeepForest_config[site][self.name]["RGB"]
        image = os.path.join(base_dir, self.annotation_list.rgb_path.unique()[0])
        im = Image.open(image)
        numpy_image = np.array(im)
        #Generate sliding windows
        windows = sw.generate(numpy_image, sw.DimOrder.HeightWidthChannel, self.DeepForest_config["patch_size"], self.DeepForest_config["patch_overlap"])
        return(windows)
| [
"benweinstein2010@gmail.com"
] | benweinstein2010@gmail.com |
27e534559619c7b1c3f7751def4cd7a078bbfea9 | 8567438779e6af0754620a25d379c348e4cd5a5d | /chrome/android/java/DEPS | 8c434f90f8f274fea473dac3ed786c8c960e3387 | [
"BSD-3-Clause"
# Chromium DEPS include rules for chrome/android/java: each "+path" entry
# whitelists a directory whose Java sources this target may depend on.
include_rules = [
  "+components/autofill/android/java/src/org/chromium/components/autofill",
  "+components/background_task_scheduler/android/java/src/org/chromium/components/background_task_scheduler",
  "+components/bookmarks/common/android/java/src/org/chromium/components/bookmarks",
  "+components/dom_distiller/content/browser/android/java/src/org/chromium/components/dom_distiller/content",
  "+components/dom_distiller/core/android/java/src/org/chromium/components/dom_distiller/core",
  "+components/gcm_driver/android/java/src/org/chromium/components/gcm_driver",
  "+components/location/android/java",
  "+components/minidump_uploader",
  "+components/navigation_interception",
  "+components/precache/android/java",
  "+components/safe_json/android/java",
  "+components/sync/android/java/src/org/chromium/components/sync",
  "+components/web_contents_delegate_android",
  "+components/web_restrictions",
  "+content/public/android/java",
  "+services/service_manager/public/java",
]
| [
"hedonist.ky@gmail.com"
] | hedonist.ky@gmail.com | |
75d852a9b0694fc982f022a5ed972bc5949ed8eb | 3a29caaf19333f0623a8a6a26fbcf8ea14b9212f | /powerapp/core/apps.py | 1500ed69d40b50a8603db7edb3e197d6c905651c | [] | no_license | WisdomWolf/powerapp | 1f695e315fde0937ded0bd4194755bbc6ad6e1a1 | b287e5b3c51f649580ae81a21aa68c350049b73b | refs/heads/master | 2021-01-21T06:18:48.504225 | 2015-06-08T18:18:44 | 2015-06-08T18:18:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,564 | py | # -*- coding: utf-8 -*-
import re
import datetime
from collections import namedtuple
from importlib import import_module
from logging import getLogger
from django import apps
from django.conf import settings
from django.conf.urls import url, include
from django.utils.six import with_metaclass
from powerapp.core.app_signals import ServiceAppSignals
logger = getLogger(__name__)
class LoadModuleMixin(object):
    """Mixin letting an app object import its own submodules by short name."""

    def load_module(self, name, quiet=True):
        """Import ``<self.name>.<name>`` and return the module object.

        On ImportError, return ``None`` when *quiet* is true; otherwise
        re-raise the original exception.
        """
        dotted_path = '{0}.{1}'.format(self.name, name)
        try:
            module = import_module(dotted_path)
        except ImportError:
            if not quiet:
                raise
            module = None
        return module
class AppConfig(LoadModuleMixin, apps.AppConfig):
    """
    App Config for the powerapp.core app itself
    """
    name = 'powerapp.core'
    verbose_name = 'PowerApp core application'

    def ready(self):
        """Django hook: import side-effect submodules once the registry is ready."""
        # import the submodule with cron tasks
        self.load_module('cron')
        # import the submodule with signal handlers
        self.load_module('signals')
        # import the submodule with OAuth implementations
        self.load_module('oauth_impl')
class ServiceAppConfigMeta(type):
    """
    Metaclass for ServiceAppConfig subclasses.

    Exists for two reasons:
    1. every subclass gets its own ServiceAppSignals() object;
    2. every subclass gets its own periodic-task registry dict,
       so registrations never leak between services.
    """
    def __new__(mcs, name, bases, attrs):
        # Fresh objects per class -- never shared with siblings or parents.
        attrs.update(signals=ServiceAppSignals(), periodic_tasks={})
        return type.__new__(mcs, name, bases, attrs)
class ServiceAppConfig(with_metaclass(ServiceAppConfigMeta, LoadModuleMixin, apps.AppConfig)):
    """
    Base class for the application config object of services
    """
    #: A special flag to denote that current Django app represents a
    #: powerapp service
    service = True
    #: This flag has to be set to True if the application is "stateless"
    #: Stateless application reacts immediately on webhooks, it's easier to
    #: scale, but this app doesn't keep local model in sync, and you cannot
    #: perform queries such as "api.items.all(...)" against it.
    #:
    #: We in Todoist love stateless apps, because Sync queries are kind of
    #: expensive for us, so we encourage everyone to use this flag :)
    stateless = True
    #: The registry of powerapp signals. We overwrite it in metaclass anyway,
    #: but this way it provides hints for IDEs
    signals = ServiceAppSignals()
    #: The registry of periodic tasks. We overwrite it in metaclass as well
    periodic_tasks = {}
    """:type: dict[str,PeriodicTaskFun]"""

    def urlpatterns(self):
        """
        Returns the list of URL patterns which have to be added to main urls.py
        By default returns a sigle URL pattern which mounts app's urls.py as
        under the app's label path. Most likely you don't need to edit this
        function.
        """
        regex = r'^%s/' % self.label
        urls_module = '%s.urls' % self.name
        ns = self.label
        return [url(regex, include(urls_module, namespace=ns, app_name=ns))]

    def ready(self):
        """
        A signal called by the constructor once the app instance is ready
        (once it's registered)
        """
        logger.debug('Application %s is ready', self.name)
        # export app settings
        self.export_settings()
        # import the submodule with signal handlers
        self.load_module('signals')

    def export_settings(self):
        """Copy UPPER_CASE class attributes into django settings (no overwrite)."""
        re_variable = re.compile(r'^[A-Z0-9_]+$')
        for key, value in self.__class__.__dict__.items():
            # Only settings-style names, and only if not already defined.
            if re_variable.match(key) and not hasattr(settings, key):
                setattr(settings, key, value)

    @classmethod
    def periodic_task(cls, delta, name=None):
        """
        A decorator to add a periodic task. Decorated function has to accept
        two arguments: user and integration object

        *delta* may be an int (seconds) or a timedelta; *name* defaults to
        the function's dotted module path.
        """
        if isinstance(delta, int):
            delta = datetime.timedelta(seconds=delta)
        def decorator(func):
            registry_name = name or '%s.%s' % (func.__module__, func.__name__)
            cls.periodic_tasks[registry_name] = PeriodicTaskFun(func, delta, registry_name)
            return func
        return decorator
#: A wrapper for periodic tasks: the callable, its run interval (a
#: datetime.timedelta), and its registry name.
PeriodicTaskFun = namedtuple('PeriodicTaskFun', ['func', 'delta', 'name'])
| [
"roman.imankulov@gmail.com"
] | roman.imankulov@gmail.com |
30447e0dd7ecb10608749d5b46d4e1e7bd9019cc | f4335b5f682041a10f507401912a106fea7ad435 | /scripts/retriever/build_db.py | 1804c2b64fb8b07f4c1123b004f2ac55feed659a | [
"MIT"
] | permissive | Shuailong/RLQA | 27c4779518233f96b77a7d5af999c2f1e085a0b0 | 014c340aea9d27494e65e8329da61ebccd65db61 | refs/heads/master | 2021-04-12T09:57:41.235391 | 2018-11-17T13:14:56 | 2018-11-17T13:14:56 | 126,332,609 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,604 | py | #!/usr/bin/env python
# encoding: utf-8
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# Adapt from facebookresearch/DrQA by Shuailong on Mar 22 2018.
"""A script to read in and store documents in a sqlite database."""
import argparse
import sqlite3
import json
import os
import logging
import importlib.util
from multiprocessing import Pool as ProcessPool
from tqdm import tqdm
from rlqa.retriever import utils
# Configure the root logger: INFO-level, timestamped messages to the console.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fmt = logging.Formatter('%(asctime)s: [ %(message)s ]', '%m/%d/%Y %I:%M:%S %p')
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
# ------------------------------------------------------------------------------
# Import helper
# ------------------------------------------------------------------------------
# Per-process preprocess hook; installed by init() in each pool worker.
PREPROCESS_FN = None


def init(filename):
    """Pool initializer: load the user-supplied ``preprocess`` hook, if any."""
    global PREPROCESS_FN
    if not filename:
        return
    PREPROCESS_FN = import_module(filename).preprocess
def import_module(filename):
    """Import a module given a full path to the file.

    The file is executed and returned as a module named 'doc_filter'.
    """
    module_spec = importlib.util.spec_from_file_location('doc_filter', filename)
    loaded = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(loaded)
    return loaded
# ------------------------------------------------------------------------------
# Store corpus.
# ------------------------------------------------------------------------------
def iter_files(path):
    """Yield every file located under *path* (or *path* itself if a file)."""
    if os.path.isfile(path):
        yield path
        return
    if not os.path.isdir(path):
        raise RuntimeError('Path %s is invalid' % path)
    for dirpath, _dirnames, filenames in os.walk(path):
        for filename in filenames:
            yield os.path.join(dirpath, filename)
def get_contents(filename):
    """Parse the contents of a file. Each line is a JSON encoded document.

    Runs inside pool workers; PREPROCESS_FN is installed per-process by
    init(). Returns a list of (normalized_id, text) tuples ready for
    bulk insertion into sqlite.
    """
    global PREPROCESS_FN
    documents = []
    with open(filename) as f:
        for line in f:
            # Parse document
            doc = json.loads(line)
            # Maybe preprocess the document with custom function
            if PREPROCESS_FN:
                doc = PREPROCESS_FN(doc)
            # Skip if it is empty or None
            if not doc:
                continue
            # Add the document
            documents.append((utils.normalize(doc['id']), doc['text']))
    return documents
def store_contents(data_path, save_path, preprocess, num_workers=None):
    """Preprocess and store a corpus of documents in sqlite.
    Args:
        data_path: Root path to directory (or directory of directories) of files
            containing json encoded documents (must have `id` and `text` fields).
        save_path: Path to output sqlite db.
        preprocess: Path to file defining a custom `preprocess` function. Takes
            in and outputs a structured doc.
        num_workers: Number of parallel processes to use when reading docs.
    Raises:
        RuntimeError: if save_path already exists (never overwrites).
    """
    if os.path.isfile(save_path):
        raise RuntimeError('%s already exists! Not overwriting.' % save_path)
    logger.info('Reading into database...')
    conn = sqlite3.connect(save_path)
    c = conn.cursor()
    c.execute("CREATE TABLE documents (id PRIMARY KEY, text);")
    # Each worker loads the optional preprocess hook once via init().
    workers = ProcessPool(num_workers, initializer=init, initargs=(preprocess,))
    files = [f for f in iter_files(data_path)]
    count = 0
    # NOTE(review): both this bar and the tqdm() wrapped around
    # imap_unordered below render progress; likely only one was intended.
    with tqdm(total=len(files)) as pbar:
        for pairs in tqdm(workers.imap_unordered(get_contents, files)):
            count += len(pairs)
            c.executemany("INSERT INTO documents VALUES (?,?)", pairs)
            pbar.update()
    logger.info('Read %d docs.' % count)
    logger.info('Committing...')
    conn.commit()
    conn.close()
# ------------------------------------------------------------------------------
# Main.
# ------------------------------------------------------------------------------
# CLI entry point: build a sqlite document store from a corpus directory.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('data_path', type=str, help='/path/to/data')
    parser.add_argument('save_path', type=str, help='/path/to/saved/db.db')
    parser.add_argument('--preprocess', type=str, default=None,
                        help=('File path to a python module that defines '
                              'a `preprocess` function'))
    parser.add_argument('--num-workers', type=int, default=None,
                        help='Number of CPU processes (for tokenizing, etc)')
    args = parser.parse_args()
    store_contents(
        args.data_path, args.save_path, args.preprocess, args.num_workers
    )
| [
"liangshuailong@gmail.com"
] | liangshuailong@gmail.com |
8af6f695bb0775773af909b521f2f19323fcd8da | f847abc060c56cbb14be69fbf1ed671caeda23e8 | /1-daemon.py | 90bfc2b6a242c008ba14fbd83dd48a7a86f0213a | [
"MIT"
] | permissive | ko9ma7/smartstore-automate | b4ade5b6c3ce2ea3f0345287a82ccbb006b25d82 | 3a222a4e81c08658e1c9be156d2814a1df0c71d7 | refs/heads/master | 2021-04-15T02:09:59.390417 | 2019-11-19T09:01:40 | 2019-11-19T09:01:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,128 | py | import argparse
import logging
import os
import signal
import sys
import time
class SmartStore:
    """Toy worker that logs an incrementing counter once per second.

    Registers itself as the handler for SIGINT/SIGTERM so the loop in
    ``main()`` exits cleanly when the process is signalled.
    """

    def __init__(self, log_file=None):
        """Set up logging (optionally also to *log_file*) and signal handlers."""
        logging.basicConfig(level=logging.INFO, format='%(message)s')
        self.logger = logging.getLogger('SmartStore')
        self.log_file = log_file
        # Always define the attribute so it exists even without a log file
        # (previously it was only set inside the branch below, so code
        # touching it later could raise AttributeError).
        self.log_handler = None
        if log_file:
            self.log_handler = logging.FileHandler(self.log_file)
            self.logger.addHandler(self.log_handler)
        self.__stop = False
        signal.signal(signal.SIGINT, self.stop)
        signal.signal(signal.SIGTERM, self.stop)

    def main(self):
        """Log an increasing counter every second until stop() is invoked."""
        i = 0
        self.logger.info('Start Singing, PID {}'.format(os.getpid()))
        while not self.__stop:
            self.logger.info(i)
            i += 1
            time.sleep(1)

    def stop(self, signum, frame):
        """Signal handler for SIGINT/SIGTERM: request main() to exit."""
        self.__stop = True
        self.logger.info('Receive Signal {}'.format(signum))
        self.logger.info('Stop Singing')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--pid', help='pid filename', required=True)
    parser.add_argument('--log', help='log filename', default=None)
    args = parser.parse_args()
    # BUG FIX: previously the worker was constructed and run in the
    # foreground here, *before* the fork below. Since main() loops until a
    # signal arrives, daemonization was never reached until then and the
    # workload then ran a second time as a daemon. Only the daemonized run
    # is kept.
    # First fork: parent exits so the child is re-parented to init.
    pid = os.fork()
    if pid > 0:
        exit(0)
    else:
        # Detach from the parent environment.
        os.chdir('/')
        os.setsid()
        os.umask(0)
        # Second fork: ensure the daemon can never re-acquire a controlling
        # terminal (it is no longer a session leader).
        pid = os.fork()
        if pid > 0:
            exit(0)
        else:
            # Redirect the standard streams to /dev/null.
            sys.stdout.flush()
            sys.stderr.flush()
            si = open(os.devnull, 'r')
            so = open(os.devnull, 'a+')
            se = open(os.devnull, 'a+')
            os.dup2(si.fileno(), sys.stdin.fileno())
            os.dup2(so.fileno(), sys.stdout.fileno())
            os.dup2(se.fileno(), sys.stderr.fileno())
            # Record the daemon's PID for later signalling (kill $(cat pid)).
            with open(args.pid, 'w') as pid_file:
                pid_file.write(str(os.getpid()))
            store = SmartStore(args.log)
            store.main()
| [
"pincoins@gmail.com"
] | pincoins@gmail.com |
7551c8ef2c19ceebb70042c614d045fa4fee0b7b | c91c5e6e33303bc57edceb955f184a5a43e3c030 | /policy_repository/policy_repository/settings.py | 3cb225113af4538daabbf5165385c3faed2a2095 | [] | no_license | rahuezo/policy_repository | dd429bc8b2c6641d9812e5c5e87f913380c1d7e9 | 6fdcdee2992e9980c1c023b59cec8f2ab6f9adc9 | refs/heads/master | 2020-03-29T02:14:12.244711 | 2017-08-08T23:52:14 | 2017-08-08T23:52:14 | 94,568,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,413 | py | """
Django settings for policy_repository project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; load it from the
# environment and rotate it before any deployment.
SECRET_KEY = '9%@h7jy2dpcz$%5^4@pfb6e+nbm@wzq$zq%(0e*mh4319z0mgz'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    # project apps
    'polrep',
    'accounts',
    'configuration',
    # third party
    'crispy_forms',
    # django contrib
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

# django-crispy-forms: render forms with Bootstrap 3 markup.
CRISPY_TEMPLATE_PACK = 'bootstrap3'

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'policy_repository.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'policy_repository.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATIC_URL = '/static/'

# User-uploaded content lives under <project>/media.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'

# collectstatic target: <settings package dir>/static
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
| [
"rahuezo@ucdavis.edu"
] | rahuezo@ucdavis.edu |
78937c75818ffbb9a0e5c7b4d5689c7b0401e83c | 4af454bced0f99e4ed8269d71e97284f0ef13afb | /gameserver/packets/from_server/base.py | 0c23afaf92d63ce56115425cb793fea9553609e4 | [] | no_license | L2jBrasil/L2py | c46db78238b4caf272a2399f4e4910fc256b3cca | d1c2e7bddb54d222f9a3d04262c09ad70329a226 | refs/heads/master | 2022-11-19T01:39:02.019777 | 2020-07-24T20:07:15 | 2020-07-24T20:07:15 | 292,115,581 | 1 | 1 | null | 2020-09-01T21:53:54 | 2020-09-01T21:53:54 | null | UTF-8 | Python | false | false | 385 | py | from common.packet import add_length, Packet
from gameserver.crypt.xor import xor_encrypt_game
class GameServerPacket(Packet):
    """Base class for packets the game server sends to the client."""

    @add_length
    @xor_encrypt_game
    # @add_padding()
    def encode(self, client):
        # Length prefix and XOR encryption are applied by the decorators;
        # the raw payload is simply the assembled body.
        return self.body

    @classmethod
    def parse(cls, data, client):
        # Intentionally a no-op: server-to-client packets are not parsed here.
        pass

    @classmethod
    def decode(cls, data, client, **kwargs):
        # Intentionally a no-op: server-to-client packets are not decoded here.
        pass
| [
"yurzs@icloud.com"
] | yurzs@icloud.com |
33acf2dbdce1cad3789c71937820056da49397a9 | 2279440aae28b1934c78797421948d1ee2a50422 | /scraping/labs/lab8/tripadvisor/tripadvisor/spiders/comments.py | 3eb7ddb0eb422ff455a1dbe08a730d189de5c91c | [] | no_license | yeladlouni/m2i | a024e3f740977ae27675d11d4d4d5dacecf59705 | d245ffd76f5b4f2a7f8d37821b89dedbfcd81b69 | refs/heads/master | 2023-02-12T07:00:12.712614 | 2021-01-07T13:52:08 | 2021-01-07T13:52:08 | 308,609,873 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | import scrapy
class CommentsSpider(scrapy.Spider):
    """Scrapy spider stub for crawling tripadvisor.fr comments."""
    name = 'comments'
    allowed_domains = ['tripadvisor.fr']
    start_urls = ['http://tripadvisor.fr/']

    def parse(self, response):
        # TODO: extract comment data from the response; currently a stub.
        pass
| [
"="
] | = |
06f7e5d64f3533d8064d85f7ddc3083d4ba331a8 | d37145afb8e788f00c52e33aa708df4685fd1822 | /jupyterlab/_version.py | 5827f0be5cc01bf3ecd2212a1755ad607fd96c5b | [
"MIT",
"BSD-3-Clause"
# Version as a tuple; the final field is the pre-release tag.
version_info = (0, 29, 0, 'dev0')

# Human-readable version string, e.g. "0.29.0.dev0".
__version__ = ".".join(str(part) for part in version_info)
| [
"steven.silvester@ieee.org"
] | steven.silvester@ieee.org |
d2a906e840c696628c03915e80ab6534e291d253 | a3eb732ead7e1d10a85a88e42dc639eb16a40265 | /instagram_api/response/model/comment_translations.py | bbf2090de07a7f8e908de3c4cdb81372d7bf77c0 | [
"MIT"
] | permissive | carsam2021/instagram_api | 7654c0f485c22935cf478016e46e65acbeda9344 | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | refs/heads/master | 2023-03-16T14:06:27.515432 | 2020-10-17T04:39:19 | 2020-10-17T04:39:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | from ..mapper import PropertyMapper, ApiInterfaceBase
from ..mapper.types import Timestamp, AnyType
__all__ = ['CommentTranslations', 'CommentTranslationsInterface']
class CommentTranslationsInterface(ApiInterfaceBase):
    """Field contract for one translated comment in an Instagram response."""
    id: int
    # Translated text payload; AnyType per the shared mapper types.
    translation: AnyType
class CommentTranslations(PropertyMapper, CommentTranslationsInterface):
    """Concrete response model; PropertyMapper presumably supplies the
    payload-to-attribute mapping -- see ..mapper."""
    pass
| [
"root@proscript.ru"
] | root@proscript.ru |
6fb35da6b38e5e8a685ad2692319d35dd249394e | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/atomic/NCName/Schema+Instance/NISTXML-SV-IV-atomic-NCName-enumeration-5-4.py | 9e5ef92703e0c0d18b9c5344a4ce6a336bbc0dbb | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 568 | py | from output.models.nist_data.atomic.ncname.schema_instance.nistschema_sv_iv_atomic_ncname_enumeration_5_xsd.nistschema_sv_iv_atomic_ncname_enumeration_5 import NistschemaSvIvAtomicNcnameEnumeration5
from output.models.nist_data.atomic.ncname.schema_instance.nistschema_sv_iv_atomic_ncname_enumeration_5_xsd.nistschema_sv_iv_atomic_ncname_enumeration_5 import NistschemaSvIvAtomicNcnameEnumeration5Type
# Valid instance document for the NIST NCName enumeration-5 schema: binds the
# element's value to one of the schema's enumerated NCName constants.
obj = NistschemaSvIvAtomicNcnameEnumeration5(
    value=NistschemaSvIvAtomicNcnameEnumeration5Type.KOBJECT_TRANSACT_CONSTITUENT_OF_FILE_IS_WITHOUT_ABOUT_ARE_A_BE
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
a7f074b9ed6884b55e0d38eb262a82430585a1cd | 9ba68903665929f72d78409bdf9b8ff3733a6746 | /scientific_expedition/task19_yaml_simple_dict_v2.py | 4ef84703380890bb71ec4dcb6fdd4993de042f6a | [] | no_license | DorogAD/checkio | eed8ae9865dda45d2cb0a4201d51fb45e91aec8a | 5a151f861746dbd2e838dea40a30c20dbdeaa399 | refs/heads/main | 2023-02-27T21:11:50.638125 | 2021-02-07T11:04:04 | 2021-02-07T11:04:04 | 315,864,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,028 | py | """
Have you ever heard of such markup language as YAML? It’s a friendly data serialization format. In fact it’s so friendly
that both people and programs can read it quite well. You can play around with the standard by following this link.
YAML is a text, and you need to convert it into an object. But I’m not asking you to implement the entire YAML standard,
we’ll implement it step by step.
The first step is the key-value conversion. The key can be any string consisting of Latin letters and numbers.
The value can be a single-line string (which consists of spaces, Latin letters and numbers) or a number (int).
I’ll show some examples:
name: Alex
age: 12
Converted into an object.
{
"name": "Alex",
"age": 12
}
Note that the number automatically gets type int
Another example shows that the string may contain spaces.
name: Alex Fox
age: 12
class: 12b
Will be converted into the next object.
{
"age": 12,
"name": "Alex Fox",
"class": "12b"
}
Pay attention to a few things. Between the string "age" and the string "class" there is an empty string that doesn’t
interfere with parsing. The class starts with numbers, but has letters, which means it cannot be converted to numbers,
so its type remains a string (str).
Input: A format string.
Output: An object.
"""
def yaml(a: str) -> dict:
    """Parse a flat ``key: value`` YAML subset into a dict.

    Blank lines are skipped. A value consisting only of digits becomes an
    ``int``; everything else stays a string.

    :param a: multi-line string of ``key: value`` pairs.
    :return: mapping of keys to parsed values.
    :raises ValueError: if a non-blank line has no ``': '`` separator.
    """
    result = {}
    for line in a.split('\n'):
        if not line:
            continue
        # Split on the FIRST ': ' only (the original split on every
        # occurrence and crashed on values containing ': ').
        key, value = line.split(': ', 1)
        result[key] = int(value) if value.isdigit() else value
    return result
# CheckiO self-check harness: sample run plus sanity asserts.
if __name__ == '__main__':
    print("Example:")
    print(yaml("""name: Alex
age: 12"""))
    # These "asserts" are used for self-checking and not for an auto-testing
    assert yaml("""name: Alex
age: 12""") == {'age': 12, 'name': 'Alex'}
    assert yaml("""name: Alex Fox
age: 12
class: 12b""") == {'age': 12,
                   'class': '12b',
                   'name': 'Alex Fox'}
    print("Coding complete? Click 'Check' to earn cool rewards!")
| [
"sa_do@tut.by"
] | sa_do@tut.by |
9ff9b342d2e32f0f5814c37a460c480096cf8637 | 2a54a1d9996778362421299a936bb0dadaace958 | /units/adms/mysite/video/models/__init__.py | ef77e1a3c15686119e05351b1c721d4a259ea750 | [] | no_license | icprog/zktime_wlm | 6d0719b5210c4d3196b5958bccbb7e606785ece3 | 449c487ce4664dde734f8007a974ed883801d106 | refs/heads/master | 2021-03-21T10:20:54.157131 | 2018-11-24T04:10:42 | 2018-11-24T04:10:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from models import VideoPreviewPage, VideoLinkagePage
# Display name of the app in the UI (lazily translated).
verbose_name = _(u"视频")
# Position of this app's entry in the navigation menu.
_menu_index = 5


def app_options():
    """Return this app's option declarations for the options registry."""
    # NOTE(review): SYSPARAM is imported but unused below -- confirm.
    from base.options import SYSPARAM, PERSONAL
    return (
        # option name, default value, display name, description[, scope, flag]
        ('video_default_page', 'video/VideoPreviewPage/', u"%s"%_(u'视频默认页面'), "", PERSONAL, False),
    )
| [
"657984027@qq.com"
] | 657984027@qq.com |
554afa3e7378aff2d8ee25f90fd12005cd72f244 | 2e69d2f140bb653938dc1b7238b85a4af4754123 | /metanic/__main__.py | 414e4d5a49e7fde55e4421a1917758ccb70b4e8e | [
"BSD-3-Clause"
] | permissive | metanic/services | f866d78e7207624cf4b420929d987b6005394d1d | a00b99f9b697864a078e2cb886be4d75c10458a9 | refs/heads/master | 2021-06-06T22:33:56.823827 | 2018-08-14T08:05:00 | 2018-08-14T08:05:00 | 115,375,318 | 0 | 0 | NOASSERTION | 2020-02-11T21:34:25 | 2017-12-26T01:57:09 | Python | UTF-8 | Python | false | false | 80 | py | #!/usr/bin/env python
from metanic import command_line

# Entry point for ``python -m metanic``: delegate to the CLI dispatcher.
command_line.execute()
| [
"monokrome@monokro.me"
] | monokrome@monokro.me |
b0b371a2160af839a1375cb22c786957b9801837 | 6ab217b675b0d33dec9d8985efc2de314e3a7a28 | /menus/models/menu/models.py | a34c7d7e3baa63430deb2a70792201dff695a8c7 | [] | no_license | nujkram/dream_cream_pastries | 3547928af859ebbb93f8d6ff64d02796d8c61a0c | c6a764f4f2c16191661ee6747dc0daa896eae5ec | refs/heads/master | 2023-06-20T20:20:21.001373 | 2021-07-29T00:55:49 | 2021-07-29T00:55:49 | 375,721,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,630 | py | """
Dream Cream Pastries Project
Menu 0.0.1
Menu models
Menu
Author: Mark
"""
import uuid as uuid
from django.urls import reverse
from django_extensions.db.fields import AutoSlugField
from django.db.models import CharField
from django.db.models import DateTimeField
from django_extensions.db.fields import AutoSlugField
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
from django.contrib.auth import models as auth_models
from django.db import models as models
from django_extensions.db import fields as extension_fields
from django.apps import apps
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from django.contrib.postgres.fields import JSONField
from .managers import MenuManager as manager
class Menu(models.Model):
    """A sellable menu item (name, price, category) with audit metadata."""
    # === Basic ===
    created = models.DateTimeField(null=False, auto_now_add=True)
    updated = models.DateTimeField(null=False, auto_now=True)

    # === Identifiers ===
    name = models.CharField(max_length=150)
    uuid = models.UUIDField(unique=True, default=uuid.uuid4, null=True, editable=True)
    slug = extension_fields.AutoSlugField(populate_from='name', blank=True)

    # === Properties ===
    price = models.DecimalField(decimal_places=2, max_digits=5)

    # === State ===
    is_best = models.BooleanField(default=True)
    is_active = models.BooleanField(default=True)
    meta = JSONField(default=dict, blank=True, null=True)

    # === Relationship Fields ===
    # NOTE(review): `category` reuses related_name 'menus_created_by_user',
    # which reads wrong for a category link -- confirm the intended reverse
    # accessor name on MenuCategory.
    category = models.ForeignKey(
        'menus.MenuCategory',
        null=True,
        db_index=False,
        on_delete=models.SET_NULL,
        related_name='menus_created_by_user'
    )
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        null=True,
        db_index=False,
        on_delete=models.SET_NULL,
        related_name='menus_created_by_user'
    )
    last_updated_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        null=True,
        db_index=False,
        on_delete=models.SET_NULL,
        related_name='menus_updated_by_user'
    )
    objects = manager()

    class Meta:
        ordering = ('-created',)
        verbose_name = 'Menu'
        verbose_name_plural = 'Menus'

    ################################################################################
    # === Magic Methods ===
    ################################################################################
    def __str__(self):
        return self.name

    ################################################################################
    # === Model overrides ===
    ################################################################################
    def clean(self, *args, **kwargs):
        # add custom validation here
        super().clean()

    def save(self, *args, **kwargs):
        # self.full_clean()
        super().save(*args, **kwargs)
################################################################################
# === Model-specific methods ===
################################################################################
################################################################################
# === Signals ===
################################################################################
@receiver(post_save, sender=Menu)
def scaffold_post_save(sender, instance=None, created=False, **kwargs):
    """post_save hook for Menu; intentionally a placeholder for now."""
    pass
@receiver(pre_save, sender=Menu)
def scaffold_pre_save(sender, instance=None, created=False, **kwargs):
    """pre_save hook for Menu; intentionally a placeholder for now."""
    pass
| [
"markjungersaniva@gmail.com"
] | markjungersaniva@gmail.com |
4acd8ea6336e7bf02139a24dd366e5ebe87c3059 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03126/s809270834.py | 61cb5d08d9d531977246b15447ef2eccf036d8fd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | N, M = map(int, input().split())
# Read N lists from stdin; each list is "<count> <item> <item> ...".
# Print how many of the M items appear in every one of the N lists.
L = []
for _ in range(N):
    L.append(list(map(int, input().split())))
liked_by = [0] * M
for person in L:
    for k in range(1, person[0] + 1):
        liked_by[person[k] - 1] += 1
print(liked_by.count(N))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c3c871f5185f3314548f9bbb499779353b18d90e | 90cea58e80309d2dff88f73f3a43ed5f943ff97d | /PalindromeMinInsertions_v1.py | 004cc7d2f2f6edadef6d3f7965a8554044d71501 | [] | no_license | SaiSujithReddy/CodePython | 0b65c82b0e71dba2bbd4c1aefec4e6cd6fd42341 | 4c05b7909092009afffa4536fd284060d20e462d | refs/heads/master | 2022-02-24T09:21:15.284745 | 2019-10-07T23:36:17 | 2019-10-07T23:36:17 | 106,611,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,401 | py | #http://isharemylearning.blogspot.com/2012/08/minimum-number-of-insertions-in-string.html
'''
This problem can be two steps
step 1:- find longest common subsequence
step 2:- total string - common sub sequence = no of insertions
'''
#Things learnt
# creating a matrix and initializing with zeores
import math
def find_common_subsequence(str1, str2):
    """Return the length of the longest common subsequence of str1 and str2.

    Classic O(len(str1) * len(str2)) dynamic programming:
    dp[x][y] = LCS length of str1[:x] and str2[:y].

    Fixes in this revision:
    - the original mixed matrix[y][x] and matrix[x][y] indexing, which only
      happened to work when both strings had equal length;
    - the demonstration `matrix_v2` built by appending the *same* row list
      repeatedly (all rows aliased) has been removed;
    - debug prints removed.
    """
    rows = len(str1) + 1
    cols = len(str2) + 1
    # Build each row independently; repeating one row object would alias
    # every row to the same list.
    dp = [[0] * cols for _ in range(rows)]
    for x in range(1, rows):
        for y in range(1, cols):
            if str1[x - 1] == str2[y - 1]:
                dp[x][y] = dp[x - 1][y - 1] + 1
            else:
                dp[x][y] = max(dp[x][y - 1], dp[x - 1][y])
    return dp[rows - 1][cols - 1]
word = "hotoh"
print(word[::-1])
# Minimum insertions to reach a palindrome = len(s) - LCS(s, reversed(s)).
lcs_len = find_common_subsequence(word, word[::-1])
min_insertions = len(word) - lcs_len
print("min_insertions is ", min_insertions)
#heolloeh
#min insertions = 3
# hi
# hih
#min insertions = 1
# abcdd - 3
# abcd - 3
# hotoh
| [
"sai.marapareddy@gmail.com"
] | sai.marapareddy@gmail.com |
41f535ff6ce551b3a9e0a46df9f6165c674a3c77 | 28e62867cd8d067f86e1aced1f0bf877abf33e68 | /naive_bayes/sohu_news_topic_classification_using_naive_bayes.py | dca4bf7f516a62c0a9224af2a2a8cf14f85d668a | [] | no_license | xiongfeihtp/scikit_learn | ffd462913deb8abc958311dbd0c13fe755468e51 | e97bbbd26ff47325cc0791ce241e9f1844feba9e | refs/heads/master | 2021-05-15T03:52:54.175922 | 2018-02-02T14:11:06 | 2018-02-02T14:11:06 | 119,989,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,560 | py | #! /usr/bin/env python
#coding=utf-8
# Authors: Hanxiaoyang <hanxiaoyang.ml@gmail.com>
# simple naive bayes classifier to classify sohu news topic
# data can be downloaded in http://www.sogou.com/labs/dl/cs.html
# 代码功能:简易朴素贝叶斯分类器,用于对搜狐新闻主题分类,数据可在http://www.sogou.com/labs/dl/cs.html下载(精简版)
# 详细说明参见博客http://blog.csdn.net/han_xiaoyang/article/details/50629608
# 作者:寒小阳<hanxiaoyang.ml@gmail.com>
import sys, math, random, collections
def shuffle(inFile):
    """Shuffle the labelled lines of *inFile* and split them 60/40.

    Returns (trainText, testText): the first 3/5 of the shuffled lines
    for training and the remaining 2/5 for testing.
    """
    with open(inFile) as fin:
        textLines = [line.strip() for line in fin]
    print("正在准备训练和测试数据,请稍后...")
    random.shuffle(textLines)
    num = len(textLines)
    # Integer division: the original used `/`, and a float slice index
    # raises TypeError on Python 3.
    cut = 3 * num // 5
    trainText = textLines[:cut]
    testText = textLines[cut:]
    print("准备训练和测试数据准备完毕,下一步...")
    return trainText, testText
# Nine news categories, one letter per category; the index is the class id.
lables = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']

def lable2id(lable):
    """Map a category letter to its numeric index; raise on unknown labels."""
    for idx, known in enumerate(lables):
        if known == lable:
            return idx
    raise Exception('Error lable %s' % (lable))
def doc_dict():
    """Return a fresh per-category counter: one zero slot per label."""
    return [0] * len(lables)
def mutual_info(N, Nij, Ni_, N_j):
    """One mutual-information term (log base 2, with +1 smoothing on Nij)."""
    ratio = N * (Nij + 1) * 1.0 / (Ni_ * N_j)
    return Nij * 1.0 / N * math.log(ratio) / math.log(2)
def count_for_cates(trainText, featureFile):
    """Select feature words by mutual information and write them to a file.

    Counts, over the labelled training lines, how often each word occurs in
    each category, ranks words per category by mutual information, keeps the
    top 100 per category, and writes the per-category document counts plus
    the selected feature words to *featureFile*.
    """
    docCount = [0] * len(lables)
    # BUG FIX: defaultdict needs the factory itself (doc_dict); the original
    # passed doc_dict() -- a list instance -- which raises TypeError.
    wordCount = collections.defaultdict(doc_dict)
    # Scan the corpus and accumulate per-category word counts.
    for line in trainText:
        lable, text = line.strip().split(' ', 1)
        index = lable2id(lable[0])
        for word in text.split(' '):
            wordCount[word][index] += 1
        docCount[index] += 1
    # Mutual information of every (word, category) pair.
    print("计算互信息,提取关键/特征词中,请稍后...")
    miDict = collections.defaultdict(doc_dict)
    N = sum(docCount)
    for k, vs in wordCount.items():
        for i in range(len(vs)):
            N11 = vs[i]
            N10 = sum(vs) - N11
            N01 = docCount[i] - N11
            N00 = N - N11 - N10 - N01
            mi = mutual_info(N, N11, N10 + N11, N01 + N11) \
                + mutual_info(N, N10, N10 + N11, N00 + N10) \
                + mutual_info(N, N01, N01 + N11, N01 + N00) \
                + mutual_info(N, N00, N00 + N10, N00 + N01)
            miDict[k][i] = mi
    fWords = set()
    for i in range(len(docCount)):
        keyf = lambda x: x[1][i]
        sortedDict = sorted(miDict.items(), key=keyf, reverse=True)
        # Keep the 100 highest-MI words for this category; guard against a
        # vocabulary smaller than 100 (the original would IndexError).
        for j in range(min(100, len(sortedDict))):
            fWords.add(sortedDict[j][0])
    with open(featureFile, 'w') as out:
        # First line: the per-category document counts.
        out.write(str(docCount) + "\n")
        # Remaining lines: one feature word each.
        for fword in fWords:
            out.write(fword + "\n")
    print("特征词写入完毕...")
def load_feature_words(featureFile):
    """Read back the (docCounts, features) pair written by count_for_cates."""
    with open(featureFile) as f:
        # First line holds the per-category document counts as a Python
        # literal. NOTE(review): eval is fine on our own feature files but
        # must not be fed untrusted input.
        docCounts = eval(f.readline())
        # Every remaining line is one feature word.
        features = {line.strip() for line in f}
    return docCounts, features
def train_bayes(featureFile, textFile, modelFile):
    """Train the naive Bayes model and write it to *modelFile*.

    Counts feature-word occurrences per category, Laplace-smooths them into
    probabilities, and writes one "word<TAB>[scores]" line per feature word.

    *textFile* may be a path to the training file or an already-loaded list
    of labelled lines (as returned by shuffle()), which is what the
    __main__ block actually passes.
    """
    print("使用朴素贝叶斯训练中...")
    docCounts, features = load_feature_words(featureFile)
    # BUG FIX: pass the factory itself to defaultdict, not doc_dict().
    wordCount = collections.defaultdict(doc_dict)
    # Total feature-word occurrences per category.
    tCount = [0] * len(docCounts)
    # Accept either a filename or an in-memory list of lines.
    lines = textFile if isinstance(textFile, list) else open(textFile)
    for line in lines:
        lable, text = line.strip().split(' ', 1)
        index = lable2id(lable[0])
        for word in text.split(' '):
            if word in features:
                tCount[index] += 1
                wordCount[word][index] += 1
    print("训练完毕,写入模型...")
    with open(modelFile, 'w') as outModel:
        # Laplace smoothing so unseen words never get zero probability.
        for k, v in wordCount.items():
            scores = [(v[i] + 1) * 1.0 / (tCount[i] + len(wordCount))
                      for i in range(len(v))]
            # BUG FIX: scores is a list; the original `k + "\t" + scores`
            # raised TypeError. load_model() evals the list literal back.
            outModel.write(k + "\t" + str(scores) + "\n")
def load_model(modelFile):
    """Load the trained model: {word: [per-category smoothed scores]}."""
    print("加载模型中...")
    scores = {}
    with open(modelFile) as f:
        for line in f:
            word, counts = line.strip().rsplit('\t', 1)
            # The scores were written as a Python list literal.
            scores[word] = eval(counts)
    return scores
def predict(featureFile, modelFile, testText):
    """Classify every line of *testText* and report accuracy.

    Each line is "<label> <space-separated words>"; the predicted class is
    the arg-max of log-prior + sum of log feature-word likelihoods.
    """
    # BUG FIX: the original called load_feature_words() with no argument.
    docCounts, features = load_feature_words(featureFile)
    # Log-priors of the categories.
    docScores = [math.log(count * 1.0 / sum(docCounts)) for count in docCounts]
    scores = load_model(modelFile)
    rCount = 0
    docCount = 0
    print("正在使用测试数据验证模型效果...")
    for line in testText:
        lable, text = line.strip().split(' ', 1)
        index = lable2id(lable[0])
        preValues = list(docScores)
        for word in text.split(' '):
            if word in features:
                for i in range(len(preValues)):
                    preValues[i] += math.log(scores[word][i])
        # Predicted class = arg-max of the per-category log scores.
        pIndex = preValues.index(max(preValues))
        if pIndex == index:
            rCount += 1
        docCount += 1
    # BUG FIX: the original passed (rCount, docCount) -- swapped relative to
    # the "total" and "correct" labels in the format string.
    print("总共测试文本量: %d , 预测正确的类别量: %d, 朴素贝叶斯分类器准确度:%f" % (docCount, rCount, rCount * 1.0 / docCount))
if __name__ == "__main__":
    # Expect: <input corpus> <feature-file path> <model-file path>
    if len(sys.argv) != 4:
        print("Usage: python sohu_news_topic_classification_using_naive_bayes.py sougou_news.txt feature_file.out model_file.out")
        sys.exit()
    inFile, featureFile, modelFile = sys.argv[1:4]
    trainText, testText = shuffle(inFile)
    count_for_cates(trainText, featureFile)
    train_bayes(featureFile, trainText, modelFile)
    predict(featureFile, modelFile, testText)
| [
"386344277@qq.com"
] | 386344277@qq.com |
11b0b57780fc2bb8391dfd861237d5444e9240ea | d05a59feee839a4af352b7ed2fd6cf10a288a3cb | /xlsxwriter/test/comparison/test_chart_display_units04.py | fc0490c067530d89aca6ff3e32dc9bee73d48d33 | [
"BSD-2-Clause-Views"
] | permissive | elessarelfstone/XlsxWriter | 0d958afd593643f990373bd4d8a32bafc0966534 | bb7b7881c7a93c89d6eaac25f12dda08d58d3046 | refs/heads/master | 2020-09-24T06:17:20.840848 | 2019-11-24T23:43:01 | 2019-11-24T23:43:01 | 225,685,272 | 1 | 0 | NOASSERTION | 2019-12-03T18:09:06 | 2019-12-03T18:09:05 | null | UTF-8 | Python | false | false | 1,160 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """Compare a workbook produced by XlsxWriter with one created in Excel."""

    def setUp(self):
        # Reference workbook this test's output is diffed against.
        self.set_filename('chart_display_units04.xlsx')

    def test_create_file(self):
        """Build a column chart whose y-axis display units are hidden."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        chart = workbook.add_chart({'type': 'column'})
        # Pin the axis ids so the generated XML matches the reference file.
        chart.axis_ids = [56159232, 61364096]

        values = [10000000, 20000000, 30000000, 20000000, 10000000]
        worksheet.write_column(0, 0, values)

        chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
        chart.set_y_axis({'display_units': 'ten_thousands',
                          'display_units_visible': 0})
        worksheet.insert_chart('E9', chart)

        workbook.close()
        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
2987363abf0a175a6d50f3ddbd9757b36709b381 | bec8f235b1392542560166dd02c2f0d88c949a24 | /autobahn/autobahn/wamp/uri.py | dacbea3980fd0a55929f7c1838274d21ccae7b8a | [
"Apache-2.0"
] | permissive | gourneau/AutobahnPython | f740f69b9ecbc305a97a5412ba3bb136a4bdec69 | 5193e799179c2bfc3b3f8dda86ccba69646c7ee3 | refs/heads/master | 2021-01-15T22:02:32.459491 | 2014-07-02T13:34:57 | 2014-07-02T13:34:57 | 21,437,288 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,339 | py | ###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import re
import six
# TODO:
# suffix matches
# args + kwargs
# uuid converter
# multiple URI patterns per decorated object
# classes: Pattern, EndpointPattern, ..
class Pattern:
    """
    A WAMP URI Pattern.

    Wraps either an exact URI or a wildcard pattern such as
    ``"com.myapp.product.<product:int>.update"`` and matches concrete URIs
    against it, extracting (and type-converting) the named components.
    """

    # What the pattern's target is used for.
    URI_TARGET_ENDPOINT = 1
    URI_TARGET_HANDLER = 2
    URI_TARGET_EXCEPTION = 3

    # How the URI is matched.
    URI_TYPE_EXACT = 1
    URI_TYPE_PREFIX = 2
    URI_TYPE_WILDCARD = 3

    # Plain component, "<name>" component, "<name:type>" component.
    _URI_COMPONENT = re.compile(r"^[a-z][a-z0-9_]*$")
    _URI_NAMED_COMPONENT = re.compile(r"^<([a-z][a-z0-9_]*)>$")
    _URI_NAMED_CONVERTED_COMPONENT = re.compile(r"^<([a-z][a-z0-9_]*):([a-z]*)>$")

    def __init__(self, uri, target):
        """
        Constructor for WAMP URI pattern.

        :param uri: The URI or URI pattern, e.g. `"com.myapp.product.<product:int>.update"`.
        :type uri: str
        :param target: The target for this pattern: a procedure endpoint (a callable),
                       an event handler (a callable) or an exception (a class).
        """
        assert(type(uri) == six.text_type)
        assert(target in [Pattern.URI_TARGET_ENDPOINT,
                          Pattern.URI_TARGET_HANDLER,
                          Pattern.URI_TARGET_EXCEPTION])

        components = uri.split('.')
        pl = []   # regex fragment per component
        nc = {}   # name -> converter for named components
        # BUG FIX: the original set i = 0 but never incremented it, so the
        # "suffix must be the last component" check misfired for every URI
        # with more than one component. enumerate() gives the real index.
        for i, component in enumerate(components):
            match = Pattern._URI_NAMED_CONVERTED_COMPONENT.match(component)
            if match:
                ctype = match.groups()[1]
                if ctype not in ['string', 'int', 'suffix']:
                    raise Exception("invalid URI")
                if ctype == 'suffix' and i != len(components) - 1:
                    raise Exception("invalid URI")
                name = match.groups()[0]
                if name in nc:
                    # duplicate component names would be ambiguous
                    raise Exception("invalid URI")
                if ctype in ['string', 'suffix']:
                    nc[name] = str
                elif ctype == 'int':
                    nc[name] = int
                else:
                    # should not arrive here
                    raise Exception("logic error")
                pl.append("(?P<{}>[a-z0-9_]+)".format(name))
                continue

            match = Pattern._URI_NAMED_COMPONENT.match(component)
            if match:
                name = match.groups()[0]
                if name in nc:
                    raise Exception("invalid URI")
                nc[name] = str
                pl.append("(?P<{}>[a-z][a-z0-9_]*)".format(name))
                continue

            match = Pattern._URI_COMPONENT.match(component)
            if match:
                pl.append(component)
                continue

            raise Exception("invalid URI")

        if nc:
            # URI pattern with named components: compile a wildcard regex.
            self._type = Pattern.URI_TYPE_WILDCARD
            p = "^" + r"\.".join(pl) + "$"
            self._pattern = re.compile(p)
            self._names = nc
        else:
            # Exact URI: no regex needed.
            self._type = Pattern.URI_TYPE_EXACT
            self._pattern = None
            self._names = None
        self._uri = uri
        self._target = target

    def uri(self):
        """
        Returns the original URI (pattern) for this pattern.

        :returns str -- The URI (pattern), e.g. `"com.myapp.product.<product:int>.update"`.
        """
        return self._uri

    def match(self, uri):
        """
        Match the given (fully qualified) URI according to this pattern
        and return extracted args and kwargs.

        :param uri: The URI to match, e.g. `"com.myapp.product.123456.update"`.
        :type uri: str
        :returns tuple -- A tuple `(args, kwargs)`
        """
        args = []
        kwargs = {}
        if self._type == Pattern.URI_TYPE_EXACT:
            return args, kwargs
        elif self._type == Pattern.URI_TYPE_WILDCARD:
            match = self._pattern.match(uri)
            if match:
                for key in self._names:
                    val = match.group(key)
                    # Apply the per-component converter (str or int).
                    val = self._names[key](val)
                    kwargs[key] = val
                return args, kwargs
            else:
                raise Exception("no match")

    def is_endpoint(self):
        """
        Check if this pattern is for a procedure endpoint.

        :returns bool -- `True`, iff this pattern is for a procedure endpoint.
        """
        return self._target == Pattern.URI_TARGET_ENDPOINT

    def is_handler(self):
        """
        Check if this pattern is for an event handler.

        :returns bool -- `True`, iff this pattern is for an event handler.
        """
        return self._target == Pattern.URI_TARGET_HANDLER

    def is_exception(self):
        """
        Check if this pattern is for an exception.

        :returns bool -- `True`, iff this pattern is for an exception.
        """
        return self._target == Pattern.URI_TARGET_EXCEPTION
"tobias.oberstein@tavendo.de"
] | tobias.oberstein@tavendo.de |
ba00017e1750e5d0c550591117dc58935839be12 | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /buildtools/third_party/libc++/trunk/test/libcxx/test/format.py | 19c9fc742a497f950919188035ea06e067ed5417 | [
"BSD-3-Clause",
"MIT",
"NCSA"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 7,344 | py | import errno
import os
import time
import lit.Test # pylint: disable=import-error
import lit.TestRunner # pylint: disable=import-error
import lit.util # pylint: disable=import-error
from libcxx.test.executor import LocalExecutor as LocalExecutor
import libcxx.util
class LibcxxTestFormat(object):
    """
    Custom test format handler for the libc++ test suite.

    Tests fall into three categories:

      FOO.pass.cpp - must compile, link, run and exit with code 0.
      FOO.fail.cpp - negative test; expected to fail compilation.
      FOO.sh.cpp   - driven by LIT's ShTest format (RUN: lines).
    """

    def __init__(self, cxx, use_verify_for_fail, execute_external,
                 executor, exec_env):
        self.cxx = cxx
        self.use_verify_for_fail = use_verify_for_fail
        self.execute_external = execute_external
        self.executor = executor
        self.exec_env = dict(exec_env)

    # TODO: Move this into lit's FileBasedTest
    def getTestsInDirectory(self, testSuite, path_in_suite,
                            litConfig, localConfig):
        source_path = testSuite.getSourcePath(path_in_suite)
        for filename in os.listdir(source_path):
            # Ignore dot files and excluded tests.
            if filename.startswith('.') or filename in localConfig.excludes:
                continue
            filepath = os.path.join(source_path, filename)
            if os.path.isdir(filepath):
                continue
            # Only files with a configured test suffix are tests.
            if any(filename.endswith(ext) for ext in localConfig.suffixes):
                yield lit.Test.Test(testSuite, path_in_suite + (filename,),
                                    localConfig)

    def execute(self, test, lit_config):
        # Retry while the executable is briefly held open by another
        # process (ETXTBSY); any other OSError propagates.
        while True:
            try:
                return self._execute(test, lit_config)
            except OSError as oe:
                if oe.errno != errno.ETXTBSY:
                    raise
                time.sleep(0.1)

    def _execute(self, test, lit_config):
        name = test.path_in_suite[-1]
        is_sh_test = name.endswith('.sh.cpp')
        is_pass_test = name.endswith('.pass.cpp')
        is_fail_test = name.endswith('.fail.cpp')

        if test.config.unsupported:
            return (lit.Test.UNSUPPORTED,
                    "A lit.local.cfg marked this unsupported")

        script = lit.TestRunner.parseIntegratedTestScript(
            test, require_script=is_sh_test)
        # The parser may short-circuit with a ready-made Result.
        if isinstance(script, lit.Test.Result):
            return script
        if lit_config.noExecute:
            return lit.Test.Result(lit.Test.PASS)

        # RUN: lines are only legal in .sh.cpp tests.
        if not is_sh_test and len(script) != 0:
            lit_config.fatal('Unsupported RUN line found in test %s' % name)

        tmpDir, tmpBase = lit.TestRunner.getTempPaths(test)
        substitutions = lit.TestRunner.getDefaultSubstitutions(test, tmpDir,
                                                               tmpBase)
        script = lit.TestRunner.applySubstitutions(script, substitutions)

        # Dispatch on the test kind.
        if is_sh_test:
            if not isinstance(self.executor, LocalExecutor):
                # ShTest tests cannot be run through a remote executor yet.
                return lit.Test.UNSUPPORTED, 'ShTest format not yet supported'
            return lit.TestRunner._runShTest(test, lit_config,
                                             self.execute_external, script,
                                             tmpBase)
        if is_fail_test:
            return self._evaluate_fail_test(test)
        if is_pass_test:
            return self._evaluate_pass_test(test, tmpBase, lit_config)
        # No other test type is supported.
        assert False

    def _clean(self, exec_path):  # pylint: disable=no-self-use
        libcxx.util.cleanFile(exec_path)

    def _evaluate_pass_test(self, test, tmpBase, lit_config):
        execDir = os.path.dirname(test.getExecPath())
        source_path = test.getSourcePath()
        exec_path = tmpBase + '.exe'
        object_path = tmpBase + '.o'
        # Create the output directory if it does not already exist.
        lit.util.mkdir_p(os.path.dirname(tmpBase))
        try:
            # Compile and link the test.
            cmd, out, err, rc = self.cxx.compileLinkTwoSteps(
                source_path, out=exec_path, object_file=object_path,
                cwd=execDir)
            compile_cmd = cmd
            if rc != 0:
                report = libcxx.util.makeReport(cmd, out, err, rc)
                report += "Compilation failed unexpectedly!"
                return lit.Test.FAIL, report
            # Run the compiled test.
            local_cwd = os.path.dirname(source_path)
            env = self.exec_env if self.exec_env else None
            # TODO: Only list actually needed files in file_deps.
            # For now every .dat file next to the test is treated as a
            # dependency; a `// FILE-DEP: foo.dat` marker per test would
            # track this precisely.
            data_files = [os.path.join(local_cwd, f)
                          for f in os.listdir(local_cwd) if f.endswith('.dat')]
            cmd, out, err, rc = self.executor.run(exec_path, [exec_path],
                                                  local_cwd, data_files, env)
            if rc != 0:
                report = libcxx.util.makeReport(cmd, out, err, rc)
                report = "Compiled With: %s\n%s" % (compile_cmd, report)
                report += "Compiled test failed unexpectedly!"
                return lit.Test.FAIL, report
            return lit.Test.PASS, ''
        finally:
            # exec_path is removed via _clean() so subclasses can override
            # the cleanup; the object file is always removed here.
            libcxx.util.cleanFile(object_path)
            self._clean(exec_path)

    def _evaluate_fail_test(self, test):
        source_path = test.getSourcePath()
        with open(source_path, 'r') as f:
            contents = f.read()
        verify_tags = ['expected-note', 'expected-remark', 'expected-warning',
                       'expected-error', 'expected-no-diagnostics']
        # Use clang -verify only when the test actually carries verify tags.
        use_verify = self.use_verify_for_fail and \
            any(tag in contents for tag in verify_tags)
        extra_flags = []
        if use_verify:
            extra_flags += ['-Xclang', '-verify',
                            '-Xclang', '-verify-ignore-unexpected=note']
        cmd, out, err, rc = self.cxx.compile(source_path, out=os.devnull,
                                             flags=extra_flags)
        # Without -verify the compile must *fail* (rc == 1); with -verify it
        # must succeed with all diagnostics accounted for (rc == 0).
        expected_rc = 0 if use_verify else 1
        if rc == expected_rc:
            return lit.Test.PASS, ''
        report = libcxx.util.makeReport(cmd, out, err, rc)
        report_msg = ('Expected compilation to fail!' if not use_verify else
                      'Expected compilation using verify to pass!')
        return lit.Test.FAIL, report + report_msg + '\n'
| [
"enrico.weigelt@gr13.net"
] | enrico.weigelt@gr13.net |
c320a6011e9be62d78fc5e08b3e9fbb8943f2cab | b1efb356e55df6a5f1243d803d51b8e9bb6e6938 | /nextgisweb/resmeta/__init__.py | e121253161e1db9ff2d05f3c3b4cccaaf10b7ef7 | [] | no_license | neroks/nextgisweb | 6fa6621824db05e51316bf993125f79773c97932 | 59ed0e9637a3df0e2388160d9871b435beeaa466 | refs/heads/2 | 2021-01-15T15:05:02.615869 | 2015-10-05T10:39:34 | 2015-10-05T10:39:34 | 43,687,459 | 0 | 0 | null | 2015-10-05T13:58:10 | 2015-10-05T13:58:10 | null | UTF-8 | Python | false | false | 425 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ..component import Component, require
from .util import COMP_ID
from .model import Base
@Component.registry.register
class ResourceMetadataComponent(Component):
    """Component registering the resource-metadata models and views."""

    identity = COMP_ID
    metadata = Base.metadata

    @require('resource')
    def setup_pyramid(self, config):
        # Deferred import -- presumably avoids an import cycle at module
        # load time (TODO confirm); behavior unchanged.
        from . import view  # NOQA
        view.setup_pyramid(self, config)
| [
"me@dezhin.net"
] | me@dezhin.net |
44edd73cb45272446b82301c8236ea055efbeea5 | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/cv/ghostnet_quant/src/config.py | c5428ae53bc66a09547fb9cba9eba55da4895044 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 1,599 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
network config setting, will be used in train.py and eval.py
"""
from easydict import EasyDict as ed
# Training hyper-parameters for the Ascend backend.
config_ascend = ed({
    "num_classes": 37,
    "image_height": 224,
    "image_width": 224,
    "batch_size": 256,
    "epoch_size": 200,
    "warmup_epochs": 4,
    "lr": 0.4,
    "momentum": 0.9,
    "weight_decay": 4e-5,
    "label_smooth": 0.1,
    "loss_scale": 1024,
    "save_checkpoint": True,
    "save_checkpoint_epochs": 1,
    "keep_checkpoint_max": 200,
    "save_checkpoint_path": "./checkpoint",
})

# Training hyper-parameters for the GPU backend (differs from Ascend in
# batch size, epochs and checkpoint retention only).
config_gpu = ed({
    "num_classes": 37,
    "image_height": 224,
    "image_width": 224,
    "batch_size": 3,
    "epoch_size": 370,
    "warmup_epochs": 4,
    "lr": 0.4,
    "momentum": 0.9,
    "weight_decay": 4e-5,
    "label_smooth": 0.1,
    "loss_scale": 1024,
    "save_checkpoint": True,
    "save_checkpoint_epochs": 1,
    "keep_checkpoint_max": 500,
    "save_checkpoint_path": "./checkpoint",
})
| [
"chenhaozhe1@huawei.com"
] | chenhaozhe1@huawei.com |
c3db3977e70bcce001aa8e9e81b214149ce62687 | f8104b29a8d0dbeb407060e494a206ca69335aeb | /tools/datasets/voc/voc_statistic.py | d3c9079646b9535e532a50f45ced63ee3ca400ec | [] | no_license | Sebastixian/wwtool | c19f665f96e8b942e94af47db590f5bb28072f06 | 2f462a3d028b766234d62a3ef706a0f08f10680a | refs/heads/master | 2023-06-01T04:21:22.066639 | 2021-06-25T07:40:13 | 2021-06-25T07:40:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,508 | py | import wwtool
# Class-id -> name mapping for the small-object subset (kept for reference;
# the statistics below are run with class_instance=None / coco_class=None).
coco_small_class = {
    1: 'airplane',
    2: 'bridge',
    3: 'storage-tank',
    4: 'ship',
    5: 'swimming-pool',
    6: 'vehicle',
    7: 'person',
    8: 'wind-mill',
}

ann_file_name = ['voc', 'merge']
# ann_file_name.append('small_object')
ann_file = './data/{}/v1/coco/annotations/{}.json'.format(ann_file_name[0], '_'.join(ann_file_name))

# Object-size buckets: absolute pixel areas, or area-to-image ratios.
size_measure_by_ratio = False
if not size_measure_by_ratio:
    size_set = [4*4, 8*8, 16*16, 32*32, 64*64, 64*64]
    label_set = ["4*4", "8*8", "16*16", "32*32", "64*64", "64*64-inf"]
else:
    size_set = [0.12/100, 1.08/100, 9.72/100]
    label_set = ["0.12/100", "1.08/100", "9.72/100"]

class_instance = wwtool.Small()
statistic = wwtool.COCO_Statistic(ann_file, size_set=size_set, label_set=label_set, size_measure_by_ratio=size_measure_by_ratio, class_instance=None, show_title=False)

# Each size distribution is plotted twice: plain and as a pie chart;
# the per-class distribution with and without count annotations.
for pie_flag in [False, True]:
    statistic.total_size_distribution(plot_pie=pie_flag, save_file_name=ann_file_name[:])
for number_flag in [False, True]:
    statistic.class_size_distribution(coco_class=None, save_file_name=ann_file_name[:], number=number_flag)
statistic.image_object_num_distribution(save_file_name=ann_file_name[:])
statistic.object_aspect_ratio_distribution(save_file_name=ann_file_name[:])
# statistic.class_num_per_image(coco_class=coco_dior_class, save_file_name=ann_file_name[:])
"jwwangchn@outlook.com"
] | jwwangchn@outlook.com |
aaf2b699724a92ae1623e014c4f605c3897e122f | 5afb3dff6e99d9bf18208c83afb7a7d65f26bbd7 | /licode/st214.py | ccc8e64a5658d6d4189c7d1262b890e6fefb9db5 | [] | no_license | yanfriend/python-practice | 4b565e58db9cdc7596f9135a8f7b9bae4be18de3 | 236a74c0d1d84f730fa5d10146cc201a4c49567d | refs/heads/master | 2018-12-09T00:34:13.747045 | 2018-12-03T04:53:17 | 2018-12-03T04:53:17 | 62,094,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | """
Given a string S, you are allowed to convert it to a palindrome by adding characters in front of it.
Find and return the shortest palindrome you can find by performing this transformation.
For example:
Given "aacecaaa", return "a aacecaaa".
Given "abcd", return "dcb abcd".
"""
class Solution(object):
    def shortestPalindrome(self, s):
        """
        Return the shortest palindrome obtainable by prepending characters to s.

        Let r = reversed(s). The smallest i with s.startswith(r[i:]) means
        s[:len(s)-i] is the longest palindromic prefix of s, so mirroring the
        remaining suffix -- r[:i] -- in front yields the minimal answer.
        i = len(s) (empty r[len(s):]) always matches, so a value is always
        returned, including for the empty string.

        (Dead commented-out O(n^2)-per-check variant removed; it timed out.)

        :type s: str
        :rtype: str
        """
        r = s[::-1]
        for i in range(len(s) + 1):
            if s.startswith(r[i:]):
                return r[:i] + s
# BUG FIX: Python-2 print statement was a SyntaxError on Python 3; the
# parenthesized form works on both.
print(Solution().shortestPalindrome("aacecaaa"))
| [
"ybai@pinterest.com"
] | ybai@pinterest.com |
165978e3b27287415483b3ffd702ec1802c32d0c | 28691ec55ebce9ec7045d12ea9675932ce12d671 | /py2rhino-project/branches/sandbox2/py2rhino/_make/data/parser_out/mesh/add_mesh.py | a3505018edc9af7061b1bf528d128f8c7977162c | [] | no_license | ianclarksmith/design-automation | 1e71315193effc0c18b4a8b41300bda6f41a3f09 | e27cc028fe582395f4a62f06697137867bb0fc33 | refs/heads/master | 2020-04-22T22:28:39.385395 | 2009-10-26T02:48:37 | 2009-10-26T02:48:37 | 37,266,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,649 | py | add_mesh = {
"input_folder_name": "Mesh_Methods",
"input_file_name": "AddMesh",
"output_package_name": "mesh",
"output_module_name": "add_mesh",
"doc_html": """
Adds a mesh object to the document.
""",
"syntax_html": {
0: ("arrVertices", "arrFaceVertices", "arrVertexNormals", "arrTextureCoordinates", "arrVertexColors"),
},
"params_html": {
0: {
"name": "arrVertices",
"py_name": "vertices",
"opt_or_req": "Required",
"type": "Array",
"name_prefix": "arr_of_dbl",
"name_main": "Vertices",
"doc": """
An array of 3-D points defining the vertices of the mesh.
"""
},
1: {
"name": "arrFaceVertices",
"py_name": "face_vertices",
"opt_or_req": "Required",
"type": "Array",
"name_prefix": "arr_of_int",
"name_main": "FaceVertices",
"doc": """
An array containing arrays of four numbers that define the vertex indices for each face of the mesh. If the third and forth vertex indices of a face are identical, a triangular face will be created. Otherwise a quad face will be created.
"""
},
2: {
"name": "arrVertexNormals",
"py_name": "vertex_normals",
"opt_or_req": "Optional",
"type": "Array",
"name_prefix": "arr_of_dbl",
"name_main": "VertexNormals",
"doc": """
An array of 3-D vectors defining the vertex normals of the mesh. Note, for every vertex, the must be a corresponding vertex normal.
"""
},
3: {
"name": "arrTextureCoordinates",
"py_name": "texture_coordinates",
"opt_or_req": "Optional",
"type": "Array",
"name_prefix": "arr_of_dbl",
"name_main": "TextureCoordinates",
"doc": """
An array of 2-D texture coordinates. Note, for every vertex, there must be a corresponding texture coordinate.
"""
},
4: {
"name": "arrVertexColors",
"py_name": "vertex_colors",
"opt_or_req": "Optional",
"type": "Array",
"name_prefix": "arr_of_int",
"name_main": "VertexColors",
"doc": """
An array of RGB color values. Note, for every vertex, there must be a corresponding vertex color.
"""
},
},
"returns_html": {
0: {
"type": "string",
"doc": "The identifier of the new object if successful."
},
1: {
"type": "null",
"doc": "If not successful, or on error."
},
},
"id_com": 494,
"params_com": {
0: {
"name": "vaVertices",
"opt_or_req": "Required",
"type": "tagVARIANT",
},
1: {
"name": "vaFaces",
"opt_or_req": "Required",
"type": "tagVARIANT",
},
2: {
"name": "vaNormals",
"opt_or_req": "Optional",
"type": "tagVARIANT",
},
3: {
"name": "vaTextures",
"opt_or_req": "Optional",
"type": "tagVARIANT",
},
4: {
"name": "vaColors",
"opt_or_req": "Optional",
"type": "tagVARIANT",
},
},
"returns_com": "tagVARIANT",
}
| [
"patrick.ht.janssen@d56020b2-6ac5-11de-89a9-0b20f3e2dceb"
] | patrick.ht.janssen@d56020b2-6ac5-11de-89a9-0b20f3e2dceb |
18b97799ee7cf38b86f53f0b5afee75638cfc52f | ac2142d192bde034ae3c6d7e07045c39d9a34aa3 | /面试题/字典按value排序.py | b3ea5016619987b3864c95af55647a3b65e58e5e | [] | no_license | budaLi/-500- | ee33a93a6c7f7d36e30a29dd1e12634034712d12 | 69c42389717f003198f652035bfc922eac8a6fef | refs/heads/master | 2022-11-20T08:42:24.255264 | 2020-07-22T07:41:52 | 2020-07-22T07:41:52 | 281,352,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | # @Time : 2020/7/22 15:14
# @Author : Libuda
# @FileName: 字典按value排序.py
# @Software: PyCharm
d = {'a': 24, 'g': 52, 'i': 12, 'k': 33}
def sort(dict):
# 会将字典变为元祖的形式
print(dict.items())
# revers代表是否逆序
dict = sorted(dict.items(),key=lambda x:x[1],reverse=True)
return dict
if __name__ == '__main__':
    # Demo: rank the module-level sample dict by value.
    ranked = sort(d)
    print(ranked)
"1364826576@qq.com"
] | 1364826576@qq.com |
ee03770234e0e1877af6b6d638fd9b1c0e787c32 | e3a7622a4d2e16b1683c183568341b39c0de88b4 | /PycharmProjects/PythonCodes/07-爬虫/01-urllib库基础用法/10-贴吧.py | 360f5374537cd289b22d10d8cba9973508583f52 | [] | no_license | TriggerDark/StudyCodes | 937a8f6988cb475d275ff429cd32df823e457296 | 6f2f339d47dbae10d55d6b6da1d7e107f7dec85f | refs/heads/master | 2022-02-09T13:52:10.895963 | 2019-03-30T13:38:55 | 2019-03-30T13:38:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py | from urllib import parse
from urllib.request import Request, urlopen
def loadPage(url, filename):
    """Fetch *url* with a desktop-browser User-Agent and return the raw
    response body (bytes). *filename* is only used for progress output.
    """
    ua_headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36"
    }
    print("正在下载" + filename)
    req = Request(url, headers=ua_headers)
    return urlopen(req).read()
def writePage(html, filename):
    """Save a downloaded page to *filename*.

    BUG FIX: loadPage() returns bytes, and the original wrote str(html) in
    text mode, producing a literal "b'...'" file. Bytes are now written in
    binary mode; plain-text input is still written as-is.
    """
    print("正在保存" + filename)
    if isinstance(html, bytes):
        with open(filename, "wb") as f:
            f.write(html)
    else:
        with open(filename, "w") as f:
            f.write(html)
    print("-" * 30)
def Spider(url, beginPage, endPage):
"""
作用:贴吧爬虫调度器,负责组合处理每个页面的url
url: 贴吧url的前部分
beginPage: 起始页面
endPage: 结束页面
"""
for page in range(beginPage, endPage + 1):
pn = (page - 1)*50
fullurl = url + "&pn=" + str(pn)
filename = "第" + str(page) + "页.html"
html = loadPage(fullurl, filename)
writePage(html, filename)
print("谢谢使用")
if __name__ == "__main__":
kw = input("请输入贴吧名:")
beginPage = int(input("请输入起始页:"))
endPage = int(input("请输入结束页:"))
url = "http://tieba.baidu.com/f?"
kw = parse.urlencode({"kw": kw})
fullurl = url + kw
Spider(fullurl, beginPage, endPage) | [
"2413044193@qq.com"
] | 2413044193@qq.com |
f48ad02a1442e3d0a72b9c11b4d7ebeb7243510d | 380a47268c5975473a2e7c38c747bc3bdbd981b1 | /benchmark/third_party/transformers/examples/research_projects/information-gain-filtration/igf/igf.py | 99bd8c2d06d71ca1f60354c762eb8933eccdea89 | [
"Apache-2.0"
] | permissive | FMInference/FlexGen | 07aa9b1918c19b02077e13ad07e76840843810dd | d34f7b4b43ed87a374f394b0535ed685af66197b | refs/heads/main | 2023-07-24T02:29:51.179817 | 2023-07-21T22:38:31 | 2023-07-21T22:38:31 | 602,270,517 | 6,821 | 411 | Apache-2.0 | 2023-07-07T22:59:24 | 2023-02-15T21:18:53 | Python | UTF-8 | Python | false | false | 14,502 | py | # Copyright 2022 - Intel Corp. All rights reserved.
# Authors: Mayank Kumar Raunak, Javier Turek, Nicole Backage
import copy
import logging
import random
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
import joblib
from transformers import AdamW, GPT2LMHeadModel, get_linear_schedule_with_warmup
logger = logging.getLogger(__name__)
def set_seed(seed):
"""
For reproducible training
Args:
seed: A seed for reproducible training
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def compute_perplexity(model, test_data, context_len):
"""
Computes perplexity of the transformer model on data in test_data
Args:
model: Pre-trained GPT2 model
test_data: Data on which perplexity calculation is required
context_len: The maximum total input sequence length after tokenization. Sequences longer
than this will be truncated, sequences shorter will be padded
Returns:
Perplexity on input test data
"""
model.eval()
device = next(model.parameters()).device
eval_batch_size = 1
context = torch.zeros((eval_batch_size, context_len), dtype=torch.long, device=device)
eval_dataloader = DataLoader(test_data, shuffle=False, batch_size=eval_batch_size)
eval_loss = torch.zeros(1, device=device)
nb_eval_examples = 0
for batch in eval_dataloader:
batch.to(device)
# pad
context.zero_()
for i in range(eval_batch_size):
context[i, :] = batch[i]
outputs = model(context, labels=context)
eval_loss += outputs[0].sum().item()
nb_eval_examples += batch.size(0)
eval_loss = eval_loss / nb_eval_examples
perplexity = torch.exp(eval_loss)
model.train()
return perplexity
def load_gpt2(model_name="gpt2"):
"""
load original gpt2 and save off for quicker loading
Args:
model_name: GPT-2
Returns:
GPT-2 model
"""
model = GPT2LMHeadModel.from_pretrained(model_name, output_hidden_states=True)
torch.save(model.state_dict(), model_name + "local.pt")
return model
def recopy_gpt2(orig_model, device, max_steps):
"""
Reset the model to the original pretrained GPT-2 weights after each iteration
Args:
orig_model: Original pretrained GPT-2 model imported from Transformers library
device: CPU/GPU
max_steps: number of training steps
Returns:
Original PreTrained GPT-2 model,
lm_optimizer: Adam optimizer with Decoupled weight decay
lm_scheduler: linear scheduler with the appropriate schedule
"""
model = copy.deepcopy(orig_model)
model.to(device)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
lm_optimizer = AdamW(optimizer_grouped_parameters, lr=5e-5, eps=1e-8)
lm_scheduler = get_linear_schedule_with_warmup(lm_optimizer, 0, max_steps)
torch.cuda.empty_cache()
return model, lm_optimizer, lm_scheduler
def intermittent_save(contexts, real_perps, past_perps, filename):
"""
save the perplexity differences to filename
Args:
contexts: Example on which the perplexity is calculated
real_perps: Perplexity after back-propagating on the selected context
past_perps: Perplexity of model before training on the context
filename: File to store perplexity differences
Returns:
file with perplexity differences
"""
# save the perplexity differences to filename
avg = np.array(real_perps).mean()
std = np.array(real_perps).std()
perp_diff = (real_perps - avg) / std
data_final = list(zip(contexts, perp_diff, past_perps))
joblib.dump(data_final, filename)
def collect_objective_set(
model,
orig_perp,
context_len,
train_data,
objective_set,
max_steps,
device,
filename="dev.jbl",
recopy_model=recopy_gpt2,
):
"""
Collect individual IGF values from pre-trained transformer model
max_steps samples of training data to train secondary model
Args:
model: Pre-trained GPT2 model
orig_perp: Perplexity of original pretrained GPT-2 model
context_len: The maximum total input sequence length after tokenization. Sequences longer
than this will be truncated, sequences shorter will be padded
train_data: Data to train model
objective_set: Contexts used to create (X,IG(X)) pairs which is the training data for secondary learner
max_steps: To calculate training epochs of model
device: GPU/CPU
filename: To store intermediate perplexity differences
recopy_model: Reset the model to the original pretrained GPT-2 weights after each iteration
Returns:
file stored intermediate perplexity differences in intermediate stages
"""
# initialize variables to record relevant information
contexts = []
real_perps = []
past_perps = []
# Initialize the transformer model
orig_model = copy.deepcopy(model)
orig_model.to(device="cpu")
torch.cuda.empty_cache()
# Compute perplexity of initial transformer model for comparison
model.train()
model, lm_optimizer, lm_scheduler = recopy_model(orig_model, device, max_steps)
for step in tqdm(range(max_steps)):
context = torch.zeros((1, context_len), dtype=torch.long, device=device)
story = random.choice(train_data)
start = random.randint(0, len(story[0]) - context_len - 1)
context[0, :] = story[0][start : start + context_len]
lm_optimizer.zero_grad()
outputs = model(context, labels=context)
lm_loss = outputs[0]
past_perp = compute_perplexity(model, context, context_len)
model.train()
lm_loss.backward()
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
# Compute perplexity after back-propagating on the selected context
real_perp = compute_perplexity(model, objective_set, context_len)
# Periodically save the stored (X, IG(X)) pairs
if step % 1000 == 0 and step > 1:
intermittent_save(contexts, real_perps, past_perps, filename)
# Reset the pretrained model to the original pretrained GPT-2 weights after each iteration
model, lm_optimizer, lm_scheduler = recopy_model(orig_model, device, max_steps)
past_perps.append(past_perp.item())
real_perps.append(orig_perp - real_perp.item())
contexts.append(np.array(context.cpu()))
intermittent_save(contexts, real_perps, past_perps, filename)
def generate_datasets(
context_len, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
):
"""
Generate objective set and training set
Args:
context_len: The maximum total input sequence length after tokenization. Sequences longer
than this will be truncated, sequences shorter will be padded
file: Tokenized data split into training set and objective set
number: size of objective dataset
min_len: minimum length of a context in objective set
trim: If True truncate the context if it exceeds context length
Returns:
Generated objective set and training data
"""
# Generate objective set and training set
# Designate the first number (100) articles that are long enough to be used
# as our objective set, rest (that are long enough) are training data for
# secondary learner
data = joblib.load(file)
print("data loaded")
objective_set = []
if trim:
for i, example in enumerate(data):
if len(example[0]) > min_len:
start = random.randint(0, len(example[0]) - context_len - 1)
objective_set.append(example[0, start : start + context_len])
if len(objective_set) >= number:
break
train_data = []
for j in range(i + 1, len(data)):
if len(data[j][0]) > min_len:
train_data.append(data[j])
else:
objective_set = data[0:number]
train_data = data[number:]
joblib.dump(objective_set, "objective_set.jbl")
print("objective set saved")
return train_data, objective_set
def train_secondary_learner(
secondary_learner, train_dataset, max_epochs, batch_size, eval_freq=50, igf_model_path="secondary_learner.pt"
):
"""
Train the secondary learner (igf_model)
Args:
secondary_learner: secondary learner
train_dataset: data to train secondary learner
max_epochs: number of epochs to train secondary learner
batch_size: batch size of training data of secondary learner
eval_freq: secondary model evaluation can be triggered at eval_freq
igf_model_path: path to store trained secondary learner
Returns:
Trained secondary learner
"""
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# We will use the first 512 pairs from our dataset as a test set for
# our secondary learner and the rest to train
test_dataset = train_dataset[:512]
train_dataset = train_dataset[512:]
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataloader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)
# secondary learner model set up
loss = nn.MSELoss()
test_loss = nn.MSELoss(reduction="sum")
secondary_learner.to(device)
q_optimizer = torch.optim.Adam(secondary_learner.parameters(), lr=0.00001)
secondary_learner.train()
# TODO in original code this is written as number of actual batches seen
# not number of items seen but other places it is number of items instead.
# improve consistency! changed this to epochs for clarity
best_test_loss = float("inf")
# Iterate through batches until we've used max_steps batches
for epoch in range(int(max_epochs)):
tr_q_loss = 0.0
secondary_learner.train()
for step, batch in enumerate(train_dataloader):
context = batch[0].to(device)
real_q = batch[1].to(device)
predicted_q = secondary_learner(context)
q_optimizer.zero_grad()
q_loss = loss(predicted_q, real_q.float())
q_loss.backward()
q_optimizer.step()
tr_q_loss += q_loss.item()
# model trains fairly quickly so we won't wait for a full epoch
# eval is triggered at eval_freq and end of epochs
if (step % eval_freq == 0 and step > 0) or ((step + 1) == len(train_dataloader)):
tr_loss = tr_q_loss / (step + 1)
secondary_learner.eval()
q_loss2 = 0.0
sum_q2 = 0.0
predicted = []
actual = []
# Compute performance of the secondary learner after this batch
for step2, batch2 in enumerate(test_dataloader):
features2 = batch2[0].to(device)
real_q2 = batch2[1].to(device)
predicted_q2 = secondary_learner(features2)
q_loss2 += test_loss(predicted_q2, real_q2).item()
sum_q2 += torch.sum(predicted_q2).item()
for ei, i in enumerate(predicted_q2.cpu().detach().numpy()):
predicted.append(i.item())
for ei, i in enumerate(real_q2.cpu().detach().numpy()):
actual.append(i.item())
q_loss2 /= len(test_dataset)
print(
"Epoch: ",
epoch,
"step: ",
step,
"Avg. q:",
sum_q2 / len(test_dataset),
"Train Loss: ",
tr_loss,
"Test Loss: ",
q_loss2,
)
if q_loss2 < best_test_loss:
joblib.dump((predicted, actual), "pred_vs_actual.jbl")
torch.save(secondary_learner.state_dict(), igf_model_path)
best_test_loss = q_loss2
secondary_learner.train()
return secondary_learner
class SecondaryLearner(nn.Module):
"""
Our secondary learner
"""
def __init__(self, model):
"""
We use a simple convolutional network as our secondary learner
Args:
model: Pre-trained GPT2 model
"""
# embeddings are from the pretrained model
super(SecondaryLearner, self).__init__()
self.embeddings = model.transformer.wte
self.embeddings.weight = copy.deepcopy(model.transformer.wte.weight)
self.conv = nn.Conv1d(self.embeddings.weight.size(1), 256, 3, padding=1)
self.fc = nn.Sequential(nn.Linear(256, 32), nn.Dropout(p=0.1), nn.Linear(32, 32), nn.Linear(32, 1))
def forward(self, context):
"""
Forward pass through the secondary learner
Args:
context: Context input to the secondary learner
Returns:
tensor after squeeze operation
"""
pooled = torch.max(self.conv(self.embeddings(context).squeeze(1).transpose(1, 2)), 2)[0]
qs = self.fc(pooled)
return qs.squeeze(1)
@classmethod
def from_pretrained(cls, state_path, model):
"""
Load the secondary learner
Args:
state_path: Path to save secondary learner
model: Pretrained GPT-2
Returns:
secondary learner
"""
secondary_learner = cls(model) # this calls __init__
state_dict = torch.load(state_path)
secondary_learner.load_state_dict(state_dict)
secondary_learner.embeddings = model.transformer.wte
secondary_learner.embeddings.weight = copy.deepcopy(model.transformer.wte.weight)
return secondary_learner
| [
"sqy1415@gmail.com"
] | sqy1415@gmail.com |
5b65f2b9d93b31d704c150d79dd67ce87b6c23f7 | 47366be5cbee9d7e086291c20f97f10ab2bf74fe | /code/journal_gemello_all_homes_leave_one_out_cluster.py | ebdf022a4357b68316c0ca6f4ab5b8ca31c44d8f | [] | no_license | nipunbatra/journal | 3d44eed05c95970606649d17402da54fc0a415ff | 94a8b88589e8f60e6f0314f8c5a374f22336b3e9 | refs/heads/master | 2021-01-09T20:40:45.844121 | 2016-07-27T15:16:29 | 2016-07-27T15:16:29 | 62,874,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,837 | py | """
This code generates the prediction for a region when we use homes containing all data
"""
# NEED TO RUN ON CLUSTER
import sys
CLUSTER = True
if CLUSTER:
sys.path.insert(0, '/if6/nb2cz/anaconda/lib/python2.7/site-packages')
import numpy as np
import pandas as pd
import pickle
from collections import OrderedDict
out_overall = pickle.load(open('../data/input/all_regions.pkl', 'r'))
region = "Austin"
df = out_overall[region]
df = df.rename(columns={'house_num_rooms': 'num_rooms',
'num_occupants': 'total_occupants',
'difference_ratio_min_max': 'ratio_difference_min_max'})
df = df[(df.full_agg_available == 1) & (df.md_available == 1)]
def scale_0_1(ser, minimum=None, maximum=None):
if minimum is not None:
pass
else:
minimum = ser.min()
maximum = ser.max()
return (ser - minimum).div(maximum - minimum)
def normalise(df):
new_df = df.copy()
max_aggregate = df[["aggregate_%d" % i for i in range(1, 13)]].max().max()
min_aggregate = df[["aggregate_%d" % i for i in range(1, 13)]].min().min()
new_df[["aggregate_%d" % i for i in range(1, 13)]] = scale_0_1(df[["aggregate_%d" % i for i in range(1, 13)]],
min_aggregate, max_aggregate)
for col in ['area', 'total_occupants', 'num_rooms', 'ratio_min_max',
'skew', 'kurtosis', 'variance', 'ratio_difference_min_max', 'p_25',
'p_50', 'p_75']:
new_df[col] = scale_0_1(df[col])
return new_df
df = normalise(df)
from all_functions import *
from features import *
import sys
from sklearn.neighbors import KNeighborsRegressor
from sklearn.cross_validation import ShuffleSplit
NUM_NEIGHBOUR_MAX = 6
F_MAX = 6
K_min, K_max = 1,6
F_min, F_max=1,8
import json
from sklearn.cross_validation import LeaveOneOut
from sklearn.cross_validation import KFold
from sklearn.ensemble import ExtraTreesRegressor
def _save_csv(out_df, path, appliance, num_homes, start_seed, end_seed, feature):
out_df.T.to_csv("%s/%s_%d_%d_%d_%s.csv" %(path, appliance, num_homes, start_seed, end_seed, feature),
index_label="Random seed")
def _find_accuracy(home, appliance, feature="Monthly"):
np.random.seed(42)
appliance_df = df.ix[all_homes]
if appliance=="hvac":
start, stop=5, 11
else:
start, stop=1, 13
test_homes = [home]
train_homes = appliance_df[~appliance_df.index.isin([home])].index
#all_home_appliance = deepcopy(all_homes)
#all_home_appliance[appliance] = train_homes
# Cross validation on inner loop to find best feature, K
train_size = len(train_homes)
l = LeaveOneOut(train_size)
out = OrderedDict()
for cv_train, cv_test in l:
#print cv_test
cv_train_home=appliance_df.ix[train_homes[cv_train]]
cv_test_home = appliance_df.ix[train_homes[cv_test]]
test_home_name = cv_test_home.index.values[0]
#print cv_test_home
out[test_home_name]={}
# Summing up energy across start to stop to get Y to learn optimum feature on
Y = cv_train_home[['%s_%d' %(appliance, i) for i in range(start, stop)]].sum(axis=1).values
forest = ExtraTreesRegressor(n_estimators=250,
random_state=0)
forest.fit(cv_train_home[feature_map[feature]], Y)
importances = forest.feature_importances_
indices = np.argsort(importances)[::-1]
# Now varying K and top-N features
for K in range(K_min, K_max):
out[test_home_name][K]={}
for top_n in range(F_min,F_max):
out[test_home_name][K][top_n]=[]
top_n_features = cv_train_home[feature_map[feature]].columns[indices][:top_n]
# Now fitting KNN on this
for month in range(start, stop):
clf = KNeighborsRegressor(n_neighbors=K)
clf.fit(cv_train_home[top_n_features], cv_train_home['%s_%d' %(appliance, month)])
out[test_home_name][K][top_n].append(clf.predict(cv_test_home[top_n_features]))
# Now, finding the (K, top_n) combination that gave us best accuracy on CV test homes
accur = {}
for K in range(K_min, K_max):
accur[K] = {}
for top_n in range(F_min, F_max):
temp = {}
for h in out.iterkeys():
pred = pd.DataFrame(out[h][K][top_n]).T
#all_but_h = [x for x in out.keys() if x!=h]
pred.index = [h]
pred.columns = [['%s_%d' %(appliance, i) for i in range(start, stop)]]
gt = appliance_df.ix[h][['%s_%d' %(appliance, i) for i in range(start, stop)]]
error = (pred-gt).abs().div(gt).mul(100)
#print pred, gt, error
mean_error = error.mean().mean()
#print mean_error
temp[h]=mean_error
ac = pd.Series(temp).mean()
accur[K][top_n] = ac
accur_df = pd.DataFrame(accur)
#print accur_df
accur_min = accur_df.min().min()
max_ac_df = accur_df[accur_df==accur_min]
F_best = cv_train_home[feature_map[feature]].columns[indices][:max_ac_df.mean(axis=1).dropna().index.values[0]].tolist()
K_best = max_ac_df.mean().dropna().index.values[0]
# Now predicting for test home
train_overall = appliance_df.ix[appliance_df[~appliance_df.index.isin([home])].index]
test_overall = appliance_df[appliance_df.index.isin([home])]
pred_test = {}
gt_test = {}
for month in range(start, stop):
clf = KNeighborsRegressor(n_neighbors=K_best)
clf.fit(train_overall[F_best], train_overall['%s_%d' %(appliance, month)])
pred_test[month] = clf.predict(test_overall[F_best])
gt_test[month] = test_overall['%s_%d' %(appliance, month)]
#json.dump({'f':F_best, 'k':K_best,'accuracy':accur_max},open("../main-out-new/%s_%s_%d.json" %(appliance,feature, home),"w") )
pred_df = pd.DataFrame(pred_test)
pred_df.index = [home]
gt_df = pd.DataFrame(gt_test)
error = (gt_df-pred_df).abs().div(gt_df).mul(100)
return pred_df, gt_df, error, F_best, K_best
import os
out_path = os.path.expanduser("~/output/journal/gemello/all_homes/")
import sys
appliances = ["hvac","fridge","wm","dw","mw","oven"]
features = ["Static", "Monthly+Static", "Monthly"]
SLURM_OUT = "../slurm_out"
from subprocess import Popen
import time
for feature in features:
for appliance in appliances:
if appliance=="hvac":
start, stop=5, 11
else:
start, stop=1, 13
appliance_df= df.ix[df[['%s_%d' %(appliance,month) for month in range(start,stop)]].dropna().index]
for home in appliance_df.index:
home = int(home)
if appliance=="hvac":
start, stop=5, 11
else:
start, stop=1, 13
appliance_df = df.ix[df[['%s_%d' %(appliance,month) for month in range(start,stop)]].dropna().index]
all_homes = appliance_df.index
pred_df, gt_df, error, F_best, K_best = _find_accuracy(home, appliance, feature)
print appliance, home, feature, error.squeeze().mean()
if not os.path.exists(out_path):
print "here"
os.makedirs(out_path)
import pickle
filename = os.path.join(out_path, "%s_%s_%d.pkl" %(appliance,feature,home))
o = {'pred_df':pred_df,'gt_df':gt_df,'error':error,
'F_best':F_best,'K_best':K_best}
pickle.dump(o, open(filename,'w'))
#_save_csv(out_overall, os.path.expanduser("~/output/unified/kdd_all_features/"), appliance, num_homes, start_seed, end_seed, feature)
| [
"nipunb@iiitd.ac.in"
] | nipunb@iiitd.ac.in |
2c70798e16ac553da37c52d63676ab39931ffc65 | ee6acbd5fcd0fcd16230e96a4a539de41a02c97e | /operators/prometheus/python/pulumi_pulumi_kubernetes_crds_operators_prometheus/monitoring/v1/__init__.py | c08014ce868b9d012e89a5e5a10d7f8c359ea029 | [
"Apache-2.0"
] | permissive | isabella232/pulumi-kubernetes-crds | 777e78137aaf6525a44b61a02dccf91bf0d87a14 | 372c4c0182f6b899af82d6edaad521aa14f22150 | refs/heads/master | 2023-03-15T04:29:16.039753 | 2020-12-30T19:35:54 | 2020-12-30T19:35:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | # coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .Alertmanager import *
from .PodMonitor import *
from .Prometheus import *
from .PrometheusRule import *
from .ServiceMonitor import *
from .ThanosRuler import *
from ._inputs import *
from . import outputs
| [
"albertzhong0@gmail.com"
] | albertzhong0@gmail.com |
520ff5a148a237cbc9d139f883a89e8e8f2cc1e3 | d53d639db5a7a71a904d811dc271e19f294baa9d | /Travel/travelsapp/migrations/0023_auto_20200621_1809.py | aa56e73621cff30fb92bfc4e61f6ae612c8b6535 | [] | no_license | sdrsnadkry/django | e8a0f6d91094ae27ef3d5aef0c0f667b0b917ce7 | 83126dec62da6a715fab8654852f46fdeedc30f2 | refs/heads/master | 2022-11-27T04:38:25.891650 | 2020-08-07T07:46:42 | 2020-08-07T07:46:42 | 285,762,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # Generated by Django 3.0.7 on 2020-06-21 12:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('travelsapp', '0022_bookings'),
]
operations = [
migrations.AlterField(
model_name='bookings',
name='date',
field=models.DateField(),
),
]
| [
"sdadkry95@gmail.com"
] | sdadkry95@gmail.com |
5977f8f99167e8f39b9114736637b2fe01fddf7f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_bailed.py | 781bd6fa9e0ab38271aa62f4d74a985f9d2158ed | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py |
#calss header
class _BAILED():
def __init__(self,):
self.name = "BAILED"
self.definitions = bail
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['bail']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
00865f4edca156b1bb9f7af208f6e517657f0266 | b2fb13181e5fe114c5b128f30b3946024347321d | /Day12/Day12.py | 7198292796be9bd1b8f45bd64509cd79c1d6188d | [] | no_license | darsovit/AdventOfCode2017 | 43aefaefcad866fde38ad960106d20ab1d8d28bf | 8db815ea2ea618e25dd5946988d88c34563f0ace | refs/heads/master | 2021-09-01T08:21:41.369752 | 2017-12-26T01:12:46 | 2017-12-26T01:17:42 | 113,674,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | #!python
def parse_line( line, state ):
first=line.split(' <-> ')
state['connections'][int(first[0])]=list(map(lambda x: int(x), first[1].split(', ')))
print(state['connections'][int(first[0])])
def build_group( state, base ):
state['indirect'][base]=set( [ base ] )
startlen=0
endlen=1
new_entries=state['indirect'][base]
while startlen < endlen:
origset=state['indirect'][base]
newset=origset
startlen=len(newset)
for x in new_entries:
newset = newset | set( state['connections'][x] )
state['indirect'][base] = newset
endlen=len(newset)
new_entries = newset - origset
for x in state['indirect'][base]:
state['ingroup'][x] = base
state={}
state['connections']={}
state['indirect']={}
state['ingroup']={}
with open('input.txt') as f:
for line in f:
parse_line( line.strip(), state )
#state['indirect']={}
#state['indirect'][0]=set( state['connections'][0] )
#print( state['indirect'][0] )
#endlen=len(state['indirect'][0])
#startlen=0
#while startlen < endlen:
# newset=state['indirect'][0]
# startlen=len(newset)
# for x in state['indirect'][0]:
# newset = newset | set( state['connections'][x])
# state['indirect'][0] = newset
# endlen=len(newset)
for base in sorted(state['connections'].keys()):
if base not in state['ingroup']:
build_group( state, base )
print( len(state['indirect'][0]) )
print( len(state['indirect']) ) | [
"darsovit@gmail.com"
] | darsovit@gmail.com |
fb0c35c791681a9e193d1bfbbc7378ee78426409 | 182c651a9b00b9b4d80e6d51ae574cb793958cd6 | /quick/tutorials/extending/chapter3-bindings/chapter3-bindings.py | aa415cc1f6041c84a4719a593559bf809dbe9dc5 | [] | no_license | eudu/pyqt-examples | c61a7108e1fbfcf2cd918a0f99e9a5a90a3f305c | 8e533b7b3c5e9bbe0617ef1ecb9b169dd216c181 | refs/heads/master | 2020-03-16T01:23:19.573347 | 2018-05-06T20:20:57 | 2018-05-06T20:20:57 | 132,438,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,702 | py | #!/usr/bin/python3
#############################################################################
##
## Copyright (C) 2018 Riverbank Computing Limited.
## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Digia Plc and its Subsidiary(-ies) nor the names
## of its contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import pyqtProperty, pyqtSignal, pyqtSlot, QRectF, Qt, QUrl
from PyQt5.QtGui import QColor, QGuiApplication, QPainter, QPen
from PyQt5.QtQml import qmlRegisterType
from PyQt5.QtQuick import QQuickPaintedItem, QQuickView
class PieChart(QQuickPaintedItem):
@pyqtProperty(str)
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
colorChanged = pyqtSignal()
@pyqtProperty(QColor, notify=colorChanged)
def color(self):
return self._color
@color.setter
def color(self, color):
if self._color != color:
self._color = QColor(color)
self.update()
self.colorChanged.emit()
def __init__(self, parent=None):
super(PieChart, self).__init__(parent)
self._name = ''
self._color = QColor()
def paint(self, painter):
painter.setPen(QPen(self._color, 2))
painter.setRenderHints(QPainter.Antialiasing, True)
rect = QRectF(0, 0, self.width(), self.height()).adjusted(1, 1, -1, -1)
painter.drawPie(rect, 90 * 16, 290 * 16)
@pyqtSlot()
def clearChart(self):
self.color = QColor(Qt.transparent)
self.update()
if __name__ == '__main__':
import os
import sys
app = QGuiApplication(sys.argv)
qmlRegisterType(PieChart, "Charts", 1, 0, "PieChart")
view = QQuickView()
view.setResizeMode(QQuickView.SizeRootObjectToView)
view.setSource(
QUrl.fromLocalFile(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'app.qml')))
view.show()
sys.exit(app.exec_())
| [
"dukalow@gmail.com"
] | dukalow@gmail.com |
d16aa6923b579e95b3a303185a0f8ef9a768ae8a | 261eba086816dbb3db4836c9b1e5869ccf0f8bae | /牛顿迭代法求解非线性方程/main.py | aa97a1a66a5a9ba51a601b6eb2e5e2b6e71c7523 | [] | no_license | budaLi/jianzi | e316bdfb25587d14d38f1bea98772bce5ac69198 | bca098de0f06ae1c78afc3203dfb0eea6a412dee | refs/heads/master | 2023-05-02T19:33:25.752799 | 2021-05-25T08:03:24 | 2021-05-25T08:03:24 | 271,513,687 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,483 | py | # coding=utf-8
# Copyright (c) 2020 ichinae.com, Inc. All Rights Reserved
"""
Module Summary Here.
Authors: lijinjun1351@ichinae.com
"""
from numpy import *
import numpy as np
def Fun(x,num): #方程组在这里,两个变量分别是x的两个分量,num是未知数个数,这里是2,f是2个方程组
i = num
f = np.zeros((i),dtype=float)
f[0] = x[0]+2*x[1]-3
f[1] = 2*x[0]**2 + x[1]**2-5
return f
def dfun(x,num): #计算雅可比矩阵的逆矩阵
df = np.zeros((num,num),dtype=float)
dx = 0.00001 #
x1 = np.copy(x)
for i in range(0,num): # 求导数,i是列,j是行
for j in range(0,num):
x1 = np.copy(x)
x1[j] = x1[j]+dx #x+dx
df[i,j] = (Fun(x1,num)[i]-Fun(x,num)[i])/dx #f(x+dx)-f(x)/dx
df_1 = np.linalg.inv(df) #计算逆矩阵
return df_1
def Newton(x,num):
x1 = np.copy(x)
i = 0
delta = np.copy(x)
while(np.sum(abs(delta)) > 1.e-3): #控制循环次数 10-3
x1 = x-dot(dfun(x,num),Fun(x,num)) #公式
delta = x1-x #比较x的变化
x = x1
i = i+1
return x
def main():
# 方程未知数的个数
num =2
x = np.ones((num),dtype=float)
#初始值
x[0]=1.5
x[1]=1.0
a = Newton(x,num)
print(a)
if __name__ == '__main__':
main() | [
"1364826576@qq.com"
] | 1364826576@qq.com |
2fdbf5995d22d446b67b4557c6fa3d84c77f7861 | f80ef3a3cf859b13e8af8433af549b6b1043bf6e | /pyobjc-framework-AVFoundation/PyObjCTest/test_avmusicevents.py | c80ab744d2f180dcafae94f772ee789b8b375805 | [
"MIT"
] | permissive | ronaldoussoren/pyobjc | 29dc9ca0af838a56105a9ddd62fb38ec415f0b86 | 77b98382e52818690449111cd2e23cd469b53cf5 | refs/heads/master | 2023-09-01T05:15:21.814504 | 2023-06-13T20:00:17 | 2023-06-13T20:00:17 | 243,933,900 | 439 | 49 | null | 2023-06-25T02:49:07 | 2020-02-29T08:43:12 | Python | UTF-8 | Python | false | false | 4,577 | py | import AVFoundation
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestAVMusicEvents(TestCase):
def test_constants(self):
self.assertIsEnumType(AVFoundation.AVMIDIControlChangeMessageType)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeBankSelect, 0)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeModWheel, 1)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeBreath, 2)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeFoot, 4)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypePortamentoTime, 5)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeDataEntry, 6)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeVolume, 7)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeBalance, 8)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypePan, 10)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeExpression, 11)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeSustain, 64)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypePortamento, 65)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeSostenuto, 66)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeSoft, 67)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeLegatoPedal, 68)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeHold2Pedal, 69)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeFilterResonance, 71)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeReleaseTime, 72)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeAttackTime, 73)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeBrightness, 74)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeDecayTime, 75)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeVibratoRate, 76)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeVibratoDepth, 77)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeVibratoDelay, 78)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeReverbLevel, 91)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeChorusLevel, 93)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeRPN_LSB, 100)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeRPN_MSB, 101)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeAllSoundOff, 120)
self.assertEqual(
AVFoundation.AVMIDIControlChangeMessageTypeResetAllControllers, 121
)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeAllNotesOff, 123)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeOmniModeOff, 124)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeOmniModeOn, 125)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeMonoModeOn, 126)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeMonoModeOff, 127)
self.assertIsEnumType(AVFoundation.AVMIDIMetaEventType)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeSequenceNumber, 0x00)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeText, 0x01)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeCopyright, 0x02)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeTrackName, 0x03)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeInstrument, 0x04)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeLyric, 0x05)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeMarker, 0x06)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeCuePoint, 0x07)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeMidiChannel, 0x20)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeMidiPort, 0x21)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeEndOfTrack, 0x2F)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeTempo, 0x51)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeSmpteOffset, 0x54)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeTimeSignature, 0x58)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeKeySignature, 0x59)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeProprietaryEvent, 0x7F)
@min_os_level("13.0")
def test_constants13_0(self):
self.assertIsInstance(AVFoundation.AVExtendedNoteOnEventDefaultInstrument, int)
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
b5ed87c30787e2e63e8deee43faf70ddc16b3e07 | 944401a6292baa2d23b9738898e0b0cb199d0795 | /color_quantization/median-cut/img_quality_assessment(IQA)/ssim/lab_cs/lab_ssim.py | 7d495b83f9e669fec5604bdd7a7b921d4422055b | [] | no_license | sunnyweilai/Finding-Theme-Color-Palettes | cc84c93ce58abdd1802431c41bd59181d7a4f75b | 4c38b112f5c40b43d6ec126e415b609c7fdc1f39 | refs/heads/master | 2022-12-21T09:41:31.187411 | 2019-04-30T14:50:17 | 2019-04-30T14:50:17 | 184,273,925 | 1 | 0 | null | 2022-12-07T03:46:55 | 2019-04-30T14:09:52 | Python | UTF-8 | Python | false | false | 1,355 | py | """
image quality assessment (IQA) of the quantized images and the original image in L*a*b* color space
----- method: SSIM
----- version 1.0 (skimage library)
----- http://scikit-image.org/docs/dev/auto_examples/transform/plot_ssim.html
"""
import numpy as np
import csv
from PIL import Image
import skimage
from skimage import color
from skimage.measure import compare_ssim
from quantization import median_cut
def main() :
# ---- open the reference image
original_img = Image.open('../../../../img/sky.jpg')
testimg_list = []
for n_colors in range(1, 21):
lab_array = median_cut(original_img, n_colors)
testimg_list.append(lab_array)
# ---- get lab original array
ori_arr = np.array(original_img)
ori_arr_lab = skimage.color.rgb2lab(ori_arr)
# ---- rescale original raster
rescale_ori = (ori_arr_lab + [0, 128, 128]) / [100, 255, 255]
# ---- compare MSSIM
score_list = []
for i in testimg_list:
score = compare_ssim(rescale_ori[:,:,0], i[:,:,0], multichannel=True)
score_list.append(score)
# ---- save ssim score to csv file
csvfile = "ssim_lab_in_L.csv"
with open(csvfile, "w") as output:
writer = csv.writer(output, lineterminator='\n')
for val in score_list:
writer.writerow([val])
if __name__ == "__main__":
main()
| [
"wnn2260@gmail.com"
] | wnn2260@gmail.com |
a285c1fb0cced66886bb31bfdeadbcf093397cb3 | 64a80df5e23b195eaba7b15ce207743e2018b16c | /Downloads/adafruit-circuitpython-bundle-py-20201107/lib/adafruit_ble_adafruit/tone_service.py | 9b2c3972b4b73af73af5cfe20532239dce11384f | [] | no_license | aferlazzo/messageBoard | 8fb69aad3cd7816d4ed80da92eac8aa2e25572f5 | f9dd4dcc8663c9c658ec76b2060780e0da87533d | refs/heads/main | 2023-01-27T20:02:52.628508 | 2020-12-07T00:37:17 | 2020-12-07T00:37:17 | 318,548,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,538 | py | # The MIT License (MIT)
#
# Copyright (c) 2020 Dan Halbert for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_ble_adafruit.tone_service`
================================================================================
BLE access to play tones.
* Author(s): Dan Halbert
"""
__version__ = "1.2.1"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BLE_Adafruit.git"
import struct
from _bleio import PacketBuffer
from adafruit_ble.attributes import Attribute
from adafruit_ble.characteristics import Characteristic, ComplexCharacteristic
from adafruit_ble_adafruit.adafruit_service import AdafruitService
class _TonePacket(ComplexCharacteristic):
uuid = AdafruitService.adafruit_service_uuid(0xC01)
format = "<HI"
format_size = struct.calcsize(format)
def __init__(self):
super().__init__(
properties=Characteristic.WRITE,
read_perm=Attribute.NO_ACCESS,
max_length=self.format_size,
fixed_length=True,
)
def bind(self, service):
"""Binds the characteristic to the given Service."""
bound_characteristic = super().bind(service)
return PacketBuffer(bound_characteristic, buffer_size=1)
class ToneService(AdafruitService):
"""Play tones."""
uuid = AdafruitService.adafruit_service_uuid(0xC00)
_tone_packet = _TonePacket()
"""
Tuple of (frequency: 16 bits, in Hz, duration: 32 bits, in msecs).
If frequency == 0, a tone being played is turned off.
if duration == 0, play indefinitely.
"""
def __init__(self, service=None):
super().__init__(service=service)
self._tone_packet_buf = bytearray(_TonePacket.format_size)
@property
def tone(self):
"""Return (frequency, duration), or None if no value available"""
buf = self._tone_packet_buf
if self._tone_packet.readinto(buf) == 0:
# No new values available.
return None
return struct.unpack(_TonePacket.format, buf)
def play(self, frequency, duration):
"""
Frequency is in Hz. If frequency == 0, a tone being played is turned off.
Duration is in seconds. If duration == 0, play indefinitely.
"""
self._tone_packet = struct.pack(
_TonePacket.format,
frequency,
0 if duration == 0 else int(duration * 1000 + 0.5),
)
| [
"aferlazzo@gmail.com"
] | aferlazzo@gmail.com |
0cbb3fd6548df0a2c7dbfccdbd9d8f5aa52fcbf4 | 74eee5bdaae10b2cfbd936e3c10cc9c91b9220e0 | /Chapter 10 - Binary Trees/10.5_sum_of_binary_paths.py | b2597339593a8a6654df8b23d61e02bd1bb35045 | [] | no_license | kishan/Elements-of-Porgramming-Interviews-Python-Solutions | abc02af36102e059f7213610ce948a000879e9ec | 32fe89b4927da8e026ff27a6b9894f639a8a2de9 | refs/heads/master | 2020-12-25T06:52:42.729405 | 2016-08-26T16:08:40 | 2016-08-26T16:08:40 | 62,021,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | """
Consider a binary tree in which each node contains a binary digit
Design an alogirthm to compute the sum of the binary numbers represented by the root-to-leaf paths
"""
def sum_to_root(node, partial_sum=0):
if node is None:
return 0
partial_sum = partial_sum*2 + node.data
if (node.left is None) and (node.right is None):
return partial_sum
else:
return sum_to_root(node.left, partial_sum) + sum_to_root(node.right, partial_sum)
| [
"kspatel2018@gmail.com"
] | kspatel2018@gmail.com |
af588fafe20c072ff22dc603dd5b083235819834 | 9f835d53232e954805b7ed1d93889e409209b36b | /2920.py | 3fe5d6d26574cdf3a6e64e3a56708f6a5b2b8766 | [] | no_license | dmswl0311/Baekjoon | 7c8a862fceff086b3d7740eef23b80164e1d5aeb | 22040aff6b64d5081e86d91b0d118d1a718a4316 | refs/heads/master | 2023-04-29T13:48:51.448245 | 2021-05-26T14:35:32 | 2021-05-26T14:35:32 | 323,482,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | num_list = list(map(int, input().split()))
ori_list = [1, 2, 3, 4, 5, 6, 7, 8]
if num_list == ori_list:
print("ascending")
elif num_list == sorted(ori_list, reverse=True):
print("descending")
else:
print("mixed")
| [
"dmswl_0311@naver.com"
] | dmswl_0311@naver.com |
a1a3d80bb3ff411b6a757c8662ed29b06d159dea | 4236d1c3b153847f888402af5dd218fe4004fddc | /events/models.py | 1fc1f12110011a0687ae2dbd3c8b8e0663e5ceef | [] | no_license | zurcx/zurczevents | 6feaae655dcc7bac08a9366e72e65da0ecace69d | 5134841e0afca3bc1f88e1d27980a58f91f5d984 | refs/heads/master | 2020-05-31T04:40:55.735346 | 2013-08-02T18:07:22 | 2013-08-02T18:07:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,341 | py | #encoding: utf-8
from django.contrib.auth.models import User
from django.db import models
from django.core.urlresolvers import reverse
class Event(models.Model):
TYPE_CHOICES = (
(1, u'Workshop'),
(2, u'Dojo'),
(3, u'Palestra'),
)
name = models.CharField(verbose_name=u'Nome', max_length=100)
user = models.ForeignKey(User, verbose_name=u'Usuário',
null=True, blank=True)
type = models.IntegerField(choices=TYPE_CHOICES,
verbose_name=u'Tipo do Evento')
description = models.TextField(verbose_name=u'Descrição',
blank=True)
created_on = models.DateTimeField(verbose_name=u'Criado em',
auto_now_add=True)
link = models.URLField(verbose_name=u'Link', blank=True)
public = models.BooleanField(verbose_name=u'Público?',
default=True)
event_date = models.DateField(verbose_name=u'Data do Evento',
null=True, blank=True)
def comments_count(self):
return self.comments.count()
comments_count.short_description = u'Número de Comentários'
@models.permalink
def get_absolute_url(self):
return ('events_details', (), {'pk': self.pk})
def __unicode__(self):
return self.name
class Meta:
verbose_name = u'Evento'
verbose_name_plural = u'Eventos'
ordering = ['name']
class Comment(models.Model):
name = models.CharField(verbose_name=u'Nome',
max_length=100)
email = models.EmailField(verbose_name=u'E-mail',)
event = models.ForeignKey(Event, verbose_name=u'Evento',
related_name='comments')
text = models.TextField(verbose_name=u'Texto')
website = models.URLField(verbose_name=u'Perfil Facebook',
blank=True)
created_on = models.DateTimeField(verbose_name=u'Criado em',
auto_now_add=True)
def __unicode__(self):
return self.text
class Meta:
verbose_name = u'Comentário'
verbose_name_plural = u'Comentários'
ordering = ['created_on'] | [
"luizfabiodacruz@gmail.com"
] | luizfabiodacruz@gmail.com |
7e326d364c90b3db321f8fa3fbc2d170629e46e8 | c779cb47c0f0966b958fe06b15a312fba4219392 | /blender_3dmigoto.py | 0df150053d0556bb21887cef4b91000f254844da | [] | no_license | funeko/3d-fixes | 75896497366d4bc63aa710a9ccd04eb191cb9695 | ee85033ecee8c156b7fe12d2eef7edc98f967210 | refs/heads/master | 2020-05-09T18:17:14.158749 | 2019-04-14T14:49:57 | 2019-04-14T14:49:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68,966 | py | #!/usr/bin/env python3
bl_info = {
"name": "3DMigoto",
"author": "Ian Munsie (darkstarsword@gmail.com)",
"location": "File > Import-Export",
"description": "Imports meshes dumped with 3DMigoto's frame analysis and exports meshes suitable for re-injection",
"category": "Import-Export",
"tracker_url": "https://github.com/DarkStarSword/3d-fixes/issues",
}
# TODO:
# - Option to reduce vertices on import to simplify mesh (can be noticeably lossy)
# - Option to untesselate triangles on import?
# - Operator to generate vertex group map
# - Operator to set current pose from a constant buffer dump
# - Generate bones, using vertex groups to approximate position
# - And maybe orientation & magnitude, but I'll have to figure out some funky
# maths to have it follow the mesh like a cylinder
# - Add support for games using multiple VBs per mesh, e.g. Witcher 3
# - Test in a wider variety of games
# - Handle TANGENT better on both import & export?
import io
import re
from array import array
import struct
import numpy
import itertools
import collections
import os
from glob import glob
import json
import copy
import textwrap
import bpy
from bpy_extras.io_utils import unpack_list, ImportHelper, ExportHelper, orientation_helper_factory, axis_conversion
from bpy.props import BoolProperty, StringProperty, CollectionProperty
from bpy_extras.image_utils import load_image
from mathutils import Matrix, Vector
def keys_to_ints(d):
    """Return a copy of d with decimal string keys converted to ints.

    Inverse of keys_to_strings(), used when deserialising dicts that were
    round-tripped through JSON (which only allows string keys).

    Fixes the previous `k.isdecimal() and int(k) or k` idiom, which left the
    key '0' as a string: int('0') is falsy, so the `or` fell back to the
    original string key.
    """
    return {int(k) if k.isdecimal() else k: v for k, v in d.items()}
def keys_to_strings(d):
    """Return a copy of d with every key coerced to its string form.

    Used before serialising dicts to formats that only permit string keys
    (e.g. JSON custom properties).
    """
    return dict((str(key), value) for key, value in d.items())
class Fatal(Exception): pass
# Regexes matching the DXGI format families we can pack/unpack.
# TODO: Support more DXGI formats:
f32_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]32)+_FLOAT''')
f16_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]16)+_FLOAT''')
u32_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]32)+_UINT''')
u16_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]16)+_UINT''')
u8_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]8)+_UINT''')
s32_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]32)+_SINT''')
s16_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]16)+_SINT''')
s8_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]8)+_SINT''')
unorm16_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]16)+_UNORM''')
unorm8_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]8)+_UNORM''')
snorm16_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]16)+_SNORM''')
snorm8_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]8)+_SNORM''')

# Catch-all patterns used by InputLayoutElement.is_float()/is_int():
misc_float_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD][0-9]+)+_(?:FLOAT|UNORM|SNORM)''')
misc_int_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD][0-9]+)+_[SU]INT''')

def EncoderDecoder(fmt):
    """Return an (encode, decode) pair of callables for the given DXGI format.

    encode takes a sequence of Python numbers and returns the packed bytes;
    decode takes packed bytes and returns a list of Python numbers. The
    *NORM formats are mapped to/from floats in [0, 1] (UNORM) or [-1, 1]
    (SNORM) range.

    Raises Fatal for formats we do not (yet) support.
    """
    if f32_pattern.match(fmt):
        # Was a per-element struct.pack('<f', ...) loop - use numpy like
        # every other format here, for consistency and speed (the decoder
        # already relied on numpy's native little-endian layout):
        return (lambda data: numpy.fromiter(data, numpy.float32).tobytes(),
                lambda data: numpy.frombuffer(data, numpy.float32).tolist())
    if f16_pattern.match(fmt):
        return (lambda data: numpy.fromiter(data, numpy.float16).tobytes(),
                lambda data: numpy.frombuffer(data, numpy.float16).tolist())
    if u32_pattern.match(fmt):
        return (lambda data: numpy.fromiter(data, numpy.uint32).tobytes(),
                lambda data: numpy.frombuffer(data, numpy.uint32).tolist())
    if u16_pattern.match(fmt):
        return (lambda data: numpy.fromiter(data, numpy.uint16).tobytes(),
                lambda data: numpy.frombuffer(data, numpy.uint16).tolist())
    if u8_pattern.match(fmt):
        return (lambda data: numpy.fromiter(data, numpy.uint8).tobytes(),
                lambda data: numpy.frombuffer(data, numpy.uint8).tolist())
    if s32_pattern.match(fmt):
        return (lambda data: numpy.fromiter(data, numpy.int32).tobytes(),
                lambda data: numpy.frombuffer(data, numpy.int32).tolist())
    if s16_pattern.match(fmt):
        return (lambda data: numpy.fromiter(data, numpy.int16).tobytes(),
                lambda data: numpy.frombuffer(data, numpy.int16).tolist())
    if s8_pattern.match(fmt):
        return (lambda data: numpy.fromiter(data, numpy.int8).tobytes(),
                lambda data: numpy.frombuffer(data, numpy.int8).tolist())

    if unorm16_pattern.match(fmt):
        # numpy.around uses round-half-to-even before the integer cast:
        return (lambda data: numpy.around((numpy.fromiter(data, numpy.float32) * 65535.0)).astype(numpy.uint16).tobytes(),
                lambda data: (numpy.frombuffer(data, numpy.uint16) / 65535.0).tolist())
    if unorm8_pattern.match(fmt):
        return (lambda data: numpy.around((numpy.fromiter(data, numpy.float32) * 255.0)).astype(numpy.uint8).tobytes(),
                lambda data: (numpy.frombuffer(data, numpy.uint8) / 255.0).tolist())
    if snorm16_pattern.match(fmt):
        return (lambda data: numpy.around((numpy.fromiter(data, numpy.float32) * 32767.0)).astype(numpy.int16).tobytes(),
                lambda data: (numpy.frombuffer(data, numpy.int16) / 32767.0).tolist())
    if snorm8_pattern.match(fmt):
        return (lambda data: numpy.around((numpy.fromiter(data, numpy.float32) * 127.0)).astype(numpy.int8).tobytes(),
                lambda data: (numpy.frombuffer(data, numpy.int8) / 127.0).tolist())

    raise Fatal('File uses an unsupported DXGI Format: %s' % fmt)
# Matches each bit-width number in a DXGI format name (e.g. the two 32s in
# R32G32_FLOAT), without splitting multi-digit numbers:
components_pattern = re.compile(r'''(?<![0-9])[0-9]+(?![0-9])''')

def format_components(fmt):
    """Number of components in a DXGI format, e.g. R32G32B32_FLOAT -> 3."""
    return len(components_pattern.findall(fmt))

def format_size(fmt):
    """Size in bytes of one element of the given DXGI format."""
    bit_widths = map(int, components_pattern.findall(fmt))
    return sum(bit_widths) // 8
class InputLayoutElement(object):
    """One element (semantic) of a D3D11 input layout.

    Mirrors the fields of D3D11_INPUT_ELEMENT_DESC. Can be constructed
    either from a 3DMigoto frame analysis .txt file or from a dict
    previously produced by to_dict() (e.g. stored in a Blender custom
    property on import, and read back on export).
    """
    def __init__(self, arg):
        # arg is either an open text file positioned at the element
        # description, or a dict from a previous to_dict():
        if isinstance(arg, io.IOBase):
            self.from_file(arg)
        else:
            self.from_dict(arg)

        # Cache the pack/unpack routines appropriate for this format:
        self.encoder, self.decoder = EncoderDecoder(self.Format)

    def from_file(self, f):
        """Parse one element description from a frame analysis .txt file.

        The fields must appear in this exact order, one per line.
        """
        self.SemanticName = self.next_validate(f, 'SemanticName')
        self.SemanticIndex = int(self.next_validate(f, 'SemanticIndex'))
        self.Format = self.next_validate(f, 'Format')
        self.InputSlot = int(self.next_validate(f, 'InputSlot'))
        self.AlignedByteOffset = self.next_validate(f, 'AlignedByteOffset')
        if self.AlignedByteOffset == 'append':
            # D3D11_APPEND_ALIGNED_ELEMENT - we would need to compute the
            # actual offsets from the preceding elements to support this:
            raise Fatal('Input layouts using "AlignedByteOffset=append" are not yet supported')
        self.AlignedByteOffset = int(self.AlignedByteOffset)
        self.InputSlotClass = self.next_validate(f, 'InputSlotClass')
        self.InstanceDataStepRate = int(self.next_validate(f, 'InstanceDataStepRate'))

    def to_dict(self):
        """Serialise this element to a plain dict (inverse of from_dict)."""
        d = {}
        d['SemanticName'] = self.SemanticName
        d['SemanticIndex'] = self.SemanticIndex
        d['Format'] = self.Format
        d['InputSlot'] = self.InputSlot
        d['AlignedByteOffset'] = self.AlignedByteOffset
        d['InputSlotClass'] = self.InputSlotClass
        d['InstanceDataStepRate'] = self.InstanceDataStepRate
        return d

    def to_string(self, indent=2):
        """Format this element in the frame analysis .txt style, indented."""
        return textwrap.indent(textwrap.dedent('''
            SemanticName: %s
            SemanticIndex: %i
            Format: %s
            InputSlot: %i
            AlignedByteOffset: %i
            InputSlotClass: %s
            InstanceDataStepRate: %i
        ''').lstrip() % (
            self.SemanticName,
            self.SemanticIndex,
            self.Format,
            self.InputSlot,
            self.AlignedByteOffset,
            self.InputSlotClass,
            self.InstanceDataStepRate,
        ), ' '*indent)

    def from_dict(self, d):
        """Restore this element from a dict produced by to_dict()."""
        self.SemanticName = d['SemanticName']
        self.SemanticIndex = d['SemanticIndex']
        self.Format = d['Format']
        self.InputSlot = d['InputSlot']
        self.AlignedByteOffset = d['AlignedByteOffset']
        self.InputSlotClass = d['InputSlotClass']
        self.InstanceDataStepRate = d['InstanceDataStepRate']

    @staticmethod
    def next_validate(f, field):
        # Read the next line and assert it is the expected "Field: value"
        # pair, returning just the value:
        line = next(f).strip()
        assert(line.startswith(field + ': '))
        return line[len(field) + 2:]

    @property
    def name(self):
        # Semantic name with any non-zero index appended, e.g. TEXCOORD1.
        # Index 0 is omitted, matching HLSL convention:
        if self.SemanticIndex:
            return '%s%i' % (self.SemanticName, self.SemanticIndex)
        return self.SemanticName

    def pad(self, data, val):
        """Pad data with val up to this format's component count."""
        padding = format_components(self.Format) - len(data)
        assert(padding >= 0)
        return data + [val]*padding

    def clip(self, data):
        """Discard any components beyond those present in this format."""
        return data[:format_components(self.Format)]

    def size(self):
        """Size of one element of this format, in bytes."""
        return format_size(self.Format)

    def is_float(self):
        # FLOAT/UNORM/SNORM formats all decode to Python floats:
        return misc_float_pattern.match(self.Format)

    def is_int(self):
        return misc_int_pattern.match(self.Format)

    def encode(self, data):
        """Pack a sequence of numbers into this element's byte format."""
        # print(self.Format, data)
        return self.encoder(data)

    def decode(self, data):
        """Unpack this element's bytes into a list of numbers."""
        return self.decoder(data)

    def __eq__(self, other):
        return \
            self.SemanticName == other.SemanticName and \
            self.SemanticIndex == other.SemanticIndex and \
            self.Format == other.Format and \
            self.InputSlot == other.InputSlot and \
            self.AlignedByteOffset == other.AlignedByteOffset and \
            self.InputSlotClass == other.InputSlotClass and \
            self.InstanceDataStepRate == other.InstanceDataStepRate
class InputLayout(object):
    """An ordered collection of InputLayoutElements plus the vertex stride.

    Handles packing/unpacking whole vertices to/from the raw vertex buffer
    byte layout, keyed by semantic name.
    """
    def __init__(self, custom_prop=[], stride=0):
        # NOTE: custom_prop is a mutable default argument, but safe here
        # since it is only iterated, never mutated.
        self.elems = collections.OrderedDict()
        self.stride = stride
        for item in custom_prop:
            elem = InputLayoutElement(item)
            self.elems[elem.name] = elem

    def serialise(self):
        """Serialise all elements to a list of dicts (for custom properties)."""
        return [x.to_dict() for x in self.elems.values()]

    def to_string(self):
        """Format the whole layout in the frame analysis .txt style."""
        ret = ''
        for i, elem in enumerate(self.elems.values()):
            ret += 'element[%i]:\n' % i
            ret += elem.to_string()
        return ret

    def parse_element(self, f):
        """Parse one element description from a frame analysis .txt file."""
        elem = InputLayoutElement(f)
        self.elems[elem.name] = elem

    def __iter__(self):
        return iter(self.elems.values())

    def __getitem__(self, semantic):
        return self.elems[semantic]

    def encode(self, vertex):
        """Pack a {semantic: values} vertex dict into self.stride bytes."""
        buf = bytearray(self.stride)

        for semantic, data in vertex.items():
            if semantic.startswith('~'):
                # '~' prefixed entries are stashed values (e.g. pre-remap
                # BLENDINDICES saved by remap_blendindices) - not encoded:
                continue
            elem = self.elems[semantic]
            data = elem.encode(data)
            # Write the packed element at its layout-specified offset:
            buf[elem.AlignedByteOffset:elem.AlignedByteOffset + len(data)] = data

        assert(len(buf) == self.stride)
        return buf

    def decode(self, buf):
        """Unpack self.stride bytes into a {semantic: values} vertex dict."""
        vertex = {}
        for elem in self.elems.values():
            data = buf[elem.AlignedByteOffset:elem.AlignedByteOffset + elem.size()]
            vertex[elem.name] = elem.decode(data)
        return vertex

    def __eq__(self, other):
        return self.elems == other.elems
class HashableVertex(dict):
    """A vertex dict that can be used as a dictionary key / set member.

    The hash is computed over the sorted (semantic, values) pairs, so two
    vertices with identical contents hash equally regardless of insertion
    order.
    """
    def __hash__(self):
        # Freeze the values (sequences) into tuples so the whole
        # sorted item list is hashable:
        frozen = tuple((key, tuple(val)) for key, val in sorted(self.items()))
        return hash(frozen)
class VertexBuffer(object):
    """A vertex buffer parsed from 3DMigoto frame analysis output.

    Vertices are stored as a list of {semantic: values} dicts, together with
    the InputLayout describing how they map to/from raw bytes. Can be loaded
    from a frame analysis .txt dump, or from a raw .buf binary dump (using a
    .txt dump as a reference for the layout).
    """
    # Matches one element of one vertex in the "vertex-data:" section, e.g.
    # "vb0[0]+000 POSITION: 1, 2, 3":
    vb_elem_pattern = re.compile(r'''vb\d+\[\d*\]\+\d+ (?P<semantic>[^:]+): (?P<data>.*)$''')

    # Python gotcha - do not set layout=InputLayout() in the default function
    # parameters, as they would all share the *same* InputLayout since the
    # default values are only evaluated once on file load
    def __init__(self, f=None, layout=None, load_vertices=True):
        self.vertices = []
        self.layout = layout and layout or InputLayout()
        self.first = 0
        self.vertex_count = 0
        self.offset = 0
        self.topology = 'trianglelist'

        if f is not None:
            self.parse_vb_txt(f, load_vertices)

    def parse_vb_txt(self, f, load_vertices):
        """Parse header, layout and (optionally) vertex data from a .txt dump.

        With load_vertices=False only the header/layout is read, for use as a
        format reference when the actual data comes from a .buf binary dump.
        """
        for line in map(str.strip, f):
            # print(line)
            if line.startswith('byte offset:'):
                self.offset = int(line[13:])
            if line.startswith('first vertex:'):
                self.first = int(line[14:])
            if line.startswith('vertex count:'):
                self.vertex_count = int(line[14:])
            if line.startswith('stride:'):
                self.layout.stride = int(line[7:])
            if line.startswith('element['):
                self.layout.parse_element(f)
            if line.startswith('topology:'):
                self.topology = line[10:]
                if line != 'topology: trianglelist':
                    raise Fatal('"%s" is not yet supported' % line)
            if line.startswith('vertex-data:'):
                if not load_vertices:
                    return
                self.parse_vertex_data(f)
        assert(len(self.vertices) == self.vertex_count)

    def parse_vb_bin(self, f):
        """Load vertices from a raw .buf binary dump (layout must be set)."""
        f.seek(self.offset)
        # XXX: Should we respect the first/base vertex?
        # f.seek(self.first * self.layout.stride, whence=1)
        self.first = 0
        while True:
            vertex = f.read(self.layout.stride)
            if not vertex:
                break
            self.vertices.append(self.layout.decode(vertex))
        # We intentionally disregard the vertex count when loading from a
        # binary file, as we assume frame analysis might have only dumped a
        # partial buffer to the .txt files (e.g. if this was from a dump where
        # the draw call index count was overridden it may be cut short, or
        # where the .txt files contain only sub-meshes from each draw call and
        # we are loading the .buf file because it contains the entire mesh):
        self.vertex_count = len(self.vertices)

    def append(self, vertex):
        """Add one {semantic: values} vertex dict to the buffer."""
        self.vertices.append(vertex)
        self.vertex_count += 1

    def parse_vertex_data(self, f):
        """Parse the "vertex-data:" section - one blank-line separated dict
        of semantic: values per vertex."""
        vertex = {}
        for line in map(str.strip, f):
            #print(line)
            if line.startswith('instance-data:'):
                break

            match = self.vb_elem_pattern.match(line)
            if match:
                vertex[match.group('semantic')] = self.parse_vertex_element(match)
            elif line == '' and vertex:
                # Blank line terminates the current vertex:
                self.vertices.append(vertex)
                vertex = {}
        if vertex:
            self.vertices.append(vertex)

    def parse_vertex_element(self, match):
        """Convert one comma separated value list to a tuple of int/float,
        depending on the element's declared format."""
        fields = match.group('data').split(',')

        if self.layout[match.group('semantic')].Format.endswith('INT'):
            return tuple(map(int, fields))

        return tuple(map(float, fields))

    def remap_blendindices(self, obj, mapping):
        """Translate BLENDINDICES through a vertex group map for export.

        mapping is keyed by vertex group name (or index); the original values
        are stashed under a '~' prefixed semantic so they can be restored by
        revert_blendindices_remap().
        """
        def lookup_vgmap(x):
            vgname = obj.vertex_groups[x].name
            return mapping.get(vgname, mapping.get(x, x))
        for vertex in self.vertices:
            for semantic in list(vertex):
                if semantic.startswith('BLENDINDICES'):
                    vertex['~' + semantic] = vertex[semantic]
                    vertex[semantic] = tuple(lookup_vgmap(x) for x in vertex[semantic])

    def revert_blendindices_remap(self):
        """Undo remap_blendindices() by restoring the stashed values."""
        # Significantly faster than doing a deep copy
        for vertex in self.vertices:
            for semantic in list(vertex):
                if semantic.startswith('BLENDINDICES'):
                    vertex[semantic] = vertex['~' + semantic]
                    del vertex['~' + semantic]

    def disable_blendweights(self):
        """Zero all BLENDINDICES (e.g. to strip skinning for testing)."""
        for vertex in self.vertices:
            for semantic in list(vertex):
                if semantic.startswith('BLENDINDICES'):
                    vertex[semantic] = (0, 0, 0, 0)

    def write(self, output, operator=None):
        """Encode all vertices and write them to a binary output stream."""
        for vertex in self.vertices:
            output.write(self.layout.encode(vertex))

        msg = 'Wrote %i vertices to %s' % (len(self), output.name)
        if operator:
            operator.report({'INFO'}, msg)
        else:
            print(msg)

    def __len__(self):
        return len(self.vertices)

    def merge(self, other):
        """Merge another VertexBuffer from the same mesh split over multiple
        draw calls (layouts must match; overlapping vertices are skipped)."""
        if self.layout != other.layout:
            raise Fatal('Vertex buffers have different input layouts - ensure you are only trying to merge the same vertex buffer split across multiple draw calls')
        if self.first != other.first:
            # FIXME: Future 3DMigoto might automatically set first from the
            # index buffer and chop off unreferenced vertices to save space
            raise Fatal('Cannot merge multiple vertex buffers - please check for updates of the 3DMigoto import script, or import each buffer separately')
        self.vertices.extend(other.vertices[self.vertex_count:])
        self.vertex_count = max(self.vertex_count, other.vertex_count)
        assert(len(self.vertices) == self.vertex_count)

    def wipe_semantic_for_testing(self, semantic, val=0):
        """Debug helper: overwrite the given semantic (optionally just some
        of its .xyzw components) with val in every vertex."""
        print('WARNING: WIPING %s FOR TESTING PURPOSES!!!' % semantic)
        semantic, _, components = semantic.partition('.')
        if components:
            components = [{'x':0, 'y':1, 'z':2, 'w':3}[c] for c in components]
        else:
            components = range(4)
        for vertex in self.vertices:
            for s in list(vertex):
                if s == semantic:
                    v = list(vertex[semantic])
                    for component in components:
                        if component < len(v):
                            v[component] = val
                    vertex[semantic] = v
class IndexBuffer(object):
    """An index buffer parsed from 3DMigoto frame analysis output.

    Indices are stored as a list of 3-tuple faces (trianglelist only). Can be
    constructed either from an open .txt dump file, or from a DXGI format
    string (for building a new buffer on export).
    """
    def __init__(self, *args, load_indices=True):
        self.faces = []
        self.first = 0
        self.index_count = 0
        self.format = 'DXGI_FORMAT_UNKNOWN'
        self.offset = 0
        self.topology = 'trianglelist'

        # Either a single open file to parse, or a DXGI format string:
        if isinstance(args[0], io.IOBase):
            assert(len(args) == 1)
            self.parse_ib_txt(args[0], load_indices)
        else:
            self.format, = args

        self.encoder, self.decoder = EncoderDecoder(self.format)

    def append(self, face):
        """Add one face (tuple of indices) to the buffer."""
        self.faces.append(face)
        self.index_count += len(face)

    def parse_ib_txt(self, f, load_indices):
        """Parse header and (optionally) index data from a .txt dump.

        With load_indices=False only the header is read, for use as a format
        reference when the actual data comes from a .buf binary dump.
        """
        for line in map(str.strip, f):
            if line.startswith('byte offset:'):
                self.offset = int(line[13:])
            if line.startswith('first index:'):
                self.first = int(line[13:])
            elif line.startswith('index count:'):
                self.index_count = int(line[13:])
            elif line.startswith('topology:'):
                self.topology = line[10:]
                if line != 'topology: trianglelist':
                    raise Fatal('"%s" is not yet supported' % line)
            elif line.startswith('format:'):
                self.format = line[8:]
            elif line == '':
                # Blank line separates the header from the index data:
                if not load_indices:
                    return
                self.parse_index_data(f)
        assert(len(self.faces) * 3 == self.index_count)

    def parse_ib_bin(self, f):
        """Load indices from a raw .buf binary dump (format must be set)."""
        f.seek(self.offset)
        stride = format_size(self.format)
        # XXX: Should we respect the first index?
        # f.seek(self.first * stride, whence=1)
        self.first = 0

        face = []
        while True:
            index = f.read(stride)
            if not index:
                break
            face.append(*self.decoder(index))
            if len(face) == 3:
                self.faces.append(tuple(face))
                face = []
        assert(len(face) == 0)

        # We intentionally disregard the index count when loading from a
        # binary file, as we assume frame analysis might have only dumped a
        # partial buffer to the .txt files (e.g. if this was from a dump where
        # the draw call index count was overridden it may be cut short, or
        # where the .txt files contain only sub-meshes from each draw call and
        # we are loading the .buf file because it contains the entire mesh):
        self.index_count = len(self.faces) * 3

    def parse_index_data(self, f):
        """Parse whitespace separated indices, three per line (one face)."""
        for line in map(str.strip, f):
            face = tuple(map(int, line.split()))
            assert(len(face) == 3)
            self.faces.append(face)

    def merge(self, other):
        """Merge another IndexBuffer from the same mesh split over multiple
        draw calls (formats must match)."""
        if self.format != other.format:
            raise Fatal('Index buffers have different formats - ensure you are only trying to merge the same index buffer split across multiple draw calls')
        self.first = min(self.first, other.first)
        self.index_count += other.index_count
        self.faces.extend(other.faces)

    def write(self, output, operator=None):
        """Encode all faces and write them to a binary output stream."""
        for face in self.faces:
            output.write(self.encoder(face))

        msg = 'Wrote %i indices to %s' % (len(self), output.name)
        if operator:
            operator.report({'INFO'}, msg)
        else:
            print(msg)

    def __len__(self):
        return len(self.faces) * 3
def load_3dmigoto_mesh_bin(operator, vb_paths, ib_paths, pose_path):
    """Load a mesh from raw .buf binary dumps, using the matching .txt dumps
    as a reference for the buffer formats.

    vb_paths/ib_paths are sequences of (bin_path, txt_path) pairs - exactly
    one VB pair and at most one IB pair, since binary dumps cannot be merged.

    Returns (vb, ib, mesh_name, pose_path); ib is None when no index buffer
    was dumped. Raises Fatal on unsupported multi-buffer input.
    """
    if len(vb_paths) != 1 or len(ib_paths) > 1:
        raise Fatal('Cannot merge meshes loaded from binary files')

    # Loading from binary files, but still need to use the .txt files as a
    # reference for the format. Files are opened via context managers so they
    # are always closed (previously they were leaked):
    vb_bin_path, vb_txt_path = vb_paths[0]

    with open(vb_txt_path, 'r') as f:
        vb = VertexBuffer(f, load_vertices=False)
    with open(vb_bin_path, 'rb') as f:
        vb.parse_vb_bin(f)

    ib = None
    if ib_paths:
        # Only unpack the IB pair when one was actually provided (previously
        # this unpacked unconditionally and would raise IndexError on an
        # empty ib_paths):
        ib_bin_path, ib_txt_path = ib_paths[0]
        with open(ib_txt_path, 'r') as f:
            ib = IndexBuffer(f, load_indices=False)
        with open(ib_bin_path, 'rb') as f:
            ib.parse_ib_bin(f)

    return vb, ib, os.path.basename(vb_bin_path), pose_path
def load_3dmigoto_mesh(operator, paths):
    """Load a mesh from 3DMigoto frame analysis dumps.

    paths is a sequence of (vb_path, ib_path, use_bin, pose_path) tuples;
    additional entries are merged into the first to reassemble meshes that
    were split over multiple draw calls (text dumps only - binary dumps are
    delegated to load_3dmigoto_mesh_bin and cannot be merged).

    Returns (vb, ib, mesh_name, pose_path); ib is None when no index buffer
    was dumped.
    """
    vb_paths, ib_paths, use_bin, pose_path = zip(*paths)
    pose_path = pose_path[0]

    if use_bin[0]:
        return load_3dmigoto_mesh_bin(operator, vb_paths, ib_paths, pose_path)

    # Files are opened via context managers so they are always closed
    # (previously they were leaked):
    with open(vb_paths[0], 'r') as f:
        vb = VertexBuffer(f)
    # Merge additional vertex buffers for meshes split over multiple draw calls:
    for vb_path in vb_paths[1:]:
        with open(vb_path, 'r') as f:
            vb.merge(VertexBuffer(f))

    ib = None
    if ib_paths:
        with open(ib_paths[0], 'r') as f:
            ib = IndexBuffer(f)
        # Merge additional index buffers for meshes split over multiple draw calls:
        for ib_path in ib_paths[1:]:
            with open(ib_path, 'r') as f:
                ib.merge(IndexBuffer(f))

    return vb, ib, os.path.basename(vb_paths[0]), pose_path
def import_normals_step1(mesh, data):
    """Stash per-loop custom normals on the mesh (step 1 of 2).

    Normals are stored in the loops rather than the vertices so they are
    not lost by Blender's edit mode or mesh.update(); they are committed
    with normals_split_custom_set() in step 2 after mesh.validate().
    Raises Fatal if 4-component normals carry a non-zero W.
    """
    # Ensure normals are 3-dimensional:
    # XXX: Assertion triggers in DOA6
    if len(data[0]) == 4:
        if [vec[3] for vec in data] != [0.0] * len(data):
            raise Fatal('Normals are 4D')
    normals = [tuple(vec[:3]) for vec in data]

    # For testing, to make sure our normals are preserved let's use
    # garbage ones:
    #import random
    #normals = [(random.random() * 2 - 1,random.random() * 2 - 1,random.random() * 2 - 1) for x in normals]

    # Comment from other import scripts:
    # Note: we store 'temp' normals in loops, since validate() may alter final mesh,
    # we can only set custom lnors *after* calling it.
    mesh.create_normals_split()
    for loop in mesh.loops:
        loop.normal[:] = normals[loop.vertex_index]
def import_normals_step2(mesh):
    """Commit the per-loop normals stashed by step 1 as Blender custom
    split normals. Must run *after* mesh.validate()."""
    # Taken from import_obj/import_fbx
    clnors = array('f', [0.0] * (len(mesh.loops) * 3))
    mesh.loops.foreach_get("normal", clnors)

    # Not sure this is still required with use_auto_smooth, but the other
    # importers do it, and at the very least it shouldn't hurt...
    mesh.polygons.foreach_set("use_smooth", [True] * len(mesh.polygons))

    # Regroup the flat float array back into (x, y, z) triples:
    mesh.normals_split_custom_set(tuple(zip(*(iter(clnors),) * 3)))
    mesh.use_auto_smooth = True # This has a double meaning, one of which is to use the custom normals
    mesh.show_edge_sharp = True
def import_vertex_groups(mesh, obj, blend_indices, blend_weights):
    """Create numbered vertex groups from the BLENDINDICES/BLENDWEIGHT
    semantics and assign every non-zero weight to its group.

    blend_indices/blend_weights: {SemanticIndex: per-vertex tuples} as
    collected by import_vertices().
    """
    assert(len(blend_indices) == len(blend_weights))
    if blend_indices:
        # We will need to make sure we re-export the same blend indices later -
        # that they haven't been renumbered. Not positive whether it is better
        # to use the vertex group index, vertex group name or attach some extra
        # data. Make sure the indices and names match:
        num_vertex_groups = max(itertools.chain(*itertools.chain(*blend_indices.values()))) + 1
        for i in range(num_vertex_groups):
            obj.vertex_groups.new(str(i))
        for vertex in mesh.vertices:
            for semantic_index in sorted(blend_indices.keys()):
                for i, w in zip(blend_indices[semantic_index][vertex.index], blend_weights[semantic_index][vertex.index]):
                    if w == 0.0:
                        # Skip zero weights so unused groups stay empty:
                        continue
                    obj.vertex_groups[i].add((vertex.index,), w, 'REPLACE')
def import_uv_layers(mesh, obj, texcoords, flip_texcoord_v):
    """Create UV layers from the TEXCOORD semantics.

    texcoords: {SemanticIndex: per-vertex tuples}. Since Blender UVs are
    2D, a 4-component TEXCOORD is split into two layers named
    '<TEXCOORDn>.xy' and '<TEXCOORDn>.zw'. Raises Fatal on other sizes.
    """
    for (texcoord, data) in sorted(texcoords.items()):
        # TEXCOORDS can have up to four components, but UVs can only have two
        # dimensions. Not positive of the best way to handle this in general,
        # but for now I'm thinking that splitting the TEXCOORD into two sets of
        # UV coordinates might work:
        dim = len(data[0])
        if dim == 4:
            components_list = ('xy', 'zw')
        elif dim == 2:
            components_list = ('xy',)
        else:
            raise Fatal('Unhandled TEXCOORD dimension: %i' % dim)
        cmap = {'x': 0, 'y': 1, 'z': 2, 'w': 3}

        for components in components_list:
            # Semantic index 0 gets no digit, matching DirectX conventions
            # (e.g. 'TEXCOORD.xy', 'TEXCOORD1.xy'):
            uv_name = 'TEXCOORD%s.%s' % (texcoord and texcoord or '', components)
            mesh.uv_textures.new(uv_name)
            blender_uvs = mesh.uv_layers[uv_name]

            # This will assign a texture to the UV layer, which works fine but
            # working out which texture maps to which UV layer is guesswork
            # before the import and the artist may as well just assign it
            # themselves in the UV editor pane when they can see the unwrapped
            # mesh to compare it with the dumped textures:
            #
            #path = textures.get(uv_layer, None)
            #if path is not None:
            #    image = load_image(path)
            #    for i in range(len(mesh.polygons)):
            #        mesh.uv_textures[uv_layer].data[i].image = image

            # Can't find an easy way to flip the display of V in Blender, so
            # add an option to flip it on import & export:
            if flip_texcoord_v:
                flip_uv = lambda uv: (uv[0], 1.0 - uv[1])
                # Record that V was flipped so we know to undo it when exporting:
                obj['3DMigoto:' + uv_name] = {'flip_v': True}
            else:
                flip_uv = lambda uv: uv

            uvs = [[d[cmap[c]] for c in components] for d in data]
            for l in mesh.loops:
                blender_uvs.data[l.index].uv = flip_uv(uvs[l.vertex_index])
# This loads unknown data from the vertex buffers as vertex layers
def import_vertex_layers(mesh, obj, vertex_layers):
    """Store unhandled per-vertex data in int/float vertex layers so it can
    be round-tripped on export. Multi-component elements become one layer
    per component, named '<element>.x'/'.y'/'.z'/'.w'."""
    for (element_name, data) in sorted(vertex_layers.items()):
        dim = len(data[0])
        cmap = {0: 'x', 1: 'y', 2: 'z', 3: 'w'}
        for component in range(dim):

            # A single-component name that already contains a '.' (e.g.
            # 'POSITION.w') is used as the layer name unchanged:
            if dim != 1 or element_name.find('.') == -1:
                layer_name = '%s.%s' % (element_name, cmap[component])
            else:
                layer_name = element_name

            if type(data[0][0]) == int:
                mesh.vertex_layers_int.new(layer_name)
                layer = mesh.vertex_layers_int[layer_name]
                for v in mesh.vertices:
                    val = data[v.index][component]
                    # Blender integer layers are 32bit signed and will throw an
                    # exception if we are assigning an unsigned value that
                    # can't fit in that range. Reinterpret as signed if necessary:
                    if val < 0x80000000:
                        layer.data[v.index].value = val
                    else:
                        layer.data[v.index].value = struct.unpack('i', struct.pack('I', val))[0]
            elif type(data[0][0]) == float:
                mesh.vertex_layers_float.new(layer_name)
                layer = mesh.vertex_layers_float[layer_name]
                for v in mesh.vertices:
                    layer.data[v.index].value = data[v.index][component]
            else:
                raise Fatal('BUG: Bad layer type %s' % type(data[0][0]))
def import_faces_from_ib(mesh, ib):
    """Build the mesh's triangle topology from an imported index buffer."""
    num_faces = len(ib.faces)
    mesh.loops.add(num_faces * 3)
    mesh.polygons.add(num_faces)

    # Each polygon is a consecutive run of three loops:
    mesh.loops.foreach_set('vertex_index', unpack_list(ib.faces))
    mesh.polygons.foreach_set('loop_start', [face * 3 for face in range(num_faces)])
    mesh.polygons.foreach_set('loop_total', [3] * num_faces)
def import_faces_from_vb(mesh, vb):
    """Build triangles directly from consecutive vertices when no index
    buffer was dumped (vertex N*3..N*3+2 form triangle N)."""
    # Only lightly tested
    num_faces = len(vb.vertices) // 3
    mesh.loops.add(num_faces * 3)
    mesh.polygons.add(num_faces)

    mesh.loops.foreach_set('vertex_index', list(range(num_faces * 3)))
    mesh.polygons.foreach_set('loop_start', [face * 3 for face in range(num_faces)])
    mesh.polygons.foreach_set('loop_total', [3] * num_faces)
def import_vertices(mesh, vb):
    """Fill in mesh vertex positions and gather the remaining per-vertex
    data from the vertex buffer, dispatching on semantic name.

    Returns (blend_indices, blend_weights, texcoords, vertex_layers,
    use_normals) for the follow-up import passes.
    """
    mesh.vertices.add(len(vb.vertices))

    seen_offsets = set()
    blend_indices = {}
    blend_weights = {}
    texcoords = {}
    vertex_layers = {}
    use_normals = False

    for elem in vb.layout:
        if elem.InputSlotClass != 'per-vertex':
            continue

        # Discard elements that reuse offsets in the vertex buffer, e.g. COLOR
        # and some TEXCOORDs may be aliases of POSITION:
        if (elem.InputSlot, elem.AlignedByteOffset) in seen_offsets:
            assert(elem.name != 'POSITION')
            continue
        seen_offsets.add((elem.InputSlot, elem.AlignedByteOffset))

        data = tuple( x[elem.name] for x in vb.vertices )
        if elem.name == 'POSITION':
            # Ensure positions are 3-dimensional:
            if len(data[0]) == 4:
                if ([x[3] for x in data] != [1.0]*len(data)):
                    # XXX: Leaving this fatal error in for now, as the meshes
                    # it triggers on in DOA6 (skirts) lie about almost every
                    # semantic and we cannot import them with this version of
                    # the script regardless. Comment it out if you want to try
                    # importing anyway and preserving the W coordinate in a
                    # vertex group. It might also be possible to project this
                    # back into 3D if we assume the coordinates are homogeneous
                    # (i.e. divide XYZ by W), but that might be assuming too
                    # much for a generic script.
                    raise Fatal('Positions are 4D')
                    # NOTE(review): the two statements below are unreachable
                    # while the Fatal above is enabled - they are the
                    # alternative path described in the comment above.
                    # Occurs in some meshes in DOA6, such as skirts.
                    # W coordinate must be preserved in these cases.
                    print('Positions are 4D, storing W coordinate in POSITION.w vertex layer')
                    vertex_layers['POSITION.w'] = [[x[3]] for x in data]
            positions = [(x[0], x[1], x[2]) for x in data]
            mesh.vertices.foreach_set('co', unpack_list(positions))
        elif elem.name.startswith('COLOR'):
            if len(data[0]) <= 3:
                mesh.vertex_colors.new(elem.name)
                color_layer = mesh.vertex_colors[elem.name].data
                for l in mesh.loops:
                    # Pad colours with fewer than 3 components up to RGB:
                    color_layer[l.index].color = data[l.vertex_index] + [0]*(3-len(data[l.vertex_index]))
            else:
                # 4-component colour: store the 4th component in a separate
                # '.A' layer (as its red channel) alongside the '.RGB' layer:
                mesh.vertex_colors.new(elem.name + '.RGB')
                mesh.vertex_colors.new(elem.name + '.A')
                color_layer = mesh.vertex_colors[elem.name + '.RGB'].data
                alpha_layer = mesh.vertex_colors[elem.name + '.A'].data
                for l in mesh.loops:
                    color_layer[l.index].color = data[l.vertex_index][:3]
                    alpha_layer[l.index].color = [data[l.vertex_index][3], 0, 0]
        elif elem.name == 'NORMAL':
            use_normals = True
            import_normals_step1(mesh, data)
        elif elem.name in ('TANGENT', 'BINORMAL'):
        #    # XXX: loops.tangent is read only. Not positive how to handle
        #    # this, or if we should just calculate it when re-exporting.
        #    for l in mesh.loops:
        #        assert(data[l.vertex_index][3] in (1.0, -1.0))
        #        l.tangent[:] = data[l.vertex_index][0:3]
            print('NOTICE: Skipping import of %s in favour of recalculating on export' % elem.name)
        elif elem.name.startswith('BLENDINDICES'):
            blend_indices[elem.SemanticIndex] = data
        elif elem.name.startswith('BLENDWEIGHT'):
            blend_weights[elem.SemanticIndex] = data
        elif elem.name.startswith('TEXCOORD') and elem.is_float():
            texcoords[elem.SemanticIndex] = data
        else:
            # Anything unrecognised is preserved verbatim so export can
            # round-trip it:
            print('NOTICE: Storing unhandled semantic %s %s as vertex layer' % (elem.name, elem.Format))
            vertex_layers[elem.name] = data

    return (blend_indices, blend_weights, texcoords, vertex_layers, use_normals)
def import_3dmigoto(operator, context, paths, merge_meshes=True, **kwargs):
    """Import entry point: either merge all paths into one object, or
    create one object per draw call, reporting (not raising) failures."""
    if merge_meshes:
        import_3dmigoto_vb_ib(operator, context, paths, **kwargs)
        return

    imported = []
    for path in paths:
        try:
            imported.append(import_3dmigoto_vb_ib(operator, context, [path], **kwargs))
        except Fatal as e:
            # Report and continue with the remaining draw calls:
            operator.report({'ERROR'}, str(e) + ': ' + str(path[:2]))
    # FIXME: Group objects together
def import_3dmigoto_vb_ib(operator, context, paths, flip_texcoord_v=True, axis_forward='-Z', axis_up='Y', pose_cb_off=[0,0], pose_cb_step=1):
    """Import a single mesh from the given dump paths, link it into the
    scene and make it active; optionally import a bone pose as well.

    Returns the new Blender object.
    NOTE(review): mutable default pose_cb_off=[0,0] - not mutated here,
    but callers should not rely on the shared default.
    """
    vb, ib, name, pose_path = load_3dmigoto_mesh(operator, paths)

    mesh = bpy.data.meshes.new(name)
    obj = bpy.data.objects.new(mesh.name, mesh)

    global_matrix = axis_conversion(from_forward=axis_forward, from_up=axis_up).to_4x4()
    obj.matrix_world = global_matrix

    # Attach the vertex buffer layout to the object for later exporting. Can't
    # seem to retrieve this if attached to the mesh - to_mesh() doesn't copy it:
    obj['3DMigoto:VBLayout'] = vb.layout.serialise()
    obj['3DMigoto:VBStride'] = vb.layout.stride # FIXME: Strides of multiple vertex buffers
    obj['3DMigoto:FirstVertex'] = vb.first

    if ib is not None:
        import_faces_from_ib(mesh, ib)
        # Attach the index buffer layout to the object for later exporting.
        obj['3DMigoto:IBFormat'] = ib.format
        obj['3DMigoto:FirstIndex'] = ib.first
    else:
        # No index buffer dumped - assume a non-indexed triangle list:
        import_faces_from_vb(mesh, vb)

    (blend_indices, blend_weights, texcoords, vertex_layers, use_normals) = import_vertices(mesh, vb)

    import_uv_layers(mesh, obj, texcoords, flip_texcoord_v)

    import_vertex_layers(mesh, obj, vertex_layers)

    import_vertex_groups(mesh, obj, blend_indices, blend_weights)

    # Validate closes the loops so they don't disappear after edit mode and probably other important things:
    mesh.validate(verbose=False, clean_customdata=False) # *Very* important to not remove lnors here!
    # Not actually sure update is necessary. It seems to update the vertex normals, not sure what else:
    mesh.update()

    # Must be done after validate step:
    if use_normals:
        import_normals_step2(mesh)
    else:
        mesh.calc_normals()

    base = context.scene.objects.link(obj)
    base.select = True
    context.scene.objects.active = obj

    if pose_path is not None:
        import_pose(operator, context, pose_path, limit_bones_to_vertex_groups=True,
                axis_forward=axis_forward, axis_up=axis_up,
                pose_cb_off=pose_cb_off, pose_cb_step=pose_cb_step)
        # import_pose makes the armature the active object; restore ours:
        context.scene.objects.active = obj

    return obj
# from export_obj:
def mesh_triangulate(me):
    """Triangulate the mesh *me* in place via a temporary BMesh."""
    import bmesh
    tmp = bmesh.new()
    tmp.from_mesh(me)
    bmesh.ops.triangulate(tmp, faces=tmp.faces)
    tmp.to_mesh(me)
    tmp.free()
def blender_vertex_to_3dmigoto_vertex(mesh, obj, blender_loop_vertex, layout, texcoords):
    """Convert one Blender loop (face corner) into a 3DMigoto vertex dict
    keyed by semantic name, following the element layout recorded on the
    object at import time.

    texcoords: {uv_layer_name: {loop_index: (u, v)}} prepared by the
    exporter (V already un-flipped if needed).
    """
    blender_vertex = mesh.vertices[blender_loop_vertex.vertex_index]
    vertex = {}
    seen_offsets = set()

    # TODO: Warn if vertex is in too many vertex groups for this layout,
    # ignoring groups with weight=0.0
    # Strongest weights first, matching the BLENDINDICES slot order below:
    vertex_groups = sorted(blender_vertex.groups, key=lambda x: x.weight, reverse=True)

    for elem in layout:
        if elem.InputSlotClass != 'per-vertex':
            continue

        # Skip elements that alias a buffer offset we have already filled:
        if (elem.InputSlot, elem.AlignedByteOffset) in seen_offsets:
            continue
        seen_offsets.add((elem.InputSlot, elem.AlignedByteOffset))

        if elem.name == 'POSITION':
            if 'POSITION.w' in mesh.vertex_layers_float:
                # W coordinate was preserved in a vertex layer at import time:
                vertex[elem.name] = list(blender_vertex.undeformed_co) + \
                        [mesh.vertex_layers_float['POSITION.w'].data[blender_loop_vertex.vertex_index].value]
            else:
                vertex[elem.name] = elem.pad(list(blender_vertex.undeformed_co), 1.0)
        elif elem.name.startswith('COLOR'):
            if elem.name in mesh.vertex_colors:
                vertex[elem.name] = elem.clip(list(mesh.vertex_colors[elem.name].data[blender_loop_vertex.index].color))
            else:
                # Colour was split into '.RGB' + '.A' layers at import time:
                vertex[elem.name] = list(mesh.vertex_colors[elem.name+'.RGB'].data[blender_loop_vertex.index].color) + \
                        [mesh.vertex_colors[elem.name+'.A'].data[blender_loop_vertex.index].color[0]]
        elif elem.name == 'NORMAL':
            vertex[elem.name] = elem.pad(list(blender_loop_vertex.normal), 0.0)
        elif elem.name.startswith('TANGENT'):
            # DOAXVV has +1/-1 in the 4th component. Not positive what this is,
            # but guessing maybe the bitangent sign? Not even sure it is used...
            # FIXME: Other games
            vertex[elem.name] = elem.pad(list(blender_loop_vertex.tangent), blender_loop_vertex.bitangent_sign)
        elif elem.name.startswith('BINORMAL'):
            # Some DOA6 meshes (skirts) use BINORMAL, but I'm not certain it is
            # actually the binormal. These meshes are weird though, since they
            # use 4 dimensional positions and normals, so they aren't something
            # we can really deal with at all. Therefore, the below is untested,
            # FIXME: So find a mesh where this is actually the binormal,
            # uncomment the below code and test.
            # normal = blender_loop_vertex.normal
            # tangent = blender_loop_vertex.tangent
            # binormal = numpy.cross(normal, tangent)
            # XXX: Does the binormal need to be normalised to a unit vector?
            # binormal = binormal / numpy.linalg.norm(binormal)
            # vertex[elem.name] = elem.pad(list(binormal), 0.0)
            pass
        elif elem.name.startswith('BLENDINDICES'):
            # Four groups per BLENDINDICES semantic index:
            i = elem.SemanticIndex * 4
            vertex[elem.name] = elem.pad([ x.group for x in vertex_groups[i:i+4] ], 0)
        elif elem.name.startswith('BLENDWEIGHT'):
            # TODO: Warn if vertex is in too many vertex groups for this layout
            i = elem.SemanticIndex * 4
            vertex[elem.name] = elem.pad([ x.weight for x in vertex_groups[i:i+4] ], 0.0)
        elif elem.name.startswith('TEXCOORD') and elem.is_float():
            # FIXME: Handle texcoords of other dimensions
            uvs = []
            for uv_name in ('%s.xy' % elem.name, '%s.zw' % elem.name):
                if uv_name in texcoords:
                    uvs += list(texcoords[uv_name][blender_loop_vertex.index])
            vertex[elem.name] = uvs
        else:
            # Unhandled semantics are saved in vertex layers
            data = []
            for component in 'xyzw':
                layer_name = '%s.%s' % (elem.name, component)
                if layer_name in mesh.vertex_layers_int:
                    data.append(mesh.vertex_layers_int[layer_name].data[blender_loop_vertex.vertex_index].value)
                elif layer_name in mesh.vertex_layers_float:
                    data.append(mesh.vertex_layers_float[layer_name].data[blender_loop_vertex.vertex_index].value)
            if data:
                #print('Retrieved unhandled semantic %s %s from vertex layer' % (elem.name, elem.Format), data)
                vertex[elem.name] = data

        if elem.name not in vertex:
            print('NOTICE: Unhandled vertex element: %s' % elem.name)
        #else:
        #    print('%s: %s' % (elem.name, repr(vertex[elem.name])))

    return vertex
def write_fmt_file(f, vb, ib):
    """Write a .fmt format-description file (stride, topology, optional
    index format, then the element layout) for the exported buffers."""
    header = ['stride: %i' % vb.layout.stride, 'topology: %s' % vb.topology]
    if ib is not None:
        header.append('format: %s' % ib.format)
    f.write('\n'.join(header) + '\n')
    f.write(vb.layout.to_string())
def export_3dmigoto(operator, context, vb_path, ib_path, fmt_path):
    """Export the active object to 3DMigoto .vb/.ib/.fmt files using the
    layout metadata stored on the object at import time.

    Raises Fatal if no object is selected or if the object lacks an
    index buffer format (export without one is not implemented).
    """
    obj = context.object
    if obj is None:
        raise Fatal('No object selected')

    stride = obj['3DMigoto:VBStride']
    layout = InputLayout(obj['3DMigoto:VBLayout'], stride=stride)
    mesh = obj.to_mesh(context.scene, True, 'PREVIEW', calc_tessface=False)
    mesh_triangulate(mesh)

    # NOTE(review): 'indices'/'faces' are computed but not used below - the
    # exported faces are rebuilt in the polygon loop further down.
    indices = [ l.vertex_index for l in mesh.loops ]
    faces = [ indices[i:i+3] for i in range(0, len(indices), 3) ]

    try:
        ib_format = obj['3DMigoto:IBFormat']
    except KeyError:
        ib = None
        raise Fatal('FIXME: Add capability to export without an index buffer')
    else:
        ib = IndexBuffer(ib_format)

    # Calculates tangents and makes loop normals valid (still with our
    # custom normal data from import time):
    mesh.calc_tangents()

    # Pre-fetch per-loop UVs for each layer, undoing the V flip recorded
    # on the object at import time:
    texcoord_layers = {}
    for uv_layer in mesh.uv_layers:
        texcoords = {}

        try:
            flip_texcoord_v = obj['3DMigoto:' + uv_layer.name]['flip_v']
            if flip_texcoord_v:
                flip_uv = lambda uv: (uv[0], 1.0 - uv[1])
            else:
                flip_uv = lambda uv: uv
        except KeyError:
            flip_uv = lambda uv: uv

        for l in mesh.loops:
            uv = flip_uv(uv_layer.data[l.index].uv)
            texcoords[l.index] = uv
        texcoord_layers[uv_layer.name] = texcoords

    # Blender's vertices have unique positions, but may have multiple
    # normals, tangents, UV coordinates, etc - these are stored in the
    # loops. To export back to DX we need these combined together such that
    # a vertex is a unique set of all attributes, but we don't want to
    # completely blow this out - we still want to reuse identical vertices
    # via the index buffer. There might be a convenience function in
    # Blender to do this, but it's easy enough to do this ourselves
    indexed_vertices = collections.OrderedDict()
    for poly in mesh.polygons:
        face = []
        for blender_lvertex in mesh.loops[poly.loop_start:poly.loop_start + poly.loop_total]:
            vertex = blender_vertex_to_3dmigoto_vertex(mesh, obj, blender_lvertex, layout, texcoord_layers)
            face.append(indexed_vertices.setdefault(HashableVertex(vertex), len(indexed_vertices)))
        if ib is not None:
            ib.append(face)

    vb = VertexBuffer(layout=layout)
    for vertex in indexed_vertices:
        vb.append(vertex)

    # Vertex group maps previously attached by apply_vgmap(); the suffix
    # follows '3DMigoto:VGMap:' in the property name ('' = unsuffixed):
    vgmaps = {k[15:]:keys_to_ints(v) for k,v in obj.items() if k.startswith('3DMigoto:VGMap:')}

    if '' not in vgmaps:
        # No unsuffixed map - write the unmapped buffer at the chosen path:
        vb.write(open(vb_path, 'wb'), operator=operator)

    # Write one remapped vertex buffer (plus .vgmap reference) per map:
    base, ext = os.path.splitext(vb_path)
    for (suffix, vgmap) in vgmaps.items():
        path = vb_path
        if suffix:
            path = '%s-%s%s' % (base, suffix, ext)
        vgmap_path = os.path.splitext(path)[0] + '.vgmap'
        print('Exporting %s...' % path)
        vb.remap_blendindices(obj, vgmap)
        vb.write(open(path, 'wb'), operator=operator)
        vb.revert_blendindices_remap()
        sorted_vgmap = collections.OrderedDict(sorted(vgmap.items(), key=lambda x:x[1]))
        json.dump(sorted_vgmap, open(vgmap_path, 'w'), indent=2)

    if ib is not None:
        ib.write(open(ib_path, 'wb'), operator=operator)

    # Write format reference file
    write_fmt_file(open(fmt_path, 'w'), vb, ib)
# Mixin providing axis_forward/axis_up operator properties (defaults: -Z
# forward, Y up) shared by the import operators below:
IOOBJOrientationHelper = orientation_helper_factory("IOOBJOrientationHelper", axis_forward='-Z', axis_up='Y')
class Import3DMigotoFrameAnalysis(bpy.types.Operator, ImportHelper, IOOBJOrientationHelper):
    """Import a mesh dumped with 3DMigoto's frame analysis"""
    bl_idname = "import_mesh.migoto_frame_analysis"
    bl_label = "Import 3DMigoto Frame Analysis Dump"
    bl_options = {'PRESET', 'UNDO'}

    filename_ext = '.txt'
    filter_glob = StringProperty(
            default='*.txt',
            options={'HIDDEN'},
            )

    files = CollectionProperty(
            name="File Path",
            type=bpy.types.OperatorFileListElement,
            )

    flip_texcoord_v = BoolProperty(
            name="Flip TEXCOORD V",
            # Typo fix: "asix" -> "axis"
            description="Flip TEXCOORD V axis during importing",
            default=True,
            )

    load_related = BoolProperty(
            name="Auto-load related meshes",
            description="Automatically load related meshes found in the frame analysis dump",
            default=True,
            )

    load_buf = BoolProperty(
            name="Load .buf files instead",
            description="Load the mesh from the binary .buf dumps instead of the .txt files\nThis will load the entire mesh as a single object instead of separate objects from each draw call",
            default=False,
            )

    merge_meshes = BoolProperty(
            name="Merge meshes together",
            description="Merge all selected meshes together into one object. Meshes must be related",
            default=False,
            )

    pose_cb = StringProperty(
            name="Bone CB",
            description='Indicate a constant buffer slot (e.g. "vs-cb2") containing the bone matrices',
            default="",
            )

    pose_cb_off = bpy.props.IntVectorProperty(
            name="Bone CB range",
            description='Indicate start and end offsets (in multiples of 4 component values) to find the matrices in the Bone CB',
            default=[0,0],
            size=2,
            min=0,
            )

    pose_cb_step = bpy.props.IntProperty(
            name="Vertex group step",
            description='If used vertex groups are 0,1,2,3,etc specify 1. If they are 0,3,6,9,12,etc specify 3',
            default=1,
            min=1,
            )

    def get_vb_ib_paths(self):
        """Work out the set of (vb_path, ib_path, use_bin, pose_path)
        tuples to load from the selected files, optionally expanding the
        selection to all files sharing the same buffer hashes."""
        buffer_pattern = re.compile(r'''-(?:ib|vb[0-9]+)(?P<hash>=[0-9a-f]+)?(?=[^0-9a-f=])''')

        dirname = os.path.dirname(self.filepath)
        ret = set()

        files = []
        if self.load_related:
            # Expand the selection to every file with a matching buffer hash:
            for filename in self.files:
                match = buffer_pattern.search(filename.name)
                if match is None or not match.group('hash'):
                    continue
                paths = glob(os.path.join(dirname, '*%s*.txt' % filename.name[match.start():match.end()]))
                files.extend([os.path.basename(x) for x in paths])
        if not files:
            files = [x.name for x in self.files]

        for filename in files:
            match = buffer_pattern.search(filename)
            if match is None:
                raise Fatal('Unable to find corresponding buffers from filename - ensure you are loading a dump from a timestamped Frame Analysis directory (not a deduped directory)')

            use_bin = self.load_buf
            if not match.group('hash') and not use_bin:
                self.report({'INFO'}, 'Filename did not contain hash - if Frame Analysis dumped a custom resource the .txt file may be incomplete, Using .buf files instead')
                use_bin = True # FIXME: Ask

            ib_pattern = filename[:match.start()] + '-ib*' + filename[match.end():]
            vb_pattern = filename[:match.start()] + '-vb*' + filename[match.end():]
            ib_paths = glob(os.path.join(dirname, ib_pattern))
            vb_paths = glob(os.path.join(dirname, vb_pattern))

            if vb_paths and use_bin:
                vb_bin_paths = [ os.path.splitext(x)[0] + '.buf' for x in vb_paths ]
                ib_bin_paths = [ os.path.splitext(x)[0] + '.buf' for x in ib_paths ]
                if all([ os.path.exists(x) for x in itertools.chain(vb_bin_paths, ib_bin_paths) ]):
                    # When loading the binary files, we still need to process
                    # the .txt files as well, as they indicate the format:
                    ib_paths = list(zip(ib_bin_paths, ib_paths))
                    vb_paths = list(zip(vb_bin_paths, vb_paths))
                else:
                    self.report({'WARNING'}, 'Corresponding .buf files not found - using .txt files')
                    use_bin = False

            pose_path = None
            if self.pose_cb:
                pose_pattern = filename[:match.start()] + '*-' + self.pose_cb + '=*.txt'
                try:
                    pose_path = glob(os.path.join(dirname, pose_pattern))[0]
                except IndexError:
                    pass

            if len(ib_paths) != 1 or len(vb_paths) != 1:
                raise Fatal('Only draw calls using a single vertex buffer and a single index buffer are supported for now')
            ret.add((vb_paths[0], ib_paths[0], use_bin, pose_path))
        return ret

    def execute(self, context):
        if self.load_buf:
            # Is there a way to have the mutual exclusivity reflected in
            # the UI? Grey out options or use radio buttons or whatever?
            if self.merge_meshes or self.load_related:
                self.report({'INFO'}, 'Loading .buf files selected: Disabled incompatible options')
            self.merge_meshes = False
            self.load_related = False

        try:
            keywords = self.as_keywords(ignore=('filepath', 'files', 'filter_glob', 'load_related', 'load_buf', 'pose_cb'))
            paths = self.get_vb_ib_paths()

            import_3dmigoto(self, context, paths, **keywords)
        except Fatal as e:
            self.report({'ERROR'}, str(e))
        return {'FINISHED'}
def import_3dmigoto_raw_buffers(operator, context, vb_fmt_path, ib_fmt_path, vb_path=None, ib_path=None, vgmap_path=None, **kwargs):
    """Import one mesh from raw binary buffers plus format reference files,
    then optionally apply a .vgmap to the newly imported object."""
    single_mesh = ((vb_path, vb_fmt_path), (ib_path, ib_fmt_path), True, None)
    import_3dmigoto(operator, context, (single_mesh,), merge_meshes=False, **kwargs)
    if vgmap_path:
        apply_vgmap(operator, context, targets=[context.active_object], filepath=vgmap_path, rename=True)
class Import3DMigotoRaw(bpy.types.Operator, ImportHelper, IOOBJOrientationHelper):
    """Import raw 3DMigoto vertex and index buffers"""
    bl_idname = "import_mesh.migoto_raw_buffers"
    bl_label = "Import 3DMigoto Raw Buffers"
    #bl_options = {'PRESET', 'UNDO'}
    bl_options = {'UNDO'}

    filename_ext = '.vb;.ib'
    filter_glob = StringProperty(
            default='*.vb;*.ib',
            options={'HIDDEN'},
            )

    files = CollectionProperty(
            name="File Path",
            type=bpy.types.OperatorFileListElement,
            )

    flip_texcoord_v = BoolProperty(
            name="Flip TEXCOORD V",
            # Typo fix: "asix" -> "axis"
            description="Flip TEXCOORD V axis during importing",
            default=True,
            )

    def get_vb_ib_paths(self, filename):
        """Locate the sibling .vb/.ib buffers (required) and .fmt/.vgmap
        files (optional, None when absent) for the chosen file."""
        vb_bin_path = os.path.splitext(filename)[0] + '.vb'
        ib_bin_path = os.path.splitext(filename)[0] + '.ib'
        fmt_path = os.path.splitext(filename)[0] + '.fmt'
        vgmap_path = os.path.splitext(filename)[0] + '.vgmap'
        if not os.path.exists(vb_bin_path):
            raise Fatal('Unable to find matching .vb file for %s' % filename)
        if not os.path.exists(ib_bin_path):
            raise Fatal('Unable to find matching .ib file for %s' % filename)
        if not os.path.exists(fmt_path):
            fmt_path = None
        if not os.path.exists(vgmap_path):
            vgmap_path = None
        return (vb_bin_path, ib_bin_path, fmt_path, vgmap_path)

    def execute(self, context):
        # I'm not sure how to find the Import3DMigotoReferenceInputFormat
        # instance that Blender instantiated to pass the values from one
        # import dialog to another, but since everything is modal we can
        # just use globals:
        global migoto_raw_import_options
        migoto_raw_import_options = self.as_keywords(ignore=('filepath', 'files', 'filter_glob'))

        done = set()
        dirname = os.path.dirname(self.filepath)
        for filename in self.files:
            try:
                (vb_path, ib_path, fmt_path, vgmap_path) = self.get_vb_ib_paths(os.path.join(dirname, filename.name))
                # Skip duplicates when both the .vb and .ib of the same mesh
                # were selected:
                if os.path.normcase(vb_path) in done:
                    continue
                done.add(os.path.normcase(vb_path))

                if fmt_path is not None:
                    import_3dmigoto_raw_buffers(self, context, fmt_path, fmt_path, vb_path=vb_path, ib_path=ib_path, vgmap_path=vgmap_path, **migoto_raw_import_options)
                else:
                    # No .fmt file - ask for a reference format file via a
                    # follow-up modal operator:
                    migoto_raw_import_options['vb_path'] = vb_path
                    migoto_raw_import_options['ib_path'] = ib_path
                    bpy.ops.import_mesh.migoto_input_format('INVOKE_DEFAULT')
            except Fatal as e:
                self.report({'ERROR'}, str(e))
        return {'FINISHED'}
# Modal follow-up dialog used by Import3DMigotoRaw when no .fmt file was
# found: the user picks a .txt/.fmt file describing the buffer formats.
class Import3DMigotoReferenceInputFormat(bpy.types.Operator, ImportHelper):
    bl_idname = "import_mesh.migoto_input_format"
    bl_label = "Select a .txt file with matching format"
    bl_options = {'UNDO', 'INTERNAL'}

    filename_ext = '.txt;.fmt'
    filter_glob = StringProperty(
            default='*.txt;*.fmt',
            options={'HIDDEN'},
            )

    def get_vb_ib_paths(self):
        """Return (vb_fmt_path, ib_fmt_path) derived from the chosen file.

        A .fmt file describes both buffers at once; otherwise the matching
        -vb*/-ib* frame analysis .txt dumps are located by filename.
        """
        if os.path.splitext(self.filepath)[1].lower() == '.fmt':
            return (self.filepath, self.filepath)

        buffer_pattern = re.compile(r'''-(?:ib|vb[0-9]+)(?P<hash>=[0-9a-f]+)?(?=[^0-9a-f=])''')

        dirname = os.path.dirname(self.filepath)
        filename = os.path.basename(self.filepath)

        match = buffer_pattern.search(filename)
        if match is None:
            raise Fatal('Reference .txt filename does not look like a 3DMigoto timestamped Frame Analysis Dump')
        ib_pattern = filename[:match.start()] + '-ib*' + filename[match.end():]
        vb_pattern = filename[:match.start()] + '-vb*' + filename[match.end():]
        ib_paths = glob(os.path.join(dirname, ib_pattern))
        vb_paths = glob(os.path.join(dirname, vb_pattern))
        if len(ib_paths) < 1 or len(vb_paths) < 1:
            raise Fatal('Unable to locate reference files for both vertex buffer and index buffer format descriptions')
        return (vb_paths[0], ib_paths[0])

    def execute(self, context):
        # Options were stashed in this global by Import3DMigotoRaw.execute():
        global migoto_raw_import_options

        try:
            vb_fmt_path, ib_fmt_path = self.get_vb_ib_paths()
            import_3dmigoto_raw_buffers(self, context, vb_fmt_path, ib_fmt_path, **migoto_raw_import_options)
        except Fatal as e:
            self.report({'ERROR'}, str(e))
        return {'FINISHED'}
class Export3DMigoto(bpy.types.Operator, ExportHelper):
    """Export a mesh for re-injection into a game with 3DMigoto"""
    bl_idname = "export_mesh.migoto"
    bl_label = "Export 3DMigoto Vertex & Index Buffers"

    filename_ext = '.vb'
    filter_glob = StringProperty(
            default='*.vb',
            options={'HIDDEN'},
            )

    def execute(self, context):
        try:
            # Derive the .ib/.fmt paths from the chosen .vb path:
            vb_path = self.filepath
            base = os.path.splitext(vb_path)[0]
            ib_path = base + '.ib'
            fmt_path = base + '.fmt'

            # FIXME: ExportHelper will check for overwriting vb_path, but not ib_path
            export_3dmigoto(self, context, vb_path, ib_path, fmt_path)
        except Fatal as e:
            self.report({'ERROR'}, str(e))
        return {'FINISHED'}
def apply_vgmap(operator, context, targets=None, filepath='', commit=False, reverse=False, suffix='', rename=False):
    """Load a .vgmap JSON file and attach it to the target objects as a
    '3DMigoto:VGMap:<suffix>' custom property for use at export time.

    targets: objects to apply to; defaults to the current selection.
    reverse: swap the from/to columns of the map.
    rename:  rename/create vertex groups on the object to match the map.
    commit:  not yet implemented - raises Fatal.
    Raises Fatal if no object is selected.
    """
    if not targets:
        targets = context.selected_objects

    if not targets:
        raise Fatal('No object selected')

    # Fix: close the file handle instead of leaking it until GC:
    with open(filepath, 'r') as f:
        vgmap = json.load(f)

    if reverse:
        vgmap = {int(v):int(k) for k,v in vgmap.items()}
    else:
        vgmap = {k:int(v) for k,v in vgmap.items()}

    for obj in targets:
        if commit:
            raise Fatal('commit not yet implemented')

        prop_name = '3DMigoto:VGMap:' + suffix
        obj[prop_name] = keys_to_strings(vgmap)

        if rename:
            for k,v in vgmap.items():
                # Already present under the source name - leave it alone:
                if str(k) in obj.vertex_groups.keys():
                    continue
                if str(v) in obj.vertex_groups.keys():
                    obj.vertex_groups[str(v)].name = k
                else:
                    obj.vertex_groups.new(str(v))
        if '3DMigoto:VBLayout' not in obj:
            operator.report({'WARNING'}, '%s is not a 3DMigoto mesh. Vertex Group Map custom property applied anyway' % obj.name)
        else:
            operator.report({'INFO'}, 'Applied vgmap to %s' % obj.name)
class ApplyVGMap(bpy.types.Operator, ImportHelper):
    """Apply vertex group map to the selected object"""
    bl_idname = "mesh.migoto_vertex_group_map"
    bl_label = "Apply 3DMigoto vgmap"
    bl_options = {'UNDO'}

    filename_ext = '.vgmap'
    filter_glob = StringProperty(
            default='*.vgmap',
            options={'HIDDEN'},
            )

    #commit = BoolProperty(
    #        name="Commit to current mesh",
    #        description="Directly alters the vertex groups of the current mesh, rather than performing the mapping at export time",
    #        default=False,
    #        )

    rename = BoolProperty(
            name="Rename existing vertex groups",
            description="Rename existing vertex groups to match the vgmap file",
            default=True,
            )

    reverse = BoolProperty(
            name="Swap from & to",
            description="Switch the order of the vertex group map - if this mesh is the 'to' and you want to use the bones in the 'from'",
            default=False,
            )

    suffix = StringProperty(
            name="Suffix",
            description="Suffix to add to the vertex buffer filename when exporting, for bulk exports of a single mesh with multiple distinct vertex group maps",
            default='',
            )

    def invoke(self, context, event):
        # Clear any previously-entered suffix before showing the dialog:
        self.suffix = ''
        return ImportHelper.invoke(self, context, event)

    def execute(self, context):
        try:
            keywords = self.as_keywords(ignore=('filter_glob',))
            apply_vgmap(self, context, **keywords)
        except Fatal as e:
            self.report({'ERROR'}, str(e))
        return {'FINISHED'}
class ConstantBuffer(object):
    """Parse a 3DMigoto constant buffer .txt dump into rows of four floats.

    Only rows numbered start_idx..end_idx (inclusive) are kept; an end_idx
    of 0 means no upper limit.
    """
    def __init__(self, f, start_idx, end_idx):
        self.entries = []
        current = []
        row = 0
        for line in map(str.strip, f):
            # Only value lines look like 'buf[...] ...' / 'cb[...] ...':
            if not (line.startswith('buf') or line.startswith('cb')):
                continue
            current.append(float(line.split()[1]))
            if len(current) != 4:
                continue
            # A full row of 4 components - keep it if within range:
            if row >= start_idx:
                self.entries.append(current)
            else:
                print('Skipping', current)
            current = []
            row += 1
            if end_idx and row > end_idx:
                break
        assert(current == [])

    def as_3x4_matrices(self):
        """Group the rows into 3x4 matrices (three rows per bone)."""
        return [ Matrix(self.entries[i:i+3]) for i in range(0, len(self.entries), 3) ]
def import_pose(operator, context, filepath=None, limit_bones_to_vertex_groups=True, axis_forward='-Z', axis_up='Y', pose_cb_off=[0,0], pose_cb_step=1):
    """Create an armature posed from a 3DMigoto constant buffer dump of
    bone matrices, and bind it to the active object if one is selected.

    Bones are named by vertex group number (i * pose_cb_step).
    NOTE(review): mutable default pose_cb_off=[0,0] - not mutated here.
    """
    pose_buffer = ConstantBuffer(open(filepath, 'r'), *pose_cb_off)

    matrices = pose_buffer.as_3x4_matrices()

    obj = context.object
    if not context.selected_objects:
        obj = None

    if limit_bones_to_vertex_groups and obj:
        matrices = matrices[:len(obj.vertex_groups)]

    name = os.path.basename(filepath)
    arm_data = bpy.data.armatures.new(name)
    arm = bpy.data.objects.new(name, object_data=arm_data)

    conversion_matrix = axis_conversion(from_forward=axis_forward, from_up=axis_up).to_4x4()

    context.scene.objects.link(arm)

    # Construct bones (FIXME: Position these better)
    # Must be in edit mode to add new bones
    arm.select = True
    context.scene.objects.active = arm
    bpy.ops.object.mode_set(mode='EDIT')
    for i, matrix in enumerate(matrices):
        bone = arm_data.edit_bones.new(str(i * pose_cb_step))
        bone.tail = Vector((0.0, 0.10, 0.0))
    bpy.ops.object.mode_set(mode='OBJECT')

    # Set pose (pose bones only exist after leaving edit mode):
    for i, matrix in enumerate(matrices):
        bone = arm.pose.bones[str(i * pose_cb_step)]
        matrix.resize_4x4()
        # Change of basis into the import orientation:
        bone.matrix_basis = conversion_matrix * matrix * conversion_matrix.inverted()

    # Apply pose to selected object, if any:
    if obj is not None:
        mod = obj.modifiers.new(arm.name, 'ARMATURE')
        mod.object = arm
        obj.parent = arm
        # Hide pose object if it was applied to another object:
        arm.hide = True
class Import3DMigotoPose(bpy.types.Operator, ImportHelper, IOOBJOrientationHelper):
    """Import a pose from a 3DMigoto constant buffer dump"""
    bl_idname = "armature.migoto_pose"
    bl_label = "Import 3DMigoto Pose"
    bl_options = {'UNDO'}

    filename_ext = '.txt'
    # Only offer .txt files in the file browser:
    filter_glob = StringProperty(
            default='*.txt',
            options={'HIDDEN'},
            )
    limit_bones_to_vertex_groups = BoolProperty(
            name="Limit Bones to Vertex Groups",
            description="Limits the maximum number of bones imported to the number of vertex groups of the active object",
            default=True,
            )
    # (start, end) entry offsets into the bone constant buffer:
    pose_cb_off = bpy.props.IntVectorProperty(
            name="Bone CB range",
            description='Indicate start and end offsets (in multiples of 4 component values) to find the matrices in the Bone CB',
            default=[0,0],
            size=2,
            min=0,
            )
    pose_cb_step = bpy.props.IntProperty(
            name="Vertex group step",
            description='If used vertex groups are 0,1,2,3,etc specify 1. If they are 0,3,6,9,12,etc specify 3',
            default=1,
            min=1,
            )

    def execute(self, context):
        """Forward the operator properties to import_pose, reporting Fatal errors."""
        try:
            keywords = self.as_keywords(ignore=('filter_glob',))
            import_pose(self, context, **keywords)
        except Fatal as e:
            self.report({'ERROR'}, str(e))
        return {'FINISHED'}
def find_armature(obj):
    """Resolve *obj* to an armature.

    None passes through unchanged, armature objects return themselves, and
    any other object defers to its own find_armature() lookup (which may
    also return None).
    """
    if obj is None:
        return None
    return obj if obj.type == 'ARMATURE' else obj.find_armature()
def copy_bone_to_target_skeleton(context, target_arm, new_name, src_bone):
    """Create a bone named new_name in target_arm and copy src_bone's matrix_basis.

    The target armature must briefly become visible, selected and active to
    enter edit mode; the original hide/select/active state is saved first and
    restored before returning.
    """
    # Save state so the caller's selection/visibility is unaffected:
    is_hidden = target_arm.hide
    is_selected = target_arm.select
    prev_active = context.scene.objects.active
    target_arm.hide = False
    target_arm.select = True
    context.scene.objects.active = target_arm
    # New bones can only be added in edit mode:
    bpy.ops.object.mode_set(mode='EDIT')
    bone = target_arm.data.edit_bones.new(new_name)
    bone.tail = Vector((0.0, 0.10, 0.0))
    bpy.ops.object.mode_set(mode='OBJECT')
    # Pose bone only exists after leaving edit mode; copy the pose over:
    bone = target_arm.pose.bones[new_name]
    bone.matrix_basis = src_bone.matrix_basis
    # Restore prior state:
    context.scene.objects.active = prev_active
    target_arm.select = is_selected
    target_arm.hide = is_hidden
def merge_armatures(operator, context):
    """Merge the armatures of all selected objects into the active object's armature.

    Bones are matched between source and target by identical matrix_basis;
    matched vertex groups are renamed to the target bone, unmatched ones get
    their bone copied into the target skeleton. Finally each source object is
    re-bound to the target armature and the source armature is unlinked.

    Raises Fatal if the active object has no armature.
    """
    target_arm = find_armature(context.object)
    if target_arm is None:
        raise Fatal('No active target armature')
    #print('target:', target_arm)
    for src_obj in context.selected_objects:
        src_arm = find_armature(src_obj)
        # Skip non-armature selections and the target itself:
        if src_arm is None or src_arm == target_arm:
            continue
        #print('src:', src_arm)
        # Create mapping between common bones:
        bone_map = {}
        for src_bone in src_arm.pose.bones:
            for dst_bone in target_arm.pose.bones:
                # Seems important to use matrix_basis - if using 'matrix'
                # and merging multiple objects together, the last inserted bone
                # still has the identity matrix when merging the next pose in
                if src_bone.matrix_basis == dst_bone.matrix_basis:
                    if src_bone.name in bone_map:
                        operator.report({'WARNING'}, 'Source bone %s.%s matched multiple bones in the destination: %s, %s' %
                                (src_arm.name, src_bone.name, bone_map[src_bone.name], dst_bone.name))
                    else:
                        bone_map[src_bone.name] = dst_bone.name
        # Can't have a duplicate name, even temporarily, so rename all the
        # vertex groups first, and rename the source pose bones to match:
        orig_names = {}
        for vg in src_obj.vertex_groups:
            orig_name = vg.name
            vg.name = '%s.%s' % (src_arm.name, vg.name)
            orig_names[vg.name] = orig_name
        # Reassign vertex groups to matching bones in target armature:
        for vg in src_obj.vertex_groups:
            orig_name = orig_names[vg.name]
            if orig_name in bone_map:
                # Matched: take the target bone's name.
                print('%s.%s -> %s' % (src_arm.name, orig_name, bone_map[orig_name]))
                vg.name = bone_map[orig_name]
            elif orig_name in src_arm.pose.bones:
                # FIXME: Make optional
                # Unmatched but posed: copy the bone into the target skeleton.
                print('%s.%s -> new %s' % (src_arm.name, orig_name, vg.name))
                copy_bone_to_target_skeleton(context, target_arm, vg.name, src_arm.pose.bones[orig_name])
            else:
                print('Vertex group %s missing corresponding bone in %s' % (orig_name, src_arm.name))
        # Change existing armature modifier to target:
        for modifier in src_obj.modifiers:
            if modifier.type == 'ARMATURE' and modifier.object == src_arm:
                modifier.object = target_arm
        src_obj.parent = target_arm
        # The source armature is no longer needed in the scene:
        context.scene.objects.unlink(src_arm)
class Merge3DMigotoPose(bpy.types.Operator):
    """Merge identically posed bones of related armatures into one"""
    bl_idname = "armature.merge_pose"
    bl_label = "Merge 3DMigoto Poses"
    bl_options = {'UNDO'}

    def execute(self, context):
        """Run merge_armatures, reporting Fatal errors to the UI instead of raising."""
        try:
            merge_armatures(self, context)
        except Fatal as e:
            self.report({'ERROR'}, str(e))
        return {'FINISHED'}
class DeleteNonNumericVertexGroups(bpy.types.Operator):
    """Remove vertex groups with non-numeric names"""
    bl_idname = "vertex_groups.delete_non_numeric"
    bl_label = "Remove non-numeric vertex groups"
    bl_options = {'UNDO'}

    def execute(self, context):
        """Delete every vertex group whose name is not purely decimal digits.

        Iterates in reverse so removing a group does not shift the indices of
        groups still to be visited.
        """
        try:
            for obj in context.selected_objects:
                for vg in reversed(obj.vertex_groups):
                    if vg.name.isdecimal():
                        continue
                    print('Removing vertex group', vg.name)
                    obj.vertex_groups.remove(vg)
        except Fatal as e:
            self.report({'ERROR'}, str(e))
        return {'FINISHED'}
# Menu callbacks: each appends one operator entry to Blender's File
# Import/Export menus (registered in register() below, but each callback
# stands alone and only references its operator's bl_idname).
def menu_func_import_fa(self, context):
    self.layout.operator(Import3DMigotoFrameAnalysis.bl_idname, text="3DMigoto frame analysis dump (vb.txt + ib.txt)")

def menu_func_import_raw(self, context):
    self.layout.operator(Import3DMigotoRaw.bl_idname, text="3DMigoto raw buffers (.vb + .ib)")

def menu_func_import_pose(self, context):
    self.layout.operator(Import3DMigotoPose.bl_idname, text="3DMigoto pose (.txt)")

def menu_func_export(self, context):
    self.layout.operator(Export3DMigoto.bl_idname, text="3DMigoto raw buffers (.vb + .ib)")

def menu_func_apply_vgmap(self, context):
    self.layout.operator(ApplyVGMap.bl_idname, text="Apply 3DMigoto vertex group map to current object (.vgmap)")
def register():
    """Register every class in this module and hook the File menu entries."""
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_import.append(menu_func_import_fa)
    bpy.types.INFO_MT_file_import.append(menu_func_import_raw)
    bpy.types.INFO_MT_file_export.append(menu_func_export)
    bpy.types.INFO_MT_file_import.append(menu_func_apply_vgmap)
    bpy.types.INFO_MT_file_import.append(menu_func_import_pose)

def unregister():
    """Undo register(): remove menu entries and unregister the module's classes."""
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_import.remove(menu_func_import_fa)
    bpy.types.INFO_MT_file_import.remove(menu_func_import_raw)
    bpy.types.INFO_MT_file_export.remove(menu_func_export)
    bpy.types.INFO_MT_file_import.remove(menu_func_apply_vgmap)
    bpy.types.INFO_MT_file_import.remove(menu_func_import_pose)
if __name__ == "__main__":
register()
| [
"darkstarsword@gmail.com"
] | darkstarsword@gmail.com |
935f188168e56f7d9e289270aa76cbc5f4770897 | 08db28fa3836c36433aa105883a762396d4883c6 | /combine/opencv.py | eb63b33de40c9e1e7a9507722be5ef552d7aa6ad | [] | no_license | xieyipeng/FaceRecognition | 1127aaff0dd121319a8652abcfe8a59a7beaaf43 | dede5b181d6b70b87ccf00052df8056a912eff0f | refs/heads/master | 2022-09-19T07:02:33.624410 | 2020-06-02T03:03:58 | 2020-06-02T03:03:58 | 246,464,586 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,281 | py | # -*- coding: utf-8 -*-
import sys
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from test_mtcnn_wider import face_detect
import cv2
import numpy as np
from test_vgg_ck import emotion
class picture(QWidget):
    """Small PyQt5 window: pick an image, then run face detection + emotion
    classification on it and show the annotated result with OpenCV."""

    def __init__(self):
        super(picture, self).__init__()
        # Path of the currently loaded image and last detection results:
        self.data_path = ''
        self.rectangles = None
        self.resize(300, 400)
        self.setWindowTitle("label显示图片")
        # Preview label the chosen picture is scaled into:
        self.input = QLabel(self)
        self.input.move(50, 90)
        self.input.setFixedSize(200, 300)
        btn = QPushButton(self)
        btn.setText("打开图片")
        btn.move(50, 30)
        btn.clicked.connect(self.openimage)
        det = QPushButton(self)
        det.setText("检测")
        det.move(170, 30)
        det.clicked.connect(self.detect)

    def openimage(self):
        """Let the user choose an image file and display it in the preview label."""
        imgName, imgType = QFileDialog.getOpenFileName(self, "打开图片", "", "*.jpg;;*.png;;All Files(*)")
        jpg = QtGui.QPixmap(imgName).scaled(self.input.width(), self.input.height())
        print(type(jpg))
        self.input.setPixmap(jpg)
        self.data_path = imgName

    def detect(self):
        """Run MTCNN face detection, classify each face's emotion, and show
        the annotated image in an OpenCV window (blocks until a key press)."""
        self.rectangles, points = face_detect(image_path=self.data_path)
        img = cv2.imread(self.data_path)
        for rectangle in self.rectangles:
            print(rectangle)
            # Each rectangle is (x1, y1, width, height, face_score):
            x1, y1, width, height, face_score = rectangle
            x1, y1, x2, y2 = x1, y1, x1 + width, y1 + height
            cv2.putText(img, str(rectangle[4]), (int(rectangle[0]), int(rectangle[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (0, 255, 0))
            # NOTE(review): rectangle[2]/rectangle[3] are width/height per the
            # unpacking above, but they are drawn here as if they were the
            # bottom-right corner (compare x2/y2 computed above) — verify.
            cv2.rectangle(img, (int(rectangle[0]), int(rectangle[1])), (int(rectangle[2]), int(rectangle[3])),
                          (255, 0, 0), 1)
            # Classify the cropped face region:
            category, emotion_score = emotion(img[int(y1):int(y2), int(x1):int(x2)])
            print(category, emotion_score)
            cv2.putText(img, category, (int(x1), int(y1) + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1,
                        cv2.LINE_AA)
        cv2.imshow('image', img)
        cv2.waitKey(0)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
my = picture()
my.move(300, 300)
my.show()
sys.exit(app.exec_())
| [
"3239202719@qq.com"
] | 3239202719@qq.com |
a76b9eac5f02dc7d1cba5dc8630389d9c75deb48 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/3/g5m.py | 910c92044b50634fcd09dc453862a7359afa1fb9 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
# NOTE(review): this file is Python 2 (print statements); it will not run
# under Python 3 without conversion.
def printFunction(lineRemaining):
    # lineRemaining is a list of whitespace-split tokens; print them joined by
    # spaces only when the first token starts with '"' and the last ends with '"'.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # Only the quote tokens remain: print an empty line.
            print
def main(fileName):
    # Interpret each line of the file: lines whose first token is 'g5M' have
    # their remaining tokens passed to printFunction; any other line aborts
    # the whole run with ERROR.
    # NOTE(review): a blank line would make data[0] raise IndexError — verify
    # whether input files can contain empty lines.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'g5M':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
56b182e8d53ba51c9edcdf868deccdbc7b7f37a1 | eb9ed8351d2e0bb4655c5970e91280767703f1a9 | /user_app/admin.py | 8556d66f35b7faaaf72c4fe7c99710b43de13661 | [
"Apache-2.0"
] | permissive | lmyfzx/Django-Mall | b8c03a7d2ddd56cde7f44b2f9bc8c08a486febab | 13cb59130d15e782f78bc5148409bef0f1c516e0 | refs/heads/master | 2023-01-23T10:17:46.477968 | 2020-11-21T12:44:02 | 2020-11-21T12:44:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,980 | py | #
# from django.contrib import admin
# from user_app.model.seller_models import Shoppers, Store
#
#
#
# # Register your models here.
#
#
# @admin.register(Store)
# class StoreAdmin(admin.ModelAdmin):
# list_display = ('store_name', 'shopper_name', 'shop_grade', 'start_time', 'province', 'attention')
# # readonly_fields = ('shopper','shop_grade','attention')
# readonly_fields = ('shop_grade', 'attention', 'province', 'shopper')
#
# def shopper_name(self, obj):
# """商家名称"""
# return obj.shopper.username
#
# shopper_name.short_description = '商家名称'
#
# def has_add_permission(self, request):
# return False
#
# def has_delete_permission(self, request, obj=None):
# return False
#
# def get_queryset(self, request):
# result = super().get_queryset(request)
# if not request.user.is_superuser:
# return result.filter(shopper=request.user)
# return result
#
#
# @admin.register(Shoppers)
# class ShoppersAdmin(admin.ModelAdmin):
# # exclude = ('user',)
# list_display = ('shopper_name', 'head_images', 'phone', 'credit', 'sex', 'is_vip')
# readonly_fields = ('credit', 'is_vip', 'user')
#
# def sex(self, obj):
# """性别"""
# return obj.get_sex_display()
#
# def shopper_name(self, obj):
# """商家名称"""
# return obj.user.username
#
# shopper_name.short_description = '商家名称'
#
# def shopper_email(self, obj):
# """商家邮箱"""
# return obj.user.email
#
# shopper_email.short_description = '邮箱'
#
# def has_add_permission(self, request):
# return False
#
# def has_delete_permission(self, request, obj=None):
# return False
#
# def get_queryset(self, request):
# result = super().get_queryset(request)
# if not request.user.is_superuser:
# return result.filter(user=request.user)
# return result
#
#
| [
"syz247179876@126.com"
] | syz247179876@126.com |
60712320e294e2dfe3916fa779900d93f683284e | 53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61 | /.history/EMR/zhzd_add_20190618133428.py | 2ed7c6ff4cf5a493ab9a2255b061e4b7b4995775 | [] | no_license | cyc19950621/python | 4add54894dc81187211aa8d45e5115903b69a182 | d184b83e73334a37d413306d3694e14a19580cb0 | refs/heads/master | 2020-04-11T20:39:34.641303 | 2019-07-02T12:54:49 | 2019-07-02T12:54:49 | 162,078,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,244 | py | import time
import math
import os
import sys
import os, os.path,shutil
import codecs
import EMRdef
import re
import pandas as pd
# Build the transaction list (ryzd) for association-rule mining: one entry
# per diagnosis file, containing its newline-split items plus the matching
# patient's sex record (matched by identical base filename).
emrtxts = EMRdef.txttq(u'D:\DeepLearning ER\EHRzhzd5')#txt directory listing
emrtxt2s = EMRdef.txttq(u'D:\DeepLearning ER\EHRsex')
ryzd = []
for emrtxt in emrtxts:
    f = open(emrtxt,'r',errors="ignore")#errors= needed for Chinese text
    emrpath = os.path.basename(emrtxt)
    emrpath = os.path.splitext(emrpath)[0]#strip extension to get the record id
    lines=f.readlines()
    lines = ''.join(lines)
    lines = re.sub(' ','',lines)
    # lines is now a list of diagnosis strings, one per original line:
    lines = re.split('\n',lines)
    # NOTE(review): this inner loop re-opens every sex file for every
    # diagnosis file (O(n*m) file opens) and never closes f/f2 — consider
    # pre-loading the sex records into a dict keyed by record id.
    for emrtxt2 in emrtxt2s:
        f2 = open(emrtxt2,'r',errors="ignore")#errors= needed for Chinese text
        emrpath2 = os.path.basename(emrtxt2)
        emrpath2 = os.path.splitext(emrpath2)[0]#strip extension to get the record id
        lines2 = f2.readlines()
        lines2 = ''.join(lines2)
        if emrpath == emrpath2:
            lines.append(lines2)
    ryzd.append(lines)
import orangecontrib.associate.fpgrowth as oaf
def dealRules(rules):
    """Render association rules as 'lhs ==> rhs;<TAB>support;<TAB>confidence' lines.

    Each rule is indexable as (antecedent items, consequent items, support,
    confidence), where the item collections contain strings.
    """
    rendered = []
    for item in rules:
        # Join each side's items with '&', then glue the two statistics on:
        text = '&'.join(item[0]) + ' ==> ' + '&'.join(item[1])
        text = text + ';\t' + str(item[2]) + ';\t' + str(item[3])
        rendered.append(text)
    return rendered
def dealResult(rules):#format rule statistics
    """Render rule-statistics tuples as tab-separated strings.

    Each item is indexable as (antecedent, consequent, then six numeric
    statistics at positions 2..7); the two item collections contain strings.
    """
    rendered = []
    for item in rules:
        text = '&'.join(item[0]) + ' ==> ' + '&'.join(item[1])
        # Append the six statistics in order, each prefixed by ';' + tab:
        for stat in item[2:8]:
            text += ';\t' + str(stat)
        rendered.append(text)
    return rendered
def ResultDFToSave(rules): #build a DataFrame from Orange3 association-rule stats
    """Return a DataFrame with one row per rule: the rule text plus its six
    statistics (count, confidence, coverage, strength, lift, leverage)."""
    rows = []
    for item in rules:
        lhs = '&'.join(str(part) for part in item[0])
        rhs = '&'.join(str(part) for part in item[1])
        rows.append([lhs + ' ==> ' + rhs,
                     item[2], item[3], item[4], item[5], item[6], item[7]])
    return pd.DataFrame(rows, columns=('规则', '项集出现数目', '置信度', '覆盖度', '力度', '提升度', '利用度'))
if __name__ == '__main__':
    # Mine association rules from the EMR transactions at a fixed support /
    # confidence, save the full statistics to Excel, then sweep a 9x9 grid of
    # support/confidence thresholds and record the rule count at each point.
    supportRate = 0.004
    confidenceRate = 0.6
    itemsets = dict(oaf.frequent_itemsets(ryzd, supportRate))
    rules = oaf.association_rules(itemsets, confidenceRate)
    rules = list(rules)
    regularNum = len(rules)
    printRules = dealRules(rules)
    result = list(oaf.rules_stats(rules, itemsets, len(ryzd))) #this call consumes/mutates rules - rules is used up afterwards!
    printResult = dealResult(result)
    #################################################
    # Save the rule statistics to an Excel file:
    dfToSave = ResultDFToSave(result)
    dfToSave.to_excel(r'C:\Users\Administrator\Desktop\2.xlsx')
    #######################################################
    # Count rules for each combination of support and confidence thresholds:
    listTable = []
    supportRate = 0.001
    confidenceRate = 0.1
    for i in range(9):
        support = supportRate*(i+1)
        listS = []
        for j in range(9):
            confidence = confidenceRate*(j+1)
            itemsets = dict(oaf.frequent_itemsets(ryzd, support))
            rules = list(oaf.association_rules(itemsets, confidence))
            listS.append(len(rules))
        listTable.append(listS)
    dfList = pd.DataFrame(listTable,index = [supportRate*(i+1) for i in range(9)],columns=[confidenceRate*(i+1) for i in range(9)])
    dfList.to_excel(r'C:\\Users\Administrator\Desktop\outlunwen.xlsx')
| [
"1044801968@qq.com"
] | 1044801968@qq.com |
de3c80a29cae8376ab53c57e9b03610ba50b9701 | fdafd2ef8a26a3e9ee6a4016ec6272516d64168f | /zeta_python/completed/2161.py | 15de788098d2c3241e90ba87cd8ff4e261a537c9 | [] | no_license | yenru0/CodeObjecct | 322d669d9e70b7202e5e527cda27da0b1e8f273d | b9d5260b973d7435c089c49bc8867be5d2be4d85 | refs/heads/master | 2021-06-28T06:13:57.978205 | 2021-03-13T00:47:53 | 2021-03-13T00:47:53 | 221,762,665 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | from collections import deque
def solve(N):
    """Card-trick simulation: repeatedly discard the top card, then move the
    next card to the bottom. Returns the discard order followed by the last
    remaining card, space-separated (cards are numbered 1..N)."""
    cards = deque(range(1, N + 1))
    order = []
    discard_turn = True  # first action is a discard (original cnt starts odd)
    while len(cards) > 1:
        if discard_turn:
            order.append(cards.popleft())
        else:
            cards.append(cards.popleft())
        discard_turn = not discard_turn
    order.append(cards[0])
    return " ".join(str(card) for card in order)
if __name__ == "__main__":
print(solve(int(input())))
| [
"yenru0604@gmail.com"
] | yenru0604@gmail.com |
797e0c3022cbfa099c71ee491210295c6c2e5f00 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /YnsBcniRG9k77SSvA_10.py | 16e0a01c189baa0333bd56f0825414d0eee0fe66 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | """
Imagine a school that kids attend for 6 years. In each year, there are five
groups started, marked with the letters _a, b, c, d, e_. For the first year,
the groups are _1a, 1b, 1c, 1d, 1e_ and for the last year, the groups are _6a,
6b, 6c, 6d, 6e_.
Write a function that returns the groups in the school by year (as a string),
separated with a comma and a space in the form of `"1a, 1b, 1c, 1d, 1e, 2a, 2b
(....) 5d, 5e, 6a, 6b, 6c, 6d, 6e"`.
### Examples
print_all_groups() ➞ "1a, 1b, 1c, 1d, 1e, 2a, 2b, 2c, 2d, 2e, 3a, 3b, 3c, 3d, 3e, 4a, 4b, 4c, 4d, 4e, 5a, 5b, 5c, 5d, 5e, 6a, 6b, 6c, 6d, 6e "
### Notes
Use nested "for" loops to achieve this, as well as the array of `["a", "b",
"c", "d", "e"]` groups.
"""
def print_all_groups():
lst = ["a", "b", "c", "d", "e"]
r = ''
for n in range(1, 7):
for ch in lst:
if ch == 'e' and n == 6:
break
r += str(n) + ch + ', '
r += '6e'
return r
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
bfc4c70e5c4e5c20ff937ed96aa580ee5ad5d6f9 | 1c3ce4f21ce62ecfe5f0cfe1281ad912645b39bf | /genename2lrgref.py | 89174d67bb82b9aff523d8371876e93dfe7727e1 | [] | no_license | tz2614/softwarecarpentryworkshop | 6694d8d02ebbc71c74786e6ab477bdac716cdccf | c54301c7cc4ea890275f2a159322a4e5f0e39560 | refs/heads/master | 2021-08-08T03:19:18.534877 | 2017-11-09T12:44:08 | 2017-11-09T12:44:08 | 109,675,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,795 | py | def genename2LRGref(gene):
#open and read LRGref list text file
f = open(lrgtext, "r")
#create a list of genenames, and a list of LRGrefs
genenames = []
LRGref_list = []
# iterate over the lines in the file
for line in f:
# split the line into a list of column values
if line startswith ("#"):
pass
if line startswith ("LRG_")
columns = line.split(' ')
# clean any whitespace off the items
columns = [col.strip() for col in columns]
# ensure the column has at least one value before printing
if columns:
print "first", columns[0] # print the first column
print "last", columns[-1] # print the last column
for line in fh:
line = line.rstrip()
#if line starts with ">", add it to seq_names.
if line.startswith(">"):
seq_names.append(line)
#print seq_names
# if seq_data is not empty, add the line to seq_data string
if seq_data:
seq_list.append(seq_data)
seq_data = ""
else:
seq_data += line
#for the last sequence, as there are no more ">" in the text, the sequence
#will not be appended to seq_data, hence this extra line need to be added.
seq_list.append(seq_data)
#check the seqs appended are ok
#print seq_list
#sort seqs according to length
seq_list = sorted(seq_list, key=len)
#print seq_list
#assign shortest seq to a variable
shortestk = seq_list[0]
print shortestk
kmers = []
str_len = len(shortestk)
# Iterate over kmer lengths
for kmer_len in range(1, str_len+1)[::-1]:
#print "Length", kmer_len
# Iterate over start position for that kmer length
for pos in range(0, (str_len-kmer_len)+1):
#print "Start position", pos
#assign the current kmer within shortestk to a variable
kmer = shortestk[pos:(pos+kmer_len)]
#append the kmer to the kmers list
kmers.append(kmer)
#sort the list of kmers according to length, while making it a unique list.
kmers = sorted(set(kmers), key=len)[::-1]
#print kmers
# search each kmer in each fasta sequences, in order, until one is found in every sequence.
# As kmers list start with the longest kmer, this should return the longest kmer within seqs.
for kmer in kmers:
#print "KMER", kmer
kmerfound = True
for seq in seq_list:
#print "SEQ", seq
if kmer not in seq:
kmerfound = False
break
if kmerfound:
print
print kmer
print
return kmer
# NOTE(review): neither 'sharedmotif' nor 'fastafile' is defined anywhere in
# this file (the function above is named genename2LRGref and takes 'gene'),
# so this call raises NameError at import time. Also '~' in the path is not
# expanded automatically — os.path.expanduser would be needed.
textfile = "~/LRGproject/list_LRG_GRCh37.txt"
sharedmotif(fastafile)
| [
"tony_zheng35@hotmail.com"
] | tony_zheng35@hotmail.com |
ea4d3dd5fac2f2a29c107888f8bd6ad956851aad | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/ads/googleads/v5/googleads-py/google/ads/googleads/v5/services/types/campaign_audience_view_service.py | d277ef408ed541f9f6207c7ac87117215b11457c | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,316 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v5.services',
marshal='google.ads.googleads.v5',
manifest={
'GetCampaignAudienceViewRequest',
},
)
class GetCampaignAudienceViewRequest(proto.Message):
r"""Request message for
[CampaignAudienceViewService.GetCampaignAudienceView][google.ads.googleads.v5.services.CampaignAudienceViewService.GetCampaignAudienceView].
Attributes:
resource_name (str):
Required. The resource name of the campaign
audience view to fetch.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
c1b06abef4ce43d403bca1d6da1c63f136747d47 | 2b6715706ca85570e23188d7ffcb716e8e204f1b | /00Python代码/03Pentest_通过搜索引擎搜索关键字/enginesearchV2.0.py | af27d72c3cea7ef56fee73b6fcba746af7592aaf | [] | no_license | thinks520/CodeRecord | 3c2e9e11082ec305dc3352a2b4a034795f2b2182 | a9d32b9761de7a21030765a9f4ad41df94b88c63 | refs/heads/master | 2020-03-23T14:27:39.166999 | 2018-07-19T14:12:25 | 2018-07-19T14:12:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | #!/usr/bin/python
#coding:utf-8
import requests
from lxml import etree
# Python 2 script: for each keyword in fangong.txt, query Baidu restricted to
# a site, and record the query URL whenever Baidu's "no results" banner
# (div.nors p) is absent (IndexError on the empty xpath result).
input=open('fangong.txt','r')
output1=open('result-10086.txt','w+')
for param in input:
    baseurl='http://www.baidu.com/s?wd=inurl:10086.cn intext:"'+param+'"&rsv_spt=1&rsv_iqid=0xfbe8f0570001a8a4&issp=1&f=8&rsv_bp=0&rsv_idx=2&ie=utf-8&tn=baiduhome_pg&rsv_enter=1&rsv_sug3=1'
    s=requests.get(baseurl)
    r=s.content
    html=etree.HTML(r)
    # Baidu renders div.nors when the query has no results:
    result=html.xpath('//div[@class="nors"]/p')
    try:
        print result[0].text,param
    except IndexError:
        # No "no results" banner => the query found something; log its URL.
        print baseurl
        output1.writelines(baseurl+'\n'+'\n')
output1.close()
# NOTE(review): 'input' was fully consumed by the loop above, so this second
# loop never executes and result-12582.txt stays empty — input.seek(0) (or
# re-opening the file) is needed before iterating again.
output2=open('result-12582.txt','w+')
for param in input:
    baseurl='http://www.baidu.com/s?wd=inurl:12582.cn intext:"'+param+'"&rsv_spt=1&rsv_iqid=0xfbe8f0570001a8a4&issp=1&f=8&rsv_bp=0&rsv_idx=2&ie=utf-8&tn=baiduhome_pg&rsv_enter=1&rsv_sug3=1'
    s=requests.get(baseurl)
    r=s.content
    html=etree.HTML(r)
    result=html.xpath('//div[@class="nors"]/p')
    try:
        print result[0].text,param
    except IndexError:
        print baseurl
        output2.writelines(baseurl+'\n'+'\n')
output2.close()
input.close()
"ljressrg@gmail.com"
] | ljressrg@gmail.com |
87e16b4002f252ff4e14773750a8d2a08e3f95b5 | 17ca5bae91148b5e155e18e6d758f77ab402046d | /M_BH_relation/read_MBH_form.py | 569af31bc6704cb0978eb63f565b592b46f0f811 | [] | no_license | dartoon/QSO_decomposition | 5b645c298825091c072778addfaab5d3fb0b5916 | a514b9a0ad6ba45dc9c3f83abf569688b9cf3a15 | refs/heads/master | 2021-12-22T19:15:53.937019 | 2021-12-16T02:07:18 | 2021-12-16T02:07:18 | 123,425,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,249 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 26 09:34:27 2018
@author: Dartoon
"""
import numpy as np
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
f = open("fmos_MBH_table","r")
with f as g:
lines = g.readlines()
porp_list = lines[0].replace('#','').split(' ')
samples = [lines[i].split(' ') for i in range(1,len(lines))]
#for i in range(len(samples)):
ID = ['CDFS-1', 'CID543','CID70', 'SXDS-X735', 'CDFS-229', 'CDFS-321', 'CID1174',\
'CID216', 'CID237','CID3242','CID3570','CID452', 'CID454',\
'CID50','CID607','LID1273', 'LID1538','LID360','SXDS-X1136',\
'SXDS-X50', 'SXDS-X717','SXDS-X763','SXDS-X969','XID2138','XID2202',\
'XID2396', 'CID206', 'ECDFS-358',\
]
#==============================================================================
# ############ load the find the serial NO. for the list##########
#==============================================================================
ID_ser_dic = {}
#XID2202 to LID1622
#XID2138 to LID1820
#XID2396 to LID1878
#CDFS321 to ECDFS321
MB_ID = ['CDFS-1', 'CID543','CID70', 'SXDS-X735', 'CDFS-229', 'ECDFS-321', 'CID1174',\
'CID216', 'CID237','CID3242','CID3570','CID452', 'CID454',\
'CID50','CID607','LID1273', 'LID1538','LID360','SXDS-X1136',\
'SXDS-X50', 'SXDS-X717','SXDS-X763','SXDS-X969','LID1820','LID1622',\
'LID1878', 'CID206', 'ECDFS-358']
for j in range(len(ID)):
count = 0
for i in range(len(samples)):
if samples[i][1] == MB_ID[j]:
ID_ser_dic.update({ID[j]:i})
count += 1
if count == 0:
ID_ser_dic.update({ID[j]: -99})
##==============================================================================
## Print on the props on one sample
##==============================================================================
#tar_in = 3
#t_name = ID[tar_in]
#ser = ID_ser_dic[t_name]
#print "information for {0}".format(t_name)
#for i in range(len(samples[0])):
# print 'serial{0}'.format(i), porp_list[i], samples[ser][i]
#
##==============================================================================
## Comparing the Ha Hadr, Hb, Hbdr
##==============================================================================
#for target in ID:
# t_name = target
# if ID_ser_dic[t_name] != -99:
# ser = ID_ser_dic[t_name]
# print 'target, Ha Hadr, Hb, Hbdr', float(samples[ser][5])-float(samples[ser][6]), float(samples[ser][12])-float(samples[ser][13])
#
#for tar_in in range(len(ID)):
# #==============================================================================
# # test M_BH by Ha
# #6.71+0.48*(43.73806-42)+2.12*np.log10(4481.164/1000)
# #==============================================================================
# t_name = ID[tar_in]
# ser = ID_ser_dic[t_name]
# print "Cal MBH_Ha for {0}".format(t_name)
# if samples[ser][10] != 0:
# FWMH_a = float(samples[ser][8])
# logLHadr = float(samples[ser][6])
# cal_logMa = 6.71+0.48*(logLHadr-42)+2.12*np.log10(FWMH_a/1000)
# print float(cal_logMa) - float(samples[ser][10])
#
# #==============================================================================
# # test M_BH by Ha
# #6.71+0.48*(43.73806-42)+2.12*np.log10(4481.164/1000)
# #==============================================================================
# t_name = ID[tar_in]
# ser = ID_ser_dic[t_name]
# if samples[ser][21] != 0:
# FWMH_b = float(samples[ser][19])
# logL5100dr = float(samples[ser][16])
# cal_logMb = 6.91+0.5*(logL5100dr-44)+2.*np.log10(FWMH_b/1000)
# if float(samples[ser][21]) != 0:
# print "Cal MBH_Hb for {0}".format(t_name)
# print float(cal_logMb) - float(samples[ser][21])
# Compare the two single-epoch virial black-hole mass estimates for every
# source that has both measurements (the commented block above documents the
# same Halpha/Hbeta formulas and column meanings).
diff = []
for i in range(len(samples)):
    # Columns 10 and 21 hold the catalogued Ha- and Hb-based masses; a value
    # of 0 marks "not measured", so require both to be present:
    if float(samples[i][10]) != 0 and float(samples[i][21]) != 0:
        print i
        # Halpha-based mass: FWHM (col 8) and dereddened line luminosity (col 6)
        FWMH_a = float(samples[i][8])
        logLHadr = float(samples[i][6])
        cal_logMa = 6.71+0.48*(logLHadr-42)+2.12*np.log10(FWMH_a/1000)
        # Hbeta-based mass: FWHM (col 19) and dereddened L5100 (col 16)
        FWMH_b = float(samples[i][19])
        logL5100dr = float(samples[i][16])
        cal_logMb = 6.91+0.5*(logL5100dr-44)+2.*np.log10(FWMH_b/1000)
        diff.append(cal_logMa-cal_logMb)
print diff
diff = np.asarray(diff)
"dingxuheng@mail.bnu.edu.cn"
] | dingxuheng@mail.bnu.edu.cn |
187862d60f6dd31623819405c6071193862cdcef | 08ab565444e52429c3ece47ee0bc014b0a04e08a | /backend/feedback/api/serializers.py | bdf744970ff552452828d7676fd6eabfa1b62c91 | [] | no_license | ScrollPage/Hackatom | 70bb2246df3c0cd7da51dbab941d3303a831f887 | 5bbeb5bf4936502f5d6d8e8b400f912583ab9d4e | refs/heads/main | 2023-03-20T05:06:07.505978 | 2021-03-15T16:58:42 | 2021-03-15T16:58:42 | 336,541,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | from rest_framework import serializers
from feedback.models import Rating
class RatingSerializer(serializers.ModelSerializer):
    '''Serialize a rating ("like") for an initiative.'''
    class Meta:
        model = Rating
        fields = ['star', 'initiative']

    def create(self, validated_data):
        """Create or update the requesting user's rating.

        update_or_create keyed on (appraiser, initiative) guarantees each
        user has at most one Rating per initiative; repeated submissions
        just overwrite the star value.
        """
        rating, _ = Rating.objects.update_or_create(
            appraiser=self.context['request'].user,
            initiative=validated_data.get('initiative', None),
            defaults={'star': validated_data.get('star')}
        )
        return rating
"54814200+reqww@users.noreply.github.com"
] | 54814200+reqww@users.noreply.github.com |
1644959f163792831f61fb27b876ea4ff1202e1b | 685f0f77fe85a15eb3835ff8cf457f5247e63205 | /backend/pickme_28419/wsgi.py | d6ee64c18c150f961af89cc9437ec158a87aba81 | [] | no_license | crowdbotics-apps/pickme-28419 | 7cdb7d226ea250fddbd93d0499e454ab6e889eab | 150334826c29a3ce8e05ef80442d8870303f22f2 | refs/heads/master | 2023-06-07T02:20:26.895149 | 2021-07-04T01:17:02 | 2021-07-04T01:17:02 | 382,738,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for pickme_28419 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pickme_28419.settings')
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
7500c1826864a576d104db5f293a7f1ef125c122 | 31e90bec77ca264efd0867df1a8fceaa68e2749e | /chat/consumers.py | f141219d1ca17b646cdb312a39ab2b88e33646d5 | [] | no_license | safwanvk/ZabChat | c4621d45132b195bcd625647e04757e874730c98 | fb542062a1bd32e66d026e5cd0924ba166a50aec | refs/heads/master | 2022-11-19T20:32:23.201765 | 2020-07-17T15:22:01 | 2020-07-17T15:22:01 | 280,459,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | import json
from channels.generic.websocket import AsyncWebsocketConsumer
class ChatConsumer(AsyncWebsocketConsumer):
    """WebSocket consumer for a chat room: every connection joins the channel
    group 'chat_<room_name>' and messages are fanned out to the whole group."""

    async def connect(self):
        # Room name comes from the URL route's kwargs:
        self.room_name = self.scope['url_route']['kwargs']['room_name']
        self.room_group_name = 'chat_%s' % self.room_name

        # Join room group
        await self.channel_layer.group_add(
            self.room_group_name,
            self.channel_name
        )

        await self.accept()

    async def disconnect(self, close_code):
        # Leave room group
        await self.channel_layer.group_discard(
            self.room_group_name,
            self.channel_name
        )

    # Receive message from WebSocket
    async def receive(self, text_data):
        """Parse an incoming client frame and broadcast it to the room group."""
        data = json.loads(text_data)
        message = data['message']

        # Send message to room group; 'type' selects the chat_message handler
        # on every consumer in the group:
        await self.channel_layer.group_send(
            self.room_group_name,
            {
                'type': 'chat_message',
                'message': message
            }
        )

    # Receive message from room group
    async def chat_message(self, event):
        """Forward a group-broadcast message down this consumer's WebSocket."""
        message = event['message']

        # Send message to WebSocket
        await self.send(text_data=json.dumps({
            'message': message
        }))
| [
"safwanvalakundil@gmail.com"
] | safwanvalakundil@gmail.com |
882c42cdf5dd921eca2da25a39ae8e01e16541c7 | 7df0845fdfb8597e2ed45b87a28fa61be9b63db7 | /0x0F-python-object_relational_mapping/14-model_city_fetch_by_state.py | b1f69f3fe68067a294fac544fef07864c61df95c | [] | no_license | yawzyag/holbertonschool-higher_level_programming | 0663903ea947b26e42b70892cd8ba8b1d6ef4af6 | 81036be0d13f22175f103f81fcddbf88308413c2 | refs/heads/master | 2020-05-18T02:06:39.595953 | 2019-09-26T21:59:14 | 2019-09-26T21:59:14 | 184,106,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | #!/usr/bin/python3
"""Start link class to table in database
"""
import sys
from model_state import Base, State
from model_city import City
from sqlalchemy import desc, asc
from sqlalchemy import (create_engine)
from sqlalchemy.orm import sessionmaker
if __name__ == "__main__":
    # Credentials and schema come from the command line:
    # argv[1] = user, argv[2] = password, argv[3] = database name.
    user, password, db_name = sys.argv[1], sys.argv[2], sys.argv[3]
    engine = create_engine(
        'mysql+mysqldb://{}:{}@localhost/{}'.format(user, password, db_name),
        pool_pre_ping=True)
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    # One (State, City) pair per city, printed as "<state>: (<city id>) <city>".
    for state, city in session.query(State, City).join(City).all():
        print("{}: ({}) {}".format(state.name, city.id, city.name))
    session.close()
| [
"yesid.yag@gmail.com"
] | yesid.yag@gmail.com |
6f55109e0fb795a6e582f8b960aa89fa57ad4294 | 61ef327bd1d5ff6db7595221db6823c947dab42b | /FlatData/ScenarioCharacterEmotionExcelTable.py | 2d37dc38d982bf713f5b0563564912d8e67edd9b | [] | no_license | Aikenfell/Blue-Archive---Asset-Downloader | 88e419686a80b20b57a10a3033c23c80f86d6bf9 | 92f93ffbdb81a47cef58c61ec82092234eae8eec | refs/heads/main | 2023-09-06T03:56:50.998141 | 2021-11-19T12:41:58 | 2021-11-19T12:41:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,709 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: FlatData
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class ScenarioCharacterEmotionExcelTable(object):
    """FlatBuffers accessor for the ScenarioCharacterEmotionExcelTable root table.

    Zero-copy reader: an instance only stores a table handle (`_tab`) into
    the underlying byte buffer.
    """
    __slots__ = ['_tab']
    @classmethod
    def GetRootAs(cls, buf, offset=0):
        """Position a new accessor on the root table stored in `buf` at `offset`."""
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ScenarioCharacterEmotionExcelTable()
        x.Init(buf, n + offset)
        return x
    @classmethod
    def GetRootAsScenarioCharacterEmotionExcelTable(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    # ScenarioCharacterEmotionExcelTable
    def Init(self, buf, pos):
        # Attach this accessor to table data at `pos` inside `buf`.
        self._tab = flatbuffers.table.Table(buf, pos)
    # ScenarioCharacterEmotionExcelTable
    def DataList(self, j):
        """Return element `j` of the DataList vector, or None if the field is absent."""
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            # Local import -- presumably to avoid circular imports between
            # generated FlatData modules (TODO confirm).
            from FlatData.ScenarioCharacterEmotionExcel import ScenarioCharacterEmotionExcel
            obj = ScenarioCharacterEmotionExcel()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None
    # ScenarioCharacterEmotionExcelTable
    def DataListLength(self):
        """Number of entries in the DataList vector (0 when the field is absent)."""
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0
    # ScenarioCharacterEmotionExcelTable
    def DataListIsNone(self):
        """True when the DataList field is not present in the buffer."""
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0
# Module-level builder helpers for writing a ScenarioCharacterEmotionExcelTable.
def Start(builder): builder.StartObject(1)
def ScenarioCharacterEmotionExcelTableStart(builder):
    """This method is deprecated. Please switch to Start."""
    return Start(builder)
def AddDataList(builder, DataList): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(DataList), 0)
def ScenarioCharacterEmotionExcelTableAddDataList(builder, DataList):
    """This method is deprecated. Please switch to AddDataList."""
    return AddDataList(builder, DataList)
def StartDataListVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def ScenarioCharacterEmotionExcelTableStartDataListVector(builder, numElems):
    """This method is deprecated. Please switch to Start."""
    return StartDataListVector(builder, numElems)
def End(builder): return builder.EndObject()
def ScenarioCharacterEmotionExcelTableEnd(builder):
    """This method is deprecated. Please switch to End."""
    return End(builder)
"rkolbe96@gmail.com"
] | rkolbe96@gmail.com |
9b7254d8e461320bcfbb1dcb7d9fe00fccd73a73 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/54/usersdata/75/24223/submittedfiles/av1_p2_civil.py | 24b2b449ae48b2fda2bd494854cfe8e0fa7b3cee | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | # -*- coding: utf-8 -*-
from __future__ import division
def abs(x):
    """Return the absolute value of x (NOTE: shadows the built-in abs)."""
    return -x if x < 0 else x
def maior(lista):
    """Return the largest element of lista (IndexError on an empty list)."""
    largest = lista[0]
    for value in lista:
        if value > largest:
            largest = value
    return largest
def menor(lista):
    """Return the smallest element of lista (IndexError on an empty list)."""
    smallest = lista[0]
    for value in lista:
        if value < smallest:
            smallest = value
    return smallest
def altura(lista, altura):
    """Return |max(lista) - altura| + |min(lista) - altura| using the local helpers."""
    return abs(maior(lista) - altura) + abs(menor(lista) - altura)
n = input('Digite a quantidade de pinos da fechadura:')
m = input('DIgite a altura para desbloqueio:')
a = []
for i in range(0, n, 1):
    a.append(input('Digite a altura de cada pino:'))
# BUG FIX: the original printed altura(lista, m), but no variable named
# 'lista' exists at module scope -- the pin heights were collected in 'a'.
print('%1.d' % (altura(a, m)))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d484f72f6efc4eb7626c7aacb752aea1c75a0017 | 76e931912629c37beedf7c9b112b53e7de5babd7 | /3-mouth04/总/project/day06/ddblog/topic/models.py | 167616ddd78fc3b638a7041c6b35f892c7c7bbd1 | [
"Apache-2.0"
] | permissive | gary-gggggg/gary | c59ac21d8e065f296ff986d11a0e4cbf186a1bc4 | d8ba30ea4bc2b662a2d6a87d247f813e5680d63e | refs/heads/main | 2023-02-23T06:54:34.500683 | 2021-02-01T10:17:02 | 2021-02-01T10:17:02 | 334,905,744 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | from django.db import models
# Create your models here.
from user.models import UserProfile
class Topic(models.Model):
    """A blog article written by a single UserProfile."""
    title = models.CharField('文章标题', max_length=50)
    category = models.CharField('文章分类', max_length=20)
    # visibility values: public / private
    limit = models.CharField('文章权限', max_length=20)
    introduce = models.CharField('文章简介', max_length=50)
    content = models.TextField('文章内容')
    created_time = models.DateTimeField(auto_now_add=True)
    updated_time = models.DateTimeField(auto_now=True)
    # Many-to-one foreign key: each topic belongs to one UserProfile;
    # deleting the profile cascades to its topics.
    user_profile = models.ForeignKey(UserProfile,
                                     on_delete=models.CASCADE)
| [
"673248932@qq.com"
] | 673248932@qq.com |
191612a710061c4420bd155af34bf77bd96caf71 | 43aece1d354d6cfbacb22f30eae1d7ff1de83b09 | /moka/tests.py | 149ecb173d749b1edea6f1a87601cadd5598576a | [
"MIT"
] | permissive | harpiya/moka | 230342914b897fd373890e90497ca52acc8f7a96 | 220c4501dc37cc8db8213f2275a071d7b5bc3f69 | refs/heads/master | 2020-04-17T09:43:00.771430 | 2019-01-20T18:52:51 | 2019-01-20T18:52:51 | 166,470,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | # @Author: Saadettin Yasir AKEL <developer>
# @Date: 2019-01-20T17:55:20+03:00
# @Email: yasir@harpiya.com
# @Project: Harpiya Kurumsal Yönetim Sistemi
# @Filename: tests.py
# @Last modified by: developer
# @Last modified time: 2019-01-20T18:33:16+03:00
# @License: MIT License. See license.txt
# @Copyright: Harpiya Yazılım Teknolojileri
from unittest import TestCase
from . import moka_checkout
import frappe
class TestMokaCheckout(TestCase):
    """Unit test for moka_checkout.set_moka_checkout."""

    def test_set_moka_checkout(self):
        moka_checkout.set_moka_checkout(100, "USD")
        # assertEquals was a deprecated alias and was removed in Python 3.12;
        # use the canonical assertEqual instead.
        self.assertEqual(frappe.local.response["type"], "redirect")
| [
"yasir@harpiya.com"
] | yasir@harpiya.com |
4b9bd5ab17684670b1321f414c01d15bf2d024c0 | 95363198c3af43c3cc74ef8131d556e566c75f7a | /diffusion_utils/utils/paths.py | acdad56e8e51202469e6d1b71805a9cb81a86b6c | [] | no_license | shinypond/multinomial_diffusion | 59fde847fd0afea3c4668d9481ea9e4e646be787 | 66f17340e4cd200059bff228cf98a597bf084c26 | refs/heads/main | 2023-08-03T15:23:47.578895 | 2021-09-11T12:10:15 | 2021-09-11T12:10:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | import os
import sys
def add_parent_path(level=1):
    """Prepend to sys.path the directory `level` levels above the script's directory."""
    ancestor = os.path.dirname(os.path.realpath(sys.argv[0]))
    for _ in range(level):
        ancestor = os.path.dirname(ancestor)
    sys.path.insert(0, ancestor)
def add_parent_paths(levels=(1, 2)):
    """Prepend several ancestor directories (see add_parent_path) to sys.path.

    The default used to be the mutable list [1, 2]; an immutable tuple gives
    identical iteration behaviour without the shared-mutable-default pitfall.
    """
    for level in levels:
        add_parent_path(level=level)
| [
"e.hoogeboom@gmail.com"
] | e.hoogeboom@gmail.com |
ba423c85f30eb13f5611de3d74f83b05a0a19409 | 7ef5bb39938e669b5571a097f01d96ee53458ad6 | /clone_graph/solution2.py | ef2aa47e3dce48a03fd9dbd320701e39f4c2005d | [
"BSD-2-Clause"
] | permissive | mahimadubey/leetcode-python | 61cd135515b26644197b4736a92a53bb1a5870a6 | 38acc65fa4315f86acb62874ca488620c5d77e17 | refs/heads/master | 2020-08-29T09:27:45.232412 | 2019-10-28T08:06:52 | 2019-10-28T08:06:52 | 217,993,547 | 0 | 0 | BSD-2-Clause | 2019-10-28T07:55:38 | 2019-10-28T07:55:38 | null | UTF-8 | Python | false | false | 1,272 | py | """
Clone an undirected graph. Each node in the graph contains a label and a list
of its neighbors.
"""
# Definition for a undirected graph node
# class UndirectedGraphNode(object):
# def __init__(self, x):
# self.label = x
# self.neighbors = []
class Solution(object):
    def cloneGraph(self, node):
        """
        :type node: UndirectedGraphNode
        :rtype: UndirectedGraphNode
        DFS

        Deep-copies the connected graph reachable from `node`.
        `self.d` maps each original node to its clone; `self.visited`
        prevents revisiting nodes, so cyclic graphs terminate.
        """
        if node is None:
            return None
        self.visited = set()
        cloned_node = UndirectedGraphNode(node.label)
        self.d = {node: cloned_node}
        self.visit(node)
        return self.d[node]
    def visit(self, node):
        # Recursive DFS: build the clone's neighbor list, creating clones for
        # neighbors seen for the first time, then recurse into each neighbor.
        if node not in self.visited:
            self.visited.add(node)
            cloned_node = self.d[node]
            cloned_neighbors = []
            for neighbor in node.neighbors:
                if neighbor not in self.d:
                    cloned_neighbor = UndirectedGraphNode(neighbor.label)
                    self.d[neighbor] = cloned_neighbor
                else:
                    cloned_neighbor = self.d[neighbor]
                cloned_neighbors.append(cloned_neighbor)
                self.visit(neighbor)
            # Assign once, after the full neighbor list is built.
            cloned_node.neighbors = cloned_neighbors
| [
"shichao.an@nyu.edu"
] | shichao.an@nyu.edu |
d022f4d82a5a6e613b95c709b5771d693fb92b4c | 9b02c05a71be741d8c33b59890a1fc9af51b3ba8 | /items_Log_to_Board.py | bb1b6b0ffa75920cf510403be43682fd93da452e | [] | no_license | craymaru/ultima-online-razor-enhanced-scripts | a524cfcfcf5ae0c780528a4cc95005a4817655f5 | 2190b6424480d035c1ea8adda97a956f66e1609d | refs/heads/master | 2022-12-10T09:21:43.664742 | 2022-11-27T15:17:11 | 2022-11-27T15:17:11 | 240,029,926 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | Misc.SendMessage("AXE?", 53)
axe_serial = Target.PromptTarget()  # player targets the axe used for chopping
log_id = 0x1BDD    # item type id: raw logs
board_id = 0x1BD7  # item type id: boards
# hue -> wood name for every log colour this script converts
log_colors = {
    0x0000: "Log",
    0x07da: "Ork",
    0x04a7: "Ash",
    0x04a8: "Yew",
    0x04a9: "Heartwood",
    0x04aa: "Bloodwood",
    0x047f: "Frostwood"
}
# DEFINES
def PutItemToBank(item_id, amount):
    """Move `amount` of the first matching backpack item into the bank box."""
    found = Items.FindByID(item_id, -1, Player.Backpack.Serial)
    if not found:
        return
    Items.Move(found, Player.Bank.Serial, amount)
    Misc.Pause(700)
def GetItemFromBank(item_id, amount):
    """Move `amount` of the first matching bank item into the backpack."""
    found = Items.FindByID(item_id, -1, Player.Bank.Serial)
    if not found:
        return
    Items.Move(found, Player.Backpack.Serial, amount)
    Misc.Pause(500)
def LogToBoard(axe_serial):
    """Use the axe on one backpack log of every known colour, turning logs into boards."""
    for hue in log_colors:
        log_item = Items.FindByID(log_id, hue, Player.Backpack.Serial)
        if not log_item:
            continue
        Target.Cancel()
        Misc.Pause(50)
        Misc.SendMessage(log_id)
        Items.UseItem(axe_serial)
        Target.WaitForTarget(1000, False)
        Target.TargetExecute(log_item)
        Misc.Pause(500)
Player.ChatSay(12, "bank")  # speak "bank" to open the bank box near a banker
# Main loop: convert logs to boards, deposit the boards, restock 200 logs.
while True:
    LogToBoard(axe_serial)
    PutItemToBank(board_id, 0)   # amount 0 -- presumably the whole stack; confirm RazorEnhanced semantics
    GetItemFromBank(log_id, 200)
"craymaru@gmail.com"
] | craymaru@gmail.com |
6f8e46a58facc95761a795ca1509a66b63b362aa | 7d43ba52d958537905cfdde46cc194a97c45dc56 | /PL/Python/library/operating_system/standart_library/os.py | 41f4e75f18ffc7a0d2aa74c2dc8d78be54553b8e | [] | no_license | Koshmatova/workbook | 3e4d1f698a01f2be65c1abc83ee251ebc8a6bbcd | 902695e8e660689a1730c23790dbdc51737085c9 | refs/heads/master | 2023-05-01T02:30:46.868027 | 2021-05-10T03:49:08 | 2021-05-10T03:49:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,187 | py | os
#по сути запускает nt на win и posix На других ос
#СОДЕРЖИТ дескрипторные файлы
.name:'posix'|'nt'|'mac'|'os2'|'ce'|'java'
#имя ос
.environ:dict
#mutable словарь v окружения
#obj сопоставления(proxy?) с dict v пользовательской среды
#позволяет добавлять/удалять v окружения
.getenv(key, default=None)
#получение v окружения, в ОТЛ от .environ !>> exept при !СУЩ v
#~ os.environ.get("key")
.getlogin()
#Unix:username вошедшего в терминал
.getpid() -> current_process_pid
.uname()
#информация об ос
#>> obj ВКЛЮЧ attrs
.sysname
#os name
.nodename
#имя машины в сети
#?определяется реализацией
.release
#?релиз
.version
.machine
#?id машины
.access(path, mode, *, dir_fd=None, effective_ids=False, follow_symlinks=True)
#проверка доступа текущего пользователя к obj
#?флаги(mode?)
os.F_OK
#obj СУЩ
os.R_OK
#obj доступен на чтение
os.W_OK
#obj доступен на запись
os.X_OK
#obj доступен на exe
.chdir(path)
#смена текущей dir
.chmod(path, mode, *, dir_fd=None, follow_symlinks=True)
#ИЗМ прав доступа к obj
mode
#oct int
.chown(path, uid, gid, *, dir_fd=None, follow_symlinks=True)
#Unix:ИЗМ id владельца & группы
.getcwd() -> current_working_dir
.link(src, dst, *, src_dir_fd=None, dst_dir_fd=None, follow_symlinks=True)
#создание жеской ссылки
.listdir(path=".")
#список файлов и dirs в директории
.makedirs()
#
.mkdir(path, mode=0o777, exist_ok=False)
#создание dir с созданием промежуточных директорий
.remove(path, *, dir_fd=None)
#?удаление файла
.rename(src, dst, *, src_dir_fd=None, dst_dir_fd=None)
#переимонование obj
#без замены в случае СУЩ?
.renames(old, new)
#переименование с создание промежуточных dirs
#без замены в случае СУЩ?
.replace(src, dst, *, src_dir_fd=None, dst_dir_fd=None)
#переимонование с принудительной заменой
.rmdir(path, *, dir_fd=None)
#удаление пустой dir
.removedirs(path)
#рекурсивно(?{xn}) удаляет директории в пути от последней пока они пусты
.putenv()
#
.symlink(source, link_name, target_is_directory=False, *, dir_fd=None)
#создать симлинк
.startfile()
#
.sync()
#Unix:запись ВСЕХ данных на диск
.truncate(path, length)
#обрезать файл до length
	.utime(path, times=None, *, ns=None, dir_fd=None, follow_symlinks=True)
#ИЗМ времени последнего доступа/ИЗМ файла
times:(access:secs, mod:secs)
#исключает ns
ns:(access:nsecs, mod:nsecs)
#исключает times
	.popen(cmd,mode='r',buffering=-1)
#запуск команд оболочки и(в отличие от .system) предоставляет интерфейс(подобный файлам) для чтения/записи в их стандартные потоки данных в отдельном процессе,подключается к потокам вывода программы
#похож на fileobj но значетельно отличается интерфейсами
f = os.popen('dir')
f >> <os._wrap_close obj ...>
f.readline() >> ' ’®¬ ў гбва®©б⢥ C \xadҐ Ё¬ҐҐв ¬ҐвЄЁ.\n'
bytes(f.readline(),'866') >> build\n #Не сработает для кириллицы
#or
os.popen('chcp') >> 866
os.popen('chcp 65001')
os.popen('chcp') >> 65001 #кодировка сохраняется для одного процесса
#как это закрепить?
#2.X кажется имеет свой итератор
f = os.popen()
f is iter(f) >> True
f.next() >> #работает
next(f) >> #работает
#3.x не имеет своего итератора, но умеет next
f = os.popen('dir')
f >> <os._wrap_close obj ...>
f.__next__() >> #работает
next(f) >> TypeErr: '_wrap_close' obj is not an iterator
f is iter(f) >> False #действительно не итератор
#истощается ?
f = os.popen('chcp')
f.readline() >> 'Active character encoding 866'
f.readline() >> ''
f.seek(0) >> ''
os.popen('systeminfo') >> запускает консоль(внутри py.exe),но сжирает весь вывод - результат не отображается без ручного вывода
#выполняет команду, возвращает <os._wrap_close obj> и продолжает выполнение в отличие от os.system
#может использоваться для
чтения результатов сконструированной командной строки фиксирующей продолжительнсть выполнения кода(timeit?)(лутц:глава 21)
сравнение выводов тестируемых сценариев(лутц: глава 25)
.open() -> int #descriptor
#open a file a low level io
.walk(top, topdown=True, onerror=None, followlinks=False) -> <generator object walk>
#генерация имен файлов дерева каталогов
topdown
topdown=True
#генерация сверху вниз
#для КАЖД каталога >> (path, dirs_list, files_list)
#старндартная генераторная(yield+yield from) fx рекурсивного прохода по каталогам -> не требует ожидания обхода всего дерева, а выдает результаты по мере поступления
#возвращает кортеж кортежей вида
(('dir_name',(contain_dir,...),[files,...]),...)
#на КАЖД уровне дерева каталогов
.system(command) -> int
#ПРИНАДЛЕЖ nt
#запуск команды оболочки в отдельном процессе, ждет завершения процесса прежде чем возобновить поток выполнения
#возвращает 0 в случае успеха, 1 в случае неудачи, например отсутсвие команды;не возвращает вывод и не открывает консольные окна
os.system('notepad') >> открывает notepad и ждет
os.system('cmd') >> idle:ждет;из файла - запускает cmd
os.system('dir') >> 0
.urandom(n)
#>> n случайных байт
#исп в криптографических целях
.path
#модуль
#работа с путями | [
"mkone112@gmail.com"
] | mkone112@gmail.com |
44bc5eb8810a57ea9c8b0764fe032720ae388e02 | ce972e94fcdf19d6809d94c2a73595233d1f741d | /catkin_ws/devel/lib/python3/dist-packages/tf2_msgs/msg/_LookupTransformActionGoal.py | 6b1aed424005e57d0c94295a80e81141492594c7 | [] | no_license | WilliamZipanHe/reward_shaping_ttr | cfa0e26579f31837c61af3e09621b4dad7eaaba2 | df56cc0153147bb067bc3a0eee0e1e4e1044407f | refs/heads/master | 2022-02-23T05:02:00.120626 | 2019-08-07T21:52:50 | 2019-08-07T21:52:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | /local-scratch/xlv/catkin_ws/devel/.private/tf2_msgs/lib/python3/dist-packages/tf2_msgs/msg/_LookupTransformActionGoal.py | [
"xlv@cs-mars-01.cmpt.sfu.ca"
] | xlv@cs-mars-01.cmpt.sfu.ca |
216375b04dadfc2f8857cb1a6300c1c69a16f350 | c61a28aba19f7cdf9a5127e8a782bf115c265e70 | /env/bin/csscapture | 84fbd44daf929a1a716e351d19990cf1389614cf | [] | no_license | sharmilaviji/RecruitPRO-NEW | fa72c8fc00f469a41798b1047c11dcc470fbc495 | dcfaedebe56b45acd6ddcab7e24c939b853a2c8c | refs/heads/master | 2021-05-26T12:14:12.611154 | 2020-04-27T04:40:50 | 2020-04-27T04:40:50 | 254,125,640 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | #!/home/sharmila/frappe-bench/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from cssutils.scripts.csscapture import main
if __name__ == '__main__':
    # Console-script shim: strip the Windows launcher suffix ('-script.pyw'
    # or '.exe') from argv[0], then exit with csscapture's return code.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"sharmiviji1997@gmail.com"
] | sharmiviji1997@gmail.com | |
09e0e639e78b5d589ce1b29c07d10a0f7a616734 | 45b64f620e474ac6d6b2c04fbad2730f67a62b8e | /Varsity-Final-Project-by-Django-master/.history/project/index/views_20210226000505.py | c8c780a29137164e7ee593f6beb0b9fcf9216d35 | [] | no_license | ashimmitra/Final-Project | 99de00b691960e25b1ad05c2c680015a439277e0 | a3e1d3c9d377e7b95b3eaf4dbf757a84a3858003 | refs/heads/master | 2023-04-11T06:12:35.123255 | 2021-04-26T15:41:52 | 2021-04-26T15:41:52 | 361,796,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | from django.shortcuts import render
from .model import AboutSite
def home(request):
    """Render the index page with all AboutSite entries in the context."""
    aboutdata = AboutSite.objects.all()
    # BUG FIX: the original `context = [ 'about'= ]` was a half-typed literal
    # and a SyntaxError; render() expects a dict context, built here.
    context = {
        'about': aboutdata,
    }
    return render(request, "index.html", context)
def blog(request):
    """Render the static blog page."""
    template_name = "blog.html"
    return render(request, template_name)
| [
"34328617+ashimmitra@users.noreply.github.com"
] | 34328617+ashimmitra@users.noreply.github.com |
11b059a875363c44c367c680d055a178be4e0f45 | 448d028fb4b4703f0be32e0e6780a389c13b3914 | /semaphore_example.py | a95d141ccab4b526b728c7a8687c06b03c191811 | [] | no_license | katryo/python-threading | 9453dd377d7ab4aebbdb3bc9c222b4fe4d3cf156 | f65ebe725f86463f2e97d9426d233257c91518cd | refs/heads/master | 2020-04-21T22:48:41.737864 | 2019-02-09T23:33:53 | 2019-02-09T23:33:53 | 169,925,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | from threading import Semaphore, Thread
from datetime import datetime
from time import sleep
class Runner:
    """Worker whose run() prints a timestamp three times under a semaphore."""
    def run(self, sem):
        for _ in range(3):
            # Only as many threads as the semaphore's initial value can be
            # inside this block at once; the rest block on acquisition.
            with sem:
                print(datetime.now())
                sleep(1)
if __name__ == '__main__':
    # Semaphore(2): at most two of the three threads print concurrently.
    sem = Semaphore(2)
    runner = Runner()
    thread_a = Thread(target=runner.run, args=(sem,))
    thread_b = Thread(target=runner.run, args=(sem,))
    thread_c = Thread(target=runner.run, args=(sem,))
    thread_a.start()
    thread_b.start()
    thread_c.start()
    # NOTE(review): threads are never join()ed; the interpreter waits for
    # these non-daemon threads to finish on its own at shutdown.
| [
"katoryo55@gmail.com"
] | katoryo55@gmail.com |
b0766213ff36f4e3e57f2f14e1eac7fa6f65c9c3 | 13c14be20f16ffc14b7cde71ed8c4179e2410a0b | /algorithms/lisa-workbook.py | ac24edf42812ac4ec504b5bb62318e9c230d6018 | [] | no_license | gautamits/hackerrank | 79688e5735a27eed032ce0c34f4fe253cfb6b572 | aee6b00f4cd39c18e9107e933cceb55b9677c3c7 | refs/heads/master | 2020-05-21T22:55:50.977437 | 2018-12-11T05:09:36 | 2018-12-11T05:09:36 | 61,579,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | #!/bin/python3
import math
import os
import random
import re
import sys
from itertools import count, zip_longest
# Complete the workbook function below.
def workbook(n, k, arr):
    """Count 'special' problems: those whose number equals the page they sit on.

    Chapter i has arr[i] problems, printed k per page, and every chapter
    starts on a fresh page.  `n` (the chapter count) is implied by len(arr)
    and is not used directly.
    """
    special = 0
    page = 0
    for chapter_problems in arr:
        # Each page holds problems [start, start + k - 1], capped at the
        # chapter's last problem number.
        for start in range(1, chapter_problems + 1, k):
            page += 1
            if start <= page <= min(start + k - 1, chapter_problems):
                special += 1
    return special
if __name__ == '__main__':
    # HackerRank harness: the answer is written to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    nk = input().split()
    n = int(nk[0])
    k = int(nk[1])
    arr = list(map(int, input().rstrip().split()))
    result = workbook(n, k, arr)
    fptr.write(str(result) + '\n')
    fptr.close()
| [
"gautamamits95@gmail.com"
] | gautamamits95@gmail.com |
b89e6a9365194978cfa8663dd987ce19fe88410a | 72012dc3877b16b25f43cd62df1fc081c8f9299d | /my_site/views.py | 891f45eebcd96f9059c7081e4bfc31f47c753146 | [] | no_license | volitilov/wf_v2 | e12962bf1fbf3b6e73bd67bcccffc4e218575e5b | 1c2f585926f8258b208ad52f7ffa40576b4b37e2 | refs/heads/master | 2021-01-21T17:37:35.465356 | 2017-05-28T17:53:38 | 2017-05-28T17:53:38 | 91,968,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,151 | py | from django.template.context_processors import csrf
from django.shortcuts import redirect, render, get_object_or_404
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, HttpResponseRedirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import Mesage, PortfolioItem
from .forms import ContactsForm
# Home page ::::::::::::::::::::::::::::::::::::::::::::::
def home(request):
    """Render the landing page with the three most recent portfolio works."""
    context = {'works': PortfolioItem.objects.all()[:3]}
    return render(request, 'pages/index.html', context)
# About page :::::::::::::::::::::::::::::::::::::::::::::
def about(request):
    """Render the static "about" page."""
    empty_context = {}
    return render(request, 'pages/about.html', empty_context)
# Portfolio page :::::::::::::::::::::::::::::::::::::::::
def portfolio(request):
    """Render the portfolio page listing every work."""
    context = {'works': PortfolioItem.objects.all()}
    return render(request, 'pages/portfolio.html', context)
# Info page ::::::::::::::::::::::::::::::::::::::::::::::
def info(request):
    """Render the static "info" page."""
    empty_context = {}
    return render(request, 'pages/info.html', empty_context)
# Services page ::::::::::::::::::::::::::::::::::::::::::
def services(request):
    """Render the static "services" page."""
    empty_context = {}
    return render(request, 'pages/services.html', empty_context)
# Contacts page ::::::::::::::::::::::::::::::::::::::::::
def contacts(request):
    """Show the contact form; on a valid POST, save the message and render
    a personalised feedback page.

    Invalid or absent POST data falls through to the contacts template.
    """
    args = {}
    args.update(csrf(request))
    args['form'] = ContactsForm
    if request.POST:
        form = ContactsForm(request.POST)
        if form.is_valid():
            form.save()
            # BUG FIX: a leftover `import pdb; pdb.set_trace()` debugger
            # breakpoint froze every successful submission; removed.
            return render(request, 'pages/feedback.html', { 'name': form.cleaned_data['name'] })
    else:
        form = ContactsForm()
    return render(request, 'pages/contacts.html', args)
# Work page ::::::::::::::::::::::::::::::::::::::::::::::
def work(request, pk):
    """Render a single portfolio item (404 if `pk` is unknown) together with
    the full list of works for navigation."""
    # Dead commented-out Paginator experiment removed; the Paginator imports
    # at the top of the file are intentionally left untouched.
    works = PortfolioItem.objects.all()
    work = get_object_or_404(PortfolioItem, pk=pk)
    data = {
        'works': works,
        'work': work
    }
    return render(request, 'pages/work.html', data)
| [
"volitilov@gmail.com"
] | volitilov@gmail.com |
381fbd0dd22da0e55005395e2daaaf7acec16583 | a8b37bd399dd0bad27d3abd386ace85a6b70ef28 | /airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/utils/compare.py | e71b1bafdc55d4db015a2835ce578c908a0db98f | [
"MIT",
"LicenseRef-scancode-free-unknown",
"Elastic-2.0"
] | permissive | thomas-vl/airbyte | 5da2ba9d189ba0b202feb952cadfb550c5050871 | 258a8eb683634a9f9b7821c9a92d1b70c5389a10 | refs/heads/master | 2023-09-01T17:49:23.761569 | 2023-08-25T13:13:11 | 2023-08-25T13:13:11 | 327,604,451 | 1 | 0 | MIT | 2021-01-07T12:24:20 | 2021-01-07T12:24:19 | null | UTF-8 | Python | false | false | 3,254 | py | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import functools
from typing import List, Mapping, Optional
import dpath.exceptions
import dpath.util
import icdiff
import py
from pprintpp import pformat
# Terminal geometry used to size the side-by-side diff table.
MAX_COLS = py.io.TerminalWriter().fullwidth
MARGIN_LEFT = 20
GUTTER = 3
MARGINS = MARGIN_LEFT + GUTTER + 1
def diff_dicts(left, right, use_markup) -> Optional[List[str]]:
    """Build a side-by-side console diff of two pretty-printed values.

    :param left: first value (pretty-printed with pprintpp)
    :param right: second value
    :param use_markup: whether ANSI colourisation is allowed
    :return: ["equals failed"] followed by the icdiff table lines
    """
    # Each side initially gets half the terminal, minus margins.
    half_cols = MAX_COLS / 2 - MARGINS
    pretty_left = pformat(left, indent=1, width=half_cols).splitlines()
    pretty_right = pformat(right, indent=1, width=half_cols).splitlines()
    diff_cols = MAX_COLS - MARGINS
    if len(pretty_left) < 3 or len(pretty_right) < 3:
        # avoid small diffs far apart by smooshing them up to the left
        smallest_left = pformat(left, indent=2, width=1).splitlines()
        smallest_right = pformat(right, indent=2, width=1).splitlines()
        max_side = max(len(line) + 1 for line in smallest_left + smallest_right)
        if (max_side * 2 + MARGIN_LEFT) < MAX_COLS:
            diff_cols = max_side * 2 + GUTTER
            pretty_left = pformat(left, indent=2, width=max_side).splitlines()
            pretty_right = pformat(right, indent=2, width=max_side).splitlines()
    differ = icdiff.ConsoleDiff(cols=diff_cols, tabsize=2)
    if not use_markup:
        # colorization is disabled in Pytest - either due to the terminal not
        # supporting it or the user disabling it. We should obey, but there is
        # no option in icdiff to disable it, so we replace its colorization
        # function with a no-op
        differ.colorize = lambda string: string
        color_off = ""
    else:
        color_off = icdiff.color_codes["none"]
    icdiff_lines = list(differ.make_table(pretty_left, pretty_right, context=True))
    return ["equals failed"] + [color_off + line for line in icdiff_lines]
@functools.total_ordering
class HashMixin:
    """Mixin giving ordering/equality based on a content-derived hash.

    Nested dicts and lists hash by (sorted) content, so two containers with
    the same elements compare equal regardless of ordering.
    """

    @staticmethod
    def get_hash(obj):
        # Normalise containers into a deterministic string representation
        # before hashing, recursing so nested structures hash by content.
        if isinstance(obj, Mapping):
            normalized = {key: HashMixin.get_hash(value) for key, value in sorted(obj.items())}
            return hash(str(normalized))
        if isinstance(obj, List):
            return hash(str(sorted(HashMixin.get_hash(item) for item in obj)))
        return hash(obj)

    def __hash__(self):
        return HashMixin.get_hash(self)

    def __lt__(self, other):
        # functools.total_ordering derives <=, > and >= from __lt__ + __eq__.
        return hash(self) < hash(other)

    def __eq__(self, other):
        return hash(self) == hash(other)
class DictWithHashMixin(HashMixin, dict):
    """A dict hashed and compared by content (key order does not matter)."""
    pass
class ListWithHashMixin(HashMixin, list):
    """A list hashed and compared by sorted content (element order does not matter)."""
    pass
def delete_fields(obj: Mapping, path_list: List[str]) -> None:
    """Delete every dpath-style path in `path_list` from `obj`, in place.

    Paths that do not exist are silently skipped.
    """
    for path in path_list:
        try:
            dpath.util.delete(obj, path)
        except dpath.exceptions.PathNotFound:
            # Missing path: nothing to delete; deliberately best-effort.
            pass
def make_hashable(obj, exclude_fields: Optional[List[str]] = None) -> object:
    """
    Simplify comparison of nested dicts/lists by wrapping them in the
    hash-by-content mixin classes.

    BUG FIX: the return annotation was `-> str`, but the function returns a
    DictWithHashMixin / ListWithHashMixin for containers and the value itself
    otherwise -- never a str.

    :param obj: value for comparison
    :param exclude_fields: if obj is a Mapping, these dpath paths are removed
        from it (NOTE: mutates obj in place) before wrapping
    """
    if isinstance(obj, Mapping):
        # If value is Mapping, some fields can be excluded
        if exclude_fields:
            delete_fields(obj, exclude_fields)
        return DictWithHashMixin(obj)
    if isinstance(obj, List):
        return ListWithHashMixin(obj)
    return obj
| [
"noreply@github.com"
] | thomas-vl.noreply@github.com |
6b910e42492be063380898c263fff9db242597d7 | bf2aa4eab14a6a5347fe4af65cc4a37f512a465d | /people/migrations/0185_auto_20211228_1012.py | f7d8a97b37ffb72adfa1415ed169341a3c45484d | [] | no_license | drdavidknott/betterstart | 0cda889f5cd6bb779f6d1fa75cb4f2ef08eb626c | 59e2f8282b34b7c75e1e19e1cfa276b787118adf | refs/heads/master | 2023-05-04T07:32:24.796488 | 2023-04-16T15:26:30 | 2023-04-16T15:26:30 | 173,626,906 | 0 | 0 | null | 2023-02-18T07:27:55 | 2019-03-03T20:37:01 | Python | UTF-8 | Python | false | false | 2,187 | py | # Generated by Django 3.1.13 on 2021-12-28 10:12
from django.db import migrations, models
import django.db.models.deletion
import people.django_extensions
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.1.13).

    Updates Survey_Question's default ordering and creates the
    Survey_Submission and Survey_Answer tables.
    """
    dependencies = [
        ('people', '0184_survey_question_survey_question_type'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='survey_question',
            options={'ordering': ['survey_section__survey__name', 'survey_section__name', 'number'], 'verbose_name_plural': 'survey questions'},
        ),
        migrations.CreateModel(
            name='Survey_Submission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(blank=True, null=True)),
                ('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='people.person')),
                ('survey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='people.survey')),
            ],
            options={
                'verbose_name_plural': 'survey submissions',
                'ordering': ['-date'],
            },
            bases=(people.django_extensions.DataAccessMixin, models.Model),
        ),
        migrations.CreateModel(
            name='Survey_Answer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('range_answer', models.IntegerField(default=0)),
                ('text_answer', models.CharField(blank=True, default='', max_length=500)),
                ('survey_question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='people.survey_question')),
                ('survey_submission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='people.survey_submission')),
            ],
            options={
                'verbose_name_plural': 'survey answers',
                'ordering': ['-survey_submission__date', '-survey_question__number'],
            },
            bases=(people.django_extensions.DataAccessMixin, models.Model),
        ),
    ]
| [
"dkoysta@gmail.com"
] | dkoysta@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.