Dataset schema (one row per source file; ⌀ marks nullable columns). Each record below is rendered as a short metadata header (hexsha/size/ext/lang, then the max_stars / max_issues / max_forks fields) followed by its `content`.

| column | dtype | range / cardinality |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

hexsha: 762e327b70f7eced22d60d967a62c8b851af8650 | size: 2,546 | ext: py | lang: Python
max_stars: tensorflow/python/eager/executor.py | Stevanus-Christian/tensorflow @ d44afcf5ca16c5d704c66f891b99eac804e7cd14 | ["Apache-2.0"] | count 2 | 2016-09-27T05:37:33.000Z to 2019-11-22T06:41:12.000Z
max_issues: same path/repo/head/licenses | count null | dates null
max_forks: same path/repo/head/licenses | count null | dates null
content:
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executor for eager execution."""
from tensorflow.python import pywrap_tfe
class Executor(object):
"""A class for handling eager execution.
The default behavior for asynchronous execution is to serialize all ops on
a single thread. Having different `Executor` objects in different threads
enables executing ops asynchronously in parallel:
```python
def thread_function():
    executor = executor.Executor(enable_async=True)
context.set_executor(executor)
a = threading.Thread(target=thread_function)
a.start()
b = threading.Thread(target=thread_function)
b.start()
```
"""
__slots__ = ["_handle"]
def __init__(self, handle):
self._handle = handle
def __del__(self):
try:
self.wait()
pywrap_tfe.TFE_DeleteExecutor(self._handle)
except TypeError:
# Suppress some exceptions, mainly for the case when we're running on
# module deletion. Things that can go wrong include the pywrap module
      # already being unloaded, self._handle no longer being
# valid, and so on. Printing warnings in these cases is silly
# (exceptions raised from __del__ are printed as warnings to stderr).
pass # 'NoneType' object is not callable when the handle has been
# partially unloaded.
def is_async(self):
return pywrap_tfe.TFE_ExecutorIsAsync(self._handle)
def handle(self):
return self._handle
def wait(self):
"""Waits for ops dispatched in this executor to finish."""
pywrap_tfe.TFE_ExecutorWaitForAllPendingNodes(self._handle)
def clear_error(self):
"""Clears errors raised in this executor during execution."""
pywrap_tfe.TFE_ExecutorClearError(self._handle)
def new_executor(enable_async, enable_streaming_enqueue=True):
handle = pywrap_tfe.TFE_NewExecutor(enable_async, enable_streaming_enqueue)
return Executor(handle)
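A short usage sketch (editorial, not part of the file): it exercises only the API defined above and assumes it runs inside a TensorFlow build where `pywrap_tfe` is importable.

```python
# Create a dedicated async executor for the current thread, then drain it.
ex = new_executor(enable_async=True)
print(ex.is_async())   # True
# ... dispatch eager ops while this executor is active ...
ex.wait()              # block until all pending nodes finish
ex.clear_error()       # clear any error raised by an async op
```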
avg_line_length: 33.946667 | max_line_length: 80 | alphanum_fraction: 0.718382

hexsha: ea832bd531f29f83f740c0aed0eca94336f0a72c | size: 110 | ext: py | lang: Python
max_stars: docs/sections/section3/notebook/solutions/sol4.py | lingcog/2019-CS109A @ f1eaa62976fe989c3ad3f3ab4b8dd5d71574a2c3 | ["MIT"] | count 442 | 2019-06-11T06:47:00.000Z to 2022-03-12T11:19:31.000Z
max_issues: docs/sections/section3/notebook/solutions/sol4.py | lelandroberts97/2019-CS109A @ 976da6b65c26fd3c5db285cbf9ec9cde92751a70 | ["MIT"] | count 3 | 2019-09-23T17:32:51.000Z to 2022-02-09T06:06:00.000Z
max_forks: docs/sections/section3/notebook/solutions/sol4.py | lelandroberts97/2019-CS109A @ 976da6b65c26fd3c5db285cbf9ec9cde92751a70 | ["MIT"] | count 486 | 2019-06-17T05:01:07.000Z to 2022-03-13T20:30:44.000Z
content:
titanic_train['sex_male'] = (titanic_train.sex == 'male').astype(int)
titanic_train['sex_male'].value_counts()
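For comparison, the same indicator column can be built with `pandas.get_dummies`; this is an editorial sketch assuming the same `titanic_train` DataFrame.

```python
import pandas as pd

# Equivalent encoding via get_dummies; drop_first leaves only the 'sex_male' indicator.
dummies = pd.get_dummies(titanic_train['sex'], prefix='sex', drop_first=True).astype(int)
dummies['sex_male'].value_counts()
```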
avg_line_length: 55 | max_line_length: 69 | alphanum_fraction: 0.754545

hexsha: 73ef57943fd9da84a30816aee2942ad11cf887d5 | size: 6,378 | ext: py | lang: Python
max_stars: app/main.py | jvanelteren/ds @ 45bab16fda74bc92180a6f6576f08fdd9aa96528 | ["MIT"] | count null | dates null
max_issues: same path/repo/head/licenses | count null | dates null
max_forks: same path/repo/head/licenses | count null | dates null
content:
# %%
from typing import List
from fastapi import FastAPI, Request
from pydantic import BaseModel
import logging
import pickle
import random
from itertools import cycle
# this is absolutely essential to get rid of these *** cors errors
from fastapi.middleware.cors import CORSMiddleware
from fastapi import FastAPI, File, UploadFile, HTTPException, status
import numpy as np
from PIL import Image, ImageOps
from io import BytesIO
import time
from datetime import datetime
import os
import app.db.database as db
import torchvision
from torchvision import transforms
import torch
from torch import nn
# to be able to run locally and in the cloud
if os.getcwd() == '/ds/app':
os.chdir('/ds')
app = FastAPI()
# to solve cors errors
# you can specify allowed origins as a list of addresses, or just allow everything with ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# the log level needs to be set here and not in the terminal
logger = logging.getLogger("uvicorn")
logger.setLevel(logging.DEBUG)
class AgeResnet(nn.Module):
def __init__(self, size='18', feat_extract=False):
super().__init__()
resnet = 'torchvision.models.resnet'+size+'(pretrained=True)'
resnet = eval(resnet)
modules = list(resnet.children())[:-1]
self.resnet = nn.Sequential(*modules)
if feat_extract:
# with feature extraction we only train the linear layer and keep the resnet parameters fixed
for m in self.modules():
m.requires_grad_(False)
self.fc = nn.Linear(in_features=512, out_features=1, bias=True)
nn.init.kaiming_normal_(self.fc.weight)
def forward(self, x):
out = self.resnet(x)
x = torch.flatten(out, 1)
return self.fc(x)
def img_to_reshaped_normalized_tensor(img, pad=False, crop=False):
# makes a tensor, scales range to 0-1 and normalizes to same as imagenet
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
resize = transforms.Resize((200, 200), interpolation=0)
if pad:
w, h = img.size
delta_w = max((h, w)) - w
delta_h = max((h, w)) - h
padding = (delta_w//2, delta_h//2, delta_w -
(delta_w//2), delta_h-(delta_h//2))
img = ImageOps.expand(img, padding)
if crop:
img = ImageOps.fit(img, size=(200, 200), method=5,
bleed=0.0, centering=(0.5, 0.5))
img = resize(img)
img = transforms.functional.pil_to_tensor(img)
img = normalize(img.float()/255)
return img
def gen_img_ids():
img_ids = list(range(len(df)))
random.shuffle(img_ids)
return cycle(img_ids)
def next_batch(gen, n):
return [next(gen) for _ in range(n)]
class Ages(BaseModel):
age: List[int] = []
faceids: List[str] = []
actual: List[int] = []
comp: List[int] = []
@app.get("/backend/get_images/")
async def return_images():
batch_info_df = df.loc[next_batch(img_batch_gen, 6)]
# this was a placeholder for images, but can be approached locally
faces = list(range(1, 7))
faceids = ['../../'+str(f) for f in batch_info_df['path']]
computer = list(batch_info_df['pred'])
actual = list(batch_info_df['actual'])
return {'faces': faces,
'faceids': faceids,
'computer': computer,
'actual': actual
}
@app.post("/backend/submit_preds/") # use post since server receives
async def submit_preds(ages: Ages, request: Request):
# this is the way to use the pydantic base model
ip = request.headers['X-Real-IP'] if 'X-Real-IP' in request.headers else 'unknown'
batch_size = len(ages.age)
# save ages to database
if ages.age and ages.faceids:
for i in range(len(ages.age)):
if abs(int(ages.age[i])-int(ages.actual[i])) < 20:
# only add when error is < 20
db.create_pred(conn, [ip, ages.faceids[i], ages.age[i], ages.actual[i], abs(int(
ages.age[i])-int(ages.actual[i])), ages.comp[i], abs(int(ages.comp[i])-int(ages.actual[i]))])
print('added', [ip, ages.faceids[i], ages.age[i], ages.actual[i], abs(int(
ages.age[i])-int(ages.actual[i])), ages.comp[i], abs(int(ages.comp[i])-int(ages.actual[i]))])
return {'items_db': str(db.count_predictions(conn)),
'mae_human': str(round(db.human_mae(conn), 1)),
'mae_comp': str(round(db.comp_mae(conn), 1))}
@app.post("/backend/upload/")
async def create_file(file: bytes = File(...)):
#
try:
# transforms.functional.pil_to_tensor
        pil_image = Image.open(BytesIO(file))
        print(pil_image.size)
    except Exception:
        # alternatively, raise a 422 instead of returning a status payload:
        # raise HTTPException(
        #     status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail="Unable to process file")
        return {"status": 'failed processing image'}
# print(np.array(pil_image).shape)
pred = model(img_to_reshaped_normalized_tensor(pil_image)[None])
# from pathlib import Path
# path = Path('app/uploads/')
# pil_image.save(path/(str(time.time())+'.png'),"PNG")
# todo resizing, normalizing and running it through a model and returning the prediction
db.add_upload(conn2)
return {"status": str(round(pred.item()))}
@app.get("/backend/stats/")
async def send_stats():
return {"num_uploads": db.count_uploads(conn2),
"num_predictions": db.count_predictions(conn)}
#%%
# load the precomputed predictions dataframe (platform-specific pickle path)
pickle_path = "app/models/windows_predictions.pickle" if os.name == 'nt' else "app/models/predictions.pickle"
with open(pickle_path, "rb") as f:
    df = pickle.load(f)
logger.debug(f'number of items in dataset {len(df)}')
img_batch_gen = gen_img_ids()
model = AgeResnet()
model.load_state_dict(torch.load('app/models/model4.18',
map_location=torch.device('cpu')))
model.eval()
conn = db.open_db('app/db/predictions.db')
conn2 = db.open_upload('app/db/uploads.db')
num_uploads = db.count_uploads(conn2)
items_db = db.count_predictions(conn)
if not items_db:
items_db = 0
mae_human = db.human_mae(conn)
mae_comp = round(df['loss'].mean(), 1)
print(f"{items_db} items in database, mae human {mae_human}, mae_comp {mae_comp}")
print('started')
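A hedged client-side sketch for the `/backend/upload/` endpoint defined above; the host, port, and image path are placeholders, and the `requests` package is assumed to be available.

```python
import requests

# The multipart field must be named "file" to match `file: bytes = File(...)`.
with open("face.jpg", "rb") as fh:
    resp = requests.post("http://localhost:8000/backend/upload/", files={"file": fh})
print(resp.json())   # e.g. {"status": "34"}: the predicted age as a string
```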
avg_line_length: 31.89 | max_line_length: 113 | alphanum_fraction: 0.640326

hexsha: 1cf8e86d5990a3a069780102f01640271727535c | size: 1,468 | ext: py | lang: Python
max_stars: LINETCR/Api/Poll.py | Kaneki711/Skyteam @ e3203a218780eee2a7820866d59b014e04c0a975 | ["MIT"] | count 4 | 2018-02-24T19:03:26.000Z to 2020-03-28T13:22:25.000Z
max_issues: same path/repo/head/licenses | count null | dates null
max_forks: same path/repo/head/licenses | count 1 | 2020-03-28T13:22:28.000Z to 2020-03-28T13:22:28.000Z
content:
import os, sys, time
path = os.path.join(os.path.dirname(__file__), '../lib/')
sys.path.insert(0, path)
from thrift.transport import THttpClient
from thrift.protocol import TCompactProtocol
from curve import LineService
from curve.ttypes import *
class Poll:
client = None
auth_query_path = "/api/v4/TalkService.do";
http_query_path = "/S4";
polling_path = "/P4";
host = "gd2.line.naver.jp";
port = 443;
UA = "Line/7.18.0"
LA = "DESKTOPMAC\t5.3.3-YOSEMITE-x64\tMAC\t10.12.0"
rev = 0
def __init__(self, authToken):
self.transport = THttpClient.THttpClient('https://gd2.line.naver.jp:443'+ self.http_query_path)
self.transport.setCustomHeaders({
"User-Agent" : self.UA,
"X-Line-Application" : self.LA,
"X-Line-Access": authToken
});
self.protocol = TCompactProtocol.TCompactProtocol(self.transport);
self.client = LineService.Client(self.protocol)
self.rev = self.client.getLastOpRevision()
self.transport.path = self.polling_path
self.transport.open()
def stream(self, sleep=50000):
#usleep = lambda x: time.sleep(x/1000000.0)
while True:
try:
Ops = self.client.fetchOps(self.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(self.rev))
for Op in Ops:
# print Op.type
if (Op.type != OpType.END_OF_OPERATION):
self.rev = max(self.rev, Op.revision)
return Op
#usleep(sleep)
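An illustrative consumer loop (not part of the original file): `stream()` returns a single operation per call, so callers poll it repeatedly. The token is a placeholder, and the `RECEIVE_MESSAGE` constant is assumed to exist in `curve.ttypes.OpType`.

```python
# Hypothetical polling loop built on the Poll class above.
poll = Poll(authToken="YOUR_AUTH_TOKEN")
while True:
    op = poll.stream()
    if op.type == OpType.RECEIVE_MESSAGE:
        print(op.message.text)
```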
avg_line_length: 27.185185 | max_line_length: 99 | alphanum_fraction: 0.662125

hexsha: 851ec94c887dba62d5c713b3d3a4b1c95240c925 | size: 1,010 | ext: py | lang: Python
max_stars: isi_sdk_8_1_1/test/test_event_channel_extended_extended.py | mohitjain97/isilon_sdk_python @ a371f438f542568edb8cda35e929e6b300b1177c | ["Unlicense"] | count 24 | 2018-06-22T14:13:23.000Z to 2022-03-23T01:21:26.000Z
max_issues: same path/repo/head/licenses | count 46 | 2018-04-30T13:28:22.000Z to 2022-03-21T21:11:07.000Z
max_forks: same path/repo/head/licenses | count 29 | 2018-06-19T00:14:04.000Z to 2022-02-08T17:51:19.000Z
content:
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_1
from isi_sdk_8_1_1.models.event_channel_extended_extended import EventChannelExtendedExtended # noqa: E501
from isi_sdk_8_1_1.rest import ApiException
class TestEventChannelExtendedExtended(unittest.TestCase):
"""EventChannelExtendedExtended unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testEventChannelExtendedExtended(self):
"""Test EventChannelExtendedExtended"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_1_1.models.event_channel_extended_extended.EventChannelExtendedExtended() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
avg_line_length: 24.634146 | max_line_length: 115 | alphanum_fraction: 0.736634

hexsha: e4c1e3083842d5032b3ca8a85748dd08d84728ce | size: 8,505 | ext: py | lang: Python
max_stars: setup.py | macdaliot/stix-shifter @ 4f2850269cec213fd31a27c36bb4df45ed693087 | ["Apache-2.0"] | count null | dates null
max_issues: same path/repo/head/licenses | count null | dates null
max_forks: same path/repo/head/licenses | count null | dates null
content:
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='stix_shifter', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.0.0', # Required
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='Tools and interface to translate STIX formatted results and queries to different data source formats and to set up appropriate connection strings for invoking and triggering actions in openwhisk', # Required
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# Denotes that our long_description is in Markdown; valid values are
# text/plain, text/x-rst, and text/markdown
#
# Optional if long_description is written in reStructuredText (rst) but
# required for plain-text or Markdown; if unspecified, "applications should
# attempt to render [the long_description] as text/x-rst; charset=UTF-8 and
# fall back to text/plain if it is not valid rst" (see link below)
#
# This field corresponds to the "Description-Content-Type" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-content-type-optional
long_description_content_type='text/markdown', # Optional (see note above)
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/IBM/stix-shifter', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='ibm', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[ # Optional
# # How mature is this project? Common values are
# # 3 - Alpha
# # 4 - Beta
# # 5 - Production/Stable
# 'Development Status :: 3 - Alpha',
# # Indicate who your project is intended for
# 'Intended Audience :: Developers',
# 'Topic :: Software Development :: Build Tools',
# # Pick your license as you wish
'License :: OSI Approved :: MIT License',
# # Specify the Python versions you support here. In particular, ensure
# # that you indicate whether you support Python 2, Python 3 or both.
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.4',
# 'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='datasource stix translate transform transmit', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages(exclude=['tests']), # Required
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['stix2-patterns>=1.1.0', 'stix2-validator>=0.5.0',
'antlr4-python3-runtime==4.7', 'python-dateutil>=2.7.3', 'xmltodict>=0.11.0'
# ,'stix2-matcher@https://github.com/oasis-open/cti-pattern-matcher/archive/v0.1.0.zip#egg=stix2-matcher' # uncomment when running setup.py locally
], # Optional
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
# extras_require={ # Optional
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
# package_data={ # Optional
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['*.json'])], # Optional
include_package_data=True,
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
# entry_points={ # Optional
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
# List additional URLs that are relevant to your project as a dict.
#
# This field corresponds to the "Project-URL" metadata fields:
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#
# Examples listed include a pattern for specifying where the package tracks
# issues, where the source is hosted, where to say thanks to the package
# maintainers, and where to support the project financially. The key is
# what's used to render the link text on PyPI.
project_urls={ # Optional
'Source': 'https://github.com/IBM/stix-shifter',
},
)
avg_line_length: 42.954545 | max_line_length: 225 | alphanum_fraction: 0.675367

hexsha: 4376fbfd72ad7fa3de5129db0df9d5ef4e18aa58 | size: 15,861 | ext: py | lang: Python
max_stars: capsulenet-onehot-opt.py | lauromoraes/CapsNet-promoter @ 9b08912648ff5d58a11ebb42225d9ad9851c61ac | ["MIT"] | count 2 | 2021-11-08T16:21:56.000Z to 2022-03-07T01:49:26.000Z
max_issues: same path/repo/head/licenses | count null | dates null
max_forks: same path/repo/head/licenses | count null | dates null
content:
#!/usr/bin/env python
"""
Keras implementation of CapsNet in Hinton's paper Dynamic Routing Between Capsules.
Usage:
python CapsNet.py
python CapsNet.py --epochs 100
python CapsNet.py --epochs 100 --num_routing 3
... ...
"""
import os
import numpy as np
import pandas as pd
np.random.seed(1337)
from keras import layers, models, optimizers
from keras import backend as K
from capsulelayers2 import CapsuleLayer, PrimaryCap, Length, Mask
from keras.preprocessing import sequence
from keras.utils.vis_utils import plot_model
from sklearn.model_selection import StratifiedShuffleSplit
from metrics import margin_loss
headers = ['partition','mcc','f1','sn','sp','acc','prec','tp','fp','tn', 'fn']
results = {'partition':[],'mcc':[],'f1':[],'sn':[],'sp':[],'acc':[],'prec':[],'tp':[],'fp':[],'tn':[],'fn':[]}
max_features = 79
maxlen = 16
def CapsNet(input_shape, n_class, num_routing):
from keras import layers, models
from capsulelayers2 import CapsuleLayer, PrimaryCap, Length, Mask
from keras.preprocessing import sequence
"""
A Capsule Network on MNIST.
:param input_shape: data shape, 4d, [None, width, height, channels]
:param n_class: number of classes
:param num_routing: number of routing iterations
:return: A Keras Model with 2 inputs and 2 outputs
"""
x = layers.Input(shape=input_shape)
# conv1 = layers.Conv1D(filters=256, kernel_size=9, strides=1, padding='valid', activation='relu', name='conv1')(embed)
conv1 = layers.Conv2D(filters=128, kernel_size=(4,11), strides=(1,1), padding='valid', activation='relu', name='conv1')(x)
# conv1 = layers.Dropout(0.1)(conv1)
# Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_vector]
# primarycaps = PrimaryCap(conv1, dim_vector=8, n_channels=32, kernel_size=9, strides=2, padding='valid')
primarycaps = PrimaryCap(conv1, dim_vector=2, n_channels=16, kernel_size=(1,11), strides=2, padding='valid')
# Layer 3: Capsule layer. Routing algorithm works here.
# digitcaps = CapsuleLayer(num_capsule=n_class, dim_vector=16, num_routing=num_routing, name='digitcaps')(primarycaps)
digitcaps = CapsuleLayer(num_capsule=n_class, dim_vector=16, num_routing=1, name='digitcaps')(primarycaps)
# Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
# If using tensorflow, this will not be necessary. :)
out_caps = Length(name='out_caps')(digitcaps)
# Decoder network.
y = layers.Input(shape=(n_class,))
masked_by_y = Mask()([digitcaps, y]) # The true label is used to mask the output of capsule layer. For training
masked = Mask()(digitcaps) # Mask using the capsule with maximal length. For prediction
# Shared Decoder model in training and prediction
decoder = models.Sequential(name='decoder')
decoder.add(layers.Dense(512, activation='relu', input_dim=16*n_class))
decoder.add(layers.Dense(1024, activation='relu'))
decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))
# Models for training and evaluation (prediction)
train_model = models.Model([x, y], [out_caps, decoder(masked_by_y)])
eval_model = models.Model(x, [out_caps, decoder(masked)])
# manipulate model
noise = layers.Input(shape=(n_class, 16))
noised_digitcaps = layers.Add()([digitcaps, noise])
masked_noised_y = Mask()([noised_digitcaps, y])
manipulate_model = models.Model([x, y, noise], decoder(masked_noised_y))
return train_model, eval_model, manipulate_model
# # Decoder network.
# y = layers.Input(shape=(n_class,))
# masked = Mask()([digitcaps, y]) # The true label is used to mask the output of capsule layer.
# # x_recon = layers.Dense(512, activation='relu')(masked)
# # x_recon = layers.Dense(1024, activation='relu')(x_recon)
# x_recon = layers.Dense(512, activation='relu')(masked)
# x_recon = layers.Dense(1024, activation='relu')(x_recon)
# # x_recon = layers.Dropout(0.3)(x_recon)
# x_recon = layers.Dense(np.prod(input_shape), activation='sigmoid')(x_recon)
# x_recon = layers.Reshape(target_shape=input_shape, name='out_recon')(x_recon)
# # two-input-two-output keras Model
# return models.Model([x, y], [out_caps, x_recon])
def get_calls():
from keras import callbacks as C
import math
cycles = 50
calls = list()
calls.append( C.ModelCheckpoint(args.save_dir + '/weights-{epoch:02d}.h5', monitor='val_loss', save_best_only=True, save_weights_only=True, verbose=0) )
calls.append( C.CSVLogger(args.save_dir + '/log.csv') )
calls.append( C.TensorBoard(log_dir=args.save_dir + '/tensorboard-logs/{}'.format(actual_partition), batch_size=args.batch, histogram_freq=args.debug) )
calls.append( C.EarlyStopping(monitor='val_loss', patience=args.patience, verbose=0) )
# calls.append( C.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, min_lr=0.0001, verbose=0) )
calls.append( C.LearningRateScheduler(schedule=lambda epoch: args.lr * (args.lr_decay ** epoch)) )
# calls.append( C.LearningRateScheduler(schedule=lambda epoch: args.lr * math.cos(1+( (epoch-1 % (args.epochs/cycles)))/(args.epochs/cycles) ) ))
# calls.append( C.LearningRateScheduler(schedule=lambda epoch: 0.001 * np.exp(-epoch / 10.)) )
return calls
def train(model, data, args, actual_partition):
from keras import callbacks as C
"""
Training a CapsuleNet
:param model: the CapsuleNet model
:param data: a tuple containing training and testing data, like `((x_train, y_train), (x_test, y_test))`
:param args: arguments
:return: The trained model
"""
# unpacking the data
(x_train, y_train), (x_test, y_test) = data
# callbacks
calls = get_calls()
lossfunc = ['mse', 'binary_crossentropy']
# compile the model
# validation_data=[[x_test, y_test], [y_test, x_test]]
# validation_split=0.1
seeds = [23, 29, 31, 37, 41, 43, 47, 53, 59, 61]
# for s in range(len(seeds[:args.seeds])):
for s in range(len(seeds)):
seed = seeds[s]
print('{} Train on SEED {}'.format(s, seed))
name = args.save_dir + '/{}_partition-{}_seed-{}_weights.h5'.format(prefix_name, actual_partition, s)
# print '\n\nNAME {}\n\n'.format(name)
# calls[0] = C.ModelCheckpoint(name + '-{epoch:02d}.h5', save_best_only=True, save_weights_only=True, verbose=1)
calls[0] = C.ModelCheckpoint(name, save_best_only=True, save_weights_only=True, verbose=0, monitor='val_loss',)
model.compile(optimizer=optimizers.Adam(lr=args.lr),
loss=[margin_loss, lossfunc[1]],
# loss=lossfunc[0],
loss_weights=[1., args.recon],
metrics=['accuracy']
)
kf = StratifiedShuffleSplit(n_splits=1, random_state=seed, test_size=0.05)
kf.get_n_splits(x_train, y_train)
for t_index, v_index in kf.split(x_train, y_train):
X_train, X_val = x_train[t_index], x_train[v_index]
Y_train, Y_val = y_train[t_index], y_train[v_index]
val_data=[[X_val, Y_val], [Y_val, X_val]]
model.fit([X_train, Y_train], [Y_train, X_train], batch_size=args.batch, epochs=args.epochs, validation_data=val_data, callbacks=calls, verbose=0)
# model.save_weights(args.save_dir + '/trained_model.h5')
# print('Trained model saved to \'%s/trained_model.h5\'' % args.save_dir)
# from utils import plot_log
# plot_log(args.save_dir + '/log.csv', show=True)
return model
def test(model, data):
from ml_statistics import BaseStatistics
x_test, y_test = data
Y = np.zeros(y_test.shape)
y_pred, x_recon = model.predict([x_test, Y], batch_size=8)
stats = BaseStatistics(y_test, y_pred)
return stats, y_pred
def load_dataset(organism):
from ml_data import SequenceNucsData, SequenceNucHotvector, SequenceMotifHot
global max_features
global maxlen
print('Load organism: {}'.format(organism))
npath, ppath = './fasta/{}_neg.fa'.format(organism), './fasta/{}_pos.fa'.format(organism)
print(npath, ppath)
k = 1
max_features = 4**k
# samples = SequenceNucHotvector(npath, ppath)
samples = SequenceMotifHot(npath, ppath)
X, y = samples.getX(), samples.getY()
# X = X.reshape(-1, 38, 79, 1).astype('float32')
# X = X.astype('int32')
# ini = 59
# # ini = 199
# X = X[:, (ini-30):(ini+11)]
y = y.astype('int32')
print('Input Shapes\nX: {} | y: {}'.format(X.shape, y.shape))
maxlen = X.shape[2]
return X, y
def load_partition(train_index, test_index, X, y):
x_train = X[train_index,:]
y_train = y[train_index]
x_test = X[test_index,:]
y_test = y[test_index]
# y_train = to_categorical(y_train.astype('float32'))
# y_test = to_categorical(y_test.astype('float32'))
return (x_train, y_train), (x_test, y_test)
def get_best_weight(args, actual_partition):
# Select weights
fpre = prefix_name+'_partition-{}'.format(actual_partition)
fsuf = '_weights.h5'
model_weights = [ x for x in os.listdir(args.save_dir+'/') if x.startswith(fpre) and x.endswith(fsuf) ]
    print('Testing weights', model_weights)
best_mcc = -10000.0
selected_weight = None
selected_stats = None
# Clear model
K.clear_session()
# Iterate over generated weights for this partition
for i in range(len(model_weights)):
weight_file = model_weights[i]
print('weight_file', weight_file)
# Create new model to receive this weights
model, eval_model, manipulate_model = CapsNet(input_shape=x_train.shape[1:], n_class=1, num_routing=args.rout)
model.load_weights(args.save_dir + '/' + weight_file)
# Get statistics for model loaded with current weights
stats, y_pred = test(model=model, data=(x_test, y_test))
print('MCC = {}'.format(stats.Mcc))
# Get current best weigth
if best_mcc < stats.Mcc:
best_mcc = stats.Mcc
selected_weight = weight_file
selected_stats = stats
print('Selected BEST')
            print(stats)
# Clear model
K.clear_session()
# Persist best weights
model, eval_model, manipulate_model = CapsNet(input_shape=x_train.shape[1:], n_class=1, num_routing=args.rout)
model.load_weights(args.save_dir + '/' + selected_weight)
model.save_weights(args.save_dir+'/{}_partition-{}_bestweights.h5'.format(prefix_name, actual_partition))
K.clear_session()
# Delete others weights
for i in range(len(model_weights)):
weight_file = model_weights[i]
print('Deleting weight: {}'.format(weight_file))
path = args.save_dir + '/' + weight_file
try:
os.remove(path)
except:
pass
return (selected_stats, selected_weight)
def allocate_stats(stats):
global results
results['partition'].append(actual_partition)
results['mcc'].append(stats.Mcc)
results['f1'].append(stats.F1)
results['sn'].append(stats.Sn)
results['sp'].append(stats.Sp)
results['acc'].append(stats.Acc)
results['prec'].append(stats.Prec)
results['tp'].append(stats.tp)
results['fp'].append(stats.fp)
results['tn'].append(stats.tn)
results['fn'].append(stats.fn)
def get_args():
# setting the hyper parameters
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--batch', default=32, type=int)
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--filters', default=256, type=int)
parser.add_argument('--kernel_size', default=256, type=int)
parser.add_argument('--patience', default=10, type=int)
parser.add_argument('--seeds', default=3, type=int)
parser.add_argument('--lr', default=0.001, type=float, help="Initial learning rate")
parser.add_argument('--lr_decay', default=0.9, type=float, help="The value multiplied by lr at each epoch. Set a larger value for larger epochs")
parser.add_argument('--recon', default=0.0005, type=float, help="The coefficient for the loss of decoder")
parser.add_argument('--rout', default=3, type=int, help="Number of iterations used in routing algorithm. Should > 0.") # num_routing should > 0
# parser.add_argument('--shift_fraction', default=0.0, type=float, help="Fraction of pixels to shift at most in each direction.")
parser.add_argument('--debug', default=1, type=int) # debug>0 will save weights by TensorBoard
parser.add_argument('--save_dir', default='./result')
parser.add_argument('--is_training', default=1, type=int, help="Size of embedding vector. Should > 0.")
parser.add_argument('--weights', default=None)
parser.add_argument('-o', '--organism', default=None, help="The organism used for test. Generate auto path for fasta files. Should be specified when testing")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = get_args()
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
global prefix_name
    # example CLI invocation: python capsulenet-onehot.py -o Bacillus --filters 256 --kernel_size 9 --lr 0.001 --lr_decay 0.9 --recon 0.001 --rout 3 --batch 8 --patience 1 --seeds 1
args_names = (
('capsulenet', 'onehot'),
('org', args.organism),
('filters', args.filters),
('kernel_size', args.kernel_size),
('lr', args.lr),
('decay', args.lr_decay),
('recon', args.recon),
('rout', args.rout),
('batch', args.batch),
('patience', args.patience),
('seeds', args.seeds)
)
prefix_name = ( '_'.join( ('-'.join(str(y) for y in x)) for x in args_names) )
    print('*'*100)
    print(prefix_name)
    print('*'*100)
# load data
X, y = load_dataset(args.organism)
# (x_train, y_train), (x_test, y_test) = load_imdb()
kf = StratifiedShuffleSplit(n_splits=5, random_state=34267)
kf.get_n_splits(X, y)
actual_partition = 0
for train_index, test_index in kf.split(X, y):
actual_partition+=1
print('>>> Testing PARTITION {}'.format(actual_partition))
(x_train, y_train), (x_test, y_test) = load_partition(train_index, test_index, X, y)
print(x_train.shape)
print(y_train.shape)
# Define model
model, eval_model, manipulate_model = CapsNet(input_shape=x_train.shape[1:], n_class=1, num_routing=args.rout)
model.summary()
# plot_model(model, to_file=args.save_dir + '/model.png', show_shapes=True)
# Train model and get weights
train(model=model, data=((x_train, y_train), (x_test, y_test)), args=args, actual_partition=actual_partition)
K.clear_session()
# Select best weights for this partition
(stats, weight_file) = get_best_weight(args, actual_partition)
print('Selected BEST: {} ({})'.format(weight_file, stats.Mcc))
# model.save_weights(args.save_dir + '/best_trained_model_partition_{}.h5'.format(actual_partition) )
# print('Best Trained model for partition {} saved to \'%s/best_trained_model_partition_{}.h5\''.format(actual_partition, args.save_dir, actual_partition))
# Allocate results of best weights for this partition
allocate_stats(stats)
# break
# Write results of partitions to CSV
df = pd.DataFrame(results, columns=headers)
df.to_csv('results_'+prefix_name+'.csv')
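The script imports `margin_loss` from a local `metrics` module that is not included here. As a reference, the margin loss from the Dynamic Routing paper (Sabour et al., 2017), which that helper presumably implements, looks like the sketch below (`m_plus`, `m_minus`, and `lam` are the paper's defaults).

```python
from keras import backend as K

def margin_loss_sketch(y_true, y_pred, m_plus=0.9, m_minus=0.1, lam=0.5):
    # L_k = T_k * max(0, m+ - ||v_k||)^2 + lam * (1 - T_k) * max(0, ||v_k|| - m-)^2
    L = y_true * K.square(K.maximum(0., m_plus - y_pred)) \
        + lam * (1. - y_true) * K.square(K.maximum(0., y_pred - m_minus))
    return K.mean(K.sum(L, axis=1))
```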
avg_line_length: 40.256345 | max_line_length: 162 | alphanum_fraction: 0.660173

hexsha: f5f647035ddba3705858201b79ac49b7a209fff2 | size: 3,629 | ext: py | lang: Python
max_stars: unet/module.py | gaungalif/unet.pytorch @ 5b03105d58418fef37f020e1e7aeff65eb9accf6 | ["Apache-2.0"] | count null | dates null
max_issues: same path/repo/head/licenses | count null | dates null
max_forks: same path/repo/head/licenses | count null | dates null
content:
import pytorch_lightning as pl
from pytorch_lightning.metrics import functional as FM
import torch.optim as optim
import torchmetrics
from unet.base import *
class UNetEncoder(nn.Module):
def __init__(self, in_chan, start_feat=64):
super(UNetEncoder, self).__init__()
self.out_chan = start_feat * 8
self.inconv = InConv(in_chan, start_feat)
self.down1 = DownConv(start_feat, start_feat*2)
self.down2 = DownConv(start_feat*2, start_feat*4)
self.down3 = DownConv(start_feat*4, start_feat*8)
self.down4 = DownConv(start_feat*8, start_feat*8)
def forward(self, x):
inc = self.inconv(x)
dc1 = self.down1(inc)
dc2 = self.down2(dc1)
dc3 = self.down3(dc2)
dc4 = self.down4(dc3)
return dc4, dc3, dc2, dc1, inc
class UNetDecoder(nn.Module):
def __init__(self, in_chan, n_classes):
super(UNetDecoder, self).__init__()
self.up1 = UpConv(in_chan, in_chan//4)
self.up2 = UpConv(in_chan//2, in_chan//8)
self.up3 = UpConv(in_chan//4, in_chan//16)
self.up4 = UpConv(in_chan//8, in_chan//16)
self.outconv = OutConv(in_chan//16, n_classes)
def forward(self, dc4, dc3, dc2, dc1, inc):
up1 = self.up1(dc4, dc3)
up2 = self.up2(up1, dc2)
up3 = self.up3(up2, dc1)
up4 = self.up4(up3, inc)
out = self.outconv(up4)
return out
class UNet(pl.LightningModule):
def __init__(self, in_chan=3, n_classes=1, start_feat=32, **kwargs):
super(UNet, self).__init__()
self.encoder_in_chan = in_chan
self.decoder_in_chan = start_feat * 16
self.start_feat = start_feat
self.criterion = nn.BCEWithLogitsLoss()
self.encoder = UNetEncoder(in_chan=self.encoder_in_chan, start_feat=start_feat)
self.decoder = UNetDecoder(in_chan=self.decoder_in_chan, n_classes=n_classes)
self.trn_loss: torchmetrics.AverageMeter = torchmetrics.AverageMeter()
self.val_loss: torchmetrics.AverageMeter = torchmetrics.AverageMeter()
def forward_step(self, x):
dc4, dc3, dc2, dc1, inc = self.encoder(x)
out = self.decoder(dc4, dc3, dc2, dc1, inc)
return out
def forward(self, imgs):
output = self.forward_step(imgs)
return output
def backward(self, loss, optimizer, optimizer_idx):
loss.backward()
def shared_step(self, batch, batch_idx):
images, labels = batch
preds = self.forward(images)
loss = self.criterion(preds, labels)
return loss
def training_step(self, batch, batch_idx):
loss = self.shared_step(batch, batch_idx)
trn_loss = self.trn_loss(loss)
self.log('trn_step_loss', trn_loss, prog_bar=True, logger=True)
return loss
def training_epoch_end(self, outs):
self.log('trn_epoch_loss', self.trn_loss.compute(), logger=True)
def validation_step(self, batch, batch_idx):
loss = self.shared_step(batch, batch_idx)
val_loss = self.val_loss(loss)
self.log('val_step_loss', val_loss, prog_bar=True, logger=True)
return loss
def validation_epoch_end(self, outs):
self.log('val_epoch_loss', self.val_loss.compute(), logger=True)
def configure_optimizers(self):
return optim.Adam(self.parameters(), lr=0.00005)
if __name__ == '__main__':
model = UNet(in_chan=3, n_classes=1, start_feat=224)
input = torch.rand(1, 3, 224, 224)
out = model(input)
print(out.shape)
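Because `UNet` is a `LightningModule`, training is normally driven by a `Trainer`; a minimal sketch follows, assuming `train_loader` and `val_loader` are existing `DataLoader`s of (image, mask) batches.

```python
import pytorch_lightning as pl

model = UNet(in_chan=3, n_classes=1, start_feat=32)
trainer = pl.Trainer(max_epochs=10)
trainer.fit(model, train_loader, val_loader)   # uses the training/validation hooks defined above
```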
avg_line_length: 32.990909 | max_line_length: 87 | alphanum_fraction: 0.633232

hexsha: 2c86a353ed287b3f1d738341b3af803ef567a0a0 | size: 2,611 | ext: py | lang: Python
max_stars: examples/experiments/calm-textgame/drrn/memory.py | qxcv/jiminy-cricket @ 1c5469549f746a2b84cf0724f6e154a4c141187b | ["MIT"] | count 11 | 2021-09-16T20:24:56.000Z to 2022-02-18T21:28:58.000Z
max_issues: same path/repo/head/licenses | count null | dates null
max_forks: same path/repo/head/licenses | count 4 | 2021-09-23T20:59:47.000Z to 2022-03-30T00:02:23.000Z
content:
from collections import namedtuple
import random
State = namedtuple('State', ('obs', 'description', 'inventory', 'state', 'raw_state', 'env_hash'))
Transition = namedtuple('Transition', ('state', 'act', 'act_string', 'reward', 'next_state', 'next_acts', 'poss_acts', 'done'))
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, transition):
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = transition
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
class PrioritizedReplayMemory(object):
def __init__(self, capacity, priority_fraction):
self.priority_fraction = priority_fraction
self.alpha_capacity = int(capacity * priority_fraction)
self.beta_capacity = capacity - self.alpha_capacity
self.alpha_memory, self.beta_memory = [], []
self.alpha_position, self.beta_position = 0, 0
def clear_alpha(self):
self.alpha_memory = []
self.alpha_position = 0
def push(self, transition, is_prior=False):
"""Saves a transition."""
if self.priority_fraction == 0.0:
is_prior = False
if is_prior:
if len(self.alpha_memory) < self.alpha_capacity:
self.alpha_memory.append(None)
self.alpha_memory[self.alpha_position] = transition
self.alpha_position = (self.alpha_position + 1) % self.alpha_capacity
else:
if len(self.beta_memory) < self.beta_capacity:
self.beta_memory.append(None)
self.beta_memory[self.beta_position] = transition
self.beta_position = (self.beta_position + 1) % self.beta_capacity
def sample(self, batch_size):
if self.priority_fraction == 0.0:
from_beta = min(batch_size, len(self.beta_memory))
res = random.sample(self.beta_memory, from_beta)
else:
from_alpha = min(int(self.priority_fraction * batch_size), len(self.alpha_memory))
from_beta = min(batch_size - int(self.priority_fraction * batch_size), len(self.beta_memory))
res = random.sample(self.alpha_memory, from_alpha) + random.sample(self.beta_memory, from_beta)
random.shuffle(res)
return res
def __len__(self):
return len(self.alpha_memory) + len(self.beta_memory)
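A small usage sketch (editorial, not from the repo): pushing `Transition` tuples into the prioritized buffer and sampling a mixed batch; the field values are placeholders.

```python
memory = PrioritizedReplayMemory(capacity=10000, priority_fraction=0.5)
for step in range(64):
    t = Transition(state=None, act=0, act_string='look', reward=float(step % 2),
                   next_state=None, next_acts=[], poss_acts=[], done=False)
    memory.push(t, is_prior=(t.reward > 0))   # rewarded transitions go to the alpha buffer
batch = memory.sample(32)   # drawn from both buffers, then shuffled
print(len(batch), len(memory))
```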
avg_line_length: 38.970149 | max_line_length: 127 | alphanum_fraction: 0.646496

hexsha: 9ea5644be63bde0665977867f6003194d59dffba | size: 1,087 | ext: py | lang: Python
max_stars: challenges/left_join/conftest.py | asakatida/data-structures-and-algorithms.py @ 587d1a66a6c15a3c7d7786275608f065687e1810 | ["MIT"] | count null | dates null
max_issues: same path/repo/head/licenses | count 2 | 2020-09-24T13:13:49.000Z to 2021-06-25T15:15:35.000Z
max_forks: challenges/left_join/conftest.py | grandquista/data-structures-and-algorithms.py @ 587d1a66a6c15a3c7d7786275608f065687e1810 | ["MIT"] | count null | dates null
content:
from pytest import fixture
from data_structures.hash_table.hash_table import HashTable
@fixture
def abcd_table1():
    """Hash table preloaded with keys 'a', 'b', 'c', 'd'."""
hash_table = HashTable()
hash_table.set("a", object())
hash_table.set("b", object())
hash_table.set("c", object())
hash_table.set("d", object())
return hash_table
@fixture
def abcd_table2():
    """A second, independent hash table with keys 'a', 'b', 'c', 'd'."""
hash_table = HashTable()
hash_table.set("a", object())
hash_table.set("b", object())
hash_table.set("c", object())
hash_table.set("d", object())
return hash_table
@fixture
def efgh_table():
    """Hash table preloaded with keys 'e', 'f', 'g', 'h'."""
hash_table = HashTable()
hash_table.set("e", object())
hash_table.set("f", object())
hash_table.set("g", object())
hash_table.set("h", object())
return hash_table
@fixture
def aceg_table():
    """Hash table preloaded with keys 'a', 'c', 'e', 'g'."""
hash_table = HashTable()
hash_table.set("a", object())
hash_table.set("c", object())
hash_table.set("e", object())
hash_table.set("g", object())
return hash_table
@fixture
def empty_table():
    """An empty hash table."""
return HashTable()
avg_line_length: 18.423729 | max_line_length: 59 | alphanum_fraction: 0.602576

hexsha: 859ea97bbe8ab46dac889be8776cb864587cc582 | size: 5,883 | ext: py | lang: Python
max_stars: src/tabs/feelingLucky.py | EthanG45/CSE412-HAML-Project @ e6f754b2de35079453c1bf5e8814dc5fe4b6741c | ["MIT"] | count 1 | 2022-02-09T05:42:43.000Z to 2022-02-09T05:42:43.000Z
max_issues: same path/repo/head/licenses | count null | dates null
max_forks: same path/repo/head/licenses | count 3 | 2020-11-28T23:06:03.000Z to 2022-03-14T02:23:50.000Z
content:
import PySimpleGUI as sg
### #### #### #### #### #### #### #### #### ###
# FEELING LUCKY TABLE TABS #
### #### #### #### #### #### #### #### #### ###
class FeelingLuckyTab:
def __init__(self, db):
self.db = db
self.albumNameList = self.db.allAlbumName()
self.artistNameList = self.db.allArtistName()
self.songNameList = self.db.allSongName()
self.bandNameList = self.db.allBandName()
self.instrumentList = self.db.allInstName()
def updateLists(self):
self.albumNameList = self.db.allAlbumName()
self.artistNameList = self.db.allArtistName()
self.songNameList = self.db.allSongName()
self.bandNameList = self.db.allBandName()
self.instrumentList = self.db.allInstName()
def feelingLuckyTabGUI(self):
flArtistBySongName = sg.Tab(
'Artists',
[[sg.Text("Search Artist by Song Name")],
[sg.Input(key='-INPUT-ARTIST-F01-')],
[sg.Button('search', key='-BUTTON-ARTIST-F01-')],
[sg.Table(values=[[' ', ' ', ' ']], headings=[
'Artist Name', 'Age', 'Known For'], key='-TABLE-ARTIST-F01-', enable_events=True, size=(1220, 120))]
],
key='F01'
)
yearList = list(range(2021, 999, -1))
flSongByYearAndArtist = sg.Tab(
'Songs',
[[sg.Text("Search Song by Year and Artist")],
[sg.Text("Release Year"), sg.Combo(yearList, key='-INPUT-YEAR-F02-')],
[sg.Text("Artist"), sg.Listbox(values=self.artistNameList,
key='-INPUT-ARTIST-F02-', size=(50, 20))],
[sg.Button('search', key='-BUTTON-SONG-F02-')],
[sg.Table(values=[[' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']], headings=[
'Song', 'Album', 'Artist', 'Genre', 'Duration', 'Link',
'Release Year', 'Average Rating', 'Listeners', 'Rating'], key='-TABLE-SONG-F02-', enable_events=True, size=(1220, 120))]
],
key='F02'
)
flRecordLabelByAlbumName = sg.Tab(
'Record Label by Album',
[[sg.Text("Search Record Label Details by Album")],
[sg.Text("Album"), sg.Listbox(values=self.albumNameList, key='-INPUT-ALBUM-F03-', size=(50, 20))],
[sg.Button('search', key='-BUTTON-RECORD-LABEL-F03-')],
[sg.Table(values=[[' ', ' ', ' ']], headings=['CompanyName', 'Date Established', 'Label Location'], key='-TABLE-RECORD-LABEL-F03-', enable_events=True, size=(1220, 120))],
],
key='F03'
)
flRecordLabelBySongName = sg.Tab(
'Record Label by Song',
[[sg.Text("Search Record Label Details by Song")],
[sg.Text("Song"), sg.Listbox(values=self.songNameList, key='-INPUT-SONG-F04-', size=(50, 20))],
[sg.Button('search', key='-BUTTON-SONG-F04-')],
[sg.Table(values=[[' ', ' ', ' ', ' ']], headings=['CompanyName', 'Recent Album', 'Date Established', 'Label Location'], key='-TABLE-RECORD-LABEL-F04-', enable_events=True, size=(1220, 120))],
],
key='F04'
)
flRecordLabelByBandName = sg.Tab(
'Record Label by Band',
[[sg.Text("Search Record Label Details by Band")],
[sg.Text("Band"), sg.Listbox(values=self.bandNameList, key='-INPUT-BAND-F05-', size=(50, 20))],
[sg.Button('search', key='-BUTTON-RECORD-LABEL-F05-')],
[sg.Table(values=[[' ', ' ', ' ', ' ']], headings=['CompanyName', 'Recent Album', 'Date Established', 'Label Location'], key='-TABLE-RECORD-LABEL-F05-', enable_events=True, size=(1220, 120))],
],
key='F05'
)
flRecordLabelByInstrument = sg.Tab(
'Record Label by Instrument',
[[sg.Text("Search Record Label Details by Instrument")],
[sg.Text("Instrument"), sg.Listbox(values=self.instrumentList, key='-INPUT-INSTRUMENT-F06-', size=(50, 20))],
[sg.Button('search', key='-BUTTON-RECORD-LABEL-F06-')],
[sg.Table(values=[[' ', ' ', ' ', ' ']], headings=['CompanyName', 'Recent Album', 'Date Established', 'Label Location'], key='-TABLE-RECORD-LABEL-F06-', enable_events=True, size=(1220, 120))],
],
key='F06'
)
### #### #### #### #### #### #### #### #### ###
# END OF FEELING LUCKY TABLE TABS #
### #### #### #### #### #### #### #### #### ###
feelingLuckyTab = sg.Tab(
'Feeling Lucky',
[[sg.TabGroup(
[[
flArtistBySongName,
flSongByYearAndArtist,
flRecordLabelByAlbumName,
flRecordLabelBySongName,
flRecordLabelByBandName,
flRecordLabelByInstrument
]],
key='tabgroupFeelingLucky',
enable_events=True
) # end of TabGroup
]],
key='feeling_lucky_tab'
) # end of tab insights
self.updateLists()
return feelingLuckyTab
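A hypothetical wiring sketch: the returned `sg.Tab` is meant to sit inside a `TabGroup` of a PySimpleGUI window. The `db` object stands in for the project's database wrapper (its query methods are assumed, not shown here).

```python
# Hypothetical event loop around the tab built above.
tab = FeelingLuckyTab(db).feelingLuckyTabGUI()
window = sg.Window('HAML Project', [[sg.TabGroup([[tab]])]])
while True:
    event, values = window.read()
    if event == sg.WIN_CLOSED:
        break
window.close()
```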
avg_line_length: 49.436975 | max_line_length: 324 | alphanum_fraction: 0.448241

hexsha: 6f5aa9da51a6c5fb34fa259803e60044a6e37fc9 | size: 299 | ext: py | lang: Python
max_stars: is_natural_number/__init__.py | adamzerella/is-natural-number @ 584bc21a5d5d01ace31b9d94b44d32409dec233f | ["MIT"] | count null | dates null
max_issues: same path/repo/head/licenses | count 2 | 2019-05-03T14:55:22.000Z to 2019-05-03T15:02:59.000Z
max_forks: same path/repo/head/licenses | count null | dates null
content:
"""
Check if a value is a natural number.
@param value: Value to check
@param includeZero: Whether or not to consider 0 a natural number.
@return: True or False
"""
def isNaturalNumber(value, includeZero=False):
if( includeZero == True ):
return int(value) >= 0
else:
return int(value) >= 1
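A short usage illustration (not part of the package):

```python
print(isNaturalNumber(5))                    # True
print(isNaturalNumber(0))                    # False
print(isNaturalNumber(0, includeZero=True))  # True
print(isNaturalNumber(-3))                   # False
```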
avg_line_length: 21.357143 | max_line_length: 66 | alphanum_fraction: 0.705686

hexsha: a6540cbf09fad1cf9ef14b36bbd7b4601ca3594b | size: 14,236 | ext: py | lang: Python
max_stars: python/MLFQ/mlfq.py | yiGmMk/leetcode @ a6479865dec2a685e0071a32d5593ea62502823d | ["Apache-2.0"] | count null | dates null
max_issues: same path/repo/head/licenses | count null | dates null
max_forks: same path/repo/head/licenses | count null | dates null
content:
#! /usr/bin/env python
from __future__ import print_function
import sys
from optparse import OptionParser
import random
# to make Python2 and Python3 act the same -- how dumb
def random_seed(seed):
try:
random.seed(seed, version=1)
except:
random.seed(seed)
return
# finds the highest nonempty queue
# -1 if they are all empty
def FindQueue():
q = hiQueue
while q > 0:
if len(queue[q]) > 0:
return q
q -= 1
if len(queue[0]) > 0:
return 0
return -1
def Abort(str):
sys.stderr.write(str + '\n')
exit(1)
#
# PARSE ARGUMENTS
#
parser = OptionParser()
parser.add_option('-s', '--seed', help='the random seed',
default=0, action='store', type='int', dest='seed')
parser.add_option('-n', '--numQueues',
help='number of queues in MLFQ (if not using -Q)',
default=3, action='store', type='int', dest='numQueues')
parser.add_option('-q', '--quantum', help='length of time slice (if not using -Q)',
default=10, action='store', type='int', dest='quantum')
parser.add_option('-a', '--allotment', help='length of allotment (if not using -A)',
default=1, action='store', type='int', dest='allotment')
parser.add_option('-Q', '--quantumList',
help='length of time slice per queue level, specified as ' + \
'x,y,z,... where x is the quantum length for the highest ' + \
'priority queue, y the next highest, and so forth',
default='', action='store', type='string', dest='quantumList')
parser.add_option('-A', '--allotmentList',
help='length of time allotment per queue level, specified as ' + \
'x,y,z,... where x is the # of time slices for the highest ' + \
'priority queue, y the next highest, and so forth',
default='', action='store', type='string', dest='allotmentList')
parser.add_option('-j', '--numJobs', default=3, help='number of jobs in the system',
action='store', type='int', dest='numJobs')
parser.add_option('-m', '--maxlen', default=100, help='max run-time of a job ' +
'(if randomly generating)', action='store', type='int',
dest='maxlen')
parser.add_option('-M', '--maxio', default=10,
help='max I/O frequency of a job (if randomly generating)',
action='store', type='int', dest='maxio')
parser.add_option('-B', '--boost', default=0,
help='how often to boost the priority of all jobs back to ' +
'high priority', action='store', type='int', dest='boost')
parser.add_option('-i', '--iotime', default=5,
help='how long an I/O should last (fixed constant)',
action='store', type='int', dest='ioTime')
parser.add_option('-S', '--stay', default=False,
help='reset and stay at same priority level when issuing I/O',
action='store_true', dest='stay')
parser.add_option('-I', '--iobump', default=False,
help='if specified, jobs that finished I/O move immediately ' + \
'to front of current queue',
action='store_true', dest='iobump')
parser.add_option('-l', '--jlist', default='',
help='a comma-separated list of jobs to run, in the form ' + \
'x1,y1,z1:x2,y2,z2:... where x is start time, y is run ' + \
'time, and z is how often the job issues an I/O request',
action='store', type='string', dest='jlist')
parser.add_option('-c', help='compute answers for me', action='store_true',
default=True, dest='solve')
(options, args) = parser.parse_args()
random.seed(options.seed)
# MLFQ: How Many Queues
numQueues = options.numQueues
quantum = {}
if options.quantumList != '':
    # instead, extract number of queues and their time slices
quantumLengths = options.quantumList.split(',')
numQueues = len(quantumLengths)
qc = numQueues - 1
for i in range(numQueues):
quantum[qc] = int(quantumLengths[i])
qc -= 1
else:
for i in range(numQueues):
quantum[i] = int(options.quantum)
allotment = {}
if options.allotmentList != '':
allotmentLengths = options.allotmentList.split(',')
if numQueues != len(allotmentLengths):
print('number of allotments specified must match number of quantums')
exit(1)
qc = numQueues - 1
for i in range(numQueues):
allotment[qc] = int(allotmentLengths[i])
if qc != 0 and allotment[qc] <= 0:
print('allotment must be positive integer')
exit(1)
qc -= 1
else:
for i in range(numQueues):
allotment[i] = int(options.allotment)
hiQueue = numQueues - 1
# MLFQ: I/O Model
# the time for each IO: not great to have a single fixed time but...
ioTime = int(options.ioTime)
# This tracks when IOs and other interrupts are complete
ioDone = {}
# This stores all info about the jobs
job = {}
# seed the random generator
random_seed(options.seed)
# jlist 'startTime,runTime,ioFreq:startTime,runTime,ioFreq:...'
jobCnt = 0
if options.jlist != '':
allJobs = options.jlist.split(':')
for j in allJobs:
jobInfo = j.split(',')
if len(jobInfo) != 3:
print('Badly formatted job string. Should be x1,y1,z1:x2,y2,z2:...')
print('where x is the startTime, y is the runTime, and z is the I/O frequency.')
exit(1)
assert(len(jobInfo) == 3)
startTime = int(jobInfo[0])
runTime = int(jobInfo[1])
ioFreq = int(jobInfo[2])
job[jobCnt] = {'currPri':hiQueue, 'ticksLeft':quantum[hiQueue],
'allotLeft':allotment[hiQueue], 'startTime':startTime,
'runTime':runTime, 'timeLeft':runTime, 'ioFreq':ioFreq, 'doingIO':False,
'firstRun':-1}
if startTime not in ioDone:
ioDone[startTime] = []
ioDone[startTime].append((jobCnt, 'JOB BEGINS'))
jobCnt += 1
else:
# do something random
for j in range(options.numJobs):
startTime = 0
runTime = int(random.random() * (options.maxlen - 1) + 1)
ioFreq = int(random.random() * (options.maxio - 1) + 1)
job[jobCnt] = {'currPri':hiQueue, 'ticksLeft':quantum[hiQueue],
'allotLeft':allotment[hiQueue], 'startTime':startTime,
'runTime':runTime, 'timeLeft':runTime, 'ioFreq':ioFreq, 'doingIO':False,
'firstRun':-1}
if startTime not in ioDone:
ioDone[startTime] = []
ioDone[startTime].append((jobCnt, 'JOB BEGINS'))
jobCnt += 1
numJobs = len(job)
print('Here is the list of inputs:')
print('OPTIONS jobs', numJobs)
print('OPTIONS queues', numQueues)
for i in range(len(quantum)-1,-1,-1):
print('OPTIONS allotments for queue %2d is %3d' % (i, allotment[i]))
print('OPTIONS quantum length for queue %2d is %3d' % (i, quantum[i]))
print('OPTIONS boost', options.boost)
print('OPTIONS ioTime', options.ioTime)
print('OPTIONS stayAfterIO', options.stay)
print('OPTIONS iobump', options.iobump)
print('\n')
print('For each job, three defining characteristics are given:')
print(' startTime : at what time does the job enter the system')
print(' runTime : the total CPU time needed by the job to finish')
print(' ioFreq : every ioFreq time units, the job issues an I/O')
print(' (the I/O takes ioTime units to complete)\n')
print('Job List:')
for i in range(numJobs):
print(' Job %2d: startTime %3d - runTime %3d - ioFreq %3d' % (i, job[i]['startTime'], job[i]['runTime'], job[i]['ioFreq']))
print('')
if options.solve == False:
print('Compute the execution trace for the given workloads.')
print('If you would like, also compute the response and turnaround')
print('times for each of the jobs.')
print('')
print('Use the -c flag to get the exact results when you are finished.\n')
exit(0)
# initialize the MLFQ queues
queue = {}
for q in range(numQueues):
queue[q] = []
# TIME IS CENTRAL
currTime = 0
# use these to know when we're finished
totalJobs = len(job)
finishedJobs = 0
print('\nExecution Trace:\n')
while finishedJobs < totalJobs:
# find highest priority job
# run it until either
# (a) the job uses up its time quantum
# (b) the job performs an I/O
# check for priority boost
if options.boost > 0 and currTime != 0:
if currTime % options.boost == 0:
print('[ time %d ] BOOST ( every %d )' % (currTime, options.boost))
# remove all jobs from queues (except high queue) and put them in high queue
for q in range(numQueues-1):
for j in queue[q]:
if job[j]['doingIO'] == False:
queue[hiQueue].append(j)
queue[q] = []
# change priority to high priority
# reset number of ticks left for all jobs (just for lower jobs?)
# add to highest run queue (if not doing I/O)
for j in range(numJobs):
# print('-> Boost %d (timeLeft %d)' % (j, job[j]['timeLeft']))
if job[j]['timeLeft'] > 0:
# print('-> FinalBoost %d (timeLeft %d)' % (j, job[j]['timeLeft']))
job[j]['currPri'] = hiQueue
                    job[j]['ticksLeft'] = quantum[hiQueue]
                    job[j]['allotLeft'] = allotment[hiQueue]
# print('BOOST END: QUEUES look like:', queue)
# check for any I/Os done
if currTime in ioDone:
for (j, type) in ioDone[currTime]:
q = job[j]['currPri']
job[j]['doingIO'] = False
print('[ time %d ] %s by JOB %d' % (currTime, type, j))
if options.iobump == False or type == 'JOB BEGINS':
queue[q].append(j)
else:
queue[q].insert(0, j)
# now find the highest priority job
currQueue = FindQueue()
if currQueue == -1:
print('[ time %d ] IDLE' % (currTime))
currTime += 1
continue
# there was at least one runnable job, and hence ...
currJob = queue[currQueue][0]
if job[currJob]['currPri'] != currQueue:
Abort('currPri[%d] does not match currQueue[%d]' % (job[currJob]['currPri'], currQueue))
job[currJob]['timeLeft'] -= 1
job[currJob]['ticksLeft'] -= 1
if job[currJob]['firstRun'] == -1:
job[currJob]['firstRun'] = currTime
runTime = job[currJob]['runTime']
ioFreq = job[currJob]['ioFreq']
ticksLeft = job[currJob]['ticksLeft']
allotLeft = job[currJob]['allotLeft']
timeLeft = job[currJob]['timeLeft']
print('[ time %d ] Run JOB %d at PRIORITY %d [ TICKS %d ALLOT %d TIME %d (of %d) ]' % \
(currTime, currJob, currQueue, ticksLeft, allotLeft, timeLeft, runTime))
if timeLeft < 0:
Abort('Error: should never have less than 0 time left to run')
# UPDATE TIME
currTime += 1
# CHECK FOR JOB ENDING
if timeLeft == 0:
print('[ time %d ] FINISHED JOB %d' % (currTime, currJob))
finishedJobs += 1
job[currJob]['endTime'] = currTime
# print('BEFORE POP', queue)
done = queue[currQueue].pop(0)
# print('AFTER POP', queue)
assert(done == currJob)
continue
# CHECK FOR IO
issuedIO = False
if ioFreq > 0 and (((runTime - timeLeft) % ioFreq) == 0):
# time for an IO!
print('[ time %d ] IO_START by JOB %d' % (currTime, currJob))
issuedIO = True
desched = queue[currQueue].pop(0)
assert(desched == currJob)
job[currJob]['doingIO'] = True
# this does the bad rule -- reset your tick counter if you stay at the same level
if options.stay == True:
job[currJob]['ticksLeft'] = quantum[currQueue]
job[currJob]['allotLeft'] = allotment[currQueue]
# add to IO Queue: but which queue?
futureTime = currTime + ioTime
if futureTime not in ioDone:
ioDone[futureTime] = []
print('IO DONE')
ioDone[futureTime].append((currJob, 'IO_DONE'))
# CHECK FOR QUANTUM ENDING AT THIS LEVEL (BUT REMEMBER, THERE STILL MAY BE ALLOTMENT LEFT)
if ticksLeft == 0:
if issuedIO == False:
            # IO HAS NOT BEEN ISSUED (therefore pop from queue)
desched = queue[currQueue].pop(0)
assert(desched == currJob)
job[currJob]['allotLeft'] = job[currJob]['allotLeft'] - 1
if job[currJob]['allotLeft'] == 0:
# this job is DONE at this level, so move on
if currQueue > 0:
# in this case, have to change the priority of the job
job[currJob]['currPri'] = currQueue - 1
job[currJob]['ticksLeft'] = quantum[currQueue-1]
job[currJob]['allotLeft'] = allotment[currQueue-1]
if issuedIO == False:
queue[currQueue-1].append(currJob)
else:
job[currJob]['ticksLeft'] = quantum[currQueue]
job[currJob]['allotLeft'] = allotment[currQueue]
if issuedIO == False:
queue[currQueue].append(currJob)
else:
# this job has more time at this level, so just push it to end
job[currJob]['ticksLeft'] = quantum[currQueue]
if issuedIO == False:
queue[currQueue].append(currJob)
# print out statistics
print('')
print('Final statistics:')
responseSum = 0
turnaroundSum = 0
for i in range(numJobs):
response = job[i]['firstRun'] - job[i]['startTime']
turnaround = job[i]['endTime'] - job[i]['startTime']
print(' Job %2d: startTime %3d - response %3d - turnaround %3d' % (i, job[i]['startTime'], response, turnaround))
responseSum += response
turnaroundSum += turnaround
print('\n Avg %2d: startTime n/a - response %.2f - turnaround %.2f' % (i, float(responseSum)/numJobs, float(turnaroundSum)/numJobs))
print('\n')
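# -----------------------------------------------------------------------------
# Illustrative sketch only (not part of the original simulator): the -Q/-A
# flags above map a comma-separated list onto queues so that the FIRST entry
# describes the HIGHEST-priority queue. The hypothetical helper below mirrors
# that convention for a single list and is never called by the simulator.
# -----------------------------------------------------------------------------
def _example_parse_per_queue_list(spec):
    """Map a 'hi,...,lo' comma-separated list onto queue indices.

    Queue numQueues-1 is the highest priority and queue 0 the lowest,
    matching how quantumList/allotmentList are unpacked above.
    """
    values = [int(x) for x in spec.split(',')]
    num_queues = len(values)
    # the highest queue index gets the first value, and so on downwards
    return {num_queues - 1 - i: v for i, v in enumerate(values)}

# Example: _example_parse_per_queue_list('10,20,30') -> {2: 10, 1: 20, 0: 30}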
| 38.16622
| 133
| 0.577269
|
3a02ccff847c39e1662fab63435cbb0e7e314559
| 1,007
|
py
|
Python
|
tfcontracts/combined_contract.py
|
vasiliykarasev/tfcontracts
|
14a1cff3ce01b01b32ebeb5e16bec6263cec8bb5
|
[
"MIT"
] | 2
|
2021-01-19T18:34:31.000Z
|
2021-01-21T01:15:24.000Z
|
tfcontracts/combined_contract.py
|
vasiliykarasev/tfcontracts
|
14a1cff3ce01b01b32ebeb5e16bec6263cec8bb5
|
[
"MIT"
] | null | null | null |
tfcontracts/combined_contract.py
|
vasiliykarasev/tfcontracts
|
14a1cff3ce01b01b32ebeb5e16bec6263cec8bb5
|
[
"MIT"
] | null | null | null |
from . import contract
from typing import Any, Callable, Sequence
class CombinedContract(contract.FunctionContract):
"""A contract that internally represents and enforces a contract collection.
Example:
  >>> @CombinedContract(
  ...     [ShapeContract(), DTypeContract(), ValueContract()])
  ... def my_func(x, y):
  ...   pass  # function body
"""
def __init__(self, contracts: Sequence[contract.FunctionContract]) -> None:
self._contracts = contracts
def check_precondition(self, func: Callable[..., Any], *args,
**kwargs) -> None:
"""Checks that function arguments satisfy preconditions."""
    for c in self._contracts:
      c.check_precondition(func, *args, **kwargs)
def check_postcondition(self, func_results: Any,
func: Callable[..., Any]) -> None:
"""Checks that function arguments satisfy postconditions."""
for contract in self._contracts:
contract.check_precondition(func_results, func)
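# -----------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module). It assumes the
# FunctionContract interface used above (check_precondition/check_postcondition)
# and the decorator usage shown in the class docstring; NonNoneResultContract
# and _example_usage are hypothetical names.
# -----------------------------------------------------------------------------
class NonNoneResultContract(contract.FunctionContract):
  """Hypothetical contract: the wrapped function must not return None."""

  def check_precondition(self, func: Callable[..., Any], *args,
                         **kwargs) -> None:
    pass  # no requirements on the inputs

  def check_postcondition(self, func_results: Any,
                          func: Callable[..., Any]) -> None:
    assert func_results is not None


def _example_usage() -> Any:
  # Combine several contracts into a single decorator, as in the docstring.
  @CombinedContract([NonNoneResultContract()])
  def add(x: float, y: float) -> float:
    return x + y

  return add(1.0, 2.0)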
| 34.724138
| 78
| 0.670308
|
85c9f11607315aa5833527c1dc66c19418c1960c
| 13,745
|
py
|
Python
|
scripts/RosettaTR/run_fold_and_dock.py
|
RosettaCommons/RFDesign
|
b404b8b2c57f89c047529c30259aeeb8f6012b61
|
[
"MIT"
] | 45
|
2022-01-12T04:39:36.000Z
|
2022-03-25T12:33:36.000Z
|
scripts/RosettaTR/run_fold_and_dock.py
|
ZhuofanShen/RFDesign
|
9fea2bafbbb7cbf702c9884e8b3ec69ed50ff2f5
|
[
"MIT"
] | 6
|
2022-01-15T16:48:39.000Z
|
2022-03-15T16:20:34.000Z
|
scripts/RosettaTR/run_fold_and_dock.py
|
ZhuofanShen/RFDesign
|
9fea2bafbbb7cbf702c9884e8b3ec69ed50ff2f5
|
[
"MIT"
] | 10
|
2022-01-12T11:28:03.000Z
|
2022-03-30T11:36:41.000Z
|
import sys,os,json
import tempfile
import numpy as np
from arguments_fold_and_dock import *
from utils_fold_and_dock import *
from pyrosetta import *
from pyrosetta.rosetta.protocols.minimization_packing import MinMover
from pyrosetta.teaching import *
vdw_weight = {0: 3.0, 1: 5.0, 2: 10.0}
rsr_dist_weight = {0: 3.0, 1: 2.0, 3: 1.0}
rsr_orient_weight = {0: 1.0, 1: 1.0, 3: 0.5}
def main():
########################################################
# process inputs
########################################################
# read params
scriptdir = os.path.dirname(os.path.realpath(__file__))
with open(scriptdir + '/data/params.json') as jsonfile:
params = json.load(jsonfile)
# get command line arguments
args = get_args(params)
print(args)
if os.path.exists(args.OUT):
return
# init PyRosetta
init_cmd = list()
init_cmd.append("-multithreading:interaction_graph_threads 1 -multithreading:total_threads 1")
init_cmd.append("-hb_cen_soft")
init_cmd.append("-detect_disulf -detect_disulf_tolerance 2.0") # detect disulfide bonds based on Cb-Cb distance (CEN mode) or SG-SG distance (FA mode)
init_cmd.append("-relax:dualspace true -relax::minimize_bond_angles -default_max_cycles 200")
init_cmd.append("-mute all")
init_cmd.append("-unmute core.scoring.ScoreFunction")
#init_cmd.append("-unmute protocol")
init_cmd.append("-ex1 -ex2aro")
init_cmd.append("-mh:path:scores_BB_BB /home/aivan/GIT/trRosetta2/fold_and_dock.homo/motif_dock/xh_16_")
init_cmd.append("-mh:score:use_ss1 false")
init_cmd.append("-mh:score:use_ss2 false")
init_cmd.append("-mh:score:use_aa1 true")
init_cmd.append("-mh:score:use_aa2 true")
init(" ".join(init_cmd))
########################################################
# Scoring functions and movers
########################################################
sf = ScoreFunction()
sf.add_weights_from_file(scriptdir + '/data/scorefxn.wts')
sf1 = ScoreFunction()
sf1.add_weights_from_file(scriptdir + '/data/scorefxn1.wts')
sf_vdw = ScoreFunction()
sf_vdw.add_weights_from_file(scriptdir + '/data/scorefxn_vdw.wts')
sf_cart = ScoreFunction()
sf_cart.add_weights_from_file(scriptdir + '/data/scorefxn_cart.wts')
sf_dock = create_score_function('motif_dock_score')
sf_dock.set_weight(rosetta.core.scoring.atom_pair_constraint, 1.0)
mmap = MoveMap()
mmap.set_bb(True)
mmap.set_chi(False)
mmap.set_jump(True)
mmap_rb = MoveMap()
mmap_rb.set_bb(False)
mmap_rb.set_chi(False)
mmap_rb.set_jump(True)
min_mover1 = MinMover(mmap, sf1, 'lbfgs_armijo_nonmonotone', 0.001, True)
min_mover1.max_iter(1000)
min_mover_vdw = MinMover(mmap, sf_vdw, 'lbfgs_armijo_nonmonotone', 0.001, True)
min_mover_vdw.max_iter(500)
min_mover_vdw_rb = MinMover(mmap_rb, sf_vdw, 'lbfgs_armijo_nonmonotone', 0.001, True)
min_mover_vdw_rb.max_iter(500)
min_mover_cart = MinMover(mmap, sf_cart, 'lbfgs_armijo_nonmonotone', 0.000001, True)
min_mover_cart.max_iter(300)
min_mover_cart.cartesian(True)
# read and process restraints & sequence
seq = read_fasta(args.FASTA)
L1 = seq.index('/')
L = len(seq) - seq.count('/')
params['seq'] = seq.replace('/', '')
rst = gen_rst(params, L1)
########################################################
# initialize pose
########################################################
pose0 = pose_from_sequence(seq, 'centroid')
setup_foldtree(pose0, "A_B", Vector1([1]))
if (args.bb == ''):
print('setting random (phi,psi,omega)...')
set_random_dihedral(pose0, L)
else:
print('setting predicted (phi,psi,omega)...')
bb = np.load(args.bb)
set_predicted_dihedral(pose0,bb['phi'],bb['psi'],bb['omega'])
remove_clash(sf_vdw, min_mover_vdw, pose0)
#
rst_user = None
if os.path.exists(args.rsr):
rst_user = rosetta.protocols.constraint_movers.ConstraintSetMover()
rst_user.add_constraints(True)
rst_user.constraint_file(args.rsr)
Emin = 99999.9
########################################################
# minimization
########################################################
for run in range(params['NRUNS']):
        # define repeat_mover here (update vdw weights: weak (3.0) -> strong (10.0))
sf.set_weight(rosetta.core.scoring.vdw, vdw_weight.setdefault(run, 10.0))
sf.set_weight(rosetta.core.scoring.atom_pair_constraint, rsr_dist_weight.setdefault(run, 1.0))
sf.set_weight(rosetta.core.scoring.dihedral_constraint, rsr_orient_weight.setdefault(run, 0.5))
sf.set_weight(rosetta.core.scoring.angle_constraint, rsr_orient_weight.setdefault(run, 0.5))
min_mover = MinMover(mmap, sf, 'lbfgs_armijo_nonmonotone', 0.001, True)
min_mover.max_iter(1000)
repeat_mover = RepeatMover(min_mover, 3)
#
pose = Pose()
pose.assign(pose0)
pose.remove_constraints()
if rst_user != None:
rst_user.apply(pose)
if run > 0:
# diversify backbone
dphi = np.random.uniform(-10,10,L)
dpsi = np.random.uniform(-10,10,L)
for i in range(1,L+1):
pose.set_phi(i,pose.phi(i)+dphi[i-1])
pose.set_psi(i,pose.psi(i)+dpsi[i-1])
# remove clashes
remove_clash(sf_vdw, min_mover_vdw, pose)
# Save checkpoint
if args.save_chk:
pose.dump_pdb("%s_run%d_init.pdb"%('.'.join(args.OUT.split('.')[:-1]), run))
if args.mode == 0:
# short
print('short')
add_rst(pose, rst, 3, 12, params)
repeat_mover.apply(pose)
remove_clash(sf_vdw, min_mover1, pose)
min_mover_cart.apply(pose)
if args.save_chk:
pose.dump_pdb("%s_run%d_mode%d_step%d.pdb"%('.'.join(args.OUT.split('.')[:-1]), run, args.mode, 0))
# medium
print('medium')
add_rst(pose, rst, 12, 24, params)
repeat_mover.apply(pose)
remove_clash(sf_vdw, min_mover1, pose)
min_mover_cart.apply(pose)
if args.save_chk:
pose.dump_pdb("%s_run%d_mode%d_step%d.pdb"%('.'.join(args.OUT.split('.')[:-1]), run, args.mode, 1))
# long
print('long')
add_rst(pose, rst, 24, len(seq), params)
repeat_mover.apply(pose)
remove_clash(sf_vdw, min_mover1, pose)
min_mover_cart.apply(pose)
if args.save_chk:
pose.dump_pdb("%s_run%d_mode%d_step%d.pdb"%('.'.join(args.OUT.split('.')[:-1]), run, args.mode, 2))
elif args.mode == 1:
# short + medium
print('short + medium')
add_rst(pose, rst, 3, 24, params)
repeat_mover.apply(pose)
remove_clash(sf_vdw, min_mover1, pose)
min_mover_cart.apply(pose)
if args.save_chk:
pose.dump_pdb("%s_run%d_mode%d_step%d.pdb"%('.'.join(args.OUT.split('.')[:-1]), run, args.mode, 0))
# long
print('long')
add_rst(pose, rst, 24, len(seq), params)
repeat_mover.apply(pose)
remove_clash(sf_vdw, min_mover1, pose)
min_mover_cart.apply(pose)
if args.save_chk:
pose.dump_pdb("%s_run%d_mode%d_step%d.pdb"%('.'.join(args.OUT.split('.')[:-1]), run, args.mode, 1))
elif args.mode == 2:
# short + medium + long
print('short + medium + long')
add_rst(pose, rst, 3, len(seq), params)
repeat_mover.apply(pose)
remove_clash(sf_vdw, min_mover1, pose)
min_mover_cart.apply(pose)
if args.save_chk:
pose.dump_pdb("%s_run%d_mode%d_step%d.pdb"%('.'.join(args.OUT.split('.')[:-1]), run, args.mode, 0))
#rigidbody_pert(pose)
dock_low_res(pose, sf_dock)
repeat_mover.apply(pose)
remove_clash(sf_vdw, min_mover1, pose)
min_mover_cart.apply(pose)
if args.save_chk:
pose.dump_pdb("%s_run%d_dock.pdb"%('.'.join(args.OUT.split('.')[:-1]), run))
# check whether energy has decreased
pose.conformation().detect_disulfides() # detect disulfide bonds
E = sf_cart(pose)
if E < Emin:
print("Energy(iter=%d): %.1f --> %.1f (accept)"%(run, Emin, E))
Emin = E
pose0 = pose.clone()
#pose0.assign(pose)
else:
print("Energy(iter=%d): %.1f --> %.1f (reject)"%(run, Emin, E))
## mutate ALA back to GLY
#for i,a in enumerate(seq_symm):
# if a == 'G':
# mutator = rosetta.protocols.simple_moves.MutateResidue(i+1,'GLY')
# mutator.apply(pose0)
# print('mutation: A%dG'%(i+1))
########################################################
# fix backbone geometry
########################################################
pose0.remove_constraints()
# apply more strict criteria to detect disulfide bond
# Set options for disulfide tolerance -> 1.0A
print (rosetta.basic.options.get_real_option('in:detect_disulf_tolerance'))
rosetta.basic.options.set_real_option('in:detect_disulf_tolerance', 1.0)
print (rosetta.basic.options.get_real_option('in:detect_disulf_tolerance'))
pose0.conformation().detect_disulfides()
    # Convert to all-atom representation
switch = SwitchResidueTypeSetMover("fa_standard")
switch.apply(pose0)
# idealize problematic local regions if exists
idealize = rosetta.protocols.idealize.IdealizeMover()
poslist = rosetta.utility.vector1_unsigned_long()
scorefxn=create_score_function('empty')
scorefxn.set_weight(rosetta.core.scoring.cart_bonded, 1.0)
scorefxn.score(pose0)
emap = pose0.energies()
print("idealize...")
for res in range(1,L+1):
cart = emap.residue_total_energy(res)
if cart > 50:
poslist.append(res)
print( "idealize %d %8.3f"%(res,cart) )
if len(poslist) > 0:
idealize.set_pos_list(poslist)
try:
idealize.apply(pose0)
except:
print('!!! idealization failed !!!')
# Save checkpoint
if args.save_chk:
pose0.dump_pdb("%s_before_relax.pdb"%'.'.join(args.OUT.split('.')[:-1]))
########################################################
# full-atom refinement
########################################################
if args.fastrelax == True:
mmap = MoveMap()
mmap.set_bb(True)
mmap.set_chi(True)
mmap.set_jump(True)
        # First round: Repeat 2 torsion-space relax w/ strong distogram/anglegram constraints
sf_fa_round1 = create_score_function('ref2015_cart')
sf_fa_round1.set_weight(rosetta.core.scoring.atom_pair_constraint, 3.0)
sf_fa_round1.set_weight(rosetta.core.scoring.dihedral_constraint, 1.0)
sf_fa_round1.set_weight(rosetta.core.scoring.angle_constraint, 1.0)
sf_fa_round1.set_weight(rosetta.core.scoring.pro_close, 0.0)
relax_round1 = rosetta.protocols.relax.FastRelax(sf_fa_round1, "%s/data/relax_round1.txt"%scriptdir)
relax_round1.set_movemap(mmap)
print('relax: First round... (focused on torsion space relaxation)')
params['PCUT'] = 0.15
pose0.remove_constraints()
if rst_user != None:
rst_user.apply(pose0)
add_rst(pose0, rst, 1, len(seq), params, nogly=True)
#add_rst(pose0, rst, 3, len(seq), params, nogly=True, use_orient=True)
relax_round1.apply(pose0)
# Set options for disulfide tolerance -> 0.5A
print (rosetta.basic.options.get_real_option('in:detect_disulf_tolerance'))
rosetta.basic.options.set_real_option('in:detect_disulf_tolerance', 0.5)
print (rosetta.basic.options.get_real_option('in:detect_disulf_tolerance'))
sf_dock = create_score_function("ref2015")
sf_dock.set_weight(rosetta.core.scoring.atom_pair_constraint, 1.0)
sf_dock.set_weight(rosetta.core.scoring.dihedral_constraint, 0.0)
sf_dock.set_weight(rosetta.core.scoring.angle_constraint, 0.0)
dock_high_res(pose0, sf_dock)
sf_fa = create_score_function('ref2015_cart')
sf_fa.set_weight(rosetta.core.scoring.atom_pair_constraint, 0.1)
sf_fa.set_weight(rosetta.core.scoring.dihedral_constraint, 0.0)
sf_fa.set_weight(rosetta.core.scoring.angle_constraint, 0.0)
relax_round2 = rosetta.protocols.relax.FastRelax(sf_fa, "%s/data/relax_round2.txt"%scriptdir)
relax_round2.set_movemap(mmap)
relax_round2.cartesian(True)
relax_round2.dualspace(True)
print('relax: Second round... (cartesian space)')
params['PCUT'] = 0.30 # To reduce the number of pair restraints..
pose0.remove_constraints()
pose0.conformation().detect_disulfides() # detect disulfide bond again w/ stricter cutoffs
add_rst(pose0, rst, 3, len(seq), params, nogly=True, use_orient=False)
if rst_user != None:
rst_user.apply(pose0)
relax_round2.apply(pose0)
# Re-evaluate score w/o any constraints
scorefxn_min=create_score_function('ref2015')
scorefxn_min.score(pose0)
########################################################
# save final model
########################################################
pose0.dump_pdb(args.OUT)
if __name__ == '__main__':
main()
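# -----------------------------------------------------------------------------
# Illustrative note (not part of the original protocol): the per-iteration
# score-term weights above are looked up with dict.setdefault(run, fallback),
# which returns the preset value for early runs and the fallback for every
# later run (and also inserts the fallback into the dict). The hypothetical
# helper below just materialises the resulting vdw schedule; it is never
# called by the protocol.
# -----------------------------------------------------------------------------
def _example_vdw_schedule(n_runs=5):
    schedule = {0: 3.0, 1: 5.0, 2: 10.0}   # same presets as vdw_weight above
    return [schedule.setdefault(run, 10.0) for run in range(n_runs)]
    # -> [3.0, 5.0, 10.0, 10.0, 10.0]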
| 37.760989
| 154
| 0.593889
|
0d89a27ff0f565778bd52afe868e944b82b033b0
| 6,432
|
py
|
Python
|
kuroganeHammerAPIAccess.py
|
fpdotmonkey/escargoon
|
b70d986dd3cd434dcce56cb567d48f096d18a4a0
|
[
"MIT"
] | null | null | null |
kuroganeHammerAPIAccess.py
|
fpdotmonkey/escargoon
|
b70d986dd3cd434dcce56cb567d48f096d18a4a0
|
[
"MIT"
] | null | null | null |
kuroganeHammerAPIAccess.py
|
fpdotmonkey/escargoon
|
b70d986dd3cd434dcce56cb567d48f096d18a4a0
|
[
"MIT"
] | null | null | null |
import sys
import urllib3
import json
import requests
# baseURL = 'https://api.kuroganehammer.com/api/characters/name/'
# kingDedede = 'bayonetta/'
# http = urllib3.PoolManager()
# request = http.request('GET', baseURL + kingDedede + 'moves?game=ultimate')
# data = json.loads(request.data.decode('utf-8'))
# print(data)
# ownerID = data['OwnerId']
# dddMoves = http.request('GET', 'https://api.kuroganehammer.com/api/%i/movements' % ownerID)
#print(json.loads(request.data.decode('utf-8')))
def makeAPICall(character, attribute, game, trySmash4):
baseURL = 'https://api.kuroganehammer.com/api/characters/name/{}/{}?game={}'
apiURL = baseURL.format(character, attribute, game)
request = requests.get(apiURL)
statusCode = request.status_code
data = request.json()
try:
if data == [] or data['Message'] == "Resource of type 'ICharacter' not found.":
if trySmash4 and game == 'ultimate':
print("%s data for %s in %s could not be found. Retrying with Smash4 data." % (attribute, character, game), file=sys.stderr)
data = makeAPICall(character, attribute, 'smash4', False)
else:
errorReturn = {}
errorReturn['Error'] = "%s data for %s in %s could not be found. You may have spelled something wrong or this character does not have data available for that game." % (attribute, character, game)
errorReturn['Status Code'] = statusCode
return errorReturn
    except (KeyError, TypeError):
        # data is either a list or a dict without a 'Message' key -- nothing to do
        pass
return data
def getCharacterData(character, game='ultimate', trySmash4=True):
dataCatagory = ''
apiData = makeAPICall(character, dataCatagory, game, trySmash4)
try:
desiredData = ('MainImageUrl', 'ThumbnailUrl', 'ColorTheme', 'DisplayName')
characterData = {}
characterData['Character'] = apiData['Name']
characterData['Game'] = apiData['Game']
characterData['Data Catagory'] = dataCatagory
for attribute in desiredData:
characterData[attribute] = apiData[attribute]
return characterData
except KeyError:
return apiData
def getMoveList(character, game='ultimate', trySmash4=True):
dataCatagory = 'moves'
apiData = makeAPICall(character, dataCatagory, game, trySmash4)
try:
moveList = []
for move in apiData:
moveList.append(move['Name'])
moveListData = {}
moveListData['Character'] = apiData[0]['Owner']
moveListData['Game'] = apiData[0]['Game']
moveListData['Data Catagory'] = dataCatagory
moveListData['Move List'] = moveList
return moveListData
except KeyError:
return apiData
def getMoveData(character, move, game='ultimate', trySmash4=True):
dataCatagory = 'moves'
apiData = makeAPICall(character, dataCatagory, game, trySmash4)
try:
desiredMove = next((_move for _move in apiData if _move['Name'] == move), None)
        if desiredMove is None:
            print('Move `%s` does not exist! Are you sure you spelled it right?' % move)
            return {'Error': 'Move `%s` does not exist! Are you sure you spelled it right?' % move}
desiredData = ('Name', 'HitboxActive', 'FirstActionableFrame', 'BaseDamage', 'Angle', 'BaseKnockBackSetKnockback', 'LandingLag', 'AutoCancel', 'KnockbackGrowth', 'MoveType', 'IsWeightDependent')
moveData = {}
moveData['Character'] = desiredMove['Owner']
moveData['Game'] = desiredMove['Game']
moveData['Data Catagory'] = dataCatagory
for attribute in desiredData:
moveData[attribute] = desiredMove[attribute]
return moveData
except KeyError:
return apiData
def getMovementData(character, game='ultimate', trySmash4=True):
dataCatagory = 'movements'
apiData = makeAPICall(character, dataCatagory, game, trySmash4)
try:
movementData = {}
movementData['Character'] = apiData[0]['Owner']
movementData['Game'] = apiData[0]['Game']
movementData['Data Catagory'] = dataCatagory
for attribute in apiData:
movementData[attribute['Name']] = attribute['Value']
return movementData
except KeyError:
return apiData
def getCharacterAttributeData(character, game='ultimate', trySmash4=True):
dataCatagory = 'characterattributes'
apiData = makeAPICall(character, dataCatagory, game, trySmash4)
try:
characterAttributeData = {}
characterAttributeData['Character'] = apiData[0]['Owner']
characterAttributeData['Game'] = apiData[0]['Game']
characterAttributeData['Data Catagory'] = dataCatagory
for attribute in apiData:
key = attribute['Name']
value = []
for item in attribute['Values']:
value.append([item['Name'], item['Value']])
characterAttributeData[key] = value
return characterAttributeData
except KeyError:
return apiData
def tablePrint(dictionary):
try:
keyColumnWidth = len(max(dictionary.keys(), key=len)) + 2
output = ''
for key, value in dictionary.items():
output = output + key + (keyColumnWidth - len(key)) * ' '
if isinstance(value, list):
output = output + str(value[0]) + '\n'
for item in value[1:]:
output = output + keyColumnWidth * ' ' + str(item) + '\n'
else:
output = output + str(value) + '\n'
return output
except ValueError:
return ''
def listPrint(items):
    output = ''
    for element in items:
        output = output + element + '\n'
    return output
if '__main__' == __name__:
characterData = getCharacterData('kingdedede')
moveList = getMoveList('pichu')
moveData = getMoveData('ness', 'Jab 1')
movementData = getMovementData('olimar')
characterAttributeData = getCharacterAttributeData('megaman', game='smash4')
failCharacterAttributeData = getCharacterAttributeData('kingdedede',
trySmash4=False)
print(characterData)
print(tablePrint(characterData))
print(tablePrint(moveList))
print(tablePrint(moveData))
print(tablePrint(movementData))
print(tablePrint(characterAttributeData))
print(tablePrint(failCharacterAttributeData))
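# -----------------------------------------------------------------------------
# Illustrative sketch only (not part of the original script): tablePrint() pads
# the key column to the longest key plus two spaces and prints list values one
# item per line under the value column. The _example_table_print helper and its
# sample dict are hypothetical.
# -----------------------------------------------------------------------------
def _example_table_print():
    sample = {
        'Character': 'kingdedede',
        'Weight': 118,
        'Jumps': ['ground jump', 'air jump'],
    }
    # Expected layout (key column width = len('Character') + 2 = 11):
    # Character  kingdedede
    # Weight     118
    # Jumps      ground jump
    #            air jump
    return tablePrint(sample)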
| 31.072464
| 212
| 0.622357
|
f428f3fb17ed8931ca578fc729bb75b95b5d2e57
| 2,985
|
py
|
Python
|
cfn_model/model/ModelElement.py
|
jaymecd/cloudformation-validator
|
4f6951a002f338010b63fa3fbd23ddd8022558fa
|
[
"MIT"
] | 7
|
2018-11-18T00:29:55.000Z
|
2020-05-18T13:23:37.000Z
|
cfn_model/model/ModelElement.py
|
jaymecd/cloudformation-validator
|
4f6951a002f338010b63fa3fbd23ddd8022558fa
|
[
"MIT"
] | 1
|
2018-10-16T20:40:27.000Z
|
2018-10-17T02:18:05.000Z
|
cfn_model/model/ModelElement.py
|
jaymecd/cloudformation-validator
|
4f6951a002f338010b63fa3fbd23ddd8022558fa
|
[
"MIT"
] | 2
|
2019-10-23T15:22:52.000Z
|
2020-06-22T07:00:45.000Z
|
from __future__ import absolute_import, division, print_function
import inspect
import sys
def lineno():
"""Returns the current line number in our program."""
return str(' - ModelElement - line number: '+str(inspect.currentframe().f_back.f_lineno))
class ModelElement():
"""
Model element
"""
def __init__(self, cfn_model, debug=False):
"""
Initialize
:param cfn_model:
:param debug:
"""
# attr_accessor :logical_resource_id, :resource_type, :metadata
self.logical_resource_id = None
self.resource_type = None
self.metadata = None
        self.debug = debug
self.cfn_model = cfn_model
if self.debug:
print('ModelElement - init'+lineno())
def another_element(self, another_model_element):
"""
???
:param another_model_element:
:return:
"""
if self.debug:
print('another element'+lineno())
# FIXME
sys.exit(1)
#found_unequal_instance_var = false
#instance_variables_without_at_sign.each do |instance_variable|
# if instance_variable != :logical_resource_id && instance_variable != :cfn_model
# if self.send(instance_variable) != another_model_element.send(instance_variable)
# found_unequal_instance_var = true
# end
# end
#end
#!found_unequal_instance_var
def method_missing(self, method_name, *args):
"""
???
:param method_name:
:param args:
:return:
"""
if self.debug:
print('method_missing'+lineno())
# FIXME
sys.exit(1)
#if method_name =~ / ^ (\w+)=$ /
#instance_variable_set
#"@#{$1}", args[0]
#else
#References.resolve_value( @ cfn_model, instance_variable_get("@#{method_name}"))
#end
def instance_variables_without_at_sign(self):
"""
Instance variables without an at sign
:return:
"""
if self.debug:
print('instance_variables_without_at_sign'+lineno())
# FIXME
        sys.exit(1)
#self.instance_variables.map { |instance_variable| strip(instance_variable) }
def strip(self, sym):
"""
???
:param sym:
:return:
"""
if self.debug:
print('strip'+lineno())
# FIXME
sys.exit(1)
#sym.to_s.gsub( / @ /, '').to_sym
def emit_instance_vars(self):
"""
???
:return:
"""
if self.debug:
print('emit_instance_vars'+lineno())
instance_vars_str = ''
sys.exit(1)
#for variable in self.instance_variables_without_at_sign()
#self.instance_variables.each do |instance_variable|
# instance_vars_str += " #{instance_variable}=#{instance_variable_get(instance_variable)}\n"
#return instance_vars_str
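# -----------------------------------------------------------------------------
# Illustrative sketch only (not part of cfn_model): the method_missing stub
# above ports Ruby's dynamic dispatch. In Python the analogous hook is
# __getattr__, which is only invoked when normal attribute lookup fails. The
# _ExampleDynamicElement class below is hypothetical and only shows the
# mechanism, not the project's intended resolution logic.
# -----------------------------------------------------------------------------
class _ExampleDynamicElement(object):
    def __init__(self, properties):
        # plain dict of raw template properties, e.g. {'BucketName': 'logs'}
        self._properties = properties

    def __getattr__(self, name):
        # Called only for attributes not found through normal lookup.
        try:
            return self._properties[name]
        except KeyError:
            raise AttributeError(name)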
| 27.385321
| 102
| 0.571189
|
f549d11c5fc3e8d0b2bdb10ec583da29dd31ba60
| 975
|
py
|
Python
|
exp/detect_coco/prep_input.py
|
ChopinSharp/info-ground
|
12fba3c478b806f2fe068faac81237fd0f458b80
|
[
"Apache-2.0"
] | 56
|
2020-09-21T07:41:08.000Z
|
2022-01-10T13:28:36.000Z
|
exp/detect_coco/prep_input.py
|
ChopinSharp/info-ground
|
12fba3c478b806f2fe068faac81237fd0f458b80
|
[
"Apache-2.0"
] | 5
|
2020-08-26T15:50:29.000Z
|
2022-01-04T07:53:07.000Z
|
exp/detect_coco/prep_input.py
|
ChopinSharp/info-ground
|
12fba3c478b806f2fe068faac81237fd0f458b80
|
[
"Apache-2.0"
] | 15
|
2020-08-24T16:36:20.000Z
|
2022-01-17T12:51:45.000Z
|
import os
import glob
import click
from tqdm import tqdm
from data.coco.constants import coco_paths, CocoConstants
import utils.io as io
@click.command()
@click.option(
'--out_dir',
type=str,
default=coco_paths['proc_dir'],
help='Output directory')
@click.option(
'--subset',
type=click.Choice(['train','val','test']),
default='train',
help='Subset to run detection on')
def main(**kwargs):
data_const = CocoConstants()
image_dir = data_const.image_subset_dir[kwargs['subset']]
image_path_list = glob.glob(os.path.join(image_dir,'*.jpg'))
det_input = []
for image_path in tqdm(image_path_list):
det_input.append({
'path': image_path,
'id': os.path.splitext(os.path.basename(image_path))[0]
})
io.dump_json_object(
det_input,
os.path.join(
kwargs['out_dir'],
'det_input_'+kwargs['subset']+'.json'))
if __name__=='__main__':
main()
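# -----------------------------------------------------------------------------
# Illustrative note (not part of the original script): each entry written to
# det_input_<subset>.json pairs an image path with its id (the file name
# without extension). The hypothetical helper below mirrors the record format
# built in main(); the example path in the comment is made up.
# -----------------------------------------------------------------------------
def _example_det_input_record(image_path):
    """Hypothetical helper: one record of the det_input json file."""
    return {
        'path': image_path,
        'id': os.path.splitext(os.path.basename(image_path))[0],
    }
# _example_det_input_record('/data/coco/train2014/xyz.jpg')
# -> {'path': '/data/coco/train2014/xyz.jpg', 'id': 'xyz'}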
| 23.780488
| 67
| 0.631795
|
1b2fba4674c78998e286a60e5a5f545fc70512f0
| 710
|
py
|
Python
|
python/cuml_evaluate.py
|
JayWu7/ModernCoreset
|
932e4984dcb54d48abdf751746d22c0b22b6f591
|
[
"MIT"
] | 1
|
2021-07-17T07:08:00.000Z
|
2021-07-17T07:08:00.000Z
|
python/cuml_evaluate.py
|
JayWu7/ModernCoreset
|
932e4984dcb54d48abdf751746d22c0b22b6f591
|
[
"MIT"
] | null | null | null |
python/cuml_evaluate.py
|
JayWu7/ModernCoreset
|
932e4984dcb54d48abdf751746d22c0b22b6f591
|
[
"MIT"
] | null | null | null |
import sys
from cuda_kmeans import cuml_kmeans_csv
if __name__ == '__main__':
if len(sys.argv) < 5:
print('Parameters error!')
print("Usage: 'python cuml_evaluate.py <original_data_path> <coreset_path> <coreset_weights_path> <cluster_size>'")
exit()
data_path = sys.argv[1]
coreset_path = sys.argv[2]
coreset_weights_path = sys.argv[3]
cluster_size = int(sys.argv[4])
sample_size = None
if len(sys.argv) > 5: #optional parameters
sample_size = int(sys.argv[5])
cuml_kmeans_csv(data_path, cluster_size, sample_size=sample_size)
cuml_kmeans_csv(coreset_path, cluster_size, csv_weights_path=coreset_weights_path)
| 25.357143
| 123
| 0.678873
|
65c518f73ffa07225135b3cb1165a0dbffc912a8
| 36,323
|
py
|
Python
|
assemble.py
|
r-shekhar/ReanalysisUnifier
|
da2ac677c19ac85cdc4946c5523910591b512a98
|
[
"MIT"
] | null | null | null |
assemble.py
|
r-shekhar/ReanalysisUnifier
|
da2ac677c19ac85cdc4946c5523910591b512a98
|
[
"MIT"
] | null | null | null |
assemble.py
|
r-shekhar/ReanalysisUnifier
|
da2ac677c19ac85cdc4946c5523910591b512a98
|
[
"MIT"
] | 1
|
2019-12-14T23:43:58.000Z
|
2019-12-14T23:43:58.000Z
|
#!/usr/bin/env python
import pymongo
MongoClient = pymongo.MongoClient
from pprint import pprint
import itertools
import functools
import multiprocessing
import numpy as np
import pandas as pd
import netCDF4
import sys
import datetime
def year_month_iter(start_year, start_month, end_year, end_month ):
ym_start= 12*start_year + start_month - 1
ym_end= 12*end_year + end_month - 1
for ym in range( ym_start, ym_end ):
y, m = divmod( ym, 12 )
yield datetime.datetime(y, m+1, 1)
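# -----------------------------------------------------------------------------
# Illustrative note (not part of the original script): year_month_iter yields
# the first day of every month from the start month up to, but not including,
# the end month. The hypothetical helper below materialises one example and is
# never called by the pipeline.
# -----------------------------------------------------------------------------
def _example_year_month_iter():
    # [datetime(2000, 11, 1), datetime(2000, 12, 1), datetime(2001, 1, 1)]
    return list(year_month_iter(2000, 11, 2001, 2))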
def joblist_create(col, name_regex, varNames):
print("Querying database for {0} {1}".format(name_regex, ' '.join(varNames)))
q = {}
q['filename_on_disk'] = {"$regex": name_regex}
VarNames = [x.replace('.', '|') for x in varNames]
q['varnames'] = {"$in": VarNames}
filenames = []
datetimes = []
indices = []
variables = []
# pprint.pprint(col.find_one())
i = 0
for j in col.find(q):
# print(j['filename_on_disk'])
# pprint(j)
n = len(j['filetime'])
filenames.extend([j['filename_on_disk'], ]*n)
datetimes.extend(j['filetime'])
indices.extend(list(range(n)))
#assuming more than one variable from varNames isn't in a file
for v,v2 in zip(varNames,VarNames):
# print(j['variables'].keys())
# print(v)
if v2 in j['variables']:
# print(v)
variables.extend([v]*n)
i += 1
d = {'filenames': filenames, 'datetimes': datetimes, 'indices': indices,
'variables': variables}
# pprint(d)
df = pd.DataFrame(d)
df = df.sort_values(by='datetimes')
# renumber row index
df.reset_index()
df.index = range(len(datetimes))
# diagnostics
# dt = np.array(df.datetimes, dtype='M8[h]')
# d2 = dt[1:] - dt[:-1]
# print(d2)
return df
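# -----------------------------------------------------------------------------
# Illustrative note (not part of the pipeline): joblist_create() issues a
# MongoDB query of the form built by the hypothetical helper below and then
# flattens the matching records into a DataFrame with one row per
# (file, time index), sorted by datetimes. The literal values in the comment
# are made-up examples.
# -----------------------------------------------------------------------------
def _example_joblist_query(name_regex, varNames):
    """Hypothetical helper: the query document joblist_create() sends to Mongo."""
    return {'filename_on_disk': {'$regex': name_regex},
            'varnames': {'$in': [x.replace('.', '|') for x in varNames]}}
# _example_joblist_query('^merra.monthly', ['t'])
# -> {'filename_on_disk': {'$regex': '^merra.monthly'}, 'varnames': {'$in': ['t']}}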
def create_output_file(reanalysis, timeres,
field, field_units, field_long_name,
level, latitude, longitude, noclobber=True,
compress=True):
filename = 'reanalysis_clean/{0}.{1}.{2}.nc'.format(reanalysis, timeres,
field)
out_fh = netCDF4.Dataset(filename, 'w',
format='NETCDF4',
noclobber=noclobber)
out_fh.Conventions = 'CF-1.5'
out_fh.Dataset = reanalysis
out_fh.TimeResolution = timeres
out_fh.createDimension("time", None)
out_fh.createDimension("latitude", len(latitude))
out_fh.createDimension("longitude", len(longitude))
coord_tuple = ("time", "latitude", "longitude")
if not (level is None):
out_fh.createDimension("level", len(level))
level_coord = out_fh.createVariable("level", 'f4', ('level',),
zlib=True, complevel=1)
level_coord.units = 'hPa'
level_coord.axis = 'Z'
if np.max(level) > 2000.:
level_coord[:] = level / 100.
else:
level_coord[:] = level
coord_tuple = ("time", "level", "latitude", "longitude")
time = out_fh.createVariable('time', 'f4', ('time',),
zlib=True, complevel=1)
time.units = "hours since 1930-1-1"
time.standard_name = 'time'
time.axis = 'T'
lat = out_fh.createVariable('latitude', 'f4', ('latitude',),
zlib=True, complevel=1)
lat.standard_name = "latitude"
lat.units = 'degrees_north'
lat.actual_range = (-90., 90.)
lat.axis = 'Y'
lat[:] = latitude
lon = out_fh.createVariable("longitude", 'f4', ('longitude',),
zlib=True, complevel=1)
lon.standard_name = "longitude"
lon.units = 'degrees_east'
lon.actual_range = (0., 360.)
lon.axis = 'X'
lon[:] = longitude
data = out_fh.createVariable(field, 'f4', coord_tuple,
zlib=True, complevel=1)
data.long_name = field_long_name
data.units = field_units
return out_fh
def glue_joblist_into_data(joblist, reanalysis, timeres,
field, field_units, field_long_name):
imap = itertools.imap
# print(joblist)
print("Building dataset for {0} {1} {2}".format(
reanalysis, timeres, field))
data_file_initialized = False
m = imap(fetch_data, joblist.itertuples())
for k in m:
new_index, new_timestamp, data, coord_arrays = k
if not data_file_initialized:
if len(coord_arrays) == 2:
lat, lon = coord_arrays
fh = create_output_file(reanalysis, timeres,
field, field_units, field_long_name,
None, *coord_arrays)
elif len(coord_arrays) == 3:
lev, lat, lon = coord_arrays
fh = create_output_file(reanalysis, timeres,
field, field_units, field_long_name,
*coord_arrays)
else:
print("Don't understand coordinate system.")
print(coord_arrays)
sys.exit(8)
data_file_initialized = True
new_datetime = new_timestamp.to_datetime()
fh.variables['time'][new_index] = netCDF4.date2num(
new_datetime, units=fh.variables['time'].units)
fh.variables[field][new_index] = data
del data
import gc
gc.collect()
fh.close()
print("Dataset build complete for {0} {1} {2}".format(
reanalysis, timeres, field))
def glue_joblist_into_monthly_data(joblist, reanalysis, timeres,
field, field_units, field_long_name,
deaccumulate=None):
# could use multiprocessing easily, but seems to cause memory issues
# better off not using it. Parallelize by extracting multiple variables
# in separate python processes
imap = itertools.imap
# print(joblist)
print("Building dataset for {0} {1} {2}".format(
reanalysis, timeres, field))
dts = np.array(joblist.datetimes, dtype=np.datetime64)
    if dts.shape[0] == 0:
        print("No files matched specified pattern. Are you sure the files are available and the specified pattern is correct?")
        return
start_datetime = pd.to_datetime(dts[0])
end_datetime = pd.to_datetime(dts[-1])
if end_datetime.month == 12:
ymi = list(year_month_iter(start_datetime.year, start_datetime.month,
end_datetime.year+1, 2))
elif end_datetime.month == 11:
ymi = list(year_month_iter(start_datetime.year, start_datetime.month,
end_datetime.year+1, 1))
else:
ymi = list(year_month_iter(start_datetime.year, start_datetime.month,
end_datetime.year, end_datetime.month+2))
data_file_initialized = False
adjusted_units = "m" if (field_units == 'm2 s-2') else field_units
for i in range(len(ymi)-1):
print(ymi[i])
month_start_date = pd.Timestamp(ymi[i])
month_end_date = pd.Timestamp(ymi[i+1])
joblist_month_subset = joblist[joblist.datetimes >= month_start_date]
joblist_month_subset = joblist_month_subset[
joblist.datetimes < month_end_date]
N_timesteps = joblist_month_subset.shape[0]
m = imap(fetch_data, joblist_month_subset.itertuples())
k = m.next()
(junk_index, junk_timestamp, data, coord_arrays) = k
if not data_file_initialized:
if len(coord_arrays) == 2:
lat, lon = coord_arrays
fh = create_output_file(reanalysis, timeres,
field, adjusted_units, field_long_name,
None, *coord_arrays)
elif len(coord_arrays) == 3:
lev, lat, lon = coord_arrays
fh = create_output_file(reanalysis, timeres,
field, adjusted_units, field_long_name,
*coord_arrays)
else:
print("Don't understand coordinate system.")
print(coord_arrays)
sys.exit(8)
data_file_initialized = True
if (N_timesteps > 1 and (deaccumulate != "ERAI")):
print("Averaging over {0} timesteps".format(N_timesteps))
data_sum = np.array(data.copy(), dtype=np.float64)
for k in m:
(junk_index, junk_timestamp, data, junk_coords) = k
data_sum += data
data_sum /= N_timesteps
data = data_sum.astype(np.float32)
elif (N_timesteps > 1 and (deaccumulate == "ERAI")):
print("Using ERA-Interim deaccumulation")
new_shape = [N_timesteps,]
new_shape.extend(data.shape)
data_arr = np.zeros(tuple(new_shape), dtype=np.float32)
#restart the iterator
m = imap(fetch_data, joblist_month_subset.itertuples())
# fill the array
for jtimestep, k in enumerate(m):
(junk_index, junk_timestamp, data, junk_coords) = k
data_arr[jtimestep] = data / (3.*3600.) # 3 hour accumulation
#sanity check to make sure we're deaccumulating a whole day
assert(N_timesteps % 8 == 0)
# ERAI accumulates over every 12 hours at 3 hour intervals
# if jtimestep%4 = 0, do nothing, else subtract the previous entry.
# loop backwards to avoid modifying values before we've deaccumulated
for jtimestep in reversed(range(0, N_timesteps)):
# print((jtimestep, np.mean(data_arr[jtimestep])))
if (jtimestep%4 != 0):
data_arr[jtimestep] = data_arr[jtimestep] - data_arr[jtimestep-1]
# Take time mean of deaccumulated data, pass it for writing
data = np.mean(data_arr, axis=0)
else:
print("Taking data")
fh.variables['time'][i] = netCDF4.date2num( ymi[i],
units=fh.variables['time'].units)
if field_units == 'm2 s-2':
data /= 9.81
fh.variables[field][i] = data
fh.close()
print("Dataset build complete for {0} {1} {2}".format(
reanalysis, timeres, field))
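# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the pipeline): the ERA-Interim branch above
# de-accumulates forecast fields that are summed over each 12-hour window in
# 3-hour steps. Within every group of four steps the stored values are
# cumulative, so each step except the first of its group has the previous step
# subtracted, working backwards so earlier values are still intact when used.
# The hypothetical helper below applies the same rule to a plain 1-D sequence.
# -----------------------------------------------------------------------------
def _example_deaccumulate_erai(accumulated):
    """E.g. [1, 3, 6, 10, 2, 5, 9, 14] -> [1, 2, 3, 4, 2, 3, 4, 5]."""
    values = list(accumulated)
    assert len(values) % 4 == 0  # whole 12-hour windows only
    for j in reversed(range(len(values))):
        if j % 4 != 0:
            values[j] = values[j] - values[j - 1]
    return values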
def fetch_data(t):
new_index, dt, fn, old_index, varname = t
fh = netCDF4.Dataset(fn, 'r')
d = fh.variables[varname][old_index]
dcoords = []
for z in fh.variables[varname].dimensions[1:]:
if (fh.variables[z].units == "hPa"):
dcoords.append(fh.variables[z][:]*100.)
else:
dcoords.append(fh.variables[z][:])
fh.close()
return ((new_index, dt, d, dcoords))
def main():
client = MongoClient()
db = client.rawdata
col = db.netcdf
deaccumulate_ERAI_forecast = "ERAI"
dispatch_table = [
#--------------------- CFSR ----------------------------------------------------------------
(joblist_create, "^cfsr.monthly", ["SHTFL_L1_Avg_1",], ('cfsr', 'monthly', 'SHF', 'W m-2', "Sensible Heat Flux"),),
(joblist_create, "^cfsr.monthly", ["LHTFL_L1_Avg_1",], ('cfsr', 'monthly', 'LHF', 'W m-2', "Latent Heat Flux"),),
(joblist_create, "^cfsr.monthly", ["CSDLF_L1_Avg_1",], ('cfsr', 'monthly', 'SLWDN_CLRSKY', 'W m-2', "Longwave Down at Surface, Clear Sky"),),
(joblist_create, "^cfsr.monthly", ["CSDSF_L1_Avg_1",], ('cfsr', 'monthly', 'SSWDN_CLRSKY', 'W m-2', "Shortwave Down at Surface, Clear Sky"),),
(joblist_create, "^cfsr.monthly", ["CSULF_L1_Avg_1",], ('cfsr', 'monthly', 'SLWUP_CLRSKY', 'W m-2', "Longwave Up at Surface, Clear Sky"),),
(joblist_create, "^cfsr.monthly", ["CSUSF_L1_Avg_1",], ('cfsr', 'monthly', 'SSWUP_CLRSKY', 'W m-2', "Shortwave Up at Surface, Clear Sky"),),
(joblist_create, "^cfsr.monthly", ["CSULF_L8_Avg_1",], ('cfsr', 'monthly', 'TLWUP_CLRSKY', 'W m-2', "Longwave Up at TOA, Clear Sky"),),
(joblist_create, "^cfsr.monthly", ["CSUSF_L8_Avg_1",], ('cfsr', 'monthly', 'TSWUP_CLRSKY', 'W m-2', "Shortwave Up at TOA, Clear Sky"),),
(joblist_create, "^cfsr.monthly", ["DSWRF_L8_Avg_1",], ('cfsr', 'monthly', 'TSWDN', 'W m-2', "Shortwave Down at TOA"),),
(joblist_create, "^cfsr.monthly", ["USWRF_L8_Avg_1",], ('cfsr', 'monthly', 'TSWUP', 'W m-2', "Shortwave Up at TOA"),),
(joblist_create, "^cfsr.monthly", ["ULWRF_L8_Avg_1",], ('cfsr', 'monthly', 'TLWUP', 'W m-2', "Longwave Up at TOA"),),
(joblist_create, "^cfsr.monthly", ["DLWRF_L1_Avg_1",], ('cfsr', 'monthly', 'SLWDN', 'W m-2', "Longwave Down at Surface"),),
(joblist_create, "^cfsr.monthly", ["ULWRF_L1_Avg_1",], ('cfsr', 'monthly', 'SLWUP', 'W m-2', "Longwave Up at Surface"),),
(joblist_create, "^cfsr.monthly", ["DSWRF_L1_Avg_1",], ('cfsr', 'monthly', 'SSWDN', 'W m-2', "Shortwave Down at Surface"),),
(joblist_create, "^cfsr.monthly", ["USWRF_L1_Avg_1",], ('cfsr', 'monthly', 'SSWUP', 'W m-2', "Shortwave Up at Surface"),),
(joblist_create, "^cfsr.monthly", ["GFLUX_L1_Avg_1",], ('cfsr', 'monthly', 'GHF', 'W m-2', "Ground Heat Flux"),),
(joblist_create, "^cfsr.monthly", ["HGT_L100_Avg", "HGT_L100"], ('cfsr', 'monthly', 'GHT', "m", "Geopotential Height")),
(joblist_create, "^cfsr.monthly", ["HGT_L1_Avg", "HGT_L1"], ('cfsr', 'monthly', 'GHT_SURF', "m", "Geopotential Height At Surface")),
(joblist_create, "^cfsr.monthly", ["PRES_L1_Avg", "PRES_L1"], ('cfsr', 'monthly', 'PSFC', "Pa", "Surface Pressure")),
(joblist_create, "^cfsr.monthly", ["PRMSL_L101_Avg", "PRMSL_L101"], ('cfsr', 'monthly', 'MSLP', "Pa", "Pressure at Mean Sea Level")),
(joblist_create, "^cfsr.monthly", ["SPF_H_L100_Avg", "SPF_H_L100"], ('cfsr', 'monthly', 'SPHUM', "kg kg-1", "Specific Humidity")),
(joblist_create, "^cfsr.monthly", ["TMP_L100_Avg", "TMP_L100"], ('cfsr', 'monthly', 'T', "K", "Air Temperature")),
(joblist_create, "^cfsr.monthly", ["U_GRD_L100_Avg", "U_GRD_L100"], ('cfsr', 'monthly', 'U', "m s-1", "Zonal Wind")),
(joblist_create, "^cfsr.monthly", ["V_GRD_L100_Avg", "V_GRD_L100"], ('cfsr', 'monthly', 'V', "m s-1", "Meridional Wind")),
(joblist_create, "^cfsr.monthly", ["V_VEL_L100_Avg", "V_VEL_L100"], ('cfsr', 'monthly', 'OMEGA', "Pa s-1", "Pressure Velocity")),
#--------------------- Precip --------------------------------------------------------------
(joblist_create, "^cmap.monthly", ["precip"], ('cmap', 'monthly', 'PRECIP', "mm day-1", "Precipitation")),
(joblist_create, "^gpcc.monthly.*combined.*", ["precip"], ('gpcc', 'monthly', 'PRECIP', "mm", "Precipitation")),
(joblist_create, "^gpcp.monthly.*", ["precip"], ('gpcp', 'monthly', 'PRECIP', "mm day-1", "Precipitation")),
#--------------------- MERRA ---------------------------------------------------------------
(joblist_create, "^merra.monthly", ["h"], ('merra', 'monthly', 'GHT', "m", "Geopotential Height")),
(joblist_create, "^merra.monthly", ["phis"], ('merra', 'monthly', 'GHT_SURF', "m-2 s-2", "Geopotential Height At Surface")),
(joblist_create, "^merra.monthly", ["ps"], ('merra', 'monthly', 'PSFC', "Pa", "Surface Pressure")),
(joblist_create, "^merra.monthly", ["slp"], ('merra', 'monthly', 'MSLP', "Pa", "Pressure at Mean Sea Level")),
(joblist_create, "^merra.monthly", ["qv"], ('merra', 'monthly', 'SPHUM', "kg kg-1", "Specific Humidity")),
(joblist_create, "^merra.monthly", ["t"], ('merra', 'monthly', 'T', "K", "Air Temperature")),
(joblist_create, "^merra.monthly", ["u"], ('merra', 'monthly', 'U', "m s-1", "Zonal Wind")),
(joblist_create, "^merra.monthly", ["v"], ('merra', 'monthly', 'V', "m s-1", "Meridional Wind")),
(joblist_create, "^merra.monthly", ["omega"], ('merra', 'monthly', 'OMEGA', "Pa s-1", "Pressure Velocity")),
(joblist_create, "^merra.monthly", ["hflux",], ('merra', 'monthly', 'SHF', 'W m-2', "Sensible Heat Flux"),),
(joblist_create, "^merra.monthly", ["eflux",], ('merra', 'monthly', 'LHF', 'W m-2', "Latent Heat Flux"),),
(joblist_create, "^merra.monthly", ["lwgabclr",], ('merra', 'monthly', 'SLWAB_CLRSKY', 'W m-2', "Longwave Absorbed at Surface, Clear Sky"),),
(joblist_create, "^merra.monthly", ["lwgntclr",], ('merra', 'monthly', 'SLWNT_CLRSKY', 'W m-2', "Longwave Net at Surface, Clear Sky"),),
(joblist_create, "^merra.monthly", ["lwtupclr",], ('merra', 'monthly', 'TLWUP_CLRSKY', 'W m-2', "Longwave Up at TOA, Clear Sky"),),
(joblist_create, "^merra.monthly", ["swgdnclr",], ('merra', 'monthly', 'SSWDN_CLRSKY', 'W m-2', "Shortwave Down at Surface, Clear Sky"),),
(joblist_create, "^merra.monthly", ["swgntclr",], ('merra', 'monthly', 'SSWNT_CLRSKY', 'W m-2', "Shortwave Net at Surface, Clear Sky"),),
(joblist_create, "^merra.monthly", ["swtntclr",], ('merra', 'monthly', 'TSWNT_CLRSKY', 'W m-2', "Shortwave Net at TOA, Clear Sky"),),
(joblist_create, "^merra.monthly", ["lwgab",], ('merra', 'monthly', 'SLWAB', 'W m-2', "Longwave Absorbed at Surface"),),
(joblist_create, "^merra.monthly", ["lwgem",], ('merra', 'monthly', 'SLWEM', 'W m-2', "Longwave Emitted at Surface"),),
(joblist_create, "^merra.monthly", ["lwgnt",], ('merra', 'monthly', 'SLWNT', 'W m-2', "Longwave Net at Surface"),),
(joblist_create, "^merra.monthly", ["lwtup",], ('merra', 'monthly', 'TLWUP', 'W m-2', "Longwave Up at TOA"),),
(joblist_create, "^merra.monthly", ["swgdn",], ('merra', 'monthly', 'SSWDN', 'W m-2', "Shortwave Down at Surface"),),
(joblist_create, "^merra.monthly", ["swgnt",], ('merra', 'monthly', 'SSWNT', 'W m-2', "Shortwave Net at Surface"),),
(joblist_create, "^merra.monthly", ["swtdn",], ('merra', 'monthly', 'TSWDN', 'W m-2', "Shortwave Down at TOA"),),
(joblist_create, "^merra.monthly", ["swtnt",], ('merra', 'monthly', 'TSWNT', 'W m-2', "Shortwave Net at TOA"),),
#--------------------- MERRA 2 -------------------------------------------------------------
(joblist_create, "^merra2.monthly", ["H"], ('merra2', 'monthly', 'GHT', "m", "Geopotential Height")),
(joblist_create, "^merra2.monthly", ["PHIS"], ('merra2', 'monthly', 'GHT_SURF', "m-2 s-2", "Geopotential Height At Surface")),
(joblist_create, "^merra2.monthly", ["PS"], ('merra2', 'monthly', 'PSFC', "Pa", "Surface Pressure")),
(joblist_create, "^merra2.monthly", ["SLP"], ('merra2', 'monthly', 'MSLP', "Pa", "Pressure at Mean Sea Level")),
(joblist_create, "^merra2.monthly", ["QV"], ('merra2', 'monthly', 'SPHUM', "kg kg-1", "Specific Humidity")),
(joblist_create, "^merra2.monthly", ["T"], ('merra2', 'monthly', 'T', "K", "Air Temperature")),
(joblist_create, "^merra2.monthly", ["U"], ('merra2', 'monthly', 'U', "m s-1", "Zonal Wind")),
(joblist_create, "^merra2.monthly", ["V"], ('merra2', 'monthly', 'V', "m s-1", "Meridional Wind")),
(joblist_create, "^merra2.monthly", ["OMEGA"], ('merra2', 'monthly', 'OMEGA', "Pa s-1", "Pressure Velocity")),
(joblist_create, "^merra2.monthly", ["EVAP",], ('merra2', 'monthly', 'EVAP', 'kg m-2 s-1', "Evaporation"),),
(joblist_create, "^merra2.monthly", ["PRECTOT",], ('merra2', 'monthly', 'PRECIP', 'kg m-2 s-1', "Precipitation"),),
(joblist_create, "^merra2.monthly", ["PRECTOTCORR",], ('merra2', 'monthly', 'PRECIP_BC', 'kg m-2 s-1', "Precipitation Bias Corrected"),),
(joblist_create, "^merra2.monthly", ["HFLUX",], ('merra2', 'monthly', 'SHF', 'W m-2', "Sensible Heat Flux"),),
(joblist_create, "^merra2.monthly", ["EFLUX",], ('merra2', 'monthly', 'LHF', 'W m-2', "Latent Heat Flux"),),
(joblist_create, "^merra2.monthly", ["LWGABCLR",], ('merra2', 'monthly', 'SLWAB_CLRSKY', 'W m-2', "Longwave Absorbed at Surface, Clear Sky"),),
(joblist_create, "^merra2.monthly", ["LWGNTCLR",], ('merra2', 'monthly', 'SLWNT_CLRSKY', 'W m-2', "Longwave Net at Surface, Clear Sky"),),
(joblist_create, "^merra2.monthly", ["LWTUPCLR",], ('merra2', 'monthly', 'TLWUP_CLRSKY', 'W m-2', "Longwave Up at TOA, Clear Sky"),),
(joblist_create, "^merra2.monthly", ["SWGDNCLR",], ('merra2', 'monthly', 'SSWDN_CLRSKY', 'W m-2', "Shortwave Down at Surface, Clear Sky"),),
(joblist_create, "^merra2.monthly", ["SWGNTCLR",], ('merra2', 'monthly', 'SSWNT_CLRSKY', 'W m-2', "Shortwave Net at Surface, Clear Sky"),),
(joblist_create, "^merra2.monthly", ["SWTNTCLR",], ('merra2', 'monthly', 'TSWNT_CLRSKY', 'W m-2', "Shortwave Net at TOA, Clear Sky"),),
(joblist_create, "^merra2.monthly", ["LWGAB",], ('merra2', 'monthly', 'SLWAB', 'W m-2', "Longwave Absorbed at Surface"),),
(joblist_create, "^merra2.monthly", ["LWGEM",], ('merra2', 'monthly', 'SLWEM', 'W m-2', "Longwave Emitted at Surface"),),
(joblist_create, "^merra2.monthly", ["LWGNT",], ('merra2', 'monthly', 'SLWNT', 'W m-2', "Longwave Net at Surface"),),
(joblist_create, "^merra2.monthly", ["LWTUP",], ('merra2', 'monthly', 'TLWUP', 'W m-2', "Longwave Up at TOA"),),
(joblist_create, "^merra2.monthly", ["SWGDN",], ('merra2', 'monthly', 'SSWDN', 'W m-2', "Shortwave Down at Surface"),),
(joblist_create, "^merra2.monthly", ["SWGNT",], ('merra2', 'monthly', 'SSWNT', 'W m-2', "Shortwave Net at Surface"),),
(joblist_create, "^merra2.monthly", ["SWTDN",], ('merra2', 'monthly', 'TSWDN', 'W m-2', "Shortwave Down at TOA"),),
(joblist_create, "^merra2.monthly", ["SWTNT",], ('merra2', 'monthly', 'TSWNT', 'W m-2', "Shortwave Net at TOA"),),
#--------------------- ERA Interim ---------------------------------------------------------
(joblist_create, "^eraI.monthly", ["z"], ('erai', 'monthly', 'GHT', "m2 s-2", "Geopotential Height")),
# (joblist_create, "^eraI.monthly", ["PHIS"], ('eraI', 'monthly', 'GHT_SURF', "m-2 s-2", "Geopotential Height At Surface")),
(joblist_create, "^eraI.monthly", ["sp"], ('erai', 'monthly', 'PSFC', "Pa", "Surface Pressure")),
(joblist_create, "^eraI.monthly", ["msl"], ('erai', 'monthly', 'MSLP', "Pa", "Pressure at Mean Sea Level")),
(joblist_create, "^eraI.monthly", ["q"], ('erai', 'monthly', 'SPHUM', "kg kg-1", "Specific Humidity")),
(joblist_create, "^eraI.monthly", ["t"], ('erai', 'monthly', 'T', "K", "Air Temperature")),
(joblist_create, "^eraI.monthly", ["u"], ('erai', 'monthly', 'U', "m s-1", "Zonal Wind")),
(joblist_create, "^eraI.monthly", ["v"], ('erai', 'monthly', 'V', "m s-1", "Meridional Wind")),
(joblist_create, "^eraI.monthly", ["w"], ('erai', 'monthly', 'OMEGA', "Pa s-1", "Pressure Velocity")),
(joblist_create, "^eraI.monthly", ["vo"], ('erai', 'monthly', 'MODEL_VORTICITY', "s-1", "Relative Vorticity")),
(joblist_create, "^eraI.monthly", ["d"], ('erai', 'monthly', 'MODEL_DIVERGENCE', "s-1", "Divergence")),
(joblist_create, "^eraI.monthly", ["p65.162"], ('erai', 'monthly', 'EASTWARD_MASS_FLUX', "kg m-1 s-1", "Vertical Integral of Eastward Mass Flux")),
(joblist_create, "^eraI.monthly", ["p66.162"], ('erai', 'monthly', 'NORTHWARD_MASS_FLUX', "kg m-1 s-1", "Vertical Integral of Northward Mass Flux")),
(joblist_create, "^eraI.monthly", ["p69.162"], ('erai', 'monthly', 'EASTWARD_HEAT_FLUX', "W m-1", "Vertical Integral of Eastward Mass Flux")),
(joblist_create, "^eraI.monthly", ["p70.162"], ('erai', 'monthly', 'NORTHWARD_HEAT_FLUX', "W m-1", "Vertical Integral of Northward Mass Flux")),
(joblist_create, "^eraI.monthly", ["p71.162"], ('erai', 'monthly', 'EASTWARD_WATERVAPOR_FLUX', "kg m-1 s-1", "Vertical Integral of Eastward Water Vapor Flux")),
(joblist_create, "^eraI.monthly", ["p72.162"], ('erai', 'monthly', 'NORTHWARD_WATERVAPOR_FLUX', "kg m-1 s-1", "Vertical Integral of Northward Water Vapor Flux")),
(joblist_create, "^eraI.monthly", ["p73.162"], ('erai', 'monthly', 'EASTWARD_GHT_FLUX', "W m-1", "Vertical Integral of Eastward Geopotential Flux")),
(joblist_create, "^eraI.monthly", ["p74.162"], ('erai', 'monthly', 'NORTHWARD_GHT_FLUX', "W m-1", "Vertical Integral of Northward Geopotential Flux")),
(joblist_create, "^eraI.monthly", ["p81.162"], ('erai', 'monthly', 'DIV_MASS_FLUX', "kg m-2 s-1", "Vertical Integral of Divergence of Mass Flux")),
(joblist_create, "^eraI.monthly", ["p83.162"], ('erai', 'monthly', 'DIV_HEAT_FLUX', "W m-2", "Vertical Integral of Divergence of Heat Flux")),
(joblist_create, "^eraI.monthly", ["p84.162"], ('erai', 'monthly', 'DIV_WATERVAPOR_FLUX', "kg m-2 s-1", "Vertical Integral of Divergence of Water Vapor Flux")),
(joblist_create, "^eraI.monthly", ["p85.162"], ('erai', 'monthly', 'DIV_GHT_FLUX', "W m-2", "Vertical Integral of Divergence of Geopotential Flux")),
(joblist_create, "^eraI.monthly", ["sshf",], ('erai', 'monthly', 'SHF', 'W m-2', "Sensible Heat Flux", deaccumulate_ERAI_forecast),),
(joblist_create, "^eraI.monthly", ["slhf",], ('erai', 'monthly', 'LHF', 'W m-2', "Latent Heat Flux", deaccumulate_ERAI_forecast),),
(joblist_create, "^eraI.monthly", ["ssrd",], ('erai', 'monthly', 'SSWDN', 'W m-2', "Shortwave Down at Surface", deaccumulate_ERAI_forecast),),
(joblist_create, "^eraI.monthly", ["strd",], ('erai', 'monthly', 'SLWDN', 'W m-2', "Longwave Down at Surface", deaccumulate_ERAI_forecast),),
(joblist_create, "^eraI.monthly", ["ssr",], ('erai', 'monthly', 'SSWNT', 'W m-2', "Shortwave Net at Surface", deaccumulate_ERAI_forecast),),
(joblist_create, "^eraI.monthly", ["str",], ('erai', 'monthly', 'SLWNT', 'W m-2', "Longwave Net at Surface", deaccumulate_ERAI_forecast),),
(joblist_create, "^eraI.monthly", ["tisr",], ('erai', 'monthly', 'TSWDN', 'W m-2', "Shortwave Down at TOA", deaccumulate_ERAI_forecast),),
(joblist_create, "^eraI.monthly", ["tsr",], ('erai', 'monthly', 'TSWNT', 'W m-2', "Shortwave Net at TOA", deaccumulate_ERAI_forecast),),
(joblist_create, "^eraI.monthly", ["ttr",], ('erai', 'monthly', 'TLWNT', 'W m-2', "Longwave Net at TOA", deaccumulate_ERAI_forecast),),
(joblist_create, "^eraI.monthly", ["strc",], ('erai', 'monthly', 'SLWNT_CLRSKY', 'W m-2', "Longwave Net at Surface, Clear Sky", deaccumulate_ERAI_forecast),),
(joblist_create, "^eraI.monthly", ["ssrc",], ('erai', 'monthly', 'SSWNT_CLRSKY', 'W m-2', "Shortwave Net at Surface, Clear Sky", deaccumulate_ERAI_forecast),),
(joblist_create, "^eraI.monthly", ["ttrc",], ('erai', 'monthly', 'TLWNT_CLRSKY', 'W m-2', "Longwave Net at TOA, Clear Sky", deaccumulate_ERAI_forecast),),
(joblist_create, "^eraI.monthly", ["tsrc",], ('erai', 'monthly', 'TSWNT_CLRSKY', 'W m-2', "Shortwave Net at TOA, Clear Sky", deaccumulate_ERAI_forecast),),
(joblist_create, "^eraI.monthly", ["tsrc",], ('erai', 'monthly', 'TSWNT_CLRSKY', 'W m-2', "Shortwave Net at TOA, Clear Sky", deaccumulate_ERAI_forecast),),
(joblist_create, "^eraI.monthly", ["tp",], ('erai', 'monthly', 'PRECIP', 'm', "Precipitation. meters per 3 hours.", deaccumulate_ERAI_forecast),),
(joblist_create, "^eraI.monthly", ["e",], ('erai', 'monthly', 'EVAP', 'm', "Evaporation. meters per 3 hours.", deaccumulate_ERAI_forecast),),
(joblist_create, "^ersst.monthly", ["sst",], ('ersst', 'monthly', 'SST', 'K', "Sea Surface Temperature"),),
#--------------------- CMIP5 runs ---------------------------------------------------------
#joblist_create #regex matching files with this var #var name in files # new prefix #timefreq #new name #units # new description
# (joblist_create, "^cmip5.hfls.Amon.GFDL.ESM2G.historical.r1i1p1", ["hfls",], ('cmip5.gfdl.esm2g.historical', 'monthly', 'LHF', 'W m-2', "Latent Heat Flux"),),
# (joblist_create, "^cmip5.hfss.Amon.GFDL.ESM2G.historical.r1i1p1", ["hfss",], ('cmip5.gfdl.esm2g.historical', 'monthly', 'SHF', 'W m-2', "Sensible Heat Flux"),),
# (joblist_create, "^cmip5.hurs.Amon.GFDL.ESM2G.historical.r1i1p1", ["hurs",], ('cmip5.gfdl.esm2g.historical', 'monthly', 'RH_SURF', '%', "Near Surface Relative Humidity"),),
# (joblist_create, "^cmip5.hus.Amon.GFDL.ESM2G.historical.r1i1p1", ["hus",], ('cmip5.gfdl.esm2g.historical', 'monthly', 'SPHUM', 'kg kg-1', "Specific Humidity"),),
# (joblist_create, "^cmip5.huss.Amon.GFDL.ESM2G.historical.r1i1p1", ["huss",], ('cmip5.gfdl.esm2g.historical', 'monthly', 'SPHUM_SURF', 'kg kg-1', "Near Surface Specific Humidity"),),
# (joblist_create, "^cmip5.mrsos.Lmon.GFDL.ESM2G.historical.r1i1p1", ["mrsos",], ('cmip5.gfdl.esm2g.historical', 'monthly', 'SOILMOIST', 'kg m-2', "Soil Moisture in Upper Soil Column"),),
# (joblist_create, "^cmip5.pr.Amon.GFDL.ESM2G.historical.r1i1p1", ["pr",], ('cmip5.gfdl.esm2g.historical', 'monthly', 'PRECIP', 'kg m-2 s-1', "Precipitation"),),
# (joblist_create, "^cmip5.ps.Amon.GFDL.ESM2G.historical.r1i1p1", ["ps",], ('cmip5.gfdl.esm2g.historical', 'monthly', 'PSFC', 'Pa', "Surface Pressure"),),
# (joblist_create, "^cmip5.rlds.Amon.GFDL.ESM2G.historical.r1i1p1", ["rlds",], ('cmip5.gfdl.esm2g.historical', 'monthly', 'SLWDN', 'W m-2', "Longwave Down at Surface"),),
# (joblist_create, "^cmip5.rlus.Amon.GFDL.ESM2G.historical.r1i1p1", ["rlus",], ('cmip5.gfdl.esm2g.historical', 'monthly', 'SLWUP', 'W m-2', "Longwave Up at Surface"),),
# (joblist_create, "^cmip5.ta.Amon.GFDL.ESM2G.historical.r1i1p1", ["ta",], ('cmip5.gfdl.esm2g.historical', 'monthly', 'T', 'K', "Air Temperature"),),
# (joblist_create, "^cmip5.tas.Amon.GFDL.ESM2G.historical.r1i1p1", ["tas",], ('cmip5.gfdl.esm2g.historical', 'monthly', 'T_SURF', 'K', "Near Surface Air Temperature"),),
# (joblist_create, "^cmip5.ua.Amon.GFDL.ESM2G.historical.r1i1p1", ["ua",], ('cmip5.gfdl.esm2g.historical', 'monthly', 'U', 'm s-1', "Zonal Wind"),),
# (joblist_create, "^cmip5.va.Amon.GFDL.ESM2G.historical.r1i1p1", ["va",], ('cmip5.gfdl.esm2g.historical', 'monthly', 'V', 'm s-1', "Meridional Wind"),),
# (joblist_create, "^cmip5.wap.Amon.GFDL.ESM2G.historical.r1i1p1", ["wap",], ('cmip5.gfdl.esm2g.historical', 'monthly', 'OMEGA', 'Pa s-1', "Pressure Velocity"),),
# (joblist_create, "^cmip5.zg.Amon.GFDL.ESM2G.historical.r1i1p1", ["zg",], ('cmip5.gfdl.esm2g.historical', 'monthly', 'GHT', 'm', "Geopotential Height"),),
]
# obtained with `ls zg* --color=none |cut -f1-5 -d_ |uniq`
cmip5_list = """
zg_Amon_ACCESS1-0_historical_r1i1p1
zg_Amon_ACCESS1-3_historical_r1i1p1
zg_Amon_bcc-csm1-1_historical_r1i1p1
zg_Amon_bcc-csm1-1-m_historical_r1i1p1
zg_Amon_BNU-ESM_historical_r1i1p1
zg_Amon_CanCM4_historical_r1i1p1
zg_Amon_CanESM2_historical_r1i1p1
zg_Amon_CanESM2_rcp85_r1i1p1
zg_Amon_CCSM4_historical_r1i1p1
zg_Amon_CESM1-BGC_historical_r1i1p1
zg_Amon_CESM1-CAM5_historical_r1i1p1
zg_Amon_CESM1-FASTCHEM_historical_r1i1p1
zg_Amon_CESM1-WACCM_historical_r1i1p1
zg_Amon_CMCC-CESM_historical_r1i1p1
zg_Amon_CMCC-CM_historical_r1i1p1
zg_Amon_CMCC-CMS_historical_r1i1p1
zg_Amon_CNRM-CM5-2_historical_r1i1p1
zg_Amon_CNRM-CM5_historical_r1i1p1
zg_Amon_CSIRO-Mk3-6-0_historical_r1i1p1
zg_Amon_FGOALS-g2_historical_r1i1p1
zg_Amon_FIO-ESM_historical_r1i1p1
zg_Amon_GFDL-CM2p1_historical_r1i1p1
zg_Amon_GFDL-CM3_historical_r1i1p1
zg_Amon_GFDL-CM3_rcp85_r1i1p1
zg_Amon_GFDL-ESM2G_historical_r1i1p1
zg_Amon_GFDL-ESM2M_historical_r1i1p1
zg_Amon_GISS-E2-H-CC_historical_r1i1p1
zg_Amon_GISS-E2-H_historical_r1i1p1
zg_Amon_GISS-E2-R-CC_historical_r1i1p1
zg_Amon_GISS-E2-R_historical_r1i1p1
zg_Amon_HadCM3_historical_r1i1p1
zg_Amon_HadGEM2-AO_historical_r1i1p1
zg_Amon_HadGEM2-CC_historical_r1i1p1
zg_Amon_HadGEM2-ES_historical_r1i1p1
zg_Amon_inmcm4_historical_r1i1p1
zg_Amon_IPSL-CM5A-LR_historical_r1i1p1
zg_Amon_IPSL-CM5A-MR_historical_r1i1p1
zg_Amon_IPSL-CM5B-LR_historical_r1i1p1
zg_Amon_MIROC4h_historical_r1i1p1
zg_Amon_MIROC5_historical_r1i1p1
zg_Amon_MIROC-ESM-CHEM_historical_r1i1p1
zg_Amon_MIROC-ESM_historical_r1i1p1
zg_Amon_MPI-ESM-LR_historical_r1i1p1
zg_Amon_MPI-ESM-MR_historical_r1i1p1
zg_Amon_MPI-ESM-P_historical_r1i1p1
zg_Amon_MRI-CGCM3_historical_r1i1p1
zg_Amon_MRI-ESM1_historical_r1i1p1
zg_Amon_NorESM1-ME_historical_r1i1p1
zg_Amon_NorESM1-M_historical_r1i1p1
"""
cmip_models = [x.replace('zg_', '{0}.') for x in cmip5_list.strip().split()]
cmip_variable_list = (
("evspsbl", "EVAP", 'kg m-2 s-1', "Evaporation"),
("hfls", 'LHF', 'W m-2', "Latent Heat Flux"),
("hfss", 'SHF', 'W m-2', "Sensible Heat Flux"),
("hurs", 'RH_SURF', '%', "Near Surface Relative Humidity"),
("hus", 'SPHUM', 'kg kg-1', "Specific Humidity"),
("huss", 'SPHUM_SURF', 'kg kg-1', "Near Surface Specific Humidity"),
("mrsos", 'SOILMOIST', 'kg m-2', "Soil Moisture in Upper Soil Column"),
("pr", 'PRECIP', 'kg m-2 s-1', "Precipitation"),
("ps", 'PSFC', 'Pa', "Surface Pressure"),
("rlds", 'SLWDN', 'W m-2', "Longwave Down at Surface"),
("rlus", 'SLWUP', 'W m-2', "Longwave Up at Surface"),
("rlut", 'TLWUP', 'W m-2', "Longwave Up at TOA"),
("rsds", 'SSWDN', 'W m-2', "Shortwave Down at Surface"),
("rsdt", 'TSWDN', 'W m-2', "Shortwave Down at TOA"),
("rsus", 'SSWUP', 'W m-2', "Shortwave Up at Surface"),
("rsuscs",'SSWUP_CLRSKY','W m-2', "Surface Shortwave Up, CLear Sky"),
("rsut", 'TSWUP', 'W m-2', "Shortwave Up at TOA"),
("ta", 'T', 'K', "Air Temperature"),
("tas", 'T_SURF', 'K', "Near Surface Air Temperature"),
("ts", 'T_SKIN', 'K', "Skin Temperature"),
("ua", 'U', 'm s-1', "Zonal Wind"),
("uas", 'U_SURF', 'm s-1', "Zonal Wind at Surface"),
("va", 'V', 'm s-1', "Meridional Wind"),
("vas", 'V_SURF', 'm s-1', "Meridional Wind at Surface"),
("wap", 'OMEGA', 'Pa s-1', "Pressure Velocity"),
("zg", 'GHT', 'm', "Geopotential Height"),
)
# translation table mapping '-' and '_' to '.' so model names match the regex/prefix naming convention
tr_table = str.maketrans('-_', '..')
for m in cmip_models:
for v in cmip_variable_list:
model_regex = '^cmip5.{0}'.format(m.format(v[0]))
model_regex = model_regex.translate(tr_table)
filename_stem = model_regex
filename_stem = filename_stem.replace('^', '')
filename_stem = filename_stem.replace('.Amon.', '.')
filename_stem = filename_stem.replace('.r1i1p1', '')
tup = (joblist_create, model_regex, [v[0], ], (filename_stem, 'monthly', v[1], v[2], v[3]),)
dispatch_table.append(tup)
try:
choice = int(sys.argv[1])
assert 0 <= choice < len(dispatch_table)
except (IndexError, ValueError, AssertionError):
print("Did not understand the specified argument. "
"Require a number between 0 and {0}.".format(len(dispatch_table) - 1))
pprint(list(enumerate(dispatch_table)))
sys.exit(8)
k = dispatch_table[choice]
f = k[0]
jl = f(col, k[1], k[2])
glue_joblist_into_monthly_data(jl, *k[3])
client.close()
if __name__ == '__main__':
main()
| 59.938944
| 200
| 0.581064
|
fc72cf0ce0063d572117997cff0f5054cfb37bd5
| 3,570
|
py
|
Python
|
proj/archs/segmentation/baselines/net10a_isola.py
|
zqma/IIC
|
9d4e30b51535c6ca381389d9c22ce45be4d11883
|
[
"MIT"
] | null | null | null |
proj/archs/segmentation/baselines/net10a_isola.py
|
zqma/IIC
|
9d4e30b51535c6ca381389d9c22ce45be4d11883
|
[
"MIT"
] | null | null | null |
proj/archs/segmentation/baselines/net10a_isola.py
|
zqma/IIC
|
9d4e30b51535c6ca381389d9c22ce45be4d11883
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from proj.archs.cluster.vgg import VGGNet
from proj.archs.segmentation.net10a import SegmentationNet10aTrunk, \
SegmentationNet10a
from proj.utils.segmentation.baselines.general import get_patches
__all__ = ["SegmentationNet10aIsola"]
class IsolaHead(nn.Module):
def __init__(self, config):
super(IsolaHead, self).__init__()
self.patch_side = config.isola_patch_side
self.siamese_branch = nn.Sequential(
nn.Conv2d(in_channels=SegmentationNet10a.cfg[-1][0], out_channels=1024,
kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(inplace=True),
# nn.Conv2d(in_channels=1024, out_channels=1024,
# kernel_size=3, stride=1, padding=1, bias=False),
# nn.BatchNorm2d(1024),
# nn.ReLU(inplace=True)
)
self.joint = nn.Sequential(
nn.Linear(2 * 1024 * self.patch_side * self.patch_side, 1024),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(1024, 1),
# nn.ReLU(True),
# nn.Dropout(),
# nn.Linear(2048, 1)
)
def forward(self, patches1, patches2):
patches1 = self.siamese_branch(patches1)
patches2 = self.siamese_branch(patches2)
ni, k, h, w = patches1.size()
ni2, k2, h2, w2 = patches2.size()
if not ((ni == ni2) and (k == k2) and (h == h2) and (w == w2) and \
(h == self.patch_side) and (w == self.patch_side)):
print(ni, k, h, w)
print(ni2, k2, h2, w2)
assert (False)
# flatten all but first dim
patches1 = patches1.contiguous() # otherwise view may behave funny
patches2 = patches2.contiguous()
patches1 = patches1.view(patches1.size(0), -1)
patches2 = patches2.view(patches2.size(0), -1)
concatenated = torch.cat((patches1, patches2), dim=1)
ni3, nf = concatenated.size()
if not ((ni3 == ni) and (nf == (2 * 1024 * self.patch_side *
self.patch_side))):
print(ni, k, h, w)
print(ni2, k2, h2, w2)
print(patches1.size())
print(patches2.size())
print(ni3, nf)
assert (False)
return self.joint(concatenated) # n, 1
class SegmentationNet10aIsola(VGGNet):
def __init__(self, config):
super(SegmentationNet10aIsola, self).__init__()
self.patch_side = config.isola_patch_side
self.input_sz = config.input_sz
self.features_sz = SegmentationNet10a.cfg[-1][0]
print("SegmentationNet10aIsola: %d %d %d" % (self.patch_side,
self.input_sz,
self.features_sz))
self.features = SegmentationNet10aTrunk(config, cfg=SegmentationNet10a.cfg)
self.isola_head = IsolaHead(config)
self._initialize_weights()
def forward(self, x, centre=None, other=None, penultimate=False):
x = self.features(x)
x = F.interpolate(x, size=self.input_sz, mode="bilinear")
if not penultimate:
assert ((centre is not None) and (other is not None))
patches1, patches2 = \
get_patches(x, centre, other, self.patch_side)
adjacency = self.isola_head(patches1, patches2)
x = torch.sigmoid(adjacency)
return x
| 35
| 83
| 0.571989
|
2f2472662414ade8a4045d84e1e90c57e9721620
| 317
|
py
|
Python
|
leet/plan/algorithms/back_track/notes.py
|
manojkumar-github/DataStructures-DynamicProgramming-in-Python-JAVA-Cplusplus
|
16722a60c4c744ad3d240469b28f5d6ab6e9c25d
|
[
"MIT"
] | null | null | null |
leet/plan/algorithms/back_track/notes.py
|
manojkumar-github/DataStructures-DynamicProgramming-in-Python-JAVA-Cplusplus
|
16722a60c4c744ad3d240469b28f5d6ab6e9c25d
|
[
"MIT"
] | null | null | null |
leet/plan/algorithms/back_track/notes.py
|
manojkumar-github/DataStructures-DynamicProgramming-in-Python-JAVA-Cplusplus
|
16722a60c4c744ad3d240469b28f5d6ab6e9c25d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright (C) Pearson Assessments - 2020. All Rights Reserved.
# Proprietary - Use with Pearson Written Permission Only
"""
1) Solving the problem RECURSIVELY
2) by trying to build a solution incrementally, one piece at a time
3) removing those candidate solutions that fail to satisfy the constraint
"""
| 31.7
| 64
| 0.769716
|
6e4cb09e99c6dac6917cab70a0d8baa5b1b8f0bb
| 118
|
py
|
Python
|
python/testData/intentions/PyInvertIfConditionIntentionTest/commentsNoElseIf.py
|
Tasemo/intellij-community
|
50aeaf729b7073e91c7c77487a1f155e0dfe3fcd
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/intentions/PyInvertIfConditionIntentionTest/commentsNoElseIf.py
|
Tasemo/intellij-community
|
50aeaf729b7073e91c7c77487a1f155e0dfe3fcd
|
[
"Apache-2.0"
] | null | null | null |
python/testData/intentions/PyInvertIfConditionIntentionTest/commentsNoElseIf.py
|
Tasemo/intellij-community
|
50aeaf729b7073e91c7c77487a1f155e0dfe3fcd
|
[
"Apache-2.0"
] | null | null | null |
def func():
value = "not-none"
# Is none
<caret>if value is None:
print("None")
print(value)
| 14.75
| 28
| 0.525424
|
6292973d960b1f659a01574e9b17dbdadc2681d6
| 363
|
py
|
Python
|
dummyauthgate/__init__.py
|
d0p1s4m4/qwebirc
|
fc0f197fd514676a50ac2d082f81cda4b3274abe
|
[
"BSD-3-Clause"
] | null | null | null |
dummyauthgate/__init__.py
|
d0p1s4m4/qwebirc
|
fc0f197fd514676a50ac2d082f81cda4b3274abe
|
[
"BSD-3-Clause"
] | 49
|
2020-02-10T11:20:58.000Z
|
2021-07-12T11:15:53.000Z
|
dummyauthgate/__init__.py
|
d0p1s4m4/qwebirc
|
fc0f197fd514676a50ac2d082f81cda4b3274abe
|
[
"BSD-3-Clause"
] | null | null | null |
class DummyImplementation(object):
def __init__(self, *args, **kwargs):
pass
def __getattr__(self, *args, **kwargs):
raise Exception("Not implemented.")
def login_optional(self, *args, **kwargs):
return None
@classmethod
def get_session_data(cls, *args, **kwargs):
return {}
twisted = DummyImplementation
| 21.352941
| 48
| 0.639118
|
6d5315882c8cf98c4f784900579bfb73790e241a
| 3,271
|
py
|
Python
|
basex/common/cryptutils.py
|
yinziyan1206/x-base
|
dc74124ad9b07b799ef03917a0e9a882a062ac40
|
[
"BSD-2-Clause"
] | null | null | null |
basex/common/cryptutils.py
|
yinziyan1206/x-base
|
dc74124ad9b07b799ef03917a0e9a882a062ac40
|
[
"BSD-2-Clause"
] | null | null | null |
basex/common/cryptutils.py
|
yinziyan1206/x-base
|
dc74124ad9b07b799ef03917a0e9a882a062ac40
|
[
"BSD-2-Clause"
] | null | null | null |
__author__ = 'ziyan.yin'
import hashlib
import hmac
from typing import Union
import pybase64 as base64
def md5(content: Union[str, bytes, bytearray, memoryview]) -> str:
"""
md5 encrypt
:param content: words
:return: encrypted codes
"""
m = hashlib.md5()
if isinstance(content, str):
content = content.encode()
m.update(content)
return m.hexdigest()
def hmac_md5(content: Union[str, bytes, bytearray, memoryview], key: bytes = b'') -> str:
"""
md5 encrypt with hmac
:param content: words
:param key: secret
:return: encrypted words
"""
if isinstance(content, str):
content = content.encode()
m = hmac.new(key, content, digestmod=hashlib.md5)
return m.hexdigest()
def sha1(content: Union[str, bytes, bytearray, memoryview]) -> str:
"""
sha1 encrypt
:param content: words
:return: encrypted codes
"""
sha = hashlib.sha1()
if isinstance(content, str):
content = content.encode()
sha.update(content)
return sha.hexdigest()
def hmac_sha1(content: Union[str, bytes, bytearray, memoryview], key: bytes = b'') -> str:
"""
sha1 encrypt with hmac
:param content: words
:param key: secret
:return: encrypted words
"""
if isinstance(content, str):
content = content.encode()
m = hmac.new(key, content, digestmod=hashlib.sha1)
return m.hexdigest()
def sha256(content: Union[str, bytes, bytearray, memoryview]) -> str:
"""
sha256 encrypt
:param content: words
:return: encrypted codes
"""
sha = hashlib.sha256()
if isinstance(content, str):
content = content.encode()
sha.update(content)
return sha.hexdigest()
def hmac_sha256(content: Union[str, bytes, bytearray, memoryview], key: bytes = b'') -> str:
"""
sha256 encrypt with hmac
:param content: words
:param key: secret
:return: encrypted words
"""
if isinstance(content, str):
content = content.encode()
m = hmac.new(key, content, digestmod=hashlib.sha256)
return m.hexdigest()
def b64encode(content: Union[str, bytes, bytearray, memoryview]) -> bytes:
"""
base64 encode
:param content: words
:return: encrypted codes
"""
if isinstance(content, str):
content = content.encode()
return base64.b64encode(content)
def b64decode(content: Union[str, bytes, bytearray, memoryview]) -> bytes:
"""
base64 decode
:param content: codes
:return: decrypted words
"""
if isinstance(content, str):
content = content.encode()
return base64.b64decode(content)
def urlsafe_b64encode(content: Union[str, bytes, bytearray, memoryview]) -> bytes:
"""
base64 urlsafe encode
:param content: words
:return: encrypted codes
"""
if isinstance(content, str):
content = content.encode()
return base64.urlsafe_b64encode(content)
def urlsafe_b64decode(content: Union[str, bytes, bytearray, memoryview]) -> bytes:
"""
base64 urlsafe decode
:param content: words
:return: encrypted codes
"""
if isinstance(content, str):
content = content.encode()
return base64.urlsafe_b64decode(content)
| 23.364286
| 92
| 0.63528
|
e616cdd87fde5b0b6fb0e382e783b536d29cb5d1
| 2,649
|
py
|
Python
|
test/integration/test_text_to_speech_v1.py
|
Omegastick/python-sdk
|
568db5c5afabb3d99e2cb6be1afd69b08e2191d5
|
[
"Apache-2.0"
] | null | null | null |
test/integration/test_text_to_speech_v1.py
|
Omegastick/python-sdk
|
568db5c5afabb3d99e2cb6be1afd69b08e2191d5
|
[
"Apache-2.0"
] | null | null | null |
test/integration/test_text_to_speech_v1.py
|
Omegastick/python-sdk
|
568db5c5afabb3d99e2cb6be1afd69b08e2191d5
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import unittest
import watson_developer_cloud
import pytest
import os
@pytest.mark.skipif(
os.getenv('VCAP_SERVICES') is None, reason='requires VCAP_SERVICES')
class TestIntegrationTextToSpeechV1(unittest.TestCase):
text_to_speech = None
original_customizations = None
created_customization = None
@classmethod
def setup_class(cls):
cls.text_to_speech = watson_developer_cloud.TextToSpeechV1(
username="YOUR SERVICE USERNAME", password="YOUR SERVICE PASSWORD")
cls.text_to_speech.set_default_headers({
'X-Watson-Learning-Opt-Out':
'1',
'X-Watson-Test':
'1'
})
cls.original_customizations = cls.text_to_speech.list_voice_models()
cls.created_customization = cls.text_to_speech.create_voice_model(
name="test_integration_customization",
description="customization for tests")
@classmethod
def teardown_class(cls):
custid = cls.created_customization['customization_id']
cls.text_to_speech.delete_voice_model(customization_id=custid)
def test_voices(self):
output = self.text_to_speech.list_voices()
assert output['voices'] is not None
voice = self.text_to_speech.get_voice(output['voices'][0]['name'])
assert voice is not None
def test_speak(self):
output = self.text_to_speech.synthesize(
text="my voice is my passport",
accept='audio/wav',
voice='en-US_AllisonVoice')
assert output.content is not None
def test_pronunciation(self):
output = self.text_to_speech.get_pronunciation('hello')
assert output['pronunciation'] is not None
def test_customizations(self):
old_length = len(self.original_customizations['customizations'])
new_length = len(
self.text_to_speech.list_voice_models()['customizations'])
assert new_length - old_length >= 1
def test_custom_words(self):
customization_id = self.created_customization['customization_id']
words = self.text_to_speech.list_words(customization_id)['words']
assert len(words) == 0 # pylint: disable=len-as-condition
self.text_to_speech.add_word(
customization_id, word="ACLs", translation="ackles")
words = [{"word": "MACLs", "translation": "mackles"}]
self.text_to_speech.add_words(customization_id, words)
self.text_to_speech.delete_word(customization_id, 'ACLs')
word = self.text_to_speech.get_word(customization_id, 'MACLs')
assert word['translation'] == 'mackles'
| 37.842857
| 79
| 0.679124
|
c9e2439bdc07bb8ccd877e1b6e6d80e521116b94
| 526
|
py
|
Python
|
wheat/types/end_of_slot_bundle.py
|
grayfallstown/wheat-blockchain
|
f391cdd30a0cbcdb2adf4439a25581fd28b42c1f
|
[
"Apache-2.0"
] | 15
|
2021-07-12T14:27:42.000Z
|
2022-02-09T04:32:44.000Z
|
wheat/types/end_of_slot_bundle.py
|
grayfallstown/wheat-blockchain
|
f391cdd30a0cbcdb2adf4439a25581fd28b42c1f
|
[
"Apache-2.0"
] | 21
|
2021-07-12T23:25:36.000Z
|
2021-10-29T23:19:55.000Z
|
wheat/types/end_of_slot_bundle.py
|
grayfallstown/wheat-blockchain
|
f391cdd30a0cbcdb2adf4439a25581fd28b42c1f
|
[
"Apache-2.0"
] | 8
|
2021-07-12T13:15:19.000Z
|
2022-03-15T08:41:18.000Z
|
from dataclasses import dataclass
from typing import Optional
from wheat.types.blockchain_format.slots import (
ChallengeChainSubSlot,
InfusedChallengeChainSubSlot,
RewardChainSubSlot,
SubSlotProofs,
)
from wheat.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class EndOfSubSlotBundle(Streamable):
challenge_chain: ChallengeChainSubSlot
infused_challenge_chain: Optional[InfusedChallengeChainSubSlot]
reward_chain: RewardChainSubSlot
proofs: SubSlotProofs
| 26.3
| 67
| 0.819392
|
1f8462afb32dd78dc512dfe9b615e9b7688ada0e
| 12,626
|
py
|
Python
|
Sorting/sort_summary.py
|
alexgonzl/TreeMazeAnalyses2
|
9bd20328368a915a0d9b81c02ae7af37c5c0c839
|
[
"MIT"
] | null | null | null |
Sorting/sort_summary.py
|
alexgonzl/TreeMazeAnalyses2
|
9bd20328368a915a0d9b81c02ae7af37c5c0c839
|
[
"MIT"
] | null | null | null |
Sorting/sort_summary.py
|
alexgonzl/TreeMazeAnalyses2
|
9bd20328368a915a0d9b81c02ae7af37c5c0c839
|
[
"MIT"
] | null | null | null |
import sys, os, json, datetime, getopt, shutil, filecmp
from pathlib import Path
import numpy as np
import pandas as pd
## Fixed Parameters
overwrite = 1
nTTs = 16
def get_session_clusters(session):
'''
session = Path object indicating the directory of the session
'''
assert session.exists(), 'Could not find session. Check Spelling/Path.'
table = {'session': session.name, 'Clustered': 0, 'path': str(session), 'nCells': 0, 'nMua': 0, 'cell_IDs': {},
'mua_IDs': {}, 'dateClustered': {}, 'unClustered_TTs': [], 'All_TTs_Clustered': 0}
TTs = np.arange(1, nTTs + 1)
for tt in TTs:
fn = session / ('tt_' + str(tt)) / 'cluster_group.tsv'
# assert fn.exists(), 'could not find record for tt {}; in {}'.format(tt,sessionName)
if fn.exists():
try:
table['cell_IDs'][int(tt)] = []
table['mua_IDs'][int(tt)] = []
d = pd.read_csv(fn, delimiter='\t')
# found bug here. if clusters do not follow an order [0,1,2,3]
# this will give incorrect answers.
# cells = np.where(d['group']=='good')[0].tolist()
# mua = np.where(d['group']=='mua')[0].tolist()
# correct version.
cells = d['cluster_id'][d['group'] == 'good'].tolist()
mua = d['cluster_id'][d['group'] == 'mua'].tolist()
table['cell_IDs'][int(tt)] = []
table['mua_IDs'][int(tt)] = []
for cc in cells:
table['cell_IDs'][int(tt)].append(cc)
for mm in mua:
table['mua_IDs'][int(tt)].append(mm)
table['nCells'] += len(cells)
table['nMua'] += len(mua)
table['dateClustered'][int(tt)] = datetime.datetime.fromtimestamp(int(fn.stat().st_mtime)).strftime(
"%B %d %Y, %I:%M%p")
except:
print('In Session {}, Error Processing TT {}'.format(session.name, tt))
else:
table['unClustered_TTs'].append(int(tt))
if len(table['unClustered_TTs']) == 0:
table['All_TTs_Clustered'] = 1
table['Clustered'] = 1
elif len(table['unClustered_TTs']) == nTTs:
table['Clustered'] = 0
print("\nSession {} no TTs clustered.".format(session.name))
else:
table['Clustered'] = 1
if table['Clustered']:
table['dateSummary'] = datetime.datetime.today().strftime("%B %d %Y, %I:%M%p")
print("\nResults for {}:\n nCells = {} \n nMuas = {}".format(session.name, table['nCells'], table['nMua']))
return table
def CopyClustersToOak(localDir, oakDir):
localDir = Path(localDir)
oakDir = Path(oakDir)
for SummaryFile in oakDir.glob("*_ClusteringSummary.json"):
with SummaryFile.open(mode='r') as f:
cluster_summary = json.load(f)
for session in cluster_summary['Sessions']:
# copy individual tetrode clusters
sessionName = session + '_KSClusters'
existsList = []
notExistsList = []
updatedList = []
notUpDatedList = []
for tt in np.arange(1, nTTs + 1):
try:
fn = localDir / sessionName / ('tt_' + str(tt)) / 'cluster_group.tsv'
fn2 = localDir / sessionName / ('tt_' + str(tt)) / 'spike_clusters.npy'
if fn.exists():
sp = oakDir / sessionName / ('tt_' + str(tt)) / 'cluster_group.tsv'
sp2 = oakDir / sessionName / ('tt_' + str(tt)) / 'spike_clusters.npy'
if sp.exists():
# if it exists @ destination but has been change, overwrite.
if not filecmp.cmp(str(fn), str(sp), shallow=True):
shutil.copy(str(fn), str(sp))
updatedList.append(tt)
print('{}: TT {} overwrite.'.format(session, tt))
else:
# otherwise ignore.
existsList.append(tt)
notUpDatedList.append(tt)
else:
# if it doesn't exist, copy
shutil.copy(str(fn), str(sp))
updatedList.append(tt)
print('{}: TT {} Copy.'.format(session, tt))
if not filecmp.cmp(str(fn2), str(sp2), shallow=True):
shutil.copy(str(fn2), str(sp2))
print('Updating spike_cluster IDs file')
else:
notExistsList.append(tt)
except:
notUpDatedList.append(tt)
print("Possible Mounting Issue. Try umounting/remounting the Oak partition.")
print("Error", sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)
if len(existsList) == 16:
print("{}: All Files Exists in Cluster.".format(session))
elif len(existsList) > 0:
print("{}:\n Files exists and not updated TTs {} \n Files do not exists {} ".format(session, existsList,
notExistsList))
else:
print("{}: No Cluster Files to Copy.".format(session))
if len(updatedList) > 0:
print("{}: Updated Cluster Files: {}".format(session, updatedList))
# if len(notUpDatedList)==16:
# print("{}: No Clusters Updated. ".format(session))
# elif len(notUpDatedList)==0:
# print("{}: Updated all tetrode clusters".format(session))
# else:
# print("{}: Indetical cluster files, no updates for TTs {}".format(session, notUpDatedList))
print()
def GetClusterTable(cl_summary, oakPath, localPath=''):
'''
Human readble summary cluster table. Tells, what needs to be done still!
'''
oakPath = Path(oakPath)
colnames = ['SessionDate', 'Task', 'Animal', 'Clustered', 'nCells', 'nMua', 'BestTT']
emptyEntry = {key: [0] for key in colnames}
Sessions = []
Dates = []
Tasks = []
Animals = []
Clustered = []
for se, cl in cl_summary['Sessions'].items():
tmp = se.split('_')
Dates.append(tmp[2])
Tasks.append(tmp[1])
Animals.append(tmp[0])
Sessions.append(se)
Clustered.append(cl)
d = pd.DataFrame(0, index=Sessions, columns=colnames)
d['Task'] = Tasks
d['SessionDate'] = Dates
d['Animal'] = Animals
d['Clustered'] = Clustered
for sn in Sessions:
if d.at[sn, 'Clustered']:
date = d.at[sn, 'SessionDate']
task = d.at[sn, 'Task']
animal = d.at[sn, 'Animal']
try:
info = cl_summary[animal][date][task]
d.at[sn, 'nCells'] = info['nCells']
d.at[sn, 'nMua'] = info['nMua']
d.at[sn, 'BestTT'] = dict_argmax(info['cell_IDs']) + 1
# d.at[sn,'SummaryDate'] = info['dateSummary']
except:
print("Error updating session {}".format(sn))
print("Error", sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)
print(d)
localSave = 0
oakSave = 0
fn = 'ClusterTableSummary.csv'
try:
if len(str(localPath)) > 0:
localPath = Path(localPath)
d.to_csv(str(localPath / fn))
localSave = 1
print('File Saved to {}'.format(localPath))
else:
localSave = 0
except:
print('Could not save file locally.')
print("Error", sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)
try:
d.to_csv(str(oakPath / fn))
oakSave = 1
print('File Saved to {}'.format(oakPath))
except:
if localSave and (not oakSave):
try:
shutil.copy2(str(localPath / fn), str(oakPath / fn))
print('File copy from local to {}'.format(oakPath))
except:
print('Could not save table to Oak.')
print("Error", sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)
else:
print('Could not save table to Oak.')
print("Error", sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)
def UpdateClusterInfo(oakPath, animal, localPath, overwrite=False):
oakPath = Path(oakPath) # convert to Path object
# Cluster Summary Json File. If it doesn't exist, create it.
cnt = 0
cl_summary_fn = oakPath / ('{}_ClusteringSummary.json'.format(animal))
if cl_summary_fn.exists() and overwrite == 0:
print('Loading Existing Summary File.')
with cl_summary_fn.open() as f:
cl_summary = json.load(f)
else: # new summary
print('Making New Summary File.')
cl_summary = {}
cl_summary[animal] = {}
cl_summary['Sessions'] = {}
for session in localPath.glob('*_KSClusters'):
try:
print(session.name, cl_summary['Sessions'][session.name] == 0)
if cl_summary['Sessions'][session.name] == 0 or overwrite:
updateSession = 1
else:
updateSession = 0
except:
updateSession = 1
if updateSession:
try:
tmp = session.name.split('_')
an = tmp[0]
# assert animal==an, 'Error, invalid session found.'
task = tmp[1]
date = tmp[2]
oses = an + '_' + task + '_' + date
print(oses)
if an == 'NE':
an = 'Ne'
if date not in cl_summary.setdefault(an, {}):
cl_summary[an][date] = {}
if not task in cl_summary[an][date].keys() or overwrite:
cl_summary[an][date][task] = {}
cl_summary[an][date][task] = get_session_clusters(session)
cl_summary['Sessions'][oses] = cl_summary[an][date][task]['Clustered']
except:
print('Unable to process session {}'.format(session))
print("Error", sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)
# Save
try:
localPath = Path(localPath)
if localPath.exists():
fn = cl_summary_fn.name
with (localPath / fn).open(mode='w') as f:
json.dump(cl_summary, f, indent=4)
print('File Saved locally.')
except:
print('unable to save json file locally.')
print("Error", sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)
try:
with cl_summary_fn.open(mode='w') as f:
json.dump(cl_summary, f, indent=4)
print('File Saved into OAK')
except:
print('unable to update json cluster info file in oak. probably permission issue.')
print("Error", sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2].tb_lineno)
GetClusterTable(cl_summary, oakPath, localPath)
# print info
nC, nM, nCS, nS = 0, 0, 0, 0
for d in cl_summary[animal].keys():
info = cl_summary[animal][d]
for t in info.keys():
nS += 1
info2 = info[t]
if info2['Clustered']:
nC += info2['nCells']
nM += info2['nMua']
nCS += 1
print("\n Overall Summary for {} : \n nSessions = {} \n nClusteredSessions {} \n nCells = {} \n nMua = {}".format(
animal, nS, nCS, nC, nM))
return cl_summary
##### AUXILIARY FUNCTIONS #####
def dict_argmax(d):
d2 = []
for k in d.keys():
d2.append(len(d[k]))
return np.argmax(d2)
if __name__ == "__main__":
animal = 'Li'
oakPath = Path('/mnt/o/giocomo/alexg/Clustered/', animal)
localPath = Path('/mnt/c/Users/alexg8/Documents/Data/', animal, 'Clustered')
CopyClustersToOak(localPath, oakPath)
# Bug here: doesn't seem to just read the existing file.
# UpdateClusterInfo(oakPath,animal,localPath)
| 40.08254
| 121
| 0.500396
|
c6daa4015611f6bd971baba940dbfb6704af27fd
| 618
|
py
|
Python
|
secrets.py
|
skybohan/woodblock_weather
|
0bc1d34dd911a85bd064cc3565c5a0caa1fab6af
|
[
"MIT"
] | 1
|
2022-03-27T20:48:34.000Z
|
2022-03-27T20:48:34.000Z
|
secrets.py
|
skybohan/woodblock_weather
|
0bc1d34dd911a85bd064cc3565c5a0caa1fab6af
|
[
"MIT"
] | null | null | null |
secrets.py
|
skybohan/woodblock_weather
|
0bc1d34dd911a85bd064cc3565c5a0caa1fab6af
|
[
"MIT"
] | null | null | null |
# This file is where you keep secret settings, passwords, and tokens!
# If you put them in the code you risk committing that info or sharing it
# which would be not great. So, instead, keep it all in this one file and
# keep it a secret.
secrets = {
'ssid' : 'Add SSID here', # Keep the two '' quotes around the name
'password' : 'Add WiFi password here', # Keep the two '' quotes around password
'darksky_token' : 'Dark Sky key', # Your Dark Sky API key
'aio_username' : 'username', # Your Adafruit IO username
'aio_key' : 'Adafruit IO key', # Your Adafruit IO key
}
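# Illustrative only (not part of the original file): other modules on the board
# would typically read these values with something like
#
#     from secrets import secrets
#     print("Connecting to", secrets['ssid'])
#
# The import name and dictionary keys shown here are assumptions based on this
# file's layout.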
| 47.538462
| 91
| 0.65534
|
d79cb67da6aa8139aa3d476207a9a6e90939c2f0
| 2,352
|
py
|
Python
|
tests/gamestonk_terminal/cryptocurrency/defi/test_llama_model.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | 1
|
2022-03-19T23:53:38.000Z
|
2022-03-19T23:53:38.000Z
|
tests/gamestonk_terminal/cryptocurrency/defi/test_llama_model.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | null | null | null |
tests/gamestonk_terminal/cryptocurrency/defi/test_llama_model.py
|
minhhoang1023/GamestonkTerminal
|
195dc19b491052df080178c0cc6a9d535a91a704
|
[
"MIT"
] | null | null | null |
# IMPORTATION STANDARD
import gzip
import json
# IMPORTATION THIRDPARTY
import pandas as pd
import pytest
# IMPORTATION INTERNAL
from gamestonk_terminal.cryptocurrency.defi import llama_model
def filter_json_data(response):
"""To reduce cassette size."""
headers = response["headers"]
if "FILTERED" in headers:
return response
limit = 10
content = response["body"]["string"]
if content.decode().startswith("H4sI"):
content = gzip.decompress(content).decode()
content = json.loads(content)
else:
content = json.loads(content)
if isinstance(content, list):
new_content = content[:limit]
elif isinstance(content, dict):
new_content = {k: content[k] for k in list(content)[:limit]}
else:
raise AttributeError(f"Content type not supported : {content}")
new_content_json = json.dumps(new_content)
new_content_gz = gzip.compress(new_content_json.encode())
response["body"]["string"] = new_content_gz
response["headers"]["Content-Encoding"] = ["gzip"]
response["headers"]["FILTERED"] = ["TRUE"]
return response
def gzip_data(response):
"""To reduce cassette size."""
headers = response["headers"]
if "COMPRESSED" in headers:
return response
content = response["body"]["string"].decode()
if content.startswith("H4sI"):
content = gzip.decompress(content)
new_content_gz = gzip.compress(content.encode())
response["body"]["string"] = new_content_gz
response["headers"]["Content-Encoding"] = ["gzip"]
response["headers"]["COMPRESSED"] = ["TRUE"]
return response
@pytest.mark.vcr(before_record_response=gzip_data)
@pytest.mark.vcr
@pytest.mark.parametrize(
"protocol",
[
"anchor",
],
)
def test_get_defi_protocol(protocol, recorder):
df = llama_model.get_defi_protocol(protocol)
recorder.capture(df)
@pytest.mark.vcr(before_record_response=filter_json_data)
def test_get_defi_protocols():
df = llama_model.get_defi_protocols()
# recorder not used
# somehow there are some whitespace diff between captured/recorded
assert isinstance(df, pd.DataFrame)
assert not df.empty
@pytest.mark.vcr(before_record_response=gzip_data)
def test_get_defi_tvl(recorder):
df = llama_model.get_defi_tvl()
recorder.capture(df)
| 25.021277
| 71
| 0.689626
|
da96623ab00cd38a6d8722f021b5967825c4ceb6
| 568
|
py
|
Python
|
server/model/Applications.py
|
alivcor/airavat
|
e319ea28f39a4c2480d35bdbce5b038f41c0de93
|
[
"MIT"
] | null | null | null |
server/model/Applications.py
|
alivcor/airavat
|
e319ea28f39a4c2480d35bdbce5b038f41c0de93
|
[
"MIT"
] | 2
|
2021-03-19T14:32:15.000Z
|
2021-03-19T14:49:29.000Z
|
server/model/Applications.py
|
alivcor/airavat
|
e319ea28f39a4c2480d35bdbce5b038f41c0de93
|
[
"MIT"
] | 1
|
2021-11-08T10:08:33.000Z
|
2021-11-08T10:08:33.000Z
|
import sqlalchemy as db
class Applications:
def __init__(self, session):
self.session = session
self.applications = db.Table('AIRAVAT_APPLICATIONS',
session.dbEngine.metadata,
autoload=True,
autoload_with=session.dbEngine.engine)
def fetchAll(self):
apps = db.select([self.applications])
resultSet = self.session.dbEngine.db_session.query(apps)
return [r._asdict() for r in resultSet]
# return resultSet.__dict__
| 33.411765
| 70
| 0.582746
|
b164ded05335e92dcd49d133185e7634bbd529a5
| 3,287
|
py
|
Python
|
tests/test_tokenization_xlm.py
|
timpal0l/transformers
|
d86d57faa3b6511c6e4d9139535d77b695b9af8a
|
[
"Apache-2.0"
] | 2
|
2020-11-30T11:30:40.000Z
|
2021-03-26T17:20:33.000Z
|
tests/test_tokenization_xlm.py
|
timpal0l/transformers
|
d86d57faa3b6511c6e4d9139535d77b695b9af8a
|
[
"Apache-2.0"
] | 3
|
2021-06-08T23:15:29.000Z
|
2022-01-13T03:40:10.000Z
|
tests/test_tokenization_xlm.py
|
timpal0l/transformers
|
d86d57faa3b6511c6e4d9139535d77b695b9af8a
|
[
"Apache-2.0"
] | 1
|
2020-03-19T06:01:53.000Z
|
2020-03-19T06:01:53.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from .test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = XLMTokenizer
test_rust_tokenizer = False
def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(self.merges_file, "w") as fp:
fp.write("\n".join(merges))
def get_input_output_texts(self, tokenizer):
input_text = "lower newer"
output_text = "lower newer"
return input_text, output_text
def test_full_tokenizer(self):
""" Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt """
tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
text = "lower"
bpe_tokens = ["low", "er</w>"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + ["<unk>"]
input_bpe_tokens = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
def test_sequence_builders(self):
tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 33.20202
| 94
| 0.624277
|
7e5cfd2dbd74458b411e27dfba7d8f40799c53a6
| 20,266
|
py
|
Python
|
djangocms_text_ckeditor/cms_plugins.py
|
toffi9/djangocms-text-ckeditor
|
175a1a444de8ca1ba4742196cb83150d45b5c505
|
[
"BSD-3-Clause"
] | null | null | null |
djangocms_text_ckeditor/cms_plugins.py
|
toffi9/djangocms-text-ckeditor
|
175a1a444de8ca1ba4742196cb83150d45b5c505
|
[
"BSD-3-Clause"
] | null | null | null |
djangocms_text_ckeditor/cms_plugins.py
|
toffi9/djangocms-text-ckeditor
|
175a1a444de8ca1ba4742196cb83150d45b5c505
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from distutils.version import LooseVersion
import json
import re
import cms
from cms.models import CMSPlugin
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.utils.placeholder import get_toolbar_plugin_struct
from cms.utils.urlutils import admin_reverse
from django.conf.urls import url
from django.contrib.admin.utils import unquote
from django.core import signing
from django.core.exceptions import PermissionDenied, ValidationError
from django.core.urlresolvers import reverse
from django.db import transaction
from django.forms.fields import CharField
from django.http import (
Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseRedirect,
)
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text
from django.utils.translation import ugettext
from django.views.decorators.clickjacking import xframe_options_sameorigin
from django.views.decorators.http import require_POST
from . import settings
from .forms import ActionTokenValidationForm, DeleteOnCancelForm, RenderPluginForm, TextForm
from .models import Text
from .utils import (
OBJ_ADMIN_WITH_CONTENT_RE_PATTERN,
plugin_tags_to_admin_html,
plugin_tags_to_id_list,
plugin_tags_to_user_html,
random_comment_exempt,
replace_plugin_tags,
plugin_to_tag,
_plugin_tags_to_html,
)
from .widgets import TextEditorWidget
CMS_34 = LooseVersion(cms.__version__) >= LooseVersion('3.4')
def _user_can_change_placeholder(request, placeholder):
if CMS_34:
return placeholder.has_change_permission(request.user)
return placeholder.has_change_permission(request)
def post_add_plugin(operation, **kwargs):
from djangocms_history.actions import ADD_PLUGIN
from djangocms_history.helpers import get_bound_plugins, get_plugin_data
from djangocms_history.models import dump_json
text_plugin = kwargs['plugin']
new_plugin_ids = set(text_plugin._get_inline_plugin_ids())
if not new_plugin_ids:
# User has not embedded any plugins in the text
return
new_plugins = CMSPlugin.objects.filter(pk__in=new_plugin_ids)
new_plugins = get_bound_plugins(new_plugins)
# Extend the recorded added plugins to include the inline plugins (if any)
action = operation.actions.only('post_action_data').get(action=ADD_PLUGIN, order=1)
post_data = json.loads(action.post_action_data)
post_data['plugins'].extend(get_plugin_data(plugin) for plugin in new_plugins)
action.post_action_data = dump_json(post_data)
action.save(update_fields=['post_action_data'])
def pre_change_plugin(operation, **kwargs):
from djangocms_history.actions import ADD_PLUGIN, DELETE_PLUGIN
from djangocms_history.helpers import get_bound_plugins, get_plugin_data
old_text_plugin = kwargs['old_plugin']
old_plugin_ids = set(old_text_plugin._get_inline_plugin_ids())
new_text_plugin = kwargs['new_plugin']
new_plugin_ids = set(new_text_plugin._get_inline_plugin_ids())
added_plugin_ids = new_plugin_ids.difference(old_plugin_ids)
deleted_plugin_ids = old_plugin_ids.difference(new_plugin_ids)
plugin_ids = added_plugin_ids | deleted_plugin_ids
if added_plugin_ids == deleted_plugin_ids:
# User has not added or removed embedded plugins
return
order = 1
# This app is a special case.
# We know the old and new tree orders because inline plugins
# have already been set on the database when this pre operation
# is executed.
old_tree = (
old_text_plugin
.cmsplugin_set
.filter(pk__in=old_plugin_ids)
.order_by('position')
.values_list('pk', flat=True)
)
old_tree = list(old_tree)
new_tree = (
new_text_plugin
.cmsplugin_set
.filter(pk__in=new_plugin_ids)
.order_by('position')
.values_list('pk', flat=True)
)
new_tree = list(new_tree)
plugins = CMSPlugin.objects.filter(pk__in=plugin_ids)
bound_plugins = list(get_bound_plugins(plugins))
if added_plugin_ids:
order += 1
pre_action_data = {
'order': old_tree,
'parent_id': old_text_plugin.pk,
}
post_plugin_data = [get_plugin_data(plugin) for plugin in bound_plugins
if plugin.pk in added_plugin_ids]
post_action_data = {
'order': new_tree,
'parent_id': old_text_plugin.pk,
'plugins': post_plugin_data,
}
operation.create_action(
action=ADD_PLUGIN,
language=old_text_plugin.language,
placeholder=kwargs['placeholder'],
pre_data=pre_action_data,
post_data=post_action_data,
order=order,
)
if deleted_plugin_ids:
order += 1
deleted_plugins = [plugin for plugin in bound_plugins if plugin.pk in deleted_plugin_ids]
pre_plugin_data = [get_plugin_data(plugin) for plugin in deleted_plugins]
pre_action_data = {
'order': old_tree,
'parent_id': old_text_plugin.pk,
'plugins': pre_plugin_data,
}
post_plugin_data = [get_plugin_data(plugin, only_meta=True) for plugin in deleted_plugins]
post_action_data = {
'order': new_tree,
'parent_id': old_text_plugin.pk,
'plugins': post_plugin_data,
}
operation.create_action(
action=DELETE_PLUGIN,
language=old_text_plugin.language,
placeholder=kwargs['placeholder'],
pre_data=pre_action_data,
post_data=post_action_data,
order=order,
)
class TextPlugin(CMSPluginBase):
model = Text
name = settings.TEXT_PLUGIN_NAME
module = settings.TEXT_PLUGIN_MODULE_NAME
form = TextForm
render_template = "cms/plugins/text.html"
change_form_template = "cms/plugins/text_plugin_change_form.html"
ckeditor_configuration = settings.TEXT_CKEDITOR_CONFIGURATION
disable_child_plugins = True
# These are executed by the djangocms-history app
# We use them to inject inline plugin data
operation_handler_callbacks = {
'post_add_plugin': post_add_plugin,
'pre_change_plugin': pre_change_plugin,
}
if CMS_34:
# On django CMS 3.5 this attribute is set automatically
# when do_post_copy is defined in the plugin class.
_has_do_post_copy = True
@classmethod
def do_post_copy(self, instance, source_map):
ids = plugin_tags_to_id_list(instance.body)
ids_map = {pk: source_map[pk].pk for pk in ids if pk in source_map}
new_text = replace_plugin_tags(instance.body, ids_map)
self.model.objects.filter(pk=instance.pk).update(body=new_text)
@staticmethod
def get_translation_export_content(field, plugin_data):
def _render_plugin_with_content(obj, match):
from djangocms_translations.utils import get_text_field_child_label
field = get_text_field_child_label(obj.plugin_type)
content = getattr(obj, field) if field else ''
return plugin_to_tag(obj, content)
content = _plugin_tags_to_html(plugin_data[field], output_func=_render_plugin_with_content)
subplugins_within_this_content = plugin_tags_to_id_list(content)
return content, subplugins_within_this_content
@staticmethod
def set_translation_import_content(content, plugin):
data = [x.groups() for x in re.finditer(OBJ_ADMIN_WITH_CONTENT_RE_PATTERN, content)]
data = {int(pk): value for pk, value in data}
return {
subplugin_id: data[subplugin_id]
for subplugin_id in plugin_tags_to_id_list(content)
}
def get_editor_widget(self, request, plugins, plugin):
"""
Returns the Django form Widget to be used for
the text area
"""
cancel_url_name = self.get_admin_url_name('delete_on_cancel')
cancel_url = reverse('admin:%s' % cancel_url_name)
render_plugin_url_name = self.get_admin_url_name('render_plugin')
render_plugin_url = reverse('admin:%s' % render_plugin_url_name)
action_token = self.get_action_token(request, plugin)
# should we delete the text plugin when
# the user cancels?
delete_text_on_cancel = (
'delete-on-cancel' in request.GET and
not plugin.get_plugin_instance()[0]
)
widget = TextEditorWidget(
installed_plugins=plugins, pk=plugin.pk,
placeholder=plugin.placeholder,
plugin_language=plugin.language,
configuration=self.ckeditor_configuration,
render_plugin_url=render_plugin_url,
cancel_url=cancel_url,
action_token=action_token,
delete_on_cancel=delete_text_on_cancel,
)
return widget
def get_form_class(self, request, plugins, plugin):
"""
Returns a subclass of Form to be used by this plugin
"""
widget = self.get_editor_widget(
request=request,
plugins=plugins,
plugin=plugin,
)
instance = plugin.get_plugin_instance()[0]
if instance:
context = RequestContext(request)
context['request'] = request
rendered_text = plugin_tags_to_admin_html(
text=instance.body,
context=context,
)
else:
rendered_text = None
# We avoid mutating the Form declared above by subclassing
class TextPluginForm(self.form):
body = CharField(widget=widget, required=False)
def __init__(self, *args, **kwargs):
initial = kwargs.pop('initial', {})
if rendered_text:
initial['body'] = rendered_text
super(TextPluginForm, self).__init__(*args, initial=initial, **kwargs)
return TextPluginForm
@xframe_options_sameorigin
def add_view(self, request, form_url='', extra_context=None):
if 'plugin' in request.GET:
# CMS >= 3.4 compatibility
self.cms_plugin_instance = self._get_plugin_or_404(request.GET['plugin'])
if getattr(self, "cms_plugin_instance", None):
# This can happen if the user did not properly cancel the plugin
# and so a "ghost" plugin instance is left over.
# The instance is a record that points to the Text plugin
# but is not a real text plugin instance.
return super(TextPlugin, self).add_view(
request, form_url, extra_context
)
if not self.has_add_permission(request):
# this permission check is done by Django on the normal
# workflow of adding a plugin.
# This is NOT the normal workflow because we create a plugin
# on GET request to the /add/ endpoint and so we bypass
# django's add_view, thus bypassing permission check.
message = ugettext('You do not have permission to add a plugin')
return HttpResponseForbidden(force_text(message))
try:
# CMS 3.3 compatibility
data = self.validate_add_request(request)
except AttributeError:
# CMS >= 3.4 compatibility
_data = self._cms_initial_attributes
data = {
'plugin_language': _data['language'],
'placeholder_id': _data['placeholder'],
'parent': _data['parent'],
'position': _data['position'],
'plugin_type': _data['plugin_type'],
'plugin_parent': _data['parent'],
}
except PermissionDenied:
message = ugettext('You do not have permission to add a plugin')
return HttpResponseForbidden(force_text(message))
except ValidationError as error:
return HttpResponseBadRequest(error.message)
# Sadly we have to create the CMSPlugin record on add GET request
# because we need this record in order to allow the user to add
# child plugins to the text (image, link, etc..)
plugin = CMSPlugin.objects.create(
language=data['plugin_language'],
plugin_type=data['plugin_type'],
position=data['position'],
placeholder=data['placeholder_id'],
parent=data.get('plugin_parent'),
)
query = request.GET.copy()
query['plugin'] = plugin.pk
success_url = admin_reverse('cms_page_add_plugin')
# Because we've created the cmsplugin record
# we need to delete the plugin when a user cancels.
success_url += '?delete-on-cancel&' + query.urlencode()
return HttpResponseRedirect(success_url)
def get_plugin_urls(self):
def pattern(regex, func):
name = self.get_admin_url_name(func.__name__)
return url(regex, func, name=name)
url_patterns = [
pattern(r'^render-plugin/$', self.render_plugin),
pattern(r'^delete-on-cancel/$', self.delete_on_cancel),
]
return url_patterns
def get_admin_url_name(self, name):
plugin_type = self.__class__.__name__.lower()
url_name = "%s_%s_%s" % (self.model._meta.app_label, plugin_type, name)
return url_name
def _get_text_plugin_from_request(self, request, data):
if not (request.user.is_active and request.user.is_staff):
raise PermissionDenied
form = ActionTokenValidationForm(data)
if form.is_valid():
session_key = request.session.session_key
text_plugin_id = form.get_id_from_token(session_key)
if text_plugin_id:
return self._get_plugin_or_404(text_plugin_id)
message = ugettext("Unable to process your request. Invalid token.")
raise ValidationError(message=force_text(message))
@random_comment_exempt
@xframe_options_sameorigin
def render_plugin(self, request):
try:
text_plugin = self._get_text_plugin_from_request(request, data=request.GET)
except ValidationError as error:
return HttpResponseBadRequest(error.message)
form = RenderPluginForm(request.GET, text_plugin=text_plugin)
if not form.is_valid():
message = ugettext("Unable to process your request.")
return HttpResponseBadRequest(message)
plugin_class = text_plugin.get_plugin_class_instance()
# The following is needed for permission checking
plugin_class.opts = plugin_class.model._meta
if not (plugin_class.has_change_permission(request, obj=text_plugin) and
_user_can_change_placeholder(request, text_plugin.placeholder)):
raise PermissionDenied
return HttpResponse(form.render_plugin(request))
@method_decorator(require_POST)
@xframe_options_sameorigin
@transaction.atomic
def delete_on_cancel(self, request):
# This view is responsible for deleting a plugin
# bypassing the delete permissions.
try:
text_plugin = self._get_text_plugin_from_request(request, data=request.POST)
except ValidationError as error:
return HttpResponseBadRequest(error.message)
# This form validates that the given plugin is a child
# of the text plugin or is a text plugin.
# If the plugin is a child then we validate that this child
# is not present in the text plugin (because then it's not a cancel).
# If the plugin is a text plugin then we validate that the text
# plugin does NOT have a real instance attached.
form = DeleteOnCancelForm(request.POST, text_plugin=text_plugin)
if not form.is_valid():
message = ugettext("Unable to process your request.")
return HttpResponseBadRequest(message)
plugin_class = text_plugin.get_plugin_class_instance()
# The following is needed for permission checking
plugin_class.opts = plugin_class.model._meta
# Check for add permissions because this view is meant
# only for plugins created through the ckeditor
# and the ckeditor plugin itself.
if not (plugin_class.has_add_permission(request) and
_user_can_change_placeholder(request, text_plugin.placeholder)):
raise PermissionDenied
# Token is validated after checking permissions
# to avoid non-auth users from triggering validation mechanism.
form.delete()
# 204 -> request was successful but no response returned.
return HttpResponse(status=204)
@classmethod
def get_child_plugin_candidates(cls, slot, page):
# This plugin can only have text_enabled plugins
# as children.
text_enabled_plugins = plugin_pool.get_text_enabled_plugins(
placeholder=slot,
page=page,
)
return text_enabled_plugins
def get_form(self, request, obj=None, **kwargs):
plugin = getattr(self, "cms_plugin_instance", None) or obj
get_plugin = plugin_pool.get_plugin
child_plugin_types = self.get_child_classes(
slot=plugin.placeholder.slot,
page=self.page,
)
child_plugins = (get_plugin(name) for name in child_plugin_types)
plugins = get_toolbar_plugin_struct(
child_plugins,
plugin.placeholder.slot,
self.page,
)
form = self.get_form_class(
request=request,
plugins=plugins,
plugin=plugin,
)
kwargs['form'] = form # override standard form
return super(TextPlugin, self).get_form(request, obj, **kwargs)
def render(self, context, instance, placeholder):
context.update({
'body': plugin_tags_to_user_html(
instance.body,
context,
),
'placeholder': placeholder,
'object': instance
})
return context
def save_model(self, request, obj, form, change):
if getattr(self, "cms_plugin_instance", None):
# Because the plugin was created by manually
# creating the CMSPlugin record, it's important
# to assign all the values from the CMSPlugin record
# to the real "non ghost" instance.
fields = self.cms_plugin_instance._meta.fields
for field in fields:
# assign all the fields - we can do this, because object is
# subclassing cms_plugin_instance (one to one relation)
value = getattr(self.cms_plugin_instance, field.name)
setattr(obj, field.name, value)
super(TextPlugin, self).save_model(request, obj, form, change)
# This must come after calling save
# If `clean_plugins()` deletes child plugins, django-treebeard will call
# save() again on the Text instance (aka obj in this context) to update mptt values (numchild, etc).
# See this ticket for details https://github.com/divio/djangocms-text-ckeditor/issues/212
obj.clean_plugins()
def get_action_token(self, request, obj):
plugin_id = force_text(obj.pk)
# salt is different for every user
signer = signing.Signer(salt=request.session.session_key)
return signer.sign(plugin_id)
def _get_plugin_or_404(self, pk):
plugin_type = self.__class__.__name__
plugins = (
CMSPlugin
.objects
.select_related('placeholder', 'parent')
.filter(plugin_type=plugin_type)
)
field = self.model._meta.pk
try:
object_id = field.to_python(unquote(pk))
except (ValidationError, ValueError):
raise Http404('Invalid plugin id')
return get_object_or_404(plugins, pk=object_id)
plugin_pool.register_plugin(TextPlugin)
| 37.669145
| 108
| 0.662736
|
259782422f5c1e10b4608f85a75b4cae42b5790c
| 1,030
|
py
|
Python
|
Tutorial_Geral/kivymd_Binding_Input.py
|
LivioAlvarenga/Tutoriais_Kivy_KivyMD
|
b6225578e764eaf0312afafbb2f76dc06f92342d
|
[
"MIT"
] | null | null | null |
Tutorial_Geral/kivymd_Binding_Input.py
|
LivioAlvarenga/Tutoriais_Kivy_KivyMD
|
b6225578e764eaf0312afafbb2f76dc06f92342d
|
[
"MIT"
] | null | null | null |
Tutorial_Geral/kivymd_Binding_Input.py
|
LivioAlvarenga/Tutoriais_Kivy_KivyMD
|
b6225578e764eaf0312afafbb2f76dc06f92342d
|
[
"MIT"
] | null | null | null |
from kivymd.app import MDApp
from kivymd.uix.screen import Screen
from kivymd.uix.button import MDRectangleFlatButton
from kivy.lang import Builder
from helpers import username_helper
class DemoApp(MDApp):
def build(self):
self.theme_cls.primary_palette = "Green"
screen = Screen()
'''username = MDTextField(text='Coloque seu nome',
pos_hint={'center_x': 0.5, 'center_y': 0.5},
size_hint_x=None, width=200)'''
botão = MDRectangleFlatButton(text='Hello World',
pos_hint={'center_x': 0.5,
'center_y': 0.4},
on_release=self.show_data)
self.username = Builder.load_string(username_helper)
screen.add_widget(self.username)
screen.add_widget(botão)
return screen
def show_data(self, obj):
print(self.username.text)
if __name__ == '__main__':
DemoApp().run()
| 31.212121
| 75
| 0.567961
|
fb16ec91683a444266bf53a15fbc65a56d394036
| 249
|
py
|
Python
|
env/Lib/site-packages/alembic/__init__.py
|
theXtroyer1221/Climate-luft
|
37eabdd78c15172ea980b59d1aff65d8628cb845
|
[
"MIT"
] | null | null | null |
env/Lib/site-packages/alembic/__init__.py
|
theXtroyer1221/Climate-luft
|
37eabdd78c15172ea980b59d1aff65d8628cb845
|
[
"MIT"
] | null | null | null |
env/Lib/site-packages/alembic/__init__.py
|
theXtroyer1221/Climate-luft
|
37eabdd78c15172ea980b59d1aff65d8628cb845
|
[
"MIT"
] | null | null | null |
import sys
from . import context # noqa
from . import op # noqa
from .runtime import environment
from .runtime import migration
__version__ = "1.5.5"
sys.modules["alembic.migration"] = migration
sys.modules["alembic.environment"] = environment
| 20.75
| 48
| 0.75502
|
c1855c39ecb0b1a65ac4affa7ee1f5a7714a43ad
| 198
|
py
|
Python
|
tech_project/lib/python2.7/site-packages/cms/test_utils/project/sampleapp/urls_example.py
|
priyamshah112/Project-Descripton-Blog
|
8e01016c6be79776c4f5ca75563fa3daa839e39e
|
[
"MIT"
] | 5
|
2015-03-08T08:46:58.000Z
|
2021-11-16T12:34:15.000Z
|
cms/test_utils/project/sampleapp/urls_example.py
|
thisisalamin/django-cms
|
eeb1e4712b3866e243daf800c142e2199e4be9df
|
[
"BSD-3-Clause"
] | 102
|
2020-08-11T23:57:18.000Z
|
2022-03-12T00:46:00.000Z
|
cms/test_utils/project/sampleapp/urls_example.py
|
thisisalamin/django-cms
|
eeb1e4712b3866e243daf800c142e2199e4be9df
|
[
"BSD-3-Clause"
] | 4
|
2019-01-26T09:58:37.000Z
|
2019-06-24T08:12:43.000Z
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from ..placeholderapp import views
app_name = 'example_app'
urlpatterns = [
url(r'^example/$', views.example_view, name="example"),
]
| 18 | 59 | 0.681818 |
1fa3c155294dc1b93586c6ebd3b6e0001c99e16c | 18,685 | py | Python | allennlp/data/tokenizers/pretrained_transformer_tokenizer.py | lgessler/allennlp | 0e64b4d3281808fac0fe00cc5b56e5378dbb7615 | ["Apache-2.0"] | null | null | null | allennlp/data/tokenizers/pretrained_transformer_tokenizer.py | lgessler/allennlp | 0e64b4d3281808fac0fe00cc5b56e5378dbb7615 | ["Apache-2.0"] | 71 | 2020-10-19T13:06:15.000Z | 2022-03-29T13:04:06.000Z | allennlp/data/tokenizers/pretrained_transformer_tokenizer.py | lgessler/allennlp | 0e64b4d3281808fac0fe00cc5b56e5378dbb7615 | ["Apache-2.0"] | null | null | null |
import copy
import logging
from typing import Any, Dict, List, Optional, Tuple, Iterable
from overrides import overrides
from transformers import PreTrainedTokenizer
from allennlp.common.util import sanitize_wordpiece
from allennlp.data.tokenizers.token import Token
from allennlp.data.tokenizers.tokenizer import Tokenizer
logger = logging.getLogger(__name__)
@Tokenizer.register("pretrained_transformer")
class PretrainedTransformerTokenizer(Tokenizer):
"""
A `PretrainedTransformerTokenizer` uses a model from HuggingFace's
`transformers` library to tokenize some input text. This often means wordpieces
(where `'AllenNLP is awesome'` might get split into `['Allen', '##NL', '##P', 'is',
'awesome']`), but it could also use byte-pair encoding, or some other tokenization, depending
on the pretrained model that you're using.
We take a model name as an input parameter, which we will pass to
`AutoTokenizer.from_pretrained`.
We also add special tokens relative to the pretrained model and truncate the sequences.
This tokenizer also indexes tokens and adds the indexes to the `Token` fields so that
they can be picked up by `PretrainedTransformerIndexer`.
Registered as a `Tokenizer` with name "pretrained_transformer".
# Parameters
model_name : `str`
The name of the pretrained wordpiece tokenizer to use.
add_special_tokens : `bool`, optional, (default=`True`)
If set to `True`, the sequences will be encoded with the special tokens relative
to their model.
max_length : `int`, optional (default=`None`)
If set to a number, will limit the total sequence returned so that it has a maximum length.
        If there are overflowing tokens, those will be added to the returned dictionary.
stride : `int`, optional (default=`0`)
If set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
tokenizer_kwargs: `Dict[str, Any]`, optional (default = `None`)
Dictionary with
[additional arguments](https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/tokenization_utils.py#L691)
for `AutoTokenizer.from_pretrained`.
""" # noqa: E501
def __init__(
self,
model_name: str,
add_special_tokens: bool = True,
max_length: Optional[int] = None,
stride: int = 0,
tokenizer_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
else:
tokenizer_kwargs = tokenizer_kwargs.copy()
tokenizer_kwargs.setdefault("use_fast", True)
# Note: Just because we request a fast tokenizer doesn't mean we get one.
from allennlp.common import cached_transformers
self.tokenizer = cached_transformers.get_tokenizer(
model_name, add_special_tokens=False, **tokenizer_kwargs
)
self._add_special_tokens = add_special_tokens
self._max_length = max_length
self._stride = stride
self._tokenizer_lowercases = self.tokenizer_lowercases(self.tokenizer)
try:
self._reverse_engineer_special_tokens("a", "b", model_name, tokenizer_kwargs)
except AssertionError:
# For most transformer models, "a" and "b" work just fine as dummy tokens. For a few,
# they don't, and so we use "1" and "2" instead.
self._reverse_engineer_special_tokens("1", "2", model_name, tokenizer_kwargs)
def _reverse_engineer_special_tokens(
self,
token_a: str,
token_b: str,
model_name: str,
tokenizer_kwargs: Optional[Dict[str, Any]],
):
# storing the special tokens
self.sequence_pair_start_tokens = []
self.sequence_pair_mid_tokens = []
self.sequence_pair_end_tokens = []
# storing token type ids for the sequences
self.sequence_pair_first_token_type_id = None
self.sequence_pair_second_token_type_id = None
# storing the special tokens
self.single_sequence_start_tokens = []
self.single_sequence_end_tokens = []
# storing token type id for the sequence
self.single_sequence_token_type_id = None
# Reverse-engineer the tokenizer for two sequences
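        # (For a BERT-style tokenizer, for example, the encoded pair looks like
        # "[CLS] a [SEP] b [SEP]", giving start=[CLS], mid=[SEP], end=[SEP].)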
from allennlp.common import cached_transformers
tokenizer_with_special_tokens = cached_transformers.get_tokenizer(
model_name, add_special_tokens=True, **tokenizer_kwargs
)
dummy_output = tokenizer_with_special_tokens.encode_plus(
token_a,
token_b,
add_special_tokens=True,
return_token_type_ids=True,
return_attention_mask=False,
)
dummy_a = self.tokenizer.encode(token_a, add_special_tokens=False)[0]
assert dummy_a in dummy_output["input_ids"]
dummy_b = self.tokenizer.encode(token_b, add_special_tokens=False)[0]
assert dummy_b in dummy_output["input_ids"]
assert dummy_a != dummy_b
seen_dummy_a = False
seen_dummy_b = False
for token_id, token_type_id in zip(
dummy_output["input_ids"], dummy_output["token_type_ids"]
):
if token_id == dummy_a:
if seen_dummy_a or seen_dummy_b: # seeing a twice or b before a
raise ValueError("Cannot auto-determine the number of special tokens added.")
seen_dummy_a = True
assert (
self.sequence_pair_first_token_type_id is None
or self.sequence_pair_first_token_type_id == token_type_id
), "multiple different token type ids found for the first sequence"
self.sequence_pair_first_token_type_id = token_type_id
continue
if token_id == dummy_b:
if seen_dummy_b: # seeing b twice
raise ValueError("Cannot auto-determine the number of special tokens added.")
seen_dummy_b = True
assert (
self.sequence_pair_second_token_type_id is None
or self.sequence_pair_second_token_type_id == token_type_id
), "multiple different token type ids found for the second sequence"
self.sequence_pair_second_token_type_id = token_type_id
continue
token = Token(
tokenizer_with_special_tokens.convert_ids_to_tokens(token_id),
text_id=token_id,
type_id=token_type_id,
)
if not seen_dummy_a:
self.sequence_pair_start_tokens.append(token)
elif not seen_dummy_b:
self.sequence_pair_mid_tokens.append(token)
else:
self.sequence_pair_end_tokens.append(token)
assert (
len(self.sequence_pair_start_tokens)
+ len(self.sequence_pair_mid_tokens)
+ len(self.sequence_pair_end_tokens)
) == self.tokenizer.num_special_tokens_to_add(pair=True)
# Reverse-engineer the tokenizer for one sequence
dummy_output = tokenizer_with_special_tokens.encode_plus(
token_a,
add_special_tokens=True,
return_token_type_ids=True,
return_attention_mask=False,
)
seen_dummy_a = False
for token_id, token_type_id in zip(
dummy_output["input_ids"], dummy_output["token_type_ids"]
):
if token_id == dummy_a:
if seen_dummy_a:
raise ValueError("Cannot auto-determine the number of special tokens added.")
seen_dummy_a = True
assert (
self.single_sequence_token_type_id is None
or self.single_sequence_token_type_id == token_type_id
), "multiple different token type ids found for the sequence"
self.single_sequence_token_type_id = token_type_id
continue
token = Token(
tokenizer_with_special_tokens.convert_ids_to_tokens(token_id),
text_id=token_id,
type_id=token_type_id,
)
if not seen_dummy_a:
self.single_sequence_start_tokens.append(token)
else:
self.single_sequence_end_tokens.append(token)
assert (
len(self.single_sequence_start_tokens) + len(self.single_sequence_end_tokens)
) == self.tokenizer.num_special_tokens_to_add(pair=False)
@staticmethod
def tokenizer_lowercases(tokenizer: PreTrainedTokenizer) -> bool:
# Huggingface tokenizers have different ways of remembering whether they lowercase or not. Detecting it
# this way seems like the least brittle way to do it.
tokenized = tokenizer.tokenize(
"A"
) # Use a single character that won't be cut into word pieces.
detokenized = " ".join(tokenized)
return "a" in detokenized
@overrides
def tokenize(self, text: str) -> List[Token]:
"""
This method only handles a single sentence (or sequence) of text.
"""
max_length = self._max_length
if max_length is not None and self._add_special_tokens:
max_length -= self.num_special_tokens_for_sequence()
encoded_tokens = self.tokenizer.encode_plus(
text=text,
add_special_tokens=False,
max_length=max_length,
stride=self._stride,
return_tensors=None,
return_offsets_mapping=self.tokenizer.is_fast,
return_attention_mask=False,
return_token_type_ids=True,
)
# token_ids contains a final list with ids for both regular and special tokens
token_ids, token_type_ids, token_offsets = (
encoded_tokens["input_ids"],
encoded_tokens["token_type_ids"],
encoded_tokens.get("offset_mapping"),
)
# If we don't have token offsets, try to calculate them ourselves.
if token_offsets is None:
token_offsets = self._estimate_character_indices(text, token_ids)
tokens = []
for token_id, token_type_id, offsets in zip(token_ids, token_type_ids, token_offsets):
if offsets is None or offsets[0] >= offsets[1]:
start = None
end = None
else:
start, end = offsets
tokens.append(
Token(
text=self.tokenizer.convert_ids_to_tokens(token_id, skip_special_tokens=False),
text_id=token_id,
type_id=token_type_id,
idx=start,
idx_end=end,
)
)
if self._add_special_tokens:
tokens = self.add_special_tokens(tokens)
return tokens
def _estimate_character_indices(
self, text: str, token_ids: List[int]
) -> List[Optional[Tuple[int, int]]]:
"""
The huggingface tokenizers produce tokens that may or may not be slices from the
original text. Differences arise from lowercasing, Unicode normalization, and other
kinds of normalization, as well as special characters that are included to denote
various situations, such as "##" in BERT for word pieces from the middle of a word, or
"Ġ" in RoBERTa for the beginning of words not at the start of a sentence.
This code attempts to calculate character offsets while being tolerant to these
differences. It scans through the text and the tokens in parallel, trying to match up
positions in both. If it gets out of sync, it backs off to not adding any token
indices, and attempts to catch back up afterwards. This procedure is approximate.
Don't rely on precise results, especially in non-English languages that are far more
affected by Unicode normalization.
"""
token_texts = [
sanitize_wordpiece(t) for t in self.tokenizer.convert_ids_to_tokens(token_ids)
]
token_offsets: List[Optional[Tuple[int, int]]] = [None] * len(token_ids)
if self._tokenizer_lowercases:
text = text.lower()
token_texts = [t.lower() for t in token_texts]
min_allowed_skipped_whitespace = 3
allowed_skipped_whitespace = min_allowed_skipped_whitespace
text_index = 0
token_index = 0
while text_index < len(text) and token_index < len(token_ids):
token_text = token_texts[token_index]
token_start_index = text.find(token_text, text_index)
# Did we not find it at all?
if token_start_index < 0:
token_index += 1
# When we skip a token, we increase our tolerance, so we have a chance of catching back up.
allowed_skipped_whitespace += 1 + min_allowed_skipped_whitespace
continue
# Did we jump too far?
non_whitespace_chars_skipped = sum(
1 for c in text[text_index:token_start_index] if not c.isspace()
)
if non_whitespace_chars_skipped > allowed_skipped_whitespace:
# Too many skipped characters. Something is wrong. Ignore this token.
token_index += 1
# When we skip a token, we increase our tolerance, so we have a chance of catching back up.
allowed_skipped_whitespace += 1 + min_allowed_skipped_whitespace
continue
allowed_skipped_whitespace = min_allowed_skipped_whitespace
token_offsets[token_index] = (
token_start_index,
token_start_index + len(token_text),
)
text_index = token_start_index + len(token_text)
token_index += 1
return token_offsets
def _intra_word_tokenize(
self, string_tokens: List[str]
) -> Tuple[List[Token], List[Optional[Tuple[int, int]]]]:
tokens: List[Token] = []
offsets: List[Optional[Tuple[int, int]]] = []
for token_string in string_tokens:
wordpieces = self.tokenizer.encode_plus(
token_string,
add_special_tokens=False,
return_tensors=None,
return_offsets_mapping=False,
return_attention_mask=False,
return_token_type_ids=False,
)
wp_ids = wordpieces["input_ids"]
if len(wp_ids) > 0:
offsets.append((len(tokens), len(tokens) + len(wp_ids) - 1))
tokens.extend(
Token(text=wp_text, text_id=wp_id)
for wp_id, wp_text in zip(wp_ids, self.tokenizer.convert_ids_to_tokens(wp_ids))
)
else:
offsets.append(None)
return tokens, offsets
@staticmethod
def _increment_offsets(
offsets: Iterable[Optional[Tuple[int, int]]], increment: int
) -> List[Optional[Tuple[int, int]]]:
return [
None if offset is None else (offset[0] + increment, offset[1] + increment)
for offset in offsets
]
def intra_word_tokenize(
self, string_tokens: List[str]
) -> Tuple[List[Token], List[Optional[Tuple[int, int]]]]:
"""
Tokenizes each word into wordpieces separately and returns the wordpiece IDs.
Also calculates offsets such that tokens[offsets[i][0]:offsets[i][1] + 1]
corresponds to the original i-th token.
This function inserts special tokens.
"""
tokens, offsets = self._intra_word_tokenize(string_tokens)
tokens = self.add_special_tokens(tokens)
offsets = self._increment_offsets(offsets, len(self.single_sequence_start_tokens))
return tokens, offsets
def intra_word_tokenize_sentence_pair(
self, string_tokens_a: List[str], string_tokens_b: List[str]
) -> Tuple[List[Token], List[Tuple[int, int]], List[Tuple[int, int]]]:
"""
Tokenizes each word into wordpieces separately and returns the wordpiece IDs.
Also calculates offsets such that wordpieces[offsets[i][0]:offsets[i][1] + 1]
corresponds to the original i-th token.
This function inserts special tokens.
"""
tokens_a, offsets_a = self._intra_word_tokenize(string_tokens_a)
tokens_b, offsets_b = self._intra_word_tokenize(string_tokens_b)
offsets_b = self._increment_offsets(
offsets_b,
(
len(self.sequence_pair_start_tokens)
+ len(tokens_a)
+ len(self.sequence_pair_mid_tokens)
),
)
tokens_a = self.add_special_tokens(tokens_a, tokens_b)
offsets_a = self._increment_offsets(offsets_a, len(self.sequence_pair_start_tokens))
return tokens_a, offsets_a, offsets_b
def add_special_tokens(
self, tokens1: List[Token], tokens2: Optional[List[Token]] = None
) -> List[Token]:
        import dataclasses
        def with_new_type_id(tokens: List[Token], type_id: int) -> List[Token]:
            return [dataclasses.replace(t, type_id=type_id) for t in tokens]
        # Make sure we don't change the input parameters
        tokens2 = copy.deepcopy(tokens2)
        # We add special tokens and also set token type ids.
if tokens2 is None:
return (
self.single_sequence_start_tokens
+ with_new_type_id(tokens1, self.single_sequence_token_type_id)
+ self.single_sequence_end_tokens
)
else:
return (
self.sequence_pair_start_tokens
+ with_new_type_id(tokens1, self.sequence_pair_first_token_type_id)
+ self.sequence_pair_mid_tokens
+ with_new_type_id(tokens2, self.sequence_pair_second_token_type_id)
+ self.sequence_pair_end_tokens
)
def num_special_tokens_for_sequence(self) -> int:
return len(self.single_sequence_start_tokens) + len(self.single_sequence_end_tokens)
def num_special_tokens_for_pair(self) -> int:
return (
len(self.sequence_pair_start_tokens)
+ len(self.sequence_pair_mid_tokens)
+ len(self.sequence_pair_end_tokens)
)
| 41.988764 | 161 | 0.635001 |
988a6149a1cc6b8e004e1de4babf861129860ae6 | 82,409 | py | Python | src/transformers/models/bert/modeling_tf_bert.py | MarcelGM/transformers | aad1d9b6d5c58fd974618ac0aead1c5bd1119467 | ["Apache-2.0"] | 101 | 2021-12-22T00:03:51.000Z | 2022-03-30T07:39:09.000Z | src/transformers/models/bert/modeling_tf_bert.py | MarcelGM/transformers | aad1d9b6d5c58fd974618ac0aead1c5bd1119467 | ["Apache-2.0"] | 13 | 2020-10-13T11:41:11.000Z | 2022-02-16T14:13:31.000Z | src/transformers/models/bert/modeling_tf_bert.py | MarcelGM/transformers | aad1d9b6d5c58fd974618ac0aead1c5bd1119467 | ["Apache-2.0"] | 30 | 2021-04-30T07:11:22.000Z | 2022-03-15T19:34:58.000Z |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 BERT model. """
import math
import warnings
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPooling,
TFCausalLMOutput,
TFMaskedLMOutput,
TFMultipleChoiceModelOutput,
TFNextSentencePredictorOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFCausalLanguageModelingLoss,
TFMaskedLanguageModelingLoss,
TFModelInputType,
TFMultipleChoiceLoss,
TFNextSentencePredictionLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFTokenClassificationLoss,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_bert import BertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-cased"
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
"bert-base-chinese",
"bert-base-german-cased",
"bert-large-uncased-whole-word-masking",
"bert-large-cased-whole-word-masking",
"bert-large-uncased-whole-word-masking-finetuned-squad",
"bert-large-cased-whole-word-masking-finetuned-squad",
"bert-base-cased-finetuned-mrpc",
"cl-tohoku/bert-base-japanese",
"cl-tohoku/bert-base-japanese-whole-word-masking",
"cl-tohoku/bert-base-japanese-char",
"cl-tohoku/bert-base-japanese-char-whole-word-masking",
"TurkuNLP/bert-base-finnish-cased-v1",
"TurkuNLP/bert-base-finnish-uncased-v1",
"wietsedv/bert-base-dutch-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
class TFBertPreTrainingLoss:
"""
    Loss function suitable for BERT-like pretraining, that is, the task of pretraining a language model by combining
    NSP + MLM.

    .. note::
        Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
"""
def compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
# make sure only labels that are not equal to -100
# are taken into account as loss
masked_lm_active_loss = tf.not_equal(tf.reshape(tensor=labels["labels"], shape=(-1,)), -100)
masked_lm_reduced_logits = tf.boolean_mask(
tensor=tf.reshape(tensor=logits[0], shape=(-1, shape_list(logits[0])[2])),
mask=masked_lm_active_loss,
)
masked_lm_labels = tf.boolean_mask(
tensor=tf.reshape(tensor=labels["labels"], shape=(-1,)), mask=masked_lm_active_loss
)
next_sentence_active_loss = tf.not_equal(tf.reshape(tensor=labels["next_sentence_label"], shape=(-1,)), -100)
next_sentence_reduced_logits = tf.boolean_mask(
tensor=tf.reshape(tensor=logits[1], shape=(-1, 2)), mask=next_sentence_active_loss
)
next_sentence_label = tf.boolean_mask(
tensor=tf.reshape(tensor=labels["next_sentence_label"], shape=(-1,)), mask=next_sentence_active_loss
)
masked_lm_loss = loss_fn(y_true=masked_lm_labels, y_pred=masked_lm_reduced_logits)
next_sentence_loss = loss_fn(y_true=next_sentence_label, y_pred=next_sentence_reduced_logits)
masked_lm_loss = tf.reshape(tensor=masked_lm_loss, shape=(-1, shape_list(next_sentence_loss)[0]))
masked_lm_loss = tf.reduce_mean(input_tensor=masked_lm_loss, axis=0)
return masked_lm_loss + next_sentence_loss
class TFBertEmbeddings(tf.keras.layers.Layer):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config: BertConfig, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.type_vocab_size = config.type_vocab_size
self.hidden_size = config.hidden_size
self.max_position_embeddings = config.max_position_embeddings
self.initializer_range = config.initializer_range
self.embeddings_sum = tf.keras.layers.Add()
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def build(self, input_shape: tf.TensorShape):
with tf.name_scope("word_embeddings"):
self.weight = self.add_weight(
name="weight",
shape=[self.vocab_size, self.hidden_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("token_type_embeddings"):
self.token_type_embeddings = self.add_weight(
name="embeddings",
shape=[self.type_vocab_size, self.hidden_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("position_embeddings"):
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.max_position_embeddings, self.hidden_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
def call(
self,
input_ids: tf.Tensor = None,
position_ids: tf.Tensor = None,
token_type_ids: tf.Tensor = None,
inputs_embeds: tf.Tensor = None,
training: bool = False,
) -> tf.Tensor:
"""
Applies embedding based on inputs tensor.
Returns:
final_embeddings (:obj:`tf.Tensor`): output embedding tensor.
"""
assert not (input_ids is None and inputs_embeds is None)
if input_ids is not None:
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
if position_ids is None:
position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = self.embeddings_sum(inputs=[inputs_embeds, position_embeds, token_type_embeds])
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings
class TFBertSelfAttention(tf.keras.layers.Layer):
def __init__(self, config: BertConfig, **kwargs):
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number "
f"of attention heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
self.query = tf.keras.layers.Dense(
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
)
self.key = tf.keras.layers.Dense(
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
)
self.value = tf.keras.layers.Dense(
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
)
self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
# Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
# Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
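        # (Illustrative shapes for a BERT-base-sized model with 12 heads of size 64:
        # (2, 128, 768) -> (2, 128, 12, 64) -> (2, 12, 128, 64).)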
return tf.transpose(tensor, perm=[0, 2, 1, 3])
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
training: bool = False,
) -> Tuple[tf.Tensor]:
batch_size = shape_list(hidden_states)[0]
mixed_query_layer = self.query(inputs=hidden_states)
mixed_key_layer = self.key(inputs=hidden_states)
mixed_value_layer = self.value(inputs=hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
# Take the dot product between "query" and "key" to get the raw attention scores.
# (batch size, num_heads, seq_len_q, seq_len_k)
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
attention_scores = tf.divide(attention_scores, dk)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in TFBertModel call() function)
attention_scores = tf.add(attention_scores, attention_mask)
# Normalize the attention scores to probabilities.
attention_probs = tf.nn.softmax(logits=attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(inputs=attention_probs, training=training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = tf.multiply(attention_probs, head_mask)
attention_output = tf.matmul(attention_probs, value_layer)
attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
# (batch_size, seq_len_q, all_head_size)
attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
return outputs
class TFBertSelfOutput(tf.keras.layers.Layer):
def __init__(self, config: BertConfig, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.dropout(inputs=hidden_states, training=training)
hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
return hidden_states
class TFBertAttention(tf.keras.layers.Layer):
def __init__(self, config: BertConfig, **kwargs):
super().__init__(**kwargs)
self.self_attention = TFBertSelfAttention(config, name="self")
self.dense_output = TFBertSelfOutput(config, name="output")
def prune_heads(self, heads):
raise NotImplementedError
def call(
self,
input_tensor: tf.Tensor,
attention_mask: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
training: bool = False,
) -> Tuple[tf.Tensor]:
self_outputs = self.self_attention(
hidden_states=input_tensor,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
training=training,
)
attention_output = self.dense_output(
hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class TFBertIntermediate(tf.keras.layers.Layer):
def __init__(self, config: BertConfig, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
else:
self.intermediate_act_fn = config.hidden_act
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class TFBertOutput(tf.keras.layers.Layer):
def __init__(self, config: BertConfig, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.dropout(inputs=hidden_states, training=training)
hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
return hidden_states
class TFBertLayer(tf.keras.layers.Layer):
def __init__(self, config: BertConfig, **kwargs):
super().__init__(**kwargs)
self.attention = TFBertAttention(config, name="attention")
self.intermediate = TFBertIntermediate(config, name="intermediate")
self.bert_output = TFBertOutput(config, name="output")
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
training: bool = False,
) -> Tuple[tf.Tensor]:
attention_outputs = self.attention(
input_tensor=hidden_states,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
training=training,
)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(hidden_states=attention_output)
layer_output = self.bert_output(
hidden_states=intermediate_output, input_tensor=attention_output, training=training
)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
class TFBertEncoder(tf.keras.layers.Layer):
def __init__(self, config: BertConfig, **kwargs):
super().__init__(**kwargs)
self.layer = [TFBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
output_hidden_states: bool,
return_dict: bool,
training: bool = False,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states=hidden_states,
attention_mask=attention_mask,
head_mask=head_mask[i],
output_attentions=output_attentions,
training=training,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class TFBertPooler(tf.keras.layers.Layer):
def __init__(self, config: BertConfig, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
activation="tanh",
name="dense",
)
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(inputs=first_token_tensor)
return pooled_output
class TFBertPredictionHeadTransform(tf.keras.layers.Layer):
def __init__(self, config: BertConfig, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
if isinstance(config.hidden_act, str):
self.transform_act_fn = get_tf_activation(config.hidden_act)
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(inputs=hidden_states)
return hidden_states
class TFBertLMPredictionHead(tf.keras.layers.Layer):
def __init__(self, config: BertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.hidden_size = config.hidden_size
self.transform = TFBertPredictionHeadTransform(config, name="transform")
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
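        # In effect, logits = hidden_states @ input_embeddings.weight^T + bias (weight tying).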
self.input_embeddings = input_embeddings
def build(self, input_shape: tf.TensorShape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def get_output_embeddings(self) -> tf.keras.layers.Layer:
return self.input_embeddings
def set_output_embeddings(self, value: tf.Variable):
self.input_embeddings.weight = value
self.input_embeddings.vocab_size = shape_list(value)[0]
def get_bias(self) -> Dict[str, tf.Variable]:
return {"bias": self.bias}
def set_bias(self, value: tf.Variable):
self.bias = value["bias"]
self.vocab_size = shape_list(value["bias"])[0]
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.transform(hidden_states=hidden_states)
seq_length = shape_list(hidden_states)[1]
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
return hidden_states
class TFBertMLMHead(tf.keras.layers.Layer):
def __init__(self, config: BertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):
super().__init__(**kwargs)
self.predictions = TFBertLMPredictionHead(config, input_embeddings, name="predictions")
def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
prediction_scores = self.predictions(hidden_states=sequence_output)
return prediction_scores
class TFBertNSPHead(tf.keras.layers.Layer):
def __init__(self, config: BertConfig, **kwargs):
super().__init__(**kwargs)
self.seq_relationship = tf.keras.layers.Dense(
units=2,
kernel_initializer=get_initializer(config.initializer_range),
name="seq_relationship",
)
def call(self, pooled_output: tf.Tensor) -> tf.Tensor:
seq_relationship_score = self.seq_relationship(inputs=pooled_output)
return seq_relationship_score
@keras_serializable
class TFBertMainLayer(tf.keras.layers.Layer):
config_class = BertConfig
def __init__(self, config: BertConfig, add_pooling_layer: bool = True, **kwargs):
super().__init__(**kwargs)
self.config = config
self.embeddings = TFBertEmbeddings(config, name="embeddings")
self.encoder = TFBertEncoder(config, name="encoder")
self.pooler = TFBertPooler(config, name="pooler") if add_pooling_layer else None
def get_input_embeddings(self) -> tf.keras.layers.Layer:
return self.embeddings
def set_input_embeddings(self, value: tf.Variable):
self.embeddings.weight = value
self.embeddings.vocab_size = shape_list(value)[0]
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
raise NotImplementedError
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["attention_mask"] is None:
inputs["attention_mask"] = tf.fill(dims=input_shape, value=1)
if inputs["token_type_ids"] is None:
inputs["token_type_ids"] = tf.fill(dims=input_shape, value=0)
embedding_output = self.embeddings(
input_ids=inputs["input_ids"],
position_ids=inputs["position_ids"],
token_type_ids=inputs["token_type_ids"],
inputs_embeds=inputs["inputs_embeds"],
training=inputs["training"],
)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of causal attention
        # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
extended_attention_mask = tf.reshape(inputs["attention_mask"], (input_shape[0], 1, 1, input_shape[1]))
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
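        # (Illustrative: an attention_mask of [1, 1, 0] becomes additive scores [0.0, 0.0, -10000.0].)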
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if inputs["head_mask"] is not None:
raise NotImplementedError
else:
inputs["head_mask"] = [None] * self.config.num_hidden_layers
encoder_outputs = self.encoder(
hidden_states=embedding_output,
attention_mask=extended_attention_mask,
head_mask=inputs["head_mask"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
if not inputs["return_dict"]:
return (
sequence_output,
pooled_output,
) + encoder_outputs[1:]
return TFBaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
class TFBertPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
base_model_prefix = "bert"
@dataclass
class TFBertForPreTrainingOutput(ModelOutput):
"""
Output type of :class:`~transformers.TFBertForPreTraining`.
Args:
prediction_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
prediction_logits: tf.Tensor = None
seq_relationship_logits: tf.Tensor = None
hidden_states: Optional[Union[Tuple[tf.Tensor], tf.Tensor]] = None
attentions: Optional[Union[Tuple[tf.Tensor], tf.Tensor]] = None
BERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the
    generic methods the library implements for all its models (such as downloading or saving, resizing the input
embeddings, pruning heads etc.)
This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage
and behavior.
.. note::
        TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
        This second option is useful when using the :meth:`tf.keras.Model.fit` method, which currently requires having all
        the tensors in the first argument of the model call function: :obj:`model(inputs)`.
        If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
        the first positional argument:
        - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Args:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.TFPreTrainedModel.from_pretrained` method to load the
model weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
        input_ids (:obj:`np.ndarray`, :obj:`tf.Tensor`, :obj:`List[tf.Tensor]`, :obj:`Dict[str, tf.Tensor]` or :obj:`Dict[str, np.ndarray]` and each example must have the shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`__
position_ids (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`__
head_mask (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This
argument can be used in eager mode, in graph mode the value will always be set to True.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING,
)
class TFBertModel(TFBertPreTrainedModel):
def __init__(self, config: BertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.bert = TFBertMainLayer(config, name="bert")
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFBaseModelOutputWithPooling,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.bert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutputWithPooling(
last_hidden_state=output.last_hidden_state,
pooler_output=output.pooler_output,
hidden_states=hs,
attentions=attns,
)
@add_start_docstrings(
"""
Bert Model with two heads on top as done during the pretraining:
a `masked language modeling` head and a `next sentence prediction (classification)` head.
""",
BERT_START_DOCSTRING,
)
class TFBertForPreTraining(TFBertPreTrainedModel, TFBertPreTrainingLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [
r"position_ids",
r"cls.predictions.decoder.weight",
r"cls.predictions.decoder.bias",
]
def __init__(self, config: BertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.bert = TFBertMainLayer(config, name="bert")
self.nsp = TFBertNSPHead(config, name="nsp___cls")
self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings, name="mlm___cls")
def get_lm_head(self) -> tf.keras.layers.Layer:
return self.mlm.predictions
def get_prefix_bias_name(self) -> str:
warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=TFBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
next_sentence_label: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFBertForPreTrainingOutput, Tuple[tf.Tensor]]:
r"""
        labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape ``(batch_size, sequence_length)``, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        next_sentence_label (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape ``(batch_size,)``, `optional`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Return:
Examples::
>>> import tensorflow as tf
>>> from transformers import BertTokenizer, TFBertForPreTraining
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = TFBertForPreTraining.from_pretrained('bert-base-uncased')
>>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_scores, seq_relationship_scores = outputs[:2]
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
next_sentence_label=next_sentence_label,
training=training,
kwargs_call=kwargs,
)
outputs = self.bert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output, pooled_output = outputs[:2]
prediction_scores = self.mlm(sequence_output=sequence_output, training=inputs["training"])
seq_relationship_score = self.nsp(pooled_output=pooled_output)
total_loss = None
if inputs["labels"] is not None and inputs["next_sentence_label"] is not None:
d_labels = {"labels": inputs["labels"]}
d_labels["next_sentence_label"] = inputs["next_sentence_label"]
total_loss = self.compute_loss(labels=d_labels, logits=(prediction_scores, seq_relationship_score))
if not inputs["return_dict"]:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return TFBertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output: TFBertForPreTrainingOutput) -> TFBertForPreTrainingOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBertForPreTrainingOutput(
prediction_logits=output.prediction_logits,
seq_relationship_logits=output.seq_relationship_logits,
hidden_states=hs,
attentions=attns,
)
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, BERT_START_DOCSTRING)
class TFBertForMaskedLM(TFBertPreTrainedModel, TFMaskedLanguageModelingLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [
r"pooler",
r"cls.seq_relationship",
r"cls.predictions.decoder.weight",
r"nsp___cls",
]
def __init__(self, config: BertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
if config.is_decoder:
logger.warning(
"If you want to use `TFBertForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert")
self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings, name="mlm___cls")
def get_lm_head(self) -> tf.keras.layers.Layer:
return self.mlm.predictions
def get_prefix_bias_name(self) -> str:
warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.bert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
prediction_scores = self.mlm(sequence_output=sequence_output, training=inputs["training"])
loss = (
None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=prediction_scores)
)
if not inputs["return_dict"]:
output = (prediction_scores,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
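# Illustrative sketch, not part of the library: building MLM labels that follow the ``-100`` convention documented
# above. Positions set to -100 are ignored by the loss; only the masked position keeps its original token id. The
# checkpoint name, the input sentence, and the choice of masked position are assumptions made for this example.
def _example_masked_lm_labels():
    import tensorflow as tf
    from transformers import BertTokenizer, TFBertForMaskedLM
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = TFBertForMaskedLM.from_pretrained("bert-base-uncased")
    enc = tokenizer("The capital of France is Paris.", return_tensors="tf")
    input_ids = enc["input_ids"]
    masked_position = 6  # the position of "paris" in the tokenized sentence
    # Ignore every position except the masked one, whose original id becomes the label.
    labels = tf.fill(tf.shape(input_ids), -100)
    labels = tf.tensor_scatter_nd_update(labels, [[0, masked_position]], [input_ids[0, masked_position]])
    masked_input_ids = tf.tensor_scatter_nd_update(input_ids, [[0, masked_position]], [tokenizer.mask_token_id])
    outputs = model(input_ids=masked_input_ids, attention_mask=enc["attention_mask"], labels=labels)
    return outputs.loss, outputs.logits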
class TFBertLMHeadModel(TFBertPreTrainedModel, TFCausalLanguageModelingLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [
r"pooler",
r"cls.seq_relationship",
r"cls.predictions.decoder.weight",
r"nsp___cls",
]
def __init__(self, config: BertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
if not config.is_decoder:
logger.warning("If you want to use `TFBertLMHeadModel` as a standalone, add `is_decoder=True.`")
self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert")
self.mlm = TFBertMLMHead(config, input_embeddings=self.bert.embeddings, name="mlm___cls")
def get_lm_head(self) -> tf.keras.layers.Layer:
return self.mlm.predictions
def get_prefix_bias_name(self) -> str:
warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
return self.name + "/" + self.mlm.name + "/" + self.mlm.predictions.name
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFCausalLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the cross entropy classification loss. Indices should be in ``[0, ...,
config.vocab_size - 1]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.bert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.mlm(sequence_output=sequence_output, training=inputs["training"])
loss = None
if inputs["labels"] is not None:
# shift labels to the left and cut last logit token
logits = logits[:, :-1]
labels = inputs["labels"][:, 1:]
loss = self.compute_loss(labels=labels, logits=logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFCausalLMOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output: TFCausalLMOutput) -> TFCausalLMOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFCausalLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
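# Illustrative sketch, not part of the library: the shift applied in `call` above pairs the logit at position t with
# the label at position t + 1, so each token is trained to predict the next one. The toy ids and random logits below
# are made up purely to show the alignment.
def _example_causal_label_shift():
    import tensorflow as tf
    token_ids = tf.constant([[101, 7592, 2088, 102]])  # (batch, seq_len)
    fake_logits = tf.random.uniform((1, 4, 30522))  # (batch, seq_len, vocab_size)
    shifted_logits = fake_logits[:, :-1]  # predictions for positions 0..2
    shifted_labels = token_ids[:, 1:]  # targets are the following tokens 1..3
    loss = tf.keras.losses.sparse_categorical_crossentropy(shifted_labels, shifted_logits, from_logits=True)
    return tf.reduce_mean(loss)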
@add_start_docstrings(
"""Bert Model with a `next sentence prediction (classification)` head on top. """,
BERT_START_DOCSTRING,
)
class TFBertForNextSentencePrediction(TFBertPreTrainedModel, TFNextSentencePredictionLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"cls.predictions"]
def __init__(self, config: BertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.bert = TFBertMainLayer(config, name="bert")
self.nsp = TFBertNSPHead(config, name="nsp___cls")
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=TFNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
next_sentence_label: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFNextSentencePredictorOutput, Tuple[tf.Tensor]]:
r"""
Return:
Examples::
>>> import tensorflow as tf
>>> from transformers import BertTokenizer, TFBertForNextSentencePrediction
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = TFBertForNextSentencePrediction.from_pretrained('bert-base-uncased')
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors='tf')
>>> logits = model(encoding['input_ids'], token_type_ids=encoding['token_type_ids'])[0]
>>> assert logits[0][0] < logits[0][1] # the next sentence was random
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
next_sentence_label=next_sentence_label,
training=training,
kwargs_call=kwargs,
)
outputs = self.bert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
pooled_output = outputs[1]
seq_relationship_scores = self.nsp(pooled_output=pooled_output)
next_sentence_loss = (
None
if inputs["next_sentence_label"] is None
else self.compute_loss(labels=inputs["next_sentence_label"], logits=seq_relationship_scores)
)
if not inputs["return_dict"]:
output = (seq_relationship_scores,) + outputs[2:]
return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
return TFNextSentencePredictorOutput(
loss=next_sentence_loss,
logits=seq_relationship_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output: TFNextSentencePredictorOutput) -> TFNextSentencePredictorOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFNextSentencePredictorOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
BERT_START_DOCSTRING,
)
class TFBertForSequenceClassification(TFBertPreTrainedModel, TFSequenceClassificationLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship"]
_keys_to_ignore_on_load_missing = [r"dropout"]
def __init__(self, config: BertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.bert = TFBertMainLayer(config, name="bert")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
units=config.num_labels,
kernel_initializer=get_initializer(config.initializer_range),
name="classifier",
)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.bert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
pooled_output = outputs[1]
pooled_output = self.dropout(inputs=pooled_output, training=inputs["training"])
logits = self.classifier(inputs=pooled_output)
loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
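# Illustrative sketch, not part of the library: as the docstring above notes, the same head does classification when
# ``config.num_labels > 1`` and mean-squared-error regression when ``config.num_labels == 1``. The checkpoint name
# and example labels are assumptions, and the freshly initialised classifier weights are random here.
def _example_sequence_classification_losses():
    import tensorflow as tf
    from transformers import BertTokenizer, TFBertForSequenceClassification
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    enc = tokenizer("A short example sentence.", return_tensors="tf")
    # Two labels: the target is a class index, so the cross-entropy path is used.
    clf = TFBertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
    clf_out = clf(**enc, labels=tf.constant([1]))
    # One label: the target is a float, so the mean-squared-error path is used.
    reg = TFBertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=1)
    reg_out = reg(**enc, labels=tf.constant([[0.7]]))
    return clf_out.loss, reg_out.loss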
@add_start_docstrings(
"""
Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
BERT_START_DOCSTRING,
)
class TFBertForMultipleChoice(TFBertPreTrainedModel, TFMultipleChoiceLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship"]
_keys_to_ignore_on_load_missing = [r"dropout"]
def __init__(self, config: BertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.bert = TFBertMainLayer(config, name="bert")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@property
def dummy_inputs(self) -> Dict[str, tf.Tensor]:
"""
Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs
"""
return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)}
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
num_choices = shape_list(inputs["input_ids"])[1]
seq_length = shape_list(inputs["input_ids"])[2]
else:
num_choices = shape_list(inputs["inputs_embeds"])[1]
seq_length = shape_list(inputs["inputs_embeds"])[2]
flat_input_ids = (
tf.reshape(tensor=inputs["input_ids"], shape=(-1, seq_length)) if inputs["input_ids"] is not None else None
)
flat_attention_mask = (
tf.reshape(tensor=inputs["attention_mask"], shape=(-1, seq_length))
if inputs["attention_mask"] is not None
else None
)
flat_token_type_ids = (
tf.reshape(tensor=inputs["token_type_ids"], shape=(-1, seq_length))
if inputs["token_type_ids"] is not None
else None
)
flat_position_ids = (
tf.reshape(tensor=inputs["position_ids"], shape=(-1, seq_length))
if inputs["position_ids"] is not None
else None
)
flat_inputs_embeds = (
tf.reshape(tensor=inputs["inputs_embeds"], shape=(-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
if inputs["inputs_embeds"] is not None
else None
)
outputs = self.bert(
input_ids=flat_input_ids,
attention_mask=flat_attention_mask,
token_type_ids=flat_token_type_ids,
position_ids=flat_position_ids,
head_mask=inputs["head_mask"],
inputs_embeds=flat_inputs_embeds,
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
pooled_output = outputs[1]
pooled_output = self.dropout(inputs=pooled_output, training=inputs["training"])
logits = self.classifier(inputs=pooled_output)
reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=reshaped_logits)
if not inputs["return_dict"]:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
"token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"),
}
]
)
def serving(self, inputs: Dict[str, tf.Tensor]) -> TFMultipleChoiceModelOutput:
output = self.call(input_ids=inputs)
return self.serving_output(output)
def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
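# Illustrative sketch, not part of the library: `call` above flattens (batch, num_choices, seq_len) inputs to
# (batch * num_choices, seq_len) before the encoder and reshapes the per-choice logits back to (batch, num_choices).
# Tokenizing each (prompt, choice) pair and adding a batch dimension reproduces that input layout; the checkpoint
# name and texts are assumptions, and the untrained choice head gives arbitrary scores.
def _example_multiple_choice_inputs():
    import tensorflow as tf
    from transformers import BertTokenizer, TFBertForMultipleChoice
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = TFBertForMultipleChoice.from_pretrained("bert-base-uncased")
    prompt = "The chef tasted the soup and"
    choices = ["added more salt.", "flew to the moon."]
    enc = tokenizer([prompt] * len(choices), choices, return_tensors="tf", padding=True)
    inputs = {k: tf.expand_dims(v, 0) for k, v in enc.items()}  # shapes become (1, num_choices, seq_len)
    outputs = model(inputs)
    return outputs.logits  # shape (1, num_choices)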
@add_start_docstrings(
"""
Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
BERT_START_DOCSTRING,
)
class TFBertForTokenClassification(TFBertPreTrainedModel, TFTokenClassificationLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [
r"pooler",
r"mlm___cls",
r"nsp___cls",
r"cls.predictions",
r"cls.seq_relationship",
]
_keys_to_ignore_on_load_missing = [r"dropout"]
def __init__(self, config: BertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
units=config.num_labels,
kernel_initializer=get_initializer(config.initializer_range),
name="classifier",
)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.bert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
sequence_output = self.dropout(inputs=sequence_output, training=inputs["training"])
logits = self.classifier(inputs=sequence_output)
loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
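# Illustrative sketch, not part of the library: token classification expects one class id per input position, as the
# docstring above describes. The checkpoint name and the all-zero labels are assumptions; with a freshly initialised
# head the loss value only serves as a shape check.
def _example_token_classification_labels():
    import tensorflow as tf
    from transformers import BertTokenizer, TFBertForTokenClassification
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = TFBertForTokenClassification.from_pretrained("bert-base-uncased", num_labels=3)
    enc = tokenizer("Alice visited Paris", return_tensors="tf")
    labels = tf.zeros_like(enc["input_ids"])  # one label id per token, here all class 0
    outputs = model(**enc, labels=labels)
    return outputs.loss, outputs.logits  # logits shape: (1, seq_len, num_labels)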
@add_start_docstrings(
"""
Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
BERT_START_DOCSTRING,
)
class TFBertForQuestionAnswering(TFBertPreTrainedModel, TFQuestionAnsweringLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [
r"pooler",
r"mlm___cls",
r"nsp___cls",
r"cls.predictions",
r"cls.seq_relationship",
]
def __init__(self, config: BertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.bert = TFBertMainLayer(config, add_pooling_layer=False, name="bert")
self.qa_outputs = tf.keras.layers.Dense(
units=config.num_labels,
kernel_initializer=get_initializer(config.initializer_range),
name="qa_outputs",
)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
r"""
start_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
start_positions=start_positions,
end_positions=end_positions,
training=training,
kwargs_call=kwargs,
)
outputs = self.bert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.qa_outputs(inputs=sequence_output)
start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
start_logits = tf.squeeze(input=start_logits, axis=-1)
end_logits = tf.squeeze(input=end_logits, axis=-1)
loss = None
if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
labels = {"start_position": inputs["start_positions"]}
labels["end_position"] = inputs["end_positions"]
loss = self.compute_loss(labels=labels, logits=(start_logits, end_logits))
if not inputs["return_dict"]:
output = (start_logits, end_logits) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFQuestionAnsweringModelOutput(
start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
)
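# Illustrative sketch, not part of the library: turning the start/end logits produced above into an answer string by
# taking the argmax of each and decoding the tokens in between. The SQuAD-finetuned checkpoint name is an assumption
# chosen because it ships with a trained question-answering head.
def _example_question_answering_span():
    import tensorflow as tf
    from transformers import BertTokenizer, TFBertForQuestionAnswering
    name = "bert-large-uncased-whole-word-masking-finetuned-squad"
    tokenizer = BertTokenizer.from_pretrained(name)
    model = TFBertForQuestionAnswering.from_pretrained(name)
    question = "Who composed the opera Carmen?"
    context = "Carmen is an opera in four acts by the French composer Georges Bizet."
    enc = tokenizer(question, context, return_tensors="tf")
    outputs = model(enc)
    start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
    end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
    answer_ids = enc["input_ids"][0, start : end + 1]
    return tokenizer.decode(answer_ids.numpy())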
| 44.258324
| 190
| 0.66676
|
8350b6174a829f3127ca8d18d1a9221d26ffe8de
| 1,579
|
py
|
Python
|
setup.py
|
jchaykow/dgmnet
|
7b8e8b46094d7199a64b81e7f4585c3f6eeaeee0
|
[
"MIT"
] | 2
|
2021-02-01T21:48:29.000Z
|
2021-02-01T21:51:15.000Z
|
setup.py
|
jchaykow/dgmnet
|
7b8e8b46094d7199a64b81e7f4585c3f6eeaeee0
|
[
"MIT"
] | null | null | null |
setup.py
|
jchaykow/dgmnet
|
7b8e8b46094d7199a64b81e7f4585c3f6eeaeee0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.md').read()
#history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirements = [
'numpy',
'pandas',
'scikit-learn',
'scipy',
'matplotlib',
'seaborn',
'jupyter',
'ipykernel',
'torch',
'networkx',
'pyyaml'
]
#test_requirements = [
# # TODO: put package test requirements here
#]
setup(
name='dgmnet',
version='1.0.0',
description='Deep Generative Modeling for Networks',
#long_description=readme + '\n\n' + history,
author='Gavin Hartnett',
author_email='hartnett@rand.org',
url='https://code.rand.org/hartnett/dgmnet',
#packages=[
# 'deepwalk',
#],
#entry_points={'console_scripts': ['deepwalk = deepwalk.__main__:main']},
#package_dir={'deepwalk':
# 'deepwalk'},
include_package_data=True,
install_requires=requirements,
license="MIT",
#zip_safe=False,
#keywords='deepwalk',
#classifiers=[
# 'Development Status :: 2 - Pre-Alpha',
# 'Intended Audience :: Developers',
# 'License :: OSI Approved :: BSD License',
# 'Natural Language :: English',
# "Programming Language :: Python :: 2",
# 'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.4',
#],
#test_suite='tests',
#tests_require=test_requirements
)
| 26.762712
| 77
| 0.602913
|
36207c4f3a167e80016784752014f428f7c167d2
| 19,350
|
py
|
Python
|
randobot/handler.py
|
fenhl/rslbot
|
269f5bdd23c4d9f0d24fc85156e344506dda4f34
|
[
"MIT"
] | null | null | null |
randobot/handler.py
|
fenhl/rslbot
|
269f5bdd23c4d9f0d24fc85156e344506dda4f34
|
[
"MIT"
] | 9
|
2020-12-12T10:55:30.000Z
|
2022-03-14T20:57:35.000Z
|
randobot/handler.py
|
fenhl/rslbot
|
269f5bdd23c4d9f0d24fc85156e344506dda4f34
|
[
"MIT"
] | null | null | null |
import sys
import asyncio
import contextlib
import datetime
import json
import pathlib
import re
import subprocess
import lazyjson # https://github.com/fenhl/lazyjson
from racetime_bot import RaceHandler, monitor_cmd, can_moderate, can_monitor
DATA = lazyjson.File('/usr/local/share/fenhl/ootr-web.json')
GEN_LOCK = asyncio.Lock()
def natjoin(sequence, default):
if len(sequence) == 0:
return str(default)
elif len(sequence) == 1:
return str(sequence[0])
elif len(sequence) == 2:
return f'{sequence[0]} and {sequence[1]}'
else:
return ', '.join(sequence[:-1]) + f', and {sequence[-1]}'
def format_duration(duration):
parts = []
hours, duration = divmod(duration, datetime.timedelta(hours=1))
if hours > 0:
parts.append(f'{hours} hour{"" if hours == 1 else "s"}')
minutes, duration = divmod(duration, datetime.timedelta(minutes=1))
if minutes > 0:
parts.append(f'{minutes} minute{"" if minutes == 1 else "s"}')
if duration > datetime.timedelta():
seconds = duration.total_seconds()
parts.append(f'{seconds} second{"" if seconds == 1 else "s"}')
return natjoin(parts, '0 seconds')
def format_breaks(duration, interval):
return f'{format_duration(duration)} every {format_duration(interval)}'
def parse_duration(args, default):
if len(args) == 0:
raise ValueError('Empty duration args')
duration = datetime.timedelta()
for arg in args:
arg = arg.lower()
while len(arg) > 0:
match = re.match('([0-9]+)([smh:]?)', arg)
if not match:
raise ValueError('Unknown duration format')
unit = {
'': default,
's': 'seconds',
'm': 'minutes',
'h': 'hours',
':': 'default'
}[match.group(2)]
default = {
'hours': 'minutes',
'minutes': 'seconds',
'seconds': 'seconds'
}[unit]
duration += datetime.timedelta(**{unit: float(match.group(1))})
arg = arg[len(match.group(0)):]
return duration
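# Illustrative sketch, not part of the bot: how the helpers above combine for a "!breaks 5m every 2h30" command. The
# argument lists mirror what ex_breaks receives after splitting on the word "every".
def _example_break_parsing():
    duration = parse_duration(['5m'], default='minutes')  # 0:05:00
    interval = parse_duration(['2h30'], default='hours')  # 2:30:00
    return format_breaks(duration, interval)  # '5 minutes every 2 hours and 30 minutes'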
class RandoHandler(RaceHandler):
"""
RandoBot race handler. Generates seeds, presets, and frustration.
"""
stop_at = ['cancelled', 'finished']
def __init__(self, rsl_script_path, output_path, base_uri, **kwargs):
super().__init__(**kwargs)
self.rsl_script_path = pathlib.Path(rsl_script_path)
self.output_path = output_path
self.base_uri = base_uri
self.presets = {
'league': {
'info': 'Random Settings League',
'help': 'Random Settings League (default)'
},
'beginner': {
'info': 'Random Settings for beginners',
'help': 'random settings for beginners, see https://ootr.fenhl.net/static/rsl-beginner-weights.html for details'
},
'intermediate': {
'info': 'Intermediate Random settings',
'help': 'a step between Beginner and League'
},
'ddr': {
'info': 'Random Settings DDR',
'help': 'League but always normal damage and with cutscenes useful for tricks in the DDR ruleset'
},
'coop': {
'info': 'Random Settings Co-Op',
'help': 'random settings Co-Op'
},
'multiworld': {
'info': 'Random Settings Multiworld',
'help': 'roll with !seed multiworld <worldcount>'
}
}
self.preset_aliases = {
'rsl': 'league',
'solo': 'league',
'co-op': 'coop',
'mw': 'multiworld',
}
self.seed_rolled = False
def should_stop(self):
return (
self.data.get('goal', {}).get('name') != 'Random settings league'
or self.data.get('goal', {}).get('custom', False)
or super().should_stop()
)
async def begin(self):
"""
Send introduction messages.
"""
if self.should_stop():
return
asyncio.create_task(self.heartbeat(), name=f'heartbeat for {self.data.get("name")}')
for section in self.data.get('info', '').split(' | '):
if section.startswith(f'Seed: {self.base_uri}'):
self.state['spoiler_log'] = section[len(f'Seed: {self.base_uri}'):].split('.zpf')[0] + '_Spoiler.json'
self.state['intro_sent'] = True
break
if not self.state.get('intro_sent') and not self._race_in_progress():
await self.send_message(
'Welcome to the OoTR Random Settings League! Create a seed with !seed <preset>'
)
await self.send_message(
'If no preset is selected, default RSL settings will be used. For a list of presets, use !presets'
)
await self.send_message(
'I will post the spoiler log after the race.'
)
self.state['intro_sent'] = True
if 'locked' not in self.state:
self.state['locked'] = False
if 'fpa' not in self.state:
self.state['fpa'] = False
if 'breaks' not in self.state:
self.state['breaks'] = None
async def heartbeat(self):
while not self.should_stop():
await asyncio.sleep(20)
await self.ws.send(json.dumps({'action': 'ping'}))
async def break_notifications(self):
duration, interval = self.state['breaks']
await asyncio.sleep((interval - datetime.timedelta(minutes=5)).total_seconds())
while not self.should_stop():
asyncio.create_task(self.send_message('@entrants Reminder: Next break in 5 minutes.'))
await asyncio.sleep(datetime.timedelta(minutes=5).total_seconds())
if self.should_stop():
break
asyncio.create_task(self.send_message(f'@entrants Break time! Please pause for {format_duration(duration)}.'))
await asyncio.sleep(duration.total_seconds())
if self.should_stop():
break
asyncio.create_task(self.send_message('@entrants Break ended. You may resume playing.'))
await asyncio.sleep((interval - duration - datetime.timedelta(minutes=5)).total_seconds())
@monitor_cmd
async def ex_lock(self, args, message):
"""
Handle !lock commands.
Prevent seed rolling unless user is a race monitor.
"""
self.state['locked'] = True
await self.send_message(
'Lock initiated. I will now only roll seeds for race monitors.'
)
@monitor_cmd
async def ex_unlock(self, args, message):
"""
Handle !unlock commands.
Remove lock preventing seed rolling unless user is a race monitor.
"""
if self._race_in_progress():
return
self.state['locked'] = False
await self.send_message(
'Lock released. Anyone may now roll a seed.'
)
async def ex_seed(self, args, message):
"""
Handle !seed commands.
"""
if self._race_in_progress():
return
await self.roll_and_send(args, message)
async def ex_presets(self, args, message):
"""
Handle !presets commands.
"""
if self._race_in_progress():
return
await self.send_presets()
async def ex_fpa(self, args, message):
if len(args) == 1 and args[0] in ('on', 'off'):
if not can_monitor(message):
resp = 'Sorry %(reply_to)s, only race monitors can do that.'
elif args[0] == 'on':
if self.state['fpa']:
resp = 'Fair play agreement is already activated.'
else:
self.state['fpa'] = True
resp = (
'Fair play agreement is now active. @entrants may '
'use the !fpa command during the race to notify of a '
'crash. Race monitors should enable notifications '
'using the bell 🔔 icon below chat.'
)
else: # args[0] == 'off'
if not self.state['fpa']:
resp = 'Fair play agreement is not active.'
else:
self.state['fpa'] = False
resp = 'Fair play agreement is now deactivated.'
elif self.state['fpa']:
if self._race_in_progress():
resp = '@everyone FPA has been invoked by @%(reply_to)s.'
else:
resp = 'FPA cannot be invoked before the race starts.'
else:
resp = (
'Fair play agreement is not active. Race monitors may enable '
'FPA for this race with !fpa on'
)
if resp:
reply_to = message.get('user', {}).get('name', 'friend')
await self.send_message(resp % {'reply_to': reply_to})
async def ex_breaks(self, args, message):
if self._race_in_progress():
return
if len(args) == 0:
if self.state['breaks'] is None:
await self.send_message('Breaks are currently disabled. Example command to enable: !breaks 5m every 2h30')
else:
await self.send_message(f'Breaks are currently set to {format_breaks(*self.state["breaks"])}. Disable with !breaks off')
elif len(args) == 1 and args[0] == 'off':
self.state['breaks'] = None
await self.send_message('Breaks are now disabled.')
else:
reply_to = message.get('user', {}).get('name')
try:
sep_idx = args.index('every')
duration = parse_duration(args[:sep_idx], default='minutes')
interval = parse_duration(args[sep_idx + 1:], default='hours')
except ValueError:
await self.send_message(f'Sorry {reply_to or "friend"}, I don\'t recognise that format for breaks. Example commands: !breaks 5m every 2h30, !breaks off')
else:
if duration < datetime.timedelta(minutes=1):
await self.send_message(f'Sorry {reply_to or "friend"}, minimum break time (if enabled at all) is 1 minute. You can disable breaks entirely with !breaks off')
elif interval < duration + datetime.timedelta(minutes=5):
await self.send_message(f'Sorry {reply_to or "friend"}, there must be a minimum of 5 minutes between breaks since I notify runners 5 minutes in advance.')
elif duration + interval >= datetime.timedelta(hours=24):
await self.send_message(f'Sorry {reply_to or "friend"}, race rooms are automatically closed after 24 hours so these breaks wouldn\'t work.')
else:
self.state['breaks'] = duration, interval
await self.send_message(f'Breaks set to {format_breaks(duration, interval)}.')
async def roll_and_send(self, args, message):
"""
Read an incoming !seed command, and generate a new seed if valid.
"""
reply_to = message.get('user', {}).get('name')
if self.state.get('locked') and not can_monitor(message):
await self.send_message(
'Sorry %(reply_to)s, seed rolling is locked. Only race '
'monitors may roll a seed for this race.'
% {'reply_to': reply_to or 'friend'}
)
return
if self.state.get('seed_rolled') and not can_moderate(message):
await self.send_message(
'Well excuuuuuse me princess, but I already rolled a seed. '
'Don\'t get greedy!'
)
return
if len(args) >= 1:
preset = self.preset_aliases.get(args[0].lower(), args[0].lower())
if preset not in self.presets:
await self.send_message(
'Sorry %(reply_to)s, I don\'t recognise that preset. Use '
'!presets to see what is available.'
% {'reply_to': reply_to or 'friend'}
)
return
else:
preset = 'league'
if preset == 'multiworld':
if len(args) == 2:
try:
world_count = int(args[1])
except ValueError:
await self.send_message('World count must be a number')
return
if world_count < 2:
await self.send_message('World count must be at least 2')
return
if world_count > 15:
await self.send_message('Sorry, I can only roll seeds with up to 15 worlds. Please download the RSL script from https://github.com/matthewkirby/plando-random-settings to roll seeds for more than 15 players.')
return
else:
await self.send_message('Missing world count (e.g. “!seed multiworld 2” for 2 worlds)')
return
else:
if len(args) > 1:
await self.send_message('Unexpected parameter')
return
else:
world_count = 1
await self.send_message('Rolling seed…') #TODO also announce position in queue (#5)
async with GEN_LOCK:
await self.roll(preset, world_count, reply_to)
async def race_data(self, data):
await super().race_data(data)
if self.data.get('started_at') is not None:
if not self.state.get('break_notifications_started') and self.state.get('breaks') is not None:
self.state['break_notifications_started'] = True
asyncio.create_task(self.break_notifications(), name=f'break notifications for {self.data.get("name")}')
with contextlib.suppress(Exception):
DATA['races'][self.state['file_stem']]['startTime'] = self.data['started_at']
        if self.data.get('status', {}).get('value') == 'finished':
            await self.send_spoiler()
        elif self.data.get('status', {}).get('value') == 'cancelled':
with contextlib.suppress(Exception):
(pathlib.Path(self.output_path) / f'{self.state["file_stem"]}.zpf').unlink(missing_ok=True)
(pathlib.Path(self.output_path) / f'{self.state["file_stem"]}.zpfz').unlink(missing_ok=True)
(pathlib.Path(self.output_path) / f'{self.state["file_stem"]}_Spoiler.json').unlink(missing_ok=True)
del DATA['races'][self.state['file_stem']]
async def roll(self, preset, world_count, reply_to):
"""
Generate a seed and send it to the race room.
"""
args = [sys.executable, 'RandomSettingsGenerator.py']
if preset != 'league':
args.append(f'--override={preset}_override.json')
if world_count != 1:
args.append(f'--worldcount={world_count}')
try:
process = await asyncio.create_subprocess_exec(*args, cwd=self.rsl_script_path)
exit_code = await process.wait()
if exit_code == 0:
pass
elif exit_code == 1:
await self.send_message(f'Sorry {reply_to or "friend"}, something went wrong while generating the seed. (RSL script crashed, please notify Fenhl)')
return
elif exit_code == 2:
await self.send_message(f'Sorry {reply_to or "friend"}, something went wrong while generating the seed. (Max retries exceeded, please try again or notify Fenhl)')
return
else:
await self.send_message(f'Sorry {reply_to or "friend"}, something went wrong while generating the seed. (Error code {exit_code}, please notify Fenhl)')
return
except subprocess.CalledProcessError:
await self.send_message(f'Sorry {reply_to or "friend"}, something went wrong while generating the seed. (RSL script missing, please notify Fenhl)')
return
patch_files = list((self.rsl_script_path / 'patches').glob('*.zpf')) #TODO parse filename from output
if len(patch_files) == 0:
await self.send_message(f'Sorry {reply_to or "friend"}, something went wrong while generating the seed. (Patch file not found, please notify Fenhl)')
return
elif len(patch_files) > 1:
await self.send_message(f'Sorry {reply_to or "friend"}, something went wrong while generating the seed. (Multiple patch files found, please notify Fenhl)')
return
file_name = patch_files[0].name
file_stem = patch_files[0].stem
self.state['file_stem'] = file_stem
patch_files[0].rename(pathlib.Path(self.output_path) / file_name)
for extra_output_path in [self.rsl_script_path / 'patches' / f'{file_stem}_Cosmetics.json', self.rsl_script_path / 'patches' / f'{file_stem}_Distribution.json']:
if extra_output_path.exists():
extra_output_path.unlink()
seed_uri = self.base_uri + file_name
self.state['spoiler_log'] = file_stem + '_Spoiler.json'
await self.send_message(
'%(reply_to)s, here is your seed: %(seed_uri)s'
% {'reply_to': reply_to or 'Okay', 'seed_uri': seed_uri}
)
await self.set_raceinfo(f'{self.presets[preset]["info"]} | Seed: {seed_uri}', overwrite=preset == 'league', prefix=False)
with contextlib.suppress(Exception):
DATA['races'][file_stem] = {
'roomSlug': self.data['slug'],
'weights': preset
}
with contextlib.suppress(Exception):
with (self.rsl_script_path / 'patches' / self.state['spoiler_log']).open() as f:
await self.send_message(
'The hash is %(file_hash)s.'
% {'file_hash': ', '.join(json.load(f)['file_hash'])}
)
self.state['seed_rolled'] = True
async def send_presets(self):
"""
Send a list of known presets to the race room.
"""
await self.send_message('Available presets:')
for name, data in self.presets.items():
await self.send_message(f'{name} – {data["help"]}')
async def send_spoiler(self):
if 'spoiler_log' in self.state and not self.state.get('spoiler_sent', False):
(self.rsl_script_path / 'patches' / self.state['spoiler_log']).rename(pathlib.Path(self.output_path) / self.state['spoiler_log'])
spoiler_uri = self.base_uri + self.state['spoiler_log']
await self.send_message(f'Here is the spoiler log: {spoiler_uri}')
self.state['spoiler_sent'] = True
await self.set_raceinfo(f'Spoiler log: {spoiler_uri}', prefix=False)
def _race_in_progress(self):
return self.data.get('status').get('value') in ('pending', 'in_progress')
| 43.977273
| 228
| 0.566253
|
a00870bd6815d280cfdfac39778537425e5636ce
| 543
|
py
|
Python
|
nscl/configs/common.py
|
K-A-R-T/DCL-Release
|
44c6e1234af63daa1ae32302eef5981651a5a0aa
|
[
"MIT"
] | 343
|
2019-04-29T03:24:27.000Z
|
2022-03-31T19:25:08.000Z
|
nscl/configs/common.py
|
K-A-R-T/DCL-Release
|
44c6e1234af63daa1ae32302eef5981651a5a0aa
|
[
"MIT"
] | 15
|
2019-06-07T02:23:46.000Z
|
2021-06-14T15:51:10.000Z
|
nscl/configs/common.py
|
K-A-R-T/DCL-Release
|
44c6e1234af63daa1ae32302eef5981651a5a0aa
|
[
"MIT"
] | 92
|
2019-04-29T07:32:56.000Z
|
2022-02-01T22:35:57.000Z
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : common.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 09/29/2018
#
# This file is part of NSCL-PyTorch.
# Distributed under terms of the MIT license.
"""
Common configuration.
"""
from jacinle.utils.container import G
__all__ = ['make_base_configs']
class Config(G):
pass
def make_base_configs():
configs = Config()
configs.data = G()
configs.model = G()
configs.train = G()
configs.train.weight_decay = 1e-4
return configs
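# Illustrative sketch, not part of the module: the container returned by make_base_configs supports attribute-style
# reads and writes (as the assignments above already rely on), so experiment code typically extends it in place. The
# extra learning-rate field is made up.
def _example_configs_usage():
    configs = make_base_configs()
    configs.train.lr = 1e-3  # hypothetical extra hyperparameter
    return configs.train.weight_decay, configs.train.lr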
| 15.970588
| 45
| 0.651934
|
30cdc675ec0b06ccfc9586c95f417e96d111488b
| 427
|
py
|
Python
|
accounts/serializer.py
|
nephsir/daraja
|
0deb9913ab863eadfc1a27f5e292b220f86a1bb7
|
[
"MIT"
] | null | null | null |
accounts/serializer.py
|
nephsir/daraja
|
0deb9913ab863eadfc1a27f5e292b220f86a1bb7
|
[
"MIT"
] | null | null | null |
accounts/serializer.py
|
nephsir/daraja
|
0deb9913ab863eadfc1a27f5e292b220f86a1bb7
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from .models import Loan
from mpesa_api.models import mpesa_response
class LoanSerializer(serializers.ModelSerializer):
class Meta:
model = Loan
        fields = '__all__'
class MpesaSerializer(serializers.ModelSerializer):
class Meta:
model = mpesa_response
        fields = '__all__'
| 26.6875
| 51
| 0.728337
|
156030eaba9703ddb9873d89851b09d8b0603ca2
| 661
|
py
|
Python
|
actors/components/inventory.py
|
Catsuko/Westward
|
3c04df668f7e04ca45e622017ffa9dfe6d3c242c
|
[
"MIT"
] | 3
|
2019-12-22T22:44:43.000Z
|
2020-02-11T11:14:10.000Z
|
actors/components/inventory.py
|
Catsuko/Westward
|
3c04df668f7e04ca45e622017ffa9dfe6d3c242c
|
[
"MIT"
] | null | null | null |
actors/components/inventory.py
|
Catsuko/Westward
|
3c04df668f7e04ca45e622017ffa9dfe6d3c242c
|
[
"MIT"
] | null | null | null |
from actors.components.component import Component
class Inventory(Component):
def __init__(self, items=frozenset()):
self.items = items
def update(self):
return Inventory(frozenset([item.update() for item in iter(self.items)]))
def use_primary(self, actor, root, target, tile):
if len(self.items) == 0:
return root
primary_item = next(iter(self.items))
root, updated_item = primary_item.use(actor, tile, target, root)
updated_items = [updated_item if item is primary_item else item for item in self.items]
return actor.replace(self, Inventory(frozenset(updated_items)), root)
| 34.789474
| 95
| 0.677761
|
f24df63b1b5b5cc90fc48e2dfa3d7e74907e87e3
| 1,055
|
py
|
Python
|
Leetcode/Python/_547.py
|
Xrenya/algorithms
|
aded82cacde2f4f2114241907861251e0e2e5638
|
[
"MIT"
] | null | null | null |
Leetcode/Python/_547.py
|
Xrenya/algorithms
|
aded82cacde2f4f2114241907861251e0e2e5638
|
[
"MIT"
] | null | null | null |
Leetcode/Python/_547.py
|
Xrenya/algorithms
|
aded82cacde2f4f2114241907861251e0e2e5638
|
[
"MIT"
] | null | null | null |
class Union:
def __init__(self, size):
self.root = [i for i in range(size)]
self.rank = [1] * size
self.count = size
def find(self, x):
if x == self.root[x]:
return x
self.root[x] = self.find(self.root[x])
return self.root[x]
def union(self, x, y):
root_x = self.find(x)
root_y = self.find(y)
if root_x != root_y:
if self.rank[root_x] < self.rank[root_y]:
self.root[root_x] = root_y
elif self.rank[root_x] > self.rank[root_y]:
self.root[root_y] = root_x
else:
self.rank[root_x] += 1
self.root[root_y] = root_x
self.count -= 1
class Solution:
def findCircleNum(self, isConnected):
union = Union(len(isConnected))
for i in range(len(isConnected)):
for j in range(len(isConnected[0])):
if isConnected[i][j] == 1:
union.union(i, j)
return union.count
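# Illustrative sketch, not part of the solution: running findCircleNum on the adjacency matrix from the LeetCode 547
# example, which contains two provinces ({0, 1} and {2}).
def _example_find_circle_num():
    is_connected = [
        [1, 1, 0],
        [1, 1, 0],
        [0, 0, 1],
    ]
    return Solution().findCircleNum(is_connected)  # -> 2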
| 30.142857
| 55
| 0.495735
|
535eff964f91e4724e144287fbc6f05f59d2d0ea
| 3,214
|
py
|
Python
|
tests/test-update.py
|
beckjake/python3-hglib
|
0e04d5bcbc1dd20dd54d284339a425abeb74ad1d
|
[
"MIT"
] | null | null | null |
tests/test-update.py
|
beckjake/python3-hglib
|
0e04d5bcbc1dd20dd54d284339a425abeb74ad1d
|
[
"MIT"
] | null | null | null |
tests/test-update.py
|
beckjake/python3-hglib
|
0e04d5bcbc1dd20dd54d284339a425abeb74ad1d
|
[
"MIT"
] | 1
|
2020-01-06T08:20:24.000Z
|
2020-01-06T08:20:24.000Z
|
from . import common
from hglib import error
class test_update(common.basetest):
def setUp(self):
common.basetest.setUp(self)
self.append('a', 'a')
self.rev0, self.node0 = self.client.commit('first', addremove=True)
self.append('a', 'a')
self.rev1, self.node1 = self.client.commit('second')
def test_basic(self):
u, m, r, ur = self.client.update(self.rev0)
self.assertEquals(u, 1)
self.assertEquals(m, 0)
self.assertEquals(r, 0)
self.assertEquals(ur, 0)
def test_unresolved(self):
self.client.update(self.rev0)
self.append('a', 'b')
u, m, r, ur = self.client.update()
self.assertEquals(u, 0)
self.assertEquals(m, 0)
self.assertEquals(r, 0)
self.assertEquals(ur, 1)
self.assertTrue(('M', 'a') in self.client.status())
def test_merge(self):
self.append('a', '\n\n\n\nb')
rev2, node2 = self.client.commit('third')
self.append('a', 'b')
self.client.commit('fourth')
self.client.update(rev2)
old = open('a', 'rb').read()
f = open('a', 'wb')
f.write(b'a' + old)
f.close()
u, m, r, ur = self.client.update()
self.assertEquals(u, 0)
self.assertEquals(m, 1)
self.assertEquals(r, 0)
self.assertEquals(ur, 0)
self.assertEquals(self.client.status(), [('M', 'a')])
def test_tip(self):
self.client.update(self.rev0)
u, m, r, ur = self.client.update()
self.assertEquals(u, 1)
self.assertEquals(self.client.parents()[0].node, self.node1)
self.client.update(self.rev0)
self.append('a', 'b')
rev2, node2 = self.client.commit('new head')
self.client.update(self.rev0)
self.client.update()
self.assertEquals(self.client.parents()[0].node, node2)
def test_check_clean(self):
self.assertRaises(ValueError, self.client.update, clean=True, check=True)
def test_clean(self):
old = open('a').read()
self.append('a', 'b')
self.assertRaises(error.CommandError, self.client.update, check=True)
u, m, r, ur = self.client.update(clean=True)
self.assertEquals(u, 1)
self.assertEquals(old, open('a').read())
def test_basic_plain(self):
f = open('.hg/hgrc', 'a')
f.write('[defaults]\nupdate=-v\n')
f.close()
self.test_basic()
def test_largefiles(self):
import os
f = open('.hg/hgrc', 'a')
f.write('[extensions]\nlargefiles=\n')
f.close()
self.append('b', 'a')
try:
self.client.rawcommand(['add', 'b', '--large'])
except error.CommandError:
return
rev2, node2 = self.client.commit('third')
# Go back to 0
self.client.rawcommand(['update', str(self.rev0)],
# Keep the 'changed' version
prompt=lambda s, d: b'c\n')
u, m, r, ur = self.client.update(rev2, clean=True)
self.assertEquals(u, 2)
self.assertEquals(m, 0)
self.assertEquals(r, 0)
self.assertEquals(ur, 0)
| 32.464646
| 81
| 0.555694
|
7dd92e2e2a53e0736b730706bb1691ca0e7af0a7
| 118
|
py
|
Python
|
{{cookiecutter.project_name}}/tests/test_{{cookiecutter.package_name}}.py
|
clbarnes/python-template
|
c7ad6b779f8e6aae8c5bf9049aa4ebe14e42c115
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_name}}/tests/test_{{cookiecutter.package_name}}.py
|
clbarnes/python-template
|
c7ad6b779f8e6aae8c5bf9049aa4ebe14e42c115
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_name}}/tests/test_{{cookiecutter.package_name}}.py
|
clbarnes/python-template
|
c7ad6b779f8e6aae8c5bf9049aa4ebe14e42c115
|
[
"MIT"
] | null | null | null |
def test_importable():
import {{cookiecutter.package_name}}
assert {{cookiecutter.package_name}}.__version__
| 23.6
| 52
| 0.754237
|
b4362d811283e4e9df29cc315c7bb2308ce1319a
| 290
|
py
|
Python
|
devolpmentscratch/sampletest.py
|
catatonicpig/ModCal
|
bd792e16cbb343ce330905feeb00e2e2b045653d
|
[
"BSD-3-Clause"
] | 2
|
2020-11-16T17:34:48.000Z
|
2021-01-15T19:17:33.000Z
|
devolpmentscratch/sampletest.py
|
catatonicpig/ModCal
|
bd792e16cbb343ce330905feeb00e2e2b045653d
|
[
"BSD-3-Clause"
] | null | null | null |
devolpmentscratch/sampletest.py
|
catatonicpig/ModCal
|
bd792e16cbb343ce330905feeb00e2e2b045653d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Header here."""
import numpy as np
def borehole_model(x, theta):
    """Given x and theta, return matrix of [row x] times [row theta] of values."""
    # Placeholder stub: the model response f is not implemented in this scratch file yet.
    raise NotImplementedError('borehole_model must compute f from x and theta.')
def borehole_true(x):
    """Given x, return matrix of [row x] times 1 of values."""
    # Placeholder stub: the true response y is not implemented in this scratch file yet.
    raise NotImplementedError('borehole_true must compute y from x.')
| 20.714286
| 82
| 0.62069
|
260c684d4d8d12178613ff0fe5120cd554875771
| 3,513
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/features/enterprise_support/tests/factories.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/features/enterprise_support/tests/factories.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/features/enterprise_support/tests/factories.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
"""
Factoryboy factories.
"""
from uuid import UUID
import factory
from faker import Factory as FakerFactory
from enterprise.models import (
EnterpriseCourseEnrollment,
EnterpriseCustomer,
EnterpriseCustomerBrandingConfiguration,
EnterpriseCustomerIdentityProvider,
EnterpriseCustomerUser,
)
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
FAKER = FakerFactory.create()
class EnterpriseCustomerFactory(factory.django.DjangoModelFactory):
"""
EnterpriseCustomer factory.
Creates an instance of EnterpriseCustomer with minimal boilerplate - uses this class' attributes as default
parameters for EnterpriseCustomer constructor.
"""
class Meta:
"""
Meta for EnterpriseCustomerFactory.
"""
model = EnterpriseCustomer
uuid = factory.LazyAttribute(lambda x: UUID(FAKER.uuid4())) # pylint: disable=no-member
name = factory.LazyAttribute(lambda x: FAKER.company()) # pylint: disable=no-member
slug = factory.LazyAttribute(lambda x: FAKER.slug()) # pylint: disable=no-member
active = True
site = factory.SubFactory(SiteFactory)
enable_data_sharing_consent = True
enforce_data_sharing_consent = EnterpriseCustomer.AT_ENROLLMENT
class EnterpriseCustomerUserFactory(factory.django.DjangoModelFactory):
"""
EnterpriseCustomer factory.
Creates an instance of EnterpriseCustomerUser with minimal boilerplate - uses this class' attributes as default
parameters for EnterpriseCustomerUser constructor.
"""
class Meta:
"""
Meta for EnterpriseCustomerFactory.
"""
model = EnterpriseCustomerUser
enterprise_customer = factory.SubFactory(EnterpriseCustomerFactory)
user_id = factory.LazyAttribute(lambda x: FAKER.pyint()) # pylint: disable=no-member
class EnterpriseCourseEnrollmentFactory(factory.django.DjangoModelFactory):
"""
EnterpriseCourseEnrollment factory.
Creates an instance of EnterpriseCourseEnrollment with minimal boilerplate.
"""
class Meta:
"""
Meta for EnterpriseCourseEnrollmentFactory.
"""
model = EnterpriseCourseEnrollment
course_id = factory.LazyAttribute(lambda x: FAKER.slug()) # pylint: disable=no-member
enterprise_customer_user = factory.SubFactory(EnterpriseCustomerUserFactory)
class EnterpriseCustomerBrandingConfigurationFactory(factory.django.DjangoModelFactory):
"""
EnterpriseCustomerBrandingConfiguration factory
Creates an instance of EnterpriseCustomerBrandingConfiguration with minimal boilerplate.
"""
class Meta:
"""
Meta for EnterpriseCustomerBrandingConfigurationFactory.
"""
model = EnterpriseCustomerBrandingConfiguration
logo = FAKER.image_url() # pylint: disable=no-member
primary_color = FAKER.color() # pylint: disable=no-member
secondary_color = FAKER.color() # pylint: disable=no-member
tertiary_color = FAKER.color() # pylint: disable=no-member
class EnterpriseCustomerIdentityProviderFactory(factory.django.DjangoModelFactory):
"""
EnterpriseCustomerIdentityProvider factory.
"""
class Meta:
"""
Meta for EnterpriseCustomerIdentityProviderFactory.
"""
model = EnterpriseCustomerIdentityProvider
enterprise_customer = factory.SubFactory(EnterpriseCustomerFactory)
provider_id = factory.LazyAttribute(lambda x: FAKER.slug()) # pylint: disable=no-member
| 29.771186
| 115
| 0.738685
|
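A minimal usage sketch for the factories above, assuming the edx-platform test environment (Django configured, pytest-django installed); the test name and the company name are illustrative only, not part of the repository.

from openedx.features.enterprise_support.tests.factories import (
    EnterpriseCourseEnrollmentFactory,
    EnterpriseCustomerFactory,
)

def test_enrollment_chain(db):  # `db` is the pytest-django fixture that enables DB access
    # One call creates and saves the whole SubFactory chain: enrollment -> customer user -> customer.
    enrollment = EnterpriseCourseEnrollmentFactory()
    assert enrollment.enterprise_customer_user.enterprise_customer.active
    # build() evaluates the LazyAttributes but does not touch the database.
    customer = EnterpriseCustomerFactory.build(name="Acme Corp")
    assert customer.pk is None
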
b7a8c483857351b5e577c2bcfad6fe425cb99e71
| 3,085
|
py
|
Python
|
setup.py
|
iatechicken/fhir.resources
|
8ccb21aaa00755c6d230522bd7ddb655155b4bcb
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
iatechicken/fhir.resources
|
8ccb21aaa00755c6d230522bd7ddb655155b4bcb
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
iatechicken/fhir.resources
|
8ccb21aaa00755c6d230522bd7ddb655155b4bcb
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
requirements = ["pydantic[email]>=1.7.2"]
setup_requirements = ["pytest-runner"]
orjson_requirements = ["orjson>=3.4.3"]
test_requirements = [
"coverage",
"pytest>5.4.0;python_version>='3.6'",
"pytest-cov>=2.10.0;python_version>='3.6'",
"flake8==3.8.3",
"flake8-isort==3.0.0",
"flake8-bugbear==20.1.4",
"requests==2.23.0",
"isort==4.3.21",
"black",
"mypy",
]
development_requirements = [
"Jinja2==2.11.1",
"MarkupSafe==1.1.1",
"colorlog==2.10.0",
"certifi",
"fhirspec",
"zest-releaser[recommended]",
]
setup(
author="Md Nazrul Islam",
author_email="email2nazrul@gmail.com",
# Get more from https://pypi.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"Operating System :: OS Independent",
"Typing :: Typed",
],
description="FHIR Resources as Model Class",
install_requires=requirements,
license="BSD license",
long_description=readme + "\n\n" + history,
include_package_data=True,
keywords="fhir, resources, python, hl7, health IT, healthcare",
name="fhir.resources",
namespace_packages=["fhir"],
packages=find_packages(exclude=["ez_setup"]),
package_data={"fhir.resources": ["py.typed"]},
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
extras_require={
"orjson": orjson_requirements,
"test": (test_requirements + setup_requirements),
"all": (
test_requirements
+ setup_requirements
+ development_requirements
+ orjson_requirements
),
},
url="https://github.com/nazrulworld/fhir.resources",
version="6.0.0b12.dev0",
zip_safe=False,
python_requires=">=3.6",
project_urls={
"CI: Travis": "https://travis-ci.org/github/nazrulworld/fhir.resources",
"Coverage: codecov": "https://codecov.io/gh/nazrulworld/fhir.resources",
"GitHub: issues": "https://github.com/nazrulworld/fhir.resources/issues",
"GitHub: repo": "https://github.com/nazrulworld/fhir.resources",
},
)
| 31.479592
| 81
| 0.624635
|
3eb895e821fb22d4a50a89f3135556f8f2e16071
| 1,258
|
py
|
Python
|
kern_sym_test.py
|
rlluo1/SET
|
20625a3fa528b75a1ffe8794ecb9b9d0557d363f
|
[
"NASA-1.3"
] | 10
|
2018-01-08T22:09:24.000Z
|
2020-12-16T00:57:06.000Z
|
kern_sym_test.py
|
rlluo1/SET
|
20625a3fa528b75a1ffe8794ecb9b9d0557d363f
|
[
"NASA-1.3"
] | 7
|
2018-01-09T20:05:51.000Z
|
2019-09-05T19:12:49.000Z
|
kern_sym_test.py
|
rlluo1/SET
|
20625a3fa528b75a1ffe8794ecb9b9d0557d363f
|
[
"NASA-1.3"
] | 5
|
2018-08-07T15:18:09.000Z
|
2021-05-12T19:33:52.000Z
|
from osgeo import gdal
#from osgeo import gdal_array
import numpy as np
from PIL import Image
#from matplotlib import pyplot as plt
#tests whether north, south kernels match (they should)
fp000 = "kernel_30.23151_1.0_80.0_0.0.tif"
fp180 = "kernel_30.23151_1.0_80.0_-180.0.tif"
ds_k000 = gdal.Open(fp000)
ds_k180 = gdal.Open(fp180)
print("GUIS Kernel 000 Metadata")
print(ds_k000.GetMetadata())
print("GUIS Kernel 180 Metadata")
print(ds_k180.GetMetadata())
print("[ RASTER BAND COUNT ]: ", ds_k000.RasterCount)
bnd000 = ds_k000.GetRasterBand(1)
nodata000 = bnd000.GetNoDataValue()
print(type(bnd000))
print("[ RASTER BAND COUNT ]: ", ds_k180.RasterCount)
bnd180 = ds_k180.GetRasterBand(1)
nodata180 = bnd180.GetNoDataValue()
print(type(bnd180))
arr000 = bnd000.ReadAsArray()
print(type(arr000))
# arr000 = np.ma.masked_equal(arr000, nodata000)
arr000[np.isnan(arr000)] = 9999
print(arr000.min())
arr180 = bnd180.ReadAsArray()
print(type(arr180))
# arr180 = np.ma.masked_equal(arr180, nodata180)
arr180[np.isnan(arr180)] = 9999  # use the same no-data sentinel as arr000
print(arr180.min())
arr000flip = np.flipud(arr000)
arr_symchk = np.divide(arr180, arr000flip)
print(arr_symchk.min())
print(arr_symchk.max())
# img = Image.fromarray(arr_symchk)
# #img.save('symmetry_check.png')
# img.show()
| 26.765957
| 55
| 0.754372
|
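As a hedged alternative to the ratio check in the script above, a small NumPy-only helper can compare the two kernels directly, treating NaN cells as no-data instead of overwriting them with sentinel values; the function name is illustrative.

import numpy as np

def kernels_mirror(a_north: np.ndarray, a_south: np.ndarray, rtol: float = 1e-5) -> bool:
    """Return True if the south kernel equals the north kernel flipped top-to-bottom.
    NaN no-data cells must coincide and are excluded from the numeric comparison."""
    flipped = np.flipud(a_north)
    nan_a, nan_b = np.isnan(flipped), np.isnan(a_south)
    if not np.array_equal(nan_a, nan_b):
        return False
    valid = ~nan_a
    return np.allclose(flipped[valid], a_south[valid], rtol=rtol)
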
f0111e0c249c2e3c63fa028777cfd2344f2170a1
| 4,414
|
py
|
Python
|
ivi/extra/dcpwr.py
|
lude-ma/python-ivi
|
f62907a2922d5fc98e0a524ef6ddbaa62791ff14
|
[
"MIT"
] | 1
|
2017-09-09T06:04:14.000Z
|
2017-09-09T06:04:14.000Z
|
ivi/extra/dcpwr.py
|
lude-ma/python-ivi
|
f62907a2922d5fc98e0a524ef6ddbaa62791ff14
|
[
"MIT"
] | null | null | null |
ivi/extra/dcpwr.py
|
lude-ma/python-ivi
|
f62907a2922d5fc98e0a524ef6ddbaa62791ff14
|
[
"MIT"
] | null | null | null |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
class OCP(object):
"Extension IVI methods for power supplies supporting overcurrent protection"
def __init__(self, *args, **kwargs):
super(OCP, self).__init__(*args, **kwargs)
cls = 'IviDCPwr'
grp = 'OCP'
ivi.add_group_capability(self, cls+grp)
self._output_ocp_enabled = list()
self._output_ocp_limit = list()
self._output_spec = [
{
'range': {
'P8V': (9.0, 20.0),
'P20V': (21.0, 10.0)
},
'ovp_max': 22.0,
'ocp_max': 22.0,
'voltage_max': 9.0,
'current_max': 20.0
}
]
ivi.add_property(self, 'outputs[].ocp_enabled',
self._get_output_ocp_enabled,
self._set_output_ocp_enabled,
None,
ivi.Doc("""
Specifies whether the power supply provides over-current protection. If
this attribute is set to True, the power supply disables the output when
the output current is greater than or equal to the value of the OCP
Limit attribute.
"""))
ivi.add_property(self, 'outputs[].ocp_limit',
self._get_output_ocp_limit,
self._set_output_ocp_limit,
None,
ivi.Doc("""
Specifies the current the power supply allows. The units are Amps.
If the OCP Enabled attribute is set to True, the power supply disables the
output when the output current is greater than or equal to the value of
this attribute.
If the OCP Enabled is set to False, this attribute does not affect the
behavior of the instrument.
"""))
self._init_outputs()
def _init_outputs(self):
try:
super(OCP, self)._init_outputs()
except AttributeError:
pass
self._output_ocp_enabled = list()
self._output_ocp_limit = list()
for i in range(self._output_count):
self._output_ocp_enabled.append(True)
self._output_ocp_limit.append(0)
def _get_output_ocp_enabled(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_ocp_enabled[index]
def _set_output_ocp_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
self._output_ocp_enabled[index] = value
def _get_output_ocp_limit(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_ocp_limit[index]
def _set_output_ocp_limit(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
self._output_ocp_limit[index] = value
def _output_reset_output_protection(self, index):
pass
| 37.726496
| 98
| 0.59266
|
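A stand-alone sketch of the per-output getter/setter pattern the OCP mixin relies on, stripped of the python-ivi plumbing (ivi.get_index resolves either an output name or a positional index). The class below is purely illustrative and is not part of python-ivi.

class OutputLimits:
    def __init__(self, names):
        self._names = list(names)
        self._ocp_limit = [0.0] * len(names)

    def _index(self, key):
        # Accept either an output name or a positional index, like ivi.get_index.
        return self._names.index(key) if isinstance(key, str) else int(key)

    def get_ocp_limit(self, key):
        return self._ocp_limit[self._index(key)]

    def set_ocp_limit(self, key, value):
        self._ocp_limit[self._index(key)] = float(value)

outputs = OutputLimits(["output1", "output2"])
outputs.set_ocp_limit("output1", 2.5)
assert outputs.get_ocp_limit(0) == 2.5
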
f92096aa55de377aa191c4ba350bb50f173398c3
| 4,545
|
py
|
Python
|
botenv/lib/python3.9/site-packages/telegram/inline/inlinequeryresultcachedaudio.py
|
0xtuytuy/unit-crypto-ski-week-poap-bot
|
9bab0a6013a29db9ce76311d4f6fa1d0922ac5c1
|
[
"MIT"
] | null | null | null |
botenv/lib/python3.9/site-packages/telegram/inline/inlinequeryresultcachedaudio.py
|
0xtuytuy/unit-crypto-ski-week-poap-bot
|
9bab0a6013a29db9ce76311d4f6fa1d0922ac5c1
|
[
"MIT"
] | null | null | null |
botenv/lib/python3.9/site-packages/telegram/inline/inlinequeryresultcachedaudio.py
|
0xtuytuy/unit-crypto-ski-week-poap-bot
|
9bab0a6013a29db9ce76311d4f6fa1d0922ac5c1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2022
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the classes that represent Telegram InlineQueryResultCachedAudio."""
from typing import TYPE_CHECKING, Any, Union, Tuple, List
from telegram import InlineQueryResult, MessageEntity
from telegram.utils.helpers import DEFAULT_NONE
from telegram.utils.types import ODVInput
if TYPE_CHECKING:
from telegram import InputMessageContent, ReplyMarkup
class InlineQueryResultCachedAudio(InlineQueryResult):
"""
Represents a link to an mp3 audio file stored on the Telegram servers. By default, this audio
file will be sent by the user. Alternatively, you can use :attr:`input_message_content` to
send a message with the specified content instead of the audio.
Args:
id (:obj:`str`): Unique identifier for this result, 1-64 bytes.
audio_file_id (:obj:`str`): A valid file identifier for the audio file.
caption (:obj:`str`, optional): Caption, 0-1024 characters after entities parsing.
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show
bold, italic, fixed-width text or inline URLs in the media caption. See the constants
in :class:`telegram.ParseMode` for the available modes.
caption_entities (List[:class:`telegram.MessageEntity`], optional): List of special
entities that appear in the caption, which can be specified instead of
:attr:`parse_mode`.
reply_markup (:class:`telegram.InlineKeyboardMarkup`, optional): Inline keyboard attached
to the message.
input_message_content (:class:`telegram.InputMessageContent`, optional): Content of the
message to be sent instead of the audio.
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Attributes:
type (:obj:`str`): 'audio'.
id (:obj:`str`): Unique identifier for this result, 1-64 bytes.
audio_file_id (:obj:`str`): A valid file identifier for the audio file.
caption (:obj:`str`): Optional. Caption, 0-1024 characters after entities parsing.
parse_mode (:obj:`str`): Optional. Send Markdown or HTML, if you want Telegram apps to show
bold, italic, fixed-width text or inline URLs in the media caption. See the constants
in :class:`telegram.ParseMode` for the available modes.
caption_entities (List[:class:`telegram.MessageEntity`]): Optional. List of special
entities that appear in the caption, which can be specified instead of
:attr:`parse_mode`.
reply_markup (:class:`telegram.InlineKeyboardMarkup`): Optional. Inline keyboard attached
to the message.
input_message_content (:class:`telegram.InputMessageContent`): Optional. Content of the
message to be sent instead of the audio.
"""
__slots__ = (
'reply_markup',
'caption_entities',
'caption',
'parse_mode',
'audio_file_id',
'input_message_content',
)
def __init__(
self,
id: str, # pylint: disable=W0622
audio_file_id: str,
caption: str = None,
reply_markup: 'ReplyMarkup' = None,
input_message_content: 'InputMessageContent' = None,
parse_mode: ODVInput[str] = DEFAULT_NONE,
caption_entities: Union[Tuple[MessageEntity, ...], List[MessageEntity]] = None,
**_kwargs: Any,
):
# Required
super().__init__('audio', id)
self.audio_file_id = audio_file_id
# Optionals
self.caption = caption
self.parse_mode = parse_mode
self.caption_entities = caption_entities
self.reply_markup = reply_markup
self.input_message_content = input_message_content
| 45
| 99
| 0.690869
|
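A small construction example for the class above, assuming python-telegram-bot v13.x (the vintage vendored here); the file_id argument and the callback data are placeholders, not real Telegram identifiers.

from telegram import InlineKeyboardButton, InlineKeyboardMarkup, InlineQueryResultCachedAudio

def build_cached_audio_result(file_id: str) -> InlineQueryResultCachedAudio:
    # file_id must be a valid audio file identifier previously returned by Telegram.
    keyboard = InlineKeyboardMarkup([[InlineKeyboardButton("Replay", callback_data="replay")]])
    return InlineQueryResultCachedAudio(
        id="1",
        audio_file_id=file_id,
        caption="Cached audio example",
        reply_markup=keyboard,
    )
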
5f94aaa8261f98a4cb1cff84f8ca601fd0903d99
| 5,398
|
py
|
Python
|
Basic_ML/Sentiment_Analysis/combined.py
|
jrclimer/Projects
|
6023f8309685d1a273d7e89993863c89ad85dfb5
|
[
"MIT"
] | 27
|
2016-11-18T11:15:58.000Z
|
2021-02-26T05:46:37.000Z
|
Basic_ML/Sentiment_Analysis/combined.py
|
imsrgadich/Projects_shang
|
a9d4395a98a79fb0a700a99168cd358ab7494fdf
|
[
"MIT"
] | 1
|
2022-01-21T16:09:40.000Z
|
2022-01-21T16:30:10.000Z
|
Basic_ML/Sentiment_Analysis/combined.py
|
imsrgadich/Projects_shang
|
a9d4395a98a79fb0a700a99168cd358ab7494fdf
|
[
"MIT"
] | 22
|
2016-11-27T06:02:26.000Z
|
2021-09-22T13:40:55.000Z
|
import pandas as pd
import numpy as np
import csv, collections
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
import nltk.stem
import nltk
import sys
pos = pd.read_csv('positive.gz',sep=',',header=0)
neg = pd.read_csv('negative.gz',sep=',',header=0)
pos_text = pos['Summary'] + " " + pos['Text']
neg_text = neg['Summary'] + " " + neg['Text']
pos_text = pos_text.map(lambda x: x.decode('utf8', 'ignore').replace('<br />',' '))
neg_text = neg_text.map(lambda x: x.decode('utf8', 'ignore').replace('<br />',' '))
pos_train = pos_text.iloc[:40000]
neg_train = neg_text.iloc[:40000]
pos_test = pos_text.iloc[40000:]
neg_test = neg_text.iloc[40000:]
X_train = pos_train.append(neg_train)
y_train = np.append(np.ones((len(pos_train))),np.zeros((len(neg_train))))
y_test_pos = np.ones((len(pos_test)))
y_test_neg = np.zeros((len(neg_test)))
print "vectorizing reviews"
english_stemmer = nltk.stem.SnowballStemmer('english')
class StemmedTfidfVectorizer(TfidfVectorizer):
def build_analyzer(self):
analyzer = super(TfidfVectorizer, self).build_analyzer()
return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))
vectorizer = StemmedTfidfVectorizer(min_df=3, stop_words='english', ngram_range=(1, 1), decode_error='ignore')
X_train = vectorizer.fit_transform(X_train)
X_test_pos = vectorizer.transform(pos_test)
X_test_neg = vectorizer.transform(neg_test)
#classify emails with naive bayes
print "classifing reviews w/ Naive Bayes"
clf = MultinomialNB()
clf.fit(X_train, y_train)
nb_proba_train = clf.predict_proba(X_train)[:,1][:,np.newaxis]
nb_proba_pos = clf.predict_proba(X_test_pos)[:,1][:,np.newaxis]
nb_proba_neg = clf.predict_proba(X_test_neg)[:,1][:,np.newaxis]
def load_sentiwordnet():
print 'loading sentiwordnet'
sent_scores = collections.defaultdict(list)
with open("SentiWordNet_3.0.0_20130122.txt", "r") as csvfile:
reader = csv.reader(csvfile, delimiter='\t',quotechar='"')
for line in reader:
if line[0].startswith("#"):
continue
if len(line)==1:
continue
POS, ID, PosScore, NegScore, SynsetTerms, Gloss = line
if len(POS)==0 or len(ID)==0:
continue
for term in SynsetTerms.split(" "):
term = term.split("#")[0]
term = term.replace("-", " ").replace("_", " ")
key = "%s/%s"%(POS, term.split("#")[0])
sent_scores[key].append((float(PosScore),float(NegScore)))
for key, value in sent_scores.items():
sent_scores[key] = np.mean(value, axis=0)
return sent_scores
def evaluate_sentiment(text):
pos_score = 0
neg_score = 0
tokened = nltk.word_tokenize(text)
pos_pairs = nltk.pos_tag(tokened)
for tuple in pos_pairs:
pos = ''
if tuple[1] == "NN":
pos = 'n/'
if tuple[1] == "JJ":
pos = 'a/'
if tuple[1] == "VB":
pos = 'v/'
if tuple[1] == "RB":
pos = 'r/'
try:
pos_score += sentiwordnet[pos+tuple[0].lower()][0]
neg_score += sentiwordnet[pos+tuple[0].lower()][1]
except:
pass
return pos_score, neg_score
sentiwordnet = load_sentiwordnet()
X_train = pos_train.append(neg_train)
swn_proba_train = np.zeros((len(X_train),1))
processed = 0
for i in range(len(X_train)):
pos_score,neg_score = evaluate_sentiment(X_train.iloc[i])
if pos_score == 0 and neg_score == 0:
swn_proba_train[i,0] += 0.5
else:
swn_proba_train[i,0] += pos_score/(pos_score + neg_score)
processed += 1
sys.stdout.write('SentiWordNet processed %i of %i training set reviews \r' % (processed, len(X_train)))
sys.stdout.flush()
print ''
swn_proba_pos = np.zeros((len(pos_test),1))
processed = 0
for i in range(len(pos_test)):
pos_score,neg_score = evaluate_sentiment(pos_test.iloc[i])
if pos_score == 0 and neg_score == 0:
swn_proba_pos[i,0] += 0.5
else:
swn_proba_pos[i,0] += pos_score/(pos_score + neg_score)
processed += 1
sys.stdout.write('SentiWordNet processed %i of %i positive test set reviews \r' % (processed, len(pos_test)))
sys.stdout.flush()
print ''
swn_proba_neg = np.zeros((len(neg_test),1))
processed = 0
for i in range(len(neg_test)):
pos_score,neg_score = evaluate_sentiment(neg_test.iloc[i])
if pos_score == 0 and neg_score == 0:
swn_proba_neg[i,0] += 0.5
else:
swn_proba_neg[i,0] += pos_score/(pos_score + neg_score)
processed += 1
sys.stdout.write('SentiWordNet processed %i of %i negative test set reviews \r' % (processed, len(neg_test)))
sys.stdout.flush()
print ''
print "training logistic regression classifier"
comb_train = np.concatenate((swn_proba_train,nb_proba_train),1)
comb_test_pos = np.concatenate((swn_proba_pos,nb_proba_pos),1)
comb_test_neg = np.concatenate((swn_proba_neg,nb_proba_neg),1)
lr = LogisticRegression()
lr.fit(comb_train, y_train)
score_pos = lr.score(comb_test_pos, y_test_pos)
score_neg = lr.score(comb_test_neg, y_test_neg)
print "Combined accuracy on %i positive reviews: %.2f%%" % (len(y_test_pos), score_pos*100)
print "Combined accuracy on %i negative reviews: %.2f%%" % (len(y_test_neg), score_neg*100)
| 36.972603
| 113
| 0.662838
|
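The final step of the script above is a simple two-feature stacking model. The sketch below reproduces just that step on synthetic probabilities (Python 3, NumPy and scikit-learn only); the variable names merely mirror the script's swn_proba/nb_proba features and are not taken from it.

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
n = 1000
y = rng.randint(0, 2, n)
# Two noisy "base model" probability estimates, analogous to swn_proba and nb_proba.
p_lexicon = np.clip(y + rng.normal(0, 0.45, n), 0, 1)
p_bayes = np.clip(y + rng.normal(0, 0.30, n), 0, 1)
X_meta = np.column_stack([p_lexicon, p_bayes])

# The meta-classifier learns how to weight the two base scores.
meta = LogisticRegression().fit(X_meta, y)
print("stacked accuracy: %.3f" % meta.score(X_meta, y))
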
b7f24bb5ae6ccfdc333eeb2595a6efaf58941b38
| 2,935
|
py
|
Python
|
api/user/profile.py
|
Kingtous/Flask-CodeRunningServer
|
9613903ba70a6106cb393da45c083c854646969e
|
[
"MIT"
] | 3
|
2020-01-26T10:42:18.000Z
|
2020-10-18T15:29:03.000Z
|
api/user/profile.py
|
Kingtous/Flask-CodeRunningServer
|
9613903ba70a6106cb393da45c083c854646969e
|
[
"MIT"
] | null | null | null |
api/user/profile.py
|
Kingtous/Flask-CodeRunningServer
|
9613903ba70a6106cb393da45c083c854646969e
|
[
"MIT"
] | 1
|
2020-10-18T15:29:04.000Z
|
2020-10-18T15:29:04.000Z
|
from flask import request, g
from flask_restful import Resource
from app.database_models import User
from app_config import auth
from app_utils import AppUtils
from common.constants.response_code import ResponseClass, ResponseCode
# Edit profile: only the nickname and avatar can be changed
class AlterProfile(Resource):
@auth.login_required
def post(self):
field = request.json.get("field", None)
value = request.json.get("value", None)
if field is not None and value is not None:
            user = g.user  # must be re-queried: this User object is not bound to a SQLAlchemy session, so it cannot be updated
session = AppUtils.get_session()
user = session.query(User).filter_by(id=user.id).first()
try:
if field == "nickname":
user.nickname = value
session.commit()
return ResponseClass.ok()
elif field == "avatar_url":
user.avatar_url = value
session.commit()
return ResponseClass.ok()
finally:
session.close()
return ResponseClass.warn(ResponseCode.FORMAT_ERROR)
# Fetch user statistics
class UserStatistic(Resource):
@auth.login_required
def get(self, id):
session = AppUtils.get_session()
try:
user = g.user
if user.id != id:
                # Querying another user's information
query_user = session.query(User).filter_by(id=id).first()
return ResponseClass.ok_with_data(
query_user.get_minimal_data()) if query_user is not None else ResponseClass.warn(
ResponseCode.USER_NOT_EXIST)
else:
                return ResponseClass.ok_with_data(user.get_self_data())
finally:
session.close()
# Like/unlike on the profile page
class UserLikeApi(Resource):
@auth.login_required
def get(self, user_id):
session = AppUtils.get_session()
try:
from app.database_models import User
q_user = session.query(User).filter_by(id=user_id).with_for_update().first()
if q_user is None:
return ResponseClass.warn(ResponseCode.USER_NOT_EXIST)
else:
                # Check whether this user has already liked the profile; if so, the like is withdrawn below
from app.database_models import UserLikes
result = session.query(UserLikes).filter_by(user_id=g.user.id,
like_user=user_id).with_for_update().first()
if result is None:
                    # Add a like
q_user.likes += 1
likes = UserLikes()
likes.user_id = g.user.id
likes.like_user = user_id
session.add(likes)
else:
q_user.likes -= 1
session.delete(result)
session.commit()
return ResponseClass.ok_with_data(q_user.likes)
finally:
session.close()
| 34.127907
| 104
| 0.547189
|
443a026c744c5193e6ebaada5893073e70eeb084
| 598
|
py
|
Python
|
tests/sphinx_xref/conf.py
|
tbeadle/recommonmark
|
98200fc475543b18d90db62fe771bfc092ae1b98
|
[
"MIT"
] | 240
|
2015-07-28T18:39:34.000Z
|
2019-06-26T20:54:26.000Z
|
tests/sphinx_xref/conf.py
|
tbeadle/recommonmark
|
98200fc475543b18d90db62fe771bfc092ae1b98
|
[
"MIT"
] | 140
|
2015-07-29T06:22:10.000Z
|
2019-06-28T14:01:50.000Z
|
tests/sphinx_xref/conf.py
|
tbeadle/recommonmark
|
98200fc475543b18d90db62fe771bfc092ae1b98
|
[
"MIT"
] | 199
|
2015-07-29T06:10:19.000Z
|
2019-06-21T19:03:00.000Z
|
# -*- coding: utf-8 -*-
from recommonmark.parser import CommonMarkParser
extensions = ['sphinx.ext.autosectionlabel']
autosectionlabel_prefix_document = True
templates_path = ['_templates']
source_suffix = '.md'
source_parsers = { '.md': CommonMarkParser }
master_doc = 'index'
project = u'sphinxproj'
copyright = u'2015, rtfd'
author = u'rtfd'
version = '0.1'
release = '0.1'
highlight_language = 'python'
language = None
exclude_patterns = ['_build']
pygments_style = 'sphinx'
todo_include_todos = False
html_theme = 'alabaster'
html_static_path = ['_static']
htmlhelp_basename = 'sphinxproj'
| 23
| 48
| 0.745819
|
582d427acb28a9daa9c84ffc67f06458c2e33073
| 219
|
py
|
Python
|
Django Projects/SecureWebsite/secure_app/urls.py
|
Phantom586/My_Projects
|
117f684302ddb53f8f840545fa5d429fa2f37147
|
[
"MIT"
] | null | null | null |
Django Projects/SecureWebsite/secure_app/urls.py
|
Phantom586/My_Projects
|
117f684302ddb53f8f840545fa5d429fa2f37147
|
[
"MIT"
] | null | null | null |
Django Projects/SecureWebsite/secure_app/urls.py
|
Phantom586/My_Projects
|
117f684302ddb53f8f840545fa5d429fa2f37147
|
[
"MIT"
] | 2
|
2020-07-04T14:10:30.000Z
|
2020-11-07T11:24:19.000Z
|
from django.urls import path
from secure_app import views
app_name = 'secure_app'
urlpatterns = [
path('register/', views.register, name='register'),
path('user_login/', views.user_login, name='user_login'),
]
| 24.333333
| 61
| 0.721461
|
efbca0dd7d004aeb3d5d26a4e06b9fb104bdaed0
| 1,606
|
py
|
Python
|
zygoat/utils/files.py
|
Ian-MacLeod/zygoat
|
83773fdebf8cddf06903c2d32bd575e33e23e252
|
[
"MIT"
] | null | null | null |
zygoat/utils/files.py
|
Ian-MacLeod/zygoat
|
83773fdebf8cddf06903c2d32bd575e33e23e252
|
[
"MIT"
] | 1
|
2020-02-25T13:06:02.000Z
|
2020-02-25T13:06:02.000Z
|
zygoat/utils/files.py
|
kborer/zygoat
|
638bbdb2bc8b39510c03c77d968e94aadf5ae51b
|
[
"MIT"
] | null | null | null |
from contextlib import contextmanager
import os
import logging
from click import style
log = logging.getLogger()
def walk_up():
"""
Generator expression to provide paths up to the system root
Does not work on Windows, but then again, neither does Zygoat
"""
path = os.getcwd()
while True:
log.debug(f'Searching {style(path, bold=True)}')
yield path
if path == '/':
raise FileNotFoundError
path = os.path.dirname(path)
def find_nearest(file_name):
"""
Returns the absolute path to the nearest existing file matching file_name
:param file_name: Name of the file to locate
"""
try:
for path in walk_up():
target = os.path.join(path, file_name)
# If the file is not found, walk_up() will error out
if os.path.exists(target):
log.debug(f'Found {file_name} in {os.path.dirname(target)}')
return target
except FileNotFoundError:
raise FileNotFoundError(f'Unable to locate {file_name} in current or any parent directory')
@contextmanager
def use_dir(path):
"""
A context manager for switching into an arbitrary directory for a block
:param path: A valid directory path
"""
owd = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(owd)
@contextmanager
def repository_root():
"""
A shortcut for locating the nearest repository root and doing a use_dir with it
"""
root = os.path.dirname(find_nearest('.git'))
with use_dir(root):
yield
| 22.305556
| 99
| 0.630137
|
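Example usage of the helpers above, assuming the zygoat package is installed and the script runs inside a git checkout that has a setup.cfg somewhere at or above the working directory (otherwise find_nearest raises FileNotFoundError).

import os

from zygoat.utils.files import find_nearest, repository_root, use_dir

setup_cfg = find_nearest("setup.cfg")   # absolute path to the closest setup.cfg
with repository_root():                 # temporarily cd to the repository root
    print("building from", os.getcwd())
with use_dir("/tmp"):                   # use_dir works with any existing directory
    print("now in", os.getcwd())
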
e476fcd6d11c59c7dd308f6f3c93f5e6b9057004
| 10,012
|
py
|
Python
|
EHR_Only/GBT/Death_FAMD.py
|
shreyaskar123/EHR-Discontinuity
|
8d2becfd784b9cbe697f8308d60023701971ef5d
|
[
"MIT"
] | null | null | null |
EHR_Only/GBT/Death_FAMD.py
|
shreyaskar123/EHR-Discontinuity
|
8d2becfd784b9cbe697f8308d60023701971ef5d
|
[
"MIT"
] | null | null | null |
EHR_Only/GBT/Death_FAMD.py
|
shreyaskar123/EHR-Discontinuity
|
8d2becfd784b9cbe697f8308d60023701971ef5d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
medicare = pd.read_csv("/netapp2/home/se197/data/CMS/Data/medicare.csv")
# In[ ]:
# In[1]:
get_ipython().system('pip install xgboost')
# In[2]:
train_set = medicare[medicare.Hospital != 'BWH'] # MGH; n = 204014
validation_set = medicare[medicare.Hospital == 'BWH'] # BWH and Neither; n = 115726
import numpy as np
fifty_perc_EHR_cont = np.percentile(medicare['Cal_MPEC_R0'],50)
train_set_high = train_set[train_set.Cal_MPEC_R0 >= fifty_perc_EHR_cont]
train_set_low= train_set[train_set.Cal_MPEC_R0 < fifty_perc_EHR_cont]
validation_set_high = validation_set[validation_set.Cal_MPEC_R0 >= fifty_perc_EHR_cont]
validation_set_low = validation_set[validation_set.Cal_MPEC_R0 < fifty_perc_EHR_cont]
# In[3]:
predictor_variable = [
'Co_CAD_R0', 'Co_Embolism_R0', 'Co_DVT_R0', 'Co_PE_R0', 'Co_AFib_R0',
'Co_Hypertension_R0', 'Co_Hyperlipidemia_R0', 'Co_Atherosclerosis_R0',
'Co_HF_R0', 'Co_HemoStroke_R0', 'Co_IscheStroke_R0', 'Co_OthStroke_R0',
'Co_TIA_R0', 'Co_COPD_R0', 'Co_Asthma_R0', 'Co_Pneumonia_R0', 'Co_Alcoholabuse_R0',
'Co_Drugabuse_R0', 'Co_Epilepsy_R0', 'Co_Cancer_R0', 'Co_MorbidObesity_R0',
'Co_Dementia_R0', 'Co_Depression_R0', 'Co_Bipolar_R0', 'Co_Psychosis_R0',
'Co_Personalitydisorder_R0', 'Co_Adjustmentdisorder_R0', 'Co_Anxiety_R0',
'Co_Generalizedanxiety_R0', 'Co_OldMI_R0', 'Co_AcuteMI_R0', 'Co_PUD_R0',
'Co_UpperGIbleed_R0', 'Co_LowerGIbleed_R0', 'Co_Urogenitalbleed_R0',
'Co_Othbleed_R0', 'Co_PVD_R0', 'Co_LiverDisease_R0', 'Co_MRI_R0',
'Co_ESRD_R0', 'Co_Obesity_R0', 'Co_Sepsis_R0', 'Co_Osteoarthritis_R0',
'Co_RA_R0', 'Co_NeuroPain_R0', 'Co_NeckPain_R0', 'Co_OthArthritis_R0',
'Co_Osteoporosis_R0', 'Co_Fibromyalgia_R0', 'Co_Migraine_R0', 'Co_Headache_R0',
'Co_OthPain_R0', 'Co_GeneralizedPain_R0', 'Co_PainDisorder_R0',
'Co_Falls_R0', 'Co_CoagulationDisorder_R0', 'Co_WhiteBloodCell_R0', 'Co_Parkinson_R0',
'Co_Anemia_R0', 'Co_UrinaryIncontinence_R0', 'Co_DecubitusUlcer_R0',
'Co_Oxygen_R0', 'Co_Mammography_R0', 'Co_PapTest_R0', 'Co_PSATest_R0',
'Co_Colonoscopy_R0', 'Co_FecalOccultTest_R0', 'Co_FluShot_R0', 'Co_PneumococcalVaccine_R0', 'Co_RenalDysfunction_R0', 'Co_Valvular_R0', 'Co_Hosp_Prior30Days_R0',
'Co_RX_Antibiotic_R0', 'Co_RX_Corticosteroid_R0', 'Co_RX_Aspirin_R0', 'Co_RX_Dipyridamole_R0',
'Co_RX_Clopidogrel_R0', 'Co_RX_Prasugrel_R0', 'Co_RX_Cilostazol_R0', 'Co_RX_Ticlopidine_R0',
'Co_RX_Ticagrelor_R0', 'Co_RX_OthAntiplatelet_R0', 'Co_RX_NSAIDs_R0',
'Co_RX_Opioid_R0', 'Co_RX_Antidepressant_R0', 'Co_RX_AAntipsychotic_R0', 'Co_RX_TAntipsychotic_R0',
'Co_RX_Anticonvulsant_R0', 'Co_RX_PPI_R0', 'Co_RX_H2Receptor_R0', 'Co_RX_OthGastro_R0',
'Co_RX_ACE_R0', 'Co_RX_ARB_R0', 'Co_RX_BBlocker_R0', 'Co_RX_CCB_R0', 'Co_RX_Thiazide_R0',
'Co_RX_Loop_R0', 'Co_RX_Potassium_R0', 'Co_RX_Nitrates_R0', 'Co_RX_Aliskiren_R0',
'Co_RX_OthAntihypertensive_R0', 'Co_RX_Antiarrhythmic_R0', 'Co_RX_OthAnticoagulant_R0',
'Co_RX_Insulin_R0', 'Co_RX_Noninsulin_R0', 'Co_RX_Digoxin_R0', 'Co_RX_Statin_R0',
'Co_RX_Lipid_R0', 'Co_RX_Lithium_R0', 'Co_RX_Benzo_R0', 'Co_RX_ZDrugs_R0',
'Co_RX_OthAnxiolytic_R0', 'Co_RX_Dementia_R0', 'Co_RX_Hormone_R0',
'Co_RX_Osteoporosis_R0', 'Co_N_Drugs_R0', 'Co_N_Hosp_R0', 'Co_Total_HospLOS_R0',
'Co_N_MDVisit_R0', 'Co_RX_AnyAspirin_R0', 'Co_RX_AspirinMono_R0', 'Co_RX_ClopidogrelMono_R0',
'Co_RX_AspirinClopidogrel_R0', 'Co_RX_DM_R0', 'Co_RX_Antipsychotic_R0'
]
co_train_gpop = train_set[predictor_variable]
co_train_high = train_set_high[predictor_variable]
co_train_low = train_set_low[predictor_variable]
co_validation_gpop = validation_set[predictor_variable]
co_validation_high = validation_set_high[predictor_variable]
co_validation_low = validation_set_low[predictor_variable]
# In[4]:
out_train_death_gpop = train_set['ehr_claims_death']
out_train_death_high = train_set_high['ehr_claims_death']
out_train_death_low = train_set_low['ehr_claims_death']
out_validation_death_gpop = validation_set['ehr_claims_death']
out_validation_death_high = validation_set_high['ehr_claims_death']
out_validation_death_low = validation_set_low['ehr_claims_death']
# In[5]:
'''
NOT USING THIS
INSTEAD USING XGBOOST: A FASTER IMPLEMENTATION OF GRADIENT BOOSTED TREES
https://github.com/dmlc/xgboost/tree/master/python-package
def GBT(X,y):
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import GradientBoostingRegressor
from imblearn.over_sampling import SMOTE
param_grid = [{
'learning_rate': [0.05,0.1,0.2],
'n_estimators': [100,150,200]
}]
boost_clf = GradientBoostingRegressor()
boosting_grid_search = GridSearchCV(estimator = boost_clf, param_grid = param_grid)
best_clf = boosting_grid_search.fit(X, y)
return best_clf
'''
# In[6]:
def xgBoost(X,y):
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
model = XGBClassifier()
param_grid = [{
'max_depth': [2,3],
'n_estimators': [60,160],
}]
grid_search = GridSearchCV(
estimator=model,
param_grid=param_grid,
n_jobs = 10,
cv = 5,
verbose=True
)
best_clf = grid_search.fit(X,y)
return best_clf
# In[7]:
def scores(X,y):
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import log_loss
pred = best_clf.predict(X)
actual = y
print(accuracy_score(actual,pred), file = open('death_famd_gbt_ehr.out', 'a'))
print(f1_score(actual,pred), file = open('death_famd_gbt_ehr.out', 'a'))
print(fbeta_score(actual,pred, average = 'macro', beta = 2), file = open('death_famd_gbt_ehr.out', 'a'))
print(roc_auc_score(actual, best_clf.predict_proba(X)[:,1]), file = open('death_famd_gbt_ehr.out', 'a'))
print(log_loss(actual,best_clf.predict_proba(X)[:,1]), file = open('death_famd_gbt_ehr.out', 'a'))
# In[8]:
def cross_val(X,y):
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_validate
from sklearn.metrics import log_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import fbeta_score
import sklearn
import numpy as np
cv = KFold(n_splits=5, random_state=1, shuffle=True)
log_loss = []
auc = []
accuracy = []
f1 = []
f2 = []
for train_index, test_index in cv.split(X):
X_train, X_test, y_train, y_test = X.iloc[train_index], X.iloc[test_index], y.iloc[train_index], y.iloc[test_index]
model = xgBoost(X_train, y_train)
        prob = model.predict_proba(X_test)[:, 1] # predicted probability of the positive class
        pred = np.round(prob) # hard 0/1 predictions derived from the probabilities
log_loss.append(sklearn.metrics.log_loss(y_test, prob))
auc.append(sklearn.metrics.roc_auc_score(y_test, prob))
accuracy.append(sklearn.metrics.accuracy_score(y_test, pred))
f1.append(sklearn.metrics.f1_score(y_test, pred, average = 'macro'))
f2.append(fbeta_score(y_test,pred, average = 'macro', beta = 2))
print(np.mean(accuracy), file = open('death_famd_gbt_ehr.out', 'a'))
print(np.mean(f1), file = open('death_famd_gbt_ehr.out', 'a'))
print(np.mean(f2), file = open('death_famd_gbt_ehr.out', 'a'))
print(np.mean(auc), file = open('death_famd_gbt_ehr.out', 'a'))
print(np.mean(log_loss), file = open('death_famd_gbt_ehr.out', 'a'))
# # FAMD Transformation
# In[9]:
from prince import FAMD
famd = FAMD(n_components = 15, n_iter = 3, random_state = 101)
for (colName, colData) in co_train_gpop.iteritems():
if (colName != 'Co_N_Drugs_R0' and colName!= 'Co_N_Hosp_R0' and colName != 'Co_Total_HospLOS_R0' and colName != 'Co_N_MDVisit_R0'):
co_train_gpop[colName].replace((1,0) ,('yes','no'), inplace = True)
co_train_low[colName].replace((1,0) ,('yes','no'), inplace = True)
co_train_high[colName].replace((1,0) ,('yes','no'), inplace = True)
co_validation_gpop[colName].replace((1,0), ('yes','no'), inplace = True)
co_validation_high[colName].replace((1,0), ('yes','no'), inplace = True)
co_validation_low[colName].replace((1,0), ('yes','no'), inplace = True)
famd.fit(co_train_gpop)
co_train_gpop_FAMD = famd.transform(co_train_gpop)
famd.fit(co_train_high)
co_train_high_FAMD = famd.transform(co_train_high)
famd.fit(co_train_low)
co_train_low_FAMD = famd.transform(co_train_low)
famd.fit(co_validation_gpop)
co_validation_gpop_FAMD = famd.transform(co_validation_gpop)
famd.fit(co_validation_high)
co_validation_high_FAMD = famd.transform(co_validation_high)
famd.fit(co_validation_low)
co_validation_low_FAMD = famd.transform(co_validation_low)
# # General Population
# In[10]:
print("", file = open('death_famd_gbt_ehr.out', 'a'))
best_clf = xgBoost(co_train_gpop_FAMD, out_train_death_gpop)
cross_val(co_train_gpop_FAMD, out_train_death_gpop)
print("", file = open('death_famd_gbt_ehr.out', 'a'))
scores(co_validation_gpop_FAMD, out_validation_death_gpop)
# In[ ]:
# # High Continuity
# In[11]:
best_clf = xgBoost(co_train_high_FAMD, out_train_death_high)
print("", file = open('death_famd_gbt_ehr.out', 'a'))
cross_val(co_train_high_FAMD, out_train_death_high)
print("", file = open('death_famd_gbt_ehr.out', 'a'))
scores(co_validation_high_FAMD, out_validation_death_high)
# # Low Continuity
#
# In[12]:
print("", file = open('death_famd_gbt_ehr.out', 'a'))
best_clf = xgBoost(co_train_low_FAMD, out_train_death_low)
cross_val(co_train_low_FAMD, out_train_death_low)
print("", file = open('death_famd_gbt_ehr.out', 'a'))
scores(co_validation_low_FAMD, out_validation_death_low)
# In[ ]:
# In[ ]:
# In[ ]:
| 32.718954
| 169
| 0.723232
|
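One caveat in the script above is that FAMD is re-fit separately on each split, so the training and validation components do not live in the same projection. A minimal sketch of fitting once and reusing the transform, on synthetic data with illustrative column names, using the same prince API the script relies on:

import numpy as np
import pandas as pd
from prince import FAMD

rng = np.random.RandomState(101)
df = pd.DataFrame({
    "Co_Flag_A": rng.choice(["yes", "no"], 500),
    "Co_Flag_B": rng.choice(["yes", "no"], 500),
    "Co_N_Drugs_R0": rng.poisson(3, 500),
    "Co_N_Hosp_R0": rng.poisson(1, 500),
})
train, valid = df.iloc[:400], df.iloc[400:]

famd = FAMD(n_components=2, n_iter=3, random_state=101)
famd.fit(train)                              # fit on the training split only
train_components = famd.transform(train)
valid_components = famd.transform(valid)     # same projection as the training data
print(train_components.shape, valid_components.shape)
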
ded81f4729ad91b8f22919c07d805a06a6e001fd
| 4,587
|
py
|
Python
|
mnist/rde.py
|
jmaces/fw-rde
|
7ae126eed6c7ad2acff551fcff834ee0a96b4c71
|
[
"MIT"
] | 3
|
2021-11-22T19:40:06.000Z
|
2022-01-27T08:57:34.000Z
|
mnist/rde.py
|
ZIB-IOL/fw-rde
|
7ae126eed6c7ad2acff551fcff834ee0a96b4c71
|
[
"MIT"
] | null | null | null |
mnist/rde.py
|
ZIB-IOL/fw-rde
|
7ae126eed6c7ad2acff551fcff834ee0a96b4c71
|
[
"MIT"
] | null | null | null |
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import tensorflow.keras.backend as K
from models import load_model, load_adfmodel
import instances
# GENERAL PARAMETERS
MODE = 'diag' # 'diag', 'half', or 'full'
RANK = 784 # only affects 'half' mode
IMG_SHAPE = [28, 28]
# LOAD MODEL
adfmodel = load_adfmodel(mode=MODE)
model = load_model()
# LOAD DATA STATISTICS AND SAMPLE GENERATOR
mean, covariance = instances.load_statistics(mode=MODE, rank=RANK)
mean = np.expand_dims(mean, 0)
covariance = np.expand_dims(covariance, 0)
generator = instances.load_generator()
def get_data_sample(index):
return (
generator[index],
os.path.splitext(os.path.split(generator.filenames[index])[1])[0],
)
def store_single_result(mapping, index, fname, rate):
savedir = os.path.join('results', fname)
os.makedirs(savedir, exist_ok=True)
mapping = np.reshape(mapping, IMG_SHAPE)
plt.imsave(
os.path.join(
savedir,
'{}-mode-rate{}-nx.png'.format(MODE, rate),
),
mapping.squeeze(),
cmap='Reds',
vmin=0.0,
vmax=1.0,
format='png',
)
def store_collected_results(mappings, index, node, pred, fname, rates,
weights=None, perm=None, order=None):
savedir = os.path.join('results', fname)
os.makedirs(savedir, exist_ok=True)
mappings = np.reshape(mappings, [len(rates)]+IMG_SHAPE)
plt.imsave(
os.path.join(
savedir,
'{}-mode-rates-averaged-nx.png'.format(MODE),
),
np.mean(mappings, axis=0).squeeze(),
cmap='Reds',
vmin=0.0,
vmax=1.0,
format='png',
)
if order is not None:
order = np.reshape(order, IMG_SHAPE)
plt.imsave(
os.path.join(
savedir,
'{}-mode-rates-ordered-nx.png'.format(MODE),
),
order.squeeze(),
cmap='Reds',
vmin=0.0,
vmax=1.0,
format='png',
)
np.savez_compressed(
os.path.join(
savedir,
'{}-mode-rates-nx.npz'.format(MODE),
),
**{
'mapping': np.average(mappings, weights=weights, axis=0).squeeze(),
'mappings': mappings,
'rates': rates,
'index': index,
'mode': MODE,
'node': node,
'prediction': pred,
'rank': RANK,
'weights': weights,
'perm': perm,
'order': order,
}
)
def get_distortion(x, mean=mean, covariance=covariance, model=model,
adfmodel=adfmodel, mode=MODE):
x_tensor = tf.constant(x, dtype=tf.float32)
m_tensor = tf.constant(mean, dtype=tf.float32)
c_tensor = tf.constant(covariance, dtype=tf.float32)
s_flat = tf.placeholder(tf.float32, (np.prod(x_tensor.shape),))
s_tensor = tf.reshape(s_flat, x.shape)
pred = model.predict(x)
node = np.argpartition(pred[0, ...], -2)[-1]
target = pred[0, node]
mean_in = s_tensor*x_tensor + (1-s_tensor)*m_tensor
if mode == 'diag':
covariance_in = tf.square(1-s_tensor)*c_tensor
elif mode == 'half':
covariance_in = c_tensor*(1-s_tensor)
elif mode == 'full':
covrank = len(c_tensor.get_shape().as_list())
perm = ([0] + list(range((covrank-1)//2+1, covrank))
+ list(range(1, (covrank-1)//2+1)))
covariance_in = c_tensor*(1-s_tensor)
covariance_in = K.permute_dimensions(
covariance_in,
perm,
)
covariance_in = covariance_in*(1-s_tensor)
covariance_in = K.permute_dimensions(
covariance_in,
perm,
)
out_mean, out_covariance = adfmodel([mean_in, covariance_in])
if mode == 'diag':
loss = 1/2*(K.mean(K.square(out_mean[..., node]-target))
+ K.mean(out_covariance[..., node]))
elif mode == 'half':
out_covariance = K.sum(K.square(out_covariance), axis=1)
loss = 1/2*(K.mean(K.square(out_mean[..., node]-target))
+ K.mean(out_covariance[..., node]))
elif mode == 'full':
loss = 1/2*(K.mean(K.square(out_mean[..., node]-target))
+ K.mean(out_covariance[..., node, node]))
gradient = K.gradients(loss, [s_flat])[0]
f_out = K.function([s_flat], [loss])
f_gradient = K.function([s_flat], [gradient])
return lambda s: f_out([s])[0], lambda s: f_gradient([s])[0], node, pred
| 31.854167
| 79
| 0.566165
|
ee1efe703de04a4840f3df4ded34039f1fb179d9
| 3,522
|
py
|
Python
|
aiida/orm/utils/remote.py
|
borellim/aiida_core
|
eebef392c81e8b130834a92e1d7abf5e2e30b3ce
|
[
"BSD-2-Clause"
] | 1
|
2019-03-15T10:37:53.000Z
|
2019-03-15T10:37:53.000Z
|
aiida/orm/utils/remote.py
|
odarbelaeze/aiida_core
|
934b4ccdc73a993f2a6656caf516500470e3da08
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/orm/utils/remote.py
|
odarbelaeze/aiida_core
|
934b4ccdc73a993f2a6656caf516500470e3da08
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import six
def clean_remote(transport, path):
"""
Recursively remove a remote folder, with the given absolute path, and all its contents. The path should be
made accessible through the transport channel, which should already be open
:param transport: an open Transport channel
:param path: an absolute path on the remote made available through the transport
"""
if not isinstance(path, six.string_types):
raise ValueError('the path has to be a string type')
if not os.path.isabs(path):
raise ValueError('the path should be absolute')
if not transport.is_open:
raise ValueError('the transport should already be open')
basedir, relative_path = os.path.split(path)
try:
transport.chdir(basedir)
transport.rmtree(relative_path)
except IOError:
pass
def get_calcjob_remote_paths(pks=None, past_days=None, older_than=None, computers=None, user=None):
"""
Return a mapping of computer uuids to a list of remote paths, for a given set of calcjobs. The set of
calcjobs will be determined by a query with filters based on the pks, past_days, older_than,
computers and user arguments.
    :param pks: only include calcjobs with a pk in this list
    :param past_days: only include calcjobs created since past_days
    :param older_than: only include calcjobs older than this many days
    :param computers: only include calcjobs that were run on these computers
:param user: only include calcjobs of this user
:return: mapping of computer uuid and list of remote paths, or None
"""
from datetime import timedelta
from aiida import orm
from aiida.orm import CalcJobNode
from aiida.common import timezone
filters_calc = {}
filters_computer = {}
if user is None:
user = orm.User.objects.get_default()
if computers is not None:
filters_computer['id'] = {'in': [computer.pk for computer in computers]}
if past_days is not None:
filters_calc['mtime'] = {'>': timezone.now() - timedelta(days=past_days)}
if older_than is not None:
filters_calc['mtime'] = {'<': timezone.now() - timedelta(days=older_than)}
if pks:
filters_calc['id'] = {'in': pks}
qb = orm.QueryBuilder()
qb.append(CalcJobNode, tag='calc', project=['attributes.remote_workdir'], filters=filters_calc)
qb.append(orm.Computer, with_node='calc', tag='computer', project=['*'], filters=filters_computer)
qb.append(orm.User, with_node='calc', filters={'email': user.email})
if qb.count() == 0:
return None
path_mapping = {}
for path, computer in qb.all():
if path is not None:
path_mapping.setdefault(computer.uuid, []).append(path)
return path_mapping
| 37.073684
| 110
| 0.636286
|
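A hypothetical usage sketch for the two helpers above; it assumes a configured AiiDA profile (for example inside `verdi shell`) and only prints what would be cleaned, since clean_remote additionally needs an open transport for each computer.

from aiida.orm.utils.remote import get_calcjob_remote_paths

paths_by_computer = get_calcjob_remote_paths(past_days=30)
if paths_by_computer is None:
    print("no matching calcjobs")
else:
    for computer_uuid, paths in paths_by_computer.items():
        # clean_remote(transport, path) would remove each folder, given an open
        # transport to the corresponding computer (not shown here).
        print(computer_uuid, len(paths), "remote work directories")
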
92221a16983961dbc16d8006c9deb115184bee1e
| 247
|
py
|
Python
|
dress/helper/generator_password_helper.py
|
richard-ma/dress
|
86e892673635319c0a1860edb33cdba7ed22a7fb
|
[
"MIT"
] | 2
|
2019-10-23T09:06:47.000Z
|
2019-11-07T12:52:42.000Z
|
dress/helper/generator_password_helper.py
|
richard-ma/dress
|
86e892673635319c0a1860edb33cdba7ed22a7fb
|
[
"MIT"
] | 4
|
2017-12-28T01:44:42.000Z
|
2017-12-31T13:08:18.000Z
|
dress/helper/generator_password_helper.py
|
richard-ma/dress
|
86e892673635319c0a1860edb33cdba7ed22a7fb
|
[
"MIT"
] | 2
|
2019-10-15T07:42:33.000Z
|
2019-10-24T06:49:22.000Z
|
import random
def generator_password_helper(password_len):
# fix #1 bash safety characters without escaping
s = "abcdefghijklmnopqrstuvwxyz01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ,._+:@%-"
return "".join(random.sample(s, password_len))
| 30.875
| 81
| 0.773279
|
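Worth noting: random.sample draws without replacement, so the helper above can never repeat a character and password_len is capped at len(s). A variant using the secrets module avoids both limits and uses the OS CSPRNG; the function name below is illustrative, not part of the dress package.

import secrets

ALPHABET = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ,._+:@%-"

def generate_password(length: int) -> str:
    # Characters are drawn independently, so they may repeat and any length works.
    return "".join(secrets.choice(ALPHABET) for _ in range(length))

print(generate_password(16))
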
73ccfcd346cf5ca3480f25556f400858b28789c2
| 560
|
py
|
Python
|
problem_2.py
|
cconnerolson/aerospace_assignment_5
|
4b325cf7e5ad15dd7a58d24bea06d78dede15ffa
|
[
"MIT"
] | 1
|
2020-11-28T05:15:54.000Z
|
2020-11-28T05:15:54.000Z
|
problem_2.py
|
cconnerolson/aerospace_assignment_5
|
4b325cf7e5ad15dd7a58d24bea06d78dede15ffa
|
[
"MIT"
] | null | null | null |
problem_2.py
|
cconnerolson/aerospace_assignment_5
|
4b325cf7e5ad15dd7a58d24bea06d78dede15ffa
|
[
"MIT"
] | 1
|
2020-11-28T05:15:57.000Z
|
2020-11-28T05:15:57.000Z
|
""" A large tank supplies helium through a conv-div nozzle to the atmosphere. Pressure in the tank remains constant at
P_t = 8 MPa and temperature remains constant at T_t = 1000 K. There are no shock waves at the nozzle. The nozzle is
designed to discharge at exit Mach number of 3.5 with exit area A_e = 100 mm². For helium, 𝜸 = 1.66 and
R = 2077 J/(kg·K).
Determine:
-the pressure at the nozzle exit, P_e
-the mass flux through the device, mdot
"""
# Givens
P_t = 8 # MPa
T_t = 1000 # K
Ma = 3.5
A_e = 100 # mm²
gamma = 1.66 # 𝜸
R = 2077 # J/(kg·K)
| 29.473684
| 119
| 0.692857
|
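The file stops at the givens; a hedged worked solution using the standard isentropic relations for a perfect gas (valid here since there are no shocks and the nozzle runs at its design exit Mach number) would look like this:

from math import sqrt

P_t = 8e6        # Pa
T_t = 1000.0     # K
Ma = 3.5
A_e = 100e-6     # m^2
gamma = 1.66
R = 2077.0       # J/(kg K)

ratio = 1.0 + (gamma - 1.0) / 2.0 * Ma**2          # T_t / T_e
T_e = T_t / ratio
P_e = P_t / ratio ** (gamma / (gamma - 1.0))       # isentropic pressure ratio
rho_e = P_e / (R * T_e)                            # ideal gas law at the exit
V_e = Ma * sqrt(gamma * R * T_e)                   # exit velocity from the local speed of sound
mdot = rho_e * A_e * V_e                           # mass flux through the exit plane

print("P_e  = %.1f kPa" % (P_e / 1e3))    # roughly 137 kPa
print("mdot = %.3f kg/s" % mdot)          # roughly 0.096 kg/s
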
0b0e032dbb428892d3a1932c438a1e7475eca13d
| 4,605
|
py
|
Python
|
src/uvm/base/uvm_topdown_phase.py
|
rodrigomelo9/uvm-python
|
e3127eba2cc1519a61dc6f736d862a8dcd6fce20
|
[
"Apache-2.0"
] | 140
|
2020-01-18T00:14:17.000Z
|
2022-03-29T10:57:24.000Z
|
src/uvm/base/uvm_topdown_phase.py
|
Mohsannaeem/uvm-python
|
1b8768a1358d133465ede9cadddae651664b1d53
|
[
"Apache-2.0"
] | 24
|
2020-01-18T18:40:58.000Z
|
2021-03-25T17:39:07.000Z
|
src/uvm/base/uvm_topdown_phase.py
|
Mohsannaeem/uvm-python
|
1b8768a1358d133465ede9cadddae651664b1d53
|
[
"Apache-2.0"
] | 34
|
2020-01-18T12:22:59.000Z
|
2022-02-11T07:03:11.000Z
|
#----------------------------------------------------------------------
# Copyright 2007-2011 Mentor Graphics Corporation
# Copyright 2007-2010 Cadence Design Systems, Inc.
# Copyright 2010 Synopsys, Inc.
# Copyright 2019 Tuomas Poikela (tpoikela)
# All Rights Reserved Worldwide
#
# Licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See
# the License for the specific language governing
# permissions and limitations under the License.
#----------------------------------------------------------------------
#------------------------------------------------------------------------------
#
# Class: uvm_topdown_phase
#
#------------------------------------------------------------------------------
# Virtual base class for function phases that operate top-down.
# The pure virtual function execute() is called for each component.
#
# A top-down function phase completes when the <execute()> method
# has been called and returned on all applicable components
# in the hierarchy.
from .uvm_phase import UVMPhase
from .uvm_globals import uvm_report_fatal, uvm_report_info
from .uvm_debug import uvm_debug
from .uvm_object_globals import (UVM_DEBUG, UVM_PHASE_ENDED, UVM_PHASE_EXECUTING, UVM_PHASE_IMP,
UVM_PHASE_READY_TO_END, UVM_PHASE_STARTED)
class UVMTopdownPhase(UVMPhase):
def __init__(self, name):
"""
Function: new
Create a new instance of a top-down phase
Args:
name:
"""
UVMPhase.__init__(self, name, UVM_PHASE_IMP)
def traverse(self, comp, phase, state):
"""
Function: traverse
Traverses the component tree in top-down order, calling `execute` for
each component.
Args:
comp:
phase:
state:
"""
uvm_debug(self, 'traverse', self.get_name() +
' traversing topdown phase now with comp' + comp.get_name())
name = ""
phase_domain = phase.get_domain()
comp_domain = comp.get_domain()
if UVMPhase.m_phase_trace:
dom_name = "NO DOMAIN"
if comp_domain is not None:
dom_name = comp_domain.get_name()
uvm_report_info("PH_TRACE", ("topdown-phase phase={} state={} comp={}"
+ "comp.domain={} phase.domain={}").format(
str(phase), str(state), comp.get_full_name(),
dom_name, phase_domain.get_name()), UVM_DEBUG)
from .uvm_domain import UVMDomain
if phase_domain == UVMDomain.get_common_domain() or phase_domain == comp_domain:
if state == UVM_PHASE_STARTED:
comp.m_current_phase = phase
comp.m_apply_verbosity_settings(phase)
comp.phase_started(phase)
elif state == UVM_PHASE_EXECUTING:
if not(phase.get_name() == "build" and comp.m_build_done):
ph = self
comp.m_phasing_active += 1
if self in comp.m_phase_imps:
ph = comp.m_phase_imps[self]
ph.execute(comp, phase)
comp.m_phasing_active -= 1
elif state == UVM_PHASE_READY_TO_END:
comp.phase_ready_to_end(phase)
elif state == UVM_PHASE_ENDED:
comp.phase_ended(phase)
comp.m_current_phase = None
else:
uvm_report_fatal("PH_BADEXEC","topdown phase traverse internal error")
if comp.has_first_child():
child = comp.get_first_child()
while child is not None:
self.traverse(child, phase, state)
child = comp.get_next_child()
def execute(self, comp, phase):
"""
Function: execute
Executes the top-down phase `phase` for the component `comp`.
Args:
comp:
phase:
"""
# reseed this process for random stability
#process proc = process::self();
#proc.srandom(uvm_create_random_seed(phase.get_type_name(), comp.get_full_name()));
comp.m_current_phase = phase
self.exec_func(comp,phase)
| 36.259843
| 96
| 0.573073
|
70670f11e55afba7e0a7ed7fe0319d5a61bf025f
| 9,724
|
py
|
Python
|
Wsd2.py
|
pratipo/wsd
|
b1322dc5372cb09e46f601fecac0394ade3d4c76
|
[
"MIT"
] | null | null | null |
Wsd2.py
|
pratipo/wsd
|
b1322dc5372cb09e46f601fecac0394ade3d4c76
|
[
"MIT"
] | null | null | null |
Wsd2.py
|
pratipo/wsd
|
b1322dc5372cb09e46f601fecac0394ade3d4c76
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# wsd project main script
# hardware: ws2801 led strips + raspberry pi + internet adapter
# software pulls tweets from an 'admin' (tweets and retweets) and
# displays the last result through the led strip
# Written by Pratipo.org, highly based on Adafruit's IoT Printer. MIT license.
# MUST BE RUN AS ROOT (due to GPIO access)
import RPi.GPIO as GPIO
import time, random
class Wsd2:
def __init__(self, m=30, h=7, w=5):
# Open SPI device
dev = "/dev/spidev0.0"
self.spidev = file(dev, "wb")
self.lapse = 0.1
self.tick = 0
self.gamma = bytearray(256)
for i in range(256):
self.gamma[i] = int(pow(float(i) / 255.0, 2.5) * 255.0 + 0.5)
self.asciiTable = [
[0x00,0x00,0x00,0x00,0x00], # 0x20 32
[0x00,0x00,0x6f,0x00,0x00], # ! 0x21 33
[0x00,0x07,0x00,0x07,0x00], # " 0x22 34
[0x14,0x7f,0x14,0x7f,0x14], # # 0x23 35
[0x00,0x07,0x04,0x1e,0x00], # $ 0x24 36
[0x23,0x13,0x08,0x64,0x62], # % 0x25 37
[0x36,0x49,0x56,0x20,0x50], # & 0x26 38
[0x00,0x00,0x07,0x00,0x00], # ' 0x27 39
[0x00,0x1c,0x22,0x41,0x00], # ( 0x28 40
[0x00,0x41,0x22,0x1c,0x00], # ) 0x29 41
[0x14,0x08,0x3e,0x08,0x14], # * 0x2a 42
[0x08,0x08,0x3e,0x08,0x08], # + 0x2b 43
[0x00,0x50,0x30,0x00,0x00], # , 0x2c 44
[0x08,0x08,0x08,0x08,0x08], # - 0x2d 45
[0x00,0x60,0x60,0x00,0x00], # . 0x2e 46
[0x20,0x10,0x08,0x04,0x02], # / 0x2f 47
[0x3e,0x51,0x49,0x45,0x3e], # 0 0x30 48
[0x00,0x42,0x7f,0x40,0x00], # 1 0x31 49
[0x42,0x61,0x51,0x49,0x46], # 2 0x32 50
[0x21,0x41,0x45,0x4b,0x31], # 3 0x33 51
[0x18,0x14,0x12,0x7f,0x10], # 4 0x34 52
[0x27,0x45,0x45,0x45,0x39], # 5 0x35 53
[0x3c,0x4a,0x49,0x49,0x30], # 6 0x36 54
[0x01,0x71,0x09,0x05,0x03], # 7 0x37 55
[0x36,0x49,0x49,0x49,0x36], # 8 0x38 56
[0x06,0x49,0x49,0x29,0x1e], # 9 0x39 57
[0x00,0x36,0x36,0x00,0x00], # : 0x3a 58
[0x00,0x56,0x36,0x00,0x00], # ; 0x3b 59
[0x08,0x14,0x22,0x41,0x00], # < 0x3c 60
[0x14,0x14,0x14,0x14,0x14], # = 0x3d 61
[0x00,0x41,0x22,0x14,0x08], # > 0x3e 62
[0x02,0x01,0x51,0x09,0x06], # ? 0x3f 63
[0x3e,0x41,0x5d,0x49,0x4e], # @ 0x40 64
[0x7e,0x09,0x09,0x09,0x7e], # A 0x41 65
[0x7f,0x49,0x49,0x49,0x36], # B 0x42 66
[0x3e,0x41,0x41,0x41,0x22], # C 0x43 67
[0x7f,0x41,0x41,0x41,0x3e], # D 0x44 68
[0x7f,0x49,0x49,0x49,0x41], # E 0x45 69
[0x7f,0x09,0x09,0x09,0x01], # F 0x46 70
[0x3e,0x41,0x49,0x49,0x7a], # G 0x47 71
[0x7f,0x08,0x08,0x08,0x7f], # H 0x48 72
[0x00,0x41,0x7f,0x41,0x00], # I 0x49 73
[0x20,0x40,0x41,0x3f,0x01], # J 0x4a 74
[0x7f,0x08,0x14,0x22,0x41], # K 0x4b 75
[0x7f,0x40,0x40,0x40,0x40], # L 0x4c 76
[0x7f,0x02,0x0c,0x02,0x7f], # M 0x4d 77
[0x7f,0x04,0x08,0x10,0x7f], # N 0x4e 78
[0x3e,0x41,0x41,0x41,0x3e], # O 0x4f 79
[0x7f,0x09,0x09,0x09,0x06], # P 0x50 80
[0x3e,0x41,0x51,0x21,0x5e], # Q 0x51 81
[0x7f,0x09,0x19,0x29,0x46], # R 0x52 82
[0x46,0x49,0x49,0x49,0x31], # S 0x53 83
[0x01,0x01,0x7f,0x01,0x01], # T 0x54 84
[0x3f,0x40,0x40,0x40,0x3f], # U 0x55 85
[0x0f,0x30,0x40,0x30,0x0f], # V 0x56 86
[0x3f,0x40,0x30,0x40,0x3f], # W 0x57 87
[0x63,0x14,0x08,0x14,0x63], # X 0x58 88
[0x07,0x08,0x70,0x08,0x07], # Y 0x59 89
[0x61,0x51,0x49,0x45,0x43], # Z 0x5a 90
[0x3c,0x4a,0x49,0x29,0x1e], # [ 0x5b 91
[0x02,0x04,0x08,0x10,0x20], # \ 0x5c 92
[0x00,0x41,0x7f,0x00,0x00], # ] 0x5d 93
[0x04,0x02,0x01,0x02,0x04], # ^ 0x5e 94
[0x40,0x40,0x40,0x40,0x40], # _ 0x5f 95
[0x00,0x00,0x03,0x04,0x00], # ` 0x60 96
[0x20,0x54,0x54,0x54,0x78], # a 0x61 97
[0x7f,0x48,0x44,0x44,0x38], # b 0x62 98
[0x38,0x44,0x44,0x44,0x20], # c 0x63 99
[0x38,0x44,0x44,0x48,0x7f], # d 0x64 100
[0x38,0x54,0x54,0x54,0x18], # e 0x65 101
[0x08,0x7e,0x09,0x01,0x02], # f 0x66 102
[0x0c,0x52,0x52,0x52,0x3e], # g 0x67 103
[0x7f,0x08,0x04,0x04,0x78], # h 0x68 104
[0x00,0x44,0x7d,0x40,0x00], # i 0x69 105
[0x20,0x40,0x44,0x3d,0x00], # j 0x6a 106
[0x00,0x7f,0x10,0x28,0x44], # k 0x6b 107
[0x00,0x41,0x7f,0x40,0x00], # l 0x6c 108
[0x7c,0x04,0x18,0x04,0x78], # m 0x6d 109
[0x7c,0x08,0x04,0x04,0x78], # n 0x6e 110
[0x38,0x44,0x44,0x44,0x38], # o 0x6f 111
[0x7c,0x14,0x14,0x14,0x08], # p 0x70 112
[0x08,0x14,0x14,0x18,0x7c], # q 0x71 113
[0x7c,0x08,0x04,0x04,0x08], # r 0x72 114
[0x48,0x54,0x54,0x54,0x20], # s 0x73 115
[0x04,0x3f,0x44,0x40,0x20], # t 0x74 116
[0x3c,0x40,0x40,0x20,0x7c], # u 0x75 117
[0x1c,0x20,0x40,0x20,0x1c], # v 0x76 118
[0x3c,0x40,0x30,0x40,0x3c], # w 0x77 119
[0x44,0x28,0x10,0x28,0x44], # x 0x78 120
[0x0c,0x50,0x50,0x50,0x3c], # y 0x79 121
[0x44,0x64,0x54,0x4c,0x44], # z 0x7a 122
[0x00,0x08,0x36,0x41,0x41], # [ 0x7b 123
[0x00,0x00,0x7f,0x00,0x00], # | 0x7c 124
[0x41,0x41,0x36,0x08,0x00], # ] 0x7d 125
[0x04,0x02,0x04,0x08,0x04] # ~ 0x7e 126
]
self.modules = m
self.moduleH = h
self.moduleW = w
self.mN = self.moduleH*self.moduleW # pixels in one module
self.preOffset = self.moduleW * 5
#160 == twit total length in chars
#self.stringLength = 140 +10
#self.asciiString = [32 for i in range(140+10)] # 32 == 'space' in the ascii table
#self.binMatrix = [[0 for i in range(self.moduleH)] for j in range(140+10*(self.moduleW+1))]
self.asciiString = [32 for i in range(140 + 10)]
self.binMatrix = [[0 for i in range(h)] for j in range((140+10)*(w+1))]
self.pixels = bytearray(m*w*h*3)
self.mode = 0
self.colors = [0 for i in range(140+10)]
def setText(self, t):
self.asciiString = [ord(c) for c in t]
self.asciiTobinMatrix()
def asciiTobinMatrix(self):
bits = [1,2,4,8,16,32,64,128]
print("twit length")
print(len(self.asciiString))
self.binMatrix = [[0 for i in range(self.moduleH)] for j in range((140+10)*(self.moduleW+1))]
for char in range(len(self.asciiString)):
for col in range(self.moduleW):
for row in range(self.moduleH):
self.binMatrix[char*(self.moduleW+1) + col + self.preOffset][row] = bool( self.asciiTable[self.asciiString [char]-32][col] & bits[row] )
def setPixel(self, x, y, color):
b = x/int(self.moduleW)
r = x%self.moduleW # division left-over part
if (y%2 == 1): # if odd raw
r = (self.moduleW-1) - r
# pixels in previous panels + previous pixels in panel
pindex = ( b*self.mN + ( y*int(self.moduleW) + r ) )*3
for i in range(3):
self.pixels[pindex + i] = self.gamma[color[i]]
def display(self):
# print 'displaying text'
self.spidev.write(self.pixels)
self.spidev.flush()
time.sleep(0.001)
def wheel(self, wheelPos):
if (wheelPos < 85):
return [wheelPos * 3, 255 - wheelPos * 3, 0]
elif (wheelPos < 170):
wheelPos -= 85
return [255 - wheelPos * 3, 0, wheelPos * 3]
else:
wheelPos -= 170
return [0, wheelPos * 3, 255 - wheelPos * 3]
def loadPixels(self, seed, seed2, offset=0):
for x in range(self.modules*self.moduleW):
color= [0,0,0]
for y in range(self.moduleH):
if (self.mode == 7):
color = [255,0,0]
elif (self.mode == 8):
color = [0,255,0]
elif (self.mode == 9):
color = [0,0,255]
elif (self.mode == 10):
color = [255,255,255]
#random colored pixels
elif (self.mode == 0):
color = self.wheel((random.randint(0,255)+x+self.tick)%255)
#gradient colored pixels... modes 1 to 3
elif (self.mode == 1):
color = self.wheel((seed+4*(x+self.tick))%255)
elif (self.mode == 2):
color = self.wheel((seed-4*self.tick-3*y)%255)
elif (self.mode == 3):
color = self.wheel((seed+4*x)%255)
# stripes
elif (self.mode == 4):
                    if (((x+y)//seed2) % 2):
color = self.wheel(seed%255)
else:
color= [100,100,100]
else:
                    if ( len(self.colors)-1 >= (x+offset-1)//(self.moduleW+1) ):
                        if(self.mode == 5):
                            color = self.wheel( (self.colors[(x+offset-1)//(self.moduleW+1)] + self.tick)%255 )
                        elif(self.mode == 6):
                            color = self.wheel((self.colors[(x+offset-1)//(self.moduleW+1)] + 2*self.tick + 4*y)%255)
                if ( (x+offset)>=len(self.binMatrix) ): # past the end of the text -> turn this pixel off
self.setPixel(x, y, [0, 0 ,0])
else:
if (self.binMatrix[x+offset][y]):
c = [0,0,0]
                        # fix the different color scheme in the last 9 modules
                        # remap the R, G, B channels
if (x < self.moduleW*12):
# first 11 modules are GBR :$
c = [color[1],color[2],color[0]]
else:
# last modules are BRG :$
c = [color[2],color[0],color[1]]
#end fix
self.setPixel(x, y, c)
else:
self.setPixel(x, y, [0, 0 ,0])
self.tick += 1
self.display()
def rollPixels(self, roll):
self.colors = [random.randint(0,255) for r in range(len(self.asciiString))]
print(self.colors)
        self.mode = random.randint(1,10) # modes 1-6: color patterns, 7-10: solid colors
print(self.mode)
seedColor = random.randint(0,255)
bandwidth = random.randint(1,7)
if (roll):
ran = range( (self.moduleW+1)*(len(self.asciiString))+self.preOffset )
else:
ran = range(1)
for offset in ran:
self.loadPixels(seedColor, bandwidth, offset)
if(roll):
time.sleep(self.lapse)
else:
time.sleep(self.lapse*300)
def staticPixels(self):
self.colors = 0
self.mode = 7
print(self.mode)
seedColor = 0
bandwidth = 0
self.loadPixels(seedColor, bandwidth, 0)
time.sleep(self.lapse*300)
#display = Wsd2()
#display.setText("Practicas artisticas y redes")
#display.rollPixels()
| 33.881533
| 142
| 0.598828
|
b38a5ec55877db1c11f149d0a15cf8a996b68431
| 33,542
|
py
|
Python
|
bin/sa_haveibeenpwned/aob_py2/future/backports/misc.py
|
hRun/SA-haveibeenpwned
|
2a8ae3dedc405dc3c8dac1cb6a705a70f574afdb
|
[
"Apache-2.0"
] | 2
|
2020-08-17T07:52:48.000Z
|
2020-12-18T16:39:32.000Z
|
bin/sa_haveibeenpwned/aob_py3/future/backports/misc.py
|
hRun/SA-haveibeenpwned
|
2a8ae3dedc405dc3c8dac1cb6a705a70f574afdb
|
[
"Apache-2.0"
] | 5
|
2020-12-15T23:40:14.000Z
|
2022-02-23T15:43:18.000Z
|
bin/sa_haveibeenpwned/aob_py2/future/backports/misc.py
|
hRun/SA-haveibeenpwned
|
2a8ae3dedc405dc3c8dac1cb6a705a70f574afdb
|
[
"Apache-2.0"
] | 4
|
2019-05-16T09:57:33.000Z
|
2021-07-14T12:31:21.000Z
|
"""
Miscellaneous function (re)definitions from the Py3.4+ standard library
for Python 2.6/2.7.
- math.ceil (for Python 2.7)
- collections.OrderedDict (for Python 2.6)
- collections.Counter (for Python 2.6)
- collections.ChainMap (for all versions prior to Python 3.3)
- itertools.count (for Python 2.6, with step parameter)
- subprocess.check_output (for Python 2.6)
- reprlib.recursive_repr (for Python 2.6+)
- functools.cmp_to_key (for Python 2.6)
"""
from __future__ import absolute_import
import subprocess
from math import ceil as oldceil
from collections import Mapping, MutableMapping
from operator import itemgetter as _itemgetter, eq as _eq
import sys
import heapq as _heapq
from _weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from socket import getaddrinfo, SOCK_STREAM, error, socket
from future.utils import iteritems, itervalues, PY26, PY3
def ceil(x):
"""
Return the ceiling of x as an int.
This is the smallest integral value >= x.
"""
return int(oldceil(x))
########################################################################
### reprlib.recursive_repr decorator from Py3.4
########################################################################
from itertools import islice
if PY3:
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
else:
try:
from thread import get_ident
except ImportError:
from dummy_thread import get_ident
def recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
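# Illustrative example of the decorator above (hypothetical self-referential
# container; with @recursive_repr() the nested call collapses to '...'):
#
#     class Node(object):
#         def __init__(self):
#             self.children = []
#         @recursive_repr()
#         def __repr__(self):
#             return 'Node(%r)' % (self.children,)
#
#     n = Node(); n.children.append(n)
#     repr(n)        # -> 'Node([...])'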
################################################################################
### OrderedDict
################################################################################
class _Link(object):
__slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# The sentinel is in self.__hardroot with a weakref proxy in self.__root.
# The prev links are weakref proxies (to prevent circular references).
# Individual links are kept alive by the hard reference in self.__map.
# Those hard references disappear when a key is deleted from an OrderedDict.
def __init__(*args, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
regular dictionaries, but keyword arguments are not recommended because
their insertion order is arbitrary.
'''
if not args:
raise TypeError("descriptor '__init__' of 'OrderedDict' object "
"needs an argument")
self = args[0]
args = args[1:]
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__hardroot = _Link()
self.__root = root = _proxy(self.__hardroot)
root.prev = root.next = root
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value,
dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
self.__map[key] = link = Link()
root = self.__root
last = root.prev
link.prev, link.next, link.key = last, root, key
last.next = link
root.prev = proxy(link)
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link = self.__map.pop(key)
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
root = self.__root
curr = root.next
while curr is not root:
yield curr.key
curr = curr.next
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
root = self.__root
curr = root.prev
while curr is not root:
yield curr.key
curr = curr.prev
def clear(self):
'od.clear() -> None. Remove all items from od.'
root = self.__root
root.prev = root.next = root
self.__map.clear()
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root.prev
link_prev = link.prev
link_prev.next = root
root.prev = link_prev
else:
link = root.next
link_next = link.next
root.next = link_next
link_next.prev = root
key = link.key
del self.__map[key]
value = dict.pop(self, key)
return key, value
def move_to_end(self, key, last=True):
'''Move an existing element to the end (or beginning if last==False).
Raises KeyError if the element does not exist.
When last=True, acts like a fast version of self[key]=self.pop(key).
'''
link = self.__map[key]
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
root = self.__root
if last:
last = root.prev
link.prev = last
link.next = root
last.next = root.prev = link
else:
first = root.next
link.prev = root
link.next = first
root.next = first.prev = link
def __sizeof__(self):
sizeof = sys.getsizeof
n = len(self) + 1 # number of links including root
size = sizeof(self.__dict__) # instance dictionary
size += sizeof(self.__map) * 2 # internal dict and inherited dict
size += sizeof(self.__hardroot) * n # link objects
size += sizeof(self.__root) * n # proxy objects
return size
update = __update = MutableMapping.update
keys = MutableMapping.keys
values = MutableMapping.values
items = MutableMapping.items
__ne__ = MutableMapping.__ne__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
@recursive_repr()
def __repr__(self):
'od.__repr__() <==> repr(od)'
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
def __reduce__(self):
'Return state information for pickling'
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
return self.__class__, (), inst_dict or None, None, iter(self.items())
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
'''
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return dict.__eq__(self, other) and all(map(_eq, self, other))
return dict.__eq__(self, other)
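# Illustrative example of the Py3-style ordering API backported above:
# >>> od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
# >>> od.move_to_end('a')
# >>> list(od)
# ['b', 'c', 'a']
# >>> od.popitem(last=False)
# ('b', 2)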
# {{{ http://code.activestate.com/recipes/576611/ (r11)
try:
from operator import itemgetter
from heapq import nlargest
except ImportError:
pass
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(*args, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
if not args:
raise TypeError("descriptor '__init__' of 'Counter' object "
"needs an argument")
self = args[0]
args = args[1:]
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
super(Counter, self).__init__()
self.update(*args, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abcdeabcdabcaba').most_common(3)
[('a', 5), ('b', 4), ('c', 3)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.items(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
return _chain.from_iterable(_starmap(_repeat, self.items()))
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
# There is no equivalent method for counters because setting v=1
# means that no element can have a count greater than one.
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(*args, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
        # replace behavior results in some of the original untouched counts
        # being mixed in with all of the other counts for a mishmash that
        # doesn't have a straightforward interpretation in most counting
# contexts. Instead, we implement straight-addition. Both the inputs
# and outputs are allowed to contain zero and negative counts.
if not args:
raise TypeError("descriptor 'update' of 'Counter' object "
"needs an argument")
self = args[0]
args = args[1:]
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
iterable = args[0] if args else None
if iterable is not None:
if isinstance(iterable, Mapping):
if self:
self_get = self.get
for elem, count in iterable.items():
self[elem] = count + self_get(elem, 0)
else:
super(Counter, self).update(iterable) # fast path when counter is empty
else:
_count_elements(self, iterable)
if kwds:
self.update(kwds)
def subtract(*args, **kwds):
'''Like dict.update() but subtracts counts instead of replacing them.
Counts can be reduced below zero. Both the inputs and outputs are
allowed to contain zero and negative counts.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.subtract('witch') # subtract elements from another iterable
>>> c.subtract(Counter('watch')) # subtract elements from another counter
>>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
0
>>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
-1
'''
if not args:
raise TypeError("descriptor 'subtract' of 'Counter' object "
"needs an argument")
self = args[0]
args = args[1:]
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
iterable = args[0] if args else None
if iterable is not None:
self_get = self.get
if isinstance(iterable, Mapping):
for elem, count in iterable.items():
self[elem] = self_get(elem, 0) - count
else:
for elem in iterable:
self[elem] = self_get(elem, 0) - 1
if kwds:
self.subtract(kwds)
def copy(self):
'Return a shallow copy.'
return self.__class__(self)
def __reduce__(self):
return self.__class__, (dict(self),)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
super(Counter, self).__delitem__(elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
try:
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
except TypeError:
# handle case where values are not orderable
return '{0}({1!r})'.format(self.__class__.__name__, dict(self))
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count + other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count - other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count < 0:
result[elem] = 0 - count
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = other_count if count < other_count else count
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = count if count < other_count else other_count
if newcount > 0:
result[elem] = newcount
return result
def __pos__(self):
'Adds an empty counter, effectively stripping negative and zero counts'
return self + Counter()
def __neg__(self):
'''Subtracts from an empty counter. Strips positive and zero counts,
and flips the sign on negative counts.
'''
return Counter() - self
def _keep_positive(self):
'''Internal method to strip elements with a negative or zero count'''
nonpositive = [elem for elem, count in self.items() if not count > 0]
for elem in nonpositive:
del self[elem]
return self
def __iadd__(self, other):
'''Inplace add from another counter, keeping only positive counts.
>>> c = Counter('abbb')
>>> c += Counter('bcc')
>>> c
Counter({'b': 4, 'c': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] += count
return self._keep_positive()
def __isub__(self, other):
'''Inplace subtract counter, but keep only results with positive counts.
>>> c = Counter('abbbc')
>>> c -= Counter('bccd')
>>> c
Counter({'b': 2, 'a': 1})
'''
for elem, count in other.items():
self[elem] -= count
return self._keep_positive()
def __ior__(self, other):
'''Inplace union is the maximum of value from either counter.
>>> c = Counter('abbb')
>>> c |= Counter('bcc')
>>> c
Counter({'b': 3, 'c': 2, 'a': 1})
'''
for elem, other_count in other.items():
count = self[elem]
if other_count > count:
self[elem] = other_count
return self._keep_positive()
def __iand__(self, other):
'''Inplace intersection is the minimum of corresponding counts.
>>> c = Counter('abbb')
>>> c &= Counter('bcc')
>>> c
Counter({'b': 1})
'''
for elem, count in self.items():
other_count = other[elem]
if other_count < count:
self[elem] = other_count
return self._keep_positive()
def check_output(*popenargs, **kwargs):
"""
For Python 2.6 compatibility: see
http://stackoverflow.com/questions/4814970/
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd)
return output
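# Illustrative use of the check_output() backport (assumes a POSIX 'echo' binary):
# >>> check_output(['echo', 'hello'])
# 'hello\n'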
def count(start=0, step=1):
"""
``itertools.count`` in Py 2.6 doesn't accept a step
parameter. This is an enhanced version of ``itertools.count``
    for Py2.6, equivalent to ``itertools.count`` in Python 2.7+.
"""
while True:
yield start
start += step
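# Illustrative example of the step-aware count() backport (islice is imported
# near the top of this module):
# >>> list(islice(count(10, 2), 5))
# [10, 12, 14, 16, 18]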
########################################################################
### ChainMap (helper for configparser and string.Template)
### From the Py3.4 source code. See also:
### https://github.com/kkxue/Py2ChainMap/blob/master/py2chainmap.py
########################################################################
class ChainMap(MutableMapping):
''' A ChainMap groups multiple dicts (or other mappings) together
to create a single, updateable view.
The underlying mappings are stored in a list. That list is public and can
    be accessed or updated using the *maps* attribute. There is no other state.
Lookups search the underlying mappings successively until a key is found.
In contrast, writes, updates, and deletions only operate on the first
mapping.
'''
def __init__(self, *maps):
'''Initialize a ChainMap by setting *maps* to the given mappings.
If no mappings are provided, a single empty dictionary is used.
'''
self.maps = list(maps) or [{}] # always at least one map
def __missing__(self, key):
raise KeyError(key)
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key] # can't use 'key in mapping' with defaultdict
except KeyError:
pass
return self.__missing__(key) # support subclasses that define __missing__
def get(self, key, default=None):
return self[key] if key in self else default
def __len__(self):
return len(set().union(*self.maps)) # reuses stored hash values if possible
def __iter__(self):
return iter(set().union(*self.maps))
def __contains__(self, key):
return any(key in m for m in self.maps)
def __bool__(self):
return any(self.maps)
# Py2 compatibility:
__nonzero__ = __bool__
@recursive_repr()
def __repr__(self):
return '{0.__class__.__name__}({1})'.format(
self, ', '.join(map(repr, self.maps)))
@classmethod
def fromkeys(cls, iterable, *args):
'Create a ChainMap with a single dict created from the iterable.'
return cls(dict.fromkeys(iterable, *args))
def copy(self):
'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
return self.__class__(self.maps[0].copy(), *self.maps[1:])
__copy__ = copy
def new_child(self, m=None): # like Django's Context.push()
'''
New ChainMap with a new map followed by all previous maps. If no
map is provided, an empty dict is used.
'''
if m is None:
m = {}
return self.__class__(m, *self.maps)
@property
def parents(self): # like Django's Context.pop()
'New ChainMap from maps[1:].'
return self.__class__(*self.maps[1:])
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, key):
try:
del self.maps[0][key]
except KeyError:
raise KeyError('Key not found in the first mapping: {0!r}'.format(key))
def popitem(self):
        'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
try:
return self.maps[0].popitem()
except KeyError:
raise KeyError('No keys found in the first mapping.')
def pop(self, key, *args):
'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
try:
return self.maps[0].pop(key, *args)
except KeyError:
raise KeyError('Key not found in the first mapping: {0!r}'.format(key))
def clear(self):
'Clear maps[0], leaving maps[1:] intact.'
self.maps[0].clear()
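# Illustrative example of the ChainMap semantics described above: lookups search
# the maps in order, while writes only touch maps[0].
# >>> defaults = {'color': 'red', 'user': 'guest'}
# >>> overrides = {'user': 'admin'}
# >>> cm = ChainMap(overrides, defaults)
# >>> cm['user'], cm['color']
# ('admin', 'red')
# >>> cm['color'] = 'blue'       # only maps[0] is modified
# >>> defaults['color']
# 'red'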
# Re-use the same sentinel as in the Python stdlib socket module:
from socket import _GLOBAL_DEFAULT_TIMEOUT
# Was: _GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Backport of 3-argument create_connection() for Py2.6.
Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise error("getaddrinfo returns an empty list")
# Backport from Py2.7 for Py2.6:
def cmp_to_key(mycmp):
"""Convert a cmp= function into a key= function"""
class K(object):
__slots__ = ['obj']
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
def __hash__(self):
raise TypeError('hash not implemented')
return K
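# Illustrative example of cmp_to_key(): adapting an old-style cmp function for
# use with key-based sorting.
# >>> sorted([3, 1, 2], key=cmp_to_key(lambda a, b: a - b))
# [1, 2, 3]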
# Back up our definitions above in case they're useful
_OrderedDict = OrderedDict
_Counter = Counter
_check_output = check_output
_count = count
_ceil = ceil
__count_elements = _count_elements
_recursive_repr = recursive_repr
_ChainMap = ChainMap
_create_connection = create_connection
_cmp_to_key = cmp_to_key
# Overwrite the definitions above with the usual ones
# from the standard library:
if sys.version_info >= (2, 7):
from collections import OrderedDict, Counter
from itertools import count
from functools import cmp_to_key
try:
from subprocess import check_output
except ImportError:
# Not available. This happens with Google App Engine: see issue #231
pass
from socket import create_connection
if sys.version_info >= (3, 0):
from math import ceil
from collections import _count_elements
if sys.version_info >= (3, 3):
from reprlib import recursive_repr
from collections import ChainMap
| 35.645058
| 98
| 0.556288
|
1b34f1346dd031c197f059b7ac504f54dc3988bb
| 4,158
|
py
|
Python
|
lleval.py
|
builtinnya/sevabot-plugins
|
236f8d13b516a3649626b11150ad5d88d3c729d1
|
[
"MIT"
] | 1
|
2015-11-08T06:36:23.000Z
|
2015-11-08T06:36:23.000Z
|
lleval.py
|
builtinnya/sevabot-plugins
|
236f8d13b516a3649626b11150ad5d88d3c729d1
|
[
"MIT"
] | null | null | null |
lleval.py
|
builtinnya/sevabot-plugins
|
236f8d13b516a3649626b11150ad5d88d3c729d1
|
[
"MIT"
] | null | null | null |
#!/sevabot
# -*- coding: utf-8 -*-
"""
Evaluator for lightweight programming languages using LLEval.
LLEval is available at http://colabv6.dan.co.jp/lleval.html
"""
from __future__ import unicode_literals
import logging
import re
import requests
from requests.exceptions import RequestException
from sevabot.bot.stateful import StatefulSkypeHandler
from sevabot.utils import ensure_unicode
logger = logging.getLogger('LLEval')
# Set to debug only during dev
logger.setLevel(logging.INFO)
logger.debug('LLEval module level load import')
HELP_TEXT = """Evaluator for lightweight programming languages using LLEval.
Usage
------------------------------
#!<LANGUAGE_SPECIFIER>
<SOURCE_CODE>
LANGUAGE_SPECIFIER: Specify the language the code is written in. Examples:
py (python), py3 (python3.2), rb (ruby)
/usr/bin/python, /usr/bin/python3.2, /usr/bin/ruby
See http://colabv6.dan.co.jp/lleval.html for details.
SOURCE_CODE: Source code text.
Example (FizzBuzz Questions)
------------------------------
#!py
def fizzbuzz(n):
for i in range(1, n+1):
if i % 15 == 0: yield 'FizzBuzz'
elif i % 5 == 0: yield 'Buzz'
elif i % 3 == 0: yield 'Fizz'
else: yield i
for x in fizzbuzz(100): print x
"""
class LLEvalHandler(StatefulSkypeHandler):
"""
Skype message handler class for the LLEval.
"""
def __init__(self):
"""
Use `init` method to initialize a handler.
"""
logger.debug('LLEval handler constructed')
def init(self, sevabot):
"""
        Set up our state. This is called every time the module is (re)loaded.
:param sevabot: Handle to Sevabot instance.
"""
logger.debug('LLEval handler init')
self.sevabot = sevabot
self.skype = sevabot.getSkype()
def help(self, msg):
"""
Print help text to chat.
"""
msg.Chat.SendMessage(HELP_TEXT)
def lleval(self, lang, src, msg):
"""
Evaluate the source code by making a query to LLEval.
:param lang: Language specifier e.g. py, py3, c.
:param src: Source code.
:param msg: Skype message instance.
"""
try:
payload = {'l': lang, 's': src}
r = requests.get('http://api.dan.co.jp/lleval.cgi', params=payload)
r.raise_for_status()
return r.json()
        except (RequestException, ValueError) as e:
            msg.Chat.SendMessage(str(e))
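    # Illustrative exchange (hypothetical chat message): a body of
    #   #!py
    #   print 40 + 2
    # is posted to LLEval with l='py', and the 'stdout' field of the JSON reply
    # ("42\n") is relayed back to the chat by send_eval_result() below.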
def send_eval_result(self, response, msg):
"""
Send the evaluation result to chat.
"""
if not response:
return
stdout = response.get('stdout', '')
stderr = response.get('stderr', '')
if stdout and stderr:
text = ''
text += 'stdout:\n' + stdout + '\n'
text += 'stderr:\n' + stderr
msg.Chat.SendMessage(text)
elif stdout:
msg.Chat.SendMessage(stdout)
elif stderr:
msg.Chat.SendMessage(stderr)
def handle_message(self, msg, status):
"""
Override this method to customize a handler.
"""
body = ensure_unicode(msg.Body)
logger.debug("LLEval handler got: {}".format(body))
if not body.startswith('#!'):
return False
if status == 'SENT':
# Avoid infinite loop caused by self-reproducing code
return True
        m = re.match(r'#!(?P<lang>\S+)\s+(?P<src>.*)', body, re.DOTALL)
if not m:
self.help(msg)
return True
lang = m.group('lang')
src = m.group('src')
if lang.startswith('/'):
# Source code contains language specifier
lang = None
src = m.group(0)
r = self.lleval(lang, src, msg)
self.send_eval_result(r, msg)
return True
def shutdown(self):
"""
Called when the module is reloaded.
"""
logger.debug('LLEval handler shutdown')
# Export the instance to Sevabot
sevabot_handler = LLEvalHandler()
__all__ = ['sevabot_handler']
| 22.475676
| 79
| 0.575998
|
bd9710c3860bcadf09a7bc885f570942cdc62f73
| 171
|
py
|
Python
|
tower.py
|
SAVHS/UnfortunateMisunderstanding
|
537841fc7dd168f071fc634c23ec33c86151498c
|
[
"Unlicense"
] | 1
|
2021-12-12T05:13:42.000Z
|
2021-12-12T05:13:42.000Z
|
tower.py
|
SAVHS/UnfortunateMisunderstanding
|
537841fc7dd168f071fc634c23ec33c86151498c
|
[
"Unlicense"
] | null | null | null |
tower.py
|
SAVHS/UnfortunateMisunderstanding
|
537841fc7dd168f071fc634c23ec33c86151498c
|
[
"Unlicense"
] | null | null | null |
from panda3d.core import *
class Tower():
    def __init__(self, pos, health, tower="models/room_industrial"):
        self.pos = pos
        self.health = health
        # 'loader' is the global asset loader that Panda3D's ShowBase exposes
        self.tower = loader.loadModel(tower)
| 24.428571
| 68
| 0.672515
|
f894b6e2f29a33c30e9c17176c640c2ea09efb0c
| 1,750
|
py
|
Python
|
niscv_v2/experiments/simulation/kernel_cost.py
|
IanFla/Importance-Sampling
|
f2dd2164e95377d2cf025fcddd19b2592394e4d7
|
[
"Apache-2.0"
] | null | null | null |
niscv_v2/experiments/simulation/kernel_cost.py
|
IanFla/Importance-Sampling
|
f2dd2164e95377d2cf025fcddd19b2592394e4d7
|
[
"Apache-2.0"
] | null | null | null |
niscv_v2/experiments/simulation/kernel_cost.py
|
IanFla/Importance-Sampling
|
f2dd2164e95377d2cf025fcddd19b2592394e4d7
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import scipy.stats as st
from niscv_v2.basics.exp import Exp
from niscv_v2.basics import utils
from datetime import datetime as dt
import pickle
def experiment(dim, fun, size_est, sn, size_kn, ratio):
mean = np.zeros(dim)
target = lambda x: st.multivariate_normal(mean=mean).pdf(x)
proposal = st.multivariate_normal(mean=mean + 0.5, cov=4)
exp = Exp(dim, target, fun, proposal, size_est, sn=sn, adjust=False, show=False)
ts = [dt.now()]
exp.initial_estimation()
ts.append(dt.now())
exp.resampling(size_kn, ratio, bootstrap='st')
ts.append(dt.now())
exp.density_estimation(mode=1, local=False, gamma=0.3, bdwth=1.0, alpha0=0.1)
ts.append(dt.now())
exp.nonparametric_estimation(mode=2)
ts.append(dt.now())
exp.control_calculation()
ts.append(dt.now())
exp.regression_estimation()
ts.append(dt.now())
exp.likelihood_estimation()
ts.append(dt.now())
ts = np.array(ts)
return ts[1:] - ts[:-1]
def run(it, dim):
size_kns = [50, 100, 150, 200, 250, 300, 350, 400, 450, 500]
results = []
for size_kn in size_kns:
np.random.seed(19971107 + it)
print(dim, it, size_kn)
result1 = experiment(dim=dim, fun=utils.integrand(1), size_est=4000, sn=False, size_kn=size_kn, ratio=1000)
result2 = experiment(dim=dim, fun=utils.integrand(1), size_est=4000, sn=True, size_kn=size_kn, ratio=1000)
results.append(result1 + result2)
return results
def main(dim):
R = []
for it in range(10):
R.append(run(it, dim))
with open('../../data/simulation/kernel_cost_{}D'.format(dim), 'wb') as file:
pickle.dump(R, file)
if __name__ == '__main__':
main(4)
main(6)
main(8)
| 29.166667
| 115
| 0.648571
|
c9bb824f886e1198704a63e94a6a2b810286fcc6
| 11,504
|
py
|
Python
|
models/resnet.py
|
Hilbert70403/Infrared-Small-Target
|
0b7bddc13ed3b2362735ea858af6e7d18d4374cd
|
[
"MIT"
] | 21
|
2021-11-08T08:06:36.000Z
|
2022-03-26T14:22:35.000Z
|
models/resnet.py
|
Hilbert70403/Infrared-Small-Target
|
0b7bddc13ed3b2362735ea858af6e7d18d4374cd
|
[
"MIT"
] | 4
|
2022-01-19T11:37:13.000Z
|
2022-02-28T07:45:19.000Z
|
models/resnet.py
|
Hilbert70403/Infrared-Small-Target
|
0b7bddc13ed3b2362735ea858af6e7d18d4374cd
|
[
"MIT"
] | 9
|
2021-11-15T09:24:41.000Z
|
2022-03-24T08:11:00.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
import math
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
__all__ = ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=1, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
# self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
# self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
# x = self.maxpool(x)
x = self.layer1(x)
c1 = self.layer2(x)
c2 = self.layer3(c1)
c3 = self.layer4(c2)
# x = self.avgpool(x)
# x = torch.flatten(x, 1)
# x = self.fc(x)
return c1, c2, c3
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict, strict=False)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
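# Illustrative usage sketch (shapes assume the modified stem above: stride-1
# conv1 and no max-pooling, so only layer2-layer4 downsample the input):
#
#     model = resnet18(pretrained=False)
#     x = torch.randn(1, 3, 256, 256)
#     c1, c2, c3 = model(x)
#     # c1: (1, 128, 128, 128), c2: (1, 256, 64, 64), c3: (1, 512, 32, 32)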
| 39.262799
| 106
| 0.628651
|
4165dbd24e63b1d975a1e5b4b8b26e27e2272b26
| 11,589
|
py
|
Python
|
litex_things/deps/litex/litex/soc/interconnect/csr.py
|
bjonnh/fomu-playground
|
9f95ed7b28d15ce219d09c16c2c8d6b5594adceb
|
[
"0BSD"
] | null | null | null |
litex_things/deps/litex/litex/soc/interconnect/csr.py
|
bjonnh/fomu-playground
|
9f95ed7b28d15ce219d09c16c2c8d6b5594adceb
|
[
"0BSD"
] | null | null | null |
litex_things/deps/litex/litex/soc/interconnect/csr.py
|
bjonnh/fomu-playground
|
9f95ed7b28d15ce219d09c16c2c8d6b5594adceb
|
[
"0BSD"
] | null | null | null |
# This file is Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
# This file is Copyright (c) 2015-2018 Florent Kermarrec <florent@enjoy-digital.fr>
# This file is Copyright (c) 2016-2019 Tim 'mithro' Ansell <me@mith.ro>
# License: BSD
"""
Configuration and Status Registers
**********************************
The lowest-level description of a register is provided by the ``CSR`` class,
which maps to the value at a single address on the target bus. Also provided
are helper classes for dealing with values larger than the CSR bus's data
width.
* ``CSRConstant``, for constant values.
* ``CSRStatus``, for providing information to the CPU.
* ``CSRStorage``, for allowing control via the CPU.
Generating register banks
=========================
A module can provide bus-independent CSRs by implementing a ``get_csrs`` method
that returns a list of instances of the classes described above.
Similarly, bus-independent memories can be returned as a list by a
``get_memories`` method.
To avoid listing those manually, a module can inherit from the ``AutoCSR``
class, which provides ``get_csrs`` and ``get_memories`` methods that scan for
CSR and memory attributes and return their list.
"""
from migen import *
from migen.util.misc import xdir
from migen.fhdl.tracer import get_obj_var_name
class _CSRBase(DUID):
def __init__(self, size, name):
DUID.__init__(self)
self.name = get_obj_var_name(name)
if self.name is None:
raise ValueError("Cannot extract CSR name from code, need to specify.")
self.size = size
class CSRConstant(DUID):
"""Register which contains a constant value.
Useful for providing information on how a HDL was instantiated to firmware
running on the device.
"""
def __init__(self, value, bits_sign=None, name=None):
DUID.__init__(self)
self.value = Constant(value, bits_sign)
self.name = get_obj_var_name(name)
if self.name is None:
raise ValueError("Cannot extract CSR name from code, need to specify.")
def read(self):
"""Read method for simulation."""
return self.value.value
class CSR(_CSRBase):
"""Basic CSR register.
Parameters
----------
size : int
Size of the CSR register in bits.
Must be less than CSR bus width!
name : string
Provide (or override the name) of the CSR register.
Attributes
----------
r : Signal(size), out
Contains the data written from the bus interface.
``r`` is only valid when ``re`` is high.
re : Signal(), out
The strobe signal for ``r``.
It is active for one cycle, after or during a write from the bus.
w : Signal(size), in
The value to be read from the bus.
Must be provided at all times.
"""
def __init__(self, size=1, name=None):
_CSRBase.__init__(self, size, name)
self.re = Signal(name=self.name + "_re")
self.r = Signal(self.size, name=self.name + "_r")
self.w = Signal(self.size, name=self.name + "_w")
def read(self):
"""Read method for simulation."""
return (yield self.w)
def write(self, value):
"""Write method for simulation."""
yield self.r.eq(value)
yield self.re.eq(1)
yield
yield self.re.eq(0)
class _CompoundCSR(_CSRBase, Module):
def __init__(self, size, name):
_CSRBase.__init__(self, size, name)
self.simple_csrs = []
def get_simple_csrs(self):
if not self.finalized:
raise FinalizeError
return self.simple_csrs
def do_finalize(self, busword):
raise NotImplementedError
class CSRStatus(_CompoundCSR):
"""Status Register.
The ``CSRStatus`` class is meant to be used as a status register that is
read-only from the CPU.
The user design is expected to drive its ``status`` signal.
The advantage of using ``CSRStatus`` instead of using ``CSR`` and driving
``w`` is that the width of ``CSRStatus`` can be arbitrary.
Status registers larger than the bus word width are automatically broken
down into several ``CSR`` registers to span several addresses.
*Be careful, though:* the atomicity of reads is not guaranteed.
Parameters
----------
size : int
Size of the CSR register in bits.
Can be bigger than the CSR bus width.
reset : string
Value of the register after reset.
name : string
Provide (or override the name) of the ``CSRStatus`` register.
Attributes
----------
status : Signal(size), in
The value of the CSRStatus register.
"""
def __init__(self, size=1, reset=0, name=None):
_CompoundCSR.__init__(self, size, name)
self.status = Signal(self.size, reset=reset)
def do_finalize(self, busword):
nwords = (self.size + busword - 1)//busword
for i in reversed(range(nwords)):
nbits = min(self.size - i*busword, busword)
sc = CSR(nbits, self.name + str(i) if nwords > 1 else self.name)
self.comb += sc.w.eq(self.status[i*busword:i*busword+nbits])
self.simple_csrs.append(sc)
def read(self):
"""Read method for simulation."""
return (yield self.status)
class CSRStorage(_CompoundCSR):
"""Control Register.
The ``CSRStorage`` class provides a memory location that can be read and
written by the CPU, and read and optionally written by the design.
It can span several CSR addresses.
Parameters
----------
size : int
Size of the CSR register in bits.
Can be bigger than the CSR bus width.
reset : string
Value of the register after reset.
atomic_write : bool
        Provide a mechanism for atomic CPU writes.
When enabled, writes to the first CSR addresses go to a back-buffer
whose contents are atomically copied to the main buffer when the last
address is written.
write_from_dev : bool
Allow the design to update the CSRStorage value.
*Warning*: The atomicity of reads by the CPU is not guaranteed.
alignment_bits : int
        Number of low-order bits stripped from ``storage``; CPU reads return
        zeros in these bit positions.
name : string
Provide (or override the name) of the ``CSRStatus`` register.
Attributes
----------
storage_full : Signal(size), out
        Signal holding the complete register value, including the
        ``alignment_bits`` low-order bits.
storage : Signal(size), out
Signal providing the value of the ``CSRStorage`` object.
re : Signal(), in
The strobe signal indicating a write to the ``CSRStorage`` register.
It is active for one cycle, after or during a write from the bus.
we : Signal(), out
Only available when ``write_from_dev == True``
        Write strobe driven by the design; when asserted, ``dat_w`` is loaded
        into the register.
dat_w : Signal(), out
Only available when ``write_from_dev == True``
        Data written into the register by the design when ``we`` is asserted.
"""
def __init__(self, size=1, reset=0, atomic_write=False, write_from_dev=False, alignment_bits=0, name=None):
_CompoundCSR.__init__(self, size, name)
self.alignment_bits = alignment_bits
self.storage_full = Signal(self.size, reset=reset)
self.storage = Signal(self.size - self.alignment_bits, reset=reset >> alignment_bits)
self.comb += self.storage.eq(self.storage_full[self.alignment_bits:])
self.atomic_write = atomic_write
self.re = Signal()
if write_from_dev:
self.we = Signal()
self.dat_w = Signal(self.size - self.alignment_bits)
self.sync += If(self.we, self.storage_full.eq(self.dat_w << self.alignment_bits))
def do_finalize(self, busword):
nwords = (self.size + busword - 1)//busword
if nwords > 1 and self.atomic_write:
backstore = Signal(self.size - busword, name=self.name + "_backstore")
for i in reversed(range(nwords)):
nbits = min(self.size - i*busword, busword)
sc = CSR(nbits, self.name + str(i) if nwords else self.name)
self.simple_csrs.append(sc)
lo = i*busword
hi = lo+nbits
# read
if lo >= self.alignment_bits:
self.comb += sc.w.eq(self.storage_full[lo:hi])
elif hi > self.alignment_bits:
self.comb += sc.w.eq(Cat(Replicate(0, hi - self.alignment_bits),
self.storage_full[self.alignment_bits:hi]))
else:
self.comb += sc.w.eq(0)
# write
if nwords > 1 and self.atomic_write:
if i:
self.sync += If(sc.re, backstore[lo-busword:hi-busword].eq(sc.r))
else:
self.sync += If(sc.re, self.storage_full.eq(Cat(sc.r, backstore)))
else:
self.sync += If(sc.re, self.storage_full[lo:hi].eq(sc.r))
self.sync += self.re.eq(sc.re)
def read(self):
"""Read method for simulation."""
return (yield self.storage) << self.alignment_bits
def write(self, value):
"""Write method for simulation."""
yield self.storage.eq(value >> self.alignment_bits)
yield self.re.eq(1)
yield
yield self.re.eq(0)
def csrprefix(prefix, csrs, done):
for csr in csrs:
if csr.duid not in done:
csr.name = prefix + csr.name
done.add(csr.duid)
def memprefix(prefix, memories, done):
for memory in memories:
if memory.duid not in done:
memory.name_override = prefix + memory.name_override
done.add(memory.duid)
def _make_gatherer(method, cls, prefix_cb):
def gatherer(self):
try:
exclude = self.autocsr_exclude
except AttributeError:
exclude = {}
try:
prefixed = self.__prefixed
except AttributeError:
prefixed = self.__prefixed = set()
r = []
for k, v in xdir(self, True):
if k not in exclude:
if isinstance(v, cls):
r.append(v)
elif hasattr(v, method) and callable(getattr(v, method)):
items = getattr(v, method)()
prefix_cb(k + "_", items, prefixed)
r += items
return sorted(r, key=lambda x: x.duid)
return gatherer
class AutoCSR:
"""MixIn to provide bus independent access to CSR registers.
A module can inherit from the ``AutoCSR`` class, which provides
``get_csrs``, ``get_memories`` and ``get_constants`` methods that scan for
CSR and memory attributes and return their list.
If the module has child objects that implement ``get_csrs``,
``get_memories`` or ``get_constants``, they will be called by the
``AutoCSR`` methods and their CSR and memories added to the lists returned,
with the child objects' names as prefixes.
"""
get_memories = _make_gatherer("get_memories", Memory, memprefix)
get_csrs = _make_gatherer("get_csrs", _CSRBase, csrprefix)
get_constants = _make_gatherer("get_constants", CSRConstant, csrprefix)
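# Illustrative sketch (hypothetical peripheral) of the AutoCSR pattern described
# above: CSR attributes created in __init__ are collected by get_csrs().
#
#     class Blinker(Module, AutoCSR):
#         def __init__(self):
#             self._enable = CSRStorage(1)     # control bit written by the CPU
#             self._count = CSRStatus(32)      # counter read back by the CPU
#             counter = Signal(32)
#             self.sync += If(self._enable.storage, counter.eq(counter + 1))
#             self.comb += self._count.status.eq(counter)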
class GenericBank(Module):
def __init__(self, description, busword):
# Turn description into simple CSRs and claim ownership of compound CSR modules
self.simple_csrs = []
for c in description:
if isinstance(c, CSR):
assert c.size <= busword
self.simple_csrs.append(c)
else:
c.finalize(busword)
self.simple_csrs += c.get_simple_csrs()
self.submodules += c
self.decode_bits = bits_for(len(self.simple_csrs)-1)
| 33.111429
| 111
| 0.617827
|
afefbc83c9eeb3b95a224982e10e516c9bb5b399
| 898
|
py
|
Python
|
pytmcapi/test/test_trigger.py
|
mverrilli/tmc-api-clients
|
0d2752a4c2f43b19da9714072d03c15dccf2619a
|
[
"Apache-2.0"
] | null | null | null |
pytmcapi/test/test_trigger.py
|
mverrilli/tmc-api-clients
|
0d2752a4c2f43b19da9714072d03c15dccf2619a
|
[
"Apache-2.0"
] | null | null | null |
pytmcapi/test/test_trigger.py
|
mverrilli/tmc-api-clients
|
0d2752a4c2f43b19da9714072d03c15dccf2619a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Talend Management Console Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.trigger import Trigger # noqa: E501
from swagger_client.rest import ApiException
class TestTrigger(unittest.TestCase):
"""Trigger unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTrigger(self):
"""Test Trigger"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.trigger.Trigger() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 21.902439
| 119
| 0.694878
|
13014e727aeff40c627daecbefbcda6257f546ad
| 291
|
py
|
Python
|
src/10/loading_modules_from_a_remote_machine_using_import_hooks/metaexample.py
|
tuanavu/python-gitbook
|
948a05e065b0f40afbfd22f697dff16238163cde
|
[
"MIT"
] | 14
|
2017-05-20T04:06:46.000Z
|
2022-01-23T06:48:45.000Z
|
src/10/loading_modules_from_a_remote_machine_using_import_hooks/metaexample.py
|
tuanavu/python-gitbook
|
948a05e065b0f40afbfd22f697dff16238163cde
|
[
"MIT"
] | 1
|
2021-06-10T20:17:55.000Z
|
2021-06-10T20:17:55.000Z
|
src/10/loading_modules_from_a_remote_machine_using_import_hooks/metaexample.py
|
tuanavu/python-gitbook
|
948a05e065b0f40afbfd22f697dff16238163cde
|
[
"MIT"
] | 15
|
2017-03-29T17:57:33.000Z
|
2021-08-24T02:20:08.000Z
|
# metaexample.py
#
# Example of using a meta-path importer
# Enable for debugging
if False:
import logging
logging.basicConfig(level=logging.DEBUG)
import urlimport
urlimport.install_meta('http://localhost:15000')
import fib
import spam
import grok.blah
print(grok.blah.__file__)
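# What install_meta presumably does (sketch only, not the actual urlimport
# code): it registers a finder on sys.meta_path so the imports above are
# resolved by fetching source from the given base URL, e.g.
#
#     import sys
#     from importlib.abc import MetaPathFinder
#
#     class UrlMetaFinder(MetaPathFinder):
#         def __init__(self, baseurl):
#             self._baseurl = baseurl
#         def find_module(self, fullname, path=None):
#             # fetch <baseurl>/<fullname>.py and return a loader, or None
#             ...
#
#     sys.meta_path.append(UrlMetaFinder('http://localhost:15000'))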
| 17.117647
| 48
| 0.773196
|
17f268b7574760a904296022fcd1bfc8960a6a1a
| 427
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/scattergl/error_x/_symmetric.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
venv/Lib/site-packages/plotly/validators/scattergl/error_x/_symmetric.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
venv/Lib/site-packages/plotly/validators/scattergl/error_x/_symmetric.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class SymmetricValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="symmetric", parent_name="scattergl.error_x", **kwargs
):
super(SymmetricValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
| 30.5
| 80
| 0.662763
|
cb86254bd93660e78c3cad64cf0d824f202c2459
| 9,826
|
py
|
Python
|
bilean/engine/user.py
|
lvdongbing/bilean
|
592f5fb53e3bceee35a01d0171905b282bc9a3db
|
[
"Apache-2.0"
] | 2
|
2016-01-03T11:20:42.000Z
|
2016-01-06T06:41:51.000Z
|
bilean/engine/user.py
|
lvdongbing/bilean
|
592f5fb53e3bceee35a01d0171905b282bc9a3db
|
[
"Apache-2.0"
] | null | null | null |
bilean/engine/user.py
|
lvdongbing/bilean
|
592f5fb53e3bceee35a01d0171905b282bc9a3db
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from bilean.common import exception
from bilean.common.i18n import _
from bilean.common import utils
from bilean.db import api as db_api
from bilean.engine import event as event_mod
from bilean.engine import resource as resource_mod
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
LOG = logging.getLogger(__name__)
class User(object):
"""User object contains all user operations"""
statuses = (
INIT, FREE, ACTIVE, WARNING, FREEZE,
) = (
'INIT', 'FREE', 'ACTIVE', 'WARNING', 'FREEZE',
)
def __init__(self, user_id, **kwargs):
self.id = user_id
self.policy_id = kwargs.get('policy_id', None)
self.balance = kwargs.get('balance', 0)
self.rate = kwargs.get('rate', 0.0)
self.credit = kwargs.get('credit', 0)
self.last_bill = kwargs.get('last_bill', None)
self.status = kwargs.get('status', self.INIT)
self.status_reason = kwargs.get('status_reason', 'Init user')
self.created_at = kwargs.get('created_at', None)
self.updated_at = kwargs.get('updated_at', None)
self.deleted_at = kwargs.get('deleted_at', None)
def store(self, context):
"""Store the user record into database table."""
values = {
'policy_id': self.policy_id,
'balance': self.balance,
'rate': self.rate,
'credit': self.credit,
'last_bill': self.last_bill,
'status': self.status,
'status_reason': self.status_reason,
'created_at': self.created_at,
'updated_at': self.updated_at,
'deleted_at': self.deleted_at,
}
if self.created_at:
db_api.user_update(context, self.id, values)
else:
values.update(id=self.id)
user = db_api.user_create(context, values)
self.created_at = user.created_at
return self.id
@classmethod
def init_users(cls, context):
"""Init users from keystone."""
k_client = context.clients.client('keystone')
tenants = k_client.tenants.list()
tenant_ids = [tenant.id for tenant in tenants]
users = cls.load_all(context)
user_ids = [user.id for user in users]
for tid in tenant_ids:
if tid not in user_ids:
user = cls(tid, status=cls.INIT,
status_reason='Init from keystone')
user.store(context)
return True
@classmethod
def _from_db_record(cls, record):
'''Construct a user object from database record.
:param record: a DB user object that contains all fields;
'''
kwargs = {
'policy_id': record.policy_id,
'balance': record.balance,
'rate': record.rate,
'credit': record.credit,
'last_bill': record.last_bill,
'status': record.status,
'status_reason': record.status_reason,
'created_at': record.created_at,
'updated_at': record.updated_at,
'deleted_at': record.deleted_at,
}
return cls(record.id, **kwargs)
@classmethod
def load(cls, context, user_id=None, user=None, realtime=False,
show_deleted=False, tenant_safe=True):
'''Retrieve a user from database.'''
if user is None:
user = db_api.user_get(context, user_id,
show_deleted=show_deleted,
tenant_safe=tenant_safe)
if user is None:
raise exception.UserNotFound(user=user_id)
u = cls._from_db_record(user)
if not realtime:
return u
if u.rate > 0 and u.status != u.FREEZE:
seconds = (timeutils.utcnow() - u.last_bill).total_seconds()
u.balance -= u.rate * seconds
return u
@classmethod
def load_all(cls, context, show_deleted=False, limit=None,
marker=None, sort_keys=None, sort_dir=None,
filters=None):
        '''Retrieve all users from database.'''
records = db_api.user_get_all(context, show_deleted=show_deleted,
limit=limit, marker=marker,
sort_keys=sort_keys, sort_dir=sort_dir,
filters=filters)
return [cls._from_db_record(record) for record in records]
@classmethod
def delete(cls, context, user_id=None, user=None):
'''Delete a user from database.'''
if user is not None:
db_api.user_delete(context, user_id=user.id)
return True
elif user_id is not None:
db_api.user_delete(context, user_id=user_id)
return True
return False
def to_dict(self):
user_dict = {
'id': self.id,
'policy_id': self.policy_id,
'balance': self.balance,
'rate': self.rate,
'credit': self.credit,
'last_bill': utils.format_time(self.last_bill),
'status': self.status,
'status_reason': self.status_reason,
'created_at': utils.format_time(self.created_at),
'updated_at': utils.format_time(self.updated_at),
'deleted_at': utils.format_time(self.deleted_at),
}
return user_dict
def set_status(self, status, reason=None):
'''Set status of the user.'''
self.status = status
if reason:
self.status_reason = reason
def update_with_resource(self, context, resource, action='create'):
'''Update user with resource'''
if 'create' == action:
d_rate = resource.rate
if self.rate > 0:
self.do_bill(context)
elif 'delete' == action:
self.do_bill(context)
d_rate = -resource.rate
elif 'update' == action:
self.do_bill(context)
d_rate = resource.d_rate
self._change_user_rate(context, d_rate)
self.store(context)
def _change_user_rate(self, context, d_rate):
# Update the rate of user
old_rate = self.rate
new_rate = old_rate + d_rate
if old_rate == 0 and new_rate > 0:
self.last_bill = timeutils.utcnow()
if d_rate > 0 and self.status == self.FREE:
self.status = self.ACTIVE
elif d_rate < 0:
if new_rate == 0 and self.balance > 0:
self.status = self.FREE
elif self.status == self.WARNING:
p_time = cfg.CONF.bilean_task.prior_notify_time * 3600
rest_usage = p_time * new_rate
if self.balance > rest_usage:
self.status = self.ACTIVE
self.rate = new_rate
def do_recharge(self, context, value):
'''Do recharge for user.'''
if self.rate > 0 and self.status != self.FREEZE:
self.do_bill(context)
self.balance += value
if self.status == self.INIT and self.balance > 0:
self.set_status(self.ACTIVE, reason='Recharged')
elif self.status == self.FREEZE and self.balance > 0:
reason = _("Status change from freeze to active because "
"of recharge.")
self.set_status(self.ACTIVE, reason=reason)
elif self.status == self.WARNING:
prior_notify_time = cfg.CONF.bilean_task.prior_notify_time * 3600
rest_usage = prior_notify_time * self.rate
if self.balance > rest_usage:
reason = _("Status change from warning to active because "
"of recharge.")
self.set_status(self.ACTIVE, reason=reason)
event_mod.record(context, self.id, action='recharge', value=value)
self.store(context)
def _freeze(self, context, reason=None):
        '''Freeze user when balance is overdrawn.'''
LOG.info(_("Freeze user because of: %s") % reason)
self._release_resource(context)
LOG.info(_("Balance of user %s overdraft, change user's "
"status to 'freeze'") % self.id)
self.status = self.FREEZE
self.status_reason = reason
def _release_resource(self, context):
        '''Delete all resources related to the user when freezing.'''
filters = {'user_id': self.id}
resources = resource_mod.Resource.load_all(context, filters=filters)
for resource in resources:
resource.do_delete(context)
def do_delete(self, context):
db_api.user_delete(context, self.id)
return True
def do_bill(self, context):
'''Do bill once, pay the cost until now.'''
now = timeutils.utcnow()
total_seconds = (now - self.last_bill).total_seconds()
self.balance = self.balance - self.rate * total_seconds
self.last_bill = now
if self.balance < 0:
self._freeze(context, reason="Balance overdraft")
self.store(context)
event_mod.record(context, self.id,
action='charge',
seconds=total_seconds)
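# Billing sketch (illustrative numbers only, not from the source): with
# rate=0.002 per second and 3600 seconds elapsed since last_bill, do_bill()
# deducts 0.002 * 3600 = 7.2 from the balance, records a 'charge' event, and
# freezes the user if the resulting balance drops below zero.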
| 37.079245
| 78
| 0.586098
|
aa04fc267a0f8906245e2c5dd4ceb4cf48e4b12e
| 666
|
py
|
Python
|
manage.py
|
spadr/django_iot_core
|
d71ab74ccc90a20e24cadc0c27bf2c400ea33f91
|
[
"MIT"
] | null | null | null |
manage.py
|
spadr/django_iot_core
|
d71ab74ccc90a20e24cadc0c27bf2c400ea33f91
|
[
"MIT"
] | 1
|
2021-09-27T00:54:58.000Z
|
2021-09-27T00:54:58.000Z
|
manage.py
|
spadr/django_iot_core
|
d71ab74ccc90a20e24cadc0c27bf2c400ea33f91
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'iotproject.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.956522
| 74
| 0.68018
|
4f9a3075ae02bdcdb02819a0be91758544ee85f9
| 7,551
|
py
|
Python
|
controller/maestro.py
|
JDownloader/GEL-3014_Design3
|
dea01245592c97f73e6a78426270d91ade5e25c7
|
[
"MIT"
] | null | null | null |
controller/maestro.py
|
JDownloader/GEL-3014_Design3
|
dea01245592c97f73e6a78426270d91ade5e25c7
|
[
"MIT"
] | null | null | null |
controller/maestro.py
|
JDownloader/GEL-3014_Design3
|
dea01245592c97f73e6a78426270d91ade5e25c7
|
[
"MIT"
] | null | null | null |
import serial
#
#---------------------------
# Maestro Servo Controller
#---------------------------
#
# Support for the Pololu Maestro line of servo controllers
#
# Steven Jacobs -- Aug 2013
# https://github.com/FRC4564/Maestro/
#
# These functions provide access to many of the Maestro's capabilities using the
# Pololu serial protocol
#
class Controller:
# When connected via USB, the Maestro creates two virtual serial ports
# /dev/ttyACM0 for commands and /dev/ttyACM1 for communications.
# Be sure the Maestro is configured for "USB Dual Port" serial mode.
# "USB Chained Mode" may work as well, but hasn't been tested.
#
# Pololu protocol allows for multiple Maestros to be connected to a single
# communication channel. Each connected device is then indexed by number.
# This device number defaults to 0x0C (or 12 in decimal), which this module
# assumes. If two or more controllers are connected to different serial
    # ports, then you can specify the port number when initiating a controller
# object. Ports will typically start at 0 and count by twos. So with two
# controllers ports 0 and 2 would be used.
def __init__(self,port):
# Open the command port
ttyStr = str(port)
self.usb = serial.Serial(ttyStr)
# Command lead-in and device 12 are sent for each Pololu serial commands.
self.PololuCmd = chr(0xaa) + chr(0xc)
# Track target position for each servo. The function isMoving() will
# use the Target vs Current servo position to determine if movement is
        # occurring. Up to 24 servos on a Maestro (0-23). Targets start at 0.
self.Targets = [0] * 24
# Servo minimum and maximum targets can be restricted to protect components.
self.Mins = [0] * 24
self.Maxs = [0] * 24
# Cleanup by closing USB serial port
def close(self):
self.usb.close()
# Set channels min and max value range. Use this as a safety to protect
# from accidentally moving outside known safe parameters. A setting of 0
# allows unrestricted movement.
#
# ***Note that the Maestro itself is configured to limit the range of servo travel
# which has precedence over these values. Use the Maestro Control Center to configure
# ranges that are saved to the controller. Use setRange for software controllable ranges.
def setRange(self, chan, min, max):
self.Mins[chan] = min
self.Maxs[chan] = max
# Return Minimum channel range value
def getMin(self, chan):
return self.Mins[chan]
    # Return Maximum channel range value
    def getMax(self, chan):
        return self.Maxs[chan]
# Set channel to a specified target value. Servo will begin moving based
# on Speed and Acceleration parameters previously set.
# Target values will be constrained within Min and Max range, if set.
    # For servos, target represents the pulse width in quarter-microseconds
    # Servo center is at 1500 microseconds, or 6000 quarter-microseconds
    # Typically valid servo range is 3000 to 9000 quarter-microseconds
    # If channel is configured for digital output, values < 6000 = Low output
def setTarget(self, chan, target):
# if Min is defined and Target is below, force to Min
if self.Mins[chan] > 0 and target < self.Mins[chan]:
target = self.Mins[chan]
# if Max is defined and Target is above, force to Max
if self.Maxs[chan] > 0 and target > self.Maxs[chan]:
target = self.Maxs[chan]
#
lsb = target & 0x7f #7 bits for least significant byte
msb = (target >> 7) & 0x7f #shift 7 and take next 7 bits for msb
# Send Pololu intro, device number, command, channel, and target lsb/msb
cmd = self.PololuCmd + chr(0x04) + chr(chan) + chr(lsb) + chr(msb)
self.usb.write(cmd)
# Record Target value
self.Targets[chan] = target
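    # Worked example for the units above: setTarget(0, 6000) requests a pulse
    # width of 6000 quarter-microseconds = 1500 microseconds, i.e. servo center
    # on channel 0 (assuming no Min/Max restriction was set with setRange).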
# Set speed of channel
# Speed is measured as 0.25microseconds/10milliseconds
# For the standard 1ms pulse width change to move a servo between extremes, a speed
# of 1 will take 1 minute, and a speed of 60 would take 1 second.
# Speed of 0 is unrestricted.
def setSpeed(self, chan, speed):
lsb = speed & 0x7f #7 bits for least significant byte
msb = (speed >> 7) & 0x7f #shift 7 and take next 7 bits for msb
# Send Pololu intro, device number, command, channel, speed lsb, speed msb
cmd = self.PololuCmd + chr(0x07) + chr(chan) + chr(lsb) + chr(msb)
self.usb.write(cmd)
# Set acceleration of channel
    # This provides soft starts and finishes when servo moves to target position.
# Valid values are from 0 to 255. 0=unrestricted, 1 is slowest start.
# A value of 1 will take the servo about 3s to move between 1ms to 2ms range.
def setAccel(self, chan, accel):
lsb = accel & 0x7f #7 bits for least significant byte
msb = (accel >> 7) & 0x7f #shift 7 and take next 7 bits for msb
# Send Pololu intro, device number, command, channel, accel lsb, accel msb
cmd = self.PololuCmd + chr(0x09) + chr(chan) + chr(lsb) + chr(msb)
self.usb.write(cmd)
# Get the current position of the device on the specified channel
# The result is returned in a measure of quarter-microseconds, which mirrors
# the Target parameter of setTarget.
# This is not reading the true servo position, but the last target position sent
# to the servo. If the Speed is set to below the top speed of the servo, then
    # the position result will align well with the actual servo position, assuming
# it is not stalled or slowed.
def getPosition(self, chan):
cmd = self.PololuCmd + chr(0x10) + chr(chan)
self.usb.write(cmd)
lsb = ord(self.usb.read())
msb = ord(self.usb.read())
return (msb << 8) + lsb
# Test to see if a servo has reached its target position. This only provides
# useful results if the Speed parameter is set slower than the maximum speed of
# the servo.
# ***Note if target position goes outside of Maestro's allowable range for the
    # channel, then the target can never be reached, so it will appear to always
    # be moving toward the target. See setRange comment.
def isMoving(self, chan):
if self.Targets[chan] > 0:
            if self.getPosition(chan) != self.Targets[chan]:
return True
return False
# Have all servo outputs reached their targets? This is useful only if Speed and/or
# Acceleration have been set on one or more of the channels. Returns True or False.
def getMovingState(self):
cmd = self.PololuCmd + chr(0x13)
self.usb.write(cmd)
if self.usb.read() == chr(0):
return False
else:
return True
# Run a Maestro Script subroutine in the currently active script. Scripts can
# have multiple subroutines, which get numbered sequentially from 0 on up. Code your
# Maestro subroutine to either infinitely loop, or just end (return is not valid).
def runScriptSub(self, subNumber):
cmd = self.PololuCmd + chr(0x27) + chr(subNumber)
        # can pass a param with command 0x28
# cmd = self.PololuCmd + chr(0x28) + chr(subNumber) + chr(lsb) + chr(msb)
self.usb.write(cmd)
# Stop the current Maestro Script
def stopScript(self):
cmd = self.PololuCmd + chr(0x24)
self.usb.write(cmd)
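# Usage sketch (the port name is an assumption; on Linux the Maestro command
# port is typically /dev/ttyACM0):
#
#     servo = Controller('/dev/ttyACM0')
#     servo.setAccel(0, 4)        # soft start on channel 0
#     servo.setSpeed(0, 60)       # roughly 1 second across the full 1 ms swing
#     servo.setTarget(0, 6000)    # 1500 us = servo center
#     servo.close()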
| 47.19375
| 94
| 0.661104
|
60489c35f91852d5dc92bfbeb136aaeb18392fb5
| 6,153
|
py
|
Python
|
test/functional/feature_dersig.py
|
durgeshkmr/minicoin
|
4f082abe13cd34a759bf8ffb344a49244615960e
|
[
"MIT"
] | null | null | null |
test/functional/feature_dersig.py
|
durgeshkmr/minicoin
|
4f082abe13cd34a759bf8ffb344a49244615960e
|
[
"MIT"
] | null | null | null |
test/functional/feature_dersig.py
|
durgeshkmr/minicoin
|
4f082abe13cd34a759bf8ffb344a49244615960e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP66 (DER SIG).
Test that the DERSIG soft-fork activates at (regtest) height 1251.
"""
from test_framework.blocktools import create_coinbase, create_block, create_transaction
from test_framework.messages import msg_block
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.script import CScript
from test_framework.test_framework import MinicoinTestFramework
from test_framework.util import assert_equal, bytes_to_hex_str, wait_until
DERSIG_HEIGHT = 1251
# Reject codes that we might receive in this test
REJECT_INVALID = 16
REJECT_OBSOLETE = 17
REJECT_NONSTANDARD = 64
# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
def unDERify(tx):
"""
Make the signature in vin 0 of a tx non-DER-compliant,
by adding padding after the S-value.
"""
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
newscript.append(i[0:-1] + b'\0' + i[-1:])
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
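# Illustration (hypothetical bytes): a scriptSig signature push that ends in
# ... <S> <0x01> becomes ... <S> <0x00> <0x01> after unDERify -- the extra zero
# byte inserted before the hashtype keeps the push intact but violates strict
# DER encoding, which is exactly what this test relies on.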
class BIP66Test(MinicoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-whitelist=127.0.0.1']]
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].add_p2p_connection(P2PInterface())
self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
self.coinbase_txids = [self.nodes[0].getblock(b)['tx'][0] for b in self.nodes[0].generate(DERSIG_HEIGHT - 2)]
self.nodeaddress = self.nodes[0].getnewaddress()
self.log.info("Test that a transaction with non-DER signature can still appear in a block")
spendtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
self.nodeaddress, amount=1.0)
unDERify(spendtx)
spendtx.rehash()
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1), block_time)
block.nVersion = 2
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
self.log.info("Test that blocks must now be at least version 3")
tip = block.sha256
block_time += 1
block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
block.nVersion = 2
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE)
assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000002)')
assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
del self.nodes[0].p2p.last_message["reject"]
self.log.info("Test that transactions with non-DER signatures cannot appear in a block")
block.nVersion = 3
spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
self.nodeaddress, amount=1.0)
unDERify(spendtx)
spendtx.rehash()
# First we show that this tx is valid except for DERSIG by getting it
# rejected from the mempool for exactly that reason.
assert_equal(
[{'txid': spendtx.hash, 'allowed': False, 'reject-reason': '64: non-mandatory-script-verify-flag (Non-canonical DER signature)'}],
self.nodes[0].testmempoolaccept(rawtxs=[bytes_to_hex_str(spendtx.serialize())], allowhighfees=True)
)
# Now we verify that a block with this transaction is also invalid.
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
# We can receive different reject messages depending on whether
# minicoind is running with multiple script check threads. If script
# check threads are not in use, then transaction script validation
# happens sequentially, and minicoind produces more specific reject
# reasons.
assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
# Generic rejection when a block is invalid
assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed')
else:
assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason
self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted")
block.vtx[1] = create_transaction(self.nodes[0], self.coinbase_txids[1], self.nodeaddress, amount=1.0)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
if __name__ == '__main__':
BIP66Test().main()
| 42.729167
| 142
| 0.672192
|
ec18ff3fd29757d17c5a9e9d89806340c5e34760
| 4,472
|
py
|
Python
|
entities.py
|
monatis/qrestaurantapp
|
8cf4bebc5a66eabc25c3781ed240ed681f0eb255
|
[
"Apache-2.0"
] | null | null | null |
entities.py
|
monatis/qrestaurantapp
|
8cf4bebc5a66eabc25c3781ed240ed681f0eb255
|
[
"Apache-2.0"
] | null | null | null |
entities.py
|
monatis/qrestaurantapp
|
8cf4bebc5a66eabc25c3781ed240ed681f0eb255
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from sqlalchemy import Column, DateTime, Float, Integer, String, Table, Text, text
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class Category(Base):
__tablename__ = 'categories'
id = Column(Integer, primary_key=True)
name = Column(String(255, 'utf8mb4_unicode_ci'), nullable=False)
created_at = Column(DateTime)
updated_at = Column(DateTime)
class Customer(Base):
__tablename__ = 'customers'
id = Column(Integer, primary_key=True)
name = Column(String(255, 'utf8mb4_unicode_ci'), nullable=False)
email = Column(String(255, 'utf8mb4_unicode_ci'), nullable=False)
password_hash = Column(String(255, 'utf8mb4_unicode_ci'), nullable=False)
api_key = Column(String(255, 'utf8mb4_unicode_ci'), nullable=False)
status = Column(Integer, nullable=False)
created_at = Column(DateTime, nullable=False)
class DeliveryOrder(Base):
__tablename__ = 'delivery_orders'
id = Column(Integer, primary_key=True)
order_id = Column(Integer, nullable=False)
menu_id = Column(Integer, nullable=False)
qty = Column(Integer, nullable=False)
address = Column(String(255, 'utf8mb4_unicode_ci'), nullable=False)
phone = Column(String(255, 'utf8mb4_unicode_ci'))
notes = Column(String(255, 'utf8mb4_unicode_ci'))
price_total = Column(Float(asdecimal=True), nullable=False)
status = Column(Integer, nullable=False, server_default=text("0"))
created_at = Column(DateTime, nullable=False)
updated_at = Column(DateTime, nullable=False)
class Desk(Base):
__tablename__ = 'desks'
id = Column(Integer, primary_key=True)
name = Column(String(255, 'utf8mb4_unicode_ci'), nullable=False)
capacity = Column(Integer, nullable=False)
available = Column(Integer, nullable=False, server_default=text("1"))
created_at = Column(DateTime)
updated_at = Column(DateTime)
class Menu(Base):
__tablename__ = 'menus'
id = Column(Integer, primary_key=True)
name = Column(String(255, 'utf8mb4_unicode_ci'), nullable=False)
description = Column(Text(collation='utf8mb4_unicode_ci'), nullable=False)
category_id = Column(Integer, nullable=False)
price = Column(Float(asdecimal=True), nullable=False)
image = Column(String(255, 'utf8mb4_unicode_ci'), nullable=False)
created_at = Column(DateTime)
updated_at = Column(DateTime)
class Migration(Base):
__tablename__ = 'migrations'
id = Column(Integer, primary_key=True)
migration = Column(String(255, 'utf8mb4_unicode_ci'), nullable=False)
batch = Column(Integer, nullable=False)
class Order(Base):
__tablename__ = 'orders'
id = Column(Integer, primary_key=True)
customer_id = Column(Integer, nullable=False)
status = Column(Integer, nullable=False, server_default=text("0"))
created_at = Column(DateTime, nullable=False)
updated_at = Column(DateTime, nullable=False)
t_password_resets = Table(
'password_resets', metadata,
Column('email', String(255, 'utf8mb4_unicode_ci'), nullable=False, index=True),
Column('token', String(255, 'utf8mb4_unicode_ci'), nullable=False),
Column('created_at', DateTime)
)
class PlaceOrder(Base):
__tablename__ = 'place_orders'
id = Column(Integer, primary_key=True)
order_id = Column(Integer, nullable=False)
desk_id = Column(Integer, nullable=False)
menu_id = Column(Integer, nullable=False)
qty = Column(Integer, nullable=False)
price_total = Column(Float(asdecimal=True), nullable=False)
status = Column(Integer, nullable=False, server_default=text("0"))
created_at = Column(DateTime, nullable=False)
updated_at = Column(DateTime, nullable=False)
class Promotion(Base):
__tablename__ = 'promotions'
id = Column(Integer, primary_key=True)
menu_id = Column(Integer, nullable=False)
image = Column(String(255, 'utf8mb4_unicode_ci'), nullable=False)
created_at = Column(DateTime)
updated_at = Column(DateTime)
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String(255, 'utf8mb4_unicode_ci'), nullable=False)
email = Column(String(255, 'utf8mb4_unicode_ci'), nullable=False, unique=True)
password = Column(String(255, 'utf8mb4_unicode_ci'), nullable=False)
remember_token = Column(String(100, 'utf8mb4_unicode_ci'))
created_at = Column(DateTime)
updated_at = Column(DateTime)
| 34.137405
| 83
| 0.720259
|
125477f7ea0f8d03b9871578550860b5532a845e
| 1,576
|
py
|
Python
|
ap/model.py
|
SuvorovAV/TodosGarpix
|
1ec07e4208da82b52d220d25ae5725132f80c24e
|
[
"MIT"
] | null | null | null |
ap/model.py
|
SuvorovAV/TodosGarpix
|
1ec07e4208da82b52d220d25ae5725132f80c24e
|
[
"MIT"
] | null | null | null |
ap/model.py
|
SuvorovAV/TodosGarpix
|
1ec07e4208da82b52d220d25ae5725132f80c24e
|
[
"MIT"
] | null | null | null |
import sys
sys.path.insert(0, r'/home/alexandr/work_test/Todo/TodosGarpix')
import datetime
from flask import abort
from sqlalchemy.dialects.postgresql import JSON
from ap.base import db
class Todo(db.Model):
__tablename__ = "tasks"
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(150))
content = db.Column(db.String())
create_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
def __repr__(self):
return f"<Todo title {self.title}>"
def todo_get_all(self):
tasks = Todo.query.all()
results = [
{
"id": task.id,
"title": task.title,
"content": task.content,
"create_at": task.create_at,
}
for task in tasks
]
return results
def todo_create(self, data):
head = Todo(title=data["title"], content=data["content"])
db.session.add(head)
db.session.commit()
return head
def todo_update(self, id, data):
task = Todo.query.get_or_404(id)
if not "title" in data:
abort(400)
if not "content" in data:
abort(400)
task.title = data["title"]
task.content = data["content"]
db.session.add(task)
db.session.commit()
return task
def todo_delete(self, id):
task = Todo.query.get_or_404(id)
db.session.delete(task)
db.session.commit()
def get_todo(self, id):
task = Todo.query.get_or_404(id)
return task
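# Usage sketch (illustrative only; payload keys follow todo_create/todo_update
# above):
#
#     todo = Todo()
#     task = todo.todo_create({"title": "buy milk", "content": "2 liters"})
#     todo.todo_update(task.id, {"title": "buy milk", "content": "3 liters"})
#     todo.todo_delete(task.id)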
| 26.266667
| 72
| 0.578046
|
3b2f760a94ae49ee68f151e58bd60f673e6cd8e7
| 16,182
|
py
|
Python
|
saleor/payment/gateways/adyen/utils/common.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 3
|
2021-05-02T05:35:54.000Z
|
2022-01-06T12:13:52.000Z
|
saleor/payment/gateways/adyen/utils/common.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 117
|
2021-08-23T04:31:08.000Z
|
2022-03-28T04:40:19.000Z
|
saleor/payment/gateways/adyen/utils/common.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | null | null | null |
import json
import logging
from decimal import Decimal
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional
import Adyen
import opentracing
import opentracing.tags
from django.conf import settings
from django_countries.fields import Country
from .....checkout.calculations import (
checkout_line_total,
checkout_shipping_price,
checkout_total,
)
from .....checkout.fetch import fetch_checkout_info, fetch_checkout_lines
from .....checkout.models import Checkout
from .....checkout.utils import is_shipping_required
from .....discount.utils import fetch_active_discounts
from .....payment.models import Payment
from .....plugins.manager import get_plugins_manager
from .... import PaymentError
from ....interface import PaymentMethodInfo
from ....utils import price_to_minor_unit
if TYPE_CHECKING:
from ....interface import AddressData, PaymentData
logger = logging.getLogger(__name__)
# https://docs.adyen.com/checkout/payment-result-codes
FAILED_STATUSES = ["refused", "error", "cancelled"]
PENDING_STATUSES = ["pending", "received"]
AUTH_STATUS = "authorised"
def get_tax_percentage_in_adyen_format(total_gross, total_net):
tax_percentage_in_adyen_format = 0
if total_gross and total_net:
# get tax percent in adyen format
gross_percentage = total_gross / total_net
gross_percentage = gross_percentage.quantize(Decimal(".01")) # 1.23
tax_percentage = gross_percentage * 100 - 100 # 23.00
tax_percentage_in_adyen_format = int(tax_percentage * 100) # 2300
return tax_percentage_in_adyen_format
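# Worked example (illustrative values): total_gross=Decimal("12.30") and
# total_net=Decimal("10.00") give a gross percentage of 1.23, hence a tax
# percentage of 23.00, returned in Adyen's format as 2300.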
def api_call(
request_data: Optional[Dict[str, Any]], method: Callable, **kwargs
) -> Adyen.Adyen:
try:
return method(request_data, **kwargs)
except (Adyen.AdyenError, ValueError, TypeError) as e:
logger.warning(f"Unable to process the payment: {e}")
raise PaymentError(f"Unable to process the payment request: {e}.")
def prepare_address_request_data(address: Optional["AddressData"]) -> Optional[dict]:
"""Create address structure for Adyen request.
    The sample received from the Adyen team:
Customer enters only address 1: 2500 Valley Creek Way
Ideal: houseNumberOrName: "2500", street: "Valley Creek Way"
If above not possible: houseNumberOrName: "", street: "2500 Valley Creek Way"
***Note the blank string above
Customer enters address 1 and address 2: 30 Granger Circle, 160 Bath Street
Ideal: houseNumberOrName: "30 Granger Circle", street: "160 Bath Street"
"""
house_number_or_name = ""
if not address:
return None
city = address.city or address.country_area or "ZZ"
country = str(address.country) if address.country else "ZZ"
postal_code = address.postal_code or "ZZ"
state_or_province = address.country_area or "ZZ"
if address.company_name:
house_number_or_name = address.company_name
street = address.street_address_1
if address.street_address_2:
street += f" {address.street_address_2}"
elif address.street_address_2:
street = address.street_address_2
house_number_or_name = address.street_address_1
else:
street = address.street_address_1
return {
"city": city,
"country": country,
"houseNumberOrName": house_number_or_name,
"postalCode": postal_code,
"stateOrProvince": state_or_province,
"street": street,
}
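# Example (hypothetical AddressData, matching the docstring sample): with no
# company name, street_address_1="30 Granger Circle" and
# street_address_2="160 Bath Street" produce
# houseNumberOrName="30 Granger Circle" and street="160 Bath Street".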
def request_data_for_payment(
payment_information: "PaymentData",
return_url: str,
merchant_account: str,
native_3d_secure: bool,
) -> Dict[str, Any]:
payment_data = payment_information.data or {}
if not payment_data.pop("is_valid", True):
raise PaymentError("Payment data are not valid.")
extra_request_params = {}
channel = payment_data.get("channel", "web")
origin_url = payment_data.get("originUrl")
browser_info = payment_data.get("browserInfo")
if browser_info:
extra_request_params["browserInfo"] = browser_info
billing_address = payment_data.get("billingAddress")
if billing_address:
extra_request_params["billingAddress"] = billing_address
elif billing_address := prepare_address_request_data(payment_information.billing):
extra_request_params["billingAddress"] = billing_address
delivery_address = payment_data.get("deliveryAddress")
if delivery_address:
extra_request_params["deliveryAddress"] = delivery_address
elif delivery_address := prepare_address_request_data(payment_information.shipping):
extra_request_params["deliveryAddress"] = delivery_address
shopper_ip = payment_data.get("shopperIP")
if shopper_ip:
extra_request_params["shopperIP"] = shopper_ip
device_fingerprint = payment_data.get("deviceFingerprint")
if device_fingerprint:
extra_request_params["deviceFingerprint"] = device_fingerprint
if channel.lower() == "web" and origin_url:
extra_request_params["origin"] = origin_url
shopper_name = payment_data.get("shopperName")
if shopper_name:
extra_request_params["shopperName"] = shopper_name
extra_request_params["channel"] = channel
payment_method = payment_data.get("paymentMethod")
if not payment_method:
raise PaymentError("Unable to find the paymentMethod section.")
method = payment_method.get("type", "")
if native_3d_secure and "scheme" == method:
extra_request_params["additionalData"] = {"allow3DS2": "true"}
extra_request_params["shopperEmail"] = payment_information.customer_email
if payment_information.billing:
extra_request_params["shopperName"] = {
"firstName": payment_information.billing.first_name,
"lastName": payment_information.billing.last_name,
}
request_data = {
"amount": {
"value": price_to_minor_unit(
payment_information.amount, payment_information.currency
),
"currency": payment_information.currency,
},
"reference": payment_information.graphql_payment_id,
"paymentMethod": payment_method,
"returnUrl": return_url,
"merchantAccount": merchant_account,
"shopperEmail": payment_information.customer_email,
"shopperReference": payment_information.customer_email,
**extra_request_params,
}
methods_that_require_checkout_details = ["afterpaytouch", "clearpay"]
    # klarna in method - because there are many klarna method variants - like pay
    # later with klarna or pay with klarna, etc.
if "klarna" in method or method in methods_that_require_checkout_details:
request_data = append_checkout_details(payment_information, request_data)
return request_data
def get_shipping_data(manager, checkout_info, lines, discounts):
address = checkout_info.shipping_address or checkout_info.billing_address
currency = checkout_info.checkout.currency
shipping_total = checkout_shipping_price(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=address,
discounts=discounts,
)
total_gross = shipping_total.gross.amount
total_net = shipping_total.net.amount
tax_amount = shipping_total.tax.amount
tax_percentage_in_adyen_format = get_tax_percentage_in_adyen_format(
total_gross, total_net
)
return {
"quantity": 1,
"amountExcludingTax": price_to_minor_unit(total_net, currency),
"taxPercentage": tax_percentage_in_adyen_format,
"description": (
f"Shipping - {checkout_info.delivery_method_info.delivery_method.name}"
),
"id": f"Shipping:{checkout_info.delivery_method_info.delivery_method.id}",
"taxAmount": price_to_minor_unit(tax_amount, currency),
"amountIncludingTax": price_to_minor_unit(total_gross, currency),
}
def append_checkout_details(payment_information: "PaymentData", payment_data: dict):
checkout = (
Checkout.objects.prefetch_related(
"shipping_method",
)
.filter(payments__id=payment_information.payment_id)
.first()
)
if not checkout:
raise PaymentError("Unable to calculate products for klarna.")
manager = get_plugins_manager()
lines = fetch_checkout_lines(checkout)
discounts = fetch_active_discounts()
checkout_info = fetch_checkout_info(checkout, lines, discounts, manager)
currency = payment_information.currency
country_code = checkout.get_country()
payment_data["shopperLocale"] = get_shopper_locale_value(country_code)
payment_data["countryCode"] = country_code
line_items = []
for line_info in lines:
total = checkout_line_total(
manager=manager,
checkout_info=checkout_info,
lines=lines,
checkout_line_info=line_info,
discounts=discounts,
)
address = checkout_info.shipping_address or checkout_info.billing_address
unit_price = manager.calculate_checkout_line_unit_price(
total,
line_info.line.quantity,
checkout_info,
lines,
line_info,
address,
discounts,
)
unit_gross = unit_price.gross.amount
unit_net = unit_price.net.amount
tax_amount = unit_price.tax.amount
tax_percentage_in_adyen_format = get_tax_percentage_in_adyen_format(
unit_gross, unit_net
)
line_data = {
"quantity": line_info.line.quantity,
"amountExcludingTax": price_to_minor_unit(unit_net, currency),
"taxPercentage": tax_percentage_in_adyen_format,
"description": (
f"{line_info.variant.product.name}, {line_info.variant.name}"
),
"id": line_info.variant.sku or line_info.variant.get_global_id(),
"taxAmount": price_to_minor_unit(tax_amount, currency),
"amountIncludingTax": price_to_minor_unit(unit_gross, currency),
}
line_items.append(line_data)
if checkout_info.delivery_method_info.delivery_method and is_shipping_required(
lines
):
line_items.append(get_shipping_data(manager, checkout_info, lines, discounts))
payment_data["lineItems"] = line_items
return payment_data
def get_shopper_locale_value(country_code: str):
    # Remove this function when "shopperLocale" comes from the frontend site
country_code_to_shopper_locale_value = {
# https://docs.adyen.com/checkout/components-web/
# localization-components#change-language
"CN": "zh_CN",
"DK": "da_DK",
"NL": "nl_NL",
"US": "en_US",
"FI": "fi_FI",
"FR": "fr_FR",
"DR": "de_DE",
"IT": "it_IT",
"JP": "ja_JP",
"KR": "ko_KR",
"NO": "no_NO",
"PL": "pl_PL",
"BR": "pt_BR",
"RU": "ru_RU",
"ES": "es_ES",
"SE": "sv_SE",
}
return country_code_to_shopper_locale_value.get(country_code, "en_US")
def request_data_for_gateway_config(
checkout: "Checkout", merchant_account
) -> Dict[str, Any]:
manager = get_plugins_manager()
address = checkout.billing_address or checkout.shipping_address
discounts = fetch_active_discounts()
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, discounts, manager)
total = checkout_total(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=address,
discounts=discounts,
)
country = address.country if address else None
if country:
country_code = country.code
else:
country_code = Country(settings.DEFAULT_COUNTRY).code
channel = checkout.get_value_from_metadata("channel", "web")
return {
"merchantAccount": merchant_account,
"countryCode": country_code,
"channel": channel,
"amount": {
"value": price_to_minor_unit(total.gross.amount, checkout.currency),
"currency": checkout.currency,
},
}
def request_for_payment_refund(
payment_information: "PaymentData", merchant_account, token
) -> Dict[str, Any]:
return {
"merchantAccount": merchant_account,
"modificationAmount": {
"value": price_to_minor_unit(
payment_information.amount, payment_information.currency
),
"currency": payment_information.currency,
},
"originalReference": token,
"reference": payment_information.graphql_payment_id,
}
def request_for_payment_capture(
payment_information: "PaymentData", merchant_account: str, token: str
) -> Dict[str, Any]:
return {
"merchantAccount": merchant_account,
"modificationAmount": {
"value": price_to_minor_unit(
payment_information.amount, payment_information.currency
),
"currency": payment_information.currency,
},
"originalReference": token,
"reference": payment_information.graphql_payment_id,
}
def update_payment_with_action_required_data(
payment: Payment, action: dict, details: list
):
action_required_data = {
"payment_data": action["paymentData"],
"parameters": [detail["key"] for detail in details],
}
if payment.extra_data:
payment_extra_data = json.loads(payment.extra_data)
try:
payment_extra_data.append(action_required_data)
extra_data = payment_extra_data
except AttributeError:
extra_data = [payment_extra_data, action_required_data]
else:
extra_data = [action_required_data]
payment.extra_data = json.dumps(extra_data)
payment.save(update_fields=["extra_data"])
def call_capture(
payment_information: "PaymentData",
merchant_account: str,
token: str,
adyen_client: Adyen.Adyen,
):
# https://docs.adyen.com/checkout/capture#make-an-api-call-to-capture-a-payment
request = request_for_payment_capture(
payment_information=payment_information,
merchant_account=merchant_account,
token=token,
)
with opentracing.global_tracer().start_active_span(
"adyen.payment.capture"
) as scope:
span = scope.span
span.set_tag(opentracing.tags.COMPONENT, "payment")
span.set_tag("service.name", "adyen")
return api_call(request, adyen_client.payment.capture)
def request_for_payment_cancel(
payment_information: "PaymentData",
merchant_account: str,
token: str,
):
return {
"merchantAccount": merchant_account,
"originalReference": token,
"reference": payment_information.graphql_payment_id,
}
def get_payment_method_info(
payment_information: "PaymentData", api_call_result: Adyen.Adyen
):
additional_data = api_call_result.message.get("additionalData")
payment_data = payment_information.data or {}
payment_method = payment_data.get("paymentMethod", {}).get("type")
brand = None
if additional_data:
brand = additional_data.get("paymentMethod")
payment_method_info = PaymentMethodInfo(
brand=brand,
type="card" if payment_method == "scheme" else payment_method,
)
return payment_method_info
def get_request_data_for_check_payment(data: dict, merchant_account: str) -> dict:
amount_input = data["card"].get("money")
security_code = data["card"].get("cvc")
request_data = {
"merchantAccount": merchant_account,
"paymentMethod": {
"type": data["method"],
"number": data["card"]["code"],
},
}
if amount_input:
currency = amount_input["currency"]
value = amount_input["amount"]
request_data["amount"] = {
"value": price_to_minor_unit(value, currency),
"currency": currency,
}
if security_code:
request_data["paymentMethod"]["securityCode"] = security_code # type: ignore
return request_data
| 34.067368
| 88
| 0.681992
|
c4668abe305ab6927a49c6688ebbcf1c8141eb4b
| 105
|
py
|
Python
|
goldy_smart_house/events/__init__.py
|
THEGOLDENPRO/Goldy-Smart-House
|
1745ed8f2aa60d77ba2031d2ee1a679bd771214b
|
[
"MIT"
] | null | null | null |
goldy_smart_house/events/__init__.py
|
THEGOLDENPRO/Goldy-Smart-House
|
1745ed8f2aa60d77ba2031d2ee1a679bd771214b
|
[
"MIT"
] | null | null | null |
goldy_smart_house/events/__init__.py
|
THEGOLDENPRO/Goldy-Smart-House
|
1745ed8f2aa60d77ba2031d2ee1a679bd771214b
|
[
"MIT"
] | null | null | null |
"""Goldy Smart House trigger events."""
from . import _on_command_
on_command = _on_command_.on_command
| 21
| 39
| 0.780952
|
20328c30e72484dc4aad3ffe175fffa99eb0bde2
| 12,871
|
py
|
Python
|
topologic/embedding/omnibus_embedding.py
|
microsoft/topologic
|
d3a2155a42469ccb16de178f47bec81b0476fdc8
|
[
"MIT"
] | 24
|
2020-02-10T23:51:06.000Z
|
2021-11-17T02:34:47.000Z
|
topologic/embedding/omnibus_embedding.py
|
microsoft/topologic
|
d3a2155a42469ccb16de178f47bec81b0476fdc8
|
[
"MIT"
] | 26
|
2020-02-11T18:37:33.000Z
|
2020-11-11T00:14:41.000Z
|
topologic/embedding/omnibus_embedding.py
|
microsoft/topologic
|
d3a2155a42469ccb16de178f47bec81b0476fdc8
|
[
"MIT"
] | 6
|
2020-07-31T11:05:36.000Z
|
2021-11-10T08:18:52.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
import logging
from typing import List, Union, Optional, Tuple
import networkx as nx
import numpy as np
import scipy as sp
from .embedding_container import EmbeddingContainer
from .spectral_embedding import _generate_embedding
from ..connected_components import largest_connected_component
from ..embedding import EmbeddingMethod
from ..graph_augmentation import rank_edges, \
diagonal_augmentation
def omnibus_embedding(
graphs: List[nx.Graph],
maximum_dimensions: int = 100,
elbow_cut: Optional[int] = 1,
embedding_method: EmbeddingMethod = EmbeddingMethod.LAPLACIAN_SPECTRAL_EMBEDDING,
svd_seed: Optional[int] = None,
num_iterations: int = 5,
power_iteration_normalizer: str = 'QR',
num_oversamples: int = 10
) -> List[Tuple[EmbeddingContainer, EmbeddingContainer]]:
"""
Generates a pairwise omnibus embedding for each pair of graphs in a list of graphs. If given graphs A, B, and C,
the embeddings will be computed for A,B and B,C.
There should be exactly the same number of nodes in each graph with exactly the same labels. The list of graphs
should represent a time series and should be in an order such that time is continuous through the list of graphs.
If the labels differ between each pair of graphs, then those nodes will only be found in the resulting embedding
if they exist in the largest connected component of the union of all edges across all graphs in the time series.
:param List[networkx.Graph] graphs: A list of graphs that will be used to generate the omnibus embedding. Each graph
        should have exactly the same vertices as each of the other graphs. The order of the graphs in the list matters.
The first graph will be at time 0 and each following graph will increment time by 1.
:param int maximum_dimensions: Maximum dimensions of embeddings that will be returned - defaults to 100. Actual
dimensions of resulting embeddings should be significantly smaller, but will never be over this value.
:param int elbow_cut: scree plot elbow detection will detect (usually) many elbows. This value specifies which
elbow to use prior to filtering out extraneous dimensions.
:param topologic.embedding.EmbeddingMethod embedding_method: The embedding technique used to generate the Omnibus
embedding.
    :param Optional[int] svd_seed: If not provided, uses a random number every time, making consistent results difficult.
Set this to a random int if you want consistency between executions over the same graph.
:param int num_iterations: The number of iterations to be used in the svd solver.
:param int num_oversamples: Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values.
:param Optional[str] power_iteration_normalizer:
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `num_iterations` <= 2 and switches to LU otherwise.
Options: 'auto' (default), 'QR', 'LU', 'none'
:return: A List of EmbeddingContainers each containing a matrix, which itself contains the embedding for each node.
        The tuple also contains a vector containing the corresponding vertex labels for each row in the matrix. The
        matrix and vector are positionally correlated.
:rtype: List[(EmbeddingContainer, EmbeddingContainer)]
"""
logger = logging.getLogger(__name__)
if not graphs:
raise ValueError('Graphs must be provided but was None')
if len(graphs) <= 1:
raise ValueError('You must provide at least two graphs to compute the Omnibus embedding but there were only '
f'{len(graphs)} graphs.')
is_directed = nx.is_directed(graphs[0])
for graph in graphs[1:]:
if nx.is_directed(graph) != is_directed:
raise ValueError('All graphs must be either directed or all must be undirected.')
logger.info('Generating adjacency matrices from list of graphs')
if embedding_method == EmbeddingMethod.ADJACENCY_SPECTRAL_EMBEDDING:
get_matrices_function = _get_adjacency_matrices
elif embedding_method == EmbeddingMethod.LAPLACIAN_SPECTRAL_EMBEDDING:
get_matrices_function = _get_laplacian_matrices
else:
raise TypeError(f'Unexpected EmbeddingMethod for argument embedding_method: {embedding_method}.')
logger.info('Generating the omnibus embedding')
embedding_containers = []
# union graph lcc strategy
union_graph = graphs[0].copy()
for graph in graphs[1:]:
for source, target, weight in graph.edges(data='weight'):
if union_graph.has_edge(source, target):
union_graph[source][target]['weight'] += weight
else:
union_graph.add_edge(source, target, weight=weight)
union_graph_lcc = largest_connected_component(union_graph)
union_graph_lcc_nodes = union_graph_lcc.nodes()
previous_graph = graphs[0].copy()
count = 1
for graph in graphs[1:]:
logging.debug(f'Calculating omni for graph {count} of {len(graphs) - 1}')
count = count + 1
current_graph = graph.copy()
# reduce each graph so that the node set contains exactly the same nodes as union_graph_lcc_nodes
_sync_nodes(previous_graph, union_graph_lcc_nodes)
_sync_nodes(current_graph, union_graph_lcc_nodes)
pairwise_graphs_reduced = [previous_graph, current_graph]
for i in range(len(pairwise_graphs_reduced)):
graph_to_augment = pairwise_graphs_reduced[i]
augmented_graph = _augment_graph(graph_to_augment)
pairwise_graphs_reduced[i] = augmented_graph
labels, pairwise_matrices = get_matrices_function(pairwise_graphs_reduced)
omnibus_matrix = generate_omnibus_matrix(pairwise_matrices)
embedding = _generate_embedding(
elbow_cut,
is_directed,
omnibus_matrix,
maximum_dimensions,
min(omnibus_matrix.shape),
num_oversamples,
num_iterations,
power_iteration_normalizer,
svd_seed
)
number_of_nodes = len(pairwise_graphs_reduced[0].nodes())
embeddings = [embedding[x: x + number_of_nodes] for x in range(0, embedding.shape[0], number_of_nodes)]
previous_graph = graph
embedding_containers.append(
(EmbeddingContainer(embeddings[0], labels),
EmbeddingContainer(embeddings[1], labels))
)
return embedding_containers
def _augment_graph(graph_to_augment):
ranked_graph = rank_edges(graph_to_augment)
augmented_graph = diagonal_augmentation(ranked_graph)
return augmented_graph
def _sync_nodes(graph_to_reduce, set_of_valid_nodes):
to_remove = []
for n in graph_to_reduce.nodes():
if n not in set_of_valid_nodes:
to_remove.append(n)
graph_to_reduce.remove_nodes_from(to_remove)
for node in set_of_valid_nodes:
if not graph_to_reduce.has_node(node):
graph_to_reduce.add_node(node)
def _reduce_to_common_nodes(graphs: List[nx.Graph]):
"""
Reduces each graph in the provided list to only the nodes contained in each and every other graph. In other words,
reduce each graph to be the intersection of nodes of all other graphs.
:param graphs: The list of graphs to reduce
:return: A list of pruned graphs
"""
sets = []
graphs_copy = copy.deepcopy(graphs)
for graph in graphs_copy:
sets.append(set(graph.nodes()))
set_of_all_common_nodes = set.intersection(*sets)
# reduce graphs to only have nodes contained in all other graphs
for graph in graphs_copy:
to_remove = []
for node in graph.nodes():
if node not in set_of_all_common_nodes:
to_remove.append(node)
for node in to_remove:
graph.remove_node(node)
return graphs_copy
def _get_unstacked_embeddings(embedding, graphs, labels):
containers = []
# unstack embeddings and labels
number_of_nodes = len(graphs[0].nodes())
embeddings = [embedding[x: x + number_of_nodes] for x in range(0, embedding.shape[0], number_of_nodes)]
vertex_labels = [labels[x: x + number_of_nodes] for x in range(0, embedding.shape[0], number_of_nodes)]
for i, embedding in enumerate(embeddings):
containers.append(
EmbeddingContainer(
embedding=embedding,
vertex_labels=vertex_labels[i]
)
)
return containers
def _get_adjacency_matrices(graphs):
matrices = []
labels = set()
for graph in graphs:
sorted_nodes = sorted(graph.nodes())
matrices.append(nx.to_scipy_sparse_matrix(graph, nodelist=sorted_nodes))
for node in sorted_nodes:
labels.add(node)
return sorted(list(labels)), matrices
def _get_laplacian_matrices(graphs):
labels, adjacency_matrices = _get_adjacency_matrices(graphs)
laplacian_matrices = []
for matrix in adjacency_matrices:
laplacian_matrices.append(_get_lse_matrix(matrix))
return labels, laplacian_matrices
def _get_lse_matrix(adjacency_matrix: np.ndarray):
in_degree = adjacency_matrix.sum(axis=0).astype(float)
out_degree = adjacency_matrix.sum(axis=1).T.astype(float)
in_degree_array = np.squeeze(np.asarray(in_degree))
out_degree_array = np.squeeze(np.asarray(out_degree))
in_root = in_degree_array ** (-0.5)
out_root = out_degree_array ** (-0.5)
in_root[np.isinf(in_root)] = 0
out_root[np.isinf(out_root)] = 0
in_diagonal = sp.sparse.diags(in_root)
out_diagonal = sp.sparse.diags(out_root)
lse_matrix = out_diagonal.dot(adjacency_matrix).dot(in_diagonal)
return lse_matrix
def generate_omnibus_matrix(
matrices: List[Union[np.ndarray, sp.sparse.csr_matrix]]
) -> np.ndarray:
"""
Generate the omnibus matrix from a list of adjacency or laplacian matrices as described by 'A central limit theorem
for an omnibus embedding of random dot product graphs.'
Given an iterable of matrices a, b, ... n then the omnibus matrix is defined as::
[[ a, .5 * (a + b), ..., .5 * (a + n)],
[.5 * (b + a), b, ..., .5 * (b + n)],
[ ..., ..., ..., ...],
         [.5 * (n + a), .5 * (n + b), ..., n]
]
The current iteration of this function operates in O(n) but a further optimization could take it to O(.5 * n)
See also:
The original paper - https://arxiv.org/abs/1705.09355
:param matrices: The list of matrices to generate the Omnibus matrix
:type matrices: List[Union[numpy.ndarray, scipy.sparse.csr_matrix]]
:return: An Omnibus matrix
"""
horizontal_stacker, vertical_stacker = _get_stacker_functions(matrices[0])
rows = []
# Iterate over each column
for column_index in range(0, len(matrices)):
current_row = []
current_matrix = matrices[column_index]
for row_index in range(0, len(matrices)):
if row_index == column_index:
# we are on the diagonal, we do not need to perform any calculation and instead add the current matrix
# to the current_row
current_row.append(current_matrix)
else:
# otherwise we are not on the diagonal and we average the current_matrix with the matrix at row_index
# and add that to our current_row
matrices_averaged = (current_matrix + matrices[row_index]) * .5
current_row.append(matrices_averaged)
# an entire row has been generated, we will create a horizontal stack of each matrix in the row completing the
# row
rows.append(horizontal_stacker(current_row))
return vertical_stacker(rows)
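# Tiny numeric sketch (hypothetical 1x1 inputs): for matrices
# [np.array([[2.]]), np.array([[4.]])] the result is
# [[2., 3.], [3., 4.]] -- the diagonal blocks are the inputs and the
# off-diagonal blocks are their pairwise averages.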
def _get_stacker_functions(matrix):
if sp.sparse.issparse(matrix):
horizontal_stacker = sp.sparse.hstack
vertical_stacker = sp.sparse.vstack
else:
horizontal_stacker = np.hstack
vertical_stacker = np.vstack
return horizontal_stacker, vertical_stacker
| 39.121581
| 120
| 0.690389
|
28e7cd95d1d16ef2e9a831b4b06b7ab0fa871907
| 2,950
|
py
|
Python
|
valid_show.py
|
ahmed-shariff/robust_hand_tracking
|
265da51345d65026fea6c4018ee652f295a2192d
|
[
"MIT"
] | 3
|
2019-11-12T18:46:37.000Z
|
2020-01-26T08:19:41.000Z
|
valid_show.py
|
ahmed-shariff/robust_hand_tracking
|
265da51345d65026fea6c4018ee652f295a2192d
|
[
"MIT"
] | 1
|
2019-10-07T12:14:36.000Z
|
2020-08-12T19:46:58.000Z
|
valid_show.py
|
ahmed-shariff/robust_hand_tracking
|
265da51345d65026fea6c4018ee652f295a2192d
|
[
"MIT"
] | 1
|
2019-11-13T16:20:21.000Z
|
2019-11-13T16:20:21.000Z
|
from darknet import Darknet
import dataset
import torch
from torch.autograd import Variable
from torchvision import datasets, transforms
from utils import *
import os
def valid(datacfg, cfgfile, weightfile, outfile):
options = read_data_cfg(datacfg)
valid_images = options['valid']
name_list = options['names']
prefix = 'results'
names = load_class_names(name_list)
with open(valid_images) as fp:
tmp_files = fp.readlines()
valid_files = [item.rstrip() for item in tmp_files]
m = Darknet(cfgfile)
m.print_network()
m.load_weights(weightfile)
m.cuda()
m.eval()
valid_dataset = dataset.listDataset(valid_images, shape=(m.width, m.height),
shuffle=False,
transform=transforms.Compose([
transforms.ToTensor(),
]))
valid_batchsize = 2
assert(valid_batchsize > 1)
kwargs = {'num_workers': 4, 'pin_memory': True}
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=valid_batchsize, shuffle=False, **kwargs)
fps = [0]*m.num_classes
if not os.path.exists('results'):
os.mkdir('results')
for i in range(m.num_classes):
buf = '%s/%s%s.txt' % (prefix, outfile, names[i])
fps[i] = open(buf, 'w')
lineId = -1
conf_thresh = 0.005
nms_thresh = 0.45
for batch_idx, (data, target) in enumerate(valid_loader):
data = data.cuda()
data = Variable(data, volatile=True)
output = m(data).data
batch_boxes = get_region_boxes(output, conf_thresh, m.num_classes, m.anchors, m.num_anchors, 0, 1)
for i in range(output.size(0)):
lineId = lineId + 1
fileId = os.path.basename(valid_files[lineId]).split('.')[0]
width, height = get_image_size(valid_files[lineId])
print(valid_files[lineId])
boxes = batch_boxes[i]
boxes = nms(boxes, nms_thresh)
for box in boxes:
x1 = (box[0] - box[2]/2.0) * width
y1 = (box[1] - box[3]/2.0) * height
x2 = (box[0] + box[2]/2.0) * width
y2 = (box[1] + box[3]/2.0) * height
det_conf = box[4]
for j in range((len(box) - 5) // 2):
cls_conf = box[5+2*j]
cls_id = box[6+2*j]
prob = det_conf * cls_conf
fps[cls_id].write('%s %f %f %f %f %f\n' % (fileId, prob, x1, y1, x2, y2))
for i in range(m.num_classes):
fps[i].close()
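# Added note (not part of the original script): each detection line written to
# fps[cls_id] above follows the VOC-style format 'image_id confidence x1 y1 x2 y2',
# with one output file per class (prefix 'comp4_det_test_' when run as a script).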
if __name__ == '__main__':
import sys
if len(sys.argv) == 4:
datacfg = sys.argv[1]
cfgfile = sys.argv[2]
weightfile = sys.argv[3]
outfile = 'comp4_det_test_'
valid(datacfg, cfgfile, weightfile, outfile)
else:
print('Usage:')
print(' python valid_show.py datacfg cfgfile weightfile')
| 33.146067
| 106
| 0.564068
|
cc6e4b1220fc6be02f9cf549fdde957e4a6be8e1
| 3,020
|
py
|
Python
|
mars/tensor/arithmetic/arctanh.py
|
wjsi/mars
|
a69fb19edfe748d4393b90ff2c4941a76c084596
|
[
"Apache-2.0"
] | 1
|
2022-02-02T03:03:48.000Z
|
2022-02-02T03:03:48.000Z
|
mars/tensor/arithmetic/arctanh.py
|
wjsi/mars
|
a69fb19edfe748d4393b90ff2c4941a76c084596
|
[
"Apache-2.0"
] | null | null | null |
mars/tensor/arithmetic/arctanh.py
|
wjsi/mars
|
a69fb19edfe748d4393b90ff2c4941a76c084596
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ..utils import infer_dtype
from .core import TensorUnaryOp
from .utils import arithmetic_operand
@arithmetic_operand(sparse_mode="unary")
class TensorArctanh(TensorUnaryOp):
_op_type_ = OperandDef.ARCTANH
_func_name = "arctanh"
@infer_dtype(np.arctanh)
def arctanh(x, out=None, where=None, **kwargs):
"""
Inverse hyperbolic tangent element-wise.
Parameters
----------
x : array_like
Input tensor.
out : Tensor, None, or tuple of Tensor and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated tensor is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
where : array_like, optional
Values of True indicate to calculate the ufunc at that position, values
of False indicate to leave the value in the output alone.
**kwargs
Returns
-------
out : Tensor
Array of the same shape as `x`.
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return
the `z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function
that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> import mars.tensor as mt
>>> mt.arctanh([0, -0.5]).execute()
array([ 0. , -0.54930614])
"""
op = TensorArctanh(**kwargs)
return op(x, out=out, where=where)
| 34.712644
| 79
| 0.677152
|
745fca71039bbdb6b9c03d86cab63f919f602589
| 4,515
|
py
|
Python
|
satella/coding/structures/heaps/base.py
|
piotrmaslanka/satella
|
bf4ba7a21ad2ac93a366442a2b4574dc5568b87e
|
[
"MIT"
] | 12
|
2019-12-13T10:17:38.000Z
|
2022-01-05T09:01:36.000Z
|
satella/coding/structures/heaps/base.py
|
piotrmaslanka/satella
|
bf4ba7a21ad2ac93a366442a2b4574dc5568b87e
|
[
"MIT"
] | 26
|
2016-04-01T11:55:26.000Z
|
2021-12-30T17:03:59.000Z
|
satella/coding/structures/heaps/base.py
|
piotrmaslanka/satella
|
bf4ba7a21ad2ac93a366442a2b4574dc5568b87e
|
[
"MIT"
] | 1
|
2021-05-31T08:45:22.000Z
|
2021-05-31T08:45:22.000Z
|
import collections
import copy
import heapq
import typing as tp
from satella.coding.decorators.decorators import wraps
from satella.coding.typing import T, Predicate
def _extras_to_one(fun):
@wraps(fun)
def inner(self, a, *args, **kwargs):
return fun(self, ((a,) + args) if len(args) > 0 else a, **kwargs)
return inner
class Heap(collections.UserList, tp.Generic[T]):
"""
Sane heap as object - not like heapq.
Goes from lowest-to-highest (first popped is smallest).
Standard Python comparison rules apply.
Not thread-safe
"""
def __init__(self, from_list: tp.Optional[tp.Iterable[T]] = None):
super().__init__(from_list)
heapq.heapify(self.data)
def push_many(self, items: tp.Iterable[T]) -> None:
for item in items:
self.push(item)
def pop_item(self, item: T) -> T:
"""
Pop an item off the heap, maintaining the heap invariant
:raise ValueError: element not found
"""
self.data.remove(item) # raises: ValueError
heapq.heapify(self.data)
return item
@_extras_to_one
def push(self, item: T) -> None:
"""
Use it like:
>>> heap.push(3)
or:
>>> heap.push(4, myobject)
"""
heapq.heappush(self.data, item)
def __deepcopy__(self, memo=None) -> 'Heap':
return self.__class__(copy.deepcopy(self.data, memo=memo))
def __copy__(self) -> 'Heap':
return self.__class__(copy.copy(self.data))
def __iter__(self) -> tp.Iterator[T]:
return self.data.__iter__()
def pop(self) -> T:
"""
Return smallest element of the heap.
:raises IndexError: on empty heap
"""
return heapq.heappop(self.data)
def filter_map(self, filter_fun: tp.Optional[Predicate[T]] = None,
map_fun: tp.Optional[tp.Callable[[T], tp.Any]] = None):
"""
Keep only the items for which filter_fun(item) returns True, then apply
map_fun to each remaining item. The heap invariant is maintained.
"""
heap = filter(filter_fun, self.data) if filter_fun else self.data
heap = map(map_fun, heap) if map_fun else heap
heap = list(heap) if not isinstance(heap, list) else heap
self.data = heap
heapq.heapify(self.data)
def __bool__(self) -> bool:
"""
Is this empty?
"""
return len(self.data) > 0
def iter_ascending(self) -> tp.Iterable[T]:
"""
Return an iterator returning all elements in this heap sorted ascending.
State of the heap is not changed
"""
heap = copy.copy(self.data)
while heap:
yield heapq.heappop(heap)
def iter_descending(self) -> tp.Iterable[T]:
"""
Return an iterator returning all elements in this heap sorted descending.
State of the heap is not changed.
This loads all elements of the heap into memory at once, so be careful.
"""
return reversed(list(self.iter_ascending()))
def __eq__(self, other: 'Heap') -> bool:
return self.data == other.data
def __len__(self) -> int:
return len(self.data)
def __str__(self) -> str:
return '<satella.coding.Heap: %s elements>' % len(self)
def __repr__(self) -> str:
return u'<satella.coding.Heap>'
def __contains__(self, item: T) -> bool:
return item in self.data
class SetHeap(Heap):
"""
A heap with additional invariant that no two elements are the same.
Note that elements you insert in this must be eq-able and hashable, ie. you can put them in a
dict.
Optimized for fast insertions and fast __contains__
#notthreadsafe
"""
def __init__(self, from_list: tp.Optional[tp.Iterable[T]] = None):
super().__init__(from_list=from_list)
self.set = set(self.data)
def push(self, item: T):
if item not in self.set:
super().push(item)
self.set.add(item)
def pop(self) -> T:
item = super().pop()
self.set.remove(item)
return item
def __contains__(self, item: T) -> bool:
return item in self.set
def filter_map(self, filter_fun: tp.Optional[Predicate[T]] = None,
map_fun: tp.Optional[tp.Callable[[T], tp.Any]] = None):
super().filter_map(filter_fun=filter_fun, map_fun=map_fun)
self.set = set(self.data)
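# Usage sketch (added; not part of the original module):
#   >>> h = SetHeap([3, 1, 2])
#   >>> h.pop()
#   1
#   >>> 2 in h
#   True
#   >>> h.push(2)            # duplicate of an element already present: ignored
#   >>> list(h.iter_ascending())
#   [2, 3]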
| 27.87037
| 97
| 0.60155
|
3ce0a1cd3eeff143b31db480e94fdf5cc04ee630
| 4,066
|
py
|
Python
|
src/server/crud.py
|
MMMMMMMingor/SCUT-OFSV
|
6db6b162bd9442217e7acb84e6bf4cda2976b95f
|
[
"MIT"
] | null | null | null |
src/server/crud.py
|
MMMMMMMingor/SCUT-OFSV
|
6db6b162bd9442217e7acb84e6bf4cda2976b95f
|
[
"MIT"
] | null | null | null |
src/server/crud.py
|
MMMMMMMingor/SCUT-OFSV
|
6db6b162bd9442217e7acb84e6bf4cda2976b95f
|
[
"MIT"
] | null | null | null |
import sys
from model import User, SingleMin, MultiMean, EB_DBA, LS_DBA, Result
import pickle
import numpy as np
import uuid
sys.path.append("../")
import algo.core.dtw as dtw
import algo.core.template as tpl
import util
def add_user(username: str) -> User:
new_user = User.create(name=username)
return new_user
def get_user(user_id: int) -> User:
user = User.select().where(User.id == user_id).get()
return user
def get_user_id_by_name(username: str) -> int:
user_id = User.select(User.id).where(User.name == username).get()
return user_id
def add_single_min_tpl(user_id: int, single_min_tpl: np.ndarray, threshold: float) -> SingleMin:
buffer = pickle.dumps(single_min_tpl)
new_one = SingleMin.create(
user_id=user_id, single_min_tpl=buffer, threshold=threshold)
return new_one
def get_single_min_tpl(user_id: int) -> (np.ndarray, float):
single_min = SingleMin.select().where(SingleMin.user_id == user_id).get()
return pickle.loads(single_min.single_min_tpl), single_min.threshold
def add_multi_mean_tpl(user_id: int, signatures: list, threshold: float) -> MultiMean:
buffer = pickle.dumps(signatures)
new_one = MultiMean.create(
user_id=user_id, signatures=buffer, threshold=threshold)
return new_one
def get_multi_tpl(user_id) -> (list, float):
multi_mean = MultiMean.select().where(MultiMean.user_id == user_id).get()
return pickle.loads(multi_mean.signatures), multi_mean.threshold
def add_eb_dba_tpl(user_id: int, eb_dba_tpl: np.ndarray, threshold: float) -> EB_DBA:
buffer = pickle.dumps(eb_dba_tpl)
new_one = EB_DBA.create(
user_id=user_id, eb_dba_tpl=buffer, threshold=threshold)
return new_one
def get_eb_dba_tpl(user_id: int) -> (np.ndarray, float):
eb_dba = EB_DBA.select().where(EB_DBA.user_id == user_id).get()
return pickle.loads(eb_dba.eb_dba_tpl), eb_dba.threshold
def add_ls_dba_tpl(user_id: int, eb_dba_tpl: np.ndarray, ls: np.ndarray, threshold: float) -> LS_DBA:
buffer1 = pickle.dumps(eb_dba_tpl)
buffer2 = pickle.dumps(ls)
new_one = LS_DBA.create(
user_id=user_id, eb_dba_tpl=buffer1, ls=buffer2, threshold=threshold)
return new_one
def get_ls_dba_tpl(user_id: int) -> (np.ndarray, np.ndarray, float):
ls_dba = LS_DBA.select().where(LS_DBA.user_id == user_id).get()
return pickle.loads(ls_dba.eb_dba_tpl), pickle.loads(ls_dba.ls), ls_dba.threshold
def add_result(user: str, algo: str, threshold: float, result: float, cost: float):
uid = uuid.uuid1()
new_one = Result.create(id=uid, user=user, algo=algo, threshold=threshold, result=result, cost=cost)
new_one.id = uid
return new_one
def get_result(res_id: str) -> Result:
return Result.select().where(Result.id == res_id).get()
def cal_enroll_threshold(single_min_tpl: np.ndarray, eb_dba_tpl: np.ndarray, ls: np.ndarray, enrollment_signatures: list) -> (float, float, float, float):
with util.my_timer('single min'):
single_min_thresholds = []
for sig in enrollment_signatures:
res = dtw.DTW(single_min_tpl, sig)
single_min_thresholds.append(res)
with util.my_timer('multi mean'):
multi_mean_thresholds = []
for i, sig in enumerate(enrollment_signatures):
# build the reference from the other enrollment signatures (leave-one-out)
res = tpl.get_multi_mean_dtw(enrollment_signatures[:i] + enrollment_signatures[i + 1:], sig)
multi_mean_thresholds.append(res)
with util.my_timer('eb-dba'):
eb_dba_thresholds = []
for sig in enrollment_signatures:
res = dtw.DTW(sig, eb_dba_tpl)
eb_dba_thresholds.append(res)
with util.my_timer('ls-dba'):
ls_dba_thresholds = []
for sig in enrollment_signatures:
res = dtw.DTW(sig, eb_dba_tpl, local_stability=ls)
ls_dba_thresholds.append(res)
return max(single_min_thresholds), max(multi_mean_thresholds), max(eb_dba_thresholds), max(ls_dba_thresholds)
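# Added note (not part of the original module): each returned value is the
# worst-case (maximum) DTW distance among the user's own enrollment signatures
# for that template type. A hypothetical verification check could then be:
#   >>> def is_genuine(distance, threshold):
#   ...     return distance <= threshold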
| 36.303571
| 155
| 0.688146
|
ea1ee41fae77b41c30bfa848a2c88f7ad0bad305
| 1,648
|
py
|
Python
|
python_script/test_sphere_class.py
|
listenzcc/3D_model_matlab
|
2dcaf50a51cf02591737aaa6b4924ed3848f1840
|
[
"BSD-3-Clause"
] | null | null | null |
python_script/test_sphere_class.py
|
listenzcc/3D_model_matlab
|
2dcaf50a51cf02591737aaa6b4924ed3848f1840
|
[
"BSD-3-Clause"
] | null | null | null |
python_script/test_sphere_class.py
|
listenzcc/3D_model_matlab
|
2dcaf50a51cf02591737aaa6b4924ed3848f1840
|
[
"BSD-3-Clause"
] | null | null | null |
# code: utf-8
from mpl_toolkits.mplot3d import proj3d
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_aspect('equal')
class Sphere:
def __init__(self, ax):
u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
x = np.cos(u)*np.sin(v)
y = np.sin(u)*np.sin(v)
z = np.cos(v)
self.xyz = np.concatenate(
(x[:, :, np.newaxis],
y[:, :, np.newaxis],
z[:, :, np.newaxis]), axis=2)
self.u0, self.v0 = 0, 0
self.x0, self.y0, self.z0 = 0, 0, 0
self.ax = ax
def draw(self):
trans_v = np.array([
[1, 0, 0],
[0, np.cos(self.v0), -np.sin(self.v0)],
[0, np.sin(self.v0), np.cos(self.v0)]
])
trans_u = np.array([
[np.cos(self.u0), -np.sin(self.u0), 0],
[np.sin(self.u0), np.cos(self.u0), 0],
[0, 0, 1]
])
xyz = np.matmul(self.xyz, np.matmul(trans_v, trans_u))
sphere = self.ax.plot_wireframe(xyz[:, :, 0]+self.x0,
xyz[:, :, 1]+self.y0,
xyz[:, :, 2]+self.z0, color='r')
sphere.__setattr__('___target___', 1)
sp = Sphere(ax)
sp1 = Sphere(ax)
sp1.x0, sp1.y0, sp1.z0 = 1, 1, 1
# sp.u0 = np.pi/4
# sp.draw()
# plt.show(block=True)
def is_target(x, t='___target___'):
return hasattr(x, t)
for j in range(10000):
print(j)
# sp.u0 += np.pi/100
sp.v0 += np.pi/50
any(e.remove() for e in ax.findobj(is_target))
sp.draw()
sp1.draw()
plt.ion()
plt.pause(0.001)
print('done.')
| 23.884058
| 67
| 0.492718
|
74f3d3bbfdeacc8ab7bb63f4d0fcdc8775ca99d5
| 400
|
py
|
Python
|
src/openne/dataloaders/__init__.py
|
zhangzw16/OpenNE
|
3a5ceec95da6c090537a288a7e521bc9eed1718e
|
[
"MIT"
] | 4
|
2020-09-21T07:32:36.000Z
|
2021-06-09T07:23:34.000Z
|
src/openne/dataloaders/__init__.py
|
Bznkxs/OpenNE
|
046b038d32a29e9f377163c2bc80dda949629483
|
[
"MIT"
] | 1
|
2020-12-02T03:32:45.000Z
|
2020-12-02T03:32:45.000Z
|
src/openne/dataloaders/__init__.py
|
zhangzw16/OpenNE
|
3a5ceec95da6c090537a288a7e521bc9eed1718e
|
[
"MIT"
] | 3
|
2020-07-04T12:43:53.000Z
|
2021-08-05T09:47:26.000Z
|
from .graph import Dataset, Graph, LocalFile, Adapter, NetResources, create_self_defined_dataset
from .matlab_matrix import MatlabMatrix, PPI, Wikipedia, Flickr, BlogCatalog
from .wiki import Wiki
from .planetoid_dataset import PubMed, Cora, CiteSeer
datasetlist = [PPI, Wikipedia, Flickr, BlogCatalog, Wiki, PubMed, Cora, CiteSeer]
datasetdict = {Cls.__name__.lower(): Cls for Cls in datasetlist}
| 44.444444
| 96
| 0.8
|
4df1944e6c8bae0523085c1da869b563594ef5a6
| 19,196
|
py
|
Python
|
scenegraph/exp-official/taskographyv2tiny10_ploi/ploi_test_stats.py
|
taskography/3dscenegraph-dev
|
2c261241230fbea1f1c687ff793478248f25c02c
|
[
"MIT"
] | 1
|
2022-01-30T22:06:57.000Z
|
2022-01-30T22:06:57.000Z
|
scenegraph/exp-official/taskographyv2tiny10_ploi/ploi_test_stats.py
|
taskography/3dscenegraph-dev
|
2c261241230fbea1f1c687ff793478248f25c02c
|
[
"MIT"
] | null | null | null |
scenegraph/exp-official/taskographyv2tiny10_ploi/ploi_test_stats.py
|
taskography/3dscenegraph-dev
|
2c261241230fbea1f1c687ff793478248f25c02c
|
[
"MIT"
] | null | null | null |
STATS = [
{
"num_node_expansions": 0,
"search_time": 0.0590206,
"total_time": 0.135665,
"plan_length": 239,
"plan_cost": 239,
"objects_used": 138,
"objects_total": 313,
"neural_net_time": 0.0729224681854248,
"num_replanning_steps": 0,
"wall_time": 0.7833693027496338
},
{
"num_node_expansions": 0,
"search_time": 0.064626,
"total_time": 0.137415,
"plan_length": 225,
"plan_cost": 225,
"objects_used": 137,
"objects_total": 313,
"neural_net_time": 0.018321752548217773,
"num_replanning_steps": 0,
"wall_time": 0.729494571685791
},
{
"num_node_expansions": 0,
"search_time": 0.0406082,
"total_time": 0.110957,
"plan_length": 225,
"plan_cost": 225,
"objects_used": 135,
"objects_total": 313,
"neural_net_time": 0.018448352813720703,
"num_replanning_steps": 0,
"wall_time": 0.6801040172576904
},
{
"num_node_expansions": 0,
"search_time": 0.0443666,
"total_time": 0.122266,
"plan_length": 203,
"plan_cost": 203,
"objects_used": 139,
"objects_total": 313,
"neural_net_time": 0.0182950496673584,
"num_replanning_steps": 0,
"wall_time": 0.7371094226837158
},
{
"num_node_expansions": 0,
"search_time": 0.0810146,
"total_time": 0.149395,
"plan_length": 300,
"plan_cost": 300,
"objects_used": 135,
"objects_total": 313,
"neural_net_time": 0.028308391571044922,
"num_replanning_steps": 0,
"wall_time": 0.7561242580413818
},
{
"num_node_expansions": 0,
"search_time": 0.106388,
"total_time": 0.203916,
"plan_length": 264,
"plan_cost": 264,
"objects_used": 133,
"objects_total": 250,
"neural_net_time": 0.01630115509033203,
"num_replanning_steps": 0,
"wall_time": 0.9337825775146484
},
{
"num_node_expansions": 0,
"search_time": 0.0947902,
"total_time": 0.161375,
"plan_length": 279,
"plan_cost": 279,
"objects_used": 124,
"objects_total": 250,
"neural_net_time": 0.017307519912719727,
"num_replanning_steps": 0,
"wall_time": 0.7529006004333496
},
{
"num_node_expansions": 0,
"search_time": 0.0631903,
"total_time": 0.145896,
"plan_length": 225,
"plan_cost": 225,
"objects_used": 130,
"objects_total": 250,
"neural_net_time": 0.0173947811126709,
"num_replanning_steps": 0,
"wall_time": 0.78566575050354
},
{
"num_node_expansions": 0,
"search_time": 0.0675286,
"total_time": 0.150828,
"plan_length": 234,
"plan_cost": 234,
"objects_used": 130,
"objects_total": 250,
"neural_net_time": 0.017730712890625,
"num_replanning_steps": 0,
"wall_time": 0.7938532829284668
},
{
"num_node_expansions": 0,
"search_time": 0.0712452,
"total_time": 0.171318,
"plan_length": 227,
"plan_cost": 227,
"objects_used": 134,
"objects_total": 250,
"neural_net_time": 0.017328739166259766,
"num_replanning_steps": 0,
"wall_time": 0.8543844223022461
},
{
"num_node_expansions": 0,
"search_time": 0.0853429,
"total_time": 0.16759,
"plan_length": 216,
"plan_cost": 216,
"objects_used": 127,
"objects_total": 313,
"neural_net_time": 0.017778635025024414,
"num_replanning_steps": 0,
"wall_time": 0.8059256076812744
},
{
"num_node_expansions": 0,
"search_time": 0.0590514,
"total_time": 0.129913,
"plan_length": 233,
"plan_cost": 233,
"objects_used": 125,
"objects_total": 313,
"neural_net_time": 0.017973661422729492,
"num_replanning_steps": 0,
"wall_time": 0.7291240692138672
},
{
"num_node_expansions": 0,
"search_time": 0.219878,
"total_time": 0.303706,
"plan_length": 231,
"plan_cost": 231,
"objects_used": 127,
"objects_total": 313,
"neural_net_time": 0.01891636848449707,
"num_replanning_steps": 0,
"wall_time": 0.9535608291625977
},
{
"num_node_expansions": 0,
"search_time": 0.047874,
"total_time": 0.106879,
"plan_length": 148,
"plan_cost": 148,
"objects_used": 124,
"objects_total": 313,
"neural_net_time": 0.020419597625732422,
"num_replanning_steps": 0,
"wall_time": 0.6595931053161621
},
{
"num_node_expansions": 0,
"search_time": 0.0687187,
"total_time": 0.139548,
"plan_length": 212,
"plan_cost": 212,
"objects_used": 124,
"objects_total": 313,
"neural_net_time": 0.01863574981689453,
"num_replanning_steps": 0,
"wall_time": 0.7431893348693848
},
{
"num_node_expansions": 0,
"search_time": 0.100507,
"total_time": 0.192935,
"plan_length": 305,
"plan_cost": 305,
"objects_used": 142,
"objects_total": 310,
"neural_net_time": 0.019444704055786133,
"num_replanning_steps": 0,
"wall_time": 0.8713023662567139
},
{
"num_node_expansions": 0,
"search_time": 0.137879,
"total_time": 0.239788,
"plan_length": 329,
"plan_cost": 329,
"objects_used": 145,
"objects_total": 310,
"neural_net_time": 0.01857900619506836,
"num_replanning_steps": 0,
"wall_time": 0.9619204998016357
},
{
"num_node_expansions": 0,
"search_time": 0.111021,
"total_time": 0.201476,
"plan_length": 273,
"plan_cost": 273,
"objects_used": 142,
"objects_total": 310,
"neural_net_time": 0.0187380313873291,
"num_replanning_steps": 0,
"wall_time": 0.8721878528594971
},
{
"num_node_expansions": 0,
"search_time": 0.202718,
"total_time": 0.31818,
"plan_length": 318,
"plan_cost": 318,
"objects_used": 144,
"objects_total": 310,
"neural_net_time": 0.020519018173217773,
"num_replanning_steps": 0,
"wall_time": 1.07771635055542
},
{
"num_node_expansions": 0,
"search_time": 0.116241,
"total_time": 0.234645,
"plan_length": 252,
"plan_cost": 252,
"objects_used": 147,
"objects_total": 310,
"neural_net_time": 0.019451379776000977,
"num_replanning_steps": 0,
"wall_time": 0.9695379734039307
},
{
"num_node_expansions": 0,
"search_time": 0.227062,
"total_time": 0.338123,
"plan_length": 318,
"plan_cost": 318,
"objects_used": 160,
"objects_total": 270,
"neural_net_time": 0.018569231033325195,
"num_replanning_steps": 4,
"wall_time": 2.2580912113189697
},
{
"num_node_expansions": 0,
"search_time": 0.11693,
"total_time": 0.225464,
"plan_length": 249,
"plan_cost": 249,
"objects_used": 158,
"objects_total": 270,
"neural_net_time": 0.01886725425720215,
"num_replanning_steps": 4,
"wall_time": 2.1730399131774902
},
{
"num_node_expansions": 0,
"search_time": 0.0553674,
"total_time": 0.126864,
"plan_length": 202,
"plan_cost": 202,
"objects_used": 148,
"objects_total": 270,
"neural_net_time": 0.018817901611328125,
"num_replanning_steps": 2,
"wall_time": 1.246582269668579
},
{
"num_node_expansions": 0,
"search_time": 0.0704041,
"total_time": 0.154796,
"plan_length": 236,
"plan_cost": 236,
"objects_used": 158,
"objects_total": 270,
"neural_net_time": 0.020676851272583008,
"num_replanning_steps": 2,
"wall_time": 1.3242926597595215
},
{
"num_node_expansions": 0,
"search_time": 0.167932,
"total_time": 0.258431,
"plan_length": 215,
"plan_cost": 215,
"objects_used": 155,
"objects_total": 270,
"neural_net_time": 0.019160985946655273,
"num_replanning_steps": 2,
"wall_time": 1.4720509052276611
},
{
"num_node_expansions": 0,
"search_time": 0.292947,
"total_time": 0.397104,
"plan_length": 272,
"plan_cost": 272,
"objects_used": 151,
"objects_total": 389,
"neural_net_time": 0.024260997772216797,
"num_replanning_steps": 0,
"wall_time": 1.110828161239624
},
{
"num_node_expansions": 0,
"search_time": 0.142221,
"total_time": 0.261433,
"plan_length": 300,
"plan_cost": 300,
"objects_used": 155,
"objects_total": 389,
"neural_net_time": 0.0247347354888916,
"num_replanning_steps": 0,
"wall_time": 1.0322165489196777
},
{
"num_node_expansions": 0,
"search_time": 0.133152,
"total_time": 0.257929,
"plan_length": 259,
"plan_cost": 259,
"objects_used": 152,
"objects_total": 389,
"neural_net_time": 0.029698610305786133,
"num_replanning_steps": 0,
"wall_time": 1.019019603729248
},
{
"num_node_expansions": 0,
"search_time": 0.0761752,
"total_time": 0.159307,
"plan_length": 271,
"plan_cost": 271,
"objects_used": 148,
"objects_total": 389,
"neural_net_time": 0.02542424201965332,
"num_replanning_steps": 0,
"wall_time": 0.8079466819763184
},
{
"num_node_expansions": 0,
"search_time": 0.0664833,
"total_time": 0.147118,
"plan_length": 226,
"plan_cost": 226,
"objects_used": 146,
"objects_total": 389,
"neural_net_time": 0.025787830352783203,
"num_replanning_steps": 0,
"wall_time": 0.7878320217132568
},
{
"num_node_expansions": 0,
"search_time": 0.0757025,
"total_time": 0.157409,
"plan_length": 232,
"plan_cost": 232,
"objects_used": 126,
"objects_total": 365,
"neural_net_time": 0.02201223373413086,
"num_replanning_steps": 0,
"wall_time": 0.8110518455505371
},
{
"num_node_expansions": 0,
"search_time": 0.0516564,
"total_time": 0.173149,
"plan_length": 172,
"plan_cost": 172,
"objects_used": 133,
"objects_total": 365,
"neural_net_time": 0.022777080535888672,
"num_replanning_steps": 0,
"wall_time": 0.9378814697265625
},
{
"num_node_expansions": 0,
"search_time": 0.0372471,
"total_time": 0.0991201,
"plan_length": 214,
"plan_cost": 214,
"objects_used": 120,
"objects_total": 365,
"neural_net_time": 0.023756980895996094,
"num_replanning_steps": 0,
"wall_time": 0.6919658184051514
},
{
"num_node_expansions": 0,
"search_time": 0.0672498,
"total_time": 0.164864,
"plan_length": 194,
"plan_cost": 194,
"objects_used": 126,
"objects_total": 365,
"neural_net_time": 0.023845911026000977,
"num_replanning_steps": 0,
"wall_time": 0.8676002025604248
},
{
"num_node_expansions": 0,
"search_time": 0.0801759,
"total_time": 0.165487,
"plan_length": 227,
"plan_cost": 227,
"objects_used": 126,
"objects_total": 365,
"neural_net_time": 0.022589683532714844,
"num_replanning_steps": 0,
"wall_time": 0.8307783603668213
},
{
"num_node_expansions": 0,
"search_time": 0.0313722,
"total_time": 0.0909993,
"plan_length": 144,
"plan_cost": 144,
"objects_used": 91,
"objects_total": 180,
"neural_net_time": 0.011240005493164062,
"num_replanning_steps": 0,
"wall_time": 0.6486868858337402
},
{
"num_node_expansions": 0,
"search_time": 0.0428838,
"total_time": 0.131334,
"plan_length": 134,
"plan_cost": 134,
"objects_used": 100,
"objects_total": 180,
"neural_net_time": 0.011623620986938477,
"num_replanning_steps": 1,
"wall_time": 0.9986300468444824
},
{
"num_node_expansions": 0,
"search_time": 0.0581445,
"total_time": 0.14261,
"plan_length": 156,
"plan_cost": 156,
"objects_used": 94,
"objects_total": 180,
"neural_net_time": 0.012029647827148438,
"num_replanning_steps": 0,
"wall_time": 0.8016660213470459
},
{
"num_node_expansions": 0,
"search_time": 0.031466,
"total_time": 0.0903979,
"plan_length": 134,
"plan_cost": 134,
"objects_used": 89,
"objects_total": 180,
"neural_net_time": 0.011792659759521484,
"num_replanning_steps": 0,
"wall_time": 0.6729941368103027
},
{
"num_node_expansions": 0,
"search_time": 0.0976693,
"total_time": 0.185484,
"plan_length": 187,
"plan_cost": 187,
"objects_used": 98,
"objects_total": 180,
"neural_net_time": 0.016064882278442383,
"num_replanning_steps": 0,
"wall_time": 0.8486185073852539
},
{
"num_node_expansions": 0,
"search_time": 0.123185,
"total_time": 0.225347,
"plan_length": 213,
"plan_cost": 213,
"objects_used": 110,
"objects_total": 160,
"neural_net_time": 0.009734153747558594,
"num_replanning_steps": 0,
"wall_time": 0.8976569175720215
},
{
"num_node_expansions": 0,
"search_time": 0.0398813,
"total_time": 0.11707,
"plan_length": 166,
"plan_cost": 166,
"objects_used": 109,
"objects_total": 160,
"neural_net_time": 0.011234045028686523,
"num_replanning_steps": 0,
"wall_time": 0.7407865524291992
},
{
"num_node_expansions": 0,
"search_time": 0.0978265,
"total_time": 0.191934,
"plan_length": 183,
"plan_cost": 183,
"objects_used": 110,
"objects_total": 160,
"neural_net_time": 0.010131359100341797,
"num_replanning_steps": 0,
"wall_time": 0.8443722724914551
},
{
"num_node_expansions": 0,
"search_time": 0.0455502,
"total_time": 0.114871,
"plan_length": 171,
"plan_cost": 171,
"objects_used": 104,
"objects_total": 160,
"neural_net_time": 0.009799003601074219,
"num_replanning_steps": 0,
"wall_time": 0.6895046234130859
},
{
"num_node_expansions": 0,
"search_time": 0.290929,
"total_time": 0.366261,
"plan_length": 207,
"plan_cost": 207,
"objects_used": 109,
"objects_total": 160,
"neural_net_time": 0.01043391227722168,
"num_replanning_steps": 0,
"wall_time": 0.9823651313781738
},
{
"num_node_expansions": 0,
"search_time": 0.157317,
"total_time": 0.313883,
"plan_length": 238,
"plan_cost": 238,
"objects_used": 137,
"objects_total": 327,
"neural_net_time": 0.018967628479003906,
"num_replanning_steps": 6,
"wall_time": 2.89699649810791
},
{
"num_node_expansions": 0,
"search_time": 0.0374121,
"total_time": 0.0887978,
"plan_length": 188,
"plan_cost": 188,
"objects_used": 111,
"objects_total": 327,
"neural_net_time": 0.018378734588623047,
"num_replanning_steps": 0,
"wall_time": 0.5646920204162598
},
{
"num_node_expansions": 0,
"search_time": 0.0479637,
"total_time": 0.115348,
"plan_length": 185,
"plan_cost": 185,
"objects_used": 111,
"objects_total": 327,
"neural_net_time": 0.018879175186157227,
"num_replanning_steps": 0,
"wall_time": 0.622706413269043
},
{
"num_node_expansions": 0,
"search_time": 0.0907916,
"total_time": 0.194928,
"plan_length": 207,
"plan_cost": 207,
"objects_used": 130,
"objects_total": 327,
"neural_net_time": 0.018724918365478516,
"num_replanning_steps": 7,
"wall_time": 2.6484827995300293
},
{
"num_node_expansions": 0,
"search_time": 0.0440995,
"total_time": 0.0953451,
"plan_length": 199,
"plan_cost": 199,
"objects_used": 112,
"objects_total": 327,
"neural_net_time": 0.01787734031677246,
"num_replanning_steps": 0,
"wall_time": 0.5606136322021484
},
{
"num_node_expansions": 0,
"search_time": 0.064341,
"total_time": 0.124621,
"plan_length": 189,
"plan_cost": 189,
"objects_used": 93,
"objects_total": 165,
"neural_net_time": 0.009817123413085938,
"num_replanning_steps": 0,
"wall_time": 0.6311872005462646
},
{
"num_node_expansions": 0,
"search_time": 0.0527209,
"total_time": 0.118478,
"plan_length": 190,
"plan_cost": 190,
"objects_used": 94,
"objects_total": 165,
"neural_net_time": 0.009423971176147461,
"num_replanning_steps": 0,
"wall_time": 0.6589460372924805
},
{
"num_node_expansions": 0,
"search_time": 0.0799768,
"total_time": 0.162909,
"plan_length": 197,
"plan_cost": 197,
"objects_used": 99,
"objects_total": 165,
"neural_net_time": 0.009454011917114258,
"num_replanning_steps": 0,
"wall_time": 0.7666943073272705
},
{
"num_node_expansions": 0,
"search_time": 0.0745703,
"total_time": 0.13608,
"plan_length": 197,
"plan_cost": 197,
"objects_used": 93,
"objects_total": 165,
"neural_net_time": 0.013700008392333984,
"num_replanning_steps": 0,
"wall_time": 0.6576852798461914
},
{
"num_node_expansions": 0,
"search_time": 0.104704,
"total_time": 0.175394,
"plan_length": 184,
"plan_cost": 184,
"objects_used": 98,
"objects_total": 165,
"neural_net_time": 0.009774446487426758,
"num_replanning_steps": 0,
"wall_time": 0.7290678024291992
}
]
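# Added sketch (not part of the original stats dump): the entries above can be
# aggregated directly, e.g.
#   >>> sum(s["plan_length"] for s in STATS) / len(STATS)   # mean plan length
#   >>> max(s["wall_time"] for s in STATS)                  # slowest instance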
| 28.996979
| 48
| 0.552771
|
6c2842771169b40d1c40ec750878df805a209431
| 3,033
|
py
|
Python
|
conans/test/unittests/client/generators/virtualbuildenv_test.py
|
Wonders11/conan
|
28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8
|
[
"MIT"
] | 6,205
|
2015-12-01T13:40:05.000Z
|
2022-03-31T07:30:25.000Z
|
conans/test/unittests/client/generators/virtualbuildenv_test.py
|
Wonders11/conan
|
28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8
|
[
"MIT"
] | 8,747
|
2015-12-01T16:28:48.000Z
|
2022-03-31T23:34:53.000Z
|
conans/test/unittests/client/generators/virtualbuildenv_test.py
|
Mattlk13/conan
|
005fc53485557b0a570bb71670f2ca9c66082165
|
[
"MIT"
] | 961
|
2015-12-01T16:56:43.000Z
|
2022-03-31T13:50:52.000Z
|
# coding=utf-8
import platform
import unittest
from conans.client.generators.virtualbuildenv import VirtualBuildEnvGenerator
from conans.test.utils.mocks import MockSettings, ConanFileMock
class VirtualBuildEnvGeneratorGCCTest(unittest.TestCase):
activate_sh = "activate_build.sh"
activate_bat = "activate_build.bat"
activate_ps1 = "activate_build.ps1"
@classmethod
def setUpClass(cls):
conanfile = ConanFileMock()
conanfile.settings = MockSettings({"compiler": "gcc",
"build_type": "Release"})
cls.generator = VirtualBuildEnvGenerator(conanfile)
cls.generator.output_path = "not-used"
cls.result = cls.generator.content
def test_output(self):
keys = ["deactivate_build.sh", "activate_build.sh", "environment_build.sh.env",
"activate_build.ps1", "deactivate_build.ps1", "environment_build.ps1.env"]
if platform.system() == "Windows":
keys += ["activate_build.bat", "deactivate_build.bat", "environment_build.bat.env"]
self.assertListEqual(sorted(keys), sorted(self.result.keys()))
def test_environment(self):
self.assertEqual(self.generator.env["CFLAGS"], ['-O3', '-s', '--sysroot=/path/to/sysroot'])
self.assertEqual(self.generator.env["CPPFLAGS"], ['-DNDEBUG'])
self.assertEqual(self.generator.env["CXXFLAGS"], ['-O3', '-s', '--sysroot=/path/to/sysroot'])
self.assertEqual(self.generator.env["LDFLAGS"], ['--sysroot=/path/to/sysroot'])
self.assertEqual(self.generator.env["LIBS"], [])
def test_scripts(self):
content = self.result["environment_build.sh.env"]
self.assertIn('CPPFLAGS="-DNDEBUG${CPPFLAGS:+ $CPPFLAGS}"', content)
self.assertIn('CXXFLAGS="-O3 -s --sysroot=/path/to/sysroot${CXXFLAGS:+ $CXXFLAGS}"', content)
self.assertIn('CFLAGS="-O3 -s --sysroot=/path/to/sysroot${CFLAGS:+ $CFLAGS}"', content)
self.assertIn('LDFLAGS="--sysroot=/path/to/sysroot${LDFLAGS:+ $LDFLAGS}"', content)
self.assertIn('LIBS="${LIBS:+ $LIBS}"', content)
content = self.result["environment_build.ps1.env"]
self.assertIn('CPPFLAGS=-DNDEBUG $env:CPPFLAGS', content)
self.assertIn('CXXFLAGS=-O3 -s --sysroot=/path/to/sysroot $env:CXXFLAGS', content)
self.assertIn('CFLAGS=-O3 -s --sysroot=/path/to/sysroot $env:CFLAGS', content)
self.assertIn('LDFLAGS=--sysroot=/path/to/sysroot $env:LDFLAGS', content)
self.assertIn('LIBS=$env:LIBS', content)
if platform.system() == "Windows":
content = self.result["environment_build.bat.env"]
self.assertIn('CPPFLAGS=-DNDEBUG %CPPFLAGS%', content)
self.assertIn('CXXFLAGS=-O3 -s --sysroot=/path/to/sysroot %CXXFLAGS%', content)
self.assertIn('CFLAGS=-O3 -s --sysroot=/path/to/sysroot %CFLAGS%', content)
self.assertIn('LDFLAGS=--sysroot=/path/to/sysroot %LDFLAGS%', content)
self.assertIn('LIBS=%LIBS%', content)
| 48.142857
| 101
| 0.65183
|
a4a8fdc6735ca27e06b9125952cf7409e00947a2
| 7,598
|
py
|
Python
|
ExFamToPerson/contracts/HPos_MotherFather_IsolatedLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 3
|
2017-06-02T19:26:27.000Z
|
2021-06-14T04:25:45.000Z
|
ExFamToPerson/contracts/HPos_MotherFather_IsolatedLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 8
|
2016-08-24T07:04:07.000Z
|
2017-05-26T16:22:47.000Z
|
ExFamToPerson/contracts/HPos_MotherFather_IsolatedLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 1
|
2019-10-31T06:00:23.000Z
|
2019-10-31T06:00:23.000Z
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HPos_MotherFather_IsolatedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HPos_MotherFather_IsolatedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HPos_MotherFather_IsolatedLHS, self).__init__(name='HPos_MotherFather_IsolatedLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Pos_MotherFather')
# Set the node attributes
# match class Member() node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["MT_dirty__"] = False
self.vs[0]["mm__"] = """MT_pre__Member"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class Family() node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["MT_dirty__"] = False
self.vs[1]["mm__"] = """MT_pre__Family"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class Member() node
self.add_node()
self.vs[2]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["MT_dirty__"] = False
self.vs[2]["mm__"] = """MT_pre__Member"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# Add the attribute equations
self["equations"] = []
def eval_attr11(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr12(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr13(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
| 50.317881
| 128
| 0.486707
|
1b58b6398fb9695fcbcad68672df3e93f99682d5
| 633
|
py
|
Python
|
setup.py
|
pritam-dey3/lemkelcp
|
4d963a6d0e6ba531496f5b0e99a52c0d288e4a6e
|
[
"MIT"
] | null | null | null |
setup.py
|
pritam-dey3/lemkelcp
|
4d963a6d0e6ba531496f5b0e99a52c0d288e4a6e
|
[
"MIT"
] | null | null | null |
setup.py
|
pritam-dey3/lemkelcp
|
4d963a6d0e6ba531496f5b0e99a52c0d288e4a6e
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(
name="lemkelcp",
version='0.1',
author="Andy Lamperski",
author_email="alampers@umn.edu",
description=("A Python implementation of Lemke's Algorithm for linear complementarity problems"),
license="MIT",
url="https://github.com/AndyLamperski/lemkelcp",
install_requires=['numpy', 'tabulate'],
keywords="linear complementarity problem lcp optimization",
packages=['lemkelcp'],
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering :: Mathematics"
]
)
| 30.142857
| 101
| 0.663507
|
64a45e89105dba884a658e2073488c2a4639e545
| 32,126
|
py
|
Python
|
neutron/api/v2/attributes.py
|
mihaibroc/neutron
|
dfb7c826d0846970bac00003cea98c9f6bd8f0b8
|
[
"Apache-2.0"
] | null | null | null |
neutron/api/v2/attributes.py
|
mihaibroc/neutron
|
dfb7c826d0846970bac00003cea98c9f6bd8f0b8
|
[
"Apache-2.0"
] | null | null | null |
neutron/api/v2/attributes.py
|
mihaibroc/neutron
|
dfb7c826d0846970bac00003cea98c9f6bd8f0b8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import netaddr
from oslo_log import log as logging
from oslo_utils import uuidutils
import six
from neutron.common import constants
from neutron.common import exceptions as n_exc
LOG = logging.getLogger(__name__)
ATTR_NOT_SPECIFIED = object()
# Defining a constant to avoid repeating string literal in several modules
SHARED = 'shared'
# Used by range check to indicate no limit for a bound.
UNLIMITED = None
# TODO(watanabe.isao): A fix like in neutron/db/models_v2.py needs to be
# done in other db modules, to reuse the following constants.
# Common definitions for maximum string field length
NAME_MAX_LEN = 255
TENANT_ID_MAX_LEN = 255
DESCRIPTION_MAX_LEN = 255
DEVICE_ID_MAX_LEN = 255
DEVICE_OWNER_MAX_LEN = 255
def _verify_dict_keys(expected_keys, target_dict, strict=True):
"""Allows to verify keys in a dictionary.
:param expected_keys: A list of keys expected to be present.
:param target_dict: The dictionary which should be verified.
:param strict: Specifies whether additional keys are allowed to be present.
:return: True, if keys in the dictionary correspond to the specification.
"""
if not isinstance(target_dict, dict):
msg = (_("Invalid input. '%(target_dict)s' must be a dictionary "
"with keys: %(expected_keys)s") %
{'target_dict': target_dict, 'expected_keys': expected_keys})
LOG.debug(msg)
return msg
expected_keys = set(expected_keys)
provided_keys = set(target_dict.keys())
predicate = expected_keys.__eq__ if strict else expected_keys.issubset
if not predicate(provided_keys):
msg = (_("Validation of dictionary's keys failed. "
"Expected keys: %(expected_keys)s "
"Provided keys: %(provided_keys)s") %
{'expected_keys': expected_keys,
'provided_keys': provided_keys})
LOG.debug(msg)
return msg
def is_attr_set(attribute):
return not (attribute is None or attribute is ATTR_NOT_SPECIFIED)
def _validate_values(data, valid_values=None):
if data not in valid_values:
msg = (_("'%(data)s' is not in %(valid_values)s") %
{'data': data, 'valid_values': valid_values})
LOG.debug(msg)
return msg
def _validate_not_empty_string_or_none(data, max_len=None):
if data is not None:
return _validate_not_empty_string(data, max_len=max_len)
def _validate_not_empty_string(data, max_len=None):
msg = _validate_string(data, max_len=max_len)
if msg:
return msg
if not data.strip():
msg = _("'%s' Blank strings are not permitted") % data
LOG.debug(msg)
return msg
def _validate_string_or_none(data, max_len=None):
if data is not None:
return _validate_string(data, max_len=max_len)
def _validate_string(data, max_len=None):
if not isinstance(data, six.string_types):
msg = _("'%s' is not a valid string") % data
LOG.debug(msg)
return msg
if max_len is not None and len(data) > max_len:
msg = (_("'%(data)s' exceeds maximum length of %(max_len)s") %
{'data': data, 'max_len': max_len})
LOG.debug(msg)
return msg
def _validate_boolean(data, valid_values=None):
try:
convert_to_boolean(data)
except n_exc.InvalidInput:
msg = _("'%s' is not a valid boolean value") % data
LOG.debug(msg)
return msg
def _validate_range(data, valid_values=None):
"""Check that integer value is within a range provided.
Test is inclusive. Allows either limit to be ignored, to allow
checking ranges where only the lower or upper limit matter.
It is expected that the limits provided are valid integers or
the value None.
"""
min_value = valid_values[0]
max_value = valid_values[1]
try:
data = int(data)
except (ValueError, TypeError):
msg = _("'%s' is not an integer") % data
LOG.debug(msg)
return msg
if min_value is not UNLIMITED and data < min_value:
msg = _("'%(data)s' is too small - must be at least "
"'%(limit)d'") % {'data': data, 'limit': min_value}
LOG.debug(msg)
return msg
if max_value is not UNLIMITED and data > max_value:
msg = _("'%(data)s' is too large - must be no larger than "
"'%(limit)d'") % {'data': data, 'limit': max_value}
LOG.debug(msg)
return msg
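# Added sketch (not part of the original module), assuming the module-level
# UNLIMITED sentinel defined above:
#   >>> _validate_range(5, [0, 10])           # inside the range: returns None
#   >>> _validate_range(5, [0, UNLIMITED])    # no upper bound: returns None
#   >>> _validate_range(-1, [0, 10])          # returns "'-1' is too small ..."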
def _validate_no_whitespace(data):
"""Validates that input has no whitespace."""
if re.search(r'\s', data):
msg = _("'%s' contains whitespace") % data
LOG.debug(msg)
raise n_exc.InvalidInput(error_message=msg)
return data
def _validate_mac_address(data, valid_values=None):
try:
valid_mac = netaddr.valid_mac(_validate_no_whitespace(data))
except Exception:
valid_mac = False
# TODO(arosen): The code in this file should be refactored
# so it catches the correct exceptions. _validate_no_whitespace
# raises AttributeError if data is None.
if not valid_mac:
msg = _("'%s' is not a valid MAC address") % data
LOG.debug(msg)
return msg
def _validate_mac_address_or_none(data, valid_values=None):
if data is None:
return
return _validate_mac_address(data, valid_values)
def _validate_ip_address(data, valid_values=None):
try:
netaddr.IPAddress(_validate_no_whitespace(data))
# The followings are quick checks for IPv6 (has ':') and
# IPv4. (has 3 periods like 'xx.xx.xx.xx')
# NOTE(yamamoto): netaddr uses libraries provided by the underlying
# platform to convert addresses. For example, inet_aton(3).
# Some platforms, including NetBSD and OS X, have inet_aton
# implementation which accepts more varying forms of addresses than
# we want to accept here. The following check is to reject such
# addresses. For Example:
# >>> netaddr.IPAddress('1' * 59)
# IPAddress('199.28.113.199')
# >>> netaddr.IPAddress(str(int('1' * 59) & 0xffffffff))
# IPAddress('199.28.113.199')
# >>>
if ':' not in data and data.count('.') != 3:
raise ValueError()
except Exception:
msg = _("'%s' is not a valid IP address") % data
LOG.debug(msg)
return msg
def _validate_ip_pools(data, valid_values=None):
"""Validate that start and end IP addresses are present.
In addition to this the IP addresses will also be validated
"""
if not isinstance(data, list):
msg = _("Invalid data format for IP pool: '%s'") % data
LOG.debug(msg)
return msg
expected_keys = ['start', 'end']
for ip_pool in data:
msg = _verify_dict_keys(expected_keys, ip_pool)
if msg:
return msg
for k in expected_keys:
msg = _validate_ip_address(ip_pool[k])
if msg:
return msg
def _validate_fixed_ips(data, valid_values=None):
if not isinstance(data, list):
msg = _("Invalid data format for fixed IP: '%s'") % data
LOG.debug(msg)
return msg
ips = []
for fixed_ip in data:
if not isinstance(fixed_ip, dict):
msg = _("Invalid data format for fixed IP: '%s'") % fixed_ip
LOG.debug(msg)
return msg
if 'ip_address' in fixed_ip:
# Ensure that duplicate entries are not set - just checking IP
# suffices. Duplicate subnet_id's are legitimate.
fixed_ip_address = fixed_ip['ip_address']
if fixed_ip_address in ips:
msg = _("Duplicate IP address '%s'") % fixed_ip_address
LOG.debug(msg)
else:
msg = _validate_ip_address(fixed_ip_address)
if msg:
return msg
ips.append(fixed_ip_address)
if 'subnet_id' in fixed_ip:
msg = _validate_uuid(fixed_ip['subnet_id'])
if msg:
return msg
def _validate_nameservers(data, valid_values=None):
if not hasattr(data, '__iter__'):
msg = _("Invalid data format for nameserver: '%s'") % data
LOG.debug(msg)
return msg
hosts = []
for host in data:
# This must be an IP address only
msg = _validate_ip_address(host)
if msg:
msg = _("'%(host)s' is not a valid nameserver. %(msg)s") % {
'host': host, 'msg': msg}
LOG.debug(msg)
return msg
if host in hosts:
msg = _("Duplicate nameserver '%s'") % host
LOG.debug(msg)
return msg
hosts.append(host)
def _validate_hostroutes(data, valid_values=None):
if not isinstance(data, list):
msg = _("Invalid data format for hostroute: '%s'") % data
LOG.debug(msg)
return msg
expected_keys = ['destination', 'nexthop']
hostroutes = []
for hostroute in data:
msg = _verify_dict_keys(expected_keys, hostroute)
if msg:
return msg
msg = _validate_subnet(hostroute['destination'])
if msg:
return msg
msg = _validate_ip_address(hostroute['nexthop'])
if msg:
return msg
if hostroute in hostroutes:
msg = _("Duplicate hostroute '%s'") % hostroute
LOG.debug(msg)
return msg
hostroutes.append(hostroute)
def _validate_ip_address_or_none(data, valid_values=None):
if data is None:
return None
return _validate_ip_address(data, valid_values)
def _validate_subnet(data, valid_values=None):
msg = None
try:
net = netaddr.IPNetwork(_validate_no_whitespace(data))
if '/' not in data:
msg = _("'%(data)s' isn't a recognized IP subnet cidr,"
" '%(cidr)s' is recommended") % {"data": data,
"cidr": net.cidr}
else:
return
except Exception:
msg = _("'%s' is not a valid IP subnet") % data
if msg:
LOG.debug(msg)
return msg
def _validate_subnet_list(data, valid_values=None):
if not isinstance(data, list):
msg = _("'%s' is not a list") % data
LOG.debug(msg)
return msg
if len(set(data)) != len(data):
msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
LOG.debug(msg)
return msg
for item in data:
msg = _validate_subnet(item)
if msg:
return msg
def _validate_subnet_or_none(data, valid_values=None):
if data is None:
return
return _validate_subnet(data, valid_values)
def _validate_regex(data, valid_values=None):
try:
if re.match(valid_values, data):
return
except TypeError:
pass
msg = _("'%s' is not a valid input") % data
LOG.debug(msg)
return msg
def _validate_regex_or_none(data, valid_values=None):
if data is None:
return
return _validate_regex(data, valid_values)
def _validate_uuid(data, valid_values=None):
if not uuidutils.is_uuid_like(data):
msg = _("'%s' is not a valid UUID") % data
LOG.debug(msg)
return msg
def _validate_uuid_or_none(data, valid_values=None):
if data is not None:
return _validate_uuid(data)
def _validate_uuid_list(data, valid_values=None):
if not isinstance(data, list):
msg = _("'%s' is not a list") % data
LOG.debug(msg)
return msg
for item in data:
msg = _validate_uuid(item)
if msg:
return msg
if len(set(data)) != len(data):
msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
LOG.debug(msg)
return msg
def _validate_dict_item(key, key_validator, data):
# Find conversion function, if any, and apply it
conv_func = key_validator.get('convert_to')
if conv_func:
data[key] = conv_func(data.get(key))
# Find validator function
# TODO(salv-orlando): Structure of dict attributes should be improved
# to avoid iterating over items
val_func = val_params = None
for (k, v) in six.iteritems(key_validator):
if k.startswith('type:'):
# ask forgiveness, not permission
try:
val_func = validators[k]
except KeyError:
msg = _("Validator '%s' does not exist.") % k
LOG.debug(msg)
return msg
val_params = v
break
# Process validation
if val_func:
return val_func(data.get(key), val_params)
def _validate_dict(data, key_specs=None):
if not isinstance(data, dict):
msg = _("'%s' is not a dictionary") % data
LOG.debug(msg)
return msg
# Do not perform any further validation, if no constraints are supplied
if not key_specs:
return
# Check whether all required keys are present
required_keys = [key for key, spec in six.iteritems(key_specs)
if spec.get('required')]
if required_keys:
msg = _verify_dict_keys(required_keys, data, False)
if msg:
return msg
# Perform validation and conversion of all values
# according to the specifications.
for key, key_validator in [(k, v) for k, v in six.iteritems(key_specs)
if k in data]:
msg = _validate_dict_item(key, key_validator, data)
if msg:
return msg
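# Added sketch (not part of the original module): key_specs maps each key to an
# optional 'convert_to' converter, an optional 'required' flag, and a 'type:*'
# validator taken from the validators registry defined below.
#   >>> specs = {'start': {'type:ip_address': None, 'required': True},
#   ...          'end': {'type:ip_address': None, 'required': True}}
#   >>> _validate_dict({'start': '10.0.0.2', 'end': '10.0.0.254'}, specs)
#   >>> _validate_dict({'start': '10.0.0.2'}, specs)   # missing 'end': error msg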
def _validate_dict_or_none(data, key_specs=None):
if data is not None:
return _validate_dict(data, key_specs)
def _validate_dict_or_empty(data, key_specs=None):
if data != {}:
return _validate_dict(data, key_specs)
def _validate_dict_or_nodata(data, key_specs=None):
if data:
return _validate_dict(data, key_specs)
def _validate_non_negative(data, valid_values=None):
try:
data = int(data)
except (ValueError, TypeError):
msg = _("'%s' is not an integer") % data
LOG.debug(msg)
return msg
if data < 0:
msg = _("'%s' should be non-negative") % data
LOG.debug(msg)
return msg
def convert_to_boolean(data):
if isinstance(data, six.string_types):
val = data.lower()
if val == "true" or val == "1":
return True
if val == "false" or val == "0":
return False
elif isinstance(data, bool):
return data
elif isinstance(data, int):
if data == 0:
return False
elif data == 1:
return True
msg = _("'%s' cannot be converted to boolean") % data
raise n_exc.InvalidInput(error_message=msg)
def convert_to_boolean_if_not_none(data):
if data is not None:
return convert_to_boolean(data)
def convert_to_int(data):
try:
return int(data)
except (ValueError, TypeError):
msg = _("'%s' is not a integer") % data
raise n_exc.InvalidInput(error_message=msg)
def convert_to_int_if_not_none(data):
if data is not None:
return convert_to_int(data)
return data
def convert_to_positive_float_or_none(val):
# NOTE(salv-orlando): This conversion function is currently used by
    # a vendor-specific extension only at the moment. It is used for
# port's RXTX factor in neutron.plugins.vmware.extensions.qos.
# It is deemed however generic enough to be in this module as it
# might be used in future for other API attributes.
if val is None:
return
try:
val = float(val)
if val < 0:
raise ValueError()
except (ValueError, TypeError):
msg = _("'%s' must be a non negative decimal.") % val
raise n_exc.InvalidInput(error_message=msg)
return val
def convert_kvp_str_to_list(data):
"""Convert a value of the form 'key=value' to ['key', 'value'].
    :raises: n_exc.InvalidInput if the string is malformed
             (e.g. does not contain a key).
"""
kvp = [x.strip() for x in data.split('=', 1)]
if len(kvp) == 2 and kvp[0]:
return kvp
msg = _("'%s' is not of the form <key>=[value]") % data
raise n_exc.InvalidInput(error_message=msg)
def convert_kvp_list_to_dict(kvp_list):
"""Convert a list of 'key=value' strings to a dict.
    :raises: n_exc.InvalidInput if any of the strings are malformed
             (e.g. do not contain a key). Values for a key that appears
             more than once are merged into a single de-duplicated list.
"""
if kvp_list == ['True']:
# No values were provided (i.e. '--flag-name')
return {}
kvp_map = {}
for kvp_str in kvp_list:
key, value = convert_kvp_str_to_list(kvp_str)
kvp_map.setdefault(key, set())
kvp_map[key].add(value)
return dict((x, list(y)) for x, y in six.iteritems(kvp_map))
def convert_none_to_empty_list(value):
return [] if value is None else value
def convert_none_to_empty_dict(value):
return {} if value is None else value
def convert_to_list(data):
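    # Note: on Python 3 a plain string also exposes '__iter__', so a bare
    # string passed here is expanded character by character rather than
    # wrapped in a single-element list.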
if data is None:
return []
elif hasattr(data, '__iter__'):
return list(data)
else:
return [data]
HEX_ELEM = '[0-9A-Fa-f]'
UUID_PATTERN = '-'.join([HEX_ELEM + '{8}', HEX_ELEM + '{4}',
HEX_ELEM + '{4}', HEX_ELEM + '{4}',
HEX_ELEM + '{12}'])
# Note: In order to ensure that the MAC address is unicast the first byte
# must be even.
MAC_PATTERN = "^%s[aceACE02468](:%s{2}){5}$" % (HEX_ELEM, HEX_ELEM)
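# For example, 'fa:16:3e:00:11:22' matches (the second hex digit 'a' keeps the
# least-significant bit of the first octet zero, i.e. unicast), while
# 'f1:16:3e:00:11:22' does not.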
# Dictionary that maintains a list of validation functions
validators = {'type:dict': _validate_dict,
'type:dict_or_none': _validate_dict_or_none,
'type:dict_or_empty': _validate_dict_or_empty,
'type:dict_or_nodata': _validate_dict_or_nodata,
'type:fixed_ips': _validate_fixed_ips,
'type:hostroutes': _validate_hostroutes,
'type:ip_address': _validate_ip_address,
'type:ip_address_or_none': _validate_ip_address_or_none,
'type:ip_pools': _validate_ip_pools,
'type:mac_address': _validate_mac_address,
'type:mac_address_or_none': _validate_mac_address_or_none,
'type:nameservers': _validate_nameservers,
'type:non_negative': _validate_non_negative,
'type:range': _validate_range,
'type:regex': _validate_regex,
'type:regex_or_none': _validate_regex_or_none,
'type:string': _validate_string,
'type:string_or_none': _validate_string_or_none,
'type:not_empty_string': _validate_not_empty_string,
'type:not_empty_string_or_none':
_validate_not_empty_string_or_none,
'type:subnet': _validate_subnet,
'type:subnet_list': _validate_subnet_list,
'type:subnet_or_none': _validate_subnet_or_none,
'type:uuid': _validate_uuid,
'type:uuid_or_none': _validate_uuid_or_none,
'type:uuid_list': _validate_uuid_list,
'type:values': _validate_values,
'type:boolean': _validate_boolean}
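# For example, an attribute spec entry of {'type:string': 16} is dispatched
# (as in _validate_dict_item above) as validators['type:string'](value, 16).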
# Define constants for base resource name
NETWORK = 'network'
NETWORKS = '%ss' % NETWORK
PORT = 'port'
PORTS = '%ss' % PORT
SUBNET = 'subnet'
SUBNETS = '%ss' % SUBNET
SUBNETPOOL = 'subnetpool'
SUBNETPOOLS = '%ss' % SUBNETPOOL
# Note: a default of ATTR_NOT_SPECIFIED indicates that an
# attribute is not required, but will be generated by the plugin
# if it is not specified. Particularly, a value of ATTR_NOT_SPECIFIED
# is different from an attribute that has been specified with a value of
# None. For example, if 'gateway_ip' is omitted in a request to
# create a subnet, the plugin will receive ATTR_NOT_SPECIFIED
# and the default gateway_ip will be generated.
# However, if gateway_ip is specified as None, this means that
# the subnet does not have a gateway IP.
# The following is a short reference for understanding attribute info:
# default: default value of the attribute (if missing, the attribute
#       becomes mandatory).
# allow_post: the attribute can be used on POST requests.
# allow_put: the attribute can be used on PUT requests.
# validate: specifies rules for validating data in the attribute.
# convert_to: transformation to apply to the value before it is returned
# is_visible: the attribute is returned in GET responses.
# required_by_policy: the attribute is required by the policy engine and
# should therefore be filled by the API layer even if not present in
# request body.
# enforce_policy: the attribute is actively part of the policy enforcing
#    mechanism, i.e. there might be rules which refer to this attribute.
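# A minimal, hypothetical attribute spec illustrating these keys (the real
# definitions follow in RESOURCE_ATTRIBUTE_MAP below):
#     'description': {'allow_post': True, 'allow_put': True,
#                     'validate': {'type:string': 255},
#                     'default': '', 'is_visible': True}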
RESOURCE_ATTRIBUTE_MAP = {
NETWORKS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': NAME_MAX_LEN},
'default': '', 'is_visible': True},
'subnets': {'allow_post': False, 'allow_put': False,
'default': [],
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': TENANT_ID_MAX_LEN},
'required_by_policy': True,
'is_visible': True},
SHARED: {'allow_post': True,
'allow_put': True,
'default': False,
'convert_to': convert_to_boolean,
'is_visible': True,
'required_by_policy': True,
'enforce_policy': True},
},
PORTS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True, 'default': '',
'validate': {'type:string': NAME_MAX_LEN},
'is_visible': True},
'network_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:uuid': None},
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': convert_to_boolean,
'is_visible': True},
'mac_address': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:mac_address': None},
'enforce_policy': True,
'is_visible': True},
'fixed_ips': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'convert_list_to': convert_kvp_list_to_dict,
'validate': {'type:fixed_ips': None},
'enforce_policy': True,
'is_visible': True},
'device_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': DEVICE_ID_MAX_LEN},
'default': '',
'is_visible': True},
'device_owner': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': DEVICE_OWNER_MAX_LEN},
'default': '',
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': TENANT_ID_MAX_LEN},
'required_by_policy': True,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
},
SUBNETS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True, 'default': '',
'validate': {'type:string': NAME_MAX_LEN},
'is_visible': True},
'ip_version': {'allow_post': True, 'allow_put': False,
'convert_to': convert_to_int,
'validate': {'type:values': [4, 6]},
'is_visible': True},
'network_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:uuid': None},
'is_visible': True},
'subnetpool_id': {'allow_post': True,
'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'required_by_policy': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True},
'prefixlen': {'allow_post': True,
'allow_put': False,
'validate': {'type:non_negative': None},
'convert_to': convert_to_int,
'default': ATTR_NOT_SPECIFIED,
'required_by_policy': False,
'is_visible': False},
'cidr': {'allow_post': True,
'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:subnet_or_none': None},
'required_by_policy': False,
'is_visible': True},
'gateway_ip': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:ip_address_or_none': None},
'is_visible': True},
'allocation_pools': {'allow_post': True, 'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:ip_pools': None},
'is_visible': True},
'dns_nameservers': {'allow_post': True, 'allow_put': True,
'convert_to': convert_none_to_empty_list,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:nameservers': None},
'is_visible': True},
'host_routes': {'allow_post': True, 'allow_put': True,
'convert_to': convert_none_to_empty_list,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:hostroutes': None},
'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': TENANT_ID_MAX_LEN},
'required_by_policy': True,
'is_visible': True},
'enable_dhcp': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': convert_to_boolean,
'is_visible': True},
'ipv6_ra_mode': {'allow_post': True, 'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:values': constants.IPV6_MODES},
'is_visible': True},
'ipv6_address_mode': {'allow_post': True, 'allow_put': False,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:values':
constants.IPV6_MODES},
'is_visible': True},
SHARED: {'allow_post': False,
'allow_put': False,
'default': False,
'convert_to': convert_to_boolean,
'is_visible': False,
'required_by_policy': True,
'enforce_policy': True},
},
SUBNETPOOLS: {
'id': {'allow_post': False,
'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'name': {'allow_post': True,
'allow_put': True,
'validate': {'type:not_empty_string': None},
'is_visible': True},
'tenant_id': {'allow_post': True,
'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'prefixes': {'allow_post': True,
'allow_put': True,
'validate': {'type:subnet_list': None},
'is_visible': True},
'default_quota': {'allow_post': True,
'allow_put': True,
'validate': {'type:non_negative': None},
'convert_to': convert_to_int,
'default': ATTR_NOT_SPECIFIED,
'is_visible': True},
'ip_version': {'allow_post': False,
'allow_put': False,
'is_visible': True},
'default_prefixlen': {'allow_post': True,
'allow_put': True,
'validate': {'type:non_negative': None},
'convert_to': convert_to_int,
'default': ATTR_NOT_SPECIFIED,
'is_visible': True},
'min_prefixlen': {'allow_post': True,
'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:non_negative': None},
'convert_to': convert_to_int,
'is_visible': True},
'max_prefixlen': {'allow_post': True,
'allow_put': True,
'default': ATTR_NOT_SPECIFIED,
'validate': {'type:non_negative': None},
'convert_to': convert_to_int,
'is_visible': True},
SHARED: {'allow_post': True,
'allow_put': False,
'default': False,
'convert_to': convert_to_boolean,
'is_visible': True,
'required_by_policy': True,
'enforce_policy': True},
}
}
# Identify the attribute used by a resource to reference another resource
RESOURCE_FOREIGN_KEYS = {
NETWORKS: 'network_id'
}
PLURALS = {NETWORKS: NETWORK,
PORTS: PORT,
SUBNETS: SUBNET,
SUBNETPOOLS: SUBNETPOOL,
'dns_nameservers': 'dns_nameserver',
'host_routes': 'host_route',
'allocation_pools': 'allocation_pool',
'fixed_ips': 'fixed_ip',
'extensions': 'extension'}
| 36.54835
| 79
| 0.569943
|
2d105afe37e941009fa72f7a87050f94c475fcc5
| 910
|
py
|
Python
|
john_doe/cities/scotland.py
|
xioren/JohnDoe
|
4bd16f394709cac246438c8ffd650b4b301cb2b7
|
[
"MIT"
] | null | null | null |
john_doe/cities/scotland.py
|
xioren/JohnDoe
|
4bd16f394709cac246438c8ffd650b4b301cb2b7
|
[
"MIT"
] | null | null | null |
john_doe/cities/scotland.py
|
xioren/JohnDoe
|
4bd16f394709cac246438c8ffd650b4b301cb2b7
|
[
"MIT"
] | null | null | null |
cities = [
'Glasgow',
'Edinburgh',
'Aberdeen',
'Dundee',
'Paisley',
'East Kilbride',
'Livingston',
'Hamilton',
'Cumbernauld',
'Dunfermline',
'Kirkcaldy',
'Ayr',
'Perth',
'Inverness',
'Kilmarnock',
'Coatbridge',
'Greenock',
'Glenrothes',
'Airdrie',
'Stirling',
'Falkirk',
'Irvine',
'Dumfries',
'Motherwell',
'Rutherglen',
'Arbroath',
'Musselburgh',
'Elgin',
'Renfrew',
'Alloa',
'Dumbarton',
'Peterhead',
'St Andrews',
'Troon',
'Linlithgow',
'Fraserburgh',
'Galashiels',
'Montrose',
'Stonehaven',
'Stranraer',
'Forres',
'Nairn',
'Cupar',
'Haddington',
'Lanark',
'Oban',
'Dunbar',
'Peebles',
'Thurso',
'Brechin',
'Kirkwall',
'Wick',
'Lerwick',
'Fort William',
'Stornoway',
'Campbeltown'
]
| 15.423729
| 20
| 0.497802
|
8c4038b9da42d5e1026ba5832f44299d72a313ca
| 48
|
py
|
Python
|
tests/__init__.py
|
bogdandm/redis-bulk-cleaner
|
3d5dc61f8064eb320bc09e519b6d766fbd8e72e0
|
[
"MIT"
] | 3
|
2020-10-05T17:30:21.000Z
|
2022-03-25T14:47:54.000Z
|
tests/__init__.py
|
bogdandm/redis_bulk_cleaner
|
3d5dc61f8064eb320bc09e519b6d766fbd8e72e0
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
bogdandm/redis_bulk_cleaner
|
3d5dc61f8064eb320bc09e519b6d766fbd8e72e0
|
[
"MIT"
] | null | null | null |
"""Unit test package for redis_bulk_cleaner."""
| 24
| 47
| 0.75
|
80b2ddacca4e0ccd0ad4b2e2cbd5fbab06320fe7
| 2,357
|
py
|
Python
|
preprocessing/notes_to_tsv.py
|
som-shahlab/ehr-rwe
|
9653a6abc837dee7759ed245939716b7d50525cc
|
[
"Apache-2.0"
] | 25
|
2020-02-12T00:07:03.000Z
|
2021-12-01T22:50:24.000Z
|
preprocessing/notes_to_tsv.py
|
som-shahlab/ehr-rwe
|
9653a6abc837dee7759ed245939716b7d50525cc
|
[
"Apache-2.0"
] | 1
|
2021-01-28T22:49:23.000Z
|
2021-01-28T22:49:23.000Z
|
preprocessing/notes_to_tsv.py
|
som-shahlab/ehr-rwe
|
9653a6abc837dee7759ed245939716b7d50525cc
|
[
"Apache-2.0"
] | 3
|
2021-03-09T02:47:19.000Z
|
2021-05-21T14:51:02.000Z
|
"""
python raw_note_export.py -i <INPUT> -o <OUTPUT> -b 1000
"""
import os
import sys
import glob
import argparse
from toolz import partition_all
def mimic_preprocessing(text):
"""
:param text:
:return:
"""
# remove junk headers that concatenate multiple notes
sents = []
skip = False
for line in text.split('\n'):
if line.strip() == '(Over)':
skip = True
elif line.strip() == '(Cont)':
skip = False
continue
if not skip:
sents.append(line)
text = '\n'.join(sents)
return text
def save_tsv(data, outfpath):
with open(outfpath, 'w') as fp:
fp.write("DOC_NAME\tTEXT\n")
for row in data:
row = '\t'.join(row)
fp.write(f"{row}\n")
def main(args):
filelist = glob.glob(f'{args.inputdir}/*')
batches = partition_all(args.batch_size, filelist)
print(f'Documents: {len(filelist)}')
if not os.path.exists(args.outputdir):
print("output dir does not exist")
return
for i,batch in enumerate(batches):
data = []
for fpath in batch:
doc_name = fpath.split("/")[-1].split(".")[0]
text = open(fpath,'r').read()
if args.fmt == 'mimic':
text = mimic_preprocessing(text) if args.preprocess == 'mimic' else text
# escape whitespace
text = text.replace('\n', '\\n').replace('\t', '\\t')
data.append((doc_name, text))
outfpath = f'{args.outputdir}/{args.batch_size}.{i}.tsv'
print(outfpath)
save_tsv(data, outfpath)
data = []
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument("-i", "--inputdir", type=str, default=None, help="input directory")
argparser.add_argument("-o", "--outputdir", type=str, default=None, help="output directory")
argparser.add_argument("-b", "--batch_size", type=int, default=1000, help="batch size")
argparser.add_argument("-f", "--fmt", type=str, default="mimic", help="document source format")
argparser.add_argument("-e", "--export_fmt", type=str, default="tsv", help="document export format")
argparser.add_argument("-P", "--preprocess", type=str, default=None, help="preprocess docs")
args = argparser.parse_args()
main(args)
| 28.39759
| 104
| 0.589733
|
3588cdc1e6ec70a39e1e657e050d2180445bb72c
| 776
|
py
|
Python
|
terrascript/oneandone/r.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/oneandone/r.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/oneandone/r.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
# terrascript/oneandone/r.py
import terrascript
class oneandone_server(terrascript.Resource):
pass
class oneandone_firewall_policy(terrascript.Resource):
pass
class oneandone_private_network(terrascript.Resource):
pass
class oneandone_public_ip(terrascript.Resource):
pass
class oneandone_shared_storage(terrascript.Resource):
pass
class oneandone_monitoring_policy(terrascript.Resource):
pass
class oneandone_loadbalancer(terrascript.Resource):
pass
class oneandone_vpn(terrascript.Resource):
pass
class oneandone_ssh_key(terrascript.Resource):
pass
class oneandone_block_storage(terrascript.Resource):
pass
class oneandone_image(terrascript.Resource):
pass
class oneandone_baremetal(terrascript.Resource):
pass
| 18.926829
| 56
| 0.800258
|
dfaa9f6af5c91a7b2774d57014b2cb82fec9aacc
| 720
|
py
|
Python
|
src/main/python/scenario_describer/ScenarioDescriberConnector.py
|
bartoszgaj/moral-dilema-detector
|
548ebe3db47caaae71a6f93162ebdf2b9b66325c
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/scenario_describer/ScenarioDescriberConnector.py
|
bartoszgaj/moral-dilema-detector
|
548ebe3db47caaae71a6f93162ebdf2b9b66325c
|
[
"Apache-2.0"
] | 11
|
2020-08-28T08:41:04.000Z
|
2021-12-09T22:44:30.000Z
|
src/main/python/scenario_describer/ScenarioDescriberConnector.py
|
bartoszgaj/moral-dilema-detector
|
548ebe3db47caaae71a6f93162ebdf2b9b66325c
|
[
"Apache-2.0"
] | 3
|
2020-07-07T15:13:41.000Z
|
2021-08-24T21:40:48.000Z
|
import sys
import Ice
import adapter_ice
class ScenarioDescriberConnector:
# string_proxy = ":tcp -h localhost -p 10000"
def __init__(self, string_proxy):
self.__string_proxy = string_proxy
communicator = Ice.initialize(sys.argv)
self.__base = communicator.stringToProxy("adapter/manager" + string_proxy)
self.__managerPrx = adapter_ice.ManagerPrx.checkedCast(self.__base)
if self.__managerPrx is None: raise ConnectionError("Invalid proxy")
self.__communicator = communicator
def get_manager(self):
return self.__managerPrx
def get_base(self):
return self.__base
def get_communicator(self):
return self.__communicator
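# Example usage (hypothetical endpoint, matching the commented-out proxy above):
#   connector = ScenarioDescriberConnector(":tcp -h localhost -p 10000")
#   manager = connector.get_manager()
#   ...
#   connector.get_communicator().destroy()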
| 25.714286
| 82
| 0.706944
|
2abe60f24dc3fc387d79a2abc2ba73055dc83741
| 254
|
py
|
Python
|
misc/testConsole.py
|
geek-plus/vlcp
|
e7936e00929fcef00c04d4da39b67d9679d5f083
|
[
"Apache-2.0"
] | 252
|
2015-11-17T14:21:50.000Z
|
2022-03-11T10:19:47.000Z
|
misc/testConsole.py
|
wan-qy/vlcp
|
e7936e00929fcef00c04d4da39b67d9679d5f083
|
[
"Apache-2.0"
] | 23
|
2018-01-09T13:28:52.000Z
|
2019-12-12T06:11:44.000Z
|
misc/testConsole.py
|
wan-qy/vlcp
|
e7936e00929fcef00c04d4da39b67d9679d5f083
|
[
"Apache-2.0"
] | 37
|
2016-08-03T04:42:22.000Z
|
2021-12-30T16:57:10.000Z
|
'''
Created on 2015/12/30
@author: hubo
'''
from vlcp.server import main
from vlcp.config.config import manager
if __name__ == '__main__':
#manager['module.console.startinconsole'] = True
main(None, ('vlcp.service.debugging.console.Console',))
| 21.166667
| 59
| 0.716535
|
52e08bbb71f1af73889b329e949c4a3a2c1d459b
| 12,615
|
py
|
Python
|
pytests/failover/MultiNodeAutoFailoverTests.py
|
cgghali/TAF
|
1de8dec77ad781c373e18d9c285befd534ac203a
|
[
"Apache-2.0"
] | null | null | null |
pytests/failover/MultiNodeAutoFailoverTests.py
|
cgghali/TAF
|
1de8dec77ad781c373e18d9c285befd534ac203a
|
[
"Apache-2.0"
] | null | null | null |
pytests/failover/MultiNodeAutoFailoverTests.py
|
cgghali/TAF
|
1de8dec77ad781c373e18d9c285befd534ac203a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from couchbase_helper.documentgenerator import doc_generator
from failover.AutoFailoverBaseTest import AutoFailoverBaseTest
from membase.api.exception import RebalanceFailedException, \
ServerUnavailableException
from membase.api.rest_client import RestConnection
class MultiNodeAutoFailoverTests(AutoFailoverBaseTest):
def setUp(self):
super(MultiNodeAutoFailoverTests, self).setUp()
self.data_load_spec = self.input.param("data_load_spec",
"volume_test_load")
self.master = self.servers[0]
def tearDown(self):
super(MultiNodeAutoFailoverTests, self).tearDown()
def _is_failover_expected(self, failure_node_number):
failover_not_expected = (
self.max_count == 1 and failure_node_number > 1 and
self.pause_between_failover_action <
self.timeout or self.num_replicas < 1)
failover_not_expected = failover_not_expected or (
1 < self.max_count < failure_node_number and
self.pause_between_failover_action < self.timeout or
self.num_replicas < failure_node_number)
return not failover_not_expected
def _multi_node_failover(self):
servers_to_fail = self.server_to_fail
for i in range(self.max_count):
self.server_to_fail = [servers_to_fail[i]]
self.failover_expected = self._is_failover_expected(i + 1)
self.failover_actions[self.failover_action](self)
self.sleep(self.timeout)
def test_autofailover(self):
"""
Test the basic autofailover for different failure scenarios.
1. Enable autofailover and validate
2. Fail a node and validate if node is failed over if required.
3. Disable autofailover and validate.
:return: Nothing
"""
self.enable_autofailover_and_validate()
self.sleep(5)
tasks = self.subsequent_load_gen()
self._multi_node_failover()
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
self.disable_autofailover_and_validate()
def _get_server_group_nodes(self, server_group):
servers_in_group = self.zones[server_group]
server_group_nodes = []
for server in self.servers:
if server.ip in servers_in_group:
server_group_nodes.append(server)
return server_group_nodes
def test_autofailover_for_server_group(self):
self.enable_autofailover_and_validate()
self.shuffle_nodes_between_zones_and_rebalance()
        self.sleep(30, "Wait for nodes to settle after shuffling between zones")
self.server_to_fail = self._get_server_group_nodes("Group 2")
self.failover_expected = True
tasks = self.subsequent_load_gen()
try:
self.failover_actions[self.failover_action](self)
except:
            # The helper returns a (found, timestamp) tuple; assert on the flag.
            result, _ = self._check_for_autofailover_initiation_for_server_group_failover(
                self.server_to_fail)
self.assertTrue(result,
"Server group failover msg was not seen in logs")
finally:
self.sleep(300)
self.start_couchbase_server()
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
def test_autofailover_during_rebalance(self):
"""
Test autofailover for different failure scenarios while
rebalance
of nodes in progress
1. Enable autofailover and validate
2. Start rebalance of nodes by either adding or removing nodes.
3. Fail a node and validate if node is failed over if required.
4. Disable autofailover and validate.
:return: Nothing
"""
self.enable_autofailover_and_validate()
self.sleep(5)
rebalance_task = self.task.async_rebalance(self.servers,
self.servers_to_add,
self.servers_to_remove)
self.sleep(5)
tasks = self.subsequent_load_gen()
self._multi_node_failover()
try:
rebalance_task.result()
except RebalanceFailedException:
pass
except ServerUnavailableException:
pass
except Exception:
pass
else:
self.fail("Rebalance should fail since a node went down")
finally:
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
self.disable_autofailover_and_validate()
def test_autofailover_after_rebalance(self):
"""
Test autofailover for different failure scenarios after
rebalance
of nodes
1. Enable autofailover and validate
2. Start rebalance of nodes by either adding or removing
nodes and
wait for the rebalance to be completed
3. Fail a node and validate if node is failed over if required.
4. Disable autofailover and validate.
:return: Nothing
"""
self.enable_autofailover_and_validate()
self.sleep(5)
rebalance_success = self.task.rebalance(self.servers,
self.servers_to_add,
self.servers_to_remove)
if not rebalance_success:
self.disable_firewall()
self.fail("Rebalance failed. Check logs")
tasks = self.subsequent_load_gen()
self._multi_node_failover()
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
self.disable_autofailover_and_validate()
def test_rebalance_after_autofailover(self):
"""
Test autofailover for different failure scenarios and then
rebalance
nodes
1. Enable autofailover and validate
2. Start rebalance of nodes by either adding or removing
nodes and
wait for the rebalance to be completed
3. Fail a node and validate if node is failed over if required.
4. Disable autofailover and validate.
:return: Nothing
"""
self.enable_autofailover_and_validate()
self.sleep(5)
tasks = self.subsequent_load_gen()
self._multi_node_failover()
for node in self.servers_to_add:
self.rest.add_node(user=self.orchestrator.rest_username,
password=self.orchestrator.rest_password,
remoteIp=node.ip)
nodes = self.rest.node_statuses()
nodes_to_remove = [node.id for node in nodes if
node.ip in [t.ip for t in
self.servers_to_remove]]
nodes = [node.id for node in nodes]
started = self.rest.rebalance(nodes, nodes_to_remove)
rebalance_success = False
if started:
rebalance_success = self.rest.monitorRebalance()
if (not rebalance_success or not started) and not \
self.failover_expected:
self.fail("Rebalance failed. Check logs")
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
def test_autofailover_and_addback_of_node(self):
"""
Test autofailover of nodes and then addback of the node after
failover
1. Enable autofailover and validate
2. Fail a node and validate if node is failed over if required
3. Addback node and validate that the addback was successful.
:return: Nothing
"""
if not self.failover_expected:
self.log.info("Since no failover is expected in the test, "
"skipping the test")
return
self.enable_autofailover_and_validate()
self.sleep(5)
tasks = self.subsequent_load_gen()
self._multi_node_failover()
self.server_to_fail = self._servers_to_fail()
self.bring_back_failed_nodes_up()
self.sleep(30)
self.nodes = self.rest.node_statuses()
for node in self.server_to_fail:
self.rest.add_back_node("ns_1@{}".format(node.ip))
self.rest.set_recovery_type("ns_1@{}".format(node.ip),
self.recovery_strategy)
self.rest.rebalance(otpNodes=[node.id for node in self.nodes])
msg = "rebalance failed while recovering failover nodes {0}" \
.format(self.server_to_fail[0])
self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True), msg)
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
def test_autofailover_and_remove_failover_node(self):
"""
Test autofailover of nodes and remove the node via rebalance
after
the failover.
1. Enable autofailover and validate
2. Fail a node and validate if node is failed over if required
3. Rebalance of node if failover was successful and validate.
:return:
"""
if not self.failover_expected:
self.log.info("Since no failover is expected in the test, "
"skipping the test")
return
tasks = self.subsequent_load_gen()
self.enable_autofailover_and_validate()
self.sleep(5)
self._multi_node_failover()
self.nodes = self.rest.node_statuses()
self.remove_after_failover = True
self.rest.rebalance(otpNodes=[node.id for node in self.nodes])
msg = "rebalance failed while removing failover nodes {0}" \
.format(self.server_to_fail[0])
self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True),
msg)
if self.spec_name is None:
for task in tasks:
self.task.jython_task_manager.get_task_result(task)
else:
self.wait_for_async_data_load_to_complete(tasks)
def _check_for_autofailover_initiation_for_server_group_failover(
self, failed_over_nodes):
rest = RestConnection(self.master)
ui_logs = rest.get_logs(10)
ui_logs_text = [t["text"] for t in ui_logs]
ui_logs_time = [t["serverTime"] for t in ui_logs]
expected_log = "Starting failing over ['ns_1@{}','ns_1@{}']".format(
failed_over_nodes[0].ip, failed_over_nodes[1].ip)
self.log.info("ui_logs_text: {0}".format(ui_logs_text))
if expected_log in ui_logs_text:
failed_over_time = ui_logs_time[ui_logs_text.index(expected_log)]
return True, failed_over_time
return False, None
def subsequent_load_gen(self, async_load=True):
if self.spec_name is None:
subsequent_load_gen = doc_generator(self.key,
self.num_items,
self.num_items*2,
key_size=self.key_size,
doc_size=self.doc_size,
doc_type=self.doc_type)
tasks = self.async_load_all_buckets(
subsequent_load_gen, "create", 0)
return tasks
else:
doc_loading_spec = self.bucket_util.get_crud_template_from_package(
self.data_load_spec)
tasks = self.bucket_util.run_scenario_from_spec(
self.task,
self.cluster,
self.bucket_util.buckets,
doc_loading_spec,
mutation_num=0,
async_load=async_load)
return tasks
def wait_for_async_data_load_to_complete(self, task):
self.task.jython_task_manager.get_task_result(task)
| 42.190635
| 107
| 0.613317
|