code stringlengths 84 247k | code_en stringlengths 84 247k | language stringclasses 1
value | file_path stringlengths 37 157 | license stringclasses 1
value | token_count int64 33 243k |
|---|---|---|---|---|---|
#! /usr/bin/env python3
import argparse
import collections
import csv
# The set of valid clicker answers: the letters 'A' through 'E' inclusive.
choices = list(map(chr, range(ord('A'), ord('E')+1)))
def parse_dump(f):
    """Read a raw capture dump and map each student ID to their answer.

    Only lines of the form ``Captured: <answer> (<id>, <id>, ...)`` are
    parsed; all other lines are ignored.  If a student appears more than
    once, the last capture wins.
    """
    table = {}
    with open(f, 'r') as stream:
        for line in stream:
            if not line.startswith('Captured:'):
                continue
            # Strip the tuple punctuation so a plain split yields the fields.
            cleaned = line.replace('(', '').replace(')', '').replace(',', '')
            tokens = cleaned.split()
            table[''.join(tokens[2:])] = tokens[1]
    return table
def tally(table):
    """Count how many students chose each valid answer.

    Returns an OrderedDict mapping every letter in ``choices`` (in order)
    to its count.  Answers outside ``choices`` are silently ignored.
    """
    count = collections.OrderedDict((choice, 0) for choice in choices)
    # Only the answers matter here; the student IDs are not needed.
    for answer in table.values():
        if answer in count:
            count[answer] += 1
    return count
def save(f, table):
    """Write ``table`` to file ``f`` as a two-column CSV: student_id, answer.

    ``newline=''`` is required by the csv module; without it the writer
    emits doubled line endings (blank rows) on Windows.
    """
    with open(f, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        for key, value in table.items():
            writer.writerow([key, value])
def main():
    """Parse CLI arguments, tally the dump, print per-choice counts, and
    optionally save the ID-to-answer table as CSV."""
    parser = argparse.ArgumentParser(
        # Fixed typo: "optionaly" -> "optionally".
        description='Tallies raw class dump data and optionally outputs a CSV matching IDs to answers')
    parser.add_argument(
        'dump', help='raw dump file')
    parser.add_argument('-t', '--table', help='saves table to file', nargs=1)
    args = parser.parse_args()
    table = parse_dump(args.dump)
    count = tally(table)
    print('Students: %i' % len(table))
    for choice in choices:
        print('%s\t%i' % (choice, count[choice]))
    if args.table:
        # nargs=1 makes args.table a one-element list.
        save(args.table[0], table)
# Standard script entry point.
if __name__ == '__main__':
    main()
| #! /usr/bin/env python3
import argparse
import collections
import csv
choices = list(map(chr, range(ord('A'), ord('E')+1)))
def parse_dump(f):
table = {}
with open(f, 'r') as stream:
for line in stream.readlines():
if line.startswith('Captured:'):
parts = line.replace('(','').replace(')','').replace(',','').split()
answer = parts[1]
student_id = ''.join(parts[2:])
table[student_id] = answer
return table
def tally(table):
count = collections.OrderedDict()
for choice in choices:
count[choice] = 0
for student_id, answer in table.items():
if answer in count:
count[answer] += 1;
return count
def save(f, table):
with open(f, 'w') as csv_file:
writer = csv.writer(csv_file)
for key, value in table.items():
writer.writerow([key, value])
def main():
parser = argparse.ArgumentParser(
description='Tallies raw class dump data and optionaly outputs a CSV matching IDs to answers')
parser.add_argument(
'dump', help='raw dump file')
parser.add_argument('-t', '--table' , help='saves table to file', nargs=1)
args = parser.parse_args()
table = parse_dump(args.dump)
count = tally(table)
print('Students: %i' % len(table))
for choice in choices:
print('%s\t%i' % (choice, count[choice]))
if args.table:
save(args.table[0], table)
if __name__ == '__main__':
main()
| en | 000418673_wizard97-iSkipper_tally_7bb28e209a89.py | unknown | 463 |
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from flask_restful import Resource
from marshmallow import fields
from webargs.flaskparser import use_args
from redisolar.api.base import DaoResource
from redisolar.models import MeterReading
from redisolar.schema import MeterReadingsSchema
# Hard upper bound and fallback for the number of feed entries returned.
MAX_RECENT_FEEDS = 1000
DEFAULT_RECENT_FEEDS = 100


def get_feed_count(count: Optional[int]):
    """Decide a safe number of feeds to return.

    ``None`` or a negative request falls back to the default; any request
    above the maximum is capped at the maximum.
    """
    if count is None or count < 0:
        return DEFAULT_RECENT_FEEDS
    return min(count, MAX_RECENT_FEEDS)
class GlobalMeterReadingResource(Resource):
    """A RESTful resource representing meter readings for all sites."""

    def __init__(self, meter_reading_dao: Any, feed_dao: Any):
        self.meter_reading_dao = meter_reading_dao
        self.feed_dao = feed_dao

    @use_args(MeterReadingsSchema)
    def post(self, meter_readings: Dict[str, List[MeterReading]]) -> Tuple[str, int]:
        """Store every submitted reading and acknowledge with HTTP 202."""
        for entry in meter_readings['readings']:
            self.meter_reading_dao.add(entry)
        return "Accepted", 202

    @use_args({"count": fields.Int()}, location="query")
    def get(self, args: Dict[str, int]) -> Dict[str, Dict]:
        """Return the most recent readings across all sites."""
        limit = get_feed_count(args.get('count'))
        recent = self.feed_dao.get_recent_global(limit)
        return MeterReadingsSchema().dump({"readings": recent})
class SiteMeterReadingResource(DaoResource):
    """A RESTful resource representing meter readings for specific sites."""

    @use_args({"count": fields.Int()}, location="query")
    def get(self, args, site_id):
        """Return the most recent readings for the site ``site_id``."""
        limit = get_feed_count(args.get('count'))
        site_readings = self.dao.get_recent_for_site(site_id, limit)
        return MeterReadingsSchema().dump({"readings": site_readings})
| from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from flask_restful import Resource
from marshmallow import fields
from webargs.flaskparser import use_args
from redisolar.api.base import DaoResource
from redisolar.models import MeterReading
from redisolar.schema import MeterReadingsSchema
MAX_RECENT_FEEDS = 1000
DEFAULT_RECENT_FEEDS = 100
def get_feed_count(count: Optional[int]):
"""Decide a safe number of feeds to return."""
if count is None or count < 0:
return DEFAULT_RECENT_FEEDS
if count > MAX_RECENT_FEEDS:
return MAX_RECENT_FEEDS
return count
class GlobalMeterReadingResource(Resource):
"""A RESTful resource representing meter readings for all sites."""
def __init__(self, meter_reading_dao: Any, feed_dao: Any):
self.meter_reading_dao = meter_reading_dao
self.feed_dao = feed_dao
@use_args(MeterReadingsSchema)
def post(self, meter_readings: Dict[str, List[MeterReading]]) -> Tuple[str, int]:
"""Create a new meter reading."""
for reading in meter_readings['readings']:
self.meter_reading_dao.add(reading)
return "Accepted", 202
@use_args({"count": fields.Int()}, location="query")
def get(self, args: Dict[str, int]) -> Dict[str, Dict]:
"""Get a list of meter readings."""
count = args.get('count')
readings = self.feed_dao.get_recent_global(get_feed_count(count))
return MeterReadingsSchema().dump({"readings": readings})
class SiteMeterReadingResource(DaoResource):
"""A RESTful resource representing meter readings for specific sites."""
@use_args({"count": fields.Int()}, location="query")
def get(self, args, site_id):
"""Get recent meter readings for a specific site."""
count = args.get('count')
readings = self.dao.get_recent_for_site(site_id, get_feed_count(count))
return MeterReadingsSchema().dump({"readings": readings})
| en | 000182694_4heck-ru102py_meter_reading_3d42e5bb117a.py | unknown | 586 |
#!/usr/bin/env python
import sys
from cvangysel import argparse_utils, logging_utils
import argparse
import logging
import matplotlib.cm as cm
import matplotlib.markers as markers
import matplotlib.pyplot as plt
import numpy as np
import os
import pylatex.utils
import pyndri
from sklearn.manifold import TSNE
import nvsm
# Marker shapes used to distinguish clusters in the scatter plot; an
# assertion in main() requires fewer clusters than available markers.
MARKERS = ['o', 's', '<', '>', '^', 'v', 'd', 'p', '*', '8',
           '1', '2', '3', '4',
           markers.TICKLEFT, markers.TICKRIGHT,
           markers.TICKUP, markers.TICKDOWN,
           markers.CARETLEFT, markers.CARETRIGHT,
           markers.CARETUP, markers.CARETDOWN]
# Default figure size (inches) for all plots produced by this script.
plt.rcParams["figure.figsize"] = (8.0, 4.25)
def main():
    """Visualize cuNVSM document representations.

    Loads a trained model and a pyndri index, groups documents by the
    given classification file(s), then either plots a 2-D t-SNE
    projection or dumps vectors in TensorFlow embedding-projector TSV
    format.  Returns -1 when logging cannot be configured.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('model')
    parser.add_argument('index', type=argparse_utils.existing_directory_path)
    parser.add_argument('--limit',
                        type=argparse_utils.positive_int,
                        default=None)
    parser.add_argument('--object_classification',
                        type=argparse_utils.existing_file_path,
                        nargs='+',
                        default=None)
    parser.add_argument('--filter_unclassified',
                        action='store_true',
                        default=False)
    parser.add_argument('--l2_normalize',
                        action='store_true',
                        default=False)
    parser.add_argument('--mode',
                        choices=('tsne', 'embedding_projector'),
                        default='tsne')
    parser.add_argument('--legend',
                        action='store_true',
                        default=False)
    parser.add_argument('--tick_labels',
                        action='store_true',
                        default=False)
    parser.add_argument('--edges',
                        action='store_true',
                        default=False)
    parser.add_argument('--border',
                        action='store_true',
                        default=False)
    parser.add_argument('--plot_out',
                        type=argparse_utils.nonexisting_file_path,
                        required=True)
    args = parser.parse_args()
    try:
        logging_utils.configure_logging(args)
    except IOError:
        return -1
    # Set matplotlib style.
    plt.style.use('bmh')
    logging.info('Loading index.')
    index = pyndri.Index(args.index)
    logging.info('Loading cuNVSM model.')
    # Model paths look like <base>_<epoch>.<ext>; the meta file lives at
    # <base>_meta, or one underscore-component up when a batch index is
    # appended to the base name.
    model_base, epoch_and_ext = args.model.rsplit('_', 1)
    epoch = int(epoch_and_ext.split('.')[0])
    if not os.path.exists('{}_meta'.format(model_base)):
        model_meta_base, batch_idx = model_base.rsplit('_', 1)
    else:
        model_meta_base = model_base
    model = nvsm.load_model(
        nvsm.load_meta(model_meta_base),
        model_base, epoch,
        only_object_embeddings=True)
    raw_object_representations = np.copy(model.object_representations)
    if args.limit:
        raw_object_representations = raw_object_representations[:args.limit, :]
    # NOTE(review): --object_classification defaults to None, which would
    # make this loop raise TypeError; the flag is effectively required.
    for object_classification in args.object_classification:
        # One output file per classification file.
        root, ext = os.path.splitext(args.plot_out)
        plot_out = '{}-{}.{}'.format(
            root, os.path.basename(object_classification), ext.lstrip('.'))
        if object_classification and args.filter_unclassified:
            logging.info('Filtering unclassified.')
            with open(object_classification, 'r') as f_objects:
                object_ids = [line.strip().split()[0] for line in f_objects]
            # Map external document ids to rows of the representation matrix.
            indices = sorted(model.inv_object_mapping[idx]
                             for _, idx in index.document_ids(object_ids)
                             if idx in model.inv_object_mapping)
            logging.info('Considering %d out of %d representations.',
                         len(indices), len(object_ids))
            translation_table = {idx: i for i, idx in enumerate(indices)}
            object_representations = raw_object_representations[indices]
            assert object_representations.shape[0] == \
                len(translation_table)
        else:
            # NOTE(review): dead assignment — the next statement always
            # raises; running without --filter_unclassified is unsupported.
            translation_table = None
            raise NotImplementedError()
        logging.info('Loading object clusters.')
        cluster_id_to_product_ids = {}
        if object_classification:
            with open(object_classification, 'r') as f_objects:
                for line in f_objects:
                    object_id, cluster_id = line.strip().split()
                    if cluster_id not in cluster_id_to_product_ids:
                        cluster_id_to_product_ids[cluster_id] = set()
                    cluster_id_to_product_ids[cluster_id].add(object_id)
            # Translate external ids into (possibly re-indexed) matrix rows,
            # dropping ids beyond --limit or missing from the model.
            for cluster_id in list(cluster_id_to_product_ids.keys()):
                object_ids = list(cluster_id_to_product_ids[cluster_id])
                cluster_id_to_product_ids[cluster_id] = set(
                    (model.inv_object_mapping[int_object_id]
                     if translation_table is None
                     else translation_table[
                         model.inv_object_mapping[int_object_id]])
                    for ext_object_id, int_object_id in
                    index.document_ids(object_ids)
                    if int_object_id in model.inv_object_mapping and
                    (args.limit is None or
                     (model.inv_object_mapping[int_object_id] <
                      args.limit)))
        else:
            raise NotImplementedError()
        # Each cluster needs its own marker shape.
        assert len(cluster_id_to_product_ids) < len(MARKERS)
        if args.l2_normalize:
            logging.info('L2-normalizing representations.')
            object_representations /= np.linalg.norm(
                object_representations,
                axis=1, keepdims=True)
        if args.mode == 'tsne':
            logging.info('Running t-SNE.')
            twodim_object_representations = \
                TSNE(n_components=2, init='pca', random_state=0).\
                fit_transform(object_representations)
            logging.info('Plotting %s.', twodim_object_representations.shape)
            colors = cm.rainbow(
                np.linspace(0, 1, len(cluster_id_to_product_ids)))
            # Plot largest clusters first so smaller ones stay visible on top.
            for idx, cluster_id in enumerate(
                    sorted(cluster_id_to_product_ids.keys(),
                           key=lambda cluster_id: len(
                               cluster_id_to_product_ids[cluster_id]),
                           reverse=True)):
                row_ids = list(cluster_id_to_product_ids[cluster_id])
                plt.scatter(
                    twodim_object_representations[row_ids, 0],
                    twodim_object_representations[row_ids, 1],
                    marker=MARKERS[idx],
                    edgecolors='grey' if args.edges else None,
                    cmap=plt.cm.Spectral,
                    color=colors[idx],
                    alpha=0.3,
                    label=pylatex.utils.escape_latex(cluster_id))
            plt.grid()
            plt.tight_layout()
            if args.legend:
                plt.legend(bbox_to_anchor=(0, -0.15, 1, 0),
                           loc=2,
                           ncol=2,
                           mode='expand',
                           borderaxespad=0)
            if not args.tick_labels:
                plt.gca().get_xaxis().set_visible(False)
                plt.gca().get_yaxis().set_visible(False)
            if not args.border:
                # plt.gcf().patch.set_visible(False)
                plt.gca().axis('off')
            logging.info('Writing %s.', plot_out)
            plt.savefig(plot_out,
                        bbox_inches='tight',
                        transparent=True,
                        pad_inches=0,
                        dpi=200)
        elif args.mode == 'embedding_projector':
            logging.info('Dumping to TensorFlow embedding projector format.')
            with open('{}_vectors.tsv'.format(plot_out), 'w') as f_vectors, \
                    open('{}_meta.tsv'.format(plot_out), 'w') as f_meta:
                f_meta.write('document_id\tclass\n')

                def write_rowids(row_ids, cluster_id):
                    # One vector line and one meta line per document row.
                    for row_id in row_ids:
                        f_vectors.write(
                            '{}\n'.format('\t'.join(
                                '{:.5f}'.format(x)
                                for x in object_representations[row_id])))
                        f_meta.write('{}\t{}\n'.format(
                            index.ext_document_id(
                                model.object_mapping[row_id]),
                            cluster_id))

                for cluster_id in cluster_id_to_product_ids.keys():
                    row_ids = list(cluster_id_to_product_ids[cluster_id])
                    write_rowids(row_ids, cluster_id)
    logging.info('All done!')
# Standard script entry point; propagate main()'s status code to the shell.
if __name__ == '__main__':
    sys.exit(main())
| #!/usr/bin/env python
import sys
from cvangysel import argparse_utils, logging_utils
import argparse
import logging
import matplotlib.cm as cm
import matplotlib.markers as markers
import matplotlib.pyplot as plt
import numpy as np
import os
import pylatex.utils
import pyndri
from sklearn.manifold import TSNE
import nvsm
MARKERS = ['o', 's', '<', '>', '^', 'v', 'd', 'p', '*', '8',
'1', '2', '3', '4',
markers.TICKLEFT, markers.TICKRIGHT,
markers.TICKUP, markers.TICKDOWN,
markers.CARETLEFT, markers.CARETRIGHT,
markers.CARETUP, markers.CARETDOWN]
plt.rcParams["figure.figsize"] = (8.0, 4.25)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('model')
parser.add_argument('index', type=argparse_utils.existing_directory_path)
parser.add_argument('--limit',
type=argparse_utils.positive_int,
default=None)
parser.add_argument('--object_classification',
type=argparse_utils.existing_file_path,
nargs='+',
default=None)
parser.add_argument('--filter_unclassified',
action='store_true',
default=False)
parser.add_argument('--l2_normalize',
action='store_true',
default=False)
parser.add_argument('--mode',
choices=('tsne', 'embedding_projector'),
default='tsne')
parser.add_argument('--legend',
action='store_true',
default=False)
parser.add_argument('--tick_labels',
action='store_true',
default=False)
parser.add_argument('--edges',
action='store_true',
default=False)
parser.add_argument('--border',
action='store_true',
default=False)
parser.add_argument('--plot_out',
type=argparse_utils.nonexisting_file_path,
required=True)
args = parser.parse_args()
try:
logging_utils.configure_logging(args)
except IOError:
return -1
# Set matplotlib style.
plt.style.use('bmh')
logging.info('Loading index.')
index = pyndri.Index(args.index)
logging.info('Loading cuNVSM model.')
model_base, epoch_and_ext = args.model.rsplit('_', 1)
epoch = int(epoch_and_ext.split('.')[0])
if not os.path.exists('{}_meta'.format(model_base)):
model_meta_base, batch_idx = model_base.rsplit('_', 1)
else:
model_meta_base = model_base
model = nvsm.load_model(
nvsm.load_meta(model_meta_base),
model_base, epoch,
only_object_embeddings=True)
raw_object_representations = np.copy(model.object_representations)
if args.limit:
raw_object_representations = raw_object_representations[:args.limit, :]
for object_classification in args.object_classification:
root, ext = os.path.splitext(args.plot_out)
plot_out = '{}-{}.{}'.format(
root, os.path.basename(object_classification), ext.lstrip('.'))
if object_classification and args.filter_unclassified:
logging.info('Filtering unclassified.')
with open(object_classification, 'r') as f_objects:
object_ids = [line.strip().split()[0] for line in f_objects]
indices = sorted(model.inv_object_mapping[idx]
for _, idx in index.document_ids(object_ids)
if idx in model.inv_object_mapping)
logging.info('Considering %d out of %d representations.',
len(indices), len(object_ids))
translation_table = {idx: i for i, idx in enumerate(indices)}
object_representations = raw_object_representations[indices]
assert object_representations.shape[0] == \
len(translation_table)
else:
translation_table = None
raise NotImplementedError()
logging.info('Loading object clusters.')
cluster_id_to_product_ids = {}
if object_classification:
with open(object_classification, 'r') as f_objects:
for line in f_objects:
object_id, cluster_id = line.strip().split()
if cluster_id not in cluster_id_to_product_ids:
cluster_id_to_product_ids[cluster_id] = set()
cluster_id_to_product_ids[cluster_id].add(object_id)
for cluster_id in list(cluster_id_to_product_ids.keys()):
object_ids = list(cluster_id_to_product_ids[cluster_id])
cluster_id_to_product_ids[cluster_id] = set(
(model.inv_object_mapping[int_object_id]
if translation_table is None
else translation_table[
model.inv_object_mapping[int_object_id]])
for ext_object_id, int_object_id in
index.document_ids(object_ids)
if int_object_id in model.inv_object_mapping and
(args.limit is None or
(model.inv_object_mapping[int_object_id] <
args.limit)))
else:
raise NotImplementedError()
assert len(cluster_id_to_product_ids) < len(MARKERS)
if args.l2_normalize:
logging.info('L2-normalizing representations.')
object_representations /= np.linalg.norm(
object_representations,
axis=1, keepdims=True)
if args.mode == 'tsne':
logging.info('Running t-SNE.')
twodim_object_representations = \
TSNE(n_components=2, init='pca', random_state=0).\
fit_transform(object_representations)
logging.info('Plotting %s.', twodim_object_representations.shape)
colors = cm.rainbow(
np.linspace(0, 1, len(cluster_id_to_product_ids)))
for idx, cluster_id in enumerate(
sorted(cluster_id_to_product_ids.keys(),
key=lambda cluster_id: len(
cluster_id_to_product_ids[cluster_id]),
reverse=True)):
row_ids = list(cluster_id_to_product_ids[cluster_id])
plt.scatter(
twodim_object_representations[row_ids, 0],
twodim_object_representations[row_ids, 1],
marker=MARKERS[idx],
edgecolors='grey' if args.edges else None,
cmap=plt.cm.Spectral,
color=colors[idx],
alpha=0.3,
label=pylatex.utils.escape_latex(cluster_id))
plt.grid()
plt.tight_layout()
if args.legend:
plt.legend(bbox_to_anchor=(0, -0.15, 1, 0),
loc=2,
ncol=2,
mode='expand',
borderaxespad=0)
if not args.tick_labels:
plt.gca().get_xaxis().set_visible(False)
plt.gca().get_yaxis().set_visible(False)
if not args.border:
# plt.gcf().patch.set_visible(False)
plt.gca().axis('off')
logging.info('Writing %s.', plot_out)
plt.savefig(plot_out,
bbox_inches='tight',
transparent=True,
pad_inches=0,
dpi=200)
elif args.mode == 'embedding_projector':
logging.info('Dumping to TensorFlow embedding projector format.')
with open('{}_vectors.tsv'.format(plot_out), 'w') as f_vectors, \
open('{}_meta.tsv'.format(plot_out), 'w') as f_meta:
f_meta.write('document_id\tclass\n')
def write_rowids(row_ids, cluster_id):
for row_id in row_ids:
f_vectors.write(
'{}\n'.format('\t'.join(
'{:.5f}'.format(x)
for x in object_representations[row_id])))
f_meta.write('{}\t{}\n'.format(
index.ext_document_id(
model.object_mapping[row_id]),
cluster_id))
for cluster_id in cluster_id_to_product_ids.keys():
row_ids = list(cluster_id_to_product_ids[cluster_id])
write_rowids(row_ids, cluster_id)
logging.info('All done!')
if __name__ == '__main__':
sys.exit(main())
| en | 000069431_cvangysel-cuNVSM_visualize_a71df3d22bfa.py | unknown | 2,390 |
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import abstractmethod
import torch.nn as nn
class ModelInterface(nn.Module):
    """Abstract base class for the models registered below.

    Concrete models must expose dropout configuration and report the
    input resolution they expect.
    """

    @abstractmethod
    def set_dropout_ratio(self, ratio):
        """Set the model's dropout ratio to ``ratio``."""

    @abstractmethod
    def get_input_res(self):
        """Return the input resolution expected by the model."""
from .rmnet_angular import RMNetAngular
from .mobilefacenet import MobileFaceNet
from .landnet import LandmarksNet
from .resnet_angular import ResNetAngular
from .se_resnet_angular import SEResNetAngular
from .shufflenet_v2_angular import ShuffleNetV2Angular
# Registry of available backbone architectures, keyed by config/CLI name.
models_backbones = {'rmnet': RMNetAngular, 'mobilenet': MobileFaceNet, 'resnet': ResNetAngular,
                    'shufflenetv2': ShuffleNetV2Angular, 'se_resnet': SEResNetAngular}
# Registry of available landmark-regression models.
models_landmarks = {'landnet': LandmarksNet}
| """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import abstractmethod
import torch.nn as nn
class ModelInterface(nn.Module):
"""Abstract class for models"""
@abstractmethod
def set_dropout_ratio(self, ratio):
"""Sets dropout ratio of the model"""
@abstractmethod
def get_input_res(self):
"""Returns input resolution"""
from .rmnet_angular import RMNetAngular
from .mobilefacenet import MobileFaceNet
from .landnet import LandmarksNet
from .resnet_angular import ResNetAngular
from .se_resnet_angular import SEResNetAngular
from .shufflenet_v2_angular import ShuffleNetV2Angular
models_backbones = {'rmnet': RMNetAngular, 'mobilenet': MobileFaceNet, 'resnet': ResNetAngular,
'shufflenetv2': ShuffleNetV2Angular, 'se_resnet': SEResNetAngular}
models_landmarks = {'landnet': LandmarksNet}
| en | 000600117_xzry6-openvino_training_extensions_common_a2ecae668f17.py | unknown | 371 |
# -*- coding: utf-8 -*-
# @File : sessionio.py
# @Date : 2021/2/25
# @Desc :
from Lib.api import data_return
from Lib.configs import SessionIO_MSG_ZH, METERPRETER_PROMPT, CODE_MSG_ZH, RPC_SESSION_OPER_SHORT_REQ, CODE_MSG_EN, \
SessionIO_MSG_EN
from Lib.log import logger
from Lib.method import Method
from Lib.rpcclient import RpcClient
from Lib.xcache import Xcache
class SessionIO(object):
    """Read/write proxy between the web layer and a Meterpreter session's
    console, with per-host output caching via Xcache."""

    @staticmethod
    def create(ipaddress=None, sessionid=None, user_input=None):
        """Send ``user_input`` to the session and echo it into the cache.

        ``shell <cmd>`` is rewritten to ``shell -c '<cmd>'``; a bare
        ``shell`` (interactive switch) is rejected with a hint, and
        ``exit`` kills the session instead of being written to it.
        Returns a ``data_return`` context dict in every case.
        """
        try:
            user_input = user_input.strip()
            if user_input.startswith('shell'):
                command = user_input[len('shell'):].strip()
                if len(command) == 0:
                    # Interactive shell switching is unsupported; tell the
                    # user to run one-shot commands instead.
                    new_bufer = "\nNot support switch to Dos/Bash,input like\'shell whoami\' to run os cmd.\n"
                    result = Xcache.add_sessionio_cache(ipaddress, new_bufer)
                    context = data_return(200, result, SessionIO_MSG_ZH.get(200), SessionIO_MSG_EN.get(200))
                    return context
                else:
                    user_input = f"shell -c '{command}'"
            if user_input.startswith('exit'):
                params = [sessionid]
                result = RpcClient.call(Method.SessionMeterpreterSessionKill, params,
                                        timeout=RPC_SESSION_OPER_SHORT_REQ)
                context = data_return(203, result, SessionIO_MSG_ZH.get(203), SessionIO_MSG_EN.get(203))
                return context
            params = [sessionid, user_input]
            result = RpcClient.call(Method.SessionMeterpreterWrite, params, timeout=RPC_SESSION_OPER_SHORT_REQ)
            if result is None:
                context = data_return(305, {}, SessionIO_MSG_ZH.get(305), SessionIO_MSG_EN.get(305))
            elif result.get('result') == 'success':
                # Echo the accepted command into the cached console buffer.
                new_bufer = f"{METERPRETER_PROMPT}{user_input}\n"
                result = Xcache.add_sessionio_cache(ipaddress, new_bufer)
                context = data_return(200, result, SessionIO_MSG_ZH.get(200), SessionIO_MSG_EN.get(200))
            else:
                context = data_return(305, {}, SessionIO_MSG_ZH.get(305), SessionIO_MSG_EN.get(305))
        except Exception as E:
            logger.error(E)
            context = data_return(306, {}, SessionIO_MSG_ZH.get(306), SessionIO_MSG_EN.get(306))
        return context

    @staticmethod
    def update(ipaddress=None, sessionid=None):
        """Poll the session for new output and append it to the cache.

        With no valid ``sessionid`` the cached buffer is returned as-is.
        """
        old_result = Xcache.get_sessionio_cache(ipaddress)
        if sessionid is None or sessionid == -1:
            context = data_return(202, old_result, SessionIO_MSG_ZH.get(202), SessionIO_MSG_EN.get(202))
            return context
        try:
            params = [sessionid]
            result = RpcClient.call(Method.SessionMeterpreterRead, params, timeout=RPC_SESSION_OPER_SHORT_REQ)
            if result is None or (isinstance(result, dict) is not True):
                context = data_return(303, old_result, SessionIO_MSG_ZH.get(303), SessionIO_MSG_EN.get(303))
                return context
            new_bufer = result.get('data')
            result = Xcache.add_sessionio_cache(ipaddress, new_bufer)
            context = data_return(200, result, CODE_MSG_ZH.get(200), CODE_MSG_EN.get(200))  # code is handled specially here
        except Exception as E:
            logger.error(E)
            # NOTE(review): status code 306 paired with message key 405 —
            # inconsistent with the other handlers; confirm which is intended.
            context = data_return(306, old_result, SessionIO_MSG_ZH.get(405), SessionIO_MSG_EN.get(405))
        return context

    @staticmethod
    def destroy(ipaddress=None):
        """Clear the cached console history for this host."""
        result = Xcache.del_sessionio_cache(ipaddress)
        context = data_return(204, result, SessionIO_MSG_ZH.get(204), SessionIO_MSG_EN.get(204))
        return context
| # -*- coding: utf-8 -*-
# @File : sessionio.py
# @Date : 2021/2/25
# @Desc :
from Lib.api import data_return
from Lib.configs import SessionIO_MSG_ZH, METERPRETER_PROMPT, CODE_MSG_ZH, RPC_SESSION_OPER_SHORT_REQ, CODE_MSG_EN, \
SessionIO_MSG_EN
from Lib.log import logger
from Lib.method import Method
from Lib.rpcclient import RpcClient
from Lib.xcache import Xcache
class SessionIO(object):
@staticmethod
def create(ipaddress=None, sessionid=None, user_input=None):
try:
user_input = user_input.strip()
if user_input.startswith('shell'):
command = user_input[len('shell'):].strip()
if len(command) == 0:
new_bufer = "\nNot support switch to Dos/Bash,input like\'shell whoami\' to run os cmd.\n"
result = Xcache.add_sessionio_cache(ipaddress, new_bufer)
context = data_return(200, result, SessionIO_MSG_ZH.get(200), SessionIO_MSG_EN.get(200))
return context
else:
user_input = f"shell -c '{command}'"
if user_input.startswith('exit'):
params = [sessionid]
result = RpcClient.call(Method.SessionMeterpreterSessionKill, params,
timeout=RPC_SESSION_OPER_SHORT_REQ)
context = data_return(203, result, SessionIO_MSG_ZH.get(203), SessionIO_MSG_EN.get(203))
return context
params = [sessionid, user_input]
result = RpcClient.call(Method.SessionMeterpreterWrite, params, timeout=RPC_SESSION_OPER_SHORT_REQ)
if result is None:
context = data_return(305, {}, SessionIO_MSG_ZH.get(305), SessionIO_MSG_EN.get(305))
elif result.get('result') == 'success':
new_bufer = f"{METERPRETER_PROMPT}{user_input}\n"
result = Xcache.add_sessionio_cache(ipaddress, new_bufer)
context = data_return(200, result, SessionIO_MSG_ZH.get(200), SessionIO_MSG_EN.get(200))
else:
context = data_return(305, {}, SessionIO_MSG_ZH.get(305), SessionIO_MSG_EN.get(305))
except Exception as E:
logger.error(E)
context = data_return(306, {}, SessionIO_MSG_ZH.get(306), SessionIO_MSG_EN.get(306))
return context
@staticmethod
def update(ipaddress=None, sessionid=None):
old_result = Xcache.get_sessionio_cache(ipaddress)
if sessionid is None or sessionid == -1:
context = data_return(202, old_result, SessionIO_MSG_ZH.get(202), SessionIO_MSG_EN.get(202))
return context
try:
params = [sessionid]
result = RpcClient.call(Method.SessionMeterpreterRead, params, timeout=RPC_SESSION_OPER_SHORT_REQ)
if result is None or (isinstance(result, dict) is not True):
context = data_return(303, old_result, SessionIO_MSG_ZH.get(303), SessionIO_MSG_EN.get(303))
return context
new_bufer = result.get('data')
result = Xcache.add_sessionio_cache(ipaddress, new_bufer)
context = data_return(200, result, CODE_MSG_ZH.get(200), CODE_MSG_EN.get(200)) # code特殊处理
except Exception as E:
logger.error(E)
context = data_return(306, old_result, SessionIO_MSG_ZH.get(405), SessionIO_MSG_EN.get(405))
return context
@staticmethod
def destroy(ipaddress=None):
"""清空历史记录"""
result = Xcache.del_sessionio_cache(ipaddress)
context = data_return(204, result, SessionIO_MSG_ZH.get(204), SessionIO_MSG_EN.get(204))
return context
| en | 000707334_evi1hack-viperpython_sessionio_5ee00cdde83b.py | unknown | 1,178 |
import ctypes
import gc
import logging
import time
from collections import deque
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import angr
import archinfo
from angr import Block, Project, SimState
from angr.engines.successors import SimSuccessors
from cle.backends.elf.metaelf import MetaELF
from capstone import x86_const
from ..errors import HaseError
from ..loader import Loader
from ..progress_log import ProgressLog
from ..pt import Instruction, InstructionClass
from ..pwn_wrapper import ELF, Coredump, Mapping
from .cdanalyzer import CoredumpAnalyzer
from .filter import FilterTrace
from .hook import setup_project_hook
from .start_state import create_start_state
from .state import State, StateManager
# Module-level logger.
l = logging.getLogger(__name__)
def constrain_registers(state: State, coredump: Coredump) -> bool:
    """Copy the coredump's register values into ``state``.

    Only performed when the symbolic state has reached the coredump's RIP
    (with matching RSP); on a RIP mismatch the addresses are logged and
    False is returned.
    """
    # FIXME: if exception caught is omitted by hook?
    if state.registers["rip"].value != coredump.registers["rip"]:
        l.warning("RIP mismatch.")
        arip = state.simstate.regs.rip
        crip = hex(coredump.registers["rip"])
        arsp = state.simstate.regs.rsp
        crsp = hex(coredump.registers["rsp"])
        l.warning("{} {} {} {}".format(arip, crip, arsp, crsp))
        return False
    # Same address: transfer registers (deliberately excluding rbp/rsp).
    assert state.registers["rsp"].value == coredump.registers["rsp"]
    transferable = (
        "gs", "rip", "rdx", "r15", "rax", "rsi", "rcx", "r14", "fs",
        "r12", "r13", "r10", "r11", "rbx", "r8", "r9", "eflags", "rdi",
    )
    for reg_name in transferable:
        state.registers[reg_name] = coredump.registers[reg_name]
    return True
def repair_syscall_jump(state_block: Any, step: SimSuccessors) -> SimState:
    """Step past a manually-executed ``syscall`` instruction.

    A raw ``syscall`` has no trace entry; when the first instruction of the
    block is a syscall and its successor lands in the kernel-stub address
    range, execute one more instruction to get past it.
    """
    first_insn = state_block.capstone.insns[0].insn
    if first_insn.mnemonic.startswith("syscall"):
        successor = step.successors[0]
        if 0x3000000 <= successor.reg_concrete("rip") < 0x3002000:
            return successor.step(num_inst=1)
    return step
class Tracer:
    def __init__(
        self,
        executable: str,
        trace: List[Instruction],
        coredump: Coredump,
        loader: Loader,
        name: str = "(unamed)",
    ) -> None:
        """Prepare an angr-based replay of ``trace`` for ``executable``.

        Loads the angr project, trims the trace to begin at ``_start`` or
        ``main``, installs procedure hooks, filters the trace, and creates
        the initial state with breakpoints that force concrete instruction
        pointers along the recorded trace.
        """
        self.name = name
        self.executable = executable
        # we keep this for debugging in ipdb
        self.loader = loader
        self.project = loader.angr_project()
        assert self.project.loader.main_object.os.startswith("UNIX")
        self.coredump = coredump
        self.debug_unsat = None  # type: Optional[SimState]
        self.instruction = None  # type: Optional[Instruction]
        self.trace = trace
        elf = ELF(executable)
        start = elf.symbols.get("_start")
        main = elf.symbols.get("main")
        # .text load address of every shared library, so coredump addresses
        # can be mapped back to files.
        lib_text_addrs = {}  # type: Dict[str, int]
        lib_opts = self.loader.load_options()["lib_opts"]
        for lib in lib_opts:
            lib_text_addrs[lib] = lib_opts[lib]['base_addr'] + MetaELF.get_text_offset(lib)
        self.cdanalyzer = CoredumpAnalyzer(
            elf, self.coredump, lib_text_addrs
        )
        # Drop everything before _start/main.
        # NOTE(review): no ``break`` here — a later match would re-trim from
        # the *original* list; confirm that is intended.
        for (idx, event) in enumerate(self.trace):
            if event.ip == start or event.ip == main:
                self.trace = trace[idx:]
        self.use_hook = True
        hooked_symbols, omitted_section = setup_project_hook(
            self.project, self.cdanalyzer.gdb
        )
        self.filter = FilterTrace(
            self.project,
            self.trace,
            hooked_symbols,
            self.cdanalyzer.gdb,
            omitted_section,
            elf.statically_linked,
            name,
        )
        self.old_trace = self.trace
        self.trace, self.trace_idx, self.hook_target = self.filter.filtered_trace()
        l.info(
            "Trace length: {} | OldTrace length: {}".format(
                len(self.trace), len(self.old_trace)
            )
        )
        self.hook_plt_idx = list(self.hook_target.keys())
        self.hook_plt_idx.sort()
        self.filter.entry_check()
        self.start_state = create_start_state(self.project, self.trace, self.cdanalyzer)
        # Breakpoints that keep symbolic execution pinned to the trace.
        self.start_state.inspect.b(
            "call", when=angr.BP_BEFORE, action=self.concretize_indirect_calls
        )
        self.start_state.inspect.b(
            "successor", when=angr.BP_AFTER, action=self.concretize_ip
        )
def concretize_indirect_calls(self, state: SimState) -> None:
assert self.instruction is not None
if not state.ip.symbolic:
ip = state.solver.eval(state.ip)
assert self.filter.test_plt_vdso(ip) or ip == self.instruction.ip
state.inspect.function_address = self.instruction.ip
def concretize_ip(self, state: SimState) -> None:
assert self.instruction is not None
ip = self.instruction.ip
if state.scratch.target.symbolic:
state.ip = ip
state.add_constraints(state.scratch.target == ip, action=True)
# avoid evaluation of symbolic target
state.scratch.target = ip
def desc_trace(
self,
start: int,
end: Optional[int] = None,
filt: Optional[Callable[[int], bool]] = None,
) -> None:
for i, inst in enumerate(self.trace[start:end]):
if not filt or filt(inst.ip):
print(
i + start,
self.trace_idx[i + start],
hex(inst.ip),
self.project.loader.describe_addr(inst.ip),
)
def desc_old_trace(
self,
start: int,
end: Optional[int] = None,
filt: Optional[Callable[[int], bool]] = None,
) -> None:
for i, inst in enumerate(self.old_trace[start:end]):
if not filt or filt(inst.ip):
print(
i + start, hex(inst.ip), self.project.loader.describe_addr(inst.ip)
)
def desc_addr(self, addr: int) -> str:
return self.project.loader.describe_addr(addr)
def desc_stack_inst(
self, start: int, end: Optional[int] = None, show_extra: bool = True
) -> None:
for i, inst in enumerate(self.trace[start:end]):
blk = self.project.factory.block(inst.ip)
first_ins = blk.capstone.insns[0]
if (
first_ins.mnemonic == "push"
or first_ins.mnemonic == "pop"
or first_ins.mnemonic == "enter"
or first_ins.mnemonic == "leave"
# or first_ins.mnemonic == 'call'
# or first_ins.mnemonic == 'retn'
or (
len(first_ins.operands) > 0
and first_ins.operands[0].reg
in (x86_const.X86_REG_RSP, x86_const.X86_REG_RBP)
)
):
if show_extra:
print(
i + start,
self.trace_idx[i + start],
hex(inst.ip),
self.desc_addr(inst.ip),
str(first_ins),
)
else:
print(str(first_ins))
def desc_callstack(self, state: Optional[SimState] = None) -> None:
state = self.debug_state[-1] if state is None else state
callstack = state.callstack
for i, c in enumerate(callstack):
print(
"Frame {}: {} => {}, sp = {}".format(
i,
self.desc_addr(c.call_site_addr),
self.desc_addr(c.func_addr),
hex(c.stack_ptr),
)
)
def repair_exit_handler(self, state: SimState, step: SimSuccessors) -> SimState:
artifacts = getattr(step, "artifacts", None)
if (
artifacts
and "procedure" in artifacts.keys()
and artifacts["name"] == "exit"
):
if len(state.libc.exit_handler):
addr = state.libc.exit_handler[0]
step = self.project.factory.successors(
state, num_inst=1, force_addr=addr
)
return step
def repair_alloca_ins(self, state: SimState, state_block: Block) -> None:
# NOTE: alloca problem, focus on sub rsp, rax
# Typical usage: alloca(strlen(x))
capstone = state_block.capstone
first_ins = capstone.insns[0].insn
if first_ins.mnemonic == "sub":
if (
first_ins.operands[0].reg
in (x86_const.X86_REG_RSP, x86_const.X86_REG_RBP)
and first_ins.operands[1].type == 1
):
reg_name = first_ins.reg_name(first_ins.operands[1].reg)
reg_v = getattr(state.regs, reg_name)
if state.solver.symbolic(reg_v):
setattr(state.regs, reg_name, state.libc.max_str_len)
def repair_jump_ins(
self,
state: SimState,
state_block: Any,
previous_instruction: Instruction,
instruction: Instruction,
) -> Tuple[bool, str]:
# NOTE: typical case: switch(getchar())
if previous_instruction.iclass == InstructionClass.ptic_other:
return False, ""
jump_ins = ["jmp", "call"] # currently not deal with jcc regs
capstone = state_block.capstone
first_ins = capstone.insns[0].insn
ins_repr = first_ins.mnemonic
if ins_repr.startswith("ret"):
if not state.solver.symbolic(state.regs.rsp):
mem = state.memory.load(state.regs.rsp, 8)
jump_target = 0
if not state.solver.symbolic(mem):
jump_target = state.solver.eval(mem)
if jump_target != instruction.ip:
return True, "ret"
else:
return True, "ok"
else:
return True, "ret"
for ins in jump_ins:
if ins_repr.startswith(ins):
# call rax
if first_ins.operands[0].type == 1:
reg_name = first_ins.op_str
reg_v = getattr(state.regs, reg_name)
if (
state.solver.symbolic(reg_v)
or state.solver.eval(reg_v) != instruction.ip
):
setattr(state.regs, reg_name, instruction.ip)
return True, ins
# jmp 0xaabb
if first_ins.operands[0].type == 2:
return True, ins
# jmp [base + index*scale + disp]
if first_ins.operands[0].type == 3:
self.last_jump_table = state
mem = first_ins.operands[0].value.mem
target = mem.disp
if mem.index:
reg_index_name = first_ins.reg_name(mem.index)
reg_index = getattr(state.regs, reg_index_name)
if state.solver.symbolic(reg_index):
return True, ins
else:
target += state.solver.eval(reg_index) * mem.scale
if mem.base:
reg_base_name = first_ins.reg_name(mem.base)
reg_base = getattr(state.regs, reg_base_name)
if state.solver.symbolic(reg_base):
return True, ins
else:
target += state.solver.eval(reg_base)
ip_mem = state.memory.load(target, 8, endness="Iend_LE")
if not state.solver.symbolic(ip_mem):
jump_target = state.solver.eval(ip_mem)
if jump_target != instruction.ip:
return True, ins
else:
return True, "ok"
else:
return True, ins
return False, "ok"
def repair_ip(self, state: SimState) -> int:
try:
addr = state.solver.eval(state._ip)
# NOTE: repair IFuncResolver
if (
self.project.loader.find_object_containing(addr)
== self.project.loader.extern_object
):
func = self.project._sim_procedures.get(addr, None)
if func:
funcname = func.kwargs["funcname"]
libf = self.project.loader.find_symbol(funcname)
if libf:
addr = libf.rebased_addr
except Exception:
logging.exception("Error while repairing ip for {}".format(self.name))
# NOTE: currently just try to repair ip for syscall
addr = self.debug_state[-2].addr
return addr
def repair_func_resolver(self, state: SimState, step: SimSuccessors) -> SimState:
artifacts = getattr(step, "artifacts", None)
if (
artifacts
and "procedure" in artifacts.keys()
and artifacts["name"] == "IFuncResolver"
):
func = self.filter.find_function(self.debug_state[-2].addr)
if func:
addr = self.project.loader.find_symbol(func.name).rebased_addr
step = self.project.factory.successors(
state, num_inst=1, force_addr=addr
)
else:
raise HaseError("Cannot resolve function")
return step
def last_match(self, choice: SimState, instruction: Instruction) -> bool:
# if last trace is A -> A
if (
instruction == self.trace[-1]
and len(self.trace) > 2
and self.trace[-1].ip == self.trace[-2].ip
):
if choice.addr == instruction.ip:
return True
return False
def jump_match(
self,
old_state: SimState,
choice: SimState,
previous_instruction: Instruction,
instruction: Instruction,
) -> bool:
if choice.addr == instruction.ip:
l.debug("jump 0%x -> 0%x", previous_instruction.ip, choice.addr)
return True
return False
def repair_satness(self, old_state: SimState, new_state: SimState) -> None:
if not new_state.solver.satisfiable():
new_state.solver._stored_solver = old_state.solver._solver.branch()
if not self.debug_unsat:
self.debug_sat = old_state
self.debug_unsat = new_state
def repair_ip_at_syscall(self, old_block: Block, new_state: SimState) -> None:
capstone = old_block.capstone
first_ins = capstone.insns[0].insn
ins_repr = first_ins.mnemonic
if ins_repr.startswith("syscall"):
new_state.regs.ip_at_syscall = new_state.ip
def post_execute(
self, old_state: SimState, old_block: Block, state: SimState
) -> None:
self.repair_satness(old_state, state)
self.repair_ip_at_syscall(old_block, state)
def execute(
self,
state: SimState,
previous_instruction: Instruction,
instruction: Instruction,
index: int,
) -> Tuple[SimState, SimState]:
self.debug_state.append(state)
state_block = state.block() # type: Block
force_jump, force_type = self.repair_jump_ins(
state, state_block, previous_instruction, instruction
)
self.repair_alloca_ins(state, state_block)
try:
step = self.project.factory.successors(
state, num_inst=1 # , force_addr=addr
)
step = repair_syscall_jump(state_block, step)
step = self.repair_func_resolver(state, step)
step = self.repair_exit_handler(state, step)
except Exception:
logging.exception("Error while finding successor for {}".format(self.name))
new_state = state.copy()
new_state.regs.ip = instruction.ip
self.post_execute(state, state_block, new_state)
return state, new_state
if force_jump:
new_state = state.copy()
if force_type == "call":
if not self.project.is_hooked(instruction.ip):
new_state.regs.rsp -= 8
ret_addr = state.addr + state_block.capstone.insns[0].size
new_state.memory.store(
new_state.regs.rsp, ret_addr, endness="Iend_LE"
)
elif force_type == "ret":
new_state.regs.rsp += 8
new_state.regs.ip = instruction.ip
choices = [new_state]
else:
choices = step.successors + step.unsat_successors
old_state = state
l.info(repr(state) + " " + repr(previous_instruction) + " " + repr(instruction))
for choice in choices:
# HACKS: if ip is symbolic
try:
if self.last_match(choice, instruction):
return choice, choice
if self.jump_match(
old_state, choice, previous_instruction, instruction
):
self.post_execute(old_state, state_block, choice)
return old_state, choice
except angr.SimValueError:
logging.exception("Error while jumping in {}".format(self.name))
pass
new_state = state.copy()
new_state.regs.ip = instruction.ip
return state, new_state
def valid_address(self, address: int) -> bool:
return self.project.loader.find_object_containing(address)
def run(self) -> StateManager:
simstate = self.start_state
states = StateManager(self, len(self.trace) + 1)
states.add_major(State(0, None, self.trace[0], None, simstate))
self.debug_unsat = None # type: Optional[SimState]
self.debug_state = deque(maxlen=50) # type: deque
self.skip_addr = {} # type: Dict[int, int]
cnt = -1
interval = max(1, len(self.trace) // 200)
length = len(self.trace) - 1
l.info("start processing trace")
progress_log = ProgressLog(
name="process trace of {}".format(self.name),
total_steps=len(self.trace),
log_frequency=int(1e3),
kill_limit=60 * 60 * 24,
)
# prev_instr.ip == state.ip
for previous_idx in range(len(self.trace) - 1):
previous_instruction = self.trace[previous_idx]
if previous_idx + 1 >= len(self.trace):
self.instruction = self.trace[previous_idx]
else:
self.instruction = self.trace[previous_idx + 1]
cnt += 1
progress_log.update(cnt)
if not cnt % 500:
gc.collect()
assert self.valid_address(self.instruction.ip)
old_simstate, new_simstate = self.execute(
simstate, previous_instruction, self.instruction, cnt
)
simstate = new_simstate
if cnt % interval == 0 or length - cnt < 15:
states.add_major(
State(
cnt,
previous_instruction,
self.instruction,
old_simstate,
new_simstate,
)
)
if (
self.project.loader.find_object_containing(self.instruction.ip)
== self.project.loader.main_object
):
states.last_main_state = State(
cnt,
previous_instruction,
self.instruction,
old_simstate,
new_simstate,
)
constrain_registers(states.major_states[-1], self.coredump)
return states
| import ctypes
import gc
import logging
import time
from collections import deque
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import angr
import archinfo
from angr import Block, Project, SimState
from angr.engines.successors import SimSuccessors
from cle.backends.elf.metaelf import MetaELF
from capstone import x86_const
from ..errors import HaseError
from ..loader import Loader
from ..progress_log import ProgressLog
from ..pt import Instruction, InstructionClass
from ..pwn_wrapper import ELF, Coredump, Mapping
from .cdanalyzer import CoredumpAnalyzer
from .filter import FilterTrace
from .hook import setup_project_hook
from .start_state import create_start_state
from .state import State, StateManager
l = logging.getLogger(__name__)
def constrain_registers(state: State, coredump: Coredump) -> bool:
# FIXME: if exception caught is omitted by hook?
# If same address, then give registers
if state.registers["rip"].value == coredump.registers["rip"]:
# don't give rbp, rsp
assert state.registers["rsp"].value == coredump.registers["rsp"]
registers = [
"gs",
"rip",
"rdx",
"r15",
"rax",
"rsi",
"rcx",
"r14",
"fs",
"r12",
"r13",
"r10",
"r11",
"rbx",
"r8",
"r9",
"eflags",
"rdi",
]
for name in registers:
state.registers[name] = coredump.registers[name]
return True
else:
l.warning("RIP mismatch.")
arip = state.simstate.regs.rip
crip = hex(coredump.registers["rip"])
arsp = state.simstate.regs.rsp
crsp = hex(coredump.registers["rsp"])
l.warning("{} {} {} {}".format(arip, crip, arsp, crsp))
return False
def repair_syscall_jump(state_block: Any, step: SimSuccessors) -> SimState:
capstone = state_block.capstone
first_ins = capstone.insns[0].insn
ins_repr = first_ins.mnemonic
# manually syscall will have no entry and just execute it.
if (
ins_repr.startswith("syscall")
and 0x3000000 <= step.successors[0].reg_concrete("rip") < 0x3002000
):
return step.successors[0].step(num_inst=1)
return step
class Tracer:
def __init__(
self,
executable: str,
trace: List[Instruction],
coredump: Coredump,
loader: Loader,
name: str = "(unamed)",
) -> None:
self.name = name
self.executable = executable
# we keep this for debugging in ipdb
self.loader = loader
self.project = loader.angr_project()
assert self.project.loader.main_object.os.startswith("UNIX")
self.coredump = coredump
self.debug_unsat = None # type: Optional[SimState]
self.instruction = None # type: Optional[Instruction]
self.trace = trace
elf = ELF(executable)
start = elf.symbols.get("_start")
main = elf.symbols.get("main")
lib_text_addrs = {} # type: Dict[str, int]
lib_opts = self.loader.load_options()["lib_opts"]
for lib in lib_opts:
lib_text_addrs[lib] = lib_opts[lib]['base_addr'] + MetaELF.get_text_offset(lib)
self.cdanalyzer = CoredumpAnalyzer(
elf, self.coredump, lib_text_addrs
)
for (idx, event) in enumerate(self.trace):
if event.ip == start or event.ip == main:
self.trace = trace[idx:]
self.use_hook = True
hooked_symbols, omitted_section = setup_project_hook(
self.project, self.cdanalyzer.gdb
)
self.filter = FilterTrace(
self.project,
self.trace,
hooked_symbols,
self.cdanalyzer.gdb,
omitted_section,
elf.statically_linked,
name,
)
self.old_trace = self.trace
self.trace, self.trace_idx, self.hook_target = self.filter.filtered_trace()
l.info(
"Trace length: {} | OldTrace length: {}".format(
len(self.trace), len(self.old_trace)
)
)
self.hook_plt_idx = list(self.hook_target.keys())
self.hook_plt_idx.sort()
self.filter.entry_check()
self.start_state = create_start_state(self.project, self.trace, self.cdanalyzer)
self.start_state.inspect.b(
"call", when=angr.BP_BEFORE, action=self.concretize_indirect_calls
)
self.start_state.inspect.b(
"successor", when=angr.BP_AFTER, action=self.concretize_ip
)
def concretize_indirect_calls(self, state: SimState) -> None:
assert self.instruction is not None
if not state.ip.symbolic:
ip = state.solver.eval(state.ip)
assert self.filter.test_plt_vdso(ip) or ip == self.instruction.ip
state.inspect.function_address = self.instruction.ip
def concretize_ip(self, state: SimState) -> None:
assert self.instruction is not None
ip = self.instruction.ip
if state.scratch.target.symbolic:
state.ip = ip
state.add_constraints(state.scratch.target == ip, action=True)
# avoid evaluation of symbolic target
state.scratch.target = ip
def desc_trace(
self,
start: int,
end: Optional[int] = None,
filt: Optional[Callable[[int], bool]] = None,
) -> None:
for i, inst in enumerate(self.trace[start:end]):
if not filt or filt(inst.ip):
print(
i + start,
self.trace_idx[i + start],
hex(inst.ip),
self.project.loader.describe_addr(inst.ip),
)
def desc_old_trace(
self,
start: int,
end: Optional[int] = None,
filt: Optional[Callable[[int], bool]] = None,
) -> None:
for i, inst in enumerate(self.old_trace[start:end]):
if not filt or filt(inst.ip):
print(
i + start, hex(inst.ip), self.project.loader.describe_addr(inst.ip)
)
def desc_addr(self, addr: int) -> str:
return self.project.loader.describe_addr(addr)
def desc_stack_inst(
self, start: int, end: Optional[int] = None, show_extra: bool = True
) -> None:
for i, inst in enumerate(self.trace[start:end]):
blk = self.project.factory.block(inst.ip)
first_ins = blk.capstone.insns[0]
if (
first_ins.mnemonic == "push"
or first_ins.mnemonic == "pop"
or first_ins.mnemonic == "enter"
or first_ins.mnemonic == "leave"
# or first_ins.mnemonic == 'call'
# or first_ins.mnemonic == 'retn'
or (
len(first_ins.operands) > 0
and first_ins.operands[0].reg
in (x86_const.X86_REG_RSP, x86_const.X86_REG_RBP)
)
):
if show_extra:
print(
i + start,
self.trace_idx[i + start],
hex(inst.ip),
self.desc_addr(inst.ip),
str(first_ins),
)
else:
print(str(first_ins))
def desc_callstack(self, state: Optional[SimState] = None) -> None:
state = self.debug_state[-1] if state is None else state
callstack = state.callstack
for i, c in enumerate(callstack):
print(
"Frame {}: {} => {}, sp = {}".format(
i,
self.desc_addr(c.call_site_addr),
self.desc_addr(c.func_addr),
hex(c.stack_ptr),
)
)
def repair_exit_handler(self, state: SimState, step: SimSuccessors) -> SimState:
artifacts = getattr(step, "artifacts", None)
if (
artifacts
and "procedure" in artifacts.keys()
and artifacts["name"] == "exit"
):
if len(state.libc.exit_handler):
addr = state.libc.exit_handler[0]
step = self.project.factory.successors(
state, num_inst=1, force_addr=addr
)
return step
def repair_alloca_ins(self, state: SimState, state_block: Block) -> None:
# NOTE: alloca problem, focus on sub rsp, rax
# Typical usage: alloca(strlen(x))
capstone = state_block.capstone
first_ins = capstone.insns[0].insn
if first_ins.mnemonic == "sub":
if (
first_ins.operands[0].reg
in (x86_const.X86_REG_RSP, x86_const.X86_REG_RBP)
and first_ins.operands[1].type == 1
):
reg_name = first_ins.reg_name(first_ins.operands[1].reg)
reg_v = getattr(state.regs, reg_name)
if state.solver.symbolic(reg_v):
setattr(state.regs, reg_name, state.libc.max_str_len)
def repair_jump_ins(
self,
state: SimState,
state_block: Any,
previous_instruction: Instruction,
instruction: Instruction,
) -> Tuple[bool, str]:
# NOTE: typical case: switch(getchar())
if previous_instruction.iclass == InstructionClass.ptic_other:
return False, ""
jump_ins = ["jmp", "call"] # currently not deal with jcc regs
capstone = state_block.capstone
first_ins = capstone.insns[0].insn
ins_repr = first_ins.mnemonic
if ins_repr.startswith("ret"):
if not state.solver.symbolic(state.regs.rsp):
mem = state.memory.load(state.regs.rsp, 8)
jump_target = 0
if not state.solver.symbolic(mem):
jump_target = state.solver.eval(mem)
if jump_target != instruction.ip:
return True, "ret"
else:
return True, "ok"
else:
return True, "ret"
for ins in jump_ins:
if ins_repr.startswith(ins):
# call rax
if first_ins.operands[0].type == 1:
reg_name = first_ins.op_str
reg_v = getattr(state.regs, reg_name)
if (
state.solver.symbolic(reg_v)
or state.solver.eval(reg_v) != instruction.ip
):
setattr(state.regs, reg_name, instruction.ip)
return True, ins
# jmp 0xaabb
if first_ins.operands[0].type == 2:
return True, ins
# jmp [base + index*scale + disp]
if first_ins.operands[0].type == 3:
self.last_jump_table = state
mem = first_ins.operands[0].value.mem
target = mem.disp
if mem.index:
reg_index_name = first_ins.reg_name(mem.index)
reg_index = getattr(state.regs, reg_index_name)
if state.solver.symbolic(reg_index):
return True, ins
else:
target += state.solver.eval(reg_index) * mem.scale
if mem.base:
reg_base_name = first_ins.reg_name(mem.base)
reg_base = getattr(state.regs, reg_base_name)
if state.solver.symbolic(reg_base):
return True, ins
else:
target += state.solver.eval(reg_base)
ip_mem = state.memory.load(target, 8, endness="Iend_LE")
if not state.solver.symbolic(ip_mem):
jump_target = state.solver.eval(ip_mem)
if jump_target != instruction.ip:
return True, ins
else:
return True, "ok"
else:
return True, ins
return False, "ok"
def repair_ip(self, state: SimState) -> int:
try:
addr = state.solver.eval(state._ip)
# NOTE: repair IFuncResolver
if (
self.project.loader.find_object_containing(addr)
== self.project.loader.extern_object
):
func = self.project._sim_procedures.get(addr, None)
if func:
funcname = func.kwargs["funcname"]
libf = self.project.loader.find_symbol(funcname)
if libf:
addr = libf.rebased_addr
except Exception:
logging.exception("Error while repairing ip for {}".format(self.name))
# NOTE: currently just try to repair ip for syscall
addr = self.debug_state[-2].addr
return addr
def repair_func_resolver(self, state: SimState, step: SimSuccessors) -> SimState:
artifacts = getattr(step, "artifacts", None)
if (
artifacts
and "procedure" in artifacts.keys()
and artifacts["name"] == "IFuncResolver"
):
func = self.filter.find_function(self.debug_state[-2].addr)
if func:
addr = self.project.loader.find_symbol(func.name).rebased_addr
step = self.project.factory.successors(
state, num_inst=1, force_addr=addr
)
else:
raise HaseError("Cannot resolve function")
return step
def last_match(self, choice: SimState, instruction: Instruction) -> bool:
# if last trace is A -> A
if (
instruction == self.trace[-1]
and len(self.trace) > 2
and self.trace[-1].ip == self.trace[-2].ip
):
if choice.addr == instruction.ip:
return True
return False
def jump_match(
self,
old_state: SimState,
choice: SimState,
previous_instruction: Instruction,
instruction: Instruction,
) -> bool:
if choice.addr == instruction.ip:
l.debug("jump 0%x -> 0%x", previous_instruction.ip, choice.addr)
return True
return False
def repair_satness(self, old_state: SimState, new_state: SimState) -> None:
if not new_state.solver.satisfiable():
new_state.solver._stored_solver = old_state.solver._solver.branch()
if not self.debug_unsat:
self.debug_sat = old_state
self.debug_unsat = new_state
def repair_ip_at_syscall(self, old_block: Block, new_state: SimState) -> None:
capstone = old_block.capstone
first_ins = capstone.insns[0].insn
ins_repr = first_ins.mnemonic
if ins_repr.startswith("syscall"):
new_state.regs.ip_at_syscall = new_state.ip
def post_execute(
self, old_state: SimState, old_block: Block, state: SimState
) -> None:
self.repair_satness(old_state, state)
self.repair_ip_at_syscall(old_block, state)
def execute(
self,
state: SimState,
previous_instruction: Instruction,
instruction: Instruction,
index: int,
) -> Tuple[SimState, SimState]:
self.debug_state.append(state)
state_block = state.block() # type: Block
force_jump, force_type = self.repair_jump_ins(
state, state_block, previous_instruction, instruction
)
self.repair_alloca_ins(state, state_block)
try:
step = self.project.factory.successors(
state, num_inst=1 # , force_addr=addr
)
step = repair_syscall_jump(state_block, step)
step = self.repair_func_resolver(state, step)
step = self.repair_exit_handler(state, step)
except Exception:
logging.exception("Error while finding successor for {}".format(self.name))
new_state = state.copy()
new_state.regs.ip = instruction.ip
self.post_execute(state, state_block, new_state)
return state, new_state
if force_jump:
new_state = state.copy()
if force_type == "call":
if not self.project.is_hooked(instruction.ip):
new_state.regs.rsp -= 8
ret_addr = state.addr + state_block.capstone.insns[0].size
new_state.memory.store(
new_state.regs.rsp, ret_addr, endness="Iend_LE"
)
elif force_type == "ret":
new_state.regs.rsp += 8
new_state.regs.ip = instruction.ip
choices = [new_state]
else:
choices = step.successors + step.unsat_successors
old_state = state
l.info(repr(state) + " " + repr(previous_instruction) + " " + repr(instruction))
for choice in choices:
# HACKS: if ip is symbolic
try:
if self.last_match(choice, instruction):
return choice, choice
if self.jump_match(
old_state, choice, previous_instruction, instruction
):
self.post_execute(old_state, state_block, choice)
return old_state, choice
except angr.SimValueError:
logging.exception("Error while jumping in {}".format(self.name))
pass
new_state = state.copy()
new_state.regs.ip = instruction.ip
return state, new_state
def valid_address(self, address: int) -> bool:
return self.project.loader.find_object_containing(address)
def run(self) -> StateManager:
simstate = self.start_state
states = StateManager(self, len(self.trace) + 1)
states.add_major(State(0, None, self.trace[0], None, simstate))
self.debug_unsat = None # type: Optional[SimState]
self.debug_state = deque(maxlen=50) # type: deque
self.skip_addr = {} # type: Dict[int, int]
cnt = -1
interval = max(1, len(self.trace) // 200)
length = len(self.trace) - 1
l.info("start processing trace")
progress_log = ProgressLog(
name="process trace of {}".format(self.name),
total_steps=len(self.trace),
log_frequency=int(1e3),
kill_limit=60 * 60 * 24,
)
# prev_instr.ip == state.ip
for previous_idx in range(len(self.trace) - 1):
previous_instruction = self.trace[previous_idx]
if previous_idx + 1 >= len(self.trace):
self.instruction = self.trace[previous_idx]
else:
self.instruction = self.trace[previous_idx + 1]
cnt += 1
progress_log.update(cnt)
if not cnt % 500:
gc.collect()
assert self.valid_address(self.instruction.ip)
old_simstate, new_simstate = self.execute(
simstate, previous_instruction, self.instruction, cnt
)
simstate = new_simstate
if cnt % interval == 0 or length - cnt < 15:
states.add_major(
State(
cnt,
previous_instruction,
self.instruction,
old_simstate,
new_simstate,
)
)
if (
self.project.loader.find_object_containing(self.instruction.ip)
== self.project.loader.main_object
):
states.last_main_state = State(
cnt,
previous_instruction,
self.instruction,
old_simstate,
new_simstate,
)
constrain_registers(states.major_states[-1], self.coredump)
return states
| en | 000661692_efeslab-hase_tracer_20ccb368c3b8.py | unknown | 5,726 |
import json
import time
import torch
import random
import numpy as np
from pprint import pprint
from argus.callbacks import MonitorCheckpoint, \
EarlyStopping, LoggingToFile, ReduceLROnPlateau
from torch.utils.data import DataLoader
from src.stacking.datasets import get_out_of_folds_data, StackingDataset
from src.stacking.transforms import get_transforms
from src.stacking.argus_models import StackingModel
from src import config
EXPERIMENT_NAME = 'fcnet_stacking_rs_004'
START_FROM = 0
EXPERIMENTS = [
'auxiliary_007',
'auxiliary_010',
'auxiliary_012',
'auxiliary_014'
]
DATASET_SIZE = 128 * 256
CORRECTIONS = True
if config.kernel:
NUM_WORKERS = 2
else:
NUM_WORKERS = 4
SAVE_DIR = config.experiments_dir / EXPERIMENT_NAME
def train_folds(save_dir, folds_data):
random_params = {
'base_size': int(np.random.choice([64, 128, 256, 512])),
'reduction_scale': int(np.random.choice([2, 4, 8, 16])),
'p_dropout': float(np.random.uniform(0.0, 0.5)),
'lr': float(np.random.uniform(0.0001, 0.00001)),
'patience': int(np.random.randint(3, 12)),
'factor': float(np.random.uniform(0.5, 0.8)),
'batch_size': int(np.random.choice([32, 64, 128])),
}
pprint(random_params)
save_dir.mkdir(parents=True, exist_ok=True)
with open(save_dir / 'random_params.json', 'w') as outfile:
json.dump(random_params, outfile)
params = {
'nn_module': ('FCNet', {
'in_channels': len(config.classes) * len(EXPERIMENTS),
'num_classes': len(config.classes),
'base_size': random_params['base_size'],
'reduction_scale': random_params['reduction_scale'],
'p_dropout': random_params['p_dropout']
}),
'loss': 'BCEWithLogitsLoss',
'optimizer': ('Adam', {'lr': random_params['lr']}),
'device': 'cuda',
}
for fold in config.folds:
val_folds = [fold]
train_folds = list(set(config.folds) - set(val_folds))
save_fold_dir = save_dir / f'fold_{fold}'
print(f"Val folds: {val_folds}, Train folds: {train_folds}")
print(f"Fold save dir {save_fold_dir}")
train_dataset = StackingDataset(folds_data, train_folds,
get_transforms(True),
DATASET_SIZE)
val_dataset = StackingDataset(folds_data, val_folds,
get_transforms(False))
train_loader = DataLoader(train_dataset,
batch_size=random_params['batch_size'],
shuffle=True, drop_last=True,
num_workers=NUM_WORKERS)
val_loader = DataLoader(val_dataset,
batch_size=random_params['batch_size'] * 2,
shuffle=False, num_workers=NUM_WORKERS)
model = StackingModel(params)
callbacks = [
MonitorCheckpoint(save_fold_dir, monitor='val_lwlrap', max_saves=1),
ReduceLROnPlateau(monitor='val_lwlrap',
patience=random_params['patience'],
factor=random_params['factor'],
min_lr=1e-8),
EarlyStopping(monitor='val_lwlrap', patience=20),
LoggingToFile(save_fold_dir / 'log.txt'),
]
model.fit(train_loader,
val_loader=val_loader,
max_epochs=300,
callbacks=callbacks,
metrics=['multi_accuracy', 'lwlrap'])
if __name__ == "__main__":
SAVE_DIR.mkdir(parents=True, exist_ok=True)
with open(SAVE_DIR / 'source.py', 'w') as outfile:
outfile.write(open(__file__).read())
if CORRECTIONS:
with open(config.corrections_json_path) as file:
corrections = json.load(file)
print("Corrections:", corrections)
else:
corrections = None
folds_data = get_out_of_folds_data(EXPERIMENTS, corrections)
for num in range(START_FROM, 10000):
np.random.seed(num)
random.seed(num)
save_dir = SAVE_DIR / f'{num:04}'
train_folds(save_dir, folds_data)
time.sleep(5.0)
torch.cuda.empty_cache()
time.sleep(5.0)
| import json
import time
import torch
import random
import numpy as np
from pprint import pprint
from argus.callbacks import MonitorCheckpoint, \
EarlyStopping, LoggingToFile, ReduceLROnPlateau
from torch.utils.data import DataLoader
from src.stacking.datasets import get_out_of_folds_data, StackingDataset
from src.stacking.transforms import get_transforms
from src.stacking.argus_models import StackingModel
from src import config
EXPERIMENT_NAME = 'fcnet_stacking_rs_004'
START_FROM = 0
EXPERIMENTS = [
'auxiliary_007',
'auxiliary_010',
'auxiliary_012',
'auxiliary_014'
]
DATASET_SIZE = 128 * 256
CORRECTIONS = True
if config.kernel:
NUM_WORKERS = 2
else:
NUM_WORKERS = 4
SAVE_DIR = config.experiments_dir / EXPERIMENT_NAME
def train_folds(save_dir, folds_data):
random_params = {
'base_size': int(np.random.choice([64, 128, 256, 512])),
'reduction_scale': int(np.random.choice([2, 4, 8, 16])),
'p_dropout': float(np.random.uniform(0.0, 0.5)),
'lr': float(np.random.uniform(0.0001, 0.00001)),
'patience': int(np.random.randint(3, 12)),
'factor': float(np.random.uniform(0.5, 0.8)),
'batch_size': int(np.random.choice([32, 64, 128])),
}
pprint(random_params)
save_dir.mkdir(parents=True, exist_ok=True)
with open(save_dir / 'random_params.json', 'w') as outfile:
json.dump(random_params, outfile)
params = {
'nn_module': ('FCNet', {
'in_channels': len(config.classes) * len(EXPERIMENTS),
'num_classes': len(config.classes),
'base_size': random_params['base_size'],
'reduction_scale': random_params['reduction_scale'],
'p_dropout': random_params['p_dropout']
}),
'loss': 'BCEWithLogitsLoss',
'optimizer': ('Adam', {'lr': random_params['lr']}),
'device': 'cuda',
}
for fold in config.folds:
val_folds = [fold]
train_folds = list(set(config.folds) - set(val_folds))
save_fold_dir = save_dir / f'fold_{fold}'
print(f"Val folds: {val_folds}, Train folds: {train_folds}")
print(f"Fold save dir {save_fold_dir}")
train_dataset = StackingDataset(folds_data, train_folds,
get_transforms(True),
DATASET_SIZE)
val_dataset = StackingDataset(folds_data, val_folds,
get_transforms(False))
train_loader = DataLoader(train_dataset,
batch_size=random_params['batch_size'],
shuffle=True, drop_last=True,
num_workers=NUM_WORKERS)
val_loader = DataLoader(val_dataset,
batch_size=random_params['batch_size'] * 2,
shuffle=False, num_workers=NUM_WORKERS)
model = StackingModel(params)
callbacks = [
MonitorCheckpoint(save_fold_dir, monitor='val_lwlrap', max_saves=1),
ReduceLROnPlateau(monitor='val_lwlrap',
patience=random_params['patience'],
factor=random_params['factor'],
min_lr=1e-8),
EarlyStopping(monitor='val_lwlrap', patience=20),
LoggingToFile(save_fold_dir / 'log.txt'),
]
model.fit(train_loader,
val_loader=val_loader,
max_epochs=300,
callbacks=callbacks,
metrics=['multi_accuracy', 'lwlrap'])
if __name__ == "__main__":
SAVE_DIR.mkdir(parents=True, exist_ok=True)
with open(SAVE_DIR / 'source.py', 'w') as outfile:
outfile.write(open(__file__).read())
if CORRECTIONS:
with open(config.corrections_json_path) as file:
corrections = json.load(file)
print("Corrections:", corrections)
else:
corrections = None
folds_data = get_out_of_folds_data(EXPERIMENTS, corrections)
for num in range(START_FROM, 10000):
np.random.seed(num)
random.seed(num)
save_dir = SAVE_DIR / f'{num:04}'
train_folds(save_dir, folds_data)
time.sleep(5.0)
torch.cuda.empty_cache()
time.sleep(5.0)
| en | 000354174_wubinbai-argus-freesound_stacking_random_search_7c250aa8a89c.py | unknown | 1,380 |
"""Add onboarding email fields to user
Revision ID: 2c6aaada8bff
Revises: f4a49acd8801
Create Date: 2021-05-02 12:25:35.640366
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "2c6aaada8bff"
down_revision = "f4a49acd8801"
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: onboarding-email tracking columns and job types."""
    # Timestamp of the most recent onboarding email (NULL until one is sent).
    op.add_column("users", sa.Column("last_onboarding_email_sent", sa.DateTime(timezone=True), nullable=True))
    # Running count of onboarding emails already delivered to this user.
    op.add_column("users", sa.Column("onboarding_emails_sent", sa.Integer(), server_default="0", nullable=False))
    # Whether the user has been synced to the external mailing list.
    op.add_column("users", sa.Column("added_to_mailing_list", sa.Boolean(), server_default="false", nullable=False))
    # Extend the background-job enum with the two new job kinds.
    # NOTE(review): ALTER TYPE ... ADD VALUE is not reverted by downgrade().
    op.execute("ALTER TYPE backgroundjobtype ADD VALUE 'send_onboarding_emails'")
    op.execute("ALTER TYPE backgroundjobtype ADD VALUE 'add_users_to_email_list'")
def downgrade():
    """Revert the migration: drop the three onboarding-email columns.

    The enum values added in upgrade() are left in place — PostgreSQL has
    no supported way to remove values from an existing enum type.
    """
    op.drop_column("users", "added_to_mailing_list")
    op.drop_column("users", "onboarding_emails_sent")
    op.drop_column("users", "last_onboarding_email_sent")
| """Add onboarding email fields to user
Revision ID: 2c6aaada8bff
Revises: f4a49acd8801
Create Date: 2021-05-02 12:25:35.640366
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "2c6aaada8bff"
down_revision = "f4a49acd8801"
branch_labels = None
depends_on = None
def upgrade():
op.add_column("users", sa.Column("last_onboarding_email_sent", sa.DateTime(timezone=True), nullable=True))
op.add_column("users", sa.Column("onboarding_emails_sent", sa.Integer(), server_default="0", nullable=False))
op.add_column("users", sa.Column("added_to_mailing_list", sa.Boolean(), server_default="false", nullable=False))
op.execute("ALTER TYPE backgroundjobtype ADD VALUE 'send_onboarding_emails'")
op.execute("ALTER TYPE backgroundjobtype ADD VALUE 'add_users_to_email_list'")
def downgrade():
op.drop_column("users", "added_to_mailing_list")
op.drop_column("users", "onboarding_emails_sent")
op.drop_column("users", "last_onboarding_email_sent")
| en | 000485161_foormea-couchers_2c6aaada8bff_add_onboarding_email_fields_to_user_ee3c4a3b27c7.py | unknown | 371 |
from clld.web.adapters.geojson import GeoJson, get_lonlat
from clld.web.maps import Map, ParameterMap, Layer
class LanguagesMap(Map):
    """Overview map of all languages: larger markers, no label toggle."""

    def get_options(self):
        return {'icon_size': 20, 'no_showlabels': True}
class SegmentMap(ParameterMap):
    """Map for a single segment (parameter); only enlarges the markers."""

    def get_options(self):
        return {'icon_size': 20}
class InventoryMap(Map):
    """Sidebar map for one inventory, centered on its associated language."""

    def get_options(self):
        return {'icon_size': 20}

    def get_layers(self):
        # A single layer holding just this inventory's variety.
        yield Layer(
            self.ctx.id,
            self.ctx.name,
            GeoJson(self.ctx).render(self.ctx.language, self.req, dump=False))

    def get_default_options(self):
        return {
            # get_lonlat yields (lon, lat); the widget wants it reversed.
            # Falls back to (0, 0) when the language has no coordinates.
            'center': list(reversed(get_lonlat(self.ctx.language) or [0, 0])),
            'zoom': 3,
            'no_popup': True,
            'no_link': True,
            'sidebar': True}
def includeme(config):
    """Pyramid hook: register the custom map classes for their route names."""
    config.register_map('languages', LanguagesMap)
    config.register_map('parameter', SegmentMap)
    config.register_map('contribution', InventoryMap)
| from clld.web.adapters.geojson import GeoJson, get_lonlat
from clld.web.maps import Map, ParameterMap, Layer
class LanguagesMap(Map):
def get_options(self):
return {'icon_size': 20, 'no_showlabels': True}
class SegmentMap(ParameterMap):
def get_options(self):
return {'icon_size': 20}
class InventoryMap(Map):
def get_options(self):
return {'icon_size': 20}
def get_layers(self):
yield Layer(
self.ctx.id,
self.ctx.name,
GeoJson(self.ctx).render(self.ctx.language, self.req, dump=False))
def get_default_options(self):
return {
'center': list(reversed(get_lonlat(self.ctx.language) or [0, 0])),
'zoom': 3,
'no_popup': True,
'no_link': True,
'sidebar': True}
def includeme(config):
config.register_map('languages', LanguagesMap)
config.register_map('parameter', SegmentMap)
config.register_map('contribution', InventoryMap)
| en | 000558051_ltxom-phoible_maps_810b6a6d0cc4.py | unknown | 313 |
############################################################################
# Copyright(c) Open Law Library. All rights reserved. #
# See ThirdPartyNotices.txt in the project root for additional notices. #
# #
# Licensed under the Apache License, Version 2.0 (the "License") #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http: // www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import asyncio
import json
import time
import uuid
from json import JSONDecodeError
from typing import Optional
from pygls.lsp.methods import (COMPLETION, TEXT_DOCUMENT_DID_CHANGE,
TEXT_DOCUMENT_DID_CLOSE, TEXT_DOCUMENT_DID_OPEN)
from pygls.lsp.types import (CompletionItem, CompletionList, CompletionOptions,
CompletionParams, ConfigurationItem,
ConfigurationParams, Diagnostic,
DidChangeTextDocumentParams,
DidCloseTextDocumentParams,
DidOpenTextDocumentParams, MessageType, Position,
Range, Registration, RegistrationParams,
Unregistration, UnregistrationParams)
from pygls.lsp.types.basic_structures import (WorkDoneProgressBegin,
WorkDoneProgressEnd,
WorkDoneProgressReport)
from pygls.server import LanguageServer
COUNT_DOWN_START_IN_SECONDS = 10
COUNT_DOWN_SLEEP_IN_SECONDS = 1
class JsonLanguageServer(LanguageServer):
    """Demo language server for JSON documents.

    The CMD_* constants are the custom command identifiers exposed to the
    client; CONFIGURATION_SECTION names the client settings section read by
    the showConfiguration* commands.
    """

    CMD_COUNT_DOWN_BLOCKING = 'countDownBlocking'
    CMD_COUNT_DOWN_NON_BLOCKING = 'countDownNonBlocking'
    CMD_PROGRESS = 'progress'
    CMD_REGISTER_COMPLETIONS = 'registerCompletions'
    CMD_SHOW_CONFIGURATION_ASYNC = 'showConfigurationAsync'
    CMD_SHOW_CONFIGURATION_CALLBACK = 'showConfigurationCallback'
    CMD_SHOW_CONFIGURATION_THREAD = 'showConfigurationThread'
    CMD_UNREGISTER_COMPLETIONS = 'unregisterCompletions'

    CONFIGURATION_SECTION = 'jsonServer'

    def __init__(self):
        super().__init__()
# Module-level server instance; the feature/command decorators below
# register their handlers on it.
json_server = JsonLanguageServer()
def _validate(ls, params):
    """Re-validate the document referenced by *params* and publish diagnostics."""
    ls.show_message_log('Validating json...')
    text_doc = ls.workspace.get_document(params.text_document.uri)
    source = text_doc.source
    # An empty source publishes an empty list, clearing stale diagnostics.
    diagnostics = _validate_json(source) if source else []
    ls.publish_diagnostics(text_doc.uri, diagnostics)
def _validate_json(source):
    """Parse *source* as JSON and return a list of Diagnostics.

    json.loads stops at the first syntax error, so the result is empty on
    success and contains exactly one diagnostic on failure.
    """
    diagnostics = []

    try:
        json.loads(source)
    except JSONDecodeError as err:
        msg = err.msg
        col = err.colno
        line = err.lineno

        d = Diagnostic(
            # JSONDecodeError positions are 1-based; LSP positions 0-based.
            range=Range(
                start=Position(line=line - 1, character=col - 1),
                end=Position(line=line - 1, character=col)
            ),
            message=msg,
            source=type(json_server).__name__
        )

        diagnostics.append(d)

    return diagnostics
@json_server.feature(COMPLETION, CompletionOptions(trigger_characters=[',']))
def completions(params: Optional[CompletionParams] = None) -> CompletionList:
    """Return a static list of JSON punctuation completions (triggered on ',')."""
    return CompletionList(
        is_incomplete=False,
        items=[
            CompletionItem(label='"'),
            CompletionItem(label='['),
            CompletionItem(label=']'),
            CompletionItem(label='{'),
            CompletionItem(label='}'),
        ]
    )
@json_server.command(JsonLanguageServer.CMD_COUNT_DOWN_BLOCKING)
def count_down_10_seconds_blocking(ls, *args):
    """Starts counting down and showing message synchronously.

    It will `block` the main thread, which can be tested by trying to show
    completion items.
    """
    for i in range(COUNT_DOWN_START_IN_SECONDS):
        ls.show_message(f'Counting down... {COUNT_DOWN_START_IN_SECONDS - i}')
        # time.sleep blocks the event loop — other requests stall meanwhile.
        time.sleep(COUNT_DOWN_SLEEP_IN_SECONDS)
@json_server.command(JsonLanguageServer.CMD_COUNT_DOWN_NON_BLOCKING)
async def count_down_10_seconds_non_blocking(ls, *args):
    """Starts counting down and showing message asynchronously.

    It won't `block` the main thread, which can be tested by trying to show
    completion items.
    """
    for i in range(COUNT_DOWN_START_IN_SECONDS):
        ls.show_message(f'Counting down... {COUNT_DOWN_START_IN_SECONDS - i}')
        # asyncio.sleep yields control, so other requests keep being served.
        await asyncio.sleep(COUNT_DOWN_SLEEP_IN_SECONDS)
@json_server.feature(TEXT_DOCUMENT_DID_CHANGE)
def did_change(ls, params: DidChangeTextDocumentParams):
    """Text document did change notification: re-validate on every edit."""
    _validate(ls, params)
@json_server.feature(TEXT_DOCUMENT_DID_CLOSE)
def did_close(server: JsonLanguageServer, params: DidCloseTextDocumentParams):
    """Text document did close notification."""
    server.show_message('Text Document Did Close')
@json_server.feature(TEXT_DOCUMENT_DID_OPEN)
async def did_open(ls, params: DidOpenTextDocumentParams):
    """Text document did open notification: validate the fresh document."""
    ls.show_message('Text Document Did Open')
    _validate(ls, params)
@json_server.command(JsonLanguageServer.CMD_PROGRESS)
async def progress(ls: JsonLanguageServer, *args):
    """Create and start the progress on the client.

    Walks the full LSP progress lifecycle: create -> begin -> report* -> end.
    """
    # Fixed token — a second concurrent invocation would collide with it.
    token = 'token'
    # Create
    await ls.progress.create_async(token)
    # Begin
    ls.progress.begin(token, WorkDoneProgressBegin(title='Indexing', percentage=0))
    # Report: 10% steps, 2 s apart, up to 90%.
    for i in range(1, 10):
        ls.progress.report(
            token,
            WorkDoneProgressReport(message=f'{i * 10}%', percentage=i * 10),
        )
        await asyncio.sleep(2)
    # End
    ls.progress.end(token, WorkDoneProgressEnd(message='Finished'))
@json_server.command(JsonLanguageServer.CMD_REGISTER_COMPLETIONS)
async def register_completions(ls: JsonLanguageServer, *args):
    """Register completions method on the client (dynamic registration)."""
    params = RegistrationParams(registrations=[
        Registration(
            id=str(uuid.uuid4()),
            method=COMPLETION,
            # NOTE(review): triggerCharacters is the literal string "[':']"
            # rather than a list [":"] — looks wrong; confirm against the
            # LSP CompletionRegistrationOptions schema.
            register_options={"triggerCharacters": "[':']"})
    ])
    response = await ls.register_capability_async(params)
    # client/registerCapability returns null on success.
    if response is None:
        ls.show_message('Successfully registered completions method')
    else:
        ls.show_message('Error happened during completions registration.',
                        MessageType.Error)
@json_server.command(JsonLanguageServer.CMD_SHOW_CONFIGURATION_ASYNC)
async def show_configuration_async(ls: JsonLanguageServer, *args):
    """Gets exampleConfiguration from the client settings using coroutines."""
    try:
        config = await ls.get_configuration_async(
            ConfigurationParams(items=[
                ConfigurationItem(
                    scope_uri='',
                    section=JsonLanguageServer.CONFIGURATION_SECTION)
            ]))
        # The response list is aligned with the requested items (one here).
        example_config = config[0].get('exampleConfiguration')
        ls.show_message(f'jsonServer.exampleConfiguration value: {example_config}')
    except Exception as e:
        ls.show_message_log(f'Error ocurred: {e}')
@json_server.command(JsonLanguageServer.CMD_SHOW_CONFIGURATION_CALLBACK)
def show_configuration_callback(ls: JsonLanguageServer, *args):
    """Gets exampleConfiguration from the client settings using callback."""
    def _config_callback(config):
        # Invoked once the client answers the configuration request.
        try:
            example_config = config[0].get('exampleConfiguration')
            ls.show_message(f'jsonServer.exampleConfiguration value: {example_config}')
        except Exception as e:
            ls.show_message_log(f'Error ocurred: {e}')

    ls.get_configuration(ConfigurationParams(items=[
        ConfigurationItem(
            scope_uri='',
            section=JsonLanguageServer.CONFIGURATION_SECTION)
    ]), _config_callback)
@json_server.thread()
@json_server.command(JsonLanguageServer.CMD_SHOW_CONFIGURATION_THREAD)
def show_configuration_thread(ls: JsonLanguageServer, *args):
    """Gets exampleConfiguration from the client settings using thread pool."""
    try:
        config = ls.get_configuration(ConfigurationParams(items=[
            ConfigurationItem(
                scope_uri='',
                section=JsonLanguageServer.CONFIGURATION_SECTION)
        # Block up to 2 s on the Future — acceptable because @thread()
        # runs this handler off the main event loop.
        ])).result(2)
        example_config = config[0].get('exampleConfiguration')
        ls.show_message(f'jsonServer.exampleConfiguration value: {example_config}')
    except Exception as e:
        ls.show_message_log(f'Error ocurred: {e}')
@json_server.command(JsonLanguageServer.CMD_UNREGISTER_COMPLETIONS)
async def unregister_completions(ls: JsonLanguageServer, *args):
    """Unregister completions method on the client."""
    # "unregisterations" is the (misspelled) field name from the LSP schema.
    params = UnregistrationParams(unregisterations=[
        Unregistration(id=str(uuid.uuid4()), method=COMPLETION)
    ])
    response = await ls.unregister_capability_async(params)
    # Null response signals success, mirroring register_completions.
    if response is None:
        ls.show_message('Successfully unregistered completions method')
    else:
        ls.show_message('Error happened during completions unregistration.',
                        MessageType.Error)
| ############################################################################
# Copyright(c) Open Law Library. All rights reserved. #
# See ThirdPartyNotices.txt in the project root for additional notices. #
# #
# Licensed under the Apache License, Version 2.0 (the "License") #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http: // www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import asyncio
import json
import time
import uuid
from json import JSONDecodeError
from typing import Optional
from pygls.lsp.methods import (COMPLETION, TEXT_DOCUMENT_DID_CHANGE,
TEXT_DOCUMENT_DID_CLOSE, TEXT_DOCUMENT_DID_OPEN)
from pygls.lsp.types import (CompletionItem, CompletionList, CompletionOptions,
CompletionParams, ConfigurationItem,
ConfigurationParams, Diagnostic,
DidChangeTextDocumentParams,
DidCloseTextDocumentParams,
DidOpenTextDocumentParams, MessageType, Position,
Range, Registration, RegistrationParams,
Unregistration, UnregistrationParams)
from pygls.lsp.types.basic_structures import (WorkDoneProgressBegin,
WorkDoneProgressEnd,
WorkDoneProgressReport)
from pygls.server import LanguageServer
COUNT_DOWN_START_IN_SECONDS = 10
COUNT_DOWN_SLEEP_IN_SECONDS = 1
class JsonLanguageServer(LanguageServer):
CMD_COUNT_DOWN_BLOCKING = 'countDownBlocking'
CMD_COUNT_DOWN_NON_BLOCKING = 'countDownNonBlocking'
CMD_PROGRESS = 'progress'
CMD_REGISTER_COMPLETIONS = 'registerCompletions'
CMD_SHOW_CONFIGURATION_ASYNC = 'showConfigurationAsync'
CMD_SHOW_CONFIGURATION_CALLBACK = 'showConfigurationCallback'
CMD_SHOW_CONFIGURATION_THREAD = 'showConfigurationThread'
CMD_UNREGISTER_COMPLETIONS = 'unregisterCompletions'
CONFIGURATION_SECTION = 'jsonServer'
def __init__(self):
super().__init__()
json_server = JsonLanguageServer()
def _validate(ls, params):
ls.show_message_log('Validating json...')
text_doc = ls.workspace.get_document(params.text_document.uri)
source = text_doc.source
diagnostics = _validate_json(source) if source else []
ls.publish_diagnostics(text_doc.uri, diagnostics)
def _validate_json(source):
"""Validates json file."""
diagnostics = []
try:
json.loads(source)
except JSONDecodeError as err:
msg = err.msg
col = err.colno
line = err.lineno
d = Diagnostic(
range=Range(
start=Position(line=line - 1, character=col - 1),
end=Position(line=line - 1, character=col)
),
message=msg,
source=type(json_server).__name__
)
diagnostics.append(d)
return diagnostics
@json_server.feature(COMPLETION, CompletionOptions(trigger_characters=[',']))
def completions(params: Optional[CompletionParams] = None) -> CompletionList:
"""Returns completion items."""
return CompletionList(
is_incomplete=False,
items=[
CompletionItem(label='"'),
CompletionItem(label='['),
CompletionItem(label=']'),
CompletionItem(label='{'),
CompletionItem(label='}'),
]
)
@json_server.command(JsonLanguageServer.CMD_COUNT_DOWN_BLOCKING)
def count_down_10_seconds_blocking(ls, *args):
"""Starts counting down and showing message synchronously.
It will `block` the main thread, which can be tested by trying to show
completion items.
"""
for i in range(COUNT_DOWN_START_IN_SECONDS):
ls.show_message(f'Counting down... {COUNT_DOWN_START_IN_SECONDS - i}')
time.sleep(COUNT_DOWN_SLEEP_IN_SECONDS)
@json_server.command(JsonLanguageServer.CMD_COUNT_DOWN_NON_BLOCKING)
async def count_down_10_seconds_non_blocking(ls, *args):
"""Starts counting down and showing message asynchronously.
It won't `block` the main thread, which can be tested by trying to show
completion items.
"""
for i in range(COUNT_DOWN_START_IN_SECONDS):
ls.show_message(f'Counting down... {COUNT_DOWN_START_IN_SECONDS - i}')
await asyncio.sleep(COUNT_DOWN_SLEEP_IN_SECONDS)
@json_server.feature(TEXT_DOCUMENT_DID_CHANGE)
def did_change(ls, params: DidChangeTextDocumentParams):
"""Text document did change notification."""
_validate(ls, params)
@json_server.feature(TEXT_DOCUMENT_DID_CLOSE)
def did_close(server: JsonLanguageServer, params: DidCloseTextDocumentParams):
"""Text document did close notification."""
server.show_message('Text Document Did Close')
@json_server.feature(TEXT_DOCUMENT_DID_OPEN)
async def did_open(ls, params: DidOpenTextDocumentParams):
"""Text document did open notification."""
ls.show_message('Text Document Did Open')
_validate(ls, params)
@json_server.command(JsonLanguageServer.CMD_PROGRESS)
async def progress(ls: JsonLanguageServer, *args):
"""Create and start the progress on the client."""
token = 'token'
# Create
await ls.progress.create_async(token)
# Begin
ls.progress.begin(token, WorkDoneProgressBegin(title='Indexing', percentage=0))
# Report
for i in range(1, 10):
ls.progress.report(
token,
WorkDoneProgressReport(message=f'{i * 10}%', percentage= i * 10),
)
await asyncio.sleep(2)
# End
ls.progress.end(token, WorkDoneProgressEnd(message='Finished'))
@json_server.command(JsonLanguageServer.CMD_REGISTER_COMPLETIONS)
async def register_completions(ls: JsonLanguageServer, *args):
"""Register completions method on the client."""
params = RegistrationParams(registrations=[
Registration(
id=str(uuid.uuid4()),
method=COMPLETION,
register_options={"triggerCharacters": "[':']"})
])
response = await ls.register_capability_async(params)
if response is None:
ls.show_message('Successfully registered completions method')
else:
ls.show_message('Error happened during completions registration.',
MessageType.Error)
@json_server.command(JsonLanguageServer.CMD_SHOW_CONFIGURATION_ASYNC)
async def show_configuration_async(ls: JsonLanguageServer, *args):
"""Gets exampleConfiguration from the client settings using coroutines."""
try:
config = await ls.get_configuration_async(
ConfigurationParams(items=[
ConfigurationItem(
scope_uri='',
section=JsonLanguageServer.CONFIGURATION_SECTION)
]))
example_config = config[0].get('exampleConfiguration')
ls.show_message(f'jsonServer.exampleConfiguration value: {example_config}')
except Exception as e:
ls.show_message_log(f'Error ocurred: {e}')
@json_server.command(JsonLanguageServer.CMD_SHOW_CONFIGURATION_CALLBACK)
def show_configuration_callback(ls: JsonLanguageServer, *args):
"""Gets exampleConfiguration from the client settings using callback."""
def _config_callback(config):
try:
example_config = config[0].get('exampleConfiguration')
ls.show_message(f'jsonServer.exampleConfiguration value: {example_config}')
except Exception as e:
ls.show_message_log(f'Error ocurred: {e}')
ls.get_configuration(ConfigurationParams(items=[
ConfigurationItem(
scope_uri='',
section=JsonLanguageServer.CONFIGURATION_SECTION)
]), _config_callback)
@json_server.thread()
@json_server.command(JsonLanguageServer.CMD_SHOW_CONFIGURATION_THREAD)
def show_configuration_thread(ls: JsonLanguageServer, *args):
"""Gets exampleConfiguration from the client settings using thread pool."""
try:
config = ls.get_configuration(ConfigurationParams(items=[
ConfigurationItem(
scope_uri='',
section=JsonLanguageServer.CONFIGURATION_SECTION)
])).result(2)
example_config = config[0].get('exampleConfiguration')
ls.show_message(f'jsonServer.exampleConfiguration value: {example_config}')
except Exception as e:
ls.show_message_log(f'Error ocurred: {e}')
@json_server.command(JsonLanguageServer.CMD_UNREGISTER_COMPLETIONS)
async def unregister_completions(ls: JsonLanguageServer, *args):
"""Unregister completions method on the client."""
params = UnregistrationParams(unregisterations=[
Unregistration(id=str(uuid.uuid4()), method=COMPLETION)
])
response = await ls.unregister_capability_async(params)
if response is None:
ls.show_message('Successfully unregistered completions method')
else:
ls.show_message('Error happened during completions unregistration.',
MessageType.Error)
| en | 000330104_DillanCMills-pygls_server_bdbe47f7cf8d.py | unknown | 2,576 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import re
import datetime
import scrapy
from scrapy import Field
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Identity, Compose, MapCompose, TakeFirst, Join
from dateutil.parser import parse as dateutil_parse
from w3lib.html import remove_tags
def num_page_extractor(num_pages):
    """Return the leading token of a page-count string such as "320 pages".

    Returns None for falsy input and — unlike the original, which raised
    IndexError — for whitespace-only strings.
    """
    if num_pages:
        tokens = num_pages.split()
        if tokens:
            return tokens[0]
    return None
def safe_parse_date(date):
    """Fuzzily parse *date* to "YYYY-MM-DD HH:MM:SS", or None on failure.

    Missing components default to datetime.min. dateutil's parser raises
    OverflowError for out-of-range values in addition to ValueError, so
    both are treated as "unparseable" (the original caught only ValueError).
    """
    try:
        parsed = dateutil_parse(date, fuzzy=True, default=datetime.datetime.min)
        return parsed.strftime("%Y-%m-%d %H:%M:%S")
    except (ValueError, OverflowError):
        return None
def extract_publish_dates(maybe_dates):
    """Parse every entry mentioning "published"; unparseable ones become None."""
    published = (entry for entry in maybe_dates if "published" in entry.lower())
    return [safe_parse_date(entry) for entry in published]
def extract_year(s):
    """Return the 4-digit year from a "... first published <year>" blurb.

    Returns the year as a string (the item pipeline converts it to int),
    or None when the pattern is absent.
    """
    s = s.lower().strip()
    # Raw string: "\d" in a plain literal is an invalid escape sequence
    # (DeprecationWarning since 3.6). The greedy ".*" means the LAST
    # 4-digit run after "first published" wins — unchanged behavior.
    match = re.match(r".*first published.*(\d{4})", s)
    if match:
        return match.group(1)
    return None
def extract_ratings(txt):
    """Extract the rating histogram from embedded JavaScript.

    *txt* is an iterable of script fragments containing something like
    ``renderRatingGraph([6, 3, 2, 2, 1]);``. The array lists counts from
    5 stars down to 1 star; the result maps star value -> count, e.g.
    ``{5: 6, 4: 3, 3: 2, 2: 2, 1: 1}``. Returns None when no
    renderRatingGraph call is present.
    """
    source = "".join(txt)
    for statement in source.split(";"):
        if "renderRatingGraph" not in statement:
            continue
        # Grab the text between the square brackets of the array literal.
        inner = statement[statement.index("[") + 1 : statement.index("]")]
        counts = inner.split(",")
        return {5 - position: int(raw) for position, raw in enumerate(counts)}
    return None
def filter_asin(asin):
    """Pass *asin* through only when it is exactly 10 characters; else None."""
    if not asin:
        return None
    return asin if len(str(asin)) == 10 else None
def isbn_filter(isbn):
    """Return *isbn* when it is a 10-character all-digit string, else None."""
    is_isbn10 = bool(isbn) and len(str(isbn)) == 10 and isbn.isdigit()
    return isbn if is_isbn10 else None
def isbn13_filter(isbn):
    """Return *isbn* when it is a 13-character all-digit string, else None."""
    if not isbn:
        return None
    if len(str(isbn)) != 13:
        return None
    return isbn if isbn.isdigit() else None
def filter_empty(vals):
    """Strip each entry and drop the ones that become empty."""
    kept = []
    for raw in vals:
        stripped = raw.strip()
        if stripped:
            kept.append(stripped)
    return kept
def split_by_newline(txt):
    """Split *txt* into lines on the newline character (empty lines kept)."""
    return txt.split("\n")
class BookItem(scrapy.Item):
    """Scraped Goodreads book record.

    input_processor chains normalize the raw extracted strings; output
    processors come from BookLoader (TakeFirst) unless overridden here.
    """

    # Scalars
    url = Field()
    title = Field(input_processor=MapCompose(str.strip))
    author = Field(input_processor=MapCompose(str.strip))

    num_ratings = Field(input_processor=MapCompose(str.strip, int))
    num_reviews = Field(input_processor=MapCompose(str.strip, int))
    avg_rating = Field(input_processor=MapCompose(str.strip, float))
    num_pages = Field(input_processor=MapCompose(str.strip, num_page_extractor, int))

    language = Field(input_processor=MapCompose(str.strip))
    publish_date = Field(input_processor=extract_publish_dates)
    original_publish_year = Field(input_processor=MapCompose(extract_year, int))

    # Identifiers are validated (length/digits) before being kept.
    isbn = Field(input_processor=MapCompose(str.strip, isbn_filter))
    isbn13 = Field(input_processor=MapCompose(str.strip, isbn13_filter))
    asin = Field(input_processor=MapCompose(filter_asin))

    series = Field()

    # Lists (Identity keeps all values; genres are deduplicated via set)
    awards = Field(output_processor=Identity())
    places = Field(output_processor=Identity())
    characters = Field(output_processor=Identity())
    genres = Field(output_processor=Compose(set, list))

    # Dicts
    rating_histogram = Field(input_processor=MapCompose(extract_ratings))
class BookLoader(ItemLoader):
    """Loader for BookItem: each field keeps only its first extracted value."""
    default_output_processor = TakeFirst()
class AuthorItem(scrapy.Item):
    """Scraped Goodreads author profile."""

    # Scalars
    url = Field()
    name = Field()
    birth_date = Field(input_processor=MapCompose(safe_parse_date))
    death_date = Field(input_processor=MapCompose(safe_parse_date))
    avg_rating = Field(serializer=float)
    num_ratings = Field(serializer=int)
    num_reviews = Field(serializer=int)

    # Lists (deduplicated through set)
    genres = Field(output_processor=Compose(set, list))
    influences = Field(output_processor=Compose(set, list))

    # Blobs
    about = Field(
        # Take the first match, remove HTML tags, convert to list of lines,
        # remove empty lines, remove the "edit data" prefix line.
        input_processor=Compose(TakeFirst(), remove_tags, split_by_newline, filter_empty, lambda s: s[1:]),
        output_processor=Join()
    )
class AuthorLoader(ItemLoader):
    """Loader for AuthorItem: each field keeps only its first extracted value."""
    default_output_processor = TakeFirst()
| # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import re
import datetime
import scrapy
from scrapy import Field
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Identity, Compose, MapCompose, TakeFirst, Join
from dateutil.parser import parse as dateutil_parse
from w3lib.html import remove_tags
def num_page_extractor(num_pages):
if num_pages:
return num_pages.split()[0]
return None
def safe_parse_date(date):
try:
date = dateutil_parse(date, fuzzy=True, default=datetime.datetime.min)
date = date.strftime("%Y-%m-%d %H:%M:%S")
except ValueError:
date = None
return date
def extract_publish_dates(maybe_dates):
maybe_dates = [s for s in maybe_dates if "published" in s.lower()]
return [safe_parse_date(date) for date in maybe_dates]
def extract_year(s):
s = s.lower().strip()
match = re.match(".*first published.*(\d{4})", s)
if match:
return match.group(1)
def extract_ratings(txt):
"""Extract the rating histogram from embedded Javascript code
The embedded code looks like this:
|----------------------------------------------------------|
| renderRatingGraph([6, 3, 2, 2, 1]); |
| if ($('rating_details')) { |
| $('rating_details').insert({top: $('rating_graph')}) |
| } |
|----------------------------------------------------------|
"""
codelines = "".join(txt).split(";")
rating_code = [line.strip() for line in codelines if "renderRatingGraph" in line]
if not rating_code:
return None
rating_code = rating_code[0]
rating_array = rating_code[rating_code.index("[") + 1 : rating_code.index("]")]
ratings = {5 - i:int(x) for i, x in enumerate(rating_array.split(","))}
return ratings
def filter_asin(asin):
if asin and len(str(asin)) == 10:
return asin
return None
def isbn_filter(isbn):
if isbn and len(str(isbn)) == 10 and isbn.isdigit():
return isbn
def isbn13_filter(isbn):
if isbn and len(str(isbn)) == 13 and isbn.isdigit():
return isbn
def filter_empty(vals):
return [v.strip() for v in vals if v.strip()]
def split_by_newline(txt):
return txt.split("\n")
class BookItem(scrapy.Item):
# Scalars
url = Field()
title = Field(input_processor=MapCompose(str.strip))
author = Field(input_processor=MapCompose(str.strip))
num_ratings = Field(input_processor=MapCompose(str.strip, int))
num_reviews = Field(input_processor=MapCompose(str.strip, int))
avg_rating = Field(input_processor=MapCompose(str.strip, float))
num_pages = Field(input_processor=MapCompose(str.strip, num_page_extractor, int))
language = Field(input_processor=MapCompose(str.strip))
publish_date = Field(input_processor=extract_publish_dates)
original_publish_year = Field(input_processor=MapCompose(extract_year, int))
isbn = Field(input_processor=MapCompose(str.strip, isbn_filter))
isbn13 = Field(input_processor=MapCompose(str.strip, isbn13_filter))
asin = Field(input_processor=MapCompose(filter_asin))
series = Field()
# Lists
awards = Field(output_processor=Identity())
places = Field(output_processor=Identity())
characters = Field(output_processor=Identity())
genres = Field(output_processor=Compose(set, list))
# Dicts
rating_histogram = Field(input_processor=MapCompose(extract_ratings))
class BookLoader(ItemLoader):
default_output_processor = TakeFirst()
class AuthorItem(scrapy.Item):
# Scalars
url = Field()
name = Field()
birth_date = Field(input_processor=MapCompose(safe_parse_date))
death_date = Field(input_processor=MapCompose(safe_parse_date))
avg_rating = Field(serializer=float)
num_ratings = Field(serializer=int)
num_reviews = Field(serializer=int)
# Lists
genres = Field(output_processor=Compose(set, list))
influences = Field(output_processor=Compose(set, list))
# Blobs
about = Field(
# Take the first match, remove HTML tags, convert to list of lines, remove empty lines, remove the "edit data" prefix
input_processor=Compose(TakeFirst(), remove_tags, split_by_newline, filter_empty, lambda s: s[1:]),
output_processor=Join()
)
class AuthorLoader(ItemLoader):
default_output_processor = TakeFirst()
| en | 000778511_havanagrawal-GoodreadsScraper_items_38eb78c97a7d.py | unknown | 1,400 |
# coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for learned_optimizers.tasks.fixed.vit."""
from absl.testing import absltest
from absl.testing import parameterized
from learned_optimization.tasks import test_utils
from learned_optimization.tasks.fixed import mlp_mixer
# Names of the task constructors in mlp_mixer exercised by the smoke test.
tasks = [
    'MLPMixer_Cifar100_bs256_tiny16',
    'MLPMixer_Cifar100_small16',
    'MLPMixer_Cifar100_tiny16',
    'MLPMixer_Food101_64_bs256_tiny16',
    'MLPMixer_Food101_64_small16',
    'MLPMixer_Food101_64_tiny16',
    'MLPMixer_ImageNet64_bs256_tiny16',
    'MLPMixer_ImageNet64_small16',
    'MLPMixer_ImageNet64_tiny16',
]
class MLPMixerTest(parameterized.TestCase):
    """Smoke-tests every fixed MLP-Mixer task named in `tasks`."""

    @parameterized.parameters(tasks)
    def test_tasks(self, task_name):
        # Look the constructor up by name and run the shared smoke test.
        task = getattr(mlp_mixer, task_name)()
        test_utils.smoketest_task(task)
if __name__ == '__main__':
    # Delegate to absl's test runner.
    absltest.main()
| # coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for learned_optimizers.tasks.fixed.vit."""
from absl.testing import absltest
from absl.testing import parameterized
from learned_optimization.tasks import test_utils
from learned_optimization.tasks.fixed import mlp_mixer
tasks = [
'MLPMixer_Cifar100_bs256_tiny16',
'MLPMixer_Cifar100_small16',
'MLPMixer_Cifar100_tiny16',
'MLPMixer_Food101_64_bs256_tiny16',
'MLPMixer_Food101_64_small16',
'MLPMixer_Food101_64_tiny16',
'MLPMixer_ImageNet64_bs256_tiny16',
'MLPMixer_ImageNet64_small16',
'MLPMixer_ImageNet64_tiny16',
]
class MLPMixerTest(parameterized.TestCase):
@parameterized.parameters(tasks)
def test_tasks(self, task_name):
task = getattr(mlp_mixer, task_name)()
test_utils.smoketest_task(task)
if __name__ == '__main__':
absltest.main()
| en | 000090906_google-learned_optimization_mlp_mixer_test_0de2bfbcf571.py | unknown | 499 |
import os
import time
import uuid
from smbprotocol.connection import Connection
def test_connection(server, port):
    """Open (and immediately close) an SMB connection to *server*:*port*.

    Propagates whatever smbprotocol raises when the server is unreachable;
    callers use the exception as the "not up yet" signal.
    """
    conn = Connection(uuid.uuid4(), server, port=port)
    print("Opening connection to %s:%d" % (server, port))
    conn.connect(timeout=5)
    # Tear the connection down again — this is only a reachability probe.
    conn.disconnect(True)
if __name__ == '__main__':
    server = os.environ.get("SMB_SERVER", "127.0.0.1")
    port = int(os.environ.get("SMB_PORT", 445))

    print("Waiting for SMB server to be online")
    total_attempts = 20
    # `range` makes exactly total_attempts tries; the original while-loop
    # raised after only 19 of the advertised 20 attempts (off-by-one).
    for attempt in range(1, total_attempts + 1):
        print("Starting attempt %d" % attempt)
        try:
            test_connection(server, port)
            break
        except Exception as e:
            print("Connection attempt %d failed: %s" % (attempt, str(e)))
            if attempt == total_attempts:
                # Chain the last connection error for easier debugging.
                raise Exception("Timeout while waiting for SMB server to come "
                                "online") from e
            print("Sleeping for 5 seconds before next attempt")
            time.sleep(5)
    print("Connection successful")
| import os
import time
import uuid
from smbprotocol.connection import Connection
def test_connection(server, port):
    """Open an SMB connection to server:port and immediately close it.

    Exceptions from smbprotocol propagate to the caller, which uses them
    as the "server not ready yet" signal in its retry loop.
    """
    conn = Connection(uuid.uuid4(), server, port=port)
    print("Opening connection to %s:%d" % (server, port))
    conn.connect(timeout=5)
    conn.disconnect(True)
if __name__ == '__main__':
    # Target server/port come from the environment, with local defaults.
    server = os.environ.get("SMB_SERVER", "127.0.0.1")
    port = int(os.environ.get("SMB_PORT", 445))
    print("Waiting for SMB server to be online")
    # Retry loop: probes run for attempt = 1..19 (the loop condition plus
    # the raise below cap it at total_attempts - 1 probes), 5 s apart.
    attempt = 1
    total_attempts = 20
    while attempt < total_attempts:
        print("Starting attempt %d" % attempt)
        try:
            test_connection(server, port)
            break  # server answered; fall through to the success message
        except Exception as e:
            print("Connection attempt %d failed: %s" % (attempt, str(e)))
            attempt += 1
            if attempt == total_attempts:
                # Exhausted the retry budget; give up loudly.
                raise Exception("Timeout while waiting for SMB server to come "
                                "online")
            print("Sleeping for 5 seconds before next attempt")
            time.sleep(5)
    print("Connection successful")
| en | 000623938_wokis-smbprotocol_check-smb_1e52cef5d4b6.py | unknown | 304 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Config file of the Gophish command line interface.
@author: Martin Dubé
@organization: Gosecure inc.
@license: MIT License
@contact: mdube@gosecure.ca
Copyright (c) 2017, Gosecure
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import datetime
#
# Step 1: Gophish configuration
#
# Just the basic configuration for basic features
#
API_KEY = ''
API_URL = 'http://127.0.0.1:3333'
#
# Step 2: Campaign configuration
#
# Information regarding your campaign. Most comes from the gophish WebUI.
CAMPAIGN_NAME = 'John Doe'
CAMPAIGN_URL = 'https://path.toyourwebsite.com'
WORKING_DIR = '/path/to/working/dir'
EMAILS_PATH = WORKING_DIR + 'emails.txt'
# Landing Pages
LP_NAME = 'Landing Page Name'
# Two specific fields required by --print-creds to properly parse the JSON payloads.
# Update the fields based on your landing pages user and password fields.
LP_USER_FIELD = 'cUser'
LP_PWD_FIELD = 'cPass'
# Email Template
ET_NAME = 'Email Template Name'
# Sending Profiles
SP_NAME = 'Sending Profile Name'
# Batch Management Settings
GROUP_SIZE = 50
START_INTERVAL = 1 # Unit = minutes. Default=1. Increase when you have more than 10 batch.
BATCH_INTERVAL = 1 # Unit = minutes
# Verify TLS when testing credentials
# Default is True
VERIFY_TLS = True
# Owa login testing settings
OWA_DOMAIN = 'DOMAIN'
OWA_SERVER = 'outlook.example.com'
# Netscaler login testing settings
NETSCALER_SERVER = 'vpn.example.com'
# Juniper (Secure Access SSL VPN)
JUNIPER_DOMAIN = 'DOMAIN'
JUNIPER_SERVER = 'vpn.example.com'
# HINT: Consider verifying the URI as some organizations have multiple
# URIs which are 2FA or 1FA. The default one is often 2FA.
# For instance, /url/ can become /url_XX/, where XX is a number.
JUNIPER_URI = '/dana-na/auth/url/login.cgi'
# HINT: Find it in the source code of the login page. Look for a hidden
# input field named "realm".
JUNIPER_REALM = 'bla'
#
# Step 3: Things that should not change for most users
#
FILE_DATE_FMT = '%Y%m%d_%H%M%S'
FILE_DATE = datetime.datetime.now().strftime(FILE_DATE_FMT)
CAMPAIGN_NAME_TPL = '%s - Group %i'
CAMPAIGN_PREFIX = CAMPAIGN_NAME_TPL[:-2] % CAMPAIGN_NAME
RESULTS_PATH = WORKING_DIR + 'campaign_results_%s.csv' % CAMPAIGN_NAME
CREDS_PATH = WORKING_DIR + 'campaign_creds_%s_%s.csv' % (FILE_DATE, CAMPAIGN_NAME)
JSON_PATH = WORKING_DIR + 'campaign_raw_%s.json' % CAMPAIGN_NAME
GEOIP_PATH = WORKING_DIR + 'campaign_geoip_%s.csv' % CAMPAIGN_NAME
# Reporting
EXCLUDED_IP = []
GOPHISH_HOST = ''
GOPHISH_SSH_PORT = 22
GOPHISH_SSH_USER = 'root'
GOPHISH_SSH_PASS = None
GOPHISH_SSH_KEY = '/path/to/key'
GOPHISH_SSH_KEY_PASSPHRASE = 'some_pass'
# Gophish timestamps are in UTC. This will put dates as this timezone.
GOPHISH_TIMEZONE = "America/Toronto"
APACHE_HOST = GOPHISH_HOST
APACHE_SSH_PORT = GOPHISH_SSH_PORT
APACHE_SSH_USER = GOPHISH_SSH_USER
APACHE_SSH_PASS = GOPHISH_SSH_PASS
APACHE_SSH_KEY = GOPHISH_SSH_KEY
APACHE_SSH_KEY_PASSPHRASE = GOPHISH_SSH_KEY_PASSPHRASE
APACHE_LOGS_FOLDER = '/var/log/apache2/'
APACHE_LOGS_PREFIX = 'path.toyourwebsite.com'
# Take it from /etc/apache2/apache2.conf. The line starts with LogFormat. Currently using the "combined" one.
APACHE_LOGS_FORMAT = "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\""
APACHE_MALWARE_NAME = 'malware.zip'
EMPIRE_API_URL = 'https://127.0.0.1:1337'
EMPIRE_API_KEY = 'some_key'
SENDGRID_API_KEY = 'some_key'
#
# By default, we disable SSL verification as gophish uses a self-signed cert.
#
import gophish.client
import requests
from requests.packages import urllib3
class GophishClient(gophish.client.GophishClient):
    """HTTP REST client for Gophish that skips TLS certificate verification.

    Gophish ships with a self-signed certificate, so ``verify=False`` is
    passed on every request.
    """

    def __init__(self, api_key, host, **kwargs):
        super(GophishClient, self).__init__(api_key, host, **kwargs)

    def execute(self, method, path, **kwargs):
        """Send a request to ``path`` on the configured host and return the response."""
        request_kwargs = dict(kwargs)
        request_kwargs.update(self._client_kwargs)
        return requests.request(
            method,
            "{}{}".format(self.host, path),
            params={"api_key": self.api_key},
            verify=False,
            **request_kwargs)
# Just to remove a SubjectAltNameWarning.
urllib3.disable_warnings()
#
# Step 4: Advanced TLS settings
#
#
#
# Uncomment to configure TLS Client certificates or other TLS settings.
#
#
#import ssl
#import gophish.client
#from requests import Session
#from requests.adapters import HTTPAdapter
#from requests.packages.urllib3.poolmanager import PoolManager
#from requests.packages import urllib3
#
#class TLSHttpAdapter(HTTPAdapter):
# '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
#
# def init_poolmanager(self, connections, maxsize, block=False):
# self.poolmanager = PoolManager(num_pools=connections,
# maxsize=maxsize,
# block=block,
# ssl_version=ssl.PROTOCOL_TLSv1_2,
# cert_reqs='CERT_REQUIRED')
#
#class GophishClient(gophish.client.GophishClient):
# """ A standard HTTP REST client used by Gophish """
# def __init__(self, api_key, host, cert_file=None, ca_file=None, **kwargs):
# super(GophishClient, self).__init__(api_key, host, **kwargs)
# self.session = Session()
# self.session.mount(API_URL, TLSHttpAdapter())
# self.cert_file = '/path/to/client_cert.pem'
# self.ca_file = '/path/to/root_ca.crt'
#
# def execute(self, method, path, **kwargs):
# """ Executes a request to a given endpoint, returning the result """
#
# url = "{}{}".format(self.host, path)
# kwargs.update(self._client_kwargs)
# response = self.session.request(method, url, params={"api_key": self.api_key},
# cert=(self.cert_file), verify=self.ca_file, **kwargs)
# return response
#
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Config file of the Gophish command line interface.
@author: Martin Dubé
@organization: Gosecure inc.
@license: MIT License
@contact: mdube@gosecure.ca
Copyright (c) 2017, Gosecure
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import datetime
#
# Step 1: Gophish configuration
#
# Just the basic configuration for basic features
#
API_KEY = ''
API_URL = 'http://127.0.0.1:3333'
#
# Step 2: Campaign configuration
#
# Information regarding your campaign. Most comes from the gophish WebUI.
CAMPAIGN_NAME = 'John Doe'
CAMPAIGN_URL = 'https://path.toyourwebsite.com'
WORKING_DIR = '/path/to/working/dir'
EMAILS_PATH = WORKING_DIR + 'emails.txt'
# Landing Pages
LP_NAME = 'Landing Page Name'
# Two specific fields required by --print-creds to properly parse the JSON payloads.
# Update the fields based on your landing pages user and password fields.
LP_USER_FIELD = 'cUser'
LP_PWD_FIELD = 'cPass'
# Email Template
ET_NAME = 'Email Template Name'
# Sending Profiles
SP_NAME = 'Sending Profile Name'
# Batch Management Settings
GROUP_SIZE = 50
START_INTERVAL = 1 # Unit = minutes. Default=1. Increase when you have more than 10 batch.
BATCH_INTERVAL = 1 # Unit = minutes
# Verify TLS when testing credentials
# Default is True
VERIFY_TLS = True
# Owa login testing settings
OWA_DOMAIN = 'DOMAIN'
OWA_SERVER = 'outlook.example.com'
# Netscaler login testing settings
NETSCALER_SERVER = 'vpn.example.com'
# Juniper (Secure Access SSL VPN)
JUNIPER_DOMAIN = 'DOMAIN'
JUNIPER_SERVER = 'vpn.example.com'
# HINT: Consider verifying the URI as some organizations have multiple
# URIs which are 2FA or 1FA. The default one is often 2FA.
# For instance, /url/ can become /url_XX/, where XX is a number.
JUNIPER_URI = '/dana-na/auth/url/login.cgi'
# HINT: Find it in the source code of the login page. Look for a hidden
# input field named "realm".
JUNIPER_REALM = 'bla'
#
# Step 3: Things that should not change for most users
#
FILE_DATE_FMT = '%Y%m%d_%H%M%S'
FILE_DATE = datetime.datetime.now().strftime(FILE_DATE_FMT)
CAMPAIGN_NAME_TPL = '%s - Group %i'
CAMPAIGN_PREFIX = CAMPAIGN_NAME_TPL[:-2] % CAMPAIGN_NAME
RESULTS_PATH = WORKING_DIR + 'campaign_results_%s.csv' % CAMPAIGN_NAME
CREDS_PATH = WORKING_DIR + 'campaign_creds_%s_%s.csv' % (FILE_DATE, CAMPAIGN_NAME)
JSON_PATH = WORKING_DIR + 'campaign_raw_%s.json' % CAMPAIGN_NAME
GEOIP_PATH = WORKING_DIR + 'campaign_geoip_%s.csv' % CAMPAIGN_NAME
# Reporting
EXCLUDED_IP = []
GOPHISH_HOST = ''
GOPHISH_SSH_PORT = 22
GOPHISH_SSH_USER = 'root'
GOPHISH_SSH_PASS = None
GOPHISH_SSH_KEY = '/path/to/key'
GOPHISH_SSH_KEY_PASSPHRASE = 'some_pass'
# Gophish timestamps are in UTC. This will put dates as this timezone.
GOPHISH_TIMEZONE = "America/Toronto"
APACHE_HOST = GOPHISH_HOST
APACHE_SSH_PORT = GOPHISH_SSH_PORT
APACHE_SSH_USER = GOPHISH_SSH_USER
APACHE_SSH_PASS = GOPHISH_SSH_PASS
APACHE_SSH_KEY = GOPHISH_SSH_KEY
APACHE_SSH_KEY_PASSPHRASE = GOPHISH_SSH_KEY_PASSPHRASE
APACHE_LOGS_FOLDER = '/var/log/apache2/'
APACHE_LOGS_PREFIX = 'path.toyourwebsite.com'
# Take it from /etc/apache2/apache2.conf. The line starts with LogFormat. Currently using the "combined" one.
APACHE_LOGS_FORMAT = "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\""
APACHE_MALWARE_NAME = 'malware.zip'
EMPIRE_API_URL = 'https://127.0.0.1:1337'
EMPIRE_API_KEY = 'some_key'
SENDGRID_API_KEY = 'some_key'
#
# By default, we disable SSL verification as gophish uses a self-signed cert.
#
import gophish.client
import requests
from requests.packages import urllib3
class GophishClient(gophish.client.GophishClient):
    """A standard HTTP REST client used by Gophish."""

    def __init__(self, api_key, host, **kwargs):
        super(GophishClient, self).__init__(api_key, host, **kwargs)

    def execute(self, method, path, **kwargs):
        """ Executes a request to a given endpoint, returning the result """
        url = "{}{}".format(self.host, path)
        kwargs.update(self._client_kwargs)
        # verify=False: gophish serves a self-signed certificate, so TLS
        # verification is deliberately disabled for API calls.
        response = requests.request(
            method, url, params={"api_key": self.api_key}, verify=False, **kwargs)
        return response
# Just to remove a SubjectAltNameWarning.
urllib3.disable_warnings()
#
# Step 4: Advanced TLS settings
#
#
#
# Uncomment to configure TLS Client certificates or other TLS settings.
#
#
#import ssl
#import gophish.client
#from requests import Session
#from requests.adapters import HTTPAdapter
#from requests.packages.urllib3.poolmanager import PoolManager
#from requests.packages import urllib3
#
#class TLSHttpAdapter(HTTPAdapter):
# '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
#
# def init_poolmanager(self, connections, maxsize, block=False):
# self.poolmanager = PoolManager(num_pools=connections,
# maxsize=maxsize,
# block=block,
# ssl_version=ssl.PROTOCOL_TLSv1_2,
# cert_reqs='CERT_REQUIRED')
#
#class GophishClient(gophish.client.GophishClient):
# """ A standard HTTP REST client used by Gophish """
# def __init__(self, api_key, host, cert_file=None, ca_file=None, **kwargs):
# super(GophishClient, self).__init__(api_key, host, **kwargs)
# self.session = Session()
# self.session.mount(API_URL, TLSHttpAdapter())
# self.cert_file = '/path/to/client_cert.pem'
# self.ca_file = '/path/to/root_ca.crt'
#
# def execute(self, method, path, **kwargs):
# """ Executes a request to a given endpoint, returning the result """
#
# url = "{}{}".format(self.host, path)
# kwargs.update(self._client_kwargs)
# response = self.session.request(method, url, params={"api_key": self.api_key},
# cert=(self.cert_file), verify=self.ca_file, **kwargs)
# return response
#
| en | 000616520_ninostephen-gophish-cli_config.default_532b62ae482d.py | unknown | 2,260 |
import tensorflow as tf
import utils.utils as utils
class SemanticCNN:
    """Convolutional text classifier over learned word embeddings.

    Kim (2014)-style architecture: three parallel convolution branches with
    filter widths 3, 4 and 5, each max-pooled over the time axis,
    concatenated, passed through dropout and a final affine + softmax layer.

    NOTE(review): ``tf.concat(3, [...])`` below uses the pre-TF-1.0
    argument order (axis first), so this module appears to target an old
    TensorFlow release -- confirm the pinned TF version before upgrading.
    """

    def __init__(self, config,
                 sequence_length, vocab_size, embedding_size, num_filters):
        # config: ConfigParser-like object; values are read from its 'main' section.
        self.config = config
        self.sequence_length = sequence_length  # tokens per (padded) example
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.num_filters = num_filters  # feature maps per filter width
        # The literal string 'None' in the config means "no fixed RNG seed".
        if config.get('main', 'seed') == 'None':
            self.seed = None
        else:
            self.seed = config.getint('main', 'seed')

    def conv2d(self, data, weight):
        # Stride-1, unpadded ('VALID') convolution.
        return tf.nn.conv2d(data,
                            weight,
                            strides=[1, 1, 1, 1],
                            padding='VALID')

    def max_pool(self, data, filter_size):
        # Pool over the entire remaining time axis so each feature map
        # collapses to a single scalar per filter (max-over-time pooling).
        return tf.nn.max_pool(data,
                              ksize=[1, self.sequence_length - filter_size + 1, 1, 1],
                              strides=[1, 1, 1, 1],
                              padding='VALID')

    def variable(self, flavor, shape):
        """Create a TF variable of `shape`; returns None for unknown flavors."""
        if flavor == 'W_truncated_normal':
            return tf.Variable(
                tf.truncated_normal(shape,
                                    stddev=0.1,
                                    seed=self.seed,
                                    dtype=tf.float32))
        elif flavor == 'W_random_uniform':
            return tf.Variable(
                tf.random_uniform(shape,
                                  minval=-1.0,
                                  maxval=1.0))
        elif flavor == 'b':
            return tf.Variable(tf.constant(0.1, shape=shape),
                               dtype=tf.float32)
        else:
            return None

    def train_input_placeholders(self):
        """Return (x, y_) placeholders for token-id inputs and one-hot labels."""
        x = tf.placeholder(tf.float32,
                           shape=[None, self.sequence_length],
                           name="x")
        y_ = tf.placeholder(tf.float32,
                            [None, self.config.getint('main', 'num_classes')], name="y_")
        return x, y_

    def model(self, data):
        """Build the inference graph.

        Returns (y_conv, logits, keep_prob, l2_loss, embedded_words, embed_W).
        """
        l2_loss = tf.constant(0.0)
        keep_prob = tf.placeholder(tf.float32, name="keep_prob")
        # Embedding lookup; `data` arrives via a float placeholder, hence the cast.
        embed_W = self.variable('W_random_uniform', [self.vocab_size, self.embedding_size])
        embedded_words = tf.nn.embedding_lookup(embed_W, tf.cast(data, tf.int32))
        embedded_words_expanded = tf.expand_dims(embedded_words, -1)
        # Branch with filter width 3.
        filter3_shape = [3, self.embedding_size, 1, self.num_filters]
        pool_filter3_W = self.variable('W_truncated_normal', filter3_shape)
        pool_filter3_b = self.variable('b', [self.num_filters])
        conv1 = tf.nn.relu(tf.nn.bias_add(
            self.conv2d(embedded_words_expanded, pool_filter3_W), pool_filter3_b))
        pool_filter3 = self.max_pool(conv1, 3)
        # Branch with filter width 4.
        filter4_shape = [4, self.embedding_size, 1, self.num_filters]
        pool_filter4_W = self.variable('W_truncated_normal', filter4_shape)
        pool_filter4_b = self.variable('b', [self.num_filters])
        conv2 = tf.nn.relu(tf.nn.bias_add(
            self.conv2d(embedded_words_expanded, pool_filter4_W), pool_filter4_b))
        pool_filter4 = self.max_pool(conv2, 4)
        # Branch with filter width 5.
        filter5_shape = [5, self.embedding_size, 1, self.num_filters]
        pool_filter5_W = self.variable('W_truncated_normal', filter5_shape)
        pool_filter5_b = self.variable('b', [self.num_filters])
        conv3 = tf.nn.relu(tf.nn.bias_add(
            self.conv2d(embedded_words_expanded, pool_filter5_W), pool_filter5_b))
        pool_filter5 = self.max_pool(conv3, 5)
        # Concatenate the three pooled branches along the channel axis and
        # flatten to [batch, 3 * num_filters].
        pool_combined = tf.concat(3, [pool_filter3, pool_filter4, pool_filter5])
        pool_final = tf.reshape(pool_combined, [-1, self.num_filters * 3])
        dropout = tf.nn.dropout(pool_final, keep_prob)
        # Output layer (Xavier-initialized); note only this layer is folded
        # into l2_loss below.
        final_W = tf.get_variable("W", shape=[self.num_filters * 3,
                                              self.config.getint('main', 'num_classes')],
                                  initializer=tf.contrib.layers.xavier_initializer())
        final_b = tf.Variable(tf.constant(0.1,
                                          shape=[self.config.getint('main', 'num_classes')]), name="b")
        logits = tf.matmul(dropout, final_W) + final_b
        y_conv = tf.nn.softmax(logits)
        l2_loss += tf.nn.l2_loss(final_W) + tf.nn.l2_loss(final_b)
        return y_conv, logits, keep_prob, l2_loss, embedded_words, embed_W
| import tensorflow as tf
import utils.utils as utils
class SemanticCNN:
    """CNN text classifier: conv branches of widths 3/4/5 over word
    embeddings, max-over-time pooling, dropout, then softmax output.

    NOTE(review): ``tf.concat(3, [...])`` uses the pre-TF-1.0 argument
    order -- this code targets an old TensorFlow release.
    """

    def __init__(self, config,
                 sequence_length, vocab_size, embedding_size, num_filters):
        self.config = config
        self.sequence_length = sequence_length
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.num_filters = num_filters
        # The literal string 'None' in the config means "no fixed RNG seed".
        if config.get('main', 'seed') == 'None':
            self.seed = None
        else:
            self.seed = config.getint('main', 'seed')

    def conv2d(self, data, weight):
        # Stride-1, unpadded convolution.
        return tf.nn.conv2d(data,
                            weight,
                            strides=[1, 1, 1, 1],
                            padding='VALID')

    def max_pool(self, data, filter_size):
        # Max-over-time pooling: one scalar per feature map.
        return tf.nn.max_pool(data,
                              ksize=[1, self.sequence_length - filter_size + 1, 1, 1],
                              strides=[1, 1, 1, 1],
                              padding='VALID')

    def variable(self, flavor, shape):
        """Create a TF variable of `shape`; returns None for unknown flavors."""
        if flavor == 'W_truncated_normal':
            return tf.Variable(
                tf.truncated_normal(shape,
                                    stddev=0.1,
                                    seed=self.seed,
                                    dtype=tf.float32))
        elif flavor == 'W_random_uniform':
            return tf.Variable(
                tf.random_uniform(shape,
                                  minval=-1.0,
                                  maxval=1.0))
        elif flavor == 'b':
            return tf.Variable(tf.constant(0.1, shape=shape),
                               dtype=tf.float32)
        else:
            return None

    def train_input_placeholders(self):
        """Return (x, y_) placeholders for token ids and one-hot labels."""
        x = tf.placeholder(tf.float32,
                           shape=[None, self.sequence_length],
                           name="x")
        y_ = tf.placeholder(tf.float32,
                            [None, self.config.getint('main', 'num_classes')], name="y_")
        return x, y_

    def model(self, data):
        """Build the graph; returns (y_conv, logits, keep_prob, l2_loss,
        embedded_words, embed_W)."""
        l2_loss = tf.constant(0.0)
        keep_prob = tf.placeholder(tf.float32, name="keep_prob")
        embed_W = self.variable('W_random_uniform', [self.vocab_size, self.embedding_size])
        embedded_words = tf.nn.embedding_lookup(embed_W, tf.cast(data, tf.int32))
        embedded_words_expanded = tf.expand_dims(embedded_words, -1)
        filter3_shape = [3, self.embedding_size, 1, self.num_filters]
        pool_filter3_W = self.variable('W_truncated_normal', filter3_shape)
        pool_filter3_b = self.variable('b', [self.num_filters])
        conv1 = tf.nn.relu(tf.nn.bias_add(
            self.conv2d(embedded_words_expanded, pool_filter3_W), pool_filter3_b))
        pool_filter3 = self.max_pool(conv1, 3)
        filter4_shape = [4, self.embedding_size, 1, self.num_filters]
        pool_filter4_W = self.variable('W_truncated_normal', filter4_shape)
        pool_filter4_b = self.variable('b', [self.num_filters])
        conv2 = tf.nn.relu(tf.nn.bias_add(
            self.conv2d(embedded_words_expanded, pool_filter4_W), pool_filter4_b))
        pool_filter4 = self.max_pool(conv2, 4)
        filter5_shape = [5, self.embedding_size, 1, self.num_filters]
        pool_filter5_W = self.variable('W_truncated_normal', filter5_shape)
        pool_filter5_b = self.variable('b', [self.num_filters])
        conv3 = tf.nn.relu(tf.nn.bias_add(
            self.conv2d(embedded_words_expanded, pool_filter5_W), pool_filter5_b))
        pool_filter5 = self.max_pool(conv3, 5)
        # Concatenate branches on the channel axis, flatten, apply dropout.
        pool_combined = tf.concat(3, [pool_filter3, pool_filter4, pool_filter5])
        pool_final = tf.reshape(pool_combined, [-1, self.num_filters * 3])
        dropout = tf.nn.dropout(pool_final, keep_prob)
        # Output layer; only this layer contributes to l2_loss.
        final_W = tf.get_variable("W", shape=[self.num_filters * 3,
                                              self.config.getint('main', 'num_classes')],
                                  initializer=tf.contrib.layers.xavier_initializer())
        final_b = tf.Variable(tf.constant(0.1,
                                          shape=[self.config.getint('main', 'num_classes')]), name="b")
        logits = tf.matmul(dropout, final_W) + final_b
        y_conv = tf.nn.softmax(logits)
        l2_loss += tf.nn.l2_loss(final_W) + tf.nn.l2_loss(final_b)
        return y_conv, logits, keep_prob, l2_loss, embedded_words, embed_W
| en | 000118625_macdaliot-deep-pwning_semantic_cnn_1fd9fe9205eb.py | unknown | 1,354 |
from typing import Optional, Dict, Callable
import torch
# This file contains various physical constants and functions to convert units
# from the atomic units
__all__ = ["length_to", "time_to", "freq_to", "ir_ints_to", "raman_ints_to",
"edipole_to", "equadrupole_to"]
# 1 atomic unit in SI
LENGTH = 5.29177210903e-11 # m
TIME = 2.4188843265857e-17 # s
CHARGE = 1.602176634e-19 # C
# 1 atomic unit in other unit
DEBYE = 2.541746473 # Debye (for dipole)
ANGSTROM = LENGTH / 1e-10 # angstrom (length)
AMU = 5.485799090649e-4 # atomic mass unit (mass)
# constants in SI
LIGHT_SPEED = 2.99792458e8 # m/s
# SI prefix multipliers (used to rescale the atomic-unit factors above).
# Fix: atto is 1e-18 and femto is 1e-15; the previous values (1e-15 and
# 1e-12) were shifted by three decades, which made e.g. the "fs" entry of
# the time converter wrong by a factor of 1000.
ATTO = 1e-18
FEMTO = 1e-15
NANO = 1e-9
MICRO = 1e-6
MILLI = 1e-3
CENTI = 1e-2
DECI = 1e-1
KILO = 1e3
MEGA = 1e6
GIGA = 1e9
TERA = 1e12
PhysVarType = torch.Tensor
UnitType = Optional[str]
_length_converter = {
"angst": ANGSTROM,
"angstrom": ANGSTROM,
"m": LENGTH,
"cm": LENGTH / CENTI,
}
_freq_converter = {
"cm-1": CENTI / TIME / LIGHT_SPEED,
"cm^-1": CENTI / TIME / LIGHT_SPEED,
"hz": 1.0 / TIME,
"khz": 1.0 / TIME / KILO,
"mhz": 1.0 / TIME / MEGA,
"ghz": 1.0 / TIME / GIGA,
"thz": 1.0 / TIME / TERA,
}
_ir_ints_converter = {
"(debye/angst)^2/amu": (DEBYE / ANGSTROM) ** 2 / AMU,
"km/mol": (DEBYE / ANGSTROM) ** 2 / AMU * 42.256, # from https://dx.doi.org/10.1002%2Fjcc.24344
}
_raman_ints_converter = {
"angst^4/amu": ANGSTROM ** 4 / AMU,
}
_time_converter = {
"s": TIME,
"us": TIME / MICRO,
"ns": TIME / NANO,
"fs": TIME / FEMTO,
}
_edipole_converter = {
"d": DEBYE,
"debye": DEBYE,
"c*m": DEBYE, # Coulomb meter
}
_equadrupole_converter = {
"debye*angst": DEBYE * ANGSTROM # Debye angstrom
}
def _avail_keys(converter: Dict[str, float]) -> str:
# returns the available keys in a string of list of string
return str(list(_length_converter.keys()))
def _add_docstr_to(phys: str, converter: Dict[str, float]) -> Callable:
    # Decorator factory: attaches a standardized docstring to a converter
    # function, naming the physical quantity and its available unit names.
    def decorator(callable: Callable):
        callable.__doc__ = f"""
    Convert the {phys} from atomic unit to the given unit.
    Available units are (case-insensitive): {_avail_keys(converter)}
    """
        return callable
    return decorator
@_add_docstr_to("time", _time_converter)
def time_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit time from atomic unit to the given unit
return _converter_to(a, unit, _time_converter)
@_add_docstr_to("frequency", _freq_converter)
def freq_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit frequency from atomic unit to the given unit
return _converter_to(a, unit, _freq_converter)
@_add_docstr_to("IR intensity", _ir_ints_converter)
def ir_ints_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit IR intensity from atomic unit to the given unit
return _converter_to(a, unit, _ir_ints_converter)
@_add_docstr_to("Raman intensity", _raman_ints_converter)
def raman_ints_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit IR intensity from atomic unit to the given unit
return _converter_to(a, unit, _raman_ints_converter)
@_add_docstr_to("length", _length_converter)
def length_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit length from atomic unit to the given unit
return _converter_to(a, unit, _length_converter)
@_add_docstr_to("electric dipole", _edipole_converter)
def edipole_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit electric dipole from atomic unit to the given unit
return _converter_to(a, unit, _edipole_converter)
@_add_docstr_to("electric quadrupole", _equadrupole_converter)
def equadrupole_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit electric dipole from atomic unit to the given unit
return _converter_to(a, unit, _equadrupole_converter)
def _converter_to(a: PhysVarType, unit: UnitType, converter: Dict[str, float]) -> PhysVarType:
    """Scale `a` (given in atomic units) into `unit` using `converter`.

    A `unit` of None is a no-op (value stays in atomic units). Unit names
    match case-insensitively; an unknown name raises ValueError listing
    the units this converter supports.
    """
    if unit is None:
        return a
    key = unit.lower()
    if key in converter:
        return a * converter[key]
    avail_units = _avail_keys(converter)
    raise ValueError(f"Unknown unit: {unit}. Available units are: {avail_units}")
| from typing import Optional, Dict, Callable
import torch
# This file contains various physical constants and functions to convert units
# from the atomic units
__all__ = ["length_to", "time_to", "freq_to", "ir_ints_to", "raman_ints_to",
"edipole_to", "equadrupole_to"]
# 1 atomic unit in SI
LENGTH = 5.29177210903e-11 # m
TIME = 2.4188843265857e-17 # s
CHARGE = 1.602176634e-19 # C
# 1 atomic unit in other unit
DEBYE = 2.541746473 # Debye (for dipole)
ANGSTROM = LENGTH / 1e-10 # angstrom (length)
AMU = 5.485799090649e-4 # atomic mass unit (mass)
# constants in SI
LIGHT_SPEED = 2.99792458e8 # m/s
# SI prefix multipliers (used to rescale the atomic-unit factors above).
# Fix: atto is 1e-18 and femto is 1e-15; the previous values (1e-15 and
# 1e-12) were shifted by three decades, which made e.g. the "fs" entry of
# the time converter wrong by a factor of 1000.
ATTO = 1e-18
FEMTO = 1e-15
NANO = 1e-9
MICRO = 1e-6
MILLI = 1e-3
CENTI = 1e-2
DECI = 1e-1
KILO = 1e3
MEGA = 1e6
GIGA = 1e9
TERA = 1e12
PhysVarType = torch.Tensor
UnitType = Optional[str]
_length_converter = {
"angst": ANGSTROM,
"angstrom": ANGSTROM,
"m": LENGTH,
"cm": LENGTH / CENTI,
}
_freq_converter = {
"cm-1": CENTI / TIME / LIGHT_SPEED,
"cm^-1": CENTI / TIME / LIGHT_SPEED,
"hz": 1.0 / TIME,
"khz": 1.0 / TIME / KILO,
"mhz": 1.0 / TIME / MEGA,
"ghz": 1.0 / TIME / GIGA,
"thz": 1.0 / TIME / TERA,
}
_ir_ints_converter = {
"(debye/angst)^2/amu": (DEBYE / ANGSTROM) ** 2 / AMU,
"km/mol": (DEBYE / ANGSTROM) ** 2 / AMU * 42.256, # from https://dx.doi.org/10.1002%2Fjcc.24344
}
_raman_ints_converter = {
"angst^4/amu": ANGSTROM ** 4 / AMU,
}
_time_converter = {
"s": TIME,
"us": TIME / MICRO,
"ns": TIME / NANO,
"fs": TIME / FEMTO,
}
_edipole_converter = {
"d": DEBYE,
"debye": DEBYE,
"c*m": DEBYE, # Coulomb meter
}
_equadrupole_converter = {
"debye*angst": DEBYE * ANGSTROM # Debye angstrom
}
def _avail_keys(converter: Dict[str, float]) -> str:
# returns the available keys in a string of list of string
return str(list(_length_converter.keys()))
def _add_docstr_to(phys: str, converter: Dict[str, float]) -> Callable:
    # Decorator factory: installs a standardized docstring on a converter
    # function, naming the physical quantity and its available unit names.
    def decorator(callable: Callable):
        callable.__doc__ = f"""
    Convert the {phys} from atomic unit to the given unit.
    Available units are (case-insensitive): {_avail_keys(converter)}
    """
        return callable
    return decorator
@_add_docstr_to("time", _time_converter)
def time_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit time from atomic unit to the given unit
return _converter_to(a, unit, _time_converter)
@_add_docstr_to("frequency", _freq_converter)
def freq_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit frequency from atomic unit to the given unit
return _converter_to(a, unit, _freq_converter)
@_add_docstr_to("IR intensity", _ir_ints_converter)
def ir_ints_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit IR intensity from atomic unit to the given unit
return _converter_to(a, unit, _ir_ints_converter)
@_add_docstr_to("Raman intensity", _raman_ints_converter)
def raman_ints_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit IR intensity from atomic unit to the given unit
return _converter_to(a, unit, _raman_ints_converter)
@_add_docstr_to("length", _length_converter)
def length_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit length from atomic unit to the given unit
return _converter_to(a, unit, _length_converter)
@_add_docstr_to("electric dipole", _edipole_converter)
def edipole_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit electric dipole from atomic unit to the given unit
return _converter_to(a, unit, _edipole_converter)
@_add_docstr_to("electric quadrupole", _equadrupole_converter)
def equadrupole_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit electric dipole from atomic unit to the given unit
return _converter_to(a, unit, _equadrupole_converter)
def _converter_to(a: PhysVarType, unit: UnitType, converter: Dict[str, float]) -> PhysVarType:
    # Core conversion helper: scales `a` (atomic units) by the factor that
    # `converter` maps the (case-insensitive) unit name to.
    if unit is None:
        # No unit requested -> return the value unchanged (atomic units).
        return a
    u = unit.lower()
    try:
        return a * converter[u]
    except KeyError:
        # Unknown unit: report the names this converter actually supports.
        avail_units = _avail_keys(converter)
        raise ValueError(f"Unknown unit: {unit}. Available units are: {avail_units}")
| en | 000696965_Jaikinator-dqc_units_ac11863adb40.py | unknown | 1,662 |
# Code generated by github.com/lolopinto/ent/ent, DO NOT edit.
"""add guest_data table
Revision ID: 9fe6423022c2
Revises: fd8bc05fbc78
Create Date: 2021-01-25 19:08:22.522260+00:00
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '9fe6423022c2'
down_revision = 'fd8bc05fbc78'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``guest_data`` table (per guest-per-event dietary info)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('guest_data',
                    sa.Column('id', postgresql.UUID(), nullable=False),
                    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
                    sa.Column('updated_at', sa.TIMESTAMP(), nullable=False),
                    sa.Column('guest_id', postgresql.UUID(), nullable=False),
                    sa.Column('event_id', postgresql.UUID(), nullable=False),
                    sa.Column('dietary_restrictions',
                              sa.Text(), nullable=False),
                    # ON DELETE CASCADE: rows disappear with their parent
                    # event/guest.
                    sa.ForeignKeyConstraint(['event_id'], [
                        'events.id'], name='guest_data_event_id_fkey', ondelete='CASCADE'),
                    sa.ForeignKeyConstraint(['guest_id'], [
                        'guests.id'], name='guest_data_guest_id_fkey', ondelete='CASCADE'),
                    sa.PrimaryKeyConstraint('id', name='guest_data_id_pkey')
                    )
    # ### end Alembic commands ###

def downgrade():
    """Drop the ``guest_data`` table (inverse of :func:`upgrade`)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('guest_data')
    # ### end Alembic commands ###
| # Code generated by github.com/lolopinto/ent/ent, DO NOT edit.
"""add guest_data table
Revision ID: 9fe6423022c2
Revises: fd8bc05fbc78
Create Date: 2021-01-25 19:08:22.522260+00:00
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '9fe6423022c2'
down_revision = 'fd8bc05fbc78'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``guest_data`` table (per guest-per-event dietary info)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('guest_data',
                    sa.Column('id', postgresql.UUID(), nullable=False),
                    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
                    sa.Column('updated_at', sa.TIMESTAMP(), nullable=False),
                    sa.Column('guest_id', postgresql.UUID(), nullable=False),
                    sa.Column('event_id', postgresql.UUID(), nullable=False),
                    sa.Column('dietary_restrictions',
                              sa.Text(), nullable=False),
                    # ON DELETE CASCADE: rows disappear with their parent
                    # event/guest.
                    sa.ForeignKeyConstraint(['event_id'], [
                        'events.id'], name='guest_data_event_id_fkey', ondelete='CASCADE'),
                    sa.ForeignKeyConstraint(['guest_id'], [
                        'guests.id'], name='guest_data_guest_id_fkey', ondelete='CASCADE'),
                    sa.PrimaryKeyConstraint('id', name='guest_data_id_pkey')
                    )
    # ### end Alembic commands ###

def downgrade():
    """Drop the ``guest_data`` table (inverse of :func:`upgrade`)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('guest_data')
    # ### end Alembic commands ###
| en | 000317035_lazytype-ent_9fe6423022c2_202112519822_add_guest_data_table_de6e03b1f754.py | unknown | 511 |
import pickle
import pdb
from onconet.utils.c_index import get_censoring_dist
NO_DATASET_ERR = "Dataset {} not in DATASET_REGISTRY! Available datasets are {}"
DATASET_REGISTRY = {}
def RegisterDataset(dataset_name):
    """Class decorator: registers the decorated dataset under `dataset_name`."""
    def decorator(dataset_class):
        # Record the class in the module-level registry, return it unchanged.
        DATASET_REGISTRY[dataset_name] = dataset_class
        return dataset_class
    return decorator
def get_dataset_class(args):
    """Look up ``args.dataset`` in the registry.

    Raises Exception naming the unknown dataset and the available ones.
    """
    try:
        return DATASET_REGISTRY[args.dataset]
    except KeyError:
        raise Exception(
            NO_DATASET_ERR.format(args.dataset, DATASET_REGISTRY.keys()))
def build_path_to_hidden_dict(args):
res = pickle.load(open(args.hiddens_results_path,'rb'))
path_to_hidden = {}
for split in ['train','dev','test']:
hiddens, paths = res['{}_hiddens'.format(split)]
for indx, path in enumerate(paths):
path_to_hidden[path] = hiddens[indx]
print("Built path to hidden dict with {} paths, of dim: {}".format(len(path_to_hidden), hiddens[0].shape[0]))
return path_to_hidden, hiddens[0].shape[0]
# Depending on arg, build dataset
def get_dataset(args, transformers, test_transformers):
dataset_class = get_dataset_class(args)
if args.ten_fold_cross_val or args.use_precomputed_hiddens:
args.patient_to_partition_dict = {}
if args.use_precomputed_hiddens:
path_to_hidden_dict, args.hidden_dim = build_path_to_hidden_dict(args)
if args.force_input_dim:
args.hidden_dim = args.input_dim
path_to_hidden_dict = (lambda input_dim, path_to_hidden_dict : {k:v[:input_dim] for k,v in path_to_hidden_dict.items()})(args.input_dim, path_to_hidden_dict)
args.precomputed_hidden_dim = args.hidden_dim
args.exam_to_year_dict = {}
args.exam_to_device_dict = {}
train = dataset_class(args, transformers, 'train')
dev = dataset_class(args, test_transformers, 'dev')
test = dataset_class(args, test_transformers, 'test')
if args.survival_analysis_setup:
args.censoring_distribution = get_censoring_dist(train if len(train) > 0 else test)
if args.use_precomputed_hiddens:
train.path_to_hidden_dict = path_to_hidden_dict
dev.path_to_hidden_dict = path_to_hidden_dict
test.path_to_hidden_dict = path_to_hidden_dict
return train, dev, test
| import pickle
import pdb
from onconet.utils.c_index import get_censoring_dist
NO_DATASET_ERR = "Dataset {} not in DATASET_REGISTRY! Available datasets are {}"
DATASET_REGISTRY = {}
def RegisterDataset(dataset_name):
"""Registers a dataset."""
def decorator(f):
DATASET_REGISTRY[dataset_name] = f
return f
return decorator
def get_dataset_class(args):
if args.dataset not in DATASET_REGISTRY:
raise Exception(
NO_DATASET_ERR.format(args.dataset, DATASET_REGISTRY.keys()))
return DATASET_REGISTRY[args.dataset]
def build_path_to_hidden_dict(args):
res = pickle.load(open(args.hiddens_results_path,'rb'))
path_to_hidden = {}
for split in ['train','dev','test']:
hiddens, paths = res['{}_hiddens'.format(split)]
for indx, path in enumerate(paths):
path_to_hidden[path] = hiddens[indx]
print("Built path to hidden dict with {} paths, of dim: {}".format(len(path_to_hidden), hiddens[0].shape[0]))
return path_to_hidden, hiddens[0].shape[0]
# Depending on arg, build dataset
def get_dataset(args, transformers, test_transformers):
dataset_class = get_dataset_class(args)
if args.ten_fold_cross_val or args.use_precomputed_hiddens:
args.patient_to_partition_dict = {}
if args.use_precomputed_hiddens:
path_to_hidden_dict, args.hidden_dim = build_path_to_hidden_dict(args)
if args.force_input_dim:
args.hidden_dim = args.input_dim
path_to_hidden_dict = (lambda input_dim, path_to_hidden_dict : {k:v[:input_dim] for k,v in path_to_hidden_dict.items()})(args.input_dim, path_to_hidden_dict)
args.precomputed_hidden_dim = args.hidden_dim
args.exam_to_year_dict = {}
args.exam_to_device_dict = {}
train = dataset_class(args, transformers, 'train')
dev = dataset_class(args, test_transformers, 'dev')
test = dataset_class(args, test_transformers, 'test')
if args.survival_analysis_setup:
args.censoring_distribution = get_censoring_dist(train if len(train) > 0 else test)
if args.use_precomputed_hiddens:
train.path_to_hidden_dict = path_to_hidden_dict
dev.path_to_hidden_dict = path_to_hidden_dict
test.path_to_hidden_dict = path_to_hidden_dict
return train, dev, test
| en | 000153257_harrivle-Mirai_factory_bfbc756459e6.py | unknown | 777 |
#!/usr/bin/env python3
# Copyright (C) Alibaba Group Holding Limited.
""" Epic-Kitchens dataset. """
import os
import random
import torch
import torch.utils.data
import utils.logging as logging
import time
import oss2 as oss
from torchvision.transforms import Compose
import torchvision.transforms._transforms_video as transforms
import torch.nn.functional as F
from datasets.utils.transformations import (
ColorJitter,
KineticsResizedCrop
)
from datasets.base.base_dataset import BaseVideoDataset
from datasets.utils.random_erasing import RandomErasing
import utils.bucket as bu
from datasets.base.builder import DATASET_REGISTRY
logger = logging.get_logger(__name__)
@DATASET_REGISTRY.register()
class Epickitchen100(BaseVideoDataset):
def __init__(self, cfg, split):
super(Epickitchen100, self).__init__(cfg, split)
if (self.split == "test" or self.split == "submission") and self.cfg.PRETRAIN.ENABLE == False:
self._pre_transformation_config_required = True
def _get_dataset_list_name(self):
"""
Returns the list for the dataset.
Returns:
dataset_list_name (str)
"""
if self.split == "train":
if self.cfg.TRAIN.TRAIN_VAL_COMBINE:
train_list = "train_val"
else:
train_list = "train"
name = "EPIC_100_{}.csv".format(
train_list if self.split == "train" else "validation" if not self.split == "submission" else "test_timestamps",
)
logger.info("Reading video list from file: {}".format(name))
return name
def _get_sample_info(self, index):
"""
Returns the sample info corresponding to the index.
Args:
index (int): target index
Returns:
sample_info (dict): contains different informations to be used later
"name": the name of the video
"path": the path of the video for the specified index
"verb_class": verb label of the video
"noun_class": noun label of the video
"""
if not self.split == "submission":
video_name = self._samples[index][0]
verb_class = self._samples[index][10]
noun_class = self._samples[index][12]
video_path = os.path.join(self.data_root_dir, video_name+".MP4")
else:
# if the split is submission, then no label is available
# we simply set the verb class and the noun class to zero
video_name = self._samples[index][0]
verb_class = 0
noun_class = 0
video_path = os.path.join(self.data_root_dir, video_name+".MP4")
if self.cfg.DATA.MULTI_LABEL or not hasattr(self.cfg.DATA, "TRAIN_VERSION"):
supervised_label = {
"verb_class": verb_class,
"noun_class": noun_class
}
else:
if self.cfg.DATA.TRAIN_VERSION == "only_train_verb":
supervised_label = verb_class
elif self.cfg.DATA.TRAIN_VERSION == "only_train_noun":
supervised_label = noun_class
sample_info = {
"name": video_name,
"path": video_path,
"supervised_label": supervised_label
}
return sample_info
def _config_transform(self):
"""
Configs the transform for the dataset.
For train, we apply random cropping, random horizontal flip, random color jitter (optionally),
normalization and random erasing (optionally).
For val and test, we apply controlled spatial cropping and normalization.
The transformations are stored as a callable function to "self.transforms".
"""
self.transform = None
if self.split == 'train' and not self.cfg.PRETRAIN.ENABLE:
std_transform_list = [
transforms.ToTensorVideo(),
KineticsResizedCrop(
short_side_range = [self.cfg.DATA.TRAIN_JITTER_SCALES[0], self.cfg.DATA.TRAIN_JITTER_SCALES[1]],
crop_size = self.cfg.DATA.TRAIN_CROP_SIZE,
),
transforms.RandomHorizontalFlipVideo()
]
# Add color aug
if self.cfg.AUGMENTATION.COLOR_AUG:
std_transform_list.append(
ColorJitter(
brightness=self.cfg.AUGMENTATION.BRIGHTNESS,
contrast=self.cfg.AUGMENTATION.CONTRAST,
saturation=self.cfg.AUGMENTATION.SATURATION,
hue=self.cfg.AUGMENTATION.HUE,
grayscale=self.cfg.AUGMENTATION.GRAYSCALE,
consistent=self.cfg.AUGMENTATION.CONSISTENT,
shuffle=self.cfg.AUGMENTATION.SHUFFLE,
gray_first=self.cfg.AUGMENTATION.GRAY_FIRST,
),
)
std_transform_list += [
transforms.NormalizeVideo(
mean=self.cfg.DATA.MEAN,
std=self.cfg.DATA.STD,
inplace=True
),
RandomErasing(self.cfg)
]
self.transform = Compose(std_transform_list)
elif self.split == 'val' or self.split == 'test' or self.split == "submission":
self.resize_video = KineticsResizedCrop(
short_side_range = [self.cfg.DATA.TEST_SCALE, self.cfg.DATA.TEST_SCALE],
crop_size = self.cfg.DATA.TEST_CROP_SIZE,
num_spatial_crops = self.cfg.TEST.NUM_SPATIAL_CROPS
)
std_transform_list = [
transforms.ToTensorVideo(),
self.resize_video,
transforms.NormalizeVideo(
mean=self.cfg.DATA.MEAN,
std=self.cfg.DATA.STD,
inplace=True
)
]
self.transform = Compose(std_transform_list)
def _pre_transformation_config(self):
"""
Set transformation parameters if required.
"""
self.resize_video.set_spatial_index(self.spatial_idx)
| #!/usr/bin/env python3
# Copyright (C) Alibaba Group Holding Limited.
""" Epic-Kitchens dataset. """
import os
import random
import torch
import torch.utils.data
import utils.logging as logging
import time
import oss2 as oss
from torchvision.transforms import Compose
import torchvision.transforms._transforms_video as transforms
import torch.nn.functional as F
from datasets.utils.transformations import (
ColorJitter,
KineticsResizedCrop
)
from datasets.base.base_dataset import BaseVideoDataset
from datasets.utils.random_erasing import RandomErasing
import utils.bucket as bu
from datasets.base.builder import DATASET_REGISTRY
logger = logging.get_logger(__name__)
@DATASET_REGISTRY.register()
class Epickitchen100(BaseVideoDataset):
def __init__(self, cfg, split):
super(Epickitchen100, self).__init__(cfg, split)
if (self.split == "test" or self.split == "submission") and self.cfg.PRETRAIN.ENABLE == False:
self._pre_transformation_config_required = True
def _get_dataset_list_name(self):
"""
Returns the list for the dataset.
Returns:
dataset_list_name (str)
"""
if self.split == "train":
if self.cfg.TRAIN.TRAIN_VAL_COMBINE:
train_list = "train_val"
else:
train_list = "train"
name = "EPIC_100_{}.csv".format(
train_list if self.split == "train" else "validation" if not self.split == "submission" else "test_timestamps",
)
logger.info("Reading video list from file: {}".format(name))
return name
def _get_sample_info(self, index):
"""
Returns the sample info corresponding to the index.
Args:
index (int): target index
Returns:
sample_info (dict): contains different informations to be used later
"name": the name of the video
"path": the path of the video for the specified index
"verb_class": verb label of the video
"noun_class": noun label of the video
"""
if not self.split == "submission":
video_name = self._samples[index][0]
verb_class = self._samples[index][10]
noun_class = self._samples[index][12]
video_path = os.path.join(self.data_root_dir, video_name+".MP4")
else:
# if the split is submission, then no label is available
# we simply set the verb class and the noun class to zero
video_name = self._samples[index][0]
verb_class = 0
noun_class = 0
video_path = os.path.join(self.data_root_dir, video_name+".MP4")
if self.cfg.DATA.MULTI_LABEL or not hasattr(self.cfg.DATA, "TRAIN_VERSION"):
supervised_label = {
"verb_class": verb_class,
"noun_class": noun_class
}
else:
if self.cfg.DATA.TRAIN_VERSION == "only_train_verb":
supervised_label = verb_class
elif self.cfg.DATA.TRAIN_VERSION == "only_train_noun":
supervised_label = noun_class
sample_info = {
"name": video_name,
"path": video_path,
"supervised_label": supervised_label
}
return sample_info
def _config_transform(self):
"""
Configs the transform for the dataset.
For train, we apply random cropping, random horizontal flip, random color jitter (optionally),
normalization and random erasing (optionally).
For val and test, we apply controlled spatial cropping and normalization.
The transformations are stored as a callable function to "self.transforms".
"""
self.transform = None
if self.split == 'train' and not self.cfg.PRETRAIN.ENABLE:
std_transform_list = [
transforms.ToTensorVideo(),
KineticsResizedCrop(
short_side_range = [self.cfg.DATA.TRAIN_JITTER_SCALES[0], self.cfg.DATA.TRAIN_JITTER_SCALES[1]],
crop_size = self.cfg.DATA.TRAIN_CROP_SIZE,
),
transforms.RandomHorizontalFlipVideo()
]
# Add color aug
if self.cfg.AUGMENTATION.COLOR_AUG:
std_transform_list.append(
ColorJitter(
brightness=self.cfg.AUGMENTATION.BRIGHTNESS,
contrast=self.cfg.AUGMENTATION.CONTRAST,
saturation=self.cfg.AUGMENTATION.SATURATION,
hue=self.cfg.AUGMENTATION.HUE,
grayscale=self.cfg.AUGMENTATION.GRAYSCALE,
consistent=self.cfg.AUGMENTATION.CONSISTENT,
shuffle=self.cfg.AUGMENTATION.SHUFFLE,
gray_first=self.cfg.AUGMENTATION.GRAY_FIRST,
),
)
std_transform_list += [
transforms.NormalizeVideo(
mean=self.cfg.DATA.MEAN,
std=self.cfg.DATA.STD,
inplace=True
),
RandomErasing(self.cfg)
]
self.transform = Compose(std_transform_list)
elif self.split == 'val' or self.split == 'test' or self.split == "submission":
self.resize_video = KineticsResizedCrop(
short_side_range = [self.cfg.DATA.TEST_SCALE, self.cfg.DATA.TEST_SCALE],
crop_size = self.cfg.DATA.TEST_CROP_SIZE,
num_spatial_crops = self.cfg.TEST.NUM_SPATIAL_CROPS
)
std_transform_list = [
transforms.ToTensorVideo(),
self.resize_video,
transforms.NormalizeVideo(
mean=self.cfg.DATA.MEAN,
std=self.cfg.DATA.STD,
inplace=True
)
]
self.transform = Compose(std_transform_list)
def _pre_transformation_config(self):
"""
Set transformation parameters if required.
"""
self.resize_video.set_spatial_index(self.spatial_idx)
| en | 000578429_jiangzeyinzi-EssentialMC2_epickitchen100_f4bf8df56d7a.py | unknown | 1,698 |
#!/usr/bin/python
import argparse
import sys
from ldif import LDIFParser, LDIFWriter
class ActiveDirectoryToOpenLdapLDIFConvertor(LDIFParser):
objectclassAddsBasedOnDN = { 'CN=ExchangeActiveSyncDevices' : 'exchangeActiveSyncDevices'
}
objectclassChangesBasedOnDN = { 'CN=_Template ': { 'user': 'customActiveDirectoryUserTemplate' },
'CN=_Template_': { 'user': 'customActiveDirectoryUserTemplate' },
'CN=_Template\, ': { 'user': 'customActiveDirectoryUserTemplate' }
}
objectclassMappings = { 'top' : 'mstop', 'user' : 'customActiveDirectoryUser', 'group' : 'customActiveDirectoryGroup',
'contact' : 'customActiveDirectoryContact' }
attributetypesValuesDuplicates = [ 'dSCorePropagationData' ]
def __init__(self, input, output):
LDIFParser.__init__(self, input)
self.writer = LDIFWriter(output)
def addObjectclassesBasedOnDN(self, dn, entry):
for objAdd in self.objectclassAddsBasedOnDN:
if objAdd.lower() in dn.lower(): # case insensitive match
if 'objectClass' not in entry.keys():
entry['objectClass'] = [ ]
entry['objectClass'].append(self.objectclassAddsBasedOnDN[objAdd]);
def changeObjectclassesBasedOnDN(self, dn, entry):
if 'objectClass' not in entry.keys():
return
for objChange in self.objectclassChangesBasedOnDN:
if objChange.lower() in dn.lower(): # case insensitive match
for objSource in self.objectclassChangesBasedOnDN[objChange]:
index = 0
for objTarget in entry['objectClass']:
if objSource == objTarget:
entry['objectClass'][index] = self.objectclassChangesBasedOnDN[objChange][objSource]
index += 1
def changeObjectclasses(self, dn, entry):
if 'objectClass' in entry.keys():
index = 0
for objectclass in entry['objectClass']:
for objMap in self.objectclassMappings:
if objMap == objectclass:
entry['objectClass'][index] = self.objectclassMappings[objMap]
index += 1
def removeDuplicateAttributeValues(self, dn, entry):
for attributetype in self.attributetypesValuesDuplicates:
if attributetype in entry.keys():
entry[attributetype] = list(set(entry[attributetype]))
def handle(self, dn, entry):
self.addObjectclassesBasedOnDN(dn, entry)
self.changeObjectclassesBasedOnDN(dn, entry)
self.changeObjectclasses(dn, entry)
self.removeDuplicateAttributeValues(dn, entry)
self.writer.unparse(dn, entry)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='',
)
parser.add_argument('--src', metavar='SOURCE', help='Source ldif')
parser.add_argument('--dst', metavar='DESTINATION', help='Destination ldif')
args = parser.parse_args()
adparser = ActiveDirectoryToOpenLdapLDIFConvertor(open(args.src, 'rb'), open(args.dst, 'wb'))
adparser.parse()
| #!/usr/bin/python
import argparse
import sys
from ldif import LDIFParser, LDIFWriter
class ActiveDirectoryToOpenLdapLDIFConvertor(LDIFParser):
objectclassAddsBasedOnDN = { 'CN=ExchangeActiveSyncDevices' : 'exchangeActiveSyncDevices'
}
objectclassChangesBasedOnDN = { 'CN=_Template ': { 'user': 'customActiveDirectoryUserTemplate' },
'CN=_Template_': { 'user': 'customActiveDirectoryUserTemplate' },
'CN=_Template\, ': { 'user': 'customActiveDirectoryUserTemplate' }
}
objectclassMappings = { 'top' : 'mstop', 'user' : 'customActiveDirectoryUser', 'group' : 'customActiveDirectoryGroup',
'contact' : 'customActiveDirectoryContact' }
attributetypesValuesDuplicates = [ 'dSCorePropagationData' ]
def __init__(self, input, output):
LDIFParser.__init__(self, input)
self.writer = LDIFWriter(output)
def addObjectclassesBasedOnDN(self, dn, entry):
for objAdd in self.objectclassAddsBasedOnDN:
if objAdd.lower() in dn.lower(): # case insensitive match
if 'objectClass' not in entry.keys():
entry['objectClass'] = [ ]
entry['objectClass'].append(self.objectclassAddsBasedOnDN[objAdd]);
def changeObjectclassesBasedOnDN(self, dn, entry):
if 'objectClass' not in entry.keys():
return
for objChange in self.objectclassChangesBasedOnDN:
if objChange.lower() in dn.lower(): # case insensitive match
for objSource in self.objectclassChangesBasedOnDN[objChange]:
index = 0
for objTarget in entry['objectClass']:
if objSource == objTarget:
entry['objectClass'][index] = self.objectclassChangesBasedOnDN[objChange][objSource]
index += 1
def changeObjectclasses(self, dn, entry):
if 'objectClass' in entry.keys():
index = 0
for objectclass in entry['objectClass']:
for objMap in self.objectclassMappings:
if objMap == objectclass:
entry['objectClass'][index] = self.objectclassMappings[objMap]
index += 1
def removeDuplicateAttributeValues(self, dn, entry):
for attributetype in self.attributetypesValuesDuplicates:
if attributetype in entry.keys():
entry[attributetype] = list(set(entry[attributetype]))
def handle(self, dn, entry):
self.addObjectclassesBasedOnDN(dn, entry)
self.changeObjectclassesBasedOnDN(dn, entry)
self.changeObjectclasses(dn, entry)
self.removeDuplicateAttributeValues(dn, entry)
self.writer.unparse(dn, entry)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='',
)
parser.add_argument('--src', metavar='SOURCE', help='Source ldif')
parser.add_argument('--dst', metavar='DESTINATION', help='Destination ldif')
args = parser.parse_args()
adparser = ActiveDirectoryToOpenLdapLDIFConvertor(open(args.src, 'rb'), open(args.dst, 'wb'))
adparser.parse()
| en | 000318413_mvrck0-active-directory-devcontainer_ldif-convertor_952e78ada320.py | unknown | 856 |
import os.path
from absl import logging
from icubam.www.handlers import base
class DisclaimerHandler(base.BaseHandler):
ROUTE = '/disclaimer'
def initialize(self, config, db_factory):
super().initialize(config, db_factory)
def get_disclaimer_html(self):
"""To show a disclaimer page if specified in configuration."""
path = self.config.server.disclaimer
if os.path.exists(path):
with open(path, 'r') as fp:
return fp.read()
else:
logging.warning(
f"Disclaimer file from config {path} is set but not available"
)
return ""
def get_current_user(self):
"""This route is not secured at first."""
return None
async def get(self):
"""Serves the page filled with the configuration specified file."""
if self.config.server.has_key('disclaimer'):
html = self.get_disclaimer_html()
data = {'disclaimer': html}
self.render('disclaimer.html', **data)
| import os.path
from absl import logging
from icubam.www.handlers import base
class DisclaimerHandler(base.BaseHandler):
ROUTE = '/disclaimer'
def initialize(self, config, db_factory):
super().initialize(config, db_factory)
def get_disclaimer_html(self):
"""To show a disclaimer page if specified in configuration."""
path = self.config.server.disclaimer
if os.path.exists(path):
with open(path, 'r') as fp:
return fp.read()
else:
logging.warning(
f"Disclaimer file from config {path} is set but not available"
)
return ""
def get_current_user(self):
"""This route is not secured at first."""
return None
async def get(self):
"""Serves the page filled with the configuration specified file."""
if self.config.server.has_key('disclaimer'):
html = self.get_disclaimer_html()
data = {'disclaimer': html}
self.render('disclaimer.html', **data)
| en | 000354583_rth-icubam_disclaimer_7c31f19828a0.py | unknown | 275 |
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
module for configuring player characters
"""
import datetime
from pyherc.data import SpecialTime
from pyherc.generators import creature_config, inventory_config
from pyherc.rules.calendar import get_special_events
def init_players(context):
"""
Initialise creatures
:returns: list of creature configurations
:rtype: [CreatureConfiguration]
"""
config = []
surface_manager = context.surface_manager
adventurer_f0 = surface_manager.add_icon('adventurer_f0', ':pc_adventurer_f0.png', '@', ['white', 'bold'])
adventurer_f1 = surface_manager.add_icon('adventurer_f1', ':pc_adventurer_f1.png', '@', ['white', 'bold'])
config.append(creature_config(name = 'Adventurer',
body = 6,
finesse = 7,
mind = 8,
hp = 12,
speed = 2.5,
icons = (adventurer_f0, adventurer_f1),
attack = 1,
ai = None,
effect_handles = None,
inventory = [inventory_config(
item_name = 'spade',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'sword',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'leather armour',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'bow',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'arrow',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'war arrow',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'blunt arrow',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'healing potion',
min_amount = 1,
max_amount = 2,
probability = 50),
inventory_config(
item_name = 'bag of small caltrops',
min_amount = 1,
max_amount = 1,
probability = 20)],
description = '\n'.join(['A skillful adventurer.',
'',
'Adventurer is armed and ready to explore any dungeon he sees. He is strong enough to survive combat with some of the dangers, while some he definitely should avoid',
'Adventurer also carries some potions that will help him on his journey.'])))
warrior_f0 = surface_manager.add_icon('warrior_f0', ':pc_warrior_f0.png', '@', ['white', 'bold'])
warrior_f1 = surface_manager.add_icon('warrior_f1', ':pc_warrior_f1.png', '@', ['white', 'bold'])
config.append(creature_config(name = 'Warrior',
body = 8,
finesse = 7,
mind = 6,
hp = 16,
speed = 2.5,
icons = (warrior_f0, warrior_f1),
attack = 2,
ai = None,
effect_handles = None,
inventory = [inventory_config(
item_name = 'sword',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'warhammer',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'scale mail',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'dagger',
min_amount = 1,
max_amount = 1,
probability = 100)],
description = '\n'.join(['A stout warrior',
'',
'Warrior is armed to teeth and tends to solve his problems with brute force.',
'Warrior has nice selection of weapons to use but very little of anything else.'])))
surface_manager.add_icon('engineer_f0', ':/characters/pc_engineer_f0.png', '@', ['white', 'bold'])
surface_manager.add_icon('engineer_f1', ':/characters/pc_engineer_f1.png', '@', ['white', 'bold'])
config.append(creature_config(name = 'Master Engineer',
body = 3,
finesse = 5,
mind = 11,
hp = 8,
speed = 2.5,
icons = ('engineer_f0', 'engineer_f1'),
attack = 1,
ai = None,
effect_handles = None,
inventory = [inventory_config(
item_name = 'dagger',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'robes',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'healing potion',
min_amount = 1,
max_amount = 2,
probability = 50),
inventory_config(
item_name = 'bag of brutal caltrops',
min_amount = 1,
max_amount = 2,
probability = 100),
inventory_config(
item_name = 'greater bag of caltrops',
min_amount = 1,
max_amount = 2,
probability = 100)],
description = '\n'.join(['A master engineer.',
'',
'Master engineer is physically weak and should avoid direct combat with enemies. Their skill lies in various tools and gadgets that can be used to defeat the foes.',
'Master engineer also carries some potions that are useful while exploring dungeons.'])))
date = datetime.date.today()
events = get_special_events(date.year, date.month, date.day)
if False and SpecialTime.aprilfools in events:
platino_f0 = surface_manager.add_icon('platino_f0', ':platino_f0.png', '@', ['white', 'bold'])
platino_f1 = surface_manager.add_icon('platino_f1', ':platino_f1.png', '@', ['white', 'bold'])
config.append(creature_config(name = 'Dragon de Platino',
body = 6,
finesse = 7,
mind = 8,
hp = 9,
speed = 2.5,
icons = (platino_f0, platino_f1),
attack = 1,
ai = None,
effect_handles = None,
inventory = [],
description = '\n'.join(['Dragon de Platino',
'',
'Mysterious dragon who comes and goes as he wishes...'])))
if False and SpecialTime.christmas in events:
for character in config:
character.inventory.append(inventory_config(item_name = 'idol of snowman',
min_amount = 1,
max_amount = 1,
probability = 100))
return config
| # -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
module for configuring player characters
"""
import datetime
from pyherc.data import SpecialTime
from pyherc.generators import creature_config, inventory_config
from pyherc.rules.calendar import get_special_events
def init_players(context):
"""
Initialise creatures
:returns: list of creature configurations
:rtype: [CreatureConfiguration]
"""
config = []
surface_manager = context.surface_manager
adventurer_f0 = surface_manager.add_icon('adventurer_f0', ':pc_adventurer_f0.png', '@', ['white', 'bold'])
adventurer_f1 = surface_manager.add_icon('adventurer_f1', ':pc_adventurer_f1.png', '@', ['white', 'bold'])
config.append(creature_config(name = 'Adventurer',
body = 6,
finesse = 7,
mind = 8,
hp = 12,
speed = 2.5,
icons = (adventurer_f0, adventurer_f1),
attack = 1,
ai = None,
effect_handles = None,
inventory = [inventory_config(
item_name = 'spade',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'sword',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'leather armour',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'bow',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'arrow',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'war arrow',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'blunt arrow',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'healing potion',
min_amount = 1,
max_amount = 2,
probability = 50),
inventory_config(
item_name = 'bag of small caltrops',
min_amount = 1,
max_amount = 1,
probability = 20)],
description = '\n'.join(['A skillful adventurer.',
'',
'Adventurer is armed and ready to explore any dungeon he sees. He is strong enough to survive combat with some of the dangers, while some he definitely should avoid',
'Adventurer also carries some potions that will help him on his journey.'])))
warrior_f0 = surface_manager.add_icon('warrior_f0', ':pc_warrior_f0.png', '@', ['white', 'bold'])
warrior_f1 = surface_manager.add_icon('warrior_f1', ':pc_warrior_f1.png', '@', ['white', 'bold'])
config.append(creature_config(name = 'Warrior',
body = 8,
finesse = 7,
mind = 6,
hp = 16,
speed = 2.5,
icons = (warrior_f0, warrior_f1),
attack = 2,
ai = None,
effect_handles = None,
inventory = [inventory_config(
item_name = 'sword',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'warhammer',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'scale mail',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'dagger',
min_amount = 1,
max_amount = 1,
probability = 100)],
description = '\n'.join(['A stout warrior',
'',
'Warrior is armed to teeth and tends to solve his problems with brute force.',
'Warrior has nice selection of weapons to use but very little of anything else.'])))
surface_manager.add_icon('engineer_f0', ':/characters/pc_engineer_f0.png', '@', ['white', 'bold'])
surface_manager.add_icon('engineer_f1', ':/characters/pc_engineer_f1.png', '@', ['white', 'bold'])
config.append(creature_config(name = 'Master Engineer',
body = 3,
finesse = 5,
mind = 11,
hp = 8,
speed = 2.5,
icons = ('engineer_f0', 'engineer_f1'),
attack = 1,
ai = None,
effect_handles = None,
inventory = [inventory_config(
item_name = 'dagger',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'robes',
min_amount = 1,
max_amount = 1,
probability = 100),
inventory_config(
item_name = 'healing potion',
min_amount = 1,
max_amount = 2,
probability = 50),
inventory_config(
item_name = 'bag of brutal caltrops',
min_amount = 1,
max_amount = 2,
probability = 100),
inventory_config(
item_name = 'greater bag of caltrops',
min_amount = 1,
max_amount = 2,
probability = 100)],
description = '\n'.join(['A master engineer.',
'',
'Master engineer is physically weak and should avoid direct combat with enemies. Their skill lies in various tools and gadgets that can be used to defeat the foes.',
'Master engineer also carries some potions that are useful while exploring dungeons.'])))
date = datetime.date.today()
events = get_special_events(date.year, date.month, date.day)
if False and SpecialTime.aprilfools in events:
platino_f0 = surface_manager.add_icon('platino_f0', ':platino_f0.png', '@', ['white', 'bold'])
platino_f1 = surface_manager.add_icon('platino_f1', ':platino_f1.png', '@', ['white', 'bold'])
config.append(creature_config(name = 'Dragon de Platino',
body = 6,
finesse = 7,
mind = 8,
hp = 9,
speed = 2.5,
icons = (platino_f0, platino_f1),
attack = 1,
ai = None,
effect_handles = None,
inventory = [],
description = '\n'.join(['Dragon de Platino',
'',
'Mysterious dragon who comes and goes as he wishes...'])))
if False and SpecialTime.christmas in events:
for character in config:
character.inventory.append(inventory_config(item_name = 'idol of snowman',
min_amount = 1,
max_amount = 1,
probability = 100))
return config
| en | 000228796_tuturto-pyherc_player_characters_607dd916573d.py | unknown | 2,274 |
import torch
from torch import nn
class LiteConv3x3(nn.Module):
"""Lite 3x3 convolution"""
def __init__(self):
super(LiteConv3x3, self).__init__()
def forward(self, input):
return input
class AG(nn.Module):
"""Aggregation gate"""
def __init__(self):
super(AG, self).__init__()
def forward(self, input):
return input
class OSBlock(nn.Module):
"""Omni-scale block"""
def __init__(self):
super(OSBlock, self).__init__()
def forward(self, input):
return input
if __name__ == '__main__':
print('test OSBlock') | import torch
from torch import nn
class LiteConv3x3(nn.Module):
"""Lite 3x3 convolution"""
def __init__(self):
super(LiteConv3x3, self).__init__()
def forward(self, input):
return input
class AG(nn.Module):
"""Aggregation gate"""
def __init__(self):
super(AG, self).__init__()
def forward(self, input):
return input
class OSBlock(nn.Module):
"""Omni-scale block"""
def __init__(self):
super(OSBlock, self).__init__()
def forward(self, input):
return input
if __name__ == '__main__':
print('test OSBlock') | en | 000480716_CnybTseng-JDE_osblock_366d3097f6ca.py | unknown | 189 |
import datetime
import logging
import shutil
import sys
from random import randint, random
from typing import List, Tuple
import pandas as pd
from sklearn import preprocessing
from dbnd import log_dataframe, log_metric, pipeline, task
from dbnd._core.constants import DbndTargetOperationType
from dbnd._core.parameter.parameter_builder import parameter
from dbnd._core.utils.basics.range import period_dates
from dbnd_test_scenarios.pipelines.common.pandas_tasks import load_from_sql_data
from dbnd_test_scenarios.scenarios_repo import client_scoring_data
from dbnd_test_scenarios.utils.data_utils import get_hash_for_obj
from targets import target
logger = logging.getLogger(__name__)
def run_get_customer_data(partner_name, output_path, target_date_str):
target(output_path).mkdir_parent()
source_file = client_scoring_data.get_ingest_data(partner_name, target_date_str)
shutil.copy(source_file, output_path)
return output_path
@task
def clean_pii(
data: pd.DataFrame, pii_columns: List[str], target_date: datetime.date = None
) -> pd.DataFrame:
# I am not sure about this code, but this might help
if target_date and target_date >= datetime.date(2020, 7, 12):
if "10" not in data.columns:
log_metric("Fixed columns", ["10"])
data["10"] = 0
data[pii_columns] = data[pii_columns].apply(
lambda x: x.apply(get_hash_for_obj), axis=1
)
log_metric("PII items removed:", len(pii_columns) * data.shape[0])
log_dataframe("pii_clean", data)
return data
@task
def enrich_missing_fields(
raw_data=parameter(log_histograms=True)[pd.DataFrame],
columns_to_impute=None,
columns_min_max_scaler=None,
fill_with=0,
) -> pd.DataFrame:
columns_to_impute = columns_to_impute or ["10"]
columns_min_max_scaler = columns_min_max_scaler or []
counter = int(raw_data[columns_to_impute].copy().isna().sum())
noise = randint(-counter, counter)
log_metric(
"Replaced NaNs", int(raw_data[columns_to_impute].copy().isna().sum()) + noise
)
raw_data[columns_to_impute] = raw_data[columns_to_impute].fillna(fill_with)
for column_name in columns_min_max_scaler:
scaler = preprocessing.MinMaxScaler()
raw_data[column_name + "_norm"] = scaler.fit_transform(
raw_data[[column_name]].values.astype(float)
)
return raw_data
@task
def dedup_records(data: pd.DataFrame, columns: List[str]) -> pd.DataFrame:
data = data.drop_duplicates(subset=columns)
item_count = len(columns) * data.shape[0]
noise = randint(-item_count, item_count)
log_metric("Removed Duplicates", len(columns) * data.shape[0] + noise)
return data
@task
def create_report(data: pd.DataFrame) -> pd.DataFrame:
avg_score = int(
data["score_label"].sum()
+ randint(-2 * len(data.columns), 2 * len(data.columns))
)
log_metric("Column Count", len(data.columns))
log_metric("Avg Score", avg_score)
log_dataframe("ready_data", data, with_histograms=True)
return pd.DataFrame(data=[[avg_score]], columns=["avg_score"])
@pipeline
def ingest_partner_data(
data=parameter(log_histograms=True)[pd.DataFrame],
name="customer",
dedup_columns=None,
columns_to_impute=None,
pii_columns=None,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
pii_columns = pii_columns or ["name", "address", "phone"]
dedup_columns = dedup_columns or ["phone"]
columns_to_impute = columns_to_impute or ["10"]
clean = clean_pii(data, pii_columns)
enriched = enrich_missing_fields(clean, columns_to_impute)
deduped = dedup_records(enriched, columns=dedup_columns)
report = create_report(deduped)
return report, deduped
# PARTNERS DATA
def partner_file_data_location(name, task_target_date):
rand = random()
if rand < 0.2:
partner_file = "data/big_file.csv"
else:
partner_file = "data/small_file.csv"
return target(partner_file)
@pipeline
def fetch_partners_data(
task_target_date, selected_partners: List[str], period=datetime.timedelta(days=7)
) -> List[pd.DataFrame]:
all_data = []
for partner in selected_partners:
for d in period_dates(task_target_date, period):
partner_data_file = partner_file_data_location(
name=partner, task_target_date=d
)
partner_report, partner_data = ingest_partner_data(
name=partner, data=partner_data_file
)
all_data.append(partner_data)
return all_data
# ###########
# RUN FUNCTIONS, (not sure if we need them)
def run_process_customer_data_from_sql(query, sql_conn_str, output_file):
data = load_from_sql_data(sql_conn_str=sql_conn_str, query=query)
report = ingest_partner_data(data)
report[1].to_csv(output_file)
return output_file
def run_process_customer_data(input_file, output_file):
report = ingest_partner_data(pd.read_csv(input_file))
report[1].to_csv(output_file, index=False)
return output_file
def run_enrich_missing_fields(input_path, output_path, columns_to_impute=None):
enrich_missing_fields(
raw_data=pd.read_csv(input_path), columns_to_impute=columns_to_impute
).to_csv(output_path, index=False)
return output_path
def run_clean_piis(input_path, output_path, pii_columns, target_date_str=None):
target_date = datetime.datetime.strptime(target_date_str, "%Y-%m-%d").date()
data = pd.read_csv(input_path)
log_dataframe(
"data",
data,
path=input_path,
with_histograms=True,
operation_type=DbndTargetOperationType.read,
)
clean_pii(data=data, pii_columns=pii_columns, target_date=target_date).to_csv(
output_path, index=False
)
return output_path
def run_dedup_records(input_path, output_path, columns=None):
dedup_records(data=pd.read_csv(input_path), columns=columns).to_csv(
output_path, index=False
)
return output_path
def run_create_report(input_path, output_path):
data = pd.read_csv(input_path)
log_dataframe(
"data",
data,
path=input_path,
with_histograms=True,
operation_type=DbndTargetOperationType.write,
)
create_report(data,).to_csv(output_path, index=False)
return output_path
if __name__ == "__main__":
run_process_customer_data(sys.argv[1], sys.argv[2])
| import datetime
import logging
import shutil
import sys
from random import randint, random
from typing import List, Tuple
import pandas as pd
from sklearn import preprocessing
from dbnd import log_dataframe, log_metric, pipeline, task
from dbnd._core.constants import DbndTargetOperationType
from dbnd._core.parameter.parameter_builder import parameter
from dbnd._core.utils.basics.range import period_dates
from dbnd_test_scenarios.pipelines.common.pandas_tasks import load_from_sql_data
from dbnd_test_scenarios.scenarios_repo import client_scoring_data
from dbnd_test_scenarios.utils.data_utils import get_hash_for_obj
from targets import target
logger = logging.getLogger(__name__)
def run_get_customer_data(partner_name, output_path, target_date_str):
target(output_path).mkdir_parent()
source_file = client_scoring_data.get_ingest_data(partner_name, target_date_str)
shutil.copy(source_file, output_path)
return output_path
@task
def clean_pii(
data: pd.DataFrame, pii_columns: List[str], target_date: datetime.date = None
) -> pd.DataFrame:
# I am not sure about this code, but this might help
if target_date and target_date >= datetime.date(2020, 7, 12):
if "10" not in data.columns:
log_metric("Fixed columns", ["10"])
data["10"] = 0
data[pii_columns] = data[pii_columns].apply(
lambda x: x.apply(get_hash_for_obj), axis=1
)
log_metric("PII items removed:", len(pii_columns) * data.shape[0])
log_dataframe("pii_clean", data)
return data
@task
def enrich_missing_fields(
raw_data=parameter(log_histograms=True)[pd.DataFrame],
columns_to_impute=None,
columns_min_max_scaler=None,
fill_with=0,
) -> pd.DataFrame:
columns_to_impute = columns_to_impute or ["10"]
columns_min_max_scaler = columns_min_max_scaler or []
counter = int(raw_data[columns_to_impute].copy().isna().sum())
noise = randint(-counter, counter)
log_metric(
"Replaced NaNs", int(raw_data[columns_to_impute].copy().isna().sum()) + noise
)
raw_data[columns_to_impute] = raw_data[columns_to_impute].fillna(fill_with)
for column_name in columns_min_max_scaler:
scaler = preprocessing.MinMaxScaler()
raw_data[column_name + "_norm"] = scaler.fit_transform(
raw_data[[column_name]].values.astype(float)
)
return raw_data
@task
def dedup_records(data: pd.DataFrame, columns: List[str]) -> pd.DataFrame:
data = data.drop_duplicates(subset=columns)
item_count = len(columns) * data.shape[0]
noise = randint(-item_count, item_count)
log_metric("Removed Duplicates", len(columns) * data.shape[0] + noise)
return data
@task
def create_report(data: pd.DataFrame) -> pd.DataFrame:
avg_score = int(
data["score_label"].sum()
+ randint(-2 * len(data.columns), 2 * len(data.columns))
)
log_metric("Column Count", len(data.columns))
log_metric("Avg Score", avg_score)
log_dataframe("ready_data", data, with_histograms=True)
return pd.DataFrame(data=[[avg_score]], columns=["avg_score"])
@pipeline
def ingest_partner_data(
data=parameter(log_histograms=True)[pd.DataFrame],
name="customer",
dedup_columns=None,
columns_to_impute=None,
pii_columns=None,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
pii_columns = pii_columns or ["name", "address", "phone"]
dedup_columns = dedup_columns or ["phone"]
columns_to_impute = columns_to_impute or ["10"]
clean = clean_pii(data, pii_columns)
enriched = enrich_missing_fields(clean, columns_to_impute)
deduped = dedup_records(enriched, columns=dedup_columns)
report = create_report(deduped)
return report, deduped
# PARTNERS DATA
def partner_file_data_location(name, task_target_date):
rand = random()
if rand < 0.2:
partner_file = "data/big_file.csv"
else:
partner_file = "data/small_file.csv"
return target(partner_file)
@pipeline
def fetch_partners_data(
task_target_date, selected_partners: List[str], period=datetime.timedelta(days=7)
) -> List[pd.DataFrame]:
all_data = []
for partner in selected_partners:
for d in period_dates(task_target_date, period):
partner_data_file = partner_file_data_location(
name=partner, task_target_date=d
)
partner_report, partner_data = ingest_partner_data(
name=partner, data=partner_data_file
)
all_data.append(partner_data)
return all_data
# ###########
# RUN FUNCTIONS, (not sure if we need them)
def run_process_customer_data_from_sql(query, sql_conn_str, output_file):
data = load_from_sql_data(sql_conn_str=sql_conn_str, query=query)
report = ingest_partner_data(data)
report[1].to_csv(output_file)
return output_file
def run_process_customer_data(input_file, output_file):
report = ingest_partner_data(pd.read_csv(input_file))
report[1].to_csv(output_file, index=False)
return output_file
def run_enrich_missing_fields(input_path, output_path, columns_to_impute=None):
enrich_missing_fields(
raw_data=pd.read_csv(input_path), columns_to_impute=columns_to_impute
).to_csv(output_path, index=False)
return output_path
def run_clean_piis(input_path, output_path, pii_columns, target_date_str=None):
target_date = datetime.datetime.strptime(target_date_str, "%Y-%m-%d").date()
data = pd.read_csv(input_path)
log_dataframe(
"data",
data,
path=input_path,
with_histograms=True,
operation_type=DbndTargetOperationType.read,
)
clean_pii(data=data, pii_columns=pii_columns, target_date=target_date).to_csv(
output_path, index=False
)
return output_path
def run_dedup_records(input_path, output_path, columns=None):
dedup_records(data=pd.read_csv(input_path), columns=columns).to_csv(
output_path, index=False
)
return output_path
def run_create_report(input_path, output_path):
data = pd.read_csv(input_path)
log_dataframe(
"data",
data,
path=input_path,
with_histograms=True,
operation_type=DbndTargetOperationType.write,
)
create_report(data,).to_csv(output_path, index=False)
return output_path
if __name__ == "__main__":
run_process_customer_data(sys.argv[1], sys.argv[2])
| en | 000586040_ipattarapong-dbnd_ingest_data_95cd8987aab5.py | unknown | 2,143 |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""preprocess"""
import os
import argparse
import numpy as np
from mindspore import context
from src.dataset.testdataset import create_testdataset
parser = argparse.ArgumentParser(description="SRGAN eval")
parser.add_argument("--test_LR_path", type=str, default='./Set14/LR')
parser.add_argument("--test_GT_path", type=str, default='./Set14/HR')
parser.add_argument("--result_path", type=str, default='./preprocess_path')
parser.add_argument("--device_id", type=int, default=1, help="device id, default: 0.")
args = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_id=args.device_id)
def padding(_img, target_shape):
h, w = target_shape[0], target_shape[1]
img_h, img_w, _ = _img.shape
dh, dw = h - img_h, w - img_w
if dh < 0 or dw < 0:
raise RuntimeError(f"target_shape is bigger than img.shape, {target_shape} > {_img.shape}")
if dh != 0 or dw != 0:
_img = np.pad(_img, ((0, dh), (0, dw), (0, 0)), "constant")
return _img
if __name__ == '__main__':
test_ds = create_testdataset(1, args.test_LR_path, args.test_GT_path)
test_data_loader = test_ds.create_dict_iterator(output_numpy=True)
i = 0
img_path = args.result_path
if not os.path.exists(img_path):
os.makedirs(img_path)
for data in test_data_loader:
file_name = "SRGAN_data" + "_" + str(i) + ".bin"
file_path = img_path + "/" + file_name
lr = data['LR']
lr = lr[0]
lr = lr.transpose(1, 2, 0)
org_img = padding(lr, [200, 200])
org_img = org_img.transpose(2, 0, 1)
img = org_img.copy()
img.tofile(file_path)
i = i + 1
| # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""preprocess"""
import os
import argparse
import numpy as np
from mindspore import context
from src.dataset.testdataset import create_testdataset
parser = argparse.ArgumentParser(description="SRGAN eval")
parser.add_argument("--test_LR_path", type=str, default='./Set14/LR')
parser.add_argument("--test_GT_path", type=str, default='./Set14/HR')
parser.add_argument("--result_path", type=str, default='./preprocess_path')
parser.add_argument("--device_id", type=int, default=1, help="device id, default: 0.")
args = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_id=args.device_id)
def padding(_img, target_shape):
h, w = target_shape[0], target_shape[1]
img_h, img_w, _ = _img.shape
dh, dw = h - img_h, w - img_w
if dh < 0 or dw < 0:
raise RuntimeError(f"target_shape is bigger than img.shape, {target_shape} > {_img.shape}")
if dh != 0 or dw != 0:
_img = np.pad(_img, ((0, dh), (0, dw), (0, 0)), "constant")
return _img
if __name__ == '__main__':
test_ds = create_testdataset(1, args.test_LR_path, args.test_GT_path)
test_data_loader = test_ds.create_dict_iterator(output_numpy=True)
i = 0
img_path = args.result_path
if not os.path.exists(img_path):
os.makedirs(img_path)
for data in test_data_loader:
file_name = "SRGAN_data" + "_" + str(i) + ".bin"
file_path = img_path + "/" + file_name
lr = data['LR']
lr = lr[0]
lr = lr.transpose(1, 2, 0)
org_img = padding(lr, [200, 200])
org_img = org_img.transpose(2, 0, 1)
img = org_img.copy()
img.tofile(file_path)
i = i + 1
| en | 000514220_leelige-mindspore_preprocess_c643c5829af0.py | unknown | 764 |
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from qf_lib.backtesting.events.time_event.regular_time_event.daily_market_event import DailyMarketEvent
class AfterMarketCloseEvent(DailyMarketEvent):
"""
Rule which is triggered every day after market closes.
For example in order to set up 23:30 call before using the event:
``BeforeMarketOpenEvent.set_trigger_time({"hour": 23, "minute": 30, "second": 0, "microsecond": 0})``
The listeners for this event should implement the ``on_after_market_close()`` method.
"""
def notify(self, listener) -> None:
listener.on_after_market_close(self)
| # Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from qf_lib.backtesting.events.time_event.regular_time_event.daily_market_event import DailyMarketEvent
class AfterMarketCloseEvent(DailyMarketEvent):
"""
Rule which is triggered every day after market closes.
For example in order to set up 23:30 call before using the event:
``BeforeMarketOpenEvent.set_trigger_time({"hour": 23, "minute": 30, "second": 0, "microsecond": 0})``
The listeners for this event should implement the ``on_after_market_close()`` method.
"""
def notify(self, listener) -> None:
listener.on_after_market_close(self)
| en | 000260178_webclinic017-qf-lib_after_market_close_event_9ada76a4a54c.py | unknown | 339 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from hwt.hdl.constants import Time, NOP
from hwt.simulator.simTestCase import SimTestCase
from hwtLib.mem.atomic.flipCntr import FlipCntr
class FlipCntrTC(SimTestCase):
@classmethod
def setUpClass(cls):
cls.u = FlipCntr()
cls.compileSim(cls.u)
def test_nop(self):
u = self.u
u.doIncr._ag.data.extend([0, 0])
self.runSim(90 * Time.ns)
self.assertValSequenceEqual(u.data._ag.din,
[0 for _ in range(8)])
def test_incr(self):
u = self.u
u.doIncr._ag.data.extend([0, 1, 0, 0, 0])
u.doFlip._ag.data.extend([NOP, NOP, 1, NOP, NOP])
self.runSim(90 * Time.ns)
self.assertValSequenceEqual(
u.data._ag.din,
[0, 0] + [1 for _ in range(6)])
if __name__ == "__main__":
suite = unittest.TestSuite()
# suite.addTest(FlipCntrTC('test_nop'))
suite.addTest(unittest.makeSuite(FlipCntrTC))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from hwt.hdl.constants import Time, NOP
from hwt.simulator.simTestCase import SimTestCase
from hwtLib.mem.atomic.flipCntr import FlipCntr
class FlipCntrTC(SimTestCase):
@classmethod
def setUpClass(cls):
cls.u = FlipCntr()
cls.compileSim(cls.u)
def test_nop(self):
u = self.u
u.doIncr._ag.data.extend([0, 0])
self.runSim(90 * Time.ns)
self.assertValSequenceEqual(u.data._ag.din,
[0 for _ in range(8)])
def test_incr(self):
u = self.u
u.doIncr._ag.data.extend([0, 1, 0, 0, 0])
u.doFlip._ag.data.extend([NOP, NOP, 1, NOP, NOP])
self.runSim(90 * Time.ns)
self.assertValSequenceEqual(
u.data._ag.din,
[0, 0] + [1 for _ in range(6)])
if __name__ == "__main__":
suite = unittest.TestSuite()
# suite.addTest(FlipCntrTC('test_nop'))
suite.addTest(unittest.makeSuite(FlipCntrTC))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
| en | 000413557_Nic30-hwtLib_flipCntr_test_1ce839567b85.py | unknown | 408 |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from core import path_util
from devil.android.sdk import intent # pylint: disable=import-error
path_util.AddAndroidPylibToPath()
from pylib.utils import shared_preference_utils
from telemetry.core import android_platform
from telemetry.core import platform
from telemetry.core import util
from telemetry.internal.platform import android_device
from telemetry.page import shared_page_state
CARDBOARD_PATH = os.path.join('chrome', 'android', 'shared_preference_files',
'test', 'vr_cardboard_skipdon_setupcomplete.json')
FAKE_TRACKER_COMPONENT = ('com.google.vr.vrcore/'
'.tracking.HeadTrackingService')
SUPPORTED_POSE_TRACKER_MODES = [
'frozen', # Static pose looking straight forward.
'sweep', # Moves head back and forth horizontally.
'rotate', # Moves head continuously in a circle.
'circle_strafe', # Moves head continuously in a circle (also changes
# position if 6DoF supported?).
'motion_sickness', # Moves head in a sort of figure-eight pattern.
]
SUPPORTED_POSE_TRACKER_TYPES = [
'sensor', # Standard sensor-fusion-based pose tracker.
'tango', # Tango-based pose tracker.
'platform', # ?
'fake', # Fake pose tracker that can provide pre-defined pose sets.
]
class SharedAndroidVrPageState(shared_page_state.SharedPageState):
"""SharedPageState for VR Telemetry tests.
Performs the same functionality as SharedPageState, but with three main
differences:
1. It is currently restricted to Android
2. It performs VR-specific setup such as installing and configuring
additional APKs that are necessary for testing
3. It cycles the screen off then on before each story, similar to how
AndroidScreenRestorationSharedState ensures that the screen is on. See
_CycleScreen() for an explanation on the reasoning behind this.
"""
def __init__(self, test, finder_options, story_set):
# TODO(bsheedy): See about making this a cross-platform SharedVrPageState -
# Seems like we should be able to use SharedPageState's default platform
# property instead of specifying AndroidPlatform, and then just perform
# different setup based off the platform type
device = android_device.GetDevice(finder_options)
assert device, 'Android device is required for this story'
self._platform = platform.GetPlatformForDevice(device, finder_options)
assert self._platform, 'Unable to create Android platform'
assert isinstance(self._platform, android_platform.AndroidPlatform)
super(SharedAndroidVrPageState, self).__init__(test, finder_options,
story_set)
self._story_set = story_set
# Optimization so we're not doing redundant service starts before every
# story.
self._did_set_tracker = False
self._PerformAndroidVrSetup()
def _PerformAndroidVrSetup(self):
self._InstallVrCore()
self._ConfigureVrCore(os.path.join(path_util.GetChromiumSrcDir(),
self._finder_options.shared_prefs_file))
self._InstallNfcApk()
self._InstallKeyboardApk()
def _InstallVrCore(self):
"""Installs the VrCore APK."""
# TODO(bsheedy): Add support for temporarily replacing it if it's still
# installed as a system app on the test device
self._platform.InstallApplication(
os.path.join(path_util.GetChromiumSrcDir(), 'third_party',
'gvr-android-sdk', 'test-apks', 'vr_services',
'vr_services_current.apk'))
def _ConfigureVrCore(self, filepath):
"""Configures VrCore using the provided settings file."""
settings = shared_preference_utils.ExtractSettingsFromJson(filepath)
for setting in settings:
shared_pref = self._platform.GetSharedPrefs(
setting['package'], setting['filename'],
use_encrypted_path=setting.get('supports_encrypted_path', False))
shared_preference_utils.ApplySharedPreferenceSetting(
shared_pref, setting)
def _InstallNfcApk(self):
"""Installs the APK that allows VR tests to simulate a headset NFC scan."""
chromium_root = path_util.GetChromiumSrcDir()
# Find the most recently build APK
candidate_apks = []
for build_path in util.GetBuildDirectories(chromium_root):
apk_path = os.path.join(build_path, 'apks', 'VrNfcSimulator.apk')
if os.path.exists(apk_path):
last_changed = os.path.getmtime(apk_path)
candidate_apks.append((last_changed, apk_path))
if not candidate_apks:
raise RuntimeError(
'Could not find VrNfcSimulator.apk in a build output directory')
newest_apk_path = sorted(candidate_apks)[-1][1]
self._platform.InstallApplication(
os.path.join(chromium_root, newest_apk_path))
def _InstallKeyboardApk(self):
"""Installs the VR Keyboard APK."""
self._platform.InstallApplication(
os.path.join(path_util.GetChromiumSrcDir(), 'third_party',
'gvr-android-sdk', 'test-apks', 'vr_keyboard',
'vr_keyboard_current.apk'))
def _SetFakePoseTrackerIfNotSet(self):
if self._story_set.use_fake_pose_tracker and not self._did_set_tracker:
self.SetPoseTrackerType('fake')
self.SetPoseTrackerMode('sweep')
self._did_set_tracker = True
def SetPoseTrackerType(self, tracker_type):
"""Sets the VrCore pose tracker to the given type.
Only works if VrCore has been configured to use the VrCore-side tracker
by setting EnableVrCoreHeadTracking to true. This setting persists between
VR sessions and Chrome restarts.
Args:
tracker_type: A string corresponding to the tracker type to set.
Raises:
RuntimeError if the given |tracker_type| is not in the supported list.
"""
if tracker_type not in SUPPORTED_POSE_TRACKER_TYPES:
raise RuntimeError('Given tracker %s is not supported.' % tracker_type)
self.platform.StartAndroidService(start_intent=intent.Intent(
action='com.google.vr.vrcore.SET_TRACKER_TYPE',
component=FAKE_TRACKER_COMPONENT,
extras={'com.google.vr.vrcore.TRACKER_TYPE': tracker_type}))
def SetPoseTrackerMode(self, tracker_mode):
"""Sets the fake VrCore pose tracker to provide poses in the given mode.
Only works after SetPoseTrackerType has been set to 'fake'. This setting
persists between VR sessions and Chrome restarts.
Args:
tracker_mode: A string corresponding to the tracker mode to set.
Raises:
RuntimeError if the given |tracker_mode| is not in the supported list.
"""
if tracker_mode not in SUPPORTED_POSE_TRACKER_MODES:
raise RuntimeError('Given mode %s is not supported.' % tracker_mode)
self.platform.StartAndroidService(start_intent=intent.Intent(
action='com.google.vr.vrcore.SET_FAKE_TRACKER_MODE',
component=FAKE_TRACKER_COMPONENT,
extras={'com.google.vr.vrcore.FAKE_TRACKER_MODE': tracker_mode}))
def WillRunStory(self, page):
super(SharedAndroidVrPageState, self).WillRunStory(page)
if not self._finder_options.disable_screen_reset:
self._CycleScreen()
self._SetFakePoseTrackerIfNotSet()
def TearDownState(self):
super(SharedAndroidVrPageState, self).TearDownState()
# Reset the tracker type to use the actual sensor if it's been changed. When
# run on the bots, this shouldn't matter since the service will be killed
# during the automatic restart, but this could persist when run locally.
if self._did_set_tracker:
self.SetPoseTrackerType('sensor')
# Re-apply Cardboard as the viewer to leave the device in a consistent
# state after a benchmark run
# TODO(bsheedy): Remove this after crbug.com/772969 is fixed
self._ConfigureVrCore(os.path.join(path_util.GetChromiumSrcDir(),
CARDBOARD_PATH))
def _CycleScreen(self):
"""Cycles the screen off then on.
This is because VR test devices are set to have normal screen brightness and
automatically turn off after several minutes instead of the usual approach
of having the screen always on at minimum brightness. This is due to the
motion-to-photon latency test being sensitive to screen brightness, and min
brightness does not work well for it.
Simply using TurnScreenOn does not actually reset the timer for turning off
the screen, so instead cycle the screen to refresh it periodically.
"""
self.platform.android_action_runner.TurnScreenOff()
self.platform.android_action_runner.TurnScreenOn()
@property
def platform(self):
return self._platform
@property
def recording_wpr(self):
return self._finder_options.recording_wpr
| # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from core import path_util
from devil.android.sdk import intent # pylint: disable=import-error
path_util.AddAndroidPylibToPath()
from pylib.utils import shared_preference_utils
from telemetry.core import android_platform
from telemetry.core import platform
from telemetry.core import util
from telemetry.internal.platform import android_device
from telemetry.page import shared_page_state
CARDBOARD_PATH = os.path.join('chrome', 'android', 'shared_preference_files',
'test', 'vr_cardboard_skipdon_setupcomplete.json')
FAKE_TRACKER_COMPONENT = ('com.google.vr.vrcore/'
'.tracking.HeadTrackingService')
SUPPORTED_POSE_TRACKER_MODES = [
'frozen', # Static pose looking straight forward.
'sweep', # Moves head back and forth horizontally.
'rotate', # Moves head continuously in a circle.
'circle_strafe', # Moves head continuously in a circle (also changes
# position if 6DoF supported?).
'motion_sickness', # Moves head in a sort of figure-eight pattern.
]
SUPPORTED_POSE_TRACKER_TYPES = [
'sensor', # Standard sensor-fusion-based pose tracker.
'tango', # Tango-based pose tracker.
'platform', # ?
'fake', # Fake pose tracker that can provide pre-defined pose sets.
]
class SharedAndroidVrPageState(shared_page_state.SharedPageState):
"""SharedPageState for VR Telemetry tests.
Performs the same functionality as SharedPageState, but with three main
differences:
1. It is currently restricted to Android
2. It performs VR-specific setup such as installing and configuring
additional APKs that are necessary for testing
3. It cycles the screen off then on before each story, similar to how
AndroidScreenRestorationSharedState ensures that the screen is on. See
_CycleScreen() for an explanation on the reasoning behind this.
"""
def __init__(self, test, finder_options, story_set):
# TODO(bsheedy): See about making this a cross-platform SharedVrPageState -
# Seems like we should be able to use SharedPageState's default platform
# property instead of specifying AndroidPlatform, and then just perform
# different setup based off the platform type
device = android_device.GetDevice(finder_options)
assert device, 'Android device is required for this story'
self._platform = platform.GetPlatformForDevice(device, finder_options)
assert self._platform, 'Unable to create Android platform'
assert isinstance(self._platform, android_platform.AndroidPlatform)
super(SharedAndroidVrPageState, self).__init__(test, finder_options,
story_set)
self._story_set = story_set
# Optimization so we're not doing redundant service starts before every
# story.
self._did_set_tracker = False
self._PerformAndroidVrSetup()
def _PerformAndroidVrSetup(self):
self._InstallVrCore()
self._ConfigureVrCore(os.path.join(path_util.GetChromiumSrcDir(),
self._finder_options.shared_prefs_file))
self._InstallNfcApk()
self._InstallKeyboardApk()
def _InstallVrCore(self):
"""Installs the VrCore APK."""
# TODO(bsheedy): Add support for temporarily replacing it if it's still
# installed as a system app on the test device
self._platform.InstallApplication(
os.path.join(path_util.GetChromiumSrcDir(), 'third_party',
'gvr-android-sdk', 'test-apks', 'vr_services',
'vr_services_current.apk'))
def _ConfigureVrCore(self, filepath):
"""Configures VrCore using the provided settings file."""
settings = shared_preference_utils.ExtractSettingsFromJson(filepath)
for setting in settings:
shared_pref = self._platform.GetSharedPrefs(
setting['package'], setting['filename'],
use_encrypted_path=setting.get('supports_encrypted_path', False))
shared_preference_utils.ApplySharedPreferenceSetting(
shared_pref, setting)
def _InstallNfcApk(self):
"""Installs the APK that allows VR tests to simulate a headset NFC scan."""
chromium_root = path_util.GetChromiumSrcDir()
# Find the most recently build APK
candidate_apks = []
for build_path in util.GetBuildDirectories(chromium_root):
apk_path = os.path.join(build_path, 'apks', 'VrNfcSimulator.apk')
if os.path.exists(apk_path):
last_changed = os.path.getmtime(apk_path)
candidate_apks.append((last_changed, apk_path))
if not candidate_apks:
raise RuntimeError(
'Could not find VrNfcSimulator.apk in a build output directory')
newest_apk_path = sorted(candidate_apks)[-1][1]
self._platform.InstallApplication(
os.path.join(chromium_root, newest_apk_path))
def _InstallKeyboardApk(self):
"""Installs the VR Keyboard APK."""
self._platform.InstallApplication(
os.path.join(path_util.GetChromiumSrcDir(), 'third_party',
'gvr-android-sdk', 'test-apks', 'vr_keyboard',
'vr_keyboard_current.apk'))
def _SetFakePoseTrackerIfNotSet(self):
if self._story_set.use_fake_pose_tracker and not self._did_set_tracker:
self.SetPoseTrackerType('fake')
self.SetPoseTrackerMode('sweep')
self._did_set_tracker = True
def SetPoseTrackerType(self, tracker_type):
"""Sets the VrCore pose tracker to the given type.
Only works if VrCore has been configured to use the VrCore-side tracker
by setting EnableVrCoreHeadTracking to true. This setting persists between
VR sessions and Chrome restarts.
Args:
tracker_type: A string corresponding to the tracker type to set.
Raises:
RuntimeError if the given |tracker_type| is not in the supported list.
"""
if tracker_type not in SUPPORTED_POSE_TRACKER_TYPES:
raise RuntimeError('Given tracker %s is not supported.' % tracker_type)
self.platform.StartAndroidService(start_intent=intent.Intent(
action='com.google.vr.vrcore.SET_TRACKER_TYPE',
component=FAKE_TRACKER_COMPONENT,
extras={'com.google.vr.vrcore.TRACKER_TYPE': tracker_type}))
def SetPoseTrackerMode(self, tracker_mode):
"""Sets the fake VrCore pose tracker to provide poses in the given mode.
Only works after SetPoseTrackerType has been set to 'fake'. This setting
persists between VR sessions and Chrome restarts.
Args:
tracker_mode: A string corresponding to the tracker mode to set.
Raises:
RuntimeError if the given |tracker_mode| is not in the supported list.
"""
if tracker_mode not in SUPPORTED_POSE_TRACKER_MODES:
raise RuntimeError('Given mode %s is not supported.' % tracker_mode)
self.platform.StartAndroidService(start_intent=intent.Intent(
action='com.google.vr.vrcore.SET_FAKE_TRACKER_MODE',
component=FAKE_TRACKER_COMPONENT,
extras={'com.google.vr.vrcore.FAKE_TRACKER_MODE': tracker_mode}))
def WillRunStory(self, page):
super(SharedAndroidVrPageState, self).WillRunStory(page)
if not self._finder_options.disable_screen_reset:
self._CycleScreen()
self._SetFakePoseTrackerIfNotSet()
def TearDownState(self):
super(SharedAndroidVrPageState, self).TearDownState()
# Reset the tracker type to use the actual sensor if it's been changed. When
# run on the bots, this shouldn't matter since the service will be killed
# during the automatic restart, but this could persist when run locally.
if self._did_set_tracker:
self.SetPoseTrackerType('sensor')
# Re-apply Cardboard as the viewer to leave the device in a consistent
# state after a benchmark run
# TODO(bsheedy): Remove this after crbug.com/772969 is fixed
self._ConfigureVrCore(os.path.join(path_util.GetChromiumSrcDir(),
CARDBOARD_PATH))
def _CycleScreen(self):
"""Cycles the screen off then on.
This is because VR test devices are set to have normal screen brightness and
automatically turn off after several minutes instead of the usual approach
of having the screen always on at minimum brightness. This is due to the
motion-to-photon latency test being sensitive to screen brightness, and min
brightness does not work well for it.
Simply using TurnScreenOn does not actually reset the timer for turning off
the screen, so instead cycle the screen to refresh it periodically.
"""
self.platform.android_action_runner.TurnScreenOff()
self.platform.android_action_runner.TurnScreenOn()
@property
def platform(self):
return self._platform
@property
def recording_wpr(self):
return self._finder_options.recording_wpr
| en | 000118785_zipated-src_shared_android_vr_page_state_2f57ebea62e5.py | unknown | 2,450 |
#!/usr/bin/env vpython
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import patch_orderfile
import symbol_extractor
class TestPatchOrderFile(unittest.TestCase):
def testRemoveSuffixes(self):
no_clone = 'this.does.not.contain.clone'
self.assertEquals(no_clone, patch_orderfile.RemoveSuffixes(no_clone))
with_clone = 'this.does.contain.clone.'
self.assertEquals(
'this.does.contain', patch_orderfile.RemoveSuffixes(with_clone))
with_part = 'this.is.a.part.42'
self.assertEquals(
'this.is.a', patch_orderfile.RemoveSuffixes(with_part))
def testUniqueGenerator(self):
@patch_orderfile._UniqueGenerator
def TestIterator():
yield 1
yield 2
yield 1
yield 3
self.assertEqual(list(TestIterator()), [1,2,3])
def testMaxOutlinedIndex(self):
self.assertEquals(7, patch_orderfile._GetMaxOutlinedIndex(
{'OUTLINED_FUNCTION_{}'.format(idx): None
for idx in [1, 2, 3, 7]}))
self.assertRaises(AssertionError, patch_orderfile._GetMaxOutlinedIndex,
{'OUTLINED_FUNCTION_{}'.format(idx): None
for idx in [1, 200, 3, 11]})
self.assertEquals(None, patch_orderfile._GetMaxOutlinedIndex(
{'a': None, 'b': None}))
def testPatchedSymbols(self):
# From input symbols a b c d, symbols a and d match themselves, symbol
# b matches b and x, and symbol c is missing.
self.assertEquals(list('abxd'),
list(patch_orderfile._PatchedSymbols(
{'a': 'a', 'b': 'bx', 'd': 'd'},
'abcd', None)))
def testPatchedSymbolsWithOutlining(self):
# As above, but add outlined functions at the end. The aliased outlined
# function should be ignored.
self.assertEquals(
list('abd') + ['OUTLINED_FUNCTION_{}'.format(i) for i in range(5)],
list(
patch_orderfile._PatchedSymbols(
{
'a': 'a',
'b': ['b', 'OUTLINED_FUNCTION_4'],
'd': 'd'
}, ['a', 'b', 'OUTLINED_FUNCTION_2', 'c', 'd'], 2)))
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/env vpython
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import patch_orderfile
import symbol_extractor
class TestPatchOrderFile(unittest.TestCase):
def testRemoveSuffixes(self):
no_clone = 'this.does.not.contain.clone'
self.assertEquals(no_clone, patch_orderfile.RemoveSuffixes(no_clone))
with_clone = 'this.does.contain.clone.'
self.assertEquals(
'this.does.contain', patch_orderfile.RemoveSuffixes(with_clone))
with_part = 'this.is.a.part.42'
self.assertEquals(
'this.is.a', patch_orderfile.RemoveSuffixes(with_part))
def testUniqueGenerator(self):
@patch_orderfile._UniqueGenerator
def TestIterator():
yield 1
yield 2
yield 1
yield 3
self.assertEqual(list(TestIterator()), [1,2,3])
def testMaxOutlinedIndex(self):
self.assertEquals(7, patch_orderfile._GetMaxOutlinedIndex(
{'OUTLINED_FUNCTION_{}'.format(idx): None
for idx in [1, 2, 3, 7]}))
self.assertRaises(AssertionError, patch_orderfile._GetMaxOutlinedIndex,
{'OUTLINED_FUNCTION_{}'.format(idx): None
for idx in [1, 200, 3, 11]})
self.assertEquals(None, patch_orderfile._GetMaxOutlinedIndex(
{'a': None, 'b': None}))
def testPatchedSymbols(self):
# From input symbols a b c d, symbols a and d match themselves, symbol
# b matches b and x, and symbol c is missing.
self.assertEquals(list('abxd'),
list(patch_orderfile._PatchedSymbols(
{'a': 'a', 'b': 'bx', 'd': 'd'},
'abcd', None)))
def testPatchedSymbolsWithOutlining(self):
# As above, but add outlined functions at the end. The aliased outlined
# function should be ignored.
self.assertEquals(
list('abd') + ['OUTLINED_FUNCTION_{}'.format(i) for i in range(5)],
list(
patch_orderfile._PatchedSymbols(
{
'a': 'a',
'b': ['b', 'OUTLINED_FUNCTION_4'],
'd': 'd'
}, ['a', 'b', 'OUTLINED_FUNCTION_2', 'c', 'd'], 2)))
if __name__ == '__main__':
unittest.main()
| en | 000352637_zealoussnow-chromium_patch_orderfile_unittest_7072846945cc.py | unknown | 725 |
import torch.nn as nn
class MaskL1Loss(nn.Module):
"""
Loss from paper <Pose Guided Person Image Generation> Sec3.1 pose mask loss
"""
def __init__(self, ratio=1):
super(MaskL1Loss, self).__init__()
self.criterion = nn.L1Loss()
self.ratio = ratio
def forward(self, generated_img, target_img, mask):
pose_mask_l1 = self.criterion(generated_img * mask, target_img * mask)
return self.criterion(generated_img, target_img) + pose_mask_l1 * self.ratio
| import torch.nn as nn
class MaskL1Loss(nn.Module):
"""
Loss from paper <Pose Guided Person Image Generation> Sec3.1 pose mask loss
"""
def __init__(self, ratio=1):
super(MaskL1Loss, self).__init__()
self.criterion = nn.L1Loss()
self.ratio = ratio
def forward(self, generated_img, target_img, mask):
pose_mask_l1 = self.criterion(generated_img * mask, target_img * mask)
return self.criterion(generated_img, target_img) + pose_mask_l1 * self.ratio
| en | 000134487_pasan1992-Human-Pose-Transfer_loss_0811e1f4ed8a.py | unknown | 161 |
"""Takes care of bans and post cooldowns"""
from typing import Tuple
from uchan.lib.exceptions import ArgumentError
from uchan.lib.mod_log import mod_log
from uchan.lib.model import BanModel, BoardModel, ThreadModel
from uchan.lib.proxy_request import get_request_ip4
from uchan.lib.repository import bans, posts
from uchan.lib.service import board_service
from uchan.lib.utils import now, ip4_to_str
NEW_THREAD_COOLDOWN = 600 * 1000
NEW_POST_COOLDOWN = 60 * 1000
MAX_BAN_TIME = 24 * 31 * 60 * 60 * 1000
MAX_REASON_LENGTH = 2000
MESSAGE_BAN_TOO_LONG = 'Ban too long'
MESSAGE_IP4_ILLEGAL_RANGE = 'ip4 end must be bigger than ip4'
MESSAGE_BOARD_NOT_FOUND = 'Board not found'
MESSAGE_BAN_TEXT_TOO_LONG = 'Ban reason text too long'
def is_request_banned(ip4, board):
bans = find_bans(ip4, board)
return len(bans) > 0
def is_request_suspended(ip4: int, board: BoardModel, thread: ThreadModel) -> Tuple[bool, int]:
timeout = NEW_THREAD_COOLDOWN if thread is None else NEW_POST_COOLDOWN
from_time = now() - timeout
post_list = posts.find_posts_by_ip4_from_time(ip4, from_time, by_thread=thread)
if post_list:
most_recent = post_list[0]
time_left = (most_recent.date + timeout - now()) // 1000
return True, time_left
return False, 0
def get_request_bans(clear_if_expired=False):
ip4 = get_request_ip4()
return find_bans(ip4, clear_if_expired=clear_if_expired)
def find_bans(ip4: int, board: BoardModel = None, clear_if_expired=False):
ban_list = bans.find_by_ip4(ip4, board)
applied_bans = list(filter(lambda i: ban_applies(i, ip4, board), ban_list))
if clear_if_expired:
# Delete the ban after the user has seen it when it expired
for ban in filter(lambda i: ban_expired(i), ban_list):
delete_ban(ban)
return applied_bans
def ban_applies(ban: BanModel, ip4: int, board: BoardModel) -> bool:
if ban.board and board and ban.board != board.name:
return False
if ban.ip4_end is not None:
return ban.ip4 < ip4 < ban.ip4_end
else:
return ban.ip4 == ip4
def ban_expired(ban: BanModel) -> bool:
if ban.length == 0:
return False
return now() > ban.date + ban.length
def add_ban(ban: BanModel) -> BanModel:
if ban.length > MAX_BAN_TIME:
raise ArgumentError(MESSAGE_BAN_TOO_LONG)
if ban.ip4_end is not None and ban.ip4_end <= ban.ip4:
raise ArgumentError(MESSAGE_IP4_ILLEGAL_RANGE)
if ban.board:
board = board_service.find_board(ban.board)
if not board:
raise ArgumentError(MESSAGE_BOARD_NOT_FOUND)
if ban.reason and len(ban.reason) > MAX_REASON_LENGTH:
raise ArgumentError(MESSAGE_BAN_TEXT_TOO_LONG)
ban.date = now()
ban = bans.create_ban(ban)
for_board_text = ' on {}'.format(ban.board) if ban.board else ''
ip4_end_text = ip4_to_str(ban.ip4_end) if ban.ip4_end is not None else '-'
f = 'ban add {} from {} to {}{} for {} hours reason {}'
text = f.format(ban.id, ip4_to_str(ban.ip4), ip4_end_text, for_board_text, ban.length / 60 / 60 / 1000, ban.reason)
mod_log(text)
return ban
def delete_ban(ban: BanModel):
bans.delete_ban(ban)
def find_ban_id(ban_id) -> BanModel:
return bans.find_by_id(ban_id)
| """Takes care of bans and post cooldowns"""
from typing import Tuple
from uchan.lib.exceptions import ArgumentError
from uchan.lib.mod_log import mod_log
from uchan.lib.model import BanModel, BoardModel, ThreadModel
from uchan.lib.proxy_request import get_request_ip4
from uchan.lib.repository import bans, posts
from uchan.lib.service import board_service
from uchan.lib.utils import now, ip4_to_str
NEW_THREAD_COOLDOWN = 600 * 1000
NEW_POST_COOLDOWN = 60 * 1000
MAX_BAN_TIME = 24 * 31 * 60 * 60 * 1000
MAX_REASON_LENGTH = 2000
MESSAGE_BAN_TOO_LONG = 'Ban too long'
MESSAGE_IP4_ILLEGAL_RANGE = 'ip4 end must be bigger than ip4'
MESSAGE_BOARD_NOT_FOUND = 'Board not found'
MESSAGE_BAN_TEXT_TOO_LONG = 'Ban reason text too long'
def is_request_banned(ip4, board):
bans = find_bans(ip4, board)
return len(bans) > 0
def is_request_suspended(ip4: int, board: BoardModel, thread: ThreadModel) -> Tuple[bool, int]:
timeout = NEW_THREAD_COOLDOWN if thread is None else NEW_POST_COOLDOWN
from_time = now() - timeout
post_list = posts.find_posts_by_ip4_from_time(ip4, from_time, by_thread=thread)
if post_list:
most_recent = post_list[0]
time_left = (most_recent.date + timeout - now()) // 1000
return True, time_left
return False, 0
def get_request_bans(clear_if_expired=False):
ip4 = get_request_ip4()
return find_bans(ip4, clear_if_expired=clear_if_expired)
def find_bans(ip4: int, board: BoardModel = None, clear_if_expired=False):
ban_list = bans.find_by_ip4(ip4, board)
applied_bans = list(filter(lambda i: ban_applies(i, ip4, board), ban_list))
if clear_if_expired:
# Delete the ban after the user has seen it when it expired
for ban in filter(lambda i: ban_expired(i), ban_list):
delete_ban(ban)
return applied_bans
def ban_applies(ban: BanModel, ip4: int, board: BoardModel) -> bool:
if ban.board and board and ban.board != board.name:
return False
if ban.ip4_end is not None:
return ban.ip4 < ip4 < ban.ip4_end
else:
return ban.ip4 == ip4
def ban_expired(ban: BanModel) -> bool:
if ban.length == 0:
return False
return now() > ban.date + ban.length
def add_ban(ban: BanModel) -> BanModel:
if ban.length > MAX_BAN_TIME:
raise ArgumentError(MESSAGE_BAN_TOO_LONG)
if ban.ip4_end is not None and ban.ip4_end <= ban.ip4:
raise ArgumentError(MESSAGE_IP4_ILLEGAL_RANGE)
if ban.board:
board = board_service.find_board(ban.board)
if not board:
raise ArgumentError(MESSAGE_BOARD_NOT_FOUND)
if ban.reason and len(ban.reason) > MAX_REASON_LENGTH:
raise ArgumentError(MESSAGE_BAN_TEXT_TOO_LONG)
ban.date = now()
ban = bans.create_ban(ban)
for_board_text = ' on {}'.format(ban.board) if ban.board else ''
ip4_end_text = ip4_to_str(ban.ip4_end) if ban.ip4_end is not None else '-'
f = 'ban add {} from {} to {}{} for {} hours reason {}'
text = f.format(ban.id, ip4_to_str(ban.ip4), ip4_end_text, for_board_text, ban.length / 60 / 60 / 1000, ban.reason)
mod_log(text)
return ban
def delete_ban(ban: BanModel):
bans.delete_ban(ban)
def find_ban_id(ban_id) -> BanModel:
return bans.find_by_id(ban_id)
| en | 000529290_alanbato-tchan_ban_service_b987f43ad5b4.py | unknown | 1,215 |
"""
Book: Django RESTful Web Services
Author: Gaston C. Hillar - Twitter.com/gastonhillar
Publisher: Packt Publishing Ltd. - http://www.packtpub.com
"""
from rest_framework import serializers
from drones.models import DroneCategory
from drones.models import Drone
from drones.models import Pilot
from drones.models import Competition
import drones.views
from django.contrib.auth.models import User
class UserDroneSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Drone
fields = (
'url',
'name')
class UserSerializer(serializers.HyperlinkedModelSerializer):
drones = UserDroneSerializer(
many=True,
read_only=True)
class Meta:
model = User
fields = (
'url',
'pk',
'username',
'drone')
class DroneCategorySerializer(serializers.HyperlinkedModelSerializer):
drones = serializers.HyperlinkedRelatedField(
many=True,
read_only=True,
view_name='drone-detail')
class Meta:
model = DroneCategory
fields = (
'url',
'pk',
'name',
'drones')
class DroneSerializer(serializers.HyperlinkedModelSerializer):
# Display the category name
drone_category = serializers.SlugRelatedField(queryset=DroneCategory.objects.all(),
slug_field='name')
# Display the owner's username (read-only)
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Drone
fields = (
'url',
'name',
'drone_category',
'owner',
'manufacturing_date',
'has_it_competed',
'inserted_timestamp')
class CompetitionSerializer(serializers.HyperlinkedModelSerializer):
# Display all the details for the related drone
drone = DroneSerializer()
class Meta:
model = Competition
fields = (
'url',
'pk',
'distance_in_feet',
'distance_achievement_date',
'drone')
class PilotSerializer(serializers.HyperlinkedModelSerializer):
competitions = CompetitionSerializer(many=True, read_only=True)
gender = serializers.ChoiceField(
choices=Pilot.GENDER_CHOICES)
gender_description = serializers.CharField(
source='get_gender_display',
read_only=True)
class Meta:
model = Pilot
fields = (
'url',
'name',
'gender',
'gender_description',
'races_count',
'inserted_timestamp',
'competitions')
class PilotCompetitionSerializer(serializers.ModelSerializer):
# Display the pilot's name
pilot = serializers.SlugRelatedField(queryset=Pilot.objects.all(), slug_field='name')
# Display the drone's name
drone = serializers.SlugRelatedField(queryset=Drone.objects.all(), slug_field='name')
class Meta:
model = Competition
fields = (
'url',
'pk',
'distance_in_feet',
'distance_achievement_date',
'pilot',
'drone')
| """
Book: Django RESTful Web Services
Author: Gaston C. Hillar - Twitter.com/gastonhillar
Publisher: Packt Publishing Ltd. - http://www.packtpub.com
"""
from rest_framework import serializers
from drones.models import DroneCategory
from drones.models import Drone
from drones.models import Pilot
from drones.models import Competition
import drones.views
from django.contrib.auth.models import User
class UserDroneSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Drone
fields = (
'url',
'name')
class UserSerializer(serializers.HyperlinkedModelSerializer):
drones = UserDroneSerializer(
many=True,
read_only=True)
class Meta:
model = User
fields = (
'url',
'pk',
'username',
'drone')
class DroneCategorySerializer(serializers.HyperlinkedModelSerializer):
drones = serializers.HyperlinkedRelatedField(
many=True,
read_only=True,
view_name='drone-detail')
class Meta:
model = DroneCategory
fields = (
'url',
'pk',
'name',
'drones')
class DroneSerializer(serializers.HyperlinkedModelSerializer):
# Display the category name
drone_category = serializers.SlugRelatedField(queryset=DroneCategory.objects.all(),
slug_field='name')
# Display the owner's username (read-only)
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Drone
fields = (
'url',
'name',
'drone_category',
'owner',
'manufacturing_date',
'has_it_competed',
'inserted_timestamp')
class CompetitionSerializer(serializers.HyperlinkedModelSerializer):
# Display all the details for the related drone
drone = DroneSerializer()
class Meta:
model = Competition
fields = (
'url',
'pk',
'distance_in_feet',
'distance_achievement_date',
'drone')
class PilotSerializer(serializers.HyperlinkedModelSerializer):
competitions = CompetitionSerializer(many=True, read_only=True)
gender = serializers.ChoiceField(
choices=Pilot.GENDER_CHOICES)
gender_description = serializers.CharField(
source='get_gender_display',
read_only=True)
class Meta:
model = Pilot
fields = (
'url',
'name',
'gender',
'gender_description',
'races_count',
'inserted_timestamp',
'competitions')
class PilotCompetitionSerializer(serializers.ModelSerializer):
# Display the pilot's name
pilot = serializers.SlugRelatedField(queryset=Pilot.objects.all(), slug_field='name')
# Display the drone's name
drone = serializers.SlugRelatedField(queryset=Drone.objects.all(), slug_field='name')
class Meta:
model = Competition
fields = (
'url',
'pk',
'distance_in_feet',
'distance_achievement_date',
'pilot',
'drone')
| en | 000518984_weiliy-Django-RESTful-Web-Services_serializers_a586f3d41e1d.py | unknown | 832 |
"""
One of Ploomber's main goals is to allow writing robust/reliable code in an
interactive way. Interactive workflows make people more productive but they
might come in detriment of writing high quality code (e.g. developing a
pipeline in a single ipynb file). The basic idea for this module is to provide
a way to transparently go back and forth between a Task in a DAG and a
temporary Jupyter notebook. Currently, we only provide this for PythonCallable
and NotebookRunner but the idea is to expand to other tasks, so we have to
decide on a common behavior for this, here are a few rules:
1) Temporary jupyter notebook are usually destroyed when the user closes the
jupyter applciation. But there are extraordinary cases where we don't want to
remove it, as it might cause code loss. e.g. if the user calls
PythonCallable.develop() and while it is editing the notebook the module where
the source function is defined, we risk corrupting the module file, so we abort
overriding changes but still keep the temporary notebook. For this reason,
we save temporary notebooks in the same location of the source being edited,
to make it easier to recognize which file is related to.
2) The current working directory (cwd) in the session where Task.develop() is
called can be different from the cwd in the Jupyter application. This happens
because Jupyter sets the cwd to the current parent folder, this means that
any relative path defined in the DAG, will break if the cwd in the Jupyter app
is not the same as in the DAg declaration. To fix this, we always add a top
cell in temporary notebooks to make the cwd the same folder where
Task.develop() was called.
3) [TODO] all temporary cells must have a tmp- preffx
TODO: move the logic that implements NotebookRunner.{develop, debug} to this
module
"""
import importlib
from itertools import chain
from pathlib import Path
import inspect
import warnings
import jupyter_client
# papermill is importing a deprecated module from pyarrow
with warnings.catch_warnings():
warnings.simplefilter('ignore', FutureWarning)
from papermill.translators import PythonTranslator
import parso
import nbformat
from ploomber.util import chdir_code
from ploomber.sources.nb_utils import find_cell_with_tag
from ploomber.static_analysis.python import PythonCallableExtractor
from ploomber.sources.inspect import getfile
# TODO: test for locally defined objects
# TODO: reloading the fn causes trobule if it enters into an inconsistent
# state, e.g. a module that does not exist is saved, next time is realoaded,
# it will fail because it has to import such module
# TODO: if we remove upstream refernces from the functions body from jupyter
# the parameter is deleted from the signature but on reload (dag.render())
# signature validation fails bc it still loads the old signature, two options:
# either force reload all modules from all pythoncallables, or re-implement
# the signature check to get the signature using static analysis, not sure
# which is best
class CallableInteractiveDeveloper:
"""Convert callables to notebooks, edit and save back
Parameters
----------
fn : callable
Function to edit
params : dict
Parameters to call the function
Examples
--------
>>> wih CallableInteractiveDeveloper(fn, {'param': 1}) as path_to_nb:
... # do stuff with the notebook file
... pass
"""
def __init__(self, fn, params):
self.fn = fn
self.path_to_source = Path(inspect.getsourcefile(fn))
self.params = params
self.tmp_path = self.path_to_source.with_name(
self.path_to_source.with_suffix('').name + '-tmp.ipynb')
self._source_code = None
def _reload_fn(self):
# force to reload module to get the right information in case the
# original source code was modified and the function is no longer in
# the same position
# NOTE: are there any problems with this approach?
# we could also read the file directly and use ast/parso to get the
# function's information we need
mod = importlib.reload(inspect.getmodule(self.fn))
self.fn = getattr(mod, self.fn.__name__)
def to_nb(self, path=None):
"""
Converts the function to is notebook representation, Returns a
notebook object, if path is passed, it saves the notebook as well
Returns the function's body in a notebook (tmp location), inserts
params as variables at the top
"""
self._reload_fn()
body_elements, _ = parse_function(self.fn)
top, local, bottom = extract_imports(self.fn)
return function_to_nb(body_elements, top, local, bottom, self.params,
self.fn, path)
def overwrite(self, obj):
"""
Overwrite the function's body with the notebook contents, excluding
injected parameters and cells whose first line is "#". obj can be
either a notebook object or a path
"""
self._reload_fn()
if isinstance(obj, (str, Path)):
nb = nbformat.read(obj, as_version=nbformat.NO_CONVERT)
else:
nb = obj
nb.cells = nb.cells[:last_non_empty_cell(nb.cells)]
# remove cells that are only needed for the nb but not for the function
code_cells = [c['source'] for c in nb.cells if keep_cell(c)]
# add 4 spaces to each code cell, exclude white space lines
code_cells = [indent_cell(code) for code in code_cells]
# get the original file where the function is defined
content = self.path_to_source.read_text()
content_lines = content.splitlines()
trailing_newline = content[-1] == '\n'
# an upstream parameter
fn_starts, fn_ends = function_lines(self.fn)
# keep the file the same until you reach the function definition plus
# an offset to account for the signature (which might span >1 line)
_, body_start = parse_function(self.fn)
keep_until = fn_starts + body_start
header = content_lines[:keep_until]
# the footer is everything below the end of the original definition
footer = content_lines[fn_ends:]
# if there is anything at the end, we have to add an empty line to
# properly end the function definition, if this is the last definition
# in the file, we don't have to add this
if footer:
footer = [''] + footer
new_content = '\n'.join(header + code_cells + footer)
# replace old top imports with new ones
new_content_lines = new_content.splitlines()
_, line = extract_imports_top(parso.parse(new_content),
new_content_lines)
imports_top_cell, _ = find_cell_with_tag(nb, 'imports-top')
# ignore trailing whitespace in top imports cell but keep original
# amount of whitespace separating the last import and the first name
# definition
content_to_write = (imports_top_cell['source'].rstrip() + '\n' +
'\n'.join(new_content_lines[line - 1:]))
# if the original file had a trailing newline, keep it
if trailing_newline:
content_to_write += '\n'
# NOTE: this last part parses the code several times, we can improve
# performance by only parsing once
m = parso.parse(content_to_write)
fn_def = find_function_with_name(m, self.fn.__name__)
fn_code = fn_def.get_code()
has_upstream_dependencies = PythonCallableExtractor(
fn_code).extract_upstream()
upstream_in_func_sig = upstream_in_func_signature(fn_code)
if not upstream_in_func_sig and has_upstream_dependencies:
fn_code_new = add_upstream_to_func_signature(fn_code)
content_to_write = _replace_fn_source(content_to_write, fn_def,
fn_code_new)
elif upstream_in_func_sig and not has_upstream_dependencies:
fn_code_new = remove_upstream_to_func_signature(fn_code)
content_to_write = _replace_fn_source(content_to_write, fn_def,
fn_code_new)
self.path_to_source.write_text(content_to_write)
def __enter__(self):
self._source_code = self.path_to_source.read_text()
self.to_nb(path=self.tmp_path)
return str(self.tmp_path)
def __exit__(self, exc_type, exc_val, exc_tb):
current_source_code = self.path_to_source.read_text()
if self._source_code != current_source_code:
raise ValueError(f'File "{self.path_to_source}" (where '
f'callable "{self.fn.__name__}" is defined) '
'changed while editing the function in the '
'notebook app. This might lead to corrupted '
'source files. Changes from the notebook were '
'not saved back to the module. Notebook '
f'available at "{self.tmp_path}')
self.overwrite(self.tmp_path)
Path(self.tmp_path).unlink()
def __del__(self):
tmp = Path(self.tmp_path)
if tmp.exists():
tmp.unlink()
def last_non_empty_cell(cells):
"""Returns the index + 1 for the last non-empty cell
"""
idx = len(cells)
for cell in cells[::-1]:
if cell.source:
return idx
idx -= 1
return idx
def keep_cell(cell):
"""
Rule to decide whether to keep a cell or not. This is executed before
converting the notebook back to a function
"""
cell_tags = set(cell['metadata'].get('tags', {}))
# remove cell with this tag, they are not part of the function body
tags_to_remove = {
'injected-parameters',
'imports-top',
'imports-local',
'imports-bottom',
'debugging-settings',
}
has_tags_to_remove = len(cell_tags & tags_to_remove)
return (cell['cell_type'] == 'code' and not has_tags_to_remove
and cell['source'][:2] != '#\n')
def indent_line(lline):
return ' ' + lline if lline else ''
def indent_cell(code):
return '\n'.join([indent_line(line) for line in code.splitlines()])
def body_elements_from_source(source):
# getsource adds a new line at the end of the the function, we don't need
# this
body = parso.parse(source).children[0].children[-1]
# parso is adding a new line as first element, not sure if this
# happens always though
if isinstance(body.children[0], parso.python.tree.Newline):
body_elements = body.children[1:]
else:
body_elements = body.children
return body_elements, body.start_pos[0] - 1
def parse_function(fn):
"""
Extract function's source code, parse it and return function body
elements along with the # of the last line for the signature (which
marks the beginning of the function's body) and all the imports
"""
# TODO: exclude return at the end, what if we find more than one?
# maybe do not support functions with return statements for now
source = inspect.getsource(fn).rstrip()
body_elements, start_pos = body_elements_from_source(source)
return body_elements, start_pos
def extract_imports(fn):
source = Path(getfile(fn)).read_text()
module = parso.parse(source)
lines = source.splitlines()
imports_top, line = extract_imports_top(module, lines)
# any imports below the top imports
lines_bottom = '\n'.join(lines[line - 1:])
imports_bottom = '\n'.join(
imp.get_code() for imp in parso.parse(lines_bottom).iter_imports())
# generate imports from local definitions
imports_local = make_import_from_definitions(module, fn)
return (
imports_top,
imports_local,
imports_bottom if imports_bottom else None,
)
def extract_imports_top(module, lines):
ch = module.children[0]
while True:
if ch:
if not has_import(ch):
break
else:
break
ch = ch.get_next_sibling()
line, _ = ch.start_pos
# line numbers start at 1...
imports_top = '\n'.join(lines[:line - 1])
new_lines = trailing_newlines(imports_top)
return imports_top[:-new_lines], line - new_lines
def has_import(stmt):
"""
Check if statement contains an import
"""
for ch in stmt.children:
if ch.type in {'import_name', 'import_from'}:
return True
return False
def trailing_newlines(s):
n = 0
for char in reversed(s):
if char != '\n':
break
n += 1
return n
def function_lines(fn):
lines, start = inspect.getsourcelines(fn)
end = start + len(lines)
return start, end
def get_func_and_class_names(module):
return [
defs.name.get_code().strip()
for defs in chain(module.iter_funcdefs(), module.iter_classdefs())
]
def make_import_from_definitions(module, fn):
module_name = inspect.getmodule(fn).__name__
names = [
name for name in get_func_and_class_names(module)
if name != fn.__name__
]
if names:
names_all = ', '.join(names)
return f'from {module_name} import {names_all}'
def function_to_nb(body_elements, imports_top, imports_local, imports_bottom,
params, fn, path):
"""
Save function body elements to a notebook
"""
# TODO: Params should implement an option to call to_json_serializable
# on product to avoid repetition I'm using this same code in notebook
# runner. Also raise error if any of the params is not
# json serializable
try:
params = params.to_json_serializable()
params['product'] = params['product'].to_json_serializable()
except AttributeError:
pass
nb_format = nbformat.versions[nbformat.current_nbformat]
nb = nb_format.new_notebook()
# get the module where the function is declared
tokens = inspect.getmodule(fn).__name__.split('.')
module_name = '.'.join(tokens[:-1])
# add cell that chdirs for the current working directory
# add __package__, we need this for relative imports to work
# see: https://www.python.org/dev/peps/pep-0366/ for details
source = """
# Debugging settings (this cell will be removed before saving)
# change the current working directory to the one when .debug() happen
# to make relative paths work
import os
{}
__package__ = "{}"
""".format(chdir_code(Path('.').resolve()), module_name)
cell = nb_format.new_code_cell(source,
metadata={'tags': ['debugging-settings']})
nb.cells.append(cell)
# then add params passed to the function
cell = nb_format.new_code_cell(PythonTranslator.codify(params),
metadata={'tags': ['injected-parameters']})
nb.cells.append(cell)
# first three cells: imports
for code, tag in ((imports_top, 'imports-top'),
(imports_local, 'imports-local'), (imports_bottom,
'imports-bottom')):
if code:
nb.cells.append(
nb_format.new_code_cell(source=code,
metadata=dict(tags=[tag])))
for statement in body_elements:
lines, newlines = split_statement(statement)
# find indentation # of characters using the first line
idx = indentation_idx(lines[0])
# remove indentation from all function body lines
lines = [line[idx:] for line in lines]
# add one empty cell per leading new line
nb.cells.extend(
[nb_format.new_code_cell(source='') for _ in range(newlines)])
# add actual code as a single string
cell = nb_format.new_code_cell(source='\n'.join(lines))
nb.cells.append(cell)
k = jupyter_client.kernelspec.get_kernel_spec('python3')
nb.metadata.kernelspec = {
"display_name": k.display_name,
"language": k.language,
"name": 'python3'
}
if path:
nbformat.write(nb, path)
return nb
def split_statement(statement):
code = statement.get_code()
newlines = 0
for char in code:
if char != '\n':
break
newlines += 1
lines = code.strip('\n').split('\n')
return lines, newlines
def indentation_idx(line):
idx = len(line) - len(line.lstrip())
return idx
def upstream_in_func_signature(source):
_, params = _get_func_def_and_params(source)
return 'upstream' in set(p.name.get_code().strip() for p in params
if p.type == 'param')
def add_upstream_to_func_signature(source):
fn, params = _get_func_def_and_params(source)
# add a "," if there is at least one param
params.insert(-1, ', upstream' if len(params) > 2 else 'upstream')
signature = try_get_code(params)
fn.children[2] = signature
# delete leading newline code, to avoid duplicating it
return try_get_code(fn.children).lstrip('\n')
def remove_upstream_to_func_signature(source):
fn, params = _get_func_def_and_params(source)
params_names = (p.get_code().strip(', ') for p in params[1:-1])
params_list = ', '.join(p for p in params_names if p != 'upstream')
signature = f'({params_list})'
fn.children[2] = signature
# delete leading newline code, to avoid duplicating it
return try_get_code(fn.children).lstrip('\n')
def _get_func_def_and_params(source):
fn = parso.parse(source).children[0]
if fn.type != 'funcdef':
raise ValueError('Expected first element from parse source'
f' code to be "funcdef", got {fn.type!r}')
return fn, fn.children[2].children
def _replace_fn_source(content_to_write, fn_def, fn_code_new):
line_from, line_to = fn_def.start_pos[0], fn_def.end_pos[0]
lines = content_to_write.splitlines()
lines_new = (lines[:line_from - 1] + [fn_code_new] + lines[line_to - 1:])
return '\n'.join(lines_new)
def try_get_code(elements):
code = []
for p in elements:
try:
s = p.get_code()
except AttributeError:
s = p
code.append(s)
return ''.join(code)
def find_function_with_name(module, fn_name):
for fn_def in module.iter_funcdefs():
if fn_def.name.get_code().strip() == fn_name:
return fn_def
| """
One of Ploomber's main goals is to allow writing robust/reliable code in an
interactive way. Interactive workflows make people more productive but they
might come in detriment of writing high quality code (e.g. developing a
pipeline in a single ipynb file). The basic idea for this module is to provide
a way to transparently go back and forth between a Task in a DAG and a
temporary Jupyter notebook. Currently, we only provide this for PythonCallable
and NotebookRunner but the idea is to expand to other tasks, so we have to
decide on a common behavior for this, here are a few rules:
1) Temporary jupyter notebook are usually destroyed when the user closes the
jupyter applciation. But there are extraordinary cases where we don't want to
remove it, as it might cause code loss. e.g. if the user calls
PythonCallable.develop() and while it is editing the notebook the module where
the source function is defined, we risk corrupting the module file, so we abort
overriding changes but still keep the temporary notebook. For this reason,
we save temporary notebooks in the same location of the source being edited,
to make it easier to recognize which file is related to.
2) The current working directory (cwd) in the session where Task.develop() is
called can be different from the cwd in the Jupyter application. This happens
because Jupyter sets the cwd to the current parent folder, this means that
any relative path defined in the DAG, will break if the cwd in the Jupyter app
is not the same as in the DAg declaration. To fix this, we always add a top
cell in temporary notebooks to make the cwd the same folder where
Task.develop() was called.
3) [TODO] all temporary cells must have a tmp- preffx
TODO: move the logic that implements NotebookRunner.{develop, debug} to this
module
"""
import importlib
from itertools import chain
from pathlib import Path
import inspect
import warnings
import jupyter_client
# papermill is importing a deprecated module from pyarrow
with warnings.catch_warnings():
warnings.simplefilter('ignore', FutureWarning)
from papermill.translators import PythonTranslator
import parso
import nbformat
from ploomber.util import chdir_code
from ploomber.sources.nb_utils import find_cell_with_tag
from ploomber.static_analysis.python import PythonCallableExtractor
from ploomber.sources.inspect import getfile
# TODO: test for locally defined objects
# TODO: reloading the fn causes trobule if it enters into an inconsistent
# state, e.g. a module that does not exist is saved, next time is realoaded,
# it will fail because it has to import such module
# TODO: if we remove upstream refernces from the functions body from jupyter
# the parameter is deleted from the signature but on reload (dag.render())
# signature validation fails bc it still loads the old signature, two options:
# either force reload all modules from all pythoncallables, or re-implement
# the signature check to get the signature using static analysis, not sure
# which is best
class CallableInteractiveDeveloper:
"""Convert callables to notebooks, edit and save back
Parameters
----------
fn : callable
Function to edit
params : dict
Parameters to call the function
Examples
--------
>>> wih CallableInteractiveDeveloper(fn, {'param': 1}) as path_to_nb:
... # do stuff with the notebook file
... pass
"""
def __init__(self, fn, params):
self.fn = fn
self.path_to_source = Path(inspect.getsourcefile(fn))
self.params = params
self.tmp_path = self.path_to_source.with_name(
self.path_to_source.with_suffix('').name + '-tmp.ipynb')
self._source_code = None
def _reload_fn(self):
# force to reload module to get the right information in case the
# original source code was modified and the function is no longer in
# the same position
# NOTE: are there any problems with this approach?
# we could also read the file directly and use ast/parso to get the
# function's information we need
mod = importlib.reload(inspect.getmodule(self.fn))
self.fn = getattr(mod, self.fn.__name__)
def to_nb(self, path=None):
"""
Converts the function to is notebook representation, Returns a
notebook object, if path is passed, it saves the notebook as well
Returns the function's body in a notebook (tmp location), inserts
params as variables at the top
"""
self._reload_fn()
body_elements, _ = parse_function(self.fn)
top, local, bottom = extract_imports(self.fn)
return function_to_nb(body_elements, top, local, bottom, self.params,
self.fn, path)
def overwrite(self, obj):
"""
Overwrite the function's body with the notebook contents, excluding
injected parameters and cells whose first line is "#". obj can be
either a notebook object or a path
"""
self._reload_fn()
if isinstance(obj, (str, Path)):
nb = nbformat.read(obj, as_version=nbformat.NO_CONVERT)
else:
nb = obj
nb.cells = nb.cells[:last_non_empty_cell(nb.cells)]
# remove cells that are only needed for the nb but not for the function
code_cells = [c['source'] for c in nb.cells if keep_cell(c)]
# add 4 spaces to each code cell, exclude white space lines
code_cells = [indent_cell(code) for code in code_cells]
# get the original file where the function is defined
content = self.path_to_source.read_text()
content_lines = content.splitlines()
trailing_newline = content[-1] == '\n'
# an upstream parameter
fn_starts, fn_ends = function_lines(self.fn)
# keep the file the same until you reach the function definition plus
# an offset to account for the signature (which might span >1 line)
_, body_start = parse_function(self.fn)
keep_until = fn_starts + body_start
header = content_lines[:keep_until]
# the footer is everything below the end of the original definition
footer = content_lines[fn_ends:]
# if there is anything at the end, we have to add an empty line to
# properly end the function definition, if this is the last definition
# in the file, we don't have to add this
if footer:
footer = [''] + footer
new_content = '\n'.join(header + code_cells + footer)
# replace old top imports with new ones
new_content_lines = new_content.splitlines()
_, line = extract_imports_top(parso.parse(new_content),
new_content_lines)
imports_top_cell, _ = find_cell_with_tag(nb, 'imports-top')
# ignore trailing whitespace in top imports cell but keep original
# amount of whitespace separating the last import and the first name
# definition
content_to_write = (imports_top_cell['source'].rstrip() + '\n' +
'\n'.join(new_content_lines[line - 1:]))
# if the original file had a trailing newline, keep it
if trailing_newline:
content_to_write += '\n'
# NOTE: this last part parses the code several times, we can improve
# performance by only parsing once
m = parso.parse(content_to_write)
fn_def = find_function_with_name(m, self.fn.__name__)
fn_code = fn_def.get_code()
has_upstream_dependencies = PythonCallableExtractor(
fn_code).extract_upstream()
upstream_in_func_sig = upstream_in_func_signature(fn_code)
if not upstream_in_func_sig and has_upstream_dependencies:
fn_code_new = add_upstream_to_func_signature(fn_code)
content_to_write = _replace_fn_source(content_to_write, fn_def,
fn_code_new)
elif upstream_in_func_sig and not has_upstream_dependencies:
fn_code_new = remove_upstream_to_func_signature(fn_code)
content_to_write = _replace_fn_source(content_to_write, fn_def,
fn_code_new)
self.path_to_source.write_text(content_to_write)
def __enter__(self):
self._source_code = self.path_to_source.read_text()
self.to_nb(path=self.tmp_path)
return str(self.tmp_path)
def __exit__(self, exc_type, exc_val, exc_tb):
current_source_code = self.path_to_source.read_text()
if self._source_code != current_source_code:
raise ValueError(f'File "{self.path_to_source}" (where '
f'callable "{self.fn.__name__}" is defined) '
'changed while editing the function in the '
'notebook app. This might lead to corrupted '
'source files. Changes from the notebook were '
'not saved back to the module. Notebook '
f'available at "{self.tmp_path}')
self.overwrite(self.tmp_path)
Path(self.tmp_path).unlink()
def __del__(self):
tmp = Path(self.tmp_path)
if tmp.exists():
tmp.unlink()
def last_non_empty_cell(cells):
"""Returns the index + 1 for the last non-empty cell
"""
idx = len(cells)
for cell in cells[::-1]:
if cell.source:
return idx
idx -= 1
return idx
def keep_cell(cell):
"""
Rule to decide whether to keep a cell or not. This is executed before
converting the notebook back to a function
"""
cell_tags = set(cell['metadata'].get('tags', {}))
# remove cell with this tag, they are not part of the function body
tags_to_remove = {
'injected-parameters',
'imports-top',
'imports-local',
'imports-bottom',
'debugging-settings',
}
has_tags_to_remove = len(cell_tags & tags_to_remove)
return (cell['cell_type'] == 'code' and not has_tags_to_remove
and cell['source'][:2] != '#\n')
def indent_line(lline):
return ' ' + lline if lline else ''
def indent_cell(code):
return '\n'.join([indent_line(line) for line in code.splitlines()])
def body_elements_from_source(source):
# getsource adds a new line at the end of the the function, we don't need
# this
body = parso.parse(source).children[0].children[-1]
# parso is adding a new line as first element, not sure if this
# happens always though
if isinstance(body.children[0], parso.python.tree.Newline):
body_elements = body.children[1:]
else:
body_elements = body.children
return body_elements, body.start_pos[0] - 1
def parse_function(fn):
"""
Extract function's source code, parse it and return function body
elements along with the # of the last line for the signature (which
marks the beginning of the function's body) and all the imports
"""
# TODO: exclude return at the end, what if we find more than one?
# maybe do not support functions with return statements for now
source = inspect.getsource(fn).rstrip()
body_elements, start_pos = body_elements_from_source(source)
return body_elements, start_pos
def extract_imports(fn):
source = Path(getfile(fn)).read_text()
module = parso.parse(source)
lines = source.splitlines()
imports_top, line = extract_imports_top(module, lines)
# any imports below the top imports
lines_bottom = '\n'.join(lines[line - 1:])
imports_bottom = '\n'.join(
imp.get_code() for imp in parso.parse(lines_bottom).iter_imports())
# generate imports from local definitions
imports_local = make_import_from_definitions(module, fn)
return (
imports_top,
imports_local,
imports_bottom if imports_bottom else None,
)
def extract_imports_top(module, lines):
ch = module.children[0]
while True:
if ch:
if not has_import(ch):
break
else:
break
ch = ch.get_next_sibling()
line, _ = ch.start_pos
# line numbers start at 1...
imports_top = '\n'.join(lines[:line - 1])
new_lines = trailing_newlines(imports_top)
return imports_top[:-new_lines], line - new_lines
def has_import(stmt):
"""
Check if statement contains an import
"""
for ch in stmt.children:
if ch.type in {'import_name', 'import_from'}:
return True
return False
def trailing_newlines(s):
n = 0
for char in reversed(s):
if char != '\n':
break
n += 1
return n
def function_lines(fn):
lines, start = inspect.getsourcelines(fn)
end = start + len(lines)
return start, end
def get_func_and_class_names(module):
return [
defs.name.get_code().strip()
for defs in chain(module.iter_funcdefs(), module.iter_classdefs())
]
def make_import_from_definitions(module, fn):
module_name = inspect.getmodule(fn).__name__
names = [
name for name in get_func_and_class_names(module)
if name != fn.__name__
]
if names:
names_all = ', '.join(names)
return f'from {module_name} import {names_all}'
def function_to_nb(body_elements, imports_top, imports_local, imports_bottom,
params, fn, path):
"""
Save function body elements to a notebook
"""
# TODO: Params should implement an option to call to_json_serializable
# on product to avoid repetition I'm using this same code in notebook
# runner. Also raise error if any of the params is not
# json serializable
try:
params = params.to_json_serializable()
params['product'] = params['product'].to_json_serializable()
except AttributeError:
pass
nb_format = nbformat.versions[nbformat.current_nbformat]
nb = nb_format.new_notebook()
# get the module where the function is declared
tokens = inspect.getmodule(fn).__name__.split('.')
module_name = '.'.join(tokens[:-1])
# add cell that chdirs for the current working directory
# add __package__, we need this for relative imports to work
# see: https://www.python.org/dev/peps/pep-0366/ for details
source = """
# Debugging settings (this cell will be removed before saving)
# change the current working directory to the one when .debug() happen
# to make relative paths work
import os
{}
__package__ = "{}"
""".format(chdir_code(Path('.').resolve()), module_name)
cell = nb_format.new_code_cell(source,
metadata={'tags': ['debugging-settings']})
nb.cells.append(cell)
# then add params passed to the function
cell = nb_format.new_code_cell(PythonTranslator.codify(params),
metadata={'tags': ['injected-parameters']})
nb.cells.append(cell)
# first three cells: imports
for code, tag in ((imports_top, 'imports-top'),
(imports_local, 'imports-local'), (imports_bottom,
'imports-bottom')):
if code:
nb.cells.append(
nb_format.new_code_cell(source=code,
metadata=dict(tags=[tag])))
for statement in body_elements:
lines, newlines = split_statement(statement)
# find indentation # of characters using the first line
idx = indentation_idx(lines[0])
# remove indentation from all function body lines
lines = [line[idx:] for line in lines]
# add one empty cell per leading new line
nb.cells.extend(
[nb_format.new_code_cell(source='') for _ in range(newlines)])
# add actual code as a single string
cell = nb_format.new_code_cell(source='\n'.join(lines))
nb.cells.append(cell)
k = jupyter_client.kernelspec.get_kernel_spec('python3')
nb.metadata.kernelspec = {
"display_name": k.display_name,
"language": k.language,
"name": 'python3'
}
if path:
nbformat.write(nb, path)
return nb
def split_statement(statement):
code = statement.get_code()
newlines = 0
for char in code:
if char != '\n':
break
newlines += 1
lines = code.strip('\n').split('\n')
return lines, newlines
def indentation_idx(line):
idx = len(line) - len(line.lstrip())
return idx
def upstream_in_func_signature(source):
_, params = _get_func_def_and_params(source)
return 'upstream' in set(p.name.get_code().strip() for p in params
if p.type == 'param')
def add_upstream_to_func_signature(source):
fn, params = _get_func_def_and_params(source)
# add a "," if there is at least one param
params.insert(-1, ', upstream' if len(params) > 2 else 'upstream')
signature = try_get_code(params)
fn.children[2] = signature
# delete leading newline code, to avoid duplicating it
return try_get_code(fn.children).lstrip('\n')
def remove_upstream_to_func_signature(source):
fn, params = _get_func_def_and_params(source)
params_names = (p.get_code().strip(', ') for p in params[1:-1])
params_list = ', '.join(p for p in params_names if p != 'upstream')
signature = f'({params_list})'
fn.children[2] = signature
# delete leading newline code, to avoid duplicating it
return try_get_code(fn.children).lstrip('\n')
def _get_func_def_and_params(source):
fn = parso.parse(source).children[0]
if fn.type != 'funcdef':
raise ValueError('Expected first element from parse source'
f' code to be "funcdef", got {fn.type!r}')
return fn, fn.children[2].children
def _replace_fn_source(content_to_write, fn_def, fn_code_new):
line_from, line_to = fn_def.start_pos[0], fn_def.end_pos[0]
lines = content_to_write.splitlines()
lines_new = (lines[:line_from - 1] + [fn_code_new] + lines[line_to - 1:])
return '\n'.join(lines_new)
def try_get_code(elements):
code = []
for p in elements:
try:
s = p.get_code()
except AttributeError:
s = p
code.append(s)
return ''.join(code)
def find_function_with_name(module, fn_name):
for fn_def in module.iter_funcdefs():
if fn_def.name.get_code().strip() == fn_name:
return fn_def
| en | 000135484_MarcoJHB-ploomber_interact_8567958710c9.py | unknown | 5,196 |
import os
from pathlib import Path
from appdirs import user_data_dir
class EnvManager:
"""Stashes environment variables in a file and
retrieves them in (a different process) with get_environ
with failover to os.environ
"""
app_env_dir = Path(user_data_dir("NEBULO"))
app_env = app_env_dir / ".env"
def __init__(self, **env_vars):
# Delete if exists
try:
os.remove(self.app_env)
except OSError:
pass
self.app_env_dir.mkdir(parents=True, exist_ok=True)
self.app_env.touch()
self.vars = env_vars
def __enter__(self):
with self.app_env.open("w") as env_file:
for key, val in self.vars.items():
if val is not None:
env_file.write(f"{key}={val}\n")
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
try:
os.remove(self.app_env)
except OSError:
pass
@classmethod
def get_environ(cls):
try:
with cls.app_env.open("r") as f:
for row in f:
key, value = row.split("=", 1)
os.environ[key.strip()] = value.strip()
except FileNotFoundError:
pass
return os.environ
| import os
from pathlib import Path
from appdirs import user_data_dir
class EnvManager:
"""Stashes environment variables in a file and
retrieves them in (a different process) with get_environ
with failover to os.environ
"""
app_env_dir = Path(user_data_dir("NEBULO"))
app_env = app_env_dir / ".env"
def __init__(self, **env_vars):
# Delete if exists
try:
os.remove(self.app_env)
except OSError:
pass
self.app_env_dir.mkdir(parents=True, exist_ok=True)
self.app_env.touch()
self.vars = env_vars
def __enter__(self):
with self.app_env.open("w") as env_file:
for key, val in self.vars.items():
if val is not None:
env_file.write(f"{key}={val}\n")
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
try:
os.remove(self.app_env)
except OSError:
pass
@classmethod
def get_environ(cls):
try:
with cls.app_env.open("r") as f:
for row in f:
key, value = row.split("=", 1)
os.environ[key.strip()] = value.strip()
except FileNotFoundError:
pass
return os.environ
| en | 000103269_olirice-nebulo_env_c4f36f4a17b9.py | unknown | 387 |
#-*- coding:utf-8 -*-
import keras
import tensorflow as tf
from keras.layers import *
from keras.activations import softmax
from keras.models import Model
from keras.layers.merge import concatenate
from keras.layers.normalization import BatchNormalization
from keras.utils import multi_gpu_model
from encoder import EncoderBase
#refer:https://arxiv.org/abs/1609.06038
class ESIM(EncoderBase):
def __init__(self, **kwargs):
super(ESIM, self).__init__(**kwargs)
self.embedding_size = kwargs['embedding_size']
self.recurrent_units = 300
self.dense_units = 300
def update_features(self, features):
pass
def __call__(self, x_query, x_sample, reuse = tf.AUTO_REUSE, **kwargs):
#embedding_sequence_q1 = BatchNormalization(axis=2)(x_query)
#embedding_sequence_q2 = BatchNormalization(axis=2)(x_sample)
#final_embedding_sequence_q1 = SpatialDropout1D(0.25)(embedding_sequence_q1)
#final_embedding_sequence_q2 = SpatialDropout1D(0.25)(embedding_sequence_q2)
#################### 输入编码input encoding #######################
#分别对query和sample进行双向编码
rnn_layer_q1 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(x_query)
rnn_layer_q2 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(x_sample)
#rnn_layer_q1 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(final_embedding_sequence_q1)
#rnn_layer_q2 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(final_embedding_sequence_q2)
############## 局部推理local inference modeling ###################
#计算dot attention
attention = Dot(axes=-1)([rnn_layer_q1, rnn_layer_q2])
#分别计算query和sample进行attention后的结果
w_attn_1 = Lambda(lambda x: softmax(x, axis=1))(attention)
w_attn_2 = Permute((2, 1))(Lambda(lambda x: softmax(x, axis=2))(attention))
align_layer_1 = Dot(axes=1)([w_attn_1, rnn_layer_q1])
align_layer_2 = Dot(axes=1)([w_attn_2, rnn_layer_q2])
############# 推理组合Inference Composition #######################
subtract_layer_1 = subtract([rnn_layer_q1, align_layer_1])
subtract_layer_2 = subtract([rnn_layer_q2, align_layer_2])
multiply_layer_1 = multiply([rnn_layer_q1, align_layer_1])
multiply_layer_2 = multiply([rnn_layer_q2, align_layer_2])
m_q1 = concatenate([rnn_layer_q1, align_layer_1, subtract_layer_1, multiply_layer_1])
m_q2 = concatenate([rnn_layer_q2, align_layer_2, subtract_layer_2, multiply_layer_2])
############### 编码+池化 #######################
v_q1_i = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(m_q1)
v_q2_i = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(m_q2)
avgpool_q1 = GlobalAveragePooling1D()(v_q1_i)
avgpool_q2 = GlobalAveragePooling1D()(v_q2_i)
maxpool_q1 = GlobalMaxPooling1D()(v_q1_i)
maxpool_q2 = GlobalMaxPooling1D()(v_q2_i)
merged_q1 = concatenate([avgpool_q1, maxpool_q1])
merged_q2 = concatenate([avgpool_q2, maxpool_q2])
final_v = BatchNormalization()(concatenate([merged_q1, merged_q2]))
#output = Dense(units=self.dense_units, activation='relu')(final_v)
output = Dense(units=self.num_output, activation=None)(final_v)
#output = BatchNormalization()(output)
#output = Dropout(self.dropout_rate)(output)
#output = tf.nn.dropout(output, self.keep_prob)
#高级api tf.layer.dropout 与 keras的Dropout都使用dropout
#tf.nn.dropout使用keep_prob
#output = Dense(units=self.num_output, activation='sigmoid')(output)
#output = Dense(units=self.num_output, activation=None)(output)
#output = tf.squeeze(output, -1)
return output
| #-*- coding:utf-8 -*-
import keras
import tensorflow as tf
from keras.layers import *
from keras.activations import softmax
from keras.models import Model
from keras.layers.merge import concatenate
from keras.layers.normalization import BatchNormalization
from keras.utils import multi_gpu_model
from encoder import EncoderBase
#refer:https://arxiv.org/abs/1609.06038
class ESIM(EncoderBase):
def __init__(self, **kwargs):
super(ESIM, self).__init__(**kwargs)
self.embedding_size = kwargs['embedding_size']
self.recurrent_units = 300
self.dense_units = 300
def update_features(self, features):
pass
def __call__(self, x_query, x_sample, reuse = tf.AUTO_REUSE, **kwargs):
#embedding_sequence_q1 = BatchNormalization(axis=2)(x_query)
#embedding_sequence_q2 = BatchNormalization(axis=2)(x_sample)
#final_embedding_sequence_q1 = SpatialDropout1D(0.25)(embedding_sequence_q1)
#final_embedding_sequence_q2 = SpatialDropout1D(0.25)(embedding_sequence_q2)
#################### 输入编码input encoding #######################
#分别对query和sample进行双向编码
rnn_layer_q1 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(x_query)
rnn_layer_q2 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(x_sample)
#rnn_layer_q1 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(final_embedding_sequence_q1)
#rnn_layer_q2 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(final_embedding_sequence_q2)
############## 局部推理local inference modeling ###################
#计算dot attention
attention = Dot(axes=-1)([rnn_layer_q1, rnn_layer_q2])
#分别计算query和sample进行attention后的结果
w_attn_1 = Lambda(lambda x: softmax(x, axis=1))(attention)
w_attn_2 = Permute((2, 1))(Lambda(lambda x: softmax(x, axis=2))(attention))
align_layer_1 = Dot(axes=1)([w_attn_1, rnn_layer_q1])
align_layer_2 = Dot(axes=1)([w_attn_2, rnn_layer_q2])
############# 推理组合Inference Composition #######################
subtract_layer_1 = subtract([rnn_layer_q1, align_layer_1])
subtract_layer_2 = subtract([rnn_layer_q2, align_layer_2])
multiply_layer_1 = multiply([rnn_layer_q1, align_layer_1])
multiply_layer_2 = multiply([rnn_layer_q2, align_layer_2])
m_q1 = concatenate([rnn_layer_q1, align_layer_1, subtract_layer_1, multiply_layer_1])
m_q2 = concatenate([rnn_layer_q2, align_layer_2, subtract_layer_2, multiply_layer_2])
############### 编码+池化 #######################
v_q1_i = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(m_q1)
v_q2_i = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(m_q2)
avgpool_q1 = GlobalAveragePooling1D()(v_q1_i)
avgpool_q2 = GlobalAveragePooling1D()(v_q2_i)
maxpool_q1 = GlobalMaxPooling1D()(v_q1_i)
maxpool_q2 = GlobalMaxPooling1D()(v_q2_i)
merged_q1 = concatenate([avgpool_q1, maxpool_q1])
merged_q2 = concatenate([avgpool_q2, maxpool_q2])
final_v = BatchNormalization()(concatenate([merged_q1, merged_q2]))
#output = Dense(units=self.dense_units, activation='relu')(final_v)
output = Dense(units=self.num_output, activation=None)(final_v)
#output = BatchNormalization()(output)
#output = Dropout(self.dropout_rate)(output)
#output = tf.nn.dropout(output, self.keep_prob)
#高级api tf.layer.dropout 与 keras的Dropout都使用dropout
#tf.nn.dropout使用keep_prob
#output = Dense(units=self.num_output, activation='sigmoid')(output)
#output = Dense(units=self.num_output, activation=None)(output)
#output = tf.squeeze(output, -1)
return output
| en | 000090582_zhufz-nlp_research_esim_f05bea23dfa6.py | unknown | 1,262 |
import os
from collections import OrderedDict
from pandas import DataFrame
from cave.analyzer.base_analyzer import BaseAnalyzer
from cave.utils.apt_helpers.apt_warning import apt_warning
from cave.utils.exceptions import Deactivated
class APTOverview(BaseAnalyzer):
"""
Overview of AutoPyTorch-Specific Configurations
"""
def __init__(self, runscontainer):
super().__init__(runscontainer)
self.output_dir = runscontainer.output_dir
if self.runscontainer.file_format != "APT":
raise Deactivated("{} deactivated, only designed for file-format APT (but detected {})".format(
self.get_name(), self.runscontainer.file_format
))
apt_warning(self.logger)
html_table = self.run()
self.result["General"] = {"table": html_table,
"tooltip": "AutoPyTorch configuration."}
def get_name(self):
return "Auto-PyTorch Overview"
def run(self):
""" Generate tables. """
# Run-specific / budget specific infos
runs = self.runscontainer.get_aggregated(keep_folders=True, keep_budgets=False)
apt_config_dict = self._runspec_dict_apt_config(runs)
results_fit_dict = self._runspec_dict_results_fit(runs)
for k, runspec_dict in [("Auto-PyTorch Configuration", apt_config_dict),
("Results of the fit()-call", results_fit_dict)]:
order_spec = list(list(runspec_dict.values())[0].keys()) # Get keys of any sub-dict for order
html_table_specific = DataFrame(runspec_dict)
html_table_specific = html_table_specific.reindex(order_spec)
html_table_specific = html_table_specific.to_html(escape=False, justify='left')
self.result[k] = {"table": html_table_specific}
def _runspec_dict_results_fit(self, runs):
runspec = OrderedDict()
for idx, run in enumerate(runs):
self.logger.debug("Path to folder for run no. {}: {}".format(idx, str(run.path_to_folder)))
name = os.path.basename(run.path_to_folder)
runspec[name] = OrderedDict()
for k, v in run.share_information['results_fit']['info'].items():
runspec[name]["Info: " + str(k)] = v
for k, v in run.share_information['results_fit']['optimized_hyperparameter_config'].items():
runspec[name]["Parameter: " + str(k)] = v
runspec[name]["Budget"] = run.share_information['results_fit']['budget']
runspec[name]["Loss"] = run.share_information['results_fit']['loss']
return runspec
def _runspec_dict_apt_config(self, runs):
runspec = OrderedDict()
for idx, run in enumerate(runs):
self.logger.debug("Path to folder for run no. {}: {}".format(idx, str(run.path_to_folder)))
name = os.path.basename(run.path_to_folder)
runspec[name] = OrderedDict()
for k, v in run.share_information['apt_config'].items():
runspec[name][k] = v
return runspec | import os
from collections import OrderedDict
from pandas import DataFrame
from cave.analyzer.base_analyzer import BaseAnalyzer
from cave.utils.apt_helpers.apt_warning import apt_warning
from cave.utils.exceptions import Deactivated
class APTOverview(BaseAnalyzer):
"""
Overview of AutoPyTorch-Specific Configurations
"""
def __init__(self, runscontainer):
super().__init__(runscontainer)
self.output_dir = runscontainer.output_dir
if self.runscontainer.file_format != "APT":
raise Deactivated("{} deactivated, only designed for file-format APT (but detected {})".format(
self.get_name(), self.runscontainer.file_format
))
apt_warning(self.logger)
html_table = self.run()
self.result["General"] = {"table": html_table,
"tooltip": "AutoPyTorch configuration."}
def get_name(self):
return "Auto-PyTorch Overview"
def run(self):
""" Generate tables. """
# Run-specific / budget specific infos
runs = self.runscontainer.get_aggregated(keep_folders=True, keep_budgets=False)
apt_config_dict = self._runspec_dict_apt_config(runs)
results_fit_dict = self._runspec_dict_results_fit(runs)
for k, runspec_dict in [("Auto-PyTorch Configuration", apt_config_dict),
("Results of the fit()-call", results_fit_dict)]:
order_spec = list(list(runspec_dict.values())[0].keys()) # Get keys of any sub-dict for order
html_table_specific = DataFrame(runspec_dict)
html_table_specific = html_table_specific.reindex(order_spec)
html_table_specific = html_table_specific.to_html(escape=False, justify='left')
self.result[k] = {"table": html_table_specific}
def _runspec_dict_results_fit(self, runs):
runspec = OrderedDict()
for idx, run in enumerate(runs):
self.logger.debug("Path to folder for run no. {}: {}".format(idx, str(run.path_to_folder)))
name = os.path.basename(run.path_to_folder)
runspec[name] = OrderedDict()
for k, v in run.share_information['results_fit']['info'].items():
runspec[name]["Info: " + str(k)] = v
for k, v in run.share_information['results_fit']['optimized_hyperparameter_config'].items():
runspec[name]["Parameter: " + str(k)] = v
runspec[name]["Budget"] = run.share_information['results_fit']['budget']
runspec[name]["Loss"] = run.share_information['results_fit']['loss']
return runspec
def _runspec_dict_apt_config(self, runs):
runspec = OrderedDict()
for idx, run in enumerate(runs):
self.logger.debug("Path to folder for run no. {}: {}".format(idx, str(run.path_to_folder)))
name = os.path.basename(run.path_to_folder)
runspec[name] = OrderedDict()
for k, v in run.share_information['apt_config'].items():
runspec[name][k] = v
return runspec | en | 000319754_deslay1-CAVE_apt_overview_2c742a4a24f3.py | unknown | 874 |
# -*- coding: utf-8 -*-
# Copyright 2010-2014, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Splits the specified apk file into each ABIs.
"Fat APK" contains multipul shared objects in order to run on all the ABIs.
But this means such APK is larger than "Thin" APK.
This script creates Thin APKs from Fat APK.
Version code format:
00000BBBBB:
B: Build number
or
0005BBBBBA
A: ABI (0: Fat, 5: x86, 4: armeabi-v7a, 3: armeabi, 1:mips)
B: Build number
Note:
- This process must be done before signing.
- Prefix 5 is introduced because of historical reason.
Previously Build Number (B) is placed after ABI (A) but
it's found that swpping the order is reasonable.
Previously version code for x86 is always greater than that for armeabi.
Therefore version-check rule like "Version code of update must be greater
than that of previous" cannot be introduced.
"""
__author__ = "matsuzakit"
import cStringIO
import logging
import optparse
import os
import re
import shutil
import tempfile
import zipfile
from build_tools import android_binary_xml
_UNSIGNED_APK_SUFFIX = '-unsigned.apk'
class Error(Exception):
"""Base exception class."""
class UnexpectedFormatError(Error):
pass
class IllegalArgumentError(Error):
pass
def ParseArgs():
parser = optparse.OptionParser()
parser.add_option('--dir', dest='bin_dir',
help='Binary directory. Files of which name ends with '
'"-unsigned.apk" are processed.')
options = parser.parse_args()[0]
if not options.bin_dir:
raise IllegalArgumentError('--dir is mandatory')
return options
# TODO(matsuzakit): Make zip relating logics independent
# from file-based operations.
# Currently they are file-based for reuseabilty.
# But file-based design is not good from the view points of
# performance and testability
def DeleteEntriesFromZip(zip_path, delete_file_names):
"""Deletes entries from zip file.
Args:
zip_path: Path to zip file.
delete_file_names: File names in archive to be deleted.
"""
logging.info('Deleting %s from %s', delete_file_names, zip_path)
tmp_file = cStringIO.StringIO()
in_zip_file = zipfile.ZipFile(zip_path)
try:
out_zip_file = zipfile.ZipFile(tmp_file, 'w')
try:
for zipinfo in in_zip_file.infolist():
if zipinfo.filename not in delete_file_names:
# Reusing zipinfo as 1st argument is mandatory
# because compression_type must be kept.
out_zip_file.writestr(zipinfo,
in_zip_file.read(zipinfo.filename))
finally:
out_zip_file.close()
finally:
in_zip_file.close()
with open(zip_path, 'w') as out_file:
out_file.write(tmp_file.getvalue())
def ReplaceFilesInZip(zip_path, base_directory, file_names,
compress_type=zipfile.ZIP_DEFLATED):
"""Replaces files in zip file with given file_names.
If no corresponding entries in zip file, simply appended.
Args:
zip_path: Path to zip file.
base_directory: Base direcotry of file_names.
file_names: File names to be appended.
compress_type: zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED.
"""
DeleteEntriesFromZip(zip_path, file_names)
logging.info('Appending %s to %s', file_names, zip_path)
zip_file = zipfile.ZipFile(zip_path, 'a')
try:
for file_name in file_names:
zip_file.write(os.path.join(base_directory, file_name),
file_name, compress_type)
finally:
zip_file.close()
def UnzipFiles(zip_path, file_names, out_dir):
"""Extracts files from zip file.
Args:
zip_path: Path to zip file.
file_names: File names to be extracted.
out_dir: Destination directory.
Returns:
Paths of extracted files.
"""
logging.info('Extracting %s from %s', file_names, zip_path)
result = []
zip_file = zipfile.ZipFile(zip_path)
try:
for zip_info in zip_file.infolist():
if zip_info.filename in file_names:
out_path = os.path.join(out_dir, zip_info.filename)
if not os.path.isdir(os.path.dirname(out_path)):
os.makedirs(os.path.dirname(out_path))
with open(out_path, 'w') as out_file:
out_file.write(zip_file.read(zip_info.filename))
result.append(out_path)
finally:
zip_file.close()
return result
def GetVersionCode(base_version_code, abi):
"""Gets version code based on base version code and abi."""
# armeabi-v7a's version code must be greater than armeabi's.
# By this v7a's apk is prioritized on the Play.
# Without this all the ARM devices download armeabi version
# because armeabi can be run on all of them (including v7a).
if abi == 'x86':
abi_code = 5
elif abi == 'armeabi-v7a':
abi_code = 4
elif abi == 'armeabi':
abi_code = 3
elif abi == 'mips':
abi_code = 1
else:
raise IllegalArgumentError('Unexpected ABI; %s' % abi)
if base_version_code >= 10000:
raise IllegalArgumentError('Version code is greater than 10000. '
'It is time to revisit version code scheme.')
return int('5%05d%d' % (base_version_code, abi_code))
def ModifyAndroidManifestFile(apk_path, abi):
"""Modifies given apk file to make it thin apk.
After the execution of this method,
unneeded .so files (evaluated by given abi name)
are removed and AndroidManifest.xml file's
version code is modified.
Args:
apk_path: the path to the apk file to be modified.
abi: the ABI name.
Raises:
UnexpectedFormatError: manifest element must be only one.
"""
logging.info('Modifing %s to ABI %s', apk_path, abi)
temp_dir_in = tempfile.mkdtemp()
temp_dir_out = tempfile.mkdtemp()
original_file_paths = UnzipFiles(apk_path,
'AndroidManifest.xml', temp_dir_in)
if len(original_file_paths) != 1:
raise UnexpectedFormatError(
'AndroidManifest.xml file is expected to be only one.')
original_file_path = original_file_paths[0]
document = android_binary_xml.AndroidBinaryXml(original_file_path)
manifest_elements = document.FindElements(None, 'manifest')
if len(manifest_elements) != 1:
raise UnexpectedFormatError('manifest element is expected to be only one.')
manifest_element = manifest_elements[0]
version_code_attribute = manifest_element.GetAttribute(
'http://schemas.android.com/apk/res/android', 'versionCode')
base_version_code = version_code_attribute.GetIntValue()
logging.info('new ver code %s', GetVersionCode(base_version_code, abi))
version_code_attribute.SetIntValue(GetVersionCode(base_version_code, abi))
document.Write(os.path.join(temp_dir_out, 'AndroidManifest.xml'))
ReplaceFilesInZip(apk_path, temp_dir_out, ['AndroidManifest.xml'])
def GetUnneededFiles(abi_to_files, abi):
unneeded_files = []
for entry_abi, entry_files in abi_to_files.iteritems():
if entry_abi != abi:
unneeded_files.extend(entry_files)
logging.info('Unneeded files are %s', unneeded_files)
return unneeded_files
def CreateCopyFile(original_file, abi_name):
# Original : Mozc-unsigned.apk
# Copy : Mozc-x86-unsigned.apk
copied_file = ''.join(
[original_file[:original_file.find(_UNSIGNED_APK_SUFFIX)],
'-', abi_name, _UNSIGNED_APK_SUFFIX])
logging.info('Copying from %s to %s', original_file, copied_file)
shutil.copyfile(original_file, copied_file)
return copied_file
def CreateAbiToFileMapping(file_name):
zip_file = zipfile.ZipFile(file_name)
try:
abi_to_files = {}
for zip_info in zip_file.infolist():
m = re.match(r'lib/(.+?)/.*', zip_info.filename)
if m:
files = abi_to_files.setdefault(m.group(1), [])
files.append(zip_info.filename)
logging.info('ABIs are: %s', abi_to_files.keys())
finally:
zip_file.close()
return abi_to_files
def main():
# Enable logging.info.
logging.getLogger().setLevel(logging.INFO)
options = ParseArgs()
for apk_file in [os.path.join(options.bin_dir, f)
for f in os.listdir(options.bin_dir)
if f.endswith(_UNSIGNED_APK_SUFFIX)]:
logging.info('Processing %s', apk_file)
abi_to_files = CreateAbiToFileMapping(apk_file)
for abi in abi_to_files:
logging.info('Processing ABI: %s', abi)
copied_file = CreateCopyFile(apk_file, abi)
unneeded_files = GetUnneededFiles(abi_to_files, abi)
DeleteEntriesFromZip(copied_file, unneeded_files)
ModifyAndroidManifestFile(copied_file, abi)
if __name__ == '__main__':
main()
| # -*- coding: utf-8 -*-
# Copyright 2010-2014, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Splits the specified apk file into each ABIs.
"Fat APK" contains multipul shared objects in order to run on all the ABIs.
But this means such APK is larger than "Thin" APK.
This script creates Thin APKs from Fat APK.
Version code format:
00000BBBBB:
B: Build number
or
0005BBBBBA
A: ABI (0: Fat, 5: x86, 4: armeabi-v7a, 3: armeabi, 1:mips)
B: Build number
Note:
- This process must be done before signing.
- Prefix 5 is introduced because of historical reason.
Previously Build Number (B) is placed after ABI (A) but
it's found that swpping the order is reasonable.
Previously version code for x86 is always greater than that for armeabi.
Therefore version-check rule like "Version code of update must be greater
than that of previous" cannot be introduced.
"""
__author__ = "matsuzakit"
import cStringIO
import logging
import optparse
import os
import re
import shutil
import tempfile
import zipfile
from build_tools import android_binary_xml
_UNSIGNED_APK_SUFFIX = '-unsigned.apk'
class Error(Exception):
"""Base exception class."""
class UnexpectedFormatError(Error):
pass
class IllegalArgumentError(Error):
pass
def ParseArgs():
parser = optparse.OptionParser()
parser.add_option('--dir', dest='bin_dir',
help='Binary directory. Files of which name ends with '
'"-unsigned.apk" are processed.')
options = parser.parse_args()[0]
if not options.bin_dir:
raise IllegalArgumentError('--dir is mandatory')
return options
# TODO(matsuzakit): Make zip relating logics independent
# from file-based operations.
# Currently they are file-based for reuseabilty.
# But file-based design is not good from the view points of
# performance and testability
def DeleteEntriesFromZip(zip_path, delete_file_names):
"""Deletes entries from zip file.
Args:
zip_path: Path to zip file.
delete_file_names: File names in archive to be deleted.
"""
logging.info('Deleting %s from %s', delete_file_names, zip_path)
tmp_file = cStringIO.StringIO()
in_zip_file = zipfile.ZipFile(zip_path)
try:
out_zip_file = zipfile.ZipFile(tmp_file, 'w')
try:
for zipinfo in in_zip_file.infolist():
if zipinfo.filename not in delete_file_names:
# Reusing zipinfo as 1st argument is mandatory
# because compression_type must be kept.
out_zip_file.writestr(zipinfo,
in_zip_file.read(zipinfo.filename))
finally:
out_zip_file.close()
finally:
in_zip_file.close()
with open(zip_path, 'w') as out_file:
out_file.write(tmp_file.getvalue())
def ReplaceFilesInZip(zip_path, base_directory, file_names,
compress_type=zipfile.ZIP_DEFLATED):
"""Replaces files in zip file with given file_names.
If no corresponding entries in zip file, simply appended.
Args:
zip_path: Path to zip file.
base_directory: Base direcotry of file_names.
file_names: File names to be appended.
compress_type: zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED.
"""
DeleteEntriesFromZip(zip_path, file_names)
logging.info('Appending %s to %s', file_names, zip_path)
zip_file = zipfile.ZipFile(zip_path, 'a')
try:
for file_name in file_names:
zip_file.write(os.path.join(base_directory, file_name),
file_name, compress_type)
finally:
zip_file.close()
def UnzipFiles(zip_path, file_names, out_dir):
"""Extracts files from zip file.
Args:
zip_path: Path to zip file.
file_names: File names to be extracted.
out_dir: Destination directory.
Returns:
Paths of extracted files.
"""
logging.info('Extracting %s from %s', file_names, zip_path)
result = []
zip_file = zipfile.ZipFile(zip_path)
try:
for zip_info in zip_file.infolist():
if zip_info.filename in file_names:
out_path = os.path.join(out_dir, zip_info.filename)
if not os.path.isdir(os.path.dirname(out_path)):
os.makedirs(os.path.dirname(out_path))
with open(out_path, 'w') as out_file:
out_file.write(zip_file.read(zip_info.filename))
result.append(out_path)
finally:
zip_file.close()
return result
def GetVersionCode(base_version_code, abi):
"""Gets version code based on base version code and abi."""
# armeabi-v7a's version code must be greater than armeabi's.
# By this v7a's apk is prioritized on the Play.
# Without this all the ARM devices download armeabi version
# because armeabi can be run on all of them (including v7a).
if abi == 'x86':
abi_code = 5
elif abi == 'armeabi-v7a':
abi_code = 4
elif abi == 'armeabi':
abi_code = 3
elif abi == 'mips':
abi_code = 1
else:
raise IllegalArgumentError('Unexpected ABI; %s' % abi)
if base_version_code >= 10000:
raise IllegalArgumentError('Version code is greater than 10000. '
'It is time to revisit version code scheme.')
return int('5%05d%d' % (base_version_code, abi_code))
def ModifyAndroidManifestFile(apk_path, abi):
"""Modifies given apk file to make it thin apk.
After the execution of this method,
unneeded .so files (evaluated by given abi name)
are removed and AndroidManifest.xml file's
version code is modified.
Args:
apk_path: the path to the apk file to be modified.
abi: the ABI name.
Raises:
UnexpectedFormatError: manifest element must be only one.
"""
logging.info('Modifing %s to ABI %s', apk_path, abi)
temp_dir_in = tempfile.mkdtemp()
temp_dir_out = tempfile.mkdtemp()
original_file_paths = UnzipFiles(apk_path,
'AndroidManifest.xml', temp_dir_in)
if len(original_file_paths) != 1:
raise UnexpectedFormatError(
'AndroidManifest.xml file is expected to be only one.')
original_file_path = original_file_paths[0]
document = android_binary_xml.AndroidBinaryXml(original_file_path)
manifest_elements = document.FindElements(None, 'manifest')
if len(manifest_elements) != 1:
raise UnexpectedFormatError('manifest element is expected to be only one.')
manifest_element = manifest_elements[0]
version_code_attribute = manifest_element.GetAttribute(
'http://schemas.android.com/apk/res/android', 'versionCode')
base_version_code = version_code_attribute.GetIntValue()
logging.info('new ver code %s', GetVersionCode(base_version_code, abi))
version_code_attribute.SetIntValue(GetVersionCode(base_version_code, abi))
document.Write(os.path.join(temp_dir_out, 'AndroidManifest.xml'))
ReplaceFilesInZip(apk_path, temp_dir_out, ['AndroidManifest.xml'])
def GetUnneededFiles(abi_to_files, abi):
unneeded_files = []
for entry_abi, entry_files in abi_to_files.iteritems():
if entry_abi != abi:
unneeded_files.extend(entry_files)
logging.info('Unneeded files are %s', unneeded_files)
return unneeded_files
def CreateCopyFile(original_file, abi_name):
# Original : Mozc-unsigned.apk
# Copy : Mozc-x86-unsigned.apk
copied_file = ''.join(
[original_file[:original_file.find(_UNSIGNED_APK_SUFFIX)],
'-', abi_name, _UNSIGNED_APK_SUFFIX])
logging.info('Copying from %s to %s', original_file, copied_file)
shutil.copyfile(original_file, copied_file)
return copied_file
def CreateAbiToFileMapping(file_name):
zip_file = zipfile.ZipFile(file_name)
try:
abi_to_files = {}
for zip_info in zip_file.infolist():
m = re.match(r'lib/(.+?)/.*', zip_info.filename)
if m:
files = abi_to_files.setdefault(m.group(1), [])
files.append(zip_info.filename)
logging.info('ABIs are: %s', abi_to_files.keys())
finally:
zip_file.close()
return abi_to_files
def main():
# Enable logging.info.
logging.getLogger().setLevel(logging.INFO)
options = ParseArgs()
for apk_file in [os.path.join(options.bin_dir, f)
for f in os.listdir(options.bin_dir)
if f.endswith(_UNSIGNED_APK_SUFFIX)]:
logging.info('Processing %s', apk_file)
abi_to_files = CreateAbiToFileMapping(apk_file)
for abi in abi_to_files:
logging.info('Processing ABI: %s', abi)
copied_file = CreateCopyFile(apk_file, abi)
unneeded_files = GetUnneededFiles(abi_to_files, abi)
DeleteEntriesFromZip(copied_file, unneeded_files)
ModifyAndroidManifestFile(copied_file, abi)
if __name__ == '__main__':
main()
| en | 000099221_spanfish-JapaneseKeyboard_split_abi_7e3f446ae900.py | unknown | 3,130 |
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
import torch
import numpy as np
import cv2
class Model:
def __init__(self,confidence_thresh=0.6):
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = confidence_thresh # set threshold for this model
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
self.model = DefaultPredictor(cfg)
def get_seg_output(self,image:np.array):
out = self.model(image)['instances']
outputs = [(out.pred_masks[i],out.pred_classes[i]) for i in range(len(out.pred_classes)) if out.pred_classes[i]==0]
return outputs
class Preprocessing:
def __init__(self,kernel,dilate_iter=5,erode_iter=1):
self.kernel = kernel
self.dilate_iter = dilate_iter
self.erode_iter = erode_iter
def get_target_mask(self,masks):
out = np.zeros(masks[0].shape)
for mask in masks:
out += mask
out = np.clip(out,0,1)
return out
def get_trimap(self,masks):
target_mask = self.get_target_mask(masks)
erode = cv2.erode(target_mask.astype('uint8'),self.kernel,iterations=self.erode_iter)
dilate = cv2.dilate(target_mask.astype('uint8'),self.kernel,iterations=self.dilate_iter)
h, w = target_mask.shape
trimap = np.zeros((h, w, 2))
trimap[erode == 1, 1] = 1
trimap[dilate == 0, 0] = 1
return trimap
| from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
import torch
import numpy as np
import cv2
class Model:
def __init__(self,confidence_thresh=0.6):
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = confidence_thresh # set threshold for this model
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
self.model = DefaultPredictor(cfg)
def get_seg_output(self,image:np.array):
out = self.model(image)['instances']
outputs = [(out.pred_masks[i],out.pred_classes[i]) for i in range(len(out.pred_classes)) if out.pred_classes[i]==0]
return outputs
class Preprocessing:
def __init__(self,kernel,dilate_iter=5,erode_iter=1):
self.kernel = kernel
self.dilate_iter = dilate_iter
self.erode_iter = erode_iter
def get_target_mask(self,masks):
out = np.zeros(masks[0].shape)
for mask in masks:
out += mask
out = np.clip(out,0,1)
return out
def get_trimap(self,masks):
target_mask = self.get_target_mask(masks)
erode = cv2.erode(target_mask.astype('uint8'),self.kernel,iterations=self.erode_iter)
dilate = cv2.dilate(target_mask.astype('uint8'),self.kernel,iterations=self.dilate_iter)
h, w = target_mask.shape
trimap = np.zeros((h, w, 2))
trimap[erode == 1, 1] = 1
trimap[dilate == 0, 0] = 1
return trimap
| en | 000020513_rogo96-Background-removal_detectron_seg_c08462f8e604.py | unknown | 610 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for google3.firebase.app.client.cpp.version_header."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google3.testing.pybase import googletest
from google3.firebase.app.client.cpp import version_header
EXPECTED_VERSION_HEADER = r"""// Copyright 2016 Google Inc. All Rights Reserved.
#ifndef FIREBASE_APP_CLIENT_CPP_SRC_VERSION_H_
#define FIREBASE_APP_CLIENT_CPP_SRC_VERSION_H_
/// @def FIREBASE_VERSION_MAJOR
/// @brief Major version number of the Firebase C++ SDK.
/// @see kFirebaseVersionString
#define FIREBASE_VERSION_MAJOR 1
/// @def FIREBASE_VERSION_MINOR
/// @brief Minor version number of the Firebase C++ SDK.
/// @see kFirebaseVersionString
#define FIREBASE_VERSION_MINOR 2
/// @def FIREBASE_VERSION_REVISION
/// @brief Revision number of the Firebase C++ SDK.
/// @see kFirebaseVersionString
#define FIREBASE_VERSION_REVISION 3
/// @cond FIREBASE_APP_INTERNAL
#define FIREBASE_STRING_EXPAND(X) #X
#define FIREBASE_STRING(X) FIREBASE_STRING_EXPAND(X)
/// @endcond
// Version number.
// clang-format off
#define FIREBASE_VERSION_NUMBER_STRING \
FIREBASE_STRING(FIREBASE_VERSION_MAJOR) "." \
FIREBASE_STRING(FIREBASE_VERSION_MINOR) "." \
FIREBASE_STRING(FIREBASE_VERSION_REVISION)
// clang-format on
// Identifier for version string, e.g. kFirebaseVersionString.
#define FIREBASE_VERSION_IDENTIFIER(library) k##library##VersionString
// Concatenated version string, e.g. "Firebase C++ x.y.z".
#define FIREBASE_VERSION_STRING(library) \
#library " C++ " FIREBASE_VERSION_NUMBER_STRING
#if !defined(DOXYGEN)
#if !defined(_WIN32) && !defined(__CYGWIN__)
#define DEFINE_FIREBASE_VERSION_STRING(library) \
extern volatile __attribute__((weak)) \
const char* FIREBASE_VERSION_IDENTIFIER(library); \
volatile __attribute__((weak)) \
const char* FIREBASE_VERSION_IDENTIFIER(library) = \
FIREBASE_VERSION_STRING(library)
#else
#define DEFINE_FIREBASE_VERSION_STRING(library) \
static const char* FIREBASE_VERSION_IDENTIFIER(library) = \
FIREBASE_VERSION_STRING(library)
#endif // !defined(_WIN32) && !defined(__CYGWIN__)
#else // if defined(DOXYGEN)
/// @brief Namespace that encompasses all Firebase APIs.
namespace firebase {
/// @brief String which identifies the current version of the Firebase C++
/// SDK.
///
/// @see FIREBASE_VERSION_MAJOR
/// @see FIREBASE_VERSION_MINOR
/// @see FIREBASE_VERSION_REVISION
static const char* kFirebaseVersionString = FIREBASE_VERSION_STRING;
} // namespace firebase
#endif // !defined(DOXYGEN)
#endif // FIREBASE_APP_CLIENT_CPP_SRC_VERSION_H_
"""
class VersionHeaderGeneratorTest(googletest.TestCase):
def test_generate_header(self):
result_header = version_header.generate_header(1, 2, 3)
self.assertEqual(result_header, EXPECTED_VERSION_HEADER)
if __name__ == '__main__':
googletest.main()
| # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for google3.firebase.app.client.cpp.version_header."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google3.testing.pybase import googletest
from google3.firebase.app.client.cpp import version_header
EXPECTED_VERSION_HEADER = r"""// Copyright 2016 Google Inc. All Rights Reserved.
#ifndef FIREBASE_APP_CLIENT_CPP_SRC_VERSION_H_
#define FIREBASE_APP_CLIENT_CPP_SRC_VERSION_H_
/// @def FIREBASE_VERSION_MAJOR
/// @brief Major version number of the Firebase C++ SDK.
/// @see kFirebaseVersionString
#define FIREBASE_VERSION_MAJOR 1
/// @def FIREBASE_VERSION_MINOR
/// @brief Minor version number of the Firebase C++ SDK.
/// @see kFirebaseVersionString
#define FIREBASE_VERSION_MINOR 2
/// @def FIREBASE_VERSION_REVISION
/// @brief Revision number of the Firebase C++ SDK.
/// @see kFirebaseVersionString
#define FIREBASE_VERSION_REVISION 3
/// @cond FIREBASE_APP_INTERNAL
#define FIREBASE_STRING_EXPAND(X) #X
#define FIREBASE_STRING(X) FIREBASE_STRING_EXPAND(X)
/// @endcond
// Version number.
// clang-format off
#define FIREBASE_VERSION_NUMBER_STRING \
FIREBASE_STRING(FIREBASE_VERSION_MAJOR) "." \
FIREBASE_STRING(FIREBASE_VERSION_MINOR) "." \
FIREBASE_STRING(FIREBASE_VERSION_REVISION)
// clang-format on
// Identifier for version string, e.g. kFirebaseVersionString.
#define FIREBASE_VERSION_IDENTIFIER(library) k##library##VersionString
// Concatenated version string, e.g. "Firebase C++ x.y.z".
#define FIREBASE_VERSION_STRING(library) \
#library " C++ " FIREBASE_VERSION_NUMBER_STRING
#if !defined(DOXYGEN)
#if !defined(_WIN32) && !defined(__CYGWIN__)
#define DEFINE_FIREBASE_VERSION_STRING(library) \
extern volatile __attribute__((weak)) \
const char* FIREBASE_VERSION_IDENTIFIER(library); \
volatile __attribute__((weak)) \
const char* FIREBASE_VERSION_IDENTIFIER(library) = \
FIREBASE_VERSION_STRING(library)
#else
#define DEFINE_FIREBASE_VERSION_STRING(library) \
static const char* FIREBASE_VERSION_IDENTIFIER(library) = \
FIREBASE_VERSION_STRING(library)
#endif // !defined(_WIN32) && !defined(__CYGWIN__)
#else // if defined(DOXYGEN)
/// @brief Namespace that encompasses all Firebase APIs.
namespace firebase {
/// @brief String which identifies the current version of the Firebase C++
/// SDK.
///
/// @see FIREBASE_VERSION_MAJOR
/// @see FIREBASE_VERSION_MINOR
/// @see FIREBASE_VERSION_REVISION
static const char* kFirebaseVersionString = FIREBASE_VERSION_STRING;
} // namespace firebase
#endif // !defined(DOXYGEN)
#endif // FIREBASE_APP_CLIENT_CPP_SRC_VERSION_H_
"""
class VersionHeaderGeneratorTest(googletest.TestCase):
def test_generate_header(self):
result_header = version_header.generate_header(1, 2, 3)
self.assertEqual(result_header, EXPECTED_VERSION_HEADER)
if __name__ == '__main__':
googletest.main()
| en | 000272294_oliwilkinsonio-firebase-cpp-sdk_version_header_test_4bd3ca956f55.py | unknown | 1,086 |
#!/usr/bin/env python
"""
BSD 3-Clause License
Copyright (c) 2017, SafeBreach Labs
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Storage provider implementation for AltFS, built on top of Windows WMI system.
system.
References:
- https://www.blackhat.com/docs/us-15/materials/us-15-Graeber-Abusing-Windows
-Management-Instrumentation-WMI-To-Build-A-Persistent%20Asynchronous-And-
Fileless-Backdoor-wp.pdf
- https://gallery.technet.microsoft.com/WMI-PowerShell-cmdlets-ac049637
- https://docs.microsoft.com/en-us/windows/desktop/wmisdk/
creating-a-base-class
- https://stackoverflow.com/questions/252417/
how-can-i-use-a-dll-file-from-python
- https://docs.microsoft.com/en-us/windows/desktop/api/wbemcli/
nn-wbemcli-iwbemservices
Author: Dor Azouri <dor.azouri@safebreach.com>
Date: 2019-01-01
"""
import ctypes
import logging
import os
from common import WMI_CLIENT_DLL_PATH
from exceptions_ import BucketValueMissingException
from providers.common.calculations import calculate_bits_sum, \
calculate_next_available_index
from StorageProvider import StorageProvider
import wmi
logger = logging.getLogger(__name__)
class WMIStorageProvider(StorageProvider):
"""
Concrete Storage provider implementation for AltFS.
Built on top of Windows WMI system (WBEM).
"""
PROPERTY_NAME_DELIMITER = "_"
TARGET_CLASS_NAME_SUFFIX = "Wow64_"
def __init__(self, machine_identification_string, **kwargs):
"""Constructor for UserDefaultsStorageProvider"""
super(WMIStorageProvider, self).__init__()
self._machine_id_string = machine_identification_string
self._wmi_client = wmi.WMI()
self._wmi_client_dll = ctypes.cdll.LoadLibrary(
os.path.join(os.path.dirname(__file__), WMI_CLIENT_DLL_PATH))
self._namespace = kwargs["namespace"]
self._class_name = self._generate_bucket_name()
# calculate number of available buckets, used for modulus division
# when calculating the bucket index
self._buckets_names = [self._class_name]
self._buckets_count = len(self._buckets_names)
self._create_bucket()
logger.debug("namespace: %s" % self._namespace)
logger.debug("root class name: %s" % self._class_name)
def _generate_bucket_name(self):
classes = list([klass for klass in self._wmi_client.subclasses_of()
if not klass.startswith(
WMIStorageProvider.TARGET_CLASS_NAME_SUFFIX)])
classes_count = len(classes)
logger.debug("found %s legitimate classes" % classes_count)
machine_id_checksum = calculate_bits_sum(
self._machine_id_string)
target_class_id = machine_id_checksum % classes_count - len(
[
klass for klass in list(
self._wmi_client.subclasses_of())[
:machine_id_checksum % classes_count] if klass.startswith(
WMIStorageProvider.TARGET_CLASS_NAME_SUFFIX)])
logger.debug("target class for name generation: %s" %
(classes[target_class_id]))
return WMIStorageProvider.TARGET_CLASS_NAME_SUFFIX + \
classes[target_class_id].split("_")[-1]
def _create_bucket(self):
is_bucket_exist = self._class_name in self._wmi_client.subclasses_of()
if is_bucket_exist:
return
p_ns = ctypes.c_wchar_p(self._namespace)
p_cn = ctypes.c_wchar_p(self._class_name)
logger.debug("creating class: %s\\%s" %
(self._namespace, self._class_name))
self._wmi_client_dll.CreateClass(p_ns, p_cn)
def write_block(self, bucket_id, value_id, data=""):
"""Described in parent class"""
logger.debug("writing block at (%s:%s)" % (bucket_id, value_id))
try:
value_name = self._get_value_name(
bucket_id, value_id)
logger.debug("value with id already exists at (%s:%s)" %
(bucket_id, value_id))
except BucketValueMissingException:
logger.debug(
"value with id does not exist in specified bucket." +
" generating a new value name for bucket id %s" % bucket_id)
value_name = self._generate_value_name()
logger.debug("generated a new value name in bucket id %s: %s" % (
bucket_id, value_name))
target_value_id = WMIStorageProvider.value_name_to_value_id(value_name)
p_ns = ctypes.c_wchar_p(self._namespace)
p_cn = ctypes.c_wchar_p(self._class_name)
p_vn = ctypes.c_wchar_p(value_name)
p_data = ctypes.c_wchar_p(data)
logger.debug(
"creating a new property at (%s:%s): %s\\%s.%s" %
(bucket_id,
target_value_id,
self._namespace,
self._class_name,
value_name))
self._wmi_client_dll.CreateProperty(p_ns, p_cn, p_vn, p_data)
return target_value_id
def get_block(self, bucket_id, value_id):
"""Described in parent class"""
logger.debug("getting block at (%s:%s)" % (bucket_id, value_id))
data = self._wmi_client.get(self._class_name).wmi_property(
self._get_value_name(bucket_id, value_id)).value
return data
def delete_block(self, bucket_id, value_id):
"""Described in parent class"""
value_name = self._get_value_name(
bucket_id, value_id)
p_ns = ctypes.c_wchar_p(self._namespace)
p_cn = ctypes.c_wchar_p(self._class_name)
p_vn = ctypes.c_wchar_p(value_name)
logger.debug(
"deleting a property at (%s:%s): %s\\%s.%s" %
(bucket_id,
WMIStorageProvider.value_name_to_value_id(value_name),
self._namespace,
self._class_name,
value_name))
self._wmi_client_dll.DeleteProperty(p_ns, p_cn, p_vn)
def get_value_ids_in_bucket(self, bucket_id):
"""Described in parent class"""
return self._enumerate_applicable_values_dict().keys()
def generate_new_value_id_in_bucket(self, bucket_id):
"""Described in parent class"""
return WMIStorageProvider.value_name_to_value_id(
self._generate_value_name())
@staticmethod
def value_name_to_value_id(value_name):
"""Returns the value ID of the given value_name"""
return int(value_name.split(
WMIStorageProvider.PROPERTY_NAME_DELIMITER)[-1])
def _get_value_name(self, bucket_id, value_id):
logger.debug("looking for value name at (%s:%s)" %
(bucket_id, value_id))
if value_id is not None:
values_dict = self._enumerate_applicable_values_dict()
logger.debug("existing values: %s" % values_dict)
if value_id in values_dict:
logger.debug("value name exists at (%s:%s): %s" %
(bucket_id, value_id, values_dict[value_id]))
return values_dict[value_id]
logger.debug("no value name at (%s:%s)" % (bucket_id, value_id))
raise BucketValueMissingException(
"No applicable value found in bucket")
def _enumerate_applicable_values_dict(self):
values_names = self._enumerate_applicable_values()
return dict(zip([WMIStorageProvider.value_name_to_value_id(name)
for name in values_names], values_names))
def _enumerate_applicable_values(self):
return self._wmi_client.get(self._class_name).properties.keys()
def _get_bucket_name(self, bucket_id):
return self._buckets_names[bucket_id]
def _generate_value_name_machine_part(self):
return self._class_name.split(
"_")[1] + WMIStorageProvider.PROPERTY_NAME_DELIMITER
def _generate_value_name(self):
return self._generate_value_name_machine_part() + \
("%04d" % calculate_next_available_index(
self._enumerate_applicable_values_dict().keys()))
def _is_value_name_applicable(self, value_name):
return value_name.startswith(
self._generate_value_name_machine_part()) and all(
[char.isdigit() for char in value_name[-4:]])
| #!/usr/bin/env python
"""
BSD 3-Clause License
Copyright (c) 2017, SafeBreach Labs
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Storage provider implementation for AltFS, built on top of Windows WMI system.
system.
References:
- https://www.blackhat.com/docs/us-15/materials/us-15-Graeber-Abusing-Windows
-Management-Instrumentation-WMI-To-Build-A-Persistent%20Asynchronous-And-
Fileless-Backdoor-wp.pdf
- https://gallery.technet.microsoft.com/WMI-PowerShell-cmdlets-ac049637
- https://docs.microsoft.com/en-us/windows/desktop/wmisdk/
creating-a-base-class
- https://stackoverflow.com/questions/252417/
how-can-i-use-a-dll-file-from-python
- https://docs.microsoft.com/en-us/windows/desktop/api/wbemcli/
nn-wbemcli-iwbemservices
Author: Dor Azouri <dor.azouri@safebreach.com>
Date: 2019-01-01
"""
import ctypes
import logging
import os
from common import WMI_CLIENT_DLL_PATH
from exceptions_ import BucketValueMissingException
from providers.common.calculations import calculate_bits_sum, \
calculate_next_available_index
from StorageProvider import StorageProvider
import wmi
logger = logging.getLogger(__name__)
class WMIStorageProvider(StorageProvider):
"""
Concrete Storage provider implementation for AltFS.
Built on top of Windows WMI system (WBEM).
"""
PROPERTY_NAME_DELIMITER = "_"
TARGET_CLASS_NAME_SUFFIX = "Wow64_"
def __init__(self, machine_identification_string, **kwargs):
"""Constructor for UserDefaultsStorageProvider"""
super(WMIStorageProvider, self).__init__()
self._machine_id_string = machine_identification_string
self._wmi_client = wmi.WMI()
self._wmi_client_dll = ctypes.cdll.LoadLibrary(
os.path.join(os.path.dirname(__file__), WMI_CLIENT_DLL_PATH))
self._namespace = kwargs["namespace"]
self._class_name = self._generate_bucket_name()
# calculate number of available buckets, used for modulus division
# when calculating the bucket index
self._buckets_names = [self._class_name]
self._buckets_count = len(self._buckets_names)
self._create_bucket()
logger.debug("namespace: %s" % self._namespace)
logger.debug("root class name: %s" % self._class_name)
def _generate_bucket_name(self):
classes = list([klass for klass in self._wmi_client.subclasses_of()
if not klass.startswith(
WMIStorageProvider.TARGET_CLASS_NAME_SUFFIX)])
classes_count = len(classes)
logger.debug("found %s legitimate classes" % classes_count)
machine_id_checksum = calculate_bits_sum(
self._machine_id_string)
target_class_id = machine_id_checksum % classes_count - len(
[
klass for klass in list(
self._wmi_client.subclasses_of())[
:machine_id_checksum % classes_count] if klass.startswith(
WMIStorageProvider.TARGET_CLASS_NAME_SUFFIX)])
logger.debug("target class for name generation: %s" %
(classes[target_class_id]))
return WMIStorageProvider.TARGET_CLASS_NAME_SUFFIX + \
classes[target_class_id].split("_")[-1]
def _create_bucket(self):
is_bucket_exist = self._class_name in self._wmi_client.subclasses_of()
if is_bucket_exist:
return
p_ns = ctypes.c_wchar_p(self._namespace)
p_cn = ctypes.c_wchar_p(self._class_name)
logger.debug("creating class: %s\\%s" %
(self._namespace, self._class_name))
self._wmi_client_dll.CreateClass(p_ns, p_cn)
def write_block(self, bucket_id, value_id, data=""):
"""Described in parent class"""
logger.debug("writing block at (%s:%s)" % (bucket_id, value_id))
try:
value_name = self._get_value_name(
bucket_id, value_id)
logger.debug("value with id already exists at (%s:%s)" %
(bucket_id, value_id))
except BucketValueMissingException:
logger.debug(
"value with id does not exist in specified bucket." +
" generating a new value name for bucket id %s" % bucket_id)
value_name = self._generate_value_name()
logger.debug("generated a new value name in bucket id %s: %s" % (
bucket_id, value_name))
target_value_id = WMIStorageProvider.value_name_to_value_id(value_name)
p_ns = ctypes.c_wchar_p(self._namespace)
p_cn = ctypes.c_wchar_p(self._class_name)
p_vn = ctypes.c_wchar_p(value_name)
p_data = ctypes.c_wchar_p(data)
logger.debug(
"creating a new property at (%s:%s): %s\\%s.%s" %
(bucket_id,
target_value_id,
self._namespace,
self._class_name,
value_name))
self._wmi_client_dll.CreateProperty(p_ns, p_cn, p_vn, p_data)
return target_value_id
def get_block(self, bucket_id, value_id):
"""Described in parent class"""
logger.debug("getting block at (%s:%s)" % (bucket_id, value_id))
data = self._wmi_client.get(self._class_name).wmi_property(
self._get_value_name(bucket_id, value_id)).value
return data
def delete_block(self, bucket_id, value_id):
"""Described in parent class"""
value_name = self._get_value_name(
bucket_id, value_id)
p_ns = ctypes.c_wchar_p(self._namespace)
p_cn = ctypes.c_wchar_p(self._class_name)
p_vn = ctypes.c_wchar_p(value_name)
logger.debug(
"deleting a property at (%s:%s): %s\\%s.%s" %
(bucket_id,
WMIStorageProvider.value_name_to_value_id(value_name),
self._namespace,
self._class_name,
value_name))
self._wmi_client_dll.DeleteProperty(p_ns, p_cn, p_vn)
def get_value_ids_in_bucket(self, bucket_id):
"""Described in parent class"""
return self._enumerate_applicable_values_dict().keys()
def generate_new_value_id_in_bucket(self, bucket_id):
"""Described in parent class"""
return WMIStorageProvider.value_name_to_value_id(
self._generate_value_name())
@staticmethod
def value_name_to_value_id(value_name):
"""Returns the value ID of the given value_name"""
return int(value_name.split(
WMIStorageProvider.PROPERTY_NAME_DELIMITER)[-1])
def _get_value_name(self, bucket_id, value_id):
logger.debug("looking for value name at (%s:%s)" %
(bucket_id, value_id))
if value_id is not None:
values_dict = self._enumerate_applicable_values_dict()
logger.debug("existing values: %s" % values_dict)
if value_id in values_dict:
logger.debug("value name exists at (%s:%s): %s" %
(bucket_id, value_id, values_dict[value_id]))
return values_dict[value_id]
logger.debug("no value name at (%s:%s)" % (bucket_id, value_id))
raise BucketValueMissingException(
"No applicable value found in bucket")
def _enumerate_applicable_values_dict(self):
values_names = self._enumerate_applicable_values()
return dict(zip([WMIStorageProvider.value_name_to_value_id(name)
for name in values_names], values_names))
def _enumerate_applicable_values(self):
return self._wmi_client.get(self._class_name).properties.keys()
def _get_bucket_name(self, bucket_id):
return self._buckets_names[bucket_id]
def _generate_value_name_machine_part(self):
return self._class_name.split(
"_")[1] + WMIStorageProvider.PROPERTY_NAME_DELIMITER
def _generate_value_name(self):
return self._generate_value_name_machine_part() + \
("%04d" % calculate_next_available_index(
self._enumerate_applicable_values_dict().keys()))
def _is_value_name_applicable(self, value_name):
return value_name.startswith(
self._generate_value_name_machine_part()) and all(
[char.isdigit() for char in value_name[-4:]])
| en | 000053894_g-mc-AltFS_WMIStorageProvider_103a15da17c6.py | unknown | 2,850 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import ort_flatbuffers_py.fbs as fbs
from .operator_type_usage_processors import OperatorTypeUsageManager
class OrtFormatModelProcessor:
"Class to process an ORT format model and determine required operators and types."
def __init__(self, model_path: str, required_ops: dict, processors: OperatorTypeUsageManager):
"""
Initialize ORT format model processor
:param model_path: Path to model to load
:param required_ops: Dictionary required operator information will be added to.
:param processors: Operator type usage processors which will be called for each matching Node.
"""
self._required_ops = required_ops # dictionary of {domain: {opset:[operators]}}
self._file = open(model_path, "rb").read()
self._buffer = bytearray(self._file)
if not fbs.InferenceSession.InferenceSession.InferenceSessionBufferHasIdentifier(self._buffer, 0):
raise RuntimeError("File does not appear to be a valid ORT format model: '{}'".format(model_path))
self._model = fbs.InferenceSession.InferenceSession.GetRootAsInferenceSession(self._buffer, 0).Model()
self._op_type_processors = processors
@staticmethod
def _setup_type_info(graph: fbs.Graph, outer_scope_value_typeinfo={}):
"""
Setup the node args for this level of Graph.
We copy the current list which represents the outer scope values, and add the local node args to that
to create the valid list of values for the current Graph.
:param graph: Graph to create NodeArg list for
:param outer_scope_value_typeinfo: TypeInfo for outer scope values. Empty for the top-level graph in a model.
:return: Dictionary of NodeArg name to TypeInfo
"""
value_name_to_typeinfo = outer_scope_value_typeinfo.copy()
for j in range(0, graph.NodeArgsLength()):
n = graph.NodeArgs(j)
value_name_to_typeinfo[n.Name()] = n.Type() # TypeInfo for this NodeArg's name
return value_name_to_typeinfo
def _add_required_op(self, domain: str, opset: int, op_type: str):
if domain not in self._required_ops:
self._required_ops[domain] = {opset: set([op_type])}
elif opset not in self._required_ops[domain]:
self._required_ops[domain][opset] = set([op_type])
else:
self._required_ops[domain][opset].add(op_type)
def _process_graph(self, graph: fbs.Graph, outer_scope_value_typeinfo: dict):
"""
Process one level of the Graph, descending into any subgraphs when they are found
:param outer_scope_value_typeinfo: Outer scope NodeArg dictionary from ancestor graphs
"""
# Merge the TypeInfo for all values in this level of the graph with the outer scope value TypeInfo.
value_name_to_typeinfo = OrtFormatModelProcessor._setup_type_info(graph, outer_scope_value_typeinfo)
for i in range(0, graph.NodesLength()):
node = graph.Nodes(i)
optype = node.OpType().decode()
domain = node.Domain().decode() or "ai.onnx" # empty domain defaults to ai.onnx
self._add_required_op(domain, node.SinceVersion(), optype)
if self._op_type_processors:
self._op_type_processors.process_node(node, value_name_to_typeinfo)
# Read all the attributes
for j in range(0, node.AttributesLength()):
attr = node.Attributes(j)
attr_type = attr.Type()
if attr_type == fbs.AttributeType.AttributeType.GRAPH:
self._process_graph(attr.G(), value_name_to_typeinfo)
elif attr_type == fbs.AttributeType.AttributeType.GRAPHS:
# the ONNX spec doesn't currently define any operators that have multiple graphs in an attribute
# so entering this 'elif' isn't currently possible
for k in range(0, attr.GraphsLength()):
self._process_graph(attr.Graphs(k), value_name_to_typeinfo)
def process(self):
graph = self._model.Graph()
outer_scope_value_typeinfo = {} # no outer scope values for the main graph
self._process_graph(graph, outer_scope_value_typeinfo)
| # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import ort_flatbuffers_py.fbs as fbs
from .operator_type_usage_processors import OperatorTypeUsageManager
class OrtFormatModelProcessor:
"Class to process an ORT format model and determine required operators and types."
def __init__(self, model_path: str, required_ops: dict, processors: OperatorTypeUsageManager):
"""
Initialize ORT format model processor
:param model_path: Path to model to load
:param required_ops: Dictionary required operator information will be added to.
:param processors: Operator type usage processors which will be called for each matching Node.
"""
self._required_ops = required_ops # dictionary of {domain: {opset:[operators]}}
self._file = open(model_path, "rb").read()
self._buffer = bytearray(self._file)
if not fbs.InferenceSession.InferenceSession.InferenceSessionBufferHasIdentifier(self._buffer, 0):
raise RuntimeError("File does not appear to be a valid ORT format model: '{}'".format(model_path))
self._model = fbs.InferenceSession.InferenceSession.GetRootAsInferenceSession(self._buffer, 0).Model()
self._op_type_processors = processors
@staticmethod
def _setup_type_info(graph: fbs.Graph, outer_scope_value_typeinfo={}):
"""
Setup the node args for this level of Graph.
We copy the current list which represents the outer scope values, and add the local node args to that
to create the valid list of values for the current Graph.
:param graph: Graph to create NodeArg list for
:param outer_scope_value_typeinfo: TypeInfo for outer scope values. Empty for the top-level graph in a model.
:return: Dictionary of NodeArg name to TypeInfo
"""
value_name_to_typeinfo = outer_scope_value_typeinfo.copy()
for j in range(0, graph.NodeArgsLength()):
n = graph.NodeArgs(j)
value_name_to_typeinfo[n.Name()] = n.Type() # TypeInfo for this NodeArg's name
return value_name_to_typeinfo
def _add_required_op(self, domain: str, opset: int, op_type: str):
if domain not in self._required_ops:
self._required_ops[domain] = {opset: set([op_type])}
elif opset not in self._required_ops[domain]:
self._required_ops[domain][opset] = set([op_type])
else:
self._required_ops[domain][opset].add(op_type)
def _process_graph(self, graph: fbs.Graph, outer_scope_value_typeinfo: dict):
"""
Process one level of the Graph, descending into any subgraphs when they are found
:param outer_scope_value_typeinfo: Outer scope NodeArg dictionary from ancestor graphs
"""
# Merge the TypeInfo for all values in this level of the graph with the outer scope value TypeInfo.
value_name_to_typeinfo = OrtFormatModelProcessor._setup_type_info(graph, outer_scope_value_typeinfo)
for i in range(0, graph.NodesLength()):
node = graph.Nodes(i)
optype = node.OpType().decode()
domain = node.Domain().decode() or "ai.onnx" # empty domain defaults to ai.onnx
self._add_required_op(domain, node.SinceVersion(), optype)
if self._op_type_processors:
self._op_type_processors.process_node(node, value_name_to_typeinfo)
# Read all the attributes
for j in range(0, node.AttributesLength()):
attr = node.Attributes(j)
attr_type = attr.Type()
if attr_type == fbs.AttributeType.AttributeType.GRAPH:
self._process_graph(attr.G(), value_name_to_typeinfo)
elif attr_type == fbs.AttributeType.AttributeType.GRAPHS:
# the ONNX spec doesn't currently define any operators that have multiple graphs in an attribute
# so entering this 'elif' isn't currently possible
for k in range(0, attr.GraphsLength()):
self._process_graph(attr.Graphs(k), value_name_to_typeinfo)
def process(self):
graph = self._model.Graph()
outer_scope_value_typeinfo = {} # no outer scope values for the main graph
self._process_graph(graph, outer_scope_value_typeinfo)
| en | 000260138_mszhanyi-onnxruntime_ort_model_processor_795fd6b16d46.py | unknown | 1,163 |
from numpy import ndindex, savetxt
def save_array(
file, data, fmt="%7.2f", delimiter=",", header="data", slice="slice", sep=":"
):
"""Function to save numpy nD arrays. Therefore the array is sliced except for
the last 2 dimenions.
Parameters
----------
file: str
the save filename
header: str
some file header string
sep: str
seperator that delimits the shape information in the header
delimiter: str
data delimiter
fmt: str
string to define the output number format that is passed to numpy.savetext
Returns
-------
None
"""
# Write the array to disk
with open(file, "w") as outfile:
# writing a header to get the shape while loading
outfile.write(f"#{header}{sep}{data.shape}\n")
# iterating through ndarray except and write slices of the last 2 dims
if len(data.shape) > 2:
d = len(data.shape) - 2
for i in ndindex(data.shape[:d]):
# writing a break to indicate different slices...
outfile.write(f"#{slice}{sep}{i}\n")
savetxt(outfile, data[i], delimiter=delimiter, fmt=fmt)
else:
savetxt(outfile, data, delimiter=delimiter, fmt=fmt)
| from numpy import ndindex, savetxt
def save_array(
file, data, fmt="%7.2f", delimiter=",", header="data", slice="slice", sep=":"
):
"""Function to save numpy nD arrays. Therefore the array is sliced except for
the last 2 dimenions.
Parameters
----------
file: str
the save filename
header: str
some file header string
sep: str
seperator that delimits the shape information in the header
delimiter: str
data delimiter
fmt: str
string to define the output number format that is passed to numpy.savetext
Returns
-------
None
"""
# Write the array to disk
with open(file, "w") as outfile:
# writing a header to get the shape while loading
outfile.write(f"#{header}{sep}{data.shape}\n")
# iterating through ndarray except and write slices of the last 2 dims
if len(data.shape) > 2:
d = len(data.shape) - 2
for i in ndindex(data.shape[:d]):
# writing a break to indicate different slices...
outfile.write(f"#{slice}{sep}{i}\n")
savetxt(outfile, data[i], delimiter=delimiter, fmt=fmt)
else:
savetxt(outfile, data, delimiter=delimiter, fmt=fmt)
| en | 000101211_tobsen2code-pyleecan_save_array_3f2888761dde.py | unknown | 359 |
# Copyright 2019 Google LLC.
"""Renders a vectorized video to a video file."""
from absl import app
from absl import flags
from video_processing import processor_runner
from video_processing.processors import opencv_video_encoder
from video_processing.processors import vectorized_video_decoder
flags.DEFINE_string('input_json_file', '', 'Input file.')
flags.DEFINE_string('background_image_file', 'background.png',
'Background image to be used.')
flags.DEFINE_string('output_video_file', '', 'Output file.')
FLAGS = flags.FLAGS
def pipeline(input_json_file, background_image_file, output_video_file):
return [
vectorized_video_decoder.VectorizedVideoDecoderProcessor({
'input_json_file': input_json_file,
'background_image_file': background_image_file
}),
opencv_video_encoder.OpenCVVideoEncoderProcessor(
{'output_video_file': output_video_file})
]
def main(unused_argv):
processor_runner.run_processor_chain(
pipeline(FLAGS.input_json_file, FLAGS.background_image_file,
FLAGS.output_video_file))
if __name__ == '__main__':
app.run(main)
| # Copyright 2019 Google LLC.
"""Renders a vectorized video to a video file."""
from absl import app
from absl import flags
from video_processing import processor_runner
from video_processing.processors import opencv_video_encoder
from video_processing.processors import vectorized_video_decoder
flags.DEFINE_string('input_json_file', '', 'Input file.')
flags.DEFINE_string('background_image_file', 'background.png',
'Background image to be used.')
flags.DEFINE_string('output_video_file', '', 'Output file.')
FLAGS = flags.FLAGS
def pipeline(input_json_file, background_image_file, output_video_file):
return [
vectorized_video_decoder.VectorizedVideoDecoderProcessor({
'input_json_file': input_json_file,
'background_image_file': background_image_file
}),
opencv_video_encoder.OpenCVVideoEncoderProcessor(
{'output_video_file': output_video_file})
]
def main(unused_argv):
processor_runner.run_processor_chain(
pipeline(FLAGS.input_json_file, FLAGS.background_image_file,
FLAGS.output_video_file))
if __name__ == '__main__':
app.run(main)
| en | 000296546_learningequality-video-vectorization_render_vector_video_1184da8684fd.py | unknown | 333 |
import gzip
import shutil
def retrieve_from_gz(archive_path: str, output_path: str):
"""The retrieving gz-archived data from `archive_path` to `output_path`.
+-----------------+--------------------------------------+
| **Parameters** | | **archive_path: string** |
| | | The archive path. |
| | | **output_path: string** |
| | | The retrieved data path. |
+-----------------+--------------------------------------+
| **Returns** | **None** |
+-----------------+--------------------------------------+
"""
with gzip.open(archive_path, 'rb') as f_in:
with open(output_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
| import gzip
import shutil
def retrieve_from_gz(archive_path: str, output_path: str):
"""The retrieving gz-archived data from `archive_path` to `output_path`.
+-----------------+--------------------------------------+
| **Parameters** | | **archive_path: string** |
| | | The archive path. |
| | | **output_path: string** |
| | | The retrieved data path. |
+-----------------+--------------------------------------+
| **Returns** | **None** |
+-----------------+--------------------------------------+
"""
with gzip.open(archive_path, 'rb') as f_in:
with open(output_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
| en | 000658624_duketemon-pyuplift_retriever_620bc9ad1b8e.py | unknown | 212 |
def extractExplore(item):
"""
Explore
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
chp_prefixes = [
('geww ', 'Ghost Emperor Wild Wife: Dandy Eldest Miss', 'translated'),
('VGAFH', 'Village girl as head of the family: picked up a general for farming', 'translated'),
('The Rebirth of Deceased Consort that Astounded the World chapter ', 'The Rebirth of Deceased Consort that Astounded the World', 'translated'),
('Man Man Qing Luo chapter ', 'Man Man Qing Luo', 'translated'),
('Hilarious Pampered Consort ', 'Hilarious Pampered Consort', 'translated'),
('BTTS ', 'Back to the Sixties: Farm, Get Wealthy & Raise the Cubs', 'translated'),
('Campus Rebirth: The Strongest Female Agent', 'Campus Rebirth: The Strongest Female Agent', 'translated'),
('ESWHYMY ', 'Eldest Sister, Why Haven\'t You Married Yet', 'translated'),
('TVHISLAA ', 'Today Villain Husband Is Still Lying About Amnesia (Novel Transmigration)', 'translated'),
('Transmigrated into the Cannon Fodder\'s Daughter ', 'Transmigrated into the Cannon Fodder\'s Daughter', 'translated'),
]
for prefix, series, tl_type in chp_prefixes:
if item['title'].lower().startswith(prefix.lower()):
return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
if item['title'].lower().startswith('geww '):
return buildReleaseMessageWithType(item, 'Ghost Emperor Wild Wife: Dandy Eldest Miss', vol, chp, frag=frag, postfix=postfix)
return False | def extractExplore(item):
"""
Explore
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
chp_prefixes = [
('geww ', 'Ghost Emperor Wild Wife: Dandy Eldest Miss', 'translated'),
('VGAFH', 'Village girl as head of the family: picked up a general for farming', 'translated'),
('The Rebirth of Deceased Consort that Astounded the World chapter ', 'The Rebirth of Deceased Consort that Astounded the World', 'translated'),
('Man Man Qing Luo chapter ', 'Man Man Qing Luo', 'translated'),
('Hilarious Pampered Consort ', 'Hilarious Pampered Consort', 'translated'),
('BTTS ', 'Back to the Sixties: Farm, Get Wealthy & Raise the Cubs', 'translated'),
('Campus Rebirth: The Strongest Female Agent', 'Campus Rebirth: The Strongest Female Agent', 'translated'),
('ESWHYMY ', 'Eldest Sister, Why Haven\'t You Married Yet', 'translated'),
('TVHISLAA ', 'Today Villain Husband Is Still Lying About Amnesia (Novel Transmigration)', 'translated'),
('Transmigrated into the Cannon Fodder\'s Daughter ', 'Transmigrated into the Cannon Fodder\'s Daughter', 'translated'),
]
for prefix, series, tl_type in chp_prefixes:
if item['title'].lower().startswith(prefix.lower()):
return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
if item['title'].lower().startswith('geww '):
return buildReleaseMessageWithType(item, 'Ghost Emperor Wild Wife: Dandy Eldest Miss', vol, chp, frag=frag, postfix=postfix)
return False | en | 000462498_fake-name-ReadableWebProxy_feed_parse_extractExplore_44ab0b91a571.py | unknown | 524 |
class RNNConfig(object):
    """Hyper-parameters for the RNN sequence classifier."""
    embedding_dim = 64        # dimensionality of the token/frame embedding
    num_classes = 101         # number of output classes
    num_layers= 2 # number of stacked recurrent layers
    hidden_dim = 256 # units per hidden layer
    rnn = 'gru' # recurrent cell type: 'lstm' or 'gru'
    dropout_keep_prob = 0.8 # dropout keep probability
    learning_rate = 1e-3 # optimizer learning rate
    batch_size = 128 # sequences per training batch
    print_per_batch = 100 # log training metrics every N batches
    save_per_batch = 10 # write a TensorBoard summary every N batches
    keep_prob = 0.8           # NOTE(review): duplicates dropout_keep_prob -- confirm which one is read
    trainable = True          # whether the model's variables are trainable
    weight_decay = 0.0005     # L2 regularization strength
class CNNConfig(object):
    """Hyper-parameters for the CNN image encoder and its training loop."""
    basemodel = 'net.inception_resnet_v2' # cnn encoder model
    batch_size = 4 # num of images in one batch
    val_batch_size = 4 # validation batch size
    decay_size = 5000 # num of batches in one epoch
    nrof_max_epoch_iters = 200000 # max iters of epoch
    validate_every_n_epochs = 5000 # validate every num epochs
    gpu_memory_fraction = 0.8 # Upper bound on the amount of GPU memory that will be used by the process.
    resized_width = 299       # input width expected by inception_resnet_v2
    resized_height = 299      # input height expected by inception_resnet_v2
    keep_probability = 0.8    # dropout keep probability
    weight_decay = 5e-4       # L2 regularization strength
    # training-time data augmentation switches
    random_crop =True
    random_rotate = True
    random_flip = True
    use_fixed_image_standardization =True
    # choices=['ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM'],help='The optimization algorithm
    # to use', default='ADAGRAD')
    optimizer = 'ADAM'
    learning_rate_decay_epochs = 100
    # Number of epochs between learning rate decay.
    # 'Initial learning rate. If set to a negative value a learning rate ,
    # schedule can be specified in the file "learning_rate_schedule.txt"
    # NOTE(review): 0.0 is not negative -- confirm whether the schedule file
    # below is actually consulted with this value.
    learning_rate = 0.0
    learning_rate_schedule_file = './config/learning_rate_schedule_classifier_ucf.txt'
    learning_rate_decay_factor = 1.0 # Learning rate decay factor.
    moving_average_decay = 0.9999 # Exponential decay for tracking of training parameters.
    embedding_size = 1024     # dimensionality of the encoder's output embedding
| class RNNConfig(object):
embedding_dim = 64
num_classes = 101
num_layers= 2 # num hidden layers
hidden_dim = 256 # num hidden
rnn = 'gru' # lstm 或 gru
dropout_keep_prob = 0.8 # dropout keep prob
learning_rate = 1e-3 #
batch_size = 128 #
print_per_batch = 100 # display
save_per_batch = 10 # each how batch save to tensorboard
keep_prob = 0.8
trainable = True
weight_decay = 0.0005
class CNNConfig(object):
basemodel = 'net.inception_resnet_v2' # cnn encoder model
batch_size = 4 # num of images in one batch
val_batch_size = 4 # validate batch size
decay_size = 5000 # num of batch in one epoch
nrof_max_epoch_iters = 200000 # max iters of epoch
validate_every_n_epochs = 5000 # validata every num epochs
gpu_memory_fraction = 0.8 # Upper bound on the amount of GPU memory that will be used by the process.
resized_width = 299
resized_height = 299
keep_probability = 0.8
weight_decay = 5e-4
random_crop =True
random_rotate = True
random_flip = True
use_fixed_image_standardization =True
# choices=['ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM'],help='The optimization algorithm
# to use', default='ADAGRAD')
optimizer = 'ADAM'
learning_rate_decay_epochs = 100
# Number of epochs between learning rate decay.
# 'Initial learning rate. If set to a negative value a learning rate ,
# schedule can be specified in the file "learning_rate_schedule.txt"
learning_rate = 0.0
learning_rate_schedule_file = './config/learning_rate_schedule_classifier_ucf.txt'
learning_rate_decay_factor = 1.0 # Learning rate decay factor.
moving_average_decay = 0.9999 # Exponential decay for tracking of training parameters.
embedding_size = 1024
| en | 000201794_yuqj1990-deepano_train_config_d8d370e56d6e.py | unknown | 628 |
__copyright__ = "Copyright 2013-2016, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import errno
import shutil
import radical.utils as ru
from ... import utils as rpu
from ... import states as rps
from ... import constants as rpc
from .base import AgentStagingOutputComponent
from ...staging_directives import complete_url
# ------------------------------------------------------------------------------
#
class Default(AgentStagingOutputComponent):
    """
    This component performs all agent side output staging directives for compute
    tasks. It gets tasks from the agent_staging_output_queue, in
    AGENT_STAGING_OUTPUT_PENDING state, will advance them to
    AGENT_STAGING_OUTPUT state while performing the staging, and then moves them
    to the TMGR_STAGING_OUTPUT_PENDING state, which at the moment requires the
    state change to be published to MongoDB (no push into a queue).
    Note that this component also collects stdout/stderr of the tasks (which
    can also be considered staging, really).
    """
    # --------------------------------------------------------------------------
    #
    def __init__(self, cfg, session):
        """Initialize the base component; this class keeps no extra state."""
        AgentStagingOutputComponent.__init__(self, cfg, session)
    # --------------------------------------------------------------------------
    #
    def initialize(self):
        """Register the input queue this component consumes from."""
        self._pwd = os.getcwd()
        self.register_input(rps.AGENT_STAGING_OUTPUT_PENDING,
                            rpc.AGENT_STAGING_OUTPUT_QUEUE, self.work)
        # we don't need an output queue -- tasks are picked up via mongodb
        self.register_output(rps.TMGR_STAGING_OUTPUT_PENDING, None) # drop
    # --------------------------------------------------------------------------
    #
    def work(self, tasks):
        """Collect stdio for each task and enact agent-side output staging.

        `tasks` may be a single task dict or a list of them.  Tasks without
        actionable staging directives are advanced as one bulk; the rest are
        staged and advanced individually.
        """
        if not isinstance(tasks, list):
            tasks = [tasks]
        self.advance(tasks, rps.AGENT_STAGING_OUTPUT, publish=True, push=False)
        # we first filter out any tasks which don't need any output staging, and
        # advance them again as a bulk. We work over the others one by one, and
        # advance them individually, to avoid stalling from slow staging ops.
        no_staging_tasks = list()
        staging_tasks = list()
        for task in tasks:
            uid = task['uid']
            # From here on, any state update will hand control over to the tmgr
            # again. The next task update should thus push *all* task details,
            # not only state.
            task['$all'] = True
            task['control'] = 'tmgr_pending'
            # we always dig for stdout/stderr
            self._handle_task_stdio(task)
            # NOTE: all tasks get here after execution, even those which did not
            #       finish successfully. We do that so that we can make
            #       stdout/stderr available for failed tasks (see
            #       _handle_task_stdio above). But we don't need to perform any
            #       other staging for those tasks, and in fact can make them
            #       final.
            if task['target_state'] != rps.DONE \
               and not task['description'].get('stage_on_error'):
                task['state'] = task['target_state']
                self._log.debug('task %s skips staging: %s', uid, task['state'])
                no_staging_tasks.append(task)
                continue
            # check if we have any staging directives to be enacted in this
            # component
            actionables = list()
            for sd in task['description'].get('output_staging', []):
                if sd['action'] in [rpc.LINK, rpc.COPY, rpc.MOVE]:
                    actionables.append(sd)
            if actionables:
                # this task needs some staging
                staging_tasks.append([task, actionables])
            else:
                # this task does not need any staging at this point, and can be
                # advanced
                task['state'] = rps.TMGR_STAGING_OUTPUT_PENDING
                no_staging_tasks.append(task)
        if no_staging_tasks:
            self.advance(no_staging_tasks, publish=True, push=True)
        for task,actionables in staging_tasks:
            self._handle_task_staging(task, actionables)
    # --------------------------------------------------------------------------
    #
    def _handle_task_stdio(self, task):
        """Read back the task's stdout/stderr files (tail only) and replay any
        task-level profile events into this component's profiler.

        Binary stdout/stderr is replaced by a hint to use file staging.
        """
        sbox = task['task_sandbox_path']
        uid = task['uid']
        self._prof.prof('staging_stdout_start', uid=uid)
        # self._log.debug('out: %s', task.get('stdout_file'))
        # TODO: disable this at scale?
        if task.get('stdout_file') and os.path.isfile(task['stdout_file']):
            with ru.ru_open(task['stdout_file'], 'r') as stdout_f:
                try:
                    txt = ru.as_string(stdout_f.read())
                except UnicodeDecodeError:
                    txt = "task stdout is binary -- use file staging"
                task['stdout'] += rpu.tail(txt)
        self._prof.prof('staging_stdout_stop', uid=uid)
        self._prof.prof('staging_stderr_start', uid=uid)
        # TODO: disable this at scale?
        if task.get('stderr_file') and os.path.isfile(task['stderr_file']):
            with ru.ru_open(task['stderr_file'], 'r') as stderr_f:
                try:
                    txt = ru.as_string(stderr_f.read())
                except UnicodeDecodeError:
                    txt = "task stderr is binary -- use file staging"
                task['stderr'] += rpu.tail(txt)
            # to help with ID mapping, also parse for PRTE output:
            # [batch3:122527] JOB [3673,4] EXECUTING
            with ru.ru_open(task['stderr_file'], 'r') as stderr_f:
                for line in stderr_f.readlines():
                    line = line.strip()
                    if not line:
                        continue
                    if line[0] == '[' and line.endswith('EXECUTING'):
                        elems = line.replace('[', '').replace(']', '').split()
                        tid = elems[2]
                        self._log.info('PRTE IDMAP: %s:%s' % (tid, uid))
        self._prof.prof('staging_stderr_stop', uid=uid)
        self._prof.prof('staging_uprof_start', uid=uid)
        task_prof = "%s/%s.prof" % (sbox, uid)
        if os.path.isfile(task_prof):
            try:
                with ru.ru_open(task_prof, 'r') as prof_f:
                    txt = ru.as_string(prof_f.read())
                    for line in txt.split("\n"):
                        if line:
                            ts, event, comp, tid, _uid, state, msg = \
                                line.split(',')
                            self._prof.prof(ts=float(ts), event=event,
                                            comp=comp, tid=tid, uid=_uid,
                                            state=state, msg=msg)
            except Exception as e:
                self._log.error("Pre/Post profile read failed: `%s`" % e)
        self._prof.prof('staging_uprof_stop', uid=uid)
    # --------------------------------------------------------------------------
    #
    def _handle_task_staging(self, task, actionables):
        """Enact the given COPY/LINK/MOVE staging directives for one task and
        then advance it to TMGR_STAGING_OUTPUT_PENDING.

        'client://' source or target URLs are skipped here -- those are
        handled by the tmgr staging components.
        """
        uid = task['uid']
        # By definition, this component lives on the pilot's target resource.
        # As such, we *know* that all staging ops which would refer to the
        # resource now refer to file://localhost, and thus translate the task,
        # pilot and resource sandboxes into that scope. Some assumptions are
        # made though:
        #
        # * paths are directly translatable across schemas
        # * resource level storage is in fact accessible via file://
        #
        # FIXME: this is costly and should be cached.
        task_sandbox = ru.Url(task['task_sandbox'])
        pilot_sandbox = ru.Url(task['pilot_sandbox'])
        resource_sandbox = ru.Url(task['resource_sandbox'])
        task_sandbox.schema = 'file'
        pilot_sandbox.schema = 'file'
        resource_sandbox.schema = 'file'
        task_sandbox.host = 'localhost'
        pilot_sandbox.host = 'localhost'
        resource_sandbox.host = 'localhost'
        src_context = {'pwd' : str(task_sandbox), # !!!
                       'task' : str(task_sandbox),
                       'pilot' : str(pilot_sandbox),
                       'resource' : str(resource_sandbox)}
        tgt_context = {'pwd' : str(task_sandbox), # !!!
                       'task' : str(task_sandbox),
                       'pilot' : str(pilot_sandbox),
                       'resource' : str(resource_sandbox)}
        # we can now handle the actionable staging directives
        for sd in actionables:
            action = sd['action']
            flags = sd['flags']
            did = sd['uid']
            src = sd['source']
            tgt = sd['target']
            self._prof.prof('staging_out_start', uid=uid, msg=did)
            assert(action in [rpc.COPY, rpc.LINK, rpc.MOVE, rpc.TRANSFER]), \
                   'invalid staging action'
            # we only handle staging which does *not* include 'client://' src or
            # tgt URLs - those are handled by the tmgr staging components
            if src.startswith('client://'):
                self._log.debug('skip staging for src %s', src)
                self._prof.prof('staging_out_skip', uid=uid, msg=did)
                continue
            if tgt.startswith('client://'):
                self._log.debug('skip staging for tgt %s', tgt)
                self._prof.prof('staging_out_skip', uid=uid, msg=did)
                continue
            # Fix for when the target PATH is empty
            # we assume current directory is the task staging 'task://'
            # and we assume the file to be copied is the base filename
            # of the source
            if tgt is None: tgt = ''
            if tgt.strip() == '':
                tgt = 'task:///{}'.format(os.path.basename(src))
            # Fix for when the target PATH is exists *and* it is a folder
            # we assume the 'current directory' is the target folder
            # and we assume the file to be copied is the base filename
            # of the source
            elif os.path.exists(tgt.strip()) and os.path.isdir(tgt.strip()):
                tgt = os.path.join(tgt, os.path.basename(src))
            src = complete_url(src, src_context, self._log)
            tgt = complete_url(tgt, tgt_context, self._log)
            # Currently, we use the same schema for files and folders.
            assert(src.schema == 'file'), 'staging src must be file://'
            if action in [rpc.COPY, rpc.LINK, rpc.MOVE]:
                assert(tgt.schema == 'file'), 'staging tgt expected as file://'
            # SAGA will take care of dir creation - but we do it manually
            # for local ops (copy, link, move)
            if flags & rpc.CREATE_PARENTS and action != rpc.TRANSFER:
                tgtdir = os.path.dirname(tgt.path)
                if tgtdir != task_sandbox.path:
                    self._log.debug("mkdir %s", tgtdir)
                    ru.rec_makedir(tgtdir)
            if action == rpc.COPY:
                try:
                    shutil.copytree(src.path, tgt.path)
                except OSError as exc:
                    # ENOTDIR: src is a plain file -- fall back to a file copy
                    if exc.errno == errno.ENOTDIR:
                        shutil.copy(src.path, tgt.path)
                    else:
                        raise
            elif action == rpc.LINK:
                # Fix issue/1513 if link source is file and target is folder
                # should support POSIX standard where link is created
                # with the same name as the source
                if os.path.isfile(src.path) and os.path.isdir(tgt.path):
                    os.symlink(src.path,
                               os.path.join(tgt.path,
                                            os.path.basename(src.path)))
                else: # default behavior
                    os.symlink(src.path, tgt.path)
            elif action == rpc.MOVE: shutil.move(src.path, tgt.path)
            elif action == rpc.TRANSFER: pass
            # This is currently never executed. Commenting it out.
            # Uncomment and implement when uploads directly to remote URLs
            # from tasks are supported.
            # FIXME: we only handle srm staging right now, and only for
            #        a specific target proxy. Other TRANSFER directives are
            #        left to tmgr output staging. We should use SAGA to
            #        attempt all staging ops which do not target the client
            #        machine.
            # if tgt.schema == 'srm':
            #     # FIXME: cache saga handles
            #     srm_dir = rs.filesystem.Directory('srm://proxy/?SFN=bogus')
            #     srm_dir.copy(src, tgt)
            #     srm_dir.close()
            # else:
            #     self._log.error('no transfer for %s -> %s', src, tgt)
            #     self._prof.prof('staging_out_fail', uid=uid, msg=did)
            #     raise NotImplementedError('unsupported transfer %s' % tgt)
            self._prof.prof('staging_out_stop', uid=uid, msg=did)
        # all agent staging is done -- pass on to tmgr output staging
        self.advance(task, rps.TMGR_STAGING_OUTPUT_PENDING,
                     publish=True, push=False)
# ------------------------------------------------------------------------------
|
__copyright__ = "Copyright 2013-2016, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import errno
import shutil
import radical.utils as ru
from ... import utils as rpu
from ... import states as rps
from ... import constants as rpc
from .base import AgentStagingOutputComponent
from ...staging_directives import complete_url
# ------------------------------------------------------------------------------
#
class Default(AgentStagingOutputComponent):
"""
This component performs all agent side output staging directives for compute
tasks. It gets tasks from the agent_staging_output_queue, in
AGENT_STAGING_OUTPUT_PENDING state, will advance them to
AGENT_STAGING_OUTPUT state while performing the staging, and then moves then
to the TMGR_STAGING_OUTPUT_PENDING state, which at the moment requires the
state change to be published to MongoDB (no push into a queue).
Note that this component also collects stdout/stderr of the tasks (which
can also be considered staging, really).
"""
# --------------------------------------------------------------------------
#
def __init__(self, cfg, session):
AgentStagingOutputComponent.__init__(self, cfg, session)
# --------------------------------------------------------------------------
#
def initialize(self):
self._pwd = os.getcwd()
self.register_input(rps.AGENT_STAGING_OUTPUT_PENDING,
rpc.AGENT_STAGING_OUTPUT_QUEUE, self.work)
# we don't need an output queue -- tasks are picked up via mongodb
self.register_output(rps.TMGR_STAGING_OUTPUT_PENDING, None) # drop
# --------------------------------------------------------------------------
#
def work(self, tasks):
if not isinstance(tasks, list):
tasks = [tasks]
self.advance(tasks, rps.AGENT_STAGING_OUTPUT, publish=True, push=False)
# we first filter out any tasks which don't need any input staging, and
# advance them again as a bulk. We work over the others one by one, and
# advance them individually, to avoid stalling from slow staging ops.
no_staging_tasks = list()
staging_tasks = list()
for task in tasks:
uid = task['uid']
# From here on, any state update will hand control over to the tmgr
# again. The next task update should thus push *all* task details,
# not only state.
task['$all'] = True
task['control'] = 'tmgr_pending'
# we always dig for stdout/stderr
self._handle_task_stdio(task)
# NOTE: all tasks get here after execution, even those which did not
# finish successfully. We do that so that we can make
# stdout/stderr available for failed tasks (see
# _handle_task_stdio above). But we don't need to perform any
# other staging for those tasks, and in fact can make them
# final.
if task['target_state'] != rps.DONE \
and not task['description'].get('stage_on_error'):
task['state'] = task['target_state']
self._log.debug('task %s skips staging: %s', uid, task['state'])
no_staging_tasks.append(task)
continue
# check if we have any staging directives to be enacted in this
# component
actionables = list()
for sd in task['description'].get('output_staging', []):
if sd['action'] in [rpc.LINK, rpc.COPY, rpc.MOVE]:
actionables.append(sd)
if actionables:
# this task needs some staging
staging_tasks.append([task, actionables])
else:
# this task does not need any staging at this point, and can be
# advanced
task['state'] = rps.TMGR_STAGING_OUTPUT_PENDING
no_staging_tasks.append(task)
if no_staging_tasks:
self.advance(no_staging_tasks, publish=True, push=True)
for task,actionables in staging_tasks:
self._handle_task_staging(task, actionables)
# --------------------------------------------------------------------------
#
def _handle_task_stdio(self, task):
sbox = task['task_sandbox_path']
uid = task['uid']
self._prof.prof('staging_stdout_start', uid=uid)
# self._log.debug('out: %s', task.get('stdout_file'))
# TODO: disable this at scale?
if task.get('stdout_file') and os.path.isfile(task['stdout_file']):
with ru.ru_open(task['stdout_file'], 'r') as stdout_f:
try:
txt = ru.as_string(stdout_f.read())
except UnicodeDecodeError:
txt = "task stdout is binary -- use file staging"
task['stdout'] += rpu.tail(txt)
self._prof.prof('staging_stdout_stop', uid=uid)
self._prof.prof('staging_stderr_start', uid=uid)
# TODO: disable this at scale?
if task.get('stderr_file') and os.path.isfile(task['stderr_file']):
with ru.ru_open(task['stderr_file'], 'r') as stderr_f:
try:
txt = ru.as_string(stderr_f.read())
except UnicodeDecodeError:
txt = "task stderr is binary -- use file staging"
task['stderr'] += rpu.tail(txt)
# to help with ID mapping, also parse for PRTE output:
# [batch3:122527] JOB [3673,4] EXECUTING
with ru.ru_open(task['stderr_file'], 'r') as stderr_f:
for line in stderr_f.readlines():
line = line.strip()
if not line:
continue
if line[0] == '[' and line.endswith('EXECUTING'):
elems = line.replace('[', '').replace(']', '').split()
tid = elems[2]
self._log.info('PRTE IDMAP: %s:%s' % (tid, uid))
self._prof.prof('staging_stderr_stop', uid=uid)
self._prof.prof('staging_uprof_start', uid=uid)
task_prof = "%s/%s.prof" % (sbox, uid)
if os.path.isfile(task_prof):
try:
with ru.ru_open(task_prof, 'r') as prof_f:
txt = ru.as_string(prof_f.read())
for line in txt.split("\n"):
if line:
ts, event, comp, tid, _uid, state, msg = \
line.split(',')
self._prof.prof(ts=float(ts), event=event,
comp=comp, tid=tid, uid=_uid,
state=state, msg=msg)
except Exception as e:
self._log.error("Pre/Post profile read failed: `%s`" % e)
self._prof.prof('staging_uprof_stop', uid=uid)
# --------------------------------------------------------------------------
#
def _handle_task_staging(self, task, actionables):
uid = task['uid']
# By definition, this compoentn lives on the pilot's target resource.
# As such, we *know* that all staging ops which would refer to the
# resource now refer to file://localhost, and thus translate the task,
# pilot and resource sandboxes into that scope. Some assumptions are
# made though:
#
# * paths are directly translatable across schemas
# * resource level storage is in fact accessible via file://
#
# FIXME: this is costly and should be cached.
task_sandbox = ru.Url(task['task_sandbox'])
pilot_sandbox = ru.Url(task['pilot_sandbox'])
resource_sandbox = ru.Url(task['resource_sandbox'])
task_sandbox.schema = 'file'
pilot_sandbox.schema = 'file'
resource_sandbox.schema = 'file'
task_sandbox.host = 'localhost'
pilot_sandbox.host = 'localhost'
resource_sandbox.host = 'localhost'
src_context = {'pwd' : str(task_sandbox), # !!!
'task' : str(task_sandbox),
'pilot' : str(pilot_sandbox),
'resource' : str(resource_sandbox)}
tgt_context = {'pwd' : str(task_sandbox), # !!!
'task' : str(task_sandbox),
'pilot' : str(pilot_sandbox),
'resource' : str(resource_sandbox)}
# we can now handle the actionable staging directives
for sd in actionables:
action = sd['action']
flags = sd['flags']
did = sd['uid']
src = sd['source']
tgt = sd['target']
self._prof.prof('staging_out_start', uid=uid, msg=did)
assert(action in [rpc.COPY, rpc.LINK, rpc.MOVE, rpc.TRANSFER]), \
'invalid staging action'
# we only handle staging which does *not* include 'client://' src or
# tgt URLs - those are handled by the tmgr staging components
if src.startswith('client://'):
self._log.debug('skip staging for src %s', src)
self._prof.prof('staging_out_skip', uid=uid, msg=did)
continue
if tgt.startswith('client://'):
self._log.debug('skip staging for tgt %s', tgt)
self._prof.prof('staging_out_skip', uid=uid, msg=did)
continue
# Fix for when the target PATH is empty
# we assume current directory is the task staging 'task://'
# and we assume the file to be copied is the base filename
# of the source
if tgt is None: tgt = ''
if tgt.strip() == '':
tgt = 'task:///{}'.format(os.path.basename(src))
# Fix for when the target PATH is exists *and* it is a folder
# we assume the 'current directory' is the target folder
# and we assume the file to be copied is the base filename
# of the source
elif os.path.exists(tgt.strip()) and os.path.isdir(tgt.strip()):
tgt = os.path.join(tgt, os.path.basename(src))
src = complete_url(src, src_context, self._log)
tgt = complete_url(tgt, tgt_context, self._log)
# Currently, we use the same schema for files and folders.
assert(src.schema == 'file'), 'staging src must be file://'
if action in [rpc.COPY, rpc.LINK, rpc.MOVE]:
assert(tgt.schema == 'file'), 'staging tgt expected as file://'
# SAGA will take care of dir creation - but we do it manually
# for local ops (copy, link, move)
if flags & rpc.CREATE_PARENTS and action != rpc.TRANSFER:
tgtdir = os.path.dirname(tgt.path)
if tgtdir != task_sandbox.path:
self._log.debug("mkdir %s", tgtdir)
ru.rec_makedir(tgtdir)
if action == rpc.COPY:
try:
shutil.copytree(src.path, tgt.path)
except OSError as exc:
if exc.errno == errno.ENOTDIR:
shutil.copy(src.path, tgt.path)
else:
raise
elif action == rpc.LINK:
# Fix issue/1513 if link source is file and target is folder
# should support POSIX standard where link is created
# with the same name as the source
if os.path.isfile(src.path) and os.path.isdir(tgt.path):
os.symlink(src.path,
os.path.join(tgt.path,
os.path.basename(src.path)))
else: # default behavior
os.symlink(src.path, tgt.path)
elif action == rpc.MOVE: shutil.move(src.path, tgt.path)
elif action == rpc.TRANSFER: pass
# This is currently never executed. Commenting it out.
# Uncomment and implement when uploads directly to remote URLs
# from tasks are supported.
# FIXME: we only handle srm staging right now, and only for
# a specific target proxy. Other TRANSFER directives are
# left to tmgr output staging. We should use SAGA to
# attempt all staging ops which do not target the client
# machine.
# if tgt.schema == 'srm':
# # FIXME: cache saga handles
# srm_dir = rs.filesystem.Directory('srm://proxy/?SFN=bogus')
# srm_dir.copy(src, tgt)
# srm_dir.close()
# else:
# self._log.error('no transfer for %s -> %s', src, tgt)
# self._prof.prof('staging_out_fail', uid=uid, msg=did)
# raise NotImplementedError('unsupported transfer %s' % tgt)
self._prof.prof('staging_out_stop', uid=uid, msg=did)
# all agent staging is done -- pass on to tmgr output staging
self.advance(task, rps.TMGR_STAGING_OUTPUT_PENDING,
publish=True, push=False)
# ------------------------------------------------------------------------------
| en | 000226546_radical-cybertools-radical.pilot_default_dc7c3d841674.py | unknown | 3,538 |
# Copyright 2018 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Postprocessing utility function for CLIF."""
# CLIF postprocessor for a C++ function with signature:
# bool MyFunc(input_arg1, ..., *output_arg1, *output_arg2, ..., *error)
#
# If MyFunc returns True, returns (output_arg1, output_arg2, ...)
# If MyFunc returns False, raises ValueError(error).
def ValueErrorOnFalse(ok, *output_args):
  """Return the output argument(s), raising ValueError when `ok` is False.

  Mirrors the C++ convention `bool MyFunc(in..., *out..., *error)`: the last
  positional argument carries the error message, the ones before it carry
  the outputs.  A single output is returned bare, multiple as a tuple.
  """
  if len(output_args) < 2:
    raise ValueError("Expected 2 or more output_args. Got: %d" % len(output_args))
  if not ok:
    raise ValueError(output_args[-1])
  return output_args[0] if len(output_args) == 2 else output_args[:-1]
# CLIF postprocessor for a C++ function with signature:
# *result MyFactory(input_arg1, ..., *error)
#
# If result is not null, returns result.
# If result is null, raises ValueError(error).
def ValueErrorOnNull(result, error):
  """Return `result`, raising ValueError(error) when it is None."""
  if result is not None:
    return result
  raise ValueError(error)
| # Copyright 2018 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Postprocessing utility function for CLIF."""
# CLIF postprocessor for a C++ function with signature:
# bool MyFunc(input_arg1, ..., *output_arg1, *output_arg2, ..., *error)
#
# If MyFunc returns True, returns (output_arg1, output_arg2, ...)
# If MyFunc returns False, raises ValueError(error).
def ValueErrorOnFalse(ok, *output_args):
"""Raises ValueError if not ok, otherwise returns the output arguments."""
n_outputs = len(output_args)
if n_outputs < 2:
raise ValueError("Expected 2 or more output_args. Got: %d" % n_outputs)
if not ok:
error = output_args[-1]
raise ValueError(error)
if n_outputs == 2:
output = output_args[0]
else:
output = output_args[0:-1]
return output
# CLIF postprocessor for a C++ function with signature:
# *result MyFactory(input_arg1, ..., *error)
#
# If result is not null, returns result.
# If result is null, raises ValueError(error).
def ValueErrorOnNull(result, error):
"""Raises ValueError(error) if result is None, otherwise returns result."""
if result is None:
raise ValueError(error)
return result
| en | 000136053_xychu-models_postproc_92b06d409291.py | unknown | 494 |
from django.core.management.base import BaseCommand
from zoo.repos import github, gitlab, zoo_yml
from zoo.services.models import Service
class Command(BaseCommand):
    """Management command: create a `.zoo.yml` for every service lacking one."""
    help = "generate .zoo.yml file for all services in the database that do not have it"
    ZOO_YML = ".zoo.yml"
    ZOO_COMMIT_MSG = "feat(zoo): generate .zoo.yml file"
    def handle(self, *args, **options):
        """Commit a generated .zoo.yml to master for each eligible service."""
        for service in Service.objects.all():
            remote_id = service.repository.remote_id
            provider = self.get_provider(service.repository.provider)
            # Skip services hosted on providers we cannot talk to.
            if not provider:
                continue
            # Never overwrite an existing .zoo.yml.
            if self.file_exists(remote_id, Command.ZOO_YML, provider):
                continue
            yml = zoo_yml.generate(service)
            actions = [
                {"action": "create", "content": yml, "file_path": Command.ZOO_YML}
            ]
            branch = "master"
            # NOTE(review): the provider module is passed to its own
            # create_remote_commit -- confirm the callee expects itself here.
            provider.create_remote_commit(
                remote_id, Command.ZOO_COMMIT_MSG, actions, branch, provider
            )
    def get_provider(self, provider):
        """Map a provider slug ('github'/'gitlab') to its repo module.

        Returns None for unknown slugs.  (Previously this used
        `providers[provider]`, which raised KeyError for any other slug and
        made the `if not provider: continue` guard in handle() unreachable.)
        """
        providers = {
            "github": github,
            "gitlab": gitlab,
        }
        return providers.get(provider)
    def file_exists(self, remote_id, path, provider, ref="master"):
        """Return True if `path` exists with non-empty content at `ref`."""
        try:
            content = provider.get_file_content(remote_id, path, ref)
            return bool(content)
        except FileNotFoundError:
            return False
| from django.core.management.base import BaseCommand
from zoo.repos import github, gitlab, zoo_yml
from zoo.services.models import Service
class Command(BaseCommand):
help = "generate .zoo.yml file for all services in the database that do not have it"
ZOO_YML = ".zoo.yml"
ZOO_COMMIT_MSG = "feat(zoo): generate .zoo.yml file"
def handle(self, *args, **options):
for service in Service.objects.all():
remote_id, provider = (
service.repository.remote_id,
self.get_provider(service.repository.provider),
)
if not provider:
continue
if self.file_exists(remote_id, Command.ZOO_YML, provider):
continue
yml = zoo_yml.generate(service)
actions = [
{"action": "create", "content": yml, "file_path": Command.ZOO_YML}
]
branch = "master"
provider.create_remote_commit(
remote_id, Command.ZOO_COMMIT_MSG, actions, branch, provider
)
def get_provider(self, provider):
providers = {
"github": github,
"gitlab": gitlab,
}
return providers[provider]
def file_exists(self, remote_id, path, provider, ref="master"):
try:
content = provider.get_file_content(remote_id, path, ref)
return bool(content)
except FileNotFoundError:
return False
| en | 000649909_aexvir-the-zoo_generatezooyml_96b45da817a9.py | unknown | 394 |
import taso as ts
import sys
seq_length = 512
hidden_dims = 768
batch_size = int(sys.argv[1])
def attention(graph, input, heads):
    """Append one self-attention block (multi-head attention + two ReLU
    linear layers with a residual connection) to the TASO graph.

    Args:
        graph: the TASO graph under construction.
        input: activation tensor of shape (batch_size * seq_length, embed).
        heads: number of attention heads; must evenly divide the embedding.

    Returns:
        The block's output tensor, shape (batch_size * seq_length, embed).
    """
    embed = input.dim(1)  # embedding length
    assert embed % heads == 0
    head_dim = embed // heads
    # query / key / value projection weights
    weights = [graph.new_weight(dims=(embed, embed)) for _ in range(3)]
    q = graph.matmul(input, weights[0])
    k = graph.matmul(input, weights[1])
    v = graph.matmul(input, weights[2])
    # Split the projections into `heads` heads.  BUG FIX: this was
    # hard-coded to 12 heads of size 64, silently ignoring the `heads`
    # argument (callers pass 16) and the module-level seq_length.
    q = graph.reshape(q, shape=(batch_size, seq_length, heads, head_dim))
    k = graph.reshape(k, shape=(batch_size, seq_length, heads, head_dim))
    v = graph.reshape(v, shape=(batch_size, seq_length, heads, head_dim))
    # transpose for batched matmul: q, v -> (b, h, s, d); k -> (b, h, d, s)
    q = graph.transpose(q, perm=(0, 2, 1, 3), shuffle=True)
    k = graph.transpose(k, perm=(0, 2, 3, 1), shuffle=True)
    v = graph.transpose(v, perm=(0, 2, 1, 3), shuffle=True)
    # attention logits and weighted values (no scaling/softmax, as before)
    logits = graph.matmul(q, k)
    output = graph.matmul(logits, v)
    # merge the heads back: (b, h, s, d) -> (b, s, h*d)
    output = graph.transpose(output, perm=(0, 2, 1, 3), shuffle=True)
    output = graph.reshape(output, shape=(batch_size, seq_length, embed))
    # two linear layers with ReLU, then a residual connection to the input
    linear = graph.new_weight(dims=(batch_size, embed, embed))
    linear2 = graph.new_weight(dims=(batch_size, embed, embed))
    output = graph.matmul(output, linear)
    output = graph.relu(graph.reshape(output, shape=(batch_size * seq_length, embed)))
    output = graph.reshape(output, shape=(batch_size, seq_length, embed))
    output = graph.matmul(output, linear2)
    output = graph.relu(graph.reshape(output, shape=(batch_size * seq_length, embed)))
    output = graph.add(output, input)
    output = graph.reshape(output, shape=(batch_size * seq_length, embed))
    return output
# Build the model graph: ReLU on the embedded input, then 12 stacked
# attention blocks with 16 heads each.
graph = ts.new_graph()
input = graph.new_input(dims=(batch_size * seq_length, hidden_dims))
input = graph.relu(input)
t = input
for i in range(12):
    t = attention(graph, t, 16)
# Run TASO's superoptimizer and compare measured runtimes.
new_graph = ts.optimize(graph, alpha=1.0, budget=100)
print(graph.run_time())      # original graph
print(new_graph.run_time())  # optimized graph
print(new_graph.run_time()) | import taso as ts
import sys
seq_length = 512
hidden_dims = 768
batch_size = int(sys.argv[1])
def attention(graph, input, heads):
embed = input.dim(1) # embedding len
assert input.dim(1) % heads == 0
weights = list()
for i in range(3):
weights.append(graph.new_weight(dims=(embed, embed)))
# compute query, key, value tensors
q = graph.matmul(input, weights[0])
k = graph.matmul(input, weights[1])
v = graph.matmul(input, weights[2])
# reshape query, key, value to multiple heads
q = graph.reshape(q, shape=(batch_size, 512, 12, 64))
k = graph.reshape(k, shape=(batch_size, 512, 12, 64))
v = graph.reshape(v, shape=(batch_size, 512, 12, 64))
# transpose query, key, value for batched matmul
q = graph.transpose(q, perm=(0, 2, 1, 3), shuffle=True)
k = graph.transpose(k, perm=(0, 2, 3, 1), shuffle=True)
v = graph.transpose(v, perm=(0, 2, 1, 3), shuffle=True)
# perform matrix multiplications
logits = graph.matmul(q, k)
output = graph.matmul(logits, v)
# transpose the output back
output = graph.transpose(output, perm=(0, 2, 1, 3), shuffle=True)
output = graph.reshape(output, shape=(batch_size, 512, 768))
# a final linear layer
linear = graph.new_weight(dims=(batch_size, embed, embed))
linear2 = graph.new_weight(dims=(batch_size, embed, embed))
output = graph.matmul(output, linear)
output = graph.relu(graph.reshape(output, shape=(batch_size * 512, 768)))
output = graph.reshape(output, shape=(batch_size, 512, 768))
output = graph.matmul(output, linear2)
output = graph.relu(graph.reshape(output, shape=(batch_size * 512, 768)))
output = graph.add(output, input)
output = graph.reshape(output, shape=(batch_size * 512, 768))
# output = graph.new_weight(dims=(seq_length, embed))
return output
graph = ts.new_graph()
input = graph.new_input(dims=(batch_size * seq_length, hidden_dims))
input = graph.relu(input)
t = input
for i in range(12):
t = attention(graph, t, 16)
new_graph = ts.optimize(graph, alpha=1.0, budget=100)
print(graph.run_time())
print(new_graph.run_time()) | en | 000477241_hgl71964-PET_bert_6b9387c9d2c7.py | unknown | 814 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
## Luong et al ##
class Attention(nn.Module):
def __init__(self, dim, transform=0):
super(Attention, self).__init__()
if transform != 0:
self.transform = True
self.linear_in = nn.Linear(dim, transform)
self.linear_out = nn.Linear(transform*2, transform)
else:
self.transform = False
self.linear_out = nn.Linear(dim*2, dim)
# self.U = nn.Linear(dim,dim)
def forward(self, output, context):
# output: decoder hidden state
# context: encoder outputs
if self.transform: output = self.linear_in(output)
batch_size = output.size(0)
hidden_size = output.size(2)
input_size = context.size(1)
# context = self.U(context)
attn = torch.bmm(output, context.transpose(1, 2))
attn = F.softmax(attn.view(-1, input_size),dim=1).view(batch_size, -1, input_size)
mix = torch.bmm(attn, context)
combined = torch.cat((mix, output), dim=2)
output = F.tanh(self.linear_out(combined.view(-1, 2 * hidden_size))).view(batch_size, -1, hidden_size)
return output, attn
## Dot ##
class Attention1(nn.Module):
def __init__(self, dim):
super(Attention1, self).__init__()
def forward(self, decoder_hidden, encoder_outputs):
batch_size = decoder_hidden.size(0)
attn = torch.bmm(decoder_hidden, encoder_outputs.transpose(1, 2))
attn = F.softmax(attn.view(-1, encoder_outputs.size(1)),dim=1).view(batch_size, -1, encoder_outputs.size(1))
context = torch.bmm(attn, encoder_outputs)
return context, attn
## General ##
class Attention2(nn.Module):
def __init__(self, dim):
super(Attention2, self).__init__()
self.U = nn.Linear(dim,dim)
def forward(self, decoder_hidden, encoder_outputs):
batch_size = decoder_hidden.size(0)
encoder_outputs = self.U(encoder_outputs)
attn = torch.bmm(decoder_hidden, encoder_outputs.transpose(1, 2))
attn = F.softmax(attn.view(-1, encoder_outputs.size(1)),dim=1).view(batch_size, -1, encoder_outputs.size(1))
context = torch.bmm(attn, encoder_outputs)
return context, attn
## Concatenate ##
class Attention3(nn.Module):
def __init__(self, dim):
super(Attention3, self).__init__()
self.W = nn.Linear(dim,dim)
self.U = nn.Linear(dim,dim)
self.v = nn.Linear(dim,1)
def forward(self, decoder_hidden, encoder_outputs):
batch_size = decoder_hidden.size(0)
encoder_length = encoder_outputs.size(1)
attn = self.v(F.tanh(self.W(decoder_hidden) + self.U(encoder_outputs)))
attn = F.softmax(attn.view(-1, encoder_outputs.size(1)),dim=1).view(batch_size, -1, encoder_outputs.size(1))
context = torch.bmm(attn, encoder_outputs)
return context, attn
| import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
## Luong et al ##
class Attention(nn.Module):
def __init__(self, dim, transform=0):
super(Attention, self).__init__()
if transform != 0:
self.transform = True
self.linear_in = nn.Linear(dim, transform)
self.linear_out = nn.Linear(transform*2, transform)
else:
self.transform = False
self.linear_out = nn.Linear(dim*2, dim)
# self.U = nn.Linear(dim,dim)
def forward(self, output, context):
# output: decoder hidden state
# context: encoder outputs
if self.transform: output = self.linear_in(output)
batch_size = output.size(0)
hidden_size = output.size(2)
input_size = context.size(1)
# context = self.U(context)
attn = torch.bmm(output, context.transpose(1, 2))
attn = F.softmax(attn.view(-1, input_size),dim=1).view(batch_size, -1, input_size)
mix = torch.bmm(attn, context)
combined = torch.cat((mix, output), dim=2)
output = F.tanh(self.linear_out(combined.view(-1, 2 * hidden_size))).view(batch_size, -1, hidden_size)
return output, attn
## Dot ##
class Attention1(nn.Module):
def __init__(self, dim):
super(Attention1, self).__init__()
def forward(self, decoder_hidden, encoder_outputs):
batch_size = decoder_hidden.size(0)
attn = torch.bmm(decoder_hidden, encoder_outputs.transpose(1, 2))
attn = F.softmax(attn.view(-1, encoder_outputs.size(1)),dim=1).view(batch_size, -1, encoder_outputs.size(1))
context = torch.bmm(attn, encoder_outputs)
return context, attn
## General ##
class Attention2(nn.Module):
def __init__(self, dim):
super(Attention2, self).__init__()
self.U = nn.Linear(dim,dim)
def forward(self, decoder_hidden, encoder_outputs):
batch_size = decoder_hidden.size(0)
encoder_outputs = self.U(encoder_outputs)
attn = torch.bmm(decoder_hidden, encoder_outputs.transpose(1, 2))
attn = F.softmax(attn.view(-1, encoder_outputs.size(1)),dim=1).view(batch_size, -1, encoder_outputs.size(1))
context = torch.bmm(attn, encoder_outputs)
return context, attn
## Concatenate ##
class Attention3(nn.Module):
def __init__(self, dim):
super(Attention3, self).__init__()
self.W = nn.Linear(dim,dim)
self.U = nn.Linear(dim,dim)
self.v = nn.Linear(dim,1)
def forward(self, decoder_hidden, encoder_outputs):
batch_size = decoder_hidden.size(0)
encoder_length = encoder_outputs.size(1)
attn = self.v(F.tanh(self.W(decoder_hidden) + self.U(encoder_outputs)))
attn = F.softmax(attn.view(-1, encoder_outputs.size(1)),dim=1).view(batch_size, -1, encoder_outputs.size(1))
context = torch.bmm(attn, encoder_outputs)
return context, attn
| en | 000442055_leonardocunha2107-LaMP_Attention_6b36133eb1f8.py | unknown | 962 |
# -*- coding: utf-8 -*-
import base64
from PyQt5.QtWebEngineWidgets import (
QWebEngineView,
QWebEngineProfile,
QWebEngineSettings
)
from PyQt5.QtCore import (
QObject, QSize, Qt, QTimer, pyqtSlot, QEvent,
QPointF, QPoint, pyqtSignal, QUrl,
QSizeF,
)
from twisted.internet import defer
from splash import defaults
from splash.browser_tab import (
BrowserTab,
skip_if_closing,
webpage_option_setter,
webpage_option_getter
)
from splash.qtutils import WrappedSignal, parse_size
from splash.errors import RenderErrorInfo
from splash.render_options import validate_size_str
from .webpage import ChromiumWebPage
from .constants import RenderProcessTerminationStatus
from .screenshot import QtChromiumScreenshotRenderer
class ChromiumBrowserTab(BrowserTab):
def __init__(self, render_options, verbosity):
super().__init__(render_options, verbosity)
self.profile = QWebEngineProfile() # don't share cookies
self.web_page = ChromiumWebPage(self.profile)
self.web_view = QWebEngineView()
self.web_view.setPage(self.web_page)
self.web_view.setAttribute(Qt.WA_DeleteOnClose, True)
# TODO: is it ok? :)
# self.web_view.setAttribute(Qt.WA_DontShowOnScreen, True)
# FIXME: required for screenshots?
# Also, without .show() in JS window.innerWidth/innerHeight are zeros
self.web_view.show()
self._setup_webpage_events()
self._set_default_webpage_options()
self._html_d = None
# ensure that default window size is not 640x480.
self.set_viewport(defaults.VIEWPORT_SIZE)
def _setup_webpage_events(self):
self._load_finished = WrappedSignal(self.web_view.loadFinished)
self._render_terminated = WrappedSignal(self.web_view.renderProcessTerminated)
self.web_view.renderProcessTerminated.connect(self._on_render_terminated)
self.web_view.loadFinished.connect(self._on_load_finished)
# main_frame.urlChanged.connect(self._on_url_changed)
# main_frame.javaScriptWindowObjectCleared.connect(
# self._on_javascript_window_object_cleared)
# self.logger.add_web_page(self.web_page)
def _set_default_webpage_options(self):
""" Set QWebPage options. TODO: allow to customize defaults. """
settings = self.web_page.settings()
settings.setAttribute(QWebEngineSettings.ScreenCaptureEnabled, True)
settings.setAttribute(QWebEngineSettings.JavascriptCanOpenWindows, False)
settings.setAttribute(QWebEngineSettings.LocalContentCanAccessRemoteUrls, True)
settings.setAttribute(QWebEngineSettings.ShowScrollBars, False)
# TODO
# if self.visible:
# settings.setAttribute(QWebSettings.DeveloperExtrasEnabled, True)
# TODO: options
# self.set_js_enabled(True)
# self.set_plugins_enabled(defaults.PLUGINS_ENABLED)
# self.set_request_body_enabled(defaults.REQUEST_BODY_ENABLED)
# self.set_response_body_enabled(defaults.RESPONSE_BODY_ENABLED)
# self.set_indexeddb_enabled(defaults.INDEXEDDB_ENABLED)
# self.set_webgl_enabled(defaults.WEBGL_ENABLED)
# self.set_html5_media_enabled(defaults.HTML5_MEDIA_ENABLED)
# self.set_media_source_enabled(defaults.MEDIA_SOURCE_ENABLED)
def go(self, url, callback, errback):
callback_id = self._load_finished.connect(
self._on_content_ready,
callback=callback,
errback=errback,
)
self.logger.log("callback %s is connected to loadFinished" % callback_id, min_level=3)
self.web_view.load(QUrl(url))
@skip_if_closing
def _on_content_ready(self, ok, callback, errback, callback_id):
"""
This method is called when a QWebEnginePage finishes loading.
"""
self.logger.log("loadFinished: disconnecting callback %s" % callback_id,
min_level=3)
self._load_finished.disconnect(callback_id)
if ok:
callback()
else:
error_info = RenderErrorInfo(
type='Unknown',
code=0,
text="loadFinished ok=False",
url=self.web_view.url().toString()
)
errback(error_info)
def _on_load_finished(self, ok):
self.logger.log("loadFinished, ok=%s" % ok, min_level=2)
def _on_render_terminated(self, status, code):
status_details = RenderProcessTerminationStatus.get(status, 'unknown')
self.logger.log("renderProcessTerminated: %s (%s), exit_code=%s" % (
status, status_details, code), min_level=1)
def html(self):
""" Return HTML of the current main frame """
self.logger.log("getting HTML", min_level=2)
if self._html_d is not None:
self.logger.log("HTML is already requested", min_level=1)
return self._html_d
self._html_d = defer.Deferred()
self.web_view.page().toHtml(self._on_html_ready)
return self._html_d
def _on_html_ready(self, html):
self.logger.log("HTML ready", min_level=2)
self._html_d.callback(html)
self._html_d = None
def png(self, width=None, height=None, b64=False, render_all=False,
scale_method=None, region=None):
""" Return screenshot in PNG format """
# FIXME: move to base class
self.logger.log(
"Getting PNG: width=%s, height=%s, "
"render_all=%s, scale_method=%s, region=%s" %
(width, height, render_all, scale_method, region), min_level=2)
if render_all:
raise ValueError("render_all=True is not supported yet")
image = self._get_image('PNG', width, height, render_all,
scale_method, region=region)
result = image.to_png()
if b64:
result = base64.b64encode(result).decode('utf-8')
# self.store_har_timing("_onPngRendered")
return result
def jpeg(self, width=None, height=None, b64=False, render_all=False,
scale_method=None, quality=None, region=None):
""" Return screenshot in JPEG format. """
# FIXME: move to base class
self.logger.log(
"Getting JPEG: width=%s, height=%s, "
"render_all=%s, scale_method=%s, quality=%s, region=%s" %
(width, height, render_all, scale_method, quality, region),
min_level=2)
if render_all:
raise ValueError("render_all=True is not supported yet")
image = self._get_image('JPEG', width, height, render_all,
scale_method, region=region)
result = image.to_jpeg(quality=quality)
if b64:
result = base64.b64encode(result).decode('utf-8')
# self.store_har_timing("_onJpegRendered")
return result
def _get_image(self, image_format, width, height, render_all,
scale_method, region):
renderer = QtChromiumScreenshotRenderer(
self.web_page, self.logger, image_format,
width=width, height=height, scale_method=scale_method,
region=region)
return renderer.render_qwebpage()
def set_viewport(self, size, raise_if_empty=False):
"""
Set viewport size.
If size is "full" viewport size is detected automatically.
If can also be "<width>x<height>".
FIXME: Currently the implementation just resizes the window, which
causes Splash to crash on large sizes(?).
Actully it is not changing the viewport.
XXX: As an effect, this function changes window.outerWidth/outerHeight,
while in Webkit implementation window.innerWidth/innerHeight
is changed.
"""
if size == 'full':
size = self.web_page.contentsSize()
self.logger.log("Contents size: %s" % size, min_level=2)
if size.isEmpty():
if raise_if_empty:
raise RuntimeError("Cannot detect viewport size")
else:
size = defaults.VIEWPORT_SIZE
self.logger.log("Viewport is empty, falling back to: %s" %
size)
if not isinstance(size, (QSize, QSizeF)):
validate_size_str(size)
size = parse_size(size)
w, h = int(size.width()), int(size.height())
# XXX: it was crashing with large windows, but then the problem
# seemed to go away. Need to keep an eye on it.
# # FIXME: don't resize the window?
# # FIXME: figure out exact limits
# MAX_WIDTH = 1280
# MAX_HEIGHT = 1920
#
# if w > MAX_WIDTH:
# raise RuntimeError("Width {} > {} is currently prohibited".format(
# w, MAX_WIDTH
# ))
#
# if h > MAX_HEIGHT:
# raise RuntimeError("Height {} > {} is currently prohibited".format(
# h, MAX_HEIGHT
# ))
self.web_view.resize(w, h)
# self._force_relayout()
self.logger.log("viewport size is set to %sx%s" % (w, h), min_level=2)
self.logger.log("real viewport size: %s" % self.web_view.size(), min_level=2)
return w, h
def stop_loading(self):
self.logger.log("stop_loading", min_level=2)
self.web_view.stop()
@skip_if_closing
def close(self):
""" Destroy this tab """
super().close()
self.web_view.stop()
self.web_view.close()
self.web_page.deleteLater()
self.web_view.deleteLater()
# TODO
# self._cancel_all_timers()
| # -*- coding: utf-8 -*-
import base64
from PyQt5.QtWebEngineWidgets import (
QWebEngineView,
QWebEngineProfile,
QWebEngineSettings
)
from PyQt5.QtCore import (
QObject, QSize, Qt, QTimer, pyqtSlot, QEvent,
QPointF, QPoint, pyqtSignal, QUrl,
QSizeF,
)
from twisted.internet import defer
from splash import defaults
from splash.browser_tab import (
BrowserTab,
skip_if_closing,
webpage_option_setter,
webpage_option_getter
)
from splash.qtutils import WrappedSignal, parse_size
from splash.errors import RenderErrorInfo
from splash.render_options import validate_size_str
from .webpage import ChromiumWebPage
from .constants import RenderProcessTerminationStatus
from .screenshot import QtChromiumScreenshotRenderer
class ChromiumBrowserTab(BrowserTab):
def __init__(self, render_options, verbosity):
super().__init__(render_options, verbosity)
self.profile = QWebEngineProfile() # don't share cookies
self.web_page = ChromiumWebPage(self.profile)
self.web_view = QWebEngineView()
self.web_view.setPage(self.web_page)
self.web_view.setAttribute(Qt.WA_DeleteOnClose, True)
# TODO: is it ok? :)
# self.web_view.setAttribute(Qt.WA_DontShowOnScreen, True)
# FIXME: required for screenshots?
# Also, without .show() in JS window.innerWidth/innerHeight are zeros
self.web_view.show()
self._setup_webpage_events()
self._set_default_webpage_options()
self._html_d = None
# ensure that default window size is not 640x480.
self.set_viewport(defaults.VIEWPORT_SIZE)
def _setup_webpage_events(self):
self._load_finished = WrappedSignal(self.web_view.loadFinished)
self._render_terminated = WrappedSignal(self.web_view.renderProcessTerminated)
self.web_view.renderProcessTerminated.connect(self._on_render_terminated)
self.web_view.loadFinished.connect(self._on_load_finished)
# main_frame.urlChanged.connect(self._on_url_changed)
# main_frame.javaScriptWindowObjectCleared.connect(
# self._on_javascript_window_object_cleared)
# self.logger.add_web_page(self.web_page)
def _set_default_webpage_options(self):
""" Set QWebPage options. TODO: allow to customize defaults. """
settings = self.web_page.settings()
settings.setAttribute(QWebEngineSettings.ScreenCaptureEnabled, True)
settings.setAttribute(QWebEngineSettings.JavascriptCanOpenWindows, False)
settings.setAttribute(QWebEngineSettings.LocalContentCanAccessRemoteUrls, True)
settings.setAttribute(QWebEngineSettings.ShowScrollBars, False)
# TODO
# if self.visible:
# settings.setAttribute(QWebSettings.DeveloperExtrasEnabled, True)
# TODO: options
# self.set_js_enabled(True)
# self.set_plugins_enabled(defaults.PLUGINS_ENABLED)
# self.set_request_body_enabled(defaults.REQUEST_BODY_ENABLED)
# self.set_response_body_enabled(defaults.RESPONSE_BODY_ENABLED)
# self.set_indexeddb_enabled(defaults.INDEXEDDB_ENABLED)
# self.set_webgl_enabled(defaults.WEBGL_ENABLED)
# self.set_html5_media_enabled(defaults.HTML5_MEDIA_ENABLED)
# self.set_media_source_enabled(defaults.MEDIA_SOURCE_ENABLED)
def go(self, url, callback, errback):
callback_id = self._load_finished.connect(
self._on_content_ready,
callback=callback,
errback=errback,
)
self.logger.log("callback %s is connected to loadFinished" % callback_id, min_level=3)
self.web_view.load(QUrl(url))
@skip_if_closing
def _on_content_ready(self, ok, callback, errback, callback_id):
"""
This method is called when a QWebEnginePage finishes loading.
"""
self.logger.log("loadFinished: disconnecting callback %s" % callback_id,
min_level=3)
self._load_finished.disconnect(callback_id)
if ok:
callback()
else:
error_info = RenderErrorInfo(
type='Unknown',
code=0,
text="loadFinished ok=False",
url=self.web_view.url().toString()
)
errback(error_info)
def _on_load_finished(self, ok):
self.logger.log("loadFinished, ok=%s" % ok, min_level=2)
def _on_render_terminated(self, status, code):
status_details = RenderProcessTerminationStatus.get(status, 'unknown')
self.logger.log("renderProcessTerminated: %s (%s), exit_code=%s" % (
status, status_details, code), min_level=1)
def html(self):
""" Return HTML of the current main frame """
self.logger.log("getting HTML", min_level=2)
if self._html_d is not None:
self.logger.log("HTML is already requested", min_level=1)
return self._html_d
self._html_d = defer.Deferred()
self.web_view.page().toHtml(self._on_html_ready)
return self._html_d
def _on_html_ready(self, html):
self.logger.log("HTML ready", min_level=2)
self._html_d.callback(html)
self._html_d = None
def png(self, width=None, height=None, b64=False, render_all=False,
scale_method=None, region=None):
""" Return screenshot in PNG format """
# FIXME: move to base class
self.logger.log(
"Getting PNG: width=%s, height=%s, "
"render_all=%s, scale_method=%s, region=%s" %
(width, height, render_all, scale_method, region), min_level=2)
if render_all:
raise ValueError("render_all=True is not supported yet")
image = self._get_image('PNG', width, height, render_all,
scale_method, region=region)
result = image.to_png()
if b64:
result = base64.b64encode(result).decode('utf-8')
# self.store_har_timing("_onPngRendered")
return result
def jpeg(self, width=None, height=None, b64=False, render_all=False,
scale_method=None, quality=None, region=None):
""" Return screenshot in JPEG format. """
# FIXME: move to base class
self.logger.log(
"Getting JPEG: width=%s, height=%s, "
"render_all=%s, scale_method=%s, quality=%s, region=%s" %
(width, height, render_all, scale_method, quality, region),
min_level=2)
if render_all:
raise ValueError("render_all=True is not supported yet")
image = self._get_image('JPEG', width, height, render_all,
scale_method, region=region)
result = image.to_jpeg(quality=quality)
if b64:
result = base64.b64encode(result).decode('utf-8')
# self.store_har_timing("_onJpegRendered")
return result
def _get_image(self, image_format, width, height, render_all,
scale_method, region):
renderer = QtChromiumScreenshotRenderer(
self.web_page, self.logger, image_format,
width=width, height=height, scale_method=scale_method,
region=region)
return renderer.render_qwebpage()
def set_viewport(self, size, raise_if_empty=False):
"""
Set viewport size.
If size is "full" viewport size is detected automatically.
If can also be "<width>x<height>".
FIXME: Currently the implementation just resizes the window, which
causes Splash to crash on large sizes(?).
Actully it is not changing the viewport.
XXX: As an effect, this function changes window.outerWidth/outerHeight,
while in Webkit implementation window.innerWidth/innerHeight
is changed.
"""
if size == 'full':
size = self.web_page.contentsSize()
self.logger.log("Contents size: %s" % size, min_level=2)
if size.isEmpty():
if raise_if_empty:
raise RuntimeError("Cannot detect viewport size")
else:
size = defaults.VIEWPORT_SIZE
self.logger.log("Viewport is empty, falling back to: %s" %
size)
if not isinstance(size, (QSize, QSizeF)):
validate_size_str(size)
size = parse_size(size)
w, h = int(size.width()), int(size.height())
# XXX: it was crashing with large windows, but then the problem
# seemed to go away. Need to keep an eye on it.
# # FIXME: don't resize the window?
# # FIXME: figure out exact limits
# MAX_WIDTH = 1280
# MAX_HEIGHT = 1920
#
# if w > MAX_WIDTH:
# raise RuntimeError("Width {} > {} is currently prohibited".format(
# w, MAX_WIDTH
# ))
#
# if h > MAX_HEIGHT:
# raise RuntimeError("Height {} > {} is currently prohibited".format(
# h, MAX_HEIGHT
# ))
self.web_view.resize(w, h)
# self._force_relayout()
self.logger.log("viewport size is set to %sx%s" % (w, h), min_level=2)
self.logger.log("real viewport size: %s" % self.web_view.size(), min_level=2)
return w, h
def stop_loading(self):
self.logger.log("stop_loading", min_level=2)
self.web_view.stop()
@skip_if_closing
def close(self):
""" Destroy this tab """
super().close()
self.web_view.stop()
self.web_view.close()
self.web_page.deleteLater()
self.web_view.deleteLater()
# TODO
# self._cancel_all_timers()
| en | 000626062_Germey-splash_browser_tab_1f52dca21ec7.py | unknown | 2,848 |
from io import BytesIO
import os
from PIL import Image
from torch.utils.data import Dataset
class FFHQ_Dataset(Dataset):
'''
Usage:
Self-coded class for loading the FFHQ data
'''
def __init__(self, image_folder, transform = None):
images_list = os.listdir(image_folder)
self.images_list = sorted([os.path.join(image_folder, image) for image in images_list])
self.transform = transform
def __getitem__(self, index):
img_id = self.images_list[index]
img = Image.open(img_id).convert('RGB')
if self.transform is not None:
img = self.transform(img)
return img
def __len__(self):
return len(self.images_list)
| from io import BytesIO
import os
from PIL import Image
from torch.utils.data import Dataset
class FFHQ_Dataset(Dataset):
'''
Usage:
Self-coded class for loading the FFHQ data
'''
def __init__(self, image_folder, transform = None):
images_list = os.listdir(image_folder)
self.images_list = sorted([os.path.join(image_folder, image) for image in images_list])
self.transform = transform
def __getitem__(self, index):
img_id = self.images_list[index]
img = Image.open(img_id).convert('RGB')
if self.transform is not None:
img = self.transform(img)
return img
def __len__(self):
return len(self.images_list)
| en | 000767958_lychenyoko-content-aware-gan-compression_dataset_42e2bc3e2e88.py | unknown | 208 |
"""empty message
Revision ID: 0099_tfl_dar
Revises: 0098_service_inbound_api
Create Date: 2017-06-05 16:15:17.744908
"""
# revision identifiers, used by Alembic.
revision = '0099_tfl_dar'
down_revision = '0098_service_inbound_api'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
TFL_DAR_ID = '1d70f564-919b-4c68-8bdf-b8520d92516e'
def upgrade():
op.execute("""INSERT INTO organisation VALUES (
'{}',
'',
'tfl_dar_x2.png',
'tfl'
)""".format(TFL_DAR_ID))
def downgrade():
op.execute("""
DELETE FROM organisation WHERE "id" = '{}'
""".format(TFL_DAR_ID))
| """empty message
Revision ID: 0099_tfl_dar
Revises: 0098_service_inbound_api
Create Date: 2017-06-05 16:15:17.744908
"""
# revision identifiers, used by Alembic.
revision = '0099_tfl_dar'
down_revision = '0098_service_inbound_api'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
TFL_DAR_ID = '1d70f564-919b-4c68-8bdf-b8520d92516e'
def upgrade():
op.execute("""INSERT INTO organisation VALUES (
'{}',
'',
'tfl_dar_x2.png',
'tfl'
)""".format(TFL_DAR_ID))
def downgrade():
op.execute("""
DELETE FROM organisation WHERE "id" = '{}'
""".format(TFL_DAR_ID))
| en | 000548287_tlwr-notifications-api_0099_tfl_dar_5e7e282878e2.py | unknown | 284 |
"""
Main model: Balance
"""
from typing import List, Optional
from pydantic import Field
from aioqiwi.types import BaseModel
class AccountBalance(BaseModel):
"""Object: balance"""
amount: float = Field(..., alias="amount")
currency: int = Field(..., alias="currency")
class Type(BaseModel):
"""Object: type"""
id: str = Field(..., alias="id")
title: str = Field(..., alias="title")
class Accounts(BaseModel):
"""Object: accounts"""
alias: str = Field(..., alias="alias")
fs_alias: str = Field(..., alias="fsAlias")
title: str = Field(..., alias="title")
has_balance: bool = Field(..., alias="hasBalance")
currency: int = Field(..., alias="currency")
type: Type = Field(..., alias="type")
balance: Optional[AccountBalance] = None
class Balance(BaseModel):
"""Object: Balance"""
accounts: List[Accounts] = Field(..., alias="accounts")
| """
Main model: Balance
"""
from typing import List, Optional
from pydantic import Field
from aioqiwi.types import BaseModel
class AccountBalance(BaseModel):
"""Object: balance"""
amount: float = Field(..., alias="amount")
currency: int = Field(..., alias="currency")
class Type(BaseModel):
"""Object: type"""
id: str = Field(..., alias="id")
title: str = Field(..., alias="title")
class Accounts(BaseModel):
"""Object: accounts"""
alias: str = Field(..., alias="alias")
fs_alias: str = Field(..., alias="fsAlias")
title: str = Field(..., alias="title")
has_balance: bool = Field(..., alias="hasBalance")
currency: int = Field(..., alias="currency")
type: Type = Field(..., alias="type")
balance: Optional[AccountBalance] = None
class Balance(BaseModel):
"""Object: Balance"""
accounts: List[Accounts] = Field(..., alias="accounts")
| en | 000441127_Derad6709-aioqiwi_balance_347fee1b0411.py | unknown | 261 |
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
import time
class BarcodeOps(Model):
_name = "barcode.ops"
_transient = True
_fields = {
"production_id": fields.Many2One("production.order", "Production Order", condition=[["state", "=", "in_progress"]]),
"workcenter_id": fields.Many2One("workcenter", "Workcenter"),
}
def start(self, ids, context={}):
obj = self.browse(ids)[0]
order = obj.production_id
found = False
for op in order.operations:
if op.workcenter_id.id == obj.workcenter_id.id:
found = True
if op.time_start:
raise Exception("Start time already recorded for workcenter %s in production order %s" %
(obj.workcenter_id.code, order.number))
op.write({"time_start": time.strftime("%Y-%m-%d %H:%M:%S")})
break
if not found:
raise Exception("Workcenter %s not found in production order %s" % (obj.workcenter_id.name, order.number))
obj.write({
"production_id": None,
"workcenter_id": None,
})
return {
"flash": "Operation start time recorded successfully",
"focus_field": "production_id",
}
def stop(self, ids, context={}):
obj = self.browse(ids)[0]
order = obj.production_id
found = False
for op in order.operations:
if op.workcenter_id.id == obj.workcenter_id.id:
found = True
if not op.time_start:
raise Exception("Start time not yet recorded for workcenter %s in production order %s" %
(obj.workcenter_id.code, order.number))
if op.time_stop:
raise Exception("Stop time already recorded for workcenter %s in production order %s" %
(obj.workcenter_id.code, order.number))
op.write({"time_stop": time.strftime("%Y-%m-%d %H:%M:%S")})
break
if not found:
raise Exception("Workcenter %s not found in production order %s" % (obj.workcenter_id.code, order.number))
obj.write({
"production_id": None,
"workcenter_id": None,
})
return {
"flash": "Operation stop time recorded successfully",
"focus_field": "production_id",
}
BarcodeOps.register()
| # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
import time
class BarcodeOps(Model):
    """Transient helper behind the barcode-scanning screen: records the
    start/stop timestamps of a workcenter operation on an in-progress
    production order, then clears the form for the next scan."""
    _name = "barcode.ops"
    _transient = True
    _fields = {
        # Only in-progress orders may have operation times recorded.
        "production_id": fields.Many2One("production.order", "Production Order", condition=[["state", "=", "in_progress"]]),
        "workcenter_id": fields.Many2One("workcenter", "Workcenter"),
    }

    def start(self, ids, context={}):
        """Record the start time of the operation matching the selected
        workcenter.

        Raises Exception if a start time was already recorded or the
        workcenter does not appear in the order's operations.
        """
        obj = self.browse(ids)[0]
        order = obj.production_id
        found = False
        for op in order.operations:
            if op.workcenter_id.id == obj.workcenter_id.id:
                found = True
                if op.time_start:
                    raise Exception("Start time already recorded for workcenter %s in production order %s" %
                                    (obj.workcenter_id.code, order.number))
                op.write({"time_start": time.strftime("%Y-%m-%d %H:%M:%S")})
                break
        if not found:
            # Use the workcenter code (previously .name) so the message is
            # consistent with every other error raised by this model.
            raise Exception("Workcenter %s not found in production order %s" % (obj.workcenter_id.code, order.number))
        # Reset the form so the next barcode scan starts from a clean state.
        obj.write({
            "production_id": None,
            "workcenter_id": None,
        })
        return {
            "flash": "Operation start time recorded successfully",
            "focus_field": "production_id",
        }

    def stop(self, ids, context={}):
        """Record the stop time of the operation matching the selected
        workcenter.

        Raises Exception if no start time was recorded yet, if a stop
        time already exists, or if the workcenter is not in the order.
        """
        obj = self.browse(ids)[0]
        order = obj.production_id
        found = False
        for op in order.operations:
            if op.workcenter_id.id == obj.workcenter_id.id:
                found = True
                if not op.time_start:
                    raise Exception("Start time not yet recorded for workcenter %s in production order %s" %
                                    (obj.workcenter_id.code, order.number))
                if op.time_stop:
                    raise Exception("Stop time already recorded for workcenter %s in production order %s" %
                                    (obj.workcenter_id.code, order.number))
                op.write({"time_stop": time.strftime("%Y-%m-%d %H:%M:%S")})
                break
        if not found:
            raise Exception("Workcenter %s not found in production order %s" % (obj.workcenter_id.code, order.number))
        # Reset the form so the next barcode scan starts from a clean state.
        obj.write({
            "production_id": None,
            "workcenter_id": None,
        })
        return {
            "flash": "Operation stop time recorded successfully",
            "focus_field": "production_id",
        }

BarcodeOps.register()
| en | 000122933_nfco-netforce_barcode_ops_39a46dc709bd.py | unknown | 950 |
from datetime import datetime, timezone
import os
from scipy.misc import imsave
import numpy as np
def pre_process_image_tensor(images):
    """Convert a batch of channels-last uint8 images to float32 NCHW.

    Arrays already of dtype float32 are assumed to be scaled and are not
    divided again; arrays whose last axis is not 3 are not transposed.
    """
    needs_scaling = images.dtype != np.float32
    if needs_scaling:
        images = images.astype(np.float32) / 255.
    channels_last = images.shape[-1] == 3
    if channels_last:
        # NHWC -> NCHW: move axis 3 in front of the spatial axes.
        images = np.rollaxis(images, 3, 1)
    return images
def post_process_image_tensor(images):
    """Inverse of pre_process_image_tensor: float NCHW -> uint8 NHWC."""
    already_bytes = images.dtype == np.uint8
    if not already_bytes:
        images = (images * 255).astype('uint8')
    if images.shape[-1] != 3:
        # NCHW -> NHWC: move the channel axis to the end.
        images = np.rollaxis(images, 1, 4)
    return images
def save_images_collage(images, save_path, pre_processed=True):
    """Arrange a batch of images into a near-square grid and save it.

    Args:
        images: batch of images; if `pre_processed`, float32 NCHW as
            produced by pre_process_image_tensor, otherwise uint8 NHWC.
        save_path: destination file path handed to imsave.
        pre_processed: whether to convert back to uint8 NHWC first.
    """
    if pre_processed:
        images = post_process_image_tensor(images)
    # 2-pixel value-255 (white) border around every tile.
    npad = ((0, 0), (2, 2), (2, 2), (0, 0))
    images = np.pad(images, pad_width=npad, mode='constant', constant_values=255)
    n_samples = images.shape[0]
    # Largest divisor of n_samples not exceeding sqrt(n_samples): near-square grid.
    rows = int(np.sqrt(n_samples))
    while n_samples % rows != 0:
        rows -= 1
    nh, nw = rows, n_samples // rows
    if images.ndim == 2:
        # Flattened square images: restore the spatial axes.
        images = np.reshape(images, (images.shape[0], int(np.sqrt(images.shape[1])), int(np.sqrt(images.shape[1]))))
    if images.ndim == 4:
        h, w = images[0].shape[:2]
        img = np.zeros((h * nh, w * nw, 3))
    elif images.ndim == 3:
        h, w = images[0].shape[:2]
        img = np.zeros((h * nh, w * nw))
    # Loop variable renamed from `images`: it used to shadow the batch itself.
    for n, image in enumerate(images):
        j = n // nw
        i = n % nw
        img[j * h:j * h + h, i * w:i * w + w] = image
    imsave(save_path, img)
def mkdir(dir_name):
    """Create dir_name (and any missing parents) if it does not exist.

    Uses exist_ok so concurrent callers cannot race between the old
    existence check and the creation.
    """
    os.makedirs(dir_name, exist_ok=True)
def log(id, message):
    """Print `message` prefixed with the current UTC timestamp and `id`."""
    timestamp = datetime.now(timezone.utc)
    print(f"{timestamp} [{id}] {message}")
| from datetime import datetime, timezone
import os
from scipy.misc import imsave
import numpy as np
def pre_process_image_tensor(images):
    """Convert a batch of channels-last uint8 images to float32 NCHW.

    Arrays already of dtype float32 are assumed to be scaled and are not
    divided again; arrays whose last axis is not 3 are not transposed.
    """
    needs_scaling = images.dtype != np.float32
    if needs_scaling:
        images = images.astype(np.float32) / 255.
    channels_last = images.shape[-1] == 3
    if channels_last:
        # NHWC -> NCHW: move axis 3 in front of the spatial axes.
        images = np.rollaxis(images, 3, 1)
    return images
def post_process_image_tensor(images):
    """Inverse of pre_process_image_tensor: float NCHW -> uint8 NHWC."""
    already_bytes = images.dtype == np.uint8
    if not already_bytes:
        images = (images * 255).astype('uint8')
    if images.shape[-1] != 3:
        # NCHW -> NHWC: move the channel axis to the end.
        images = np.rollaxis(images, 1, 4)
    return images
def save_images_collage(images, save_path, pre_processed=True):
    """Arrange a batch of images into a near-square grid and save it.

    Args:
        images: batch of images; if `pre_processed`, float32 NCHW as
            produced by pre_process_image_tensor, otherwise uint8 NHWC.
        save_path: destination file path handed to imsave.
        pre_processed: whether to convert back to uint8 NHWC first.
    """
    if pre_processed:
        images = post_process_image_tensor(images)
    # 2-pixel value-255 (white) border around every tile.
    npad = ((0, 0), (2, 2), (2, 2), (0, 0))
    images = np.pad(images, pad_width=npad, mode='constant', constant_values=255)
    n_samples = images.shape[0]
    # Largest divisor of n_samples not exceeding sqrt(n_samples): near-square grid.
    rows = int(np.sqrt(n_samples))
    while n_samples % rows != 0:
        rows -= 1
    nh, nw = rows, n_samples // rows
    if images.ndim == 2:
        # Flattened square images: restore the spatial axes.
        images = np.reshape(images, (images.shape[0], int(np.sqrt(images.shape[1])), int(np.sqrt(images.shape[1]))))
    if images.ndim == 4:
        h, w = images[0].shape[:2]
        img = np.zeros((h * nh, w * nw, 3))
    elif images.ndim == 3:
        h, w = images[0].shape[:2]
        img = np.zeros((h * nh, w * nw))
    # Loop variable renamed from `images`: it used to shadow the batch itself.
    for n, image in enumerate(images):
        j = n // nw
        i = n % nw
        img[j * h:j * h + h, i * w:i * w + w] = image
    imsave(save_path, img)
def mkdir(dir_name):
    """Create dir_name (and any missing parents) if it does not exist.

    Uses exist_ok so concurrent callers cannot race between the old
    existence check and the creation.
    """
    os.makedirs(dir_name, exist_ok=True)
def log(id, message):
    """Print `message` prefixed with the current UTC timestamp and `id`."""
    timestamp = datetime.now(timezone.utc)
    print(f"{timestamp} [{id}] {message}")
| en | 000551666_mehrdad-shokri-WorldModels-1_utils_b85776d5fa08.py | unknown | 618 |
#!/usr/bin/env python3
#
# Copyright 2018, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for the app_startup_runner.py script.
Install:
$> sudo apt-get install python3-pytest ## OR
$> pip install -U pytest
See also https://docs.pytest.org/en/latest/getting-started.html
Usage:
$> ./app_startup_runner_test.py
$> pytest app_startup_runner_test.py
$> python -m pytest app_startup_runner_test.py
See also https://docs.pytest.org/en/latest/usage.html
"""
# global imports
from contextlib import contextmanager
import io
import shlex
import sys
import typing
# pip imports
import pytest
# local imports
import app_startup_runner as asr
#
# Argument Parsing Helpers
#
@contextmanager
def ignore_stdout_stderr():
  """Silence stdout/stderr for the duration of this context."""
  saved = (sys.stdout, sys.stderr)
  sys.stdout, sys.stderr = io.StringIO(), io.StringIO()
  try:
    yield
  finally:
    sys.stdout, sys.stderr = saved
@contextmanager
def argparse_bad_argument(msg):
  """
  Assert that a SystemExit is raised when executing this context.
  If no SystemExit occurs, fail the test with the message 'msg'.

  Note: implemented with try/except because the `message=` keyword of
  pytest.raises was removed in pytest 4.0.
  """
  try:
    with ignore_stdout_stderr():
      yield
  except SystemExit:
    pass  # expected: the parser rejected the arguments
  else:
    pytest.fail(msg)
def assert_bad_argument(args, msg):
  """Fail with `msg` unless the command-line string `args` is rejected
  by the parser (i.e. parsing raises SystemExit)."""
  with argparse_bad_argument(msg):
    parse_args(args)
def parse_args(args):
  """
  Split a shell-like argument string and parse it.

  :param args: command-line like arguments as a single string
  :return: dictionary of parsed key/values
  """
  argv = shlex.split(args)  # "-a b -c d" => ['-a', 'b', '-c', 'd']
  return vars(asr.parse_options(argv))
def default_dict_for_parsed_args(**kwargs):
  """Return the default values of all "optional" parameters, overridden
  by any keyword arguments."""
  defaults = dict(
      compiler_filters=None,
      simulate=False,
      debug=False,
      output=None,
      timeout=None,
      loop_count=1,
      inodes=None,
  )
  defaults.update(kwargs)
  return defaults
def default_mock_dict_for_parsed_args(include_optional=True, **kwargs):
  """Mock the required parameters, optionally merged with every optional
  parameter's default value, then overridden by kwargs."""
  mock = dict(packages=['com.fake.package'], readaheads=['warm'])
  optional = default_dict_for_parsed_args() if include_optional else {}
  return {**mock, **optional, **kwargs}
def parse_optional_args(optional_args):
  """
  Parse an argument string which already includes all the required arguments
  in default_mock_dict_for_parsed_args.

  The parameter was renamed from `str`, which shadowed the builtin; all
  call sites in this file pass it positionally.
  """
  req = "--package com.fake.package --readahead warm"
  return parse_args("%s %s" % (req, optional_args))
def test_argparse():
  """End-to-end checks of argument parsing, short and long forms."""
  # Missing required arguments must abort parsing.
  assert_bad_argument("", "-p and -r are required")
  assert_bad_argument("-r warm", "-p is required")
  assert_bad_argument("--readahead warm", "-p is required")
  assert_bad_argument("-p com.fake.package", "-r is required")
  assert_bad_argument("--package com.fake.package", "-r is required")
  # Required arguments are parsed correctly.
  ad = default_dict_for_parsed_args  # shorthand: expected-dict builder
  assert parse_args("--package xyz --readahead warm") == ad(packages=['xyz'], readaheads=['warm'])
  assert parse_args("-p xyz -r warm") == ad(packages=['xyz'], readaheads=['warm'])
  assert parse_args("-p xyz -r warm -s") == ad(packages=['xyz'], readaheads=['warm'], simulate=True)
  assert parse_args("-p xyz -r warm --simulate") == ad(packages=['xyz'], readaheads=['warm'], simulate=True)
  # Optional arguments are parsed correctly.
  mad = default_mock_dict_for_parsed_args  # shorthand: mock expected-dict builder
  assert parse_optional_args("--output filename.csv") == mad(output='filename.csv')
  assert parse_optional_args("-o filename.csv") == mad(output='filename.csv')
  assert parse_optional_args("--timeout 123") == mad(timeout=123)
  assert parse_optional_args("-t 456") == mad(timeout=456)
  assert parse_optional_args("--loop-count 123") == mad(loop_count=123)
  assert parse_optional_args("-lc 456") == mad(loop_count=456)
  assert parse_optional_args("--inodes bar") == mad(inodes="bar")
  assert parse_optional_args("-in baz") == mad(inodes="baz")
def generate_run_combinations(*args):
  """Materialize asr.generate_run_combinations into a list so that
  `assert x == y` comparisons work (generators never compare equal)."""
  return list(asr.generate_run_combinations(*args))
def test_generate_run_combinations():
  """Checks the cartesian-product expansion of run combinations."""
  # NOTE(review): typing.NamedTuple with no fields is deprecated in newer
  # Python versions — confirm the targeted interpreter version.
  blank_nd = typing.NamedTuple('Blank')
  assert generate_run_combinations(blank_nd, {}) == [()], "empty"
  assert generate_run_combinations(blank_nd, {'a': ['a1', 'a2']}) == [()], "empty filter"
  a_nd = typing.NamedTuple('A', [('a', str)])
  assert generate_run_combinations(a_nd, {'a': None}) == [(None,)], "None"
  assert generate_run_combinations(a_nd, {'a': ['a1', 'a2']}) == [('a1',), ('a2',)], "one item"
  assert generate_run_combinations(
      a_nd, {'a': ['a1', 'a2'], 'b': ['b1', 'b2']}) == [('a1',), ('a2',)], "one item filter"
  ab_nd = typing.NamedTuple('AB', [('a', str), ('b', str)])
  expected = [ab_nd('a1', 'b1'), ab_nd('a1', 'b2'), ab_nd('a2', 'b1'), ab_nd('a2', 'b2')]
  assert generate_run_combinations(
      ab_nd, {'a': ['a1', 'a2'], 'b': ['b1', 'b2']}) == expected, "two items"
  assert generate_run_combinations(
      ab_nd, {'as': ['a1', 'a2'], 'bs': ['b1', 'b2']}) == expected, "two items plural"
def test_key_to_cmdline_flag():
  """Keys map to flags: underscores become dashes, a trailing plural 's' drops."""
  expectations = {
      "abc": "--abc",
      "foos": "--foo",
      "ba_r": "--ba-r",
      "ba_zs": "--ba-z",
  }
  for key, flag in expectations.items():
    assert asr.key_to_cmdline_flag(key) == flag
def test_make_script_command_with_temp_output():
  """The generated command embeds args, count and the temp file's path."""
  cmd, tmp = asr.make_script_command_with_temp_output("fake_script", args=[], count=1)
  with tmp:
    assert cmd == ["fake_script", "--count", "1", "--output", tmp.name]
  cmd, tmp = asr.make_script_command_with_temp_output("fake_script", args=['a', 'b'], count=2)
  with tmp:
    assert cmd == ["fake_script", "a", "b", "--count", "2", "--output", tmp.name]
def test_parse_run_script_csv_file():
  """CSV parsing: empty input, plain values, and a trailing comma."""
  cases = [
      ("", []),                          # empty file -> empty list
      ("1,2,3", [1, 2, 3]),              # common case
      ("1,2,3,4,5,", [1, 2, 3, 4, 5]),   # trailing comma is ignored
  ]
  for text, expected in cases:
    assert asr.parse_run_script_csv_file(io.StringIO(text)) == expected
if __name__ == '__main__':
  # Running this file directly delegates to pytest's own collector/runner.
  pytest.main()
| #!/usr/bin/env python3
#
# Copyright 2018, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for the app_startup_runner.py script.
Install:
$> sudo apt-get install python3-pytest ## OR
$> pip install -U pytest
See also https://docs.pytest.org/en/latest/getting-started.html
Usage:
$> ./app_startup_runner_test.py
$> pytest app_startup_runner_test.py
$> python -m pytest app_startup_runner_test.py
See also https://docs.pytest.org/en/latest/usage.html
"""
# global imports
from contextlib import contextmanager
import io
import shlex
import sys
import typing
# pip imports
import pytest
# local imports
import app_startup_runner as asr
#
# Argument Parsing Helpers
#
@contextmanager
def ignore_stdout_stderr():
  """Silence stdout/stderr for the duration of this context."""
  saved = (sys.stdout, sys.stderr)
  sys.stdout, sys.stderr = io.StringIO(), io.StringIO()
  try:
    yield
  finally:
    sys.stdout, sys.stderr = saved
@contextmanager
def argparse_bad_argument(msg):
  """
  Assert that a SystemExit is raised when executing this context.
  If no SystemExit occurs, fail the test with the message 'msg'.

  Note: implemented with try/except because the `message=` keyword of
  pytest.raises was removed in pytest 4.0.
  """
  try:
    with ignore_stdout_stderr():
      yield
  except SystemExit:
    pass  # expected: the parser rejected the arguments
  else:
    pytest.fail(msg)
def assert_bad_argument(args, msg):
  """Fail with `msg` unless the command-line string `args` is rejected
  by the parser (i.e. parsing raises SystemExit)."""
  with argparse_bad_argument(msg):
    parse_args(args)
def parse_args(args):
  """
  Split a shell-like argument string and parse it.

  :param args: command-line like arguments as a single string
  :return: dictionary of parsed key/values
  """
  argv = shlex.split(args)  # "-a b -c d" => ['-a', 'b', '-c', 'd']
  return vars(asr.parse_options(argv))
def default_dict_for_parsed_args(**kwargs):
  """Return the default values of all "optional" parameters, overridden
  by any keyword arguments."""
  defaults = dict(
      compiler_filters=None,
      simulate=False,
      debug=False,
      output=None,
      timeout=None,
      loop_count=1,
      inodes=None,
  )
  defaults.update(kwargs)
  return defaults
def default_mock_dict_for_parsed_args(include_optional=True, **kwargs):
  """Mock the required parameters, optionally merged with every optional
  parameter's default value, then overridden by kwargs."""
  mock = dict(packages=['com.fake.package'], readaheads=['warm'])
  optional = default_dict_for_parsed_args() if include_optional else {}
  return {**mock, **optional, **kwargs}
def parse_optional_args(optional_args):
  """
  Parse an argument string which already includes all the required arguments
  in default_mock_dict_for_parsed_args.

  The parameter was renamed from `str`, which shadowed the builtin; all
  call sites in this file pass it positionally.
  """
  req = "--package com.fake.package --readahead warm"
  return parse_args("%s %s" % (req, optional_args))
def test_argparse():
  """End-to-end checks of argument parsing, short and long forms."""
  # Missing required arguments must abort parsing.
  assert_bad_argument("", "-p and -r are required")
  assert_bad_argument("-r warm", "-p is required")
  assert_bad_argument("--readahead warm", "-p is required")
  assert_bad_argument("-p com.fake.package", "-r is required")
  assert_bad_argument("--package com.fake.package", "-r is required")
  # Required arguments are parsed correctly.
  ad = default_dict_for_parsed_args  # shorthand: expected-dict builder
  assert parse_args("--package xyz --readahead warm") == ad(packages=['xyz'], readaheads=['warm'])
  assert parse_args("-p xyz -r warm") == ad(packages=['xyz'], readaheads=['warm'])
  assert parse_args("-p xyz -r warm -s") == ad(packages=['xyz'], readaheads=['warm'], simulate=True)
  assert parse_args("-p xyz -r warm --simulate") == ad(packages=['xyz'], readaheads=['warm'], simulate=True)
  # Optional arguments are parsed correctly.
  mad = default_mock_dict_for_parsed_args  # shorthand: mock expected-dict builder
  assert parse_optional_args("--output filename.csv") == mad(output='filename.csv')
  assert parse_optional_args("-o filename.csv") == mad(output='filename.csv')
  assert parse_optional_args("--timeout 123") == mad(timeout=123)
  assert parse_optional_args("-t 456") == mad(timeout=456)
  assert parse_optional_args("--loop-count 123") == mad(loop_count=123)
  assert parse_optional_args("-lc 456") == mad(loop_count=456)
  assert parse_optional_args("--inodes bar") == mad(inodes="bar")
  assert parse_optional_args("-in baz") == mad(inodes="baz")
def generate_run_combinations(*args):
  """Materialize asr.generate_run_combinations into a list so that
  `assert x == y` comparisons work (generators never compare equal)."""
  return list(asr.generate_run_combinations(*args))
def test_generate_run_combinations():
  """Checks the cartesian-product expansion of run combinations."""
  # NOTE(review): typing.NamedTuple with no fields is deprecated in newer
  # Python versions — confirm the targeted interpreter version.
  blank_nd = typing.NamedTuple('Blank')
  assert generate_run_combinations(blank_nd, {}) == [()], "empty"
  assert generate_run_combinations(blank_nd, {'a': ['a1', 'a2']}) == [()], "empty filter"
  a_nd = typing.NamedTuple('A', [('a', str)])
  assert generate_run_combinations(a_nd, {'a': None}) == [(None,)], "None"
  assert generate_run_combinations(a_nd, {'a': ['a1', 'a2']}) == [('a1',), ('a2',)], "one item"
  assert generate_run_combinations(
      a_nd, {'a': ['a1', 'a2'], 'b': ['b1', 'b2']}) == [('a1',), ('a2',)], "one item filter"
  ab_nd = typing.NamedTuple('AB', [('a', str), ('b', str)])
  expected = [ab_nd('a1', 'b1'), ab_nd('a1', 'b2'), ab_nd('a2', 'b1'), ab_nd('a2', 'b2')]
  assert generate_run_combinations(
      ab_nd, {'a': ['a1', 'a2'], 'b': ['b1', 'b2']}) == expected, "two items"
  assert generate_run_combinations(
      ab_nd, {'as': ['a1', 'a2'], 'bs': ['b1', 'b2']}) == expected, "two items plural"
def test_key_to_cmdline_flag():
  """Keys map to flags: underscores become dashes, a trailing plural 's' drops."""
  expectations = {
      "abc": "--abc",
      "foos": "--foo",
      "ba_r": "--ba-r",
      "ba_zs": "--ba-z",
  }
  for key, flag in expectations.items():
    assert asr.key_to_cmdline_flag(key) == flag
def test_make_script_command_with_temp_output():
  """The generated command embeds args, count and the temp file's path."""
  cmd, tmp = asr.make_script_command_with_temp_output("fake_script", args=[], count=1)
  with tmp:
    assert cmd == ["fake_script", "--count", "1", "--output", tmp.name]
  cmd, tmp = asr.make_script_command_with_temp_output("fake_script", args=['a', 'b'], count=2)
  with tmp:
    assert cmd == ["fake_script", "a", "b", "--count", "2", "--output", tmp.name]
def test_parse_run_script_csv_file():
  """CSV parsing: empty input, plain values, and a trailing comma."""
  cases = [
      ("", []),                          # empty file -> empty list
      ("1,2,3", [1, 2, 3]),              # common case
      ("1,2,3,4,5,", [1, 2, 3, 4, 5]),   # trailing comma is ignored
  ]
  for text, expected in cases:
    assert asr.parse_run_script_csv_file(io.StringIO(text)) == expected
if __name__ == '__main__':
  # Running this file directly delegates to pytest's own collector/runner.
  pytest.main()
| en | 000575710_rio-31-android_frameworks_base-1_app_startup_runner_test_962ee2a61dcc.py | unknown | 2,446 |
#Copyright 2013 RobustNet Lab, University of Michigan. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'sanae@umich.edu (Sanae Rosen)'
# I think it was actually originally Haokun that wrote this...
import logging
import ipaddr
from gspeedometer.measurement.measurement_wrapper import MeasurementWrapper
class RRC(MeasurementWrapper):
  """Encapsulates RRC data and provides methods for analyzing it."""

  def __init__(self, params, values):
    """Initializes the RRC object.

    Args:
      params: measurement parameters (currently unused).
      values: dict of measured key/value pairs.
    """
    # Stored per-instance; the old class-level `vals = dict()` attribute
    # was a shared mutable default and has been removed.
    self.vals = values

  def GetHTML(self):
    """Returns an HTML representation of this measurement."""
    # Join once instead of repeated string concatenation in a loop.
    parts = ["%s: %s <br>\n" % (key, value)
             for key, value in sorted(self.vals.items())]
    return "".join(parts)

  # TODO do this properly
  def Validate(self):
    """
    Parses data and returns a dict with validation results.
    valid -> boolean: true if data is good
    error_types -> list: list of errors found
    """
    return {"valid": True, "error_types": []}
| #Copyright 2013 RobustNet Lab, University of Michigan. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'sanae@umich.edu (Sanae Rosen)'
# I think it was actually originally Haokun that wrote this...
import logging
import ipaddr
from gspeedometer.measurement.measurement_wrapper import MeasurementWrapper
class RRC(MeasurementWrapper):
  """Encapsulates RRC data and provides methods for analyzing it."""

  def __init__(self, params, values):
    """Initializes the RRC object.

    Args:
      params: measurement parameters (currently unused).
      values: dict of measured key/value pairs.
    """
    # Stored per-instance; the old class-level `vals = dict()` attribute
    # was a shared mutable default and has been removed.
    self.vals = values

  def GetHTML(self):
    """Returns an HTML representation of this measurement."""
    # Join once instead of repeated string concatenation in a loop.
    parts = ["%s: %s <br>\n" % (key, value)
             for key, value in sorted(self.vals.items())]
    return "".join(parts)

  # TODO do this properly
  def Validate(self):
    """
    Parses data and returns a dict with validation results.
    valid -> boolean: true if data is good
    error_types -> list: list of errors found
    """
    return {"valid": True, "error_types": []}
| en | 000166632_gurupras-mobiperf_rrc_a4145ba76186.py | unknown | 431 |
import logging
import re
from functools import partial
from pathlib import Path
from typing import Any, Callable, Dict, Literal, Optional, Tuple, Union
from ptgnn.baseneuralmodel import AbstractNeuralModel, ModuleWithMetrics
from ptgnn.neuralmodels.embeddings.strelementrepresentationmodel import StrElementRepresentationModel
from ptgnn.neuralmodels.gnn import GraphNeuralNetworkModel
from buglab.models.gnn import GnnBugLabModel
from buglab.models.gnnlayerdefs import create_ggnn_mp_layers, create_mlp_mp_layers
from buglab.models.seqmodel import SeqBugLabModel
LOGGER = logging.getLogger(__name__)
def const_schedule(epoch_idx: int, const_weight: float) -> float:
    """Weight schedule that ignores the epoch and always yields const_weight."""
    # epoch_idx is deliberately unused: the weight never changes.
    return const_weight
# Matches specs such as "warmdown(10, 0.5)": group 1 is the number of
# warmdown epochs, group 2 the target weight.  Used with .match(), so a
# matching prefix is enough — trailing text is tolerated.
WARMDOWN_WEIGHT_REGEX = re.compile("warmdown\\(([0-9]+),\\s?([0-9]*\\.[0-9]+)\\)")
def linear_warmdown(epoch_idx: int, num_warmdown_epochs: int, target_weight: float) -> float:
    """Linearly decay from 1.0 at epoch 0 to target_weight at
    num_warmdown_epochs, staying at target_weight afterwards."""
    # Same evaluation order as before so results are bit-for-bit identical.
    decayed = epoch_idx * (target_weight - 1) / num_warmdown_epochs + 1
    return max(target_weight, decayed)
def buggy_sample_weight_schedule(weight_spec: Union[str, int, float]) -> Callable[[int], float]:
    """Return a (serializable) function with the appropriate schedule.

    Accepts either a plain number (constant weight) or a string such as
    "warmdown(10, 0.5)" (linear decay from 1 to the target over N epochs).

    Raises:
        ValueError: if the string spec is not recognized.
    """
    if isinstance(weight_spec, (int, float)):
        return partial(const_schedule, const_weight=weight_spec)
    warmdown = WARMDOWN_WEIGHT_REGEX.match(weight_spec)
    if warmdown:
        num_warmdown_epochs = int(warmdown.group(1))
        target_weight = float(warmdown.group(2))
        # Linear decay up to the target
        return partial(linear_warmdown, num_warmdown_epochs=num_warmdown_epochs, target_weight=target_weight)
    # ValueError (a subclass of the previously-raised bare Exception, so
    # existing handlers still catch it) pinpoints the malformed spec.
    raise ValueError(f"Unrecognized buggy sample weighting `{weight_spec}`")
def gnn(
    *,
    mp_layer,
    add_self_edge: bool,
    use_all_gnn_layer_outputs: bool = False,
    hidden_state_size: int = 128,
    dropout_rate: float = 0.2,
    node_representations: Optional[Dict[str, Any]] = None,
    selector_loss_type="classify-max-loss",
    stop_extending_minibatch_after_num_nodes: int = 30000,
    max_nodes_per_graph: int = 35000,
    buggy_samples_weight_spec: Union[str, int, float] = 1.0,
    edge_feature_size: int = 0,
    **kwargs,
):
    """Build a GnnBugLabModel from the given hyperparameters.

    NOTE(review): extra **kwargs are accepted but silently ignored —
    confirm this is intentional.
    """
    if node_representations is None:
        node_representations = {}
    # Fill in defaults only for options the caller did not provide.
    node_representations.setdefault("token_splitting", "subtoken")
    node_representations.setdefault("max_num_subtokens", 6)
    node_representations.setdefault("subtoken_combination", "max")
    node_representations.setdefault("vocabulary_size", 15000)
    # Edge features are optional; a size of 0 disables the edge model.
    edge_representation_model = (
        StrElementRepresentationModel(token_splitting="token", embedding_size=edge_feature_size)
        if edge_feature_size > 0
        else None
    )
    node_model = StrElementRepresentationModel(embedding_size=hidden_state_size, **node_representations)
    graph_model = GraphNeuralNetworkModel(
        node_representation_model=node_model,
        edge_representation_model=edge_representation_model,
        add_self_edges=add_self_edge,
        message_passing_layer_creator=lambda n_edges: mp_layer(
            hidden_state_size, dropout_rate, n_edges, features_dimension=edge_feature_size
        ),
        stop_extending_minibatch_after_num_nodes=stop_extending_minibatch_after_num_nodes,
        max_nodes_per_graph=max_nodes_per_graph,
    )
    return GnnBugLabModel(
        graph_model,
        use_all_gnn_layer_outputs=use_all_gnn_layer_outputs,
        generator_loss_type=selector_loss_type,
        buggy_samples_weight_schedule=buggy_sample_weight_schedule(buggy_samples_weight_spec),
    )
def seq_transformer(
    *,
    layer_type: Literal["great", "rat", "transformer", "gru"],
    hidden_state_size: int = 256,
    dropout_rate: float = 0.1,
    vocab_size: int = 15000,
    selector_loss_type: str = "classify-max-loss",
    num_layers: int = 5,
    num_heads: int = 8,
    max_seq_size: int = 400,
    intermediate_dimension_size: int = 1024,
    buggy_samples_weight_spec: Union[str, int, float] = 1.0,
    rezero_mode: Literal["off", "scalar", "vector"] = "off",
    normalisation_mode: Literal["off", "prenorm", "postnorm"] = "postnorm",
    **__,
):
    """Build a sequence-based SeqBugLabModel; extra kwargs are ignored."""
    weight_schedule = buggy_sample_weight_schedule(buggy_samples_weight_spec)
    model_kwargs = dict(
        max_subtoken_vocab_size=vocab_size,
        dropout_rate=dropout_rate,
        layer_type=layer_type,
        generator_loss_type=selector_loss_type,
        intermediate_dimension_size=intermediate_dimension_size,
        buggy_samples_weight_schedule=weight_schedule,
        max_seq_size=max_seq_size,
        num_heads=num_heads,
        num_layers=num_layers,
        rezero_mode=rezero_mode,
        normalisation_mode=normalisation_mode,
    )
    return SeqBugLabModel(hidden_state_size, **model_kwargs)
def construct_model_dict(gnn_constructor: Callable, seq_constructor: Callable) -> Dict[str, Callable]:
    """Map every known model name to a factory taking a kwargs dict."""
    factories: Dict[str, Callable] = {
        "gnn-mlp": lambda kwargs: gnn_constructor(mp_layer=create_mlp_mp_layers, add_self_edge=True, **kwargs),
        "ggnn": lambda kwargs: gnn_constructor(mp_layer=create_ggnn_mp_layers, add_self_edge=False, **kwargs),
    }
    for seq_layer in ("great", "rat", "transformer", "gru"):
        # Bind seq_layer as a default argument to avoid late-binding closures.
        factories[f"seq-{seq_layer}"] = lambda kwargs, _layer=seq_layer: seq_constructor(layer_type=_layer, **kwargs)
    return factories
def load_model(
    model_spec: Dict[str, Any],
    model_path: Path,
    restore_path: Optional[str] = None,
    restore_if_model_exists: bool = False,
    type_model: bool = False,
) -> Tuple[AbstractNeuralModel, ModuleWithMetrics, bool]:
    """Create a model from `model_spec` or restore one from disk.

    Args:
        model_spec: dict with a "modelName" key plus that model's
            hyperparameters.
        model_path: path of the (pickled, gzipped) model file.
        restore_path: if set, forces restoring an existing model.
        restore_if_model_exists: restore from model_path when it exists.
        type_model: unused; kept for interface compatibility.

    Returns:
        (model, neural module or None, whether metadata still needs to be
        initialized).

    Raises:
        ValueError: if model_spec names an unknown model.
    """
    assert model_path.name.endswith(".pkl.gz"), "MODEL_FILENAME must have a `.pkl.gz` suffix."
    initialize_metadata = True
    if restore_path is not None or (restore_if_model_exists and model_path.exists()):
        import torch  # imported lazily: only needed on the restore path

        LOGGER.info("Resuming training from %s.", model_path)
        initialize_metadata = False
        model, nn = AbstractNeuralModel.restore_model(
            model_path, torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        )
    else:
        nn = None
        models = construct_model_dict(gnn, seq_transformer)
        model_name = model_spec["modelName"]
        if model_name not in models:
            # The old code passed logging-style %s arguments to ValueError,
            # which never interpolated them; format the message explicitly.
            raise ValueError(f"Unknown model `{model_name}`. Known models: {list(models)}")
        spec = dict(model_spec)
        del spec["modelName"]
        model = models[model_name](spec)
    return model, nn, initialize_metadata
| import logging
import re
from functools import partial
from pathlib import Path
from typing import Any, Callable, Dict, Literal, Optional, Tuple, Union
from ptgnn.baseneuralmodel import AbstractNeuralModel, ModuleWithMetrics
from ptgnn.neuralmodels.embeddings.strelementrepresentationmodel import StrElementRepresentationModel
from ptgnn.neuralmodels.gnn import GraphNeuralNetworkModel
from buglab.models.gnn import GnnBugLabModel
from buglab.models.gnnlayerdefs import create_ggnn_mp_layers, create_mlp_mp_layers
from buglab.models.seqmodel import SeqBugLabModel
LOGGER = logging.getLogger(__name__)
def const_schedule(epoch_idx: int, const_weight: float) -> float:
    """Weight schedule that ignores the epoch and always yields const_weight."""
    # epoch_idx is deliberately unused: the weight never changes.
    return const_weight
# Matches specs such as "warmdown(10, 0.5)": group 1 is the number of
# warmdown epochs, group 2 the target weight.  Used with .match(), so a
# matching prefix is enough — trailing text is tolerated.
WARMDOWN_WEIGHT_REGEX = re.compile("warmdown\\(([0-9]+),\\s?([0-9]*\\.[0-9]+)\\)")
def linear_warmdown(epoch_idx: int, num_warmdown_epochs: int, target_weight: float) -> float:
    """Linearly decay from 1.0 at epoch 0 to target_weight at
    num_warmdown_epochs, staying at target_weight afterwards."""
    # Same evaluation order as before so results are bit-for-bit identical.
    decayed = epoch_idx * (target_weight - 1) / num_warmdown_epochs + 1
    return max(target_weight, decayed)
def buggy_sample_weight_schedule(weight_spec: Union[str, int, float]) -> Callable[[int], float]:
    """Return a (serializable) function with the appropriate schedule.

    Accepts either a plain number (constant weight) or a string such as
    "warmdown(10, 0.5)" (linear decay from 1 to the target over N epochs).

    Raises:
        ValueError: if the string spec is not recognized.
    """
    if isinstance(weight_spec, (int, float)):
        return partial(const_schedule, const_weight=weight_spec)
    warmdown = WARMDOWN_WEIGHT_REGEX.match(weight_spec)
    if warmdown:
        num_warmdown_epochs = int(warmdown.group(1))
        target_weight = float(warmdown.group(2))
        # Linear decay up to the target
        return partial(linear_warmdown, num_warmdown_epochs=num_warmdown_epochs, target_weight=target_weight)
    # ValueError (a subclass of the previously-raised bare Exception, so
    # existing handlers still catch it) pinpoints the malformed spec.
    raise ValueError(f"Unrecognized buggy sample weighting `{weight_spec}`")
def gnn(
    *,
    mp_layer,
    add_self_edge: bool,
    use_all_gnn_layer_outputs: bool = False,
    hidden_state_size: int = 128,
    dropout_rate: float = 0.2,
    node_representations: Optional[Dict[str, Any]] = None,
    selector_loss_type="classify-max-loss",
    stop_extending_minibatch_after_num_nodes: int = 30000,
    max_nodes_per_graph: int = 35000,
    buggy_samples_weight_spec: Union[str, int, float] = 1.0,
    edge_feature_size: int = 0,
    **kwargs,
):
    """Build a GnnBugLabModel from the given hyperparameters.

    NOTE(review): extra **kwargs are accepted but silently ignored —
    confirm this is intentional.
    """
    if node_representations is None:
        node_representations = {}
    # Fill in defaults only for options the caller did not provide.
    node_representations.setdefault("token_splitting", "subtoken")
    node_representations.setdefault("max_num_subtokens", 6)
    node_representations.setdefault("subtoken_combination", "max")
    node_representations.setdefault("vocabulary_size", 15000)
    # Edge features are optional; a size of 0 disables the edge model.
    edge_representation_model = (
        StrElementRepresentationModel(token_splitting="token", embedding_size=edge_feature_size)
        if edge_feature_size > 0
        else None
    )
    node_model = StrElementRepresentationModel(embedding_size=hidden_state_size, **node_representations)
    graph_model = GraphNeuralNetworkModel(
        node_representation_model=node_model,
        edge_representation_model=edge_representation_model,
        add_self_edges=add_self_edge,
        message_passing_layer_creator=lambda n_edges: mp_layer(
            hidden_state_size, dropout_rate, n_edges, features_dimension=edge_feature_size
        ),
        stop_extending_minibatch_after_num_nodes=stop_extending_minibatch_after_num_nodes,
        max_nodes_per_graph=max_nodes_per_graph,
    )
    return GnnBugLabModel(
        graph_model,
        use_all_gnn_layer_outputs=use_all_gnn_layer_outputs,
        generator_loss_type=selector_loss_type,
        buggy_samples_weight_schedule=buggy_sample_weight_schedule(buggy_samples_weight_spec),
    )
def seq_transformer(
    *,
    layer_type: Literal["great", "rat", "transformer", "gru"],
    hidden_state_size: int = 256,
    dropout_rate: float = 0.1,
    vocab_size: int = 15000,
    selector_loss_type: str = "classify-max-loss",
    num_layers: int = 5,
    num_heads: int = 8,
    max_seq_size: int = 400,
    intermediate_dimension_size: int = 1024,
    buggy_samples_weight_spec: Union[str, int, float] = 1.0,
    rezero_mode: Literal["off", "scalar", "vector"] = "off",
    normalisation_mode: Literal["off", "prenorm", "postnorm"] = "postnorm",
    **__,
):
    """Build a sequence-based SeqBugLabModel; extra kwargs are ignored."""
    weight_schedule = buggy_sample_weight_schedule(buggy_samples_weight_spec)
    model_kwargs = dict(
        max_subtoken_vocab_size=vocab_size,
        dropout_rate=dropout_rate,
        layer_type=layer_type,
        generator_loss_type=selector_loss_type,
        intermediate_dimension_size=intermediate_dimension_size,
        buggy_samples_weight_schedule=weight_schedule,
        max_seq_size=max_seq_size,
        num_heads=num_heads,
        num_layers=num_layers,
        rezero_mode=rezero_mode,
        normalisation_mode=normalisation_mode,
    )
    return SeqBugLabModel(hidden_state_size, **model_kwargs)
def construct_model_dict(gnn_constructor: Callable, seq_constructor: Callable) -> Dict[str, Callable]:
    """Map every known model name to a factory taking a kwargs dict."""
    factories: Dict[str, Callable] = {
        "gnn-mlp": lambda kwargs: gnn_constructor(mp_layer=create_mlp_mp_layers, add_self_edge=True, **kwargs),
        "ggnn": lambda kwargs: gnn_constructor(mp_layer=create_ggnn_mp_layers, add_self_edge=False, **kwargs),
    }
    for seq_layer in ("great", "rat", "transformer", "gru"):
        # Bind seq_layer as a default argument to avoid late-binding closures.
        factories[f"seq-{seq_layer}"] = lambda kwargs, _layer=seq_layer: seq_constructor(layer_type=_layer, **kwargs)
    return factories
def load_model(
    model_spec: Dict[str, Any],
    model_path: Path,
    restore_path: Optional[str] = None,
    restore_if_model_exists: bool = False,
    type_model: bool = False,
) -> Tuple[AbstractNeuralModel, ModuleWithMetrics, bool]:
    """Create a model from `model_spec` or restore one from disk.

    Args:
        model_spec: dict with a "modelName" key plus that model's
            hyperparameters.
        model_path: path of the (pickled, gzipped) model file.
        restore_path: if set, forces restoring an existing model.
        restore_if_model_exists: restore from model_path when it exists.
        type_model: unused; kept for interface compatibility.

    Returns:
        (model, neural module or None, whether metadata still needs to be
        initialized).

    Raises:
        ValueError: if model_spec names an unknown model.
    """
    assert model_path.name.endswith(".pkl.gz"), "MODEL_FILENAME must have a `.pkl.gz` suffix."
    initialize_metadata = True
    if restore_path is not None or (restore_if_model_exists and model_path.exists()):
        import torch  # imported lazily: only needed on the restore path

        LOGGER.info("Resuming training from %s.", model_path)
        initialize_metadata = False
        model, nn = AbstractNeuralModel.restore_model(
            model_path, torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        )
    else:
        nn = None
        models = construct_model_dict(gnn, seq_transformer)
        model_name = model_spec["modelName"]
        if model_name not in models:
            # The old code passed logging-style %s arguments to ValueError,
            # which never interpolated them; format the message explicitly.
            raise ValueError(f"Unknown model `{model_name}`. Known models: {list(models)}")
        spec = dict(model_spec)
        del spec["modelName"]
        model = models[model_name](spec)
    return model, nn, initialize_metadata
| en | 000375151_microsoft-neurips21-self-supervised-bug-detection-and-repair_modelregistry_dccebe272dac.py | unknown | 2,112 |
from fairing.backend.kubeflow import KubeflowBackend
from fairing.utils import get_image_full
class BasicArchitecture():
    """Simplest training topology: independent single-replica MASTER TFJobs."""

    def add_jobs(self, svc, count, repository, image_name, image_tag, volumes, volume_mounts):
        """Attach ``count`` single-replica MASTER TFJobs to ``svc`` and return it."""
        full_image_name = get_image_full(repository, image_name, image_tag)
        svc["tfJobs"] = [
            {
                "name": "{}-{}-{}".format(image_name, image_tag, ix),
                "replicaSpecs": [{
                    "replicaType": "MASTER",
                    "replicas": 1,
                    "containers": [
                        {
                            "image": full_image_name,
                            "volumeMounts": volume_mounts
                        }
                    ],
                    "volumes": volumes
                }]
            }
            for ix in range(count)
        ]
        return svc

    def get_associated_backend(self):
        """Return the backend this architecture deploys with."""
        return KubeflowBackend()
| from fairing.backend.kubeflow import KubeflowBackend
from fairing.utils import get_image_full
class BasicArchitecture():
    # Simplest training topology: ``count`` independent single-replica TFJobs.
    def add_jobs(self, svc, count, repository, image_name, image_tag, volumes, volume_mounts):
        # Fully-qualified image reference (repository/name:tag).
        full_image_name = get_image_full(repository, image_name, image_tag)
        tfjobs = []
        for ix in range(count):
            tfjobs.append({
                "name": "{}-{}-{}".format(image_name, image_tag, ix),
                "replicaSpecs": [{
                    "replicaType": "MASTER",
                    "replicas": 1,
                    "containers": [
                        {
                            "image": full_image_name,
                            "volumeMounts": volume_mounts
                        }
                    ],
                    "volumes": volumes
                }]
            })
        svc["tfJobs"] = tfjobs
        return svc
    def get_associated_backend(self):
        # Architectures of this kind are deployed through the Kubeflow backend.
        return KubeflowBackend()
| en | 000092629_wbuchwalter-fairing-1_basic_2b34abbcd045.py | unknown | 246 |
class FilterModule(object):
    '''Ansible core jinja2 filters.'''

    def filters(self):
        '''Expose the filters this plugin provides to Ansible.'''
        return {'infer_address': self.infer_address}

    def infer_address(self, hostname, hostvars):
        '''Return the best-known address for *hostname*.

        Preference order: explicit ssh host, generic host var, then the
        inventory name itself; ``None`` when no address fact is present.
        '''
        host_facts = hostvars[hostname]
        for key in ('ansible_ssh_host', 'ansible_host', 'inventory_hostname'):
            if key in host_facts:
                return host_facts[key]
        return None
|
class FilterModule(object):
    ''' Ansible core jinja2 filters '''
    def filters(self):
        # Map filter names to implementations for Ansible's plugin loader.
        return {
            'infer_address' : self.infer_address
        }
    def infer_address(self, hostname, hostvars):
        # Prefer an explicit ssh host, then the generic host var, then the
        # inventory name; None when no address fact exists for the host.
        for t in ('ansible_ssh_host', 'ansible_host', 'inventory_hostname'):
            if t in hostvars[hostname]:
                return hostvars[hostname][t]
        return None
| en | 000289614_AkashMainali-automate-tower-ha-dr_filters_4a81d8e7a43d.py | unknown | 110 |
from django import forms
from .models import *
from academic.models import ClassInfo
class AcademicInfoForm(forms.ModelForm):
    # Admission form. Registration number, status and the links to the other
    # info records are assigned in code, so they are excluded from user input.
    class Meta:
        model = AcademicInfo
        exclude = ['registration_no', 'status', 'personal_info', 'address_info', 'guardian_info', 'emergency_contact_info', 'previous_academic_info', 'previous_academic_certificate', 'is_delete']
        widgets = {
            'class_info': forms.Select(attrs={'class': 'form-control'})
        }
class PersonalInfoForm(forms.ModelForm):
    # Student personal details; every widget carries the Bootstrap
    # ``form-control`` class for consistent styling.
    class Meta:
        model = PersonalInfo
        fields = '__all__'
        widgets = {
            'name': forms.TextInput(attrs={'class': 'form-control'}),
            'photo': forms.ClearableFileInput(attrs={'class': 'form-control'}),
            'blood_group': forms.Select(attrs={'class': 'form-control'}),
            'date_of_birth': forms.TextInput(attrs={'class': 'form-control'}),
            'gender': forms.Select(attrs={'class': 'form-control'}),
            'phone_no': forms.TextInput(attrs={'class': 'form-control'}),
            'email': forms.TextInput(attrs={'class': 'form-control'}),
            'birth_certificate_no': forms.TextInput(attrs={'class': 'form-control'}),
            'religion': forms.Select(attrs={'class': 'form-control'}),
            'nationality': forms.Select(attrs={'class': 'form-control'})
        }
class StudentAddressInfoForm(forms.ModelForm):
    # Address form with chained District -> Upazilla -> Union dropdowns.
    class Meta:
        model = StudentAddressInfo
        fields = '__all__'
        widgets = {
            'district': forms.Select(attrs={'class': 'form-control'}),
            'upazilla': forms.Select(attrs={'class': 'form-control'}),
            'union': forms.Select(attrs={'class': 'form-control'}),
            'village': forms.TextInput(attrs={'class': 'form-control'})
        }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Start with an empty queryset; choices are narrowed below depending
        # on whether this is a bound submission (self.data) or an edit of an
        # existing instance.
        self.fields['upazilla'].queryset = Upazilla.objects.none()
        if 'upazilla' in self.data:
            try:
                district_id = int(self.data.get('district'))
                self.fields['upazilla'].queryset = Upazilla.objects.filter(district_id=district_id).order_by('name')
            except (ValueError, TypeError):
                # Missing/invalid district in the submitted data: keep empty.
                pass
        elif self.instance.pk:
            self.fields['upazilla'].queryset = self.instance.district.upazilla_set.order_by('name')
        self.fields['union'].queryset = Union.objects.none()
        if 'union' in self.data:
            try:
                upazilla_id = int(self.data.get('upazilla'))
                self.fields['union'].queryset = Union.objects.filter(upazilla_id=upazilla_id).order_by('name')
            except (ValueError, TypeError):
                # Missing/invalid upazilla in the submitted data: keep empty.
                pass
        elif self.instance.pk:
            self.fields['union'].queryset = self.instance.upazilla.union_set.order_by('name')
class GuardianInfoForm(forms.ModelForm):
    # Parent/guardian contact and occupation details.
    class Meta:
        model = GuardianInfo
        fields = '__all__'
        widgets = {
            'father_name': forms.TextInput(attrs={'class': 'form-control'}),
            'father_phone_no': forms.TextInput(attrs={'class': 'form-control'}),
            'father_occupation': forms.Select(attrs={'class': 'form-control'}),
            'father_yearly_income': forms.TextInput(attrs={'class': 'form-control'}),
            'mother_name': forms.TextInput(attrs={'class': 'form-control'}),
            'mother_phone_no': forms.TextInput(attrs={'class': 'form-control'}),
            'mother_occupation': forms.Select(attrs={'class': 'form-control'}),
            'guardian_name': forms.TextInput(attrs={'class': 'form-control'}),
            'guardian_phone_no': forms.TextInput(attrs={'class': 'form-control'}),
            'guardian_email': forms.TextInput(attrs={'class': 'form-control'}),
            'relationship_with_student': forms.Select(attrs={'class': 'form-control'}),
        }
class EmergencyContactDetailsForm(forms.ModelForm):
    # Who to reach in an emergency, with address and relationship.
    class Meta:
        model = EmergencyContactDetails
        fields = '__all__'
        widgets = {
            'emergency_guardian_name': forms.TextInput(attrs={'class': 'form-control'}),
            'address': forms.Textarea(attrs={'class': 'form-control'}),
            'relationship_with_student': forms.Select(attrs={'class': 'form-control'}),
            'phone_no': forms.TextInput(attrs={'class': 'form-control'}),
            'email': forms.TextInput(attrs={'class': 'form-control'}),
        }
class PreviousAcademicInfoForm(forms.ModelForm):
    # Results from the student's previous institution/examination.
    class Meta:
        model = PreviousAcademicInfo
        fields = '__all__'
        widgets = {
            'institute_name': forms.TextInput(attrs={'class': 'form-control'}),
            'name_of_exam': forms.TextInput(attrs={'class': 'form-control'}),
            'group': forms.TextInput(attrs={'class': 'form-control'}),
            'gpa': forms.TextInput(attrs={'class': 'form-control'}),
            'board_roll': forms.TextInput(attrs={'class': 'form-control'}),
            'passing_year': forms.TextInput(attrs={'class': 'form-control'}),
        }
class PreviousAcademicCertificateForm(forms.ModelForm):
    # Upload form for prior-education certificates; exposes every model field.
    class Meta:
        model = PreviousAcademicCertificate
        fields = '__all__'
class StudentSearchForm(forms.Form):
    # Optional filters for the student list: class and/or registration number.
    class_info = forms.ModelChoiceField(required=False, queryset=ClassInfo.objects.all())
    registration_no = forms.IntegerField(required=False, widget=forms.NumberInput(attrs={'placeholder': 'Registration No', 'aria-controls': 'DataTables_Table_0'}))
class EnrolledStudentForm(forms.Form):
    # Pick the class whose enrolled students should be listed.
    class_name = forms.ModelChoiceField(queryset=ClassInfo.objects.all())
class StudentEnrollForm(forms.Form):
    # Assign a roll number to a student within a registered class.
    class_name = forms.ModelChoiceField(queryset=ClassRegistration.objects.all(), widget=forms.Select(attrs={'class': 'form-control'}))
    roll_no = forms.IntegerField(widget=forms.NumberInput(attrs={'placeholder': 'Enter Roll', 'class': 'form-control'}))
class SearchEnrolledStudentForm(forms.Form):
    # Filter enrolled students by registered class and optional roll number.
    reg_class = forms.ModelChoiceField(queryset=ClassRegistration.objects.all())
    roll_no = forms.IntegerField(required=False, widget=forms.NumberInput(attrs={'placeholder': 'Enter Roll'}))
from .models import *
from academic.models import ClassInfo
class AcademicInfoForm(forms.ModelForm):
    # Admission form: registration_no/status/linked-record fields are filled
    # programmatically, so only class selection is left to the user.
    class Meta:
        model = AcademicInfo
        exclude = ['registration_no', 'status', 'personal_info', 'address_info', 'guardian_info', 'emergency_contact_info', 'previous_academic_info', 'previous_academic_certificate', 'is_delete']
        widgets = {
            'class_info': forms.Select(attrs={'class': 'form-control'})
        }
class PersonalInfoForm(forms.ModelForm):
    # Personal details with Bootstrap-styled widgets on every field.
    class Meta:
        model = PersonalInfo
        fields = '__all__'
        widgets = {
            'name': forms.TextInput(attrs={'class': 'form-control'}),
            'photo': forms.ClearableFileInput(attrs={'class': 'form-control'}),
            'blood_group': forms.Select(attrs={'class': 'form-control'}),
            'date_of_birth': forms.TextInput(attrs={'class': 'form-control'}),
            'gender': forms.Select(attrs={'class': 'form-control'}),
            'phone_no': forms.TextInput(attrs={'class': 'form-control'}),
            'email': forms.TextInput(attrs={'class': 'form-control'}),
            'birth_certificate_no': forms.TextInput(attrs={'class': 'form-control'}),
            'religion': forms.Select(attrs={'class': 'form-control'}),
            'nationality': forms.Select(attrs={'class': 'form-control'})
        }
class StudentAddressInfoForm(forms.ModelForm):
    # Address form; upazilla/union choices depend on the selected district.
    class Meta:
        model = StudentAddressInfo
        fields = '__all__'
        widgets = {
            'district': forms.Select(attrs={'class': 'form-control'}),
            'upazilla': forms.Select(attrs={'class': 'form-control'}),
            'union': forms.Select(attrs={'class': 'form-control'}),
            'village': forms.TextInput(attrs={'class': 'form-control'})
        }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Dependent dropdowns: empty until a parent value is known, either
        # from submitted data or from the instance being edited.
        self.fields['upazilla'].queryset = Upazilla.objects.none()
        if 'upazilla' in self.data:
            try:
                district_id = int(self.data.get('district'))
                self.fields['upazilla'].queryset = Upazilla.objects.filter(district_id=district_id).order_by('name')
            except (ValueError, TypeError):
                pass
        elif self.instance.pk:
            self.fields['upazilla'].queryset = self.instance.district.upazilla_set.order_by('name')
        self.fields['union'].queryset = Union.objects.none()
        if 'union' in self.data:
            try:
                upazilla_id = int(self.data.get('upazilla'))
                self.fields['union'].queryset = Union.objects.filter(upazilla_id=upazilla_id).order_by('name')
            except (ValueError, TypeError):
                pass
        elif self.instance.pk:
            self.fields['union'].queryset = self.instance.upazilla.union_set.order_by('name')
class GuardianInfoForm(forms.ModelForm):
    # Guardian/parent details for the admitted student.
    class Meta:
        model = GuardianInfo
        fields = '__all__'
        widgets = {
            'father_name': forms.TextInput(attrs={'class': 'form-control'}),
            'father_phone_no': forms.TextInput(attrs={'class': 'form-control'}),
            'father_occupation': forms.Select(attrs={'class': 'form-control'}),
            'father_yearly_income': forms.TextInput(attrs={'class': 'form-control'}),
            'mother_name': forms.TextInput(attrs={'class': 'form-control'}),
            'mother_phone_no': forms.TextInput(attrs={'class': 'form-control'}),
            'mother_occupation': forms.Select(attrs={'class': 'form-control'}),
            'guardian_name': forms.TextInput(attrs={'class': 'form-control'}),
            'guardian_phone_no': forms.TextInput(attrs={'class': 'form-control'}),
            'guardian_email': forms.TextInput(attrs={'class': 'form-control'}),
            'relationship_with_student': forms.Select(attrs={'class': 'form-control'}),
        }
class EmergencyContactDetailsForm(forms.ModelForm):
    # Emergency contact person for the student.
    class Meta:
        model = EmergencyContactDetails
        fields = '__all__'
        widgets = {
            'emergency_guardian_name': forms.TextInput(attrs={'class': 'form-control'}),
            'address': forms.Textarea(attrs={'class': 'form-control'}),
            'relationship_with_student': forms.Select(attrs={'class': 'form-control'}),
            'phone_no': forms.TextInput(attrs={'class': 'form-control'}),
            'email': forms.TextInput(attrs={'class': 'form-control'}),
        }
class PreviousAcademicInfoForm(forms.ModelForm):
    # Prior schooling and examination results.
    class Meta:
        model = PreviousAcademicInfo
        fields = '__all__'
        widgets = {
            'institute_name': forms.TextInput(attrs={'class': 'form-control'}),
            'name_of_exam': forms.TextInput(attrs={'class': 'form-control'}),
            'group': forms.TextInput(attrs={'class': 'form-control'}),
            'gpa': forms.TextInput(attrs={'class': 'form-control'}),
            'board_roll': forms.TextInput(attrs={'class': 'form-control'}),
            'passing_year': forms.TextInput(attrs={'class': 'form-control'}),
        }
class PreviousAcademicCertificateForm(forms.ModelForm):
    # Certificate uploads from the previous institution.
    class Meta:
        model = PreviousAcademicCertificate
        fields = '__all__'
class StudentSearchForm(forms.Form):
    # Search students by class and/or registration number (both optional).
    class_info = forms.ModelChoiceField(required=False, queryset=ClassInfo.objects.all())
    registration_no = forms.IntegerField(required=False, widget=forms.NumberInput(attrs={'placeholder': 'Registration No', 'aria-controls': 'DataTables_Table_0'}))
class EnrolledStudentForm(forms.Form):
    # Select which class's enrolled students to display.
    class_name = forms.ModelChoiceField(queryset=ClassInfo.objects.all())
class StudentEnrollForm(forms.Form):
    # Enroll a student: choose the registered class and enter a roll number.
    class_name = forms.ModelChoiceField(queryset=ClassRegistration.objects.all(), widget=forms.Select(attrs={'class': 'form-control'}))
    roll_no = forms.IntegerField(widget=forms.NumberInput(attrs={'placeholder': 'Enter Roll', 'class': 'form-control'}))
class SearchEnrolledStudentForm(forms.Form):
    # Look up enrolled students by registered class, optionally by roll.
    reg_class = forms.ModelChoiceField(queryset=ClassRegistration.objects.all())
    roll_no = forms.IntegerField(required=False, widget=forms.NumberInput(attrs={'placeholder': 'Enter Roll'}))
from docutils.parsers.rst import Directive
from docutils.parsers.rst import states, directives
from docutils.parsers.rst.roles import set_classes
from docutils import nodes
from sphinx.locale import _
class exception_hierarchy(nodes.General, nodes.Element):
    # Custom docutils node marking an exception-hierarchy block in the doctree.
    pass
def visit_exception_hierarchy_node(self, node):
    """HTML writer hook: open the wrapper div for an exception hierarchy."""
    opening = self.starttag(node, "div", CLASS="exception-hierarchy-content")
    self.body.append(opening)
def depart_exception_hierarchy_node(self, node):
    """HTML writer hook: close the div opened by the matching visit handler."""
    closing = "</div>\n"
    self.body.append(closing)
class ExceptionHierarchyDirective(Directive):
    # The directive must have body content (the hierarchy listing).
    has_content = True
    def run(self):
        self.assert_has_content()
        # Wrap the raw content in our custom node, then let docutils parse the
        # nested reST so roles/links inside the hierarchy still resolve.
        node = exception_hierarchy("\n".join(self.content))
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
def setup(app):
    # Sphinx extension entry point: register the custom node with its HTML
    # visit/depart handlers, plus the directive that produces it.
    app.add_node(exception_hierarchy, html=(visit_exception_hierarchy_node, depart_exception_hierarchy_node))
    app.add_directive("exception_hierarchy", ExceptionHierarchyDirective)
| from docutils.parsers.rst import Directive
from docutils.parsers.rst import states, directives
from docutils.parsers.rst.roles import set_classes
from docutils import nodes
from sphinx.locale import _
class exception_hierarchy(nodes.General, nodes.Element):
    # Doctree node type for exception-hierarchy blocks.
    pass
def visit_exception_hierarchy_node(self, node):
    # Emit the opening wrapper div when the HTML writer enters the node.
    self.body.append(self.starttag(node, "div", CLASS="exception-hierarchy-content"))
def depart_exception_hierarchy_node(self, node):
    # Emit the closing tag when the HTML writer leaves the node.
    self.body.append("</div>\n")
class ExceptionHierarchyDirective(Directive):
    # Body content is required.
    has_content = True
    def run(self):
        self.assert_has_content()
        # Build the custom node and nested-parse its content as reST.
        node = exception_hierarchy("\n".join(self.content))
        self.state.nested_parse(self.content, self.content_offset, node)
        return [node]
def setup(app):
    # Register node, HTML handlers and directive with Sphinx.
    app.add_node(exception_hierarchy, html=(visit_exception_hierarchy_node, depart_exception_hierarchy_node))
    app.add_directive("exception_hierarchy", ExceptionHierarchyDirective)
| en | 000726948_b4skyx-enhanced-discord.py_exception_hierarchy_8d12cfe74301.py | unknown | 288 |
import unittest
import util
class InlineDemoTransformTest(unittest.TestCase):
    # Unit tests for helper functions in the project's ``util`` module.
    def test_comment_strip(self):
        # An inline-demo snippet wrapped in an HTML comment is uncommented,
        # while surrounding markdown stays untouched.
        codesnippet = '''```html
<custom-element-demo width="500" height="500">
<template>
<link rel=import polymer-foo>
<next-code-block></next-code-block>
</template>
</custom-element-demo>
```
'''
        prefix = '== some markdown ==\n'
        suffix = '=== more markdown ===\n'
        markdown = prefix + '<!---\n' + codesnippet + '-->\n' + suffix
        expected = prefix + codesnippet + suffix
        self.assertEqual(util.inline_demo_transform(markdown), expected)
    def test_generate_prefixes(self):
        # Prefixes start at length 3 and stop one short of the full word.
        self.assertEqual(util.generate_prefixes('thisisword'),
                         ['thi', 'this', 'thisi', 'thisis', 'thisisw', 'thisiswo', 'thisiswor'])
        self.assertEqual(util.generate_prefixes('this'), ['thi'])
        self.assertEqual(util.generate_prefixes('thi'), [])
    def test_tokenise_more(self):
        # CamelCase identifiers split at the capital letters.
        self.assertEqual(util.tokenise_more('ThisIsWord'), ['This', 'Is', 'Word'])
    def test_generate_prefixes_from_list(self):
        # Result is lower-cased and de-duplicated (hence the unordered look).
        self.assertEqual(util.generate_prefixes_from_list(['ThisIsWord', 'moon']),
                         ['thisiswo', 'thisiswor', 'this', 'thisisw', 'wor', 'thisi', 'thisis', 'moo', 'thi'])
    def test_generate_prefixes_split(self):
        # Path-like names are split on separators before prefix generation.
        self.assertEqual(sorted(util.generate_prefixes_from_list(util.safe_split_strip('material-toggle/button'))),
                         ['but', 'butt', 'butto', 'mat', 'mate', 'mater', 'materi', 'materia', 'tog', 'togg', 'toggl'])
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
| import unittest
import util
class InlineDemoTransformTest(unittest.TestCase):
    # Tests for the ``util`` helper module.
    def test_comment_strip(self):
        # HTML-comment wrappers around inline-demo snippets are removed.
        codesnippet = '''```html
<custom-element-demo width="500" height="500">
<template>
<link rel=import polymer-foo>
<next-code-block></next-code-block>
</template>
</custom-element-demo>
```
'''
        prefix = '== some markdown ==\n'
        suffix = '=== more markdown ===\n'
        markdown = prefix + '<!---\n' + codesnippet + '-->\n' + suffix
        expected = prefix + codesnippet + suffix
        self.assertEqual(util.inline_demo_transform(markdown), expected)
    def test_generate_prefixes(self):
        # Minimum prefix length is 3; the full word itself is excluded.
        self.assertEqual(util.generate_prefixes('thisisword'),
                         ['thi', 'this', 'thisi', 'thisis', 'thisisw', 'thisiswo', 'thisiswor'])
        self.assertEqual(util.generate_prefixes('this'), ['thi'])
        self.assertEqual(util.generate_prefixes('thi'), [])
    def test_tokenise_more(self):
        self.assertEqual(util.tokenise_more('ThisIsWord'), ['This', 'Is', 'Word'])
    def test_generate_prefixes_from_list(self):
        self.assertEqual(util.generate_prefixes_from_list(['ThisIsWord', 'moon']),
                         ['thisiswo', 'thisiswor', 'this', 'thisisw', 'wor', 'thisi', 'thisis', 'moo', 'thi'])
    def test_generate_prefixes_split(self):
        self.assertEqual(sorted(util.generate_prefixes_from_list(util.safe_split_strip('material-toggle/button'))),
                         ['but', 'butt', 'butto', 'mat', 'mate', 'mater', 'materi', 'materia', 'tog', 'togg', 'toggl'])
if __name__ == '__main__':
    # Direct execution runs the suite via unittest's CLI entry point.
    unittest.main()
| en | 000263024_peterblazejewicz-webcomponents.org_util_test_9b03e80df14d.py | unknown | 513 |
# -*- coding: utf-8 -*-
#Created on Fri Apr 27 12:37:56 2018
#@author: ryanday
#MIT License
#Copyright (c) 2018 Ryan Patrick Day
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
import matplotlib.tri as mtri
import chinook.Ylm as Ylm
class wavefunction:
    '''
    This class acts to reorganize basis and wavefunction information in a more
    suitable data structure than the native orbital class, or the sake of plotting
    orbital wavefunctions. The relevant eigenvector can be redefined, so long as it
    represents a projection onto the same orbital basis set as defined previously.

    *args*:
        - **basis**: list of orbital objects
        - **vector**: numpy array of complex float, eigenvector projected onto the basis orbitals
    '''
    def __init__(self,basis,vector):
        # Basis and eigenvector must be index-aligned.
        if len(basis)==len(vector):
            self.basis = basis
            self.centres,self.centre_pointer = self.find_centres()
            self.harmonics,self.harmonic_pointer,self.projections =self.find_harmonics()
            self.vector = vector
        else:
            print('ERROR: incompatible basis and vector input. Check that both have same length.')
    def redefine_vector(self,vector):
        '''
        Update vector definition

        *args*:
            - **vector**: numpy array of complex float, same length as self.vector
        ***
        '''
        # In-place slice assignment: a shape mismatch raises ValueError.
        try:
            self.vector[:] = vector
        except ValueError:
            print('Error: Input vector is not of the same shape as original selection. Please check input vector.')
    def find_harmonics(self):
        '''
        Create a pointer array of basis indices and the associated spherical harmonics, as well as
        aa more convenient vector form of the projections themselves, as lists of complex float

        *return*:
            - **all_lm**: list of int, l,m pairs of all spherical harmonics relevant to calculation
            - **lm_pointers**: list of int, pointer indices relating each basis orbital projection to the
            lm pairs in *all_lm*
            - **projectors**: list of arrays of complex float, providing the complex projection of basis
            onto the related spherical harmonics
        ***
        '''
        all_lm = []
        lm_pointers = []
        projectors = []
        for o in self.basis:
            proj_pointers = np.zeros(len(o.proj))
            proj_vals = np.zeros(len(o.proj),dtype=complex)
            for oi in range(len(o.proj)):
                # Each o.proj row is (Re, Im, l, m).
                proj_vals[oi] = o.proj[oi][0]+1.0j*o.proj[oi][1]
                lm = np.array([o.proj[oi][2],o.proj[oi][3]]).astype(int)
                try:
                    d_lm = np.linalg.norm(np.array([lm_ii - lm for lm_ii in all_lm]),axis=1)
                    if d_lm.min()==0:
                        # Harmonic already registered: reuse its index.
                        index = np.where(d_lm==0)[0][0]
                        proj_pointers[oi]=index
                    else:
                        all_lm.append(lm)
                        proj_pointers[oi] = len(all_lm)-1
                except ValueError:
                    # all_lm is still empty, so the norm over an empty array
                    # raises; this is the very first projection encountered.
                    # Bug fix: point the *current* projection (oi) at the newly
                    # added harmonic instead of hard-coding slot 0 — the old
                    # code only worked because this branch can only fire at
                    # oi == 0.
                    all_lm.append(lm)
                    proj_pointers[oi] = len(all_lm)-1
            lm_pointers.append(list(proj_pointers.astype(int)))
            projectors.append(proj_vals)
        return all_lm,lm_pointers,projectors
    def find_centres(self):
        '''
        Create a Pointer array of basis indices and the centres of these basis orbitals.

        *return*:
            - **all_centres**: list of numpy array of length 3, indicating unique positions in the basis set
            - **centre_pointers**: list of int, indicating the indices of position array, associated with the
            location of the related orbital in real space.
        '''
        all_centres = []
        centre_pointers = []
        for o in self.basis:
            centre = o.pos
            try:
                d_centres = np.linalg.norm(np.array([centre-ac for ac in all_centres]),axis=1)
                if d_centres.min()==0.0:
                    # Site already known: reuse its index.
                    index = np.where(d_centres==0)[0][0]
                    centre_pointers.append(index)
                else:
                    all_centres.append(centre)
                    centre_pointers.append(len(all_centres)-1)
            except ValueError:
                # First orbital: all_centres is empty and the norm raises.
                all_centres.append(centre)
                centre_pointers.append(0)
        return all_centres,centre_pointers
    def calc_Ylm(self,th,ph):
        '''
        Calculate all spherical harmonics needed for present calculation

        *return*:
            - numpy array of complex float, of shape (len(self.harmonics),len(th))
        ***
        '''
        return np.array([Ylm.Y(int(lm[0]),int(lm[1]),th,ph) for lm in self.harmonics])
    def triangulate_wavefunction(self,n,plotting=True,ax=None):
        '''
        Plot the wavefunction stored in the class attributes as self.vector as a projection
        over the basis of spherical harmonics. The radial wavefunctions are not explicitly included,
        in the event of multiple basis atom sites, the length scale is set by the mean interatomic
        distance. The wavefunction phase is encoded in the colourscale of the mesh plot. The user
        sets the smoothness of the orbital projection by the integer argument *n*

        *args*:
            - **n**: int, number of angles in the mesh: Theta from 0 to pi is divided 2n times, and
            Phi from 0 to 2pi is divided 4n times

        *kwargs*:
            - **plotting**: boolean, turn on/off to display plot
            - **ax**: matplotlib Axes, for plotting on existing plot

        *return*:
            - **vertices**: numpy array of float, shape (len(centres), len(th)*len(ph), 3) locations of vertices
            - **triangulations**: numpy array of int, indicating the vertices connecting each surface patch
            - **colours**: numpy array of float, of shape (len(centres),len(triangles)) encoding the orbital phase for each surface patch of the plotting
            - **ax**: matplotlib Axes, for further modifications
        ***
        '''
        th,ph = make_angle_mesh(n)
        all_Ylm = self.calc_Ylm(th,ph)
        # Length scale: half the mean inter-site distance, or a fixed 4.0 for a
        # single site.
        if len(self.centres)>1:
            ad = 0.5*np.mean(np.array([np.linalg.norm(self.centres[i]-self.centres[j]) for i in range(len(self.centres)) for j in range(i,len(self.centres))]))
        else:
            ad = 4.0
        ncentres = len(self.centres)
        vertices = np.zeros((ncentres,len(th),3))
        radii = np.zeros((ncentres,len(th)),dtype=complex)
        triangulations = mtri.Triangulation(th,ph)
        colours = []
        # Accumulate each orbital's angular amplitude onto its atomic site.
        for bi in range(len(self.basis)):
            radii[self.centre_pointer[bi],:] += np.sum(np.array([all_Ylm[self.harmonic_pointer[bi][j]]*self.vector[bi]*self.projections[bi][j] for j in range(len(self.harmonic_pointer[bi]))]),axis=0)
        rescale = ad/np.mean(abs(radii)**2)
        for ni in range(ncentres):
            # Radius encodes |amplitude|^2; phase is mapped to colour below.
            vertices[ni,:,:]+=rescale*np.array([abs(radii[ni])**2*np.cos(ph)*np.sin(th),abs(radii[ni])**2*np.sin(th)*np.sin(ph),abs(radii[ni])**2*np.cos(th)]).T
            colours.append(col_phase(radii[ni,triangulations.triangles][:,1]))
            vertices[ni,:]+=self.centres[ni]
        colours = np.array(colours)
        if plotting:
            _,ax = self.plot_wavefunction(vertices,triangulations,colours,plot_ax=ax)
        return vertices,triangulations,colours,ax
    def plot_wavefunction(self,vertices,triangulations,colours,plot_ax = None,cbar_ax= None):
        '''
        Plotting function, for visualizing orbitals.

        *args*:
            - **vertices**: numpy array of float, shape (len(centres), len(th)*len(ph), 3) locations of vertices
            - **triangulations**: numpy array of int, indicating the vertices connecting each surface patch
            - **colours**: numpy array of float, of shape (len(centres),len(triangles)) encoding the orbital phase for each surface patch of the plotting
            - **plot_ax**: matplotlib Axes, for plotting on existing axes
            - **cbar_ax**: matplotlib Axes, for use in drawing colourbar

        *return*:
            - **plots**: list of plotted surfaces
            - **plot_ax**: matplotlib Axes, for further modifications
        ***
        '''
        ncentres = len(self.centres)
        plots = []
        if plot_ax is None:
            fig = plt.figure()
            plot_ax = fig.add_subplot(111,projection='3d')
        for ni in range(ncentres):
            plots.append(plot_ax.plot_trisurf(vertices[ni,:,0],vertices[ni,:,1],vertices[ni,:,2],triangles=triangulations.triangles,cmap=cm.hsv,antialiased=True,edgecolors='w',linewidth=0.2))
            # Colour range spans the full phase interval (-pi, pi].
            plots[-1].set_array(colours[ni])
            plots[-1].set_clim(-np.pi,np.pi)
        plot_ax.set_xlabel('X')
        plot_ax.set_ylabel('Y')
        plot_ax.set_zlabel('Z')
        plt.colorbar(plots[-1],ax=plot_ax,cax=cbar_ax)
        return plots,plot_ax
def make_angle_mesh(n):
    '''
    Quick utility function for generating an angular mesh over spherical surface

    *args*:
        - **n**: int, number of divisions of the angular space

    *return*:
        - **th**: numpy array of 2n float from 0 to pi
        - **ph**: numpy array of 4n float from 0 to 2pi
    ***
    '''
    theta_vals = np.linspace(0, np.pi, 2 * n)
    phi_vals = np.linspace(0, 2 * np.pi, 4 * n)
    # Cartesian product of the two angle ranges, flattened to 1-D arrays.
    theta_grid, phi_grid = np.meshgrid(theta_vals, phi_vals)
    return theta_grid.flatten(), phi_grid.flatten()
def col_phase(vals):
    '''
    Define the phase of a complex number

    *args*:
        - **vals**: complex float, or numpy array of complex float

    *return*:
        - float, or numpy array of float of same shape as vals, from -pi to pi
    ***
    '''
    # atan2 of (imaginary, real) gives the argument in (-pi, pi].
    return np.arctan2(np.imag(vals), np.real(vals))
def rephase_wavefunctions(vecs,index=-1):
    '''
    The wavefunction at different k-points can choose an arbitrary phase, as can
    a subspace of degenerate eigenstates. As such, it is often advisable to choose
    a global phase definition when comparing several different vectors. The user here
    passes a set of vectors, and they are rephased. The user has the option of specifying
    which basis index they would like to set the phasing. It is essential however that the
    projection onto at least one basis element is non-zero over the entire set of vectors
    for this rephasing to work.

    *args*:
        - **vecs**: numpy array of complex float, ordered as rows:vector index, columns: basis index

    *kwargs*:
        - **index**: int, optional choice of basis phase selection

    *return*:
        - **rephase**: numpy array of complex float of same shape as *vecs*
    ***
    '''
    rephase = np.copy(vecs)
    if index>-1:
        #check that user has selected a viable phase choice
        if abs(vecs[:,index]).min()<1e-10:
            print('Warning, the chosen basis index is invalid. Please make another selection.\n')
            print('Finite projection onto the basis element of choice must be finite. If you are\n')
            print('unsure, the computer can attempt to make a viable selection in the absence of\n')
            print('an indicated basis index.')
            return rephase
    else:
        # Bug fix: scan the *columns* (basis elements) — the old code iterated
        # over range(shape[0]) (the number of vectors), which raised IndexError
        # for tall arrays and silently skipped columns for wide ones.
        min_projs = np.array([abs(vecs[:,i]).min() for i in range(np.shape(vecs)[1])])
        index = np.where(min_projs>0)[0][0]
    # Rotate every vector so its `index` component becomes real and positive.
    phase_factors = np.conj(vecs[:,index])/abs(vecs[:,index])
    rephase = np.einsum('ij,i->ij',rephase,phase_factors)
    return rephase
| # -*- coding: utf-8 -*-
#Created on Fri Apr 27 12:37:56 2018
#@author: ryanday
#MIT License
#Copyright (c) 2018 Ryan Patrick Day
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
import matplotlib.tri as mtri
import chinook.Ylm as Ylm
class wavefunction:
'''
This class acts to reorganize basis and wavefunction information in a more
suitable data structure than the native orbital class, or the sake of plotting
orbital wavefunctions. The relevant eigenvector can be redefined, so long as it
represents a projection onto the same orbital basis set as defined previously.
*args*:
- **basis**: list of orbital objects
- **vector**: numpy array of complex float, eigenvector projected onto the basis orbitals
'''
    def __init__(self,basis,vector):
        # Basis orbitals and eigenvector entries must be index-aligned.
        if len(basis)==len(vector):
            self.basis = basis
            # Unique atomic sites and per-orbital pointers into them.
            self.centres,self.centre_pointer = self.find_centres()
            # Unique (l, m) harmonics, pointers, and complex projections.
            self.harmonics,self.harmonic_pointer,self.projections =self.find_harmonics()
            self.vector = vector
        else:
            print('ERROR: incompatible basis and vector input. Check that both have same length.')
    def redefine_vector(self,vector):
        '''
        Update vector definition

        *args*:
            - **vector**: numpy array of complex float, same length as self.vector
        ***
        '''
        # Slice-assign so the update happens in place; a shape mismatch
        # surfaces as a ValueError rather than silently rebinding.
        try:
            self.vector[:] = vector
        except ValueError:
            print('Error: Input vector is not of the same shape as original selection. Please check input vector.')
def find_harmonics(self):
'''
Create a pointer array of basis indices and the associated spherical harmonics, as well as
aa more convenient vector form of the projections themselves, as lists of complex float
*return*:
- **all_lm**: list of int, l,m pairs of all spherical harmonics relevant to calculation
- **lm_pointers**: list of int, pointer indices relating each basis orbital projection to the
lm pairs in *all_lm*
- **projectors**: list of arrays of complex float, providing the complex projection of basis
onto the related spherical harmonics
***
'''
all_lm = []
lm_pointers = []
projectors = []
for o in self.basis:
proj_pointers = np.zeros(len(o.proj))
proj_vals = np.zeros(len(o.proj),dtype=complex)
for oi in range(len(o.proj)):
proj_vals[oi] = o.proj[oi][0]+1.0j*o.proj[oi][1]
lm = np.array([o.proj[oi][2],o.proj[oi][3]]).astype(int)
try:
d_lm = np.linalg.norm(np.array([lm_ii - lm for lm_ii in all_lm]),axis=1)
if d_lm.min()==0:
index = np.where(d_lm==0)[0][0]
proj_pointers[oi]=index
else:
all_lm.append(lm)
proj_pointers[oi] = len(all_lm)-1
except ValueError:
all_lm.append(lm)
proj_pointers[0] = 0
lm_pointers.append(list(proj_pointers.astype(int)))
projectors.append(proj_vals)
return all_lm,lm_pointers,projectors
    def find_centres(self):
        '''
        Create a Pointer array of basis indices and the centres of these basis orbitals.

        *return*:
            - **all_centres**: list of numpy array of length 3, indicating unique positions in the basis set
            - **centre_pointers**: list of int, indicating the indices of position array, associated with the
            location of the related orbital in real space.
        '''
        all_centres = []
        centre_pointers = []
        for o in self.basis:
            centre = o.pos
            try:
                d_centres = np.linalg.norm(np.array([centre-ac for ac in all_centres]),axis=1)
                if d_centres.min()==0.0:
                    # Site already known: reuse its index.
                    index = np.where(d_centres==0)[0][0]
                    centre_pointers.append(index)
                else:
                    all_centres.append(centre)
                    centre_pointers.append(len(all_centres)-1)
            except ValueError:
                # First orbital: all_centres is empty, so the norm over an
                # empty array raises; register the site at index 0.
                all_centres.append(centre)
                centre_pointers.append(0)
        return all_centres,centre_pointers
def calc_Ylm(self, th, ph):
    '''
    Evaluate every spherical harmonic listed in self.harmonics on the
    angular mesh.

    *args*:
        - **th**, **ph**: numpy arrays of float, polar and azimuthal angles

    *return*:
        - numpy array of complex float, of shape (len(self.harmonics), len(th))
    '''
    harmonic_values = []
    for lm in self.harmonics:
        harmonic_values.append(Ylm.Y(int(lm[0]), int(lm[1]), th, ph))
    return np.array(harmonic_values)
def triangulate_wavefunction(self,n,plotting=True,ax=None):
    '''
    Plot the wavefunction stored in the class attributes as self.vector as a projection
    over the basis of spherical harmonics. The radial wavefunctions are not explicitly included,
    in the event of multiple basis atom sites, the length scale is set by the mean interatomic
    distance. The wavefunction phase is encoded in the colourscale of the mesh plot. The user
    sets the smoothness of the orbital projection by the integer argument *n*

    *args*:
        - **n**: int, number of angles in the mesh: Theta from 0 to pi is divided 2n times, and
          Phi from 0 to 2pi is divided 4n times

    *kwargs*:
        - **plotting**: boolean, turn on/off to display plot
        - **ax**: matplotlib Axes, for plotting on existing plot

    *return*:
        - **vertices**: numpy array of float, shape (len(centres), len(th)*len(ph), 3) locations of vertices
        - **triangulations**: numpy array of int, indicating the vertices connecting each surface patch
        - **colours**: numpy array of float, of shape (len(centres),len(triangles)) encoding the orbital phase for each surface patch of the plotting
        - **ax**: matplotlib Axes, for further modifications
    ***
    '''
    th,ph = make_angle_mesh(n)
    all_Ylm = self.calc_Ylm(th,ph)
    # Length scale: half the mean pairwise distance between centres, or a
    # fixed 4.0 when there is only a single centre.
    if len(self.centres)>1:
        ad = 0.5*np.mean(np.array([np.linalg.norm(self.centres[i]-self.centres[j]) for i in range(len(self.centres)) for j in range(i,len(self.centres))]))
    else:
        ad = 4.0
    ncentres = len(self.centres)
    vertices = np.zeros((ncentres,len(th),3))
    radii = np.zeros((ncentres,len(th)),dtype=complex)
    triangulations = mtri.Triangulation(th,ph)
    colours = []
    # Accumulate each orbital's projection (weighted by the state vector)
    # onto the angular mesh of its associated centre.
    for bi in range(len(self.basis)):
        radii[self.centre_pointer[bi],:] += np.sum(np.array([all_Ylm[self.harmonic_pointer[bi][j]]*self.vector[bi]*self.projections[bi][j] for j in range(len(self.harmonic_pointer[bi]))]),axis=0)
    # Normalize |radii|^2 so the plotted lobes match the chosen length scale.
    rescale = ad/np.mean(abs(radii)**2)
    for ni in range(ncentres):
        # Spherical -> Cartesian, with radius |radii|^2, then shift to centre.
        vertices[ni,:,:]+=rescale*np.array([abs(radii[ni])**2*np.cos(ph)*np.sin(th),abs(radii[ni])**2*np.sin(th)*np.sin(ph),abs(radii[ni])**2*np.cos(th)]).T
        # Phase of one vertex per triangle sets the patch colour.
        colours.append(col_phase(radii[ni,triangulations.triangles][:,1]))
        vertices[ni,:]+=self.centres[ni]
    colours = np.array(colours)
    if plotting:
        _,ax = self.plot_wavefunction(vertices,triangulations,colours,plot_ax=ax)
    return vertices,triangulations,colours,ax
def plot_wavefunction(self, vertices, triangulations, colours, plot_ax=None, cbar_ax=None):
    '''
    Plotting function, for visualizing orbitals.

    *args*:
        - **vertices**: numpy array of float, shape (len(centres), len(th)*len(ph), 3) locations of vertices
        - **triangulations**: numpy array of int, indicating the vertices connecting each surface patch
        - **colours**: numpy array of float, of shape (len(centres),len(triangles)) encoding the orbital phase for each surface patch of the plotting
        - **plot_ax**: matplotlib Axes, for plotting on existing axes
        - **cbar_ax**: matplotlib Axes, for use in drawing colourbar

    *return*:
        - **plots**: list of plotted surfaces
        - **plot_ax**: matplotlib Axes, for further modifications
    ***
    '''
    if plot_ax is None:
        fig = plt.figure()
        plot_ax = fig.add_subplot(111, projection='3d')
    plots = []
    for ci in range(len(self.centres)):
        # One triangulated surface per atomic centre, coloured by phase.
        surface = plot_ax.plot_trisurf(vertices[ci, :, 0], vertices[ci, :, 1], vertices[ci, :, 2],
                                       triangles=triangulations.triangles, cmap=cm.hsv,
                                       antialiased=True, edgecolors='w', linewidth=0.2)
        surface.set_array(colours[ci])
        surface.set_clim(-np.pi, np.pi)
        plots.append(surface)
    plot_ax.set_xlabel('X')
    plot_ax.set_ylabel('Y')
    plot_ax.set_zlabel('Z')
    plt.colorbar(plots[-1], ax=plot_ax, cax=cbar_ax)
    return plots, plot_ax
def make_angle_mesh(n):
    '''
    Quick utility function for generating an angular mesh over a spherical
    surface.

    *args*:
        - **n**: int, number of divisions of the angular space

    *return*:
        - **th**: numpy array of float, flattened polar angles in [0, pi]
        - **ph**: numpy array of float, flattened azimuthal angles in [0, 2pi]
          (both of length 2n * 4n)
    ***
    '''
    polar = np.linspace(0, np.pi, 2 * n)
    azimuthal = np.linspace(0, 2 * np.pi, 4 * n)
    polar_grid, azimuthal_grid = np.meshgrid(polar, azimuthal)
    return polar_grid.flatten(), azimuthal_grid.flatten()
def col_phase(vals):
    '''
    Compute the phase (complex argument) of a number or array of numbers.

    *args*:
        - **vals**: complex float, or numpy array of complex float

    *return*:
        - float, or numpy array of float of the same shape as vals, from -pi to pi
    ***
    '''
    # np.angle is exactly arctan2(imag, real).
    return np.angle(vals)
def rephase_wavefunctions(vecs, index=-1):
    '''
    The wavefunction at different k-points can choose an arbitrary phase, as can
    a subspace of degenerate eigenstates. As such, it is often advisable to choose
    a global phase definition when comparing several different vectors. The user here
    passes a set of vectors, and they are rephased. The user has the option of specifying
    which basis index they would like to set the phasing. It is essential however that the
    projection onto at least one basis element is non-zero over the entire set of vectors
    for this rephasing to work.

    *args*:
        - **vecs**: numpy array of complex float, ordered as rows:vector index, columns: basis index

    *kwargs*:
        - **index**: int, optional choice of basis phase selection

    *return*:
        - **rephase**: numpy array of complex float of same shape as *vecs*
    ***
    '''
    rephase = np.copy(vecs)
    if index > -1:
        # A user-selected basis index must have finite weight in every vector.
        if abs(vecs[:, index]).min() < 1e-10:
            print('Warning, the chosen basis index is invalid. Please make another selection.\n')
            print('Finite projection onto the basis element of choice must be finite. If you are\n')
            print('unsure, the computer can attempt to make a viable selection in the absence of\n')
            print('an indicated basis index.')
            return rephase
    else:
        # Auto-select a basis column with finite projection in every vector.
        # Bug fix: the scan must run over the basis (column) dimension,
        # np.shape(vecs)[1] -- the original iterated over the number of
        # vectors, which fails for non-square input. The threshold now also
        # matches the 1e-10 tolerance used for an explicit index above.
        min_projs = np.array([abs(vecs[:, i]).min() for i in range(np.shape(vecs)[1])])
        index = np.where(min_projs > 1e-10)[0][0]
    # Multiply each vector (row) by the unit conjugate phase of its chosen
    # component, making that component real and positive.
    phase_factors = np.conj(vecs[:, index]) / abs(vecs[:, index])
    rephase = np.einsum('ij,i->ij', rephase, phase_factors)
    return rephase
| en | 000434999_jminar-chinook_orbital_plotting_749fb9a09d52.py | unknown | 3,565 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from senlin.common import context
from senlin.engine.notifications import base
from senlin import objects
from senlin.rpc import client as rpc_client
LOG = logging.getLogger(__name__)
class HeatNotificationEndpoint(base.Endpoints):
    """Notification endpoint listening for Heat stack events.

    When a watched stack is deleted, a recovery request is issued for the
    cluster node backed by that stack.
    """

    # Heat event types that signal a stack failure, mapped to the event
    # label forwarded in the recovery request parameters.
    STACK_FAILURE_EVENTS = {
        'orchestration.stack.delete.end': 'DELETE',
    }

    def __init__(self, project_id, cluster_id, recover_action):
        super(HeatNotificationEndpoint, self).__init__(
            project_id, cluster_id, recover_action
        )
        # Bug fix: the event_type pattern contained '\.' inside a plain string
        # literal, an invalid escape sequence (DeprecationWarning since 3.6,
        # SyntaxWarning on Python 3.12+). Raw strings keep the regex intact.
        self.filter_rule = messaging.NotificationFilter(
            publisher_id=r'^orchestration.*',
            event_type=r'^orchestration\.stack\..*',
            context={'project_id': '^%s$' % project_id})
        self.rpc = rpc_client.get_engine_client()
        self.target = messaging.Target(
            topic=cfg.CONF.health_manager.heat_notification_topic,
            exchange=cfg.CONF.health_manager.heat_control_exchange,
        )

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        """Handle an info-level notification; trigger node recovery on failure."""
        if event_type not in self.STACK_FAILURE_EVENTS:
            return
        tags = payload['tags']
        if tags is None or tags == []:
            return
        cluster_id = None
        node_id = None
        # Tags are presumably formatted 'cluster_id=<id>' and
        # 'cluster_node_id=<id>'; the [11:]/[16:] slices skip the 'key='
        # prefix. Only events for our own cluster are accepted.
        for tag in tags:
            if cluster_id is None:
                start = tag.find('cluster_id')
                if start == 0 and tag[11:] == self.cluster_id:
                    cluster_id = tag[11:]
            if node_id is None:
                start = tag.find('cluster_node_id')
                if start == 0:
                    node_id = tag[16:]
        if cluster_id is None or node_id is None:
            return
        params = {
            'event': self.STACK_FAILURE_EVENTS[event_type],
            'state': payload.get('state', 'Unknown'),
            'stack_id': payload.get('stack_identity', 'Unknown'),
            'timestamp': metadata['timestamp'],
            'publisher': publisher_id,
            'operation': self.recover_action['operation'],
        }
        LOG.info("Requesting stack recovery: %s", node_id)
        ctx = context.get_service_context(project_id=self.project_id,
                                          user_id=payload['user_identity'])
        req = objects.NodeRecoverRequest(identity=node_id, params=params)
        self.rpc.call(ctx, 'node_recover', req)
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from senlin.common import context
from senlin.engine.notifications import base
from senlin import objects
from senlin.rpc import client as rpc_client
LOG = logging.getLogger(__name__)
class HeatNotificationEndpoint(base.Endpoints):
    """Notification endpoint listening for Heat stack events.

    When a watched stack is deleted, a recovery request is issued for the
    cluster node backed by that stack.
    """

    # Heat event types that signal a stack failure, mapped to the event
    # label forwarded in the recovery request parameters.
    STACK_FAILURE_EVENTS = {
        'orchestration.stack.delete.end': 'DELETE',
    }

    def __init__(self, project_id, cluster_id, recover_action):
        super(HeatNotificationEndpoint, self).__init__(
            project_id, cluster_id, recover_action
        )
        # Bug fix: the event_type pattern contained '\.' inside a plain string
        # literal, an invalid escape sequence (DeprecationWarning since 3.6,
        # SyntaxWarning on Python 3.12+). Raw strings keep the regex intact.
        self.filter_rule = messaging.NotificationFilter(
            publisher_id=r'^orchestration.*',
            event_type=r'^orchestration\.stack\..*',
            context={'project_id': '^%s$' % project_id})
        self.rpc = rpc_client.get_engine_client()
        self.target = messaging.Target(
            topic=cfg.CONF.health_manager.heat_notification_topic,
            exchange=cfg.CONF.health_manager.heat_control_exchange,
        )

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        """Handle an info-level notification; trigger node recovery on failure."""
        if event_type not in self.STACK_FAILURE_EVENTS:
            return
        tags = payload['tags']
        if tags is None or tags == []:
            return
        cluster_id = None
        node_id = None
        # Tags are presumably formatted 'cluster_id=<id>' and
        # 'cluster_node_id=<id>'; the [11:]/[16:] slices skip the 'key='
        # prefix. Only events for our own cluster are accepted.
        for tag in tags:
            if cluster_id is None:
                start = tag.find('cluster_id')
                if start == 0 and tag[11:] == self.cluster_id:
                    cluster_id = tag[11:]
            if node_id is None:
                start = tag.find('cluster_node_id')
                if start == 0:
                    node_id = tag[16:]
        if cluster_id is None or node_id is None:
            return
        params = {
            'event': self.STACK_FAILURE_EVENTS[event_type],
            'state': payload.get('state', 'Unknown'),
            'stack_id': payload.get('stack_identity', 'Unknown'),
            'timestamp': metadata['timestamp'],
            'publisher': publisher_id,
            'operation': self.recover_action['operation'],
        }
        LOG.info("Requesting stack recovery: %s", node_id)
        ctx = context.get_service_context(project_id=self.project_id,
                                          user_id=payload['user_identity'])
        req = objects.NodeRecoverRequest(identity=node_id, params=params)
        self.rpc.call(ctx, 'node_recover', req)
| en | 000483823_openstack-senlin_heat_endpoint_65f3fd52ca2c.py | unknown | 821 |
def persian_num2english(input_string: str, reverse: bool = False):
    """
    Converts persian numbers to english
    Args:
        input_string: text that may contain Persian (or ASCII) digits
        reverse: If set to True, converts english 2 persian!
    Returns:
        str: the translated string; non-digit characters pass through unchanged
    """
    persian_digits = "۱۲۳۴۵۶۷۸۹۰"
    english_digits = "1234567890"
    if reverse:
        table = str.maketrans(english_digits, persian_digits)
    else:
        table = str.maketrans(persian_digits, english_digits)
    return input_string.translate(table)
def arabic_char2fa_char(input_string: str):
    """Replace Arabic kaf/yeh characters with their Persian equivalents."""
    table = str.maketrans("كي", "کی")
    return input_string.translate(table)
def persian_num2english(input_string: str, reverse: bool = False):
    """
    Converts persian numbers to english
    Args:
        input_string: text that may contain Persian (or ASCII) digits
        reverse: If set to True, converts english 2 persian!
    Returns:
        str: the translated string; non-digit characters pass through unchanged
    """
    persian_digits = "۱۲۳۴۵۶۷۸۹۰"
    english_digits = "1234567890"
    if reverse:
        table = str.maketrans(english_digits, persian_digits)
    else:
        table = str.maketrans(persian_digits, english_digits)
    return input_string.translate(table)
def arabic_char2fa_char(input_string: str):
    """Replace Arabic kaf/yeh characters with their Persian equivalents."""
    table = str.maketrans("كي", "کی")
    return input_string.translate(table)
| en | 000707904_pooya-mohammadi-deep_utils_utils_320ab1a65f76.py | unknown | 291 |
from model.base_model import BaseModel
import thundergbm as tgb
import time
import numpy as np
import utils.data_utils as du
from model.datasets import Dataset
class ThunderGBMModel(BaseModel):
    """Benchmark wrapper around ThunderGBM's GPU gradient-boosting models."""

    def __init__(self, depth=6, n_device=1, n_parallel_trees=1,
                 verbose=0, column_sampling_rate=1.0, bagging=0, tree_method='auto'):
        BaseModel.__init__(self)
        self.verbose = verbose
        self.n_device = n_device
        self.column_sampling_rate = column_sampling_rate
        self.bagging = bagging
        self.n_parallel_trees = n_parallel_trees
        self.tree_method = tree_method
        self.objective = ""
        self.num_class = 1
        # NOTE(review): `depth` is accepted but never stored; _train_model
        # reads self.max_depth (presumably set by BaseModel.__init__).
        # Confirm whether `depth` should be assigned to self.max_depth.

    def _config_model(self, data):
        """Select the ThunderGBM objective (and class count) from the task."""
        if data.task == "Regression":
            self.objective = "reg:linear"
        elif data.task == "Multiclass classification":
            self.objective = "multi:softmax"
            # Labels are assumed to be 0..K-1, so K = max label + 1.
            self.num_class = int(np.max(data.y_test) + 1)
        elif data.task == "Classification":
            self.objective = "binary:logistic"
        elif data.task == "Ranking":
            self.objective = "rank:ndcg"
        else:
            raise ValueError("Unknown task: " + data.task)

    def _train_model(self, data):
        """Fit a regressor or classifier; return wall-clock training time (s)."""
        # Bug fix: the original used `data.task is 'Regression'`, comparing
        # object identity with a string literal. That relies on string
        # interning (implementation-dependent, SyntaxWarning on CPython 3.8+);
        # equality is what was intended.
        if data.task == 'Regression':
            self.model = tgb.TGBMRegressor(tree_method=self.tree_method, depth=self.max_depth, n_trees=40, n_gpus=1,
                                           min_child_weight=1.0, lambda_tgbm=1.0, gamma=1.0,
                                           max_num_bin=255, verbose=0, column_sampling_rate=1.0,
                                           bagging=0, n_parallel_trees=1, learning_rate=1.0,
                                           objective="reg:linear", num_class=1)
        else:
            self.model = tgb.TGBMClassifier(bagging=1, lambda_tgbm=1, learning_rate=0.07, min_child_weight=1.2, n_gpus=1, verbose=0,
                                            n_parallel_trees=40, gamma=0.2, depth=self.max_depth, n_trees=40, tree_method=self.tree_method, objective='multi:softprob')
        start = time.time()
        self.model.fit(data.X_train, data.y_train)
        elapsed = time.time() - start
        print("##################!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! %.5f" % elapsed)
        return elapsed

    def _predict(self, data):
        """Score the fitted model on the test split with the dataset metric."""
        pred = self.model.predict(data.X_test)
        metric = self.eval(data, pred)
        return metric

    def model_name(self):
        """Build an identifier string: library_device_rounds_depth."""
        name = "thundergbm_"
        use_cpu = "gpu_" if self.use_gpu else "cpu_"
        nr = str(self.num_rounds) + "_"
        return name + use_cpu + nr + str(self.max_depth)
if __name__ == "__main__":
    # Smoke-test entry point: load the real-sim dataset and time one full run.
    # X, y = du.get_higgs()
    dataset = Dataset(name='higgs', task='Regression', metric='RMSE', get_func=du.get_realsim)
    print(dataset.X_train.shape)
    print(dataset.y_test.shape)
    t_start = time.time()
    tgmModel = ThunderGBMModel()
    tgmModel.tree_method = 'hist'  # histogram-based tree construction
    tgmModel.run_model(data=dataset)
    eplased = time.time() - t_start  # NOTE: typo of 'elapsed'; kept, it is printed on the next line
print("--------->> " + str(eplased)) | from model.base_model import BaseModel
import thundergbm as tgb
import time
import numpy as np
import utils.data_utils as du
from model.datasets import Dataset
class ThunderGBMModel(BaseModel):
    """Benchmark wrapper around ThunderGBM's GPU gradient-boosting models."""

    def __init__(self, depth=6, n_device=1, n_parallel_trees=1,
                 verbose=0, column_sampling_rate=1.0, bagging=0, tree_method='auto'):
        BaseModel.__init__(self)
        self.verbose = verbose
        self.n_device = n_device
        self.column_sampling_rate = column_sampling_rate
        self.bagging = bagging
        self.n_parallel_trees = n_parallel_trees
        self.tree_method = tree_method
        self.objective = ""
        self.num_class = 1
        # NOTE(review): `depth` is accepted but never stored; _train_model
        # reads self.max_depth (presumably set by BaseModel.__init__).
        # Confirm whether `depth` should be assigned to self.max_depth.

    def _config_model(self, data):
        """Select the ThunderGBM objective (and class count) from the task."""
        if data.task == "Regression":
            self.objective = "reg:linear"
        elif data.task == "Multiclass classification":
            self.objective = "multi:softmax"
            # Labels are assumed to be 0..K-1, so K = max label + 1.
            self.num_class = int(np.max(data.y_test) + 1)
        elif data.task == "Classification":
            self.objective = "binary:logistic"
        elif data.task == "Ranking":
            self.objective = "rank:ndcg"
        else:
            raise ValueError("Unknown task: " + data.task)

    def _train_model(self, data):
        """Fit a regressor or classifier; return wall-clock training time (s)."""
        # Bug fix: the original used `data.task is 'Regression'`, comparing
        # object identity with a string literal. That relies on string
        # interning (implementation-dependent, SyntaxWarning on CPython 3.8+);
        # equality is what was intended.
        if data.task == 'Regression':
            self.model = tgb.TGBMRegressor(tree_method=self.tree_method, depth=self.max_depth, n_trees=40, n_gpus=1,
                                           min_child_weight=1.0, lambda_tgbm=1.0, gamma=1.0,
                                           max_num_bin=255, verbose=0, column_sampling_rate=1.0,
                                           bagging=0, n_parallel_trees=1, learning_rate=1.0,
                                           objective="reg:linear", num_class=1)
        else:
            self.model = tgb.TGBMClassifier(bagging=1, lambda_tgbm=1, learning_rate=0.07, min_child_weight=1.2, n_gpus=1, verbose=0,
                                            n_parallel_trees=40, gamma=0.2, depth=self.max_depth, n_trees=40, tree_method=self.tree_method, objective='multi:softprob')
        start = time.time()
        self.model.fit(data.X_train, data.y_train)
        elapsed = time.time() - start
        print("##################!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! %.5f" % elapsed)
        return elapsed

    def _predict(self, data):
        """Score the fitted model on the test split with the dataset metric."""
        pred = self.model.predict(data.X_test)
        metric = self.eval(data, pred)
        return metric

    def model_name(self):
        """Build an identifier string: library_device_rounds_depth."""
        name = "thundergbm_"
        use_cpu = "gpu_" if self.use_gpu else "cpu_"
        nr = str(self.num_rounds) + "_"
        return name + use_cpu + nr + str(self.max_depth)
if __name__ == "__main__":
    # Smoke-test entry point: load the real-sim dataset and time one full run.
    # X, y = du.get_higgs()
    dataset = Dataset(name='higgs', task='Regression', metric='RMSE', get_func=du.get_realsim)
    print(dataset.X_train.shape)
    print(dataset.y_test.shape)
    t_start = time.time()
    tgmModel = ThunderGBMModel()
    tgmModel.tree_method = 'hist'  # histogram-based tree construction
    tgmModel.run_model(data=dataset)
    eplased = time.time() - t_start  # NOTE: typo of 'elapsed'; kept, it is printed on the next line
print("--------->> " + str(eplased)) | en | 000668355_zeyiwen-gbdt_thundergbm_model_9f3ad0b5ae32.py | unknown | 983 |
# Copyright CERFACS (http://cerfacs.fr/)
# Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
#
# Author: Natalia Tatarinova
from netCDF4 import MFDataset
import numpy
import sys
from . import util_dt
def get_tile_dimension(in_files, var_name, transfer_limit_Mbytes=None, time_range=None):
    '''
    Computes the total size of 3D variable array and returns the optimal tile dimension for spatial chunking.
    :param in_files: absolute path(s) to NetCDF dataset(s) (including OPeNDAP URLs)
    :type in_files: list
    :param var_name: variable name to process
    :type var_name: str
    :param transfer_limit_Mbytes: maximum OPeNDAP/THREDDS transfer limit in Mbytes (default: None)
    :type transfer_limit_Mbytes: float
    :param time_range: time range
    :type time_range: list of 2 datetime objects: [dt1, dt2]
    rtype: int
    .. warning:: only for 3D variables
    '''
    # Bug fix: None comparisons now use `is None` (PEP 8; `== None` can be
    # hijacked by an overloaded __eq__).
    if transfer_limit_Mbytes is None:
        return 0
    transfer_limit_bytes = transfer_limit_Mbytes * 1024 * 1024  # Mbytes --> bytes
    in_files.sort()
    mfnc = MFDataset(in_files, 'r', aggdim='time')
    ndim = mfnc.variables[var_name].ndim
    if ndim != 3:
        # NOTE(review): execution continues after this message and will most
        # likely fail below for non-3D variables; consider raising instead.
        print("ERROR: The variable to process must be 3D")
    v = mfnc.variables[var_name]
    v_shape = v.shape
    v_nb_bytes = v.dtype.itemsize
    if time_range is None:
        # Tile side such that one tile's full time column fits in the limit.
        optimal_tile_dimension = int(numpy.sqrt(transfer_limit_bytes / (v_shape[0] * v_nb_bytes)))
    else:
        var_time = mfnc.variables['time']
        try:
            time_calend = var_time.calendar
        except AttributeError:
            # Bug fix: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit; only a missing `calendar`
            # attribute should trigger the gregorian fallback.
            time_calend = 'gregorian'
        time_units = var_time.units
        time_arr = var_time[:]
        dt_arr = numpy.array([util_dt.num2date(dt, calend=time_calend, units=time_units) for dt in time_arr])
        indices_subset = util_dt.get_indices_subset(dt_arr, time_range)
        nb_time_steps_after_subset = len(indices_subset)
        optimal_tile_dimension = int(numpy.sqrt(transfer_limit_bytes / (nb_time_steps_after_subset * v_nb_bytes)))
    mfnc.close()
    return optimal_tile_dimension
| # Copyright CERFACS (http://cerfacs.fr/)
# Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
#
# Author: Natalia Tatarinova
from netCDF4 import MFDataset
import numpy
import sys
from . import util_dt
def get_tile_dimension(in_files, var_name, transfer_limit_Mbytes=None, time_range=None):
    '''
    Computes the total size of 3D variable array and returns the optimal tile dimension for spatial chunking.
    :param in_files: absolute path(s) to NetCDF dataset(s) (including OPeNDAP URLs)
    :type in_files: list
    :param var_name: variable name to process
    :type var_name: str
    :param transfer_limit_Mbytes: maximum OPeNDAP/THREDDS transfer limit in Mbytes (default: None)
    :type transfer_limit_Mbytes: float
    :param time_range: time range
    :type time_range: list of 2 datetime objects: [dt1, dt2]
    rtype: int
    .. warning:: only for 3D variables
    '''
    # Bug fix: None comparisons now use `is None` (PEP 8; `== None` can be
    # hijacked by an overloaded __eq__).
    if transfer_limit_Mbytes is None:
        return 0
    transfer_limit_bytes = transfer_limit_Mbytes * 1024 * 1024  # Mbytes --> bytes
    in_files.sort()
    mfnc = MFDataset(in_files, 'r', aggdim='time')
    ndim = mfnc.variables[var_name].ndim
    if ndim != 3:
        # NOTE(review): execution continues after this message and will most
        # likely fail below for non-3D variables; consider raising instead.
        print("ERROR: The variable to process must be 3D")
    v = mfnc.variables[var_name]
    v_shape = v.shape
    v_nb_bytes = v.dtype.itemsize
    if time_range is None:
        # Tile side such that one tile's full time column fits in the limit.
        optimal_tile_dimension = int(numpy.sqrt(transfer_limit_bytes / (v_shape[0] * v_nb_bytes)))
    else:
        var_time = mfnc.variables['time']
        try:
            time_calend = var_time.calendar
        except AttributeError:
            # Bug fix: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit; only a missing `calendar`
            # attribute should trigger the gregorian fallback.
            time_calend = 'gregorian'
        time_units = var_time.units
        time_arr = var_time[:]
        dt_arr = numpy.array([util_dt.num2date(dt, calend=time_calend, units=time_units) for dt in time_arr])
        indices_subset = util_dt.get_indices_subset(dt_arr, time_range)
        nb_time_steps_after_subset = len(indices_subset)
        optimal_tile_dimension = int(numpy.sqrt(transfer_limit_bytes / (nb_time_steps_after_subset * v_nb_bytes)))
    mfnc.close()
    return optimal_tile_dimension
| en | 000415246_bzah-icclim_arr_size_a3e097130343.py | unknown | 811 |
"""
@author: Deniz Altinbuken, Emin Gun Sirer
@note: Example counter
@copyright: See LICENSE
"""
class Counter:
    """A minimal integer counter supporting increment and decrement."""

    def __init__(self, value=0):
        # Starting count; defaults to zero.
        self.value = value

    def increment(self):
        """Raise the count by one."""
        self.value = self.value + 1

    def decrement(self):
        """Lower the count by one."""
        self.value = self.value - 1

    def getvalue(self):
        """Return the current count."""
        return self.value

    def __str__(self):
        return "The counter value is %d" % self.value
| """
@author: Deniz Altinbuken, Emin Gun Sirer
@note: Example counter
@copyright: See LICENSE
"""
class Counter:
    """A minimal integer counter supporting increment and decrement."""

    def __init__(self, value=0):
        # Starting count; defaults to zero.
        self.value = value

    def increment(self):
        """Raise the count by one."""
        self.value = self.value + 1

    def decrement(self):
        """Lower the count by one."""
        self.value = self.value - 1

    def getvalue(self):
        """Return the current count."""
        return self.value

    def __str__(self):
        return "The counter value is %d" % self.value
| en | 000475479_denizalti-concoord_counter_0f9f6f5e1c29.py | unknown | 122 |
# Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Module for registering DPUCZDX8G u280 target """
import os
import json
import pyxir
import logging
from pyxir.graph.transformers import subgraph
from .common import xgraph_dpu_optimizer, xgraph_dpu_quantizer
from .vai_c import VAICompiler
logger = logging.getLogger('pyxir')
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
def xgraph_dpu_u280_build_func(xgraph, work_dir=None, **kwargs):
    """Partition the XGraph into subgraphs for the DPUCAHX8H-u280 target.

    :param xgraph: the XGraph to partition
    :param work_dir: working directory for build artifacts; defaults to the
        current directory at call time
    :return: the partitioned XGraph
    """
    # Bug fix: the default was `work_dir=os.getcwd()`, which is evaluated
    # once at import time and freezes whatever directory the process started
    # in. Resolve the default at call time instead.
    if work_dir is None:
        work_dir = os.getcwd()
    # TODO here or in optimizer, both?
    # DPU layers are in NHWC format because of the tensorflow
    # intemediate structure we use to communicate with
    # DECENT/DNNC
    return subgraph.xgraph_build_func(
        xgraph=xgraph,
        target='DPUCAHX8H-u280',
        xtype='DPU',
        layout='NHWC',
        work_dir=work_dir
    )
def xgraph_dpu_u280_compiler(xgraph, **kwargs):
    """Compile an XGraph with the Vitis-AI compiler for the U280 DPU.

    :raises ValueError: if the expected arch.json is not installed
    """
    # Vitis-AI 1.3 - ...
    arch = "/opt/vitis_ai/compiler/arch/DPUCAHX8H/U280/arch.json"
    if not os.path.isfile(arch):
        raise ValueError("Arch file: {} does not exist".format(arch))
    return VAICompiler(xgraph, arch=arch, **kwargs).compile()
| # Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Module for registering DPUCZDX8G u280 target """
import os
import json
import pyxir
import logging
from pyxir.graph.transformers import subgraph
from .common import xgraph_dpu_optimizer, xgraph_dpu_quantizer
from .vai_c import VAICompiler
logger = logging.getLogger('pyxir')
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
def xgraph_dpu_u280_build_func(xgraph, work_dir=None, **kwargs):
    """Partition the XGraph into subgraphs for the DPUCAHX8H-u280 target.

    :param xgraph: the XGraph to partition
    :param work_dir: working directory for build artifacts; defaults to the
        current directory at call time
    :return: the partitioned XGraph
    """
    # Bug fix: the default was `work_dir=os.getcwd()`, which is evaluated
    # once at import time and freezes whatever directory the process started
    # in. Resolve the default at call time instead.
    if work_dir is None:
        work_dir = os.getcwd()
    # TODO here or in optimizer, both?
    # DPU layers are in NHWC format because of the tensorflow
    # intemediate structure we use to communicate with
    # DECENT/DNNC
    return subgraph.xgraph_build_func(
        xgraph=xgraph,
        target='DPUCAHX8H-u280',
        xtype='DPU',
        layout='NHWC',
        work_dir=work_dir
    )
def xgraph_dpu_u280_compiler(xgraph, **kwargs):
    """Compile an XGraph with the Vitis-AI compiler for the U280 DPU.

    :raises ValueError: if the expected arch.json is not installed
    """
    # Vitis-AI 1.3 - ...
    arch = "/opt/vitis_ai/compiler/arch/DPUCAHX8H/U280/arch.json"
    if not os.path.isfile(arch):
        raise ValueError("Arch file: {} does not exist".format(arch))
    return VAICompiler(xgraph, arch=arch, **kwargs).compile()
| en | 000398028_anilmartha-pyxir_u280_f43db945ad56.py | unknown | 547 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
from telegram.ext import Dispatcher, MessageHandler, Filters, CallbackQueryHandler
from handlers.process_drive_links import process_drive_links
from utils.config_loader import config
from utils.helper import parse_folder_id_from_url, alert_users
from utils.process import leave_chat_from_message
from utils.restricted import restricted_admin, restricted
logger = logging.getLogger(__name__)
def init(dispatcher: Dispatcher):
    """Register all message and callback-query handlers on *dispatcher*."""
    handlers = (
        # Fresh (non-edited) text/caption messages in whitelisted groups.
        MessageHandler(Filters.group & Filters.chat(config.GROUP_IDS) &
                       (Filters.text | Filters.caption) &
                       ~Filters.update.edited_message,
                       process_message),
        # Fresh messages from the primary authorised user.
        MessageHandler(Filters.chat(config.USER_IDS[0]) &
                       (Filters.text | Filters.caption) &
                       ~Filters.update.edited_message,
                       process_message_from_authorised_user),
        # Fresh private (non-group) messages.
        MessageHandler((~Filters.group) &
                       (Filters.text | Filters.caption) &
                       ~Filters.update.edited_message,
                       process_message),
        # '#' placeholder buttons are answered silently.
        CallbackQueryHandler(ignore_callback, pattern=r'^#$'),
        # Any other callback data is unexpected and gets reported.
        CallbackQueryHandler(get_warning),
    )
    for handler in handlers:
        dispatcher.add_handler(handler)
def ignore_callback(update, context):
    """Answer a placeholder ('#') callback query without any visible effect."""
    update.callback_query.answer(text='')
def get_warning(update, context):
    """Report a callback query with unrecognized data, then answer it."""
    # Flag the unexpected payload to the configured admins before answering.
    alert_users(context, update.effective_user, 'unknown query data',
                update.callback_query.data)
    update.callback_query.answer(text='哟呵', show_alert=True)
def leave_from_chat(update, context):
    """Leave any group/channel chat that is not in the configured whitelist."""
    post = update.channel_post
    if post:
        # Channel posts: negative chat ids denote groups/channels.
        if post.chat_id < 0 and post.chat_id not in config.GROUP_IDS:
            leave_chat_from_message(post, context)
        return
    message = update.message
    if message.chat_id < 0 and message.chat_id not in config.GROUP_IDS:
        leave_chat_from_message(message, context)
@restricted_admin
def process_message_from_authorised_user(update, context):
    """Handle a message from an admin user: extract and process Drive links."""
    logger.debug(update.message)
    message = update.message
    # Captioned media carry their text in `caption`, plain messages in `text`.
    text_urled = message.caption_html_urled if message.caption else message.text_html_urled
    if parse_folder_id_from_url(text_urled):
        process_drive_links(update, context)
@restricted
def process_message(update, context):
    """Handle an incoming message: detect and process Google Drive links."""
    message = update.message
    if not message:
        return
    # Messages from the primary user's own chat are not logged.
    if message.chat_id != config.USER_IDS[0]:
        logger.debug(message)
    # Captioned media carry their text in `caption`, plain messages in `text`.
    text_urled = message.caption_html_urled if message.caption else message.text_html_urled
    if parse_folder_id_from_url(text_urled):
        process_drive_links(update, context)
| #!/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
from telegram.ext import Dispatcher, MessageHandler, Filters, CallbackQueryHandler
from handlers.process_drive_links import process_drive_links
from utils.config_loader import config
from utils.helper import parse_folder_id_from_url, alert_users
from utils.process import leave_chat_from_message
from utils.restricted import restricted_admin, restricted
logger = logging.getLogger(__name__)
def init(dispatcher: Dispatcher):
    """Register all message and callback-query handlers on *dispatcher*."""
    handlers = (
        # Fresh (non-edited) text/caption messages in whitelisted groups.
        MessageHandler(Filters.group & Filters.chat(config.GROUP_IDS) &
                       (Filters.text | Filters.caption) &
                       ~Filters.update.edited_message,
                       process_message),
        # Fresh messages from the primary authorised user.
        MessageHandler(Filters.chat(config.USER_IDS[0]) &
                       (Filters.text | Filters.caption) &
                       ~Filters.update.edited_message,
                       process_message_from_authorised_user),
        # Fresh private (non-group) messages.
        MessageHandler((~Filters.group) &
                       (Filters.text | Filters.caption) &
                       ~Filters.update.edited_message,
                       process_message),
        # '#' placeholder buttons are answered silently.
        CallbackQueryHandler(ignore_callback, pattern=r'^#$'),
        # Any other callback data is unexpected and gets reported.
        CallbackQueryHandler(get_warning),
    )
    for handler in handlers:
        dispatcher.add_handler(handler)
def ignore_callback(update, context):
    """Answer a placeholder ('#') callback query without any visible effect."""
    update.callback_query.answer(text='')
def get_warning(update, context):
    """Report a callback query with unrecognized data, then answer it."""
    # Flag the unexpected payload to the configured admins before answering.
    alert_users(context, update.effective_user, 'unknown query data',
                update.callback_query.data)
    update.callback_query.answer(text='哟呵', show_alert=True)
def leave_from_chat(update, context):
    """Leave any group/channel chat that is not in the configured whitelist."""
    post = update.channel_post
    if post:
        # Channel posts: negative chat ids denote groups/channels.
        if post.chat_id < 0 and post.chat_id not in config.GROUP_IDS:
            leave_chat_from_message(post, context)
        return
    message = update.message
    if message.chat_id < 0 and message.chat_id not in config.GROUP_IDS:
        leave_chat_from_message(message, context)
@restricted_admin
def process_message_from_authorised_user(update, context):
    """Handle a message from an admin user: extract and process Drive links."""
    logger.debug(update.message)
    message = update.message
    # Captioned media carry their text in `caption`, plain messages in `text`.
    text_urled = message.caption_html_urled if message.caption else message.text_html_urled
    if parse_folder_id_from_url(text_urled):
        process_drive_links(update, context)
@restricted
def process_message(update, context):
    """Handle an incoming message: detect and process Google Drive links."""
    message = update.message
    if not message:
        return
    # Messages from the primary user's own chat are not logged.
    if message.chat_id != config.USER_IDS[0]:
        logger.debug(message)
    # Captioned media carry their text in `caption`, plain messages in `text`.
    text_urled = message.caption_html_urled if message.caption else message.text_html_urled
    if parse_folder_id_from_url(text_urled):
        process_drive_links(update, context)
| en | 000461102_winkxx-telegram_gcloner_process_message_558e562c69af.py | unknown | 816 |
import os
import argparse
import sys
import cv2
import numpy as np
import time
from tfservingclient.client import Client
def parse_args(args):
    """Parse demo command-line options from *args* (a list of strings)."""
    parser = argparse.ArgumentParser("test model")
    defaults = {
        '--pic-dir': "../../images/pothole_pictures",
        '--class-names': "../../dataset/pothole.names",
    }
    for flag, value in defaults.items():
        parser.add_argument(flag, default=value)
    return parser.parse_args(args)
def main(args):
    """Run the detection demo: loop over images in args.pic_dir, send each to
    the TF-Serving client, and display the annotated results.

    Raises ValueError when either configured path does not exist.
    Blocks forever (while True) and waits for a key press per image.
    """
    if not os.path.exists(args.pic_dir):
        raise ValueError("{} don't exist!".format(args.pic_dir))
    if not os.path.exists(args.class_names):
        raise ValueError("{} don't exist!".format(args.class_names))
    # One class name per line in the names file.
    with open(args.class_names) as f1:
        class_names = f1.read().splitlines()
    client = Client()
    client.init(host='127.0.0.1',port=8500)
    while True:
        for img_name in os.listdir(args.pic_dir):
            img = cv2.imread(os.path.join(args.pic_dir,img_name))
            # Add a batch dimension, then resize/normalize via the client.
            img = np.expand_dims(img, axis=0)
            img = client.preprocess(img,(416,416))
            boxes, scores, classes, valid_detections = client.predict(img,score_thr=0.1)
            # valid_detections holds the per-image count of kept detections.
            for index, num_det in enumerate(valid_detections):
                show_img = client.draw_result(img[index], boxes[index][0:num_det], scores[index][0:num_det],
                                              classes[index][0:num_det],class_names)
                cv2.imshow('dd', show_img)
                cv2.waitKey(0)  # wait for a key press before the next image
if __name__ == '__main__':
    # Parse CLI flags (excluding the program name) and run the demo loop.
    args = parse_args(sys.argv[1:])
    main(args)
|
import os
import argparse
import sys
import cv2
import numpy as np
import time
from tfservingclient.client import Client
def parse_args(args):
    """Parse demo CLI options: the image directory and the class-name file."""
    parser = argparse.ArgumentParser("test model")
    parser.add_argument('--pic-dir',default="../../images/pothole_pictures")
    parser.add_argument('--class-names',default="../../dataset/pothole.names")
    return parser.parse_args(args)
def main(args):
    """Endlessly cycle over images in args.pic_dir, running each through a
    TF-Serving detection client and displaying the annotated result.
    Raises ValueError if either configured path is missing.
    """
    if not os.path.exists(args.pic_dir):
        raise ValueError("{} don't exist!".format(args.pic_dir))
    if not os.path.exists(args.class_names):
        raise ValueError("{} don't exist!".format(args.class_names))
    with open(args.class_names) as f1:
        class_names = f1.read().splitlines()
    client = Client()
    client.init(host='127.0.0.1',port=8500)
    while True:
        for img_name in os.listdir(args.pic_dir):
            # batch of one image; client resizes to the model's 416x416 input
            img = cv2.imread(os.path.join(args.pic_dir,img_name))
            img = np.expand_dims(img, axis=0)
            img = client.preprocess(img,(416,416))
            boxes, scores, classes, valid_detections = client.predict(img,score_thr=0.1)
            # valid_detections holds the number of kept boxes per batch item
            for index, num_det in enumerate(valid_detections):
                show_img = client.draw_result(img[index], boxes[index][0:num_det], scores[index][0:num_det],
                                              classes[index][0:num_det],class_names)
                cv2.imshow('dd', show_img)
                cv2.waitKey(0)
# Entry point: forward CLI args (minus the program name) into the demo loop.
if __name__=='__main__':
    args = parse_args(sys.argv[1:])
    main(args)
| en | 000222322_jundeli-Scaled-YOLOv4-tensorflow2_demo_beba2851977a.py | unknown | 478 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def set_task_start_datetime(apps, schema_editor):
    """Fill in legacy `Task.start_datetime` as `Project.start_datetime.`"""
    # Backfill: rows created before the column existed inherit their parent
    # project's start time. Uses the migration-time historical model state.
    Task = apps.get_model('orchestra', 'Task')  # noqa
    for task in Task.objects.all():
        task.start_datetime = task.project.start_datetime
        task.save()
class Migration(migrations.Migration):
    """Data migration: backfill Task.start_datetime from the parent project."""
    dependencies = [
        ('orchestra', '0017_auto_20151012_1719'),
    ]
    operations = [
        migrations.RunPython(set_task_start_datetime),  # manually-reviewed
    ]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def set_task_start_datetime(apps, schema_editor):
    """Fill in legacy `Task.start_datetime` as `Project.start_datetime.`"""
    # One-time backfill over all existing Task rows via the historical model.
    Task = apps.get_model('orchestra', 'Task')  # noqa
    for task in Task.objects.all():
        task.start_datetime = task.project.start_datetime
        task.save()
class Migration(migrations.Migration):
    """Runs the Task.start_datetime backfill after migration 0017."""
    dependencies = [
        ('orchestra', '0017_auto_20151012_1719'),
    ]
    operations = [
        migrations.RunPython(set_task_start_datetime),  # manually-reviewed
    ]
| en | 000609512_code-review-doctor-orchestra_0018_auto_20151014_1432_eeeff02d7768.py | unknown | 192 |
import logging
import typing
import gym
import numpy as np
from DeepRL.Env import EnvAbstract, EnvState
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class MountainCarContinuousEnv(EnvAbstract):
    """DeepRL EnvAbstract adapter around gym's MountainCarContinuous-v0.

    Observations are exposed as float32 arrays; the per-step reward handed
    back to the agent is clamped to at most 1.0.
    """
    def __init__(self):
        super().__init__()
        self.g = gym.make('MountainCarContinuous-v0')
        self.o: typing.Optional[np.ndarray] = None  # latest observation
        self.total_step = 0
        self.total_reward = 0.0
        self.render = False
        # Fix: define in_game up front so getState()/doAction() cannot raise
        # AttributeError when called before startNewGame().
        self.in_game = False
    def startNewGame(self):
        """Reset the underlying gym env and begin a new episode."""
        self.o = self.g.reset().astype(np.float32)
        self.total_reward = 0.0
        self.in_game = True
    def getState(self) -> EnvState:
        """Snapshot the current (in_game, observation) pair."""
        return EnvState(self.in_game, self.o)
    def doAction(self, _action: np.ndarray) -> float:
        """Apply one continuous action; returns the reward clamped to <= 1.0."""
        self.o, reward, is_quit, _ = self.g.step(_action)
        self.o = self.o.astype(np.float32)
        self.in_game = not is_quit
        self.total_reward += reward
        if not self.in_game:
            logger.info('total_reward: {}'.format(self.total_reward))
            # if not self.render and self.total_reward > 90.0:
            # self.render = True
        if self.render:
            self.g.render()
        return min(reward, 1.0)
    def getInputs(self, _state_list: typing.Sequence[EnvState]) -> np.ndarray:
        """Stack the raw observations from a batch of EnvStates."""
        return np.array([d.state for d in _state_list])
    def getRandomActions(
        self, _state_list: typing.Sequence[EnvState]
    ) -> typing.Sequence[int]:
        pass  # discrete-action hook; unused for this continuous env
    def getBestActions(
        self, _data: np.ndarray, _state_list: typing.Sequence[EnvState]
    ) -> typing.Sequence[int]:
        pass  # discrete-action hook; unused for this continuous env
    def getSoftActions(
        self, _data: np.ndarray, _state_list: typing.Sequence[EnvState]
    ) -> typing.Sequence[int]:
        pass  # discrete-action hook; unused for this continuous env
| import logging
import typing
import gym
import numpy as np
from DeepRL.Env import EnvAbstract, EnvState
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class MountainCarContinuousEnv(EnvAbstract):
    """Adapter exposing gym's MountainCarContinuous-v0 via the DeepRL EnvAbstract API."""
    def __init__(self):
        super().__init__()
        self.g = gym.make('MountainCarContinuous-v0')
        self.o: np.ndarray = None
        self.total_step = 0
        self.total_reward = 0.0
        self.render = False
    def startNewGame(self):
        # reset episode state; observations are kept as float32
        self.o = self.g.reset()
        self.o = self.o.astype(np.float32)
        self.total_reward = 0.0
        self.in_game = True
    def getState(self) -> EnvState:
        # NOTE(review): self.in_game is only set by startNewGame()/doAction();
        # calling this before startNewGame() would raise AttributeError.
        return EnvState(self.in_game, self.o)
    def doAction(self, _action: np.ndarray) -> float:
        # step the env; the reward returned to the caller is clamped to <= 1.0
        self.o, reward, is_quit, _ = self.g.step(_action)
        self.o = self.o.astype(np.float32)
        self.in_game = not is_quit
        self.total_reward += reward
        if not self.in_game:
            logger.info('total_reward: {}'.format(self.total_reward))
            # if not self.render and self.total_reward > 90.0:
            # self.render = True
        if self.render:
            self.g.render()
        return min(reward, 1.0)
    def getInputs(self, _state_list: typing.Sequence[EnvState]) -> np.ndarray:
        # stack raw observations from a batch of EnvStates
        return np.array([d.state for d in _state_list])
    def getRandomActions(
        self, _state_list: typing.Sequence[EnvState]
    ) -> typing.Sequence[int]:
        pass
    def getBestActions(
        self, _data: np.ndarray, _state_list: typing.Sequence[EnvState]
    ) -> typing.Sequence[int]:
        pass
    def getSoftActions(
        self, _data: np.ndarray, _state_list: typing.Sequence[EnvState]
    ) -> typing.Sequence[int]:
        pass
| en | 000671203_panghantian-kavout-DeepRL_mountain_car_continous_env_1ad5a01d78fd.py | unknown | 582 |
from logging import warning
from api import gitlab
from utilities import validate, types
# NOTE(review): this rebinding shadows the imported `api.gitlab` module with a
# GitLab client instance; all later references use the instance, not the module.
gitlab = gitlab.GitLab(types.Arguments().url)
def get_all(project_id, issue_id, issue_web_url):
    """Collect every human-authored note on one issue as Comment objects.

    Returns an empty list when the API call fails validation. Logs a count
    per discussion that contained at least one non-system note.
    """
    collected = []
    response = gitlab.get_issue_comments(project_id, issue_id)
    if not validate.api_result(response):
        return collected
    for discussion in response:
        found = 0
        for note in discussion['notes']:
            # skip auto-generated system notes: https://docs.gitlab.com/ee/api/discussions.html
            if note['system']:
                continue
            collected.append(types.Comment('issue', issue_web_url, note['body']))
            found += 1
        if found > 0:
            warning("[*] Found %s comments for issue %s", found, issue_web_url)
    return collected
def sniff_secrets(comment):
    """Scan a single comment body for secrets, keyed by its parent URL."""
    payload = {comment.parent_url: comment.comment_body}
    return types.SecretsMonitor().sniff_secrets(payload)
| from logging import warning
from api import gitlab
from utilities import validate, types
gitlab = gitlab.GitLab(types.Arguments().url)
def get_all(project_id, issue_id, issue_web_url):
    """Return Comment objects for every human-authored note on an issue."""
    comments = []
    detail = gitlab.get_issue_comments(project_id, issue_id)
    if validate.api_result(detail):
        for item in detail:
            legit_comments = 0
            for note in item['notes']:
                if note['system']: # ignore system notes: https://docs.gitlab.com/ee/api/discussions.html
                    continue
                comments.append(types.Comment('issue', issue_web_url, note['body']))
                legit_comments += 1
            if legit_comments > 0:
                warning("[*] Found %s comments for issue %s", legit_comments, issue_web_url)
    return comments
def sniff_secrets(comment):
    """Scan one comment body for secrets, keyed by its parent URL."""
    monitor = types.SecretsMonitor()
    return monitor.sniff_secrets({comment.parent_url: comment.comment_body})
| en | 000292895_codeEmitter-token-hunter_issue_comments_a4dad5811897.py | unknown | 264 |
import ctypes
import ida_ida
import ida_funcs
import ida_graph
import ida_idaapi
import ida_kernwin
import ida_hexrays
from PyQt5 import QtWidgets, QtGui, QtCore, sip
from lucid.ui.sync import MicroCursorHighlight
from lucid.ui.subtree import MicroSubtreeView
from lucid.util.python import register_callback, notify_callback
from lucid.util.hexrays import get_microcode, get_mmat, get_mmat_name, get_mmat_levels
from lucid.microtext import MicrocodeText, MicroInstructionToken, MicroOperandToken, AddressToken, BlockNumberToken, translate_mtext_position, remap_mtext_position
#------------------------------------------------------------------------------
# Microcode Explorer
#------------------------------------------------------------------------------
#
# The Microcode Explorer UI is mostly implemented following a standard
# Model-View-Controller pattern. This is a little abnormal for Qt, but
# I've come to appreciate it more for its portability and testability.
#
class MicrocodeExplorer(object):
    """
    The controller component of the microcode explorer.
    The role of the controller is to handle user gestures, map user actions to
    model updates, and change views based on controls. In theory, the
    controller should be able to drive the 'view' headlessly or simulate user
    UI interaction.
    """
    def __init__(self):
        self.model = MicrocodeExplorerModel()
        self.view = MicrocodeExplorerView(self, self.model)
        self.view._code_sync.enable_sync(True) # XXX/HACK
    def show(self, address=None):
        """
        Show the microcode explorer.
        Defaults to the function under the current IDA cursor when no
        address is given.
        """
        if address is None:
            address = ida_kernwin.get_screen_ea()
        self.select_function(address)
        self.view.show()
    def show_subtree(self, insn_token):
        """
        Show the sub-instruction graph for the given instruction token.
        """
        graph = MicroSubtreeView(insn_token.insn)
        graph.show()
        # TODO/HACK: this is dumb, but moving it breaks my centering code so
        # i'll figure it out later...
        gv = ida_graph.get_graph_viewer(graph.GetWidget())
        ida_graph.viewer_set_titlebar_height(gv, 15)
    #-------------------------------------------------------------------------
    # View Toggles
    #-------------------------------------------------------------------------
    def set_highlight_mutual(self, status):
        """
        Toggle the highlighting of lines containing the same active address.
        """
        if status:
            self.view._code_sync.hook()
        else:
            self.view._code_sync.unhook()
        ida_kernwin.refresh_idaview_anyway()
    def set_verbose(self, status):
        """
        Toggle the verbosity of the printed microcode text.
        """
        self.model.verbose = status
        ida_kernwin.refresh_idaview_anyway()
    #-------------------------------------------------------------------------
    # View Controls
    #-------------------------------------------------------------------------
    def select_function(self, address):
        """
        Switch the microcode view to the specified function.
        Returns False when no function contains the given address.
        """
        func = ida_funcs.get_func(address)
        if not func:
            return False
        # build renderable microcode text for every maturity level of this
        # function, so the UI can switch levels without regenerating
        for maturity in get_mmat_levels():
            mba = get_microcode(func, maturity)
            mtext = MicrocodeText(mba, self.model.verbose)
            self.model.update_mtext(mtext, maturity)
        self.view.refresh()
        ida_kernwin.refresh_idaview_anyway()
        return True
    def select_maturity(self, maturity_name):
        """
        Switch the microcode view to the specified maturity level.
        """
        self.model.active_maturity = get_mmat(maturity_name)
        #self.view.refresh()
    def select_address(self, address):
        """
        Select a token in the microcode view matching the given address.
        Returns the selected token, or None when nothing maps to the address.
        """
        tokens = self.model.mtext.get_tokens_for_address(address)
        if not tokens:
            return None
        token_line_num, token_x = self.model.mtext.get_pos_of_token(tokens[0])
        rel_y = self.model.current_position[2]
        # NOTE(review): y == 0 appears to be treated as 'no usable viewport
        # offset yet'; 30 looks like a default scroll-from-top offset -- confirm
        if self.model.current_position[2] == 0:
            rel_y = 30
        self.model.current_position = (token_line_num, token_x, rel_y)
        return tokens[0]
    def select_position(self, line_num, x, y):
        """
        Select the given text position in the microcode view.
        """
        self.model.current_position = (line_num, x, y)
        #print(" - hovered token: %s" % self.model.current_token.text)
        #print(" - hovered taddr: 0x%08X" % self.model.current_token.address)
        #print(" - hovered laddr: 0x%08X" % self.model.current_address)
    def activate_position(self, line_num, x, y):
        """
        Activate (eg. double click) the given text position in the microcode view.
        """
        token = self.model.mtext.get_token_at_position(line_num, x)
        # address tokens jump the main IDA view to their target address
        if isinstance(token, AddressToken):
            ida_kernwin.jumpto(token.target_address, -1, 0)
            return
        # block-number tokens (or block-ref operands) jump the microcode view
        # to the first line of the referenced block
        if isinstance(token, BlockNumberToken) or (isinstance(token, MicroOperandToken) and token.mop.t == ida_hexrays.mop_b):
            blk_idx = token.blk_idx if isinstance(token, BlockNumberToken) else token.mop.b
            blk_token = self.model.mtext.blks[blk_idx]
            blk_line_num, _ = self.model.mtext.get_pos_of_token(blk_token.lines[0])
            self.model.current_position = (blk_line_num, 0, y)
            self.view._code_view.Jump(*self.model.current_position)
            return
class MicrocodeExplorerModel(object):
    """
    The model component of the microcode explorer.
    The role of the model is to encapsulate application state, respond to
    state queries, and notify views of changes. Ideally, the model could be
    serialized / unserialized to save and restore state.
    """
    def __init__(self):
        #
        # 'mtext' is short for MicrocodeText objects (see microtext.py)
        #
        # this dictionary will contain a mtext object (the renderable text
        # mapping of a given hexrays mba_t) for each microcode maturity level
        # of the current function.
        #
        # at any given time, one mtext will be 'active' in the model, and
        # therefore visible in the UI/Views
        #
        self._mtext = {x: None for x in get_mmat_levels()}
        #
        # there is a 'cursor' (ViewCursor) for each microcode maturity level /
        # mtext object. cursors don't actually contain the 'position' in the
        # rendered text (line_num, x), but also information to position the
        # cursor within the line view (y)
        #
        self._view_cursors = {x: None for x in get_mmat_levels()}
        #
        # the currently active / selected maturity level of the model. this
        # determines which mtext is currently visible / active in the
        # microcode view, and which cursor will be used
        #
        self._active_maturity = ida_hexrays.MMAT_GENERATED
        # this flag tracks the verbosity toggle state
        self._verbose = False
        #----------------------------------------------------------------------
        # Callbacks
        #----------------------------------------------------------------------
        self._mtext_refreshed_callbacks = []
        self._position_changed_callbacks = []
        self._maturity_changed_callbacks = []
    #-------------------------------------------------------------------------
    # Read-Only Properties
    #-------------------------------------------------------------------------
    @property
    def mtext(self):
        """
        Return the microcode text mapping for the current maturity level.
        """
        return self._mtext[self._active_maturity]
    @property
    def current_line(self):
        """
        Return the line token at the current viewport cursor position.
        """
        if not self.mtext:
            return None
        line_num, _, _ = self.current_position
        return self.mtext.lines[line_num]
    @property
    def current_function(self):
        """
        Return the current function address.
        """
        if not self.mtext:
            return ida_idaapi.BADADDR
        return self.mtext.mba.entry_ea
    @property
    def current_token(self):
        """
        Return the token at the current viewport cursor position.
        """
        return self.mtext.get_token_at_position(*self.current_position[:2])
    @property
    def current_address(self):
        """
        Return the address at the current viewport cursor position.
        """
        return self.mtext.get_address_at_position(*self.current_position[:2])
    @property
    def current_cursor(self):
        """
        Return the current viewport cursor.
        """
        return self._view_cursors[self._active_maturity]
    #-------------------------------------------------------------------------
    # Mutable Properties
    #-------------------------------------------------------------------------
    @property
    def current_position(self):
        """
        Return the current viewport cursor position (line_num, view_x, view_y).
        """
        return self.current_cursor.viewport_position
    @current_position.setter
    def current_position(self, value):
        """
        Set the cursor position of the viewport.
        """
        # re-derive a cursor for every maturity level from this position,
        # then notify listeners (views) that the position moved
        self._gen_cursors(value, self.active_maturity)
        self._notify_position_changed()
    @property
    def verbose(self):
        """
        Return the microcode verbosity status of the viewport.
        """
        return self._verbose
    @verbose.setter
    def verbose(self, value):
        """
        Set the verbosity of the microcode displayed by the viewport.
        """
        if self._verbose == value:
            return
        # update the active verbosity setting
        self._verbose = value
        # verbosity must have changed, so force a mtext refresh
        self.refresh_mtext()
    @property
    def active_maturity(self):
        """
        Return the active microcode maturity level.
        """
        return self._active_maturity
    @active_maturity.setter
    def active_maturity(self, new_maturity):
        """
        Set the active microcode maturity level.
        """
        self._active_maturity = new_maturity
        self._notify_maturity_changed()
    #----------------------------------------------------------------------
    # Misc
    #----------------------------------------------------------------------
    def update_mtext(self, mtext, maturity):
        """
        Set the mtext for a given microcode maturity level.
        The cursor for that level is reset to the origin.
        """
        self._mtext[maturity] = mtext
        self._view_cursors[maturity] = ViewCursor(0, 0, 0)
    def refresh_mtext(self):
        """
        Regenerate the rendered text for all microcode maturity levels.
        TODO: This is a bit sloppy, and is basically only used for the
        verbosity toggle.
        """
        for maturity, mtext in self._mtext.items():
            # the active level is fully rebuilt, and the current cursor is
            # translated into the newly generated mtext
            if maturity == self.active_maturity:
                new_mtext = MicrocodeText(mtext.mba, self.verbose)
                self._mtext[maturity] = new_mtext
                self.current_position = translate_mtext_position(self.current_position, mtext, new_mtext)
                continue
            mtext.refresh(self.verbose)
        self._notify_mtext_refreshed()
    def _gen_cursors(self, position, mmat_src):
        """
        Generate the cursors for all levels from a source position and maturity.
        """
        mmat_levels = get_mmat_levels()
        mmat_first, mmat_final = mmat_levels[0], mmat_levels[-1]
        # clear out all the existing cursor mappings
        self._view_cursors = {x: None for x in mmat_levels}
        # save the starting cursor
        line_num, x, y = position
        self._view_cursors[mmat_src] = ViewCursor(line_num, x, y, True)
        # map the cursor backwards from the source maturity
        mmat_lower = range(mmat_first, mmat_src)[::-1]
        current_maturity = mmat_src
        for next_maturity in mmat_lower:
            self._transfer_cursor(current_maturity, next_maturity)
            current_maturity = next_maturity
        # map the cursor forward from the source maturity
        mmat_higher = range(mmat_src+1, mmat_final + 1)
        current_maturity = mmat_src
        for next_maturity in mmat_higher:
            self._transfer_cursor(current_maturity, next_maturity)
            current_maturity = next_maturity
    def _transfer_cursor(self, mmat_src, mmat_dst):
        """
        Translate the cursor position from one maturity to the next.
        A cursor that could not be mapped exactly is marked mapped=False.
        """
        position = self._view_cursors[mmat_src].viewport_position
        mapped = self._view_cursors[mmat_src].mapped
        # attempt to translate the position in one mtext to another
        projection = translate_mtext_position(position, self._mtext[mmat_src], self._mtext[mmat_dst])
        # if translation failed, we will generate an approximate cursor
        if not projection:
            mapped = False
            projection = remap_mtext_position(position, self._mtext[mmat_src], self._mtext[mmat_dst])
        # save the generated cursor
        line_num, x, y = projection
        self._view_cursors[mmat_dst] = ViewCursor(line_num, x, y, mapped)
    #----------------------------------------------------------------------
    # Callbacks
    #----------------------------------------------------------------------
    def mtext_refreshed(self, callback):
        """
        Subscribe a callback for mtext refresh events.
        """
        register_callback(self._mtext_refreshed_callbacks, callback)
    def _notify_mtext_refreshed(self):
        """
        Notify listeners of a mtext refresh event.
        """
        notify_callback(self._mtext_refreshed_callbacks)
    def position_changed(self, callback):
        """
        Subscribe a callback for cursor position changed events.
        """
        register_callback(self._position_changed_callbacks, callback)
    def _notify_position_changed(self):
        """
        Notify listeners of a cursor position changed event.
        """
        notify_callback(self._position_changed_callbacks)
    def maturity_changed(self, callback):
        """
        Subscribe a callback for maturity changed events.
        """
        register_callback(self._maturity_changed_callbacks, callback)
    def _notify_maturity_changed(self):
        """
        Notify listeners of a maturity changed event.
        """
        notify_callback(self._maturity_changed_callbacks)
#-----------------------------------------------------------------------------
# UI Components
#-----------------------------------------------------------------------------
class MicrocodeExplorerView(QtWidgets.QWidget):
    """
    The view component of the Microcode Explorer.
    Hosts the microcode text view, the maturity-level list, and the settings
    checkboxes inside a dockable IDA widget.
    """
    WINDOW_TITLE = "Microcode Explorer"
    def __init__(self, controller, model):
        super(MicrocodeExplorerView, self).__init__()
        self.visible = False
        # the backing model, and controller for this view (eg, mvc pattern)
        self.model = model
        self.controller = controller
        # initialize the plugin UI
        self._ui_init()
        self._ui_init_signals()
    #--------------------------------------------------------------------------
    # Pseudo Widget Functions
    #--------------------------------------------------------------------------
    def show(self):
        """
        Display the explorer as a right-docked IDA widget.
        """
        self.refresh()
        # show the dockable widget
        flags = ida_kernwin.PluginForm.WOPN_DP_RIGHT | 0x200 # WOPN_SZHINT
        ida_kernwin.display_widget(self._twidget, flags)
        ida_kernwin.set_dock_pos(self.WINDOW_TITLE, "IDATopLevelDockArea", ida_kernwin.DP_RIGHT)
        self._code_sync.hook()
    def _cleanup(self):
        # drop widget references and unhook once the widget goes invisible
        self.visible = False
        self._twidget = None
        self.widget = None
        self._code_sync.unhook()
        self._ui_hooks.unhook()
        # TODO cleanup controller / model
    #--------------------------------------------------------------------------
    # Initialization - UI
    #--------------------------------------------------------------------------
    def _ui_init(self):
        """
        Initialize UI elements.
        """
        self._ui_init_widget()
        # initialize our ui elements
        self._ui_init_list()
        self._ui_init_code()
        self._ui_init_settings()
        # layout the populated ui just before showing it
        self._ui_layout()
    def _ui_init_widget(self):
        """
        Initialize an IDA widget for this UI control.
        """
        # create a dockable widget, and save a reference to it for later use
        self._twidget = ida_kernwin.create_empty_widget(self.WINDOW_TITLE)
        # cast the IDA 'twidget' to a less opaque QWidget object
        self.widget = ida_kernwin.PluginForm.TWidgetToPyQtWidget(self._twidget)
        # hooks to help track the container/widget lifetime
        class ExplorerUIHooks(ida_kernwin.UI_Hooks):
            def widget_invisible(_, twidget):
                if twidget == self._twidget:
                    self.visible = False
                    self._cleanup()
            def widget_visible(_, twidget):
                if twidget == self._twidget:
                    self.visible = True
        # install the widget lifetime hooks
        self._ui_hooks = ExplorerUIHooks()
        self._ui_hooks.hook()
    def _ui_init_list(self):
        """
        Initialize the microcode maturity list.
        """
        self._maturity_list = LayerListWidget()
    def _ui_init_code(self):
        """
        Initialize the microcode view(s).
        """
        self._code_view = MicrocodeView(self.model)
        self._code_sync = MicroCursorHighlight(self.controller, self.model)
        self._code_sync.track_view(self._code_view.widget)
    def _ui_init_settings(self):
        """
        Initialize the explorer settings groupbox.
        """
        self._checkbox_cursor = QtWidgets.QCheckBox("Highlight mutual")
        self._checkbox_cursor.setCheckState(QtCore.Qt.Checked)
        self._checkbox_verbose = QtWidgets.QCheckBox("Show use/def")
        self._checkbox_sync = QtWidgets.QCheckBox("Sync hexrays")
        self._checkbox_sync.setCheckState(QtCore.Qt.Checked)
        self._groupbox_settings = QtWidgets.QGroupBox("Settings")
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(self._checkbox_cursor)
        layout.addWidget(self._checkbox_verbose)
        layout.addWidget(self._checkbox_sync)
        self._groupbox_settings.setLayout(layout)
    def _ui_layout(self):
        """
        Layout the major UI elements of the widget.
        """
        layout = QtWidgets.QGridLayout()
        # arrange the widgets in a 'grid'                 row  col  row span  col span
        layout.addWidget(self._code_view.widget,          0,   0,   0,        1)
        layout.addWidget(self._maturity_list,             0,   1,   1,        1)
        layout.addWidget(self._groupbox_settings,         1,   1,   1,        1)
        # apply the layout to the widget
        self.widget.setLayout(layout)
    def _ui_init_signals(self):
        """
        Connect UI signals.
        """
        self._maturity_list.currentItemChanged.connect(lambda x, y: self.controller.select_maturity(x.text()))
        self._code_view.connect_signals(self.controller)
        self._code_view.OnClose = self.hide # HACK
        # checkboxes
        self._checkbox_cursor.stateChanged.connect(lambda x: self.controller.set_highlight_mutual(bool(x)))
        self._checkbox_verbose.stateChanged.connect(lambda x: self.controller.set_verbose(bool(x)))
        self._checkbox_sync.stateChanged.connect(lambda x: self._code_sync.enable_sync(bool(x)))
        # model signals
        self.model.mtext_refreshed(self.refresh)
        self.model.maturity_changed(self.refresh)
    #--------------------------------------------------------------------------
    # Misc
    #--------------------------------------------------------------------------
    def refresh(self):
        """
        Refresh the microcode explorer UI based on the model state.
        """
        # maturity levels are 1-based; list rows are 0-based
        self._maturity_list.setCurrentRow(self.model.active_maturity - 1)
        self._code_view.refresh()
class LayerListWidget(QtWidgets.QListWidget):
    """
    List widget showing the available microcode maturity levels.
    Scrolling the mouse wheel over the list steps the selection one row up
    or down, clamped to the first/last entry.
    """
    def __init__(self):
        super(LayerListWidget, self).__init__()
        # one row per maturity level, with the first level selected by default
        for maturity in get_mmat_levels():
            self.addItem(get_mmat_name(maturity))
        self.setCurrentRow(0)
        # pin the widget width slightly (10%) wider than its widest entry
        hint = self.sizeHintForColumn(0)
        self.setMaximumWidth(int(hint + hint * 0.10))
    def wheelEvent(self, event):
        """
        Step the selected row on vertical wheel scroll (clamped to bounds).
        """
        delta = event.angleDelta().y()
        # horizontal scroll carries no vertical delta -- nothing to do
        if delta == 0:
            return
        if delta < 0:
            target = min(self.currentRow() + 1, self.count() - 1)
        else:
            target = max(self.currentRow() - 1, 0)
        self.setCurrentRow(target)
class MicrocodeView(ida_kernwin.simplecustviewer_t):
    """
    An IDA-based text area that will render the Hex-Rays microcode.
    TODO: I'll probably rip this out in the future, as I'll have finer
    control over the interaction / implementation if I just roll my own
    microcode text widget.
    For that reason, excuse its hacky-ness / lack of comments.
    """
    def __init__(self, model):
        super(MicrocodeView, self).__init__()
        self.model = model
        self.Create()
    def connect_signals(self, controller):
        # route viewer events into the controller; re-center on model moves
        self.controller = controller
        self.OnCursorPosChanged = lambda: controller.select_position(*self.GetPos())
        self.OnDblClick = lambda _: controller.activate_position(*self.GetPos())
        self.model.position_changed(self.refresh_cursor)
    def refresh(self):
        # re-render every line of the active mtext, then restore the cursor
        self.ClearLines()
        for line in self.model.mtext.lines:
            self.AddLine(line.tagged_text)
        self.refresh_cursor()
    def refresh_cursor(self):
        if not self.model.current_position:
            return
        self.Jump(*self.model.current_position)
    def Create(self):
        if not super(MicrocodeView, self).Create(None):
            return False
        self._twidget = self.GetWidget()
        self.widget = ida_kernwin.PluginForm.TWidgetToPyQtWidget(self._twidget)
        return True
    def OnClose(self):
        pass
    def OnCursorPosChanged(self):
        pass
    def OnDblClick(self, shift):
        pass
    def OnPopup(self, form, popup_handle):
        controller = self.controller
        #
        # so, i'm pretty picky about my UI / interactions. IDA puts items in
        # the right click context menus of custom (code) viewers.
        #
        # these items aren't really relevant (imo) to the microcode viewer,
        # so I do some dirty stuff here to filter them out and ensure only
        # my items will appear in the context menu.
        #
        # there's only one right click context item right now, but in the
        # future i'm sure there will be more.
        #
        class FilterMenu(QtCore.QObject):
            def __init__(self, qmenu):
                super(QtCore.QObject, self).__init__()
                self.qmenu = qmenu
            def eventFilter(self, obj, event):
                # wait for the menu's Polish event, strip unwanted actions,
                # then uninstall this one-shot filter
                if event.type() != QtCore.QEvent.Polish:
                    return False
                for action in self.qmenu.actions():
                    if action.text() in ["&Font...", "&Synchronize with"]: # lol..
                        qmenu.removeAction(action)
                self.qmenu.removeEventFilter(self)
                self.qmenu = None
                return True
        # NOTE(review): casts the raw popup handle to the underlying QMenu
        # via ctypes + sip so the filter can be installed on it
        p_qmenu = ctypes.cast(int(popup_handle), ctypes.POINTER(ctypes.c_void_p))[0]
        qmenu = sip.wrapinstance(int(p_qmenu), QtWidgets.QMenu)
        self.filter = FilterMenu(qmenu)
        qmenu.installEventFilter(self.filter)
        # only handle right clicks on lines containing micro instructions
        ins_token = self.model.mtext.get_ins_for_line(self.model.current_line)
        if not ins_token:
            return False
        class MyHandler(ida_kernwin.action_handler_t):
            def activate(self, ctx):
                controller.show_subtree(ins_token)
            def update(self, ctx):
                return ida_kernwin.AST_ENABLE_ALWAYS
        # inject the 'View subtree' action into the right click context menu
        desc = ida_kernwin.action_desc_t(None, 'View subtree', MyHandler())
        ida_kernwin.attach_dynamic_action_to_popup(form, popup_handle, desc, None)
        return True
#-----------------------------------------------------------------------------
# Util
#-----------------------------------------------------------------------------
class ViewCursor(object):
    """
    Cursor location within a rendered microcode text view.
    Tracks the text coordinate (line_num, x), the viewport-relative y
    offset, and whether this position was exactly translated between
    maturity levels (mapped=False means it was approximated).
    """
    def __init__(self, line_num, x, y, mapped=True):
        self.line_num, self.x, self.y = line_num, x, y
        self.mapped = mapped
    @property
    def text_position(self):
        """(line_num, x) coordinate within the rendered text."""
        return (self.line_num, self.x)
    @property
    def viewport_position(self):
        """(line_num, x, y) coordinate including the viewport offset."""
        return (self.line_num, self.x, self.y)
| import ctypes
import ida_ida
import ida_funcs
import ida_graph
import ida_idaapi
import ida_kernwin
import ida_hexrays
from PyQt5 import QtWidgets, QtGui, QtCore, sip
from lucid.ui.sync import MicroCursorHighlight
from lucid.ui.subtree import MicroSubtreeView
from lucid.util.python import register_callback, notify_callback
from lucid.util.hexrays import get_microcode, get_mmat, get_mmat_name, get_mmat_levels
from lucid.microtext import MicrocodeText, MicroInstructionToken, MicroOperandToken, AddressToken, BlockNumberToken, translate_mtext_position, remap_mtext_position
#------------------------------------------------------------------------------
# Microcode Explorer
#------------------------------------------------------------------------------
#
# The Microcode Explorer UI is mostly implemented following a standard
# Model-View-Controller pattern. This is a little abnormal for Qt, but
# I've come to appreciate it more for its portability and testability.
#
class MicrocodeExplorer(object):
    """
    The controller component of the microcode explorer.
    The role of the controller is to handle user gestures, map user actions to
    model updates, and change views based on controls. In theory, the
    controller should be able to drive the 'view' headlessly or simulate user
    UI interaction.
    """
    def __init__(self):
        self.model = MicrocodeExplorerModel()
        self.view = MicrocodeExplorerView(self, self.model)
        self.view._code_sync.enable_sync(True) # XXX/HACK
    def show(self, address=None):
        """
        Show the microcode explorer.
        Falls back to the current IDA cursor address when none is given.
        """
        if address is None:
            address = ida_kernwin.get_screen_ea()
        self.select_function(address)
        self.view.show()
    def show_subtree(self, insn_token):
        """
        Show the sub-instruction graph for the given instruction token.
        """
        graph = MicroSubtreeView(insn_token.insn)
        graph.show()
        # TODO/HACK: this is dumb, but moving it breaks my centering code so
        # i'll figure it out later...
        gv = ida_graph.get_graph_viewer(graph.GetWidget())
        ida_graph.viewer_set_titlebar_height(gv, 15)
    #-------------------------------------------------------------------------
    # View Toggles
    #-------------------------------------------------------------------------
    def set_highlight_mutual(self, status):
        """
        Toggle the highlighting of lines containing the same active address.
        """
        if status:
            self.view._code_sync.hook()
        else:
            self.view._code_sync.unhook()
        ida_kernwin.refresh_idaview_anyway()
    def set_verbose(self, status):
        """
        Toggle the verbosity of the printed microcode text.
        """
        self.model.verbose = status
        ida_kernwin.refresh_idaview_anyway()
    #-------------------------------------------------------------------------
    # View Controls
    #-------------------------------------------------------------------------
    def select_function(self, address):
        """
        Switch the microcode view to the specified function.
        Returns False when no function contains the address.
        """
        func = ida_funcs.get_func(address)
        if not func:
            return False
        # generate microcode text for each maturity level of the function
        for maturity in get_mmat_levels():
            mba = get_microcode(func, maturity)
            mtext = MicrocodeText(mba, self.model.verbose)
            self.model.update_mtext(mtext, maturity)
        self.view.refresh()
        ida_kernwin.refresh_idaview_anyway()
        return True
    def select_maturity(self, maturity_name):
        """
        Switch the microcode view to the specified maturity level.
        """
        self.model.active_maturity = get_mmat(maturity_name)
        #self.view.refresh()
    def select_address(self, address):
        """
        Select a token in the microcode view matching the given address.
        Returns the token, or None when nothing maps to the address.
        """
        tokens = self.model.mtext.get_tokens_for_address(address)
        if not tokens:
            return None
        token_line_num, token_x = self.model.mtext.get_pos_of_token(tokens[0])
        rel_y = self.model.current_position[2]
        # NOTE(review): y == 0 is treated as 'no viewport offset yet' and is
        # replaced with 30 -- presumably a default scroll offset; confirm
        if self.model.current_position[2] == 0:
            rel_y = 30
        self.model.current_position = (token_line_num, token_x, rel_y)
        return tokens[0]
    def select_position(self, line_num, x, y):
        """
        Select the given text position in the microcode view.
        """
        self.model.current_position = (line_num, x, y)
        #print(" - hovered token: %s" % self.model.current_token.text)
        #print(" - hovered taddr: 0x%08X" % self.model.current_token.address)
        #print(" - hovered laddr: 0x%08X" % self.model.current_address)
    def activate_position(self, line_num, x, y):
        """
        Activate (eg. double click) the given text position in the microcode view.
        """
        token = self.model.mtext.get_token_at_position(line_num, x)
        # address tokens: jump the main IDA view to the target address
        if isinstance(token, AddressToken):
            ida_kernwin.jumpto(token.target_address, -1, 0)
            return
        # block tokens / block-ref operands: jump to the referenced block
        if isinstance(token, BlockNumberToken) or (isinstance(token, MicroOperandToken) and token.mop.t == ida_hexrays.mop_b):
            blk_idx = token.blk_idx if isinstance(token, BlockNumberToken) else token.mop.b
            blk_token = self.model.mtext.blks[blk_idx]
            blk_line_num, _ = self.model.mtext.get_pos_of_token(blk_token.lines[0])
            self.model.current_position = (blk_line_num, 0, y)
            self.view._code_view.Jump(*self.model.current_position)
            return
class MicrocodeExplorerModel(object):
"""
The model component of the microcode explorer.
The role of the model is to encapsulate application state, respond to
state queries, and notify views of changes. Ideally, the model could be
serialized / unserialized to save and restore state.
"""
def __init__(self):
#
# 'mtext' is short for MicrocodeText objects (see microtext.py)
#
# this dictionary will contain a mtext object (the renderable text
# mapping of a given hexrays mba_t) for each microcode maturity level
# of the current function.
#
# at any given time, one mtext will be 'active' in the model, and
# therefore visible in the UI/Views
#
self._mtext = {x: None for x in get_mmat_levels()}
#
# there is a 'cursor' (ViewCursor) for each microcode maturity level /
# mtext object. cursors don't actually contain the 'position' in the
# rendered text (line_num, x), but also information to position the
# cursor within the line view (y)
#
self._view_cursors = {x: None for x in get_mmat_levels()}
#
# the currently active / selected maturity level of the model. this
# determines which mtext is currently visible / active in the
# microcode view, and which cursor will be used
#
self._active_maturity = ida_hexrays.MMAT_GENERATED
# this flag tracks the verbosity toggle state
self._verbose = False
#----------------------------------------------------------------------
# Callbacks
#----------------------------------------------------------------------
self._mtext_refreshed_callbacks = []
self._position_changed_callbacks = []
self._maturity_changed_callbacks = []
#-------------------------------------------------------------------------
# Read-Only Properties
#-------------------------------------------------------------------------
@property
def mtext(self):
"""
Return the microcode text mapping for the current maturity level.
"""
return self._mtext[self._active_maturity]
@property
def current_line(self):
"""
Return the line token at the current viewport cursor position.
"""
if not self.mtext:
return None
line_num, _, _ = self.current_position
return self.mtext.lines[line_num]
@property
def current_function(self):
"""
Return the current function address.
"""
if not self.mtext:
return ida_idaapi.BADADDR
return self.mtext.mba.entry_ea
@property
def current_token(self):
"""
Return the token at the current viewport cursor position.
"""
return self.mtext.get_token_at_position(*self.current_position[:2])
@property
def current_address(self):
"""
Return the address at the current viewport cursor position.
"""
return self.mtext.get_address_at_position(*self.current_position[:2])
@property
def current_cursor(self):
"""
Return the current viewport cursor.
"""
return self._view_cursors[self._active_maturity]
#-------------------------------------------------------------------------
# Mutable Properties
#-------------------------------------------------------------------------
@property
def current_position(self):
"""
Return the current viewport cursor position (line_num, view_x, view_y).
"""
return self.current_cursor.viewport_position
@current_position.setter
def current_position(self, value):
"""
Set the cursor position of the viewport.
"""
self._gen_cursors(value, self.active_maturity)
self._notify_position_changed()
@property
def verbose(self):
"""
Return the microcode verbosity status of the viewport.
"""
return self._verbose
@verbose.setter
def verbose(self, value):
"""
Set the verbosity of the microcode displayed by the viewport.
"""
if self._verbose == value:
return
# update the active verbosity setting
self._verbose = value
# verbosity must have changed, so force a mtext refresh
self.refresh_mtext()
@property
def active_maturity(self):
"""
Return the active microcode maturity level.
"""
return self._active_maturity
@active_maturity.setter
def active_maturity(self, new_maturity):
"""
Set the active microcode maturity level.
"""
self._active_maturity = new_maturity
self._notify_maturity_changed()
#----------------------------------------------------------------------
# Misc
#----------------------------------------------------------------------
def update_mtext(self, mtext, maturity):
"""
Set the mtext for a given microcode maturity level.
"""
self._mtext[maturity] = mtext
self._view_cursors[maturity] = ViewCursor(0, 0, 0)
def refresh_mtext(self):
"""
Regenerate the rendered text for all microcode maturity levels.
TODO: This is a bit sloppy, and is basically only used for the
verbosity toggle.
"""
for maturity, mtext in self._mtext.items():
if maturity == self.active_maturity:
new_mtext = MicrocodeText(mtext.mba, self.verbose)
self._mtext[maturity] = new_mtext
self.current_position = translate_mtext_position(self.current_position, mtext, new_mtext)
continue
mtext.refresh(self.verbose)
self._notify_mtext_refreshed()
def _gen_cursors(self, position, mmat_src):
"""
Generate the cursors for all levels from a source position and maturity.
"""
mmat_levels = get_mmat_levels()
mmat_first, mmat_final = mmat_levels[0], mmat_levels[-1]
# clear out all the existing cursor mappings
self._view_cursors = {x: None for x in mmat_levels}
# save the starting cursor
line_num, x, y = position
self._view_cursors[mmat_src] = ViewCursor(line_num, x, y, True)
# map the cursor backwards from the source maturity
mmat_lower = range(mmat_first, mmat_src)[::-1]
current_maturity = mmat_src
for next_maturity in mmat_lower:
self._transfer_cursor(current_maturity, next_maturity)
current_maturity = next_maturity
# map the cursor forward from the source maturity
mmat_higher = range(mmat_src+1, mmat_final + 1)
current_maturity = mmat_src
for next_maturity in mmat_higher:
self._transfer_cursor(current_maturity, next_maturity)
current_maturity = next_maturity
def _transfer_cursor(self, mmat_src, mmat_dst):
"""
Translate the cursor position from one maturity to the next.
"""
position = self._view_cursors[mmat_src].viewport_position
mapped = self._view_cursors[mmat_src].mapped
# attempt to translate the position in one mtext to another
projection = translate_mtext_position(position, self._mtext[mmat_src], self._mtext[mmat_dst])
# if translation failed, we will generate an approximate cursor
if not projection:
mapped = False
projection = remap_mtext_position(position, self._mtext[mmat_src], self._mtext[mmat_dst])
# save the generated cursor
line_num, x, y = projection
self._view_cursors[mmat_dst] = ViewCursor(line_num, x, y, mapped)
#----------------------------------------------------------------------
# Callbacks
#----------------------------------------------------------------------
def mtext_refreshed(self, callback):
"""
Subscribe a callback for mtext refresh events.
"""
register_callback(self._mtext_refreshed_callbacks, callback)
def _notify_mtext_refreshed(self):
"""
Notify listeners of a mtext refresh event.
"""
notify_callback(self._mtext_refreshed_callbacks)
def position_changed(self, callback):
"""
Subscribe a callback for cursor position changed events.
"""
register_callback(self._position_changed_callbacks, callback)
def _notify_position_changed(self):
"""
Notify listeners of a cursor position changed event.
"""
notify_callback(self._position_changed_callbacks)
def maturity_changed(self, callback):
"""
Subscribe a callback for maturity changed events.
"""
register_callback(self._maturity_changed_callbacks, callback)
def _notify_maturity_changed(self):
"""
Notify listeners of a maturity changed event.
"""
notify_callback(self._maturity_changed_callbacks)
#-----------------------------------------------------------------------------
# UI Components
#-----------------------------------------------------------------------------
class MicrocodeExplorerView(QtWidgets.QWidget):
"""
The view component of the Microcode Explorer.
"""
WINDOW_TITLE = "Microcode Explorer"
def __init__(self, controller, model):
super(MicrocodeExplorerView, self).__init__()
self.visible = False
# the backing model, and controller for this view (eg, mvc pattern)
self.model = model
self.controller = controller
# initialize the plugin UI
self._ui_init()
self._ui_init_signals()
#--------------------------------------------------------------------------
# Pseudo Widget Functions
#--------------------------------------------------------------------------
def show(self):
self.refresh()
# show the dockable widget
flags = ida_kernwin.PluginForm.WOPN_DP_RIGHT | 0x200 # WOPN_SZHINT
ida_kernwin.display_widget(self._twidget, flags)
ida_kernwin.set_dock_pos(self.WINDOW_TITLE, "IDATopLevelDockArea", ida_kernwin.DP_RIGHT)
self._code_sync.hook()
def _cleanup(self):
self.visible = False
self._twidget = None
self.widget = None
self._code_sync.unhook()
self._ui_hooks.unhook()
# TODO cleanup controller / model
#--------------------------------------------------------------------------
# Initialization - UI
#--------------------------------------------------------------------------
def _ui_init(self):
"""
Initialize UI elements.
"""
self._ui_init_widget()
# initialize our ui elements
self._ui_init_list()
self._ui_init_code()
self._ui_init_settings()
# layout the populated ui just before showing it
self._ui_layout()
def _ui_init_widget(self):
"""
Initialize an IDA widget for this UI control.
"""
# create a dockable widget, and save a reference to it for later use
self._twidget = ida_kernwin.create_empty_widget(self.WINDOW_TITLE)
# cast the IDA 'twidget' to a less opaque QWidget object
self.widget = ida_kernwin.PluginForm.TWidgetToPyQtWidget(self._twidget)
# hooks to help track the container/widget lifetime
class ExplorerUIHooks(ida_kernwin.UI_Hooks):
def widget_invisible(_, twidget):
if twidget == self._twidget:
self.visible = False
self._cleanup()
def widget_visible(_, twidget):
if twidget == self._twidget:
self.visible = True
# install the widget lifetime hooks
self._ui_hooks = ExplorerUIHooks()
self._ui_hooks.hook()
def _ui_init_list(self):
"""
Initialize the microcode maturity list.
"""
self._maturity_list = LayerListWidget()
def _ui_init_code(self):
"""
Initialize the microcode view(s).
"""
self._code_view = MicrocodeView(self.model)
self._code_sync = MicroCursorHighlight(self.controller, self.model)
self._code_sync.track_view(self._code_view.widget)
def _ui_init_settings(self):
"""
Initialize the explorer settings groupbox.
"""
self._checkbox_cursor = QtWidgets.QCheckBox("Highlight mutual")
self._checkbox_cursor.setCheckState(QtCore.Qt.Checked)
self._checkbox_verbose = QtWidgets.QCheckBox("Show use/def")
self._checkbox_sync = QtWidgets.QCheckBox("Sync hexrays")
self._checkbox_sync.setCheckState(QtCore.Qt.Checked)
self._groupbox_settings = QtWidgets.QGroupBox("Settings")
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self._checkbox_cursor)
layout.addWidget(self._checkbox_verbose)
layout.addWidget(self._checkbox_sync)
self._groupbox_settings.setLayout(layout)
def _ui_layout(self):
"""
Layout the major UI elements of the widget.
"""
layout = QtWidgets.QGridLayout()
# arrange the widgets in a 'grid' row col row span col span
layout.addWidget(self._code_view.widget, 0, 0, 0, 1)
layout.addWidget(self._maturity_list, 0, 1, 1, 1)
layout.addWidget(self._groupbox_settings, 1, 1, 1, 1)
# apply the layout to the widget
self.widget.setLayout(layout)
def _ui_init_signals(self):
"""
Connect UI signals.
"""
self._maturity_list.currentItemChanged.connect(lambda x, y: self.controller.select_maturity(x.text()))
self._code_view.connect_signals(self.controller)
self._code_view.OnClose = self.hide # HACK
# checkboxes
self._checkbox_cursor.stateChanged.connect(lambda x: self.controller.set_highlight_mutual(bool(x)))
self._checkbox_verbose.stateChanged.connect(lambda x: self.controller.set_verbose(bool(x)))
self._checkbox_sync.stateChanged.connect(lambda x: self._code_sync.enable_sync(bool(x)))
# model signals
self.model.mtext_refreshed(self.refresh)
self.model.maturity_changed(self.refresh)
#--------------------------------------------------------------------------
# Misc
#--------------------------------------------------------------------------
def refresh(self):
"""
Refresh the microcode explorer UI based on the model state.
"""
self._maturity_list.setCurrentRow(self.model.active_maturity - 1)
self._code_view.refresh()
class LayerListWidget(QtWidgets.QListWidget):
"""
The microcode maturity list widget
"""
def __init__(self):
super(LayerListWidget, self).__init__()
# populate the list widget with the microcode maturity levels
self.addItems([get_mmat_name(x) for x in get_mmat_levels()])
# select the first maturity level, by default
self.setCurrentRow(0)
# make the list widget a fixed size, slightly wider than it needs to be
width = self.sizeHintForColumn(0)
self.setMaximumWidth(int(width + width * 0.10))
def wheelEvent(self, event):
"""
Handle mouse wheel scroll events.
"""
y = event.angleDelta().y()
# scrolling down, clamp to last row
if y < 0:
next_row = min(self.currentRow()+1, self.count()-1)
# scrolling up, clamp to first row (0)
elif y > 0:
next_row = max(self.currentRow()-1, 0)
# horizontal scroll ? nothing to do..
else:
return
self.setCurrentRow(next_row)
class MicrocodeView(ida_kernwin.simplecustviewer_t):
"""
An IDA-based text area that will render the Hex-Rays microcode.
TODO: I'll probably rip this out in the future, as I'll have finer
control over the interaction / implementation if I just roll my own
microcode text widget.
For that reason, excuse its hacky-ness / lack of comments.
"""
def __init__(self, model):
super(MicrocodeView, self).__init__()
self.model = model
self.Create()
def connect_signals(self, controller):
self.controller = controller
self.OnCursorPosChanged = lambda: controller.select_position(*self.GetPos())
self.OnDblClick = lambda _: controller.activate_position(*self.GetPos())
self.model.position_changed(self.refresh_cursor)
def refresh(self):
self.ClearLines()
for line in self.model.mtext.lines:
self.AddLine(line.tagged_text)
self.refresh_cursor()
def refresh_cursor(self):
if not self.model.current_position:
return
self.Jump(*self.model.current_position)
def Create(self):
if not super(MicrocodeView, self).Create(None):
return False
self._twidget = self.GetWidget()
self.widget = ida_kernwin.PluginForm.TWidgetToPyQtWidget(self._twidget)
return True
def OnClose(self):
pass
def OnCursorPosChanged(self):
pass
def OnDblClick(self, shift):
pass
def OnPopup(self, form, popup_handle):
controller = self.controller
#
# so, i'm pretty picky about my UI / interactions. IDA puts items in
# the right click context menus of custom (code) viewers.
#
# these items aren't really relevant (imo) to the microcode viewer,
# so I do some dirty stuff here to filter them out and ensure only
# my items will appear in the context menu.
#
# there's only one right click context item right now, but in the
# future i'm sure there will be more.
#
class FilterMenu(QtCore.QObject):
def __init__(self, qmenu):
super(QtCore.QObject, self).__init__()
self.qmenu = qmenu
def eventFilter(self, obj, event):
if event.type() != QtCore.QEvent.Polish:
return False
for action in self.qmenu.actions():
if action.text() in ["&Font...", "&Synchronize with"]: # lol..
qmenu.removeAction(action)
self.qmenu.removeEventFilter(self)
self.qmenu = None
return True
p_qmenu = ctypes.cast(int(popup_handle), ctypes.POINTER(ctypes.c_void_p))[0]
qmenu = sip.wrapinstance(int(p_qmenu), QtWidgets.QMenu)
self.filter = FilterMenu(qmenu)
qmenu.installEventFilter(self.filter)
# only handle right clicks on lines containing micro instructions
ins_token = self.model.mtext.get_ins_for_line(self.model.current_line)
if not ins_token:
return False
class MyHandler(ida_kernwin.action_handler_t):
def activate(self, ctx):
controller.show_subtree(ins_token)
def update(self, ctx):
return ida_kernwin.AST_ENABLE_ALWAYS
# inject the 'View subtree' action into the right click context menu
desc = ida_kernwin.action_desc_t(None, 'View subtree', MyHandler())
ida_kernwin.attach_dynamic_action_to_popup(form, popup_handle, desc, None)
return True
#-----------------------------------------------------------------------------
# Util
#-----------------------------------------------------------------------------
class ViewCursor(object):
"""
TODO
"""
def __init__(self, line_num, x, y, mapped=True):
self.line_num = line_num
self.x = x
self.y = y
self.mapped = mapped
@property
def text_position(self):
return (self.line_num, self.x)
@property
def viewport_position(self):
return (self.line_num, self.x, self.y)
| en | 000039633_gaasedelen-lucid_explorer_60cf1f9d01e8.py | unknown | 6,753 |
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce_terminal.view import NFView,make_view
from netforce.database import Transaction
from netforce.model import get_model
import curses
import curses.textpad
class AddProduct(NFView):
_name="add_product"
def __init__(self,opts):
super().__init__(opts)
self.data={
"product_id": None,
"lot_no": None,
"qty": None,
"uom_id": None,
"prev_product_id": opts.get("prev_product_id"),
}
def render(self):
with Transaction():
self.win.clear()
curses.curs_set(0)
self.win.addstr(0,0,"Netforce Terminal",curses.A_BOLD|curses.color_pair(1))
self.win.addstr(1,0,"Add Product",curses.A_BOLD)
opts={
"win": self.win.subwin(1,80,3,0),
"key": 1,
"string": "Product",
"name": "product_id",
"relation": "product",
"data": self.data,
"name_field": "code",
}
self.subviews["product_id"]=make_view("field_m2o",opts)
opts={
"win": self.win.subwin(1,80,4,0),
"key": "2",
"string": "Lot Number",
"name": "lot_no",
"data": self.data,
}
self.subviews["lot_no"]=make_view("field_char",opts)
opts={
"win": self.win.subwin(1,80,5,0),
"key": "3",
"string": "Qty",
"name": "qty",
"data": self.data,
}
if self.data["product_id"]:
prod_id=self.data["product_id"][0]
prod=get_model("product").browse(prod_id)
opts["string"]="Qty (%s)"%prod.uom_id.name
self.subviews["qty"]=make_view("field_decimal",opts)
self.win.addstr(6,0,"4.",curses.A_BOLD|curses.color_pair(2))
self.win.addstr(6,3,"Add Product")
if self.data.get("prev_product_id"):
self.win.addstr(7,0,"5.",curses.A_BOLD|curses.color_pair(2))
self.win.addstr(7,3,"Select Previous Product")
for n,view in self.subviews.items():
view.render()
def focus(self):
while True:
c=self.win.getch()
try:
if c==27:
return
elif c==ord("1"):
self.subviews["product_id"].focus()
self.render()
elif c==ord("2"):
self.subviews["lot_no"].focus()
self.render()
elif c==ord("3"):
self.subviews["qty"].focus()
self.render()
elif c==ord("4"):
if not self.data["product_id"]:
raise Exception("Missing product")
if not self.data["qty"]:
raise Exception("Missing qty")
return self.data
elif c==ord("5"):
self.data["product_id"]=self.data["prev_product_id"]
self.render()
except Exception as e:
make_view("error",{"message": str(e)}).focus()
self.render()
AddProduct.register()
| # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce_terminal.view import NFView,make_view
from netforce.database import Transaction
from netforce.model import get_model
import curses
import curses.textpad
class AddProduct(NFView):
_name="add_product"
def __init__(self,opts):
super().__init__(opts)
self.data={
"product_id": None,
"lot_no": None,
"qty": None,
"uom_id": None,
"prev_product_id": opts.get("prev_product_id"),
}
def render(self):
with Transaction():
self.win.clear()
curses.curs_set(0)
self.win.addstr(0,0,"Netforce Terminal",curses.A_BOLD|curses.color_pair(1))
self.win.addstr(1,0,"Add Product",curses.A_BOLD)
opts={
"win": self.win.subwin(1,80,3,0),
"key": 1,
"string": "Product",
"name": "product_id",
"relation": "product",
"data": self.data,
"name_field": "code",
}
self.subviews["product_id"]=make_view("field_m2o",opts)
opts={
"win": self.win.subwin(1,80,4,0),
"key": "2",
"string": "Lot Number",
"name": "lot_no",
"data": self.data,
}
self.subviews["lot_no"]=make_view("field_char",opts)
opts={
"win": self.win.subwin(1,80,5,0),
"key": "3",
"string": "Qty",
"name": "qty",
"data": self.data,
}
if self.data["product_id"]:
prod_id=self.data["product_id"][0]
prod=get_model("product").browse(prod_id)
opts["string"]="Qty (%s)"%prod.uom_id.name
self.subviews["qty"]=make_view("field_decimal",opts)
self.win.addstr(6,0,"4.",curses.A_BOLD|curses.color_pair(2))
self.win.addstr(6,3,"Add Product")
if self.data.get("prev_product_id"):
self.win.addstr(7,0,"5.",curses.A_BOLD|curses.color_pair(2))
self.win.addstr(7,3,"Select Previous Product")
for n,view in self.subviews.items():
view.render()
def focus(self):
while True:
c=self.win.getch()
try:
if c==27:
return
elif c==ord("1"):
self.subviews["product_id"].focus()
self.render()
elif c==ord("2"):
self.subviews["lot_no"].focus()
self.render()
elif c==ord("3"):
self.subviews["qty"].focus()
self.render()
elif c==ord("4"):
if not self.data["product_id"]:
raise Exception("Missing product")
if not self.data["qty"]:
raise Exception("Missing qty")
return self.data
elif c==ord("5"):
self.data["product_id"]=self.data["prev_product_id"]
self.render()
except Exception as e:
make_view("error",{"message": str(e)}).focus()
self.render()
AddProduct.register()
| en | 000602164_nfco-netforce_add_product_3faf0c944ee0.py | unknown | 1,258 |
#!/usr/bin/env python2.7
import os, sys
import requests
from bs4 import BeautifulSoup
url = sys.argv[1]
directory = sys.argv[2]
os.makedirs(directory)
def download_script(uri):
address = url + uri if uri[0] == '/' else uri
filename = address[address.rfind("/")+1:address.rfind("js")+2]
req = requests.get(url)
with open(directory + '/' + filename, 'wb') as file:
file.write(req.content)
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
for script in soup.find_all('script'):
if script.get('src'): download_script(script.get('src'))
| #!/usr/bin/env python2.7
import os, sys
import requests
from bs4 import BeautifulSoup
url = sys.argv[1]
directory = sys.argv[2]
os.makedirs(directory)
def download_script(uri):
address = url + uri if uri[0] == '/' else uri
filename = address[address.rfind("/")+1:address.rfind("js")+2]
req = requests.get(url)
with open(directory + '/' + filename, 'wb') as file:
file.write(req.content)
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
for script in soup.find_all('script'):
if script.get('src'): download_script(script.get('src'))
| en | 000388868_PacktPublishing-Hands-On-Bug-Hunting-for-Penetration-Testers_grabjs_768b06745bc9.py | unknown | 206 |
import argparse
import glob
import io
import multiprocessing as mp
import os
from pathlib import Path
from PIL import Image
###############################################################################
# Convert image to jpeg
###############################################################################
def from_file_to_file(input_file, output_file=None):
"""Convert image file to jpeg"""
# Default output filename is same as input but with JPEG extension
if output_file is None:
output_file = input_file.with_suffix('.jpg')
# Open image file
image = Image.open(input_file)
# Create raw byte buffer
buffer = io.BytesIO()
# Perform compression to 25% of the original file size
image.save(buffer, 'JPEG', quality=25)
# Write the buffer to a file
with open(output_file, 'w') as file:
file.write(buffer.contents())
def from_files_to_files(input_files, output_files=None):
"""Convert audio files to mp3"""
# Convert to paths
input_files = [Path(file) for file in input_files]
# Default output filename is same as input but with MP3 extension
if output_files is None:
output_files = [file.with_suffix('.jpg') for file in input_files]
# Multiprocess conversion
with mp.Pool() as pool:
pool.starmap(from_file_to_file, zip(input_files, output_files))
# for input_file, output_file in zip(input_files, output_files):
# from_file_to_file(input_file, output_file)
###############################################################################
# Entry point
###############################################################################
def expand_files(files):
"""Expands a wildcard to a list of paths for Windows compatibility"""
# Split at whitespace
files = files.split()
# Handle wildcard expansion
if len(files) == 1 and '*' in files[0]:
files = glob.glob(files[0])
# Convert to Path objects
return files
def parse_args():
"""Parse command-line arguments"""
parser = argparse.ArgumentParser(description='Convert images to JPEG')
# Handle wildcards across platforms
if os.name == 'nt':
parser.add_argument(
'--input_files',
type=expand_files,
help='The image files to convert to jpeg')
else:
parser.add_argument(
'--input_files',
nargs='+',
help='The image files to convert to jpeg')
parser.add_argument(
'--output_files',
type=Path,
nargs='+',
help='The corresponding output files. ' +
'Uses same filename with jpg extension by default')
return parser.parse_args()
if __name__ == '__main__':
from_files_to_files(**vars(parse_args()))
| import argparse
import glob
import io
import multiprocessing as mp
import os
from pathlib import Path
from PIL import Image
###############################################################################
# Convert image to jpeg
###############################################################################
def from_file_to_file(input_file, output_file=None):
"""Convert image file to jpeg"""
# Default output filename is same as input but with JPEG extension
if output_file is None:
output_file = input_file.with_suffix('.jpg')
# Open image file
image = Image.open(input_file)
# Create raw byte buffer
buffer = io.BytesIO()
# Perform compression to 25% of the original file size
image.save(buffer, 'JPEG', quality=25)
# Write the buffer to a file
with open(output_file, 'w') as file:
file.write(buffer.contents())
def from_files_to_files(input_files, output_files=None):
"""Convert audio files to mp3"""
# Convert to paths
input_files = [Path(file) for file in input_files]
# Default output filename is same as input but with MP3 extension
if output_files is None:
output_files = [file.with_suffix('.jpg') for file in input_files]
# Multiprocess conversion
with mp.Pool() as pool:
pool.starmap(from_file_to_file, zip(input_files, output_files))
# for input_file, output_file in zip(input_files, output_files):
# from_file_to_file(input_file, output_file)
###############################################################################
# Entry point
###############################################################################
def expand_files(files):
"""Expands a wildcard to a list of paths for Windows compatibility"""
# Split at whitespace
files = files.split()
# Handle wildcard expansion
if len(files) == 1 and '*' in files[0]:
files = glob.glob(files[0])
# Convert to Path objects
return files
def parse_args():
"""Parse command-line arguments"""
parser = argparse.ArgumentParser(description='Convert images to JPEG')
# Handle wildcards across platforms
if os.name == 'nt':
parser.add_argument(
'--input_files',
type=expand_files,
help='The image files to convert to jpeg')
else:
parser.add_argument(
'--input_files',
nargs='+',
help='The image files to convert to jpeg')
parser.add_argument(
'--output_files',
type=Path,
nargs='+',
help='The corresponding output files. ' +
'Uses same filename with jpg extension by default')
return parser.parse_args()
if __name__ == '__main__':
from_files_to_files(**vars(parse_args()))
| en | 000664855_reseval-reseval_image_1ad687b8c505.py | unknown | 715 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ZhihuUserItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.scrapy.Field()
id = scrapy.Field()
name = scrapy.Field()
avatar_url = scrapy.Field()
headline = scrapy.Field()
description = scrapy.Field()
url = scrapy.Field()
url_token = scrapy.Field()
gender = scrapy.Field()
cover_url = scrapy.Field()
type = scrapy.Field()
badge = scrapy.Field()
answer_count = scrapy.Field()
articles_count = scrapy.Field()
commercial_question_count = scrapy.Field()
favorite_count = scrapy.Field()
favorited_count = scrapy.Field()
follower_count = scrapy.Field()
following_columns_count = scrapy.Field()
following_count = scrapy.Field()
pins_count = scrapy.Field()
question_count = scrapy.Field()
thank_from_count = scrapy.Field()
thank_to_count = scrapy.Field()
thanked_count = scrapy.Field()
vote_from_count = scrapy.Field()
vote_to_count = scrapy.Field()
voteup_count = scrapy.Field()
following_favlists_count = scrapy.Field()
following_question_count = scrapy.Field()
following_topic_count = scrapy.Field()
marked_answers_count = scrapy.Field()
mutual_followees_count = scrapy.Field()
hosted_live_count = scrapy.Field()
participated_live_count = scrapy.Field()
locations = scrapy.Field()
educations = scrapy.Field()
employments = scrapy.Field()
| # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ZhihuUserItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.scrapy.Field()
id = scrapy.Field()
name = scrapy.Field()
avatar_url = scrapy.Field()
headline = scrapy.Field()
description = scrapy.Field()
url = scrapy.Field()
url_token = scrapy.Field()
gender = scrapy.Field()
cover_url = scrapy.Field()
type = scrapy.Field()
badge = scrapy.Field()
answer_count = scrapy.Field()
articles_count = scrapy.Field()
commercial_question_count = scrapy.Field()
favorite_count = scrapy.Field()
favorited_count = scrapy.Field()
follower_count = scrapy.Field()
following_columns_count = scrapy.Field()
following_count = scrapy.Field()
pins_count = scrapy.Field()
question_count = scrapy.Field()
thank_from_count = scrapy.Field()
thank_to_count = scrapy.Field()
thanked_count = scrapy.Field()
vote_from_count = scrapy.Field()
vote_to_count = scrapy.Field()
voteup_count = scrapy.Field()
following_favlists_count = scrapy.Field()
following_question_count = scrapy.Field()
following_topic_count = scrapy.Field()
marked_answers_count = scrapy.Field()
mutual_followees_count = scrapy.Field()
hosted_live_count = scrapy.Field()
participated_live_count = scrapy.Field()
locations = scrapy.Field()
educations = scrapy.Field()
employments = scrapy.Field()
| en | 000551306_fst034356-crawler_items_d699d9adf1a2.py | unknown | 515 |
import logging
from django.apps import apps
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import Http404, HttpResponse, JsonResponse
from django.views.generic import TemplateView, View
from zentral.core.stores import frontend_store
logger = logging.getLogger("server.base.views")
class HealthCheckView(View):
    """Unauthenticated liveness endpoint for load balancers / monitoring."""
    def get(self, request, *args, **kwargs):
        # Any 200 response is treated as healthy; the body is informational.
        return HttpResponse('OK')
class IndexView(LoginRequiredMixin, TemplateView):
    """Landing page listing every installed app that ships an events module."""
    template_name = "base/index.html"

    def get_context_data(self, **kwargs):
        """Expose the alphabetically sorted app names under the ``apps`` key."""
        context = super(IndexView, self).get_context_data(**kwargs)
        # Only apps that define an ``events_module`` show up on the dashboard.
        context["apps"] = sorted(
            name for name, config in apps.app_configs.items()
            if getattr(config, "events_module", None) is not None
        )
        return context
class AppHistogramDataView(LoginRequiredMixin, View):
    """JSON endpoint feeding the per-app event histogram charts.

    URL kwargs: ``app`` (zentral app name), ``interval`` (one of the keys of
    ``INTERVAL_DATE_FORMAT``) and ``bucket_number`` (number of histogram
    buckets to fetch from the event store).
    """
    # x-axis label format for each supported bucketing interval
    INTERVAL_DATE_FORMAT = {
        "hour": "%H:%M",
        "day": "%d/%m",
        "week": "%d/%m",
        "month": "%m/%y",
    }
    def get(self, request, *args, **kwargs):
        app = kwargs['app']
        try:
            zentral_app = apps.app_configs[app]
            # Each instrumented app exposes its store query in its events module.
            search_dict = getattr(zentral_app.events_module, "ALL_EVENTS_SEARCH_DICT")
        except (KeyError, AttributeError):
            # Unknown app, or app without an events module: not a chartable app.
            raise Http404
        interval = kwargs["interval"]
        try:
            date_format = self.INTERVAL_DATE_FORMAT[interval]
        except KeyError:
            raise Http404
        labels = []
        event_count_data = []
        unique_msn_data = []
        # One (datetime, event count, unique machine serial numbers) triple per bucket.
        for dt, event_count, unique_msn in frontend_store.get_app_hist_data(interval, int(kwargs["bucket_number"]),
                                                                            **search_dict):
            labels.append(dt.strftime(date_format))
            event_count_data.append(event_count)
            unique_msn_data.append(unique_msn)
        # Chart.js-style dataset payload (label / color / data series).
        datasets = {"event_count": {
                        "label": "{} events".format(app),
                        "backgroundColor": "rgba(122, 182, 160, 0.7)",
                        "data": event_count_data
                    },
                    "unique_msn": {
                        "label": "{} machines".format(app),
                        "backgroundColor": "rgba(225, 100, 86, 0.7)",
                        "data": unique_msn_data
                    }}
        return JsonResponse({"app": app,
                             "labels": labels,
                             "datasets": datasets})
| import logging
from django.apps import apps
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import Http404, HttpResponse, JsonResponse
from django.views.generic import TemplateView, View
from zentral.core.stores import frontend_store
logger = logging.getLogger("server.base.views")
class HealthCheckView(View):
def get(self, request, *args, **kwargs):
return HttpResponse('OK')
class IndexView(LoginRequiredMixin, TemplateView):
template_name = "base/index.html"
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
app_list = []
for app_name, app_config in apps.app_configs.items():
if getattr(app_config, "events_module", None) is not None:
app_list.append(app_name)
app_list.sort()
context["apps"] = app_list
return context
class AppHistogramDataView(LoginRequiredMixin, View):
INTERVAL_DATE_FORMAT = {
"hour": "%H:%M",
"day": "%d/%m",
"week": "%d/%m",
"month": "%m/%y",
}
def get(self, request, *args, **kwargs):
app = kwargs['app']
try:
zentral_app = apps.app_configs[app]
search_dict = getattr(zentral_app.events_module, "ALL_EVENTS_SEARCH_DICT")
except (KeyError, AttributeError):
raise Http404
interval = kwargs["interval"]
try:
date_format = self.INTERVAL_DATE_FORMAT[interval]
except KeyError:
raise Http404
labels = []
event_count_data = []
unique_msn_data = []
for dt, event_count, unique_msn in frontend_store.get_app_hist_data(interval, int(kwargs["bucket_number"]),
**search_dict):
labels.append(dt.strftime(date_format))
event_count_data.append(event_count)
unique_msn_data.append(unique_msn)
datasets = {"event_count": {
"label": "{} events".format(app),
"backgroundColor": "rgba(122, 182, 160, 0.7)",
"data": event_count_data
},
"unique_msn": {
"label": "{} machines".format(app),
"backgroundColor": "rgba(225, 100, 86, 0.7)",
"data": unique_msn_data
}}
return JsonResponse({"app": app,
"labels": labels,
"datasets": datasets})
| en | 000030474_arubdesu-zentral_views_73195c69669f.py | unknown | 725 |
import os
import time
from munch import munchify
from ray import tune
from ..core.recommender import Recommender
from ..models.userKNN import UserKNNEngine
from ..utils.monitor import Monitor
def tune_train(config):
    """Train the model with a hyper-parameter tuner (ray).

    Args:
        config (dict): All the parameters for the model. Must also carry the
            dataset object under the ``"data"`` key.
    """
    data = config["data"]
    train_engine = UserKNN(munchify(config))
    result = train_engine.train(data)
    # Wait until all asynchronous evaluation workers have drained before reporting.
    while train_engine.eval_engine.n_worker > 0:
        time.sleep(20)
    # NOTE(review): UserKNN.train below returns the plain string "data loaded",
    # so these subscript lookups would raise TypeError at runtime — confirm
    # whether train() is meant to return a result dict here.
    tune.report(
        valid_metric=result["valid_metric"],
        model_save_dir=result["model_save_dir"],
    )
class UserKNN(Recommender):
    """The User-based K Nearest Neighbour Model."""
    def __init__(self, config):
        """Initialize the config of this recommender.

        Args:
            config: configuration object (dict/Munch) providing the ``model``
                and ``system`` sections used by this recommender.
        """
        super(UserKNN, self).__init__(config, name="UserKNN")
    def init_engine(self, data):
        """Initialize the required parameters for the model.

        Args:
            data: the Dataset object; supplies ``n_users`` and ``n_items``.
        """
        self.config["model"]["n_users"] = data.n_users
        self.config["model"]["n_items"] = data.n_items
        self.engine = UserKNNEngine(self.config)
    def train(self, data):
        """Training the model.

        Args:
            data: the Dataset object.
        Returns:
            str: the status string ``"data loaded"``.
                NOTE(review): callers such as ``tune_train`` subscript the
                return value as a dict — confirm the intended contract.
        """
        self.gpu_id, self.config["device_str"] = self.get_device()  # Train the model.
        # n_users/n_items are also set in init_engine below; this duplicates that work.
        self.config["model"]["n_users"] = data.n_users
        self.config["model"]["n_items"] = data.n_items
        # Resource monitor tracking the run (stopped before returning).
        self.monitor = Monitor(
            log_dir=self.config["system"]["run_dir"], delay=1, gpu_id=self.gpu_id
        )
        self.init_engine(data)
        # NOTE(review): leftover debug prints; consider switching to logging.
        print(type(data.train))
        print(data.train.head())
        self.engine.model.prepare_model(data)
        self.model_save_dir = os.path.join(
            self.config["system"]["model_save_dir"], self.config["model"]["save_name"]
        )
        self.config["run_time"] = self.monitor.stop()
        return "data loaded"
| import os
import time
from munch import munchify
from ray import tune
from ..core.recommender import Recommender
from ..models.userKNN import UserKNNEngine
from ..utils.monitor import Monitor
def tune_train(config):
"""Train the model with a hyper-parameter tuner (ray).
Args:
config (dict): All the parameters for the model.
"""
data = config["data"]
train_engine = UserKNN(munchify(config))
result = train_engine.train(data)
while train_engine.eval_engine.n_worker > 0:
time.sleep(20)
tune.report(
valid_metric=result["valid_metric"],
model_save_dir=result["model_save_dir"],
)
class UserKNN(Recommender):
"""The User-based K Nearest Neighbour Model."""
def __init__(self, config):
"""Initialize the config of this recommender.
Args:
config:
"""
super(UserKNN, self).__init__(config, name="UserKNN")
def init_engine(self, data):
"""Initialize the required parameters for the model.
Args:
data: the Dataset object.
"""
self.config["model"]["n_users"] = data.n_users
self.config["model"]["n_items"] = data.n_items
self.engine = UserKNNEngine(self.config)
def train(self, data):
"""Training the model.
Args:
data: the Dataset object.
Returns:
dict: {}
"""
self.gpu_id, self.config["device_str"] = self.get_device() # Train the model.
self.config["model"]["n_users"] = data.n_users
self.config["model"]["n_items"] = data.n_items
self.monitor = Monitor(
log_dir=self.config["system"]["run_dir"], delay=1, gpu_id=self.gpu_id
)
self.init_engine(data)
print(type(data.train))
print(data.train.head())
self.engine.model.prepare_model(data)
self.model_save_dir = os.path.join(
self.config["system"]["model_save_dir"], self.config["model"]["save_name"]
)
self.config["run_time"] = self.monitor.stop()
return "data loaded"
| en | 000274332_mengzaiqiao-TVBR_userKNN_94025c76269e.py | unknown | 623 |
import os
from typing import Optional, Union, Callable
from platypush.context import get_bus
from platypush.message.event.ngrok import NgrokProcessStartedEvent, NgrokTunnelStartedEvent, NgrokTunnelStoppedEvent, \
NgrokProcessStoppedEvent
from platypush.plugins import Plugin, action
from platypush.schemas.ngrok import NgrokTunnelSchema
class NgrokPlugin(Plugin):
    """
    Plugin to dynamically create and manage network tunnels using `ngrok <https://ngrok.com/>`_.
    Requires:
        * **pyngrok** (``pip install pyngrok``)
    Triggers:
        * :class:`platypush.message.event.ngrok.NgrokProcessStartedEvent` when the ``ngrok`` process is started.
        * :class:`platypush.message.event.ngrok.NgrokProcessStoppedEvent` when the ``ngrok`` process is stopped.
        * :class:`platypush.message.event.ngrok.NgrokTunnelStartedEvent` when a tunnel is started.
        * :class:`platypush.message.event.ngrok.NgrokTunnelStoppedEvent` when a tunnel is stopped.
    """
    def __init__(self, auth_token: Optional[str] = None, ngrok_bin: Optional[str] = None, region: Optional[str] = None,
                 **kwargs):
        """
        :param auth_token: Specify the ``ngrok`` auth token, enabling authenticated features (e.g. more concurrent
            tunnels, custom subdomains, etc.).
        :param ngrok_bin: By default ``pyngrok`` manages its own version of the ``ngrok`` binary, but you can specify
            this option if you want to use a different binary installed on the system.
        :param region: ISO code of the region/country that should host the ``ngrok`` tunnel (default: ``us``).
        """
        from pyngrok import conf, ngrok
        super().__init__(**kwargs)
        # Hook into the ngrok log stream so process/tunnel state changes are
        # translated into platypush events by the callback below.
        conf.get_default().log_event_callback = self._get_event_callback()
        # Registry of active tunnels keyed by public URL. This is the single
        # source of truth; ``_active_tunnels_by_name`` is derived from it.
        self._active_tunnels_by_url = {}
        if auth_token:
            ngrok.set_auth_token(auth_token)
        if ngrok_bin:
            conf.get_default().ngrok_path = os.path.expanduser(ngrok_bin)
        if region:
            conf.get_default().region = region
    @property
    def _active_tunnels_by_name(self) -> dict:
        # Derived, throwaway view: mutating the returned dict does NOT affect
        # the backing ``_active_tunnels_by_url`` registry.
        return {
            tunnel['name']: tunnel
            for tunnel in self._active_tunnels_by_url.values()
        }
    def _get_event_callback(self) -> Callable:
        from pyngrok.process import NgrokLog
        def callback(log: NgrokLog):
            if log.msg == 'client session established':
                get_bus().post(NgrokProcessStartedEvent())
            elif log.msg == 'started tunnel':
                # noinspection PyUnresolvedReferences
                tunnel = dict(
                    name=log.name,
                    url=log.url,
                    protocol=log.url.split(':')[0]
                )
                self._active_tunnels_by_url[tunnel['url']] = tunnel
                get_bus().post(NgrokTunnelStartedEvent(**tunnel))
            elif (
                    log.msg == 'end' and
                    int(getattr(log, 'status', 0)) == 204 and
                    getattr(log, 'pg', '').startswith('/api/tunnels')
            ):
                # A successful DELETE on /api/tunnels/<ref>; <ref> can be
                # either the tunnel name or its public URL.
                # noinspection PyUnresolvedReferences
                ref = log.pg.split('/')[-1]
                tunnel = self._active_tunnels_by_name.get(ref) or self._active_tunnels_by_url.get(ref)
                if tunnel:
                    # Bug fix: always evict the entry from the backing
                    # registry. The previous code popped from
                    # ``_active_tunnels_by_name``, which is a fresh dict built
                    # by the property, so name-referenced tunnels leaked in
                    # ``_active_tunnels_by_url``.
                    self._active_tunnels_by_url.pop(tunnel['url'], None)
                    get_bus().post(NgrokTunnelStoppedEvent(**tunnel))
            elif log.msg == 'received stop request':
                get_bus().post(NgrokProcessStoppedEvent())
        return callback
    @action
    def create_tunnel(self, resource: Union[int, str] = 80, protocol: str = 'tcp',
                      name: Optional[str] = None, auth: Optional[str] = None, **kwargs) -> dict:
        """
        Create an ``ngrok`` tunnel to the specified localhost port/protocol.
        :param resource: This can be any of the following:
            - A TCP or UDP port exposed on localhost.
            - A local network address (or ``address:port``) to expose.
            - The absolute path (starting with ``file://``) to a local folder - in such case, the specified directory
              will be served over HTTP through an ``ngrok`` endpoint (see https://ngrok.com/docs#http-file-urls).
            Default: localhost port 80.
        :param protocol: Network protocol (default: ``tcp``).
        :param name: Optional tunnel name.
        :param auth: HTTP basic authentication credentials associated with the tunnel, in the format of
            ``username:password``.
        :param kwargs: Extra arguments supported by the ``ngrok`` tunnel, such as ``hostname``, ``subdomain`` or
            ``remote_addr`` - see the `ngrok documentation <https://ngrok.com/docs#tunnel-definitions>`_ for a full
            list.
        :return: .. schema:: ngrok.NgrokTunnelSchema
        """
        from pyngrok import ngrok
        if isinstance(resource, str) and resource.startswith('file://'):
            # file:// URLs are served over HTTP; pyngrok infers the proto itself.
            protocol = None
        tunnel = ngrok.connect(resource, proto=protocol, name=name, auth=auth, **kwargs)
        return NgrokTunnelSchema().dump(tunnel)
    @action
    def close_tunnel(self, tunnel: str):
        """
        Close an ``ngrok`` tunnel.
        :param tunnel: Name or public URL of the tunnel to be closed.
        """
        from pyngrok import ngrok
        # Resolve a tunnel name to its public URL (ngrok.disconnect wants the URL).
        if tunnel in self._active_tunnels_by_name:
            tunnel = self._active_tunnels_by_name[tunnel]['url']
        assert tunnel in self._active_tunnels_by_url, f'No such tunnel URL or name: {tunnel}'
        ngrok.disconnect(tunnel)
    @action
    def get_tunnels(self):
        """
        Get the list of active ``ngrok`` tunnels.
        :return: .. schema:: ngrok.NgrokTunnelSchema(many=True)
        """
        from pyngrok import ngrok
        tunnels = ngrok.get_tunnels()
        return NgrokTunnelSchema().dump(tunnels, many=True)
    @action
    def kill_process(self):
        """
        The first created tunnel instance also starts the ``ngrok`` process.
        The process will stay alive until the Python interpreter is stopped or this action is invoked.
        """
        from pyngrok import ngrok
        proc = ngrok.get_ngrok_process()
        assert proc and proc.proc, 'The ngrok process is not running'
        proc.proc.kill()
        get_bus().post(NgrokProcessStoppedEvent())
# vim:sw=4:ts=4:et:
| import os
from typing import Optional, Union, Callable
from platypush.context import get_bus
from platypush.message.event.ngrok import NgrokProcessStartedEvent, NgrokTunnelStartedEvent, NgrokTunnelStoppedEvent, \
NgrokProcessStoppedEvent
from platypush.plugins import Plugin, action
from platypush.schemas.ngrok import NgrokTunnelSchema
class NgrokPlugin(Plugin):
"""
Plugin to dynamically create and manage network tunnels using `ngrok <https://ngrok.com/>`_.
Requires:
* **pyngrok** (``pip install pyngrok``)
Triggers:
* :class:`platypush.message.event.ngrok.NgrokProcessStartedEvent` when the ``ngrok`` process is started.
* :class:`platypush.message.event.ngrok.NgrokProcessStoppedEvent` when the ``ngrok`` process is stopped.
* :class:`platypush.message.event.ngrok.NgrokTunnelStartedEvent` when a tunnel is started.
* :class:`platypush.message.event.ngrok.NgrokTunnelStoppedEvent` when a tunnel is stopped.
"""
def __init__(self, auth_token: Optional[str] = None, ngrok_bin: Optional[str] = None, region: Optional[str] = None,
**kwargs):
"""
:param auth_token: Specify the ``ngrok`` auth token, enabling authenticated features (e.g. more concurrent
tunnels, custom subdomains, etc.).
:param ngrok_bin: By default ``pyngrok`` manages its own version of the ``ngrok`` binary, but you can specify
this option if you want to use a different binary installed on the system.
:param region: ISO code of the region/country that should host the ``ngrok`` tunnel (default: ``us``).
"""
from pyngrok import conf, ngrok
super().__init__(**kwargs)
conf.get_default().log_event_callback = self._get_event_callback()
self._active_tunnels_by_url = {}
if auth_token:
ngrok.set_auth_token(auth_token)
if ngrok_bin:
conf.get_default().ngrok_path = os.path.expanduser(ngrok_bin)
if region:
conf.get_default().region = region
@property
def _active_tunnels_by_name(self) -> dict:
return {
tunnel['name']: tunnel
for tunnel in self._active_tunnels_by_url.values()
}
def _get_event_callback(self) -> Callable:
from pyngrok.process import NgrokLog
def callback(log: NgrokLog):
if log.msg == 'client session established':
get_bus().post(NgrokProcessStartedEvent())
elif log.msg == 'started tunnel':
# noinspection PyUnresolvedReferences
tunnel = dict(
name=log.name,
url=log.url,
protocol=log.url.split(':')[0]
)
self._active_tunnels_by_url[tunnel['url']] = tunnel
get_bus().post(NgrokTunnelStartedEvent(**tunnel))
elif (
log.msg == 'end' and
int(getattr(log, 'status', 0)) == 204 and
getattr(log, 'pg', '').startswith('/api/tunnels')
):
# noinspection PyUnresolvedReferences
tunnel = log.pg.split('/')[-1]
tunnel = self._active_tunnels_by_name.pop(tunnel, self._active_tunnels_by_url.pop(tunnel, None))
if tunnel:
get_bus().post(NgrokTunnelStoppedEvent(**tunnel))
elif log.msg == 'received stop request':
get_bus().post(NgrokProcessStoppedEvent())
return callback
@action
def create_tunnel(self, resource: Union[int, str] = 80, protocol: str = 'tcp',
name: Optional[str] = None, auth: Optional[str] = None, **kwargs) -> dict:
"""
Create an ``ngrok`` tunnel to the specified localhost port/protocol.
:param resource: This can be any of the following:
- A TCP or UDP port exposed on localhost.
- A local network address (or ``address:port``) to expose.
- The absolute path (starting with ``file://``) to a local folder - in such case, the specified directory
will be served over HTTP through an ``ngrok`` endpoint (see https://ngrok.com/docs#http-file-urls).
Default: localhost port 80.
:param protocol: Network protocol (default: ``tcp``).
:param name: Optional tunnel name.
:param auth: HTTP basic authentication credentials associated with the tunnel, in the format of
``username:password``.
:param kwargs: Extra arguments supported by the ``ngrok`` tunnel, such as ``hostname``, ``subdomain`` or
``remote_addr`` - see the `ngrok documentation <https://ngrok.com/docs#tunnel-definitions>`_ for a full
list.
:return: .. schema:: ngrok.NgrokTunnelSchema
"""
from pyngrok import ngrok
if isinstance(resource, str) and resource.startswith('file://'):
protocol = None
tunnel = ngrok.connect(resource, proto=protocol, name=name, auth=auth, **kwargs)
return NgrokTunnelSchema().dump(tunnel)
@action
def close_tunnel(self, tunnel: str):
"""
Close an ``ngrok`` tunnel.
:param tunnel: Name or public URL of the tunnel to be closed.
"""
from pyngrok import ngrok
if tunnel in self._active_tunnels_by_name:
tunnel = self._active_tunnels_by_name[tunnel]['url']
assert tunnel in self._active_tunnels_by_url, f'No such tunnel URL or name: {tunnel}'
ngrok.disconnect(tunnel)
@action
def get_tunnels(self):
"""
Get the list of active ``ngrok`` tunnels.
:return: .. schema:: ngrok.NgrokTunnelSchema(many=True)
"""
from pyngrok import ngrok
tunnels = ngrok.get_tunnels()
return NgrokTunnelSchema().dump(tunnels, many=True)
@action
def kill_process(self):
"""
The first created tunnel instance also starts the ``ngrok`` process.
The process will stay alive until the Python interpreter is stopped or this action is invoked.
"""
from pyngrok import ngrok
proc = ngrok.get_ngrok_process()
assert proc and proc.proc, 'The ngrok process is not running'
proc.proc.kill()
get_bus().post(NgrokProcessStoppedEvent())
# vim:sw=4:ts=4:et:
| en | 000685785_BlackLight-platypush_init_8753f62ed0db.py | unknown | 1,739 |
from .FeatureFuser import Wav2vec2Wrapper
import pytorch_lightning.core.lightning as pl
class MinimalClassifier(pl.LightningModule):
    """Thin LightningModule exposing the wav2vec2 wrapper's representations."""

    def __init__(self):
        super().__init__()
        # Fine-tuning mode: the wrapper is constructed with pretraining disabled.
        self.wav2vec2 = Wav2vec2Wrapper(pretrain=False)

    def forward(self, x, length=None):
        """Return the wav2vec2 representations for input batch ``x``."""
        return self.wav2vec2(x, length)
| from .FeatureFuser import Wav2vec2Wrapper
import pytorch_lightning.core.lightning as pl
class MinimalClassifier(pl.LightningModule):
def __init__(self):
super().__init__()
self.wav2vec2 = Wav2vec2Wrapper(pretrain=False)
def forward(self, x, length=None):
reps = self.wav2vec2(x, length)
return reps
| en | 000592282_ishine-FG-transformer-TTS_wrapper_3d37bfab8119.py | unknown | 110 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from copy import deepcopy
import sys
from typing import Any, Dict, Optional, Text, TypeVar # noqa: F401
from .backends.base import BaseChannel
from .exceptions import ImproperlyConfigured
from .types import SendOptions # noqa: F401
C = TypeVar("C", bound=BaseChannel)
class Kawasemi(object):
    """Dispatches notification messages to one or more configured channels."""

    def __init__(self, settings):
        # type: (Dict[str, Any]) -> None
        """
        :param settings: configuration mapping; must contain a ``CHANNELS``
            dict of channel name -> channel config, where each config carries
            the dotted path of its backend class under the ``_backend`` key.
        """
        self.settings = settings
        # Cache of already-resolved backend classes, keyed by dotted path.
        self._backends = {}  # type: Dict[str, C]

    def _load_module(self, name):
        # type: (str) -> Any
        """Import and return the module with the given dotted *name*."""
        __import__(name)
        return sys.modules[name]

    def _load_backend(self, name):
        # type: (str) -> C
        """Return the backend class at dotted path *name*, caching the lookup."""
        try:
            return self._backends[name]
        except KeyError:
            module_name, klass_name = name.rsplit(".", 1)
            module = self._load_module(str(module_name))
            self._backends[name] = getattr(module, klass_name)
            return self._backends[name]

    def send(self, message, channel_name=None, fail_silently=False,
             options=None):
        # type: (Text, Optional[str], bool, Optional[SendOptions]) -> None
        """Send a notification to channels

        :param message: A message
        :param channel_name: if given, send only through this channel;
            otherwise broadcast to every configured channel.
        :param fail_silently: forwarded to each backend's ``send``.
        :param options: per-backend send options, forwarded as-is.
        :raises Exception: if *channel_name* is not configured.
        :raises ImproperlyConfigured: if a channel config lacks ``_backend``.
        """
        if channel_name is None:
            channels = self.settings["CHANNELS"]
        else:
            try:
                channels = {
                    "__selected__": self.settings["CHANNELS"][channel_name]
                }
            except KeyError:
                # Bug fix: the message was previously passed logging-style
                # ("...%s", name) as two Exception args, so the channel name
                # was never interpolated into the message.
                raise Exception("channels does not exist %s" % channel_name)
        for _, config in channels.items():
            if "_backend" not in config:
                raise ImproperlyConfigured(
                    "Specify the backend class in the channel configuration")
            backend = self._load_backend(config["_backend"])  # type: Any
            # Work on a copy so the caller's settings dict is not mutated.
            config = deepcopy(config)
            del config["_backend"]
            channel = backend(**config)
            channel.send(message, fail_silently=fail_silently, options=options)
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
from copy import deepcopy
import sys
from typing import Any, Dict, Optional, Text, TypeVar # noqa: F401
from .backends.base import BaseChannel
from .exceptions import ImproperlyConfigured
from .types import SendOptions # noqa: F401
C = TypeVar("C", bound=BaseChannel)
class Kawasemi(object):
def __init__(self, settings):
self.settings = settings
self._backends = {} # type: Dict[str, C]
def _load_module(self, name):
# type: (str) -> Any
__import__(name)
return sys.modules[name]
def _load_backend(self, name):
# type: (str) -> C
try:
return self._backends[name]
except KeyError:
module_name, klass_name = name.rsplit(".", 1)
module = self._load_module(str(module_name))
self._backends[name] = getattr(module, klass_name)
return self._backends[name]
def send(self, message, channel_name=None, fail_silently=False,
options=None):
# type: (Text, Optional[str], bool, Optional[SendOptions]) -> None
"""Send a notification to channels
:param message: A message
"""
if channel_name is None:
channels = self.settings["CHANNELS"]
else:
try:
channels = {
"__selected__": self.settings["CHANNELS"][channel_name]
}
except KeyError:
raise Exception("channels does not exist %s", channel_name)
for _, config in channels.items():
if "_backend" not in config:
raise ImproperlyConfigured(
"Specify the backend class in the channel configuration")
backend = self._load_backend(config["_backend"]) # type: Any
config = deepcopy(config)
del config["_backend"]
channel = backend(**config)
channel.send(message, fail_silently=fail_silently, options=options)
| en | 000670553_ymyzk-django-channels_kawasemi_b82c8e36531f.py | unknown | 544 |
import numpy as np
import argparse
import os
import torch
import torch.optim
import torch.utils.data
import torchvision
import torchvision.transforms as transforms
from torchvision.transforms import ToTensor, Normalize, Compose, Lambda
def get_data(args: argparse.Namespace):
    """
    Load the proper dataset based on the parsed arguments
    :param args: The arguments in which is specified which dataset should be used
    :return: a 5-tuple consisting of:
        - The train data set
        - The project data set (usually train data set without augmentation)
        - The test data set
        - a tuple containing all possible class labels
        - a tuple containing the shape (depth, width, height) of the input images
    """
    name = args.dataset
    if name == 'CUB-200-2011':
        return get_birds(True,
                         './data/CUB_200_2011/dataset/train_corners',
                         './data/CUB_200_2011/dataset/train_crop',
                         './data/CUB_200_2011/dataset/test_full')
    elif name == 'CARS':
        return get_cars(True,
                        './data/cars/dataset/train',
                        './data/cars/dataset/train',
                        './data/cars/dataset/test')
    raise Exception(f'Could not load data set "{name}"!')
def get_dataloaders(args: argparse.Namespace):
    """
    Get data loaders

    :param args: parsed arguments; uses ``dataset``, ``batch_size`` and
        ``disable_cuda``
    :return: (trainloader, projectloader, testloader, class names, channel count)
    """
    # Obtain the dataset
    trainset, projectset, testset, classes, shape = get_data(args)
    c, w, h = shape  # only the channel count is propagated to the caller
    # Determine if GPU should be used
    cuda = not args.disable_cuda and torch.cuda.is_available()
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              pin_memory=cuda
                                              )
    projectloader = torch.utils.data.DataLoader(projectset,
                                              # batch_size=args.batch_size,
                                              batch_size=int(args.batch_size/4), #make batch size smaller to prevent out of memory errors during projection
                                              shuffle=False,
                                              pin_memory=cuda
                                              )
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             pin_memory=cuda
                                             )
    print("Num classes (k) = ", len(classes), flush=True)
    return trainloader, projectloader, testloader, classes, c
def get_birds(augment: bool, train_dir:str, project_dir: str, test_dir:str, img_size = 224):
    """Build the CUB-200-2011 train/project/test datasets.

    :param augment: when True, apply train-time augmentation to the train set
    :param train_dir: folder with the training images
    :param project_dir: folder used for projection (never augmented)
    :param test_dir: folder with the test images
    :param img_size: side length of the square network input
    :return: (trainset, projectset, testset, class names, input shape)
    """
    shape = (3, img_size, img_size)
    # ImageNet normalization statistics.
    normalize = transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                     std=(0.229, 0.224, 0.225))
    transform_no_augment = transforms.Compose([
        transforms.Resize(size=(img_size, img_size)),
        transforms.ToTensor(),
        normalize
    ])
    if not augment:
        transform = transform_no_augment
    else:
        transform = transforms.Compose([
            transforms.Resize(size=(img_size, img_size)),
            transforms.RandomOrder([
                transforms.RandomPerspective(distortion_scale=0.2, p=0.5),
                transforms.ColorJitter((0.6, 1.4), (0.6, 1.4), (0.6, 1.4), (-0.02, 0.02)),
                transforms.RandomHorizontalFlip(),
                transforms.RandomAffine(degrees=10, shear=(-2, 2), translate=[0.05, 0.05]),
            ]),
            transforms.ToTensor(),
            normalize,
        ])
    trainset = torchvision.datasets.ImageFolder(train_dir, transform=transform)
    projectset = torchvision.datasets.ImageFolder(project_dir, transform=transform_no_augment)
    testset = torchvision.datasets.ImageFolder(test_dir, transform=transform_no_augment)
    # Strip the numeric "NNN." prefix from each class directory name, mutating
    # the dataset's class list in place (classes is the same list object as
    # trainset.classes).
    classes = trainset.classes
    for idx, label in enumerate(classes):
        classes[idx] = label.split('.')[1]
    return trainset, projectset, testset, classes, shape
def get_cars(augment: bool, train_dir:str, project_dir: str, test_dir:str, img_size = 224):
    """Build the Stanford Cars train/project/test datasets.

    :param augment: when True, apply train-time augmentation to the train set
    :param train_dir: folder with the training images
    :param project_dir: folder used for projection (never augmented)
    :param test_dir: folder with the test images
    :param img_size: side length of the square network input
    :return: (trainset, projectset, testset, class names, input shape)
    """
    shape = (3, img_size, img_size)
    # ImageNet normalization statistics.
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)
    normalize = transforms.Normalize(mean=mean,std=std)
    transform_no_augment = transforms.Compose([
        transforms.Resize(size=(img_size, img_size)),
        transforms.ToTensor(),
        normalize
    ])
    if augment:
        transform = transforms.Compose([
            transforms.Resize(size=(img_size+32, img_size+32)), #resize to 256x256
            transforms.RandomOrder([
                transforms.RandomPerspective(distortion_scale=0.5, p = 0.5),
                transforms.ColorJitter((0.6,1.4), (0.6,1.4), (0.6,1.4), (-0.4,0.4)),
                transforms.RandomHorizontalFlip(),
                transforms.RandomAffine(degrees=15,shear=(-2,2)),
            ]),
            transforms.RandomCrop(size=(img_size, img_size)), #crop to 224x224
            transforms.ToTensor(),
            normalize,
        ])
    else:
        transform = transform_no_augment
    trainset = torchvision.datasets.ImageFolder(train_dir, transform=transform)
    projectset = torchvision.datasets.ImageFolder(project_dir, transform=transform_no_augment)
    testset = torchvision.datasets.ImageFolder(test_dir, transform=transform_no_augment)
    # Unlike get_birds, car class directory names are used verbatim.
    classes = trainset.classes
    return trainset, projectset, testset, classes, shape
|
import numpy as np
import argparse
import os
import torch
import torch.optim
import torch.utils.data
import torchvision
import torchvision.transforms as transforms
from torchvision.transforms import ToTensor, Normalize, Compose, Lambda
def get_data(args: argparse.Namespace):
"""
Load the proper dataset based on the parsed arguments
:param args: The arguments in which is specified which dataset should be used
:return: a 5-tuple consisting of:
- The train data set
- The project data set (usually train data set without augmentation)
- The test data set
- a tuple containing all possible class labels
- a tuple containing the shape (depth, width, height) of the input images
"""
if args.dataset =='CUB-200-2011':
return get_birds(True, './data/CUB_200_2011/dataset/train_corners', './data/CUB_200_2011/dataset/train_crop', './data/CUB_200_2011/dataset/test_full')
if args.dataset == 'CARS':
return get_cars(True, './data/cars/dataset/train', './data/cars/dataset/train', './data/cars/dataset/test')
raise Exception(f'Could not load data set "{args.dataset}"!')
def get_dataloaders(args: argparse.Namespace):
"""
Get data loaders
"""
# Obtain the dataset
trainset, projectset, testset, classes, shape = get_data(args)
c, w, h = shape
# Determine if GPU should be used
cuda = not args.disable_cuda and torch.cuda.is_available()
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=args.batch_size,
shuffle=True,
pin_memory=cuda
)
projectloader = torch.utils.data.DataLoader(projectset,
# batch_size=args.batch_size,
batch_size=int(args.batch_size/4), #make batch size smaller to prevent out of memory errors during projection
shuffle=False,
pin_memory=cuda
)
testloader = torch.utils.data.DataLoader(testset,
batch_size=args.batch_size,
shuffle=False,
pin_memory=cuda
)
print("Num classes (k) = ", len(classes), flush=True)
return trainloader, projectloader, testloader, classes, c
def get_birds(augment: bool, train_dir:str, project_dir: str, test_dir:str, img_size = 224):
shape = (3, img_size, img_size)
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
normalize = transforms.Normalize(mean=mean,std=std)
transform_no_augment = transforms.Compose([
transforms.Resize(size=(img_size, img_size)),
transforms.ToTensor(),
normalize
])
if augment:
transform = transforms.Compose([
transforms.Resize(size=(img_size, img_size)),
transforms.RandomOrder([
transforms.RandomPerspective(distortion_scale=0.2, p = 0.5),
transforms.ColorJitter((0.6,1.4), (0.6,1.4), (0.6,1.4), (-0.02,0.02)),
transforms.RandomHorizontalFlip(),
transforms.RandomAffine(degrees=10, shear=(-2,2),translate=[0.05,0.05]),
]),
transforms.ToTensor(),
normalize,
])
else:
transform = transform_no_augment
trainset = torchvision.datasets.ImageFolder(train_dir, transform=transform)
projectset = torchvision.datasets.ImageFolder(project_dir, transform=transform_no_augment)
testset = torchvision.datasets.ImageFolder(test_dir, transform=transform_no_augment)
classes = trainset.classes
for i in range(len(classes)):
classes[i]=classes[i].split('.')[1]
return trainset, projectset, testset, classes, shape
def get_cars(augment: bool, train_dir:str, project_dir: str, test_dir:str, img_size = 224):
shape = (3, img_size, img_size)
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
normalize = transforms.Normalize(mean=mean,std=std)
transform_no_augment = transforms.Compose([
transforms.Resize(size=(img_size, img_size)),
transforms.ToTensor(),
normalize
])
if augment:
transform = transforms.Compose([
transforms.Resize(size=(img_size+32, img_size+32)), #resize to 256x256
transforms.RandomOrder([
transforms.RandomPerspective(distortion_scale=0.5, p = 0.5),
transforms.ColorJitter((0.6,1.4), (0.6,1.4), (0.6,1.4), (-0.4,0.4)),
transforms.RandomHorizontalFlip(),
transforms.RandomAffine(degrees=15,shear=(-2,2)),
]),
transforms.RandomCrop(size=(img_size, img_size)), #crop to 224x224
transforms.ToTensor(),
normalize,
])
else:
transform = transform_no_augment
trainset = torchvision.datasets.ImageFolder(train_dir, transform=transform)
projectset = torchvision.datasets.ImageFolder(project_dir, transform=transform_no_augment)
testset = torchvision.datasets.ImageFolder(test_dir, transform=transform_no_augment)
classes = trainset.classes
return trainset, projectset, testset, classes, shape
| en | 000745284_M-Nauta-ProtoTree_data_b444ae8bee27.py | unknown | 1,569 |
import logging
from unittest import TestCase
import datetime
from wikidata import wikidata
from wikidata import government as wikidata_government
logger = logging.getLogger(__name__)
class TestSearchParliamentMembers(TestCase):
def test_search_ids_all(self):
member_ids = wikidata.search_parliament_member_ids()
self.assertEqual(len(member_ids), len(set(member_ids)))
self.assertTrue(len(member_ids) > 2200)
def test_search_ids_with_start_date(self):
member_ids = wikidata.search_parliament_member_ids_with_start_date()
self.assertEqual(len(member_ids), len(set(member_ids)))
self.assertTrue(len(member_ids) > 530)
class TestGetParliamentMemberInfo(TestCase):
def test_get_frans_timmermans(self):
logger.info('BEGIN')
wikidata_id = 'Q32681' # Frans Timmermans
item = wikidata.WikidataItem(wikidata_id)
fullname = item.get_label()
self.assertEqual(fullname, 'Frans Timmermans')
given_name = item.get_given_names()[0]
self.assertEqual(given_name, 'Frans')
birth_date = item.get_birth_date()
self.assertEqual(birth_date, datetime.date(day=6, month=5, year=1961))
parlement_positions = item.get_parliament_positions_held()
self.assertEqual(len(parlement_positions), 2)
logger.info('END')
def test_get_fraction(self):
wikidata_id = 'Q2801440' # Martin van Rooijen, 50Plus
item_50plus_id = 'Q27122891'
item = wikidata.WikidataItem(wikidata_id)
positions = item.get_positions_held()
fraction_id = None
for position in positions:
if position['id'] == wikidata.PARLIAMENT_MEMBER_DUTCH_ITEM_ID:
fraction_id = position['part_of_id']
self.assertEqual(fraction_id, item_50plus_id)
class TestPositionHeld(TestCase):
wikidata_id_ft = 'Q32681' # Frans Timmermans
wikidata_id_wa = 'Q474763' # Willem Aantjes
wikidata_id_mr = 'Q57792' # Mark Rutte
def test_search_all(self):
item = wikidata.WikidataItem(self.wikidata_id_ft)
positions = item.get_positions_held()
self.assertEqual(len(positions), 9)
item = wikidata.WikidataItem(self.wikidata_id_wa)
positions = item.get_positions_held()
self.assertEqual(len(positions), 2)
def test_search_parliament_member(self):
item = wikidata.WikidataItem(self.wikidata_id_ft)
positions = item.get_parliament_positions_held()
self.assertEqual(len(positions), 2)
for position in positions:
self.assertEqual(position['id'], wikidata.PARLIAMENT_MEMBER_DUTCH_ITEM_ID)
item = wikidata.WikidataItem(self.wikidata_id_mr)
positions = item.get_parliament_positions_held()
self.assertEqual(len(positions), 4)
class TestFindPoliticalParty(TestCase):
def test_search_pvdd(self):
wikidata_id = wikidata.search_political_party_id('PvdD', language='nl')
item = wikidata.WikidataItem(wikidata_id)
label = item.get_label(language='nl')
self.assertEqual(label, 'Partij voor de Dieren')
def test_search_groenlinks(self):
wikidata_id = wikidata.search_political_party_id('GL', language='nl')
self.assertIsNotNone(wikidata_id)
self.assertGreaterEqual(wikidata_id, 'Q667680')
item = wikidata.WikidataItem(wikidata_id)
label = item.get_label(language='nl')
self.assertEqual(label, 'GroenLinks')
def test_search_vvd(self):
wikidata_id = wikidata.search_political_party_id('VVD', language='nl')
item = wikidata.WikidataItem(wikidata_id)
label = item.get_label(language='nl')
self.assertEqual(label, 'Volkspartij voor Vrijheid en Democratie')
def test_is_political_party(self):
wikidata_id = 'Q275441' # PvdA
item = wikidata.WikidataItem(wikidata_id)
is_pp = item.is_political_party()
self.assertTrue(is_pp)
def test_is_fractie(self):
wikidata_id = 'Q28044800' # Lid-Monasch
item = wikidata.WikidataItem(wikidata_id)
is_fractie = item.is_fractie()
self.assertTrue(is_fractie)
def test_search_group_houwers(self):
wikidata_id = wikidata.search_political_party_id('Houwers', language='nl')
self.assertEqual(wikidata_id, 'Q28044763')
item = wikidata.WikidataItem(wikidata_id)
label = item.get_label(language='nl')
self.assertEqual(label, 'Lid-Houwers')
def test_search_socialist_party(self):
wikidata_id = wikidata.search_political_party_id('Socialistische Partij', language='nl')
self.assertEqual(wikidata_id, 'Q849580')
item = wikidata.WikidataItem(wikidata_id)
label = item.get_label(language='nl')
self.assertEqual(label, 'Socialistische Partij')
class TestDate(TestCase):
def test_date(self):
date_str = '+2016-12-25T00:00:00Z'
date = wikidata.WikidataItem.get_date(date_str)
self.assertEqual(date.day, 25)
self.assertEqual(date.month, 12)
self.assertEqual(date.year, 2016)
date_str = '+2016-00-00T00:00:00Z'
date = wikidata.WikidataItem.get_date(date_str)
self.assertEqual(date.day, 1)
self.assertEqual(date.month, 1)
self.assertEqual(date.year, 2016)
class TestPersonProperties(TestCase):
def test_get_twitter_username(self):
wikidata_id = 'Q560780'
item = wikidata.WikidataItem(wikidata_id)
self.assertEqual(item.get_twitter_username(), 'diederiksamsom')
class TestGovernmentScraper(TestCase):
rutte_2_wikidata_id = 'Q1638648'
def test(self):
government = wikidata_government.get_government(self.rutte_2_wikidata_id)
self.assertEqual(government['name'], 'Kabinet-Rutte II')
self.assertEqual(government['start_date'], datetime.date(2012, 11, 5))
def test_get_members(self):
members = wikidata_government.get_government_members(self.rutte_2_wikidata_id)
self.assertGreater(len(members), 10)
def test_get_parlement_and_politiek_id(self):
person_wikidata_id = 'Q32681'
expected_id = 'vg09llk9rzrp'
item = wikidata.WikidataItem(person_wikidata_id)
parlement_id = item.get_parlement_and_politiek_id()
self.assertEqual(parlement_id, expected_id)
| import logging
from unittest import TestCase
import datetime
from wikidata import wikidata
from wikidata import government as wikidata_government
logger = logging.getLogger(__name__)
class TestSearchParliamentMembers(TestCase):
def test_search_ids_all(self):
member_ids = wikidata.search_parliament_member_ids()
self.assertEqual(len(member_ids), len(set(member_ids)))
self.assertTrue(len(member_ids) > 2200)
def test_search_ids_with_start_date(self):
member_ids = wikidata.search_parliament_member_ids_with_start_date()
self.assertEqual(len(member_ids), len(set(member_ids)))
self.assertTrue(len(member_ids) > 530)
class TestGetParliamentMemberInfo(TestCase):
def test_get_frans_timmermans(self):
logger.info('BEGIN')
wikidata_id = 'Q32681' # Frans Timmermans
item = wikidata.WikidataItem(wikidata_id)
fullname = item.get_label()
self.assertEqual(fullname, 'Frans Timmermans')
given_name = item.get_given_names()[0]
self.assertEqual(given_name, 'Frans')
birth_date = item.get_birth_date()
self.assertEqual(birth_date, datetime.date(day=6, month=5, year=1961))
parlement_positions = item.get_parliament_positions_held()
self.assertEqual(len(parlement_positions), 2)
logger.info('END')
def test_get_fraction(self):
wikidata_id = 'Q2801440' # Martin van Rooijen, 50Plus
item_50plus_id = 'Q27122891'
item = wikidata.WikidataItem(wikidata_id)
positions = item.get_positions_held()
fraction_id = None
for position in positions:
if position['id'] == wikidata.PARLIAMENT_MEMBER_DUTCH_ITEM_ID:
fraction_id = position['part_of_id']
self.assertEqual(fraction_id, item_50plus_id)
class TestPositionHeld(TestCase):
wikidata_id_ft = 'Q32681' # Frans Timmermans
wikidata_id_wa = 'Q474763' # Willem Aantjes
wikidata_id_mr = 'Q57792' # Mark Rutte
def test_search_all(self):
item = wikidata.WikidataItem(self.wikidata_id_ft)
positions = item.get_positions_held()
self.assertEqual(len(positions), 9)
item = wikidata.WikidataItem(self.wikidata_id_wa)
positions = item.get_positions_held()
self.assertEqual(len(positions), 2)
def test_search_parliament_member(self):
item = wikidata.WikidataItem(self.wikidata_id_ft)
positions = item.get_parliament_positions_held()
self.assertEqual(len(positions), 2)
for position in positions:
self.assertEqual(position['id'], wikidata.PARLIAMENT_MEMBER_DUTCH_ITEM_ID)
item = wikidata.WikidataItem(self.wikidata_id_mr)
positions = item.get_parliament_positions_held()
self.assertEqual(len(positions), 4)
class TestFindPoliticalParty(TestCase):
def test_search_pvdd(self):
wikidata_id = wikidata.search_political_party_id('PvdD', language='nl')
item = wikidata.WikidataItem(wikidata_id)
label = item.get_label(language='nl')
self.assertEqual(label, 'Partij voor de Dieren')
def test_search_groenlinks(self):
wikidata_id = wikidata.search_political_party_id('GL', language='nl')
self.assertIsNotNone(wikidata_id)
self.assertGreaterEqual(wikidata_id, 'Q667680')
item = wikidata.WikidataItem(wikidata_id)
label = item.get_label(language='nl')
self.assertEqual(label, 'GroenLinks')
def test_search_vvd(self):
wikidata_id = wikidata.search_political_party_id('VVD', language='nl')
item = wikidata.WikidataItem(wikidata_id)
label = item.get_label(language='nl')
self.assertEqual(label, 'Volkspartij voor Vrijheid en Democratie')
def test_is_political_party(self):
wikidata_id = 'Q275441' # PvdA
item = wikidata.WikidataItem(wikidata_id)
is_pp = item.is_political_party()
self.assertTrue(is_pp)
def test_is_fractie(self):
wikidata_id = 'Q28044800' # Lid-Monasch
item = wikidata.WikidataItem(wikidata_id)
is_fractie = item.is_fractie()
self.assertTrue(is_fractie)
def test_search_group_houwers(self):
wikidata_id = wikidata.search_political_party_id('Houwers', language='nl')
self.assertEqual(wikidata_id, 'Q28044763')
item = wikidata.WikidataItem(wikidata_id)
label = item.get_label(language='nl')
self.assertEqual(label, 'Lid-Houwers')
def test_search_socialist_party(self):
wikidata_id = wikidata.search_political_party_id('Socialistische Partij', language='nl')
self.assertEqual(wikidata_id, 'Q849580')
item = wikidata.WikidataItem(wikidata_id)
label = item.get_label(language='nl')
self.assertEqual(label, 'Socialistische Partij')
class TestDate(TestCase):
def test_date(self):
date_str = '+2016-12-25T00:00:00Z'
date = wikidata.WikidataItem.get_date(date_str)
self.assertEqual(date.day, 25)
self.assertEqual(date.month, 12)
self.assertEqual(date.year, 2016)
date_str = '+2016-00-00T00:00:00Z'
date = wikidata.WikidataItem.get_date(date_str)
self.assertEqual(date.day, 1)
self.assertEqual(date.month, 1)
self.assertEqual(date.year, 2016)
class TestPersonProperties(TestCase):
def test_get_twitter_username(self):
wikidata_id = 'Q560780'
item = wikidata.WikidataItem(wikidata_id)
self.assertEqual(item.get_twitter_username(), 'diederiksamsom')
class TestGovernmentScraper(TestCase):
rutte_2_wikidata_id = 'Q1638648'
def test(self):
government = wikidata_government.get_government(self.rutte_2_wikidata_id)
self.assertEqual(government['name'], 'Kabinet-Rutte II')
self.assertEqual(government['start_date'], datetime.date(2012, 11, 5))
def test_get_members(self):
members = wikidata_government.get_government_members(self.rutte_2_wikidata_id)
self.assertGreater(len(members), 10)
def test_get_parlement_and_politiek_id(self):
person_wikidata_id = 'Q32681'
expected_id = 'vg09llk9rzrp'
item = wikidata.WikidataItem(person_wikidata_id)
parlement_id = item.get_parlement_and_politiek_id()
self.assertEqual(parlement_id, expected_id)
| en | 000208808_openkamer-openkamer_tests_3405b6d79cb4.py | unknown | 2,267 |
# Copyright (C) 2013 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
from flask import Blueprint, g, abort
from sqlalchemy.orm import joinedload, subqueryload
from mbdata.models import (
Recording,
RecordingGIDRedirect,
)
from mbdata.utils import get_something_by_gid
from mbdata.api.data import load_links, query_recording
from mbdata.api.includes import RecordingIncludes
from mbdata.api.utils import (
get_param,
response_ok,
response_error,
)
from mbdata.api.errors import NOT_FOUND_ERROR, INCLUDE_DEPENDENCY_ERROR
from mbdata.api.serialize import serialize_recording
blueprint = Blueprint('recording', __name__)
def get_recording_by_gid(query, gid):
return get_something_by_gid(query, RecordingGIDRedirect, gid)
@blueprint.route('/get')
def handle_get():
gid = get_param('id', type='uuid', required=True)
include = get_param('include', type='enum+', container=RecordingIncludes.parse)
if include.artist and include.artists:
abort(response_error(INCLUDE_DEPENDENCY_ERROR, 'include=artist and include=artists are mutually exclusive'))
recording = get_recording_by_gid(query_recording(g.db, include), gid)
if recording is None:
abort(response_error(NOT_FOUND_ERROR, 'recording not found'))
if include.relationships:
load_links(g.db, [recording], include.relationships)
return response_ok(recording=serialize_recording(recording, include))
| # Copyright (C) 2013 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
from flask import Blueprint, g, abort
from sqlalchemy.orm import joinedload, subqueryload
from mbdata.models import (
Recording,
RecordingGIDRedirect,
)
from mbdata.utils import get_something_by_gid
from mbdata.api.data import load_links, query_recording
from mbdata.api.includes import RecordingIncludes
from mbdata.api.utils import (
get_param,
response_ok,
response_error,
)
from mbdata.api.errors import NOT_FOUND_ERROR, INCLUDE_DEPENDENCY_ERROR
from mbdata.api.serialize import serialize_recording
blueprint = Blueprint('recording', __name__)
def get_recording_by_gid(query, gid):
return get_something_by_gid(query, RecordingGIDRedirect, gid)
@blueprint.route('/get')
def handle_get():
gid = get_param('id', type='uuid', required=True)
include = get_param('include', type='enum+', container=RecordingIncludes.parse)
if include.artist and include.artists:
abort(response_error(INCLUDE_DEPENDENCY_ERROR, 'include=artist and include=artists are mutually exclusive'))
recording = get_recording_by_gid(query_recording(g.db, include), gid)
if recording is None:
abort(response_error(NOT_FOUND_ERROR, 'recording not found'))
if include.relationships:
load_links(g.db, [recording], include.relationships)
return response_ok(recording=serialize_recording(recording, include))
| en | 000542546_markweaversonos-mbdata_recording_80b6321a9edc.py | unknown | 443 |
# ============================================================================
# FILE: junkfile.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
from .base import Base
from time import strftime
from denite.util import expand
import os
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.name = 'junkfile'
self.kind = 'file'
def gather_candidates(self, context):
self.vim.call('junkfile#init')
base = expand(self.vim.vars['junkfile#directory'])
candidates = []
if context['args'] and context['args'][0] == 'new':
context['is_interactive'] = True
filename = strftime('%Y/%m/%Y-%m-%d-%H%M%S.') + context['input']
candidates.append({
'word': os.path.basename(filename),
'abbr': '[new] ' + os.path.basename(filename),
'action__path': os.path.join(base, filename),
})
else:
for root, dirs, files in os.walk(base):
for f in files:
candidates.append({
'word': f,
'action__path': os.path.join(root, f),
})
candidates = sorted(candidates, key=lambda x:
os.path.getmtime(x['action__path']),
reverse=True)
return candidates
| # ============================================================================
# FILE: junkfile.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
from .base import Base
from time import strftime
from denite.util import expand
import os
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.name = 'junkfile'
self.kind = 'file'
def gather_candidates(self, context):
self.vim.call('junkfile#init')
base = expand(self.vim.vars['junkfile#directory'])
candidates = []
if context['args'] and context['args'][0] == 'new':
context['is_interactive'] = True
filename = strftime('%Y/%m/%Y-%m-%d-%H%M%S.') + context['input']
candidates.append({
'word': os.path.basename(filename),
'abbr': '[new] ' + os.path.basename(filename),
'action__path': os.path.join(base, filename),
})
else:
for root, dirs, files in os.walk(base):
for f in files:
candidates.append({
'word': f,
'action__path': os.path.join(root, f),
})
candidates = sorted(candidates, key=lambda x:
os.path.getmtime(x['action__path']),
reverse=True)
return candidates
| en | 000045255_hironei-junkfile.vim_junkfile_cd39e9180084.py | unknown | 382 |
import math
import numpy as np
import skimage.color as color
import skimage.transform as transform
import skimage.util as util
rgb2gray = color.rgb2gray
gray2rgb = color.gray2rgb
imresize = transform.resize
imrescale = transform.rescale
def imcrop(image, x1, y1, x2, y2, pad_mode='constant', **pad_kwargs):
"""Crop an image with padding non-exisiting range.
Parameters
----------
pad_mode:
To be passed to skimage.util.pad as `mode` parameter.
pad_kwargs:
To be passed to skimage.util.pad.
"""
before_h = after_h = before_w = after_w = 0
if y2 > image.shape[0]:
after_h = y2 - image.shape[0]
if y1 < 0:
before_h = -y1
if x2 > image.shape[1]:
after_w = x2 - image.shape[1]
if x1 < 0:
before_w = -x1
x1 += before_w
x2 += before_w
y1 += before_h
y2 += before_h
image = util.pad(image,
[(before_h, after_h), (before_w, after_w)] + [(0, 0)] * (image.ndim - 2),
mode=pad_mode,
**pad_kwargs)
return image[y1:y2, x1:x2, ...]
def immerge(images, n_rows=None, n_cols=None, padding=0, pad_value=0):
"""Merge images to an image with (n_rows * h) * (n_cols * w).
Parameters
----------
images : numpy.array or object which can be converted to numpy.array
Images in shape of N * H * W(* C=1 or 3).
"""
images = np.array(images)
n = images.shape[0]
if n_rows:
n_rows = max(min(n_rows, n), 1)
n_cols = int(n - 0.5) // n_rows + 1
elif n_cols:
n_cols = max(min(n_cols, n), 1)
n_rows = int(n - 0.5) // n_cols + 1
else:
n_rows = int(n ** 0.5)
n_cols = int(n - 0.5) // n_rows + 1
h, w = images.shape[1], images.shape[2]
shape = (h * n_rows + padding * (n_rows - 1),
w * n_cols + padding * (n_cols - 1))
if images.ndim == 4:
shape += (images.shape[3],)
img = np.full(shape, pad_value, dtype=images.dtype)
for idx, image in enumerate(images):
i = idx % n_cols
j = idx // n_cols
img[j * (h + padding):j * (h + padding) + h,
i * (w + padding):i * (w + padding) + w, ...] = image
return img
def grid_split(image, h, w):
"""Split the image into a grid."""
n_rows = math.ceil(image.shape[0] / h)
n_cols = math.ceil(image.shape[1] / w)
rows = []
for r in range(n_rows):
cols = []
for c in range(n_cols):
cols.append(image[r * h: (r + 1) * h, c * w: (c + 1) * w, ...])
rows.append(cols)
return rows
def grid_merge(grid, padding=(0, 0), pad_value=(0, 0)):
"""Merge the grid as an image."""
padding = padding if isinstance(padding, (list, tuple)) else [padding, padding]
pad_value = pad_value if isinstance(pad_value, (list, tuple)) else [pad_value, pad_value]
new_rows = []
for r, row in enumerate(grid):
new_cols = []
for c, col in enumerate(row):
if c != 0:
new_cols.append(np.full([col.shape[0], padding[1], col.shape[2]], pad_value[1], dtype=col.dtype))
new_cols.append(col)
new_cols = np.concatenate(new_cols, axis=1)
if r != 0:
new_rows.append(np.full([padding[0], new_cols.shape[1], new_cols.shape[2]], pad_value[0], dtype=new_cols.dtype))
new_rows.append(new_cols)
grid_merged = np.concatenate(new_rows, axis=0)
return grid_merged
| import math
import numpy as np
import skimage.color as color
import skimage.transform as transform
import skimage.util as util
rgb2gray = color.rgb2gray
gray2rgb = color.gray2rgb
imresize = transform.resize
imrescale = transform.rescale
def imcrop(image, x1, y1, x2, y2, pad_mode='constant', **pad_kwargs):
"""Crop an image with padding non-exisiting range.
Parameters
----------
pad_mode:
To be passed to skimage.util.pad as `mode` parameter.
pad_kwargs:
To be passed to skimage.util.pad.
"""
before_h = after_h = before_w = after_w = 0
if y2 > image.shape[0]:
after_h = y2 - image.shape[0]
if y1 < 0:
before_h = -y1
if x2 > image.shape[1]:
after_w = x2 - image.shape[1]
if x1 < 0:
before_w = -x1
x1 += before_w
x2 += before_w
y1 += before_h
y2 += before_h
image = util.pad(image,
[(before_h, after_h), (before_w, after_w)] + [(0, 0)] * (image.ndim - 2),
mode=pad_mode,
**pad_kwargs)
return image[y1:y2, x1:x2, ...]
def immerge(images, n_rows=None, n_cols=None, padding=0, pad_value=0):
"""Merge images to an image with (n_rows * h) * (n_cols * w).
Parameters
----------
images : numpy.array or object which can be converted to numpy.array
Images in shape of N * H * W(* C=1 or 3).
"""
images = np.array(images)
n = images.shape[0]
if n_rows:
n_rows = max(min(n_rows, n), 1)
n_cols = int(n - 0.5) // n_rows + 1
elif n_cols:
n_cols = max(min(n_cols, n), 1)
n_rows = int(n - 0.5) // n_cols + 1
else:
n_rows = int(n ** 0.5)
n_cols = int(n - 0.5) // n_rows + 1
h, w = images.shape[1], images.shape[2]
shape = (h * n_rows + padding * (n_rows - 1),
w * n_cols + padding * (n_cols - 1))
if images.ndim == 4:
shape += (images.shape[3],)
img = np.full(shape, pad_value, dtype=images.dtype)
for idx, image in enumerate(images):
i = idx % n_cols
j = idx // n_cols
img[j * (h + padding):j * (h + padding) + h,
i * (w + padding):i * (w + padding) + w, ...] = image
return img
def grid_split(image, h, w):
"""Split the image into a grid."""
n_rows = math.ceil(image.shape[0] / h)
n_cols = math.ceil(image.shape[1] / w)
rows = []
for r in range(n_rows):
cols = []
for c in range(n_cols):
cols.append(image[r * h: (r + 1) * h, c * w: (c + 1) * w, ...])
rows.append(cols)
return rows
def grid_merge(grid, padding=(0, 0), pad_value=(0, 0)):
"""Merge the grid as an image."""
padding = padding if isinstance(padding, (list, tuple)) else [padding, padding]
pad_value = pad_value if isinstance(pad_value, (list, tuple)) else [pad_value, pad_value]
new_rows = []
for r, row in enumerate(grid):
new_cols = []
for c, col in enumerate(row):
if c != 0:
new_cols.append(np.full([col.shape[0], padding[1], col.shape[2]], pad_value[1], dtype=col.dtype))
new_cols.append(col)
new_cols = np.concatenate(new_cols, axis=1)
if r != 0:
new_rows.append(np.full([padding[0], new_cols.shape[1], new_cols.shape[2]], pad_value[0], dtype=new_cols.dtype))
new_rows.append(new_cols)
grid_merged = np.concatenate(new_rows, axis=0)
return grid_merged
| en | 000731864_AlexBlack2202-EigenGAN-Tensorflow_transform_f5124294c7f5.py | unknown | 1,269 |
from typing import List
from loguru import logger
from app.schemas.verdict import Detail, Verdict
from app.services.emailrep import EmailRep
class EmailRepVerdictFactory:
def __init__(self, email: str):
self.email = email
self.name = "EmailRep"
async def to_model(self) -> Verdict:
details: List[Detail] = []
malicious = False
email_rep = EmailRep()
try:
res = await email_rep.get(self.email)
if res.suspicious is True:
malicious = True
description = f"{self.email} is suspicious. See https://emailrep.io/{self.email} for details."
details.append(Detail(key="EmailRep", description=description))
else:
description = f"{self.email} is not suspicious. See https://emailrep.io/{self.email} for details."
details.append(Detail(key="EmailRep", description=description))
except Exception as error:
logger.error(error)
return Verdict(name=self.name, malicious=malicious, details=details)
@classmethod
async def from_email(cls, email) -> Verdict:
obj = cls(email)
return await obj.to_model()
| from typing import List
from loguru import logger
from app.schemas.verdict import Detail, Verdict
from app.services.emailrep import EmailRep
class EmailRepVerdictFactory:
def __init__(self, email: str):
self.email = email
self.name = "EmailRep"
async def to_model(self) -> Verdict:
details: List[Detail] = []
malicious = False
email_rep = EmailRep()
try:
res = await email_rep.get(self.email)
if res.suspicious is True:
malicious = True
description = f"{self.email} is suspicious. See https://emailrep.io/{self.email} for details."
details.append(Detail(key="EmailRep", description=description))
else:
description = f"{self.email} is not suspicious. See https://emailrep.io/{self.email} for details."
details.append(Detail(key="EmailRep", description=description))
except Exception as error:
logger.error(error)
return Verdict(name=self.name, malicious=malicious, details=details)
@classmethod
async def from_email(cls, email) -> Verdict:
obj = cls(email)
return await obj.to_model()
| en | 000555755_tapsykrett-emailanalyze_emailrep_44f0d0e107f3.py | unknown | 319 |
import sys
import os
import subprocess
import shutil
import time
import logging
from Bio import SeqIO
from multiprocessing import Pool
import pysam
from telr.TELR_utility import mkdir, check_exist, format_time
def get_local_contigs(
    assembler,
    polisher,
    contig_dir,
    vcf_parsed,
    out,
    sample_name,
    bam,
    raw_reads,
    thread,
    presets,
    polish_iterations,
):
    """Perform local assembly using reads from parsed VCF file in parallel.

    For every candidate insertion locus listed in ``vcf_parsed``, the
    supporting reads are extracted, assembled with wtdbg2 or flye, and
    optionally polished.  Contigs that assembled successfully are merged
    into one FASTA and also written individually to ``contig_dir``.

    Args:
        assembler (str): "wtdbg2" selects wtdbg2; any other value selects flye.
        polisher (str): "wtdbg2" selects wtdbg2 polishing; otherwise flye.
        contig_dir (str): Directory that receives per-locus contig FASTAs.
        vcf_parsed (str): Parsed VCF; column 1-3 give the locus coordinates
            and column 9 the comma-separated supporting read names.
        out (str): Working/output directory.
        sample_name (str): Prefix used for output file names.
        bam (str): Read alignment file (passed to input preparation).
        raw_reads (str): Raw long-read file the supporting reads come from.
        thread (int): Number of loci assembled in parallel.
        presets (str): Sequencing preset, "pacbio" or "ont".
        polish_iterations (int): Polishing rounds; 0 disables polishing.

    Returns:
        tuple: (path to merged contig FASTA,
                set of locus names whose assembly produced a contig).
    """
    # Prepare reads used for local assembly and polishing
    sv_reads_dir = os.path.join(out, "sv_reads")
    try:
        prep_assembly_inputs(
            vcf_parsed, out, sample_name, bam, raw_reads, sv_reads_dir, read_type="sv"
        )
    except Exception as e:
        print(e)
        print("Prepare local assembly input data failed, exiting...")
        sys.exit(1)
    mkdir(contig_dir)
    # Build one argument list per locus; k mirrors the "contig<k>" naming
    # used by prep_assembly_inputs for the per-locus read files.
    k = 0
    asm_pa_list = []
    with open(vcf_parsed, "r") as input:
        for line in input:
            entry = line.replace("\n", "").split("\t")
            # locus name: <chrom>_<start>_<end>
            contig_name = "_".join([entry[0], entry[1], entry[2]])
            # rename variant reads
            sv_reads = sv_reads_dir + "/contig" + str(k)
            sv_reads_rename = sv_reads_dir + "/" + contig_name + ".reads.fa"
            os.rename(sv_reads, sv_reads_rename)
            # each worker assembles with a single thread; parallelism comes
            # from the process pool below
            thread_asm = 1
            asm_pa = [
                sv_reads_rename,
                contig_dir,
                contig_name,
                thread_asm,
                presets,
                assembler,
                polisher,
                polish_iterations,
            ]
            asm_pa_list.append(asm_pa)
            k = k + 1
    # run assembly in parallel
    logging.info("Perform local assembly of non-reference TE loci...")
    start_time = time.time()
    try:
        pool = Pool(processes=thread)
        contig_list = pool.map(run_assembly_polishing, asm_pa_list)
        pool.close()
        pool.join()
    except Exception as e:
        print(e)
        print("Local assembly failed, exiting...")
        sys.exit(1)
    proc_time = time.time() - start_time
    # merge all contigs
    assembly_passed_loci = set()
    merged_contigs = os.path.join(out, sample_name + ".contigs.fa")
    with open(merged_contigs, "w") as merged_output_handle:
        # contig_list entries are paths (or None for failed loci);
        # check_exist filters out the failures
        for contig in contig_list:
            if check_exist(contig):
                contig_name = os.path.basename(contig).replace(".cns.fa", "")
                assembly_passed_loci.add(contig_name)
                parsed_contig = os.path.join(contig_dir, contig_name + ".cns.ctg1.fa")
                with open(contig, "r") as input:
                    records = SeqIO.parse(input, "fasta")
                    for record in records:
                        # keep only the primary contig (wtdbg2 names it
                        # "ctg1", flye "contig_1"); relabel it by locus
                        if record.id == "ctg1" or record.id == "contig_1":
                            record.id = contig_name
                            record.description = "len=" + str(len(record.seq))
                            SeqIO.write(record, merged_output_handle, "fasta")
                            with open(parsed_contig, "w") as parsed_output_handle:
                                SeqIO.write(record, parsed_output_handle, "fasta")
    logging.info("Local assembly finished in " + format_time(proc_time))
    return merged_contigs, assembly_passed_loci
def run_assembly_polishing(args):
    """Assemble a single locus and optionally polish the resulting contig.

    Args:
        args (sequence): (reads, asm_dir, contig_name, thread, presets,
            assembler, polisher, polish_iterations) — packed into one
            sequence so the function can be used with ``Pool.map``.

    Returns:
        str or None: Path to the consensus contig FASTA, or None when
        assembly or polishing did not produce a contig.
    """
    (
        reads,
        asm_dir,
        contig_name,
        thread,
        presets,
        assembler,
        polisher,
        polish_iterations,
    ) = args

    # run assembly with the selected tool
    if assembler == "wtdbg2":
        asm_cns = run_wtdbg2_assembly(reads, asm_dir, contig_name, thread, presets)
    else:
        asm_cns = run_flye_assembly(reads, asm_dir, contig_name, thread, presets)

    if not check_exist(asm_cns):
        print("assembly failed")
        return None

    # run polishing (skipped entirely when zero iterations are requested)
    if polish_iterations > 0:
        if polisher == "wtdbg2":
            asm_cns = run_wtdbg2_polishing(
                asm_cns, reads, thread, polish_iterations, presets
            )
        else:
            asm_cns = run_flye_polishing(
                asm_cns, reads, asm_dir, contig_name, thread, polish_iterations, presets
            )

    return asm_cns if check_exist(asm_cns) else None
def run_flye_polishing(
    asm_cns, reads, asm_dir, contig_name, thread, polish_iterations, presets
):
    """Polish a draft contig with Flye's ``--polish-target`` mode.

    The polished sequence replaces ``asm_cns`` in place and Flye's
    temporary working directory is removed afterwards.

    Args:
        asm_cns (str): Path to the draft consensus FASTA to polish.
        reads (str): Reads used for polishing.
        asm_dir (str): Directory in which the temporary Flye dir is created.
        contig_name (str): Locus name, used as the temp directory name.
        thread (int): Threads passed to Flye.
        polish_iterations (int): Number of polishing rounds.
        presets (str): "pacbio" or "ont"; selects the Flye read-type flag.

    Returns:
        str or None: ``asm_cns`` on success, None on failure.
    """
    presets_flye = "--pacbio-raw" if presets == "pacbio" else "--nano-raw"

    tmp_out_dir = os.path.join(asm_dir, contig_name)
    mkdir(tmp_out_dir)
    flye_cmd = [
        "flye",
        "--polish-target",
        asm_cns,
        presets_flye,
        reads,
        "--out-dir",
        tmp_out_dir,
        "--thread",
        str(thread),
        "--iterations",
        str(polish_iterations),
    ]
    try:
        subprocess.call(flye_cmd)
    except Exception as e:
        print(e)
        print("Polishing failed, exiting...")
        return None

    # Flye names its final output after the last polishing round.
    polished_contig = os.path.join(
        tmp_out_dir, "polished_" + str(polish_iterations) + ".fasta"
    )
    if not check_exist(polished_contig):
        return None
    os.rename(polished_contig, asm_cns)
    shutil.rmtree(tmp_out_dir)
    return asm_cns
def run_wtdbg2_polishing(asm_cns, reads, threads, polish_iterations, presets):
    """Run wtdbg2 polishing.

    Iteratively polishes ``asm_cns`` in place: each round maps ``reads``
    back to the current consensus with minimap2 and derives a new
    consensus with wtpoa-cns, for at most ``polish_iterations`` rounds.

    Args:
        asm_cns (str): Path to the consensus FASTA; overwritten each round.
        reads (str): Reads used for polishing.
        threads (int): Requested thread count (capped at 4 below).
        polish_iterations (int): Maximum number of polishing rounds.
        presets (str): "pacbio" or "ont"; selects the minimap2 preset.

    Returns:
        str or None: ``asm_cns`` on success; None if a step timed out or
        the final consensus file is missing.
    """
    if presets == "pacbio":
        presets_minimap2 = "map-pb"
    else:
        presets_minimap2 = "map-ont"
    # polish consensus
    # cap the thread count — presumably to limit per-locus resource use
    # when many loci are polished in parallel; TODO confirm
    threads = str(min(threads, 4))
    bam = asm_cns + ".bam"
    k = 0
    while True:
        # align reads to contigs
        command = (
            "minimap2 -t "
            + threads
            + " -ax "
            + presets_minimap2
            + " -r2k "
            + asm_cns
            + " "
            + reads
            + " | samtools sort -@"
            + threads
            + " > "
            + bam
        )
        try:
            subprocess.run(
                command,
                shell=True,
                timeout=300,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.STDOUT,
            )
        except subprocess.TimeoutExpired:
            # bare return -> None signals polishing failure to the caller
            print("fail to map reads to contig: " + asm_cns)
            return
        # run wtpoa-cns to get polished contig
        cns_tmp = asm_cns + ".tmp"
        command = (
            # -F0x900 drops secondary and supplementary alignments
            "samtools view -F0x900 "
            + bam
            + " | wtpoa-cns -t "
            + threads
            + " -d "
            + asm_cns
            + " -i - -fo "
            + cns_tmp
        )
        try:
            subprocess.run(
                command,
                shell=True,
                timeout=300,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.STDOUT,
            )
        except subprocess.TimeoutExpired:
            print("fail to polish contig: " + asm_cns)
            return
        if check_exist(cns_tmp):
            # replace the consensus with the freshly polished version and
            # drop the intermediate alignment before the next round
            os.rename(cns_tmp, asm_cns)
            os.remove(bam)
        else:
            # wtpoa-cns produced nothing; stop iterating and fall through
            # to the final existence check on the last good consensus
            break
        k = k + 1
        if k >= polish_iterations:
            break
    if check_exist(asm_cns):
        return asm_cns
    else:
        print("polishing failed for " + asm_cns + "\n")
        return None
def run_flye_assembly(sv_reads, asm_dir, contig_name, thread, presets):
    """Assemble the reads of one locus with Flye (zero polishing rounds).

    On success the assembled contig is moved to
    ``<asm_dir>/<contig_name>.cns.fa`` and Flye's temporary working
    directory is deleted.

    Args:
        sv_reads (str): FASTA of reads supporting this locus.
        asm_dir (str): Directory for the output contig and the temp dir.
        contig_name (str): Locus name, used for the temp dir and output.
        thread (int): Threads passed to Flye.
        presets (str): "pacbio" or "ont"; selects the Flye read-type flag.

    Returns:
        str or None: Path to the renamed contig FASTA, or None on failure.
    """
    presets_flye = "--pacbio-raw" if presets == "pacbio" else "--nano-raw"

    tmp_out_dir = os.path.join(asm_dir, contig_name)
    mkdir(tmp_out_dir)
    flye_cmd = [
        "flye",
        presets_flye,
        sv_reads,
        "--out-dir",
        tmp_out_dir,
        "--thread",
        str(thread),
        "--iterations",
        "0",
    ]
    try:
        subprocess.call(flye_cmd)
    except Exception as e:
        print(e)
        print("Assembly failed, exiting...")
        return

    # rename Flye's fixed output name to the per-locus convention
    contig_path = os.path.join(tmp_out_dir, "assembly.fasta")
    contig_path_new = os.path.join(asm_dir, contig_name + ".cns.fa")
    if not check_exist(contig_path):
        print("assembly failed")
        return None
    os.rename(contig_path, contig_path_new)
    # remove tmp files
    shutil.rmtree(tmp_out_dir)
    return contig_path_new
def run_wtdbg2_assembly(sv_reads, asm_dir, contig_name, thread, presets):
    """Run wtdbg2 assembly.

    Builds a contig layout with wtdbg2, then derives a consensus with
    wtpoa-cns and moves it to ``<asm_dir>/<contig_name>.cns.fa``.

    Args:
        sv_reads (str): FASTA of reads supporting this locus; its
            ``.reads.fa`` suffix is stripped to form the output prefix.
        asm_dir (str): Directory that receives the renamed consensus.
        contig_name (str): Locus name used for the output file.
        thread (int): Threads for wtdbg2 (consensus step capped at 4).
        presets (str): "pacbio" or "ont"; selects the wtdbg2 preset.

    Returns:
        str or None: Path to the consensus FASTA on success; None when a
        step times out or no output is produced.  Note: if the layout
        file is never created, the function falls off the end and
        implicitly returns None.
    """
    if presets == "pacbio":
        presets_wtdbg2 = "rs"
    else:
        presets_wtdbg2 = "ont"
    prefix = sv_reads.replace(".reads.fa", "")
    try:
        subprocess.run(
            [
                "wtdbg2",
                "-x",
                presets_wtdbg2,
                "-q",
                "-AS",
                "1",
                "-g",
                "30k",
                "-t",
                str(thread),
                "-i",
                sv_reads,
                "-fo",
                prefix,
            ],
            timeout=300,
        )
    except subprocess.TimeoutExpired:
        # bare return -> None signals assembly failure to the caller
        print("fail to build contig layout for contig: " + contig_name)
        return
    except Exception as e:
        print(e)
        print("wtdbg2 failed, exiting...")
        return None
    # derive consensus
    contig_layout = prefix + ".ctg.lay.gz"
    if check_exist(contig_layout):
        # cap consensus threads — presumably to limit per-locus resource
        # use when many loci run in parallel; TODO confirm
        cns_thread = str(min(thread, 4))
        consensus = prefix + ".cns.fa"
        try:
            subprocess.run(
                [
                    "wtpoa-cns",
                    "-q",
                    "-t",
                    cns_thread,
                    "-i",
                    contig_layout,
                    "-fo",
                    consensus,
                ],
                timeout=300,
            )
        except subprocess.TimeoutExpired:
            print("fail to assemble contig: " + contig_name)
            return None
        if check_exist(consensus):
            # move the consensus to the shared contig directory under the
            # per-locus naming convention
            consensus_rename = os.path.join(asm_dir, contig_name + ".cns.fa")
            os.rename(consensus, consensus_rename)
            return consensus_rename
        else:
            return None
def prep_assembly_inputs(
    vcf_parsed, out, sample_name, bam, raw_reads, reads_dir, read_type="sv"
):
    """Prepare reads for local assembly"""
    # logging.info("Prepare reads for local assembly")
    if read_type == "sv":  # TODO: figure out what this does
        # extract read IDs
        read_ids = os.path.join(out, sample_name + ".id")
        with open(vcf_parsed, "r") as input, open(read_ids, "w") as output:
            for line in input:
                entry = line.replace("\n", "").split("\t")
                # Column 8 holds the comma-separated supporting-read names.
                read_list = entry[8].split(",")
                for read in read_list:
                    output.write(read + "\n")
    else:  # TODO: think about using this for assembly, filter for cigar reads
        # Collect every read aligned within +/- 1 kb of each insertion
        # midpoint, and append the per-locus read count to the parsed VCF.
        window = 1000
        samfile = pysam.AlignmentFile(bam, "rb")
        read_ids = os.path.join(out, sample_name + ".id")
        vcf_parsed_new = vcf_parsed + ".new"
        with open(vcf_parsed, "r") as input, open(read_ids, "w") as output, open(
            vcf_parsed_new, "w"
        ) as VCF:
            for line in input:
                entry = line.replace("\n", "").split("\t")
                # get sniffles read list
                read_list = entry[8].split(",")
                reads_sniffles = set(read_list)  # NOTE(review): unused below
                ins_chr = entry[0]
                ins_breakpoint = round((int(entry[1]) + int(entry[2])) / 2)
                start = ins_breakpoint - window
                end = ins_breakpoint + window
                reads = set()
                # coverage = 0
                for read in samfile.fetch(ins_chr, start, end):
                    reads.add(read.query_name)
                for read in reads:
                    output.write(read + "\n")
                # write: original line plus the read count as an extra column
                out_line = line.replace("\n", "") + "\t" + str(len(reads))
                VCF.write(out_line + "\n")
        # Downstream steps read the augmented VCF from here on.
        vcf_parsed = vcf_parsed_new
    # generate unique ID list
    read_ids_unique = read_ids + ".unique"
    command = "cat " + read_ids + " | sort | uniq"
    with open(read_ids_unique, "w") as output:
        subprocess.call(command, stdout=output, shell=True)
    # filter raw reads using read list
    subset_fa = os.path.join(out, sample_name + ".subset.fa")
    command = "seqtk subseq " + raw_reads + " " + read_ids_unique + " | seqtk seq -a"
    with open(subset_fa, "w") as output:
        subprocess.call(command, stdout=output, shell=True)
    # reorder reads so they appear in the same order as the ID list
    subset_fa_reorder = out + "/" + sample_name + ".subset.reorder.fa"
    extract_reads(subset_fa, read_ids, subset_fa_reorder)
    # separate reads into multiple files, using csplit
    mkdir(reads_dir)
    csplit_prefix = reads_dir + "/contig"
    # m accumulates csplit line offsets: 2 lines (header + sequence) per read.
    m = []
    k = 1
    with open(vcf_parsed, "r") as input:
        for line in input:
            entry = line.replace("\n", "").split("\t")
            if read_type == "sv":
                k = k + 2 * (len(entry[8].split(",")))
            else:
                # Column 14 carries the read count appended above.
                k = k + 2 * int(entry[14])
            m.append(k)
    if len(m) == 1:
        # Single locus: no split needed, just copy.
        subprocess.call(["cp", subset_fa_reorder, reads_dir + "/contig0"])
    elif len(m) == 0:
        print("No insertion detected, exiting...")
    else:
        # Drop the last offset (csplit splits *before* each given line).
        m = m[:-1]
        index = " ".join(str(i) for i in m)
        command = (
            "csplit -s -f " + csplit_prefix + " -n 1 " + subset_fa_reorder + " " + index
        )
        subprocess.call(command, shell=True)
    # remove tmp files
    os.remove(read_ids)
    os.remove(read_ids_unique)
    os.remove(subset_fa)
    os.remove(subset_fa_reorder)
def extract_reads(reads, list, out):
    """Write the FASTA records named in the ID list file to *out*.

    Records are copied verbatim (raw bytes) from *reads*, in the order the
    IDs appear in *list*; a duplicated ID is written once per occurrence.
    """
    record_dict = SeqIO.index(reads, "fasta")
    with open(list, "r") as id_handle, open(out, "wb") as output_handle:
        for raw_line in id_handle:
            read_id = raw_line.rstrip("\n")
            output_handle.write(record_dict.get_raw(read_id))
| import sys
import os
import subprocess
import shutil
import time
import logging
from Bio import SeqIO
from multiprocessing import Pool
import pysam
from telr.TELR_utility import mkdir, check_exist, format_time
def get_local_contigs(
    assembler,
    polisher,
    contig_dir,
    vcf_parsed,
    out,
    sample_name,
    bam,
    raw_reads,
    thread,
    presets,
    polish_iterations,
):
    """Perform local assembly using reads from parsed VCF file in parallel"""
    # Prepare reads used for local assembly and polishing
    sv_reads_dir = os.path.join(out, "sv_reads")
    try:
        prep_assembly_inputs(
            vcf_parsed, out, sample_name, bam, raw_reads, sv_reads_dir, read_type="sv"
        )
    except Exception as e:
        print(e)
        print("Prepare local assembly input data failed, exiting...")
        sys.exit(1)
    mkdir(contig_dir)
    # Build one argument list per locus for the worker pool; k indexes the
    # "contigN" files produced by prep_assembly_inputs in VCF order.
    k = 0
    asm_pa_list = []
    with open(vcf_parsed, "r") as input:
        for line in input:
            entry = line.replace("\n", "").split("\t")
            # Locus name: chrom_start_end.
            contig_name = "_".join([entry[0], entry[1], entry[2]])
            # rename variant reads
            sv_reads = sv_reads_dir + "/contig" + str(k)
            sv_reads_rename = sv_reads_dir + "/" + contig_name + ".reads.fa"
            os.rename(sv_reads, sv_reads_rename)
            # One thread per locus; parallelism is across loci via the Pool.
            thread_asm = 1
            asm_pa = [
                sv_reads_rename,
                contig_dir,
                contig_name,
                thread_asm,
                presets,
                assembler,
                polisher,
                polish_iterations,
            ]
            asm_pa_list.append(asm_pa)
            k = k + 1
    # run assembly in parallel
    logging.info("Perform local assembly of non-reference TE loci...")
    start_time = time.time()
    try:
        pool = Pool(processes=thread)
        # Each worker returns the consensus FASTA path or None on failure.
        contig_list = pool.map(run_assembly_polishing, asm_pa_list)
        pool.close()
        pool.join()
    except Exception as e:
        print(e)
        print("Local assembly failed, exiting...")
        sys.exit(1)
    proc_time = time.time() - start_time
    # merge all contigs
    assembly_passed_loci = set()
    merged_contigs = os.path.join(out, sample_name + ".contigs.fa")
    with open(merged_contigs, "w") as merged_output_handle:
        for contig in contig_list:
            if check_exist(contig):
                contig_name = os.path.basename(contig).replace(".cns.fa", "")
                assembly_passed_loci.add(contig_name)
                parsed_contig = os.path.join(contig_dir, contig_name + ".cns.ctg1.fa")
                with open(contig, "r") as input:
                    records = SeqIO.parse(input, "fasta")
                    for record in records:
                        # Keep only the primary contig (wtdbg2 names it
                        # "ctg1", Flye "contig_1") and relabel it after the
                        # locus so downstream steps can match it back.
                        if record.id == "ctg1" or record.id == "contig_1":
                            record.id = contig_name
                            record.description = "len=" + str(len(record.seq))
                            SeqIO.write(record, merged_output_handle, "fasta")
                            with open(parsed_contig, "w") as parsed_output_handle:
                                SeqIO.write(record, parsed_output_handle, "fasta")
    logging.info("Local assembly finished in " + format_time(proc_time))
    return merged_contigs, assembly_passed_loci
def run_assembly_polishing(args):
    """Worker: assemble one locus, then optionally polish the consensus.

    *args* is the 8-element list built by get_local_contigs:
    [reads, asm_dir, contig_name, thread, presets, assembler, polisher,
    polish_iterations].  Returns the path of the final consensus FASTA,
    or None if any stage failed.
    """
    (reads, asm_dir, contig_name, thread, presets,
     assembler, polisher, polish_iterations) = args
    # assembly stage
    if assembler == "wtdbg2":
        asm_cns = run_wtdbg2_assembly(reads, asm_dir, contig_name, thread, presets)
    else:
        asm_cns = run_flye_assembly(reads, asm_dir, contig_name, thread, presets)
    if not check_exist(asm_cns):
        print("assembly failed")
        return None
    # polishing stage
    if polish_iterations > 0:
        if polisher == "wtdbg2":
            asm_cns = run_wtdbg2_polishing(
                asm_cns, reads, thread, polish_iterations, presets
            )
        else:
            asm_cns = run_flye_polishing(
                asm_cns, reads, asm_dir, contig_name, thread, polish_iterations, presets
            )
    return asm_cns if check_exist(asm_cns) else None
def run_flye_polishing(
    asm_cns, reads, asm_dir, contig_name, thread, polish_iterations, presets
):
    """Polish an assembled consensus with Flye's --polish-target mode.

    Args:
        asm_cns: consensus FASTA to polish (overwritten in place on success).
        reads: supporting reads FASTA.
        asm_dir: assembly output directory (temp dir is created under it).
        contig_name: locus identifier, used for the temp dir name.
        thread: number of Flye threads.
        polish_iterations: number of polishing rounds.
        presets: "pacbio" selects --pacbio-raw; anything else --nano-raw.

    Returns:
        *asm_cns* on success, None on failure.
    """
    if presets == "pacbio":
        presets_flye = "--pacbio-raw"
    else:
        presets_flye = "--nano-raw"
    tmp_out_dir = os.path.join(asm_dir, contig_name)
    mkdir(tmp_out_dir)
    try:
        subprocess.call(
            [
                "flye",
                "--polish-target",
                asm_cns,
                presets_flye,
                reads,
                "--out-dir",
                tmp_out_dir,
                "--threads",  # fixed: Flye's option is --threads, not --thread
                str(thread),
                "--iterations",
                str(polish_iterations),
            ]
        )
    except Exception as e:
        print(e)
        print("Polishing failed, exiting...")
        return None
    # Flye names its output after the final iteration number.
    polished_contig = os.path.join(
        tmp_out_dir, "polished_" + str(polish_iterations) + ".fasta"
    )
    if check_exist(polished_contig):
        # Replace the unpolished consensus in place and drop the temp dir.
        os.rename(polished_contig, asm_cns)
        shutil.rmtree(tmp_out_dir)
        return asm_cns
    else:
        return None
def run_wtdbg2_polishing(asm_cns, reads, threads, polish_iterations, presets):
    """Run wtdbg2 polishing"""
    # Choose the matching minimap2 preset for the read technology.
    if presets == "pacbio":
        presets_minimap2 = "map-pb"
    else:
        presets_minimap2 = "map-ont"
    # polish consensus
    # wtpoa-cns gains little beyond a few threads; cap at 4.
    threads = str(min(threads, 4))
    bam = asm_cns + ".bam"
    k = 0
    # Each round: remap reads to the current consensus, re-derive the
    # consensus with wtpoa-cns, and overwrite asm_cns in place.
    while True:
        # align reads to contigs
        command = (
            "minimap2 -t "
            + threads
            + " -ax "
            + presets_minimap2
            + " -r2k "
            + asm_cns
            + " "
            + reads
            + " | samtools sort -@"
            + threads
            + " > "
            + bam
        )
        try:
            subprocess.run(
                command,
                shell=True,
                timeout=300,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.STDOUT,
            )
        except subprocess.TimeoutExpired:
            print("fail to map reads to contig: " + asm_cns)
            return
        # run wtpoa-cns to get polished contig
        # -F0x900 drops secondary and supplementary alignments.
        cns_tmp = asm_cns + ".tmp"
        command = (
            "samtools view -F0x900 "
            + bam
            + " | wtpoa-cns -t "
            + threads
            + " -d "
            + asm_cns
            + " -i - -fo "
            + cns_tmp
        )
        try:
            subprocess.run(
                command,
                shell=True,
                timeout=300,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.STDOUT,
            )
        except subprocess.TimeoutExpired:
            print("fail to polish contig: " + asm_cns)
            return
        if check_exist(cns_tmp):
            # Promote the polished consensus; next round maps against it.
            os.rename(cns_tmp, asm_cns)
            os.remove(bam)
        else:
            # Polishing produced nothing; stop iterating.
            break
        k = k + 1
        if k >= polish_iterations:
            break
    if check_exist(asm_cns):
        return asm_cns
    else:
        print("polishing failed for " + asm_cns + "\n")
        return None
def run_flye_assembly(sv_reads, asm_dir, contig_name, thread, presets):
    """Assemble one insertion locus with Flye.

    Args:
        sv_reads: FASTA file with the supporting reads for this locus.
        asm_dir: directory where the final contig FASTA is placed.
        contig_name: locus identifier; used for the temp dir and output name.
        thread: number of Flye threads.
        presets: "pacbio" selects --pacbio-raw; anything else --nano-raw.

    Returns:
        Path to "<contig_name>.cns.fa" on success, None on failure.
    """
    if presets == "pacbio":
        presets_flye = "--pacbio-raw"
    else:
        presets_flye = "--nano-raw"
    tmp_out_dir = os.path.join(asm_dir, contig_name)
    mkdir(tmp_out_dir)
    try:
        # "--iterations 0" skips Flye's internal polishing; polishing is
        # handled separately downstream.
        subprocess.call(
            [
                "flye",
                presets_flye,
                sv_reads,
                "--out-dir",
                tmp_out_dir,
                "--threads",  # fixed: Flye's option is --threads, not --thread
                str(thread),
                "--iterations",
                "0",
            ]
        )
    except Exception as e:
        print(e)
        print("Assembly failed, exiting...")
        return None  # fixed: was a bare "return"; keep the contract explicit
    # rename contigs
    contig_path = os.path.join(tmp_out_dir, "assembly.fasta")
    contig_path_new = os.path.join(asm_dir, contig_name + ".cns.fa")
    if check_exist(contig_path):
        os.rename(contig_path, contig_path_new)
        # remove tmp files
        shutil.rmtree(tmp_out_dir)
        return contig_path_new
    else:
        print("assembly failed")
        return None
def run_wtdbg2_assembly(sv_reads, asm_dir, contig_name, thread, presets):
    """Run wtdbg2 assembly"""
    # Map the sequencing-platform preset onto wtdbg2's -x preset names.
    if presets == "pacbio":
        presets_wtdbg2 = "rs"
    else:
        presets_wtdbg2 = "ont"
    # Output prefix: the reads path minus its ".reads.fa" suffix.
    prefix = sv_reads.replace(".reads.fa", "")
    try:
        # Build the contig layout; capped at 5 minutes per locus.
        # -g 30k is a genome-size hint for a single local insertion.
        subprocess.run(
            [
                "wtdbg2",
                "-x",
                presets_wtdbg2,
                "-q",
                "-AS",
                "1",
                "-g",
                "30k",
                "-t",
                str(thread),
                "-i",
                sv_reads,
                "-fo",
                prefix,
            ],
            timeout=300,
        )
    except subprocess.TimeoutExpired:
        print("fail to build contig layout for contig: " + contig_name)
        return
    except Exception as e:
        print(e)
        print("wtdbg2 failed, exiting...")
        return None
    # derive consensus
    contig_layout = prefix + ".ctg.lay.gz"
    if check_exist(contig_layout):
        # wtpoa-cns gains little beyond a few threads; cap at 4.
        cns_thread = str(min(thread, 4))
        consensus = prefix + ".cns.fa"
        try:
            subprocess.run(
                [
                    "wtpoa-cns",
                    "-q",
                    "-t",
                    cns_thread,
                    "-i",
                    contig_layout,
                    "-fo",
                    consensus,
                ],
                timeout=300,
            )
        except subprocess.TimeoutExpired:
            print("fail to assemble contig: " + contig_name)
            return None
        # Move the consensus next to the other per-locus contigs under a
        # canonical "<contig_name>.cns.fa" name.
        if check_exist(consensus):
            consensus_rename = os.path.join(asm_dir, contig_name + ".cns.fa")
            os.rename(consensus, consensus_rename)
            return consensus_rename
        else:
            return None
def prep_assembly_inputs(
    vcf_parsed, out, sample_name, bam, raw_reads, reads_dir, read_type="sv"
):
    """Prepare reads for local assembly"""
    # logging.info("Prepare reads for local assembly")
    if read_type == "sv":  # TODO: figure out what this does
        # extract read IDs
        read_ids = os.path.join(out, sample_name + ".id")
        with open(vcf_parsed, "r") as input, open(read_ids, "w") as output:
            for line in input:
                entry = line.replace("\n", "").split("\t")
                # Column 8 holds the comma-separated supporting-read names.
                read_list = entry[8].split(",")
                for read in read_list:
                    output.write(read + "\n")
    else:  # TODO: think about using this for assembly, filter for cigar reads
        # Collect every read aligned within +/- 1 kb of each insertion
        # midpoint, and append the per-locus read count to the parsed VCF.
        window = 1000
        samfile = pysam.AlignmentFile(bam, "rb")
        read_ids = os.path.join(out, sample_name + ".id")
        vcf_parsed_new = vcf_parsed + ".new"
        with open(vcf_parsed, "r") as input, open(read_ids, "w") as output, open(
            vcf_parsed_new, "w"
        ) as VCF:
            for line in input:
                entry = line.replace("\n", "").split("\t")
                # get sniffles read list
                read_list = entry[8].split(",")
                reads_sniffles = set(read_list)  # NOTE(review): unused below
                ins_chr = entry[0]
                ins_breakpoint = round((int(entry[1]) + int(entry[2])) / 2)
                start = ins_breakpoint - window
                end = ins_breakpoint + window
                reads = set()
                # coverage = 0
                for read in samfile.fetch(ins_chr, start, end):
                    reads.add(read.query_name)
                for read in reads:
                    output.write(read + "\n")
                # write: original line plus the read count as an extra column
                out_line = line.replace("\n", "") + "\t" + str(len(reads))
                VCF.write(out_line + "\n")
        # Downstream steps read the augmented VCF from here on.
        vcf_parsed = vcf_parsed_new
    # generate unique ID list
    read_ids_unique = read_ids + ".unique"
    command = "cat " + read_ids + " | sort | uniq"
    with open(read_ids_unique, "w") as output:
        subprocess.call(command, stdout=output, shell=True)
    # filter raw reads using read list
    subset_fa = os.path.join(out, sample_name + ".subset.fa")
    command = "seqtk subseq " + raw_reads + " " + read_ids_unique + " | seqtk seq -a"
    with open(subset_fa, "w") as output:
        subprocess.call(command, stdout=output, shell=True)
    # reorder reads so they appear in the same order as the ID list
    subset_fa_reorder = out + "/" + sample_name + ".subset.reorder.fa"
    extract_reads(subset_fa, read_ids, subset_fa_reorder)
    # separate reads into multiple files, using csplit
    mkdir(reads_dir)
    csplit_prefix = reads_dir + "/contig"
    # m accumulates csplit line offsets: 2 lines (header + sequence) per read.
    m = []
    k = 1
    with open(vcf_parsed, "r") as input:
        for line in input:
            entry = line.replace("\n", "").split("\t")
            if read_type == "sv":
                k = k + 2 * (len(entry[8].split(",")))
            else:
                # Column 14 carries the read count appended above.
                k = k + 2 * int(entry[14])
            m.append(k)
    if len(m) == 1:
        # Single locus: no split needed, just copy.
        subprocess.call(["cp", subset_fa_reorder, reads_dir + "/contig0"])
    elif len(m) == 0:
        print("No insertion detected, exiting...")
    else:
        # Drop the last offset (csplit splits *before* each given line).
        m = m[:-1]
        index = " ".join(str(i) for i in m)
        command = (
            "csplit -s -f " + csplit_prefix + " -n 1 " + subset_fa_reorder + " " + index
        )
        subprocess.call(command, shell=True)
    # remove tmp files
    os.remove(read_ids)
    os.remove(read_ids_unique)
    os.remove(subset_fa)
    os.remove(subset_fa_reorder)
def extract_reads(reads, list, out):
    """Write the FASTA records named in the ID list file to *out*.

    Records are copied verbatim (raw bytes) from *reads*, in the order the
    IDs appear in *list*; a duplicated ID is written once per occurrence.
    """
    record_dict = SeqIO.index(reads, "fasta")
    with open(list, "r") as id_handle, open(out, "wb") as output_handle:
        for raw_line in id_handle:
            read_id = raw_line.rstrip("\n")
            output_handle.write(record_dict.get_raw(read_id))
| en | 000000397_dominik-handler-TELR_TELR_assembly_bea2b6ca1ee6.py | unknown | 4,381 |
from packaging.version import parse as Version
import sys
import requests
def get_pypi_xmlrpc_client():
    """This is actually deprecated client."""
    import xmlrpc.client

    endpoint = "https://pypi.python.org/pypi"
    return xmlrpc.client.ServerProxy(endpoint, use_datetime=True)
class PyPIClient:
    """Minimal client for the PyPI JSON API."""

    def __init__(self, host="https://pypi.org"):
        self._host = host
        self._session = requests.Session()

    def project(self, package_name):
        """Return the JSON metadata for *package_name* (includes all releases)."""
        response = self._session.get(
            "{host}/pypi/{project_name}/json".format(host=self._host, project_name=package_name)
        )
        response.raise_for_status()
        return response.json()

    def project_release(self, package_name, version):
        """Return the JSON metadata for one specific release of *package_name*."""
        response = self._session.get(
            "{host}/pypi/{project_name}/{version}/json".format(
                host=self._host, project_name=package_name, version=version
            )
        )
        response.raise_for_status()
        return response.json()

    def filter_packages_for_compatibility(self, package_name, version_set):
        """Keep only the versions whose requires_python matches this interpreter.

        Releases that declare no requires_python constraint are assumed
        compatible, matching pip's behavior.
        """
        # only need the packaging.specifiers import if we're actually executing this filter.
        from packaging.specifiers import SpecifierSet

        # Hoisted out of the loop: the running interpreter's version is constant.
        interpreter_version = Version(".".join(map(str, sys.version_info[:3])))
        results = []
        for version in version_set:
            requires_python = self.project_release(package_name, version)["info"]["requires_python"]
            if not requires_python or interpreter_version in SpecifierSet(requires_python):
                results.append(version)
        return results

    def get_ordered_versions(self, package_name, filter_by_compatibility=False):
        """Return all release versions of *package_name*, oldest first."""
        project = self.project(package_name)
        versions = [Version(package_version) for package_version in project["releases"].keys()]
        versions.sort()
        if filter_by_compatibility:
            return self.filter_packages_for_compatibility(package_name, versions)
        return versions

    def get_relevant_versions(self, package_name):
        """Return a tuple: (latest release, latest stable).

        If the two differ, the latest release is not a stable one.
        """
        versions = self.get_ordered_versions(package_name)
        # Fixed misleading name: this list holds the *stable* (non-pre-release)
        # versions, not the pre-releases.
        stable_versions = [version for version in versions if not version.is_prerelease]
        return (versions[-1], stable_versions[-1])
| from packaging.version import parse as Version
import sys
import requests
def get_pypi_xmlrpc_client():
    """This is actually deprecated client."""
    import xmlrpc.client

    endpoint = "https://pypi.python.org/pypi"
    return xmlrpc.client.ServerProxy(endpoint, use_datetime=True)
class PyPIClient:
    """Minimal client for the PyPI JSON API."""

    def __init__(self, host="https://pypi.org"):
        self._host = host
        self._session = requests.Session()

    def project(self, package_name):
        """Return the JSON metadata for *package_name* (includes all releases)."""
        response = self._session.get(
            "{host}/pypi/{project_name}/json".format(host=self._host, project_name=package_name)
        )
        response.raise_for_status()
        return response.json()

    def project_release(self, package_name, version):
        """Return the JSON metadata for one specific release of *package_name*."""
        response = self._session.get(
            "{host}/pypi/{project_name}/{version}/json".format(
                host=self._host, project_name=package_name, version=version
            )
        )
        response.raise_for_status()
        return response.json()

    def filter_packages_for_compatibility(self, package_name, version_set):
        """Keep only the versions whose requires_python matches this interpreter.

        Releases that declare no requires_python constraint are assumed
        compatible, matching pip's behavior.
        """
        # only need the packaging.specifiers import if we're actually executing this filter.
        from packaging.specifiers import SpecifierSet

        # Hoisted out of the loop: the running interpreter's version is constant.
        interpreter_version = Version(".".join(map(str, sys.version_info[:3])))
        results = []
        for version in version_set:
            requires_python = self.project_release(package_name, version)["info"]["requires_python"]
            if not requires_python or interpreter_version in SpecifierSet(requires_python):
                results.append(version)
        return results

    def get_ordered_versions(self, package_name, filter_by_compatibility=False):
        """Return all release versions of *package_name*, oldest first."""
        project = self.project(package_name)
        versions = [Version(package_version) for package_version in project["releases"].keys()]
        versions.sort()
        if filter_by_compatibility:
            return self.filter_packages_for_compatibility(package_name, versions)
        return versions

    def get_relevant_versions(self, package_name):
        """Return a tuple: (latest release, latest stable).

        If the two differ, the latest release is not a stable one.
        """
        versions = self.get_ordered_versions(package_name)
        # Fixed misleading name: this list holds the *stable* (non-pre-release)
        # versions, not the pre-releases.
        stable_versions = [version for version in versions if not version.is_prerelease]
        return (versions[-1], stable_versions[-1])
| en | 000167507_rsdoherty-azure-sdk-for-python_pypi_2535e1ffee84.py | unknown | 628 |
# coding: utf-8
"""封装常用的设计模式以及对内置函数和方法进行约定性的简化
"""
import types
import inspect
from functools import wraps
from girlfriend.exception import GirlFriendSysException
_singletons = {}


def singleton(clazz):
    """Class decorator that makes the decorated class a process-wide singleton.

    Not thread-safe; do not use in multi-threaded environments.
    """
    @wraps(clazz)
    def constructor(*args, **kws):
        global _singletons
        if clazz not in _singletons:
            _singletons[clazz] = clazz(*args, **kws)
        return _singletons[clazz]
    return constructor
class DelegateMeta(type):
    """Metaclass implementing delegation to a ``delegate`` attribute.

    Basic usage::

        class A(object):
            __metaclass__ = DelegateMeta

            def __init__(self, delegate):
                self.delegate = delegate

        lst = [1, 2, 3]
        a = A(lst)
        a.append(4)  # forwarded to lst.append

    Undefined attributes are forwarded automatically via ``__getattr__``.
    That covers most cases, but dunder methods such as ``__getitem__`` are
    looked up on the type, so they must be listed explicitly in the class
    attribute ``delegate_internal_methods``::

        class B(object):
            __metaclass__ = DelegateMeta
            delegate_internal_methods = ("__getitem__", "__hash__", "__eq__")

            def __init__(self, delegate):
                self.delegate = delegate

    Never delegate the special methods ``__init__``/``__new__``, nor
    ``__getattr__``/``__getattribute__`` (DelegateMeta uses the latter two
    itself).  To control attribute access in the delegating class, define
    ``__myattr__(self, fieldname)``: it is consulted first for undefined
    attributes and signals "not handled" by raising ``UnknownAttrError``.

    Alternatively, list the delegated methods explicitly with
    ``delegate_methods`` (automatic forwarding is then disabled)::

        class C(object):
            __metaclass__ = DelegateMeta
            delegate_methods = ("append", "__getitem__", "__eq__")

            def __init__(self, delegate):
                self.delegate = delegate
    """

    class UnknownAttrError(GirlFriendSysException):
        pass

    def __new__(cls, name, bases, attrs):
        delegate_methods = attrs.get("delegate_methods", tuple())
        if delegate_methods:
            # Explicit method list: register exactly those and skip the
            # automatic __getattr__ forwarding below.
            DelegateMeta.register_delegates(delegate_methods, attrs)
            return type(name, bases, attrs)
        # BUG FIX: the original had an unconditional ``return`` here, which
        # made everything below unreachable and silently disabled both the
        # automatic __getattr__ delegation and delegate_internal_methods,
        # contradicting the documented behavior.

        def getter(self, method_name):
            # Give the class's own __myattr__ hook the first chance.
            if "__myattr__" in attrs:
                try:
                    return self.__myattr__(method_name)
                except DelegateMeta.UnknownAttrError:
                    pass

            def method(*args, **kws):
                mtd = getattr(self.delegate, method_name)
                if mtd:
                    return mtd(*args, **kws)
                else:
                    raise AttributeError(
                        "No method found %s" % method_name)
            return method

        attrs["__getattr__"] = getter
        # Dunder methods bypass __getattr__, so they must be wired explicitly.
        delegate_internal_methods = attrs.get(
            "delegate_internal_methods", tuple())
        DelegateMeta.register_delegates(delegate_internal_methods, attrs)
        return type(name, bases, attrs)

    @staticmethod
    def register_delegates(delegate_methods, attrs):
        """Install forwarding wrappers for each named method into *attrs*."""
        for mtd_name in delegate_methods:
            def make_method(method_name):
                def method(self, *args, **kws):
                    return getattr(self.delegate,
                                   method_name)(*args, **kws)
                return method
            attrs[mtd_name] = make_method(mtd_name)
def args2fields(private=True):
    """Decorator intended for constructors.

    Automatically assigns every constructor argument except ``self`` to an
    instance attribute.  Instead of::

        class A(object):
            def __init__(self, a, b, c):
                self._a = a
                self._b = b
                self._c = c
                self.sum = self._a + self._b + self._c

    you can simply write::

        class A(object):
            @args2fields()
            def __init__(self, a, b, c):
                self.sum = self._a + self._b + self._c

    :param private: when True, attribute names get a leading underscore
    """
    def _field_name(arg_name):
        return "_" + arg_name if private else arg_name

    def _args2fields(constructor):
        @wraps(constructor)
        def _wrapped_constuctor(self, *args, **kws):
            # NOTE(review): inspect.getargspec was removed in Python 3.11;
            # this module appears to target Python 2 (see types.ListType
            # elsewhere in the file) — confirm the supported runtime.
            args_spec = inspect.getargspec(constructor)
            # Positional args: args_spec.args[0] is "self", hence start=1.
            for idx, arg in enumerate(args, start=1):
                arg_name = args_spec.args[idx]
                field_name = _field_name(arg_name)
                setattr(self, field_name, arg)
            # Keyword args.
            for arg_name, arg in kws.items():
                field_name = _field_name(arg_name)
                setattr(self, field_name, arg)
            # Default parameters the caller did not supply.
            default_args = get_default_args(args_spec)
            if default_args:
                for arg_name, default_value in default_args.items():
                    if arg_name == "self":
                        continue
                    field_name = _field_name(arg_name)
                    # Don't overwrite a value set explicitly above.
                    if hasattr(self, field_name):
                        continue
                    setattr(self, field_name, default_value)
            constructor(self, *args, **kws)
        return _wrapped_constuctor
    return _args2fields
def get_default_args(o):
    """Return a mapping of parameter name -> default value for *o*.

    *o* may be a callable or an argspec namedtuple already produced by
    ``inspect.getargspec`` / ``inspect.getfullargspec``.
    """
    argspec = o
    # Both ArgSpec and FullArgSpec are namedtuples, so a tuple check covers
    # either; the original isinstance(o, inspect.ArgSpec) breaks on
    # Python 3.11+, where inspect.ArgSpec (and getargspec) were removed.
    if not isinstance(argspec, tuple):
        _getargspec = getattr(inspect, "getfullargspec", None) or inspect.getargspec
        argspec = _getargspec(o)
    if not argspec.defaults:
        return {}
    return dict(zip(argspec.args[-len(argspec.defaults):],
                    argspec.defaults))
# Linear (sequence) collection types.
# NOTE(review): types.ListType / types.TupleType exist only in Python 2;
# on Python 3 this line raises AttributeError — confirm the target runtime.
SequenceCollectionType = (types.ListType, types.TupleType)
def parse_context_var(context, variable_name):
    """Resolve a context variable reference.

    If *variable_name* is a string starting with '$', look up the remainder
    in *context* and return that value.  A leading "$$" escapes the dollar
    sign (every "$$" in the string becomes "$").  Anything else — including
    non-string values — is returned unchanged.

    :param context: the context mapping
    :param variable_name: plain value or "$name" reference to resolve
    """
    if not isinstance(variable_name, str):
        return variable_name
    if variable_name.startswith("$$"):
        return variable_name.replace("$$", "$")
    if variable_name.startswith("$"):
        key = variable_name[1:]
        return context[key]
    return variable_name
class ObjDictModel(object):
    """Object whose attributes and items are interchangeable views of __dict__.

    ``obj.x`` and ``obj["x"]`` read/write the same underlying entry.
    """

    def __getattr__(self, name):
        return self.__dict__[name]

    def __setattr__(self, name, value):
        self.__dict__[name] = value

    # Item access is literally the same operation as attribute access.
    __getitem__ = __getattr__
    __setitem__ = __setattr__
class SafeOperation(object):
    """Wrap an object for null-safe access, like other languages' ``?.``.

    Attribute reads, writes and calls on a wrapped None are silently
    absorbed (returning the wrapper itself) instead of raising.
    Null-pointer errors cost billions every year — in dollars, comrades!
    """

    def __init__(self, obj):
        self.__dict__["_SafeOperation__obj"] = obj

    def __getattr__(self, attrname):
        target = self.__dict__["_SafeOperation__obj"]
        if target is None:
            return self
        return getattr(target, attrname)

    def __setattr__(self, attrname, value):
        target = self.__obj
        if target is None:
            return
        return setattr(target, attrname, value)

    def __call__(self, *args, **kwds):
        return self
| # coding: utf-8
"""封装常用的设计模式以及对内置函数和方法进行约定性的简化
"""
import types
import inspect
from functools import wraps
from girlfriend.exception import GirlFriendSysException
_singletons = {}


def singleton(clazz):
    """Class decorator that makes the decorated class a process-wide singleton.

    Not thread-safe; do not use in multi-threaded environments.
    """
    @wraps(clazz)
    def constructor(*args, **kws):
        global _singletons
        if clazz not in _singletons:
            _singletons[clazz] = clazz(*args, **kws)
        return _singletons[clazz]
    return constructor
class DelegateMeta(type):
    """Metaclass implementing delegation to a ``delegate`` attribute.

    Basic usage::

        class A(object):
            __metaclass__ = DelegateMeta

            def __init__(self, delegate):
                self.delegate = delegate

        lst = [1, 2, 3]
        a = A(lst)
        a.append(4)  # forwarded to lst.append

    Undefined attributes are forwarded automatically via ``__getattr__``.
    That covers most cases, but dunder methods such as ``__getitem__`` are
    looked up on the type, so they must be listed explicitly in the class
    attribute ``delegate_internal_methods``::

        class B(object):
            __metaclass__ = DelegateMeta
            delegate_internal_methods = ("__getitem__", "__hash__", "__eq__")

            def __init__(self, delegate):
                self.delegate = delegate

    Never delegate the special methods ``__init__``/``__new__``, nor
    ``__getattr__``/``__getattribute__`` (DelegateMeta uses the latter two
    itself).  To control attribute access in the delegating class, define
    ``__myattr__(self, fieldname)``: it is consulted first for undefined
    attributes and signals "not handled" by raising ``UnknownAttrError``.

    Alternatively, list the delegated methods explicitly with
    ``delegate_methods`` (automatic forwarding is then disabled)::

        class C(object):
            __metaclass__ = DelegateMeta
            delegate_methods = ("append", "__getitem__", "__eq__")

            def __init__(self, delegate):
                self.delegate = delegate
    """

    class UnknownAttrError(GirlFriendSysException):
        pass

    def __new__(cls, name, bases, attrs):
        delegate_methods = attrs.get("delegate_methods", tuple())
        if delegate_methods:
            # Explicit method list: register exactly those and skip the
            # automatic __getattr__ forwarding below.
            DelegateMeta.register_delegates(delegate_methods, attrs)
            return type(name, bases, attrs)
        # BUG FIX: the original had an unconditional ``return`` here, which
        # made everything below unreachable and silently disabled both the
        # automatic __getattr__ delegation and delegate_internal_methods,
        # contradicting the documented behavior.

        def getter(self, method_name):
            # Give the class's own __myattr__ hook the first chance.
            if "__myattr__" in attrs:
                try:
                    return self.__myattr__(method_name)
                except DelegateMeta.UnknownAttrError:
                    pass

            def method(*args, **kws):
                mtd = getattr(self.delegate, method_name)
                if mtd:
                    return mtd(*args, **kws)
                else:
                    raise AttributeError(
                        "No method found %s" % method_name)
            return method

        attrs["__getattr__"] = getter
        # Dunder methods bypass __getattr__, so they must be wired explicitly.
        delegate_internal_methods = attrs.get(
            "delegate_internal_methods", tuple())
        DelegateMeta.register_delegates(delegate_internal_methods, attrs)
        return type(name, bases, attrs)

    @staticmethod
    def register_delegates(delegate_methods, attrs):
        """Install forwarding wrappers for each named method into *attrs*."""
        for mtd_name in delegate_methods:
            def make_method(method_name):
                def method(self, *args, **kws):
                    return getattr(self.delegate,
                                   method_name)(*args, **kws)
                return method
            attrs[mtd_name] = make_method(mtd_name)
def args2fields(private=True):
    """Decorator intended for constructors.

    Automatically assigns every constructor argument except ``self`` to an
    instance attribute.  Instead of::

        class A(object):
            def __init__(self, a, b, c):
                self._a = a
                self._b = b
                self._c = c
                self.sum = self._a + self._b + self._c

    you can simply write::

        class A(object):
            @args2fields()
            def __init__(self, a, b, c):
                self.sum = self._a + self._b + self._c

    :param private: when True, attribute names get a leading underscore
    """
    def _field_name(arg_name):
        return "_" + arg_name if private else arg_name

    def _args2fields(constructor):
        @wraps(constructor)
        def _wrapped_constuctor(self, *args, **kws):
            # NOTE(review): inspect.getargspec was removed in Python 3.11;
            # this module appears to target Python 2 (see types.ListType
            # elsewhere in the file) — confirm the supported runtime.
            args_spec = inspect.getargspec(constructor)
            # Positional args: args_spec.args[0] is "self", hence start=1.
            for idx, arg in enumerate(args, start=1):
                arg_name = args_spec.args[idx]
                field_name = _field_name(arg_name)
                setattr(self, field_name, arg)
            # Keyword args.
            for arg_name, arg in kws.items():
                field_name = _field_name(arg_name)
                setattr(self, field_name, arg)
            # Default parameters the caller did not supply.
            default_args = get_default_args(args_spec)
            if default_args:
                for arg_name, default_value in default_args.items():
                    if arg_name == "self":
                        continue
                    field_name = _field_name(arg_name)
                    # Don't overwrite a value set explicitly above.
                    if hasattr(self, field_name):
                        continue
                    setattr(self, field_name, default_value)
            constructor(self, *args, **kws)
        return _wrapped_constuctor
    return _args2fields
def get_default_args(o):
    """Return a mapping of parameter name -> default value for *o*.

    *o* may be a callable or an argspec namedtuple already produced by
    ``inspect.getargspec`` / ``inspect.getfullargspec``.
    """
    argspec = o
    # Both ArgSpec and FullArgSpec are namedtuples, so a tuple check covers
    # either; the original isinstance(o, inspect.ArgSpec) breaks on
    # Python 3.11+, where inspect.ArgSpec (and getargspec) were removed.
    if not isinstance(argspec, tuple):
        _getargspec = getattr(inspect, "getfullargspec", None) or inspect.getargspec
        argspec = _getargspec(o)
    if not argspec.defaults:
        return {}
    return dict(zip(argspec.args[-len(argspec.defaults):],
                    argspec.defaults))
# Linear (sequence) collection types.
# NOTE(review): types.ListType / types.TupleType exist only in Python 2;
# on Python 3 this line raises AttributeError — confirm the target runtime.
SequenceCollectionType = (types.ListType, types.TupleType)
def parse_context_var(context, variable_name):
    """Resolve a context variable reference.

    If *variable_name* is a string starting with '$', look up the remainder
    in *context* and return that value.  A leading "$$" escapes the dollar
    sign (every "$$" in the string becomes "$").  Anything else — including
    non-string values — is returned unchanged.

    :param context: the context mapping
    :param variable_name: plain value or "$name" reference to resolve
    """
    if not isinstance(variable_name, str):
        return variable_name
    if variable_name.startswith("$$"):
        return variable_name.replace("$$", "$")
    if variable_name.startswith("$"):
        key = variable_name[1:]
        return context[key]
    return variable_name
class ObjDictModel(object):
    """Object whose attributes and items are interchangeable views of __dict__.

    ``obj.x`` and ``obj["x"]`` read/write the same underlying entry.
    """

    def __getattr__(self, name):
        return self.__dict__[name]

    def __setattr__(self, name, value):
        self.__dict__[name] = value

    # Item access is literally the same operation as attribute access.
    __getitem__ = __getattr__
    __setitem__ = __setattr__
class SafeOperation(object):
    """Wrap an object for null-safe access, like other languages' ``?.``.

    Attribute reads, writes and calls on a wrapped None are silently
    absorbed (returning the wrapper itself) instead of raising.
    Null-pointer errors cost billions every year — in dollars, comrades!
    """

    def __init__(self, obj):
        self.__dict__["_SafeOperation__obj"] = obj

    def __getattr__(self, attrname):
        target = self.__dict__["_SafeOperation__obj"]
        if target is None:
            return self
        return getattr(target, attrname)

    def __setattr__(self, attrname, value):
        target = self.__obj
        if target is None:
            return
        return setattr(target, attrname, value)

    def __call__(self, *args, **kwds):
        return self
| en | 000409102_chihongze-girlfriend_lang_3a7e33c6ae52.py | unknown | 2,069 |
from leapp.models import Model, fields
from leapp.topics import SystemFactsTopic
class InstalledDesktopsFacts(Model):
    """
    The model includes facts about installed desktop environments (GNOME/KDE).
    """
    topic = SystemFactsTopic
    # True when the corresponding desktop environment is installed.
    gnome_installed = fields.Boolean(default=False)
    kde_installed = fields.Boolean(default=False)
| from leapp.models import Model, fields
from leapp.topics import SystemFactsTopic
class InstalledDesktopsFacts(Model):
    """
    The model includes facts about installed desktop environments (GNOME/KDE).
    """
    topic = SystemFactsTopic
    # True when the corresponding desktop environment is installed.
    gnome_installed = fields.Boolean(default=False)
    kde_installed = fields.Boolean(default=False)
| en | 000389650_sm00th-leapp-repository_installeddesktopsfacts_c43a79aac7e1.py | unknown | 86 |
import six
import warnings
import numpy as np
import os
import os.path as osp
import re
from six.moves import cPickle
from multiprocessing import Pool
import csv
from latent_3d_points.python_plyfile.plyfile import PlyElement, PlyData
def create_dir(dir_path):
    """Ensure *dir_path* exists, creating intermediate directories as needed.

    Returns the same path so the call can be chained/inlined.
    """
    if osp.exists(dir_path):
        return dir_path
    os.makedirs(dir_path)
    return dir_path
def pickle_data(file_name, *args):
    '''Using (c)Pickle to save multiple python objects in a single file.

    The object count is written first so unpickle_data() knows how many
    records to read back.  Protocol 2 keeps the file readable from
    Python 2 as well.
    '''
    # stdlib pickle (C-accelerated on Python 3) writes the same protocol-2
    # stream as six.moves.cPickle; the local import avoids the six dependency.
    import pickle
    # Context manager fixes a handle leak: the original left the file open
    # if any dump raised.
    with open(file_name, 'wb') as out_file:
        pickle.dump(len(args), out_file, protocol=2)
        for item in args:
            pickle.dump(item, out_file, protocol=2)
def unpickle_data(file_name):
    '''Restore data previously saved with pickle_data().

    Yields the stored objects one by one.  The file is now closed
    deterministically (via ``with``) even if the generator is abandoned or
    unpickling raises — the original only closed it after full exhaustion.
    '''
    # stdlib pickle reads the same protocol-2 stream six.moves.cPickle wrote.
    import pickle
    with open(file_name, 'rb') as in_file:
        # First record is the object count written by pickle_data().
        size = pickle.load(in_file)
        for _ in range(size):
            yield pickle.load(in_file)
def files_in_subdirs(top_dir, search_pattern):
    """Yield every file path under *top_dir* whose full path matches the regex.

    *search_pattern* is applied with ``re.search`` against the joined
    directory + file name, so it can match any part of the path.
    """
    matcher = re.compile(search_pattern).search
    for dir_path, _, file_names in os.walk(top_dir):
        for file_name in file_names:
            full_name = osp.join(dir_path, file_name)
            if matcher(full_name):
                yield full_name
def load_ply(file_name, with_faces=False, with_color=False):
    """Load a PLY file into numpy arrays.

    Returns the (N, 3) xyz point array alone, or a list
    [points, faces?, color?] when with_faces / with_color are requested.
    """
    ply_data = PlyData.read(file_name)
    points = ply_data['vertex']
    # Stack the x/y/z columns into an (N, 3) array.
    points = np.vstack([points['x'], points['y'], points['z']]).T
    ret_val = [points]
    if with_faces:
        faces = np.vstack(ply_data['face']['vertex_indices'])
        ret_val.append(faces)
    if with_color:
        # Per-vertex RGB, assembled as an (N, 3) array.
        r = np.vstack(ply_data['vertex']['red'])
        g = np.vstack(ply_data['vertex']['green'])
        b = np.vstack(ply_data['vertex']['blue'])
        color = np.hstack((r, g, b))
        ret_val.append(color)
    if len(ret_val) == 1:  # Unwrap the list
        ret_val = ret_val[0]
    return ret_val
def output_point_cloud_ply(xyz, filepath ):
    """Write an (N, 3) point array to *filepath* as an ASCII PLY file."""
    print('write: ' + filepath)
    n_points = xyz.shape[0]
    header_lines = [
        'ply',
        'format ascii 1.0',
        'element vertex %d' % (n_points),
        'property float x',
        'property float y',
        'property float z',
        'end_header',
    ]
    with open(filepath, 'w') as ply_file:
        ply_file.write('\n'.join(header_lines) + '\n')
        for point in xyz:
            ply_file.write('%f %f %f\n' % (point[0], point[1], point[2]))
def pc_loader(f_name):
    """Load a point cloud stored in ShapeNet's "standard" folder layout.

    Paths look like ``.../<synset_id>/<model_id>.ply``; the synset id and
    model id are recovered from the path components.
    """
    parts = f_name.split('/')
    file_name, parent_dir = parts[-1], parts[-2]
    model_id = file_name.split('.')[0]
    return load_ply(f_name), model_id, parent_dir
def load_point_clouds_under_folder(top_dir, n_threads=20, file_ending='.ply', verbose=False):
    """Load every matching point cloud under *top_dir* into a PointCloudDataSet."""
    file_names = [f for f in files_in_subdirs(top_dir, file_ending)]
    # Deterministic order so runs are reproducible.
    file_names = sorted(file_names)
    if len(file_names) == 10:
        print( file_names )
    print('len(file_names) = ' + str(len(file_names)))
    loader = pc_loader
    # Load one file up front to discover the per-cloud array shape.
    pc = loader(file_names[0])[0]
    pclouds = np.empty([len(file_names), pc.shape[0], pc.shape[1]], dtype=np.float32)
    model_names = np.empty([len(file_names)], dtype=object)
    class_ids = np.empty([len(file_names)], dtype=object)
    # Parallel load; imap preserves the sorted file order.
    pool = Pool(n_threads)
    for i, data in enumerate(pool.imap(loader, file_names)):
        pclouds[i, :, :], model_names[i], class_ids[i] = data
    pool.close()
    pool.join()
    if len(np.unique(model_names)) != len(pclouds):
        warnings.warn('Point clouds with the same model name were loaded.')
    if verbose:
        print('{0} pclouds were loaded. They belong in {1} shape-classes.'.format(len(pclouds),
                                                                                  len(np.unique(class_ids))))
    # Label each cloud as "<synset_id>_<model_id>".
    model_ids = model_names
    syn_ids = class_ids
    labels = syn_ids + '_' + model_ids
    # Duplicate the data until there are at least 64 clouds (minimum batch
    # size expected downstream).
    while pclouds.shape[0] < 64:
        pclouds = np.concatenate((pclouds, pclouds), axis=0)
        labels = np.concatenate(( labels, labels), axis=0)
    return PointCloudDataSet(pclouds, labels=labels, init_shuffle=False)
class PointCloudDataSet(object):
    """In-memory dataset of point clouds with optional labels / latent codes.

    Mimics the classic TensorFlow DataSet interface: examples can be
    shuffled, iterated in mini-batches (next_batch), read out whole
    (full_epoch_data) and merged with another dataset.
    """

    def __init__(self, point_clouds, labels=None, latent_codes=None, copy=True, init_shuffle=True, disableShuffle=False, padFor128=False ):
        """Builds the dataset.

        Args:
            point_clouds: array of shape (num_examples, n_points, point_dim).
            labels: optional array with one entry per example; defaults to an
                all-ones int8 vector when omitted.
            latent_codes: optional array with one latent vector per example.
            copy: if True, private copies of the input arrays are stored.
            init_shuffle: if True, the examples are shuffled once here.
            disableShuffle: if True, shuffle_data() becomes a no-op.
            padFor128: if True, the trailing 32 examples are re-appended four
                times (clouds, latent codes and labels alike), adding 128
                padding rows.
        """
        self.num_examples = point_clouds.shape[0]
        self.n_points = point_clouds.shape[1]
        self.disableShuffle = disableShuffle
        if labels is not None:
            assert point_clouds.shape[0] == labels.shape[0], ('points.shape: %s labels.shape: %s' % (point_clouds.shape, labels.shape))
            if copy:
                self.labels = labels.copy()
            else:
                self.labels = labels
        else:
            # No labels given: use a dummy all-ones label per example.
            self.labels = np.ones(self.num_examples, dtype=np.int8)
        if latent_codes is not None:
            assert point_clouds.shape[0] == latent_codes.shape[0], ('point_clouds.shape: %s latent_codes.shape: %s' % (point_clouds.shape, latent_codes.shape))
        else:
            self.latent_codes = None
        if copy:
            self.point_clouds = point_clouds.copy()
            if latent_codes is not None:
                self.latent_codes = latent_codes.copy()
        else:
            self.point_clouds = point_clouds
            if latent_codes is not None:
                self.latent_codes = latent_codes
        self.epochs_completed = 0
        self._index_in_epoch = 0
        if init_shuffle:
            self.shuffle_data()
        if padFor128:
            # Append the trailing 32 examples four times (adds 128 rows).
            self.point_clouds = np.vstack((self.point_clouds, self.point_clouds[-32:] ))
            self.point_clouds = np.vstack((self.point_clouds, self.point_clouds[-32:] ))
            self.point_clouds = np.vstack((self.point_clouds, self.point_clouds[-32:] ))
            self.point_clouds = np.vstack((self.point_clouds, self.point_clouds[-32:] ))
            if self.latent_codes is not None:
                self.latent_codes = np.vstack((self.latent_codes, self.latent_codes[-32:] ))
                self.latent_codes = np.vstack((self.latent_codes, self.latent_codes[-32:] ))
                self.latent_codes = np.vstack((self.latent_codes, self.latent_codes[-32:] ))
                self.latent_codes = np.vstack((self.latent_codes, self.latent_codes[-32:] ))
            if self.labels is not None:
                # Labels are 1-D; pad via a column vector so vstack adds rows.
                labelsss = self.labels.reshape([self.num_examples, 1])
                labelsss = np.vstack((labelsss, labelsss[-32:] ))
                labelsss = np.vstack((labelsss, labelsss[-32:] ))
                labelsss = np.vstack((labelsss, labelsss[-32:] ))
                labelsss = np.vstack((labelsss, labelsss[-32:] ))
                self.labels = np.squeeze(labelsss)
            self.num_examples = self.point_clouds.shape[0]

    def shuffle_data(self, seed=None):
        """Shuffles the examples in place; no-op when disableShuffle is set.

        Applies one shared permutation to clouds, labels and latent codes.
        """
        if self.disableShuffle:
            return self
        if seed is not None:
            np.random.seed(seed)
        perm = np.arange(self.num_examples)
        np.random.shuffle(perm)
        self.point_clouds = self.point_clouds[perm]
        self.labels = self.labels[perm]
        if self.latent_codes is not None:
            self.latent_codes = self.latent_codes[perm]
        return self

    def next_batch(self, batch_size, seed=None):
        '''Return the next batch_size examples from this data set.
        '''
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        # Epoch exhausted: reshuffle and restart from the beginning.
        if self._index_in_epoch > self.num_examples:
            self.epochs_completed += 1  # Finished epoch.
            self.shuffle_data(seed)
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size
        end = self._index_in_epoch
        if self.latent_codes is not None:
            return self.point_clouds[start:end], self.labels[start:end], self.latent_codes[start:end]
        else:
            return self.point_clouds[start:end], self.labels[start:end], None

    def full_epoch_data(self, shuffle=True, seed=None):
        '''Returns a copy of the examples of the entire data set (i.e. an epoch's data), shuffled.
        '''
        if shuffle and seed is not None:
            np.random.seed(seed)
        perm = np.arange(self.num_examples)  # Shuffle the data.
        if shuffle:
            np.random.shuffle(perm)
        pc = self.point_clouds[perm]
        lb = self.labels[perm]
        if self.latent_codes is not None:
            lc = self.latent_codes[perm]
            return pc, lb, lc
        else:
            return pc, lb, None

    def merge(self, other_data_set):
        """Appends other_data_set's examples to this dataset (in place).

        Resets the epoch/batch counters and returns self.
        """
        self._index_in_epoch = 0
        self.epochs_completed = 0
        self.point_clouds = np.vstack((self.point_clouds, other_data_set.point_clouds))
        labels_1 = self.labels.reshape([self.num_examples, 1])  # TODO = move to init.
        labels_2 = other_data_set.labels.reshape([other_data_set.num_examples, 1])
        self.labels = np.vstack((labels_1, labels_2))
        self.labels = np.squeeze(self.labels)
        if self.latent_codes is not None:
            self.latent_codes = np.vstack((self.latent_codes, other_data_set.latent_codes))
        self.num_examples = self.point_clouds.shape[0]
        return self
| import six
import warnings
import numpy as np
import os
import os.path as osp
import re
from six.moves import cPickle
from multiprocessing import Pool
import csv
from latent_3d_points.python_plyfile.plyfile import PlyElement, PlyData
def create_dir(dir_path):
    """Ensure *dir_path* (including any missing parents) exists; return it."""
    already_there = osp.exists(dir_path)
    if not already_there:
        os.makedirs(dir_path)
    return dir_path
def pickle_data(file_name, *args):
    """Using (c)Pickle to save multiple python objects in a single file.

    The object count is written first, so unpickle_data() knows how many
    objects to read back.  Protocol 2 keeps the files python2-readable.

    Args:
        file_name: destination path.
        *args: the objects to store, in order.
    """
    # 'with' guarantees the handle is closed even if a dump raises
    # (the original left the file open on error).
    with open(file_name, 'wb') as out_file:
        cPickle.dump(len(args), out_file, protocol=2)
        for item in args:
            cPickle.dump(item, out_file, protocol=2)
def unpickle_data(file_name):
    """Restore data previously saved with pickle_data().

    Yields the stored objects one by one.

    Note: the original implementation only closed the file after the
    generator was fully exhausted; the 'with' block here also closes it
    when the generator is discarded early.
    """
    with open(file_name, 'rb') as in_file:
        size = cPickle.load(in_file)  # number of stored objects
        for _ in range(size):
            yield cPickle.load(in_file)
def files_in_subdirs(top_dir, search_pattern):
    """Yield full paths under *top_dir* whose path matches *search_pattern*.

    The pattern is a regular expression searched against the joined path.
    """
    pattern = re.compile(search_pattern)
    for dir_path, _unused_dirs, file_names in os.walk(top_dir):
        candidates = (osp.join(dir_path, fn) for fn in file_names)
        for candidate in candidates:
            if pattern.search(candidate) is not None:
                yield candidate
def load_ply(file_name, with_faces=False, with_color=False):
    """Read a PLY file and return its vertices as an (N, 3) array.

    When with_faces / with_color are requested, the result is a list whose
    extra entries hold the face indices and per-vertex RGB colors.
    """
    ply_data = PlyData.read(file_name)
    vertex = ply_data['vertex']
    xyz = np.vstack([vertex['x'], vertex['y'], vertex['z']]).T
    result = [xyz]
    if with_faces:
        result.append(np.vstack(ply_data['face']['vertex_indices']))
    if with_color:
        channels = (np.vstack(ply_data['vertex']['red']),
                    np.vstack(ply_data['vertex']['green']),
                    np.vstack(ply_data['vertex']['blue']))
        result.append(np.hstack(channels))
    # With no extras requested, return the bare vertex array, not a list.
    return result[0] if len(result) == 1 else result
def output_point_cloud_ply(xyz, filepath ):
    """Dump the points in *xyz* (an N x 3 array) as an ASCII PLY file."""
    print('write: ' + filepath)
    with open(filepath, 'w') as ply_file:
        count = xyz.shape[0]
        ply_file.write('ply\n'
                       'format ascii 1.0\n')
        ply_file.write('element vertex %d\n' % count)
        for axis in ('x', 'y', 'z'):
            ply_file.write('property float %s\n' % axis)
        ply_file.write('end_header\n')
        for row in range(count):
            ply_file.write('%f %f %f\n' % (xyz[row][0], xyz[row][1], xyz[row][2]))
def pc_loader(f_name):
    """Load a ShapeNet-style point cloud, i.e. .../<syn_id>/<model_name>.ply.

    Returns (point_cloud, model_id, syn_id).
    """
    tokens = f_name.split('/')
    file_stem = tokens[-1].split('.')[0]
    parent_dir = tokens[-2]
    cloud = load_ply(f_name)
    return cloud, file_stem, parent_dir
def load_point_clouds_under_folder(top_dir, n_threads=20, file_ending='.ply', verbose=False):
    """Loads every matching point-cloud file under top_dir into a dataset.

    Args:
        top_dir: root directory, searched recursively.
        n_threads: number of worker processes used to parse files in parallel.
        file_ending: regex matched against the full file path (e.g. '.ply').
        verbose: if True, print a summary of what was loaded.

    Returns:
        A PointCloudDataSet whose labels are '<syn_id>_<model_id>' strings.
        The data is doubled until at least 64 examples exist.
    """
    file_names = [f for f in files_in_subdirs(top_dir, file_ending)]
    file_names = sorted(file_names)
    # Debug aid: dump the full file list for tiny (exactly 10-file) runs.
    if len(file_names) == 10:
        print( file_names )
    print('len(file_names) = ' + str(len(file_names)))
    loader = pc_loader
    # Load one cloud up front to size the output buffer; all files are
    # assumed to share this (n_points, point_dim) shape — TODO confirm.
    pc = loader(file_names[0])[0]
    pclouds = np.empty([len(file_names), pc.shape[0], pc.shape[1]], dtype=np.float32)
    model_names = np.empty([len(file_names)], dtype=object)
    class_ids = np.empty([len(file_names)], dtype=object)
    # Parse the files in a process pool; imap preserves the file order.
    pool = Pool(n_threads)
    for i, data in enumerate(pool.imap(loader, file_names)):
        pclouds[i, :, :], model_names[i], class_ids[i] = data
    pool.close()
    pool.join()
    if len(np.unique(model_names)) != len(pclouds):
        warnings.warn('Point clouds with the same model name were loaded.')
    if verbose:
        print('{0} pclouds were loaded. They belong in {1} shape-classes.'.format(len(pclouds),
                                                                                  len(np.unique(class_ids))))
    model_ids = model_names
    syn_ids = class_ids
    # Element-wise string concatenation over the object arrays.
    labels = syn_ids + '_' + model_ids
    # Duplicate the data until there are at least 64 examples.
    while pclouds.shape[0] < 64:
        pclouds = np.concatenate((pclouds, pclouds), axis=0)
        labels = np.concatenate(( labels, labels), axis=0)
    return PointCloudDataSet(pclouds, labels=labels, init_shuffle=False)
class PointCloudDataSet(object):
    """In-memory dataset of point clouds with optional labels / latent codes.

    Mimics the classic TensorFlow DataSet interface: examples can be
    shuffled, iterated in mini-batches (next_batch), read out whole
    (full_epoch_data) and merged with another dataset.
    """

    def __init__(self, point_clouds, labels=None, latent_codes=None, copy=True, init_shuffle=True, disableShuffle=False, padFor128=False ):
        """Builds the dataset.

        Args:
            point_clouds: array of shape (num_examples, n_points, point_dim).
            labels: optional array with one entry per example; defaults to an
                all-ones int8 vector when omitted.
            latent_codes: optional array with one latent vector per example.
            copy: if True, private copies of the input arrays are stored.
            init_shuffle: if True, the examples are shuffled once here.
            disableShuffle: if True, shuffle_data() becomes a no-op.
            padFor128: if True, the trailing 32 examples are re-appended four
                times (clouds, latent codes and labels alike), adding 128
                padding rows.
        """
        self.num_examples = point_clouds.shape[0]
        self.n_points = point_clouds.shape[1]
        self.disableShuffle = disableShuffle
        if labels is not None:
            assert point_clouds.shape[0] == labels.shape[0], ('points.shape: %s labels.shape: %s' % (point_clouds.shape, labels.shape))
            if copy:
                self.labels = labels.copy()
            else:
                self.labels = labels
        else:
            # No labels given: use a dummy all-ones label per example.
            self.labels = np.ones(self.num_examples, dtype=np.int8)
        if latent_codes is not None:
            assert point_clouds.shape[0] == latent_codes.shape[0], ('point_clouds.shape: %s latent_codes.shape: %s' % (point_clouds.shape, latent_codes.shape))
        else:
            self.latent_codes = None
        if copy:
            self.point_clouds = point_clouds.copy()
            if latent_codes is not None:
                self.latent_codes = latent_codes.copy()
        else:
            self.point_clouds = point_clouds
            if latent_codes is not None:
                self.latent_codes = latent_codes
        self.epochs_completed = 0
        self._index_in_epoch = 0
        if init_shuffle:
            self.shuffle_data()
        if padFor128:
            # Append the trailing 32 examples four times (adds 128 rows).
            self.point_clouds = np.vstack((self.point_clouds, self.point_clouds[-32:] ))
            self.point_clouds = np.vstack((self.point_clouds, self.point_clouds[-32:] ))
            self.point_clouds = np.vstack((self.point_clouds, self.point_clouds[-32:] ))
            self.point_clouds = np.vstack((self.point_clouds, self.point_clouds[-32:] ))
            if self.latent_codes is not None:
                self.latent_codes = np.vstack((self.latent_codes, self.latent_codes[-32:] ))
                self.latent_codes = np.vstack((self.latent_codes, self.latent_codes[-32:] ))
                self.latent_codes = np.vstack((self.latent_codes, self.latent_codes[-32:] ))
                self.latent_codes = np.vstack((self.latent_codes, self.latent_codes[-32:] ))
            if self.labels is not None:
                # Labels are 1-D; pad via a column vector so vstack adds rows.
                labelsss = self.labels.reshape([self.num_examples, 1])
                labelsss = np.vstack((labelsss, labelsss[-32:] ))
                labelsss = np.vstack((labelsss, labelsss[-32:] ))
                labelsss = np.vstack((labelsss, labelsss[-32:] ))
                labelsss = np.vstack((labelsss, labelsss[-32:] ))
                self.labels = np.squeeze(labelsss)
            self.num_examples = self.point_clouds.shape[0]

    def shuffle_data(self, seed=None):
        """Shuffles the examples in place; no-op when disableShuffle is set.

        Applies one shared permutation to clouds, labels and latent codes.
        """
        if self.disableShuffle:
            return self
        if seed is not None:
            np.random.seed(seed)
        perm = np.arange(self.num_examples)
        np.random.shuffle(perm)
        self.point_clouds = self.point_clouds[perm]
        self.labels = self.labels[perm]
        if self.latent_codes is not None:
            self.latent_codes = self.latent_codes[perm]
        return self

    def next_batch(self, batch_size, seed=None):
        '''Return the next batch_size examples from this data set.
        '''
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        # Epoch exhausted: reshuffle and restart from the beginning.
        if self._index_in_epoch > self.num_examples:
            self.epochs_completed += 1  # Finished epoch.
            self.shuffle_data(seed)
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size
        end = self._index_in_epoch
        if self.latent_codes is not None:
            return self.point_clouds[start:end], self.labels[start:end], self.latent_codes[start:end]
        else:
            return self.point_clouds[start:end], self.labels[start:end], None

    def full_epoch_data(self, shuffle=True, seed=None):
        '''Returns a copy of the examples of the entire data set (i.e. an epoch's data), shuffled.
        '''
        if shuffle and seed is not None:
            np.random.seed(seed)
        perm = np.arange(self.num_examples)  # Shuffle the data.
        if shuffle:
            np.random.shuffle(perm)
        pc = self.point_clouds[perm]
        lb = self.labels[perm]
        if self.latent_codes is not None:
            lc = self.latent_codes[perm]
            return pc, lb, lc
        else:
            return pc, lb, None

    def merge(self, other_data_set):
        """Appends other_data_set's examples to this dataset (in place).

        Resets the epoch/batch counters and returns self.
        """
        self._index_in_epoch = 0
        self.epochs_completed = 0
        self.point_clouds = np.vstack((self.point_clouds, other_data_set.point_clouds))
        labels_1 = self.labels.reshape([self.num_examples, 1])  # TODO = move to init.
        labels_2 = other_data_set.labels.reshape([other_data_set.num_examples, 1])
        self.labels = np.vstack((labels_1, labels_2))
        self.labels = np.squeeze(self.labels)
        if self.latent_codes is not None:
            self.latent_codes = np.vstack((self.latent_codes, other_data_set.latent_codes))
        self.num_examples = self.point_clouds.shape[0]
        return self
| en | 000683379_kangxue-LOGAN_in_out_5fd953ef2b2b.py | unknown | 2,957 |
import torch
from .basic import to_one_hot
def gumbel_noise(*sizes, epsilon=1e-9, **kwargs):
    """Sample standard Gumbel noise of the given shape via -log(-log(U)).

    epsilon guards both log calls against log(0).
    """
    uniform = torch.rand(*sizes, **kwargs)
    return -torch.log(-torch.log(uniform + epsilon) + epsilon)
def gumbel_softmax(logits, dim=-1, tau=1.0, noise=1.0, hard=False, **kwargs):
    """
    Softmax with gumbel noise
    :param logits: inputs for softmax
    :param dim: normalize softmax along this dimension
    :param tau: gumbel softmax temperature
    :param noise: scale of the gumbel perturbation (0 disables it)
    :param hard: if True, works like onehot(sample) during forward pass,
        gumbel-softmax for backward pass
    :return: gumbel-softmax "probabilities", tensor of same shape as logits
    """
    perturbed = logits
    if noise != 0:
        gumbels = gumbel_noise(*logits.shape, device=logits.device, dtype=logits.dtype)
        perturbed = perturbed + noise * gumbels
    if tau != 1.0:
        perturbed = perturbed / tau
    soft_probs = torch.softmax(perturbed, dim=dim)
    if not hard:
        return soft_probs
    argmax_indices = torch.max(soft_probs, dim=dim)[1]
    onehot = to_one_hot(argmax_indices, depth=logits.shape[dim])
    # to_one_hot places the new axis last; move it back to `dim` if needed.
    last_dim = len(logits.shape) - 1
    if dim != -1 and dim != last_dim:
        order = list(range(last_dim))
        order.insert(dim, -1)
        onehot = onehot.permute(*order)
    # Straight-through estimator: onehot value forward, soft gradient backward.
    return (onehot - soft_probs).detach() + soft_probs
def gumbel_sigmoid(logits, tau=1.0, noise=1.0, hard=False, **kwargs):
    """
    A special case of gumbel softmax with 2 classes: [logit] and 0
    :param logits: sigmoid inputs
    :param tau: same as gumbel softmax temperature
    :param noise: scale of the gumbel perturbation (0 disables it)
    :param hard: if True, works like bernoulli sample for forward pass,
        gumbel sigmoid for backward pass
    :return: tensor with same shape as logits
    """
    if noise != 0.0:
        # Difference of two Gumbels ~ logistic noise, matching the 2-class case.
        z1 = gumbel_noise(*logits.shape, device=logits.device, dtype=logits.dtype)
        z2 = gumbel_noise(*logits.shape, device=logits.device, dtype=logits.dtype)
        logits = logits + noise * (z1 - z2)
    if tau != 1.0:
        # Out-of-place divide: the original `logits /= tau` mutated the
        # caller's tensor when noise == 0 (and errors on leaves needing grad).
        logits = logits / tau
    sigm = torch.sigmoid(logits)
    if hard:
        # Straight-through: hard 0/1 value forward, sigmoid gradient backward.
        hard_sample = torch.ge(sigm, 0.5).to(dtype=logits.dtype)
        sigm = (hard_sample - sigm).detach() + sigm
    return sigm
| import torch
from .basic import to_one_hot
def gumbel_noise(*sizes, epsilon=1e-9, **kwargs):
    """Draw Gumbel(0, 1) noise of the requested shape.

    Computed as -log(-log(U + eps) + eps); eps avoids log(0).
    """
    inner = torch.log(torch.rand(*sizes, **kwargs) + epsilon)
    return -torch.log(-inner + epsilon)
def gumbel_softmax(logits, dim=-1, tau=1.0, noise=1.0, hard=False, **kwargs):
    """
    Softmax with gumbel noise
    :param logits: inputs for softmax
    :param dim: normalize softmax along this dimension
    :param tau: gumbel softmax temperature
    :param hard: if True, works like onehot(sample) during forward pass,
        gumbel-softmax for backward pass
    :return: gumbel-softmax "probabilities", tensor of same shape as logits
    """
    # noise scales the gumbel perturbation; 0 reduces this to a plain softmax.
    if noise != 0:
        z = gumbel_noise(*logits.shape, device=logits.device, dtype=logits.dtype)
        logits = logits + noise * z
    if tau != 1.0:
        logits = logits / tau
    probs_gumbel = torch.softmax(logits, dim=dim)
    if hard:
        _, argmax_indices = torch.max(probs_gumbel, dim=dim)
        hard_argmax_onehot = to_one_hot(argmax_indices, depth=logits.shape[dim])
        # to_one_hot appends the new axis last; permute it back to `dim`
        # when the softmax dimension is not the last one.
        if dim != -1 and dim != len(logits.shape) - 1:
            new_dim_order = list(range(len(logits.shape) - 1))
            new_dim_order.insert(dim, -1)
            hard_argmax_onehot = hard_argmax_onehot.permute(*new_dim_order)
        # forward pass: onehot sample, backward pass: gumbel softmax
        probs_gumbel = (hard_argmax_onehot - probs_gumbel).detach() + probs_gumbel
    return probs_gumbel
def gumbel_sigmoid(logits, tau=1.0, noise=1.0, hard=False, **kwargs):
    """
    A special case of gumbel softmax with 2 classes: [logit] and 0
    :param logits: sigmoid inputs
    :param tau: same as gumbel softmax temperature
    :param noise: scale of the gumbel perturbation (0 disables it)
    :param hard: if True, works like bernoulli sample for forward pass,
        gumbel sigmoid for backward pass
    :return: tensor with same shape as logits
    """
    if noise != 0.0:
        # Difference of two Gumbels ~ logistic noise, matching the 2-class case.
        z1 = gumbel_noise(*logits.shape, device=logits.device, dtype=logits.dtype)
        z2 = gumbel_noise(*logits.shape, device=logits.device, dtype=logits.dtype)
        logits = logits + noise * (z1 - z2)
    if tau != 1.0:
        # Out-of-place divide: the original `logits /= tau` mutated the
        # caller's tensor when noise == 0 (and errors on leaves needing grad).
        logits = logits / tau
    sigm = torch.sigmoid(logits)
    if hard:
        # Straight-through: hard 0/1 value forward, sigmoid gradient backward.
        hard_sample = torch.ge(sigm, 0.5).to(dtype=logits.dtype)
        sigm = (hard_sample - sigm).detach() + sigm
    return sigm
| en | 000090466_xtinkt-editable_gumbel_5e527fbaff90.py | unknown | 770 |
# Copyright 2020 The Nomulus Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper for managing Nomulus deployment records on GCS."""
from typing import Dict, FrozenSet, Set
from google.cloud import storage
import common
def _get_version_map_name(env: str):
return f'nomulus.{env}.versions'
def _get_schema_tag_file(env: str):
return f'sql.{env}.tag'
class GcsClient:
    """Manages Nomulus deployment records on GCS."""

    def __init__(self, project: str, gcs_client=None) -> None:
        """Initializes the instance for a GCP project.

        Args:
            project: The GCP project with Nomulus deployment records.
            gcs_client: Optional API client to use.
        """
        self._project = project
        if gcs_client is not None:
            # Injected client (e.g. a test double).
            self._client = gcs_client
        else:
            self._client = storage.Client(self._project)

    @property
    def project(self) -> str:
        """The GCP project whose deployment records this client reads."""
        return self._project

    def _get_deploy_bucket_name(self) -> str:
        # Deployment records live in a per-project '<project>-deployed-tags' bucket.
        return f'{self._project}-deployed-tags'

    def _get_release_to_version_mapping(
            self, env: str) -> Dict[common.VersionKey, str]:
        """Returns the content of the release to version mapping file.

        File content is returned in utf-8 encoding. Each line in the file is
        in this format:
        '{RELEASE_TAG},{APP_ENGINE_SERVICE_ID},{APP_ENGINE_VERSION}'.
        """
        file_content = self._client.get_bucket(
            self._get_deploy_bucket_name()).get_blob(
                _get_version_map_name(env)).download_as_text()
        mapping = {}
        for line in file_content.splitlines(False):
            tag, service_id, version_id = line.split(',')
            mapping[common.VersionKey(service_id, version_id)] = tag
        return mapping

    def get_versions_by_release(self, env: str,
                                nom_tag: str) -> FrozenSet[common.VersionKey]:
        """Returns AppEngine version ids of a given Nomulus release tag.

        Fetches the version mapping file maintained by the deployment process
        and parses its content into a collection of VersionKey instances.

        A release may map to multiple versions in a service if it has been
        deployed multiple times. This is not intended behavior and may only
        happen by mistake.

        Args:
            env: The environment of the deployed release, e.g., sandbox.
            nom_tag: The Nomulus release tag.

        Returns:
            An immutable set of VersionKey instances.
        """
        mapping = self._get_release_to_version_mapping(env)
        return frozenset(
            [version for version in mapping if mapping[version] == nom_tag])

    def get_releases_by_versions(
            self, env: str,
            versions: Set[common.VersionKey]) -> Dict[common.VersionKey, str]:
        """Gets the release tags of the AppEngine versions.

        Args:
            env: The environment of the deployed release, e.g., sandbox.
            versions: The AppEngine versions.

        Returns:
            A mapping of versions to release tags.
        """
        mapping = self._get_release_to_version_mapping(env)
        return {
            version: tag
            for version, tag in mapping.items() if version in versions
        }

    def get_recent_deployments(
            self, env: str, num_records: int) -> Dict[common.VersionKey, str]:
        """Gets the most recent deployment records.

        Deployment records are stored in a file, with one line per service.
        Caller should adjust num_records according to the number of services
        in AppEngine.

        Args:
            env: The environment of the deployed release, e.g., sandbox.
            num_records: the number of lines to go back.
        """
        file_content = self._client.get_bucket(
            self._get_deploy_bucket_name()).get_blob(
                _get_version_map_name(env)).download_as_text()
        mapping = {}
        # Records are appended chronologically, so the last lines are newest.
        for line in file_content.splitlines(False)[-num_records:]:
            tag, service_id, version_id = line.split(',')
            mapping[common.VersionKey(service_id, version_id)] = tag
        return mapping

    def get_schema_tag(self, env: str) -> str:
        """Gets the release tag of the SQL schema in the given environment.

        This tag is needed for the server/schema compatibility test.
        """
        file_content = self._client.get_bucket(
            self._get_deploy_bucket_name()).get_blob(
                _get_schema_tag_file(env)).download_as_text().splitlines(False)
        # The tag file is expected to hold exactly one line.
        assert len(
            file_content
        ) == 1, f'Unexpected content in {_get_schema_tag_file(env)}.'
        return file_content[0]
| # Copyright 2020 The Nomulus Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper for managing Nomulus deployment records on GCS."""
from typing import Dict, FrozenSet, Set
from google.cloud import storage
import common
def _get_version_map_name(env: str):
return f'nomulus.{env}.versions'
def _get_schema_tag_file(env: str):
return f'sql.{env}.tag'
class GcsClient:
    """Manages Nomulus deployment records on GCS."""

    def __init__(self, project: str, gcs_client=None) -> None:
        """Initializes the instance for a GCP project.

        Args:
            project: The GCP project with Nomulus deployment records.
            gcs_client: Optional API client to use.
        """
        self._project = project
        if gcs_client is not None:
            # Injected client (e.g. a test double).
            self._client = gcs_client
        else:
            self._client = storage.Client(self._project)

    @property
    def project(self) -> str:
        """The GCP project whose deployment records this client reads."""
        return self._project

    def _get_deploy_bucket_name(self) -> str:
        # Deployment records live in a per-project '<project>-deployed-tags' bucket.
        return f'{self._project}-deployed-tags'

    def _get_release_to_version_mapping(
            self, env: str) -> Dict[common.VersionKey, str]:
        """Returns the content of the release to version mapping file.

        File content is returned in utf-8 encoding. Each line in the file is
        in this format:
        '{RELEASE_TAG},{APP_ENGINE_SERVICE_ID},{APP_ENGINE_VERSION}'.
        """
        file_content = self._client.get_bucket(
            self._get_deploy_bucket_name()).get_blob(
                _get_version_map_name(env)).download_as_text()
        mapping = {}
        for line in file_content.splitlines(False):
            tag, service_id, version_id = line.split(',')
            mapping[common.VersionKey(service_id, version_id)] = tag
        return mapping

    def get_versions_by_release(self, env: str,
                                nom_tag: str) -> FrozenSet[common.VersionKey]:
        """Returns AppEngine version ids of a given Nomulus release tag.

        Fetches the version mapping file maintained by the deployment process
        and parses its content into a collection of VersionKey instances.

        A release may map to multiple versions in a service if it has been
        deployed multiple times. This is not intended behavior and may only
        happen by mistake.

        Args:
            env: The environment of the deployed release, e.g., sandbox.
            nom_tag: The Nomulus release tag.

        Returns:
            An immutable set of VersionKey instances.
        """
        mapping = self._get_release_to_version_mapping(env)
        return frozenset(
            [version for version in mapping if mapping[version] == nom_tag])

    def get_releases_by_versions(
            self, env: str,
            versions: Set[common.VersionKey]) -> Dict[common.VersionKey, str]:
        """Gets the release tags of the AppEngine versions.

        Args:
            env: The environment of the deployed release, e.g., sandbox.
            versions: The AppEngine versions.

        Returns:
            A mapping of versions to release tags.
        """
        mapping = self._get_release_to_version_mapping(env)
        return {
            version: tag
            for version, tag in mapping.items() if version in versions
        }

    def get_recent_deployments(
            self, env: str, num_records: int) -> Dict[common.VersionKey, str]:
        """Gets the most recent deployment records.

        Deployment records are stored in a file, with one line per service.
        Caller should adjust num_records according to the number of services
        in AppEngine.

        Args:
            env: The environment of the deployed release, e.g., sandbox.
            num_records: the number of lines to go back.
        """
        file_content = self._client.get_bucket(
            self._get_deploy_bucket_name()).get_blob(
                _get_version_map_name(env)).download_as_text()
        mapping = {}
        # Records are appended chronologically, so the last lines are newest.
        for line in file_content.splitlines(False)[-num_records:]:
            tag, service_id, version_id = line.split(',')
            mapping[common.VersionKey(service_id, version_id)] = tag
        return mapping

    def get_schema_tag(self, env: str) -> str:
        """Gets the release tag of the SQL schema in the given environment.

        This tag is needed for the server/schema compatibility test.
        """
        file_content = self._client.get_bucket(
            self._get_deploy_bucket_name()).get_blob(
                _get_schema_tag_file(env)).download_as_text().splitlines(False)
        # The tag file is expected to hold exactly one line.
        assert len(
            file_content
        ) == 1, f'Unexpected content in {_get_schema_tag_file(env)}.'
        return file_content[0]
| en | 000752442_weiminyu-nomulus_gcs_aaf700f52c97.py | unknown | 1,374 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.