# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import json
from pants.option.errors import ParseError
def _parse_error(s, msg):
"""Return a ParseError with a usefully formatted message, for the caller to throw.
:param s: The option value we're parsing.
:param msg: An extra message to add to the ParseError.
"""
return ParseError('Error while parsing option value {0}: {1}'.format(s, msg))
def dict_type(s):
"""An option of type 'dict'.
The value (on the command-line, in an env var or in the config file) must be a JSON object.
"""
if isinstance(s, dict):
return s
try:
ret = json.loads(s)
except ValueError as e:
raise _parse_error(s, e.message)
if not isinstance(ret, dict):
raise _parse_error(s, 'Value is not dict')
return ret
def list_type(s):
"""An option of type 'list'.
The value (on the command-line, in an env var or in the config file) must be a JSON list.
"""
if isinstance(s, (list, tuple)):
return s
try:
ret = json.loads(s)
except ValueError as e:
raise _parse_error(s, e.message)
if not isinstance(ret, list):
raise _parse_error(s, 'Value is not list')
return ret
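# Illustrative usage (a sketch, not part of the original Pants module): both helpers
# accept JSON text and return the parsed container, raising ParseError otherwise, e.g.
# dict_type('{"jvm_options": ["-Xmx1g"]}') -> {'jvm_options': ['-Xmx1g']}
# list_type('["a", "b", "c"]') -> ['a', 'b', 'c']
# dict_type('[1, 2]') raises ParseError ("Value is not dict").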
|
#from .geometry import Scope
class ContextSimple():
def getStartObj(self):
return 0
def split(self, obj):
print("hello")
print(obj)
return (obj, obj+1)
def colour(self, obj, colour):
print("yo")
print(obj)
print(colour)
|
from rob.cli import cli
from rob.console import print_
# Entry point for application
cli() # pylint: disable=no-value-for-parameter
# TODO: should we try to clean up library and filesystem if left in an inconsistent state?
print_("")
|
# -*- coding: utf-8 -*-
"""
This module contains the flask view functions for the
web application.
:Authors: Balwinder Sodhi
"""
from flask import current_app as app
from flask import (Flask, jsonify, request, session)
from flask.blueprints import Blueprint
from werkzeug.utils import secure_filename
from passlib.hash import pbkdf2_sha256
from common.auth import auth_check
from common.utils import _process_photos_zip, _get_upload_folder, _update_entity, _save_entity, _ok_json, _error_json, _np_to_json, merge_form_to_model, random_str
from models.user import *
from models.known_face import *
from playhouse.shortcuts import *
from datetime import datetime as DT
import logging
import json
import os
import csv
from pathlib import Path
from zipfile import ZipFile
import base64
import frecapi as fapi
import numpy as np
from config import cfg
logger = logging.getLogger('peewee')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
B64_HDR = cfg["B64_HDR"]
# For pagination
PAGE_SIZE = cfg["PAGE_SIZE"]
vbp = Blueprint('kbp', __name__, template_folder='templates')
@auth_check
def kface_delete():
try:
fd = request.get_json(force=True)
ids = fd.get("ids")
rc = 0
if ids:
rc = KnownFace.delete().where(KnownFace.id << ids).execute()
return _ok_json("Deleted {} records.".format(rc))
except Exception as ex:
msg = "Error occurred when deleting faces."
logging.exception(msg)
return _error_json(msg)
@auth_check
def kface_find():
try:
fd = request.get_json(force=True)
pg_no = int(fd.get('pg_no', 1))
query = KnownFace.select().join(User)
if fd.get("first_name"):
query = query.where(User.first_name.contains(fd.get("first_name")))
if fd.get("login_id"):
query = query.where(User.login_id.contains(fd.get("login_id")))
faces = query.order_by(KnownFace.id).paginate(pg_no, PAGE_SIZE)
serialized = [model_to_dict(
r, exclude=[KnownFace.user.password_hashed]) for r in faces]
has_next = len(faces) >= PAGE_SIZE
res = {"faces": serialized, "pg_no": pg_no, "pg_size": PAGE_SIZE,
"has_next": has_next}
return _ok_json(res)
except Exception as ex:
msg = "Error when finding known faces."
logging.exception(msg)
return _error_json(msg)
@auth_check
def kface_view(id=None):
try:
kf = KnownFace.get_by_id(id)
if kf:
obj = model_to_dict(kf, exclude=[KnownFace.user.password_hashed])
return _ok_json(obj)
else:
return _error_json("Record not found for ID {}".format(id))
except Exception as ex:
msg = "Error when fetching known face."
logging.exception(msg)
return _error_json("{0}: {1}".format(msg, ex))
@auth_check
def kface_bulk_add():
try:
zipf = request.files['zip_file']
if zipf.filename == '':
return _error_json("No file supplied!")
filename = secure_filename(zipf.filename)
file_path = os.path.join(_get_upload_folder(), filename)
zipf.save(file_path)
recs = _process_photos_zip(file_path)
return _ok_json("Saved {0} photos from file {1}.".format(recs, filename))
except Exception as ex:
msg = "Error when handling ZIP file."
logging.exception(msg)
return _error_json("{0}: {1}".format(msg, ex))
@auth_check
def kface_save():
try:
fd = request.get_json(force=True)
logging.info("Saving kface: {}".format(fd))
sid = int(fd.get("id") or 0)
face_enc = None
if fd.get("photo"):
ph = fd.get("photo")
if ph.startswith(B64_HDR):
photo_b64 = ph[len(B64_HDR):]
face_enc = fapi.get_face_encoding_b64(photo_b64)
else:
raise Exception(
"Please supply a JPG format image. Mere renaming to .jpg won't work!")
else:
logging.debug("No photo supplied.")
kf = KnownFace()
if sid:
kf = KnownFace.get_by_id(sid)
# TODO: Check ownership
merge_form_to_model(kf, fd)
kf.face_enc = _np_to_json(face_enc)
with db.transaction() as txn:
try:
rc = _update_entity(KnownFace, kf, exclude=[KnownFace.user])
usr = kf.user
merge_form_to_model(usr, fd["user"])
rc += _update_entity(User, usr, exclude=[User.role, User.password_hashed])
if rc != 2:
raise IntegrityError("Could not update. Please try again.")
txn.commit()
except DatabaseError as dbe:
txn.rollback()
raise dbe
logging.debug("Updated known face: {}".format(kf))
else:
with db.transaction() as txn:
try:
u = User()
merge_form_to_model(u, fd["user"])
u.password_hashed = pbkdf2_sha256.hash(random_str(10))
_save_entity(u)
merge_form_to_model(kf, fd)
kf.user = u
kf.face_enc = _np_to_json(face_enc)
_save_entity(kf)
txn.commit()
except DatabaseError as dbe:
txn.rollback()
raise dbe
logging.info("Inserted: {}".format(kf))
return _ok_json(model_to_dict(kf))
except Exception as ex:
msg = "Error when saving known face."
logging.exception(msg)
return _error_json("{0}: {1}".format(msg, ex))
############################################################################################
# Only kept for demonstration purposes, to show how much has been refactored.
# These functions have been refactored and imported from common/utils.py now.
##########################################################################################
def _process_photos_zip(zip_file):
recs = 0
try:
with ZipFile(zip_file) as myzip:
zitems = [x for x in myzip.namelist()
if x.lower().endswith(".jpg") and "MACOSX" not in x]
logging.debug("ZIP file {0} contains {1} items.".format(
zip_file, len(zitems)))
for zn in zitems:
try:
logging.debug("Extracting JPG from ZIP entry: "+str(zn))
with myzip.open(zn) as zf:
logging.debug("Processing ZIP entry: {}".format(zn))
photo = zf.read()
if not photo:
logging.warning(
"Photo not found in ZIP entry: {}".format(zn))
continue
# login_id.jpg
login_id = zn.split(".")[0]
kf = KnownFace()
u = User.select().where(User.login_id == login_id).first()
kf.user = u
fenc = fapi.get_face_encoding(photo)
kf.face_enc = _np_to_json(fenc)
kf.photo = "{0}{1}".format(B64_HDR,
base64.b64encode(photo).decode())
_save_entity(kf)
recs += 1
except Exception as ex:
logging.exception("Error when processing photo. "+str(ex))
except Exception as ex:
logging.exception("Error when processing ZIP file. "+str(ex))
return recs
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
def _np_to_json(obj):
return json.dumps({'obj': obj}, cls=NumpyEncoder)
def _ok_json(obj):
return jsonify({"status": "OK", "body": obj})
def _error_json(obj):
return jsonify({"status": "ERROR", "body": obj})
def _save_entity(ent):
curr_user = _logged_in_user()
ent.txn_login_id = curr_user.login_id if curr_user else "None"
ent.upd_ts = DT.now()
ent.ins_ts = DT.now()
ent.save()
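# Note: _update_entity below implements optimistic locking. It bumps txn_no, and the
# UPDATE only applies while the row still holds the previous txn_no, so a concurrent
# edit makes the update affect zero rows instead of silently overwriting it.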
def _update_entity(Ent, obj, exclude=None):
txn_no = int(obj.txn_no)
obj.txn_no = 1 + txn_no
obj.upd_ts = DT.now()
obj.txn_login_id = _logged_in_user().login_id
return Ent.update(model_to_dict(obj, recurse=False,
exclude=exclude)).where(
(Ent.txn_no == obj.txn_no - 1) &
(Ent.id == obj.id)).execute()
def _get_upload_folder():
# Ensure that the uploads folder for this user exists
uf = os.path.join(app.config['UPLOAD_FOLDER'], session["user"]["login_id"])
Path(uf).mkdir(parents=True, exist_ok=True)
return uf
def _logged_in_user():
if "user" in session:
u = session['user']
return User.get(User.login_id == u["login_id"])
|
# License: MIT
import time
import sys
import traceback
from openbox.utils.constants import MAXINT, SUCCESS, FAILED, TIMEOUT
from openbox.utils.limit import time_limit, TimeoutException
from openbox.core.message_queue.worker_messager import WorkerMessager
class mqmfWorker(object):
"""
message queue worker for multi-fidelity optimization
"""
def __init__(self, objective_function, ip="127.0.0.1", port=13579, authkey=b'abc'):
self.objective_function = objective_function
self.worker_messager = WorkerMessager(ip, port, authkey=authkey)
def run(self):
while True:
# Get config
try:
msg = self.worker_messager.receive_message()
except Exception as e:
print("Worker receive message error:", str(e))
return
if msg is None:
# Wait for configs
time.sleep(1)
continue
print("Worker: get config. start working.")
config, extra_conf, time_limit_per_trial, n_iteration, trial_id = msg
# Start working
start_time = time.time()
trial_state = SUCCESS
ref_id = None
early_stop = False
try:
args, kwargs = (config, n_iteration, extra_conf), dict()
timeout_status, _result = time_limit(self.objective_function,
time_limit_per_trial,
args=args, kwargs=kwargs)
if timeout_status:
raise TimeoutException(
'Timeout: time limit for this evaluation is %.1fs' % time_limit_per_trial)
else:
if _result is None:
perf = MAXINT
elif isinstance(_result, dict):
perf = _result['objective_value']
if perf is None:
perf = MAXINT
ref_id = _result.get('ref_id', None)
early_stop = _result.get('early_stop', False)
else:
perf = _result
except Exception as e:
if isinstance(e, TimeoutException):
trial_state = TIMEOUT
else:
traceback.print_exc(file=sys.stdout)
trial_state = FAILED
perf = MAXINT
time_taken = time.time() - start_time
return_info = dict(loss=perf,
n_iteration=n_iteration,
ref_id=ref_id,
early_stop=early_stop,
trial_state=trial_state)
observation = [return_info, time_taken, trial_id, config]
# Send result
print("Worker: perf=%f. time=%d. sending result." % (perf, int(time_taken)))
try:
self.worker_messager.send_message(observation)
except Exception as e:
print("Worker send message error:", str(e))
return
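# Illustrative usage (a sketch, not part of the original module): the objective function
# is called as objective_function(config, n_iteration, extra_conf) and may return either
# a plain number or a dict containing 'objective_value' (and optionally 'ref_id' and
# 'early_stop'), matching the result handling above.
#
# def my_objective(config, n_iteration, extra_conf):
#     loss = train_and_evaluate(config, budget=n_iteration)  # hypothetical helper
#     return {'objective_value': loss}
#
# worker = mqmfWorker(my_objective, ip="127.0.0.1", port=13579, authkey=b'abc')
# worker.run()  # blocks, polling the master for configurations and sending results back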
|
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from main.models import User
from main.defaults import get_default_username
from sms.lib import format_number
class UserSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
username = serializers.CharField(allow_blank=True, required=False)
password = serializers.CharField(
allow_blank=True, write_only=True, style={'input_type': 'password', 'placeholder': 'Password'}, required=False
)
email = serializers.EmailField(allow_blank=True, required=False)
phone = serializers.CharField(allow_blank=True, required=False)
photo = serializers.FileField(read_only=True, allow_null=True)
verified_email = serializers.EmailField(read_only=True)
verified_phone = serializers.CharField(read_only=True)
class Meta:
model = User
fields = (
'id', 'first_name', 'last_name', 'email', 'phone', 'language', 'password', 'username', 'title', 'photo',
'verified_email', 'verified_phone',
)
def validate(self, attrs):
if not self.instance: # only enforced on create; when updating, these fields are not required.
if 'email' not in attrs and 'phone' not in attrs:
raise serializers.ValidationError(_('email or phone number is required.'))
return attrs
def validate_password(self, value):
if self.instance: # If we are updating, only return the new password if it was provided else the existing one.
if value:
return value
else:
return self.instance.password
else: # If we are creating a new object, ensure value is provided else raise validation error.
if value:
return value
else:
raise serializers.ValidationError(_('password is required.'))
def create(self, validated_data):
user = super(UserSerializer, self).create(validated_data)
user.set_password(user.password)
user.save()
return user
def update(self, instance, validated_data):
user = super(UserSerializer, self).update(instance, validated_data)
if 'password' in validated_data: # if a new password was set, encrypt it.
user.set_password(user.password)
user.save()
return user
def validate_email(self, value):
# Checks to ensure the email provided does not exist already.
if not self.instance: # If we are creating a new user, validate the email
if value and User.objects.filter(email=value).exists():
raise serializers.ValidationError(_('an account with this email already exists.'))
else:
return value
else:
# Else if we are updating, only validate email if the email is different.
if self.instance.email == value: # then do not validate.
return value
else: # The user has updated the email field, validate the new email.
if User.objects.filter(email=value).exists():
raise serializers.ValidationError(_('an account with this email already exists.'))
else:
return value
def validate_phone(self, value):
# Checks to ensure the phone number provided does not already exist and that it is well formatted.
formatted = format_number(value)
if formatted:
value = formatted
else:
raise serializers.ValidationError(_('phone number is invalid.'))
if not self.instance: # If we are creating a new user, validate the email
if value and User.objects.filter(phone=value).exists():
raise serializers.ValidationError(_('an account with this phone number already exists.'))
else:
return value
else:
# Else if we are updating, only validate phone number if the phone number is different.
if self.instance.phone == value: # then do not validate.
return value
else: # The user has updated the phone field, validate the new phone number.
if User.objects.filter(phone=value).exists():
raise serializers.ValidationError(_('an account with this phone number already exists.'))
else:
return value
def validate_username(self, value):
# Checks to ensure the username provided does not already exist.
if not self.instance: # If we are creating a new user, validate the username
if value:
if User.objects.filter(username=value).exists():
raise serializers.ValidationError(_('an account with this username already exists.'))
else:
return value
else:
return get_default_username()
else:
# Else if we are updating, only validate if it's different.
if self.instance.username == value: # then do not validate.
return value
else: # The user has updated the username field, validate the new username.
if User.objects.filter(username=value).exists():
raise serializers.ValidationError(_('an account with this username already exists.'))
else:
return value
class ProfilePictureSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('photo',)
class PasswordResetSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
# code = serializers.CharField(r)
class Meta:
model = User
fields = ('password', 'id', 'code')
def update(self, instance, validated_data):
user = super(PasswordResetSerializer, self).update(instance, validated_data)
if 'password' in validated_data: # if a new password was set, encrypt it.
user.set_password(user.password)
user.save()
return user
|
from zeit.cms.browser.resources import Resource, Library
lib = Library('zeit.campus', 'resources')
Resource('editor.css')
|
# Joshua Nelson Gomes (Joshua)
# CIS 41A Spring 2020
# Take Home Assignment G
def main():
# Part Three – Dictionary Keys and Sets
states = {}
file = open('USPresidents.txt', 'rt')
for line in file:
x = line.split()
if x[0] in states:
states[x[0]] += 1
else:
states[x[0]] = 1
for state in states:
print(f'{state} : {states.get(state)}')
file.close()
if __name__ == '__main__':
main()
'''
Execution Results:
Highest population state in the Midwest is: IL 12802000.
The state with the most presidents is VA with 8 presidents:
George_Washington
James_Madison
James_Monroe
John_Tyler
Thomas_Jefferson
William_Henry_Harrison
Woodrow_Wilson
Zachary_Taylor
8 of the 10 high population states have had presidents born in them:
CA 1
GA 1
IL 1
NC 2
NY 5
OH 7
PA 1
TX 2
'''
|
"""VCD pattern matcher."""
from typing import Dict
from hdltools.patterns.matcher import PatternMatcher
from hdltools.vcd.parser import BaseVCDParser, VAR_PARSER
from hdltools.vcd.variable import VCDVariable
# TODO: extract pattern matching logic and abstract to generic data source
class VCDPatternMatcher(BaseVCDParser):
"""VCD pattern matcher."""
def __init__(
self,
oneshot_patterns: bool = True,
**watchlist: Dict[str, PatternMatcher]
):
"""Initialize.
Arguments
---------
watchlist
A variable name to pattern mapping list
oneshot_patterns
Whether to match a pattern multiple times or not
"""
super().__init__()
for pattern in watchlist.values():
if not isinstance(pattern, PatternMatcher):
raise TypeError(
"values of watchlist mapping must be PatternMatcher objects"
)
# set default callback
if pattern.match_cb is None:
pattern.match_cb = self._match_callback
# store watchlist
self._match_map = watchlist
# internal variable mapping
self._var_map = {}
self._oneshot = oneshot_patterns
def header_statement_handler(self, stmt, fields):
"""Handle header statements."""
if stmt == VAR_PARSER:
# build variable mapping
if fields["name"] in self._match_map:
var = VCDVariable(
fields["id"],
var_type=fields["vtype"],
size=fields["width"],
name=fields["name"],
)
self._var_map[fields["id"]] = var
def initial_value_handler(self, stmt, fields):
"""Handle initial value assignments."""
if fields["var"] in self._var_map:
self._match_map[
self._var_map[fields["var"]].name
].initial = fields["value"]
def value_change_handler(self, stmt, fields):
"""Handle value changes."""
var = (
self._match_map[self._var_map[fields["var"]].name]
if fields["var"] in self._var_map
else None
)
if var is not None:
value = fields["value"]
# try to match
if not self._oneshot or (self._oneshot and not var.finished):
var.new_state(value)
if var.finished and not self._oneshot:
var.restart()
def _match_callback(self, matcher_obj):
"""Match callback."""
|
#!/usr/bin/env python
from argparse import ArgumentParser
import sys
from fsspec.implementations.local import LocalFileSystem
from transformer.transform import Transform
sys.path.append('..')
from disemvowel import disemvowel # noqa
'''
Sample usage:
./main.py banana.txt bnn.txt [-w]
'''
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('src', help="source path")
parser.add_argument('dest', help="destination path")
parser.add_argument('--overwrite',
'-w',
help='write over destination file',
action='store_true')
args = parser.parse_args()
src = args.src
dest = args.dest
# local file system and local files
fs = LocalFileSystem()
tr = Transform(src_fs=fs, dest_fs=fs, overwrite=args.overwrite)
tr(src, dest, disemvowel, [])
with fs.open(dest, 'r') as rdr:
for line in rdr:
for c in line:
assert(c.lower() not in 'aeiou')
|
# Generated by Django 2.2.13 on 2020-06-25 14:05
from django.contrib.auth.models import User
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0054_blog_author'),
]
def add_blog_authors(apps, schema_editor):
BlogAuthor = apps.get_model('cms', 'BlogAuthor')
BlogPost = apps.get_model('cms', 'BlogPost')
# Add all current users as authors username first_name last_name
for user in User.objects.all():
author, created = BlogAuthor.objects.get_or_create(
author_name=user.username,
first_name=user.first_name,
last_name=user.last_name
)
for post in BlogPost.objects.filter(owner__pk=user.pk):
post.author = author
print("{}:{}\n".format(author.author_name, post))
post.save()
operations = [
migrations.RunPython(add_blog_authors),
]
|
#!/usr/bin/env python3
#
# Solve a crackme using instruction counting. Works if the flag is
# checked byte for byte.
#
# Requires pin and the pintools from the ManualExamples folder.
#
import sys
import os
def run(data):
with open('input', 'wb') as f:
f.write(bytes(data))
os.system("../../../../pin -t inscount0.so -- ~/crackme < input > /dev/null")
with open('inscount.out', 'r') as f:
# TODO pintool needs to be modified for this to work
return int(f.read())
flag = [0x00] * 80
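# Instruction-counting side channel: for each flag position, try every printable byte
# and keep the guess that makes the target execute more instructions than the baseline,
# on the assumption that a correct byte lets the byte-by-byte check run one step further.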
for i in range(len(flag)):
basecount = run(flag)
for b in range(0x20, 0x80):
flag[i] = b
if run(flag) > basecount:
print("flag[{}] = {}".format(i, chr(b)))
break
else:
print("Failed")
sys.exit(-1)
print(bytes(flag))
|
import xml.dom.minidom
import type_mapper
class adiosConfig:
def __init__ (self, config_file_name):
self.config_file_name = config_file_name
#This would be a good time to parse the file...
doc = xml.dom.minidom.parse (config_file_name)
nodes = doc.childNodes
if (nodes.length != 1):
print 'malformed adios config file, should contain only a single adios-config element'
raise SystemExit
self.config_node = nodes[0]
# for each of the groups, instantiate an adiosGroup object, and store in self.adios_groups
self.adios_groups = []
self.methods = []
self.buffer = None
for node in self.config_node.getElementsByTagName ('adios-group'):
self.adios_groups.append (adiosGroup (node) )
for node in self.config_node.getElementsByTagName ('method'):
self.methods.append (method (node) )
for node in self.config_node.getElementsByTagName ('buffer'):
# there should be only one of these... this code ignores all but the last one.
self.buffer = buffer (node)
# We are currently ignoring any analysis declarations
def get_filename (self):
return self.config_file_name
def get_groups (self):
return self.adios_groups
def get_buffer (self):
#return the buffer info
print 'get_buffer is not yet implemented'
def get_host_language (self):
return self.config_node.getAttribute ('host-language')
class adiosGroup:
def __init__ (self, group_node):
self.group_node = group_node
self.time_index = self.group_node.getAttribute ('time-index')
self.vars = []
self.vardict = {}
self.vars_and_gwrites_and_attrs = []
self.attrs = []
self.attrdict = {}
for node in self.group_node.childNodes:
if node.localName == 'var':
newvar = var (node, self, self.time_index)
self.vars.append (newvar)
#print 'Add to dict local var ['+newvar.get_fullpath()+']'
self.vardict [newvar.get_fullpath()] = newvar
self.vars_and_gwrites_and_attrs.append (newvar)
#elif node.localName == 'attribute':
#handle attribute
elif node.localName == 'gwrite':
self.vars_and_gwrites_and_attrs.append (gwrite (node) )
elif node.localName == 'global-bounds':
for gb_node in node.childNodes:
if gb_node.localName == 'var':
newvar = var (gb_node, self, self.time_index)
self.vars.append (newvar)
#print 'Add to dict global var ['+newvar.get_fullpath()+']'
self.vardict [newvar.get_fullpath()] = newvar
self.vars_and_gwrites_and_attrs.append (newvar)
elif gb_node.localName == 'gwrite':
self.vars_and_gwrites_and_attrs.append (gwrite (gb_node) )
elif node.localName == 'attribute':
newattr = attr (node)
self.attrs.append (newattr)
self.attrdict [newattr.get_name()] = newattr
self.vars_and_gwrites_and_attrs.append (newattr)
# Returns the name of the group
def get_name (self):
return self.group_node.getAttribute ('name')
# Returns a list of var objects for all of the variables in the group
def get_vars (self):
return self.vars
# Returns the variable from this group with the specified name, or None
def get_var (self, varfullpath):
#print ' get_var('+varfullpath+')'
if self.vardict.has_key (varfullpath):
return self.vardict [varfullpath]
return None
# Returns a list containing all of the vars and gwrites and attributes in the same order
# as was specified in the xml
def get_ordered_contents (self):
return self.vars_and_gwrites_and_attrs
class gwrite:
def __init__(self, gwrite_node):
self.gwrite_text = gwrite_node.getAttribute ('src')
def get_src (self):
return self.gwrite_text
class method:
def __init__ (self, method_node):
self.method_node = method_node
class buffer:
def __init__ (self, buffer_node):
self.buffer_node = buffer_node
class var:
def __init__ (self, var_node, group, time_index=None, global_bounds_node=None):
self.var_node = var_node
self.group = group
self.time_index = time_index
self.global_bounds_node = global_bounds_node
def get_path (self):
path = self.var_node.getAttribute ('path')
return path
def get_name (self):
name = self.var_node.getAttribute ('name')
return name
def get_fullpath (self):
path = self.get_path()
name = self.get_name()
if (path == ''):
fullpath = name
elif (path[-1:] == '/'):
fullpath = path + name
else:
fullpath = path + '/' + name
return fullpath
def get_gwrite (self):
gw = self.var_node.getAttribute ('gwrite')
if (gw == ''):
gw = self.get_name()
return gw
def get_group (self):
return self.group
def get_c_type (self):
return type_mapper.get_c_type (self.var_node.getAttribute ('type') )
def get_type (self):
return self.var_node.getAttribute ('type')
def get_dimensions (self):
if (self.var_node.getAttribute ('dimensions') == ''):
return None
else:
# place the dimensions in a list and remove the time-index if it is there.
dims = filter (lambda x : x != self.time_index, self.var_node.getAttribute ('dimensions').split(',') )
cleandims = []
#print ' get_dimensions of var '+self.get_fullpath()
for d in dims:
#print ' dim "'+str(d)+'"'
if d.isdigit():
cleandims.append (d)
continue
# Here we need to translate the variable name for this dimension (if it's a var) into the gwrite
# for that variable
dim_var = self.get_group().get_var (d)
if dim_var != None:
#print ' dim var found, get name...'
d = dim_var.get_gwrite()
#else:
#print ' dim var NOT found'
cleandims.append (d)
return cleandims
def is_scalar (self):
return self.get_dimensions() == None
class attr:
def __init__ (self, attr_node):
self.attr_node = attr_node
def get_name (self):
return self.attr_node.getAttribute ('name')
|
# Copyright (c) <2011>, Steven Caron <steven@steven-caron.com>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Shotgun Note
This is a cut-down, modified version of the original that just maps
Softimage KeyUp & KeyDown events to the equivalent QKeyEvent in Qt.
Additionally it uses the qt module from sgtk.platform.qt rather than
directly importing PySide/PyQT
"""
import sys
import win32com
from win32com.client import constants
def XSILoadPlugin( in_reg ):
"""
Plug-in Load
"""
in_reg.Author = "Shotgun Software"
in_reg.Name = "Shotgun Qt Keyboard Event Handlers"
in_reg.Major = 1
in_reg.Minor = 0
"""
import sys
path = in_reg.OriginPath
if path not in sys.path:
sys.path.append( path )
"""
# register Shotgun specific events - this avoids possible
# conflict if PyQtForSoftimage is also loaded!
in_reg.RegisterEvent( "Shotgun Qt Events KeyDown", constants.siOnKeyDown )
in_reg.RegisterEvent( "Shotgun Qt Events KeyUp", constants.siOnKeyUp )
# also, register a timer event to ensure the Qt event loop is
# processed at some stage!
#
# The effect of not processing events frequently is more noticeable on
# Linux whilst processing too frequently can result in odd behaviour on
# Windows, hence the different frequencies!
timer_frequency = 1000
if sys.platform == "win32":
timer_frequency = 1000
elif sys.platform == "linux2":
timer_frequency = 20
in_reg.RegisterTimerEvent("Shotgun Qt Event Loop", timer_frequency, 0)
return True
def XSIUnloadPlugin( in_reg ):
"""
Plug-in Unload
"""
Application.LogMessage( in_reg.Name + " has been unloaded.",constants.siVerbose)
return True
#########################################################################################################################
def ShotgunQtEventLoop_OnEvent(in_ctxt):
"""
Process QApplication events in a Softimage
timer event just to be on the safe side!
"""
try:
import sgtk
from sgtk.platform.qt import QtGui
QtGui.QApplication.processEvents()
QtGui.QApplication.sendPostedEvents(None, 0)
#QtGui.QApplication.flush()
#Application.Desktop.RedrawUI()
except:
pass
def ShotgunQtEventsKeyDown_OnEvent( in_ctxt ):
"""
Block XSI keys from processing, pass along to Qt
"""
if _is_qt_widget_focused():
# process the key
_consume_key( in_ctxt, True )
# Block the Signal from XSI
in_ctxt.SetAttribute( 'Consumed', True )
return True
def ShotgunQtEventsKeyUp_OnEvent( in_ctxt ):
"""
Block XSI keys from processing, pass along to Qt
"""
if _is_qt_widget_focused():
# process the key
_consume_key( in_ctxt, False )
# Block the Signal from XSI
in_ctxt.SetAttribute( 'Consumed', True )
return True
_SI_TO_QT_KEY_MAP = None
def _get_key_map():
"""
Return the key map - fill it out if this is the first
time it's been requested!
"""
global _SI_TO_QT_KEY_MAP
if _SI_TO_QT_KEY_MAP == None:
from sgtk.platform.qt import QtCore
_SI_TO_QT_KEY_MAP = {
# key: ( Qt::Key, ascii, modifiers )
8: ( QtCore.Qt.Key_Backspace, '', None ),
9: ( QtCore.Qt.Key_Tab, '\t', None ),
13: ( QtCore.Qt.Key_Enter, '\n', None ),
16: ( QtCore.Qt.Key_Shift, '', None ),
17: ( QtCore.Qt.Key_Control, '', None ),
18: ( QtCore.Qt.Key_Alt, '', None ),
19: ( QtCore.Qt.Key_Pause, '', None ),
20: ( QtCore.Qt.Key_CapsLock, '', None ),
27: ( QtCore.Qt.Key_Escape, '', None ),
32: ( QtCore.Qt.Key_Space, ' ', None ),
33: ( QtCore.Qt.Key_PageUp, '', None ),
34: ( QtCore.Qt.Key_PageDown, '', None ),
35: ( QtCore.Qt.Key_End, '', None ),
36: ( QtCore.Qt.Key_Home, '', None ),
37: ( QtCore.Qt.Key_Left, '', None ),
38: ( QtCore.Qt.Key_Up, '', None ),
39: ( QtCore.Qt.Key_Right, '', None ),
40: ( QtCore.Qt.Key_Down, '', None ),
44: ( QtCore.Qt.Key_SysReq, '', None ),
45: ( QtCore.Qt.Key_Insert, '', None ),
46: ( QtCore.Qt.Key_Delete, '', None ),
48: ( QtCore.Qt.Key_0, '0', None ),
49: ( QtCore.Qt.Key_1, '1', None ),
50: ( QtCore.Qt.Key_2, '2', None ),
51: ( QtCore.Qt.Key_3, '3', None ),
52: ( QtCore.Qt.Key_4, '4', None ),
53: ( QtCore.Qt.Key_5, '5', None ),
54: ( QtCore.Qt.Key_6, '6', None ),
55: ( QtCore.Qt.Key_7, '7', None ),
56: ( QtCore.Qt.Key_8, '8', None ),
57: ( QtCore.Qt.Key_9, '9', None ),
65: ( QtCore.Qt.Key_A, 'a', None ),
66: ( QtCore.Qt.Key_B, 'b', None ),
67: ( QtCore.Qt.Key_C, 'c', None ),
68: ( QtCore.Qt.Key_D, 'd', None ),
69: ( QtCore.Qt.Key_E, 'e', None ),
70: ( QtCore.Qt.Key_F, 'f', None ),
71: ( QtCore.Qt.Key_G, 'g', None ),
72: ( QtCore.Qt.Key_H, 'h', None ),
73: ( QtCore.Qt.Key_I, 'i', None ),
74: ( QtCore.Qt.Key_J, 'j', None ),
75: ( QtCore.Qt.Key_K, 'k', None ),
76: ( QtCore.Qt.Key_L, 'l', None ),
77: ( QtCore.Qt.Key_M, 'm', None ),
78: ( QtCore.Qt.Key_N, 'n', None ),
79: ( QtCore.Qt.Key_O, 'o', None ),
80: ( QtCore.Qt.Key_P, 'p', None ),
81: ( QtCore.Qt.Key_Q, 'q', None ),
82: ( QtCore.Qt.Key_R, 'r', None ),
83: ( QtCore.Qt.Key_S, 's', None ),
84: ( QtCore.Qt.Key_T, 't', None ),
85: ( QtCore.Qt.Key_U, 'u', None ),
86: ( QtCore.Qt.Key_V, 'v', None ),
87: ( QtCore.Qt.Key_W, 'w', None ),
88: ( QtCore.Qt.Key_X, 'x', None ),
89: ( QtCore.Qt.Key_Y, 'y', None ),
90: ( QtCore.Qt.Key_Z, 'z', None ),
93: ( QtCore.Qt.Key_Print, '', None ),
96: ( QtCore.Qt.Key_0, '0', QtCore.Qt.KeypadModifier ),
97: ( QtCore.Qt.Key_1, '1', QtCore.Qt.KeypadModifier ),
98: ( QtCore.Qt.Key_2, '2', QtCore.Qt.KeypadModifier ),
99: ( QtCore.Qt.Key_3, '3', QtCore.Qt.KeypadModifier ),
100: ( QtCore.Qt.Key_4, '4', QtCore.Qt.KeypadModifier ),
101: ( QtCore.Qt.Key_5, '5', QtCore.Qt.KeypadModifier ),
102: ( QtCore.Qt.Key_6, '6', QtCore.Qt.KeypadModifier ),
103: ( QtCore.Qt.Key_7, '7', QtCore.Qt.KeypadModifier ),
104: ( QtCore.Qt.Key_8, '8', QtCore.Qt.KeypadModifier ),
105: ( QtCore.Qt.Key_9, '9', QtCore.Qt.KeypadModifier ),
106: ( QtCore.Qt.Key_Asterisk, '*', QtCore.Qt.KeypadModifier ),
107: ( QtCore.Qt.Key_Plus, '+', QtCore.Qt.KeypadModifier ),
109: ( QtCore.Qt.Key_Minus, '-', QtCore.Qt.KeypadModifier ),
110: ( QtCore.Qt.Key_Period, '.', QtCore.Qt.KeypadModifier ),
111: ( QtCore.Qt.Key_Slash, '/', QtCore.Qt.KeypadModifier ),
112: ( QtCore.Qt.Key_F1, '', None ),
113: ( QtCore.Qt.Key_F2, '', None ),
114: ( QtCore.Qt.Key_F3, '', None ),
115: ( QtCore.Qt.Key_F4, '', None ),
116: ( QtCore.Qt.Key_F5, '', None ),
117: ( QtCore.Qt.Key_F6, '', None ),
118: ( QtCore.Qt.Key_F7, '', None ),
119: ( QtCore.Qt.Key_F8, '', None ),
120: ( QtCore.Qt.Key_F9, '', None ),
121: ( QtCore.Qt.Key_F10, '', None ),
122: ( QtCore.Qt.Key_F11, '', None ),
123: ( QtCore.Qt.Key_F12, '', None ),
144: ( QtCore.Qt.Key_NumLock, '', None ),
145: ( QtCore.Qt.Key_ScrollLock, '', None ),
186: ( QtCore.Qt.Key_Semicolon, ';', None ),
187: ( QtCore.Qt.Key_Equal, '=', None ),
188: ( QtCore.Qt.Key_Comma, ',', None ),
189: ( QtCore.Qt.Key_Minus, '-', None ),
190: ( QtCore.Qt.Key_Period, '.', None ),
191: ( QtCore.Qt.Key_Slash, '/', None ),
192: ( QtCore.Qt.Key_QuoteLeft, '`', None ),
219: ( QtCore.Qt.Key_BracketLeft, '[', None ),
220: ( QtCore.Qt.Key_Backslash, '\\', None ),
221: ( QtCore.Qt.Key_BracketRight, ']', None ),
222: ( QtCore.Qt.Key_Apostrophe, "'", None ),
# Calculate the SHIFT key as 300 + key value
348: ( QtCore.Qt.Key_ParenRight, ')', None ), # Shift+0
349: ( QtCore.Qt.Key_Exclam, '!', None ), # Shift+1
350: ( QtCore.Qt.Key_At, '@', None ), # Shift+2
351: ( QtCore.Qt.Key_NumberSign, '#', None ), # Shift+3
352: ( QtCore.Qt.Key_Dollar, '$', None ), # Shift+4
353: ( QtCore.Qt.Key_Percent, '%', None ), # Shift+5
354: ( QtCore.Qt.Key_AsciiCircum, '^', None ), # Shift+6
355: ( QtCore.Qt.Key_Ampersand, '&', None ), # Shift+7
356: ( QtCore.Qt.Key_Asterisk, '*', None ), # Shift+8
357: ( QtCore.Qt.Key_ParenLeft, '(', None ), # Shift+9
365: ( QtCore.Qt.Key_A, 'A', None ),
366: ( QtCore.Qt.Key_B, 'B', None ),
367: ( QtCore.Qt.Key_C, 'C', None ),
368: ( QtCore.Qt.Key_D, 'D', None ),
369: ( QtCore.Qt.Key_E, 'E', None ),
370: ( QtCore.Qt.Key_F, 'F', None ),
371: ( QtCore.Qt.Key_G, 'G', None ),
372: ( QtCore.Qt.Key_H, 'H', None ),
373: ( QtCore.Qt.Key_I, 'I', None ),
374: ( QtCore.Qt.Key_J, 'J', None ),
375: ( QtCore.Qt.Key_K, 'K', None ),
376: ( QtCore.Qt.Key_L, 'L', None ),
377: ( QtCore.Qt.Key_M, 'M', None ),
378: ( QtCore.Qt.Key_N, 'N', None ),
379: ( QtCore.Qt.Key_O, 'O', None ),
380: ( QtCore.Qt.Key_P, 'P', None ),
381: ( QtCore.Qt.Key_Q, 'Q', None ),
382: ( QtCore.Qt.Key_R, 'R', None ),
383: ( QtCore.Qt.Key_S, 'S', None ),
384: ( QtCore.Qt.Key_T, 'T', None ),
385: ( QtCore.Qt.Key_U, 'U', None ),
386: ( QtCore.Qt.Key_V, 'V', None ),
387: ( QtCore.Qt.Key_W, 'W', None ),
388: ( QtCore.Qt.Key_X, 'X', None ),
389: ( QtCore.Qt.Key_Y, 'Y', None ),
390: ( QtCore.Qt.Key_Z, 'Z', None ),
486: ( QtCore.Qt.Key_Colon, ':', None ), # Shift+;
487: ( QtCore.Qt.Key_Plus, '+', None ), # Shift++
488: ( QtCore.Qt.Key_Less, '<', None ), # Shift+,
489: ( QtCore.Qt.Key_Underscore, '_', None ), # Shift+-
490: ( QtCore.Qt.Key_Greater, '>', None ), # Shift+>
491: ( QtCore.Qt.Key_Question, '?', None ), # Shift+?
492: ( QtCore.Qt.Key_AsciiTilde, '~', None ), # Shift+`
519: ( QtCore.Qt.Key_BraceLeft, '{', None ), # Shift+[
520: ( QtCore.Qt.Key_Bar, '|', None ), # Shift+\
521: ( QtCore.Qt.Key_BraceRight, '}', None ), # Shift+]
522: ( QtCore.Qt.Key_QuoteDbl, '"', None ), # Shift+'
}
return _SI_TO_QT_KEY_MAP
def _consume_key( ctxt, pressed ):
"""
Build the proper QKeyEvent from the Softimage key event and send it along to the focused widget
"""
from sgtk.platform.qt import QtCore, QtGui
kcode = ctxt.GetAttribute( 'KeyCode' )
mask = ctxt.GetAttribute( 'ShiftMask' )
# Build the modifiers
modifier = QtCore.Qt.NoModifier
if ( mask & constants.siShiftMask ):
if ( kcode + 300 in _get_key_map() ):
kcode += 300
modifier |= QtCore.Qt.ShiftModifier
if ( mask & constants.siCtrlMask ):
modifier |= QtCore.Qt.ControlModifier
if ( mask & constants.siAltMask ):
modifier |= QtCore.Qt.AltModifier
# Generate a Qt Key Event to be processed
result = _get_key_map().get( kcode )
if ( result ):
if ( pressed ):
event = QtGui.QKeyEvent.KeyPress
else:
event = QtGui.QKeyEvent.KeyRelease
if ( result[2] ):
modifier |= result[2]
# Send the event along to the focused widget
QtGui.QApplication.sendEvent( QtGui.QApplication.instance().focusWidget(), QtGui.QKeyEvent( event, result[0], modifier, result[1] ) )
def _is_qt_widget_focused():
"""
return true if the global qApp has any focused widgets
"""
from sgtk.platform.qt import QtGui
if not QtGui.QApplication.instance():
return False
# get the currently focused widget:
focus_widget = QtGui.QApplication.instance().focusWidget()
if not focus_widget:
return False
# Qt widget will retain focus even if the window it's in
# isn't the foreground window so try to handle this:
import sys
if sys.platform == "win32":
# on Windows, get the foreground window and compare
# to see if it is the Qt window with the focused
# widget:
import win32gui
foreground_hwnd = win32gui.GetForegroundWindow()
window = focus_widget.window()
if not window or not foreground_hwnd:
return False
# need to convert the Qt winId to an HWND
import ctypes
ctypes.pythonapi.PyCObject_AsVoidPtr.restype = ctypes.c_void_p
ctypes.pythonapi.PyCObject_AsVoidPtr.argtypes = [ ctypes.py_object ]
window_hwnd = ctypes.pythonapi.PyCObject_AsVoidPtr(window.winId())
# and compare
if window_hwnd != foreground_hwnd:
return False
else:
# check the cursor is inside the widgets top-level window:
window = focus_widget.window()
if not window or not window.geometry().contains( QtGui.QCursor.pos() ):
return False
return True
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2
from StringIO import StringIO
from lxml import etree
def download_image(image_url, local_folder, folder_index):
ufile = urllib2.urlopen(image_url)
root = etree.parse(ufile, etree.HTMLParser())
dwnld = root.xpath('//div[@class="wallpaper_big"]//div[@class="wb_preview"]//a//img/@src')
f = open(local_folder + "/" + str(folder_index)+".jpg", 'wb')
f.write(urllib2.urlopen("http:"+dwnld[0]).read())
f.close()
def retrieve_image_direct_links(query, page_idx, links_accum):
page_str = ""
if page_idx >= 2:
page_str = "/page" + str(page_idx)
url = "https://wallpaperscraft.com/search/keywords?q=" + query + page_str
ufile = urllib2.urlopen(url)
root = etree.parse(ufile, etree.HTMLParser())
for entry in root.xpath('//div[@class="wallpaper_pre"]//a/@href'):
links_accum.append("https:"+entry)
my_queries = ["forest", "river", "bike", "warmth", "sun", "game", "rain", "fish",
"sea", "travel", "city", "wheel", "train", "bridge", "skirt"]
def main():
image_page_links = []
for mq in my_queries:
for p in range(1, 20):
retrieve_image_direct_links(mq, p, image_page_links)
i = 0
image_file_links = []
for al in image_page_links:
download_image(al, "/home/olia/Pictures", i)
i = i+1
if __name__ == '__main__':
main()
|
from enum import IntEnum, auto
class Dir(IntEnum):
East = 0
South = 1
West = 2
North = 3
ACTIONS = []
with open('input.txt') as f:
for fl in f:
ACTIONS.append(fl.strip())
ship_x, ship_y, ship_dir = 0, 0, Dir.East
def turn_ship(param, direction):
d = ship_dir
while param > 0:
param -= 90
d += direction
d %= 4
return d
def move_ship(param):
x, y = ship_x, ship_y
if ship_dir == Dir.East: x += param
elif ship_dir == Dir.South: y -= param
elif ship_dir == Dir.West: x -= param
elif ship_dir == Dir.North: y += param
return x, y
for a in ACTIONS:
a_type, a_param = a[0], int(a[1:])
if a_type == "F": ship_x, ship_y = move_ship(a_param)
elif a_type == "N": ship_y += a_param
elif a_type == "S": ship_y -= a_param
elif a_type == "E": ship_x += a_param
elif a_type == "W": ship_x -= a_param
elif a_type == "R": ship_dir = turn_ship(a_param, +1)
elif a_type == "L": ship_dir = turn_ship(a_param, -1)
else:
print(a_type, a_param)
raise RuntimeError
print("x: %d" % ship_x)
print("y: %d" % ship_y)
print("%d" % (abs(ship_x)+abs(ship_y)))
del ship_dir
ship_x, ship_y = 0, 0
point_dx, point_dy = 10, 1
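# Part 2: the ship moves toward a waypoint held relative to the ship (starting at
# (10, 1)). A 90-degree clockwise turn maps the waypoint (dx, dy) to (dy, -dx) and a
# counter-clockwise turn to (-dy, dx); turn_point applies one such step per 90 degrees.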
def turn_point(param, direction):
px, py = point_dx, point_dy
while param > 0:
param -= 90
px, py = +direction * py, -direction * px
return px, py
def move_point(param):
return ship_x + param * point_dx, ship_y + param * point_dy
for a in ACTIONS:
a_type, a_param = a[0], int(a[1:])
if a_type == "F": ship_x, ship_y = move_point(a_param)
elif a_type == "N": point_dy += a_param
elif a_type == "S": point_dy -= a_param
elif a_type == "E": point_dx += a_param
elif a_type == "W": point_dx -= a_param
elif a_type == "R": point_dx, point_dy = turn_point(a_param, +1)
elif a_type == "L": point_dx, point_dy = turn_point(a_param, -1)
else:
print(a_type, a_param)
raise RuntimeError
print("x: %d" % ship_x)
print("y: %d" % ship_y)
print("%d" % (abs(ship_x)+abs(ship_y)))
|
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pile dataset."""
import json
import datasets
_CITATION = """\
@article{pile,
title={The {P}ile: An 800GB Dataset of Diverse Text for Language Modeling},
author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and Presser, Shawn and Leahy, Connor},
journal={arXiv preprint arXiv:2101.00027},
year={2020}
}
"""
_DESCRIPTION = """\
The Pile is an 825 GiB diverse, open source language modeling data set that consists
of 22 smaller, high-quality datasets combined together. To score well on Pile
BPB (bits per byte), a model must be able to understand many disparate domains
including books, github repositories, webpages, chat logs, and medical, physics,
math, computer science, and philosophy papers.
"""
_HOMEPAGE = "https://pile.eleuther.ai/"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
_URLS = {
"validation": "http://eaidata.bmk.sh/data/pile/val.jsonl.zst",
"test": "http://eaidata.bmk.sh/data/pile/test.jsonl.zst",
}
_NAMES = {
"pile_arxiv": "ArXiv",
"pile_books3": "Books3",
"pile_bookcorpus2": "BookCorpus2",
"pile_dm-mathematics": "DM Mathematics",
"pile_enron": "Enron Emails",
"pile_europarl": "EuroParl",
"pile_freelaw": "FreeLaw",
"pile_github": "Github",
"pile_gutenberg": "Gutenberg (PG-19)",
"pile_hackernews": "HackerNews",
"pile_nih-exporter": "NIH ExPorter",
"pile_opensubtitles": "OpenSubtitles",
"pile_openwebtext2": "OpenWebText2",
"pile_philpapers": "PhilPapers",
"pile_pile-cc": "Pile-CC",
"pile_pubmed-abstracts": "PubMed Abstracts",
"pile_pubmed-central": "PubMed Central",
"pile_stackexchange": "StackExchange",
"pile_upsto": "USPTO Backgrounds",
"pile_ubuntu-irc": "Ubuntu IRC",
"pile_wikipedia": "Wikipedia (en)",
"pile_youtubesubtitles": "YoutubeSubtitles",
}
class Pile(datasets.GeneratorBasedBuilder):
"""The Pile is a 825 GiB diverse, open source language modeling dataset."""
VERSION = datasets.Version("0.0.1")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name=name, version=version, description=_NAMES[name])
for name, version in zip(_NAMES.keys(), [VERSION] * len(_NAMES))
]
def _info(self):
features = datasets.Features(
{
"text": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=f"{_DESCRIPTION}\n{self.config.description}",
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
urls = {"validation": _URLS["validation"], "test": _URLS["test"]}
data_dir = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": data_dir["test"],
"split": "test"
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": data_dir["validation"],
"split": "validation",
},
),
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, filepath, split):
with open(filepath, encoding="utf-8") as f:
for key, row in enumerate(f):
data = json.loads(row)
if data["meta"]["pile_set_name"] == _NAMES[self.config.name]:
yield key, {
"text": data["text"],
}
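# Illustrative usage (a sketch; the script path is hypothetical):
# import datasets
# ds = datasets.load_dataset("path/to/this_script.py", "pile_arxiv", split="test")
# Each example is a dict with a single "text" field, filtered to the chosen Pile subset.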
|
# -*- coding: utf-8 -*-
#
# Author: Tomi Jylhä-Ollila, Finland 2017
#
# This file is part of Kunquat.
#
# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/
#
# To the extent possible under law, Kunquat Affirmers have waived all
# copyright and related or neighboring rights to Kunquat.
#
from .procparams import ProcParams
class SlopeParams(ProcParams):
@staticmethod
def get_default_signal_type():
return 'voice'
@staticmethod
def get_port_info():
return {
'in_00': 'input',
'out_00': 'slope',
}
def __init__(self, proc_id, controller):
super().__init__(proc_id, controller)
def get_absolute(self):
return self._get_value('p_b_absolute.json', False)
def set_absolute(self, enabled):
self._set_value('p_b_absolute.json', enabled)
def get_smoothing(self):
return self._get_value('p_f_smoothing.json', 0.0)
def set_smoothing(self, value):
self._set_value('p_f_smoothing.json', value)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 9 16:47:14 2021
@author: Mike
"""
import algoritmosSumas
import random as rn
import copy
from time import time
def crear_lista(longitud):
lista = []
for i in range(0,longitud):
lista.append(rn.randint(-150, 150))
return lista
if __name__=="__main__":
archivo = open("sumaMax1.csv", "w")
archivo.write("N;Tiempo\n")
lista = crear_lista(100)
x = 2
for i in range(100,150,2):
lista_nueva = copy.deepcopy(lista[:x])
inicio_tiempo = time()
algoritmosSumas.sumaMaxima(lista_nueva)
transcurrido = time() - inicio_tiempo
archivo.write(str(x)+";"+format(transcurrido,".5f")+"\n")
x = x + 2
archivo.close()
archivo = open("sumaMax2.csv", "w")
archivo.write("N;Tiempo\n")
x = 2
for i in range(100,150,2):
lista_nueva = copy.deepcopy(lista[:x])
inicio_tiempo = time()
algoritmosSumas.sumaMaxima2(lista_nueva)
transcurrido = time() - inicio_tiempo
archivo.write(str(x)+";"+format(transcurrido,".5f")+"\n")
x = x + 2
archivo.close()
archivo = open("sumaMax3.csv", "w")
archivo.write("N;Tiempo\n")
x = 2
for i in range(100,150,2):
lista_nueva = copy.deepcopy(lista[:x])
inicio_tiempo = time()
algoritmosSumas.sumaMaxima3(lista_nueva)
transcurrido = time() - inicio_tiempo
archivo.write(str(x)+";"+format(transcurrido,".5f")+"\n")
x = x + 2
archivo.close()
print(lista)
print(algoritmosSumas.sumaMaxima(lista))
print(algoritmosSumas.sumaMaxima2(lista))
print(algoritmosSumas.sumaMaxima3(lista))
|
import asyncio
import websockets
from socket import AddressFamily
from EventWriter import EventWriter
from events.PleaseRestartEvent import PleaseRestartEvent
class WSBroadcastEventWriter(EventWriter):
def __init__(self, ws_gap_closer_callback):
super().__init__()
self.__clients = set()
self.__server = asyncio.get_event_loop().run_until_complete(websockets.serve(self.__handle_new_client, '', 0, compression=None))
self.__port = [s for s in self.__server.server.sockets if s.family == AddressFamily.AF_INET][0].getsockname()[1]
self.__ws_gap_closer_callback = ws_gap_closer_callback
def port(self):
return self.__port
def terminate(self):
if super().is_open():
super()._EventWriter__close()
self.__server.close() # TODO is this sufficient?
async def __handle_new_client(self, websocket, path):
# see https://github.com/aaugustin/websockets/issues/551 for errors in logs
self.__clients.add(websocket)
try:
handshakeDone = False
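# Handshake: the first message from a client ends with '$' and tells the server what the
# client has already seen. The gap-closer callback returns the missed events to replay,
# an empty result when nothing was missed, or None to ask the client to restart.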
async for msg in websocket:
if not handshakeDone:
assert msg.endswith('$')
msg = msg[:-1]
fail_or_data_to_close_the_gap = self.__ws_gap_closer_callback(msg)
if fail_or_data_to_close_the_gap is None:
await websocket.send(PleaseRestartEvent().builder().to_json())
elif 0 == len(fail_or_data_to_close_the_gap):
pass
else:
events = fail_or_data_to_close_the_gap
for ev in events:
await websocket.send(ev.builder().to_json())
handshakeDone = True
finally:
self.__clients.remove(websocket)
async def write(self, event_message_builder):
assert super().is_open()
if len(self.__clients):
await asyncio.wait([c.send(event_message_builder.to_json()) for c in self.__clients])
|
import os
from django.db import migrations
def store_filename(apps, schema_editor):
Example = apps.get_model("examples", "Example")
for example in Example.objects.all():
example.upload_name = os.path.basename(example.filename.name)
example.save()
class Migration(migrations.Migration):
dependencies = [
("examples", "0004_example_upload_name"),
]
operations = [migrations.RunPython(code=store_filename, reverse_code=migrations.RunPython.noop)]
|
import json
import torch
import numpy as np
import time
import os
import sys
import copy
from copy import deepcopy
import cloudvolume as cv
from pdb import set_trace as st
from helpers import normalize, create_model, get_np
from taskqueue import RegisteredTask, MockTaskQueue, TaskQueue
class NCCTask(RegisteredTask):
def __init__(self, start_section=None, end_section=None,
path_template = 'gs://seunglab_minnie_phase3/alignment/unaligned/'
):
super(NCCTask, self).__init__(start_section, end_section, path_template)
# attributes passed to super().__init__ are automatically assigned
# use this space to perform additional processing such as:
self.start_section = int(start_section)
self.end_section = int(end_section)
self.path_template = path_template
def execute(self):
if self.start_section and self.end_section:
ncc_section_range(self.start_section, self.end_section,
self.path_template)
else:
print(self)
def ncc_section_range(start_section, end_section, path_template):
img_in_out_mip = [(6, 6), (6, 7), (7, 8)]
for img_in_mip, img_out_mip in img_in_out_mip:
pyramid_name = "ncc_m{}".format(img_out_mip)
if img_out_mip == 6:
cv_src_path = path_template + 'm6_normalized'
cv_dst_path = path_template + 'ncc/ncc_m{}'.format(img_out_mip)
elif img_out_mip in [7, 8]:
cv_src_path = path_template + 'ncc/ncc_m{}'.format(img_in_mip)
cv_dst_path = path_template + 'ncc/ncc_m{}'.format(img_out_mip)
else:
raise Exception("Unkown mip")
cv_src = cv.CloudVolume(cv_src_path, mip=img_in_mip, fill_missing=True, bounded=False, progress=False)
cv_dst = cv.CloudVolume(cv_dst_path, mip=img_out_mip,
fill_missing=True, bounded=False,
progress=False, parallel=5,
info=deepcopy(cv_src.info), non_aligned_writes=True)
cv_dst.info['data_type'] = 'float32'
cv_dst.commit_info()
cv_xy_start = [0, 0]
crop = 256
if img_in_mip == 6:
cv_xy_start = [256*0, 1024*0]
cv_xy_end = [8096, 8096]#[1024 * 8 - 256*0, 1024 * 8 - 256*0]
patch_size = 8096 // 4
elif img_in_mip == 7:
cv_xy_start = [256*0, 1024*0]
cv_xy_end = [4048, 4048]#[1024 * 8 - 256*0, 1024 * 8 - 256*0]
patch_size = 4048// 2
elif img_in_mip == 8:
cv_xy_end = [2024, 2048]#[1024 * 8 - 256*0, 1024 * 8 - 256*0]
patch_size = 2024
global_start = 0
scale_factor = 2**(img_out_mip - img_in_mip)
encoder = create_model("model",
checkpoint_folder="./models/{}".format(pyramid_name))
for z in range(start_section, end_section):
print ("MIP {} Section {}".format(img_out_mip, z))
s = time.time()
cv_src_data = cv_src[cv_xy_start[0]:cv_xy_end[0], cv_xy_start[1]:cv_xy_end[1], z].squeeze()
src_data = torch.cuda.FloatTensor(cv_src_data)
src_data = src_data.unsqueeze(0)
in_shape = src_data.shape
dst = torch.zeros((1, in_shape[-2]//scale_factor, in_shape[-1]//scale_factor),
device=src_data.device)
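# The section is processed in patch_size tiles. Interior tiles are read with an extra
# `crop` pixels of context on each shared edge, and that border is trimmed from the
# encoder output before stitching, so tile seams do not show up in the result.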
for i in range(0, src_data.shape[-2]//patch_size):
for j in range(0, src_data.shape[-1]//patch_size):
x = [global_start + i*patch_size, global_start + (i + 1) * patch_size]
y = [global_start + j*patch_size, global_start + (j + 1) * patch_size]
x_padded = copy.copy(x)
y_padded = copy.copy(y)
if i != 0:
x_padded[0] = x[0] - crop
if i != src_data.shape[-2]//patch_size - 1:
x_padded[1] = x[1] + crop
if j != 0:
y_padded[0] = y[0] - crop
if j != src_data.shape[-1]//patch_size - 1:
y_padded[1] = y[1] + crop
patch = src_data[..., x_padded[0]:x_padded[1], y_padded[0]:y_padded[1]].squeeze()
with torch.no_grad():
processed_patch = encoder(patch.unsqueeze(0).unsqueeze(0)).squeeze()
if i != 0:
processed_patch = processed_patch[crop//scale_factor:, :]
if i != src_data.shape[-2]//patch_size - 1:
processed_patch = processed_patch[:-crop//scale_factor, :]
if j != 0:
processed_patch = processed_patch[:, crop//scale_factor:]
if j != src_data.shape[-1]//patch_size - 1:
processed_patch = processed_patch[:, :-crop//scale_factor]
dst[..., x[0]//scale_factor:x[1]//scale_factor,
y[0]//scale_factor:y[1]//scale_factor] = processed_patch
if torch.any(processed_patch != processed_patch):
raise Exception("None result occured")
with torch.no_grad():
if scale_factor == 2:
black_mask = src_data != 0
black_frac = float(torch.sum(black_mask == False)) / float(torch.sum(src_data > -10000))
black_mask = torch.nn.MaxPool2d(2)(black_mask.unsqueeze(0).float()) != 0
black_mask = black_mask.squeeze(0)
elif scale_factor == 4:
black_mask = src_data != 0
black_frac = float(torch.sum(black_mask == False)) / float(torch.sum(src_data > -10000))
black_mask = torch.nn.MaxPool2d(2)(black_mask.unsqueeze(0).float()) != 0
black_mask = black_mask.squeeze(0)
black_mask = torch.nn.MaxPool2d(2)(black_mask.unsqueeze(0).float()) != 0
black_mask = black_mask.squeeze(0)
elif scale_factor == 1:
black_mask = (src_data > -10) * (src_data != 0)
black_frac = float(torch.sum(black_mask == False)) / float(torch.sum(src_data > -10000))
else:
raise Exception("Unimplemented")
if torch.any(dst != dst):
raise Exception("None result occured")
dst_norm = normalize(dst, mask=black_mask, mask_fill=0)
if torch.any(dst_norm != dst_norm):
raise Exception("None result occured")
cv_data = get_np(dst_norm.squeeze().unsqueeze(2).unsqueeze(2)).astype(np.float32)
cv_dst[cv_xy_start[0]//scale_factor:cv_xy_end[0]//scale_factor,
cv_xy_start[1]//scale_factor:cv_xy_end[1]//scale_factor, z] = cv_data
e = time.time()
print (e - s, " sec")
def work(tq):
tq.poll(lease_seconds=300)
if __name__ == "__main__":
tq = TaskQueue(sys.argv[2])
if (sys.argv[1] == 'worker'):
work(tq)
elif sys.argv[1] == 'master':
# w000ohhooooo
start = 14780
end = 27883
for i in range(start, end):
tq.insert(NCCTask(i, 1 + i))
#work(tq)
st()
#t = NormalizeTask(15000, 16000)
#t.execute()
#t = NormalizeTask(15000, 16000)
#t.execute()
|
from cloudr.config import REDIS_URL
broker_url = REDIS_URL
result_backend = REDIS_URL
task_serializer = 'json'
result_serializer = 'json'
accept_content = ['json']
beat_schedule = {
'refresh': {
'task': 'task.download.refresh',
'schedule': 10.0
}
}
|
import json
import os, sys
import jsonschema
from jsonschema import validate
# local imports
from src.log import rootLogger
logger = rootLogger.getChild('CONFIG')
cfg = None
schema = """
{
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"threads": {
"type": "string"
},
"deemix": {
"type": "object",
"properties": {
"arl": {
"type": "string"
},
"download_path": {
"type": "string"
},
"max_bitrate": {
"type": "string"
},
"skip_low_quality": {
"type": "boolean"
}
},
"required": [
"arl",
"download_path",
"max_bitrate",
"skip_low_quality"
]
},
"logging": {
"type": "object",
"properties": {
"level": {
"type": "string"
},
"path": {
"type": "string"
}
},
"required": [
"level",
"path"
]
},
"spotify": {
"type": "object",
"properties": {
"client_id": {
"type": "string"
},
"client_secret": {
"type": "string"
},
"username": {
"type": "string"
},
"scope": {
"type": "string"
},
"redirect_uri_port": {
"type": "string"
},
"playlists": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean"
},
"excluded": {
"type": "array",
"items": {
"type": "string"
}
}
},
"required": [
"enabled",
"excluded"
]
}
},
"required": [
"client_id",
"client_secret",
"username",
"scope",
"redirect_uri_port",
"playlists"
]
},
"pushover": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean"
},
"user_key": {
"type": "string"
},
"api_token": {
"type": "string"
}
},
"required": [
"enabled",
"user_key",
"api_token"
]
},
"autoscan": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean"
},
"endpoint": {
"type": "string"
},
"auth_enabled": {
"type": "boolean"
},
"username": {
"type": "string"
},
"password": {
"type": "string"
}
},
"required": [
"enabled",
"endpoint",
"auth_enabled",
"username",
"password"
]
},
"git": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean"
}
},
"required": [
"enabled"
]
},
"data": {
"type": "object",
"properties": {
"persistent_data_root": {
"type": "string"
},
"files": {
"type": "object",
"properties": {
"liked_songs": {
"type": "string"
},
"processed_songs": {
"type": "string"
},
"playlist_mapping": {
"type": "string"
}
},
"required": [
"liked_songs",
"processed_songs",
"playlist_mapping"
]
}
},
"required": [
"persistent_data_root",
"files"
]
}
},
"required": [
"threads",
"deemix",
"logging",
"spotify",
"pushover",
"autoscan",
"git",
"data"
]
}
"""
class Configuration:
def __init__(self):
self.config = {}
self._load_config()
def _load_config(self):
logger.debug(f'Loading config')
loaded = self._load_config_file()
self._validate(loaded)
self.config = self._flatten_settings(loaded)
@staticmethod
def _validate(config):
logger.debug('Validating config against JSON Schema')
try:
validate(instance=config, schema=json.loads(schema))
except jsonschema.exceptions.ValidationError as err:
logger.error(f'Failed to validate config file against schema, error was: {err.message}')
sys.exit(1)
@staticmethod
def _load_config_file():
file = os.path.join(os.path.dirname(__file__), '..', 'config.json')
if not os.path.isfile(file):
logger.error(f'Unable to find config file at: {file}')
sys.exit(1)
try:
with open(os.path.join(file), mode='r', encoding='utf-8') as f:
return json.load(f)
except Exception as ex:
logger.error(f'Failed to open config file {file}, exception was: {ex}')
sys.exit(1)
@staticmethod
def _flatten_settings(settings: dict):
logger.debug(f'Flattening settings from nested dictionary')
ret = {}
for k in settings.keys():
if isinstance(settings[k], dict):
for k2 in settings[k].keys():
if isinstance(settings[k][k2], dict):
for k3 in settings[k][k2].keys():
value = settings[k][k2][k3]
ret["_".join([k.upper(), k2.upper(), k3.upper()])] = value
else:
value = settings[k][k2]
ret["_".join([k.upper(), k2.upper()])] = value
else:
ret[k.upper()] = settings[k]
return ret
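# Illustrative example (not part of the original file): _flatten_settings turns
# {"deemix": {"download_path": "/music"}} into {"DEEMIX_DOWNLOAD_PATH": "/music"},
# which is how keys such as LOGGING_LEVEL and LOGGING_PATH used below are produced.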
def load():
global cfg
if cfg is None:
cfg = Configuration().config
os.environ["SPOTIFY_SYNC_LOG_LEVEL"] = cfg["LOGGING_LEVEL"]
os.environ["SPOTIFY_SYNC_LOG_PATH"] = cfg["LOGGING_PATH"]
return cfg |
from .models import UserPoints, PointSetting
from ..payments.models import Payment
from ..users.services import FCMService
class PointsService(object):
def __init__(self):
self.settings = PointSetting.objects.first()
def book_approval_award(self, user, book_id):
if self.settings and self.settings.book_approved and not PointsService._awarded(book_id, 'book_approved',
user.id):
points = UserPoints.objects.create(user=user, point_num=self.settings.book_approved, action_id=book_id,
type='book_approved')
FCMService.notify_points_awarded(user, points.point_num)
def paper_approval_award(self, user, paper_id):
if self.settings and self.settings.paper_approved and not PointsService._awarded(paper_id, 'paper_approved',
user.id):
points = UserPoints.objects.create(user=user, point_num=self.settings.paper_approved, action_id=paper_id,
type='paper_approved')
FCMService.notify_points_awarded(user, points.point_num)
def thesis_approved_award(self, user, thesis_id):
if self.settings and self.settings.thesis_approved and not PointsService._awarded(thesis_id, 'thesis_approved',
user.id):
points = UserPoints.objects.create(user=user, point_num=self.settings.thesis_approved, action_id=thesis_id,
type='thesis_approved')
FCMService.notify_points_awarded(user, points.point_num)
def audio_approved_award(self, user, audio_id):
if self.settings and self.settings.audio_approved and not PointsService._awarded(audio_id, 'audio_approved',
user.id):
points = UserPoints.objects.create(user=user, point_num=self.settings.audio_approved, action_id=audio_id,
type='audio_approved')
FCMService.notify_points_awarded(user, points.point_num)
def donation_award(self, user, payment: Payment):
if self.settings and self.settings.donation and not PointsService._awarded(payment.id, 'donation',
user.id):
total = self.settings.donation * round(payment.amount)
points = UserPoints.objects.create(user=user, point_num=total, action_id=payment.id, type='donation')
FCMService.notify_points_awarded(user, points.point_num)
def reload_settings(self):
self.settings = PointSetting.objects.first()
@staticmethod
def _awarded(action_id, award_type, user_id):
return UserPoints.objects.filter(action_id=action_id, type=award_type, user_id=user_id).exists()
|
from pathlib import Path
from diot import Diot
from bioprocs.utils import shell2 as shell, logger
from bioprocs.utils.parallel import Parallel, distributeList
{%from os import path%}
{%from pyppl.utils import always_list%}
infile = {{i.infile | quote}}
afile = {{i.afile | ?path.isfile | =readlines | !always_list | repr}}
outfile = Path({{o.outfile | quote}})
allfile = {{o.outfile | prefix | @append: '.all' | @append: ext(o.outfile) | quote}}
netmhc = {{args.netmhc | quote}}
isfa = {{args.isfa | repr}}
nthread = {{args.nthread | repr}}
params = {{args.params | repr}}
tmpdir = {{args.tmpdir | repr}}
lens = {{args.lens | ?isinstance: list | =@join: ',' | quote}}
shell.load_config(netmhc = netmhc)
# support HLA-A*03:79 -> HLA-A0379
alleles = [allele.strip().replace('*', '').replace(':', '') for allele in afile if 'HLA-' in allele]
valid_alleles = shell.netmhc(listMHC = True).splitlines()
for i in range(nthread):
shell.mkdir(p = outfile.parent.joinpath('threads', str(i+1)))
# split infile
if isfa:
seqs = [line.strip() for line in shell.grep('>', infile).splitlines() if line.strip()]
seqs_to_threads = distributeList(seqs, nthread)
seqs = {}
for i, tseqs in enumerate(seqs_to_threads):
for tseq in tseqs:
seqs[tseq] = i
handlers = {}
lastindex = None
with open(infile) as fin:
for line in fin:
if line.startswith('>'):
seq = line.strip()
index = seqs[seq]
if index not in handlers:
handlers[index] = open(outfile.parent.joinpath('threads', str(index+1), 'peptides.txt'), 'w')
handlers[index].write(line)
lastindex = index
elif lastindex is None:
raise IndexError('Sequence tag not found!')
else:
handlers[lastindex].write(line)
for handler in handlers.values():
if not handler.closed:
handler.close()
else:
with open(infile) as fin:
peptides = fin.readlines()
pep_to_threads = distributeList(peptides, nthread)
for i, pep in enumerate(pep_to_threads):
with open(outfile.parent.joinpath('threads', str(i+1), 'peptides.txt'), 'w') as fpep:
fpep.write(''.join(pep))
"""
PARAMETER DEFAULT VALUE DESCRIPTION
[-a filename] HLA-A0201 HLA allele name
[-f filename] Input file (by default in FASTA format)
[-p] 0 Switch on if input is a list of peptides (Peptide format)
[-l string] 9 Peptide length (multiple lengths separated by comma e.g. 8,9,10)
[-s] 0 Sort output on decreasing affinity
[-rth float] 0.500000 Threshold for high binding peptides (%Rank)
[-rlt float] 2.000000 Threshold for low binding peptides (%Rank)
[-listMHC] 0 Print list of alleles included in netMHC
[-xls] 0 Save output to xls file
[-xlsfile filename] NetMHC_out.xls File name for xls output
[-t float] -99.900002 Threshold for output
[-thrfmt filename] $NETMHC/data/threshold/%s.thr Format for threshold filenames
[-hlalist filename] $NETMHC/data/allelelist File with covered HLA names
[-rdir filename] $NETMHC Home directory for NetMHC
[-tdir filename] $TMPDIR Temporary directory (Default $$)
[-syn filename] $NETMHC/data/synlists/%s.synlist Format of synlist file
[-v] 0 Verbose mode
[-dirty] 0 Dirty mode, leave tmp dir+files
[-inptype int] 0 Input type [0] FASTA [1] Peptide
[-version filename] $NETMHC/data/version File with version information
[-w] 0 w option for webface
"""
# common options
params.tdir = tmpdir
params.l = lens
def do_one(allele, ifile, ithread):
ps = params.copy()
ps.p = not isfa
ps.f = ifile
ps.a = allele
ps._out = outfile.parent.joinpath('threads', str(ithread+1), allele + '.out.txt')
ps._debug = True
shell.netmhc(**ps)
args = []
for allele in alleles:
if allele not in valid_alleles:
logger.warning('Not a valid allele: %s', allele)
for i in range(nthread):
if outfile.parent.joinpath('threads', str(i+1), 'peptides.txt').is_file():
args.append((allele, outfile.parent.joinpath('threads', str(i+1), 'peptides.txt'), i))
if not args:
raise ValueError('No valid alleles found.')
para = Parallel(nthread = nthread)
para.run(do_one, args)
# merge results
with open(outfile, 'w') as fout, open(allfile, 'w') as fall:
header_written = False
pos_written = False
for i, ofile in enumerate(outfile.parent.joinpath('threads').glob('*/*.out.txt')):
with open(ofile) as fo:
for line in fo:
line = line.strip()
if not line or line.startswith('-'):
continue
if header_written and line.startswith('#'):
continue
if i == 0 and line.startswith('#'):
fout.write(line + '\n')
fall.write(line + '\n')
else:
header_written = True
parts = line.split()
if parts and parts[0] == 'pos' and i == 0 and not pos_written:
fout.write('\t'.join(parts) + '\n')
fall.write('\t'.join(parts) + '\n')
pos_written = True
elif not parts or parts[0] in ('pos', 'Protein'):
continue
elif len(parts) > 14:
del parts[-2]
fout.write('\t'.join(parts) + '\n')
fall.write('\t'.join(parts) + '\n')
else:
fall.write('\t'.join(parts) + '\n')
|
from collections import defaultdict
def get_short_names(names):
'''
Given an iterable names, return the shortest prefix for each element
so that all prefixes are different. All elements in names must be unique.
>>> get_short_names(['cat', 'castor', 'could', 'fire', 'first'])
['cat', 'cas', 'co', 'fire', 'firs']
'''
lengths = {name: 1 for name in names}
if len(lengths) != len(names):
raise ValueError('names must only contain unique values')
while True:
buckets = defaultdict(list)
valid = True
for name in names:
bucket = buckets[name[:lengths[name]]]
if bucket:
valid = False
bucket.append(name)
for bucket in buckets.values():
if len(bucket) > 1:
for name in bucket:
lengths[name] += 1
if valid:
break
return [name[:lengths[name]] for name in names]
def text_width(text):
return max((len(line) for line in text.split('\n')), default=0)
def wrap(text, width, tabwidth=4):
text = text.replace('\t', ' ' * tabwidth)
if not text:
yield ''
return
for index in range(0, len(text), width):
yield text[index:index + width]
def align(text, width, alignment):
if alignment == '=':
return text.center(width)
elif alignment == '<':
return text.ljust(width)
elif alignment == '>':
return text.rjust(width)
else:
raise ValueError(f'Invalid value for align {alignment}')
def colorize(text, color):
code = f"\033[{color}m"
restore = f"\033[0m"
return "".join([code, text, restore])
def wrap_align(text, width=None, alignment='<'):
'''
Return value justified to the given width, either
to the left if align is '<', to the right if it is '>' or centered
if it is '='.
'''
lines = text.split('\n')
width = max(map(len, lines)) if width is None else width
lines = [sub_line for line in lines for sub_line in wrap(line, width)]
return '\n'.join(
align(line, width=width, alignment=alignment) for line in lines)
def join(columns, separator=''):
widths = [len(column[0].split('\n', 1)[0]) for column in columns]
joined = []
for rows in zip(*columns):
rows_lines = [row.split('\n') for row in rows]
height = max(map(len, rows_lines))
rows_lines = [
row_lines + [' ' * width] * (height - len(row_lines))
for row_lines, width in zip(rows_lines, widths)
]
joined_row = []
for lines in zip(*rows_lines):
joined_row.append(separator.join(lines))
joined.append('\n'.join(joined_row))
return joined
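# Usage sketch (illustrative, not part of the original module): render two text
# columns side by side with wrap_align and join.
if __name__ == '__main__':
    left = wrap_align('first column text', width=10)
    right = wrap_align('second', width=10)
    # join expects a list of columns, each column being a list of equal-width cells
    for row in join([[left], [right]], separator=' | '):
        print(row)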
|
from functools import wraps
def check_instrument_attributes(func):
@wraps(func)
def wrapper(*args, **kwargs):
inst = args[0]
if not hasattr(inst, "accuracy"):
raise NotImplementedError("Please add instrument attributes")
return func(*args, **kwargs)
return wrapper
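# Minimal usage sketch (illustrative, not part of the original module): the wrapped
# method only runs when the instrument instance defines an `accuracy` attribute.
class _DemoInstrument:
    accuracy = 0.01

    @check_instrument_attributes
    def measure(self):
        return 42

# _DemoInstrument().measure() returns 42; an instrument without `accuracy` raises NotImplementedError.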
|
# Write a program that keeps all the unique chemical elements.
# On the first line you will be given a number n - the count of input lines that you are going to receive.
# On the next n lines, you will be receiving chemical compounds, separated by a single space.
# Your task is to print all the unique ones on separate lines (the order does not matter):
elem = set()
for _ in range(int(input())):
elem.update(input().split())
print(*elem, sep='\n') |
# Copyright 2021 Andreas Steck (steck.andi@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from carebt.treeNode import TreeNode
class ExecutionContext():
def __init__(self, parent: TreeNode, node: TreeNode, params: str):
self.call_in_params: List[str] = []
self.call_out_params: List[str] = []
if(params is not None):
params = params.replace('  ', ' ')
params = params.replace('  ', ' ')
# extract call input params if available
for p in filter(None, params.split('=>')[0].split(' ')):
# param is a careBt variable (starts with ?)
if(p[0] == '?'):
self.call_in_params.append(p)
else:
try:
# param is a member variable of the parent
self.call_in_params.append(eval(f'parent.{p}'))
except SyntaxError:
# param is a value
self.call_in_params.append(eval(p))
self.call_in_params = tuple(self.call_in_params)
# extract call output params if available
if(len(params.split('=>')) == 2):
for p in filter(None, params.split('=>')[1].split(' ')):
self.call_out_params.append(p)
self.call_out_params = tuple(self.call_out_params)
# the node
self.node = node
# placeholder for the instance of the node
self.instance = None
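# Illustrative note (not part of the original file): a params string such as
# "?in_a my_member 7 => ?out" yields call_in_params = ('?in_a', parent.my_member, 7)
# and call_out_params = ('?out',); the names here are hypothetical.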
|
# Link: https://github.com/AI-secure/Meta-Nerual-Trojan-Detection
# Author: @xiaojunxu
# License: MIT
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import librosa
class Model(nn.Module):
def __init__(self, gpu=False):
super(Model, self).__init__()
self.gpu = gpu
self.lstm = nn.LSTM(input_size=40, hidden_size=100, num_layers=2, batch_first=True)
self.lstm_att = nn.Linear(100, 1)
self.output = nn.Linear(100, 10)
if gpu:
self.cuda()
def forward(self, x):
if self.gpu:
x = x.cuda()
# Torch version of melspectrogram , equivalent to:
# mel_f = librosa.feature.melspectrogram(x, sr=sample_rate, n_mels=40)
# mel_feature = librosa.core.power_to_db(mel_f)
window = torch.hann_window(2048)
if self.gpu:
window = window.cuda()
stft = (torch.stft(x, n_fft=2048, window=window).norm(p=2,dim=-1))**2
mel_basis = torch.FloatTensor(librosa.filters.mel(16000, 2048, n_mels=40))
if self.gpu:
mel_basis = mel_basis.cuda()
mel_f = torch.matmul(mel_basis, stft)
mel_feature = 10 * torch.log10(torch.clamp(mel_f, min=1e-10))
feature = (mel_feature.transpose(-1,-2) + 50) / 50
lstm_out, _ = self.lstm(feature)
att_val = F.softmax(self.lstm_att(lstm_out).squeeze(2), dim=1)
emb = (lstm_out * att_val.unsqueeze(2)).sum(1)
score = self.output(emb)
return (score)
def loss(self, pred, label):
if self.gpu:
label = label.cuda()
return F.cross_entropy(pred, label)
def random_troj_setting(troj_type):
MAX_SIZE = 16000
CLASS_NUM = 10
if troj_type == 'jumbo':
p_size = np.random.choice([800,1600,2400,3200,MAX_SIZE], 1)[0]
if p_size < MAX_SIZE:
alpha = np.random.uniform(0.2, 0.6)
if alpha > 0.5:
alpha = 1.0
else:
alpha = np.random.uniform(0.05, 0.2)
elif troj_type == 'M':
p_size = np.random.choice([800,1600,2400,3200], 1)[0]
alpha = 1.0
elif troj_type == 'B':
p_size = MAX_SIZE
alpha = np.random.uniform(0.05, 0.2)
if p_size < MAX_SIZE:
loc = np.random.randint(MAX_SIZE-p_size)
else:
loc = 0
pattern = np.random.uniform(size=p_size)*0.2
target_y = np.random.randint(CLASS_NUM)
inject_p = np.random.uniform(0.05, 0.5)
return p_size, pattern, loc, alpha, target_y, inject_p
def troj_gen_func(X, y, atk_setting):
p_size, pattern, loc, alpha, target_y, inject_p = atk_setting
X_new = X.clone()
X_new[loc:loc+p_size] = alpha * torch.FloatTensor(pattern) + (1-alpha) * X_new[loc:loc+p_size]
y_new = target_y
return X_new, y_new
|
from django.conf.urls import url
from . import views
app_name = "nominations"
urlpatterns = [
url(r"^elections/$", views.ElectionsList.as_view(), name="elections_list"),
url(r"^election/(?P<election>[-\w]+)/$", views.ElectionDetail.as_view(), name="election_detail"),
url(
r"^elections/(?P<election>[-\w]+)/nominees/$",
views.NomineeList.as_view(),
name="nominees_list",
),
url(
r"^elections/(?P<election>[-\w]+)/nominees/(?P<slug>[-\w]+)/$",
views.NomineeDetail.as_view(),
name="nominee_detail",
),
url(
r"^(?P<election>[-\w]+)/create/$",
views.NominationCreate.as_view(),
name="nomination_create",
),
url(
r"^(?P<election>[-\w]+)/(?P<pk>\d+)/$",
views.NominationView.as_view(),
name="nomination_detail",
),
url(
r"^(?P<election>[-\w]+)/(?P<pk>\d+)/edit/$",
views.NominationEdit.as_view(),
name="nomination_edit",
),
]
|
from dice import *
from pool import *
import sys, re
def get_all_eote_dice():
"""Returns a dict of all the dice in the Edge of the Empire game."""
ddict = {
'Boost': Dice('Boost', ['', '', 'Success', ['Success', 'Advantage'], ['Advantage', 'Advantage'], 'Advantage']),
'Setback': Dice('Setback', ['', '', 'Failure', 'Failure', 'Threat', 'Threat']),
'Ability': Dice('Ability', ['', 'Success', 'Success', ['Success', 'Success'], 'Advantage', 'Advantage', ['Advantage', 'Success'], ['Advantage', 'Advantage']]),
'Difficulty':Dice('Difficulty', ['', 'Failure', ['Failure', 'Failure'], 'Threat', 'Threat', 'Threat', ['Threat', 'Threat'], ['Failure', 'Threat']]),
'Proficiency': Dice('Proficiency', ['', 'Success', 'Success', ['Success', 'Success'], ['Success', 'Success'], 'Advantage', ['Success', 'Advantage'], ['Success', 'Advantage'], ['Success', 'Advantage'], ['Advantage', 'Advantage'], ['Advantage', 'Advantage'], 'Triumph']),
'Challenge': Dice('Challenge', ['', 'Failure', 'Failure', ['Failure', 'Failure'], ['Failure', 'Failure'], 'Threat', 'Threat', ['Failure', 'Threat'], ['Failure', 'Threat'], ['Threat', 'Threat'], ['Threat', 'Threat'], 'Despair' ]),
'Force': Dice('Force', ['Dark', 'Dark', 'Dark', 'Dark', 'Dark', 'Dark', ['Dark', 'Dark'], 'Light', 'Light', ['Light', 'Light'], ['Light', 'Light'], ['Light', 'Light']])
}
return ddict
def pprint_roll(roll):
"""Takes the result of a DicePool roll and prettily prints it"""
result_count = {
'Success': 0,
'Failure': 0,
'Advantage': 0,
'Threat': 0,
'Triumph': 0,
'Despair': 0
}
for result in roll:
if type(result) is list:
for item in result:
result_count[item] += 1
elif result in result_count:
result_count[result] += 1
for item in result_count:
if result_count[item] > 0:
print('%s: %d' % (item, result_count[item]))
def main():
dice_dict = get_all_eote_dice()
dicePool = DicePool()
pool_str = sys.argv[1]
matches = re.findall('[0-9]+[a-z]+', pool_str)
dice_type_abbrev = {'b': 'Boost', 's': 'Setback', 'a': 'Ability', 'd': 'Difficulty', 'p': 'Proficiency', 'c': 'Challenge', 'f': 'Force'}
for match in matches:
num_dice = match[0]
dice_type = match[1]
if dice_type not in dice_type_abbrev:
raise KeyError('Dice type {} not in database'.format(dice_type))
for i in range(int(num_dice)):
dicePool.add_dice(dice_dict[dice_type_abbrev[dice_type]])
pprint_roll(dicePool.roll())
if __name__ == '__main__':
main()
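# Usage sketch (illustrative): run as `python <script>.py 2a1d` to roll two Ability
# dice and one Difficulty die; the pooled symbols are then pretty-printed.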
|
from .dqn import DQN
from .dueling import DuelingDQN
from .double import DoubleDQN
|
import math
from timeit import default_timer as timer
start_time = timer()
arr = list(range(1, 1001))
ans = -1
for a in arr:
flag = False
for b in arr:
c = math.sqrt(a**2 + b**2)
if math.floor(c) == c and a+b+c == 1000:
flag = True
ans = c
break
if flag:
break
diff_time = timer() - start_time
# 425.0 in 0.614862781375 seconds
print("{0} in {1} seconds".format(ans, diff_time))
|
# -*- coding: utf-8 -*-
# __author__ = 'elkan1788@gmail.com'
from ppytools.compresshelper import zipFile, tarFile
import unittest
import logging
import os
logger = logging.getLogger(__name__)
class TestCompressHelperCase(unittest.TestCase):
"""TestCompressHelperCase
"""
@classmethod
def setUpClass(cls):
work_dir = os.path.dirname(os.path.realpath(__file__))
cls.large_file = os.path.join(work_dir, 'large_file中文.txt')
with open(cls.large_file, 'wb') as tmp:
tmp.seek(5 * 1024 * 1024)
tmp.write(b'\x00')
@classmethod
def tearDownClass(cls):
os.remove(cls.large_file)
pref_path = cls.large_file[0:cls.large_file.rindex('.')]
os.remove(pref_path+'.zip')
os.remove(pref_path+'.tar.gz')
logger.info('Remove testing files...')
logger.info('%s', cls.large_file)
logger.info('%s.zip', pref_path)
logger.info('%s.tar.gz', pref_path)
def testZipFile(self):
old_size = os.path.getsize(self.large_file)
zip_file = zipFile(self.large_file)
cps_size = os.path.getsize(zip_file)
self.assertGreaterEqual(old_size, cps_size)
self.assertGreaterEqual((old_size - cps_size), 0)
def testTarFile(self):
old_size = os.path.getsize(self.large_file)
tar_file = tarFile(self.large_file)
cps_size = os.path.getsize(tar_file)
self.assertGreaterEqual(old_size, cps_size)
self.assertGreaterEqual((old_size - cps_size), 0)
|
"""Analyzer
Analyzer module for setting menu bar setup for OSX
"""
__author__ = "ales lerch"
import os
import cv2
import numpy
from PIL import Image
def check_image_color(image):
"""Returns string containing 'ligh' or 'dark' that tells if image is for day or night,
Y -- converting to gray to detect if it's really dark or light"""
def check_color(i, j, k):
""" Function used only for DEBUGGING"""
img.show()
image = Image.new("RGB", (200, 200), (int(Y), int(Y), int(Y)))
image.show()
image = Image.new("RGB", (200, 200), (int(i), int(j), int(k)))
image.show()
if not os.path.isfile(image):
return "Image not found"
def calculate_bgr(data):
average_color_per_row = numpy.average(data, axis=0)
average_color = numpy.average(average_color_per_row, axis=0)
return tuple(average_color)
def calculate_y(r, g, b):
alpha = 0.299
betta = 0.587
gamma = 0.114
return alpha * r + betta * g + gamma * b
# split the image into four squares, calculate the average pixel value for each and take the highest
# blur the image and save to /Library/Caches as com.apple.desktop.admin.png
# in case using blur tool --> blur = cv2.blur(img,(5,5))
try:
img_cv_data = cv2.imread(image)
B, G, R = calculate_bgr(img_cv_data)
Y = calculate_y(R, G, B)
height, width = img_cv_data.shape[:2]
except Exception as err:
print(f"[ERROR] {err} with image: {image}")
return "Error parsing image"
# image detection
if Y < 72.0:
_type = "dark"
elif Y <= 108.0:
_type = "evening"
else:
_type = "light"
return _type
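# Illustrative example (not part of the original module): a mid-grey image with
# average channel values R = G = B = 128 gives Y = 128, which is classified as "light".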
|
index = 0
while index < 10:
print("Szoveg")
index = index + 1
|
# Calculate the sum of two integers a and b, but you are not allowed to use the operator + and -.
# Example 1:
# Input: a = 1, b = 2
# Output: 3
# Example 2:
# Input: a = -2, b = 3
# Output: 1
class Solution:
def getSum(self, a, b):
"""
:type a: int
:type b: int
:rtype: int
"""
MAX = 0x7FFFFFFF
mask = 0xFFFFFFFF
while b != 0:
a, b = (a ^ b) & mask, ( (a & b) << 1 ) & mask
return a if a < MAX else ~ (a ^ mask)
# Time: O(1)
# Space: O(1)
# Difficulty: easy |
import json
from difflib import get_close_matches
data = json.load(open("data.json"))
def english_word(w):
w=w.lower()
if w in data:
return data[w]
elif w.title() in data: #if user entered "delhi" this will check for "Delhi" as well.
return data[w.title()]
elif w.upper() in data: #in case user enters words like USA or NATO
return data[w.upper()]
elif len(get_close_matches(w,data.keys())) > 0:
check = input(f"Did you mean {get_close_matches(w,data.keys())[0]} instead ? (Y)es or (N)o:" )
check = check.upper()
if check == 'Y':
return data[get_close_matches(w,data.keys())[0]]
elif check == 'N':
return f"Word doesn't exist.Please double check it."
else:
return f"We didn't understand your entry."
else:
return f"Word doesn't exist.Please double check it."
word = input('Enter your word: ')
result = english_word(word)
if type(result) == list:
for meaning in result:
print(meaning)
else:
print(result)
|
import numpy as np
import math
def factorial(n):
fact = 1
for i in range(1,n+1):
fact *= i
return fact
summation = 1
for i in range(1,7):
val = (-1)**i * (math.pi/3)**(2*i) / factorial(2*i)
summation += val
print "val: ", val
print "summation: ", summation
print "Final summation: ", summation |
"""
Write a function that takes a string as input and returns each character's hexadecimal value.
The output should be in the form of a string.
input_string = 'Python'
Expected output = '50 79 74 68 6f 6e'
"""
def convert_to_hex(input_string):
    # Encode to UTF-8 and render every byte as a two-digit hex value separated by spaces.
    return ' '.join('{:02x}'.format(b) for b in input_string.encode('utf-8'))
|
from setuptools import setup
setup(
name='UniqueBotsKR',
version='1.2',
packages=['UniqueBotsKR'],
url='https://github.com/gunyu1019/UniqueBotsKR',
license='MIT',
author='gunyu1019',
author_email='gunyu1019@yhs.kr',
description='UniqueBots를 위한 비공식 파이썬 API 레퍼입니다.',
python_requires='>=3.6',
long_description=open('README.md', encoding='UTF-8').read(),
long_description_content_type='text/markdown',
include_package_data=True,
install_requires=open('requirements.txt', encoding='UTF-8').read().splitlines(),
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'
]
)
|
import numpy as np
import sys
def PYN(approx, real):
app = set(list(map(tuple, approx)))
rea = set(list(map(tuple, real)))
return len(app & rea)/len(real)
def DM(approx, real):
nadir = np.array([real[:, 0].min(), real[:, 1].min()])
ideal = np.array([real[:, 0].max(), real[:, 1].max()])
p = np.empty(2, dtype=float)
for k in range(2):
p[k] = 1 / (ideal[k] - nadir[k])
bestDists = np.full(real.shape[0], sys.float_info.max, dtype=float)
for i, point in enumerate(real):
for appPoint in approx:
dist = np.sqrt(p[0]*((point[0] - appPoint[0])**2) + p[1]*((point[1] - appPoint[1])**2))
if dist < bestDists[i]:
bestDists[i] = dist
return bestDists.mean()
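# Usage sketch (illustrative, not part of the original module): both metrics expect
# (N, 2) NumPy arrays of objective values, `approx` being the approximation front
# and `real` the reference front.
# approx = np.array([[1.0, 2.0], [2.0, 1.0]])
# real = np.array([[1.0, 2.0], [2.0, 1.0], [3.0, 0.5]])
# PYN(approx, real) -> 2/3 (fraction of reference points recovered exactly)
# DM(approx, real)  -> mean normalized distance from each reference point to its closest approximation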
|
from unittest import TestCase
from salesforce.dbapi.subselect import (
find_closing_parenthesis, split_subquery, transform_except_subquery,
mark_quoted_strings, subst_quoted_strings, simplify_expression,
)
class TestSubSelectSearch(TestCase):
def test_parenthesis(self):
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 0), (0, 2))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 2), (3, 12))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 3), (3, 12))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 6), (7, 11))
self.assertEqual(find_closing_parenthesis('() (() (())) ()', 13), (13, 15))
self.assertRaises(AssertionError, find_closing_parenthesis, '() (() (())) ()', 1)
def test_subquery(self):
def func(x): # pylint:disable=unused-argument
return '*transformed*'
sql = "SELECT a, (SELECT x FROM y) FROM b WHERE (c IN (SELECT p FROM q WHERE r = %s) AND c = %s)"
expected = "*transformed*(SELECT x FROM y)*transformed*(SELECT p FROM q WHERE r = %s)*transformed*"
self.assertEqual(transform_except_subquery(sql, func), expected)
def test_split_subquery(self):
sql = " SELECT a, ( SELECT x FROM y) FROM b WHERE (c IN (SELECT p FROM q WHERE r = %s) AND c = %s)"
expected = ("SELECT a, (&) FROM b WHERE (c IN (&) AND c=%s)",
[("SELECT x FROM y", []),
("SELECT p FROM q WHERE r=%s", [])
])
self.assertEqual(split_subquery(sql), expected)
def test_nested_subquery(self):
def func(x): # pylint:disable=unused-argument
return '*transformed*'
sql = "SELECT a, (SELECT x, (SELECT p FROM q) FROM y) FROM b"
expected = "*transformed*(SELECT x, (SELECT p FROM q) FROM y)*transformed*"
self.assertEqual(transform_except_subquery(sql, func), expected)
def test_split_nested_subquery(self):
sql = "SELECT a, (SELECT x, (SELECT p FROM q) FROM y) FROM b"
expected = ("SELECT a, (&) FROM b",
[("SELECT x, (&) FROM y",
[("SELECT p FROM q", [])]
)]
)
self.assertEqual(split_subquery(sql), expected)
class ReplaceQuotedStringsTest(TestCase):
def test_subst_quoted_strings(self):
def inner(sql, expected):
result = mark_quoted_strings(sql)
self.assertEqual(result, expected)
self.assertEqual(subst_quoted_strings(*result), sql)
inner("where x=''", ("where x=@", ['']))
inner("a'bc'd", ("a@d", ['bc']))
inner(r"a'bc\\'d", ("a@d", ['bc\\']))
inner(r"a'\'\\'b''''", ("a@b@@", ['\'\\', '', '']))
self.assertRaises(AssertionError, mark_quoted_strings, r"a'bc'\\d")
self.assertRaises(AssertionError, mark_quoted_strings, "a'bc''d")
def test_simplify_expression(self):
self.assertEqual(simplify_expression(' a \t b c . . d '), 'a b c..d')
|
import json
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from products.models import Product
from home.models import User
# Create your views here.
def get_products_list(request):
if request.method == 'GET':
products_list = Product.objects.all()
all_products = [product.json() for product in products_list]
return JsonResponse({'products_list': all_products, 'success': True}, status=200)
return JsonResponse({'success': False, 'error': 'Method Not Allowed'}, status=405)
def get_product_details(request, **kwargs):
if request.method == 'GET':
product_id = kwargs['product_id']
try:
product = Product.objects.get(pk=product_id)
return JsonResponse({'product': product.json(), 'success': True}, status=200)
except Product.DoesNotExist:
return JsonResponse({'success': False, 'error': 'Product does not exist'}, status=400)
except:
return JsonResponse({'success': False, 'error': 'Something Went Wrong'}, status=500)
return JsonResponse({'success': False, 'error': 'Method Not Allowed'}, status=405)
@csrf_exempt
def add_product(request):
if request.method == 'POST':
session_data = request.headers['Session']
user_id = request.session.decode(session_data)['id']
try:
User.objects.filter(is_superuser=True).filter(is_staff=True).get(pk=user_id)
data = json.loads(request.body)
company = data['company']
series = data['series']
model = data['model']
price = data['price']
quantity = data['quantity']
if Product.objects.filter(Company=company).filter(Series=series).filter(Model=model).count() > 0:
return JsonResponse({'success': False, 'error': 'Product already exists'}, status=400)
product = Product(Company=company, Series=series, Model=model, Price=price, Quantity=quantity)
product.save()
return JsonResponse({'success': True, 'addedProduct': product.json()}, status=200)
except Exception as e:
return JsonResponse({'success': False, 'error': 'Unauthorized'}, status=403)
return JsonResponse({'success': False, 'error': 'Method Not Allowed'}, status=405)
@csrf_exempt
def update_product(request, **kwargs):
if request.method == 'PUT':
product_id = kwargs['product']
try:
product = Product.objects.get(pk=product_id)
data = json.loads(request.body)
price = data.get('price', product.Price)
quantity = data.get('quantity', product.Quantity)
product.Price = price
product.Quantity = quantity
product.save()
return JsonResponse({'success': True, 'updated product': product.json()}, status=200)
except Product.DoesNotExist:
return JsonResponse({'success': False, 'error': 'Product Does Not Exist'}, status=400)
except:
return JsonResponse({'success': False, 'error': 'Something Went Wrong'}, status=500)
return JsonResponse({'success': False, 'error': 'Method Not Allowed'}, status=405) |
# Generated by Django 3.2.5 on 2021-08-25 19:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("public_data", "0008_artifcommune"),
("project", "0005_auto_20210825_1559"),
]
operations = [
migrations.AlterField(
model_name="project",
name="cities",
field=models.ManyToManyField(
to="public_data.ArtifCommune", verbose_name="cities"
),
),
]
|
#!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from enum import Enum
class UserStatus(Enum):
ACTIVE = "ACTIVE"
DEACTIVATED = "DEACTIVATED"
MISSING_ENUM = ""
@classmethod
def _missing_(cls, value: object) -> "UserStatus":
return cls.MISSING_ENUM
|
PE_LOGIN_URL = "http://58.192.114.239/student/studentFrame.jsp"
PE_PC_URL = "http://58.192.114.239/student/queryCheckInfo.jsp"
CONNECT_TIME_OUT = 3
API_SERVER_HOST = ''
API_SERVER_PORT = ''
API_SERVER_KEY = ''
SECRET_KEY1 = 0
SECRET_KEY2 = 0
A = hex(SECRET_KEY1^SECRET_KEY2)[2:-1] + API_SERVER_KEY + hex(SECRET_KEY1)[2:]
daymap = {'Mon':1, 'Tue':2, 'Wed':3, 'Thu':4, 'Fri':5, 'Sat':6, 'Sun':7}
finay_day = '2016-01-08'
final_date = 5
loginurl1 = "http://ids2.seu.edu.cn/amserver/UI/Login"#?goto=http%3A%2F%2Fzccx.seu.edu.cn%2F"
runurl = "http://zccx.seu.edu.cn" |
# -*- coding: utf-8 -*-
"""A Simple Flask App main.py
process request and response
"""
import os
import sys
import inspect
from flask import Flask, request
from werkzeug.utils import secure_filename
def insert_path():
"""insert parentdir"""
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
insert_path()
from PictureToAscii import PictureToAscii
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
"""sent index html"""
return app.send_static_file("index.html")
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
"""receive file and return ans"""
if request.method == 'POST':
pic = request.files['file']
pic_path = "temp." + secure_filename(pic.filename)
pic.save(pic_path)
pir_obj = PictureToAscii()
response = "<plaintext>" + pir_obj.picture_to_ascii(pic_path, None)
os.remove(pic_path)
return response
@app.route('/<path:filename>')
def stacic_file(filename):
"""sent static file"""
return app.send_static_file(filename)
if __name__ == '__main__':
app.run(port=9010)
# app.run(host="45.32.21.140", port=9010, debug = False)
|
"""Search a 2D Matrix
https://www.lintcode.com/problem/search-a-2d-matrix/description
"""
class Solution:
"""
@param matrix: matrix, a list of lists of integers
@param target: An integer
@return: a boolean, indicate whether matrix contains target
"""
def searchMatrix(self, matrix, target):
# write your code here
if matrix == []:
return False
m, n = len(matrix), len(matrix[0])
start, end = 0, m * n - 1
while start <= end:
mid = start + (end - start) // 2
x, y = mid // n, mid % n
if matrix[x][y] < target:
start = mid + 1
elif matrix[x][y] > target:
end = mid - 1
else:
return True
return False
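# Example (illustrative): the matrix is treated as one sorted list of length m * n.
# Solution().searchMatrix([[1, 3, 5], [7, 9, 11]], 9) -> True
# Solution().searchMatrix([[1, 3, 5], [7, 9, 11]], 4) -> False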
|
# Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# This signature was contributed by RedSocks - http://redsocks.nl
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class Suspicious_TLD(Signature):
name = "suspicious_tld"
description = "Resolves a suspicious Top Level Domain (TLD)"
severity = 2
categories = ["tldwatch", "network"]
authors = ["RedSocks", "Kevin Ross"]
minimum = "2.0"
domains_re = [
(".*\\.by$", "Belarus domain TLD"),
(".*\\.cc$", "Cocos Islands domain TLD"),
(".*\\.onion$", "TOR hidden services domain TLD"),
(".*\\.pw$", "Palau domain TLD"),
(".*\\.ru$", "Russian Federation domain TLD"),
(".*\\.su$", "Soviet Union domain TLD"),
(".*\\.top$", "Generic top level domain TLD"),
]
queried_domains = []
def on_complete(self):
for indicator in self.domains_re:
for tld in self.check_domain(pattern=indicator[0], regex=True, all=True):
if tld not in self.queried_domains:
self.queried_domains.append(tld)
self.mark(
domain=tld,
description=indicator[1],
)
return self.has_marks()
|
import rdkit.Chem as Chem
import networkx as nx
class molecule():
def __init__(self,mol):
self.mol_name = mol
self.mol = Chem.MolFromMolFile(mol,removeHs=False)
self.atoms = self.mol.GetAtoms()
self.atoms_idx = [i.GetIdx() for i in self.atoms]
self.charge=[atom.GetFormalCharge() for atom in self.atoms]
self.symbol=[i.GetSymbol() for i in self.atoms]
self.bonds = self.mol.GetBonds()
self.bonds_idx = [[bond.GetBeginAtomIdx(),bond.GetEndAtomIdx()] for bond in self.bonds]
self.G = nx.Graph()
self.G.add_edges_from(self.bonds_idx)
self.angles_idx=[]
self.dihedrals_idx=[]
for node in self.G.nodes():
for n1 in self.G.neighbors(node):
for n2 in self.G.neighbors(n1):
if n2 != node:
if n2>node:
self.angles_idx.append([node,n1,n2])
for n3 in self.G.neighbors(n2):
if n3!=n1 and n3>node:
self.dihedrals_idx.append([node,n1,n2,n3])
# The first id is the center atom id (the same definition used by LAMMPS).
# Note: for OPLS the second id is often the center atom id in force field files,
# while for OPLSAA/AMBER it is often the third id.
self.impropers_idx = [[i, *list(self.G.neighbors(i))] for i in self.G.nodes() if self.G.degree(i) == 3]
|
# coding: utf-8
import smtplib
import ssl
from email.mime.text import MIMEText
from email.utils import formataddr
from email.mime.multipart import MIMEMultipart # New line
from email.mime.base import MIMEBase # New line
from email import encoders # New line
import pandas as pd
grado = input("GRADO: ")
File = pd.read_csv("CSV/ENCARGADOS " + grado + ".csv", encoding='utf-8')
# User configuration
sender_email = 'no.responder.lng@gmail.com'
sender_name = 'Colegio Mixto La Niñez Guatemalteca'
password = 'W@rlockz1000'
receiver_emails = []
receiver_names = []
for index, row in File.iterrows():
contador = 0
for i in row:
contador += 1
if contador > 2 and str(row[contador-1]) != 'nan' and "@" in str(row[contador-1]):
receiver_names.append(row[1])
receiver_emails.append(row[contador-1])
# Email body
email_body = ''
name = row[0]
filename = f'{grado} BASICO/pdf/{name}.pdf'
filenameJPG = f'{grado} BASICO/jpg/{name}.jpg'
for receiver_email, receiver_name in zip(receiver_emails, receiver_names):
print(f"Sending the email {receiver_name}, {receiver_email}...")
# Configurating user's info
msg = MIMEMultipart()
msg['To'] = formataddr((receiver_name, receiver_email))
msg['From'] = formataddr((sender_name, sender_email))
msg['Subject'] = 'Tarjeta de Calificaciones: ' + receiver_name
msg.attach(MIMEText(email_body, 'html'))
try:
# Open PDF file in binary mode
with open(filename, "rb") as attachment:
part = MIMEBase("application", "octet-stream")
part.set_payload(attachment.read())
# Encode file in ASCII characters to send by email
encoders.encode_base64(part)
# Add header as key/value pair to attachment part
part.add_header(
"Content-Disposition",
f"attachment; filename= {filename}",
)
msg.attach(part)
except Exception as e:
print(f"Oh no! We didn't find the attachment!\n{e}")
break
try:
# Creating a SMTP session | use 587 with TLS, 465 SSL and 25
server = smtplib.SMTP('smtp.gmail.com', 587)
# Encrypts the email
context = ssl.create_default_context()
server.starttls(context=context)
# We log in into our Google account
server.login(sender_email, password)
# Sending email from sender, to receiver with the email body
server.sendmail(sender_email, receiver_email, msg.as_string())
print('Email sent!')
except Exception as e:
print(f'Oh no! Something bad happened!\n{e}')
break
finally:
print('Closing the server...')
server.quit()
receiver_emails = []
receiver_names = [] |
from direct.actor.Actor import Actor
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from direct.interval.IntervalGlobal import *
from panda3d.core import *
from toontown.toonbase import ToontownGlobals
class DistributedTTCHQEventMgr(DistributedObject.DistributedObject):
HQCollisionName = 'HQ-Collision-{}'
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedTTCHQEventMgr')
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.task = None
self.oldHQ = None
self.newHQ = None
self.destroySeq = None
self.colNodePath = None
return
def announceGenerate(self):
DistributedObject.DistributedObject.announceGenerate(self)
self.task = taskMgr.add(self._findHQ)
self.cr.hq = self
def _findHQ(self, task):
hq = render.find('**/*hqTT*')
if hq:
self.placeHQ(hq)
return task.done
else:
return task.cont
def placeHQ(self, hq):
self.oldHQ = hq
self.newHQ = Actor('phase_14/models/modules/hqTT', {'shake': 'phase_14/models/modules/hqTT-shake', 'destroy': 'phase_14/models/modules/hqTT-fall',
'destroyed': 'phase_14/models/modules/hqTT-destroyed'})
self.newHQ.reparentTo(render)
self.newHQ.setPos(24.6425, 24.8587, 4.00001)
self.newHQ.setH(135)
self.newHQ.stash()
cs = CollisionSphere(24.6425, 24.8587, 4.00001, 25)
cs.setTangible(0)
self.colNodePath = self.oldHQ.attachNewNode(CollisionNode('cnode'))
self.colNodePath.node().addSolid(cs)
self.colNodePath.node().setCollideMask(ToontownGlobals.WallBitmask)
self.colNodePath.node().setName(self.HQCollisionName.format(self.getDoId()))
self.accept('enter' + self.HQCollisionName.format(self.getDoId()), self.d_acceptStink)
self.accept('exit' + self.HQCollisionName.format(self.getDoId()), self.d_rejectStink)
def disable(self):
self.ignoreAll()
if self.destroySeq is not None:
self.destroySeq.finish()
self.destroySeq = None
DistributedObject.DistributedObject.disable(self)
return
def delete(self):
if self.task is not None:
taskMgr.remove(self.task)
self.task = None
if self.newHQ is not None:
self.newHQ.cleanup()
self.newHQ.removeNode()
self.newHQ = None
self.oldHQ = None
DistributedObject.DistributedObject.delete(self)
return
def die(self):
self.destroySeq = Sequence(Func(self.newHQ.unstash), Func(self.oldHQ.stash), Func(self.newHQ.loop, 'shake'), Wait(2), ActorInterval(self.newHQ, 'destroy', startFrame=0, endFrame=24), Func(self.newHQ.loop, 'destroyed'), Wait(5), Func(self.newHQ.setPlayRate, -1, 'destroy'), ActorInterval(self.newHQ, 'destroy', startFrame=24, endFrame=0), Func(self.newHQ.setPlayRate, 1, 'destroy'), Func(self.oldHQ.unstash), Func(self.newHQ.stash))
self.destroySeq.start()
def d_acceptStink(self, _=None):
self.sendUpdate('acceptStink', [])
def d_rejectStink(self, _=None):
self.sendUpdate('rejectStink', []) |
import pytest
import brownie
from brownie import ZERO_ADDRESS
yDaiV2Address = "0x16de59092dAE5CcF4A1E6439D611fd0653f0Bd01"
ethZapAddress = "0x5A0bade607eaca65A0FE6d1437E0e3EC2144d540"
@pytest.fixture
def earnGenerator(AddressesGeneratorEarn, oracle, helper, management):
registryAddress = "0x62a4e0E7574E5407656A65CC8DbDf70f3C6EB04B"
generator = AddressesGeneratorEarn.deploy(
registryAddress,
{"from": management},
)
return generator
# def test_generator_info(earnGenerator):
# adapterInfo = earnGenerator.generatorInfo()
# assert adapterInfo[0] == earnGenerator
# assert adapterInfo[1] == "EARN"
# assert adapterInfo[2] == "SAFE"
# def test_registry_address(earnGenerator):
# assert not earnGenerator.registry() == ZERO_ADDRESS
# def test_assets_length(earnGenerator):
# assetsLength = earnGenerator.assetsLength()
# assert assetsLength > 0
def test_set_asset_deprecated(earnGenerator, management):
originalAssetsLength = earnGenerator.assetsLength()
originalAssetsAddressesLength = len(earnGenerator.assetsAddresses())
assert originalAssetsLength > 0
earnGenerator.setAssetDeprecated(yDaiV2Address, True, {"from": management})
newAssetsLength = earnGenerator.assetsLength()
newAssetsAddressesLength = len(earnGenerator.assetsAddresses())
assert earnGenerator.assetDeprecated(yDaiV2Address) == True
assert newAssetsAddressesLength == originalAssetsAddressesLength - 1
assert earnGenerator.numberOfDeprecatedAssets() > 0
assert newAssetsLength == originalAssetsLength - 1
def test_assets_addresses(earnGenerator):
assetsAddresses = earnGenerator.assetsAddresses()
assert len(assetsAddresses) > 0
assert not assetsAddresses[0] == ZERO_ADDRESS
def test_set_position_spender_addresses(earnGenerator, management, rando):
with brownie.reverts():
earnGenerator.setPositionSpenderAddresses([ethZapAddress], {"from": rando})
earnGenerator.setPositionSpenderAddresses([ethZapAddress], {"from": management})
assert earnGenerator.positionSpenderAddresses(0) == ethZapAddress
spenderAddresses = earnGenerator.getPositionSpenderAddresses()
assert len(spenderAddresses) > 0
|
import json
def convert_json_to_dict(text: str):
"""Function that converts JSON to dictionary
Args:
text (str): text to be processed
Returns:
dict: the parsed JSON as a dictionary
"""
return json.loads(text)
class JSONString:
"""Class that handles conversion of JSON strings
Args:
text (str): text to be processed
"""
def __init__(self, text: str) -> None:
self.text = text
def load_text_in_dict(self) -> None:
self.text_in_dict = convert_json_to_dict(self.text)
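# Usage sketch (illustrative, not part of the original module):
# js = JSONString('{"a": 1}')
# js.load_text_in_dict()
# js.text_in_dict -> {'a': 1}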
|
import pandas as pd
import csv
import glob
import os
from pathlib import Path
import xlrd
from money import Money
def get_path():
return str(Path().absolute()) + "/"
def convert_to_csv(output_file_name,my_file, my_sheet_name="Sheet1",my_index=False):
wb = xlrd.open_workbook(str(my_file), logfile=open(os.devnull, 'w')) # I did this to suppress the Warning msg.
return pd.read_excel(wb, sheet_name=my_sheet_name, engine='xlrd').to_csv(output_file_name, index=my_index)
def open_csv(csv_file):
data = []
with open(csv_file, newline='') as f:
reader = csv.reader(f)
for k in reader:
data.append(k)#list(filter(None,k)))
return data
def filter_transactions(raw_datas):
response = []
seen = []
for data in raw_datas:
# data[1][22]--> header
# End of Transaction Cell --> "Saldo atual"
last = 0
for row in data[1:]:
for j, content in enumerate(row[:]):
# Find End of Transactions:
if content[2] == "Saldo atual": # Next thing after transactions ends.
# ToDo: Create the test for this: total # rows - position of 'Saldo atual' = 33.
# test.assert(len(row)-j,33)
# print(j, len(row), " = ", len(row)- j)
# input()
last = j
transactions = data[1][22:last]
if transactions not in seen:
response.append([data[0],transactions])
seen.append(transactions)
return response
def list_excel(path = get_path()):
return glob.glob(path + "/*.xls*")
def list_csv(path = get_path()):
return glob.glob(path + "/*.csv")
def exists(filepath):
if os.path.exists(filepath):
print(filepath, 'exists.')
return True
def convert_all(input_folder, output_folder):
excel_files = list_excel(input_folder)
for excel_file in excel_files:
filepath = "{0}{1}.csv".format(output_folder,excel_file[len(input_folder):-4]) # without .xls
if not exists(filepath):
convert_to_csv(filepath, excel_file)
def consolidate_reports(consolidated_data, folder_name):
data = []
# seen = []
datas = []
csv_files = list_csv(get_path() + folder_name)
for csv_file in csv_files:
data = open_csv(csv_file)
for row in data:
# if row not in seen:
datas.append(row)
# seen.append(row)
return datas
# return a list of lists :/
# raw_data = [[report], [report], [report]]
def consolidate_data(folder_name):
data = []
raw_datas = []
csv_files = list_csv(get_path() + folder_name)
for csv_file in csv_files:
data = open_csv(csv_file)
raw_datas.append([csv_file[len(get_path()):], data])
# print(raw_datas[0])
# input()
return raw_datas
def save_single_report(output_file, reports):
# reports = [[filname, report1], [filename, report2], [...]]
# reportN = [[headers],[transaction1], [transaction2], [transactionN...]]
# print(reports)
# input()
row_headers = reports[2][1][0]
headers =[]
headers.append("Filename")
for header in row_headers:
if header:
headers.append(header)# =[header for header in row_headers if header]
# print(headers)
# input()
positions = []
for i, header in enumerate(row_headers):
positions.append(i) if header else None
# print(headers)
# print(positions)
# input()
f = csv.writer(open(output_file, "w", encoding="mac-roman"))
f.writerow(headers)
# print(reports[0][0])
# input()
response = []
# response.append(headers)
data = {}
for transactions in reports:
# check if transactions[1][0] == headers
for position in positions:
if transactions[1][0][position] not in headers:
print(transactions[0][position])
input("Dados Diferente?")
for i, transaction in enumerate(transactions[1][1:]): # header not needed.
datas = []
datas.append(transactions[0][:])
for pos, header in enumerate(headers[1:]):
data[header] = transaction[positions[pos]]
try:
# todo
data[header] = Money(data[header], "BRL")
except:
None
datas.append(data[header]) #.replace(".",",")
# if data[header] == '30000':
# print(datas)
# input()
if not all(value == '' for value in datas[1:]):
response.append(datas)
f.writerow(datas)
df = pd.DataFrame(response, columns=headers)
return df
def replace_decimal_notation(raw_datas):
# raw_datas = [[report1], [report2], [report...]]
# # report1 = [transacton1, transaction2]
for report in raw_datas:
for transaction in report:
for field in transaction:
field = field.replace(".", ",")
# datas = [field.replace(".",",") for field in transaction for transaction in report for report in datas]
return report
def outputs_to(folder_name) -> str:
if not os.path.exists(folder_name):
os.mkdir(folder_name)
return folder_name
def main(output_report='statements.csv', csv_folder_name='reports_csv', excel_folder_name='reports_xls'):
print("##### Converting all Excels to CSV")
convert_all(excel_folder_name, outputs_to(csv_folder_name))
print("##### Consolidating datas") # list with every report.
# raw_datas will be [[report1], [report2], [report...] ]
raw_datas = consolidate_data(csv_folder_name)
# print("##### Consolidating reports")
# datas = consolidate_reports(raw_datas, csv_folder_name)
print("##### Filtering Transactions")
# datas_mov will be [[transactions_from_report1], [transactions_from_report2], [...]]
datas_mov = filter_transactions(raw_datas)
# print("##### Replacing decimal notation '.' -> ','")
# datas_mov = replace_decimal_notation(datas_mov)
print("##### Saving in a single Report")
# response = []
response = save_single_report(output_report, datas_mov)
return [raw_datas, datas_mov, response]
if __name__ == '__main__':
raw_data, datas_mov, df = main(output_report='statements.csv', csv_folder_name='reports_csv', excel_folder_name='reports_xls') |
handshake = {
'key': 'API_KEY_HERE'
}
fishbowl = {
'host': 'HOST_ADDRESS_OF_FISHBOWL_SERVER',
'user': 'USER_NAME_OF_FISHBOWL_ADMIN',
'password': 'PASSWORD_FOR_FISHBOWL_ADMIN'
}
# handbowl_settings = {
# 'sync_products': True,
# 'sync_inventory': False,
# 'sync_customers': True,
# 'sync_orders': True
# }
|
# Copyright 2022 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import copy
from typing import Tuple, Callable
from model_compression_toolkit.core import common
from model_compression_toolkit.core.common.graph.base_graph import Graph
from model_compression_toolkit.core.common.graph.graph_matchers import EdgeMatcher, NodeOperationMatcher
from model_compression_toolkit.core.common.graph.base_node import BaseNode
class ResidualCollapsing(common.BaseSubstitution):
"""
Collapse Add residual into previous Conv2D (No non-linear activation between them)
"""
def __init__(self,
first_node: NodeOperationMatcher,
second_node: NodeOperationMatcher,
residual_collapsing_fn: Callable,
kernel_str: str,
layer_name_str: str = None):
"""
Collapsing Add residual node into previous Conv2D node
Args:
first_node: Node matcher for convolution type nodes.
second_node: Node matcher for add type nodes.
residual_collapsing_fn: Function for updating the convolution kernel
kernel_str: The framework specific attribute name of the convolution layer's weight/kernel.
layer_name_str: The framework specific attribute name of layer's name.
"""
super().__init__(matcher_instance=EdgeMatcher(first_node, second_node))
self.residual_collapsing_fn = residual_collapsing_fn
self.kernel_str = kernel_str
self.layer_name_str = layer_name_str
def substitute(self,
graph: Graph,
edge_nodes: Tuple[BaseNode, BaseNode]) -> Graph:
"""
Collapse residual Add layer into previous Conv2D layers.
Args:
graph: Graph we apply the substitution on.
edge_nodes: Tuple of two linear nodes
Returns:
Graph after applying the substitution.
"""
first_node = edge_nodes[0]
second_node = edge_nodes[1]
# If the linear operator is part of a reused group (it is the "base" node, or a reused node),
# we should skip the substitution.
if first_node.reuse or first_node.reuse_group is not None:
return graph
if second_node.reuse or second_node.reuse_group is not None:
return graph
# Check if convolution and residual satisfy the collapsing conditions, otherwise skip substitution
if len(graph.get_next_nodes(first_node)) > 1 or len(graph.get_prev_nodes(second_node)) != 2:
return graph
# Check if Add is residual connection, otherwise skip substitution
conv_prev_nodes = graph.get_prev_nodes(first_node)
add_prev_nodes = graph.get_prev_nodes(second_node)
add_prev_nodes.remove(first_node)
if conv_prev_nodes[0] != add_prev_nodes[0]:
return graph
# New collapsed weights
kernel_collapsed = self.residual_collapsing_fn(first_node, self.kernel_str)
num_nodes_before_substition = len(graph.nodes)
num_edges_before_substition = len(graph.edges)
# New collapsed node
conv_collapsed = copy.deepcopy(first_node)
conv_collapsed_name = first_node.name + '_' + second_node.name + "_collapsed"
conv_collapsed.name = conv_collapsed_name
conv_collapsed.set_weights_by_keys(self.kernel_str, kernel_collapsed)
if self.layer_name_str is not None:
conv_collapsed.framework_attr[self.layer_name_str] = conv_collapsed_name
# Update graph
graph.add_node(conv_collapsed)
graph.reconnect_out_edges(current_node=second_node, new_node=conv_collapsed)
graph.reconnect_in_edges(current_node=first_node, new_node=conv_collapsed)
graph.replace_output_node(current_node=second_node, new_node=conv_collapsed)
graph.remove_edge(first_node, second_node)
graph.remove_edge(add_prev_nodes[0], second_node)
graph.remove_node(first_node)
graph.remove_node(second_node)
# Sanity check
assert num_nodes_before_substition - len(graph.nodes) == 1
assert num_edges_before_substition - len(graph.edges) == 2
return graph
|
from os_ios_prepare_ipa_file import ipa_preparer as ip
ip.prepare_ipa_file(ipa_file_path="path/to/ipa_file.ipa",
ipa_file_path_in_server= "www.my_website.com/storage/ipa_file.ipa",
html_file_path_in_server="www.my_website.com/pages/download_latest_version/download.html",
bundle_identifier="com.app.bundleidentifier",
app_name="MyAppName") |
from __future__ import absolute_import, unicode_literals
import os
import tornado.web
from mopidy import config, ext
__version__ = '0.3.1'
class PartyRequestHandler(tornado.web.RequestHandler):
def initialize(self, core, data, config):
self.core = core
self.data = data
self.requiredVotes = config["party"]["votes_to_skip"]
def get(self):
currentTrack = self.core.playback.get_current_track().get()
if (currentTrack == None): return
currentTrackURI = currentTrack.uri
# If the current track is different to the one stored, clear votes
if (currentTrackURI != self.data["track"]):
self.data["track"] = currentTrackURI
self.data["votes"] = []
if (self.request.remote_ip in self.data["votes"]): # User has already voted
self.write("You have already voted to skip this song =)")
else: # Valid vote
self.data["votes"].append(self.request.remote_ip)
if (len(self.data["votes"]) == self.requiredVotes):
self.core.playback.next()
self.write("Skipping...")
else:
self.write("You have voted to skip this song. ("+str(self.requiredVotes-len(self.data["votes"]))+" more votes needed)")
def party_factory(config, core):
data = {'track':"", 'votes':[]}
return [
('/vote', PartyRequestHandler, {'core': core, 'data':data, 'config':config})
]
class Extension(ext.Extension):
dist_name = 'Mopidy-Party'
ext_name = 'party'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = super(Extension, self).get_config_schema()
schema['votes_to_skip'] = config.Integer(minimum=0)
return schema
def setup(self, registry):
registry.add('http:static', {
'name': self.ext_name,
'path': os.path.join(os.path.dirname(__file__), 'static'),
})
registry.add('http:app', {
'name': self.ext_name,
'factory': party_factory,
})
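# Illustrative only: get_default_config() above reads an ext.conf shipped next to
# this module. A config enabling the extension and requiring three votes to skip
# a track could look like the following (the exact defaults shipped with
# Mopidy-Party may differ):
#
#   [party]
#   enabled = true
#   votes_to_skip = 3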
|
from experiment_helper_logo import *
from shape import *
import dsl
from DSL import logo
def print_clown():
eye1 = Polygon(5,3)
eye1.move(-2,-2)
eye2 = Polygon(5,3)
eye2.move(2,-2)
s4 = Circle(4)
smile1 = Rectangle(10, 3)
smile1.move(-1, 2)
smile2 = Rectangle(10, 3)
smile2.move(1, 2)
draw_all_shape_show()
def test_Rectangle_superposed():
print("Test Rectangle masked")
clear_list_shape()
rec3 = Rectangle(9, 3)
rec1 = Rectangle(8, 8)
rec2 = Rectangle(5, 5)
test = Polygon(10, 8)
print("This should print True : " + str(rec1.is_superposed_on(rec2)))
print("This should print False : " + str(rec2.is_superposed_on(rec1)))
print("This should print False : " + str(rec3.is_superposed_on(rec1)))
print("This should print False : " + str(rec3.is_superposed_on(rec2)))
draw_all_shape_show()
def test_superposed_img_detection():
print("Shape detection test")
clear_list_shape()
rec1 = Rectangle(8, 8)
rec2 = Rectangle(5, 5)
test = Polygon(10, 4)
test.move(3, -2)
ls = get_list_shape()
print("This should be 0: " + str(evaluate_superposition_img(ls)))
draw_all_shape_show()
def test_color_next():
global start_color
start_color = [1, 0, 0]
print("Start color test")
print(start_color)
next_color()
print(start_color)
for i in range(254):
next_color()
print(start_color)
c = (start_color)
print(tuple(c))
start_color = [1, 0, 0]
##############################################################
max_program_depth = 15
logoDSL = dsl.DSL(logo.semantics, logo.primitive_types)
type_request = logo.FIXED
logo_cfg = logoDSL.DSL_to_CFG(type_request, max_program_depth=max_program_depth)
logo_pcfg = logo_cfg.CFG_to_Uniform_PCFG()
generator = logo_pcfg.sampling(seed=5)
selection_loop = False
for i in range(5):
clear_list_shape()
a = generator.__next__()
a.eval_naive(logoDSL, None)
ls = get_list_shape()
if selection_loop:
while evaluate_superposition_img(ls) != 0 or len(ls) <= 2:
clear_list_shape()
a = generator.__next__()
a.eval_naive(logoDSL, None)
ls = get_list_shape()
print(a)
print("\n")
draw_all_shape_show()
|
# https://leetcode.com/problems/sum-of-two-integers/
class Solution:
def getSum(self, a: int, b: int) -> int:
x = 0xffffffff
# Python ints are arbitrary precision rather than 32-bit, so without a mask the
# carry would keep growing and the loop would never terminate. The 32-bit mask
# limits the termination check to the lower 32 bits.
while (b & x) > 0:
c = (a & b) << 1
a ^= b
b = c
return (a & x) if b > 0 else a
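# Quick sanity checks for the bit-manipulation addition above (added sketch, not
# part of the original snippet); the second case exercises the 32-bit mask.
if __name__ == "__main__":
    s = Solution()
    assert s.getSum(2, 3) == 5
    assert s.getSum(-1, 1) == 0  # carries propagate past bit 31; the mask trims the result to 0
    print("getSum checks passed")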
|
{
"targets": [
{
"target_name": "EspressoLogicMinimizer",
"cflags": ["-std=c99", "-Wno-misleading-indentation", "-Wno-unused-result", "-Wno-format-overflow", "-Wno-implicit-fallthrough"],
"sources": [
"bridge/addon.cc",
"bridge/bridge.c",
"espresso-src/black_white.c",
"espresso-src/canonical.c",
"espresso-src/cofactor.c",
"espresso-src/cols.c",
"espresso-src/compl.c",
"espresso-src/contain.c",
"espresso-src/cpu_time.c",
"espresso-src/cubestr.c",
"espresso-src/cvrin.c",
"espresso-src/cvrm.c",
"espresso-src/cvrmisc.c",
"espresso-src/cvrout.c",
"espresso-src/dominate.c",
"espresso-src/equiv.c",
"espresso-src/espresso.c",
"espresso-src/essen.c",
"espresso-src/essentiality.c",
"espresso-src/exact.c",
"espresso-src/expand.c",
"espresso-src/gasp.c",
"espresso-src/gimpel.c",
"espresso-src/globals.c",
"espresso-src/hack.c",
"espresso-src/indep.c",
"espresso-src/irred.c",
"espresso-src/map.c",
"espresso-src/matrix.c",
"espresso-src/mincov.c",
"espresso-src/opo.c",
"espresso-src/pair.c",
"espresso-src/part.c",
"espresso-src/primes.c",
"espresso-src/prtime.c",
"espresso-src/reduce.c",
"espresso-src/rows.c",
"espresso-src/set.c",
"espresso-src/setc.c",
"espresso-src/sharp.c",
"espresso-src/sigma.c",
"espresso-src/signature.c",
"espresso-src/signature_exact.c",
"espresso-src/sminterf.c",
"espresso-src/solution.c",
"espresso-src/sparse.c",
"espresso-src/unate.c",
"espresso-src/util_signature.c",
"espresso-src/verify.c"
],
"include_dirs" : [
"<!(node -e \"require('nan')\")",
"espresso-src"
]
}
]
}
|
# This code is written at BigVision LLC. It is based on the OpenCV project. It is subject to the license terms in the LICENSE file found in this distribution and at http://opencv.org/license.html
# Usage example: python3 object_detection_yolo.py  (a frame is captured from the webcam: press Space to take the shot, Esc to quit)
import cv2 as cv
import argparse
import sys
import numpy as np
import os.path
cam = cv.VideoCapture(0)
cv.namedWindow("test")
img_counter = 0
frame = []
while True:
ret, frame = cam.read()
if not ret:
print("failed to grab frame")
break
cv.imshow("test", frame)
k = cv.waitKey(1)
if k%256 == 27:
print("Escape hit, closing...")
break
elif k%256 == 32:
# Space pressed: keep the last captured frame for detection below
img_counter += 1
break
cam.release()
cv.destroyAllWindows()
# Initialize the parameters
confThreshold = 0.5 #Confidence threshold
nmsThreshold = 0.4 #Non-maximum suppression threshold
inpWidth = 416 #Width of network's input image
inpHeight = 416 #Height of network's input image
classesFile = "coco.names"
classes = None
with open(classesFile, 'rt') as f:
classes = f.read().rstrip('\n').split('\n')
# Give the configuration and weight files for the model and load the network using them.
modelConfiguration = "yolov3.cfg"
modelWeights = "yolov3.weights"
# Get the names of the output layers
def getOutputsNames(net):
# Get the names of all the layers in the network
layersNames = net.getLayerNames()
# Get the names of the output layers, i.e. the layers with unconnected outputs
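# Note: the i[0] indexing below assumes an older OpenCV where getUnconnectedOutLayers()
# returns an Nx1 array; newer releases (roughly 4.5.4 and later) return a flat array,
# in which case `layersNames[i - 1]` would be needed instead.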
return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# Draw the predicted bounding box
def drawPred(classId, conf, left, top, right, bottom):
# Draw a bounding box.
cv.rectangle(frame, (left, top), (right, bottom), (255, 178, 50), 3)
label = '%.2f' % conf
# Get the label for the class name and its confidence
if classes:
assert(classId < len(classes))
label = '%s:%s' % (classes[classId], label)
#Display the label at the top of the bounding box
labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 1, 1)
top = max(top, labelSize[1])
cv.rectangle(frame, (left, top - round(1.5*labelSize[1])), (left + round(1.5*labelSize[0]), top + baseLine), (255, 255, 255), cv.FILLED)
cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 1, (0,0,0), 2)
# Remove the bounding boxes with low confidence using non-maxima suppression
def postprocess(frame, outs):
frameHeight = frame.shape[0]
frameWidth = frame.shape[1]
# Scan through all the bounding boxes output from the network and keep only the
# ones with high confidence scores. Assign the box's class label as the class with the highest score.
classIds = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
classId = np.argmax(scores)
confidence = scores[classId]
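# Class index 46 corresponds to "banana" in the standard coco.names list, so only banana detections are kept.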
if confidence > confThreshold and classId == 46:
center_x = int(detection[0] * frameWidth)
center_y = int(detection[1] * frameHeight)
width = int(detection[2] * frameWidth)
height = int(detection[3] * frameHeight)
left = int(center_x - width / 2)
top = int(center_y - height / 2)
classIds.append(classId)
confidences.append(float(confidence))
boxes.append([left, top, width, height])
# Perform non maximum suppression to eliminate redundant overlapping boxes with
# lower confidences.
indices = cv.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)
for i in indices:
i = i[0]
box = boxes[i]
left = box[0]
top = box[1]
width = box[2]
height = box[3]
drawPred(classIds[i], confidences[i], left, top, left + width, top + height)
net = cv.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
winName = 'Banana detection in OpenCV with YOLOV3'
cv.namedWindow(winName, cv.WINDOW_NORMAL)
# Open the image file
while cv.waitKey(1) < 0:
# Create a 4D blob from a frame.
blob = cv.dnn.blobFromImage(frame, 1/255, (inpWidth, inpHeight), [0,0,0], 1, crop=False)
# Sets the input to the network
net.setInput(blob)
# Runs the forward pass to get output of the output layers
outs = net.forward(getOutputsNames(net))
# Remove the bounding boxes with low confidence
postprocess(frame, outs)
print("Done processing !!!")
cv.imshow(winName, frame)
cv.waitKey(3000)
sys.exit(0)  # exit cleanly after showing the detection result
|
from lib import action
class ColorTempMiredAction(action.BaseAction):
def run(self, light_id, temperature, transition_time):
light = self.hue.lights.get(light_id)
light.ct(temperature, transition_time)
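# The Hue API expresses colour temperature in mired (micro reciprocal degrees),
# where mired = 1_000_000 / kelvin. A small helper for callers who think in
# kelvin; this is an illustrative sketch, the helper is not part of this pack.
def kelvin_to_mired(kelvin: float) -> int:
    """Convert a colour temperature in kelvin to the mired value expected by `ct`."""
    return int(round(1_000_000 / kelvin))

# e.g. kelvin_to_mired(2700) -> 370, kelvin_to_mired(6500) -> 154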
|
from __future__ import absolute_import
import numpy as np
import os
from . import shocktube
"Runs the shocktube test problem with the Python HLLC solver."
claw = shocktube.setup(kernel_language='Python')
claw.run()
test_solution = claw.solution.state.get_q_global()
if test_solution is not None:
thisdir = os.path.dirname(__file__)
expected_density = np.loadtxt(os.path.join(thisdir,'shocktube_regression_density.txt'))
test_density = test_solution[0,:]
test_err = np.linalg.norm(expected_density-test_density)
assert test_err < 1.e-4
|
from pyvdp.visadirect import VisaDirectDispatcher
def send(data):
"""Submits a WatchlistInquiry request.
:param WatchListInquiryModel data: **Required**.
Instance of :func:`~pyvdp.visadirect.watchlist.WatchListInquiryModel`.
:return: Dictionary with VDP API response.
**Usage:**
.. code:: python
from pyvdp.visadirect.watchlist import watchlistinquiry, WatchListInquiryModel
wli_address_kwargs = {
"cardIssuerCountryCode": "USA",
"city": "San Francisco"
}
data_kwargs = {
"acquirerCountryCode": "840",
"acquiringBin": "408999",
"address": WatchListInquiryModel.WatchListInquiryAddress(**wli_address_kwargs),
"name": "Mohammed Qasim",
"referenceNumber": "330000550000"
}
data = WatchListInquiryModel(**data_kwargs)
result = watchlistinquiry.send(data)
print(result)
"""
c = VisaDirectDispatcher(resource='visadirect',
api='watchlistscreening',
method='watchlistinquiry',
http_verb='POST',
data=data)
return c.send()
|
"""CLI for the Data Delivery System."""
####################################################################################################
# IMPORTS ################################################################################ IMPORTS #
####################################################################################################
# Standard library
import concurrent.futures
import itertools
import logging
import os
import sys
# Installed
import rich_click as click
import click_pathlib
import rich
import rich.logging
import rich.markup
import rich.progress
import rich.prompt
import questionary
# Own modules
import dds_cli
import dds_cli.account_manager
import dds_cli.unit_manager
import dds_cli.motd_manager
import dds_cli.data_getter
import dds_cli.data_lister
import dds_cli.data_putter
import dds_cli.data_remover
import dds_cli.directory
import dds_cli.project_creator
import dds_cli.auth
import dds_cli.project_status
import dds_cli.user
import dds_cli.utils
from dds_cli.options import (
email_arg,
email_option,
folder_option,
num_threads_option,
project_option,
sort_projects_option,
source_option,
source_path_file_option,
token_path_option,
break_on_fail_flag,
json_flag,
nomail_flag,
silent_flag,
size_flag,
tree_flag,
usage_flag,
users_flag,
)
####################################################################################################
# START LOGGING CONFIG ###################################################### START LOGGING CONFIG #
####################################################################################################
LOG = logging.getLogger()
# Configuration for rich-click output
click.rich_click.MAX_WIDTH = 100
## # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# MMMM MMMM AAAA II NNNN NN #
# MM MM MM MM AA AA II NN NN NN #
# MM MMM MM AA AA II NN NN NN #
# MM M MM AAAAAAAAAA II NN NN NN #
# MM MM AA AA II NN NNNN #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # ##
dds_url = dds_cli.DDSEndpoint.BASE_ENDPOINT
# Print header to STDERR
dds_cli.utils.stderr_console.print(
"[green] ︵",
"\n[green] ︵ ( ) ︵",
"\n[green]( ) ) ( ( )[/] [bold]SciLifeLab Data Delivery System",
"\n[green] ︶ ( ) ) ([/] [blue][link={0}]{0}/[/link]".format(
dds_url[: dds_url.index("/", 8)]
),
f"\n[green] ︶ ( )[/] [dim]Version {dds_cli.__version__}",
"\n[green] ︶",
highlight=False,
)
# -- dds -- #
@click.group()
@click.option(
"-v", "--verbose", is_flag=True, default=False, help="Print verbose output to the console."
)
@click.option("-l", "--log-file", help="Save a log to a file.", metavar="<filename>")
@click.option(
"--no-prompt", is_flag=True, default=False, help="Run without any interactive features."
)
@token_path_option()
@click.version_option(
version=dds_cli.__version__,
prog_name=dds_cli.__title__,
help="Display the version of this software.",
)
@click.help_option(
help="List the options of any DDS subcommand and its default settings.",
)
@click.pass_context
def dds_main(click_ctx, verbose, log_file, no_prompt, token_path):
"""SciLifeLab Data Delivery System (DDS) command line interface.
Access token is saved in a .dds_cli_token file in the home directory.
The token is valid for 7 days. Make sure your token is valid long enough for the
delivery to finish. To avoid a delivery failing because of an expired token, we recommend
re-authenticating yourself before each delivery ('dds data put' / 'get').
"""
# Get token metadata
username = dds_cli.user.User.get_user_name_if_logged_in(token_path=token_path)
if username:
dds_cli.utils.stderr_console.print(
f"[green]Current user:[/] [red]{username}", highlight=False
)
if "--help" not in sys.argv:
# Set the base logger to output DEBUG
LOG.setLevel(logging.DEBUG)
# Set up logs to the console
LOG.addHandler(
rich.logging.RichHandler(
level=logging.DEBUG if verbose else logging.INFO,
console=dds_cli.utils.stderr_console,
show_time=False,
markup=True,
show_path=verbose,
)
)
# Set up logs to a file if we asked for one
if log_file:
log_fh = logging.FileHandler(log_file, encoding="utf-8")
log_fh.setLevel(logging.DEBUG)
log_fh.setFormatter(
logging.Formatter("[%(asctime)s] %(name)-20s [%(levelname)-7s] %(message)s")
)
LOG.addHandler(log_fh)
# Create context object
click_ctx.obj = {"NO_PROMPT": no_prompt, "TOKEN_PATH": token_path}
# ************************************************************************************************ #
# MAIN DDS COMMANDS ************************************************************ MAIN DDS COMMANDS #
# ************************************************************************************************ #
# -- dds ls -- #
@dds_main.command(name="ls")
# Options
@project_option(required=False)
@sort_projects_option()
@folder_option(help_message="List contents of this project folder.")
@click.option(
"--binary",
"-b",
required=False,
is_flag=True,
default=False,
help=(
"Use binary unit prefixes (e.g. KiB instead of KB, "
"MiB instead of MB) for size and usage columns."
),
)
# Flags
@json_flag(help_message="Output in JSON format.")
@size_flag(help_message="Show size of project contents.")
@tree_flag(help_message="Display the entire project(s) directory tree.")
@usage_flag(help_message="Show the usage for available projects, in GBHours and cost.")
@users_flag(help_message="Display users associated with a project (requires a project ID).")
@click.option("--projects", "-lp", is_flag=True, help="List all projects connected to your account.")
@click.pass_obj
def list_projects_and_contents(
click_ctx, project, folder, sort, json, size, tree, usage, binary, users, projects
):
"""List the projects you have access to or the project contents.
To list all projects, run `dds ls` without any arguments, or use the `--projects` flag.
Specify a Project ID to list the files within a project.
You can also follow this with a subfolder path to show files within that folder.
"""
try:
# List all projects if project is None and all files if project spec
if project is None:
with dds_cli.data_lister.DataLister(
project=project,
show_usage=usage,
no_prompt=click_ctx.get("NO_PROMPT", False),
json=json,
token_path=click_ctx.get("TOKEN_PATH"),
binary=binary,
) as lister:
projects = lister.list_projects(sort_by=sort)
if json:
dds_cli.utils.console.print_json(data=projects)
else:
# If an interactive terminal, ask user if they want to view files for a project
if sys.stdout.isatty() and not lister.no_prompt:
project_ids = [p["Project ID"] for p in projects]
LOG.info(
"Would you like to view files in a specific project? "
"Leave blank to exit."
)
# Keep asking until we get a valid response
while project not in project_ids:
try:
project = questionary.autocomplete(
"Project ID:",
choices=project_ids,
validate=lambda x: x in project_ids or x == "",
style=dds_cli.dds_questionary_styles,
).unsafe_ask()
assert project and project != ""
# If didn't enter anything, convert to None and exit
except (KeyboardInterrupt, AssertionError):
break
# List all files in a project if we know a project ID
if project:
with dds_cli.data_lister.DataLister(
project=project,
tree=tree,
no_prompt=click_ctx.get("NO_PROMPT", False),
json=json,
token_path=click_ctx.get("TOKEN_PATH"),
) as lister:
if json:
json_output = {"project_name": project}
if users:
user_list = lister.list_users()
json_output["users"] = user_list
if tree:
folders = lister.list_recursive(show_size=size)
json_output["project_files_and_directories"] = folders
else:
LOG.warning(
"JSON output for file listing only possible for the complete file tree."
" Please use the '--tree' option to view complete contens in JSON or "
"remove the '--json' option to list files interactively"
)
dds_cli.utils.console.print_json(data=json_output)
else:
if users:
user_list = lister.list_users()
if tree:
folders = lister.list_recursive(show_size=size)
else:
folders = lister.list_files(folder=folder, show_size=size)
# If an interactive terminal, ask user if they want to view files for a proj
if sys.stdout.isatty() and (not lister.no_prompt) and len(folders) > 0:
LOG.info(
"Would you like to view files within a directory? "
"Leave blank to exit."
)
last_folder = None
while folder is None or folder != last_folder:
last_folder = folder
try:
folder = questionary.autocomplete(
"Folder:",
choices=folders,
validate=lambda x: x in folders or x == "",
style=dds_cli.dds_questionary_styles,
).unsafe_ask()
assert folder != ""
assert folder is not None
# If didn't enter anything, convert to None and exit
except (KeyboardInterrupt, AssertionError):
break
# Prepend existing file path
if last_folder is not None and folder is not None:
folder = os.path.join(last_folder, folder)
# List files
folders = lister.list_files(folder=folder, show_size=size)
if len(folders) == 0:
break
except (dds_cli.exceptions.NoDataError) as err:
LOG.warning(err)
sys.exit(0)
except (
dds_cli.exceptions.APIError,
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
) as err:
LOG.error(err)
sys.exit(1)
####################################################################################################
####################################################################################################
## AUTH #################################################################################### AUTH ##
####################################################################################################
####################################################################################################
@dds_main.group(name="auth", no_args_is_help=True)
@click.pass_obj
def auth_group_command(_):
"""Group command for creating and managing authenticated sessions.
Authenticate yourself once and run multiple commands within a certain amount of time
(currently 7 days) without specifying your user credentials.
If you do not authenticate yourself and start a new session, you will need to provide your
DDS username when running the other commands.
All subcommands are usable by all user roles.
"""
# ************************************************************************************************ #
# AUTH COMMANDS ******************************************************************** AUTH COMMANDS #
# ************************************************************************************************ #
# -- dds auth login -- #
@auth_group_command.command(name="login")
@click.pass_obj
def login(click_ctx):
"""Start or renew an authenticated session.
Creates or renews the authentication token stored in the '.dds_cli_token' file.
Run this command before running the cli in a non-interactive fashion as this enables the longest
possible session time before a password needs to be entered again.
"""
no_prompt = click_ctx.get("NO_PROMPT", False)
if no_prompt:
LOG.warning("The --no-prompt flag is ignored for `dds auth login`")
try:
with dds_cli.auth.Auth(token_path=click_ctx.get("TOKEN_PATH")):
# Authentication token renewed in the init method.
LOG.info("[green] :white_check_mark: Authentication token created![/green]")
except (
dds_cli.exceptions.APIError,
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.DDSCLIException,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
) as err:
LOG.error(err)
sys.exit(1)
# -- dds auth logout -- #
@auth_group_command.command(name="logout")
@click.pass_obj
def logout(click_ctx):
"""End authenticated session.
Removes the saved authentication token by deleting the '.dds_cli_token' file.
"""
try:
with dds_cli.auth.Auth(
authenticate=False, token_path=click_ctx.get("TOKEN_PATH")
) as authenticator:
authenticator.logout()
except (dds_cli.exceptions.DDSCLIException, dds_cli.exceptions.ApiRequestError) as err:
LOG.error(err)
sys.exit(1)
# -- dds auth info -- #
@auth_group_command.command(name="info")
@click.pass_obj
def info(click_ctx):
"""Display information about ongoing authenticated session.
\b
Information displayed:
- If the token is about to expire
- Time of token expiration
"""
try:
with dds_cli.auth.Auth(
authenticate=False, token_path=click_ctx.get("TOKEN_PATH")
) as authenticator:
authenticator.check()
except (dds_cli.exceptions.DDSCLIException, dds_cli.exceptions.ApiRequestError) as err:
LOG.error(err)
sys.exit(1)
####################################################################################################
####################################################################################################
## USER #################################################################################### USER ##
####################################################################################################
####################################################################################################
@dds_main.group(name="user", no_args_is_help=True)
@click.pass_obj
def user_group_command(_):
"""Group command for managing user accounts, including your own."""
# ************************************************************************************************ #
# USER COMMANDS ******************************************************************** USER COMMANDS #
# ************************************************************************************************ #
# -- dds user ls -- #
# TODO: Move this to dds unit?
@user_group_command.command(name="ls")
@click.option(
"--unit",
"-u",
required=False,
type=str,
help="Super Admins only: The unit which you wish to list the users in.",
)
@click.pass_obj
def list_users(click_ctx, unit):
"""List Unit Admins and Personnel connected to a specific unit.
\b
Super Admins:
- Required to specify a public unit ID.
- Can list users within all units.
\b
Unit Admins / Personnel:
- Any unit specified with `--unit` will be ignored.
- You can only list users connected to your specific unit.
"""
try:
with dds_cli.account_manager.AccountManager(
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as lister:
lister.list_unit_users(unit=unit)
except (
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
dds_cli.exceptions.DDSCLIException,
) as err:
LOG.error(err)
sys.exit(1)
# -- dds user add -- #
@user_group_command.command(name="add", no_args_is_help=True)
# Positional args
@email_arg(required=True)
# Options
@project_option(
required=False, help_message="Existing Project you want the user to be associated to."
)
@click.option(
"--role",
"-r",
"role",
required=True,
type=click.Choice(
choices=["Super Admin", "Unit Admin", "Unit Personnel", "Project Owner", "Researcher"],
case_sensitive=False,
),
help=(
"Type of account. To include a space in the chosen role, use quotes "
'(e.g. "Unit Personnel") or escape the space (e.g. Unit\ Personnel)'
),
)
@click.option(
"--unit",
required=False,
help="Super Admins only: To specify which unit the user should belong to.",
)
@nomail_flag(help_message="Do not send e-mail notifications regarding project updates.")
@click.pass_obj
def add_user(click_ctx, email, role, project, unit, no_mail):
"""Invite a new user to the DDS or add an existing one to a hosted project.
Not available for Researchers, unless they are marked as Project Owner for a specific project.
\b
Invite new user:
- Email
- Role
\b
Add user to project:
- Email
- Project ID (`dds ls`)
- Role: Researcher / Project Owner only in this case.
Unit Admins / Personnel are automatically added to all projects within that specific unit.
If the user doesn't exist in the system yet, an invitation email will be sent automatically
to that person.
"""
try:
with dds_cli.account_manager.AccountManager(
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as inviter:
inviter.add_user(email=email, role=role, project=project, no_mail=no_mail, unit=unit)
except (
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
dds_cli.exceptions.DDSCLIException,
) as err:
LOG.error(err)
sys.exit(1)
# -- dds user delete -- #
@user_group_command.command(name="delete", no_args_is_help=True)
# Positional args
@email_arg(required=False)
# Options
# Flags
@click.option(
"--self",
"self",
required=False,
is_flag=True,
default=False,
help="Request deletion of own account.",
)
@click.option(
"--is-invite",
required=False,
is_flag=True,
default=False,
help="Delete an ongoing and unanswered invite.",
)
@click.pass_obj
def delete_user(click_ctx, email, self, is_invite):
"""Delete user accounts from the Data Delivery System.
Use this command with caution. Deletion of accounts cannot be undone.
To request the removal of your own account, use the `--self` flag without any arguments.
An e-mail will be sent to you asking to confirm the deletion.
If you have sufficient admin privileges, you may also delete the accounts of some other users.
Specify the e-mail address as argument to the main command to initiate the removal process.
Deleting a user will not delete any data.
\b
Super Admins: All users.
Unit Admins: Unit Admins / Personnel. Not Researchers since they can be involved in projects
connected to other units.
"""
if click_ctx.get("NO_PROMPT", False):
proceed_deletion = True
else:
if is_invite and self:
LOG.error("You cannot specify both `--self` and `--is-invite. Choose one.")
sys.exit(0)
if not self and not email:
LOG.error(
"You must specify an email adress associated to the user you're requesting to delete."
)
sys.exit(0)
if is_invite:
proceed_deletion = rich.prompt.Confirm.ask(
f"Delete invitation of {email} to Data Delivery System?"
)
else:
if self:
proceed_deletion = rich.prompt.Confirm.ask(
"Are you sure? Deleted accounts can't be restored!"
)
else:
proceed_deletion = rich.prompt.Confirm.ask(
f"Delete Data Delivery System user account associated with {email}"
)
if proceed_deletion:
try:
with dds_cli.account_manager.AccountManager(
method="delete",
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as manager:
if self and not email:
manager.delete_own_account()
elif email and not self:
manager.delete_user(email=email, is_invite=is_invite)
else:
LOG.error(
"You must either specify the '--self' flag "
"or the e-mail address of the user to be deleted"
)
sys.exit(1)
except (
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
dds_cli.exceptions.DDSCLIException,
) as err:
LOG.error(err)
sys.exit(1)
# -- dds user info -- #
@user_group_command.command(name="info")
# Options
# Flags
@click.pass_obj
def get_info_user(click_ctx):
"""Display information connected to your own DDS account.
Usable by all user roles.
\b
The following information should be displayed:
- Username
- Role
- Name
- Primary email
- Associated emails (not useful yet)
"""
try:
with dds_cli.account_manager.AccountManager(
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as get_info:
get_info.get_user_info()
except (
dds_cli.exceptions.APIError,
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.DDSCLIException,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
) as err:
LOG.error(err)
sys.exit(1)
# -- dds user activate -- #
@user_group_command.command(name="activate", no_args_is_help=True)
# Positional args
@email_arg(required=True)
# Options
# Flags
@click.pass_obj
def activate_user(click_ctx, email):
"""Activate/Reactivate user accounts.
\b
Usable only by Super Admins and Unit Admins.
Super Admins: All users
Unit Admins: Unit Admins / Personnel
"""
if click_ctx.get("NO_PROMPT", False):
proceed_activation = True
else:
proceed_activation = rich.prompt.Confirm.ask(
f"Activate Data Delivery System user account associated with {email}?"
)
if proceed_activation:
try:
with dds_cli.account_manager.AccountManager(
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as manager:
manager.user_activation(email=email, action="reactivate")
except (
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
dds_cli.exceptions.DDSCLIException,
) as err:
LOG.error(err)
sys.exit(1)
# -- dds user deactivate -- #
@user_group_command.command(name="deactivate", no_args_is_help=True)
# Positional args
@email_arg(required=True)
# Options
# Flags
@click.pass_obj
def deactivate_user(click_ctx, email):
"""Deactivate user accounts in the Data Delivery System.
\b
Usable only by Super Admins and Unit Admins.
Super Admins: All users
Unit Admins: Unit Admins / Personnel
"""
if click_ctx.get("NO_PROMPT", False):
proceed_deactivation = True
else:
proceed_deactivation = rich.prompt.Confirm.ask(
f"Deactivate Data Delivery System user account associated with {email}?"
)
if proceed_deactivation:
try:
with dds_cli.account_manager.AccountManager(
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as manager:
manager.user_activation(email=email, action="deactivate")
except (
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
dds_cli.exceptions.DDSCLIException,
) as err:
LOG.error(err)
sys.exit(1)
####################################################################################################
####################################################################################################
## PROJECT ############################################################################## PROJECT ##
####################################################################################################
####################################################################################################
@dds_main.group(name="project", no_args_is_help=True)
@click.pass_obj
def project_group_command(_):
"""Group command for creating and managing projects within the DDS."""
# ************************************************************************************************ #
# PROJECT COMMANDS ************************************************************** PROJECT COMMANDS #
# ************************************************************************************************ #
# -- dds project ls -- #
@project_group_command.command(name="ls")
# Options
@sort_projects_option()
# Flags
@usage_flag(help_message="Show the usage for available projects, in GBHours and cost.")
@json_flag(help_message="Output project list as json.") # users, json, tree
@click.pass_context
def list_projects(ctx, json, sort, usage):
"""List all projects you have access to in the DDS.
Calls the `dds ls` function.
"""
ctx.invoke(list_projects_and_contents, json=json, sort=sort, usage=usage)
# -- dds project create -- #
@project_group_command.command(no_args_is_help=True)
# Options
@click.option(
"--title",
"-t",
required=True,
type=str,
help="The title of the project.",
)
@click.option(
"--description",
"-d",
required=True,
type=str,
help="A description of the project.",
)
@click.option(
"--principal-investigator",
"-pi",
required=True,
type=str,
help="The name of the Principal Investigator.",
)
@click.option(
"--researcher",
required=False,
multiple=True,
help="Email of a user to be added to the project as Researcher."
+ dds_cli.utils.multiple_help_text(item="researcher"),
)
# Flags
@click.option(
"--owner",
"owner",
required=False,
multiple=True,
help="Email of user to be added to the project as Project Owner."
+ dds_cli.utils.multiple_help_text(item="project owner"),
)
@click.option(
"--non-sensitive",
required=False,
is_flag=True,
default=False,
help=(
"Indicate whether the project contains only non-sensitive data. "
"NB! Currently all data is encrypted independent of whether the "
"projects is marked as sensitive or not."
),
)
@click.pass_obj
def create(
click_ctx,
title,
description,
principal_investigator,
non_sensitive,
owner,
researcher,
):
"""Create a project within the DDS.
Only usable by Unit Admins / Personnel.
To give new or existing users access to the new project, specify their emails with
`--researcher` or `--owner`. Both of these will give the user the role Researcher, but `--owner`
will mark the user as a Project Owner for this specific project, which will give that person
some additional administrative rights within the project such as adding users etc.
"""
try:
with dds_cli.project_creator.ProjectCreator(
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as creator:
emails_roles = []
if owner or researcher:
email_overlap = set(owner) & set(researcher)
if email_overlap:
LOG.info(
f"The email(s) {email_overlap} specified as both owner and researcher! "
"Please specify a unique role for each email."
)
sys.exit(1)
if owner:
emails_roles.extend([{"email": x, "role": "Project Owner"} for x in owner])
if researcher:
emails_roles.extend([{"email": x, "role": "Researcher"} for x in researcher])
created, project_id, user_addition_messages, err = creator.create_project(
title=title,
description=description,
principal_investigator=principal_investigator,
non_sensitive=non_sensitive,
users_to_add=emails_roles,
)
if created:
dds_cli.utils.console.print(
f"Project created with id: {project_id}",
)
if user_addition_messages:
for msg in user_addition_messages:
dds_cli.utils.console.print(msg)
dds_cli.utils.console.print(
"[red]Any users with errors were not added to the project[/red]"
)
except (
dds_cli.exceptions.APIError,
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.DDSCLIException,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
) as err:
LOG.error(err)
sys.exit(1)
# ************************************************************************************************ #
# PROJECT SUB GROUPS ********************************************************** PROJECT SUB GROUPS #
# ************************************************************************************************ #
# STATUS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ STATUS #
@project_group_command.group(name="status", no_args_is_help=True)
@click.pass_obj
def project_status(_):
"""Manage project statuses.
Display or change the status of a project.
Displaying the project status is available for all user roles. Changing the project status
is limited to Unit Admins and Personnel.
"""
# -- dds project status display -- #
@project_status.command(name="display", no_args_is_help=True)
# Options
@project_option(required=True)
# Flags
@click.option(
"--show-history",
required=False,
is_flag=True,
help="Show history of project statuses in addition to current status.",
)
@click.pass_obj
def display_project_status(click_ctx, project, show_history):
"""Display the status of a specific project.
Use `--show-history` to see all previous statuses of the project.
Usable by all user roles.
"""
try:
with dds_cli.project_status.ProjectStatusManager(
project=project,
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as updater:
updater.get_status(show_history)
except (
dds_cli.exceptions.APIError,
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.DDSCLIException,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
) as err:
LOG.error(err)
sys.exit(1)
# -- dds project status release -- #
@project_status.command(name="release", no_args_is_help=True)
# Options
@project_option(required=True)
@click.option(
"--deadline",
required=False,
type=int,
help="Deadline in days when releasing a project.",
)
@nomail_flag(help_message="Do not send e-mail notifications regarding project updates.")
@click.pass_obj
def release_project(click_ctx, project, deadline, no_mail):
"""Change project status to 'Available'.
Make project data available for user download. Data cannot be deleted and additional data cannot
be uploaded. The count-down for when the data access expires starts.
The `--deadline` option can be used when changing the project status from 'In Progress' to
'Available' for the first time. In all other cases the deadline option will be ignored.
Only usable by: Unit Admins / Personnel.
"""
try:
with dds_cli.project_status.ProjectStatusManager(
project=project,
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as updater:
updater.update_status(new_status="Available", deadline=deadline, no_mail=no_mail)
except (
dds_cli.exceptions.APIError,
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.DDSCLIException,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
) as err:
LOG.error(err)
sys.exit(1)
# -- dds project status retract -- #
@project_status.command(name="retract", no_args_is_help=True)
# Options
@project_option(required=True)
@click.pass_obj
def retract_project(click_ctx, project):
"""Change the project status to 'In Progress'.
'In Progress' is the default status when a project is created. Retracting the project changes
the status from 'Available' to 'In Progress' again.
Make project data unavailable to Researchers, and allow Unit Admins / Personnel to upload
additional data to the project. Data cannot be deleted. Data cannot be overwritten.
"""
try:
with dds_cli.project_status.ProjectStatusManager(
project=project,
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as updater:
updater.update_status(new_status="In Progress")
except (
dds_cli.exceptions.APIError,
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.DDSCLIException,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
) as err:
LOG.error(err)
sys.exit(1)
# -- dds project status archive -- #
@project_status.command(name="archive", no_args_is_help=True)
# Options
@project_option(required=True)
# Flags
@click.option(
"--abort",
required=False,
is_flag=True,
default=False,
help="Something has one wrong in the project.",
)
@click.pass_obj
def archive_project(click_ctx, project: str, abort: bool = False):
"""Change the project status to 'Archived'.
Certain metadata is kept and the project will still be listed among your projects. All data within the
project is deleted. You cannot revert this change.
Use the `--abort` flag to indicate that something has gone wrong in the project.
"""
proceed_deletion = (
True
if click_ctx.get("NO_PROMPT", False)
else dds_cli.utils.get_deletion_confirmation(action="archive", project=project)
)
if proceed_deletion:
try:
with dds_cli.project_status.ProjectStatusManager(
project=project,
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as updater:
updater.update_status(new_status="Archived", is_aborted=abort)
except (
dds_cli.exceptions.APIError,
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.DDSCLIException,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
) as err:
LOG.error(err)
sys.exit(1)
# -- dds project status delete -- #
@project_status.command(name="delete", no_args_is_help=True)
# Options
@project_option(required=True)
@click.pass_obj
def delete_project(click_ctx, project: str):
"""Delete an unreleased project (change project status to 'Deleted').
Certain metadata is kept (nothing sensitive) and the project will still be listed among your projects. All
data within the project is deleted. You cannot revert this change.
"""
proceed_deletion = (
True
if click_ctx.get("NO_PROMPT", False)
else dds_cli.utils.get_deletion_confirmation(action="delete", project=project)
)
if proceed_deletion:
try:
with dds_cli.project_status.ProjectStatusManager(
project=project,
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as updater:
updater.update_status(new_status="Deleted")
except (
dds_cli.exceptions.APIError,
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.DDSCLIException,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
) as err:
LOG.error(err)
sys.exit(1)
# ACCESS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ACCESS #
@project_group_command.group(name="access")
@click.pass_obj
def project_access(_):
"""Manage specific users access to a project."""
# -- dds project access grant -- #
@project_access.command(name="grant", no_args_is_help=True)
# Options
@project_option(required=True)
@email_option(help_message="Email of the user you would like to grant access to the project.")
# Flags
@click.option(
"--owner",
"owner",
required=False,
is_flag=True,
help=(
"Grant access as project owner. If not specified, "
"the user gets Researcher permissions within the project."
),
)
@nomail_flag(help_message="Do not send e-mail notifications regarding project updates.")
@click.pass_obj
def grant_project_access(click_ctx, project, email, owner, no_mail):
"""Grant a user access to a project.
Users can only grant project access to projects they themselves have access to, and only to
users with the role 'Researcher'. To set the Researcher as a Project Owner in this
specific project, use the `--owner` flag.
Limited to Unit Admins, Unit Personnel and Researchers set as Project Owners for the project
in question.
"""
try:
with dds_cli.account_manager.AccountManager(
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as granter:
role = "Researcher"
if owner:
role = "Project Owner"
granter.add_user(email=email, role=role, project=project, no_mail=no_mail)
except (
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
dds_cli.exceptions.DDSCLIException,
) as err:
LOG.error(err)
sys.exit(1)
# -- dds project access revoke -- #
@project_access.command(name="revoke", no_args_is_help=True)
# Options
@project_option(required=True)
@email_option(help_message="Email of the user for whom project access is to be revoked.")
@click.pass_obj
def revoke_project_access(click_ctx, project, email):
"""Revoke a users access to a project.
Users can only revoke project access for users with the role 'Researcher'. To set the Researcher
as a Project Owner in this specific project, use the `--owner` flag.
Limited to Unit Admins, Unit Personnel and Researchers set as Project Owners for the project
in question.
"""
try:
with dds_cli.account_manager.AccountManager(
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as revoker:
revoker.revoke_project_access(project, email)
except (
dds_cli.exceptions.APIError,
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.DDSCLIException,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
) as err:
LOG.error(err)
sys.exit(1)
# -- dds project access fix -- #
@project_access.command(name="fix", no_args_is_help=True)
# Positional arguments
@email_arg(required=True)
# Options
@project_option(required=False)
@click.pass_obj
def fix_project_access(click_ctx, email, project):
"""Re-grant project access to user that has lost access due to password reset.
When a password is reset, all project access is lost. To use the DDS in a meaningful way again,
the access to the active projects need to be updated.
Limited to Unit Admins, Unit Personnel and Researchers set as Project Owners for the project
in question.
"""
try:
with dds_cli.account_manager.AccountManager(
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as fixer:
fixer.fix_project_access(email=email, project=project)
except (
dds_cli.exceptions.APIError,
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.DDSCLIException,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
) as err:
LOG.error(err)
sys.exit(1)
####################################################################################################
####################################################################################################
## DATA #################################################################################### DATA ##
####################################################################################################
####################################################################################################
@dds_main.group(name="data", no_args_is_help=True)
@click.pass_obj
def data_group_command(_):
"""Group command for uploading, downloading and managing project data."""
# ************************************************************************************************ #
# DATA COMMANDS ******************************************************************** DATA COMMANDS #
# ************************************************************************************************ #
# -- dds data put -- #
@data_group_command.command(name="put", no_args_is_help=True)
# Options
@click.option(
"--mount-dir",
"-md",
required=False,
type=click_pathlib.Path(exists=False, file_okay=False, dir_okay=True, resolve_path=True),
help=(
"New directory where the files will be mounted before upload "
"and any error log files will be saved for a specific upload."
),
)
@project_option(required=True, help_message="Project ID to which you're uploading data.")
@source_option(
help_message="Path to file or directory (local).", option_type=click.Path(exists=True)
)
@source_path_file_option()
@num_threads_option()
@click.option(
"--overwrite",
is_flag=True,
default=False,
show_default=True,
help="Overwrite files if already uploaded.",
)
# Flags
@break_on_fail_flag(help_message="Cancel upload of all files if one fails.")
@silent_flag(
help_message="Turn off progress bar for each individual file. Summary bars still visible."
)
@click.pass_obj
def put_data(
click_ctx,
mount_dir,
project,
source,
source_path_file,
break_on_fail,
overwrite,
num_threads,
silent,
):
"""Upload data to a project.
Limited to Unit Admins and Personnel.
To upload a file (with the same name) a second time, use the `--overwrite` flag.
Prior to the upload, the DDS checks if the files are compressed and if not compresses them,
followed by encryption. After this the files are uploaded to the cloud.
NB! The current setup requires compression and encryption to be performed locally. Make sure you
have enough space. This will be improved on in future releases.
The default number of files to compress, encrypt and upload at a time is four. This can be
changed by altering the `--num-threads` option, but whether or not it works depends on the
machine you are running the CLI on.
The token is valid for 7 days. Make sure your token is valid long enough for the
delivery to finish. To avoid a delivery failing because of an expired token, we recommend
re-authenticating yourself before uploading data.
"""
try:
dds_cli.data_putter.put(
mount_dir=mount_dir,
project=project,
source=source,
source_path_file=source_path_file,
break_on_fail=break_on_fail,
overwrite=overwrite,
num_threads=num_threads,
silent=silent,
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
)
except (
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.UploadError,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
) as err:
LOG.error(err)
sys.exit(1)
# -- dds data get -- #
@data_group_command.command(name="get", no_args_is_help=True)
# Options
@project_option(required=True, help_message="Project ID from which you're downloading data.")
@num_threads_option()
@source_option(help_message="Path to file or directory.", option_type=str)
@source_path_file_option()
@click.option(
"--destination",
"-d",
required=False,
type=click_pathlib.Path(exists=False, file_okay=False, dir_okay=True, resolve_path=True),
multiple=False,
help="Destination of downloaded files.",
)
# Flags
@break_on_fail_flag(help_message="Cancel download of all files if one fails.")
@silent_flag(
help_message="Turn off progress bar for each individual file. Summary bars still visible."
)
@click.option(
"--get-all",
"-a",
is_flag=True,
default=False,
show_default=True,
help="Download all project contents.",
)
@click.option(
"--verify-checksum",
is_flag=True,
default=False,
show_default=True,
help="Perform SHA-256 checksum verification after download (slower).",
)
@click.pass_obj
def get_data(
click_ctx,
project,
get_all,
source,
source_path_file,
destination,
break_on_fail,
num_threads,
silent,
verify_checksum,
):
"""Download data from a project.
To download the data to a specific destination, use the `--destination` option. This cannot be
an existing directory, for security reasons. This will be improved on in future releases.
Following the download, the DDS decrypts the files, checks whether they are compressed and, if
so, decompresses them.
NB! The current setup requires decryption and decompression to be performed locally. Make sure
you have enough space. This will be improved on in future releases.
The default number of files to download, decrypt and decompress at a time is four. This can be
changed by altering the `--num-threads` option, but whether or not it works depends on the
machine you are running the CLI on.
The token is valid for 7 days. Make sure your token is valid long enough for the
delivery to finish. To avoid a delivery failing because of an expired token, we recommend
re-authenticating yourself before downloading data.
"""
if get_all and (source or source_path_file):
LOG.error(
"Flag '--get-all' cannot be used together with options '--source'/'--source-path-fail'."
)
sys.exit(1)
try:
# Begin delivery
with dds_cli.data_getter.DataGetter(
project=project,
get_all=get_all,
source=source,
source_path_file=source_path_file,
break_on_fail=break_on_fail,
destination=destination,
silent=silent,
verify_checksum=verify_checksum,
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as getter:
with rich.progress.Progress(
"{task.description}",
rich.progress.BarColumn(bar_width=None),
" • ",
"[progress.percentage]{task.percentage:>3.1f}%",
refresh_per_second=2,
console=dds_cli.utils.stderr_console,
) as progress:
# Keep track of futures
download_threads = {}
# Iterator to keep track of which files have been handled
iterator = iter(getter.filehandler.data.copy())
with concurrent.futures.ThreadPoolExecutor() as texec:
task_dwnld = progress.add_task(
"Download", total=len(getter.filehandler.data), step="summary"
)
# Schedule the first num_threads futures for upload
for file in itertools.islice(iterator, num_threads):
LOG.debug(f"Starting: {rich.markup.escape(str(file))}")
# Execute download
download_threads[
texec.submit(getter.download_and_verify, file=file, progress=progress)
] = file
while download_threads:
# Wait for the next future to complete
ddone, _ = concurrent.futures.wait(
download_threads, return_when=concurrent.futures.FIRST_COMPLETED
)
new_tasks = 0
for dfut in ddone:
downloaded_file = download_threads.pop(dfut)
LOG.debug(
f"Future done: {rich.markup.escape(str(downloaded_file))}",
)
# Get result
try:
file_downloaded = dfut.result()
LOG.debug(
f"Download of {rich.markup.escape(str(downloaded_file))} successful: {file_downloaded}"
)
except concurrent.futures.BrokenExecutor as err:
LOG.critical(
f"Download of file {rich.markup.escape(str(downloaded_file))} failed! Error: {err}"
)
continue
new_tasks += 1
progress.advance(task_dwnld)
# Schedule the next set of futures for download
for next_file in itertools.islice(iterator, new_tasks):
LOG.debug(f"Starting: {rich.markup.escape(str(next_file))}")
# Execute download
download_threads[
texec.submit(
getter.download_and_verify,
file=next_file,
progress=progress,
)
] = next_file
except (
dds_cli.exceptions.InvalidMethodError,
OSError,
dds_cli.exceptions.TokenNotFoundError,
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.ApiRequestError,
dds_cli.exceptions.ApiResponseError,
SystemExit,
dds_cli.exceptions.DDSCLIException,
dds_cli.exceptions.NoDataError,
dds_cli.exceptions.DownloadError,
) as err:
LOG.error(err)
sys.exit(1)
# -- dds data ls -- #
@data_group_command.command(name="ls", no_args_is_help=True)
# Options
@project_option(required=True)
@folder_option(help_message="List contents in this project folder.")
# Flags
@json_flag(help_message="Output in JSON format.")
@size_flag(help_message="Show size of project contents.")
@tree_flag(help_message="Display the entire project(s) directory tree.")
@users_flag(help_message="Display users associated with a project (requires a project ID).")
@click.pass_context
def list_data(ctx, project, folder, json, size, tree, users):
"""List project contents.
Same as `dds ls --project`.
"""
ctx.invoke(
list_projects_and_contents,
project=project,
folder=folder,
size=size,
tree=tree,
users=users,
json=json,
)
# -- dds data rm -- #
@data_group_command.command(name="rm", no_args_is_help=True)
# Options
@project_option(required=True)
@folder_option(
help_message="Path to folder to remove.",
short="-fl",
multiple=True,
)
@click.option(
"--file",
"-f",
required=False,
type=str,
multiple=True,
help="Path to file to be removed." + dds_cli.utils.multiple_help_text(item="file"),
)
# Flags
@click.option(
"--rm-all",
"-a",
is_flag=True,
default=False,
help="Remove all project contents.",
)
@click.pass_obj
def rm_data(click_ctx, project, file, folder, rm_all):
"""Delete data within a specific project.
Limited to Unit Admins and Personnel.
Project data can only be deleted if the project has the status 'In Progress' and it has never
had the status 'Available'.
This command should be used with caution; once the data is deleted there is no getting it back.
"""
no_prompt = click_ctx.get("NO_PROMPT", False)
# Either all or a file
if rm_all and (file or folder):
LOG.error("The options '--rm-all' and '--file'/'--folder' cannot be used together.")
sys.exit(1)
# Will not delete anything if no file or folder specified
if project and not any([rm_all, file, folder]):
LOG.error(
"One of the options must be specified to perform data deletion: "
"'--rm-all' / '--file' / '--folder'."
)
sys.exit(1)
# Warn if trying to remove all contents
if rm_all:
if no_prompt:
LOG.warning(f"Deleting all files within project '{project}'")
else:
if not rich.prompt.Confirm.ask(
f"Are you sure you want to delete all files within project '{project}'?"
):
LOG.info("Probably for the best. Exiting.")
sys.exit(0)
try:
with dds_cli.data_remover.DataRemover(
project=project,
no_prompt=no_prompt,
token_path=click_ctx.get("TOKEN_PATH"),
) as remover:
if rm_all:
remover.remove_all()
else:
if file:
remover.remove_file(files=file)
if folder:
remover.remove_folder(folder=folder)
except (
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.APIError,
dds_cli.exceptions.DDSCLIException,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
) as err:
LOG.error(err)
sys.exit(1)
####################################################################################################
####################################################################################################
## UNIT #################################################################################### UNIT ##
####################################################################################################
####################################################################################################
@dds_main.group(name="unit", no_args_is_help=True)
@click.pass_obj
def unit_group_command(_):
"""Group command for managing units.
Limited to Super Admins.
"""
# ************************************************************************************************ #
# UNIT COMMANDS ******************************************************************** UNIT COMMANDS #
# ************************************************************************************************ #
# -- dds unit ls -- #
@unit_group_command.command(name="ls", no_args_is_help=False)
@click.pass_obj
def list_units(click_ctx):
"""List all units and their information."""
try:
with dds_cli.unit_manager.UnitManager(
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as lister:
lister.list_all_units()
except (
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
dds_cli.exceptions.DDSCLIException,
) as err:
LOG.error(err)
sys.exit(1)
####################################################################################################
####################################################################################################
## MOTD #################################################################################### MOTD ##
####################################################################################################
####################################################################################################
# Will rethink and discuss the name of the group and command
# Probably need a super admin only group or similar
# For now this is good, just need the functionality
@dds_main.group(name="motd", no_args_is_help=True)
@click.pass_obj
def motd_group_command(_):
"""Group command for managing Message of the Day within DDS.
Limited to Super Admins.
"""
# ************************************************************************************************ #
# MOTD COMMANDS ******************************************************************** MOTD COMMANDS #
# ************************************************************************************************ #
# -- dds motd add-- #
@motd_group_command.command(name="add", no_args_is_help=True)
@click.argument("message", metavar="[MESSAGE]", nargs=1, type=str, required=True)
@click.pass_obj
def add_new_motd(click_ctx, message):
"""Add a new Message Of The Day.
Only usable by Super Admins.
    [MESSAGE] is the MOTD that you wish to display to the DDS users.
"""
try:
with dds_cli.motd_manager.MotdManager(
no_prompt=click_ctx.get("NO_PROMPT", False),
token_path=click_ctx.get("TOKEN_PATH"),
) as setter:
setter.add_new_motd(message)
except (
dds_cli.exceptions.AuthenticationError,
dds_cli.exceptions.ApiResponseError,
dds_cli.exceptions.ApiRequestError,
dds_cli.exceptions.DDSCLIException,
) as err:
LOG.error(err)
sys.exit(1)
|
from typing import List, Literal
import blessed
from blessed.keyboard import Keystroke
import time
import tictactoe
import os
from utils import Vec
from draw_circle import draw_circle, draw_cross
import logging
if "logs" not in os.listdir():
os.mkdir("logs")
logging.basicConfig(filename="logs/debug.log", level=logging.DEBUG)
class Cursor:
def __init__(self):
self.cursor = "->"
self.cell = [0, 1]
self.cell_loc = [[Vec(0, 0) for _ in range(3)] for _ in range(3)]
for i in range(3):
for j in range(3):
self.cell_loc[i][j] = start_pos + cell_size * (i, j) + (i + 1, j + 1)
self.current_screen_loc = self.cell_loc[self.cell[0]][self.cell[1]]
self.old_screen_loc = None
def update(self, direction: Literal["UP", "DOWN", "LEFT", "RIGHT"]):
new_cell = self.cell[:]
if direction == "UP":
new_cell[1] -= 1
elif direction == "DOWN":
new_cell[1] += 1
elif direction == "LEFT":
new_cell[0] -= 1
else:
new_cell[0] += 1
if (new_cell[0] in (0, 1, 2)) and (new_cell[1] in (0, 1, 2)):
self.cell = new_cell
self.old_screen_loc = self.current_screen_loc
self.current_screen_loc = self.cell_loc[self.cell[0]][self.cell[1]]
print(
term.move_xy(*self.old_screen_loc)
+ " " * len(self.cursor)
+ term.move_xy(*self.current_screen_loc)
+ self.cursor,
)
else:
return
# These characters have been used to create the table
strokes = "│ ─ ┌ ┬ ┐ ├ ┼ ┤ └ ┴ ┘"
term = blessed.Terminal()
# getting the window size and board size
window_size = Vec(term.width, term.height)
board_size = Vec(43, 19)
start_pos = (window_size - board_size) / 2
cell_size = (board_size - 4) / 3
# drawing the frame of the board
def draw_board():
global board_size, start_pos
init_board = term.clear
top_rule = "┌" + "┬".join("─" * cell_size.x for _ in range(3)) + "┐"
mid_rule = "├" + "┼".join("─" * cell_size.x for _ in range(3)) + "┤"
bottom_rule = "└" + "┴".join("─" * cell_size.x for _ in range(3)) + "┘"
rules = [top_rule, mid_rule, mid_rule, bottom_rule]
for row in range(board_size.y):
draw_from = start_pos + (0, row)
init_board += term.move_xy(*draw_from) # type: ignore
if row % (cell_size.y + 1) == 0:
init_board += rules[row // cell_size.y]
continue
init_board += "│" + "│".join(term.move_right(cell_size.x) for _ in range(3)) + "│" # type: ignore
print(init_board)
update_board(board, cursor)
# drawing XO should happen here
def update_board(board, cursor: Cursor):
xs = []
os = []
for i in range(3):
for j in range(3):
if board[i][j] == "X":
xs.append((i, j))
elif board[i][j] == "O":
os.append((i, j))
for o in os:
o_loc = cursor.cell_loc[o[1]][o[0]]
center_x, center_y = o_loc + cell_size / 2
draw_circle(coords=(center_x, center_y), radius=cell_size.y // 2, rgb=(0, 0, 0))
print(term.black)
for x in xs:
x_loc = cursor.cell_loc[x[1]][x[0]]
center_x, center_y = x_loc + cell_size / 2
draw_cross(
coords=(center_x, center_y), radius=cell_size.y // 2 + 1, rgb=(0, 0, 0)
)
print(term.black)
def refresh(val: Keystroke, cursor: Cursor):
if val.is_sequence:
name = val.name
if name[4:] in ("UP", "DOWN", "LEFT", "RIGHT"):
cursor.update(val.name[4:]) # type: ignore
return
if str(val) == " " or val.name == "KEY_ENTER":
x, y = cursor.cell
return (y, x)
return
board = tictactoe.initial_state()
print(f"{term.home}{term.black_on_skyblue}{term.clear}")
cursor = Cursor()
# main event loop
with term.cbreak():
draw_board()
cursor.update("UP")
val = ""
while val.lower() != "q" and not tictactoe.terminal(board):
val = term.inkey(timeout=3)
# user input
move = refresh(val, cursor)
if move in tictactoe.actions(board):
board = tictactoe.move(move)
update_board(board, cursor) # draw board
if not tictactoe.terminal(board):
# bot
board = tictactoe.move(tictactoe.minimax(board))
txt = "bot is thinking..."
txt_x = (term.width - len(txt)) // 2
txt_y = term.height - 2
print(term.move_xy(txt_x, txt_y) + txt)
time.sleep(5)
update_board(board, cursor) # draw board
print(term.move_xy(txt_x, txt_y) + " " * len(txt))
if tictactoe.utility(board) == 0:
print(
term.move_xy(1, 1)
+ term.blink((term.underline_bold_black_on_yellow(f"Draw!")))
)
else:
print(
term.move_xy(1, 1)
+ term.blink(
(
term.underline_bold_black_on_yellow(
f"The winner is ......{tictactoe.winner(board)}"
)
)
)
)
print(term.move_xy(1, 2) + "bye!")
time.sleep(5)
print(term.normal + term.home + term.clear)
|
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Pigweed Console entry point."""
import argparse
import inspect
import logging
from pathlib import Path
import sys
import pw_cli.log
import pw_cli.argument_types
import pw_console
import pw_console.python_logging
from pw_console.plugins.calc_pane import CalcPane
from pw_console.plugins.clock_pane import ClockPane
_LOG = logging.getLogger(__package__)
# TODO(tonymd): Remove this when no downstream projects are using it.
def create_temp_log_file():
return pw_console.python_logging.create_temp_log_file()
def _build_argument_parser() -> argparse.ArgumentParser:
"""Setup argparse."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-l',
'--loglevel',
type=pw_cli.argument_types.log_level,
default=logging.DEBUG,
                        help='Set the log level '
'(debug, info, warning, error, critical)')
parser.add_argument('--logfile', help='Pigweed Console log file.')
parser.add_argument('--test-mode',
action='store_true',
help='Enable fake log messages for testing purposes.')
parser.add_argument('--config-file',
type=Path,
help='Path to a pw_console yaml config file.')
parser.add_argument('--console-debug-log-file',
help='Log file to send console debug messages to.')
return parser
def main() -> int:
"""Pigweed Console."""
parser = _build_argument_parser()
args = parser.parse_args()
if not args.logfile:
# Create a temp logfile to prevent logs from appearing over stdout. This
# would corrupt the prompt toolkit UI.
args.logfile = pw_console.python_logging.create_temp_log_file()
pw_cli.log.install(level=args.loglevel,
use_color=True,
hide_timestamp=False,
log_file=args.logfile)
if args.console_debug_log_file:
pw_cli.log.install(level=logging.DEBUG,
use_color=True,
hide_timestamp=False,
log_file=args.console_debug_log_file,
logger=logging.getLogger('pw_console'))
global_vars = None
default_loggers = {}
if args.test_mode:
fake_logger = logging.getLogger(
pw_console.console_app.FAKE_DEVICE_LOGGER_NAME)
default_loggers = {
# Don't include pw_console package logs (_LOG) in the log pane UI.
# Add the fake logger for test_mode.
'Fake Device Logs': [fake_logger],
'PwConsole Debug': [logging.getLogger('pw_console')],
}
# Give access to adding log messages from the repl via: `LOG.warning()`
global_vars = dict(LOG=fake_logger)
help_text = None
app_title = None
if args.test_mode:
app_title = 'Console Test Mode'
help_text = inspect.cleandoc("""
Welcome to the Pigweed Console Test Mode!
Example commands:
rpcs.pw.rpc.EchoService.Echo(msg='hello!')
            LOG.warning('Message appears in the console log window.')
""")
console = pw_console.PwConsoleEmbed(
global_vars=global_vars,
loggers=default_loggers,
test_mode=args.test_mode,
help_text=help_text,
app_title=app_title,
config_file_path=args.config_file,
)
# Add example plugins used to validate behavior in the Pigweed Console
# manual test procedure: https://pigweed.dev/pw_console/testing.html
if args.test_mode:
console.add_window_plugin(ClockPane())
console.add_window_plugin(CalcPane())
console.embed()
if args.logfile:
print(f'Logs saved to: {args.logfile}')
return 0
if __name__ == '__main__':
sys.exit(main())
|
import os
import boto3
from datetime import datetime
def lambda_handler(event, context):
print('Helper triggered...')
# Environment Variables
replication_function_name = os.environ['ONGOING_REPLICATION_FUNCTION_NAME']
ssm_event_source_mapping_uuid = os.environ['SSM_EVENT_SOURCE_MAPPING_UUID']
ssm_workflow_status = os.environ['SSM_WORKFLOW_STATUS']
print('replication lambda: ' + replication_function_name)
    print('ssm param name [event source mapping uuid]: ' + ssm_event_source_mapping_uuid)
print('ssm param name [workflow status]: ' + ssm_workflow_status)
ssm_param_started_date = ssm_workflow_status.replace('workflow_status', 'started_date')
datetime_format = '%Y-%m-%d %H:%M:%S.%f'
# This function is performing one of the following actions:
# get workflow status (SSM param)
# update workflow status (SSM param)
# enable ongoing replication (lambda)
action = event['parameters']['action']
print('performing action: ' + action)
ssm_client = boto3.client('ssm')
if action == 'enable_ongoing_replication':
started_date = ssm_client.get_parameter(
Name=ssm_param_started_date,
WithDecryption=False
)
print('started date: ' + str(started_date))
max_age = int((datetime.now() - datetime.strptime(started_date['Parameter']['Value'], datetime_format))
.total_seconds()) + 120
print('max_age: ' + str(max_age))
enable_ongoing_replication(ssm_client, ssm_event_source_mapping_uuid,
replication_function_name, max_age)
else:
if action == 'get_workflow_status':
workflow_status = ssm_client.get_parameter(
Name=ssm_workflow_status,
WithDecryption=False
)
ws_value = workflow_status['Parameter']['Value']
print('current workflow status: ' + ws_value)
if ws_value == 'enabled':
ssm_client.put_parameter(
Name=ssm_param_started_date,
Value=datetime.now().strftime(datetime_format),
Overwrite=True,
Type='String'
)
print('Stored current time in ssm: ' + ssm_param_started_date)
return {
'workflow_status': ws_value
}
else:
if action == 'update_workflow_status':
value = event['parameters']['value']
ssm_client.put_parameter(
Name=ssm_workflow_status,
Value=value,
Overwrite=True,
Type='String'
)
print('Updating workflow status in ssm : ' + ssm_workflow_status + ' to ' + value)
else:
return {
'status': 'unknown_action'
}
def enable_ongoing_replication(ssm_client, ssm_event_source_mapping_uuid, replication_function_name, max_age):
uuid = ssm_client.get_parameter(
Name=ssm_event_source_mapping_uuid,
WithDecryption=False
)
print('stream uuid: ' + uuid['Parameter']['Value'])
lambda_client = boto3.client('lambda')
try:
response = lambda_client.update_event_source_mapping(
Enabled=True,
MaximumRecordAgeInSeconds=max_age,
FunctionName=replication_function_name,
UUID=uuid['Parameter']['Value']
)
print('lambda source mapping successfully updated')
print(response)
except Exception as exc:
print('ERROR', exc)
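# A rough sketch of the event shapes this handler expects (the 'action' values are
# taken from the branches above; any surrounding Step Functions / caller wiring is assumed):
#   {"parameters": {"action": "get_workflow_status"}}
#   {"parameters": {"action": "update_workflow_status", "value": "enabled"}}
#   {"parameters": {"action": "enable_ongoing_replication"}}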
|
from setuptools import setup
setup(
name='postdoc',
version='0.4.0',
description='A helper for Postgres + Docker that works for free',
long_description=open('README.rst').read(),
author='Chris Chang',
author_email='c@crccheck.com',
url='https://github.com/crccheck/postdoc',
py_modules=['postdoc'],
entry_points={
'console_scripts': [
'phd = postdoc:main',
],
},
license='Apache',
tests_require=[
'mock==1.0.1',
],
test_suite='test_postdoc',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Database',
'Topic :: Utilities',
],
)
|
################################################################################
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from logging import critical
def is_libpsp():
"""Was libbinding successfully loaded in this module?"""
return __is_libpsp__
__is_libpsp__ = True
try:
# Load all `libbinding` depending modules in one go, otherwise nothing
# dependent on `libbinding` is exposed.
from .table import * # noqa: F401, F403
from .manager import * # noqa: F401, F403
from .tornado_handler import * # noqa: F401, F403
from .viewer import * # noqa: F401, F403
from .table.libbinding import make_computations
make_computations()
except ImportError:
__is_libpsp__ = False
critical("Failed to import C++ bindings for Perspective "
"probably as it could not be built for your architecture "
"(check install logs for more details).\n",
exc_info=True)
critical("You can still use `PerspectiveWidget` in client mode using JupyterLab.")
|
import os
import shutil
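# Copy every non-.txt file from each subfolder of the current directory into
# ../packed/<batch>/, where <batch> is the part of the filename before the first
# underscore; the copied file is prefixed with its source folder name.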
folders = os.listdir()
for folder in folders:
if not os.path.isdir(folder):
continue
files = os.listdir(folder)
for file in files:
if file.split('.')[-1] == 'txt':
continue
src = os.path.join(folder,file)
batch = file.split('_')[0]
dir = os.path.join('..','packed',batch)
if not os.path.isdir(dir):
os.mkdir(dir)
dst = os.path.join(dir,folder+'_'+file)
shutil.copy(src,dst) |
from data_maker import DataDescriptor, DataMaker, TruncatedNormalParameters
from utils import SolutionVisualizer
from greedy_planner import Planner
if __name__ == '__main__':
size = [60, 120, 180]
covid = [0.5]
anesthesia = [0.8]
anesthetists = [1, 2]
for s in size:
for c in covid:
for a in anesthesia:
for at in anesthetists:
dataDescriptor = DataDescriptor()
                    # apply the parameters from the enclosing sweep loops
                    dataDescriptor.patients = s
                    dataDescriptor.days = 5
                    dataDescriptor.anesthetists = at
                    dataDescriptor.covidFrequence = c
                    dataDescriptor.anesthesiaFrequence = a
dataDescriptor.specialtyBalance = 0.17
dataDescriptor.operatingDayDuration = 270
dataDescriptor.anesthesiaTime = 270
dataDescriptor.operatingTimeDistribution = TruncatedNormalParameters(low=30,
high=120,
mean=60,
stdDev=20)
dataDescriptor.priorityDistribution = TruncatedNormalParameters(low=1,
high=120,
mean=60,
stdDev=10)
dataMaker = DataMaker(seed=52876)
dataContainer = dataMaker.create_data_container(dataDescriptor)
dataDictionary = dataMaker.create_data_dictionary(dataContainer, dataDescriptor)
# print("Data description:\n")
# print(dataDescriptor)
# dataMaker.print_data(dataDictionary)
planner = Planner(dataDictionary)
solution = planner.compute_solution()
sv = SolutionVisualizer()
sv.print_solution(solution)
sv.plot_graph(solution)
print("Objective function value: " + str(planner.compute_objective_value()))
|
import copy
import numpy as np
import os
import glob
import pdb
import subprocess
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from sdss import yanny
from apogee.speclib import atmos
from apogee.utils import spectra
from apogee.plan import mkslurm
def mkgriddirs(configfile,nosynth=False,synthonly=False,writeraw=False,queryport=1052) :
""" Script to create output directories and plan and batch queue files for all grids listed in master grid configuration file
"""
# Read grid configuration file
if not os.path.isfile(configfile+'.yml'):
print('{:s} does not exist'.format(configfile+'.yml'))
return
p=yaml.safe_load(open(configfile+'.yml','r'))
# loop over each grid
for i in range(len(p['GRID'])) :
# do both "raw" directory and final directory: former may be repeated!
specdir=p['GRID'][i]['specdir']
smooth=p['GRID'][i]['smooth']
synthcode=p['GRID'][i]['synthcode']
atmos=p['GRID'][i]['atmos']
if synthonly : names = [ specdir ]
elif nosynth : names = [ specdir+'_'+smooth ]
else : names = [ specdir+'_'+smooth, specdir ]
for igrid,name in enumerate(names) :
# construct name and create output directory
if abs(p['GRID'][i]['solarisotopes']) == 1 :
iso = 'solarisotopes'
elif abs(p['GRID'][i]['solarisotopes']) == 2 :
iso = 'giantisotopes'
            if p['GRID'][i]['solarisotopes'] < 0 :
                iso = 'tests/'+iso
dir = os.getenv('APOGEE_SPECLIB')+'/synth/'+synthcode.strip("'")+'/'+atmos+'/'+iso+'/'+name+'/plan/'
print(dir)
try: os.makedirs(dir)
except: pass
# remove any old plan files
os.chdir(dir)
for filePath in glob.glob("*.yml"):
if os.path.isfile(filePath): os.remove(filePath)
# move GRID keys up one level
out = copy.deepcopy(p)
for key in out['GRID'][i].keys() : out[key] = out['GRID'][i][key]
out.pop('GRID')
fp = open(dir+name+'.yml','w')
fp.write(yaml.dump(out,sort_keys=False))
fp.close()
for elem in p['GRID'][i]['elem'] : speclib_split(dir+name,el=elem)
# make pbs scripts
os.chdir('..')
specdir = synthcode.strip("'")+'/'+atmos+'/'+iso+'/'+name
if name == p['GRID'][i]['specdir'] :
speclib_split(dir+name,amsplit=False)
mkslurm.write('mkgrid plan/'+name+'_a[mp]*vp20.yml plan/'+name+'_a[mp]*vp48.yml plan/'+name+'_a[mp]*vp??.yml',queryhost=os.uname()[1],queryport=queryport,maxrun=32)
mkslurm.write('mkrbf plan/'+name+'_c[mp]*vp??.yml',queryhost=os.uname()[1],queryport=queryport,maxrun=1,time='72:00:00')
mkslurm.write('mkrbf --nofill plan/'+name+'.yml',name='mkrbfholes',runplans=False,time='72:00:00')
else :
if writeraw : raw = '--writeraw'
else : raw = ''
mkslurm.write('mkgridlsf plan/'+name+'_a[mp]*vp??.yml',queryhost=os.uname()[1],queryport=queryport,maxrun=12,time='24:00:00')
#mkslurm.write('bundle plan/'+name+'_??.yml',queryhost=os.uname()[1],queryport=queryport,maxrun=32)
mkslurm.write('pca --pcas 12 75 --incremental --threads 0 '+raw+' plan/'+name+'.yml',runplans=False,time='72:00:00')
mkslurm.write('mkgridlsf plan/'+name+'_a[mp]*vp??.yml',queryhost=os.uname()[1],queryport=queryport,maxrun=12,time='72:00:00',
postcmd='pca --pcas 12 75 --incremental --threads 0 '+raw+' plan/'+name+'.yml',name='mkgridlsf_pca')
def speclib_split(planfile,amsplit=True,cmsplit=True,nmsplit=True,oasplit=True,vtsplit=True,el='') :
""" Make a bunch of individual plan files from master, splitting [alpha/M],[C/M],[N/M],vt
"""
# read master plan file
print('splitting: ', planfile)
p=yaml.safe_load(open(planfile+'.yml','r'))
# some cards removed in split par files
p.pop('npart',None)
p.pop('npca',None)
p.pop('vmsuffix',None)
    if el != '' : p['elem'] = el
else: p.pop('elem',None)
# make specdir the full path relative to $APOGEE_SPECLIB/synth
if int(p['solarisotopes']) == 1 : isodir='solarisotopes'
elif int(p['solarisotopes']) == 2 : isodir='giantisotopes'
#for key in ['synthcode','atmos','specdir','linelist','config'] :
# p[key] = p[key].decode().strip("'")
p['specdir'] = p['synthcode']+'/'+p['atmos']+'/'+isodir+'/'+p['specdir']
# get ranges in [alpha/M], [C/M], [N/M], and vt
if amsplit :
amrange=spectra.vector(p['am0'],p['dam'],p['nam'])
p['nam'] = 1
else :
amrange = [0]
if cmsplit :
cmrange=spectra.vector(p['cm0'],p['dcm'],p['ncm'])
p['ncm'] = 1
else :
cmrange = [0]
if nmsplit :
nmrange=spectra.vector(p['nm0'],p['dnm'],p['nnm'])
p['nnm'] = 1
else :
nmrange = [0]
if oasplit :
try :
oarange=spectra.vector(p['oa0'],p['doa'],p['noa'])
except :
oasplit=False
oarange=[0.]
p['noa'] = 1
else :
oarange = [0]
if int(p['vmicrofit']) == 0 :
vtrange=spectra.vector(p['vt0'],p['dvt'],p['nvt'])
p.pop('vt0')
p.pop('dvt')
p.pop('nvt')
else :
vtrange = [0]
vtsplit = False
# loop through all and make individual plan files
dw = float(p['dw'])
for am in amrange :
if amsplit : p['am0'] = float(am)
for cm in cmrange :
if cmsplit : p['cm0'] = float(cm)
for nm in nmrange :
if nmsplit : p['nm0'] = float(nm)
for oa in oarange :
if oasplit : p['oa0'] = float(oa)
for vt in vtrange :
# vmicro handled differently
if int(p['vmicrofit']) == 0 :
p['vmicro'] = [float(10.**vt)]
# special handling for dw
if np.isclose(dw,-1.) :
                                if p['vmicro'][0] < 3.99 : p['dw'] = 0.05
else : p['dw'] = 0.10
suffix=''
if amsplit : suffix+='a'+atmos.cval(am)
if cmsplit : suffix+='c'+atmos.cval(cm)
if nmsplit : suffix+='n'+atmos.cval(nm)
if oasplit : suffix+='o'+atmos.cval(oa)
if vtsplit : suffix+='v'+atmos.cval(10.**vt)
p['name'] = suffix
with open(planfile+'_'+suffix+el+'.yml', 'w') as fp:
fp.write(yaml.dump(p,sort_keys=False,Dumper=Dumper))
|
# coding: utf-8
import socketserver
from pathlib import Path # for checking if files exist
# Copyright 2013 Abram Hindle, Eddie Antonio Santos, Peter Weckend
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2013 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python freetests.py
# try: curl -v -X GET http://127.0.0.1:8080/
class MyWebServer(socketserver.BaseRequestHandler):
def handle(self):
self.data = self.request.recv(1024).strip()
# print ("Got a request of: %s\n" % self.data)
split_req_data = self.data.decode("utf-8").split('\n')[0].split(' ')
try:
inside_slashes = split_req_data[1].split('/')
except:
inside_slashes = -1
response_proto = 'HTTP/1.1 '
response_status = '200 OK\r\n'
response_content_type = 'Content-Type: text/html\r\n' # use html by default
location = ''
returned_content = ''
if split_req_data[0] != 'GET':
response_status = '405 Method Not Allowed\r\n'
else:
# default home page
if len(inside_slashes) == 2 and split_req_data[1][1:] == '':
returned_content = self.fetch_content('./www/index.html')
elif Path('./www/'+split_req_data[1][1:]).is_file() \
and (split_req_data[1][-3:] == 'css' or split_req_data[1][-4:] == 'html'):
returned_content = self.fetch_content('./www/' + split_req_data[1][1:])
# support css mime type
if split_req_data[1][-3:] == 'css':
response_content_type = 'Content-Type: text/css\r\n'
# paths ending in /
elif Path('./www/'+split_req_data[1][1:]).is_dir() and split_req_data[1][1:] != 'etc':
# if no slash at the end, return a 301 and the address with the slash
if split_req_data[1][-1] != '/':
response_status = '301 Moved Permanently\r\n'
location = 'Location: ' + split_req_data[1] + '/' + '\r\n'
else:
returned_content = self.fetch_content('./www/' + split_req_data[1][1:] + "/index.html")
# support css mime type
if split_req_data[1][-3:] == 'css':
response_content_type = 'Content-Type: text/css\r\n'
else:
response_status = '404 Not Found\r\n'
returned_content = '''404 - The page you're looking for could not be found.'''
response = response_proto + response_status + location + response_content_type + '\r\n' + returned_content + '\r\n'
self.request.sendall(bytearray(response, 'utf-8'))
def fetch_content(self, file_path):
content_file = open(file_path, 'r')
content_string = ''
for line in content_file:
content_string += line
content_file.close()
return content_string
if __name__ == "__main__":
HOST, PORT = "localhost", 8080
socketserver.TCPServer.allow_reuse_address = True
# Create the server, binding to localhost on port 8080
server = socketserver.TCPServer((HOST, PORT), MyWebServer)
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
server.serve_forever()
|
"""Docstring de uma linha"""
variavel = 'valor'
def um():
return 1
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import pathlib
import sqlite3
import json
class DyntaxaDbCache:
""" """
def __init__(self, db_file="data_in/dyntaxa_db_cache_2.db"):
""" """
self.db_file = db_file
self.db_path = pathlib.Path(self.db_file)
def create_db(self):
""" """
db_con = sqlite3.connect(self.db_path)
with db_con:
# From DynTaxa Excel sheets.
db_con.execute(
"CREATE TABLE dyntaxa(taxon_id varchar(20) PRIMARY KEY, data json)"
)
db_con.execute("CREATE TABLE dyntaxa_name(data json)")
db_con.execute("CREATE TABLE dyntaxa_parent(data json)")
# Calculated stuff.
db_con.execute(
"CREATE TABLE taxa(taxon_id varchar(20) PRIMARY KEY, data json)"
)
def connect(self):
""" """
if not self.db_path.exists():
self.create_db()
#
return sqlite3.connect(self.db_path)
def add_dyntaxa_list(self, dyntaxa_list, append=False):
""" """
with self.connect() as con:
            if not append:
con.execute("delete from dyntaxa")
for dyntaxa_dict in dyntaxa_list:
taxon_id = dyntaxa_dict.get("TaxonId", "")
if taxon_id != "":
try:
con.execute(
"insert into dyntaxa values (?, ?)",
(
taxon_id,
json.dumps(
dyntaxa_dict,
),
),
)
except Exception as e:
print("Exception in dyntaxa, id: ", taxon_id, " ", e)
def get_dyntaxa_dict(self):
""" """
dyntaxa_dict = {}
with self.connect() as con:
cur = con.cursor()
cur.execute("select data from dyntaxa")
for row in cur:
row_dict = json.loads(row[0])
taxa_id = row_dict["TaxonId"]
dyntaxa_dict[taxa_id] = row_dict
cur.close()
# print("Length dyntaxa: ", len(dyntaxa_dict))
return dyntaxa_dict
def add_dyntaxa_name_list(self, dyntaxa_name_list, append=False):
""" """
with self.connect() as con:
            if not append:
con.execute("delete from dyntaxa_name")
for dyntaxa_dict in dyntaxa_name_list:
taxon_id = dyntaxa_dict.get("TaxonId", "")
if taxon_id != "":
try:
con.execute(
"insert into dyntaxa_name values (?)",
(json.dumps(dyntaxa_dict),),
)
except Exception as e:
print("Exception in dyntaxa_name, id: ", taxon_id, " ", e)
def get_dyntaxa_name_list(self):
""" """
dyntaxa_name_list = []
with self.connect() as con:
cur = con.cursor()
cur.execute("select data from dyntaxa_name")
for row in cur:
row_dict = json.loads(row[0])
dyntaxa_name_list.append(row_dict)
cur.close()
# print("Length dyntaxa_name: ", len(dyntaxa_name_list))
return dyntaxa_name_list
def add_dyntaxa_parent_list(self, dyntaxa_parent_list, append=False):
""" """
with self.connect() as con:
            if not append:
con.execute("delete from dyntaxa_parent")
for dyntaxa_dict in dyntaxa_parent_list:
# taxon_id = taxon_dict.get("TaxonId", "")
taxon_id = dyntaxa_dict.get("ChildTaxonId", "")
if taxon_id != "":
try:
con.execute(
"insert into dyntaxa_parent values (?)",
(json.dumps(dyntaxa_dict),),
)
except Exception as e:
print("Exception in dyntaxa_parent, id: ", taxon_id, " ", e)
def get_dyntaxa_parent_list(self):
""" """
dyntaxa_parent_list = []
with self.connect() as con:
cur = con.cursor()
cur.execute("select data from dyntaxa_parent")
for row in cur:
row_dict = json.loads(row[0])
dyntaxa_parent_list.append(row_dict)
cur.close()
# print("Length dyntaxa_parent: ", len(dyntaxa_parent_list))
return dyntaxa_parent_list
def add_taxa_list(self, taxa_list, append=False):
""" """
with self.connect() as con:
            if not append:
con.execute("delete from taxa")
for taxa_dict in taxa_list:
taxon_id = taxa_dict.get("TaxonId", "")
if taxon_id != "":
try:
con.execute(
"insert into taxa values (?, ?)",
(
taxon_id,
json.dumps(
taxa_dict,
),
),
)
except Exception as e:
print("Exception in taxa, id: ", taxon_id, " ", e)
def get_taxa_dict(self):
""" """
taxa_dict = {}
with self.connect() as con:
cur = con.cursor()
cur.execute("select data from taxa")
for row in cur:
row_dict = json.loads(row[0])
taxa_id = row_dict["TaxonId"]
taxa_dict[taxa_id] = row_dict
cur.close()
# print("Length taxa: ", len(taxa_dict))
return taxa_dict
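if __name__ == "__main__":
    # Minimal usage sketch. Only the "TaxonId" key is required by the cache;
    # the db_file path and any other fields in the dict are illustrative.
    cache = DyntaxaDbCache(db_file="dyntaxa_demo.db")
    cache.add_dyntaxa_list([{"TaxonId": "1", "ScientificName": "Example species"}])
    print(len(cache.get_dyntaxa_dict()), "taxa cached")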
|
import h5py
def save_file(data, filename, dataset_name="data"):
"""Save a np.array in hdf5 format.
Args:
data (np.array): An array.
        filename (str): Name of the file.
        dataset_name (str): Name of the dataset inside the HDF5 file.
Examples:
>>> a = np.array([[1,2,3],[4,5,6]])
>>> save_file(a, 'file.hdf5')
>>> os.path.isfile('file.hdf5')
True
>>> os.remove('file.hdf5')
>>> os.path.isfile('file.hdf5')
False
"""
with h5py.File(filename, "w") as f:
f[dataset_name] = data
def read_file(filename, dataset_name="data"):
"""Read a hdf5 file.
Args:
        filename (str): Name of the file.
        dataset_name (str): Name of the dataset inside the HDF5 file.
Returns:
np.array: An array.
Examples:
>>> read_file('share/data.hdf5')
array([[1, 2, 3],
[4, 5, 6]], dtype=int64)
"""
with h5py.File(filename, "r") as f:
X = f[dataset_name][...]
return X
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
try:
with open("manifesto.txt") as f:
manifesto = f.readlines()
manifesto = [word.strip() for word in manifesto]
except Exception as e:
print(e)
activated_commands = [
'!help',
'!bug',
'!femops',
'!eightball',
'!roulette',
'!export',
'!dirtylinks',
'!sentence',
'!afk',
'!afklist',
'!shorten',
'!gay',
'!fap',
'!idle',
'!nifle',
'!strpn',
'!strapon',
'!bj',
'!blowjob',
'!daddy'
]
roulette_achievements = {
5:"It's ok to cry",
10:"Depressive Player",
15:"Mad Russian Roulette Player",
20:"Suicide is sometimes a Solution",
25:"Are you coding in PHP ?",
30:"Forced to use Windows",
35:"Done with Life"
}
eightball_list = [
"It is certain",
"It is decidedly so",
"Without a doubt",
"Yes definitely",
"You may rely on it",
"As I see it yes",
"Most likely",
"Outlook good",
"Yes",
"Signs point to yes",
"Reply hazy try again",
"Ask again later",
"Better not tell you now",
"Cannot predict now",
"Concentrate and ask again",
"Don't count on it",
"My reply is no",
"My sources say no",
"Outlook not so good",
"Very doubtful"
]
daddys = [
"Depado",
"krion"
]
help = [
"I need somebody, Help, not just anybody, Help, you know I need someone, Ahawaaa"
]
basics = {
"!nom":"is going to eat.",
"!smoke":"is going to smoke.",
"!drug":"is going to smoke a big joint.",
"!coffee":"is going to drink a coffee",
"!bifle":"saute en l'air dans un mouvement circulaire, son sexe biflant toute la room.",
}
extra = {
"!flip":"(╯°□°)╯︵ ┻━┻",
"!chill":"┬─┬ ノ( ゜-゜ノ",
"!flipyou":"(╯°□°)╯︵ /(.□ . \)",
"!dunno":"¯\(°_o)/¯",
}
|
import numpy as np
from scipy.spatial import distance_matrix
def distance2(A, B):
M = A.shape[0]
N = B.shape[0]
A_dots = (A*A).sum(axis=1).reshape((M, 1))*np.ones(shape=(1, N))
B_dots = (B*B).sum(axis=1)*np.ones(shape=(M, 1))
D_squared = A_dots + B_dots - 2*A.dot(B.T)
zero_mask = np.less(D_squared, 0.0)
D_squared[zero_mask] = 0.0
# return distance_matrix(A, B)
return np.sqrt(D_squared)
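if __name__ == "__main__":
    # Quick sanity check: the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b used
    # above should agree with scipy's reference implementation (already imported).
    rng = np.random.default_rng(0)
    A = rng.standard_normal((5, 3))
    B = rng.standard_normal((7, 3))
    assert np.allclose(distance2(A, B), distance_matrix(A, B))
    print("distance2 matches scipy.spatial.distance_matrix")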
|
import torch
import logging
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.utils import load_state_dict_from_url
from typing import Type, Any, Callable, Union, List, Optional
from ..builder import BACKBONES
from mmcv.runner import load_checkpoint
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmcv.cnn import (ConvModule, build_conv_layer, build_norm_layer,
constant_init, kaiming_init)
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1, padding=None) -> nn.Conv2d:
"""3x3 convolution with padding"""
if padding is None:
padding = dilation
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=padding, groups=groups, dilation=dilation, bias=True)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion: int = 1
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
if stride == 1:
self.conv1 = conv3x3(inplanes, planes, stride)
elif stride == 2:
self.conv1 = conv3x3(inplanes, planes, stride, padding=0)
self.bn1 = norm_layer(planes)
self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=False)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
self.pth_to_tf_var_mapping = {}
map_dict = dict(conv1='Conva', conv2='Convb')
for conv_name, bn_name in zip(['conv1', 'conv2'], ['bn1', 'bn2']):
map_name = map_dict.get(conv_name)
self.pth_to_tf_var_mapping[f'{conv_name}.weight'] = f'{map_name}/weight'
self.pth_to_tf_var_mapping[f'{conv_name}.bias'] = f'{map_name}/bias'
self.pth_to_tf_var_mapping[f'{bn_name}.weight'] = f'{map_name}/batch_norm/gamma'
self.pth_to_tf_var_mapping[f'{bn_name}.bias'] = f'{map_name}/batch_norm/beta'
self.pth_to_tf_var_mapping[f'{bn_name}.running_var'] = f'{map_name}/batch_norm/moving_variance'
self.pth_to_tf_var_mapping[f'{bn_name}.running_mean'] = f'{map_name}/batch_norm/moving_mean'
if downsample is not None:
self.pth_to_tf_var_mapping[f'downsample.0.weight'] = (f'Shortcut/weight')
self.pth_to_tf_var_mapping[f'downsample.1.weight'] = (f'Shortcut/batch_norm/gamma')
self.pth_to_tf_var_mapping[f'downsample.1.bias'] = (f'Shortcut/batch_norm/beta')
self.pth_to_tf_var_mapping[f'downsample.1.running_var'] = (f'Shortcut/batch_norm/moving_variance')
self.pth_to_tf_var_mapping[f'downsample.1.running_mean'] = (f'Shortcut/batch_norm/moving_mean')
def forward(self, x: Tensor) -> Tensor:
identity = x
if self.stride == 2:
out = F.pad(x, pad=(0,1,0,1))
elif self.stride == 1:
out = x
out = self.conv1(out)
out = self.relu(out)
out = self.bn1(out)
out = self.conv2(out)
out = self.relu(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=False)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.relu(out)
out = self.bn1(out)
out = self.conv2(out)
out = self.relu(out)
out = self.bn2(out)
out = self.conv3(out)
out = self.relu(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
class FPN(nn.Module):
def __init__(self,
start_level=3,
length = 6,
fin=[64,64,128,256,512,512],
fout=512,
):
super(FPN, self).__init__()
self.lconv_list = nn.ModuleList()
self.fconv_list = nn.ModuleList()
self.pth_to_tf_var_mapping = {}
for i in range(start_level, length):
self.lconv_list.append(nn.Conv2d(fin[i], fout, kernel_size=3, bias=True, padding=1))
self.pth_to_tf_var_mapping[f'lconv_list.{i-start_level}.weight'] = f'lconv_{i}/weight'
self.pth_to_tf_var_mapping[f'lconv_list.{i-start_level}.bias'] = f'lconv_{i}/bias'
self.fconv_list.append(nn.Conv2d(fout, fout, kernel_size=3, bias=True, padding=1))
self.pth_to_tf_var_mapping[f'fconv_list.{i-start_level}.weight'] = f'fconv_{i-start_level}/weight'
self.pth_to_tf_var_mapping[f'fconv_list.{i-start_level}.bias'] = f'fconv_{i-start_level}/bias'
self.start_level = start_level
def forward(self, inputs,):
laterals = []
for i in range(self.start_level, len(inputs)):
laterals.append(self.lconv_list[i-self.start_level](inputs[i]))
flevel = len(laterals)
for i in range(flevel-1, 0, -1):
laterals[i-1] += F.interpolate(laterals[i], mode='nearest', scale_factor=2) + laterals[i-1]
outputs = []
for i in range(flevel):
outputs.append(self.fconv_list[i](laterals[i]))
return outputs
class DFuse(nn.Module):
def __init__(self,
length=3,
fin=512,
fout=512,
fuse=True):
super(DFuse, self).__init__()
self.fuse_conv_list = nn.ModuleList()
self.length = length
self.fuse = fuse
self.pth_to_tf_var_mapping = {}
for i in range(length):
fuse_conv = nn.Conv2d(fin, fout, kernel_size=3, padding=1, bias=True)
self.fuse_conv_list.append(fuse_conv)
self.pth_to_tf_var_mapping[f'fuse_conv_list.{i}.weight'] = f'fuse_conv_{i}/weight'
self.pth_to_tf_var_mapping[f'fuse_conv_list.{i}.bias'] = f'fuse_conv_{i}/bias'
def forward(self, inputs):
        assert len(inputs) == self.length, 'input length must be equal to self.length'
for i in range(len(inputs)-1):
for j in range(0, len(inputs)-1-i):
inputs[j] = F.avg_pool2d(inputs[j], kernel_size=2, stride=2)
for i in range(len(inputs)):
inputs[i] = self.fuse_conv_list[i](inputs[i])
if self.fuse:
for i in range(len(inputs)-1):
inputs[i] = inputs[i] + inputs[-1]
return inputs
class CodeHead(nn.Module):
def __init__(
self,
in_planes,
latent_size,
norm_layer=nn.BatchNorm2d):
super().__init__()
self.fc = nn.Linear(in_planes, latent_size, bias=True)
self.norm = norm_layer(latent_size)
self.pth_to_tf_var_mapping = {}
self.pth_to_tf_var_mapping[f'fc.weight'] = f'weight'
self.pth_to_tf_var_mapping[f'fc.bias'] = f'bias'
self.pth_to_tf_var_mapping[f'norm.weight'] = f'batch_norm/gamma'
self.pth_to_tf_var_mapping[f'norm.bias'] = f'batch_norm/beta'
self.pth_to_tf_var_mapping[f'norm.running_var'] = f'batch_norm/moving_variance'
self.pth_to_tf_var_mapping[f'norm.running_mean'] = f'batch_norm/moving_mean'
def forward(
self,
input):
if len(input.shape) > 2:
input = input.view(input.shape[0], -1)
latent = self.fc(input)
latent = latent[..., None, None]
latent = self.norm(latent)
return latent
@BACKBONES.register_module()
class ResNetEncoder(nn.Module):
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2, 1)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(
self,
depth = 18,
# block: Type[Union[BasicBlock, Bottleneck]],
# layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
filter_max: int = 512,
with_fpn = True,
with_ds_fuse = True,
multi_level = True,
frozen = True,
norm_eval = True,
pretrained = None,
# out_layers = (3,4,5), # res1, res2, res3, res4, res5, res6, latent_w
) -> None:
super(ResNetEncoder, self).__init__()
block, layers = self.arch_settings[depth]
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.frozen = frozen
self.norm_eval = norm_eval
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.pth_to_tf_var_mapping = {}
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=0,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=False)
self.maxpool = nn.AvgPool2d(kernel_size=2, stride=2)
self.pth_to_tf_var_mapping['conv1.weight'] = ('Conv0/weight')
self.pth_to_tf_var_mapping['bn1.weight'] = ('Conv0/batch_norm/gamma')
self.pth_to_tf_var_mapping['bn1.bias'] = ('Conv0/batch_norm/beta')
self.pth_to_tf_var_mapping['bn1.running_mean'] = ('Conv0/batch_norm/moving_mean')
self.pth_to_tf_var_mapping['bn1.running_var'] = ('Conv0/batch_norm/moving_variance')
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.layer5 = self._make_layer(block, 512, layers[4], stride=2,
dilate=replace_stride_with_dilation[2])
for layer_idx,layer in enumerate(layers):
layer_idx += 1
llayer = getattr(self, f'layer{layer_idx}')
blocks = [block for block in llayer.children()]
for block_idx, block in enumerate(blocks):
pth_pattern = f'layer{layer_idx}.{block_idx}'
if layer_idx <= 4:
tf_pattern = f'stage{layer_idx}_unit{block_idx+1}'
else:
tf_pattern = f'stage{layer_idx}'
for key, val in block.pth_to_tf_var_mapping.items():
self.pth_to_tf_var_mapping[f'{pth_pattern}.{key}'] = (f'{tf_pattern}/{val}')
self.with_fpn = with_fpn
self.with_ds_fuse = with_ds_fuse
self.multi_level = multi_level
if self.with_fpn:
self.fpn = FPN(fout=512)
for key, val in self.fpn.pth_to_tf_var_mapping.items():
self.pth_to_tf_var_mapping[f'fpn.{key}'] = (f'fpn/{val}')
self.dfuse = DFuse(length=3, fin=512, fout=512, fuse=self.with_ds_fuse)
for key, val in self.dfuse.pth_to_tf_var_mapping.items():
self.pth_to_tf_var_mapping[f'dfuse.{key}'] = (f'{val}')
if self.multi_level:
max_length = 1024
self.dsize = [max_length] * 8 + [max_length // 2] * 2 + [max_length // 4] * 2 + [max_length // 8] * 2
self.low_level = CodeHead(in_planes=512*4*4, latent_size=sum(self.dsize[:4]))
self.mid_level = CodeHead(in_planes=512*4*4, latent_size=sum(self.dsize[4:8]))
self.high_level = CodeHead(in_planes=512*4*4, latent_size=sum(self.dsize[8:]))
level_mapping = dict(low_level='LowLevel',
mid_level='MediaLevel',
high_level='HighLevel')
for level_key, level_val in level_mapping.items():
level_block = getattr(self, level_key)
for key, val in level_block.pth_to_tf_var_mapping.items():
self.pth_to_tf_var_mapping[f'{level_key}.{key}'] = (f'{level_val}/{val}')
self.init_weights(pretrained=pretrained, frozen=frozen)
# pth_var_keys = list(self.pth_to_tf_var_mapping.keys())
# weights_keys = list(self.state_dict().keys())
# self.avBgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(512 * block.expansion, num_classes)
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
# # Zero-initialize the last BN in each residual branch,
# # so that the residual branch starts with zeros, and each residual block behaves like an identity.
# # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
# if zero_init_residual:
# for m in self.modules():
# if isinstance(m, Bottleneck):
# nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
# elif isinstance(m, BasicBlock):
# nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion or True:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def init_weights(self, pretrained=None, frozen=True):
# super(ResNetEncoder, self).init_weights(pretrained)
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
if frozen:
logger.info('Froze backbone weights!')
for name, param in self.named_parameters():
param.requires_grad = False
elif pretrained is None:
print('Random Initialize Weights!')
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
if frozen:
print('Froze backbone weights!')
for name, param in self.named_parameters():
param.requires_grad = False
# use default initializer or customized initializer in subclasses
else:
raise TypeError('pretrained must be a str or None.'
f' But received {type(pretrained)}.')
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
init_inputs= x
x = F.pad(x,pad=(2,3,2,3))
x = self.conv1(x)
conv1 = x
x = self.bn1(x)
bn1 = x
x = self.relu(x)
relu1 = x
x = self.maxpool(x)
down1 = x
res1 = x
x = self.layer1(x)
res2 = x
x = self.layer2(x)
res3 = x
x = self.layer3(x)
res4 = x
x = self.layer4(x)
res5 = x
x = self.layer5(x)
res6 = x
inputs = (res1, res2, res3, res4, res5, res6)
if self.with_fpn:
inputs = self.fpn(inputs)
else:
inputs = (res4, res5, res6)
inputs = self.dfuse(inputs)
res4, res5, res6 = inputs
if self.multi_level:
latent_w0 = self.low_level(res6)
latent_w0 = latent_w0.reshape(-1, 4, self.dsize[0])
latent_w1 = self.mid_level(res5)
latent_w1 = latent_w1.reshape(-1, 4, self.dsize[4])
latent_w2 = self.high_level(res4)
latent_w20 = latent_w2[:, :sum(self.dsize[8:10])].reshape(-1, 2, self.dsize[8])
latent_w21 = latent_w2[:, sum(self.dsize[8:10]):sum(self.dsize[8:12])].reshape(-1, 2, self.dsize[10])
latent_w22 = latent_w2[:, sum(self.dsize[8:12]):].reshape(-1, 2, self.dsize[12])
# tile tensor
latent_w20 = latent_w20.repeat(1, 1, self.dsize[0]//self.dsize[8])
latent_w21 = latent_w21.repeat(1, 1, self.dsize[0]//self.dsize[10])
latent_w22 = latent_w22.repeat(1, 1, self.dsize[0]//self.dsize[12])
latent_w2 = torch.cat([latent_w20, latent_w21, latent_w22], dim=1)
# group adain
latent_w = torch.cat([latent_w0, latent_w1, latent_w2], dim=1)
else:
latent_w = init_inputs
# raise NotImplementedError
# x = self.avgpool(x)
# x = torch.flatten(x, 1)
# x = self.fc(x)
# print(res1.mean(), res2.mean(), res3.mean(), res4.mean(), res5.mean(), res6.mean())
return res1, res2, res3, res4, res5, res6, latent_w
def forward(self, x: Tensor) -> Tensor:
res1, res2, res3, res4, res5, res6, latent_w = self._forward_impl(x)
res4_gp = F.adaptive_avg_pool2d(res4, (1,1))
res5_gp = F.adaptive_avg_pool2d(res5, (1,1))
res6_gp = F.adaptive_avg_pool2d(res6, (1,1))
neck_f = torch.cat([res4_gp, res5_gp, res6_gp], dim=1)
# neck_f = latent_w[:,:4]
neck_f = torch.flatten(neck_f, 1)
return neck_f
def train(self, mode=True):
super(ResNetEncoder, self).train(mode)
if mode and self.norm_eval:
for m in self.modules():
# trick: eval have effect on BatchNorm only
if isinstance(m, _BatchNorm):
m.eval()
ResNet = ResNetEncoder
def _resnet(
arch: str,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
pretrained: bool,
progress: bool,
**kwargs: Any
) -> ResNet:
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2, 1], pretrained, progress,
**kwargs)
def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
if __name__ == '__main__':
model = resnet18().cuda()
data = torch.randn(2,3,256,256).cuda()
output = model(data)
|
def get_y(event, context):
return dict(oh="yaaaaa!")
function_mapping = {
"GET:/api/Y": get_y
}
def route_request(event, context):
if "route" not in event:
raise ValueError("must have 'route' in event dictionary")
if event["route"] not in function_mapping:
raise ValueError("cannot find {0} in function mapping".format(event["route"]))
func = function_mapping[event["route"]]
return func(event, context)
def lambda_handler(event, context=None):
print("event: %s" % event)
return route_request(event, context)
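if __name__ == "__main__":
    # Minimal local check of the routing table: "GET:/api/Y" is the only mapped
    # route above, so this should print {'oh': 'yaaaaa!'}.
    print(lambda_handler({"route": "GET:/api/Y"}))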
|
# Time complexity: O(n*n)
# Approach: Storing words in a Trie and applying DFS.
from collections import defaultdict
from functools import lru_cache
from typing import List
class TrieNode:
def __init__(self):
self.child = defaultdict(TrieNode)
self.isWord = False
def addWord(self, word):
curr = self
for c in word:
curr = curr.child[c]
curr.isWord = True
class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> bool:
n = len(s)
root = TrieNode()
for word in wordDict:
root.addWord(word)
@lru_cache(None)
def dp(start):
if start == n: # Found a valid way to break words
return True
curr = root
for end in range(start + 1, n + 1): # O(N)
c = s[end-1]
if c not in curr.child: break
curr = curr.child[c]
if curr.isWord and dp(end):
return True
return False
return dp(0)
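if __name__ == "__main__":
    # Small sanity check of the Trie + memoized DFS above (classic word-break cases).
    solver = Solution()
    print(solver.wordBreak("leetcode", ["leet", "code"]))                          # True
    print(solver.wordBreak("catsandog", ["cats", "dog", "sand", "and", "cat"]))    # False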
|
def funk(a, b = 5, c = 10):
print("a je ", a, ", a b je ", b, " i c je ", c)
funk(3,7)
funk(25, c=24)
funk(c=50, a=100)
|
from dataclasses import dataclass
from typing import Any, Callable
from avilla.core.service import ExportInterface
from avilla.core.service.entity import BehaviourDescription
from avilla.core.service.session import BehaviourSession
from avilla.core.stream import Stream
@dataclass
class PostConnected(BehaviourDescription[Callable[[ExportInterface, BehaviourSession, dict], Any]]):
pass
@dataclass
class DataReceived(
BehaviourDescription[Callable[[ExportInterface, BehaviourSession, dict, Stream[bytes]], Any]]
):
pass
@dataclass
class PostDisconnected(BehaviourDescription[Callable[[ExportInterface, BehaviourSession, dict], Any]]):
pass
@dataclass
class PreConnected(BehaviourDescription[Callable[[ExportInterface, BehaviourSession, dict], Any]]):
pass
|
from flask import Flask,render_template, request,redirect
from instamojo_wrapper import Instamojo
API_KEY ="test_4b6c991d2d6f02dc9b4aa45127e"
AUTH_TOKEN = "test_fe8287ed5f08248dc08558dd8b8"
app = Flask(__name__,template_folder='template')
api = Instamojo(api_key=API_KEY,auth_token=AUTH_TOKEN,endpoint='https://test.instamojo.com/api/1.1/')
@app.route('/')
def home():
return render_template('index.html')
@app.route('/form')
def form():
return render_template('form.html')
@app.route('/success')
def success():
return render_template('success.html')
@app.route('/pay', methods=['POST', 'GET'])
def pay():
if request.method == 'POST':
name = request.form.get('name')
purpose = request.form.get('purpose')
email = request.form.get('email')
amount = request.form.get('amount')
response = api.payment_request_create(
amount=amount,
purpose=purpose,
buyer_name=name,
send_email=True,
email=email,
redirect_url="http://localhost:5000/success"
)
return redirect(response['payment_request']['longurl'])
else:
return redirect('/')
if __name__ == '__main__':
app.run(debug=True)
|
from rest_framework import viewsets
from admirer_app.models import Secret_Messages
from admirer_app.serializers import SecretMessagesSerializer
from django_filters.rest_framework import DjangoFilterBackend
# Create your views here.
class SecretMessageViewSet(viewsets.ModelViewSet):
queryset = Secret_Messages.objects.all()
serializer_class = SecretMessagesSerializer
http_method_names = ['get', 'post']
filter_backends = [DjangoFilterBackend]
filterset_fields = ['recipient'] |
import os
from colorama import Fore, Style
from instascrape.commands import COOKIES_DIR, OBJECT_FILE, error_print, warn_print, load_obj, error_catcher
def logout_handler(**args):
real = args.get("real")
insta = load_obj()
if insta is None:
error_print("No session is logged in currently.", exit=1)
username = insta.my_username
filename = username + ".cookie"
cookie_path = os.path.join(COOKIES_DIR, filename)
if not os.path.isfile(cookie_path):
# force to log session out from server if cookie file does not exist
warn_print("Failed to locate the cookie file associated with this session. Do real logout.")
real = True
# Logout: remove the saved insta object (session) file
# Remove object file
print("• Removing object file...")
os.remove(OBJECT_FILE)
if not real:
print(Fore.LIGHTBLUE_EX + Style.BRIGHT + "❙❙Paused session of " + Fore.WHITE + "@{}".format(username))
return
# Real Logout: log session out from server. cookies will no longer be valid.
# Log out from Instagram
print("• Logging session out from server...")
with error_catcher():
insta.logout()
# Remove cookie file
if os.path.isfile(cookie_path):
print("• Removing cookie file...")
os.remove(cookie_path)
# ---
print(Fore.LIGHTGREEN_EX + Style.BRIGHT + "⏏ Logged out from " + Fore.WHITE + "@{}".format(username))
|
from logging import getLogger
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.utils.functional import cached_property
from hornet.client import Client
from hornet.models import Account
logger = getLogger(__name__)
class ClientCommand(BaseCommand):
def __init__(self, *args, **kwargs):
super().__init__(stdout=None, stderr=None, no_color=False)
self._account_username = None
def create_parser(self, prog_name, subcommand):
parser = super(ClientCommand, self).create_parser(prog_name, subcommand)
parser.add_argument("--account", type=int)
return parser
def execute(self, *args, **options):
self._account_username = options.pop("account", None)
super().execute(*args, **options)
@cached_property
def client(self):
if self._account_username:
account = Account.get_account(self._account_username)
else:
account = Account.objects.first()
if not account:
raise CommandError("Account doesn't found")
return Client(account)
def handle(self, *args, **options):
super(ClientCommand, self).handle(*args, **options)
|
# Get the desired future value.
future_value = float(input('Enter the desired future value: '))
# Get the annual interest rate.
rate = float(input('Enter the annual interest rate (as a decimal, e.g. 0.05 for 5%): '))
# Get the number of years that the money will appreciate.
years = int(input('Enter the number of years the money will grow: '))
# Calculate the amount needed to deposit.
present_value = future_value / (1.0 + rate)**years
# Display the amount needed to deposit.
print('You will need to deposit this amount:', present_value)
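# Worked example (assuming the rate is entered as a decimal): a desired future
# value of 10000.00 at a 0.05 annual rate over 10 years requires
# 10000 / 1.05**10 ≈ 6139.13 deposited today.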
|
# SPDX-License-Identifier: Apache-2.0
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
from typing import Sequence
class NormalizeStrings(Base):
@staticmethod
def export(): # type: () -> None
def make_graph(node, input_shape, output_shape): # type: (onnx.helper.NodeProto, Sequence[int], Sequence[int]) -> onnx.helper.GraphProto
graph = onnx.helper.make_graph(
nodes=[node],
name='StringNormalizer',
inputs=[onnx.helper.make_tensor_value_info('x',
onnx.TensorProto.STRING,
input_shape)],
outputs=[onnx.helper.make_tensor_value_info('y',
onnx.TensorProto.STRING,
output_shape)])
return graph
#1st model_monday_casesensintive_nochangecase
stopwords = [u'monday']
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
is_case_sensitive=1,
stopwords=stopwords
)
x = np.array([u'monday', u'tuesday', u'wednesday', u'thursday']).astype(np.object)
y = np.array([u'tuesday', u'wednesday', u'thursday']).astype(np.object)
graph = make_graph(node, [4], [3])
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x], outputs=[y], name="test_strnorm_model_monday_casesensintive_nochangecase")
#2nd model_nostopwords_nochangecase
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
is_case_sensitive=1
)
x = np.array([u'monday', u'tuesday']).astype(np.object)
y = x
graph = make_graph(node, [2], [2])
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x], outputs=[y], name="test_strnorm_model_nostopwords_nochangecase")
# 3rd model_monday_casesensintive_lower
stopwords = [u'monday']
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
case_change_action='LOWER',
is_case_sensitive=1,
stopwords=stopwords
)
x = np.array([u'monday', u'tuesday', u'wednesday', u'thursday']).astype(np.object)
y = np.array([u'tuesday', u'wednesday', u'thursday']).astype(np.object)
graph = make_graph(node, [4], [3])
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x], outputs=[y], name="test_strnorm_model_monday_casesensintive_lower")
#4 model_monday_casesensintive_upper
stopwords = [u'monday']
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
case_change_action='UPPER',
is_case_sensitive=1,
stopwords=stopwords
)
x = np.array([u'monday', u'tuesday', u'wednesday', u'thursday']).astype(np.object)
y = np.array([u'TUESDAY', u'WEDNESDAY', u'THURSDAY']).astype(np.object)
graph = make_graph(node, [4], [3])
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x], outputs=[y], name="test_strnorm_model_monday_casesensintive_upper")
#5 monday_insensintive_upper_twodim
stopwords = [u'monday']
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
case_change_action='UPPER',
stopwords=stopwords
)
input_shape = [1, 6]
output_shape = [1, 4]
x = np.array([u'Monday', u'tuesday', u'wednesday', u'Monday', u'tuesday', u'wednesday']).astype(np.object).reshape(input_shape)
y = np.array([u'TUESDAY', u'WEDNESDAY', u'TUESDAY', u'WEDNESDAY']).astype(np.object).reshape(output_shape)
graph = make_graph(node, input_shape, output_shape)
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x], outputs=[y], name="test_strnorm_model_monday_insensintive_upper_twodim")
#6 monday_empty_output
stopwords = [u'monday']
node = onnx.helper.make_node(
'StringNormalizer',
inputs=['x'],
outputs=['y'],
case_change_action='UPPER',
is_case_sensitive=0,
stopwords=stopwords
)
x = np.array([u'monday', u'monday']).astype(np.object)
y = np.array([u'']).astype(np.object)
graph = make_graph(node, [2], [1])
model = onnx.helper.make_model(graph, producer_name='backend-test')
expect(model, inputs=[x], outputs=[y], name="test_strnorm_model_monday_empty_output")
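# Note: each expect(...) call above registers a named backend test case (model
# plus input/output tensors) for the ONNX backend test generator to serialize;
# the expected outputs reflect stopword removal and the requested case change.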
|
import petl as etl
from fhir_petl.util import preprocess, resolve, mkdirp
mkdirp(resolve('work'))
preprocess(
etl.io.csv.fromcsv(resolve('Table_1_Demographics_New_Cohorts.csv'))
).tocsv(resolve('work/Patient.csv'))
preprocess(
etl.io.csv.fromcsv(resolve('Diagnoses.csv'))
).tocsv(resolve('work/Condition.csv'))
preprocess(
etl.io.csv.fromcsv(resolve('fairbanks_cv.dedup.csv'))
).tocsv(resolve('work/Observation.csv'))
preprocess(
etl.io.csv.fromcsv(resolve('Prescriptions.csv'))
).tocsv(resolve('work/MedicationRequest.csv'))
preprocess(
etl.io.csv.fromcsv(resolve('Procedures.csv'))
).tocsv(resolve('work/Procedure.csv'))
|
## ----- Exercise 2 -----
"""
Write a function is_palindrome(text)
that returns bool True or False depending on whether a word or sentence reads the same from both ends.
PS: you can start with a single-word solution, but the full solution should ignore whitespace and upper/lower case.
is_palindrome("Alus ari ira sula") -> True
"""
# def is_palindrome(text: str) -> bool:
# text_neat = text.lower().replace(' ', '') # could use upper()
# text_neat_reversed = text_neat[::-1]
# return text_neat == text_neat_reversed
# # return text_neat in text_neat_reversed and text_neat_reversed in text_neat
def is_palindrome(text):
return text.replace(" ", "").lower() == text[::-1].replace(" ", "").lower()  # normalizes both sides, so the cleanup work is done twice
print(is_palindrome("AbBA"))
print(is_palindrome("Alus ari ira sula"))
print(is_palindrome("Alus ari ir a sula "))
print(is_palindrome("nav palindroms"))
|
"""The shogoth reader."""
import re
from typing import Any
from lark import (
Lark,
Token,
Transformer,
Tree,
v_args,
)
from shogoth.parser import parse
from shogoth.types import Keyword, Symbol
# Monkeypatching for py3.10 matching
Tree.__match_args__ = ("data", "children")
Token.__match_args__ = ("type", "value")
class Reader(object):
"""An extension of parsing that produces meaningful trees. Can be extended with userland hooks."""
def __init__(self):
pass
def _parse(self, buffer):
return parse(buffer)
def symbol(self, x):
return Symbol(x)
def keyword(self, x):
return Keyword(x)
def pattern(self, x):
return re.compile(x[1:-1].replace("\\/", "/"))
def _read(self, tree: Tree) -> Any:
match tree:
case Tree(Token("RULE", "expr"), children):
return self._read(children[0])
case Tree(Token("RULE", "plist"), children):
return [self._read(c) for c in children]
case Tree(Token("RULE", "blist"), children):
return [self._read(c) for c in children]
case Tree(Token("RULE", "mapping"), children):
return dict(self._read(c) for c in children)
case Tree(Token("RULE", "kv"), [k, v]):
return (self._read(k), self._read(v))
case Tree(Token("RULE", "atom"), [a]):
return self._read(a)
case Token("INT", x):
return int(x)
case Token("FLOAT", x):
return float(x)
case Token("KEYWORD", x):
return self.keyword(x)
case Token("PATTERN", x):
return self.pattern(x)
case Token("TRUE", _):
return True
case Token("FALSE", _):
return False
case Token("NIL", _):
return None
# Symbol is very much the catch-all of the grammar
case Token("SYMBOL", x):
return self.symbol(x)
case _:
return tree
def read(self, buffer):
return self._read(self._parse(buffer))
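# A minimal usage sketch (the literal input below is hypothetical; the concrete
# surface syntax is whatever the Lark grammar behind shogoth.parser.parse
# accepts for its plist/blist/mapping/atom rules):
#
#   reader = Reader()
#   reader.read("[1 2.5 :key sym]")  # e.g. -> [1, 2.5, Keyword(':key'), Symbol('sym')]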
|
from modules.ranking.models.ranking import Ranking
from modules.ranking.query.base import MISSION_PACKAGE_RANKING_BASE_QUERY
class MissionPackagesRanking(Ranking):
BASE_QUERY = MISSION_PACKAGE_RANKING_BASE_QUERY
def __init__(self, mission_package):
self.mission_package = mission_package
def get_base_query(self):
return MISSION_PACKAGE_RANKING_BASE_QUERY.format(
self.mission_package.mission_id
)
|
import json
import math
import os
import random
import tempfile
from typing import Optional, Union, List, Dict
from datasets import load_dataset
from torch.utils.data import DataLoader
from lm_eval.pl_models import (
LitGPT2,
LitT5, VAL_LOSS, VAL_ACC
)
from transformers import set_seed
import pytorch_lightning as pl
from dataclasses import (
dataclass,
asdict
)
VAL_LOSS_TASKS = ["arc_easy", "copa", "openbookqa", "lambada_cloze", "triviaqa", "piqa", "webqs", "nq_open",
"winogrande", "race", "race_middle", "mrqa_natural_questions", "mrqa_natural_questions_open",
"mrqa_triviaqa", "mrqa_triviaqa_open", "commonsense_qa", "boolq_open", "nq_v3", "nq_v3_open",
"nq_v3_mc", "xsum", "common_gen", "nq_open_no_overlap", "nq_webqs", "open_squad1", "open_newsqa",
"open_newsqa_mrqa_f", "open_searchqa_mrqa_f", "triviaqa_ours"]
VAL_EM_TASKS = ["rte", "sst", "wic", "multirc", "anli_r1", "wsc", "boolq", "squad2", "squad1", "drop",
"piqa_extractive", "copa_extractive", "winogrande_non_partial", "winogrande_explicit",
"copa_explicit", "copa_timo", "piqa_extractive", "copa_extractive", "arc_easy_extractive",
"commonsense_qa_extractive", "squad_drop", "squad_natural_questions", "arc_easy_ir",
"mrqa_hotpotqa", "mrqa_newsqa"]
@dataclass
class TrainArgs:
train_set_size: int
task_name: str
model_type: str
gradient_clip_val: int
weight_decay: float
learning_rate: float
optimizer_type: str
lr_scheduler_type: str
per_device_train_batch_size: int
gradient_accumulation_steps: int
save_prefix: str = None
dropout: float = None
pretrained: str = None
device: str = None
min_train_steps: int = None
num_train_epochs: int = 10
verbose: str = ""
per_device_eval_batch_size: int = 1
preprocessing_num_workers: int = 1
overwrite_cache: bool = True
min_warmup_steps: int = 100
num_warmup_steps: int = None
warmup_ratio: float = 0.1
seed: int = 1234
max_train_steps: int = None
monitor: str = None
monitor_mode: str = None
def write_datasets_to_read(train_set, save_prefix):
num_in_train = max((len(train_set) * 3) // 4, len(train_set) - 400)
train_set, dev_set = train_set[:num_in_train], train_set[num_in_train:]
temp_dir = tempfile.TemporaryDirectory(prefix=save_prefix)
print(f"Saving train/dev splits in {temp_dir.name}")
train_file = f"{temp_dir.name}/train.json"
dev_file = train_file.replace("train", "dev")
with open(train_file, "w") as f:
for dp in train_set:
f.write(json.dumps(dp) + '\n')
with open(dev_file, "w") as f:
for dp in dev_set:
f.write(json.dumps(dp) + '\n')
return train_file, dev_file, temp_dir
def load_raw_datasets(train_set, save_prefix):
train_file, dev_file, temp_dir = write_datasets_to_read(train_set, save_prefix)
extension = train_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files={"train": train_file, "validation": dev_file})
# Log a few random samples from the training set:
for index in random.sample(range(len(raw_datasets["train"])), 3):
print(f"Sample {index} of the training set: {raw_datasets['train'][index]}.")
return raw_datasets, temp_dir
def get_monitor_name(task_name):
if task_name in VAL_LOSS_TASKS:
return VAL_LOSS, "min"
elif task_name in VAL_EM_TASKS:
return VAL_ACC, "max"
else:
raise ValueError(f"We don't support task {task_name}")
def train_lm(model, tokenizer, train_set, task_name, train_args):
model = model.to('cpu')
train_args = TrainArgs(train_set_size=len(train_set), task_name=task_name, **train_args)
# If passed along, set the training seed now.
set_seed(train_args.seed)
logger = pl.loggers.CometLogger(
api_key=os.environ.get('COMET_API_KEY'),
project_name=os.environ.get('COMET_PROJECT', "few-shot"),
workspace=os.environ.get('COMET_WORKSPACE', "yuvalkirstain"),
# save_dir=temp_dir.name,
# offline=True
)
raw_datasets, temp_dir = load_raw_datasets(train_set, train_args.save_prefix)
lr_monitor = pl.callbacks.LearningRateMonitor(logging_interval='step')
train_args.monitor, train_args.monitor_mode = get_monitor_name(task_name)
checkpoint_callback = pl.callbacks.ModelCheckpoint(
monitor=train_args.monitor,
dirpath=os.path.join(temp_dir.name, "checkpoints"),
save_top_k=1,
mode=train_args.monitor_mode,
)
# Preprocessing the datasets.
# First we tokenize all the texts.
column_names = raw_datasets["train"].column_names
# Scheduler and math around the number of training steps.
# TODO not sure if this is accurate for multi-gpu
num_update_steps_per_epoch = math.ceil(
len(raw_datasets["train"]) / (train_args.gradient_accumulation_steps * train_args.per_device_train_batch_size))
if train_args.num_train_epochs * num_update_steps_per_epoch < train_args.min_train_steps and train_args.max_train_steps is None:
train_args.max_train_steps = train_args.min_train_steps
if train_args.max_train_steps is None:
train_args.max_train_steps = train_args.num_train_epochs * num_update_steps_per_epoch
else:
train_args.num_train_epochs = math.ceil(train_args.max_train_steps / num_update_steps_per_epoch)
train_args.num_warmup_steps = max(train_args.min_warmup_steps,
int(train_args.max_train_steps * train_args.warmup_ratio))
model_cls = LitGPT2 if train_args.model_type == "gpt2" else LitT5
model_finetuner = model_cls(model,
train_args.lr_scheduler_type,
train_args.num_warmup_steps,
train_args.max_train_steps,
train_args.weight_decay,
train_args.learning_rate,
tokenizer)
tokenized_datasets = raw_datasets.map(
model_finetuner.tokenize_function,
batched=True,
num_proc=train_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not train_args.overwrite_cache,
)
train_dataset = tokenized_datasets["train"]
eval_dataset = tokenized_datasets["validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
print(f"Sample {index} of the training set: {train_dataset[index]}.")
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=model_finetuner.collate_fn,
batch_size=train_args.per_device_train_batch_size
)
val_dataloader = DataLoader(
eval_dataset, collate_fn=model_finetuner.collate_fn, batch_size=train_args.per_device_eval_batch_size
)
logger.log_hyperparams(asdict(train_args))
print(train_args)
trainer = pl.Trainer(
gpus="0",
accumulate_grad_batches=train_args.gradient_accumulation_steps,
plugins=None,
precision=32,
logger=logger,
max_steps=train_args.max_train_steps,
min_steps=train_args.min_train_steps,
gradient_clip_val=train_args.gradient_clip_val,
callbacks=[checkpoint_callback, lr_monitor]
# accelerator='ddp_sharded'
# log_every_n_steps=1,
# flush_logs_every_n_steps=1
)
trainer.fit(model_finetuner, train_dataloader, val_dataloader)
print(f"best checkpoint is: {checkpoint_callback.best_model_path}")
test_model = model_cls.load_from_checkpoint(checkpoint_callback.best_model_path)
train_args = asdict(train_args)
train_args["best_model_path"] = checkpoint_callback.best_model_path
train_args["best_model_score"] = checkpoint_callback.best_model_score.item()
train_args["previous_experiment"] = logger.experiment.get_key()
temp_dir.cleanup()
return test_model.model.eval(), train_args
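# A minimal calling sketch (hypothetical values; the dict must supply every
# TrainArgs field without a default other than train_set_size and task_name,
# and COMET_API_KEY must be set in the environment for the Comet logger):
#
#   trained_model, run_info = train_lm(
#       model, tokenizer, train_set, "boolq",
#       {"model_type": "t5", "gradient_clip_val": 1, "weight_decay": 0.01,
#        "learning_rate": 1e-4, "optimizer_type": "adamw",
#        "lr_scheduler_type": "linear", "per_device_train_batch_size": 8,
#        "gradient_accumulation_steps": 4, "save_prefix": "boolq-"},
#   )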
|