Dataset schema (one row per source file; ⌀ marks nullable columns):

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 3 – 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3–972 |
| max_stars_repo_name | string | length 6–130 |
| max_stars_repo_head_hexsha | string | length 40–78 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 | 1 – 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3–972 |
| max_issues_repo_name | string | length 6–130 |
| max_issues_repo_head_hexsha | string | length 40–78 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 | 1 – 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3–972 |
| max_forks_repo_name | string | length 6–130 |
| max_forks_repo_head_hexsha | string | length 40–78 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 | 1 – 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 3 – 1.03M |
| avg_line_length | float64 | 1.13 – 941k |
| max_line_length | int64 | 2 – 941k |
| alphanum_fraction | float64 | 0 – 1 |
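
The columns above are easier to inspect programmatically than in a flattened dump. Below is a minimal sketch, assuming the rows have been exported to a local JSON Lines file (the `code_samples.jsonl` file name is an assumption, not something the dump specifies); it only touches column names listed in the schema:

```python
import json

# Assumed local export of the rows shown below; the path is hypothetical.
PATH = "code_samples.jsonl"

with open(PATH, encoding="utf-8") as fh:
    rows = [json.loads(line) for line in fh]

for row in rows:
    # Provenance and size metadata for each source file.
    print(
        row["hexsha"][:12],
        row["max_stars_repo_name"],
        row["max_stars_repo_path"],
        f"{row['size']} bytes",
        f"stars={row['max_stars_count']}",  # may be None (rendered as null/⌀ in the dump)
    )
    # row["content"] holds the full Python source; avg_line_length,
    # max_line_length and alphanum_fraction are per-file statistics.
```

In the records below, fields shown as null had no value recorded for that row.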

**`Ago-Dic-2020/lopez-flores-jorge-luis/Especial/Ejercicio2/Autobus.py`**
- hexsha: c40a6d905d2ababfcd8cc2279b320a8d6dff51ea · size: 251 · ext: py · lang: Python
- repo (stars/issues/forks fields): bryanbalderas/DAS_Sistemas @ 1e31f088c0de7134471025a5730b0abfc19d936e · licenses: ["MIT"]
- max_stars_count: 41 (2017-09-26T09:36:32.000Z → 2022-03-19T18:05:25.000Z) · max_issues_count: 67 (2017-09-11T05:06:12.000Z → 2022-02-14T04:44:04.000Z) · max_forks_count: 210 (2017-09-01T00:10:08.000Z → 2022-03-19T18:05:12.000Z)
- content:
```python
from Especial.Ejercicio1.Vehiculo import *
class Autobus(vehicle):
def __str__(self):
return "Corro a: {} km/h \n Tengo {}km recorridos\n y llevo hasta: {} pasajeros\n".format(self.velocidad_maxima, self.kilometraje, self.capacidad).strip()
```
- avg_line_length: 27.888889 · max_line_length: 158 · alphanum_fraction: 0.729084

**`src/commands/pipe/display.py`**
- hexsha: 47d5f61559722138a95fda4d68233132bb80b14f · size: 2,370 · ext: py · lang: Python
- repo (stars/issues/forks fields): VFRAMEio/vframe @ 0dbc991697a6de47ccf3b65ced5b1201a45156b6 · licenses: ["MIT"]
- max_stars_count: null · max_issues_count: null · max_forks_count: null
- content:
```python
#############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2020 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import click
from vframe.models.types import MediaType
from vframe.utils.click_utils import processor
from vframe.models.types import FrameImage, FrameImageVar
from vframe.utils.click_utils import show_help
@click.command('')
@click.option('--fps', 'opt_fps', default=25, show_default=True,
type=click.IntRange(1,1000),
help='Frames per second. Use 0 to pause.')
@click.option('--pause/--play', 'opt_pause', is_flag=True,
help='Autoplay video')
@click.option('--frame', 'opt_frame_type', default='draw',
type=FrameImageVar, help=show_help(FrameImage))
@click.option('--filter', 'opt_filter', type=click.Choice(['detections', 'no-detections']),
help='Only display frames with detections')
@processor
@click.pass_context
def cli(ctx, sink, opt_fps, opt_pause, opt_frame_type, opt_filter):
"""Display images to screen"""
import time
from vframe.settings.app_cfg import LOG, SKIP_FRAME, USE_DRAW_FRAME
from vframe.settings.app_cfg import PAUSED, READER
from vframe.utils.display_utils import DisplayUtils
# ---------------------------------------------------------------------------
# initialize
ctx.obj[USE_DRAW_FRAME] = True
display_utils = DisplayUtils()
target_mspf = 1000 / opt_fps
ctx.obj[PAUSED] = opt_pause
st = time.time()
# ---------------------------------------------------------------------------
# process
while True:
M = yield
# skip frame if flagged
if ctx.obj[SKIP_FRAME]:
sink.send(M)
continue
# override pause if single image
if ctx.obj[READER].n_files == 1 and M.type == MediaType.IMAGE:
ctx.obj[PAUSED] = True
# dynamically adjust framerate
actual_mspf = (time.time() - st) / 1000
frame_delay_ms = int(max(1, target_mspf - actual_mspf))
# get and display image
fde = M.frame_detections_exist
if not opt_filter or ((fde and opt_filter == 'detections') or (fde == False and opt_filter == 'no-detections')):
im = M.images.get(opt_frame_type)
display_utils.show_ctx(ctx, im, delay=frame_delay_ms)
# continue
sink.send(M)
st = time.time()
```
- avg_line_length: 30 · max_line_length: 116 · alphanum_fraction: 0.604219

**`trdb2py/indicator_test.py`**
- hexsha: 0f71cbd2bc0b2bc3fdbecfda7958a2b4ec2ad0f0 · size: 361 · ext: py · lang: Python
- repo (stars/issues/forks fields): zhs007/trdb2py @ d07b874bd37085ed64b5c6c6c2c21a380024d082 · licenses: ["Apache-2.0"]
- max_stars_count: null · max_issues_count: 43 (2020-12-11T09:07:51.000Z → 2021-05-29T07:31:10.000Z) · max_forks_count: null
- content:
```python
# -*- coding:utf-8 -*-
import pytest
from trdb2py.indicator import isPriceIndicator
def test_isPriceIndicator():
assert isPriceIndicator('ema.5') == True
assert isPriceIndicator('rsi.5') == False
assert isPriceIndicator("ta-ema.29>day/1d/5m/53700") == True
assert isPriceIndicator("ta-ema") == True
assert isPriceIndicator('rsi') == False
```
- avg_line_length: 30.083333 · max_line_length: 64 · alphanum_fraction: 0.706371

**`tools/miktex-pkg-md5.py`**
- hexsha: 9956d02cb6d0e1899bcd3066ffcc56bba1d1fd73 · size: 970 · ext: py · lang: Python
- repo (stars/issues/forks fields): MiKTeX/miktex-packagin @ 5c4f131c04eaff9006cccdfb2e09c1e9efb61642 · licenses: ["MIT"]
- max_stars_count: 20 (2018-04-29T01:07:28.000Z → 2022-01-15T14:40:21.000Z) · max_issues_count: 273 (2017-09-03T17:24:13.000Z → 2022-03-26T21:26:44.000Z) · max_forks_count: 2 (2020-05-18T14:11:30.000Z → 2021-02-28T08:14:41.000Z)
- content:
```python
#!/usr/bin/env python3
#
# Licensed to you under the MIT license. See the LICENSE file in the
# project root for more information.
import sys
from miktex.packaging.info import inifile
from miktex.packaging.info import md5
if len(sys.argv) != 2 and len(sys.argv) != 3:
sys.exit("Usage: {} [-update] <package>".format(sys.argv[0]))
if sys.argv[1] == "-update":
if len(sys.argv) != 3:
sys.exit("missing package name")
update_requested = True
package_id = sys.argv[2]
else:
if len(sys.argv) != 2:
sys.exit("invalid argument(s)")
update_requested = False
package_id = sys.argv[1]
md5_hash = md5.try_get_md5_hash(package_id)
if update_requested:
if not md5_hash:
sys.exit("TDS digest of package '{}' could not be calculated".format(package_id))
package_info = inifile.PackageInfo(package_id)
package_info.md5 = md5_hash
package_info.write()
else:
print(md5_hash)
```
- avg_line_length: 27.714286 · max_line_length: 90 · alphanum_fraction: 0.657732

**`openvas/komand_openvas/actions/scan_status/action.py`**
- hexsha: c4999cccfedc88e4e187281a191983b9edf90cc7 · size: 867 · ext: py · lang: Python
- repo (stars/issues/forks fields): killstrelok/insightconnect-plugins @ 911358925f4233ab273dbd8172e8b7b9188ebc01 · licenses: ["MIT"]
- max_stars_count: 1 (2020-03-18T09:14:55.000Z → 2020-03-18T09:14:55.000Z) · max_issues_count: 1 (2021-02-23T23:57:37.000Z → 2021-02-23T23:57:37.000Z) · max_forks_count: null
- content:
```python
import komand
from .schema import ScanStatusInput, ScanStatusOutput
# Custom imports below
import sys
class ScanStatus(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='scan_status',
description='Get status of scan',
input=ScanStatusInput(),
output=ScanStatusOutput())
def run(self, params={}):
scan_id = str(params.get('scan_id'))
try:
stat = self.connection.scanner.get_scan_status(scan_id)
except:
return {'status': '', 'success': False,
'message': ' | '.join([str(sys.exc_info()[0]), str(sys.exc_info()[1])])}
return {'status': str(stat), 'success': True, 'message': 'Got status successfully'}
def test(self):
# TODO: Implement test function
return {}
```
- avg_line_length: 30.964286 · max_line_length: 92 · alphanum_fraction: 0.581315

**`mcc/storage.py`**
- hexsha: 3a4e19d7374dbbc1e76b915db440317af8a20fce · size: 3,418 · ext: py · lang: Python
- repo (stars/issues/forks fields): lanl/materialsCloudCompute @ 6aa5019638b830a79f35a0cf3ef879ff0ab4f2bb · licenses: ["BSD-3-Clause"]
- max_stars_count: null · max_issues_count: null · max_forks_count: null
- content:
```python
# -*- coding: utf-8 -*-
"""Storage Management"""
import random
import boto3
def connection():
"""Creates an s3 client connection"""
return boto3.client("s3")
def build_storage(name="", s3=connection()):
"""Builds a storage bucket for the calculation
Parameters
----------
name : string, optional
Name of the bucket. (Default: autogenerated)
s3 : s3 object, optional
S3 client object (Default: Creates a new one)
Returns
-------
name : string
Name of the bucket
response : dict
api response
"""
if not name:
name = str(hex(random.randint(1e10, 1e11-1)))
response = s3.create_bucket(Bucket=name)
return name, response
def close_storage(name, s3=connection(), safe=True):
"""Closes storage bucket
Parameters
----------
s3 : S3 object
S3 client object
name : string
Name of bucket to delete
safe : bool, optional
Safely delete will only delete if bucket is empty. (Default: True)
"""
if safe:
response = s3.delete_bucket(Bucket=name)
else:
empty_storage(name, s3)
response = s3.delete_bucket(Bucket=name)
return response
def empty_storage(name, s3=connection()):
"""Empties storage bucket
Parameters
----------
s3 : s3 object
S3 client objection
name : string
Name of bucket to empty
Returns
-------
response : dict
api response
"""
response = []
object_list = s3.list_objects(Bucket=name)
for file in object_list.get("Contents", []):
response.append(s3.delete_object(Bucket=name, Key=file["Key"]))
return response
def get_bucket_names(s3=connection()):
"""Gets the names of all of the buckets in s3 coonection
Parameters
----------
s3 : s3 object, optional
S3 client object (Default: creates connection from aws configuration)
Returns
-------
names : list{string}
the names, if any, of the existing buckets in your s3
"""
return [bucket["Name"] for bucket in s3.list_buckets()["Buckets"]]
def upload(bucket, file, s3=connection()):
"""Uploads a file to a specified bucket
Parameters
----------
bucket : string
Name of bucket
file : string
Path to file
s3 : s3 object, optional
S3 client object (Default: auto-connect)
Returns
-------
response : bucket
"""
response = s3.upload_file(file, bucket, file)
return response
def download(bucket, file, key=None, output=None, s3=connection()):
"""Download a file to a specified bucket
Parameters
----------
bucket : string
Name of bucket
file : string
Path to file
key : string, optional
path to file in bucket (Default: will try to determine key from filename)
output : string, optional
local path to download to
s3 : s3 object, optional
S3 client object (Default: auto-connect)
Returns
-------
response : bucket
"""
if key is None:
obj_list = s3.list_objects(Bucket=bucket)
for obj in obj_list.get("Contents", []):
if obj["Key"].endswith(file):
key = obj["Key"]
break
if output is None:
output = file
response = s3.download_file(bucket, key, output)
return response
```
- avg_line_length: 21.229814 · max_line_length: 81 · alphanum_fraction: 0.59567

**`raft/server.py`**
- hexsha: 8d812df5a692e796ff222fea6f8e9695ae5cccfc · size: 9,593 · ext: py · lang: Python
- repo (stars/issues/forks fields): guilload/aio-raft @ 3c6ca593996a26addf54e29ce1abdfb5013f2361 · licenses: ["MIT"]
- max_stars_count: 2 (2015-07-23T15:11:12.000Z → 2022-02-16T06:06:21.000Z) · max_issues_count: null · max_forks_count: null
- content:
```python
import asyncio
import json
import logging
import sys
from enum import Enum
from random import randint
from uuid import uuid4
from log import Log
from machine import Machine
from pool import Pool
from protocols import ClientProtocol, ServerProtocol
class State(Enum):
LEADER = 'Leader'
FOLLOWER = 'Follower'
CANDIDATE = 'Candidate'
class Server(object):
def __init__(self, peers, host, port):
self.host = host
self.port = port
self.peer_id = '{}:{}'.format(host, port)
self._logger = logging.getLogger(__name__)
self._loop = asyncio.get_event_loop()
self._pool = Pool(self, peers)
# heartbeat constants and bookkeeping variables
self._heartbeat_interval = 1000 # ms
self._last_interval = None
self._min_heartbeat_timeout = 2000 # ms
self._max_heartbeat_timeout = 4000 # ms
self._heartbeat_timeout = None
self._last_heartbeat = None
self.reset_heartbeat()
self.reset_timeout()
self._log = Log(Machine())
self.state = State.FOLLOWER
self.term = 0
self.voted = None
self.votes = set()
self._pending_clients = {}
self.handlers = {'append_entries_req': self.handle_append_entries_req,
'append_entries_resp': self.handle_append_entries_resp,
'request_vote_req': self.handle_request_vote_req,
'request_vote_resp': self.handle_request_vote_resp}
def reset_heartbeat(self):
self._last_heartbeat = self._loop.time()
def reset_interval(self):
self._last_interval = self._loop.time()
def reset_timeout(self):
self._heartbeat_timeout = randint(self._min_heartbeat_timeout,
self._max_heartbeat_timeout) / 1000
@property
def stale(self):
return self._last_heartbeat + self._heartbeat_timeout < self._loop.time()
@staticmethod
def decode(data):
return json.loads(data.decode())
@staticmethod
def encode(data):
return json.dumps(data).encode()
def broadcast(self, request):
for peer in self._pool:
self.send_async(peer, request)
@asyncio.coroutine
def run(self):
self.reset_interval()
while True:
self._logger.debug('state: {}, term: {}'.format(self.state,
self.term))
if self.state == State.LEADER:
self.append_entries()
if self.state in (State.CANDIDATE, State.FOLLOWER) and self.stale:
self.request_vote()
yield from self.wait()
@asyncio.coroutine
def send(self, peer, request):
"""
Send a request to a peer (if available).
"""
transport = yield from peer.get_transport()
if transport:
transport.write(self.encode(request))
def send_async(self, peer, request):
"""
Schedule the execution
"""
asyncio.async(self.send(peer, request))
@asyncio.coroutine
def wait(self):
"""
Wait for the next interval.
"""
tic = self._heartbeat_interval / 1000 - self._loop.time() + self._last_interval
yield from asyncio.sleep(tic)
self.reset_interval()
def to_leader(self):
self.append_entries()
self.state = State.LEADER
self.voted = None
self.votes = set()
for peer in self._pool:
peer.match = -1
peer.next = self._log.index + 1
def to_follower(self, term):
self.state = State.FOLLOWER
self.term = term
self.voted = None
self.votes = set()
def append_entries(self, peer=None):
"""
Append entries RPC.
"""
peers = self._pool.all() if peer is None else [peer]
for peer in peers:
log_entries = self._log[peer.next:]
log_index, log_term, _ = self._log[peer.next - 1]
request = {'rpc': 'append_entries_req',
'peer_id': self.peer_id,
'term': self.term,
'log_commit': self._log.commit,
'log_entries': log_entries,
'log_index': log_index,
'log_term': log_term,
}
self.send_async(peer, request)
self._logger.debug('broadcasting append entries')
def request_vote(self):
"""
Request vote RPC.
"""
self.reset_heartbeat()
self.reset_timeout()
self.state = State.CANDIDATE
self.term += 1
self.voted = self.peer_id
self.votes = set([self.peer_id])
request = {'rpc': 'request_vote_req',
'peer_id': self.peer_id,
'term': self.term,
'log_index': self._log.index,
'log_term': self._log.term,
}
self.broadcast(request)
self._logger.debug('broadcasting request vote')
def handle_peer(self, request):
"""
Dispatch requests to the appropriate handlers.
"""
if self.term < request['term']:
self.to_follower(request['term'])
return self.handlers[request['rpc']](request)
def handle_append_entries_req(self, request):
self._logger.debug('append entries request received')
if request['term'] < self.term:
return
self.reset_heartbeat()
log_index = request['log_index']
log_term = request['log_term']
if not self._log.match(log_index, log_term):
return {'rpc': 'append_entries_resp',
'peer_id': self.peer_id,
'term': self.term,
'log_index': self._log.index,
'success': False
}
log_entries = request['log_entries']
self._log.append(log_index, log_entries)
log_commit = request['log_commit']
if self._log.commit < log_commit:
index = min(self._log.index, log_commit)
self._log.commit = index
self._log.apply(index)
if not log_entries: # no need to answer, the peer might have committed
return # new entries but has certainly not replicated new ones
return {'rpc': 'append_entries_resp',
'peer_id': self.peer_id,
'term': self.term,
'log_index': self._log.index,
'log_term': self._log.term,
'success': True,
}
def handle_append_entries_resp(self, response):
if response['success']:
self._logger.debug('append entries succeeded')
log_index = response['log_index']
log_term = response['log_term']
peer_id = response['peer_id']
self._pool[peer_id].match = log_index
self._pool[peer_id].next = log_index + 1
if (self._log.commit < log_index and
self._pool.ack(log_index) and log_term == self.term):
self._log.commit = log_index
results = self._log.apply(log_index)
self.return_results(results)
else:
peer = self._pool[response['peer_id']]
peer.next -= 1
self.append_entries(peer)
# self._logger.debug('append entries failed')
def handle_request_vote_req(self, request):
self._logger.debug('request vote request received')
if request['term'] < self.term:
return
log_index = request['log_index']
log_term = request['log_term']
peer_id = request['peer_id']
if self.voted in (None, peer_id) and self._log.match(log_index, log_term):
granted = True
self.reset_heartbeat()
else:
granted = False
return {'rpc': 'request_vote_resp',
'peer_id': self.peer_id,
'term': self.term,
'granted': granted,
}
def handle_request_vote_resp(self, response):
if self.term == response['term'] and response['granted']:
self.votes.add(response['peer_id'])
if self._pool.majority(len(self.votes)):
self.to_leader()
def handle_client(self, cmd, transport):
self._log.add(self.term, cmd)
self._pending_clients[(self.term, self._log.index)] = transport
self.append_entries()
def return_results(self, results):
for result in results:
term, index, result = result
transport = self._pending_clients.pop((term, index))
transport.write(self.encode(result))
transport.close()
def run(port, ports):
loop = asyncio.get_event_loop()
peers = [('localhost', port) for port in ports]
raft = Server(peers, 'localhost', port)
client = loop.create_server(lambda: ClientProtocol(raft), 'localhost', port + 10)
server = loop.create_server(lambda: ServerProtocol(raft), 'localhost', port)
coroutines = asyncio.gather(client, server, raft.run())
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# server.close()
# loop.run_until_complete(server.wait_closed())
# loop.close()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
port, *ports = map(int, sys.argv[1:])
run(port, ports)
```
- avg_line_length: 29.69969 · max_line_length: 87 · alphanum_fraction: 0.569686

**`snakeeyes/blueprints/billing/views/billing.py`**
- hexsha: 9a898fb1e871fbc0a5eb1651d326dd0f73487717 · size: 7,221 · ext: py · lang: Python
- repo (stars/issues/forks fields): Pythonian/bsawf @ 3e422a81cfb1b157119473c20b94a9a01f8b9672 · licenses: ["MIT"]
- max_stars_count: null · max_issues_count: null · max_forks_count: null
- content:
```python
from flask import (Blueprint, current_app, flash, redirect, render_template,
request, url_for)
from flask_login import current_user, login_required
from config import settings
from snakeeyes.blueprints.billing.decorators import (handle_stripe_exceptions,
subscription_required)
from snakeeyes.blueprints.billing.forms import (CancelSubscriptionForm,
CreditCardForm,
UpdateSubscriptionForm)
from snakeeyes.blueprints.billing.models.coupon import Coupon
from snakeeyes.blueprints.billing.models.invoice import Invoice
from snakeeyes.blueprints.billing.models.subscription import Subscription
from utils.util_json import render_json
billing = Blueprint('billing', __name__, template_folder='../templates',
url_prefix='/subscription')
@billing.route('/pricing')
def pricing():
if current_user.is_authenticated and current_user.subscription:
return redirect(url_for('billing.update'))
form = UpdateSubscriptionForm()
return render_template('billing/pricing.html', form=form,
plans=settings.STRIPE_PLANS)
@billing.route('/coupon_code', methods=['POST'])
@login_required
def coupon_code():
code = request.form.get('coupon_code')
if code is None:
return render_json(422,
{'error': 'Coupon code cannot be processed.'})
coupon = Coupon.find_by_code(code)
if coupon is None:
return render_json(404, {'error': 'Coupon code not found.'})
return render_json(200, {'data': coupon.to_json()})
@billing.route('/create', methods=['GET', 'POST'])
@handle_stripe_exceptions
@login_required
def create():
if current_user.subscription:
flash('You already have an active subscription.', 'info')
return redirect(url_for('user.settings'))
plan = request.args.get('plan')
subscription_plan = Subscription.get_plan_by_id(plan)
# Guard against an invalid or missing plan.
if subscription_plan is None and request.method == 'GET':
flash('Sorry, that plan did not exist.', 'error')
return redirect(url_for('billing.pricing'))
stripe_key = current_app.config.get('STRIPE_PUBLISHABLE_KEY')
form = CreditCardForm(stripe_key=stripe_key, plan=plan)
if form.validate_on_submit():
subscription = Subscription()
created = subscription.create(user=current_user,
name=request.form.get('name'),
plan=request.form.get('plan'),
coupon=request.form.get('coupon_code'),
token=request.form.get('stripe_token'))
if created:
flash('Awesome, thanks for subscribing!', 'success')
else:
flash('You must enable JavaScript for this request.', 'warning')
return redirect(url_for('user.settings'))
return render_template('billing/payment_method.html',
form=form, plan=subscription_plan)
@billing.route('/update', methods=['GET', 'POST'])
@handle_stripe_exceptions
@subscription_required
@login_required
def update():
current_plan = current_user.subscription.plan
active_plan = Subscription.get_plan_by_id(current_plan)
new_plan = Subscription.get_new_plan(request.form.keys())
plan = Subscription.get_plan_by_id(new_plan)
# Guard against an invalid, missing or identical plan.
is_same_plan = new_plan == active_plan['id']
if ((new_plan is not None and plan is None) or is_same_plan) and \
request.method == 'POST':
return redirect(url_for('billing.update'))
form = UpdateSubscriptionForm(coupon_code=current_user.subscription.coupon)
if form.validate_on_submit():
subscription = Subscription()
updated = subscription.update(user=current_user,
coupon=request.form.get('coupon_code'),
plan=plan.get('id'))
if updated:
flash('Your subscription has been updated.', 'success')
return redirect(url_for('user.settings'))
return render_template('billing/pricing.html',
form=form,
plans=settings.STRIPE_PLANS,
active_plan=active_plan)
@billing.route('/cancel', methods=['GET', 'POST'])
@handle_stripe_exceptions
@login_required
def cancel():
if not current_user.subscription:
flash('You do not have an active subscription.', 'error')
return redirect(url_for('user.settings'))
form = CancelSubscriptionForm()
if form.validate_on_submit():
subscription = Subscription()
cancelled = subscription.cancel(user=current_user)
if cancelled:
flash('Sorry to see you go, your subscription has been cancelled.',
'success')
return redirect(url_for('user.settings'))
return render_template('billing/cancel.html', form=form)
@billing.route('/update_payment_method', methods=['GET', 'POST'])
@handle_stripe_exceptions
@login_required
def update_payment_method():
if not current_user.credit_card:
flash('You do not have a payment method on file.', 'error')
return redirect(url_for('user.settings'))
active_plan = Subscription.get_plan_by_id(
current_user.subscription.plan)
card = current_user.credit_card
stripe_key = current_app.config.get('STRIPE_PUBLISHABLE_KEY')
form = CreditCardForm(stripe_key=stripe_key,
plan=active_plan,
name=current_user.name)
if form.validate_on_submit():
subscription = Subscription()
updated = subscription.update_payment_method(user=current_user,
credit_card=card,
name=request.form.get(
'name'),
token=request.form.get(
'stripe_token'))
if updated:
flash('Your payment method has been updated.', 'success')
else:
flash('You must enable JavaScript for this request.', 'warning')
return redirect(url_for('user.settings'))
return render_template('billing/payment_method.html', form=form,
plan=active_plan, card_last4=str(card.last4))
@billing.route('/billing_details')
@handle_stripe_exceptions
@login_required
def billing_details():
invoices = Invoice.billing_history(current_user)
if current_user.subscription:
upcoming = Invoice.upcoming(current_user.payment_id)
coupon = Coupon.query \
.filter(Coupon.code == current_user.subscription.coupon).first()
else:
upcoming = None
coupon = None
return render_template('billing/billing_details.html',
invoices=invoices, upcoming=upcoming, coupon=coupon)
```
- avg_line_length: 37.221649 · max_line_length: 79 · alphanum_fraction: 0.62429

**`src/MpesaAPI/wsgi.py`**
- hexsha: 4bcabca8ad0ec3768c301f665d2e898cc3432f07 · size: 393 · ext: py · lang: Python
- repo (stars/issues/forks fields): EdwinMurimi/django-daraja @ 367ea6000c5097ec5941c28907300a8fecc505f8 · licenses: ["MIT"]
- max_stars_count: null · max_issues_count: null · max_forks_count: null
- content:
```python
"""
WSGI config for MpesaAPI project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MpesaAPI.settings')
application = get_wsgi_application()
```
- avg_line_length: 23.117647 · max_line_length: 78 · alphanum_fraction: 0.78626

**`bigquery/tests/unit/test_dbapi__helpers.py`**
- hexsha: 48bca5ae9a59423fb329fce2c3a0ad15ddf8c427 · size: 3,953 · ext: py · lang: Python
- repo (stars/issues/forks fields): rodrigodias27/google-cloud-python @ 7d1161f70744c0dbbe67a3f472ea95667eaafe50 · licenses: ["Apache-2.0"]
- max_stars_count: 1 (2021-01-04T11:40:17.000Z → 2021-01-04T11:40:17.000Z) · max_issues_count: null · max_forks_count: null
- content:
```python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import math
import unittest
import google.cloud._helpers
from google.cloud.bigquery.dbapi import _helpers
from google.cloud.bigquery.dbapi import exceptions
class TestQueryParameters(unittest.TestCase):
def test_scalar_to_query_parameter(self):
expected_types = [
(True, 'BOOL'),
(False, 'BOOL'),
(123, 'INT64'),
(-123456789, 'INT64'),
(1.25, 'FLOAT64'),
(b'I am some bytes', 'BYTES'),
(u'I am a string', 'STRING'),
(datetime.date(2017, 4, 1), 'DATE'),
(datetime.time(12, 34, 56), 'TIME'),
(datetime.datetime(2012, 3, 4, 5, 6, 7), 'DATETIME'),
(
datetime.datetime(
2012, 3, 4, 5, 6, 7, tzinfo=google.cloud._helpers.UTC),
'TIMESTAMP',
),
]
for value, expected_type in expected_types:
msg = 'value: {} expected_type: {}'.format(value, expected_type)
parameter = _helpers.scalar_to_query_parameter(value)
self.assertIsNone(parameter.name, msg=msg)
self.assertEqual(parameter.type_, expected_type, msg=msg)
self.assertEqual(parameter.value, value, msg=msg)
named_parameter = _helpers.scalar_to_query_parameter(
value, name='myvar')
self.assertEqual(named_parameter.name, 'myvar', msg=msg)
self.assertEqual(named_parameter.type_, expected_type, msg=msg)
self.assertEqual(named_parameter.value, value, msg=msg)
def test_scalar_to_query_parameter_w_unexpected_type(self):
with self.assertRaises(exceptions.ProgrammingError):
_helpers.scalar_to_query_parameter(value={'a': 'dictionary'})
def test_scalar_to_query_parameter_w_special_floats(self):
nan_parameter = _helpers.scalar_to_query_parameter(float('nan'))
self.assertTrue(math.isnan(nan_parameter.value))
self.assertEqual(nan_parameter.type_, 'FLOAT64')
inf_parameter = _helpers.scalar_to_query_parameter(float('inf'))
self.assertTrue(math.isinf(inf_parameter.value))
self.assertEqual(inf_parameter.type_, 'FLOAT64')
def test_to_query_parameters_w_dict(self):
parameters = {
'somebool': True,
'somestring': u'a-string-value',
}
query_parameters = _helpers.to_query_parameters(parameters)
query_parameter_tuples = []
for param in query_parameters:
query_parameter_tuples.append(
(param.name, param.type_, param.value))
self.assertSequenceEqual(
sorted(query_parameter_tuples),
sorted([
('somebool', 'BOOL', True),
('somestring', 'STRING', u'a-string-value'),
]))
def test_to_query_parameters_w_list(self):
parameters = [True, u'a-string-value']
query_parameters = _helpers.to_query_parameters(parameters)
query_parameter_tuples = []
for param in query_parameters:
query_parameter_tuples.append(
(param.name, param.type_, param.value))
self.assertSequenceEqual(
sorted(query_parameter_tuples),
sorted([
(None, 'BOOL', True),
(None, 'STRING', u'a-string-value'),
]))
```
- avg_line_length: 40.336735 · max_line_length: 76 · alphanum_fraction: 0.630407

**`nltk/text.py`**
- hexsha: ee4d6d8a0b7f26e246800ff0f570032d9a2752f7 · size: 25,399 · ext: py · lang: Python
- repo (stars/issues/forks fields): yigitsever/nltk @ 952a9636db4b1178dceb1789c98d56161deda3ab · licenses: ["Apache-2.0"]
- max_stars_count: 4 (2020-02-05T11:26:47.000Z → 2021-05-26T07:48:46.000Z) · max_issues_count: 1 (2021-04-30T17:27:07.000Z → 2021-04-30T17:27:07.000Z) · max_forks_count: 1 (2021-05-12T07:12:02.000Z → 2021-05-12T07:12:02.000Z)
- content:
```python
# Natural Language Toolkit: Texts
#
# Copyright (C) 2001-2019 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
This module brings together a variety of NLTK functionality for
text analysis, and provides simple, interactive interfaces.
Functionality includes: concordancing, collocation discovery,
regular expression search over tokenized strings, and
distributional similarity.
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from math import log
from collections import defaultdict, Counter, namedtuple
from functools import reduce
import re
from six import text_type
from nltk.probability import FreqDist
from nltk.probability import ConditionalFreqDist as CFD
from nltk.util import tokenwrap, LazyConcatenation
from nltk.metrics import f_measure, BigramAssocMeasures
from nltk.collocations import BigramCollocationFinder
from nltk.compat import python_2_unicode_compatible
ConcordanceLine = namedtuple(
'ConcordanceLine',
['left', 'query', 'right', 'offset', 'left_print', 'right_print', 'line'],
)
class ContextIndex(object):
"""
A bidirectional index between words and their 'contexts' in a text.
The context of a word is usually defined to be the words that occur
in a fixed window around the word; but other definitions may also
be used by providing a custom context function.
"""
@staticmethod
def _default_context(tokens, i):
"""One left token and one right token, normalized to lowercase"""
left = tokens[i - 1].lower() if i != 0 else '*START*'
right = tokens[i + 1].lower() if i != len(tokens) - 1 else '*END*'
return (left, right)
def __init__(self, tokens, context_func=None, filter=None, key=lambda x: x):
self._key = key
self._tokens = tokens
if context_func:
self._context_func = context_func
else:
self._context_func = self._default_context
if filter:
tokens = [t for t in tokens if filter(t)]
self._word_to_contexts = CFD(
(self._key(w), self._context_func(tokens, i)) for i, w in enumerate(tokens)
)
self._context_to_words = CFD(
(self._context_func(tokens, i), self._key(w)) for i, w in enumerate(tokens)
)
def tokens(self):
"""
:rtype: list(str)
:return: The document that this context index was
created from.
"""
return self._tokens
def word_similarity_dict(self, word):
"""
Return a dictionary mapping from words to 'similarity scores,'
indicating how often these two words occur in the same
context.
"""
word = self._key(word)
word_contexts = set(self._word_to_contexts[word])
scores = {}
for w, w_contexts in self._word_to_contexts.items():
scores[w] = f_measure(word_contexts, set(w_contexts))
return scores
def similar_words(self, word, n=20):
scores = defaultdict(int)
for c in self._word_to_contexts[self._key(word)]:
for w in self._context_to_words[c]:
if w != word:
scores[w] += (
self._context_to_words[c][word] * self._context_to_words[c][w]
)
return sorted(scores, key=scores.get, reverse=True)[:n]
def common_contexts(self, words, fail_on_unknown=False):
"""
Find contexts where the specified words can all appear; and
return a frequency distribution mapping each context to the
number of times that context was used.
:param words: The words used to seed the similarity search
:type words: str
:param fail_on_unknown: If true, then raise a value error if
any of the given words do not occur at all in the index.
"""
words = [self._key(w) for w in words]
contexts = [set(self._word_to_contexts[w]) for w in words]
empty = [words[i] for i in range(len(words)) if not contexts[i]]
common = reduce(set.intersection, contexts)
if empty and fail_on_unknown:
raise ValueError("The following word(s) were not found:", " ".join(words))
elif not common:
# nothing in common -- just return an empty freqdist.
return FreqDist()
else:
fd = FreqDist(
c for w in words for c in self._word_to_contexts[w] if c in common
)
return fd
@python_2_unicode_compatible
class ConcordanceIndex(object):
"""
An index that can be used to look up the offset locations at which
a given word occurs in a document.
"""
def __init__(self, tokens, key=lambda x: x):
"""
Construct a new concordance index.
:param tokens: The document (list of tokens) that this
concordance index was created from. This list can be used
to access the context of a given word occurrence.
:param key: A function that maps each token to a normalized
version that will be used as a key in the index. E.g., if
you use ``key=lambda s:s.lower()``, then the index will be
case-insensitive.
"""
self._tokens = tokens
"""The document (list of tokens) that this concordance index
was created from."""
self._key = key
"""Function mapping each token to an index key (or None)."""
self._offsets = defaultdict(list)
"""Dictionary mapping words (or keys) to lists of offset indices."""
# Initialize the index (self._offsets)
for index, word in enumerate(tokens):
word = self._key(word)
self._offsets[word].append(index)
def tokens(self):
"""
:rtype: list(str)
:return: The document that this concordance index was
created from.
"""
return self._tokens
def offsets(self, word):
"""
:rtype: list(int)
:return: A list of the offset positions at which the given
word occurs. If a key function was specified for the
index, then given word's key will be looked up.
"""
word = self._key(word)
return self._offsets[word]
def __repr__(self):
return '<ConcordanceIndex for %d tokens (%d types)>' % (
len(self._tokens),
len(self._offsets),
)
def find_concordance(self, word, width=80):
"""
Find all concordance lines given the query word.
"""
half_width = (width - len(word) - 2) // 2
context = width // 4 # approx number of words of context
# Find the instances of the word to create the ConcordanceLine
concordance_list = []
offsets = self.offsets(word)
if offsets:
for i in offsets:
query_word = self._tokens[i]
# Find the context of query word.
left_context = self._tokens[max(0, i - context) : i]
right_context = self._tokens[i + 1 : i + context]
# Create the pretty lines with the query_word in the middle.
left_print = ' '.join(left_context)[-half_width:]
right_print = ' '.join(right_context)[:half_width]
# The WYSIWYG line of the concordance.
line_print = ' '.join([left_print, query_word, right_print])
# Create the ConcordanceLine
concordance_line = ConcordanceLine(
left_context,
query_word,
right_context,
i,
left_print,
right_print,
line_print,
)
concordance_list.append(concordance_line)
return concordance_list
def print_concordance(self, word, width=80, lines=25):
"""
Print concordance lines given the query word.
:param word: The target word
:type word: str
:param lines: The number of lines to display (default=25)
:type lines: int
:param width: The width of each line, in characters (default=80)
:type width: int
:param save: The option to save the concordance.
:type save: bool
"""
concordance_list = self.find_concordance(word, width=width)
if not concordance_list:
print("no matches")
else:
lines = min(lines, len(concordance_list))
print("Displaying {} of {} matches:".format(lines, len(concordance_list)))
for i, concordance_line in enumerate(concordance_list[:lines]):
print(concordance_line.line)
class TokenSearcher(object):
"""
A class that makes it easier to use regular expressions to search
over tokenized strings. The tokenized string is converted to a
string where tokens are marked with angle brackets -- e.g.,
``'<the><window><is><still><open>'``. The regular expression
passed to the ``findall()`` method is modified to treat angle
brackets as non-capturing parentheses, in addition to matching the
token boundaries; and to have ``'.'`` not match the angle brackets.
"""
def __init__(self, tokens):
self._raw = ''.join('<' + w + '>' for w in tokens)
def findall(self, regexp):
"""
Find instances of the regular expression in the text.
The text is a list of tokens, and a regexp pattern to match
a single token must be surrounded by angle brackets. E.g.
>>> from nltk.text import TokenSearcher
>>> print('hack'); from nltk.book import text1, text5, text9
hack...
>>> text5.findall("<.*><.*><bro>")
you rule bro; telling you bro; u twizted bro
>>> text1.findall("<a>(<.*>)<man>")
monied; nervous; dangerous; white; white; white; pious; queer; good;
mature; white; Cape; great; wise; wise; butterless; white; fiendish;
pale; furious; better; certain; complete; dismasted; younger; brave;
brave; brave; brave
>>> text9.findall("<th.*>{3,}")
thread through those; the thought that; that the thing; the thing
that; that that thing; through these than through; them that the;
through the thick; them that they; thought that the
:param regexp: A regular expression
:type regexp: str
"""
# preprocess the regular expression
regexp = re.sub(r'\s', '', regexp)
regexp = re.sub(r'<', '(?:<(?:', regexp)
regexp = re.sub(r'>', ')>)', regexp)
regexp = re.sub(r'(?<!\\)\.', '[^>]', regexp)
# perform the search
hits = re.findall(regexp, self._raw)
# Sanity check
for h in hits:
if not h.startswith('<') and h.endswith('>'):
raise ValueError('Bad regexp for TokenSearcher.findall')
# postprocess the output
hits = [h[1:-1].split('><') for h in hits]
return hits
@python_2_unicode_compatible
class Text(object):
"""
A wrapper around a sequence of simple (string) tokens, which is
intended to support initial exploration of texts (via the
interactive console). Its methods perform a variety of analyses
on the text's contexts (e.g., counting, concordancing, collocation
discovery), and display the results. If you wish to write a
program which makes use of these analyses, then you should bypass
the ``Text`` class, and use the appropriate analysis function or
class directly instead.
A ``Text`` is typically initialized from a given document or
corpus. E.g.:
>>> import nltk.corpus
>>> from nltk.text import Text
>>> moby = Text(nltk.corpus.gutenberg.words('melville-moby_dick.txt'))
"""
# This defeats lazy loading, but makes things faster. This
# *shouldn't* be necessary because the corpus view *should* be
# doing intelligent caching, but without this it's running slow.
# Look into whether the caching is working correctly.
_COPY_TOKENS = True
def __init__(self, tokens, name=None):
"""
Create a Text object.
:param tokens: The source text.
:type tokens: sequence of str
"""
if self._COPY_TOKENS:
tokens = list(tokens)
self.tokens = tokens
if name:
self.name = name
elif ']' in tokens[:20]:
end = tokens[:20].index(']')
self.name = " ".join(text_type(tok) for tok in tokens[1:end])
else:
self.name = " ".join(text_type(tok) for tok in tokens[:8]) + "..."
# ////////////////////////////////////////////////////////////
# Support item & slice access
# ////////////////////////////////////////////////////////////
def __getitem__(self, i):
return self.tokens[i]
def __len__(self):
return len(self.tokens)
# ////////////////////////////////////////////////////////////
# Interactive console methods
# ////////////////////////////////////////////////////////////
def concordance(self, word, width=79, lines=25):
"""
Prints a concordance for ``word`` with the specified context window.
Word matching is not case-sensitive.
:param word: The target word
:type word: str
:param width: The width of each line, in characters (default=80)
:type width: int
:param lines: The number of lines to display (default=25)
:type lines: int
:seealso: ``ConcordanceIndex``
"""
if '_concordance_index' not in self.__dict__:
self._concordance_index = ConcordanceIndex(
self.tokens, key=lambda s: s.lower()
)
return self._concordance_index.print_concordance(word, width, lines)
def concordance_list(self, word, width=79, lines=25):
"""
Generate a concordance for ``word`` with the specified context window.
Word matching is not case-sensitive.
:param word: The target word
:type word: str
:param width: The width of each line, in characters (default=80)
:type width: int
:param lines: The number of lines to display (default=25)
:type lines: int
:seealso: ``ConcordanceIndex``
"""
if '_concordance_index' not in self.__dict__:
self._concordance_index = ConcordanceIndex(
self.tokens, key=lambda s: s.lower()
)
return self._concordance_index.find_concordance(word, width)[:lines]
def collocation_list(self, num=20, window_size=2):
"""
Return collocations derived from the text, ignoring stopwords.
:param num: The maximum number of collocations to return.
:type num: int
:param window_size: The number of tokens spanned by a collocation (default=2)
:type window_size: int
"""
if not (
'_collocations' in self.__dict__
and self._num == num
and self._window_size == window_size
):
self._num = num
self._window_size = window_size
# print("Building collocations list")
from nltk.corpus import stopwords
ignored_words = stopwords.words('english')
finder = BigramCollocationFinder.from_words(self.tokens, window_size)
finder.apply_freq_filter(2)
finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
bigram_measures = BigramAssocMeasures()
self._collocations = finder.nbest(bigram_measures.likelihood_ratio, num)
return [w1 + ' ' + w2 for w1, w2 in self._collocations]
def collocations(self, num=20, window_size=2):
"""
Print collocations derived from the text, ignoring stopwords.
:param num: The maximum number of collocations to print.
:type num: int
:param window_size: The number of tokens spanned by a collocation (default=2)
:type window_size: int
"""
collocation_strings = [w1 + ' ' + w2 for w1, w2 in self.collocation_list(num, window_size)]
print(tokenwrap(collocation_strings, separator="; "))
def count(self, word):
"""
Count the number of times this word appears in the text.
"""
return self.tokens.count(word)
def index(self, word):
"""
Find the index of the first occurrence of the word in the text.
"""
return self.tokens.index(word)
def readability(self, method):
# code from nltk_contrib.readability
raise NotImplementedError
def similar(self, word, num=20):
"""
Distributional similarity: find other words which appear in the
same contexts as the specified word; list most similar words first.
:param word: The word used to seed the similarity search
:type word: str
:param num: The number of words to generate (default=20)
:type num: int
:seealso: ContextIndex.similar_words()
"""
if '_word_context_index' not in self.__dict__:
# print('Building word-context index...')
self._word_context_index = ContextIndex(
self.tokens, filter=lambda x: x.isalpha(), key=lambda s: s.lower()
)
# words = self._word_context_index.similar_words(word, num)
word = word.lower()
wci = self._word_context_index._word_to_contexts
if word in wci.conditions():
contexts = set(wci[word])
fd = Counter(
w
for w in wci.conditions()
for c in wci[w]
if c in contexts and not w == word
)
words = [w for w, _ in fd.most_common(num)]
print(tokenwrap(words))
else:
print("No matches")
def common_contexts(self, words, num=20):
"""
Find contexts where the specified words appear; list
most frequent common contexts first.
:param words: The words used to seed the similarity search
:type words: str
:param num: The number of words to generate (default=20)
:type num: int
:seealso: ContextIndex.common_contexts()
"""
if '_word_context_index' not in self.__dict__:
# print('Building word-context index...')
self._word_context_index = ContextIndex(
self.tokens, key=lambda s: s.lower()
)
try:
fd = self._word_context_index.common_contexts(words, True)
if not fd:
print("No common contexts were found")
else:
ranked_contexts = [w for w, _ in fd.most_common(num)]
print(tokenwrap(w1 + "_" + w2 for w1, w2 in ranked_contexts))
except ValueError as e:
print(e)
def dispersion_plot(self, words):
"""
Produce a plot showing the distribution of the words through the text.
Requires pylab to be installed.
:param words: The words to be plotted
:type words: list(str)
:seealso: nltk.draw.dispersion_plot()
"""
from nltk.draw import dispersion_plot
dispersion_plot(self, words)
def generate(self, words):
"""
Issues a reminder to users following the book online
"""
import warnings
warnings.warn(
'The generate() method is no longer available.', DeprecationWarning
)
def plot(self, *args):
"""
See documentation for FreqDist.plot()
:seealso: nltk.prob.FreqDist.plot()
"""
self.vocab().plot(*args)
def vocab(self):
"""
:seealso: nltk.prob.FreqDist
"""
if "_vocab" not in self.__dict__:
# print("Building vocabulary index...")
self._vocab = FreqDist(self)
return self._vocab
def findall(self, regexp):
"""
Find instances of the regular expression in the text.
The text is a list of tokens, and a regexp pattern to match
a single token must be surrounded by angle brackets. E.g.
>>> print('hack'); from nltk.book import text1, text5, text9
hack...
>>> text5.findall("<.*><.*><bro>")
you rule bro; telling you bro; u twizted bro
>>> text1.findall("<a>(<.*>)<man>")
monied; nervous; dangerous; white; white; white; pious; queer; good;
mature; white; Cape; great; wise; wise; butterless; white; fiendish;
pale; furious; better; certain; complete; dismasted; younger; brave;
brave; brave; brave
>>> text9.findall("<th.*>{3,}")
thread through those; the thought that; that the thing; the thing
that; that that thing; through these than through; them that the;
through the thick; them that they; thought that the
:param regexp: A regular expression
:type regexp: str
"""
if "_token_searcher" not in self.__dict__:
self._token_searcher = TokenSearcher(self)
hits = self._token_searcher.findall(regexp)
hits = [' '.join(h) for h in hits]
print(tokenwrap(hits, "; "))
# ////////////////////////////////////////////////////////////
# Helper Methods
# ////////////////////////////////////////////////////////////
_CONTEXT_RE = re.compile('\w+|[\.\!\?]')
def _context(self, tokens, i):
"""
One left & one right token, both case-normalized. Skip over
non-sentence-final punctuation. Used by the ``ContextIndex``
that is created for ``similar()`` and ``common_contexts()``.
"""
# Left context
j = i - 1
while j >= 0 and not self._CONTEXT_RE.match(tokens[j]):
j -= 1
left = tokens[j] if j != 0 else '*START*'
# Right context
j = i + 1
while j < len(tokens) and not self._CONTEXT_RE.match(tokens[j]):
j += 1
right = tokens[j] if j != len(tokens) else '*END*'
return (left, right)
# ////////////////////////////////////////////////////////////
# String Display
# ////////////////////////////////////////////////////////////
def __str__(self):
return '<Text: %s>' % self.name
def __repr__(self):
return '<Text: %s>' % self.name
# Prototype only; this approach will be slow to load
class TextCollection(Text):
"""A collection of texts, which can be loaded with list of texts, or
with a corpus consisting of one or more texts, and which supports
counting, concordancing, collocation discovery, etc. Initialize a
TextCollection as follows:
>>> import nltk.corpus
>>> from nltk.text import TextCollection
>>> print('hack'); from nltk.book import text1, text2, text3
hack...
>>> gutenberg = TextCollection(nltk.corpus.gutenberg)
>>> mytexts = TextCollection([text1, text2, text3])
Iterating over a TextCollection produces all the tokens of all the
texts in order.
"""
def __init__(self, source):
if hasattr(source, 'words'): # bridge to the text corpus reader
source = [source.words(f) for f in source.fileids()]
self._texts = source
Text.__init__(self, LazyConcatenation(source))
self._idf_cache = {}
def tf(self, term, text):
""" The frequency of the term in text. """
return text.count(term) / len(text)
def idf(self, term):
""" The number of texts in the corpus divided by the
number of texts that the term appears in.
If a term does not appear in the corpus, 0.0 is returned. """
# idf values are cached for performance.
idf = self._idf_cache.get(term)
if idf is None:
matches = len([True for text in self._texts if term in text])
if len(self._texts) == 0:
raise ValueError('IDF undefined for empty document collection')
idf = log(len(self._texts) / matches) if matches else 0.0
self._idf_cache[term] = idf
return idf
def tf_idf(self, term, text):
return self.tf(term, text) * self.idf(term)
def demo():
from nltk.corpus import brown
text = Text(brown.words(categories='news'))
print(text)
print()
print("Concordance:")
text.concordance('news')
print()
print("Distributionally similar words:")
text.similar('news')
print()
print("Collocations:")
text.collocations()
print()
# print("Automatically generated text:")
# text.generate()
# print()
print("Dispersion plot:")
text.dispersion_plot(['news', 'report', 'said', 'announced'])
print()
print("Vocabulary plot:")
text.plot(50)
print()
print("Indexing:")
print("text[3]:", text[3])
print("text[3:5]:", text[3:5])
print("text.vocab()['news']:", text.vocab()['news'])
if __name__ == '__main__':
demo()
__all__ = [
"ContextIndex",
"ConcordanceIndex",
"TokenSearcher",
"Text",
"TextCollection",
]
```
- avg_line_length: 35.473464 · max_line_length: 99 · alphanum_fraction: 0.589196

**`test/pybind_test/multi_node_dcn_4node_2gpu.py`**
- hexsha: 3055847e43da805015f37bcfb04096c0ff642d02 · size: 2,173 · ext: py · lang: Python
- repo (stars/issues/forks fields): quinnrong94/HugeCTR @ 1068dc48b05a1219b393144dd3b61a1749f232df · licenses: ["Apache-2.0"]
- max_stars_count: 1 (2021-06-04T04:03:54.000Z → 2021-06-04T04:03:54.000Z) · max_issues_count: null · max_forks_count: null
- content:
```python
from hugectr import Session, solver_parser_helper, get_learning_rate_scheduler
from mpi4py import MPI
import threading
import sys
def session_impl_test(json_file):
solver_config = solver_parser_helper(seed = 0,
batchsize = 16384,
batchsize_eval = 16384,
model_file = "",
embedding_files = [],
vvgpu = [[0,1],[2,3],[4,5],[6,7]],
use_mixed_precision = False,
scaler = 1.0,
i64_input_key = False,
use_algorithm_search = True,
use_cuda_graph = True,
repeat_dataset = True
)
sess = Session(solver_config, json_file)
sess.start_data_reading()
lr_sch = get_learning_rate_scheduler(json_file)
for i in range(10000):
lr = lr_sch.get_next()
sess.set_learning_rate(lr)
sess.train()
if (i%100 == 0):
loss = sess.get_current_loss()
if (rank == 0):
print("[HUGECTR][INFO] iter: {}; loss: {}".format(i, loss))
if (i%1000 == 0 and i != 0):
sess.check_overflow()
sess.copy_weights_for_evaluation()
data_reader_eval = sess.get_data_reader_eval()
for _ in range(solver_config.max_eval_batches):
sess.eval()
metrics = sess.get_eval_metrics()
print("[HUGECTR][INFO] rank: {}, iter: {}, {}".format(rank, i, metrics))
return
if __name__ == "__main__":
json_file = sys.argv[1]
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
thread = threading.Thread(target=session_impl_test, args = (json_file,), name='[rank-%d train]' % rank)
current_thread = threading.currentThread()
print('[HUGECTR][INFO] %s is main thread: %s' % (current_thread.name, MPI.Is_thread_main()))
print('[HUGECTR][INFO] before: rank %d '% (rank))
# start the thread
thread.start()
# wait for terminate
thread.join()
print('[HUGECTR][INFO] after: rank %d ' % (rank))
```
- avg_line_length: 39.509091 · max_line_length: 105 · alphanum_fraction: 0.541647

**`h2o-py/tests/testdir_algos/glm/pyunit_solvers_glm.py`**
- hexsha: bc252ddc9c2a12d3d363641f212d576994ac1ad3 · size: 1,653 · ext: py · lang: Python
- repo (stars/issues fields): ghk829/h2o-study @ bb2c2ff6bf3ca9430b1f0b428b007aedfd56264d · licenses: ["Apache-2.0"]
- repo (forks field): purushothamgowthu/h2oai-h2o-3 @ af79095b600684978a9c397a086f1dedb9540760 · licenses: ["Apache-2.0"]
- max_stars_count: null · max_issues_count: 1 (2021-11-15T17:47:41.000Z → 2021-11-15T17:47:41.000Z) · max_forks_count: 1 (2019-12-09T03:07:04.000Z → 2019-12-09T03:07:04.000Z)
- content:
```python
from __future__ import print_function
import sys
from h2o.exceptions import H2OResponseError
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
def glm_solvers():
predictors = ["displacement","power","weight","acceleration","year"]
for solver in ["AUTO", "IRLSM", "L_BFGS", "COORDINATE_DESCENT_NAIVE", "COORDINATE_DESCENT"]:
print("Solver = {0}".format(solver))
for family in ["binomial", "gaussian", "poisson", "tweedie", "gamma"]:
if family == 'binomial': response_col = "economy_20mpg"
elif family == 'gaussian': response_col = "economy"
else: response_col = "cylinders"
print("Family = {0}".format(family))
training_data = h2o.import_file(pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
if family == 'binomial': training_data[response_col] = training_data[response_col].asfactor()
else: training_data[response_col] = training_data[response_col].asnumeric()
model = H2OGeneralizedLinearEstimator(family=family, alpha=0, Lambda=1e-5, solver=solver)
try:
model.train(x=predictors, y=response_col, training_frame=training_data)
except H2OResponseError as e:
#Coordinate descent (naive version) is not yet fully implemented.
# An exception is expected.
assert solver == "COORDINATE_DESCENT_NAIVE"
assert "unimplemented: Naive coordinate descent is not supported." in str(e)
h2o.remove(training_data)
if __name__ == "__main__":
pyunit_utils.standalone_test(glm_solvers)
else:
glm_solvers()
```
- avg_line_length: 41.325 · max_line_length: 102 · alphanum_fraction: 0.695705

**`tests/test_micro.py`**
- hexsha: 8d6848194d9e725436f9026b67c0ae41d2a1227c · size: 518 · ext: py · lang: Python
- repo (stars/issues/forks fields): basic-ph/feat @ 0660a34e5eeeab920d1ce8e139ab486e63bd419b · licenses: ["MIT"]
- max_stars_count: 2 (2020-07-13T11:59:19.000Z → 2020-07-13T12:02:05.000Z) · max_issues_count: null · max_forks_count: null
- content:
```python
from feat import micro
def test_reuss_model():
Ef = 224.4
Em = 3.1
Vf = 0.66
output = micro.reuss_model(Ef, Em, Vf)
assert round(output,2) == 8.88
def test_halpin_tsai_model():
# DOI: 10.1080/15376494.2014.938792 (page 11)
Ef = 74
Em = 3.35
Vf = 0.3
output_1 = micro.halpin_tsai_model(Ef, Em, Vf)
print(output_1)
output_2 = micro.halpin_tsai_model(Ef, Em, Vf, xi=1)
print(output_2)
assert round(output_1, 3) == 6.930
assert round(output_2, 3) == 5.879
```
- avg_line_length: 20.72 · max_line_length: 56 · alphanum_fraction: 0.621622
5996e4378f366978af04818cbda24d9cc19ced74
| 721
|
py
|
Python
|
Python Script Sample/osinfo.py
|
areriff/pythonlearncanvas
|
b797887311cafe38030e82cdac2068618ce4d33a
|
[
"MIT"
] | null | null | null |
Python Script Sample/osinfo.py
|
areriff/pythonlearncanvas
|
b797887311cafe38030e82cdac2068618ce4d33a
|
[
"MIT"
] | null | null | null |
Python Script Sample/osinfo.py
|
areriff/pythonlearncanvas
|
b797887311cafe38030e82cdac2068618ce4d33a
|
[
"MIT"
] | null | null | null |
# Script Name : osinfo.py
# Author : Craig Richards
# Created : 5th April 2012
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Displays some information about the OS you are running this script on
import platform
profile = [
platform.architecture(),
platform.dist(),
platform.libc_ver(),
platform.mac_ver(),
platform.machine(),
platform.node(),
platform.platform(),
platform.processor(),
platform.python_build(),
platform.python_compiler(),
platform.python_version(),
platform.release(),
platform.system(),
platform.uname(),
platform.version(),
]
i = 1
for item in profile:
print('#', i, ' ', item)
i = i + 1
| 20.6
| 87
| 0.632455
|
61c1a7ed7c55c3b8d9aae422e2ee9735786d7c9f
| 639
|
py
|
Python
|
magnum_ui/api/rest/__init__.py
|
NeCTAR-RC/magnum-ui
|
f2f3e9ec71fecd60f166faf5bd228c0f26be80d0
|
[
"Apache-2.0"
] | 29
|
2015-06-23T08:06:10.000Z
|
2021-08-09T18:20:54.000Z
|
magnum_ui/api/rest/__init__.py
|
NeCTAR-RC/magnum-ui
|
f2f3e9ec71fecd60f166faf5bd228c0f26be80d0
|
[
"Apache-2.0"
] | null | null | null |
magnum_ui/api/rest/__init__.py
|
NeCTAR-RC/magnum-ui
|
f2f3e9ec71fecd60f166faf5bd228c0f26be80d0
|
[
"Apache-2.0"
] | 14
|
2016-01-08T06:30:41.000Z
|
2022-01-03T19:48:01.000Z
|
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import REST modules here
from . import magnum # noqa: F401
| 37.588235
| 74
| 0.760563
|
3215dd2819e32d08cc6e02171cdc89f330cdda8f
| 638
|
py
|
Python
|
tests/test_946.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
tests/test_946.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
tests/test_946.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import pytest
"""
Test 946. Validate Stack Sequences
"""
@pytest.fixture(scope="session")
def init_variables_946():
from src.leetcode_946_validate_stack_sequences import Solution
solution = Solution()
def _init_variables_946():
return solution
yield _init_variables_946
class TestClass946:
def test_solution_0(self, init_variables_946):
assert init_variables_946().validateStackSequences([1, 2, 3, 4, 5], [4, 5, 3, 2, 1])
def test_solution_1(self, init_variables_946):
assert not init_variables_946().validateStackSequences([1, 2, 3, 4, 5], [4, 3, 5, 1, 2])
| 22.785714
| 96
| 0.702194
|
72e38e57cf0afc8da446f2c210f3ac456261a5e1
| 7,000
|
py
|
Python
|
extra_apps/social_core/backends/odnoklassniki.py
|
kaocher82/Vue-Django-Shop-Website
|
6273990a5510b72c3a3115d73e149d242049b5bc
|
[
"MIT"
] | 84
|
2019-02-22T08:19:52.000Z
|
2022-02-08T03:36:32.000Z
|
Backend/extra_apps/social_core/backends/odnoklassniki.py
|
GinCho-Max/Dailyfresh-B2C
|
7c94e9a4428e5116c91bf27cf696e6eee430748a
|
[
"Apache-2.0"
] | 16
|
2019-09-06T10:25:40.000Z
|
2022-02-12T06:37:41.000Z
|
Backend/extra_apps/social_core/backends/odnoklassniki.py
|
GinCho-Max/Dailyfresh-B2C
|
7c94e9a4428e5116c91bf27cf696e6eee430748a
|
[
"Apache-2.0"
] | 61
|
2019-03-20T02:29:23.000Z
|
2021-07-09T08:14:25.000Z
|
"""
Odnoklassniki OAuth2 and Iframe Application backends, docs at:
https://python-social-auth.readthedocs.io/en/latest/backends/odnoklassnikiru.html
"""
from hashlib import md5
from six.moves.urllib_parse import unquote
from .base import BaseAuth
from .oauth import BaseOAuth2
from ..exceptions import AuthFailed
class OdnoklassnikiOAuth2(BaseOAuth2):
"""Odnoklassniki authentication backend"""
name = 'odnoklassniki-oauth2'
ID_KEY = 'uid'
ACCESS_TOKEN_METHOD = 'POST'
SCOPE_SEPARATOR = ';'
AUTHORIZATION_URL = 'https://connect.ok.ru/oauth/authorize'
ACCESS_TOKEN_URL = 'https://api.ok.ru/oauth/token.do'
EXTRA_DATA = [('refresh_token', 'refresh_token'),
('expires_in', 'expires')]
def get_user_details(self, response):
"""Return user details from Odnoklassniki request"""
fullname, first_name, last_name = self.get_user_names(
fullname=unquote(response['name']),
first_name=unquote(response['first_name']),
last_name=unquote(response['last_name'])
)
return {
'username': response['uid'],
'email': response.get('email', ''),
'fullname': fullname,
'first_name': first_name,
'last_name': last_name
}
def user_data(self, access_token, *args, **kwargs):
"""Return user data from Odnoklassniki REST API"""
data = {'access_token': access_token, 'method': 'users.getCurrentUser'}
key, secret = self.get_key_and_secret()
public_key = self.setting('PUBLIC_NAME')
return odnoklassniki_api(self, data, 'https://api.ok.ru/',
public_key, secret, 'oauth')
class OdnoklassnikiApp(BaseAuth):
"""Odnoklassniki iframe app authentication backend"""
name = 'odnoklassniki-app'
ID_KEY = 'uid'
def extra_data(self, user, uid, response, details=None, *args, **kwargs):
return dict([(key, value) for key, value in response.items()
if key in response['extra_data_list']])
def get_user_details(self, response):
fullname, first_name, last_name = self.get_user_names(
fullname=unquote(response['name']),
first_name=unquote(response['first_name']),
last_name=unquote(response['last_name'])
)
return {
'username': response['uid'],
'email': '',
'fullname': fullname,
'first_name': first_name,
'last_name': last_name
}
def auth_complete(self, *args, **kwargs):
self.verify_auth_sig()
response = self.get_response()
fields = ('uid', 'first_name', 'last_name', 'name') + \
self.setting('EXTRA_USER_DATA_LIST', ())
data = {
'method': 'users.getInfo',
'uids': '{0}'.format(response['logged_user_id']),
'fields': ','.join(fields),
}
client_key, client_secret = self.get_key_and_secret()
public_key = self.setting('PUBLIC_NAME')
details = odnoklassniki_api(self, data, response['api_server'],
public_key, client_secret,
'iframe_nosession')
if len(details) == 1 and 'uid' in details[0]:
details = details[0]
auth_data_fields = self.setting('EXTRA_AUTH_DATA_LIST',
('api_server', 'apiconnection',
'session_key', 'authorized',
'session_secret_key'))
for field in auth_data_fields:
details[field] = response[field]
details['extra_data_list'] = fields + auth_data_fields
kwargs.update({'backend': self, 'response': details})
else:
raise AuthFailed(self, 'Cannot get user details: API error')
return self.strategy.authenticate(*args, **kwargs)
def get_auth_sig(self):
secret_key = self.setting('SECRET')
hash_source = '{0:s}{1:s}{2:s}'.format(self.data['logged_user_id'],
self.data['session_key'],
secret_key)
return md5(hash_source.encode('utf-8')).hexdigest()
def get_response(self):
fields = ('logged_user_id', 'api_server', 'application_key',
'session_key', 'session_secret_key', 'authorized',
'apiconnection')
return dict((name, self.data[name]) for name in fields
if name in self.data)
def verify_auth_sig(self):
correct_key = self.get_auth_sig()
key = self.data['auth_sig'].lower()
if correct_key != key:
raise AuthFailed(self, 'Wrong authorization key')
def odnoklassniki_oauth_sig(data, client_secret):
"""
Calculates signature of request data; the access_token value must be included.
Algorithm is described at
https://apiok.ru/wiki/pages/viewpage.action?pageId=12878032,
search for "little bit different way"
"""
suffix = md5(
'{0:s}{1:s}'.format(data['access_token'],
client_secret).encode('utf-8')
).hexdigest()
check_list = sorted(['{0:s}={1:s}'.format(key, value)
for key, value in data.items()
if key != 'access_token'])
return md5((''.join(check_list) + suffix).encode('utf-8')).hexdigest()
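# Illustrative sketch added for clarity (not part of the original module): how a
# caller would use odnoklassniki_oauth_sig above. All values are hypothetical.
def _example_oauth_signature():
    data = {
        'access_token': 'TOKEN',          # hypothetical token
        'application_key': 'PUBLIC_KEY',  # hypothetical public application key
        'format': 'JSON',
        'method': 'users.getCurrentUser',
    }
    # Every parameter except access_token is sorted and concatenated, then the
    # md5 of access_token + client secret is appended before the final md5.
    return odnoklassniki_oauth_sig(data, 'CLIENT_SECRET')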
def odnoklassniki_iframe_sig(data, client_secret_or_session_secret):
"""
Calculates signature as described at:
https://apiok.ru/wiki/display/ok/Authentication+and+Authorization
If the API method requires session context, the request is signed with the
session secret key. Otherwise it is signed with the application secret key.
"""
param_list = sorted(['{0:s}={1:s}'.format(key, value)
for key, value in data.items()])
return md5(
(''.join(param_list) + client_secret_or_session_secret).encode('utf-8')
).hexdigest()
def odnoklassniki_api(backend, data, api_url, public_key, client_secret,
request_type='oauth'):
"""Calls Odnoklassniki REST API method
https://apiok.ru/wiki/display/ok/Odnoklassniki+Rest+API"""
data.update({
'application_key': public_key,
'format': 'JSON'
})
if request_type == 'oauth':
data['sig'] = odnoklassniki_oauth_sig(data, client_secret)
elif request_type == 'iframe_session':
data['sig'] = odnoklassniki_iframe_sig(data,
data['session_secret_key'])
elif request_type == 'iframe_nosession':
data['sig'] = odnoklassniki_iframe_sig(data, client_secret)
else:
msg = 'Unknown request type {0}. How should it be signed?'
raise AuthFailed(backend, msg.format(request_type))
return backend.get_json(api_url + 'fb.do', params=data)
| 40.462428
| 85
| 0.591714
|
6ba4ec129b2bd7da6db22e968694016a8d368f9c
| 10,912
|
py
|
Python
|
django/db/migrations/graph.py
|
indevgr/django
|
0247c9b08f8da4a2d93b9cede6c615011552b55a
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
django/db/migrations/graph.py
|
indevgr/django
|
0247c9b08f8da4a2d93b9cede6c615011552b55a
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
django/db/migrations/graph.py
|
indevgr/django
|
0247c9b08f8da4a2d93b9cede6c615011552b55a
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2016-04-21T11:47:46.000Z
|
2016-04-21T11:47:46.000Z
|
from __future__ import unicode_literals
import warnings
from collections import deque
from functools import total_ordering
from django.db.migrations.state import ProjectState
from django.utils.datastructures import OrderedSet
from django.utils.encoding import python_2_unicode_compatible
from .exceptions import CircularDependencyError, NodeNotFoundError
RECURSION_DEPTH_WARNING = (
"Maximum recursion depth exceeded while generating migration graph, "
"falling back to iterative approach. If you're experiencing performance issues, "
"consider squashing migrations as described at "
"https://docs.djangoproject.com/en/dev/topics/migrations/#squashing-migrations."
)
@python_2_unicode_compatible
@total_ordering
class Node(object):
"""
A single node in the migration graph. Contains direct links to adjacent
nodes in either direction.
"""
def __init__(self, key):
self.key = key
self.children = set()
self.parents = set()
def __eq__(self, other):
return self.key == other
def __lt__(self, other):
return self.key < other
def __hash__(self):
return hash(self.key)
def __getitem__(self, item):
return self.key[item]
def __str__(self):
return str(self.key)
def __repr__(self):
return '<Node: (%r, %r)>' % self.key
def add_child(self, child):
self.children.add(child)
def add_parent(self, parent):
self.parents.add(parent)
# Use manual caching, @cached_property effectively doubles the
# recursion depth for each recursion.
def ancestors(self):
# Use self.key instead of self to speed up the frequent hashing
# when constructing an OrderedSet.
if '_ancestors' not in self.__dict__:
ancestors = deque([self.key])
for parent in sorted(self.parents):
ancestors.extendleft(reversed(parent.ancestors()))
self.__dict__['_ancestors'] = list(OrderedSet(ancestors))
return self.__dict__['_ancestors']
# Use manual caching, @cached_property effectively doubles the
# recursion depth for each recursion.
def descendants(self):
# Use self.key instead of self to speed up the frequent hashing
# when constructing an OrderedSet.
if '_descendants' not in self.__dict__:
descendants = deque([self.key])
for child in sorted(self.children):
descendants.extendleft(reversed(child.descendants()))
self.__dict__['_descendants'] = list(OrderedSet(descendants))
return self.__dict__['_descendants']
@python_2_unicode_compatible
class MigrationGraph(object):
"""
Represents the digraph of all migrations in a project.
Each migration is a node, and each dependency is an edge. There are
no implicit dependencies between numbered migrations - the numbering is
merely a convention to aid file listing. Every new numbered migration
has a declared dependency to the previous number, meaning that VCS
branch merges can be detected and resolved.
Migrations files can be marked as replacing another set of migrations -
this is to support the "squash" feature. The graph handler isn't responsible
for these; instead, the code to load them in here should examine the
migration files and if the replaced migrations are all either unapplied
or not present, it should ignore the replaced ones, load in just the
replacing migration, and repoint any dependencies that pointed to the
replaced migrations to point to the replacing one.
A node should be a tuple: (app_path, migration_name). The tree special-cases
things within an app - namely, root nodes and leaf nodes ignore dependencies
to other apps.
"""
def __init__(self):
self.node_map = {}
self.nodes = {}
self.cached = False
def add_node(self, key, implementation):
node = Node(key)
self.node_map[key] = node
self.nodes[key] = implementation
self.clear_cache()
def add_dependency(self, migration, child, parent):
if child not in self.nodes:
raise NodeNotFoundError(
"Migration %s dependencies reference nonexistent child node %r" % (migration, child),
child
)
if parent not in self.nodes:
raise NodeNotFoundError(
"Migration %s dependencies reference nonexistent parent node %r" % (migration, parent),
parent
)
self.node_map[child].add_parent(self.node_map[parent])
self.node_map[parent].add_child(self.node_map[child])
self.clear_cache()
def clear_cache(self):
if self.cached:
for node in self.nodes:
self.node_map[node].__dict__.pop('_ancestors', None)
self.node_map[node].__dict__.pop('_descendants', None)
self.cached = False
def forwards_plan(self, target):
"""
Given a node, returns a list of which previous nodes (dependencies)
must be applied, ending with the node itself.
This is the list you would follow if applying the migrations to
a database.
"""
if target not in self.nodes:
raise NodeNotFoundError("Node %r not a valid node" % (target, ), target)
# Use parent.key instead of parent to speed up the frequent hashing in ensure_not_cyclic
self.ensure_not_cyclic(target, lambda x: (parent.key for parent in self.node_map[x].parents))
self.cached = True
node = self.node_map[target]
try:
return node.ancestors()
except RuntimeError:
# fallback to iterative dfs
warnings.warn(RECURSION_DEPTH_WARNING, RuntimeWarning)
return self.iterative_dfs(node)
def backwards_plan(self, target):
"""
Given a node, returns a list of which dependent nodes (dependencies)
must be unapplied, ending with the node itself.
This is the list you would follow if removing the migrations from
a database.
"""
if target not in self.nodes:
raise NodeNotFoundError("Node %r not a valid node" % (target, ), target)
# Use child.key instead of child to speed up the frequent hashing in ensure_not_cyclic
self.ensure_not_cyclic(target, lambda x: (child.key for child in self.node_map[x].children))
self.cached = True
node = self.node_map[target]
try:
return node.descendants()
except RuntimeError:
# fallback to iterative dfs
warnings.warn(RECURSION_DEPTH_WARNING, RuntimeWarning)
return self.iterative_dfs(node, forwards=False)
def iterative_dfs(self, start, forwards=True):
"""
Iterative depth first search, for finding dependencies.
"""
visited = deque()
visited.append(start)
if forwards:
stack = deque(sorted(start.parents))
else:
stack = deque(sorted(start.children))
while stack:
node = stack.popleft()
visited.appendleft(node)
if forwards:
children = sorted(node.parents, reverse=True)
else:
children = sorted(node.children, reverse=True)
# reverse sorting is needed because prepending using deque.extendleft
# also effectively reverses values
stack.extendleft(children)
return list(OrderedSet(visited))
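# Note added for clarity: deque.extendleft prepends items one at a time, so the
# prepended chunk comes out reversed; e.g. deque([1]) extended left with [2, 3]
# becomes deque([3, 2, 1]). That is why the children above are sorted in reverse.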
def root_nodes(self, app=None):
"""
Returns all root nodes - that is, nodes with no dependencies inside
their app. These are the starting point for an app.
"""
roots = set()
for node in self.nodes:
if not any(key[0] == node[0] for key in self.node_map[node].parents) and (not app or app == node[0]):
roots.add(node)
return sorted(roots)
def leaf_nodes(self, app=None):
"""
Returns all leaf nodes - that is, nodes with no dependents in their app.
These are the "most current" version of an app's schema.
Having more than one per app is technically an error, but one that
gets handled further up, in the interactive command - it's usually the
result of a VCS merge and needs some user input.
"""
leaves = set()
for node in self.nodes:
if not any(key[0] == node[0] for key in self.node_map[node].children) and (not app or app == node[0]):
leaves.add(node)
return sorted(leaves)
def ensure_not_cyclic(self, start, get_children):
# Algo from GvR:
# http://neopythonic.blogspot.co.uk/2009/01/detecting-cycles-in-directed-graph.html
todo = set(self.nodes)
while todo:
node = todo.pop()
stack = [node]
while stack:
top = stack[-1]
for node in get_children(top):
if node in stack:
cycle = stack[stack.index(node):]
raise CircularDependencyError(", ".join("%s.%s" % n for n in cycle))
if node in todo:
stack.append(node)
todo.remove(node)
break
else:
node = stack.pop()
def __str__(self):
return 'Graph: %s nodes, %s edges' % self._nodes_and_edges()
def __repr__(self):
nodes, edges = self._nodes_and_edges()
return '<%s: nodes=%s, edges=%s>' % (self.__class__.__name__, nodes, edges)
def _nodes_and_edges(self):
return len(self.nodes), sum(len(node.parents) for node in self.node_map.values())
def make_state(self, nodes=None, at_end=True, real_apps=None):
"""
Given a migration node or nodes, returns a complete ProjectState for it.
If at_end is False, returns the state before the migration has run.
If nodes is not provided, returns the overall most current project state.
"""
if nodes is None:
nodes = list(self.leaf_nodes())
if len(nodes) == 0:
return ProjectState()
if not isinstance(nodes[0], tuple):
nodes = [nodes]
plan = []
for node in nodes:
for migration in self.forwards_plan(node):
if migration not in plan:
if not at_end and migration in nodes:
continue
plan.append(migration)
project_state = ProjectState(real_apps=real_apps)
for node in plan:
project_state = self.nodes[node].mutate_state(project_state, preserve=False)
return project_state
def __contains__(self, node):
return node in self.nodes
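# Illustrative sketch added for clarity (not part of Django): building a minimal
# graph and asking for a forwards plan. The app and migration names are hypothetical.
def _example_forwards_plan():
    graph = MigrationGraph()
    graph.add_node(('app', '0001_initial'), None)
    graph.add_node(('app', '0002_change'), None)
    graph.add_dependency('app.0002_change', ('app', '0002_change'), ('app', '0001_initial'))
    # Expected result: [('app', '0001_initial'), ('app', '0002_change')]
    return graph.forwards_plan(('app', '0002_change'))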
| 38.558304
| 114
| 0.627383
|
3b53171a053e7c3ca15c8e46bff4ccefb3d43cdc
| 3,270
|
py
|
Python
|
src/deepfield_jobs/deepfield_jobs/metadata.py
|
flixr/bagbunker
|
661814a2a481faf8363c63489cf19ad3b6af9fc9
|
[
"MIT"
] | null | null | null |
src/deepfield_jobs/deepfield_jobs/metadata.py
|
flixr/bagbunker
|
661814a2a481faf8363c63489cf19ad3b6af9fc9
|
[
"MIT"
] | null | null | null |
src/deepfield_jobs/deepfield_jobs/metadata.py
|
flixr/bagbunker
|
661814a2a481faf8363c63489cf19ad3b6af9fc9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2015 Ternaris, Munich, Germany
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division
import os
from marv import bb, db
from marv.bb import job_logger as logger
from bagbunker import bb_bag
__version__ = '0.0.3'
@bb.job_model()
class Metadata(object):
robot_name = db.Column(db.String(42), nullable=False)
use_case = db.Column(db.String(126), nullable=False)
# XXX: This will receive all messages. What we really want is to
# receive only /robot_name/name messages, but also be called if there
# are no messages.
@bb.job()
@bb_bag.messages(topics='*')
def job(fileset, messages):
if not fileset.bag:
return
name_topic = '/robot_name/name'
messages = messages \
if any(True for x in fileset.bag.topics if x.topic.name == name_topic) \
else ()
for topic, msg, timestamp in messages:
if topic == name_topic:
try:
robot_name = msg.data
except AttributeError:
robot_name = msg.robot_name
logger.debug('found robot_name via topic: %s' % msg)
use_case = ''
break
else:
path = fileset.dirpath.split(os.sep)
robot_name = path[3] if len(path) > 3 else 'unknown'
use_case = path[6] if len(path) > 6 else 'unknown'
logger.info('robot_name=%s, use_case=%s', robot_name, use_case)
yield Metadata(robot_name=robot_name, use_case=use_case)
@bb.filter()
@bb.filter_input('robot', operators=['substring'])
def filter_robot(query, ListingEntry, robot):
return query.filter(ListingEntry.robot.contains(robot.val))
@bb.filter()
@bb.filter_input('use_case', operators=['substring'])
def filter_use_case(query, ListingEntry, use_case):
return query.filter(ListingEntry.use_case.contains(use_case.val))
@bb.listing()
@bb.listing_column('robot')
@bb.listing_column('use_case')
def listing(fileset):
jobrun = fileset.get_latest_jobrun('deepfield::metadata')
if jobrun is None:
return {}
meta = Metadata.query.filter(Metadata.jobrun == jobrun).first()
if meta is None:
return {}
return {
'robot': meta.robot_name,
'use_case': meta.use_case,
}
| 33.367347
| 80
| 0.700917
|
e4ced51a1b52f9b63f923e4135a7e8d06d25d2e0
| 8,627
|
py
|
Python
|
willie/modules/clock.py
|
RuneMagic/HelpingHand
|
c3b53518758cd7c8461da365aebbc3ecc0c8a23f
|
[
"EFL-2.0"
] | null | null | null |
willie/modules/clock.py
|
RuneMagic/HelpingHand
|
c3b53518758cd7c8461da365aebbc3ecc0c8a23f
|
[
"EFL-2.0"
] | null | null | null |
willie/modules/clock.py
|
RuneMagic/HelpingHand
|
c3b53518758cd7c8461da365aebbc3ecc0c8a23f
|
[
"EFL-2.0"
] | null | null | null |
# coding=utf8
"""
clock.py - Willie Clock Module
Copyright 2008-9, Sean B. Palmer, inamidst.com
Copyright 2012, Edward Powell, embolalia.net
Licensed under the Eiffel Forum License 2.
http://willie.dfbta.net
"""
from __future__ import unicode_literals
try:
import pytz
except ImportError:
pytz = None
from willie.module import commands, example, OP
from willie.tools.time import (
get_timezone, format_time, validate_format, validate_timezone
)
from willie.config.types import StaticSection, ValidatedAttribute
class TimeSection(StaticSection):
tz = ValidatedAttribute(
'tz',
parse=validate_timezone,
serialize=validate_timezone,
default='UTC'
)
"""Default time zone (see http://dft.ba/-tz)"""
time_format = ValidatedAttribute(
'time_format',
parse=validate_format,
default='%Y-%m-%d - %T%Z'
)
"""Default time format (see http://strftime.net)"""
def configure(config):
config.define_section('clock', TimeSection)
config.clock.configure_setting(
'tz', 'Preferred time zone (http://dft.ba/-tz)')
config.clock.configure_setting(
'time_format', 'Preferred time format (http://strftime.net)')
def setup(bot):
bot.config.define_section('clock', TimeSection)
@commands('t', 'time')
@example('.t America/New_York')
def f_time(bot, trigger):
"""Returns the current time."""
if trigger.group(2):
zone = get_timezone(bot.db, bot.config, trigger.group(2).strip(), None, None)
if not zone:
bot.say('Could not find timezone %s.' % trigger.group(2).strip())
return
else:
zone = get_timezone(bot.db, bot.config, None, trigger.nick,
trigger.sender)
time = format_time(bot.db, bot.config, zone, trigger.nick, trigger.sender)
bot.say(time)
@commands('settz', 'settimezone')
@example('.settz America/New_York')
def update_user(bot, trigger):
"""
Set your preferred time zone. Most timezones will work, but it's best to
use one from http://dft.ba/-tz
"""
if not pytz:
bot.reply("Sorry, I don't have timezone support installed.")
else:
tz = trigger.group(2)
if not tz:
bot.reply("What timezone do you want to set? Try one from "
"http://dft.ba/-tz")
return
if tz not in pytz.all_timezones:
bot.reply("I don't know that time zone. Try one from "
"http://dft.ba/-tz")
return
bot.db.set_nick_value(trigger.nick, 'timezone', tz)
if len(tz) < 7:
bot.say("Okay, {}, but you should use one from http://dft.ba/-tz "
"if you use DST.".format(trigger.nick))
else:
bot.reply('I now have you in the %s time zone.' % tz)
@commands('gettz', 'gettimezone')
@example('.gettz [nick]')
def get_user_tz(bot, trigger):
"""
Gets a user's preferred time zone; will show yours if no user is specified
"""
if not pytz:
bot.reply("Sorry, I don't have timezone support installed.")
else:
nick = trigger.group(2)
if not nick:
nick = trigger.nick
nick = nick.strip()
tz = bot.db.get_nick_value(nick, 'timezone')
if tz:
bot.say('%s\'s time zone is %s.' % (nick, tz))
else:
bot.say('%s has not set their time zone' % nick)
@commands('settimeformat', 'settf')
@example('.settf %Y-%m-%dT%T%z')
def update_user_format(bot, trigger):
"""
Sets your preferred format for time. Uses the standard strftime format. You
can use http://strftime.net or your favorite search engine to learn more.
"""
tformat = trigger.group(2)
if not tformat:
bot.reply("What format do you want me to use? Try using"
" http://strftime.net to make one.")
return
tz = get_timezone(bot.db, bot.config, None, trigger.nick, trigger.sender)
# Get old format as back-up
old_format = bot.db.get_nick_value(trigger.nick, 'time_format')
# Save the new format in the database so we can test it.
bot.db.set_nick_value(trigger.nick, 'time_format', tformat)
try:
timef = format_time(db=bot.db, zone=tz, nick=trigger.nick)
except:
bot.reply("That format doesn't work. Try using"
" http://strftime.net to make one.")
# New format doesn't work. Revert save in database.
bot.db.set_nick_value(trigger.nick, 'time_format', old_format)
return
bot.reply("Got it. Your time will now appear as %s. (If the "
"timezone is wrong, you might try the settz command)"
% timef)
@commands('gettimeformat', 'gettf')
@example('.gettf [nick]')
def get_user_format(bot, trigger):
"""
Gets a user's preferred time format; will show yours if no user is specified
"""
nick = trigger.group(2)
if not nick:
nick = trigger.nick
nick = nick.strip()
# Get old format as back-up
format = bot.db.get_nick_value(nick, 'time_format')
if format:
bot.say("%s's time format: %s." % (nick, format))
else:
bot.say("%s hasn't set a custom time format" % nick)
@commands('setchanneltz', 'setctz')
@example('.setctz America/New_York')
def update_channel(bot, trigger):
"""
Set the preferred time zone for the channel.
"""
if bot.privileges[trigger.sender][trigger.nick] < OP:
return
elif not pytz:
bot.reply("Sorry, I don't have timezone support installed.")
else:
tz = trigger.group(2)
if not tz:
bot.reply("What timezone do you want to set? Try one from "
"http://dft.ba/-tz")
return
if tz not in pytz.all_timezones:
bot.reply("I don't know that time zone. Try one from "
"http://dft.ba/-tz")
return
bot.db.set_channel_value(trigger.sender, 'timezone', tz)
if len(tz) < 7:
bot.say("Okay, {}, but you should use one from http://dft.ba/-tz "
"if you use DST.".format(trigger.nick))
else:
bot.reply(
'I now have {} in the {} time zone.'.format(trigger.sender, tz))
@commands('getchanneltz', 'getctz')
@example('.getctz [channel]')
def get_channel_tz(bot, trigger):
"""
Gets the preferred channel timezone, or the current channel timezone if no
channel given.
"""
if not pytz:
bot.reply("Sorry, I don't have timezone support installed.")
else:
channel = trigger.group(2)
if not channel:
channel = trigger.sender
channel = channel.strip()
timezone = bot.db.get_channel_value(channel, 'timezone')
if timezone:
bot.say('%s\'s timezone: %s' % (channel, timezone))
else:
bot.say('%s has no preferred timezone' % channel)
@commands('setchanneltimeformat', 'setctf')
@example('.setctf %Y-%m-%dT%T%z')
def update_channel_format(bot, trigger):
"""
Sets the channel's preferred time format. Uses the standard strftime format. You
can use http://strftime.net or your favorite search engine to learn more.
"""
if bot.privileges[trigger.sender][trigger.nick] < OP:
return
tformat = trigger.group(2)
if not tformat:
bot.reply("What format do you want me to use? Try using"
" http://strftime.net to make one.")
tz = get_timezone(bot.db, bot.config, None, None, trigger.sender)
try:
timef = format_time(zone=tz)
except:
bot.reply("That format doesn't work. Try using"
" http://strftime.net to make one.")
return
bot.db.set_channel_value(trigger.sender, 'time_format', tformat)
bot.reply("Got it. Times in this channel will now appear as %s "
"unless a user has their own format set. (If the timezone"
" is wrong, you might try the settz and channeltz "
"commands)" % timef)
@commands('getchanneltimeformat', 'getctf')
@example('.getctf [channel]')
def get_channel_format(bot, trigger):
"""
Gets the channel's preferred time format; returns the current channel's if
no channel name is given
"""
channel = trigger.group(2)
if not channel:
channel = trigger.sender
channel = channel.strip()
tformat = bot.db.get_channel_value(channel, 'time_format')
if tformat:
bot.say('%s\'s time format: %s' % (channel, tformat))
else:
bot.say('%s has no preferred time format' % channel)
| 31.485401
| 85
| 0.60983
|
c5584a4e2452c8af9c201f73eb663450e684a80f
| 7,985
|
py
|
Python
|
code-postprocessing/cocopp/config.py
|
asmaatamna/coco
|
4b1497a0e6d4de4a0dd75e03779d6c5349fa21ae
|
[
"BSD-3-Clause"
] | 2
|
2021-02-15T17:09:24.000Z
|
2021-12-28T09:23:01.000Z
|
code-postprocessing/cocopp/config.py
|
patsp/coco
|
4b1497a0e6d4de4a0dd75e03779d6c5349fa21ae
|
[
"BSD-3-Clause"
] | null | null | null |
code-postprocessing/cocopp/config.py
|
patsp/coco
|
4b1497a0e6d4de4a0dd75e03779d6c5349fa21ae
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""This module is an attempt for a global configuration file for various parameters.
The import of this module, :py:mod:`config`, changes default settings (attributes)
of other modules. This works, because each module has only one instance.
Before this module is imported somewhere, modules use their default settings.
This file could be dynamically modified and reloaded.
See also genericsettings.py which is a central place to define settings
used by other modules, but does not modify settings of other modules.
"""
import warnings
import numpy as np
import ppfigdim
from . import genericsettings, pproc, pprldistr
from . import testbedsettings as tbs
from .comp2 import ppfig2, ppscatter
from .compall import pprldmany
def target_values(is_expensive):
"""manage target values setting in "expensive" optimization scenario.
"""
if is_expensive:
genericsettings.runlength_based_targets = True
genericsettings.maxevals_fix_display = genericsettings.xlimit_expensive
else:
genericsettings.runlength_based_targets = False
genericsettings.maxevals_fix_display = None
def config(testbed_name=None):
"""called from a high level, e.g. rungeneric, to configure the lower level
modules via modifying parameter settings.
"""
if testbed_name:
tbs.load_current_testbed(testbed_name, pproc.TargetValues)
genericsettings.simulated_runlength_bootstrap_sample_size = 10 + 990 / (1 + 10 * max(0, genericsettings.in_a_hurry))
if tbs.current_testbed and tbs.current_testbed.name not in tbs.suite_to_testbed:
if ((genericsettings.isExpensive in (True, 1) or
genericsettings.runlength_based_targets in (True, 1)) and
tbs.current_testbed.reference_algorithm_filename == ''):
warnings.warn('Expensive setting not yet supported with ' +
tbs.current_testbed.name +
' testbed; using non-expensive setting instead.')
genericsettings.isExpensive = False
genericsettings.runlength_based_targets = False
# pprldist.plotRLDistr2 needs to be revised regarding run_length based targets
if genericsettings.runlength_based_targets in (True, 1):
print('Reference algorithm based target values, using ' +
tbs.current_testbed.reference_algorithm_filename +
': now for each function, the target ' +
'values differ, but the "level of difficulty" ' +
'is "the same". ')
reference_data = 'testbedsettings'
# pprldmany:
if 1 < 3: # not yet functional, captions need to be adjusted and the bug reported by Ilya sorted out
# pprldmany.caption = ... captions are still hard coded in LaTeX
pprldmany.x_limit = genericsettings.maxevals_fix_display # always fixed
if tbs.current_testbed:
testbed = tbs.current_testbed
testbed.scenario = tbs.scenario_rlbased
# genericsettings (to be used in rungeneric2 while calling pprldistr.comp(...)):
testbed.rldValsOfInterest = pproc.RunlengthBasedTargetValues(
genericsettings.target_runlengths_in_single_rldistr,
reference_data=reference_data,
force_different_targets_factor=10**-0.2)
testbed.ppfigdim_target_values = pproc.RunlengthBasedTargetValues(
genericsettings.target_runlengths_in_scaling_figs,
reference_data=reference_data,
force_different_targets_factor=10**-0.2)
testbed.pprldistr_target_values = pproc.RunlengthBasedTargetValues(
genericsettings.target_runlengths_in_single_rldistr,
reference_data=reference_data,
force_different_targets_factor=10**-0.2)
testbed.pprldmany_target_values = pproc.RunlengthBasedTargetValues(
np.logspace(np.log10(0.5), np.log10(50), 31),
reference_data=reference_data,
smallest_target=1e-8 * 10**0.000,
force_different_targets_factor=1,
unique_target_values=True)
testbed.ppscatter_target_values = pproc.RunlengthBasedTargetValues(
np.logspace(np.log10(0.5),
np.log10(50), 8))
# pptable:
testbed.pptable_targetsOfInterest = pproc.RunlengthBasedTargetValues(
testbed.pptable_target_runlengths,
reference_data=reference_data,
force_different_targets_factor=10**-0.2)
# pptable2:
testbed.pptable2_targetsOfInterest = pproc.RunlengthBasedTargetValues(
testbed.pptable2_target_runlengths,
reference_data=reference_data,
force_different_targets_factor=10**-0.2)
# pptables:
testbed.pptablemany_targetsOfInterest = pproc.RunlengthBasedTargetValues(
testbed.pptables_target_runlengths,
reference_data=reference_data,
force_different_targets_factor=10**-0.2)
# ppfigs
testbed.ppfigs_ftarget = pproc.RunlengthBasedTargetValues([genericsettings.target_runlength],
reference_data=reference_data)
# pprldistr:
pprldistr.runlen_xlimits_max = \
genericsettings.maxevals_fix_display / 2 if genericsettings.maxevals_fix_display else None # can be None
pprldistr.runlen_xlimits_min = 10**-0.3 # can be None
# ppfigdim:
ppfigdim.xlim_max = genericsettings.maxevals_fix_display
if ppfigdim.xlim_max:
ppfigdim.styles = [ # sort of rainbow style, most difficult (red) first
{'color': 'y', 'marker': '^', 'markeredgecolor': 'k', 'markeredgewidth': 2, 'linewidth': 4},
{'color': 'g', 'marker': '.', 'linewidth': 4},
{'color': 'r', 'marker': 'o', 'markeredgecolor': 'k', 'markeredgewidth': 2, 'linewidth': 4},
{'color': 'm', 'marker': '.', 'linewidth': 4},
{'color': 'c', 'marker': 'v', 'markeredgecolor': 'k', 'markeredgewidth': 2, 'linewidth': 4},
{'color': 'b', 'marker': '.', 'linewidth': 4},
{'color': 'k', 'marker': 'o', 'markeredgecolor': 'k', 'markeredgewidth': 2, 'linewidth': 4},
]
ppscatter.markersize = 16
else:
pass # here the default values of the modules apply
# pprlmany.x_limit = ...should depend on noisy/noiseless
if 11 < 3: # for testing purpose
if tbs.current_testbed:
# TODO: this case needs to be tested yet: the current problem is that no noisy data are in this folder
tbs.current_testbed.pprldmany_target_values = \
pproc.RunlengthBasedTargetValues(10**np.arange(1, 4, 0.2), 'RANDOMSEARCH')
pprldmany.fontsize = 20.0 # should depend on the number of data lines down to 10.0 ?
ppscatter.markersize = 14
ppfig2.linewidth = 4
def main():
config()
| 49.290123
| 120
| 0.580589
|
9aecc584c0116b87fc40774fde96c0900b5edabc
| 717
|
py
|
Python
|
db/generate.py
|
WhatCheer/Number-Laundry
|
c8cb7a6190a115ce84998a0bef8ab3490788d071
|
[
"MIT"
] | 1
|
2019-04-10T05:02:14.000Z
|
2019-04-10T05:02:14.000Z
|
db/generate.py
|
WhatCheer/Number-Laundry
|
c8cb7a6190a115ce84998a0bef8ab3490788d071
|
[
"MIT"
] | null | null | null |
db/generate.py
|
WhatCheer/Number-Laundry
|
c8cb7a6190a115ce84998a0bef8ab3490788d071
|
[
"MIT"
] | null | null | null |
import json
# This program takes Twilio's rate data (in JSON form) and writes out INSERT queries.
# The output (at this time) is almost ready for direct insertion; there are just a few
# manual tweaks you will run into when you try the insert.
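# Illustrative only (hypothetical rate entry): an input record such as
#   {"Prefixes": "1242, 1246", "Country": "Barbados", "IsoCountry": "BB", "Price": "0.30"}
# would emit value rows of the form
#   ( NULL, '1242', 'Barbados', 'BB', '0.30' ),
#   ( NULL, '1246', 'Barbados', 'BB', '0.30' ),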
print 'INSERT INTO prefixes ( id, prefix, country, country_code, twilio_rate ) VALUES '
with open( 'international-calling-rates-ajax', 'r' ) as handle:
for line in json.loads( handle.read() ):
for prefix in line['Prefixes'].replace( ',', '' ).split( ' ' ):
print "( NULL, '%s', '%s', '%s', '%s' )," % (
prefix.replace( "'", "''" ),
line['Country'].replace( "'", "''" ),
line['IsoCountry'].replace( "'", "''" ),
line['Price'].replace( "'", "''" )
)
| 37.736842
| 92
| 0.612273
|
1cc40154953e1de06017bb3cf4758a1711cea40c
| 11,587
|
py
|
Python
|
fanficfare/adapters/__init__.py
|
bennr01/FanFicFare
|
3c77a68e61c39b0aa166f0bdf8df2f76f55d6de1
|
[
"Apache-2.0"
] | 1
|
2020-06-18T05:22:56.000Z
|
2020-06-18T05:22:56.000Z
|
venv/pyenv.cfg/Lib/site-packages/fanficfare/adapters/__init__.py
|
DylanB5402/SummerProject2
|
d2e20d16f310826b9c87a010f8eefcd0abe290b0
|
[
"Apache-2.0"
] | null | null | null |
venv/pyenv.cfg/Lib/site-packages/fanficfare/adapters/__init__.py
|
DylanB5402/SummerProject2
|
d2e20d16f310826b9c87a010f8eefcd0abe290b0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2011 Fanficdownloader team, 2020 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import os, re, sys, glob, types
from os.path import dirname, basename, normpath
import logging
# py2 vs py3 transition
from ..six import text_type as unicode
from ..six.moves.urllib.parse import urlparse
logger = logging.getLogger(__name__)
from .. import exceptions as exceptions
from .. import configurable as configurable
## must import each adapter here.
from . import base_efiction_adapter
from . import adapter_test1
from . import adapter_fanfictionnet
from . import adapter_fanficcastletvnet
from . import adapter_fictionalleyorg
from . import adapter_fictionpresscom
from . import adapter_ficwadcom
from . import adapter_fimfictionnet
from . import adapter_mediaminerorg
from . import adapter_potionsandsnitches
from . import adapter_tenhawkpresents
from . import adapter_adastrafanficcom
from . import adapter_tthfanficorg
from . import adapter_twilightednet
from . import adapter_whoficcom
from . import adapter_siyecouk
from . import adapter_archiveofourownorg
from . import adapter_ficbooknet
from . import adapter_midnightwhispers
from . import adapter_ksarchivecom
from . import adapter_archiveskyehawkecom
from . import adapter_squidgeorgpeja
from . import adapter_libraryofmoriacom
from . import adapter_wraithbaitcom
from . import adapter_ashwindersycophanthexcom
from . import adapter_chaossycophanthexcom
from . import adapter_erosnsapphosycophanthexcom
from . import adapter_lumossycophanthexcom
from . import adapter_occlumencysycophanthexcom
from . import adapter_phoenixsongnet
from . import adapter_walkingtheplankorg
from . import adapter_dokugacom
from . import adapter_iketernalnet
from . import adapter_storiesofardacom
from . import adapter_destinysgatewaycom
from . import adapter_ncisfictioncom
from . import adapter_fanfiktionde
from . import adapter_ponyfictionarchivenet
from . import adapter_themasquenet
from . import adapter_pretendercentrecom
from . import adapter_darksolaceorg
from . import adapter_finestoriescom
from . import adapter_hpfanficarchivecom
from . import adapter_nhamagicalworldsus
from . import adapter_hlfictionnet
from . import adapter_dracoandginnycom
from . import adapter_scarvesandcoffeenet
from . import adapter_thepetulantpoetesscom
from . import adapter_wolverineandroguecom
from . import adapter_merlinficdtwinscouk
from . import adapter_thehookupzonenet
from . import adapter_bloodtiesfancom
from . import adapter_qafficcom
from . import adapter_efpfanficnet
from . import adapter_potterficscom
from . import adapter_efictionestelielde
from . import adapter_imagineeficcom
from . import adapter_potterheadsanonymouscom
from . import adapter_fictionpadcom
from . import adapter_storiesonlinenet
from . import adapter_trekiverseorg
from . import adapter_literotica
from . import adapter_voracity2eficcom
from . import adapter_spikeluvercom
from . import adapter_bloodshedversecom
from . import adapter_fanfichu
from . import adapter_fictionmaniatv
from . import adapter_themaplebookshelf
from . import adapter_sheppardweircom
from . import adapter_samandjacknet
from . import adapter_csiforensicscom
from . import adapter_fanfictionjunkiesde
from . import adapter_tgstorytimecom
from . import adapter_itcouldhappennet
from . import adapter_forumsspacebattlescom
from . import adapter_forumssufficientvelocitycom
from . import adapter_forumquestionablequestingcom
from . import adapter_ninelivesarchivecom
from . import adapter_masseffect2in
from . import adapter_quotevcom
from . import adapter_mcstoriescom
from . import adapter_buffygilescom
from . import adapter_andromedawebcom
from . import adapter_naiceanilmenet
from . import adapter_deepinmysoulnet
from . import adapter_adultfanfictionorg
from . import adapter_fictionhuntcom
from . import adapter_royalroadcom
from . import adapter_chosentwofanficcom
from . import adapter_bdsmlibrarycom
from . import adapter_asexstoriescom
from . import adapter_gluttonyfictioncom
from . import adapter_valentchambercom
from . import adapter_looselugscom
from . import adapter_wwwgiantessworldnet
from . import adapter_lotrgficcom
from . import adapter_tomparisdormcom
from . import adapter_sugarquillnet
from . import adapter_starslibrarynet
from . import adapter_fanficauthorsnet
from . import adapter_fireflyfansnet
from . import adapter_sebklainenet
from . import adapter_shriftweborgbfa
from . import adapter_trekfanfictionnet
from . import adapter_wuxiaworldcom
from . import adapter_wwwlushstoriescom
from . import adapter_wwwutopiastoriescom
from . import adapter_sinfuldreamscomunicornfic
from . import adapter_sinfuldreamscomwhisperedmuse
from . import adapter_sinfuldreamscomwickedtemptation
from . import adapter_asianfanficscom
from . import adapter_webnovelcom
from . import adapter_deandamagecom
from . import adapter_mttjustoncenet
from . import adapter_narutoficorg
from . import adapter_starskyhutcharchivenet
from . import adapter_swordborderlineangelcom
from . import adapter_tasteofpoisoninkubationnet
from . import adapter_thedelphicexpansecom
from . import adapter_thundercatsfansorg
from . import adapter_wwwaneroticstorycom
from . import adapter_gravitytalescom
from . import adapter_lcfanficcom
from . import adapter_noveltrovecom
from . import adapter_inkbunnynet
from . import adapter_alternatehistorycom
from . import adapter_wattpadcom
from . import adapter_novelonlinefullcom
from . import adapter_wwwnovelallcom
from . import adapter_wuxiaworldco
from . import adapter_harrypotterfanfictioncom
from . import adapter_hentaifoundrycom
from . import adapter_mugglenetfanfictioncom
from . import adapter_swiorgru
from . import adapter_fanficsme
from . import adapter_archivehpfanfictalkcom
from . import adapter_scifistoriescom
from . import adapter_silmarillionwritersguildorg
## This bit of complexity allows adapters to be added just by importing them.
## It eliminates the long if/else clauses we used to need
## to pick out the adapter.
## List of registered site adapters.
__class_list = []
__domain_map = {}
def imports():
out = []
for name, val in globals().items():
if isinstance(val, types.ModuleType):
out.append(val.__name__)
return out
for x in imports():
if "fanficfare.adapters.adapter_" in x:
#print x
cls = sys.modules[x].getClass()
__class_list.append(cls)
for site in cls.getAcceptDomains():
l = __domain_map.get(site,[])
l.append(cls)
__domain_map[site]=l
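# Note added for clarity (hypothetical module name): registering a new site only
# requires adding one import above, e.g.
#     from . import adapter_examplesitecom
# The loop above then finds the module by its "fanficfare.adapters.adapter_"
# prefix, calls its getClass(), and records the class under each accepted domain.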
def get_url_chapter_range(url_in):
# Allow chapter range with URL.
# like test1.com?sid=5[4-6] or [4,6]
mc = re.match(r"^(?P<url>.*?)(?:\[(?P<begin>\d+)?(?P<comma>[,-])?(?P<end>\d+)?\])?$",url_in)
#print("url:(%s) begin:(%s) end:(%s)"%(mc.group('url'),mc.group('begin'),mc.group('end')))
url = mc.group('url')
ch_begin = mc.group('begin')
ch_end = mc.group('end')
if ch_begin and not mc.group('comma'):
ch_end = ch_begin
return url,ch_begin,ch_end
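# Illustrative examples added for clarity (hypothetical URLs):
#   get_url_chapter_range('http://test1.com?sid=5')       -> ('http://test1.com?sid=5', None, None)
#   get_url_chapter_range('http://test1.com?sid=5[4-6]')  -> ('http://test1.com?sid=5', '4', '6')
#   get_url_chapter_range('http://test1.com?sid=5[4]')    -> ('http://test1.com?sid=5', '4', '4')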
def getNormalStoryURL(url):
r = getNormalStoryURLSite(url)
if r:
return r[0]
else:
return None
def getNormalStoryURLSite(url):
# print("getNormalStoryURLSite:%s"%url)
if not getNormalStoryURL.__dummyconfig:
getNormalStoryURL.__dummyconfig = configurable.Configuration(["test1.com"],"EPUB",lightweight=True)
# pulling up an adapter is pretty low over-head. If
# it fails, it's a bad url.
try:
adapter = getAdapter(getNormalStoryURL.__dummyconfig,url)
url = adapter.url
site = adapter.getSiteDomain()
del adapter
return (url,site)
except:
return None
# kludgey function static/singleton
getNormalStoryURL.__dummyconfig = None
def getAdapter(config,url,anyurl=False):
#logger.debug("trying url:"+url)
(cls,fixedurl) = _get_class_for(url)
#logger.debug("fixedurl:"+fixedurl)
if cls:
if anyurl:
fixedurl = cls.getSiteExampleURLs().split()[0]
adapter = cls(config,fixedurl) # raises InvalidStoryURL
return adapter
# No adapter found.
raise exceptions.UnknownSite( url, [cls.getSiteDomain() for cls in __class_list] )
def getSiteSections():
# doesn't include base sections. Sections rather than site DNS because of squidge/peja
return [cls.getConfigSection() for cls in __class_list]
def getConfigSections():
# does include base sections.
sections = set()
for cls in __class_list:
sections.update(cls.getConfigSections())
return sections
def get_bulk_load_sites():
# for now, all eFiction Base adapters are assumed to allow bulk_load.
sections = set()
for cls in [x for x in __class_list if issubclass(x,base_efiction_adapter.BaseEfictionAdapter) ]:
sections.update( [ x.replace('www.','') for x in cls.getConfigSections() ] )
return sections
def getSiteExamples():
l=[]
for cls in sorted(__class_list, key=lambda x : x.getConfigSection()):
l.append((cls.getConfigSection(),cls.getSiteExampleURLs().split()))
return l
def getConfigSectionsFor(url):
(cls,fixedurl) = _get_class_for(url)
if cls:
return cls.getConfigSections()
# No adapter found.
raise exceptions.UnknownSite( url, [cls.getSiteDomain() for cls in __class_list] )
def _get_class_for(url):
## fix up leading protocol.
fixedurl = re.sub(r"(?i)^[htp]+(s?)[:/]+",r"http\1://",url.strip())
if fixedurl.startswith("//"):
fixedurl = "http:%s"%url
if not fixedurl.startswith("http"):
fixedurl = "http://%s"%url
## remove any trailing '#' locations, except for #post-12345 for
## XenForo
if not "#post-" in fixedurl:
fixedurl = re.sub(r"#.*$","",fixedurl)
parsedUrl = urlparse(fixedurl)
domain = parsedUrl.netloc.lower()
if( domain != parsedUrl.netloc ):
fixedurl = fixedurl.replace(parsedUrl.netloc,domain)
clslst = _get_classlist_fromlist(domain)
## assumes all adapters for a domain will have www or not have www
## but not mixed.
if not clslst and domain.startswith("www."):
domain = domain.replace("www.","")
#logger.debug("trying site:without www: "+domain)
clslst = _get_classlist_fromlist(domain)
fixedurl = re.sub(r"^http(s?)://www\.",r"http\1://",fixedurl)
if not clslst:
#logger.debug("trying site:www."+domain)
clslst =_get_classlist_fromlist("www."+domain)
fixedurl = re.sub(r"^http(s?)://",r"http\1://www.",fixedurl)
cls = None
if clslst:
if len(clslst) == 1:
cls = clslst[0]
elif len(clslst) > 1:
for c in clslst:
if c.getSiteURLFragment() in fixedurl:
cls = c
break
if cls:
fixedurl = cls.stripURLParameters(fixedurl)
return (cls,fixedurl)
def _get_classlist_fromlist(domain):
try:
return __domain_map[domain]
except KeyError:
pass # return none.
| 35.006042
| 107
| 0.754811
|
07045b1ad3592a1bfa53c3004a47ea7aa88c935a
| 91
|
py
|
Python
|
k8s/images/codalab/apps/authenz/admin.py
|
abdulari/codalab-competitions
|
fdfbb77ac62d56c6b4b9439935037f97ffcd1423
|
[
"Apache-2.0"
] | 333
|
2015-12-29T22:49:40.000Z
|
2022-03-27T12:01:57.000Z
|
k8s/images/codalab/apps/authenz/admin.py
|
abdulari/codalab-competitions
|
fdfbb77ac62d56c6b4b9439935037f97ffcd1423
|
[
"Apache-2.0"
] | 1,572
|
2015-12-28T21:54:00.000Z
|
2022-03-31T13:00:32.000Z
|
k8s/images/codalab/apps/authenz/admin.py
|
abdulari/codalab-competitions
|
fdfbb77ac62d56c6b4b9439935037f97ffcd1423
|
[
"Apache-2.0"
] | 107
|
2016-01-08T03:46:07.000Z
|
2022-03-16T08:43:57.000Z
|
from django.contrib import admin
from .models import ClUser
admin.site.register(ClUser)
| 13
| 32
| 0.802198
|
c73f5fd7ac98902fa516ac91084b383f12bc4e38
| 39
|
py
|
Python
|
.history/app/__init___20210927034948.py
|
GraceOswal/pitch-perfect
|
d781c6e0f55c11f2a5e5dceb952f6b2de3c47c3b
|
[
"MIT"
] | 2
|
2021-09-04T03:01:37.000Z
|
2021-09-04T03:06:38.000Z
|
.history/app/__init___20210927034948.py
|
GraceOswal/pitch-perfect
|
d781c6e0f55c11f2a5e5dceb952f6b2de3c47c3b
|
[
"MIT"
] | 1
|
2021-09-04T03:06:32.000Z
|
2021-09-04T03:08:26.000Z
|
.history/app/__init___20210927034948.py
|
GraceOswal/pitch-perfect
|
d781c6e0f55c11f2a5e5dceb952f6b2de3c47c3b
|
[
"MIT"
] | 1
|
2021-09-04T03:02:01.000Z
|
2021-09-04T03:02:01.000Z
|
from flask_sqlalchemy import SQLAlchemy
| 39
| 39
| 0.923077
|
463dba0f6e77400648951b355cf4bc041bd43e13
| 1,629
|
py
|
Python
|
quant/demo/ma20.py
|
yunfeiz/py_learnt
|
d4134d8e9f1caed2db2848f19474c15c1b36557e
|
[
"Apache-2.0"
] | null | null | null |
quant/demo/ma20.py
|
yunfeiz/py_learnt
|
d4134d8e9f1caed2db2848f19474c15c1b36557e
|
[
"Apache-2.0"
] | null | null | null |
quant/demo/ma20.py
|
yunfeiz/py_learnt
|
d4134d8e9f1caed2db2848f19474c15c1b36557e
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on 2017-5-30
@author: 3xtrees
'''
# coding: UTF-8
import tushare as ts
'''
ma20 strategy
'''
def parse(stock, start_date):
'''process stock'''
is_buy = 0
buy_val = []
buy_date = []
sell_val = []
sell_date = []
df = ts.get_hist_data(stock, start=start_date)
ma20 = df[u'ma20']
close = df[u'close']
rate = 1.0
idx = len(ma20)
while idx > 0:
idx -= 1
close_val = close[idx]
ma20_val = ma20[idx]
if close_val > ma20_val:
if is_buy == 0:
is_buy = 1
buy_val.append(close_val)
buy_date.append(close.keys()[idx])
elif close_val < ma20_val:
if is_buy == 1:
is_buy = 0
sell_val.append(close_val)
sell_date.append(close.keys()[idx])
print ("stock number: %s" % stock)
print ("buy count : %d" % len(buy_val))
print ("sell count : %d" % len(sell_val))
for i in range(len(sell_val)):
rate = rate * (sell_val[i] * (1 - 0.002) / buy_val[i])
print ("buy date : %s, buy price : %.2f" % (buy_date[i], buy_val[i]))
print ("sell date: %s, sell price: %.2f" % (sell_date[i], sell_val[i]))
print ("rate: %.2f" % rate)
pass
if __name__ == '__main__':
stock_pool = ['601668']
start_date_list = ['2014-05-29', '2015-05-29', '2016-05-29']
for stock in stock_pool:
for start_date in start_date_list:
parse(stock, start_date)
| 29.089286
| 81
| 0.493554
|
3d57db52b8a469e07e28b76539eca38b35de9525
| 555
|
py
|
Python
|
plasTeX/Imagers/gsdvipng.py
|
perfectbark/LaTex2Docx
|
e32f9dcc59cce7bea4e7b114687b2300c623d8c0
|
[
"MIT"
] | 23
|
2019-06-16T06:00:39.000Z
|
2022-03-29T14:44:32.000Z
|
plasTeX/Imagers/gsdvipng.py
|
hao-han/LaTex2Docx
|
e32f9dcc59cce7bea4e7b114687b2300c623d8c0
|
[
"MIT"
] | null | null | null |
plasTeX/Imagers/gsdvipng.py
|
hao-han/LaTex2Docx
|
e32f9dcc59cce7bea4e7b114687b2300c623d8c0
|
[
"MIT"
] | 12
|
2019-05-27T06:32:06.000Z
|
2022-03-15T10:22:07.000Z
|
#!/usr/bin/env python
import gspdfpng, os, sys
gs = 'gs'
if sys.platform.startswith('win'):
gs = 'gswin32c'
class GSDVIPNG(gspdfpng.GSPDFPNG):
""" Imager that uses gs to convert dvi to png """
compiler = 'latex'
verification = '(%s --help && dvips --help)' % gs
def executeConverter(self, output):
open('images.dvi', 'w').write(output.read())
rc = os.system('dvips -o images.ps images.dvi')
if rc: return rc, None
return gspdfpng.GSPDFPNG.executeConverter(self, open('images.ps'))
Imager = GSDVIPNG
| 26.428571
| 74
| 0.634234
|
b165d81d264409e4f163bbec119f15d6cb16c5fe
| 3,217
|
py
|
Python
|
vendas/views/cliente_view.py
|
SolutionUp/SolutionUP-System
|
4f341df1a51a43b350e626d47abd207a55edb94d
|
[
"MIT"
] | 4
|
2021-08-30T01:45:46.000Z
|
2022-01-08T18:05:10.000Z
|
vendas/views/cliente_view.py
|
DiskFar/DiskFar-System
|
dfd687e55cbca03118656d14761eb5de6c5a58a9
|
[
"MIT"
] | 6
|
2021-08-29T03:26:48.000Z
|
2021-09-24T00:13:11.000Z
|
vendas/views/cliente_view.py
|
DiskFar/DiskFar-System
|
dfd687e55cbca03118656d14761eb5de6c5a58a9
|
[
"MIT"
] | 1
|
2021-08-16T21:21:34.000Z
|
2021-08-16T21:21:34.000Z
|
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import ListView, DetailView
from django.contrib import messages
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from vendas.forms import FormCliente
from vendas.models import Clientes
class ClienteListView(LoginRequiredMixin, ListView):
model = Clientes
paginate_by = 100
template_name = 'cliente/cliente_list.html'
def get_queryset(self):
query = self.request.GET.get('q') or ''
object_list = self.model.objects.filter(
Q(nome__icontains=query) |
Q(cpf__icontains=query)
)
return object_list
class ClienteDetailView(LoginRequiredMixin, DetailView):
model = Clientes
@login_required
def adicionar_cliente(request):
if request.method == 'POST':
form_cliente = FormCliente(request.POST)
if form_cliente.is_valid():
if form_cliente.cleaned_data['cpf'] == None or len(form_cliente.cleaned_data['cpf']) == 11:
if '@' in form_cliente.cleaned_data['email'] and '.com' in form_cliente.cleaned_data['email']:
form_cliente.save()
messages.add_message(request, messages.SUCCESS, 'Cliente cadastrado!', extra_tags='success')
return redirect('/clientes/adicionar')
else:
messages.add_message(request, messages.ERROR, 'Erro no formulário, tente novamente!', extra_tags='danger')
return render(request, 'cliente/cliente_add.html', {'form': form_cliente})
else:
messages.add_message(request, messages.ERROR, 'Erro no formulário, tente novamente!', extra_tags='danger')
return render(request, 'cliente/cliente_add.html', {'form': form_cliente})
else:
messages.add_message(request, messages.ERROR, 'Erro no formulário, tente novamente!', extra_tags='danger')
return render(request, 'cliente/cliente_add.html', {'form': form_cliente})
else:
form_cliente = FormCliente()
return render(request, 'cliente/cliente_add.html', {'form': form_cliente})
@login_required
def remover_cliente(request, id):
if request.method == 'GET':
cliente = Clientes.objects.get(id=id)
cliente.delete()
return redirect('/clientes')
else:
return render(request, 'cliente/cliente_list.html')
@login_required
def alterar_cliente(request, id):
instance = get_object_or_404(Clientes, id=id)
form_cliente = FormCliente(request.POST or None, instance=instance)
if request.method == 'POST':
if form_cliente.is_valid():
form_cliente.save()
messages.add_message(request, messages.SUCCESS, 'Cliente alterado!', extra_tags='success')
return redirect('/clientes')
else:
messages.add_message(request, messages.ERROR, 'Erro no formulário, tente novamente!', extra_tags='danger')
return render(request, 'cliente/cliente_add.html', {'form': form_cliente})
else:
return render(request, 'cliente/cliente_add.html', {'form': form_cliente})
| 44.068493
| 126
| 0.6705
|
8cdd85dc12846fea47281681d10d0adeadd0c309
| 539
|
py
|
Python
|
tools/stopwords/freqlist2stoplist.py
|
TeMU-BSC/jusText-plusplus
|
14ad3eda6ecc5a98833f1ab333583539dfb64da5
|
[
"BSD-2-Clause"
] | null | null | null |
tools/stopwords/freqlist2stoplist.py
|
TeMU-BSC/jusText-plusplus
|
14ad3eda6ecc5a98833f1ab333583539dfb64da5
|
[
"BSD-2-Clause"
] | null | null | null |
tools/stopwords/freqlist2stoplist.py
|
TeMU-BSC/jusText-plusplus
|
14ad3eda6ecc5a98833f1ab333583539dfb64da5
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
import sys
import re
CORPUS_COVERAGE = 0.5
#non_alphabetical = re.compile(r'(?!\s)(?:\W|\d|_)', re.U)
alphabetical = re.compile(r'(?![0-9_])\w', re.U)
sum_rel_freq = 0
for line in sys.stdin:
uline = unicode(line, 'utf-8', errors='ignore')
word, dummy_freq, rel_freq, dummy_sum_rel_freq = uline.strip().split()
if alphabetical.search(word):
rel_freq = float(rel_freq)
        sum_rel_freq += rel_freq
print word.encode('utf-8')
if sum_rel_freq >= CORPUS_COVERAGE:
break
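An illustrative sketch (sample rows invented) of the stdin format this script expects: whitespace-separated word, absolute frequency, relative frequency and cumulative relative frequency per line, filtered the same way until the 50% corpus-coverage threshold is reached.
import io
import re
sample = io.StringIO(
    "the 1200 0.30 0.30\n"
    "of 800 0.20 0.50\n"
    "42 300 0.075 0.575\n"
)
alphabetical_words = re.compile(r'(?![0-9_])\w', re.U)
coverage = 0.0
for line in sample:
    word, _freq, rel_freq, _cum = line.split()
    if alphabetical_words.search(word):
        coverage += float(rel_freq)
        print(word)          # prints "the", then "of"
        if coverage >= 0.5:  # CORPUS_COVERAGE threshold reached
            break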
| 26.95
| 74
| 0.638219
|
3590bf0b226bc316b4277b59a724675d9c71fed8
| 1,367
|
py
|
Python
|
web_dynamic/3-hbnb.py
|
DanielBaquero28/AirBnB_clone_v4
|
2bb1cae776f599674d07daed29f7cdc9893c7ca2
|
[
"MIT"
] | 1
|
2017-09-29T16:36:55.000Z
|
2017-09-29T16:36:55.000Z
|
web_dynamic/3-hbnb.py
|
DanielBaquero28/AirBnB_clone_v4
|
2bb1cae776f599674d07daed29f7cdc9893c7ca2
|
[
"MIT"
] | null | null | null |
web_dynamic/3-hbnb.py
|
DanielBaquero28/AirBnB_clone_v4
|
2bb1cae776f599674d07daed29f7cdc9893c7ca2
|
[
"MIT"
] | 2
|
2017-10-25T01:13:11.000Z
|
2022-02-15T02:10:44.000Z
|
#!/usr/bin/python3
"""
Flask App that integrates with AirBnB static HTML Template
"""
from flask import Flask, render_template, url_for
from models import storage
import uuid
import os
# flask setup
app = Flask(__name__)
app.url_map.strict_slashes = False
host = os.getenv("HBNB_API_HOST", "0.0.0.0")
port = os.getenv("HBNB_API_PORT", "5000")
# begin flask page rendering
@app.teardown_appcontext
def teardown_db(exception):
"""
after each request, this method calls .close() (i.e. .remove()) on
the current SQLAlchemy Session
"""
storage.close()
@app.route('/3-hbnb/')
def hbnb_filters(the_id=None):
"""
handles request to custom template with states, cities & amentities
"""
state_objs = storage.all('State').values()
states = dict([state.name, state] for state in state_objs)
amens = storage.all('Amenity').values()
places = storage.all('Place').values()
users = dict([user.id, "{} {}".format(user.first_name, user.last_name)]
for user in storage.all('User').values())
cache_id = str(uuid.uuid4())
return render_template('3-hbnb.html',
states=states,
amens=amens,
places=places,
users=users, cache_id=cache_id)
if __name__ == "__main__":
app.run(host=host, port=port)
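A standalone sketch of the cache-busting pattern behind cache_id: a fresh UUID is passed to the template on every request so templated asset URLs change and stale browser caches are bypassed. The template string and port below are illustrative, not taken from the project.
import uuid
from flask import Flask, render_template_string
demo = Flask(__name__)
PAGE = '<link rel="stylesheet" href="/static/style.css?{{ cache_id }}">'
@demo.route('/')
def index():
    # Fresh UUID per request -> different query string -> cache miss.
    return render_template_string(PAGE, cache_id=str(uuid.uuid4()))
if __name__ == '__main__':
    demo.run(host='0.0.0.0', port=5001)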
| 28.479167
| 75
| 0.631309
|
bf7170b7e7d7f3a7d15d0053cfda0f3c53c1e0b1
| 9,838
|
py
|
Python
|
deepracing_py/generate_image_poses.py
|
linklab-uva/deepracing
|
fc25c47658277df029e7399d295d97a75fe85216
|
[
"Apache-2.0"
] | 11
|
2020-06-29T15:21:37.000Z
|
2021-04-12T00:42:26.000Z
|
deepracing_py/generate_image_poses.py
|
linklab-uva/deepracing
|
fc25c47658277df029e7399d295d97a75fe85216
|
[
"Apache-2.0"
] | null | null | null |
deepracing_py/generate_image_poses.py
|
linklab-uva/deepracing
|
fc25c47658277df029e7399d295d97a75fe85216
|
[
"Apache-2.0"
] | 4
|
2019-01-23T23:36:57.000Z
|
2021-07-02T00:18:37.000Z
|
import numpy as np
import numpy.linalg as la
import scipy
import skimage
import PIL
from PIL import Image as PILImage
import TimestampedPacketMotionData_pb2
import PoseSequenceLabel_pb2
import TimestampedImage_pb2
import Vector3dStamped_pb2
import argparse
import os
import google.protobuf.json_format
import Pose3d_pb2
import cv2
import bisect
import FrameId_pb2
import scipy.interpolate
import deepracing.backend
import deepracing.pose_utils
from deepracing.protobuf_utils import getAllSessionPackets, getAllImageFilePackets, getAllMotionPackets, extractPose, extractVelocity, extractAngularVelocity
from tqdm import tqdm as tqdm
import yaml
import shutil
from scipy.spatial.transform import Rotation as Rot
from scipy.spatial.transform import RotationSpline as RotSpline
import json
from scipy.spatial import KDTree as KDTree
import matplotlib.pyplot as plt
import time
from deepracing import trackNames
def imageDataKey(data):
return data.timestamp
def poseSequenceLabelKey(label):
return label.car_pose.session_time
parser = argparse.ArgumentParser()
parser.add_argument("db_path", help="Path to root directory of DB", type=str)
parser.add_argument("--output_dir", help="Output directory for the labels. relative to the database folder", default="image_poses", required=False)
parser.add_argument("--zforward", help="Use the old-school z-forward convention for orientation of poses", action="store_true", required=False)
args = parser.parse_args()
zforward : bool = args.zforward
root_dir = args.db_path
with open(os.path.join(root_dir,"f1_dataset_config.yaml"),"r") as f:
config = yaml.load(f,Loader=yaml.SafeLoader)
use_json = config["use_json"]
motion_data_folder = os.path.join(root_dir,"udp_data","motion_packets")
image_folder = os.path.join(root_dir,"images")
session_folder = os.path.join(root_dir,"udp_data","session_packets")
session_packets = getAllSessionPackets(session_folder,use_json)
track_ids = [packet.udp_packet.m_trackId for packet in session_packets]
if(len(list(set(track_ids))) > 1):
raise ValueError("This script only works on sessions where the whole session was done on the same track.")
track_id = track_ids[0]
output_dir = os.path.join(root_dir, args.output_dir)
if os.path.isdir(output_dir):
shutil.rmtree(output_dir)
time.sleep(1.0)
os.makedirs(output_dir)
spectating_flags = [bool(packet.udp_packet.m_isSpectating) for packet in session_packets]
spectating = any(spectating_flags)
car_indices = [int(packet.udp_packet.m_spectatorCarIndex) for packet in session_packets]
car_indices_set = set(car_indices)
print(car_indices_set)
print(car_indices)
if spectating:
if len(car_indices_set)>1:
raise ValueError("Spectated datasets are only supported if you only spectate 1 car the entire time.")
else:
car_index = car_indices[0]
else:
car_index = None
image_tags = getAllImageFilePackets(image_folder, use_json)
motion_packets = getAllMotionPackets(motion_data_folder, use_json)
motion_packets = sorted(motion_packets, key=deepracing.timestampedUdpPacketKey)
session_times = np.array([packet.udp_packet.m_header.m_sessionTime for packet in motion_packets])
system_times = np.array([packet.timestamp/1000.0 for packet in motion_packets])
print(system_times)
print(session_times)
maxudptime = system_times[-1]
image_tags = [ tag for tag in image_tags if tag.timestamp/1000.0<(maxudptime) ]
image_tags = sorted(image_tags, key = imageDataKey)
image_timestamps = np.array([data.timestamp/1000.0 for data in image_tags])
first_image_time = image_timestamps[0]
print(first_image_time)
Imin = system_times>(first_image_time + 1.0)
firstIndex = np.argmax(Imin)
motion_packets = motion_packets[firstIndex:]
motion_packets = sorted(motion_packets, key=deepracing.timestampedUdpPacketKey)
session_times = np.array([packet.udp_packet.m_header.m_sessionTime for packet in motion_packets], dtype=np.float64)
unique_session_times, unique_session_time_indices = np.unique(session_times, return_index=True)
motion_packets = [motion_packets[i] for i in unique_session_time_indices]
motion_packets = sorted(motion_packets, key=deepracing.timestampedUdpPacketKey)
session_times = np.array([packet.udp_packet.m_header.m_sessionTime for packet in motion_packets], dtype=np.float64)
system_times = np.array([packet.timestamp/1000.0 for packet in motion_packets], dtype=np.float64)
print("Range of session times: [%f,%f]" %(session_times[0], session_times[-1]))
print("Range of udp system times: [%f,%f]" %(system_times[0], system_times[-1]))
print("Range of image system times: [%f,%f]" %(image_timestamps[0], image_timestamps[-1]))
poses = [extractPose(packet.udp_packet, car_index=car_index, zforward=zforward) for packet in motion_packets]
velocities = np.array([extractVelocity(packet.udp_packet, car_index=car_index) for packet in motion_packets])
positions = np.array([pose[0] for pose in poses])
position_diffs = np.diff(positions, axis=0)
position_diff_norms = la.norm(position_diffs, axis=1)
print("Diff norm vector has length %d: " % (len(position_diff_norms)))
quaternions = np.array([pose[1] for pose in poses])
rotations = Rot.from_quat(quaternions)
slope_session_time_fit, intercept_session_time_fit, rvalue, pvalue, stderr = scipy.stats.linregress(np.linspace(1,session_times.shape[0],session_times.shape[0]), session_times)
print("Slope and intercept of raw session times: [%f,%f]" %(slope_session_time_fit, intercept_session_time_fit))
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(system_times, session_times)
print("Slope and intercept of session time vs system time: [%f,%f]" %(slope, intercept))
print( "r value of session time vs system time: %f" % ( rvalue ) )
print( "r^2 value of session time vs system time: %f" % ( rvalue**2 ) )
image_session_timestamps = slope*image_timestamps + intercept
print("Range of image session times before clipping: [%f,%f]" %(image_session_timestamps[0], image_session_timestamps[-1]))
Iclip = (image_session_timestamps>(np.min(session_times) + 1.5)) * (image_session_timestamps<(np.max(session_times) - 1.5 ))
image_tags = [image_tags[i] for i in range(len(image_session_timestamps)) if Iclip[i]]
image_session_timestamps = image_session_timestamps[Iclip]
print("Range of image session times after clipping: [%f,%f]" %(image_session_timestamps[0], image_session_timestamps[-1]))
position_interpolant = scipy.interpolate.make_interp_spline(session_times, positions)
rotation_interpolant = RotSpline(session_times, rotations)
velocity_interpolant = scipy.interpolate.make_interp_spline(session_times, velocities)
interpolated_positions = position_interpolant(image_session_timestamps)
interpolated_velocities = velocity_interpolant(image_session_timestamps)
interpolated_rotations = rotation_interpolant(image_session_timestamps)
interpolated_quaternions = interpolated_rotations.as_quat()
if spectating:
interpolated_angular_velocities = rotation_interpolant(image_session_timestamps,order=1)
else:
angular_velocities = np.array([extractAngularVelocity(packet.udp_packet) for packet in motion_packets])
angular_velocity_interpolant = scipy.interpolate.make_interp_spline(session_times, angular_velocities)
interpolated_angular_velocities = angular_velocity_interpolant(image_session_timestamps)
fig = plt.figure()
plt.scatter(interpolated_positions[:,0], interpolated_positions[:,2])
plt.show()
plt.close("all")
print()
print(len(image_tags))
print(len(image_session_timestamps))
print(len(interpolated_positions))
print(len(interpolated_quaternions))
print(len(interpolated_angular_velocities))
print()
print("Linear map from system time to session time: session_time = %f*system_time + %f" %(slope,intercept))
print("Standard error: %f" %(std_err))
print("R^2: %f" %(r_value**2))
output_dict : dict = dict()
image_keys = []
for i in tqdm(range(len(image_tags))):
image_tag = image_tags[i]
key, extension = os.path.splitext(image_tag.image_file)
key = key.replace("\n","")
image_keys.append(key)
imagedict : dict = dict()
imagedict["position"] = interpolated_positions[i].tolist()
imagedict["session_time"] = image_session_timestamps[i]
imagedict["quaternion"] = interpolated_quaternions[i].tolist()
imagedict["linear_velocity"] = interpolated_velocities[i].tolist()
imagedict["angular_velocity"] = interpolated_angular_velocities[i].tolist()
output_dict[key] = imagedict
output_dir = os.path.join(root_dir, args.output_dir)
if os.path.isdir(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
image_files = os.path.join(output_dir, "image_files.txt")
with open(image_files, "w") as f:
f.writelines([key+"\n" for key in image_keys])
dictionary_file = os.path.join(output_dir, "image_poses.json")
with open(dictionary_file, "w") as f:
json.dump(output_dict, f, indent=3)
metadata : dict = {
"zforward" : zforward,
"trackname" : trackNames[track_ids[0]],
"networkgame" : session_packets[0].udp_packet.m_networkGame>0,
"spectating" : spectating
}
metadata_file = os.path.join(output_dir, "metadata.json")
with open(metadata_file, "w") as f:
json.dump(metadata, f, indent=3)
geometric_data_file = os.path.join(output_dir, "geometric_data.npz")
with open(geometric_data_file, "wb") as f:
np.savez(f, interpolated_positions=interpolated_positions, \
interpolated_quaternions = interpolated_quaternions, \
interpolated_velocities=interpolated_velocities, \
interpolated_angular_velocities=interpolated_angular_velocities, \
image_session_timestamps=image_session_timestamps, \
udp_positions=positions, \
udp_rotations=rotations.as_quat(), \
udp_velocities=velocities, \
udp_session_times=session_times, \
)
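A hedged sketch of reading back the artifacts this script writes (image_files.txt, image_poses.json, geometric_data.npz); only the output directory path is a placeholder.
import json
import os
import numpy as np
poses_dir = "/path/to/db/image_poses"  # placeholder path
with open(os.path.join(poses_dir, "image_files.txt")) as f:
    keys = [line.strip() for line in f if line.strip()]
with open(os.path.join(poses_dir, "image_poses.json")) as f:
    poses = json.load(f)
with np.load(os.path.join(poses_dir, "geometric_data.npz")) as data:
    positions = data["interpolated_positions"]      # (N, 3)
    quaternions = data["interpolated_quaternions"]  # (N, 4), scipy x,y,z,w order
first = poses[keys[0]]
print(first["session_time"], first["position"])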
| 43.530973
| 176
| 0.78085
|
ecaa7438c7365a3f5f22e50068ea6bd8ea03e08d
| 2,164
|
py
|
Python
|
do_data/getter.py
|
justinhchae/pandas_project
|
233532d84b8af0c0064ca170a01320ce0d83e4eb
|
[
"MIT"
] | null | null | null |
do_data/getter.py
|
justinhchae/pandas_project
|
233532d84b8af0c0064ca170a01320ce0d83e4eb
|
[
"MIT"
] | null | null | null |
do_data/getter.py
|
justinhchae/pandas_project
|
233532d84b8af0c0064ca170a01320ce0d83e4eb
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os
from do_mods.modify_columns import ModifyColumns
class Reader():
def __init__(self, folder='data', display_all_cols=True):
self.folder = folder
self.path = os.environ['PWD'] + os.sep + self.folder + os.sep
self.modify_columns = ModifyColumns()
if display_all_cols:
pd.set_option('display.max_columns', None)
def to_df(self
, filename=None
, index_col=None
, usecols=None
, dtype=None
, preview=True
, echo=True
, clean=True
):
if not isinstance(filename, str):
return "Filename should be a string"
if filename:
csv = '.csv'
pickle = '.pickle'
if csv in filename or pickle not in filename:
self.filename = filename
path = self.path + self.filename
if echo:
print('Reading From:', path)
print()
df = pd.read_csv(path
, index_col=index_col
, usecols=usecols
, dtype=dtype
, low_memory=False
)
if echo:
print('Read dataframe of length', len(df))
print()
if preview:
print(df.head(2))
print()
if clean:
df = self.modify_columns.parse_cols(df)
return df
if pickle in filename:
self.filename = filename
path = self.path + self.filename
if echo:
print('Reading From:', path)
print()
df = pd.read_pickle(path)
if echo:
print('Read dataframe of length', len(df))
print()
if preview:
print(df.head(2))
print()
return df
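Hypothetical usage of Reader (the CSV name is invented; assumes a data/ folder exists under the PWD path the class builds):
if __name__ == '__main__':
    reader = Reader(folder='data', display_all_cols=True)
    df = reader.to_df('example.csv', preview=True, echo=True, clean=True)
    print(len(df))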
| 27.05
| 69
| 0.424214
|
6043ff82b2c7796f1a77b5003c747e1882857673
| 8,106
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20200701/get_network_security_group.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20200701/get_network_security_group.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20200701/get_network_security_group.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNetworkSecurityGroupResult',
'AwaitableGetNetworkSecurityGroupResult',
'get_network_security_group',
]
@pulumi.output_type
class GetNetworkSecurityGroupResult:
"""
NetworkSecurityGroup resource.
"""
def __init__(__self__, default_security_rules=None, etag=None, flow_logs=None, location=None, name=None, network_interfaces=None, provisioning_state=None, resource_guid=None, security_rules=None, subnets=None, tags=None, type=None):
if default_security_rules and not isinstance(default_security_rules, list):
raise TypeError("Expected argument 'default_security_rules' to be a list")
pulumi.set(__self__, "default_security_rules", default_security_rules)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if flow_logs and not isinstance(flow_logs, list):
raise TypeError("Expected argument 'flow_logs' to be a list")
pulumi.set(__self__, "flow_logs", flow_logs)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_interfaces and not isinstance(network_interfaces, list):
raise TypeError("Expected argument 'network_interfaces' to be a list")
pulumi.set(__self__, "network_interfaces", network_interfaces)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if security_rules and not isinstance(security_rules, list):
raise TypeError("Expected argument 'security_rules' to be a list")
pulumi.set(__self__, "security_rules", security_rules)
if subnets and not isinstance(subnets, list):
raise TypeError("Expected argument 'subnets' to be a list")
pulumi.set(__self__, "subnets", subnets)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="defaultSecurityRules")
def default_security_rules(self) -> Sequence['outputs.SecurityRuleResponse']:
"""
The default security rules of network security group.
"""
return pulumi.get(self, "default_security_rules")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="flowLogs")
def flow_logs(self) -> Sequence['outputs.FlowLogResponse']:
"""
A collection of references to flow log resources.
"""
return pulumi.get(self, "flow_logs")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> Sequence['outputs.NetworkInterfaceResponse']:
"""
A collection of references to network interfaces.
"""
return pulumi.get(self, "network_interfaces")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the network security group resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
The resource GUID property of the network security group resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter(name="securityRules")
def security_rules(self) -> Optional[Sequence['outputs.SecurityRuleResponse']]:
"""
A collection of security rules of the network security group.
"""
return pulumi.get(self, "security_rules")
@property
@pulumi.getter
def subnets(self) -> Sequence['outputs.SubnetResponse']:
"""
A collection of references to subnets.
"""
return pulumi.get(self, "subnets")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetNetworkSecurityGroupResult(GetNetworkSecurityGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkSecurityGroupResult(
default_security_rules=self.default_security_rules,
etag=self.etag,
flow_logs=self.flow_logs,
location=self.location,
name=self.name,
network_interfaces=self.network_interfaces,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
security_rules=self.security_rules,
subnets=self.subnets,
tags=self.tags,
type=self.type)
def get_network_security_group(expand: Optional[str] = None,
network_security_group_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkSecurityGroupResult:
"""
Use this data source to access information about an existing resource.
:param str expand: Expands referenced resources.
:param str network_security_group_name: The name of the network security group.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['networkSecurityGroupName'] = network_security_group_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200701:getNetworkSecurityGroup', __args__, opts=opts, typ=GetNetworkSecurityGroupResult).value
return AwaitableGetNetworkSecurityGroupResult(
default_security_rules=__ret__.default_security_rules,
etag=__ret__.etag,
flow_logs=__ret__.flow_logs,
location=__ret__.location,
name=__ret__.name,
network_interfaces=__ret__.network_interfaces,
provisioning_state=__ret__.provisioning_state,
resource_guid=__ret__.resource_guid,
security_rules=__ret__.security_rules,
subnets=__ret__.subnets,
tags=__ret__.tags,
type=__ret__.type)
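A hypothetical call site inside a Pulumi program (resource names are placeholders; assumes the generated package re-exports this function at the module path above):
import pulumi
import pulumi_azure_nextgen.network.v20200701 as network
nsg = network.get_network_security_group(
    resource_group_name="my-resource-group",
    network_security_group_name="my-nsg",
)
pulumi.export("nsg_provisioning_state", nsg.provisioning_state)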
| 38.235849
| 236
| 0.660252
|
08f0e57920d720737ee644528ffad5ff2ae6cd9f
| 7,866
|
py
|
Python
|
juriscraper/lib/html_utils.py
|
Alex-Devoid/juriscraper
|
6ac2d5182dbf3aea14d918e645d582e9b42c9dd6
|
[
"BSD-2-Clause"
] | null | null | null |
juriscraper/lib/html_utils.py
|
Alex-Devoid/juriscraper
|
6ac2d5182dbf3aea14d918e645d582e9b42c9dd6
|
[
"BSD-2-Clause"
] | null | null | null |
juriscraper/lib/html_utils.py
|
Alex-Devoid/juriscraper
|
6ac2d5182dbf3aea14d918e645d582e9b42c9dd6
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
import re
import sys
from lxml import etree, html
from lxml.etree import XMLSyntaxError
from lxml.html import fromstring, html5parser, tostring
from lxml.html.clean import Cleaner
from six import text_type
from six.moves.html_parser import HTMLParser
from six.moves.urllib.parse import urlsplit, urlunsplit
try:
# Use cchardet for performance to detect the character encoding.
import cchardet as chardet
except ImportError:
import chardet
if sys.maxunicode == 65535:
from .log_tools import make_default_logger
logger = make_default_logger()
logger.warn("You are using a narrow build of Python, which is not "
"completely supported. See issue #188 for details.")
def get_xml_parsed_text(text):
return etree.fromstring(text)
def get_html_parsed_text(text):
return html.fromstring(text)
def get_html5_parsed_text(text):
"""Return content using the html5parser, ideal for faulty html.
This dance is slightly different than usual because it uses the
html5parser to first create an _Element object, then serialize it using
`tostring`, then parse *that* using the usual fromstring function. The
end result is that irregularities in the html are fixed by the
html5parser, and the usual lxml parser gives us the same API we are
used to.
:param text: The html of the document
:return: an lxml.HtmlElement object
"""
parsed = html5parser.document_fromstring(text.encode('utf-8'))
return fromstring(tostring(parsed, encoding='unicode'))
def get_table_column_text(html, cell_num, path_base=False):
path_cell = '//table//tr/td[%d]' % cell_num
path = path_base + path_cell if path_base else path_cell
return [cell.text_content().strip() for cell in html.xpath(path)]
def get_table_column_links(html, cell_num, path_base=False):
path_cell = '//table//tr/td[%d]//a/@href' % cell_num
path = path_base + path_cell if path_base else path_cell
return html.xpath(path)
def get_row_column_text(row, cell_num):
"""Return string cell value for specified column.
:param row: HtmlElement
:param cell_num: int
:return: string
"""
return row.xpath('.//td[%d]' % cell_num)[0].text_content().strip()
def get_row_column_links(row, cell_num):
"""Return string href value for link in specified column.
NOTE: if there are multiple links in the column, you might
need to write your own function.
:param row: HtmlElement
:param cell_num: int
:return: string
"""
return row.xpath('.//td[%d]//a/@href' % cell_num)[0]
def get_clean_body_content(content, remove_extra_tags=[]):
"""Parse out the body from an html string, clean it up, and send it along.
"""
remove_tags = ['a', 'body', 'font', 'noscript']
remove_tags.extend(remove_extra_tags)
cleaner = Cleaner(style=True,
remove_tags=remove_tags)
try:
return cleaner.clean_html(content)
except XMLSyntaxError:
return "Unable to extract the content from this file. Please try " \
"reading the original."
def get_visible_text(html_content):
html_tree = html.fromstring(html_content)
text = html_tree.xpath("""//text()[normalize-space() and not(parent::style |
parent::link |
parent::head |
parent::script)]""")
return " ".join(text)
def html_unescape(s):
h = HTMLParser()
return h.unescape(s)
def set_response_encoding(request):
"""Set the encoding if it isn't set already.
Use cchardet for added performance.
"""
if request:
# If the encoding is iso-8859-1, switch it to cp1252 (a superset)
if request.encoding == 'ISO-8859-1':
request.encoding = 'cp1252'
if request.encoding is None:
# Requests detects the encoding when the item is GET'ed using
# HTTP headers, and then when r.text is accessed, if the encoding
# hasn't been set by that point. By setting the encoding here, we
# ensure that it's done by cchardet, if it hasn't been done with
# HTTP headers. This way it is done before r.text is accessed
# (which would do it with vanilla chardet). This is a big
# performance boon, and can be removed once requests is upgraded
if isinstance(request.content, text_type):
as_bytes = request.content.encode()
request.encoding = chardet.detect(as_bytes)['encoding']
else:
request.encoding = chardet.detect(request.content)['encoding']
def clean_html(text):
""" Cleans up text before we make it into an HTML tree:
1. Nukes <![CDATA stuff.
2. Nukes XML encoding declarations
3. Replaces </br> with <br/>
4. Nukes invalid bytes in input
5. ?
"""
# Remove <![CDATA because it causes breakage in lxml.
text = re.sub(r'<!\[CDATA\[', u'', text)
text = re.sub(r'\]\]>', u'', text)
# Remove <?xml> declaration in Unicode objects, because it causes an
# error: "ValueError: Unicode strings with encoding declaration are not
# supported."
# Note that the error only occurs if the <?xml> tag has an "encoding"
# attribute, but we remove it in all cases, as there's no downside to
# removing it. This moves our encoding detection to chardet, rather than
# lxml.
if isinstance(text, text_type):
text = re.sub(r'^\s*<\?xml\s+.*?\?>', '', text)
# Fix invalid bytes in XML (http://stackoverflow.com/questions/8733233/)
# Note that this won't work completely on narrow builds of Python, which
# existed prior to Py3. Thus, we check if it's a narrow build, and adjust
# accordingly.
if sys.maxunicode == 65535:
text = re.sub(u'[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD]+',
u'', text)
else:
text = re.sub(u'[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD'
u'\U00010000-\U0010FFFF]+', u'', text)
return text
def fix_links_but_keep_anchors(link):
# Wrap the function below so that we have one that can be passed to
# lxml's rewrite_links method, which doesn't accept any parameters.
return fix_links_in_lxml_tree(link, keep_anchors=True)
def fix_links_in_lxml_tree(link, keep_anchors=False):
"""Fix links in an lxml tree.
:param keep_anchors: Whether to nuke anchors at the ends of links.
This function is called by the rewrite_links method of an lxml tree, and is
used to normalize links in a few ways. It makes links absolute, works
around buggy URLs and nukes anchors.
Example: html_tree.rewrite_links(fix_links_in_lxml_tree, base_href=my_url)
Some URLS, like the following, make no sense:
- https://www.appeals2.az.gov/../Decisions/CR20130096OPN.pdf.
^^^^ -- This makes no sense!
The fix is to remove any extra '/..' patterns at the beginning of the
path.
Others have annoying anchors on the end, like:
- http://example.com/path/#anchor
Note that lxml has a method generally for this purpose called
make_links_absolute, but we cannot use it because it does not work
around invalid relative URLS, nor remove anchors. This is a limitation
of Python's urljoin that will be fixed in Python 3.5 according to a bug
we filed: http://bugs.python.org/issue22118
"""
url_parts = urlsplit(link)
url = urlunsplit(
url_parts[:2] +
        (re.sub(r'^(/\.\.)+', '', url_parts.path),) +
url_parts[3:]
)
if keep_anchors:
return url
else:
return url.split('#')[0]
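A small self-check sketch (sample HTML invented; needs lxml and html5lib installed) exercising the html5 helper and the table-row accessors defined above:
if __name__ == '__main__':
    sample = """
    <table>
      <tr><td>Smith v. Jones</td><td><a href="/opinions/1.pdf">PDF</a>
    </table>
    """
    tree = get_html5_parsed_text(sample)
    row = tree.xpath('//table//tr')[0]
    print(get_row_column_text(row, 1))   # Smith v. Jones
    print(get_row_column_links(row, 2))  # /opinions/1.pdf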
| 36.082569
| 85
| 0.65192
|
0c7930500ca408365b15fae9fdb3d446d471abf8
| 18,572
|
py
|
Python
|
asposewordscloud/models/xaml_flow_save_options_data.py
|
rizwanniazigroupdocs/aspose-words-cloud-python
|
b943384a1e3c0710cc84df74119e6edf7356037e
|
[
"MIT"
] | null | null | null |
asposewordscloud/models/xaml_flow_save_options_data.py
|
rizwanniazigroupdocs/aspose-words-cloud-python
|
b943384a1e3c0710cc84df74119e6edf7356037e
|
[
"MIT"
] | null | null | null |
asposewordscloud/models/xaml_flow_save_options_data.py
|
rizwanniazigroupdocs/aspose-words-cloud-python
|
b943384a1e3c0710cc84df74119e6edf7356037e
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="xaml_flow_save_options_data.py">
# Copyright (c) 2020 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
import json
class XamlFlowSaveOptionsData(object):
"""Container class for xaml flow save options.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'dml3_d_effects_rendering_mode': 'str',
'dml_effects_rendering_mode': 'str',
'dml_rendering_mode': 'str',
'file_name': 'str',
'save_format': 'str',
'update_fields': 'bool',
'update_last_printed_property': 'bool',
'update_last_saved_time_property': 'bool',
'update_sdt_content': 'bool',
'zip_output': 'bool',
'images_folder': 'str',
'images_folder_alias': 'str'
}
attribute_map = {
'dml3_d_effects_rendering_mode': 'Dml3DEffectsRenderingMode',
'dml_effects_rendering_mode': 'DmlEffectsRenderingMode',
'dml_rendering_mode': 'DmlRenderingMode',
'file_name': 'FileName',
'save_format': 'SaveFormat',
'update_fields': 'UpdateFields',
'update_last_printed_property': 'UpdateLastPrintedProperty',
'update_last_saved_time_property': 'UpdateLastSavedTimeProperty',
'update_sdt_content': 'UpdateSdtContent',
'zip_output': 'ZipOutput',
'images_folder': 'ImagesFolder',
'images_folder_alias': 'ImagesFolderAlias'
}
def __init__(self, dml3_d_effects_rendering_mode=None, dml_effects_rendering_mode=None, dml_rendering_mode=None, file_name=None, save_format=None, update_fields=None, update_last_printed_property=None, update_last_saved_time_property=None, update_sdt_content=None, zip_output=None, images_folder=None, images_folder_alias=None): # noqa: E501
"""XamlFlowSaveOptionsData - a model defined in Swagger""" # noqa: E501
self._dml3_d_effects_rendering_mode = None
self._dml_effects_rendering_mode = None
self._dml_rendering_mode = None
self._file_name = None
self._save_format = None
self._update_fields = None
self._update_last_printed_property = None
self._update_last_saved_time_property = None
self._update_sdt_content = None
self._zip_output = None
self._images_folder = None
self._images_folder_alias = None
self.discriminator = None
if dml3_d_effects_rendering_mode is not None:
self.dml3_d_effects_rendering_mode = dml3_d_effects_rendering_mode
if dml_effects_rendering_mode is not None:
self.dml_effects_rendering_mode = dml_effects_rendering_mode
if dml_rendering_mode is not None:
self.dml_rendering_mode = dml_rendering_mode
if file_name is not None:
self.file_name = file_name
if save_format is not None:
self.save_format = save_format
if update_fields is not None:
self.update_fields = update_fields
if update_last_printed_property is not None:
self.update_last_printed_property = update_last_printed_property
if update_last_saved_time_property is not None:
self.update_last_saved_time_property = update_last_saved_time_property
if update_sdt_content is not None:
self.update_sdt_content = update_sdt_content
if zip_output is not None:
self.zip_output = zip_output
if images_folder is not None:
self.images_folder = images_folder
if images_folder_alias is not None:
self.images_folder_alias = images_folder_alias
@property
def dml3_d_effects_rendering_mode(self):
"""Gets the dml3_d_effects_rendering_mode of this XamlFlowSaveOptionsData. # noqa: E501
Gets or sets the value determining how 3D effects are rendered. # noqa: E501
:return: The dml3_d_effects_rendering_mode of this XamlFlowSaveOptionsData. # noqa: E501
:rtype: str
"""
return self._dml3_d_effects_rendering_mode
@dml3_d_effects_rendering_mode.setter
def dml3_d_effects_rendering_mode(self, dml3_d_effects_rendering_mode):
"""Sets the dml3_d_effects_rendering_mode of this XamlFlowSaveOptionsData.
Gets or sets the value determining how 3D effects are rendered. # noqa: E501
:param dml3_d_effects_rendering_mode: The dml3_d_effects_rendering_mode of this XamlFlowSaveOptionsData. # noqa: E501
:type: str
"""
allowed_values = ["Basic", "Advanced"] # noqa: E501
if not dml3_d_effects_rendering_mode.isdigit():
if dml3_d_effects_rendering_mode not in allowed_values:
raise ValueError(
"Invalid value for `dml3_d_effects_rendering_mode` ({0}), must be one of {1}" # noqa: E501
.format(dml3_d_effects_rendering_mode, allowed_values))
self._dml3_d_effects_rendering_mode = dml3_d_effects_rendering_mode
else:
self._dml3_d_effects_rendering_mode = allowed_values[int(dml3_d_effects_rendering_mode) if six.PY3 else long(dml3_d_effects_rendering_mode)]
@property
def dml_effects_rendering_mode(self):
"""Gets the dml_effects_rendering_mode of this XamlFlowSaveOptionsData. # noqa: E501
Gets or sets the value determining how DrawingML effects are rendered. { Simplified | None | Fine }. # noqa: E501
:return: The dml_effects_rendering_mode of this XamlFlowSaveOptionsData. # noqa: E501
:rtype: str
"""
return self._dml_effects_rendering_mode
@dml_effects_rendering_mode.setter
def dml_effects_rendering_mode(self, dml_effects_rendering_mode):
"""Sets the dml_effects_rendering_mode of this XamlFlowSaveOptionsData.
Gets or sets the value determining how DrawingML effects are rendered. { Simplified | None | Fine }. # noqa: E501
:param dml_effects_rendering_mode: The dml_effects_rendering_mode of this XamlFlowSaveOptionsData. # noqa: E501
:type: str
"""
self._dml_effects_rendering_mode = dml_effects_rendering_mode
@property
def dml_rendering_mode(self):
"""Gets the dml_rendering_mode of this XamlFlowSaveOptionsData. # noqa: E501
Gets or sets the option that controls how DrawingML shapes are rendered. # noqa: E501
:return: The dml_rendering_mode of this XamlFlowSaveOptionsData. # noqa: E501
:rtype: str
"""
return self._dml_rendering_mode
@dml_rendering_mode.setter
def dml_rendering_mode(self, dml_rendering_mode):
"""Sets the dml_rendering_mode of this XamlFlowSaveOptionsData.
Gets or sets the option that controls how DrawingML shapes are rendered. # noqa: E501
:param dml_rendering_mode: The dml_rendering_mode of this XamlFlowSaveOptionsData. # noqa: E501
:type: str
"""
self._dml_rendering_mode = dml_rendering_mode
@property
def file_name(self):
"""Gets the file_name of this XamlFlowSaveOptionsData. # noqa: E501
Gets or sets the name of destination file. # noqa: E501
:return: The file_name of this XamlFlowSaveOptionsData. # noqa: E501
:rtype: str
"""
return self._file_name
@file_name.setter
def file_name(self, file_name):
"""Sets the file_name of this XamlFlowSaveOptionsData.
Gets or sets the name of destination file. # noqa: E501
:param file_name: The file_name of this XamlFlowSaveOptionsData. # noqa: E501
:type: str
"""
self._file_name = file_name
@property
def save_format(self):
"""Gets the save_format of this XamlFlowSaveOptionsData. # noqa: E501
Gets or sets the format of save. # noqa: E501
:return: The save_format of this XamlFlowSaveOptionsData. # noqa: E501
:rtype: str
"""
return self._save_format
@save_format.setter
def save_format(self, save_format):
"""Sets the save_format of this XamlFlowSaveOptionsData.
Gets or sets the format of save. # noqa: E501
:param save_format: The save_format of this XamlFlowSaveOptionsData. # noqa: E501
:type: str
"""
self._save_format = save_format
@property
def update_fields(self):
"""Gets the update_fields of this XamlFlowSaveOptionsData. # noqa: E501
Gets or sets a value indicating whether fields should be updated before saving the document to a fixed page format. The default value is true. # noqa: E501
:return: The update_fields of this XamlFlowSaveOptionsData. # noqa: E501
:rtype: bool
"""
return self._update_fields
@update_fields.setter
def update_fields(self, update_fields):
"""Sets the update_fields of this XamlFlowSaveOptionsData.
Gets or sets a value indicating whether fields should be updated before saving the document to a fixed page format. The default value is true. # noqa: E501
:param update_fields: The update_fields of this XamlFlowSaveOptionsData. # noqa: E501
:type: bool
"""
self._update_fields = update_fields
@property
def update_last_printed_property(self):
"""Gets the update_last_printed_property of this XamlFlowSaveOptionsData. # noqa: E501
Gets or sets a value indicating whether the Aspose.Words.Properties.BuiltInDocumentProperties.LastPrinted property is updated before saving. # noqa: E501
:return: The update_last_printed_property of this XamlFlowSaveOptionsData. # noqa: E501
:rtype: bool
"""
return self._update_last_printed_property
@update_last_printed_property.setter
def update_last_printed_property(self, update_last_printed_property):
"""Sets the update_last_printed_property of this XamlFlowSaveOptionsData.
Gets or sets a value indicating whether the Aspose.Words.Properties.BuiltInDocumentProperties.LastPrinted property is updated before saving. # noqa: E501
:param update_last_printed_property: The update_last_printed_property of this XamlFlowSaveOptionsData. # noqa: E501
:type: bool
"""
self._update_last_printed_property = update_last_printed_property
@property
def update_last_saved_time_property(self):
"""Gets the update_last_saved_time_property of this XamlFlowSaveOptionsData. # noqa: E501
Gets or sets a value indicating whether the Aspose.Words.Properties.BuiltInDocumentProperties.LastSavedTime property is updated before saving. # noqa: E501
:return: The update_last_saved_time_property of this XamlFlowSaveOptionsData. # noqa: E501
:rtype: bool
"""
return self._update_last_saved_time_property
@update_last_saved_time_property.setter
def update_last_saved_time_property(self, update_last_saved_time_property):
"""Sets the update_last_saved_time_property of this XamlFlowSaveOptionsData.
Gets or sets a value indicating whether the Aspose.Words.Properties.BuiltInDocumentProperties.LastSavedTime property is updated before saving. # noqa: E501
:param update_last_saved_time_property: The update_last_saved_time_property of this XamlFlowSaveOptionsData. # noqa: E501
:type: bool
"""
self._update_last_saved_time_property = update_last_saved_time_property
@property
def update_sdt_content(self):
"""Gets the update_sdt_content of this XamlFlowSaveOptionsData. # noqa: E501
Gets or sets a value indicating whether content of StructuredDocumentTag is updated before saving. # noqa: E501
:return: The update_sdt_content of this XamlFlowSaveOptionsData. # noqa: E501
:rtype: bool
"""
return self._update_sdt_content
@update_sdt_content.setter
def update_sdt_content(self, update_sdt_content):
"""Sets the update_sdt_content of this XamlFlowSaveOptionsData.
Gets or sets a value indicating whether content of StructuredDocumentTag is updated before saving. # noqa: E501
:param update_sdt_content: The update_sdt_content of this XamlFlowSaveOptionsData. # noqa: E501
:type: bool
"""
self._update_sdt_content = update_sdt_content
@property
def zip_output(self):
"""Gets the zip_output of this XamlFlowSaveOptionsData. # noqa: E501
Gets or sets a value indicating whether to zip output or not. The default value is false. # noqa: E501
:return: The zip_output of this XamlFlowSaveOptionsData. # noqa: E501
:rtype: bool
"""
return self._zip_output
@zip_output.setter
def zip_output(self, zip_output):
"""Sets the zip_output of this XamlFlowSaveOptionsData.
Gets or sets a value indicating whether to zip output or not. The default value is false. # noqa: E501
:param zip_output: The zip_output of this XamlFlowSaveOptionsData. # noqa: E501
:type: bool
"""
self._zip_output = zip_output
@property
def images_folder(self):
"""Gets the images_folder of this XamlFlowSaveOptionsData. # noqa: E501
Gets or sets the physical folder where images are saved when exporting. # noqa: E501
:return: The images_folder of this XamlFlowSaveOptionsData. # noqa: E501
:rtype: str
"""
return self._images_folder
@images_folder.setter
def images_folder(self, images_folder):
"""Sets the images_folder of this XamlFlowSaveOptionsData.
Gets or sets the physical folder where images are saved when exporting. # noqa: E501
:param images_folder: The images_folder of this XamlFlowSaveOptionsData. # noqa: E501
:type: str
"""
self._images_folder = images_folder
@property
def images_folder_alias(self):
"""Gets the images_folder_alias of this XamlFlowSaveOptionsData. # noqa: E501
Gets or sets the name of the folder used to construct image URIs. # noqa: E501
:return: The images_folder_alias of this XamlFlowSaveOptionsData. # noqa: E501
:rtype: str
"""
return self._images_folder_alias
@images_folder_alias.setter
def images_folder_alias(self, images_folder_alias):
"""Sets the images_folder_alias of this XamlFlowSaveOptionsData.
Gets or sets the name of the folder used to construct image URIs. # noqa: E501
:param images_folder_alias: The images_folder_alias of this XamlFlowSaveOptionsData. # noqa: E501
:type: str
"""
self._images_folder_alias = images_folder_alias
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[self.attribute_map[attr]] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[self.attribute_map[attr]] = value.to_dict()
elif isinstance(value, dict):
result[self.attribute_map[attr]] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[self.attribute_map[attr]] = value
return json.dumps(result)
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, XamlFlowSaveOptionsData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
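An illustrative construction of the options container above and its JSON serialization; the field values are arbitrary examples, not API-verified defaults:
if __name__ == '__main__':
    options = XamlFlowSaveOptionsData(
        file_name='output.xaml',
        save_format='xamlflow',
        images_folder='images',
        zip_output=False,
    )
    print(options.to_json())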
| 40.907489
| 346
| 0.669879
|
7ad12d58bc0a030dbaee99cb2cc5c37b70671b2a
| 23,619
|
py
|
Python
|
model.py
|
biancaAlexandru/painting
|
de442174687f477c743355009f74dd3b05a0e82e
|
[
"Apache-2.0"
] | null | null | null |
model.py
|
biancaAlexandru/painting
|
de442174687f477c743355009f74dd3b05a0e82e
|
[
"Apache-2.0"
] | null | null | null |
model.py
|
biancaAlexandru/painting
|
de442174687f477c743355009f74dd3b05a0e82e
|
[
"Apache-2.0"
] | null | null | null |
import numba
import torch
import torchvision
from scipy import stats
import numpy as np
from torch import nn
import utils
@numba.jit(nopython=True)
def make_input_tensor(input_tensor, new_aug_lidar_cam_coords, bin_idxs, pillar_idxs):
"""
:return input_tensor: (10, P, N) np.array passed into conv layer.
"""
max_pts_per_pillar = 100
num_nonempty_pillars = pillar_idxs.shape[0]
for i in range(num_nonempty_pillars):
condition = bin_idxs == pillar_idxs[i]
condition = (condition[:, 0] & condition[:, 1])
points = new_aug_lidar_cam_coords[condition][
:max_pts_per_pillar] # all points w/ same bin idx as pillar_idxs[i]
points = points.T
num_points = points.shape[1]
input_tensor[:, i, :num_points] = points
return input_tensor
class PFNv2(nn.Module):
def __init__(self):
super(PFNv2, self).__init__()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.max_pillars = 12000
self.max_pts_per_pillar = 100
self.xrange = (-40, 40)
self.zrange = (0, 80)
self.conv1 = nn.Conv2d(10, 64, kernel_size=1, bias=False) # output is (batch, 64, P, N) tensor
self.bn1 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
def forward(self, lidar):
"""
:param lidar: list of tensors. (N_points, 5) in camera coordinates (x,y,z,r,class)
"""
assert isinstance(lidar, list)
assert len(lidar[0].shape) == 2 # (points, 5)
batch_size = len(lidar)
input_tensor = torch.zeros(batch_size, 10, self.max_pillars, self.max_pts_per_pillar).to(self.device)
encoded_bev = torch.zeros(batch_size, 64, 500, 500).to(self.device)
pillar_idxs = []
for i in range(batch_size):
l = np.array(lidar[i].cpu())
new_aug_lidar_cam_coords, bin_idxs, pillar_idxs_ = self.augment_points(l)
pillar_idxs.append(pillar_idxs_)
input_tensor_ = np.zeros((10, self.max_pillars, self.max_pts_per_pillar))
input_tensor_ = make_input_tensor(input_tensor_, new_aug_lidar_cam_coords, bin_idxs, pillar_idxs_)
input_tensor[i] = torch.from_numpy(input_tensor_).to(self.device)
x = nn.functional.relu(self.bn1(self.conv1(input_tensor))) # (batch, 64, P, N)
encoded_pillars, _ = x.max(dim=-1) # (batch, 64, P)
for i in range(batch_size):
encoded_bev_ = self.scatter_encoded_pillars_to_bev(encoded_pillars[i], pillar_idxs[i])
encoded_bev[i] = encoded_bev_
return encoded_bev # (batch, 64, 500, 500)
def augment_points(self, augmented_lidar_cam_coords):
"""
Converts (x,y,z,r,class) to (x,y,z,r,class,xc,yc,zc,xp,zp)
"""
points_in_xrange = (-40 < augmented_lidar_cam_coords[:, 0]) & (augmented_lidar_cam_coords[:, 0] < 40)
points_in_zrange = (0 < augmented_lidar_cam_coords[:, 2]) & (augmented_lidar_cam_coords[:, 2] < 70.4)
augmented_lidar_cam_coords = augmented_lidar_cam_coords[points_in_xrange & points_in_zrange]
new_aug_lidar_cam_coords = np.zeros((augmented_lidar_cam_coords.shape[0], 10))
new_aug_lidar_cam_coords[:, :5] = augmented_lidar_cam_coords
xedges = np.linspace(-40, 40, 501, dtype=np.float32)
# 80 first because a point 80m from ego car is in top row of bev img (row 0)
zedges = np.linspace(80, 0, 501, dtype=np.float32)
x = augmented_lidar_cam_coords[:, 0] # left/right
y = augmented_lidar_cam_coords[:, 1] # y in cam coords (+ is down, - is up)
z = augmented_lidar_cam_coords[:, 2] # front/back
x_inds = np.digitize(x, xedges).reshape(-1, 1) - 1 # subtract 1 to get 0 based indexing
z_inds = np.digitize(z, zedges).reshape(-1, 1) - 1 # idx into rows of bev img
bin_idxs = np.hstack((z_inds, x_inds)) # z first because it corresponds to rows of the bev
# mean of x vals of points in each bin
ret_x = stats.binned_statistic_2d(z, x, x, 'mean', bins=[np.flip(zedges), xedges])
ret_y = stats.binned_statistic_2d(z, x, y, 'mean', bins=[np.flip(zedges), xedges])
ret_z = stats.binned_statistic_2d(z, x, z, 'mean', bins=[np.flip(zedges), xedges])
# since need to flip zedges (row bins) to make function work, need to flip output by rows
x_mean = np.flip(ret_x.statistic, axis=0)
y_mean = np.flip(ret_y.statistic, axis=0)
# mean of all z values in each bev img 'pixel', NaN at cells with no points.
z_mean = np.flip(ret_z.statistic, axis=0)
# coord of x center of each bev cell. All cols have same value
x_ctr = np.tile(np.linspace((-40 + .08), (40 - .08), 500), (500, 1))
z_ctr = np.tile(np.linspace((80 - .08), (0 + .08), 500).reshape(-1, 1), (1, 500)) # all rows have same value
# offset of each point from x_mean of pillar, ymean, zmean, x_center, y_center
# offset of each point from xmean of pillar
new_aug_lidar_cam_coords[:, 5] = new_aug_lidar_cam_coords[:, 0] - x_mean[bin_idxs[:, 0], bin_idxs[:, 1]]
new_aug_lidar_cam_coords[:, 6] = new_aug_lidar_cam_coords[:, 1] - y_mean[bin_idxs[:, 0], bin_idxs[:, 1]] # yc
new_aug_lidar_cam_coords[:, 7] = new_aug_lidar_cam_coords[:, 2] - z_mean[bin_idxs[:, 0], bin_idxs[:, 1]] # zc
# offset from x center of pillar
new_aug_lidar_cam_coords[:, 8] = new_aug_lidar_cam_coords[:, 0] - x_ctr[bin_idxs[:, 0], bin_idxs[:, 1]]
new_aug_lidar_cam_coords[:, 9] = new_aug_lidar_cam_coords[:, 2] - z_ctr[bin_idxs[:, 0], bin_idxs[:, 1]] # zp
H, _, __ = np.histogram2d(z, x, bins=(np.flip(zedges), xedges))
H[H != 0] = 1
num_nonempty_pillars = int(np.flip(H, axis=0).sum()) # pillars containing >= 1 lidar point
pillar_idxs = np.unique(bin_idxs, axis=0) # ith element will be bin (row, col of bev img) of that pillar
if pillar_idxs.shape[0] > self.max_pillars:
np.random.shuffle(pillar_idxs)
pillar_idxs = pillar_idxs[:self.max_pillars]
return new_aug_lidar_cam_coords, bin_idxs, pillar_idxs
def scatter_encoded_pillars_to_bev(self, encoded_pillars, pillar_idxs):
"""
:return encoded_bev: (64, 500, 500) tensor for input to resnet portion of network
"""
num_nonempty_pillars = pillar_idxs.shape[0]
# bev_map and encoded_pillars must be torch.float, indices tensors must be torch.long
encoded_bev = torch.zeros(64, 500, 500).to(self.device)
encoded_bev[:, pillar_idxs[:, 0], pillar_idxs[:, 1]] = encoded_pillars[:, :num_nonempty_pillars]
return encoded_bev
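    # Shape walk-through (illustrative, using the constants above): for a batch
    # of B point clouds the forward pass produces
    #   input_tensor : (B, 10, 12000, 100)  augmented point features per pillar
    #   conv1 + max  : (B, 64, 12000)       one 64-d feature per pillar slot
    #   scatter      : (B, 64, 500, 500)    pseudo-image fed to the ResNet blocks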
class PredictionConvolutions(nn.Module):
def __init__(self, channels_for_block, n_classes):
super(PredictionConvolutions, self).__init__()
self.n_classes = n_classes # including background
# Number of prior-boxes we are considering per position in each feature map
n_boxes = 2
# Localization prediction convolutions (predict offsets w.r.t prior-boxes
# (4 values) and height/elevation (2 values))
self.loc_block0 = nn.Conv2d(channels_for_block[0], n_boxes * 4, kernel_size=3, padding=1)
self.loc_block1 = nn.Conv2d(channels_for_block[1], n_boxes * 4, kernel_size=3, padding=1)
self.loc_block2 = nn.Conv2d(channels_for_block[2], n_boxes * 4, kernel_size=3, padding=1)
self.loc_block3 = nn.Conv2d(channels_for_block[3], n_boxes * 4, kernel_size=3, padding=1)
# Class prediction convolutions (predict classes in localization boxes)
self.cl_block0 = nn.Conv2d(channels_for_block[0], n_boxes * n_classes, kernel_size=3, padding=1)
self.cl_block1 = nn.Conv2d(channels_for_block[1], n_boxes * n_classes, kernel_size=3, padding=1)
self.cl_block2 = nn.Conv2d(channels_for_block[2], n_boxes * n_classes, kernel_size=3, padding=1)
self.cl_block3 = nn.Conv2d(channels_for_block[3], n_boxes * n_classes, kernel_size=3, padding=1)
def forward(self, block0_fmaps, block1_fmaps, block2_fmaps, block3_fmaps):
batch_size = block0_fmaps.size(0)
# Predict localization boxes' bounds (as offsets w.r.t prior-boxes)
        l_block0 = self.loc_block0(block0_fmaps)  # (N, 8, 250, 250): 8 channels = 2 boxes * 4 offsets
l_block0 = l_block0.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 4) # (N, 250*250*2, 4)
l_block1 = self.loc_block1(block1_fmaps) # (N, 8, 125, 125)
l_block1 = l_block1.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 4) # (N, 125*125*2, 4)
l_block2 = self.loc_block2(block2_fmaps) # (N, 8, 63, 63)
l_block2 = l_block2.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 4) # (N, 63*63*2, 4)
l_block3 = self.loc_block3(block3_fmaps) # (N, 8, 32, 32)
l_block3 = l_block3.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 4) # (N, 32*32*2, 4)
# Predict classes in localization boxes
c_block0 = self.cl_block0(block0_fmaps) # (N, 2 * n_classes, 250, 250). 2 refers to 2 boxes per cell of fmap
c_block0 = c_block0.permute(0, 2, 3, 1).contiguous().view(batch_size, -1,
self.n_classes) # (N, 2*250*250, n_classes)
c_block1 = self.cl_block1(block1_fmaps) # (N, 2 * n_classes, 125, 125)
c_block1 = c_block1.permute(0, 2, 3, 1).contiguous().view(batch_size, -1,
self.n_classes) # (N, 2*125*125, n_classes)
c_block2 = self.cl_block2(block2_fmaps) # (N, 2 * n_classes, 63, 63)
c_block2 = c_block2.permute(0, 2, 3, 1).contiguous().view(batch_size, -1,
self.n_classes) # (N, 2*63*63, n_classes)
c_block3 = self.cl_block3(block3_fmaps) # (N, 2 * n_classes, 32, 32)
c_block3 = c_block3.permute(0, 2, 3, 1).contiguous().view(batch_size, -1,
self.n_classes) # (N, 2*32*32, n_classes)
# Concatenate in this specific order (i.e. must match the order of the prior-boxes)
locs = torch.cat([ # l_block0, l_block1,
l_block2, l_block3], dim=1)
classes_scores = torch.cat([ # c_block0, c_block1,
c_block2, c_block3], dim=1)
return locs, classes_scores
class SSD(nn.Module):
def __init__(self, resnet_type, n_classes):
super(SSD, self).__init__()
assert resnet_type in [18, 34, 50]
if resnet_type == 18:
resnet = list(torchvision.models.resnet18().children())
channels_for_block = [64, 128, 256, 512] # output channels of fmap for each of blocks 0 - 3
elif resnet_type == 34:
resnet = list(torchvision.models.resnet34().children())
channels_for_block = [64, 128, 256, 512]
else:
resnet = list(torchvision.models.resnet50().children())
channels_for_block = [256, 512, 1024, 2048]
self.n_classes = n_classes
self.pred_convs = PredictionConvolutions(channels_for_block, n_classes)
self.pillar_feat_net = PFNv2()
# Input channels of Conv2d in self.downsize must be output of PFN (N, 64, 500, 500)
self.downsize = nn.Sequential(nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False),
nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True,
track_running_stats=True),
nn.ReLU(inplace=True))
self.block0 = resnet[4]
self.block1 = resnet[5]
self.block2 = resnet[6]
self.block3 = resnet[7]
self.priors_cxcy = self.create_prior_boxes()
def forward(self, lidar):
encoded_bev = self.pillar_feat_net(lidar)
x = self.downsize(encoded_bev)
block0_fmaps = self.block0(x)
block1_fmaps = self.block1(block0_fmaps)
block2_fmaps = self.block2(block1_fmaps)
block3_fmaps = self.block3(block2_fmaps)
locs, classes_scores = self.pred_convs(block0_fmaps, block1_fmaps, block2_fmaps, block3_fmaps)
return locs, classes_scores, encoded_bev
def create_prior_boxes(self):
fmap_dims = { # 'block0': 250,
# 'block1': 125,
'block2': 63,
'block3': 32}
obj_scale = 0.031 # Assumes 500x500px BEV covers 80m x 80m and cars are 1.6m x 3.9m
aspect_ratios = [2., 0.5]
fmap_names = list(fmap_dims.keys())
prior_boxes = []
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
for fmap in fmap_names:
for i in range(fmap_dims[fmap]):
for j in range(fmap_dims[fmap]):
cx = (j + 0.5) / fmap_dims[fmap]
cy = (i + 0.5) / fmap_dims[fmap]
for ratio in aspect_ratios:
prior_boxes.append([cx, cy, obj_scale * np.sqrt(ratio), obj_scale / np.sqrt(ratio)])
prior_boxes = torch.FloatTensor(prior_boxes).to(device)
prior_boxes.clamp_(0, 1)
return prior_boxes
def detect_objects(self, predicted_locs, predicted_scores, min_score, max_overlap, top_k):
batch_size = predicted_locs.size(0)
n_priors = self.priors_cxcy.size(0)
predicted_scores = nn.functional.softmax(predicted_scores, dim=2)
all_images_boxes = list()
all_images_labels = list()
all_images_scores = list()
assert n_priors == predicted_locs.size(1) == predicted_scores.size(1)
for i in range(batch_size):
# Decode object coordinates from the form we regressed predicted boxes to
decoded_locs = utils.cxcy_to_xy(
utils.gcxgcy_to_cxcy(predicted_locs[i],
self.priors_cxcy)) # (n_priors, 4), these are fractional pt. coordinates
image_boxes = list()
image_labels = list()
image_scores = list()
            max_scores, best_label = predicted_scores[i].max(dim=1)  # (n_priors)
for c in range(1, self.n_classes):
# Keep only predicted boxes and scores where scores for this class are above the minimum score
class_scores = predicted_scores[i][:, c]
score_above_min_score = class_scores > min_score # torch.uint8 (byte) tensor, for indexing
n_above_min_score = score_above_min_score.sum().item()
if n_above_min_score == 0:
continue
class_scores = class_scores[score_above_min_score]
class_decoded_locs = decoded_locs[score_above_min_score]
# Sort predicted boxes and scores by scores
class_scores, sort_ind = class_scores.sort(dim=0, descending=True)
class_decoded_locs = class_decoded_locs[sort_ind]
print('class_scores.shape', class_scores.shape, 'class_decoded_locs.shape', class_decoded_locs.shape)
# Find the overlap between predicted boxes
overlap = utils.find_jaccard_overlap(class_decoded_locs, class_decoded_locs)
# Non-Maximum Suppression (NMS)
# A torch.uint8 (byte) tensor to keep track of which predicted boxes to suppress
# 1 implies suppress, 0 implies don't suppress
                suppress = torch.zeros((n_above_min_score), dtype=torch.uint8).to(
                    self.pillar_feat_net.device)  # (n_qualified), start with nothing suppressed
# Consider each box in order of decreasing scores
for box in range(class_decoded_locs.size(0)):
# If this box is already marked for suppression
if suppress[box] == 1:
continue
# Suppress boxes whose overlaps (with this box) are greater than maximum overlap
# Find such boxes and update suppress indices
suppress = torch.max(suppress, overlap[box] > max_overlap)
# The max operation retains previously suppressed boxes, like an 'OR' operation
# Don't suppress this box, even though it has an overlap of 1 with itself
suppress[box] = 0
# Store only unsuppressed boxes for this class
image_boxes.append(class_decoded_locs[1 - suppress])
image_labels.append(torch.LongTensor((1 - suppress).sum().item() * [c]).to(self.pillar_feat_net.device))
image_scores.append(class_scores[1 - suppress])
# If no object in any class is found, store a placeholder for 'background'
if len(image_boxes) == 0:
image_boxes.append(torch.FloatTensor([[0., 0., 1., 1.]]).to(self.pillar_feat_net.device))
image_labels.append(torch.LongTensor([0]).to(self.pillar_feat_net.device))
image_scores.append(torch.FloatTensor([0.]).to(self.pillar_feat_net.device))
# Concatenate into single tensors
image_boxes = torch.cat(image_boxes, dim=0) # (n_objects, 4)
image_labels = torch.cat(image_labels, dim=0) # (n_objects)
image_scores = torch.cat(image_scores, dim=0) # (n_objects)
n_objects = image_scores.size(0)
# Keep only the top k objects
if n_objects > top_k:
image_scores, sort_ind = image_scores.sort(dim=0, descending=True)
image_scores = image_scores[:top_k] # (top_k)
image_boxes = image_boxes[sort_ind][:top_k] # (top_k, 4)
image_labels = image_labels[sort_ind][:top_k] # (top_k)
# Append to lists that store predicted boxes and scores for all images
all_images_boxes.append(image_boxes)
all_images_labels.append(image_labels)
all_images_scores.append(image_scores)
return all_images_boxes, all_images_labels, all_images_scores # lists of length batch_size
class MultiBoxLoss(nn.Module):
"""
The MultiBox loss, a loss function for object detection.
This is a combination of:
(1) a localization loss for the predicted locations of the boxes, and
(2) a confidence loss for the predicted class scores.
"""
def __init__(self, priors_cxcy, threshold=0.5, neg_pos_ratio=3, alpha=1.):
super(MultiBoxLoss, self).__init__()
self.priors_cxcy = priors_cxcy
self.priors_xy = utils.cxcy_to_xy(priors_cxcy)
self.threshold = threshold
self.neg_pos_ratio = neg_pos_ratio
self.alpha = alpha
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.smooth_l1 = nn.L1Loss()
self.cross_entropy = nn.CrossEntropyLoss(reduce=False)
def forward(self, predicted_locs, predicted_scores, boxes, labels):
"""
Forward propagation.
:param predicted_locs: predicted locations/boxes w.r.t the prior boxes, a tensor of dimensions (N, n_priors, 4)
:param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, n_priors, n_classes)
:param boxes: true object bounding boxes in boundary coordinates, a list of N tensors
:param labels: true object labels, a list of N tensors
:return: multibox loss, a scalar
"""
batch_size = predicted_locs.size(0)
n_priors = self.priors_cxcy.size(0)
n_classes = predicted_scores.size(2)
assert n_priors == predicted_locs.size(1) == predicted_scores.size(1)
true_locs = torch.zeros((batch_size, n_priors, 4), dtype=torch.float).to(self.device)
true_classes = torch.zeros((batch_size, n_priors), dtype=torch.long).to(self.device)
# For each image
for i in range(batch_size):
n_objects = boxes[i].size(0)
overlap = utils.find_jaccard_overlap(boxes[i], self.priors_xy) # (n_objects, n_priors)
# For each prior, find the object that has the maximum overlap
overlap_for_each_prior, object_for_each_prior = overlap.max(dim=0) # (n_priors)
            _, prior_for_each_object = overlap.max(dim=1)  # (n_objects)
            # Two failure modes must be avoided: 1. an object may not be the maximum-overlap object for
            # any prior, and 2. every prior matched to an object may fall below the overlap threshold
            # and be assigned as background.
            # Then, assign each object to its corresponding maximum-overlap prior. (This fixes 1.)
object_for_each_prior[prior_for_each_object] = torch.LongTensor(range(n_objects)).to(
self.device)
# To ensure these priors qualify, artificially give them an overlap of greater than 0.5. (This fixes 2.)
overlap_for_each_prior[prior_for_each_object] = 1.
# Labels for each prior
label_for_each_prior = labels[i][object_for_each_prior] # (n_priors)
# Set priors whose overlaps with objects are less than the threshold to be background (no object)
label_for_each_prior[overlap_for_each_prior < self.threshold] = 0 # (n_priors)
# Store
true_classes[i] = label_for_each_prior
# Encode center-size object coordinates into the form we regressed predicted boxes to
true_locs[i] = utils.cxcy_to_gcxgcy(utils.xy_to_cxcy(boxes[i][object_for_each_prior]),
self.priors_cxcy) # (n_priors, 4)
# Identify priors that are positive (object/non-background)
positive_priors = true_classes != 0 # (N, n_priors)
# Localization loss is computed only over positive (non-background) priors
loc_loss = self.smooth_l1(predicted_locs[positive_priors], true_locs[positive_priors]) # (), scalar
# Number of positive and hard-negative priors per image
n_positives = positive_priors.sum(dim=1) # (N)
n_hard_negatives = self.neg_pos_ratio * n_positives # (N)
# First, find the loss for all priors
conf_loss_all = self.cross_entropy(predicted_scores.view(-1, n_classes),
true_classes.view(-1)) # (N * n_priors)
conf_loss_all = conf_loss_all.view(batch_size, n_priors) # (N, n_priors)
# We already know which priors are positive
conf_loss_pos = conf_loss_all[positive_priors] # (sum(n_positives))
# Next, find which priors are hard-negative
# To do this, sort ONLY negative priors in each image in order of decreasing loss and take top n_hard_negatives
conf_loss_neg = conf_loss_all.clone() # (N, n_priors)
conf_loss_neg[
positive_priors] = 0. # (N, n_priors), positive priors are ignored (never in top n_hard_negatives)
conf_loss_neg, _ = conf_loss_neg.sort(dim=1, descending=True) # (N, n_priors), sorted by decreasing hardness
hardness_ranks = torch.LongTensor(range(n_priors)).unsqueeze(0).expand_as(conf_loss_neg).to(
self.device) # (N, n_priors)
hard_negatives = hardness_ranks < n_hard_negatives.unsqueeze(1) # (N, n_priors)
conf_loss_hard_neg = conf_loss_neg[hard_negatives] # (sum(n_hard_negatives))
# As in the paper, averaged over positive priors only, although computed over both positive and hard-negative priors
conf_loss = (conf_loss_hard_neg.sum() + conf_loss_pos.sum()) / n_positives.sum().float() # (), scalar
# TOTAL LOSS
return conf_loss + self.alpha * loc_loss
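# A minimal training-step sketch for the classes above; illustrative only. `lidar_batch`, `boxes`,
# `labels` and `optimizer` are placeholders for whatever the surrounding pipeline supplies, e.g.
# model = SSD(resnet_type=34, n_classes=n_classes); criterion = MultiBoxLoss(model.priors_cxcy).
def _example_train_step(model, criterion, optimizer, lidar_batch, boxes, labels):
    model.train()
    # forward(): (N, n_priors, 4) box offsets, (N, n_priors, n_classes) scores, plus the encoded BEV
    predicted_locs, predicted_scores, _ = model(lidar_batch)
    loss = criterion(predicted_locs, predicted_scores, boxes, labels)  # scalar multibox loss
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()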
| 50.793548
| 134
| 0.629366
|
b7926f13417870d2f7e2f0c90b1f6ccdda236759
| 187
|
py
|
Python
|
setup.py
|
CyanideCN/vanadis
|
ebd2373fb55d5913eb1384ab434e083fe714ecab
|
[
"MIT"
] | 1
|
2019-12-01T11:31:23.000Z
|
2019-12-01T11:31:23.000Z
|
setup.py
|
CyanideCN/vanadis
|
ebd2373fb55d5913eb1384ab434e083fe714ecab
|
[
"MIT"
] | null | null | null |
setup.py
|
CyanideCN/vanadis
|
ebd2373fb55d5913eb1384ab434e083fe714ecab
|
[
"MIT"
] | 2
|
2019-09-24T00:46:24.000Z
|
2019-12-01T11:31:24.000Z
|
from setuptools import setup, find_packages
setup(name='vanadis',
version='0.0.2',
packages=find_packages(),
install_requires='matplotlib',
license='MIT Licence')
| 26.714286
| 43
| 0.684492
|
d49f06067d55f02bad7253dc0ac2b007faad00cd
| 679
|
py
|
Python
|
Implementation/5622_다이얼/5622_다이얼.py
|
7dudtj/BOJ_myCode
|
37d105590a7963e2232102b3098fea3c3504b96f
|
[
"MIT"
] | 1
|
2022-03-30T15:50:47.000Z
|
2022-03-30T15:50:47.000Z
|
Implementation/5622_다이얼/5622_다이얼.py
|
7dudtj/BOJ_myCode
|
37d105590a7963e2232102b3098fea3c3504b96f
|
[
"MIT"
] | null | null | null |
Implementation/5622_다이얼/5622_다이얼.py
|
7dudtj/BOJ_myCode
|
37d105590a7963e2232102b3098fea3c3504b96f
|
[
"MIT"
] | 1
|
2021-07-20T07:11:06.000Z
|
2021-07-20T07:11:06.000Z
|
N = input()
Sum = 0
for i in range(len(N)):
if N[i] == 'A' or N[i] == 'B' or N[i] == 'C':
Sum = Sum + 3
elif N[i] == 'D' or N[i] == 'E' or N[i] == 'F':
Sum = Sum + 4
elif N[i] == 'G' or N[i] == 'H' or N[i] == 'I':
Sum = Sum + 5
elif N[i] == 'J' or N[i] == 'K' or N[i] == 'L':
Sum = Sum + 6
elif N[i] == 'M' or N[i] == 'N' or N[i] == 'O':
Sum = Sum + 7
elif N[i] == 'P' or N[i] == 'Q' or N[i] == 'R' or N[i] == 'S':
Sum = Sum + 8
elif N[i] == 'T' or N[i] == 'U' or N[i] == 'V':
Sum = Sum + 9
elif N[i] == 'W' or N[i] == 'X' or N[i] == 'Y' or N[i] == 'Z':
Sum = Sum + 10
print(Sum)
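# An equivalent, table-driven sketch of the same dial timing, kept separate from the accepted
# solution above; the button groups mirror the if/elif chain, with per-letter costs 3..10.
def dial_seconds(word):
    groups = ['ABC', 'DEF', 'GHI', 'JKL', 'MNO', 'PQRS', 'TUV', 'WXYZ']
    cost = {ch: i + 3 for i, grp in enumerate(groups) for ch in grp}
    return sum(cost[ch] for ch in word)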
| 28.291667
| 66
| 0.344624
|
3ab3acd0ff9024a041d43712aeedd9c086c7ab6b
| 44,511
|
py
|
Python
|
tensorflow/python/keras/layers/convolutional_test.py
|
grasskin/tensorflow
|
3fbfc9351ae6a2bb719d24458ee2c95214682302
|
[
"Apache-2.0"
] | 4
|
2020-06-28T08:25:36.000Z
|
2021-08-12T12:41:34.000Z
|
tensorflow/python/keras/layers/convolutional_test.py
|
grasskin/tensorflow
|
3fbfc9351ae6a2bb719d24458ee2c95214682302
|
[
"Apache-2.0"
] | 2
|
2021-08-25T16:12:24.000Z
|
2022-02-10T02:04:13.000Z
|
tensorflow/python/keras/layers/convolutional_test.py
|
grasskin/tensorflow
|
3fbfc9351ae6a2bb719d24458ee2c95214682302
|
[
"Apache-2.0"
] | 4
|
2019-11-28T12:18:07.000Z
|
2021-08-01T16:12:17.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolutional layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class Conv1DTest(keras_parameterized.TestCase):
def _run_test(self, kwargs, expected_output_shape):
num_samples = 2
stack_size = 3
length = 7
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Conv1D,
kwargs=kwargs,
input_shape=(num_samples, length, stack_size),
expected_output_shape=expected_output_shape)
def _run_test_extra_batch_dim(self, kwargs, expected_output_shape):
batch_shape = (2, 11)
stack_size = 3
length = 7
with self.cached_session(use_gpu=True):
if expected_output_shape is not None:
expected_output_shape = (None,) + expected_output_shape
testing_utils.layer_test(
keras.layers.Conv1D,
kwargs=kwargs,
input_shape=batch_shape + (length, stack_size),
expected_output_shape=expected_output_shape)
@parameterized.named_parameters(
('padding_valid', {
'padding': 'valid'
}, (None, 5, 2)),
('padding_same', {
'padding': 'same'
}, (None, 7, 2)),
('padding_same_dilation_2', {
'padding': 'same',
'dilation_rate': 2
}, (None, 7, 2)),
('padding_same_dilation_3', {
'padding': 'same',
'dilation_rate': 3
}, (None, 7, 2)),
('padding_causal', {
'padding': 'causal'
}, (None, 7, 2)),
('strides', {
'strides': 2
}, (None, 3, 2)),
('dilation_rate', {
'dilation_rate': 2
}, (None, 3, 2)),
# Only runs on GPU with CUDA, groups are not supported on CPU.
# https://github.com/tensorflow/tensorflow/issues/29005
('group', {
'groups': 3,
'filters': 6
}, (None, 5, 6), True),
)
def test_conv1d(self, kwargs, expected_output_shape, requires_gpu=False):
kwargs['filters'] = kwargs.get('filters', 2)
kwargs['kernel_size'] = 3
if not requires_gpu or test.is_gpu_available(cuda_only=True):
self._run_test(kwargs, expected_output_shape)
self._run_test_extra_batch_dim(kwargs, expected_output_shape)
def test_conv1d_regularizers(self):
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv1D(**kwargs)
layer.build((None, 5, 2))
self.assertEqual(len(layer.losses), 2)
layer(keras.backend.variable(np.ones((1, 5, 2))))
self.assertEqual(len(layer.losses), 3)
def test_conv1d_constraints(self):
k_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_constraint': k_constraint,
'bias_constraint': b_constraint,
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv1D(**kwargs)
layer.build((None, 5, 2))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_conv1d_recreate_conv(self):
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv1D(filters=1,
kernel_size=3,
strides=1,
dilation_rate=2,
padding='causal')
inpt1 = np.random.normal(size=[1, 2, 1])
inpt2 = np.random.normal(size=[1, 1, 1])
outp1_shape = layer(inpt1).shape
_ = layer(inpt2).shape
self.assertEqual(outp1_shape, layer(inpt1).shape)
def test_conv1d_recreate_conv_unknown_dims(self):
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv1D(filters=1,
kernel_size=3,
strides=1,
dilation_rate=2,
padding='causal')
inpt1 = np.random.normal(size=[1, 9, 1]).astype(np.float32)
inpt2 = np.random.normal(size=[1, 2, 1]).astype(np.float32)
outp1_shape = layer(inpt1).shape
@def_function.function(input_signature=[
tensor_spec.TensorSpec([1, None, 1])])
def fn(inpt):
return layer(inpt)
fn(inpt2)
self.assertEqual(outp1_shape, layer(inpt1).shape)
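# A small standalone sketch of the property the two tests above rely on (illustrative, not part of the
# test suite): with 'causal' padding and stride 1 the time dimension is preserved for any dilation rate,
# so the same layer can be re-applied to inputs of different lengths.
def _example_causal_conv1d():
  layer = keras.layers.Conv1D(filters=1, kernel_size=3, dilation_rate=2, padding='causal')
  out = layer(np.zeros((1, 7, 1), dtype=np.float32))  # output shape stays (1, 7, 1)
  return out.shape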
@keras_parameterized.run_all_keras_modes
class Conv2DTest(keras_parameterized.TestCase):
def _run_test(self, kwargs, expected_output_shape):
num_samples = 2
stack_size = 3
num_row = 7
num_col = 6
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Conv2D,
kwargs=kwargs,
input_shape=(num_samples, num_row, num_col, stack_size),
expected_output_shape=expected_output_shape)
def _run_test_extra_batch_dim(self, kwargs, expected_output_shape):
batch_shape = (2, 11)
stack_size = 3
num_row = 7
num_col = 6
with self.cached_session(use_gpu=True):
if expected_output_shape is not None:
expected_output_shape = (None,) + expected_output_shape
testing_utils.layer_test(
keras.layers.Conv2D,
kwargs=kwargs,
input_shape=batch_shape + (num_row, num_col, stack_size),
expected_output_shape=expected_output_shape)
@parameterized.named_parameters(
('padding_valid', {
'padding': 'valid'
}, (None, 5, 4, 2)),
('padding_same', {
'padding': 'same'
}, (None, 7, 6, 2)),
('padding_same_dilation_2', {
'padding': 'same',
'dilation_rate': 2
}, (None, 7, 6, 2)),
('strides', {
'strides': (2, 2)
}, (None, 3, 2, 2)),
('dilation_rate', {
'dilation_rate': (2, 2)
}, (None, 3, 2, 2)),
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
('data_format', {
'data_format': 'channels_first'
}, None, True),
# Only runs on GPU with CUDA, groups are not supported on CPU.
# https://github.com/tensorflow/tensorflow/issues/29005
('group', {
'groups': 3,
'filters': 6
}, (None, 5, 4, 6), True),
)
def test_conv2d(self, kwargs, expected_output_shape=None, requires_gpu=False):
kwargs['filters'] = kwargs.get('filters', 2)
kwargs['kernel_size'] = (3, 3)
if not requires_gpu or test.is_gpu_available(cuda_only=True):
self._run_test(kwargs, expected_output_shape)
self._run_test_extra_batch_dim(kwargs, expected_output_shape)
def test_conv2d_regularizers(self):
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv2D(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(len(layer.losses), 2)
layer(keras.backend.variable(np.ones((1, 5, 5, 2))))
self.assertEqual(len(layer.losses), 3)
def test_conv2d_constraints(self):
k_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_constraint': k_constraint,
'bias_constraint': b_constraint,
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv2D(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_conv2d_zero_kernel_size(self):
kwargs = {'filters': 2, 'kernel_size': 0}
with self.assertRaises(ValueError):
keras.layers.Conv2D(**kwargs)
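# Shape arithmetic behind the parameterized expectations above (standard convolution formulas):
#   'valid': out = floor((in - dilation * (kernel - 1) - 1) / stride) + 1
#   'same' : out = ceil(in / stride)
# e.g. rows=7, kernel=3, stride=2, dilation=1 with 'valid' gives floor((7 - 2 - 1) / 2) + 1 = 3,
# matching the expected (None, 3, 2, 2) for the 'strides' case.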
@keras_parameterized.run_all_keras_modes
class Conv3DTest(keras_parameterized.TestCase):
def _run_test(self, kwargs, expected_output_shape, validate_training=True):
num_samples = 2
stack_size = 3
num_row = 7
num_col = 6
depth = 5
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Conv3D,
kwargs=kwargs,
input_shape=(num_samples, depth, num_row, num_col, stack_size),
expected_output_shape=expected_output_shape,
validate_training=validate_training)
def _run_test_extra_batch_dim(self,
kwargs,
expected_output_shape,
validate_training=True):
batch_shape = (2, 11)
stack_size = 3
num_row = 7
num_col = 6
depth = 5
with self.cached_session(use_gpu=True):
if expected_output_shape is not None:
expected_output_shape = (None,) + expected_output_shape
testing_utils.layer_test(
keras.layers.Conv3D,
kwargs=kwargs,
input_shape=batch_shape + (depth, num_row, num_col, stack_size),
expected_output_shape=expected_output_shape,
validate_training=validate_training)
@parameterized.named_parameters(
('padding_valid', {
'padding': 'valid'
}, (None, 3, 5, 4, 2)),
('padding_same', {
'padding': 'same'
}, (None, 5, 7, 6, 2)),
('strides', {
'strides': (2, 2, 2)
}, (None, 2, 3, 2, 2)),
('dilation_rate', {
'dilation_rate': (2, 2, 2)
}, (None, 1, 3, 2, 2)),
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
('data_format', {
'data_format': 'channels_first'
}, None, True),
# Only runs on GPU with CUDA, groups are not supported on CPU.
# https://github.com/tensorflow/tensorflow/issues/29005
('group', {
'groups': 3,
'filters': 6
}, (None, 3, 5, 4, 6), True),
)
def test_conv3d(self, kwargs, expected_output_shape=None, requires_gpu=False):
kwargs['filters'] = kwargs.get('filters', 2)
kwargs['kernel_size'] = (3, 3, 3)
# train_on_batch currently fails with XLA enabled on GPUs
test_training = 'groups' not in kwargs or not test_util.is_xla_enabled()
if not requires_gpu or test.is_gpu_available(cuda_only=True):
self._run_test(kwargs, expected_output_shape, test_training)
self._run_test_extra_batch_dim(kwargs, expected_output_shape,
test_training)
def test_conv3d_regularizers(self):
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv3D(**kwargs)
layer.build((None, 5, 5, 5, 2))
self.assertEqual(len(layer.losses), 2)
layer(keras.backend.variable(np.ones((1, 5, 5, 5, 2))))
self.assertEqual(len(layer.losses), 3)
def test_conv3d_constraints(self):
k_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'kernel_constraint': k_constraint,
'bias_constraint': b_constraint,
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv3D(**kwargs)
layer.build((None, 5, 5, 5, 2))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_conv3d_dynamic_shape(self):
input_data = np.random.random((1, 3, 3, 3, 3)).astype(np.float32)
with self.cached_session(use_gpu=True):
# Won't raise error here.
testing_utils.layer_test(
keras.layers.Conv3D,
kwargs={
'data_format': 'channels_last',
'filters': 3,
'kernel_size': 3
},
input_shape=(None, None, None, None, 3),
input_data=input_data)
if test.is_gpu_available(cuda_only=True):
testing_utils.layer_test(
keras.layers.Conv3D,
kwargs={
'data_format': 'channels_first',
'filters': 3,
'kernel_size': 3
},
input_shape=(None, 3, None, None, None),
input_data=input_data)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class GroupedConvTest(keras_parameterized.TestCase):
@parameterized.named_parameters(
('Conv1D', keras.layers.Conv1D),
('Conv2D', keras.layers.Conv2D),
('Conv3D', keras.layers.Conv3D),
)
def test_group_conv_incorrect_use(self, layer):
with self.assertRaisesRegex(ValueError, 'The number of filters'):
layer(16, 3, groups=3)
with self.assertRaisesRegex(ValueError, 'The number of input channels'):
layer(16, 3, groups=4).build((32, 12, 12, 3))
@parameterized.named_parameters(
('Conv1D', keras.layers.Conv1D, (32, 12, 32)),
('Conv2D', keras.layers.Conv2D, (32, 12, 12, 32)),
('Conv3D', keras.layers.Conv3D, (32, 12, 12, 12, 32)),
)
def disable_test_group_conv(self, layer_cls, input_shape):
if test.is_gpu_available(cuda_only=True):
with testing_utils.use_gpu():
inputs = random_ops.random_uniform(shape=input_shape)
layer = layer_cls(16, 3, groups=4, use_bias=False)
layer.build(input_shape)
input_slices = array_ops.split(inputs, 4, axis=-1)
weight_slices = array_ops.split(layer.kernel, 4, axis=-1)
expected_outputs = array_ops.concat([
nn.convolution_v2(inputs, weights)
for inputs, weights in zip(input_slices, weight_slices)
],
axis=-1)
self.assertAllClose(layer(inputs), expected_outputs, rtol=1e-5)
def test_group_conv_depthwise(self):
if test.is_gpu_available(cuda_only=True):
with testing_utils.use_gpu():
inputs = random_ops.random_uniform(shape=(3, 27, 27, 32))
layer = keras.layers.Conv2D(32, 3, groups=32, use_bias=False)
layer.build((3, 27, 27, 32))
weights_dw = array_ops.reshape(layer.kernel, [3, 3, 32, 1])
expected_outputs = nn.depthwise_conv2d(
inputs, weights_dw, strides=[1, 1, 1, 1], padding='VALID')
self.assertAllClose(layer(inputs), expected_outputs, rtol=1e-5)
@keras_parameterized.run_all_keras_modes
class Conv1DTransposeTest(keras_parameterized.TestCase):
def _run_test(self, kwargs, expected_output_shape):
num_samples = 2
stack_size = 3
num_col = 6
with testing_utils.use_gpu():
testing_utils.layer_test(
keras.layers.Conv1DTranspose,
kwargs=kwargs,
input_shape=(num_samples, num_col, stack_size),
expected_output_shape=expected_output_shape)
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}, (None, 8, 2)),
('padding_same', {'padding': 'same'}, (None, 6, 2)),
('strides', {'strides': 2}, (None, 13, 2)),
# Only runs on GPU with CUDA, dilation_rate>1 is not supported on CPU.
('dilation_rate', {'dilation_rate': 2}, (None, 10, 2)),
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
('data_format', {'data_format': 'channels_first'}),
)
def test_conv1d_transpose(self, kwargs, expected_output_shape=None):
kwargs['filters'] = 2
kwargs['kernel_size'] = 3
if (('data_format' not in kwargs and 'dilation_rate' not in kwargs) or
test.is_gpu_available(cuda_only=True)):
self._run_test(kwargs, expected_output_shape)
@keras_parameterized.run_all_keras_modes
class Conv3DTransposeTest(keras_parameterized.TestCase):
def _run_test(self, kwargs, expected_output_shape):
num_samples = 2
stack_size = 3
num_row = 7
num_col = 6
depth = 5
with testing_utils.use_gpu():
testing_utils.layer_test(
keras.layers.Conv3DTranspose,
kwargs=kwargs,
input_shape=(num_samples, depth, num_row, num_col, stack_size),
expected_output_shape=expected_output_shape)
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}, (None, 7, 9, 8, 2)),
('padding_same', {'padding': 'same'}, (None, 5, 7, 6, 2)),
('strides', {'strides': (2, 2, 2)}, (None, 11, 15, 13, 2)),
('dilation_rate', {'dilation_rate': (2, 2, 2)}, (None, 7, 9, 8, 2)),
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
('data_format', {'data_format': 'channels_first'}),
)
def test_conv3d_transpose(self, kwargs, expected_output_shape=None):
kwargs['filters'] = 2
kwargs['kernel_size'] = (3, 3, 3)
if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True):
self._run_test(kwargs, expected_output_shape)
@keras_parameterized.run_all_keras_modes
class ConvSequentialTest(keras_parameterized.TestCase):
def _run_test(self, conv_layer_cls, kwargs, input_shape1, input_shape2,
expected_output_shape1, expected_output_shape2):
kwargs['filters'] = 1
kwargs['kernel_size'] = 3
kwargs['dilation_rate'] = 2
with self.cached_session(use_gpu=True):
layer = conv_layer_cls(**kwargs)
output1 = layer(np.zeros(input_shape1))
self.assertEqual(output1.shape, expected_output_shape1)
output2 = layer(np.zeros(input_shape2))
self.assertEqual(output2.shape, expected_output_shape2)
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'},
(1, 8, 2), (1, 5, 2), (1, 4, 1), (1, 1, 1)),
('padding_same', {'padding': 'same'},
(1, 8, 2), (1, 5, 2), (1, 8, 1), (1, 5, 1)),
('padding_causal', {'padding': 'causal'},
(1, 8, 2), (1, 5, 2), (1, 8, 1), (1, 5, 1)),
)
def test_conv1d(self, kwargs, input_shape1, input_shape2,
expected_output_shape1, expected_output_shape2):
self._run_test(keras.layers.Conv1D, kwargs, input_shape1, input_shape2,
expected_output_shape1, expected_output_shape2)
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'},
(1, 7, 6, 2), (1, 6, 5, 2), (1, 3, 2, 1), (1, 2, 1, 1)),
('padding_same', {'padding': 'same'},
(1, 7, 6, 2), (1, 6, 5, 2), (1, 7, 6, 1), (1, 6, 5, 1)),
)
def test_conv2d(self, kwargs, input_shape1, input_shape2,
expected_output_shape1, expected_output_shape2):
self._run_test(keras.layers.Conv2D, kwargs, input_shape1, input_shape2,
expected_output_shape1, expected_output_shape2)
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'},
(1, 5, 7, 6, 2), (1, 8, 6, 5, 2), (1, 1, 3, 2, 1), (1, 4, 2, 1, 1)),
('padding_same', {'padding': 'same'},
(1, 5, 7, 6, 2), (1, 8, 6, 5, 2), (1, 5, 7, 6, 1), (1, 8, 6, 5, 1)),
)
def test_conv3d(self, kwargs, input_shape1, input_shape2,
expected_output_shape1, expected_output_shape2):
self._run_test(keras.layers.Conv3D, kwargs, input_shape1, input_shape2,
expected_output_shape1, expected_output_shape2)
def test_dynamic_shape(self):
with self.cached_session(use_gpu=True):
layer = keras.layers.Conv3D(2, 3)
input_shape = (5, None, None, 2)
inputs = keras.Input(shape=input_shape)
x = layer(inputs)
# Won't raise error here with None values in input shape (b/144282043).
layer(x)
@keras_parameterized.run_all_keras_modes
class ZeroPaddingTest(keras_parameterized.TestCase):
def test_zero_padding_1d(self):
num_samples = 2
input_dim = 2
num_steps = 5
shape = (num_samples, num_steps, input_dim)
inputs = np.ones(shape)
with self.cached_session(use_gpu=True):
# basic test
testing_utils.layer_test(
keras.layers.ZeroPadding1D,
kwargs={'padding': 2},
input_shape=inputs.shape)
testing_utils.layer_test(
keras.layers.ZeroPadding1D,
kwargs={'padding': (1, 2)},
input_shape=inputs.shape)
# correctness test
layer = keras.layers.ZeroPadding1D(padding=2)
layer.build(shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
for offset in [0, 1, -1, -2]:
np.testing.assert_allclose(np_output[:, offset, :], 0.)
np.testing.assert_allclose(np_output[:, 2:-2, :], 1.)
layer = keras.layers.ZeroPadding1D(padding=(1, 2))
layer.build(shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
for left_offset in [0]:
np.testing.assert_allclose(np_output[:, left_offset, :], 0.)
for right_offset in [-1, -2]:
np.testing.assert_allclose(np_output[:, right_offset, :], 0.)
np.testing.assert_allclose(np_output[:, 1:-2, :], 1.)
layer.get_config()
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.ZeroPadding1D(padding=(1, 1, 1))
with self.assertRaises(ValueError):
keras.layers.ZeroPadding1D(padding=None)
@parameterized.named_parameters(('channels_first', 'channels_first'),
('channels_last', 'channels_last'))
def test_zero_padding_2d(self, data_format):
num_samples = 2
stack_size = 2
input_num_row = 4
input_num_col = 5
if data_format == 'channels_first':
inputs = np.ones((num_samples, stack_size, input_num_row, input_num_col))
elif data_format == 'channels_last':
inputs = np.ones((num_samples, input_num_row, input_num_col, stack_size))
# basic test
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.ZeroPadding2D,
kwargs={
'padding': (2, 2),
'data_format': data_format
},
input_shape=inputs.shape)
testing_utils.layer_test(
keras.layers.ZeroPadding2D,
kwargs={
'padding': ((1, 2), (3, 4)),
'data_format': data_format
},
input_shape=inputs.shape)
# correctness test
with self.cached_session(use_gpu=True):
layer = keras.layers.ZeroPadding2D(
padding=(2, 2), data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
if data_format == 'channels_last':
for offset in [0, 1, -1, -2]:
np.testing.assert_allclose(np_output[:, offset, :, :], 0.)
np.testing.assert_allclose(np_output[:, :, offset, :], 0.)
np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)
elif data_format == 'channels_first':
for offset in [0, 1, -1, -2]:
np.testing.assert_allclose(np_output[:, :, offset, :], 0.)
np.testing.assert_allclose(np_output[:, :, :, offset], 0.)
np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)
layer = keras.layers.ZeroPadding2D(
padding=((1, 2), (3, 4)), data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
if data_format == 'channels_last':
for top_offset in [0]:
np.testing.assert_allclose(np_output[:, top_offset, :, :], 0.)
for bottom_offset in [-1, -2]:
np.testing.assert_allclose(np_output[:, bottom_offset, :, :], 0.)
for left_offset in [0, 1, 2]:
np.testing.assert_allclose(np_output[:, :, left_offset, :], 0.)
for right_offset in [-1, -2, -3, -4]:
np.testing.assert_allclose(np_output[:, :, right_offset, :], 0.)
np.testing.assert_allclose(np_output[:, 1:-2, 3:-4, :], 1.)
elif data_format == 'channels_first':
for top_offset in [0]:
np.testing.assert_allclose(np_output[:, :, top_offset, :], 0.)
for bottom_offset in [-1, -2]:
np.testing.assert_allclose(np_output[:, :, bottom_offset, :], 0.)
for left_offset in [0, 1, 2]:
np.testing.assert_allclose(np_output[:, :, :, left_offset], 0.)
for right_offset in [-1, -2, -3, -4]:
np.testing.assert_allclose(np_output[:, :, :, right_offset], 0.)
np.testing.assert_allclose(np_output[:, :, 1:-2, 3:-4], 1.)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.ZeroPadding2D(padding=(1, 1, 1))
with self.assertRaises(ValueError):
keras.layers.ZeroPadding2D(padding=None)
@parameterized.named_parameters(('channels_first', 'channels_first'),
('channels_last', 'channels_last'))
def test_zero_padding_3d(self, data_format):
num_samples = 2
stack_size = 2
input_len_dim1 = 4
input_len_dim2 = 5
input_len_dim3 = 3
if data_format == 'channels_first':
inputs = np.ones((num_samples, stack_size, input_len_dim1, input_len_dim2,
input_len_dim3))
elif data_format == 'channels_last':
inputs = np.ones((num_samples, input_len_dim1, input_len_dim2,
input_len_dim3, stack_size))
with self.cached_session(use_gpu=True):
# basic test
testing_utils.layer_test(
keras.layers.ZeroPadding3D,
kwargs={
'padding': (2, 2, 2),
'data_format': data_format
},
input_shape=inputs.shape)
testing_utils.layer_test(
keras.layers.ZeroPadding3D,
kwargs={
'padding': ((1, 2), (3, 4), (0, 2)),
'data_format': data_format
},
input_shape=inputs.shape)
with self.cached_session(use_gpu=True):
# correctness test
layer = keras.layers.ZeroPadding3D(
padding=(2, 2, 2), data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
if data_format == 'channels_last':
for offset in [0, 1, -1, -2]:
np.testing.assert_allclose(np_output[:, offset, :, :, :], 0.)
np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)
np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)
np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, 2:-2, :], 1.)
elif data_format == 'channels_first':
for offset in [0, 1, -1, -2]:
np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)
np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)
np.testing.assert_allclose(np_output[:, :, :, :, offset], 0.)
np.testing.assert_allclose(np_output[:, :, 2:-2, 2:-2, 2:-2], 1.)
layer = keras.layers.ZeroPadding3D(
padding=((1, 2), (3, 4), (0, 2)), data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
if data_format == 'channels_last':
for offset in [0]:
np.testing.assert_allclose(np_output[:, offset, :, :, :], 0.)
for offset in [-1, -2]:
np.testing.assert_allclose(np_output[:, offset, :, :, :], 0.)
for offset in [0, 1, 2]:
np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)
for offset in [-1, -2, -3, -4]:
np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)
for offset in [-1, -2]:
np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)
np.testing.assert_allclose(np_output[:, 1:-2, 3:-4, 0:-2, :], 1.)
elif data_format == 'channels_first':
for offset in [0]:
np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)
for offset in [-1, -2]:
np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)
for offset in [0, 1, 2]:
np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)
for offset in [-1, -2, -3, -4]:
np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)
for offset in [-1, -2]:
np.testing.assert_allclose(np_output[:, :, :, :, offset], 0.)
np.testing.assert_allclose(np_output[:, :, 1:-2, 3:-4, 0:-2], 1.)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.ZeroPadding3D(padding=(1, 1))
with self.assertRaises(ValueError):
keras.layers.ZeroPadding3D(padding=None)
@test_util.for_all_test_methods(test_util.disable_xla,
'align_corners=False not supported by XLA')
@keras_parameterized.run_all_keras_modes
class UpSamplingTest(keras_parameterized.TestCase):
def test_upsampling_1d(self):
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.UpSampling1D, kwargs={'size': 2}, input_shape=(3, 5, 4))
def test_upsampling_2d(self):
num_samples = 2
stack_size = 2
input_num_row = 11
input_num_col = 12
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_num_row,
input_num_col)
else:
inputs = np.random.rand(num_samples, input_num_row, input_num_col,
stack_size)
# basic test
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.UpSampling2D,
kwargs={'size': (2, 2),
'data_format': data_format},
input_shape=inputs.shape)
for length_row in [2]:
for length_col in [2, 3]:
layer = keras.layers.UpSampling2D(
size=(length_row, length_col), data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
if data_format == 'channels_first':
assert np_output.shape[2] == length_row * input_num_row
assert np_output.shape[3] == length_col * input_num_col
else: # tf
assert np_output.shape[1] == length_row * input_num_row
assert np_output.shape[2] == length_col * input_num_col
# compare with numpy
if data_format == 'channels_first':
expected_out = np.repeat(inputs, length_row, axis=2)
expected_out = np.repeat(expected_out, length_col, axis=3)
else: # tf
expected_out = np.repeat(inputs, length_row, axis=1)
expected_out = np.repeat(expected_out, length_col, axis=2)
np.testing.assert_allclose(np_output, expected_out)
def test_upsampling_2d_bilinear(self):
num_samples = 2
stack_size = 2
input_num_row = 11
input_num_col = 12
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_num_row,
input_num_col)
else:
inputs = np.random.rand(num_samples, input_num_row, input_num_col,
stack_size)
testing_utils.layer_test(keras.layers.UpSampling2D,
kwargs={'size': (2, 2),
'data_format': data_format,
'interpolation': 'bilinear'},
input_shape=inputs.shape)
if not context.executing_eagerly():
for length_row in [2]:
for length_col in [2, 3]:
layer = keras.layers.UpSampling2D(
size=(length_row, length_col),
data_format=data_format)
layer.build(inputs.shape)
outputs = layer(keras.backend.variable(inputs))
np_output = keras.backend.eval(outputs)
if data_format == 'channels_first':
self.assertEqual(np_output.shape[2], length_row * input_num_row)
self.assertEqual(np_output.shape[3], length_col * input_num_col)
else:
self.assertEqual(np_output.shape[1], length_row * input_num_row)
self.assertEqual(np_output.shape[2], length_col * input_num_col)
def test_upsampling_3d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 10
input_len_dim2 = 11
input_len_dim3 = 12
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2, input_len_dim3)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
input_len_dim3, stack_size)
# basic test
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.UpSampling3D,
kwargs={'size': (2, 2, 2),
'data_format': data_format},
input_shape=inputs.shape)
for length_dim1 in [2, 3]:
for length_dim2 in [2]:
for length_dim3 in [3]:
layer = keras.layers.UpSampling3D(
size=(length_dim1, length_dim2, length_dim3),
data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
if data_format == 'channels_first':
assert np_output.shape[2] == length_dim1 * input_len_dim1
assert np_output.shape[3] == length_dim2 * input_len_dim2
assert np_output.shape[4] == length_dim3 * input_len_dim3
else: # tf
assert np_output.shape[1] == length_dim1 * input_len_dim1
assert np_output.shape[2] == length_dim2 * input_len_dim2
assert np_output.shape[3] == length_dim3 * input_len_dim3
# compare with numpy
if data_format == 'channels_first':
expected_out = np.repeat(inputs, length_dim1, axis=2)
expected_out = np.repeat(expected_out, length_dim2, axis=3)
expected_out = np.repeat(expected_out, length_dim3, axis=4)
else: # tf
expected_out = np.repeat(inputs, length_dim1, axis=1)
expected_out = np.repeat(expected_out, length_dim2, axis=2)
expected_out = np.repeat(expected_out, length_dim3, axis=3)
np.testing.assert_allclose(np_output, expected_out)
@keras_parameterized.run_all_keras_modes
class CroppingTest(keras_parameterized.TestCase):
def test_cropping_1d(self):
num_samples = 2
time_length = 4
input_len_dim1 = 2
inputs = np.random.rand(num_samples, time_length, input_len_dim1)
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Cropping1D,
kwargs={'cropping': (2, 2)},
input_shape=inputs.shape)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.Cropping1D(cropping=(1, 1, 1))
with self.assertRaises(ValueError):
keras.layers.Cropping1D(cropping=None)
def test_cropping_2d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 9
input_len_dim2 = 9
cropping = ((2, 2), (3, 3))
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
stack_size)
with self.cached_session(use_gpu=True):
# basic test
testing_utils.layer_test(
keras.layers.Cropping2D,
kwargs={'cropping': cropping,
'data_format': data_format},
input_shape=inputs.shape)
# correctness test
layer = keras.layers.Cropping2D(
cropping=cropping, data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
# compare with numpy
if data_format == 'channels_first':
expected_out = inputs[:, :, cropping[0][0]:-cropping[0][1], cropping[
1][0]:-cropping[1][1]]
else:
expected_out = inputs[:, cropping[0][0]:-cropping[0][1], cropping[1][
0]:-cropping[1][1], :]
np.testing.assert_allclose(np_output, expected_out)
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
stack_size)
# another correctness test (no cropping)
with self.cached_session(use_gpu=True):
cropping = ((0, 0), (0, 0))
layer = keras.layers.Cropping2D(
cropping=cropping, data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
# compare with input
np.testing.assert_allclose(np_output, inputs)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.Cropping2D(cropping=(1, 1, 1))
with self.assertRaises(ValueError):
keras.layers.Cropping2D(cropping=None)
def test_cropping_3d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 8
input_len_dim2 = 8
input_len_dim3 = 8
croppings = [((2, 2), (1, 1), (2, 3)), 3, (0, 1, 1)]
for cropping in croppings:
for data_format in ['channels_last', 'channels_first']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2, input_len_dim3)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
input_len_dim3, stack_size)
# basic test
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Cropping3D,
kwargs={'cropping': cropping,
'data_format': data_format},
input_shape=inputs.shape)
if len(croppings) == 3 and len(croppings[0]) == 2:
# correctness test
with self.cached_session(use_gpu=True):
layer = keras.layers.Cropping3D(
cropping=cropping, data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
# compare with numpy
if data_format == 'channels_first':
expected_out = inputs[:, :,
cropping[0][0]:-cropping[0][1],
cropping[1][0]:-cropping[1][1],
cropping[2][0]:-cropping[2][1]]
else:
expected_out = inputs[:,
cropping[0][0]:-cropping[0][1],
cropping[1][0]:-cropping[1][1],
cropping[2][0]:-cropping[2][1], :]
np.testing.assert_allclose(np_output, expected_out)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.Cropping3D(cropping=(1, 1))
with self.assertRaises(ValueError):
keras.layers.Cropping3D(cropping=None)
@keras_parameterized.run_all_keras_modes
class DepthwiseConv2DTest(keras_parameterized.TestCase):
def _run_test(self, kwargs, expected_output_shape=None):
num_samples = 2
stack_size = 3
num_row = 7
num_col = 6
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.DepthwiseConv2D,
kwargs=kwargs,
input_shape=(num_samples, num_row, num_col, stack_size),
expected_output_shape=expected_output_shape)
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}),
('padding_same', {'padding': 'same'}),
('strides', {'strides': (2, 2)}),
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
('data_format', {'data_format': 'channels_first'}),
('depth_multiplier_1', {'depth_multiplier': 1}),
('depth_multiplier_2', {'depth_multiplier': 2}),
('dilation_rate', {'dilation_rate': (2, 2)}, (None, 3, 2, 3)),
)
def test_depthwise_conv2d(self, kwargs, expected_output_shape=None):
kwargs['kernel_size'] = (3, 3)
if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True):
self._run_test(kwargs, expected_output_shape)
def test_depthwise_conv2d_full(self):
kwargs = {
'kernel_size': 3,
'padding': 'valid',
'data_format': 'channels_last',
'dilation_rate': (1, 1),
'activation': None,
'depthwise_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'depthwise_constraint': 'unit_norm',
'use_bias': True,
'strides': (2, 2),
'depth_multiplier': 1,
}
self._run_test(kwargs)
if __name__ == '__main__':
test.main()
| 37.946292
| 80
| 0.609018
|
5e7e80d29558c16b0d219a10c363299af3f64964
| 1,080
|
py
|
Python
|
funciones.py
|
manu9812/MongoDB
|
d8cdb217e00f598747d0eee3780c7edff63f25c8
|
[
"Apache-2.0"
] | null | null | null |
funciones.py
|
manu9812/MongoDB
|
d8cdb217e00f598747d0eee3780c7edff63f25c8
|
[
"Apache-2.0"
] | null | null | null |
funciones.py
|
manu9812/MongoDB
|
d8cdb217e00f598747d0eee3780c7edff63f25c8
|
[
"Apache-2.0"
] | null | null | null |
import pymongo
from flask import Flask, jsonify, request
def get_db_connection(uri):
client = pymongo.MongoClient(uri)
return client.cryptongo
app = Flask(__name__)
db_connection = get_db_connection('mongodb://localhost:27017')
def get_documents():
    params = {}
    name = request.args.get('name', '')
    limit = int(request.args.get('limit', 0))
    if name:
        params.update({'name': name})
cursor = db_connection.tickers.find(
params, {'_id': 0, 'ticker_hash': 0}
).limit(limit)
return list(cursor)
def get_top20():
    params = {}
    name = request.args.get('name', '')
    limit = int(request.args.get('limit', 0))
    if name:
        params.update({'name': name})
params.update({'rank': {'$lte': 20}})
cursor = db_connection.tickers.find(
params, {'_id': 0, 'ticker_hash': 0}
).limit(limit)
return list(cursor)
def remove_currency():
params = {}
name = request.args.get('name', '')
if name:
params.update({'name': name})
else:
        return False
return db_connection.tickers.delete_many(
params
).deleted_count
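# Hypothetical wiring, not present in the original snippet: one way these helpers might be exposed as
# Flask routes; the URL paths and view names below are illustrative assumptions.
@app.route('/tickers', methods=['GET'])
def tickers():
    return jsonify({'results': get_documents()})
@app.route('/top20', methods=['GET'])
def top20():
    return jsonify({'results': get_top20()})
if __name__ == '__main__':
    app.run(debug=True)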
| 24.545455
| 62
| 0.650926
|
e31af4fb600a4b0a70fe12777c4cbf26628e52e5
| 3,972
|
py
|
Python
|
local/tf/eval_dnn.py
|
Alicegaz/x-vector-kaldi-tf
|
5249e7d5c499291efbca6c8e49ba197392ae0a28
|
[
"Apache-2.0"
] | 117
|
2018-11-27T11:55:51.000Z
|
2020-10-22T13:23:50.000Z
|
local/tf/eval_dnn.py
|
LCF2764/x-vector-kaldi-tf
|
5249e7d5c499291efbca6c8e49ba197392ae0a28
|
[
"Apache-2.0"
] | 3
|
2019-01-08T10:23:03.000Z
|
2020-03-23T14:34:53.000Z
|
local/tf/eval_dnn.py
|
LCF2764/x-vector-kaldi-tf
|
5249e7d5c499291efbca6c8e49ba197392ae0a28
|
[
"Apache-2.0"
] | 37
|
2018-11-27T15:38:47.000Z
|
2020-09-16T11:30:56.000Z
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import logging
import os
import sys
import traceback
import mkl
import numexpr
import ze_utils as utils
from models import Model
from train_dnn_one_iteration import TarFileDataLoader
mkl.set_num_threads(1)
numexpr.set_num_threads(1)
MKL_NUM_THREADS = 1
OMP_NUM_THREADS = 1
logger = logging.getLogger('eval_dnn')
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s [%(pathname)s:%(lineno)s - "
"%(funcName)s - %(levelname)s ] %(message)s")
def get_args():
""" Get args from stdin.
"""
parser = argparse.ArgumentParser(
description="""Trains a feed forward DNN using frame-level objectives like cross-entropy
and mean-squared-error. DNNs include simple DNNs, TDNNs and CNNs.""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
    # Parameters for the optimization
parser.add_argument("--use-gpu", type=str, dest='use_gpu', choices=["yes", "no"],
help="Use GPU for training.", default="yes")
parser.add_argument("--tar-file", type=str, dest='tar_file', required=True,
help="Specifies a tar file which contains the training data. Also, there must "
"ans npy file for labels with same name but with npy extension. If tar file "
"was given the scp and ranges file didn't used but at least one there two "
"must given.")
parser.add_argument("--input-dir", type=str, dest='input_dir', required=True,
help="Specify the input directory. The model will loaded from this directory and "
"the new model will wrote to the output directory.")
parser.add_argument("--log-file", type=str, dest='log_file', required=True,
help="Specify the log file for training to be able to separate training logs "
"from tensorflow logs.")
print(' '.join(sys.argv))
args = parser.parse_args()
args = process_args(args)
handler = logging.StreamHandler(open(args.log_file, 'wt'))
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info('Starting DNN evaluation (eval_dnn.py)')
return args
def process_args(args):
""" Process the options got from get_args()
"""
args.input_dir = args.input_dir.strip()
if args.input_dir == '' or not os.path.exists(os.path.join(args.input_dir, 'model.meta')):
raise Exception("This scripts expects the input model was exist in '{0}' directory.".format(args.input_dir))
if args.tar_file == '' or not os.path.exists(args.tar_file):
raise Exception("The specified tar file '{0}' not exist.".format(args.tar_file))
if not os.path.exists(args.tar_file.replace('.tar', '.npy')):
raise Exception("There is no corresponding npy label file for tar file '{0}'.".format(args.tar_file))
return args
def eval_dnn(args):
""" The main function for doing evaluation on a trained network.
Args:
args: a Namespace object with the required parameters
obtained from the function process_args()
"""
input_dir = args.input_dir
use_gpu = args.use_gpu == 'yes'
data_loader = TarFileDataLoader(args.tar_file, logger=None, queue_size=16)
model = Model()
model.eval(data_loader, input_dir, use_gpu, logger)
def main():
args = get_args()
try:
eval_dnn(args)
utils.wait_for_background_commands()
except BaseException as e:
# look for BaseException so we catch KeyboardInterrupt, which is
# what we get when a background thread dies.
if not isinstance(e, KeyboardInterrupt):
traceback.print_exc()
sys.exit(1)
if __name__ == "__main__":
main()
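# Illustrative invocation (the paths are placeholders; the flags correspond to get_args() above):
#   python local/tf/eval_dnn.py --use-gpu yes --tar-file exp/egs/valid_egs.1.tar \
#       --input-dir exp/xvector_net --log-file exp/xvector_net/log/eval.1.log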
| 33.378151
| 116
| 0.65282
|
22cd45d3cfe3ddc3707c354bf92acb5e93717533
| 2,137
|
py
|
Python
|
venv/lib/python3.8/site-packages/statsmodels/iolib/openfile.py
|
johncollinsai/post-high-frequency-data
|
88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4
|
[
"MIT"
] | 6,931
|
2015-01-01T11:41:55.000Z
|
2022-03-31T17:03:24.000Z
|
venv/lib/python3.8/site-packages/statsmodels/iolib/openfile.py
|
johncollinsai/post-high-frequency-data
|
88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4
|
[
"MIT"
] | 6,137
|
2015-01-01T00:33:45.000Z
|
2022-03-31T22:53:17.000Z
|
venv/lib/python3.8/site-packages/statsmodels/iolib/openfile.py
|
johncollinsai/post-high-frequency-data
|
88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4
|
[
"MIT"
] | 2,608
|
2015-01-02T21:32:31.000Z
|
2022-03-31T07:38:30.000Z
|
"""
Handle file opening for read/write
"""
from pathlib import Path
from numpy.lib._iotools import _is_string_like
class EmptyContextManager(object):
"""
    This class is needed to allow a file-like object to be used as a
    context manager without getting closed.
"""
def __init__(self, obj):
self._obj = obj
def __enter__(self):
"""When entering, return the embedded object"""
return self._obj
def __exit__(self, *args):
"""Do not hide anything"""
return False
def __getattr__(self, name):
return getattr(self._obj, name)
def _open(fname, mode, encoding):
if fname.endswith(".gz"):
import gzip
return gzip.open(fname, mode, encoding=encoding)
else:
return open(fname, mode, encoding=encoding)
def get_file_obj(fname, mode="r", encoding=None):
"""
Light wrapper to handle strings, path objects and let files (anything else)
pass through.
    It also handles '.gz' files.
Parameters
----------
fname : str, path object or file-like object
File to open / forward
mode : str
Argument passed to the 'open' or 'gzip.open' function
encoding : str
For Python 3 only, specify the encoding of the file
Returns
-------
A file-like object that is always a context-manager. If the `fname` was
already a file-like object, the returned context manager *will not
close the file*.
"""
if _is_string_like(fname):
fname = Path(fname)
if isinstance(fname, Path):
return fname.open(mode=mode, encoding=encoding)
elif hasattr(fname, "open"):
return fname.open(mode=mode, encoding=encoding)
try:
return open(fname, mode, encoding=encoding)
except TypeError:
try:
# Make sure the object has the write methods
if "r" in mode:
fname.read
if "w" in mode or "a" in mode:
fname.write
except AttributeError:
raise ValueError("fname must be a string or a file-like object")
return EmptyContextManager(fname)
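def _example_usage(path):
    """Usage sketch (not part of the statsmodels API): strings and Path objects are opened and closed
    by the context manager, while an existing file-like object is passed through and left open."""
    from io import StringIO
    with get_file_obj(path, mode="r", encoding="utf-8") as f:  # opened and closed here
        first_line = f.readline()
    buf = StringIO("x,y\n1,2\n")
    with get_file_obj(buf) as f:  # wrapped in EmptyContextManager, not closed on exit
        body = f.read()
    return first_line, body, buf.closed  # buf.closed is False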
| 26.7125
| 79
| 0.623772
|
eaca363f69d14df4c50823d770dcc335e44f7631
| 3,702
|
py
|
Python
|
_unittests/ut_faq/test_graph_with_labels.py
|
mohamedelkansouli/Ensae_py
|
8bc867bd2081c259c793fadfa8be5dcc7bd1400b
|
[
"MIT"
] | null | null | null |
_unittests/ut_faq/test_graph_with_labels.py
|
mohamedelkansouli/Ensae_py
|
8bc867bd2081c259c793fadfa8be5dcc7bd1400b
|
[
"MIT"
] | null | null | null |
_unittests/ut_faq/test_graph_with_labels.py
|
mohamedelkansouli/Ensae_py
|
8bc867bd2081c259c793fadfa8be5dcc7bd1400b
|
[
"MIT"
] | null | null | null |
"""
@brief test log(time=3s)
"""
import sys
import os
import unittest
import warnings
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import fix_tkinter_issues_virtualenv
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
from src.ensae_teaching_cs.faq import graph_with_label
class TestGraphWithLabel(unittest.TestCase):
def test_graph_with_label(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
        x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
             18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
             34, 35, 36, 37, 38, 39, 40, 41, 42, 43]
        y = [1, 3, 10, 6, 3, 5, 3, 6, 4, 2, 3, 2, 11, 10, 4, 5, 2, 5, 4, 1,
             1, 1, 3, 15, 5, 2, 1, 5, 3, 1, 3, 2, 4, 5, 2, 12, 12, 5, 11, 2,
             19, 21, 5, 2]
        xl = ['2014-w04', '2014-w05', '2014-w06', '2014-w07', '2014-w08',
              '2014-w09', '2014-w10', '2014-w11', '2014-w12', '2014-w13',
              '2014-w14', '2014-w15', '2014-w16', '2014-w17', '2014-w18',
              '2014-w19', '2014-w20', '2014-w21', '2014-w22', '2014-w23',
              '2014-w24', '2014-w25', '2014-w27', '2014-w29', '2014-w30',
              '2014-w31', '2014-w32', '2014-w34', '2014-w35', '2014-w36',
              '2014-w38', '2014-w39', '2014-w41', '2014-w42', '2014-w43',
              '2014-w44', '2014-w45', '2014-w46', '2014-w47', '2014-w48',
              '2014-w49', '2014-w50', '2014-w51', '2014-w52']
if sys.version_info[:2] <= (3, 4):
warnings.warn(
"Issue with Python 3.4, bug probably related to wrong pointers")
return
fix_tkinter_issues_virtualenv(fLOG=fLOG)
import matplotlib.pyplot as plt
_, ax = plt.subplots(figsize=(8, 3))
graph_with_label(x, y, xl, ax=ax)
if __name__ == "__main__":
plt.show()
plt.close('all')
fLOG("end")
if __name__ == "__main__":
unittest.main()
| 19.691489
| 80
| 0.318747
|
a26d7e4bb966a864358215671fbb2cf2ebe33cd5
| 4,564
|
py
|
Python
|
server.py
|
niyunhuo98/simple-chat-app
|
b2e725083dce3200364233da54b930d2e6044513
|
[
"MIT"
] | null | null | null |
server.py
|
niyunhuo98/simple-chat-app
|
b2e725083dce3200364233da54b930d2e6044513
|
[
"MIT"
] | null | null | null |
server.py
|
niyunhuo98/simple-chat-app
|
b2e725083dce3200364233da54b930d2e6044513
|
[
"MIT"
] | null | null | null |
import socket, string, random, threading, time
from datetime import datetime
def get_time():
"""get current time"""
return str(datetime.now().hour) + ':' + str(datetime.now().minute)
def id_generator():
"""Generate an ID"""
number = str(random.randint(0, 9))
l = [random.choice(string.ascii_lowercase) for _ in range(4)]
s = ''.join(l)
return number + s
def get_id(clients):
"""get a unique id"""
id_num = id_generator()
while id_num in clients:
id_num = id_generator()
return id_num
def get_clients(clients, id_num):
"""Update client list"""
available_clients = list(clients.keys())
available_clients.remove(id_num)
return available_clients
def create_client(connectionSocket, id_num, clients, available_clients, blocklist):
"""Manage client's behavior"""
while 1:
msg = connectionSocket.recv(1024).decode()
if msg[:8] == '/unblock' and msg[9:] in clients and msg[9:] != id_num and msg[9:] in blocklist[id_num]:
blocklist[id_num].remove(msg[9:])
continue
if msg[:6] == '/block' and msg[7:] in clients and msg[7:] != id_num and msg[7:] not in blocklist[id_num]:
blocklist[id_num].append(msg[7:])
continue
if msg == '/getlist':
available_clients = get_clients(clients, id_num)
connectionSocket.send((f"{get_time()} Available Clients:{str(available_clients)}").encode('utf-8'))
continue
if msg == '/getblocklist':
connectionSocket.send((f"{get_time()} Blocklist:{blocklist[id_num]}").encode('utf-8'))
continue
if msg == '.exit' or len(msg) == 0:
connectionSocket.send(".exit".encode('utf-8'))
del blocklist[id_num]
del clients[id_num]
print(f"{get_time()} Client {id_num} Disconnected")
break
try:
if msg[0] == '#' and msg[1:msg.index(':')] == id_num:
connectionSocket.send((f"{get_time()} Error: You cannot send the message to yourself").encode('utf-8'))
elif msg[0] == '#' and msg[1:msg.index(':')] not in clients:
connectionSocket.send((f"{get_time()} Error: Client ID not found").encode('utf-8'))
elif msg[:6] == '/block' and msg[7:] == id_num:
connectionSocket.send((f"{get_time()} Error: You cannot block yourself").encode('utf-8'))
elif msg[:6] == '/block' and msg[7:] not in clients:
connectionSocket.send((f"{get_time()} Error: Client ID not found").encode('utf-8'))
elif msg[:6] == '/block' and msg[7:] in blocklist[id_num]:
connectionSocket.send((f"{get_time()} Error: Already blocked client {msg[7:]}").encode('utf-8'))
elif msg[:8] == '/unblock' and msg[9:] == id_num:
connectionSocket.send((f"{get_time()} Error: You cannot unblock yourself").encode('utf-8'))
elif msg[:8] == '/unblock' and msg[9:] not in blocklist[id_num]:
connectionSocket.send((f"{get_time()} Error: Client ID not found").encode('utf-8'))
elif id_num in blocklist[msg[1:msg.index(':')]]:
connectionSocket.send((f"{get_time()} Error: Client {msg[1:msg.index(':')]} blocked you").encode('utf-8'))
else:
clients[msg[1:msg.index(':')]].send((f"{get_time()} From {id_num}:{msg[msg.index(':')+1:]}").encode('utf-8'))
except:
connectionSocket.send((f"{get_time()} Error: Incorrect Format, Please Read Description Again").encode('utf-8'))
connectionSocket.close()
def main():
clients = {}
blocklist = {}
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serverPort = 12000
serverSocket.bind(('', serverPort))
serverSocket.listen()
print(f"{get_time()} The server is started")
while 1:
id_num = get_id(clients)
connectionSocket, _ = serverSocket.accept()
clients[id_num] = connectionSocket
blocklist[id_num] = []
available_clients = get_clients(clients, id_num)
print(f"{get_time()} Client {id_num} Connected")
connectionSocket.send((f"Your Client ID is {id_num}").encode('utf-8'))
time.sleep(1)
connectionSocket.send((f"\n{get_time()} Available Clients:{str(available_clients)}").encode('utf-8'))
        threading.Thread(target=create_client,
                         args=(connectionSocket, id_num, clients, available_clients, blocklist),
                         daemon=True).start()
connectionSocket.close()
if __name__ == "__main__":
main()
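# --- client sketch (added for illustration; not part of the original server) ---
# Run it from another terminal while main() is listening; both the host
# 'localhost' and the target id '1abcd' below are assumptions. It exercises the
# protocol handled in create_client: '#<id>:<text>' for messages, plus the
# '/getlist', '/block <id>', '/unblock <id>' and '.exit' commands.
def demo_client():
    clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    clientSocket.connect(('localhost', 12000))
    print(clientSocket.recv(1024).decode())       # "Your Client ID is ..."
    print(clientSocket.recv(1024).decode())       # available client list
    clientSocket.send('/getlist'.encode('utf-8'))
    print(clientSocket.recv(1024).decode())
    clientSocket.send('#1abcd:hello there'.encode('utf-8'))  # forwarded, or an error comes back
    clientSocket.send('.exit'.encode('utf-8'))
    clientSocket.close()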
| 44.31068
| 125
| 0.602542
|
e4ac3d9b3eb031827b2de555d9beaff7f8ce7e7e
| 1,523
|
py
|
Python
|
evaluation/utils/pytorch_memlab/courtesy.py
|
NotMorven/cavaface.pytorch
|
822651f0e6d4d08df5441922acead39dc5375103
|
[
"MIT"
] | 716
|
2019-05-29T10:28:38.000Z
|
2022-03-31T03:48:42.000Z
|
evaluation/utils/pytorch_memlab/courtesy.py
|
careytian0405/cavaface.pytorch
|
38956bb505558784081320bf40c88afd286b1fcd
|
[
"MIT"
] | 76
|
2020-05-22T05:21:33.000Z
|
2021-07-30T03:39:06.000Z
|
evaluation/utils/pytorch_memlab/courtesy.py
|
careytian0405/cavaface.pytorch
|
38956bb505558784081320bf40c88afd286b1fcd
|
[
"MIT"
] | 65
|
2020-05-07T08:57:16.000Z
|
2021-07-21T20:10:44.000Z
|
import gc
import torch
class Courtesy():
"""A class to yield CUDA memory at any time in the training
The whole save/load is a bit tricky because all data transfer should
be inplace operation and gradient agnostic
"""
def __init__(self):
self.loc_map = {}
def yield_memory(self):
"""Transfer all the CUDA tensors into CPU memory"""
tensors = [obj for obj in gc.get_objects() if isinstance(obj, torch.Tensor)]
for t in tensors:
# in case tensors appear more than once
if t not in self.loc_map:
self.loc_map[t] = t.device
t.data = t.data.cpu()
# parameters have one more wrapper for .data
if isinstance(t, torch.nn.Parameter):
# sometimes Parameter does not have grad
                try:
                    t.grad.data = t.grad.cpu()
                except AttributeError:
                    # grad can be None, which makes .data raise AttributeError
                    pass
torch.cuda.empty_cache()
def restore(self):
"""Restore the tensors into original CUDA devices"""
for t, device in self.loc_map.items():
t.data = t.data.to(device)
if isinstance(t, torch.nn.Parameter):
# sometimes Parameter does not have grad
                try:
                    t.grad = t.grad.to(device)
                except AttributeError:
                    # grad can be None; skip it, just like in yield_memory
                    pass
self.loc_map.clear()
def __enter__(self):
self.yield_memory()
return self
def __exit__(self, *args):
self.restore()
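# --- usage sketch (added for illustration; assumes a CUDA device is available) ---
# Entering the context moves every live CUDA tensor to the CPU and empties the
# cache; leaving it restores each tensor to the device recorded in loc_map.
if __name__ == "__main__" and torch.cuda.is_available():
    layer = torch.nn.Linear(8, 4).cuda()
    with Courtesy():
        print(layer.weight.device)   # cpu while the context is active
    print(layer.weight.device)       # back on cuda:0 after restore()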
| 30.46
| 84
| 0.54826
|
94ab233e6d8f10bcd01637bb0a9b44fed34e676d
| 1,034
|
py
|
Python
|
food.py
|
RevanthMBontha/Snake-Game
|
f0f16202fecec4b80d3ed3cbbaa30b7b5d9b4d9a
|
[
"MIT"
] | 1
|
2022-03-01T04:01:30.000Z
|
2022-03-01T04:01:30.000Z
|
food.py
|
RevanthMBontha/Snake-Game
|
f0f16202fecec4b80d3ed3cbbaa30b7b5d9b4d9a
|
[
"MIT"
] | null | null | null |
food.py
|
RevanthMBontha/Snake-Game
|
f0f16202fecec4b80d3ed3cbbaa30b7b5d9b4d9a
|
[
"MIT"
] | null | null | null |
from turtle import Turtle
from winsound import PlaySound, SND_ASYNC
from global_helpers import get_random_color, get_random_location
from global_helpers import FOOD_COLORS
class Food(Turtle):
def __init__(self):
super().__init__()
self.shape("circle")
self.penup()
self.shapesize(stretch_len=0.5, stretch_wid=0.5)
self.color(get_random_color(FOOD_COLORS))
self.speed("fastest")
self.goto(get_random_location())
def move_food(self):
"""Moves the food to a new location and set's it to a random color"""
self.goto(get_random_location())
self.color(get_random_color(FOOD_COLORS))
def change_color(self):
"""Updates the color of the food to a new random color from the list"""
self.color(get_random_color(FOOD_COLORS))
def update_food(self):
"""Does everything required to update the food"""
self.move_food()
self.change_color()
PlaySound("./music/coin_collect.wav", SND_ASYNC)
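# --- usage sketch (added for illustration; assumes the project's global_helpers
# module and ./music/coin_collect.wav are present, and Windows for winsound) ---
if __name__ == "__main__":
    from turtle import Screen
    screen = Screen()
    screen.setup(width=600, height=600)
    food = Food()
    food.update_food()    # relocate, recolor and play the pickup sound
    screen.exitonclick()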
| 30.411765
| 79
| 0.67118
|
119a712fa53d18c824230251eaf260b6dd246e2c
| 6,006
|
py
|
Python
|
tests/symbolicator/test_minidump_full.py
|
pombredanne/django-sentry
|
4ad09417fb3cfa3aa4a0d4175ae49fe02837c567
|
[
"BSD-3-Clause"
] | null | null | null |
tests/symbolicator/test_minidump_full.py
|
pombredanne/django-sentry
|
4ad09417fb3cfa3aa4a0d4175ae49fe02837c567
|
[
"BSD-3-Clause"
] | null | null | null |
tests/symbolicator/test_minidump_full.py
|
pombredanne/django-sentry
|
4ad09417fb3cfa3aa4a0d4175ae49fe02837c567
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import pytest
import zipfile
from mock import patch
from six import BytesIO
from django.core.urlresolvers import reverse
from django.core.files.uploadedfile import SimpleUploadedFile
from sentry import eventstore
from sentry.testutils import TransactionTestCase
from sentry.models import EventAttachment
from sentry.lang.native.utils import STORE_CRASH_REPORTS_ALL
from tests.symbolicator import get_fixture_path, insta_snapshot_stacktrace_data
class SymbolicatorMinidumpIntegrationTest(TransactionTestCase):
# For these tests to run, write `symbolicator.enabled: true` into your
# `~/.sentry/config.yml` and run `sentry devservices up`
@pytest.fixture(autouse=True)
def initialize(self, live_server):
self.project.update_option("sentry:builtin_symbol_sources", [])
new_prefix = live_server.url
with patch("sentry.auth.system.is_internal_ip", return_value=True), self.options(
{"system.url-prefix": new_prefix}
):
# Run test case:
yield
def upload_symbols(self):
url = reverse(
"sentry-api-0-dsym-files",
kwargs={
"organization_slug": self.project.organization.slug,
"project_slug": self.project.slug,
},
)
self.login_as(user=self.user)
out = BytesIO()
f = zipfile.ZipFile(out, "w")
f.write(get_fixture_path("windows.sym"), "crash.sym")
f.close()
response = self.client.post(
url,
{
"file": SimpleUploadedFile(
"symbols.zip", out.getvalue(), content_type="application/zip"
)
},
format="multipart",
)
assert response.status_code == 201, response.content
assert len(response.data) == 1
def test_full_minidump(self):
self.project.update_option("sentry:store_crash_reports", STORE_CRASH_REPORTS_ALL)
self.upload_symbols()
with self.feature("organizations:event-attachments"):
attachment = BytesIO(b"Hello World!")
attachment.name = "hello.txt"
with open(get_fixture_path("windows.dmp"), "rb") as f:
resp = self._postMinidumpWithHeader(
f, {"sentry[logger]": "test-logger", "some_file": attachment}
)
assert resp.status_code == 200
event_id = resp.content
event = eventstore.get_event_by_id(self.project.id, event_id)
insta_snapshot_stacktrace_data(self, event.data)
assert event.data.get("logger") == "test-logger"
# assert event.data.get("extra") == {"foo": "bar"}
attachments = sorted(
EventAttachment.objects.filter(event_id=event.event_id), key=lambda x: x.name
)
hello, minidump = attachments
assert hello.name == "hello.txt"
assert hello.file.type == "event.attachment"
assert hello.file.checksum == "2ef7bde608ce5404e97d5f042f95f89f1c232871"
assert minidump.name == "windows.dmp"
assert minidump.file.type == "event.minidump"
assert minidump.file.checksum == "74bb01c850e8d65d3ffbc5bad5cabc4668fce247"
def test_full_minidump_json_extra(self):
self.project.update_option("sentry:store_crash_reports", STORE_CRASH_REPORTS_ALL)
self.upload_symbols()
with self.feature("organizations:event-attachments"):
with open(get_fixture_path("windows.dmp"), "rb") as f:
resp = self._postMinidumpWithHeader(
f, {"sentry": '{"logger":"test-logger"}', "foo": "bar"}
)
assert resp.status_code == 200
event_id = resp.content
event = eventstore.get_event_by_id(self.project.id, event_id)
assert event.data.get("logger") == "test-logger"
assert event.data.get("extra") == {"foo": "bar"}
# Other assertions are performed by `test_full_minidump`
def test_full_minidump_invalid_extra(self):
self.project.update_option("sentry:store_crash_reports", STORE_CRASH_REPORTS_ALL)
self.upload_symbols()
with self.feature("organizations:event-attachments"):
with open(get_fixture_path("windows.dmp"), "rb") as f:
resp = self._postMinidumpWithHeader(
f, {"sentry": "{{{{", "foo": "bar"} # invalid sentry JSON
)
assert resp.status_code == 200
event_id = resp.content
event = eventstore.get_event_by_id(self.project.id, event_id)
assert not event.data.get("logger")
assert event.data.get("extra") == {"foo": "bar"}
# Other assertions are performed by `test_full_minidump`
def test_raw_minidump(self):
self.project.update_option("sentry:store_crash_reports", STORE_CRASH_REPORTS_ALL)
self.upload_symbols()
with self.feature("organizations:event-attachments"):
with open(get_fixture_path("windows.dmp"), "rb") as f:
# Send as raw request body instead of multipart/form-data
resp = self._postMinidumpWithHeader(f, raw=True)
assert resp.status_code == 200
event_id = resp.content
event = eventstore.get_event_by_id(self.project.id, event_id)
insta_snapshot_stacktrace_data(self, event.data)
def test_missing_dsym(self):
with self.feature("organizations:event-attachments"):
with open(get_fixture_path("windows.dmp"), "rb") as f:
resp = self._postMinidumpWithHeader(f, {"sentry[logger]": "test-logger"})
assert resp.status_code == 200
event_id = resp.content
event = eventstore.get_event_by_id(self.project.id, event_id)
insta_snapshot_stacktrace_data(self, event.data)
assert not EventAttachment.objects.filter(event_id=event.event_id)
| 39
| 89
| 0.634033
|
b36c07e7da91090130cedd9a0fc4b23708584151
| 2,197
|
py
|
Python
|
tests/stats/test_ap_checker.py
|
kralka/scaaml
|
f47e028dad6bc74eb480cbfae83724c1057caf39
|
[
"Apache-2.0"
] | null | null | null |
tests/stats/test_ap_checker.py
|
kralka/scaaml
|
f47e028dad6bc74eb480cbfae83724c1057caf39
|
[
"Apache-2.0"
] | null | null | null |
tests/stats/test_ap_checker.py
|
kralka/scaaml
|
f47e028dad6bc74eb480cbfae83724c1057caf39
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from unittest.mock import MagicMock, patch
import numpy as np
from scaaml.stats import APChecker
@patch.object(APChecker, '_run_check')
def test_run_all_calls_run_check_with_check_all_nonzero(mock_run_check):
attack_point_name = 'k'
counts = np.array([])
ap_checker = APChecker(counts=counts, attack_point_name=attack_point_name)
mock_run_check.assert_called_with(APChecker.check_all_nonzero)
@patch.object(APChecker, 'run_all')
def test_init_calls_run_all(mock_run_all):
attack_point_name = 'k'
counts = np.array([])
ap_checker = APChecker(counts=counts, attack_point_name=attack_point_name)
mock_run_all.assert_called_once_with()
@patch.object(APChecker, 'check_all_nonzero')
def test_run_all_calls_check_all_nonzero(mock_check_all_nonzero):
attack_point_name = 'k'
counts = np.array([])
ap_checker = APChecker(counts=counts, attack_point_name=attack_point_name)
mock_check_all_nonzero.assert_called_once_with()
def test_attack_point_name():
attack_point_name = MagicMock()
counts = np.array([])
ap_checker = APChecker(counts=counts, attack_point_name=attack_point_name)
assert ap_checker.attack_point_name == attack_point_name
def test_check_all_nonzero():
attack_point_name = 'some_strange_attack_p0int_name'
counts = np.array([[1, 2, 3], [2, 3, 1]])
ap_checker = APChecker(counts=counts, attack_point_name=attack_point_name)
assert not ap_checker._something_failed
counts[1][1] = 0
ap_checker = APChecker(counts=counts, attack_point_name=attack_point_name)
assert ap_checker._something_failed
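# --- direct-usage sketch (added for illustration) mirroring the tests above ---
# APChecker runs its checks on construction; a zero anywhere in the counts
# table is recorded as a failure.
if __name__ == "__main__":
    ok = APChecker(counts=np.array([[1, 2, 3], [2, 3, 1]]), attack_point_name='k')
    bad = APChecker(counts=np.array([[1, 0, 3], [2, 3, 1]]), attack_point_name='k')
    print(ok._something_failed)    # False
    print(bad._something_failed)   # True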
| 30.09589
| 78
| 0.769231
|
3829d1597a4f02a55452c0dd261c354528323c24
| 1,176
|
py
|
Python
|
pyvo/vomas/tests/test.11.stc_position.py
|
lmichel/pyvo
|
8296bee2e799843909805fb6ae528a9b23776e8d
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
pyvo/vomas/tests/test.11.stc_position.py
|
lmichel/pyvo
|
8296bee2e799843909805fb6ae528a9b23776e8d
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
pyvo/vomas/tests/test.11.stc_position.py
|
lmichel/pyvo
|
8296bee2e799843909805fb6ae528a9b23776e8d
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
"""
Created on jan 2022
@author: laurentmichel
"""
import unittest
import os
from pyvo.vomas.stc_classes.measure import Velocity
from pyvo.vomas.utils.xml_utils import XmlUtils
class TestSTCPosition(unittest.TestCase):
def test_PKTable(self):
self.maxDiff = None
vpath = os.path.join(self.data_path, "data/input/test.11.xml")
xmltree = XmlUtils.xmltree_from_file(vpath)
velocity = Velocity(xmltree)
self.assertEqual(velocity.error.__repr__(),
"[Bound3D: [[1.0 1.2]mas/year [2.1 2.3]mas/year [2345.0 2399.0]km/sec]")
self.assertEqual(velocity.coord.__repr__(),
"[LonLatPoint: 1.1mas/year 2.2mas/year 2345.98km/sec ICRS]")
self.assertEqual(velocity.__repr__(),
"ucd: phys.veloc coords: [LonLatPoint: 1.1mas/year 2.2mas/year 2345.98km/sec ICRS] error: [Bound3D: [[1.0 1.2]mas/year [2.1 2.3]mas/year [2345.0 2399.0]km/sec]")
def setUp(self):
self.data_path = os.path.dirname(os.path.realpath(__file__))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| 34.588235
| 186
| 0.628401
|
37931b1a9c5782866d7459cd570bba32fb5060da
| 750
|
py
|
Python
|
Carrito/compras/compras/0002_auto_20210609_0826.py
|
EnriqueViajero/carrito-Jaguarete-SAA
|
ada20d1a15ab9d8caa977b925a62d9b9ad72c670
|
[
"MIT"
] | null | null | null |
Carrito/compras/compras/0002_auto_20210609_0826.py
|
EnriqueViajero/carrito-Jaguarete-SAA
|
ada20d1a15ab9d8caa977b925a62d9b9ad72c670
|
[
"MIT"
] | null | null | null |
Carrito/compras/compras/0002_auto_20210609_0826.py
|
EnriqueViajero/carrito-Jaguarete-SAA
|
ada20d1a15ab9d8caa977b925a62d9b9ad72c670
|
[
"MIT"
] | null | null | null |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('muebles', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='productos',
fields=[
('id_nombre', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=3)),
('precio', models.CharField(max_length=64)),
('cantidad_existente', models.CharField(max_length=64)),
('fecha_ingreso', models.CharField(max_length=3)),
],
),
]
| 30
| 125
| 0.548
|
0be70cb3d5d448ccfb83c89fe4b4a300cfcd2c55
| 12,994
|
py
|
Python
|
networks/comatchnet/comatchnet.py
|
huanglf714/COMatchNet
|
79023f5be65d354eb9bdac026d7e0d73110bc4aa
|
[
"Apache-2.0"
] | 1
|
2022-03-30T01:26:47.000Z
|
2022-03-30T01:26:47.000Z
|
networks/comatchnet/comatchnet.py
|
huanglf714/COMatchNet
|
79023f5be65d354eb9bdac026d7e0d73110bc4aa
|
[
"Apache-2.0"
] | null | null | null |
networks/comatchnet/comatchnet.py
|
huanglf714/COMatchNet
|
79023f5be65d354eb9bdac026d7e0d73110bc4aa
|
[
"Apache-2.0"
] | 1
|
2022-03-30T01:27:23.000Z
|
2022-03-30T01:27:23.000Z
|
from PIL.Image import NONE
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from networks.layers.loss import Concat_CrossEntropyLoss
from networks.layers.loss import Lovasz_Loss
from networks.layers.matching import global_matching, global_matching_for_eval, local_matching, foreground2background
from networks.layers.transformer import Transformer
from networks.layers.attention import calculate_attention_head, calculate_attention_head_for_eval
from networks.layers.co_attention import CO_Attention
from networks.comatchnet.ensembler import CollaborativeEnsembler, DynamicPreHead
class COMatchNet(nn.Module):
def __init__(self, cfg, feature_extracter):
super(COMatchNet, self).__init__()
self.cfg = cfg
self.epsilon = cfg.MODEL_EPSILON
self.feature_extracter=feature_extracter
self.seperate_conv = nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_ASPP_OUTDIM, kernel_size=3, stride=1, padding=1, groups=cfg.MODEL_ASPP_OUTDIM)
self.bn1 = nn.GroupNorm(cfg.MODEL_GN_GROUPS, cfg.MODEL_ASPP_OUTDIM)
self.relu1 = nn.ReLU(True)
self.embedding_conv = nn.Conv2d(cfg.MODEL_ASPP_OUTDIM, cfg.MODEL_SEMANTIC_EMBEDDING_DIM, 1, 1)
self.bn2 = nn.GroupNorm(cfg.MODEL_GN_EMB_GROUPS, cfg.MODEL_SEMANTIC_EMBEDDING_DIM)
self.relu2 = nn.ReLU(True)
self.co_attention = CO_Attention(in_dim=cfg.MODEL_SEMANTIC_EMBEDDING_DIM,
co_attention_dim=cfg.MODEL_ATTENTION_OUT_DIM)
self.semantic_embedding=nn.Sequential(*[self.seperate_conv, self.bn1, self.relu1, self.embedding_conv, self.bn2, self.relu2])
self.global_transformer = Transformer(100,56,feature_adjustor=None,feature_extractor=None)
self.bg_bias = nn.Parameter(torch.zeros(1, 1, 1, 1))
self.fg_bias = nn.Parameter(torch.zeros(1, 1, 1, 1))
self.criterion = Concat_CrossEntropyLoss(cfg.TRAIN_TOP_K_PERCENT_PIXELS, cfg.TRAIN_HARD_MINING_STEP)
# self.criterion = Lovasz_Loss(cfg.TRAIN_TOP_K_PERCENT_PIXELS, cfg.TRAIN_HARD_MINING_STEP)
for m in self.semantic_embedding:
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
self.dynamic_seghead = CollaborativeEnsembler(
in_dim=cfg.MODEL_SEMANTIC_EMBEDDING_DIM * 3 + cfg.MODEL_PRE_HEAD_EMBEDDING_DIM,
attention_dim=cfg.MODEL_SEMANTIC_EMBEDDING_DIM * 4,
embed_dim=cfg.MODEL_HEAD_EMBEDDING_DIM,
refine_dim=cfg.MODEL_REFINE_CHANNELS,
low_level_dim=cfg.MODEL_LOW_LEVEL_INPLANES)
in_dim = 2 + len(cfg.MODEL_MULTI_LOCAL_DISTANCE)
if cfg.MODEL_MATCHING_BACKGROUND:
in_dim += len(cfg.MODEL_MULTI_LOCAL_DISTANCE)
self.dynamic_prehead = DynamicPreHead(
in_dim=in_dim,
embed_dim=cfg.MODEL_PRE_HEAD_EMBEDDING_DIM)
def forward(self, input, ref_frame_label, previous_frame_mask, current_frame_mask,
gt_ids, step=0, tf_board=False):
# print(input.shape,'--1')
# print(ref_frame_label.shape,'--2')
x, low_level = self.extract_feature(input)
ref_frame_embedding, previous_frame_embedding, current_frame_embedding = torch.split(x, split_size_or_sections=int(x.size(0)/3), dim=0)
_, _, current_low_level = torch.split(low_level, split_size_or_sections=int(x.size(0)/3), dim=0)
# print(ref_frame_embedding.shape,'--3')
# print(current_low_level.shape,'--4')
bs, c, h, w = current_frame_embedding.size()
tmp_dic, boards = self.before_seghead_process(
ref_frame_embedding,
previous_frame_embedding,
current_frame_embedding,
ref_frame_label,
previous_frame_mask,
gt_ids,
current_low_level=current_low_level,tf_board=tf_board)
label_dic=[]
all_pred = []
for i in range(bs):
tmp_pred_logits = tmp_dic[i]
tmp_pred_logits = nn.functional.interpolate(tmp_pred_logits, size=(input.shape[2],input.shape[3]), mode='bilinear', align_corners=True)
tmp_dic[i] = tmp_pred_logits
label_tmp, obj_num = current_frame_mask[i], gt_ids[i]
# print('label_tmp.shape:',label_tmp.shape)
label_dic.append(label_tmp.long())
pred = tmp_pred_logits
preds_s = torch.argmax(pred,dim=1)
all_pred.append(preds_s)
all_pred = torch.cat(all_pred, dim=0)
return self.criterion(tmp_dic, label_dic, step), all_pred, boards
def forward_for_eval(self, ref_embeddings, ref_masks, prev_embedding, prev_mask, current_frame, pred_size, gt_ids):
current_frame_embedding, current_low_level = self.extract_feature(current_frame)
if prev_embedding is None:
return None, current_frame_embedding
else:
bs,c,h,w = current_frame_embedding.size()
tmp_dic, _ = self.before_seghead_process(
ref_embeddings,
prev_embedding,
current_frame_embedding,
ref_masks,
prev_mask,
gt_ids,
current_low_level=current_low_level,
tf_board=False)
all_pred = []
for i in range(bs):
pred = tmp_dic[i]
pred = nn.functional.interpolate(pred, size=(pred_size[0],pred_size[1]), mode='bilinear',align_corners=True)
all_pred.append(pred)
all_pred = torch.cat(all_pred, dim=0)
all_pred = torch.softmax(all_pred, dim=1)
return all_pred, current_frame_embedding
def extract_feature(self, x):
x, low_level=self.feature_extracter(x)
x = self.semantic_embedding(x)
return x, low_level
def before_seghead_process(self,
ref_frame_embedding=None, previous_frame_embedding=None, current_frame_embedding=None,
ref_frame_label=None, previous_frame_mask=None,
gt_ids=None, current_low_level=None, tf_board=False):
cfg = self.cfg
dic_tmp=[]
bs,c,h,w = current_frame_embedding.size()
if self.training:
scale_ref_frame_label = torch.nn.functional.interpolate(ref_frame_label.float(),size=(h,w),mode='nearest')
scale_ref_frame_label = scale_ref_frame_label.int()
else:
scale_ref_frame_labels = []
for each_ref_frame_label in ref_frame_label:
each_scale_ref_frame_label = torch.nn.functional.interpolate(each_ref_frame_label.float(),size=(h,w),mode='nearest')
each_scale_ref_frame_label = each_scale_ref_frame_label.int()
scale_ref_frame_labels.append(each_scale_ref_frame_label)
scale_ref_frame_label = torch.cat(scale_ref_frame_labels)
scale_previous_frame_label=torch.nn.functional.interpolate(previous_frame_mask.float(),size=(h,w),mode='nearest')
scale_previous_frame_label=scale_previous_frame_label.int()
boards = {'image': {}, 'scalar': {}}
for n in range(bs):
ref_obj_ids = torch.arange(0, gt_ids[n] + 1, device=current_frame_embedding.device).int().view(-1, 1, 1, 1)
obj_num = ref_obj_ids.size(0)
if gt_ids[n] > 0:
dis_bias = torch.cat([self.bg_bias, self.fg_bias.expand(gt_ids[n], -1, -1, -1)], dim=0)
else:
dis_bias = self.bg_bias
seq_current_frame_embedding = current_frame_embedding[n]
seq_current_frame_embedding = seq_current_frame_embedding.permute(1,2,0)
seq_prev_frame_embedding = previous_frame_embedding[n]
seq_prev_frame_embedding = seq_prev_frame_embedding.permute(1,2,0)
seq_previous_frame_label = (scale_previous_frame_label[n].int() == ref_obj_ids).float()
to_cat_previous_frame = seq_previous_frame_label
seq_previous_frame_label = seq_previous_frame_label.squeeze(1).permute(1,2,0)
#<-----------------------------global----------------------------------------->
seq_ref_frame_label = (scale_ref_frame_label[n].int() == ref_obj_ids).float()
# param:
# test_feat.shape=[1,1,c,h,w]
# encoded_train_feat.shape=[1,1,c,h,w]
# train_mask.shape=[1,obj_num,1,h,w]
# return:
# global_transformer_fg.shape=[1,1,obj_num,h,w]
test_feat = current_frame_embedding[n].unsqueeze(0).unsqueeze(0)
if self.training:
train_feat = ref_frame_embedding[n].unsqueeze(0).unsqueeze(0)
else:
train_feat = ref_frame_embedding[n].unsqueeze(0)
train_mask = seq_ref_frame_label.unsqueeze(0)
# print(test_feat.shape,'----',train_feat.shape,'---',train_mask.shape)
global_transformer_fg = self.global_transformer(
test_feat=test_feat,
train_feat=train_feat,
train_mask=None,
train_mask_enc=train_mask)
# print(global_transformer_fg.shape,'---')
#########################Local dist map
local_matching_fg = local_matching(
prev_frame_embedding=seq_prev_frame_embedding,
query_embedding=seq_current_frame_embedding,
prev_frame_labels=seq_previous_frame_label,
multi_local_distance=cfg.MODEL_MULTI_LOCAL_DISTANCE,
dis_bias=dis_bias,
use_float16=cfg.MODEL_FLOAT16_MATCHING,
atrous_rate=cfg.TRAIN_LOCAL_ATROUS_RATE if self.training else cfg.TEST_LOCAL_ATROUS_RATE,
allow_downsample=cfg.MODEL_LOCAL_DOWNSAMPLE,
allow_parallel=cfg.TRAIN_LOCAL_PARALLEL if self.training else cfg.TEST_LOCAL_PARALLEL)
# print('match_fg.shape:----')
# print(global_matching_fg.shape)
# print(local_matching_fg.shape)
#########################
to_cat_current_frame_embedding = current_frame_embedding[n].unsqueeze(0).expand((obj_num,-1,-1,-1))
to_cat_prev_frame_embedding = previous_frame_embedding[n].unsqueeze(0).expand((obj_num,-1,-1,-1))
# to_cat_corelation_attention = corelation_attention.permute(2,3,0,1)
# to_cat_global_matching_fg = global_matching_fg.squeeze(0).permute(2,3,0,1)
to_cat_local_matching_fg = local_matching_fg.squeeze(0).permute(2,3,0,1)
to_cat_global_transformer_fg = global_transformer_fg.squeeze(0).permute(1,0,2,3)
if cfg.MODEL_MATCHING_BACKGROUND:
# to_cat_global_matching_bg = foreground2background(to_cat_global_matching_fg, gt_ids[n] + 1)
reshaped_prev_nn_feature_n = to_cat_local_matching_fg.permute(0, 2, 3, 1).unsqueeze(1)
to_cat_local_matching_bg = foreground2background(reshaped_prev_nn_feature_n, gt_ids[n] + 1)
to_cat_local_matching_bg = to_cat_local_matching_bg.permute(0, 4, 2, 3, 1).squeeze(-1)
# pre_to_cat = torch.cat((to_cat_corelation_attention, to_cat_global_transformer_fg, to_cat_local_matching_fg, to_cat_previous_frame), 1)
pre_to_cat = torch.cat((to_cat_global_transformer_fg, to_cat_local_matching_fg, to_cat_previous_frame), 1)
if cfg.MODEL_MATCHING_BACKGROUND:
# pre_to_cat = torch.cat([pre_to_cat, to_cat_global_matching_bg, to_cat_local_matching_bg], 1)
pre_to_cat = torch.cat([pre_to_cat, to_cat_local_matching_bg], 1)
pre_to_cat = self.dynamic_prehead(pre_to_cat)
to_cat = torch.cat((to_cat_current_frame_embedding, to_cat_prev_frame_embedding * to_cat_previous_frame, to_cat_prev_frame_embedding * (1 - to_cat_previous_frame), pre_to_cat),1)
# print(ref_frame_embedding[n].shape,'---',seq_ref_frame_label.shape,'--------------',previous_frame_embedding[n].shape)
attention_head = calculate_attention_head(
ref_frame_embedding[n].expand((obj_num,-1,-1,-1)),
seq_ref_frame_label,
previous_frame_embedding[n].unsqueeze(0).expand((obj_num,-1,-1,-1)),
to_cat_previous_frame,
epsilon=self.epsilon)
low_level_feat = current_low_level[n].unsqueeze(0)
# print('to_cat.shape:',to_cat.shape)
# print('attention_head',attention_head.shape)
# print('low_level_feat',low_level_feat.shape)
pred = self.dynamic_seghead(to_cat, attention_head, low_level_feat)
# print('pred.shape:----', pred.shape)
# pred.shape: (1,obj_num, 117,117)
dic_tmp.append(pred)
# print('dic_tmp.len:----', len(dic_tmp))
# dic_tmp is a list, len is 1 or 2
return dic_tmp, boards
def get_module():
return COMatchNet
| 50.560311
| 190
| 0.654841
|
ea24357f6ac2ffda745ce9c2ba650108757515dd
| 523
|
py
|
Python
|
libact/query_strategies/__init__.py
|
jonzarecki/libact
|
32bbc4af8e31cd947aeb07c3972d4bdcd759a77f
|
[
"BSD-2-Clause"
] | null | null | null |
libact/query_strategies/__init__.py
|
jonzarecki/libact
|
32bbc4af8e31cd947aeb07c3972d4bdcd759a77f
|
[
"BSD-2-Clause"
] | null | null | null |
libact/query_strategies/__init__.py
|
jonzarecki/libact
|
32bbc4af8e31cd947aeb07c3972d4bdcd759a77f
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Concrete query strategy classes.
"""
from __future__ import absolute_import
import os
ON_RTD = os.environ.get('READTHEDOCS', None) == 'True'
import logging
LOGGER = logging.getLogger(__name__)
from .active_learning_by_learning import ActiveLearningByLearning
from .hintsvm import HintSVM
from .uncertainty_sampling import UncertaintySampling
from .query_by_committee import QueryByCommittee
from .quire import QUIRE
from .random_sampling import RandomSampling
from .density_weighted_uncertainty_sampling import DWUS
| 27.526316
| 65
| 0.839388
|
b7d210c67af331f793973212388e08de12333bee
| 1,804
|
py
|
Python
|
utils/hello.py
|
chuckfairy/TYOS
|
177c33432abee6ef827bd9c4bdbd7ed92237e278
|
[
"MIT"
] | null | null | null |
utils/hello.py
|
chuckfairy/TYOS
|
177c33432abee6ef827bd9c4bdbd7ed92237e278
|
[
"MIT"
] | null | null | null |
utils/hello.py
|
chuckfairy/TYOS
|
177c33432abee6ef827bd9c4bdbd7ed92237e278
|
[
"MIT"
] | null | null | null |
import pygame, sys
from pygame.locals import *
# set up pygame
pygame.init()
# set up the window
windowSurface = pygame.display.set_mode((500, 400), 0, 32)
pygame.display.set_caption('Hello world!')
# set up the colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
# set up fonts
basicFont = pygame.font.SysFont(None, 48)
# set up the text
text = basicFont.render('Hello world!', True, WHITE, BLUE)
textRect = text.get_rect()
textRect.centerx = windowSurface.get_rect().centerx
textRect.centery = windowSurface.get_rect().centery
# draw the white background onto the surface
windowSurface.fill(WHITE)
# draw a green polygon onto the surface
pygame.draw.polygon(windowSurface, GREEN, ((146, 0), (291, 106), (236, 277), (56, 277), (0, 106)))
# draw some blue lines onto the surface
pygame.draw.line(windowSurface, BLUE, (60, 60), (120, 60), 4)
pygame.draw.line(windowSurface, BLUE, (120, 60), (60, 120))
pygame.draw.line(windowSurface, BLUE, (60, 120), (120, 120), 4)
# draw a blue circle onto the surface
pygame.draw.circle(windowSurface, BLUE, (300, 50), 20, 0)
# draw a red ellipse onto the surface
pygame.draw.ellipse(windowSurface, RED, (300, 250, 40, 80), 1)
# draw the text's background rectangle onto the surface
pygame.draw.rect(windowSurface, RED, (textRect.left - 20, textRect.top - 20, textRect.width + 40, textRect.height + 40))
# get a pixel array of the surface
pixArray = pygame.PixelArray(windowSurface)
pixArray[480][380] = BLACK
del pixArray
# draw the text onto the surface
windowSurface.blit(text, textRect)
# draw the window onto the screen
pygame.display.update()
# run the game loop
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
| 27.333333
| 120
| 0.701774
|
7bb96d77f95a39aef0cbe5c93baf032ad9baea3d
| 1,550
|
py
|
Python
|
test/test_hmrf_em.py
|
microfluidix/HMRF
|
d6f64ca99537e638d978e474034cabc64c6f047f
|
[
"MIT"
] | 1
|
2021-11-16T10:40:25.000Z
|
2021-11-16T10:40:25.000Z
|
test/test_hmrf_em.py
|
gronteix/HMRF
|
0c7b4b1ea9cd2934b2c9218e9d48b7c63b819a34
|
[
"MIT"
] | null | null | null |
test/test_hmrf_em.py
|
gronteix/HMRF
|
0c7b4b1ea9cd2934b2c9218e9d48b7c63b819a34
|
[
"MIT"
] | 1
|
2021-11-16T10:40:59.000Z
|
2021-11-16T10:40:59.000Z
|
import networkx as nx
import numpy as np
import pytest
from biograph import hmrf_em
@pytest.fixture
def create_graph():
"""Create a graph with five nodes and ten edges. Each node is
connected to two other nodes. Each node has one attribute 'cell_type',
there are two different 'cell_type' attributes in total.
Returns
-------
G : networkx.Graph
"""
G = nx.Graph()
G.add_nodes_from(range(5))
G.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 3), (3, 4)])
G.nodes[0]["cell_type"] = 0
G.nodes[1]["cell_type"] = 0
G.nodes[2]["cell_type"] = 1
G.nodes[3]["cell_type"] = 1
G.nodes[4]["cell_type"] = 1
return G
def test_generate_hmrf_instance(create_graph):
assert isinstance(create_graph, nx.Graph)
assert len(create_graph.nodes) == 5
assert len(create_graph.edges) == 5
hmrf_instance = hmrf_em.hmrf(create_graph, K=2, beta=10, max_it=30)
hmrf_instance.initiate_model()
assert hmrf_instance.K == 2
assert hmrf_instance.beta == 10
assert hmrf_instance.max_it == 30
assert hmrf_instance.number_of_cell_types == 2
def test_hmrf_run(create_graph):
"""Create a hmrf instance, then run the instance over 10 iterations.
Verify that the output object is a graph.
"""
hmrf_instance = hmrf_em.hmrf(create_graph, K=2, beta=10, max_it=30)
hmrf_instance.initiate_model()
hmrf_instance.run()
assert isinstance(hmrf_instance.graph, nx.Graph)
assert len(hmrf_instance.graph.nodes) == 5
assert len(hmrf_instance.graph.edges) == 5
| 28.181818
| 74
| 0.67871
|
e909f7660406d75e793063721b8a307017866b5d
| 1,448
|
py
|
Python
|
jiraan/models.py
|
saudahabib/jiraan-
|
be88f94d0229e97adc41438bc27bcf1dbce85458
|
[
"MIT"
] | null | null | null |
jiraan/models.py
|
saudahabib/jiraan-
|
be88f94d0229e97adc41438bc27bcf1dbce85458
|
[
"MIT"
] | null | null | null |
jiraan/models.py
|
saudahabib/jiraan-
|
be88f94d0229e97adc41438bc27bcf1dbce85458
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Neighborhood(models.Model):
hood_name=models.CharField(max_length=50)
hood_location = models.CharField(max_length=50)
hood_count=models.CharField(max_length=50)
#save function
def save_hood(self):
self.save()
#delete function
def delete_hood(self):
del_hoods=Neighborhood.objects.all().delete()
return del_hoods
def __str__(self):
return self.hood_name
class Meta:
ordering = ['-id']
class User(models.Model):
first_name=models.CharField(max_length=50)
user_id=models.IntegerField()
email_address=models.CharField(max_length=50)
neighborhood_id=models.ForeignKey(Neighborhood, on_delete=models.CASCADE)
def __str__(self):
return self.first_name
def save_users(self):
self.save()
def delete_user(self):
deleted_users=User.objects.all().delete()
return deleted_users
class Business(models.Model):
business_name=models.CharField(max_length=50)
user=models.ForeignKey(User, on_delete=models.CASCADE)
neighborhood_id=models.ForeignKey(Neighborhood, on_delete=models.CASCADE)
business_address=models.CharField(max_length=50)
def __str__(self):
return self.business_name
def save_bus(self):
self.save()
def delete_bus(self):
objects=Business.objects.all().delete()
return objects
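# --- usage sketch (added for illustration; it must run inside this Django
# project, e.g. via `python manage.py shell`, and every value below is made up) ---
def demo_models():
    hood = Neighborhood(hood_name='Kilimani', hood_location='Nairobi', hood_count='120')
    hood.save_hood()
    user = User(first_name='Sauda', user_id=1,
                email_address='sauda@example.com', neighborhood_id=hood)
    user.save_users()
    biz = Business(business_name='Mama Mboga', user=user,
                   neighborhood_id=hood, business_address='Argwings Kodhek Rd')
    biz.save_bus()
    return hood, user, biz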
| 24.542373
| 77
| 0.698895
|
95b518dc1a2cea3ae5636dda55b638daa8d88fe5
| 11,041
|
py
|
Python
|
cyberserver/servers/attacks_generator.py
|
csd3345/cybermap-server
|
f9cad5e59547d30c1e6e13c8b4b39e6ea46746cb
|
[
"Unlicense",
"MIT"
] | null | null | null |
cyberserver/servers/attacks_generator.py
|
csd3345/cybermap-server
|
f9cad5e59547d30c1e6e13c8b4b39e6ea46746cb
|
[
"Unlicense",
"MIT"
] | null | null | null |
cyberserver/servers/attacks_generator.py
|
csd3345/cybermap-server
|
f9cad5e59547d30c1e6e13c8b4b39e6ea46746cb
|
[
"Unlicense",
"MIT"
] | null | null | null |
import sys
import json
import time
from random import choice, randint, randrange
import pathlib
import logging
import timeit
import click
from click_help_colors import HelpColorsCommand, HelpColorsGroup
from typing import Union, Optional
from math import inf as INFINITE
from cyberserver.servers import utilities
_script_path = pathlib.Path(__file__)
_script_stem = pathlib.Path(__file__).stem
_script_name = pathlib.Path(__file__).name
_logger: logging.Logger
class AttacksGenerator(object):
_logger: logging.Logger
def __init__(
self,
channel: str = "raw-cyberattacks",
ips_to_generate: Union[int, type(INFINITE)] = INFINITE,
method: str = "random",
interval: float = 0.1,
filepath: Optional[Union[pathlib.Path, str]] = None,
silent: Optional[bool] = None,
script_mode: Optional[bool] = None,
verbose: Optional[bool] = None
):
self.method: str = method
if self.method != "random":
if filepath is None:
raise FileNotFoundError(f"You have to specify a file for method {self.method}")
self.ports = {
0: "DoS", # Denial of Service
1: "ICMP", # ICMP
20: "FTP", # FTP Data
21: "FTP", # FTP Control
22: "SSH", # SSH
23: "TELNET", # Telnet
25: "EMAIL", # SMTP
43: "WHOIS", # Whois
53: "DNS", # DNS
80: "HTTP", # HTTP
88: "AUTH", # Kerberos
109: "EMAIL", # POP v2
110: "EMAIL", # POP v3
115: "SFTP", # SFTP
118: "SQL", # SQL
143: "EMAIL", # IMAP
156: "SQL", # SQL
161: "SNMP", # SNMP
220: "EMAIL", # IMAP v3
389: "AUTH", # LDAP
443: "HTTPS", # HTTPS
445: "SMB", # SMB
636: "AUTH", # LDAP of SSL/TLS
1433: "SQL", # MySQL Server
1434: "SQL", # MySQL Monitor
3306: "SQL", # MySQL
3389: "RDP", # RDP
5900: "RDP", # VNC:0
5901: "RDP", # VNC:1
5902: "RDP", # VNC:2
5903: "RDP", # VNC:3
8080: "HTTP", # HTTP Alternative
}
self.ips_to_generate: int = ips_to_generate
# random method
self.channel: str = channel
self.interval: float = interval
# file methods
self.filepath: pathlib.Path = filepath
self.silent: bool = silent
self.verbose: bool = verbose
self.script_mode: bool = script_mode
self.redis_watcher: utilities.RedisWatcher = utilities.RedisWatcher(silent = self.silent)
self.ips_forged: int = 0
# initiation
self._logger = utilities.logging.get_console_logger(
name = _script_stem,
level = logging.DEBUG if self.verbose else logging.INFO,
disable_stream = True if self.silent else False
)
self._logger.info(f"Method used {utilities.colorize(self.method, 'gold_1')}")
if self.method != "random":
self._logger.info(f"File used {utilities.colorize(self.filepath, 'gold_1')}")
self._logger.info(
f"Number of IPs to generate "
f"{utilities.colorize(self.ips_to_generate if self.ips_to_generate != INFINITE else '∞', 'gold_1')}"
)
self._logger.info(
f"Publishing to channel "
f"{utilities.colorize(self.channel, 'gold_1')}"
)
self._logger.info(f"Publishing interval {utilities.colorize(self.interval, 'gold_1')} seconds")
self._logger.info(f"Silent mode {utilities.colorize(self.silent and 'on' or 'off', 'gold_1')}")
self._logger.info(f"Script mode {utilities.colorize(self.script_mode and 'on' or 'off', 'gold_1')}")
def __call__(self):
self.start_time = timeit.default_timer()
self._logger.info(
f"Attacks Generator started at: "
f"{utilities.colorize(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()), 'gold_1')}"
)
if self.method == "random":
self.random()
def __del__(self):
if not self.silent:
sys.stdout.write("\033[1000D\033[K")
self._logger.info(f"Stopped generator. Total IPs generated: {utilities.colorize(self.ips_forged)}")
def random(self):
while self.ips_to_generate == INFINITE or self.ips_forged < self.ips_to_generate:
port = choice(list(self.ports.keys()))
data = {
'src': {
'ip': self.ipv4(),
'port': port
},
'dst': {
'ip': self.ipv4(),
'port': port
},
'type': self.ports[port],
'cve': f"CVE:{randrange(1997, 2019)}:{randrange(1, 400)}"
}
json_data = json.dumps(data)
self.redis_watcher.server.publish(channel = self.channel, message = json_data)
self.ips_forged += 1
self._logger.info(
f"Published random IP "
f"[{utilities.colorize(f'{self.ips_forged}', 'gold_1')}]"
)
self._logger.debug(f"{json.dumps(data, indent = 4)}")
if not self.script_mode and not self.silent:
print("\033[A\033[K", end = '')
time.sleep(self.interval)
@staticmethod
def ipv4() -> str:
"""
To generate a more accurate representation of an IP address these rules are followed:
https://www.quora.com/How-do-you-identify-an-invalid-IP-address
1.Any address that begins with a 0 is invalid (except as a default route).
2.Any address with a number above 255 in it is invalid.
3.Any address that has more than 3 dots is invalid.
4.Any address that begins with a number between 240 and 255 is reserved, and effectively invalid.
(Theoretically, they’re usable, but I’ve never seen one in use.)
5.Any address that begins with a number between 224 and 239 is reserved for multicast, and probably invalid.
"""
# the left-most part of the address is calculated first as it has the most restrictions
return ".".join(
[
str(randint(1, 222)),
str(randint(1, 255)),
str(randint(1, 255)),
str(randint(1, 255)),
]
)
@property
def method(self):
return self._method
@method.setter
def method(self, value: str):
available_methods = ['random', 'json', 'text', 'csv']
if not isinstance(value, str):
raise TypeError(f"method must be a string. Available methods: {','.join(available_methods)}")
if value.lower() not in available_methods:
raise ValueError(f"method {value} is not available. Available methods: {','.join(available_methods)}")
self._method = value.lower()
@property
def ports(self):
return self._ports
@ports.setter
def ports(self, value):
if not isinstance(value, dict):
raise TypeError("ports must a dict")
self._ports = value
@property
def filepath(self) -> pathlib.Path:
return self._filepath
@filepath.setter
def filepath(self, value):
        if value is not None:
            if not isinstance(value, (pathlib.Path, str)):
                raise TypeError("filepath must be a string or a pathlib.Path object")
            self._filepath = pathlib.Path(value)
        else:
            self._filepath = None
# region Command Line Tool section
# region command settings
@click.group(
cls = HelpColorsGroup,
help_headers_color = 'green',
help_options_color = 'bright_red',
name = "Attacks Generator",
context_settings = {
"help_option_names": ['-h', '--help'],
"ignore_unknown_options": True
},
no_args_is_help = False,
invoke_without_command = True,
options_metavar = "<options>"
)
# endregion
# region channel option
@click.option(
'-c',
'--channel',
default = "raw-cyberattacks",
type = click.STRING,
metavar = "string",
help = "Redis channel in which the attacks will be forwarded"
)
# endregion
# region interval option
@click.option(
"-i",
"--interval",
default = 0.05,
metavar = "Float",
type = click.FloatRange(0.05, 100),
help = "Interval between each publication"
)
# endregion
# region autostart option
@click.option(
"-a",
"--autostart",
metavar = "switch",
is_flag = True,
help = "Immediately start publishing generated attacks"
)
# endregion
# region verbose option
@click.option(
"--verbose",
metavar = "switch",
is_flag = True,
help = "Enables verbose logging messages"
)
# endregion
# region silent option
@click.option(
"--silent",
metavar = "switch",
is_flag = True,
help = "Disables ALL logging messages"
)
# endregion
# region script-mode option
@click.option(
"--script-mode",
metavar = "switch",
is_flag = True,
help = "Enables script mode which uses dynamic printing features in the terminal."
"CAUTION: using this flag when initializing Attacks Generator from another module may break some "
"logging messages"
)
# endregion
@click.pass_context
def main(ctx, channel: str, interval: float, verbose: bool, silent: bool, script_mode: bool, autostart: bool):
"""
Custom command line utility to generate cyberattacks for publishing to a redis channel
"""
ctx.obj = {
"channel": channel,
"interval": interval,
"verbose": verbose,
"silent": silent,
"script_mode": script_mode,
"autostart": autostart
}
if ctx.invoked_subcommand is None:
ctx.invoke(random)
else:
# click.echo('I am about to invoke %s' % ctx.invoked_subcommand)
pass
@main.command()
# region forge option
@click.option(
"-f",
"--forge",
"ips_to_forge",
default = INFINITE,
metavar = "float",
type = click.FLOAT,
help = "Number of IP to forge"
)
# endregion
@click.pass_context
def random(ctx, ips_to_forge):
generator = AttacksGenerator(
method = "RaNdOm",
channel = ctx.obj["channel"],
ips_to_generate = ips_to_forge,
interval = ctx.obj["interval"],
silent = ctx.obj["silent"],
script_mode = ctx.obj["script_mode"]
)
try:
generator()
except KeyboardInterrupt:
if ctx.obj["script_mode"] and not ctx.obj["silent"]:
sys.stdout.write("\033[1000D\033[K")
# endregion
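# --- programmatic usage sketch (added for illustration; it assumes a Redis
# server reachable through utilities.RedisWatcher's defaults). Roughly what
# `python attacks_generator.py random -f 5` does, minus the click layer. ---
def demo_generator():
    generator = AttacksGenerator(
        channel="raw-cyberattacks",
        ips_to_generate=5,
        method="random",
        interval=0.1,
        silent=False,
        script_mode=False,
        verbose=False,
    )
    generator()   # publishes five forged attacks, then returns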
if __name__ == '__main__':
main()
| 31.27762
| 116
| 0.56743
|
db6d526bfcf066b9dbd9e4d6828426ebe1ffb39c
| 390
|
py
|
Python
|
instagram/forms.py
|
Ianadika44/insta-clown
|
268e0cb1a66f20ff22073b308c80a2ab8d664c55
|
[
"Unlicense"
] | null | null | null |
instagram/forms.py
|
Ianadika44/insta-clown
|
268e0cb1a66f20ff22073b308c80a2ab8d664c55
|
[
"Unlicense"
] | 1
|
2021-06-08T21:42:18.000Z
|
2021-06-08T21:42:18.000Z
|
instagram/forms.py
|
Ianadika44/insta-clown
|
268e0cb1a66f20ff22073b308c80a2ab8d664c55
|
[
"Unlicense"
] | null | null | null |
from django import forms
from .models import Post
class NewsLetterForm(forms.Form):
your_name = forms.CharField(label='First Name',max_length=30)
email = forms.EmailField(label='Email')
class NewPostForm(forms.ModelForm):
class Meta:
model = Post
exclude = ['profile']
widgets = {
'following': forms.CheckboxSelectMultiple(),
}
| 26
| 65
| 0.648718
|
cce47068331277a5145a5ed8fefc8a7b8327929f
| 3,380
|
py
|
Python
|
designate-8.0.0/designate/tests/test_api/test_v2/test_service_status.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 145
|
2015-01-02T09:35:53.000Z
|
2021-12-14T17:03:53.000Z
|
designate/tests/test_api/test_v2/test_service_status.py
|
sapcc/designate
|
c3f084751006a2fe7562f137930542c4759d6fd9
|
[
"Apache-2.0"
] | 6
|
2015-03-15T00:22:27.000Z
|
2019-12-16T09:37:38.000Z
|
designate/tests/test_api/test_v2/test_service_status.py
|
sapcc/designate
|
c3f084751006a2fe7562f137930542c4759d6fd9
|
[
"Apache-2.0"
] | 109
|
2015-01-13T16:47:34.000Z
|
2021-03-15T13:18:48.000Z
|
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate.tests.test_api.test_v2 import ApiV2TestCase
LOG = logging.getLogger(__name__)
class ApiV2ServiceStatusTest(ApiV2TestCase):
def setUp(self):
super(ApiV2ServiceStatusTest, self).setUp()
def test_get_service_statuses(self):
# Set the policy file as this is an admin-only API
self.policy({'find_service_statuses': '@'})
response = self.client.get('/service_statuses/')
# Check the headers are what we expect
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
# Check the body structure is what we expect
self.assertIn('service_statuses', response.json)
self.assertIn('links', response.json)
self.assertIn('self', response.json['links'])
# Test with 0 service_statuses
# Seeing that Central is started there will be 1 here already..
self.assertEqual(0, len(response.json['service_statuses']))
data = [self.update_service_status(
hostname="foo%s" % i, service_name="bar") for i in range(0, 10)]
self._assert_paging(data, '/service_statuses', key='service_statuses')
def test_get_service_status(self):
service_status = self.update_service_status(fixture=0)
# Set the policy file as this is an admin-only API
self.policy({'find_service_status': '@'})
response = self.client.get(
'/service_statuses/%s' % service_status['id'],
headers=[('Accept', 'application/json')])
# Verify the headers
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
# Verify the body structure
self.assertIn('links', response.json)
self.assertIn('self', response.json['links'])
# Verify the returned values
self.assertIn('id', response.json)
self.assertIn('created_at', response.json)
self.assertIsNone(response.json['updated_at'])
fixture = self.get_service_status_fixture(0)
self.assertEqual(fixture['hostname'], response.json['hostname'])
self.assertEqual(fixture['service_name'],
response.json['service_name'])
self.assertEqual(fixture['capabilities'],
response.json['capabilities'])
self.assertEqual(fixture['stats'], response.json['stats'])
self.assertEqual(fixture['status'], response.json['status'])
self.assertIsNone(response.json['heartbeated_at'])
def test_get_service_status_invalid_id(self):
self.policy({'find_service_status': '@'})
self._assert_invalid_uuid(self.client.get, '/service_statuses/%s')
| 39.764706
| 78
| 0.681361
|
16c08bf5560a181cb9066dae531a05db0771268d
| 5,182
|
py
|
Python
|
tests/mutations/test_person.py
|
trompamusic/ce-queries-template
|
cc5ae69d0e76623bfd72e9453f569f6624bf7c3b
|
[
"Apache-2.0"
] | 1
|
2020-06-18T15:43:18.000Z
|
2020-06-18T15:43:18.000Z
|
tests/mutations/test_person.py
|
trompamusic/ce-queries-template
|
cc5ae69d0e76623bfd72e9453f569f6624bf7c3b
|
[
"Apache-2.0"
] | 60
|
2019-12-17T11:08:28.000Z
|
2021-03-02T16:19:41.000Z
|
tests/mutations/test_person.py
|
trompamusic/trompace-client
|
cc5ae69d0e76623bfd72e9453f569f6624bf7c3b
|
[
"Apache-2.0"
] | null | null | null |
# Tests for mutations pertaining to person objects.
import os
import pytest
from trompace.exceptions import UnsupportedLanguageException, NotAMimeTypeException
from trompace.mutations import person
from tests import CeTestCase
class TestPerson(CeTestCase):
def setUp(self) -> None:
        super().setUp()
self.data_dir = os.path.join(self.test_directory, "data", "person")
def test_create(self):
expected = self.read_file(os.path.join(self.data_dir, "create_person.txt"))
created_person = person.mutation_create_person(
title="A. J. Fynn", contributor="https://www.cpdl.org",
creator="https://www.upf.edu", source="https://www.cpdl.org/wiki/index.php/A._J._Fynn",
language="en", format_="text/html", gender="male",
description="Born circa 1860Died circa 1920A. J. Fynn was an early 20th Century scholar\
in literature and anthropology")
self.assert_queries_equal(created_person, expected)
def test_create_invalid_values(self):
"""Passing invalid values to language, format_, or gender cause exceptions"""
with pytest.raises(ValueError):
person.mutation_create_person(
title="A. J. Fynn", contributor="https://www.cpdl.org",
creator="https://www.upf.edu",
source="https://www.cpdl.org/wiki/index.php/A._J._Fynn",
format_="text/html",
gender="test"
)
with pytest.raises(UnsupportedLanguageException):
person.mutation_create_person(
title="A. J. Fynn", contributor="https://www.cpdl.org",
creator="https://www.upf.edu",
source="https://www.cpdl.org/wiki/index.php/A._J._Fynn",
format_="text/html",
language="pt"
)
with pytest.raises(NotAMimeTypeException):
person.mutation_create_person(
title="A. J. Fynn", contributor="https://www.cpdl.org",
creator="https://www.upf.edu",
source="https://www.cpdl.org/wiki/index.php/A._J._Fynn",
format_="html"
)
def test_update(self):
expected = self.read_file(os.path.join(self.data_dir, "update_person.txt"))
created_update = person.mutation_update_person('2eeca6dd-c62c-490e-beb0-2e3899fca74f',
title="A. J. Fynn")
self.assert_queries_equal(created_update, expected)
def test_delete(self):
expected = self.read_file(os.path.join(self.data_dir, "delete_person.txt"))
created_delete = person.mutation_delete_person('2eeca6dd-c62c-490e-beb0-2e3899fca74f')
self.assert_queries_equal(created_delete, expected)
def test_invalid_language(self):
with pytest.raises(UnsupportedLanguageException):
person.mutation_update_person('2eeca6dd-c62c-490e-beb0-2e3899fca74f', language="ja")
with pytest.raises(UnsupportedLanguageException):
person.mutation_create_person(title="A. J. Fynn", contributor="https://www.cpdl.org",
creator="https://www.upf.edu",
source="https://www.cpdl.org/wiki/index.php/A._J._Fynn",
language="ja", format_="text/html",
description="Born circa 1860Died circa 1920A. J. Fynn was\
an early 20th Century scholar in literature and anthropology")
def test_invalid_format(self):
with pytest.raises(NotAMimeTypeException):
person.mutation_update_person('2eeca6dd-c62c-490e-beb0-2e3899fca74f', format_="test,html")
with pytest.raises(NotAMimeTypeException):
person.mutation_create_person(title="A. J. Fynn", contributor="https://www.cpdl.org",
creator="https://www.upf.edu",
source="https://www.cpdl.org/wiki/index.php/A._J._Fynn",
language="en", format_="text,html",
description="Born circa 1860Died circa 1920A. J. Fynn was\
an early 20th Century scholar in literature and anthropology")
def test_person_add_exact_match_person(self):
expected = self.read_file(os.path.join(self.data_dir, "merge_person_exactmatch.txt"))
actual = person.mutation_person_add_exact_match_person("d3f968f4-90cd-4764-93bc-6fadcc2a35e6",
"b10ac895-beb8-489e-8168-3e786d1aeb0e")
self.assert_queries_equal(actual, expected)
def test_person_remove_exact_match_person(self):
expected = self.read_file(os.path.join(self.data_dir, "remove_person_exactmatch.txt"))
actual = person.mutation_person_remove_exact_match_person("d3f968f4-90cd-4764-93bc-6fadcc2a35e6",
"b10ac895-beb8-489e-8168-3e786d1aeb0e")
self.assert_queries_equal(actual, expected)
| 48.429907
| 105
| 0.603821
|
d2bffad9f4004b95f56c3810b96353c389d33c07
| 11,787
|
py
|
Python
|
com/precisely/apis/model/tax_locations.py
|
PreciselyData/PreciselyAPIsSDK-Python
|
28ffff0c96d81d3a53a5599c987d54d7b632b508
|
[
"Apache-2.0"
] | null | null | null |
com/precisely/apis/model/tax_locations.py
|
PreciselyData/PreciselyAPIsSDK-Python
|
28ffff0c96d81d3a53a5599c987d54d7b632b508
|
[
"Apache-2.0"
] | null | null | null |
com/precisely/apis/model/tax_locations.py
|
PreciselyData/PreciselyAPIsSDK-Python
|
28ffff0c96d81d3a53a5599c987d54d7b632b508
|
[
"Apache-2.0"
] | null | null | null |
"""
Precisely APIs
Enhance & enrich your data, applications, business processes, and workflows with rich location, information, and identify APIs. # noqa: E501
The version of the OpenAPI document: 11.9.3
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from com.precisely.apis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from com.precisely.apis.exceptions import ApiAttributeError
def lazy_import():
from com.precisely.apis.model.tax_geometry import TaxGeometry
globals()['TaxGeometry'] = TaxGeometry
class TaxLocations(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'geometry': (TaxGeometry,), # noqa: E501
'purchase_amount': (str,), # noqa: E501
'object_id': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'geometry': 'geometry', # noqa: E501
'purchase_amount': 'purchaseAmount', # noqa: E501
'object_id': 'objectId', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, geometry, *args, **kwargs): # noqa: E501
"""TaxLocations - a model defined in OpenAPI
Args:
geometry (TaxGeometry):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
purchase_amount (str): [optional] # noqa: E501
object_id (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.geometry = geometry
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, geometry, *args, **kwargs): # noqa: E501
"""TaxLocations - a model defined in OpenAPI
Args:
geometry (TaxGeometry):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
purchase_amount (str): [optional] # noqa: E501
object_id (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.geometry = geometry
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 42.706522
| 145
| 0.571053
|
d47aecd08baaeabe664ceabac9dbb67643156591
| 547
|
py
|
Python
|
cradmin_legacy/viewhelpers/mixins.py
|
appressoas/cradmin_legacy
|
b9d024299333dd04c87c1031bd5be5778aa7f1f1
|
[
"BSD-3-Clause"
] | null | null | null |
cradmin_legacy/viewhelpers/mixins.py
|
appressoas/cradmin_legacy
|
b9d024299333dd04c87c1031bd5be5778aa7f1f1
|
[
"BSD-3-Clause"
] | 17
|
2018-03-07T15:52:42.000Z
|
2022-03-12T01:07:06.000Z
|
cradmin_legacy/viewhelpers/mixins.py
|
appressoas/cradmin_legacy
|
b9d024299333dd04c87c1031bd5be5778aa7f1f1
|
[
"BSD-3-Clause"
] | 1
|
2018-07-23T22:13:45.000Z
|
2018-07-23T22:13:45.000Z
|
from __future__ import unicode_literals
from builtins import object
class QuerysetForRoleMixin(object):
def get_queryset_for_role(self, role):
"""
Get a queryset with all objects of ``self.model`` that
the current role can access.
"""
raise NotImplementedError()
def get_queryset(self):
"""
DO NOT override this. Override :meth:`.get_queryset_for_role`
instead.
"""
queryset = self.get_queryset_for_role(self.request.cradmin_role)
return queryset
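A short illustrative subclass (a sketch, not from the source): it assumes a Django model named Article whose site field points at the object used as the cradmin role.
class ArticleQuerysetForRoleMixin(QuerysetForRoleMixin):
    model = Article  # assumed Django model, defined elsewhere
    def get_queryset_for_role(self, role):
        # Limit the queryset to objects owned by the current cradmin role.
        return Article.objects.filter(site=role)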
| 27.35
| 72
| 0.652651
|
55ae49096829e62436750f72fbc3f1bee8928e2a
| 12,398
|
py
|
Python
|
docs/exts/docs_build/docs_builder.py
|
khilawar4/airflow
|
5f3f65b82517f615f31f0c8a7f8ac0facb325235
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2020-09-11T07:21:58.000Z
|
2021-04-29T17:14:11.000Z
|
docs/exts/docs_build/docs_builder.py
|
khilawar4/airflow
|
5f3f65b82517f615f31f0c8a7f8ac0facb325235
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-02-27T17:34:47.000Z
|
2021-03-24T21:03:30.000Z
|
docs/exts/docs_build/docs_builder.py
|
khilawar4/airflow
|
5f3f65b82517f615f31f0c8a7f8ac0facb325235
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-02-03T09:55:26.000Z
|
2020-02-03T09:55:26.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import shlex
import shutil
from glob import glob
from subprocess import run
from typing import List
from rich.console import Console
from docs.exts.docs_build.code_utils import (
AIRFLOW_SITE_DIR,
ALL_PROVIDER_YAMLS,
CONSOLE_WIDTH,
DOCS_DIR,
PROCESS_TIMEOUT,
ROOT_PROJECT_DIR,
pretty_format_path,
)
from docs.exts.docs_build.errors import DocBuildError, parse_sphinx_warnings
# pylint: disable=no-name-in-module
from docs.exts.docs_build.spelling_checks import SpellingError, parse_spelling_warnings
# pylint: enable=no-name-in-module
console = Console(force_terminal=True, color_system="standard", width=CONSOLE_WIDTH)
class AirflowDocsBuilder:
"""Documentation builder for Airflow."""
def __init__(self, package_name: str, for_production: bool):
self.package_name = package_name
self.for_production = for_production
@property
def _doctree_dir(self) -> str:
return f"{DOCS_DIR}/_doctrees/docs/{self.package_name}"
@property
def _inventory_cache_dir(self) -> str:
return f"{DOCS_DIR}/_inventory_cache"
@property
def is_versioned(self):
"""Is current documentation package versioned?"""
# Disable versioning. This documentation does not apply to any released product and we can update
# it as needed, i.e. with each new package of providers.
return self.package_name not in ('apache-airflow-providers', 'docker-stack')
@property
def _build_dir(self) -> str:
if self.is_versioned:
version = "stable" if self.for_production else "latest"
return f"{DOCS_DIR}/_build/docs/{self.package_name}/{version}"
else:
return f"{DOCS_DIR}/_build/docs/{self.package_name}"
@property
def log_spelling_filename(self) -> str:
"""Log from spelling job."""
return os.path.join(self._build_dir, f"output-spelling-{self.package_name}.log")
@property
def log_spelling_output_dir(self) -> str:
"""Results from spelling job."""
return os.path.join(self._build_dir, f"output-spelling-results-{self.package_name}")
@property
def log_build_filename(self) -> str:
"""Log from build job."""
return os.path.join(self._build_dir, f"output-build-{self.package_name}.log")
@property
def log_build_warning_filename(self) -> str:
"""Warnings from build job."""
return os.path.join(self._build_dir, f"warning-build-{self.package_name}.log")
@property
def _current_version(self):
if not self.is_versioned:
raise Exception("This documentation package is not versioned")
if self.package_name == 'apache-airflow':
from airflow.version import version as airflow_version
return airflow_version
if self.package_name.startswith('apache-airflow-providers-'):
provider = next(p for p in ALL_PROVIDER_YAMLS if p['package-name'] == self.package_name)
return provider['versions'][0]
        raise Exception(f"Unsupported package: {self.package_name}")
@property
def _publish_dir(self) -> str:
if self.is_versioned:
return f"docs-archive/{self.package_name}/{self._current_version}"
else:
return f"docs-archive/{self.package_name}"
@property
def _src_dir(self) -> str:
return f"{DOCS_DIR}/{self.package_name}"
def clean_files(self) -> None:
"""Cleanup all artifacts generated by previous builds."""
api_dir = os.path.join(self._src_dir, "_api")
shutil.rmtree(api_dir, ignore_errors=True)
shutil.rmtree(self._build_dir, ignore_errors=True)
os.makedirs(api_dir, exist_ok=True)
os.makedirs(self._build_dir, exist_ok=True)
def check_spelling(self, verbose: bool) -> List[SpellingError]:
"""
Checks spelling
:param verbose: whether to show output while running
:return: list of errors
"""
spelling_errors = []
os.makedirs(self._build_dir, exist_ok=True)
shutil.rmtree(self.log_spelling_output_dir, ignore_errors=True)
os.makedirs(self.log_spelling_output_dir, exist_ok=True)
build_cmd = [
os.path.join(ROOT_PROJECT_DIR, "docs", "exts", "docs_build", "run_patched_sphinx.py"),
"-W", # turn warnings into errors
"--color", # do emit colored output
"-T", # show full traceback on exception
"-b", # builder to use
"spelling",
"-c",
DOCS_DIR,
"-d", # path for the cached environment and doctree files
self._doctree_dir,
self._src_dir, # path to documentation source files
self.log_spelling_output_dir,
]
env = os.environ.copy()
env['AIRFLOW_PACKAGE_NAME'] = self.package_name
if self.for_production:
env['AIRFLOW_FOR_PRODUCTION'] = 'true'
if verbose:
console.print(
f"[blue]{self.package_name:60}:[/] Executing cmd: ",
" ".join([shlex.quote(c) for c in build_cmd]),
)
console.print(f"[blue]{self.package_name:60}:[/] The output is hidden until an error occurs.")
with open(self.log_spelling_filename, "wt") as output:
completed_proc = run( # pylint: disable=subprocess-run-check
build_cmd,
cwd=self._src_dir,
env=env,
stdout=output if not verbose else None,
stderr=output if not verbose else None,
timeout=PROCESS_TIMEOUT,
)
if completed_proc.returncode != 0:
spelling_errors.append(
SpellingError(
file_path=None,
line_no=None,
spelling=None,
suggestion=None,
context_line=None,
message=(
f"Sphinx spellcheck returned non-zero exit status: {completed_proc.returncode}."
),
)
)
warning_text = ""
for filepath in glob(f"{self.log_spelling_output_dir}/**/*.spelling", recursive=True):
with open(filepath) as spelling_file:
warning_text += spelling_file.read()
spelling_errors.extend(parse_spelling_warnings(warning_text, self._src_dir))
console.print(f"[blue]{self.package_name:60}:[/] [red]Finished spell-checking with errors[/]")
else:
if spelling_errors:
console.print(
f"[blue]{self.package_name:60}:[/] [yellow]Finished spell-checking with warnings[/]"
)
else:
console.print(
f"[blue]{self.package_name:60}:[/] [green]Finished spell-checking successfully[/]"
)
return spelling_errors
def build_sphinx_docs(self, verbose: bool) -> List[DocBuildError]:
"""
Build Sphinx documentation.
:param verbose: whether to show output while running
:return: list of errors
"""
build_errors = []
os.makedirs(self._build_dir, exist_ok=True)
build_cmd = [
os.path.join(ROOT_PROJECT_DIR, "docs", "exts", "docs_build", "run_patched_sphinx.py"),
"-T", # show full traceback on exception
"--color", # do emit colored output
"-b", # builder to use
"html",
"-d", # path for the cached environment and doctree files
self._doctree_dir,
"-c",
DOCS_DIR,
"-w", # write warnings (and errors) to given file
self.log_build_warning_filename,
self._src_dir,
self._build_dir, # path to output directory
]
env = os.environ.copy()
env['AIRFLOW_PACKAGE_NAME'] = self.package_name
if self.for_production:
env['AIRFLOW_FOR_PRODUCTION'] = 'true'
if verbose:
console.print(
f"[blue]{self.package_name:60}:[/] Executing cmd: ",
" ".join([shlex.quote(c) for c in build_cmd]),
)
else:
console.print(
f"[blue]{self.package_name:60}:[/] Running sphinx. "
f"The output is hidden until an error occurs."
)
with open(self.log_build_filename, "wt") as output:
completed_proc = run( # pylint: disable=subprocess-run-check
build_cmd,
cwd=self._src_dir,
env=env,
stdout=output if not verbose else None,
stderr=output if not verbose else None,
timeout=PROCESS_TIMEOUT,
)
if completed_proc.returncode != 0:
build_errors.append(
DocBuildError(
file_path=None,
line_no=None,
message=f"Sphinx returned non-zero exit status: {completed_proc.returncode}.",
)
)
if os.path.isfile(self.log_build_warning_filename):
with open(self.log_build_warning_filename) as warning_file:
warning_text = warning_file.read()
# Remove 7-bit C1 ANSI escape sequences
warning_text = re.sub(r"\x1B[@-_][0-?]*[ -/]*[@-~]", "", warning_text)
build_errors.extend(parse_sphinx_warnings(warning_text, self._src_dir))
if build_errors:
console.print(f"[blue]{self.package_name:60}:[/] [red]Finished docs building with errors[/]")
else:
console.print(f"[blue]{self.package_name:60}:[/] [green]Finished docs building successfully[/]")
return build_errors
def publish(self, override_versioned: bool):
"""Copy documentation packages files to airflow-site repository."""
console.print(f"Publishing docs for {self.package_name}")
output_dir = os.path.join(AIRFLOW_SITE_DIR, self._publish_dir)
pretty_source = pretty_format_path(self._build_dir, os.getcwd())
pretty_target = pretty_format_path(output_dir, AIRFLOW_SITE_DIR)
console.print(f"Copy directory: {pretty_source} => {pretty_target}")
if os.path.exists(output_dir):
if self.is_versioned:
if override_versioned:
console.print(f"Overriding previously existing {output_dir}! ")
else:
console.print(
f"Skipping previously existing {output_dir}! "
f"Delete it manually if you want to regenerate it!"
)
console.print()
return
shutil.rmtree(output_dir)
shutil.copytree(self._build_dir, output_dir)
if self.is_versioned:
with open(os.path.join(output_dir, "..", "stable.txt"), "w") as stable_file:
stable_file.write(self._current_version)
console.print()
def get_available_providers_packages():
"""Get list of all available providers packages to build."""
return [provider['package-name'] for provider in ALL_PROVIDER_YAMLS]
def get_available_packages():
"""Get list of all available packages to build."""
provider_package_names = get_available_providers_packages()
return [
"apache-airflow",
*provider_package_names,
"apache-airflow-providers",
"helm-chart",
"docker-stack",
]
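A hedged usage sketch for the builder above (not part of the source module); it assumes the Airflow docs tooling environment implied by the imports at the top of the file.
if __name__ == "__main__":
    # Build the core package docs once and print any collected build errors.
    builder = AirflowDocsBuilder(package_name="apache-airflow", for_production=False)
    builder.clean_files()
    for error in builder.build_sphinx_docs(verbose=False):
        console.print(str(error))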
| 39.35873
| 108
| 0.610744
|
22049692047a19482d78243d99741b73fdd0aadb
| 41
|
py
|
Python
|
u-net_convolutional_networks_for_biomedical_image_segmentation/models/__init__.py
|
younnggsuk/CV-Paper-Implementation
|
fecd67d3f216872976f9b38445ce1c1f9ef1ac02
|
[
"MIT"
] | 4
|
2021-06-03T13:56:51.000Z
|
2021-11-05T06:22:25.000Z
|
u-net_convolutional_networks_for_biomedical_image_segmentation/models/__init__.py
|
younnggsuk/CV-Paper-Implementation
|
fecd67d3f216872976f9b38445ce1c1f9ef1ac02
|
[
"MIT"
] | null | null | null |
u-net_convolutional_networks_for_biomedical_image_segmentation/models/__init__.py
|
younnggsuk/CV-Paper-Implementation
|
fecd67d3f216872976f9b38445ce1c1f9ef1ac02
|
[
"MIT"
] | 1
|
2022-03-28T09:34:03.000Z
|
2022-03-28T09:34:03.000Z
|
from .unet import *
from .metric import *
| 20.5
| 21
| 0.731707
|
bbe8726b010c3b5e7361e3868ec2e799f31a25c3
| 3,286
|
py
|
Python
|
setup.py
|
lopessec/killerbee
|
c236d626e297815bdc401695cd6c8a6c9e353c38
|
[
"BSD-3-Clause"
] | 653
|
2015-04-01T18:29:08.000Z
|
2022-03-18T09:05:22.000Z
|
setup.py
|
lopessec/killerbee
|
c236d626e297815bdc401695cd6c8a6c9e353c38
|
[
"BSD-3-Clause"
] | 189
|
2015-04-23T03:37:34.000Z
|
2022-03-15T07:23:17.000Z
|
setup.py
|
lopessec/killerbee
|
c236d626e297815bdc401695cd6c8a6c9e353c38
|
[
"BSD-3-Clause"
] | 223
|
2015-04-07T10:12:18.000Z
|
2022-03-25T17:39:26.000Z
|
# NOTE: See the README file for a list of dependencies to install.
from __future__ import print_function
import sys
try:
from setuptools import setup, Extension
except ImportError:
print("No setuptools found, attempting to use distutils instead.")
from distutils.core import setup, Extension
err = []
warn = []
apt_get_pkgs = []
pip_pkgs = []
# Ensure we have either pyUSB 0.x or pyUSB 1.x, but we now
# prefer pyUSB 1.x moving forward. Support for 0.x may be deprecated.
try:
import usb
except ImportError:
err.append("usb")
apt_get_pkgs.append("python-usb")
try:
import usb.core
#print("Warning: You are using pyUSB 1.x, support is in beta.")
except ImportError:
warn.append("You are using pyUSB 0.x. Consider upgrading to pyUSB 1.x.")
# TODO: Ideally we would detect missing python-dev and libgcrypt-dev to give better errors.
# Dot15d4 is a dep of some of the newer tools
try:
from scapy.all import Dot15d4
except ImportError:
warn.append("Scapy (with dot15d4 layer)")
pip_pkgs.append("scapy")
if len(err) > 0:
print("""
Library requirements not met. Install the following libraries, then re-run the setup script.
{}\n""".format('\n'.join(err)), file=sys.stderr)
if len(warn) > 0:
print("""
Library recommendations not met. For full support, install the following libraries, then re-run the setup script.
{}\n""".format('\n'.join(warn)), file=sys.stderr)
if len(apt_get_pkgs) > 0 or len(pip_pkgs) > 0:
print("The following commands should install these dependencies on Ubuntu, and can be adapted for other OSs:", file=sys.stderr)
if len(apt_get_pkgs) > 0:
print("\tsudo apt-get install -y {}".format(' '.join(apt_get_pkgs)), file=sys.stderr)
if len(pip_pkgs) > 0:
print("\tpip3 install {}".format(' '.join(pip_pkgs)), file=sys.stderr)
if len(err) > 0:
sys.exit(1)
zigbee_crypt = Extension('zigbee_crypt',
sources = ['zigbee_crypt/zigbee_crypt.c'],
libraries = ['gcrypt'],
include_dirs = ['/usr/local/include', '/usr/include', '/sw/include/', 'zigbee_crypt'],
library_dirs = ['/usr/local/lib', '/usr/lib','/sw/var/lib/']
)
setup(name = 'killerbee',
version = '3.0.0-beta.2',
description = 'ZigBee and IEEE 802.15.4 Attack Framework and Tools',
author = 'Joshua Wright, Ryan Speers',
author_email = 'jwright@willhackforsushi.com, ryan@riverloopsecurity.com',
license = 'LICENSE.txt',
packages = ['killerbee'],
scripts = ['tools/zbdump', 'tools/zbgoodfind', 'tools/zbid', 'tools/zbreplay',
'tools/zbconvert', 'tools/zbdsniff', 'tools/zbstumbler', 'tools/zbassocflood',
'tools/zbscapy', 'tools/zbwireshark', 'tools/zbkey',
'tools/zbwardrive', 'tools/zbopenear', 'tools/zbfakebeacon',
'tools/zborphannotify', 'tools/zbpanidconflictflood', 'tools/zbrealign', 'tools/zbcat',
'tools/zbjammer', 'tools/kbbootloader'],
install_requires=['pyserial>=2.0', 'pyusb', 'pycrypto', 'rangeparser', 'scapy'],
# NOTE: pygtk doesn't install via distutils on non-Windows hosts
ext_modules = [zigbee_crypt],
)
| 38.209302
| 131
| 0.644248
|
e9bd0ec8d57c5b694badb5df4843632c4ef67425
| 2,703
|
py
|
Python
|
Finish/lms.py
|
soumallyadev07/Auto-Attendee
|
845c636b76884f71457de1560494ffaeaa7630fd
|
[
"MIT"
] | 5
|
2020-12-18T15:42:57.000Z
|
2021-08-06T08:47:17.000Z
|
Finish/lms.py
|
shourygupta28/Auto-Attendee
|
cf01ad7ae62f53c15363c04a053c3cdd77640c87
|
[
"MIT"
] | null | null | null |
Finish/lms.py
|
shourygupta28/Auto-Attendee
|
cf01ad7ae62f53c15363c04a053c3cdd77640c87
|
[
"MIT"
] | 39
|
2020-11-19T06:56:41.000Z
|
2021-08-06T08:47:20.000Z
|
from selenium import webdriver
from getpass import getpass
import time
from datetime import datetime
import calendar
import random
import csv
my_date = datetime.today()
#https://chromedriver.chromium.org/downloads
username = input("Enter in your username: ")
password = getpass("Enter your password: ")
lms = webdriver.Chrome(executable_path="ChromeDriver/chromedriver.exe")
lms.get("https://ada-lms.thapar.edu/moodle/login/index.php")
username_textbox = lms.find_element_by_id("username")
username_textbox.send_keys(username)
password_textbox = lms.find_element_by_id("password")
password_textbox.send_keys(password)
login_button = lms.find_element_by_id("loginbtn")
login_button.submit()
with open('Schedule.csv', newline='') as f:
reader = csv.reader(f)
lst = list(reader)
lst.remove(lst[0])
for i in range(len(lst)):
lst[i].remove(lst[i][0])
for j in range(len(lst)):
if lst[j-1][0] == '':
lst.remove(lst[j-1])
#print(lst)
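# Assumed Schedule.csv layout (illustrative, inferred from the indexing below, not taken
# from the repository): after the header row is dropped and the first column removed,
# each remaining row is consumed as [start "HH:MM", end "HH:MM", weekday name, meeting URL],
# e.g. 09:00,09:50,Monday,https://example.com/meeting-link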
isStarted = False
for i in lst:
while True:
if isStarted == False:
if datetime.now().hour == int(i[0].split(':')[0]) and datetime.now().minute == int(i[0].split(':')[1]) and calendar.day_name[my_date.weekday()] == i[2]:
lms.get(i[3])
time.sleep(10)
isStarted = True
elif datetime.now().hour == int(i[1].split(':')[0]):
#print('1')
if datetime.now().minute > int(i[1].split(':')[1]) and calendar.day_name[my_date.weekday()] == i[2]:
#print('2')
isStarted = False
break
elif datetime.now().hour > int(i[1].split(':')[0]):
if calendar.day_name[my_date.weekday()] == i[2]:
#print('3')
isStarted = False
break
elif calendar.day_name[my_date.weekday()] != i[2]:
isStarted = False
break
else:
num = random.randint(1, 3)
if num == 1:
lms.get('https://ada-lms.thapar.edu/moodle/my/')
time.sleep(2)
elif num == 2:
lms.get('https://ada-lms.thapar.edu/moodle/calendar/view.php?view=month')
time.sleep(2)
else:
lms.get('https://ada-lms.thapar.edu/moodle/')
time.sleep(2)
elif isStarted == True:
if datetime.now().hour == int(i[1].split(':')[0]) and datetime.now().minute == int(i[1].split(':')[1]) and calendar.day_name[my_date.weekday()] == i[2]:
time.sleep(1)
isStarted = False
break
| 36.04
| 164
| 0.550869
|
b947f003fe5d12293d3261e7f48e02003603d9c1
| 2,655
|
py
|
Python
|
app.py
|
nevermorethesjake/weather
|
18e1967fd50a87cde67a6c50c64e033deda2a7e2
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
nevermorethesjake/weather
|
18e1967fd50a87cde67a6c50c64e033deda2a7e2
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
nevermorethesjake/weather
|
18e1967fd50a87cde67a6c50c64e033deda2a7e2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import urllib.request, urllib.parse, urllib.error
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = processRequest(req)
res = json.dumps(res, indent=4)
# print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def processRequest(req):
if req.get("result").get("action") != "yahooWeatherForecast":
return {}
baseurl = "https://query.yahooapis.com/v1/public/yql?"
yql_query = makeYqlQuery(req)
if yql_query is None:
return {}
yql_url = baseurl + urllib.parse.urlencode({'q': yql_query}) + "&format=json"
result = urllib.request.urlopen(yql_url).read()
data = json.loads(result)
res = makeWebhookResult(data)
return res
def makeYqlQuery(req):
result = req.get("result")
parameters = result.get("parameters")
city = parameters.get("geo-city")
if city is None:
return None
return "select * from weather.forecast where woeid in (select woeid from geo.places(1) where text='" + city + "') and u='c'"
def makeWebhookResult(data):
query = data.get('query')
if query is None:
return {}
result = query.get('results')
if result is None:
return {}
channel = result.get('channel')
if channel is None:
return {}
item = channel.get('item')
image = channel.get('image')
location = channel.get('location')
units = channel.get('units')
if (location is None) or (item is None) or (image is None) or (units is None):
return {}
condition = item.get('condition')
if condition is None:
return {}
# print(json.dumps(item, indent=4))
speech = "Vandaag in " + location.get('city') + ": " + condition.get('text') + \
" en het is " + condition.get('temp') + " " + units.get('temperature') + "elcius"
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "apiai-weather-webhook-sample"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=False, port=port, host='0.0.0.0')
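A hedged client-side sketch for exercising the webhook above (separate from the app; the payload fields mirror what processRequest and makeYqlQuery read, and the values are illustrative). Note that the Yahoo YQL weather endpoint the app proxies has since been retired, so the upstream call will fail today.
import requests
payload = {
    "result": {
        "action": "yahooWeatherForecast",
        "parameters": {"geo-city": "Amsterdam"},
    }
}
response = requests.post("http://localhost:5000/webhook", json=payload)
print(response.json())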
| 25.285714
| 128
| 0.629379
|
ab58cb69bcfce6c695319b2fffe8755e28d2b909
| 13,366
|
py
|
Python
|
ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/params.py
|
dawnwish/ambari
|
accbb0048435de2d08e3d1b8771d966a94b98707
|
[
"Apache-2.0"
] | 16
|
2018-05-24T10:28:24.000Z
|
2021-08-05T03:13:26.000Z
|
ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/params.py
|
chinaworld/ambari-zh
|
f3b9afcbf0ed708fa5b5995a3acfb9f4131dc019
|
[
"Apache-2.0"
] | null | null | null |
ambari-server/src/main/resources/common-services/SPARK/2.2.0/scripts/params.py
|
chinaworld/ambari-zh
|
f3b9afcbf0ed708fa5b5995a3acfb9f4131dc019
|
[
"Apache-2.0"
] | 17
|
2018-07-06T08:57:00.000Z
|
2021-11-04T11:00:36.000Z
|
#!/usr/bin/python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import socket
import status_params
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.constants import StackFeature
from resource_management.libraries.functions import conf_select, stack_select
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.copy_tarball import get_sysprep_skip_copy_tarballs_hdfs
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.script.script import Script
# a map of the Ambari role to the component name
# for use with <stack-root>/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
'SPARK_JOBHISTORYSERVER' : 'spark-historyserver',
'SPARK_CLIENT' : 'spark-client',
'SPARK_THRIFTSERVER' : 'spark-thriftserver',
'LIVY_SERVER' : 'livy-server',
'LIVY_CLIENT' : 'livy-client'
}
component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SPARK_CLIENT")
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_name = status_params.stack_name
stack_root = Script.get_stack_root()
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
sysprep_skip_copy_tarballs_hdfs = get_sysprep_skip_copy_tarballs_hdfs()
# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)
spark_conf = '/etc/spark/conf'
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
hadoop_home = stack_select.get_hadoop_dir("home")
spark_conf = format("{stack_root}/current/{component_directory}/conf")
spark_log_dir = config['configurations']['spark-env']['spark_log_dir']
spark_pid_dir = status_params.spark_pid_dir
spark_home = format("{stack_root}/current/{component_directory}")
spark_daemon_memory = config['configurations']['spark-env']['spark_daemon_memory']
spark_thrift_server_conf_file = spark_conf + "/spark-thrift-sparkconf.conf"
java_home = config['hostLevelParams']['java_home']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
user_group = config['configurations']['cluster-env']['user_group']
spark_user = status_params.spark_user
hive_user = status_params.hive_user
spark_group = status_params.spark_group
user_group = status_params.user_group
spark_hdfs_user_dir = format("/user/{spark_user}")
spark_history_dir = default('/configurations/spark-defaults/spark.history.fs.logDirectory', "hdfs:///spark-history")
spark_history_server_pid_file = status_params.spark_history_server_pid_file
spark_thrift_server_pid_file = status_params.spark_thrift_server_pid_file
spark_history_server_start = format("{spark_home}/sbin/start-history-server.sh")
spark_history_server_stop = format("{spark_home}/sbin/stop-history-server.sh")
spark_thrift_server_start = format("{spark_home}/sbin/start-thriftserver.sh")
spark_thrift_server_stop = format("{spark_home}/sbin/stop-thriftserver.sh")
spark_hadoop_lib_native = format("{stack_root}/current/hadoop-client/lib/native:{stack_root}/current/hadoop-client/lib/native/Linux-amd64-64")
run_example_cmd = format("{spark_home}/bin/run-example")
spark_smoke_example = "SparkPi"
spark_service_check_cmd = format(
"{run_example_cmd} --master yarn --deploy-mode cluster --num-executors 1 --driver-memory 256m --executor-memory 256m --executor-cores 1 {spark_smoke_example} 1")
spark_jobhistoryserver_hosts = default("/clusterHostInfo/spark_jobhistoryserver_hosts", [])
if len(spark_jobhistoryserver_hosts) > 0:
spark_history_server_host = spark_jobhistoryserver_hosts[0]
else:
spark_history_server_host = "localhost"
# spark-defaults params
ui_ssl_enabled = default("configurations/spark-defaults/spark.ssl.enabled", False)
spark_yarn_historyServer_address = default(spark_history_server_host, "localhost")
spark_history_scheme = "http"
spark_history_ui_port = config['configurations']['spark-defaults']['spark.history.ui.port']
if ui_ssl_enabled:
spark_history_ui_port = str(int(spark_history_ui_port) + 400)
spark_history_scheme = "https"
spark_env_sh = config['configurations']['spark-env']['content']
spark_log4j_properties = config['configurations']['spark-log4j-properties']['content']
spark_metrics_properties = config['configurations']['spark-metrics-properties']['content']
hive_server_host = default("/clusterHostInfo/hive_server_host", [])
is_hive_installed = not len(hive_server_host) == 0
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
spark_kerberos_keytab = config['configurations']['spark-defaults']['spark.history.kerberos.keytab']
spark_kerberos_principal = config['configurations']['spark-defaults']['spark.history.kerberos.principal']
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
spark_thriftserver_hosts = default("/clusterHostInfo/spark_thriftserver_hosts", [])
has_spark_thriftserver = not len(spark_thriftserver_hosts) == 0
# hive-site params
spark_hive_properties = {
'hive.metastore.uris': config['configurations']['hive-site']['hive.metastore.uris']
}
# security settings
if security_enabled:
spark_principal = spark_kerberos_principal.replace('_HOST',spark_history_server_host.lower())
if is_hive_installed:
spark_hive_properties.update({
'hive.metastore.sasl.enabled': str(config['configurations']['hive-site']['hive.metastore.sasl.enabled']).lower(),
'hive.metastore.kerberos.keytab.file': config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file'],
'hive.server2.authentication.spnego.principal': config['configurations']['hive-site']['hive.server2.authentication.spnego.principal'],
'hive.server2.authentication.spnego.keytab': config['configurations']['hive-site']['hive.server2.authentication.spnego.keytab'],
'hive.metastore.kerberos.principal': config['configurations']['hive-site']['hive.metastore.kerberos.principal'],
'hive.server2.authentication.kerberos.principal': config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal'],
'hive.server2.authentication.kerberos.keytab': config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab'],
'hive.server2.authentication': config['configurations']['hive-site']['hive.server2.authentication'],
})
hive_kerberos_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
hive_kerberos_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal'].replace('_HOST', socket.getfqdn().lower())
# thrift server support - available on HDP 2.3 or higher
spark_thrift_sparkconf = None
spark_thrift_cmd_opts_properties = ''
spark_thrift_fairscheduler_content = None
spark_thrift_master = "yarn-client"
if 'nm_hosts' in config['clusterHostInfo'] and len(config['clusterHostInfo']['nm_hosts']) == 1:
# use local mode when there's only one nodemanager
spark_thrift_master = "local[4]"
if has_spark_thriftserver and 'spark-thrift-sparkconf' in config['configurations']:
spark_thrift_sparkconf = config['configurations']['spark-thrift-sparkconf']
spark_thrift_cmd_opts_properties = config['configurations']['spark-env']['spark_thrift_cmd_opts']
if is_hive_installed:
# update default metastore client properties (async wait for metastore component) it is useful in case of
# blueprint provisioning when hive-metastore and spark-thriftserver is not on the same host.
spark_hive_properties.update({
'hive.metastore.client.socket.timeout' : config['configurations']['hive-site']['hive.metastore.client.socket.timeout']
})
spark_hive_properties.update(config['configurations']['spark-hive-site-override'])
if 'spark-thrift-fairscheduler' in config['configurations'] and 'fairscheduler_content' in config['configurations']['spark-thrift-fairscheduler']:
spark_thrift_fairscheduler_content = config['configurations']['spark-thrift-fairscheduler']['fairscheduler_content']
default_fs = config['configurations']['core-site']['fs.defaultFS']
hdfs_site = config['configurations']['hdfs-site']
hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore"
ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
has_ats = len(ats_host) > 0
dfs_type = default("/commandParams/dfs_type", "")
# livy related config
# livy for spark is only supported from HDP 2.6
has_livyserver = False
if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY, stack_version_formatted):
livy_component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "LIVY_SERVER")
livy_conf = format("{stack_root}/current/{livy_component_directory}/conf")
livy_log_dir = config['configurations']['livy-env']['livy_log_dir']
livy_pid_dir = status_params.livy_pid_dir
livy_home = format("{stack_root}/current/{livy_component_directory}")
livy_user = status_params.livy_user
livy_group = status_params.livy_group
user_group = status_params.user_group
livy_hdfs_user_dir = format("/user/{livy_user}")
livy_server_pid_file = status_params.livy_server_pid_file
livy_recovery_dir = default("/configurations/livy-conf/livy.server.recovery.state-store.url", "/livy-recovery")
livy_server_start = format("{livy_home}/bin/livy-server start")
livy_server_stop = format("{livy_home}/bin/livy-server stop")
livy_logs_dir = format("{livy_home}/logs")
livy_env_sh = config['configurations']['livy-env']['content']
livy_log4j_properties = config['configurations']['livy-log4j-properties']['content']
livy_spark_blacklist_properties = config['configurations']['livy-spark-blacklist']['content']
if 'livy.server.kerberos.keytab' in config['configurations']['livy-conf']:
livy_kerberos_keytab = config['configurations']['livy-conf']['livy.server.kerberos.keytab']
else:
livy_kerberos_keytab = config['configurations']['livy-conf']['livy.server.launch.kerberos.keytab']
if 'livy.server.kerberos.principal' in config['configurations']['livy-conf']:
livy_kerberos_principal = config['configurations']['livy-conf']['livy.server.kerberos.principal']
else:
livy_kerberos_principal = config['configurations']['livy-conf']['livy.server.launch.kerberos.principal']
livy_livyserver_hosts = default("/clusterHostInfo/livy_server_hosts", [])
# ats 1.5 properties
entity_groupfs_active_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.active-dir']
entity_groupfs_active_dir_mode = 01777
entity_groupfs_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.entity-group-fs-store.done-dir']
entity_groupfs_store_dir_mode = 0700
is_webhdfs_enabled = hdfs_site['dfs.webhdfs.enabled']
if len(livy_livyserver_hosts) > 0:
has_livyserver = True
if security_enabled:
livy_principal = livy_kerberos_principal.replace('_HOST', config['hostname'].lower())
livy_livyserver_port = default('configurations/livy-conf/livy.server.port',8999)
import functools
#create partial functions with common arguments for every HdfsResource call
#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
HdfsResource = functools.partial(
HdfsResource,
user=hdfs_user,
hdfs_resource_ignore_file = hdfs_resource_ignore_file,
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs,
immutable_paths = get_not_managed_resources(),
dfs_type = dfs_type
)
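# Hedged illustration (not from this file): service scripts elsewhere in the stack
# typically consume the partially-applied HdfsResource above by batching directory
# creation and then flushing it. Keyword names follow the common Ambari pattern and
# the exact call site is an assumption here:
#   params.HdfsResource(params.spark_history_dir,
#                       type="directory",
#                       action="create_on_execute",
#                       owner=params.spark_user,
#                       group=params.user_group,
#                       mode=0775)
#   params.HdfsResource(None, action="execute")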
| 50.059925
| 163
| 0.794703
|
74ffbeee3d92ce8f6482db2f38c4cc0d2af628c4
| 12,836
|
py
|
Python
|
core/solver.py
|
Limacon101/EA_PODI_Library
|
6dee7210cd05a6112c919928fccadbd8857d7f10
|
[
"Apache-2.0"
] | null | null | null |
core/solver.py
|
Limacon101/EA_PODI_Library
|
6dee7210cd05a6112c919928fccadbd8857d7f10
|
[
"Apache-2.0"
] | null | null | null |
core/solver.py
|
Limacon101/EA_PODI_Library
|
6dee7210cd05a6112c919928fccadbd8857d7f10
|
[
"Apache-2.0"
] | null | null | null |
import random
from core import ops
from algorithms import algo_rs, algo_ea, algo_hc
from generators import gen_onemax, gen_gp, gen_sphere
class Solver(object):
def __init__(self, generator, algorithm, alg_params=None):
"""
Instantiate a Solver before starting an optimisation
:param generator: Problem generator
:param algorithm: Optimisation algorithm
:param alg_params: Algorithm parameters
"""
if alg_params is None:
alg_params = AlgoParams()
self.alg_params = alg_params
alg_params.validate()
self.generator = generator
Solver.validate_generator(self.generator)
self.algorithm = algorithm
Solver.validate_algorithm(self.algorithm)
self.stats = Stats(alg_params.generations)
@staticmethod
def validate_generator(generator):
if not hasattr(generator, 'create_solution'):
print('Invalid Generator')
raise AttributeError('Invalid Generator.\nGenerator must contain a function with signature: '
'create_solution(t).\n'
'This method generates a random candidate solution to a problem, '
                                 'where t is a dependency-injected Trace object used to generate random numbers')
if not hasattr(generator, 'fitness'):
raise AttributeError('Invalid Generator.\nGenerator must contain a function with signature: '
'fitness(sol)\n'
'This method calculates the fitness of a candidate solution, '
'where sol is a candidate solution ')
return True
@staticmethod
def validate_algorithm(algorithm):
if not hasattr(algorithm, 'run'):
print('Invalid Algorithm')
raise AttributeError('Invalid Algorithm.\nAlgorithm must contain a function with signature: '
'run(solver [, alg_operators]).\n'
'This method runs the optimisation algorithm, where solver is a reference'
'to the Solver object that initiated the optimisation process')
return True
def solve(self):
"""
Optimise the problem
:return: Trace object containing a solution
"""
return self.algorithm.run(self, self.alg_params)
def create_solution(self, es_params=None):
"""
Create a new solution
:param es_params: Additional parameters
:return: Trace object
"""
t = Trace()
sol = self.generator.create_solution(t)
t.solution = sol
t.new = False
t.fitness = self.generator.fitness(t.solution)
if es_params is not None:
t.es_params = es_params
return t
def create_solution_with_trace(self, t):
"""
Create a solution using an existing Trace object
:param t: Trace object
:return: Trace object
"""
sol = self.generator.create_solution(t)
t.solution = sol
t.count = -1
t.fitness = self.generator.fitness(t.solution)
return t
def create_solution_with_raw_trace(self, trace):
"""
Create a solution using a list of random numbers
:param trace: List of numbers
:return: Trace object
"""
t = Trace()
t.new = False
t.trace = trace
sol = self.generator.create_solution(t)
# Trims list if random numbers extend (otherwise some mutations have no effect)
if t.count < len(t.trace):
t.trace = t.trace[-(len(t.trace) - t.count - 1):]
t.solution = sol
t.count = -1
t.fitness = self.generator.fitness(t.solution)
return t
class Stats(object):
def __init__(self, num_gens):
self.num_gens = num_gens
self.best = []
self.gen_best = []
self.gen_average = []
def update_stats(self, best, gen_best=None, gen_av_fitness=None):
self.best.append(best.fitness)
if gen_best is not None:
self.gen_best.append(gen_best.fitness)
if gen_av_fitness is not None:
self.gen_average.append(gen_av_fitness)
def get_stats(self):
"""
Gets generation bests, current generation bests and current generation
averages
:return: A tuple with 3 lists of fitnesses
"""
self.validate_stats()
return self.best, self.gen_best, self.gen_average
def get_generational_overall_bests(self):
"""
For each generation, gets the best solution fitness found at this point
(This value should always be getting 'better')
:return: A list of fitnesses
"""
self.validate_stats()
return self.best
def get_generational_bests(self):
"""
For each generation, gets the best solution found during this generation
(This value may get worse for non-elitist evolutionary algorithms)
:return: A list of fitnesses
"""
self.validate_stats()
return self.gen_best
def get_generational_averages(self):
"""
For each generation, gets the generational average
(This value may get worse for non-elitist evolutionary algorithms)
:return: A list of fitnesses
"""
self.validate_stats()
return self.gen_average
def validate_stats(self):
if len(self.gen_best) > 0:
assert len(self.gen_best) == len(self.best)
if len(self.gen_average) > 0:
assert len(self.gen_average) == len(self.best)
class AlgoParams:
def __init__(self, select=None, mutate=None, crossover=None,
generations=100, pop_size=30, num_children=None,
mutation_rate=0.1, mutation_sd=0.1, crossover_rate=1,
minimising=True, verbose=-1):
"""
Arguments passed to an algorithm.
An algorithm can choose to use these parameters or set its own
(E.g. A SAEA may dynamically set the mutation_rate based on a solution's parameters)
Note: Functional arguments set as default None -- due to Python's use of mutable default arguments
:param select: Selection operator
:param mutate: Mutation operator
:param crossover: Crossover operator
:param generations: Number of generations (for iterative algorithms)
:param pop_size: Population Size (for population-based algorithms)
:param mutation_rate: Mutation rate
:param crossover_rate: Crossover rate
:param minimising: Whether algorithm should be minimising or maximising fitness
:param verbose: -1: No printout 0: Detailed (runthrough) 1-N: Print generation stats every n gens
"""
if select is None:
select = ops.select_tournament
if mutate is None:
mutate = ops.mutate_trace_gauss
if crossover is None:
crossover = ops.crossover_one_point
self.select = select
self.mutate = mutate
self.crossover = crossover
self.generations = generations
self.pop_size = pop_size
self.num_children = self.pop_size if num_children is None else num_children
self.mutation_rate = mutation_rate
self.mutation_sd = mutation_sd
self.crossover_rate = crossover_rate
self.minimising = minimising
self.verbose = verbose
def __str__(self):
s = ''
s += 'selection op: ' + str(self.select.__name__) + '\n'
s += 'mutation op: ' + str(self.mutate.__name__) + '\n'
s += 'crossover op: ' + str(self.crossover.__name__) + '\n'
s += 'generations: ' + str(self.generations) + '\n'
s += 'pop_size: ' + str(self.pop_size) + '\n'
s += 'num_children: ' + str(self.num_children) + '\n'
s += 'mutation_rate: ' + str(self.mutation_rate) + '\n'
s += 'mutation_sd: ' + str(self.mutation_sd) + '\n'
s += 'crossover_rate: ' + str(self.crossover_rate) + '\n'
s += 'minimising: ' + str(self.minimising) + '\n'
return s
def validate(self):
# Roulette selection cannot be used when minimising
if self.select is ops.select_roulette:
assert self.minimising is False
assert self.generations > 0
assert self.pop_size > 0
assert 0 <= self.mutation_rate <= 1
assert 0 <= self.mutation_sd
assert self.verbose >= -1
assert self.num_children <= self.pop_size # For Select_best_n method
class Trace(object):
"""
    :param es_params: Additional parameters (e.g. parameters for self-adaptation)
    :param new: True if the Trace is created for the first time
    :param trace: List holding trace values
    :param solution: Solution calculated from the trace
    :param fitness: Fitness associated with the solution
    :param count: During recall by a generator, the position reached in the trace (reset externally)
"""
def __init__(self):
self.sa_params = []
self.es_params = []
self.new = True
self.trace = []
self.solution = 0
self.fitness = 0
self.count = -1
def __str__(self):
return 'S: ' + str(self.solution) + ' f: ' + str(self.fitness)
def random(self):
"""
        Returns a random float between 0 and 1 and adds it to the trace
:return: Random real number 0..1
"""
return self.add_to_trace(random.random())
def randrange(self, start, stop):
"""
Returns a random integer from range start to stop
:param start: Range start
:param stop: Range stop
:return: Random integer
"""
if start >= stop:
raise ValueError("stop must be larger than start")
if not isinstance(start, int) or not isinstance(stop, int):
raise TypeError("Inputs must be integers")
r = self.random()
return int(r * (stop - start) + start)
def randint(self, start, stop):
"""
Returns a random integer R from range start to stop such that start <= R <= stop
:param start: Range start
:param stop: Range stop
:return: Random integer
"""
return self.randrange(start, stop + 1)
def add_to_trace(self, r):
"""
If the Trace is new, add random number to trace (solution creation).
Otherwise get the next number from the existing trace (solution reconstruction).
:param r: Random real number 0..1
        :return: 'Random' number from the trace
"""
if self.new:
self.trace.append(r)
return r
else:
return self.from_seed(r)
def from_seed(self, r):
"""
        Get the next number from the trace. If the trace is too short, add the random number supplied
to the trace
:param r: Random real number 0..1
:return: 'Random' number from trace
"""
self.count += 1
if self.count < len(self.trace):
# print('r', self.trace[self.count])
return self.trace[self.count]
elif self.count == len(self.trace):
# r = random.random()
self.trace.append(r)
return r
else:
raise ValueError('Count exceeded trace length')
def print_trace(self):
f_list = ['%.2f' % elem for elem in self.trace]
print(f_list)
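# Hedged demonstration (a sketch, not from the original module) of the record/replay
# behaviour implemented by Trace: draws made while `new` is True are recorded, and
# reconstruction replays them in order via from_seed().
def _demo_trace_replay():
    t = Trace()
    recorded = [t.random() for _ in range(3)]  # records three random draws
    t.new = False
    t.count = -1
    replayed = [t.from_seed(random.random()) for _ in range(3)]
    assert recorded == replayed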
if __name__ == '__main__':
gens = [gen_onemax, gen_sphere, gen_gp]
algs = [algo_hc, algo_rs, algo_ea]
for gen in gens:
for alg in algs:
print('\n\n', alg.__name__, ' --- ', gen.__name__)
my_ops = AlgoParams(select=ops.select_tournament,
crossover=ops.crossover_one_point,
mutate=ops.mutate_trace,
minimising=True)
s = Solver(gen, alg, my_ops).solve()
print(s)
# gen = gen_onemax
# alg = algo_hc
# s = Solver(gen, alg).solve()
# print(s)
# t = Trace(Representation.REAL)
# print(t.randint(0, 3))
# trace = t.trace
# print(trace)
# print("Calculating from core...")
# gen = gen_sphere
# alg = algo_hc
# s = Solver(gen, alg)
# print(s.representation)
# print(s)
| 34.880435
| 120
| 0.579854
|
886e834294a2cab7e844bb55ba9ebdbd50df5937
| 14,690
|
py
|
Python
|
courses/machine_learning/deepdive/09_sequence/txtclsmodel/trainer/model_native.py
|
studiocardo/GCP-Tutorials
|
ea81f0c1cb59f75b01e4912e40e338c2bd153b75
|
[
"Apache-2.0"
] | 1
|
2018-11-21T04:52:12.000Z
|
2018-11-21T04:52:12.000Z
|
courses/machine_learning/deepdive/09_sequence/txtclsmodel/trainer/model_native.py
|
studiocardo/GCP-Tutorials
|
ea81f0c1cb59f75b01e4912e40e338c2bd153b75
|
[
"Apache-2.0"
] | null | null | null |
courses/machine_learning/deepdive/09_sequence/txtclsmodel/trainer/model_native.py
|
studiocardo/GCP-Tutorials
|
ea81f0c1cb59f75b01e4912e40e338c2bd153b75
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import pandas as pd
import numpy as np
import re
import os
from tensorflow.python.keras.preprocessing import text
from tensorflow.python.keras import models
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.layers import Dropout
from tensorflow.python.keras.layers import Embedding
from tensorflow.python.keras.layers import SeparableConv1D
from tensorflow.python.keras.layers import MaxPooling1D
from tensorflow.python.keras.layers import GlobalAveragePooling1D
from google.cloud import storage
tf.logging.set_verbosity(tf.logging.INFO)
CLASSES = {'github': 0, 'nytimes': 1, 'techcrunch': 2} # label-to-int mapping
TOP_K = 20000 # Limit on the number vocabulary size used for tokenization
MAX_SEQUENCE_LENGTH = 50 # Sentences will be truncated/padded to this length
VOCAB_FILE_PATH = None # where vocabulary is saved, dynamically set in train_and_eval function
PADWORD = 'ZYXW'
"""
Helper function to download data from Google Cloud Storage
# Arguments:
source: string, the GCS URL to download from (e.g. 'gs://bucket/file.csv')
destination: string, the filename to save as on local disk. MUST be filename
ONLY, doesn't support folders. (e.g. 'file.csv', NOT 'folder/file.csv')
# Returns: nothing, downloads file to local disk
"""
def download_from_gcs(source, destination):
search = re.search('gs://(.*?)/(.*)', source)
bucket_name = search.group(1)
blob_name = search.group(2)
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
bucket.blob(blob_name).download_to_filename(destination)
"""
Parses raw tsv containing hacker news headlines and returns (sentence, integer label) pairs
# Arguments:
train_data_path: string, path to tsv containing training data.
can be a local path or a GCS url (gs://...)
eval_data_path: string, path to tsv containing eval data.
can be a local path or a GCS url (gs://...)
# Returns:
((train_sentences, train_labels), (test_sentences, test_labels)): sentences
are lists of strings, labels are numpy integer arrays
"""
def load_hacker_news_data(train_data_path, eval_data_path):
if train_data_path.startswith('gs://'):
download_from_gcs(train_data_path, destination='train.csv')
train_data_path = 'train.csv'
if eval_data_path.startswith('gs://'):
download_from_gcs(eval_data_path, destination='eval.csv')
eval_data_path = 'eval.csv'
# Parse CSV using pandas
column_names = ('label', 'text')
df_train = pd.read_csv(train_data_path, names=column_names, sep='\t')
df_eval = pd.read_csv(eval_data_path, names=column_names, sep='\t')
return ((list(df_train['text']), np.array(df_train['label'].map(CLASSES))),
(list(df_eval['text']), np.array(df_eval['label'].map(CLASSES))))
"""
Create tf.estimator compatible input function
# Arguments:
texts: [strings], list of sentences
labels: numpy int vector, integer labels for sentences
batch_size: int, number of records to use for each train batch
mode: tf.estimator.ModeKeys.TRAIN or tf.estimator.ModeKeys.EVAL
# Returns:
tf.estimator.inputs.numpy_input_fn, produces feature and label
tensors one batch at a time
"""
def input_fn(texts, labels, batch_size, mode):
# Convert texts from python strings to tensors
x = tf.constant(texts)
# Map text to sequence of word-integers and pad
x = vectorize_sentences(x)
# Create tf.data.Dataset from tensors
dataset = tf.data.Dataset.from_tensor_slices((x, labels))
# Pad to constant length
dataset = dataset.map(pad)
if mode == tf.estimator.ModeKeys.TRAIN:
        num_epochs = None  # loop indefinitely
dataset = dataset.shuffle(buffer_size=50000) # our input is already shuffled so this is redundant
else:
num_epochs = 1
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset
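# Hedged wiring note (names such as train_texts/train_labels are illustrative): an input
# function like the one above is typically handed to the estimator built by
# keras_estimator() further below, e.g.
#   estimator.train(input_fn=lambda: input_fn(train_texts, train_labels,
#                                             batch_size=128,
#                                             mode=tf.estimator.ModeKeys.TRAIN))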
"""
Given an int tensor, remove 0s then pad to a fixed length representation.
#Arguments:
feature: int tensor
label: int. not used in function, just passed through
#Returns:
(int tensor, int) tuple.
"""
def pad(feature, label):
# 1. Remove 0s which represent out of vocabulary words
nonzero_indices = tf.where(tf.not_equal(feature, tf.zeros_like(feature)))
without_zeros = tf.gather(feature,nonzero_indices)
without_zeros = tf.squeeze(without_zeros, axis=1)
# 2. Prepend 0s till MAX_SEQUENCE_LENGTH
padded = tf.pad(without_zeros, [[MAX_SEQUENCE_LENGTH, 0]]) # pad out with zeros
padded = padded[-MAX_SEQUENCE_LENGTH:] # slice to constant length
return (padded, label)
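# Worked example of the padding behaviour (a sketch; relies on the module-level
# MAX_SEQUENCE_LENGTH). Zeros marking out-of-vocabulary words are stripped, then the
# sequence is left-padded with zeros to a constant length, e.g. [3, 0, 7, 0, 12]
# becomes [0, ..., 0, 3, 7, 12] of length MAX_SEQUENCE_LENGTH.
def _example_pad_usage():
    feature = tf.constant([3, 0, 7, 0, 12], dtype=tf.int64)
    return pad(feature, tf.constant(1))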
"""
Given sentences, return an integer representation
# Arguments:
sentences: string tensor of shape (?,), contains sentences to vectorize
# Returns:
Integer representation of the sentence. Word-integer mapping is determined
by VOCAB_FILE_PATH. Words out of vocabulary will map to 0
"""
def vectorize_sentences(sentences):
# 1. Remove punctuation
sentences = tf.regex_replace(sentences, '[[:punct:]]', ' ')
# 2. Split string tensor into component words
words = tf.string_split(sentences)
words = tf.sparse_tensor_to_dense(words, default_value=PADWORD)
# 3. Map each word to respective integer
table = tf.contrib.lookup.index_table_from_file(
vocabulary_file=VOCAB_FILE_PATH,
num_oov_buckets=0,
vocab_size=None,
default_value=0, # for words not in vocabulary (OOV)
key_column_index=0,
value_column_index=1,
delimiter=',')
numbers = table.lookup(words)
return numbers
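# Illustrative sketch: vectorizing two raw headlines. This assumes VOCAB_FILE_PATH has
# already been set (done in train_and_evaluate) and, in TF 1.x graph mode, that
# tf.tables_initializer() is run before the result is evaluated.
def _example_vectorize_usage():
    sentences = tf.constant(['show hn: my side project', 'ask hn: why is this slow'])
    return vectorize_sentences(sentences)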
"""
Builds a separable CNN model using keras and converts to tf.estimator.Estimator
# Arguments
model_dir: string, file path where training files will be written
config: tf.estimator.RunConfig, specifies properties of tf Estimator
blocks: int, number of pairs of sepCNN and pooling blocks in the model.
filters: int, output dimension of the layers.
kernel_size: int, length of the convolution window.
embedding_dim: int, dimension of the embedding vectors.
dropout_rate: float, percentage of input to drop at Dropout layers.
pool_size: int, factor by which to downscale input at MaxPooling layer.
embedding_path: string , file location of pre-trained embedding (if used)
defaults to None which will cause the model to train embedding from scratch
word_index: dictionary, mapping of vocabulary to integers. used only if
pre-trained embedding is provided
# Returns
A tf.estimator.Estimator that implements a sepCNN model
"""
def keras_estimator(model_dir,
config,
learning_rate,
blocks=2,
filters=64,
dropout_rate=0.2,
embedding_dim=200,
kernel_size=3,
pool_size=3,
embedding_path=None,
word_index=None):
# Create model instance.
model = models.Sequential()
num_features = min(len(word_index) + 1, TOP_K)
# Add embedding layer. If pre-trained embedding is used add weights to the
# embeddings layer and set trainable to input is_embedding_trainable flag.
    if embedding_path is not None:
embedding_matrix = get_embedding_matrix(word_index, embedding_path, embedding_dim)
is_embedding_trainable = True # set to False to freeze embedding weights
model.add(Embedding(input_dim=num_features,
output_dim=embedding_dim,
input_length=MAX_SEQUENCE_LENGTH,
weights=[embedding_matrix],
trainable=is_embedding_trainable))
else:
model.add(Embedding(input_dim=num_features,
output_dim=embedding_dim,
input_length=MAX_SEQUENCE_LENGTH))
for _ in range(blocks - 1):
model.add(Dropout(rate=dropout_rate))
model.add(SeparableConv1D(filters=filters,
kernel_size=kernel_size,
activation='relu',
bias_initializer='random_uniform',
depthwise_initializer='random_uniform',
padding='same'))
model.add(SeparableConv1D(filters=filters,
kernel_size=kernel_size,
activation='relu',
bias_initializer='random_uniform',
depthwise_initializer='random_uniform',
padding='same'))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(SeparableConv1D(filters=filters * 2,
kernel_size=kernel_size,
activation='relu',
bias_initializer='random_uniform',
depthwise_initializer='random_uniform',
padding='same'))
model.add(SeparableConv1D(filters=filters * 2,
kernel_size=kernel_size,
activation='relu',
bias_initializer='random_uniform',
depthwise_initializer='random_uniform',
padding='same'))
model.add(GlobalAveragePooling1D())
model.add(Dropout(rate=dropout_rate))
model.add(Dense(len(CLASSES), activation='softmax'))
# Compile model with learning parameters.
optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['acc'])
estimator = tf.keras.estimator.model_to_estimator(keras_model=model, model_dir=model_dir, config=config)
return estimator
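# Illustrative sketch (hypothetical hyperparameter values): building the estimator with
# an embedding trained from scratch; pass embedding_path to use pre-trained vectors.
def _example_estimator_usage(output_dir, word_index):
    run_config = tf.estimator.RunConfig(save_checkpoints_steps=1000)
    return keras_estimator(model_dir=output_dir, config=run_config,
                           learning_rate=0.001, word_index=word_index)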
"""
Defines the features to be passed to the model during inference
Can pass in string text directly. Tokenization done in serving_input_fn
# Arguments: none
# Returns: tf.estimator.export.ServingInputReceiver
"""
def serving_input_fn():
feature_placeholder = tf.placeholder(tf.string, [None])
features = vectorize_sentences(feature_placeholder)
return tf.estimator.export.TensorServingInputReceiver(features, feature_placeholder)
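# Note (illustrative): because the receiver takes raw strings, the exported SavedModel
# can be queried with plain text instances, e.g. a REST prediction request body of
# {"instances": ["some raw headline", "another headline"]}.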
"""
Takes embedding for generic vocabulary and extracts the embeddings
matching the current vocabulary
The pre-trained embedding file is obtained from https://nlp.stanford.edu/projects/glove/
# Arguments:
word_index: dict, {key =word in vocabulary: value= integer mapped to that word}
embedding_path: string, location of the pre-trained embedding file on disk
embedding_dim: int, dimension of the embedding space
# Returns: numpy matrix of shape (vocabulary, embedding_dim) that contains the embedded
representation of each word in the vocabulary.
"""
def get_embedding_matrix(word_index, embedding_path, embedding_dim):
# Read the pre-trained embedding file and get word to word vector mappings.
embedding_matrix_all = {}
# Download if embedding file is in GCS
if embedding_path.startswith('gs://'):
download_from_gcs(embedding_path, destination='embedding.csv')
embedding_path = 'embedding.csv'
with open(embedding_path) as f:
for line in f: # Every line contains word followed by the vector value
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embedding_matrix_all[word] = coefs
# Prepare embedding matrix with just the words in our word_index dictionary
num_words = min(len(word_index) + 1, TOP_K)
embedding_matrix = np.zeros((num_words, embedding_dim))
for word, i in word_index.items():
if i >= TOP_K:
continue
embedding_vector = embedding_matrix_all.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
return embedding_matrix
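# Illustrative sketch (hypothetical GloVe file location): build a 200-dimensional
# embedding matrix restricted to the tokenizer's vocabulary.
def _example_embedding_usage(word_index):
    return get_embedding_matrix(word_index, 'gs://my-bucket/glove.6B.200d.txt', 200)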
"""
Main orchestrator. Responsible for calling all other functions in model.py
# Arguments:
output_dir: string, file path where training files will be written
hparams: dict, command line parameters passed from task.py
# Returns: nothing, kicks off training and evaluation
"""
def train_and_evaluate(output_dir, hparams):
# Load Data
((train_texts, train_labels), (test_texts, test_labels)) = load_hacker_news_data(
hparams['train_data_path'], hparams['eval_data_path'])
# Create vocabulary from training corpus.
tokenizer = text.Tokenizer(num_words=TOP_K)
tokenizer.fit_on_texts(train_texts)
# Save vocabulary to file to use during serving time
tf.gfile.MkDir(output_dir) # directory must exist before we can use tf.gfile.open
global VOCAB_FILE_PATH; VOCAB_FILE_PATH = os.path.join(output_dir,'vocab.txt')
with tf.gfile.Open(VOCAB_FILE_PATH, 'wb') as f:
f.write("{},0\n".format(PADWORD)) # map padword to 0
for word, index in tokenizer.word_index.items():
if index < TOP_K: # only save mappings for TOP_K words
f.write("{},{}\n".format(word, index))
# Create estimator
run_config = tf.estimator.RunConfig(save_checkpoints_steps=1000)
estimator = keras_estimator(
model_dir=output_dir,
config=run_config,
learning_rate=hparams['learning_rate'],
embedding_path=hparams['embedding_path'],
word_index=tokenizer.word_index
)
# Create TrainSpec
train_steps = hparams['num_epochs'] * len(train_texts) / hparams['batch_size']
train_spec = tf.estimator.TrainSpec(
input_fn=lambda:input_fn(
train_texts,
train_labels,
hparams['batch_size'],
mode=tf.estimator.ModeKeys.TRAIN),
max_steps=train_steps
)
# Create EvalSpec
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
eval_spec = tf.estimator.EvalSpec(
input_fn=lambda:input_fn(
test_texts,
test_labels,
hparams['batch_size'],
mode=tf.estimator.ModeKeys.EVAL),
steps=None,
exporters=exporter,
start_delay_secs=10,
throttle_secs=10
)
# Start training
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
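# Illustrative sketch (hypothetical paths and values): the hparams dict expected by
# train_and_evaluate, normally assembled from command-line flags in task.py.
def _example_train_and_evaluate_usage():
    hparams = {
        'train_data_path': 'gs://my-bucket/titles/train.tsv',
        'eval_data_path': 'gs://my-bucket/titles/eval.tsv',
        'learning_rate': 0.001,
        'embedding_path': None,  # None trains the embedding from scratch
        'num_epochs': 5,
        'batch_size': 128,
    }
    train_and_evaluate('gs://my-bucket/model_output', hparams)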
| 40.58011
| 108
| 0.671205
|
777b4bc7538c5f0f4cf9978cc2b6a8f4659adefa
| 2,186
|
py
|
Python
|
intake/gui/__init__.py
|
j-r77/intake
|
5c920e99b8301cc2567a7be2ee070879768ed516
|
[
"BSD-2-Clause"
] | null | null | null |
intake/gui/__init__.py
|
j-r77/intake
|
5c920e99b8301cc2567a7be2ee070879768ed516
|
[
"BSD-2-Clause"
] | null | null | null |
intake/gui/__init__.py
|
j-r77/intake
|
5c920e99b8301cc2567a7be2ee070879768ed516
|
[
"BSD-2-Clause"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
from distutils.version import LooseVersion
def do_import():
error = too_old = False
try:
import panel as pn
too_old = LooseVersion(pn.__version__) < LooseVersion("0.9.5")
except ImportError as e:
error = e
if too_old or error:
class GUI(object):
def __repr__(self):
raise RuntimeError("Please install panel to use the GUI `conda "
"install -c conda-forge panel>=0.8.0`. Import "
"failed with error: %s" % error)
return GUI
try:
from .gui import GUI
css = """
.scrolling {
overflow: scroll;
}
"""
pn.config.raw_css.append(css) # add scrolling class from css (panel GH#383, GH#384)
pn.extension()
except Exception as e:
class GUI(object):
def __repr__(self):
raise RuntimeError("Initialisation of GUI failed, even though "
"panel is installed. Please update it "
"to a more recent version (`conda install -c"
" conda-forge panel>=0.9.5`).")
return GUI
class InstanceMaker(object):
def __init__(self):
self._instance = None
def _instantiate(self):
if self._instance is None:
GUI = do_import()
self._instance = GUI()
def __getattr__(self, attr, *args, **kwargs):
self._instantiate()
return getattr(self._instance, attr, *args, **kwargs)
def __getitem__(self, item):
self._instantiate()
return self._instance[item]
def __repr__(self):
self._instantiate()
return repr(self._instance)
def __dir__(self):
self._instantiate()
return dir(self._instance)
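# Illustrative note (not part of the original module): the typical pattern is to expose
# a module-level InstanceMaker, so the panel-based GUI is only imported and built on
# first use, e.g.
#
#   gui = InstanceMaker()
#   repr(gui)  # triggers do_import() and lazy construction of the GUI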
| 30.361111
| 92
| 0.516468
|
ce9c8056876288e1b0eb63604dfe98972eba4ac2
| 3,667
|
py
|
Python
|
src/utils/Vocabulary.py
|
ryderling/MalGraph
|
4d5f0f80ab2cf7d55abe35a9ef46686b5fdb9610
|
[
"MIT"
] | 4
|
2022-01-17T13:37:55.000Z
|
2022-03-24T07:21:43.000Z
|
src/utils/Vocabulary.py
|
ryderling/MalGraph
|
4d5f0f80ab2cf7d55abe35a9ef46686b5fdb9610
|
[
"MIT"
] | null | null | null |
src/utils/Vocabulary.py
|
ryderling/MalGraph
|
4d5f0f80ab2cf7d55abe35a9ef46686b5fdb9610
|
[
"MIT"
] | 3
|
2022-01-17T13:37:56.000Z
|
2022-03-29T10:51:48.000Z
|
import json
import os
from collections import Counter
from tqdm import tqdm
class Vocab:
def __init__(self, freq_file: str, max_vocab_size: int, min_freq: int = 1, unk_token: str = '<unk>', pad_token: str = '<pad>', special_tokens: list = None):
self.max_vocab_size = max_vocab_size
self.min_freq = min_freq
self.unk_token = unk_token
self.pad_token = pad_token
self.special_tokens = special_tokens
        assert os.path.exists(freq_file), "The file {} does not exist".format(freq_file)
freq_counter = self.load_freq_counter_from_file(file_path=freq_file, min_freq=self.min_freq)
self.token_2_index, self.index_2_token = self.create_vocabulary(freq_counter=freq_counter)
self.unk_idx = None if self.unk_token is None else self.token_2_index[self.unk_token]
self.pad_idx = None if self.pad_token is None else self.token_2_index[self.pad_token]
def __len__(self):
return len(self.index_2_token)
def __getitem__(self, item: str):
assert isinstance(item, str)
if item in self.token_2_index.keys():
return self.token_2_index[item]
else:
if self.unk_token is not None:
return self.token_2_index[self.unk_token]
else:
raise KeyError("{} is not in the vocabulary, and self.unk_token is None".format(item))
def create_vocabulary(self, freq_counter: Counter):
token_2_index = {} # dict
index_2_token = [] # list
if self.unk_token is not None:
index_2_token.append(self.unk_token)
if self.pad_token is not None:
index_2_token.append(self.pad_token)
if self.special_tokens is not None:
for token in self.special_tokens:
index_2_token.append(token)
for f_name, count in tqdm(freq_counter.most_common(self.max_vocab_size), desc="creating vocab ... "):
if f_name in index_2_token:
print("trying to add {} to the vocabulary, but it already exists !!!".format(f_name))
continue
else:
index_2_token.append(f_name)
        for index, token in enumerate(index_2_token):  # build the reverse token -> index mapping
token_2_index.update({token: index})
return token_2_index, index_2_token
@staticmethod
def load_freq_counter_from_file(file_path: str, min_freq: int):
freq_dict = {}
with open(file_path, 'r') as f:
for line in tqdm(f, desc="Load frequency list from the file of {} ... ".format(file_path)):
line = json.loads(line)
f_name = line["f_name"]
count = int(line["count"])
                assert f_name not in freq_dict, "trying to add {} to the vocabulary, but it already exists !!!".format(f_name)
if count < min_freq:
print(line, "break")
break
freq_dict[f_name] = count
return Counter(freq_dict)
if __name__ == '__main__':
max_vocab_size = 1000
vocab = Vocab(freq_file="../../data/processed_dataset/train_external_function_name_vocab.jsonl", max_vocab_size=max_vocab_size)
print(len(vocab.token_2_index), vocab.token_2_index)
print(len(vocab.index_2_token), vocab.index_2_token)
print(vocab.unk_token, vocab.unk_idx)
print(vocab.pad_token, vocab.pad_idx)
print(vocab['queryperformancecounter'])
print(vocab['EmptyClipboard'])
print(vocab[str.lower('EmptyClipboard')])
print(vocab['X_Y_Z'])
| 40.296703
| 160
| 0.619853
|
eaa5e390f675aba469aa94642b2e874dd2f4c336
| 1,583
|
py
|
Python
|
dask_ctl/tests/test_discovery.py
|
keewis/dask-ctl
|
3d12a3fba5e0e1322936276c7b2955384614cc2f
|
[
"BSD-3-Clause"
] | 17
|
2021-01-21T17:39:30.000Z
|
2022-03-04T10:11:37.000Z
|
dask_ctl/tests/test_discovery.py
|
keewis/dask-ctl
|
3d12a3fba5e0e1322936276c7b2955384614cc2f
|
[
"BSD-3-Clause"
] | 25
|
2021-01-21T19:27:39.000Z
|
2022-03-29T08:27:56.000Z
|
dask_ctl/tests/test_discovery.py
|
keewis/dask-ctl
|
3d12a3fba5e0e1322936276c7b2955384614cc2f
|
[
"BSD-3-Clause"
] | 10
|
2021-01-21T19:37:43.000Z
|
2022-03-03T17:20:03.000Z
|
import pytest
from typing import AsyncIterator
from distributed import LocalCluster
from dask_ctl.discovery import (
discover_cluster_names,
discover_clusters,
list_discovery_methods,
)
from dask_ctl.proxy import ProxyCluster
def test_discovery_methods():
assert "proxycluster" in list_discovery_methods()
@pytest.mark.asyncio
async def test_discover_cluster_names():
assert isinstance(discover_cluster_names(), AsyncIterator)
async with LocalCluster(scheduler_port=8786, asynchronous=True) as _:
count = 0
async for _ in discover_cluster_names():
count += 1
assert count == 1
@pytest.mark.asyncio
async def test_cluster_client():
from dask.distributed import Client
port = 8786
async with LocalCluster(scheduler_port=port, asynchronous=True) as _:
async with Client(
f"tcp://localhost:{port}", asynchronous=True, timeout=1
) as client:
assert int(client.scheduler.address.split(":")[-1]) == port
@pytest.mark.asyncio
async def test_discovery_list():
from dask_ctl.proxy import discover
port = 8786
async with LocalCluster(scheduler_port=port, asynchronous=True) as _:
async for name, _ in discover():
assert str(port) in name
@pytest.mark.asyncio
async def test_discover_clusters():
with LocalCluster() as cluster:
async for discovered_cluster in discover_clusters():
if isinstance(discovered_cluster, ProxyCluster):
assert cluster.scheduler_info == discovered_cluster.scheduler_info
| 28.267857
| 82
| 0.713203
|
fa7e54172a3aa415cd149de1e670f35501477d93
| 153,455
|
py
|
Python
|
src/command_modules/azure-cli-sql/azure/cli/command_modules/sql/tests/latest/test_sql_commands.py
|
AndrewLane/azure-cli
|
524491c580fc3c133f2d9859cef1c8251f4192e4
|
[
"MIT"
] | null | null | null |
src/command_modules/azure-cli-sql/azure/cli/command_modules/sql/tests/latest/test_sql_commands.py
|
AndrewLane/azure-cli
|
524491c580fc3c133f2d9859cef1c8251f4192e4
|
[
"MIT"
] | 3
|
2021-03-26T00:25:36.000Z
|
2022-03-29T22:03:55.000Z
|
src/command_modules/azure-cli-sql/azure/cli/command_modules/sql/tests/latest/test_sql_commands.py
|
david-driscoll/azure-cli
|
0dbf5e4ac2f35057bc9b8234b0a59612593552c5
|
[
"MIT"
] | 1
|
2020-07-13T22:28:09.000Z
|
2020-07-13T22:28:09.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import time
import os
from azure_devtools.scenario_tests import AllowLargeResponse
from azure.cli.core.util import CLIError
from azure.cli.core.mock import DummyCli
from azure.cli.testsdk.base import execute
from azure.cli.testsdk.exceptions import CliTestError
from azure.cli.testsdk import (
JMESPathCheck,
JMESPathCheckExists,
JMESPathCheckGreaterThan,
NoneCheck,
ResourceGroupPreparer,
ScenarioTest,
StorageAccountPreparer,
LiveScenarioTest,
record_only)
from azure.cli.testsdk.preparers import (
AbstractPreparer,
SingleValueReplacer)
from azure.cli.command_modules.sql.custom import (
ClientAuthenticationType,
ClientType)
from datetime import datetime, timedelta
from time import sleep
# Constants
server_name_prefix = 'clitestserver'
server_name_max_length = 63
managed_instance_name_prefix = 'clitestmi'
managed_instance_name_max_length = 63
class SqlServerPreparer(AbstractPreparer, SingleValueReplacer):
def __init__(self, name_prefix=server_name_prefix, parameter_name='server', location='westus',
admin_user='admin123', admin_password='SecretPassword123',
resource_group_parameter_name='resource_group', skip_delete=True):
super(SqlServerPreparer, self).__init__(name_prefix, server_name_max_length)
self.location = location
self.parameter_name = parameter_name
self.admin_user = admin_user
self.admin_password = admin_password
self.resource_group_parameter_name = resource_group_parameter_name
self.skip_delete = skip_delete
def create_resource(self, name, **kwargs):
group = self._get_resource_group(**kwargs)
template = 'az sql server create -l {} -g {} -n {} -u {} -p {}'
execute(DummyCli(), template.format(self.location, group, name, self.admin_user, self.admin_password))
return {self.parameter_name: name}
def remove_resource(self, name, **kwargs):
if not self.skip_delete:
group = self._get_resource_group(**kwargs)
execute(DummyCli(), 'az sql server delete -g {} -n {} --yes --no-wait'.format(group, name))
def _get_resource_group(self, **kwargs):
try:
return kwargs.get(self.resource_group_parameter_name)
except KeyError:
            template = 'To create a sql server, a resource group is required. Please add ' \
                       'decorator @{} in front of this sql server preparer.'
raise CliTestError(template.format(ResourceGroupPreparer.__name__,
self.resource_group_parameter_name))
class SqlServerMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(parameter_name='resource_group_1')
@ResourceGroupPreparer(parameter_name='resource_group_2')
def test_sql_server_mgmt(self, resource_group_1, resource_group_2, resource_group_location):
server_name_1 = self.create_random_name(server_name_prefix, server_name_max_length)
server_name_2 = self.create_random_name(server_name_prefix, server_name_max_length)
admin_login = 'admin123'
admin_passwords = ['SecretPassword123', 'SecretPassword456']
loc = 'westeurope'
user = admin_login
# test create sql server with minimal required parameters
server_1 = self.cmd('sql server create -g {} --name {} -l {} '
'--admin-user {} --admin-password {}'
.format(resource_group_1, server_name_1, loc, user, admin_passwords[0]),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('identity', None)]).get_output_in_json()
# test list sql server should be 1
self.cmd('sql server list -g {}'.format(resource_group_1), checks=[JMESPathCheck('length(@)', 1)])
# test update sql server
self.cmd('sql server update -g {} --name {} --admin-password {} -i'
.format(resource_group_1, server_name_1, admin_passwords[1]),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('identity.type', 'SystemAssigned')])
# test update without identity parameter, validate identity still exists
# also use --id instead of -g/-n
self.cmd('sql server update --id {} --admin-password {}'
.format(server_1['id'], admin_passwords[0]),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('identity.type', 'SystemAssigned')])
# test create another sql server, with identity this time
self.cmd('sql server create -g {} --name {} -l {} -i '
'--admin-user {} --admin-password {}'
.format(resource_group_2, server_name_2, loc, user, admin_passwords[0]),
checks=[
JMESPathCheck('name', server_name_2),
JMESPathCheck('resourceGroup', resource_group_2),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('identity.type', 'SystemAssigned')])
# test list sql server in that group should be 1
self.cmd('sql server list -g {}'.format(resource_group_2), checks=[JMESPathCheck('length(@)', 1)])
# test list sql server in the subscription should be at least 2
self.cmd('sql server list', checks=[JMESPathCheckGreaterThan('length(@)', 1)])
# test show sql server
self.cmd('sql server show -g {} --name {}'
.format(resource_group_1, server_name_1),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user)])
self.cmd('sql server show --id {}'
.format(server_1['id']),
checks=[
JMESPathCheck('name', server_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user)])
self.cmd('sql server list-usages -g {} -n {}'
.format(resource_group_1, server_name_1),
checks=[JMESPathCheck('[0].resourceName', server_name_1)])
# test delete sql server
self.cmd('sql server delete --id {} --yes'
.format(server_1['id']), checks=NoneCheck())
self.cmd('sql server delete -g {} --name {} --yes'
.format(resource_group_2, server_name_2), checks=NoneCheck())
# test list sql server should be 0
self.cmd('sql server list -g {}'.format(resource_group_1), checks=[NoneCheck()])
class SqlServerFirewallMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_firewall_mgmt(self, resource_group, resource_group_location, server):
rg = resource_group
firewall_rule_1 = 'rule1'
start_ip_address_1 = '0.0.0.0'
end_ip_address_1 = '255.255.255.255'
firewall_rule_2 = 'rule2'
start_ip_address_2 = '123.123.123.123'
end_ip_address_2 = '123.123.123.124'
# allow_all_azure_ips_rule = 'AllowAllAzureIPs'
# allow_all_azure_ips_address = '0.0.0.0'
# test sql server firewall-rule create
fw_rule_1 = self.cmd('sql server firewall-rule create --name {} -g {} --server {} '
'--start-ip-address {} --end-ip-address {}'
.format(firewall_rule_1, rg, server,
start_ip_address_1, end_ip_address_1),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)]).get_output_in_json()
# test sql server firewall-rule show by group/server/name
self.cmd('sql server firewall-rule show --name {} -g {} --server {}'
.format(firewall_rule_1, rg, server),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)])
# test sql server firewall-rule show by id
self.cmd('sql server firewall-rule show --id {}'
.format(fw_rule_1['id']),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)])
# test sql server firewall-rule update by group/server/name
self.cmd('sql server firewall-rule update --name {} -g {} --server {} '
'--start-ip-address {} --end-ip-address {}'
.format(firewall_rule_1, rg, server,
start_ip_address_2, end_ip_address_2),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_2),
JMESPathCheck('endIpAddress', end_ip_address_2)])
# test sql server firewall-rule update by id
self.cmd('sql server firewall-rule update --id {} '
'--start-ip-address {}'
.format(fw_rule_1['id'], start_ip_address_1),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_2)])
self.cmd('sql server firewall-rule update --name {} -g {} --server {} '
'--end-ip-address {}'
.format(firewall_rule_1, rg, server,
end_ip_address_1),
checks=[
JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)])
# test sql server firewall-rule create another rule
self.cmd('sql server firewall-rule create --name {} -g {} --server {} '
'--start-ip-address {} --end-ip-address {}'
.format(firewall_rule_2, rg, server,
start_ip_address_2, end_ip_address_2),
checks=[
JMESPathCheck('name', firewall_rule_2),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('startIpAddress', start_ip_address_2),
JMESPathCheck('endIpAddress', end_ip_address_2)])
# test sql server firewall-rule list
self.cmd('sql server firewall-rule list -g {} --server {}'
.format(rg, server), checks=[JMESPathCheck('length(@)', 2)])
# # test sql server firewall-rule create azure ip rule
# self.cmd('sql server firewall-rule allow-all-azure-ips -g {} --server {} '
# .format(rg, server), checks=[
# JMESPathCheck('name', allow_all_azure_ips_rule),
# JMESPathCheck('resourceGroup', rg),
# JMESPathCheck('startIpAddress', allow_all_azure_ips_address),
# JMESPathCheck('endIpAddress', allow_all_azure_ips_address)])
# # test sql server firewall-rule list
# self.cmd('sql server firewall-rule list -g {} --server {}'
# .format(rg, server), checks=[JMESPathCheck('length(@)', 3)])
# test sql server firewall-rule delete
self.cmd('sql server firewall-rule delete --id {}'
.format(fw_rule_1['id']), checks=NoneCheck())
self.cmd('sql server firewall-rule list -g {} --server {}'
.format(rg, server), checks=[JMESPathCheck('length(@)', 1)])
self.cmd('sql server firewall-rule delete --name {} -g {} --server {}'
.format(firewall_rule_2, rg, server), checks=NoneCheck())
self.cmd('sql server firewall-rule list -g {} --server {}'
.format(rg, server), checks=[NoneCheck()])
class SqlServerDbMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='eastus2')
@SqlServerPreparer(location='eastus2')
def test_sql_db_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb01"
database_name_2 = "cliautomationdb02"
database_name_3 = "cliautomationdb03"
update_service_objective = 'S1'
update_storage = '10GB'
update_storage_bytes = str(10 * 1024 * 1024 * 1024)
rg = resource_group
loc_display = 'eastus2'
# test sql db commands
db1 = self.cmd('sql db create -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('zoneRedundant', False)]).get_output_in_json()
self.cmd('sql db list -g {} --server {}'
.format(rg, server),
checks=[
JMESPathCheck('length(@)', 2),
JMESPathCheck('sort([].name)', sorted([database_name, 'master'])),
JMESPathCheck('[0].resourceGroup', rg),
JMESPathCheck('[1].resourceGroup', rg)])
self.cmd('sql db list-usages -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[JMESPathCheck('[0].resourceName', database_name)])
# Show by group/server/name
self.cmd('sql db show -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', rg)])
# Show by id
self.cmd('sql db show --id {}'
.format(db1['id']),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', rg)])
# Update by group/server/name
self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --max-size {}'
' --set tags.key1=value1'
.format(rg, server, database_name,
update_service_objective, update_storage),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('requestedServiceObjectiveName', update_service_objective),
JMESPathCheck('maxSizeBytes', update_storage_bytes),
JMESPathCheck('tags.key1', 'value1')])
# Update by id
self.cmd('sql db update --id {} --set tags.key2=value2'
.format(db1['id']),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('requestedServiceObjectiveName', update_service_objective),
JMESPathCheck('maxSizeBytes', update_storage_bytes),
JMESPathCheck('tags.key2', 'value2')])
# Rename by group/server/name
db2 = self.cmd('sql db rename -g {} -s {} -n {} --new-name {}'
.format(rg, server, database_name, database_name_2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_2)]).get_output_in_json()
# Rename by id
db3 = self.cmd('sql db rename --id {} --new-name {}'
.format(db2['id'], database_name_3),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_3)]).get_output_in_json()
# Delete by group/server/name
self.cmd('sql db delete -g {} --server {} --name {} --yes'
.format(rg, server, database_name_3),
checks=[NoneCheck()])
# Delete by id
self.cmd('sql db delete --id {} --yes'
.format(db3['id']),
checks=[NoneCheck()])
@ResourceGroupPreparer(location='westus2')
@SqlServerPreparer(location='westus2')
@AllowLargeResponse()
def test_sql_db_vcore_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb01"
# Create database with vcore edition
vcore_edition = 'GeneralPurpose'
self.cmd('sql db create -g {} --server {} --name {} --edition {}'
.format(resource_group, server, database_name, vcore_edition),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition)])
# Update database to dtu edition
dtu_edition = 'Standard'
dtu_capacity = 10
self.cmd('sql db update -g {} --server {} --name {} --edition {} --capacity {} --max-size 250GB'
.format(resource_group, server, database_name, dtu_edition, dtu_capacity),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('edition', dtu_edition),
JMESPathCheck('sku.tier', dtu_edition),
JMESPathCheck('sku.capacity', dtu_capacity)])
# Update database back to vcore edition
vcore_family = 'Gen5'
vcore_capacity = 4
self.cmd('sql db update -g {} --server {} --name {} -e {} -c {} -f {}'
.format(resource_group, server, database_name, vcore_edition,
vcore_capacity, vcore_family),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition),
JMESPathCheck('sku.capacity', vcore_capacity),
JMESPathCheck('sku.family', vcore_family)])
# Update only family
vcore_family_updated = 'Gen4'
self.cmd('sql db update -g {} -s {} -n {} --family {}'
.format(resource_group, server, database_name, vcore_family_updated),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition),
JMESPathCheck('sku.capacity', vcore_capacity),
JMESPathCheck('sku.family', vcore_family_updated)])
# Update only capacity
vcore_capacity_updated = 8
self.cmd('sql db update -g {} -s {} -n {} --capacity {}'
.format(resource_group, server, database_name, vcore_capacity_updated),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition),
JMESPathCheck('sku.capacity', vcore_capacity_updated),
JMESPathCheck('sku.family', vcore_family_updated)])
# Update only edition
vcore_edition_updated = 'BusinessCritical'
self.cmd('sql db update -g {} -s {} -n {} --tier {}'
.format(resource_group, server, database_name, vcore_edition_updated),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('edition', vcore_edition_updated),
JMESPathCheck('sku.tier', vcore_edition_updated),
JMESPathCheck('sku.capacity', vcore_capacity_updated),
JMESPathCheck('sku.family', vcore_family_updated)])
# Create database with vcore edition and all sku properties specified
database_name_2 = 'cliautomationdb02'
vcore_edition = 'GeneralPurpose'
self.cmd('sql db create -g {} --server {} --name {} -e {} -c {} -f {}'
.format(resource_group, server, database_name_2,
vcore_edition_updated, vcore_capacity_updated,
vcore_family_updated),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name_2),
JMESPathCheck('edition', vcore_edition_updated),
JMESPathCheck('sku.tier', vcore_edition_updated),
JMESPathCheck('sku.capacity', vcore_capacity_updated),
JMESPathCheck('sku.family', vcore_family_updated)])
class SqlServerDbOperationMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='southeastasia')
@SqlServerPreparer(location='southeastasia')
def test_sql_db_operation_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb01"
update_service_objective = 'S1'
# Create db
self.cmd('sql db create -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('status', 'Online')])
# Update DB with --no-wait
self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --no-wait'
.format(resource_group, server, database_name, update_service_objective))
# List operations
ops = list(
self.cmd('sql db op list -g {} -s {} -d {}'
.format(resource_group, server, database_name, update_service_objective),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].resourceGroup', resource_group),
JMESPathCheck('[0].databaseName', database_name)
])
.get_output_in_json())
# Cancel operation
self.cmd('sql db op cancel -g {} -s {} -d {} -n {}'
.format(resource_group, server, database_name, ops[0]['name']))
class SqlServerConnectionPolicyScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_server_connection_policy(self, resource_group, resource_group_location, server):
# Show
self.cmd('sql server conn-policy show -g {} -s {}'
.format(resource_group, server),
checks=[JMESPathCheck('connectionType', 'Default')])
# Update
for type in ('Proxy', 'Default', 'Redirect'):
self.cmd('sql server conn-policy update -g {} -s {} -t {}'
.format(resource_group, server, type),
checks=[JMESPathCheck('connectionType', type)])
class AzureActiveDirectoryAdministratorScenarioTest(LiveScenarioTest):
# convert to ScenarioTest and re-record when ISSUE #6011 is fixed
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_aad_admin(self, resource_group, server):
rg = resource_group
sn = server
oid = '5e90ef3b-9b42-4777-819b-25c36961ea4d'
oid2 = 'e4d43337-d52c-4a0c-b581-09055e0359a0'
user = 'DSEngAll'
user2 = 'TestUser'
self.cmd('sql server ad-admin create -s {} -g {} -i {} -u {}'
.format(sn, rg, oid, user),
checks=[JMESPathCheck('login', user),
JMESPathCheck('sid', oid)])
self.cmd('sql server ad-admin list -s {} -g {}'
.format(sn, rg),
checks=[JMESPathCheck('[0].login', user)])
self.cmd('sql server ad-admin update -s {} -g {} -u {} -i {}'
.format(sn, rg, user2, oid2),
checks=[JMESPathCheck('login', user2),
JMESPathCheck('sid', oid2)])
self.cmd('sql server ad-admin delete -s {} -g {}'
.format(sn, rg))
self.cmd('sql server ad-admin list -s {} -g {}'
.format(sn, rg),
checks=[JMESPathCheck('login', None)])
class SqlServerDbCopyScenarioTest(ScenarioTest):
@ResourceGroupPreparer(parameter_name='resource_group_1')
@ResourceGroupPreparer(parameter_name='resource_group_2')
@SqlServerPreparer(parameter_name='server1', resource_group_parameter_name='resource_group_1')
@SqlServerPreparer(parameter_name='server2', resource_group_parameter_name='resource_group_2')
@AllowLargeResponse()
def test_sql_db_copy(self, resource_group_1, resource_group_2,
resource_group_location,
server1, server2):
database_name = "cliautomationdb01"
database_copy_name = "cliautomationdb02"
service_objective = 'S1'
rg = resource_group_1
loc_display = 'westus'
# create database
self.cmd('sql db create -g {} --server {} --name {}'
.format(rg, server1, database_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online')])
# copy database to same server (min parameters)
self.cmd('sql db copy -g {} --server {} --name {} '
'--dest-name {}'
.format(rg, server1, database_name, database_copy_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_copy_name)
])
# copy database to same server (min parameters, plus service_objective)
self.cmd('sql db copy -g {} --server {} --name {} '
'--dest-name {} --service-objective {}'
.format(rg, server1, database_name, database_copy_name, service_objective),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_copy_name),
JMESPathCheck('requestedServiceObjectiveName', service_objective),
])
# copy database to elastic pool in other server (max parameters, other than
# service_objective)
pool_name = 'pool1'
pool_edition = 'Standard'
self.cmd('sql elastic-pool create -g {} --server {} --name {} '
' --edition {}'
.format(resource_group_2, server2, pool_name, pool_edition))
self.cmd('sql db copy -g {} --server {} --name {} '
'--dest-name {} --dest-resource-group {} --dest-server {} '
'--elastic-pool {}'
.format(rg, server1, database_name, database_copy_name,
resource_group_2, server2, pool_name),
checks=[
JMESPathCheck('resourceGroup', resource_group_2),
JMESPathCheck('name', database_copy_name),
JMESPathCheck('elasticPoolName', pool_name)
])
def _get_earliest_restore_date(db):
return datetime.strptime(db['earliestRestoreDate'], "%Y-%m-%dT%H:%M:%S.%f+00:00")
def _get_deleted_date(deleted_db):
return datetime.strptime(deleted_db['deletionDate'], "%Y-%m-%dT%H:%M:%S.%f+00:00")
def _create_db_wait_for_first_backup(test, rg, server, database_name):
# create db
db = test.cmd('sql db create -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('status', 'Online')]).get_output_in_json()
# Wait until earliestRestoreDate is in the past. When run live, this will take at least
    # 10 minutes. Unfortunately there's no way to speed this up.
earliest_restore_date = _get_earliest_restore_date(db)
if datetime.utcnow() <= earliest_restore_date:
print('Waiting until earliest restore date', earliest_restore_date)
while datetime.utcnow() <= earliest_restore_date:
sleep(10) # seconds
return db
class SqlServerDbRestoreScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
@AllowLargeResponse()
def test_sql_db_restore(self, resource_group, resource_group_location, server):
rg = resource_group
database_name = 'cliautomationdb01'
# Standalone db
restore_service_objective = 'S1'
restore_edition = 'Standard'
restore_standalone_database_name = 'cliautomationdb01restore1'
restore_pool_database_name = 'cliautomationdb01restore2'
elastic_pool = 'cliautomationpool1'
# create elastic pool
self.cmd('sql elastic-pool create -g {} -s {} -n {}'
.format(rg, server, elastic_pool))
# Create database and wait for first backup to exist
_create_db_wait_for_first_backup(self, rg, server, database_name)
# Restore to standalone db
self.cmd('sql db restore -g {} -s {} -n {} -t {} --dest-name {}'
' --service-objective {} --edition {}'
.format(rg, server, database_name, datetime.utcnow().isoformat(),
restore_standalone_database_name, restore_service_objective,
restore_edition),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', restore_standalone_database_name),
JMESPathCheck('requestedServiceObjectiveName',
restore_service_objective),
JMESPathCheck('status', 'Online')])
# Restore to db into pool. Note that 'elasticPoolName' is populated
# in transform func which only runs after `show`/`list` commands.
self.cmd('sql db restore -g {} -s {} -n {} -t {} --dest-name {}'
' --elastic-pool {}'
.format(rg, server, database_name, datetime.utcnow().isoformat(),
restore_pool_database_name, elastic_pool),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', restore_pool_database_name),
JMESPathCheck('status', 'Online')])
self.cmd('sql db show -g {} -s {} -n {}'
.format(rg, server, restore_pool_database_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', restore_pool_database_name),
JMESPathCheck('status', 'Online'),
JMESPathCheck('elasticPoolName', elastic_pool)])
class SqlServerDbRestoreDeletedScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
@AllowLargeResponse()
def test_sql_db_restore_deleted(self, resource_group, resource_group_location, server):
rg = resource_group
database_name = 'cliautomationdb01'
# Standalone db
restore_service_objective = 'S1'
restore_edition = 'Standard'
restore_database_name1 = 'cliautomationdb01restore1'
restore_database_name2 = 'cliautomationdb01restore2'
# Create database and wait for first backup to exist
_create_db_wait_for_first_backup(self, rg, server, database_name)
# Delete database
self.cmd('sql db delete -g {} -s {} -n {} --yes'.format(rg, server, database_name))
# Wait for deleted database to become visible. When run live, this will take around
        # 5-10 minutes. Unfortunately there's no way to speed this up. Use timeout to ensure
# test doesn't loop forever if there's a bug.
start_time = datetime.now()
timeout = timedelta(0, 15 * 60) # 15 minutes timeout
while True:
deleted_dbs = list(self.cmd('sql db list-deleted -g {} -s {}'.format(rg, server)).get_output_in_json())
if deleted_dbs:
# Deleted db found, stop polling
break
# Deleted db not found, sleep (if running live) and then poll again.
if self.is_live:
self.assertTrue(datetime.now() < start_time + timeout, 'Deleted db not found before timeout expired.')
sleep(10) # seconds
deleted_db = deleted_dbs[0]
# Restore deleted to latest point in time
self.cmd('sql db restore -g {} -s {} -n {} --deleted-time {} --dest-name {}'
' --service-objective {} --edition {}'
.format(rg, server, database_name, _get_deleted_date(deleted_db).isoformat(),
restore_database_name1, restore_service_objective,
restore_edition),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', restore_database_name1),
JMESPathCheck('requestedServiceObjectiveName',
restore_service_objective),
JMESPathCheck('status', 'Online')])
# Restore deleted to earlier point in time
self.cmd('sql db restore -g {} -s {} -n {} -t {} --deleted-time {} --dest-name {}'
.format(rg, server, database_name, _get_earliest_restore_date(deleted_db).isoformat(),
_get_deleted_date(deleted_db).isoformat(), restore_database_name2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', restore_database_name2),
JMESPathCheck('status', 'Online')])
class SqlServerDbSecurityScenarioTest(ScenarioTest):
def _get_storage_endpoint(self, storage_account, resource_group):
return self.cmd('storage account show -g {} -n {}'
' --query primaryEndpoints.blob'
.format(resource_group, storage_account)).get_output_in_json()
def _get_storage_key(self, storage_account, resource_group):
return self.cmd('storage account keys list -g {} -n {} --query [0].value'
.format(resource_group, storage_account)).get_output_in_json()
@ResourceGroupPreparer()
@ResourceGroupPreparer(parameter_name='resource_group_2')
@SqlServerPreparer()
@StorageAccountPreparer()
@StorageAccountPreparer(parameter_name='storage_account_2',
resource_group_parameter_name='resource_group_2')
def test_sql_db_security_mgmt(self, resource_group, resource_group_2,
resource_group_location, server,
storage_account, storage_account_2):
database_name = "cliautomationdb01"
# get storage account endpoint and key
storage_endpoint = self._get_storage_endpoint(storage_account, resource_group)
key = self._get_storage_key(storage_account, resource_group)
# create db
self.cmd('sql db create -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', database_name),
JMESPathCheck('status', 'Online')])
# get audit policy
self.cmd('sql db audit-policy show -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[JMESPathCheck('resourceGroup', resource_group)])
# update audit policy - enable
state_enabled = 'Enabled'
retention_days = 30
audit_actions_input = 'DATABASE_LOGOUT_GROUP DATABASE_ROLE_MEMBER_CHANGE_GROUP'
audit_actions_expected = ['DATABASE_LOGOUT_GROUP',
'DATABASE_ROLE_MEMBER_CHANGE_GROUP']
self.cmd('sql db audit-policy update -g {} -s {} -n {}'
' --state {} --storage-key {} --storage-endpoint={}'
' --retention-days={} --actions {}'
.format(resource_group, server, database_name, state_enabled, key,
storage_endpoint, retention_days, audit_actions_input),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageAccountAccessKey', ''), # service doesn't return it
JMESPathCheck('storageEndpoint', storage_endpoint),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# update audit policy - specify storage account and resource group. use secondary key
storage_endpoint_2 = self._get_storage_endpoint(storage_account_2, resource_group_2)
self.cmd('sql db audit-policy update -g {} -s {} -n {} --storage-account {}'
.format(resource_group, server, database_name, storage_account_2,
resource_group_2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageAccountAccessKey', ''), # service doesn't return it
JMESPathCheck('storageEndpoint', storage_endpoint_2),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# update audit policy - disable
state_disabled = 'Disabled'
self.cmd('sql db audit-policy update -g {} -s {} -n {} --state {}'
.format(resource_group, server, database_name, state_disabled),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_disabled),
JMESPathCheck('storageAccountAccessKey', ''), # service doesn't return it
JMESPathCheck('storageEndpoint', storage_endpoint_2),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
# get threat detection policy
self.cmd('sql db threat-policy show -g {} -s {} -n {}'
.format(resource_group, server, database_name),
checks=[JMESPathCheck('resourceGroup', resource_group)])
# update threat detection policy - enable
disabled_alerts_input = 'Sql_Injection_Vulnerability Access_Anomaly'
disabled_alerts_expected = 'Sql_Injection_Vulnerability;Access_Anomaly'
email_addresses_input = 'test1@example.com test2@example.com'
email_addresses_expected = 'test1@example.com;test2@example.com'
email_account_admins = 'Enabled'
self.cmd('sql db threat-policy update -g {} -s {} -n {}'
' --state {} --storage-key {} --storage-endpoint {}'
' --retention-days {} --email-addresses {} --disabled-alerts {}'
' --email-account-admins {}'
.format(resource_group, server, database_name, state_enabled, key,
storage_endpoint, retention_days, email_addresses_input,
disabled_alerts_input, email_account_admins),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageAccountAccessKey', key),
JMESPathCheck('storageEndpoint', storage_endpoint),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('emailAddresses', email_addresses_expected),
JMESPathCheck('disabledAlerts', disabled_alerts_expected),
JMESPathCheck('emailAccountAdmins', email_account_admins)])
# update threat policy - specify storage account and resource group. use secondary key
key_2 = self._get_storage_key(storage_account_2, resource_group_2)
self.cmd('sql db threat-policy update -g {} -s {} -n {} --storage-account {}'
.format(resource_group, server, database_name, storage_account_2,
resource_group_2),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_enabled),
JMESPathCheck('storageAccountAccessKey', key_2),
JMESPathCheck('storageEndpoint', storage_endpoint_2),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('emailAddresses', email_addresses_expected),
JMESPathCheck('disabledAlerts', disabled_alerts_expected),
JMESPathCheck('emailAccountAdmins', email_account_admins)])
        # disable audit policy again (the command and checks below target the audit policy)
self.cmd('sql db audit-policy update -g {} -s {} -n {} --state {}'
.format(resource_group, server, database_name, state_disabled),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('state', state_disabled),
JMESPathCheck('storageAccountAccessKey', ''), # service doesn't return it
JMESPathCheck('storageEndpoint', storage_endpoint_2),
JMESPathCheck('retentionDays', retention_days),
JMESPathCheck('auditActionsAndGroups', audit_actions_expected)])
class SqlServerDwMgmtScenarioTest(ScenarioTest):
# pylint: disable=too-many-instance-attributes
@ResourceGroupPreparer()
@SqlServerPreparer()
@AllowLargeResponse()
def test_sql_dw_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb01"
update_service_objective = 'DW200'
update_storage = '20TB'
update_storage_bytes = str(20 * 1024 * 1024 * 1024 * 1024)
rg = resource_group
loc_display = 'westus'
# test sql db commands
dw = self.cmd('sql dw create -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('location', loc_display),
JMESPathCheck('edition', 'DataWarehouse'),
JMESPathCheck('sku.tier', 'DataWarehouse'),
JMESPathCheck('status', 'Online')]).get_output_in_json()
# Sanity check that the default max size is not equal to the size that we will update to
# later. That way we know that update is actually updating the size.
self.assertNotEqual(dw['maxSizeBytes'], update_storage_bytes,
'Initial max size in bytes is equal to the value we want to update to later,'
' so we will not be able to verify that update max size is actually updating.')
# DataWarehouse is a little quirky and is considered to be both a database and its
# separate own type of thing. (Why? Because it has the same REST endpoint as regular
# database, so it must be a database. However it has only a subset of supported operations,
# so to clarify which operations are supported by dw we group them under `sql dw`.) So the
# dw shows up under both `db list` and `dw list`.
self.cmd('sql db list -g {} --server {}'
.format(rg, server),
checks=[
JMESPathCheck('length(@)', 2), # includes dw and master
JMESPathCheck('sort([].name)', sorted([database_name, 'master'])),
JMESPathCheck('[0].resourceGroup', rg),
JMESPathCheck('[1].resourceGroup', rg)])
self.cmd('sql dw list -g {} --server {}'
.format(rg, server),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', database_name),
JMESPathCheck('[0].resourceGroup', rg)])
self.cmd('sql db show -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', rg)])
# pause/resume
self.cmd('sql dw pause -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[NoneCheck()])
self.cmd('sql dw show --id {}'
.format(dw['id']),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('status', 'Paused')])
self.cmd('sql dw resume -g {} --server {} --name {}'
.format(rg, server, database_name),
checks=[NoneCheck()])
self.cmd('sql dw show --id {}'
.format(dw['id']),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('status', 'Online')])
# Update DW storage
self.cmd('sql dw update -g {} -s {} -n {} --max-size {}'
' --set tags.key1=value1'
.format(rg, server, database_name, update_storage),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('maxSizeBytes', update_storage_bytes),
JMESPathCheck('tags.key1', 'value1')])
# Update DW service objective
self.cmd('sql dw update --id {} --service-objective {}'
.format(dw['id'], update_service_objective),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('requestedServiceObjectiveName', update_service_objective),
JMESPathCheck('maxSizeBytes', update_storage_bytes),
JMESPathCheck('tags.key1', 'value1')])
# Delete DW
self.cmd('sql dw delete -g {} --server {} --name {} --yes'
.format(rg, server, database_name),
checks=[NoneCheck()])
self.cmd('sql dw delete --id {} --yes'
.format(dw['id']),
checks=[NoneCheck()])
class SqlServerDnsAliasMgmtScenarioTest(ScenarioTest):
# create 2 servers in the same resource group, and 1 server in a different resource group
@ResourceGroupPreparer(parameter_name="resource_group_1",
parameter_name_for_location="resource_group_location_1")
@ResourceGroupPreparer(parameter_name="resource_group_2",
parameter_name_for_location="resource_group_location_2")
@SqlServerPreparer(parameter_name="server_name_1",
resource_group_parameter_name="resource_group_1")
@SqlServerPreparer(parameter_name="server_name_2",
resource_group_parameter_name="resource_group_1")
@SqlServerPreparer(parameter_name="server_name_3",
resource_group_parameter_name="resource_group_2")
def test_sql_server_dns_alias_mgmt(self,
resource_group_1, resource_group_location_1,
resource_group_2, resource_group_location_2,
server_name_1, server_name_2, server_name_3):
# helper class so that it's clear which servers are in which groups
class ServerInfo(object): # pylint: disable=too-few-public-methods
def __init__(self, name, group, location):
self.name = name
self.group = group
self.location = location
s1 = ServerInfo(server_name_1, resource_group_1, resource_group_location_1)
s2 = ServerInfo(server_name_2, resource_group_1, resource_group_location_1)
s3 = ServerInfo(server_name_3, resource_group_2, resource_group_location_2)
alias_name = 'alias1'
# verify setup
for s in (s1, s2, s3):
self.cmd('sql server show -g {} -n {}'
.format(s.group, s.name),
checks=[
JMESPathCheck('name', s.name),
JMESPathCheck('resourceGroup', s.group)])
# Create server dns alias
self.cmd('sql server dns-alias create -n {} -s {} -g {}'
.format(alias_name, s1.name, s1.group),
checks=[
JMESPathCheck('name', alias_name),
JMESPathCheck('resourceGroup', s1.group)
])
# Check that alias is created on a right server
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s1.name, s1.group),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', alias_name)
])
# Repoint alias to the server within the same resource group
self.cmd('sql server dns-alias set -n {} --original-server {} -s {} -g {}'
.format(alias_name, s1.name, s2.name, s2.group),
checks=[NoneCheck()])
# List the aliases on old server to check if alias is not pointing there
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s1.name, s1.group),
checks=[
JMESPathCheck('length(@)', 0)
])
# Check if alias is pointing to new server
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s2.name, s2.group),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', alias_name)
])
# Repoint alias to the same server (to check that operation is idempotent)
self.cmd('sql server dns-alias set -n {} --original-server {} -s {} -g {}'
.format(alias_name, s1.name, s2.name, s2.group),
checks=[NoneCheck()])
# Check if alias is pointing to the right server
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s2.name, s2.group),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', alias_name)
])
        # Repoint alias to a server in a different resource group (requires --original-resource-group)
self.cmd('sql server dns-alias set -n {} --original-server {} --original-resource-group {} -s {} -g {}'
.format(alias_name, s2.name, s2.group, s3.name, s3.group),
checks=[NoneCheck()])
# List the aliases on old server to check if alias is not pointing there
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s2.name, s2.group),
checks=[
JMESPathCheck('length(@)', 0)
])
# Check if alias is pointing to new server
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s3.name, s3.group),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', alias_name)
])
# Drop alias
self.cmd('sql server dns-alias delete -n {} -s {} -g {}'
.format(alias_name, s3.name, s3.group),
checks=[NoneCheck()])
# Verify that alias got dropped correctly
self.cmd('sql server dns-alias list -s {} -g {}'
.format(s3.name, s3.group),
checks=[
JMESPathCheck('length(@)', 0)
])
class SqlServerDbReplicaMgmtScenarioTest(ScenarioTest):
# create 2 servers in the same resource group, and 1 server in a different resource group
@ResourceGroupPreparer(parameter_name="resource_group_1",
parameter_name_for_location="resource_group_location_1")
@ResourceGroupPreparer(parameter_name="resource_group_2",
parameter_name_for_location="resource_group_location_2")
@SqlServerPreparer(parameter_name="server_name_1",
resource_group_parameter_name="resource_group_1")
@SqlServerPreparer(parameter_name="server_name_2",
resource_group_parameter_name="resource_group_1")
@SqlServerPreparer(parameter_name="server_name_3",
resource_group_parameter_name="resource_group_2")
@AllowLargeResponse()
def test_sql_db_replica_mgmt(self,
resource_group_1, resource_group_location_1,
resource_group_2, resource_group_location_2,
server_name_1, server_name_2, server_name_3):
database_name = "cliautomationdb01"
service_objective = 'S1'
# helper class so that it's clear which servers are in which groups
class ServerInfo(object): # pylint: disable=too-few-public-methods
def __init__(self, name, group, location):
self.name = name
self.group = group
self.location = location
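        # s1 and s2 share resource_group_1; s3 lives in resource_group_2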
s1 = ServerInfo(server_name_1, resource_group_1, resource_group_location_1)
s2 = ServerInfo(server_name_2, resource_group_1, resource_group_location_1)
s3 = ServerInfo(server_name_3, resource_group_2, resource_group_location_2)
# verify setup
for s in (s1, s2, s3):
self.cmd('sql server show -g {} -n {}'
.format(s.group, s.name),
checks=[
JMESPathCheck('name', s.name),
JMESPathCheck('resourceGroup', s.group)])
# create db in first server
self.cmd('sql db create -g {} -s {} -n {}'
.format(s1.group, s1.name, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s1.group)])
# create replica in second server with min params
        # partner resource group unspecified because s1.group == s2.group
self.cmd('sql db replica create -g {} -s {} -n {} --partner-server {}'
.format(s1.group, s1.name, database_name,
s2.name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s2.group)])
# check that the replica was created in the correct server
self.cmd('sql db show -g {} -s {} -n {}'
.format(s2.group, s2.name, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s2.group)])
# Delete replica in second server and recreate with explicit service objective
self.cmd('sql db delete -g {} -s {} -n {} --yes'
.format(s2.group, s2.name, database_name))
self.cmd('sql db replica create -g {} -s {} -n {} --partner-server {} '
' --service-objective {}'
.format(s1.group, s1.name, database_name,
s2.name, service_objective),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s2.group),
JMESPathCheck('requestedServiceObjectiveName', service_objective)])
# Create replica in pool in third server with max params (except service objective)
pool_name = 'pool1'
pool_edition = 'Standard'
self.cmd('sql elastic-pool create -g {} --server {} --name {} '
' --edition {}'
.format(s3.group, s3.name, pool_name, pool_edition))
self.cmd('sql db replica create -g {} -s {} -n {} --partner-server {}'
' --partner-resource-group {} --elastic-pool {}'
.format(s1.group, s1.name, database_name,
s3.name, s3.group, pool_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s3.group),
JMESPathCheck('elasticPoolName', pool_name)])
# check that the replica was created in the correct server
self.cmd('sql db show -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', s3.group)])
# list replica links on s1 - it should link to s2 and s3
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s1.group, s1.name, database_name),
checks=[JMESPathCheck('length(@)', 2)])
# list replica links on s3 - it should link only to s1
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].role', 'Secondary'),
JMESPathCheck('[0].partnerRole', 'Primary')])
# Failover to s3.
self.cmd('sql db replica set-primary -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[NoneCheck()])
# list replica links on s3 - it should link to s1 and s2
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[JMESPathCheck('length(@)', 2)])
# Stop replication from s3 to s2 twice. Second time should be no-op.
for _ in range(2):
# Delete link
self.cmd('sql db replica delete-link -g {} -s {} -n {} --partner-resource-group {}'
' --partner-server {} --yes'
.format(s3.group, s3.name, database_name, s2.group, s2.name),
checks=[NoneCheck()])
# Verify link was deleted. s3 should still be the primary.
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].role', 'Primary'),
JMESPathCheck('[0].partnerRole', 'Secondary')])
# Failover to s3 again (should be no-op, it's already primary)
self.cmd('sql db replica set-primary -g {} -s {} -n {} --allow-data-loss'
.format(s3.group, s3.name, database_name),
checks=[NoneCheck()])
# s3 should still be the primary.
self.cmd('sql db replica list-links -g {} -s {} -n {}'
.format(s3.group, s3.name, database_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].role', 'Primary'),
JMESPathCheck('[0].partnerRole', 'Secondary')])
# Force failover back to s1
self.cmd('sql db replica set-primary -g {} -s {} -n {} --allow-data-loss'
.format(s1.group, s1.name, database_name),
checks=[NoneCheck()])
class SqlElasticPoolsMgmtScenarioTest(ScenarioTest):
def __init__(self, method_name):
super(SqlElasticPoolsMgmtScenarioTest, self).__init__(method_name)
self.pool_name = "cliautomationpool01"
def verify_activities(self, activities, resource_group, server):
        if not isinstance(activities, list):
            raise AssertionError("Actual value '{}' expected to be list class."
                                 .format(activities))
        for activity in activities:
            if not isinstance(activity, dict):
                raise AssertionError("Actual value '{}' expected to be dict class."
                                     .format(activity))
if activity['resourceGroup'] != resource_group:
raise AssertionError("Actual value '{}' != Expected value {}"
.format(activity['resourceGroup'], resource_group))
elif activity['serverName'] != server:
raise AssertionError("Actual value '{}' != Expected value {}"
.format(activity['serverName'], server))
elif activity['currentElasticPoolName'] != self.pool_name:
raise AssertionError("Actual value '{}' != Expected value {}"
.format(activity['currentElasticPoolName'], self.pool_name))
return True
@ResourceGroupPreparer(location='eastus2')
@SqlServerPreparer(location='eastus2')
@AllowLargeResponse()
def test_sql_elastic_pools_mgmt(self, resource_group, resource_group_location, server):
database_name = "cliautomationdb02"
pool_name2 = "cliautomationpool02"
edition = 'Standard'
dtu = 1200
db_dtu_min = 10
db_dtu_max = 50
storage = '1200GB'
storage_mb = 1228800
updated_dtu = 50
updated_db_dtu_min = 10
updated_db_dtu_max = 50
updated_storage = '50GB'
updated_storage_mb = 51200
db_service_objective = 'S1'
rg = resource_group
loc_display = 'East US 2'
# test sql elastic-pool commands
elastic_pool_1 = self.cmd('sql elastic-pool create -g {} --server {} --name {} '
'--dtu {} --edition {} --db-dtu-min {} --db-dtu-max {} '
'--storage {}'
.format(rg, server, self.pool_name, dtu,
edition, db_dtu_min, db_dtu_max, storage),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('location', loc_display),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('dtu', dtu),
JMESPathCheck('sku.capacity', dtu),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('perDatabaseSettings.minCapacity', db_dtu_min),
JMESPathCheck('perDatabaseSettings.maxCapacity', db_dtu_max),
JMESPathCheck('edition', edition),
JMESPathCheck('sku.tier', edition)]).get_output_in_json()
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, self.pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('edition', edition),
JMESPathCheck('storageMb', storage_mb),
JMESPathCheck('zoneRedundant', False)])
self.cmd('sql elastic-pool show --id {}'
.format(elastic_pool_1['id']),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('edition', edition),
JMESPathCheck('storageMb', storage_mb)])
self.cmd('sql elastic-pool list -g {} --server {}'
.format(rg, server),
checks=[
JMESPathCheck('[0].resourceGroup', rg),
JMESPathCheck('[0].name', self.pool_name),
JMESPathCheck('[0].state', 'Ready'),
JMESPathCheck('[0].databaseDtuMin', db_dtu_min),
JMESPathCheck('[0].databaseDtuMax', db_dtu_max),
JMESPathCheck('[0].edition', edition),
JMESPathCheck('[0].storageMb', storage_mb)])
self.cmd('sql elastic-pool update -g {} --server {} --name {} '
'--dtu {} --storage {} --set tags.key1=value1'
.format(rg, server, self.pool_name,
updated_dtu, updated_storage),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('dtu', updated_dtu),
JMESPathCheck('sku.capacity', updated_dtu),
JMESPathCheck('edition', edition),
JMESPathCheck('sku.tier', edition),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('perDatabaseSettings.minCapacity', db_dtu_min),
JMESPathCheck('perDatabaseSettings.maxCapacity', db_dtu_max),
JMESPathCheck('storageMb', updated_storage_mb),
JMESPathCheck('maxSizeBytes', updated_storage_mb * 1024 * 1024),
JMESPathCheck('tags.key1', 'value1')])
self.cmd('sql elastic-pool update --id {} '
'--dtu {} --db-dtu-min {} --db-dtu-max {} --storage {}'
.format(elastic_pool_1['id'], dtu,
updated_db_dtu_min, updated_db_dtu_max,
storage),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('dtu', dtu),
JMESPathCheck('sku.capacity', dtu),
JMESPathCheck('databaseDtuMin', updated_db_dtu_min),
JMESPathCheck('databaseDtuMax', updated_db_dtu_max),
JMESPathCheck('perDatabaseSettings.minCapacity', updated_db_dtu_min),
JMESPathCheck('perDatabaseSettings.maxCapacity', updated_db_dtu_max),
JMESPathCheck('storageMb', storage_mb),
JMESPathCheck('maxSizeBytes', storage_mb * 1024 * 1024),
JMESPathCheck('tags.key1', 'value1')])
self.cmd('sql elastic-pool update -g {} --server {} --name {} '
'--remove tags.key1'
.format(rg, server, self.pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('tags', {})])
# create a second pool with minimal params
self.cmd('sql elastic-pool create -g {} --server {} --name {} '
.format(rg, server, pool_name2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name2),
JMESPathCheck('location', loc_display),
JMESPathCheck('state', 'Ready')])
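        # Both pools should now be returned when listing pools on the server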
self.cmd('sql elastic-pool list -g {} -s {}'.format(rg, server),
checks=[JMESPathCheck('length(@)', 2)])
# Create a database directly in an Azure sql elastic pool.
# Note that 'elasticPoolName' is populated in transform
# func which only runs after `show`/`list` commands.
self.cmd('sql db create -g {} --server {} --name {} '
'--elastic-pool {}'
.format(rg, server, database_name, self.pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('requestedServiceObjectiveName', 'ElasticPool'),
JMESPathCheck('status', 'Online')])
        self.cmd('sql db show -g {} --server {} --name {}'
                 .format(rg, server, database_name),
                 checks=[JMESPathCheck('elasticPoolName', self.pool_name)])
        # Move database to second pool. Specify the service objective explicitly to exercise that parameter.
# Note that 'elasticPoolName' is populated in transform
# func which only runs after `show`/`list` commands.
self.cmd('sql db update -g {} -s {} -n {} --elastic-pool {}'
' --service-objective ElasticPool'
.format(rg, server, database_name, pool_name2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('requestedServiceObjectiveName', 'ElasticPool'),
JMESPathCheck('status', 'Online')])
        self.cmd('sql db show -g {} --server {} --name {}'
                 .format(rg, server, database_name),
                 checks=[JMESPathCheck('elasticPoolName', pool_name2)])
# Remove database from pool
self.cmd('sql db update -g {} -s {} -n {} --service-objective {}'
.format(rg, server, database_name, db_service_objective),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('requestedServiceObjectiveName', db_service_objective),
JMESPathCheck('status', 'Online')])
# Move database back into pool
# Note that 'elasticPoolName' is populated in transform
# func which only runs after `show`/`list` commands.
self.cmd('sql db update -g {} -s {} -n {} --elastic-pool {}'
' --service-objective ElasticPool'
.format(rg, server, database_name, self.pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('requestedServiceObjectiveName', 'ElasticPool'),
JMESPathCheck('status', 'Online')])
self.cmd('sql db show -g {} -s {} -n {}'
.format(rg, server, database_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolName', self.pool_name),
JMESPathCheck('requestedServiceObjectiveName', 'ElasticPool'),
JMESPathCheck('status', 'Online')])
# List databases in a pool
self.cmd('sql elastic-pool list-dbs -g {} -s {} -n {}'
.format(rg, server, self.pool_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].resourceGroup', rg),
JMESPathCheck('[0].name', database_name),
JMESPathCheck('[0].elasticPoolName', self.pool_name)])
# List databases in a pool - alternative command
self.cmd('sql db list -g {} -s {} --elastic-pool {}'
.format(rg, server, self.pool_name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].resourceGroup', rg),
JMESPathCheck('[0].name', database_name),
JMESPathCheck('[0].elasticPoolName', self.pool_name)])
# delete sql server database
self.cmd('sql db delete -g {} --server {} --name {} --yes'
.format(rg, server, database_name),
checks=[NoneCheck()])
# delete sql elastic pool
self.cmd('sql elastic-pool delete -g {} --server {} --name {}'
.format(rg, server, self.pool_name),
checks=[NoneCheck()])
# delete sql elastic pool by id
self.cmd('sql elastic-pool delete --id {}'
.format(elastic_pool_1['id']),
checks=[NoneCheck()])
@ResourceGroupPreparer(location='westus2')
@SqlServerPreparer(location='westus2')
@AllowLargeResponse()
def test_sql_elastic_pools_vcore_mgmt(self, resource_group, resource_group_location, server):
pool_name = "cliautomationpool1"
# Create pool with vcore edition
vcore_edition = 'GeneralPurpose'
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {}'
.format(resource_group, server, pool_name, vcore_edition),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition)])
# Update pool to dtu edition
dtu_edition = 'Standard'
dtu_capacity = 100
db_dtu_max = 10
self.cmd('sql elastic-pool update -g {} --server {} --name {} --edition {} --capacity {} --max-size 250GB '
'--db-max-dtu {}'
.format(resource_group, server, pool_name, dtu_edition, dtu_capacity, db_dtu_max),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name),
JMESPathCheck('edition', dtu_edition),
JMESPathCheck('sku.tier', dtu_edition),
JMESPathCheck('dtu', dtu_capacity),
JMESPathCheck('sku.capacity', dtu_capacity),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('perDatabaseSettings.maxCapacity', db_dtu_max)])
# Update pool back to vcore edition
vcore_family = 'Gen5'
vcore_capacity = 4
self.cmd('sql elastic-pool update -g {} --server {} --name {} -e {} -c {} -f {} '
'--db-max-capacity 2'
.format(resource_group, server, pool_name, vcore_edition,
vcore_capacity, vcore_family),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition),
JMESPathCheck('dtu', None),
JMESPathCheck('sku.capacity', vcore_capacity),
JMESPathCheck('sku.family', vcore_family),
JMESPathCheck('databaseDtuMin', None),
JMESPathCheck('databaseDtuMax', None),
JMESPathCheck('perDatabaseSettings.maxCapacity', 2)])
# Update only family
vcore_family_updated = 'Gen4'
self.cmd('sql elastic-pool update -g {} -s {} -n {} --family {}'
.format(resource_group, server, pool_name, vcore_family_updated),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition),
JMESPathCheck('dtu', None),
JMESPathCheck('sku.capacity', vcore_capacity),
JMESPathCheck('sku.family', vcore_family_updated),
JMESPathCheck('databaseDtuMin', None),
JMESPathCheck('databaseDtuMax', None),
JMESPathCheck('perDatabaseSettings.maxCapacity', 2)])
# Update only capacity
vcore_capacity_updated = 8
self.cmd('sql elastic-pool update -g {} -s {} -n {} --capacity {}'
.format(resource_group, server, pool_name, vcore_capacity_updated),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name),
JMESPathCheck('edition', vcore_edition),
JMESPathCheck('sku.tier', vcore_edition),
JMESPathCheck('dtu', None),
JMESPathCheck('sku.capacity', vcore_capacity_updated),
JMESPathCheck('sku.family', vcore_family_updated),
JMESPathCheck('databaseDtuMin', None),
JMESPathCheck('databaseDtuMax', None),
JMESPathCheck('perDatabaseSettings.maxCapacity', 2)])
# Update only edition
vcore_edition_updated = 'BusinessCritical'
self.cmd('sql elastic-pool update -g {} -s {} -n {} --tier {}'
.format(resource_group, server, pool_name, vcore_edition_updated),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name),
JMESPathCheck('edition', vcore_edition_updated),
JMESPathCheck('sku.tier', vcore_edition_updated),
JMESPathCheck('dtu', None),
JMESPathCheck('sku.capacity', vcore_capacity_updated),
JMESPathCheck('sku.family', vcore_family_updated),
JMESPathCheck('databaseDtuMin', None),
JMESPathCheck('databaseDtuMax', None),
JMESPathCheck('perDatabaseSettings.maxCapacity', 2)])
# Update only db min & max cap
db_min_capacity_updated = 0.5
db_max_capacity_updated = 1
self.cmd('sql elastic-pool update -g {} -s {} -n {} --db-max-capacity {} --db-min-capacity {}'
.format(resource_group, server, pool_name, db_max_capacity_updated, db_min_capacity_updated),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name),
JMESPathCheck('edition', vcore_edition_updated),
JMESPathCheck('sku.tier', vcore_edition_updated),
JMESPathCheck('dtu', None),
JMESPathCheck('sku.capacity', vcore_capacity_updated),
JMESPathCheck('sku.family', vcore_family_updated),
JMESPathCheck('databaseDtuMin', None),
JMESPathCheck('databaseDtuMax', None),
JMESPathCheck('perDatabaseSettings.minCapacity', db_min_capacity_updated),
JMESPathCheck('perDatabaseSettings.maxCapacity', db_max_capacity_updated)])
# Create pool with vcore edition and all sku properties specified
pool_name_2 = 'cliautomationpool2'
vcore_edition = 'GeneralPurpose'
self.cmd('sql elastic-pool create -g {} --server {} --name {} -e {} -c {} -f {}'
.format(resource_group, server, pool_name_2,
vcore_edition_updated, vcore_capacity_updated,
vcore_family_updated),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', pool_name_2),
JMESPathCheck('edition', vcore_edition_updated),
JMESPathCheck('sku.tier', vcore_edition_updated),
JMESPathCheck('dtu', None),
JMESPathCheck('sku.capacity', vcore_capacity_updated),
JMESPathCheck('sku.family', vcore_family_updated),
JMESPathCheck('databaseDtuMin', None),
JMESPathCheck('databaseDtuMax', None)])
class SqlElasticPoolOperationMgmtScenarioTest(ScenarioTest):
def __init__(self, method_name):
super(SqlElasticPoolOperationMgmtScenarioTest, self).__init__(method_name)
self.pool_name = "operationtestep1"
@ResourceGroupPreparer(location='southeastasia')
@SqlServerPreparer(location='southeastasia')
@AllowLargeResponse()
def test_sql_elastic_pool_operation_mgmt(self, resource_group, resource_group_location, server):
edition = 'Premium'
dtu = 125
db_dtu_min = 0
db_dtu_max = 50
storage = '50GB'
storage_mb = 51200
update_dtu = 250
update_db_dtu_min = 50
update_db_dtu_max = 250
# Create elastic pool
self.cmd('sql elastic-pool create -g {} --server {} --name {} '
'--dtu {} --edition {} --db-dtu-min {} --db-dtu-max {} --storage {}'
.format(resource_group, server, self.pool_name, dtu, edition, db_dtu_min, db_dtu_max, storage),
checks=[
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', self.pool_name),
JMESPathCheck('edition', edition),
JMESPathCheck('sku.tier', edition),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('dtu', dtu),
JMESPathCheck('sku.capacity', dtu),
JMESPathCheck('databaseDtuMin', db_dtu_min),
JMESPathCheck('databaseDtuMax', db_dtu_max),
JMESPathCheck('perDatabaseSettings.minCapacity', db_dtu_min),
JMESPathCheck('perDatabaseSettings.maxCapacity', db_dtu_max),
JMESPathCheck('storageMb', storage_mb),
JMESPathCheck('maxSizeBytes', storage_mb * 1024 * 1024)])
# Update elastic pool
self.cmd('sql elastic-pool update -g {} --server {} --name {} '
'--dtu {} --db-dtu-min {} --db-dtu-max {}'
.format(resource_group, server, self.pool_name, update_dtu, update_db_dtu_min, update_db_dtu_max))
# List operations on the elastic pool
ops = list(self.cmd('sql elastic-pool op list -g {} --server {} --elastic-pool {}'
.format(resource_group, server, self.pool_name)).get_output_in_json())
# Cancel operation
        try:
            self.cmd('sql elastic-pool op cancel -g {} --server {} --elastic-pool {} --name {}'
                     .format(resource_group, server, self.pool_name, ops[0]['name']))
        except Exception as e:  # pylint: disable=broad-except
            # Cancellation legitimately fails if the operation has already completed;
            # any other error should fail the test.
            expectedmessage = "Cannot cancel management operation {} in current state.".format(ops[0]['name'])
            if expectedmessage not in str(e):
                raise
class SqlServerCapabilityScenarioTest(ScenarioTest):
@AllowLargeResponse()
def test_sql_capabilities(self):
location = 'westus'
# New capabilities are added quite frequently and the state of each capability depends
# on your subscription. So it's not a good idea to make strict checks against exactly
# which capabilities are returned. The idea is to just check the overall structure.
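        # The capabilities payload is, roughly, a list of editions, each with nested
        # service level objectives and max sizes (exact shape may vary by API version), e.g.
        #   [{'name': 'Standard', 'supportedServiceLevelObjectives': [{'name': 'S0', 'supportedMaxSizes': [...]}, ...]}, ...]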
db_max_size_length_jmespath = 'length([].supportedServiceLevelObjectives[].supportedMaxSizes[])'
# Get all db capabilities
self.cmd('sql db list-editions -l {}'.format(location),
checks=[
# At least standard and premium edition exist
JMESPathCheckExists("[?name == 'Standard']"),
JMESPathCheckExists("[?name == 'Premium']"),
# At least s0 and p1 service objectives exist
JMESPathCheckExists("[].supportedServiceLevelObjectives[] | [?name == 'S0']"),
JMESPathCheckExists("[].supportedServiceLevelObjectives[] | [?name == 'P1']"),
# Max size data is omitted
JMESPathCheck(db_max_size_length_jmespath, 0)])
# Get all db capabilities with size data
self.cmd('sql db list-editions -l {} --show-details max-size'.format(location),
checks=[
# Max size data is included
JMESPathCheckGreaterThan(db_max_size_length_jmespath, 0)])
# Search for db edition - note that it's case insensitive
self.cmd('sql db list-editions -l {} --edition standard'.format(location),
checks=[
# Standard edition exists, other editions don't
JMESPathCheckExists("[?name == 'Standard']"),
JMESPathCheck("length([?name != 'Standard'])", 0)])
# Search for dtus
self.cmd('sql db list-editions -l {} --dtu 100'.format(location),
checks=[
# All results have 100 dtu
JMESPathCheckGreaterThan('length([].supportedServiceLevelObjectives[?performanceLevel.value == `100`][])', 0),
JMESPathCheck('length([].supportedServiceLevelObjectives[?performanceLevel.value != `100`][])', 0),
JMESPathCheck('length([].supportedServiceLevelObjectives[?performanceLevel.unit != `DTU`][])', 0)])
# Search for vcores
self.cmd('sql db list-editions -l {} --vcore 2'.format(location),
checks=[
# All results have 2 vcores
JMESPathCheckGreaterThan('length([].supportedServiceLevelObjectives[?performanceLevel.value == `2`][])', 0),
JMESPathCheck('length([].supportedServiceLevelObjectives[?performanceLevel.value != `2`][])', 0),
JMESPathCheck('length([].supportedServiceLevelObjectives[?performanceLevel.unit != `VCores`][])', 0)])
# Search for db service objective - note that it's case insensitive
# Checked items:
# * Standard edition exists, other editions don't
# * S0 service objective exists, others don't exist
self.cmd('sql db list-editions -l {} --edition standard --service-objective s0'.format(location),
checks=[JMESPathCheckExists("[?name == 'Standard']"),
JMESPathCheck("length([?name != 'Standard'])", 0),
JMESPathCheckExists("[].supportedServiceLevelObjectives[] | [?name == 'S0']"),
JMESPathCheck("length([].supportedServiceLevelObjectives[] | [?name != 'S0'])", 0)])
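        # JMESPath expressions that count the optional detail sections in the elastic pool capabilities output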
pool_max_size_length_jmespath = 'length([].supportedElasticPoolPerformanceLevels[].supportedMaxSizes[])'
pool_db_max_dtu_length_jmespath = 'length([].supportedElasticPoolPerformanceLevels[].supportedPerDatabaseMaxPerformanceLevels[])'
pool_db_min_dtu_length_jmespath = ('length([].supportedElasticPoolPerformanceLevels[].supportedPerDatabaseMaxPerformanceLevels[]'
'.supportedPerDatabaseMinPerformanceLevels[])')
pool_db_max_size_length_jmespath = 'length([].supportedElasticPoolPerformanceLevels[].supportedPerDatabaseMaxSizes[])'
# Get all elastic pool capabilities
self.cmd('sql elastic-pool list-editions -l {}'.format(location),
checks=[JMESPathCheckExists("[?name == 'Standard']"), # At least standard and premium edition exist
JMESPathCheckExists("[?name == 'Premium']"),
JMESPathCheck(pool_max_size_length_jmespath, 0), # Optional details are omitted
JMESPathCheck(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_max_size_length_jmespath, 0)])
# Search for elastic pool edition - note that it's case insensitive
self.cmd('sql elastic-pool list-editions -l {} --edition standard'.format(location),
checks=[JMESPathCheckExists("[?name == 'Standard']"), # Standard edition exists, other editions don't
JMESPathCheck("length([?name != 'Standard'])", 0)])
# Search for dtus
self.cmd('sql elastic-pool list-editions -l {} --dtu 100'.format(location),
checks=[
# All results have 100 dtu
JMESPathCheckGreaterThan('length([].supportedElasticPoolPerformanceLevels[?performanceLevel.value == `100`][])', 0),
JMESPathCheck('length([].supportedElasticPoolPerformanceLevels[?performanceLevel.value != `100`][])', 0),
                     JMESPathCheck('length([].supportedElasticPoolPerformanceLevels[?performanceLevel.unit != `DTU`][])', 0)])
# Search for vcores
self.cmd('sql elastic-pool list-editions -l {} --vcore 2'.format(location),
checks=[
# All results have 2 vcores
JMESPathCheckGreaterThan('length([].supportedElasticPoolPerformanceLevels[?performanceLevel.value == `2`][])', 0),
JMESPathCheck('length([].supportedElasticPoolPerformanceLevels[?performanceLevel.value != `2`][])', 0),
                     JMESPathCheck('length([].supportedElasticPoolPerformanceLevels[?performanceLevel.unit != `VCores`][])', 0)])
# Get all db capabilities with pool max size
self.cmd('sql elastic-pool list-editions -l {} --show-details max-size'.format(location),
checks=[JMESPathCheckGreaterThan(pool_max_size_length_jmespath, 0),
JMESPathCheck(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_max_size_length_jmespath, 0)])
# Get all db capabilities with per db max size
self.cmd('sql elastic-pool list-editions -l {} --show-details db-max-size'.format(location),
checks=[JMESPathCheck(pool_max_size_length_jmespath, 0),
JMESPathCheck(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_size_length_jmespath, 0)])
# Get all db capabilities with per db max dtu
self.cmd('sql elastic-pool list-editions -l {} --edition standard --show-details db-max-dtu'.format(location),
checks=[JMESPathCheck(pool_max_size_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_max_size_length_jmespath, 0)])
# Get all db capabilities with per db min dtu (which is nested under per db max dtu)
self.cmd('sql elastic-pool list-editions -l {} --edition standard --show-details db-min-dtu'.format(location),
checks=[JMESPathCheck(pool_max_size_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheck(pool_db_max_size_length_jmespath, 0)])
# Get all db capabilities with everything
self.cmd('sql elastic-pool list-editions -l {} --edition standard --show-details db-min-dtu db-max-dtu '
'db-max-size max-size'.format(location),
checks=[JMESPathCheckGreaterThan(pool_max_size_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_dtu_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_min_dtu_length_jmespath, 0),
JMESPathCheckGreaterThan(pool_db_max_size_length_jmespath, 0)])
class SqlServerImportExportMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
@StorageAccountPreparer()
def test_sql_db_import_export_mgmt(self, resource_group, resource_group_location, server, storage_account):
location_long_name = 'westus'
admin_login = 'admin123'
admin_password = 'SecretPassword123'
db_name = 'cliautomationdb01'
db_name2 = 'cliautomationdb02'
db_name3 = 'cliautomationdb03'
blob = 'testbacpac.bacpac'
blob2 = 'testbacpac2.bacpac'
container = 'bacpacs'
firewall_rule_1 = 'allowAllIps'
start_ip_address_1 = '0.0.0.0'
end_ip_address_1 = '0.0.0.0'
# create server firewall rule
self.cmd('sql server firewall-rule create --name {} -g {} --server {} '
'--start-ip-address {} --end-ip-address {}'
.format(firewall_rule_1, resource_group, server,
start_ip_address_1, end_ip_address_1),
checks=[JMESPathCheck('name', firewall_rule_1),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('startIpAddress', start_ip_address_1),
JMESPathCheck('endIpAddress', end_ip_address_1)])
# create dbs
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, server, db_name),
checks=[JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', db_name),
JMESPathCheck('location', location_long_name),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online')])
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, server, db_name2),
checks=[JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', db_name2),
JMESPathCheck('location', location_long_name),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online')])
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, server, db_name3),
checks=[JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('name', db_name3),
JMESPathCheck('location', location_long_name),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online')])
# get storage account endpoint
storage_endpoint = self.cmd('storage account show -g {} -n {}'
' --query primaryEndpoints.blob'
.format(resource_group, storage_account)).get_output_in_json()
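        # Blob URIs that the export operations below will write the bacpacs to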
bacpacUri = '{}{}/{}'.format(storage_endpoint, container, blob)
bacpacUri2 = '{}{}/{}'.format(storage_endpoint, container, blob2)
# get storage account key
storageKey = self.cmd('storage account keys list -g {} -n {} --query [0].value'
.format(resource_group, storage_account)).get_output_in_json()
# Set Expiry
expiryString = '9999-12-25T00:00:00Z'
# Get sas key
sasKey = self.cmd('storage blob generate-sas --account-name {} -c {} -n {} --permissions rw --expiry {}'.format(
storage_account, container, blob2, expiryString)).get_output_in_json()
# create storage account blob container
self.cmd('storage container create -n {} --account-name {} --account-key {} '
.format(container, storage_account, storageKey),
checks=[JMESPathCheck('created', True)])
# export database to blob container using both keys
self.cmd('sql db export -s {} -n {} -g {} -p {} -u {}'
' --storage-key {} --storage-key-type StorageAccessKey'
' --storage-uri {}'
.format(server, db_name, resource_group, admin_password, admin_login, storageKey, bacpacUri),
checks=[JMESPathCheck('blobUri', bacpacUri),
JMESPathCheck('databaseName', db_name),
JMESPathCheck('requestType', 'Export'),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('serverName', server),
JMESPathCheck('status', 'Completed')])
self.cmd('sql db export -s {} -n {} -g {} -p {} -u {}'
' --storage-key {} --storage-key-type SharedAccessKey'
' --storage-uri {}'
.format(server, db_name, resource_group, admin_password, admin_login, sasKey, bacpacUri2),
checks=[JMESPathCheck('blobUri', bacpacUri2),
JMESPathCheck('databaseName', db_name),
JMESPathCheck('requestType', 'Export'),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('serverName', server),
JMESPathCheck('status', 'Completed')])
# import bacpac to second database using Storage Key
self.cmd('sql db import -s {} -n {} -g {} -p {} -u {}'
' --storage-key {} --storage-key-type StorageAccessKey'
' --storage-uri {}'
.format(server, db_name2, resource_group, admin_password, admin_login, storageKey, bacpacUri),
checks=[JMESPathCheck('blobUri', bacpacUri),
JMESPathCheck('databaseName', db_name2),
JMESPathCheck('name', 'import'),
JMESPathCheck('requestType', 'Import'),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('serverName', server),
JMESPathCheck('status', 'Completed')])
# import bacpac to third database using SAS key
self.cmd('sql db import -s {} -n {} -g {} -p {} -u {}'
' --storage-key {} --storage-key-type SharedAccessKey'
' --storage-uri {}'
.format(server, db_name3, resource_group, admin_password, admin_login, sasKey, bacpacUri2),
checks=[JMESPathCheck('blobUri', bacpacUri2),
JMESPathCheck('databaseName', db_name3),
JMESPathCheck('name', 'import'),
JMESPathCheck('requestType', 'Import'),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('serverName', server),
JMESPathCheck('status', 'Completed')])
class SqlServerConnectionStringScenarioTest(ScenarioTest):
def test_sql_db_conn_str(self):
# ADO.NET, username/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c ado.net').get_output_in_json()
self.assertEqual(conn_str, 'Server=tcp:myserver.database.windows.net,1433;Database=mydb;User ID=<username>;Password=<password>;Encrypt=true;Connection Timeout=30;')
# ADO.NET, ADPassword
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c ado.net -a ADPassword').get_output_in_json()
self.assertEqual(conn_str, 'Server=tcp:myserver.database.windows.net,1433;Database=mydb;User ID=<username>;Password=<password>;Encrypt=true;Connection Timeout=30;Authentication="Active Directory Password"')
# ADO.NET, ADIntegrated
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c ado.net -a ADIntegrated').get_output_in_json()
self.assertEqual(conn_str, 'Server=tcp:myserver.database.windows.net,1433;Database=mydb;Encrypt=true;Connection Timeout=30;Authentication="Active Directory Integrated"')
# SqlCmd, username/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c sqlcmd').get_output_in_json()
self.assertEqual(conn_str, 'sqlcmd -S tcp:myserver.database.windows.net,1433 -d mydb -U <username> -P <password> -N -l 30')
# SqlCmd, ADPassword
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c sqlcmd -a ADPassword').get_output_in_json()
self.assertEqual(conn_str, 'sqlcmd -S tcp:myserver.database.windows.net,1433 -d mydb -U <username> -P <password> -G -N -l 30')
# SqlCmd, ADIntegrated
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c sqlcmd -a ADIntegrated').get_output_in_json()
self.assertEqual(conn_str, 'sqlcmd -S tcp:myserver.database.windows.net,1433 -d mydb -G -N -l 30')
# JDBC, user name/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c jdbc').get_output_in_json()
self.assertEqual(conn_str, 'jdbc:sqlserver://myserver.database.windows.net:1433;database=mydb;user=<username>@myserver;password=<password>;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30')
# JDBC, ADPassword
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c jdbc -a ADPassword').get_output_in_json()
self.assertEqual(conn_str, 'jdbc:sqlserver://myserver.database.windows.net:1433;database=mydb;user=<username>;password=<password>;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;authentication=ActiveDirectoryPassword')
# JDBC, ADIntegrated
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c jdbc -a ADIntegrated').get_output_in_json()
self.assertEqual(conn_str, 'jdbc:sqlserver://myserver.database.windows.net:1433;database=mydb;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;authentication=ActiveDirectoryIntegrated')
# PHP PDO, user name/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c php_pdo').get_output_in_json()
self.assertEqual(conn_str, '$conn = new PDO("sqlsrv:server = tcp:myserver.database.windows.net,1433; Database = mydb; LoginTimeout = 30; Encrypt = 1; TrustServerCertificate = 0;", "<username>", "<password>");')
# PHP PDO, ADPassword
self.cmd('sql db show-connection-string -s myserver -n mydb -c php_pdo -a ADPassword', expect_failure=True)
# PHP PDO, ADIntegrated
self.cmd('sql db show-connection-string -s myserver -n mydb -c php_pdo -a ADIntegrated', expect_failure=True)
# PHP, user name/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c php').get_output_in_json()
self.assertEqual(conn_str, '$connectionOptions = array("UID"=>"<username>@myserver", "PWD"=>"<password>", "Database"=>mydb, "LoginTimeout" => 30, "Encrypt" => 1, "TrustServerCertificate" => 0); $serverName = "tcp:myserver.database.windows.net,1433"; $conn = sqlsrv_connect($serverName, $connectionOptions);')
# PHP, ADPassword
self.cmd('sql db show-connection-string -s myserver -n mydb -c php -a ADPassword', expect_failure=True)
# PHP, ADIntegrated
self.cmd('sql db show-connection-string -s myserver -n mydb -c php -a ADIntegrated', expect_failure=True)
# ODBC, user name/password
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c odbc').get_output_in_json()
self.assertEqual(conn_str, 'Driver={ODBC Driver 13 for SQL Server};Server=tcp:myserver.database.windows.net,1433;Database=mydb;Uid=<username>@myserver;Pwd=<password>;Encrypt=yes;TrustServerCertificate=no;')
# ODBC, ADPassword
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c odbc -a ADPassword').get_output_in_json()
self.assertEqual(conn_str, 'Driver={ODBC Driver 13 for SQL Server};Server=tcp:myserver.database.windows.net,1433;Database=mydb;Uid=<username>@myserver;Pwd=<password>;Encrypt=yes;TrustServerCertificate=no;Authentication=ActiveDirectoryPassword')
# ODBC, ADIntegrated
conn_str = self.cmd('sql db show-connection-string -s myserver -n mydb -c odbc -a ADIntegrated').get_output_in_json()
self.assertEqual(conn_str, 'Driver={ODBC Driver 13 for SQL Server};Server=tcp:myserver.database.windows.net,1433;Database=mydb;Encrypt=yes;TrustServerCertificate=no;Authentication=ActiveDirectoryIntegrated')
class SqlTransparentDataEncryptionScenarioTest(ScenarioTest):
def wait_for_encryption_scan(self, rg, sn, db_name):
active_scan = True
retry_attempts = 5
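        # Poll the TDE activity list until no encryption scan is in progress, failing after a bounded number of retries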
while active_scan:
tdeactivity = self.cmd('sql db tde list-activity -g {} -s {} -d {}'
.format(rg, sn, db_name)).get_output_in_json()
# if tdeactivity is an empty array, there is no ongoing encryption scan
active_scan = (len(tdeactivity) > 0)
            if not active_scan:
                break
            retry_attempts -= 1
            if retry_attempts <= 0:
                raise CliTestError("Encryption scan still ongoing: {}.".format(tdeactivity))
            time.sleep(10)
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_tde(self, resource_group, server):
rg = resource_group
sn = server
db_name = self.create_random_name("sqltdedb", 20)
# create database
self.cmd('sql db create -g {} --server {} --name {}'
.format(rg, sn, db_name))
# validate encryption is on by default
self.cmd('sql db tde show -g {} -s {} -d {}'
.format(rg, sn, db_name),
checks=[JMESPathCheck('status', 'Enabled')])
self.wait_for_encryption_scan(rg, sn, db_name)
# disable encryption
self.cmd('sql db tde set -g {} -s {} -d {} --status Disabled'
.format(rg, sn, db_name),
checks=[JMESPathCheck('status', 'Disabled')])
self.wait_for_encryption_scan(rg, sn, db_name)
# validate encryption is disabled
self.cmd('sql db tde show -g {} -s {} -d {}'
.format(rg, sn, db_name),
checks=[JMESPathCheck('status', 'Disabled')])
# enable encryption
self.cmd('sql db tde set -g {} -s {} -d {} --status Enabled'
.format(rg, sn, db_name),
checks=[JMESPathCheck('status', 'Enabled')])
self.wait_for_encryption_scan(rg, sn, db_name)
# validate encryption is enabled
self.cmd('sql db tde show -g {} -s {} -d {}'
.format(rg, sn, db_name),
checks=[JMESPathCheck('status', 'Enabled')])
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_tdebyok(self, resource_group, server):
resource_prefix = 'sqltdebyok'
# add identity to server
server_resp = self.cmd('sql server update -g {} -n {} -i'
.format(resource_group, server)).get_output_in_json()
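        # The server's managed identity principal id is needed to grant it access to the key vault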
server_identity = server_resp['identity']['principalId']
# create db
db_name = self.create_random_name(resource_prefix, 20)
self.cmd('sql db create -g {} --server {} --name {}'
.format(resource_group, server, db_name))
# create vault and acl server identity
vault_name = self.create_random_name(resource_prefix, 24)
self.cmd('keyvault create -g {} -n {} --enable-soft-delete true'
.format(resource_group, vault_name))
self.cmd('keyvault set-policy -g {} -n {} --object-id {} --key-permissions wrapKey unwrapKey get list'
.format(resource_group, vault_name, server_identity))
# create key
key_name = self.create_random_name(resource_prefix, 32)
key_resp = self.cmd('keyvault key create -n {} -p software --vault-name {}'
.format(key_name, vault_name)).get_output_in_json()
kid = key_resp['key']['kid']
# add server key
server_key_resp = self.cmd('sql server key create -g {} -s {} -k {}'
.format(resource_group, server, kid),
checks=[
JMESPathCheck('uri', kid),
JMESPathCheck('serverKeyType', 'AzureKeyVault')])
server_key_name = server_key_resp.get_output_in_json()['name']
# validate show key
self.cmd('sql server key show -g {} -s {} -k {}'
.format(resource_group, server, kid),
checks=[
JMESPathCheck('uri', kid),
JMESPathCheck('serverKeyType', 'AzureKeyVault'),
JMESPathCheck('name', server_key_name)])
# validate list key (should return 2 items)
self.cmd('sql server key list -g {} -s {}'
.format(resource_group, server),
checks=[JMESPathCheck('length(@)', 2)])
# validate encryption protector is service managed via show
self.cmd('sql server tde-key show -g {} -s {}'
.format(resource_group, server),
checks=[
JMESPathCheck('serverKeyType', 'ServiceManaged'),
JMESPathCheck('serverKeyName', 'ServiceManaged')])
# update encryption protector to akv key
self.cmd('sql server tde-key set -g {} -s {} -t AzureKeyVault -k {}'
.format(resource_group, server, kid),
checks=[
JMESPathCheck('serverKeyType', 'AzureKeyVault'),
JMESPathCheck('serverKeyName', server_key_name),
JMESPathCheck('uri', kid)])
# validate encryption protector is akv via show
self.cmd('sql server tde-key show -g {} -s {}'
.format(resource_group, server),
checks=[
JMESPathCheck('serverKeyType', 'AzureKeyVault'),
JMESPathCheck('serverKeyName', server_key_name),
JMESPathCheck('uri', kid)])
# update encryption protector to service managed
self.cmd('sql server tde-key set -g {} -s {} -t ServiceManaged'
.format(resource_group, server),
checks=[
JMESPathCheck('serverKeyType', 'ServiceManaged'),
JMESPathCheck('serverKeyName', 'ServiceManaged')])
# validate encryption protector is service managed via show
self.cmd('sql server tde-key show -g {} -s {}'
.format(resource_group, server),
checks=[
JMESPathCheck('serverKeyType', 'ServiceManaged'),
JMESPathCheck('serverKeyName', 'ServiceManaged')])
# delete server key
self.cmd('sql server key delete -g {} -s {} -k {}'
.format(resource_group, server, kid))
# wait for key to be deleted
time.sleep(10)
# validate deleted server key via list (should return 1 item)
self.cmd('sql server key list -g {} -s {}'
.format(resource_group, server),
checks=[JMESPathCheck('length(@)', 1)])
class SqlServerVnetMgmtScenarioTest(ScenarioTest):
@ResourceGroupPreparer()
@SqlServerPreparer()
def test_sql_vnet_mgmt(self, resource_group, resource_group_location, server):
rg = resource_group
vnet_rule_1 = 'rule1'
vnet_rule_2 = 'rule2'
        # Create vnets - vnet1 and vnet2
vnetName1 = 'vnet1'
vnetName2 = 'vnet2'
subnetName = 'subnet1'
addressPrefix = '10.0.1.0/24'
endpoint = 'Microsoft.Sql'
# Vnet 1 without service endpoints to test ignore-missing-vnet-service-endpoint feature
self.cmd('network vnet create -g {} -n {}'.format(rg, vnetName1))
self.cmd('network vnet subnet create -g {} --vnet-name {} -n {} --address-prefix {}'
.format(rg, vnetName1, subnetName, addressPrefix))
vnet1 = self.cmd('network vnet subnet show -n {} --vnet-name {} -g {}'
.format(subnetName, vnetName1, rg)).get_output_in_json()
vnet_id_1 = vnet1['id']
# Vnet 2
self.cmd('network vnet create -g {} -n {}'.format(rg, vnetName2))
self.cmd('network vnet subnet create -g {} --vnet-name {} -n {} --address-prefix {} --service-endpoints {}'
.format(rg, vnetName2, subnetName, addressPrefix, endpoint),
checks=JMESPathCheck('serviceEndpoints[0].service', 'Microsoft.Sql'))
vnet2 = self.cmd('network vnet subnet show -n {} --vnet-name {} -g {}'
.format(subnetName, vnetName2, rg)).get_output_in_json()
vnet_id_2 = vnet2['id']
# test sql server vnet-rule create using subnet name and vnet name and ignore-missing-vnet-service-endpoint flag
self.cmd('sql server vnet-rule create --name {} -g {} --server {} --subnet {} --vnet-name {} -i'
.format(vnet_rule_1, rg, server, subnetName, vnetName1))
# test sql server vnet-rule show rule 1
self.cmd('sql server vnet-rule show --name {} -g {} --server {}'
.format(vnet_rule_1, rg, server),
checks=[
JMESPathCheck('name', vnet_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('ignoreMissingVnetServiceEndpoint', True)])
# test sql server vnet-rule create using subnet id
self.cmd('sql server vnet-rule create --name {} -g {} --server {} --subnet {}'
.format(vnet_rule_2, rg, server, vnet_id_2),
checks=[
JMESPathCheck('name', vnet_rule_2),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('virtualNetworkSubnetId', vnet_id_2),
JMESPathCheck('ignoreMissingVnetServiceEndpoint', False)])
# test sql server vnet-rule update rule 1 with vnet 2
self.cmd('sql server vnet-rule update --name {} -g {} --server {} --subnet {}'
.format(vnet_rule_1, rg, server, vnet_id_2),
checks=[
JMESPathCheck('name', vnet_rule_1),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('virtualNetworkSubnetId', vnet_id_2),
JMESPathCheck('ignoreMissingVnetServiceEndpoint', False)])
# test sql server vnet-rule update rule 2 with vnet 1 and ignore-missing-vnet-service-endpoint flag
self.cmd('sql server vnet-rule update --name {} -g {} --server {} --subnet {} -i'
.format(vnet_rule_2, rg, server, vnet_id_1),
checks=[JMESPathCheck('name', vnet_rule_2),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('virtualNetworkSubnetId', vnet_id_1),
JMESPathCheck('ignoreMissingVnetServiceEndpoint', True)])
# test sql server vnet-rule list
self.cmd('sql server vnet-rule list -g {} --server {}'.format(rg, server),
checks=[JMESPathCheck('length(@)', 2)])
# test sql server vnet-rule delete rule 1
self.cmd('sql server vnet-rule delete --name {} -g {} --server {}'.format(vnet_rule_1, rg, server),
checks=NoneCheck())
# test sql server vnet-rule delete rule 2
self.cmd('sql server vnet-rule delete --name {} -g {} --server {}'.format(vnet_rule_2, rg, server),
checks=NoneCheck())
class SqlSubscriptionUsagesScenarioTest(ScenarioTest):
def test_sql_subscription_usages(self):
self.cmd('sql list-usages -l westus',
checks=[JMESPathCheckGreaterThan('length(@)', 0)])
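        # Spot-check one specific usage metric by name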
self.cmd('sql show-usage -l westus -u ServerQuota',
checks=[
JMESPathCheck('name', 'ServerQuota'),
JMESPathCheckGreaterThan('limit', 0)])
class SqlZoneResilienceScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='centralus')
@SqlServerPreparer(location='centralus')
@AllowLargeResponse()
def test_sql_zone_resilient_database(self, resource_group, resource_group_location, server):
database_name = "createUnzonedUpdateToZonedDb"
database_name_2 = "createZonedUpdateToUnzonedDb"
database_name_3 = "updateNoParamForUnzonedDb"
database_name_4 = "updateNoParamForZonedDb"
rg = resource_group
loc_display = "centralus"
# Test creating database with zone resilience set to false. Expect regular database created.
self.cmd('sql db create -g {} --server {} --name {} --edition {} --zone-redundant {}'
.format(rg, server, database_name, "Premium", False),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('sku.tier', 'Premium'),
JMESPathCheck('zoneRedundant', False)])
# Test running update on regular database with zone resilience set to true. Expect zone resilience to update to true.
self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --zone-redundant'
.format(rg, server, database_name, 'P1'),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('requestedServiceObjectiveName', 'P1'),
JMESPathCheck('zoneRedundant', True)])
# Test creating database with zone resilience set to true. Expect zone resilient database created.
self.cmd('sql db create -g {} --server {} --name {} --edition {} --z'
.format(rg, server, database_name_2, "Premium"),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_2),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('sku.tier', 'Premium'),
JMESPathCheck('zoneRedundant', True)])
# Test running update on zoned database with zone resilience set to false. Expect zone resilience to update to false
self.cmd('sql db update -g {} -s {} -n {} --service-objective {} --z {}'
.format(rg, server, database_name_2, 'P1', False),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_2),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('requestedServiceObjectiveName', 'P1'),
JMESPathCheck('zoneRedundant', False)])
# Create database with no zone resilience set. Expect regular database created.
self.cmd('sql db create -g {} --server {} --name {} --edition {}'
.format(rg, server, database_name_3, "Premium"),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_3),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('sku.tier', 'Premium'),
JMESPathCheck('zoneRedundant', False)])
# Test running update on regular database with no zone resilience set. Expect zone resilience to stay false.
self.cmd('sql db update -g {} -s {} -n {} --service-objective {}'
.format(rg, server, database_name_3, 'P2'),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_3),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('requestedServiceObjectiveName', 'P2'),
JMESPathCheck('zoneRedundant', False)])
# Create database with zone resilience set. Expect zone resilient database created.
self.cmd('sql db create -g {} --server {} --name {} --edition {} --zone-redundant'
.format(rg, server, database_name_4, "Premium"),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_4),
JMESPathCheck('location', loc_display),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('sku.tier', 'Premium'),
JMESPathCheck('zoneRedundant', True)])
# Test running update on zoned database with no zone resilience set. Expect zone resilience to stay true.
self.cmd('sql db update -g {} -s {} -n {} --service-objective {}'
.format(rg, server, database_name_4, 'P2'),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', database_name_4),
JMESPathCheck('elasticPoolId', None),
JMESPathCheck('status', 'Online'),
JMESPathCheck('requestedServiceObjectiveName', 'P2'),
JMESPathCheck('zoneRedundant', True)])
@ResourceGroupPreparer(location='centralus')
@SqlServerPreparer(location='centralus')
@AllowLargeResponse()
def test_sql_zone_resilient_pool(self, resource_group, resource_group_location, server):
pool_name = "createUnzonedUpdateToZonedPool"
pool_name_2 = "createZonedUpdateToUnzonedPool"
pool_name_3 = "updateNoParamForUnzonedPool"
pool_name_4 = "updateNoParamForZonedPool"
rg = resource_group
# Test creating pool with zone resilience set to false. Expect regular pool created.
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {} --z {}'
.format(rg, server, pool_name, "Premium", False))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', False)])
# Test running update on regular pool with zone resilience set to true. Expect zone resilience to update to true
self.cmd('sql elastic-pool update -g {} -s {} -n {} --z'
.format(rg, server, pool_name))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name),
JMESPathCheck('zoneRedundant', True)])
# Test creating pool with zone resilience set to true. Expect zone resilient pool created.
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {} --zone-redundant'
.format(rg, server, pool_name_2, "Premium"))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_2),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', True)])
# Test running update on zoned pool with zone resilience set to false. Expect zone resilience to update to false
self.cmd('sql elastic-pool update -g {} -s {} -n {} --zone-redundant {}'
.format(rg, server, pool_name_2, False))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_2),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_2),
JMESPathCheck('zoneRedundant', False)])
# Create pool with no zone resilience set. Expect regular pool created.
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {}'
.format(rg, server, pool_name_3, "Premium"))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_3),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_3),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', False)])
# Test running update on regular pool with no zone resilience set. Expect zone resilience to stay false
self.cmd('sql elastic-pool update -g {} -s {} -n {} --dtu {}'
.format(rg, server, pool_name_3, 250))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_3),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_3),
JMESPathCheck('dtu', 250),
JMESPathCheck('zoneRedundant', False)])
# Create pool with zone resilience set. Expect zone resilient pool created.
self.cmd('sql elastic-pool create -g {} --server {} --name {} --edition {} --zone-redundant'
.format(rg, server, pool_name_4, "Premium"))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_4),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_4),
JMESPathCheck('state', 'Ready'),
JMESPathCheck('edition', 'Premium'),
JMESPathCheck('zoneRedundant', True)])
# Test running update on zoned pool with no zone resilience set. Expect zone resilience to stay true
self.cmd('sql elastic-pool update -g {} -s {} -n {} --dtu {}'
                 .format(rg, server, pool_name_4, 250))
self.cmd('sql elastic-pool show -g {} --server {} --name {}'
.format(rg, server, pool_name_4),
checks=[
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('name', pool_name_4),
JMESPathCheck('dtu', 250),
JMESPathCheck('zoneRedundant', True)])
class SqlManagedInstanceMgmtScenarioTest(ScenarioTest):
@record_only()
def test_sql_managed_instance_mgmt(self):
managed_instance_name_1 = self.create_random_name(managed_instance_name_prefix, managed_instance_name_max_length)
managed_instance_name_2 = self.create_random_name(managed_instance_name_prefix, managed_instance_name_max_length)
admin_login = 'admin123'
admin_passwords = ['SecretPassword123', 'SecretPassword456']
is_playback = os.path.exists(self.recording_file)
if is_playback:
subnet = '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cl_one/providers/Microsoft.Network/virtualNetworks/cl_initial/subnets/CLean'
else:
subnet = '/subscriptions/ee5ea899-0791-418f-9270-77cd8273794b/resourceGroups/cl_one/providers/Microsoft.Network/virtualNetworks/cl_initial/subnets/CooL'
license_type = 'LicenseIncluded'
loc = 'westcentralus'
v_cores = 8
storage_size_in_gb = '64'
edition = 'GeneralPurpose'
family = 'Gen4'
resource_group_1 = "cl_one"
user = admin_login
# test create sql managed_instance with minimal required parameters
managed_instance_1 = self.cmd('sql mi create -g {} -n {} -l {} '
'-u {} -p {} --subnet {} --license-type {} --capacity {} --storage {} --edition {} --family {}'
.format(resource_group_1, managed_instance_name_1, loc, user, admin_passwords[0], subnet, license_type, v_cores, storage_size_in_gb, edition, family),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('vCores', v_cores),
JMESPathCheck('storageSizeInGb', storage_size_in_gb),
JMESPathCheck('licenseType', license_type),
JMESPathCheck('sku.tier', edition),
JMESPathCheck('sku.family', family),
JMESPathCheck('sku.capacity', v_cores),
JMESPathCheck('identity', None)]).get_output_in_json()
# test show sql managed instance 1
self.cmd('sql mi show -g {} -n {}'
.format(resource_group_1, managed_instance_name_1),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user)])
# test show sql managed instance 1 using id
self.cmd('sql mi show --id {}'
.format(managed_instance_1['id']),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user)])
# test update sql managed_instance
self.cmd('sql mi update -g {} -n {} --admin-password {} -i'
.format(resource_group_1, managed_instance_name_1, admin_passwords[1]),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('identity.type', 'SystemAssigned')])
# test update without identity parameter, validate identity still exists
# also use --id instead of -g/-n
self.cmd('sql mi update --id {} --admin-password {}'
.format(managed_instance_1['id'], admin_passwords[0]),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('identity.type', 'SystemAssigned')])
# test create another sql managed instance, with identity this time
self.cmd('sql mi create -g {} -n {} -l {} -i '
'--admin-user {} --admin-password {} --subnet {} --license-type {} --capacity {} --storage {} --edition {} --family {}'
.format(resource_group_1, managed_instance_name_2, loc, user, admin_passwords[0], subnet, license_type, v_cores, storage_size_in_gb, edition, family),
checks=[
JMESPathCheck('name', managed_instance_name_2),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('vCores', v_cores),
JMESPathCheck('storageSizeInGb', storage_size_in_gb),
JMESPathCheck('licenseType', license_type),
JMESPathCheck('sku.tier', edition),
JMESPathCheck('sku.family', family),
JMESPathCheck('sku.capacity', v_cores),
JMESPathCheck('identity.type', 'SystemAssigned')])
# test show sql managed instance 2
self.cmd('sql mi show -g {} -n {}'
.format(resource_group_1, managed_instance_name_2),
checks=[
JMESPathCheck('name', managed_instance_name_2),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user)])
# test list sql managed_instance in the subscription should be at least 2
self.cmd('sql mi list', checks=[JMESPathCheckGreaterThan('length(@)', 1)])
# test delete sql managed instance
self.cmd('sql mi delete --id {} --yes'
.format(managed_instance_1['id']), checks=NoneCheck())
self.cmd('sql mi delete -g {} -n {} --yes'
.format(resource_group_1, managed_instance_name_2), checks=NoneCheck())
# test show sql managed instance doesn't return anything
self.cmd('sql mi show -g {} -n {}'
.format(resource_group_1, managed_instance_name_1),
expect_failure=True)
# test show sql managed instance doesn't return anything
self.cmd('sql mi show -g {} -n {}'
.format(resource_group_1, managed_instance_name_2),
expect_failure=True)
class SqlManagedInstanceDbMgmtScenarioTest(ScenarioTest):
@record_only()
def test_sql_managed_db_mgmt(self):
database_name = "cliautomationdb01"
database_name_restored = "restoredcliautomationdb01"
managed_instance_name_1 = self.create_random_name(managed_instance_name_prefix, managed_instance_name_max_length)
admin_login = 'admin123'
admin_passwords = ['SecretPassword123', 'SecretPassword456']
is_playback = os.path.exists(self.recording_file)
if is_playback:
subnet = '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cl_one/providers/Microsoft.Network/virtualNetworks/cl_initial/subnets/CLean'
else:
subnet = '/subscriptions/ee5ea899-0791-418f-9270-77cd8273794b/resourceGroups/cl_one/providers/Microsoft.Network/virtualNetworks/cl_initial/subnets/CooL'
license_type = 'LicenseIncluded'
loc = 'westcentralus'
v_cores = 8
storage_size_in_gb = '64'
edition = 'GeneralPurpose'
family = 'Gen4'
resource_group_1 = "cl_one"
collation = "Latin1_General_100_CS_AS_SC"
user = admin_login
# Prepare managed instance for test
managed_instance_1 = self.cmd('sql mi create -g {} -n {} -l {} '
'-u {} -p {} --subnet {} --license-type {} --capacity {} --storage {} --edition {} --family {}'
.format(resource_group_1, managed_instance_name_1, loc, user, admin_passwords[0], subnet, license_type, v_cores, storage_size_in_gb, edition, family),
checks=[
JMESPathCheck('name', managed_instance_name_1),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('administratorLogin', user),
JMESPathCheck('vCores', v_cores),
JMESPathCheck('storageSizeInGb', storage_size_in_gb),
JMESPathCheck('licenseType', license_type),
JMESPathCheck('sku.tier', edition),
JMESPathCheck('sku.family', family),
JMESPathCheck('sku.capacity', v_cores),
JMESPathCheck('identity', None)]).get_output_in_json()
# test sql db commands
db1 = self.cmd('sql midb create -g {} --mi {} -n {} --collation {}'
.format(resource_group_1, managed_instance_name_1, database_name, collation),
checks=[
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('name', database_name),
JMESPathCheck('location', loc),
JMESPathCheck('collation', collation),
JMESPathCheck('status', 'Online')]).get_output_in_json()
time.sleep(300) # Sleeping 5 minutes should be enough for the restore to be possible (Skipped under playback mode)
# test sql db restore command
db1 = self.cmd('sql midb restore -g {} --mi {} -n {} --dest-name {} --time {}'
.format(resource_group_1, managed_instance_name_1, database_name, database_name_restored, datetime.utcnow().isoformat()),
checks=[
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('name', database_name_restored),
JMESPathCheck('location', loc),
JMESPathCheck('status', 'Online')]).get_output_in_json()
self.cmd('sql midb list -g {} --managed-instance {}'
.format(resource_group_1, managed_instance_name_1),
checks=[JMESPathCheck('length(@)', 2)])
# Show by group/managed_instance/database-name
self.cmd('sql midb show -g {} --managed-instance {} -n {}'
.format(resource_group_1, managed_instance_name_1, database_name),
checks=[
JMESPathCheck('name', database_name),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('location', loc),
JMESPathCheck('collation', collation),
JMESPathCheck('status', 'Online')])
# Show by id
self.cmd('sql midb show --id {}'
.format(db1['id']),
checks=[
JMESPathCheck('name', database_name_restored),
JMESPathCheck('resourceGroup', resource_group_1),
JMESPathCheck('location', loc),
JMESPathCheck('collation', collation),
JMESPathCheck('status', 'Online')])
# Delete by group/server/name
self.cmd('sql midb delete -g {} --managed-instance {} -n {} --yes'
.format(resource_group_1, managed_instance_name_1, database_name),
checks=[NoneCheck()])
# test show sql managed db doesn't return anything
self.cmd('sql midb show -g {} --managed-instance {} -n {}'
.format(resource_group_1, managed_instance_name_1, database_name),
expect_failure=True)
self.cmd('sql mi delete --id {} --yes'
.format(managed_instance_1['id']), checks=NoneCheck())
class SqlFailoverGroupMgmtScenarioTest(ScenarioTest):
# create 2 servers in the same resource group, and 1 server in a different resource group
@ResourceGroupPreparer(parameter_name="resource_group_1",
parameter_name_for_location="resource_group_location_1")
@ResourceGroupPreparer(parameter_name="resource_group_2",
parameter_name_for_location="resource_group_location_2")
@SqlServerPreparer(parameter_name="server_name_1",
resource_group_parameter_name="resource_group_1",
location='westus')
@SqlServerPreparer(parameter_name="server_name_2",
resource_group_parameter_name="resource_group_2", location='eastus')
def test_sql_failover_group_mgmt(self,
resource_group_1, resource_group_location_1,
resource_group_2, resource_group_location_2,
server_name_1, server_name_2):
# helper class so that it's clear which servers are in which groups
        class ServerInfo(object):  # pylint: disable=too-few-public-methods
def __init__(self, name, group, location):
self.name = name
self.group = group
self.location = location
from azure.cli.core.commands.client_factory import get_subscription_id
s1 = ServerInfo(server_name_1, resource_group_1, resource_group_location_1)
s2 = ServerInfo(server_name_2, resource_group_2, resource_group_location_2)
failover_group_name = "fgclitest1070"
database_name = "db1"
server2_id = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Sql/servers/{}".format(
get_subscription_id(self.cli_ctx),
resource_group_2,
server_name_2)
# Create database on primary server
self.cmd('sql db create -g {} --server {} --name {}'
.format(s1.group, s1.name, database_name),
checks=[
JMESPathCheck('resourceGroup', s1.group),
JMESPathCheck('name', database_name)
])
# Create Failover Group
self.cmd('sql failover-group create -n {} -g {} -s {} --partner-resource-group {} --partner-server {} --failover-policy Automatic --grace-period 2'
.format(failover_group_name, s1.group, s1.name, s2.group, s2.name),
checks=[
JMESPathCheck('name', failover_group_name),
JMESPathCheck('resourceGroup', s1.group),
JMESPathCheck('partnerServers[0].id', server2_id),
JMESPathCheck('readWriteEndpoint.failoverPolicy', 'Automatic'),
JMESPathCheck('readWriteEndpoint.failoverWithDataLossGracePeriodMinutes', 120),
JMESPathCheck('readOnlyEndpoint.failoverPolicy', 'Disabled'),
JMESPathCheck('length(databases)', 0)
])
# List of all failover groups on the primary server
self.cmd('sql failover-group list -g {} -s {}'
.format(s1.group, s1.name),
checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', failover_group_name),
JMESPathCheck('[0].replicationRole', 'Primary')
])
# Get Failover Group on a partner server and check if role is secondary
self.cmd('sql failover-group show -g {} -s {} -n {}'
.format(s2.group, s2.name, failover_group_name),
checks=[
JMESPathCheck('name', failover_group_name),
JMESPathCheck('readWriteEndpoint.failoverPolicy', 'Automatic'),
JMESPathCheck('readWriteEndpoint.failoverWithDataLossGracePeriodMinutes', 120),
JMESPathCheck('readOnlyEndpoint.failoverPolicy', 'Disabled'),
JMESPathCheck('replicationRole', 'Secondary'),
JMESPathCheck('length(databases)', 0)
])
# Update Failover Group
self.cmd('sql failover-group update -g {} -s {} -n {} --grace-period 3 --add-db {}'
.format(s1.group, s1.name, failover_group_name, database_name),
checks=[
JMESPathCheck('readWriteEndpoint.failoverPolicy', 'Automatic'),
JMESPathCheck('readWriteEndpoint.failoverWithDataLossGracePeriodMinutes', 180),
JMESPathCheck('readOnlyEndpoint.failoverPolicy', 'Disabled'),
JMESPathCheck('length(databases)', 1)
])
# Check if properties got propagated to secondary server
self.cmd('sql failover-group show -g {} -s {} -n {}'
.format(s2.group, s2.name, failover_group_name),
checks=[
JMESPathCheck('name', failover_group_name),
JMESPathCheck('readWriteEndpoint.failoverPolicy', 'Automatic'),
JMESPathCheck('readWriteEndpoint.failoverWithDataLossGracePeriodMinutes', 180),
JMESPathCheck('readOnlyEndpoint.failoverPolicy', 'Disabled'),
JMESPathCheck('replicationRole', 'Secondary'),
JMESPathCheck('length(databases)', 1)
])
# Check if database is created on partner side
self.cmd('sql db list -g {} -s {}'
.format(s2.group, s2.name),
checks=[
JMESPathCheck('length(@)', 2)
])
# Failover Failover Group
self.cmd('sql failover-group set-primary -g {} -s {} -n {}'
.format(s2.group, s2.name, failover_group_name))
# The failover operation is completed when new primary is promoted to primary role
# But there is a async part to make old primary a new secondary
# And we have to wait for this to complete if we are recording the test
if self.in_recording:
time.sleep(30)
# Check the roles of failover groups to confirm failover happened
self.cmd('sql failover-group show -g {} -s {} -n {}'
.format(s2.group, s2.name, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Primary')
])
self.cmd('sql failover-group show -g {} -s {} -n {}'
.format(s1.group, s1.name, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Secondary')
])
# Fail back to original server
self.cmd('sql failover-group set-primary --allow-data-loss -g {} -s {} -n {}'
.format(s1.group, s1.name, failover_group_name))
# The failover operation is completed when new primary is promoted to primary role
# But there is a async part to make old primary a new secondary
# And we have to wait for this to complete if we are recording the test
if self.in_recording:
time.sleep(30)
# Check the roles of failover groups to confirm failover happened
self.cmd('sql failover-group show -g {} -s {} -n {}'
.format(s2.group, s2.name, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Secondary')
])
self.cmd('sql failover-group show -g {} -s {} -n {}'
.format(s1.group, s1.name, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Primary')
])
# Do no-op failover to the same server
self.cmd('sql failover-group set-primary -g {} -s {} -n {}'
.format(s1.group, s1.name, failover_group_name))
# Check the roles of failover groups to confirm failover didn't happen
self.cmd('sql failover-group show -g {} -s {} -n {}'
.format(s2.group, s2.name, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Secondary')
])
self.cmd('sql failover-group show -g {} -s {} -n {}'
.format(s1.group, s1.name, failover_group_name),
checks=[
JMESPathCheck('replicationRole', 'Primary')
])
# Remove database from failover group
self.cmd('sql failover-group update -g {} -s {} -n {} --remove-db {}'
.format(s1.group, s1.name, failover_group_name, database_name),
checks=[
JMESPathCheck('readWriteEndpoint.failoverPolicy', 'Automatic'),
JMESPathCheck('readWriteEndpoint.failoverWithDataLossGracePeriodMinutes', 180),
JMESPathCheck('readOnlyEndpoint.failoverPolicy', 'Disabled'),
JMESPathCheck('length(databases)', 0)
])
# Check if database got removed
self.cmd('sql db show -g {} -s {} -n {}'
.format(s2.group, s2.name, database_name),
checks=[
JMESPathCheck('[0].failoverGroupId', 'None')
])
# Drop failover group
self.cmd('sql failover-group delete -g {} -s {} -n {}'
.format(s1.group, s1.name, failover_group_name))
# Check if failover group really got dropped
self.cmd('sql failover-group list -g {} -s {}'
.format(s1.group, s1.name),
checks=[
JMESPathCheck('length(@)', 0)
])
self.cmd('sql failover-group list -g {} -s {}'
.format(s2.group, s2.name),
checks=[
JMESPathCheck('length(@)', 0)
])
| 51.598857 | 316 | 0.566283 |
08cb8cf06f6c9990f32354dde49cad203252d2cf | 9,926 | py | Python | src/sage/tests/arxiv_0812_2725.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | ["BSL-1.0"] | 3 | 2019-07-15T13:48:24.000Z | 2019-11-08T12:31:43.000Z | src/sage/tests/arxiv_0812_2725.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | ["BSL-1.0"] | 2 | 2018-10-30T13:40:20.000Z | 2020-07-23T12:13:30.000Z | src/sage/tests/arxiv_0812_2725.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | ["BSL-1.0"] | 3 | 2020-03-29T17:13:36.000Z | 2021-05-03T18:11:28.000Z |
r"""
Sage code for computing k-distant crossing numbers.
This code accompanies the article :arxiv:`0812.2725`. It is being
submitted because of a suggestion from
http://groups.google.com/group/sage-support/msg/3ea7ed2eeab0824a.
Right now, this code only computes k-dcrossings. If you are only
interested in the distribution, this is good enough because the extended
Kasraoui-Zeng involution tells us the distribution of k-dcrossings and
k-dnestings is symmetric. It would be nice, though, to have a function
which actually performed that involution.
AUTHORS:
- Dan Drake (2008-12-15): initial version.
EXAMPLES:
The example given in the paper. Note that in this format, we omit fixed
points since they cannot create any sort of crossing. ::
sage: from sage.tests.arxiv_0812_2725 import *
sage: dcrossing([(1,5), (2,4), (4,9), (6,12), (7,10), (10,11)])
3
"""
#*****************************************************************************
# Copyright (C) 2008 Dan Drake <ddrake@member.ams.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at
# your option) any later version.
#
# See http://www.gnu.org/licenses/.
#*****************************************************************************
from six.moves import range
from sage.combinat.set_partition import SetPartitions as SetPartitions
def CompleteMatchings(n):
"""
Return a generator for the complete matchings of the set [1..n].
INPUT:
n -- nonnegative integer
OUTPUT:
A generator for the complete matchings of the set [1..n], or,
what is basically the same thing, complete matchings of the
graph K_n. Each complete matching is represented by a list of
2-element tuples.
EXAMPLES:
There are 3 complete matchings on 4 vertices::
sage: from sage.tests.arxiv_0812_2725 import *
sage: [m for m in CompleteMatchings(4)]
[[(3, 4), (1, 2)], [(2, 4), (1, 3)], [(2, 3), (1, 4)]]
There are no complete matchings on an odd number of vertices; the
number of complete matchings on an even number of vertices is a
double factorial::
sage: from sage.tests.arxiv_0812_2725 import *
sage: [len([m for m in CompleteMatchings(n)]) for n in [0..8]]
[1, 0, 1, 0, 3, 0, 15, 0, 105]
The exact behavior of CompleteMatchings(n) if n is not a nonnegative
integer depends on what [1..n] returns, and also on what range(1,
len([1..n])) is.
"""
for m in matchingsset(list(range(1, n + 1))):
yield m
def matchingsset(L):
"""
Return a generator for complete matchings of the sequence L.
This is not really meant to be called directly, but rather by
CompleteMatchings().
INPUT:
L -- a sequence. Lists, tuples, et cetera; anything that
supports len() and slicing should work.
OUTPUT:
A generator for complete matchings on K_n, where n is the length
of L and vertices are labeled by elements of L. Each matching is
represented by a list of 2-element tuples.
EXAMPLES::
sage: from sage.tests.arxiv_0812_2725 import *
sage: [m for m in matchingsset(('a', 'b', 'c', 'd'))]
[[('c', 'd'), ('a', 'b')], [('b', 'd'), ('a', 'c')], [('b', 'c'), ('a', 'd')]]
There's only one matching of the empty set/list/tuple: the empty
matching. ::
sage: [m for m in matchingsset(())]
[[]]
"""
if not L:
yield []
else:
for k in range(1, len(L)):
for m in matchingsset(L[1: k] + L[k + 1:]):
yield m + [(L[0], L[k])]
def dcrossing(m_):
"""
Return the largest k for which the given matching or set
partition has a k-distant crossing.
INPUT:
m -- a matching or set partition, as a list of 2-element tuples
representing the edges. You'll need to call setp_to_edges() on
the objects returned by SetPartitions() to put them into the
proper format.
OUTPUT:
The largest k for which the object has a k-distant crossing.
Matchings and set partitions with no crossings at all yield -1.
EXAMPLES:
The main example from the paper::
sage: from sage.tests.arxiv_0812_2725 import *
sage: dcrossing(setp_to_edges(Set(map(Set, [[1,5],[2,4,9],[3],[6,12],[7,10,11],[8]]))))
3
A matching example::
sage: from sage.tests.arxiv_0812_2725 import *
sage: dcrossing([(4, 7), (3, 6), (2, 5), (1, 8)])
2
TESTS:
The empty matching and set partition are noncrossing::
sage: dcrossing([])
-1
sage: dcrossing(Set([]))
-1
One edge::
sage: dcrossing([Set((1,2))])
-1
sage: dcrossing(Set([Set((1,2))]))
-1
Set partition with block of size >= 3 is always at least
0-dcrossing::
sage: dcrossing(setp_to_edges(Set([Set((1,2,3))])))
0
"""
d = -1
m = list(m_)
while len(m):
e1_ = m.pop()
for e2_ in m:
e1, e2 = sorted(e1_), sorted(e2_)
if (e1[0] < e2[0] and e2[0] <= e1[1] and e1[1] < e2[1] and
e1[1] - e2[0] > d):
d = e1[1] - e2[0]
if (e2[0] < e1[0] and e1[0] <= e2[1] and e2[1] < e1[1] and
e2[1] - e1[0] > d):
d = e2[1] - e1[0]
return d
def setp_to_edges(p):
"""
Transform a set partition into a list of edges.
INPUT:
p -- a Sage set partition.
OUTPUT:
A list of non-loop edges of the set partition. As this code just
works with crossings, we can ignore the loops.
EXAMPLES:
The main example from the paper::
sage: from sage.tests.arxiv_0812_2725 import *
sage: sorted(setp_to_edges(Set(map(Set, [[1,5],[2,4,9],[3],[6,12],[7,10,11],[8]]))))
[[1, 5], [2, 4], [4, 9], [6, 12], [7, 10], [10, 11]]
"""
q = [sorted(list(b)) for b in p]
ans = []
for b in q:
for n in range(len(b) - 1):
ans.append(b[n: n + 2])
return ans
def dcrossvec_setp(n):
"""
Return a list with the distribution of k-dcrossings on set partitions of [1..n].
INPUT:
n -- a nonnegative integer.
OUTPUT:
A list whose k'th entry is the number of set partitions p for
which dcrossing(p) = k. For example, let L = dcrossvec_setp(3).
We have L = [1, 0, 4]. L[0] is 1 because there's 1 partition of
[1..3] that has 0-dcrossing: [(1, 2, 3)].
One tricky bit is that noncrossing matchings get put at the end,
because L[-1] is the last element of the list. Above, we have
L[-1] = 4 because the other four set partitions are all
d-noncrossing. Because of this, you should not think of the last
element of the list as having index n-1, but rather -1.
EXAMPLES::
sage: from sage.tests.arxiv_0812_2725 import *
sage: dcrossvec_setp(3)
[1, 0, 4]
sage: dcrossvec_setp(4)
[5, 1, 0, 9]
The one set partition of 1 element is noncrossing, so the last
element of the list is 1::
sage: dcrossvec_setp(1)
[1]
"""
vec = [0] * n
for p in SetPartitions(n):
vec[dcrossing(setp_to_edges(p))] += 1
return vec
def dcrossvec_cm(n):
"""
Return a list with the distribution of k-dcrossings on complete matchings on n vertices.
INPUT:
n -- a nonnegative integer.
OUTPUT:
A list whose k'th entry is the number of complete matchings m
for which dcrossing(m) = k. For example, let L =
dcrossvec_cm(4). We have L = [0, 1, 0, 2]. L[1] is 1 because
there's one matching on 4 vertices that is 1-dcrossing: [(2, 4),
(1, 3)]. L[0] is zero because dcrossing() returns the *largest*
k for which the matching has a dcrossing, and 0-dcrossing is
equivalent to 1-dcrossing for complete matchings.
One tricky bit is that noncrossing matchings get put at the end,
because L[-1] is the last element of the list. Because of this, you
should not think of the last element of the list as having index
n-1, but rather -1.
If n is negative, you get silly results. Don't use them in your
next paper. :)
EXAMPLES:
The single complete matching on 2 vertices has no crossings, so the
only nonzero entry of the list (the last entry) is 1::
sage: from sage.tests.arxiv_0812_2725 import *
sage: dcrossvec_cm(2)
[0, 1]
Similarly, the empty matching has no crossings::
sage: dcrossvec_cm(0)
[1]
For odd n, there are no complete matchings, so the list has all
zeros::
sage: dcrossvec_cm(5)
[0, 0, 0, 0, 0]
sage: dcrossvec_cm(4)
[0, 1, 0, 2]
"""
vec = [0] * max(n, 1)
for m in CompleteMatchings(n):
vec[dcrossing(m)] += 1
return vec
def tablecolumn(n, k):
"""
Return column n of Table 1 or 2 from the paper :arxiv:`0812.2725`.
INPUT:
n -- positive integer.
k -- integer for which table you want: Table 1 is complete
matchings, Table 2 is set partitions.
OUTPUT:
The n'th column of the table as a list. This is basically just the
partial sums of dcrossvec_{cm,setp}(n).
    tablecolumn(1, 2) incorrectly returns [], instead of [1], but you
probably don't need this function to work through n = 1.
EXAMPLES:
Complete matchings::
sage: from sage.tests.arxiv_0812_2725 import *
sage: tablecolumn(2, 1)
[1]
sage: tablecolumn(6, 1)
[5, 5, 11, 14, 15]
Set partitions::
sage: tablecolumn(5, 2)
[21, 42, 51, 52]
sage: tablecolumn(2, 2)
[2]
"""
if k == 1:
v = dcrossvec_cm(n)
else:
v = dcrossvec_setp(n)
i = v[-1]
return [i + sum(v[:k]) for k in range(len(v) - 1)]
| 27.803922 | 95 | 0.597925 |
cb95708b21e8a539d8d229f7db0e554aa99bbe3d | 277 | py | Python | giico/quality_control_and_material_testing/doctype/water_absorption_test_for_concrete_cube/water_absorption_test_for_concrete_cube.py | thispl/giico | 14c5631639ab56a586a7962be9871d722c20e205 | ["MIT"] | null | null | null | giico/quality_control_and_material_testing/doctype/water_absorption_test_for_concrete_cube/water_absorption_test_for_concrete_cube.py | thispl/giico | 14c5631639ab56a586a7962be9871d722c20e205 | ["MIT"] | null | null | null | giico/quality_control_and_material_testing/doctype/water_absorption_test_for_concrete_cube/water_absorption_test_for_concrete_cube.py | thispl/giico | 14c5631639ab56a586a7962be9871d722c20e205 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class Waterabsorptiontestforconcretecube(Document):
pass
| 25.181818 | 51 | 0.790614 |
23d09c166eab13c4ebf076f881703686dc6bfa5f | 525 | py | Python | algs/0028_Implement_strStr.py | torpedoallen/leetcodes | 1c5042aa82c4d959383562bd77fd8829c002375d | ["MIT"] | null | null | null | algs/0028_Implement_strStr.py | torpedoallen/leetcodes | 1c5042aa82c4d959383562bd77fd8829c002375d | ["MIT"] | null | null | null | algs/0028_Implement_strStr.py | torpedoallen/leetcodes | 1c5042aa82c4d959383562bd77fd8829c002375d | ["MIT"] | null | null | null |
class Solution(object):
def strStr(self, haystack, needle):
"""
:type haystack: str
:type needle: str
:rtype: int
"""
m = len(haystack)
n = len(needle)
i, j = 0, 0
if n == 0:
return 0
while i < m:
if haystack[i] == needle[j]:
j += 1
if j == n:
return i - j + 1
else:
i = i - j
j = 0
i += 1
return -1
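# A minimal usage sketch for the Solution class above, run only when the file
# is executed directly. The inputs are ordinary examples of the substring
# search problem; the expected results are noted in the comments.
if __name__ == '__main__':
    solution = Solution()
    print(solution.strStr("hello", "ll"))    # 2: "ll" first occurs at index 2
    print(solution.strStr("aaaaa", "bba"))   # -1: "bba" never occurs
    print(solution.strStr("abc", ""))        # 0: an empty needle matches at index 0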
| 21.875 | 40 | 0.337143 |
2b6b383d6ee0db4e68cb33b81d02c2fa3766087f | 568 | py | Python | app/regTest/regTestMaster.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | ["MIT"] | null | null | null | app/regTest/regTestMaster.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | ["MIT"] | null | null | null | app/regTest/regTestMaster.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | ["MIT"] | 2 | 2021-04-04T02:45:59.000Z | 2022-03-19T09:41:39.000Z |
from hydroDL import pathSMAP, master
import os
optData = master.updateOpt(
master.default.optDataCsv,
path=pathSMAP['DB_L3_NA'],
subset='CONUSv4f1',
dateRange=[20150401, 20160401])
optModel = master.default.optLstm
optLoss = master.default.optLoss
optTrain = master.default.optTrainSMAP
out = os.path.join(pathSMAP['Out_L3_Global'], 'regTest')
masterDict = master.wrapMaster(out, optData, optModel, optLoss, optTrain)
# master.train(masterDict, overwrite=True)
pred = master.test(
out, tRange=[20160401, 20170401], subset='CONUSv4f1', epoch=500)
| 31.555556 | 73 | 0.753521 |
dbefcddbdd01a5456f57137ee460cc0520ac1ab7 | 719 | py | Python | readthedocs/core/management/commands/set_metadata.py | kennethlarsen/readthedocs.org | 735d630d83f79ae24772d10e66fd35b8f5675a30 | ["MIT"] | 2 | 2018-01-14T14:04:00.000Z | 2021-02-07T19:25:45.000Z | readthedocs/core/management/commands/set_metadata.py | Alig1493/readthedocs.org | c37b00995c1bbc5ee51d3552ef176546373bb912 | ["MIT"] | 4 | 2021-03-31T20:17:21.000Z | 2021-12-13T20:49:19.000Z | readthedocs/core/management/commands/set_metadata.py | Alig1493/readthedocs.org | c37b00995c1bbc5ee51d3552ef176546373bb912 | ["MIT"] | 6 | 2019-02-13T16:08:41.000Z | 2020-03-12T14:17:14.000Z |
"""Generate metadata for all projects"""
from __future__ import absolute_import
import logging
from django.core.management.base import BaseCommand
from readthedocs.projects import tasks
from readthedocs.projects.models import Project
from readthedocs.core.utils import broadcast
log = logging.getLogger(__name__)
class Command(BaseCommand):
help = __doc__
def handle(self, *args, **options):
queryset = Project.objects.all()
for p in queryset:
log.info("Generating metadata for %s", p)
try:
broadcast(type='app', task=tasks.update_static_metadata, args=[p.pk])
except Exception:
log.exception('Build failed for %s', p)
| 26.62963 | 85 | 0.684284 |
bd247e2379b92bff04067e6132c256328e63c183 | 9,085 | py | Python | infoblox_netmri/client.py | rexyim/infoblox-netmri | 193eabce5db11113f5e991b6cdee1ddc005827ed | ["Apache-2.0"] | null | null | null | infoblox_netmri/client.py | rexyim/infoblox-netmri | 193eabce5db11113f5e991b6cdee1ddc005827ed | ["Apache-2.0"] | null | null | null | infoblox_netmri/client.py | rexyim/infoblox-netmri | 193eabce5db11113f5e991b6cdee1ddc005827ed | ["Apache-2.0"] | null | null | null |
# Copyright 2015 Infoblox Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import re
from os.path import isfile
import requests
from requests.exceptions import HTTPError
logger = logging.getLogger("infoblox_netmri")
class InfobloxNetMRI(object):
def __init__(self, host, username, password, api_version="auto",
use_ssl=True, ssl_verify=False, http_pool_connections=5,
http_pool_maxsize=10, max_retries=5):
# Process ssl parameters
if use_ssl:
self.protocol = "https"
if isinstance(ssl_verify, bool):
self.ssl_verify = ssl_verify
else:
opt = str(ssl_verify).lower()
if opt in ['yes', 'on', 'true']:
self.ssl_verify = True
elif opt in ['no', 'off', 'false']:
self.ssl_verify = False
elif isfile(ssl_verify):
self.ssl_verify = ssl_verify
else:
raise ValueError("ssl_verify is not a valid boolean value,"
"nor a valid path to a CA bundle file")
else:
self.protocol = "http"
self.ssl_verify = False
# Process host
if re.match(r"^[\w.-]+$", host):
self.host = host
else:
raise ValueError("Hostname is not a valid hostname")
# Authentication parameters
self.username = username
self.password = password
self._is_authenticated = False
# HTTP session settings
self.session = requests.Session()
adapter = requests.adapters.HTTPAdapter(
max_retries=max_retries,
pool_connections=http_pool_connections,
pool_maxsize=http_pool_maxsize)
self.session.mount('http://', adapter)
self.session.mount('https://', adapter)
self.session.verify = self.ssl_verify
# API version
if re.match(r'^(?:\d+\.)?(?:\d+\.)?(?:\*|\d+)$', api_version):
self.api_version = api_version
elif api_version.lower() == "auto":
self.api_version = self._get_api_version()
else:
raise ValueError("Incorrect API version")
logger.debug("Using API version %s" % self.api_version)
def _make_request(self, url, method="get", data=None, extra_headers=None):
"""Prepares the request, checks for authentication and retries in case of issues
Args:
url (str): URL of the request
method (str): Any of "get", "post", "delete"
data (any): Possible extra data to send with the request
extra_headers (dict): Possible extra headers to send along in the request
Returns:
dict
"""
attempts = 0
        # Allow a single retry so an expired session can re-authenticate below.
        while attempts < 2:
# Authenticate first if not authenticated already
if not self._is_authenticated:
self._authenticate()
# Make the request and check for authentication errors
# This allows us to catch session timeouts for long standing connections
try:
return self._send_request(url, method, data, extra_headers)
except HTTPError as e:
if e.response.status_code == 403:
logger.info("Authenticated session against NetMRI timed out. Retrying.")
self._is_authenticated = False
attempts += 1
else:
# re-raise other HTTP errors
raise
def _send_request(self, url, method="get", data=None, extra_headers=None):
"""Performs a given request and returns a json object
Args:
url (str): URL of the request
method (str): Any of "get", "post", "delete"
data (any): Possible extra data to send with the request
extra_headers (dict): Possible extra headers to send along in the request
Returns:
dict
"""
headers = {'Content-type': 'application/json'}
if isinstance(extra_headers, dict):
headers.update(extra_headers)
if not data or "password" not in data:
logger.debug("Sending {method} request to {url} with data {data}".format(
method=method.upper(), url=url, data=data)
)
r = self.session.request(method, url, headers=headers, data=data)
r.raise_for_status()
return r.json()
def _get_api_version(self):
"""Fetches the most recent API version
Returns:
str
"""
url = "{base_url}/api/server_info".format(base_url=self._base_url())
server_info = self._make_request(url=url, method="get")
return server_info["latest_api_version"]
def _authenticate(self):
""" Perform an authentication against NetMRI"""
url = "{base_url}/api/authenticate".format(base_url=self._base_url())
data = json.dumps({'username': self.username, "password": self.password})
# Bypass authentication check in make_request by using _send_request
logger.debug("Authenticating against NetMRI")
self._send_request(url, method="post", data=data)
self._is_authenticated = True
def _controller_name(self, objtype):
"""Determines the controller name for the object's type
Args:
objtype (str): The object type
Returns:
A string with the controller name
"""
# would be better to use inflect.pluralize here, but would add a dependency
if objtype.endswith('y'):
return objtype[:-1] + 'ies'
if objtype[-1] in 'sx' or objtype[-2:] in ['sh', 'ch']:
return objtype + 'es'
if objtype.endswith('an'):
return objtype[:-2] + 'en'
return objtype + 's'
def _base_url(self):
"""Generate the base URL for the connection with NetMRI
Returns:
A string containing the base URL
"""
return "{proto}://{host}".format(
proto=self.protocol,
host=self.host
)
def _object_url(self, objtype, objid):
"""Generate the URL for the specified object
Args:
objtype (str): The object's type
objid (int): The objects ID
Returns:
A string containing the URL of the object
"""
return "{base_url}/api/{api_version}/{controller}/{obj_id}".format(
base_url=self._base_url(),
api_version=self.api_version,
controller=self._controller_name(objtype),
obj_id=objid
)
def _method_url(self, method_name):
"""Generate the URL for the requested method
Args:
method_name (str): Name of the method
Returns:
A string containing the URL of the method
"""
return "{base_url}/api/{api}/{method}".format(
base_url=self._base_url(),
api=self.api_version,
method=method_name
)
def api_request(self, method_name, params):
"""Execute an arbitrary method.
Args:
method_name (str): include the controller name: 'devices/search'
params (dict): the method parameters
Returns:
A dict with the response
Raises:
requests.exceptions.HTTPError
"""
url = self._method_url(method_name)
data = json.dumps(params)
return self._make_request(url=url, method="post", data=data)
def show(self, objtype, objid):
"""Query for a specific resource by ID
Args:
objtype (str): object type, e.g. 'device', 'interface'
objid (int): object ID (DeviceID, etc.)
Returns:
A dict with that object
Raises:
requests.exceptions.HTTPError
"""
url = self._object_url(objtype, int(objid))
return self._make_request(url, method="get")
def delete(self, objtype, objid):
"""Destroy a specific resource by ID
Args:
objtype (str): object type, e.g. 'script'
objid (int): object ID
Returns:
A dict with the response
Raises:
requests.exceptions.HTTPError
"""
url = self._object_url(objtype, int(objid))
return self._make_request(url, method="delete")
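# A minimal usage sketch for the client defined above. The hostname and
# credentials are placeholder values, and the 'device' object type plus the
# search parameters are illustrative only; the calls map directly onto
# show() and api_request() as implemented in this class.
if __name__ == '__main__':
    client = InfobloxNetMRI(
        host='netmri.example.com',   # placeholder appliance hostname
        username='admin',            # placeholder credentials
        password='secret',
        api_version='auto',          # resolves the latest version via /api/server_info
    )
    # show() builds /api/<version>/devices/<id>; 'device' is pluralized to the
    # 'devices' controller by _controller_name().
    device = client.show('device', 1)
    # api_request() posts an arbitrary controller method, e.g. the
    # 'devices/search' example mentioned in the api_request() docstring.
    results = client.api_request('devices/search', {'limit': 10})
    print(device, results)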
| 34.942308 | 92 | 0.581948 |
7a4329648f63e6390349705354abc6f220b89103 | 5,260 | py | Python | quark/plugin_modules/ip_addresses.py | sumanthns/quark | 4f0d00608827271439ee0c2dfc22729dbbb17763 | ["Apache-2.0"] | null | null | null | quark/plugin_modules/ip_addresses.py | sumanthns/quark | 4f0d00608827271439ee0c2dfc22729dbbb17763 | ["Apache-2.0"] | null | null | null | quark/plugin_modules/ip_addresses.py | sumanthns/quark | 4f0d00608827271439ee0c2dfc22729dbbb17763 | ["Apache-2.0"] | null | null | null |
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import exceptions
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from oslo.config import cfg
import webob
from quark.db import api as db_api
from quark import exceptions as quark_exceptions
from quark import plugin_views as v
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ipam_driver = (importutils.import_class(CONF.QUARK.ipam_driver))()
def get_ip_addresses(context, **filters):
LOG.info("get_ip_addresses for tenant %s" % context.tenant_id)
filters["_deallocated"] = False
addrs = db_api.ip_address_find(context, scope=db_api.ALL, **filters)
return [v._make_ip_dict(ip) for ip in addrs]
def get_ip_address(context, id):
LOG.info("get_ip_address %s for tenant %s" %
(id, context.tenant_id))
addr = db_api.ip_address_find(context, id=id, scope=db_api.ONE)
if not addr:
raise quark_exceptions.IpAddressNotFound(addr_id=id)
return v._make_ip_dict(addr)
def create_ip_address(context, ip_address):
LOG.info("create_ip_address for tenant %s" % context.tenant_id)
port = None
ip_dict = ip_address["ip_address"]
port_ids = ip_dict.get('port_ids')
network_id = ip_dict.get('network_id')
device_ids = ip_dict.get('device_ids')
ip_version = ip_dict.get('version')
ip_address = ip_dict.get('ip_address')
ports = []
if device_ids and not network_id:
raise exceptions.BadRequest(
resource="ip_addresses",
msg="network_id is required if device_ids are supplied.")
with context.session.begin():
if network_id and device_ids:
for device_id in device_ids:
port = db_api.port_find(
context, network_id=network_id, device_id=device_id,
tenant_id=context.tenant_id, scope=db_api.ONE)
ports.append(port)
elif port_ids:
for port_id in port_ids:
port = db_api.port_find(context, id=port_id,
tenant_id=context.tenant_id,
scope=db_api.ONE)
ports.append(port)
if not ports:
raise exceptions.PortNotFound(port_id=port_ids,
net_id=network_id)
address = ipam_driver.allocate_ip_address(
context,
port['network_id'],
port['id'],
CONF.QUARK.ipam_reuse_after,
ip_version,
ip_addresses=[ip_address])
for port in ports:
port["ip_addresses"].append(address)
return v._make_ip_dict(address)
def _get_deallocated_override():
"""This function exists to mock and for future requirements if needed."""
return '2000-01-01 00:00:00'
def update_ip_address(context, id, ip_address):
LOG.info("update_ip_address %s for tenant %s" %
(id, context.tenant_id))
with context.session.begin():
address = db_api.ip_address_find(
context, id=id, tenant_id=context.tenant_id, scope=db_api.ONE)
if not address:
raise exceptions.NotFound(
message="No IP address found with id=%s" % id)
reset = ip_address['ip_address'].get('reset_allocation_time',
False)
if reset and address['deallocated'] == 1:
if context.is_admin:
LOG.info("IP's deallocated time being manually reset")
address['deallocated_at'] = _get_deallocated_override()
else:
msg = "Modification of reset_allocation_time requires admin"
raise webob.exc.HTTPForbidden(detail=msg)
old_ports = address['ports']
port_ids = ip_address['ip_address'].get('port_ids')
if port_ids is None:
return v._make_ip_dict(address)
for port in old_ports:
port['ip_addresses'].remove(address)
if port_ids:
ports = db_api.port_find(
context, tenant_id=context.tenant_id, id=port_ids,
scope=db_api.ALL)
# NOTE: could be considered inefficient because we're converting
# to a list to check length. Maybe revisit
if len(ports) != len(port_ids):
raise exceptions.NotFound(
                    message="No ports found with ids=%s" % port_ids)
for port in ports:
port['ip_addresses'].extend([address])
else:
address["deallocated"] = 1
return v._make_ip_dict(address)
| 35.782313 | 78 | 0.629278 |
546cdb12f3d0c9420cf10d3d2377f9daac941819 | 259 | py | Python | college/college/doctype/achievements/achievements.py | ashithamudraje/college | fcfb5fa47c8815d659821390ea9c85d598d34c19 | ["MIT"] | 1 | 2019-09-08T19:27:27.000Z | 2019-09-08T19:27:27.000Z | college/college/doctype/achievements/achievements.py | ashithamudraje/college | fcfb5fa47c8815d659821390ea9c85d598d34c19 | ["MIT"] | null | null | null | college/college/doctype/achievements/achievements.py | ashithamudraje/college | fcfb5fa47c8815d659821390ea9c85d598d34c19 | ["MIT"] | 4 | 2019-09-23T16:42:12.000Z | 2020-05-01T06:56:57.000Z |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, mvit ise and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class Achievements(Document):
pass
| 23.545455 | 49 | 0.772201 |
16e1c91e5ea464a94a33aed016e7e63e90194e53 | 1,632 | py | Python | fleet-server/init-fleet.py | cekay79/otel-with-java | 2de11eac1192d6f44badd93e227ecd7641331db8 | ["Apache-2.0"] | null | null | null | fleet-server/init-fleet.py | cekay79/otel-with-java | 2de11eac1192d6f44badd93e227ecd7641331db8 | ["Apache-2.0"] | null | null | null | fleet-server/init-fleet.py | cekay79/otel-with-java | 2de11eac1192d6f44badd93e227ecd7641331db8 | ["Apache-2.0"] | null | null | null |
import os
import time
import requests
from requests.auth import HTTPBasicAuth
es_url = os.getenv("ELASTICSEARCH_HOST")
kb_url = os.getenv("KIBANA_HOST")
es_user = os.getenv("ELASTICSEARCH_USERNAME")
es_pass = os.getenv("ELASTICSEARCH_PASSWORD")
basic_auth = HTTPBasicAuth(es_user, es_pass)
default_headers = {
"Content-Type": "application/json",
"kbn-xsrf": "true"
}
wait = True
while wait:
try:
res = requests.get(
url=es_url,
auth=basic_auth,
headers=default_headers
)
if res.status_code == 200:
wait = False
    except requests.exceptions.RequestException:
        print("Waiting for Elasticsearch")
        time.sleep(2)  # brief pause so the readiness poll does not hammer Elasticsearch
token_url = "{}/_security/oauth2/token".format(es_url)
data_json = {"grant_type": "client_credentials"}
res = requests.post(
url=token_url,
auth=basic_auth,
json=data_json,
headers=default_headers
)
es_access_token = res.json()['access_token']
service_token_url = "{}/_security/service/elastic/fleet-server/credential/token".format(es_url)
data_json = {"grant_type": "client_credentials"}
default_headers['Authorization'] = "Bearer {}".format(es_access_token)
res = requests.post(
    url=service_token_url,
headers=default_headers
)
fleet_service_token = res.json()['token']['value']
print("ES_ACCESS_TOKEN={}".format(es_access_token))
print("FLEET_SERVICE_TOKEN={}".format(fleet_service_token))
with open("/out/environment", "w") as f:
f.write("export ES_ACCESS_TOKEN={}\n".format(es_access_token))
f.write("export FLEET_SERVER_SERVICE_TOKEN={}\n".format(fleet_service_token))
f.write("export KIBANA_FLEET_SERVICE_TOKEN={}\n".format(fleet_service_token))
| 29.672727 | 94 | 0.710784 |
b2842cd5d8466b5c8ac9d5a60b454d7f89044c12 | 3,659 | py | Python | smoothauth/smoothauth/settings/base.py | kellerjustin/smoothauth | 0ba343e999851e43416f203c0489e7b72e3dc30a | ["MIT"] | null | null | null | smoothauth/smoothauth/settings/base.py | kellerjustin/smoothauth | 0ba343e999851e43416f203c0489e7b72e3dc30a | ["MIT"] | null | null | null | smoothauth/smoothauth/settings/base.py | kellerjustin/smoothauth | 0ba343e999851e43416f203c0489e7b72e3dc30a | ["MIT"] | null | null | null |
"""
Django settings for smoothauth project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
from django.core.exceptions import ImproperlyConfigured
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
def get_env_variable(var_name):
"""Get the env variable or return exception."""
try:
return os.environ[var_name]
except KeyError:
error_msg = 'Set the {} environment variable'.format(var_name)
raise ImproperlyConfigured(error_msg)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_env_variable('SMOOTHAUTH_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'api.apps.ApiConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'smoothauth.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'smoothauth.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':
'django.contrib.auth.'
'password_validation.UserAttributeSimilarityValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS':
'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
| 25.061644 | 79 | 0.68762 |
3fa7a31c4d6a485804c49317e7c90ba2097a41f4 | 3,738 | py | Python | rl_visualization/app.py | LucasAlegre/rl-visualization | b20506c3417190576156e77a9dac5d872b99ffa3 | ["MIT"] | null | null | null | rl_visualization/app.py | LucasAlegre/rl-visualization | b20506c3417190576156e77a9dac5d872b99ffa3 | ["MIT"] | null | null | null | rl_visualization/app.py | LucasAlegre/rl-visualization | b20506c3417190576156e77a9dac5d872b99ffa3 | ["MIT"] | null | null | null |
from flask import Flask, send_file, make_response, render_template, request
import os
from datetime import datetime
from threading import Lock
mutex = Lock()
class Plot:
def __init__(self, title, url):
self.title = title
self.url = url
def start_app(env):
app = Flask(__name__)
BASE_URL = "http://localhost:5000/"
plots_url = {
'Q-table': 'plots/q-table',
'Visit Count': 'plots/visitcount',
'Rewards': 'plots/rewards',
'Episode Rewards': 'plots/episoderewards',
#'Epsilon': 'plots/epsilon',
'Features Distributions': 'plots/featuresdistribution',
'Actions Distributions': 'plots/actionsdistribution'
}
for plot in env.user_plots.keys():
        plots_url[plot] = 'userplots/' + plot.lower()
@app.route('/')
def index():
delay = request.args.get('delay')
if delay is not None:
env.delay = int(delay)
for plot in env.user_plots.keys():
plots_url[plot] = 'userplots/' + plot
plots = env.get_available_plots()
return render_template('index.html', base_url=BASE_URL, plots=[Plot(p, plots_url[p]) for p in plots], refresh_time=env.refresh_time)
@app.route('/userplots/<userplot>', methods=['GET'])
def user_plot(userplot):
mutex.acquire()
bytes_obj = env.get_userplot(userplot)
mutex.release()
return send_file(bytes_obj, attachment_filename=userplot+'.png', mimetype='image/png')
@app.route('/plots/visitcount', methods=['GET'])
def visitcount():
mutex.acquire()
bytes_obj = env.get_visitcount()
mutex.release()
return send_file(bytes_obj, attachment_filename='visitcount.png', mimetype='image/png')
@app.route('/plots/q-table', methods=['GET'])
def q_table():
mutex.acquire()
bytes_obj = env.get_qtable_png()
mutex.release()
return send_file(bytes_obj, attachment_filename='qtable.png', mimetype='image/png')
@app.route('/plots/rewards', methods=['GET'])
def rewards():
mutex.acquire()
bytes_obj = env.get_rewards()
mutex.release()
return send_file(bytes_obj, attachment_filename='rewards.png', mimetype='image/png')
@app.route('/plots/episoderewards', methods=['GET'])
def episoderewards():
mutex.acquire()
bytes_obj = env.get_episoderewards()
mutex.release()
return send_file(bytes_obj, attachment_filename='episoderewards.png', mimetype='image/png')
""" @app.route('/plots/epsilon', methods=['GET'])
def epsilon():
mutex.acquire()
bytes_obj = env.get_epsilon()
mutex.release()
return send_file(bytes_obj, attachment_filename='epsilon.png', mimetype='image/png')"""
@app.route('/plots/featuresdistribution', methods=['GET'])
def featuresdistribution():
mutex.acquire()
bytes_obj = env.get_featuresdistribution()
mutex.release()
return send_file(bytes_obj, attachment_filename='featuresdistribution.png', mimetype='image/png')
@app.route('/plots/actionsdistribution', methods=['GET'])
def actionsdistribution():
mutex.acquire()
bytes_obj = env.get_actionsdistribution()
mutex.release()
return send_file(bytes_obj, attachment_filename='actionsdistribution.png', mimetype='image/png')
"""for plot in env.user_plots:
app.add_url_rule(
rule='/plots/'+plot.lowercase(),
endpoint='plots/user_func',
) """
app.jinja_env.auto_reload = True
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.run(debug=False)
| 31.948718 | 140 | 0.635902 |
0d89c3b50ec89219d4182093f496e5b181bb25e5 | 3,502 | py | Python | bot/modules/message_filter.py | monkeydg/POG-bot | 97ced2c4b8f8709887b3d0828484e1dd1128dc1f | ["MIT"] | 2 | 2020-09-24T14:56:50.000Z | 2021-04-15T16:12:36.000Z | bot/modules/message_filter.py | monkeydg/POG-bot | 97ced2c4b8f8709887b3d0828484e1dd1128dc1f | ["MIT"] | 12 | 2021-04-28T15:33:34.000Z | 2022-03-29T10:10:05.000Z | bot/modules/message_filter.py | monkeydg/POG-bot | 97ced2c4b8f8709887b3d0828484e1dd1128dc1f | ["MIT"] | 6 | 2020-08-01T13:38:40.000Z | 2020-08-14T20:35:17.000Z |
"""Tiny module used as a slight spam protector
"""
from display import AllStrings as disp, ContextWrapper
from discord import DMChannel, NotFound
import modules.config as cfg
from modules.loader import is_all_locked
from modules.roles import is_admin
from asyncio import sleep
import modules.spam_checker as spam_checker
from modules.dm_handler import on_dm
class FakeMember:
def __init__(self, id):
self.id = id
self.name = "Unknown"
@property
def mention(self):
return f'<@{self.id}>'
async def on_message(client, message):
# if bot, do nothing
if message.author == client.user:
return
# if dm, send in staff
if isinstance(message.channel, DMChannel):
await on_dm(message)
return
# If message not in the bot's area of action
if message.channel.id not in cfg.channels_list:
return
if len(message.content) == 0:
return
if message.content == cfg.emojis['info']:
message.content = "=info"
if message.content[0] != cfg.general["command_prefix"]:
return
# If bot is locked
if is_all_locked():
if not is_admin(message.author):
return
# Admins can still use bot when locked
# Save actual author
actual_author = message.author
# Check if too many requests from this user:
if await spam_checker.is_spam(message.author, message.channel):
return
try:
# Make the message lower-case:
if not message.content.lower().startswith("=rename"):
message.content = message.content.lower()
message.content = message.content.replace(",", " ").replace("/", " ").replace(";", " ")
# Split on whitespaces
args = message.content.split()
new_args = list()
for arg in args:
if '@' in arg:
continue
try:
arg_int = int(arg)
except ValueError:
pass
else:
if arg_int >= 21154535154122752: # minimum number for discord id
member = message.channel.guild.get_member(arg_int)
if member:
message.mentions.append(member)
continue
try:
member = await message.channel.guild.fetch_member(arg_int)
except NotFound:
message.mentions.append(FakeMember(arg_int))
continue
if member:
message.mentions.append(member)
continue
new_args.append(arg)
message.content = " ".join(new_args)
# Check for =as command
if is_admin(message.author) and message.content[0:3] == "=as":
try:
message.author = message.mentions[0]
del message.mentions[0]
i = message.content[1:].index('=')
message.content = message.content[i+1:]
except (ValueError, IndexError):
ctx = ContextWrapper.wrap(message.channel, author=actual_author)
await disp.WRONG_USAGE.send(ctx, "as")
spam_checker.unlock(actual_author.id)
return
await client.process_commands(message) # if not spam, processes
# Call finished, we can release user
await sleep(0.5)
finally:
spam_checker.unlock(actual_author.id)
| 29.677966 | 95 | 0.573101 |
b6e978b75b76a8aaefe3d4a7b3caaf2ccdb70186 | 389 | py | Python | apps/reporting/migrations/0007_alter_pentestreport_options.py | blockomat2100/vulnman | 835ff3aae1168d8e2fa5556279bc86efd2e46472 | ["MIT"] | 3 | 2021-12-22T07:02:24.000Z | 2022-01-27T20:19:11.000Z | apps/reporting/migrations/0007_alter_pentestreport_options.py | vulnman/vulnman | d48ee022bc0e4368060a990a527b1c7a5e437504 | ["MIT"] | 44 | 2021-12-14T07:24:29.000Z | 2022-03-23T07:01:16.000Z | apps/reporting/migrations/0007_alter_pentestreport_options.py | blockomat2100/vulnman | 835ff3aae1168d8e2fa5556279bc86efd2e46472 | ["MIT"] | 1 | 2022-01-21T16:29:56.000Z | 2022-01-21T16:29:56.000Z |
# Generated by Django 4.0.3 on 2022-04-11 04:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reporting', '0006_alter_reportsection_unique_together_and_more'),
]
operations = [
migrations.AlterModelOptions(
name='pentestreport',
options={'ordering': ['-date_created']},
),
]
| 21.611111 | 75 | 0.632391 |
cb98f04c0131b0dfd861a3e1cce5b5e31bf5b8da | 243 | py | Python | example.py | JLyons1985/SmartMirrorServer | 417540280dee57b7e39004e6891f13835aa15ced | ["MIT"] | null | null | null | example.py | JLyons1985/SmartMirrorServer | 417540280dee57b7e39004e6891f13835aa15ced | ["MIT"] | null | null | null | example.py | JLyons1985/SmartMirrorServer | 417540280dee57b7e39004e6891f13835aa15ced | ["MIT"] | null | null | null |
# -*- coding: UTF-8 -*-
# Just importing the Forismatic class
from forismatic import Forismatic
# Initializing manager
f = Forismatic()
# Getting Quote object & printing quote and author
q = f.get_quote()
print(u'%s\t%s' % (q.quote, q.author))
| 22.090909 | 50 | 0.703704 |
75dd9ca179c269e38b861c3eece4cc7f9a2e059f | 2,208 | py | Python | mkdocs_combine/page.py | kellpossible/mkdocs-combine | b9db94978ac960aa160abdc268906a65f8625f5e | ["Apache-2.0"] | null | null | null | mkdocs_combine/page.py | kellpossible/mkdocs-combine | b9db94978ac960aa160abdc268906a65f8625f5e | ["Apache-2.0"] | 3 | 2018-04-19T23:41:12.000Z | 2018-04-25T05:32:57.000Z | mkdocs_combine/page.py | kellpossible/mkdocs-combine | b9db94978ac960aa160abdc268906a65f8625f5e | ["Apache-2.0"] | null | null | null |
from mkdocs_combine.slugify import slugify
class Page(object):
def __init__(self, title, parent=None, file_path=None, is_section=False):
self.title = title
self.parent = parent
self.file_path = file_path
self.is_section = is_section
self.heading_index = {}
self.headings = []
def get_slug(self):
slugs = []
current_page = self
while(current_page is not None):
slugs.insert(0, slugify(current_page.get_title()))
current_page = current_page.get_parent()
if len(slugs) == 0:
return None
slug = "--".join(slugs)
return slug
def get_is_section(self):
return self.is_section
def get_title(self):
return self.title
def get_parent(self):
return self.parent
def get_file_path(self):
return self.file_path
def add_heading(self, heading):
if len(self.headings) == 0:
# title heading
self.heading_index[self.get_slug()] = heading
else:
while heading.get_unique_title_slug() in self.heading_index:
title_id = heading.get_unique_title_id()
heading.set_unique_title_id(title_id + 1)
self.headings.append(heading)
self.heading_index[heading.get_unique_title_slug()] = heading
def get_heading(self, heading_slug):
if heading_slug in self.heading_index:
return self.heading_index[heading_slug]
if len(self.headings) > 0:
# title heading
return self.headings[0]
return None
def get_headings(self):
return self.headings
def get_heading_index(self):
return self.heading_index
def is_page_heading(self, heading):
return self.headings[0] == heading
def get_level(self):
level = 0
current_page = self
while(current_page is not None):
level += 1
current_page = current_page.get_parent()
return level
def __repr__(self):
return "Page(title: {}, file_path: {})".format(self.get_title(), self.get_file_path())
| 27.259259 | 94 | 0.595562 |
e5008cefdfa2ac8cd4c08e26f69df0de4f813ec5 | 5,577 | py | Python | scripts/pipeline/predict.py | seanliu96/WRMCQA | 8bb6cc79262cd82a490c8d3bad6bb7252e9d4f09 | ["BSD-3-Clause"] | null | null | null | scripts/pipeline/predict.py | seanliu96/WRMCQA | 8bb6cc79262cd82a490c8d3bad6bb7252e9d4f09 | ["BSD-3-Clause"] | null | null | null | scripts/pipeline/predict.py | seanliu96/WRMCQA | 8bb6cc79262cd82a490c8d3bad6bb7252e9d4f09 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python3
# Copyright 2018-present, HKUST-KnowComp.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Run predictions using the full WRMCQA retriever-reader pipeline."""
import torch
import os
import time
try:
import ujson as json
except ImportError:
import json
import argparse
import logging
from wrmcqa import pipeline
from wrmcqa.retriever import utils
from multiprocessing import cpu_count
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fmt = logging.Formatter('%(asctime)s: [ %(message)s ]', '%m/%d/%Y %I:%M:%S %p')
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
parser = argparse.ArgumentParser()
parser.add_argument('dataset', type=str)
parser.add_argument('--out-dir', type=str, default='/tmp',
help=("Directory to write prediction file to "
"(<dataset>-<model>-pipeline.preds)"))
parser.add_argument('--reader-model', type=str, default=None,
help="Path to trained Document Reader model")
parser.add_argument('--retriever-model', type=str, default=None,
help="Path to Document Retriever model (tfidf)")
parser.add_argument('--doc-db', type=str, default=None,
help='Path to Document DB')
parser.add_argument('--embedding-file', type=str, default=None,
help=("Expand dictionary to use all pretrained "
"embeddings in this file"))
parser.add_argument('--char-embedding-file', type=str, default=None,
help=("Expand char dictionary to use all pretrained "
"embeddings in this file"))
parser.add_argument('--candidate-file', type=str, default=None,
help=("List of candidates to restrict predictions to, "
"one candidate per line"))
parser.add_argument('--n-docs', type=int, default=5,
help="Number of docs to retrieve per query")
parser.add_argument('--top-n', type=int, default=1,
help="Number of predictions to make per query")
parser.add_argument('--tokenizer', type=str, default=None,
help=("String option specifying tokenizer type to use "
"(e.g. 'spacy')"))
parser.add_argument('--no-cuda', action='store_true',
help="Use CPU only")
parser.add_argument('--gpu', type=int, default=-1,
help="Specify GPU device id to use")
parser.add_argument('--parallel', action='store_true',
help='Use data parallel (split across gpus)')
parser.add_argument('--use-ala', action='store_true',
help='Use Answer Ranking Algorithm')
parser.add_argument('--num-workers', type=int, default=int(cpu_count()/2),
help='Number of CPU processes (for tokenizing, etc)')
parser.add_argument('--batch-size', type=int, default=32,
help='Document paragraph batching size')
parser.add_argument('--predict-batch-size', type=int, default=32,
help='Question batching size')
args = parser.parse_args()
t0 = time.time()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
logger.info('CUDA enabled (GPU %d)' % args.gpu)
else:
logger.info('Running on CPU only.')
if args.candidate_file:
logger.info('Loading candidates from %s' % args.candidate_file)
candidates = set()
with open(args.candidate_file) as f:
for line in f:
line = utils.normalize(line.strip()).lower()
candidates.add(line)
logger.info('Loaded %d candidates.' % len(candidates))
else:
candidates = None
logger.info('Initializing pipeline...')
WRMCQA = pipeline.WRMCQA(
reader_model=args.reader_model,
embedding_file=args.embedding_file,
char_embedding_file=args.char_embedding_file,
tokenizer=args.tokenizer,
fixed_candidates=candidates,
batch_size=args.batch_size,
cuda=args.cuda,
data_parallel=args.parallel,
num_workers=args.num_workers,
db_config={'options': {'db_path': args.doc_db}},
ranker_config={'options': {'tfidf_path': args.retriever_model,
'strict': False}},
use_ala=args.use_ala
)
# ------------------------------------------------------------------------------
# Read in dataset and make predictions
# ------------------------------------------------------------------------------
logger.info('Loading queries from %s' % args.dataset)
queries = []
for line in open(args.dataset):
data = json.loads(line)
queries.append(data['question'])
model = os.path.splitext(os.path.basename(args.reader_model or 'default'))[0]
basename = os.path.splitext(os.path.basename(args.dataset))[0]
outfile = os.path.join(args.out_dir, basename + '-' + model + '-pipeline.preds')
logger.info('Writing results to %s' % outfile)
with open(outfile, 'w') as f:
batches = [queries[i: i + args.predict_batch_size]
for i in range(0, len(queries), args.predict_batch_size)]
for i, batch in enumerate(batches):
logger.info(
'-' * 25 + ' Batch %d/%d ' % (i + 1, len(batches)) + '-' * 25
)
predictions = WRMCQA.process_batch(
batch,
n_docs=args.n_docs,
top_n=args.top_n,
)
for p in predictions:
f.write(json.dumps(p) + '\n')
logger.info('Total time: %.2f' % (time.time() - t0))
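A minimal sketch (not part of the original script) of the query batching used above: the question list is sliced into fixed-size chunks before each call to process_batch().
queries = ["q1", "q2", "q3", "q4", "q5"]
predict_batch_size = 2
batches = [queries[i: i + predict_batch_size]
           for i in range(0, len(queries), predict_batch_size)]
assert batches == [["q1", "q2"], ["q3", "q4"], ["q5"]]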
| 39.274648 | 80 | 0.622736 |
02666c424e9566ff49d457724093a3bf5eb4b6a6 | 3,125 | py | Python | deps/lib/python3.5/site-packages/Crypto/PublicKey/__init__.py | jfarmer08/hassio | 792a6071a97bb33857c14c9937946233c620035c | ["MIT"] | 2 | 2020-05-16T08:38:34.000Z | 2020-10-01T01:32:57.000Z | venv/Lib/site-packages/Crypto/PublicKey/__init__.py | Team20s/blockchain_python | 438eb4540198693f4f592541d671a2056adb8a42 | ["Apache-2.0"] | 51 | 2019-10-08T01:53:02.000Z | 2021-06-04T22:02:21.000Z | venv/Lib/site-packages/Crypto/PublicKey/__init__.py | Team20s/blockchain_python | 438eb4540198693f4f592541d671a2056adb8a42 | ["Apache-2.0"] | 2 | 2019-10-10T17:06:33.000Z | 2020-06-16T05:34:13.000Z |
# -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
from Crypto.Util.asn1 import (DerSequence, DerInteger, DerBitString,
DerObjectId, DerNull)
def _expand_subject_public_key_info(encoded):
"""Parse a SubjectPublicKeyInfo structure.
It returns a triple with:
* OID (string)
* encoded public key (bytes)
* Algorithm parameters (bytes or None)
"""
#
# SubjectPublicKeyInfo ::= SEQUENCE {
# algorithm AlgorithmIdentifier,
# subjectPublicKey BIT STRING
# }
#
# AlgorithmIdentifier ::= SEQUENCE {
# algorithm OBJECT IDENTIFIER,
# parameters ANY DEFINED BY algorithm OPTIONAL
# }
#
spki = DerSequence().decode(encoded, nr_elements=2)
algo = DerSequence().decode(spki[0], nr_elements=(1,2))
algo_oid = DerObjectId().decode(algo[0])
spk = DerBitString().decode(spki[1]).value
if len(algo) == 1:
algo_params = None
else:
try:
DerNull().decode(algo[1])
algo_params = None
except:
algo_params = algo[1]
return algo_oid.value, spk, algo_params
def _create_subject_public_key_info(algo_oid, secret_key, params=None):
if params is None:
params = DerNull()
spki = DerSequence([
DerSequence([
DerObjectId(algo_oid),
params]),
DerBitString(secret_key)
])
return spki.encode()
def _extract_subject_public_key_info(x509_certificate):
"""Extract subjectPublicKeyInfo from a DER X.509 certificate."""
certificate = DerSequence().decode(x509_certificate, nr_elements=3)
tbs_certificate = DerSequence().decode(certificate[0],
nr_elements=list(range(6, 11)))
index = 5
try:
tbs_certificate[0] + 1
# Version not present
version = 1
except TypeError:
version = DerInteger(explicit=0).decode(tbs_certificate[0]).value
if version not in (2, 3):
raise ValueError("Incorrect X.509 certificate version")
index = 6
return tbs_certificate[index]
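A hedged round-trip sketch (not part of the module) of the two private helpers above: _create_subject_public_key_info() wraps an algorithm OID and key bits into a DER SubjectPublicKeyInfo, and _expand_subject_public_key_info() recovers them. The OID and key bytes are placeholders, not a real key.
oid = "1.2.840.113549.1.1.1"    # rsaEncryption, used here only as an example OID
key_bits = b"\x00\x01\x02\x03"  # placeholder payload, not a real public key
encoded = _create_subject_public_key_info(oid, key_bits)
decoded_oid, decoded_key, params = _expand_subject_public_key_info(encoded)
assert decoded_oid == oid and decoded_key == key_bits and params is None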
| 32.552083 | 74 | 0.61696 |
ce3d4289981f151f29cf3046f5a84389c87159cf | 3,121 | py | Python | implementations/tests/tests_df/test_reviews_accepted_github.py | alexmwebber01/wg-evolution | 1815e2c1f5fead082c676111952cb7b1d646e403 | ["MIT"] | null | null | null | implementations/tests/tests_df/test_reviews_accepted_github.py | alexmwebber01/wg-evolution | 1815e2c1f5fead082c676111952cb7b1d646e403 | ["MIT"] | null | null | null | implementations/tests/tests_df/test_reviews_accepted_github.py | alexmwebber01/wg-evolution | 1815e2c1f5fead082c676111952cb7b1d646e403 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CHAOSS
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Aniruddha Karajgi <akarajgi0@gmail.com>
#
import unittest
import json
from pandas.util.testing import assert_frame_equal
from implementations.code_df.reviews_accepted_github import ReviewsAcceptedGitHub
def read_file(path):
"""
Given a line-by-line JSON file, this function converts it to
a Python dictionary and returns all such lines as a list.
:param path: the path to the JSON file
:returns items: a list of dictionaries read from the JSON file
"""
items = list()
with open(path, 'r') as raw_data:
for line in raw_data:
line = json.loads(line)
items.append(line)
return items
class TestReviewsAcceptedGitHub(unittest.TestCase):
def setUp(self):
"""
Run before each test to read the test data file
"""
self.items = read_file('data/test_pulls_data.json')
def test_compute(self):
"""
Test the compute method of a ReviewsAcceptedGitHub
object with default parameters.
"""
reviews = ReviewsAcceptedGitHub(self.items)
expected_count = 18
count = reviews.compute()
self.assertEqual(expected_count, count)
def test__agg(self):
"""
Test the _agg method of a ReviewsAcceptedGitHub
object with default parameters when re-sampling
on a weekly basis.
"""
reviews_accepted = ReviewsAcceptedGitHub(self.items)
reviews_accepted.df = reviews_accepted.df.set_index('created_date')
test_df = reviews_accepted.df.copy(deep=True)
test_df = test_df[test_df['merged']]
test_df = test_df.resample('W')['category'].agg(['count'])
reviews_accepted.df = reviews_accepted._agg(reviews_accepted.df, 'W')
assert_frame_equal(test_df, reviews_accepted.df)
def test__get_params(self):
"""
Test whether the _get_params method correctly returns
the expected parameters for plotting a timeseries plot
for the Reviews Accepted metric.
"""
changes = ReviewsAcceptedGitHub(self.items)
params = changes._get_params()
expected_params = {
'x': None,
'y': 'count',
'title': "Trends in Reviews Accepted",
'use_index': True
}
self.assertEqual(expected_params, params)
if __name__ == '__main__':
unittest.main(verbosity=2)
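An illustrative sketch (not part of the test module) of the weekly re-sampling that test__agg exercises: merged pull requests are grouped into calendar weeks and counted.
import pandas as pd
df = pd.DataFrame({
    "created_date": pd.to_datetime(["2019-01-01", "2019-01-02", "2019-01-09"]),
    "merged": [True, False, True],
    "category": ["pull_request"] * 3,
}).set_index("created_date")
weekly = df[df["merged"]].resample("W")["category"].agg(["count"])
# one merged PR in the week ending 2019-01-06, one in the week ending 2019-01-13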
| 28.898148 | 81 | 0.666773 |
8867edde9e0284d99c1c203428f77759455c7f6f | 6,066 | py | Python | dvc/system.py | chlin501/dvc | 8798d8531ef1e57744954ac789e60ee9be313aba | ["Apache-2.0"] | null | null | null | dvc/system.py | chlin501/dvc | 8798d8531ef1e57744954ac789e60ee9be313aba | ["Apache-2.0"] | null | null | null | dvc/system.py | chlin501/dvc | 8798d8531ef1e57744954ac789e60ee9be313aba | ["Apache-2.0"] | null | null | null |
import errno
import logging
import os
import platform
import shutil
import sys
from dvc.compat import fspath
from dvc.exceptions import DvcException
logger = logging.getLogger(__name__)
if platform.system() == "Windows" and sys.version_info < (3, 8):
try:
import speedcopy
speedcopy.patch_copyfile()
except ImportError:
pass
class System(object):
@staticmethod
def is_unix():
return os.name != "nt"
@staticmethod
def copy(src, dest):
src, dest = fspath(src), fspath(dest)
return shutil.copyfile(src, dest)
@staticmethod
def hardlink(source, link_name):
source, link_name = fspath(source), fspath(link_name)
try:
os.link(source, link_name)
except OSError as exc:
raise DvcException("failed to link") from exc
@staticmethod
def symlink(source, link_name):
source, link_name = fspath(source), fspath(link_name)
try:
os.symlink(source, link_name)
except OSError as exc:
raise DvcException("failed to symlink") from exc
@staticmethod
def _reflink_darwin(src, dst):
import ctypes
LIBC = "libc.dylib"
LIBC_FALLBACK = "/usr/lib/libSystem.dylib"
try:
clib = ctypes.CDLL(LIBC)
except OSError as exc:
logger.debug(
"unable to access '{}' (errno '{}'). "
"Falling back to '{}'.".format(LIBC, exc.errno, LIBC_FALLBACK)
)
if exc.errno != errno.ENOENT:
raise
# NOTE: trying to bypass System Integrity Protection (SIP)
clib = ctypes.CDLL(LIBC_FALLBACK)
if not hasattr(clib, "clonefile"):
return -1
clonefile = clib.clonefile
clonefile.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int]
clonefile.restype = ctypes.c_int
return clonefile(
ctypes.c_char_p(src.encode("utf-8")),
ctypes.c_char_p(dst.encode("utf-8")),
ctypes.c_int(0),
)
@staticmethod
def _reflink_windows(src, dst):
return -1
@staticmethod
def _reflink_linux(src, dst):
import os
import fcntl
FICLONE = 0x40049409
try:
ret = 255
with open(src, "r") as s, open(dst, "w+") as d:
ret = fcntl.ioctl(d.fileno(), FICLONE, s.fileno())
finally:
if ret != 0:
os.unlink(dst)
return ret
@staticmethod
def reflink(source, link_name):
source, link_name = fspath(source), fspath(link_name)
system = platform.system()
try:
if system == "Windows":
ret = System._reflink_windows(source, link_name)
elif system == "Darwin":
ret = System._reflink_darwin(source, link_name)
elif system == "Linux":
ret = System._reflink_linux(source, link_name)
else:
ret = -1
except IOError:
ret = -1
if ret != 0:
raise DvcException("reflink is not supported")
@staticmethod
def _getdirinfo(path):
from collections import namedtuple
from win32file import (
CreateFileW,
GetFileInformationByHandle,
FILE_FLAG_BACKUP_SEMANTICS,
FILE_FLAG_OPEN_REPARSE_POINT,
FILE_SHARE_READ,
OPEN_EXISTING,
)
# NOTE: use FILE_FLAG_OPEN_REPARSE_POINT to open symlink itself and not
# the target. See https://docs.microsoft.com/en-us/windows/desktop/api/
# fileapi/nf-fileapi-createfilew#symbolic-link-behavior
flags = FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT
hfile = CreateFileW(
path, 0, FILE_SHARE_READ, None, OPEN_EXISTING, flags, None
)
# See BY_HANDLE_FILE_INFORMATION structure from fileapi.h
Info = namedtuple(
"BY_HANDLE_FILE_INFORMATION",
[
"dwFileAttributes",
"ftCreationTime",
"ftLastAccessTime",
"ftLastWriteTime",
"dwVolumeSerialNumber",
"nFileSizeHigh",
"nFileSizeLow",
"nNumberOfLinks",
"nFileIndexHigh",
"nFileIndexLow",
],
)
return Info(*GetFileInformationByHandle(hfile))
@staticmethod
def inode(path):
path = fspath(path)
if System.is_unix():
import ctypes
inode = os.lstat(path).st_ino
# NOTE: See https://bugs.python.org/issue29619 and
# https://stackoverflow.com/questions/34643289/
# pythons-os-stat-is-returning-wrong-inode-value
inode = ctypes.c_ulong(inode).value
else:
# getdirinfo from ntfsutils works on both files and dirs
info = System._getdirinfo(path)
inode = abs(
hash(
(
info.dwVolumeSerialNumber,
info.nFileIndexHigh,
info.nFileIndexLow,
)
)
)
assert inode >= 0
assert inode < 2 ** 64
return inode
@staticmethod
def is_symlink(path):
path = fspath(path)
if System.is_unix():
return os.path.islink(path)
# https://docs.microsoft.com/en-us/windows/desktop/fileio/
# file-attribute-constants
from winnt import FILE_ATTRIBUTE_REPARSE_POINT
if os.path.lexists(path):
info = System._getdirinfo(path)
return info.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT
return False
@staticmethod
def is_hardlink(path):
path = fspath(path)
if System.is_unix():
return os.stat(path).st_nlink > 1
info = System._getdirinfo(path)
return info.nNumberOfLinks > 1
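A brief usage sketch (not part of the module): the System helpers are all static methods, and reflink support depends on the filesystem; the paths below are placeholders.
if __name__ == "__main__":
    System.copy("data.csv", "data_copy.csv")
    print(System.inode("data_copy.csv"))       # stable inode / file index
    print(System.is_symlink("data_copy.csv"))  # False for a plain copy
    try:
        System.reflink("data.csv", "data_reflink.csv")
    except DvcException:
        pass  # e.g. on ext4, which cannot clone blocks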
| 28.478873 | 79 | 0.559677 |
2d7022267b7063be3474c3cf926a3ba78afe4094 | 11,914 | py | Python | flowd/flow_app.py | rexengineering/metaflow | fcba7cd6aaccd3806ce7d6a4a8aaeef350bbeaf8 | ["Apache-2.0"] | null | null | null | flowd/flow_app.py | rexengineering/metaflow | fcba7cd6aaccd3806ce7d6a4a8aaeef350bbeaf8 | ["Apache-2.0"] | null | null | null | flowd/flow_app.py | rexengineering/metaflow | fcba7cd6aaccd3806ce7d6a4a8aaeef350bbeaf8 | ["Apache-2.0"] | null | null | null |
import asyncio
from collections import defaultdict
import base64
import logging
import json
from async_timeout import timeout
from quart import request, jsonify
from flowlib.etcd_utils import get_etcd, transition_state
from flowlib.quart_app import QuartApp
from flowlib.workflow import Workflow, get_workflows
from flowlib.config import (
INSTANCE_FAIL_ENDPOINT_PATH,
WF_MAP_ENDPOINT_PATH
)
from flowlib.constants import (
BStates,
flow_result,
WorkflowInstanceKeys,
Headers,
)
from flowlib.token_api import TokenPool
TIMEOUT_SECONDS = 10
def convert_envoy_hdr_msg_to_dict(headers_bytes):
headers_str = base64.b64decode(headers_bytes).decode()
hdrs = {}
for header in headers_str.split('\n'):
# note: Envoy puts a `:` at the start of the path, authority, and host headers for
# some reason.
header = header.lstrip(':')
hdr_key = header[:header.find(':')]
hdr_val = header[header.find(':') + 1:]
if len(hdr_key) > 0:
hdrs[hdr_key] = hdr_val
return hdrs
def process_data(encoded_str, data_should_be_json):
"""Accepts a string of b64-encoded data. Tries the following steps, saving
the result each time. If any of the steps fails, it returns the most recent
success. If no processing step succeeds (i.e. the result of decoding the
data is not an ascii-representable string), then we just return the original
input: the base64-encoded string. Steps:
1. Try to decode the base64-encoded string into something ascii-readable.
2. If data_should_be_json, then we try to load the json into a python dict.
"""
result = encoded_str
# Step 1: Try to make it a human-readable string
try:
decoded_bytes = base64.b64decode(encoded_str)
if decoded_bytes.isascii():
decoded_str = decoded_bytes.decode()
result = decoded_str
if data_should_be_json:
result = json.loads(result)
elif data_should_be_json:
logging.warning(
f"Should've been able to json load this but it wasn't even ascii: {encoded_str}"
)
except json.decoder.JSONDecodeError as exn:
logging.exception(
f"Should've been able to json load the data but couldn't: {encoded_str}",
exc_info=exn,
)
except Exception as exn:
logging.exception(
f"Caught an unexpected exception processing `{encoded_str}`:",
exc_info=exn,
)
return result
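# --- Illustrative sketch, not part of the original module ---------------------
# process_data() degrades gracefully: JSON becomes a dict, plain text a str, and
# binary data stays as the untouched base64 string. The helper below is
# hypothetical and exists only to demonstrate that behaviour.
def _demo_process_data():
    encoded_json = base64.b64encode(b'{"answer": 42}').decode()
    encoded_text = base64.b64encode(b'plain text payload').decode()
    assert process_data(encoded_json, True) == {"answer": 42}
    assert process_data(encoded_text, False) == "plain text payload"
    hdrs = convert_envoy_hdr_msg_to_dict(
        base64.b64encode(b":path:/health\ncontent-type:application/json").decode())
    assert hdrs == {"path": "/health", "content-type": "application/json"}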
class FlowApp(QuartApp):
def __init__(self, **kws):
super().__init__(__name__, **kws)
self.etcd = get_etcd()
self.app.route('/health', methods=['GET'])(self.health)
self.app.route('/', methods=['POST'])(self.root_route)
self.app.route(INSTANCE_FAIL_ENDPOINT_PATH, methods=(['POST']))(self.fail_route)
self.app.route(WF_MAP_ENDPOINT_PATH, methods=['GET', 'POST'])(self.wf_map)
async def health(self):
self.etcd.get('Is The Force With Us?')
return flow_result(0, "Ok.")
async def root_route(self):
# When there is a flow ID in the headers, store the result in etcd and
# change the state to completed.
if Headers.X_HEADER_FLOW_ID in request.headers:
flow_id = request.headers[Headers.X_HEADER_FLOW_ID]
keys = WorkflowInstanceKeys(flow_id)
good_states = {BStates.STARTING, BStates.RUNNING}
if self.etcd.get(keys.state)[0] in good_states:
if transition_state(self.etcd, keys.state, good_states, BStates.COMPLETED):
self.etcd.put(keys.result, await request.data)
else:
logging.error(
f'Race on {keys.state}; state changed out of known'
' good state before state transition could occur!'
)
return 'Hello there!\n'
async def fail_route(self):
# When there is a flow ID in the headers, store the result in etcd and
# change the state to ERROR.
if Headers.X_HEADER_WORKFLOW_ID not in request.headers or Headers.X_HEADER_FLOW_ID not in request.headers:
return jsonify(flow_result(-1, "Didn't provide workflow headers"), 400)
flow_id = request.headers[Headers.X_HEADER_FLOW_ID]
wf_id = request.headers[Headers.X_HEADER_WORKFLOW_ID]
timer_pool_id = request.headers.get(Headers.X_HEADER_TOKEN_POOL_ID)
workflow = Workflow.from_id(wf_id)
keys = WorkflowInstanceKeys(flow_id)
state_key = keys.state
good_states = {BStates.STARTING, BStates.RUNNING}
if self.etcd.get(state_key)[0] in good_states:
# As per spec, if we have a recoverable workflow we go to STOPPING --> STOPPED.
# Otherwise, we go straight to ERROR.
if workflow.process.properties.is_recoverable:
if not transition_state(self.etcd, state_key, good_states, BStates.STOPPING):
logging.error(
f'Race on {state_key}; state changed out of known'
' good state before state transition could occur!'
)
else:
if not transition_state(self.etcd, state_key, good_states, BStates.ERROR):
logging.error(
f'Race on {state_key}; state changed out of known'
' good state before state transition could occur!'
)
if timer_pool_id is not None:
# if we're tracking tokens, we're not any more as the workflow instance
# is being failed.
for pool_name in timer_pool_id.split(','):
logging.info(f'Erasing token pool {pool_name}')
TokenPool.erase(pool_name)
incoming_data = None
try:
with timeout(TIMEOUT_SECONDS):
incoming_data = await request.data
except asyncio.exceptions.TimeoutError as exn:
logging.exception(
f"Timed out waiting for error data on flow id {flow_id}.",
exc_info=exn
)
self.etcd.put(state_key, BStates.ERROR)
return jsonify(flow_result(-1, "Could not load promised data."), 400)
try:
payload = json.loads(incoming_data.decode())
self._put_payload(payload, keys, workflow)
except Exception as exn:
logging.exception(
f"Failed processing instance error payload:",
exc_info=exn,
)
self.etcd.put(keys.result, incoming_data)
self.etcd.put(keys.content_type, 'application/octet-stream')
if workflow.process.properties.is_recoverable:
self.etcd.replace(state_key, BStates.STOPPING, BStates.STOPPED)
return 'Another happy landing (https://i.gifer.com/PNk.gif)'
def wf_map(self):
"""Get a map from workflow ID's to workflow deployment ID's.
Note that this mapping does not assume the workflow ID is "baked" into
the workflow deployment ID, which it presently is.
"""
etcd = get_etcd(is_not_none=True)
wf_map = {}
for workflow in get_workflows():
if etcd.get(workflow.keys.state)[0] == BStates.RUNNING:
wf_id = workflow.process.xmldict['@id']
if wf_id not in wf_map:
wf_map[wf_id] = []
wf_did = workflow.id
start_event_urls = [
start_event.k8s_url
for start_event in workflow.process.start_events
]
wf_map[wf_id].append({
'id': wf_did,
'start_event_urls': start_event_urls,
'user_opaque_metadata': workflow.properties.user_opaque_metadata,
})
return flow_result(0, 'Ok', wf_map=wf_map)
def _put_payload(self, payload: dict, keys: WorkflowInstanceKeys, workflow: Workflow):
"""Accepts incoming JSON and saves the error payload. Error data from Envoy
looks slightly different than error data from flowpost(), simply because
it's harder to manipulate data within the confines of the Envoy codebase.
Therefore, we have a separate helper method _put_payload_from_envoy() that
cleans up and stores the data. If the payload marks itself as not coming from
Envoy (its `from_envoy` flag is False), we store the JSON as-is and skip that
helper; otherwise we assume the data came from Envoy and do use the helper.
"""
if payload.get('from_envoy', True):
self._put_payload_from_envoy(payload, keys, workflow)
else:
self.etcd.put(keys.result, json.dumps(payload))
self.etcd.put(keys.content_type, 'application/json')
def _put_payload_from_envoy(
self, payload: dict, keys: WorkflowInstanceKeys, workflow: Workflow
):
"""Take all of the incoming data from envoy and make it as close to JSON as we
can. The error data from BAVS looks like:
{
'input_headers_encoded': base64-encoded dump of headers of request to the task
'input_data_encoded': base64-encoded dump of request data to the task
'output_headers_encoded': (optional) base64-encoded dump of task response headers
'output_data_encoded': (optional) base64-encoded dump of task response data
'error_msg': human-readable error message
'error_code': enum of {
'CONNECTION_ERROR', 'TASK_ERROR', 'CONTEXT_INPUT_ERROR', 'CONTEXT_OUTPUT_ERROR'
}
}
For the input/output headers/data, we first try to decode them if they're not
binary data (since BAVS just encodes to avoid having to deal with escaping, etc.).
If we can successfully decode into a string, we then check if content-type is json,
and if so, we make it a dict.
Finally, after processing, we dump the whole dict into the `result` key, and
since we're putting a `json.dumps()` into the `result` key, we put `application/json`
into the `content-type` key so that consumers of the result payload may know how
to process the data.
"""
input_is_json = False
output_is_json = False
result = {}
if 'error_msg' in payload:
result['error_msg'] = payload['error_msg']
if 'error_code' in payload:
result['error_code'] = payload['error_code']
if 'input_headers_encoded' in payload:
hdrs = convert_envoy_hdr_msg_to_dict(payload['input_headers_encoded'])
result['input_headers'] = hdrs
if Headers.X_HEADER_TASK_ID.lower() in hdrs:
task_id = hdrs[Headers.X_HEADER_TASK_ID.lower()]
bpmn_component = workflow.process.component_map[task_id]
result['failed_task_id'] = task_id
result['failed_task_name'] = bpmn_component.name
input_is_json = (hdrs.get('content-type')) == 'application/json'
if 'input_data_encoded' in payload:
result['input_data'] = process_data(payload['input_data_encoded'], input_is_json)
if 'output_headers_encoded' in payload:
hdrs = convert_envoy_hdr_msg_to_dict(payload['output_headers_encoded'])
output_is_json = (hdrs.get('content-type')) == 'application/json'
result['output_headers'] = hdrs
if 'output_data_encoded' in payload:
result['output_data'] = process_data(payload['output_data_encoded'], output_is_json)
self.etcd.put(keys.result, json.dumps(result))
self.etcd.put(keys.content_type, 'application/json')
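A hedged sketch (not part of the module) of the Envoy/BAVS error payload shape that _put_payload_from_envoy() unpacks; every value below is an invented placeholder.
example_envoy_error = {
    'error_msg': 'task returned HTTP 500',
    'error_code': 'TASK_ERROR',
    'input_headers_encoded': '<base64 dump of the request headers>',
    'input_data_encoded': '<base64 of the request body>',
    'output_headers_encoded': '<base64 of the task response headers>',  # optional
    'output_data_encoded': '<base64 of the task response body>',        # optional
}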
| 44.289963 | 114 | 0.622461 |
d309124d54f64ca84afb15ca65b8dc2409c3cae1 | 90,045 | py | Python | trimesh/base.py | gustavla/trimesh | d7ea008595f78b411bef3b770cedc94914ff954f | ["MIT"] | null | null | null | trimesh/base.py | gustavla/trimesh | d7ea008595f78b411bef3b770cedc94914ff954f | ["MIT"] | null | null | null | trimesh/base.py | gustavla/trimesh | d7ea008595f78b411bef3b770cedc94914ff954f | ["MIT"] | null | null | null |
# flake8: noqa
"""
github.com/mikedh/trimesh
----------------------------
Library for importing, exporting and doing simple operations on triangular meshes.
"""
import numpy as np
import copy
from . import ray
from . import util
from . import units
from . import poses
from . import graph
from . import sample
from . import repair
from . import convex
from . import remesh
from . import bounds
from . import caching
from . import inertia
from . import nsphere
from . import boolean
from . import grouping
from . import geometry
from . import permutate
from . import proximity
from . import triangles
from . import collision
from . import curvature
from . import smoothing
from . import comparison
from . import registration
from . import decomposition
from . import intersections
from . import transformations
from .visual import create_visual
from .exchange.export import export_mesh
from .constants import log, log_time, tol
from .scene import Scene
from .parent import Geometry
class Trimesh(Geometry):
def __init__(self,
vertices=None,
faces=None,
face_normals=None,
vertex_normals=None,
face_colors=None,
vertex_colors=None,
face_attributes=None,
vertex_attributes=None,
metadata=None,
process=True,
validate=False,
use_embree=True,
initial_cache=None,
visual=None,
**kwargs):
"""
A Trimesh object contains a triangular 3D mesh.
Parameters
------------
vertices : (n, 3) float
Array of vertex locations
faces : (m, 3) or (m, 4) int
Array of triangular or quad faces (triangulated on load)
face_normals : (m, 3) float
Array of normal vectors corresponding to faces
vertex_normals : (n, 3) float
Array of normal vectors for vertices
metadata : dict
Any metadata about the mesh
process : bool
if True, Nan and Inf values will be removed
immediately and vertices will be merged
validate : bool
If True, degenerate and duplicate faces will be
removed immediately, and some functions will alter
the mesh to ensure consistent results.
use_embree : bool
If True try to use pyembree raytracer.
If pyembree is not available it will automatically fall
back to a much slower rtree/numpy implementation
initial_cache : dict
A way to pass things to the cache in case expensive
things were calculated before creating the mesh object.
visual : ColorVisuals or TextureVisuals
Assigned to self.visual
"""
if initial_cache is None:
initial_cache = {}
# self._data stores information about the mesh which
# CANNOT be regenerated.
# in the base class all that is stored here is vertex and
# face information
# any data put into the store is converted to a TrackedArray
# which is a subclass of np.ndarray that provides md5 and crc
# methods which can be used to detect changes in the array.
self._data = caching.DataStore()
# self._cache stores information about the mesh which CAN be
# regenerated from self._data, but may be slow to calculate.
# In order to maintain consistency
# the cache is cleared when self._data.crc() changes
self._cache = caching.Cache(
id_function=self._data.fast_hash,
force_immutable=True)
self._cache.update(initial_cache)
# if validate we are allowed to alter the mesh silently
# to ensure valid results
self._validate = bool(validate)
# check for None only to avoid warning messages in subclasses
if vertices is not None:
# (n, 3) float, set of vertices
self.vertices = vertices
if faces is not None:
# (m, 3) int of triangle faces, references self.vertices
self.faces = faces
# hold visual information about the mesh (vertex and face colors)
if visual is None:
self.visual = create_visual(
face_colors=face_colors,
vertex_colors=vertex_colors,
mesh=self)
else:
self.visual = visual
# normals are accessed through setters/properties and are regenerated
# if dimensions are inconsistent, but can be set by the constructor
# to avoid a substantial number of cross products
if face_normals is not None:
self.face_normals = face_normals
# (n, 3) float of vertex normals, can be created from face normals
if vertex_normals is not None:
self.vertex_normals = vertex_normals
# embree is a much, much faster raytracer written by Intel
# if you have pyembree installed you should use it
# although both raytracers were designed to have a common API
if ray.has_embree and use_embree:
self.ray = ray.ray_pyembree.RayMeshIntersector(self)
else:
# create a ray-mesh query object for the current mesh
# initializing is very inexpensive and object is convenient to have.
# On first query expensive bookkeeping is done (creation of r-tree),
# and is cached for subsequent queries
self.ray = ray.ray_triangle.RayMeshIntersector(self)
# a quick way to get permuted versions of the current mesh
self.permutate = permutate.Permutator(self)
# convenience class for nearest point queries
self.nearest = proximity.ProximityQuery(self)
# store metadata about the mesh in a dictionary
self.metadata = dict()
# update the mesh metadata with passed metadata
if isinstance(metadata, dict):
self.metadata.update(metadata)
elif metadata is not None:
raise ValueError(
'metadata should be a dict or None, got %s' % str(metadata))
# Set the default center of mass and density
self._density = 1.0
self._center_mass = None
# store per-face and per-vertex attributes which will
# be updated when an update_faces call is made
self.face_attributes = {}
self.vertex_attributes = {}
# use update to copy items
if face_attributes is not None:
self.face_attributes.update(face_attributes)
if vertex_attributes is not None:
self.vertex_attributes.update(vertex_attributes)
# process will remove NaN and Inf values and merge vertices
# if validate, will remove degenerate and duplicate faces
if process or validate:
self.process()
# save reference to kwargs
self._kwargs = kwargs
def process(self):
"""
Do the bare minimum processing to make a mesh useful.
Does this by:
1) removing NaN and Inf values
2) merging duplicate vertices
If self._validate:
3) Remove triangles which have one edge of their rectangular 2D
oriented bounding box shorter than tol.merge
4) remove duplicated triangles
Returns
------------
self: trimesh.Trimesh
Current mesh
"""
# if there are no vertices or faces exit early
if self.is_empty:
return self
# avoid clearing the cache during operations
with self._cache:
self.remove_infinite_values()
self.merge_vertices()
# if we're cleaning remove duplicate
# and degenerate faces
if self._validate:
self.remove_duplicate_faces()
self.remove_degenerate_faces()
# since none of our process operations moved vertices or faces
# we can keep face and vertex normals in the cache without recomputing
# if faces or vertices have been removed, normals are validated before
# being returned so there is no danger of inconsistent dimensions
self._cache.clear(exclude=['face_normals',
'vertex_normals'])
self.metadata['processed'] = True
return self
def md5(self):
"""
An MD5 of the core geometry information for the mesh,
faces and vertices.
Generated from TrackedArray which subclasses np.ndarray to
monitor array for changes and returns a correct lazily
evaluated md5 so it only has to recalculate the hash
occasionally, rather than on every call.
Returns
----------
md5 : string
MD5 of everything in the DataStore
"""
md5 = self._data.md5()
return md5
def crc(self):
"""
A zlib.adler32 checksum for the current mesh data.
This is about 5x faster than an MD5, and the checksum is
checked every time something is requested from the cache so
it gets called a lot.
Returns
----------
crc : int
Checksum of current mesh data
"""
return self._data.fast_hash()
@property
def faces(self):
"""
The faces of the mesh.
This is regarded as core information which cannot be regenerated from
cache, and as such is stored in self._data which tracks the array for
changes and clears cached values of the mesh if this is altered.
Returns
----------
faces : (n, 3) int
Representing triangles which reference self.vertices
"""
return self._data.get('faces', np.empty(shape=(0, 3), dtype=int))
@faces.setter
def faces(self, values):
"""
Set the vertex indexes that make up triangular faces.
Parameters
--------------
values : (n, 3) int
Indexes of self.vertices
"""
if values is None or len(values) == 0:
if 'faces' in self._data:
del self._data['faces']
return
values = np.asanyarray(values, dtype=np.int64)
# automatically triangulate quad faces
if util.is_shape(values, (-1, 4)):
log.info('triangulating quad faces')
values = geometry.triangulate_quads(values)
self._data['faces'] = values
@caching.cache_decorator
def faces_sparse(self):
"""
A sparse matrix representation of the faces.
Returns
----------
sparse : scipy.sparse.coo_matrix
Has properties:
dtype : bool
shape : (len(self.vertices), len(self.faces))
"""
sparse = geometry.index_sparse(
columns=len(self.vertices),
indices=self.faces)
return sparse
@property
def face_normals(self):
"""
Return the unit normal vector for each face.
If a face is degenerate and a normal can't be generated
a zero magnitude unit vector will be returned for that face.
Returns
-----------
normals : (len(self.faces), 3) np.float64
Normal vectors of each face
"""
# check shape of cached normals
cached = self._cache['face_normals']
if np.shape(cached) == np.shape(self._data['faces']):
return cached
log.debug('generating face normals')
# use cached triangle cross products to generate normals
# this will always return the correct shape but some values
# will be zero or an arbitrary vector if the inputs had
# a cross product below machine epsilon
normals, valid = triangles.normals(
triangles=self.triangles,
crosses=self.triangles_cross)
# if all triangles are valid shape is correct
if valid.all():
# put calculated face normals into cache manually
self._cache['face_normals'] = normals
return normals
# make a padded list of normals for correct shape
padded = np.zeros((len(self.triangles), 3),
dtype=np.float64)
padded[valid] = normals
# put calculated face normals into cache manually
self._cache['face_normals'] = padded
return padded
@face_normals.setter
def face_normals(self, values):
"""
Assign values to face normals.
Parameters
-------------
values : (len(self.faces), 3) float
Unit face normals
"""
# if nothing passed exit
if values is None:
return
# make sure candidate face normals are C- contiguous float
values = np.asanyarray(
values, order='C', dtype=np.float64)
# face normals need to correspond to faces
if len(values) == 0 or values.shape != self.faces.shape:
log.warning('face_normals incorrect shape, ignoring!')
return
# check if any values are larger than tol.merge
# don't set the normals if they are all zero
ptp = values.ptp()
if not np.isfinite(ptp):
log.warning('face_normals contain NaN, ignoring!')
return
if ptp < tol.merge:
log.warning('face_normals all zero, ignoring!')
return
# make sure the first few normals match the first few triangles
check, valid = triangles.normals(
self.vertices.view(np.ndarray)[self.faces[:20]])
compare = np.zeros((len(valid), 3))
compare[valid] = check
if not np.allclose(compare, values[:20]):
log.debug("face_normals didn't match triangles, ignoring!")
return
# otherwise store face normals
self._cache['face_normals'] = values
@property
def vertices(self):
"""
The vertices of the mesh.
This is regarded as core information which cannot be
generated from cache and as such is stored in self._data
which tracks the array for changes and clears cached
values of the mesh if this is altered.
Returns
----------
vertices : (n, 3) float
Points in cartesian space referenced by self.faces
"""
return self._data['vertices']
@vertices.setter
def vertices(self, values):
"""
Assign vertex values to the mesh.
Parameters
--------------
values : (n, 3) float
Points in space
"""
self._data['vertices'] = np.asanyarray(
values, order='C', dtype=np.float64)
@caching.cache_decorator
def vertex_normals(self):
"""
The vertex normals of the mesh. If the normals were loaded
we check to make sure we have the same number of vertex
normals and vertices before returning them. If there are
no vertex normals defined or a shape mismatch we calculate
the vertex normals from the mean normals of the faces the
vertex is used in.
Returns
----------
vertex_normals : (n, 3) float
Represents the surface normal at each vertex.
Where n == len(self.vertices)
"""
# make sure we have faces_sparse
assert hasattr(self.faces_sparse, 'dot')
vertex_normals = geometry.weighted_vertex_normals(
vertex_count=len(self.vertices),
faces=self.faces,
face_normals=self.face_normals,
face_angles=self.face_angles)
return vertex_normals
@vertex_normals.setter
def vertex_normals(self, values):
"""
Assign values to vertex normals
Parameters
-------------
values : (len(self.vertices), 3) float
Unit normal vectors for each vertex
"""
if values is not None:
values = np.asanyarray(values,
order='C',
dtype=np.float64)
if values.shape == self.vertices.shape:
# check to see if they assigned all zeros
if values.ptp() < tol.merge:
log.warning(
'vertex_normals are all set to zero!')
self._cache['vertex_normals'] = values
@caching.cache_decorator
def vertex_faces(self):
"""
A representation of the face indices that correspond to each vertex.
Returns
----------
vertex_faces : (n,m) int
Each row contains the face indices that correspond to the given vertex,
padded with -1 up to the max number of faces corresponding to any one vertex
Where n == len(self.vertices), m == max number of faces for a single vertex
"""
vertex_faces = geometry.vertex_face_indices(
vertex_count=len(self.vertices),
faces=self.faces,
faces_sparse=self.faces_sparse)
return vertex_faces
@caching.cache_decorator
def bounds(self):
"""
The axis aligned bounds of the faces of the mesh.
Returns
-----------
bounds : (2, 3) float or None
Bounding box with [min, max] coordinates
If mesh is empty will return None
"""
# return bounds including ONLY referenced vertices
in_mesh = self.vertices[self.referenced_vertices]
# don't crash if we have no vertices referenced
if len(in_mesh) == 0:
return None
# get mesh bounds with min and max
mesh_bounds = np.array([in_mesh.min(axis=0),
in_mesh.max(axis=0)])
return mesh_bounds
@caching.cache_decorator
def extents(self):
"""
The length, width, and height of the axis aligned
bounding box of the mesh.
Returns
-----------
extents : (3, ) float or None
Array containing axis aligned [length, width, height]
If mesh is empty returns None
"""
# if mesh is empty return None
if self.bounds is None:
return None
extents = self.bounds.ptp(axis=0)
extents.flags.writeable = False
return extents
@caching.cache_decorator
def scale(self):
"""
A metric for the overall scale of the mesh, the length of the
diagonal of the axis aligned bounding box of the mesh.
Returns
----------
scale : float
The length of the meshes AABB diagonal
"""
# if mesh is empty just return no scale
if self.extents is None:
return 1.0
# make sure we are returning python floats
scale = float((self.extents ** 2).sum() ** .5)
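# e.g. a unit cube has extents (1, 1, 1), so its scale is sqrt(3) ~= 1.732,
# the length of the axis aligned bounding box diagonal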
return scale
@caching.cache_decorator
def centroid(self):
"""
The point in space which is the average of the triangle centroids
weighted by the area of each triangle.
This will be valid even for non- watertight meshes,
unlike self.center_mass
Returns
----------
centroid : (3, ) float
The average vertex weighted by face area
"""
# use the centroid of each triangle weighted by
# the area of the triangle to find the overall centroid
centroid = np.average(self.triangles_center,
axis=0,
weights=self.area_faces)
centroid.flags.writeable = False
return centroid
@property
def center_mass(self):
"""
The point in space which is the center of mass/volume.
If the current mesh is not watertight, this is meaningless garbage
unless it was explicitly set.
Returns
-----------
center_mass : (3, ) float
Volumetric center of mass of the mesh
"""
center_mass = self.mass_properties['center_mass']
return center_mass
@center_mass.setter
def center_mass(self, cm):
self._center_mass = cm
self._cache.delete('mass_properties')
@property
def density(self):
"""
The density of the mesh.
Returns
-----------
density : float
The density of the mesh.
"""
density = self.mass_properties['density']
return density
@density.setter
def density(self, value):
"""
Set the density of the mesh.
Parameters
-------------
density : float
Specify the density of the mesh to be used in inertia calculations
"""
self._density = float(value)
self._cache.delete('mass_properties')
@property
def volume(self):
"""
Volume of the current mesh calculated using a surface
integral. If the current mesh isn't watertight this is
garbage.
Returns
---------
volume : float
Volume of the current mesh
"""
volume = self.mass_properties['volume']
return volume
@property
def mass(self):
"""
Mass of the current mesh, based on specified density and
volume. If the current mesh isn't watertight this is garbage.
Returns
---------
mass : float
Mass of the current mesh
"""
mass = self.mass_properties['mass']
return mass
@property
def moment_inertia(self):
"""
Return the moment of inertia matrix of the current mesh.
If mesh isn't watertight this is garbage.
Returns
---------
inertia : (3, 3) float
Moment of inertia of the current mesh
"""
inertia = self.mass_properties['inertia']
return inertia
@caching.cache_decorator
def principal_inertia_components(self):
"""
Return the principal components of inertia
Ordering corresponds to mesh.principal_inertia_vectors
Returns
----------
components : (3, ) float
Principal components of inertia
"""
# both components and vectors from inertia matrix
components, vectors = inertia.principal_axis(self.moment_inertia)
# store vectors in cache for later
self._cache['principal_inertia_vectors'] = vectors
return components
@property
def principal_inertia_vectors(self):
"""
Return the principal axis of inertia.
Ordering corresponds to mesh.principal_inertia_components
Returns
----------
vectors : (3, 3) float
Three vectors pointing along the
principal axis of inertia directions
"""
populate = self.principal_inertia_components
return self._cache['principal_inertia_vectors']
@caching.cache_decorator
def principal_inertia_transform(self):
"""
A transform which moves the current mesh so the principal
inertia vectors are on the X,Y, and Z axis, and the centroid is
at the origin.
Returns
----------
transform : (4, 4) float
Homogeneous transformation matrix
"""
order = np.argsort(self.principal_inertia_components)[1:][::-1]
vectors = self.principal_inertia_vectors[order]
vectors = np.vstack((vectors, np.cross(*vectors)))
transform = np.eye(4)
transform[:3, :3] = vectors
transform = transformations.transform_around(
matrix=transform,
point=self.centroid)
transform[:3, 3] -= self.centroid
return transform
@caching.cache_decorator
def symmetry(self):
"""
Check whether a mesh has rotational symmetry.
Returns
-----------
symmetry: None No rotational symmetry
'radial' Symmetric around an axis
'spherical' Symmetric around a point
"""
symmetry, axis, section = inertia.radial_symmetry(self)
self._cache['symmetry_axis'] = axis
self._cache['symmetry_section'] = section
return symmetry
@property
def symmetry_axis(self):
"""
If a mesh has rotational symmetry, return the axis.
Returns
------------
axis: (3, ) float
Axis around which a 2D profile
was revolved to generate this mesh
"""
if self.symmetry is not None:
return self._cache['symmetry_axis']
@property
def symmetry_section(self):
"""
If a mesh has rotational symmetry, return the two
vectors which make up a section coordinate frame.
Returns
----------
section: (2, 3) float, vectors to take a section along
"""
if self.symmetry is not None:
return self._cache['symmetry_section']
@caching.cache_decorator
def triangles(self):
"""
Actual triangles of the mesh (points, not indexes)
Returns
---------
triangles : (n, 3, 3) float
Points of triangle vertices
"""
# use of advanced indexing on our tracked arrays will
# trigger a change flag which means the MD5 will have to be
# recomputed. We can escape this check by viewing the array.
triangles = self.vertices.view(np.ndarray)[self.faces]
# make triangles (which are derived from faces/vertices) not writeable
triangles.flags.writeable = False
return triangles
@caching.cache_decorator
def triangles_tree(self):
"""
An R-tree containing each face of the mesh.
Returns
----------
tree : rtree.index
Each triangle in self.faces has a rectangular cell
"""
tree = triangles.bounds_tree(self.triangles)
return tree
@caching.cache_decorator
def triangles_center(self):
"""
The center of each triangle (barycentric [1/3, 1/3, 1/3])
Returns
---------
triangles_center : (len(self.faces), 3) float
Center of each triangular face
"""
triangles_center = self.triangles.mean(axis=1)
return triangles_center
@caching.cache_decorator
def triangles_cross(self):
"""
The cross product of two edges of each triangle.
Returns
---------
crosses : (n, 3) float
Cross product of each triangle
"""
crosses = triangles.cross(self.triangles)
return crosses
@caching.cache_decorator
def edges(self):
"""
Edges of the mesh (derived from faces).
Returns
---------
edges : (n, 2) int
List of vertex indices making up edges
"""
edges, index = geometry.faces_to_edges(self.faces.view(np.ndarray),
return_index=True)
self._cache['edges_face'] = index
return edges
@caching.cache_decorator
def edges_face(self):
"""
Which face does each edge belong to.
Returns
---------
edges_face : (n, ) int
Index of self.faces
"""
populate = self.edges
return self._cache['edges_face']
@caching.cache_decorator
def edges_unique(self):
"""
The unique edges of the mesh.
Returns
----------
edges_unique : (n, 2) int
Vertex indices for unique edges
"""
unique, inverse = grouping.unique_rows(self.edges_sorted)
edges_unique = self.edges_sorted[unique]
# edges_unique will be added automatically by the decorator
# additional terms generated need to be added to the cache manually
self._cache['edges_unique_idx'] = unique
self._cache['edges_unique_inverse'] = inverse
return edges_unique
@caching.cache_decorator
def edges_unique_length(self):
"""
How long is each unique edge.
Returns
----------
length : (len(self.edges_unique), ) float
Length of each unique edge
"""
vector = np.subtract(*self.vertices[self.edges_unique.T])
length = util.row_norm(vector)
return length
@caching.cache_decorator
def edges_unique_inverse(self):
"""
Return the inverse required to reproduce
self.edges_sorted from self.edges_unique.
Useful for referencing edge properties:
mesh.edges_unique[mesh.edges_unique_inverse] == m.edges_sorted
Returns
----------
inverse : (len(self.edges), ) int
Indexes of self.edges_unique
"""
populate = self.edges_unique
return self._cache['edges_unique_inverse']
@caching.cache_decorator
def edges_sorted(self):
"""
Edges sorted along axis 1
Returns
----------
edges_sorted : (n, 2)
Same as self.edges but sorted along axis 1
"""
edges_sorted = np.sort(self.edges, axis=1)
return edges_sorted
@caching.cache_decorator
def edges_sorted_tree(self):
"""
A KDTree for mapping edges back to edge index.
Returns
------------
tree : scipy.spatial.cKDTree
Tree when queried with edges will return
their index in mesh.edges_sorted
"""
from scipy.spatial import cKDTree
return cKDTree(self.edges_sorted)
@caching.cache_decorator
def edges_sparse(self):
"""
Edges in sparse bool COO graph format where connected
vertices are True.
Returns
----------
sparse: (len(self.vertices), len(self.vertices)) bool
Sparse graph in COO format
"""
sparse = graph.edges_to_coo(self.edges,
count=len(self.vertices))
return sparse
@caching.cache_decorator
def body_count(self):
"""
How many connected groups of vertices exist in this mesh.
Note that this number may differ from result in mesh.split,
which is calculated from FACE rather than vertex adjacency.
Returns
-----------
count : int
Number of connected vertex groups
"""
# labels is a (len(self.vertices),) int array of connected component ids
count, labels = graph.csgraph.connected_components(
self.edges_sparse,
directed=False,
return_labels=True)
self._cache['vertices_component_label'] = labels
return count
@caching.cache_decorator
def faces_unique_edges(self):
"""
For each face return which indexes in mesh.unique_edges constructs
that face.
Returns
---------
faces_unique_edges : (len(self.faces), 3) int
Indexes of self.edges_unique that
construct self.faces
Examples
---------
In [0]: mesh.faces[0:2]
Out[0]:
TrackedArray([[ 1, 6946, 24224],
[ 6946, 1727, 24225]])
In [1]: mesh.edges_unique[mesh.faces_unique_edges[0:2]]
Out[1]:
array([[[ 1, 6946],
[ 6946, 24224],
[ 1, 24224]],
[[ 1727, 6946],
[ 1727, 24225],
[ 6946, 24225]]])
"""
# make sure we have populated unique edges
populate = self.edges_unique
# we are relying on the fact that edges are stacked in triplets
result = self._cache['edges_unique_inverse'].reshape((-1, 3))
return result
@caching.cache_decorator
def euler_number(self):
"""
Return the Euler characteristic (a topological invariant) for the mesh
In order to guarantee correctness, this should be called after
remove_unreferenced_vertices
Returns
----------
euler_number : int
Topological invariant
"""
euler = int(self.referenced_vertices.sum() -
len(self.edges_unique) +
len(self.faces))
return euler
@caching.cache_decorator
def referenced_vertices(self):
"""
Which vertices in the current mesh are referenced by a face.
Returns
-------------
referenced : (len(self.vertices), ) bool
Which vertices are referenced by a face
"""
# use the builtin bool dtype; np.bool was deprecated and later removed in numpy
referenced = np.zeros(len(self.vertices), dtype=bool)
referenced[self.faces] = True
return referenced
@property
def units(self):
"""
Definition of units for the mesh.
Returns
----------
units : str
Unit system mesh is in, or None if not defined
"""
if 'units' in self.metadata:
return self.metadata['units']
else:
return None
@units.setter
def units(self, value):
value = str(value).lower()
self.metadata['units'] = value
def convert_units(self, desired, guess=False):
"""
Convert the units of the mesh into a specified unit.
Parameters
------------
desired : string
Units to convert to (eg 'inches')
guess : boolean
If self.units are not defined should we
guess the current units of the document and then convert?
"""
units._convert_units(self, desired, guess)
return self
def merge_vertices(self, digits=None, textured=True):
"""
If a mesh has vertices that are closer than
trimesh.constants.tol.merge reindex faces to reference
the same index for both vertices.
Parameters
--------------
digits : int
If specified overrides tol.merge
textured : bool
If True avoids merging vertices with different UV
coordinates. No effect on untextured meshes.
"""
grouping.merge_vertices(self,
digits=digits,
textured=textured)
def update_vertices(self, mask, inverse=None):
"""
Update vertices with a mask.
Parameters
------------
mask : (len(self.vertices), ) bool
Array of which vertices to keep
inverse : (len(self.vertices)) int
Array to reconstruct vertex references
such as output by np.unique
"""
# if the mesh is already empty we can't remove anything
if self.is_empty:
return
# make sure mask is a numpy array
mask = np.asanyarray(mask)
if ((mask.dtype.name == 'bool' and mask.all()) or
len(mask) == 0 or self.is_empty):
# mask doesn't remove any vertices so exit early
return
# create the inverse mask if not passed
if inverse is None:
inverse = np.zeros(len(self.vertices), dtype=np.int64)
if mask.dtype.kind == 'b':
inverse[mask] = np.arange(mask.sum())
elif mask.dtype.kind == 'i':
inverse[mask] = np.arange(len(mask))
else:
inverse = None
# re-index faces from inverse
if inverse is not None and util.is_shape(self.faces, (-1, 3)):
self.faces = inverse[self.faces.reshape(-1)].reshape((-1, 3))
# update the visual object with our mask
self.visual.update_vertices(mask)
# get the normals from cache before dumping
cached_normals = self._cache['vertex_normals']
# apply to face_attributes
count = len(self.vertices)
for key, value in self.vertex_attributes.items():
try:
# covers un-len'd objects as well
if len(value) != count:
raise TypeError()
except TypeError:
continue
# apply the mask to the attribute
self.vertex_attributes[key] = value[mask]
# actually apply the mask
self.vertices = self.vertices[mask]
# if we had passed vertex normals try to save them
if util.is_shape(cached_normals, (-1, 3)):
try:
self.vertex_normals = cached_normals[mask]
except BaseException:
pass
def update_faces(self, mask):
"""
In many cases, we will want to remove specific faces.
However, there is additional bookkeeping to do this cleanly.
This function updates the set of faces with a validity mask,
as well as keeping track of normals and colors.
Parameters
------------
mask : (m, ) int or (len(self.faces), ) bool
Mask to remove faces
"""
# if the mesh is already empty we can't remove anything
if self.is_empty:
return
mask = np.asanyarray(mask)
if mask.dtype.name == 'bool' and mask.all():
# mask removes no faces so exit early
return
# try to save face normals before dumping cache
cached_normals = self._cache['face_normals']
faces = self._data['faces']
# if Trimesh has been subclassed and faces have been moved
# from data to cache, get faces from cache.
if not util.is_shape(faces, (-1, 3)):
faces = self._cache['faces']
# apply to face_attributes
count = len(self.faces)
for key, value in self.face_attributes.items():
try:
# covers un-len'd objects as well
if len(value) != count:
raise TypeError()
except TypeError:
continue
# apply the mask to the attribute
self.face_attributes[key] = value[mask]
# actually apply the mask
self.faces = faces[mask]
# apply to face colors
self.visual.update_faces(mask)
# if our normals were the correct shape apply them
if util.is_shape(cached_normals, (-1, 3)):
self.face_normals = cached_normals[mask]
def remove_infinite_values(self):
"""
Ensure that every vertex and face consists of finite numbers.
This will remove vertices or faces containing np.nan and np.inf
Alters
----------
self.faces : masked to remove np.inf/np.nan
self.vertices : masked to remove np.inf/np.nan
"""
if util.is_shape(self.faces, (-1, 3)):
# (len(self.faces), ) bool, mask for faces
face_mask = np.isfinite(self.faces).all(axis=1)
self.update_faces(face_mask)
if util.is_shape(self.vertices, (-1, 3)):
# (len(self.vertices), ) bool, mask for vertices
vertex_mask = np.isfinite(self.vertices).all(axis=1)
self.update_vertices(vertex_mask)
def remove_duplicate_faces(self):
"""
On the current mesh remove any faces which are duplicates.
Alters
----------
self.faces : removes duplicates
"""
unique, inverse = grouping.unique_rows(np.sort(self.faces, axis=1))
self.update_faces(unique)
def rezero(self):
"""
Translate the mesh so that all vertex coordinates are positive.
Alters
----------
self.vertices : Translated to first octant (all values > 0)
"""
self.apply_translation(self.bounds[0] * -1.0)
@log_time
def split(self, only_watertight=True, adjacency=None, **kwargs):
"""
Returns a list of Trimesh objects, based on face connectivity.
Splits into individual components, sometimes referred to as 'bodies'
Parameters
------------
only_watertight : bool
Only return watertight meshes and discard remainder
adjacency : None or (n, 2) int
Override face adjacency with custom values
Returns
---------
meshes : (n, ) trimesh.Trimesh
Separate bodies from original mesh
"""
meshes = graph.split(self,
only_watertight=only_watertight,
adjacency=adjacency,
**kwargs)
return meshes
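# Illustrative usage sketch ('scene.stl' is a placeholder path):
#   >>> mesh = trimesh.load('scene.stl')
#   >>> parts = mesh.split(only_watertight=False)
#   >>> [len(p.faces) for p in parts]   # one entry per connected component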
@caching.cache_decorator
def face_adjacency(self):
"""
Find faces that share an edge, which we call here 'adjacent'.
Returns
----------
adjacency : (n, 2) int
Pairs of faces which share an edge
Examples
---------
In [1]: mesh = trimesh.load('models/featuretype.STL')
In [2]: mesh.face_adjacency
Out[2]:
array([[ 0, 1],
[ 2, 3],
[ 0, 3],
...,
[1112, 949],
[3467, 3475],
[1113, 3475]])
In [3]: mesh.faces[mesh.face_adjacency[0]]
Out[3]:
TrackedArray([[ 1, 0, 408],
[1239, 0, 1]], dtype=int64)
In [4]: import networkx as nx
In [5]: graph = nx.from_edgelist(mesh.face_adjacency)
In [6]: groups = nx.connected_components(graph)
"""
adjacency, edges = graph.face_adjacency(
mesh=self, return_edges=True)
self._cache['face_adjacency_edges'] = edges
return adjacency
@caching.cache_decorator
def face_adjacency_edges(self):
"""
Returns the edges that are shared by the adjacent faces.
Returns
--------
edges : (n, 2) int
Vertex indices which correspond to face_adjacency
"""
# this value is calculated as a byproduct of the face adjacency
populate = self.face_adjacency
return self._cache['face_adjacency_edges']
@caching.cache_decorator
def face_adjacency_edges_tree(self):
"""
A KDTree for mapping edges back face adjacency index.
Returns
------------
tree : scipy.spatial.cKDTree
Tree when queried with SORTED edges will return
their index in mesh.face_adjacency
"""
from scipy.spatial import cKDTree
return cKDTree(self.face_adjacency_edges)
@caching.cache_decorator
def face_adjacency_angles(self):
"""
Return the angle between adjacent faces
Returns
--------
adjacency_angle : (n, ) float
Angle between adjacent faces
Each value corresponds with self.face_adjacency
"""
pairs = self.face_normals[self.face_adjacency]
angles = geometry.vector_angle(pairs)
return angles
@caching.cache_decorator
def face_adjacency_projections(self):
"""
The projection of the non-shared vertex of a triangle onto
its adjacent face
Returns
----------
projections : (len(self.face_adjacency), ) float
Dot product of vertex
onto plane of adjacent triangle.
"""
projections = convex.adjacency_projections(self)
return projections
@caching.cache_decorator
def face_adjacency_convex(self):
"""
Return faces which are adjacent and locally convex.
What this means is that given faces A and B, the one vertex
in B that is not shared with A, projected onto the plane of A
has a projection that is zero or negative.
Returns
----------
are_convex : (len(self.face_adjacency), ) bool
Face pairs that are locally convex
"""
are_convex = self.face_adjacency_projections < tol.merge
return are_convex
@caching.cache_decorator
def face_adjacency_unshared(self):
"""
Return the vertex index of the two vertices not in the shared
edge between two adjacent faces
Returns
-----------
vid_unshared : (len(mesh.face_adjacency), 2) int
Indexes of mesh.vertices
"""
vid_unshared = graph.face_adjacency_unshared(self)
return vid_unshared
@caching.cache_decorator
def face_adjacency_radius(self):
"""
The approximate radius of a cylinder that fits inside adjacent faces.
Returns
------------
radii : (len(self.face_adjacency), ) float
Approximate radius formed by triangle pair
"""
radii, span = graph.face_adjacency_radius(mesh=self)
self._cache['face_adjacency_span'] = span
return radii
@caching.cache_decorator
def face_adjacency_span(self):
"""
The approximate perpendicular projection of the non-shared
vertices in a pair of adjacent faces onto the shared edge of
the two faces.
Returns
------------
span : (len(self.face_adjacency), ) float
Approximate span between the non-shared vertices
"""
populate = self.face_adjacency_radius
return self._cache['face_adjacency_span']
@caching.cache_decorator
def vertex_adjacency_graph(self):
"""
Returns a networkx graph representing the vertices and their connections
in the mesh.
Returns
---------
graph: networkx.Graph
Graph representing vertices and edges between
them where vertices are nodes and edges are edges
Examples
----------
This is useful for getting nearby vertices for a given vertex,
potentially for some simple smoothing techniques.
mesh = trimesh.primitives.Box()
graph = mesh.vertex_adjacency_graph
graph.neighbors(0)
> [1, 2, 3, 4]
"""
adjacency_g = graph.vertex_adjacency_graph(mesh=self)
return adjacency_g
@caching.cache_decorator
def vertex_neighbors(self):
"""
The vertex neighbors of each vertex of the mesh, determined from
the cached vertex_adjacency_graph if it already exists.
Returns
----------
vertex_neighbors : (len(self.vertices), ) int
Represents immediate neighbors of each vertex along
the edge of a triangle
Examples
----------
This is useful for getting nearby vertices for a given vertex,
potentially for some simple smoothing techniques.
>>> mesh = trimesh.primitives.Box()
>>> mesh.vertex_neighbors[0]
[1, 2, 3, 4]
"""
graph = self.vertex_adjacency_graph
neighbors = [list(graph.neighbors(i)) for
i in range(len(self.vertices))]
return np.array(neighbors)
@caching.cache_decorator
def is_winding_consistent(self):
"""
Does the mesh have consistent winding or not.
A mesh with consistent winding has each shared edge
going in an opposite direction from the other in the pair.
Returns
--------
consistent : bool
Is winding is consistent or not
"""
if self.is_empty:
return False
# consistent winding check is populated into the cache by is_watertight
populate = self.is_watertight
return self._cache['is_winding_consistent']
@caching.cache_decorator
def is_watertight(self):
"""
Check if a mesh is watertight by making sure every edge is
included in exactly two faces.
Returns
----------
is_watertight : bool
Is mesh watertight or not
"""
if self.is_empty:
return False
watertight, winding = graph.is_watertight(
edges=self.edges, edges_sorted=self.edges_sorted)
self._cache['is_winding_consistent'] = winding
return watertight
@caching.cache_decorator
def is_volume(self):
"""
Check if a mesh has all the properties required to represent
a valid volume, rather than just a surface.
These properties include being watertight, having consistent
winding and outward facing normals.
Returns
---------
valid : bool
Does the mesh represent a volume
"""
valid = bool(self.is_watertight and
self.is_winding_consistent and
np.isfinite(self.center_mass).all() and
self.volume > 0.0)
return valid
@caching.cache_decorator
def is_empty(self):
"""
Does the current mesh have data defined.
Returns
--------
empty : bool
If True, no data is set on the current mesh
"""
return self._data.is_empty()
@caching.cache_decorator
def is_convex(self):
"""
Check if a mesh is convex or not.
Returns
----------
is_convex: bool
Is mesh convex or not
"""
if self.is_empty:
return False
is_convex = bool(convex.is_convex(self))
return is_convex
@caching.cache_decorator
def kdtree(self):
"""
Return a scipy.spatial.cKDTree of the vertices of the mesh.
Not cached as this led to observed memory issues and segfaults.
Returns
---------
tree : scipy.spatial.cKDTree
Contains mesh.vertices
"""
from scipy.spatial import cKDTree
tree = cKDTree(self.vertices.view(np.ndarray))
return tree
def remove_degenerate_faces(self, height=tol.merge):
"""
Remove degenerate faces (faces without 3 unique vertex indices)
from the current mesh.
If a height is specified, it will remove any face with a 2D oriented
bounding box with one edge shorter than that height.
If not specified, it will remove any face with a zero normal.
Parameters
------------
height : float
If specified removes faces with an oriented bounding
box shorter than this on one side.
Returns
-------------
nondegenerate : (len(self.faces), ) bool
Mask used to remove faces
"""
nondegenerate = triangles.nondegenerate(
self.triangles,
areas=self.area_faces,
height=height)
self.update_faces(nondegenerate)
return nondegenerate
@caching.cache_decorator
def facets(self):
"""
Return a list of face indices for coplanar adjacent faces.
Returns
---------
facets : (n, ) sequence of (m, ) int
Groups of indexes of self.faces
"""
facets = graph.facets(self)
return facets
@caching.cache_decorator
def facets_area(self):
"""
Return an array containing the area of each facet.
Returns
---------
area : (len(self.facets), ) float
Total area of each facet (group of faces)
"""
# avoid thrashing the cache inside a loop
area_faces = self.area_faces
# sum the area of each group of faces represented by facets
# use native python sum in tight loop as opposed to array.sum()
# as in this case the lower function call overhead of
# native sum provides roughly a 50% speedup
areas = np.array([sum(area_faces[i])
for i in self.facets],
dtype=np.float64)
return areas
@caching.cache_decorator
def facets_normal(self):
"""
Return the normal of each facet
Returns
---------
normals: (len(self.facets), 3) float
A unit normal vector for each facet
"""
if len(self.facets) == 0:
return np.array([])
area_faces = self.area_faces
# the face index of the largest face in each facet
index = np.array([i[area_faces[i].argmax()]
for i in self.facets])
# (n, 3) float, unit normal vectors of facet plane
normals = self.face_normals[index]
# (n, 3) float, points on facet plane
origins = self.vertices[self.faces[:, 0][index]]
# save origins in cache
self._cache['facets_origin'] = origins
return normals
@caching.cache_decorator
def facets_origin(self):
"""
Return a point on the facet plane.
Returns
------------
origins : (len(self.facets), 3) float
A point on each facet plane
"""
populate = self.facets_normal
return self._cache['facets_origin']
@caching.cache_decorator
def facets_boundary(self):
"""
Return the edges which represent the boundary of each facet
Returns
---------
edges_boundary : sequence of (n, 2) int
Indices of self.vertices
"""
# make each row correspond to a single face
edges = self.edges_sorted.reshape((-1, 6))
# get the edges for each facet
edges_facet = [edges[i].reshape((-1, 2)) for i in self.facets]
edges_boundary = np.array([i[grouping.group_rows(i, require_count=1)]
for i in edges_facet])
return edges_boundary
@caching.cache_decorator
def facets_on_hull(self):
"""
Find which facets of the mesh are on the convex hull.
Returns
---------
on_hull : (len(mesh.facets), ) bool
Whether each facet is on the mesh's convex hull or not
"""
# if no facets exit early
if len(self.facets) == 0:
return np.array([], dtype=np.bool)
# facets plane, origin and normal
normals = self.facets_normal
origins = self.facets_origin
# (n, 3) convex hull vertices
convex = self.convex_hull.vertices.view(np.ndarray).copy()
# boolean mask for which facets are on convex hull
on_hull = np.zeros(len(self.facets), dtype=np.bool)
for i, normal, origin in zip(range(len(normals)), normals, origins):
# a facet plane is on the convex hull if every vertex
# of the convex hull is behind that plane
# which we are checking with dot products
dot = np.dot(normal, (convex - origin).T)
on_hull[i] = (dot < tol.merge).all()
return on_hull
@log_time
def fix_normals(self, multibody=None):
"""
Find and fix problems with self.face_normals and self.faces
winding direction.
For face normals ensure that vectors are consistently pointed
outwards, and that self.faces is wound in the correct direction
for all connected components.
Parameters
-------------
multibody : None or bool
Fix normals across multiple bodies
if None automatically pick from body_count
"""
if multibody is None:
multibody = self.body_count > 1
repair.fix_normals(self, multibody=multibody)
def fill_holes(self):
"""
Fill single triangle and single quad holes in the current mesh.
Returns
----------
watertight : bool
Is the mesh watertight after the function completes
"""
return repair.fill_holes(self)
def register(self, other, **kwargs):
"""
Align a mesh with another mesh or a PointCloud using
the principal axes of inertia as a starting point which
is refined by iterative closest point.
Parameters
------------
mesh : trimesh.Trimesh object
Mesh to align with other
other : trimesh.Trimesh or (n, 3) float
Mesh or points in space
samples : int
Number of samples from mesh surface to align
icp_first : int
How many ICP iterations for the 9 possible
combinations of principal axis signs
icp_final : int
How many ICP iterations for the closest
candidate from the wider search
Returns
-----------
mesh_to_other : (4, 4) float
Transform to align mesh to the other object
cost : float
Average square distance per point
"""
mesh_to_other, cost = registration.mesh_other(mesh=self,
other=other,
**kwargs)
return mesh_to_other, cost
def compute_stable_poses(self,
center_mass=None,
sigma=0.0,
n_samples=1,
threshold=0.0):
"""
Computes stable orientations of a mesh and their quasi-static probabilities.
This method samples the location of the center of mass from a multivariate
gaussian (mean at com, cov equal to identity times sigma) over n_samples.
For each sample, it computes the stable resting poses of the mesh on a
planar workspace and evaluates the probabilities of landing in
each pose if the object is dropped onto the table randomly.
This method returns the 4x4 homogeneous transform matrices that place
the shape against the planar surface with the z-axis pointing upwards
and a list of the probabilities for each pose.
The transforms and probabilities that are returned are sorted, with the
most probable pose first.
Parameters
------------
center_mass : (3, ) float
The object center of mass (if None, this method
assumes uniform density and watertightness and
computes a center of mass explicitly)
sigma : float
The covariance for the multivariate gaussian used
to sample center of mass locations
n_samples : int
The number of samples of the center of mass location
threshold : float
The probability value at which to threshold
returned stable poses
Returns
-------
transforms : (n, 4, 4) float
The homogeneous matrices that transform the
object to rest in a stable pose, with the
new z-axis pointing upwards from the table
and the object just touching the table.
probs : (n, ) float
A probability ranging from 0.0 to 1.0 for each pose
"""
return poses.compute_stable_poses(mesh=self,
center_mass=center_mass,
sigma=sigma,
n_samples=n_samples,
threshold=threshold)
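# Illustrative usage sketch on a simple watertight primitive; per the
# docstring above the first transform returned is the most probable pose.
#   >>> mesh = trimesh.primitives.Box(extents=[1, 2, 3])
#   >>> transforms, probs = mesh.compute_stable_poses(n_samples=1)
#   >>> resting = mesh.copy().apply_transform(transforms[0])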
def subdivide(self, face_index=None):
"""
Subdivide a mesh, with each subdivided face replaced with four
smaller faces.
Parameters
------------
face_index: (m, ) int or None
If None all faces of mesh will be subdivided
If (m, ) int array of indices: only specified faces will be
subdivided. Note that in this case the mesh will generally
no longer be manifold, as the additional vertex on the midpoint
will not be used by the adjacent faces to the faces specified,
and an additional postprocessing step will be required to
make resulting mesh watertight
"""
# subdivide vertex attributes
vertex_attributes = {}
if (hasattr(self.visual, 'uv') and
np.shape(self.visual.uv)[0] == len(self.vertices)):
# only pass UV coordinates if they are defined per-vertex
vertex_attributes['uv'] = self.visual.uv
# perform the subdivision with vertex attributes
vertices, faces, attr = remesh.subdivide(
vertices=self.vertices,
faces=self.faces,
face_index=face_index,
vertex_attributes=vertex_attributes)
# if we had texture reconstruct it here
visual = None
if 'uv' in attr:
# get a copy of the current visuals
visual = self.visual.copy()
# assign the subdivided UV's and remove them
visual.uv = attr.pop('uv')
# create a new mesh
result = Trimesh(
vertices=vertices,
faces=faces,
visual=visual,
vertex_attributes=attr,
process=False)
return result
@log_time
def smoothed(self, angle=None, facet_minlen=4):
"""
Return a version of the current mesh which will render
nicely, without changing source mesh.
Parameters
-------------
angle : float or None
Angle in radians face pairs with angles
smaller than this will appear smoothed
facet_minlen : int or None
Minimum length of facets to consider
Returns
---------
smoothed : trimesh.Trimesh
Non watertight version of current mesh
which will render nicely with smooth shading
"""
# smooth should be recomputed if visuals change
self.visual._verify_crc()
cached = self.visual._cache['smoothed']
if cached is not None:
return cached
# run smoothing
smoothed = graph.smoothed(
self,
angle=angle,
facet_minlen=facet_minlen)
self.visual._cache['smoothed'] = smoothed
return smoothed
@property
def visual(self):
"""
Get the stored visuals for the current mesh.
Returns
-------------
visual : ColorVisuals or TextureVisuals
Contains visual information about the mesh
"""
if hasattr(self, '_visual'):
return self._visual
return None
@visual.setter
def visual(self, value):
"""
When setting a visual object, always make sure
that `visual.mesh` points back to the source mesh.
Parameters
--------------
visual : ColorVisuals or TextureVisuals
Contains visual information about the mesh
"""
value.mesh = self
self._visual = value
def section(self,
plane_normal,
plane_origin):
"""
Returns a 3D cross section of the current mesh and a plane
defined by origin and normal.
Parameters
------------
plane_normal : (3, ) float
Normal vector of section plane
plane_origin : (3, ) float
Point on the cross section plane
Returns
---------
intersections: Path3D or None
Curve of intersection
"""
# turn line segments into Path2D/Path3D objects
from .exchange.load import load_path
# return a single cross section in 3D
lines, face_index = intersections.mesh_plane(
mesh=self,
plane_normal=plane_normal,
plane_origin=plane_origin,
return_faces=True)
# if the section didn't hit the mesh return None
if len(lines) == 0:
return None
# otherwise load the line segments into a Path3D object
path = load_path(lines)
# add the face index info into metadata
path.metadata['face_index'] = face_index
return path
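# Illustrative usage sketch: slice a sphere through its equator; to_planar()
# (from trimesh's path module) flattens the Path3D result when 2D is needed.
#   >>> mesh = trimesh.primitives.Sphere()
#   >>> path3d = mesh.section(plane_origin=[0, 0, 0], plane_normal=[0, 0, 1])
#   >>> path2d, to_3D = path3d.to_planar()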
def section_multiplane(self,
plane_origin,
plane_normal,
heights):
"""
Return multiple parallel cross sections of the current
mesh in 2D.
Parameters
------------
plane_normal : (3, ) float
Normal vector of section plane
plane_origin : (3, ) float
Point on the cross section plane
heights : (n, ) float
Each section is offset by height along
the plane normal.
Returns
---------
paths : (n, ) Path2D or None
2D cross sections at specified heights.
path.metadata['to_3D'] contains transform
to return 2D section back into 3D space.
"""
# turn line segments into Path2D/Path3D objects
from .exchange.load import load_path
# do a multiplane intersection
lines, transforms, faces = intersections.mesh_multiplane(
mesh=self,
plane_normal=plane_normal,
plane_origin=plane_origin,
heights=heights)
# turn the line segments into Path2D objects
paths = [None] * len(lines)
for index, L, T in zip(range(len(lines)),
lines,
transforms):
if len(L) > 0:
paths[index] = load_path(
L, metadata={'to_3D': T})
return paths
def slice_plane(self,
plane_origin,
plane_normal,
**kwargs):
"""
Slice the mesh with a plane, returning a new mesh that is the
portion of the original mesh to the positive normal side of the plane
Parameters
------------
plane_normal : (3, ) float
Normal vector of slicing plane
plane_origin : (3, ) float
Point on the slicing plane
Returns
---------
new_mesh: trimesh.Trimesh or None
Subset of current mesh that intersects the half plane
to the positive normal side of the plane
"""
# return a new mesh
new_mesh = intersections.slice_mesh_plane(
mesh=self,
plane_normal=plane_normal,
plane_origin=plane_origin,
**kwargs)
return new_mesh
@caching.cache_decorator
def convex_hull(self):
"""
Returns a Trimesh object representing the convex hull of
the current mesh.
Returns
--------
convex : trimesh.Trimesh
Mesh of convex hull of current mesh
"""
hull = convex.convex_hull(self)
return hull
def sample(self, count, return_index=False):
"""
Return random samples distributed across the
surface of the mesh
Parameters
------------
count : int
Number of points to sample
return_index : bool
If True will also return the index of which face each
sample was taken from.
Returns
---------
samples : (count, 3) float
Points on surface of mesh
face_index : (count, ) int
Index of self.faces
"""
samples, index = sample.sample_surface(mesh=self, count=count)
if return_index:
return samples, index
return samples
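# Illustrative usage sketch: surface points plus the normals of the faces
# they were sampled from.
#   >>> points, face_index = mesh.sample(500, return_index=True)
#   >>> point_normals = mesh.face_normals[face_index]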
def remove_unreferenced_vertices(self):
"""
Remove all vertices in the current mesh which are not
referenced by a face.
"""
referenced = np.zeros(len(self.vertices), dtype=np.bool)
referenced[self.faces] = True
inverse = np.zeros(len(self.vertices), dtype=np.int64)
inverse[referenced] = np.arange(referenced.sum())
self.update_vertices(mask=referenced, inverse=inverse)
def unmerge_vertices(self):
"""
Removes all face references so that every face contains
three unique vertex indices and no faces are adjacent.
"""
# new faces are incrementing so every vertex is unique
faces = np.arange(len(self.faces) * 3,
dtype=np.int64).reshape((-1, 3))
# use update_vertices to apply mask to
# all properties that are per-vertex
self.update_vertices(self.faces.reshape(-1))
# set faces to incrementing indexes
self.faces = faces
# keep face normals as they haven't changed
self._cache.clear(exclude=['face_normals'])
def apply_transform(self, matrix):
"""
Transform mesh by a homogeneous transformation matrix.
Does the bookkeeping to avoid recomputing things so this function
should be used rather than directly modifying self.vertices
if possible.
Parameters
------------
matrix : (4, 4) float
Homogeneous transformation matrix
"""
# get c-order float64 matrix
matrix = np.asanyarray(
matrix, order='C', dtype=np.float64)
# only support homogeneous transformations
if matrix.shape != (4, 4):
raise ValueError('Transformation matrix must be (4, 4)!')
# exit early if we've been passed an identity matrix
# np.allclose is surprisingly slow so do this test
elif util.allclose(matrix, np.eye(4), 1e-8):
log.debug('apply_transform passed identity matrix')
return self
# new vertex positions
new_vertices = transformations.transform_points(
self.vertices,
matrix=matrix)
# check to see if the matrix has rotation
# rather than just translation
has_rotation = not util.allclose(
matrix[:3, :3], np.eye(3), atol=1e-6)
# overridden center of mass
if self._center_mass is not None:
self._center_mass = transformations.transform_points(
np.array([self._center_mass, ]),
matrix)[0]
# preserve face normals if we have them stored
new_face_normals = None
if has_rotation and 'face_normals' in self._cache:
# transform face normals by rotation component
new_face_normals = util.unitize(
transformations.transform_points(
self.face_normals,
matrix=matrix,
translate=False))
# preserve vertex normals if we have them stored
new_vertex_normals = None
if has_rotation and 'vertex_normals' in self._cache:
new_vertex_normals = util.unitize(
transformations.transform_points(
self.vertex_normals,
matrix=matrix,
translate=False))
# if transformation flips winding of triangles
if has_rotation and transformations.flips_winding(matrix):
log.debug('transform flips winding')
# fliplr will make array non C contiguous
# which will cause hashes to be more
# expensive than necessary so wrap
self.faces = np.ascontiguousarray(
np.fliplr(self.faces))
# assign the new values
self.vertices = new_vertices
# may be None if we didn't have them previously
self.face_normals = new_face_normals
self.vertex_normals = new_vertex_normals
# preserve normals and topology in cache
# while dumping everything else
self._cache.clear(exclude=[
'face_normals', # transformed by us
'vertex_normals',  # also transformed by us
'face_adjacency', # topological
'face_adjacency_edges',
'face_adjacency_unshared',
'edges',
'edges_sorted',
'edges_unique',
'edges_sparse',
'body_count',
'faces_unique_edges',
'euler_number', ])
# set the cache ID with the current hash value
self._cache.id_set()
log.debug('mesh transformed by matrix')
return self
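# Illustrative usage sketch: rotate a mesh 90 degrees around the Z axis
# using the bundled transformations module.
#   >>> import numpy as np
#   >>> matrix = trimesh.transformations.rotation_matrix(
#   ...     angle=np.pi / 2, direction=[0, 0, 1], point=[0, 0, 0])
#   >>> mesh.apply_transform(matrix)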
def voxelized(self, pitch, method='subdivide', **kwargs):
"""
Return a VoxelGrid object representing the current mesh
discretized into voxels at the specified pitch
Parameters
------------
pitch : float
The edge length of a single voxel
method: implementation key. See `trimesh.voxel.creation.voxelizers`
**kwargs: additional kwargs passed to the specified implementation.
Returns
----------
voxelized : VoxelGrid object
Representing the current mesh
"""
from .voxel import creation
return creation.voxelize(
mesh=self, pitch=pitch, method=method, **kwargs)
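# Illustrative usage sketch (attribute names as exposed by VoxelGrid):
#   >>> vox = mesh.voxelized(pitch=0.05)
#   >>> vox.matrix.shape    # dense boolean occupancy grid
#   >>> vox.points          # (n, 3) float, centers of filled voxels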
def outline(self, face_ids=None, **kwargs):
"""
Given a list of face indexes find the outline of those
faces and return it as a Path3D.
The outline is defined here as every edge which is only
included by a single triangle.
Note that this implies a non-watertight mesh as the
outline of a watertight mesh is an empty path.
Parameters
------------
face_ids : (n, ) int
Indices to compute the outline of.
If None, outline of full mesh will be computed.
**kwargs: passed to Path3D constructor
Returns
----------
path : Path3D
Curve in 3D of the outline
"""
from .path.exchange.misc import faces_to_path
from .path.exchange.load import _create_path
path = _create_path(**faces_to_path(self,
face_ids,
**kwargs))
return path
@caching.cache_decorator
def area(self):
"""
Summed area of all triangles in the current mesh.
Returns
---------
area : float
Surface area of mesh
"""
area = self.area_faces.sum()
return area
@caching.cache_decorator
def area_faces(self):
"""
The area of each face in the mesh.
Returns
---------
area_faces : (n, ) float
Area of each face
"""
area_faces = triangles.area(crosses=self.triangles_cross,
sum=False)
return area_faces
@caching.cache_decorator
def mass_properties(self):
"""
Returns the mass properties of the current mesh.
Assumes uniform density, and result is probably garbage if mesh
isn't watertight.
Returns
----------
properties : dict
With keys:
'volume' : in global units^3
'mass' : From specified density
'density' : Included again for convenience (same as kwarg density)
'inertia' : Taken at the center of mass and aligned with global
coordinate system
'center_mass' : Center of mass location, in global coordinate system
"""
mass = triangles.mass_properties(triangles=self.triangles,
crosses=self.triangles_cross,
density=self._density,
center_mass=self._center_mass,
skip_inertia=False)
# if magical clean-up mode is enabled
# and mesh is watertight/wound correctly but with negative
# volume it means that every triangle is probably facing
# inwards, so we invert it in-place without dumping cache
if (self._validate and
self.is_watertight and
self.is_winding_consistent and
np.linalg.det(mass['inertia']) < 0.0 and
mass['mass'] < 0.0 and
mass['volume'] < 0.0):
# negate mass properties so we don't need to recalculate
mass['inertia'] = -mass['inertia']
mass['mass'] = -mass['mass']
mass['volume'] = -mass['volume']
# invert the faces and normals of the mesh
self.invert()
return mass
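# Illustrative usage sketch: reading mass properties of a watertight mesh.
#   >>> props = mesh.mass_properties
#   >>> props['volume'], props['center_mass'], props['inertia']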
def invert(self):
"""
Invert the mesh in-place by reversing the winding of every
face and negating normals without dumping the cache.
Alters
---------
self.faces : columns reversed
self.face_normals : negated if defined
self.vertex_normals : negated if defined
"""
with self._cache:
if 'face_normals' in self._cache:
self.face_normals = self._cache['face_normals'] * -1.0
if 'vertex_normals' in self._cache:
self.vertex_normals = self._cache['vertex_normals'] * -1.0
self.faces = np.fliplr(self.faces)
# save our normals
self._cache.clear(exclude=['face_normals',
'vertex_normals'])
def scene(self, **kwargs):
"""
Returns a Scene object containing the current mesh.
Returns
---------
scene : trimesh.scene.scene.Scene
Contains just the current mesh
"""
return Scene(self, **kwargs)
def show(self, **kwargs):
"""
Render the mesh in an opengl window. Requires pyglet.
Parameters
------------
smooth : bool
Run smooth shading on mesh or not,
large meshes will be slow
Returns
-----------
scene : trimesh.scene.Scene
Scene with current mesh in it
"""
scene = self.scene()
return scene.show(**kwargs)
def submesh(self, faces_sequence, **kwargs):
"""
Return a subset of the mesh.
Parameters
------------
faces_sequence : sequence (m, ) int
Face indices of mesh
only_watertight : bool
Only return submeshes which are watertight
append : bool
Return a single mesh which has the faces appended.
if this flag is set, only_watertight is ignored
Returns
---------
if append : trimesh.Trimesh object
else : list of trimesh.Trimesh objects
"""
return util.submesh(mesh=self,
faces_sequence=faces_sequence,
**kwargs)
@caching.cache_decorator
def identifier(self):
"""
Return a float vector which is unique to the mesh
and is robust to rotation and translation.
Returns
-----------
identifier : (6, ) float
Identifying properties of the current mesh
"""
identifier = comparison.identifier_simple(self)
return identifier
@caching.cache_decorator
def identifier_md5(self):
"""
An MD5 of the rotation invariant identifier vector
Returns
---------
hashed : str
MD5 hash of the identifier vector
"""
hashed = comparison.identifier_hash(self.identifier)
return hashed
def export(self, file_obj=None, file_type=None, **kwargs):
"""
Export the current mesh to a file object.
If file_obj is a filename, file will be written there.
Supported formats are stl, off, ply, collada, json, dict, glb,
dict64, msgpack.
Parameters
------------
file_obj: open writeable file object
str, file name where to save the mesh
None, if you would like this function to return the export blob
file_type: str
Which file type to export as.
If file name is passed this is not required
"""
return export_mesh(mesh=self,
file_obj=file_obj,
file_type=file_type,
**kwargs)
def to_dict(self):
"""
Return a dictionary representation of the current mesh, with keys
that can be used as the kwargs for the Trimesh constructor, eg:
a = Trimesh(**other_mesh.to_dict())
Returns
----------
result : dict
With keys that match trimesh constructor
"""
result = self.export(file_type='dict')
return result
def convex_decomposition(self, maxhulls=20, **kwargs):
"""
Compute an approximate convex decomposition of a mesh.
testVHACD Parameters which can be passed as kwargs:
Name Default
-----------------------------------------------------
resolution 100000
max. concavity 0.001
plane down-sampling 4
convex-hull down-sampling 4
alpha 0.05
beta 0.05
maxhulls 10
pca 0
mode 0
max. vertices per convex-hull 64
min. volume to add vertices to convex-hulls 0.0001
convex-hull approximation 1
OpenCL acceleration 1
OpenCL platform ID 0
OpenCL device ID 0
output output.wrl
log log.txt
Parameters
------------
maxhulls : int
Maximum number of convex hulls to return
**kwargs : testVHACD keyword arguments
Returns
-------
meshes : list of trimesh.Trimesh
List of convex meshes that approximate the original
"""
result = decomposition.convex_decomposition(self,
maxhulls=maxhulls,
**kwargs)
return result
def union(self, other, engine=None, **kwargs):
"""
Boolean union between this mesh and n other meshes
Parameters
------------
other : Trimesh or (n, ) Trimesh
Other meshes to union
engine : None or str
Which backend to use
Returns
---------
union : trimesh.Trimesh
Union of self and other Trimesh objects
"""
result = boolean.union(
meshes=np.append(self, other),
engine=engine,
**kwargs)
return result
def difference(self, other, engine=None, **kwargs):
"""
Boolean difference between this mesh and n other meshes
Parameters
------------
other : trimesh.Trimesh, or list of trimesh.Trimesh objects
Meshes to difference
Returns
---------
difference : trimesh.Trimesh
Difference between self and other Trimesh objects
"""
result = boolean.difference(meshes=np.append(self, other),
engine=engine, **kwargs)
return result
def intersection(self, other, engine=None, **kwargs):
"""
Boolean intersection between this mesh and n other meshes
Parameters
------------
other : trimesh.Trimesh, or list of trimesh.Trimesh objects
Meshes to calculate intersections with
Returns
---------
intersection : trimesh.Trimesh
Mesh of the volume contained by all passed meshes
"""
result = boolean.intersection(meshes=np.append(self, other),
engine=engine, **kwargs)
return result
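# Illustrative usage sketch: booleans require an external engine (for
# example Blender or OpenSCAD available on PATH); engine=None picks one.
#   >>> a = trimesh.creation.box(extents=[2, 2, 2])
#   >>> b = trimesh.creation.box(extents=[2, 2, 2])
#   >>> b.apply_translation([1, 0, 0])
#   >>> merged = a.union(b)
#   >>> carved = a.difference(b)
#   >>> common = a.intersection(b)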
def contains(self, points):
"""
Given an array of points determine whether or not they
are inside the mesh. This raises an error if called on a
non-watertight mesh.
Parameters
------------
points : (n, 3) float
Points in cartesian space
Returns
---------
contains : (n, ) bool
Whether or not each point is inside the mesh
"""
if not self.is_watertight:
log.warning('Mesh is non- watertight for contained point query!')
contains = self.ray.contains_points(points)
return contains
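# Illustrative usage sketch on a watertight primitive:
#   >>> mesh = trimesh.creation.icosphere()
#   >>> mesh.contains([[0.0, 0.0, 0.0], [5.0, 5.0, 5.0]]).tolist()
#   [True, False]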
@caching.cache_decorator
def face_angles(self):
"""
Returns the angle at each vertex of a face.
Returns
--------
angles : (len(self.faces), 3) float
Angle at each vertex of a face
"""
angles = triangles.angles(self.triangles)
return angles
@caching.cache_decorator
def face_angles_sparse(self):
"""
A sparse matrix representation of the face angles.
Returns
----------
sparse : scipy.sparse.coo_matrix
Float sparse matrix with shape:
(len(self.vertices), len(self.faces))
"""
angles = curvature.face_angles_sparse(self)
return angles
@caching.cache_decorator
def vertex_defects(self):
"""
Return the vertex defects, or (2*pi) minus the sum of the angles
of every face that includes that vertex.
If a vertex is only included by coplanar triangles, this
will be zero. For convex regions this is positive, and
concave negative.
Returns
--------
vertex_defect : (len(self.vertices), ) float
Vertex defect at every vertex
"""
defects = curvature.vertex_defects(self)
return defects
@caching.cache_decorator
def vertex_degree(self):
"""
Return the number of faces each vertex is included in.
Returns
----------
degree : (len(self.vertices), ) int
Number of faces each vertex is included in
"""
# get degree through sparse matrix
degree = np.array(self.faces_sparse.sum(axis=1)).flatten()
return degree
@caching.cache_decorator
def face_adjacency_tree(self):
"""
An R-tree of face adjacencies.
Returns
--------
tree: rtree.index
Where each edge in self.face_adjacency has a
rectangular cell
"""
# the (n,6) interleaved bounding box for every line segment
segment_bounds = np.column_stack((
self.vertices[self.face_adjacency_edges].min(axis=1),
self.vertices[self.face_adjacency_edges].max(axis=1)))
tree = util.bounds_tree(segment_bounds)
return tree
def copy(self, include_cache=False):
"""
Safely return a copy of the current mesh.
By default, copied meshes will have emptied cache
to avoid memory issues and so may be slow on initial
operations until caches are regenerated.
Current object will *never* have its cache cleared.
Parameters
------------
include_cache : bool
If True, will shallow copy cached data to new mesh
Returns
---------
copied : trimesh.Trimesh
Copy of current mesh
"""
# start with an empty mesh
copied = Trimesh()
# always deepcopy vertex and face data
copied._data.data = copy.deepcopy(self._data.data)
# copy visual information
copied.visual = self.visual.copy()
# get metadata
copied.metadata = copy.deepcopy(self.metadata)
# get center_mass and density
if self._center_mass is not None:
copied.center_mass = self.center_mass
copied._density = self._density
# make sure cache ID is set initially
copied._cache.verify()
if include_cache:
# shallow copy cached items into the new cache
# since the data didn't change here when the
# data in the new mesh is changed these items
# will be dumped in the new mesh but preserved
# in the original mesh
copied._cache.cache.update(self._cache.cache)
return copied
def __deepcopy__(self, *args):
# interpret deep copy as "get rid of cached data"
return self.copy(include_cache=False)
def __copy__(self, *args):
# interpret shallow copy as "keep cached data"
return self.copy(include_cache=True)
def eval_cached(self, statement, *args):
"""
Evaluate a statement and cache the result before returning.
Statements are evaluated inside the Trimesh object, and have access to self and the passed args.
Parameters
------------
statement : str
Statement of valid python code
*args : list
Available inside statement as args[0], etc
Returns
-----------
result : result of running eval on statement with args
Examples
-----------
r = mesh.eval_cached('np.dot(self.vertices, args[0])', [0, 0, 1])
"""
statement = str(statement)
key = 'eval_cached_' + statement
key += '_'.join(str(i) for i in args)
if key in self._cache:
return self._cache[key]
result = eval(statement)
self._cache[key] = result
return result
def __hash__(self):
"""
Return the MD5 hash of the mesh as an integer.
Returns
----------
hashed : int
MD5 of mesh data
"""
hashed = int(self.md5(), 16)
return hashed
def __add__(self, other):
"""
Concatenate the mesh with another mesh.
Parameters
------------
other : trimesh.Trimesh object
Mesh to be concatenated with self
Returns
----------
concat : trimesh.Trimesh
Mesh object of combined result
"""
concat = util.concatenate(self, other)
return concat
| 31.829268
| 86
| 0.568382
|
50044b50485811b6c39fb2f2ca8a7410bffafd5a
| 164
|
py
|
Python
|
demo2/handlers/nav/NavHandler.py
|
philipgold/replace-nested-conditional-with-polymorphism-demo
|
c0a530e7c69a8946338a46fde8c67ea8fe85a77e
|
[
"MIT"
] | null | null | null |
demo2/handlers/nav/NavHandler.py
|
philipgold/replace-nested-conditional-with-polymorphism-demo
|
c0a530e7c69a8946338a46fde8c67ea8fe85a77e
|
[
"MIT"
] | null | null | null |
demo2/handlers/nav/NavHandler.py
|
philipgold/replace-nested-conditional-with-polymorphism-demo
|
c0a530e7c69a8946338a46fde8c67ea8fe85a77e
|
[
"MIT"
] | null | null | null |
from demo2.handlers.EventHandler import EventHandler
class NavHandler(EventHandler):
def __init__(self, handler_type='nav'):
self.type = handler_type
| 23.428571
| 52
| 0.756098
|
80fc8323a06b4c7405fe6eb79660d815d13aae93
| 3,872
|
py
|
Python
|
tortoise/tests/fields/test_m2m.py
|
Quinn-Fang/tortoise-orm
|
2874c1adec8306925ee2c966241fa34710a84ef3
|
[
"Apache-2.0"
] | null | null | null |
tortoise/tests/fields/test_m2m.py
|
Quinn-Fang/tortoise-orm
|
2874c1adec8306925ee2c966241fa34710a84ef3
|
[
"Apache-2.0"
] | 5
|
2020-03-24T17:23:14.000Z
|
2021-12-13T20:12:49.000Z
|
tortoise/tests/fields/test_m2m.py
|
Quinn-Fang/tortoise-orm
|
2874c1adec8306925ee2c966241fa34710a84ef3
|
[
"Apache-2.0"
] | null | null | null |
from tortoise.contrib import test
from tortoise.exceptions import OperationalError
from tortoise.tests import testmodels
class TestManyToManyField(test.TestCase):
async def test_empty(self):
await testmodels.M2MOne.create()
async def test__add(self):
one = await testmodels.M2MOne.create(name="One")
two = await testmodels.M2MTwo.create(name="Two")
await one.two.add(two)
self.assertEqual(await one.two, [two])
self.assertEqual(await two.one, [one])
async def test__add__nothing(self):
one = await testmodels.M2MOne.create(name="One")
await one.two.add()
async def test__add__reverse(self):
one = await testmodels.M2MOne.create(name="One")
two = await testmodels.M2MTwo.create(name="Two")
await two.one.add(one)
self.assertEqual(await one.two, [two])
self.assertEqual(await two.one, [one])
async def test__add__many(self):
one = await testmodels.M2MOne.create(name="One")
two = await testmodels.M2MTwo.create(name="Two")
await one.two.add(two)
await one.two.add(two)
await two.one.add(one)
self.assertEqual(await one.two, [two])
self.assertEqual(await two.one, [one])
async def test__add__two(self):
one = await testmodels.M2MOne.create(name="One")
two1 = await testmodels.M2MTwo.create(name="Two")
two2 = await testmodels.M2MTwo.create(name="Two")
await one.two.add(two1, two2)
self.assertEqual(await one.two, [two1, two2])
self.assertEqual(await two1.one, [one])
self.assertEqual(await two2.one, [one])
async def test__remove(self):
one = await testmodels.M2MOne.create(name="One")
two1 = await testmodels.M2MTwo.create(name="Two")
two2 = await testmodels.M2MTwo.create(name="Two")
await one.two.add(two1, two2)
await one.two.remove(two1)
self.assertEqual(await one.two, [two2])
self.assertEqual(await two1.one, [])
self.assertEqual(await two2.one, [one])
async def test__remove__many(self):
one = await testmodels.M2MOne.create(name="One")
two1 = await testmodels.M2MTwo.create(name="Two1")
two2 = await testmodels.M2MTwo.create(name="Two2")
two3 = await testmodels.M2MTwo.create(name="Two3")
await one.two.add(two1, two2, two3)
await one.two.remove(two1, two2)
self.assertEqual(await one.two, [two3])
self.assertEqual(await two1.one, [])
self.assertEqual(await two2.one, [])
self.assertEqual(await two3.one, [one])
async def test__remove__blank(self):
one = await testmodels.M2MOne.create(name="One")
with self.assertRaisesRegex(OperationalError, r"remove\(\) called on no instances"):
await one.two.remove()
async def test__clear(self):
one = await testmodels.M2MOne.create(name="One")
two1 = await testmodels.M2MTwo.create(name="Two")
two2 = await testmodels.M2MTwo.create(name="Two")
await one.two.add(two1, two2)
await one.two.clear()
self.assertEqual(await one.two, [])
self.assertEqual(await two1.one, [])
self.assertEqual(await two2.one, [])
async def test__uninstantiated_add(self):
one = testmodels.M2MOne(name="One")
two = await testmodels.M2MTwo.create(name="Two")
with self.assertRaisesRegex(
OperationalError, r"You should first call .save\(\) on <M2MOne>"
):
await one.two.add(two)
async def test__add_uninstantiated(self):
one = testmodels.M2MOne(name="One")
two = await testmodels.M2MTwo.create(name="Two")
with self.assertRaisesRegex(
OperationalError, r"You should first call .save\(\) on <M2MOne>"
):
await two.one.add(one)
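# For context, a minimal pair of models wired like the ones exercised above
# could look as follows (a sketch; the real definitions live in
# tortoise.tests.testmodels and may differ):
#
#   from tortoise import fields
#   from tortoise.models import Model
#
#   class M2MTwo(Model):
#       name = fields.CharField(max_length=255)
#
#   class M2MOne(Model):
#       name = fields.CharField(max_length=255)
#       two = fields.ManyToManyField('models.M2MTwo', related_name='one')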
| 39.510204
| 92
| 0.641529
|
9264958936764a57e060a5f74f1f3b42d87c9011
| 9,955
|
py
|
Python
|
engine/app.py
|
FeatureCloud/fc-survival-svm
|
0a05d148509418091bddb33ad412a43a9e204a69
|
[
"Apache-2.0"
] | null | null | null |
engine/app.py
|
FeatureCloud/fc-survival-svm
|
0a05d148509418091bddb33ad412a43a9e204a69
|
[
"Apache-2.0"
] | null | null | null |
engine/app.py
|
FeatureCloud/fc-survival-svm
|
0a05d148509418091bddb33ad412a43a9e204a69
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import logging
import threading
import time
import traceback
from time import sleep
import textwrap
from typing import Dict, List, Tuple
PARTICIPANT = (True, False)
COORDINATOR = (False, True)
BOTH = (True, True)
STATE_RUNNING = 'running'
STATE_ERROR = 'error'
STATE_ACTION = 'action_required'
def data_to_bytes(d):
return
class App:
def __init__(self):
self.id = None
self.coordinator = None
self.clients = None
self.thread = None
self.status_available = False
self.status_finished = False
self.status_message = None
self.status_progress = None
self.status_state = None
self.status_destination = None
self.status_smpc = None
self.data_incoming = []
self.data_outgoing = []
self.outgoing_lock: threading.Lock = threading.Lock()
self.default_smpc = {'operation': 'add', 'serialization': 'json', 'shards': 0, 'exponent': 8}
self.current_state: AppState or None = None
self.states: Dict[str, AppState] = {}
self.transitions: Dict[str, Tuple[AppState, AppState, bool, bool]] = {} # name => (source, target, participant, coordinator)
self.transition_log: List[Tuple[datetime.datetime, str]] = []
self.internal = {}
def handle_setup(self, client_id, coordinator, clients):
# This method is called once upon startup and contains information about the execution context of this instance
self.id = client_id
self.coordinator = coordinator
self.clients = clients
self.log(f'id: {self.id}')
self.log(f'coordinator: {self.coordinator}')
self.log(f'clients: {self.clients}')
self.current_state = self.states.get('initial')
if not self.current_state:
raise RuntimeError('initial state not found')
self.thread = threading.Thread(target=self.guarded_run)
self.thread.start()
def guarded_run(self):
try:
self.run()
except Exception as e: # catch all # noqa
self.log(traceback.format_exc())
sleep(10)
self.status_message = 'ERROR. See log for stack trace.'
self.status_state = STATE_ERROR
self.status_finished = True
def run(self):
self.internal['_tic_total'] = time.perf_counter()
while True:
self.log(f'state: {self.current_state.name}')
transition = self.current_state.run()
if not transition:
self.status_progress = 1.0
self.log(f'done')
sleep(10)
self.status_finished = True
return
self.log(f'transition: {transition}')
self.transition(f'{self.current_state.name}_{transition}')
sleep(1)
def register(self):
for s in self.states:
state = self.states[s]
state.register()
def handle_incoming(self, data, client):
# This method is called when new data arrives
self.log(f"HANDLE INCOMING", level=logging.DEBUG)
self.data_incoming.append((data.read(), client))
def handle_outgoing(self):
# This method is called when data is requested
if len(self.data_outgoing) == 0:
return None
data = self.data_outgoing[0]
self.data_outgoing = self.data_outgoing[1:]
if len(self.data_outgoing) == 0:
self.status_available = False
self.status_destination = None
self.status_smpc = None
else:
self.status_available = True
self.status_smpc = self.default_smpc if self.data_outgoing[0][1] else None
self.status_destination = self.data_outgoing[0][2]
self.log(f'OUTGOING: {data}', level=logging.DEBUG)
self.outgoing_lock.release()
return data[0]
def _register_state(self, name, state, participant, coordinator, **kwargs):
if self.transitions.get(name):
raise RuntimeError(f'state {name} already exists')
si = state(**kwargs)
si.app = self
si.name = name
si.participant = participant
si.coordinator = coordinator
self.states[si.name] = si
def register_transition(self, name: str, source: str, target: str, participant=True, coordinator=True):
if not participant and not coordinator:
raise RuntimeError('either participant or coordinator must be True')
if self.transitions.get(name):
raise RuntimeError(f'transition {name} already exists')
source_state = self.states.get(source)
if not source_state:
raise RuntimeError(f'source state {source} not found')
if participant and not source_state.participant:
raise RuntimeError(f'source state {source} not accessible for participants')
if coordinator and not source_state.coordinator:
raise RuntimeError(f'source state {source} not accessible for the coordinator')
target_state = self.states.get(target)
if not target_state:
raise RuntimeError(f'target state {target} not found')
if participant and not target_state.participant:
raise RuntimeError(f'target state {target} not accessible for participants')
if coordinator and not target_state.coordinator:
raise RuntimeError(f'target state {target} not accessible for the coordinator')
self.transitions[name] = (source_state, target_state, participant, coordinator)
def transition(self, name):
transition = self.transitions.get(name)
if not transition:
raise RuntimeError(f'transition {name} not found')
if transition[0] != self.current_state:
raise RuntimeError(f'current state unequal to source state')
if not transition[2] and not self.coordinator:
raise RuntimeError(f'cannot perform transition {name} as participant')
if not transition[3] and self.coordinator:
raise RuntimeError(f'cannot perform transition {name} as coordinator')
self.transition_log.append((datetime.datetime.now(), name))
self.current_state = transition[1]
def log(self, msg, level=logging.INFO):
logging.log(level, '\n'.join(['', *textwrap.wrap(str(msg), 120)]))
class AppState:
def __init__(self):
self.app = None
self.name = None
self.participant = None
self.coordinator = None
def register(self):
pass
def run(self) -> str or None:
pass
def register_transition(self, target: str, role=BOTH, name: str or None = None):
if not name:
name = target
participant, coordinator = role
self.app.register_transition(f'{self.name}_{name}', self.name, target, participant, coordinator)
def gather_data(self):
return self.await_data(len(self.app.clients), unwrap=False)
def await_data(self, n: int = 1, unwrap: bool = True):
while True:
self.app.log(f'INCOMING: ({len(self.app.data_incoming)}) {self.app.data_incoming}')
if len(self.app.data_incoming) >= n:
data = self.app.data_incoming[:n]
self.app.data_incoming = self.app.data_incoming[n:]
if unwrap and n == 1:
return data[0][0]
else:
return data
sleep(1)
def send_data_to_participant(self, data, destination):
if destination == self.app.id:
self.app.data_incoming.append((data, self.app.id))
else:
self.app.outgoing_lock.acquire()
self.app.data_outgoing.append((data, False, destination))
self.app.status_destination = destination
self.app.status_smpc = None
self.app.status_available = True
def send_data_to_coordinator(self, data, send_to_self=True, use_smpc=False):
if self.app.coordinator and not use_smpc:
if send_to_self:
self.app.data_incoming.append((data, self.app.id))
else:
self.app.outgoing_lock.acquire()
self.app.data_outgoing.append((data, use_smpc, None))
self.app.status_destination = None
self.app.status_smpc = self.app.default_smpc if use_smpc else None
self.app.status_available = True
def broadcast_data(self, data, send_to_self=True):
if not self.app.coordinator:
raise RuntimeError('only the coordinator can broadcast data')
self.app.outgoing_lock.acquire()
self.app.data_outgoing.append((data, False, None))
self.app.status_destination = None
self.app.status_smpc = None
self.app.status_available = True
if send_to_self:
self.app.data_incoming.append((data, self.app.id))
def update(self, message=None, progress=None, state=None):
if message and len(message) > 40:
self.app.log(f"Truncated a long message. Original message: {message!r}", logging.DEBUG)
message = message[:39]
if progress is not None and (progress < 0 or progress > 1):
raise RuntimeError('progress must be between 0 and 1')
if state is not None and state != STATE_RUNNING and state != STATE_ERROR and state != STATE_ACTION:
raise RuntimeError('invalid state')
self.app.status_message = message
self.app.status_progress = progress
self.app.status_state = state
def app_state(app: App, name: str, role=BOTH, **kwargs):
participant, coordinator = role
if not participant and not coordinator:
raise RuntimeError('either participant or coordinator must be True')
def func(state_class):
app._register_state(name, state_class, participant, coordinator, **kwargs)
return state_class
return func
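# Illustrative usage sketch (hypothetical state names) showing how the
# pieces above fit together: states are declared with the app_state
# decorator, transitions are declared in register(), and run() returns
# the name of the transition to take (or None to finish).
#
#   app = App()
#
#   @app_state(app, 'initial', role=BOTH)
#   class InitialState(AppState):
#       def register(self):
#           self.register_transition('terminal', role=BOTH)
#
#       def run(self):
#           self.update(message='starting', progress=0.1, state=STATE_RUNNING)
#           return 'terminal'
#
#   @app_state(app, 'terminal', role=BOTH)
#   class TerminalState(AppState):
#       def run(self):
#           return None
#
#   app.register()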
| 36.734317
| 133
| 0.630236
|
4b2bc3ab15b47083c90deac84f7031d3ba7a0aea
| 14,652
|
py
|
Python
|
helper-scripts/hdf5_to_counts_table.py
|
GordonLab/riesling-pipeline
|
384f41dc964db0f59b3992f775e87c651e846f2b
|
[
"MIT"
] | 9
|
2017-10-25T18:27:23.000Z
|
2020-10-15T08:06:42.000Z
|
helper-scripts/hdf5_to_counts_table.py
|
GordonLab/riesling-pipeline
|
384f41dc964db0f59b3992f775e87c651e846f2b
|
[
"MIT"
] | null | null | null |
helper-scripts/hdf5_to_counts_table.py
|
GordonLab/riesling-pipeline
|
384f41dc964db0f59b3992f775e87c651e846f2b
|
[
"MIT"
] | 4
|
2016-11-05T23:21:48.000Z
|
2020-02-25T12:35:33.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This script exists to convert .hdf5 files into counts tables readable by R.
#
# It is useful for performing differential accessibility analyses (e.g. with
# DESeq2) on hdf5 counts data from RIESLING, ROSE, or bamliquidator.
#
#
# Unfortunately, we can't use the rhdf5 package, since it doesn't support the
# datatypes used by our .hdf5 files.
#
#
# Copyright (c) 2014-2016 Nick Semenkovich <semenko@alum.mit.edu>.
# https://nick.semenkovich.com/
#
# Developed for the Gordon Lab, Washington University in St. Louis (WUSTL)
# http://gordonlab.wustl.edu/
#
# This software is released under the MIT License:
# http://opensource.org/licenses/MIT
#
# Source: https://github.com/GordonLab/riesling-pipeline
from __future__ import absolute_import, division, print_function, unicode_literals
__author__ = 'Nick Semenkovich <semenko@alum.mit.edu>'
__copyright__ = 'Gordon Lab at Washington University in St. Louis'
__license__ = 'MIT'
__version__ = '1.0.3'
import argparse
import csv
import fnmatch
import operator
import os
import pybedtools
import tables
import _logshim
import _script_helpers
from collections import deque, OrderedDict
CONFIG = _script_helpers.get_config()
# TODO: Modularize this function. This code is repeated in a *lot* of scripts.
def get_input_files(input_path):
"""
Generate a list of all input files.
:param input_files: A directory with .h5 files. (e.g. /tmp/)
:return: a list of all .h5 files with absolute paths. (e.g. ['/tmp/a.h5'] )
"""
if not os.path.isdir(input_path):
raise ValueError("Input must be a directory. You gave: %s" % (input_path))
# Adapted from:
# https://stackoverflow.com/questions/2186525/use-a-glob-to-find-files-recursively-in-python
all_files = []
for root, _, filenames in os.walk(input_path):
for filename in fnmatch.filter(filenames, '*.h5'):
all_files.append(os.path.join(root, filename))
if len(all_files) == 0:
raise ValueError("Input directory contains no .h5 files!")
return all_files
def flatten_tsv(filename):
"""
    Flatten a TSV file -- parse and merge identical row names by summing their values.
"""
flatlog = _logshim.getLogger('flatten_tsv')
flatlog.debug('Flattening input file: %s' % (filename))
data_dict = OrderedDict()
with open(filename, 'r') as tsv_ro_fh:
tsv_input = csv.reader(tsv_ro_fh, delimiter=str("\t"))
header = next(tsv_input, None)
for row in tsv_input:
row_key = row[0]
these_row_values_as_int = map(int, row[1:])
if row_key in data_dict:
# Add the current row values to the existing values
data_dict[row_key] = map(operator.add, data_dict[row_key], these_row_values_as_int)
else:
data_dict[row_key] = these_row_values_as_int
# Write back the parsed dict
with open(filename, 'wb') as tsv_rw_fh:
tsv_writer = csv.writer(tsv_rw_fh, delimiter=str("\t"))
tsv_writer.writerow(header)
for key, val in data_dict.iteritems():
tsv_writer.writerow([key] + val)
def parse_h5files(input_files, annotationBedTool, overwrite, flatten, density, normalized, sizescaled):
h5logger = _logshim.getLogger('parse_h5files')
assert(not (density and normalized))
total_file_count = len(input_files)
h5logger.info('Parsing a total of: %d file(s)' % (total_file_count))
output_suffix_list = ['tsv']
annotating_regions = False
if annotationBedTool:
annotating_regions = True
output_suffix_list.append('annotated')
if normalized:
output_suffix_list.append('normalized')
elif density:
output_suffix_list.append('density')
elif sizescaled:
output_suffix_list.append('sizescaled')
output_suffix = '.'.join(reversed(output_suffix_list))
# Cache regions that we're annotating, maybe.
region_annotation_cache = {}
for this_file_count, file in enumerate(input_files):
h5logger.info('\tParsing: %s (%d/%d)' % (file, this_file_count + 1, total_file_count))
output_filename = file + '.' + output_suffix
if not overwrite and os.path.isfile(output_filename):
h5logger.warn('Skipping this .h5 as output .tsv already exists: %s' % (output_filename))
continue
# TODO: Modularize H5FD_CORE (the in-memory driver?)
with tables.open_file(file, mode="r", driver="H5FD_CORE") as h5_object:
assert(h5_object.title.startswith("bam liquidator genome read counts")) # Some sanity checking
assert(h5_object.root.file_names[0] == "*")
bam_filename_header = h5_object.root.file_names[1:]
bam_filename_header.insert(0, 'region')
# Note: len(files) = len(file_names) - 1, since file_names has a 'wildcard' first entry.
number_of_regions = int(len(h5_object.root.region_counts) / len(h5_object.root.files))
# We expect this .h5 object's region_counts to contain:
# /region_counts (Table(SIZE,)) 'region counts'
# description := {
# "file_key": UInt32Col(shape=(), dflt=0, pos=0),
# "chromosome": StringCol(itemsize=64, shape=(), dflt='', pos=1),
# "region_name": StringCol(itemsize=64, shape=(), dflt='', pos=2),
# "start": UInt64Col(shape=(), dflt=0, pos=3),
# "stop": UInt64Col(shape=(), dflt=0, pos=4),
# "strand": StringCol(itemsize=1, shape=(), dflt='', pos=5),
# "count": UInt64Col(shape=(), dflt=0, pos=6),
# "normalized_count": Float64Col(shape=(), dflt=0.0, pos=7)}
# byteorder := 'little'
# chunkshape := (NNN,)
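            # Illustrative (hypothetical) row, to make the positional indexing below concrete:
            #   counts[i] -> (file_key, 'chr1', 'region_1', 10000, 12500, '+', 57, 0.0031)
            #   i.e. [1]=chromosome, [3]=start, [4]=stop, [6]=count, [7]=normalized_count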
counts = h5_object.root.region_counts
with open(output_filename, 'wb') as tsv_output:
tsvwriter = csv.writer(tsv_output, delimiter=str("\t"))
tsvwriter.writerow(bam_filename_header)
if annotating_regions:
h5logger.debug('Generating .bed annotations from provided genome.')
region_to_gene = {}
# Perform one annotation rapidly for all regions in the .hdf5
hdf5_positions_only = []
for region_number in range(0, number_of_regions):
hdf5_positions_only.append(counts[region_number][1] + ' ' + str(counts[region_number][3]) + ' ' + str(counts[region_number][4]))
hdf5_positions_only_hashkey = ''.join(hdf5_positions_only)
if hdf5_positions_only_hashkey in region_annotation_cache:
# The genome doesn't change mid run, so we cache only on hdf5_positions
region_to_gene = region_annotation_cache[hdf5_positions_only_hashkey]
h5logger.debug('Annotation from cache.')
else:
hdf5_stub_bed = pybedtools.BedTool('\n'.join(hdf5_positions_only), from_string=True)
annotated_bed = hdf5_stub_bed.closest(annotationBedTool, t='first')
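                        # After closest(), each locus carries the 3 stub BED fields followed by
                        # the annotation's GFF fields, so fields[11] is assumed to be the GFF
                        # attributes column, with the gene name as its first quoted token.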
for locus in annotated_bed:
region_to_gene[locus.chrom + ':' + str(locus.start) + '-' + str(locus.end)] = locus.fields[11].split('"')[1]
region_annotation_cache[hdf5_positions_only_hashkey] = region_to_gene
h5logger.debug('Annotation completed.')
# We're going to aggressively access the hdf5 at a bunch of fixed offsets.
# rowarray = [counts[number_of_regions*0 + i], counts[number_of_regions*1 + i] + counts[number_of_regions*2 + i] ...]
number_of_files = len(h5_object.root.files)
working_deque = deque(maxlen=number_of_files + 1)
# Here, we loop over every "region"/locus (every entry in the first column of the .tsv)
# And then (within this loop) jump to each individual "file" (the hdf5 can contain multiple
# separate samples) to build the data for every row.
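                # (region_counts is laid out file-major: all regions for file 0, then all
                # regions for file 1, and so on -- hence the fixed-offset indexing below.)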
for region_number in range(0, number_of_regions):
                    # Prefix the row with chrN:bpSTART-bpEND e.g. chr4:100-2000
locus_name = counts[region_number][1] + ':' + str(counts[region_number][3]) + '-' + str(counts[region_number][4])
# Sanity checking, in case the input is nuts
feature_width = counts[region_number][4] - counts[region_number][3]
assert(feature_width > 0)
# DESeq2 requires each region have a unique name.
# You can either append a unique value, or aggregate identical loci.
# We address this later by re-opening and aggregating.
if annotating_regions:
working_deque.append(region_to_gene[locus_name])
else:
working_deque.append(locus_name)
#rowarray = [counts[region_number][1] + ':' + str(counts[region_number][3]) + '-' + str(counts[region_number][4])]
for file_number in range(0, number_of_files):
if normalized:
# Standard normalized (counts/mreads)
# bamliquidator gives us (counts/mreads)/width so we multiply by width
working_deque.append(int(counts[number_of_regions * file_number + region_number][7] * feature_width))
elif density:
# (counts/mreads)/width
# We upscale the fractional normalized count values by an arbitrary amount,
# because subsequent analyses like integers.
working_deque.append(int(counts[number_of_regions * file_number + region_number][7] * 10000))
elif sizescaled:
# counts/width
# We upscale the fractional normalized count values by an arbitrary amount,
# because subsequent analyses like integers.
working_deque.append(int(counts[number_of_regions * file_number + region_number][6] / feature_width * 100))
else:
working_deque.append(int(counts[number_of_regions * file_number + region_number][6]))
tsvwriter.writerow(working_deque)
if flatten:
flatten_tsv(output_filename)
h5logger.info('Completed.')
def main():
# Parse & interpret command line flags.
parser = argparse.ArgumentParser(description='Convert hdf5 tables from bamliquidator format to CSV counts tables '
'for use in R and elsewhere. (Necessary as rhdf5 doesn\'t support our data structure.)',
epilog="Written by Nick Semenkovich <semenko@alum.mit.edu> for the Gordon Lab at "
"Washington University in St. Louis: http://gordonlab.wustl.edu.",
usage='%(prog)s [options]',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input-path', '-i', dest="input_path", metavar='input_dir/', type=str,
help='Input path with .h5 files.',
required=True)
parser.add_argument("--overwrite", dest="overwrite", default=False, action='store_true',
help='Regenerate and overwrite output .tsv files, even if they already exist.')
parser.add_argument('--call-genes', dest="call_genes", default=False, action='store_true',
help='Instead of a .tsv (with positions as keys), make a .annotated.tsv with nearby genes.')
parser.add_argument('--normalized', dest="normalized", default=False, action='store_true',
help='Store the normalized counts (counts/total reads) instead of the raw read counts.')
parser.add_argument('--density', dest="density", default=False, action='store_true',
help='Store the width-normalized density (counts/total reads/region size) instead of the raw read counts.')
parser.add_argument('--sizescaled', dest="sizescaled", default=False, action='store_true',
help='Store the size scaled counts (counts/feature size) instead of the raw read counts.')
# Useful for EdgeR/DESeq2, etc. where every locus/position/gene-name must be unique.
parser.add_argument('--flatten', dest="flatten", default=False, action='store_true',
help='Aggregate identical locus IDs and sum their values. '
'Think carefully before you sum non-normalized values!')
genome_choices = sorted(CONFIG['gffs'].keys())
parser.add_argument('--genome', '-g', dest="genome", metavar='genome', type=str, default=None,
choices=genome_choices, help='Genome to use for annotation, one of: %s' % (', '.join(genome_choices)), required=False)
parser.add_argument("--verbose", "-v", dest="verbose", default=False, action='store_true')
parser.add_argument("--no-log", "-nl", dest="nolog", default=False, action='store_true',
help="Do not create a log file.")
args = parser.parse_args()
if args.call_genes and not args.genome:
        parser.error('--genome is required when requesting --call-genes')
assert((args.density + args.normalized + args.sizescaled) <= 1)
annotationBedTool = None
if args.call_genes:
genome_gff = CONFIG['gffs'][args.genome]
assert(os.access(genome_gff, os.R_OK))
annotationBedTool = pybedtools.BedTool(genome_gff)
# Output path is input path. This also checks that the path is writeable.
output_path = _script_helpers.setup_output_path(args.input_path)
_logshim.startLogger(verbose=args.verbose, noFileLog=args.nolog, outPath=output_path)
input_files = get_input_files(args.input_path)
parse_h5files(input_files,
annotationBedTool=annotationBedTool,
overwrite=args.overwrite,
flatten=args.flatten,
density=args.density,
normalized=args.normalized,
sizescaled=args.sizescaled)
if __name__ == '__main__':
main()
| 44.944785
| 152
| 0.615616
|
b07a4c63f95ed515799ea6d5ab0e381da738a3cf
| 1,964
|
py
|
Python
|
cogs/utils/DataBase/rep.py
|
rax-v/Central-Spreader-Bot
|
560e3207d42cbeef7e4904ef9679d1faf005d595
|
[
"Apache-2.0"
] | null | null | null |
cogs/utils/DataBase/rep.py
|
rax-v/Central-Spreader-Bot
|
560e3207d42cbeef7e4904ef9679d1faf005d595
|
[
"Apache-2.0"
] | null | null | null |
cogs/utils/DataBase/rep.py
|
rax-v/Central-Spreader-Bot
|
560e3207d42cbeef7e4904ef9679d1faf005d595
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime, timedelta
from json import dumps
class Rep(object):
def __init__(self, bot, rep_id: int, user_id: int, author_id: int,
repped_at: datetime = datetime.utcnow(), extra_info: dict = None):
self.bot = bot
self.rep_id = rep_id # In most cases this is the id of the message that posted this.
        self.user_id = user_id # The user that received +1 rep.
self.author_id = author_id # The user that gave +1 rep.
self.repped_at = repped_at
self.extra_info = dumps(extra_info)
async def post(self, assure_24h: bool = True):
"""We shouldn't have to check for duplicate reps either. ->
Unless someone mis-uses this.
If a conflict somehow still occurs nothing will happen. ( hopefully :shrug: )
:param assure_24h:
if True, this will only post the rep if the latest
rep for this user_id is more than 24 hours ago.
:return:
If posting is successful, returns None.
If post is on cooldown, returns a datetime object on when the last rep was added.
"""
if assure_24h:
query = """SELECT * FROM reps
WHERE author_id = $1
ORDER BY repped_at DESC
LIMIT 1"""
record = await self.bot.db.fetch(query, self.author_id)
if record:
rep = Rep(bot=self.bot, **record[0])
if (rep.repped_at + timedelta(days=1)) > datetime.utcnow():
return rep.repped_at
query = """INSERT INTO reps ( rep_id, user_id, author_id, repped_at, extra_info )
VALUES ( $1, $2, $3, $4, $5 )
ON CONFLICT DO NOTHING"""
await self.bot.db.execute(query, self.rep_id, self.user_id, self.author_id,
self.repped_at, f"{self.extra_info}")
return None
| 43.644444
| 93
| 0.570774
|