Dataset schema (one row per source file). Each row below is rendered as a metadata block, followed by the file content, followed by a line-statistics line.

| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3 to 972 |
| max_stars_repo_name | string | lengths 6 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 3 to 972 |
| max_issues_repo_name | string | lengths 6 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 3 to 972 |
| max_forks_repo_name | string | lengths 6 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: 4a5148333f31c4597e7614726735b54546105dbc | size: 95 | ext: py | lang: Python
max_stars: EncryptionSystems/Exponential.py @ Sirius2051/atbash (head cda11c9f0c99e20a2350fd3a8a7ef1a61fe269a9), licenses ["Apache-2.0"], count null, events null to null
max_issues: EncryptionSystems/Exponential.py @ Sirius2051/atbash (head cda11c9f0c99e20a2350fd3a8a7ef1a61fe269a9), licenses ["Apache-2.0"], count null, events null to null
max_forks: EncryptionSystems/Exponential.py @ Sirius2051/atbash (head cda11c9f0c99e20a2350fd3a8a7ef1a61fe269a9), licenses ["Apache-2.0"], count null, events null to null
content:
class Exponential:
def __init__(self):
self.rules = ""
def Convertion(self, text):
pass
avg_line_length: 13.571429 | max_line_length: 28 | alphanum_fraction: 0.684211
hexsha: a36cf9078f94af041fe749ab4f012a2e70eeac34 | size: 663 | ext: py | lang: Python
max_stars: spirit/topic/utils.py @ StepanBakshayev/Spirit (head 28c053983d8323801d022c5314af7fdff4569228), licenses ["MIT"], count 1, events 2020-12-08T01:09:30.000Z to 2020-12-08T01:09:30.000Z
max_issues: spirit/topic/utils.py @ StepanBakshayev/Spirit (head 28c053983d8323801d022c5314af7fdff4569228), licenses ["MIT"], count 16, events 2015-08-10T18:28:18.000Z to 2022-03-11T23:12:48.000Z
max_forks: spirit/topic/utils.py @ StepanBakshayev/Spirit (head 28c053983d8323801d022c5314af7fdff4569228), licenses ["MIT"], count 6, events 2018-06-25T02:17:53.000Z to 2020-12-08T01:09:32.000Z
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ..comment.bookmark.models import CommentBookmark
from .notification.models import TopicNotification
from .unread.models import TopicUnread
def topic_viewed(request, topic):
# Todo test detail views
user = request.user
comment_number = CommentBookmark.page_to_comment_number(request.GET.get('page', 1))
CommentBookmark.update_or_create(
user=user,
topic=topic,
comment_number=comment_number
)
TopicNotification.mark_as_read(user=user, topic=topic)
TopicUnread.create_or_mark_as_read(user=user, topic=topic)
topic.increase_view_count()
avg_line_length: 30.136364 | max_line_length: 87 | alphanum_fraction: 0.75264
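For context, a minimal sketch of how the `topic_viewed` helper above might be wired into a Django detail view. The `Topic` import, URL kwarg, and template name are assumptions for illustration, not part of the dataset row.

```python
# Hypothetical detail view calling topic_viewed (illustrative only; names assumed).
from django.shortcuts import get_object_or_404, render

from .models import Topic          # assumed: the Topic model lives in this app's models module
from .utils import topic_viewed    # the helper shown in the row above


def topic_detail(request, pk):
    topic = get_object_or_404(Topic, pk=pk)
    # Record the bookmark position, mark notifications/unread state, bump the view counter.
    topic_viewed(request, topic)
    return render(request, 'spirit/topic/detail.html', {'topic': topic})
```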
hexsha: 02b55fc7ddb52d0664e4c72539a6571c7f1b8d2e | size: 4,153 | ext: py | lang: Python
max_stars: dev_tools/parsers/extract_propery_from_csv.py @ rbertran/microprobe (head 232b60aad88b3541de1a962d6da924b234cd521c), licenses ["Apache-2.0"], count 2, events 2019-11-20T18:29:02.000Z to 2019-11-20T18:29:05.000Z
max_issues: dev_tools/parsers/extract_propery_from_csv.py @ rbertran/microprobe (head 232b60aad88b3541de1a962d6da924b234cd521c), licenses ["Apache-2.0"], count null, events null to null
max_forks: dev_tools/parsers/extract_propery_from_csv.py @ rbertran/microprobe (head 232b60aad88b3541de1a962d6da924b234cd521c), licenses ["Apache-2.0"], count null, events null to null
content:
#!/usr/bin/env python
# Copyright 2018 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
docstring
"""
# from microprobe.code.ins import Instruction
from __future__ import absolute_import
from __future__ import print_function
import sys
import six
usage = "Usage: %s [-h | options] < infile > outfile" % sys.argv[0]
# functions
def print_header():
print("# Copyright 2011-2018 IBM Corporation")
print("# All rights reserved")
print("#")
print("# Author: Ramon Bertran")
print("# Email: rbertra@us.ibm.com")
print("#")
print("# Version: 0.5")
print("#")
def print_int_property(name, descr, default, props):
print_header()
print("---")
print("- Name: %s" % name)
print(" Description: %s" % descr)
print(" Default: %s" % str(default))
print(" Values:")
for prop, value in sorted(six.iteritems(props), key=lambda ins: ins[0]):
if (value != default):
print(" %s: %s" % (prop, str(value)))
if len(sys.argv) < 2:
sys.exit(usage)
elif sys.argv[1] == '-h':
print(usage)
print("")
print("This utility extracts fields from csv (stdin) to properties")
print("")
print("-h \t prints this help")
print("-n [name] \tcolumn name to be the property")
print("-f columnname=value \tfilter")
print("-p name \t property name")
print("-d default \t property default value")
print("-D description \t property description")
print("-k key \t property key")
print("-s separator \t property separator (if multiple keys)")
skip = True
cname = None
pfilter = []
pname = None
pdesc = None
pdefault = None
pkey = []
pkeysep = None
for idx, value in enumerate(sys.argv):
# print idx, value
if skip:
skip = False
continue
if value == "-n":
assert cname is None, "Column name specified twice"
cname = sys.argv[idx + 1]
skip = True
elif value == "-f":
pfilter.append(sys.argv[idx + 1].split('='))
skip = True
elif value == "-p":
assert pname is None, "Property name specified twice"
pname = sys.argv[idx + 1]
skip = True
elif value == "-d":
assert pdefault is None, "Property default specified twice"
pdefault = sys.argv[idx + 1]
skip = True
elif value == "-k":
pkey.append(sys.argv[idx + 1])
skip = True
elif value == "-s":
assert pkeysep is None, "Key separator specified twice"
pkeysep = sys.argv[idx + 1]
skip = True
elif value == "-D":
assert pdesc is None, "Property descriptor specified twice"
pdesc = sys.argv[idx + 1]
skip = True
if pdesc is None:
pdesc = "Not provided"
if pkeysep is None:
pkeysep = "_"
assert cname is not None, "Column name not provided"
assert pname is not None, "Property name not provided"
assert pdefault is not None, "Default value not provided"
assert len(pkey) > 0, "Key not specified"
header = None
def get_value(line, key):
val = [cidx for (cidx, cval) in enumerate(header.split(","))
if cval == key][0]
return line.split(",")[val]
def get_key(line):
key = []
for elem in pkey:
key.append(get_value(line, elem))
return pkeysep.join(key)
def filters_ok(line):
for ufilter in pfilter:
if get_value(line, ufilter[0]) != ufilter[1]:
return False
return True
props = {}
for line in sys.stdin.readlines():
if header is None:
header = line
continue
if filters_ok(line):
props[get_key(line)] = get_value(line, cname)
print_int_property(pname, pdesc, pdefault, props)
avg_line_length: 26.119497 | max_line_length: 76 | alphanum_fraction: 0.628702
hexsha: 9ac60571abc21c6a67dc758c4fb54537ece547b5 | size: 1,678 | ext: py | lang: Python
max_stars: insta/views.py @ jinka/Inst-Photo-App (head 4c64033d800951855d9c6185506e0f2f3c830ba3), licenses ["MIT"], count null, events null to null
max_issues: insta/views.py @ jinka/Inst-Photo-App (head 4c64033d800951855d9c6185506e0f2f3c830ba3), licenses ["MIT"], count null, events null to null
max_forks: insta/views.py @ jinka/Inst-Photo-App (head 4c64033d800951855d9c6185506e0f2f3c830ba3), licenses ["MIT"], count null, events null to null
content:
from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import ListView, DetailView, CreateView, UpdateView
from .models import Image
from django.contrib.auth.decorators import login_required
from .email import send_welcome_email
@login_required(login_url='/register/')
def home(request):
context = {
'posts': Image.objects.all()
}
return render(request,'insta/home.html',context)
class ImageListView(ListView):
model = Image
    template_name = 'insta/home.html'
context_object_name = 'posts'
ordering =['-date_created']
class ImageDetailView(DetailView):
model = Image
class ImageCreateView(LoginRequiredMixin,CreateView):
model = Image
fields = ['image','caption']
def form_valid(self, form):
# form.instance.user = Image.objects.get(user=self.request.user)
form.instance.user = self.request.user
form.instance.name = self.request.user
return super().form_valid(form)
class ImageUpdateView(LoginRequiredMixin,UserPassesTestMixin,UpdateView):
model = Image
fields = ['image','caption']
def form_valid(self, form):
# form.instance.user = Image.objects.get(user=self.request.user)
form.instance.user = self.request.user
return super().form_valid(form)
def test_func(self):
image = self.get_object()
if self.request.user == image.user:
return True
return False
def about(request):
return render(request,'insta/about.html', {'title': 'About'})
def search_results(request):
pass
avg_line_length: 27.966667 | max_line_length: 78 | alphanum_fraction: 0.697259
hexsha: 60f7cc3fe0928a459753f4e3c8ee1c472c9ce8e2 | size: 615 | ext: py | lang: Python
max_stars: duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/numtrees_200/rule_161.py @ apcarrik/kaggle (head 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf), licenses ["MIT"], count null, events null to null
max_issues: duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/numtrees_200/rule_161.py @ apcarrik/kaggle (head 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf), licenses ["MIT"], count null, events null to null
max_forks: duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/numtrees_200/rule_161.py @ apcarrik/kaggle (head 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf), licenses ["MIT"], count null, events null to null
content:
def findDecision(obj): #obj[0]: Driving_to, obj[1]: Passanger, obj[2]: Weather, obj[3]: Temperature, obj[4]: Time, obj[5]: Coupon, obj[6]: Coupon_validity, obj[7]: Gender, obj[8]: Age, obj[9]: Maritalstatus, obj[10]: Children, obj[11]: Education, obj[12]: Occupation, obj[13]: Income, obj[14]: Bar, obj[15]: Coffeehouse, obj[16]: Carryaway, obj[17]: Restaurantlessthan20, obj[18]: Restaurant20to50, obj[19]: Direction_same, obj[20]: Distance
# {"feature": "Coffeehouse", "instances": 5, "metric_value": 0.971, "depth": 1}
if obj[15]<=0.0:
return 'True'
elif obj[15]>0.0:
return 'False'
else: return 'False'
avg_line_length: 76.875 | max_line_length: 441 | alphanum_fraction: 0.678049
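As a usage sketch for the generated decision-tree rule above: the 21-element feature vector below is made up, and only index 15 (the Coffeehouse feature) influences this particular rule. The import path is assumed for illustration.

```python
# Hypothetical call to the rule from the row above.
from rule_161 import findDecision   # assumed module name, matching the file name

features = [0.0] * 21          # placeholder values for the 21 features listed in the comment
features[15] = 2.0             # Coffeehouse feature; the only index this rule inspects
print(findDecision(features))  # -> 'False', because obj[15] > 0.0
```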
hexsha: 9343632d9f140fbf1062b66ad13c2a6e1127323b | size: 157 | ext: py | lang: Python
max_stars: tests/test_temporal.py @ fahadalisarwar1/soundly (head d9ba95bc8edbc760d9c6199178fd9d07aeddee6d), licenses ["MIT"], count null, events null to null
max_issues: tests/test_temporal.py @ fahadalisarwar1/soundly (head d9ba95bc8edbc760d9c6199178fd9d07aeddee6d), licenses ["MIT"], count null, events null to null
max_forks: tests/test_temporal.py @ fahadalisarwar1/soundly (head d9ba95bc8edbc760d9c6199178fd9d07aeddee6d), licenses ["MIT"], count null, events null to null
content:
from soundly.features.statistical import get_max
import numpy as np
def test_max():
arr = np.array([1, 2, 3, 4, 5, 6, 7])
assert get_max(arr) == 7
avg_line_length: 19.625 | max_line_length: 48 | alphanum_fraction: 0.656051
hexsha: a18246e2df817ec3fdc229e249079833bf2a75a2 | size: 2,514 | ext: py | lang: Python
max_stars: python/spark_dbconnect/__init__.py @ jholycross/spark-esri (head c8635777d17c8f4c22a0c098e2fa606f8b614371), licenses ["Apache-2.0"], count 18, events 2020-07-31T14:08:16.000Z to 2022-02-07T19:12:46.000Z
max_issues: python/spark_dbconnect/__init__.py @ jholycross/spark-esri (head c8635777d17c8f4c22a0c098e2fa606f8b614371), licenses ["Apache-2.0"], count 1, events 2020-08-07T01:02:52.000Z to 2021-06-30T17:08:38.000Z
max_forks: python/spark_dbconnect/__init__.py @ jholycross/spark-esri (head c8635777d17c8f4c22a0c098e2fa606f8b614371), licenses ["Apache-2.0"], count 8, events 2020-07-31T16:21:07.000Z to 2022-02-16T15:17:34.000Z
content:
#
# Code borrowed and modified from https://www.esri.com/arcgis-blog/products/arcgis-pro/health/use-proximity-tracing-to-identify-possible-contact-events/
#
import os
import subprocess
from typing import Dict
import arcpy
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
from pyspark.java_gateway import launch_gateway
SparkContext._gateway = None
def spark_start(config: Dict = {}) -> SparkSession:
pro_home = arcpy.GetInstallInfo()["InstallDir"]
# pro_lib_dir = os.path.join(pro_home, "Java", "lib")
pro_runtime_dir = os.path.join(pro_home, "Java", "runtime")
os.environ["HADOOP_HOME"] = os.path.join(pro_runtime_dir, "hadoop")
conf = SparkConf()
conf.set("spark.ui.enabled", False)
conf.set("spark.ui.showConsoleProgress", False)
conf.set("spark.sql.execution.arrow.enabled", True)
conf.set("spark.sql.catalogImplementation", "in-memory")
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
for k, v in config.items():
conf.set(k, v)
#
# these need to be reset on every run or pyspark will think the Java gateway is still up and running
os.environ.unsetenv("PYSPARK_GATEWAY_PORT")
os.environ.unsetenv("PYSPARK_GATEWAY_SECRET")
SparkContext._jvm = None
SparkContext._gateway = None
# we have to manage the py4j gateway ourselves so that we can control the JVM process
popen_kwargs = {
'stdout': subprocess.DEVNULL, # need to redirect stdout & stderr when running in Pro or JVM fails immediately
'stderr': subprocess.DEVNULL,
'shell': True # keeps the command-line window from showing
}
gateway = launch_gateway(conf=conf, popen_kwargs=popen_kwargs)
sc = SparkContext(gateway=gateway)
spark = SparkSession(sc)
# Kick start the spark engine.
spark.sql("select 1").collect()
return spark
def spark_stop():
if SparkContext._gateway:
spark = SparkSession.builder.getOrCreate()
gateway = spark._sc._gateway
spark.stop()
gateway.shutdown()
gateway.proc.stdin.close()
# ensure that process and all children are killed
subprocess.Popen(["cmd", "/c", "taskkill", "/f", "/t", "/pid", str(gateway.proc.pid)],
shell=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
SparkContext._gateway = None
else:
print("Warning: Undefined variable SparkContext._gateway")
avg_line_length: 37.522388 | max_line_length: 152 | alphanum_fraction: 0.68218
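A minimal usage sketch for the `spark_start`/`spark_stop` helpers above, assuming an ArcGIS Pro Python environment where arcpy and pyspark are importable; the import path and the extra Spark config key are only examples.

```python
# Hypothetical use of spark_start/spark_stop from the module above (ArcGIS Pro environment assumed).
from spark_dbconnect import spark_start, spark_stop   # assumed import path for illustration

spark = spark_start({"spark.sql.shuffle.partitions": "8"})
try:
    df = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "value"])
    print(df.count())   # -> 2
finally:
    spark_stop()        # shuts down the py4j gateway and kills the JVM process tree
```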
hexsha: 615ed6e075e901fd64e316ba646cbf3c33d4b645 | size: 7,180 | ext: py | lang: Python
max_stars: examples/nativeMode/teleport/aliceTest.py @ WrathfulSpatula/SimulaQron (head eaa5548df2f992e187ee70ccd81f192a1ce93e14), licenses ["BSD-3-Clause"], count null, events null to null
max_issues: examples/nativeMode/teleport/aliceTest.py @ WrathfulSpatula/SimulaQron (head eaa5548df2f992e187ee70ccd81f192a1ce93e14), licenses ["BSD-3-Clause"], count null, events null to null
max_forks: examples/nativeMode/teleport/aliceTest.py @ WrathfulSpatula/SimulaQron (head eaa5548df2f992e187ee70ccd81f192a1ce93e14), licenses ["BSD-3-Clause"], count null, events null to null
content:
#
# Copyright (c) 2017, Stephanie Wehner and Axel Dahlberg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. All advertising materials mentioning features or use of this software
# must display the following acknowledgement:
# This product includes software developed by Stephanie Wehner, QuTech.
# 4. Neither the name of the QuTech organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import numpy as np
from simulaqron.local.setup import setup_local, assemble_qubit
from simulaqron.general.host_config import socketsConfig
from simulaqron.toolbox.stabilizer_states import StabilizerState
from simulaqron.settings import simulaqron_settings
from twisted.internet.defer import inlineCallbacks
from twisted.spread import pb
from twisted.internet import reactor
#####################################################################################################
#
# runClientNode
#
# This will be run on the local node if all communication links are set up (to the virtual node
# quantum backend, as well as the nodes in the classical communication network), and the local classical
# communication server is running (if applicable).
#
@inlineCallbacks
def runClientNode(qReg, virtRoot, myName, classicalNet):
"""
Code to execute for the local client node. Called if all connections are established.
Arguments
qReg quantum register (twisted object supporting remote method calls)
    virtRoot    virtual quantum node (twisted object supporting remote method calls)
myName name of this node (string)
classicalNet servers in the classical communication network (dictionary of hosts)
"""
logging.debug("LOCAL %s: Runing client side program.", myName)
# Create 3 qubits
q1 = yield virtRoot.callRemote("new_qubit_inreg", qReg)
# Prepare the first one in the |-> state
# yield q1.callRemote("apply_X")
yield q1.callRemote("apply_H")
# For information purposes, let's print the state of that qubit
if simulaqron_settings.sim_backend == "qutip":
realRho, imagRho = yield q1.callRemote("get_qubit")
state = np.array(assemble_qubit(realRho, imagRho), dtype=complex)
elif simulaqron_settings.sim_backend == "projectq":
realvec, imagvec = yield virtRoot.callRemote("get_register_RI", q1)
state = [r + (1j * j) for r, j in zip(realvec, imagvec)]
elif simulaqron_settings.sim_backend == "stabilizer":
array, _ = yield virtRoot.callRemote("get_register_RI", q1)
state = StabilizerState(array)
elif simulaqron_settings.sim_backend == "pyqrack":
state = '(Not directly queryable with PyQrack)'
else:
ValueError("Unknown backend {}".format(simulaqron_settings.sim_backend))
print("Qubit to be teleported is:\n{}".format(state))
# Create qubit for teleportation
qA = yield virtRoot.callRemote("new_qubit_inreg", qReg)
qB = yield virtRoot.callRemote("new_qubit_inreg", qReg)
# Put qubits A and B in an EPR state
yield qA.callRemote("apply_H")
yield qA.callRemote("cnot_onto", qB)
# Send qubit B to Bob
# Instruct the virtual node to transfer the qubit
remoteNum = yield virtRoot.callRemote("send_qubit", qB, "Bob")
logging.debug("LOCAL %s: Remote qubit is %d.", myName, remoteNum)
# Apply the local teleportation operations
yield q1.callRemote("cnot_onto", qA)
yield q1.callRemote("apply_H")
a = yield q1.callRemote("measure")
b = yield qA.callRemote("measure")
logging.debug("LOCAL %s: Correction info is a=%d, b=%d.", myName, a, b)
    # Tell Bob the number of the virtual qubit so he can use it locally
bob = classicalNet.hostDict["Bob"]
yield bob.root.callRemote("recover_teleport", a, b, remoteNum)
reactor.stop()
#####################################################################################################
#
# localNode
#
# This will be run if the local node acts as a server on the classical communication network,
# accepting remote method calls from the other nodes.
class localNode(pb.Root):
def __init__(self, node, classicalNet):
self.node = node
self.classicalNet = classicalNet
self.virtRoot = None
self.qReg = None
def set_virtual_node(self, virtRoot):
self.virtRoot = virtRoot
def set_virtual_reg(self, qReg):
self.qReg = qReg
def remote_test(self):
return "Tested!"
#####################################################################################################
#
# main
#
def main():
# In this example, we are Alice.
myName = "Alice"
# This file defines the network of virtual quantum nodes
network_file = simulaqron_settings.network_config_file
# This file defines the nodes acting as servers in the classical communication network
classicalFile = "classicalNet.cfg"
# Read configuration files for the virtual quantum, as well as the classical network
virtualNet = socketsConfig(network_file)
classicalNet = socketsConfig(classicalFile)
# Check if we should run a local classical server. If so, initialize the code
# to handle remote connections on the classical communication network
if myName in classicalNet.hostDict:
lNode = localNode(classicalNet.hostDict[myName], classicalNet)
else:
lNode = None
# Set up the local classical server if applicable, and connect to the virtual
# node and other classical servers. Once all connections are set up, this will
# execute the function runClientNode
setup_local(myName, virtualNet, classicalNet, lNode, runClientNode)
##################################################################################################
logging.basicConfig(format="%(asctime)s:%(levelname)s:%(message)s", level=logging.DEBUG)
main()
avg_line_length: 40.337079 | max_line_length: 104 | alphanum_fraction: 0.691783
hexsha: aac6f7e9de31909b95e6ac76a883b707dbbd9c55 | size: 72 | ext: py | lang: Python
max_stars: pyhac/module/__init__.py @ zroger49/hac (head 5905369344c985d5293d572a610c82308306e385), licenses ["Apache-2.0"], count 28, events 2021-08-18T09:33:30.000Z to 2021-11-08T09:14:35.000Z
max_issues: pyhac/module/__init__.py @ zroger49/hac (head 5905369344c985d5293d572a610c82308306e385), licenses ["Apache-2.0"], count 3, events 2021-08-19T14:01:25.000Z to 2021-09-06T10:57:46.000Z
max_forks: pyhac/module/__init__.py @ zroger49/hac (head 5905369344c985d5293d572a610c82308306e385), licenses ["Apache-2.0"], count 4, events 2021-08-12T02:35:17.000Z to 2021-09-21T18:35:16.000Z
content:
from .module import Module
from .control import MouseControl, KeyControl
avg_line_length: 36 | max_line_length: 45 | alphanum_fraction: 0.847222
hexsha: 3ab1491bf594c963c00a5aa650cd0b9000f36ff8 | size: 3,186 | ext: py | lang: Python
max_stars: src/labpyrinth/geometry.py @ alistair-broomhead/labpyrinth (head 0bc8481f3975019b3a64120d9758345e7f178e00), licenses ["BSD-3-Clause"], count null, events null to null
max_issues: src/labpyrinth/geometry.py @ alistair-broomhead/labpyrinth (head 0bc8481f3975019b3a64120d9758345e7f178e00), licenses ["BSD-3-Clause"], count null, events null to null
max_forks: src/labpyrinth/geometry.py @ alistair-broomhead/labpyrinth (head 0bc8481f3975019b3a64120d9758345e7f178e00), licenses ["BSD-3-Clause"], count null, events null to null
content:
import itertools
import typing
from pygame.math import Vector2
class Coordinate:
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self, other):
x, y = other
return x == self.x and y == self.y
def __repr__(self):
return f'{type(self).__name__}{*self,}'
def __iter__(self):
yield from [self.x, self.y]
def __add__(self, other):
x, y = other
return type(self)(self.x + x, self.y + y)
def __sub__(self, other):
x, y = other
return type(self)(self.x - x, self.y - y)
def __neg__(self):
return type(self)(-self.x, -self.y)
def __mul__(self, scale: int):
return type(self)(self.x * scale, self.y * scale)
def __hash__(self):
return hash((type(self), *self))
@property
def as_vector(self):
return Vector2(self.x, self.y)
class Direction:
up = Coordinate(0, -1)
down = Coordinate(0, 1)
left = Coordinate(-1, 0)
right = Coordinate(1, 0)
all = frozenset((up, down, left, right))
_int_lookup = {
# Used for a bitfield of sides
up: 1,
down: 2,
left: 4,
right: 8,
}
@classmethod
def to_int(cls, *directions: Coordinate):
return sum(
cls._int_lookup.get(direction, 0) for direction in directions
)
@classmethod
def combinations(cls) -> typing.Iterable[typing.Tuple['Direction']]:
yield ()
for direction in cls.all:
yield direction,
for length in range(1, len(cls.all)):
length += 1
for selected in itertools.permutations(cls.all, length):
yield selected
class Square:
connected_from: Coordinate
connected_to: typing.Set[Coordinate]
is_start = False
is_end = False
def __init__(self, position: Coordinate):
self.position = position
self.connected_to = set()
def open_sides(self):
if from_ := getattr(self, 'connected_from', False):
yield from_
yield from self.connected_to
def closed_sides(self):
sides = Direction.all - set(self.open_sides())
yield from sides
def neighbour_positions(self) -> typing.Iterable[Coordinate]:
for direction in Direction.all:
yield self.position + direction
def start_from(self, position: Coordinate):
self.is_start = True
self.connected_from = position - self.position
return self
def vector_to(self, other: 'Square') -> Coordinate:
return other.position - self.position
def linked_from(self, other: 'Square') -> 'Square':
vector = self.vector_to(other)
self.connected_from = vector
other.connected_to.add(-vector)
return self
def __repr__(self):
return f'{type(self).__name__}{self.position,}'
def __iter__(self) -> typing.Iterable[Coordinate]:
yield from self.position
@property
def visited(self) -> bool:
return hasattr(self, 'connected_from') or self.is_start
@property
def assigned(self) -> bool:
return self.visited or self.is_end
avg_line_length: 23.954887 | max_line_length: 73 | alphanum_fraction: 0.596045
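A small sketch exercising the `Coordinate`, `Direction`, and `Square` helpers above; the values are illustrative and the import path is assumed to match the file in this row.

```python
# Illustrative use of the geometry helpers from the row above.
from labpyrinth.geometry import Coordinate, Direction, Square   # assumed import path

origin = Coordinate(2, 3)
moved = origin + Direction.right            # Coordinate(3, 3)
assert moved - origin == Direction.right    # vector arithmetic round-trips

# Direction.to_int packs sides into a bitfield: up=1, down=2, left=4, right=8.
assert Direction.to_int(Direction.up, Direction.left) == 5

square = Square(origin).start_from(origin + Direction.up)
assert square.is_start and square.visited   # start squares count as visited
```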
hexsha: 04668f406a3cd714bff1a2c5bdd134765b0c46a7 | size: 417 | ext: py | lang: Python
max_stars: web/setup.py @ euri16/python-url-shortener (head f54b8b0c1f1370747b3e1aafb87bc6b94e26a18d), licenses ["Apache-2.0"], count null, events null to null
max_issues: web/setup.py @ euri16/python-url-shortener (head f54b8b0c1f1370747b3e1aafb87bc6b94e26a18d), licenses ["Apache-2.0"], count null, events null to null
max_forks: web/setup.py @ euri16/python-url-shortener (head f54b8b0c1f1370747b3e1aafb87bc6b94e26a18d), licenses ["Apache-2.0"], count null, events null to null
content:
from setuptools import setup
setup(
name="UrlShortener",
version="1.0",
author="Eury Pérez",
classifiers=[
"Development Status :: 1 - Planning"
],
install_requires=[
"Flask",
"Flask-SQLAlchemy",
"SQLAlchemy",
"Flask-Cache",
"uWSGI",
"passlib",
"Flask-Sslify",
"PyCrypto",
"Flower",
"SimpleJson"
]
)
avg_line_length: 18.130435 | max_line_length: 45 | alphanum_fraction: 0.503597
hexsha: a6262d9805342265ccd5337c025031024f83b09a | size: 4,774 | ext: py | lang: Python
max_stars: stages/stage 5.py @ Muhammad-Saad-1/Space-Assault (head 3c71128905cc0f9d7adc9a48393e7ab57b09f19e), licenses ["MIT"], count null, events null to null
max_issues: stages/stage 5.py @ Muhammad-Saad-1/Space-Assault (head 3c71128905cc0f9d7adc9a48393e7ab57b09f19e), licenses ["MIT"], count null, events null to null
max_forks: stages/stage 5.py @ Muhammad-Saad-1/Space-Assault (head 3c71128905cc0f9d7adc9a48393e7ab57b09f19e), licenses ["MIT"], count null, events null to null
content:
from enemy import enemy
import stage
import player
import pygame as pg
from player import fire
pg.init()
r = (255,0,0)
display = pg.display.set_mode((900,600))
clock = pg.time.Clock()
bullet1 = pg.mixer.Sound('audio/1.wav')
bullet2 = pg.mixer.Sound('audio/2.wav')
#sound.play()
music = pg.mixer.music.load('audio/dvs.mp3')
pg.mixer.music.play(-1)
xin = 50
yin = 585
change = 8
lm = False
rm = False
ls = False
rs = True
jump = False
jump_h = 10
neg = 1
bullet = []
enemy_list = [enemy(430,580,535),enemy(0,182,323),enemy(640,890,395),enemy(545,645,237),enemy(230,380,237)]
block_list = [[215,365,480],[0,150,373],[430,580,373],[640,890,445],[545,645,287],[385,535,180],[230,380,287]]
fall = 1
fall_state = True
health = 10
health_list = []
gameExit = False
while not gameExit:
clock.tick(24)
for bullets in bullet:
if bullets.x > 885 or bullets.x < 15:
if bullets in bullet:
bullet.pop(bullet.index(bullets))
else :
bullets.x += bullets.vel
for enemy in enemy_list:
if bullets.x+10 >= enemy.x and bullets.x+10 <= enemy.x+30:
if bullets.y+10 >= enemy.y and bullets.y <= enemy.y+50:
if bullets in bullet:
enemy.hit()
bullet.pop(bullet.index(bullets))
for enemy in enemy_list:
if enemy.bullet_hit >= 3:# enemy life
enemy.dead(display)
enemy_list.pop(enemy_list.index(enemy))
for enemy in enemy_list:
if enemy.y+25 > yin and enemy.y < yin+50 :
if xin > enemy.x and enemy.vel > 0:
enemy.shoot()
bullet2.play()
elif xin < enemy.x and enemy.vel < 0:
enemy.shoot()
bullet2.play()
else:
enemy.shooting = False
for enemy in enemy_list:
health_list.append(enemy.h)
if len(health_list) > len(enemy_list):
health_list.pop(0)
health -= int(sum(health_list))
health_list.clear()
for event in pg.event.get():
if event.type == pg.QUIT:
pg.quit()
exit()
if event.type == pg.KEYDOWN:
if event.key == pg.K_SPACE:
bullet1.play()
if lm or ls:
facing = -1
elif rm or rs:
facing = 1
if len(bullet) < 8:
if rs or rm:
bullet.append(fire(xin+35,yin+16,facing))
elif ls or lm:
bullet.append(fire(xin-5,yin+16,facing))
if event.key == pg.K_LEFT:
lm = True
ls = False
rs = False
if not jump:
ls = True
if event.key == pg.K_RIGHT:
rm = True
ls = False
rs = False
if not jump:
rs = True
if event.key == pg.K_UP:
jump = True
if event.type == pg.KEYUP:
if event.key == pg.K_RIGHT:
rs = True
rm = False
ls = False
elif event.key == pg.K_LEFT:
ls = True
lm = False
rs = False
stage.bg(display,4)
stage.bottom(display,900,600,250,400)
stage.border(display,900,600)
for block in block_list:
stage.tile1(display,block[0],block[1],block[2])
for enemy in enemy_list:
enemy.draw(display,xin,yin)
xin,yin,jump_h,neg,jump = player.player(display,xin,yin,change,lm,rm,rs,ls,jump,jump_h,neg,bullet,health)
player_block = pg.Rect(xin+15,yin+4,20 ,46)
for block in block_list:
if pg.Rect(int(block[0]),int(block[2]),int(block[1]-block[0]),20).colliderect(player_block):
if player_block.bottom > pg.Rect(int(block[0]),int(block[2]),int(block[1]-block[0]),20).top and player_block.top < pg.Rect(int(block[0]),int(block[2]),int(block[1]-block[0]),20).top:
jump = False
jump_h = 10
fall_state = False
if player_block.top < pg.Rect(int(block[0]),int(block[2]),int(block[1]-block[0]),20).bottom:
jump = False
jump_h = 10
player_block.top = pg.Rect(int(block[0]),int(block[2]),int(block[1]-block[0]),20).bottom
if fall_state and not jump:
yin += (fall**2)*0.30
fall +=1
if yin+50 >= 580:
fall = 1
jump = False
jump_h = 10
yin = 535
fall_state = True
pg.display.update()
if len(enemy_list) == 0:
pg.quit()
quit()
if health < 1:
pg.quit()
quit()
avg_line_length: 32.69863 | max_line_length: 198 | alphanum_fraction: 0.514244
hexsha: a9bbb2a2ae31a4a05044ac0194b9494e7c241a8a | size: 870 | ext: py | lang: Python
max_stars: test/system/volume/UNMOUNT_VOL_BASIC_1.py @ YongJin-Cho/poseidonos (head c07a0240316d4536aa09f22d7977604f3650d752), licenses ["BSD-3-Clause"], count null, events null to null
max_issues: test/system/volume/UNMOUNT_VOL_BASIC_1.py @ YongJin-Cho/poseidonos (head c07a0240316d4536aa09f22d7977604f3650d752), licenses ["BSD-3-Clause"], count null, events null to null
max_forks: test/system/volume/UNMOUNT_VOL_BASIC_1.py @ YongJin-Cho/poseidonos (head c07a0240316d4536aa09f22d7977604f3650d752), licenses ["BSD-3-Clause"], count null, events null to null
content:
#!/usr/bin/env python3
import subprocess
import os
import sys
import json
sys.path.append("../lib/")
sys.path.append("../array/")
import json_parser
import pos
import cli
import test_result
import MOUNT_VOL_BASIC_1
VOL_NAME = MOUNT_VOL_BASIC_1.VOL_NAME
ARRAYNAME = MOUNT_VOL_BASIC_1.ARRAYNAME
def clear_result():
if os.path.exists( __file__ + ".result"):
os.remove( __file__ + ".result")
def set_result(detail):
code = json_parser.get_response_code(detail)
result = test_result.expect_true(code)
with open(__file__ + ".result", "w") as result_file:
result_file.write(result + " (" + str(code) + ")" + "\n" + detail)
def execute():
clear_result()
MOUNT_VOL_BASIC_1.execute()
out = cli.unmount_volume(VOL_NAME, ARRAYNAME)
return out
if __name__ == "__main__":
out = execute()
set_result(out)
pos.kill_pos()
avg_line_length: 23.513514 | max_line_length: 74 | alphanum_fraction: 0.696552
hexsha: bc5f6b4436c9932d78570fcdc1f9f0f54f55d5b4 | size: 611 | ext: py | lang: Python
max_stars: test/vanilla/version-tolerant/Expected/AcceptanceTests/MultipleInheritanceVersionTolerant/multipleinheritanceversiontolerant/aio/_operations/__init__.py @ cfculhane/autorest.python (head 8cbca95faee88d933a58bbbd17b76834faa8d387), licenses ["MIT"], count 35, events 2018-04-03T12:15:53.000Z to 2022-03-11T14:03:34.000Z
max_issues: test/vanilla/version-tolerant/Expected/AcceptanceTests/MultipleInheritanceVersionTolerant/multipleinheritanceversiontolerant/aio/_operations/__init__.py @ cfculhane/autorest.python (head 8cbca95faee88d933a58bbbd17b76834faa8d387), licenses ["MIT"], count 652, events 2017-08-28T22:44:41.000Z to 2022-03-31T21:20:31.000Z
max_forks: test/vanilla/version-tolerant/Expected/AcceptanceTests/MultipleInheritanceVersionTolerant/multipleinheritanceversiontolerant/aio/_operations/__init__.py @ cfculhane/autorest.python (head 8cbca95faee88d933a58bbbd17b76834faa8d387), licenses ["MIT"], count 29, events 2017-08-28T20:57:01.000Z to 2022-03-11T14:03:38.000Z
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._operations import MultipleInheritanceServiceClientOperationsMixin
__all__ = [
"MultipleInheritanceServiceClientOperationsMixin",
]
avg_line_length: 43.642857 | max_line_length: 94 | alphanum_fraction: 0.602291
hexsha: e708202c46ca8f3cbf9944bfac96d302904376e8 | size: 44,367 | ext: py | lang: Python
max_stars: cirq-core/cirq/circuits/circuit_operation_test.py @ Nexuscompute/Cirq (head 640ef8f82d6a56ec95361388ce7976e096cca906), licenses ["Apache-2.0"], count null, events null to null
max_issues: cirq-core/cirq/circuits/circuit_operation_test.py @ Nexuscompute/Cirq (head 640ef8f82d6a56ec95361388ce7976e096cca906), licenses ["Apache-2.0"], count 4, events 2022-01-16T14:12:15.000Z to 2022-02-24T03:58:46.000Z
max_forks: cirq-core/cirq/circuits/circuit_operation_test.py @ Nexuscompute/Cirq (head 640ef8f82d6a56ec95361388ce7976e096cca906), licenses ["Apache-2.0"], count null, events null to null
content:
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest.mock as mock
from typing import Optional
import numpy as np
import pytest
import sympy
import cirq
import cirq.circuits.circuit_operation as circuit_operation
from cirq.circuits.circuit_operation import _full_join_string_lists
ALL_SIMULATORS = (cirq.Simulator(), cirq.DensityMatrixSimulator(), cirq.CliffordSimulator())
def test_properties():
a, b, c = cirq.LineQubit.range(3)
circuit = cirq.FrozenCircuit(
cirq.X(a),
cirq.Y(b),
cirq.H(c),
cirq.CX(a, b) ** sympy.Symbol('exp'),
cirq.measure(a, b, c, key='m'),
)
op = cirq.CircuitOperation(circuit)
assert op.circuit is circuit
assert op.qubits == (a, b, c)
assert op.qubit_map == {}
assert op.measurement_key_map == {}
assert op.param_resolver == cirq.ParamResolver()
assert op.repetitions == 1
assert op.repetition_ids is None
# Despite having the same decomposition, these objects are not equal.
assert op != circuit
assert op == circuit.to_op()
def test_circuit_type():
a, b, c = cirq.LineQubit.range(3)
circuit = cirq.Circuit(
cirq.X(a),
cirq.Y(b),
cirq.H(c),
cirq.CX(a, b) ** sympy.Symbol('exp'),
cirq.measure(a, b, c, key='m'),
)
with pytest.raises(TypeError, match='Expected circuit of type FrozenCircuit'):
_ = cirq.CircuitOperation(circuit)
def test_non_invertible_circuit():
a, b, c = cirq.LineQubit.range(3)
circuit = cirq.FrozenCircuit(
cirq.X(a),
cirq.Y(b),
cirq.H(c),
cirq.CX(a, b) ** sympy.Symbol('exp'),
cirq.measure(a, b, c, key='m'),
)
with pytest.raises(ValueError, match='circuit is not invertible'):
_ = cirq.CircuitOperation(circuit, repetitions=-2)
def test_repetitions_and_ids_length_mismatch():
a, b, c = cirq.LineQubit.range(3)
circuit = cirq.FrozenCircuit(
cirq.X(a),
cirq.Y(b),
cirq.H(c),
cirq.CX(a, b) ** sympy.Symbol('exp'),
cirq.measure(a, b, c, key='m'),
)
with pytest.raises(ValueError, match='Expected repetition_ids to be a list of length 2'):
_ = cirq.CircuitOperation(circuit, repetitions=2, repetition_ids=['a', 'b', 'c'])
def test_is_measurement_memoization():
a = cirq.LineQubit(0)
circuit = cirq.FrozenCircuit(cirq.measure(a, key='m'))
c_op = cirq.CircuitOperation(circuit)
assert circuit._has_measurements is None
# Memoize `_has_measurements` in the circuit.
assert cirq.is_measurement(c_op)
assert circuit._has_measurements is True
def test_invalid_measurement_keys():
a = cirq.LineQubit(0)
circuit = cirq.FrozenCircuit(cirq.measure(a, key='m'))
c_op = cirq.CircuitOperation(circuit)
# Invalid key remapping
with pytest.raises(ValueError, match='Mapping to invalid key: m:a'):
_ = c_op.with_measurement_key_mapping({'m': 'm:a'})
# Invalid key remapping nested CircuitOperation
with pytest.raises(ValueError, match='Mapping to invalid key: m:a'):
_ = cirq.CircuitOperation(cirq.FrozenCircuit(c_op), measurement_key_map={'m': 'm:a'})
# Originally invalid key
with pytest.raises(ValueError, match='Invalid key name: m:a'):
_ = cirq.CircuitOperation(cirq.FrozenCircuit(cirq.measure(a, key='m:a')))
# Remapped to valid key
_ = cirq.CircuitOperation(circuit, measurement_key_map={'m:a': 'ma'})
def test_invalid_qubit_mapping():
q = cirq.LineQubit(0)
q3 = cirq.LineQid(1, dimension=3)
# Invalid qid remapping dict in constructor
with pytest.raises(ValueError, match='Qid dimension conflict'):
_ = cirq.CircuitOperation(cirq.FrozenCircuit(), qubit_map={q: q3})
# Invalid qid remapping dict in with_qubit_mapping call
c_op = cirq.CircuitOperation(cirq.FrozenCircuit(cirq.X(q)))
with pytest.raises(ValueError, match='Qid dimension conflict'):
_ = c_op.with_qubit_mapping({q: q3})
# Invalid qid remapping function in with_qubit_mapping call
with pytest.raises(ValueError, match='Qid dimension conflict'):
_ = c_op.with_qubit_mapping(lambda q: q3)
def test_circuit_sharing():
a, b, c = cirq.LineQubit.range(3)
circuit = cirq.FrozenCircuit(
cirq.X(a),
cirq.Y(b),
cirq.H(c),
cirq.CX(a, b) ** sympy.Symbol('exp'),
cirq.measure(a, b, c, key='m'),
)
op1 = cirq.CircuitOperation(circuit)
op2 = cirq.CircuitOperation(op1.circuit)
op3 = circuit.to_op()
assert op1.circuit is circuit
assert op2.circuit is circuit
assert op3.circuit is circuit
assert hash(op1) == hash(op2)
assert hash(op1) == hash(op3)
def test_with_qubits():
a, b, c, d = cirq.LineQubit.range(4)
circuit = cirq.FrozenCircuit(cirq.H(a), cirq.CX(a, b))
op_base = cirq.CircuitOperation(circuit)
op_with_qubits = op_base.with_qubits(d, c)
assert op_with_qubits.base_operation() == op_base
assert op_with_qubits.qubits == (d, c)
assert op_with_qubits.qubit_map == {a: d, b: c}
assert op_base.with_qubit_mapping({a: d, b: c, d: a}) == op_with_qubits
def map_fn(qubit: 'cirq.Qid') -> 'cirq.Qid':
if qubit == a:
return d
if qubit == b:
return c
return qubit
fn_op = op_base.with_qubit_mapping(map_fn)
assert fn_op == op_with_qubits
# map_fn does not affect qubits c and d.
assert fn_op.with_qubit_mapping(map_fn) == op_with_qubits
# with_qubits must receive the same number of qubits as the circuit contains.
with pytest.raises(ValueError, match='Expected 2 qubits, got 3'):
_ = op_base.with_qubits(c, d, b)
# Two qubits cannot be mapped onto the same target qubit.
with pytest.raises(ValueError, match='Collision in qubit map'):
_ = op_base.with_qubit_mapping({a: b})
# Two qubits cannot be transformed into the same target qubit.
with pytest.raises(ValueError, match='Collision in qubit map'):
_ = op_base.with_qubit_mapping(lambda q: b)
# with_qubit_mapping requires exactly one argument.
with pytest.raises(TypeError, match='must be a function or dict'):
_ = op_base.with_qubit_mapping('bad arg')
def test_with_measurement_keys():
a, b = cirq.LineQubit.range(2)
circuit = cirq.FrozenCircuit(cirq.X(a), cirq.measure(b, key='mb'), cirq.measure(a, key='ma'))
op_base = cirq.CircuitOperation(circuit)
op_with_keys = op_base.with_measurement_key_mapping({'ma': 'pa', 'x': 'z'})
assert op_with_keys.base_operation() == op_base
assert op_with_keys.measurement_key_map == {'ma': 'pa'}
assert cirq.measurement_key_names(op_with_keys) == {'pa', 'mb'}
assert cirq.with_measurement_key_mapping(op_base, {'ma': 'pa'}) == op_with_keys
# Two measurement keys cannot be mapped onto the same target string.
with pytest.raises(ValueError):
_ = op_base.with_measurement_key_mapping({'ma': 'mb'})
def test_with_params():
a = cirq.LineQubit(0)
z_exp = sympy.Symbol('z_exp')
x_exp = sympy.Symbol('x_exp')
delta = sympy.Symbol('delta')
theta = sympy.Symbol('theta')
circuit = cirq.FrozenCircuit(cirq.Z(a) ** z_exp, cirq.X(a) ** x_exp, cirq.Z(a) ** delta)
op_base = cirq.CircuitOperation(circuit)
param_dict = {z_exp: 2, x_exp: theta, sympy.Symbol('k'): sympy.Symbol('phi')}
op_with_params = op_base.with_params(param_dict)
assert op_with_params.base_operation() == op_base
assert op_with_params.param_resolver == cirq.ParamResolver(
{
z_exp: 2,
x_exp: theta,
# As 'k' is irrelevant to the circuit, it does not appear here.
}
)
assert cirq.parameter_names(op_with_params) == {'theta', 'delta'}
assert (
cirq.resolve_parameters(op_base, cirq.ParamResolver(param_dict), recursive=False)
== op_with_params
)
def test_recursive_params():
q = cirq.LineQubit(0)
a, a2, b, b2 = sympy.symbols('a a2 b b2')
circuitop = cirq.CircuitOperation(
cirq.FrozenCircuit(cirq.X(q) ** a, cirq.Z(q) ** b),
# Not recursive, a and b are swapped.
param_resolver=cirq.ParamResolver({a: b, b: a}),
)
# Recursive, so a->a2->0 and b->b2->1.
outer_params = {a: a2, a2: 0, b: b2, b2: 1}
resolved = cirq.resolve_parameters(circuitop, outer_params)
# Combined, a->b->b2->1, and b->a->a2->0.
assert resolved.param_resolver.param_dict == {a: 1, b: 0}
# Non-recursive, so a->a2 and b->b2.
resolved = cirq.resolve_parameters(circuitop, outer_params, recursive=False)
# Combined, a->b->b2, and b->a->a2.
assert resolved.param_resolver.param_dict == {a: b2, b: a2}
with pytest.raises(RecursionError):
cirq.resolve_parameters(circuitop, {a: a2, a2: a})
# Non-recursive, so a->b and b->a.
resolved = cirq.resolve_parameters(circuitop, {a: b, b: a}, recursive=False)
# Combined, a->b->a, and b->a->b.
assert resolved.param_resolver.param_dict == {}
# First example should behave like an X when simulated
result = cirq.Simulator().simulate(cirq.Circuit(circuitop), param_resolver=outer_params)
assert np.allclose(result.state_vector(copy=False), [0, 1])
@pytest.mark.parametrize('add_measurements', [True, False])
@pytest.mark.parametrize('use_default_ids_for_initial_rep', [True, False])
def test_repeat(add_measurements, use_default_ids_for_initial_rep):
a, b = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.H(a), cirq.CX(a, b))
if add_measurements:
circuit.append([cirq.measure(b, key='mb'), cirq.measure(a, key='ma')])
op_base = cirq.CircuitOperation(circuit.freeze())
assert op_base.repeat(1) is op_base
assert op_base.repeat(1, ['0']) != op_base
assert op_base.repeat(1, ['0']) == op_base.repeat(repetition_ids=['0'])
assert op_base.repeat(1, ['0']) == op_base.with_repetition_ids(['0'])
initial_repetitions = -3
if add_measurements:
with pytest.raises(ValueError, match='circuit is not invertible'):
_ = op_base.repeat(initial_repetitions)
initial_repetitions = abs(initial_repetitions)
op_with_reps: Optional[cirq.CircuitOperation] = None
rep_ids = []
if use_default_ids_for_initial_rep:
op_with_reps = op_base.repeat(initial_repetitions)
rep_ids = ['0', '1', '2']
assert op_base**initial_repetitions == op_with_reps
else:
rep_ids = ['a', 'b', 'c']
op_with_reps = op_base.repeat(initial_repetitions, rep_ids)
assert op_base**initial_repetitions != op_with_reps
assert (op_base**initial_repetitions).replace(repetition_ids=rep_ids) == op_with_reps
assert op_with_reps.repetitions == initial_repetitions
assert op_with_reps.repetition_ids == rep_ids
assert op_with_reps.repeat(1) is op_with_reps
final_repetitions = 2 * initial_repetitions
op_with_consecutive_reps = op_with_reps.repeat(2)
assert op_with_consecutive_reps.repetitions == final_repetitions
assert op_with_consecutive_reps.repetition_ids == _full_join_string_lists(['0', '1'], rep_ids)
assert op_base**final_repetitions != op_with_consecutive_reps
op_with_consecutive_reps = op_with_reps.repeat(2, ['a', 'b'])
assert op_with_reps.repeat(repetition_ids=['a', 'b']) == op_with_consecutive_reps
assert op_with_consecutive_reps.repetitions == final_repetitions
assert op_with_consecutive_reps.repetition_ids == _full_join_string_lists(['a', 'b'], rep_ids)
with pytest.raises(ValueError, match='length to be 2'):
_ = op_with_reps.repeat(2, ['a', 'b', 'c'])
with pytest.raises(
ValueError, match='At least one of repetitions and repetition_ids must be set'
):
_ = op_base.repeat()
with pytest.raises(TypeError, match='Only integer or sympy repetitions are allowed'):
_ = op_base.repeat(1.3)
assert op_base.repeat(3.00000000001).repetitions == 3
assert op_base.repeat(2.99999999999).repetitions == 3
@pytest.mark.parametrize('add_measurements', [True, False])
@pytest.mark.parametrize('use_repetition_ids', [True, False])
@pytest.mark.parametrize('initial_reps', [0, 1, 2, 3])
def test_repeat_zero_times(add_measurements, use_repetition_ids, initial_reps):
q = cirq.LineQubit(0)
subcircuit = cirq.Circuit(cirq.X(q))
if add_measurements:
subcircuit.append(cirq.measure(q))
op = cirq.CircuitOperation(
subcircuit.freeze(), repetitions=initial_reps, use_repetition_ids=use_repetition_ids
)
result = cirq.Simulator().simulate(cirq.Circuit(op))
assert np.allclose(result.state_vector(copy=False), [0, 1] if initial_reps % 2 else [1, 0])
result = cirq.Simulator().simulate(cirq.Circuit(op**0))
assert np.allclose(result.state_vector(copy=False), [1, 0])
def test_no_repetition_ids():
def default_repetition_ids(self):
assert False, "Should not call default_repetition_ids"
with mock.patch.object(circuit_operation, 'default_repetition_ids', new=default_repetition_ids):
q = cirq.LineQubit(0)
op = cirq.CircuitOperation(
cirq.Circuit(cirq.X(q), cirq.measure(q)).freeze(),
repetitions=1_000_000,
use_repetition_ids=False,
)
assert op.repetitions == 1_000_000
assert op.repetition_ids is None
_ = repr(op)
_ = str(op)
op2 = op.repeat(10)
assert op2.repetitions == 10_000_000
assert op2.repetition_ids is None
def test_parameterized_repeat():
q = cirq.LineQubit(0)
op = cirq.CircuitOperation(cirq.FrozenCircuit(cirq.X(q))) ** sympy.Symbol('a')
assert cirq.parameter_names(op) == {'a'}
assert not cirq.has_unitary(op)
result = cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': 0})
assert np.allclose(result.state_vector(copy=False), [1, 0])
result = cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': 1})
assert np.allclose(result.state_vector(copy=False), [0, 1])
result = cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': 2})
assert np.allclose(result.state_vector(copy=False), [1, 0])
result = cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': -1})
assert np.allclose(result.state_vector(copy=False), [0, 1])
with pytest.raises(TypeError, match='Only integer or sympy repetitions are allowed'):
cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': 1.5})
with pytest.raises(ValueError, match='Circuit contains ops whose symbols were not specified'):
cirq.Simulator().simulate(cirq.Circuit(op))
op = op**-1
assert cirq.parameter_names(op) == {'a'}
assert not cirq.has_unitary(op)
result = cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': 0})
assert np.allclose(result.state_vector(copy=False), [1, 0])
result = cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': 1})
assert np.allclose(result.state_vector(copy=False), [0, 1])
result = cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': 2})
assert np.allclose(result.state_vector(copy=False), [1, 0])
result = cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': -1})
assert np.allclose(result.state_vector(copy=False), [0, 1])
with pytest.raises(TypeError, match='Only integer or sympy repetitions are allowed'):
cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': 1.5})
with pytest.raises(ValueError, match='Circuit contains ops whose symbols were not specified'):
cirq.Simulator().simulate(cirq.Circuit(op))
op = op ** sympy.Symbol('b')
assert cirq.parameter_names(op) == {'a', 'b'}
assert not cirq.has_unitary(op)
result = cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': 1, 'b': 1})
assert np.allclose(result.state_vector(copy=False), [0, 1])
result = cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': 2, 'b': 1})
assert np.allclose(result.state_vector(copy=False), [1, 0])
result = cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': 1, 'b': 2})
assert np.allclose(result.state_vector(copy=False), [1, 0])
with pytest.raises(TypeError, match='Only integer or sympy repetitions are allowed'):
cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': 1.5, 'b': 1})
with pytest.raises(ValueError, match='Circuit contains ops whose symbols were not specified'):
cirq.Simulator().simulate(cirq.Circuit(op))
op = op**2.0
assert cirq.parameter_names(op) == {'a', 'b'}
assert not cirq.has_unitary(op)
result = cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': 1, 'b': 1})
assert np.allclose(result.state_vector(copy=False), [1, 0])
result = cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': 1.5, 'b': 1})
assert np.allclose(result.state_vector(copy=False), [0, 1])
result = cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': 1, 'b': 1.5})
assert np.allclose(result.state_vector(copy=False), [0, 1])
with pytest.raises(TypeError, match='Only integer or sympy repetitions are allowed'):
cirq.Simulator().simulate(cirq.Circuit(op), param_resolver={'a': 1.5, 'b': 1.5})
with pytest.raises(ValueError, match='Circuit contains ops whose symbols were not specified'):
cirq.Simulator().simulate(cirq.Circuit(op))
def test_parameterized_repeat_side_effects():
q = cirq.LineQubit(0)
op = cirq.CircuitOperation(
cirq.FrozenCircuit(cirq.X(q).with_classical_controls('c'), cirq.measure(q, key='m')),
repetitions=sympy.Symbol('a'),
)
# Control keys can be calculated because they only "lift" if there's a matching
# measurement, in which case they're not returned here.
assert cirq.control_keys(op) == {cirq.MeasurementKey('c')}
# "local" params do not bind to the repetition param.
assert cirq.parameter_names(op.with_params({'a': 1})) == {'a'}
# Check errors that require unrolling the circuit.
with pytest.raises(
ValueError, match='Cannot unroll circuit due to nondeterministic repetitions'
):
cirq.measurement_key_objs(op)
with pytest.raises(
ValueError, match='Cannot unroll circuit due to nondeterministic repetitions'
):
cirq.measurement_key_names(op)
with pytest.raises(
ValueError, match='Cannot unroll circuit due to nondeterministic repetitions'
):
op.mapped_circuit()
with pytest.raises(
ValueError, match='Cannot unroll circuit due to nondeterministic repetitions'
):
cirq.decompose(op)
# Not compatible with repetition ids
with pytest.raises(ValueError, match='repetition ids with parameterized repetitions'):
op.with_repetition_ids(['x', 'y'])
with pytest.raises(ValueError, match='repetition ids with parameterized repetitions'):
op.repeat(repetition_ids=['x', 'y'])
# TODO(daxfohl): This should work, but likely requires a new protocol that returns *just* the
# name of the measurement keys. (measurement_key_names returns the full serialized string).
with pytest.raises(
ValueError, match='Cannot unroll circuit due to nondeterministic repetitions'
):
cirq.with_measurement_key_mapping(op, {'m': 'm2'})
# Everything should work once resolved
op = cirq.resolve_parameters(op, {'a': 2})
assert set(map(str, cirq.measurement_key_objs(op))) == {'0:m', '1:m'}
assert op.mapped_circuit() == cirq.Circuit(
cirq.X(q).with_classical_controls('c'),
cirq.measure(q, key=cirq.MeasurementKey.parse_serialized('0:m')),
cirq.X(q).with_classical_controls('c'),
cirq.measure(q, key=cirq.MeasurementKey.parse_serialized('1:m')),
)
assert cirq.decompose(op) == cirq.decompose(
cirq.Circuit(
cirq.X(q).with_classical_controls('c'),
cirq.measure(q, key=cirq.MeasurementKey.parse_serialized('0:m')),
cirq.X(q).with_classical_controls('c'),
cirq.measure(q, key=cirq.MeasurementKey.parse_serialized('1:m')),
)
)
def test_parameterized_repeat_side_effects_when_not_using_rep_ids():
q = cirq.LineQubit(0)
op = cirq.CircuitOperation(
cirq.FrozenCircuit(cirq.X(q).with_classical_controls('c'), cirq.measure(q, key='m')),
repetitions=sympy.Symbol('a'),
use_repetition_ids=False,
)
assert cirq.control_keys(op) == {cirq.MeasurementKey('c')}
assert cirq.parameter_names(op.with_params({'a': 1})) == {'a'}
assert set(map(str, cirq.measurement_key_objs(op))) == {'m'}
assert cirq.measurement_key_names(op) == {'m'}
assert cirq.measurement_key_names(cirq.with_measurement_key_mapping(op, {'m': 'm2'})) == {'m2'}
with pytest.raises(
ValueError, match='Cannot unroll circuit due to nondeterministic repetitions'
):
op.mapped_circuit()
with pytest.raises(
ValueError, match='Cannot unroll circuit due to nondeterministic repetitions'
):
cirq.decompose(op)
with pytest.raises(ValueError, match='repetition ids with parameterized repetitions'):
op.with_repetition_ids(['x', 'y'])
with pytest.raises(ValueError, match='repetition ids with parameterized repetitions'):
op.repeat(repetition_ids=['x', 'y'])
def test_qid_shape():
circuit = cirq.FrozenCircuit(
cirq.IdentityGate(qid_shape=(q.dimension,)).on(q)
for q in cirq.LineQid.for_qid_shape((1, 2, 3, 4))
)
op = cirq.CircuitOperation(circuit)
assert cirq.qid_shape(op) == (1, 2, 3, 4)
assert cirq.num_qubits(op) == 4
id_circuit = cirq.FrozenCircuit(cirq.I(q) for q in cirq.LineQubit.range(3))
id_op = cirq.CircuitOperation(id_circuit)
assert cirq.qid_shape(id_op) == (2, 2, 2)
assert cirq.num_qubits(id_op) == 3
def test_string_format():
x, y, z = cirq.LineQubit.range(3)
fc0 = cirq.FrozenCircuit()
op0 = cirq.CircuitOperation(fc0)
assert str(op0) == f"[ ]"
fc0_global_phase_inner = cirq.FrozenCircuit(
cirq.global_phase_operation(1j), cirq.global_phase_operation(1j)
)
op0_global_phase_inner = cirq.CircuitOperation(fc0_global_phase_inner)
fc0_global_phase_outer = cirq.FrozenCircuit(
op0_global_phase_inner, cirq.global_phase_operation(1j)
)
op0_global_phase_outer = cirq.CircuitOperation(fc0_global_phase_outer)
assert (
str(op0_global_phase_outer)
== f"""\
[ ]
[ ]
[ global phase: -0.5π ]"""
)
fc1 = cirq.FrozenCircuit(cirq.X(x), cirq.H(y), cirq.CX(y, z), cirq.measure(x, y, z, key='m'))
op1 = cirq.CircuitOperation(fc1)
assert (
str(op1)
== f"""\
[ 0: ───X───────M('m')─── ]
[ │ ]
[ 1: ───H───@───M──────── ]
[ │ │ ]
[ 2: ───────X───M──────── ]"""
)
assert (
repr(op1)
== """\
cirq.CircuitOperation(
circuit=cirq.FrozenCircuit([
cirq.Moment(
cirq.X(cirq.LineQubit(0)),
cirq.H(cirq.LineQubit(1)),
),
cirq.Moment(
cirq.CNOT(cirq.LineQubit(1), cirq.LineQubit(2)),
),
cirq.Moment(
cirq.measure(cirq.LineQubit(0), cirq.LineQubit(1), cirq.LineQubit(2), key=cirq.MeasurementKey(name='m')),
),
]),
)"""
)
fc2 = cirq.FrozenCircuit(cirq.X(x), cirq.H(y), cirq.CX(y, x))
op2 = cirq.CircuitOperation(
circuit=fc2,
qubit_map=({y: z}),
repetitions=3,
parent_path=('outer', 'inner'),
repetition_ids=['a', 'b', 'c'],
)
assert (
str(op2)
== f"""\
[ 0: ───X───X─── ]
[ │ ]
[ 1: ───H───@─── ](qubit_map={{q(1): q(2)}}, parent_path=('outer', 'inner'),\
repetition_ids=['a', 'b', 'c'])"""
)
assert (
repr(op2)
== """\
cirq.CircuitOperation(
circuit=cirq.FrozenCircuit([
cirq.Moment(
cirq.X(cirq.LineQubit(0)),
cirq.H(cirq.LineQubit(1)),
),
cirq.Moment(
cirq.CNOT(cirq.LineQubit(1), cirq.LineQubit(0)),
),
]),
repetitions=3,
qubit_map={cirq.LineQubit(1): cirq.LineQubit(2)},
parent_path=('outer', 'inner'),
repetition_ids=['a', 'b', 'c'],
)"""
)
fc3 = cirq.FrozenCircuit(cirq.X(x) ** sympy.Symbol('b'), cirq.measure(x, key='m'))
op3 = cirq.CircuitOperation(
circuit=fc3,
qubit_map={x: y},
measurement_key_map={'m': 'p'},
param_resolver={sympy.Symbol('b'): 2},
)
indented_fc3_repr = repr(fc3).replace('\n', '\n ')
assert (
str(op3)
== f"""\
[ 0: ───X^b───M('m')─── ](qubit_map={{q(0): q(1)}}, \
key_map={{m: p}}, params={{b: 2}})"""
)
assert (
repr(op3)
== f"""\
cirq.CircuitOperation(
circuit={indented_fc3_repr},
qubit_map={{cirq.LineQubit(0): cirq.LineQubit(1)}},
measurement_key_map={{'m': 'p'}},
param_resolver=cirq.ParamResolver({{sympy.Symbol('b'): 2}}),
)"""
)
fc4 = cirq.FrozenCircuit(cirq.X(y))
op4 = cirq.CircuitOperation(fc4)
fc5 = cirq.FrozenCircuit(cirq.X(x), op4)
op5 = cirq.CircuitOperation(fc5)
assert (
repr(op5)
== """\
cirq.CircuitOperation(
circuit=cirq.FrozenCircuit([
cirq.Moment(
cirq.X(cirq.LineQubit(0)),
cirq.CircuitOperation(
circuit=cirq.FrozenCircuit([
cirq.Moment(
cirq.X(cirq.LineQubit(1)),
),
]),
),
),
]),
)"""
)
op6 = cirq.CircuitOperation(fc5, use_repetition_ids=False)
assert (
repr(op6)
== """\
cirq.CircuitOperation(
circuit=cirq.FrozenCircuit([
cirq.Moment(
cirq.X(cirq.LineQubit(0)),
cirq.CircuitOperation(
circuit=cirq.FrozenCircuit([
cirq.Moment(
cirq.X(cirq.LineQubit(1)),
),
]),
),
),
]),
use_repetition_ids=False,
)"""
)
op7 = cirq.CircuitOperation(
cirq.FrozenCircuit(cirq.measure(x, key='a')),
use_repetition_ids=False,
repeat_until=cirq.KeyCondition(cirq.MeasurementKey('a')),
)
assert (
repr(op7)
== """\
cirq.CircuitOperation(
circuit=cirq.FrozenCircuit([
cirq.Moment(
cirq.measure(cirq.LineQubit(0), key=cirq.MeasurementKey(name='a')),
),
]),
use_repetition_ids=False,
repeat_until=cirq.KeyCondition(cirq.MeasurementKey(name='a')),
)"""
)
def test_json_dict():
a, b, c = cirq.LineQubit.range(3)
circuit = cirq.FrozenCircuit(
cirq.X(a),
cirq.Y(b),
cirq.H(c),
cirq.CX(a, b) ** sympy.Symbol('exp'),
cirq.measure(a, b, c, key='m'),
)
op = cirq.CircuitOperation(
circuit=circuit,
qubit_map={c: b, b: c},
measurement_key_map={'m': 'p'},
param_resolver={'exp': 'theta'},
parent_path=('nested', 'path'),
)
assert op._json_dict_() == {
'circuit': circuit,
'repetitions': 1,
'qubit_map': sorted([(k, v) for k, v in op.qubit_map.items()]),
'measurement_key_map': op.measurement_key_map,
'param_resolver': op.param_resolver,
'parent_path': op.parent_path,
'repetition_ids': None,
}
def test_terminal_matches():
a, b = cirq.LineQubit.range(2)
fc = cirq.FrozenCircuit(cirq.H(a), cirq.measure(b, key='m1'))
op = cirq.CircuitOperation(fc)
c = cirq.Circuit(cirq.X(a), op)
assert c.are_all_measurements_terminal()
assert c.are_any_measurements_terminal()
c = cirq.Circuit(cirq.X(b), op)
assert c.are_all_measurements_terminal()
assert c.are_any_measurements_terminal()
c = cirq.Circuit(cirq.measure(a), op)
assert not c.are_all_measurements_terminal()
assert c.are_any_measurements_terminal()
c = cirq.Circuit(cirq.measure(b), op)
assert not c.are_all_measurements_terminal()
assert c.are_any_measurements_terminal()
c = cirq.Circuit(op, cirq.X(a))
assert c.are_all_measurements_terminal()
assert c.are_any_measurements_terminal()
c = cirq.Circuit(op, cirq.X(b))
assert not c.are_all_measurements_terminal()
assert not c.are_any_measurements_terminal()
c = cirq.Circuit(op, cirq.measure(a))
assert c.are_all_measurements_terminal()
assert c.are_any_measurements_terminal()
c = cirq.Circuit(op, cirq.measure(b))
assert not c.are_all_measurements_terminal()
assert c.are_any_measurements_terminal()
def test_nonterminal_in_subcircuit():
a, b = cirq.LineQubit.range(2)
fc = cirq.FrozenCircuit(cirq.H(a), cirq.measure(b, key='m1'), cirq.X(b))
op = cirq.CircuitOperation(fc)
c = cirq.Circuit(cirq.X(a), op)
assert isinstance(op, cirq.CircuitOperation)
assert not c.are_all_measurements_terminal()
assert not c.are_any_measurements_terminal()
op = op.with_tags('test')
c = cirq.Circuit(cirq.X(a), op)
assert not isinstance(op, cirq.CircuitOperation)
assert not c.are_all_measurements_terminal()
assert not c.are_any_measurements_terminal()
def test_decompose_applies_maps():
a, b, c = cirq.LineQubit.range(3)
exp = sympy.Symbol('exp')
theta = sympy.Symbol('theta')
circuit = cirq.FrozenCircuit(
cirq.X(a) ** theta,
cirq.Y(b),
cirq.H(c),
cirq.CX(a, b) ** exp,
cirq.measure(a, b, c, key='m'),
)
op = cirq.CircuitOperation(
circuit=circuit,
qubit_map={c: b, b: c},
measurement_key_map={'m': 'p'},
param_resolver={exp: theta, theta: exp},
)
expected_circuit = cirq.Circuit(
cirq.X(a) ** exp,
cirq.Y(c),
cirq.H(b),
cirq.CX(a, c) ** theta,
cirq.measure(a, c, b, key='p'),
)
assert cirq.Circuit(cirq.decompose_once(op)) == expected_circuit
def test_decompose_loops():
a, b = cirq.LineQubit.range(2)
circuit = cirq.FrozenCircuit(cirq.H(a), cirq.CX(a, b))
base_op = cirq.CircuitOperation(circuit)
op = base_op.with_qubits(b, a).repeat(3)
expected_circuit = cirq.Circuit(
cirq.H(b), cirq.CX(b, a), cirq.H(b), cirq.CX(b, a), cirq.H(b), cirq.CX(b, a)
)
assert cirq.Circuit(cirq.decompose_once(op)) == expected_circuit
op = base_op.repeat(-2)
expected_circuit = cirq.Circuit(cirq.CX(a, b), cirq.H(a), cirq.CX(a, b), cirq.H(a))
assert cirq.Circuit(cirq.decompose_once(op)) == expected_circuit
def test_decompose_loops_with_measurements():
a, b = cirq.LineQubit.range(2)
circuit = cirq.FrozenCircuit(cirq.H(a), cirq.CX(a, b), cirq.measure(a, b, key='m'))
base_op = cirq.CircuitOperation(circuit)
op = base_op.with_qubits(b, a).repeat(3)
expected_circuit = cirq.Circuit(
cirq.H(b),
cirq.CX(b, a),
cirq.measure(b, a, key=cirq.MeasurementKey.parse_serialized('0:m')),
cirq.H(b),
cirq.CX(b, a),
cirq.measure(b, a, key=cirq.MeasurementKey.parse_serialized('1:m')),
cirq.H(b),
cirq.CX(b, a),
cirq.measure(b, a, key=cirq.MeasurementKey.parse_serialized('2:m')),
)
assert cirq.Circuit(cirq.decompose_once(op)) == expected_circuit
def test_decompose_nested():
a, b, c, d = cirq.LineQubit.range(4)
exp1 = sympy.Symbol('exp1')
exp_half = sympy.Symbol('exp_half')
exp_one = sympy.Symbol('exp_one')
exp_two = sympy.Symbol('exp_two')
circuit1 = cirq.FrozenCircuit(cirq.X(a) ** exp1, cirq.measure(a, key='m1'))
op1 = cirq.CircuitOperation(circuit1)
circuit2 = cirq.FrozenCircuit(
op1.with_qubits(a).with_measurement_key_mapping({'m1': 'ma'}),
op1.with_qubits(b).with_measurement_key_mapping({'m1': 'mb'}),
op1.with_qubits(c).with_measurement_key_mapping({'m1': 'mc'}),
op1.with_qubits(d).with_measurement_key_mapping({'m1': 'md'}),
)
op2 = cirq.CircuitOperation(circuit2)
circuit3 = cirq.FrozenCircuit(
op2.with_params({exp1: exp_half}),
op2.with_params({exp1: exp_one})
.with_measurement_key_mapping({'ma': 'ma1'})
.with_measurement_key_mapping({'mb': 'mb1'})
.with_measurement_key_mapping({'mc': 'mc1'})
.with_measurement_key_mapping({'md': 'md1'}),
op2.with_params({exp1: exp_two})
.with_measurement_key_mapping({'ma': 'ma2'})
.with_measurement_key_mapping({'mb': 'mb2'})
.with_measurement_key_mapping({'mc': 'mc2'})
.with_measurement_key_mapping({'md': 'md2'}),
)
op3 = cirq.CircuitOperation(circuit3)
final_op = op3.with_params({exp_half: 0.5, exp_one: 1.0, exp_two: 2.0})
expected_circuit1 = cirq.Circuit(
op2.with_params({exp1: 0.5, exp_half: 0.5, exp_one: 1.0, exp_two: 2.0}),
op2.with_params({exp1: 1.0, exp_half: 0.5, exp_one: 1.0, exp_two: 2.0})
.with_measurement_key_mapping({'ma': 'ma1'})
.with_measurement_key_mapping({'mb': 'mb1'})
.with_measurement_key_mapping({'mc': 'mc1'})
.with_measurement_key_mapping({'md': 'md1'}),
op2.with_params({exp1: 2.0, exp_half: 0.5, exp_one: 1.0, exp_two: 2.0})
.with_measurement_key_mapping({'ma': 'ma2'})
.with_measurement_key_mapping({'mb': 'mb2'})
.with_measurement_key_mapping({'mc': 'mc2'})
.with_measurement_key_mapping({'md': 'md2'}),
)
result_ops1 = cirq.decompose_once(final_op)
assert cirq.Circuit(result_ops1) == expected_circuit1
expected_circuit = cirq.Circuit(
cirq.X(a) ** 0.5,
cirq.measure(a, key='ma'),
cirq.X(b) ** 0.5,
cirq.measure(b, key='mb'),
cirq.X(c) ** 0.5,
cirq.measure(c, key='mc'),
cirq.X(d) ** 0.5,
cirq.measure(d, key='md'),
cirq.X(a) ** 1.0,
cirq.measure(a, key='ma1'),
cirq.X(b) ** 1.0,
cirq.measure(b, key='mb1'),
cirq.X(c) ** 1.0,
cirq.measure(c, key='mc1'),
cirq.X(d) ** 1.0,
cirq.measure(d, key='md1'),
cirq.X(a) ** 2.0,
cirq.measure(a, key='ma2'),
cirq.X(b) ** 2.0,
cirq.measure(b, key='mb2'),
cirq.X(c) ** 2.0,
cirq.measure(c, key='mc2'),
cirq.X(d) ** 2.0,
cirq.measure(d, key='md2'),
)
assert cirq.Circuit(cirq.decompose(final_op)) == expected_circuit
# Verify that mapped_circuit gives the same operations.
assert final_op.mapped_circuit(deep=True) == expected_circuit
def test_decompose_repeated_nested_measurements():
# Details of this test described at
# https://tinyurl.com/measurement-repeated-circuitop#heading=h.sbgxcsyin9wt.
a = cirq.LineQubit(0)
op1 = (
cirq.CircuitOperation(cirq.FrozenCircuit(cirq.measure(a, key='A')))
.with_measurement_key_mapping({'A': 'B'})
.repeat(2, ['zero', 'one'])
)
op2 = (
cirq.CircuitOperation(cirq.FrozenCircuit(cirq.measure(a, key='P'), op1))
.with_measurement_key_mapping({'B': 'C', 'P': 'Q'})
.repeat(2, ['zero', 'one'])
)
op3 = (
cirq.CircuitOperation(cirq.FrozenCircuit(cirq.measure(a, key='X'), op2))
.with_measurement_key_mapping({'C': 'D', 'X': 'Y'})
.repeat(2, ['zero', 'one'])
)
expected_measurement_keys_in_order = [
'zero:Y',
'zero:zero:Q',
'zero:zero:zero:D',
'zero:zero:one:D',
'zero:one:Q',
'zero:one:zero:D',
'zero:one:one:D',
'one:Y',
'one:zero:Q',
'one:zero:zero:D',
'one:zero:one:D',
'one:one:Q',
'one:one:zero:D',
'one:one:one:D',
]
assert cirq.measurement_key_names(op3) == set(expected_measurement_keys_in_order)
expected_circuit = cirq.Circuit()
for key in expected_measurement_keys_in_order:
expected_circuit.append(cirq.measure(a, key=cirq.MeasurementKey.parse_serialized(key)))
assert cirq.Circuit(cirq.decompose(op3)) == expected_circuit
assert cirq.measurement_key_names(expected_circuit) == set(expected_measurement_keys_in_order)
# Verify that mapped_circuit gives the same operations.
assert op3.mapped_circuit(deep=True) == expected_circuit
def test_keys_under_parent_path():
a = cirq.LineQubit(0)
op1 = cirq.CircuitOperation(cirq.FrozenCircuit(cirq.measure(a, key='A')))
assert cirq.measurement_key_names(op1) == {'A'}
op2 = op1.with_key_path(('B',))
assert cirq.measurement_key_names(op2) == {'B:A'}
op3 = op2.repeat(2)
assert cirq.measurement_key_names(op3) == {'B:0:A', 'B:1:A'}
def test_mapped_circuit_preserves_moments():
q0, q1 = cirq.LineQubit.range(2)
fc = cirq.FrozenCircuit(cirq.Moment(cirq.X(q0)), cirq.Moment(cirq.X(q1)))
op = cirq.CircuitOperation(fc)
assert op.mapped_circuit() == fc
assert op.repeat(3).mapped_circuit(deep=True) == fc * 3
def test_mapped_op():
q0, q1 = cirq.LineQubit.range(2)
a, b = (sympy.Symbol(x) for x in 'ab')
fc1 = cirq.FrozenCircuit(cirq.X(q0) ** a, cirq.measure(q0, q1, key='m'))
op1 = (
cirq.CircuitOperation(fc1)
.with_params({'a': 'b'})
.with_qubits(q1, q0)
.with_measurement_key_mapping({'m': 'k'})
)
fc2 = cirq.FrozenCircuit(cirq.X(q1) ** b, cirq.measure(q1, q0, key='k'))
op2 = cirq.CircuitOperation(fc2)
assert op1.mapped_op() == op2
def test_tag_propagation():
# Tags are not propagated from the CircuitOperation to its components.
# TODO: support tag propagation for better serialization.
a, b, c = cirq.LineQubit.range(3)
circuit = cirq.FrozenCircuit(cirq.X(a), cirq.H(b), cirq.H(c), cirq.CZ(a, c))
op = cirq.CircuitOperation(circuit)
test_tag = 'test_tag'
op = op.with_tags(test_tag)
assert test_tag in op.tags
# TODO: Tags must propagate during decomposition.
sub_ops = cirq.decompose(op)
for op in sub_ops:
assert test_tag not in op.tags
def test_mapped_circuit_keeps_keys_under_parent_path():
q = cirq.LineQubit(0)
op1 = cirq.CircuitOperation(
cirq.FrozenCircuit(
cirq.measure(q, key='A'),
cirq.measure_single_paulistring(cirq.X(q), key='B'),
cirq.MixedUnitaryChannel.from_mixture(cirq.bit_flip(0.5), key='C').on(q),
cirq.KrausChannel.from_channel(cirq.phase_damp(0.5), key='D').on(q),
)
)
op2 = op1.with_key_path(('X',))
assert cirq.measurement_key_names(op2.mapped_circuit()) == {'X:A', 'X:B', 'X:C', 'X:D'}
def test_mapped_circuit_allows_repeated_keys():
q = cirq.LineQubit(0)
op1 = cirq.CircuitOperation(cirq.FrozenCircuit(cirq.measure(q, key='A')))
op2 = cirq.CircuitOperation(cirq.FrozenCircuit(op1, op1))
circuit = op2.mapped_circuit(deep=True)
cirq.testing.assert_has_diagram(
circuit, "0: ───M('A')───M('A')───", use_unicode_characters=True
)
op1 = cirq.measure(q, key='A')
op2 = cirq.CircuitOperation(cirq.FrozenCircuit(op1, op1))
circuit = op2.mapped_circuit()
cirq.testing.assert_has_diagram(
circuit, "0: ───M('A')───M('A')───", use_unicode_characters=True
)
@pytest.mark.parametrize('sim', ALL_SIMULATORS)
def test_simulate_no_repetition_ids_both_levels(sim):
q = cirq.LineQubit(0)
inner = cirq.Circuit(cirq.measure(q, key='a'))
middle = cirq.Circuit(
cirq.CircuitOperation(inner.freeze(), repetitions=2, use_repetition_ids=False)
)
outer_subcircuit = cirq.CircuitOperation(
middle.freeze(), repetitions=2, use_repetition_ids=False
)
circuit = cirq.Circuit(outer_subcircuit)
result = sim.run(circuit)
assert result.records['a'].shape == (1, 4, 1)
@pytest.mark.parametrize('sim', ALL_SIMULATORS)
def test_simulate_no_repetition_ids_outer(sim):
q = cirq.LineQubit(0)
inner = cirq.Circuit(cirq.measure(q, key='a'))
middle = cirq.Circuit(cirq.CircuitOperation(inner.freeze(), repetitions=2))
outer_subcircuit = cirq.CircuitOperation(
middle.freeze(), repetitions=2, use_repetition_ids=False
)
circuit = cirq.Circuit(outer_subcircuit)
result = sim.run(circuit)
assert result.records['0:a'].shape == (1, 2, 1)
assert result.records['1:a'].shape == (1, 2, 1)
@pytest.mark.parametrize('sim', ALL_SIMULATORS)
def test_simulate_no_repetition_ids_inner(sim):
q = cirq.LineQubit(0)
inner = cirq.Circuit(cirq.measure(q, key='a'))
middle = cirq.Circuit(
cirq.CircuitOperation(inner.freeze(), repetitions=2, use_repetition_ids=False)
)
outer_subcircuit = cirq.CircuitOperation(middle.freeze(), repetitions=2)
circuit = cirq.Circuit(outer_subcircuit)
result = sim.run(circuit)
assert result.records['0:a'].shape == (1, 2, 1)
assert result.records['1:a'].shape == (1, 2, 1)
@pytest.mark.parametrize('sim', ALL_SIMULATORS)
def test_repeat_until(sim):
q = cirq.LineQubit(0)
key = cirq.MeasurementKey('m')
c = cirq.Circuit(
cirq.X(q),
cirq.CircuitOperation(
cirq.FrozenCircuit(cirq.X(q), cirq.measure(q, key=key)),
use_repetition_ids=False,
repeat_until=cirq.KeyCondition(key),
),
)
measurements = sim.run(c).records['m'][0]
assert len(measurements) == 2
assert measurements[0] == (0,)
assert measurements[1] == (1,)
@pytest.mark.parametrize('sim', ALL_SIMULATORS)
def test_repeat_until_sympy(sim):
q1, q2 = cirq.LineQubit.range(2)
circuitop = cirq.CircuitOperation(
cirq.FrozenCircuit(cirq.X(q2), cirq.measure(q2, key='b')),
use_repetition_ids=False,
repeat_until=cirq.SympyCondition(sympy.Eq(sympy.Symbol('a'), sympy.Symbol('b'))),
)
c = cirq.Circuit(cirq.measure(q1, key='a'), circuitop)
# Validate commutation
assert len(c) == 2
assert cirq.control_keys(circuitop) == {cirq.MeasurementKey('a')}
measurements = sim.run(c).records['b'][0]
assert len(measurements) == 2
assert measurements[0] == (1,)
assert measurements[1] == (0,)
@pytest.mark.parametrize('sim', [cirq.Simulator(), cirq.DensityMatrixSimulator()])
def test_post_selection(sim):
q = cirq.LineQubit(0)
key = cirq.MeasurementKey('m')
c = cirq.Circuit(
cirq.CircuitOperation(
cirq.FrozenCircuit(cirq.X(q) ** 0.2, cirq.measure(q, key=key)),
use_repetition_ids=False,
repeat_until=cirq.KeyCondition(key),
)
)
result = sim.run(c)
assert result.records['m'][0][-1] == (1,)
for i in range(len(result.records['m'][0]) - 1):
assert result.records['m'][0][i] == (0,)
def test_repeat_until_diagram():
q = cirq.LineQubit(0)
key = cirq.MeasurementKey('m')
c = cirq.Circuit(
cirq.CircuitOperation(
cirq.FrozenCircuit(cirq.X(q) ** 0.2, cirq.measure(q, key=key)),
use_repetition_ids=False,
repeat_until=cirq.KeyCondition(key),
)
)
cirq.testing.assert_has_diagram(
c,
"""
0: ───[ 0: ───X^0.2───M('m')─── ](no_rep_ids, until=m)───
""",
use_unicode_characters=True,
)
def test_repeat_until_error():
q = cirq.LineQubit(0)
with pytest.raises(ValueError, match='Cannot use repetitions with repeat_until'):
cirq.CircuitOperation(
cirq.FrozenCircuit(),
use_repetition_ids=True,
repeat_until=cirq.KeyCondition(cirq.MeasurementKey('a')),
)
with pytest.raises(ValueError, match='Infinite loop'):
cirq.CircuitOperation(
cirq.FrozenCircuit(cirq.measure(q, key='m')),
use_repetition_ids=False,
repeat_until=cirq.KeyCondition(cirq.MeasurementKey('a')),
)
# TODO: Operation has a "gate" property. What is this for a CircuitOperation?
| 36.849668
| 117
| 0.642099
|
0c2815fb10ef23fd9bf46d7e2a323823b4c02653
| 2,662
|
py
|
Python
|
mayan/apps/events/links.py
|
Syunkolee9891/Mayan-EDMS
|
3759a9503a264a180b74cc8518388f15ca66ac1a
|
[
"Apache-2.0"
] | 1
|
2021-06-17T18:24:25.000Z
|
2021-06-17T18:24:25.000Z
|
mayan/apps/events/links.py
|
Syunkolee9891/Mayan-EDMS
|
3759a9503a264a180b74cc8518388f15ca66ac1a
|
[
"Apache-2.0"
] | 7
|
2020-06-06T00:01:04.000Z
|
2022-01-13T01:47:17.000Z
|
mayan/apps/events/links.py
|
Syunkolee9891/Mayan-EDMS
|
3759a9503a264a180b74cc8518388f15ca66ac1a
|
[
"Apache-2.0"
] | 1
|
2020-07-29T21:03:27.000Z
|
2020-07-29T21:03:27.000Z
|
from __future__ import unicode_literals
from django.apps import apps
from django.utils.translation import ugettext_lazy as _
from mayan.apps.navigation.classes import Link
from .icons import (
icon_events_list, icon_events_for_object,
icon_event_types_subscriptions_list,
icon_object_event_types_user_subcriptions_list,
icon_user_notifications_list
)
from .permissions import permission_events_view
def get_kwargs_factory(variable_name):
def get_kwargs(context):
ContentType = apps.get_model(
app_label='contenttypes', model_name='ContentType'
)
content_type = ContentType.objects.get_for_model(
model=context[variable_name]
)
return {
'app_label': '"{}"'.format(content_type.app_label),
'model': '"{}"'.format(content_type.model),
'object_id': '{}.pk'.format(variable_name)
}
return get_kwargs
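# Illustrative sketch (an assumption added for clarity, not part of the
# original module): for a context where context['resolved_object'] is, say, a
# Document instance, the factory above returns a callable yielding roughly
#
#     {'app_label': '"documents"', 'model': '"document"',
#      'object_id': 'resolved_object.pk'}
#
# The extra double quotes mark app_label/model as literals, while object_id is
# presumably resolved against the rendering context by the navigation Link.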
def get_unread_notification_count(context):
Notification = apps.get_model(
app_label='events', model_name='Notification'
)
return Notification.objects.filter(
user=context.request.user
).filter(read=False).count()
link_current_user_events = Link(
icon_class=icon_events_list, text=_('My events'),
view='events:current_user_events'
)
link_events_details = Link(
text=_('Events'), view='events:events_list'
)
link_events_for_object = Link(
icon_class=icon_events_for_object,
kwargs=get_kwargs_factory('resolved_object'),
permissions=(permission_events_view,), text=_('Events'),
view='events:events_for_object',
)
link_events_list = Link(
icon_class=icon_events_list, permissions=(permission_events_view,),
text=_('Events'), view='events:events_list'
)
link_event_types_subscriptions_list = Link(
icon_class=icon_event_types_subscriptions_list,
text=_('Event subscriptions'),
view='events:event_types_user_subcriptions_list'
)
link_notification_mark_read = Link(
args='object.pk', text=_('Mark as seen'),
view='events:notification_mark_read'
)
link_notification_mark_read_all = Link(
text=_('Mark all as seen'), view='events:notification_mark_read_all'
)
link_object_event_types_user_subcriptions_list = Link(
icon_class=icon_object_event_types_user_subcriptions_list,
kwargs=get_kwargs_factory('resolved_object'),
permissions=(permission_events_view,), text=_('Subscriptions'),
view='events:object_event_types_user_subcriptions_list',
)
link_user_notifications_list = Link(
badge_text=get_unread_notification_count,
icon_class=icon_user_notifications_list, text='',
view='events:user_notifications_list'
)
| 31.690476
| 72
| 0.745304
|
3b4cd2784053319a8e29f158bd70768576959baf
| 118,528
|
py
|
Python
|
python/cudf/cudf/core/column/string.py
|
vuule/cudf
|
efebcd1452692ee15f3d30627a9ef3d0cafa85d5
|
[
"Apache-2.0"
] | null | null | null |
python/cudf/cudf/core/column/string.py
|
vuule/cudf
|
efebcd1452692ee15f3d30627a9ef3d0cafa85d5
|
[
"Apache-2.0"
] | 1
|
2022-01-18T19:36:35.000Z
|
2022-01-18T19:36:35.000Z
|
python/cudf/cudf/core/column/string.py
|
vuule/cudf
|
efebcd1452692ee15f3d30627a9ef3d0cafa85d5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
import pickle
import warnings
from codecs import decode
import numpy as np
import pandas as pd
import pyarrow as pa
import cudf._lib as libcudf
import cudf._lib.string_casting as str_cast
from cudf._lib.nvtext.generate_ngrams import (
generate_ngrams as cpp_generate_ngrams,
)
from cudf._lib.nvtext.ngrams_tokenize import (
ngrams_tokenize as cpp_ngrams_tokenize,
)
from cudf._lib.nvtext.normalize import normalize_spaces as cpp_normalize_spaces
from cudf._lib.nvtext.tokenize import (
count_tokens as cpp_count_tokens,
tokenize as cpp_tokenize,
)
from cudf._lib.nvtx import annotate
from cudf._lib.strings.attributes import (
code_points as cpp_code_points,
count_characters as cpp_count_characters,
)
from cudf._lib.strings.capitalize import (
capitalize as cpp_capitalize,
title as cpp_title,
)
from cudf._lib.strings.case import (
swapcase as cpp_swapcase,
to_lower as cpp_to_lower,
to_upper as cpp_to_upper,
)
from cudf._lib.strings.char_types import (
is_alnum as cpp_is_alnum,
is_alpha as cpp_is_alpha,
is_decimal as cpp_is_decimal,
is_digit as cpp_is_digit,
is_float as cpp_is_float,
is_integer as cpp_is_integer,
is_lower as cpp_is_lower,
is_numeric as cpp_is_numeric,
is_space as cpp_isspace,
is_upper as cpp_is_upper,
)
from cudf._lib.strings.combine import (
concatenate as cpp_concatenate,
join as cpp_join,
)
from cudf._lib.strings.contains import (
contains_re as cpp_contains_re,
count_re as cpp_count_re,
match_re as cpp_match_re,
)
from cudf._lib.strings.convert.convert_urls import (
url_decode as cpp_url_decode,
url_encode as cpp_url_encode,
)
from cudf._lib.strings.extract import extract as cpp_extract
from cudf._lib.strings.find import (
contains as cpp_contains,
endswith as cpp_endswith,
find as cpp_find,
rfind as cpp_rfind,
startswith as cpp_startswith,
)
from cudf._lib.strings.findall import findall as cpp_findall
from cudf._lib.strings.padding import (
PadSide,
center as cpp_center,
ljust as cpp_ljust,
pad as cpp_pad,
rjust as cpp_rjust,
zfill as cpp_zfill,
)
from cudf._lib.strings.replace import (
insert as cpp_string_insert,
replace as cpp_replace,
replace_multi as cpp_replace_multi,
slice_replace as cpp_slice_replace,
)
from cudf._lib.strings.replace_re import (
replace_multi_re as cpp_replace_multi_re,
replace_re as cpp_replace_re,
replace_with_backrefs as cpp_replace_with_backrefs,
)
from cudf._lib.strings.split.partition import (
partition as cpp_partition,
rpartition as cpp_rpartition,
)
from cudf._lib.strings.split.split import (
rsplit as cpp_rsplit,
split as cpp_split,
)
from cudf._lib.strings.strip import (
lstrip as cpp_lstrip,
rstrip as cpp_rstrip,
strip as cpp_strip,
)
from cudf._lib.strings.substring import (
get as cpp_string_get,
slice_from as cpp_slice_from,
slice_strings as cpp_slice_strings,
)
from cudf._lib.strings.translate import translate as cpp_translate
from cudf._lib.strings.wrap import wrap as cpp_wrap
from cudf.core.buffer import Buffer
from cudf.core.column import column, datetime
from cudf.utils import utils
from cudf.utils.dtypes import is_list_like, is_scalar
_str_to_numeric_typecast_functions = {
np.dtype("int8"): str_cast.stoi8,
np.dtype("int16"): str_cast.stoi16,
np.dtype("int32"): str_cast.stoi,
np.dtype("int64"): str_cast.stol,
np.dtype("float32"): str_cast.stof,
np.dtype("float64"): str_cast.stod,
np.dtype("bool"): str_cast.to_booleans,
# TODO: support Date32 UNIX days
# np.dtype("datetime64[D]"): str_cast.timestamp2int,
np.dtype("datetime64[s]"): str_cast.timestamp2int,
np.dtype("datetime64[ms]"): str_cast.timestamp2int,
np.dtype("datetime64[us]"): str_cast.timestamp2int,
np.dtype("datetime64[ns]"): str_cast.timestamp2int,
}
_numeric_to_str_typecast_functions = {
np.dtype("int8"): str_cast.i8tos,
np.dtype("int16"): str_cast.i16tos,
np.dtype("int32"): str_cast.itos,
np.dtype("int64"): str_cast.ltos,
np.dtype("float32"): str_cast.ftos,
np.dtype("float64"): str_cast.dtos,
np.dtype("bool"): str_cast.from_booleans,
# TODO: support Date32 UNIX days
# np.dtype("datetime64[D]"): str_cast.int2timestamp,
np.dtype("datetime64[s]"): str_cast.int2timestamp,
np.dtype("datetime64[ms]"): str_cast.int2timestamp,
np.dtype("datetime64[us]"): str_cast.int2timestamp,
np.dtype("datetime64[ns]"): str_cast.int2timestamp,
}
class StringMethods(object):
def __init__(self, column, parent=None):
"""
Vectorized string functions for Series and Index.
This mimics the pandas ``df.str`` interface. Nulls stay null
unless handled otherwise by a particular method.
Patterned after Python’s string methods, with some
inspiration from R’s stringr package.
"""
self._column = column
self._parent = parent
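# Minimal usage sketch (illustrative only, not part of the original class):
#
#     import cudf
#     s = cudf.Series(["Apple", None, "banana"])
#     s.str.lower()   # 0 apple / 1 None / 2 banana
#     s.str.len()     # 0 5 / 1 null / 2 6
#
# Each accessor call dispatches to the libcudf bindings imported above and
# wraps the result back into the parent type via _return_or_inplace below.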
def htoi(self):
"""
Returns integer value represented by each hex string.
Each string is interpreted as hexadecimal (base-16) characters.
Returns
-------
Series/Index of str dtype
Examples
--------
>>> import cudf
>>> s = cudf.Series(["1234", "ABCDEF", "1A2", "cafe"])
>>> s.str.htoi()
0 4660
1 11259375
2 418
3 51966
dtype: int64
"""
out = str_cast.htoi(self._column)
return self._return_or_inplace(out, inplace=False)
def ip2int(self):
"""
This converts IP address strings to integers.
Returns
-------
Series/Index of str dtype
Examples
--------
>>> import cudf
>>> s = cudf.Series(["12.168.1.1", "10.0.0.1"])
>>> s.str.ip2int()
0 212336897
1 167772161
dtype: int64
Returns 0 for any string that is not a valid IP address.
>>> s = cudf.Series(["12.168.1.1", "10.0.0.1", "abc"])
>>> s.str.ip2int()
0 212336897
1 167772161
2 0
dtype: int64
"""
out = str_cast.ip2int(self._column)
return self._return_or_inplace(out, inplace=False)
def _return_or_inplace(self, new_col, **kwargs):
"""
Returns an object of the type of the column owner or updates the column
of the owner (Series or Index) to mimic an inplace operation
"""
from cudf import Series, DataFrame, MultiIndex
from cudf.core.index import Index, as_index
inplace = kwargs.get("inplace", False)
if inplace:
self._parent._mimic_inplace(new_col, inplace=True)
else:
expand = kwargs.get("expand", False)
if expand or isinstance(self._parent, (DataFrame, MultiIndex)):
# This branch indicates the passed as new_col
# is actually a table-like data
table = new_col
from cudf._lib.table import Table
if isinstance(table, Table):
if isinstance(self._parent, Index):
idx = self._parent._constructor_expanddim._from_table(
table=table
)
idx.names = None
return idx
else:
return self._parent._constructor_expanddim(
data=table._data, index=self._parent.index
)
else:
return self._parent._constructor_expanddim(
{index: value for index, value in enumerate(table)},
index=self._parent.index,
)
elif isinstance(self._parent, Series):
retain_index = kwargs.get("retain_index", True)
if retain_index:
return Series(
new_col,
name=self._parent.name,
index=self._parent.index,
)
else:
return Series(new_col, name=self._parent.name)
elif isinstance(self._parent, Index):
return as_index(new_col, name=self._parent.name)
else:
if self._parent is None:
return new_col
else:
return self._parent._mimic_inplace(new_col, inplace=False)
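# Dispatch sketch (descriptive comments added for clarity; behaviour follows
# the branches above, assuming the public constructors shown):
#
#     cudf.Series(['a']).str.upper()                  # Series parent -> Series
#     cudf.core.index.StringIndex(['a']).str.upper()  # Index parent  -> Index
#     cudf.Series(['a b']).str.split()                # expand=True   -> DataFrame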
def __getitem__(self, key):
if isinstance(key, slice):
return self.slice(start=key.start, stop=key.stop, step=key.step)
else:
return self.get(key)
def len(self, **kwargs):
"""
Computes the length of each element in the Series/Index.
Returns : Series or Index of int
A Series or Index of integer values
indicating the length of each element in the Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["dog", "", "\\n", None])
>>> s.str.len()
0 3
1 0
2 1
3 null
dtype: int32
"""
return self._return_or_inplace(
cpp_count_characters(self._column), **kwargs,
)
def cat(self, others=None, sep=None, na_rep=None, **kwargs):
"""
Concatenate strings in the Series/Index with given separator.
If ``others`` is specified, this function concatenates the Series/Index
and elements of others element-wise. If others is not passed, then all
values in the Series/Index are concatenated into a single string with
a given sep.
Parameters
----------
others : Series or List of str
Strings to be appended.
The number of strings must match ``size()`` of this instance.
This must be either a Series of string dtype or a Python
list of strings.
sep : str
If specified, this separator will be appended to each string
before appending the others.
na_rep : str
This character will take the place of any null strings
(not empty strings) in either list.
- If ``na_rep`` is ``None``, and ``others`` is ``None``,
missing values in the Series/Index are
omitted from the result.
- If ``na_rep`` is ``None``, and ``others`` is
not ``None``, a row containing a missing value
in any of the columns (before concatenation)
will have a missing value in the result.
Returns
-------
concat : str or Series/Index of str dtype
If ``others`` is ``None``, ``str`` is returned,
otherwise a ``Series/Index`` (same type as caller)
of str dtype is returned.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['a', 'b', None, 'd'])
>>> s.str.cat(sep=' ')
'a b d'
By default, NA values in the Series are ignored. Using na_rep, they
can be given a representation:
>>> s.str.cat(sep=' ', na_rep='?')
'a b ? d'
If others is specified, corresponding values are concatenated with
the separator. Result will be a Series of strings.
>>> s.str.cat(['A', 'B', 'C', 'D'], sep=',')
0 a,A
1 b,B
2 None
3 d,D
dtype: object
Missing values will remain missing in the result, but can again be
represented using na_rep
>>> s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')
0 a,A
1 b,B
2 -,C
3 d,D
dtype: object
If sep is not specified, the values are concatenated without
separation.
>>> s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')
0 aA
1 bB
2 -C
3 dD
dtype: object
"""
from cudf.core import DataFrame
if sep is None:
sep = ""
from cudf._lib.scalar import as_scalar
if others is None:
data = cpp_join(
self._column, as_scalar(sep), as_scalar(na_rep, "str")
)
else:
other_cols = _get_cols_list(others)
all_cols = [self._column] + other_cols
data = cpp_concatenate(
DataFrame(
{index: value for index, value in enumerate(all_cols)}
),
as_scalar(sep),
as_scalar(na_rep, "str"),
)
if len(data) == 1 and data.null_count == 1:
data = [""]
out = self._return_or_inplace(data, **kwargs)
if len(out) == 1 and others is None:
out = out.iloc[0]
return out
def join(self, sep):
"""
Join lists contained as elements in the Series/Index with passed
delimiter.
Raises : NotImplementedError
Columns of arrays / lists are not yet supported.
"""
raise NotImplementedError(
"Columns of arrays / lists are not yet " "supported"
)
def extract(self, pat, flags=0, expand=True, **kwargs):
"""
Extract capture groups in the regex `pat` as columns in a DataFrame.
For each subject string in the Series, extract groups from the first
match of regular expression `pat`.
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
expand : bool, default True
If True, return DataFrame with one column per capture group.
If False, return a Series/Index if there is one capture group or
DataFrame if there are multiple capture groups.
Returns
-------
DataFrame or Series/Index
A DataFrame with one row for each subject string, and one column
for each group. If `expand=False` and `pat` has only one capture
group, then return a Series/Index.
Notes
-----
The `flags` parameter is not yet supported and will raise a
NotImplementedError if anything other than the default value is passed.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['a1', 'b2', 'c3'])
>>> s.str.extract(r'([ab])(\d)') # noqa W605
0 1
0 a 1
1 b 2
2 None None
A pattern with one group will return a DataFrame with one
column if expand=True.
>>> s.str.extract(r'[ab](\d)', expand=True) # noqa W605
0
0 1
1 2
2 None
A pattern with one group will return a Series if expand=False.
>>> s.str.extract(r'[ab](\d)', expand=False) # noqa W605
0 1
1 2
2 None
dtype: object
"""
if flags != 0:
raise NotImplementedError("`flags` parameter is not yet supported")
out = cpp_extract(self._column, pat)
if out._num_columns == 1 and expand is False:
return self._return_or_inplace(out._columns[0], **kwargs)
else:
kwargs.setdefault("expand", expand)
return self._return_or_inplace(out, **kwargs)
def contains(
self, pat, case=True, flags=0, na=np.nan, regex=True, **kwargs
):
"""
Test if pattern or regex is contained within a string of a Series or
Index.
Return boolean Series or Index based on whether a given pattern or
regex is contained within a string of a Series or Index.
Parameters
----------
pat : str
Character sequence or regular expression.
regex : bool, default True
If True, assumes the pattern is a regular expression.
If False, treats the pattern as a literal string.
Returns
-------
Series/Index of bool dtype
A Series/Index of boolean dtype indicating whether the given
pattern is contained within the string of each element of the
Series/Index.
Notes
-----
The parameters `case`, `flags`, and `na` are not yet supported and
will raise a NotImplementedError if anything other than the default
value is set.
Examples
--------
>>> import cudf
>>> s1 = cudf.Series(['Mouse', 'dog', 'house and parrot', '23', None])
>>> s1
0 Mouse
1 dog
2 house and parrot
3 23
4 None
dtype: object
>>> s1.str.contains('og', regex=False)
0 False
1 True
2 False
3 False
4 null
dtype: bool
Returning an Index of booleans using only a literal pattern.
>>> import numpy as np
>>> data = ['Mouse', 'dog', 'house and parrot', '23.0', np.NaN]
>>> ind = cudf.core.index.StringIndex(data)
>>> ind.str.contains('23', regex=False)
Index(['False', 'False', 'False', 'True', 'null'], dtype='object')
Returning ‘house’ or ‘dog’ when either expression occurs in a string.
>>> s1.str.contains('house|dog', regex=True)
0 False
1 True
2 True
3 False
4 null
dtype: bool
Returning any digit using regular expression.
>>> s1.str.contains('\d', regex=True) # noqa W605
0 False
1 False
2 False
3 True
4 null
dtype: bool
Ensure ``pat`` is not a literal pattern when ``regex`` is set
to True. Note in the following example one might expect
only `s2[1]` and `s2[3]` to return True. However,
‘.0’ as a regex matches any character followed by a 0.
>>> s2 = cudf.Series(['40', '40.0', '41', '41.0', '35'])
>>> s2.str.contains('.0', regex=True)
0 True
1 True
2 False
3 True
4 False
dtype: bool
"""
if case is not True:
raise NotImplementedError("`case` parameter is not yet supported")
elif flags != 0:
raise NotImplementedError("`flags` parameter is not yet supported")
elif na is not np.nan:
raise NotImplementedError("`na` parameter is not yet supported")
from cudf._lib.scalar import as_scalar
return self._return_or_inplace(
cpp_contains_re(self._column, pat)
if regex is True
else cpp_contains(self._column, as_scalar(pat, "str")),
**kwargs,
)
def replace(
self, pat, repl, n=-1, case=None, flags=0, regex=True, **kwargs
):
"""
Replace occurrences of pattern/regex in the Series/Index with some
other string. Equivalent to `str.replace()
<https://docs.python.org/3/library/stdtypes.html#str.replace>`_
or `re.sub()
<https://docs.python.org/3/library/re.html#re.sub>`_.
Parameters
----------
pat : str or list-like
String(s) to be replaced as a character sequence or regular
expression.
repl : str or list-like
String(s) to be used as replacement.
n : int, default -1 (all)
Number of replacements to make from the start.
regex : bool, default True
If True, assumes the pattern is a regular expression.
If False, treats the pattern as a literal string.
Returns
-------
Series/Index of str dtype
A copy of the object with all matching occurrences of pat replaced
by repl.
Notes
-----
The parameters `case` and `flags` are not yet supported and will raise
a `NotImplementedError` if anything other than the default value
is set.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['foo', 'fuz', None])
>>> s
0 foo
1 fuz
2 None
dtype: object
When pat is a string and regex is True (the default), the given pat
is compiled as a regex. When repl is a string, it replaces matching
regex patterns as with ``re.sub()``. NaN value(s) in the Series
are left as is:
>>> s.str.replace('f.', 'ba', regex=True)
0 bao
1 baz
2 None
dtype: object
When pat is a string and `regex` is False, every pat is replaced
with repl as with ``str.replace()``:
>>> s.str.replace('f.', 'ba', regex=False)
0 foo
1 fuz
2 None
dtype: object
"""
if case is not None:
raise NotImplementedError("`case` parameter is not yet supported")
if flags != 0:
raise NotImplementedError("`flags` parameter is not yet supported")
from cudf.core import Series, Index
if (
is_list_like(pat)
or isinstance(pat, (Series, Index, pd.Series, pd.Index))
) and (
is_list_like(repl)
or isinstance(repl, (Series, Index, pd.Series, pd.Index))
):
warnings.warn(
"`n` parameter is not supported when \
`pat` and `repl` are list-like inputs"
)
return self._return_or_inplace(
cpp_replace_multi_re(
self._column, pat, column.as_column(repl, dtype="str")
)
if regex
else cpp_replace_multi(
self._column,
column.as_column(pat, dtype="str"),
column.as_column(repl, dtype="str"),
),
**kwargs,
)
# Pandas treats 0 as all
if n == 0:
n = -1
from cudf._lib.scalar import as_scalar
# Pandas forces non-regex replace when pat is a single-character
return self._return_or_inplace(
cpp_replace_re(self._column, pat, as_scalar(repl, "str"), n)
if regex is True and len(pat) > 1
else cpp_replace(
self._column, as_scalar(pat, "str"), as_scalar(repl, "str"), n
),
**kwargs,
)
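# Note on the single-character branch above (a sketch, not part of the
# original source): replacement only takes the regex path when
# ``regex is True and len(pat) > 1``, so a one-character pattern such as '.'
# is replaced literally, e.g.
#
#     cudf.Series(['a.b', 'c']).str.replace('.', '_')
#     # -> ['a_b', 'c']   (the dot is not treated as "any character")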
def replace_with_backrefs(self, pat, repl, **kwargs):
"""
Use the ``repl`` back-ref template to create a new string
with the extracted elements found using the ``pat`` expression.
Parameters
----------
pat : str
Regex with groupings to identify extract sections.
This should not be a compiled regex.
repl : str
String template containing back-reference indicators.
Returns
-------
Series/Index of str dtype
Examples
--------
>>> import cudf
>>> s = cudf.Series(["A543","Z756"])
>>> s.str.replace_with_backrefs('(\\d)(\\d)', 'V\\2\\1')
0 AV453
1 ZV576
dtype: object
"""
return self._return_or_inplace(
cpp_replace_with_backrefs(self._column, pat, repl), **kwargs
)
def slice(self, start=None, stop=None, step=None, **kwargs):
"""
Slice substrings from each element in the Series or Index.
Parameters
----------
start : int, optional
Start position for slice operation.
stop : int, optional
Stop position for slice operation.
step : int, optional
Step size for slice operation.
Returns
-------
Series/Index of str dtype
Series or Index from sliced substring from
original string object.
See also
--------
slice_replace
Replace a slice with a string.
get
Return element at position. Equivalent
to ``Series.str.slice(start=i, stop=i+1)``
with ``i`` being the position.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["koala", "fox", "chameleon"])
>>> s
0 koala
1 fox
2 chameleon
dtype: object
>>> s.str.slice(start=1)
0 oala
1 ox
2 hameleon
dtype: object
>>> s.str.slice(start=-1)
0 a
1 x
2 n
dtype: object
>>> s.str.slice(stop=2)
0 ko
1 fo
2 ch
dtype: object
>>> s.str.slice(step=2)
0 kaa
1 fx
2 caeen
dtype: object
>>> s.str.slice(start=0, stop=5, step=3)
0 kl
1 f
2 cm
dtype: object
"""
return self._return_or_inplace(
cpp_slice_strings(self._column, start, stop, step), **kwargs,
)
def isdecimal(self, **kwargs):
"""
Check whether all characters in each string are decimal.
This is equivalent to running the Python string method
`str.isdecimal()
<https://docs.python.org/3/library/stdtypes.html#str.isdecimal>`_
for each element of the Series/Index.
If a string has zero characters, False is returned for
that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See also
--------
isalpha
Check whether all characters are alphabetic.
isnumeric
Check whether all characters are numeric.
isalnum
Check whether all characters are alphanumeric.
isdigit
Check whether all characters are digits.
isspace
Check whether all characters are whitespace.
islower
Check whether all characters are lowercase.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s3 = cudf.Series(['23', '³', '⅕', ''])
The s3.str.isdecimal method checks for characters used to form
numbers in base 10.
>>> s3.str.isdecimal()
0 True
1 False
2 False
3 False
dtype: bool
"""
return self._return_or_inplace(cpp_is_decimal(self._column), **kwargs)
def isalnum(self, **kwargs):
"""
Check whether all characters in each string are alphanumeric.
This is equivalent to running the Python string method
`str.isalnum()
<https://docs.python.org/3/library/stdtypes.html#str.isalnum>`_
for each element of the Series/Index. If a string has zero
characters, False is returned for that check.
Equivalent to: ``isalpha() or isdigit() or isnumeric() or isdecimal()``
Returns : Series or Index of bool
Series or Index of boolean values with the
same length as the original Series/Index.
See also
--------
isalpha
Check whether all characters are alphabetic.
isnumeric
Check whether all characters are numeric.
isdigit
Check whether all characters are digits.
isdecimal
Check whether all characters are decimal.
isspace
Check whether all characters are whitespace.
islower
Check whether all characters are lowercase.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s1 = cudf.Series(['one', 'one1', '1', ''])
>>> s1.str.isalnum()
0 True
1 True
2 True
3 False
dtype: bool
Note that checks against characters mixed with
any additional punctuation or whitespace will
evaluate to false for an alphanumeric check.
>>> s2 = cudf.Series(['A B', '1.5', '3,000'])
>>> s2.str.isalnum()
0 False
1 False
2 False
dtype: bool
"""
return self._return_or_inplace(cpp_is_alnum(self._column), **kwargs)
def isalpha(self, **kwargs):
"""
Check whether all characters in each string are alphabetic.
This is equivalent to running the Python string method
`str.isalpha()
<https://docs.python.org/3/library/stdtypes.html#str.isalpha>`_
for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same length
as the original Series/Index.
See also
--------
isnumeric
Check whether all characters are numeric.
isalnum
Check whether all characters are alphanumeric.
isdigit
Check whether all characters are digits.
isdecimal
Check whether all characters are decimal.
isspace
Check whether all characters are whitespace.
islower
Check whether all characters are lowercase.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s1 = cudf.Series(['one', 'one1', '1', ''])
>>> s1.str.isalpha()
0 True
1 False
2 False
3 False
dtype: bool
"""
return self._return_or_inplace(cpp_is_alpha(self._column), **kwargs)
def isdigit(self, **kwargs):
"""
Check whether all characters in each string are digits.
This is equivalent to running the Python string method
`str.isdigit()
<https://docs.python.org/3/library/stdtypes.html#str.isdigit>`_
for each element of the Series/Index.
If a string has zero characters, False is returned
for that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See also
--------
isalpha
Check whether all characters are alphabetic.
isnumeric
Check whether all characters are numeric.
isalnum
Check whether all characters are alphanumeric.
isdecimal
Check whether all characters are decimal.
isspace
Check whether all characters are whitespace.
islower
Check whether all characters are lowercase.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['23', '³', '⅕', ''])
The ``s.str.isdigit`` method is the same as ``s.str.isdecimal`` but
also includes special digits, like superscripted and
subscripted digits in unicode.
>>> s.str.isdigit()
0 True
1 True
2 False
3 False
dtype: bool
"""
return self._return_or_inplace(cpp_is_digit(self._column), **kwargs)
def isnumeric(self, **kwargs):
"""
Check whether all characters in each string are numeric.
This is equivalent to running the Python string method
`str.isnumeric()
<https://docs.python.org/3/library/stdtypes.html#str.isnumeric>`_
for each element of the Series/Index. If a
string has zero characters, False is returned for that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See also
--------
isalpha
Check whether all characters are alphabetic.
isalnum
Check whether all characters are alphanumeric.
isdigit
Check whether all characters are digits.
isdecimal
Check whether all characters are decimal.
isspace
Check whether all characters are whitespace.
islower
Check whether all characters are lowercase.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s1 = cudf.Series(['one', 'one1', '1', ''])
>>> s1.str.isnumeric()
0 False
1 False
2 True
3 False
dtype: bool
The ``isnumeric`` check is the same as ``isdigit`` but
also includes other characters that can represent
quantities such as unicode fractions.
>>> s2 = cudf.Series(['23', '³', '⅕', ''])
>>> s2.str.isnumeric()
0 True
1 True
2 True
3 False
dtype: bool
"""
return self._return_or_inplace(cpp_is_numeric(self._column), **kwargs)
def isupper(self, **kwargs):
"""
Check whether all characters in each string are uppercase.
This is equivalent to running the Python string method
`str.isupper()
<https://docs.python.org/3/library/stdtypes.html#str.isupper>`_
for each element of the Series/Index.
If a string has zero characters, False is returned
for that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See also
--------
isalpha
Check whether all characters are alphabetic.
isnumeric
Check whether all characters are numeric.
isalnum
Check whether all characters are alphanumeric.
isdigit
Check whether all characters are digits.
isdecimal
Check whether all characters are decimal.
isspace
Check whether all characters are whitespace.
islower
Check whether all characters are lowercase.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s.str.isupper()
0 False
1 False
2 True
3 False
dtype: bool
"""
return self._return_or_inplace(cpp_is_upper(self._column), **kwargs)
def islower(self, **kwargs):
"""
Check whether all characters in each string are lowercase.
This is equivalent to running the Python string method
`str.islower()
<https://docs.python.org/3/library/stdtypes.html#str.islower>`_
for each element of the Series/Index.
If a string has zero characters, False is returned
for that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See also
--------
isalpha
Check whether all characters are alphabetic.
isnumeric
Check whether all characters are numeric.
isalnum
Check whether all characters are alphanumeric.
isdigit
Check whether all characters are digits.
isdecimal
Check whether all characters are decimal.
isspace
Check whether all characters are whitespace.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s.str.islower()
0 True
1 False
2 False
3 False
dtype: bool
"""
return self._return_or_inplace(cpp_is_lower(self._column), **kwargs)
def lower(self, **kwargs):
"""
Converts all characters to lowercase.
Equivalent to `str.lower()
<https://docs.python.org/3/library/stdtypes.html#str.lower>`_.
Returns : Series or Index of object
A copy of the object with all strings converted to lowercase.
See also
--------
upper
Converts all characters to uppercase.
title
Converts first character of each word to uppercase and remaining
to lowercase.
capitalize
Converts first character to uppercase and remaining to lowercase.
swapcase
Converts uppercase to lowercase and lowercase to uppercase.
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']
>>> s = cudf.Series(data)
>>> s.str.lower()
0 lower
1 capitals
2 this is a sentence
3 swapcase
dtype: object
"""
return self._return_or_inplace(cpp_to_lower(self._column), **kwargs)
def upper(self, **kwargs):
"""
Convert each string to uppercase.
This only applies to ASCII characters at this time.
Equivalent to `str.upper()
<https://docs.python.org/3/library/stdtypes.html#str.upper>`_.
Returns : Series or Index of object
See also
--------
lower
Converts all characters to lowercase.
upper
Converts all characters to uppercase.
title
Converts first character of each word to uppercase and
remaining to lowercase.
capitalize
Converts first character to uppercase and remaining to
lowercase.
swapcase
Converts uppercase to lowercase and lowercase to uppercase.
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']
>>> s = cudf.Series(data)
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.upper()
0 LOWER
1 CAPITALS
2 THIS IS A SENTENCE
3 SWAPCASE
dtype: object
"""
return self._return_or_inplace(cpp_to_upper(self._column), **kwargs)
def capitalize(self, **kwargs):
"""
Convert strings in the Series/Index to be capitalized.
This only applies to ASCII characters at this time.
Returns
-------
Series or Index of object
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']
>>> s = cudf.Series(data)
>>> s.str.capitalize()
0 Lower
1 Capitals
2 This is a sentence
3 Swapcase
dtype: object
>>> s = cudf.Series(["hello, friend","goodbye, friend"])
>>> s.str.capitalize()
0 Hello, friend
1 Goodbye, friend
dtype: object
"""
return self._return_or_inplace(cpp_capitalize(self._column), **kwargs)
def swapcase(self, **kwargs):
"""
Change each lowercase character to uppercase and vice versa.
This only applies to ASCII characters at this time.
Equivalent to `str.swapcase()
<https://docs.python.org/3/library/stdtypes.html#str.swapcase>`_.
Returns : Series or Index of object
See also
--------
lower
Converts all characters to lowercase.
upper
Converts all characters to uppercase.
title
Converts first character of each word to uppercase and remaining
to lowercase.
capitalize
Converts first character to uppercase and remaining to lowercase.
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']
>>> s = cudf.Series(data)
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.swapcase()
0 LOWER
1 capitals
2 THIS IS A SENTENCE
3 sWaPcAsE
dtype: object
"""
return self._return_or_inplace(cpp_swapcase(self._column), **kwargs)
def title(self, **kwargs):
"""
Uppercase the first letter of each word
and lowercase the rest.
This only applies to ASCII characters at this time.
Equivalent to `str.title()
<https://docs.python.org/3/library/stdtypes.html#str.title>`_.
Returns : Series or Index of object
See also
--------
lower
Converts all characters to lowercase.
upper
Converts all characters to uppercase.
capitalize
Converts first character to uppercase and remaining to lowercase.
swapcase
Converts uppercase to lowercase and lowercase to uppercase.
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']
>>> s = cudf.Series(data)
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.title()
0 Lower
1 Capitals
2 This Is A Sentence
3 Swapcase
dtype: object
"""
return self._return_or_inplace(cpp_title(self._column), **kwargs)
def slice_from(self, starts, stops, **kwargs):
"""
Return substring of each string using positions for each string.
The starts and stops parameters are of Column type.
Parameters
----------
starts : Series
Beginning position of each string to extract.
Default is the beginning of each string.
stops : Series
Ending position of each string to extract.
Default is the end of each string.
Use -1 to specify the end of that string.
Returns
-------
Series/Index of str dtype
A substring of each string using positions for each string.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["hello","there"])
>>> s
0 hello
1 there
dtype: object
>>> starts = cudf.Series([1, 3])
>>> stops = cudf.Series([5, 5])
>>> s.str.slice_from(starts, stops)
0 ello
1 re
dtype: object
"""
return self._return_or_inplace(
cpp_slice_from(
self._column, column.as_column(starts), column.as_column(stops)
),
**kwargs,
)
def slice_replace(self, start=None, stop=None, repl=None, **kwargs):
"""
Replace the specified section of each string with a new string.
Parameters
----------
start : int, optional
Beginning position of the string to replace.
Default is the beginning of each string.
stop : int, optional
Ending position of the string to replace.
Default is end of each string.
repl : str, optional
String to insert into the specified position values.
Returns
-------
Series/Index of str dtype
A new string with the specified section of the string
replaced with `repl` string.
See also
--------
slice
Just slicing without replacement.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])
>>> s
0 a
1 ab
2 abc
3 abdc
4 abcde
dtype: object
Specify just `start`, meaning replace `start` until the `end` of
the string with `repl`.
>>> s.str.slice_replace(1, repl='X')
0 aX
1 aX
2 aX
3 aX
4 aX
dtype: object
Specify just `stop`, meaning the `start` of the string to `stop`
is replaced with `repl`, and the rest of the string is included.
>>> s.str.slice_replace(stop=2, repl='X')
0 X
1 X
2 Xc
3 Xdc
4 Xcde
dtype: object
Specify `start` and `stop`, meaning the slice from `start`
to `stop` is replaced with `repl`. Everything before or
after `start` and `stop` is included as is.
>>> s.str.slice_replace(start=1, stop=3, repl='X')
0 aX
1 aX
2 aX
3 aXc
4 aXde
dtype: object
"""
if start is None:
start = 0
if stop is None:
stop = -1
if repl is None:
repl = ""
from cudf._lib.scalar import as_scalar
return self._return_or_inplace(
cpp_slice_replace(self._column, start, stop, as_scalar(repl)),
**kwargs,
)
def insert(self, start=0, repl=None, **kwargs):
"""
Insert the specified string into each string in the specified
position.
Parameters
----------
start : int
Beginning position of the string to replace.
Default is the beginning of each string.
Specify -1 to insert at the end of each string.
repl : str
String to insert into the specified position value.
Returns
-------
Series/Index of str dtype
A new string series with the specified string
inserted at the specified position.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["abcdefghij", "0123456789"])
>>> s.str.insert(2, '_')
0 ab_cdefghij
1 01_23456789
dtype: object
When no `repl` is passed, nothing is inserted.
>>> s.str.insert(2)
0 abcdefghij
1 0123456789
dtype: object
Negative values are also supported for `start`.
>>> s.str.insert(-1,'_')
0 abcdefghij_
1 0123456789_
dtype: object
"""
if repl is None:
repl = ""
from cudf._lib.scalar import as_scalar
return self._return_or_inplace(
cpp_string_insert(self._column, start, as_scalar(repl)), **kwargs
)
def get(self, i=0, **kwargs):
"""
Extract element from each component at specified position.
Parameters
----------
i : int
Position of element to extract.
Returns
-------
Series/Index of str dtype
Examples
--------
>>> import cudf
>>> s = cudf.Series(["hello world", "rapids", "cudf"])
>>> s
0 hello world
1 rapids
2 cudf
dtype: object
>>> s.str.get(10)
0 d
1
2
dtype: object
>>> s.str.get(1)
0 e
1 a
2 u
dtype: object
``get`` also accepts negative index number.
>>> s.str.get(-1)
0 d
1 s
2 f
dtype: object
"""
return self._return_or_inplace(
cpp_string_get(self._column, i), **kwargs
)
def split(self, pat=None, n=-1, expand=None, **kwargs):
"""
Split strings around given separator/delimiter.
Splits the string in the Series/Index from the beginning, at the
specified delimiter string. Equivalent to `str.split()
<https://docs.python.org/3/library/stdtypes.html#str.split>`_.
Parameters
----------
pat : str, default ' ' (space)
String to split on, does not yet support regular expressions.
n : int, default -1 (all)
Limit number of splits in output. `None`, 0, and -1 will all be
interpreted as "all splits".
Returns
-------
DataFrame
Returns a DataFrame with each split as a column.
See also
--------
rsplit
Splits string around given separator/delimiter, starting from
the right.
str.split
Standard library version for split.
str.rsplit
Standard library version for rsplit.
Notes
-----
The parameter `expand` is not yet supported and will raise a
NotImplementedError if anything other than the default value
is set. The handling of the n keyword depends on the number
of found splits:
- If found splits > n, make first n splits only
- If found splits <= n, make all splits
- If for a certain row the number of found
splits < n, append None for padding up to n
Examples
--------
>>> import cudf
>>> data = ["this is a regular sentence", "https://docs.python.org/index.html", None] # noqa E501
>>> s = cudf.Series(data)
>>> s
0 this is a regular sentence
1 https://docs.python.org/index.html
2 None
dtype: object
The `n` parameter can be used to limit the number of
splits on the delimiter.
>>> s.str.split(n=2)
0 1 2
0 this is a regular sentence
1 https://docs.python.org/index.html None None
2 None None None
The `pat` parameter can be used to split by other characters.
>>> s.str.split(pat = "/")
0 1 2 3
0 this is a regular sentence None None None
1 https: docs.python.org index.html
2 None None None None
"""
if expand is None:
expand = True
warnings.warn("`expand` parameter defatults to True.")
elif expand is not True:
raise NotImplementedError(
"`expand=False` setting is not supported yet"
)
# Pandas treats 0 as all
if n == 0:
n = -1
kwargs.setdefault("expand", expand)
if pat is None:
pat = ""
from cudf._lib.scalar import as_scalar
result_table = cpp_split(self._column, as_scalar(pat, "str"), n)
if len(result_table._data) == 1:
if result_table._data[0].null_count == len(self._column):
result_table = []
elif self._column.null_count == len(self._column):
result_table = [self._column.copy()]
return self._return_or_inplace(result_table, **kwargs,)
def rsplit(self, pat=None, n=-1, expand=None, **kwargs):
"""
Split strings around given separator/delimiter.
Splits the string in the Series/Index from the end, at the
specified delimiter string. Equivalent to `str.rsplit()
<https://docs.python.org/3/library/stdtypes.html#str.rsplit>`_.
Parameters
----------
pat : str, default ' ' (space)
String to split on, does not yet support regular expressions.
n : int, default -1 (all)
Limit number of splits in output. `None`, 0, and -1 will all be
interpreted as "all splits".
Returns
-------
DataFrame or MultiIndex
Returns a DataFrame/MultiIndex with each split as a column.
See also
--------
split
Split strings around given separator/delimiter.
str.split
Standard library version for split.
str.rsplit
Standard library version for rsplit.
Notes
-----
The parameter `expand` is not yet supported and will raise a
`NotImplementedError` if anything other than the default value is
set. The handling of the n keyword depends on the number of
found splits:
- If found splits > n, make first n splits only
- If found splits <= n, make all splits
- If for a certain row the number of found splits < n,
append None for padding up to n.
Examples
--------
>>> import cudf
>>> data = ["this is a regular sentence","https://docs.python.org/3/tutorial/index.html",None] # noqa E501
>>> s = cudf.Series(data)
>>> s.str.rsplit(n=2)
0 1 2
0 this is a regular sentence
1 https://docs.python.org/3/tutorial/index.html None None
2 None None None
For slightly more complex use cases like splitting the
html document name from a url, a combination of parameter
settings can be used.
>>> s.str.rsplit("/", n=1, expand=True)
0 1
0 this is a regular sentence None
1 https://docs.python.org/3/tutorial index.html
2 None None
"""
if expand is None:
expand = True
            warnings.warn("`expand` parameter defaults to True.")
elif expand is not True:
raise NotImplementedError(
"`expand=False` setting is not supported yet"
)
# Pandas treats 0 as all
if n == 0:
n = -1
kwargs.setdefault("expand", expand)
if pat is None:
pat = ""
from cudf._lib.scalar import as_scalar
result_table = cpp_rsplit(self._column, as_scalar(pat), n)
if len(result_table._data) == 1:
if result_table._data[0].null_count == len(self._parent):
result_table = []
elif self._parent.null_count == len(self._parent):
result_table = [self._column.copy()]
return self._return_or_inplace(result_table, **kwargs)
def partition(self, sep=" ", expand=True, **kwargs):
"""
Split the string at the first occurrence of sep.
This method splits the string at the first occurrence
of sep, and returns 3 elements containing the part
before the separator, the separator itself, and the
part after the separator. If the separator is not found,
return 3 elements containing the string itself, followed
by two empty strings.
Parameters
----------
sep : str, default ' ' (whitespace)
String to split on.
Returns
-------
DataFrame or MultiIndex
Returns a DataFrame / MultiIndex
Notes
-----
The parameter `expand` is not yet supported and will raise a
`NotImplementedError` if anything other than the default value is set.
See also
--------
rpartition
Split the string at the last occurrence of sep.
split
Split strings around given separators.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['Linda van der Berg', 'George Pitt-Rivers'])
>>> s
0 Linda van der Berg
1 George Pitt-Rivers
dtype: object
>>> s.str.partition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by something different than a space:
>>> s.str.partition('-')
0 1 2
0 Linda van der Berg
1 George Pitt - Rivers
Also available on indices:
>>> idx = cudf.core.index.StringIndex(['X 123', 'Y 999'])
>>> idx
StringIndex(['X 123' 'Y 999'], dtype='object')
Which will create a MultiIndex:
>>> idx.str.partition()
MultiIndex(levels=[0 X
1 Y
dtype: object, 0
dtype: object, 0 123
1 999
dtype: object],
codes= 0 1 2
0 0 0 0
1 1 0 1)
"""
if expand is not True:
raise NotImplementedError(
"`expand=False` is currently not supported"
)
kwargs.setdefault("expand", expand)
if sep is None:
sep = " "
from cudf._lib.scalar import as_scalar
return self._return_or_inplace(
cpp_partition(self._column, as_scalar(sep)), **kwargs
)
def rpartition(self, sep=" ", expand=True, **kwargs):
"""
Split the string at the last occurrence of sep.
This method splits the string at the last occurrence
of sep, and returns 3 elements containing the part
before the separator, the separator itself, and the
part after the separator. If the separator is not
found, return 3 elements containing two empty strings,
followed by the string itself.
Parameters
----------
sep : str, default ' ' (whitespace)
String to split on.
Returns
-------
DataFrame or MultiIndex
Returns a DataFrame / MultiIndex
Notes
-----
The parameter `expand` is not yet supported and will raise a
`NotImplementedError` if anything other than the default value is set.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['Linda van der Berg', 'George Pitt-Rivers'])
>>> s
0 Linda van der Berg
1 George Pitt-Rivers
dtype: object
>>> s.str.rpartition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
Also available on indices:
>>> idx = cudf.core.index.StringIndex(['X 123', 'Y 999'])
>>> idx
StringIndex(['X 123' 'Y 999'], dtype='object')
Which will create a MultiIndex:
>>> idx.str.rpartition()
MultiIndex(levels=[0 X
1 Y
dtype: object, 0
dtype: object, 0 123
1 999
dtype: object],
codes= 0 1 2
0 0 0 0
1 1 0 1)
"""
if expand is not True:
raise NotImplementedError(
"`expand=False` is currently not supported"
)
kwargs.setdefault("expand", expand)
if sep is None:
sep = " "
from cudf._lib.scalar import as_scalar
return self._return_or_inplace(
cpp_rpartition(self._column, as_scalar(sep)), **kwargs
)
def pad(self, width, side="left", fillchar=" ", **kwargs):
"""
Pad strings in the Series/Index up to width.
Parameters
----------
width : int
Minimum width of resulting string;
additional characters will be filled with
character defined in fillchar.
side : {‘left’, ‘right’, ‘both’}, default ‘left’
Side from which to fill resulting string.
fillchar : str, default ' ' (whitespace)
Additional character for filling, default is whitespace.
Returns
-------
Series/Index of object
Returns Series or Index with minimum number
of char in object.
See also
--------
rjust
Fills the left side of strings with an arbitrary character.
Equivalent to ``Series.str.pad(side='left')``.
ljust
Fills the right side of strings with an arbitrary character.
Equivalent to ``Series.str.pad(side='right')``.
center
Fills boths sides of strings with an arbitrary character.
Equivalent to ``Series.str.pad(side='both')``.
zfill
Pad strings in the Series/Index by prepending ‘0’ character.
Equivalent to ``Series.str.pad(side='left', fillchar='0')``.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["caribou", "tiger"])
>>> s.str.pad(width=10)
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10, side='right', fillchar='-')
0 caribou---
1 tiger-----
dtype: object
>>> s.str.pad(width=10, side='both', fillchar='-')
0 -caribou--
1 --tiger---
dtype: object
"""
if not isinstance(fillchar, str):
msg = (
f"fillchar must be a character, not {type(fillchar).__name__}"
)
raise TypeError(msg)
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not pd.api.types.is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
try:
side = PadSide[side.upper()]
except KeyError:
raise ValueError(
"side has to be either one of {‘left’, ‘right’, ‘both’}"
)
return self._return_or_inplace(
cpp_pad(self._column, width, fillchar, side), **kwargs
)
def zfill(self, width, **kwargs):
"""
Pad strings in the Series/Index by prepending ‘0’ characters.
Strings in the Series/Index are padded with ‘0’ characters
on the left of the string to reach a total string length
width. Strings in the Series/Index with length greater
or equal to width are unchanged.
Parameters
----------
width : int
Minimum length of resulting string;
strings with length less than width
be prepended with ‘0’ characters.
Returns
-------
Series/Index of str dtype
Returns Series or Index with prepended ‘0’ characters.
See also
--------
rjust
Fills the left side of strings with an arbitrary character.
ljust
Fills the right side of strings with an arbitrary character.
pad
Fills the specified sides of strings with an arbitrary character.
center
Fills boths sides of strings with an arbitrary character.
Notes
-----
Differs from `str.zfill()
<https://docs.python.org/3/library/stdtypes.html#str.zfill>`_
which has special handling for ‘+’/’-‘ in the string.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['-1', '1', '1000', None])
>>> s
0 -1
1 1
2 1000
3 None
dtype: object
        Note that ``None`` is not a string, therefore it stays as
        ``None``. The minus sign in ``'-1'`` is treated as a
regular character and the zero is added to the left
of it (`str.zfill()
<https://docs.python.org/3/library/stdtypes.html#str.zfill>`_
would have moved it to the left). ``1000`` remains unchanged as
it is longer than width.
>>> s.str.zfill(3)
0 0-1
1 001
2 1000
3 None
dtype: object
"""
if not pd.api.types.is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
return self._return_or_inplace(
cpp_zfill(self._column, width), **kwargs
)
def center(self, width, fillchar=" ", **kwargs):
"""
Filling left and right side of strings in the Series/Index with an
additional character.
Parameters
----------
width : int
Minimum width of resulting string;
additional characters will be filled
with fillchar.
fillchar : str, default is ' ' (whitespace)
Additional character for filling.
Returns
-------
Series/Index of str dtype
Returns Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['a', 'b', None, 'd'])
>>> s.str.center(1)
0 a
1 b
2 None
3 d
dtype: object
>>> s.str.center(1, fillchar='-')
0 a
1 b
2 None
3 d
dtype: object
>>> s.str.center(2, fillchar='-')
0 a-
1 b-
2 None
3 d-
dtype: object
>>> s.str.center(5, fillchar='-')
0 --a--
1 --b--
2 None
3 --d--
dtype: object
>>> s.str.center(6, fillchar='-')
0 --a---
1 --b---
2 None
3 --d---
dtype: object
"""
if not isinstance(fillchar, str):
msg = (
f"fillchar must be a character, not {type(fillchar).__name__}"
)
raise TypeError(msg)
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not pd.api.types.is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
return self._return_or_inplace(
cpp_center(self._column, width, fillchar), **kwargs
)
def ljust(self, width, fillchar=" ", **kwargs):
"""
Filling right side of strings in the Series/Index with an additional
character. Equivalent to `str.ljust()
<https://docs.python.org/3/library/stdtypes.html#str.ljust>`_.
Parameters
----------
width : int
Minimum width of resulting string;
additional characters will be filled
with ``fillchar``.
fillchar : str, default ' ' (whitespace)
Additional character for filling, default is whitespace.
Returns
-------
Series/Index of str dtype
Returns Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["hello world", "rapids ai"])
>>> s.str.ljust(10, fillchar="_")
0 hello world
1 rapids ai_
dtype: object
>>> s = cudf.Series(["a", "", "ab", "__"])
>>> s.str.ljust(1, fillchar="-")
0 a
1 -
2 ab
3 __
dtype: object
"""
if not isinstance(fillchar, str):
msg = (
f"fillchar must be a character, not {type(fillchar).__name__}"
)
raise TypeError(msg)
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not pd.api.types.is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
return self._return_or_inplace(
cpp_ljust(self._column, width, fillchar), **kwargs
)
def rjust(self, width, fillchar=" ", **kwargs):
"""
Filling left side of strings in the Series/Index with an additional
character. Equivalent to `str.rjust()
<https://docs.python.org/3/library/stdtypes.html#str.rjust>`_.
Parameters
----------
width : int
Minimum width of resulting string;
additional characters will be filled
with fillchar.
fillchar : str, default ' ' (whitespace)
Additional character for filling, default is whitespace.
Returns
-------
Series/Index of str dtype
Returns Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["hello world", "rapids ai"])
>>> s.str.rjust(20, fillchar="_")
0 _________hello world
1 ___________rapids ai
dtype: object
>>> s = cudf.Series(["a", "", "ab", "__"])
>>> s.str.rjust(1, fillchar="-")
0 a
1 -
2 ab
3 __
dtype: object
"""
if not isinstance(fillchar, str):
msg = (
f"fillchar must be a character, not {type(fillchar).__name__}"
)
raise TypeError(msg)
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not pd.api.types.is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
return self._return_or_inplace(
cpp_rjust(self._column, width, fillchar), **kwargs
)
def strip(self, to_strip=None, **kwargs):
"""
Remove leading and trailing characters.
Strip whitespaces (including newlines) or a set of
specified characters from each string in the Series/Index
from left and right sides. Equivalent to `str.strip()
<https://docs.python.org/3/library/stdtypes.html#str.strip>`_.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters
will be stripped. If None then whitespaces are removed.
Returns
-------
Series/Index of str dtype
Returns Series or Index.
See also
--------
lstrip
Remove leading characters in Series/Index.
rstrip
Remove trailing characters in Series/Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['1. Ant. ', '2. Bee!\\n', '3. Cat?\\t', None])
>>> s
0 1. Ant.
1 2. Bee!\\n
2 3. Cat?\\t
3 None
dtype: object
>>> s.str.strip()
0 1. Ant.
1 2. Bee!
2 3. Cat?
3 None
dtype: object
>>> s.str.strip('123.!? \\n\\t')
0 Ant
1 Bee
2 Cat
3 None
dtype: object
"""
if to_strip is None:
to_strip = ""
from cudf._lib.scalar import as_scalar
return self._return_or_inplace(
cpp_strip(self._column, as_scalar(to_strip)), **kwargs
)
def lstrip(self, to_strip=None, **kwargs):
"""
Remove leading and trailing characters.
Strip whitespaces (including newlines)
or a set of specified characters from
each string in the Series/Index from left side.
Equivalent to `str.lstrip()
<https://docs.python.org/3/library/stdtypes.html#str.lstrip>`_.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will
be stripped. If None then whitespaces are removed.
Returns
-------
Series or Index of object
See also
--------
strip
Remove leading and trailing characters in Series/Index.
rstrip
Remove trailing characters in Series/Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['1. Ant. ', '2. Bee!\\n', '3. Cat?\\t', None])
>>> s.str.lstrip('123.')
0 Ant.
1 Bee!\\n
2 Cat?\\t
3 None
dtype: object
"""
if to_strip is None:
to_strip = ""
from cudf._lib.scalar import as_scalar
return self._return_or_inplace(
cpp_lstrip(self._column, as_scalar(to_strip)), **kwargs
)
def rstrip(self, to_strip=None, **kwargs):
"""
Remove leading and trailing characters.
Strip whitespaces (including newlines)
or a set of specified characters from each
string in the Series/Index from right side.
Equivalent to `str.rstrip()
<https://docs.python.org/3/library/stdtypes.html#str.rstrip>`_.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to
be removed. All combinations of this
set of characters will be stripped.
If None then whitespaces are removed.
Returns
-------
Series/Index of str dtype
Returns Series or Index.
See also
--------
strip
Remove leading and trailing characters in Series/Index.
lstrip
Remove leading characters in Series/Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['1. Ant. ', '2. Bee!\\n', '3. Cat?\\t', None])
>>> s
0 1. Ant.
1 2. Bee!\\n
2 3. Cat?\\t
3 None
dtype: object
>>> s.str.rstrip('.!? \\n\\t')
0 1. Ant
1 2. Bee
2 3. Cat
3 None
dtype: object
"""
if to_strip is None:
to_strip = ""
from cudf._lib.scalar import as_scalar
return self._return_or_inplace(
cpp_rstrip(self._column, as_scalar(to_strip)), **kwargs
)
def wrap(self, width, **kwargs):
"""
Wrap long strings in the Series/Index to be formatted in
paragraphs with length less than a given width.
Parameters
----------
width : int
Maximum line width.
Returns
-------
Series or Index
Notes
-----
        The parameters `expand_tabs`, `replace_whitespace`,
        `drop_whitespace`, `break_long_words`, and `break_on_hyphens`
        are not yet supported and will raise a
        NotImplementedError if they are set to any value.
This method currently achieves behavior matching R’s
stringr library ``str_wrap`` function, the equivalent
pandas implementation can be obtained using the
following parameter setting:
expand_tabs = False
replace_whitespace = True
drop_whitespace = True
break_long_words = False
break_on_hyphens = False
Examples
--------
>>> import cudf
>>> data = ['line to be wrapped', 'another line to be wrapped']
>>> s = cudf.Series(data)
>>> s.str.wrap(12)
0 line to be\\nwrapped
1 another line\\nto be\\nwrapped
dtype: object
"""
if not pd.api.types.is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
expand_tabs = kwargs.get("expand_tabs", None)
if expand_tabs is True:
raise NotImplementedError("`expand_tabs=True` is not supported")
elif expand_tabs is None:
warnings.warn(
"wrap current implementation defaults to `expand_tabs`=False"
)
replace_whitespace = kwargs.get("replace_whitespace", True)
if not replace_whitespace:
raise NotImplementedError(
"`replace_whitespace=False` is not supported"
)
drop_whitespace = kwargs.get("drop_whitespace", True)
if not drop_whitespace:
raise NotImplementedError(
"`drop_whitespace=False` is not supported"
)
break_long_words = kwargs.get("break_long_words", None)
if break_long_words is True:
raise NotImplementedError(
"`break_long_words=True` is not supported"
)
elif break_long_words is None:
warnings.warn(
"wrap current implementation defaults to \
`break_long_words`=False"
)
break_on_hyphens = kwargs.get("break_on_hyphens", None)
        if break_on_hyphens is True:
raise NotImplementedError(
"`break_on_hyphens=True` is not supported"
)
elif break_on_hyphens is None:
warnings.warn(
"wrap current implementation defaults to \
`break_on_hyphens`=False"
)
return self._return_or_inplace(cpp_wrap(self._column, width), **kwargs)
def count(self, pat, flags=0, **kwargs):
"""
Count occurrences of pattern in each string of the Series/Index.
This function is used to count the number of times a particular
regex pattern is repeated in each of the string elements of the Series.
Parameters
----------
pat : str
Valid regular expression.
Returns
-------
Series or Index
Notes
-----
- `flags` parameter is currently not supported.
- Some characters need to be escaped when passing
in pat. eg. ``'$'`` has a special meaning in regex
and must be escaped when finding this literal character.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['A', 'B', 'Aaba', 'Baca', None, 'CABA', 'cat'])
>>> s.str.count('a')
0 0
1 0
2 2
3 2
4 null
5 0
6 1
dtype: int32
Escape ``'$'`` to find the literal dollar sign.
>>> s = cudf.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat'])
>>> s.str.count('\$') # noqa W605
0 1
1 0
2 1
3 2
4 2
5 0
dtype: int32
This is also available on Index.
>>> index = cudf.core.index.StringIndex(['A', 'A', 'Aaba', 'cat'])
>>> index.str.count('a')
Int64Index([0, 0, 2, 1], dtype='int64')
"""
if flags != 0:
raise NotImplementedError("`flags` parameter is not yet supported")
return self._return_or_inplace(
cpp_count_re(self._column, pat), **kwargs
)
def findall(self, pat, flags=0, **kwargs):
"""
Find all occurrences of pattern or regular expression in the
Series/Index.
Parameters
----------
pat : str
Pattern or regular expression.
Returns
-------
DataFrame
All non-overlapping matches of pattern or
regular expression in each string of this Series/Index.
Notes
-----
`flags` parameter is currently not supported.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['Lion', 'Monkey', 'Rabbit'])
The search for the pattern ‘Monkey’ returns one match:
>>> s.str.findall('Monkey')
0
0 None
1 Monkey
2 None
When the pattern matches more than one string
in the Series, all matches are returned:
>>> s.str.findall('on')
0
0 on
1 on
2 None
Regular expressions are supported too. For instance,
the search for all the strings ending with
the word ‘on’ is shown next:
>>> s.str.findall('on$')
0
0 on
1 None
2 None
If the pattern is found more than once in the same
string, then multiple strings are returned as columns:
>>> s.str.findall('b')
0 1
0 None None
1 None None
2 b b
"""
if flags != 0:
raise NotImplementedError("`flags` parameter is not yet supported")
kwargs.setdefault("expand", True)
return self._return_or_inplace(
cpp_findall(self._column, pat), **kwargs
)
def isempty(self, **kwargs):
"""
Check whether each string is an empty string.
Returns : Series or Index of bool
Series or Index of boolean values with the same length as
the original Series/Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["1", "abc", "", " ", None])
>>> s.str.isempty()
0 False
1 False
2 True
3 False
4 False
dtype: bool
"""
return self._return_or_inplace(
(self._parent == "").fillna(False), **kwargs
)
def isspace(self, **kwargs):
"""
Check whether all characters in each string are whitespace.
This is equivalent to running the Python string method
`str.isspace()
<https://docs.python.org/3/library/stdtypes.html#str.isspace>`_
for each element of the Series/Index.
If a string has zero characters, False is returned
for that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same length as
the original Series/Index.
See also
--------
isalpha
Check whether all characters are alphabetic.
isnumeric
Check whether all characters are numeric.
isalnum
Check whether all characters are alphanumeric.
isdigit
Check whether all characters are digits.
isdecimal
Check whether all characters are decimal.
islower
Check whether all characters are lowercase.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s = cudf.Series([' ', '\\t\\r\\n ', ''])
>>> s.str.isspace()
0 True
1 True
2 False
dtype: bool
"""
return self._return_or_inplace(cpp_isspace(self._column), **kwargs)
def endswith(self, pat, **kwargs):
"""
Test if the end of each string element matches a pattern.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given
pattern matches the end of each string element.
Notes
-----
`na` parameter is not yet supported, as cudf uses
native strings instead of Python objects.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['bat', 'bear', 'caT', None])
>>> s
0 bat
1 bear
2 caT
3 None
dtype: object
>>> s.str.endswith('t')
0 True
1 False
2 False
3 null
dtype: bool
"""
if "na" in kwargs:
warnings.warn(
"`na` parameter is not yet supported, \
as cudf uses native strings instead of Python objects"
)
if pat is None:
result_col = column.column_empty(
len(self._column), dtype="bool", masked=True
)
else:
from cudf._lib.scalar import as_scalar
result_col = cpp_endswith(self._column, as_scalar(pat, "str"))
return self._return_or_inplace(result_col, **kwargs)
def startswith(self, pat, **kwargs):
"""
Test if the start of each string element matches a pattern.
Equivalent to `str.startswith()
<https://docs.python.org/3/library/stdtypes.html#str.startswith>`_.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given
pattern matches the start of each string element.
See also
--------
endswith
Same as startswith, but tests the end of string.
contains
Tests if string element contains a pattern.
Examples
--------
>>> import cudf
        >>> s = cudf.Series(['bat', 'Bear', 'cat', None])
        >>> s
0 bat
1 Bear
2 cat
3 None
dtype: object
>>> s.str.startswith('b')
0 True
1 False
2 False
3 null
dtype: bool
"""
if "na" in kwargs:
warnings.warn(
"`na` parameter is not yet supported, \
as cudf uses native strings instead of Python objects"
)
if pat is None:
result_col = column.column_empty(
len(self._column), dtype="bool", masked=True
)
else:
from cudf._lib.scalar import as_scalar
result_col = cpp_startswith(self._column, as_scalar(pat, "str"))
return self._return_or_inplace(result_col, **kwargs)
def find(self, sub, start=0, end=None, **kwargs):
"""
Return lowest indexes in each strings in the Series/Index
where the substring is fully contained between ``[start:end]``.
Return -1 on failure.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of int
Examples
--------
>>> import cudf
>>> s = cudf.Series(['abc', 'a','b' ,'ddb'])
>>> s.str.find('b')
0 1
1 -1
2 0
3 2
dtype: int32
Parameters such as `start` and `end` can also be used.
>>> s.str.find('b', start=1, end=5)
0 1
1 -1
2 -1
3 2
dtype: int32
"""
if not isinstance(sub, str):
msg = "expected a string object, not {0}"
raise TypeError(msg.format(type(sub).__name__))
from cudf._lib.scalar import as_scalar
if end is None:
end = -1
result_col = cpp_find(self._column, as_scalar(sub, "str"), start, end)
return self._return_or_inplace(result_col, **kwargs)
def rfind(self, sub, start=0, end=None, **kwargs):
"""
Return highest indexes in each strings in the Series/Index
where the substring is fully contained between ``[start:end]``.
Return -1 on failure. Equivalent to standard `str.rfind()
<https://docs.python.org/3/library/stdtypes.html#str.rfind>`_.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of int
See also
--------
find
Return lowest indexes in each strings.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["abc", "hello world", "rapids ai"])
>>> s.str.rfind('a')
0 0
1 -1
2 7
dtype: int32
Using `start` and `end` parameters.
>>> s.str.rfind('a', start=2, end=5)
0 -1
1 -1
2 -1
dtype: int32
"""
if not isinstance(sub, str):
msg = "expected a string object, not {0}"
raise TypeError(msg.format(type(sub).__name__))
from cudf._lib.scalar import as_scalar
if end is None:
end = -1
result_col = cpp_rfind(self._column, as_scalar(sub, "str"), start, end)
return self._return_or_inplace(result_col, **kwargs)
def index(self, sub, start=0, end=None, **kwargs):
"""
Return lowest indexes in each strings where the substring
is fully contained between ``[start:end]``. This is the same
as str.find except instead of returning -1, it raises a ValueError
when the substring is not found.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of object
Examples
--------
>>> import cudf
>>> s = cudf.Series(['abc', 'a','b' ,'ddb'])
>>> s.str.index('b')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: substring not found
Parameters such as `start` and `end` can also be used.
>>> s = cudf.Series(['abc', 'abb','ab' ,'ddb'])
>>> s.str.index('b', start=1, end=5)
0 1
1 1
2 1
3 2
dtype: int32
"""
if not isinstance(sub, str):
msg = "expected a string object, not {0}"
raise TypeError(msg.format(type(sub).__name__))
from cudf._lib.scalar import as_scalar
if end is None:
end = -1
result_col = cpp_find(self._column, as_scalar(sub, "str"), start, end)
result = self._return_or_inplace(result_col, **kwargs)
if (result == -1).any():
raise ValueError("substring not found")
else:
return result
def rindex(self, sub, start=0, end=None, **kwargs):
"""
Return highest indexes in each strings where the substring
is fully contained between ``[start:end]``. This is the same
as ``str.rfind`` except instead of returning -1, it raises a
``ValueError`` when the substring is not found.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of object
Examples
--------
>>> import cudf
>>> s = cudf.Series(['abc', 'a','b' ,'ddb'])
>>> s.str.rindex('b')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: substring not found
Parameters such as `start` and `end` can also be used.
>>> s = cudf.Series(['abc', 'abb','ab' ,'ddb'])
>>> s.str.rindex('b', start=1, end=5)
0 1
1 2
2 1
3 2
dtype: int32
"""
if not isinstance(sub, str):
msg = "expected a string object, not {0}"
raise TypeError(msg.format(type(sub).__name__))
from cudf._lib.scalar import as_scalar
if end is None:
end = -1
result_col = cpp_rfind(self._column, as_scalar(sub, "str"), start, end)
result = self._return_or_inplace(result_col, **kwargs)
if (result == -1).any():
raise ValueError("substring not found")
else:
return result
def match(self, pat, case=True, flags=0, **kwargs):
"""
Determine if each string matches a regular expression.
Parameters
----------
pat : str
Character sequence or regular expression.
Returns
-------
Series or Index of boolean values.
Notes
-----
Parameters currently not supported are: `case`, `flags` and `na`.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["rapids", "ai", "cudf"])
Checking for strings starting with `a`.
>>> s.str.match('a')
0 False
1 True
2 False
dtype: bool
Checking for strings starting with any of `a` or `c`.
>>> s.str.match('[ac]')
0 False
1 True
2 True
dtype: bool
"""
if case is not True:
raise NotImplementedError("`case` parameter is not yet supported")
if flags != 0:
raise NotImplementedError("`flags` parameter is not yet supported")
if "na" in kwargs:
warnings.warn(
"`na` parameter is not yet supported, \
as cudf uses native strings instead of Python objects"
)
return self._return_or_inplace(
cpp_match_re(self._column, pat), **kwargs
)
def url_decode(self, **kwargs):
"""
Returns a URL-decoded format of each string.
No format checking is performed. All characters
are expected to be encoded as UTF-8 hex values.
Returns
-------
Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['A%2FB-C%2FD', 'e%20f.g', '4-5%2C6'])
>>> s.str.url_decode()
0 A/B-C/D
1 e f.g
2 4-5,6
dtype: object
>>> data = ["https%3A%2F%2Frapids.ai%2Fstart.html", "https%3A%2F%2Fmedium.com%2Frapids-ai"] # noqa E501
>>> s = cudf.Series(data)
>>> s.str.url_decode()
0 https://rapids.ai/start.html
1 https://medium.com/rapids-ai
dtype: object
"""
return self._return_or_inplace(cpp_url_decode(self._column), **kwargs)
def url_encode(self, **kwargs):
"""
Returns a URL-encoded format of each string.
No format checking is performed.
All characters are encoded except for ASCII letters,
digits, and these characters: ``‘.’,’_’,’-‘,’~’``.
Encoding converts to hex using UTF-8 encoded bytes.
Returns
-------
Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['A/B-C/D', 'e f.g', '4-5,6'])
>>> s.str.url_encode()
0 A%2FB-C%2FD
1 e%20f.g
2 4-5%2C6
dtype: object
>>> data = ["https://rapids.ai/start.html", "https://medium.com/rapids-ai"] # noqa E501
>>> s = cudf.Series(data)
>>> s.str.url_encode()
0 https%3A%2F%2Frapids.ai%2Fstart.html
1 https%3A%2F%2Fmedium.com%2Frapids-ai
dtype: object
"""
return self._return_or_inplace(cpp_url_encode(self._column), **kwargs)
def code_points(self, **kwargs):
"""
Returns an array by filling it with the UTF-8 code point
values for each character of each string.
This function uses the ``len()`` method to determine
the size of each sub-array of integers.
Returns
-------
Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["a","xyz", "éee"])
>>> s.str.code_points()
0 97
1 120
2 121
3 122
4 50089
5 101
6 101
dtype: int32
>>> s = cudf.Series(["abc"])
>>> s.str.code_points()
0 97
1 98
2 99
dtype: int32
"""
from cudf.core.series import Series, Index
new_col = cpp_code_points(self._column)
if self._parent is None:
return new_col
elif isinstance(self._parent, Series):
return Series(new_col, name=self._parent.name)
elif isinstance(self._parent, Index):
return column.as_index(new_col, name=self._parent.name)
def translate(self, table, **kwargs):
"""
Map all characters in the string through the given
mapping table.
Equivalent to standard `str.translate()
<https://docs.python.org/3/library/stdtypes.html#str.translate>`_.
Parameters
----------
table : dict
Table is a mapping of Unicode ordinals to Unicode
ordinals, strings, or None.
Unmapped characters are left untouched.
`str.maketrans()
<https://docs.python.org/3/library/stdtypes.html#str.maketrans>`_
is a helper function for making translation tables.
Returns
-------
Series or Index.
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence','SwApCaSe']
>>> s = cudf.Series(data)
>>> s.str.translate({'a': "1"})
0 lower
1 CAPITALS
2 this is 1 sentence
3 SwApC1Se
dtype: object
>>> s.str.translate({'a': "1", "e":"#"})
0 low#r
1 CAPITALS
2 this is 1 s#nt#nc#
3 SwApC1S#
dtype: object
"""
table = str.maketrans(table)
return self._return_or_inplace(
cpp_translate(self._column, table), **kwargs
)
def normalize_spaces(self, **kwargs):
"""
Remove extra whitespace between tokens and trim whitespace
from the beginning and the end of each string.
Returns
-------
Series or Index of object.
Examples
--------
>>> import cudf
>>> ser = cudf.Series(["hello \\t world"," test string "])
>>> ser.str.normalize_spaces()
0 hello world
1 test string
dtype: object
"""
return self._return_or_inplace(
cpp_normalize_spaces(self._column), **kwargs
)
def tokenize(self, delimiter=" ", **kwargs):
"""
Each string is split into tokens using the provided delimiter(s).
The sequence returned contains the tokens in the order
they were found.
Parameters
----------
delimiter : str or list of strs, Default is whitespace.
The string used to locate the split points of each string.
Returns
-------
Series or Index of object.
Examples
--------
>>> import cudf
>>> data = ["hello world", "goodbye world", "hello goodbye"]
>>> ser = cudf.Series(data)
>>> ser.str.tokenize()
0 hello
1 world
2 goodbye
3 world
4 hello
5 goodbye
dtype: object
"""
delimiter = _massage_string_arg(delimiter, "delimiter", allow_col=True)
kwargs.setdefault("retain_index", False)
return self._return_or_inplace(
cpp_tokenize(self._column, delimiter), **kwargs
)
def token_count(self, delimiter=" ", **kwargs):
"""
Each string is split into tokens using the provided delimiter.
The returned integer sequence is the number of tokens in each string.
Parameters
----------
delimiter : str or list of strs, Default is whitespace.
The characters or strings used to locate the
split points of each string.
Returns
-------
Series or Index.
Examples
--------
>>> import cudf
>>> ser = cudf.Series(["hello world","goodbye",""])
>>> ser.str.token_count()
0 2
1 1
2 0
dtype: int32
"""
delimiter = _massage_string_arg(delimiter, "delimiter", allow_col=True)
return self._return_or_inplace(
cpp_count_tokens(self._column, delimiter), **kwargs
)
def ngrams(self, n=2, separator="_", **kwargs):
"""
Generate the n-grams from a set of tokens, each record
in series is treated a token.
You can generate tokens from a Series instance using
the ``Series.str.tokenize()`` function.
Parameters
----------
n : int
The degree of the n-gram (number of consecutive tokens).
Default of 2 for bigrams.
separator : str
The separator to use between within an n-gram.
Default is '_'.
Examples
--------
>>> import cudf
        >>> str_series = cudf.Series(['this is my', 'favorite book'])
>>> str_series.str.ngrams(2, "_")
0 this is my_favorite book
dtype: object
>>> str_series = cudf.Series(['abc','def','xyz','hhh'])
>>> str_series.str.ngrams(2, "_")
0 abc_def
1 def_xyz
2 xyz_hhh
dtype: object
"""
separator = _massage_string_arg(separator, "separator")
kwargs.setdefault("retain_index", False)
return self._return_or_inplace(
cpp_generate_ngrams(self._column, n, separator), **kwargs
)
def ngrams_tokenize(self, n=2, delimiter=" ", separator="_", **kwargs):
"""
Generate the n-grams using tokens from each string.
This will tokenize each string and then generate ngrams for each
string.
Parameters
----------
n : int, Default 2.
The degree of the n-gram (number of consecutive tokens).
delimiter : str, Default is white-space.
The character used to locate the split points of each string.
        separator : str, Default is '_'.
The separator to use between tokens within an n-gram.
Returns
-------
Series or Index of object.
Examples
--------
>>> import cudf
>>> ser = cudf.Series(['this is the', 'best book'])
        >>> ser.str.ngrams_tokenize(n=2, separator='_')
0 this_is
1 is_the
2 best_book
dtype: object
"""
delimiter = _massage_string_arg(delimiter, "delimiter")
separator = _massage_string_arg(separator, "separator")
kwargs.setdefault("retain_index", False)
return self._return_or_inplace(
cpp_ngrams_tokenize(self._column, n, delimiter, separator),
**kwargs,
)
def _massage_string_arg(value, name, allow_col=False):
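    # Coerce a user-supplied delimiter/separator into a cudf string scalar, or
    # into a string column when allow_col=True (plain strings and lists are
    # converted; anything else raises ValueError below).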
from cudf._lib.scalar import as_scalar, Scalar
from cudf._lib.column import Column
from cudf.utils.dtypes import is_string_dtype
if isinstance(value, str):
return as_scalar(value, dtype="str")
if isinstance(value, Scalar) and is_string_dtype(value.dtype):
return value
allowed_types = ["Scalar"]
if allow_col:
if isinstance(value, list):
return column.as_column(value, dtype="str")
if isinstance(value, Column) and is_string_dtype(value.dtype):
return value
allowed_types.append("Column")
raise ValueError(
"Expected {} for {} but got {}".format(
_expected_types_format(allowed_types), name, type(value)
)
)
def _expected_types_format(types):
if len(types) == 1:
return types[0]
return ", ".join(types[:-1]) + ", or " + types[-1]
class StringColumn(column.ColumnBase):
"""Implements operations for Columns of String type
"""
def __init__(
self, mask=None, size=None, offset=0, null_count=None, children=()
):
"""
Parameters
----------
mask : Buffer
The validity mask
offset : int
Data offset
children : Tuple[Column]
Two non-null columns containing the string data and offsets
respectively
"""
dtype = np.dtype("object")
if size is None:
for child in children:
assert child.offset == 0
if len(children) == 0:
size = 0
elif children[0].size == 0:
size = 0
else:
# one less because the last element of offsets is the number of
# bytes in the data buffer
size = children[0].size - 1
size = size - offset
super().__init__(
None,
size,
dtype,
mask=mask,
offset=offset,
null_count=null_count,
children=children,
)
@property
def base_size(self):
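        # Row count derived from the offsets child: one extra entry, scaled by
        # the offsets dtype itemsize.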
if len(self.base_children) == 0:
return 0
else:
return int(
(self.base_children[0].size - 1)
/ self.base_children[0].dtype.itemsize
)
def set_base_data(self, value):
if value is not None:
raise RuntimeError(
"StringColumns do not use data attribute of Column, use "
"`set_base_children` instead"
)
else:
super().set_base_data(value)
def set_base_mask(self, value):
super().set_base_mask(value)
def set_base_children(self, value):
# TODO: Implement dtype validation of the children here somehow
super().set_base_children(value)
@property
def children(self):
if self._children is None:
if len(self.base_children) == 0:
self._children = ()
elif self.offset == 0 and self.base_children[0].size == (
self.size + 1
):
self._children = self.base_children
else:
# First get the base columns for chars and offsets
chars_column = self.base_children[1]
offsets_column = self.base_children[0]
# Shift offsets column by the parent offset.
offsets_column = column.build_column(
data=offsets_column.base_data,
dtype=offsets_column.dtype,
mask=offsets_column.base_mask,
size=self.size + 1,
offset=self.offset,
)
# Now run a subtraction binary op to shift all of the offsets
# by the respective number of characters relative to the
# parent offset
chars_offset = libcudf.copying.get_element(offsets_column, 0)
offsets_column = offsets_column.binary_operator(
"sub", chars_offset
)
# Shift the chars offset by the new first element of the
# offsets column
chars_size = libcudf.copying.get_element(
offsets_column, self.size
)
chars_column = column.build_column(
data=chars_column.base_data,
dtype=chars_column.dtype,
mask=chars_column.base_mask,
size=chars_size.value,
offset=chars_offset.value,
)
self._children = (offsets_column, chars_column)
return self._children
def __contains__(self, item):
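        # Membership test: does any row match `item` exactly (anchored regex)?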
return True in self.str().contains(f"^{item}$")
def str(self, parent=None):
return StringMethods(self, parent=parent)
def __sizeof__(self):
n = 0
if len(self.base_children) == 2:
n += (
self.base_children[0].__sizeof__()
+ self.base_children[1].__sizeof__()
)
if self.base_mask is not None:
n += self.base_mask.size
return n
def _memory_usage(self, deep=False):
if deep:
return self.__sizeof__()
else:
return self.str().size() * self.dtype.itemsize
def __len__(self):
return self.size
def _set_mask(self, value):
super()._set_mask(value)
@property
def _nbytes(self):
if self.size == 0:
return 0
else:
return self.children[1].size
def as_numerical_column(self, dtype, **kwargs):
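        # Convert strings to a numeric or datetime column, first checking that
        # the values can actually be parsed for the requested dtype.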
out_dtype = np.dtype(dtype)
kwargs.update(dtype=out_dtype)
if out_dtype.type is np.datetime64:
if "format" not in kwargs:
if len(self) > 0:
# infer on host from the first not na element
fmt = datetime.infer_format(self[self.notna()][0])
kwargs.update(format=fmt)
# Check for None strings
if len(self) > 0 and self.binary_operator("eq", "None").any():
raise ValueError("Could not convert `None` value to datetime")
boolean_match = self.binary_operator("eq", "NaT")
elif out_dtype.kind in ("i"):
if not cpp_is_integer(self).all():
raise ValueError(
"Could not convert strings to integer \
type due to presence of non-integer values."
)
elif out_dtype.kind in ("f"):
if not cpp_is_float(self).all():
raise ValueError(
"Could not convert strings to float \
type due to presence of non-floating values."
)
result_col = _str_to_numeric_typecast_functions[out_dtype](
self, **kwargs
)
if (out_dtype.type is np.datetime64) and boolean_match.any():
result_col[boolean_match] = None
return result_col
def as_datetime_column(self, dtype, **kwargs):
return self.as_numerical_column(dtype, **kwargs)
def as_string_column(self, dtype, **kwargs):
return self
def to_arrow(self):
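        # Rebuild a pyarrow StringArray (or a NullArray when fully null) from
        # the offsets/chars children plus the validity mask.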
if len(self) == 0:
sbuf = np.empty(0, dtype="int8")
obuf = np.empty(0, dtype="int32")
nbuf = None
else:
sbuf = self.children[1].data.to_host_array().view("int8")
obuf = self.children[0].data.to_host_array().view("int32")
nbuf = None
if self.null_count > 0:
nbuf = self.mask.to_host_array().view("int8")
nbuf = pa.py_buffer(nbuf)
sbuf = pa.py_buffer(sbuf)
obuf = pa.py_buffer(obuf)
if self.null_count == len(self):
return pa.NullArray.from_buffers(
pa.null(), len(self), [pa.py_buffer((b""))], self.null_count
)
else:
return pa.StringArray.from_buffers(
len(self), obuf, sbuf, nbuf, self.null_count
)
def to_pandas(self, index=None):
pd_series = self.to_arrow().to_pandas()
if index is not None:
pd_series.index = index
return pd_series
def to_array(self, fillna=None):
"""Get a dense numpy array for the data.
Notes
-----
if ``fillna`` is ``None``, null values are skipped. Therefore, the
output size could be smaller.
Raises
------
``NotImplementedError`` if there are nulls
"""
if fillna is not None:
warnings.warn("fillna parameter not supported for string arrays")
return self.to_arrow().to_pandas().values
def __array__(self, dtype=None):
raise TypeError(
"Implicit conversion to a host NumPy array via __array__ is not allowed, \
Conversion to GPU array in strings is not yet supported.\nTo \
explicitly construct a host array, consider using .to_array()"
)
def serialize(self):
header = {"null_count": self.null_count}
header["type-serialized"] = pickle.dumps(type(self))
frames = []
sub_headers = []
for item in self.children:
sheader, sframes = item.serialize()
sub_headers.append(sheader)
frames.extend(sframes)
if self.null_count > 0:
frames.append(self.mask)
header["subheaders"] = sub_headers
header["frame_count"] = len(frames)
return header, frames
@classmethod
def deserialize(cls, header, frames):
# Deserialize the mask, value, and offset frames
buffers = [Buffer(each_frame) for each_frame in frames]
if header["null_count"] > 0:
nbuf = buffers[2]
else:
nbuf = None
children = []
for h, b in zip(header["subheaders"], buffers[:2]):
column_type = pickle.loads(h["type-serialized"])
children.append(column_type.deserialize(h, [b]))
col = column.build_column(
data=None, dtype="str", mask=nbuf, children=tuple(children)
)
return col
def can_cast_safely(self, to_dtype):
to_dtype = np.dtype(to_dtype)
if self.dtype == to_dtype:
return True
elif to_dtype.kind in ("i") and not cpp_is_integer(self).all():
return False
elif to_dtype.kind in ("f") and not cpp_is_float(self).all():
return False
else:
return True
def find_and_replace(self, to_replace, replacement, all_nan):
"""
Return col with *to_replace* replaced with *value*
"""
to_replace = column.as_column(to_replace, dtype=self.dtype)
replacement = column.as_column(replacement, dtype=self.dtype)
return libcudf.replace.replace(self, to_replace, replacement)
def fillna(self, fill_value):
if not is_scalar(fill_value):
fill_value = column.as_column(fill_value, dtype=self.dtype)
return libcudf.replace.replace_nulls(self, fill_value, dtype="object")
def _find_first_and_last(self, value):
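        # Locate the first and last row indices whose string equals `value`
        # exactly (anchored regex match).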
found_indices = self.str().contains(f"^{value}$")
found_indices = libcudf.unary.cast(found_indices, dtype=np.int32)
first = column.as_column(found_indices).find_first_value(1)
last = column.as_column(found_indices).find_last_value(1)
return first, last
def find_first_value(self, value, closest=False):
return self._find_first_and_last(value)[0]
def find_last_value(self, value, closest=False):
return self._find_first_and_last(value)[1]
def normalize_binop_value(self, other):
if isinstance(other, column.Column):
return other.astype(self.dtype)
elif isinstance(other, str) or other is None:
col = utils.scalar_broadcast_to(
other, size=len(self), dtype="object"
)
return col
else:
raise TypeError("cannot broadcast {}".format(type(other)))
def default_na_value(self):
return None
def binary_operator(self, op, rhs, reflect=False):
lhs = self
if reflect:
lhs, rhs = rhs, lhs
if isinstance(rhs, StringColumn) and op == "add":
return lhs.str().cat(others=rhs)
elif op in ("eq", "ne", "gt", "lt", "ge", "le"):
return _string_column_binop(self, rhs, op=op, out_dtype="bool")
else:
msg = "{!r} operator not supported between {} and {}"
raise TypeError(msg.format(op, type(self), type(rhs)))
def sum(self, dtype=None):
# Should we be raising here? Pandas can't handle the mix of strings and
# None and throws, but we already have a test that looks to ignore
# nulls and returns anyway.
# if self.null_count > 0:
# raise ValueError("Cannot get sum of string column with nulls")
if len(self) == 0:
return ""
return decode(self.children[1].data.to_host_array(), encoding="utf-8")
@property
def is_unique(self):
return len(self.unique()) == len(self)
@property
def __cuda_array_interface__(self):
raise NotImplementedError(
"Strings are not yet supported via `__cuda_array_interface__`"
)
def _mimic_inplace(self, other_col, inplace=False):
out = super()._mimic_inplace(other_col, inplace=inplace)
return out
@annotate("BINARY_OP", color="orange", domain="cudf_python")
def _string_column_binop(lhs, rhs, op, out_dtype):
out = libcudf.binaryop.binaryop(lhs=lhs, rhs=rhs, op=op, dtype=out_dtype)
return out
def _get_cols_list(others):
from cudf.core import Series, Index
from cudf.core.column import as_column
if (
is_list_like(others)
and len(others) > 0
and (
is_list_like(others[0])
or isinstance(others[0], (Series, Index, pd.Series, pd.Index))
)
):
"""
If others is a list-like object (in our case lists & tuples)
just another Series/Index, great go ahead with concatenation.
"""
cols_list = [as_column(frame, dtype="str") for frame in others]
return cols_list
elif others is not None:
return [as_column(others, dtype="str")]
else:
raise TypeError(
"others must be Series, Index, DataFrame, np.ndarrary "
"or list-like (either containing only strings or "
"containing only objects of type Series/Index/"
"np.ndarray[1-dim])"
)
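# Minimal usage sketch (assumes the cudf package is importable; outputs are
# illustrative only). The accessor defined above is normally reached through
# Series.str, and every call below uses a method defined in this module:
#
#   import cudf
#   s = cudf.Series(["hello world", "rapids ai"])
#   s.str.get(0)                              # first character of each string
#   s.str.pad(12, side="both", fillchar="-")  # centered padding to width 12
#   s.str.split(n=1)                          # DataFrame, one column per split
#   s.str.strip()                             # trim leading/trailing whitespace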
| 29.991903 | 119 | 0.532836 |
5fdde393ccfdc10a80261cf7572774c756f0f070 | 6,122 | py | Python | cupy/cuda/curand.py | ytoyama/yans_chainer_hackathon | 744e7a5a67da8dec2869879f0adfae2d43eaf75c | ["MIT"] | null | null | null | cupy/cuda/curand.py | ytoyama/yans_chainer_hackathon | 744e7a5a67da8dec2869879f0adfae2d43eaf75c | ["MIT"] | null | null | null | cupy/cuda/curand.py | ytoyama/yans_chainer_hackathon | 744e7a5a67da8dec2869879f0adfae2d43eaf75c | ["MIT"] | null | null | null |
"""Thin wrapper of cuRAND."""
import ctypes
import sys
from cupy.cuda import internal
from cupy.cuda import runtime
if 'win32' == sys.platform:
_curand = internal.load_library(
internal.get_windows_cuda_library_names('curand'))
else:
_curand = internal.load_library('curand')
_I = ctypes.c_int
_U = ctypes.c_uint
_S = ctypes.c_size_t
_ULL = ctypes.c_ulonglong
_P = ctypes.c_void_p
_IP = ctypes.POINTER(_I)
_UP = ctypes.POINTER(_U)
_ULLP = ctypes.POINTER(_ULL)
_F = ctypes.c_float
_D = ctypes.c_double
_FP = ctypes.POINTER(_F)
_DP = ctypes.POINTER(_D)
CURAND_RNG_PSEUDO_DEFAULT = 100
CURAND_RNG_PSEUDO_XORWOW = 101
CURAND_RNG_PSEUDO_MRG32K3A = 121
CURAND_RNG_PSEUDO_MTGP32 = 141
CURAND_RNG_PSEUDO_MT19937 = 142
CURAND_RNG_PSEUDO_PHILOX4_32_10 = 161
CURAND_RNG_QUASI_DEFAULT = 200
CURAND_RNG_QUASI_SOBOL32 = 201
CURAND_RNG_QUASI_SCRAMBLED_SOBOL32 = 202
CURAND_RNG_QUASI_SOBOL64 = 203
CURAND_RNG_QUASI_SCRAMBLED_SOBOL64 = 204
CURAND_ORDERING_PSEUDO_BEST = 100
CURAND_ORDERING_PSEUDO_DEFAULT = 101
CURAND_ORDERING_PSEUDO_SEEDED = 102
CURAND_ORDERING_QUASI_DEFAULT = 201
Generator = _P
Distribution = _DP
DiscreteDistribution = _P
###############################################################################
# Error handling
###############################################################################
STATUS = {
0: 'CURAND_STATUS_SUCCESS',
100: 'CURAND_STATUS_VERSION_MISMATCH',
101: 'CURAND_STATUS_NOT_INITIALIZED',
102: 'CURAND_STATUS_ALLOCATION_FAILED',
103: 'CURAND_STATUS_TYPE_ERROR',
104: 'CURAND_STATUS_OUT_OF_RANGE',
105: 'CURAND_STATUS_LENGTH_NOT_MULTIPLE',
106: 'CURAND_STATUS_DOUBLE_PRECISION_REQUIRED',
201: 'CURAND_STATUS_LAUNCH_FAILURE',
202: 'CURAND_STATUS_PREEXISTING_FAILURE',
203: 'CURAND_STATUS_INITIALIZATION_FAILED',
204: 'CURAND_STATUS_ARCH_MISMATCH',
999: 'CURAND_STATUS_INTERNAL_ERROR',
}
class CURANDError(RuntimeError):
def __init__(self, status):
self.status = status
super(CURANDError, self).__init__(STATUS[status])
def check_status(status):
if status != 0:
raise CURANDError(status)
###############################################################################
# Generator
###############################################################################
_curand.curandCreateGenerator.argtypes = (_P, _I)
def createGenerator(rng_type):
generator = Generator()
status = _curand.curandCreateGenerator(ctypes.byref(generator), rng_type)
check_status(status)
return generator
_curand.curandDestroyGenerator.argtypes = (Generator,)
def destroyGenerator(generator):
status = _curand.curandDestroyGenerator(generator)
check_status(status)
_curand.curandGetVersion.argtypes = (_IP,)
def getVersion():
version = _I()
status = _curand.curandGetVersion(ctypes.byref(version))
check_status(status)
return version
_curand.curandSetStream.argtypes = (Generator, runtime.Stream)
def setStream(generator, stream):
status = _curand.curandSetStream(generator, stream)
check_status(status)
_curand.curandSetPseudoRandomGeneratorSeed.argtypes = (Generator, _ULL)
def setPseudoRandomGeneratorSeed(generator, seed):
status = _curand.curandSetPseudoRandomGeneratorSeed(generator, seed)
check_status(status)
_curand.curandSetGeneratorOffset.argtypes = (Generator, _ULL)
def setGeneratorOffset(generator, offset):
status = _curand.curandSetGeneratorOffset(generator, offset)
check_status(status)
_curand.curandSetGeneratorOrdering.argtypes = (Generator, _I)
def setGeneratorOrdering(generator, order):
status = _curand.curandSetGeneratorOrdering(generator, order)
check_status(status)
###############################################################################
# Generation functions
###############################################################################
_curand.curandGenerate.argtypes = (Generator, _P, _S)
def generate(generator, outputPtr, num):
status = _curand.curandGenerate(generator, outputPtr, num)
check_status(status)
_curand.curandGenerateLongLong.argtypes = (Generator, _P, _S)
def generateLongLong(generator, outputPtr, num):
status = _curand.curandGenerateLongLong(generator, outputPtr, num)
check_status(status)
_curand.curandGenerateUniform.argtypes = (Generator, _P, _S)
def generateUniform(generator, outputPtr, num):
status = _curand.curandGenerateUniform(generator, outputPtr, num)
check_status(status)
_curand.curandGenerateUniformDouble.argtypes = (Generator, _P, _S)
def generateUniformDouble(generator, outputPtr, num):
status = _curand.curandGenerateUniformDouble(generator, outputPtr, num)
check_status(status)
_curand.curandGenerateNormal.argtypes = (Generator, _P, _S, _F, _F)
def generateNormal(generator, outputPtr, n, mean, stddev):
status = _curand.curandGenerateNormal(generator, outputPtr, n, mean,
stddev)
check_status(status)
_curand.curandGenerateNormalDouble.argtypes = (Generator, _P, _S, _D, _D)
def generateNormalDouble(generator, outputPtr, n, mean, stddev):
status = _curand.curandGenerateNormalDouble(generator, outputPtr, n, mean,
stddev)
check_status(status)
_curand.curandGenerateLogNormal.argtypes = (Generator, _P, _S, _F, _F)
def generateLogNormal(generator, outputPtr, n, mean, stddev):
    status = _curand.curandGenerateLogNormal(generator, outputPtr, n, mean,
                                             stddev)
check_status(status)
_curand.curandGenerateLogNormalDouble.argtypes = (Generator, _P, _S, _D, _D)
def generateLogNormalDouble(generator, outputPtr, n, mean, stddev):
status = _curand.curandGenerateLogNormalDouble(generator, outputPtr, n,
mean, stddev)
check_status(status)
_curand.curandGeneratePoisson.argtypes = (Generator, _P, _S, _D)
def generatePoisson(generator, outputPtr, n, lam):
status = _curand.curandGeneratePoisson(generator, outputPtr, n, lam)
check_status(status)
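# Minimal usage sketch (assumes a CUDA device and a raw device pointer `ptr`
# large enough for `n` float32 values, e.g. from cupy's memory allocator):
#
#   gen = createGenerator(CURAND_RNG_PSEUDO_DEFAULT)
#   setPseudoRandomGeneratorSeed(gen, 42)
#   generateUniform(gen, ptr, n)   # fill ptr with n uniform floats in (0, 1]
#   destroyGenerator(gen)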
| 27.452915 | 79 | 0.68801 |
5dbb0abe7674012a4fd4825b80c89d5d9e4ef0ce | 8,097 | py | Python | tests/prediction_latency_test/prepare.py | yarenty/mindsdb | 9164bca6f45fd0f5ec329babe973f286ffe59709 | ["MIT"] | null | null | null | tests/prediction_latency_test/prepare.py | yarenty/mindsdb | 9164bca6f45fd0f5ec329babe973f286ffe59709 | ["MIT"] | null | null | null | tests/prediction_latency_test/prepare.py | yarenty/mindsdb | 9164bca6f45fd0f5ec329babe973f286ffe59709 | ["MIT"] | null | null | null |
import os
import atexit
import importlib.util
import time
import csv
from subprocess import Popen
import pandas as pd
import docker
import requests
import psutil
import schemas as schema
from config import CONFIG
class Dataset:
def __init__(self, name, **kwargs):
self.name = name
self.target = kwargs.get("target")
self.handler_file = kwargs.get("handler_file", None)
if self.handler_file is not None:
self.handler = self._get_handler()
else:
self.handler = None
def _get_handler(self):
spec = importlib.util.spec_from_file_location("common", os.path.abspath(self.handler_file))
handler = importlib.util.module_from_spec(spec)
spec.loader.exec_module(handler)
return handler.handler
DATASETS_PATH = os.getenv("DATASETS_PATH")
CONFIG_PATH = os.getenv("CONFIG_PATH")
datasets = [Dataset(key, **CONFIG['datasets'][key]) for key in CONFIG['datasets'].keys()]
class Datasource:
def __init__(self, name):
self.name = name
self.base_url = "http://127.0.0.1:47334/api/datasources/"
self.url = self.base_url + name
def upload(self, force=False):
if force or not self.exists:
self.delete()
files = {}
file_name = f"{self.name}.csv"
with open(file_name, 'r') as fd:
files['file'] = (file_name, fd, 'text/csv')
files['source_type'] = (None, 'file')
files['source'] = (None, file_name)
print(f"calling {self.url} with files={files}")
res = requests.put(self.url, files=files)
res.raise_for_status()
@property
def list_datasources(self):
res = requests.get(self.base_url)
res.raise_for_status()
return [x["name"] for x in res.json()]
@property
def exists(self):
return self.name in self.list_datasources
def delete(self):
requests.delete(self.url)
class Predictor():
def __init__(self, name):
self.name = name
self.base_url = "http://127.0.0.1:47334/api"
self.url = f'{self.base_url}/predictors/{self.name}'
def get_info(self):
return requests.get(self.url).json()
def is_ready(self):
return self.get_info()["status"] == 'complete'
@property
def exists(self):
return "status" in self.get_info()
def learn(self, to_predict, force=False):
if force or not self.exists:
self.delete()
datasource_name = f"{self.name}_train"
res = requests.put(self.url, json={
'data_source_name': datasource_name,
'to_predict': to_predict,
'stop_training_in_x_seconds': 10,
})
res.raise_for_status()
def delete(self):
requests.delete(self.url)
def monthly_sunspots_handler(df):
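    # Append a day component ("-01") to each Month value so it reads as a
    # full date.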
months = df['Month']
for i, val in enumerate(months):
months[i] = val + "-01"
def get_handler(handler_path):
spec = importlib.util.spec_from_file_location("common", os.path.abspath(handler_path))
handler = importlib.util.module_from_spec(spec)
spec.loader.exec_module(handler)
return handler.handler
def add_integration():
db_info = CONFIG['database']
db_info['enabled'] = True
db_info['type'] = 'clickhouse'
url = "http://127.0.0.1:47334/api/config/integrations/prediction_clickhouse"
exist_request = requests.get(url)
if exist_request.status_code == requests.status_codes.codes.ok:
print("integration is already exists")
return
res = requests.put(url, json={'params': db_info})
res.raise_for_status()
def split_datasets():
for dataset in datasets:
data_path = os.path.join(DATASETS_PATH, dataset.name, "data.csv")
df = pd.read_csv(data_path)
if dataset.handler is not None:
dataset.handler(df)
all_len = len(df)
train_len = int(float(all_len) * 0.8)
train_df = df[:train_len]
test_df = df[train_len:]
test_df = test_df.drop(columns=[dataset.target,])
train_df.to_csv(f"{dataset.name}_train.csv", index=False)
test_df.to_csv(f"{dataset.name}_test.csv", index=False)
def upload_datasets(force=False):
"""Upload train dataset to mindsdb via API."""
for dataset in datasets:
datasource = Datasource(f"{dataset.name}_train")
print(datasource.name)
datasource.upload(force=force)
def create_predictors(force=False):
predictors = []
for dataset in datasets:
predictor = Predictor(dataset.name)
predictor.learn(dataset.target, force=force)
predictors.append(predictor)
while predictors:
for predictor in predictors[:]:
if predictor.is_ready():
print(f"predictor {predictor.name} is ready")
predictors.remove(predictor)
continue
time.sleep(5)
def stop_mindsdb(ppid):
pprocess = psutil.Process(ppid)
pids = [x.pid for x in pprocess.children(recursive=True)]
pids.append(ppid)
for pid in pids:
try:
os.kill(pid, 9)
# process may be killed by OS due to some reasons in that moment
except ProcessLookupError:
pass
def run_mindsdb():
sp = Popen(['python3', '-m', 'mindsdb', '--config', CONFIG_PATH],
close_fds=True)
time.sleep(30)
atexit.register(stop_mindsdb, sp.pid)
def run_clickhouse():
docker_client = docker.from_env(version='auto')
image = "yandex/clickhouse-server:latest"
container_params = {'name': 'clickhouse-latency-test',
'remove': True,
'network_mode': 'host',
}
container = docker_client.containers.run(image, detach=True, **container_params)
atexit.register(container.stop)
return container
def prepare_db():
db = schema.database
query(f'DROP DATABASE IF EXISTS {db}')
query(f'CREATE DATABASE {db}')
for dataset in datasets:
query(schema.tables[dataset.name])
with open(f'{dataset.name}_train.csv') as fp:
csv_fp = csv.reader(fp)
for i, row in enumerate(csv_fp):
if i == 0:
continue
for i in range(len(row)):
try:
if '.' in row[i]:
row[i] = float(row[i])
else:
if row[i].isdigit():
row[i] = int(row[i])
except Exception as e:
print(e)
query('INSERT INTO ' + schema.database + '.' + dataset.name + ' VALUES ({})'.format(
str(row).lstrip('[').rstrip(']')
))
def query(query):
if 'CREATE ' not in query.upper() and 'INSERT ' not in query.upper():
query += ' FORMAT JSON'
db_info = CONFIG['database']
host = db_info['host']
port = db_info['port']
user = db_info['user']
password = db_info['password']
connect_string = f'http://{host}:{port}'
params = {'user': user, 'password': password}
res = requests.post(
connect_string,
data=query,
params=params,
headers={"Connection": "close"}
)
if res.status_code != 200:
print(f"error uploading: {query}")
print(res.text, res.status_code)
assert res.status_code == 200
return res.text
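# Minimal sketch of how the query() helper above behaves; the database, table
# and column names are hypothetical. SELECT statements get " FORMAT JSON"
# appended, while CREATE/INSERT statements are sent verbatim.
def _example_queries():
    """Hypothetical illustration only; not called during environment preparation."""
    query('CREATE DATABASE IF NOT EXISTS latency_demo')             # sent as-is
    query('CREATE TABLE latency_demo.t (x Int32) ENGINE = Memory')  # sent as-is
    query('INSERT INTO latency_demo.t VALUES (1)')                  # sent as-is
    rows = query('SELECT count(*) FROM latency_demo.t')             # becomes "... FORMAT JSON"
    print(rows)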
def prepare_env(prepare_data=True,
use_docker=True,
setup_db=True,
train_models=True):
if prepare_data:
print("preparing_datasets")
split_datasets()
if use_docker:
print("running docker")
run_clickhouse()
time.sleep(10)
if setup_db:
print("preparing db")
prepare_db()
print("running mindsdb")
run_mindsdb()
print("uploading train datasets to mindsdb")
upload_datasets(force=prepare_data)
create_predictors(force=train_models)
add_integration()
| 29.768382
| 100
| 0.595529
|
8ea731d9a9b1c3c4223268cca1da10a854715f58
| 1,599
|
py
|
Python
|
test/try_hdfs.py
|
ZEMUSHKA/pydoop
|
e3d3378ae9921561f6c600c79364c2ad42ec206d
|
[
"Apache-2.0"
] | 1
|
2017-11-16T02:13:15.000Z
|
2017-11-16T02:13:15.000Z
|
test/try_hdfs.py
|
ZEMUSHKA/pydoop
|
e3d3378ae9921561f6c600c79364c2ad42ec206d
|
[
"Apache-2.0"
] | null | null | null |
test/try_hdfs.py
|
ZEMUSHKA/pydoop
|
e3d3378ae9921561f6c600c79364c2ad42ec206d
|
[
"Apache-2.0"
] | null | null | null |
# BEGIN_COPYRIGHT
#
# Copyright 2009-2014 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""
Check that resetting the hdfs module after changing
os.environ['HADOOP_CONF_DIR'] works (i.e., Pydoop references the
correct HDFS service).
Note that it does **NOT** work if you've already instantiated an hdfs
handle, and this is NOT due to the caching system.
"""
import sys, os, argparse
import pydoop.hdfs as hdfs
def dump_status(fs):
print "(host, port, user) = %r" % ((fs.host, fs.port, fs.user),)
print "_CACHE = %r" % (fs._CACHE,)
print "_ALIASES = %r" % (fs._ALIASES,)
print
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--conf-dir", metavar="HADOOP_CONF_DIR")
args = parser.parse_args(argv)
if args.conf_dir:
os.environ["HADOOP_CONF_DIR"] = os.path.abspath(args.conf_dir)
hdfs.reset()
fs = hdfs.hdfs()
print "--- OPEN ---"
dump_status(fs)
print "cwd:", fs.working_directory()
print
fs.close()
print "--- CLOSED ---"
dump_status(fs)
if __name__ == "__main__":
main()
| 27.101695
| 77
| 0.710444
|
74a8f2466360fceb866dbe69cfd8b4f4c376a38d
| 10,011
|
py
|
Python
|
sparse_operation_kit/sparse_operation_kit/core/initialize.py
|
aalbersk/DeepRec
|
f673a950780959b44dcda99398880a1d883ab338
|
[
"Apache-2.0"
] | 292
|
2021-12-24T03:24:33.000Z
|
2022-03-31T15:41:05.000Z
|
sparse_operation_kit/sparse_operation_kit/core/initialize.py
|
aalbersk/DeepRec
|
f673a950780959b44dcda99398880a1d883ab338
|
[
"Apache-2.0"
] | 54
|
2021-12-24T06:40:09.000Z
|
2022-03-30T07:57:24.000Z
|
sparse_operation_kit/sparse_operation_kit/core/initialize.py
|
aalbersk/DeepRec
|
f673a950780959b44dcda99398880a1d883ab338
|
[
"Apache-2.0"
] | 75
|
2021-12-24T04:48:21.000Z
|
2022-03-29T10:13:39.000Z
|
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sparse_operation_kit import kit_lib
from tensorflow.python.ops import collective_ops
try:
from tensorflow.distribute import MultiWorkerMirroredStrategy
except:
from tensorflow.distribute.experimental import MultiWorkerMirroredStrategy
from tensorflow.distribute import MirroredStrategy, get_replica_context, has_strategy, get_strategy
from tensorflow import constant, TensorShape, function
from tensorflow.dtypes import int32, int64
from tensorflow import print as tf_print
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
import sys
from tensorflow.python.framework import config
def Init(**kwargs):
"""
Abbreviated as ``sok.Init(**kwargs)``.
This function is used to do the initialization of SparseOperationKit (SOK).
    SOK will leverage all available GPUs for the current CPU process. Please set
`CUDA_VISIBLE_DEVICES` or `tf.config.set_visible_devices` to specify which
    GPU(s) are used in this process before launching the TensorFlow runtime
and calling this function.
In **TensorFlow 2.x**, SOK can be used with **tf.distribute.Strategy** or **Horovod**.
When it's used with tf.distribute.Strategy, it must be called under `strategy.scope()`.
For example,
.. code-block:: python
with strategy.scope():
sok.Init(**kwargs)
    When it's used with Horovod, it must be called in each process. For example,
.. code-block:: python
import horovod.tensorflow as hvd
hvd.init()
sok.Init(**kwargs)
    In **TensorFlow 1.15**, SOK can only work with **Horovod**. The returned status
    must be evaluated with `sess.run`, and it must be the first step before evaluating
any other SOK APIs.
.. code-block:: python
sok_init = sok.Init(global_batch_size=args.global_batch_size)
with tf.Session() as sess:
sess.run(sok_init)
...
Parameters
----------
kwargs: dictionary
keyword arguments for this function.
        Currently, it must contain `global_batch_size` used in all GPUs.
Returns
-------
status: string
a string will be returned if this function executed successfully.
And its contents will be 'OK'.
"""
def _get_visible_devices():
gpus = config.get_visible_devices('GPU')
assert(len(gpus) > 0)
visible_devices = []
for i in range(len(gpus)):
visible_devices.append(int(gpus[i].name.split(':')[-1]))
return array_ops.constant(visible_devices, dtype=int32)
@function
def _single_worker_init(**kwargs):
replica_ctx = get_replica_context()
replica_ctx.merge_call(lambda strategy:
tf_print("You are using the plugin with MirroredStrategy."))
nccl_unique_id = replica_ctx.merge_call(lambda strategy:
kit_lib.get_nccl_unique_id())
global_random_seed = kwargs.get("seed", None) or replica_ctx.merge_call(lambda strategy:
kit_lib.gen_random_seed())
global_id = replica_ctx.replica_id_in_sync_group
visible_devices = _get_visible_devices()
status = kit_lib.plugin_init(global_id, replica_ctx.num_replicas_in_sync,
nccl_unique_id, global_random_seed, visible_devices,
global_batch_size=kwargs['global_batch_size'])
return status
def _multi_worker_init(**kwargs):
replica_ctx = get_replica_context()
global_id = replica_ctx.replica_id_in_sync_group
if global_id == 0:
unique_id = kit_lib.get_nccl_unique_id()
re = collective_ops.broadcast_send(unique_id,
TensorShape([32,]),
int32,
group_size=replica_ctx.num_replicas_in_sync,
group_key=1,
instance_key=2)
else:
re = collective_ops.broadcast_recv(TensorShape([32,]),
int32,
group_size=replica_ctx.num_replicas_in_sync,
group_key=1,
instance_key=2)
if global_id == 0:
global_seed = kwargs.get("seed", None) or kit_lib.gen_random_seed()
re_seed = collective_ops.broadcast_send(global_seed,
TensorShape([1,]),
int64,
group_size=replica_ctx.num_replicas_in_sync,
group_key=1,
instance_key=3)
else:
global_seed = kwargs.get("seed", None)
re_seed = collective_ops.broadcast_recv(TensorShape([1,]),
int64,
group_size=replica_ctx.num_replicas_in_sync,
group_key=1,
instance_key=3)
if (global_seed and global_seed != re_seed):
logging.warning("The seed: {} is not consistent with that from cheif-node: {}, "
"and the seed from cheif-node will be used.".format(global_seed, re_seed))
visible_devices = _get_visible_devices()
status = kit_lib.plugin_init(global_id, replica_ctx.num_replicas_in_sync,
re, re_seed, visible_devices,
global_batch_size=kwargs['global_batch_size'])
return status
# @function
def _horovod_init(**kwargs):
r"""
        This function uses horovod to broadcast the nccl-id and random-seed which are used by sparse_operation_kit.
Please note that the nccl-comm mentioned here is not the same one as the nccl-comm of horovod itself.
After broadcasting, this function uses kit_lib.plugin_init and "nccl-id", "random-seed" to initialize
sparse_operation_kit.
"""
local_rank = hvd.local_rank()
unique_id = kit_lib.get_nccl_unique_id() if local_rank == 0 else array_ops.zeros([32,], dtype=int32)
unique_id = hvd.broadcast(unique_id, root_rank=0, name="nccl_unique_id")
seed = kwargs.get("seed", None)
if 0 == local_rank:
global_seed = seed or kit_lib.gen_random_seed()
else:
global_seed = array_ops.zeros([1,], dtype=int64)
re_seed = hvd.broadcast(global_seed, root_rank=0, name="random_seed")
if (seed and seed != re_seed):
logging.warning("The seed: {} is not consistent with that from cheif-node: {}, "
"and the seed from cheif-node will be used.".format(global_seed, re_seed))
visible_devices = _get_visible_devices()
status = kit_lib.plugin_init(local_rank, hvd.size(), unique_id, re_seed,
visible_devices,
global_batch_size=kwargs["global_batch_size"])
return status
def _one_device_init(**kwargs):
"""
        This function is used to initialize only one GPU for SOK.
"""
local_rank = 0
unique_id = kit_lib.get_nccl_unique_id()
global_seed = kwargs.get("seed", None) or kit_lib.gen_random_seed()
visible_devices = _get_visible_devices()
status = kit_lib.plugin_init(local_rank, 1, unique_id, global_seed, visible_devices,
global_batch_size=kwargs["global_batch_size"])
return status
if has_strategy():
strategy = get_strategy()
@function
def _init_wrapper(run_fn, init_fn, **kwargs):
return run_fn(init_fn, kwargs=kwargs)
if isinstance(strategy, MirroredStrategy):
_init_fn = _single_worker_init
elif isinstance(strategy, MultiWorkerMirroredStrategy):
_init_fn = _multi_worker_init
else:
raise RuntimeError("This strategy type is not supported yet.")
if not kit_lib.in_tensorflow2():
_init_results = _init_wrapper(strategy.experimental_run_v2, _init_fn, **kwargs)
if hasattr(_init_results, "values"):
_init_results = _init_results.values
return _init_results
else:
return _init_wrapper(strategy.run, _init_fn, **kwargs)
elif "horovod.tensorflow" in sys.modules:
# imported horovod
import horovod.tensorflow as hvd
if not kit_lib.in_tensorflow2():
@function
def _init_wrapper(**kwargs):
return _horovod_init(**kwargs)
return _init_wrapper(**kwargs)
else:
return _horovod_init(**kwargs)
else:
# horovod not imported
return _one_device_init(**kwargs)
| 42.240506
| 110
| 0.60014
|
5f6b846487b83c9be86f878ba14a6b4843e90211
| 23,520
|
py
|
Python
|
planners/rrdtPlanner.py
|
jkwang1992/rrdt
|
5579081fc53f56573c772ff9a894c0093e0bc5e0
|
[
"MIT"
] | 8
|
2019-05-21T04:40:53.000Z
|
2020-10-17T07:20:04.000Z
|
planners/rrdtPlanner.py
|
soraxas/rrdt
|
5579081fc53f56573c772ff9a894c0093e0bc5e0
|
[
"MIT"
] | null | null | null |
planners/rrdtPlanner.py
|
soraxas/rrdt
|
5579081fc53f56573c772ff9a894c0093e0bc5e0
|
[
"MIT"
] | 3
|
2021-07-16T07:06:14.000Z
|
2022-02-09T12:06:06.000Z
|
import logging
import random
from overrides import overrides
from checkCollision import *
from helpers import *
from planners.particleFilterSampler import (ENERGY_START,
RANDOM_RESTART_PARTICLES_ENERGY_UNDER,
Particle, ParticleFilterSampler)
from planners.rrtPlanner import RRTPlanner
LOGGER = logging.getLogger(__name__)
MAX_NUMBER_NODES = 20000
def update_progress(progress, total_num, num_of_blocks=10):
if not logging.getLogger().isEnabledFor(logging.INFO):
return
percentage = progress / total_num
print(
'\r[{bar:<{num_of_blocks}}] {cur}/{total} {percen:0.1f}%'.format(
bar='#' * int(percentage * num_of_blocks),
cur=progress,
total=total_num,
percen=percentage * 100,
num_of_blocks=num_of_blocks),
end='')
if percentage == 1:
print()
class BFS:
"""Walk through the connected nodes with BFS"""
def __init__(self, node, validNodes):
self.visitedNodes = set()
self.validNodes = validNodes
self.next_node_to_visit = [node]
self.next_node = None
def visit_node(self, node):
self.visitedNodes.add(node)
self.next_node_to_visit.extend(node.edges)
self.next_node = node
def has_next(self):
if self.next_node is not None:
return True
if len(self.next_node_to_visit) < 1:
return False
# get next available node
while True:
_node = self.next_node_to_visit.pop(0)
if _node not in self.visitedNodes and _node in self.validNodes:
break
if len(self.next_node_to_visit) < 1:
return False
self.visit_node(_node)
return True
def next(self):
node = self.next_node
self.next_node = None
return node
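# Small hedged sketch of how the BFS helper above walks only the nodes listed in
# validNodes. The three throwaway nodes below are hypothetical stand-ins for tree
# nodes: BFS only ever reads their `edges` attribute.
def _example_bfs_walk():
    """Hypothetical illustration only; not used by the planner itself."""
    class _FakeNode:
        def __init__(self, name):
            self.name = name
            self.edges = []
    a, b, c = _FakeNode("a"), _FakeNode("b"), _FakeNode("c")
    a.edges, b.edges = [b, c], [a]
    bfs = BFS(a, validNodes={a, b})        # c is deliberately excluded
    visited = []
    while bfs.has_next():
        visited.append(bfs.next().name)    # yields "a" then "b", never "c"
    return visited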
class TreesManager:
def __init__(self, args, restart_when_merge):
self.root = None
self.disjointedTrees = []
self.args = args
self.restart_when_merge = restart_when_merge
def connect_two_nodes(self, newnode, nn, parent_tree=None,
draw_only=False):
"""Add node to disjoint tree OR root tree. Draw line for it too."""
if not draw_only:
if parent_tree is self.root:
# using rrt* algorithm to add each nodes
newnode, nn = self.args.planner.rrt_star_add_node(newnode, nn)
else:
newnode.edges.append(nn)
nn.edges.append(newnode)
if parent_tree is not None:
parent_tree.add_newnode(newnode)
self.args.env.draw_path(newnode, nn)
return newnode, nn
def add_pos_to_existing_tree(self, newnode, parent_tree):
"""Try to add pos to existing tree. If success, return True."""
nearest_nodes = self.find_nearest_node_from_neighbour(
node=newnode, parent_tree=parent_tree, radius=self.args.radius)
for nearest_neighbour_node, nearest_neighbour_tree in nearest_nodes:
if self.args.env.cc.path_is_free(newnode.pos,
nearest_neighbour_node.pos):
if parent_tree is None:
### joining ORPHAN NODE to a tree
self.connect_two_nodes(newnode, nearest_neighbour_node,
nearest_neighbour_tree)
parent_tree = nearest_neighbour_tree
LOGGER.debug(
" ==> During respawning particle, joining to existing tree with size: {}"
.format(len(nearest_neighbour_tree.nodes)))
else:
### joining a TREE to another tree
try:
parent_tree = self.join_trees(
parent_tree,
nearest_neighbour_tree,
tree1_node=newnode,
tree2_node=nearest_neighbour_node)
except AssertionError as e:
LOGGER.warning(
"== Assertion error in joining sampled point to existing tree... Skipping this node..."
)
return parent_tree
def find_nearest_node_from_neighbour(self, node, parent_tree, radius):
"""
        Given a tree, a node within that tree, and a radius,
        return a list of the closest nodes (and their corresponding trees) within the radius, drawn from other neighbouring trees.
        Return None if none exists.
        If the root exists in the list, add it at the last position (so the connection behaviour remains stable).
        This ensures all previous actions only add edges to each node, and only the last action
        modifies the entire tree structure with rrt* procedures.
"""
nearest_nodes = {}
for tree in [*self.disjointedTrees, self.root]:
if tree is parent_tree:
# skip self
continue
idx = self.args.planner.find_nearest_neighbour_idx(
node.pos, tree.poses[:len(tree.nodes)])
nn = tree.nodes[idx]
if dist(nn.pos, node.pos) < radius:
nearest_nodes[tree] = nn
# construct list of the found solution. And root at last (or else the result won't be stable)
root_nn = nearest_nodes.pop(self.root, None)
nearest_nodes_list = [(nearest_nodes[key], key)
for key in nearest_nodes]
if root_nn is not None:
nearest_nodes_list.append((root_nn, self.root))
return nearest_nodes_list
def join_tree_to_root(self, tree, middle_node):
"""It will join the given tree to the root"""
from env import Colour
bfs = BFS(middle_node, validNodes=tree.nodes)
# add all nodes from disjoint tree via rrt star method
total_num = len(tree.nodes)
progress = 0
LOGGER.info("> Joining to root tree")
while bfs.has_next():
newnode = bfs.next()
progress += 1
update_progress(progress, total_num, num_of_blocks=20)
# draw white (remove edge for visual) on top of disjointed tree
for e in (x for x in newnode.edges
if x not in bfs.visitedNodes and x in bfs.validNodes):
self.args.env.draw_path(e, newnode, Colour.white)
try:
self.connect_two_nodes(newnode, nn=None, parent_tree=self.root)
except LookupError:
LOGGER.warning(
"nn not found when attempting to joint to root. Ignoring..."
)
# remove this node's edges (as we don't have a use on them anymore) to free memory
del newnode.edges
assert progress == total_num, "Inconsistency in BFS walk {} != {}".format(
progress, total_num)
# raise Exception("NOT implemented yet")
def join_trees(self, tree1, tree2, tree1_node, tree2_node):
"""
        Join the two given trees together (along with their nodes).
        It will delete the particle reference from the second tree.
        It will use the RRT* method to add all nodes if one of the trees is the ROOT.
        tree1_node & tree2_node represent the nodes that join the two trees together. Currently this only
        matters when joining the root tree to a disjointed tree.
        Return the tree that has not been killed.
"""
assert tree1 is not tree2, "Both given tree should not be the same"
if tree1 not in self.disjointedTrees:
assert tree1 is self.root, "Given tree is neither in disjointed tree, nor is it the root: {}".format(
tree1)
if tree2 not in self.disjointedTrees:
assert tree2 is self.root, "Given tree is neither in disjointed tree, nor is it the root: {}".format(
tree2)
LOGGER.info(" => Joining trees with size {} to {}".format(
len(tree1.nodes), len(tree2.nodes)))
# Re-arrange only. Make it so that tree1 will always be root (if root exists among the two)
# And tree1 node must always be belong to tree1, tree2 node belong to tree2
if tree1 is not self.root:
# set tree1 as root (if root exists among the two)
tree1, tree2 = tree2, tree1
if tree1_node in tree2.nodes or tree2_node in tree1.nodes:
# swap to correct position
tree1_node, tree2_node = tree2_node, tree1_node
        assert tree1_node in tree1.nodes, "Given node does not belong to its corresponding tree"
        assert tree2_node in tree2.nodes, "Given node does not belong to its corresponding tree"
if tree1 is self.root:
# find which middle_node belongs to the disjointed tree
self.join_tree_to_root(tree2, tree2_node)
self.connect_two_nodes(tree1_node, tree2_node, draw_only=True)
else:
self.connect_two_nodes(tree1_node, tree2_node)
tree1.extend_tree(tree2)
del tree2.nodes
del tree2.poses
self.disjointedTrees.remove(tree2)
if self.restart_when_merge:
# restart all particles
for p in tree2.particle_handler:
p.restart()
del tree2.particle_handler
else:
# pass the remaining particle to the remaining tree
for p in tree2.particle_handler:
p.tree = tree1
tree1.particle_handler.append(p)
return tree1
RANDOM_RESTART_EVERY = 20
ENERGY_START = 10
RANDOM_RESTART_PARTICLES_ENERGY_UNDER = 0.75
class DisjointTreeParticle(Particle):
@overrides
def __init__(self,
tree_manager,
p_manager,
direction=None,
pos=None,
isroot=False,
startPtNode=None):
self.isroot = isroot
self.p_manager = p_manager
self.tree_manager = tree_manager
self.last_node = None
if isroot:
self.tree_manager.root = TreeRoot(particle_handler=self)
self.tree = self.tree_manager.root
self.tree.add_newnode(startPtNode)
super().__init__(direction=direction, pos=pos)
@overrides
def restart(self, direction=None, pos=None, restart_when_merge=True):
if self.isroot:
# root particles has a different initialisation method
# (for the first time)
self.isroot = False
super().restart(direction, pos)
return
self.last_node = None
merged_tree = None
if pos is None:
# get random position
pos = self.p_manager.new_pos_in_free_space()
merged_tree = self.tree_manager.add_pos_to_existing_tree(
Node(pos), None)
if merged_tree is not None and restart_when_merge:
# Successfully found a new valid node that's close to existing tree
# Return False to indicate it (and abort restart if we want more exploration)
self.p_manager.add_to_restart(self)
# we need to abort the restart procedure. add this to pending restart
return False
try:
self.tree.particle_handler.remove(self)
except AttributeError:
# probably this is its first init
pass
# initialise to initial value, create new d-tree
self.p_manager.modify_energy(particle_ref=self, set_val=ENERGY_START)
if merged_tree is not None:
self.tree = merged_tree
merged_tree.particle_handler.append(self)
else:
self.tree = TreeDisjoint(particle_handler=self)
self.tree.add_newnode(Node(pos))
self.tree_manager.disjointedTrees.append(self.tree)
super().restart(direction, pos)
return True
class RRdTSampler(ParticleFilterSampler):
@overrides
def __init__(self, restart_when_merge=True):
self.restart_when_merge = restart_when_merge
super().__init__()
@overrides
def init(self, **kwargs):
super().init(**kwargs)
global MAX_NUMBER_NODES
MAX_NUMBER_NODES = self.args.max_number_nodes
self.lsamplers_to_be_restart = []
self.tree_manager = TreesManager(
args=self.args, restart_when_merge=self.restart_when_merge)
# ditch the particles created by the original particle filter sampler, and
# create ones that has link towards the disjointed tree
self.p_manager.particles = []
for _ in range(self.p_manager.num_particles - 1):
pos = self.p_manager.new_pos_in_free_space()
dt_p = DisjointTreeParticle(
direction=random.uniform(0, math.pi * 2),
pos=pos,
tree_manager=self.tree_manager,
p_manager=self.p_manager,
)
self.p_manager.particles.append(dt_p)
# spawn one that comes from the root
self.p_manager.particles.append(
DisjointTreeParticle(
direction=random.uniform(0, math.pi * 2),
pos=self.start_pos,
isroot=True,
startPtNode=self.args.env.startPt,
tree_manager=self.tree_manager,
p_manager=self.p_manager,
))
def particles_random_free_space_restart(self):
for i in range(self.p_manager.size()):
if self.p_manager.particles_energy[
i] < RANDOM_RESTART_PARTICLES_ENERGY_UNDER:
self.p_manager.add_to_restart(self.p_manager.particles[i])
@overrides
def report_success(self, idx, **kwargs):
self.p_manager.particles[idx].last_node = kwargs['newnode']
self.p_manager.confirm(idx, kwargs['pos'])
self.p_manager.modify_energy(idx=idx, factor=0.95)
@overrides
def get_valid_next_pos(self):
"""Loop until we find a valid next node"""
while True:
_tmp = self.get_next_pos()
if _tmp is None:
# This denotes a particle had tried to restart and added the new node
# to existing tree instead. Skip remaining steps and iterate to next loop
return None
rand_pos = _tmp[0]
self.args.env.stats.add_sampled_node(rand_pos)
if not self.args.env.collides(rand_pos):
return _tmp
report_fail = _tmp[-1]
report_fail(pos=rand_pos, obstacle=True)
self.args.env.stats.add_invalid(obs=True)
def restart_all_pending_local_samplers(self):
# restart all pending local samplers
while len(self.p_manager.local_samplers_to_be_rstart) > 0:
            # during the process of restart, if the new restart position
# is close to an existing tree, it will simply add to that new tree.
if not self.p_manager.local_samplers_to_be_rstart[0].restart(
restart_when_merge=self.restart_when_merge):
# This flag denotes that a new position was found among the trees,
                # and it NEEDS to get back to restarting particles in the next iterations
return False
self.p_manager.local_samplers_to_be_rstart.pop(0)
return True
@overrides
def get_next_pos(self):
self.counter += 1
self._c_random += 1
self._c_resample += 1
if self._c_random > RANDOM_RESTART_EVERY > 0:
self._c_random = 0
self.particles_random_free_space_restart()
if not self.restart_all_pending_local_samplers():
LOGGER.debug("Adding node to existing trees.")
return None
# get a node to random walk
choice = self.get_random_choice()
# NOTE This controls if testing (via mouse) or actual runs
pos = self.randomWalk(choice)
# pos, choice = self.random_walk_by_mouse()
return (pos, self.p_manager.particles[choice].tree,
self.p_manager.particles[choice].last_node,
lambda c=choice, **kwargs: self.report_success(c, **kwargs),
lambda c=choice, **kwargs: self.report_fail(c, **kwargs))
def random_walk_by_mouse(self):
"""FOR testing purpose. Mimic random walk, but do so via mouse click."""
from planners.mouseSampler import MouseSampler as mouse
pos = mouse.get_mouse_click_position(scaling=self.scaling)
        # find the closest particle from this position
_dist = None
p_idx = None
for i in range(len(self.p_manager.particles)):
p = self.p_manager.particles[i]
if _dist is None or _dist > dist(pos, p.pos):
_dist = dist(pos, p.pos)
p_idx = i
LOGGER.debug("num of tree: {}".format(
len(self.tree_manager.disjointedTrees)))
self.p_manager.new_pos(idx=p_idx, pos=pos, dir=0)
return pos, p_idx
############################################################
## PATCHING RRT with disjointed-tree specific stuff ##
############################################################
class Node:
def __init__(self, pos):
        self.pos = np.array(pos)  # index 0 is x, index 1 is y
        self.cost = 0
self.edges = []
self.children = []
def __repr__(self):
try:
num_edges = len(self.edges)
except AttributeError:
num_edges = "DELETED"
return "Node(pos={}, cost={}, num_edges={})".format(
self.pos, self.cost, num_edges)
class RRdTPlanner(RRTPlanner):
@overrides
def init(self, *argv, **kwargs):
super().init(*argv, **kwargs)
self.goal_tree_nodes = []
self.goal_tree_poses = np.empty((self.args.max_number_nodes + 50,
                                         2))  # +50 to prevent overflow
self.goal_tree_nodes.append(self.args.env.goalPt)
self.goal_tree_poses[0] = self.args.env.goalPt.pos
self.found_solution = False
self.goal_tree_turn = False
@overrides
def run_once(self):
        # Get a sample that is free (not in blocked space)
_tmp = self.args.sampler.get_valid_next_pos()
if _tmp is None:
# we have added a new samples when respawning a local sampler
return
rand_pos, parent_tree, last_node, report_success, report_fail = _tmp
if last_node is not None:
            # use the last successful node as the nearest node
            # This is exploiting the advantage of the local sampler :)
nn = last_node
newpos = rand_pos
else:
idx = self.find_nearest_neighbour_idx(
rand_pos, parent_tree.poses[:len(parent_tree.nodes)])
nn = parent_tree.nodes[idx]
# get an intermediate node according to step-size
newpos = self.args.env.step_from_to(nn.pos, rand_pos)
        # check if it is free or not
if not self.args.env.cc.path_is_free(nn.pos, newpos):
self.args.env.stats.add_invalid(obs=False)
report_fail(pos=rand_pos, free=False)
else:
newnode = Node(newpos)
self.args.env.stats.add_free()
self.args.sampler.add_tree_node(newnode.pos)
report_success(newnode=newnode, pos=newnode.pos)
######################
newnode, nn = self.args.sampler.tree_manager.connect_two_nodes(
newnode, nn, parent_tree)
# try to add this newnode to existing trees
self.args.sampler.tree_manager.add_pos_to_existing_tree(
newnode, parent_tree)
def rrt_star_add_node(self, newnode, nn=None):
"""This function perform finding optimal parent, and rewiring."""
newnode, nn = self.choose_least_cost_parent(
newnode, nn=nn, nodes=self.args.sampler.tree_manager.root.nodes)
self.rewire(newnode, nodes=self.args.sampler.tree_manager.root.nodes)
# check for goal condition
if dist(newnode.pos, self.goalPt.pos) < self.args.goal_radius:
if newnode.cost < self.c_max:
self.c_max = newnode.cost
self.goalPt.parent = newnode
newnode.children.append(self.goalPt.parent)
return newnode, nn
@overrides
def paint(self):
drawn_nodes_pairs = set()
# Draw disjointed trees
for tree in self.args.sampler.tree_manager.disjointedTrees:
bfs = BFS(tree.nodes[0], validNodes=tree.nodes)
while bfs.has_next():
newnode = bfs.next()
for e in newnode.edges:
new_set = frozenset({newnode, e})
if new_set not in drawn_nodes_pairs:
drawn_nodes_pairs.add(new_set)
self.args.env.draw_path(newnode, e)
# Draw root tree
for n in self.args.sampler.tree_manager.root.nodes:
if n.parent is not None:
new_set = frozenset({n, n.parent})
if new_set not in drawn_nodes_pairs:
drawn_nodes_pairs.add(new_set)
self.args.env.draw_path(n, n.parent, Colour.orange)
# for nodes in (self.nodes, self.goal_tree_nodes):
# for n in nodes:
# if n.parent is not None:
# new_set = frozenset({n, n.parent})
# if new_set not in drawn_nodes_pairs:
# drawn_nodes_pairs.add(new_set)
# self.args.env.draw_path(n, n.parent)
self.draw_solution_path()
############################################################
## Classes ##
############################################################
class TreeRoot:
def __init__(self, particle_handler):
self.particle_handler = [particle_handler]
self.nodes = []
self.poses = np.empty((MAX_NUMBER_NODES + 50,
                               2))  # +50 to prevent overflow
# This stores the last node added to this tree (by local sampler)
def add_newnode(self, node):
self.poses[len(self.nodes)] = node.pos
self.nodes.append(node)
def extend_tree(self, tree):
self.poses[len(self.nodes):len(self.nodes) +
len(tree.nodes)] = tree.poses[:len(tree.nodes)]
self.nodes.extend(tree.nodes)
def __repr__(self):
string = super().__repr__()
string += '\n'
import pprint
string += pprint.pformat(vars(self), indent=4)
# string += ', '.join("%s: %s" % item for item in vars(self).items())
return string
class TreeDisjoint(TreeRoot):
@overrides
def __init__(self, **kwargs):
super().__init__(**kwargs)
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
| 40.551724
| 124
| 0.580612
|
ec443a38acb01b203cda05b716c5ce5f3cf3575a
| 1,694
|
py
|
Python
|
sdk/identity/azure-identity/azure/identity/_constants.py
|
lsundaralingam/azure-sdk-for-python
|
538a5055b4330c6b324fa54a00e96d73d882fa63
|
[
"MIT"
] | 1
|
2021-04-26T21:15:01.000Z
|
2021-04-26T21:15:01.000Z
|
sdk/identity/azure-identity/azure/identity/_constants.py
|
lsundaralingam/azure-sdk-for-python
|
538a5055b4330c6b324fa54a00e96d73d882fa63
|
[
"MIT"
] | 1
|
2021-01-19T22:41:38.000Z
|
2021-01-19T22:41:38.000Z
|
sdk/identity/azure-identity/azure/identity/_constants.py
|
lsundaralingam/azure-sdk-for-python
|
538a5055b4330c6b324fa54a00e96d73d882fa63
|
[
"MIT"
] | null | null | null |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
DEVELOPER_SIGN_ON_CLIENT_ID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46"
AZURE_VSCODE_CLIENT_ID = "aebc6443-996d-45c2-90f0-388ff96faa56"
VSCODE_CREDENTIALS_SECTION = "VS Code Azure"
DEFAULT_REFRESH_OFFSET = 300
DEFAULT_TOKEN_REFRESH_RETRY_DELAY = 30
class AzureAuthorityHosts:
AZURE_CHINA = "login.chinacloudapi.cn"
AZURE_GERMANY = "login.microsoftonline.de"
AZURE_GOVERNMENT = "login.microsoftonline.us"
AZURE_PUBLIC_CLOUD = "login.microsoftonline.com"
class KnownAuthorities(AzureAuthorityHosts):
"""Alias of :class:`AzureAuthorityHosts`"""
class EnvironmentVariables:
AZURE_CLIENT_ID = "AZURE_CLIENT_ID"
AZURE_CLIENT_SECRET = "AZURE_CLIENT_SECRET"
AZURE_TENANT_ID = "AZURE_TENANT_ID"
CLIENT_SECRET_VARS = (AZURE_CLIENT_ID, AZURE_CLIENT_SECRET, AZURE_TENANT_ID)
AZURE_CLIENT_CERTIFICATE_PATH = "AZURE_CLIENT_CERTIFICATE_PATH"
CERT_VARS = (AZURE_CLIENT_ID, AZURE_CLIENT_CERTIFICATE_PATH, AZURE_TENANT_ID)
AZURE_USERNAME = "AZURE_USERNAME"
AZURE_PASSWORD = "AZURE_PASSWORD"
USERNAME_PASSWORD_VARS = (AZURE_CLIENT_ID, AZURE_USERNAME, AZURE_PASSWORD)
AZURE_POD_IDENTITY_TOKEN_URL = "AZURE_POD_IDENTITY_TOKEN_URL"
IDENTITY_ENDPOINT = "IDENTITY_ENDPOINT"
IDENTITY_HEADER = "IDENTITY_HEADER"
IDENTITY_SERVER_THUMBPRINT = "IDENTITY_SERVER_THUMBPRINT"
IMDS_ENDPOINT = "IMDS_ENDPOINT"
MSI_ENDPOINT = "MSI_ENDPOINT"
MSI_SECRET = "MSI_SECRET"
AZURE_AUTHORITY_HOST = "AZURE_AUTHORITY_HOST"
AZURE_REGIONAL_AUTHORITY_NAME = "AZURE_REGIONAL_AUTHORITY_NAME"
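# Hedged sketch (not in the original module): the *_VARS tuples above are
# typically used to check whether a complete credential configuration is present
# in the environment, e.g.:
#
#   import os
#   if all(os.environ.get(v) for v in EnvironmentVariables.CLIENT_SECRET_VARS):
#       ...  # enough configuration for a client-secret credential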
| 35.291667
| 81
| 0.757379
|
28ee36b6510d97df2cc947ff298c8faccc8a66b7
| 1,288
|
py
|
Python
|
custom_imports/file_module/ext_finder.py
|
madman-bob/python-custom-imports
|
e9d6979865bfde5f149a2190d8f2895d333ab219
|
[
"MIT"
] | null | null | null |
custom_imports/file_module/ext_finder.py
|
madman-bob/python-custom-imports
|
e9d6979865bfde5f149a2190d8f2895d333ab219
|
[
"MIT"
] | 1
|
2020-05-21T02:36:07.000Z
|
2020-05-21T12:55:24.000Z
|
custom_imports/file_module/ext_finder.py
|
madman-bob/python-custom-imports
|
e9d6979865bfde5f149a2190d8f2895d333ab219
|
[
"MIT"
] | null | null | null |
import sys
from dataclasses import dataclass
from pathlib import Path
from types import ModuleType
from typing import Iterable, Optional
from custom_imports.importer import Finder
__all__ = ["FileModuleExtensionFinder"]
@dataclass(frozen=True)
class FileModuleExtensionFinder(Finder[Path]):
"""
Finder for file based modules by file extensions.
FileModuleExtensionFinder(ext)
This Finder interprets a module's name as a filename, with extension ext.
Parent modules are interpreted as directories.
This provides a relative path, which is searched for on the standard module
search path. If a file with that relative path is found, then the absolute
Path of that file is returned as its module locator.
"""
extension: str
def find_path(self, fullname: str, search_paths: Iterable[str]) -> Optional[Path]:
rel_file_path = Path(fullname.replace(".", "/") + "." + self.extension)
for path in search_paths:
abs_file_path = path / rel_file_path
if abs_file_path.is_file():
return abs_file_path
def find_module_locator(
self, fullname: str, path: Iterable[str], target: Optional[ModuleType] = None
) -> Optional[Path]:
return self.find_path(fullname, sys.path)
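# Hedged usage sketch: "notes.txt" below is a hypothetical file assumed to live
# somewhere on sys.path; the finder maps the dotted module name "docs.notes" to
# the concrete Path of docs/notes.txt, or returns None when no such file exists.
if __name__ == "__main__":
    finder = FileModuleExtensionFinder("txt")
    locator = finder.find_module_locator("docs.notes", path=[])
    print(locator)  # e.g. PosixPath('/some/search/path/docs/notes.txt') or None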
| 31.414634
| 86
| 0.709627
|
38c97db66a92e3e65d4bf78d2a5f12f8f118c9a1
| 3,214
|
py
|
Python
|
optlang_enumerator/cobra_cnapy.py
|
cnapy-org/optlang_enumerator
|
4a754608bc1aa142c2ad5f27834868ab6b1ca9a8
|
[
"Apache-2.0"
] | 2
|
2021-08-30T13:52:35.000Z
|
2021-08-30T17:57:18.000Z
|
optlang_enumerator/cobra_cnapy.py
|
cnapy-org/optlang_enumerator
|
4a754608bc1aa142c2ad5f27834868ab6b1ca9a8
|
[
"Apache-2.0"
] | null | null | null |
optlang_enumerator/cobra_cnapy.py
|
cnapy-org/optlang_enumerator
|
4a754608bc1aa142c2ad5f27834868ab6b1ca9a8
|
[
"Apache-2.0"
] | 2
|
2021-05-07T15:18:20.000Z
|
2021-11-15T10:38:35.000Z
|
import cobra
from cobra.util.context import get_context
from functools import partial
import hashlib
import pickle
"""
Extensions of cobra classes used in optlang_enumerator
"""
def set_hash_value(self):
context = get_context(self)
if context:
context(partial(self.restore_hash_value, self._hash_value))
    # float(s): make stable with respect to the number type of the stoichiometric coefficients (1 != 1.0)
self._hash_value = hashlib.md5(pickle.dumps(tuple((sorted((m.id, float(s)) for m, s in self.metabolites.items()),
self.lower_bound, self.upper_bound)))).digest()
def get_hash_value(self):
return self._hash_value
def restore_hash_value(self, hash_value):
self._hash_value = hash_value
cobra.Reaction._hash_value = None
"""
hash_value takes only the reaction stoichiometry (via the metabolites)
and reaction bounds into account
"""
cobra.Reaction.hash_value = property(fset=None, fget=get_hash_value)
cobra.Reaction.set_hash_value = set_hash_value
cobra.Reaction.restore_hash_value = restore_hash_value
def set_reaction_hashes(self):
for r in self.reactions:
r.set_hash_value()
cobra.Model.set_reaction_hashes = set_reaction_hashes
"""
stoichiometry_hash() only sets up the initial hash object,
further data can be added with update() and it can
be processed with digest() or hexdigest()
"""
cobra.Model.stoichiometry_hash = lambda self: hashlib.md5(pickle.dumps(self.reactions.list_attr("hash_value")))
cobra.Model._stoichiometry_hash_object = None
def set_stoichiometry_hash_object(self):
context = get_context(self)
if context:
context(partial(self.restore_stoichiometry_hash_object, self._stoichiometry_hash_object))
self._stoichiometry_hash_object = self.stoichiometry_hash()
self._stoichiometry_hash_object.digest()
def get_stoichiometry_hash_object(self):
return self._stoichiometry_hash_object
def restore_stoichiometry_hash_object(self, stoichiometry_hash_object):
    self._stoichiometry_hash_object = stoichiometry_hash_object
cobra.Model.set_stoichiometry_hash_object = set_stoichiometry_hash_object
"""
a copy() of the stoichiometry_hash_object can then be used for
calculating problem-specific hashes
"""
cobra.Model.stoichiometry_hash_object = property(fset=None, fget=get_stoichiometry_hash_object)
cobra.Model.restore_stoichiometry_hash_object = restore_stoichiometry_hash_object
class CNApyModel(cobra.Model):
@staticmethod
def read_sbml_model(file_name):
model: cobra.Model = cobra.io.read_sbml_model(file_name)
model.set_reaction_hashes()
model.set_stoichiometry_hash_object()
"""
        kludge because in COBRApy creating a model from an SBML file
        is not implemented as a method of the Model class
this is ugly but works because all new properties are added
to cobra.Reaction and cobra.Model
"""
model.__class__ = CNApyModel
return model
def __init__(self, id_or_model=None, name=None):
super().__init__(id_or_model=id_or_model, name=name)
if id_or_model is None:
self.id = ""
self.set_reaction_hashes()
self.set_stoichiometry_hash_object()
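# Illustrative sketch (not part of the module): "mini.xml" is a hypothetical
# SBML file. The patched classes above expose a per-reaction hash and a
# model-level stoichiometry hash object whose copy() can be extended with
# problem-specific data.
if __name__ == "__main__":
    model = CNApyModel.read_sbml_model("mini.xml")
    reaction = model.reactions[0]
    print(reaction.hash_value)                      # md5 digest of stoichiometry + bounds
    h = model.stoichiometry_hash_object.copy()      # start from the model-wide hash
    h.update(b"knockout: " + reaction.id.encode())  # add problem-specific data
    print(h.hexdigest())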
| 36.11236
| 117
| 0.757934
|
2078530ddd9cd08fb2d656a9dd3d50de0d863cfb
| 2,110
|
py
|
Python
|
src/get_reference_metadata.py
|
Fnyasimi/summary-gwas-imputation
|
eb6c744fd30afd707dbdc81cf2e73723e8b6416c
|
[
"MIT"
] | 20
|
2018-12-26T09:51:32.000Z
|
2022-01-22T17:21:14.000Z
|
src/get_reference_metadata.py
|
Fnyasimi/summary-gwas-imputation
|
eb6c744fd30afd707dbdc81cf2e73723e8b6416c
|
[
"MIT"
] | 14
|
2019-12-06T17:52:32.000Z
|
2022-03-24T15:45:11.000Z
|
src/get_reference_metadata.py
|
Fnyasimi/summary-gwas-imputation
|
eb6c744fd30afd707dbdc81cf2e73723e8b6416c
|
[
"MIT"
] | 18
|
2019-01-25T14:45:08.000Z
|
2022-03-11T20:41:35.000Z
|
#!/usr/bin/env python
__author__ = "alvaro barbeira"
import os
import logging
import pandas
from genomic_tools_lib import Utilities, Logging
from genomic_tools_lib.data_management import GTExMisc
from genomic_tools_lib.file_formats import ModelTraining
from genomic_tools_lib.individual_data import Genotype
def run(args):
if os.path.exists(args.output):
logging.info("Output exists. Nope")
return
filters = {x[0]:x[1:] for x in args.filter}
maf_filter = float(filters["MAF"][0]) if "MAF" in filters else None
logging.info("Loading GTEX variant map")
gtex_snp_key = GTExMisc.load_gtex_variant_to_rsid(args.annotation[0], args.rsid_column)
logging.info("Processing genotype")
m = []
for mean, metadata, ids in ModelTraining.dosage_generator(args.genotype, gtex_snp_key, dosage_conversion=ModelTraining._mean, do_none=True):
if maf_filter:
f = mean / 2 if mean < 1 else 1 - mean / 2
            if f < maf_filter:
continue
m.append(metadata)
m = Utilities.to_dataframe(m, [x[1] for x in Genotype.MetadataTFE.order])
if "TOP_CHR_POS_BY_FREQ" in filters:
logging.info("Simplifying multi-allelic variants")
m = Genotype._monoallelic_by_frequency(m)
logging.info("Saving...")
Utilities.save_dataframe(m, args.output)
logging.info("Finished")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Parse a genotype text file for its variant metadata")
parser.add_argument("-genotype", help="Path to genotype file")
parser.add_argument("-annotation", help="Annotation file", nargs="+")
parser.add_argument("-output", help = "Where to save the file")
parser.add_argument("-filter", help="What to apply", nargs="+", action="append")
parser.add_argument("-parsimony", help="Log parsimony", type=int, default=10)
parser.add_argument("-rsid_column", help = "Column name with rsid variant identifiers", default = "rs_id_dbSNP150_GRCh38p7")
args = parser.parse_args()
Logging.configure_logging(args.parsimony)
run(args)
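# Hypothetical invocation sketch (all file names below are placeholders, not
# paths from the original repository):
#
#   python src/get_reference_metadata.py \
#       -genotype data/genotype_dosages.txt.gz \
#       -annotation data/gtex_variant_lookup.txt.gz \
#       -output results/reference_metadata.txt.gz \
#       -filter MAF 0.01 \
#       -filter TOP_CHR_POS_BY_FREQ \
#       -rsid_column rs_id_dbSNP150_GRCh38p7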
| 36.37931
| 144
| 0.708057
|
9d06a1cc7818dc356968666a60407b175ac2cb8f
| 674
|
py
|
Python
|
migrations/versions/cd9f3f359db1_add_setter_username.py
|
melodily/wordle-with-friends
|
2485c8b3127db3a921410463bab6a61f486aeb1d
|
[
"MIT"
] | null | null | null |
migrations/versions/cd9f3f359db1_add_setter_username.py
|
melodily/wordle-with-friends
|
2485c8b3127db3a921410463bab6a61f486aeb1d
|
[
"MIT"
] | null | null | null |
migrations/versions/cd9f3f359db1_add_setter_username.py
|
melodily/wordle-with-friends
|
2485c8b3127db3a921410463bab6a61f486aeb1d
|
[
"MIT"
] | null | null | null |
"""Add setter username
Revision ID: cd9f3f359db1
Revises: efa327561105
Create Date: 2022-02-06 13:55:24.170817
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cd9f3f359db1'
down_revision = 'efa327561105'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('games', sa.Column('setter_username', sa.String(), nullable=False))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('games', 'setter_username')
# ### end Alembic commands ###
| 23.241379
| 85
| 0.698813
|
2da520c9cac1ddc5dd7cced56642239f26b0e895
| 839
|
py
|
Python
|
oss_server/notification/urls.py
|
pallet-io/Pallet-API
|
fd3b8ed4c8063d9010ed53ace6ec068c983ae22e
|
[
"BSD-3-Clause"
] | 10
|
2018-01-30T06:21:43.000Z
|
2022-01-03T12:18:07.000Z
|
oss_server/notification/urls.py
|
pallet-io/Pallet-API
|
fd3b8ed4c8063d9010ed53ace6ec068c983ae22e
|
[
"BSD-3-Clause"
] | 4
|
2018-02-21T02:30:37.000Z
|
2018-03-04T05:20:30.000Z
|
oss_server/notification/urls.py
|
pallet-io/Pallet-API
|
fd3b8ed4c8063d9010ed53ace6ec068c983ae22e
|
[
"BSD-3-Clause"
] | 2
|
2018-01-31T04:22:52.000Z
|
2018-03-10T14:04:35.000Z
|
from django.conf.urls import url
from .v1.views import AddressSubscriptionCreateView, TxSubscriptionCreateView
from .v1.views import AddressSubscriptionDetailView, TxSubscriptionDetailView
from .v1.views import AddressSubscriptionDeleteView, TxSubscriptionDeleteView
urlpatterns = [
url(r'^v1/address/subscription$', AddressSubscriptionCreateView.as_view()),
url(r'^v1/address/subscription/(?P<pk>[0-9a-z-]{36})$', AddressSubscriptionDetailView.as_view()),
url(r'^v1/address/subscription/(?P<pk>[0-9a-z-]{36})/delete$', AddressSubscriptionDeleteView.as_view()),
url(r'^v1/tx/subscription$', TxSubscriptionCreateView.as_view()),
url(r'^v1/tx/subscription/(?P<pk>[0-9a-z-]{36})$', TxSubscriptionDetailView.as_view()),
url(r'^v1/tx/subscription/(?P<pk>[0-9a-z-]{36})/delete$', TxSubscriptionDeleteView.as_view()),
]
| 55.933333
| 108
| 0.753278
|
db02fa4730e4ccf0343a9dece17b1d04ab515d80
| 1,195
|
py
|
Python
|
setup.py
|
ma-sadeghi/pardiso4py
|
f8396df2040be4e5d8c854a4bade116f6e658f67
|
[
"BSD-3-Clause"
] | 6
|
2020-11-24T09:46:19.000Z
|
2022-01-12T13:22:51.000Z
|
setup.py
|
ma-sadeghi/pardiso4py
|
f8396df2040be4e5d8c854a4bade116f6e658f67
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
ma-sadeghi/pardiso4py
|
f8396df2040be4e5d8c854a4bade116f6e658f67
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup
setup(
name='pypardiso',
version="0.3.1",
packages=['pypardiso'],
install_requires=['mkl', 'mkl-service', 'numpy', 'scipy', 'psutil'],
author="Adrian Haas",
license=open('LICENSE.txt').read(),
url="https://github.com/haasad/PyPardisoProject",
long_description=open('README.md').read(),
description='Python interface to the Intel MKL Pardiso library to solve large sparse linear systems of equations',
classifiers=[
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Visualization',
],
)
| 39.833333
| 118
| 0.629289
|
7948659d614246078f34235e52dcb4383d957696
| 1,385
|
py
|
Python
|
ooobuild/dyn/xml/crypto/x_cipher_context.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/dyn/xml/crypto/x_cipher_context.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/dyn/xml/crypto/x_cipher_context.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is an auto-generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.xml.crypto
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.xml.crypto import XCipherContext as XCipherContext
setattr(XCipherContext, '__ooo_ns__', 'com.sun.star.xml.crypto')
setattr(XCipherContext, '__ooo_full_ns__', 'com.sun.star.xml.crypto.XCipherContext')
setattr(XCipherContext, '__ooo_type_name__', 'interface')
else:
from ....lo.xml.crypto.x_cipher_context import XCipherContext as XCipherContext
__all__ = ['XCipherContext']
| 37.432432
| 88
| 0.768231
|
4b91ff41ca8f24fa293be7055456e91b81b261c7
| 2,435
|
py
|
Python
|
lib/googlecloudsdk/api_lib/dns/export_util.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/api_lib/dns/export_util.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 11
|
2020-02-29T02:51:12.000Z
|
2022-03-30T23:20:08.000Z
|
lib/googlecloudsdk/api_lib/dns/export_util.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 1
|
2020-07-24T18:47:35.000Z
|
2020-07-24T18:47:35.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for exporting record-sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from dns import name
from dns import rdata
from dns import rdataclass
from dns import rdatatype
from dns import zone
from googlecloudsdk.core import exceptions
from googlecloudsdk.core.resource import resource_printer
class Error(exceptions.Error):
"""Base exception for all export errors."""
class UnableToExportRecordsToFile(Error):
"""Unable to export records to specified file."""
def WriteToZoneFile(zone_file, record_sets, domain):
"""Writes the given record-sets in zone file format to the given file.
Args:
zone_file: file, File into which the records should be written.
record_sets: list, ResourceRecordSets to be written out.
domain: str, The origin domain for the zone file.
"""
zone_contents = zone.Zone(name.from_text(domain))
for record_set in record_sets:
rdset = zone_contents.get_rdataset(record_set.name,
record_set.type,
create=True)
for rrdata in record_set.rrdatas:
rdset.add(rdata.from_text(rdataclass.IN,
rdatatype.from_text(record_set.type),
str(rrdata),
origin=zone_contents.origin),
ttl=record_set.ttl)
zone_contents.to_file(zone_file, relativize=False)
def WriteToYamlFile(yaml_file, record_sets):
"""Writes the given record-sets in yaml format to the given file.
Args:
yaml_file: file, File into which the records should be written.
record_sets: list, ResourceRecordSets to be written out.
"""
resource_printer.Print(record_sets, print_format='yaml', out=yaml_file)
| 35.289855
| 74
| 0.708419
|
049f4ec0eeb5742da51514fcc9ce4fae477f4443
| 7,784
|
py
|
Python
|
neutron/db/migration/alembic_migrations/versions/52ff27f7567a_support_for_vpnaas.py
|
CingHu/neutron-ustack
|
a1da17d0d63b3342a48c35da37984d6386ee1016
|
[
"Apache-2.0"
] | 3
|
2015-02-02T02:51:39.000Z
|
2015-02-23T10:20:23.000Z
|
neutron/db/migration/alembic_migrations/versions/52ff27f7567a_support_for_vpnaas.py
|
CingHu/neutron-ustack
|
a1da17d0d63b3342a48c35da37984d6386ee1016
|
[
"Apache-2.0"
] | 4
|
2015-02-23T10:21:11.000Z
|
2015-03-04T09:28:20.000Z
|
neutron/db/migration/alembic_migrations/versions/52ff27f7567a_support_for_vpnaas.py
|
CingHu/neutron-ustack
|
a1da17d0d63b3342a48c35da37984d6386ee1016
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Support for VPNaaS
Revision ID: 52ff27f7567a
Revises: 39cf3f799352
Create Date: 2013-07-14 23:04:13.395955
"""
# revision identifiers, used by Alembic.
revision = '52ff27f7567a'
down_revision = '39cf3f799352'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.services.vpn.plugin.VPNDriverPlugin',
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
vpn_auth_algorithms = sa.Enum('sha1', name='vpn_auth_algorithms')
vpn_encrypt_algorithms = sa.Enum('3des', 'aes-128', 'aes-256', 'aes-192',
name='vpn_encrypt_algorithms')
ike_phase1_mode = sa.Enum('main', name='ike_phase1_mode')
vpn_lifetime_units = sa.Enum('seconds', 'kilobytes', name='vpn_lifetime_units')
ike_versions = sa.Enum('v1', 'v2', name='ike_versions')
vpn_pfs = sa.Enum('group2', 'group5', 'group14', name='vpn_pfs')
ipsec_transform_protocols = sa.Enum('esp', 'ah', 'ah-esp',
name='ipsec_transform_protocols')
ipsec_encapsulations = sa.Enum('tunnel', 'transport',
name='ipsec_encapsulations')
vpn_dpd_actions = sa.Enum('hold', 'clear', 'restart', 'disabled',
'restart-by-peer', name='vpn_dpd_actions')
vpn_initiators = sa.Enum('bi-directional', 'response-only',
name='vpn_initiators')
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.create_table(
'ikepolicies',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
sa.Column(
'auth_algorithm', vpn_auth_algorithms, nullable=False),
sa.Column(
'encryption_algorithm', vpn_encrypt_algorithms, nullable=False),
sa.Column(
'phase1_negotiation_mode', ike_phase1_mode, nullable=False),
sa.Column(
'lifetime_units', vpn_lifetime_units, nullable=False),
sa.Column('lifetime_value', sa.Integer(), nullable=False),
sa.Column('ike_version', ike_versions, nullable=False),
sa.Column('pfs', vpn_pfs, nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'ipsecpolicies',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
sa.Column('transform_protocol', ipsec_transform_protocols,
nullable=False),
sa.Column('auth_algorithm', vpn_auth_algorithms, nullable=False),
sa.Column('encryption_algorithm', vpn_encrypt_algorithms,
nullable=False),
sa.Column(
'encapsulation_mode', ipsec_encapsulations, nullable=False),
sa.Column(
'lifetime_units', vpn_lifetime_units, nullable=False),
sa.Column(
'lifetime_value', sa.Integer(), nullable=False),
sa.Column('pfs', vpn_pfs, nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'vpnservices',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
sa.Column('status', sa.String(length=16), nullable=False),
sa.Column('admin_state_up', sa.Boolean(), nullable=False),
sa.Column('subnet_id', sa.String(length=36), nullable=False),
sa.Column('router_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['router_id'], ['routers.id'], ),
sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'ipsec_site_connections',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
sa.Column('peer_address', sa.String(length=64), nullable=False),
sa.Column('peer_id', sa.String(length=255), nullable=False),
sa.Column('route_mode', sa.String(length=8), nullable=False),
sa.Column('mtu', sa.Integer(), nullable=False),
sa.Column(
'initiator', vpn_initiators, nullable=False),
sa.Column('auth_mode', sa.String(length=16), nullable=False),
sa.Column('psk', sa.String(length=255), nullable=False),
sa.Column(
'dpd_action', vpn_dpd_actions, nullable=False),
sa.Column('dpd_interval', sa.Integer(), nullable=False),
sa.Column('dpd_timeout', sa.Integer(), nullable=False),
sa.Column('status', sa.String(length=16), nullable=False),
sa.Column('admin_state_up', sa.Boolean(), nullable=False),
sa.Column('vpnservice_id', sa.String(length=36), nullable=False),
sa.Column('ipsecpolicy_id', sa.String(length=36), nullable=False),
sa.Column('ikepolicy_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['ikepolicy_id'], ['ikepolicies.id']),
sa.ForeignKeyConstraint(['ipsecpolicy_id'], ['ipsecpolicies.id']),
sa.ForeignKeyConstraint(['vpnservice_id'], ['vpnservices.id']),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'ipsecpeercidrs',
sa.Column('cidr', sa.String(length=32), nullable=False),
sa.Column('ipsec_site_connection_id',
sa.String(length=36),
nullable=False),
sa.ForeignKeyConstraint(['ipsec_site_connection_id'],
['ipsec_site_connections.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('cidr', 'ipsec_site_connection_id')
)
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_table('ipsecpeercidrs')
op.drop_table('ipsec_site_connections')
vpn_dpd_actions.drop(op.get_bind(), checkfirst=False)
vpn_initiators.drop(op.get_bind(), checkfirst=False)
op.drop_table('vpnservices')
op.drop_table('ipsecpolicies')
ipsec_transform_protocols.drop(op.get_bind(), checkfirst=False)
ipsec_encapsulations.drop(op.get_bind(), checkfirst=False)
op.drop_table('ikepolicies')
vpn_auth_algorithms.drop(op.get_bind(), checkfirst=False)
vpn_encrypt_algorithms.drop(op.get_bind(), checkfirst=False)
ike_phase1_mode.drop(op.get_bind(), checkfirst=False)
vpn_lifetime_units.drop(op.get_bind(), checkfirst=False)
ike_versions.drop(op.get_bind(), checkfirst=False)
vpn_pfs.drop(op.get_bind(), checkfirst=False)
| 45.255814
| 79
| 0.659044
|
084d0879f1764536f425576ef0b45dc613b7cb3b
| 77,841
|
py
|
Python
|
Lib/test/test_patma.py
|
holmanb/cpython
|
9405a02f4c50e235d01d942bd91eb4bea2a86e96
|
[
"0BSD"
] | 12
|
2021-04-22T14:52:17.000Z
|
2021-12-21T12:51:31.000Z
|
Lib/test/test_patma.py
|
holmanb/cpython
|
9405a02f4c50e235d01d942bd91eb4bea2a86e96
|
[
"0BSD"
] | 14
|
2020-03-12T01:10:53.000Z
|
2022-01-01T14:00:53.000Z
|
Lib/test/test_patma.py
|
holmanb/cpython
|
9405a02f4c50e235d01d942bd91eb4bea2a86e96
|
[
"0BSD"
] | 5
|
2021-04-25T22:26:29.000Z
|
2022-01-25T22:22:30.000Z
|
import array
import collections
import contextlib
import dataclasses
import enum
import inspect
import unittest
import warnings
def no_perf(f):
f.no_perf = None
return f
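# Tests decorated with @no_perf are excluded from the PerfPatma timing loop at
# the bottom of this file (run_perf() skips any test that has this attribute).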
@dataclasses.dataclass
class MyClass:
x: int
y: str
__match_args__ = ("x", "y")
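# __match_args__ is what lets positional class patterns such as
# MyClass(1, "hello") bind sub-patterns to the x and y attributes; on 3.10+ the
# @dataclasses.dataclass decorator also generates it automatically, as it does
# for Point below.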
@dataclasses.dataclass
class Point:
x: int
y: int
class TestPatma(unittest.TestCase):
def assert_syntax_error(self, code: str):
with self.assertRaises(SyntaxError):
compile(inspect.cleandoc(code), "<test>", "exec")
def test_patma_000(self):
match 0:
case 0:
x = True
self.assertIs(x, True)
def test_patma_001(self):
match 0:
case 0 if False:
x = False
case 0 if True:
x = True
self.assertIs(x, True)
def test_patma_002(self):
match 0:
case 0:
x = True
case 0:
x = False
self.assertIs(x, True)
def test_patma_003(self):
x = False
match 0:
case 0 | 1 | 2 | 3:
x = True
self.assertIs(x, True)
def test_patma_004(self):
x = False
match 1:
case 0 | 1 | 2 | 3:
x = True
self.assertIs(x, True)
def test_patma_005(self):
x = False
match 2:
case 0 | 1 | 2 | 3:
x = True
self.assertIs(x, True)
def test_patma_006(self):
x = False
match 3:
case 0 | 1 | 2 | 3:
x = True
self.assertIs(x, True)
def test_patma_007(self):
x = False
match 4:
case 0 | 1 | 2 | 3:
x = True
self.assertIs(x, False)
def test_patma_008(self):
x = 0
class A:
y = 1
match x:
case A.y as z:
pass
self.assertEqual(x, 0)
self.assertEqual(A.y, 1)
def test_patma_009(self):
class A:
B = 0
match 0:
case x if x:
z = 0
case _ as y if y == x and y:
z = 1
case A.B:
z = 2
self.assertEqual(A.B, 0)
self.assertEqual(x, 0)
self.assertEqual(y, 0)
self.assertEqual(z, 2)
def test_patma_010(self):
match ():
case []:
x = 0
self.assertEqual(x, 0)
def test_patma_011(self):
match (0, 1, 2):
case [*x]:
y = 0
self.assertEqual(x, [0, 1, 2])
self.assertEqual(y, 0)
def test_patma_012(self):
match (0, 1, 2):
case [0, *x]:
y = 0
self.assertEqual(x, [1, 2])
self.assertEqual(y, 0)
def test_patma_013(self):
match (0, 1, 2):
case [0, 1, *x,]:
y = 0
self.assertEqual(x, [2])
self.assertEqual(y, 0)
def test_patma_014(self):
match (0, 1, 2):
case [0, 1, 2, *x]:
y = 0
self.assertEqual(x, [])
self.assertEqual(y, 0)
def test_patma_015(self):
match (0, 1, 2):
case [*x, 2,]:
y = 0
self.assertEqual(x, [0, 1])
self.assertEqual(y, 0)
def test_patma_016(self):
match (0, 1, 2):
case [*x, 1, 2]:
y = 0
self.assertEqual(x, [0])
self.assertEqual(y, 0)
def test_patma_017(self):
match (0, 1, 2):
case [*x, 0, 1, 2,]:
y = 0
self.assertEqual(x, [])
self.assertEqual(y, 0)
def test_patma_018(self):
match (0, 1, 2):
case [0, *x, 2]:
y = 0
self.assertEqual(x, [1])
self.assertEqual(y, 0)
def test_patma_019(self):
match (0, 1, 2):
case [0, 1, *x, 2,]:
y = 0
self.assertEqual(x, [])
self.assertEqual(y, 0)
def test_patma_020(self):
match (0, 1, 2):
case [0, *x, 1, 2]:
y = 0
self.assertEqual(x, [])
self.assertEqual(y, 0)
def test_patma_021(self):
match (0, 1, 2):
case [*x,]:
y = 0
self.assertEqual(x, [0, 1, 2])
self.assertEqual(y, 0)
def test_patma_022(self):
x = {}
match x:
case {}:
y = 0
self.assertEqual(x, {})
self.assertEqual(y, 0)
def test_patma_023(self):
x = {0: 0}
match x:
case {}:
y = 0
self.assertEqual(x, {0: 0})
self.assertEqual(y, 0)
def test_patma_024(self):
x = {}
y = None
match x:
case {0: 0}:
y = 0
self.assertEqual(x, {})
self.assertIs(y, None)
def test_patma_025(self):
x = {0: 0}
match x:
case {0: (0 | 1 | 2 as z)}:
y = 0
self.assertEqual(x, {0: 0})
self.assertEqual(y, 0)
self.assertEqual(z, 0)
def test_patma_026(self):
x = {0: 1}
match x:
case {0: (0 | 1 | 2 as z)}:
y = 0
self.assertEqual(x, {0: 1})
self.assertEqual(y, 0)
self.assertEqual(z, 1)
def test_patma_027(self):
x = {0: 2}
match x:
case {0: (0 | 1 | 2 as z)}:
y = 0
self.assertEqual(x, {0: 2})
self.assertEqual(y, 0)
self.assertEqual(z, 2)
def test_patma_028(self):
x = {0: 3}
y = None
match x:
case {0: (0 | 1 | 2 as z)}:
y = 0
self.assertEqual(x, {0: 3})
self.assertIs(y, None)
def test_patma_029(self):
x = {}
y = None
match x:
case {0: [1, 2, {}]}:
y = 0
case {0: [1, 2, {}], 1: [[]]}:
y = 1
case []:
y = 2
self.assertEqual(x, {})
self.assertIs(y, None)
def test_patma_030(self):
x = {False: (True, 2.0, {})}
match x:
case {0: [1, 2, {}]}:
y = 0
case {0: [1, 2, {}], 1: [[]]}:
y = 1
case []:
y = 2
self.assertEqual(x, {False: (True, 2.0, {})})
self.assertEqual(y, 0)
def test_patma_031(self):
x = {False: (True, 2.0, {}), 1: [[]], 2: 0}
match x:
case {0: [1, 2, {}]}:
y = 0
case {0: [1, 2, {}], 1: [[]]}:
y = 1
case []:
y = 2
self.assertEqual(x, {False: (True, 2.0, {}), 1: [[]], 2: 0})
self.assertEqual(y, 0)
def test_patma_032(self):
x = {False: (True, 2.0, {}), 1: [[]], 2: 0}
match x:
case {0: [1, 2]}:
y = 0
case {0: [1, 2, {}], 1: [[]]}:
y = 1
case []:
y = 2
self.assertEqual(x, {False: (True, 2.0, {}), 1: [[]], 2: 0})
self.assertEqual(y, 1)
def test_patma_033(self):
x = []
match x:
case {0: [1, 2, {}]}:
y = 0
case {0: [1, 2, {}], 1: [[]]}:
y = 1
case []:
y = 2
self.assertEqual(x, [])
self.assertEqual(y, 2)
def test_patma_034(self):
x = {0: 0}
match x:
case {0: [1, 2, {}]}:
y = 0
case {0: ([1, 2, {}] | False)} | {1: [[]]} | {0: [1, 2, {}]} | [] | "X" | {}:
y = 1
case []:
y = 2
self.assertEqual(x, {0: 0})
self.assertEqual(y, 1)
def test_patma_035(self):
x = {0: 0}
match x:
case {0: [1, 2, {}]}:
y = 0
case {0: [1, 2, {}] | True} | {1: [[]]} | {0: [1, 2, {}]} | [] | "X" | {}:
y = 1
case []:
y = 2
self.assertEqual(x, {0: 0})
self.assertEqual(y, 1)
def test_patma_036(self):
x = 0
match x:
case 0 | 1 | 2:
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_037(self):
x = 1
match x:
case 0 | 1 | 2:
y = 0
self.assertEqual(x, 1)
self.assertEqual(y, 0)
def test_patma_038(self):
x = 2
match x:
case 0 | 1 | 2:
y = 0
self.assertEqual(x, 2)
self.assertEqual(y, 0)
def test_patma_039(self):
x = 3
y = None
match x:
case 0 | 1 | 2:
y = 0
self.assertEqual(x, 3)
self.assertIs(y, None)
def test_patma_040(self):
x = 0
match x:
case (0 as z) | (1 as z) | (2 as z) if z == x % 2:
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, 0)
self.assertEqual(z, 0)
def test_patma_041(self):
x = 1
match x:
case (0 as z) | (1 as z) | (2 as z) if z == x % 2:
y = 0
self.assertEqual(x, 1)
self.assertEqual(y, 0)
self.assertEqual(z, 1)
def test_patma_042(self):
x = 2
y = None
match x:
case (0 as z) | (1 as z) | (2 as z) if z == x % 2:
y = 0
self.assertEqual(x, 2)
self.assertIs(y, None)
self.assertEqual(z, 2)
def test_patma_043(self):
x = 3
y = None
match x:
case (0 as z) | (1 as z) | (2 as z) if z == x % 2:
y = 0
self.assertEqual(x, 3)
self.assertIs(y, None)
def test_patma_044(self):
x = ()
match x:
case []:
y = 0
self.assertEqual(x, ())
self.assertEqual(y, 0)
def test_patma_045(self):
x = ()
match x:
case ():
y = 0
self.assertEqual(x, ())
self.assertEqual(y, 0)
def test_patma_046(self):
x = (0,)
match x:
case [0]:
y = 0
self.assertEqual(x, (0,))
self.assertEqual(y, 0)
def test_patma_047(self):
x = ((),)
match x:
case [[]]:
y = 0
self.assertEqual(x, ((),))
self.assertEqual(y, 0)
def test_patma_048(self):
x = [0, 1]
match x:
case [0, 1] | [1, 0]:
y = 0
self.assertEqual(x, [0, 1])
self.assertEqual(y, 0)
def test_patma_049(self):
x = [1, 0]
match x:
case [0, 1] | [1, 0]:
y = 0
self.assertEqual(x, [1, 0])
self.assertEqual(y, 0)
def test_patma_050(self):
x = [0, 0]
y = None
match x:
case [0, 1] | [1, 0]:
y = 0
self.assertEqual(x, [0, 0])
self.assertIs(y, None)
def test_patma_051(self):
w = None
x = [1, 0]
match x:
case [(0 as w)]:
y = 0
case [z] | [1, (0 | 1 as z)] | [z]:
y = 1
self.assertIs(w, None)
self.assertEqual(x, [1, 0])
self.assertEqual(y, 1)
self.assertEqual(z, 0)
def test_patma_052(self):
x = [1, 0]
match x:
case [0]:
y = 0
case [1, 0] if (x := x[:0]):
y = 1
case [1, 0]:
y = 2
self.assertEqual(x, [])
self.assertEqual(y, 2)
def test_patma_053(self):
x = {0}
y = None
match x:
case [0]:
y = 0
self.assertEqual(x, {0})
self.assertIs(y, None)
def test_patma_054(self):
x = set()
y = None
match x:
case []:
y = 0
self.assertEqual(x, set())
self.assertIs(y, None)
def test_patma_055(self):
x = iter([1, 2, 3])
y = None
match x:
case []:
y = 0
self.assertEqual([*x], [1, 2, 3])
self.assertIs(y, None)
def test_patma_056(self):
x = {}
y = None
match x:
case []:
y = 0
self.assertEqual(x, {})
self.assertIs(y, None)
def test_patma_057(self):
x = {0: False, 1: True}
y = None
match x:
case [0, 1]:
y = 0
self.assertEqual(x, {0: False, 1: True})
self.assertIs(y, None)
def test_patma_058(self):
x = 0
match x:
case 0:
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_059(self):
x = 0
y = None
match x:
case False:
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, None)
def test_patma_060(self):
x = 0
y = None
match x:
case 1:
y = 0
self.assertEqual(x, 0)
self.assertIs(y, None)
def test_patma_061(self):
x = 0
y = None
match x:
case None:
y = 0
self.assertEqual(x, 0)
self.assertIs(y, None)
def test_patma_062(self):
x = 0
match x:
case 0:
y = 0
case 0:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_063(self):
x = 0
y = None
match x:
case 1:
y = 0
case 1:
y = 1
self.assertEqual(x, 0)
self.assertIs(y, None)
def test_patma_064(self):
x = "x"
match x:
case "x":
y = 0
case "y":
y = 1
self.assertEqual(x, "x")
self.assertEqual(y, 0)
def test_patma_065(self):
x = "x"
match x:
case "y":
y = 0
case "x":
y = 1
self.assertEqual(x, "x")
self.assertEqual(y, 1)
def test_patma_066(self):
x = "x"
match x:
case "":
y = 0
case "x":
y = 1
self.assertEqual(x, "x")
self.assertEqual(y, 1)
def test_patma_067(self):
x = b"x"
match x:
case b"y":
y = 0
case b"x":
y = 1
self.assertEqual(x, b"x")
self.assertEqual(y, 1)
def test_patma_068(self):
x = 0
match x:
case 0 if False:
y = 0
case 0:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 1)
def test_patma_069(self):
x = 0
y = None
match x:
case 0 if 0:
y = 0
case 0 if 0:
y = 1
self.assertEqual(x, 0)
self.assertIs(y, None)
def test_patma_070(self):
x = 0
match x:
case 0 if True:
y = 0
case 0 if True:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_071(self):
x = 0
match x:
case 0 if 1:
y = 0
case 0 if 1:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_072(self):
x = 0
match x:
case 0 if True:
y = 0
case 0 if True:
y = 1
y = 2
self.assertEqual(x, 0)
self.assertEqual(y, 2)
def test_patma_073(self):
x = 0
match x:
case 0 if 0:
y = 0
case 0 if 1:
y = 1
y = 2
self.assertEqual(x, 0)
self.assertEqual(y, 2)
def test_patma_074(self):
x = 0
y = None
match x:
case 0 if not (x := 1):
y = 0
case 1:
y = 1
self.assertEqual(x, 1)
self.assertIs(y, None)
def test_patma_075(self):
x = "x"
match x:
case ["x"]:
y = 0
case "x":
y = 1
self.assertEqual(x, "x")
self.assertEqual(y, 1)
def test_patma_076(self):
x = b"x"
match x:
case [b"x"]:
y = 0
case ["x"]:
y = 1
case [120]:
y = 2
case b"x":
y = 4
self.assertEqual(x, b"x")
self.assertEqual(y, 4)
def test_patma_077(self):
x = bytearray(b"x")
y = None
match x:
case [120]:
y = 0
case 120:
y = 1
self.assertEqual(x, b"x")
self.assertIs(y, None)
def test_patma_078(self):
x = ""
match x:
case []:
y = 0
case [""]:
y = 1
case "":
y = 2
self.assertEqual(x, "")
self.assertEqual(y, 2)
def test_patma_079(self):
x = "xxx"
match x:
case ["x", "x", "x"]:
y = 0
case ["xxx"]:
y = 1
case "xxx":
y = 2
self.assertEqual(x, "xxx")
self.assertEqual(y, 2)
def test_patma_080(self):
x = b"xxx"
match x:
case [120, 120, 120]:
y = 0
case [b"xxx"]:
y = 1
case b"xxx":
y = 2
self.assertEqual(x, b"xxx")
self.assertEqual(y, 2)
def test_patma_081(self):
x = 0
match x:
case 0 if not (x := 1):
y = 0
case (0 as z):
y = 1
self.assertEqual(x, 1)
self.assertEqual(y, 1)
self.assertEqual(z, 0)
def test_patma_082(self):
x = 0
match x:
case (1 as z) if not (x := 1):
y = 0
case 0:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 1)
def test_patma_083(self):
x = 0
match x:
case (0 as z):
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, 0)
self.assertEqual(z, 0)
def test_patma_084(self):
x = 0
y = None
match x:
case (1 as z):
y = 0
self.assertEqual(x, 0)
self.assertIs(y, None)
def test_patma_085(self):
x = 0
y = None
match x:
case (0 as z) if (w := 0):
y = 0
self.assertEqual(w, 0)
self.assertEqual(x, 0)
self.assertIs(y, None)
self.assertEqual(z, 0)
def test_patma_086(self):
x = 0
match x:
case ((0 as w) as z):
y = 0
self.assertEqual(w, 0)
self.assertEqual(x, 0)
self.assertEqual(y, 0)
self.assertEqual(z, 0)
def test_patma_087(self):
x = 0
match x:
case (0 | 1) | 2:
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_088(self):
x = 1
match x:
case (0 | 1) | 2:
y = 0
self.assertEqual(x, 1)
self.assertEqual(y, 0)
def test_patma_089(self):
x = 2
match x:
case (0 | 1) | 2:
y = 0
self.assertEqual(x, 2)
self.assertEqual(y, 0)
def test_patma_090(self):
x = 3
y = None
match x:
case (0 | 1) | 2:
y = 0
self.assertEqual(x, 3)
self.assertIs(y, None)
def test_patma_091(self):
x = 0
match x:
case 0 | (1 | 2):
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_092(self):
x = 1
match x:
case 0 | (1 | 2):
y = 0
self.assertEqual(x, 1)
self.assertEqual(y, 0)
def test_patma_093(self):
x = 2
match x:
case 0 | (1 | 2):
y = 0
self.assertEqual(x, 2)
self.assertEqual(y, 0)
def test_patma_094(self):
x = 3
y = None
match x:
case 0 | (1 | 2):
y = 0
self.assertEqual(x, 3)
self.assertIs(y, None)
def test_patma_095(self):
x = 0
match x:
case -0:
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_096(self):
x = 0
match x:
case -0.0:
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_097(self):
x = 0
match x:
case -0j:
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_098(self):
x = 0
match x:
case -0.0j:
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_099(self):
x = -1
match x:
case -1:
y = 0
self.assertEqual(x, -1)
self.assertEqual(y, 0)
def test_patma_100(self):
x = -1.5
match x:
case -1.5:
y = 0
self.assertEqual(x, -1.5)
self.assertEqual(y, 0)
def test_patma_101(self):
x = -1j
match x:
case -1j:
y = 0
self.assertEqual(x, -1j)
self.assertEqual(y, 0)
def test_patma_102(self):
x = -1.5j
match x:
case -1.5j:
y = 0
self.assertEqual(x, -1.5j)
self.assertEqual(y, 0)
def test_patma_103(self):
x = 0
match x:
case 0 + 0j:
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_104(self):
x = 0
match x:
case 0 - 0j:
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_105(self):
x = 0
match x:
case -0 + 0j:
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_106(self):
x = 0
match x:
case -0 - 0j:
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_107(self):
x = 0.25 + 1.75j
match x:
case 0.25 + 1.75j:
y = 0
self.assertEqual(x, 0.25 + 1.75j)
self.assertEqual(y, 0)
def test_patma_108(self):
x = 0.25 - 1.75j
match x:
case 0.25 - 1.75j:
y = 0
self.assertEqual(x, 0.25 - 1.75j)
self.assertEqual(y, 0)
def test_patma_109(self):
x = -0.25 + 1.75j
match x:
case -0.25 + 1.75j:
y = 0
self.assertEqual(x, -0.25 + 1.75j)
self.assertEqual(y, 0)
def test_patma_110(self):
x = -0.25 - 1.75j
match x:
case -0.25 - 1.75j:
y = 0
self.assertEqual(x, -0.25 - 1.75j)
self.assertEqual(y, 0)
def test_patma_111(self):
class A:
B = 0
x = 0
match x:
case A.B:
y = 0
self.assertEqual(A.B, 0)
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_112(self):
class A:
class B:
C = 0
x = 0
match x:
case A.B.C:
y = 0
self.assertEqual(A.B.C, 0)
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_113(self):
class A:
class B:
C = 0
D = 1
x = 1
match x:
case A.B.C:
y = 0
case A.B.D:
y = 1
self.assertEqual(A.B.C, 0)
self.assertEqual(A.B.D, 1)
self.assertEqual(x, 1)
self.assertEqual(y, 1)
def test_patma_114(self):
class A:
class B:
class C:
D = 0
x = 0
match x:
case A.B.C.D:
y = 0
self.assertEqual(A.B.C.D, 0)
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_115(self):
class A:
class B:
class C:
D = 0
E = 1
x = 1
match x:
case A.B.C.D:
y = 0
case A.B.C.E:
y = 1
self.assertEqual(A.B.C.D, 0)
self.assertEqual(A.B.C.E, 1)
self.assertEqual(x, 1)
self.assertEqual(y, 1)
def test_patma_116(self):
match = case = 0
match match:
case case:
x = 0
self.assertEqual(match, 0)
self.assertEqual(case, 0)
self.assertEqual(x, 0)
def test_patma_117(self):
match = case = 0
match case:
case match:
x = 0
self.assertEqual(match, 0)
self.assertEqual(case, 0)
self.assertEqual(x, 0)
def test_patma_118(self):
x = []
match x:
case [*_, _]:
y = 0
case []:
y = 1
self.assertEqual(x, [])
self.assertEqual(y, 1)
def test_patma_119(self):
x = collections.defaultdict(int)
match x:
case {0: 0}:
y = 0
case {}:
y = 1
self.assertEqual(x, {})
self.assertEqual(y, 1)
def test_patma_120(self):
x = collections.defaultdict(int)
match x:
case {0: 0}:
y = 0
case {**z}:
y = 1
self.assertEqual(x, {})
self.assertEqual(y, 1)
self.assertEqual(z, {})
def test_patma_121(self):
match ():
case ():
x = 0
self.assertEqual(x, 0)
def test_patma_122(self):
match (0, 1, 2):
case (*x,):
y = 0
self.assertEqual(x, [0, 1, 2])
self.assertEqual(y, 0)
def test_patma_123(self):
match (0, 1, 2):
case 0, *x:
y = 0
self.assertEqual(x, [1, 2])
self.assertEqual(y, 0)
def test_patma_124(self):
match (0, 1, 2):
case (0, 1, *x,):
y = 0
self.assertEqual(x, [2])
self.assertEqual(y, 0)
def test_patma_125(self):
match (0, 1, 2):
case 0, 1, 2, *x:
y = 0
self.assertEqual(x, [])
self.assertEqual(y, 0)
def test_patma_126(self):
match (0, 1, 2):
case *x, 2,:
y = 0
self.assertEqual(x, [0, 1])
self.assertEqual(y, 0)
def test_patma_127(self):
match (0, 1, 2):
case (*x, 1, 2):
y = 0
self.assertEqual(x, [0])
self.assertEqual(y, 0)
def test_patma_128(self):
match (0, 1, 2):
case *x, 0, 1, 2,:
y = 0
self.assertEqual(x, [])
self.assertEqual(y, 0)
def test_patma_129(self):
match (0, 1, 2):
case (0, *x, 2):
y = 0
self.assertEqual(x, [1])
self.assertEqual(y, 0)
def test_patma_130(self):
match (0, 1, 2):
case 0, 1, *x, 2,:
y = 0
self.assertEqual(x, [])
self.assertEqual(y, 0)
def test_patma_131(self):
match (0, 1, 2):
case (0, *x, 1, 2):
y = 0
self.assertEqual(x, [])
self.assertEqual(y, 0)
def test_patma_132(self):
match (0, 1, 2):
case *x,:
y = 0
self.assertEqual(x, [0, 1, 2])
self.assertEqual(y, 0)
def test_patma_133(self):
x = collections.defaultdict(int, {0: 1})
match x:
case {1: 0}:
y = 0
case {0: 0}:
y = 1
case {}:
y = 2
self.assertEqual(x, {0: 1})
self.assertEqual(y, 2)
def test_patma_134(self):
x = collections.defaultdict(int, {0: 1})
match x:
case {1: 0}:
y = 0
case {0: 0}:
y = 1
case {**z}:
y = 2
self.assertEqual(x, {0: 1})
self.assertEqual(y, 2)
self.assertEqual(z, {0: 1})
def test_patma_135(self):
x = collections.defaultdict(int, {0: 1})
match x:
case {1: 0}:
y = 0
case {0: 0}:
y = 1
case {0: _, **z}:
y = 2
self.assertEqual(x, {0: 1})
self.assertEqual(y, 2)
self.assertEqual(z, {})
def test_patma_136(self):
x = {0: 1}
match x:
case {1: 0}:
y = 0
case {0: 0}:
y = 0
case {}:
y = 1
self.assertEqual(x, {0: 1})
self.assertEqual(y, 1)
def test_patma_137(self):
x = {0: 1}
match x:
case {1: 0}:
y = 0
case {0: 0}:
y = 0
case {**z}:
y = 1
self.assertEqual(x, {0: 1})
self.assertEqual(y, 1)
self.assertEqual(z, {0: 1})
def test_patma_138(self):
x = {0: 1}
match x:
case {1: 0}:
y = 0
case {0: 0}:
y = 0
case {0: _, **z}:
y = 1
self.assertEqual(x, {0: 1})
self.assertEqual(y, 1)
self.assertEqual(z, {})
def test_patma_139(self):
x = False
match x:
case bool(z):
y = 0
self.assertIs(x, False)
self.assertEqual(y, 0)
self.assertIs(z, x)
def test_patma_140(self):
x = True
match x:
case bool(z):
y = 0
self.assertIs(x, True)
self.assertEqual(y, 0)
self.assertIs(z, x)
def test_patma_141(self):
x = bytearray()
match x:
case bytearray(z):
y = 0
self.assertEqual(x, bytearray())
self.assertEqual(y, 0)
self.assertIs(z, x)
def test_patma_142(self):
x = b""
match x:
case bytes(z):
y = 0
self.assertEqual(x, b"")
self.assertEqual(y, 0)
self.assertIs(z, x)
def test_patma_143(self):
x = {}
match x:
case dict(z):
y = 0
self.assertEqual(x, {})
self.assertEqual(y, 0)
self.assertIs(z, x)
def test_patma_144(self):
x = 0.0
match x:
case float(z):
y = 0
self.assertEqual(x, 0.0)
self.assertEqual(y, 0)
self.assertIs(z, x)
def test_patma_145(self):
x = frozenset()
match x:
case frozenset(z):
y = 0
self.assertEqual(x, frozenset())
self.assertEqual(y, 0)
self.assertIs(z, x)
def test_patma_146(self):
x = 0
match x:
case int(z):
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, 0)
self.assertIs(z, x)
def test_patma_147(self):
x = []
match x:
case list(z):
y = 0
self.assertEqual(x, [])
self.assertEqual(y, 0)
self.assertIs(z, x)
def test_patma_148(self):
x = set()
match x:
case set(z):
y = 0
self.assertEqual(x, set())
self.assertEqual(y, 0)
self.assertIs(z, x)
def test_patma_149(self):
x = ""
match x:
case str(z):
y = 0
self.assertEqual(x, "")
self.assertEqual(y, 0)
self.assertIs(z, x)
def test_patma_150(self):
x = ()
match x:
case tuple(z):
y = 0
self.assertEqual(x, ())
self.assertEqual(y, 0)
self.assertIs(z, x)
def test_patma_151(self):
x = 0
match x,:
case y,:
z = 0
self.assertEqual(x, 0)
self.assertIs(y, x)
self.assertIs(z, 0)
def test_patma_152(self):
w = 0
x = 0
match w, x:
case y, z:
v = 0
self.assertEqual(w, 0)
self.assertEqual(x, 0)
self.assertIs(y, w)
self.assertIs(z, x)
self.assertEqual(v, 0)
def test_patma_153(self):
x = 0
match w := x,:
case y as v,:
z = 0
self.assertEqual(x, 0)
self.assertIs(y, x)
self.assertEqual(z, 0)
self.assertIs(w, x)
self.assertIs(v, y)
def test_patma_154(self):
x = 0
y = None
match x:
case 0 if x:
y = 0
self.assertEqual(x, 0)
self.assertIs(y, None)
def test_patma_155(self):
x = 0
y = None
match x:
case 1e1000:
y = 0
self.assertEqual(x, 0)
self.assertIs(y, None)
def test_patma_156(self):
x = 0
match x:
case z:
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, 0)
self.assertIs(z, x)
def test_patma_157(self):
x = 0
y = None
match x:
case _ if x:
y = 0
self.assertEqual(x, 0)
self.assertIs(y, None)
def test_patma_158(self):
x = 0
match x:
case -1e1000:
y = 0
case 0:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 1)
def test_patma_159(self):
x = 0
match x:
case 0 if not x:
y = 0
case 1:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_160(self):
x = 0
z = None
match x:
case 0:
y = 0
case z if x:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 0)
self.assertIs(z, None)
def test_patma_161(self):
x = 0
match x:
case 0:
y = 0
case _:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_162(self):
x = 0
match x:
case 1 if x:
y = 0
case 0:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 1)
def test_patma_163(self):
x = 0
y = None
match x:
case 1:
y = 0
case 1 if not x:
y = 1
self.assertEqual(x, 0)
self.assertIs(y, None)
def test_patma_164(self):
x = 0
match x:
case 1:
y = 0
case z:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 1)
self.assertIs(z, x)
def test_patma_165(self):
x = 0
match x:
case 1 if x:
y = 0
case _:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 1)
def test_patma_166(self):
x = 0
match x:
case z if not z:
y = 0
case 0 if x:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 0)
self.assertIs(z, x)
def test_patma_167(self):
x = 0
match x:
case z if not z:
y = 0
case 1:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 0)
self.assertIs(z, x)
def test_patma_168(self):
x = 0
match x:
case z if not x:
y = 0
case z:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 0)
self.assertIs(z, x)
def test_patma_169(self):
x = 0
match x:
case z if not z:
y = 0
case _ if x:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 0)
self.assertIs(z, x)
def test_patma_170(self):
x = 0
match x:
case _ if not x:
y = 0
case 0:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_171(self):
x = 0
y = None
match x:
case _ if x:
y = 0
case 1:
y = 1
self.assertEqual(x, 0)
self.assertIs(y, None)
def test_patma_172(self):
x = 0
z = None
match x:
case _ if not x:
y = 0
case z if not x:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 0)
self.assertIs(z, None)
def test_patma_173(self):
x = 0
match x:
case _ if not x:
y = 0
case _:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_174(self):
def http_error(status):
match status:
case 400:
return "Bad request"
case 401:
return "Unauthorized"
case 403:
return "Forbidden"
case 404:
return "Not found"
case 418:
return "I'm a teapot"
case _:
return "Something else"
self.assertEqual(http_error(400), "Bad request")
self.assertEqual(http_error(401), "Unauthorized")
self.assertEqual(http_error(403), "Forbidden")
self.assertEqual(http_error(404), "Not found")
self.assertEqual(http_error(418), "I'm a teapot")
self.assertEqual(http_error(123), "Something else")
self.assertEqual(http_error("400"), "Something else")
self.assertEqual(http_error(401 | 403 | 404), "Something else") # 407
def test_patma_175(self):
def http_error(status):
match status:
case 400:
return "Bad request"
case 401 | 403 | 404:
return "Not allowed"
case 418:
return "I'm a teapot"
self.assertEqual(http_error(400), "Bad request")
self.assertEqual(http_error(401), "Not allowed")
self.assertEqual(http_error(403), "Not allowed")
self.assertEqual(http_error(404), "Not allowed")
self.assertEqual(http_error(418), "I'm a teapot")
self.assertIs(http_error(123), None)
self.assertIs(http_error("400"), None)
self.assertIs(http_error(401 | 403 | 404), None) # 407
@no_perf
def test_patma_176(self):
def whereis(point):
match point:
case (0, 0):
return "Origin"
case (0, y):
return f"Y={y}"
case (x, 0):
return f"X={x}"
case (x, y):
return f"X={x}, Y={y}"
case _:
raise ValueError("Not a point")
self.assertEqual(whereis((0, 0)), "Origin")
self.assertEqual(whereis((0, -1.0)), "Y=-1.0")
self.assertEqual(whereis(("X", 0)), "X=X")
self.assertEqual(whereis((None, 1j)), "X=None, Y=1j")
with self.assertRaises(ValueError):
whereis(42)
def test_patma_177(self):
def whereis(point):
match point:
case Point(0, 0):
return "Origin"
case Point(0, y):
return f"Y={y}"
case Point(x, 0):
return f"X={x}"
case Point():
return "Somewhere else"
case _:
return "Not a point"
self.assertEqual(whereis(Point(1, 0)), "X=1")
self.assertEqual(whereis(Point(0, 0)), "Origin")
self.assertEqual(whereis(10), "Not a point")
self.assertEqual(whereis(Point(False, False)), "Origin")
self.assertEqual(whereis(Point(0, -1.0)), "Y=-1.0")
self.assertEqual(whereis(Point("X", 0)), "X=X")
self.assertEqual(whereis(Point(None, 1j)), "Somewhere else")
self.assertEqual(whereis(Point), "Not a point")
self.assertEqual(whereis(42), "Not a point")
def test_patma_178(self):
def whereis(point):
match point:
case Point(1, var):
return var
self.assertEqual(whereis(Point(1, 0)), 0)
self.assertIs(whereis(Point(0, 0)), None)
def test_patma_179(self):
def whereis(point):
match point:
case Point(1, y=var):
return var
self.assertEqual(whereis(Point(1, 0)), 0)
self.assertIs(whereis(Point(0, 0)), None)
def test_patma_180(self):
def whereis(point):
match point:
case Point(x=1, y=var):
return var
self.assertEqual(whereis(Point(1, 0)), 0)
self.assertIs(whereis(Point(0, 0)), None)
def test_patma_181(self):
def whereis(point):
match point:
case Point(y=var, x=1):
return var
self.assertEqual(whereis(Point(1, 0)), 0)
self.assertIs(whereis(Point(0, 0)), None)
def test_patma_182(self):
def whereis(points):
match points:
case []:
return "No points"
case [Point(0, 0)]:
return "The origin"
case [Point(x, y)]:
return f"Single point {x}, {y}"
case [Point(0, y1), Point(0, y2)]:
return f"Two on the Y axis at {y1}, {y2}"
case _:
return "Something else"
self.assertEqual(whereis([]), "No points")
self.assertEqual(whereis([Point(0, 0)]), "The origin")
self.assertEqual(whereis([Point(0, 1)]), "Single point 0, 1")
self.assertEqual(whereis([Point(0, 0), Point(0, 0)]), "Two on the Y axis at 0, 0")
self.assertEqual(whereis([Point(0, 1), Point(0, 1)]), "Two on the Y axis at 1, 1")
self.assertEqual(whereis([Point(0, 0), Point(1, 0)]), "Something else")
self.assertEqual(whereis([Point(0, 0), Point(0, 0), Point(0, 0)]), "Something else")
self.assertEqual(whereis([Point(0, 1), Point(0, 1), Point(0, 1)]), "Something else")
def test_patma_183(self):
def whereis(point):
match point:
case Point(x, y) if x == y:
return f"Y=X at {x}"
case Point(x, y):
return "Not on the diagonal"
self.assertEqual(whereis(Point(0, 0)), "Y=X at 0")
self.assertEqual(whereis(Point(0, False)), "Y=X at 0")
self.assertEqual(whereis(Point(False, 0)), "Y=X at False")
self.assertEqual(whereis(Point(-1 - 1j, -1 - 1j)), "Y=X at (-1-1j)")
self.assertEqual(whereis(Point("X", "X")), "Y=X at X")
self.assertEqual(whereis(Point("X", "x")), "Not on the diagonal")
def test_patma_184(self):
class Seq(collections.abc.Sequence):
__getitem__ = None
def __len__(self):
return 0
match Seq():
case []:
y = 0
self.assertEqual(y, 0)
def test_patma_185(self):
class Seq(collections.abc.Sequence):
__getitem__ = None
def __len__(self):
return 42
match Seq():
case [*_]:
y = 0
self.assertEqual(y, 0)
def test_patma_186(self):
class Seq(collections.abc.Sequence):
def __getitem__(self, i):
return i
def __len__(self):
return 42
match Seq():
case [x, *_, y]:
z = 0
self.assertEqual(x, 0)
self.assertEqual(y, 41)
self.assertEqual(z, 0)
def test_patma_187(self):
w = range(10)
match w:
case [x, y, *rest]:
z = 0
self.assertEqual(w, range(10))
self.assertEqual(x, 0)
self.assertEqual(y, 1)
self.assertEqual(z, 0)
self.assertEqual(rest, list(range(2, 10)))
def test_patma_188(self):
w = range(100)
match w:
case (x, y, *rest):
z = 0
self.assertEqual(w, range(100))
self.assertEqual(x, 0)
self.assertEqual(y, 1)
self.assertEqual(z, 0)
self.assertEqual(rest, list(range(2, 100)))
def test_patma_189(self):
w = range(1000)
match w:
case x, y, *rest:
z = 0
self.assertEqual(w, range(1000))
self.assertEqual(x, 0)
self.assertEqual(y, 1)
self.assertEqual(z, 0)
self.assertEqual(rest, list(range(2, 1000)))
def test_patma_190(self):
w = range(1 << 10)
match w:
case [x, y, *_]:
z = 0
self.assertEqual(w, range(1 << 10))
self.assertEqual(x, 0)
self.assertEqual(y, 1)
self.assertEqual(z, 0)
def test_patma_191(self):
w = range(1 << 20)
match w:
case (x, y, *_):
z = 0
self.assertEqual(w, range(1 << 20))
self.assertEqual(x, 0)
self.assertEqual(y, 1)
self.assertEqual(z, 0)
def test_patma_192(self):
w = range(1 << 30)
match w:
case x, y, *_:
z = 0
self.assertEqual(w, range(1 << 30))
self.assertEqual(x, 0)
self.assertEqual(y, 1)
self.assertEqual(z, 0)
def test_patma_193(self):
x = {"bandwidth": 0, "latency": 1}
match x:
case {"bandwidth": b, "latency": l}:
y = 0
self.assertEqual(x, {"bandwidth": 0, "latency": 1})
self.assertIs(b, x["bandwidth"])
self.assertIs(l, x["latency"])
self.assertEqual(y, 0)
def test_patma_194(self):
x = {"bandwidth": 0, "latency": 1, "key": "value"}
match x:
case {"latency": l, "bandwidth": b}:
y = 0
self.assertEqual(x, {"bandwidth": 0, "latency": 1, "key": "value"})
self.assertIs(l, x["latency"])
self.assertIs(b, x["bandwidth"])
self.assertEqual(y, 0)
def test_patma_195(self):
x = {"bandwidth": 0, "latency": 1, "key": "value"}
match x:
case {"bandwidth": b, "latency": l, **rest}:
y = 0
self.assertEqual(x, {"bandwidth": 0, "latency": 1, "key": "value"})
self.assertIs(b, x["bandwidth"])
self.assertIs(l, x["latency"])
self.assertEqual(rest, {"key": "value"})
self.assertEqual(y, 0)
def test_patma_196(self):
x = {"bandwidth": 0, "latency": 1}
match x:
case {"latency": l, "bandwidth": b, **rest}:
y = 0
self.assertEqual(x, {"bandwidth": 0, "latency": 1})
self.assertIs(l, x["latency"])
self.assertIs(b, x["bandwidth"])
self.assertEqual(rest, {})
self.assertEqual(y, 0)
def test_patma_197(self):
w = [Point(-1, 0), Point(1, 2)]
match w:
case (Point(x1, y1), Point(x2, y2) as p2):
z = 0
self.assertEqual(w, [Point(-1, 0), Point(1, 2)])
self.assertIs(x1, w[0].x)
self.assertIs(y1, w[0].y)
self.assertIs(p2, w[1])
self.assertIs(x2, w[1].x)
self.assertIs(y2, w[1].y)
self.assertIs(z, 0)
def test_patma_198(self):
class Color(enum.Enum):
RED = 0
GREEN = 1
BLUE = 2
def f(color):
match color:
case Color.RED:
return "I see red!"
case Color.GREEN:
return "Grass is green"
case Color.BLUE:
return "I'm feeling the blues :("
self.assertEqual(f(Color.RED), "I see red!")
self.assertEqual(f(Color.GREEN), "Grass is green")
self.assertEqual(f(Color.BLUE), "I'm feeling the blues :(")
self.assertIs(f(Color), None)
self.assertIs(f(0), None)
self.assertIs(f(1), None)
self.assertIs(f(2), None)
self.assertIs(f(3), None)
self.assertIs(f(False), None)
self.assertIs(f(True), None)
self.assertIs(f(2+0j), None)
self.assertIs(f(3.0), None)
def test_patma_199(self):
class Color(int, enum.Enum):
RED = 0
GREEN = 1
BLUE = 2
def f(color):
match color:
case Color.RED:
return "I see red!"
case Color.GREEN:
return "Grass is green"
case Color.BLUE:
return "I'm feeling the blues :("
self.assertEqual(f(Color.RED), "I see red!")
self.assertEqual(f(Color.GREEN), "Grass is green")
self.assertEqual(f(Color.BLUE), "I'm feeling the blues :(")
self.assertIs(f(Color), None)
self.assertEqual(f(0), "I see red!")
self.assertEqual(f(1), "Grass is green")
self.assertEqual(f(2), "I'm feeling the blues :(")
self.assertIs(f(3), None)
self.assertEqual(f(False), "I see red!")
self.assertEqual(f(True), "Grass is green")
self.assertEqual(f(2+0j), "I'm feeling the blues :(")
self.assertIs(f(3.0), None)
def test_patma_200(self):
class Class:
__match_args__ = ("a", "b")
c = Class()
c.a = 0
c.b = 1
match c:
case Class(x, y):
z = 0
self.assertIs(x, c.a)
self.assertIs(y, c.b)
self.assertEqual(z, 0)
def test_patma_201(self):
class Class:
__match_args__ = ("a", "b")
c = Class()
c.a = 0
c.b = 1
match c:
case Class(x, b=y):
z = 0
self.assertIs(x, c.a)
self.assertIs(y, c.b)
self.assertEqual(z, 0)
def test_patma_202(self):
class Parent:
__match_args__ = "a", "b"
class Child(Parent):
__match_args__ = ("c", "d")
c = Child()
c.a = 0
c.b = 1
match c:
case Parent(x, y):
z = 0
self.assertIs(x, c.a)
self.assertIs(y, c.b)
self.assertEqual(z, 0)
def test_patma_203(self):
class Parent:
__match_args__ = ("a", "b")
class Child(Parent):
__match_args__ = "c", "d"
c = Child()
c.a = 0
c.b = 1
match c:
case Parent(x, b=y):
z = 0
self.assertIs(x, c.a)
self.assertIs(y, c.b)
self.assertEqual(z, 0)
def test_patma_204(self):
def f(w):
match w:
case 42:
out = locals()
del out["w"]
return out
self.assertEqual(f(42), {})
self.assertIs(f(0), None)
self.assertEqual(f(42.0), {})
self.assertIs(f("42"), None)
def test_patma_205(self):
def f(w):
match w:
case 42.0:
out = locals()
del out["w"]
return out
self.assertEqual(f(42.0), {})
self.assertEqual(f(42), {})
self.assertIs(f(0.0), None)
self.assertIs(f(0), None)
def test_patma_206(self):
def f(w):
match w:
case 1 | 2 | 3:
out = locals()
del out["w"]
return out
self.assertEqual(f(1), {})
self.assertEqual(f(2), {})
self.assertEqual(f(3), {})
self.assertEqual(f(3.0), {})
self.assertIs(f(0), None)
self.assertIs(f(4), None)
self.assertIs(f("1"), None)
def test_patma_207(self):
def f(w):
match w:
case [1, 2] | [3, 4]:
out = locals()
del out["w"]
return out
self.assertEqual(f([1, 2]), {})
self.assertEqual(f([3, 4]), {})
self.assertIs(f(42), None)
self.assertIs(f([2, 3]), None)
self.assertIs(f([1, 2, 3]), None)
self.assertEqual(f([1, 2.0]), {})
def test_patma_208(self):
def f(w):
match w:
case x:
out = locals()
del out["w"]
return out
self.assertEqual(f(42), {"x": 42})
self.assertEqual(f((1, 2)), {"x": (1, 2)})
self.assertEqual(f(None), {"x": None})
def test_patma_209(self):
def f(w):
match w:
case _:
out = locals()
del out["w"]
return out
self.assertEqual(f(42), {})
self.assertEqual(f(None), {})
self.assertEqual(f((1, 2)), {})
def test_patma_210(self):
def f(w):
match w:
case (x, y, z):
out = locals()
del out["w"]
return out
self.assertEqual(f((1, 2, 3)), {"x": 1, "y": 2, "z": 3})
self.assertIs(f((1, 2)), None)
self.assertIs(f((1, 2, 3, 4)), None)
self.assertIs(f(123), None)
self.assertIs(f("abc"), None)
self.assertIs(f(b"abc"), None)
self.assertEqual(f(array.array("b", b"abc")), {'x': 97, 'y': 98, 'z': 99})
self.assertEqual(f(memoryview(b"abc")), {"x": 97, "y": 98, "z": 99})
self.assertIs(f(bytearray(b"abc")), None)
def test_patma_211(self):
def f(w):
match w:
case {"x": x, "y": "y", "z": z}:
out = locals()
del out["w"]
return out
self.assertEqual(f({"x": "x", "y": "y", "z": "z"}), {"x": "x", "z": "z"})
self.assertEqual(f({"x": "x", "y": "y", "z": "z", "a": "a"}), {"x": "x", "z": "z"})
self.assertIs(f(({"x": "x", "y": "yy", "z": "z", "a": "a"})), None)
self.assertIs(f(({"x": "x", "y": "y"})), None)
def test_patma_212(self):
def f(w):
match w:
case MyClass(int(xx), y="hello"):
out = locals()
del out["w"]
return out
self.assertEqual(f(MyClass(42, "hello")), {"xx": 42})
def test_patma_213(self):
def f(w):
match w:
case (p, q) as x:
out = locals()
del out["w"]
return out
self.assertEqual(f((1, 2)), {"p": 1, "q": 2, "x": (1, 2)})
self.assertEqual(f([1, 2]), {"p": 1, "q": 2, "x": [1, 2]})
self.assertIs(f(12), None)
self.assertIs(f((1, 2, 3)), None)
def test_patma_214(self):
def f():
match 42:
case 42:
return locals()
self.assertEqual(set(f()), set())
def test_patma_215(self):
def f():
match 1:
case 1 | 2 | 3:
return locals()
self.assertEqual(set(f()), set())
def test_patma_216(self):
def f():
match ...:
case _:
return locals()
self.assertEqual(set(f()), set())
def test_patma_217(self):
def f():
match ...:
case abc:
return locals()
self.assertEqual(set(f()), {"abc"})
@no_perf
def test_patma_218(self):
self.assert_syntax_error("""
match ...:
case "a" | a:
pass
""")
@no_perf
def test_patma_219(self):
self.assert_syntax_error("""
match ...:
case a | "a":
pass
""")
def test_patma_220(self):
def f():
match ..., ...:
case a, b:
return locals()
self.assertEqual(set(f()), {"a", "b"})
@no_perf
def test_patma_221(self):
self.assert_syntax_error("""
match ...:
case a, a:
pass
""")
def test_patma_222(self):
def f():
match {"k": ..., "l": ...}:
case {"k": a, "l": b}:
return locals()
self.assertEqual(set(f()), {"a", "b"})
@no_perf
def test_patma_223(self):
self.assert_syntax_error("""
match ...:
case {"k": a, "l": a}:
pass
""")
def test_patma_224(self):
def f():
match MyClass(..., ...):
case MyClass(x, y=y):
return locals()
self.assertEqual(set(f()), {"x", "y"})
@no_perf
def test_patma_225(self):
self.assert_syntax_error("""
match ...:
case MyClass(x, x):
pass
""")
@no_perf
def test_patma_226(self):
self.assert_syntax_error("""
match ...:
case MyClass(x=x, y=x):
pass
""")
@no_perf
def test_patma_227(self):
self.assert_syntax_error("""
match ...:
case MyClass(x, y=x):
pass
""")
def test_patma_228(self):
def f():
match ...:
case b as a:
return locals()
self.assertEqual(set(f()), {"a", "b"})
@no_perf
def test_patma_229(self):
self.assert_syntax_error("""
match ...:
case a as a:
pass
""")
def test_patma_230(self):
def f(x):
match x:
case _:
return 0
self.assertEqual(f(0), 0)
self.assertEqual(f(1), 0)
self.assertEqual(f(2), 0)
self.assertEqual(f(3), 0)
def test_patma_231(self):
def f(x):
match x:
case 0:
return 0
self.assertEqual(f(0), 0)
self.assertIs(f(1), None)
self.assertIs(f(2), None)
self.assertIs(f(3), None)
def test_patma_232(self):
def f(x):
match x:
case 0:
return 0
case _:
return 1
self.assertEqual(f(0), 0)
self.assertEqual(f(1), 1)
self.assertEqual(f(2), 1)
self.assertEqual(f(3), 1)
def test_patma_233(self):
def f(x):
match x:
case 0:
return 0
case 1:
return 1
self.assertEqual(f(0), 0)
self.assertEqual(f(1), 1)
self.assertIs(f(2), None)
self.assertIs(f(3), None)
def test_patma_234(self):
def f(x):
match x:
case 0:
return 0
case 1:
return 1
case _:
return 2
self.assertEqual(f(0), 0)
self.assertEqual(f(1), 1)
self.assertEqual(f(2), 2)
self.assertEqual(f(3), 2)
def test_patma_235(self):
def f(x):
match x:
case 0:
return 0
case 1:
return 1
case 2:
return 2
self.assertEqual(f(0), 0)
self.assertEqual(f(1), 1)
self.assertEqual(f(2), 2)
self.assertIs(f(3), None)
@no_perf
def test_patma_236(self):
self.assert_syntax_error("""
match ...:
case {**rest, "key": value}:
pass
""")
@no_perf
def test_patma_237(self):
self.assert_syntax_error("""
match ...:
case {"first": first, **rest, "last": last}:
pass
""")
@no_perf
def test_patma_238(self):
self.assert_syntax_error("""
match ...:
case *a, b, *c, d, *e:
pass
""")
@no_perf
def test_patma_239(self):
self.assert_syntax_error("""
match ...:
case a, *b, c, *d, e:
pass
""")
@no_perf
def test_patma_240(self):
self.assert_syntax_error("""
match ...:
case 0+0:
pass
""")
@no_perf
def test_patma_241(self):
self.assert_syntax_error("""
match ...:
case f"":
pass
""")
@no_perf
def test_patma_242(self):
self.assert_syntax_error("""
match ...:
case f"{x}":
pass
""")
@no_perf
def test_patma_243(self):
self.assert_syntax_error("""
match 42:
case x:
pass
case y:
pass
""")
@no_perf
def test_patma_244(self):
self.assert_syntax_error("""
match ...:
case {**_}:
pass
""")
@no_perf
def test_patma_245(self):
self.assert_syntax_error("""
match ...:
case 42 as _:
pass
""")
@no_perf
def test_patma_246(self):
class Class:
__match_args__ = None
x = Class()
y = z = None
with self.assertRaises(TypeError):
match x:
case Class(y):
z = 0
self.assertIs(y, None)
self.assertIs(z, None)
@no_perf
def test_patma_247(self):
class Class:
__match_args__ = "XYZ"
x = Class()
y = z = None
with self.assertRaises(TypeError):
match x:
case Class(y):
z = 0
self.assertIs(y, None)
self.assertIs(z, None)
@no_perf
def test_patma_248(self):
class Class:
__match_args__ = (None,)
x = Class()
y = z = None
with self.assertRaises(TypeError):
match x:
case Class(y):
z = 0
self.assertIs(y, None)
self.assertIs(z, None)
@no_perf
def test_patma_249(self):
class Class:
__match_args__ = ()
x = Class()
y = z = None
with self.assertRaises(TypeError):
match x:
case Class(y):
z = 0
self.assertIs(y, None)
self.assertIs(z, None)
@no_perf
def test_patma_250(self):
self.assert_syntax_error("""
match ...:
case Class(a=_, a=_):
pass
""")
@no_perf
def test_patma_251(self):
x = {"a": 0, "b": 1}
w = y = z = None
with self.assertRaises(ValueError):
match x:
case {"a": y, "a": z}:
w = 0
self.assertIs(w, None)
self.assertIs(y, None)
self.assertIs(z, None)
@no_perf
def test_patma_252(self):
class Keys:
KEY = "a"
x = {"a": 0, "b": 1}
w = y = z = None
with self.assertRaises(ValueError):
match x:
case {Keys.KEY: y, "a": z}:
w = 0
self.assertIs(w, None)
self.assertIs(y, None)
self.assertIs(z, None)
@no_perf
def test_patma_253(self):
class Class:
__match_args__ = ("a", "a")
a = None
x = Class()
w = y = z = None
with self.assertRaises(TypeError):
match x:
case Class(y, z):
w = 0
self.assertIs(w, None)
self.assertIs(y, None)
self.assertIs(z, None)
@no_perf
def test_patma_254(self):
class Class:
__match_args__ = ("a",)
a = None
x = Class()
w = y = z = None
with self.assertRaises(TypeError):
match x:
case Class(y, a=z):
w = 0
self.assertIs(w, None)
self.assertIs(y, None)
self.assertIs(z, None)
def test_patma_255(self):
match():
case():
x = 0
self.assertEqual(x, 0)
def test_patma_256(self):
x = 0
match(x):
case(x):
y = 0
self.assertEqual(x, 0)
self.assertEqual(y, 0)
def test_patma_257(self):
x = 0
match x:
case False:
y = 0
case 0:
y = 1
self.assertEqual(x, 0)
self.assertEqual(y, 1)
def test_patma_258(self):
x = 1
match x:
case True:
y = 0
case 1:
y = 1
self.assertEqual(x, 1)
self.assertEqual(y, 1)
def test_patma_259(self):
class Eq:
def __eq__(self, other):
return True
x = eq = Eq()
y = None
match x:
case None:
y = 0
self.assertIs(x, eq)
self.assertEqual(y, None)
def test_patma_260(self):
x = False
match x:
case False:
y = 0
self.assertIs(x, False)
self.assertEqual(y, 0)
def test_patma_261(self):
x = True
match x:
case True:
y = 0
self.assertIs(x, True)
self.assertEqual(y, 0)
def test_patma_262(self):
x = None
match x:
case None:
y = 0
self.assertIs(x, None)
self.assertEqual(y, 0)
def test_patma_263(self):
x = 0
match x:
case (0 as w) as z:
y = 0
self.assertEqual(w, 0)
self.assertEqual(x, 0)
self.assertEqual(y, 0)
self.assertEqual(z, 0)
def test_patma_264(self):
x = 0
match x:
case (0 as w) as z:
y = 0
self.assertEqual(w, 0)
self.assertEqual(x, 0)
self.assertEqual(y, 0)
self.assertEqual(z, 0)
def test_patma_265(self):
x = ((0, 1), (2, 3))
match x:
case ((a as b, c as d) as e) as w, ((f as g, h) as i) as z:
y = 0
self.assertEqual(a, 0)
self.assertEqual(b, 0)
self.assertEqual(c, 1)
self.assertEqual(d, 1)
self.assertEqual(e, (0, 1))
self.assertEqual(f, 2)
self.assertEqual(g, 2)
self.assertEqual(h, 3)
self.assertEqual(i, (2, 3))
self.assertEqual(w, (0, 1))
self.assertEqual(x, ((0, 1), (2, 3)))
self.assertEqual(y, 0)
self.assertEqual(z, (2, 3))
@no_perf
def test_patma_266(self):
self.assert_syntax_error("""
match ...:
case _ | _:
pass
""")
@no_perf
def test_patma_267(self):
self.assert_syntax_error("""
match ...:
case (_ as x) | [x]:
pass
""")
@no_perf
def test_patma_268(self):
self.assert_syntax_error("""
match ...:
case _ | _ if condition():
pass
""")
@no_perf
def test_patma_269(self):
self.assert_syntax_error("""
match ...:
case x | [_ as x] if x:
pass
""")
@no_perf
def test_patma_270(self):
self.assert_syntax_error("""
match ...:
case _:
pass
case None:
pass
""")
@no_perf
def test_patma_271(self):
self.assert_syntax_error("""
match ...:
case x:
pass
case [x] if x:
pass
""")
@no_perf
def test_patma_272(self):
self.assert_syntax_error("""
match ...:
case x:
pass
case _:
pass
""")
@no_perf
def test_patma_273(self):
self.assert_syntax_error("""
match ...:
case (None | _) | _:
pass
""")
@no_perf
def test_patma_274(self):
self.assert_syntax_error("""
match ...:
case _ | (True | False):
pass
""")
def test_patma_275(self):
x = collections.UserDict({0: 1, 2: 3})
match x:
case {2: 3}:
y = 0
self.assertEqual(x, {0: 1, 2: 3})
self.assertEqual(y, 0)
def test_patma_276(self):
x = collections.UserDict({0: 1, 2: 3})
match x:
case {2: 3, **z}:
y = 0
self.assertEqual(x, {0: 1, 2: 3})
self.assertEqual(y, 0)
self.assertEqual(z, {0: 1})
def test_patma_277(self):
x = [[{0: 0}]]
match x:
case list([({-0-0j: int(real=0+0j, imag=0-0j) | (1) as z},)]):
y = 0
self.assertEqual(x, [[{0: 0}]])
self.assertEqual(y, 0)
self.assertEqual(z, 0)
def test_patma_278(self):
x = range(3)
match x:
case [y, *_, z]:
w = 0
self.assertEqual(w, 0)
self.assertEqual(x, range(3))
self.assertEqual(y, 0)
self.assertEqual(z, 2)
def test_patma_279(self):
x = range(3)
match x:
case [_, *_, y]:
z = 0
self.assertEqual(x, range(3))
self.assertEqual(y, 2)
self.assertEqual(z, 0)
def test_patma_280(self):
x = range(3)
match x:
case [*_, y]:
z = 0
self.assertEqual(x, range(3))
self.assertEqual(y, 2)
self.assertEqual(z, 0)
@no_perf
def test_patma_281(self):
x = range(10)
y = None
with self.assertRaises(TypeError):
match x:
case range(10):
y = 0
self.assertEqual(x, range(10))
self.assertIs(y, None)
@no_perf
def test_patma_282(self):
class Class:
__match_args__ = ["spam", "eggs"]
spam = 0
eggs = 1
x = Class()
w = y = z = None
with self.assertRaises(TypeError):
match x:
case Class(y, z):
w = 0
self.assertIs(w, None)
self.assertIs(y, None)
self.assertIs(z, None)
@no_perf
def test_patma_283(self):
self.assert_syntax_error("""
match ...:
case {0+0: _}:
pass
""")
@no_perf
def test_patma_284(self):
self.assert_syntax_error("""
match ...:
case {f"": _}:
pass
""")
@no_perf
def test_patma_285(self):
self.assert_syntax_error("""
match ...:
case 0j+0:
pass
""")
@no_perf
def test_patma_286(self):
self.assert_syntax_error("""
match ...:
case 0j+0j:
pass
""")
@no_perf
def test_patma_287(self):
self.assert_syntax_error("""
match ...:
case {0j+0: _}:
pass
""")
@no_perf
def test_patma_288(self):
self.assert_syntax_error("""
match ...:
case {0j+0j: _}:
pass
""")
def test_patma_289(self):
x = {"y": 1}
match x:
case {"y": (0 as y) | (1 as y)}:
z = 0
self.assertEqual(x, {"y": 1})
self.assertEqual(y, 1)
self.assertEqual(z, 0)
@no_perf
def test_patma_290(self):
self.assert_syntax_error("""
match ...:
case [a, [b] | [c] | [d]]:
pass
""")
@no_perf
def test_patma_291(self):
# Hunting for leaks using -R doesn't catch leaks in the compiler itself,
# just the code under test. This test ensures that if there are leaks in
# the pattern compiler, those runs will fail:
with open(__file__) as file:
compile(file.read(), __file__, "exec")
def test_patma_292(self):
def f(x):
match x:
case ((a, b, c, d, e, f, g, h, i, 9) |
(h, g, i, a, b, d, e, c, f, 10) |
(g, b, a, c, d, -5, e, h, i, f) |
(-1, d, f, b, g, e, i, a, h, c)):
w = 0
out = locals()
del out["x"]
return out
alts = [
dict(a=0, b=1, c=2, d=3, e=4, f=5, g=6, h=7, i=8, w=0),
dict(h=1, g=2, i=3, a=4, b=5, d=6, e=7, c=8, f=9, w=0),
dict(g=0, b=-1, a=-2, c=-3, d=-4, e=-6, h=-7, i=-8, f=-9, w=0),
dict(d=-2, f=-3, b=-4, g=-5, e=-6, i=-7, a=-8, h=-9, c=-10, w=0),
dict(),
]
self.assertEqual(f(range(10)), alts[0])
self.assertEqual(f(range(1, 11)), alts[1])
self.assertEqual(f(range(0, -10, -1)), alts[2])
self.assertEqual(f(range(-1, -11, -1)), alts[3])
self.assertEqual(f(range(10, 20)), alts[4])
def test_patma_293(self):
def f(x):
match x:
case [y, (a, b, c, d, e, f, g, h, i, 9) |
(h, g, i, a, b, d, e, c, f, 10) |
(g, b, a, c, d, -5, e, h, i, f) |
(-1, d, f, b, g, e, i, a, h, c), z]:
w = 0
out = locals()
del out["x"]
return out
alts = [
dict(a=0, b=1, c=2, d=3, e=4, f=5, g=6, h=7, i=8, w=0, y=False, z=True),
dict(h=1, g=2, i=3, a=4, b=5, d=6, e=7, c=8, f=9, w=0, y=False, z=True),
dict(g=0, b=-1, a=-2, c=-3, d=-4, e=-6, h=-7, i=-8, f=-9, w=0, y=False, z=True),
dict(d=-2, f=-3, b=-4, g=-5, e=-6, i=-7, a=-8, h=-9, c=-10, w=0, y=False, z=True),
dict(),
]
self.assertEqual(f((False, range(10), True)), alts[0])
self.assertEqual(f((False, range(1, 11), True)), alts[1])
self.assertEqual(f((False, range(0, -10, -1), True)), alts[2])
self.assertEqual(f((False, range(-1, -11, -1), True)), alts[3])
self.assertEqual(f((False, range(10, 20), True)), alts[4])
class TestInheritance(unittest.TestCase):
def test_multiple_inheritance(self):
class C:
pass
class S1(collections.UserList, collections.abc.Mapping):
pass
class S2(C, collections.UserList, collections.abc.Mapping):
pass
class S3(list, C, collections.abc.Mapping):
pass
class S4(collections.UserList, dict, C):
pass
class M1(collections.UserDict, collections.abc.Sequence):
pass
class M2(C, collections.UserDict, collections.abc.Sequence):
pass
class M3(collections.UserDict, C, list):
pass
class M4(dict, collections.abc.Sequence, C):
pass
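        # f() and g() check that sequence vs. mapping classification follows the
        # class's ancestry: whichever sequence-like (list/UserList/abc.Sequence)
        # or mapping-like (dict/UserDict/abc.Mapping) base comes first in the
        # MRO decides which pattern the instance matches, regardless of case
        # order in the match statement.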
def f(x):
match x:
case []:
return "seq"
case {}:
return "map"
def g(x):
match x:
case {}:
return "map"
case []:
return "seq"
for Seq in (S1, S2, S3, S4):
self.assertEqual(f(Seq()), "seq")
self.assertEqual(g(Seq()), "seq")
for Map in (M1, M2, M3, M4):
self.assertEqual(f(Map()), "map")
self.assertEqual(g(Map()), "map")
class PerfPatma(TestPatma):
def assertEqual(*_, **__):
pass
def assertIs(*_, **__):
pass
def assertRaises(*_, **__):
assert False, "this test should be decorated with @no_perf!"
def assertWarns(*_, **__):
assert False, "this test should be decorated with @no_perf!"
def run_perf(self):
attrs = vars(TestPatma).items()
tests = [
attr for name, attr in attrs
if name.startswith("test_") and not hasattr(attr, "no_perf")
]
for _ in range(1 << 8):
for test in tests:
test(self)
@staticmethod
def setUpClass():
raise unittest.SkipTest("performance testing")
"""
# From inside venv pointing to this Python, with pyperf installed:
sudo $(which python) -m pyperf system tune && \
$(which python) -m pyperf timeit --rigorous --setup "from test.test_patma import PerfPatma; p = PerfPatma()" "p.run_perf()"; \
sudo $(which python) -m pyperf system reset
"""
| 25.454872
| 131
| 0.425573
|
3c795cfd280db64cfac223774bbb98411012e7ea
| 2,499
|
py
|
Python
|
metadata_etl.py
|
DanielKnott95/TrueFilm
|
c859207bdd07ba0700d6326d6b83c568e6649724
|
[
"MIT"
] | null | null | null |
metadata_etl.py
|
DanielKnott95/TrueFilm
|
c859207bdd07ba0700d6326d6b83c568e6649724
|
[
"MIT"
] | null | null | null |
metadata_etl.py
|
DanielKnott95/TrueFilm
|
c859207bdd07ba0700d6326d6b83c568e6649724
|
[
"MIT"
] | null | null | null |
import pyspark.sql.functions as F
from pyspark.sql import SparkSession
from pyspark.sql.types import StringType
from SPARQLWrapper import SPARQLWrapper, JSON
from pyspark.sql.functions import udf, desc
from utils import setup_logger
logger = setup_logger(__name__)
sparql = SPARQLWrapper("https://query.wikidata.org/sparql")
spark = SparkSession.builder.appName("TrueFilm ETL").getOrCreate()
class MovieMetaDataETL:
def __init__(self, metadata_path: str):
self.metadata_path = metadata_path
def run_movie_etl(self):
movie_metadata = self.load_spark_dataframe()
movie_metadata = self.calculate_rev_to_budget(movie_metadata, top_n=1000)
movie_metadata = self.run_get_wiki_url(movie_metadata)
return movie_metadata
def load_spark_dataframe(self):
logger.info(f"Loading Spark DataFrame from {self.metadata_path}")
movie_metadata = spark.read.option("header",True).format("csv").load(self.metadata_path)
movie_metadata = movie_metadata.filter((movie_metadata.adult == 'False') | (movie_metadata.adult == 'True'))
movie_metadata = movie_metadata.where("budget>0")
return movie_metadata
    def calculate_rev_to_budget(self, dataframe, top_n=1000):
        dataframe = dataframe.withColumn("rev_ratio", (F.col("revenue") / F.col("budget")))
        dataframe = dataframe.sort(desc("rev_ratio"))
        if top_n:
            dataframe = dataframe.limit(top_n)
return dataframe
def run_get_wiki_url(self, dataframe):
def get_wiki_url(imdbID):
if not imdbID:
return ""
queryString = """
SELECT ?wppage WHERE {
?subject wdt:P345 'IMDB-ID' .
?wppage schema:about ?subject .
FILTER(contains(str(?wppage),'//en.wikipedia'))
}
"""
queryString = queryString.replace("IMDB-ID",str(imdbID))
sparql.setQuery(queryString)
sparql.setReturnFormat(JSON)
try:
results = sparql.query().convert()
for result in results["results"]["bindings"]:
wppage = result["wppage"]["value"]
return wppage
            except Exception:
                return ""
            return ""  # query succeeded but returned no bindings
udf_get_wiki_url = udf(get_wiki_url, StringType())
dataframe_wiki_url = dataframe.withColumn("wiki_url", udf_get_wiki_url("imdb_id"))
return dataframe_wiki_url
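# Hypothetical usage sketch: the CSV path and the "title" column below are
# assumptions about a TMDB-style movies_metadata.csv, not requirements imposed
# by the class itself.
if __name__ == "__main__":
    etl = MovieMetaDataETL("movies_metadata.csv")  # assumed input path
    top_movies = etl.run_movie_etl()
    top_movies.select("title", "rev_ratio", "wiki_url").show(10, truncate=False)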
| 37.298507
| 116
| 0.638255
|
4082f5868acb3240bd0b7da12ab9fc72defefe09
| 1,141
|
py
|
Python
|
setup.py
|
semicolom/django-tools-seo
|
9b16731949480fc25ad010811e528d22bf335bfb
|
[
"MIT"
] | 2
|
2018-11-07T18:21:16.000Z
|
2021-07-09T21:57:41.000Z
|
setup.py
|
QPC-database/django-tools-seo
|
9b16731949480fc25ad010811e528d22bf335bfb
|
[
"MIT"
] | 2
|
2018-11-12T14:33:31.000Z
|
2019-03-06T17:16:13.000Z
|
setup.py
|
QPC-database/django-tools-seo
|
9b16731949480fc25ad010811e528d22bf335bfb
|
[
"MIT"
] | 1
|
2021-07-09T21:57:43.000Z
|
2021-07-09T21:57:43.000Z
|
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-tools-seo',
version='1.0.6',
packages=find_packages(exclude=("tests",)),
include_package_data=True,
description='Simple Django app to manage project SEO like Google Analytics',
long_description=README,
long_description_content_type="text/markdown",
author='Toni Colom',
author_email='toni@semicolom.com',
url='https://github.com/semicolom/django-tools-seo',
install_requires=[],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 2.0',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| 33.558824
| 80
| 0.657318
|
a757e98fd54ab13728b028eb093e76ae81ea343d
| 822
|
py
|
Python
|
survey/urls.py
|
suger-luck/health
|
1e02525d52508184faf1119f553d57e3b757d960
|
[
"MIT"
] | null | null | null |
survey/urls.py
|
suger-luck/health
|
1e02525d52508184faf1119f553d57e3b757d960
|
[
"MIT"
] | null | null | null |
survey/urls.py
|
suger-luck/health
|
1e02525d52508184faf1119f553d57e3b757d960
|
[
"MIT"
] | null | null | null |
from django.urls import path
from survey import views
urlpatterns = [
    path('index/', views.index, name='index'),  # home page
    path('login/', views.login, name='login'),  # login page
    path('login_user/', views.login_user, name='login_user'),  # validate the login username
    path('login_check/', views.login_check, name='login_check'),  # validate the login credentials
    path('registered/', views.registered, name='registered'),  # registration page
    path('registered_check/', views.registered_check, name='registered_check'),  # registration validation page
    path('check_id/', views.check_id, name='check_id'),  # check whether the ID is already registered
    path('modify/', views.modify, name='modify'),  # modify user information
    path('delete/', views.delete, name='delete'),  # log out and clear the session
    path('question/', views.question, name='question'),  # survey questionnaire
    path('question_sumbit/', views.question_submit, name='question_sumbit'),  # submit the questionnaire
]
| 48.352941
| 88
| 0.684915
|
fac772042e5606fb8e030e24e77872332a250b6a
| 7,841
|
py
|
Python
|
setup.py
|
nataliaratnikova/rucio
|
4c7789bb9cdb824cc90b91fa0b66acf1b0fac128
|
[
"Apache-2.0"
] | 1
|
2018-02-27T08:03:41.000Z
|
2018-02-27T08:03:41.000Z
|
setup.py
|
greed2411/rucio
|
3e9a50ce1c3b062a0d048e7aa125ca8ce36fe78e
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
greed2411/rucio
|
3e9a50ce1c3b062a0d048e7aa125ca8ce36fe78e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Mario Lassnig <mario.lassnig@cern.ch>, 2012-2014
# - Vincent Garonne <vgaronne@gmail.com>, 2012-2018
# - Martin Barisits <martin.barisits@cern.ch>, 2012-2017
# - Wen Guan <wguan.icedew@gmail.com>, 2014
# - Thomas Beermann <thomas.beermann@cern.ch>, 2014
'''
Setup.py which moves kerberos modules in extra dependencies to be buildthedocs compliant.
'''
import glob
import os
import re
import subprocess
import sys
from distutils.command.sdist import sdist as _sdist
if sys.version_info < (2, 5):
print('ERROR: Rucio requires at least Python 2.5 to run.')
sys.exit(1)
sys.path.insert(0, os.path.abspath('lib/'))
from rucio import version # noqa
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
name = 'rucio'
packages = find_packages('lib/')
description = "Rucio Package"
IsRelease = False
requirements_files = ['tools/pip-requires', 'tools/pip-requires-client']
data_files = [('rucio/etc/', glob.glob('etc/*.template')),
('rucio/etc/web', glob.glob('etc/web/*.template')),
('rucio/etc/schemas', glob.glob('etc/schemas/*.json')),
('rucio/tools/', ['tools/pip-requires', 'tools/pip-requires-client', 'tools/pip-requires-test',
'tools/bootstrap.py', 'tools/reset_database.py']),
('rucio/tools/probes/common/', ['tools/probes/common/graphite2nagios', ]),
('rucio/tools/probes/common/', glob.glob('tools/probes/common/check*')),
('rucio/etc/mail_templates/', glob.glob('etc/mail_templates/*.tmpl'))]
scripts = glob.glob('bin/rucio*')
copy_args = sys.argv[1:]
if '--release' in copy_args:
IsRelease = True
copy_args.remove('--release')
# Flags to know if the installation is done through pip and against git
using_pip = os.path.basename(os.path.dirname(__file__)).startswith('pip-')
using_git = os.path.isdir('.git')
def run_git_command(cmd):
"""
    Run a git command in path and return its output.
:param cmd: the git command.
:return: Output of the git command.
"""
output = subprocess.Popen(["/bin/sh", "-c", cmd], stdout=subprocess.PIPE)
return output.communicate()[0].strip()
if using_pip and using_git:
git_version_cmd = '''git describe --dirty=-dev`date +%s`'''
pkg_version = run_git_command(git_version_cmd)
branch_nick_cmd = 'git branch | grep -Ei "\* (.*)" | cut -f2 -d" "'
branch_nick = run_git_command(branch_nick_cmd)
revid_cmd = "git rev-parse HEAD"
revid = run_git_command(revid_cmd)
revno_cmd = "git --no-pager log --oneline | wc -l"
revno = run_git_command(revno_cmd)
version_file = open("lib/rucio/vcsversion.py", 'w')
version_file.write("""'''\n"""
"""This file is automatically generated by setup.py, So don't edit it. :)\n"""
"""'''\n"""
"""VERSION_INFO = {\n"""
""" 'final': False,\n"""
""" 'version': '%s',\n"""
""" 'branch_nick': '%s',\n"""
""" 'revision_id': '%s',\n"""
""" 'revno': %s\n"""
"""}""" % (pkg_version, branch_nick, revid, revno))
version_file.close()
else:
pkg_version = version.version_string()
cmdclass = {}
try:
from sphinx.setup_command import BuildDoc
class local_BuildDoc(BuildDoc):
def run(self):
for builder in ['html']: # 'man','latex'
self.builder = builder
self.finalize_options()
BuildDoc.run(self)
cmdclass['build_sphinx'] = local_BuildDoc
except ImportError:
pass
def get_reqs_from_file(requirements_file):
if os.path.exists(requirements_file):
return open(requirements_file, 'r').read().split('\n')
return []
def parse_requirements(requirements_files):
requirements = []
for requirements_file in requirements_files:
for line in get_reqs_from_file(requirements_file):
if 'kerberos' in line:
pass
elif re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1', line))
elif re.match(r'\s*-f\s+', line):
pass
else:
requirements.append(line)
return requirements
def parse_dependency_links(requirements_files):
dependency_links = []
for requirements_file in requirements_files:
for line in get_reqs_from_file(requirements_file):
if re.match(r'(\s*#)|(\s*$)', line):
continue
if re.match(r'\s*-[ef]\s+', line):
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
return dependency_links
def write_requirements():
venv = os.environ.get('VIRTUAL_ENV', None)
if venv is not None:
req_file = open("requirements.txt", "w")
output = subprocess.Popen(["pip", "freeze", "-l"], stdout=subprocess.PIPE)
requirements = output.communicate()[0].strip()
req_file.write(requirements)
req_file.close()
oracle_extras = ['cx_oracle>=5.1']
postgresql_extras = ['psycopg2>=2.4.2']
mysql_extras = ['PyMySQL']
kerberos_extras = ['kerberos>=1.2.5', 'pykerberos>=1.1.14', 'requests-kerberos>=0.11.0']
requires = parse_requirements(requirements_files=requirements_files)
extras_require = dict(oracle=oracle_extras,
postgresql=postgresql_extras,
mysql=mysql_extras,
kerberos=kerberos_extras)
depend_links = parse_dependency_links(requirements_files=requirements_files)
class CustomSdist(_sdist):
user_options = [
('packaging=', None, "Some option to indicate what should be packaged")
] + _sdist.user_options
def __init__(self, *args, **kwargs):
_sdist.__init__(self, *args, **kwargs)
self.packaging = "default value for this option"
def get_file_list(self):
print "Chosen packaging option: " + name
self.distribution.data_files = data_files
_sdist.get_file_list(self)
cmdclass['sdist'] = CustomSdist
setup(
name=name,
version=pkg_version,
packages=packages,
package_dir={'': 'lib'},
data_files=data_files,
script_args=copy_args,
cmdclass=cmdclass,
include_package_data=True,
scripts=scripts,
# doc=cmdclass,
author="Rucio",
author_email="rucio-dev@cern.ch",
description=description,
license="Apache License, Version 2.0",
url="http://rucio.cern.ch/",
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'Operating System :: POSIX :: Linux',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Environment :: No Input/Output (Daemon)', ],
install_requires=requires,
extras_require=extras_require,
dependency_links=depend_links,
)
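# Hedged usage note (not part of the upstream file): typical invocations from a source
# checkout look roughly like this; the exact workflow is an assumption.
#     python setup.py --release sdist   # '--release' is stripped from the args and sets IsRelease
#     python setup.py install           # regular install driven by tools/pip-requires*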
| 34.390351
| 109
| 0.631425
|
78939760c42b9d8a029f24bb532122ead4d04b61
| 8,413
|
py
|
Python
|
python/tvm/micro/artifact.py
|
janifer112x/incubator-tvm
|
98c2096f4944bdbdbbb2b7b20ccd35c6c11dfbf6
|
[
"Apache-2.0"
] | 22
|
2022-03-18T07:29:31.000Z
|
2022-03-23T14:54:32.000Z
|
python/tvm/micro/artifact.py
|
janifer112x/incubator-tvm
|
98c2096f4944bdbdbbb2b7b20ccd35c6c11dfbf6
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/micro/artifact.py
|
janifer112x/incubator-tvm
|
98c2096f4944bdbdbbb2b7b20ccd35c6c11dfbf6
|
[
"Apache-2.0"
] | 2
|
2022-03-18T08:26:34.000Z
|
2022-03-20T06:02:48.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""""Defines abstractions around compiler artifacts produced in compiling micro TVM binaries."""
import io
import os
import json
import shutil
import tarfile
class ArtifactFileNotFoundError(Exception):
"""Raised when an artifact file cannot be found on disk."""
class ArtifactBadSymlinkError(Exception):
"""Raised when an artifact symlink points outside the base directory."""
class ArtifactBadArchiveError(Exception):
"""Raised when an artifact archive is malformed."""
class Artifact:
"""Describes a compiler artifact and defines common logic to archive it for transport."""
# A version number written to the archive.
ENCODING_VERSION = 1
# A unique string identifying the type of artifact in an archive. Subclasses must redefine this
# variable.
ARTIFACT_TYPE = None
@classmethod
def unarchive(cls, archive_path, base_dir):
"""Unarchive an artifact into base_dir.
Parameters
----------
archive_path : str
Path to the archive file.
base_dir : str
Path to a non-existent, empty directory under which the artifact will live.
Returns
-------
Artifact :
The unarchived artifact.
"""
if os.path.exists(base_dir):
raise ValueError(f"base_dir exists: {base_dir}")
base_dir_parent, base_dir_name = os.path.split(base_dir)
temp_dir = os.path.join(base_dir_parent, f"__tvm__{base_dir_name}")
os.mkdir(temp_dir)
try:
with tarfile.open(archive_path) as tar_f:
tar_f.extractall(temp_dir)
temp_dir_contents = os.listdir(temp_dir)
if len(temp_dir_contents) != 1:
raise ArtifactBadArchiveError(
"Expected exactly 1 subdirectory at root of archive, got "
f"{temp_dir_contents!r}"
)
metadata_path = os.path.join(temp_dir, temp_dir_contents[0], "metadata.json")
            if not os.path.exists(metadata_path):
raise ArtifactBadArchiveError("No metadata.json found in archive")
with open(metadata_path) as metadata_f:
metadata = json.load(metadata_f)
version = metadata.get("version")
if version != cls.ENCODING_VERSION:
raise ArtifactBadArchiveError(
f"archive version: expect {cls.EXPECTED_VERSION}, found {version}"
)
os.rename(os.path.join(temp_dir, temp_dir_contents[0]), base_dir)
artifact_cls = cls
for sub_cls in cls.__subclasses__():
if sub_cls.ARTIFACT_TYPE is not None and sub_cls.ARTIFACT_TYPE == metadata.get(
"artifact_type"
):
artifact_cls = sub_cls
break
return artifact_cls.from_unarchived(
base_dir, metadata["labelled_files"], metadata["metadata"]
)
finally:
shutil.rmtree(temp_dir)
@classmethod
def from_unarchived(cls, base_dir, labelled_files, metadata):
return cls(base_dir, labelled_files, metadata)
def __init__(self, base_dir, labelled_files, metadata):
"""Create a new artifact.
Parameters
----------
base_dir : str
The path to a directory on disk which contains all the files in this artifact.
labelled_files : Dict[str, str]
A dict mapping a file label to the relative paths of the files that carry that label.
metadata : Dict
            A dict containing arbitrary JSON-serializable key-value data describing the artifact.
"""
self.base_dir = os.path.realpath(base_dir)
self.labelled_files = labelled_files
self.metadata = metadata
for label, files in labelled_files.items():
for f in files:
f_path = os.path.join(self.base_dir, f)
if not os.path.lexists(f_path):
raise ArtifactFileNotFoundError(f"{f} (label {label}): not found at {f_path}")
if os.path.islink(f_path):
                    link_path = os.readlink(f_path)
if os.path.isabs(link_path):
link_fullpath = link_path
else:
link_fullpath = os.path.join(os.path.dirname(f_path), link_path)
link_fullpath = os.path.realpath(link_fullpath)
if not link_fullpath.startswith(self.base_dir):
raise ArtifactBadSymlinkError(
f"{f} (label {label}): symlink points outside artifact tree"
)
def abspath(self, rel_path):
"""Return absolute path to the member with the given relative path."""
return os.path.join(self.base_dir, rel_path)
def label(self, label):
"""Return a list of relative paths to files with the given label."""
return self.labelled_files[label]
def label_abspath(self, label):
return [self.abspath(p) for p in self.labelled_files[label]]
def archive(self, archive_path):
"""Create a relocatable tar archive of the artifacts.
Parameters
----------
archive_path : str
Path to the tar file to create. Or, path to a directory, under which a tar file will be
created named {base_dir}.tar.
Returns
-------
str :
            The value of archive_path, after potentially applying the computation described above.
"""
if os.path.isdir(archive_path):
archive_path = os.path.join(archive_path, f"{os.path.basename(self.base_dir)}.tar")
archive_name = os.path.splitext(os.path.basename(archive_path))[0]
with tarfile.open(archive_path, "w") as tar_f:
def _add_file(name, data, f_type):
tar_info = tarfile.TarInfo(name=name)
tar_info.type = f_type
data_bytes = bytes(data, "utf-8")
                tar_info.size = len(data_bytes)
tar_f.addfile(tar_info, io.BytesIO(data_bytes))
_add_file(
f"{archive_name}/metadata.json",
json.dumps(
{
"version": self.ENCODING_VERSION,
"labelled_files": self.labelled_files,
"metadata": self.metadata,
},
indent=2,
sort_keys=True,
),
tarfile.REGTYPE,
)
for dir_path, _, files in os.walk(self.base_dir):
for f in files:
file_path = os.path.join(dir_path, f)
archive_file_path = os.path.join(
archive_name, os.path.relpath(file_path, self.base_dir)
)
if not os.path.islink(file_path):
tar_f.add(file_path, archive_file_path, recursive=False)
continue
link_path = os.readlink(file_path)
if not os.path.isabs(link_path):
tar_f.add(file_path, archive_file_path, recursive=False)
continue
relpath = os.path.relpath(link_path, os.path.dirname(file_path))
_add_file(archive_file_path, relpath, tarfile.LNKTYPE)
return archive_path
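# Hedged usage sketch (not part of the upstream module): a round trip through archive()
# and unarchive() using a throwaway directory. The file names, labels and metadata below
# are assumptions made purely for illustration.
if __name__ == "__main__":
    import tempfile
    work = tempfile.mkdtemp()
    src_dir = os.path.join(work, "build")
    os.mkdir(src_dir)
    with open(os.path.join(src_dir, "lib.o"), "w") as f:
        f.write("object code placeholder")
    art = Artifact(src_dir, {"objects": ["lib.o"]}, {"target": "c"})
    tar_path = art.archive(work)  # -> <work>/build.tar
    restored = Artifact.unarchive(tar_path, os.path.join(work, "restored"))
    print(restored.label_abspath("objects"))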
| 38.240909
| 99
| 0.584928
|
e3183e8ca6ba269ab25836ee501d02330de998b5
| 2,804
|
py
|
Python
|
cterasdk/edge/ftp.py
|
CTERA-Networks/ctera-python-sdk
|
35d9cb6949590e664fd237d29f54ce054fe80a9e
|
[
"Apache-2.0"
] | 5
|
2020-02-25T22:34:48.000Z
|
2020-02-29T22:56:39.000Z
|
cterasdk/edge/ftp.py
|
ctera/ctera-python-sdk
|
a86cb575d538620a12f326fe5e92f5abb5cc1f6b
|
[
"Apache-2.0"
] | 16
|
2020-03-25T19:12:03.000Z
|
2021-06-02T14:45:34.000Z
|
cterasdk/edge/ftp.py
|
ctera/ctera-python-sdk
|
a86cb575d538620a12f326fe5e92f5abb5cc1f6b
|
[
"Apache-2.0"
] | 3
|
2020-10-22T18:22:06.000Z
|
2021-10-03T18:38:41.000Z
|
import logging
from .enum import Mode
from ..exception import CTERAException
from .base_command import BaseCommand
class FTP(BaseCommand):
""" Gateway FTP configuration APIs """
def get_configuration(self):
"""
Get the current FTP configuration
:return cterasdk.common.object.Object:
"""
return self._gateway.get('/config/fileservices/ftp')
def enable(self):
""" Enable FTP """
self._set_mode(True)
def disable(self):
""" Disable FTP """
self._set_mode(False)
def is_disabled(self):
""" Check if the FTP server is disabled """
return self._gateway.get('/config/fileservices/ftp/mode') == Mode.Disabled
def _set_mode(self, enabled):
logging.getLogger().info('%s FTP server.', ('Enabling' if enabled else 'Disabling'))
self._gateway.put('/config/fileservices/ftp/mode', Mode.Enabled if enabled else Mode.Disabled)
logging.getLogger().info('FTP server %s.', ('enabled' if enabled else 'disabled'))
def modify(
self,
allow_anonymous_ftp=None,
anonymous_download_limit=None,
anonymous_ftp_folder=None,
banner_message=None,
max_connections_per_ip=None,
require_ssl=None):
"""
Modify the FTP Configuration. Parameters that are not passed will not be affected
:param bool,optional allow_anonymous_ftp: Enable/Disable anonymous FTP downloads
:param int,optional anonymous_download_limit:
Limit download bandwidth of anonymous connection in KB/sec per connection. 0 for unlimited
:param str,optional anonymous_ftp_folder: Anonymous FTP Directory
:param str,optional banner_message: FTP Banner Message
:param int,optional max_connections_per_ip: Maximum Connections per Client
        :param bool,optional require_ssl: If True, allow only SSL/TLS connections
"""
config = self.get_configuration()
if config.mode != Mode.Enabled:
raise CTERAException("FTP must be enabled in order to modify its configuration")
if anonymous_download_limit is not None:
config.AnonymousDownloadLimit = anonymous_download_limit
if anonymous_ftp_folder is not None:
config.AnonymousFTPFolder = anonymous_ftp_folder
if allow_anonymous_ftp is not None:
config.AllowAnonymousFTP = allow_anonymous_ftp
if banner_message is not None:
config.BannerMessage = banner_message
if max_connections_per_ip is not None:
config.MaxConnectionsPerIP = max_connections_per_ip
if require_ssl is not None:
config.RequireSSL = require_ssl
self._gateway.put('/config/fileservices/ftp', config)
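# Hedged usage sketch (not part of the upstream module): given an already authenticated
# gateway object (an assumption here, obtained through the cterasdk entry points), the
# command class is typically used like this:
#     ftp = FTP(gateway)
#     if ftp.is_disabled():
#         ftp.enable()
#     ftp.modify(require_ssl=True, max_connections_per_ip=5)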
| 39.492958
| 102
| 0.670827
|
e1f07aa343595c7f276fd901b259d7090f6db05d
| 68
|
py
|
Python
|
DistanceConfiguration/DistanceConfigItem.py
|
oswald0071/RaspberryDistanceSensorRead
|
4c2cff0de37191c0ad6a9b327c7f500bbcc0e283
|
[
"MIT"
] | null | null | null |
DistanceConfiguration/DistanceConfigItem.py
|
oswald0071/RaspberryDistanceSensorRead
|
4c2cff0de37191c0ad6a9b327c7f500bbcc0e283
|
[
"MIT"
] | null | null | null |
DistanceConfiguration/DistanceConfigItem.py
|
oswald0071/RaspberryDistanceSensorRead
|
4c2cff0de37191c0ad6a9b327c7f500bbcc0e283
|
[
"MIT"
] | null | null | null |
class DistanceModel:
name = ''
echo = 0
trigger = 0
| 13.6
| 21
| 0.514706
|
044028fef604823b1ef089c63295dadadf88a4d3
| 4,722
|
py
|
Python
|
azure-metrics-calc-storage-size/metrics-data.py
|
calebrob6/ai4eutils
|
7ffc8ad26300af70b6f9a3b665dd1803e5e4e603
|
[
"MIT"
] | null | null | null |
azure-metrics-calc-storage-size/metrics-data.py
|
calebrob6/ai4eutils
|
7ffc8ad26300af70b6f9a3b665dd1803e5e4e603
|
[
"MIT"
] | null | null | null |
azure-metrics-calc-storage-size/metrics-data.py
|
calebrob6/ai4eutils
|
7ffc8ad26300af70b6f9a3b665dd1803e5e4e603
|
[
"MIT"
] | null | null | null |
#%% Constants and imports
import utils
import datetime
import pandas as pd
import humanfriendly
import math
import os
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.monitor import MonitorManagementClient
from azure.mgmt.storage import StorageManagementClient
from enum import Enum
class Authentication:
# This refers to the on.microsoft.com directory, change this to enumerate alternative
# directories.
tenant = '72f988bf-86f1-41af-91ab-2d7cd011db47'
# This is the client ID for the Azure CLI
client_id = '04b07795-8ddb-461a-bbee-02f9e1bf7b46'
METRICS_NOT_AVAILABLE = float('nan')
class Metric_type(Enum):
blob_capacity = 1
fileshare_capacity = 2
#%% Classes and functions
def get_used_avg_blob_capacity(credentials,subscription_id):
resource_client = ResourceManagementClient(credentials, subscription_id)
storage_client = StorageManagementClient(credentials, subscription_id)
lst = []
count = 0
resource_groups = resource_client.resource_groups.list()
for group in resource_groups:
storage_accounts = storage_client.storage_accounts.list_by_resource_group(group.name)
for storage_account in storage_accounts:
print("Reading metric data from storage account: " + storage_account.name)
count += 1
blob_size = get_metric_data_capacity(group.name, storage_account.name,
subscription_id, Metric_type.blob_capacity)
file_size = get_metric_data_capacity(group.name, storage_account.name,
subscription_id, Metric_type.fileshare_capacity)
total_size = blob_size + file_size
if math.isnan(total_size):
total_size_friendly = ''
else:
total_size_friendly = humanfriendly.format_size(total_size)
lst.append([storage_account.name, group.name, blob_size, file_size, total_size, total_size_friendly])
print("Total number of storage accounts: "+ str(count))
cols = ['Storage account', 'Resource group', 'Blob capacity', 'File capacity', 'Total capacity', 'Total capacity (friendly)']
df = pd.DataFrame(lst, columns=cols)
file_name = 'metrics_' + datetime.datetime.now().strftime('%m-%d-%y-%H%M%S') + '.csv'
df.to_csv(file_name, header=cols, index=False)
print("\n")
print("Metrics saved to file: " + file_name)
return file_name
def get_metric_data_capacity(resource_group_name, storage_account_name, subscription_id, type):
client = MonitorManagementClient(credentials, subscription_id)
today = datetime.datetime.utcnow().date()
yesterday = today - datetime.timedelta(days=1)
resource_id = (
"subscriptions/{}/"
"resourceGroups/{}/"
"providers/Microsoft.Storage/storageAccounts/{}/{}")
metrics_data = None
if (type == Metric_type.blob_capacity):
resource_id = resource_id.format(subscription_id,
resource_group_name, storage_account_name, 'blobServices/default')
metrics_data = client.metrics.list(
resource_id,
timespan="{}/{}".format(yesterday, today),
interval='PT1H',
metric='Blob capacity',
aggregation='Average')
if (type == Metric_type.fileshare_capacity):
resource_id = resource_id.format(subscription_id,
resource_group_name, storage_account_name, 'fileServices/default')
metrics_data = client.metrics.list(
resource_id,
timespan="{}/{}".format(yesterday, today),
interval='PT1H',
metric='File capacity',
aggregation='Average')
if(metrics_data.value is None):
return METRICS_NOT_AVAILABLE
for item in metrics_data.value:
for item in item.timeseries:
if(len(item.data) > 0):
data = item.data[-1]
if(data.average is not None):
return data.average
else:
return METRICS_NOT_AVAILABLE
return METRICS_NOT_AVAILABLE
# ... def get_metric_data_capacity
#%% Command-line driver
if __name__ == '__main__':
#%%
auth = Authentication()
credentials = utils.authenticate_device_code(auth)
subscription_id = utils.get_subscription_id(credentials)
file_name = get_used_avg_blob_capacity(credentials,subscription_id)
os.startfile(file_name)
| 31.691275
| 129
| 0.638077
|
bf3b139b18604fa74fe5dbaeb05980dfc999012c
| 1,381
|
py
|
Python
|
src/brouwers/shop/models/presentation.py
|
modelbrouwers/modelbrouwers
|
e0ba4819bf726d6144c0a648fdd4731cdc098a52
|
[
"MIT"
] | 6
|
2015-03-03T13:23:07.000Z
|
2021-12-19T18:12:41.000Z
|
src/brouwers/shop/models/presentation.py
|
modelbrouwers/modelbrouwers
|
e0ba4819bf726d6144c0a648fdd4731cdc098a52
|
[
"MIT"
] | 95
|
2015-02-07T00:55:39.000Z
|
2022-02-08T20:22:05.000Z
|
src/brouwers/shop/models/presentation.py
|
modelbrouwers/modelbrouwers
|
e0ba4819bf726d6144c0a648fdd4731cdc098a52
|
[
"MIT"
] | 2
|
2016-03-22T16:53:26.000Z
|
2019-02-09T22:46:04.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class HomepageCategory(models.Model):
main_category = models.OneToOneField(
"Category",
related_name="homepage_categories",
on_delete=models.CASCADE,
)
order = models.PositiveIntegerField(
_("order"), help_text=_("Order in which to display category on the homepage")
)
class Meta:
verbose_name = _("homepage category")
verbose_name_plural = _("homepage categories")
def __str__(self):
return self.main_category.name
@python_2_unicode_compatible
class HomepageCategoryChild(models.Model):
parent = models.ForeignKey(
"HomepageCategory", related_name="children", on_delete=models.CASCADE
)
category = models.ForeignKey(
"Category", related_name="homepage_category_children", on_delete=models.CASCADE
)
order = models.PositiveIntegerField(
_("order"), help_text=_("Order in which to display category on the homepage")
)
class Meta:
verbose_name = _("homepage category child")
verbose_name_plural = _("homepage category children")
def __str__(self):
return self.category.name
| 30.021739
| 87
| 0.709631
|
d1518934d40796718f07e933620e09e691791424
| 1,798
|
py
|
Python
|
mistune/__init__.py
|
sakshi87/mistune
|
ef45d4d61c258c22f100ee3db179ffcc04bde8bb
|
[
"BSD-3-Clause"
] | null | null | null |
mistune/__init__.py
|
sakshi87/mistune
|
ef45d4d61c258c22f100ee3db179ffcc04bde8bb
|
[
"BSD-3-Clause"
] | null | null | null |
mistune/__init__.py
|
sakshi87/mistune
|
ef45d4d61c258c22f100ee3db179ffcc04bde8bb
|
[
"BSD-3-Clause"
] | null | null | null |
from .markdown import Markdown
from .block_parser import BlockParser
from .inline_parser import InlineParser
from .renderers import AstRenderer, HTMLRenderer
from .scanner import escape, escape_url, escape_html, unikey
from .plugins import PLUGINS
def create_markdown(escape=True, renderer=None, plugins=None):
"""Create a Markdown instance based on the given condition.
:param escape: Boolean. If using html renderer, escape html.
:param renderer: renderer instance or string of ``html`` and ``ast``.
:param plugins: List of plugins, string or callable.
This method is used when you want to re-use a Markdown instance::
markdown = create_markdown(
escape=False,
renderer='html',
plugins=['url', 'strikethrough', 'footnotes', 'table'],
)
# re-use markdown function
markdown('.... your text ...')
"""
if renderer is None or renderer == 'html':
renderer = HTMLRenderer(escape=escape)
elif renderer == 'ast':
renderer = AstRenderer()
if plugins:
_plugins = []
for p in plugins:
if isinstance(p, str):
_plugins.append(PLUGINS[p])
else:
_plugins.append(p)
plugins = _plugins
return Markdown(renderer, plugins=plugins)
html = create_markdown(
escape=False,
renderer='html',
plugins=['strikethrough', 'footnotes', 'table'],
)
def markdown(text, escape=True, renderer=None, plugins=None):
md = create_markdown(escape, renderer, plugins)
return md(text)
__all__ = [
'Markdown', 'AstRenderer', 'HTMLRenderer',
'BlockParser', 'InlineParser',
'escape', 'escape_url', 'escape_html', 'unikey',
'html', 'create_markdown', 'markdown',
]
__version__ = '2.0.0a1'
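# Hedged usage sketch (not part of the upstream package); the sample strings are
# assumptions chosen for illustration:
#     import mistune
#     mistune.markdown("Hello **mistune**!")  # one-off conversion with the default HTML renderer
#     mistune.html("~~gone~~ text")           # module-level instance with strikethrough/footnotes/table plugins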
| 29
| 73
| 0.645717
|
b0c5c19a0e3c5ffdd2aeb8ae0709e31cfecb2e32
| 1,430
|
py
|
Python
|
tests/staticfiles_tests/test_forms.py
|
peteralexandercharles/django
|
61c7350f41f2534daf3888709f3c987b7d779a29
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
tests/staticfiles_tests/test_forms.py
|
peteralexandercharles/django
|
61c7350f41f2534daf3888709f3c987b7d779a29
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
tests/staticfiles_tests/test_forms.py
|
peteralexandercharles/django
|
61c7350f41f2534daf3888709f3c987b7d779a29
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
from urllib.parse import urljoin
from django.contrib.staticfiles import storage
from django.forms import Media
from django.templatetags.static import static
from django.test import SimpleTestCase, override_settings
class StaticTestStorage(storage.StaticFilesStorage):
def url(self, name):
return urljoin("https://example.com/assets/", name)
@override_settings(
STATIC_URL="http://media.example.com/static/",
INSTALLED_APPS=("django.contrib.staticfiles",),
STATICFILES_STORAGE="staticfiles_tests.test_forms.StaticTestStorage",
)
class StaticFilesFormsMediaTestCase(SimpleTestCase):
def test_absolute_url(self):
m = Media(
css={"all": ("path/to/css1", "/path/to/css2")},
js=(
"/path/to/js1",
"http://media.other.com/path/to/js2",
"https://secure.other.com/path/to/js3",
static("relative/path/to/js4"),
),
)
self.assertEqual(
str(m),
"""<link href="https://example.com/assets/path/to/css1" type="text/css" media="all" rel="stylesheet">
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet">
<script src="/path/to/js1"></script>
<script src="http://media.other.com/path/to/js2"></script>
<script src="https://secure.other.com/path/to/js3"></script>
<script src="https://example.com/assets/relative/path/to/js4"></script>""",
)
| 36.666667
| 113
| 0.651049
|
ecc7dac0bc02466a5e214ddb5443dee5119a1727
| 1,516
|
py
|
Python
|
homepage/views.py
|
tdavn/portfolio_django
|
cdbadd53d5137b15955717c15f4d3991ac5a9b7d
|
[
"MIT"
] | null | null | null |
homepage/views.py
|
tdavn/portfolio_django
|
cdbadd53d5137b15955717c15f4d3991ac5a9b7d
|
[
"MIT"
] | null | null | null |
homepage/views.py
|
tdavn/portfolio_django
|
cdbadd53d5137b15955717c15f4d3991ac5a9b7d
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.views.generic import TemplateView
from .models import ContactMessage
# Create your views here.
class HompageView(TemplateView):
'''Display homepage'''
template_name = 'homepage/index.html'
def post(self, request):
info = self.request.POST
ContactMessage.objects.create(
name = info['name'],
email = info['email'],
message = info['message']
)
context = {
            'temp_mes': 'Your message has been recorded. Thank you!',
}
return render(request, self.template_name, context)
class PortfolioView(TemplateView):
'''Display portfolio page'''
template_name = 'homepage/portfolio.html'
class Ing4View(TemplateView):
'''Display portfolio page of Ing4 project'''
template_name = 'homepage/ing4.html'
class NanobodyView(TemplateView):
'''Display portfolio page of Nanobody project'''
template_name = 'homepage/nanobody.html'
class TrichostatinView(TemplateView):
'''Display portfolio page of TSA project'''
template_name = 'homepage/trichostatin.html'
class NetosisView(TemplateView):
'''Display portfolio page of Netosis project'''
template_name = 'homepage/netosis.html'
class Ggtpase3View(TemplateView):
'''Display portfolio page of YKT6 project'''
template_name = 'homepage/ggtpase3.html'
class RalgapView(TemplateView):
'''Display portfolio page of Ral GaP protein project'''
template_name = 'homepage/ralgap.html'
| 26.137931
| 64
| 0.69591
|
4ac3d1477eb1ca87de0a77d25f734524e15ec1e2
| 1,946
|
py
|
Python
|
extlist.py
|
rf-crescenzi/PrimeNumbers
|
af830701990e83d2e1b047f2260af815c2ebdf00
|
[
"Unlicense"
] | null | null | null |
extlist.py
|
rf-crescenzi/PrimeNumbers
|
af830701990e83d2e1b047f2260af815c2ebdf00
|
[
"Unlicense"
] | null | null | null |
extlist.py
|
rf-crescenzi/PrimeNumbers
|
af830701990e83d2e1b047f2260af815c2ebdf00
|
[
"Unlicense"
] | null | null | null |
# Subclass of list which adds two useful methods
class ExtendedList(list):
    # Recursive method that returns, from a given list of integers, all the integers less
    # than or equal to a given number. If the list is already sorted, set the argument
    # 'already_sorted' to True so that no time is wasted sorting it again.
def integers_lesser_or_equal(self, num, already_sorted = False):
# The list will be sorted if it is not sorted yet.
if not already_sorted:
self.sort()
        # If the list is empty, or its smallest item is greater than the parameter, an empty list will be returned.
if len(self) == 0 or self[0] > num:
return []
        # If the last item of the sorted list is less than or equal to the parameter, the entire list will be returned.
if self[-1] <= num:
return self
        # Otherwise the method looks at the item in the middle of the list.
        i = (len(self) - 1) // 2
        # If that item is greater than the parameter, the method recurses over the first half of the list;
        # if it is less than or equal to the parameter and the following item is greater, the first half is returned.
        # Finally, if both that item and the following one are less than or equal to the parameter,
        # the return value is the first half of the list concatenated with the result of recursing over the second half.
if self[i] > num:
return ExtendedList(self[:i+1]).integers_lesser_or_equal(num, True)
else:
if self[i+1] > num:
return self[:i+1]
else:
return self[:i+1] + ExtendedList(self[i+1:]).integers_lesser_or_equal(num, True)
    # This method returns a dictionary in which the keys are the distinct items of the list
    # and the values are integers representing how many times each item appears in the original list
def count_items(self):
counter = {}
for item in self:
if item in counter:
counter[item] += 1
else:
counter[item] = 1
return counter
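# Hedged usage example (not part of the original module): exercising both helpers on a
# small list; the sample values are assumptions chosen for illustration.
if __name__ == "__main__":
    numbers = ExtendedList([7, 3, 3, 11, 5, 2])
    print(numbers.integers_lesser_or_equal(5))  # [2, 3, 3, 5] (note: the call sorts the list in place)
    print(numbers.count_items())                # {2: 1, 3: 2, 5: 1, 7: 1, 11: 1}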
| 46.333333
| 141
| 0.721994
|
ccf87556d44f35c4a796a46ccb2490fe498024d9
| 3,087
|
py
|
Python
|
lib/surface/compute/instances/add_labels.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
lib/surface/compute/instances/add_labels.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 11
|
2020-02-29T02:51:12.000Z
|
2022-03-30T23:20:08.000Z
|
lib/surface/compute/instances/add_labels.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 1
|
2020-07-24T18:47:35.000Z
|
2020-07-24T18:47:35.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for adding labels to instances."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute.operations import poller
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import labels_doc_helper
from googlecloudsdk.command_lib.compute import labels_flags
from googlecloudsdk.command_lib.compute.instances import flags
from googlecloudsdk.command_lib.util.args import labels_util
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
base.ReleaseTrack.GA)
class InstancesAddLabels(base.UpdateCommand):
"""add-labels command for instances."""
@staticmethod
def Args(parser):
flags.INSTANCE_ARG.AddArgument(parser)
labels_flags.AddArgsForAddLabels(parser)
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client.apitools_client
messages = holder.client.messages
instance_ref = flags.INSTANCE_ARG.ResolveAsResource(
args, holder.resources,
scope_lister=flags.GetInstanceZoneScopeLister(holder.client))
add_labels = labels_util.GetUpdateLabelsDictFromArgs(args)
instance = client.instances.Get(
messages.ComputeInstancesGetRequest(**instance_ref.AsDict()))
labels_update = labels_util.Diff(additions=add_labels).Apply(
messages.InstancesSetLabelsRequest.LabelsValue, instance.labels)
if not labels_update.needs_update:
return instance
request = messages.ComputeInstancesSetLabelsRequest(
project=instance_ref.project,
instance=instance_ref.instance,
zone=instance_ref.zone,
instancesSetLabelsRequest=
messages.InstancesSetLabelsRequest(
labelFingerprint=instance.labelFingerprint,
labels=labels_update.labels))
operation = client.instances.SetLabels(request)
operation_ref = holder.resources.Parse(
operation.selfLink, collection='compute.zoneOperations')
operation_poller = poller.Poller(client.instances)
return waiter.WaitFor(
operation_poller, operation_ref,
'Updating labels of instance [{0}]'.format(
instance_ref.Name()))
InstancesAddLabels.detailed_help = (
labels_doc_helper.GenerateDetailedHelpForAddLabels('instance'))
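# Hedged usage note (not part of the upstream file): this class backs the
# `gcloud compute instances add-labels` surface command; the instance name, labels and
# zone below are assumptions.
#     gcloud compute instances add-labels example-instance \
#         --labels=env=prod,team=data --zone=us-central1-a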
| 37.192771
| 74
| 0.76482
|
a99f7a218938a3da5afb3fcc70f5571e18938199
| 1,812
|
py
|
Python
|
app.py
|
hherrerob/Automated-Resume-Screening-System
|
4788024d58211b02ccdb4e9599bf08b1a5ceeb40
|
[
"MIT"
] | null | null | null |
app.py
|
hherrerob/Automated-Resume-Screening-System
|
4788024d58211b02ccdb4e9599bf08b1a5ceeb40
|
[
"MIT"
] | null | null | null |
app.py
|
hherrerob/Automated-Resume-Screening-System
|
4788024d58211b02ccdb4e9599bf08b1a5ceeb40
|
[
"MIT"
] | null | null | null |
import warnings
import screen
import search
import utils
import validators
from flask import (Flask, request)
from flask_cors import CORS
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
app = Flask(__name__)
CORS(app)
app.config.from_object(__name__)
@app.route('/candidate/compare', methods=['POST'])
def candidate_compare():
code, msg = validators.has_params(request.json, [['offer', 'candidates'], ['candidate', 'offers']])
if code == 400:
return utils.make_response(code, msg)
    if 'offer' in request.json and 'candidates' in request.json:
item = request.json['offer']
compare_with = request.json['candidates']
else:
item = request.json['candidate']
compare_with = request.json['offers']
code, msg = validators.run_param_validators(item, compare_with)
if code == 400:
return utils.make_response(code, msg)
try:
results = screen.res(item, compare_with)
return utils.make_response(200, results)
except Exception as e:
return utils.make_response(500, str(e))
@app.route('/candidate/search', methods=['POST'])
def candidate_search():
code, msg = validators.has_params(request.json, [['search', 'candidates']])
if code == 400:
return utils.make_response(code, msg)
search_text = request.json['search']
candidates = request.json['candidates']
code, msg = validators.run_param_validators(search_text, candidates)
if code == 400:
return utils.make_response(code, msg)
try:
results = search.res(search_text, candidates)
return utils.make_response(200, results)
except Exception as e:
return utils.make_response(500, str(e))
if __name__ == '__main__':
app.run('0.0.0.0', 5000, debug=True, threaded=True)
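# Hedged usage note (not part of the original app): the endpoints expect JSON bodies such
# as {"offer": ..., "candidates": [...]} or {"candidate": ..., "offers": [...]} for
# /candidate/compare and {"search": ..., "candidates": [...]} for /candidate/search.
# The exact payload schemas live in validators.py/screen.py, so the example below is an
# assumption.
#     curl -X POST http://localhost:5000/candidate/search \
#          -H "Content-Type: application/json" \
#          -d '{"search": "python flask", "candidates": [...]}'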
| 26.647059
| 103
| 0.678256
|
50f1a1d7b2b13c1be9983514717847335f627b48
| 978
|
py
|
Python
|
spendit/urls.py
|
walki/spend_it
|
88bf2961a5e1b0b199ec839f70f170b2678c9358
|
[
"MIT"
] | null | null | null |
spendit/urls.py
|
walki/spend_it
|
88bf2961a5e1b0b199ec839f70f170b2678c9358
|
[
"MIT"
] | null | null | null |
spendit/urls.py
|
walki/spend_it
|
88bf2961a5e1b0b199ec839f70f170b2678c9358
|
[
"MIT"
] | null | null | null |
"""spendit URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from spend import views
urlpatterns = [
url(r'^$', views.home_page, name='home'),
url(r'^spends/new$', views.new_expense_list, name='new_expense_list'),
url(r'^spends/(\d+)/$', views.view_list, name='view_list'),
url(r'^spends/(\d+)/add_expense$', views.add_expense, name='add_expense'),
]
| 39.12
| 79
| 0.694274
|
63def8e397fdb37f2ffdfd58d924d5d957d9588e
| 1,902
|
py
|
Python
|
microbit/compass.py
|
elecfreaks/EF_Produce_MicroPython
|
7ed7da11b0ea9dcb7e317d1b5ebc1396a6baf7e6
|
[
"MIT"
] | 2
|
2020-08-06T07:32:57.000Z
|
2022-02-11T02:37:21.000Z
|
microbit/compass.py
|
elecfreaks/EF_Produce_MicroPython
|
7ed7da11b0ea9dcb7e317d1b5ebc1396a6baf7e6
|
[
"MIT"
] | null | null | null |
microbit/compass.py
|
elecfreaks/EF_Produce_MicroPython
|
7ed7da11b0ea9dcb7e317d1b5ebc1396a6baf7e6
|
[
"MIT"
] | 2
|
2020-11-23T07:26:21.000Z
|
2021-09-11T02:34:09.000Z
|
"""This module lets you access the built-in electronic compass. Before using,
the compass should be calibrated, otherwise the readings may be wrong.
.. warning::
Calibrating the compass will cause your program to pause until calibration
    is complete. Calibration consists of a little game to draw a circle on the
LED display by rotating the device.
"""
def calibrate() -> None:
"""Starts the calibration process. An instructive message will be scrolled
to the user after which they will need to rotate the device in order to
    draw a circle on the LED display.
"""
def is_calibrated() -> bool:
"""Returns ``True`` if the compass has been successfully calibrated, and
returns ``False`` otherwise.
"""
def clear_calibration() -> None:
"""Undoes the calibration, making the compass uncalibrated again."""
def get_x() -> int:
"""Gives the reading of the magnetic force set_power_on the ``x`` axis, as a
positive or negative integer, depending set_power_on the direction of the
force.
"""
def get_y() -> int:
"""Gives the reading of the magnetic force set_power_on the ``x`` axis, as a
positive or negative integer, depending set_power_on the direction of the
force.
"""
def get_z() -> int:
"""Gives the reading of the magnetic force set_power_on the ``x`` axis, as a
positive or negative integer, depending set_power_on the direction of the
force.
"""
def heading() -> int:
"""Gives the compass heading, calculated from the above readings, as an
integer in the range from 0 to 360, representing the angle in degrees,
clockwise, with north as 0.
If the compass has not been calibrated, then this will call ``calibrate``.
"""
def get_field_strength() -> int:
"""Returns an integer indication of the magnitude of the magnetic field
around the device."""
| 31.180328
| 88
| 0.702944
|
882f4c20cb3f4118e43cf9980bf67808c90f2ef7
| 1,747
|
py
|
Python
|
azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/account/models/name_availability_information.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 1
|
2022-03-30T22:39:15.000Z
|
2022-03-30T22:39:15.000Z
|
azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/account/models/name_availability_information.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/account/models/name_availability_information.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 2
|
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NameAvailabilityInformation(Model):
"""Data Lake Analytics account name availability result information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar name_available: The Boolean value of true or false to indicate
whether the Data Lake Analytics account name is available or not.
:vartype name_available: bool
:ivar reason: The reason why the Data Lake Analytics account name is not
available, if nameAvailable is false.
:vartype reason: str
:ivar message: The message describing why the Data Lake Analytics account
name is not available, if nameAvailable is false.
:vartype message: str
"""
_validation = {
'name_available': {'readonly': True},
'reason': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self):
super(NameAvailabilityInformation, self).__init__()
self.name_available = None
self.reason = None
self.message = None
| 35.653061
| 77
| 0.621065
|
ea8291517de32685f7da2922c00bb9a5dad84b84
| 11,811
|
py
|
Python
|
laboratory/settings.py
|
Wellheor1/l2
|
d980210921c545c68fe9d5522bb693d567995024
|
[
"MIT"
] | null | null | null |
laboratory/settings.py
|
Wellheor1/l2
|
d980210921c545c68fe9d5522bb693d567995024
|
[
"MIT"
] | null | null | null |
laboratory/settings.py
|
Wellheor1/l2
|
d980210921c545c68fe9d5522bb693d567995024
|
[
"MIT"
] | null | null | null |
import logging
import os
import sys
import warnings
from collections import OrderedDict
PROFILING = False
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = None
DEBUG = "DLIS" in os.environ
INTERNAL_IPS = ['127.0.0.1', '192.168.0.200', '192.168.0.101', '192.168.102.4', '192.168.0.128']
ALLOWED_HOSTS = ['lis', '127.0.0.1', 'localhost', 'testserver']
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_HSTS_SECONDS = 1
X_FRAME_OPTIONS = 'ALLOWALL'
CORS_ALLOW_ALL_ORIGINS = True
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'ajax_select',
'health',
'appconf.apps.AppconfConfig',
'manifest_loader',
'clients',
'users',
'mainmenu',
'podrazdeleniya',
'results',
'researches',
'directions',
'receivematerial',
'construct',
'slog',
'directory',
'statistic',
'api.apps.ApiConfig',
'discharge',
'rmis_integration',
'rest_framework',
'integration_framework',
'django_logtail',
'statistics_tickets',
'reports',
'mq.apps.MqConfig',
'cases.apps.CasesConfig',
'forms',
'contracts',
'lq',
'treatment',
'external_system',
'plans',
'medical_certificates',
'list_wait',
'doctor_call',
'hospitals.apps.HospitalsConfig',
'pharmacotherapy',
'command_utils',
'doctor_schedule',
'django_celery_results',
'dashboards',
)
MIDDLEWARE = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
INSTALLED_APPS_PRE_ADD = ()
INSTALLED_APPS_ADD = ()
MIDDLEWARE_ADD = []
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'django.contrib.auth.backends.RemoteUserBackend',
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': ['integration_framework.authentication.TokenAuthentication'],
}
ROOT_URLCONF = 'laboratory.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'context_processors.utils.card_bases',
'context_processors.utils.ws',
'context_processors.utils.menu',
'context_processors.utils.profile',
'context_processors.utils.local_settings',
],
},
},
]
WSGI_APPLICATION = 'laboratory.wsgi.application'
LOGIN_URL = '/'
LOGIN_REDIRECT_URL = '/ui/menu'
CSRF_USE_SESSIONS = False
CSRF_COOKIE_HTTPONLY = False
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'l2',
'USER': 'postgres',
'PASSWORD': '123456',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
CACHES = {
'default': {'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': '127.0.0.1:11211', 'KEY_PREFIX': 'lis' + ("" if not DEBUG else "_DBG")},
}
LANGUAGE_CODE = 'ru-ru'
DATE_FORMAT = 'd.m.Y'
DATE_FORMAT_SHORT = 'd.m.y'
TIME_FORMAT = 'd.m.Y'
USE_TZ = True
TIME_ZONE = 'Asia/Irkutsk'
USE_I18N = True
USE_L10N = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'assets'),)
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
FONTS_FOLDER = os.path.join(BASE_DIR, 'assets', 'fonts')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
FIXTURE_DIRS = [os.path.join(BASE_DIR, 'fixtures')]
AUTH_PROFILE_MODULE = 'users.models.DoctorsProfile'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'base': {'format': '\n[%(asctime)s] [%(levelname)s] %(module)s\n' 'Request: %(path)s [%(method)s] %(user)s %(data)s\n' 'Body: %(body)s\n' '%(stack_info)s\n'},
},
'filters': {
'requestdata': {
'()': 'utils.filters.RequestDataFilter',
},
},
'handlers': {
'file': {'level': 'DEBUG', 'class': 'logging.FileHandler', 'filters': ['requestdata'], 'filename': os.path.join(BASE_DIR, 'logs', 'log.txt'), 'formatter': 'base'},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
},
}
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
)
LDAP = {
"enable": False,
}
SESSION_SAVE_EVERY_REQUEST = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_COOKIE_AGE = 15 * 60 * 60
MAX_RMIS_THREADS = 10
RMIS_UPLOAD_WAIT_TIME_SECS = 8
RMIS_UPLOAD_WAIT_LONG_TIME_SECS = 300
RMIS_UPLOAD_COUNT_TO_REFRESH_CLIENT = 40
RMIS_UPLOAD_COUNT = 20
DOC_CALL_SYNC_WAIT_TIME_SECS = 8
DOC_CALL_SYNC_WAIT_LONG_TIME_SECS = 300
RATELIMIT_VIEW = 'mainmenu.views.ratelimited'
RMIS_PROXY = None
FIAS_PROXY = None
AFTER_DATE = None
AFTER_DATE_HOLTER = None
MAX_DOC_CALL_EXTERNAL_REQUESTS_PER_DAY = 3
PREFETCH_DEBUG = False
PREFETCH_ENABLED = False
PREFETCH_MAX_THREADS = 15
LOG_SQL = False
class DisableMigrations(object):
def __contains__(self, item):
return True
def __getitem__(self, item):
return "notmigrations"
TESTS_IN_PROGRESS = False
if 'test' in sys.argv[1:] or 'jenkins' in sys.argv[1:]:
logging.disable(logging.CRITICAL)
PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',)
DEBUG = False
TEMPLATE_DEBUG = False
TESTS_IN_PROGRESS = True
MIGRATION_MODULES = DisableMigrations()
warnings.filterwarnings('ignore', message='DateTimeField*', category=RuntimeWarning)
MAX_UPLOAD_SIZE = DATA_UPLOAD_MAX_MEMORY_SIZE = 104857600
DEBUG = False
LOGOUT_REDIRECT_URL = '/'
LOGTAIL_FILES = {'L2': os.path.join(BASE_DIR, 'logs', 'log.txt')}
RMQ_URL = "amqp://t:t@localhost:5672/"
DEPRECATED_RMQ_ENABLED = False
WS_BASE = "localhost"
WS_PORT = 8822
WS_ENABLED = False
def SILKY_INTERCEPT_FUNC(request):
return request.path not in ['/mainmenu/']
AFTER_DATE_HOLTER = None
DICOM_SEARCH_TAGS = []
DICOM_ADDRESS = ''
DICOM_SERVER_DELETE = ''
DICOM_PORT = None
DICOM_SERVER = ''
ACSN_MODE = None
REMOTE_DICOM_ADDRESS = ''
REMOTE_DICOM_PORT = None
REMOTE_DICOM_SERVER = ""
REMOTE_DICOM_PEER = ""
URL_RMIS_AUTH = ""
URL_ELN_MADE = ""
URL_SCHEDULE = ""
EXTRA_MASTER_RESEARCH_PK = None
EXTRA_SLAVE_RESEARCH_PK = None
PAP_ANALYSIS_ID = []
PAP_ANALYSIS_FRACTION_QUALITY_ID = []
PAP_ANALYSIS_FRACTION_CONTAIN_ID = []
DEF_LABORATORY_AUTH_PK = None
DEF_LABORATORY_LEGAL_AUTH_PK = None
DEF_CONSULT_AUTH = None
DEF_CONSULT_LEGALAUTH = None
DEATH_RESEARCH_PK = None
PERINATAL_DEATH_RESEARCH_PK = None
COVID_RESEARCHES_PK = []
RESEARCH_SPECIAL_REPORT = {"driver_research": None, "weapon_research_pk": None}
CENTRE_GIGIEN_EPIDEMIOLOGY = ""
REGION = ""
EXCLUDE_HOSP_SEND_EPGU = []
SOME_LINKS = []
DISABLED_FORMS = []
DISABLED_STATISTIC_CATEGORIES = []
DISABLED_STATISTIC_REPORTS = []
COVID_QUESTION_ID = None
# Example of how forms are specified: [{'title': 'Согласие на обработку персональных данных', 'type': '101.02'}, {'title': 'Согласие на медицинское вмешательство', 'type': '101.03'}]
LK_FORMS = []
# Surrogate user: the "Личный кабинет" (personal account) department and the "Личный кабинет" user
LK_USER = -1
LK_FILE_SIZE_BYTES = -1
LK_FILE_COUNT = -1
LK_DAY_MONTH_START_SHOW_RESULT = "01.01."
SENTRY_DSN = "https://4a6968777ec240b190abd11cbf1c96e1@sentry.io/3083440"
QUERY_TIMEOUT = 120
FORM_100_08_A4_FORMAT = False
FORCE_CACHALOT = False
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
N3_ODII_BASE_URL = ""
N3_ODII_TOKEN = ""
N3_ODII_SYSTEM_ID = ""
DEFAULT_N3_DOCTOR = {
"pk": "",
"snils": "",
"speciality": "27",
"position": "73",
"family": "",
"name": "",
"patronymic": "",
}
SYSTEM_AS_VI = False
EMAIL_HOST = None
EMAIL_PORT = 465
EMAIL_HOST_USER = "your@yandex.ru"
EMAIL_HOST_PASSWORD = "password"
EMAIL_USE_TLS = False
EMAIL_USE_SSL = True
CELERY_TIMEZONE = 'Asia/Irkutsk'
CELERY_TASK_TRACK_STARTED = True
CELERY_TASK_TIME_LIMIT = 30 * 60
CELERY_BROKER_URL = 'redis://localhost:6379/4'
CELERY_RESULT_BACKEND = 'django-db'
FORWARD_DAYS_SCHEDULE = -1
SCHEDULE_AGE_LIMIT_LTE = None
QRCODE_OFFSET_SIZE = {}
LEFT_QRCODE_OFFSET_SIZE = {}
PROTOCOL_PLAIN_TEXT = True
SPLIT_PRINT_RESULT = False
REQUIRED_STATTALON_FIELDS = {} # {"purpose": "Данные статталона - Цель не указана"}
RESEARCHES_PK_REQUIRED_STATTALON_FIELDS = {} # {358: {"purpose": "Данные статталона - Цель не указана"}}
DISPANSERIZATION_STATTALON_FIELDS_RESULTS_PK = []
DISPANSERIZATION_STATTALON_FIELDS_PURPOSE_PK = []
DASHBOARD_CHARTS_CACHE_TIME_SEC = 60 * 5
TITLE_REPORT_FILTER_STATTALON_FIELDS = []
DISPANSERIZATION_SERVICE_PK = {} # {"pkServiceStart": [12, 13], "pkServiceEnd": [15])}
try:
from laboratory.local_settings import * # noqa: F403,F401
except ImportError:
pass
if PROFILING:
INSTALLED_APPS += ('silk',)
MIDDLEWARE += ('silk.middleware.SilkyMiddleware',)
MIDDLEWARE += MIDDLEWARE_ADD
MIDDLEWARE = list(OrderedDict.fromkeys(MIDDLEWARE))
INSTALLED_APPS += INSTALLED_APPS_ADD
if not FORCE_CACHALOT:
INSTALLED_APPS = [x for x in OrderedDict.fromkeys(INSTALLED_APPS_PRE_ADD + INSTALLED_APPS) if x not in ['cachalot']]
WS_URL = "ws://{}:{}/".format(WS_BASE, WS_PORT)
if LOG_SQL:
LOGGING['loggers']['django.db.backends'] = {
'level': 'DEBUG',
'handlers': ['console'],
}
MANIFEST_LOADER = {
'cache': False,
'output_dir': 'webpack_bundles/',
'manifest_file': os.path.join(BASE_DIR, 'assets/webpack_bundles/manifest.json'),
'ignore_missing_assets': DEBUG,
}
def get_env_value(env_variable):
return os.environ.get(env_variable)
DB_USER = get_env_value('DB_USER')
DB_PASSWORD = get_env_value('DB_PASSWORD')
DB_NAME = get_env_value('DB_NAME')
DB_HOST = get_env_value('DB_HOST')
DB_PORT = get_env_value('DB_PORT')
ENV_SECRET_KEY = get_env_value('SECRET_KEY')
if DB_USER:
DATABASES['default']['USER'] = DB_USER
if DB_PASSWORD:
DATABASES['default']['PASSWORD'] = DB_PASSWORD
if DB_NAME:
DATABASES['default']['NAME'] = DB_NAME
if DB_HOST:
DATABASES['default']['HOST'] = DB_HOST
if DB_PORT:
DATABASES['default']['PORT'] = DB_PORT
if ENV_SECRET_KEY:
SECRET_KEY = ENV_SECRET_KEY
# db = DATABASES.get('default', {})
# db['OPTIONS'] = db.get('OPTIONS', {})
# db['OPTIONS']['options'] = f'-c statement_timeout={QUERY_TIMEOUT * 1000}'
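# Hedged usage note (not part of the original settings): the database connection and the
# secret key can be overridden through environment variables; the values below are
# assumptions.
#     DB_USER=l2 DB_PASSWORD=secret DB_NAME=l2 DB_HOST=db DB_PORT=5432 \
#     SECRET_KEY=change-me python manage.py runserver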
| 26.363839
| 171
| 0.695453
|
6c6dc8b256f0f0430101b75ec77c3d289a3a0bb9
| 2,222
|
py
|
Python
|
packages/syft/src/syft/core/node/common/action/garbage_collect_batched_action.py
|
exityan/PySyft
|
35166c487a5be57f9ad28929ed88a8ba6bdd5aeb
|
[
"Apache-2.0"
] | 2
|
2020-10-19T19:18:46.000Z
|
2020-12-07T12:06:30.000Z
|
packages/syft/src/syft/core/node/common/action/garbage_collect_batched_action.py
|
Metrix1010/PySyft
|
6477f64b63dc285059c3766deab3993653cead2e
|
[
"Apache-2.0"
] | 5
|
2020-09-11T05:47:12.000Z
|
2020-10-13T08:36:17.000Z
|
packages/syft/src/syft/core/node/common/action/garbage_collect_batched_action.py
|
Metrix1010/PySyft
|
6477f64b63dc285059c3766deab3993653cead2e
|
[
"Apache-2.0"
] | 1
|
2021-05-09T08:43:34.000Z
|
2021-05-09T08:43:34.000Z
|
# stdlib
from typing import List
from typing import Optional
# third party
from google.protobuf.reflection import GeneratedProtocolMessageType
from nacl.signing import VerifyKey
# syft relative
from ..... import serialize
from .....logger import critical
from .....proto.core.node.common.action.garbage_collect_batched_pb2 import (
GarbageCollectBatchedAction as GarbageCollectBatchedAction_PB,
)
from ....common.serde.deserialize import _deserialize
from ....common.serde.serializable import bind_protobuf
from ....common.uid import UID
from ....io.address import Address
from ...abstract.node import AbstractNode
from .common import EventualActionWithoutReply
@bind_protobuf
class GarbageCollectBatchedAction(EventualActionWithoutReply):
def __init__(
self, ids_at_location: List[UID], address: Address, msg_id: Optional[UID] = None
):
super().__init__(address=address, msg_id=msg_id)
self.ids_at_location = ids_at_location
def execute_action(self, node: AbstractNode, verify_key: VerifyKey) -> None:
try:
for id_at_location in self.ids_at_location:
node.store.delete(key=id_at_location)
except Exception as e:
critical(
"> GarbageCollectBatchedAction deletion exception "
+ f"{id_at_location} {e}"
)
def _object2proto(self) -> GarbageCollectBatchedAction_PB:
address = serialize(self.address)
res = GarbageCollectBatchedAction_PB(address=address)
for id_obj in self.ids_at_location:
res.ids_at_location.append(serialize(id_obj))
return res
@staticmethod
def _proto2object(
proto: GarbageCollectBatchedAction_PB,
) -> "GarbageCollectBatchedAction":
ids_at_location = []
for id_at_location in proto.ids_at_location:
ids_at_location.append(_deserialize(blob=id_at_location))
addr = _deserialize(blob=proto.address)
return GarbageCollectBatchedAction(
ids_at_location=ids_at_location,
address=addr,
)
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
return GarbageCollectBatchedAction_PB
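# Hedged usage note (not part of the original file): these actions are normally
# produced by PySyft's client-side garbage collection machinery rather than
# built by hand. The serde round trip implemented above is simply
#     proto = action._object2proto()
#     restored = GarbageCollectBatchedAction._proto2object(proto)
# for an existing `action`; `restored.ids_at_location` then mirrors the
# original list of UIDs and `restored.address` the original Address.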
| 33.164179
| 88
| 0.710171
|
2046e36a0ccadf422e45ac2aab7dd54f6816a818
| 23,651
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/aio/operations/_ddos_protection_plans_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/aio/operations/_ddos_protection_plans_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/aio/operations/_ddos_protection_plans_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DdosProtectionPlansOperations:
"""DdosProtectionPlansOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def get(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
**kwargs
) -> "_models.DdosProtectionPlan":
"""Gets information about the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosProtectionPlan, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_04_01.models.DdosProtectionPlan
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
parameters: "_models.DdosProtectionPlan",
**kwargs
) -> "_models.DdosProtectionPlan":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosProtectionPlan')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
ddos_protection_plan_name: str,
parameters: "_models.DdosProtectionPlan",
**kwargs
) -> AsyncLROPoller["_models.DdosProtectionPlan"]:
"""Creates or updates a DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:param parameters: Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2018_04_01.models.DdosProtectionPlan
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DdosProtectionPlan or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_04_01.models.DdosProtectionPlan]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def list(
self,
**kwargs
) -> AsyncIterable["_models.DdosProtectionPlanListResult"]:
"""Gets all DDoS protection plans in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_04_01.models.DdosProtectionPlanListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.DdosProtectionPlanListResult"]:
"""Gets all the DDoS protection plans in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_04_01.models.DdosProtectionPlanListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
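# Hedged usage sketch (the client and credential classes named below are the
# usual azure-mgmt-network / azure-identity entry points, not defined in this
# file; subscription, resource group and plan names are placeholders):
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.network.aio import NetworkManagementClient
#
#     async with NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#         poller = await client.ddos_protection_plans.begin_create_or_update(
#             "<resource-group>", "<plan-name>", {"location": "westus2"})
#         plan = await poller.result()
#
# As the class docstring above notes, DdosProtectionPlansOperations is not
# meant to be instantiated directly; it is reached through that client attribute.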
| 50.002114
| 207
| 0.673587
|
a872a143536ff7e394312382273b2d1f8c628fcd
| 467
|
py
|
Python
|
src/runners/__init__.py
|
am-rutherford/pymarl
|
0919e1ab9109b5ac8f719dde7c41ca4403849af1
|
[
"Apache-2.0"
] | 1
|
2022-03-01T17:45:41.000Z
|
2022-03-01T17:45:41.000Z
|
src/runners/__init__.py
|
am-rutherford/pymarl
|
0919e1ab9109b5ac8f719dde7c41ca4403849af1
|
[
"Apache-2.0"
] | null | null | null |
src/runners/__init__.py
|
am-rutherford/pymarl
|
0919e1ab9109b5ac8f719dde7c41ca4403849af1
|
[
"Apache-2.0"
] | null | null | null |
REGISTRY = {}
from .episode_runner import EpisodeRunner
REGISTRY["episode"] = EpisodeRunner
from .parallel_runner import ParallelRunner
REGISTRY["parallel"] = ParallelRunner
from .async_episode_runner import AsyncEpisodeRunner
REGISTRY["async"] = AsyncEpisodeRunner
from .render_episode_runner import RenderEpisodeRunner
REGISTRY["render"] = RenderEpisodeRunner
from .timelim_episode_runner import TimeLimEpisodeRunner
REGISTRY["timelim"] = TimeLimEpisodeRunner
| 27.470588
| 56
| 0.837259
|
24d06bce763b9c1fa20c1f5c5739dd6f6f59f8de
| 281
|
py
|
Python
|
codeHL/codeHLshell.py
|
atria-tools/monk
|
4961457f4db5dfa98fc6001a289c24e460e5b025
|
[
"Apache-2.0"
] | null | null | null |
codeHL/codeHLshell.py
|
atria-tools/monk
|
4961457f4db5dfa98fc6001a289c24e460e5b025
|
[
"Apache-2.0"
] | 1
|
2015-03-22T12:37:18.000Z
|
2015-03-22T12:37:18.000Z
|
codeHL/codeHLshell.py
|
HeeroYui/monk
|
4961457f4db5dfa98fc6001a289c24e460e5b025
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import monkDebug as debug
import sys
import monkTools
import re
listRegExp = [
[ r'#(.*?)\n', r'<span class="code-preproc">#\1</span>\n']
]
def transcode(value):
for reg1, reg2 in listRegExp:
value = re.sub(reg1, reg2, value, flags=re.DOTALL)
return value
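# Hedged usage example (not in the original file; the input snippet is made up):
if __name__ == "__main__":
    sample = "#include <stdio.h>\nint main() { return 0; }\n"
    # preprocessor lines come back wrapped in <span class="code-preproc">...</span>
    print(transcode(sample))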
| 18.733333
| 59
| 0.683274
|
02c10eead09c90a3af3917e9ecd4ef40a2a4cc5d
| 322
|
py
|
Python
|
04_03_sos_return.py
|
simonmonk/prog_pico_ed1
|
36e70f88ea7dc73e75399cd390d1cc2023843971
|
[
"MIT"
] | 6
|
2021-05-08T13:19:33.000Z
|
2022-03-20T08:29:44.000Z
|
04_03_sos_return.py
|
simonmonk/prog_pico_ed1
|
36e70f88ea7dc73e75399cd390d1cc2023843971
|
[
"MIT"
] | 1
|
2021-03-05T20:27:15.000Z
|
2021-11-17T09:07:43.000Z
|
04_03_sos_return.py
|
simonmonk/prog_pico_ed1
|
36e70f88ea7dc73e75399cd390d1cc2023843971
|
[
"MIT"
] | 2
|
2021-07-02T15:19:37.000Z
|
2021-10-06T00:53:25.000Z
|
from machine import Pin
from utime import sleep
led = Pin(25, Pin.OUT)
def blink(times, delay):
for x in range(1, times+1):
led.on()
sleep(delay)
led.off()
sleep(delay)
return times * delay * 2
while True:
print(blink(3, 0.2))
sleep(0.4)
print(blink(3, 0.6))
| 17.888889
| 31
| 0.559006
|
ffe5a9ff7e8ae043b3ab04a9894699da4a93d0f6
| 1,914
|
py
|
Python
|
utils/plots.py
|
danilojodas/keras-YOLOv3-model-set
|
da6d1af57bcea139d548843f0488cf61ab00f965
|
[
"MIT"
] | null | null | null |
utils/plots.py
|
danilojodas/keras-YOLOv3-model-set
|
da6d1af57bcea139d548843f0488cf61ab00f965
|
[
"MIT"
] | null | null | null |
utils/plots.py
|
danilojodas/keras-YOLOv3-model-set
|
da6d1af57bcea139d548843f0488cf61ab00f965
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 7 22:56:27 2020
@author: DANILO
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def loss_function(folder='', save_plot=False):
plt.rc('axes', labelsize=12)
plt.rc('xtick', labelsize=11)
plt.rc('ytick', labelsize=11)
plt.rc('legend', fontsize=11)
plt.rc('axes', titlesize=14)
fig = plt.figure(figsize=(9,4))
for i in range(2):
path_ = 'D:/Projetos Machine Learning/Python/YOLOv3-keras/results/v' + str(i+1)
loss_train = pd.read_csv(os.path.join(path_,'train-epoch_loss.csv'),delimiter=',')
loss_valid = pd.read_csv(os.path.join(path_,'validation-epoch_loss.csv'),delimiter=',')
ax = fig.add_subplot(1,2,i+1)
ax.plot(np.arange(1,len(loss_train)+1), loss_train.iloc[:,-1],label='Train',antialiased=True)
ax.plot(np.arange(1,len(loss_valid)+1), loss_valid.iloc[:,-1],label='Validation',antialiased=True)
ax.set_xlabel('Epoch\n\n')
ax.set_ylabel('Loss')
ax.set_title('Setup ' + str(i+1))
ax.set_ylim([0,100])
plt.legend()
fig.tight_layout(pad=3)
if (save_plot):
plt.savefig('D:/Vida académica/Pós-doc/Publicações/Tree Detection/tex - Springer/Figures/loss_cnn.eps',format='eps',
dpi=600,bbox_inches='tight')
def bar_char():
plt.rc('axes', labelsize=12)
plt.rc('xtick', labelsize=11)
plt.rc('ytick', labelsize=11)
plt.rc('legend', fontsize=11)
plt.rc('axes', titlesize=14)
labels = ('stick','tree','crown','stem')
x_map = np.array([0.94,0.78,0.72,0.68])
y_pos = np.arange(len(labels))
fig, ax = plt.subplots()
ax.barh(y_pos,x_map)
ax.set_yticks(y_pos)
ax.set_yticklabels(labels)
ax.set_xlabel('Average precision')
ax.set_title('Mean Average Precision')
| 30.870968
| 124
| 0.613898
|
488e3cd1a3077f8ec18829832358426121872afa
| 9,257
|
py
|
Python
|
examples/language_model/gpt-3/dygraph/args.py
|
MrJStyle/PaddleNLP
|
fd02d65d0a5dc6a2adac21ec5703424508e38acd
|
[
"Apache-2.0"
] | 1
|
2021-10-14T05:35:00.000Z
|
2021-10-14T05:35:00.000Z
|
examples/language_model/gpt-3/dygraph/args.py
|
Yingyingcososc/PaddleNLP
|
0f2d8a6343ac4854f99e6353a5d3e14aabc66c8e
|
[
"Apache-2.0"
] | null | null | null |
examples/language_model/gpt-3/dygraph/args.py
|
Yingyingcososc/PaddleNLP
|
0f2d8a6343ac4854f99e6353a5d3e14aabc66c8e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import paddle
from paddlenlp.utils.log import logger
def process_batch_size(args):
if args.global_batch_size is None and args.local_batch_size is None:
raise ValueError("global_batch_size or local_batch_size should be set.")
elif args.global_batch_size is not None and args.local_batch_size is not None:
assert args.global_batch_size // args.local_batch_size == args.dp_degree, \
"global_batch_size[{}] should be divided by local_batch_size[{}] when dp_degree is [{}]"\
.format(args.global_batch_size, args.local_batch_size, args.dp_degree)
elif args.global_batch_size is not None and args.local_batch_size is None:
args.local_batch_size = args.global_batch_size // (args.dp_degree *
args.sharding_degree)
else:
args.global_batch_size = args.local_batch_size * args.dp_degree * args.sharding_degree
assert args.local_batch_size % args.micro_batch_size == 0
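# Worked example (illustrative numbers, not taken from any provided config):
#   global_batch_size=64, dp_degree=4, sharding_degree=2, micro_batch_size=8
#     -> local_batch_size = 64 // (4 * 2) = 8, and 8 % 8 == 0 satisfies the assert.
#   Conversely, with only local_batch_size=8 given, global_batch_size becomes
#   8 * dp_degree * sharding_degree = 64.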
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Unsupported value encountered.')
def parse_args(MODEL_CLASSES):
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " +
", ".join(MODEL_CLASSES.keys()), )
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: "
+ ", ".join(
sum([
list(classes[-1].pretrained_init_configuration.keys())
for classes in MODEL_CLASSES.values()
], [])), )
# Train I/O config
parser.add_argument(
"--input_dir",
default=None,
type=str,
required=True,
help="The input directory where the data will be read from.", )
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the training logs and checkpoints will be written."
)
parser.add_argument(
"--split",
type=str,
default='949,50,1',
help="Train/valid/test data split.")
parser.add_argument(
"--max_seq_len", type=int, default=1024, help="Max sequence length.")
parser.add_argument(
"--global_batch_size",
default=None,
type=int,
help="Global batch size for all training process. None for not check the size is valid. If we only use data parallelism, it should be device_num * micro_batch_size."
)
parser.add_argument(
"--local_batch_size",
default=None,
type=int,
help="Global batch size for all training process. None for not check the size is valid. If we only use data parallelism, it should be device_num * micro_batch_size."
)
parser.add_argument(
"--micro_batch_size",
default=8,
type=int,
help="Batch size per device for one step training.", )
# Default training config
parser.add_argument(
"--weight_decay",
default=0.0,
type=float,
help="Weight decay if we apply some.")
parser.add_argument(
"--grad_clip",
default=0.0,
type=float,
help="Grad clip for the parameter.")
parser.add_argument(
"--max_lr",
default=1e-5,
type=float,
help="The initial max learning rate for Adam.")
parser.add_argument(
"--min_lr",
default=5e-5,
type=float,
help="The initial min learning rate for Adam.")
parser.add_argument(
"--warmup_rate",
default=0.01,
type=float,
help="Linear warmup over warmup_steps for learing rate.")
# Adam optimizer config
parser.add_argument(
"--adam_beta1",
default=0.9,
type=float,
help="The beta1 for Adam optimizer. The exponential decay rate for the 1st moment estimates."
)
parser.add_argument(
"--adam_beta2",
default=0.999,
type=float,
help="The bate2 for Adam optimizer. The exponential decay rate for the 2nd moment estimates."
)
parser.add_argument(
"--adam_epsilon",
default=1e-8,
type=float,
help="Epsilon for Adam optimizer.")
# Training steps config
parser.add_argument(
"--num_train_epochs",
default=1,
type=int,
help="Total number of training epochs to perform.", )
parser.add_argument(
"--max_steps",
default=500000,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs."
)
parser.add_argument(
"--save_steps",
type=int,
default=500,
help="Save checkpoint every X updates steps.")
parser.add_argument(
"--decay_steps",
default=360000,
type=int,
help="The steps use to control the learing rate. If the step > decay_steps, will use the min_lr."
)
parser.add_argument(
"--logging_freq",
type=int,
default=1,
help="Log every X updates steps.")
parser.add_argument(
"--eval_freq",
type=int,
default=500,
help="Evaluate for every X updates steps.")
parser.add_argument(
"--eval_iters",
type=int,
default=10,
help="Evaluate the model use X steps data.")
# Config for 4D Parallelism
parser.add_argument(
"--sharding_degree",
type=int,
default=1,
help="Sharding degree. Share the parameters to many cards.")
parser.add_argument(
"--dp_degree", type=int, default=1, help="Data Parallelism degree.")
parser.add_argument(
"--mp_degree",
type=int,
default=1,
help="Model Parallelism degree. Spliting the linear layers to many cards."
)
parser.add_argument(
"--pp_degree",
type=int,
default=1,
help="Pipeline Parallelism degree. Spliting the the model layers to different parts."
)
parser.add_argument(
"--use_recompute",
type=str2bool,
nargs='?',
const=False,
help="Using the recompute to save the memory.")
# AMP config
parser.add_argument(
"--use_amp",
type=str2bool,
nargs='?',
const=False,
help="Enable mixed precision training.")
parser.add_argument(
"--scale_loss",
type=float,
default=32768,
help="The value of scale_loss for fp16. This is only used for AMP training."
)
parser.add_argument(
"--hidden_dropout_prob",
type=float,
default=0.1,
help="The hidden dropout prob.")
parser.add_argument(
"--attention_probs_dropout_prob",
type=float,
default=0.1,
help="The attention probs dropout prob.")
# Other config
parser.add_argument(
"--seed", type=int, default=1234, help="Random seed for initialization")
parser.add_argument(
"--check_accuracy",
type=str2bool,
nargs='?',
const=False,
help="Check accuracy for training process.")
parser.add_argument(
"--device",
type=str,
default="gpu",
choices=["cpu", "gpu", "xpu"],
help="select cpu, gpu, xpu devices.")
parser.add_argument(
"--lr_decay_style",
type=str,
default="cosine",
choices=["cosine", "none"],
help="Learning rate decay style.")
args = parser.parse_args()
args.test_iters = args.eval_iters * 10
# process batch size
process_batch_size(args)
if args.check_accuracy:
if args.hidden_dropout_prob != 0:
args.hidden_dropout_prob = .0
logger.warning(
"The hidden_dropout_prob should set to 0 for accuracy checking.")
if args.attention_probs_dropout_prob != 0:
args.attention_probs_dropout_prob = .0
logger.warning(
"The attention_probs_dropout_prob should set to 0 for accuracy checking."
)
logger.info('{:20}:{}'.format("paddle commit id", paddle.version.commit))
for arg in vars(args):
logger.info('{:20}:{}'.format(arg, getattr(args, arg)))
return args
| 31.593857
| 173
| 0.608729
|
54760faf7be239b06b8a8c177a53feef74a39876
| 311
|
py
|
Python
|
Scrapper_example/items.py
|
danielpassy/Hebrew-Song-SImilarity
|
cebe87618e165c89a4cfa7cf591965f402cdbe0b
|
[
"MIT"
] | null | null | null |
Scrapper_example/items.py
|
danielpassy/Hebrew-Song-SImilarity
|
cebe87618e165c89a4cfa7cf591965f402cdbe0b
|
[
"MIT"
] | null | null | null |
Scrapper_example/items.py
|
danielpassy/Hebrew-Song-SImilarity
|
cebe87618e165c89a4cfa7cf591965f402cdbe0b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class QuotetutorialItem(scrapy.Item):
title = scrapy.Field()
author = scrapy.Field()
tag = scrapy.Field()
pass
| 18.294118
| 53
| 0.688103
|
1204394084ace020e7b4399f1039dd312fa7ebd3
| 142
|
py
|
Python
|
Python/test/helloword.py
|
isai129/CodeTask
|
bb4d6ee7f2a39424fddc939a792a0a78171ca580
|
[
"MIT"
] | 1
|
2021-02-06T01:56:43.000Z
|
2021-02-06T01:56:43.000Z
|
Python/test/helloword.py
|
isai129/CodeTask
|
bb4d6ee7f2a39424fddc939a792a0a78171ca580
|
[
"MIT"
] | null | null | null |
Python/test/helloword.py
|
isai129/CodeTask
|
bb4d6ee7f2a39424fddc939a792a0a78171ca580
|
[
"MIT"
] | null | null | null |
# First Python program
print('hello, world')
print('The quick brown fox', 'jumps over', 'the lazy dog')
print('100+200=', 100 + 200)
name = input()
| 14.2
| 59
| 0.647887
|
29f89133bac80a90e9b04034632114a6d1c76e8d
| 4,577
|
py
|
Python
|
v2/os_cmd/cdk/lib/IotPolicy/assets/iot_policy.py
|
bvitali/aws-iot-greengrass-accelerators
|
31d9920fc8312bceb89c3f610fe7368d78b117d6
|
[
"MIT-0"
] | 47
|
2019-09-13T09:22:44.000Z
|
2022-03-04T12:19:03.000Z
|
v2/os_cmd/cdk/lib/IotPolicy/assets/iot_policy.py
|
bvitali/aws-iot-greengrass-accelerators
|
31d9920fc8312bceb89c3f610fe7368d78b117d6
|
[
"MIT-0"
] | 5
|
2020-04-13T02:43:47.000Z
|
2021-11-16T19:44:22.000Z
|
v2/os_cmd/cdk/lib/IotPolicy/assets/iot_policy.py
|
bvitali/aws-iot-greengrass-accelerators
|
31d9920fc8312bceb89c3f610fe7368d78b117d6
|
[
"MIT-0"
] | 27
|
2019-09-13T18:19:42.000Z
|
2022-02-21T21:13:32.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import os
import sys
import json
import logging as logger
import boto3
from botocore.config import Config
from botocore.exceptions import ClientError
logger.getLogger().setLevel(logger.INFO)
def get_aws_client(name):
return boto3.client(
name,
config=Config(retries={"max_attempts": 10, "mode": "standard"}),
)
def create_resources(
iot_policy: str,
iot_policy_name: str,
certificate_arn: str,
):
"""Create AWS IoT policy and attach it to referenced certificate."""
c_iot = get_aws_client("iot")
result = {}
# Create policy
try:
response = c_iot.create_policy(
policyName=iot_policy_name, policyDocument=iot_policy
)
result["IotPolicyArn"] = response["policyArn"]
except ClientError as e:
logger.error(f"Error creating policy {iot_policy_name}, {e}")
sys.exit(1)
# attach cert-pol
try:
c_iot.attach_policy(policyName=iot_policy_name, target=certificate_arn)
except ClientError as e:
logger.error(
f"Error attaching certificate {certificate_arn} to policy {iot_policy_name}, {e}"
)
sys.exit(1)
return result
def delete_resources(certificate_arn: str, iot_policy_name: str):
"""Detach certificate from policy then delete the IoT policy"""
c_iot = get_aws_client("iot")
result = {}
# delete policy (prune versions, detach from targets)
# delete all non active policy versions
try:
response = c_iot.list_policy_versions(policyName=iot_policy_name)
for version in response["policyVersions"]:
if not version["isDefaultVersion"]:
c_iot.delete_policy_version(
policyName=iot_policy_name, policyVersionId=version["versionId"]
)
except ClientError as e:
logger.error(
f"Unable to delete policy versions for policy {iot_policy_name}, {e}"
)
# Detach any targets (things or principals from policy)
try:
response = c_iot.list_targets_for_policy(policyName=iot_policy_name)
for target in response["targets"]:
c_iot.detach_policy(policyName=iot_policy_name, target=target)
except ClientError as e:
logger.error(f"Unable to detach targets from policy {iot_policy_name}, {e}")
# delete policy
try:
c_iot.delete_policy(policyName=iot_policy_name)
except ClientError as e:
logger.error(f"Unable to delete policy {iot_policy_name}, {e}")
return result
def handler(event, context):
logger.info("Received event: %s", json.dumps(event, indent=2))
logger.info("Environment: %s", dict(os.environ))
props = event["ResourceProperties"]
physical_resource_id = ""
try:
# Check if this is a Create and we're failing Creates
if event["RequestType"] == "Create" and event["ResourceProperties"].get(
"FailCreate", False
):
raise RuntimeError("Create failure requested, logging")
elif event["RequestType"] == "Create":
logger.info("Request CREATE")
resp = create_resources(
iot_policy=props["IotPolicy"],
iot_policy_name=props["IoTPolicyName"],
certificate_arn=props["CertificateArn"],
)
# set response data (PascalCase key)
response_data = {
"IotPolicyArn": resp["IotPolicyArn"],
}
physical_resource_id = response_data["IotPolicyArn"]
elif event["RequestType"] == "Update":
logger.info("Request UPDATE")
response_data = {}
elif event["RequestType"] == "Delete":
logger.info("Request DELETE")
resp = delete_resources(
certificate_arn=props["CertificateArn"],
iot_policy_name=props["IoTPolicyName"],
)
response_data = {}
physical_resource_id = event["PhysicalResourceId"]
else:
logger.info("Should not get here in normal cases - could be REPLACE")
output = {"PhysicalResourceId": physical_resource_id, "Data": response_data}
logger.info("Output from Lambda: %s", json.dumps(output, indent=2))
return output
except Exception as e:
logger.exception(e)
sys.exit(1)
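# Hedged local sketch (not part of the original asset): the shape of the
# CloudFormation custom-resource "Create" event this handler expects. The ARN,
# policy document and names below are placeholders; uncommenting the call would
# reach AWS IoT through boto3.
if __name__ == "__main__":
    example_event = {
        "RequestType": "Create",
        "ResourceProperties": {
            "IotPolicy": json.dumps(
                {
                    "Version": "2012-10-17",
                    "Statement": [
                        {"Effect": "Allow", "Action": "iot:Connect", "Resource": "*"}
                    ],
                }
            ),
            "IoTPolicyName": "ExamplePolicy",
            "CertificateArn": "arn:aws:iot:us-east-1:123456789012:cert/EXAMPLE",
        },
    }
    # handler(example_event, None)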
| 33.166667
| 93
| 0.634695
|
7895862e2f8e9671545aacaf9e00913099146308
| 7,736
|
py
|
Python
|
mercury/fog_model/iot_devices/ue/ue.py
|
greenlsi/mercury_mso_framework
|
8b9639e5cb4b2c526a65861c93a9fe9db2460ea4
|
[
"Apache-2.0"
] | 1
|
2020-07-21T11:22:39.000Z
|
2020-07-21T11:22:39.000Z
|
mercury/fog_model/iot_devices/ue/ue.py
|
greenlsi/mercury_mso_framework
|
8b9639e5cb4b2c526a65861c93a9fe9db2460ea4
|
[
"Apache-2.0"
] | 2
|
2021-08-25T16:09:58.000Z
|
2022-02-10T02:21:03.000Z
|
mercury/fog_model/iot_devices/ue/ue.py
|
greenlsi/mercury_mso_framework
|
8b9639e5cb4b2c526a65861c93a9fe9db2460ea4
|
[
"Apache-2.0"
] | 1
|
2021-02-24T15:54:09.000Z
|
2021-02-24T15:54:09.000Z
|
from xdevs.models import Coupled, Port
from mercury.config.iot_devices import UserEquipmentConfig
from mercury.msg.network import PhysicalPacket, NetworkPacket
from mercury.msg.iot_devices import ServiceDelayReport
from .ue_antenna import UserEquipmentAntenna
from .service_mux import UEServiceMux
from .service import Service
from .access_manager import AccessManager
class UserEquipment(Coupled):
def __init__(self, ue_config: UserEquipmentConfig, guard_time: float = 0):
"""
User Equipment xDEVS model
:param ue_config: User Equipment Configuration
:param guard_time: Initial guard time in order to avoid identical simultaneous behavior between UEs
"""
# Unpack configuration parameters
ue_id = ue_config.ue_id
services_config = ue_config.services_config
t_start = ue_config.t_start
t_end = ue_config.t_end
super().__init__('iot_devices_{}'.format(ue_id))
self.ue_id = ue_id
# Define and add components
antenna = UserEquipmentAntenna(ue_id)
self.access_manager = AccessManager(ue_id, t_start)
service_mux = UEServiceMux(ue_id, set(services_config.keys()))
services = [Service(ue_id, service_config, guard_time, t_start=t_start, t_end=t_end)
for service_config in services_config.values()]
self.add_component(antenna)
self.add_component(self.access_manager)
self.add_component(service_mux)
[self.add_component(service) for service in services]
# I/O ports
self.input_radio_bc = Port(PhysicalPacket, 'input_radio_bc')
self.input_radio_control_dl = Port(PhysicalPacket, 'input_radio_control_dl')
self.input_radio_transport_dl = Port(PhysicalPacket, 'input_radio_transport_dl')
self.output_radio_control_ul = Port(PhysicalPacket, 'output_radio_control_ul')
self.output_radio_transport_ul = Port(PhysicalPacket, 'output_radio_transport_ul')
self.add_in_port(self.input_radio_bc)
self.add_in_port(self.input_radio_control_dl)
self.add_in_port(self.input_radio_transport_dl)
self.add_out_port(self.output_radio_control_ul)
self.add_out_port(self.output_radio_transport_ul)
self.output_repeat_pss = Port(str, 'output_repeat_pss')
self.output_service_delay_report = Port(ServiceDelayReport, 'output_service_delay_report')
self.add_out_port(self.output_repeat_pss)
self.add_out_port(self.output_service_delay_report)
self.external_couplings_antenna(antenna)
self.external_couplings_access(self.access_manager)
for service in services:
self.external_couplings_service(service)
self.internal_couplings_antenna_access(antenna, self.access_manager)
self.internal_couplings_antenna_mux(antenna, service_mux)
for service in services:
self.internal_couplings_antenna_service(antenna, service)
self.internal_couplings_access_service(self.access_manager, service)
self.internal_couplings_mux_service(service_mux, service)
def external_couplings_antenna(self, antenna: UserEquipmentAntenna):
self.add_coupling(self.input_radio_bc, antenna.input_radio_bc)
self.add_coupling(self.input_radio_control_dl, antenna.input_radio_control_dl)
self.add_coupling(self.input_radio_transport_dl, antenna.input_radio_transport_dl)
self.add_coupling(antenna.output_radio_control_ul, self.output_radio_control_ul)
self.add_coupling(antenna.output_radio_transport_ul, self.output_radio_transport_ul)
def external_couplings_service(self, service: Service):
self.add_coupling(service.output_service_delay_report, self.output_service_delay_report)
def external_couplings_access(self, access: AccessManager):
self.add_coupling(access.output_repeat_location, self.output_repeat_pss)
def internal_couplings_antenna_access(self, antenna: UserEquipmentAntenna, access_manager: AccessManager):
self.add_coupling(antenna.output_pss, access_manager.input_pss)
self.add_coupling(antenna.output_access_response, access_manager.input_access_response)
self.add_coupling(antenna.output_disconnect_response, access_manager.input_disconnect_response)
self.add_coupling(antenna.output_ho_started, access_manager.input_ho_started)
self.add_coupling(antenna.output_ho_finished, access_manager.input_ho_finished)
self.add_coupling(access_manager.output_access_request, antenna.input_access_request)
self.add_coupling(access_manager.output_disconnect_request, antenna.input_disconnect_request)
self.add_coupling(access_manager.output_rrc, antenna.input_rrc)
self.add_coupling(access_manager.output_ho_ready, antenna.input_ho_ready)
self.add_coupling(access_manager.output_ho_response, antenna.input_ho_response)
self.add_coupling(access_manager.output_connected_ap, antenna.input_connected_ap)
self.add_coupling(access_manager.output_antenna_powered, antenna.input_antenna_powered)
def internal_couplings_antenna_service(self, antenna: UserEquipmentAntenna, service: Service):
self.add_coupling(service.output_network, antenna.input_service)
def internal_couplings_antenna_mux(self, antenna: UserEquipmentAntenna, service_mux: UEServiceMux):
self.add_coupling(antenna.output_service, service_mux.input_network)
def internal_couplings_mux_service(self, service_mux: UEServiceMux, service: Service):
service_id = service.service_id
self.add_coupling(service_mux.outputs_network[service_id], service.input_network)
def internal_couplings_access_service(self, access_manager: AccessManager, service: Service):
self.add_coupling(access_manager.output_connected_ap, service.input_connected_ap)
self.add_coupling(service.output_service_required, access_manager.input_service_required)
class UserEquipmentLite(Coupled):
def __init__(self, ue_config: UserEquipmentConfig, guard_time: float = 0):
services_config = ue_config.services_config
ue_id = ue_config.ue_id
t_start = ue_config.t_start
t_end = ue_config.t_end
super().__init__('iot_devices_{}'.format(ue_id))
self.ue_id = ue_id
# Define and add components
services = [Service(ue_id, service_config, guard_time, t_start, t_end, lite=True)
for service_config in services_config.values()]
[self.add_component(service) for service in services]
self.input_network = Port(NetworkPacket, 'input_network')
self.output_network = Port(NetworkPacket, 'output_network')
self.output_service_delay_report = Port(ServiceDelayReport, 'output_service_delay_report')
self.add_in_port(self.input_network)
self.add_out_port(self.output_network)
self.add_out_port(self.output_service_delay_report)
if len(services) > 1: # More than one service -> we add a multiplexer
service_mux = UEServiceMux(ue_id, set(services_config.keys()))
self.add_component(service_mux)
self.add_coupling(self.input_network, service_mux.input_network)
for service in services:
self.add_coupling(service_mux.outputs_network[service.service_id], service.input_network)
else: # Otherwise, multiplexer is not required
self.add_coupling(self.input_network, services[0].input_network)
for service in services:
self.add_coupling(service.output_network, self.output_network)
self.add_coupling(service.output_service_delay_report, self.output_service_delay_report)
| 53.351724
| 110
| 0.757239
|
7f8536e330e42176e576535a4d16a46be7b42850
| 62
|
py
|
Python
|
python/testData/intentions/PyAnnotateVariableTypeIntentionTest/AnnotationImportTypingUnion/main.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/intentions/PyAnnotateVariableTypeIntentionTest/AnnotationImportTypingUnion/main.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/intentions/PyAnnotateVariableTypeIntentionTest/AnnotationImportTypingUnion/main.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from lib import foo
def func():
var = foo
v<caret>ar
| 10.333333
| 19
| 0.596774
|
8926194856163eccfb63e2bae9f2c14f8c1fb88f
| 4,696
|
py
|
Python
|
benchmark/startQiskit1671.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit1671.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit1671.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=5
# total number=66
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[1]) # number=29
prog.cz(input_qubit[3],input_qubit[1]) # number=30
prog.h(input_qubit[1]) # number=31
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=38
prog.cz(input_qubit[1],input_qubit[0]) # number=39
prog.h(input_qubit[0]) # number=40
prog.h(input_qubit[0]) # number=51
prog.cz(input_qubit[1],input_qubit[0]) # number=52
prog.h(input_qubit[0]) # number=53
prog.cx(input_qubit[1],input_qubit[0]) # number=48
prog.x(input_qubit[0]) # number=49
prog.h(input_qubit[0]) # number=57
prog.cz(input_qubit[1],input_qubit[0]) # number=58
prog.h(input_qubit[0]) # number=59
prog.h(input_qubit[0]) # number=54
prog.cz(input_qubit[1],input_qubit[0]) # number=55
prog.h(input_qubit[0]) # number=56
prog.h(input_qubit[4]) # number=41
prog.h(input_qubit[0]) # number=61
prog.cz(input_qubit[1],input_qubit[0]) # number=62
prog.h(input_qubit[0]) # number=63
prog.x(input_qubit[1]) # number=10
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=27
prog.x(input_qubit[2]) # number=23
prog.cx(input_qubit[0],input_qubit[2]) # number=24
prog.cx(input_qubit[0],input_qubit[3]) # number=32
prog.x(input_qubit[3]) # number=33
prog.h(input_qubit[3]) # number=42
prog.cz(input_qubit[0],input_qubit[3]) # number=43
prog.h(input_qubit[3]) # number=44
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.rx(0.6157521601035993,input_qubit[1]) # number=60
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
prog.swap(input_qubit[3],input_qubit[0]) # number=64
prog.swap(input_qubit[3],input_qubit[0]) # number=65
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit1671.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 33.070423
| 82
| 0.608603
|
a1bd7c6bfa24d76ed9c9642e70b0f33e0c20c87d
| 5,180
|
py
|
Python
|
shadowsocks/server_pool.py
|
ovear/shadowsocks
|
1c32446f1adf6e50bed65fb4baf28d22431ed7e6
|
[
"MIT"
] | 10
|
2015-01-11T13:26:25.000Z
|
2016-04-16T04:58:48.000Z
|
shadowsocks/server_pool.py
|
ShenXuGongZi/shadowsocks
|
1c32446f1adf6e50bed65fb4baf28d22431ed7e6
|
[
"MIT"
] | null | null | null |
shadowsocks/server_pool.py
|
ShenXuGongZi/shadowsocks
|
1c32446f1adf6e50bed65fb4baf28d22431ed7e6
|
[
"MIT"
] | 1
|
2021-04-23T12:36:32.000Z
|
2021-04-23T12:36:32.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import logging
import utils
import time
import eventloop
import tcprelay
import udprelay
import asyncdns
import thread
import threading
import sys
import asyncmgr
import Config
from socket import *
class ServerPool(object):
instance = None
def __init__(self):
utils.check_python()
self.config = utils.get_config(False)
utils.print_shadowsocks()
self.dns_resolver = asyncdns.DNSResolver()
self.mgr = asyncmgr.ServerMgr()
self.tcp_servers_pool = {}
#self.udp_servers_pool = {}
self.loop = eventloop.EventLoop()
thread.start_new_thread(ServerPool._loop, (self.loop, self.dns_resolver, self.mgr))
@staticmethod
def get_instance():
if ServerPool.instance is None:
ServerPool.instance = ServerPool()
return ServerPool.instance
@staticmethod
def _loop(loop, dns_resolver, mgr):
try:
mgr.add_to_loop(loop)
dns_resolver.add_to_loop(loop)
loop.run()
except (KeyboardInterrupt, IOError, OSError) as e:
logging.error(e)
import traceback
traceback.print_exc()
            os._exit(0)
def server_is_run(self, port):
port = int(port)
if port in self.tcp_servers_pool:
return True
return False
def new_server(self, port, password):
port = int(port)
logging.info("start server at %d" % port)
try:
udpsock = socket(AF_INET, SOCK_DGRAM)
udpsock.sendto('%s:%s:%s:1' % (Config.MANAGE_PASS, port, password), (Config.MANAGE_BIND_IP, Config.MANAGE_PORT))
udpsock.close()
except Exception, e:
logging.warn(e)
return True
def cb_new_server(self, port, password):
ret = True
port = int(port)
if 'server' in self.config:
if port in self.tcp_servers_pool:
logging.info("server already at %s:%d" % (self.config['server'], port))
return 'this port server is already running'
else:
a_config = self.config.copy()
a_config['server_port'] = port
a_config['password'] = password
try:
logging.info("starting server at %s:%d" % (a_config['server'], port))
tcp_server = tcprelay.TCPRelay(a_config, self.dns_resolver, False)
tcp_server.add_to_loop(self.loop)
self.tcp_servers_pool.update({port: tcp_server})
#udp_server = udprelay.UDPRelay(a_config, self.dns_resolver, False)
#udp_server.add_to_loop(self.loop)
#self.udp_servers_pool.update({port: udp_server})
except Exception, e:
logging.warn(e)
return True
def del_server(self, port):
port = int(port)
logging.info("del server at %d" % port)
try:
udpsock = socket(AF_INET, SOCK_DGRAM)
udpsock.sendto('%s:%s:0:0' % (Config.MANAGE_PASS, port), (Config.MANAGE_BIND_IP, Config.MANAGE_PORT))
udpsock.close()
except Exception, e:
logging.warn(e)
return True
def cb_del_server(self, port):
port = int(port)
if port not in self.tcp_servers_pool:
logging.info("stopped server at %s:%d already stop" % (self.config['server'], port))
else:
logging.info("stopped server at %s:%d" % (self.config['server'], port))
try:
server = self.tcp_servers_pool[port]
del self.tcp_servers_pool[port]
server.destroy()
except Exception, e:
logging.warn(e)
return True
def get_servers_transfer(self):
ret = {}
servers = self.tcp_servers_pool.copy()
for port in servers.keys():
ret[port] = [servers[port].server_transfer_ul, servers[port].server_transfer_dl]
return ret
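# Hedged usage sketch, not part of the original module: one way a caller might
# drive the pool. The port and password below are placeholders; new_server only
# sends the manage datagram, the actual relay is created later by cb_new_server.
def _example_usage():
    pool = ServerPool.get_instance()
    pool.new_server(8388, 'example-password')
    if pool.server_is_run(8388):
        logging.info(pool.get_servers_transfer())  # {port: [upload, download]}
    pool.del_server(8388)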
| 35.724138
| 124
| 0.622201
|
de6b220a074d12a2af3ec321e4151988f8b328c3
| 1,233
|
py
|
Python
|
Pytorch Basic Exercises/Building an AutoEncoder with Pytorch/helper.py
|
padam56/Miscellaneous
|
596ed3491dc65b38cca1e960b20beb80557224aa
|
[
"MIT"
] | null | null | null |
Pytorch Basic Exercises/Building an AutoEncoder with Pytorch/helper.py
|
padam56/Miscellaneous
|
596ed3491dc65b38cca1e960b20beb80557224aa
|
[
"MIT"
] | null | null | null |
Pytorch Basic Exercises/Building an AutoEncoder with Pytorch/helper.py
|
padam56/Miscellaneous
|
596ed3491dc65b38cca1e960b20beb80557224aa
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import torch
def show_image(noisy_image,org_image,pred_image = None):
  if pred_image is None:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,5))
ax1.set_title('noisy_image')
ax1.imshow(noisy_image.permute(1,2,0).squeeze(),cmap = 'gray')
ax2.set_title('original_image')
ax2.imshow(org_image.permute(1,2,0).squeeze(),cmap = 'gray')
  elif pred_image is not None:
f, (ax1, ax2,ax3) = plt.subplots(1, 3, figsize=(10,5))
ax1.set_title('noisy_image')
ax1.imshow(noisy_image.permute(1,2,0).squeeze(),cmap = 'gray')
ax2.set_title('original_image')
ax2.imshow(org_image.permute(1,2,0).squeeze(),cmap = 'gray')
ax3.set_title('denoised_image')
ax3.imshow(pred_image.permute(1,2,0).squeeze(),cmap = 'gray')
class ToTensorForAE(object):
def __call__(self,sample):
images,labels = sample
images = images.transpose((2,0,1))
labels = labels.transpose((2,0,1))
return torch.from_numpy(images).float(),torch.from_numpy(labels).float()
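# Hedged usage sketch, not part of the original file: wiring the transform and the
# plotting helper together. noisy_np / clean_np stand for HxWxC numpy arrays taken
# from some dataset; the names are illustrative only.
def _example_usage(noisy_np, clean_np, denoised_t=None):
  noisy_t, clean_t = ToTensorForAE()((noisy_np, clean_np))  # HxWxC -> CxHxW tensors
  show_image(noisy_t, clean_t)                  # 2-panel figure: noisy vs original
  if denoised_t is not None:
    show_image(noisy_t, clean_t, denoised_t)    # 3-panel figure incl. reconstruction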
| 29.357143
| 80
| 0.575831
|
6bb9c371fc0f4f62db8dd9bc7dabf6596ac74164
| 9,707
|
py
|
Python
|
Lib/test/test_compiler/test_static/non_static_inheritance.py
|
mananpal1997/cinder
|
a8804cc6e3a5861463ff959abcd09ad60a0763e5
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
Lib/test/test_compiler/test_static/non_static_inheritance.py
|
mananpal1997/cinder
|
a8804cc6e3a5861463ff959abcd09ad60a0763e5
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
Lib/test/test_compiler/test_static/non_static_inheritance.py
|
mananpal1997/cinder
|
a8804cc6e3a5861463ff959abcd09ad60a0763e5
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
import unittest
from compiler.pycodegen import CinderCodeGenerator
from .common import StaticTestBase
class NonStaticInheritanceTests(StaticTestBase):
def test_static_return_is_resolved_with_multiple_levels_of_inheritance(self):
codestr = """
class C:
def foobar(self, x: int) -> int:
return x
def f(self) -> int:
return self.foobar(1)
"""
with self.in_strict_module(codestr, name="mymod", enable_patching=True) as mod:
C = mod.C
class D(C):
def foobar(self, x: int) -> int:
return x + 1
class E(D):
def foobar(self, x: int) -> int:
return x + 2
self.assertEqual(D().f(), 2)
self.assertEqual(E().f(), 3)
def test_multiple_inheritance_initialization(self):
"""Primarily testing that when we have multiple inheritance that
we safely initialize all of our v-tables. Previously we could
init B2 while initializing the bases for DM, and then we wouldn't
initialize the classes derived from it."""
codestr = """
class C:
def foobar(self, x: int) -> int:
return x
def f(self) -> int:
return self.foobar(1)
def g(self): pass
def f(x: C):
return x.f()
"""
with self.in_strict_module(
codestr, name="mymod", enable_patching=True, freeze=False
) as mod:
C = mod.C
f = mod.f
class B1(C):
def f(self):
return 10
class B2(C):
def f(self):
return 20
class D(B2):
def f(self):
return 30
class DM(B2, B1):
pass
# Force initialization of C down
C.g = 42
self.assertEqual(f(B1()), 10)
self.assertEqual(f(B2()), 20)
self.assertEqual(f(D()), 30)
self.assertEqual(f(DM()), 20)
def test_multiple_inheritance_initialization_invoke_only(self):
"""Primarily testing that when we have multiple inheritance that
we safely initialize all of our v-tables. Previously we could
init B2 while initializing the bases for DM, and then we wouldn't
initialize the classes derived from it."""
codestr = """
class C:
def foobar(self, x: int) -> int:
return x
def f(self) -> int:
return self.foobar(1)
def g(self): pass
def f(x: C):
return x.f()
"""
with self.in_strict_module(codestr, name="mymod", enable_patching=True) as mod:
C = mod.C
f = mod.f
class B1(C):
def f(self):
return 10
class B2(C):
def f(self):
return 20
class D(B2):
def f(self):
return 30
class DM(B2, B1):
pass
# No forced initialization, only invokes
self.assertEqual(f(C()), 1)
self.assertEqual(f(B1()), 10)
self.assertEqual(f(B2()), 20)
self.assertEqual(f(D()), 30)
self.assertEqual(f(DM()), 20)
def test_inherit_abc(self):
codestr = """
from abc import ABC
class C(ABC):
@property
def f(self) -> int:
return 42
def g(self) -> int:
return self.f
"""
with self.in_module(codestr) as mod:
C = mod.C
a = C()
self.assertEqual(a.g(), 42)
def test_static_decorator_non_static_class(self):
codestr = """
def mydec(f):
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper
class B:
def g(self): pass
def f(x: B):
return x.g()
"""
with self.in_module(codestr) as mod:
mydec = mod.mydec
B = mod.B
f = mod.f
# force v-table initialization on base
f(B())
class D(B):
@mydec
def f(self):
pass
self.assertEqual(D().f(), None)
D.f = lambda self: 42
self.assertEqual(f(B()), None)
self.assertEqual(f(D()), None)
self.assertEqual(D().f(), 42)
def test_nonstatic_multiple_inheritance_invoke(self):
"""multiple inheritance from non-static classes should
result in only static classes in the v-table"""
codestr = """
def f(x: str):
return x.encode('utf8')
"""
class C:
pass
class D(C, str):
pass
with self.in_module(codestr) as mod:
self.assertEqual(mod.f(D("abc")), b"abc")
def test_nonstatic_multiple_inheritance_invoke_static_base(self):
codestr = """
class B:
def f(self):
return 42
def f(x: B):
return x.f()
"""
class C:
def f(self):
return "abc"
with self.in_module(codestr) as mod:
class D(C, mod.B):
pass
self.assertEqual(mod.f(D()), "abc")
def test_nonstatic_multiple_inheritance_invoke_static_base_2(self):
codestr = """
class B:
def f(self):
return 42
def f(x: B):
return x.f()
"""
class C:
def f(self):
return "abc"
with self.in_module(codestr) as mod:
class D(C, mod.B):
def f(self):
return "foo"
self.assertEqual(mod.f(D()), "foo")
def test_no_inherit_multiple_static_bases(self):
codestr = """
class A:
pass
class B:
pass
"""
with self.in_module(codestr) as mod:
with self.assertRaisesRegex(
TypeError, r"multiple bases have instance lay-out conflict"
):
class C(mod.A, mod.B):
pass
def test_no_inherit_multiple_static_bases_indirect(self):
codestr = """
class A:
pass
class B:
pass
"""
with self.in_module(codestr) as mod:
class C(mod.B):
pass
with self.assertRaisesRegex(
TypeError, r"multiple bases have instance lay-out conflict"
):
class D(C, mod.A):
pass
def test_no_inherit_static_and_builtin(self):
codestr = """
class A:
pass
"""
with self.in_module(codestr) as mod:
with self.assertRaisesRegex(
TypeError, r"multiple bases have instance lay-out conflict"
):
class C(mod.A, str):
pass
def test_mutate_sub_sub_class(self):
"""patching non-static class through multiple levels
of inheritance shouldn't crash"""
codestr = """
class B:
def __init__(self): pass
def f(self):
return 42
def f(b: B):
return b.f()
"""
with self.in_module(codestr) as mod:
# force initialization of the class
self.assertEqual(mod.f(mod.B()), 42)
class D1(mod.B):
def __init__(self):
pass
class D2(D1):
def __init__(self):
pass
D1.__init__ = lambda self: None
D2.__init__ = lambda self: None
self.assertEqual(mod.f(D1()), 42)
self.assertEqual(mod.f(D2()), 42)
def test_invoke_class_method_dynamic_base(self):
bases = """
class B1: pass
"""
codestr = """
from bases import B1
class D(B1):
@classmethod
def f(cls):
return cls.g()
@classmethod
def g(cls):
return 42
def f():
return D.f()
"""
with self.in_module(
bases, name="bases", code_gen=CinderCodeGenerator
), self.in_module(codestr) as mod:
f = mod.f
self.assertEqual(f(), 42)
def test_no_inherit_static_through_nonstatic(self):
base = """
class A:
pass
"""
nonstatic = """
from base import A
class B(A):
pass
"""
static = """
from nonstatic import B
class C(B):
pass
"""
with self.in_module(base, name="base"), self.in_module(
nonstatic, name="nonstatic", code_gen=CinderCodeGenerator
):
with self.assertRaisesRegex(
TypeError,
r"Static compiler cannot verify that static type 'C' is a "
r"valid override of static base 'A' because intervening base "
r"'B' is non-static",
):
self.run_code(static)
if __name__ == "__main__":
unittest.main()
| 26.667582
| 87
| 0.463686
|
feb70962a1f7fd5519703a10a605386138fc5c05
| 3,646
|
py
|
Python
|
tensorflow_model_optimization/python/examples/clustering/keras/mnist/mnist_mha.py
|
arovir01/model-optimization
|
92bfb45da34715eeff8849c2007cf3b734429120
|
[
"Apache-2.0"
] | 1,318
|
2018-10-31T23:57:52.000Z
|
2022-03-30T11:07:40.000Z
|
tensorflow_model_optimization/python/examples/clustering/keras/mnist/mnist_mha.py
|
arovir01/model-optimization
|
92bfb45da34715eeff8849c2007cf3b734429120
|
[
"Apache-2.0"
] | 410
|
2019-05-15T14:11:13.000Z
|
2022-03-31T07:27:07.000Z
|
tensorflow_model_optimization/python/examples/clustering/keras/mnist/mnist_mha.py
|
arovir01/model-optimization
|
92bfb45da34715eeff8849c2007cf3b734429120
|
[
"Apache-2.0"
] | 290
|
2019-05-14T17:42:49.000Z
|
2022-03-28T02:21:45.000Z
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=missing-docstring
"""Train a simple convnet with MultiHeadAttention layer on MNIST dataset
and cluster it.
"""
import tensorflow as tf
import tensorflow_model_optimization as tfmot
import numpy as np
NUMBER_OF_CLUSTERS = 3
# Load MNIST dataset
mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# define model
input = tf.keras.layers.Input(shape=(28, 28))
x = tf.keras.layers.MultiHeadAttention(num_heads=2, key_dim=16, name="mha")(
query=input, value=input
)
x = tf.keras.layers.Flatten()(x)
out = tf.keras.layers.Dense(10)(x)
model = tf.keras.Model(inputs=input, outputs=out)
# Train the digit classification model
model.compile(
optimizer="adam",
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
model.fit(
train_images, train_labels, epochs=1, validation_split=0.1,
)
score = model.evaluate(test_images, test_labels, verbose=0)
print('Model test loss:', score[0])
print('Model test accuracy:', score[1])
# Compute end step to finish pruning after 2 epochs.
batch_size = 128
epochs = 1
validation_split = 0.1 # 10% of training set will be used for validation set.
# Define model for clustering
cluster_weights = tfmot.clustering.keras.cluster_weights
CentroidInitialization = tfmot.clustering.keras.CentroidInitialization
clustering_params = {
"number_of_clusters": NUMBER_OF_CLUSTERS,
"cluster_centroids_init": CentroidInitialization.KMEANS_PLUS_PLUS,
}
model_for_clustering = cluster_weights(model, **clustering_params)
# `cluster_weights` requires a recompile.
model_for_clustering.compile(
optimizer="adam",
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
model_for_clustering.fit(
train_images,
train_labels,
batch_size=batch_size,
epochs=epochs,
validation_split=validation_split,
)
score = model_for_clustering.evaluate(test_images, test_labels, verbose=0)
print('Clustered model test loss:', score[0])
print('Clustered model test accuracy:', score[1])
# Strip clustering from the model
clustered_model = tfmot.clustering.keras.strip_clustering(model_for_clustering)
clustered_model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer='adam',
metrics=['accuracy'])
score = clustered_model.evaluate(test_images, test_labels, verbose=0)
print('Stripped clustered model test loss:', score[0])
print('Stripped clustered model test accuracy:', score[1])
# Check that numbers of weights for MHA layer is the given number of clusters.
mha_weights = list(filter(lambda x: 'mha' in x.name and 'kernel' in x.name, clustered_model.weights))
for x in mha_weights:
assert len(np.unique(x.numpy())) == NUMBER_OF_CLUSTERS
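# Hedged illustration, not part of the original script: the same uniqueness check can
# be applied to any other clustered kernel (e.g. the final Dense layer) to confirm
# that clustering constrained its weights too; 'dense' is an assumed name substring.
dense_kernels = list(filter(lambda x: 'dense' in x.name and 'kernel' in x.name, clustered_model.weights))
for x in dense_kernels:
  print(x.name, 'unique weight values:', len(np.unique(x.numpy())))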
| 34.074766
| 101
| 0.75096
|
e22b1cf7369f6851c0ea78bb9647c19e347f7e7b
| 552
|
py
|
Python
|
handlers/eventhandlers/74-friend-remove.py
|
osukurikku/kuriso
|
ae6a6f2d87f2aa592c9fe105a6c01872a02cc057
|
[
"MIT"
] | 6
|
2021-03-07T20:14:29.000Z
|
2022-03-10T20:28:20.000Z
|
handlers/eventhandlers/74-friend-remove.py
|
osukurikku/kuriso
|
ae6a6f2d87f2aa592c9fe105a6c01872a02cc057
|
[
"MIT"
] | 3
|
2021-04-20T17:18:58.000Z
|
2022-03-28T18:17:35.000Z
|
handlers/eventhandlers/74-friend-remove.py
|
osukurikku/kuriso
|
ae6a6f2d87f2aa592c9fe105a6c01872a02cc057
|
[
"MIT"
] | 4
|
2021-03-30T12:55:07.000Z
|
2022-03-10T09:01:16.000Z
|
from handlers.decorators import OsuEvent
from packets.OsuPacketID import OsuPacketID
from packets.Reader.PacketResolver import PacketResolver
from helpers import userHelper
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from objects.Player import Player
# Client packet: 74
@OsuEvent.register_handler(OsuPacketID.Client_FriendRemove)
async def remove_friend(packet_data: bytes, token: 'Player'):
not_friend_id = await PacketResolver.read_friend_id(packet_data)
await userHelper.remove_friend(token.id, not_friend_id)
return True
| 32.470588
| 68
| 0.827899
|
893b3c7a5bac6c984505f8c2fd2301bb1aa6e663
| 17,614
|
py
|
Python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/operations/_virtual_machine_images_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/operations/_virtual_machine_images_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/operations/_virtual_machine_images_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineImagesOperations(object):
"""VirtualMachineImagesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2019_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
location, # type: str
publisher_name, # type: str
offer, # type: str
skus, # type: str
version, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualMachineImage"
"""Gets a virtual machine image.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:param offer: A valid image publisher offer.
:type offer: str
:param skus: A valid image SKU.
:type skus: str
:param version: A valid image SKU version.
:type version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineImage, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2019_03_01.models.VirtualMachineImage
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'offer': self._serialize.url("offer", offer, 'str'),
'skus': self._serialize.url("skus", skus, 'str'),
'version': self._serialize.url("version", version, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}'} # type: ignore
def list(
self,
location, # type: str
publisher_name, # type: str
offer, # type: str
skus, # type: str
expand=None, # type: Optional[str]
top=None, # type: Optional[int]
orderby=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> List["_models.VirtualMachineImageResource"]
"""Gets a list of all virtual machine image versions for the specified location, publisher, offer,
and SKU.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:param offer: A valid image publisher offer.
:type offer: str
:param skus: A valid image SKU.
:type skus: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:param top:
:type top: int
:param orderby:
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'offer': self._serialize.url("offer", offer, 'str'),
'skus': self._serialize.url("skus", skus, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions'} # type: ignore
def list_offers(
self,
location, # type: str
publisher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.VirtualMachineImageResource"]
"""Gets a list of virtual machine image offers for the specified location and publisher.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
accept = "application/json"
# Construct URL
url = self.list_offers.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_offers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers'} # type: ignore
def list_publishers(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.VirtualMachineImageResource"]
"""Gets a list of virtual machine image publishers for the specified Azure location.
:param location: The name of a supported Azure region.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
accept = "application/json"
# Construct URL
url = self.list_publishers.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_publishers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers'} # type: ignore
def list_skus(
self,
location, # type: str
publisher_name, # type: str
offer, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.VirtualMachineImageResource"]
"""Gets a list of virtual machine image SKUs for the specified location, publisher, and offer.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:param offer: A valid image publisher offer.
:type offer: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2019_03_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
accept = "application/json"
# Construct URL
url = self.list_skus.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'offer': self._serialize.url("offer", offer, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_skus.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus'} # type: ignore
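# Hedged usage sketch, not part of the generated client: these operations are
# normally reached through ComputeManagementClient rather than instantiated
# directly. The credential type, subscription id and image coordinates below are
# placeholders for illustration only.
def _example_usage():
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.compute import ComputeManagementClient
    client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
    publishers = client.virtual_machine_images.list_publishers(location="eastus")
    skus = client.virtual_machine_images.list_skus(
        location="eastus", publisher_name="Canonical", offer="UbuntuServer")
    return publishers, skus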
| 46.721485
| 221
| 0.659589
|
1f837d58bf821ed2597cbb4c62fe9c31be77a2fb
| 6,135
|
py
|
Python
|
tests/components/smartthings/test_fan.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 4
|
2016-06-22T12:00:41.000Z
|
2018-06-11T20:31:25.000Z
|
tests/components/smartthings/test_fan.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 58
|
2020-08-03T07:33:02.000Z
|
2022-03-31T06:02:05.000Z
|
tests/components/smartthings/test_fan.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 6
|
2019-07-06T00:43:13.000Z
|
2021-01-16T13:27:06.000Z
|
"""
Test for the SmartThings fan platform.
The only mocking required is of the underlying SmartThings API object so
real HTTP calls are not initiated during testing.
"""
from pysmartthings import Attribute, Capability
from homeassistant.components.fan import (
ATTR_SPEED,
ATTR_SPEED_LIST,
DOMAIN as FAN_DOMAIN,
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_OFF,
SUPPORT_SET_SPEED,
)
from homeassistant.components.smartthings.const import DOMAIN, SIGNAL_SMARTTHINGS_UPDATE
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
STATE_UNAVAILABLE,
)
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .conftest import setup_platform
async def test_entity_state(hass, device_factory):
"""Tests the state attributes properly match the fan types."""
device = device_factory(
"Fan 1",
capabilities=[Capability.switch, Capability.fan_speed],
status={Attribute.switch: "on", Attribute.fan_speed: 2},
)
await setup_platform(hass, FAN_DOMAIN, devices=[device])
# Dimmer 1
state = hass.states.get("fan.fan_1")
assert state.state == "on"
assert state.attributes[ATTR_SUPPORTED_FEATURES] == SUPPORT_SET_SPEED
assert state.attributes[ATTR_SPEED] == SPEED_MEDIUM
assert state.attributes[ATTR_SPEED_LIST] == [
SPEED_OFF,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_HIGH,
]
async def test_entity_and_device_attributes(hass, device_factory):
"""Test the attributes of the entity are correct."""
# Arrange
device = device_factory(
"Fan 1",
capabilities=[Capability.switch, Capability.fan_speed],
status={Attribute.switch: "on", Attribute.fan_speed: 2},
)
# Act
await setup_platform(hass, FAN_DOMAIN, devices=[device])
entity_registry = await hass.helpers.entity_registry.async_get_registry()
device_registry = await hass.helpers.device_registry.async_get_registry()
# Assert
entry = entity_registry.async_get("fan.fan_1")
assert entry
assert entry.unique_id == device.device_id
entry = device_registry.async_get_device({(DOMAIN, device.device_id)})
assert entry
assert entry.name == device.label
assert entry.model == device.device_type_name
assert entry.manufacturer == "Unavailable"
async def test_turn_off(hass, device_factory):
"""Test the fan turns of successfully."""
# Arrange
device = device_factory(
"Fan 1",
capabilities=[Capability.switch, Capability.fan_speed],
status={Attribute.switch: "on", Attribute.fan_speed: 2},
)
await setup_platform(hass, FAN_DOMAIN, devices=[device])
# Act
await hass.services.async_call(
"fan", "turn_off", {"entity_id": "fan.fan_1"}, blocking=True
)
# Assert
state = hass.states.get("fan.fan_1")
assert state is not None
assert state.state == "off"
async def test_turn_on(hass, device_factory):
"""Test the fan turns of successfully."""
# Arrange
device = device_factory(
"Fan 1",
capabilities=[Capability.switch, Capability.fan_speed],
status={Attribute.switch: "off", Attribute.fan_speed: 0},
)
await setup_platform(hass, FAN_DOMAIN, devices=[device])
# Act
await hass.services.async_call(
"fan", "turn_on", {ATTR_ENTITY_ID: "fan.fan_1"}, blocking=True
)
# Assert
state = hass.states.get("fan.fan_1")
assert state is not None
assert state.state == "on"
async def test_turn_on_with_speed(hass, device_factory):
"""Test the fan turns on to the specified speed."""
# Arrange
device = device_factory(
"Fan 1",
capabilities=[Capability.switch, Capability.fan_speed],
status={Attribute.switch: "off", Attribute.fan_speed: 0},
)
await setup_platform(hass, FAN_DOMAIN, devices=[device])
# Act
await hass.services.async_call(
"fan",
"turn_on",
{ATTR_ENTITY_ID: "fan.fan_1", ATTR_SPEED: SPEED_HIGH},
blocking=True,
)
# Assert
state = hass.states.get("fan.fan_1")
assert state is not None
assert state.state == "on"
assert state.attributes[ATTR_SPEED] == SPEED_HIGH
async def test_set_speed(hass, device_factory):
"""Test setting to specific fan speed."""
# Arrange
device = device_factory(
"Fan 1",
capabilities=[Capability.switch, Capability.fan_speed],
status={Attribute.switch: "off", Attribute.fan_speed: 0},
)
await setup_platform(hass, FAN_DOMAIN, devices=[device])
# Act
await hass.services.async_call(
"fan",
"set_speed",
{ATTR_ENTITY_ID: "fan.fan_1", ATTR_SPEED: SPEED_HIGH},
blocking=True,
)
# Assert
state = hass.states.get("fan.fan_1")
assert state is not None
assert state.state == "on"
assert state.attributes[ATTR_SPEED] == SPEED_HIGH
async def test_update_from_signal(hass, device_factory):
"""Test the fan updates when receiving a signal."""
# Arrange
device = device_factory(
"Fan 1",
capabilities=[Capability.switch, Capability.fan_speed],
status={Attribute.switch: "off", Attribute.fan_speed: 0},
)
await setup_platform(hass, FAN_DOMAIN, devices=[device])
await device.switch_on(True)
# Act
async_dispatcher_send(hass, SIGNAL_SMARTTHINGS_UPDATE, [device.device_id])
# Assert
await hass.async_block_till_done()
state = hass.states.get("fan.fan_1")
assert state is not None
assert state.state == "on"
async def test_unload_config_entry(hass, device_factory):
"""Test the fan is removed when the config entry is unloaded."""
# Arrange
device = device_factory(
"Fan 1",
capabilities=[Capability.switch, Capability.fan_speed],
status={Attribute.switch: "off", Attribute.fan_speed: 0},
)
config_entry = await setup_platform(hass, FAN_DOMAIN, devices=[device])
# Act
await hass.config_entries.async_forward_entry_unload(config_entry, "fan")
# Assert
assert hass.states.get("fan.fan_1").state == STATE_UNAVAILABLE
| 31.953125
| 88
| 0.683293
|
7380d837740ad063089328c0370d3c058c9424e4
| 1,227
|
py
|
Python
|
delete.py
|
stephen-eades/auto-delete-tweets
|
c4dae327d4b6394c1add1323ae36636978c37d88
|
[
"MIT"
] | null | null | null |
delete.py
|
stephen-eades/auto-delete-tweets
|
c4dae327d4b6394c1add1323ae36636978c37d88
|
[
"MIT"
] | 1
|
2020-12-22T22:34:05.000Z
|
2021-04-05T17:14:23.000Z
|
delete.py
|
stephen-eades/auto-delete-tweets
|
c4dae327d4b6394c1add1323ae36636978c37d88
|
[
"MIT"
] | null | null | null |
import tweepy
from datetime import datetime, timedelta
import sys
from os import environ
# Delete tweets older than 365 days that are not retweets and have not been favorited by the user
def delete_old_tweets():
twitter_user = 'stephen_eades'
consumer_key = environ['API_KEY']
consumer_secret_key = environ['API_SECRET_KEY']
access_token = environ['ACCESS_TOKEN']
access_token_secret = environ['ACCESS_TOKEN_SECRET']
# Authorize with the Twitter API
auth = tweepy.OAuthHandler(consumer_key, consumer_secret_key)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Loop through all tweets
for status in tweepy.Cursor(api.user_timeline, screen_name=api.get_user(twitter_user).screen_name).items():
        # If the tweet is more than 365 days old
if datetime.today() - timedelta(days=365) > status.created_at:
            # If the tweet is not a retweet and has not been favorited by the account
if not hasattr(status, 'retweeted_status') and status.favorited == False:
# Delete it
try:
api.destroy_status(status.id)
                except Exception:
print("Error while deleting tweet:", status.id)
| 37.181818
| 111
| 0.671557
|
5e2b8d7dd21f748eb4181df86914d8464cc78977
| 786
|
py
|
Python
|
account/forms.py
|
maretaatmadja/django_patronus
|
fe34e1a7c40b535165ef4e752adb61e4c41c32ae
|
[
"MIT"
] | null | null | null |
account/forms.py
|
maretaatmadja/django_patronus
|
fe34e1a7c40b535165ef4e752adb61e4c41c32ae
|
[
"MIT"
] | 8
|
2021-03-30T13:46:40.000Z
|
2022-03-12T00:35:09.000Z
|
account/forms.py
|
maretaatmadja/django_patronus
|
fe34e1a7c40b535165ef4e752adb61e4c41c32ae
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import get_user_model
from django.forms import ModelForm, DateInput
from .models import Profile
class MyUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
fields = ('username', 'first_name', 'last_name', 'email')
class MyDateInput(DateInput):
input_type = 'date'
class UserUpdateForm(ModelForm):
class Meta:
model = get_user_model()
fields = ('first_name', 'last_name', 'email')
class ProfileUpdateForm(ModelForm):
class Meta:
model = Profile
fields = ('date_of_birth', 'photo')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['date_of_birth'].widget = MyDateInput()
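# Hedged usage sketch, not part of the original module: a profile-editing view would
# typically bind both forms to the same request, roughly as below. The view name and
# the request.user.profile relation are assumptions about the surrounding project.
def _example_profile_view(request):
    user_form = UserUpdateForm(request.POST or None, instance=request.user)
    profile_form = ProfileUpdateForm(request.POST or None, request.FILES or None,
                                     instance=request.user.profile)
    if user_form.is_valid() and profile_form.is_valid():
        user_form.save()
        profile_form.save()
    return user_form, profile_form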
| 31.44
| 65
| 0.694656
|
0d38a3828531c9c383900b5000d03f570d8e3331
| 3,310
|
py
|
Python
|
transformer/plugins/test_resolve.py
|
jsabak/Transformer
|
c96ff1bed109f114f7c69143c85ee362e5f518d1
|
[
"MIT"
] | null | null | null |
transformer/plugins/test_resolve.py
|
jsabak/Transformer
|
c96ff1bed109f114f7c69143c85ee362e5f518d1
|
[
"MIT"
] | null | null | null |
transformer/plugins/test_resolve.py
|
jsabak/Transformer
|
c96ff1bed109f114f7c69143c85ee362e5f518d1
|
[
"MIT"
] | null | null | null |
import logging
import random
import sys
import uuid
from pathlib import Path
from types import ModuleType
import pytest
from hypothesis import given
from hypothesis._strategies import permutations
from transformer.plugins.contracts import plugin, Contract
from .resolve import load_plugins_from_module, resolve, NoPluginError
@pytest.fixture()
def module_root(tmp_path: Path, monkeypatch) -> Path:
monkeypatch.setattr(sys, "path", [str(tmp_path), *sys.path])
return tmp_path
class TestResolve:
def test_raises_for_module_not_found(self):
modname = f"that_module_does_not_exist.{uuid.uuid4().hex}"
with pytest.raises(ImportError):
list(resolve(modname)) # must force evaluation of the generator
def test_calls_load_plugins_from_module_with_module(self, module_root: Path):
modname = "ab.cd.ef"
modpath = Path(*modname.split(".")).with_suffix(".py")
Path(module_root, modpath.parent).mkdir(parents=True)
with Path(module_root, modpath).open("w") as f:
f.write("from transformer.plugins.contracts import plugin, Contract\n")
f.write("@plugin(Contract.OnTask)\n")
f.write("def f(t):\n")
f.write(" ...\n")
f.write("def helper(t):\n")
f.write(" ...\n")
plugins = list(resolve(modname))
assert len(plugins) == 1
f = plugins[0]
assert callable(f)
assert f.__name__ == "f"
def test_resolve_is_exported_by_the_transformer_plugins_module(self):
try:
from transformer.plugins import resolve
except ImportError:
pytest.fail("resolve should be exported by transformer.plugins")
@pytest.fixture()
def module() -> ModuleType:
"""Creates and returns an empty module."""
return ModuleType(f"fake_{random.randint(0, 99999999)}")
class TestLoadPluginsFromModule:
def test_raises_error_for_non_module(self):
class A:
pass
with pytest.raises(TypeError):
# Iterators are lazy, we need list()
list(load_plugins_from_module(A))
def not_a_plugin(_):
...
def plugin_not_a_plugin_either(_):
...
@plugin(Contract.OnTask)
def plugin_valid(_):
...
@given(permutations((not_a_plugin, plugin_not_a_plugin_either, plugin_valid)))
def test_ignores_non_plugin_stuff_in_module(self, module, caplog, functions):
for f in functions:
module.__dict__[f.__name__] = f
caplog.clear()
caplog.set_level(logging.DEBUG)
plugins = list(load_plugins_from_module(module))
plugin_valid = next(f for f in functions if f.__name__ == "plugin_valid")
assert plugins == [plugin_valid]
non_plugin_functions = {f for f in functions if f is not plugin_valid}
print(f">>> log messages: {caplog.messages}")
for f in non_plugin_functions:
assert any(
f.__name__ in msg for msg in caplog.messages
), "ignored function names should be logged"
def test_raises_for_modules_without_any_plugin(self, module):
with pytest.raises(NoPluginError, match=module.__name__):
# must force evaluation of the generator
list(load_plugins_from_module(module))
| 32.772277
| 83
| 0.662236
|
25b1d4bb7815373bb6aaa494f729e73cbb8d0c5a
| 1,696
|
py
|
Python
|
nomadgram/images/models.py
|
Yesdoing/nomadgram
|
a78b48de15d7897c0ee959ae8ee8e92eb991eb88
|
[
"MIT"
] | null | null | null |
nomadgram/images/models.py
|
Yesdoing/nomadgram
|
a78b48de15d7897c0ee959ae8ee8e92eb991eb88
|
[
"MIT"
] | 17
|
2020-06-05T16:49:27.000Z
|
2022-03-11T23:26:03.000Z
|
nomadgram/images/models.py
|
Yesdoing/nomadgram
|
a78b48de15d7897c0ee959ae8ee8e92eb991eb88
|
[
"MIT"
] | null | null | null |
from django.db import models
from nomadgram.users import models as user_models
from taggit.managers import TaggableManager
from django.utils.encoding import python_2_unicode_compatible
# Create your models here.
@python_2_unicode_compatible
class TimeStampedModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
@python_2_unicode_compatible
class Image(TimeStampedModel):
""" Image Model """
file = models.ImageField()
locations = models.CharField(max_length=140)
caption = models.TextField()
creator = models.ForeignKey(user_models.User, null=True, related_name='images')
tags = TaggableManager()
@property
def like_count(self):
return self.likes.all().count()
@property
def comment_count(self):
return self.comments.all().count()
def __str__(self):
return '{} - {}'.format(self.locations, self.caption)
class Meta:
ordering = ['-created_at']
@python_2_unicode_compatible
class Comment(TimeStampedModel):
""" Comment Model """
message = models.TextField()
creator = models.ForeignKey(user_models.User, null=True)
image = models.ForeignKey(Image, null=True, related_name='comments')
def __str__(self):
return self.message
@python_2_unicode_compatible
class Like(TimeStampedModel):
""" Like Model """
creator = models.ForeignKey(user_models.User, null=True)
image = models.ForeignKey(Image, null=True, related_name='likes')
def __str__(self):
return 'User: {} - Image Caption: {}'.format(self.creator.username, self.image.caption)
| 27.354839
| 95
| 0.711675
|
ba397a5de902e7ff73909a6ec3837eb46cd0c6be
| 7,544
|
py
|
Python
|
lib/models/cell_searchs/search_cells.py
|
wangguangyuan/AutoDL-Projects
|
1ce3249a5a58af3506b8c9af977008ddf8198445
|
[
"MIT"
] | 1
|
2020-01-19T01:28:07.000Z
|
2020-01-19T01:28:07.000Z
|
lib/models/cell_searchs/search_cells.py
|
wangguangyuan/AutoDL-Projects
|
1ce3249a5a58af3506b8c9af977008ddf8198445
|
[
"MIT"
] | null | null | null |
lib/models/cell_searchs/search_cells.py
|
wangguangyuan/AutoDL-Projects
|
1ce3249a5a58af3506b8c9af977008ddf8198445
|
[
"MIT"
] | null | null | null |
##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################
import math, random, torch
import warnings
import torch.nn as nn
import torch.nn.functional as F
from copy import deepcopy
from ..cell_operations import OPS
# This module is used for NAS-Bench-201, represents a small search space with a complete DAG
class NAS201SearchCell(nn.Module):
def __init__(self, C_in, C_out, stride, max_nodes, op_names, affine=False, track_running_stats=True):
super(NAS201SearchCell, self).__init__()
self.op_names = deepcopy(op_names)
self.edges = nn.ModuleDict()
self.max_nodes = max_nodes
self.in_dim = C_in
self.out_dim = C_out
for i in range(1, max_nodes):
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
if j == 0:
xlists = [OPS[op_name](C_in , C_out, stride, affine, track_running_stats) for op_name in op_names]
else:
xlists = [OPS[op_name](C_in , C_out, 1, affine, track_running_stats) for op_name in op_names]
self.edges[ node_str ] = nn.ModuleList( xlists )
self.edge_keys = sorted(list(self.edges.keys()))
self.edge2index = {key:i for i, key in enumerate(self.edge_keys)}
self.num_edges = len(self.edges)
def extra_repr(self):
string = 'info :: {max_nodes} nodes, inC={in_dim}, outC={out_dim}'.format(**self.__dict__)
return string
def forward(self, inputs, weightss):
nodes = [inputs]
for i in range(1, self.max_nodes):
inter_nodes = []
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
weights = weightss[ self.edge2index[node_str] ]
inter_nodes.append( sum( layer(nodes[j]) * w for layer, w in zip(self.edges[node_str], weights) ) )
nodes.append( sum(inter_nodes) )
return nodes[-1]
# GDAS
def forward_gdas(self, inputs, hardwts, index):
nodes = [inputs]
for i in range(1, self.max_nodes):
inter_nodes = []
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
weights = hardwts[ self.edge2index[node_str] ]
argmaxs = index[ self.edge2index[node_str] ].item()
weigsum = sum( weights[_ie] * edge(nodes[j]) if _ie == argmaxs else weights[_ie] for _ie, edge in enumerate(self.edges[node_str]) )
inter_nodes.append( weigsum )
nodes.append( sum(inter_nodes) )
return nodes[-1]
# joint
def forward_joint(self, inputs, weightss):
nodes = [inputs]
for i in range(1, self.max_nodes):
inter_nodes = []
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
weights = weightss[ self.edge2index[node_str] ]
#aggregation = sum( layer(nodes[j]) * w for layer, w in zip(self.edges[node_str], weights) ) / weights.numel()
aggregation = sum( layer(nodes[j]) * w for layer, w in zip(self.edges[node_str], weights) )
inter_nodes.append( aggregation )
nodes.append( sum(inter_nodes) )
return nodes[-1]
# uniform random sampling per iteration, SETN
def forward_urs(self, inputs):
nodes = [inputs]
for i in range(1, self.max_nodes):
      while True: # to avoid selecting the zero op for every incoming edge
sops, has_non_zero = [], False
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
candidates = self.edges[node_str]
select_op = random.choice(candidates)
sops.append( select_op )
if not hasattr(select_op, 'is_zero') or select_op.is_zero is False: has_non_zero=True
if has_non_zero: break
inter_nodes = []
for j, select_op in enumerate(sops):
inter_nodes.append( select_op(nodes[j]) )
nodes.append( sum(inter_nodes) )
return nodes[-1]
# select the argmax
def forward_select(self, inputs, weightss):
nodes = [inputs]
for i in range(1, self.max_nodes):
inter_nodes = []
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
weights = weightss[ self.edge2index[node_str] ]
inter_nodes.append( self.edges[node_str][ weights.argmax().item() ]( nodes[j] ) )
#inter_nodes.append( sum( layer(nodes[j]) * w for layer, w in zip(self.edges[node_str], weights) ) )
nodes.append( sum(inter_nodes) )
return nodes[-1]
# forward with a specific structure
def forward_dynamic(self, inputs, structure):
nodes = [inputs]
for i in range(1, self.max_nodes):
cur_op_node = structure.nodes[i-1]
inter_nodes = []
for op_name, j in cur_op_node:
node_str = '{:}<-{:}'.format(i, j)
op_index = self.op_names.index( op_name )
inter_nodes.append( self.edges[node_str][op_index]( nodes[j] ) )
nodes.append( sum(inter_nodes) )
return nodes[-1]
class MixedOp(nn.Module):
def __init__(self, space, C, stride, affine, track_running_stats):
super(MixedOp, self).__init__()
self._ops = nn.ModuleList()
for primitive in space:
op = OPS[primitive](C, C, stride, affine, track_running_stats)
self._ops.append(op)
def forward_gdas(self, x, weights, index):
return self._ops[index](x) * weights[index]
def forward_darts(self, x, weights):
return sum(w * op(x) for w, op in zip(weights, self._ops))
# Learning Transferable Architectures for Scalable Image Recognition, CVPR 2018
class NASNetSearchCell(nn.Module):
def __init__(self, space, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev, affine, track_running_stats):
super(NASNetSearchCell, self).__init__()
self.reduction = reduction
self.op_names = deepcopy(space)
if reduction_prev: self.preprocess0 = OPS['skip_connect'](C_prev_prev, C, 2, affine, track_running_stats)
else : self.preprocess0 = OPS['nor_conv_1x1'](C_prev_prev, C, 1, affine, track_running_stats)
self.preprocess1 = OPS['nor_conv_1x1'](C_prev, C, 1, affine, track_running_stats)
self._steps = steps
self._multiplier = multiplier
self._ops = nn.ModuleList()
self.edges = nn.ModuleDict()
for i in range(self._steps):
for j in range(2+i):
node_str = '{:}<-{:}'.format(i, j)
stride = 2 if reduction and j < 2 else 1
op = MixedOp(space, C, stride, affine, track_running_stats)
self.edges[ node_str ] = op
self.edge_keys = sorted(list(self.edges.keys()))
self.edge2index = {key:i for i, key in enumerate(self.edge_keys)}
self.num_edges = len(self.edges)
def forward_gdas(self, s0, s1, weightss, indexs):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
states = [s0, s1]
for i in range(self._steps):
clist = []
for j, h in enumerate(states):
node_str = '{:}<-{:}'.format(i, j)
op = self.edges[ node_str ]
weights = weightss[ self.edge2index[node_str] ]
index = indexs[ self.edge2index[node_str] ].item()
clist.append( op.forward_gdas(h, weights, index) )
states.append( sum(clist) )
return torch.cat(states[-self._multiplier:], dim=1)
def forward_darts(self, s0, s1, weightss):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
states = [s0, s1]
for i in range(self._steps):
clist = []
for j, h in enumerate(states):
node_str = '{:}<-{:}'.format(i, j)
op = self.edges[ node_str ]
weights = weightss[ self.edge2index[node_str] ]
clist.append( op.forward_darts(h, weights) )
states.append( sum(clist) )
return torch.cat(states[-self._multiplier:], dim=1)
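# Hedged usage sketch, not part of the original module: the search cells expect one
# weight row per edge, indexed through edge2index. Channel sizes and the op list
# below are illustrative; the op names must exist in OPS.
def _example_nas201_forward():
  space = ['none', 'skip_connect', 'nor_conv_3x3']
  cell = NAS201SearchCell(16, 16, stride=1, max_nodes=4, op_names=space)
  alphas = torch.softmax(torch.randn(cell.num_edges, len(space)), dim=-1)
  return cell(torch.randn(2, 16, 32, 32), alphas)  # plain weighted-sum forward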
| 38.10101
| 140
| 0.627651
|
3a55d611ed58ba8610373289576ed71daa2c6caa
| 843
|
py
|
Python
|
tests/conftest.py
|
binking/News_website
|
7f1ed5d64e46ab5001660c7efb83419bb603da6f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/conftest.py
|
binking/News_website
|
7f1ed5d64e46ab5001660c7efb83419bb603da6f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/conftest.py
|
binking/News_website
|
7f1ed5d64e46ab5001660c7efb83419bb603da6f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Defines fixtures available to all tests."""
import os
import pytest
from webtest import TestApp
from news_website.settings import TestConfig
from news_website.app import create_app
from news_website.database import db as _db
from .factories import UserFactory
@pytest.yield_fixture(scope='function')
def app():
_app = create_app(TestConfig)
ctx = _app.test_request_context()
ctx.push()
yield _app
ctx.pop()
@pytest.fixture(scope='function')
def testapp(app):
"""A Webtest app."""
return TestApp(app)
@pytest.yield_fixture(scope='function')
def db(app):
_db.app = app
with app.app_context():
_db.create_all()
yield _db
_db.drop_all()
@pytest.fixture
def user(db):
user = UserFactory(password='myprecious')
db.session.commit()
return user
| 17.5625
| 46
| 0.695136
|
178debc7dc006e79a7825337d999bbd82a056489
| 5,128
|
py
|
Python
|
tests/platforms/test_weibo.py
|
he0119/nonebot-bison
|
31f02d73dbdfcbbd4b824a9b6a1cfc9aa0fc6b09
|
[
"MIT"
] | null | null | null |
tests/platforms/test_weibo.py
|
he0119/nonebot-bison
|
31f02d73dbdfcbbd4b824a9b6a1cfc9aa0fc6b09
|
[
"MIT"
] | null | null | null |
tests/platforms/test_weibo.py
|
he0119/nonebot-bison
|
31f02d73dbdfcbbd4b824a9b6a1cfc9aa0fc6b09
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import feedparser
import pytest
import respx
from httpx import Response
from nonebug.app import App
from pytz import timezone
from .utils import get_file, get_json
@pytest.fixture
def weibo(app: App):
from nonebot_bison.platform import platform_manager
return platform_manager["weibo"]
@pytest.fixture(scope="module")
def weibo_ak_list_1():
return get_json("weibo_ak_list_1.json")
@pytest.mark.asyncio
@respx.mock
async def test_get_name(weibo):
profile_router = respx.get(
"https://m.weibo.cn/api/container/getIndex?containerid=1005056279793937"
)
profile_router.mock(
return_value=Response(200, json=get_json("weibo_ak_profile.json"))
)
name = await weibo.get_target_name("6279793937")
assert name == "明日方舟Arknights"
@pytest.mark.asyncio
@respx.mock
async def test_fetch_new(weibo, dummy_user_subinfo):
ak_list_router = respx.get(
"https://m.weibo.cn/api/container/getIndex?containerid=1076036279793937"
)
detail_router = respx.get("https://m.weibo.cn/detail/4649031014551911")
ak_list_router.mock(
return_value=Response(200, json=get_json("weibo_ak_list_0.json"))
)
detail_router.mock(
return_value=Response(200, text=get_file("weibo_detail_4649031014551911"))
)
target = "6279793937"
res = await weibo.fetch_new_post(target, [dummy_user_subinfo])
assert ak_list_router.called
assert len(res) == 0
assert not detail_router.called
mock_data = get_json("weibo_ak_list_1.json")
ak_list_router.mock(return_value=Response(200, json=mock_data))
# import ipdb; ipdb.set_trace()
res2 = await weibo.fetch_new_post(target, [dummy_user_subinfo])
assert len(res2) == 0
mock_data["data"]["cards"][1]["mblog"]["created_at"] = datetime.now(
timezone("Asia/Shanghai")
).strftime("%a %b %d %H:%M:%S %z %Y")
ak_list_router.mock(return_value=Response(200, json=mock_data))
res3 = await weibo.fetch_new_post(target, [dummy_user_subinfo])
assert len(res3[0][1]) == 1
assert not detail_router.called
post = res3[0][1][0]
assert post.target_type == "weibo"
assert post.text == "#明日方舟#\nSideStory「沃伦姆德的薄暮」复刻现已开启! "
assert post.url == "https://weibo.com/6279793937/KkBtUx2dv"
assert post.target_name == "明日方舟Arknights"
assert len(post.pics) == 1
@pytest.mark.asyncio
async def test_classification(weibo):
mock_data = get_json("weibo_ak_list_1.json")
tuwen = mock_data["data"]["cards"][1]
retweet = mock_data["data"]["cards"][3]
video = mock_data["data"]["cards"][0]
mock_data_ys = get_json("weibo_ys_list_0.json")
text = mock_data_ys["data"]["cards"][2]
assert weibo.get_category(retweet) == 1
assert weibo.get_category(video) == 2
assert weibo.get_category(tuwen) == 3
assert weibo.get_category(text) == 4
@pytest.mark.asyncio
@respx.mock
async def test_parse_long(weibo):
detail_router = respx.get("https://m.weibo.cn/detail/4645748019299849")
detail_router.mock(
return_value=Response(200, text=get_file("weibo_detail_4645748019299849"))
)
raw_post = get_json("weibo_ak_list_1.json")["data"]["cards"][0]
post = await weibo.parse(raw_post)
    assert "全文" not in post.text
assert detail_router.called
def test_tag(weibo, weibo_ak_list_1):
raw_post = weibo_ak_list_1["data"]["cards"][0]
assert weibo.get_tags(raw_post) == ["明日方舟", "音律联觉"]
@pytest.mark.asyncio
@pytest.mark.compare
async def test_rsshub_compare(weibo):
target = "6279793937"
raw_posts = filter(weibo.filter_platform_custom, await weibo.get_sub_list(target))
posts = []
for raw_post in raw_posts:
posts.append(await weibo.parse(raw_post))
url_set = set(map(lambda x: x.url, posts))
feedres = feedparser.parse("https://rsshub.app/weibo/user/6279793937")
for entry in feedres.entries[:5]:
# print(entry)
assert entry.link in url_set
test_post = {
"mblog": {
"text": '<a href="https://m.weibo.cn/search?containerid=231522type%3D1%26t%3D10%26q%3D%23%E5%88%9A%E5%87%BA%E7%94%9F%E7%9A%84%E5%B0%8F%E7%BE%8A%E9%A9%BC%E9%95%BF%E5%95%A5%E6%A0%B7%23&extparam=%23%E5%88%9A%E5%87%BA%E7%94%9F%E7%9A%84%E5%B0%8F%E7%BE%8A%E9%A9%BC%E9%95%BF%E5%95%A5%E6%A0%B7%23&luicode=10000011&lfid=1076036003966749" data-hide=""><span class="surl-text">#刚出生的小羊驼长啥样#</span></a> <br />小羊驼三三来也<span class="url-icon"><img alt=[好喜欢] src="https://h5.sinaimg.cn/m/emoticon/icon/lxh/lxh_haoxihuan-51860b62e6.png" style="width:1em; height:1em;" /></span><br /><a href="https://m.weibo.cn/p/index?extparam=%E5%B0%8F%E7%BE%8A%E9%A9%BC%E4%B8%89%E4%B8%89&containerid=1008085ae16d2046db677de1b8491d2b708597&luicode=10000011&lfid=1076036003966749" data-hide=""><span class=\'url-icon\'><img style=\'width: 1rem;height: 1rem\' src=\'https://n.sinaimg.cn/photo/5213b46e/20180926/timeline_card_small_super_default.png\'></span><span class="surl-text">小羊驼三三</span></a> ',
"bid": "KnssqeqKK",
}
}
def test_chaohua_tag(weibo):
tags = weibo.get_tags(test_post)
assert "刚出生的小羊驼长啥样" in tags
assert "小羊驼三三超话" in tags
| 37.985185
| 975
| 0.704563
|
44ebdea987783db7943a76eff8283118cc24382b
| 19,887
|
py
|
Python
|
tf2onnx/rewriter/loop_rewriter_base.py
|
natke/tensorflow-onnx
|
af083a2e070d67b7ca47e9babe7ff6938b169176
|
[
"MIT"
] | 1
|
2021-10-13T05:38:36.000Z
|
2021-10-13T05:38:36.000Z
|
tf2onnx/rewriter/loop_rewriter_base.py
|
daquexian/tensorflow-onnx
|
cb016ef5b2483b78b0c0ceea23652d4a6a142cf0
|
[
"MIT"
] | null | null | null |
tf2onnx/rewriter/loop_rewriter_base.py
|
daquexian/tensorflow-onnx
|
cb016ef5b2483b78b0c0ceea23652d4a6a142cf0
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""
tf2onnx.rewriter.loop_rewriter_base
"""
from __future__ import division
from __future__ import print_function
import logging
from collections import OrderedDict
from tf2onnx import utils
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
from tf2onnx.utils import is_tf_loopcond_op, is_tf_tensor_array_op
from tf2onnx.utils import is_tf_tensor_array_gather_op, is_tf_tensor_array_write_op
from tf2onnx.rewriter.rnn_utils import REWRITER_RESULT
from tf2onnx.utils import TensorValueInfo
logger = logging.getLogger(__name__)
INVALID_INPUT_ID = utils.make_name("invalid_input_id")
# todo(pengwa) remove protected-access with changes to Graph/Node later.
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,protected-access
class Context(object):
def __init__(self):
self.while_context_scope = None
self.loop_properties = LoopProperties()
self.loop_cond = None
self.cell_graph = None # GraphInfo of cell graph
self.cond_graph = None # GraphInfo of condition graph
class GraphInfo(object):
def __init__(self, ops, inputs, outputs):
self.nodes = ops
self.inputs = inputs # list of TensorValueInfo in order
self.outputs = outputs # list of TensorValueInfo in order
self.dependent_vars = None
class LoopProperties(object):
def __init__(self):
# use enter name as key, they are initial inputs.
# we don't use enter_input_id because it might be
        # used as the initial input for more than one Enter node.
self.state_variables = OrderedDict()
self.scan_variables = OrderedDict()
self.tensor_array_inputs = [] # list of type InputTensorArray
def add_variable(self, var):
utils.make_sure(var.enter_name not in self.scan_variables,
"variable %s already exists as scan variable.", var.enter_name)
utils.make_sure(var.enter_name not in self.state_variables,
"variable %s already exists as state variable.", var.enter_name)
if not var.is_tensor_array:
self.state_variables[var.enter_name] = var
else:
self.scan_variables[var.enter_name] = var
def get_variables(self, checker):
if not checker:
return self.all_variables.values()
return [v for v in self.all_variables.values() if checker(v)]
@property
def all_variables(self):
items = self.state_variables.copy()
items.update(self.scan_variables)
return items
    # state inputs and outputs come in pairs; even though some outputs do not depend on the corresponding input,
    # we leave the input id as None in that case.
@property
def state_inputs(self):
return [v.switch_true_identity_output for v in self.state_variables.values()]
@property
def state_inputs_initial_values(self):
return [v.enter_input_id for v in self.state_variables.values()]
@property
def state_outputs(self):
return [v.next_iteration_input for v in self.state_variables.values()]
@property
def state_outputs_exits(self):
return [v.exit_output for v in self.state_variables.values()]
# scan output (e.g. tensor array) won't be used by next iteration calculation
@property
def scan_outputs(self):
return [v.next_iteration_input for v in self.scan_variables.values()]
@property
def scan_outputs_exits(self):
return [v.exit_output for v in self.scan_variables.values()]
# treat input tensor array as scan inputs
def add_scan_input(self, input_tensor_array):
self.tensor_array_inputs.append(input_tensor_array)
# usually it is called TensorArrayReadV3
@property
def scan_inputs(self):
return [i.consumer for i in self.tensor_array_inputs]
@property
def scan_inputs_initial_values(self):
return [i.data_input_id for i in self.tensor_array_inputs]
class LoopVariable(object):
"""In TensorFlow loop, all loop variables are listed both in iteration body graph's inputs, and outputs.
Loop (state variable 1, state variable 2) {
# do the calculation
        # the updated state variable 1 does not necessarily depend only on state variable 1; it might depend
        # on 0, 1 or more state variables.
        # So if it depends on no state variable, then switch_true_identity_output.id is None. In this case,
# during conversion, a fake input for ONNX Loop body graph is created, but not consumed by any node.
return (updated) state variable 1, (updated) state variable 2, scan variable 1, scan variable 2
}
Here we take the perspective of body graph's outputs:
1. start from the iteration body graph's output (e.g. next_iteration_input.id)
    2. find the body graph generating it (those nodes between NextIteration and Switch)
3. find the variable initial value (e.g. enter_input_id)
4. check whether it is a tensor array
5. the body graph output might go to next iteration as corresponding input
(e.g. switch_true_identity_output.id).
"""
def __init__(self, enter_name, enter_input_id, next_iteration_input_id,
switch_true_identity_output_id, exit_output_id, is_tensor_array, ta_index_id, g):
self.enter_name = enter_name
self.enter_input_id = enter_input_id
# the output of iteration body graph for this variable
# should not be None
utils.make_sure(next_iteration_input_id, "next_iteration_input_id should not be None")
self.next_iteration_input = TensorValueInfo(next_iteration_input_id, g)
# the starting point of iteration body graph,
# might be None when this variable value (either initial value or last iteration output value)
        # is not consumed by iteration body graph nodes.
self.switch_true_identity_output = TensorValueInfo(switch_true_identity_output_id, g)
# the switch_false branch is ended with Exit, which is a boundary for the loop,
# might be None when no consumers for the variable output.
self.exit_output = TensorValueInfo(exit_output_id, g)
# only applicable for tensor array variable
self.is_tensor_array = is_tensor_array
        # todo: need to check that ta's index variable is a scalar starting from 1, increasing by 1 each iteration.
# then we can be sure this is equivalent to scan output behavior.
self.ta_index_id = ta_index_id
class InputTensorArray(object):
def __init__(self, data_input_id, index_input_id, consumer_id, g):
self.index_input_id = index_input_id
self.data_input_id = data_input_id
# tensor array is unstacked before being used in loop, consumer_id is the node
# (in the iteration body graph) consuming one of the element of tensor array.
self.consumer = TensorValueInfo(consumer_id, g)
class LoopRewriterBase(object):
def __init__(self, g):
self.g = g
self.ta_read_input_pattern = \
OpTypePattern("TensorArrayReadV3", name="ta_read", inputs=[
OpTypePattern("Enter", name="ta_enter", inputs=[
OpTypePattern("TensorArrayV3")
]),
OpTypePattern("Identity", name="ta_index"),
OpTypePattern("Enter", name="ta_scatter_enter", inputs=[
OpTypePattern("TensorArrayScatterV3", name="ta_input_scatter")
]),
])
def create_context(self):
return Context()
def need_rewrite(self, context):
return False
def rewrite(self, context):
return REWRITER_RESULT.FAIL
def run_internal(self):
loopcond_ops = []
for op in self.g.get_nodes():
if is_tf_loopcond_op(op):
loopcond_ops.append(op)
# self.g.get_nodes may change inside this loop so that we parse all LoopCond first
for op in loopcond_ops:
logger.debug("======================\n handling loop cond node called %s", op.name)
context = self.create_context()
context.loop_cond = op
self._check_in_read_only_mode(context)
if self.need_rewrite(context):
# cut off connection between cell/cond graphs and useless nodes like Merge, NextIteration.
self._cut_off_connection_for_cell(context)
context.cell_graph = self._crop_loop_body_sub_graph(context)
context.cond_graph = self._crop_loop_condition_sub_graph(context)
_result = self.rewrite(context)
if _result == REWRITER_RESULT.OK:
logger.debug("rewrite successfully")
elif _result == REWRITER_RESULT.SKIP:
logger.debug("rewrite skipped for LoopCond called %s", op.name)
continue
elif _result == REWRITER_RESULT.FAIL:
raise ValueError("rewrite failed, so just fast fail it")
if self.g.outputs:
# clean the graph based on output names.
self.g.delete_unused_nodes(self.g.outputs)
return self.g.get_nodes()
def _check_in_read_only_mode(self, context):
self._parse_loop_variables(context)
self._parse_input_ta(context)
def _parse_loop_variables(self, context):
loop_cond_op = context.loop_cond
parts = loop_cond_op.name.split('/')
context.while_context_scope = '/'.join(parts[0:-1]) + "/"
logger.debug("found while loop scope %s", context.while_context_scope)
switch_nodes = self.g.find_output_consumers(loop_cond_op.output[0])
for s in switch_nodes:
if s.type != 'Switch':
raise ValueError("LoopCond's output node should be followed with a Switch node")
loop_var = self._get_loop_var_from_switch(s)
context.loop_properties.add_variable(loop_var)
def _parse_input_ta(self, context):
graph_inputs = [v.switch_true_identity_output.id for v in context.loop_properties.all_variables.values()
if v.switch_true_identity_output.id]
matcher = GraphMatcher(self.ta_read_input_pattern, allow_reorder=False)
match_results = matcher.match_ops(self.g.get_nodes())
match_results = [r for r in match_results if r.get_op("ta_index").output[0] in graph_inputs]
for match in match_results:
ta_input_scatter = match.get_op("ta_input_scatter")
# the 3rd input of scatter is the value
data_input_id = ta_input_scatter.input[2]
ta_read_node = match.get_op("ta_read")
            # todo: need to check that ta's index variable is a scalar starting from 1, increasing by 1 each iteration.
# then we can be sure this is equivalent to scan input behavior.
index_input_id = ta_read_node.input[1]
unstacked_ta_consumer = match.get_op("ta_read").output[0]
ta = InputTensorArray(data_input_id, index_input_id, unstacked_ta_consumer, self.g)
context.loop_properties.add_scan_input(ta)
def _crop_loop_body_sub_graph(self, context):
# according to input and output, find the body graph
loop_props = context.loop_properties
inputs = loop_props.state_inputs + loop_props.scan_inputs
input_ids = [input_tensor_value_info.id for input_tensor_value_info in inputs]
outputs = loop_props.state_outputs + loop_props.scan_outputs
output_ids = [out_tensor_value_info.id for out_tensor_value_info in outputs]
ops, enter_nodes, _ = self.find_subgraph(set(input_ids), set(output_ids), self.g, merge_as_end=False)
for enter_node in enter_nodes:
# connect Enter's output to Enter's input
self.g.replace_all_inputs(ops, enter_node.output[0], enter_node.input[0])
return GraphInfo(ops, inputs, outputs)
def _crop_loop_condition_sub_graph(self, context):
input_ids = []
output_ids = [context.loop_cond.input[0]]
outputs = [TensorValueInfo(o, self.g) for o in output_ids]
ops, enter_nodes, merge_nodes = self.find_subgraph(set(input_ids), set(output_ids), self.g, merge_as_end=True)
for enter_node in enter_nodes:
# connect Enter's output to Enter's input
self.g.replace_all_inputs(ops, enter_node.output[0], enter_node.input[0])
dependent_vars = []
for merge_node in merge_nodes:
enter_node = [n for n in merge_node.inputs if n.type == "Enter"][0]
loop_var = context.loop_properties.all_variables[enter_node.name]
# cut off connection between condition graph and Merge node.
# replace condition graph's inputs to be cell graph's outputs, because we want condition graph
# to consumer cell graph outputs.
non_switch_consumers = [n for n in self.g.find_output_consumers(merge_node.output[0]) if n.type != "Switch"]
self.g.replace_all_inputs(non_switch_consumers, merge_node.output[0],
loop_var.next_iteration_input.id)
dependent_vars.append(loop_var)
# cut off connection between condition graph and LoopCond node.
self.g.replace_all_inputs([context.loop_cond], context.loop_cond.output[0], INVALID_INPUT_ID)
graph_info = GraphInfo(ops, [], outputs)
graph_info.dependent_vars = dependent_vars
return graph_info
def _cut_off_connection_for_cell(self, context):
for val in context.loop_properties.all_variables.values():
if val.switch_true_identity_output.id:
# remove the node to cut off a starting node of the cell (e.g. loop body).
n = self.g.get_node_by_output(val.switch_true_identity_output.id)
self.g.remove_node(n.name)
if val.is_tensor_array:
# connect NextIteration to an invalid node, to cut off an ending node of the cell.
ta_write_nodes = [n for n in self.g.get_nodes() if is_tf_tensor_array_write_op(n)]
self.g.replace_all_inputs(ta_write_nodes, val.next_iteration_input.id, INVALID_INPUT_ID)
else:
# connect NextIteration to an invalid node, to cut off an ending node of the cell.
next_iter_nodes = [n for n in self.g.get_nodes() if n.type == "NextIteration"]
self.g.replace_all_inputs(next_iter_nodes, val.next_iteration_input.id, INVALID_INPUT_ID)
for scan_input in context.loop_properties.scan_inputs:
# remove the node to cut off connection between scan_input and the cell.
self.g.remove_node(self.g.get_node_by_output(scan_input.id).name)
def _get_loop_var_from_switch(self, switch_node):
if switch_node.type != 'Switch':
logger.error("not a switch node, skip")
return None
# the first input is data
merge_node = switch_node.inputs[0]
if merge_node.type != "Merge":
logger.error("switch node does not has Merge as its first input")
return None
# find the output_true consumers
switch_consumers = self.g.find_output_consumers(switch_node.output[1])
switch_true_consumer_cnt = len(switch_consumers)
if switch_true_consumer_cnt == 0:
switch_true_identity_output = None
elif switch_true_consumer_cnt == 1:
if switch_consumers[0].type == "Identity":
switch_true_identity_output = switch_consumers[0].output[0]
else:
# using grappler there is not necessarily an identity behind switch
switch_true_identity_output = switch_node.output[1]
else:
raise ValueError("switch_true " + switch_node.name + " has unexpected count of consumers:",
[n.name for n in switch_consumers])
target_node_input_id = None
enter_node = [n for n in merge_node.inputs if n.type == 'Enter'][0]
target_node_input_id = enter_node.input[0]
logger.debug("a Switch >> Merge >> Enter is found called %s", enter_node.inputs[0].name)
next_iteration_node = [n for n in merge_node.inputs if n.type == 'NextIteration'][0]
last_iteration_output_id = next_iteration_node.input[0]
# find the output_false consumers to see whether there is consumer for this var
switch_false_consumers = self.g.find_output_consumers(switch_node.output[0])
false_consumer_count = len(switch_false_consumers)
exit_output_id = None
if false_consumer_count == 1:
exit_node = switch_false_consumers[0]
if exit_node.type != "Exit":
raise ValueError("switch false branch is followed by non-Exit")
exit_output_id = exit_node.output[0]
elif false_consumer_count == 0:
            # sometimes, the variable output won't be used as input in the next iteration.
exit_output_id = None
else:
raise ValueError("unexpected number of switch false consumers")
is_ta = False
ta_index_id = None
if is_tf_tensor_array_op(self.g.get_node_by_output(target_node_input_id)):
is_ta = True
ta_write_node = self.g.get_node_by_output(last_iteration_output_id)
utils.make_sure(is_tf_tensor_array_write_op(ta_write_node), "ta nextiteration is not following ta write op")
last_iteration_output_id = ta_write_node.input[2]
ta_index_id = ta_write_node.input[1]
# here we parse patterns generated by
# ta.write(), then ta.stack(), because this is the most frequent usage pattern.
if exit_output_id:
exit_consumers = self.g.find_output_consumers(exit_output_id)
ta_gather_node = [n for n in exit_consumers if is_tf_tensor_array_gather_op(n)][0]
# update exit output id, treat the gather output as ta's output
exit_output_id = ta_gather_node.output[0]
loop_var = LoopVariable(enter_node.name, target_node_input_id, last_iteration_output_id,
switch_true_identity_output, exit_output_id, is_ta, ta_index_id, self.g)
return loop_var
@staticmethod
def find_subgraph(input_ids, output_ids, g, merge_as_end=False):
logger.debug("input ids %s ", input_ids)
logger.debug("output ids %s ", output_ids)
enter_nodes = set()
merge_nodes = set()
def find_input_boundary(node):
if node.type == "Enter":
enter_nodes.add(node)
logger.debug("terminate the input search at %s", node.name)
return False
if merge_as_end is True and node.type == "Merge":
merge_nodes.add(node)
logger.debug("terminate the input search at %s", node.name)
return False
if node.is_const():
logger.debug("terminate search at const node %s", node.name)
return False
for o in node.output:
if o in input_ids:
return False
return True
nodes = g.extract_sub_graph_nodes(output_ids, input_checker=find_input_boundary)
return nodes, enter_nodes, merge_nodes
@staticmethod
def construct_graph_from_nodes(parent_g, nodes, outputs):
return utils.construct_graph_from_nodes(
parent_g,
nodes,
[out.id for out in outputs],
[out.shape for out in outputs],
[out.dtype for out in outputs]
)
| 44.689888
| 120
| 0.663851
|
4bf6853dc91f970b10de065c232b2b60375be661
| 29,879
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_bios.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_bios.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_bios.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Dell EMC OpenManage Ansible Modules
# Version 3.0.0
# Copyright (C) 2018-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_bios
from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
from ansible_collections.dellemc.openmanage.tests.unit.compat.mock import MagicMock, patch, Mock
from ansible_collections.dellemc.openmanage.tests.unit.compat.mock import PropertyMock
from io import StringIO
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from pytest import importorskip
importorskip("omsdk.sdkfile")
importorskip("omsdk.sdkcreds")
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
class TestConfigBios(FakeAnsibleModule):
module = idrac_bios
@pytest.fixture
def idrac_configure_bios_mock(self):
omsdk_mock = MagicMock()
idrac_obj = MagicMock()
omsdk_mock.file_share_manager = idrac_obj
omsdk_mock.config_mgr = idrac_obj
return idrac_obj
@pytest.fixture
def idrac_file_manager_config_bios_mock(self, mocker):
try:
file_manager_obj = mocker.patch(
MODULE_PATH + 'idrac_bios.file_share_manager')
except AttributeError:
file_manager_obj = MagicMock()
obj = MagicMock()
file_manager_obj.create_share_obj.return_value = obj
return file_manager_obj
@pytest.fixture
def idrac_connection_configure_bios_mock(self, mocker, idrac_configure_bios_mock):
idrac_conn_class_mock = mocker.patch(MODULE_PATH +
'idrac_bios.iDRACConnection',
return_value=idrac_configure_bios_mock)
idrac_conn_class_mock.return_value.__enter__.return_value = idrac_configure_bios_mock
return idrac_configure_bios_mock
def test_main_idrac_config_bios_success_Case(self, idrac_connection_configure_bios_mock, idrac_default_args,
mocker, idrac_file_manager_config_bios_mock):
idrac_default_args.update({"share_name": "sharename"})
message = {'changed': False, 'msg': {'Status': "Success", "message": "No changes found to commit!"}}
mocker.patch(MODULE_PATH +
'idrac_bios.run_server_bios_config', return_value=message)
result = self._run_module(idrac_default_args)
assert result == {
'msg': {'changed': False, 'msg': {'Status': 'Success', 'message': 'No changes found to commit!'}},
'changed': False, 'failed': False}
@pytest.mark.parametrize("exc_type", [RuntimeError, SSLValidationError, ConnectionError, KeyError,
ImportError, ValueError, TypeError, HTTPError, URLError])
def test_main_idrac_config_bios_exception_handling_case(self, exc_type, mocker,
idrac_connection_configure_bios_mock,
idrac_default_args, idrac_file_manager_config_bios_mock):
idrac_default_args.update({"share_name": "sharename"})
json_str = to_text(json.dumps({"data": "out"}))
if exc_type not in [HTTPError, SSLValidationError]:
mocker.patch(
MODULE_PATH + 'idrac_bios.run_server_bios_config',
side_effect=exc_type('test'))
else:
mocker.patch(
MODULE_PATH + 'idrac_bios.run_server_bios_config',
side_effect=exc_type('http://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
        if exc_type != URLError:
result = self._run_module_with_fail_json(idrac_default_args)
assert result['failed'] is True
else:
result = self._run_module(idrac_default_args)
assert 'msg' in result
def test_run_idrac_bios_config_success_case01(self, idrac_connection_configure_bios_mock,
idrac_default_args, mocker, idrac_file_manager_config_bios_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "sharepassword", "boot_sources": "bootsources"})
message = {"changes_applicable": True, "message": "changes are applicable"}
mocker.patch(MODULE_PATH +
'idrac_bios._validate_params', return_value=(False, "message of validate params"))
        idrac_connection_configure_bios_mock.config_mgr.is_change_applicable.return_value = message
idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources.return_value = message
f_module = self.get_module_mock(params=idrac_default_args)
f_module.check_mode = True
msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module)
assert msg == {'changes_applicable': True, 'message': 'changes are applicable'}
def test_run_idrac_bios_config_success_case02(self, idrac_connection_configure_bios_mock, idrac_default_args,
mocker, idrac_file_manager_config_bios_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "sharepassword", "boot_sources": "bootsources"})
message = {"changes_applicable": True, "Status": "Success", "message": "changes found to commit!"}
mocker.patch(MODULE_PATH +
'idrac_bios._validate_params', return_value=(False, "message of validate params"))
        idrac_connection_configure_bios_mock.config_mgr.is_change_applicable.return_value = message
idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources.return_value = message
f_module = self.get_module_mock(params=idrac_default_args)
f_module.check_mode = False
msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module)
assert msg == {'Status': 'Success',
'changes_applicable': True,
'message': 'changes found to commit!'}
def test_run_idrac_bios_config_success_case03(self, idrac_connection_configure_bios_mock, idrac_default_args,
mocker, idrac_file_manager_config_bios_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "sharepassword", "boot_sources": "bootsources"})
message = {"changes_applicable": False, "Status": "Success", "Message": "No changes found to commit!"}
mocker.patch(MODULE_PATH +
'idrac_bios._validate_params', return_value=(False, "message of validate params"))
        idrac_connection_configure_bios_mock.config_mgr.is_change_applicable.return_value = message
idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources.return_value = message
f_module = self.get_module_mock(params=idrac_default_args)
f_module.check_mode = False
msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module)
assert msg == {'Message': 'No changes found to commit!',
'Status': 'Success',
'changes_applicable': False}
def test_run_idrac_bios_config_success_case04(self, idrac_connection_configure_bios_mock, idrac_default_args,
mocker, idrac_file_manager_config_bios_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "sharepassword", "boot_sources": "bootsources"})
message = {"changes_applicable": False, "Status": "Success", "Message": "No changes found to apply."}
mocker.patch(MODULE_PATH +
'idrac_bios._validate_params', return_value=(False, "message of validate params"))
        idrac_connection_configure_bios_mock.config_mgr.is_change_applicable.return_value = message
idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources.return_value = message
f_module = self.get_module_mock(params=idrac_default_args)
f_module.check_mode = False
msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module)
assert msg == {'Message': 'No changes found to apply.',
'Status': 'Success',
'changes_applicable': False}
def test_run_idrac_bios_config_bootmode_failed_case0(self, idrac_connection_configure_bios_mock,
idrac_default_args,
mocker, idrac_file_manager_config_bios_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "sharepassword", "boot_sources": "bootsources"})
message = {"changes_applicable": False, "Status": "failed", "Message": "No changes found to apply."}
mocker.patch(MODULE_PATH +
'idrac_bios._validate_params', return_value=(False, "message of validate params"))
        idrac_connection_configure_bios_mock.config_mgr.is_change_applicable.return_value = message
idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources.return_value = message
f_module = self.get_module_mock(params=idrac_default_args)
f_module.check_mode = False
msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module)
assert msg == {'Message': 'No changes found to apply.',
'Status': 'failed',
'changes_applicable': False}
def test_run_idrac_bios_config_errorhandle_failed_case0(self, idrac_connection_configure_bios_mock,
idrac_default_args,
mocker, idrac_file_manager_config_bios_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "sharepassword", "boot_sources": "bootsources"})
mocker.patch(MODULE_PATH +
'idrac_bios._validate_params', return_value=(True, "Error occurs"))
f_module = self.get_module_mock(params=idrac_default_args)
f_module.check_mode = False
msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module)
assert msg == idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources()
def test_run_idrac_bios_config_status_failed_case01(self, idrac_connection_configure_bios_mock, idrac_default_args,
mocker, idrac_file_manager_config_bios_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "sharepassword", "boot_sources": "bootsources"})
message = {'Status': 'Failed', 'Message': 'message of validate params'}
mocker.patch(MODULE_PATH +
'idrac_bios._validate_params', return_value=(True, "Error occurs"))
idrac_connection_configure_bios_mock.config_mgr.set_liason_share.return_value = message
f_module = self.get_module_mock(params=idrac_default_args)
f_module.check_mode = False
msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module)
assert msg == idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources()
def test_run_idrac_bios_config_status_success_case01(self, idrac_connection_configure_bios_mock, idrac_default_args,
mocker, idrac_file_manager_config_bios_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "sharepassword", "boot_sources": "bootsources",
"attributes": {"boot_mode": "BootMode", "nvme_mode": "NvmeMode"}})
message = {'Status': 'Successs', 'Message': 'message of validate params'}
mocker.patch(MODULE_PATH +
'idrac_bios._validate_params', return_value=(False, "Error did not occurs"))
idrac_connection_configure_bios_mock.config_mgr.configure_bios.return_value = message
idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources.return_value = message
f_module = self.get_module_mock(params=idrac_default_args)
f_module.check_mode = False
msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module)
assert msg == {'Message': 'message of validate params', 'Status': 'Successs'}
def test_run_bios_config_status_boot_sources_failed_case(self, idrac_connection_configure_bios_mock, mocker,
idrac_default_args, idrac_file_manager_config_bios_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "sharepassword", "boot_sources": "bootsources",
"boot_mode": "Bios",
"nvme_mode": "Raid", 'secure_boot_mode': "AuditMode",
'onetime_boot_mode': "OneTimeBootSeq",
"attributes": {"boot_mode": "BootMode", "nvme_mode": "NvmeMode"}})
message = {'Status': 'Failed', "Data": {'Message': 'message of validate params'}}
idrac_connection_configure_bios_mock.config_mgr.set_liason_share.return_value = message
f_module = self.get_module_mock(params=idrac_default_args)
msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module)
assert msg == idrac_connection_configure_bios_mock.config_mgr.configure_boot_sources()
def test_run_bios_config_status_boot_success_case(self, idrac_connection_configure_bios_mock, mocker,
idrac_default_args, idrac_file_manager_config_bios_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "sharepassword", "boot_sources": None, "boot_mode": "Bios",
"nvme_mode": "NonRaid", "secure_boot_mode": "AuditMode",
"onetime_boot_mode": "OneTimeBootSeq", "attributes": [""], "boot_sequence": None})
message = {"Status": "Success", "changes_applicable": True}
f_module = self.get_module_mock(params=idrac_default_args)
f_module.deprecate.return_value = "boot_mode, nvme_mode, secure_boot_mode, onetime_boot_mode and " \
"boot_sequence options have been deprecated, and will be removed. ' \
'Please use the attributes option for Bios attributes configuration instead."
f_module.check_mode = True
obj = MagicMock()
obj1 = MagicMock()
obj2 = MagicMock()
obj3 = MagicMock()
idrac_connection_configure_bios_mock.config_mgr.configure_boot_mode = obj
type(obj).BootModeTypes = PropertyMock(return_value="Bios")
idrac_connection_configure_bios_mock.config_mgr.configure_nvme_mode = obj1
type(obj).NvmeModeTypes = PropertyMock(return_value="NonRaid")
idrac_connection_configure_bios_mock.config_mgr.configure_secure_boot_mode = obj2
type(obj).SecureBootModeTypes = PropertyMock(return_value="AuditMode")
idrac_connection_configure_bios_mock.config_mgr.configure_onetime_boot_mode = obj3
type(obj).OneTimeBootModeTypes = PropertyMock(return_value="OneTimeBootSeq")
idrac_connection_configure_bios_mock.config_mgr.configure_bios.return_value = message
idrac_connection_configure_bios_mock.config_mgr.is_change_applicable.return_value = message
msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module)
assert msg == {'Status': 'Success', 'changes_applicable': True}
def test_run_bios_config_status_success_changed_true_case(self, idrac_connection_configure_bios_mock, mocker,
idrac_default_args, idrac_file_manager_config_bios_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "sharepassword", "boot_sources": None, "boot_mode": "Bios",
"nvme_mode": "NonRaid", "secure_boot_mode": "AuditMode",
"onetime_boot_mode": "OneTimeBootSeq", "attributes": [""], "boot_sequence": None})
message = {"Status": "Success"}
f_module = self.get_module_mock(params=idrac_default_args)
f_module.deprecate.return_value = "boot_mode, nvme_mode, secure_boot_mode, onetime_boot_mode and " \
"boot_sequence options have been deprecated, and will be removed. ' \
'Please use the attributes option for Bios attributes configuration instead."
f_module.check_mode = False
obj = MagicMock()
obj1 = MagicMock()
obj2 = MagicMock()
obj3 = MagicMock()
idrac_connection_configure_bios_mock.config_mgr.configure_boot_mode = obj
type(obj).BootModeTypes = PropertyMock(return_value="Bios")
idrac_connection_configure_bios_mock.config_mgr.configure_nvme_mode = obj1
type(obj).NvmeModeTypes = PropertyMock(return_value="NonRaid")
idrac_connection_configure_bios_mock.config_mgr.configure_secure_boot_mode = obj2
type(obj).SecureBootModeTypes = PropertyMock(return_value="AuditMode")
idrac_connection_configure_bios_mock.config_mgr.configure_onetime_boot_mode = obj3
type(obj).OneTimeBootModeTypes = PropertyMock(return_value="OneTimeBootSeq")
idrac_connection_configure_bios_mock.config_mgr.configure_bios.return_value = message
idrac_connection_configure_bios_mock.config_mgr.apply_changes.return_value = message
msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module)
assert msg == {'Status': 'Success'}
def test_run_bios_config_status_success_changed_false_case01(self, idrac_connection_configure_bios_mock, mocker,
idrac_default_args,
idrac_file_manager_config_bios_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "sharepassword", "boot_sources": None, "boot_mode": "Bios",
"nvme_mode": "NonRaid", "secure_boot_mode": "AuditMode",
"onetime_boot_mode": "OneTimeBootSeq", "attributes": [""], "boot_sequence": None})
message = {"Status": "Success", "Message": "No changes found to commit!"}
f_module = self.get_module_mock(params=idrac_default_args)
f_module.deprecate.return_value = "boot_mode, nvme_mode, secure_boot_mode, onetime_boot_mode and " \
"boot_sequence options have been deprecated, and will be removed. ' \
'Please use the attributes option for Bios attributes configuration instead."
f_module.check_mode = False
obj = MagicMock()
obj1 = MagicMock()
obj2 = MagicMock()
obj3 = MagicMock()
idrac_connection_configure_bios_mock.config_mgr.configure_boot_mode = obj
type(obj).BootModeTypes = PropertyMock(return_value="Bios")
idrac_connection_configure_bios_mock.config_mgr.configure_nvme_mode = obj1
type(obj).NvmeModeTypes = PropertyMock(return_value="NonRaid")
idrac_connection_configure_bios_mock.config_mgr.configure_secure_boot_mode = obj2
type(obj).SecureBootModeTypes = PropertyMock(return_value="AuditMode")
idrac_connection_configure_bios_mock.config_mgr.configure_onetime_boot_mode = obj3
type(obj).OneTimeBootModeTypes = PropertyMock(return_value="OneTimeBootSeq")
idrac_connection_configure_bios_mock.config_mgr.configure_bios.return_value = message
idrac_connection_configure_bios_mock.config_mgr.apply_changes.return_value = message
msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module)
assert msg == {'Message': 'No changes found to commit!', 'Status': 'Success'}
def test_run_bios_config_status_success_changed_false_case02(self, idrac_connection_configure_bios_mock, mocker,
idrac_default_args,
idrac_file_manager_config_bios_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "sharepassword", "boot_sources": None, "boot_mode": "Bios",
"nvme_mode": "NonRaid", "secure_boot_mode": "AuditMode",
"onetime_boot_mode": "OneTimeBootSeq", "attributes": [""], "boot_sequence": None})
message = {"Status": "Success", "Message": "No changes found to apply."}
f_module = self.get_module_mock(params=idrac_default_args)
f_module.deprecate.return_value = "boot_mode, nvme_mode, secure_boot_mode, onetime_boot_mode and " \
"boot_sequence options have been deprecated, and will be removed. ' \
'Please use the attributes option for Bios attributes configuration instead."
f_module.check_mode = False
obj = MagicMock()
obj1 = MagicMock()
obj2 = MagicMock()
obj3 = MagicMock()
idrac_connection_configure_bios_mock.config_mgr.configure_boot_mode = obj
type(obj).BootModeTypes = PropertyMock(return_value="Bios")
idrac_connection_configure_bios_mock.config_mgr.configure_nvme_mode = obj1
type(obj).NvmeModeTypes = PropertyMock(return_value="NonRaid")
idrac_connection_configure_bios_mock.config_mgr.configure_secure_boot_mode = obj2
type(obj).SecureBootModeTypes = PropertyMock(return_value="AuditMode")
idrac_connection_configure_bios_mock.config_mgr.configure_onetime_boot_mode = obj3
type(obj).OneTimeBootModeTypes = PropertyMock(return_value="OneTimeBootSeq")
idrac_connection_configure_bios_mock.config_mgr.configure_bios.return_value = message
idrac_connection_configure_bios_mock.config_mgr.apply_changes.return_value = message
msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module)
assert msg == {'Message': 'No changes found to apply.', 'Status': 'Success'}
def test_run_bios_config_status_attribute_failed_error_case(self, idrac_connection_configure_bios_mock, mocker,
idrac_default_args,
idrac_file_manager_config_bios_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
"share_password": "sharepassword", "boot_sources": None, "boot_mode": "Bios",
"nvme_mode": "NonRaid", "secure_boot_mode": "AuditMode",
"onetime_boot_mode": "OneTimeBootSeq", "attributes": [""], "boot_sequence": None})
message = {"Status": "Failed"}
f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
f_module.deprecate.return_value = "boot_mode, nvme_mode, secure_boot_mode, onetime_boot_mode and " \
"boot_sequence options have been deprecated, and will be removed. ' \
'Please use the attributes option for Bios attributes configuration instead."
obj = MagicMock()
obj1 = MagicMock()
obj2 = MagicMock()
obj3 = MagicMock()
idrac_connection_configure_bios_mock.config_mgr.configure_boot_mode = obj
type(obj).BootModeTypes = PropertyMock(return_value="Bios")
idrac_connection_configure_bios_mock.config_mgr.configure_nvme_mode = obj1
type(obj).NvmeModeTypes = PropertyMock(return_value="NonRaid")
idrac_connection_configure_bios_mock.config_mgr.configure_secure_boot_mode = obj2
type(obj).SecureBootModeTypes = PropertyMock(return_value="AuditMode")
idrac_connection_configure_bios_mock.config_mgr.configure_onetime_boot_mode = obj3
type(obj).OneTimeBootModeTypes = PropertyMock(return_value="OneTimeBootSeq")
idrac_connection_configure_bios_mock.config_mgr.configure_bios.return_value = message
msg = self.module.run_server_bios_config(idrac_connection_configure_bios_mock, f_module)
assert msg == idrac_connection_configure_bios_mock.config_mgr.is_change_applicable()
def test__validate_params_error_case(self, idrac_connection_configure_bios_mock, idrac_default_args,
idrac_file_manager_config_bios_mock):
idrac_default_args.update({})
attr = {"name": "Name"}
msg = self.module._validate_params(attr)
assert msg == "attribute values must be of type: {0}. name ({1}) provided.".format(dict, str)
def test__validate_params_error_keys_case(self, idrac_connection_configure_bios_mock, idrac_default_args,
idrac_file_manager_config_bios_mock, mocker):
idrac_default_args.update({})
attr = [{"name": "Name"}, {"index": "Index"}, {"enabled": "Enabled"}]
msg = self.module._validate_params(attr)
assert msg == "attribute keys must be one of the ['Name', 'Index', 'Enabled']."
def test__validate_params_check_params_case(self, idrac_connection_configure_bios_mock, mocker,
idrac_file_manager_config_bios_mock, idrac_default_args):
mocker.patch(MODULE_PATH +
'idrac_bios.check_params', return_value=(True, "Error occurs in check params"))
attr = [{"name": "name1"}, {"Index": "index1"}]
msg = self.module._validate_params(attr)
assert msg == "attribute keys must be one of the ['Name', 'Index', 'Enabled']."
def test__validate_params_empty_params_case(self, idrac_connection_configure_bios_mock, mocker,
idrac_file_manager_config_bios_mock, idrac_default_args):
mocker.patch(MODULE_PATH +
'idrac_bios._validate_name_index_duplication', return_value=(True, "Error occurs in "
"validate name"))
msg = self.module._validate_params([])
assert msg == (True, 'Error occurs in validate name')
def test__validate_name_index_duplication_error_true_case(self, idrac_connection_configure_bios_mock,
idrac_default_args):
result = self.module._validate_name_index_duplication([{"Name": "Name1"}, {"Name": "Name1"}])
assert result == 'duplicate name Name1'
def test__validate_name_index_duplication_error_false_case(self, idrac_connection_configure_bios_mock,
idrac_default_args):
result = self.module._validate_name_index_duplication([{"Name": "Name1"}, {"Name": "Name2"}])
assert result == ''
def test_check_params_false_case(self, idrac_connection_configure_bios_mock, idrac_default_args):
result = self.module.check_params({"required": False}, [{"name": "Name1", "required": False},
{"name": "Name2", "required": False}])
assert result == ''
def test_check_params_required_true_case(self, idrac_connection_configure_bios_mock, idrac_default_args):
result = self.module.check_params({"required": True},
[{"name": "Name0", "type": {}, "required": True},
{"name": "Name2", "required": False}])
assert result == 'Name0 is required and must be of type: {}'
| 70.138498
| 120
| 0.65705
|
aec8f968419a7d5a71c3eaf5a4b54168dadb7c31
| 164
|
py
|
Python
|
rsocket/datetime_helpers.py
|
Precognize/rsocket-py
|
31704d53c232e0c0f53783b9a56117e5bd0645ce
|
[
"MIT"
] | null | null | null |
rsocket/datetime_helpers.py
|
Precognize/rsocket-py
|
31704d53c232e0c0f53783b9a56117e5bd0645ce
|
[
"MIT"
] | null | null | null |
rsocket/datetime_helpers.py
|
Precognize/rsocket-py
|
31704d53c232e0c0f53783b9a56117e5bd0645ce
|
[
"MIT"
] | null | null | null |
from datetime import timedelta
def to_milliseconds(period: timedelta) -> int:
    # total_seconds() already includes the microseconds fraction, so rounding
    # the product once is enough; adding microseconds again would double-count them.
    return round(period.total_seconds() * 1000)
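A quick, self-contained illustration of the conversion (values chosen only for this sketch):
from datetime import timedelta
assert to_milliseconds(timedelta(seconds=3)) == 3000
assert to_milliseconds(timedelta(minutes=2)) == 120000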
| 27.333333
| 83
| 0.756098
|
c018b89980196e7b23508e5cb3dd490e354f62b7
| 570
|
py
|
Python
|
src/processing/IJM/lipidomics/3.0-lipidomics_processing.py
|
coongroup/MITOMICS
|
6515c23017288ee91984ba052ce6b6cc74ade60a
|
[
"MIT"
] | null | null | null |
src/processing/IJM/lipidomics/3.0-lipidomics_processing.py
|
coongroup/MITOMICS
|
6515c23017288ee91984ba052ce6b6cc74ade60a
|
[
"MIT"
] | null | null | null |
src/processing/IJM/lipidomics/3.0-lipidomics_processing.py
|
coongroup/MITOMICS
|
6515c23017288ee91984ba052ce6b6cc74ade60a
|
[
"MIT"
] | null | null | null |
# load modules
import pandas as pd
# load data
combined_collapsed_path = "combined_lipidomics_data_collapsed.tsv"
combined_collapsed_df = pd.read_csv(combined_collapsed_path, sep="\t", index_col="replicate")
# load redundant CoQ details
Coq10_species_path = "coq_species_to_drop.xlsx"
Coq10_species_df = pd.read_excel(Coq10_species_path)
# get list of identifiers
lipid_drop_list = Coq10_species_df['Identifier'].tolist()
# write out data for Yuriy 6/4/20
combined_collapsed_df.drop(lipid_drop_list, axis=1).to_csv("combined_lipidomics_data_filtered.tsv", sep="\t")
| 33.529412
| 109
| 0.812281
|
376117089990516c929c670dc99516684776dbaf
| 1,823
|
py
|
Python
|
src/ggrc/notifications/job_emails.py
|
j0gurt/ggrc-core
|
84662dc85aa8864c907eabe70b8efccf92298a1f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/notifications/job_emails.py
|
j0gurt/ggrc-core
|
84662dc85aa8864c907eabe70b8efccf92298a1f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/notifications/job_emails.py
|
j0gurt/ggrc-core
|
84662dc85aa8864c907eabe70b8efccf92298a1f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
""" Import/Export notifications """
from urlparse import urljoin
from ggrc import settings
from ggrc.notifications import common
IMPORT_COMPLETED = {
"title": u"{filename} was imported successfully",
"body": u"Go to import page to check details or submit new import "
u"request.",
"url": u"import"
}
IMPORT_BLOCKED = {
"title": u"Could not import {filename} due to warnings",
"body": u"Go to import page to check details or submit new import "
u"request.",
"url": u"import"
}
IMPORT_FAILED = {
"title": u"[WARNING] Could not import {filename} due to errors",
"body": u"Go to import page to check details or submit new import "
u"request.",
"url": u"import"
}
EXPORT_COMPLETED = {
"title": u"{filename} was exported successfully",
"body": u"Go to export page to download the result. "
u"If the file generated for this export request "
u"has been downloaded, you can ignore the email.",
"url": u"export"
}
EXPORT_FAILED = {
"title": (u"[WARNING] Your GGRC export request did not finish due "
u"to errors"),
"body": u"Please follow the link to write to sheets or download .csv",
"url": u"export"
}
def send_email(template, user_email, url_root, filename="", ie_id=None):
""" Send email """
subject = template["title"].format(filename=filename)
url = urljoin(url_root, template["url"])
if ie_id is not None:
url = "{}#!&job_id={}".format(url, str(ie_id))
data = {
"body": template["body"],
"url": url,
"title": subject
}
body = settings.EMAIL_IMPORT_EXPORT.render(import_export=data)
common.send_email(user_email, subject, body)
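A sketch of a typical call (the address, root URL and job id below are invented for illustration):
# after a successful import job:
send_email(IMPORT_COMPLETED, "analyst@example.com", "https://ggrc.example.com/",
           filename="controls.csv", ie_id=42)
# subject rendered: "controls.csv was imported successfully"
# link embedded:    https://ggrc.example.com/import#!&job_id=42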
| 28.484375
| 78
| 0.645639
|
e324e3b590d37873469a789b8db88e6444936146
| 8,239
|
py
|
Python
|
law/contrib/docker/sandbox.py
|
HerrHorizontal/law
|
c31091d3bf39a25e79b3796ed5742346ddff8b77
|
[
"BSD-3-Clause"
] | null | null | null |
law/contrib/docker/sandbox.py
|
HerrHorizontal/law
|
c31091d3bf39a25e79b3796ed5742346ddff8b77
|
[
"BSD-3-Clause"
] | null | null | null |
law/contrib/docker/sandbox.py
|
HerrHorizontal/law
|
c31091d3bf39a25e79b3796ed5742346ddff8b77
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
"""
Docker sandbox implementation.
"""
__all__ = ["DockerSandbox"]
import os
import sys
import uuid
import socket
import subprocess
import luigi
import six
from law.config import Config
from law.sandbox.base import Sandbox
from law.target.local import LocalFileTarget
from law.cli.software import deps as law_deps
from law.util import make_list, interruptable_popen, quote_cmd, flatten
class DockerSandbox(Sandbox):
sandbox_type = "docker"
# env cache per image
_envs = {}
@property
def image(self):
return self.name
@property
def tag(self):
return None if ":" not in self.image else self.image.split(":", 1)[1]
def _docker_run_cmd(self):
"""
Part of the "docker run" command that is common to env requests and run.
"""
cmd = ["docker", "run"]
# rm flag
cmd.extend(["--rm"])
# use the pid namespace of the host so killing the outer process will stop the container
cmd.extend(["--pid", "host"])
# task-specific arguments
if self.task:
# user flag
sandbox_user = self.task.sandbox_user()
if sandbox_user:
if not isinstance(sandbox_user, (tuple, list)) or len(sandbox_user) != 2:
raise Exception("sandbox_user() must return 2-tuple")
cmd.extend(["-u", "{}:{}".format(*sandbox_user)])
# add args configured on the task
args_getter = getattr(self.task, "docker_args", None)
if callable(args_getter):
cmd.extend(make_list(args_getter()))
return cmd
@property
def env(self):
# strategy: create a tempfile, forward it to a container, let python dump its full env,
# close the container and load the env file
if self.image not in self._envs:
tmp = LocalFileTarget(is_tmp=".env")
tmp.touch()
env_file = os.path.join("/tmp", tmp.unique_basename)
# get the docker run command
docker_run_cmd = self._docker_run_cmd()
# mount the env file
docker_run_cmd.extend(["-v", "{}:{}".format(tmp.path, env_file)])
# build commands to setup the environment
setup_cmds = self._build_setup_cmds(self._get_env())
# build the python command that dumps the environment
py_cmd = "import os,pickle;" \
+ "pickle.dump(dict(os.environ),open('{}','wb'),protocol=2)".format(env_file)
# build the full command
cmd = quote_cmd(docker_run_cmd + [self.image, "bash", "-l", "-c",
"; ".join(flatten(setup_cmds, quote_cmd(["python", "-c", py_cmd]))),
])
# run it
code, out, _ = interruptable_popen(cmd, shell=True, executable="/bin/bash",
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if code != 0:
raise Exception("docker sandbox env loading failed:\n{}".format(out))
# load the environment from the tmp file
env = tmp.load(formatter="pickle")
# cache
self._envs[self.image] = env
return self._envs[self.image]
def cmd(self, proxy_cmd):
# docker run command arguments
args = []
# container name
args.extend(["--name", "{}_{}".format(self.task.task_id, str(uuid.uuid4())[:8])])
# container hostname
args.extend(["-h", "{}".format(socket.gethostname())])
# helper to build forwarded paths
cfg = Config.instance()
cfg_section = self.get_config_section()
forward_dir = cfg.get_expanded(cfg_section, "forward_dir")
python_dir = cfg.get_expanded(cfg_section, "python_dir")
bin_dir = cfg.get_expanded(cfg_section, "bin_dir")
stagein_dir_name = cfg.get_expanded(cfg_section, "stagein_dir_name")
stageout_dir_name = cfg.get_expanded(cfg_section, "stageout_dir_name")
def dst(*args):
return os.path.join(forward_dir, *(str(arg) for arg in args))
# helper for mounting a volume
volume_srcs = []
def mount(*vol):
src = vol[0]
            # make sure the same source directory is not mounted twice
if src in volume_srcs:
return
volume_srcs.append(src)
# ensure that source directories exist
if not os.path.isfile(src) and not os.path.exists(src):
os.makedirs(src)
# store the mount point
args.extend(["-v", ":".join(vol)])
# environment variables to set
env = self._get_env()
# add staging directories
if self.stagein_info:
env["LAW_SANDBOX_STAGEIN_DIR"] = dst(stagein_dir_name)
mount(self.stagein_info.stage_dir.path, dst(stagein_dir_name))
if self.stageout_info:
env["LAW_SANDBOX_STAGEOUT_DIR"] = dst(stageout_dir_name)
mount(self.stageout_info.stage_dir.path, dst(stageout_dir_name))
# prevent python from writing byte code files
env["PYTHONDONTWRITEBYTECODE"] = "1"
# adjust path variables
env["PATH"] = os.pathsep.join([dst("bin"), "$PATH"])
env["PYTHONPATH"] = os.pathsep.join([dst(python_dir), "$PYTHONPATH"])
# forward python directories of law and dependencies
for mod in law_deps:
path = os.path.dirname(mod.__file__)
name, ext = os.path.splitext(os.path.basename(mod.__file__))
if name == "__init__":
vsrc = path
vdst = dst(python_dir, os.path.basename(path))
else:
vsrc = os.path.join(path, name + ".py")
vdst = dst(python_dir, name + ".py")
mount(vsrc, vdst, "ro")
# forward the law cli dir to bin as it contains a law executable
env["PATH"] = os.pathsep.join([dst(python_dir, "law", "cli"), env["PATH"]])
# forward the law config file
if cfg.config_file:
mount(cfg.config_file, dst("law.cfg"))
env["LAW_CONFIG_FILE"] = dst("law.cfg")
# forward the luigi config file
for p in luigi.configuration.LuigiConfigParser._config_paths[::-1]:
if os.path.exists(p):
mount(p, dst("luigi.cfg"))
env["LUIGI_CONFIG_PATH"] = dst("luigi.cfg")
break
# forward volumes defined in the config and by the task
vols = self._get_volumes()
for hdir, cdir in six.iteritems(vols):
if not cdir:
mount(hdir, hdir)
else:
cdir = self._expand_volume(cdir, bin_dir=dst(bin_dir), python_dir=dst(python_dir))
mount(hdir, cdir)
# handle local scheduling within the container
if self.force_local_scheduler():
proxy_cmd.add_arg("--local-scheduler", "True", overwrite=True)
elif self.scheduler_on_host():
# when the scheduler runs on the host system, we need to set the network interface to
# the host system and set the correct host address as seen by the container
args.extend(["--network", "host"])
proxy_cmd.add_arg("--scheduler-host", self.get_host_ip(), overwrite=True)
# get the docker run command, add arguments from above
docker_run_cmd = self._docker_run_cmd() + args
# build commands to setup the environment
setup_cmds = self._build_setup_cmds(env)
# build the final command
cmd = quote_cmd(docker_run_cmd + [self.image, "bash", "-l", "-c",
"; ".join(flatten(setup_cmds, proxy_cmd.build()))
])
return cmd
def get_host_ip(self):
# in host network mode, docker containers can normally be accessed via 127.0.0.1 on Linux
        # or via docker.for.mac.localhost on Mac (as of docker 17.06); however, in some cases it
# might be required to use a different ip which can be set via an env variable
default_ip = "docker.for.mac.localhost" if sys.platform == "darwin" else "127.0.0.1"
return os.getenv("LAW_DOCKER_HOST_IP", default_ip)
| 35.360515
| 98
| 0.593397
|
e751ee0569c881a85f034d4e7fed235140c2fd3c
| 2,272
|
py
|
Python
|
src/gluonts/testutil/dummy_datasets.py
|
ykaitao/gluon-ts
|
9622550974e9e0819e25438fc45353f8a6474b55
|
[
"Apache-2.0"
] | 1
|
2020-01-19T13:27:51.000Z
|
2020-01-19T13:27:51.000Z
|
src/gluonts/testutil/dummy_datasets.py
|
ykaitao/gluon-ts
|
9622550974e9e0819e25438fc45353f8a6474b55
|
[
"Apache-2.0"
] | null | null | null |
src/gluonts/testutil/dummy_datasets.py
|
ykaitao/gluon-ts
|
9622550974e9e0819e25438fc45353f8a6474b55
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from functools import partial
from random import randint
from typing import List, Tuple
# Third-party imports
import numpy as np
import pytest
# First-party imports
from gluonts.dataset.common import ListDataset
from gluonts.dataset.field_names import FieldName
def make_dummy_datasets_with_features(
num_ts: int = 5,
start: str = "2018-01-01",
freq: str = "D",
min_length: int = 5,
max_length: int = 10,
prediction_length: int = 3,
cardinality: List[int] = [],
num_feat_dynamic_real: int = 0,
) -> Tuple[ListDataset, ListDataset]:
data_iter_train = []
data_iter_test = []
for k in range(num_ts):
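        # build one train entry per series; the test entry extends dynamic features by the prediction horizon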
ts_length = randint(min_length, max_length)
data_entry_train = {
FieldName.START: start,
FieldName.TARGET: [0.0] * ts_length,
}
if len(cardinality) > 0:
data_entry_train[FieldName.FEAT_STATIC_CAT] = [
randint(0, c) for c in cardinality
]
data_entry_test = data_entry_train.copy()
if num_feat_dynamic_real > 0:
data_entry_train[FieldName.FEAT_DYNAMIC_REAL] = [
[float(1 + k)] * ts_length
for k in range(num_feat_dynamic_real)
]
data_entry_test[FieldName.FEAT_DYNAMIC_REAL] = [
[float(1 + k)] * (ts_length + prediction_length)
for k in range(num_feat_dynamic_real)
]
data_iter_train.append(data_entry_train)
data_iter_test.append(data_entry_test)
return (
ListDataset(data_iter=data_iter_train, freq=freq),
ListDataset(data_iter=data_iter_test, freq=freq),
)
| 32.927536
| 75
| 0.664613
|
ab0e5333213556a84c797b30a420f36487e4eeff
| 7
|
py
|
Python
|
HeadFirstPython/chapter7/Test.py
|
yhj630520/pythonLearn
|
1f151f2ead1b3ee30a96f5cb8b31e352def48b27
|
[
"Apache-2.0"
] | null | null | null |
HeadFirstPython/chapter7/Test.py
|
yhj630520/pythonLearn
|
1f151f2ead1b3ee30a96f5cb8b31e352def48b27
|
[
"Apache-2.0"
] | null | null | null |
HeadFirstPython/chapter7/Test.py
|
yhj630520/pythonLearn
|
1f151f2ead1b3ee30a96f5cb8b31e352def48b27
|
[
"Apache-2.0"
] | null | null | null |
# Test code
| 3.5
| 6
| 0.571429
|
d0524c2f9348ff763787d12f9c3ea34d8eb5d89a
| 18,949
|
py
|
Python
|
src/conversation_analytics_toolkit/filtering2.py
|
watson-developer-cloud/assistant-dialog-flow-analysis
|
0c7bcd9527636dce77c74b80f60dbe23e6682e13
|
[
"Apache-2.0"
] | 19
|
2020-06-07T19:13:06.000Z
|
2022-01-22T02:34:11.000Z
|
src/conversation_analytics_toolkit/filtering2.py
|
watson-developer-cloud/assistant-dialog-flow-analysis
|
0c7bcd9527636dce77c74b80f60dbe23e6682e13
|
[
"Apache-2.0"
] | 32
|
2020-06-04T14:09:03.000Z
|
2021-02-11T15:05:07.000Z
|
src/conversation_analytics_toolkit/filtering2.py
|
watson-developer-cloud/assistant-dialog-flow-analysis
|
0c7bcd9527636dce77c74b80f60dbe23e6682e13
|
[
"Apache-2.0"
] | 10
|
2020-06-04T18:49:53.000Z
|
2021-11-26T12:42:08.000Z
|
# (C) Copyright IBM Corp. 2019, 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from IPython.display import display, HTML
import pandas as pd
from numpy import nan
import time
class ChainFilter():
def __init__(self, df, description="<<ALL-DATA>>"):
self.df = df
self.description = ""
self.filters = []
self._append_filter(self.df, "<<ALL-DATA>>", 0)
def _append_filter(self, df, filter_name, duration_sec):
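        # record summary statistics (rows, users, conversations, time range) for the newly filtered frame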
users = conversations = logs = timestamp_from = timestamp_to = nan
if "conversation_id" in df.columns:
conversations = len(df["conversation_id"].unique())
if "log_id" in df.columns:
logs = len(df["log_id"].unique())
if "user_id" in df.columns:
users = len(df["user_id"].unique())
if len(df) > 0:
if "response_timestamp" in df.columns:
timestamp_from = min(df["response_timestamp"])
if "response_timestamp" in df.columns:
timestamp_to = max(df["response_timestamp"])
self.filters.append({"filter_name": filter_name,
"filter_duration_sec": duration_sec,
"rows":len(df),
"users": users,
"conversations": conversations,
"timestamp_from": timestamp_from,
"timestamp_to": timestamp_to,
"df": df})
self.df = df
def setDescription(self, description):
self.description = description
return self
def printConversationFilters(self):
display(HTML("<h1>" + self.description + "</h1>"))
display(HTML(pd.DataFrame(self.filters)[["filter_name", "filter_duration_sec", "rows", "users", "conversations", "timestamp_from","timestamp_to"]].to_html()))
return self
def getDataFrame(self, i=-1):
return self.filters[i]["df"]
def getLineageDetails(self, i=-1):
if i == -1:
i = len(self.filters)-1
result = ""
for j in range(0,i+1):
if j >0:
result+= " --> "
result+= self.filters[j]["filter_name"] + "(" + str(self.filters[j]["conversations"]) + ")"
return result
def by_dialog_node_id(self, dialog_node_id):
"""
filters conversations that include node_id in nodes_visited list
"""
now = time.time()
filtered_df = self.df
#find which rows include the dialog node
rows = filtered_df[filtered_df.apply(lambda x: dialog_node_id in x["nodes_visited"], axis=1)]
#find their conversation_id
unique_conversations = rows["conversation_id"].unique()
#filter by conversation
filtered_df = filtered_df[filtered_df.apply(lambda x: x["conversation_id"] in unique_conversations, axis=1)]
later = time.time()
duration_sec = int(later - now)
self._append_filter(filtered_df, "by_dialog_node_id (" + dialog_node_id + ")", duration_sec)
return self
def by_dialog_not_node_id(self, dialog_node_id):
"""
filters conversations that do not include node_id in nodes_visited list
"""
now = time.time()
filtered_df = self.df
#find which rows include the dialog node
rows = filtered_df[filtered_df.apply(lambda x: dialog_node_id in x["nodes_visited"], axis=1)]
#find their conversation_id
unique_conversations = rows["conversation_id"].unique()
#filter by conversation
filtered_df = filtered_df[filtered_df.apply(lambda x: x["conversation_id"] not in unique_conversations, axis=1)]
later = time.time()
duration_sec = int(later - now)
self._append_filter(filtered_df, "by_dialog_node_id (" + dialog_node_id + ")", duration_sec)
return self
def by_dialog_node_str(self, dialog_node_str):
"""
filters conversations that include a node string (title/condition) value in the nodes_visited_str list
"""
now = time.time()
filtered_df = self.df
#find which rows include the dialog node str
rows = filtered_df[filtered_df.apply(lambda x: dialog_node_str in x["nodes_visited_str"], axis=1)]
#find their conversation_id
unique_conversations = rows["conversation_id"].unique()
#filter by conversation
filtered_df = filtered_df[filtered_df.apply(lambda x: x["conversation_id"] in unique_conversations, axis=1)]
later = time.time()
duration_sec = int(later - now)
self._append_filter(filtered_df, "by_dialog_node_str (" + dialog_node_str + ")", duration_sec)
return self
def by_turn_label(self, label):
"""
return the rows of conversations that include a value in turn_label
"""
now = time.time()
filtered_df = self.df
#find which rows include the dialog node
#rows = filtered_df[filtered_df.apply(lambda x: turn_label in x["node_visited"], axis=1)]
rows = filtered_df[filtered_df["turn_label"] == label]
#find their conversation_id
unique_conversations = rows["conversation_id"].unique()
#filter by conversation
filtered_df = filtered_df[filtered_df.apply(lambda x: x["conversation_id"] in unique_conversations, axis=1)]
later = time.time()
duration_sec = int(later - now)
self._append_filter(filtered_df, "by_turn_label (" + label + ")", duration_sec)
return self
def by_node_name(self, node_name, force_deprecation=False):
"""
DEPRECATED: return the rows of conversations that include node in node_visited
"""
if force_deprecation == False:
print("Error: by_node_name() is deprecated and will be removed in the future. Use by_turn_label() or by_node_id() instead")
print("For back compatibility, you can use the force_deprecation=True parameter, but make sure to have a 'node_visited' column in your data frame")
return
else:
print("Warning: by_node_name() is deprecated and will be removed in the future. Use by_turn_label() or by_node_id() instead")
now = time.time()
filtered_df = self.df
#find which rows include the dialog node
rows = filtered_df[filtered_df.apply(lambda x: node_name in x["node_visited"], axis=1)]
#find their conversation_id
unique_conversations = rows["conversation_id"].unique()
#filter by conversation
filtered_df = filtered_df[filtered_df.apply(lambda x: x["conversation_id"] in unique_conversations, axis=1)]
later = time.time()
duration_sec = int(later - now)
self._append_filter(filtered_df, "by_node_name (" + node_name + ")", duration_sec)
return self
def trim_from_turn_label(self, turn_label):
"""
filter and trim conversations steps prior to first step that includes a turn label
"""
now = time.time()
# create an empty dataframe with the same column names and types
filtered_df = pd.DataFrame(data=None, columns=self.df.columns)
for column in filtered_df.columns:
filtered_df[column] = filtered_df[column].astype(self.df[column].dtypes.name)
df_by_conversation_id = self.df.groupby(by="conversation_id")
for conversation_id, conversation_df in df_by_conversation_id:
i=0
conversation_df = conversation_df.sort_values(by=["response_timestamp"])
for index, row in conversation_df.iterrows():
i=i+1
if turn_label == row["turn_label"]:
num_of_elements_to_copy = len(conversation_df)-i+1
filtered_df = pd.concat([filtered_df,conversation_df.tail(num_of_elements_to_copy)])
break
later = time.time()
duration_sec = int(later - now)
self._append_filter(filtered_df, "trim_from_turn_label (" + turn_label + ")", duration_sec)
return self
def remove_turn_by_label(self, turn_label):
"""
filter and trim steps that include turn label.
"""
now = time.time()
filtered_df = self.df[self.df["turn_label"] != turn_label]
later = time.time()
duration_sec = int(later - now)
self._append_filter(filtered_df, "remove_turn_by_label (" + str(len(self.df) - len(filtered_df)) + ")", duration_sec)
return self
def remove_turn_by_column_value(self, column, value):
"""
filter and trim steps that include turn label.
"""
now = time.time()
filtered_df = self.df[self.df[column] != value]
later = time.time()
duration_sec = int(later - now)
self._append_filter(filtered_df, "remove_turn_by_column_value (" + str(len(self.df) - len(filtered_df)) + ")", duration_sec)
return self
def trim_from_node_id(self, node_id):
"""
filter and trim conversations steps prior to first step that includes node_id in nodes_visited.
"""
now = time.time()
# create an empty dataframe with the same column names and types
filtered_df = pd.DataFrame(data=None, columns=self.df.columns)
for column in filtered_df.columns:
filtered_df[column] = filtered_df[column].astype(self.df[column].dtypes.name)
df_by_conversation_id = self.df.groupby(by="conversation_id")
for conversation_id, conversation_df in df_by_conversation_id:
i=0
conversation_df = conversation_df.sort_values(by=["response_timestamp"])
for index, row in conversation_df.iterrows():
i=i+1
nodes_visited = row["nodes_visited"]
if node_id in nodes_visited:
num_of_elements_to_copy = len(conversation_df)-i+1
filtered_df = pd.concat([filtered_df,conversation_df.tail(num_of_elements_to_copy)])
break
later = time.time()
duration_sec = int(later - now)
self._append_filter(filtered_df, "trim_from_node_id (" + node_id + ")", duration_sec)
return self
def trim_after_node_id(self, node_id):
"""
filter and trim conversations steps after to last step includes node_id in nodes_visited.
"""
now = time.time()
# create an empty dataframe with the same column names and types
filtered_df = pd.DataFrame(data=None, columns=self.df.columns)
for column in filtered_df.columns:
filtered_df[column] = filtered_df[column].astype(self.df[column].dtypes.name)
df_by_conversation_id = self.df.groupby(by="conversation_id")
for conversation_id, conversation_df in df_by_conversation_id:
i=0
# reverse sort the conversation steps to trip from the end
conversation_df = conversation_df.sort_values(by=["response_timestamp"], ascending=False)
for index, row in conversation_df.iterrows():
i=i+1
nodes_visited = row["nodes_visited"]
if node_id in nodes_visited:
num_of_elements_to_copy = len(conversation_df)-i+1
filtered_df = pd.concat([filtered_df,conversation_df.tail(num_of_elements_to_copy)])
break
later = time.time()
duration_sec = int(later - now)
self._append_filter(filtered_df, "trim_from_node_id (" + node_id + ")", duration_sec)
return self
def trim_after_last_node_id(self, node_id):
"""
filter and trim conversations steps after the last step that includes node_id in nodes_visited.
"""
now = time.time()
# create an empty dataframe with the same column names and types
filtered_df = pd.DataFrame(data=None, columns=self.df.columns)
for column in filtered_df.columns:
filtered_df[column] = filtered_df[column].astype(self.df[column].dtypes.name)
df_by_conversation_id = self.df.groupby(by="conversation_id")
for conversation_id, conversation_df in df_by_conversation_id:
i=0
conversation_df = conversation_df.sort_values(by=["response_timestamp"], ascending=False)
for index, row in conversation_df.iterrows():
i=i+1
nodes_visited = row["nodes_visited"]
if node_id in nodes_visited:
num_of_elements_to_copy = len(conversation_df)-i+1
filtered_df = pd.concat([filtered_df,conversation_df.tail(num_of_elements_to_copy)])
break
        filtered_df = filtered_df.sort_values(by=["response_timestamp"], ascending=True)
later = time.time()
duration_sec = int(later - now)
self._append_filter(filtered_df, "trim_from_node_id (" + node_id + ")", duration_sec)
return self
def from_node_onwards(self, node_name):
"""
DEPCRECATED: filter & trim conversations starting from node_id (in node_visited)
"""
print("Error: from_node_onwards() is deprecated. Use trim_from_turn_label() or trim_from_node_id() instead")
return
now = time.time()
# create an empty dataframe with the same column names and types
filtered_df = pd.DataFrame(data=None, columns=self.df.columns)
for column in filtered_df.columns:
filtered_df[column] = filtered_df[column].astype(self.df[column].dtypes.name)
df_by_conversation_id = self.df.groupby(by="conversation_id")
#df_by_conversation_id = filtered_df.sort_values(by=["response_timestamp"]).groupby(by="conversation_id")
for conversation_id, conversation_df in df_by_conversation_id:
i=0
conversation_df = conversation_df.sort_values(by=["response_timestamp"])
for index, row in conversation_df.iterrows():
i=i+1
node_visited = row["node_visited"]
if node_name == node_visited:
num_of_elements_to_copy = len(conversation_df)-i+1
filtered_df = pd.concat([filtered_df,conversation_df.tail(num_of_elements_to_copy)])
break
later = time.time()
duration_sec = int(later - now)
self._append_filter(filtered_df, "from_node_onwards (" + node_name + ")", duration_sec)
return self
def by_date_range(self, start_date, end_date):
now = time.time()
mask = (self.df['response_timestamp'] >= start_date) & (self.df['response_timestamp'] <= end_date)
filtered_df = self.df.loc[mask]
later = time.time()
duration_sec = int(later - now)
self._append_filter(filtered_df, "by_date_range (" + str(start_date) + ", " + str(end_date) + ")", duration_sec)
return self
def by_column_values(self, column_name, values):
"""
filters conversations that include values (str or list) in a given column in any of their conversation steps.
"""
now = time.time()
filtered_df = self.df
if column_name not in filtered_df.columns.values:
raise ValueError(f"{column_name} not in data")
unique_col_values = filtered_df[column_name].astype(str).unique()
# convert numeric to string
if isinstance(values, (int, float, complex)):
values = str(values)
# process values = single value
if isinstance(values, str):
if values not in unique_col_values:
raise ValueError(f"value {values} provided is not in {column_name}")
rows = filtered_df.loc[filtered_df[column_name].astype(str) == values, :]
elif isinstance(values, (list, tuple)):
values_column_diff = set(values).difference(set(unique_col_values))
if len(values_column_diff) > 0:
print(f"WARNING: values {values_column_diff} are not in {column_name}")
rows = filtered_df.loc[filtered_df[column_name].astype(str).isin(values)]
else:
raise ValueError("values must be type numeric/string (single value), or list/tuple (multiple values).")
# find conversation ids
unique_conversations = rows["conversation_id"].unique()
# filter by conversation
filtered_df = filtered_df[filtered_df.apply(lambda x: x["conversation_id"] in unique_conversations, axis=1)]
later = time.time()
duration_sec = int(later - now)
self._append_filter(filtered_df, "by_column_values (" + column_name + ': ' + str(values) + ")", duration_sec)
return self
def by_consecutive_values(self, feature, feature_type, feature_values):
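        # keep conversations where feature_values[0] is immediately followed by feature_values[1] in consecutive turns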
now = time.time()
self.df = self.df.sort_values(['conversation_id','response_timestamp'])
self.df["prev_conversation_id"] = self.df.conversation_id.shift(1)
if feature_type == "cat":
self.df["prev_feature"] = self.df[feature].shift(1)
conv_id = self.df[(self.df[feature]==feature_values[1])&(self.df["prev_feature"]==feature_values[0])&\
(self.df.conversation_id==self.df.prev_conversation_id)].conversation_id.unique()
del self.df["prev_feature"]
elif feature_type == "text":
self.df["clean_text"] = self.df.apply (lambda row: clean_text(row[feature]), axis=1)
self.df["prev_clean_text"] = self.df["clean_text"].shift(1)
conv_id = self.df[(self.df["clean_text"].str.contains(feature_values[1]))&\
(self.df["prev_clean_text"].str.contains(feature_values[0]))&\
(self.df.conversation_id==self.df.prev_conversation_id)].conversation_id.unique()
del self.df['clean_text']
del self.df["prev_clean_text"]
else:
print("Error! Unknown feature type.")
return self
del self.df["prev_conversation_id"]
filtered_df = self.df[self.df.conversation_id.isin(conv_id)]
later = time.time()
duration_sec = int(later - now)
self._append_filter(filtered_df, "by_consecutive_values (" + str(feature) + " : " + str(feature_values) + ")", duration_sec)
return self
| 48.093909
| 166
| 0.629796
|
89f7dbb3d30b7466688198db18cea712e378bf69
| 13,472
|
py
|
Python
|
sagemaker-python-sdk/mxnet_gluon_sentiment/sentiment.py
|
cswiercz/amazon-sagemaker-examples
|
8e6864f4d70012ca7b5f2d3abfcaea0da97ed918
|
[
"Apache-2.0"
] | 7
|
2018-10-25T16:35:54.000Z
|
2022-02-12T15:24:11.000Z
|
sagemaker-python-sdk/mxnet_gluon_sentiment/sentiment.py
|
cswiercz/amazon-sagemaker-examples
|
8e6864f4d70012ca7b5f2d3abfcaea0da97ed918
|
[
"Apache-2.0"
] | null | null | null |
sagemaker-python-sdk/mxnet_gluon_sentiment/sentiment.py
|
cswiercz/amazon-sagemaker-examples
|
8e6864f4d70012ca7b5f2d3abfcaea0da97ed918
|
[
"Apache-2.0"
] | 2
|
2019-03-27T06:06:45.000Z
|
2019-04-10T07:21:13.000Z
|
from __future__ import print_function
import logging
import mxnet as mx
from mxnet import gluon, autograd, nd
from mxnet.gluon import nn
import numpy as np
import json
import time
import re
from mxnet.io import DataIter, DataBatch, DataDesc
import bisect, random
from collections import Counter
from itertools import chain, islice
logging.basicConfig(level=logging.DEBUG)
# ------------------------------------------------------------ #
# Training methods #
# ------------------------------------------------------------ #
def train(current_host, hosts, num_cpus, num_gpus, channel_input_dirs, model_dir, hyperparameters, **kwargs):
# retrieve the hyperparameters we set in notebook (with some defaults)
batch_size = hyperparameters.get('batch_size', 8)
epochs = hyperparameters.get('epochs', 2)
learning_rate = hyperparameters.get('learning_rate', 0.01)
log_interval = hyperparameters.get('log_interval', 1000)
embedding_size = hyperparameters.get('embedding_size', 50)
if len(hosts) == 1:
kvstore = 'device' if num_gpus > 0 else 'local'
else:
kvstore = 'dist_device_sync' if num_gpus > 0 else 'dist_sync'
ctx = mx.gpu() if num_gpus > 0 else mx.cpu()
training_dir = channel_input_dirs['training']
train_sentences, train_labels, _ = get_dataset(training_dir + '/train')
val_sentences, val_labels, _ = get_dataset(training_dir + '/test')
num_classes = len(set(train_labels))
vocab = create_vocab(train_sentences)
vocab_size = len(vocab)
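    # encode tokens as vocabulary ids; unknown tokens fall back to index 1 (<unk>)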
train_sentences = [[vocab.get(token, 1) for token in line if len(line)>0] for line in train_sentences]
val_sentences = [[vocab.get(token, 1) for token in line if len(line)>0] for line in val_sentences]
    # As an alternative to splitting in memory, the data could be pre-split in S3 and read
    # with ShardedByS3Key to do parallel training.
shard_size = len(train_sentences) // len(hosts)
for i, host in enumerate(hosts):
if host == current_host:
start = shard_size * i
end = start + shard_size
break
train_iterator = BucketSentenceIter(train_sentences[start:end], train_labels[start:end], batch_size)
val_iterator = BucketSentenceIter(val_sentences, val_labels, batch_size)
# define the network
net = TextClassifier(vocab_size, embedding_size, num_classes)
# Collect all parameters from net and its children, then initialize them.
net.initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)
# Trainer is for updating parameters with gradient.
trainer = gluon.Trainer(net.collect_params(), 'adam',
{'learning_rate': learning_rate},
kvstore=kvstore)
metric = mx.metric.Accuracy()
loss = gluon.loss.SoftmaxCrossEntropyLoss()
for epoch in range(epochs):
        # reset data iterator and metric at beginning of epoch.
metric.reset()
btic = time.time()
i = 0
for batch in train_iterator:
# Copy data to ctx if necessary
data = batch.data[0].as_in_context(ctx)
label = batch.label[0].as_in_context(ctx)
# Start recording computation graph with record() section.
# Recorded graphs can then be differentiated with backward.
with autograd.record():
output = net(data)
L = loss(output, label)
L.backward()
# take a gradient step with batch_size equal to data.shape[0]
trainer.step(data.shape[0])
# update metric at last.
metric.update([label], [output])
if i % log_interval == 0 and i > 0:
name, acc = metric.get()
print('[Epoch %d Batch %d] Training: %s=%f, %f samples/s' %
(epoch, i, name, acc, batch_size / (time.time() - btic)))
btic = time.time()
i += 1
name, acc = metric.get()
print('[Epoch %d] Training: %s=%f' % (epoch, name, acc))
name, val_acc = test(ctx, net, val_iterator)
print('[Epoch %d] Validation: %s=%f' % (epoch, name, val_acc))
train_iterator.reset()
return net, vocab
class BucketSentenceIter(DataIter):
"""Simple bucketing iterator for text classification model.
Parameters
----------
sentences : list of list of int
Encoded sentences.
labels : list of int
Corresponding labels.
batch_size : int
Batch size of the data.
buckets : list of int, optional
Size of the data buckets. Automatically generated if None.
invalid_label : int, optional
        Key for invalid label, e.g. <unk>. The default is 0.
dtype : str, optional
Data type of the encoding. The default data type is 'float32'.
data_name : str, optional
Name of the data. The default name is 'data'.
label_name : str, optional
Name of the label. The default name is 'softmax_label'.
layout : str, optional
Format of data and label. 'NT' means (batch_size, length)
and 'TN' means (length, batch_size).
"""
def __init__(self, sentences, labels, batch_size, buckets=None, invalid_label=0,
data_name='data', label_name='softmax_label', dtype='float32',
layout='NT'):
super(BucketSentenceIter, self).__init__()
if not buckets:
buckets = [i for i, j in enumerate(np.bincount([len(s) for s in sentences]))
if j >= batch_size]
buckets.sort()
ndiscard = 0
self.data = [[] for _ in buckets]
self.labels = [[] for _ in buckets]
for i, sent in enumerate(sentences):
buck = bisect.bisect_left(buckets, len(sent))
if buck == len(buckets):
ndiscard += 1
continue
buff = np.full((buckets[buck],), invalid_label, dtype=dtype)
buff[:len(sent)] = sent
self.data[buck].append(buff)
self.labels[buck].append(labels[i])
self.data = [np.asarray(i, dtype=dtype) for i in self.data]
self.labels = [np.asarray(i, dtype=dtype) for i in self.labels]
print("WARNING: discarded %d sentences longer than the largest bucket."%ndiscard)
self.batch_size = batch_size
self.buckets = buckets
self.data_name = data_name
self.label_name = label_name
self.dtype = dtype
self.invalid_label = invalid_label
self.nddata = []
self.ndlabel = []
self.major_axis = layout.find('N')
self.layout = layout
self.default_bucket_key = max(buckets)
if self.major_axis == 0:
self.provide_data = [DataDesc(
name=self.data_name, shape=(batch_size, self.default_bucket_key),
layout=self.layout)]
self.provide_label = [DataDesc(
name=self.label_name, shape=(batch_size,),
layout=self.layout)]
elif self.major_axis == 1:
self.provide_data = [DataDesc(
name=self.data_name, shape=(self.default_bucket_key, batch_size),
layout=self.layout)]
self.provide_label = [DataDesc(
name=self.label_name, shape=(self.default_bucket_key, batch_size),
layout=self.layout)]
else:
            raise ValueError("Invalid layout %s: Must be NT (batch major) or TN (time major)" % layout)
self.idx = []
for i, buck in enumerate(self.data):
self.idx.extend([(i, j) for j in range(0, len(buck) - batch_size + 1, batch_size)])
self.curr_idx = 0
self.reset()
def reset(self):
"""Resets the iterator to the beginning of the data."""
self.curr_idx = 0
random.shuffle(self.idx)
for i in range(len(self.data)):
data, labels = self.data[i], self.labels[i]
p = np.random.permutation(len(data))
self.data[i], self.labels[i] = data[p], labels[p]
self.nddata = []
self.ndlabel = []
for buck,label_buck in zip(self.data, self.labels):
self.nddata.append(nd.array(buck, dtype=self.dtype))
self.ndlabel.append(nd.array(label_buck, dtype=self.dtype))
def next(self):
"""Returns the next batch of data."""
if self.curr_idx == len(self.idx):
raise StopIteration
i, j = self.idx[self.curr_idx]
self.curr_idx += 1
if self.major_axis == 1:
data = self.nddata[i][j:j+self.batch_size].T
label = self.ndlabel[i][j:j+self.batch_size].T
else:
data = self.nddata[i][j:j+self.batch_size]
label = self.ndlabel[i][j:j+self.batch_size]
return DataBatch([data], [label], pad=0,
bucket_key=self.buckets[i],
provide_data=[DataDesc(
name=self.data_name, shape=data.shape,
layout=self.layout)],
provide_label=[DataDesc(
name=self.label_name, shape=label.shape,
layout=self.layout)])
class TextClassifier(gluon.HybridBlock):
def __init__(self, vocab_size, embedding_size, classes, **kwargs):
super(TextClassifier, self).__init__(**kwargs)
with self.name_scope():
self.dense = gluon.nn.Dense(classes)
self.embedding = gluon.nn.Embedding(input_dim=vocab_size, output_dim=embedding_size)
def hybrid_forward(self, F, x):
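        # embed the token ids, average the embeddings over the sequence, then project to class scores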
x = self.embedding(x)
x = F.mean(x, axis=1)
x = self.dense(x)
return x
def get_dataset(filename):
labels = []
sentences = []
max_length = -1
with open(filename) as f:
for line in f:
tokens = line.split()
label = int(tokens[0])
words = tokens[1:]
max_length = max(max_length, len(words))
labels.append(label)
sentences.append(words)
return sentences, labels, max_length
def create_vocab(sentences, min_count=5, num_words = 100000):
BOS_SYMBOL = "<s>"
EOS_SYMBOL = "</s>"
UNK_SYMBOL = "<unk>"
PAD_SYMBOL = "<pad>"
PAD_ID = 0
TOKEN_SEPARATOR = " "
VOCAB_SYMBOLS = [PAD_SYMBOL, UNK_SYMBOL, BOS_SYMBOL, EOS_SYMBOL]
VOCAB_ENCODING = "utf-8"
vocab_symbols_set = set(VOCAB_SYMBOLS)
raw_vocab = Counter(token for line in sentences for token in line)
pruned_vocab = sorted(((c, w) for w, c in raw_vocab.items() if c >= min_count), reverse=True)
vocab = islice((w for c, w in pruned_vocab), num_words)
word_to_id = {word: idx for idx, word in enumerate(chain(VOCAB_SYMBOLS, vocab))}
return word_to_id
def vocab_to_json(vocab, path):
with open(path, "w") as out:
json.dump(vocab, out, indent=4, ensure_ascii=True)
print('Vocabulary saved to "%s"', path)
def vocab_from_json(path):
with open(path) as inp:
vocab = json.load(inp)
print('Vocabulary (%d words) loaded from "%s"', len(vocab), path)
return vocab
def save(net, model_dir):
# save the model
net, vocab = net
y = net(mx.sym.var('data'))
y.save('%s/model.json' % model_dir)
net.collect_params().save('%s/model.params' % model_dir)
vocab_to_json(vocab, '%s/vocab.json' % model_dir)
def test(ctx, net, val_data):
val_data.reset()
metric = mx.metric.Accuracy()
for batch in val_data:
data = batch.data[0].as_in_context(ctx)
label = batch.label[0].as_in_context(ctx)
output = net(data)
metric.update([label], [output])
return metric.get()
# ------------------------------------------------------------ #
# Hosting methods #
# ------------------------------------------------------------ #
def model_fn(model_dir):
"""
Load the gluon model. Called once when hosting service starts.
    :param model_dir: The directory where model files are stored.
:return: a model (in this case a Gluon network)
"""
symbol = mx.sym.load('%s/model.json' % model_dir)
vocab = vocab_from_json('%s/vocab.json' % model_dir)
outputs = mx.symbol.softmax(data=symbol, name='softmax_label')
inputs = mx.sym.var('data')
param_dict = gluon.ParameterDict('model_')
net = gluon.SymbolBlock(outputs, inputs, param_dict)
net.load_params('%s/model.params' % model_dir, ctx=mx.cpu())
return net, vocab
def transform_fn(net, data, input_content_type, output_content_type):
"""
Transform a request using the Gluon model. Called once per request.
:param net: The Gluon model.
:param data: The request payload.
:param input_content_type: The request content type.
:param output_content_type: The (desired) response content type.
:return: response payload and content type.
"""
# we can use content types to vary input/output handling, but
# here we just assume json for both
net, vocab = net
parsed = json.loads(data)
outputs = []
for row in parsed:
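        # encode each sentence with the training vocabulary (unknown tokens map to 1) and take the argmax class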
tokens = [vocab.get(token, 1) for token in row.split()]
nda = mx.nd.array([tokens])
output = net(nda)
prediction = mx.nd.argmax(output, axis=1)
outputs.append(int(prediction.asscalar()))
response_body = json.dumps(outputs)
return response_body, output_content_type
| 37.526462
| 109
| 0.599985
|
d2f782888a9395e15c18a100768c0badaa90a7db
| 1,034
|
py
|
Python
|
wizard.py
|
VukAnd/os
|
9708eab438e841a46603b81bfb3e251fe24a8ce0
|
[
"MIT"
] | null | null | null |
wizard.py
|
VukAnd/os
|
9708eab438e841a46603b81bfb3e251fe24a8ce0
|
[
"MIT"
] | 1
|
2020-05-18T09:23:56.000Z
|
2020-05-18T09:24:22.000Z
|
wizard.py
|
VukAnd/os
|
9708eab438e841a46603b81bfb3e251fe24a8ce0
|
[
"MIT"
] | 2
|
2020-05-18T09:05:25.000Z
|
2020-05-18T10:01:32.000Z
|
import os
import urllib.request
import time
print('Welcome to the setup wizard. This will install the required packages and the OS on your computer. The OS will be installed in the folder of this wizard.')
proceed = input('Proceed? (Y/N)\n')
if proceed == 'Y' or proceed == 'y':
    print('Okay. Do you accept the disclaimer below?\nThe developer is not responsible for any losses this program may cause.')
proceed = input('(Y/N)\n')
if proceed == 'Y' or proceed == 'y':
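        # install the runtime dependencies, then fetch the OS release archive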
os.system('pip install keyboard')
os.system('pip install bext')
os.system('pip install pyfiglet')
os.system('pip install requests')
print('Required modules installed. Installing OS...')
        urllib.request.urlretrieve('https://github.com/hellogoose/os/releases/download/v1.3/os.zip', 'os.zip')  # download and save the archive (local filename 'os.zip' assumed)
print('Finished. Please unzip the file. Welcome to the new age of operating systems!')
time.sleep(1)
else:
print('Cancelled. Have a nice day!')
else:
print('Cancelled. Have a nice day!')
| 47
| 161
| 0.675048
|
25ab4b6f061c1d86438a033cf67afc06e2f3af99
| 826
|
py
|
Python
|
del_all.py
|
KungPaoChick/Find_LinkedIn_jobs
|
51266e83a8269cfcfad15562342f25650e3ec30c
|
[
"MIT"
] | 30
|
2021-03-10T13:57:52.000Z
|
2021-09-10T16:48:01.000Z
|
del_all.py
|
KungPaoChick/Find_LinkedIn_jobs
|
51266e83a8269cfcfad15562342f25650e3ec30c
|
[
"MIT"
] | 2
|
2021-10-04T03:19:53.000Z
|
2022-01-11T01:43:14.000Z
|
del_all.py
|
KungPaoChick/Find_LinkedIn_jobs
|
51266e83a8269cfcfad15562342f25650e3ec30c
|
[
"MIT"
] | 4
|
2021-03-11T06:10:37.000Z
|
2021-08-28T15:02:56.000Z
|
# This file just deletes everything you have scraped...
# Basically just think of this file as cleaning up this directory.
import os
import shutil
import colorama
def del_recs():
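    # delete everything in the working directory that is not part of the repository file list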
deleted = []
for content in os.listdir(os.getcwd()):
        if content not in repo_content:
deleted.append(content)
shutil.rmtree(content)
    if deleted:
for x in deleted:
print(f'{x} has been deleted')
else:
print(colorama.Fore.GREEN,
'[*] Repository is clean. Nothing to delete here.',
colorama.Style.RESET_ALL)
if __name__ == '__main__':
colorama.init()
repo_content = ['img', '.git', '.gitignore', 'LICENSE', 'README.md', 'read_data.py',
'requirements.txt', 'search_jobs.py', 'del_all.py']
del_recs()
| 27.533333
| 88
| 0.605327
|
6f32cca8fe66cbd2e1c34125e31262e4bb89ff75
| 1,185
|
py
|
Python
|
util/parse_magdir.py
|
aking1012/python-magic
|
92d532b940a25aff09ca97c53d969ffa1e172efa
|
[
"MIT"
] | null | null | null |
util/parse_magdir.py
|
aking1012/python-magic
|
92d532b940a25aff09ca97c53d969ffa1e172efa
|
[
"MIT"
] | null | null | null |
util/parse_magdir.py
|
aking1012/python-magic
|
92d532b940a25aff09ca97c53d969ffa1e172efa
|
[
"MIT"
] | null | null | null |
'''
Generate fileMimes as:
cd libmagic/magic/Magicdir
grep -R mime . > fileMimes
Then run this script to generate file_types.py
It's a simple dictionary mapping each mime-type to the Magicdir
files that detect it.
'''
lines = open('fileMimes', 'r').readlines()
def dedupe(fileMimes):
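    # collapse (mime, source) tuples into a dict of mime-type -> list of Magicdir source files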
mimeType = []
source = []
final = {}
for item in fileMimes:
try:
index = source[mimeType.index(item[0])]
try:
index.index(item[1])
except:
index.append(item[1])
except:
mimeType.append(item[0])
source.append([item[1]])
for item in mimeType:
final[item] = source[mimeType.index(item)]
return final
file_types = []
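# parse each "source:!:mime type" line from the grep output into a (mime, source) tuple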
for line in lines:
try:
temp = line.split(':!:mime')
temp[0] = temp[0].lstrip('./')
temp[1] = temp[1].lstrip().rstrip()
if temp[1][0] != '#':
try:
temp[1] = temp[1].split('#')[0].rstrip().rstrip('.')
except:
pass
file_types.append((temp[1], temp[0]))
except:
pass
file_types = dedupe(file_types)
file_types = ("],\n" + ' ' * 12 + "'").join(str(file_types).split("], '"))
open('file_types.py', 'w').write(' '*8 + 'self.file_types = ' + file_types)
| 22.358491
| 75
| 0.586498
|
ffbab1163cf040615a1a20903344685789c2294c
| 412
|
py
|
Python
|
sdk/python/pulumi_gcp/osconfig/__init__.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | 121
|
2018-06-18T19:16:42.000Z
|
2022-03-31T06:06:48.000Z
|
sdk/python/pulumi_gcp/osconfig/__init__.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | 492
|
2018-06-22T19:41:03.000Z
|
2022-03-31T15:33:53.000Z
|
sdk/python/pulumi_gcp/osconfig/__init__.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | 43
|
2018-06-19T01:43:13.000Z
|
2022-03-23T22:43:37.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from .guest_policies import *
from .os_policy_assignment import *
from .patch_deployment import *
from ._inputs import *
from . import outputs
| 31.692308
| 87
| 0.745146
|
a02b8d9d61f46d40f313c313018494c8a18862a5
| 63
|
py
|
Python
|
src/enrich/__init__.py
|
Somsubhra/Simplify
|
eeb0899347fb6474de2d674a04b6cd2838dc3490
|
[
"MIT"
] | null | null | null |
src/enrich/__init__.py
|
Somsubhra/Simplify
|
eeb0899347fb6474de2d674a04b6cd2838dc3490
|
[
"MIT"
] | null | null | null |
src/enrich/__init__.py
|
Somsubhra/Simplify
|
eeb0899347fb6474de2d674a04b6cd2838dc3490
|
[
"MIT"
] | null | null | null |
__author__ = 's7a'
# All imports
from enricher import Enricher
| 15.75
| 29
| 0.777778
|
67d46af06ec779c35fe664326815cb311695424d
| 1,311
|
py
|
Python
|
alshamelah_api/apps/support/permissions.py
|
devna-dev/durar-backend
|
36ea29bafd4cb95098e4057eb71df211dc923008
|
[
"MIT"
] | null | null | null |
alshamelah_api/apps/support/permissions.py
|
devna-dev/durar-backend
|
36ea29bafd4cb95098e4057eb71df211dc923008
|
[
"MIT"
] | null | null | null |
alshamelah_api/apps/support/permissions.py
|
devna-dev/durar-backend
|
36ea29bafd4cb95098e4057eb71df211dc923008
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from rest_framework import permissions
class CanManageSupport(permissions.IsAuthenticated):
"""
    Permission checks for support requests: POST is open to everyone, other actions are restricted (superusers always allowed).
"""
# book_lookup = 'parent_lookup_book' case of parent child
def has_permission(self, request, view):
# Allow list to all
if request.method in ['POST']:
return True
# Superuser can manage all the objects
if request.user.is_authenticated and request.user.is_superuser:
return True
parent_permission = super(CanManageSupport, self).has_permission(request, view)
if not parent_permission:
return False
return False
def has_object_permission(self, request, view, obj):
"""
Manages only permissions for editing and deleting the objects
"""
# Allow get to all
if request.method in ['POST']:
return True
# Superuser can manage all the objects
if request.user.is_authenticated and request.user.is_superuser:
return True
parent_permission = super(CanManageSupport, self).has_permission(request, view)
if not parent_permission:
return False
return True
| 28.5
| 87
| 0.654462
|
777a6a87721dc793166c86f2891bbb9e4817b5d6
| 337
|
py
|
Python
|
src/tests/binary_number_tests.py
|
cschlay/mini-toy-computer
|
986802b019697e732ff8de04233c27c1a1900284
|
[
"MIT"
] | null | null | null |
src/tests/binary_number_tests.py
|
cschlay/mini-toy-computer
|
986802b019697e732ff8de04233c27c1a1900284
|
[
"MIT"
] | null | null | null |
src/tests/binary_number_tests.py
|
cschlay/mini-toy-computer
|
986802b019697e732ff8de04233c27c1a1900284
|
[
"MIT"
] | null | null | null |
from tests.randomizer import random_binary_number
CASES = 5
print("POSITIVE NUMBERS TEST")
for x in range(CASES):
A = random_binary_number()
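    # clear the sign bit so the number is read as positive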
A[0] = 0
print(f"{str(A)} = {A.decimal()}")
print("NEGATIVE NUMBERS TEST")
for x in range(CASES):
A = random_binary_number()
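    # set the sign bit so the number is read as negative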
A[0] = 1
print(f"{str(A)} = {A.decimal()}")
| 21.0625
| 49
| 0.637982
|
0c2fd6a3180f45a812b54bbd85ab022e1645aab5
| 2,343
|
py
|
Python
|
scenario-1/python/whole_test.py
|
ludwikkazmierczak/selenium-wbb
|
11ee4db5630d74502fd131724047a90d40a6be42
|
[
"Apache-2.0"
] | null | null | null |
scenario-1/python/whole_test.py
|
ludwikkazmierczak/selenium-wbb
|
11ee4db5630d74502fd131724047a90d40a6be42
|
[
"Apache-2.0"
] | null | null | null |
scenario-1/python/whole_test.py
|
ludwikkazmierczak/selenium-wbb
|
11ee4db5630d74502fd131724047a90d40a6be42
|
[
"Apache-2.0"
] | null | null | null |
import time
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
class whole_test:
def run_test(self):
for n in range(120):
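            # repeat the search-and-edit flow 120 times, timing each iteration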
start = time.time()
driver = webdriver.Chrome(service_args=["--verbose", "--log-path=chromelog.log"])
driver.get("http://mediawiki119.wikia.com")
driver.maximize_window()
elem = driver.find_element_by_id("searchInput")
elem.send_keys("qa")
elem.send_keys(Keys.RETURN)
WebDriverWait(driver, 20).until(
EC.presence_of_element_located((By.XPATH, "//iframe[@title='VisualDNA Analytics']")))
driver.find_element_by_css_selector("article h1 a").click()
WebDriverWait(driver, 10).until(EC.title_is("QA - Mediawiki 1.19 test Wiki"))
text = "ASDASD" + str(n)
# driver.get(driver.current_url + "?action=edit")
WebDriverWait(driver, 20).until(
EC.presence_of_element_located((By.XPATH, "//iframe[@title='VisualDNA Analytics']")))
contribute_button = driver.find_element(By.CSS_SELECTOR, "nav.contribute")
ActionChains(driver).click(contribute_button).move_to_element(
contribute_button.find_element(By.CSS_SELECTOR, "li a[data-id='edit']")).click().perform()
WebDriverWait(driver, 20).until(
EC.visibility_of_element_located((By.CSS_SELECTOR, "div.cke_contents iframe")))
driver.switch_to.frame(driver.find_element_by_css_selector("div.cke_contents iframe"))
driver.find_element_by_tag_name("body").clear()
driver.find_element_by_tag_name("body").send_keys(text)
driver.switch_to.default_content()
driver.find_element_by_css_selector("input#wpSave").click()
WebDriverWait(driver, 10).until(EC.title_is("QA - Mediawiki 1.19 test Wiki"))
assert text in driver.find_element_by_css_selector("#WikiaArticle p").text
driver.quit()
print(time.time() - start)
if __name__ == "__main__":
whole_test().run_test()
| 43.388889
| 106
| 0.661118
|
acc0b8dd1f6a15078663b9c6cd36362585cb76d6
| 576
|
py
|
Python
|
CPAC/registration/__init__.py
|
gkiar/C-PAC
|
0926b451dd8622b25eb68c7bcc770f0156238b23
|
[
"BSD-3-Clause"
] | null | null | null |
CPAC/registration/__init__.py
|
gkiar/C-PAC
|
0926b451dd8622b25eb68c7bcc770f0156238b23
|
[
"BSD-3-Clause"
] | null | null | null |
CPAC/registration/__init__.py
|
gkiar/C-PAC
|
0926b451dd8622b25eb68c7bcc770f0156238b23
|
[
"BSD-3-Clause"
] | null | null | null |
from .registration import create_fsl_flirt_linear_reg, \
create_fsl_fnirt_nonlinear_reg, \
create_register_func_to_anat, \
create_bbregister_func_to_anat, \
create_wf_calculate_ants_warp
from .output_func_to_standard import output_func_to_standard
__all__ = ['create_fsl_flirt_linear_reg', \
'create_fsl_fnirt_nonlinear_reg', \
'create_register_func_to_anat', \
'create_bbregister_func_to_anat', \
'create_wf_calculate_ants_warp']
| 44.307692
| 60
| 0.647569
|
59da6ff7ac03fc22cefeae6a1c4c0bfd2a331daf
| 24,274
|
py
|
Python
|
poky-dunfell/bitbake/lib/toaster/toastermain/management/commands/buildimport.py
|
lacie-life/YoctoPi
|
3412e78468a9b84da50bb1aadb12b459001a3712
|
[
"MIT"
] | 14
|
2021-11-04T07:47:37.000Z
|
2022-03-21T10:10:30.000Z
|
poky-dunfell/bitbake/lib/toaster/toastermain/management/commands/buildimport.py
|
lacie-life/YoctoPi
|
3412e78468a9b84da50bb1aadb12b459001a3712
|
[
"MIT"
] | null | null | null |
poky-dunfell/bitbake/lib/toaster/toastermain/management/commands/buildimport.py
|
lacie-life/YoctoPi
|
3412e78468a9b84da50bb1aadb12b459001a3712
|
[
"MIT"
] | 6
|
2021-11-02T10:56:19.000Z
|
2022-03-06T11:58:20.000Z
|
#
# BitBake Toaster Implementation
#
# Copyright (C) 2018 Wind River Systems
#
# SPDX-License-Identifier: GPL-2.0-only
#
# buildimport: import a project for project specific configuration
#
# Usage:
# (a) Set up Toaster environment
#
# (b) Call buildimport
# $ /path/to/bitbake/lib/toaster/manage.py buildimport \
# --name=$PROJECTNAME \
# --path=$BUILD_DIRECTORY \
# --callback="$CALLBACK_SCRIPT" \
# --command="configure|reconfigure|import"
#
# (c) Return is "|Default_image=%s|Project_id=%d"
#
# (d) Open Toaster to this project using for example:
# $ xdg-open http://localhost:$toaster_port/toastergui/project_specific/$project_id
#
# (e) To delete a project:
# $ /path/to/bitbake/lib/toaster/manage.py buildimport \
# --name=$PROJECTNAME --delete-project
#
# ../bitbake/lib/toaster/manage.py buildimport --name=test --path=`pwd` --callback="" --command=import
from django.core.management.base import BaseCommand
from orm.models import Project, Release, ProjectVariable
from orm.models import Layer, Layer_Version, LayerSource, ProjectLayer
from toastergui.api import scan_layer_content
import os
import re
import os.path
import subprocess
import shutil
# Toaster variable section delimiters
TOASTER_PROLOG = '#=== TOASTER_CONFIG_PROLOG ==='
TOASTER_EPILOG = '#=== TOASTER_CONFIG_EPILOG ==='
# quick development/debugging support
verbose = 2
def _log(msg):
if 1 == verbose:
print(msg)
elif 2 == verbose:
f1=open('/tmp/toaster.log', 'a')
f1.write("|" + msg + "|\n" )
f1.close()
__config_regexp__ = re.compile( r"""
^
(?P<exp>export\s+)?
(?P<var>[a-zA-Z0-9\-_+.${}/~]+?)
(\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])?
\s* (
(?P<colon>:=) |
(?P<lazyques>\?\?=) |
(?P<ques>\?=) |
(?P<append>\+=) |
(?P<prepend>=\+) |
(?P<predot>=\.) |
(?P<postdot>\.=) |
=
) \s*
(?!'[^']*'[^']*'$)
(?!\"[^\"]*\"[^\"]*\"$)
(?P<apo>['\"])
(?P<value>.*)
(?P=apo)
$
""", re.X)
class Command(BaseCommand):
args = "<name> <path> <release>"
help = "Import a command line build directory"
vars = {}
toaster_vars = {}
def add_arguments(self, parser):
parser.add_argument(
'--name', dest='name', required=True,
help='name of the project',
)
parser.add_argument(
'--path', dest='path', required=True,
help='path to the project',
)
parser.add_argument(
'--release', dest='release', required=False,
help='release for the project',
)
parser.add_argument(
'--callback', dest='callback', required=False,
help='callback for project config update',
)
parser.add_argument(
'--delete-project', dest='delete_project', required=False,
help='delete this project from the database',
)
parser.add_argument(
'--command', dest='command', required=False,
help='command (configure,reconfigure,import)',
)
def get_var(self, varname):
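        # return the variable value with ${VAR} references expanded from already-scanned variables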
value = self.vars.get(varname, '')
if value:
            varrefs = re.findall(r'\${([^}]*)}', value)
for ref in varrefs:
if ref in self.vars:
value = value.replace('${%s}' % ref, self.vars[ref])
return value
# Extract the bb variables from a conf file
def scan_conf(self,fn):
vars = self.vars
toaster_vars = self.toaster_vars
#_log("scan_conf:%s" % fn)
if not os.path.isfile(fn):
return
f = open(fn, 'r')
#statements = ast.StatementGroup()
lineno = 0
is_toaster_section = False
while True:
lineno = lineno + 1
s = f.readline()
if not s:
break
w = s.strip()
# skip empty lines
if not w:
continue
# evaluate Toaster sections
if w.startswith(TOASTER_PROLOG):
is_toaster_section = True
continue
if w.startswith(TOASTER_EPILOG):
is_toaster_section = False
continue
s = s.rstrip()
while s[-1] == '\\':
s2 = f.readline().strip()
lineno = lineno + 1
if (not s2 or s2 and s2[0] != "#") and s[0] == "#" :
                    print("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s))
s = s[:-1] + s2
# skip comments
if s[0] == '#':
continue
# process the line for just assignments
m = __config_regexp__.match(s)
if m:
groupd = m.groupdict()
var = groupd['var']
value = groupd['value']
if groupd['lazyques']:
if not var in vars:
vars[var] = value
continue
if groupd['ques']:
if not var in vars:
vars[var] = value
continue
# preset empty blank for remaining operators
if not var in vars:
vars[var] = ''
if groupd['append']:
vars[var] += value
elif groupd['prepend']:
vars[var] = "%s%s" % (value,vars[var])
elif groupd['predot']:
vars[var] = "%s %s" % (value,vars[var])
elif groupd['postdot']:
vars[var] = "%s %s" % (vars[var],value)
else:
vars[var] = "%s" % (value)
# capture vars in a Toaster section
if is_toaster_section:
toaster_vars[var] = vars[var]
# DONE WITH PARSING
f.close()
self.vars = vars
self.toaster_vars = toaster_vars
# Update the scanned project variables
def update_project_vars(self,project,name):
pv, create = ProjectVariable.objects.get_or_create(project = project, name = name)
if (not name in self.vars.keys()) or (not self.vars[name]):
self.vars[name] = pv.value
else:
if pv.value != self.vars[name]:
pv.value = self.vars[name]
pv.save()
# Find the git version of the installation
def find_layer_dir_version(self,path):
# * rocko ...
install_version = ''
cwd = os.getcwd()
os.chdir(path)
p = subprocess.Popen(['git', 'branch', '-av'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
out = out.decode("utf-8")
for branch in out.split('\n'):
if ('*' == branch[0:1]) and ('no branch' not in branch):
install_version = re.sub(' .*','',branch[2:])
break
if 'remotes/m/master' in branch:
install_version = re.sub('.*base/','',branch)
break
os.chdir(cwd)
return install_version
# Compute table of the installation's registered layer versions (branch or commit)
def find_layer_dir_versions(self,INSTALL_URL_PREFIX):
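        # map (repo_short_url, dirpath, branch-or-commit) keys to (layer_version_id, layer_name) for all registered layer versions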
lv_dict = {}
layer_versions = Layer_Version.objects.all()
for lv in layer_versions:
layer = Layer.objects.filter(pk=lv.layer.pk)[0]
if layer.vcs_url:
url_short = layer.vcs_url.replace(INSTALL_URL_PREFIX,'')
else:
url_short = ''
# register the core, branch, and the version variations
lv_dict["%s,%s,%s" % (url_short,lv.dirpath,'')] = (lv.id,layer.name)
lv_dict["%s,%s,%s" % (url_short,lv.dirpath,lv.branch)] = (lv.id,layer.name)
lv_dict["%s,%s,%s" % (url_short,lv.dirpath,lv.commit)] = (lv.id,layer.name)
#_log(" (%s,%s,%s|%s) = (%s,%s)" % (url_short,lv.dirpath,lv.branch,lv.commit,lv.id,layer.name))
return lv_dict
# Apply table of all layer versions
def extract_bblayers(self):
# set up the constants
bblayer_str = self.get_var('BBLAYERS')
TOASTER_DIR = os.environ.get('TOASTER_DIR')
INSTALL_CLONE_PREFIX = os.path.dirname(TOASTER_DIR) + "/"
TOASTER_CLONE_PREFIX = TOASTER_DIR + "/_toaster_clones/"
INSTALL_URL_PREFIX = ''
layers = Layer.objects.filter(name='openembedded-core')
for layer in layers:
if layer.vcs_url:
INSTALL_URL_PREFIX = layer.vcs_url
break
INSTALL_URL_PREFIX = INSTALL_URL_PREFIX.replace("/poky","/")
INSTALL_VERSION_DIR = TOASTER_DIR
INSTALL_URL_POSTFIX = INSTALL_URL_PREFIX.replace(':','_')
INSTALL_URL_POSTFIX = INSTALL_URL_POSTFIX.replace('/','_')
INSTALL_URL_POSTFIX = "%s_%s" % (TOASTER_CLONE_PREFIX,INSTALL_URL_POSTFIX)
# get the set of available layer:layer_versions
lv_dict = self.find_layer_dir_versions(INSTALL_URL_PREFIX)
# compute the layer matches
layers_list = []
for line in bblayer_str.split(' '):
if not line:
continue
if line.endswith('/local'):
continue
# isolate the repo
layer_path = line
line = line.replace(INSTALL_URL_POSTFIX,'').replace(INSTALL_CLONE_PREFIX,'').replace('/layers/','/').replace('/poky/','/')
# isolate the sub-path
path_index = line.rfind('/')
if path_index > 0:
sub_path = line[path_index+1:]
line = line[0:path_index]
else:
sub_path = ''
# isolate the version
if TOASTER_CLONE_PREFIX in layer_path:
is_toaster_clone = True
# extract version from name syntax
version_index = line.find('_')
if version_index > 0:
version = line[version_index+1:]
line = line[0:version_index]
else:
version = ''
_log("TOASTER_CLONE(%s/%s), version=%s" % (line,sub_path,version))
else:
is_toaster_clone = False
# version is from the installation
version = self.find_layer_dir_version(layer_path)
_log("LOCAL_CLONE(%s/%s), version=%s" % (line,sub_path,version))
# capture the layer information into layers_list
layers_list.append( (line,sub_path,version,layer_path,is_toaster_clone) )
return layers_list,lv_dict
#
def find_import_release(self,layers_list,lv_dict,default_release):
# poky,meta,rocko => 4;openembedded-core
release = default_release
for line,path,version,layer_path,is_toaster_clone in layers_list:
key = "%s,%s,%s" % (line,path,version)
if key in lv_dict:
lv_id = lv_dict[key]
if 'openembedded-core' == lv_id[1]:
_log("Find_import_release(%s):version=%s,Toaster=%s" % (lv_id[1],version,is_toaster_clone))
# only versions in Toaster managed layers are accepted
if not is_toaster_clone:
break
try:
release = Release.objects.get(name=version)
except:
pass
break
_log("Find_import_release:RELEASE=%s" % release.name)
return release
# Apply the found conf layers
def apply_conf_bblayers(self,layers_list,lv_dict,project,release=None):
for line,path,version,layer_path,is_toaster_clone in layers_list:
# Assert release promote if present
if release:
version = release
# try to match the key to a layer_version
key = "%s,%s,%s" % (line,path,version)
key_short = "%s,%s,%s" % (line,path,'')
lv_id = ''
if key in lv_dict:
lv_id = lv_dict[key]
lv = Layer_Version.objects.get(pk=int(lv_id[0]))
pl,created = ProjectLayer.objects.get_or_create(project=project,
layercommit=lv)
pl.optional=False
pl.save()
_log(" %s => %s;%s" % (key,lv_id[0],lv_id[1]))
elif key_short in lv_dict:
lv_id = lv_dict[key_short]
lv = Layer_Version.objects.get(pk=int(lv_id[0]))
pl,created = ProjectLayer.objects.get_or_create(project=project,
layercommit=lv)
pl.optional=False
pl.save()
_log(" %s ?> %s" % (key,lv_dict[key_short]))
else:
_log("%s <= %s" % (key,layer_path))
found = False
# does local layer already exist in this project?
try:
for pl in ProjectLayer.objects.filter(project=project):
if pl.layercommit.layer.local_source_dir == layer_path:
found = True
_log(" Project Local Layer found!")
except Exception as e:
_log("ERROR: Local Layer '%s'" % e)
pass
if not found:
# Does Layer name+path already exist?
try:
layer_name_base = os.path.basename(layer_path)
_log("Layer_lookup: try '%s','%s'" % (layer_name_base,layer_path))
layer = Layer.objects.get(name=layer_name_base,local_source_dir = layer_path)
# Found! Attach layer_version and ProjectLayer
layer_version = Layer_Version.objects.create(
layer=layer,
project=project,
layer_source=LayerSource.TYPE_IMPORTED)
layer_version.save()
pl,created = ProjectLayer.objects.get_or_create(project=project,
layercommit=layer_version)
pl.optional=False
pl.save()
found = True
# add layer contents to this layer version
scan_layer_content(layer,layer_version)
_log(" Parent Local Layer found in db!")
except Exception as e:
_log("Layer_exists_test_failed: Local Layer '%s'" % e)
pass
if not found:
                    # Ensure that the layer path exists, in case of user typo
if not os.path.isdir(layer_path):
_log("ERROR:Layer path '%s' not found" % layer_path)
continue
# Add layer to db and attach project to it
layer_name_base = os.path.basename(layer_path)
# generate a unique layer name
layer_name_matches = {}
for layer in Layer.objects.filter(name__contains=layer_name_base):
layer_name_matches[layer.name] = '1'
layer_name_idx = 0
layer_name_test = layer_name_base
while layer_name_test in layer_name_matches.keys():
layer_name_idx += 1
layer_name_test = "%s_%d" % (layer_name_base,layer_name_idx)
# create the layer and layer_verion objects
layer = Layer.objects.create(name=layer_name_test)
layer.local_source_dir = layer_path
layer_version = Layer_Version.objects.create(
layer=layer,
project=project,
layer_source=LayerSource.TYPE_IMPORTED)
layer.save()
layer_version.save()
pl,created = ProjectLayer.objects.get_or_create(project=project,
layercommit=layer_version)
pl.optional=False
pl.save()
# register the layer's content
_log(" Local Layer Add content")
scan_layer_content(layer,layer_version)
_log(" Local Layer Added '%s'!" % layer_name_test)
# Scan the project's conf files (if any)
def scan_conf_variables(self,project_path):
self.vars['TOPDIR'] = project_path
# scan the project's settings, add any new layers or variables
if os.path.isfile("%s/conf/local.conf" % project_path):
self.scan_conf("%s/conf/local.conf" % project_path)
self.scan_conf("%s/conf/bblayers.conf" % project_path)
# Import then disable old style Toaster conf files (before 'merged_attr')
old_toaster_local = "%s/conf/toaster.conf" % project_path
if os.path.isfile(old_toaster_local):
self.scan_conf(old_toaster_local)
shutil.move(old_toaster_local, old_toaster_local+"_old")
old_toaster_layer = "%s/conf/toaster-bblayers.conf" % project_path
if os.path.isfile(old_toaster_layer):
self.scan_conf(old_toaster_layer)
shutil.move(old_toaster_layer, old_toaster_layer+"_old")
# Scan the found conf variables (if any)
def apply_conf_variables(self,project,layers_list,lv_dict,release=None):
if self.vars:
# Catch vars relevant to Toaster (in case no Toaster section)
self.update_project_vars(project,'DISTRO')
self.update_project_vars(project,'MACHINE')
self.update_project_vars(project,'IMAGE_INSTALL_append')
self.update_project_vars(project,'IMAGE_FSTYPES')
self.update_project_vars(project,'PACKAGE_CLASSES')
# These vars are typically only assigned by Toaster
#self.update_project_vars(project,'DL_DIR')
#self.update_project_vars(project,'SSTATE_DIR')
# Assert found Toaster vars
for var in self.toaster_vars.keys():
pv, create = ProjectVariable.objects.get_or_create(project = project, name = var)
pv.value = self.toaster_vars[var]
_log("* Add/update Toaster var '%s' = '%s'" % (pv.name,pv.value))
pv.save()
# Assert found BBLAYERS
if 0 < verbose:
for pl in ProjectLayer.objects.filter(project=project):
release_name = 'None' if not pl.layercommit.release else pl.layercommit.release.name
print(" BEFORE:ProjectLayer=%s,%s,%s,%s" % (pl.layercommit.layer.name,release_name,pl.layercommit.branch,pl.layercommit.commit))
self.apply_conf_bblayers(layers_list,lv_dict,project,release)
if 0 < verbose:
for pl in ProjectLayer.objects.filter(project=project):
release_name = 'None' if not pl.layercommit.release else pl.layercommit.release.name
print(" AFTER :ProjectLayer=%s,%s,%s,%s" % (pl.layercommit.layer.name,release_name,pl.layercommit.branch,pl.layercommit.commit))
def handle(self, *args, **options):
project_name = options['name']
project_path = options['path']
project_callback = options['callback'] if options['callback'] else ''
release_name = options['release'] if options['release'] else ''
#
# Delete project
#
if options['delete_project']:
try:
print("Project '%s' delete from Toaster database" % (project_name))
project = Project.objects.get(name=project_name)
# TODO: deep project delete
project.delete()
print("Project '%s' Deleted" % (project_name))
return
except Exception as e:
print("Project '%s' not found, not deleted (%s)" % (project_name,e))
return
#
# Create/Update/Import project
#
# See if project (by name) exists
project = None
try:
# Project already exists
project = Project.objects.get(name=project_name)
except Exception as e:
pass
# Find the installation's default release
default_release = Release.objects.get(id=1)
# SANITY: if 'reconfig' but project does not exist (deleted externally), switch to 'import'
if ("reconfigure" == options['command']) and project is None:
options['command'] = 'import'
# 'Configure':
if "configure" == options['command']:
# Note: ignore any existing conf files
# create project, SANITY: reuse any project of same name
project = Project.objects.create_project(project_name,default_release,project)
# 'Re-configure':
if "reconfigure" == options['command']:
# Scan the directory's conf files
self.scan_conf_variables(project_path)
# Scan the layer list
layers_list,lv_dict = self.extract_bblayers()
# Apply any new layers or variables
self.apply_conf_variables(project,layers_list,lv_dict)
# 'Import':
if "import" == options['command']:
# Scan the directory's conf files
self.scan_conf_variables(project_path)
# Remove these Toaster controlled variables
for var in ('DL_DIR','SSTATE_DIR'):
self.vars.pop(var, None)
self.toaster_vars.pop(var, None)
# Scan the layer list
layers_list,lv_dict = self.extract_bblayers()
# Find the directory's release, and promote to default_release if local paths
release = self.find_import_release(layers_list,lv_dict,default_release)
# create project, SANITY: reuse any project of same name
project = Project.objects.create_project(project_name,release,project)
# Apply any new layers or variables
self.apply_conf_variables(project,layers_list,lv_dict,release)
# WORKAROUND: since we now derive the release, redirect 'newproject_specific' to 'project_specific'
project.set_variable('INTERNAL_PROJECT_SPECIFIC_SKIPRELEASE','1')
# Set up the project's meta data
project.builddir = project_path
project.merged_attr = True
project.set_variable(Project.PROJECT_SPECIFIC_CALLBACK,project_callback)
project.set_variable(Project.PROJECT_SPECIFIC_STATUS,Project.PROJECT_SPECIFIC_EDIT)
if ("configure" == options['command']) or ("import" == options['command']):
# preset the mode and default image recipe
project.set_variable(Project.PROJECT_SPECIFIC_ISNEW,Project.PROJECT_SPECIFIC_NEW)
project.set_variable(Project.PROJECT_SPECIFIC_DEFAULTIMAGE,"core-image-minimal")
# Assert any extended/custom actions or variables for new non-Toaster projects
if not len(self.toaster_vars):
pass
else:
project.set_variable(Project.PROJECT_SPECIFIC_ISNEW,Project.PROJECT_SPECIFIC_NONE)
# Save the updated Project
project.save()
_log("Buildimport:project='%s' at '%d'" % (project_name,project.id))
if ('DEFAULT_IMAGE' in self.vars) and (self.vars['DEFAULT_IMAGE']):
print("|Default_image=%s|Project_id=%d" % (self.vars['DEFAULT_IMAGE'],project.id))
else:
print("|Project_id=%d" % (project.id))
| 41.851724
| 203
| 0.546634
|
840e7ad4ffa1da0e7f84310f6c671e19aba3814d
| 13987
|
py
|
Python
|
test/functest/testcase_5.py
|
georgepar/sdnvpn-mirror
|
0224ff358ef1ee79f090a79a6e54346f945b994f
|
[
"Apache-2.0"
] | null | null | null |
test/functest/testcase_5.py
|
georgepar/sdnvpn-mirror
|
0224ff358ef1ee79f090a79a6e54346f945b994f
|
[
"Apache-2.0"
] | null | null | null |
test/functest/testcase_5.py
|
georgepar/sdnvpn-mirror
|
0224ff358ef1ee79f090a79a6e54346f945b994f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import argparse
import os
from random import randint
import sys
import time
import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
import utils as test_utils
from results import Results
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--report",
help="Create json result file",
action="store_true")
args = parser.parse_args()
logger = ft_logger.Logger("sdnvpn-testcase-5").getLogger()
REPO_PATH = os.environ['repos_dir'] + '/sdnvpn/'
VM_BOOT_TIMEOUT = 180
config_file = REPO_PATH + 'test/functest/config.yaml'
INSTANCE_1_NAME = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.instance_1_name", config_file)
INSTANCE_1_IP = "10.10.10.11"
INSTANCE_2_NAME = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.instance_2_name", config_file)
INSTANCE_2_IP = "10.10.10.12"
INSTANCE_3_NAME = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.instance_3_name", config_file)
INSTANCE_3_IP = "10.10.11.13"
INSTANCE_4_NAME = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.instance_4_name", config_file)
INSTANCE_4_IP = "10.10.10.12"
INSTANCE_5_NAME = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.instance_5_name", config_file)
INSTANCE_5_IP = "10.10.11.13"
IMAGE_NAME = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.image_name", config_file)
IMAGE_FILENAME = ft_utils.get_functest_config(
"general.openstack.image_file_name")
IMAGE_FORMAT = ft_utils.get_functest_config(
"general.openstack.image_disk_format")
IMAGE_PATH = ft_utils.get_functest_config(
"general.directories.dir_functest_data") + "/" + IMAGE_FILENAME
KEYFILE_PATH = REPO_PATH + 'test/functest/id_rsa'
# NEUTRON Private Network parameters
NET_1_NAME = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.net_1_name", config_file)
SUBNET_1a_NAME = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.subnet_1a_name", config_file)
SUBNET_1a_CIDR = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.subnet_1a_cidr", config_file)
SUBNET_1b_NAME = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.subnet_1b_name", config_file)
SUBNET_1b_CIDR = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.subnet_1b_cidr", config_file)
ROUTER_1_NAME = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.router_1_name", config_file)
NET_2_NAME = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.net_2_name", config_file)
SUBNET_2a_NAME = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.subnet_2a_name", config_file)
SUBNET_2a_CIDR = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.subnet_2a_cidr", config_file)
SUBNET_2b_NAME = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.subnet_2b_name", config_file)
SUBNET_2b_CIDR = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.subnet_2b_cidr", config_file)
ROUTER_2_NAME = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.router_2_name", config_file)
SECGROUP_NAME = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.sdnvpn_sg_name", config_file)
SECGROUP_DESCR = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.sdnvpn_sg_descr", config_file)
TARGETS_1 = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.targets1", config_file)
TARGETS_2 = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.targets2", config_file)
SUCCESS_CRITERIA = ft_utils.get_parameter_from_yaml(
"testcases.testcase_5.success_criteria", config_file)
TEST_DB = ft_utils.get_functest_config("results.test_db_url")
LINE_LENGTH = 90 # length for the summary table
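# Test flow: create two networks, boot five instances spread over two compute
# nodes, associate network 1 to BGPVPN 1 and router 2 to BGPVPN 2, then verify
# intra-VPN connectivity and inter-VPN isolation over SSH.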
def main():
global LINE_LENGTH
results = Results(LINE_LENGTH)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
results.add_to_summary(0, "=")
nova_client = os_utils.get_nova_client()
neutron_client = os_utils.get_neutron_client()
glance_client = os_utils.get_glance_client()
logger.debug("Using private key %s injected to the VMs." % KEYFILE_PATH)
keyfile = open(KEYFILE_PATH, 'r')
key = keyfile.read()
keyfile.close()
files = {"/home/cirros/id_rsa": key}
image_id = os_utils.create_glance_image(glance_client,
IMAGE_NAME,
IMAGE_PATH,
disk=IMAGE_FORMAT,
container="bare",
public=True)
network_1_id, _, _ = test_utils.create_network(neutron_client,
NET_1_NAME,
SUBNET_1a_NAME,
SUBNET_1a_CIDR,
ROUTER_1_NAME,
SUBNET_1b_NAME,
SUBNET_1b_CIDR)
network_2_id, _, router_2_id = test_utils.create_network(neutron_client,
NET_2_NAME,
SUBNET_2a_NAME,
SUBNET_2a_CIDR,
ROUTER_2_NAME,
SUBNET_2b_NAME,
SUBNET_2b_CIDR)
sg_id = os_utils.create_security_group_full(neutron_client,
SECGROUP_NAME, SECGROUP_DESCR)
# Get hypervisors zones
compute_nodes = os_utils.get_hypervisors(nova_client)
num_compute_nodes = len(compute_nodes)
if num_compute_nodes < 2:
logger.error("There are %s compute nodes in the deployment. "
"Minimum number of nodes to complete the test is 2."
% num_compute_nodes)
sys.exit(-1)
logger.debug("Compute nodes: %s" % compute_nodes)
av_zone_1 = "nova:" + compute_nodes[0]
av_zone_2 = "nova:" + compute_nodes[1]
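# Spread the instances over two compute nodes so connectivity is verified across hosts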
# boot INSTANCES
userdata_common = test_utils.generate_userdata_common()
vm_2 = test_utils.create_instance(nova_client,
INSTANCE_2_NAME,
image_id,
network_1_id,
sg_id,
fixed_ip=INSTANCE_2_IP,
secgroup_name=SECGROUP_NAME,
compute_node=av_zone_1,
userdata=userdata_common)
vm_2_ip = vm_2.networks.itervalues().next()[0]
logger.debug("Instance '%s' booted successfully. IP='%s'." %
(INSTANCE_2_NAME, vm_2_ip))
vm_3 = test_utils.create_instance(nova_client,
INSTANCE_3_NAME,
image_id,
network_1_id,
sg_id,
fixed_ip=INSTANCE_3_IP,
secgroup_name=SECGROUP_NAME,
compute_node=av_zone_2,
userdata=userdata_common)
vm_3_ip = vm_3.networks.itervalues().next()[0]
logger.debug("Instance '%s' booted successfully. IP='%s'." %
(INSTANCE_3_NAME, vm_3_ip))
vm_5 = test_utils.create_instance(nova_client,
INSTANCE_5_NAME,
image_id,
network_2_id,
sg_id,
fixed_ip=INSTANCE_5_IP,
secgroup_name=SECGROUP_NAME,
compute_node=av_zone_2,
userdata=userdata_common)
vm_5_ip = vm_5.networks.itervalues().next()[0]
logger.debug("Instance '%s' booted successfully. IP='%s'." %
(INSTANCE_5_NAME, vm_5_ip))
# We boot vm5 first because we need vm5_ip for vm4 userdata
u4 = test_utils.generate_userdata_with_ssh(
[INSTANCE_1_IP, INSTANCE_3_IP, INSTANCE_5_IP])
vm_4 = test_utils.create_instance(nova_client,
INSTANCE_4_NAME,
image_id,
network_2_id,
sg_id,
fixed_ip=INSTANCE_4_IP,
secgroup_name=SECGROUP_NAME,
compute_node=av_zone_1,
userdata=u4,
files=files)
vm_4_ip = vm_4.networks.itervalues().next()[0]
logger.debug("Instance '%s' booted successfully. IP='%s'." %
(INSTANCE_4_NAME, vm_4_ip))
# We boot VM1 at the end because we need to get the IPs first to generate
# the userdata
u1 = test_utils.generate_userdata_with_ssh(
[INSTANCE_2_IP, INSTANCE_3_IP, INSTANCE_4_IP, INSTANCE_5_IP])
vm_1 = test_utils.create_instance(nova_client,
INSTANCE_1_NAME,
image_id,
network_1_id,
sg_id,
fixed_ip=INSTANCE_1_IP,
secgroup_name=SECGROUP_NAME,
compute_node=av_zone_1,
userdata=u1,
files=files)
vm_1_ip = vm_1.networks.itervalues().next()[0]
logger.debug("Instance '%s' booted successfully. IP='%s'." %
(INSTANCE_1_NAME, vm_1_ip))
msg = ("Create VPN1 with eRT=iRT")
logger.info(msg)
results.add_to_summary(1, msg)
vpn1_name = "sdnvpn-1-" + str(randint(100000, 999999))
kwargs = {"import_targets": TARGETS_2,
"export_targets": TARGETS_2,
"route_targets": TARGETS_2,
"name": vpn1_name}
bgpvpn1 = os_utils.create_bgpvpn(neutron_client, **kwargs)
bgpvpn1_id = bgpvpn1['bgpvpn']['id']
logger.debug("VPN1 created details: %s" % bgpvpn1)
msg = ("Associate network '%s' to the VPN." % NET_1_NAME)
logger.info(msg)
results.add_to_summary(1, msg)
results.add_to_summary(0, "-")
os_utils.create_network_association(
neutron_client, bgpvpn1_id, network_1_id)
test_utils.wait_for_bgp_net_assoc(
neutron_client, bgpvpn1_id, network_1_id)
# Wait for VMs to get ips.
instances_up = test_utils.wait_for_instances_up(vm_1, vm_2,
vm_3, vm_4,
vm_5)
if not instances_up:
logger.error("One or more instances is down")
sys.exit(-1)
logger.info("Waiting for the VMs to connect to each other using the"
" updated network configuration")
time.sleep(30)
# 10.10.10.12 should return sdnvpn-2 to sdnvpn-1
results.check_ssh_output(
vm_1, vm_1_ip, vm_2, vm_2_ip, expected=INSTANCE_2_NAME, timeout=200)
# 10.10.11.13 should return sdnvpn-3 to sdnvpn-1
results.check_ssh_output(
vm_1, vm_1_ip, vm_3, vm_3_ip, expected=INSTANCE_3_NAME, timeout=30)
results.add_to_summary(0, "-")
msg = ("Create VPN2 with eRT=iRT")
logger.info(msg)
results.add_to_summary(1, msg)
vpn2_name = "sdnvpn-2-" + str(randint(100000, 999999))
kwargs = {"import_targets": TARGETS_1,
"export_targets": TARGETS_1,
"route_targets": TARGETS_1,
"name": vpn2_name}
bgpvpn2 = os_utils.create_bgpvpn(neutron_client, **kwargs)
bgpvpn2_id = bgpvpn2['bgpvpn']['id']
logger.debug("VPN created details: %s" % bgpvpn2)
msg = ("Associate network '%s' to the VPN2." % NET_2_NAME)
logger.info(msg)
results.add_to_summary(1, msg)
results.add_to_summary(0, "-")
os_utils.create_router_association(
neutron_client, bgpvpn2_id, router_2_id)
test_utils.wait_for_bgp_router_assoc(
neutron_client, bgpvpn2_id, router_2_id)
logger.info("Waiting for the VMs to connect to each other using the"
" updated network configuration")
time.sleep(30)
# 10.10.11.13 should return sdnvpn-5 to sdnvpn-4
results.check_ssh_output(
vm_4, vm_4_ip, vm_5, vm_5_ip, expected=INSTANCE_5_NAME, timeout=30)
# 10.10.10.11 should return "not reachable" to sdnvpn-4
results.check_ssh_output(
vm_4, vm_4_ip, vm_1, vm_1_ip, expected="not reachable", timeout=30)
results.add_to_summary(0, "=")
logger.info("\n%s" % results.summary)
if results.test_result == "PASS":
logger.info("All the sub tests have passed as expected.")
else:
logger.info("One or more sub tests have failed.")
status = "PASS"
success = 100 - \
(100 * int(results.num_tests_failed) / int(results.num_tests))
if success < int(SUCCESS_CRITERIA):
status = "FAILED"
return {"status": status, "details": results.details}
if __name__ == '__main__':
main()
| 41.627976
| 78
| 0.595053
|