blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c5c99755f83c77bbb115fd5e364be87b9f009220 | b049a961f100444dde14599bab06a0a4224d869b | /sdk/python/pulumi_azure_native/containerregistry/v20190601preview/_enums.py | d0e595ad3fef9d9ce637c62acc0fa0a0ab83b51f | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | pulumi/pulumi-azure-native | b390c88beef8381f9a71ab2bed5571e0dd848e65 | 4c499abe17ec6696ce28477dde1157372896364e | refs/heads/master | 2023-08-30T08:19:41.564780 | 2023-08-28T19:29:04 | 2023-08-28T19:29:04 | 172,386,632 | 107 | 29 | Apache-2.0 | 2023-09-14T13:17:00 | 2019-02-24T20:30:21 | Python | UTF-8 | Python | false | false | 2,852 | py | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'Architecture',
'BaseImageTriggerType',
'OS',
'ResourceIdentityType',
'SecretObjectType',
'SourceControlType',
'SourceRegistryLoginMode',
'SourceTriggerEvent',
'StepType',
'TaskStatus',
'TokenType',
'TriggerStatus',
'UpdateTriggerPayloadType',
'Variant',
]
# Auto-generated enumerations (see the "do not edit by hand" header): each
# class mixes in ``str`` so members compare equal to their wire-format
# string values when serialized to / parsed from the REST API.
class Architecture(str, Enum):
    """
    The OS architecture.
    """
    AMD64 = "amd64"
    X86 = "x86"
    ARCHITECTURE_386 = "386"  # prefixed: Python identifiers cannot start with a digit
    ARM = "arm"
    ARM64 = "arm64"
class BaseImageTriggerType(str, Enum):
    """
    The type of the auto trigger for base image dependency updates.
    """
    ALL = "All"
    RUNTIME = "Runtime"
class OS(str, Enum):
    """
    The operating system type required for the run.
    """
    WINDOWS = "Windows"
    LINUX = "Linux"
class ResourceIdentityType(str, Enum):
    """
    The identity type.
    """
    SYSTEM_ASSIGNED = "SystemAssigned"
    USER_ASSIGNED = "UserAssigned"
    SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
    NONE = "None"
class SecretObjectType(str, Enum):
    """
    The type of the secret object which determines how the value of the secret object has to be
    interpreted.
    """
    OPAQUE = "Opaque"
    VAULTSECRET = "Vaultsecret"
class SourceControlType(str, Enum):
    """
    The type of source control service.
    """
    GITHUB = "Github"
    VISUAL_STUDIO_TEAM_SERVICE = "VisualStudioTeamService"
class SourceRegistryLoginMode(str, Enum):
    """
    The authentication mode which determines the source registry login scope. The credentials for the source registry
    will be generated using the given scope. These credentials will be used to login to
    the source registry during the run.
    """
    NONE = "None"
    DEFAULT = "Default"
class SourceTriggerEvent(str, Enum):
    # Repository events that can trigger a task run (no docstring in the
    # generated source).
    COMMIT = "commit"
    PULLREQUEST = "pullrequest"
class StepType(str, Enum):
    """
    The type of the step.
    """
    DOCKER = "Docker"
    FILE_TASK = "FileTask"
    ENCODED_TASK = "EncodedTask"
class TaskStatus(str, Enum):
    """
    The current status of task.
    """
    DISABLED = "Disabled"
    ENABLED = "Enabled"
class TokenType(str, Enum):
    """
    The type of Auth token.
    """
    PAT = "PAT"
    O_AUTH = "OAuth"
class TriggerStatus(str, Enum):
    """
    The current status of trigger.
    """
    DISABLED = "Disabled"
    ENABLED = "Enabled"
class UpdateTriggerPayloadType(str, Enum):
    """
    Type of Payload body for Base image update triggers.
    """
    DEFAULT = "Default"
    TOKEN = "Token"
class Variant(str, Enum):
    """
    Variant of the CPU.
    """
    V6 = "v6"
    V7 = "v7"
    V8 = "v8"
| [
"github@mikhail.io"
] | github@mikhail.io |
9c4bf09d819e17fe582f6b48d0e82b956fa722d2 | bdfb6084a33e4b443ffc2a97673ecbfa736d947b | /.history/vision/tensorflow_object_detect/scripts/detect_lane_20210224132019.py | 517152289cafe1d8a511b7cb127705ebdbd59333 | [
"MIT"
] | permissive | robcn/Autopilot-Demo | 5c830a0f721d3e8df864c0fcb26e9ea280bbe3fe | 0b7178ae3f417f529d7015373a1e51eb71df28ab | refs/heads/master | 2023-03-16T00:20:31.498672 | 2021-02-24T13:34:06 | 2021-02-24T13:34:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,127 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import rospy
import numpy as np
import cv2
from cv_bridge import CvBridge
from std_msgs.msg import UInt8, Float64
from sensor_msgs.msg import Image, CompressedImage
import time
# from vision_msgs.msg import Center
class DetectLane():
    """
    ROS node that detects the white and yellow lane markings in camera
    images and publishes their horizontal (x) centers for the
    lane-following controller.
    """
    def __init__(self):
        # Camera input and detection/debug outputs.
        self.sub_image_original = rospy.Subscriber('/camera/image', Image, self.cbFindLane, queue_size = 1)
        self.pub_image_detect = rospy.Publisher('/detect/lane', Image, queue_size = 1)
        self.pub_center_white_lane = rospy.Publisher('/control/white_lane', Float64, queue_size = 1)
        self.pub_center_yellow_lane = rospy.Publisher('/control/yellow_lane', Float64, queue_size = 1)
        self.pub_center = rospy.Publisher('/control/center', Float64, queue_size = 1)
        self.cvBridge = CvBridge()
        # Frame counter used to subsample the camera stream (process 1 in 3).
        self.counter = 1
        # HSV thresholds for the white lane: low (*_l) / high (*_h) bound
        # per channel (hue, saturation, lightness/value).
        self.hue_white_l = 0
        self.hue_white_h = 179
        self.saturation_white_l = 0
        self.saturation_white_h = 30
        self.lightness_white_l = 221
        self.lightness_white_h = 255
        # HSV thresholds for the yellow lane.
        self.hue_yellow_l = 26
        self.hue_yellow_h = 34
        self.saturation_yellow_l = 43
        self.saturation_yellow_h = 255
        self.lightness_yellow_l = 46
        self.lightness_yellow_h = 255
    def cbFindLane(self, image_msg):
        """Image callback: process only every third frame."""
        # Change the frame rate by yourself. Now, it is set to 1/3 (10fps).
        # An inappropriate frame rate may cause a huge delay in the whole
        # recognition process; this is up to your computer's power.
        if self.counter % 3 != 0:
            self.counter += 1
            return
        self.counter = 1
        cv_image = self.cvBridge.imgmsg_to_cv2(image_msg, "bgr8")
        # Find white and yellow lanes and publish the results.
        self.maskLane(cv_image)
    def maskLane(self, image):
        """
        Detect the white and yellow lane contours in a BGR image, draw them
        on the image, and publish the annotated image plus lane-center x
        coordinates.  Each center topic is only published when the matching
        lane was actually found (previously a missing lane crashed the
        callback with an IndexError on ``contours[0]``).
        """
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        # HSV ranges.  NOTE: the original routed these thresholds through
        # locals whose *_l/*_h suffixes were swapped (e.g.
        # ``Hue_white_h = self.hue_white_l``); the bounds below are the same
        # values with coherent naming.
        lower_white = np.array([self.hue_white_l, self.saturation_white_l, self.lightness_white_l])
        upper_white = np.array([self.hue_white_h, self.saturation_white_h, self.lightness_white_h])
        lower_yellow = np.array([self.hue_yellow_l, self.saturation_yellow_l, self.lightness_yellow_l])
        upper_yellow = np.array([self.hue_yellow_h, self.saturation_yellow_h, self.lightness_yellow_h])
        # Threshold, denoise (erode) and smooth each lane mask.
        mask_white = cv2.inRange(hsv, lower_white, upper_white)
        mask_yellow = cv2.inRange(hsv, lower_yellow, upper_yellow)
        kernel = np.ones((5, 5))
        gaussian_white = cv2.GaussianBlur(cv2.erode(mask_white, kernel), (5, 5), 0)
        gaussian_yellow = cv2.GaussianBlur(cv2.erode(mask_yellow, kernel), (5, 5), 0)
        # Find the lane contours and draw them on the original image.
        contours_white, _ = cv2.findContours(gaussian_white, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        contours_yellow, _ = cv2.findContours(gaussian_yellow, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(image, contours_white, -1, (139, 104, 0), 3)
        cv2.drawContours(image, contours_yellow, -1, (139, 104, 0), 3)
        # Compute each lane center only when a contour was actually found.
        white_center = None
        if len(contours_white) > 0:
            white_center = self.calculate_average(contours_white[0])
        else:
            print("The Camera Can`t Catch The White Lane.")
        yellow_center = None
        if len(contours_yellow) > 0:
            yellow_center = self.calculate_average(contours_yellow[0])
        else:
            print("The Camera Can`t Catch The Yellow Lane.")
        # Publish the annotated image unconditionally, centers when known.
        self.pub_image_detect.publish(self.cvBridge.cv2_to_imgmsg(image, 'bgr8'))
        if white_center is not None:
            self.pub_center_white_lane.publish(white_center)
        if yellow_center is not None:
            self.pub_center_yellow_lane.publish(yellow_center)
        if white_center is not None and yellow_center is not None:
            self.pub_center.publish((white_center + yellow_center) / 2)
    def calculate_average(self, input):
        """
        Return the mean x coordinate of an OpenCV contour (an iterable of
        ``[[x, y]]`` points), or 0.0 for an empty contour (previously a
        ZeroDivisionError).  The parameter name shadows the ``input``
        builtin but is kept for interface stability.
        """
        if len(input) == 0:
            return 0.0
        return sum(point[0][0] for point in input) / len(input)
    def main(self):
        """Block until the node is shut down."""
        rospy.spin()
if __name__ == '__main__':
rospy.init_node('detect_lane')
node = DetectLane()
node.main() | [
"hemingshan_1999@163.com"
] | hemingshan_1999@163.com |
9196ebb5aec8baa5f60c3c5cf2fb5d3011f98fff | c16d4c86ef1f874b701c707b5a01556d36fa0d4f | /test/test_ipamsvc_create_option_code_response.py | dc4d9225dc29304f6595bb33cef120d70bfe5095 | [] | no_license | uuand/ibcsp_ipamsvc | 7f9f4c2509ce9049e2ab39d1072e9d1d31a5165c | 94a5475b11997551f732ceffcbd627d27ac37b2c | refs/heads/master | 2022-11-21T07:22:12.131660 | 2020-07-24T08:38:55 | 2020-07-24T08:38:55 | 281,871,334 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,583 | py | # coding: utf-8
"""
IP Address Management API
The IPAM/DHCP Application is a BloxOne DDI service providing IP address management and DHCP protocol features. The IPAM component provides visibility into and provisioning tools to manage networking spaces, monitoring and reporting of entire IP address infrastructures, and integration with DNS and DHCP protocols. The DHCP component provides DHCP protocol configuration service with on-prem host serving DHCP protocol. It is part of the full-featured, DDI cloud solution that enables customers to deploy large numbers of protocol servers to deliver DNS and DHCP throughout their enterprise network. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import ibcsp_ipamsvc
from ibcsp_ipamsvc.models.ipamsvc_create_option_code_response import IpamsvcCreateOptionCodeResponse # noqa: E501
from ibcsp_ipamsvc.rest import ApiException
class TestIpamsvcCreateOptionCodeResponse(unittest.TestCase):
    """IpamsvcCreateOptionCodeResponse unit test stubs"""
    def setUp(self):
        # Generated stub: no fixtures are required yet.
        pass
    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass
    def testIpamsvcCreateOptionCodeResponse(self):
        """Test IpamsvcCreateOptionCodeResponse"""
        # FIXME: construct object with mandatory attributes with example values
        # model = ibcsp_ipamsvc.models.ipamsvc_create_option_code_response.IpamsvcCreateOptionCodeResponse()  # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
| [
"jens-peter.wand@metrosystems.net"
] | jens-peter.wand@metrosystems.net |
1b51955dcd03e031fd0e85fcc4b87bdab823692d | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/battle/shared/siege_component.py | 73f51182224a9c1ece84e7e55c2e91739f7e0985 | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 5,817 | py | # 2017.02.03 21:49:10 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/shared/siege_component.py
import BigWorld
from gui.shared.utils.TimeInterval import TimeInterval
from constants import VEHICLE_SIEGE_STATE
class _ComponentUpdater(object):
"""
This is a base updater class, it contains the common logic for updating Siege indicator.
"""
__slots__ = ('_parentObj', '_totalTime', '_timeLeft', '_siegeState', '_engineState', '_isSmooth')
def __init__(self, parentObj, totalTime, timeLeft, siegeState, engineState, isSmooth):
"""
Constructor, initializes internal variables.
:param parentObj: reference on SiegeModeIndicator class
:param totalTime: time which is necessary to switch state (normal/siege)
:param timeLeft: time left to switch to a state
:param siegeState: integer, see constants for each constant
:param engineState: string, describing engine's state
"""
super(_ComponentUpdater, self).__init__()
self._parentObj = parentObj
self._totalTime = totalTime
self._timeLeft = timeLeft
self._siegeState = siegeState
self._engineState = engineState
self._isSmooth = isSmooth
def __repr__(self):
return '_UpdaterComponent(totalTime = {}, timeLeft = {}, siegeState = {}, engineState = {})'.format(self._totalTime, self._timeLeft, self._siegeState, self._engineState)
def clear(self):
self._stopTick()
self._parentObj = None
return
def show(self):
self._startTick()
def _startTick(self):
raise NotImplementedError
def _stopTick(self):
raise NotImplementedError
class _ActionScriptUpdater(_ComponentUpdater):
    """
    This updater is used only in real battle (non-replays) for performance reasons.
    It will tell Flash about times and states. Animations(and ticks) are implemented on Flash side.
    """
    __slots__ = ()
    def _startTick(self):
        # Hand the full timing snapshot to ActionScript once; Flash owns the
        # countdown animation after this call, so no Python-side timer runs.
        self._parentObj.as_switchSiegeStateS(self._totalTime, self._timeLeft, self._siegeState, self._engineState, self._isSmooth)
    def _stopTick(self):
        # Nothing to stop: no Python-side timer was ever started.
        pass
class _PythonUpdater(_ComponentUpdater):
    """
    This updater is used only in REPLAYS.
    It will use internal timer to tick every 0.05 second.
    This solution is necessary to display actual timeLeft, states, etc correctly
    during replay's timeWarp, rewind, start/stop, etc.
    """
    __slots__ = ('_timeInterval', '_startTime', '_finishTime', '__weakref__')
    def __init__(self, parentObj, totalTime, timeLeft, siegeState, engineState, isSmooth):
        super(_PythonUpdater, self).__init__(parentObj, totalTime, timeLeft, siegeState, engineState, isSmooth)
        # Fires self._tick() every 0.05 s once started.  NOTE(review): the
        # '__weakref__' slot suggests TimeInterval holds a weak reference to
        # self -- confirm against TimeInterval's implementation.
        self._timeInterval = TimeInterval(0.05, self, '_tick')
        # Absolute server time at which the siege-state switch completes.
        self._startTime = BigWorld.serverTime()
        self._finishTime = self._startTime + timeLeft
    def clear(self):
        # Stop the local timer before the base class drops _parentObj.
        self._timeInterval.stop()
        super(_PythonUpdater, self).clear()
    def _startTick(self):
        if self._siegeState in VEHICLE_SIEGE_STATE.SWITCHING:
            # Recompute remaining time from server time so replay seeks
            # (timewarp/rewind) show the correct countdown; clamp at zero.
            timeLeft = max(0, self._finishTime - BigWorld.serverTime())
            if timeLeft:
                self._updateSnapshot(timeLeft)
                self._timeInterval.start()
        else:
            # Already in a stable state: push one static snapshot and drop
            # the smooth-animation flag.
            self._updateSnapshot(self._timeLeft)
            self._isSmooth = False
    def _stopTick(self):
        self._timeInterval.stop()
    def _tick(self):
        timeLeft = self._finishTime - BigWorld.serverTime()
        # Keep updating until the switch finishes, unless the engine died.
        if timeLeft >= 0 and self._engineState != 'destroyed':
            self._updateSnapshot(timeLeft)
    def _updateSnapshot(self, timeLeft):
        # Push one explicit time/state snapshot to the Flash component.
        self._parentObj.as_switchSiegeStateSnapshotS(self._totalTime, timeLeft, self._siegeState, self._engineState, self._isSmooth)
class _SiegeComponent(object):
"""
This class maintains a componentUpdater class. It creates and shows an updater after any changes
"""
__slots__ = ('_componentUpdater', '_parentObj', '_clazz')
def __init__(self, parentObj, clazz):
super(_SiegeComponent, self).__init__()
self._componentUpdater = None
self._parentObj = parentObj
self._clazz = clazz
return
def invalidate(self, totalTime, timeLeft, siegeState, engineState, isSmooth):
self._clearUpdater()
self._componentUpdater = self._clazz(self._parentObj, totalTime, timeLeft, siegeState, engineState, isSmooth)
self._componentUpdater.show()
def clear(self):
self._parentObj = None
self._clearUpdater()
return
def _clearUpdater(self):
if self._componentUpdater is not None:
self._componentUpdater.clear()
return
class _DefaultSiegeComponent(_SiegeComponent):
    """
    The component is used in real battles, it will use _ActionScriptUpdater.
    """
    __slots__ = ()
    def __init__(self, parentObj):
        # In live battles the Flash side animates the countdown itself.
        super(_DefaultSiegeComponent, self).__init__(parentObj, _ActionScriptUpdater)
class _ReplaySiegeComponent(_SiegeComponent):
    """
    The component is used in Replays, it will use _PythonUpdater.
    """
    __slots__ = ()
    def __init__(self, parentObj):
        # Replays need the Python-side timer so seeks show correct times.
        super(_ReplaySiegeComponent, self).__init__(parentObj, _PythonUpdater)
def createSiegeComponent(siegeModeIndicator, isReplayPlaying):
    """Factory: pick the replay-aware siege component while a replay is
    playing, otherwise the default (ActionScript-driven) one."""
    factory = _ReplaySiegeComponent if isReplayPlaying else _DefaultSiegeComponent
    return factory(siegeModeIndicator)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\battle\shared\siege_component.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:49:11 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
3a2072c419b039df9feecca52bca18f6bb7d8ad4 | c81d0270a4e924357735dd955b9cd463c5fa87e7 | /note/migrations/0009_notes_title_salt.py | 9c3192de173d8d6a48c40bbf2396b30beca46347 | [
"MIT"
] | permissive | pyprism/Hiren-Notes | 25210384ee8ef777f1e05bb6e32a1bde2d6c4f31 | 7548ef8927cdac4342ab54e72cf949fd484342be | refs/heads/master | 2021-06-05T04:17:24.229239 | 2019-04-12T16:17:22 | 2019-04-12T16:17:22 | 19,016,855 | 1 | 0 | MIT | 2021-03-19T21:58:00 | 2014-04-22T04:16:53 | JavaScript | UTF-8 | Python | false | false | 409 | py | # Generated by Django 2.0.2 on 2018-03-02 10:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds a nullable ``title_salt`` text
    # column to the ``notes`` model.  Presumably a per-note salt used when
    # hashing/encrypting the title -- confirm against the Notes model.
    dependencies = [
        ('note', '0008_auto_20180302_1025'),
    ]
    operations = [
        migrations.AddField(
            model_name='notes',
            name='title_salt',
            field=models.CharField(blank=True, max_length=1000, null=True),
        ),
    ]
| [
"git.pyprism@gmail.com"
] | git.pyprism@gmail.com |
14f22c74f6937fb2d3e1b639b5efb8a0673e5765 | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/e2e/jit/test_SELU_1.py | a2c481e2d501436f0c628320c52c9085d53004f5 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 602 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test jit cases
"""
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "utils"))
from utils.yaml_loader import YamlLoader
from jittrans import JitTrans
yaml_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "yaml", "nn.yml")
yml = YamlLoader(yaml_path)
def test_SELU_1():
"""test SELU_1"""
jit_case = JitTrans(case=yml.get_case_info("SELU_1"))
jit_case.jit_run()
| [
"825276847@qq.com"
] | 825276847@qq.com |
8d4da3d22d15ab100ea55a5fc8e0ff016faad178 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Scripts/Windows Screen Grabber.py | ad25866ec1abc82b1fb1290512d123242b04df15 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:d42d1123aca40cb02062035961a92fbd679ac421ced273e1fe467c16558b125b
size 1194
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
3a1984fc6224def23eb696ddb9f5dad7ba49d56a | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D09B/RPCALLD09BUN.py | 1841fa1a0ab031e47d5def4db3d66a6b4b480a92 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,502 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD09BUN import recorddefs
# Message structure table for the UN/EDIFACT RPCALL message, directory D09B
# (auto-generated, see the header comment).  Each dict describes a segment:
# ID is the segment tag, MIN/MAX the repetition bounds, and LEVEL nests the
# segment group's children.
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
    {ID: 'BGM', MIN: 1, MAX: 1},
    {ID: 'DTM', MIN: 1, MAX: 99},
    {ID: 'RFF', MIN: 0, MAX: 9999, LEVEL: [
        {ID: 'DTM', MIN: 0, MAX: 99},
    ]},
    {ID: 'NAD', MIN: 0, MAX: 999, LEVEL: [
        {ID: 'RFF', MIN: 0, MAX: 99, LEVEL: [
            {ID: 'DTM', MIN: 0, MAX: 9},
        ]},
        {ID: 'CTA', MIN: 0, MAX: 99, LEVEL: [
            {ID: 'COM', MIN: 0, MAX: 9},
        ]},
    ]},
    {ID: 'DOC', MIN: 0, MAX: 99999, LEVEL: [
        {ID: 'DTM', MIN: 0, MAX: 9},
        {ID: 'FTX', MIN: 0, MAX: 9},
        {ID: 'LIN', MIN: 0, MAX: 99999, LEVEL: [
            {ID: 'PIA', MIN: 0, MAX: 99},
            {ID: 'IMD', MIN: 0, MAX: 99},
            {ID: 'DTM', MIN: 0, MAX: 99},
            {ID: 'ALI', MIN: 0, MAX: 9},
            {ID: 'RFF', MIN: 0, MAX: 999, LEVEL: [
                {ID: 'DTM', MIN: 0, MAX: 9},
            ]},
            {ID: 'NAD', MIN: 0, MAX: 999, LEVEL: [
                {ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
                    {ID: 'COM', MIN: 0, MAX: 9},
                ]},
            ]},
            {ID: 'CCI', MIN: 0, MAX: 9999, LEVEL: [
                {ID: 'CAV', MIN: 0, MAX: 99},
                {ID: 'FTX', MIN: 0, MAX: 99},
            ]},
        ]},
    ]},
    {ID: 'CNT', MIN: 0, MAX: 99},
    {ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
3314d516df6394caddba78195cce8da8b6f91c13 | 69f1529b5b4ef46f0b5a963667b9c00fd484487b | /docassemble_webapp/docassemble/webapp/restart.py | 0a665feeb0fba0085fc5bc2769f202a86083772f | [
"MIT"
] | permissive | vfulco/docassemble | 571e9e0e5140cc8c667cb51d4308744107b1c214 | 482aae3217dd80e018a670588142cf694c300fc9 | refs/heads/master | 2021-01-11T04:56:25.263813 | 2016-12-15T03:58:15 | 2016-12-15T03:58:15 | 76,629,690 | 1 | 0 | null | 2016-12-16T06:53:07 | 2016-12-16T06:53:07 | null | UTF-8 | Python | false | false | 800 | py | import sys
import os
import docassemble.base.config
from docassemble.base.config import daconfig, s3_config, S3_ENABLED
if __name__ == "__main__":
    # Load the docassemble configuration before reading ``daconfig`` below;
    # the command line may name an alternate configuration file.
    docassemble.base.config.load(arguments=sys.argv)
# Path of the WSGI entry point; touching its mtime makes the web server
# reload the application.
WEBAPP_PATH = daconfig.get('webapp', '/usr/share/docassemble/webapp/docassemble.wsgi')
def main():
    """Refresh the local config from S3 (when enabled), then touch the WSGI
    file so the web application restarts, and exit."""
    if S3_ENABLED:
        import docassemble.webapp.amazon
        s3 = docassemble.webapp.amazon.s3object(s3_config)
        key = s3.get_key('config.yml')
        if key.exists():
            # NOTE(review): ``config_file`` is not defined anywhere in this
            # module, so this branch raises NameError when S3 is enabled and
            # the key exists -- presumably it should come from
            # docassemble.base.config; confirm the intended path.
            key.get_contents_to_filename(config_file)
            sys.stderr.write("Wrote config file based on copy on s3\n")
    wsgi_file = WEBAPP_PATH
    if os.path.isfile(wsgi_file):
        # Updating the WSGI file's mtime triggers an application reload.
        with open(wsgi_file, 'a'):
            os.utime(wsgi_file, None)
    sys.exit(0)
if __name__ == "__main__":
    main()
| [
"jpyle@philalegal.org"
] | jpyle@philalegal.org |
dd97068117e43a24fd426e68dc7383d79d6320fa | 7945fefc483129a486e0e856a478979cfc26d011 | /hcp/azure/task_dki_hcp_variability.py | 6129df7817e41a20ee436966241d04e234a62c3f | [] | no_license | dipy/dipy-dki-paper | dfc6f0e662f1e845d17c3929591203454f3d3343 | af6a65d8880cf589ccb069d6b1b7ce9bcc437d71 | refs/heads/master | 2021-07-01T15:08:54.805406 | 2021-06-01T20:31:31 | 2021-06-01T20:31:31 | 236,548,899 | 2 | 1 | null | 2021-06-01T20:30:40 | 2020-01-27T17:26:11 | Jupyter Notebook | UTF-8 | Python | false | false | 12,437 | py |
import argparse
import logging
from AFQ.data import fetch_hcp
import s3fs
import nibabel as nib
from dipy.core.gradients import gradient_table
import os.path as op
import AFQ.data as afd
from dipy.reconst import dti, dki
import time
import numpy as np
def hcp_dki(subject, aws_access_key, aws_secret_key, hcp_aws_access_key,
            hcp_aws_secret_key, outbucket):
    """
    Fit DTI and DKI models to one HCP subject's diffusion data for several
    b-value shell combinations and upload the fitted parameters plus FA and
    MD maps to S3 under ``<outbucket>/derivatives/<tag>/``.

    Results already on S3 are reused: if the final MD map exists the whole
    combination is skipped, and if only the fitted parameters exist they
    are downloaded instead of refit.  (The five previously copy-pasted,
    nearly identical sections are deduplicated into ``fit_and_upload``.)

    :param subject: subject ID in the HCP dataset.
    :param aws_access_key: AWS access key for the output bucket.
    :param aws_secret_key: AWS secret key for the output bucket.
    :param hcp_aws_access_key: AWS access key for the HCP dataset bucket.
    :param hcp_aws_secret_key: AWS secret key for the HCP dataset bucket.
    :param outbucket: S3 bucket/prefix receiving the derivatives.
    """
    fs = s3fs.S3FileSystem(key=aws_access_key, secret=aws_secret_key)

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(__name__)  # noqa

    log.info(f"Getting data for subject {subject}")
    # Fetch the HCP data for the given subject (session 01).
    _, hcp_bids = fetch_hcp(
        [subject],
        profile_name=False,
        aws_access_key_id=hcp_aws_access_key,
        aws_secret_access_key=hcp_aws_secret_key)

    dwi_path = op.join(afd.afq_home, 'HCP_1200', 'derivatives', 'dmriprep',
                       f'sub-{subject}', 'ses-01', 'dwi')
    dwi_img = nib.load(op.join(dwi_path, f'sub-{subject}_dwi.nii.gz'))
    dwi_data = dwi_img.get_fdata()
    b0_threshold = 50
    gtab = gradient_table(
        op.join(dwi_path, f'sub-{subject}_dwi.bval'),
        op.join(dwi_path, f'sub-{subject}_dwi.bvec'),
        b0_threshold=b0_threshold)

    def fit_and_upload(tag, model_label, model_cls, fit_cls, shell_mask,
                       shell_desc):
        """
        Fit one model on the volumes selected by ``shell_mask`` and upload
        the parameter image plus FA and MD maps, reusing cached S3 results.

        :param tag: derivatives folder / local file prefix (e.g. 'dti1000').
        :param model_label: 'DTI' or 'DKI'; used in the output filenames.
        :param model_cls: model class (e.g. ``dti.TensorModel``).
        :param fit_cls: matching fit class used when loading cached params.
        :param shell_mask: boolean array selecting volumes by b-value.
        :param shell_desc: human-readable shell description for log lines.
        """
        remote_dir = op.join("%s/derivatives/%s" % (outbucket, tag),
                             f'sub-{subject}', 'ses-01', 'dwi')
        last_result = op.join(
            remote_dir, f'sub-{subject}_dwi_model-{model_label}_MD.nii.gz')
        if fs.exists(last_result):
            # The MD map is written last, so its presence means this
            # combination completed on a previous run.
            return

        lpath = f"{tag}_params.nii.gz"
        rpath = op.join(
            remote_dir,
            f'sub-{subject}_dwi_model-{model_label}_diffmodel.nii.gz')
        sub_data = dwi_data[..., shell_mask]
        sub_gtab = gradient_table(gtab.bvals[shell_mask],
                                  gtab.bvecs[shell_mask])
        model = model_cls(sub_gtab)
        if not fs.exists(rpath):
            log.info(f"Fitting {model_label} with {shell_desc}")
            t1 = time.time()
            fit = model.fit(sub_data, mask=np.ones(dwi_data.shape[:3]))
            nib.save(nib.Nifti1Image(fit.model_params, dwi_img.affine),
                     lpath)
            fs.upload(lpath, rpath)
            log.info(f"That took {time.time() - t1} seconds")
        else:
            log.info(f"Looks like I've already fit {model_label} "
                     f"with {shell_desc}")
            log.info(f"Downloading {model_label} params from S3")
            fs.download(rpath, lpath)
            fit = fit_cls(model, nib.load(lpath).get_fdata())

        # Derive and upload the scalar maps (FA first, then MD, so the MD
        # map remains a reliable completion marker).
        for metric, volume in (('FA', fit.fa), ('MD', fit.md)):
            metric_lpath = f"{tag}_{metric.lower()}.nii.gz"
            nib.save(nib.Nifti1Image(volume, dwi_img.affine), metric_lpath)
            fs.upload(metric_lpath, op.join(
                remote_dir,
                f'sub-{subject}_dwi_model-{model_label}_{metric}.nii.gz'))

    bvals = gtab.bvals
    fit_and_upload('dti1000', 'DTI', dti.TensorModel, dti.TensorFit,
                   bvals < 1100, 'b=1000')
    fit_and_upload('dti1000_2000', 'DTI', dti.TensorModel, dti.TensorFit,
                   bvals < 2100, 'b=1000 and b=2000')
    fit_and_upload('dki1000_2000', 'DKI', dki.DiffusionKurtosisModel,
                   dki.DiffusionKurtosisFit, bvals < 2100,
                   'b=1000 and b=2000')
    fit_and_upload('dki2000_3000', 'DKI', dki.DiffusionKurtosisModel,
                   dki.DiffusionKurtosisFit,
                   (bvals > 1985) | (bvals < 50), 'b=2000 and b=3000')
    fit_and_upload('dki1000_3000', 'DKI', dki.DiffusionKurtosisModel,
                   dki.DiffusionKurtosisFit,
                   (bvals > 2500) | (bvals < 1500), 'b=1000 and b=3000')
if __name__ == "__main__":
    # Command-line entry point: collect the subject ID, the two AWS
    # credential pairs (output bucket and HCP dataset) and the output
    # bucket name, then run the fits.
    parser = argparse.ArgumentParser()
    parser.add_argument('--subject', type=int, required=True,
                        help='subject ID in the HCP dataset')
    parser.add_argument('--ak', type=str, required=True,
                        help='AWS Access Key')
    parser.add_argument('--sk', type=str, required=True,
                        help='AWS Secret Key')
    parser.add_argument('--hcpak', type=str, required=True,
                        help='AWS Access Key for HCP dataset')
    parser.add_argument('--hcpsk', type=str, required=True,
                        help='AWS Secret Key for HCP dataset')
    parser.add_argument('--outbucket', type=str, required=True,
                        help='Where do I put the outputs')
    args = parser.parse_args()
    hcp_dki(args.subject,
            args.ak, args.sk,
            args.hcpak, args.hcpsk,
            args.outbucket)
| [
"arokem@gmail.com"
] | arokem@gmail.com |
e9adcfea412be35def881f9ad89b9d1f617a9db5 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-media/azure/mgmt/media/models/image.py | 494ef17110669de589fc08332b0dc0c445d6da6d | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 3,591 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .video import Video
class Image(Video):
    """Describes the basic properties for generating thumbnails from the input
    video.
    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: JpgImage, PngImage
    All required parameters must be populated in order to send to Azure.
    :param label: An optional label for the codec. The label can be used to
    control muxing behavior.
    :type label: str
    :param odatatype: Required. Constant filled by server.
    :type odatatype: str
    :param key_frame_interval: The distance between two key frames, thereby
    defining a group of pictures (GOP). The value should be a non-zero integer
    in the range [1, 30] seconds, specified in ISO 8601 format. The default is
    2 seconds (PT2S).
    :type key_frame_interval: timedelta
    :param stretch_mode: The resizing mode - how the input video will be
    resized to fit the desired output resolution(s). Default is AutoSize.
    Possible values include: 'None', 'AutoSize', 'AutoFit'
    :type stretch_mode: str or ~azure.mgmt.media.models.StretchMode
    :param start: Required. The position in the input video from where to
    start generating thumbnails. The value can be in absolute timestamp (ISO
    8601, e.g: PT05S), or a frame count (For example, 10 for the 10th frame),
    or a relative value (For example, 1%). Also supports a macro {Best}, which
    tells the encoder to select the best thumbnail from the first few seconds
    of the video.
    :type start: str
    :param step: The intervals at which thumbnails are generated. The value
    can be in absolute timestamp (ISO 8601, e.g: PT05S for one image every 5
    seconds), or a frame count (For example, 30 for every 30 frames), or a
    relative value (For example, 1%).
    :type step: str
    :param range: The position in the input video at which to stop generating
    thumbnails. The value can be in absolute timestamp (ISO 8601, e.g: PT5M30S
    to stop at 5 minutes and 30 seconds), or a frame count (For example, 300
    to stop at the 300th frame), or a relative value (For example, 100%).
    :type range: str
    """
    # Fields the service requires before the payload can be sent.
    _validation = {
        'odatatype': {'required': True},
        'start': {'required': True},
    }
    # Maps Python attribute names to the JSON keys used on the wire.
    _attribute_map = {
        'label': {'key': 'label', 'type': 'str'},
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'},
        'stretch_mode': {'key': 'stretchMode', 'type': 'str'},
        'start': {'key': 'start', 'type': 'str'},
        'step': {'key': 'step', 'type': 'str'},
        'range': {'key': 'range', 'type': 'str'},
    }
    # Polymorphic discriminator: @odata.type selects the concrete subclass
    # during deserialization.
    _subtype_map = {
        'odatatype': {'#Microsoft.Media.JpgImage': 'JpgImage', '#Microsoft.Media.PngImage': 'PngImage'}
    }
    def __init__(self, **kwargs):
        super(Image, self).__init__(**kwargs)
        self.start = kwargs.get('start', None)
        self.step = kwargs.get('step', None)
        self.range = kwargs.get('range', None)
        self.odatatype = '#Microsoft.Media.Image'
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
6aad7ad5c4ed7a14ea13239ee6e47bfdae400035 | a2da53c90ed7876c8e790b525a66818df6fc57ac | /main.py | 0ace4937af7ab9dd166d73930f96c69dc2312714 | [] | no_license | Rishi05051997/flask-learning | 103a82378e37370a78c01c2e8a95f4032b13b0f5 | 4109fe641283bf6c86178ece72a1444580b3c101 | refs/heads/main | 2023-06-18T14:27:20.940448 | 2021-07-20T10:42:04 | 2021-07-20T10:42:04 | 387,658,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | from flask import Flask, render_template
# Minimal Flask site: four routes, each rendering a static template from
# the ``templates`` folder.
app = Flask(__name__, template_folder='templates')
@app.route("/")
def home():
    # Landing page.
    return render_template('index.html')
@app.route("/about")
def about():
    return render_template('about.html')
@app.route('/contact')
def contact():
    return render_template('contact.html')
@app.route('/post')
def post():
    return render_template('post.html')
# NOTE: debug=True enables the reloader/debugger; not for production use.
app.run(debug=True)
"vrushabhdhatral10@gmail.com"
] | vrushabhdhatral10@gmail.com |
3ad7f1f5cd34f8b6e9246a83cf38e08f43df17c2 | e6fac8e0289d9f82369d2eb8e22bc175c6f51b3b | /Arcade/Intro/Level 9/Knapsack Light/code.py | ab01c71d4bb8495d7fb0d3ce190f55db1992bf6f | [] | no_license | Zahidsqldba07/CodeFights-9 | f361c15d24f96afa26de08af273a7f8f507ced4a | 6c5d152b1ad35cf178dd74acbc44ceb5fdcdf139 | refs/heads/master | 2023-03-18T23:52:43.274786 | 2017-05-12T07:28:08 | 2017-05-12T07:28:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | # Uh... these problems are kind of trivial.
def knapsackLight(value1, weight1, value2, weight2, maxW):
    """Best total value from two items in a knapsack of capacity maxW.

    Take both items when their combined weight fits; otherwise take the
    single most valuable item that fits on its own (0 if neither fits).
    """
    if weight1 + weight2 <= maxW:
        return value1 + value2
    best = 0
    if weight1 <= maxW:
        best = max(best, value1)
    if weight2 <= maxW:
        best = max(best, value2)
    return best
| [
"hallosputnik@gmail.com"
] | hallosputnik@gmail.com |
e3c83cbf552b41004b3b9b6a6caa62486ed99d3b | 83fd7f0b557b40d0c00bdcf33f8a4d5a39fba919 | /modules/Tkinter/tkinter_examples.py | 6f983ba3bf6d58018eac6f08d211c987b334c0ae | [] | no_license | stradtkt/Python-Examples | 2ad6eef5e651803976ae05ce92a0e98944e10d13 | 6e700a23e65ba37d61ef7f3ff3da5feb1da3b33f | refs/heads/master | 2020-03-09T22:22:31.437381 | 2018-04-14T03:56:49 | 2018-04-14T03:56:49 | 129,032,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,991 | py | try:
import tkinter
except ImportError:
import Tkinter as tkinter
# tkinter._test()
# mainWindow = tkinter.Tk()
# mainWindow.title("Hello world")
# mainWindow.geometry('640x480+8+400')
# label = tkinter.Label(mainWindow, text="Hello World")
# label.pack(side='top')
# leftFrame = tkinter.Frame(mainWindow)
# leftFrame.pack(side='left', anchor='n', fill=tkinter.Y, expand=False)
# canvas = tkinter.Canvas(leftFrame, relief='raised', borderwidth=3)
# canvas.pack(side='left', anchor='n', fill=tkinter.BOTH, expand=True)
# rightFrame = tkinter.Frame(mainWindow)
# rightFrame.pack(side='right', anchor='n', expand=True)
# button1 = tkinter.Button(rightFrame, text="Open")
# button2 = tkinter.Button(rightFrame, text="Edit")
# button3 = tkinter.Button(rightFrame, text="Close")
# button1.pack(side='left')
# button2.pack(side='left')
# button3.pack(side='left')
# mainWindow.mainloop()
# Grid-geometry version of the demo window (the pack-based variant is the
# commented-out block above).
mainWindow = tkinter.Tk()
mainWindow.title("Hello world")
# 640x480 window, offset 8px from the right edge and 200px up from the bottom.
mainWindow.geometry('640x480-8-200')
label = tkinter.Label(mainWindow, text="Hello World")
label.grid(row=0, column=0)
leftFrame = tkinter.Frame(mainWindow)
leftFrame.grid(row=1, column=1)
canvas = tkinter.Canvas(leftFrame, relief='raised', borderwidth=1)
canvas.grid(row=1, column=0)
rightFrame = tkinter.Frame(mainWindow)
rightFrame.grid(row=1, column=2, sticky='n')
button1 = tkinter.Button(rightFrame, text="Open")
button2 = tkinter.Button(rightFrame, text="Edit")
button3 = tkinter.Button(rightFrame, text="Close")
# Stack the buttons vertically inside the right-hand frame.
button1.grid(row=0, column=0)
button2.grid(row=1, column=0)
button3.grid(row=2, column=0)
# Equal weights let all three columns share extra horizontal space on resize.
mainWindow.columnconfigure(0, weight=1)
mainWindow.columnconfigure(1, weight=1)
mainWindow.grid_columnconfigure(2, weight=1)
leftFrame.config(relief='sunken', borderwidth=1)
rightFrame.config(relief='sunken', borderwidth=1)
# sticky makes the frames stretch to fill their grid cells.
leftFrame.grid(sticky='ns')
rightFrame.grid(sticky='new')
rightFrame.columnconfigure(0, weight=1)
button1.grid(sticky='ew')
button2.grid(sticky='ew')
button3.grid(sticky='ew')
mainWindow.mainloop()
"stradtkt22@gmail.com"
] | stradtkt22@gmail.com |
60d249fc4269358356d06893e79b8dffc1366916 | 81d635211686b1bc87af5892bd9e0fb95cc2ddb8 | /adwords api/googleads-python-lib-master/examples/dfp/v201502/custom_targeting_service/update_custom_targeting_keys.py | 365f935de4fca4a2f72a434162745dcabdd01844 | [
"Apache-2.0"
] | permissive | analyticsbot/Python-Code---Part-2 | de2f0581258b6c8b8808b4ef2884fe7e323876f0 | 12bdcfdef4472bcedc77ae61707c25a4a09cba8a | refs/heads/master | 2021-06-04T05:10:33.185766 | 2016-08-31T13:45:45 | 2016-08-31T13:45:45 | 66,679,512 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,344 | py | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates the display name of a single custom targeting key.
To determine which custom targeting keys exist, run
get_all_custom_targeting_keys_and_values.py."""
# Import appropriate modules from the client library.
from googleads import dfp
# Placeholder the user replaces with a real key id before running.
CUSTOM_TARGETING_KEY_ID = 'INSERT_CUSTOM_TARGETING_KEY_ID_HERE'
def main(client, key_id):
    # Appends " (Deprecated)" to the display name of the custom targeting
    # key with the given id (using the name as a fallback display name).
    # NOTE: this file is Python 2 (print statements below).
    # Initialize appropriate service.
    custom_targeting_service = client.GetService(
        'CustomTargetingService', version='v201502')
    # Bind :keyId in the PQL-style WHERE clause below.
    values = [{
        'key': 'keyId',
        'value': {
            'xsi_type': 'NumberValue',
            'value': key_id
        }
    }]
    query = 'WHERE id = :keyId'
    statement = dfp.FilterStatement(query, values, 1)
    # Get custom targeting keys by statement.
    response = custom_targeting_service.getCustomTargetingKeysByStatement(
        statement.ToStatement())
    # Update each local custom targeting key object by changing its display name.
    if 'results' in response:
        updated_keys = []
        for key in response['results']:
            if not key['displayName']:
                key['displayName'] = key['name']
            key['displayName'] += ' (Deprecated)'
            updated_keys.append(key)
        keys = custom_targeting_service.updateCustomTargetingKeys(updated_keys)
        # Display results.
        if keys:
            for key in keys:
                print ('Custom targeting key with id \'%s\', name \'%s\', display name '
                       '\'%s\', and type \'%s\' was updated.'
                       % (key['id'], key['name'], key['displayName'], key['type']))
    else:
        print 'No custom targeting keys were found to update.'
if __name__ == '__main__':
    # Initialize client object.
    dfp_client = dfp.DfpClient.LoadFromStorage()
    main(dfp_client, CUSTOM_TARGETING_KEY_ID)
| [
"ravi.shankar1788@gmail.com"
] | ravi.shankar1788@gmail.com |
4ecbb5094d9e12a552d5a5feb96339bc971ade3b | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/minWindow_20200618151102.py | c6c0994a2195ff78f8088916365811c2e25eeb39 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | # for this question we have two pointers
# left pointer
# right pointer
# we move the right pointer and maintain the position of the left pointer
# when we find the word we move the left pointer
# store that word its shortest form
# we keep moving the right pointer
# sz,azjskfzts
def minWindow(str1, str2):
    """Return the smallest substring of ``str2`` containing every character
    of ``str1`` (with multiplicity), or '' if no such window exists.

    Replaces the original broken draft, which had a syntax error
    (``print(,...)``) and relied on ``str.find`` truthiness (``find`` returns
    an index, where 0 — a match at the start — is falsy).

    Classic two-pointer sliding window: grow ``right`` until all required
    characters are covered, then shrink ``left`` while coverage holds,
    recording the shortest window seen. O(len(str1) + len(str2)).
    """
    from collections import Counter
    need = Counter(str1)          # chars still required; counts go negative for surplus
    missing = len(str1)           # required chars not yet inside the window
    best_start, best_end = 0, 0   # best window [start:end); (0, 0) == "none found"
    left = 0
    for right, ch in enumerate(str2, 1):
        if need[ch] > 0:
            missing -= 1
        need[ch] -= 1
        if missing == 0:
            # Drop surplus characters from the left edge.
            while need[str2[left]] < 0:
                need[str2[left]] += 1
                left += 1
            if best_end == 0 or right - left < best_end - best_start:
                best_start, best_end = left, right
    return str2[best_start:best_end]


print(minWindow("sz", "azjskfzts"))
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
02bc1425fec78e9b004e5d15542a8180ac5f69b1 | 4e6fbaabd7d8e2504c9ac75dda322327f5514fb1 | /rooms/views.py | c2a54f88f2101396ae080809a5d610c300223da4 | [] | no_license | amrebrahem22/lavish | bd82d0131a6923f1749b53b7bd5f54c2ebc08be0 | d3a9d492f93f441aafeb2082f2d627c9d4f16769 | refs/heads/master | 2023-01-09T03:00:54.881316 | 2020-11-16T23:04:21 | 2020-11-16T23:04:21 | 300,782,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,112 | py | from django.views.generic import ListView, DetailView, View, UpdateView, FormView
from django.shortcuts import render, redirect, reverse
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.http import Http404
from django.contrib.messages.views import SuccessMessageMixin
from django_countries import countries
from .models import Room, RoomType, Amenity, Facility, Photo
from . import forms
from users import mixins as user_mixins
class HomeView(ListView):
    """Paginated list of all rooms for the landing page."""
    model = Room
    paginate_by = 10
    # A trailing page with <= 5 items is merged into the previous page.
    paginate_orphans = 5
    ordering = 'created'
    context_object_name = 'rooms'
    template_name = 'rooms/home.html'
class RoomDetail(DetailView):
    """Detail page for a single room, looked up by pk from the URL."""
    model = Room
    context_object_name = 'room'
    template_name = 'rooms/room_detail.html'
class SearchView(View):
    """ SearchView Definition """
    def get(self, request):
        # Render the search form; when a country is present in the query
        # string, validate the full form and run a filtered room search.
        country = request.GET.get("country")
        if country:
            form = forms.SearchForm(request.GET)
            if form.is_valid():
                city = form.cleaned_data.get("city")
                country = form.cleaned_data.get("country")
                room_type = form.cleaned_data.get("room_type")
                price = form.cleaned_data.get("price")
                guests = form.cleaned_data.get("guests")
                bedrooms = form.cleaned_data.get("bedrooms")
                beds = form.cleaned_data.get("beds")
                baths = form.cleaned_data.get("baths")
                instant_book = form.cleaned_data.get("instant_book")
                superhost = form.cleaned_data.get("superhost")
                amenities = form.cleaned_data.get("amenities")
                facilities = form.cleaned_data.get("facilities")
                # Build ORM filter kwargs only for fields the user supplied.
                filter_args = {}
                if city != "Anywhere":
                    filter_args["city__startswith"] = city
                filter_args["country"] = country
                if room_type is not None:
                    filter_args["room_type"] = room_type
                if price is not None:
                    filter_args["price__lte"] = price
                if guests is not None:
                    filter_args["guests__gte"] = guests
                if bedrooms is not None:
                    filter_args["bedrooms__gte"] = bedrooms
                if beds is not None:
                    filter_args["beds__gte"] = beds
                if baths is not None:
                    filter_args["baths__gte"] = baths
                if instant_book is True:
                    filter_args["instant_book"] = True
                if superhost is True:
                    filter_args["host__superhost"] = True
                # NOTE(review): each assignment overwrites the previous one,
                # so only the last selected amenity/facility is applied —
                # presumably multiple filters were intended; verify.
                for amenity in amenities:
                    filter_args["amenities"] = amenity
                for facility in facilities:
                    filter_args["facilities"] = facility
                qs = Room.objects.filter(**filter_args).order_by("-created")
                paginator = Paginator(qs, 10, orphans=5)
                page = request.GET.get("page", 1)
                rooms = paginator.get_page(page)
                return render(
                    request, "rooms/search.html", {"form": form, "rooms": rooms}
                )
        else:
            # No country given: show a blank search form.
            form = forms.SearchForm()
        return render(request, "rooms/search.html", {"form": form})
class EditRoomView(user_mixins.LoggedInOnlyView,UpdateView):
    """Room edit form; only the room's host may edit it."""
    model = Room
    template_name = "rooms/room_edit.html"
    fields = (
        "name",
        "description",
        "country",
        "city",
        "price",
        "address",
        "guests",
        "beds",
        "bedrooms",
        "baths",
        "check_in",
        "check_out",
        "instant_book",
        "room_type",
        "amenities",
        "facilities",
        "house_rules",
    )
    def get_object(self, queryset=None):
        # 404 rather than 403 so non-hosts can't probe which rooms exist.
        room = super().get_object(queryset=queryset)
        if room.host.pk != self.request.user.pk:
            raise Http404()
        return room
class RoomPhotosView(user_mixins.LoggedInOnlyView, DetailView):
    """Photo management page for a room; restricted to the room's host."""
    model = Room
    template_name = "rooms/room_photos.html"
    def get_object(self, queryset=None):
        # Same host-only guard as EditRoomView: 404 for anyone else.
        room = super().get_object(queryset=queryset)
        if room.host.pk != self.request.user.pk:
            raise Http404()
        return room
@login_required
def delete_photo(request, room_pk, photo_pk):
    # Delete a photo from a room, but only when the requester hosts the
    # room; always redirects back to the room's photo page (or home if
    # the room does not exist).
    user = request.user
    try:
        room = Room.objects.get(pk=room_pk)
        if room.host.pk != user.pk:
            messages.error(request, "Cant delete that photo")
        else:
            Photo.objects.filter(pk=photo_pk).delete()
            messages.success(request, "Photo Deleted")
        return redirect(reverse("rooms:photos", kwargs={"pk": room_pk}))
    except Room.DoesNotExist:
        return redirect(reverse("core:home"))
class EditPhotoView(user_mixins.LoggedInOnlyView, SuccessMessageMixin, UpdateView):
    """Edit a photo's caption, then redirect back to the room's photo list.

    Bug fix: the original declared ``model = models.Photo``, but this module
    imports ``Photo`` directly (``from .models import ... Photo``) and never
    binds a ``models`` name, so importing the module raised NameError.
    """
    model = Photo
    template_name = "rooms/photo_edit.html"
    # The photo's pk arrives as "photo_pk" in the URL, not the default "pk".
    pk_url_kwarg = "photo_pk"
    success_message = "Photo Updated"
    fields = ("caption",)
    def get_success_url(self):
        """Return the photo-list URL of the room this photo belongs to."""
        room_pk = self.kwargs.get("room_pk")
        return reverse("rooms:photos", kwargs={"pk": room_pk})
class AddPhotoView(user_mixins.LoggedInOnlyView, FormView):
    """Upload a new photo for the room identified by the URL's pk."""
    template_name = "rooms/photo_create.html"
    form_class = forms.CreatePhotoForm
    def form_valid(self, form):
        # The form's save() needs the room pk to attach the photo.
        pk = self.kwargs.get("pk")
        form.save(pk)
        messages.success(self.request, "Photo Uploaded")
        return redirect(reverse("rooms:photos", kwargs={"pk": pk}))
class CreateRoomView(user_mixins.LoggedInOnlyView, FormView):
    """Create a room owned by the logged-in user."""
    form_class = forms.CreateRoomForm
    template_name = "rooms/room_create.html"
    def form_valid(self, form):
        room = form.save()
        # Attach the host after the initial save, then persist m2m fields
        # (amenities/facilities/house_rules) separately.
        room.host = self.request.user
        room.save()
        form.save_m2m()
        messages.success(self.request, "Room Uploaded")
        return redirect(reverse("rooms:detail", kwargs={"pk": room.pk}))
"amrebrahem226@gmail.com"
] | amrebrahem226@gmail.com |
c60efcd71d1eaff4867f2a16f1e709d21fde306d | 2827d7a837eb29c3cb07793ab6d3d5a753e18669 | /alipay/aop/api/request/AlipayOpenPublicLabelUserQueryRequest.py | d7190c5a4e866c01cc8ea08ddd9e55c82f9a8d1f | [
"Apache-2.0"
] | permissive | shaobenbin/alipay-sdk-python | 22e809b8f5096bec57d2bb25414f64bdc87fa8b3 | 5232ad74dff2e8a6e0e7646ab3318feefa07a37d | refs/heads/master | 2020-03-21T04:51:39.935692 | 2018-06-21T07:03:31 | 2018-06-21T07:03:31 | 138,131,022 | 0 | 0 | null | 2018-06-21T06:50:24 | 2018-06-21T06:50:24 | null | UTF-8 | Python | false | false | 3,980 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayOpenPublicLabelUserQueryModel import AlipayOpenPublicLabelUserQueryModel
class AlipayOpenPublicLabelUserQueryRequest(object):
    """Request wrapper for the Alipay API method
    ``alipay.open.public.label.user.query``.

    Holds the business payload (``biz_content``) plus the common request
    parameters (version, terminal info, callback URLs, user-defined params)
    and serialises them into the flat dict sent to the gateway.

    Bug fix: the ``return_url`` property getter previously returned
    ``self._notify_url`` (copy-paste error), so reading ``return_url`` —
    and therefore ``get_params()`` — used the wrong URL.
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept a ready model instance, or a plain dict to be converted.
        if isinstance(value, AlipayOpenPublicLabelUserQueryModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayOpenPublicLabelUserQueryModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        # Fixed: was ``return self._notify_url``.
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Non-dict values are silently ignored (preserves original behaviour).
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Add a single user-defined text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Serialise the request into the parameter dict sent to Alipay."""
        params = dict()
        params[P_METHOD] = 'alipay.open.public.label.user.query'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """This request carries no file uploads, so this is always empty."""
        multipart_params = dict()
        return multipart_params
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
1137b8d72fb9f4c1d45625d4c93ff119b780a6cf | bb150497a05203a718fb3630941231be9e3b6a32 | /models/PaddleHub/hub_all_func/all_module/all_hrnet48_imagenet_ssld.py | 4e18dac3eaa0bf62f908389b457657a2fc4cccee | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 520 | py | """hrnet48_imagenet_ssld"""
import os
import paddle
import paddlehub as hub
# Pick the execution device once at import time; use_gpu records the choice.
if paddle.is_compiled_with_cuda():
    paddle.set_device("gpu")
    use_gpu = True
else:
    paddle.set_device("cpu")
    use_gpu = False
def test_hrnet48_imagenet_ssld_predict():
    """hrnet48_imagenet_ssld predict"""
    # Side effects: installs the hub module (network), runs inference on a
    # local image, then uninstalls the module again.
    os.system("hub install hrnet48_imagenet_ssld")
    model = hub.Module(name="hrnet48_imagenet_ssld")
    result = model.predict(["doc_img.jpeg"])
    print(result)
    os.system("hub uninstall hrnet48_imagenet_ssld")
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
7120ce421c25f5d5078df1119fe035cdf4506131 | 75dcb56e318688499bdab789262839e7f58bd4f6 | /The Complete Data Structures and Algorithms Course in Python/src/Section 18 Cracking Linked List Interview Questions/Q1_RemoveDups.py | 179d364fc062a36387c167b262f78109862dcb5d | [] | no_license | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 1,143 | py | # Created by Elshad Karimov on 17/05/2020.
# Copyright © 2020 AppMillers. All rights reserved.
# Question 1 - Remove Dups : Write a code to remove duplicates from an unsorted linked list.
from LinkedList import LinkedList
def removeDups(ll):
    """Strip duplicate values from an unsorted singly linked list.

    Single pass with a set of already-seen values: keeps the first
    occurrence of each value and unlinks later repeats. O(n) time,
    O(n) extra space. Returns the list, or None when it is empty.
    """
    if ll.head is None:
        return
    seen = {ll.head.value}
    node = ll.head
    while node.next:
        nxt = node.next
        if nxt.value in seen:
            node.next = nxt.next
        else:
            seen.add(nxt.value)
            node = nxt
    return ll
def removeDups1(ll):
    """Duplicate removal without auxiliary storage (O(n**2) time).

    For each node, a runner pointer scans the remainder of the list and
    unlinks every later node carrying the same value. Returns the head
    node, or None when the list is empty.
    """
    if ll.head is None:
        return
    anchor = ll.head
    while anchor:
        scanner = anchor
        while scanner.next:
            if scanner.next.value == anchor.value:
                scanner.next = scanner.next.next
            else:
                scanner = scanner.next
        anchor = anchor.next
    return ll.head
# Demo: build a random 10-node list (values 0-99), print it, dedupe it
# in place with the O(1)-space variant, and print the result.
customLL = LinkedList()
customLL.generate(10, 0, 99)
print(customLL)
removeDups1(customLL)
print(customLL)
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
b4ae7da39f3c9febb2d2ccb2a33598d86c4aa524 | 2eeeefe48c56d0dfae4fd568dbaee3c8d2cf3463 | /0Demo/RqDemo/tests/test_worker.py | bae0f6fbdb0addf77fb0f6f45319bc0b693d4259 | [] | no_license | lijianmingCN/pybase | f6377f7944c043f7241452fcffccc3f49ef0cef9 | 7286a022ff7f40a7289cf69d73e8418a1ecf7b88 | refs/heads/master | 2021-01-02T08:10:42.215672 | 2017-08-01T03:16:29 | 2017-08-01T03:16:29 | 98,953,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,835 | py | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
from datetime import timedelta
from time import sleep
import signal
import time
from multiprocessing import Process
from tests import RQTestCase, slow
from tests.fixtures import (create_file, create_file_after_timeout,
div_by_zero, do_nothing, say_hello, say_pid)
from tests.helpers import strip_microseconds
from rq import get_failed_queue, Queue, SimpleWorker, Worker
from rq.compat import as_text, PY2
from rq.job import Job, JobStatus
from rq.registry import StartedJobRegistry
from rq.suspension import resume, suspend
from rq.utils import utcnow
class CustomJob(Job):
    """Job subclass used to verify that Worker honours a custom job_class."""
    pass
class CustomQueue(Queue):
    """Queue subclass used to verify that Worker honours a custom queue_class."""
    pass
class TestWorker(RQTestCase):
    def test_create_worker(self):
        """Worker creation using various inputs.

        Queues may be given as names (str, or bytes on Python 2), Queue
        objects, or any iterable/list of either; all forms must resolve
        to the same Queue list.
        """
        # With single string argument
        w = Worker('foo')
        self.assertEqual(w.queues[0].name, 'foo')
        # With list of strings
        w = Worker(['foo', 'bar'])
        self.assertEqual(w.queues[0].name, 'foo')
        self.assertEqual(w.queues[1].name, 'bar')
        # With iterable of strings
        w = Worker(iter(['foo', 'bar']))
        self.assertEqual(w.queues[0].name, 'foo')
        self.assertEqual(w.queues[1].name, 'bar')
        # Also accept byte strings in Python 2
        if PY2:
            # With single byte string argument
            w = Worker(b'foo')
            self.assertEqual(w.queues[0].name, 'foo')
            # With list of byte strings
            w = Worker([b'foo', b'bar'])
            self.assertEqual(w.queues[0].name, 'foo')
            self.assertEqual(w.queues[1].name, 'bar')
            # With iterable of byte strings
            w = Worker(iter([b'foo', b'bar']))
            self.assertEqual(w.queues[0].name, 'foo')
            self.assertEqual(w.queues[1].name, 'bar')
        # With single Queue
        w = Worker(Queue('foo'))
        self.assertEqual(w.queues[0].name, 'foo')
        # With iterable of Queues
        w = Worker(iter([Queue('foo'), Queue('bar')]))
        self.assertEqual(w.queues[0].name, 'foo')
        self.assertEqual(w.queues[1].name, 'bar')
        # With list of Queues
        w = Worker([Queue('foo'), Queue('bar')])
        self.assertEqual(w.queues[0].name, 'foo')
        self.assertEqual(w.queues[1].name, 'bar')
    def test_work_and_quit(self):
        """Worker processes work, then quits."""
        # burst=True makes work() return after draining the queues instead
        # of blocking; its return value reports whether any job ran.
        fooq, barq = Queue('foo'), Queue('bar')
        w = Worker([fooq, barq])
        self.assertEqual(w.work(burst=True), False,
                         'Did not expect any work on the queue.')
        fooq.enqueue(say_hello, name='Frank')
        self.assertEqual(w.work(burst=True), True,
                         'Expected at least some work done.')
    def test_worker_ttl(self):
        """Worker ttl."""
        # Registering birth must add the worker key to Redis with an
        # expiry set, so dead workers eventually disappear.
        w = Worker([])
        w.register_birth()  # ugly: our test should only call public APIs
        [worker_key] = self.testconn.smembers(Worker.redis_workers_keys)
        self.assertIsNotNone(self.testconn.ttl(worker_key))
        w.register_death()
    def test_work_via_string_argument(self):
        """Worker processes work fed via string arguments."""
        # Jobs may be enqueued by dotted-path string instead of a callable.
        q = Queue('foo')
        w = Worker([q])
        job = q.enqueue('tests.fixtures.say_hello', name='Frank')
        self.assertEqual(w.work(burst=True), True,
                         'Expected at least some work done.')
        self.assertEqual(job.result, 'Hi there, Frank!')
    def test_job_times(self):
        """job times are set correctly."""
        q = Queue('foo')
        w = Worker([q])
        before = utcnow()
        # Truncate microseconds so the <= comparisons below aren't thrown
        # off by sub-second precision loss in stored timestamps.
        before = before.replace(microsecond=0)
        job = q.enqueue(say_hello)
        self.assertIsNotNone(job.enqueued_at)
        self.assertIsNone(job.started_at)
        self.assertIsNone(job.ended_at)
        self.assertEqual(w.work(burst=True), True,
                         'Expected at least some work done.')
        self.assertEqual(job.result, 'Hi there, Stranger!')
        after = utcnow()
        job.refresh()
        # All three timestamps must fall inside the [before, after] window.
        self.assertTrue(before <= job.enqueued_at <= after, 'Not %s <= %s <= %s' % (before, job.enqueued_at, after))
        self.assertTrue(before <= job.started_at <= after, 'Not %s <= %s <= %s' % (before, job.started_at, after))
        self.assertTrue(before <= job.ended_at <= after, 'Not %s <= %s <= %s' % (before, job.ended_at, after))
    def test_work_is_unreadable(self):
        """Unreadable jobs are put on the failed queue."""
        q = Queue()
        failed_q = get_failed_queue()
        self.assertEqual(failed_q.count, 0)
        self.assertEqual(q.count, 0)
        # NOTE: We have to fake this enqueueing for this test case.
        # What we're simulating here is a call to a function that is not
        # importable from the worker process.
        job = Job.create(func=div_by_zero, args=(3,))
        job.save()
        # Corrupt the stored payload so the worker cannot resolve the
        # function path when it picks the job up.
        data = self.testconn.hget(job.key, 'data')
        invalid_data = data.replace(b'div_by_zero', b'nonexisting')
        assert data != invalid_data
        self.testconn.hset(job.key, 'data', invalid_data)
        # We use the low-level internal function to enqueue any data (bypassing
        # validity checks)
        q.push_job_id(job.id)
        self.assertEqual(q.count, 1)
        # All set, we're going to process it
        w = Worker([q])
        w.work(burst=True)  # should silently pass
        self.assertEqual(q.count, 0)
        self.assertEqual(failed_q.count, 1)
    def test_work_fails(self):
        """Failing jobs are put on the failed queue."""
        q = Queue()
        failed_q = get_failed_queue()
        # Preconditions
        self.assertEqual(failed_q.count, 0)
        self.assertEqual(q.count, 0)
        # Action
        job = q.enqueue(div_by_zero)
        self.assertEqual(q.count, 1)
        # keep for later
        enqueued_at_date = strip_microseconds(job.enqueued_at)
        w = Worker([q])
        w.work(burst=True)  # should silently pass
        # Postconditions
        self.assertEqual(q.count, 0)
        self.assertEqual(failed_q.count, 1)
        # The worker must clear its current-job pointer after the failure.
        self.assertEqual(w.get_current_job_id(), None)
        # Check the job
        job = Job.fetch(job.id)
        self.assertEqual(job.origin, q.name)
        # Should be the original enqueued_at date, not the date of enqueueing
        # to the failed queue
        self.assertEqual(job.enqueued_at, enqueued_at_date)
        self.assertIsNotNone(job.exc_info)  # should contain exc_info
    def test_custom_exc_handling(self):
        """Custom exception handling."""
        def black_hole(job, *exc_info):
            # Returning False stops the handler chain, so the default
            # move-to-failed-queue behaviour never runs.
            # Don't fall through to default behaviour (moving to failed queue)
            return False
        q = Queue()
        failed_q = get_failed_queue()
        # Preconditions
        self.assertEqual(failed_q.count, 0)
        self.assertEqual(q.count, 0)
        # Action
        job = q.enqueue(div_by_zero)
        self.assertEqual(q.count, 1)
        w = Worker([q], exception_handlers=black_hole)
        w.work(burst=True)  # should silently pass
        # Postconditions
        self.assertEqual(q.count, 0)
        self.assertEqual(failed_q.count, 0)
        # Check the job
        job = Job.fetch(job.id)
        self.assertEqual(job.is_failed, True)
    def test_cancelled_jobs_arent_executed(self):  # noqa
        """Cancelling jobs."""
        SENTINEL_FILE = '/tmp/rq-tests.txt'
        try:
            # Remove the sentinel if it is leftover from a previous test run
            os.remove(SENTINEL_FILE)
        except OSError as e:
            # errno 2 == ENOENT: file was already absent, which is fine.
            if e.errno != 2:
                raise
        q = Queue()
        job = q.enqueue(create_file, SENTINEL_FILE)
        # Here, we cancel the job, so the sentinel file may not be created
        self.testconn.delete(job.key)
        w = Worker([q])
        w.work(burst=True)
        assert q.count == 0
        # Should not have created evidence of execution
        self.assertEqual(os.path.exists(SENTINEL_FILE), False)
    @slow  # noqa
    def test_timeouts(self):
        """Worker kills jobs after timeout."""
        sentinel_file = '/tmp/.rq_sentinel'
        q = Queue()
        w = Worker([q])
        # Put it on the queue with a timeout value
        # The fixture sleeps 4s before creating the file; with timeout=1
        # the worker must kill it before the file appears.
        res = q.enqueue(create_file_after_timeout,
                        args=(sentinel_file, 4),
                        timeout=1)
        try:
            os.unlink(sentinel_file)
        except OSError as e:
            if e.errno == 2:
                pass
        self.assertEqual(os.path.exists(sentinel_file), False)
        w.work(burst=True)
        self.assertEqual(os.path.exists(sentinel_file), False)
        # TODO: Having to do the manual refresh() here is really ugly!
        res.refresh()
        self.assertIn('JobTimeoutException', as_text(res.exc_info))
    def test_worker_sets_result_ttl(self):
        """Ensure that Worker properly sets result_ttl for individual jobs."""
        # Positive ttl: job key expires after that many seconds.
        q = Queue()
        job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
        w = Worker([q])
        w.work(burst=True)
        self.assertNotEqual(self.testconn._ttl(job.key), 0)
        # Job with -1 result_ttl don't expire
        job = q.enqueue(say_hello, args=('Frank',), result_ttl=-1)
        w = Worker([q])
        w.work(burst=True)
        self.assertEqual(self.testconn._ttl(job.key), -1)
        # Job with result_ttl = 0 gets deleted immediately
        job = q.enqueue(say_hello, args=('Frank',), result_ttl=0)
        w = Worker([q])
        w.work(burst=True)
        self.assertEqual(self.testconn.get(job.key), None)
    def test_worker_sets_job_status(self):
        """Ensure that worker correctly sets job status."""
        q = Queue()
        w = Worker([q])
        # Before the worker runs, the job sits in QUEUED state.
        job = q.enqueue(say_hello)
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
        self.assertEqual(job.is_queued, True)
        self.assertEqual(job.is_finished, False)
        self.assertEqual(job.is_failed, False)
        # Successful run transitions QUEUED -> FINISHED.
        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), JobStatus.FINISHED)
        self.assertEqual(job.is_queued, False)
        self.assertEqual(job.is_finished, True)
        self.assertEqual(job.is_failed, False)
        # Failed jobs should set status to "failed"
        job = q.enqueue(div_by_zero, args=(1,))
        w.work(burst=True)
        job = Job.fetch(job.id)
        self.assertEqual(job.get_status(), JobStatus.FAILED)
        self.assertEqual(job.is_queued, False)
        self.assertEqual(job.is_finished, False)
        self.assertEqual(job.is_failed, True)
def test_job_dependency(self):
"""Enqueue dependent jobs only if their parents don't fail"""
q = Queue()
w = Worker([q])
parent_job = q.enqueue(say_hello)
job = q.enqueue_call(say_hello, depends_on=parent_job)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
parent_job = q.enqueue(div_by_zero)
job = q.enqueue_call(say_hello, depends_on=parent_job)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertNotEqual(job.get_status(), JobStatus.FINISHED)
def test_get_current_job(self):
"""Ensure worker.get_current_job() works properly"""
q = Queue()
worker = Worker([q])
job = q.enqueue_call(say_hello)
self.assertEqual(self.testconn.hget(worker.key, 'current_job'), None)
worker.set_current_job_id(job.id)
self.assertEqual(
worker.get_current_job_id(),
as_text(self.testconn.hget(worker.key, 'current_job'))
)
self.assertEqual(worker.get_current_job(), job)
def test_custom_job_class(self):
"""Ensure Worker accepts custom job class."""
q = Queue()
worker = Worker([q], job_class=CustomJob)
self.assertEqual(worker.job_class, CustomJob)
def test_custom_queue_class(self):
"""Ensure Worker accepts custom queue class."""
q = CustomQueue()
worker = Worker([q], queue_class=CustomQueue)
self.assertEqual(worker.queue_class, CustomQueue)
def test_custom_queue_class_is_not_global(self):
"""Ensure Worker custom queue class is not global."""
q = CustomQueue()
worker_custom = Worker([q], queue_class=CustomQueue)
q_generic = Queue()
worker_generic = Worker([q_generic])
self.assertEqual(worker_custom.queue_class, CustomQueue)
self.assertEqual(worker_generic.queue_class, Queue)
self.assertEqual(Worker.queue_class, Queue)
def test_custom_job_class_is_not_global(self):
"""Ensure Worker custom job class is not global."""
q = Queue()
worker_custom = Worker([q], job_class=CustomJob)
q_generic = Queue()
worker_generic = Worker([q_generic])
self.assertEqual(worker_custom.job_class, CustomJob)
self.assertEqual(worker_generic.job_class, Job)
self.assertEqual(Worker.job_class, Job)
def test_work_via_simpleworker(self):
"""Worker processes work, with forking disabled,
then returns."""
fooq, barq = Queue('foo'), Queue('bar')
w = SimpleWorker([fooq, barq])
self.assertEqual(w.work(burst=True), False,
'Did not expect any work on the queue.')
job = fooq.enqueue(say_pid)
self.assertEqual(w.work(burst=True), True,
'Expected at least some work done.')
self.assertEqual(job.result, os.getpid(),
'PID mismatch, fork() is not supposed to happen here')
    def test_prepare_job_execution(self):
        """Prepare job execution does the necessary bookkeeping."""
        queue = Queue(connection=self.testconn)
        job = queue.enqueue(say_hello)
        worker = Worker([queue])
        worker.prepare_job_execution(job)

        # Updates working queue: the job must appear in the started-job
        # registry before it actually executes.
        registry = StartedJobRegistry(connection=self.testconn)
        self.assertEqual(registry.get_job_ids(), [job.id])

        # Updates worker statuses: state flips to busy and the current job
        # id is recorded on the worker hash.
        self.assertEqual(worker.get_state(), 'busy')
        self.assertEqual(worker.get_current_job_id(), job.id)
def test_work_unicode_friendly(self):
"""Worker processes work with unicode description, then quits."""
q = Queue('foo')
w = Worker([q])
job = q.enqueue('tests.fixtures.say_hello', name='Adam',
description='你好 世界!')
self.assertEqual(w.work(burst=True), True,
'Expected at least some work done.')
self.assertEqual(job.result, 'Hi there, Adam!')
self.assertEqual(job.description, '你好 世界!')
    def test_suspend_worker_execution(self):
        """Test Pause Worker Execution"""
        SENTINEL_FILE = '/tmp/rq-tests.txt'

        try:
            # Remove the sentinel if it is leftover from a previous test run
            os.remove(SENTINEL_FILE)
        except OSError as e:
            # errno 2 is ENOENT ("no such file"), which is fine here.
            if e.errno != 2:
                raise

        q = Queue()
        q.enqueue(create_file, SENTINEL_FILE)
        w = Worker([q])

        # While suspended, a burst run must leave the queue untouched.
        suspend(self.testconn)

        w.work(burst=True)
        assert q.count == 1

        # Should not have created evidence of execution
        self.assertEqual(os.path.exists(SENTINEL_FILE), False)

        # After resuming, the same burst run drains the queue and the job
        # leaves its sentinel file behind as proof of execution.
        resume(self.testconn)
        w.work(burst=True)
        assert q.count == 0
        self.assertEqual(os.path.exists(SENTINEL_FILE), True)
    def test_suspend_with_duration(self):
        """A timed suspension expires on its own and work resumes."""
        q = Queue()
        for _ in range(5):
            q.enqueue(do_nothing)
        w = Worker([q])

        # This suspends workers for working for 2 second
        suspend(self.testconn, 2)

        # So when this burst of work happens the queue should remain at 5
        w.work(burst=True)
        assert q.count == 5

        # Sleep past the 2-second suspension window.
        sleep(3)

        # The suspension should be expired now, and a burst of work should now clear the queue
        w.work(burst=True)
        assert q.count == 0
def test_worker_hash_(self):
"""Workers are hashed by their .name attribute"""
q = Queue('foo')
w1 = Worker([q], name="worker1")
w2 = Worker([q], name="worker2")
w3 = Worker([q], name="worker1")
worker_set = set([w1, w2, w3])
self.assertEqual(len(worker_set), 2)
def test_worker_sets_birth(self):
"""Ensure worker correctly sets worker birth date."""
q = Queue()
w = Worker([q])
w.register_birth()
birth_date = w.birth_date
self.assertIsNotNone(birth_date)
self.assertEqual(type(birth_date).__name__, 'datetime')
def test_worker_sets_death(self):
"""Ensure worker correctly sets worker death date."""
q = Queue()
w = Worker([q])
w.register_death()
death_date = w.death_date
self.assertIsNotNone(death_date)
self.assertEqual(type(death_date).__name__, 'datetime')
    def test_clean_queue_registries(self):
        """worker.clean_registries sets last_cleaned_at and cleans registries."""
        foo_queue = Queue('foo', connection=self.testconn)
        foo_registry = StartedJobRegistry('foo', connection=self.testconn)
        # NOTE(review): positional (score, member) is the redis-py 2.x zadd
        # signature; redis-py 3+ expects a mapping -- confirm pinned version.
        self.testconn.zadd(foo_registry.key, 1, 'foo')
        self.assertEqual(self.testconn.zcard(foo_registry.key), 1)

        bar_queue = Queue('bar', connection=self.testconn)
        bar_registry = StartedJobRegistry('bar', connection=self.testconn)
        self.testconn.zadd(bar_registry.key, 1, 'bar')
        self.assertEqual(self.testconn.zcard(bar_registry.key), 1)

        # Cleaning must stamp last_cleaned_at and empty both queues'
        # registries, not just the first one.
        worker = Worker([foo_queue, bar_queue])
        self.assertEqual(worker.last_cleaned_at, None)
        worker.clean_registries()
        self.assertNotEqual(worker.last_cleaned_at, None)
        self.assertEqual(self.testconn.zcard(foo_registry.key), 0)
        self.assertEqual(self.testconn.zcard(bar_registry.key), 0)
def test_should_run_maintenance_tasks(self):
"""Workers should run maintenance tasks on startup and every hour."""
queue = Queue(connection=self.testconn)
worker = Worker(queue)
self.assertTrue(worker.should_run_maintenance_tasks)
worker.last_cleaned_at = utcnow()
self.assertFalse(worker.should_run_maintenance_tasks)
worker.last_cleaned_at = utcnow() - timedelta(seconds=3700)
self.assertTrue(worker.should_run_maintenance_tasks)
    def test_worker_calls_clean_registries(self):
        """Worker calls clean_registries when run."""
        queue = Queue(connection=self.testconn)
        registry = StartedJobRegistry(connection=self.testconn)
        # NOTE(review): positional (score, member) is the redis-py 2.x zadd
        # signature; redis-py 3+ expects a mapping -- confirm pinned version.
        self.testconn.zadd(registry.key, 1, 'foo')

        # A burst run is enough: maintenance happens as part of work().
        worker = Worker(queue, connection=self.testconn)
        worker.work(burst=True)
        self.assertEqual(self.testconn.zcard(registry.key), 0)
def kill_worker(pid, double_kill):
    """Send SIGTERM to `pid` once, or twice when `double_kill` is set."""
    # Wait for the worker to be started over on the main process.
    time.sleep(0.5)
    os.kill(pid, signal.SIGTERM)
    if not double_kill:
        return
    # Give the worker time to switch to its shutdown signal handler.
    time.sleep(0.5)
    os.kill(pid, signal.SIGTERM)
class TestWorkerShutdown(RQTestCase):
    """Warm vs. cold shutdown behavior of a worker receiving SIGTERM."""

    def setUp(self):
        # we want tests to fail if signal are ignored and the work remain running,
        # so set a signal to kill them after 5 seconds
        signal.signal(signal.SIGALRM, self._timeout)
        signal.alarm(5)

    def _timeout(self, signal, frame):
        # NOTE(review): the `signal` parameter shadows the `signal` module
        # inside this method; harmless here because the module isn't used.
        raise AssertionError("test still running after 5 seconds, "
                             "likely the worker wasn't shutdown correctly")

    @slow
    def test_idle_worker_warm_shutdown(self):
        """worker with no ongoing job receiving single SIGTERM signal and shutting down"""
        w = Worker('foo')
        self.assertFalse(w._stop_requested)
        # A child process SIGTERMs us (the worker's process) after a delay.
        p = Process(target=kill_worker, args=(os.getpid(), False))
        p.start()

        w.work()

        p.join(1)
        self.assertFalse(w._stop_requested)

    @slow
    def test_working_worker_warm_shutdown(self):
        """worker with an ongoing job receiving single SIGTERM signal, allowing job to finish then shutting down"""
        fooq = Queue('foo')
        w = Worker(fooq)

        # The job takes ~2s; a single SIGTERM arrives while it is running.
        sentinel_file = '/tmp/.rq_sentinel_warm'
        fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
        self.assertFalse(w._stop_requested)
        p = Process(target=kill_worker, args=(os.getpid(), False))
        p.start()

        w.work()

        p.join(2)
        # Warm shutdown lets the in-flight job finish, so the sentinel
        # file must exist afterwards.
        self.assertTrue(w._stop_requested)
        self.assertTrue(os.path.exists(sentinel_file))

        shutdown_requested_date = w.shutdown_requested_date
        self.assertIsNotNone(shutdown_requested_date)
        self.assertEqual(type(shutdown_requested_date).__name__, 'datetime')

    @slow
    def test_working_worker_cold_shutdown(self):
        """worker with an ongoing job receiving double SIGTERM signal and shutting down immediately"""
        fooq = Queue('foo')
        w = Worker(fooq)
        sentinel_file = '/tmp/.rq_sentinel_cold'
        fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
        self.assertFalse(w._stop_requested)
        # Two SIGTERMs: the second forces an immediate (cold) shutdown,
        # which surfaces as SystemExit from work().
        p = Process(target=kill_worker, args=(os.getpid(), True))
        p.start()

        self.assertRaises(SystemExit, w.work)

        p.join(1)
        # Cold shutdown aborts the job, so the sentinel never appears.
        self.assertTrue(w._stop_requested)
        self.assertFalse(os.path.exists(sentinel_file))

        shutdown_requested_date = w.shutdown_requested_date
        self.assertIsNotNone(shutdown_requested_date)
        self.assertEqual(type(shutdown_requested_date).__name__, 'datetime')
| [
"lijianming@baidu.com"
] | lijianming@baidu.com |
6618e38830a00f90c991e5818fa906b046a6e345 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/3/ida.py | d5269f1e50109ec477e4ea38f4bbf72cde13df52 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    """Print the tokens enclosed by a leading and trailing '"' token.

    `lineRemaining` is a list of whitespace-split tokens.  When the first
    and last tokens are each a lone double-quote, the tokens between them
    are echoed separated by single spaces; if there is nothing between
    them, a blank line is printed.  Anything else is silently ignored.
    """
    if not lineRemaining:
        # Nothing after the command token: no-op instead of IndexError.
        return
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # Drop the surrounding quote tokens and join the payload.
            # (print() calls keep this runnable on Python 3; the original
            # Python-2 print statements are a SyntaxError there.)
            lineRemaining = lineRemaining[1:-1]
            print(' '.join(lineRemaining))
        else:
            print('')
def main(fileName):
    """Interpret `fileName` line by line.

    Each non-blank line must start with the token 'iDA'; its remaining
    tokens are handed to printFunction.  The first malformed line prints
    'ERROR' and aborts processing.  Blank lines are skipped.
    """
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if not data:
                # Skip blank lines instead of crashing on data[0].
                continue
            if data[0] == 'iDA':
                printFunction(data[1:])
            else:
                # print() keeps this runnable on Python 3; the original
                # Python-2 print statement is a SyntaxError there.
                print('ERROR')
                return
if __name__ == '__main__':
    # CLI entry point: first argument is the path of the file to interpret.
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
148e21cdaebd61c3d3edd7f4b8df1223f60239a5 | 78d160bba37805d9ce1f55a02f17d8923e43fcfc | /learning_resources/models_test.py | d45a238691389f6e87fa206396b2e3b5c1649b6c | [
"BSD-3-Clause"
] | permissive | mitodl/mit-open | a79a4d55533d7abecedda48b62aec23a6b3086f0 | 51636099fb2bfaa77cd8c2154f7bcb09f3d36f71 | refs/heads/main | 2023-08-30T19:24:08.426942 | 2023-08-30T17:03:09 | 2023-08-30T17:03:09 | 672,068,771 | 0 | 0 | BSD-3-Clause | 2023-09-14T20:34:23 | 2023-07-28T20:54:37 | Python | UTF-8 | Python | false | false | 3,884 | py | """Tests for learning_resources.models"""
import pytest
from learning_resources import constants
from learning_resources.constants import LearningResourceType
from learning_resources.factories import (
CourseFactory,
LearningResourceFactory,
LearningResourcePlatformFactory,
LearningResourceRunFactory,
PlatformTypeChoice,
ProgramFactory,
)
# Every test in this module touches the database.
pytestmark = [pytest.mark.django_db]
def test_program_creation():
    """Test that a program has associated LearningResource, run, topics, etc"""
    program = ProgramFactory.create()
    learning_resource = program.learning_resource

    # The owning LearningResource is populated and points back at us.
    assert learning_resource.title is not None
    assert learning_resource.image.url is not None
    assert learning_resource.resource_type == LearningResourceType.program.value
    assert learning_resource.program == program
    assert program.courses.count() >= 1

    # The first run carries dates, imagery, pricing and instructors.
    first_run = program.runs.first()
    assert first_run.start_date is not None
    assert first_run.image.url is not None
    assert len(first_run.prices) > 0
    assert first_run.instructors.count() > 0

    # Related metadata is attached and run counts agree on both sides.
    assert learning_resource.topics.count() > 0
    assert learning_resource.offered_by.count() > 0
    assert learning_resource.runs.count() == program.runs.count()
def test_course_creation():
    """Test that a course has associated LearningResource, runs, topics, etc"""
    course = CourseFactory.create()
    learning_resource = course.learning_resource

    # The owning LearningResource is populated and points back at us.
    assert learning_resource.resource_type == LearningResourceType.course.value
    assert learning_resource.title is not None
    assert learning_resource.image.url is not None
    assert 0 <= len(learning_resource.prices) <= 3
    assert learning_resource.course == course

    # The first run carries dates, imagery, pricing and instructors.
    first_run = learning_resource.runs.first()
    assert first_run.start_date is not None
    assert first_run.image.url is not None
    assert len(first_run.prices) > 0
    assert first_run.instructors.count() > 0

    # Related metadata is attached and run counts agree on both sides.
    assert learning_resource.topics.count() > 0
    assert learning_resource.offered_by.count() > 0
    assert learning_resource.runs.count() == course.runs.count()
@pytest.mark.parametrize(
    "platform", [PlatformTypeChoice.ocw.value, PlatformTypeChoice.mitx.value]
)
@pytest.mark.parametrize("audience", [constants.OPEN, constants.PROFESSIONAL])
def test_lr_audience(platform, audience):
    """The audience property should return the expected value"""
    # The resource's audience is read straight from its platform record,
    # so the property must mirror whatever the platform was created with.
    lr = LearningResourceFactory.create(
        platform=LearningResourcePlatformFactory.create(
            platform=platform, audience=audience
        )
    )
    assert lr.audience == lr.platform.audience
# Cases below: PROFESSIONAL audiences always certify; OPEN audiences
# certify only when the run availability is "current".
@pytest.mark.parametrize(
    "platform, audience, availability, has_cert",
    [
        [
            PlatformTypeChoice.ocw.value,
            constants.PROFESSIONAL,
            constants.AvailabilityType.archived.value,
            True,
        ],
        [
            PlatformTypeChoice.ocw.value,
            constants.OPEN,
            constants.AvailabilityType.archived.value,
            False,
        ],
        [
            PlatformTypeChoice.mitx.value,
            constants.PROFESSIONAL,
            constants.AvailabilityType.archived.value,
            True,
        ],
        [
            PlatformTypeChoice.mitx.value,
            constants.OPEN,
            constants.AvailabilityType.archived.value,
            False,
        ],
        [
            PlatformTypeChoice.mitx.value,
            constants.OPEN,
            constants.AvailabilityType.current.value,
            True,
        ],
    ],
)
def test_lr_certification(platform, audience, availability, has_cert):
    """The certification property should return the expected value"""
    platform_object = LearningResourcePlatformFactory.create(
        platform=platform, audience=audience
    )
    # Create the course with no runs, then attach exactly one run with the
    # availability under test so the property has a single input to read.
    course = CourseFactory.create(
        platform=platform_object,
        runs=[],
    )
    course.learning_resource.runs.set(
        [LearningResourceRunFactory.create(availability=availability)]
    )
    assert course.learning_resource.certification == (
        constants.CERTIFICATE if has_cert else None
    )
| [
"noreply@github.com"
] | mitodl.noreply@github.com |
be167ab810c8d83a44feb646cfcaa0d54a7e0268 | 747974d83629a8ba28fcb3f4a33a17319002f169 | /tensorflow/python/distribute/strategy_combinations.py | 2a7afabf166e23f716a11d831268ee87318a510b | [
"Apache-2.0"
] | permissive | ayushmankumar7/tensorflow | 72f60290e4187644b4b254d25ec3033c9fda0c55 | 69a7d3abbbf5be791d1db397fcfea5d8e6efc4b9 | refs/heads/master | 2022-06-19T01:14:46.563563 | 2020-10-19T10:13:23 | 2020-10-19T10:13:23 | 244,302,166 | 2 | 0 | Apache-2.0 | 2022-05-21T12:44:00 | 2020-03-02T06:58:50 | C++ | UTF-8 | Python | false | false | 15,864 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Strategy combinations for combinations.combine()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import tf2
from tensorflow.python.distribute import central_storage_strategy
from tensorflow.python.distribute import cluster_resolver
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy as mirrored_lib
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import one_device_strategy as one_device_lib
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.eager import remote
from tensorflow.python.platform import flags
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.util.tf_export import tf_export
# Prefix under which the strategy combinations are exported via the
# tf_export calls at the bottom of this file.
_TF_INTERNAL_API_PREFIX = "__internal__.distribute.combinations."

# Process-wide guard so we only connect to a TPU cluster once per process.
_did_connect_to_cluster = False
CollectiveAllReduceExtended = (
    collective_all_reduce_strategy.CollectiveAllReduceExtended)
def _version_chooser(tf1_cls, tf2_cls):
def creator(*args, **kwargs):
if tf2.enabled():
return tf2_cls(*args, **kwargs)
return tf1_cls(*args, **kwargs)
return creator
# Aliases that resolve to the TF1 or TF2 strategy implementation at
# instantiation time, depending on tf2.enabled().
MirroredStrategy = _version_chooser(mirrored_lib.MirroredStrategyV1,
                                    mirrored_lib.MirroredStrategy)
CentralStorageStrategy = _version_chooser(
    central_storage_strategy.CentralStorageStrategyV1,
    central_storage_strategy.CentralStorageStrategy)
OneDeviceStrategy = _version_chooser(one_device_lib.OneDeviceStrategyV1,
                                     one_device_lib.OneDeviceStrategy)
# Only V2 CollectiveAllReduceStrategy combinations are supported.
CollectiveAllReduceStrategy = (
    collective_all_reduce_strategy.CollectiveAllReduceStrategy)
def _get_tpu_strategy_creator(steps_per_run,
                              use_single_core=False,
                              enable_packed_variable=False,
                              **kwargs):
  """Returns a zero-arg callable that resolves a TPU and builds a TPUStrategy.

  Args:
    steps_per_run: steps per `run` call; only honored by the TF1 strategy.
    use_single_core: if True, restrict the device assignment to one core.
    enable_packed_variable: if True, enable packed variables in eager mode.
    **kwargs: forwarded to the TPUStrategy constructor.
  """

  def _create_tpu_strategy():
    FLAGS = flags.FLAGS  # pylint: disable=invalid-name
    global _did_connect_to_cluster

    try:
      # Attempt to locally discover the TPU. This will fail for Cloud TPU, in
      # which case we fall back to the values passed as flags.
      resolver = tpu_cluster_resolver.TPUClusterResolver()
      did_automatically_resolve = True
    except ValueError:
      did_automatically_resolve = False

      # These flags will be defined by tpu_test_wrapper.py.
      resolver = tpu_cluster_resolver.TPUClusterResolver(
          tpu=hasattr(FLAGS, "tpu") and FLAGS.tpu or "",
          zone=hasattr(FLAGS, "zone") and FLAGS.zone or None,
          project=hasattr(FLAGS, "project") and FLAGS.project or None,
      )

    # Only connect once per process, rather than per test method.
    if getattr(FLAGS, "tpu", "") or did_automatically_resolve:
      if not _did_connect_to_cluster:
        remote.connect_to_cluster(resolver)
        _did_connect_to_cluster = True

    topology = tpu_strategy_util.initialize_tpu_system(resolver)
    device_assignment = None
    if use_single_core:
      device_assignment = device_assignment_lib.DeviceAssignment(
          topology,
          core_assignment=device_assignment_lib.SINGLE_CORE_ASSIGNMENT)

    # Steps per run is only supported in TF 1.x
    if tf2.enabled():
      strategy = tpu_lib.TPUStrategy(resolver, device_assignment, **kwargs)
    else:
      strategy = tpu_lib.TPUStrategyV1(resolver, steps_per_run,
                                       device_assignment, **kwargs)
    strategy._enable_packed_variable_in_eager_mode = enable_packed_variable  # pylint: disable=protected-access
    return strategy

  return _create_tpu_strategy
def _get_multi_worker_mirrored_creator(required_gpus):
  """Returns a zero-arg callable building a multi-worker mirrored strategy.

  The cluster topology is read from the TF_CONFIG environment (via
  TFConfigClusterResolver) inside the creator, so this must run in the
  worker processes, not at import time.
  """

  def _create_multi_worker_mirrored():
    tf_config = cluster_resolver.TFConfigClusterResolver()
    master = tf_config.master()
    if tf_config.rpc_layer:
      # Strip off the rpc_layer suffix.
      master = master[len("%s://" % tf_config.rpc_layer):]
    resolver = cluster_resolver.SimpleClusterResolver(
        cluster_spec=tf_config.cluster_spec(),
        task_type=tf_config.task_type,
        task_id=tf_config.task_id,
        master=master,
        environment=tf_config.environment,
        num_accelerators={"GPU": required_gpus},
        rpc_layer=tf_config.rpc_layer or "grpc",
    )
    # Disable health check. We don't have a reliable to shutdown the strategy
    # (and thus the health check) at the end of a test. Turning on health check
    # causes some flakiness since we re-create part of the server when creating
    # a strategy, and our tests are capable of handling failures.
    CollectiveAllReduceExtended._enable_check_health = False  # pylint: disable=protected-access
    # Always create the strategy in eager mode so that it starts the server and
    # configures the eager context. The eager context can no longer be
    # configured after initialization.
    with context.eager_mode():
      strategy = CollectiveAllReduceStrategy(cluster_resolver=resolver)
    # TODO(b/152320929): Wait for the cluster before proceeding, otherwise
    # collectives may hang if any worker launches collectives before the chief
    # creates the strategy.
    try:
      multi_process_runner.get_barrier().wait()
    except ValueError:
      # If the creator is called in the main process,
      # multi_process_runner.get_barrier() raises ValueError, which is safe to
      # ignore.
      pass
    return strategy

  return _create_multi_worker_mirrored
# pylint: disable=g-long-lambda
default_strategy = combinations.NamedDistribution(
"Default",
distribution_strategy_context._get_default_strategy, # pylint: disable=protected-access
required_gpus=None)
one_device_strategy = combinations.NamedDistribution(
"OneDeviceCPU", lambda: OneDeviceStrategy("/cpu:0"), required_gpus=None)
one_device_strategy_gpu = combinations.NamedDistribution(
"OneDeviceGPU", lambda: OneDeviceStrategy("/gpu:0"), required_gpus=1)
one_device_strategy_on_worker_1 = combinations.NamedDistribution(
"OneDeviceOnWorker1CPU",
lambda: OneDeviceStrategy("/job:worker/replica:0/task:1/cpu:0"),
required_gpus=None)
one_device_strategy_gpu_on_worker_1 = combinations.NamedDistribution(
"OneDeviceOnWorker1GPU",
lambda: OneDeviceStrategy("/job:worker/replica:0/task:1/gpu:0"),
required_gpus=1)
tpu_strategy = combinations.NamedDistribution(
"TPU", _get_tpu_strategy_creator(steps_per_run=2), required_tpu=True)
tpu_strategy_packed_var = combinations.NamedDistribution(
"TPUPackedVar",
_get_tpu_strategy_creator(steps_per_run=2, enable_packed_variable=True),
required_tpu=True)
tpu_strategy_one_step = combinations.NamedDistribution(
"TPUOneStep", _get_tpu_strategy_creator(steps_per_run=1), required_tpu=True)
tpu_strategy_one_core = combinations.NamedDistribution(
"TPUOneCore",
_get_tpu_strategy_creator(steps_per_run=2, use_single_core=True),
required_tpu=True)
tpu_strategy_one_step_one_core = combinations.NamedDistribution(
"TPUOneStepOneCore",
_get_tpu_strategy_creator(steps_per_run=1, use_single_core=True),
required_tpu=True)
cloud_tpu_strategy = combinations.NamedDistribution(
"CloudTPU",
_get_tpu_strategy_creator(steps_per_run=2),
required_tpu=True,
use_cloud_tpu=True)
mirrored_strategy_with_one_cpu = combinations.NamedDistribution(
"Mirrored1CPU", lambda: MirroredStrategy(["/cpu:0"]))
mirrored_strategy_with_one_gpu = combinations.NamedDistribution(
"Mirrored1GPU", lambda: MirroredStrategy(["/gpu:0"]), required_gpus=1)
mirrored_strategy_with_gpu_and_cpu = combinations.NamedDistribution(
"MirroredCPUAndGPU",
lambda: MirroredStrategy(["/gpu:0", "/cpu:0"]),
required_gpus=1)
mirrored_strategy_with_two_gpus = combinations.NamedDistribution(
"Mirrored2GPUs",
lambda: MirroredStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2)
# Should call set_virtual_cpus_to_at_least(3) in your test's setUp methods.
mirrored_strategy_with_cpu_1_and_2 = combinations.NamedDistribution(
"Mirrored2CPU", lambda: MirroredStrategy(["/cpu:1", "/cpu:2"]))
mirrored_strategy_with_cpu_1_and_2.__doc__ = (
"""Mirrored strategy with 2 virtual CPUs.
Should set up logical devices before use
""")
central_storage_strategy_with_two_gpus = combinations.NamedDistribution(
"CentralStorage2GPUs",
lambda: CentralStorageStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2)
central_storage_strategy_with_gpu_and_cpu = combinations.NamedDistribution(
"CentralStorageCPUAndGPU",
lambda: CentralStorageStrategy(["/gpu:0", "/cpu:0"]),
required_gpus=1)
# Multi-worker mirrored combinations.  Naming scheme: AxB = A processes
# (chief counts as one) with B GPUs each.
# chief + 1 worker, with CPU.
multi_worker_mirrored_2x1_cpu = combinations.NamedDistribution(
    "MultiWorkerMirrored2x1CPU",
    _get_multi_worker_mirrored_creator(required_gpus=0),
    has_chief=True,
    num_workers=1,
    use_pool_runner=True,
    no_xla=True,
)
# chief + 1 worker, with 1 GPU each.
multi_worker_mirrored_2x1_gpu = combinations.NamedDistribution(
    "MultiWorkerMirrored2x1GPU",
    _get_multi_worker_mirrored_creator(required_gpus=1),
    has_chief=True,
    num_workers=1,
    required_gpus=1,
    use_pool_runner=True,
    no_xla=True,
)
# chief + 1 worker, with 2 GPU each.
multi_worker_mirrored_2x2_gpu = combinations.NamedDistribution(
    "MultiWorkerMirrored2x2GPU",
    _get_multi_worker_mirrored_creator(required_gpus=2),
    has_chief=True,
    num_workers=1,
    required_gpus=2,
    use_pool_runner=True,
    no_xla=True,
)
# chief + 3 workers, with CPU.
multi_worker_mirrored_4x1_cpu = combinations.NamedDistribution(
    "MultiWorkerMirrored4x1CPU",
    _get_multi_worker_mirrored_creator(required_gpus=0),
    has_chief=True,
    num_workers=3,
    use_pool_runner=True,
    no_xla=True,
)

# Execution modes accepted by combinations.combine(mode=...).
graph_and_eager_modes = ["graph", "eager"]
# TODO(crccw): remove after tf-nightly picks up the new API.
def set_virtual_cpus_to_at_least(num_virtual_cpus):
  """Deprecated shim; delegates to test_util.set_logical_devices_to_at_least."""
  test_util.set_logical_devices_to_at_least("CPU", num_virtual_cpus)
# Pre-built groupings of the named distributions above, for use as
# `distribution=` arguments in combinations.combine().
strategies_minus_tpu = [
    default_strategy,
    one_device_strategy,
    one_device_strategy_gpu,
    mirrored_strategy_with_gpu_and_cpu,
    mirrored_strategy_with_two_gpus,
    central_storage_strategy_with_gpu_and_cpu,
]

strategies_minus_default_and_tpu = [
    one_device_strategy,
    one_device_strategy_gpu,
    mirrored_strategy_with_gpu_and_cpu,
    mirrored_strategy_with_two_gpus,
]

tpu_strategies = [
    tpu_strategy,  # steps_per_run=2
    tpu_strategy_one_step,
    tpu_strategy_packed_var,
    cloud_tpu_strategy,
]

all_strategies_minus_default = strategies_minus_default_and_tpu + tpu_strategies

all_strategies = strategies_minus_tpu + tpu_strategies

two_replica_strategies = [
    mirrored_strategy_with_gpu_and_cpu,
    mirrored_strategy_with_two_gpus,
    multi_worker_mirrored_2x1_cpu,
    multi_worker_mirrored_2x1_gpu,
    tpu_strategy,  # steps_per_run=2
    tpu_strategy_one_step,
    central_storage_strategy_with_gpu_and_cpu,
]

four_replica_strategies = [
    multi_worker_mirrored_2x2_gpu,
    multi_worker_mirrored_4x1_cpu,
]

# TODO(b/159831907): replace with two_replica_strategies after the tests using
# it work with MWMS.
multidevice_strategies = [
    mirrored_strategy_with_gpu_and_cpu,
    mirrored_strategy_with_two_gpus,
    tpu_strategy,  # steps_per_run=2
    tpu_strategy_one_step
]

multiworker_strategies = [
    multi_worker_mirrored_2x1_cpu, multi_worker_mirrored_2x1_gpu,
    multi_worker_mirrored_2x2_gpu
]
def strategy_minus_tpu_combinations():
  """Combinations of every non-TPU strategy in both graph and eager mode."""
  return combinations.combine(
      distribution=strategies_minus_tpu, mode=["graph", "eager"])
def tpu_strategy_combinations():
  """Combinations of every TPU strategy, graph mode only."""
  return combinations.combine(distribution=tpu_strategies, mode=["graph"])
def all_strategy_combinations():
  """Non-TPU combinations (graph+eager) plus TPU combinations (graph)."""
  return strategy_minus_tpu_combinations() + tpu_strategy_combinations()
def all_strategy_minus_default_and_tpu_combinations():
  """Single-machine strategies, excluding the default and TPU strategies."""
  return combinations.combine(
      distribution=[
          one_device_strategy, one_device_strategy_gpu,
          mirrored_strategy_with_gpu_and_cpu, mirrored_strategy_with_two_gpus
      ],
      mode=["graph", "eager"])
def all_strategy_combinations_minus_default():
  """Every strategy combination except the default strategy."""
  return (all_strategy_minus_default_and_tpu_combinations() +
          tpu_strategy_combinations())
# Export the named strategy combinations under
# tf.__internal__.distribute.combinations.* (TF2-only: v1=[]) so tests
# outside TensorFlow can reference them by their public names.
tf_export(
    _TF_INTERNAL_API_PREFIX + "central_storage_strategy_with_gpu_and_cpu",
    v1=[]).export_constant(__name__,
                           "central_storage_strategy_with_gpu_and_cpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "central_storage_strategy_with_two_gpus",
    v1=[]).export_constant(__name__, "central_storage_strategy_with_two_gpus")
tf_export(
    _TF_INTERNAL_API_PREFIX + "cloud_tpu_strategy",
    v1=[]).export_constant(__name__, "cloud_tpu_strategy")
tf_export(
    _TF_INTERNAL_API_PREFIX + "default_strategy",
    v1=[]).export_constant(__name__, "default_strategy")
tf_export(
    _TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_cpu_1_and_2",
    v1=[]).export_constant(__name__, "mirrored_strategy_with_cpu_1_and_2")
tf_export(
    _TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_gpu_and_cpu",
    v1=[]).export_constant(__name__, "mirrored_strategy_with_gpu_and_cpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_one_cpu",
    v1=[]).export_constant(__name__, "mirrored_strategy_with_one_cpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_one_gpu",
    v1=[]).export_constant(__name__, "mirrored_strategy_with_one_gpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_two_gpus",
    v1=[]).export_constant(__name__, "mirrored_strategy_with_two_gpus")
tf_export(
    _TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x1_cpu",
    v1=[]).export_constant(__name__, "multi_worker_mirrored_2x1_cpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x1_gpu",
    v1=[]).export_constant(__name__, "multi_worker_mirrored_2x1_gpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x2_gpu",
    v1=[]).export_constant(__name__, "multi_worker_mirrored_2x2_gpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "one_device_strategy",
    v1=[]).export_constant(__name__, "one_device_strategy")
tf_export(
    _TF_INTERNAL_API_PREFIX + "one_device_strategy_gpu",
    v1=[]).export_constant(__name__, "one_device_strategy_gpu")
tf_export(
    _TF_INTERNAL_API_PREFIX + "tpu_strategy",
    v1=[]).export_constant(__name__, "tpu_strategy")
tf_export(
    _TF_INTERNAL_API_PREFIX + "tpu_strategy_one_core",
    v1=[]).export_constant(__name__, "tpu_strategy_one_core")
tf_export(
    _TF_INTERNAL_API_PREFIX + "tpu_strategy_packed_var",
    v1=[]).export_constant(__name__, "tpu_strategy_packed_var")
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
fdc763851d07a181c2b83e74f4ee362cdc352308 | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/elastic-san/azext_elastic_san/aaz/latest/elastic_san/_delete.py | 683649b4034cb0874134a924fa1b1467b4df0263 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 5,340 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "elastic-san delete",
    is_preview=True,
    confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
    """Delete an Elastic SAN.

    :example: Delete an Elastic SAN.
        az elastic-san delete -g {rg} -n {san_name}
    """

    # NOTE: this module is generated by aaz-dev-tools (see file header);
    # keep manual edits to comments only, as code changes will be lost on
    # regeneration.
    _aaz_info = {
        "version": "2022-12-01-preview",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.elasticsan/elasticsans/{}", "2022-12-01-preview"],
        ]
    }

    # Enables the --no-wait flag for this long-running operation.
    AZ_SUPPORT_NO_WAIT = True

    def _handler(self, command_args):
        # Delete is a long-running operation; wrap execution in an LRO
        # poller with no result transform (the operation returns no body).
        super()._handler(command_args)
        return self.build_lro_poller(self._execute_operations, None)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Build (and memoize on the class) the CLI argument schema.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.elastic_san_name = AAZStrArg(
            options=["-n", "--name", "--elastic-san-name"],
            help="The name of the ElasticSan.",
            required=True,
            id_part="name",
            fmt=AAZStrArgFormat(
                pattern="^[A-Za-z0-9]+((-|_)[a-z0-9A-Z]+)*$",
                max_length=24,
                min_length=3,
            ),
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        yield self.ElasticSansDelete(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    class ElasticSansDelete(AAZHttpOperation):
        """Issues the DELETE request and wires up LRO polling per status code."""

        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # 202/200/204 all start LRO polling; the final state is read
            # from the "location" header per the lro_options below.
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [204]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_204,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ElasticSan/elasticSans/{elasticSanName}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "DELETE"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "elasticSanName", self.ctx.args.elastic_san_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-12-01-preview",
                    required=True,
                ),
            }
            return parameters

        # Success responses carry no payload this command needs, so the
        # handlers deliberately do nothing.
        def on_200(self, session):
            pass

        def on_204(self, session):
            pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"]
| [
"noreply@github.com"
] | Azure.noreply@github.com |
a98253484df11ef7cbc32bafc3d54614999236ed | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03738/s260957685.py | c50252035879f7823dd92792c49d4e20f4d40741 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | # -*- coding: utf-8 -*-
A, B = map(int, open(0))
print('GREATER' if A > B else 'LESS' if A < B else 'EQUAL') | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
39969c1880d5f4bd42afe17981b473f757389342 | 52a15d4fabf68bf23a23799312ae40465764908c | /src/maintenance/dumppreferences.py | 963258beee0d9b9d24e4a3c25021bdff41c3da25 | [
"MIT",
"Apache-2.0"
] | permissive | jensl/critic | 2071a1b0600051967323df48f4d3a5656a5d2bb8 | c2d962b909ff7ef2f09bccbeb636333920b3659e | refs/heads/stable/1 | 2022-05-28T03:51:15.108944 | 2018-03-27T18:47:46 | 2018-03-29T15:08:30 | 6,430,552 | 224 | 36 | NOASSERTION | 2023-05-29T15:38:00 | 2012-10-28T18:26:04 | Python | UTF-8 | Python | false | false | 4,036 | py | # -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2012 Jens Lindström, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import sys
import os.path
sys.path.insert(0, os.path.dirname(os.path.dirname(sys.argv[0])))
from dbaccess import connect
db = connect()
cursor = db.cursor()
cursor.execute("SELECT item, type, default_integer, default_string, description FROM preferences")
preferences = cursor.fetchall()
installpreferences_py = open(os.path.join(os.path.dirname(sys.argv[0]), "installpreferences.py"), "w")
print >>installpreferences_py, "PREFERENCES = [ ",
for index, (item, type, default_integer, default_string, description) in enumerate(preferences):
if index != 0:
installpreferences_py.write(""",
""")
installpreferences_py.write("""{ "item": %r,
"type": %r,""" % (item, type))
if type == "string":
installpreferences_py.write("""
"default_string": %r,""" % default_string)
else:
installpreferences_py.write("""
"default_integer": %r,""" % default_integer)
installpreferences_py.write("""
"description": %r }""" % description)
print >>installpreferences_py, " ]"
print >>installpreferences_py
print >>installpreferences_py, "def installPreferences(db, quiet):"
print >>installpreferences_py, " cursor = db.cursor()"
print >>installpreferences_py
print >>installpreferences_py, " for preference in PREFERENCES:"
print >>installpreferences_py, " item = preference[\"item\"]"
print >>installpreferences_py, " type = preference[\"type\"]"
print >>installpreferences_py, " default_integer = preference.get(\"default_integer\")"
print >>installpreferences_py, " default_string = preference.get(\"default_string\")"
print >>installpreferences_py, " description = preference[\"description\"]"
print >>installpreferences_py
print >>installpreferences_py, " cursor.execute(\"SELECT 1 FROM preferences WHERE item=%s\", (item,))"
print >>installpreferences_py
print >>installpreferences_py, " if cursor.fetchone():"
print >>installpreferences_py, " if not quiet: print \"Updating: %s\" % item"
print >>installpreferences_py, " cursor.execute(\"UPDATE preferences SET type=%s, default_integer=%s, default_string=%s, description=%s WHERE item=%s\", (type, default_integer, default_string, description, item))"
print >>installpreferences_py, " else:"
print >>installpreferences_py, " if not quiet: print \"Adding: %s\" % item"
print >>installpreferences_py, " cursor.execute(\"INSERT INTO preferences (item, type, default_integer, default_string, description) VALUES (%s, %s, %s, %s, %s)\", (item, type, default_integer, default_string, description))"
print >>installpreferences_py
print >>installpreferences_py, "if __name__ == \"__main__\":"
print >>installpreferences_py, " import sys"
print >>installpreferences_py, " import os.path"
print >>installpreferences_py
print >>installpreferences_py, " sys.path.insert(0, os.path.dirname(os.path.dirname(sys.argv[0])))"
print >>installpreferences_py
print >>installpreferences_py, " import dbaccess"
print >>installpreferences_py
print >>installpreferences_py, " db = dbaccess.connect()"
print >>installpreferences_py
print >>installpreferences_py, " installPreferences(db, \"--quiet\" in sys.argv or \"-q\" in sys.argv)"
print >>installpreferences_py
print >>installpreferences_py, " db.commit()"
| [
"jl@opera.com"
] | jl@opera.com |
78b36772d1120f0b4fc1cc8b334f2d27170c12da | c698fb03aa2bf034904a0310931b473b6da66fdc | /com/study/algorithm/offer/面试题 04.02. 最小高度树.py | cba62dda0a65f8ccc1bc044898548fa4d9b03b0a | [] | no_license | pi408637535/Algorithm | e46df1d07a519ab110e4f97755f461a1b2b7c308 | 75f4056ec6da01f7466a272871a7f7db579166b4 | refs/heads/master | 2021-08-29T19:19:53.368953 | 2021-08-22T16:30:32 | 2021-08-22T16:30:32 | 213,289,503 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | # Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def sortedArrayToBST(self, nums):
"""
:type nums: List[int]
:rtype: TreeNode
"""
if not nums:
return None
root = TreeNode(nums[len(nums) // 2] )
root.left = self.sortedArrayToBST(nums[:len(nums) // 2])
root.right = self.sortedArrayToBST(nums[len(nums) // 2 +1:])
return root
if __name__ == '__main__':
nums = [-10,-3,0,5,9]
tree = Solution().sortedArrayToBST(nums)
tree | [
"piguanghua@163.com"
] | piguanghua@163.com |
47a1919e488e11c462b12e89fd858ceb063b0bc4 | 9a5438bdb8e84d0167ddea5458a7f729fdd54121 | /dataproviders/serializers/EndpointSerializer.py | cdc9b598acdf3c773cfc7c4d32fc9060ea968bde | [] | no_license | Grusinator/MetaDataApi | 740fd2be4cb97b670f827a071a0ac8c50f79f8ff | 081f881c735466ed1dbbd68646b821299c5168f8 | refs/heads/master | 2023-07-25T23:58:22.179717 | 2020-03-15T09:36:05 | 2020-03-15T09:36:05 | 149,087,967 | 5 | 1 | null | 2023-07-25T15:39:12 | 2018-09-17T07:45:09 | CSS | UTF-8 | Python | false | false | 368 | py | from rest_framework import serializers
from dataproviders.models import Endpoint
class EndpointSerializer(serializers.ModelSerializer):
class Meta:
model = Endpoint
exclude = ["id", "data_provider"]
def create(self, validated_data):
validated_data.pop("data_fetches")
return self.Meta.model.objects.create(**validated_data)
| [
"grusinator@gmail.com"
] | grusinator@gmail.com |
844c09cc9e371c8dfe2d5499cd6f071d91fda930 | 722de5766ccf7e7a2d63c425a0c8dd78287f1853 | /homework7/Ex3.py | 7756cb00992285fba8a460dc1fe621d60707ee8a | [] | no_license | Alice-Avetisyan/project | 79a61bbd0ce3f5d8571a3c1d112f078f85583e0b | eb51676cdce1ff787738317aacb4c869a001b769 | refs/heads/master | 2020-07-30T09:04:07.555681 | 2019-12-15T17:15:05 | 2019-12-15T17:15:05 | 210,166,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | employees = {'Jack Smith': 'Engineer', 'Hideo Kojima': 'Director', 'Yoji Shinkawa': 'Art_director', 'John Anderson': 'Engineer'}
def find_employee(employees):
keys = list(employees.keys())
count = 0
for key in keys:
if key[:4] == 'John':
count += 1
print("There is/are {0} employee/s with the name John".format(count))
def find_engineer(employees):
values = list(employees.values())
count = 0
for value in values:
if value == 'Engineer':
count += 1
print("There is/are {0} employee/s with the Engineer position".format(count))
find_employee(employees)
find_engineer(employees) | [
"noreply@github.com"
] | Alice-Avetisyan.noreply@github.com |
2f6f7434f4f404d3c911fc7cebf7efb8705a4e22 | 292d23019c18d0b724aed88f04a0f20b5b616bb9 | /date20190110/V3/testcase/test_login.py | 33890fbf02875157430929453396eba0ee274ed2 | [] | no_license | RedAnanas/macbendi | 6f1f6fd41ed1fe8b71408dffa0b964464bd00aa8 | 8d5aa39d9389937f0e0c3f7a7d6532537f33cda8 | refs/heads/master | 2023-06-11T18:57:31.061009 | 2021-06-29T15:04:56 | 2021-06-29T15:04:56 | 380,759,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# author:Administrator
# datetime:2019/1/10 20:36
# software: PyCharm
from date20190110.V3.common.browser_manager import BrowserManager
from date20190110.V3.common.login import Login
with open("D:\Python\date20190110\V3\data\login.txt","r") as f:
lines = f.readlines()
for line in lines:
username, password,verifycode= line.strip().split(',')
bm = BrowserManager()
Login(bm).go_login(username,password,verifycode)
if username=="admin" and password=="admin" and verifycode=="0000":
logxx =bm.driver.find_element_by_link_text("注销").text
if "注销" in logxx:
print("case 成功")
else:
print("case 失败")
else:
logxx=bm.driver.find_element_by_class_name("bootbox-body").text
if username=="admin"and("登录失败" in logxx):
print("case 成功")
elif verifycode=="1234"and("验证码失效" in logxx):
print("case 成功")
else:
print("case 失败")
bm.driver.quit()
| [
"1315157388@qq.com"
] | 1315157388@qq.com |
73cbe5cf938fa4c42f221b7766cf9eff7a249e5e | 88332700e893553ed52247b83f7ae8e542e67239 | /test/test_livenodesnagiosplugin.py | f65af48119016c5cce45e6333bff12c7b1222c23 | [] | no_license | zhuohuwu0603/pylib | 3857a8ec3954d09e58c21c14d683bc95cb7f9d23 | 02b0a343b055dfd6bed4d64c40aa77ed8394c1b2 | refs/heads/master | 2020-08-12T02:55:35.956301 | 2019-10-09T23:08:58 | 2019-10-09T23:08:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,593 | py | # vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2014-09-15 20:49:22 +0100 (Mon, 15 Sep 2014)
#
# https://github.com/harisekhon/pylib
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn and optionally send me feedback
# to help improve or steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
# ============================================================================ #
# PyUnit Tests for HariSekhon.LiveNodesNagiosPlugin
# ============================================================================ #
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import sys
import unittest
libdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(libdir)
# pylint: disable=wrong-import-position
from harisekhon.utils import log
from harisekhon.nagiosplugin import LiveNodesNagiosPlugin
class LiveNodesNagiosPluginTester(unittest.TestCase):
# must prefix with test_ in order for the tests to be called
# Not using assertRaises >= 2.7 and maintaining compatibility with Python 2.6 servers
class SubLiveNodesNagiosPlugin(LiveNodesNagiosPlugin):
def get_nodecount(self):
print("running SubLiveNodesNagiosPlugin().get_nodecount()")
def setUp(self):
self.plugin = self.SubLiveNodesNagiosPlugin()
def test_unknown_exit(self):
try:
self.plugin.main()
raise Exception('LiveNodes plugin failed to terminate')
except SystemExit as _:
if _.code != 3:
raise Exception('LiveNodesNagiosPlugin failed to exit UNKNOWN (3), got exit code {0} instead'
.format(_.code))
def test_plugin_abstract(self): # pylint: disable=no-self-use
try:
LiveNodesNagiosPlugin() # pylint: disable=abstract-class-instantiated
raise Exception('failed to raise a TypeError when attempting to instantiate abstract class ' +
'LiveNodesNagiosPlugin')
except TypeError as _:
pass
def main():
# increase the verbosity
# verbosity Python >= 2.7
#unittest.main(verbosity=2)
log.setLevel(logging.DEBUG)
suite = unittest.TestLoader().loadTestsFromTestCase(LiveNodesNagiosPluginTester)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
main()
| [
"harisekhon@gmail.com"
] | harisekhon@gmail.com |
520d31450393ae8cbefdc361225a76ce4ee9f826 | 310a832285e3eb9b0ed13bf24b96f509295649cd | /python/pyspark/sql/dataframe.pyi | 1351c59470c9dbf172aa25665d01befa4e2bf8f5 | [
"LGPL-2.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"Python-2.0",
"MIT",
"CDDL-1.1",
"EPL-2.0",
"CDDL-1.0",
"BSD-2-Clause",
"GCC-exception-3.1",
"LicenseRef-scancode-generic-cla",
"CC-BY-SA-3.... | permissive | Shopify/spark | 7b83550a1b6c398b4296cccf2d9ffc3b465080a5 | 88ced28141beb696791ae67eac35219de942bf31 | refs/heads/master | 2023-08-26T03:22:46.659368 | 2021-02-08T12:08:34 | 2021-02-08T12:08:34 | 103,296,198 | 4 | 3 | Apache-2.0 | 2023-07-16T05:26:23 | 2017-09-12T16:38:54 | Scala | UTF-8 | Python | false | false | 12,275 | pyi | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import overload
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Tuple,
Union,
)
from py4j.java_gateway import JavaObject # type: ignore[import]
from pyspark.sql._typing import ColumnOrName, LiteralType, OptionalPrimitiveType
from pyspark._typing import PrimitiveType
from pyspark.sql.types import ( # noqa: F401
StructType,
StructField,
StringType,
IntegerType,
Row,
) # noqa: F401
from pyspark.sql.context import SQLContext
from pyspark.sql.group import GroupedData
from pyspark.sql.readwriter import DataFrameWriter, DataFrameWriterV2
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.column import Column
from pyspark.rdd import RDD
from pyspark.storagelevel import StorageLevel
from pyspark.sql.pandas.conversion import PandasConversionMixin
from pyspark.sql.pandas.map_ops import PandasMapOpsMixin
class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
sql_ctx: SQLContext
is_cached: bool
def __init__(self, jdf: JavaObject, sql_ctx: SQLContext) -> None: ...
@property
def rdd(self) -> RDD[Row]: ...
@property
def na(self) -> DataFrameNaFunctions: ...
@property
def stat(self) -> DataFrameStatFunctions: ...
def toJSON(self, use_unicode: bool = ...) -> RDD[str]: ...
def registerTempTable(self, name: str) -> None: ...
def createTempView(self, name: str) -> None: ...
def createOrReplaceTempView(self, name: str) -> None: ...
def createGlobalTempView(self, name: str) -> None: ...
@property
def write(self) -> DataFrameWriter: ...
@property
def writeStream(self) -> DataStreamWriter: ...
@property
def schema(self) -> StructType: ...
def printSchema(self) -> None: ...
def explain(
self, extended: Optional[Union[bool, str]] = ..., mode: Optional[str] = ...
) -> None: ...
def exceptAll(self, other: DataFrame) -> DataFrame: ...
def isLocal(self) -> bool: ...
@property
def isStreaming(self) -> bool: ...
def show(
self, n: int = ..., truncate: Union[bool, int] = ..., vertical: bool = ...
) -> None: ...
def checkpoint(self, eager: bool = ...) -> DataFrame: ...
def localCheckpoint(self, eager: bool = ...) -> DataFrame: ...
def withWatermark(
self, eventTime: ColumnOrName, delayThreshold: str
) -> DataFrame: ...
def hint(self, name: str, *parameters: Union[PrimitiveType, List[PrimitiveType]]) -> DataFrame: ...
def count(self) -> int: ...
def collect(self) -> List[Row]: ...
def toLocalIterator(self, prefetchPartitions: bool = ...) -> Iterator[Row]: ...
def limit(self, num: int) -> DataFrame: ...
def take(self, num: int) -> List[Row]: ...
def tail(self, num: int) -> List[Row]: ...
def foreach(self, f: Callable[[Row], None]) -> None: ...
def foreachPartition(self, f: Callable[[Iterator[Row]], None]) -> None: ...
def cache(self) -> DataFrame: ...
def persist(self, storageLevel: StorageLevel = ...) -> DataFrame: ...
@property
def storageLevel(self) -> StorageLevel: ...
def unpersist(self, blocking: bool = ...) -> DataFrame: ...
def coalesce(self, numPartitions: int) -> DataFrame: ...
@overload
def repartition(self, numPartitions: int, *cols: ColumnOrName) -> DataFrame: ...
@overload
def repartition(self, *cols: ColumnOrName) -> DataFrame: ...
@overload
def repartitionByRange(
self, numPartitions: int, *cols: ColumnOrName
) -> DataFrame: ...
@overload
def repartitionByRange(self, *cols: ColumnOrName) -> DataFrame: ...
def distinct(self) -> DataFrame: ...
@overload
def sample(self, fraction: float, seed: Optional[int] = ...) -> DataFrame: ...
@overload
def sample(
self,
withReplacement: Optional[bool],
fraction: float,
seed: Optional[int] = ...,
) -> DataFrame: ...
def sampleBy(
self, col: ColumnOrName, fractions: Dict[Any, float], seed: Optional[int] = ...
) -> DataFrame: ...
def randomSplit(
self, weights: List[float], seed: Optional[int] = ...
) -> List[DataFrame]: ...
@property
def dtypes(self) -> List[Tuple[str, str]]: ...
@property
def columns(self) -> List[str]: ...
def colRegex(self, colName: str) -> Column: ...
def alias(self, alias: str) -> DataFrame: ...
def crossJoin(self, other: DataFrame) -> DataFrame: ...
def join(
self,
other: DataFrame,
on: Optional[Union[str, List[str], Column, List[Column]]] = ...,
how: Optional[str] = ...,
) -> DataFrame: ...
def sortWithinPartitions(
self,
*cols: Union[str, Column, List[Union[str, Column]]],
ascending: Union[bool, List[bool]] = ...
) -> DataFrame: ...
def sort(
self,
*cols: Union[str, Column, List[Union[str, Column]]],
ascending: Union[bool, List[bool]] = ...
) -> DataFrame: ...
def orderBy(
self,
*cols: Union[str, Column, List[Union[str, Column]]],
ascending: Union[bool, List[bool]] = ...
) -> DataFrame: ...
def describe(self, *cols: Union[str, List[str]]) -> DataFrame: ...
def summary(self, *statistics: str) -> DataFrame: ...
@overload
def head(self) -> Row: ...
@overload
def head(self, n: int) -> List[Row]: ...
def first(self) -> Row: ...
def __getitem__(self, item: Union[int, str, Column, List, Tuple]) -> Column: ...
def __getattr__(self, name: str) -> Column: ...
@overload
def select(self, *cols: ColumnOrName) -> DataFrame: ...
@overload
def select(self, __cols: Union[List[Column], List[str]]) -> DataFrame: ...
@overload
def selectExpr(self, *expr: str) -> DataFrame: ...
@overload
def selectExpr(self, *expr: List[str]) -> DataFrame: ...
def filter(self, condition: ColumnOrName) -> DataFrame: ...
@overload
def groupBy(self, *cols: ColumnOrName) -> GroupedData: ...
@overload
def groupBy(self, __cols: Union[List[Column], List[str]]) -> GroupedData: ...
@overload
def rollup(self, *cols: ColumnOrName) -> GroupedData: ...
@overload
def rollup(self, __cols: Union[List[Column], List[str]]) -> GroupedData: ...
@overload
def cube(self, *cols: ColumnOrName) -> GroupedData: ...
@overload
def cube(self, __cols: Union[List[Column], List[str]]) -> GroupedData: ...
def agg(self, *exprs: Union[Column, Dict[str, str]]) -> DataFrame: ...
def union(self, other: DataFrame) -> DataFrame: ...
def unionAll(self, other: DataFrame) -> DataFrame: ...
def unionByName(
self, other: DataFrame, allowMissingColumns: bool = ...
) -> DataFrame: ...
def intersect(self, other: DataFrame) -> DataFrame: ...
def intersectAll(self, other: DataFrame) -> DataFrame: ...
def subtract(self, other: DataFrame) -> DataFrame: ...
def dropDuplicates(self, subset: Optional[List[str]] = ...) -> DataFrame: ...
def dropna(
self,
how: str = ...,
thresh: Optional[int] = ...,
subset: Optional[Union[str, Tuple[str, ...], List[str]]] = ...,
) -> DataFrame: ...
@overload
def fillna(
self,
value: LiteralType,
subset: Optional[Union[str, Tuple[str, ...], List[str]]] = ...,
) -> DataFrame: ...
@overload
def fillna(self, value: Dict[str, LiteralType]) -> DataFrame: ...
@overload
def replace(
self,
to_replace: LiteralType,
value: OptionalPrimitiveType,
subset: Optional[List[str]] = ...,
) -> DataFrame: ...
@overload
def replace(
self,
to_replace: List[LiteralType],
value: List[OptionalPrimitiveType],
subset: Optional[List[str]] = ...,
) -> DataFrame: ...
@overload
def replace(
self,
to_replace: Dict[LiteralType, OptionalPrimitiveType],
subset: Optional[List[str]] = ...,
) -> DataFrame: ...
@overload
def replace(
self,
to_replace: List[LiteralType],
value: OptionalPrimitiveType,
subset: Optional[List[str]] = ...,
) -> DataFrame: ...
def approxQuantile(
self,
col: Union[str, Tuple[str, ...], List[str]],
probabilities: Union[List[float], Tuple[float, ...]],
relativeError: float
) -> List[float]: ...
def corr(self, col1: str, col2: str, method: Optional[str] = ...) -> float: ...
def cov(self, col1: str, col2: str) -> float: ...
def crosstab(self, col1: str, col2: str) -> DataFrame: ...
def freqItems(
self, cols: Union[List[str], Tuple[str]], support: Optional[float] = ...
) -> DataFrame: ...
def withColumn(self, colName: str, col: Column) -> DataFrame: ...
def withColumnRenamed(self, existing: str, new: str) -> DataFrame: ...
@overload
def drop(self, cols: ColumnOrName) -> DataFrame: ...
@overload
def drop(self, *cols: str) -> DataFrame: ...
def toDF(self, *cols: ColumnOrName) -> DataFrame: ...
def transform(self, func: Callable[[DataFrame], DataFrame]) -> DataFrame: ...
@overload
def groupby(self, *cols: ColumnOrName) -> GroupedData: ...
@overload
def groupby(self, __cols: Union[List[Column], List[str]]) -> GroupedData: ...
def drop_duplicates(self, subset: Optional[List[str]] = ...) -> DataFrame: ...
def where(self, condition: ColumnOrName) -> DataFrame: ...
def sameSemantics(self, other: DataFrame) -> bool: ...
def semanticHash(self) -> int: ...
def inputFiles(self) -> List[str]: ...
def writeTo(self, table: str) -> DataFrameWriterV2: ...
class DataFrameNaFunctions:
df: DataFrame
def __init__(self, df: DataFrame) -> None: ...
def drop(
self,
how: str = ...,
thresh: Optional[int] = ...,
subset: Optional[List[str]] = ...,
) -> DataFrame: ...
@overload
def fill(
self, value: LiteralType, subset: Optional[List[str]] = ...
) -> DataFrame: ...
@overload
def fill(self, value: Dict[str, LiteralType]) -> DataFrame: ...
@overload
def replace(
self,
to_replace: LiteralType,
value: OptionalPrimitiveType,
subset: Optional[List[str]] = ...,
) -> DataFrame: ...
@overload
def replace(
self,
to_replace: List[LiteralType],
value: List[OptionalPrimitiveType],
subset: Optional[List[str]] = ...,
) -> DataFrame: ...
@overload
def replace(
self,
to_replace: Dict[LiteralType, OptionalPrimitiveType],
subset: Optional[List[str]] = ...,
) -> DataFrame: ...
@overload
def replace(
self,
to_replace: List[LiteralType],
value: OptionalPrimitiveType,
subset: Optional[List[str]] = ...,
) -> DataFrame: ...
class DataFrameStatFunctions:
df: DataFrame
def __init__(self, df: DataFrame) -> None: ...
def approxQuantile(
self, col: str, probabilities: List[float], relativeError: float
) -> List[float]: ...
def corr(self, col1: str, col2: str, method: Optional[str] = ...) -> float: ...
def cov(self, col1: str, col2: str) -> float: ...
def crosstab(self, col1: str, col2: str) -> DataFrame: ...
def freqItems(
self, cols: List[str], support: Optional[float] = ...
) -> DataFrame: ...
def sampleBy(
self, col: str, fractions: Dict[Any, float], seed: Optional[int] = ...
) -> DataFrame: ...
| [
"gurwls223@apache.org"
] | gurwls223@apache.org |
564310cd4df822c020ea2b749d187050a4ea21e3 | 68e65df90da9169733025dfede0a8b30a5e3d7e3 | /Loops_in_python/15_practice_test7.py | c665f2ca8055e909e438a0c795745fb2d1d0d25f | [] | no_license | shubam-garg/Python-Beginner | 290346cbb309a28d28d6ac04034cb084b71ccbc6 | 30742006c380a0a18aff574567a95c8b8c694754 | refs/heads/main | 2023-05-06T07:11:29.943475 | 2021-05-29T20:35:59 | 2021-05-29T20:35:59 | 354,527,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | ''' Write a program to print following pattern
*
* *
* * * '''
for i in range(0,4):
print(" * " * i)
| [
"81907680+shubam-garg@users.noreply.github.com"
] | 81907680+shubam-garg@users.noreply.github.com |
268dcec0173a7fd2959befc6e084e29c3a918513 | d488f052805a87b5c4b124ca93494bc9b78620f7 | /google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/apis/containeranalysis/v1alpha1/containeranalysis_v1alpha1_client.py | e7c73ecf4740a744ca2afd8627a5a073a9206f82 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | PacktPublishing/DevOps-Fundamentals | 5ce1fc938db66b420691aa8106ecfb3f9ceb1ace | 60597e831e08325c7e51e8557591917f7c417275 | refs/heads/master | 2023-02-02T04:48:15.346907 | 2023-01-30T08:33:35 | 2023-01-30T08:33:35 | 131,293,311 | 13 | 19 | null | null | null | null | UTF-8 | Python | false | false | 46,756 | py | """Generated client library for containeranalysis version v1alpha1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.containeranalysis.v1alpha1 import containeranalysis_v1alpha1_messages as messages
class ContaineranalysisV1alpha1(base_api.BaseApiClient):
"""Generated client library for service containeranalysis version v1alpha1."""
MESSAGES_MODULE = messages
BASE_URL = u'https://containeranalysis.googleapis.com/'
_PACKAGE = u'containeranalysis'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform']
_VERSION = u'v1alpha1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'ContaineranalysisV1alpha1'
_URL_VERSION = u'v1alpha1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new containeranalysis handle."""
url = url or self.BASE_URL
super(ContaineranalysisV1alpha1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_notes_occurrences = self.ProjectsNotesOccurrencesService(self)
self.projects_notes = self.ProjectsNotesService(self)
self.projects_occurrences = self.ProjectsOccurrencesService(self)
self.projects_operations = self.ProjectsOperationsService(self)
self.projects_scan_configs = self.ProjectsScanConfigsService(self)
self.projects = self.ProjectsService(self)
self.providers_notes_occurrences = self.ProvidersNotesOccurrencesService(self)
self.providers_notes = self.ProvidersNotesService(self)
self.providers = self.ProvidersService(self)
class ProjectsNotesOccurrencesService(base_api.BaseApiService):
"""Service class for the projects_notes_occurrences resource."""
_NAME = u'projects_notes_occurrences'
def __init__(self, client):
super(ContaineranalysisV1alpha1.ProjectsNotesOccurrencesService, self).__init__(client)
self._upload_configs = {
}
def List(self, request, global_params=None):
"""Lists `Occurrences` referencing the specified `Note`. Use this method to.
get all occurrences referencing your `Note` across all your customer
projects.
Args:
request: (ContaineranalysisProjectsNotesOccurrencesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListNoteOccurrencesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha1/projects/{projectsId}/notes/{notesId}/occurrences',
http_method=u'GET',
method_id=u'containeranalysis.projects.notes.occurrences.list',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'filter', u'pageSize', u'pageToken'],
relative_path=u'v1alpha1/{+name}/occurrences',
request_field='',
request_type_name=u'ContaineranalysisProjectsNotesOccurrencesListRequest',
response_type_name=u'ListNoteOccurrencesResponse',
supports_download=False,
)
class ProjectsNotesService(base_api.BaseApiService):
  """Service class for the projects_notes resource.

  Auto-generated apitools service wrapper. Each public method fetches its
  pre-registered request descriptor (attached right after the method body as
  `<Method>.method_config`) and dispatches via BaseApiService._RunMethod.
  """

  _NAME = u'projects_notes'

  def __init__(self, client):
    super(ContaineranalysisV1alpha1.ProjectsNotesService, self).__init__(client)
    # No media-upload methods are defined for this resource.
    self._upload_configs = {
        }

  def Create(self, request, global_params=None):
    """Creates a new `Note`.

    Args:
      request: (ContaineranalysisProjectsNotesCreateRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Note) The response message.
    """
    config = self.GetMethodConfig('Create')
    return self._RunMethod(
        config, request, global_params=global_params)

  Create.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/notes',
      http_method=u'POST',
      method_id=u'containeranalysis.projects.notes.create',
      ordered_params=[u'parent'],
      path_params=[u'parent'],
      query_params=[u'name', u'noteId'],
      relative_path=u'v1alpha1/{+parent}/notes',
      request_field=u'note',
      request_type_name=u'ContaineranalysisProjectsNotesCreateRequest',
      response_type_name=u'Note',
      supports_download=False,
  )

  def Delete(self, request, global_params=None):
    """Deletes the given `Note` from the system.

    Args:
      request: (ContaineranalysisProjectsNotesDeleteRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Empty) The response message.
    """
    config = self.GetMethodConfig('Delete')
    return self._RunMethod(
        config, request, global_params=global_params)

  Delete.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/notes/{notesId}',
      http_method=u'DELETE',
      method_id=u'containeranalysis.projects.notes.delete',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}',
      request_field='',
      request_type_name=u'ContaineranalysisProjectsNotesDeleteRequest',
      response_type_name=u'Empty',
      supports_download=False,
  )

  def Get(self, request, global_params=None):
    """Returns the requested `Note`.

    Args:
      request: (ContaineranalysisProjectsNotesGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Note) The response message.
    """
    config = self.GetMethodConfig('Get')
    return self._RunMethod(
        config, request, global_params=global_params)

  Get.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/notes/{notesId}',
      http_method=u'GET',
      method_id=u'containeranalysis.projects.notes.get',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}',
      request_field='',
      request_type_name=u'ContaineranalysisProjectsNotesGetRequest',
      response_type_name=u'Note',
      supports_download=False,
  )

  def GetIamPolicy(self, request, global_params=None):
    """Gets the access control policy for a note or an `Occurrence` resource.

    Requires `containeranalysis.notes.setIamPolicy` or
    `containeranalysis.occurrences.setIamPolicy` permission if the resource is
    a note or occurrence, respectively.
    Attempting to call this method on a resource without the required
    permission will result in a `PERMISSION_DENIED` error. Attempting to call
    this method on a non-existent resource will result in a `NOT_FOUND` error
    if the user has list permission on the project, or a `PERMISSION_DENIED`
    error otherwise. The resource takes the following formats:
    `projects/{PROJECT_ID}/occurrences/{OCCURRENCE_ID}` for occurrences and
    `projects/{PROJECT_ID}/notes/{NOTE_ID}` for notes.

    Args:
      request: (ContaineranalysisProjectsNotesGetIamPolicyRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Policy) The response message.
    """
    config = self.GetMethodConfig('GetIamPolicy')
    return self._RunMethod(
        config, request, global_params=global_params)

  GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/notes/{notesId}:getIamPolicy',
      http_method=u'POST',
      method_id=u'containeranalysis.projects.notes.getIamPolicy',
      ordered_params=[u'resource'],
      path_params=[u'resource'],
      query_params=[],
      relative_path=u'v1alpha1/{+resource}:getIamPolicy',
      request_field=u'getIamPolicyRequest',
      request_type_name=u'ContaineranalysisProjectsNotesGetIamPolicyRequest',
      response_type_name=u'Policy',
      supports_download=False,
  )

  def List(self, request, global_params=None):
    """Lists all `Notes` for a given project.

    Args:
      request: (ContaineranalysisProjectsNotesListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (ListNotesResponse) The response message.
    """
    config = self.GetMethodConfig('List')
    return self._RunMethod(
        config, request, global_params=global_params)

  List.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/notes',
      http_method=u'GET',
      method_id=u'containeranalysis.projects.notes.list',
      ordered_params=[u'parent'],
      path_params=[u'parent'],
      query_params=[u'filter', u'name', u'pageSize', u'pageToken'],
      relative_path=u'v1alpha1/{+parent}/notes',
      request_field='',
      request_type_name=u'ContaineranalysisProjectsNotesListRequest',
      response_type_name=u'ListNotesResponse',
      supports_download=False,
  )

  def Patch(self, request, global_params=None):
    """Updates an existing `Note`.

    Args:
      request: (ContaineranalysisProjectsNotesPatchRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Note) The response message.
    """
    config = self.GetMethodConfig('Patch')
    return self._RunMethod(
        config, request, global_params=global_params)

  Patch.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/notes/{notesId}',
      http_method=u'PATCH',
      method_id=u'containeranalysis.projects.notes.patch',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[u'updateMask'],
      relative_path=u'v1alpha1/{+name}',
      request_field=u'note',
      request_type_name=u'ContaineranalysisProjectsNotesPatchRequest',
      response_type_name=u'Note',
      supports_download=False,
  )

  def SetIamPolicy(self, request, global_params=None):
    """Sets the access control policy on the specified `Note` or `Occurrence`.

    Requires `containeranalysis.notes.setIamPolicy` or
    `containeranalysis.occurrences.setIamPolicy` permission if the resource is
    a `Note` or an `Occurrence`, respectively.
    Attempting to call this method without these permissions will result in a
    `PERMISSION_DENIED` error.
    Attempting to call this method on a non-existent resource will result in a
    `NOT_FOUND` error if the user has `containeranalysis.notes.list` permission
    on a `Note` or `containeranalysis.occurrences.list` on an `Occurrence`, or
    a `PERMISSION_DENIED` error otherwise. The resource takes the following
    formats: `projects/{projectid}/occurrences/{occurrenceid}` for occurrences
    and `projects/{projectid}/notes/{noteid}` for notes.

    Args:
      request: (ContaineranalysisProjectsNotesSetIamPolicyRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Policy) The response message.
    """
    config = self.GetMethodConfig('SetIamPolicy')
    return self._RunMethod(
        config, request, global_params=global_params)

  SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/notes/{notesId}:setIamPolicy',
      http_method=u'POST',
      method_id=u'containeranalysis.projects.notes.setIamPolicy',
      ordered_params=[u'resource'],
      path_params=[u'resource'],
      query_params=[],
      relative_path=u'v1alpha1/{+resource}:setIamPolicy',
      request_field=u'setIamPolicyRequest',
      request_type_name=u'ContaineranalysisProjectsNotesSetIamPolicyRequest',
      response_type_name=u'Policy',
      supports_download=False,
  )

  def TestIamPermissions(self, request, global_params=None):
    """Returns the permissions that a caller has on the specified note or occurrence resource.

    Requires list permission on the project (for example,
    "storage.objects.list" on the containing bucket for testing permission of
    an object). Attempting to call this method on a non-existent resource will
    result in a `NOT_FOUND` error if the user has list permission on the
    project, or a `PERMISSION_DENIED` error otherwise. The resource takes the
    following formats: `projects/{PROJECT_ID}/occurrences/{OCCURRENCE_ID}` for
    `Occurrences` and `projects/{PROJECT_ID}/notes/{NOTE_ID}` for `Notes`.

    Args:
      request: (ContaineranalysisProjectsNotesTestIamPermissionsRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (TestIamPermissionsResponse) The response message.
    """
    config = self.GetMethodConfig('TestIamPermissions')
    return self._RunMethod(
        config, request, global_params=global_params)

  TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/notes/{notesId}:testIamPermissions',
      http_method=u'POST',
      method_id=u'containeranalysis.projects.notes.testIamPermissions',
      ordered_params=[u'resource'],
      path_params=[u'resource'],
      query_params=[],
      relative_path=u'v1alpha1/{+resource}:testIamPermissions',
      request_field=u'testIamPermissionsRequest',
      request_type_name=u'ContaineranalysisProjectsNotesTestIamPermissionsRequest',
      response_type_name=u'TestIamPermissionsResponse',
      supports_download=False,
  )
class ProjectsOccurrencesService(base_api.BaseApiService):
  """Service class for the projects_occurrences resource.

  Auto-generated apitools service wrapper. Each public method fetches its
  pre-registered request descriptor (attached right after the method body as
  `<Method>.method_config`) and dispatches via BaseApiService._RunMethod.
  """

  _NAME = u'projects_occurrences'

  def __init__(self, client):
    super(ContaineranalysisV1alpha1.ProjectsOccurrencesService, self).__init__(client)
    # No media-upload methods are defined for this resource.
    self._upload_configs = {
        }

  def Create(self, request, global_params=None):
    """Creates a new `Occurrence`. Use this method to create `Occurrences` for a resource.

    Args:
      request: (ContaineranalysisProjectsOccurrencesCreateRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Occurrence) The response message.
    """
    config = self.GetMethodConfig('Create')
    return self._RunMethod(
        config, request, global_params=global_params)

  Create.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/occurrences',
      http_method=u'POST',
      method_id=u'containeranalysis.projects.occurrences.create',
      ordered_params=[u'parent'],
      path_params=[u'parent'],
      query_params=[u'name'],
      relative_path=u'v1alpha1/{+parent}/occurrences',
      request_field=u'occurrence',
      request_type_name=u'ContaineranalysisProjectsOccurrencesCreateRequest',
      response_type_name=u'Occurrence',
      supports_download=False,
  )

  def Delete(self, request, global_params=None):
    """Deletes the given `Occurrence` from the system.

    Use this when an `Occurrence` is no longer applicable for the given
    resource.

    Args:
      request: (ContaineranalysisProjectsOccurrencesDeleteRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Empty) The response message.
    """
    config = self.GetMethodConfig('Delete')
    return self._RunMethod(
        config, request, global_params=global_params)

  Delete.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/occurrences/{occurrencesId}',
      http_method=u'DELETE',
      method_id=u'containeranalysis.projects.occurrences.delete',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}',
      request_field='',
      request_type_name=u'ContaineranalysisProjectsOccurrencesDeleteRequest',
      response_type_name=u'Empty',
      supports_download=False,
  )

  def Get(self, request, global_params=None):
    """Returns the requested `Occurrence`.

    Args:
      request: (ContaineranalysisProjectsOccurrencesGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Occurrence) The response message.
    """
    config = self.GetMethodConfig('Get')
    return self._RunMethod(
        config, request, global_params=global_params)

  Get.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/occurrences/{occurrencesId}',
      http_method=u'GET',
      method_id=u'containeranalysis.projects.occurrences.get',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}',
      request_field='',
      request_type_name=u'ContaineranalysisProjectsOccurrencesGetRequest',
      response_type_name=u'Occurrence',
      supports_download=False,
  )

  def GetIamPolicy(self, request, global_params=None):
    """Gets the access control policy for a note or an `Occurrence` resource.

    Requires `containeranalysis.notes.setIamPolicy` or
    `containeranalysis.occurrences.setIamPolicy` permission if the resource is
    a note or occurrence, respectively.
    Attempting to call this method on a resource without the required
    permission will result in a `PERMISSION_DENIED` error. Attempting to call
    this method on a non-existent resource will result in a `NOT_FOUND` error
    if the user has list permission on the project, or a `PERMISSION_DENIED`
    error otherwise. The resource takes the following formats:
    `projects/{PROJECT_ID}/occurrences/{OCCURRENCE_ID}` for occurrences and
    `projects/{PROJECT_ID}/notes/{NOTE_ID}` for notes.

    Args:
      request: (ContaineranalysisProjectsOccurrencesGetIamPolicyRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Policy) The response message.
    """
    config = self.GetMethodConfig('GetIamPolicy')
    return self._RunMethod(
        config, request, global_params=global_params)

  GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/occurrences/{occurrencesId}:getIamPolicy',
      http_method=u'POST',
      method_id=u'containeranalysis.projects.occurrences.getIamPolicy',
      ordered_params=[u'resource'],
      path_params=[u'resource'],
      query_params=[],
      relative_path=u'v1alpha1/{+resource}:getIamPolicy',
      request_field=u'getIamPolicyRequest',
      request_type_name=u'ContaineranalysisProjectsOccurrencesGetIamPolicyRequest',
      response_type_name=u'Policy',
      supports_download=False,
  )

  def GetNotes(self, request, global_params=None):
    """Gets the `Note` attached to the given `Occurrence`.

    Args:
      request: (ContaineranalysisProjectsOccurrencesGetNotesRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Note) The response message.
    """
    config = self.GetMethodConfig('GetNotes')
    return self._RunMethod(
        config, request, global_params=global_params)

  GetNotes.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/occurrences/{occurrencesId}/notes',
      http_method=u'GET',
      method_id=u'containeranalysis.projects.occurrences.getNotes',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}/notes',
      request_field='',
      request_type_name=u'ContaineranalysisProjectsOccurrencesGetNotesRequest',
      response_type_name=u'Note',
      supports_download=False,
  )

  def GetVulnerabilitySummary(self, request, global_params=None):
    """Gets a summary of the number and severity of occurrences.

    Args:
      request: (ContaineranalysisProjectsOccurrencesGetVulnerabilitySummaryRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (GetVulnzOccurrencesSummaryResponse) The response message.
    """
    config = self.GetMethodConfig('GetVulnerabilitySummary')
    return self._RunMethod(
        config, request, global_params=global_params)

  GetVulnerabilitySummary.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/occurrences:vulnerabilitySummary',
      http_method=u'GET',
      method_id=u'containeranalysis.projects.occurrences.getVulnerabilitySummary',
      ordered_params=[u'parent'],
      path_params=[u'parent'],
      query_params=[u'filter'],
      relative_path=u'v1alpha1/{+parent}/occurrences:vulnerabilitySummary',
      request_field='',
      request_type_name=u'ContaineranalysisProjectsOccurrencesGetVulnerabilitySummaryRequest',
      response_type_name=u'GetVulnzOccurrencesSummaryResponse',
      supports_download=False,
  )

  def List(self, request, global_params=None):
    """Lists active `Occurrences` for a given project matching the filters.

    Args:
      request: (ContaineranalysisProjectsOccurrencesListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (ListOccurrencesResponse) The response message.
    """
    config = self.GetMethodConfig('List')
    return self._RunMethod(
        config, request, global_params=global_params)

  List.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/occurrences',
      http_method=u'GET',
      method_id=u'containeranalysis.projects.occurrences.list',
      ordered_params=[u'parent'],
      path_params=[u'parent'],
      query_params=[u'filter', u'kind', u'name', u'pageSize', u'pageToken'],
      relative_path=u'v1alpha1/{+parent}/occurrences',
      request_field='',
      request_type_name=u'ContaineranalysisProjectsOccurrencesListRequest',
      response_type_name=u'ListOccurrencesResponse',
      supports_download=False,
  )

  def Patch(self, request, global_params=None):
    """Updates an existing occurrence.

    Args:
      request: (ContaineranalysisProjectsOccurrencesPatchRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Occurrence) The response message.
    """
    config = self.GetMethodConfig('Patch')
    return self._RunMethod(
        config, request, global_params=global_params)

  Patch.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/occurrences/{occurrencesId}',
      http_method=u'PATCH',
      method_id=u'containeranalysis.projects.occurrences.patch',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[u'updateMask'],
      relative_path=u'v1alpha1/{+name}',
      request_field=u'occurrence',
      request_type_name=u'ContaineranalysisProjectsOccurrencesPatchRequest',
      response_type_name=u'Occurrence',
      supports_download=False,
  )

  def SetIamPolicy(self, request, global_params=None):
    """Sets the access control policy on the specified `Note` or `Occurrence`.

    Requires `containeranalysis.notes.setIamPolicy` or
    `containeranalysis.occurrences.setIamPolicy` permission if the resource is
    a `Note` or an `Occurrence`, respectively.
    Attempting to call this method without these permissions will result in a
    `PERMISSION_DENIED` error.
    Attempting to call this method on a non-existent resource will result in a
    `NOT_FOUND` error if the user has `containeranalysis.notes.list` permission
    on a `Note` or `containeranalysis.occurrences.list` on an `Occurrence`, or
    a `PERMISSION_DENIED` error otherwise. The resource takes the following
    formats: `projects/{projectid}/occurrences/{occurrenceid}` for occurrences
    and `projects/{projectid}/notes/{noteid}` for notes.

    Args:
      request: (ContaineranalysisProjectsOccurrencesSetIamPolicyRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Policy) The response message.
    """
    config = self.GetMethodConfig('SetIamPolicy')
    return self._RunMethod(
        config, request, global_params=global_params)

  SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/occurrences/{occurrencesId}:setIamPolicy',
      http_method=u'POST',
      method_id=u'containeranalysis.projects.occurrences.setIamPolicy',
      ordered_params=[u'resource'],
      path_params=[u'resource'],
      query_params=[],
      relative_path=u'v1alpha1/{+resource}:setIamPolicy',
      request_field=u'setIamPolicyRequest',
      request_type_name=u'ContaineranalysisProjectsOccurrencesSetIamPolicyRequest',
      response_type_name=u'Policy',
      supports_download=False,
  )

  def TestIamPermissions(self, request, global_params=None):
    """Returns the permissions that a caller has on the specified note or occurrence resource.

    Requires list permission on the project (for example,
    "storage.objects.list" on the containing bucket for testing permission of
    an object). Attempting to call this method on a non-existent resource will
    result in a `NOT_FOUND` error if the user has list permission on the
    project, or a `PERMISSION_DENIED` error otherwise. The resource takes the
    following formats: `projects/{PROJECT_ID}/occurrences/{OCCURRENCE_ID}` for
    `Occurrences` and `projects/{PROJECT_ID}/notes/{NOTE_ID}` for `Notes`.

    Args:
      request: (ContaineranalysisProjectsOccurrencesTestIamPermissionsRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (TestIamPermissionsResponse) The response message.
    """
    config = self.GetMethodConfig('TestIamPermissions')
    return self._RunMethod(
        config, request, global_params=global_params)

  TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/occurrences/{occurrencesId}:testIamPermissions',
      http_method=u'POST',
      method_id=u'containeranalysis.projects.occurrences.testIamPermissions',
      ordered_params=[u'resource'],
      path_params=[u'resource'],
      query_params=[],
      relative_path=u'v1alpha1/{+resource}:testIamPermissions',
      request_field=u'testIamPermissionsRequest',
      request_type_name=u'ContaineranalysisProjectsOccurrencesTestIamPermissionsRequest',
      response_type_name=u'TestIamPermissionsResponse',
      supports_download=False,
  )
class ProjectsOperationsService(base_api.BaseApiService):
  """Service class for the projects_operations resource.

  Auto-generated apitools service wrapper. Each public method fetches its
  pre-registered request descriptor (attached right after the method body as
  `<Method>.method_config`) and dispatches via BaseApiService._RunMethod.
  """

  _NAME = u'projects_operations'

  def __init__(self, client):
    super(ContaineranalysisV1alpha1.ProjectsOperationsService, self).__init__(client)
    # No media-upload methods are defined for this resource.
    self._upload_configs = {
        }

  def Create(self, request, global_params=None):
    """Creates a new `Operation`.

    Args:
      request: (ContaineranalysisProjectsOperationsCreateRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('Create')
    return self._RunMethod(
        config, request, global_params=global_params)

  Create.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/operations',
      http_method=u'POST',
      method_id=u'containeranalysis.projects.operations.create',
      ordered_params=[u'parent'],
      path_params=[u'parent'],
      query_params=[],
      relative_path=u'v1alpha1/{+parent}/operations',
      request_field=u'createOperationRequest',
      request_type_name=u'ContaineranalysisProjectsOperationsCreateRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def Patch(self, request, global_params=None):
    """Updates an existing operation.

    Returns an error if the operation does not exist. The only valid
    operations are to mark the done bit and to change the result.

    Args:
      request: (ContaineranalysisProjectsOperationsPatchRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('Patch')
    return self._RunMethod(
        config, request, global_params=global_params)

  Patch.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/operations/{operationsId}',
      http_method=u'PATCH',
      method_id=u'containeranalysis.projects.operations.patch',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}',
      request_field=u'updateOperationRequest',
      request_type_name=u'ContaineranalysisProjectsOperationsPatchRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )
class ProjectsScanConfigsService(base_api.BaseApiService):
  """Service class for the projects_scan_configs resource.

  Auto-generated apitools service wrapper. Each public method fetches its
  pre-registered request descriptor (attached right after the method body as
  `<Method>.method_config`) and dispatches via BaseApiService._RunMethod.
  """

  _NAME = u'projects_scan_configs'

  def __init__(self, client):
    super(ContaineranalysisV1alpha1.ProjectsScanConfigsService, self).__init__(client)
    # No media-upload methods are defined for this resource.
    self._upload_configs = {
        }

  def Get(self, request, global_params=None):
    """Gets a specific scan configuration for a project.

    Args:
      request: (ContaineranalysisProjectsScanConfigsGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (ScanConfig) The response message.
    """
    config = self.GetMethodConfig('Get')
    return self._RunMethod(
        config, request, global_params=global_params)

  Get.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/scan_configs/{scan_configsId}',
      http_method=u'GET',
      method_id=u'containeranalysis.projects.scan_configs.get',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}',
      request_field='',
      request_type_name=u'ContaineranalysisProjectsScanConfigsGetRequest',
      response_type_name=u'ScanConfig',
      supports_download=False,
  )

  def List(self, request, global_params=None):
    """Lists scan configurations for a project.

    Args:
      request: (ContaineranalysisProjectsScanConfigsListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (ListScanConfigsResponse) The response message.
    """
    config = self.GetMethodConfig('List')
    return self._RunMethod(
        config, request, global_params=global_params)

  List.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/scan_configs',
      http_method=u'GET',
      method_id=u'containeranalysis.projects.scan_configs.list',
      ordered_params=[u'parent'],
      path_params=[u'parent'],
      query_params=[u'filter', u'pageSize', u'pageToken'],
      relative_path=u'v1alpha1/{+parent}/scan_configs',
      request_field='',
      request_type_name=u'ContaineranalysisProjectsScanConfigsListRequest',
      response_type_name=u'ListScanConfigsResponse',
      supports_download=False,
  )

  def Patch(self, request, global_params=None):
    """Updates the scan configuration to a new value.

    Args:
      request: (ContaineranalysisProjectsScanConfigsPatchRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (ScanConfig) The response message.
    """
    config = self.GetMethodConfig('Patch')
    return self._RunMethod(
        config, request, global_params=global_params)

  Patch.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/scan_configs/{scan_configsId}',
      http_method=u'PATCH',
      method_id=u'containeranalysis.projects.scan_configs.patch',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[u'updateMask'],
      relative_path=u'v1alpha1/{+name}',
      request_field=u'scanConfig',
      request_type_name=u'ContaineranalysisProjectsScanConfigsPatchRequest',
      response_type_name=u'ScanConfig',
      supports_download=False,
  )
class ProjectsService(base_api.BaseApiService):
  """Service class for the projects resource.

  Generated placeholder service: the `projects` collection exposes no
  methods of its own in this API version; it exists only as the parent of
  the nested projects_* resources.
  """

  _NAME = u'projects'

  def __init__(self, client):
    super(ContaineranalysisV1alpha1.ProjectsService, self).__init__(client)
    # No media-upload methods are defined for this resource.
    self._upload_configs = {
        }
class ProvidersNotesOccurrencesService(base_api.BaseApiService):
  """Service class for the providers_notes_occurrences resource.

  Auto-generated apitools service wrapper. Each public method fetches its
  pre-registered request descriptor (attached right after the method body as
  `<Method>.method_config`) and dispatches via BaseApiService._RunMethod.
  """

  _NAME = u'providers_notes_occurrences'

  def __init__(self, client):
    super(ContaineranalysisV1alpha1.ProvidersNotesOccurrencesService, self).__init__(client)
    # No media-upload methods are defined for this resource.
    self._upload_configs = {
        }

  def List(self, request, global_params=None):
    """Lists `Occurrences` referencing the specified `Note`.

    Use this method to get all occurrences referencing your `Note` across
    all your customer projects.

    Args:
      request: (ContaineranalysisProvidersNotesOccurrencesListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (ListNoteOccurrencesResponse) The response message.
    """
    config = self.GetMethodConfig('List')
    return self._RunMethod(
        config, request, global_params=global_params)

  List.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/providers/{providersId}/notes/{notesId}/occurrences',
      http_method=u'GET',
      method_id=u'containeranalysis.providers.notes.occurrences.list',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[u'filter', u'pageSize', u'pageToken'],
      relative_path=u'v1alpha1/{+name}/occurrences',
      request_field='',
      request_type_name=u'ContaineranalysisProvidersNotesOccurrencesListRequest',
      response_type_name=u'ListNoteOccurrencesResponse',
      supports_download=False,
  )
class ProvidersNotesService(base_api.BaseApiService):
"""Service class for the providers_notes resource."""
_NAME = u'providers_notes'
def __init__(self, client):
super(ContaineranalysisV1alpha1.ProvidersNotesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
"""Creates a new `Note`.
Args:
request: (ContaineranalysisProvidersNotesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Note) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha1/providers/{providersId}/notes',
http_method=u'POST',
method_id=u'containeranalysis.providers.notes.create',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'noteId', u'parent'],
relative_path=u'v1alpha1/{+name}/notes',
request_field=u'note',
request_type_name=u'ContaineranalysisProvidersNotesCreateRequest',
response_type_name=u'Note',
supports_download=False,
)
def Delete(self, request, global_params=None):
"""Deletes the given `Note` from the system.
Args:
request: (ContaineranalysisProvidersNotesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha1/providers/{providersId}/notes/{notesId}',
http_method=u'DELETE',
method_id=u'containeranalysis.providers.notes.delete',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1alpha1/{+name}',
request_field='',
request_type_name=u'ContaineranalysisProvidersNotesDeleteRequest',
response_type_name=u'Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
"""Returns the requested `Note`.
Args:
request: (ContaineranalysisProvidersNotesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Note) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha1/providers/{providersId}/notes/{notesId}',
http_method=u'GET',
method_id=u'containeranalysis.providers.notes.get',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1alpha1/{+name}',
request_field='',
request_type_name=u'ContaineranalysisProvidersNotesGetRequest',
response_type_name=u'Note',
supports_download=False,
)
def GetIamPolicy(self, request, global_params=None):
"""Gets the access control policy for a note or an `Occurrence` resource.
Requires `containeranalysis.notes.setIamPolicy` or
`containeranalysis.occurrences.setIamPolicy` permission if the resource is
a note or occurrence, respectively.
Attempting to call this method on a resource without the required
permission will result in a `PERMISSION_DENIED` error. Attempting to call
this method on a non-existent resource will result in a `NOT_FOUND` error
if the user has list permission on the project, or a `PERMISSION_DENIED`
error otherwise. The resource takes the following formats:
`projects/{PROJECT_ID}/occurrences/{OCCURRENCE_ID}` for occurrences and
projects/{PROJECT_ID}/notes/{NOTE_ID} for notes
Args:
request: (ContaineranalysisProvidersNotesGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha1/providers/{providersId}/notes/{notesId}:getIamPolicy',
http_method=u'POST',
method_id=u'containeranalysis.providers.notes.getIamPolicy',
ordered_params=[u'resource'],
path_params=[u'resource'],
query_params=[],
relative_path=u'v1alpha1/{+resource}:getIamPolicy',
request_field=u'getIamPolicyRequest',
request_type_name=u'ContaineranalysisProvidersNotesGetIamPolicyRequest',
response_type_name=u'Policy',
supports_download=False,
)
def List(self, request, global_params=None):
"""Lists all `Notes` for a given project.
Args:
request: (ContaineranalysisProvidersNotesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListNotesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha1/providers/{providersId}/notes',
http_method=u'GET',
method_id=u'containeranalysis.providers.notes.list',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'filter', u'pageSize', u'pageToken', u'parent'],
relative_path=u'v1alpha1/{+name}/notes',
request_field='',
request_type_name=u'ContaineranalysisProvidersNotesListRequest',
response_type_name=u'ListNotesResponse',
supports_download=False,
)
    def Patch(self, request, global_params=None):
      """Updates an existing `Note`.
      Args:
        request: (ContaineranalysisProvidersNotesPatchRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Note) The response message.
      """
      # Delegate to the generated base_api plumbing using the static metadata
      # registered below (PATCH with an updateMask query parameter).
      config = self.GetMethodConfig('Patch')
      return self._RunMethod(
          config, request, global_params=global_params)
    # Route metadata for containeranalysis.providers.notes.patch; the `note`
    # field of the request message becomes the HTTP request body.
    Patch.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1alpha1/providers/{providersId}/notes/{notesId}',
        http_method=u'PATCH',
        method_id=u'containeranalysis.providers.notes.patch',
        ordered_params=[u'name'],
        path_params=[u'name'],
        query_params=[u'updateMask'],
        relative_path=u'v1alpha1/{+name}',
        request_field=u'note',
        request_type_name=u'ContaineranalysisProvidersNotesPatchRequest',
        response_type_name=u'Note',
        supports_download=False,
    )
    def SetIamPolicy(self, request, global_params=None):
      """Sets the access control policy on the specified `Note` or `Occurrence`.
      Requires `containeranalysis.notes.setIamPolicy` or
      `containeranalysis.occurrences.setIamPolicy` permission if the resource is
      a `Note` or an `Occurrence`, respectively.
      Attempting to call this method without these permissions will result in a
      `PERMISSION_DENIED` error.
      Attempting to call this method on a non-existent resource will result in a
      `NOT_FOUND` error if the user has `containeranalysis.notes.list` permission
      on a `Note` or `containeranalysis.occurrences.list` on an `Occurrence`, or
      a `PERMISSION_DENIED` error otherwise. The resource takes the following
      formats: `projects/{projectid}/occurrences/{occurrenceid}` for occurrences
      and projects/{projectid}/notes/{noteid} for notes
      Args:
        request: (ContaineranalysisProvidersNotesSetIamPolicyRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Policy) The response message.
      """
      # Delegate to the generated base_api plumbing with the static metadata
      # registered below.
      config = self.GetMethodConfig('SetIamPolicy')
      return self._RunMethod(
          config, request, global_params=global_params)
    # Route metadata for containeranalysis.providers.notes.setIamPolicy; the
    # embedded setIamPolicyRequest message becomes the POST body.
    SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1alpha1/providers/{providersId}/notes/{notesId}:setIamPolicy',
        http_method=u'POST',
        method_id=u'containeranalysis.providers.notes.setIamPolicy',
        ordered_params=[u'resource'],
        path_params=[u'resource'],
        query_params=[],
        relative_path=u'v1alpha1/{+resource}:setIamPolicy',
        request_field=u'setIamPolicyRequest',
        request_type_name=u'ContaineranalysisProvidersNotesSetIamPolicyRequest',
        response_type_name=u'Policy',
        supports_download=False,
    )
    def TestIamPermissions(self, request, global_params=None):
      """Returns the permissions that a caller has on the specified note or.
      occurrence resource. Requires list permission on the project (for example,
      "storage.objects.list" on the containing bucket for testing permission of
      an object). Attempting to call this method on a non-existent resource will
      result in a `NOT_FOUND` error if the user has list permission on the
      project, or a `PERMISSION_DENIED` error otherwise. The resource takes the
      following formats: `projects/{PROJECT_ID}/occurrences/{OCCURRENCE_ID}` for
      `Occurrences` and `projects/{PROJECT_ID}/notes/{NOTE_ID}` for `Notes`
      Args:
        request: (ContaineranalysisProvidersNotesTestIamPermissionsRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (TestIamPermissionsResponse) The response message.
      """
      # Delegate to the generated base_api plumbing with the static metadata
      # registered below.
      config = self.GetMethodConfig('TestIamPermissions')
      return self._RunMethod(
          config, request, global_params=global_params)
    # Route metadata for containeranalysis.providers.notes.testIamPermissions;
    # the embedded testIamPermissionsRequest message becomes the POST body.
    TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
        flat_path=u'v1alpha1/providers/{providersId}/notes/{notesId}:testIamPermissions',
        http_method=u'POST',
        method_id=u'containeranalysis.providers.notes.testIamPermissions',
        ordered_params=[u'resource'],
        path_params=[u'resource'],
        query_params=[],
        relative_path=u'v1alpha1/{+resource}:testIamPermissions',
        request_field=u'testIamPermissionsRequest',
        request_type_name=u'ContaineranalysisProvidersNotesTestIamPermissionsRequest',
        response_type_name=u'TestIamPermissionsResponse',
        supports_download=False,
    )
  class ProvidersService(base_api.BaseApiService):
    """Service class for the providers resource."""
    _NAME = u'providers'
    def __init__(self, client):
      super(ContaineranalysisV1alpha1.ProvidersService, self).__init__(client)
      # No media-upload methods are generated for this resource, so the
      # upload-config table stays empty.
      self._upload_configs = {
          }
| [
"saneetk@packtpub.com"
] | saneetk@packtpub.com |
857994885a60be47a6073d60ecb803208b9d6203 | a9e60d0e5b3b5062a81da96be2d9c748a96ffca7 | /configurations/i16-config/scripts/diffractometer/calc/Rotations.py | 0e9a9194d69dc76b3538e46850b2d5a1c0f31ba7 | [] | no_license | openGDA/gda-diamond | 3736718596f47607335ada470d06148d7b57526e | bbb64dcfd581c30eddb210c647db5b5864b59166 | refs/heads/master | 2023-08-16T08:01:11.075927 | 2023-08-15T16:01:52 | 2023-08-15T16:01:52 | 121,757,699 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | import java
import math
from math import *
#class Rotations(java.lang.Object):
# def __init__(self):
# self.rrrr=0.0
def R_x_l(alpha):
    "@sig public double R_x_l(double alpha)"
    # 3x3 rotation about the x axis, `_l` sign convention (the `_r` twin
    # carries the transposed signs); alpha is in radians.
    c, s = cos(alpha), sin(alpha)
    return [[1.0, 0.0, 0.0],
            [0.0, c, s],
            [0.0, -s, c]]
def R_x_r(alpha):
    "@sig public double R_x_r(double alpha)"
    # 3x3 rotation about the x axis, `_r` sign convention (transpose of
    # R_x_l); alpha is in radians.
    c, s = cos(alpha), sin(alpha)
    return [[1.0, 0.0, 0.0],
            [0.0, c, -s],
            [0.0, s, c]]
def R_y_r(alpha):
    "@sig public double R_y_r(double alpha)"
    # 3x3 rotation about the y axis, `_r` sign convention; alpha in radians.
    c, s = cos(alpha), sin(alpha)
    return [[c, 0.0, s],
            [0.0, 1.0, 0.0],
            [-s, 0.0, c]]
def R_y_l(alpha):
    "@sig public double R_y_l(double alpha)"
    # 3x3 rotation about the y axis, `_l` sign convention (transpose of
    # R_y_r); alpha is in radians.  Bug fix: the @sig docstring previously
    # declared the wrong name (`R_z_l`), which would mislead jythonc's
    # Java-signature generation and any introspection of __doc__.
    ALPHA=[[cos(alpha),0.0, -sin(alpha)],[0.0, 1.0, 0.0], [sin(alpha), 0.0, cos(alpha)]]
    return ALPHA
def R_z_l(alpha):
    "@sig public double R_z_l(double alpha)"
    # 3x3 rotation about the z axis, `_l` sign convention; alpha in radians.
    c, s = cos(alpha), sin(alpha)
    return [[c, s, 0.0],
            [-s, c, 0.0],
            [0.0, 0.0, 1.0]]
def R_z_r(alpha):
    "@sig public double R_z_r(double alpha)"
    # 3x3 rotation about the z axis, `_r` sign convention (transpose of
    # R_z_l); alpha is in radians.
    c, s = cos(alpha), sin(alpha)
    return [[c, -s, 0.0],
            [s, c, 0.0],
            [0.0, 0.0, 1.0]]
| [
"matthew.webber@diamond.ac.uk"
] | matthew.webber@diamond.ac.uk |
10e9d60fe572976b31b043e91b1f37ebfe91e3cc | 021dcf39f7cfb303ff427d7344026004f9d4cfdd | /bookit/users/models.py | d0d670ddce49d8602444e4a6ff2ca76b0307716c | [
"MIT"
] | permissive | kamranhossain/bookit | dfaca266b93e0ee8a50e88a2a7702a6f5ece35f1 | 4189a0ed620d7a595de2c113bb3a2d435d66d5f0 | refs/heads/master | 2021-05-11T23:36:00.630917 | 2017-08-16T20:30:33 | 2017-08-16T20:30:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class User(AbstractUser):
    """Project user model: Django's AbstractUser plus a free-form display name."""
    # First Name and Last Name do not cover name patterns
    # around the globe.
    name = models.CharField(_('Name of User'), blank=True, max_length=255)
    def __str__(self):
        # Used wherever Django stringifies a user (admin, shell, templates).
        return self.username
    def name_or_username(self):
        # Prefer the free-form display name; fall back to the login username.
        return self.name or self.username
    def get_absolute_url(self):
        # Canonical detail page for this user, resolved from the
        # 'users:detail' URL pattern by username.
        return reverse('users:detail', kwargs={'username': self.username})
| [
"aniruddha@adhikary.net"
] | aniruddha@adhikary.net |
7845b036d7efd63ae70213db83a48143f7ba9111 | 07e750cb558de03104f7c033286329b69bbdfc23 | /Chapter2_TimeFrequency_ShortTime/enframe.py | 73b2105d3c4ebfbe12af01200251ac78b583cdb7 | [
"MIT"
] | permissive | BarryZM/Python_Speech_SZY | d1e2fb6eadd6b10cfa5a65a622ae73f578d2f6d1 | 0074ad1d519387a75d5eca42c77f4d6966eb0a0e | refs/heads/master | 2023-03-09T21:47:54.465905 | 2021-03-02T17:21:32 | 2021-03-02T17:21:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,100 | py | # 分帧
from scipy.io import wavfile
import numpy as np
import matplotlib.pyplot as plt
def enframe(x, win, inc=None):
    """Split a 1-D signal into (optionally overlapping, optionally windowed) frames.

    Args:
        x: 1-D signal (array-like).
        win: either the frame length (int), or window coefficients
            (array-like); when coefficients are given the frame length is
            len(win) and every frame is multiplied element-wise by them.
            (Generalized: any array-like window is accepted now, where the
            original raised UnboundLocalError for e.g. a plain list.)
        inc: hop size between frame starts; defaults to the frame length,
            i.e. non-overlapping frames.

    Returns:
        ndarray of shape (n_frames, frame_length); only complete frames are
        produced (a too-short signal yields zero frames instead of crashing).
    """
    x = np.asarray(x)
    if isinstance(win, int):
        nlen = win            # frame length only, no windowing
        window = None
    else:
        window = np.asarray(win)
        nlen = len(window)    # frame length implied by the window
    if inc is None:
        inc = nlen
    # Number of complete frames; clamp at 0 so short signals don't raise.
    nf = max((len(x) - nlen + inc) // inc, 0)
    frameout = np.zeros((nf, nlen))
    starts = inc * np.arange(nf)          # start sample of each frame
    for i in range(nf):
        frameout[i, :] = x[starts[i]:starts[i] + nlen]
    if window is not None:
        frameout = frameout * window      # broadcast window over all frames
    return frameout
if __name__ == '__main__':
    # Demo: frame the speech file and plot four consecutive frames starting
    # at a user-chosen frame index, with shared x-limits so the overlap
    # between neighbouring frames is visible.
    fs, data = wavfile.read('bluesky3.wav')
    inc = 100        # hop size (samples)
    wlen = 200       # frame length (samples)
    en = enframe(data, wlen, inc)
    i = int(input('Start frame(i):'))
    tlabel = i
    plt.figure(figsize=(15, 20))
    # The original spelled out the four subplots by hand; a loop keeps the
    # panels identical and removes the copy-paste.
    for k, letter in enumerate('abcd'):
        plt.subplot(4, 1, k + 1)
        start = (tlabel + k - 1) * inc
        x = list(range(start, start + wlen))   # sample positions of frame i+k
        plt.plot(x, en[tlabel + k, :])
        plt.xlim([(i - 1) * inc + 1, (i + 2) * inc + wlen])
        plt.title('({})The {} Frame Waveform'.format(letter, tlabel + k))
    plt.savefig('images/enframe.png')
    plt.show()
    plt.close()
| [
"you@example.com"
] | you@example.com |
bada7cdd5041895620cc70ad02fc19f09856c9f5 | b25e66b3588bad601cc7b0fb5cf0ab5a015e2812 | /python_td6/colors.py | 50a35127da411b509486472c51a09eafaf335291 | [] | no_license | bentoonsmurf/bi423 | 93f4eb7ce8c4f73fab4bed0ece34113b4bbc757e | ca3aa7b6028b4ae932819e5a8d4e962e23a2aa90 | refs/heads/master | 2021-01-23T08:09:35.807316 | 2017-04-25T14:32:30 | 2017-04-25T14:32:30 | 80,526,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,677 | py | def color(text, **user_styles):
    """Return *text* wrapped in the ANSI escape codes named by ``user_styles``.

    Each keyword (e.g. ``bold=True``, ``fg_red=True``) must be a key of the
    table below; its code is prepended to the text, and the whole result is
    framed by reset codes.  Raises KeyError for an unknown style name.
    """
    styles = {
        # styles
        'reset': '\033[0m',
        'bold': '\033[01m',
        'disabled': '\033[02m',
        'underline': '\033[04m',
        'reverse': '\033[07m',
        'strike_through': '\033[09m',
        'invisible': '\033[08m',
        # text colors
        'fg_black': '\033[30m',
        'fg_red': '\033[31m',
        'fg_green': '\033[32m',
        'fg_orange': '\033[33m',
        'fg_blue': '\033[34m',
        'fg_purple': '\033[35m',
        'fg_cyan': '\033[36m',
        'fg_light_grey': '\033[37m',
        'fg_dark_grey': '\033[90m',
        'fg_light_red': '\033[91m',
        'fg_light_green': '\033[92m',
        'fg_yellow': '\033[93m',
        'fg_light_blue': '\033[94m',
        'fg_pink': '\033[95m',
        'fg_light_cyan': '\033[96m',
        # background colors
        'bg_black': '\033[40m',
        'bg_red': '\033[41m',
        'bg_green': '\033[42m',
        'bg_orange': '\033[43m',
        'bg_blue': '\033[44m',
        'bg_purple': '\033[45m',
        'bg_cyan': '\033[46m',
        'bg_light_grey': '\033[47m'
    }
    color_text = ''
    # Concatenate the escape code of every requested style, in keyword order.
    for style in user_styles:
        try:
            color_text += styles[style]
        except KeyError:
            raise KeyError('def color: parameter `{}` does not exist'.format(style))
    color_text += text
    # Leading reset isolates the styled text from any active terminal state;
    # trailing reset restores defaults afterwards.
    return '\033[0m{}\033[0m'.format(color_text)
def error(text):
    """Format *text* as a bold red error message."""
    flags = {'bold': True, 'fg_red': True}
    return color(text, **flags)
def warning(text):
    """Format *text* as a bold orange warning message."""
    flags = {'bold': True, 'fg_orange': True}
    return color(text, **flags)
def success(text):
    """Format *text* as a bold green success message."""
    flags = {'bold': True, 'fg_green': True}
    return color(text, **flags)
def ao(text):
    """Format *text* in bold blue."""
    flags = {'bold': True, 'fg_blue': True}
    return color(text, **flags)
| [
"you@example.com"
] | you@example.com |
f21f81beb5eb2e2a0a9ad5f5247d386a4eadad56 | f3b233e5053e28fa95c549017bd75a30456eb50c | /tyk2_input/42/42-50_MD_NVT_rerun/set_1ns_equi_1.py | c1682e2c05fa1d504664d4e98fb47ce4b66a087c | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | import os
# Per-lambda-window setup for 1 ns NVT equilibration runs (presumably
# thermodynamic-integration windows -- see 'ti_one-step' in the path).
# NOTE(review): `dir` shadows the builtin of the same name; kept as-is.
dir = '/mnt/scratch/songlin3/run/tyk2/L42/MD_NVT_rerun/ti_one-step/42_50/'
filesdir = dir + 'files/'
# Template input and PBS files that get copied and patched per window.
temp_equiin = filesdir + 'temp_equi_1.in'
temp_pbs = filesdir + 'temp_1ns_equi_1.pbs'
# Lambda values; each window's directory is named with the %6.5f format.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
  # Recreate a clean working directory for this window and enter it.
  os.system("rm -r %6.5f" %(j))
  os.system("mkdir %6.5f" %(j))
  os.chdir("%6.5f" %(j))
  os.system("rm *")
  workdir = dir + "%6.5f" %(j) + '/'
  #equiin
  # Copy the equilibration input template and substitute the XXX
  # placeholder with this window's lambda value.
  eqin = workdir + "%6.5f_equi_1.in" %(j)
  os.system("cp %s %s" %(temp_equiin, eqin))
  os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
  #PBS
  # Same substitution for the batch-submission script.
  pbs = workdir + "%6.5f_1ns_equi_1.pbs" %(j)
  os.system("cp %s %s" %(temp_pbs, pbs))
  os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
  #top
  # Topology and restart coordinates shared by every window.
  os.system("cp ../42-50_merged.prmtop .")
  os.system("cp ../0.5_equi_0.rst .")
  #submit pbs
  os.system("qsub %s" %(pbs))
  # Return to the root directory before processing the next window.
  os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
61f00b19422267995d747b0f748d6fe5b85b1257 | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /Validation/RecoParticleFlow/python/caloTauBenchmarkGeneric_cfi.py | cb53cbdcfc487688393622fee092cd1df46a79bb | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 762 | py | import FWCore.ParameterSet.Config as cms
#'tauGenJets'
# GenericBenchmarkAnalyzer configured for CaloTaus: presumably compares a
# reconstructed collection against generator-level truth within the cuts
# below (gen-seeded matching with deltaR < 0.3).
caloTauBenchmarkGeneric = cms.EDAnalyzer("GenericBenchmarkAnalyzer",
    OutputFile = cms.untracked.string('benchmark.root'),
    # NOTE(review): both input tags are empty here; presumably they are set
    # where this fragment is cloned/imported -- confirm against callers.
    InputTruthLabel = cms.InputTag(''),
    minEta = cms.double(-1),
    maxEta = cms.double(2.8),
    recPt = cms.double(10.0),
    deltaRMax = cms.double(0.3),
    StartFromGen = cms.bool(True),
    PlotAgainstRecoQuantities = cms.bool(False),
    OnlyTwoJets = cms.bool(False),
    BenchmarkLabel = cms.string( 'CaloTaus' ),
    InputRecoLabel = cms.InputTag(''),
    # Histogram ranges for the delta-Et / delta-phi response plots.
    minDeltaEt = cms.double(-100.),
    maxDeltaEt = cms.double(50.),
    minDeltaPhi = cms.double(-0.5),
    maxDeltaPhi = cms.double(0.5),
    doMetPlots = cms.bool(False)
)
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
7202984ea6bdd4793307700d6d3f9ba888bb1692 | 4be5c172c84e04c35677f5a327ab0ba592849676 | /python/interviewbit/arrays/simple_queries/simple_queries.py | e3d4eaeabebf74094a6312db15aea1592f2a2ba7 | [] | no_license | niranjan-nagaraju/Development | 3a16b547b030182867b7a44ac96a878c14058016 | d193ae12863971ac48a5ec9c0b35bfdf53b473b5 | refs/heads/master | 2023-04-06T20:42:57.882882 | 2023-03-31T18:38:40 | 2023-03-31T18:38:40 | 889,620 | 9 | 2 | null | 2019-05-27T17:00:29 | 2010-09-05T15:58:46 | Python | UTF-8 | Python | false | false | 3,221 | py | '''
https://www.interviewbit.com/problems/simple-queries/
Simple Queries
You are given an array A having N integers.
You have to perform the following steps in a given order.
1. generate all subarrays of A.
2. take the maximum element from each subarray of A and insert it into a new array G.
3. replace every element of G with the product of their divisors mod 1e9 + 7.
4. sort G in descending order
5. perform Q queries
In each query, you are given an integer K, where you have to find the Kth element in G.
Note: Your solution will run on multiple test cases so do clear global variables after using them.
Input Format
The first argument given is an Array A, having N integers.
The second argument given is an Array B, where B[i] is the ith query.
Output Format
Return an Array X, where X[i] will have the answer for the ith query.
Constraints
1 <= N <= 1e5
1 <= A[i] <= 1e5
1 <= Q <= 1e5
1 <= k <= (N * (N + 1))/2
For Example
Input:
A = [1, 2, 4]
B = [1, 2, 3, 4, 5, 6]
Output:
X = [8, 8, 8, 2, 2, 1]
Explanation:
subarrays of A maximum element
------------------------------------
1. [1] 1
2. [1, 2] 2
3. [1, 2, 4] 4
4. [2] 2
5. [2, 4] 4
6. [4] 4
original
G = [1, 2, 4, 2, 4, 4]
after changing every element of G with product of their divisors
G = [1, 2, 8, 2, 8, 8]
after sorting G in descending order
G = [8, 8, 8, 2, 2, 1]
'''
import math
class Solution:
    """InterviewBit 'Simple Queries' solver."""

    def simple_queries(self, A, B):
        """Answer each query B[i] (1-based) against the descending-sorted
        list of products-of-divisors (mod 1e9+7) of all subarray maxima of A.

        Args:
            A: list of positive ints.
            B: list of 1-based query indices.
        Returns:
            list of ints, one answer per query.

        Fixes over the original: `xrange`/`has_key`/integer-`/` were
        Python-2-only (`num_d / 2` would feed a float exponent to `power`
        on Python 3); the spurious `% MOD` reduction of the exponent in
        `power` is removed (reducing an exponent mod the modulus is wrong
        in general, it was only harmless here because exponents are tiny).
        """
        MOD = 1000000007

        def power(x, y):
            # Binary exponentiation: x**y mod MOD in O(log y) multiplications.
            res = 1
            while y > 0:
                if y & 1:
                    res = (res * x) % MOD
                y >>= 1
                x = (x * x) % MOD
            return res

        def product_of_divisors(m):
            # Product of all divisors of m is m**(d/2), where d is the number
            # of divisors; memoized because subarray maxima repeat heavily.
            if m not in product_cache:
                num_d = 0
                i = 1
                while i * i <= m:
                    if m % i == 0:
                        # Count the complementary divisor m//i too, unless
                        # i is exactly sqrt(m).
                        num_d += 1 if m // i == i else 2
                    i += 1
                prod = power(m, num_d // 2)
                if num_d & 1:
                    # Odd divisor count => m is a perfect square; fold in the
                    # leftover sqrt(m) factor (a**(5/2) == a**2 * sqrt(a)).
                    prod = (prod * int(math.sqrt(m))) % MOD
                product_cache[m] = prod
            return product_cache[m]

        product_cache = {}
        n = len(A)
        G = []
        for i in range(n):
            cur_max = A[i]
            for j in range(i, n):
                # Running maximum of the subarray A[i..j].
                cur_max = max(cur_max, A[j])
                G.append(product_of_divisors(cur_max))
        G.sort(reverse=True)
        # Queries are 1-based indices into the sorted list.
        return [G[q - 1] for q in B]
if __name__ == '__main__':
    s = Solution()
    # Smoke test: the worked example from the problem statement above.
    assert s.simple_queries([1,2,4], [1,2,3,4,5,6]) == [8,8,8,2,2,1]
| [
"vinithepooh@gmail.com"
] | vinithepooh@gmail.com |
22b7fb80baf0b76de06030f5189cd3e98926630e | ad0857eaba945c75e705594a53c40dbdd40467fe | /leetCode/three_sum.py | e8e07df23fe4316b0c6ff752ff301e5f14a276a6 | [
"MIT"
] | permissive | yskang/AlgorithmPractice | c9964d463fbd0d61edce5ba8b45767785b0b5e17 | 3efa96710e97c8740d6fef69e4afe7a23bfca05f | refs/heads/master | 2023-05-25T13:51:11.165687 | 2023-05-19T07:42:56 | 2023-05-19T07:42:56 | 67,045,852 | 0 | 0 | null | 2021-06-20T02:42:27 | 2016-08-31T14:40:10 | Python | UTF-8 | Python | false | false | 27,019 | py | # https://leetcode.com/problems/3sum/description/
class Solution(object):
    def threeSum(self, nums):
        """Return all unique triplets [a, b, c] from nums with a + b + c == 0.

        Sorts the input, then for each anchor value scans the remaining tail
        with two pointers, skipping duplicate values so every triplet is
        reported exactly once, in ascending order.  O(n log n + n^2) time,
        O(1) extra space beyond the output.

        Fixes over the original counting-dict version: removes the debug
        `print(nums_map)` that fired on every inner iteration, and drops the
        per-pair dict bookkeeping in favor of the standard two-pointer scan.

        :type nums: List[int]
        :rtype: List[List[int]]
        """
        if not nums or len(nums) < 3:
            return []
        nums = sorted(nums)
        n = len(nums)
        triplets = []
        for i in range(n - 2):
            if nums[i] > 0:
                # Smallest remaining element is positive: no zero sum left.
                break
            if i > 0 and nums[i] == nums[i - 1]:
                continue  # duplicate anchor would repeat triplets
            lo, hi = i + 1, n - 1
            want = -nums[i]
            while lo < hi:
                pair = nums[lo] + nums[hi]
                if pair < want:
                    lo += 1
                elif pair > want:
                    hi -= 1
                else:
                    triplets.append([nums[i], nums[lo], nums[hi]])
                    lo += 1
                    hi -= 1
                    # Skip duplicates of the values just matched.
                    while lo < hi and nums[lo] == nums[lo - 1]:
                        lo += 1
                    while lo < hi and nums[hi] == nums[hi + 1]:
                        hi -= 1
        return triplets
if __name__ == "__main__":
solution = Solution()
# print(solution.threeSum([82597,-9243,62390,83030,-97960,-26521,-61011,83390,-38677,12333,75987,46091,83794,19355,-71037,-6242,-28801,324,1202,-90885,-2989,-95597,-34333,35528,5680,89093,-90606,50360,-29393,-27012,53313,65213,99818,-82405,-41661,-3333,-51952,72135,-1523,26377,74685,96992,92263,15929,5467,-99555,-43348,-41689,-60383,-3990,32165,65265,-72973,-58372,12741,-48568,-46596,72419,-1859,34153,62937,81310,-61823,-96770,-54944,8845,-91184,24208,-29078,31495,65258,14198,85395,70506,-40908,56740,-12228,-40072,32429,93001,68445,-73927,25731,-91859,-24150,10093,-60271,-81683,-18126,51055,48189,-6468,25057,81194,-58628,74042,66158,-14452,-49851,-43667,11092,39189,-17025,-79173,13606,83172,92647,-59741,19343,-26644,-57607,82908,-20655,1637,80060,98994,39331,-31274,-61523,91225,-72953,13211,-75116,-98421,-41571,-69074,99587,39345,42151,-2460,98236,15690,-52507,-95803,-48935,-46492,-45606,-79254,-99851,52533,73486,39948,-7240,71815,-585,-96252,90990,-93815,93340,-71848,58733,-14859,-83082,-75794,-82082,-24871,-15206,91207,-56469,-93618,67131,-8682,75719,87429,-98757,-7535,-24890,-94160,85003,33928,75538,97456,-66424,-60074,-8527,-28697,-22308,2246,-70134,-82319,-10184,87081,-34949,-28645,-47352,-83966,-60418,-15293,-53067,-25921,55172,75064,95859,48049,34311,-86931,-38586,33686,-36714,96922,76713,-22165,-80585,-34503,-44516,39217,-28457,47227,-94036,43457,24626,-87359,26898,-70819,30528,-32397,-69486,84912,-1187,-98986,-32958,4280,-79129,-65604,9344,58964,50584,71128,-55480,24986,15086,-62360,-42977,-49482,-77256,-36895,-74818,20,3063,-49426,28152,-97329,6086,86035,-88743,35241,44249,19927,-10660,89404,24179,-26621,-6511,57745,-28750,96340,-97160,-97822,-49979,52307,79462,94273,-24808,77104,9255,-83057,77655,21361,55956,-9096,48599,-40490,-55107,2689,29608,20497,66834,-34678,23553,-81400,-66630,-96321,-34499,-12957,-20564,25610,-4322,-58462,20801,53700,71527,24669,-54534,57879,-3221,33636,3900,97832,-27688,-98715,5992,24520,-55401,-57613,-69926,57377,-77610,20123,521
74,860,60429,-91994,-62403,-6218,-90610,-37263,-15052,62069,-96465,44254,89892,-3406,19121,-41842,-87783,-64125,-56120,73904,-22797,-58118,-4866,5356,75318,46119,21276,-19246,-9241,-97425,57333,-15802,93149,25689,-5532,95716,39209,-87672,-29470,-16324,-15331,27632,-39454,56530,-16000,29853,46475,78242,-46602,83192,-73440,-15816,50964,-36601,89758,38375,-40007,-36675,-94030,67576,46811,-64919,45595,76530,40398,35845,41791,67697,-30439,-82944,63115,33447,-36046,-50122,-34789,43003,-78947,-38763,-89210,32756,-20389,-31358,-90526,-81607,88741,86643,98422,47389,-75189,13091,95993,-15501,94260,-25584,-1483,-67261,-70753,25160,89614,-90620,-48542,83889,-12388,-9642,-37043,-67663,28794,-8801,13621,12241,55379,84290,21692,-95906,-85617,-17341,-63767,80183,-4942,-51478,30997,-13658,8838,17452,-82869,-39897,68449,31964,98158,-49489,62283,-62209,-92792,-59342,55146,-38533,20496,62667,62593,36095,-12470,5453,-50451,74716,-17902,3302,-16760,-71642,-34819,96459,-72860,21638,47342,-69897,-40180,44466,76496,84659,13848,-91600,-90887,-63742,-2156,-84981,-99280,94326,-33854,92029,-50811,98711,-36459,-75555,79110,-88164,-97397,-84217,97457,64387,30513,-53190,-83215,252,2344,-27177,-92945,-89010,82662,-11670,86069,53417,42702,97082,3695,-14530,-46334,17910,77999,28009,-12374,15498,-46941,97088,-35030,95040,92095,-59469,-24761,46491,67357,-66658,37446,-65130,-50416,99197,30925,27308,54122,-44719,12582,-99525,-38446,-69050,-22352,94757,-56062,33684,-40199,-46399,96842,-50881,-22380,-65021,40582,53623,-76034,77018,-97074,-84838,-22953,-74205,79715,-33920,-35794,-91369,73421,-82492,63680,-14915,-33295,37145,76852,-69442,60125,-74166,74308,-1900,-30195,-16267,-60781,-27760,5852,38917,25742,-3765,49097,-63541,98612,-92865,-30248,9612,-8798,53262,95781,-42278,-36529,7252,-27394,-5021,59178,80934,-48480,-75131,-54439,-19145,-48140,98457,-6601,-51616,-89730,78028,32083,-48904,16822,-81153,-8832,48720,-80728,-45133,-86647,-4259,-40453,2590,28613,50523,-4105,-27790,-74579,-17223,63721,33489,-47921
,97628,-97691,-14782,-65644,18008,-93651,-71266,80990,-76732,-47104,35368,28632,59818,-86269,-89753,34557,-92230,-5933,-3487,-73557,-13174,-43981,-43630,-55171,30254,-83710,-99583,-13500,71787,5017,-25117,-78586,86941,-3251,-23867,-36315,75973,86272,-45575,77462,-98836,-10859,70168,-32971,-38739,-12761,93410,14014,-30706,-77356,-85965,-62316,63918,-59914,-64088,1591,-10957,38004,15129,-83602,-51791,34381,-89382,-26056,8942,5465,71458,-73805,-87445,-19921,-80784,69150,-34168,28301,-68955,18041,6059,82342,9947,39795,44047,-57313,48569,81936,-2863,-80932,32976,-86454,-84207,33033,32867,9104,-16580,-25727,80157,-70169,53741,86522,84651,68480,84018,61932,7332,-61322,-69663,76370,41206,12326,-34689,17016,82975,-23386,39417,72793,44774,-96259,3213,79952,29265,-61492,-49337,14162,65886,3342,-41622,-62659,-90402,-24751,88511,54739,-21383,-40161,-96610,-24944,-602,-76842,-21856,69964,43994,-15121,-85530,12718,13170,-13547,69222,62417,-75305,-81446,-38786,-52075,-23110,97681,-82800,-53178,11474,35857,94197,-58148,-23689,32506,92154,-64536,-73930,-77138,97446,-83459,70963,22452,68472,-3728,-25059,-49405,95129,-6167,12808,99918,30113,-12641,-26665,86362,-33505,50661,26714,33701,89012,-91540,40517,-12716,-57185,-87230,29914,-59560,13200,-72723,58272,23913,-45586,-96593,-26265,-2141,31087,81399,92511,-34049,20577,2803,26003,8940,42117,40887,-82715,38269,40969,-50022,72088,21291,-67280,-16523,90535,18669,94342,-39568,-88080,-99486,-20716,23108,-28037,63342,36863,-29420,-44016,75135,73415,16059,-4899,86893,43136,-7041,33483,-67612,25327,40830,6184,61805,4247,81119,-22854,-26104,-63466,63093,-63685,60369,51023,51644,-16350,74438,-83514,99083,10079,-58451,-79621,48471,67131,-86940,99093,11855,-22272,-67683,-44371,9541,18123,37766,-70922,80385,-57513,-76021,-47890,36154,72935,84387,-92681,-88303,-7810,59902,-90,-64704,-28396,-66403,8860,13343,33882,85680,7228,28160,-14003,54369,-58893,92606,-63492,-10101,64714,58486,29948,-44679,-22763,10151,-56695,4031,-18242,-36232,86168,-14263,9883,
47124,47271,92761,-24958,-73263,-79661,-69147,-18874,29546,-92588,-85771,26451,-86650,-43306,-59094,-47492,-34821,-91763,-47670,33537,22843,67417,-759,92159,63075,94065,-26988,55276,65903,30414,-67129,-99508,-83092,-91493,-50426,14349,-83216,-76090,32742,-5306,-93310,-60750,-60620,-45484,-21108,-58341,-28048,-52803,69735,78906,81649,32565,-86804,-83202,-65688,-1760,89707,93322,-72750,84134,71900,-37720,19450,-78018,22001,-23604,26276,-21498,65892,-72117,-89834,-23867,55817,-77963,42518,93123,-83916,63260,-2243,-97108,85442,-36775,17984,-58810,99664,-19082,93075,-69329,87061,79713,16296,70996,13483,-74582,49900,-27669,-40562,1209,-20572,34660,83193,75579,7344,64925,88361,60969,3114,44611,-27445,53049,-16085,-92851,-53306,13859,-33532,86622,-75666,-18159,-98256,51875,-42251,-27977,-18080,23772,38160,41779,9147,94175,99905,-85755,62535,-88412,-52038,-68171,93255,-44684,-11242,-104,31796,62346,-54931,-55790,-70032,46221,56541,-91947,90592,93503,4071,20646,4856,-63598,15396,-50708,32138,-85164,38528,-89959,53852,57915,-42421,-88916,-75072,67030,-29066,49542,-71591,61708,-53985,-43051,28483,46991,-83216,80991,-46254,-48716,39356,-8270,-47763,-34410,874,-1186,-7049,28846,11276,21960,-13304,-11433,-4913,55754,79616,70423,-27523,64803,49277,14906,-97401,-92390,91075,70736,21971,-3303,55333,-93996,76538,54603,-75899,98801,46887,35041,48302,-52318,55439,24574,14079,-24889,83440,14961,34312,-89260,-22293,-81271,-2586,-71059,-10640,-93095,-5453,-70041,66543,74012,-11662,-52477,-37597,-70919,92971,-17452,-67306,-80418,7225,-89296,24296,86547,37154,-10696,74436,-63959,58860,33590,-88925,-97814,-83664,85484,-8385,-50879,57729,-74728,-87852,-15524,-91120,22062,28134,80917,32026,49707,-54252,-44319,-35139,13777,44660,85274,25043,58781,-89035,-76274,6364,-63625,72855,43242,-35033,12820,-27460,77372,-47578,-61162,-70758,-1343,-4159,64935,56024,-2151,43770,19758,-30186,-86040,24666,-62332,-67542,73180,-25821,-27826,-45504,-36858,-12041,20017,-24066,-56625,-52097,-47239,-90694,8959,7712,
-14258,-5860,55349,61808,-4423,-93703,64681,-98641,-25222,46999,-83831,-54714,19997,-68477,66073,51801,-66491,52061,-52866,79907,-39736,-68331,68937,91464,98892,910,93501,31295,-85873,27036,-57340,50412,21,-2445,29471,71317,82093,-94823,-54458,-97410,39560,-7628,66452,39701,54029,37906,46773,58296,60370,-61090,85501,-86874,71443,-72702,-72047,14848,34102,77975,-66294,-36576,31349,52493,-70833,-80287,94435,39745,-98291,84524,-18942,10236,93448,50846,94023,-6939,47999,14740,30165,81048,84935,-19177,-13594,32289,62628,-90612,-542,-66627,64255,71199,-83841,-82943,-73885,8623,-67214,-9474,-35249,62254,-14087,-90969,21515,-83303,94377,-91619,19956,-98810,96727,-91939,29119,-85473,-82153,-69008,44850,74299,-76459,-86464,8315,-49912,-28665,59052,-69708,76024,-92738,50098,18683,-91438,18096,-19335,35659,91826,15779,-73070,67873,-12458,-71440,-46721,54856,97212,-81875,35805,36952,68498,81627,-34231,81712,27100,-9741,-82612,18766,-36392,2759,41728,69743,26825,48355,-17790,17165,56558,3295,-24375,55669,-16109,24079,73414,48990,-11931,-78214,90745,19878,35673,-15317,-89086,94675,-92513,88410,-93248,-19475,-74041,-19165,32329,-26266,-46828,-18747,45328,8990,-78219,-25874,-74801,-44956,-54577,-29756,-99822,-35731,-18348,-68915,-83518,-53451,95471,-2954,-13706,-8763,-21642,-37210,16814,-60070,-42743,27697,-36333,-42362,11576,85742,-82536,68767,-56103,-63012,71396,-78464,-68101,-15917,-11113,-3596,77626,-60191,-30585,-73584,6214,-84303,18403,23618,-15619,-89755,-59515,-59103,-74308,-63725,-29364,-52376,-96130,70894,-12609,50845,-2314,42264,-70825,64481,55752,4460,-68603,-88701,4713,-50441,-51333,-77907,97412,-66616,-49430,60489,-85262,-97621,-18980,44727,-69321,-57730,66287,-92566,-64427,-14270,11515,-92612,-87645,61557,24197,-81923,-39831,-10301,-23640,-76219,-68025,92761,-76493,68554,-77734,-95620,-11753,-51700,98234,-68544,-61838,29467,46603,-18221,-35441,74537,40327,-58293,75755,-57301,-7532,-94163,18179,-14388,-22258,-46417,-48285,18242,-77551,82620,250,-20060,-79568,-77259,820
52,-98897,-75464,48773,-79040,-11293,45941,-67876,-69204,-46477,-46107,792,60546,-34573,-12879,-94562,20356,-48004,-62429,96242,40594,2099,99494,25724,-39394,-2388,-18563,-56510,-83570,-29214,3015,74454,74197,76678,-46597,60630,-76093,37578,-82045,-24077,62082,-87787,-74936,58687,12200,-98952,70155,-77370,21710,-84625,-60556,-84128,925,65474,-15741,-94619,88377,89334,44749,22002,-45750,-93081,-14600,-83447,46691,85040,-66447,-80085,56308,44310,24979,-29694,57991,4675,-71273,-44508,13615,-54710,23552,-78253,-34637,50497,68706,81543,-88408,-21405,6001,-33834,-21570,-46692,-25344,20310,71258,-97680,11721,59977,59247,-48949,98955,-50276,-80844,-27935,-76102,55858,-33492,40680,66691,-33188,8284,64893,-7528,6019,-85523,8434,-64366,-56663,26862,30008,-7611,-12179,-70076,21426,-11261,-36864,-61937,-59677,929,-21052,3848,-20888,-16065,98995,-32293,-86121,-54564,77831,68602,74977,31658,40699,29755,98424,80358,-69337,26339,13213,-46016,-18331,64713,-46883,-58451,-70024,-92393,-4088,70628,-51185,71164,-75791,-1636,-29102,-16929,-87650,-84589,-24229,-42137,-15653,94825,13042,88499,-47100,-90358,-7180,29754,-65727,-42659,-85560,-9037,-52459,20997,-47425,17318,21122,20472,-23037,65216,-63625,-7877,-91907,24100,-72516,22903,-85247,-8938,73878,54953,87480,-31466,-99524,35369,-78376,89984,-15982,94045,-7269,23319,-80456,-37653,-76756,2909,81936,54958,-12393,60560,-84664,-82413,66941,-26573,-97532,64460,18593,-85789,-38820,-92575,-43663,-89435,83272,-50585,13616,-71541,-53156,727,-27644,16538,34049,57745,34348,35009,16634,-18791,23271,-63844,95817,21781,16590,59669,15966,-6864,48050,-36143,97427,-59390,96931,78939,-1958,50777,43338,-51149,39235,-27054,-43492,67457,-83616,37179,10390,85818,2391,73635,87579,-49127,-81264,-79023,-81590,53554,-74972,-83940,-13726,-39095,29174,78072,76104,47778,25797,-29515,-6493,-92793,22481,-36197,-65560,42342,15750,97556,99634,-56048,-35688,13501,63969,-74291,50911,39225,93702,-3490,-59461,-30105,-46761,-80113,92906,-68487,50742,36152,-90240,-83631,2459
7,-50566,-15477,18470,77038,40223,-80364,-98676,70957,-63647,99537,13041,31679,86631,37633,-16866,13686,-71565,21652,-46053,-80578,-61382,68487,-6417,4656,20811,67013,-30868,-11219,46,74944,14627,56965,42275,-52480,52162,-84883,-52579,-90331,92792,42184,-73422,-58440,65308,-25069,5475,-57996,59557,-17561,2826,-56939,14996,-94855,-53707,99159,43645,-67719,-1331,21412,41704,31612,32622,1919,-69333,-69828,22422,-78842,57896,-17363,27979,-76897,35008,46482,-75289,65799,20057,7170,41326,-76069,90840,-81253,-50749,3649,-42315,45238,-33924,62101,96906,58884,-7617,-28689,-66578,62458,50876,-57553,6739,41014,-64040,-34916,37940,13048,-97478,-11318,-89440,-31933,-40357,-59737,-76718,-14104,-31774,28001,4103,41702,-25120,-31654,63085,-3642,84870,-83896,-76422,-61520,12900,88678,85547,33132,-88627,52820,63915,-27472,78867,-51439,33005,-23447,-3271,-39308,39726,-74260,-31874,-36893,93656,910,-98362,60450,-88048,99308,13947,83996,-90415,-35117,70858,-55332,-31721,97528,82982,-86218,6822,25227,36946,97077,-4257,-41526,56795,89870,75860,-70802,21779,14184,-16511,-89156,-31422,71470,69600,-78498,74079,-19410,40311,28501,26397,-67574,-32518,68510,38615,19355,-6088,-97159,-29255,-92523,3023,-42536,-88681,64255,41206,44119,52208,39522,-52108,91276,-70514,83436,63289,-79741,9623,99559,12642,85950,83735,-21156,-67208,98088,-7341,-27763,-30048,-44099,-14866,-45504,-91704,19369,13700,10481,-49344,-85686,33994,19672,36028,60842,66564,-24919,33950,-93616,-47430,-35391,-28279,56806,74690,39284,-96683,-7642,-75232,37657,-14531,-86870,-9274,-26173,98640,88652,64257,46457,37814,-19370,9337,-22556,-41525,39105,-28719,51611,-93252,98044,-90996,21710,-47605,-64259,-32727,53611,-31918,-3555,33316,-66472,21274,-37731,-2919,15016,48779,-88868,1897,41728,46344,-89667,37848,68092,-44011,85354,-43776,38739,-31423,-66330,65167,-22016,59405,34328,-60042,87660,-67698,-59174,-1408,-46809,-43485,-88807,-60489,13974,22319,55836,-62995,-37375,-4185,32687,-36551,-75237,58280,26942,-73756,71756,78775,-40573,14367
,-71622,-77338,24112,23414,-7679,-51721,87492,85066,-21612,57045,10673,-96836,52461,-62218,-9310,65862,-22748,89906,-96987,-98698,26956,-43428,46141,47456,28095,55952,67323,-36455,-60202,-43302,-82932,42020,77036,10142,60406,70331,63836,58850,-66752,52109,21395,-10238,-98647,-41962,27778,69060,98535,-28680,-52263,-56679,66103,-42426,27203,80021,10153,58678,36398,63112,34911,20515,62082,-15659,-40785,27054,43767,-20289,65838,-6954,-60228,-72226,52236,-35464,25209,-15462,-79617,-41668,-84083,62404,-69062,18913,46545,20757,13805,24717,-18461,-47009,-25779,68834,64824,34473,39576,31570,14861,-15114,-41233,95509,68232,67846,84902,-83060,17642,-18422,73688,77671,-26930,64484,-99637,73875,6428,21034,-73471,19664,-68031,15922,-27028,48137,54955,-82793,-41144,-10218,-24921,-28299,-2288,68518,-54452,15686,-41814,66165,-72207,-61986,80020,50544,-99500,16244,78998,40989,14525,-56061,-24692,-94790,21111,37296,-90794,72100,70550,-31757,17708,-74290,61910,78039,-78629,-25033,73172,-91953,10052,64502,99585,-1741,90324,-73723,68942,28149,30218,24422,16659,10710,-62594,94249,96588,46192,34251,73500,-65995,-81168,41412,-98724,-63710,-54696,-52407,19746,45869,27821,-94866,-76705,-13417,-61995,-71560,43450,67384,-8838,-80293,-28937,23330,-89694,-40586,46918,80429,-5475,78013,25309,-34162,37236,-77577,86744,26281,-29033,-91813,35347,13033,-13631,-24459,3325,-71078,-75359,81311,19700,47678,-74680,-84113,45192,35502,37675,19553,76522,-51098,-18211,89717,4508,-82946,27749,85995,89912,-53678,-64727,-14778,32075,-63412,-40524,86440,-2707,-36821,63850,-30883,67294,-99468,-23708,34932,34386,98899,29239,-23385,5897,54882,98660,49098,70275,17718,88533,52161,63340,50061,-89457,19491,-99156,24873,-17008,64610,-55543,50495,17056,-10400,-56678,-29073,-42960,-76418,98562,-88104,-96255,10159,-90724,54011,12052,45871,-90933,-69420,67039,37202,78051,-52197,-40278,-58425,65414,-23394,-1415,6912,-53447,7352,17307,-78147,63727,98905,55412,-57658,-32884,-44878,22755,39730,3638,35111,39777,74193,38736,-11829,
-61188,-92757,55946,-71232,-63032,-83947,39147,-96684,-99233,25131,-32197,24406,-55428,-61941,25874,-69453,64483,-19644,-68441,12783,87338,-48676,66451,-447,-61590,50932,-11270,29035,65698,-63544,10029,80499,-9461,86368,91365,-81810,-71914,-52056,-13782,44240,-30093,-2437,24007,67581,-17365,-69164,-8420,-69289,-29370,48010,90439,13141,69243,50668,39328,61731,78266,-81313,17921,-38196,55261,9948,-24970,75712,-72106,28696,7461,31621,61047,51476,56512,11839,-96916,-82739,28924,-99927,58449,37280,69357,11219,-32119,-62050,-48745,-83486,-52376,42668,82659,68882,38773,46269,-96005,97630,25009,-2951,-67811,99801,81587,-79793,-18547,-83086,69512,33127,-92145,-88497,47703,59527,1909,88785,-88882,69188,-46131,-5589,-15086,36255,-53238,-33009,82664,53901,35939,-42946,-25571,33298,69291,53199,74746,-40127,-39050,91033,51717,-98048,87240,36172,65453,-94425,-63694,-30027,59004,88660,3649,-20267,-52565,-67321,34037,4320,91515,-56753,60115,27134,68617,-61395,-26503,-98929,-8849,-63318,10709,-16151,61905,-95785,5262,23670,-25277,90206,-19391,45735,37208,-31992,-92450,18516,-90452,-58870,-58602,93383,14333,17994,82411,-54126,-32576,35440,-60526,-78764,-25069,-9022,-394,92186,-38057,55328,-61569,67780,77169,19546,-92664,-94948,44484,-13439,83529,27518,-48333,72998,38342,-90553,-98578,-76906,81515,-16464,78439,92529,35225,-39968,-10130,-7845,-32245,-74955,-74996,67731,-13897,-82493,33407,93619,59560,-24404,-57553,19486,-45341,34098,-24978,-33612,79058,71847,76713,-95422,6421,-96075,-59130,-28976,-16922,-62203,69970,68331,21874,40551,89650,51908,58181,66480,-68177,34323,-3046,-49656,-59758,43564,-10960,-30796,15473,-20216,46085,-85355,41515,-30669,-87498,57711,56067,63199,-83805,62042,91213,-14606,4394,-562,74913,10406,96810,-61595,32564,31640,-9732,42058,98052,-7908,-72330,1558,-80301,34878,32900,3939,-8824,88316,20937,21566,-3218,-66080,-31620,86859,54289,90476,-42889,-15016,-18838,75456,30159,-67101,42328,-92703,85850,-5475,23470,-80806,68206,17764,88235,46421,-41578,74005,-81142,805
45,20868,-1560,64017,83784,68863,-97516,-13016,-72223,79630,-55692,82255,88467,28007,-34686,-69049,-41677,88535,-8217,68060,-51280,28971,49088,49235,26905,-81117,-44888,40623,74337,-24662,97476,79542,-72082,-35093,98175,-61761,-68169,59697,-62542,-72965,59883,-64026,-37656,-92392,-12113,-73495,98258,68379,-21545,64607,-70957,-92254,-97460,-63436,-8853,-19357,-51965,-76582,12687,-49712,45413,-60043,33496,31539,-57347,41837,67280,-68813,52088,-13155,-86430,-15239,-45030,96041,18749,-23992,46048,35243,-79450,85425,-58524,88781,-39454,53073,-48864,-82289,39086,82540,-11555,25014,-5431,-39585,-89526,2705,31953,-81611,36985,-56022,68684,-27101,11422,64655,-26965,-63081,-13840,-91003,-78147,-8966,41488,1988,99021,-61575,-47060,65260,-23844,-21781,-91865,-19607,44808,2890,63692,-88663,-58272,15970,-65195,-45416,-48444,-78226,-65332,-24568,42833,-1806,-71595,80002,-52250,30952,48452,-90106,31015,-22073,62339,63318,78391,28699,77900,-4026,-76870,-45943,33665,9174,-84360,-22684,-16832,-67949,-38077,-38987,-32847,51443,-53580,-13505,9344,-92337,26585,70458,-52764,-67471,-68411,-1119,-2072,-93476,67981,40887,-89304,-12235,41488,1454,5355,-34855,-72080,24514,-58305,3340,34331,8731,77451,-64983,-57876,82874,62481,-32754,-39902,22451,-79095,-23904,78409,-7418,77916]))
print(solution.threeSum([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0]))
| [
"yongsung.kang@gmail.com"
] | yongsung.kang@gmail.com |
95ddec165981568dae2b31d941760a612ec760ab | ab1c920583995f372748ff69d38a823edd9a06af | /shultais_courses/data_types/type_conversion/full_tank_cost.py | a3ff76560da0e4080668d6d3de29664a45c0667c | [] | no_license | adyadyat/pyprojects | 5e15f4e33892f9581b8ebe518b82806f0cd019dc | c8f79c4249c22eb9e3e19998d5b504153faae31f | refs/heads/master | 2022-11-12T16:59:17.482303 | 2020-07-04T09:08:18 | 2020-07-04T09:08:18 | 265,461,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | import sys
# Получаем данные.
price = float(sys.argv[1])
full = float(sys.argv[2])
busy = float(sys.argv[3])
# Делаем финальный расчет.
amount = (full - busy) * price
# Выводим результат.
print(amount)
"""
СТОИМОСТЬ ПОЛНОГО БАКА
Начинающий разработчик написал программу для расчета стоимости заправки автомобиля до полного бака. Программа принимает три параметра: цену 1 литра бензина, полный объем бензобака и объем уже залитого топлива в бак.
Однако программа не работает как было запланировано и выводит неверные данные
Исправьте все ошибки в коде.
Пример использования:
> python program.py 45.2 50 11.7
> 1731.16
""" | [
"omorbekov.a@gmail.com"
] | omorbekov.a@gmail.com |
79a9f6f9308b9bdc741eb0617b42712e08bbd3a2 | 98a86ec3182e9eef7e21db734118f15f2514cd5c | /python/pygl/test1.py | 63b6d54c074494e0cc7f03575710532256e0a820 | [
"MIT"
] | permissive | icefoxen/lang | ad3d4d2bdb162f95416e9d805c29951cc812b6f6 | 628185020123aabe4753f96c6ab4378637a2dbf5 | refs/heads/master | 2020-07-03T12:04:05.512367 | 2017-01-27T18:05:00 | 2017-01-27T18:05:00 | 74,171,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,253 | py | # Okay.
# THANK you.
# Now, let's get to hacking Python OpenGL.
# Foist, we want to create a shape and move it with da arrow keys.
from OpenGL.GL import *
from OpenGL.GLU import *
import pygame
from pygame.locals import *
translatex = -1.5
translatey = 0.0
translatez = -6.0
tRot = hRot = 0.0
def setSizeGL( (width, height) ):
if height == 0:
height = 1
glViewport( 0, 0, width, height )
# Do stuff to projection matrix
glMatrixMode( GL_PROJECTION )
glLoadIdentity()
# 0.1 and 100.0 are the min and max depth distance
gluPerspective( 45, 1.0 * width / height, 0.1, 100.0 )
# Do stuff to model view matrix
glMatrixMode( GL_MODELVIEW )
glLoadIdentity()
def initGL():
glShadeModel( GL_SMOOTH )
glClearColor( 0.0, 0.0, 0.0, 0.0 )
glClearDepth( 1.0 )
glEnable( GL_DEPTH_TEST )
glDepthFunc( GL_LEQUAL )
glHint( GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST )
def drawGL():
global translatex
global translatey
global translatez
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )
glLoadIdentity()
#glTranslatef( -1.5, 0.0, -6.0 )
#print translatex, translatey, translatez
glTranslatef( translatex, translatey, translatez )
global tRot
glRotate( tRot, 1.0, 1.0, 0.0 )
glBegin( GL_TRIANGLES )
glColor3f( 1.0, 0.0, 0.0 )
glVertex3f( 0.0, 1.0, 0.0 )
glColor3f( 0.0, 1.0, 0.0 )
glVertex3f( -1.0, -1.0, 0.0 )
glColor3f( 0.0, 0.0, 1.0 )
glVertex3f( 1.0, -1.0, 0.0 )
glEnd()
glLoadIdentity()
# We make a hexagon!
global hRot
glTranslatef( 1.5, 0.0, -6.0 )
glRotate( hRot, -1.0, 0.0, 0.0 )
glBegin( GL_POLYGON )
glColor3f( 1.0, 0.0, 0.0 )
glVertex3f( -1.0, 1.0, 0.0 )
glColor3f( 1.0, 1.0, 0.0 )
glVertex3f( 0.0, 2.0, 0.0 )
glColor3f( 0.0, 1.0, 0.0 )
glVertex3f( 1.0, 1.0, 0.0 )
glColor3f( 0.0, 1.0, 1.0 )
glVertex3f( 1.0, -0.0, 0.0 )
glColor3f( 0.0, 0.0, 1.0 )
glVertex3f( 0.0, -1.0, 0.0 )
glColor3f( 1.0, 0.0, 1.0 )
glVertex3f( -1.0, -0.0, 0.0 )
#glVertex3f( -1.0, -2.0, 0.0 )
#glVertex3f( -1.5, -1.0, 0.0 )
glEnd()
tRot += 0.2
hRot += 0.2
def main():
global translatex
global translatey
global translatez
videoFlags = OPENGL | DOUBLEBUF
screenSize = (640,480)
pygame.init()
pygame.display.set_mode( screenSize, videoFlags )
setSizeGL( screenSize )
initGL()
frames = 0
ticks = pygame.time.get_ticks()
while True:
event = pygame.event.poll()
if event.type == QUIT:
break
if event.type == KEYDOWN:
if event.key == K_RIGHT:
translatex += 0.25
elif event.key == K_LEFT:
translatex -= 0.25
elif event.key == K_UP:
translatey += 0.25
elif event.key == K_DOWN:
translatey -= 0.25
elif event.key == K_a:
translatez += 0.25
elif event.key == K_z:
translatez -= 0.25
drawGL()
pygame.display.flip()
frames += 1
print "FPS: %d" % ((frames * 1000) / (pygame.time.get_ticks() - ticks))
if __name__ == '__main__':
main()
| [
"icefoxen@gmail.com"
] | icefoxen@gmail.com |
08f4a816cfb1b292a9d05ce8682450615698b86a | ac0894b411507bfd027696b6bf11b5e384ed68fc | /need-to-do/python3------download-problem--of--leetcode/518.coin-change-2.py | bb27135b95f304b145ea54cd5c448d0346f160c8 | [] | no_license | mkzpd/leetcode-solution | 1d19554628c34c74012fa52582c225e6dccb345c | 60c9b218683bcdee86477a910c58ec702185c726 | refs/heads/master | 2020-05-31T05:56:48.985529 | 2019-09-20T09:10:49 | 2019-09-20T09:10:49 | 190,128,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | #
# @lc app=leetcode id=518 lang=python3
#
# [518] Coin Change 2
#
# https://leetcode.com/problems/coin-change-2/description/
#
# algorithms
# Medium (44.22%)
# Total Accepted: 56.2K
# Total Submissions: 127.1K
# Testcase Example: '5\n[1,2,5]'
#
# You are given coins of different denominations and a total amount of money.
# Write a function to compute the number of combinations that make up that
# amount. You may assume that you have infinite number of each kind of
# coin.
#
#
#
#
#
#
# Example 1:
#
#
# Input: amount = 5, coins = [1, 2, 5]
# Output: 4
# Explanation: there are four ways to make up the amount:
# 5=5
# 5=2+2+1
# 5=2+1+1+1
# 5=1+1+1+1+1
#
#
# Example 2:
#
#
# Input: amount = 3, coins = [2]
# Output: 0
# Explanation: the amount of 3 cannot be made up just with coins of 2.
#
#
# Example 3:
#
#
# Input: amount = 10, coins = [10]
# Output: 1
#
#
#
#
# Note:
#
# You can assume that
#
#
# 0 <= amount <= 5000
# 1 <= coin <= 5000
# the number of coins is less than 500
# the answer is guaranteed to fit into signed 32-bit integer
#
#
#
class Solution:
def change(self, amount: int, coins: List[int]) -> int:
| [
"sodgso262@gmail.com"
] | sodgso262@gmail.com |
5a18ecad0b7d2b084aa63bc977407eed2c5e79a6 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p2DJ/New/R2/benchmark/startQiskit_QC108.py | b2ba0c7d4c4750e3ffdad273d797ff0ae74a0d0c | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,068 | py | # qubit number=2
# total number=9
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[1]) # number=4
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.x(input_qubit[1]) # number=2
prog.y(input_qubit[1]) # number=5
prog.cx(input_qubit[0],input_qubit[1]) # number=6
prog.x(input_qubit[1]) # number=7
prog.cx(input_qubit[0],input_qubit[1]) # number=8
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_belem")
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit_QC108.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
beeb0da85923f1b285686879a18c74bd2d14ecba | 44cb2643ec3474eebcd1015108e074e73b318c07 | /hike_schedule/admin.py | 782deaf69f4a47c6c521c43ac08bda403daa77b3 | [] | no_license | kdechant/hike-schedule | 010211b6b23c8e129e812710468c458c74bf5cef | 95a0653145d19e992025bbecb6f1d95423a26450 | refs/heads/master | 2021-01-10T08:52:40.940894 | 2016-02-12T07:05:29 | 2016-02-12T07:05:29 | 44,289,843 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | from django.contrib import admin
from .models import *
@admin.register(EventType)
class EventTypeAdmin(admin.ModelAdmin):
list_display = ['name']
@admin.register(Event)
class EventAdmin(admin.ModelAdmin):
list_display = ['event_type', 'title', 'event_date', 'status']
list_filter = ['event_type', 'route_id', 'leaders']
@admin.register(Area)
class AreaAdmin(admin.ModelAdmin):
list_display = ['name']
@admin.register(Route)
class RouteAdmin(admin.ModelAdmin):
list_display = ['name', 'distance', 'elevation_gain', 'favorite']
list_filter = ['area_id', 'favorite']
| [
"keith.dechant@gmail.com"
] | keith.dechant@gmail.com |
13df13c4bfbd6372e9b6e913d210ced255442e76 | 575cd8511fde538c3912e7ff33dea2a10f195f25 | /portfolio/views.py | 4b991c0bcff6b6c493393a8a1570ba2955b0b518 | [] | no_license | jakiiii/jqurity.com | 710959e07bb115d3178c72891bd92f7d377964c5 | fc7119c258fdbed1deaddbf56ebacfc293c470e8 | refs/heads/master | 2020-04-11T17:24:25.643981 | 2018-12-16T18:30:48 | 2018-12-16T18:30:48 | 161,959,589 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | from django.urls import reverse_lazy
from django.views.generic import DetailView, CreateView
from django.views.generic.edit import FormMixin
from contact.forms import ContactForm
from .models import (
Slider,
Ayat,
Experience,
Familiar,
Interest,
Portfolio
)
from about.models import (
AboutModel,
SocialModel
)
# Create your views here.
class PortfolioView(CreateView):
form_class = ContactForm
template_name = 'portfolio/portfolio.html'
success_url = reverse_lazy('portfolio')
def get_object(self, queryset=None):
return self.request.user
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['title'] = 'Portfolio'
context['slider_info'] = Slider.objects.all()[:1]
context['al_Quran_verse'] = Ayat.objects.all()[:1]
context['about_info'] = AboutModel.objects.all()[:1]
context['social_link'] = SocialModel.objects.all()[:1]
context['experience_info'] = Experience.objects.all()
context['familiar_info'] = Familiar.objects.all()
context['interest_info'] = Interest.objects.all()
context['portfolio_info'] = Portfolio.objects.all()[:8]
return context
def form_valid(self, form):
instance = form.save(commit=True)
print(instance)
return super().form_valid(form)
| [
"me.jaki@outlook.com"
] | me.jaki@outlook.com |
3f872bd081b1053662722f47e8bd43eb38def392 | be73248aa4f1171e81b65cf955c4bd6110d56095 | /plugins/__init__.py | f6a247f3cb53e13670b4621dc27bc7acf3d1692d | [] | no_license | rogerhoward/lambot | 781c158e58bd71e2f3eb480aab31f181aee55e62 | d5588041fc92b779ba88479d8657f9b8a4916692 | refs/heads/development | 2022-02-18T05:03:23.911978 | 2017-06-22T03:22:11 | 2017-06-22T03:22:11 | 86,493,856 | 1 | 1 | null | 2022-02-04T15:04:55 | 2017-03-28T18:30:43 | Python | UTF-8 | Python | false | false | 170 | py |
from pluginbase import PluginBase
plugin_source = PluginBase(package='plugins').make_plugin_source(searchpath=['./plugins'])
plugin_names = plugin_source.list_plugins() | [
"rogerhoward@mac.com"
] | rogerhoward@mac.com |
03b17a1ec30dbc3ca1b0a583753b6290d482c239 | 73320add14214fa860e9dfdd6ea4dfa3862f7806 | /Logistic/logistic.py | 51485f84bc4585d084b96a7166bcd9167422d091 | [] | no_license | mayk93/DeepLearning | 5aa52df3175f38f5b7fa95f33f929ac937409770 | d7db13f5a46b3abb853dab9a4469f81386f27109 | refs/heads/master | 2020-07-02T14:30:05.542467 | 2016-11-20T21:33:15 | 2016-11-20T21:33:15 | 74,298,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,536 | py | import tensorflow as tf
import numpy as np
try:
from tqdm import tqdm
except ImportError:
def tqdm(x, *args, **kwargs):
return x
# Load data
data = np.load('data_with_labels.npz')
train = data['arr_0']/255.
labels = data['arr_1']
# Look at some data
print(train[0])
print(labels[0])
# If you have matplotlib installed
import matplotlib.pyplot as plt
plt.ion()
# Let's look at a subplot of one of A in each font
f, plts = plt.subplots(5, sharex=True)
c = 91
for i in range(5):
plts[i].pcolor(train[c + i * 558],
cmap=plt.cm.gray_r)
def to_onehot(labels,nclasses = 5):
'''
Convert labels to "one-hot" format.
>>> a = [0,1,2,3]
>>> to_onehot(a,5)
array([[ 1., 0., 0., 0., 0.],
[ 0., 1., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 1., 0.]])
'''
outlabels = np.zeros((len(labels),nclasses))
for i,l in enumerate(labels):
outlabels[i,l] = 1
return outlabels
onehot = to_onehot(labels)
# Split data into training and validation
indices = np.random.permutation(train.shape[0])
valid_cnt = int(train.shape[0] * 0.1)
test_idx, training_idx = indices[:valid_cnt],\
indices[valid_cnt:]
test, train = train[test_idx,:],\
train[training_idx,:]
onehot_test, onehot_train = onehot[test_idx,:],\
onehot[training_idx,:]
sess = tf.InteractiveSession()
# These will be inputs
## Input pixels, flattened
x = tf.placeholder("float", [None, 1296])
## Known labels
y_ = tf.placeholder("float", [None,5])
# Variables
W = tf.Variable(tf.zeros([1296,5]))
b = tf.Variable(tf.zeros([5]))
# Just initialize
sess.run(tf.initialize_all_variables())
# Define model
y = tf.nn.softmax(tf.matmul(x,W) + b)
### End model specification, begin training code
# Climb on cross-entropy
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
y + 1e-50, y_))
# How we train
train_step = tf.train.GradientDescentOptimizer(
0.1).minimize(cross_entropy)
# Define accuracy
correct_prediction = tf.equal(tf.argmax(y,1),
tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(
correct_prediction, "float"))
# Actually train
epochs = 1000
train_acc = np.zeros(epochs//10)
test_acc = np.zeros(epochs//10)
for i in tqdm(range(epochs)):
# Record summary data, and the accuracy
if i % 10 == 0:
# Check accuracy on train set
A = accuracy.eval(feed_dict={
x: train.reshape([-1,1296]),
y_: onehot_train})
train_acc[i//10] = A
# And now the validation set
A = accuracy.eval(feed_dict={
x: test.reshape([-1,1296]),
y_: onehot_test})
test_acc[i//10] = A
train_step.run(feed_dict={
x: train.reshape([-1,1296]),
y_: onehot_train})
# Notice that accuracy flattens out
print(train_acc[-1])
print(test_acc[-1])
# Plot the accuracy curves
plt.plot(train_acc,'bo')
plt.plot(test_acc,'rx')
# Look at a subplot of the weights for each font
f, plts = plt.subplots(5, sharex=True)
for i in range(5):
plts[i].pcolor(W.eval()[:,i].reshape([36,36]))
plt.savefig("test.png")
plt.show()
# import matplotlib.pyplot as plt
# import numpy as np
#
# t = np.arange(0.0, 2.0, 0.01)
# s = np.sin(2*np.pi*t)
# plt.plot(t, s)
#
# plt.xlabel('time (s)')
# plt.ylabel('voltage (mV)')
# plt.title('About as simple as it gets, folks')
# plt.grid(True)
# plt.savefig("test.png")
# plt.show()
| [
"mihai.mandrescu@gmail.com"
] | mihai.mandrescu@gmail.com |
a97ae738d52d67ff57849264badfff4ac984c976 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03006/s651349603.py | ab084aa71596b708f5dc27efe7169f4f7b9f0fe0 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | # ベクトルを固定して全探索まではわかる 実装がわからん
# いやべつに順番は関係ないのか? 「残っているボールを 1つ選んで回収する。」
n = int(input())
if n == 1:
print(1) # 「ただし、1つ目に選んだボールについては必ずコスト 1かかる。」
exit()
points = []
for _ in range(n):
points.append(list(map(int, input().split())))
ans = 10**18
for p in range(n):
for q in range(n):
if p == q:
continue
vector = [points[p][0] - points[q][0], points[p][1] - points[q][1]]
tmp = 0
for i in range(n):
for j in range(n):
if i == j:
continue
if points[i][0] - points[j][0] == vector[
0] and points[i][1] - points[j][1] == vector[1]:
tmp += 1
ans = min(ans, n - tmp)
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
99f9d3e10ae4a78ea585e3d71cd708bc9bcf35ba | f282ed1fa6776b52d0b11a375067c9c2e1ef2db3 | /qsome/ext_methods/ext_factory.py | 3b56d2a11095de73a08fb47949efb3b5de0f778d | [
"Apache-2.0"
] | permissive | Goodpaster/QSoME | fc96d87fecab3c13771121b3986f3fc909e8c8b5 | 7f95524665602821c9b1030cebe2e97af057c056 | refs/heads/master | 2022-06-16T03:45:08.893218 | 2022-03-09T21:47:16 | 2022-03-09T21:47:16 | 150,792,671 | 8 | 2 | Apache-2.0 | 2021-08-16T14:24:54 | 2018-09-28T20:43:54 | Python | UTF-8 | Python | false | false | 1,015 | py | from qsome.ext_methods.molpro_ext import MolproExt
from qsome.ext_methods.openmolcas_ext import OpenMolCASExt
from qsome.ext_methods.psi4_ext import Psi4Ext
#from ext_methods.bagel_ext import BagelExt
class ExtFactory:
def get_ext_obj(self, ext_prgm, mol_obj, method_name, ext_pot, core_ham=None, filename='temp', work_dir=None, scr_dir=None, nproc=None, pmem=None, save_orbs=False, save_density=False, hl_dict=None, hl_excited_dict=None):
if ext_prgm == 'molpro':
return MolproExt(mol_obj, method_name, ext_pot, core_ham, filename, work_dir, scr_dir, nproc, pmem, save_orbs, save_density, hl_dict)
elif ext_prgm == 'molcas' or ext_prgm == 'openmolcas':
return OpenMolCASExt(mol_obj, method_name, ext_pot)
#elif ext_prgm == 'bagel':
# return BagelExt(mol_obj, method_name, ext_pot)
elif ext_prgm == 'psi4':
return Psi4Ext(mol_obj, method_name, ext_pot, core_ham, filename, work_dir, scr_dir, nproc, pmem, hl_dict, hl_excited_dict)
| [
"dan.s.graham@gmail.com"
] | dan.s.graham@gmail.com |
2a24c302a4ffc32edf996f5e2b129bfaa4593284 | af4d80ad935f1aedae6e43bc39def7c58bedd332 | /benchdev/config.py | 715f49c5a01cbde4ce3fc3b2467df9a0a7eb8217 | [
"LicenseRef-scancode-hdf5",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | Qi-max/automatminer | 39a96d63a7235870401fde4e34a8ab47d8bfbf6c | a366709798c5886350f5b71cc7d06f474122c2f3 | refs/heads/master | 2020-05-02T17:00:11.506603 | 2019-03-27T22:45:11 | 2019-03-27T22:45:11 | 178,085,605 | 1 | 0 | NOASSERTION | 2019-03-27T22:43:34 | 2019-03-27T22:43:33 | null | UTF-8 | Python | false | false | 3,730 | py | """
The environment variables you need for this all to work are:
- AMM_BENCH_DIR: where to store benchmarks
- AMM_DATASET_DIR: where to store datasets
- AMM_CODE_DIR: where to run tests
"""
from fireworks import LaunchPad
from automatminer.utils.ml import AMM_CLF_NAME, AMM_REG_NAME
from hmte.db import get_connection
# Private production LaunchPad (FireWorks) connection to the
# "hackingmaterials" database, opened with write access.
# NOTE(review): this runs at import time — importing this module without
# the production DB credentials will fail; confirm this is intended.
LP = get_connection("hackingmaterials", write=True, connection_type="launchpad")
# Debugging locally (swap in for LP above to avoid touching production)
# LP = LaunchPad(name="automatminer")
# Constants for running benchmarks and builds
# Default cross-validation settings: 5-fold shuffled KFold with a pinned
# seed so benchmark splits are reproducible across runs.
KFOLD_DEFAULT = {"shuffle": True, "random_state": 18012019, "n_splits": 5}
# Shell command that runs the test suite under coverage inside $AMM_CODE_DIR.
RUN_TESTS_CMD = "cd $AMM_CODE_DIR && coverage run setup.py test"
# Shell command that exports the coverage XML and uploads it to Codacy.
EXPORT_COV_CMD = "coverage xml && python-codacy-coverage -r coverage.xml"
def _benchmark(name, data_pickle, target, problem_type, clf_pos_label=None):
    """Build one benchmark-specification dict.

    Every benchmark below shares the same five-key schema; this helper
    removes the fifteen-fold copy-paste of the literal dicts.

    Args:
        name (str): Human-readable benchmark identifier.
        data_pickle (str): Filename of the pickled dataframe for this
            benchmark (resolved by the benchmark code, presumably against
            AMM_DATASET_DIR — see module docstring).
        target (str): Name of the target column in the dataframe.
        problem_type (str): AMM_REG_NAME for regression or AMM_CLF_NAME
            for classification.
        clf_pos_label: Positive class label for classification problems;
            None for regression (the default).

    Returns:
        dict: {"name", "data_pickle", "target", "problem_type",
        "clf_pos_label"} — identical in content to the literal dicts this
        module previously declared.
    """
    return {
        "name": name,
        "data_pickle": data_pickle,
        "target": target,
        "problem_type": problem_type,
        "clf_pos_label": clf_pos_label,
    }


# Local testing configuration...
LOCAL_DEBUG_REG = _benchmark(
    "debug_local_reg", "jdft2d_smalldf.pickle.gz", "exfoliation_en", AMM_REG_NAME
)
LOCAL_DEBUG_CLF = _benchmark(
    "debug_local_clf", "expt_gaps_smalldf.pickle.gz", "is_metal", AMM_CLF_NAME,
    clf_pos_label=True,
)
LOCAL_DEBUG_SET = [LOCAL_DEBUG_CLF, LOCAL_DEBUG_REG]

# Real benchmark sets
BULK = _benchmark("mp_bulk", "elasticity_K_VRH.pickle.gz", "K_VRH", AMM_REG_NAME)
SHEAR = _benchmark("mp_shear", "elasticity_G_VRH.pickle.gz", "G_VRH", AMM_REG_NAME)
LOG_BULK = _benchmark(
    "mp_log_bulk", "elasticity_log10(K_VRH).pickle.gz", "log10(K_VRH)", AMM_REG_NAME
)
LOG_SHEAR = _benchmark(
    "mp_log_shear", "elasticity_log10(G_VRH).pickle.gz", "log10(G_VRH)", AMM_REG_NAME
)
REFRACTIVE = _benchmark("refractive_index", "dielectric.pickle.gz", "n", AMM_REG_NAME)
JDFT2D = _benchmark("jdft2d", "jdft2d.pickle.gz", "exfoliation_en", AMM_REG_NAME)
MP_GAP = _benchmark("mp_gap", "mp_gap.pickle.gz", "gap pbe", AMM_REG_NAME)
MP_IS_METAL = _benchmark(
    "mp_is_metal", "mp_is_metal.pickle.gz", "is_metal", AMM_CLF_NAME,
    clf_pos_label=True,
)
MP_E_FORM = _benchmark("mp_e_form", "mp_e_form.pickle.gz", "e_form", AMM_REG_NAME)
CASTELLI_E_FORM = _benchmark("castelli", "castelli.pickle.gz", "e_form", AMM_REG_NAME)
GFA = _benchmark(
    "glass_formation", "glass.pickle.gz", "gfa", AMM_CLF_NAME, clf_pos_label=True
)
EXPT_IS_METAL = _benchmark(
    "expt_is_metal", "expt_gaps.pickle.gz", "is_metal", AMM_CLF_NAME,
    clf_pos_label=True,
)
PHONONS = _benchmark("phonons", "phonons.pickle.gz", "last phdos peak", AMM_REG_NAME)

# Small subset used to smoke-test the benchmark pipeline.
BENCHMARK_DEBUG_SET = [JDFT2D, PHONONS, EXPT_IS_METAL]
# Every benchmark, for a full evaluation run.
BENCHMARK_FULL_SET = [BULK, SHEAR, LOG_BULK, LOG_SHEAR, REFRACTIVE, JDFT2D,
                      MP_GAP, MP_IS_METAL, MP_E_FORM, CASTELLI_E_FORM, GFA,
                      EXPT_IS_METAL, PHONONS]
| [
"ardunn@lbl.gov"
] | ardunn@lbl.gov |
3e44fd3291ef6dfd06a19d5ff8552b9b0b34fe1f | 7cb3e5e16fd93e6f8a1c07c211cee16dc248ef5d | /venv/lib/python3.6/site-packages/django/contrib/gis/gdal/raster/source.py | 777d21e156c61858642b38c84d83aeee7462724a | [
"BSD-3-Clause"
] | permissive | JustynaJBroniszewska/Blog | d74a8cb19fa037b834f5218522ff1397eb60d370 | cfd8efbcce3e23c7ebeea82b2e732de63c663ac8 | refs/heads/master | 2022-11-03T22:01:07.165652 | 2020-06-05T14:25:01 | 2020-06-05T14:25:01 | 266,791,768 | 0 | 0 | null | 2020-06-05T14:25:02 | 2020-05-25T13:52:19 | Python | UTF-8 | Python | false | false | 17,214 | py | import json
import os
import sys
import uuid
from ctypes import (
addressof,
byref,
c_buffer,
c_char_p,
c_double,
c_int,
c_void_p,
string_at,
)
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import raster as capi
from django.contrib.gis.gdal.raster.band import BandList
from django.contrib.gis.gdal.raster.base import GDALRasterBase
from django.contrib.gis.gdal.raster.const import (
GDAL_RESAMPLE_ALGORITHMS,
VSI_DELETE_BUFFER_ON_READ,
VSI_FILESYSTEM_BASE_PATH,
VSI_TAKE_BUFFER_OWNERSHIP,
)
from django.contrib.gis.gdal.srs import SpatialReference, SRSException
from django.contrib.gis.geometry import json_regex
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import cached_property
class TransformPoint(list):
    """Two-element (x, y) point exposing one named property of a raster
    geotransform (origin, scale, or skew).

    Reading goes through the cached list contents; writing a component
    persists it back into the raster's six-element geotransform.
    """

    # Positions of the (x, y) components of each named property inside
    # the six-element GDAL geotransform array.
    indices = {
        "origin": (0, 3),
        "scale": (1, 5),
        "skew": (2, 4),
    }

    def __init__(self, raster, prop):
        idx_x, idx_y = self.indices[prop]
        gtf = raster.geotransform
        super().__init__((gtf[idx_x], gtf[idx_y]))
        self._raster = raster
        self._prop = prop

    def _store(self, position, value):
        # Write one coordinate back into the raster geotransform. The
        # geotransform is re-assigned wholesale so the raster setter runs.
        gtf = self._raster.geotransform
        gtf[position] = value
        self._raster.geotransform = gtf

    @property
    def x(self):
        """First (x) component of the point."""
        return self[0]

    @x.setter
    def x(self, value):
        self._store(self.indices[self._prop][0], value)

    @property
    def y(self):
        """Second (y) component of the point."""
        return self[1]

    @y.setter
    def y(self, value):
        self._store(self.indices[self._prop][1], value)
class GDALRaster(GDALRasterBase):
    """
    Wrap a raster GDAL Data Source object.
    """

    # ctypes routine the base class uses to release the underlying GDAL
    # dataset handle when this wrapper is destroyed.
    destructor = capi.close_ds

    def __init__(self, ds_input, write=False):
        """
        Create a raster from one of four input types:

        - str: a file path (opened via GDALOpen), or a JSON string
          describing a new raster (parsed and handled like the dict input).
        - bytes: a full raster file image, written into GDAL's in-memory
          (vsimem) filesystem and opened from there.
        - dict: creation parameters (driver, name, width, height, srid,
          bands, ...) for a brand-new raster.
        - c_void_p: an existing GDAL dataset pointer to wrap directly.

        ``write=True`` opens the datasource in update mode; the bytes and
        dict branches always force write mode.
        """
        self._write = 1 if write else 0
        Driver.ensure_registered()

        # Preprocess json inputs. This converts json strings to dictionaries,
        # which are parsed below the same way as direct dictionary inputs.
        if isinstance(ds_input, str) and json_regex.match(ds_input):
            ds_input = json.loads(ds_input)

        # If input is a valid file path, try setting file as source.
        if isinstance(ds_input, str):
            try:
                # GDALOpen will auto-detect the data source type.
                self._ptr = capi.open_ds(force_bytes(ds_input), self._write)
            except GDALException as err:
                raise GDALException(
                    'Could not open the datasource at "{}" ({}).'.format(ds_input, err)
                )
        elif isinstance(ds_input, bytes):
            # Create a new raster in write mode.
            self._write = 1
            # Get size of buffer.
            size = sys.getsizeof(ds_input)
            # Pass data to ctypes, keeping a reference to the ctypes object so
            # that the vsimem file remains available until the GDALRaster is
            # deleted.
            self._ds_input = c_buffer(ds_input)
            # Create random name to reference in vsimem filesystem.
            vsi_path = os.path.join(VSI_FILESYSTEM_BASE_PATH, str(uuid.uuid4()))
            # Create vsimem file from buffer.
            capi.create_vsi_file_from_mem_buffer(
                force_bytes(vsi_path),
                byref(self._ds_input),
                size,
                VSI_TAKE_BUFFER_OWNERSHIP,
            )
            # Open the new vsimem file as a GDALRaster.
            try:
                self._ptr = capi.open_ds(force_bytes(vsi_path), self._write)
            except GDALException:
                # Remove the broken file from the VSI filesystem.
                capi.unlink_vsi_file(force_bytes(vsi_path))
                raise GDALException("Failed creating VSI raster from the input buffer.")
        elif isinstance(ds_input, dict):
            # A new raster needs to be created in write mode
            self._write = 1

            # Create driver (in memory by default)
            driver = Driver(ds_input.get("driver", "MEM"))

            # For out of memory drivers, check filename argument
            if driver.name != "MEM" and "name" not in ds_input:
                raise GDALException(
                    'Specify name for creation of raster with driver "{}".'.format(
                        driver.name
                    )
                )

            # Check if width and height where specified
            if "width" not in ds_input or "height" not in ds_input:
                raise GDALException(
                    "Specify width and height attributes for JSON or dict input."
                )

            # Check if srid was specified
            if "srid" not in ds_input:
                raise GDALException("Specify srid for JSON or dict input.")

            # Create null terminated gdal options array.
            papsz_options = []
            for key, val in ds_input.get("papsz_options", {}).items():
                option = "{}={}".format(key, val)
                papsz_options.append(option.upper().encode())
            papsz_options.append(None)

            # Convert papszlist to ctypes array.
            papsz_options = (c_char_p * len(papsz_options))(*papsz_options)

            # Create GDAL Raster
            # NOTE(review): datatype default 6 presumably maps to GDAL's
            # Float64/GDT code table in const.py — confirm there.
            self._ptr = capi.create_ds(
                driver._ptr,
                force_bytes(ds_input.get("name", "")),
                ds_input["width"],
                ds_input["height"],
                ds_input.get("nr_of_bands", len(ds_input.get("bands", []))),
                ds_input.get("datatype", 6),
                byref(papsz_options),
            )

            # Set band data if provided
            for i, band_input in enumerate(ds_input.get("bands", [])):
                band = self.bands[i]

                if "nodata_value" in band_input:
                    band.nodata_value = band_input["nodata_value"]

                    # Instantiate band filled with nodata values if only
                    # partial input data has been provided.
                    if band.nodata_value is not None and (
                        "data" not in band_input
                        or "size" in band_input
                        or "shape" in band_input
                    ):
                        band.data(data=(band.nodata_value,), shape=(1, 1))

                # Set band data values from input.
                band.data(
                    data=band_input.get("data"),
                    size=band_input.get("size"),
                    shape=band_input.get("shape"),
                    offset=band_input.get("offset"),
                )

            # Set SRID
            self.srs = ds_input.get("srid")

            # Set additional properties if provided
            if "origin" in ds_input:
                self.origin.x, self.origin.y = ds_input["origin"]

            if "scale" in ds_input:
                self.scale.x, self.scale.y = ds_input["scale"]

            if "skew" in ds_input:
                self.skew.x, self.skew.y = ds_input["skew"]
        elif isinstance(ds_input, c_void_p):
            # Instantiate the object using an existing pointer to a gdal raster.
            self._ptr = ds_input
        else:
            raise GDALException(
                'Invalid data source input type: "{}".'.format(type(ds_input))
            )

    def __del__(self):
        # Clean up the in-memory backing file before the base destructor
        # closes the dataset handle.
        if self.is_vsi_based:
            # Remove the temporary file from the VSI in-memory filesystem.
            capi.unlink_vsi_file(force_bytes(self.name))
        super().__del__()

    def __str__(self):
        # The dataset description, i.e. the filename for file-based rasters.
        return self.name

    def __repr__(self):
        """
        Short-hand representation because WKB may be very large.
        """
        return "<Raster object at %s>" % hex(addressof(self._ptr))

    def _flush(self):
        """
        Flush all data from memory into the source file if it exists.
        The data that needs flushing are geotransforms, coordinate systems,
        nodata_values and pixel values. This function will be called
        automatically wherever it is needed.
        """
        # Raise an Exception if the value is being changed in read mode.
        if not self._write:
            raise GDALException(
                "Raster needs to be opened in write mode to change values."
            )
        capi.flush_ds(self._ptr)

    @property
    def vsi_buffer(self):
        """
        Return the raster file image as bytes when the raster is backed by
        the VSI in-memory filesystem; return None otherwise.
        """
        if not self.is_vsi_based:
            return None
        # Prepare an integer that will contain the buffer length.
        out_length = c_int()
        # Get the data using the vsi file name.
        dat = capi.get_mem_buffer_from_vsi_file(
            force_bytes(self.name), byref(out_length), VSI_DELETE_BUFFER_ON_READ,
        )
        # Read the full buffer pointer.
        return string_at(dat, out_length.value)

    @cached_property
    def is_vsi_based(self):
        """
        Return True if this raster lives in the VSI in-memory filesystem,
        i.e. its name starts with the vsimem base path.
        """
        return self.name.startswith(VSI_FILESYSTEM_BASE_PATH)

    @property
    def name(self):
        """
        Return the name of this raster. Corresponds to filename
        for file-based rasters.
        """
        return force_text(capi.get_ds_description(self._ptr))

    @cached_property
    def driver(self):
        """
        Return the GDAL Driver used for this raster.
        """
        ds_driver = capi.get_ds_driver(self._ptr)
        return Driver(ds_driver)

    @property
    def width(self):
        """
        Width (X axis) in pixels.
        """
        return capi.get_ds_xsize(self._ptr)

    @property
    def height(self):
        """
        Height (Y axis) in pixels.
        """
        return capi.get_ds_ysize(self._ptr)

    @property
    def srs(self):
        """
        Return the SpatialReference used in this GDALRaster.
        """
        try:
            wkt = capi.get_ds_projection_ref(self._ptr)
            if not wkt:
                return None
            return SpatialReference(wkt, srs_type="wkt")
        except SRSException:
            # No usable projection on the dataset; callers handle None.
            return None

    @srs.setter
    def srs(self, value):
        """
        Set the spatial reference used in this GDALRaster. The input can be
        a SpatialReference or any parameter accepted by the SpatialReference
        constructor.
        """
        if isinstance(value, SpatialReference):
            srs = value
        elif isinstance(value, (int, str)):
            srs = SpatialReference(value)
        else:
            raise ValueError("Could not create a SpatialReference from input.")
        capi.set_ds_projection_ref(self._ptr, srs.wkt.encode())
        self._flush()

    @property
    def srid(self):
        """
        Shortcut to access the srid of this GDALRaster.
        """
        return self.srs.srid

    @srid.setter
    def srid(self, value):
        """
        Shortcut to set this GDALRaster's srs from an srid.
        """
        self.srs = value

    @property
    def geotransform(self):
        """
        Return the geotransform of the data source.
        Return the default geotransform if it does not exist or has not been
        set previously. The default is [0.0, 1.0, 0.0, 0.0, 0.0, -1.0].
        """
        # Create empty ctypes double array for data
        gtf = (c_double * 6)()
        capi.get_ds_geotransform(self._ptr, byref(gtf))
        return list(gtf)

    @geotransform.setter
    def geotransform(self, values):
        "Set the geotransform for the data source."
        if len(values) != 6 or not all(isinstance(x, (int, float)) for x in values):
            raise ValueError("Geotransform must consist of 6 numeric values.")
        # Create ctypes double array with input and write data
        values = (c_double * 6)(*values)
        capi.set_ds_geotransform(self._ptr, byref(values))
        self._flush()

    @property
    def origin(self):
        """
        Coordinates of the raster origin.
        """
        return TransformPoint(self, "origin")

    @property
    def scale(self):
        """
        Pixel scale in units of the raster projection.
        """
        return TransformPoint(self, "scale")

    @property
    def skew(self):
        """
        Skew of pixels (rotation parameters).
        """
        return TransformPoint(self, "skew")

    @property
    def extent(self):
        """
        Return the extent as a 4-tuple (xmin, ymin, xmax, ymax).
        """
        # Calculate boundary values based on scale and size
        xval = self.origin.x + self.scale.x * self.width
        yval = self.origin.y + self.scale.y * self.height
        # Calculate min and max values
        xmin = min(xval, self.origin.x)
        xmax = max(xval, self.origin.x)
        ymin = min(yval, self.origin.y)
        ymax = max(yval, self.origin.y)
        return xmin, ymin, xmax, ymax

    @property
    def bands(self):
        """Return a BandList giving indexed access to this raster's bands."""
        return BandList(self)

    def warp(self, ds_input, resampling="NearestNeighbour", max_error=0.0):
        """
        Return a warped GDALRaster with the given input characteristics.
        The input is expected to be a dictionary containing the parameters
        of the target raster. Allowed values are width, height, SRID, origin,
        scale, skew, datatype, driver, and name (filename).
        By default, the warp functions keeps all parameters equal to the values
        of the original source raster. For the name of the target raster, the
        name of the source raster will be used and appended with
        _copy. + source_driver_name.
        In addition, the resampling algorithm can be specified with the "resampling"
        input parameter. The default is NearestNeighbor. For a list of all options
        consult the GDAL_RESAMPLE_ALGORITHMS constant.
        """
        # Get the parameters defining the geotransform, srid, and size of the raster
        if "width" not in ds_input:
            ds_input["width"] = self.width
        if "height" not in ds_input:
            ds_input["height"] = self.height
        if "srid" not in ds_input:
            ds_input["srid"] = self.srs.srid
        if "origin" not in ds_input:
            ds_input["origin"] = self.origin
        if "scale" not in ds_input:
            ds_input["scale"] = self.scale
        if "skew" not in ds_input:
            ds_input["skew"] = self.skew
        # Get the driver, name, and datatype of the target raster
        if "driver" not in ds_input:
            ds_input["driver"] = self.driver.name
        if "name" not in ds_input:
            ds_input["name"] = self.name + "_copy." + self.driver.name
        if "datatype" not in ds_input:
            ds_input["datatype"] = self.bands[0].datatype()
        # Instantiate raster bands filled with nodata values.
        ds_input["bands"] = [{"nodata_value": bnd.nodata_value} for bnd in self.bands]
        # Create target raster
        target = GDALRaster(ds_input, write=True)
        # Select resampling algorithm
        algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling]
        # Reproject image
        capi.reproject_image(
            self._ptr,
            self.srs.wkt.encode(),
            target._ptr,
            target.srs.wkt.encode(),
            algorithm,
            0.0,
            max_error,
            c_void_p(),
            c_void_p(),
            c_void_p(),
        )
        # Make sure all data is written to file
        target._flush()
        return target

    def transform(
        self, srid, driver=None, name=None, resampling="NearestNeighbour", max_error=0.0
    ):
        """
        Return a copy of this raster reprojected into the given SRID.
        """
        # Convert the resampling algorithm name into an algorithm id
        algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling]
        # Instantiate target spatial reference system
        target_srs = SpatialReference(srid)
        # Create warped virtual dataset in the target reference system
        target = capi.auto_create_warped_vrt(
            self._ptr,
            self.srs.wkt.encode(),
            target_srs.wkt.encode(),
            algorithm,
            max_error,
            c_void_p(),
        )
        target = GDALRaster(target)
        # Construct the target warp dictionary from the virtual raster
        data = {
            "srid": srid,
            "width": target.width,
            "height": target.height,
            "origin": [target.origin.x, target.origin.y],
            "scale": [target.scale.x, target.scale.y],
            "skew": [target.skew.x, target.skew.y],
        }
        # Set the driver and filepath if provided
        if driver:
            data["driver"] = driver
        if name:
            data["name"] = name
        # Warp the raster into new srid
        return self.warp(data, resampling=resampling, max_error=max_error)

    @property
    def info(self):
        """
        Return information about this raster in a string format equivalent
        to the output of the gdalinfo command line utility.
        """
        if not capi.get_ds_info:
            raise ValueError("GDAL ≥ 2.1 is required for using the info property.")
        return capi.get_ds_info(self.ptr, None).decode()
| [
"jj.broniszewska@gmail.com"
] | jj.broniszewska@gmail.com |
0f7268d500044e8de9c6efbe2e15524605de5aee | ef6229d281edecbea3faad37830cb1d452d03e5b | /ucsmsdk/mometa/firmware/FirmwareDistributableFsm.py | 873821edf14e6a45bdc8b39f0b6b9027e21e636e | [
"Apache-2.0"
] | permissive | anoop1984/python_sdk | 0809be78de32350acc40701d6207631322851010 | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | refs/heads/master | 2020-12-31T00:18:57.415950 | 2016-04-26T17:39:38 | 2016-04-26T17:39:38 | 57,148,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,353 | py | """This module contains the general information for FirmwareDistributableFsm ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FirmwareDistributableFsmConsts():
    """Constant values for the FirmwareDistributableFsm managed object.

    Auto-generated by the UCS SDK code generator: each group of constants
    enumerates the legal string values of one FirmwareDistributableFsm
    property (completionTime, currentFsm, fsmStatus, rmtErrCode).
    """

    # completionTime values
    COMPLETION_TIME_ = ""
    # currentFsm values
    CURRENT_FSM_DELETE = "Delete"
    CURRENT_FSM_NOP = "nop"
    # fsmStatus values
    FSM_STATUS_FAIL = "fail"
    FSM_STATUS_IN_PROGRESS = "inProgress"
    FSM_STATUS_NOP = "nop"
    FSM_STATUS_PENDING = "pending"
    FSM_STATUS_SKIP = "skip"
    FSM_STATUS_SUCCESS = "success"
    FSM_STATUS_THROTTLED = "throttled"
    # rmtErrCode values (remote error codes reported by the endpoint)
    RMT_ERR_CODE_ERR_2FA_AUTH_RETRY = "ERR-2fa-auth-retry"
    RMT_ERR_CODE_ERR_ACTIVATE_FAILED = "ERR-ACTIVATE-failed"
    RMT_ERR_CODE_ERR_ACTIVATE_IN_PROGRESS = "ERR-ACTIVATE-in-progress"
    RMT_ERR_CODE_ERR_ACTIVATE_RETRY = "ERR-ACTIVATE-retry"
    RMT_ERR_CODE_ERR_BIOS_TOKENS_OLD_BIOS = "ERR-BIOS-TOKENS-OLD-BIOS"
    RMT_ERR_CODE_ERR_BIOS_TOKENS_OLD_CIMC = "ERR-BIOS-TOKENS-OLD-CIMC"
    RMT_ERR_CODE_ERR_BIOS_NETWORK_BOOT_ORDER_NOT_FOUND = "ERR-BIOS-network-boot-order-not-found"
    RMT_ERR_CODE_ERR_BOARDCTRLUPDATE_IGNORE = "ERR-BOARDCTRLUPDATE-ignore"
    RMT_ERR_CODE_ERR_DIAG_CANCELLED = "ERR-DIAG-cancelled"
    RMT_ERR_CODE_ERR_DIAG_FSM_RESTARTED = "ERR-DIAG-fsm-restarted"
    RMT_ERR_CODE_ERR_DIAG_TEST_FAILED = "ERR-DIAG-test-failed"
    RMT_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = "ERR-DNLD-authentication-failure"
    RMT_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = "ERR-DNLD-hostkey-mismatch"
    RMT_ERR_CODE_ERR_DNLD_INVALID_IMAGE = "ERR-DNLD-invalid-image"
    RMT_ERR_CODE_ERR_DNLD_NO_FILE = "ERR-DNLD-no-file"
    RMT_ERR_CODE_ERR_DNLD_NO_SPACE = "ERR-DNLD-no-space"
    RMT_ERR_CODE_ERR_DNLD_USB_UNMOUNTED = "ERR-DNLD-usb-unmounted"
    RMT_ERR_CODE_ERR_DNS_DELETE_ERROR = "ERR-DNS-delete-error"
    RMT_ERR_CODE_ERR_DNS_GET_ERROR = "ERR-DNS-get-error"
    RMT_ERR_CODE_ERR_DNS_SET_ERROR = "ERR-DNS-set-error"
    RMT_ERR_CODE_ERR_DIAGNOSTICS_IN_PROGRESS = "ERR-Diagnostics-in-progress"
    RMT_ERR_CODE_ERR_DIAGNOSTICS_MEMTEST_IN_PROGRESS = "ERR-Diagnostics-memtest-in-progress"
    RMT_ERR_CODE_ERR_DIAGNOSTICS_NETWORK_IN_PROGRESS = "ERR-Diagnostics-network-in-progress"
    RMT_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = "ERR-FILTER-illegal-format"
    RMT_ERR_CODE_ERR_FSM_NO_SUCH_STATE = "ERR-FSM-no-such-state"
    RMT_ERR_CODE_ERR_HOST_FRU_IDENTITY_MISMATCH = "ERR-HOST-fru-identity-mismatch"
    RMT_ERR_CODE_ERR_HTTP_SET_ERROR = "ERR-HTTP-set-error"
    RMT_ERR_CODE_ERR_HTTPS_SET_ERROR = "ERR-HTTPS-set-error"
    RMT_ERR_CODE_ERR_IBMC_ANALYZE_RESULTS = "ERR-IBMC-analyze-results"
    RMT_ERR_CODE_ERR_IBMC_CONNECT_ERROR = "ERR-IBMC-connect-error"
    RMT_ERR_CODE_ERR_IBMC_CONNECTOR_INFO_RETRIEVAL_ERROR = "ERR-IBMC-connector-info-retrieval-error"
    RMT_ERR_CODE_ERR_IBMC_FRU_RETRIEVAL_ERROR = "ERR-IBMC-fru-retrieval-error"
    RMT_ERR_CODE_ERR_IBMC_INVALID_END_POINT_CONFIG = "ERR-IBMC-invalid-end-point-config"
    RMT_ERR_CODE_ERR_IBMC_RESULTS_NOT_READY = "ERR-IBMC-results-not-ready"
    RMT_ERR_CODE_ERR_MAX_SUBSCRIPTIONS_ALLOWED_ERROR = "ERR-MAX-subscriptions-allowed-error"
    RMT_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = "ERR-MO-CONFIG-child-object-cant-be-configured"
    RMT_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = "ERR-MO-META-no-such-object-class"
    RMT_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = "ERR-MO-PROPERTY-no-such-property"
    RMT_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = "ERR-MO-PROPERTY-value-out-of-range"
    RMT_ERR_CODE_ERR_MO_ACCESS_DENIED = "ERR-MO-access-denied"
    RMT_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = "ERR-MO-deletion-rule-violation"
    RMT_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = "ERR-MO-duplicate-object"
    RMT_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = "ERR-MO-illegal-containment"
    RMT_ERR_CODE_ERR_MO_ILLEGAL_CREATION = "ERR-MO-illegal-creation"
    RMT_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = "ERR-MO-illegal-iterator-state"
    RMT_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = "ERR-MO-illegal-object-lifecycle-transition"
    RMT_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = "ERR-MO-naming-rule-violation"
    RMT_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = "ERR-MO-object-not-found"
    RMT_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = "ERR-MO-resource-allocation"
    RMT_ERR_CODE_ERR_NTP_DELETE_ERROR = "ERR-NTP-delete-error"
    RMT_ERR_CODE_ERR_NTP_GET_ERROR = "ERR-NTP-get-error"
    RMT_ERR_CODE_ERR_NTP_SET_ERROR = "ERR-NTP-set-error"
    RMT_ERR_CODE_ERR_POWER_CAP_UNSUPPORTED = "ERR-POWER-CAP-UNSUPPORTED"
    RMT_ERR_CODE_ERR_POWER_PROFILE_IN_PROGRESS = "ERR-POWER-PROFILE-IN-PROGRESS"
    RMT_ERR_CODE_ERR_SERVER_MIS_CONNECT = "ERR-SERVER-mis-connect"
    RMT_ERR_CODE_ERR_SWITCH_INVALID_IF_CONFIG = "ERR-SWITCH-invalid-if-config"
    RMT_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = "ERR-TOKEN-request-denied"
    RMT_ERR_CODE_ERR_UNABLE_TO_FETCH_BIOS_SETTINGS = "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS"
    RMT_ERR_CODE_ERR_UPDATE_FAILED = "ERR-UPDATE-failed"
    RMT_ERR_CODE_ERR_UPDATE_IN_PROGRESS = "ERR-UPDATE-in-progress"
    RMT_ERR_CODE_ERR_UPDATE_RETRY = "ERR-UPDATE-retry"
    RMT_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = "ERR-aaa-config-modify-error"
    RMT_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = "ERR-acct-realm-set-error"
    RMT_ERR_CODE_ERR_ADMIN_PASSWD_SET = "ERR-admin-passwd-set"
    RMT_ERR_CODE_ERR_AUTH_ISSUE = "ERR-auth-issue"
    RMT_ERR_CODE_ERR_AUTH_REALM_GET_ERROR = "ERR-auth-realm-get-error"
    RMT_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = "ERR-auth-realm-set-error"
    RMT_ERR_CODE_ERR_AUTHENTICATION = "ERR-authentication"
    RMT_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = "ERR-authorization-required"
    RMT_ERR_CODE_ERR_CLI_SESSION_LIMIT_REACHED = "ERR-cli-session-limit-reached"
    RMT_ERR_CODE_ERR_CREATE_KEYRING = "ERR-create-keyring"
    RMT_ERR_CODE_ERR_CREATE_LOCALE = "ERR-create-locale"
    RMT_ERR_CODE_ERR_CREATE_ROLE = "ERR-create-role"
    RMT_ERR_CODE_ERR_CREATE_TP = "ERR-create-tp"
    RMT_ERR_CODE_ERR_CREATE_USER = "ERR-create-user"
    RMT_ERR_CODE_ERR_DELETE_LOCALE = "ERR-delete-locale"
    RMT_ERR_CODE_ERR_DELETE_ROLE = "ERR-delete-role"
    RMT_ERR_CODE_ERR_DELETE_SESSION = "ERR-delete-session"
    RMT_ERR_CODE_ERR_DELETE_USER = "ERR-delete-user"
    RMT_ERR_CODE_ERR_DOWNGRADE_FAIL = "ERR-downgrade-fail"
    RMT_ERR_CODE_ERR_EFI_DIAGNOSTICS_IN_PROGRESS = "ERR-efi-Diagnostics--in-progress"
    RMT_ERR_CODE_ERR_ENABLE_MGMT_CONN = "ERR-enable-mgmt-conn"
    RMT_ERR_CODE_ERR_EP_SET_ERROR = "ERR-ep-set-error"
    RMT_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = "ERR-get-max-http-user-sessions"
    RMT_ERR_CODE_ERR_HTTP_INITIALIZING = "ERR-http-initializing"
    RMT_ERR_CODE_ERR_INSUFFICIENTLY_EQUIPPED = "ERR-insufficiently-equipped"
    RMT_ERR_CODE_ERR_INTERNAL_ERROR = "ERR-internal-error"
    RMT_ERR_CODE_ERR_LDAP_DELETE_ERROR = "ERR-ldap-delete-error"
    RMT_ERR_CODE_ERR_LDAP_GET_ERROR = "ERR-ldap-get-error"
    RMT_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = "ERR-ldap-group-modify-error"
    RMT_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = "ERR-ldap-group-set-error"
    RMT_ERR_CODE_ERR_LDAP_SET_ERROR = "ERR-ldap-set-error"
    RMT_ERR_CODE_ERR_LOCALE_SET_ERROR = "ERR-locale-set-error"
    RMT_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = "ERR-max-userid-sessions-reached"
    RMT_ERR_CODE_ERR_MISSING_METHOD = "ERR-missing-method"
    RMT_ERR_CODE_ERR_MODIFY_LOCALE = "ERR-modify-locale"
    RMT_ERR_CODE_ERR_MODIFY_ROLE = "ERR-modify-role"
    RMT_ERR_CODE_ERR_MODIFY_USER = "ERR-modify-user"
    RMT_ERR_CODE_ERR_MODIFY_USER_LOCALE = "ERR-modify-user-locale"
    RMT_ERR_CODE_ERR_MODIFY_USER_ROLE = "ERR-modify-user-role"
    RMT_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = "ERR-provider-group-modify-error"
    RMT_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = "ERR-provider-group-set-error"
    RMT_ERR_CODE_ERR_RADIUS_GET_ERROR = "ERR-radius-get-error"
    RMT_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = "ERR-radius-global-set-error"
    RMT_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = "ERR-radius-group-set-error"
    RMT_ERR_CODE_ERR_RADIUS_SET_ERROR = "ERR-radius-set-error"
    RMT_ERR_CODE_ERR_REQUEST_TIMEOUT = "ERR-request-timeout"
    RMT_ERR_CODE_ERR_RESET_ADAPTER = "ERR-reset-adapter"
    RMT_ERR_CODE_ERR_ROLE_SET_ERROR = "ERR-role-set-error"
    RMT_ERR_CODE_ERR_SECONDARY_NODE = "ERR-secondary-node"
    RMT_ERR_CODE_ERR_SERVICE_NOT_READY = "ERR-service-not-ready"
    RMT_ERR_CODE_ERR_SESSION_CACHE_FULL = "ERR-session-cache-full"
    RMT_ERR_CODE_ERR_SESSION_NOT_FOUND = "ERR-session-not-found"
    RMT_ERR_CODE_ERR_SET_NETWORK = "ERR-set-network"
    RMT_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = "ERR-set-password-strength-check"
    RMT_ERR_CODE_ERR_SET_PORT_CHANNEL = "ERR-set-port-channel"
    RMT_ERR_CODE_ERR_STORE_PRE_LOGIN_BANNER_MSG = "ERR-store-pre-login-banner-msg"
    RMT_ERR_CODE_ERR_TACACS_ENABLE_ERROR = "ERR-tacacs-enable-error"
    RMT_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = "ERR-tacacs-global-set-error"
    RMT_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = "ERR-tacacs-group-set-error"
    RMT_ERR_CODE_ERR_TACACS_PLUS_GET_ERROR = "ERR-tacacs-plus-get-error"
    RMT_ERR_CODE_ERR_TACACS_SET_ERROR = "ERR-tacacs-set-error"
    RMT_ERR_CODE_ERR_TEST_ERROR_1 = "ERR-test-error-1"
    RMT_ERR_CODE_ERR_TEST_ERROR_2 = "ERR-test-error-2"
    RMT_ERR_CODE_ERR_TIMEZONE_SET_ERROR = "ERR-timezone-set-error"
    RMT_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = "ERR-user-account-expired"
    RMT_ERR_CODE_ERR_USER_SET_ERROR = "ERR-user-set-error"
    RMT_ERR_CODE_ERR_XML_PARSE_ERROR = "ERR-xml-parse-error"
    RMT_ERR_CODE_NONE = "none"
class FirmwareDistributableFsm(ManagedObject):
    """Auto-generated managed-object class for the ``firmwareDistributableFsm`` MO.

    Read-only ("OutputOnly") FSM status object parented under
    ``firmwareDistributable``; its children are ``firmwareDistributableFsmStage``
    objects. All metadata tables below are machine-generated — do not edit by hand.
    """
    # Enumerated constants (e.g. the rmtErrCode values) for this class.
    consts = FirmwareDistributableFsmConsts()
    # No naming properties: the RN is the fixed string "fsm".
    naming_props = set([])
    # Class-level metadata: XML tag, RN, first supported version, access,
    # permitted parents and children.
    mo_meta = MoMeta("FirmwareDistributableFsm", "firmwareDistributableFsm", "fsm", VersionMeta.Version211a, "OutputOnly", 0xf, [], [""], [u'firmwareDistributable'], [u'firmwareDistributableFsmStage'], [None])
    # Per-property metadata: python name, XML name, type, min version, access,
    # mask, min/max length, validation regex, allowed values, allowed ranges.
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "completion_time": MoPropertyMeta("completion_time", "completionTime", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [""], []),
        "current_fsm": MoPropertyMeta("current_fsm", "currentFsm", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["Delete", "nop"], []),
        "descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
        "fsm_status": MoPropertyMeta("fsm_status", "fsmStatus", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, None, ["fail", "inProgress", "nop", "pending", "skip", "success", "throttled"], []),
        "instance_id": MoPropertyMeta("instance_id", "instanceId", "uint", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "progress": MoPropertyMeta("progress", "progress", "byte", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["0-100"]),
        "rmt_err_code": MoPropertyMeta("rmt_err_code", "rmtErrCode", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ERR-2fa-auth-retry", "ERR-ACTIVATE-failed", "ERR-ACTIVATE-in-progress", "ERR-ACTIVATE-retry", "ERR-BIOS-TOKENS-OLD-BIOS", "ERR-BIOS-TOKENS-OLD-CIMC", "ERR-BIOS-network-boot-order-not-found", "ERR-BOARDCTRLUPDATE-ignore", "ERR-DIAG-cancelled", "ERR-DIAG-fsm-restarted", "ERR-DIAG-test-failed", "ERR-DNLD-authentication-failure", "ERR-DNLD-hostkey-mismatch", "ERR-DNLD-invalid-image", "ERR-DNLD-no-file", "ERR-DNLD-no-space", "ERR-DNLD-usb-unmounted", "ERR-DNS-delete-error", "ERR-DNS-get-error", "ERR-DNS-set-error", "ERR-Diagnostics-in-progress", "ERR-Diagnostics-memtest-in-progress", "ERR-Diagnostics-network-in-progress", "ERR-FILTER-illegal-format", "ERR-FSM-no-such-state", "ERR-HOST-fru-identity-mismatch", "ERR-HTTP-set-error", "ERR-HTTPS-set-error", "ERR-IBMC-analyze-results", "ERR-IBMC-connect-error", "ERR-IBMC-connector-info-retrieval-error", "ERR-IBMC-fru-retrieval-error", "ERR-IBMC-invalid-end-point-config", "ERR-IBMC-results-not-ready", "ERR-MAX-subscriptions-allowed-error", "ERR-MO-CONFIG-child-object-cant-be-configured", "ERR-MO-META-no-such-object-class", "ERR-MO-PROPERTY-no-such-property", "ERR-MO-PROPERTY-value-out-of-range", "ERR-MO-access-denied", "ERR-MO-deletion-rule-violation", "ERR-MO-duplicate-object", "ERR-MO-illegal-containment", "ERR-MO-illegal-creation", "ERR-MO-illegal-iterator-state", "ERR-MO-illegal-object-lifecycle-transition", "ERR-MO-naming-rule-violation", "ERR-MO-object-not-found", "ERR-MO-resource-allocation", "ERR-NTP-delete-error", "ERR-NTP-get-error", "ERR-NTP-set-error", "ERR-POWER-CAP-UNSUPPORTED", "ERR-POWER-PROFILE-IN-PROGRESS", "ERR-SERVER-mis-connect", "ERR-SWITCH-invalid-if-config", "ERR-TOKEN-request-denied", "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS", "ERR-UPDATE-failed", "ERR-UPDATE-in-progress", "ERR-UPDATE-retry", "ERR-aaa-config-modify-error", "ERR-acct-realm-set-error", 
"ERR-admin-passwd-set", "ERR-auth-issue", "ERR-auth-realm-get-error", "ERR-auth-realm-set-error", "ERR-authentication", "ERR-authorization-required", "ERR-cli-session-limit-reached", "ERR-create-keyring", "ERR-create-locale", "ERR-create-role", "ERR-create-tp", "ERR-create-user", "ERR-delete-locale", "ERR-delete-role", "ERR-delete-session", "ERR-delete-user", "ERR-downgrade-fail", "ERR-efi-Diagnostics--in-progress", "ERR-enable-mgmt-conn", "ERR-ep-set-error", "ERR-get-max-http-user-sessions", "ERR-http-initializing", "ERR-insufficiently-equipped", "ERR-internal-error", "ERR-ldap-delete-error", "ERR-ldap-get-error", "ERR-ldap-group-modify-error", "ERR-ldap-group-set-error", "ERR-ldap-set-error", "ERR-locale-set-error", "ERR-max-userid-sessions-reached", "ERR-missing-method", "ERR-modify-locale", "ERR-modify-role", "ERR-modify-user", "ERR-modify-user-locale", "ERR-modify-user-role", "ERR-provider-group-modify-error", "ERR-provider-group-set-error", "ERR-radius-get-error", "ERR-radius-global-set-error", "ERR-radius-group-set-error", "ERR-radius-set-error", "ERR-request-timeout", "ERR-reset-adapter", "ERR-role-set-error", "ERR-secondary-node", "ERR-service-not-ready", "ERR-session-cache-full", "ERR-session-not-found", "ERR-set-network", "ERR-set-password-strength-check", "ERR-set-port-channel", "ERR-store-pre-login-banner-msg", "ERR-tacacs-enable-error", "ERR-tacacs-global-set-error", "ERR-tacacs-group-set-error", "ERR-tacacs-plus-get-error", "ERR-tacacs-set-error", "ERR-test-error-1", "ERR-test-error-2", "ERR-timezone-set-error", "ERR-user-account-expired", "ERR-user-set-error", "ERR-xml-parse-error", "none"], ["0-4294967295"]),
        "rmt_err_descr": MoPropertyMeta("rmt_err_descr", "rmtErrDescr", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
        "rmt_rslt": MoPropertyMeta("rmt_rslt", "rmtRslt", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout),){0,32}(defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout){0,1}""", [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
    }
    # Mapping from XML attribute names to python attribute names, used when
    # deserializing server responses into this object.
    prop_map = {
        "childAction": "child_action",
        "completionTime": "completion_time",
        "currentFsm": "current_fsm",
        "descr": "descr",
        "dn": "dn",
        "fsmStatus": "fsm_status",
        "instanceId": "instance_id",
        "progress": "progress",
        "rmtErrCode": "rmt_err_code",
        "rmtErrDescr": "rmt_err_descr",
        "rmtRslt": "rmt_rslt",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
    }
    def __init__(self, parent_mo_or_dn, **kwargs):
        """Create the FSM status MO under *parent_mo_or_dn*.

        All properties start as ``None``; the base class applies any
        **kwargs overrides and derives dn/rn from the parent.
        """
        self._dirty_mask = 0
        self.child_action = None
        self.completion_time = None
        self.current_fsm = None
        self.descr = None
        self.fsm_status = None
        self.instance_id = None
        self.progress = None
        self.rmt_err_code = None
        self.rmt_err_descr = None
        self.rmt_rslt = None
        self.sacl = None
        self.status = None
        ManagedObject.__init__(self, "FirmwareDistributableFsm", parent_mo_or_dn, **kwargs)
| [
"test@cisco.com"
] | test@cisco.com |
b50d9d5a961b736d337ce61c6c85eb2536d3ab26 | 1d75146a66245dc046dc216bb602129208e00733 | /closed/Lenovo/code/dlrm/tensorrt/scripts/data_utils.py | 5eb2b32cc8c359829175331116b96458d6e54723 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | georgelyuan/inference_results_v1.1 | febf287bd5967bf7f087355a81f06a2bd298cbfe | 3196a5587887c39203ee3ac246fa5dbe789d9085 | refs/heads/main | 2023-08-16T08:49:45.274284 | 2021-09-23T20:57:17 | 2021-09-23T20:57:17 | 409,773,141 | 0 | 0 | NOASSERTION | 2021-09-23T23:36:37 | 2021-09-23T23:36:37 | null | UTF-8 | Python | false | false | 49,005 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: generate inputs and targets for the DLRM benchmark
#
# Utility function(s) to download and pre-process public data sets
# - Criteo Kaggle Display Advertising Challenge Dataset
# https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset
# - Criteo Terabyte Dataset
# https://labs.criteo.com/2013/12/download-terabyte-click-logs
#
# After downloading dataset, run:
# getCriteoAdData(
# datafile="<path-to-train.txt>",
# o_filename=kaggleAdDisplayChallenge_processed.npz,
# max_ind_range=-1,
# sub_sample_rate=0.0,
# days=7,
# data_split='train',
# randomize='total',
# criteo_kaggle=True,
# memory_map=False
# )
# getCriteoAdData(
# datafile="<path-to-day_{0,...,23}>",
# o_filename=terabyte_processed.npz,
# max_ind_range=-1,
# sub_sample_rate=0.0,
# days=24,
# data_split='train',
# randomize='total',
# criteo_kaggle=False,
# memory_map=False
# )
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from os import path
import numpy as np
def convertUStringToDistinctIntsDict(mat, convertDicts, counts):
    """Map each unicode-string column of ``mat`` to dense integer ids.

    Ids are assigned per column in first-seen order, using (and updating)
    one string->id dictionary per column so repeated calls keep a
    consistent mapping across batches.

    Inputs:
        mat (np.array): 2-D array of unicode strings to convert
        convertDicts (list): one dictionary per column (regenerated if the
            length does not match ``mat.shape[1]``)
        counts (list): number of distinct categories seen per column

    Outputs:
        out (np.array): array of integer ids, same shape as ``mat``
        convertDicts (list): updated per-column dictionaries
        counts (list): updated per-column category counts
    """
    num_cols = mat.shape[1]
    # Regenerate the bookkeeping structures when they do not line up with mat.
    if len(convertDicts) != num_cols or len(counts) != num_cols:
        print("Length of convertDicts or counts does not match input shape")
        print("Generating convertDicts and counts...")
        convertDicts = [{} for _ in range(num_cols)]
        counts = [0 for _ in range(num_cols)]
    out = np.zeros(mat.shape)
    for col in range(num_cols):
        mapping = convertDicts[col]
        for row in range(mat.shape[0]):
            key = mat[row, col]
            # First occurrence: assign the next free id for this column.
            if key not in mapping:
                mapping[key] = counts[col]
                counts[col] += 1
            out[row, col] = mapping[key]
    return out, convertDicts, counts
def convertUStringToDistinctIntsUnique(mat, mat_uni, counts):
    """Map string feature columns to integer ids via ``np.unique``.

    For every column the previously seen unique values (``mat_uni``) are
    concatenated with the new batch, re-uniqued, and the inverse indices of
    the new rows become the output ids. NOTE: ids from earlier calls may be
    renumbered because ``np.unique`` sorts lexicographically.

    Inputs:
        mat (np.array): 2-D array, one row per sample, 26 categorical columns
        mat_uni (list): per-column arrays of unique values seen so far
            (regenerated if the length does not match ``mat.shape[1]``)
        counts (list): per-column number of distinct categories

    Outputs:
        out (np.array): integer ids, same shape as ``mat``
        mat_uni (list): updated per-column unique-value arrays
        counts (list): updated per-column category counts
    """
    n_cols = mat.shape[1]
    # Regenerate the bookkeeping structures when they do not line up with mat.
    if len(mat_uni) != n_cols or len(counts) != n_cols:
        print("Length of mat_unique or counts does not match input shape")
        print("Generating mat_unique and counts...")
        mat_uni = [np.array([]) for _ in range(n_cols)]
        counts = [0 for _ in range(n_cols)]
    out = np.zeros(mat.shape)
    for col in range(n_cols):
        seen = mat_uni[col].size
        combined = np.concatenate((mat_uni[col], mat[:, col]))
        uniques, inverse = np.unique(combined, return_inverse=True)
        mat_uni[col] = uniques
        # The first `seen` inverse entries belong to the old uniques;
        # the remainder are the ids of this batch's rows.
        out[:, col] = inverse[seen:]
        counts[col] = uniques.size
    return out, mat_uni, counts
def processCriteoAdData(d_path, d_file, npzfile, split, convertDicts, pre_comp_counts):
    """Turn the raw per-day npz splits into "_processed" npz files.

    For each day i, loads "<npzfile>_i.npz", remaps the 26 categorical
    columns through the pre-computed ``convertDicts`` dictionaries, clips
    negative continuous features to zero, and writes
    "<npzfile>_i_processed.npz". Days whose processed file already exists
    are skipped.

    Inputs:
        d_path (str): directory holding the per-day npz files
        d_file (str): dataset file prefix
        npzfile (str): path prefix of the per-day npz files
        split (int): total number of splits in the dataset (typically 7 or 24)
        convertDicts (list): 26 per-feature dictionaries mapping raw
            categorical values to dense integer ids
        pre_comp_counts (list): pre-computed per-feature category counts
            (kept for the optional sanity check against recomputed counts)
    """
    for day in range(split):
        processed_name = npzfile + "_{0}_processed.npz".format(day)
        if path.exists(processed_name):
            print("Using existing " + processed_name, end="\r")
            continue
        with np.load(npzfile + "_{0}.npz".format(day)) as data:
            # Categorical features: remap via the pre-computed dictionaries
            # (the data is stored transposed, one row per feature).
            X_cat_t = np.zeros(data["X_cat_t"].shape)
            for fea in range(26):
                for idx, raw in enumerate(data["X_cat_t"][fea, :]):
                    X_cat_t[fea, idx] = convertDicts[fea][raw]
            # Continuous features: clip negative values to zero.
            X_int = data["X_int"]
            X_int[X_int < 0] = 0
            # Targets.
            y = data["y"]
        np.savez_compressed(
            processed_name,
            # Store samples-major (transpose of the per-feature layout).
            X_cat=np.transpose(X_cat_t),
            X_int=X_int,
            y=y,
        )
        print("Processed " + processed_name, end="\r")
    print("")
    return
def concatCriteoAdData(
    d_path,
    d_file,
    npzfile,
    trafile,
    days,
    data_split,
    randomize,
    total_per_file,
    total_count,
    memory_map,
    o_filename
):
    """
    Concatenates different days and saves the result.

    In memory-map mode the per-day "*_processed.npz" files are shuffled
    across days on disk with a two-pass Fisher-Yates-Rao scheme and written
    back as per-day "*_reordered.npz" files (nothing is concatenated in
    RAM). Otherwise all processed days are loaded, concatenated in memory,
    and stored as a single "<o_filename>.npz" archive together with the
    per-feature category counts.

    Inputs:
        days (int): total number of days in the dataset (typically 7 or 24)
        d_path (str): path for {kaggle|terabyte}_day_i.npz files
        d_file (str): dataset file prefix; "<d_file>_fea_count.npz" supplies
            the per-feature category counts in the non-memory-map path
        npzfile (str): path prefix of the per-day processed npz files
        trafile (str): path prefix for transposed intermediate files (used
            only by the commented-out Approaches 1-3)
        data_split (str): "none" shuffles every day; otherwise the last
            day/bucket is preserved as the test split
        randomize (str): "none", "day", or "total" randomization scheme
        total_per_file (list): number of samples in each day
        total_count (int): total number of samples across all days
        memory_map (bool): selects the on-disk shuffling path
        o_filename (str): output file name
    Output:
        o_file (str): output file path
    """
    if memory_map:
        # dataset break up per fea
        # tar_fea = 1 # single target
        den_fea = 13 # 13 dense features
        spa_fea = 26 # 26 sparse features
        # tad_fea = tar_fea + den_fea
        # tot_fea = tad_fea + spa_fea
        # create offset per file: cumulative sample offsets, so day i spans
        # [offset_per_file[i], offset_per_file[i + 1])
        offset_per_file = np.array([0] + [x for x in total_per_file])
        for i in range(days):
            offset_per_file[i + 1] += offset_per_file[i]
        '''
        # Approach 1, 2 and 3 use indices, while Approach 4 does not use them
        # create indices
        indices = np.arange(total_count)
        if data_split == "none":
            if randomize == "total":
                indices = np.random.permutation(indices)
        else:
            indices = np.array_split(indices, offset_per_file[1:-1])
            # randomize train data (per day)
            if randomize == "day": # or randomize == "total":
                for i in range(len(indices) - 1):
                    indices[i] = np.random.permutation(indices[i])
                print("Randomized indices per day ...")
            train_indices = np.concatenate(indices[:-1])
            test_indices = indices[-1]
            # randomize train data (across days)
            if randomize == "total":
                train_indices = np.random.permutation(train_indices)
                print("Randomized indices across days ...")
            indices = np.concatenate((train_indices, test_indices))
        # no reordering
        # indices = np.arange(total_count)
        '''
        '''
        # Approach 1: simple and slow (no grouping is used)
        # check if data already exists
        recreate_flag = False
        for j in range(tot_fea):
            filename_j = trafile + "_{0}_reordered.npy".format(j)
            if path.exists(filename_j):
                print("Using existing " + filename_j)
            else:
                recreate_flag = True
        # load, reorder and concatenate data (memmap all reordered files per feature)
        if recreate_flag:
            # init reordered files (.npy appended automatically)
            z = np.zeros((total_count))
            for j in range(tot_fea):
                filename_j = trafile + "_{0}_reordered".format(j)
                np.save(filename_j, z)
                print("Creating " + filename_j)
            for i in range(days):
                filename_i = d_path + npzfile + "_{0}_processed.npz".format(i)
                with np.load(filename_i) as data:
                    X_cat_t = np.transpose(data["X_cat"])
                    X_int_t = np.transpose(data["X_int"])
                    y = data["y"]
                size = len(y)
                # sanity check
                if total_per_file[i] != size:
                    sys.exit("ERROR: sanity check on number of samples failed")
                # setup start and end ranges
                start = offset_per_file[i]
                end = offset_per_file[i + 1]
                # print(filename_i)
                # print("start=" + str(start) + " end=" + str(end)
                #       + " diff=" + str(end - start) + "=" + str(total_per_file[i]))
                for j in range(tot_fea):
                    filename_j = trafile + "_{0}_reordered.npy".format(j)
                    fj = np.load(filename_j, mmap_mode='r+')
                    if j < tar_fea:
                        fj[indices[start:end]] = y
                    elif tar_fea <= j and j < tad_fea:
                        fj[indices[start:end]] = X_int_t[j - tar_fea, :]
                    else:
                        fj[indices[start:end]] = X_cat_t[j - tad_fea, :]
                    del fj
        else:
            print("Reordered fea files already exist, skipping ...")
        # check if data already exists
        recreate_flag = False
        for i in range(days):
            filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
            if path.exists(filename_i):
                print("Using existing " + filename_i)
            else:
                recreate_flag = True
        # split reordered data by files (memmap all reordered files per feature)
        # on the day boundary del the file object and memmap again
        if recreate_flag:
            for i in range(days):
                filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
                size = total_per_file[i]
                X_int_t = np.zeros((den_fea, size))
                X_cat_t = np.zeros((spa_fea, size))
                # setup start and end ranges
                start = offset_per_file[i]
                end = offset_per_file[i + 1]
                print("Creating " + filename_i)
                # print("start=" + str(start) + " end=" + str(end)
                #       + " diff=" + str(end - start) + "=" + str(total_per_file[i]))
                for j in range(tot_fea):
                    filename_j = trafile + "_{0}_reordered.npy".format(j)
                    fj = np.load(filename_j, mmap_mode='r')
                    if j < tar_fea:
                        y = fj[start:end]
                    elif tar_fea <= j and j < tad_fea:
                        X_int_t[j - tar_fea, :] = fj[start:end]
                    else:
                        X_cat_t[j - tad_fea, :] = fj[start:end]
                    del fj
                np.savez_compressed(
                    filename_i,
                    X_cat=np.transpose(X_cat_t),  # transpose of the data
                    X_int=np.transpose(X_int_t),  # transpose of the data
                    y=y,
                )
        else:
            print("Reordered day files already exist, skipping ...")
        '''
        '''
        # Approach 2: group days
        # check if data already exists
        recreate_flag = False
        for j in range(tot_fea):
            filename_j = trafile + "_{0}_reordered.npy".format(j)
            if path.exists(filename_j):
                print("Using existing " + filename_j)
            else:
                recreate_flag = True
        # load, reorder and concatenate data (memmap all reordered files per feature)
        if recreate_flag:
            # init reordered files (.npy appended automatically)
            z = np.zeros((total_count))
            for j in range(tot_fea):
                filename_j = trafile + "_{0}_reordered".format(j)
                np.save(filename_j, z)
                print("Creating " + filename_j)
            group_day = 3 # e.g. 8, 4 or 3
            group_num = days // group_day
            file_group = [i*group_day for i in range(group_num)] + [days]
            for ii in range(group_num):
                # for last may be group_size != group_num, therefore reset it below
                group_size = file_group[ii + 1] - file_group[ii]
                X_cat_t = [0]*group_size
                X_int_t = [0]*group_size
                y = [0]*group_size
                start = [0]*group_size
                end = [0]*group_size
                for ig in range(group_size):
                    i = file_group[ii] + ig
                    filename_i = d_path + npzfile + "_{0}_processed.npz".format(i)
                    # setup start and end ranges
                    start[ig] = offset_per_file[i]
                    end[ig] = offset_per_file[i + 1]
                    # print(filename_i)
                    # load a group of files
                    with np.load(filename_i) as data:
                        X_cat_t[ig] = np.transpose(data["X_cat"])
                        X_int_t[ig] = np.transpose(data["X_int"])
                        y[ig] = data["y"]
                    # sanity check
                    if total_per_file[i] != len(y[ig]):
                        sys.exit("ERROR: sanity check on number of samples failed")
                    # print("start=" + str(start) + " end=" + str(end)
                    #       + " diff=" + str(end[ig]-start[ig]) + "=" + str(total_per_file[i]))
                for j in range(tot_fea):
                    filename_j = trafile + "_{0}_reordered.npy".format(j)
                    fj = np.load(filename_j, mmap_mode='r+')
                    for ig in range(group_size):
                        if j < tar_fea:
                            fj[indices[start[ig]:end[ig]]] = y[ig]
                        elif tar_fea <= j and j < tad_fea:
                            fj[indices[start[ig]:end[ig]]] = X_int_t[ig][j - tar_fea, :]
                        else:
                            fj[indices[start[ig]:end[ig]]] = X_cat_t[ig][j - tad_fea, :]
                    del fj
        else:
            print("Reordered fea files already exist, skipping ...")
        # check if data already exists
        recreate_flag = False
        for i in range(days):
            filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
            if path.exists(filename_i):
                print("Using existing " + filename_i)
            else:
                recreate_flag = True
        # split reordered data by files (memmap all reordered files per feature)
        # on the day boundary del the file object and memmap again
        if recreate_flag:
            for ii in range(group_num):
                # for last may be group_size != group_num, therefore reset it below
                group_size = file_group[ii + 1] - file_group[ii]
                X_cat_t= []; X_int_t = []
                for ig in range(group_size):
                    i = file_group[ii] + ig
                    X_int_t.append(np.zeros((den_fea, total_per_file[i])))
                    X_cat_t.append(np.zeros((spa_fea, total_per_file[i])))
                y = [0]*group_size
                start = [0]*group_size
                end = [0]*group_size
                for j in range(tot_fea):
                    filename_j = trafile + "_{0}_reordered.npy".format(j)
                    fj = np.load(filename_j, mmap_mode='r')
                    # load a group of files
                    for ig in range(group_size):
                        i = file_group[ii] + ig
                        # setup start and end ranges
                        start[ig] = offset_per_file[i]
                        end[ig] = offset_per_file[i + 1]
                        # load data for the group of files
                        if j < tar_fea:
                            y[ig] = fj[start[ig]:end[ig]]
                        elif tar_fea <= j and j < tad_fea:
                            X_int_t[ig][j - tar_fea, :] = fj[start[ig]:end[ig]]
                        else:
                            X_cat_t[ig][j - tad_fea, :] = fj[start[ig]:end[ig]]
                    del fj
                for ig in range(group_size):
                    i = file_group[ii] + ig
                    filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
                    print("Creating " + filename_i)
                    np.savez_compressed(
                        filename_i,
                        X_cat=np.transpose(X_cat_t[ig]),  # transpose of the data
                        X_int=np.transpose(X_int_t[ig]),  # transpose of the data
                        y=y[ig],
                    )
        else:
            print("Reordered day files already exist, skipping ...")
        '''
        '''
        # Approach 3: group features
        # check if data already exists
        group_fea = 5 # e.g. 8, 5 or 4
        group_num = tot_fea // group_fea
        if tot_fea % group_fea != 0: # sanity check
            sys.exit("ERROR: the group_fea must divided tot_fea evenly.")
        recreate_flag = False
        for jn in range(group_num):
            filename_j = trafile + "_{0}_reordered{1}.npy".format(
                jn, group_fea
            )
            if path.exists(filename_j):
                print("Using existing " + filename_j)
            else:
                recreate_flag = True
        # load, reorder and concatenate data (memmap all reordered files per feature)
        if recreate_flag:
            # init reordered files (.npy appended automatically)
            z = np.zeros((group_fea, total_count))
            for jn in range(group_num):
                filename_j = trafile + "_{0}_reordered{1}".format(
                    jn, group_fea
                )
                np.save(filename_j, z)
                print("Creating " + filename_j)
            for i in range(days):
                filename_i = d_path + npzfile + "_{0}_processed.npz".format(i)
                with np.load(filename_i) as data:
                    X_cat_t = np.transpose(data["X_cat"])
                    X_int_t = np.transpose(data["X_int"])
                    y = data["y"]
                size = len(y)
                # sanity check
                if total_per_file[i] != size:
                    sys.exit("ERROR: sanity check on number of samples failed")
                # setup start and end ranges
                start = offset_per_file[i]
                end = offset_per_file[i + 1]
                # print(filename_i)
                # print("start=" + str(start) + " end=" + str(end)
                #       + " diff=" + str(end - start) + "=" + str(total_per_file[i]))
                for jn in range(group_num):
                    filename_j = trafile + "_{0}_reordered{1}.npy".format(
                        jn, group_fea
                    )
                    fj = np.load(filename_j, mmap_mode='r+')
                    for jg in range(group_fea):
                        j = jn * group_fea + jg
                        # print("j=" + str(j) + " jn=" + str(jn) + " jg=" + str(jg))
                        if j < tar_fea:
                            fj[jg, indices[start:end]] = y
                        elif tar_fea <= j and j < tad_fea:
                            fj[jg, indices[start:end]] = X_int_t[j - tar_fea, :]
                        else:
                            fj[jg, indices[start:end]] = X_cat_t[j - tad_fea, :]
                    del fj
        else:
            print("Reordered fea files already exist, skipping ...")
        # check if data already exists
        recreate_flag = False
        for i in range(days):
            filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
            if path.exists(filename_i):
                print("Using existing" + filename_i)
            else:
                recreate_flag = True
        # split reordered data by files (memmap all reordered files per feature)
        # on the day boundary del the file object and memmap again
        if recreate_flag:
            for i in range(days):
                filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
                size = total_per_file[i]
                X_int_t = np.zeros((den_fea, size))
                X_cat_t = np.zeros((spa_fea, size))
                # setup start and end ranges
                start = offset_per_file[i]
                end = offset_per_file[i + 1]
                print("Creating " + filename_i)
                # print("start=" + str(start) + " end=" + str(end)
                #       + " diff=" + str(end - start) + "=" + str(total_per_file[i]))
                for jn in range(group_num):
                    filename_j = trafile + "_{0}_reordered{1}.npy".format(
                        jn, group_fea
                    )
                    fj = np.load(filename_j, mmap_mode='r')
                    for jg in range(group_fea):
                        j = jn * group_fea + jg
                        # print("j=" + str(j) + " jn=" + str(jn) + " jg=" + str(jg))
                        if j < tar_fea:
                            y = fj[jg, start:end]
                        elif tar_fea <= j and j < tad_fea:
                            X_int_t[j - tar_fea, :] = fj[jg, start:end]
                        else:
                            X_cat_t[j - tad_fea, :] = fj[jg, start:end]
                    del fj
                np.savez_compressed(
                    filename_i,
                    X_cat=np.transpose(X_cat_t),  # transpose of the data
                    X_int=np.transpose(X_int_t),  # transpose of the data
                    y=y,
                )
        else:
            print("Reordered day files already exist, skipping ...")
        '''
        # Approach 4: Fisher-Yates-Rao (FYR) shuffle algorithm
        # Pass 1 scatters each day's samples into random day-sized buckets
        # ("intermediate" files); pass 2 permutes within each bucket.
        # 1st pass of FYR shuffle
        # check if data already exists
        recreate_flag = False
        for j in range(days):
            filename_j_y = npzfile + "_{0}_intermediate_y.npy".format(j)
            filename_j_d = npzfile + "_{0}_intermediate_d.npy".format(j)
            filename_j_s = npzfile + "_{0}_intermediate_s.npy".format(j)
            if (
                path.exists(filename_j_y)
                and path.exists(filename_j_d)
                and path.exists(filename_j_s)
            ):
                print(
                    "Using existing\n"
                    + filename_j_y + "\n"
                    + filename_j_d + "\n"
                    + filename_j_s
                )
            else:
                recreate_flag = True
        # reorder across buckets using sampling
        if recreate_flag:
            # init intermediate files (.npy appended automatically)
            # one target (y), dense (d) and sparse (s) buffer per day
            for j in range(days):
                filename_j_y = npzfile + "_{0}_intermediate_y".format(j)
                filename_j_d = npzfile + "_{0}_intermediate_d".format(j)
                filename_j_s = npzfile + "_{0}_intermediate_s".format(j)
                np.save(filename_j_y, np.zeros((total_per_file[j])))
                np.save(filename_j_d, np.zeros((total_per_file[j], den_fea)))
                np.save(filename_j_s, np.zeros((total_per_file[j], spa_fea)))
            # start processing files
            total_counter = [0] * days  # fill level of each destination bucket
            for i in range(days):
                filename_i = npzfile + "_{0}_processed.npz".format(i)
                with np.load(filename_i) as data:
                    X_cat = data["X_cat"]
                    X_int = data["X_int"]
                    y = data["y"]
                size = len(y)
                # sanity check
                if total_per_file[i] != size:
                    sys.exit("ERROR: sanity check on number of samples failed")
                # debug prints
                print("Reordering (1st pass) " + filename_i)
                # create buckets using sampling of random ints
                # from (discrete) uniform distribution
                buckets = []
                for _j in range(days):
                    buckets.append([])
                counter = [0] * days
                days_to_sample = days if data_split == "none" else days - 1
                if randomize == "total":
                    rand_u = np.random.randint(low=0, high=days_to_sample, size=size)
                    for k in range(size):
                        # sample and make sure elements per buckets do not overflow
                        if data_split == "none" or i < days - 1:
                            # choose bucket
                            p = rand_u[k]
                            # retry of the bucket is full
                            while total_counter[p] + counter[p] >= total_per_file[p]:
                                p = np.random.randint(low=0, high=days_to_sample)
                        else:  # preserve the last day/bucket if needed
                            p = i
                        buckets[p].append(k)
                        counter[p] += 1
                else:  # randomize is day or none
                    for k in range(size):
                        # do not sample, preserve the data in this bucket
                        p = i
                        buckets[p].append(k)
                        counter[p] += 1
                # sanity check
                if np.sum(counter) != size:
                    sys.exit("ERROR: sanity check on number of samples failed")
                # debug prints
                # print(counter)
                # print(str(np.sum(counter)) + " = " + str(size))
                # print([len(x) for x in buckets])
                # print(total_counter)
                # partially feel the buckets
                for j in range(days):
                    filename_j_y = npzfile + "_{0}_intermediate_y.npy".format(j)
                    filename_j_d = npzfile + "_{0}_intermediate_d.npy".format(j)
                    filename_j_s = npzfile + "_{0}_intermediate_s.npy".format(j)
                    start = total_counter[j]
                    end = total_counter[j] + counter[j]
                    # target buckets
                    fj_y = np.load(filename_j_y, mmap_mode='r+')
                    # print("start=" + str(start) + " end=" + str(end)
                    #       + " end - start=" + str(end - start) + " "
                    #       + str(fj_y[start:end].shape) + " "
                    #       + str(len(buckets[j])))
                    fj_y[start:end] = y[buckets[j]]
                    del fj_y
                    # dense buckets
                    fj_d = np.load(filename_j_d, mmap_mode='r+')
                    # print("start=" + str(start) + " end=" + str(end)
                    #       + " end - start=" + str(end - start) + " "
                    #       + str(fj_d[start:end, :].shape) + " "
                    #       + str(len(buckets[j])))
                    fj_d[start:end, :] = X_int[buckets[j], :]
                    del fj_d
                    # sparse buckets
                    fj_s = np.load(filename_j_s, mmap_mode='r+')
                    # print("start=" + str(start) + " end=" + str(end)
                    #       + " end - start=" + str(end - start) + " "
                    #       + str(fj_s[start:end, :].shape) + " "
                    #       + str(len(buckets[j])))
                    fj_s[start:end, :] = X_cat[buckets[j], :]
                    del fj_s
                    # update counters for next step
                    total_counter[j] += counter[j]
        # 2nd pass of FYR shuffle
        # check if data already exists
        for j in range(days):
            filename_j = npzfile + "_{0}_reordered.npz".format(j)
            if path.exists(filename_j):
                print("Using existing " + filename_j)
            else:
                recreate_flag = True
        # reorder within buckets
        if recreate_flag:
            for j in range(days):
                filename_j_y = npzfile + "_{0}_intermediate_y.npy".format(j)
                filename_j_d = npzfile + "_{0}_intermediate_d.npy".format(j)
                filename_j_s = npzfile + "_{0}_intermediate_s.npy".format(j)
                fj_y = np.load(filename_j_y)
                fj_d = np.load(filename_j_d)
                fj_s = np.load(filename_j_s)
                indices = range(total_per_file[j])
                if randomize == "day" or randomize == "total":
                    # last day is kept in order when it serves as the test split
                    if data_split == "none" or j < days - 1:
                        indices = np.random.permutation(range(total_per_file[j]))
                filename_r = npzfile + "_{0}_reordered.npz".format(j)
                print("Reordering (2nd pass) " + filename_r)
                np.savez_compressed(
                    filename_r,
                    X_cat=fj_s[indices, :],
                    X_int=fj_d[indices, :],
                    y=fj_y[indices],
                )
        '''
        # sanity check (under no reordering norms should be zero)
        for i in range(days):
            filename_i_o = npzfile + "_{0}_processed.npz".format(i)
            print(filename_i_o)
            with np.load(filename_i_o) as data_original:
                X_cat_o = data_original["X_cat"]
                X_int_o = data_original["X_int"]
                y_o = data_original["y"]
            filename_i_r = npzfile + "_{0}_reordered.npz".format(i)
            print(filename_i_r)
            with np.load(filename_i_r) as data_reordered:
                X_cat_r = data_reordered["X_cat"]
                X_int_r = data_reordered["X_int"]
                y_r = data_reordered["y"]
            print(np.linalg.norm(y_o - y_r))
            print(np.linalg.norm(X_int_o - X_int_r))
            print(np.linalg.norm(X_cat_o - X_cat_r))
        '''
    else:
        print("Concatenating multiple days into %s.npz file" % str(d_path + o_filename))
        # load and concatenate data: everything is held in RAM at once
        for i in range(days):
            filename_i = npzfile + "_{0}_processed.npz".format(i)
            with np.load(filename_i) as data:
                if i == 0:
                    X_cat = data["X_cat"]
                    X_int = data["X_int"]
                    y = data["y"]
                else:
                    X_cat = np.concatenate((X_cat, data["X_cat"]))
                    X_int = np.concatenate((X_int, data["X_int"]))
                    y = np.concatenate((y, data["y"]))
            print("Loaded day:", i, "y = 1:", len(y[y == 1]), "y = 0:", len(y[y == 0]))
        with np.load(d_path + d_file + "_fea_count.npz") as data:
            counts = data["counts"]
        print("Loaded counts!")
        np.savez_compressed(
            d_path + o_filename + ".npz",
            X_cat=X_cat,
            X_int=X_int,
            y=y,
            counts=counts,
        )
    return d_path + o_filename + ".npz"
def transformCriteoAdData(X_cat, X_int, y, days, data_split, randomize, total_per_file):
    """
    Transforms Criteo Kaggle or terabyte data by applying log transformation
    on dense features and converting everything to appropriate tensors.

    Inputs:
        X_cat (ndarray): array of integers corresponding to preprocessed
          categorical features
        X_int (ndarray): array of integers corresponding to dense features
        y (ndarray): array of bool corresponding to labels
        days (int): number of day-splits the data was partitioned into
        data_split (str): flag for splitting dataset into training/validation/test
          sets; only the value "train" triggers the split
        randomize (str): determines randomization scheme
            "none": no randomization
            "day": randomizes each day"s data (only works if split = True)
            "total": randomizes total dataset
        total_per_file (list): number of samples in each day-split

    Outputs:
        if split:
            X_cat_train (tensor): sparse features for training set
            X_int_train (tensor): dense features for training set
            y_train (tensor): labels for training set
            X_cat_val (tensor): sparse features for validation set
            X_int_val (tensor): dense features for validation set
            y_val (tensor): labels for validation set
            X_cat_test (tensor): sparse features for test set
            X_int_test (tensor): dense features for test set
            y_test (tensor): labels for test set
        else:
            X_cat (tensor): sparse features
            X_int (tensor): dense features
            y (tensor): label
    """
    # define initial set of indices
    indices = np.arange(len(y))
    # create offset per file: cumulative start index of each day's samples
    offset_per_file = np.array([0] + [x for x in total_per_file])
    for i in range(days):
        offset_per_file[i + 1] += offset_per_file[i]
    # split dataset
    if data_split == 'train':
        indices = np.array_split(indices, offset_per_file[1:-1])
        # randomize train data (per day)
        if randomize == "day":  # or randomize == "total":
            for i in range(len(indices) - 1):
                indices[i] = np.random.permutation(indices[i])
            print("Randomized indices per day ...")
        # last day becomes test+validation, the remaining days are training
        train_indices = np.concatenate(indices[:-1])
        test_indices = indices[-1]
        test_indices, val_indices = np.array_split(test_indices, 2)
        print("Defined training and testing indices...")
        # randomize train data (across days)
        if randomize == "total":
            train_indices = np.random.permutation(train_indices)
            print("Randomized indices across days ...")
        # indices = np.concatenate((train_indices, test_indices))
        # create training, validation, and test sets
        X_cat_train = X_cat[train_indices]
        X_int_train = X_int[train_indices]
        y_train = y[train_indices]
        X_cat_val = X_cat[val_indices]
        X_int_val = X_int[val_indices]
        y_val = y[val_indices]
        X_cat_test = X_cat[test_indices]
        X_int_test = X_int[test_indices]
        y_test = y[test_indices]
        print("Split data according to indices...")
        # np.long was deprecated in NumPy 1.20 and removed in 1.24;
        # np.int64 is the equivalent concrete dtype
        X_cat_train = X_cat_train.astype(np.int64)
        X_int_train = np.log(X_int_train.astype(np.float32) + 1)
        y_train = y_train.astype(np.float32)
        X_cat_val = X_cat_val.astype(np.int64)
        X_int_val = np.log(X_int_val.astype(np.float32) + 1)
        y_val = y_val.astype(np.float32)
        X_cat_test = X_cat_test.astype(np.int64)
        X_int_test = np.log(X_int_test.astype(np.float32) + 1)
        y_test = y_test.astype(np.float32)
        print("Converted to tensors...done!")
        return (
            X_cat_train,
            X_int_train,
            y_train,
            X_cat_val,
            X_int_val,
            y_val,
            X_cat_test,
            X_int_test,
            y_test,
        )
    else:
        # randomize data
        if randomize == "total":
            indices = np.random.permutation(indices)
            print("Randomized indices...")
        X_cat = X_cat[indices].astype(np.int64)
        X_int = np.log(X_int[indices].astype(np.float32) + 1)
        y = y[indices].astype(np.float32)
        print("Converted to tensors...done!")
        return (X_cat, X_int, y, [], [], [], [], [], [])
def getCriteoAdData(
        datafile,
        o_filename,
        max_ind_range=-1,
        sub_sample_rate=0.0,
        days=7,
        data_split='train',
        randomize='total',
        criteo_kaggle=True,
        memory_map=False
):
    """
    Passes through entire dataset and defines dictionaries for categorical
    features and determines the number of total categories.

    Inputs:
        datafile : path to downloaded raw data file
        o_filename (str): saves results under o_filename if filename is not ""
        max_ind_range (int): categorical ids are hashed modulo this value
            when positive; -1 keeps the raw ids
        sub_sample_rate (float): probability of dropping a zero-target row
        days (int): number of day-splits the raw data is partitioned into
        data_split (str): split flag forwarded to concatCriteoAdData
        randomize (str): randomization scheme forwarded to concatCriteoAdData
        criteo_kaggle (bool): True for the single-file Kaggle dataset,
            False for the per-day Terabyte dataset
        memory_map (bool): forwarded to concatCriteoAdData

    Output:
        o_file (str): output file path
    """
    # split the datafile into path and filename
    lstr = datafile.split("/")
    d_path = "/".join(lstr[0:-1]) + "/"
    d_file = lstr[-1].split(".")[0] if criteo_kaggle else lstr[-1]
    npzfile = d_path + ((d_file + "_day") if criteo_kaggle else d_file)
    trafile = d_path + ((d_file + "_fea") if criteo_kaggle else "fea")
    # count number of datapoints in training set
    total_file = d_path + d_file + "_day_count.npz"
    if path.exists(total_file):
        # per-day counts were computed by a previous run; reuse them
        with np.load(total_file) as data:
            total_per_file = list(data["total_per_file"])
        total_count = np.sum(total_per_file)
        print("Skipping counts per file (already exist)")
    else:
        total_count = 0
        total_per_file = []
        if criteo_kaggle:
            # WARNING: The raw data consists of a single train.txt file
            # Each line in the file is a sample, consisting of 13 continuous and
            # 26 categorical features (an extra space indicates that feature is
            # missing and will be interpreted as 0).
            if path.exists(datafile):
                print("Reading data from path=%s" % (datafile))
                # first pass: count total lines
                with open(str(datafile)) as f:
                    for _ in f:
                        total_count += 1
                total_per_file.append(total_count)
                # reset total per file due to split: distribute lines evenly
                # over `days` files, first `extras` files get one more line
                num_data_per_split, extras = divmod(total_count, days)
                total_per_file = [num_data_per_split] * days
                for j in range(extras):
                    total_per_file[j] += 1
                # split into days (simplifies code later on)
                file_id = 0
                boundary = total_per_file[file_id]
                nf = open(npzfile + "_" + str(file_id), "w")
                with open(str(datafile)) as f:
                    for j, line in enumerate(f):
                        if j == boundary:
                            nf.close()
                            file_id += 1
                            nf = open(npzfile + "_" + str(file_id), "w")
                            boundary += total_per_file[file_id]
                        nf.write(line)
                nf.close()
            else:
                sys.exit("ERROR: Criteo Kaggle Display Ad Challenge Dataset path is invalid; please download from https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset")
        else:
            # WARNING: The raw data consist of day_0.gz,... ,day_23.gz text files
            # Each line in the file is a sample, consisting of 13 continuous and
            # 26 categorical features (an extra space indicates that feature is
            # missing and will be interpreted as 0).
            for i in range(days):
                datafile_i = datafile + "_" + str(i)  # + ".gz"
                if path.exists(str(datafile_i)):
                    print("Reading data from path=%s" % (str(datafile_i)))
                    # file day_<number>
                    total_per_file_count = 0
                    with open(str(datafile_i)) as f:
                        for _ in f:
                            total_per_file_count += 1
                    total_per_file.append(total_per_file_count)
                    total_count += total_per_file_count
                else:
                    sys.exit("ERROR: Criteo Terabyte Dataset path is invalid; please download from https://labs.criteo.com/2013/12/download-terabyte-click-logs")

    # process a file worth of data and reinitialize data
    # note that a file main contain a single or multiple splits
    def process_one_file(
            datfile,
            npzfile,
            split,
            num_data_in_split,
    ):
        # Parses one raw-text day file into (y, X_int, X_cat) arrays, records
        # the unique categorical values in the enclosing `convertDicts`, and
        # stores the result as <npzfile>_<split>.npz. Returns the number of
        # rows actually kept (after sub-sampling).
        with open(str(datfile)) as f:
            y = np.zeros(num_data_in_split, dtype="i4")  # 4 byte int
            X_int = np.zeros((num_data_in_split, 13), dtype="i4")  # 4 byte int
            X_cat = np.zeros((num_data_in_split, 26), dtype="i4")  # 4 byte int
            if sub_sample_rate == 0.0:
                rand_u = 1.0
            else:
                rand_u = np.random.uniform(low=0.0, high=1.0, size=num_data_in_split)
            i = 0
            for k, line in enumerate(f):
                # process a line (data point)
                line = line.split('\t')
                # set missing values to zero
                for j in range(len(line)):
                    if (line[j] == '') or (line[j] == '\n'):
                        line[j] = '0'
                # sub-sample data by dropping zero targets, if needed
                target = np.int32(line[0])
                if target == 0 and \
                   (rand_u if sub_sample_rate == 0.0 else rand_u[k]) < sub_sample_rate:
                    continue
                y[i] = target
                X_int[i] = np.array(line[1:14], dtype=np.int32)
                # categorical features are hexadecimal ids; optionally hashed
                # into [0, max_ind_range)
                if max_ind_range > 0:
                    X_cat[i] = np.array(
                        list(map(lambda x: int(x, 16) % max_ind_range, line[14:])),
                        dtype=np.int32
                    )
                else:
                    X_cat[i] = np.array(
                        list(map(lambda x: int(x, 16), line[14:])),
                        dtype=np.int32
                    )
                # count uniques
                for j in range(26):
                    convertDicts[j][X_cat[i][j]] = 1
                # debug prints
                print(
                    "Load %d/%d Split: %d Label True: %d Stored: %d"
                    % (
                        i,
                        num_data_in_split,
                        split,
                        target,
                        y[i],
                    ),
                    end="\r",
                )
                i += 1
            # store num_data_in_split samples or extras at the end of file
            # count uniques
            # X_cat_t = np.transpose(X_cat)
            # for j in range(26):
            #     for x in X_cat_t[j,:]:
            #         convertDicts[j][x] = 1
            # store parsed
            filename_s = npzfile + "_{0}.npz".format(split)
            if path.exists(filename_s):
                print("\nSkip existing " + filename_s)
            else:
                np.savez_compressed(
                    filename_s,
                    X_int=X_int[0:i, :],
                    # X_cat=X_cat[0:i, :],
                    X_cat_t=np.transpose(X_cat[0:i, :]),  # transpose of the data
                    y=y[0:i],
                )
                print("\nSaved " + npzfile + "_{0}.npz!".format(split))
        return i

    # create all splits (reuse existing files if possible)
    recreate_flag = False
    convertDicts = [{} for _ in range(26)]
    # WARNING: to get reproducable sub-sampling results you must reset the seed below
    # np.random.seed(123)
    # in this case there is a single split in each day
    for i in range(days):
        datfile_i = npzfile + "_{0}".format(i)  # + ".gz"
        npzfile_i = npzfile + "_{0}.npz".format(i)
        npzfile_p = npzfile + "_{0}_processed.npz".format(i)
        if path.exists(npzfile_i):
            print("Skip existing " + npzfile_i)
        elif path.exists(npzfile_p):
            print("Skip existing " + npzfile_p)
        else:
            recreate_flag = True
            # overwrite the pre-computed count with the number of rows that
            # survived sub-sampling
            total_per_file[i] = process_one_file(
                datfile_i,
                npzfile,
                i,
                total_per_file[i],
            )
    # report and save total into a file
    total_count = np.sum(total_per_file)
    if not path.exists(total_file):
        np.savez_compressed(total_file, total_per_file=total_per_file)
    print("Total number of samples:", total_count)
    print("Divided into days/splits:\n", total_per_file)
    # dictionary files
    counts = np.zeros(26, dtype=np.int32)
    if recreate_flag:
        # create dictionaries: map each unique raw categorical value to a
        # dense index, and persist the unique values per feature
        for j in range(26):
            for i, x in enumerate(convertDicts[j]):
                convertDicts[j][x] = i
            dict_file_j = d_path + d_file + "_fea_dict_{0}.npz".format(j)
            if not path.exists(dict_file_j):
                np.savez_compressed(
                    dict_file_j,
                    unique=np.array(list(convertDicts[j]), dtype=np.int32)
                )
            counts[j] = len(convertDicts[j])
        # store (uniques and) counts
        count_file = d_path + d_file + "_fea_count.npz"
        if not path.exists(count_file):
            np.savez_compressed(count_file, counts=counts)
    else:
        # create dictionaries (from existing files)
        for j in range(26):
            with np.load(d_path + d_file + "_fea_dict_{0}.npz".format(j)) as data:
                unique = data["unique"]
                for i, x in enumerate(unique):
                    convertDicts[j][x] = i
        # load (uniques and) counts
        with np.load(d_path + d_file + "_fea_count.npz") as data:
            counts = data["counts"]
    # process all splits
    processCriteoAdData(d_path, d_file, npzfile, days, convertDicts, counts)
    o_file = concatCriteoAdData(
        d_path,
        d_file,
        npzfile,
        trafile,
        days,
        data_split,
        randomize,
        total_per_file,
        total_count,
        memory_map,
        o_filename
    )
    return o_file
def loadDataset(
        dataset,
        max_ind_range,
        sub_sample_rate,
        randomize,
        data_split,
        raw_path="",
        pro_data="",
        memory_map=False
):
    """
    Load dataset required by DLRM.

    Inputs:
        dataset (str): "kaggle" or "terabyte"
        max_ind_range (int): categorical ids are hashed modulo this value
            when positive; -1 keeps the raw ids
        sub_sample_rate (float): probability of dropping a zero-target row
        randomize (str): "none", "day" or "total" randomization scheme
        data_split (str): split flag forwarded to preprocessing
        raw_path (str): path to the downloaded raw data
        pro_data (str): path to an already pre-processed npz file, if any
        memory_map (bool): True when pre-processed data is stored as a
            collection of per-day "reordered" files instead of one npz

    Outputs:
        file (str): path to the (possibly freshly created) processed data
        days (int): number of day-splits in the dataset

    Raises:
        ValueError: if dataset is neither "kaggle" nor "terabyte"
    """
    # dataset
    if dataset == "kaggle":
        days = 7
        o_filename = "kaggleAdDisplayChallenge_processed"
    elif dataset == "terabyte":
        days = 24
        o_filename = "terabyte_processed"
    else:
        raise ValueError("Data set option is not supported")
    # split the datafile into path and filename
    lstr = raw_path.split("/")
    d_path = "/".join(lstr[0:-1]) + "/"
    d_file = lstr[-1].split(".")[0] if dataset == "kaggle" else lstr[-1]
    npzfile = d_path + ((d_file + "_day") if dataset == "kaggle" else d_file)
    # check if pre-processed data is available
    data_ready = True
    if memory_map:
        for i in range(days):
            # BUGFIX: npzfile already starts with d_path; the previous
            # "d_path + npzfile + ..." doubled the directory prefix, so
            # existing reordered files were never detected.
            reo_data = npzfile + "_{0}_reordered.npz".format(i)
            if not path.exists(str(reo_data)):
                data_ready = False
    else:
        if not path.exists(str(pro_data)):
            data_ready = False
    # pre-process data if needed
    # WARNING: when memory mapping is used we get a collection of files
    if data_ready:
        print("Reading pre-processed data=%s" % (str(pro_data)))
        file = str(pro_data)
    else:
        print("Reading raw data=%s" % (str(raw_path)))
        file = getCriteoAdData(
            raw_path,
            o_filename,
            max_ind_range,
            sub_sample_rate,
            days,
            data_split,
            randomize,
            dataset == "kaggle",
            memory_map
        )
    return file, days
if __name__ == "__main__":
    ### import packages ###
    import argparse

    ### parse arguments ###
    arg_parser = argparse.ArgumentParser(
        description="Preprocess Criteo dataset"
    )
    # model related parameters
    arg_parser.add_argument("--max-ind-range", type=int, default=-1)
    arg_parser.add_argument("--data-sub-sample-rate", type=float, default=0.0)  # in [0, 1]
    arg_parser.add_argument("--data-randomize", type=str, default="total")  # or day or none
    arg_parser.add_argument("--memory-map", action="store_true", default=False)
    arg_parser.add_argument("--data-set", type=str, default="kaggle")  # or terabyte
    arg_parser.add_argument("--raw-data-file", type=str, default="")
    arg_parser.add_argument("--processed-data-file", type=str, default="")
    cli_args = arg_parser.parse_args()

    # run the full preprocessing pipeline for the requested dataset
    loadDataset(
        cli_args.data_set,
        cli_args.max_ind_range,
        cli_args.data_sub_sample_rate,
        cli_args.data_randomize,
        "train",
        raw_path=cli_args.raw_data_file,
        pro_data=cli_args.processed_data_file,
        memory_map=cli_args.memory_map,
    )
| [
"tjablin@google.com"
] | tjablin@google.com |
cb84e3be8cebdfd17fa883614c4b5d74c60c74ef | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03387/s553318326.py | 2e8b35e38961aeb83e521ce2f6135fcdf35594fd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # coding=utf-8
import math


def min_operations(a, b, c):
    """Return the minimum number of operations needed to make a, b, c equal.

    Each operation either adds 1 to two of the numbers or adds 2 to one of
    them, so every operation raises the total by exactly 2.  The answer is
    (3*T - (a + b + c)) // 2 for the smallest reachable target
    T >= max(a, b, c) whose deficit has even parity.
    """
    highest = max(a, b, c)
    deficit = 3 * highest - (a + b + c)
    if deficit % 2 == 0:
        return deficit // 2
    # parity mismatch: raising the target by one adds 3 to the deficit,
    # making it even (equivalent to the original ceil(...)+1 branch)
    return (deficit + 3) // 2


if __name__ == '__main__':
    A, B, C = map(int, input().split())
    print(min_operations(A, B, C))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
3d200a378f11ca1e8627ffe56b6ec0d71913e106 | 38941ac80bb2473baa8558d870daa53459688f35 | /tests/api/test_api_predictors.py | 6af555d356479721a5a9585545f184d80c82cdd9 | [] | no_license | waxiao1214/flask | 15ce2a710322e4b288c444bef9e365492fdbb626 | f2f8aa551a9236ed9c64bc27d62741b363c043a8 | refs/heads/master | 2022-04-06T00:28:58.500293 | 2020-02-05T17:47:34 | 2020-02-05T17:47:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52,614 | py | import json
from solariat_bottle.db.predictors.base_predictor import BasePredictor, TYPE_AGENTS, HYBRID, ERROR_NO_ACTIVE_MODELS
from solariat_bottle.db.predictors.models.linucb import ModelState
from solariat_bottle.db.predictors.operators import UNIQ_OPERATORS, DB_OPERATORS, OPERATOR_REGISTRY
import unittest
from solariat_bottle.db.dynamic_classes import InfomartEvent
from solariat_bottle.tests.base import UICase
from solariat_bottle.db.account import Account
from solariat_bottle.db.channel.twitter import TwitterServiceChannel
from solariat_bottle.db.roles import ADMIN
from solariat_bottle.db.predictors.factory import create_agent_matching_predictor
from solariat_bottle.db.predictors.entities_registry import EntitiesRegistry
from solariat_bottle.tests.api.test_api_agents import TestApiAgentsBase
from solariat_bottle.api.predictors import (ERR_MSG_MISSING_FIELD, ERR_MSG_NO_PREDICTOR, ERR_MSG_NO_ACCESS)
from solariat_bottle.db.schema_based import KEY_IS_ID, KEY_NAME, KEY_TYPE, KEY_EXPRESSION, TYPE_INTEGER, \
TYPE_STRING, TYPE_BOOLEAN, TYPE_LIST, TYPE_DICT
from solariat_bottle.db.dynamic_event import KEY_IS_NATIVE_ID
from solariat_bottle.schema_data_loaders.base import SchemaProvidedDataLoader
def retrain(predictor):
    """Re-train every model attached to *predictor* on its stored feedback.

    For each model: report 0/<total> progress, snapshot the existing
    training samples, re-insert the snapshot via one ordered bulk write
    (NOTE(review): this duplicates the stored training data on every call;
    presumably intentional for these tests -- verify before reusing),
    refresh cardinalities, then train and save the model.
    """
    predictor.refresh_cardinalities()
    for model in predictor.models:
        # report 0/<total> progress before training starts
        total = predictor.training_data_class.objects(predictor_id=predictor.id).count()
        progress = 0
        predictor.save_progress(model, progress, total)
        # snapshot the currently stored feedback samples
        context_list = []
        action_list = []
        reward_list = []
        for data in predictor.training_data_class.objects(predictor_id=predictor.id):
            context_list.append(data.context)
            action_list.append(data.action)
            reward_list.append(data.reward)
        import itertools
        # re-insert the snapshot in one ordered bulk operation
        # (itertools.izip: this module targets Python 2)
        bulk = predictor.training_data_class.objects.coll.initialize_ordered_bulk_op()
        for context, action, reward in itertools.izip(context_list, action_list, reward_list):
            training_data = predictor.training_data_class(predictor_id=predictor.id,
                                                          context=context,
                                                          action=action,
                                                          reward=reward,
                                                          n_batch=predictor.get_n_batch_value())
            bulk.insert(training_data.data)
        bulk.execute()
        predictor.refresh_cardinalities()
        predictor.train_models(model=model)
        # model.version += 1
        predictor.save_model(model)
class APIPredictorsCase(UICase, TestApiAgentsBase):
def setup_agent_schema(self, user, extra_schema=[]):
schema = list()
schema.extend(extra_schema)
schema.append({KEY_NAME: 'name', KEY_TYPE: TYPE_STRING})
schema.append({KEY_NAME: 'skills', KEY_TYPE: TYPE_DICT})
schema.append({KEY_NAME: 'attached_data', KEY_TYPE: TYPE_DICT})
schema.append({KEY_NAME: 'date_of_birth', KEY_TYPE: TYPE_STRING})
schema.append({KEY_NAME: 'date_of_hire', KEY_TYPE: TYPE_STRING})
schema.append({KEY_NAME: 'gender', KEY_TYPE: TYPE_STRING})
schema.append({KEY_NAME: 'location', KEY_TYPE: TYPE_STRING})
schema.append({KEY_NAME: 'native_id', KEY_TYPE: TYPE_STRING, KEY_IS_NATIVE_ID: True})
#schema.append({KEY_NAME: 'id', KEY_TYPE: TYPE_STRING})
schema.append({KEY_NAME: 'on_call', KEY_TYPE: TYPE_BOOLEAN})
schema_entity = user.account.agent_profile.create(user, SchemaProvidedDataLoader(schema))
schema_entity.update_schema(schema)
schema_entity.schema = schema_entity.discovered_schema
schema_entity.save()
schema_entity.apply_sync()
schema_entity.accept_sync()
    def test_predictor_score(self):
        """End-to-end check of the /predictors/<id>/score API endpoint.

        Verifies that: with only a zero-reward feedback sample all scores
        are 0; after a positive-reward sample both score and estimated
        reward rise and the rated agent ranks above the unrated one;
        context/action keys are matched case-insensitively; a custom
        score_expression shifts the returned scores; an invalid expression
        falls back to raw scores with a warning.
        """
        user_mail = 'admin1@test_channels.com'
        user_password = 'password'
        admin_user = self.setup_requirements(user_mail, user_password)
        token = self.get_token(user_mail, user_password)
        predictor = create_agent_matching_predictor(
            admin_user.account.id,
            state=ModelState(status=ModelState.STATUS_ACTIVE,
                             state=ModelState.CYCLE_NEW), is_test=True
        )
        # seed the predictor with a single zero-reward feedback sample
        feedback_data = dict(action=dict(action_id='UUID_2',
                                         skill='testing2',
                                         age=28,
                                         fluency='good2',
                                         seniority='veteran2'),
                             context=dict(AGE=36,
                                          GENDER='M',
                                          LOCATION='San Francisco2',
                                          N_SUBS=16,
                                          INTENTION='Closing an account2',
                                          SENIORITY='ancient2'),
                             token=token,
                             reward=0)
        self.client.post('/api/v2.0/predictors/%s/feedback' % predictor.id,
                         data=json.dumps(feedback_data),
                         content_type='application/json',
                         base_url='https://localhost')
        retrain(predictor)
        # score two candidate actions against a fresh context
        score_data = dict(actions=[dict(action_id='UUID_1',
                                        skill='testing',
                                        age=27,
                                        fluency='good',
                                        seniority='veteran'),
                                   dict(action_id='UUID_2',
                                        skill='testing',
                                        age=77,
                                        fluency='good',
                                        seniority='new')],
                          context=dict(age=35,
                                       gender='M',
                                       location='San Francisco',
                                       n_subs=15,
                                       intention='Closing an account',
                                       seniority='ancient'),
                          token=token)
        resp = self.client.post('/api/v2.0/predictors/%s/score' % predictor.id,
                                data=json.dumps(score_data),
                                content_type='application/json',
                                base_url='https://localhost')
        resp_data = json.loads(resp.data)
        #import pdb; pdb.set_trace()
        self.assertEquals(resp_data['model'], u'Full feature set')
        self.assertEquals(resp_data['predictor'], u'Test Agent Matching Predictor')
        self.assertEquals(len(resp_data['list']), 2)
        self.assertTrue('predictor_id' in resp_data.keys())
        self.assertTrue('model_id' in resp_data.keys())
        # with only a zero-reward sample, every score must be zero
        max_ucb = 0
        for entry in resp_data['list']:
            self.assertTrue("score" in entry)
            self.assertTrue("id" in entry)
            self.assertTrue("estimated_reward" in entry)
            # if entry['id'] == 'UUID_2':
            self.assertEqual(entry["score"], 0)  # Only data was a zero
            self.assertEqual(entry["estimated_reward"], 0)  # Only data was a zero
            # else:
            #     self.assertEqual(entry["score"], 0.25)  # Only data was a zero
            #     self.assertEqual(entry["estimated_reward"], 0.25)  # Only data was a zero
            if max_ucb < entry["score"]:
                max_ucb = entry["score"]
        # now feed a positive reward for UUID_1 in the scored context
        feedback_data = dict(action=dict(action_id='UUID_1',
                                         skill='testing',
                                         age=27,
                                         fluency='good',
                                         seniority='veteran'),
                             context=dict(AGE=35,
                                          GENDER='M',
                                          LOCATION='San Francisco',
                                          N_SUBS=15,
                                          INTENTION='Closing an account',
                                          SENIORITY='ancient'),
                             token=token,
                             reward=100)
        self.client.post('/api/v2.0/predictors/%s/feedback' % predictor.id,
                         data=json.dumps(feedback_data),
                         content_type='application/json',
                         base_url='https://localhost')
        retrain(predictor)
        resp = self.client.post('/api/v2.0/predictors/%s/score' % predictor.id,
                                data=json.dumps(score_data),
                                content_type='application/json',
                                base_url='https://localhost')
        resp_data = json.loads(resp.data)
        self.assertEquals(len(resp_data['list']), 2)
        # capture scores/rewards per agent to compare rankings below
        new_max_ucb = 0
        uuid1_score = 0
        uuid2_score = 0
        uuid1_ex_reward = 0
        uuid2_ex_reward = 0
        for entry in resp_data['list']:
            self.assertTrue("score" in entry)
            self.assertTrue(entry["score"] > 0)  # Some upper confidence at first
            self.assertTrue("id" in entry)
            self.assertTrue("estimated_reward" in entry)
            self.assertTrue(entry["estimated_reward"] > 0)  # Had positive example, both should be positive
            if new_max_ucb < entry["score"]:
                new_max_ucb = entry["score"]
            if entry['id'] == "UUID_1":
                uuid1_score = entry["score"]
                uuid1_ex_reward = entry["estimated_reward"]
            if entry['id'] == "UUID_2":
                uuid2_score = entry["score"]
                uuid2_ex_reward = entry["estimated_reward"]
        # Overall max ucb should be higher since we just got positive reward
        self.assertTrue(new_max_ucb > max_ucb)
        # UCB for non-rated agent should be higher but estimated reward lower
        self.assertTrue(uuid1_ex_reward > uuid2_ex_reward, '%s > %s' % (uuid1_ex_reward, uuid2_ex_reward))
        self.assertTrue(uuid1_score > uuid2_score, '%s > %s' % (uuid1_score, uuid2_score))
        # same request with mixed-case keys must yield identical scores
        score_data = dict(actions=[dict(action_id='UUID_1',
                                        Skill='testing',
                                        Age=27,
                                        Fluency='good',
                                        Seniority='veteran'),
                                   dict(action_id='UUID_2',
                                        Skill='testing',
                                        Age=77,
                                        Fluency='good',
                                        Seniority='new')],
                          context=dict(aGe=35,
                                       genDer='M',
                                       locatIon='San Francisco',
                                       n_sUbs=15,
                                       iNtention='Closing an account',
                                       senioRity='ancient'),
                          token=token)
        resp = self.client.post('/api/v2.0/predictors/%s/score' % predictor.id,
                                data=json.dumps(score_data),
                                content_type='application/json',
                                base_url='https://localhost')
        resp_data2 = json.loads(resp.data)
        self.assertEqual(resp_data['list'][0]['score'], resp_data2['list'][0]['score'])
        self.assertEqual(resp_data['list'][1]['score'], resp_data2['list'][1]['score'])
        # Now test same score with a score expression
        from solariat_bottle.api.predictors import KEY_P_SCORE, PredictorsAPIView
        predictor.score_expression = "2000 + " + KEY_P_SCORE
        predictor.save()
        resp = self.client.post('/api/v2.0/predictors/%s/score' % predictor.id,
                                data=json.dumps(score_data),
                                content_type='application/json',
                                base_url='https://localhost')
        score_result_data = json.loads(resp.data)
        self.assertTrue(score_result_data['ok'])
        for entry in score_result_data['list']:
            self.assertTrue(entry['score'] >= 2000)
        self.assertFalse('warning' in score_result_data)
        # Now try an invalid expression, check default scores + warning is returned
        PredictorsAPIView._parsers_cache = dict()
        predictor.score_expression = "2000 + unknown_key"
        predictor.save()
        resp = self.client.post('/api/v2.0/predictors/%s/score' % predictor.id,
                                data=json.dumps(score_data),
                                content_type='application/json',
                                base_url='https://localhost')
        score_result_data = json.loads(resp.data)
        self.assertTrue(score_result_data['ok'])
        for entry in score_result_data['list']:
            self.assertTrue(entry['score'] <= 2000)
        self.assertTrue('warning' in score_result_data)
    def test_score_non_active(self):
        """Scoring a predictor whose models are all inactive must fail.

        The endpoint should return ok=False with the ERROR_NO_ACTIVE_MODELS
        error, even after feedback has been submitted and retraining ran.
        """
        user_mail = 'admin1@test_channels.com'
        user_password = 'password'
        admin_user = self.setup_requirements(user_mail, user_password)
        token = self.get_token(user_mail, user_password)
        predictor = create_agent_matching_predictor(
            admin_user.account.id,
            state=ModelState(status=ModelState.STATUS_INACTIVE,
                             state=ModelState.CYCLE_NEW), is_test=True
        )
        # force every model into the inactive state
        for mdl in predictor.models:
            mdl.state.status = mdl.state.STATUS_INACTIVE
            mdl.save()
        feedback_data = dict(action=dict(action_id='UUID_2',
                                         skill='testing2',
                                         age=28,
                                         fluency='good2',
                                         seniority='veteran2'),
                             context=dict(AGE=36,
                                          GENDER='M',
                                          LOCATION='San Francisco2',
                                          N_SUBS=16,
                                          INTENTION='Closing an account2',
                                          SENIORITY='ancient2'),
                             token=token,
                             reward=0)
        self.client.post('/api/v2.0/predictors/%s/feedback' % predictor.id,
                         data=json.dumps(feedback_data),
                         content_type='application/json',
                         base_url='https://localhost')
        retrain(predictor)
        score_data = dict(actions=[dict(action_id='UUID_1',
                                        skill='testing',
                                        age=27,
                                        fluency='good',
                                        seniority='veteran'),
                                   dict(action_id='UUID_2',
                                        skill='testing',
                                        age=77,
                                        fluency='good',
                                        seniority='new')],
                          context=dict(age=35,
                                       gender='M',
                                       location='San Francisco',
                                       n_subs=15,
                                       intention='Closing an account',
                                       seniority='ancient'),
                          token=token)
        resp = self.client.post('/api/v2.0/predictors/%s/score' % predictor.id,
                                data=json.dumps(score_data),
                                content_type='application/json',
                                base_url='https://localhost')
        resp_data = json.loads(resp.data)
        # no active models -> scoring must be rejected with the known error
        self.assertFalse(resp_data['ok'])
        self.assertEqual(resp_data['error'], ERROR_NO_ACTIVE_MODELS)
def test_predictor_expressions_metadata(self):
self.login()
# expression_type = feedback_model
params = {'expression_type': 'feedback_model',
'suggestion_type': 'collections'}
response = self.client.post('/predictors/expressions/metadata',
data=json.dumps(params),
content_type='application/json')
resp_data = json.loads(response.data)
self.assertTrue(resp_data['ok'])
self.assertTrue(resp_data['metadata'])
# expression_type = reward
params = {'expression_type': 'reward',
'collections': resp_data['metadata'],
'suggestion_type': 'fields'}
response = self.client.post('/predictors/expressions/metadata',
data=json.dumps(params),
content_type='application/json')
resp_data = json.loads(response.data)
self.assertTrue(resp_data['ok'])
self.assertTrue(resp_data['metadata'])
for item in resp_data['metadata']:
self.assertTrue(item['fields'])
self.assertTrue(item['collection'])
# expression_type = action_id
params = {'expression_type': 'action_id',
'suggestion_type': 'operators'}
response = self.client.post('/predictors/expressions/metadata',
data=json.dumps(params),
content_type='application/json')
resp_data = json.loads(response.data)
self.assertTrue(resp_data['ok'])
self.assertEqual(resp_data['metadata'], UNIQ_OPERATORS.keys())
# expression_type = feedback_model, suggestion_type = operators
params = {'expression_type': 'feedback_model',
'suggestion_type': 'operators'}
response = self.client.post('/predictors/expressions/metadata',
data=json.dumps(params),
content_type='application/json')
resp_data = json.loads(response.data)
self.assertTrue(resp_data['ok'])
self.assertEqual(resp_data['metadata'], DB_OPERATORS.keys())
    def test_composite_predictor(self):
        """Create a composite predictor over three base predictors and score it.

        Builds pred1..pred3, trains each on a zero-reward sample, creates a
        composite predictor with expression "pred1 + pred2 * pred3", edits
        it, then checks that scores start at 0 and rise after a positive
        reward is fed to the first base predictor.
        """
        user_mail = 'admin1@test_channels.com'
        user_password = 'password'
        admin_user = self.setup_requirements(user_mail, user_password)
        token = self.get_token(user_mail, user_password)
        # three identically configured base predictors, named for the
        # composite raw_expression below
        first_predictor = create_agent_matching_predictor(
            admin_user.account.id,
            state=ModelState(status=ModelState.STATUS_ACTIVE,
                             state=ModelState.CYCLE_NEW), is_test=True
        )
        first_predictor.name = "pred1"
        first_predictor.save()
        second_predictor = create_agent_matching_predictor(
            admin_user.account.id,
            state=ModelState(status=ModelState.STATUS_ACTIVE,
                             state=ModelState.CYCLE_NEW), is_test=True
        )
        second_predictor.name = "pred2"
        second_predictor.save()
        third_predictor = create_agent_matching_predictor(
            admin_user.account.id,
            state=ModelState(status=ModelState.STATUS_ACTIVE,
                             state=ModelState.CYCLE_NEW), is_test=True
        )
        third_predictor.name = "pred3"
        third_predictor.save()
        # seed all three base predictors with the same zero-reward sample
        feedback_data = dict(action=dict(action_id='UUID_2',
                                         skill='testing2',
                                         age=28,
                                         fluency='good2',
                                         seniority='veteran2'),
                             context=dict(age=36,
                                          gender='M',
                                          location='San Francisco2',
                                          n_subs=16,
                                          intent='Closing an account2',
                                          seniority='ancient2'),
                             token=token,
                             reward=0)
        self.client.post('/api/v2.0/predictors/%s/feedback' % first_predictor.id,
                         data=json.dumps(feedback_data),
                         content_type='application/json',
                         base_url='https://localhost')
        retrain(first_predictor)
        self.client.post('/api/v2.0/predictors/%s/feedback' % second_predictor.id,
                         data=json.dumps(feedback_data),
                         content_type='application/json',
                         base_url='https://localhost')
        retrain(second_predictor)
        self.client.post('/api/v2.0/predictors/%s/feedback' % third_predictor.id,
                         data=json.dumps(feedback_data),
                         content_type='application/json',
                         base_url='https://localhost')
        retrain(third_predictor)
        # create the composite predictor via the UI endpoint
        composite_predictor_data = {'predictors_list': [str(first_predictor.id),
                                                        str(second_predictor.id),
                                                        str(third_predictor.id)],
                                    'predictor_type': BasePredictor.TYPE_COMPOSITE,
                                    'raw_expression': "%s + %s * %s" % (first_predictor.name,
                                                                        second_predictor.name,
                                                                        third_predictor.name),
                                    'account_id': str(admin_user.account.id),
                                    'name': 'The Composite Predictor'}
        self.login(user_mail, user_password)
        resp_data = self.client.post('/predictors/json',
                                     data=json.dumps(composite_predictor_data),
                                     content_type='application/json',
                                     base_url='https://localhost')
        resp_data = json.loads(resp_data.data)
        predictor_id = resp_data['obj']['id']
        # edit predictor
        composite_predictor_data['name'] = 'The Composite Predictor 2'
        resp_data = self.client.post('/predictors/%s' % predictor_id,
                                     data=json.dumps(composite_predictor_data),
                                     content_type='application/json',
                                     base_url='https://localhost')
        # score two candidate actions through the composite predictor
        score_data = dict(actions=[dict(action_id='UUID_1',
                                        skill='testing',
                                        age=27,
                                        fluency='good',
                                        seniority='veteran'),
                                   dict(action_id='UUID_2',
                                        skill='testing',
                                        age=77,
                                        fluency='good',
                                        seniority='new')],
                          context=dict(age=35,
                                       gender='M',
                                       location='San Francisco',
                                       n_subs=15,
                                       intent='Closing an account',
                                       seniority='ancient'),
                          token=token)
        resp = self.client.post('/api/v2.0/predictors/%s/score' % predictor_id,
                                data=json.dumps(score_data),
                                content_type='application/json',
                                base_url='https://localhost')
        resp_data = json.loads(resp.data)
        self.assertEquals(len(resp_data['list']), 2)
        # all scores are still zero: only a zero-reward sample was trained
        max_ucb = 0
        for entry in resp_data['list']:
            self.assertTrue("score" in entry)
            self.assertTrue("id" in entry)
            self.assertTrue("estimated_reward" in entry)
            # if entry['id'] != 'UUID_2':
            #     self.assertTrue(entry["score"] > 0)  # Some upper confidence at first
            #     self.assertTrue(entry["estimated_reward"], 0)  # No data at first
            # else:
            self.assertEqual(entry["score"], 0)  # Some upper confidence at first
            self.assertEqual(entry["estimated_reward"], 0)  # No data at first
            if max_ucb < entry["score"]:
                max_ucb = entry["score"]
        # positive reward for UUID_1, fed only to the first base predictor
        feedback_data = dict(action=dict(action_id='UUID_1',
                                         skill='testing',
                                         age=27,
                                         fluency='good',
                                         seniority='veteran'),
                             context=dict(age=35,
                                          gender='M',
                                          location='San Francisco',
                                          n_subs=15,
                                          intent='Closing an account',
                                          seniority='ancient'),
                             token=token,
                             reward=100)
        self.client.post('/api/v2.0/predictors/%s/feedback' % first_predictor.id,
                         data=json.dumps(feedback_data),
                         content_type='application/json',
                         base_url='https://localhost')
        retrain(first_predictor)
        resp = self.client.post('/api/v2.0/predictors/%s/score' % predictor_id,
                                data=json.dumps(score_data),
                                content_type='application/json',
                                base_url='https://localhost')
        resp_data = json.loads(resp.data)
        self.assertEquals(len(resp_data['list']), 2)
        # capture per-agent values to compare rankings below
        new_max_ucb = 0
        uuid1_score = 0
        uuid2_score = 0
        uuid1_ex_reward = 0
        uuid2_ex_reward = 0
        for entry in resp_data['list']:
            self.assertTrue("score" in entry)
            self.assertTrue(entry["score"] > 0)  # Some upper confidence at first
            self.assertTrue("id" in entry)
            self.assertTrue("estimated_reward" in entry)
            self.assertTrue(entry["estimated_reward"] > 0)  # Had positive example, both should be positive
            if new_max_ucb < entry["score"]:
                new_max_ucb = entry["score"]
            if entry['id'] == "UUID_1":
                uuid1_score = entry["score"]
                uuid1_ex_reward = entry["estimated_reward"]
            if entry['id'] == "UUID_2":
                uuid2_score = entry["score"]
                uuid2_ex_reward = entry["estimated_reward"]
        # Overall max ucb should be higher since we just got positive reward
        self.assertTrue(new_max_ucb > max_ucb)
        # UCB for non-rated agent should be higher but estimated reward lower
        self.assertTrue(uuid1_ex_reward > uuid2_ex_reward, '%s > %s' % (uuid1_ex_reward, uuid2_ex_reward))
        self.assertTrue(uuid1_score > uuid2_score, '%s > %s' % (uuid1_score, uuid2_score))
    def test_filter_based_score(self):
        """Score with an ``action_filters`` expression and verify reward flow.

        Creates three agent profiles (two located in 'San Francisco', one in
        'San Jose') and scores with ``action_filters='location=San Francisco'``
        so only the two matching agents may be returned.  After one
        zero-reward feedback every score/estimated_reward is 0; after a
        +100 reward both become positive and the best UCB score increases.
        """
        user_mail = 'admin1@test_channels.com'
        user_password = 'password'
        admin_user = self.setup_requirements(user_mail, user_password)
        token = self.get_token(user_mail, user_password)
        # Three agent profiles; the location filter below should keep only
        # UUID_1 and UUID_2 (both 'San Francisco') and drop UUID_3 ('San Jose').
        batch_data = [{u'name': u'Tester Testerson',
                       u'gender': u'M',
                       u'date_of_birth': u'06/06/1985',
                       u'date_of_hire': u'01/01/1988',
                       u'native_id': u'UUID_1',
                       u'location': u'San Francisco',
                       u'attached_data': dict(skill='testing',
                                              age=27,
                                              fluency='good',
                                              seniority='veteran',
                                              location='San Francisco')},
                      {u'name': u'Badboy Agent',
                       u'gender': u'M',
                       u'date_of_birth': u'07/11/1951',
                       u'date_of_hire': u'04/04/2004',
                       u'native_id': u'UUID_2',
                       u'location': u'San Francisco',
                       u'attached_data': dict(action_id='UUID_2',
                                              skill='testing',
                                              age=77,
                                              fluency='good',
                                              seniority='new',
                                              location='San Francisco')},
                      {u'name': u'Sadboy Agent',
                       u'gender': u'M',
                       u'date_of_birth': u'07/11/1982',
                       u'date_of_hire': u'04/04/2005',
                       u'native_id': u'UUID_3',
                       u'location': u'San Jose',
                       u'skills': {u'products': 3, u'hardware': 10}}]
        self.batch_create(token, batch_data)
        predictor = create_agent_matching_predictor(admin_user.account.id, is_test=True)
        predictor.action_type = TYPE_AGENTS
        predictor.save()
        # Seed the model with a single zero-reward observation, then retrain.
        feedback_data = dict(action=dict(action_id='UUID_2',
                                         skill='testing2',
                                         age=28,
                                         fluency='good2',
                                         seniority='veteran2'),
                             context=dict(AGE=36,
                                          GENDER='M',
                                          LOCATION='San Francisco2',
                                          N_SUBS=16,
                                          INTENTION='Closing an account2',
                                          SENIORITY='ancient2'),
                             token=token,
                             reward=0)
        self.client.post('/api/v2.0/predictors/%s/feedback' % predictor.id,
                         data=json.dumps(feedback_data),
                         content_type='application/json',
                         base_url='https://localhost')
        retrain(predictor)
        # Score via a filter expression instead of an explicit actions list.
        score_data = dict(action_filters='location=San Francisco',
                          context=dict(AGE=35,
                                       gender='M',
                                       location='San Francisco',
                                       n_subs=15,
                                       intention='Closing an account',
                                       seniority='ancient'),
                          token=token)
        resp = self.client.post('/api/v2.0/predictors/%s/score' % predictor.id,
                                data=json.dumps(score_data),
                                content_type='application/json',
                                base_url='https://localhost')
        resp_data = json.loads(resp.data)
        self.assertEquals(len(resp_data['list']), 2)
        max_ucb = 0
        for entry in resp_data['list']:
            self.assertTrue("score" in entry)
            self.assertTrue("id" in entry)
            self.assertTrue("estimated_reward" in entry)
            self.assertEqual(entry["score"], 0)  # One case with a reward of 0
            self.assertEqual(entry["estimated_reward"], 0)  # One case with a reward of 0
            if max_ucb < entry["score"]:
                max_ucb = entry["score"]
        # Reward the first returned agent with +100 and retrain; afterwards
        # both agents' scores and estimated rewards must be positive.
        feedback_data = dict(action=dict(action_id=resp_data['list'][0]['id']),
                             context=dict(AGE=35,
                                          GENDER='M',
                                          LOCATION='San Francisco',
                                          N_SUBS=15,
                                          INTENTION='Closing an account',
                                          SENIORITY='ancient'),
                             token=token,
                             reward=100)
        self.client.post('/api/v2.0/predictors/%s/feedback' % predictor.id,
                         data=json.dumps(feedback_data),
                         content_type='application/json',
                         base_url='https://localhost')
        retrain(predictor)
        resp = self.client.post('/api/v2.0/predictors/%s/score' % predictor.id,
                                data=json.dumps(score_data),
                                content_type='application/json',
                                base_url='https://localhost')
        resp_data = json.loads(resp.data)
        self.assertEquals(len(resp_data['list']), 2)
        new_max_ucb = 0
        for entry in resp_data['list']:
            self.assertTrue("score" in entry)
            self.assertTrue(entry["score"] > 0)  # Some upper confidence at first
            self.assertTrue("id" in entry)
            self.assertTrue("estimated_reward" in entry)
            self.assertTrue(entry["estimated_reward"] > 0)  # Had positive example, both should be positive
            if new_max_ucb < entry["score"]:
                new_max_ucb = entry["score"]
        # Overall max ucb should be higher since we just got positive reward
        self.assertTrue(new_max_ucb > max_ucb)
def test_get_dataset_fields(self):
self.login()
expected_dataset_fields = [{'name': k, 'type': v.__class__.__name__} for k, v in InfomartEvent.fields.items()]
params = {'dataset_name': 'TestDataSet'}
response = self.client.post('/predictors/expressions/dataset_fields',
data=json.dumps(params),
content_type='application/json')
resp_data = json.loads(response.data)
self.assertTrue(resp_data['ok'])
self.assertTrue(resp_data['dataset_fields'])
self.assertTrue(resp_data['dataset_name'])
self.assertEqual(resp_data['dataset_fields'], expected_dataset_fields)
    @unittest.skip('Skip for now, because it always fails at jenkins job')
    def test_predictor_multi_intentions(self):
        """Stress a HYBRID model with many distinct skills/intentions.

        Every distinct agent skill creates a new intention label and a new
        feature index in the classifier; this test scores more intentions
        than initially present, then feeds alternating high (100) and low (7)
        rewards and checks the estimated rewards separate around 40.
        """
        user_mail = 'admin1@test_channels.com'
        user_password = 'password'
        admin_user = self.setup_requirements(user_mail, user_password)
        token = self.get_token(user_mail, user_password)
        predictor = create_agent_matching_predictor(
            admin_user.account.id,
            state=ModelState(status=ModelState.STATUS_ACTIVE,
                             state=ModelState.CYCLE_NEW), is_test=True)
        # Force an untrained classifier and switch the first model to HYBRID.
        predictor.models[0]._clf = 0
        predictor.models[0].model_type = HYBRID
        predictor.models[0].save()
        # One zero-reward observation for UUID_2 so its global score stays 0.
        feedback_data = dict(action=dict(action_id='UUID_2',
                                         skill='testing2',
                                         age=28,
                                         fluency='good2',
                                         seniority='veteran2'),
                             context=dict(AGE=36,
                                          GENDER='M',
                                          LOCATION='San Francisco2',
                                          N_SUBS=16,
                                          INTENTION='Closing an account2',
                                          SENIORITY='ancient2'),
                             token=token,
                             reward=0)
        self.client.post('/api/v2.0/predictors/%s/feedback' % predictor.id,
                         data=json.dumps(feedback_data),
                         content_type='application/json',
                         base_url='https://localhost')
        retrain(predictor)
        # Each agent skill should create new intention label and new feature index in the classifier
        # test what happens if we have more than expected
        n_agents = 20
        n_scores = 15
        base_product = "PRODUCT_%s"
        base_uuid = 'UUID_%s'
        base_intention = "INTENTION_%s"
        agents = []
        for idx in xrange(n_agents):
            agents.append(dict(skills={base_product % idx : 7},
                               action_id=base_uuid % idx,
                               age=27,
                               seniority='veteran',
                               location="San Francisco"))
        # Each agent had a separate skill, intents would be created for all of them
        for idx in xrange(n_scores):
            score_data = dict(actions=[agents[idx], agents[idx + 1]],
                              context=dict(age=35,
                                           gender='M',
                                           location='San Francisco',
                                           n_subs=15,
                                           intention=base_intention % idx,
                                           seniority='ancient'),
                              token=token)
            resp = self.client.post('/api/v2.0/predictors/%s/score' % predictor.id,
                                    data=json.dumps(score_data),
                                    content_type='application/json',
                                    base_url='https://localhost')
            resp_data = json.loads(resp.data)
            self.assertEquals(len(resp_data['list']), 2)
            for entry in resp_data['list']:
                self.assertTrue("score" in entry)
                self.assertTrue("id" in entry)
                self.assertTrue("estimated_reward" in entry)
                if entry['id'] != 'UUID_2':
                    self.assertTrue(entry["score"] > 0)  # Hybrid. Only global model got a 0.
                    self.assertTrue(entry["estimated_reward"] > 0)
                else:
                    self.assertEquals(entry["score"], 0)
                    self.assertEquals(entry["estimated_reward"], 0)
        # Alternate strong (100) and weak (7) rewards across the agents.
        for idx in xrange(n_scores):
            feedback_data = dict(action=agents[idx],
                                 context=dict(AGE=35,
                                              GENDER='M',
                                              LOCATION='San Francisco',
                                              N_SUBS=15,
                                              intention=base_intention % idx,
                                              seniority='ancient'),
                                 token=token,
                                 reward=100 if idx % 2 == 0 else 7)
            self.client.post('/api/v2.0/predictors/%s/feedback' % predictor.id,
                             data=json.dumps(feedback_data),
                             content_type='application/json',
                             base_url='https://localhost')
        retrain(predictor)
        # Check agents that actually got scored and see improvements
        for idx in xrange(n_scores - 1):
            score_data = dict(actions=[agents[idx], agents[idx + 1]],
                              context=dict(AGE=35,
                                           gender='M',
                                           location='San Francisco',
                                           n_subs=15,
                                           intention=base_intention % idx,
                                           seniority='ancient'),
                              token=token)
            resp = self.client.post('/api/v2.0/predictors/%s/score' % predictor.id,
                                    data=json.dumps(score_data),
                                    content_type='application/json',
                                    base_url='https://localhost')
            resp_data = json.loads(resp.data)
            for entry in resp_data['list']:
                # Agent index is encoded in the id suffix ('UUID_<n>').
                agent_number = int(entry['id'].split('_')[1])
                if agent_number % 2 == 0:
                    # We gave positive feedback
                    self.assertTrue(entry['estimated_reward'] > 40, entry)
                else:
                    # We gave negative feedback
                    self.assertTrue(entry['estimated_reward'] < 40, entry)
    def test_predictors_invalid_requests(self):
        """Negative-path coverage for the /score and /feedback endpoints.

        Cases: unknown predictor id, missing required 'context' field,
        actions lacking 'action_id', a non-dict context payload, and a
        predictor owned by another account — each must return ok=False.
        """
        user_mail = 'admin1@test_channels.com'
        user_password = 'password'
        admin_user = self.setup_requirements(user_mail, user_password)
        predictor = create_agent_matching_predictor(admin_user.account.id, is_test=True)
        token = self.get_token(user_mail, user_password)
        # Test with score / feedback invalid predictor id
        score_data = dict(actions=[dict(action_id='UUID_1',
                                        skill='testing',
                                        age=27,
                                        fluency='good',
                                        seniority='veteran'),
                                   dict(action_id='UUID_2',
                                        skill='testing',
                                        age=77,
                                        fluency='good',
                                        seniority='new')],
                          context=dict(AGE=35,
                                       gender='M',
                                       location='San Francisco',
                                       n_subs=15,
                                       intention='Closing an account',
                                       seniority='ancient'),
                          token=token)
        resp = self.client.post('/api/v2.0/predictors/%s/score' % (str(predictor.id) + 'invalid'),
                                data=json.dumps(score_data),
                                content_type='application/json',
                                base_url='https://localhost')
        resp_data = json.loads(resp.data)
        self.assertFalse(resp_data['ok'])
        self.assertEqual(ERR_MSG_NO_PREDICTOR % (str(predictor.id) + 'invalid'), resp_data['error'])
        # Feedback against the same unknown predictor id; the response is
        # deliberately not asserted here (endpoint must simply not crash).
        feedback_data = dict(action=dict(action_id='UUID_1',
                                         skill='testing',
                                         age=27,
                                         fluency='good',
                                         seniority='veteran'),
                             context=dict(AGE=35,
                                          gender='M',
                                          location='San Francisco',
                                          n_subs=15,
                                          intention='Closing an account',
                                          seniority='ancient'),
                             token=token,
                             reward=100)
        self.client.post('/api/v2.0/predictors/%s/feedback' % (str(predictor.id) + 'invalid'),
                         data=json.dumps(feedback_data),
                         content_type='application/json',
                         base_url='https://localhost')
        # Missing required field
        score_data.pop('context')
        resp = self.client.post('/api/v2.0/predictors/%s/score' % (str(predictor.id)),
                                data=json.dumps(score_data),
                                content_type='application/json',
                                base_url='https://localhost')
        resp_data = json.loads(resp.data)
        self.assertFalse(resp_data['ok'])
        self.assertEquals(ERR_MSG_MISSING_FIELD % 'context', resp_data['error'])
        # Missing action id
        score_data = dict(actions=[dict(skill='testing',
                                        age=27,
                                        fluency='good',
                                        seniority='veteran'),
                                   dict(skill='testing',
                                        age=77,
                                        fluency='good',
                                        seniority='new')],
                          context=dict(AGE=35,
                                       gender='M',
                                       location='San Francisco',
                                       n_subs=15,
                                       intention='Closing an account',
                                       seniority='ancient'),
                          token=token)
        resp = self.client.post('/api/v2.0/predictors/%s/score' % (str(predictor.id)),
                                data=json.dumps(score_data),
                                content_type='application/json',
                                base_url='https://localhost')
        resp_data = json.loads(resp.data)
        self.assertFalse(resp_data['ok'])
        # Invalid context
        score_data = dict(actions=[dict(skill='testing',
                                        age=27,
                                        fluency='good',
                                        seniority='veteran'),
                                   dict(skill='testing',
                                        age=77,
                                        fluency='good',
                                        seniority='new')],
                          context=[1, 2, 3, 4],
                          token=token)
        resp = self.client.post('/api/v2.0/predictors/%s/score' % (str(predictor.id)),
                                data=json.dumps(score_data),
                                content_type='application/json',
                                base_url='https://localhost')
        resp_data = json.loads(resp.data)
        self.assertFalse(resp_data['ok'])
        # No access to predictor
        new_account = Account.objects.create(name='test2')
        other_predictor = create_agent_matching_predictor(new_account.id, is_test=True)
        score_data = dict(actions=[dict(action_id='UUID_1',
                                        skill='testing',
                                        age=27,
                                        fluency='good',
                                        seniority='veteran'),
                                   dict(action_id='UUID_2',
                                        skill='testing',
                                        age=77,
                                        fluency='good',
                                        seniority='new')],
                          context=dict(AGE=35,
                                       gender='M',
                                       location='San Francisco',
                                       n_subs=15,
                                       intention='Closing an account',
                                       seniority='ancient'),
                          token=token)
        resp = self.client.post('/api/v2.0/predictors/%s/score' % (str(other_predictor.id)),
                                data=json.dumps(score_data),
                                content_type='application/json',
                                base_url='https://localhost')
        resp_data = json.loads(resp.data)
        self.assertFalse(resp_data['ok'])
        self.assertEquals(ERR_MSG_NO_PREDICTOR % other_predictor.id, resp_data['error'])
    def test_testpredictor(self):
        """Create a lookup-table "TestPredictor" via the API and verify that
        /score returns exactly the configured (action, context) -> score.
        """
        user_mail = 'admin1@test_channels.com'
        user_password = 'password'
        admin_user = self.setup_requirements(user_mail, user_password,
                                             extra_schema=[{KEY_NAME: 'employeeId',
                                                            KEY_TYPE: TYPE_DICT}])
        token = self.get_token(user_mail, user_password)
        # create/reset testpredictor
        agent_profile_schema = admin_user.account.agent_profile._get()
        AgentProfile = agent_profile_schema.get_data_class()
        action_id = "17"
        ap = AgentProfile()
        ap.id = action_id
        ap.native_id = action_id
        # NOTE(review): reload() was observed not to round-trip correctly —
        # suspected to relate to how the id is handled (stored as _id but
        # retrieved as id when the details are printed out).  Left as-is.
        ap.save()
        ap.reload()
        context = {u'status': u'VIP', u'last_call_intent': {}, u'first_name': u'John', u'last_name': u'B',
                   u'assigned_segments': {}, u'age': u'53', u'prr-ixn-start-utc': u'53583', u'cust_intent_ced': u'1',
                   u'intention': u'CLOSING_AN_ACCOUNT', u'products': {}, u'location': u'USA', u'groups': {},
                   u'seniority': u'NEW', u'sex': u'M', u'id': u'574db52707d0a354e79f327d', u'cust_req_survey': u'0'}
        score = 0.7
        # lookup_map maps a (action id, context) pair to a fixed score.
        request_data = dict(
            token=token,
            name="TestPredictor",
            lookup_map=(
                ((str(ap.id), context), score),
            )
        )
        resp = self.client.post('/api/v2.0/predictors/testid/testpredictor',
                                data=json.dumps(request_data),
                                content_type='application/json',
                                base_url='https://localhost')
        resp_data = json.loads(resp.data)
        self.assertTrue(resp_data["ok"])
        predictor_id = resp_data["data"]["predictor_id"]
        # Scoring the configured action/context must return the fixed score.
        score_data = dict(actions=[dict(action_id=action_id)],
                          context=context,
                          token=token)
        resp = self.client.post('/api/v2.0/predictors/%s/score' % predictor_id,
                                data=json.dumps(score_data),
                                content_type='application/json',
                                base_url='https://localhost')
        resp_data2 = json.loads(resp.data)
        reponse_item = resp_data2["list"][0]  # sic: existing variable-name typo kept
        self.assertEquals(1, len(resp_data2["list"]))
        self.assertEquals(score, reponse_item["score"])
        self.assertEquals(action_id, reponse_item["id"])
        self.assertTrue(resp_data2['ok'])
    def test_reset(self):
        """POST /predictors/<id>/reset must succeed for the predictor owner."""
        user_mail = 'admin1@test_channels.com'
        user_password = 'password'
        admin_user = self.setup_requirements(user_mail, user_password)
        token = self.get_token(user_mail, user_password)
        predictor = create_agent_matching_predictor(admin_user.account.id, is_test=True)
        resp = self.client.post('/api/v2.0/predictors/%s/reset' % str(predictor.id),
                                content_type='application/json',
                                data=json.dumps({'token': token}),
                                base_url='https://localhost')
        resp_data = json.loads(resp.data)
        # NOTE(review): assertTrue("ok", resp_data['status']) always passes —
        # "ok" is truthy and the second argument is only the failure message,
        # so the response is never actually checked.  The author likely meant
        # assertEqual("ok", resp_data['status']) (or asserting resp_data['ok']
        # as the other tests do); confirm the response schema before fixing.
        self.assertTrue("ok", resp_data['status'])
def test_expression_validate(self):
self.login()
predictor = create_agent_matching_predictor(str(self.user.account.id), is_test=True)
entities_registry = EntitiesRegistry()
def _validate(user_expression):
expression_data = entities_registry.generate_expression_context(predictor, user_expression)
params = {'expression_type': 'feedback_model',
'expression': expression_data['expression']}
return self.client.post('/predictors/expressions/validate',
data=json.dumps(params),
content_type='application/json')
# No longer valid
# resp = _validate("collect(InfomartEvent)") # simple mongo db expression
# resp_data = json.loads(resp.data)
# self.assertTrue(resp_data['ok'])
# resp = _validate("collect(InfomartEvent) + isin(1, [1, 2])") # multiple expression
resp = _validate("isin(1, [1, 2])")
resp_data = json.loads(resp.data)
self.assertTrue(resp_data['ok'])
resp = _validate("1 + 3")
resp_data = json.loads(resp.data)
self.assertTrue(resp_data['ok'])
resp = _validate("1 + []") # invalid expression
resp_data = json.loads(resp.data)
self.assertFalse(resp_data['ok'])
resp = _validate("collect(InfomartEvent") # invalid syntax, missing )
resp_data = json.loads(resp.data)
self.assertFalse(resp_data['ok'])
resp = _validate("abcdef(InfomartEvent)") # invalid operator
resp_data = json.loads(resp.data)
self.assertFalse(resp_data['ok'])
| [
"henryza1994@outlook.com"
] | henryza1994@outlook.com |
5d9ef8ec652fcaaa445cf31edc0e29f45e7c8046 | 4e5141121d8b4015db233cbc71946ec3cfbe5fe6 | /samples/basic/crud/models/openconfig/openconfig-lldp/nc-create-config-lldp-20-ydk.py | 511c445106d13e34a5fd9a0ae7386f7c0a536ce3 | [
"Apache-2.0"
] | permissive | itbj/ydk-py-samples | 898c6c9bad9d6f8072892300d42633d82ec38368 | c5834091da0ebedbb11af7bbf780f268aad7040b | refs/heads/master | 2022-11-20T17:44:58.844428 | 2020-07-25T06:18:02 | 2020-07-25T06:18:02 | 282,382,442 | 1 | 0 | null | 2020-07-25T06:04:51 | 2020-07-25T06:04:50 | null | UTF-8 | Python | false | false | 2,632 | py | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Create config for model openconfig-lldp.
usage: nc-create-config-lldp-20-ydk.py [-h] [-v] device
positional arguments:
device NETCONF device (ssh://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.openconfig import openconfig_lldp as oc_lldp
import logging
def config_lldp(lldp):
    """Populate *lldp* with the desired configuration: enable LLDP globally."""
    lldp.config.enabled = True
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="NETCONF device (ssh://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create NETCONF provider
provider = NetconfServiceProvider(address=device.hostname,
port=device.port,
username=device.username,
password=device.password,
protocol=device.scheme)
# create CRUD service
crud = CRUDService()
lldp = oc_lldp.Lldp() # create config object
config_lldp(lldp) # add object configuration
crud.create(provider, lldp) # create object on NETCONF device
exit()
# End of script
| [
"deom119@gmail.com"
] | deom119@gmail.com |
061f3b653e2531aaab6be7deac110e76ea9b4b75 | 9a3262882123a0937d5b713919688ba7c580ae9f | /eslearn/visualization/lc_scatterplot.py | bba48c93e72b3c2f8a6240a8987344545dda8ac5 | [
"MIT"
] | permissive | sysunwenbo/easylearn | fa7f6dd6cf2e0e66780469dc7046b46e486574fd | 145274cb374c0337b7ec5aed367841663c527a6f | refs/heads/master | 2022-04-18T00:16:56.263547 | 2020-04-16T15:45:07 | 2020-04-16T15:45:07 | 256,792,139 | 0 | 1 | MIT | 2020-04-18T15:49:10 | 2020-04-18T15:49:09 | null | UTF-8 | Python | false | false | 2,569 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 15:29:02 2018

Seaborn styling notes:
    sns.set(style='ticks', palette='muted', color_codes=True, font_scale=1.5)
    sns.set_style('dark')
    Themes (style): darkgrid, whitegrid, dark, white, ticks; the default is darkgrid.
    sns.set_palette: deep, muted, bright, pastel, dark, colorblind
    sns.set_context('notebook', rc={'lines.linewidth': 1.5})
    sns.despine():
        For white backgrounds (white, whitegrid) and the ticks style, the top
        spine is unnecessary and is removed by default.
    sns.despine(left=True): also removes the left spine, i.e. the y axis.
    Note: despine() must be called after the plot command to take effect.

@author: li chao
# 载入绘图模块
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
# 散点图和拟合直线 以及分布
def scatter_LC(df, x='x', y='y', color='g', marker='o'):
    """Scatter plot of df[x] vs df[y] with a fitted regression line.

    Applies a 'paper'/whitegrid/colorblind seaborn theme, removes the top and
    right spines, and restyles tick labels (size 10) and axis labels (size 15).
    """
    # The global seaborn theme must be applied before plotting.
    sns.set(context='paper', style='whitegrid', palette='colorblind', font='sans-serif',
            font_scale=1, color_codes=False, rc=None)
    sns.regplot(data=df, x=x, y=y, fit_reg=1, color=color, marker=marker)
    # Post-plot cosmetics on the current axes (despine must come after plot).
    axes = plt.gca()
    sns.despine()
    x_tick_labels = axes.get_xticklabels()
    y_tick_labels = axes.get_yticklabels()
    x_label = axes.get_xlabel()
    y_label = axes.get_ylabel()
    plt.setp(x_tick_labels, size=10, rotation=0, horizontalalignment='right')
    plt.setp(y_tick_labels, size=10, rotation=0, horizontalalignment='right')
    plt.xlabel(x_label, size=15, rotation=0)
    plt.ylabel(y_label, size=15, rotation=0)
if __name__ == "__main__":
plt.figure(figsize=(10,8))
signal_p = r'D:\WorkStation_2018\Workstation_Old\Workstation_2019_Insomnia_caudate_GCA\GCA\Y2X\ROISignals_T2\ROISignals_ROISignal_patients.txt'
signal_c = r'D:\WorkStation_2018\Workstation_Old\Workstation_2019_Insomnia_caudate_GCA\GCA\Y2X\ROISignals_T2\ROISignals_ROISignal_controls.txt'
s = r'D:\WorkStation_2018\Workstation_Old\Workstation_2019_Insomnia_caudate_GCA\GCA\Y2X\ROISignals_T2\sas.txt'
df_signal_p = pd.read_csv(signal_p,header=None)
df_signal_c = pd.read_csv(signal_c, header=None)
df_scale = pd.read_csv(s,header=None)
df = pd.concat([df_signal_p,df_signal_c],axis=0)
dia = np.hstack([np.zeros(31,), np.ones(47,)])
df['dia'] = pd.DataFrame(dia)
df = pd.concat([df_signal_p,df_scale],axis=1)
df.columns = ['x','y']
scatter_LC(df, 'x', 'y', color='#008B8B', marker='o')
plt.show()
plt.savefig('pDMN_sas.tif', dpi=600)
| [
"you@example.com"
] | you@example.com |
b3d4d7e07e5de3d880910b6037db32cf7729422c | 71fafe9fb2190b6acf09f109105ca362bb9018c2 | /jcsbms/lottery/migrations/0007_auto_20160426_1919.py | 9c521e85586a5c412a25d51a1cb6aada2a875126 | [] | no_license | zhangyibo007/bms | 1f43ca98057a72f1b62769719cb4aefbb4ffb289 | 1ae88e90415f0495d3a647112de0876da0b18e5e | refs/heads/master | 2021-06-21T05:40:24.468473 | 2017-08-02T12:35:08 | 2017-08-02T12:35:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-26 11:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.5 (see file header): introduces the
    # TeamName model, which maps a team's cup-specific name to its original
    # and (optionally) standardized name.
    dependencies = [
        ('lottery', '0006_auto_20151223_1530'),
    ]
    operations = [
        migrations.CreateModel(
            name='TeamName',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cup_name', models.CharField(max_length=16)),
                ('original_name', models.CharField(max_length=16)),
                # Nullable: the standardized name may not be known yet.
                ('stand_name', models.CharField(max_length=16, null=True)),
            ],
        ),
        # Each (cup_name, original_name) pair may appear at most once.
        migrations.AlterUniqueTogether(
            name='teamname',
            unique_together=set([('cup_name', 'original_name')]),
        ),
    ]
| [
"zhangyibo@caifuzhinan.com"
] | zhangyibo@caifuzhinan.com |
640284a8f9ab1215ae55d163b7b3ebdf170c1771 | d032bc0c01a7cd598481644e22043de8df4c71c4 | /consultad/routing.py | 6c3b0ea360f97d12db2ccd7bfdf474c4dd9c66f3 | [] | no_license | amrit-kumar/project-for-engineering | eb5f410cd2f0a271633fb6c24132a36e6215f0e0 | 7e975866e540ab4625e735009fdba971df74e393 | refs/heads/master | 2020-12-03T01:49:02.429186 | 2017-06-30T09:09:46 | 2017-06-30T09:09:46 | 95,863,800 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | from channels.routing import route
# from consultant_app.consumers import ws_add, ws_message, ws_disconnect
from consultant_app.consumers import ws_connect, ws_message, ws_disconnect,repeat_me
from consultant_app.consumers import ws_message
from django.conf.urls import url,include
# channel_routing = [
# route("http.request", "consultant_app.consumers.http_consumer"),
# ]
# Django Channels (v1-style) routing table: maps channel names to consumers.
channel_routing = [
    # WebSocket lifecycle events, filtered to paths starting with /chat.
    route("websocket.connect", ws_connect,path=r"^/chat"),
    route("websocket.receive", ws_message,path=r"^/chat"),
    # Disconnects are handled for every path (no path filter).
    route("websocket.disconnect", ws_disconnect),
    # Custom application channel consumed by the repeat_me handler.
    route("repeat-me", repeat_me),
]
# inner_routes = [
# route("websocket.connect", repeat_me, path=r'^/stream/'),
# ]
# routing = [
# include(inner_routes, path=r'^/repeat_me')
# ]
| [
"kumaramrit38@gmail.com"
] | kumaramrit38@gmail.com |
802e8ff796848053362c0ffc27d0950a0150d1ef | e415323eec1a2dd547d988465c9174355b4a4f4c | /setup.py | 5fd8e98a10a230dea5667ce80ca3c3001218f4f6 | [
"MIT"
] | permissive | mjuenema/micropython-bitstring | 5e87d054d445ee0368acfe4e01c90dfa79245016 | 500c5fe987e7205dd9f937b5d5e0a8f4505a1f11 | refs/heads/master | 2021-01-21T22:06:12.469623 | 2017-06-26T13:33:34 | 2017-06-26T13:33:34 | 95,167,519 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,418 | py | from ubitstring import __version__
import sys
# Remove current dir from sys.path, otherwise setuptools will peek up our
# module instead of system's.
sys.path.pop(0)
from setuptools import setup
sys.path.append("..")
# Read the long description for setup(**kwds) below.  A context manager closes
# the file handle promptly (the original bare open() leaked it to the GC).
with open('README.rst') as readme:
    kwds = {'long_description': readme.read()}
# Guard against unsupported interpreters.  The original compared against 2,
# which contradicted its own error message ("needs Python 3 or later") and
# let Python 2 slip through; the package's classifiers target Python 3 only.
if sys.version_info[0] < 3:
    raise Exception('This version of bitstring needs Python 3 or later.')
# Package metadata; kwds (defined above) supplies long_description.
# NOTE(review): "Scrott Griffith's" in the description is presumably a typo
# for "Scott Griffiths", the author of the original bitstring package.
setup(name='micropython-bitstring',
      version=__version__,
      description="Very stripped down version of Scrott Griffith's Bitstring package.",
      author='Markus Juenemann',
      author_email='markus@juenemann.net',
      url='https://github.com/mjuenema/micropython-bitstring',
      download_url='https://pypi.python.org/pypi/micropython-bitstring/',
      license='The MIT License: http://www.opensource.org/licenses/mit-license.php',
      py_modules=['ubitstring'],
      platforms='all',
      classifiers = [
          'Development Status :: 4 - Beta',
          'Intended Audience :: Developers',
          'Operating System :: OS Independent',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: Implementation :: MicroPython',
          'Topic :: Software Development :: Libraries :: Python Modules'
      ],
      # MicroPython ports of the stdlib modules ubitstring depends on.
      install_requires=['micropython-copy','micropython-binascii','micropython-struct','micropython-types'],
      **kwds
      )
| [
"markus@juenemann.net"
] | markus@juenemann.net |
fa210d6b01ae3ecaabdd4162852c0f7eb986306f | 34f365117eb1d846fa922c24f3fc650188ce9746 | /bin/bed2removeNeighbors.py | cd193585aa637fee5aa5499c7c8172c90c2db7fb | [
"MIT"
] | permissive | PinarSiyah/NGStoolkit | 53ac6d87a572c498414a246ae051785b40fbc80d | b360da965c763de88c9453c4fd3d3eb7a61c935d | refs/heads/master | 2021-10-22T04:49:51.153970 | 2019-03-08T08:03:28 | 2019-03-08T08:03:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | #!/usr/bin/env python
import argparse
import bed
# CLI: -i input BED file, -d neighbor distance (integer, in bases).
PARSER = argparse.ArgumentParser(description='removes neighboring intervals')
PARSER.add_argument('-i', required=True, help='input')
PARSER.add_argument('-d', required=True, help='neighbor distance')
ARGS = PARSER.parse_args()
bedFile = ARGS.i
distance = int(ARGS.d)  # argparse yields strings; convert explicitly
# Delegate to the project-local bed module's removeNeighbors (behavior
# defined there).
bed.bed(bedFile).removeNeighbors(distance)
| [
"adebali@users.noreply.github.com"
] | adebali@users.noreply.github.com |
e10b665b0f221ffa1d49d77e5e8df9c3f842961c | 8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a | /get_dir_total_size__examples/get_dir_total_size__using__os_walk.py | aca823f1b36c9c1fb9b74ad826f6c01cf9aca6fc | [
"CC-BY-4.0"
] | permissive | stepik/SimplePyScripts | 01092eb1b2c1c33756427abb2debbd0c0abf533f | 3259d88cb58b650549080d6f63b15910ae7e4779 | refs/heads/master | 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://docs.python.org/3/library/os.html#os.walk
import os
from os.path import join, getsize

from common import sizeof_fmt
def get_dir_total_size(dir_name: str) -> (int, str):
    """Recursively total the size of all files under *dir_name*.

    Returns a tuple ``(total_size_in_bytes, human_readable_string)`` where
    the second element is produced by ``common.sizeof_fmt``.

    Relies on the module-level ``import os``: the original file only imported
    ``os`` inside the ``__main__`` block, so importing this module and calling
    the function raised NameError.
    """
    total_size = 0
    # os.walk yields (dirpath, dirnames, filenames) for every directory in
    # the tree; accumulate the file sizes directory by directory.
    for root, _dirs, files in os.walk(dir_name):
        total_size += sum(getsize(join(root, name)) for name in files)
    return total_size, sizeof_fmt(total_size)
if __name__ == '__main__':
    import os  # also needed by get_dir_total_size above
    # Example multi-path usage kept for reference:
    # paths = [r"C:\Users\Default", r"C:\Program Files (x86)", os.path.expanduser(r'~\Desktop')]
    paths = ['..']
    for path in paths:
        path = os.path.abspath(path)
        size, size_str = get_dir_total_size(path)
        print(f'"{path}": {size} bytes / {size_str}')
| [
"ilya.petrash@inbox.ru"
] | ilya.petrash@inbox.ru |
d4eccb9f13bce33a5aae052b88b3c3fbddc99723 | 5f58ee3c7e5c4cca0310f33335fb36ec61ffd8cf | /build/sensing/drivers/camera/packages/hexacam/catkin_generated/pkg.develspace.context.pc.py | 2d4ec2266029d6ad3ed98f5a1b0fbc3bdc0c8a59 | [] | no_license | qqsskk/p3dx_velodyne_slam | 4bad90ea64fb50539982a44f3df7abd0041516db | cafe3f81eb17bc5e8c8edc6b83b84a8d5fbf2805 | refs/heads/master | 2021-06-08T17:05:52.386391 | 2016-10-26T09:44:05 | 2016-10-26T09:44:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lhexacam".split(';') if "-lhexacam" != "" else []
PROJECT_NAME = "hexacam"
PROJECT_SPACE_DIR = "/home/hj/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"jhjune91@gmail.com"
] | jhjune91@gmail.com |
f44ea363fb79cfff9b558fcc7d02d4f6e0133279 | bc647095de9c9dd658b82179ff9e398029f53756 | /tests/unit/test_action.py | aa46b2efe960852218b762f3cbf13cea1662f4c3 | [
"BSD-3-Clause"
] | permissive | ettoreleandrotognoli/python-ami | 2b9711bd484ceaf524f0b2109ac665ed4271065a | 9922ccae5db73d0ee248942b8cfab9f939962024 | refs/heads/master | 2023-04-28T08:30:18.603575 | 2022-05-04T13:25:03 | 2022-05-04T14:02:28 | 43,763,556 | 112 | 68 | BSD-3-Clause | 2023-04-25T09:25:30 | 2015-10-06T16:36:48 | Python | UTF-8 | Python | false | false | 1,804 | py | import unittest
from asterisk.ami import LoginAction, LogoffAction, SimpleAction
class AMIActionTest(unittest.TestCase):
    """Serialization tests for AMI actions (Login, Logoff, SimpleAction)."""

    def compare_actions(self, a1, a2):
        """Assert two serialized actions agree: identical first line plus the
        same set of remaining lines (header order is irrelevant)."""
        lines_left = str(a1).split('\r\n')
        lines_right = str(a2).split('\r\n')
        self.assertEqual(lines_left[0], lines_right[0])
        self.assertSetEqual(set(lines_left[1:]), set(lines_right[1:]))

    def test_login_action(self):
        """LoginAction serializes to an Action/Username/Secret packet."""
        expected = 'Action: Login\r\nUsername: username\r\nSecret: password\r\n'
        action = LoginAction('username', 'password')
        self.compare_actions(action, expected)
        self.assertEqual(action.name, 'Login')
        self.assertEqual(action.Username, 'username')
        self.assertEqual(action.Secret, 'password')
        self.assertDictEqual(action.keys, {'Username': 'username', 'Secret': 'password'})
        self.assertEqual(len(action.variables), 0)

    def test_logoff_action(self):
        """LogoffAction carries no keys and no variables."""
        expected = 'Action: Logoff\r\n'
        action = LogoffAction()
        self.compare_actions(action, expected)
        self.assertEqual(action.name, 'Logoff')
        self.assertEqual(len(action.keys), 0)
        self.assertEqual(len(action.variables), 0)

    def test_with_variable(self):
        """Variables set by item assignment render as 'Variable: k=v' lines."""
        expected = 'Action: GetVar\r\nChannel: channel-1\r\nVariable: <Variable 1>=<Value 1>\r\n'
        action = SimpleAction('GetVar', Channel='channel-1')
        action['<Variable 1>'] = '<Value 1>'
        self.compare_actions(action, expected)
        self.assertEqual(action.Channel, 'channel-1')
        self.assertEqual(action['<Variable 1>'], '<Value 1>')
        # Attribute assignment updates the underlying key in place.
        action.Channel = 'channel-2'
        self.assertEqual(action.Channel, 'channel-2')
"ettore.leandro.tognoli@gmail.com"
] | ettore.leandro.tognoli@gmail.com |
f0e290020c43602881ec59fc0628c8cbd0ac225a | 365967082720f3fda31afccfc237b7a67e8ffc07 | /sorting_searching/quick_sort.py | d95e226eb91824833e53fcd1a73f4ebdd12112a5 | [] | no_license | hulaba/geekInsideYou | ec68dee3fa24d63f5470aa40b600ef34d37c5da1 | 72c1f1b4fbf115db91c908a68c9ac3ca4cb22a4f | refs/heads/master | 2022-12-11T11:11:03.149336 | 2020-09-12T16:12:40 | 2020-09-12T16:12:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,530 | py | '''
parition() function is called as follows:
def quickSort(arr,low,high):
if low < high:
# pi is partitioning index, arr[p] is now
# at right place
pi = partition(arr,low,high)
# Separately sort elements before
# partition and after partition
quickSort(arr, low, pi-1)
quickSort(arr, pi+1, high)
'''
'''
This function takes last element as pivot, places the pivot element
at its correct position in sorted array, and places all smaller (smaller
than pivot) to left of pivot and all greater elements to right of pivot
'''
def partition(arr, low, high):
    """Lomuto partition of arr[low..high], in place.

    Uses arr[high] as the pivot, moves every element smaller than the pivot
    to its left, places the pivot at its final sorted index and returns that
    index.
    """
    pivot = arr[high]
    boundary = low  # first index of the ">= pivot" region
    for scan in range(low, high):
        if arr[scan] < pivot:
            arr[boundary], arr[scan] = arr[scan], arr[boundary]
            boundary += 1
    # Drop the pivot between the two regions.
    arr[boundary], arr[high] = arr[high], arr[boundary]
    return boundary
# {
# Driver Code Starts
# Initial Template for Python 3
def quickSort(arr, low, high):
if low < high:
# pi is partitioning index, arr[p] is now
# at right place
pi = partition(arr, low, high)
# Separately sort elements before
# partition and after partition
quickSort(arr, low, pi - 1)
quickSort(arr, pi + 1, high)
if __name__ == "__main__":
t = int(input())
for i in range(t):
n = int(input())
arr = list(map(int, input().split()))
quickSort(arr, 0, n - 1)
for i in range(n):
print(arr[i], end=" ")
print()
# } Driver Code Ends
| [
"nainamehta2110@gmail.com"
] | nainamehta2110@gmail.com |
cb3f0f4aa798ac93644ed624c67bc75ba6fa96cf | b44a984ac8cfd183e218d56e1ec5d0d3e72d20fd | /High_Frequency/two_pointers/计数双指针/Substring With At Least K Distinct Characters/sliding_window.py | e968ee945eebc7729c33d701cc0cff334db3d4e9 | [] | no_license | atomextranova/leetcode-python | 61381949f2e78805dfdd0fb221f8497b94b7f12b | 5fce59e6b9c4079b49e2cfb2a6d2a61a0d729c56 | refs/heads/master | 2021-07-15T20:32:12.592607 | 2020-09-21T00:10:27 | 2020-09-21T00:10:27 | 207,622,038 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | class Solution:
"""
@param s: a string
@param k: an integer
@return: the number of substrings there are that contain at least k distinct characters
"""
def kDistinctCharacters(self, s, k):
# Write your code here
if len(s) < k:
return 0
cur_chars = dict()
left = 0
right = 0
total = 0
for right in range(len(s)):
cur_char = s[right]
cur_chars[cur_char] = cur_chars.get(cur_char, 0) + 1
while len(cur_chars) >= k:
cur_chars[s[left]] -= 1
if cur_chars[s[left]] == 0:
del cur_chars[s[left]]
left += 1
total += left
return total | [
"atomextranova@gmail.com"
] | atomextranova@gmail.com |
5d655badb02d2592a799ffa107983c0328663925 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03567/s531693844.py | 3ed11284ed2f73c3d938aa72611285b742a6c509 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | S = input().strip()
for i in range(len(S)-1):
if S[i:i+2] == 'AC':
print('Yes')
exit()
print('No')
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
66cf4ff262f64c8cbf47aea01229c40a532336c3 | a81d21f98dd558416f8731f001cb8151d8309f4f | /interviewbit/test/test_excel_column_number.py | e1b9a6d25f15bc235f307c800a1150e10dc893e6 | [] | no_license | marquesarthur/programming_problems | 1128c38e65aade27e2435f7987d7ee2b328fda51 | 2f7df25d0d735f726b7012e4aa2417dee50526d9 | refs/heads/master | 2022-01-25T18:19:02.575634 | 2022-01-18T02:07:06 | 2022-01-18T02:07:06 | 32,213,919 | 2 | 0 | null | 2020-10-13T01:29:08 | 2015-03-14T13:44:06 | Python | UTF-8 | Python | false | false | 1,026 | py | import unittest
from interviewbit.math import excel_column_number
class TestExcelColumnNumber(unittest.TestCase):
def test_base_case(self):
s = excel_column_number.Solution()
A = 'A'
B = 1
result = s.titleToNumber(A)
self.assertEqual(result, B)
A = 'B'
B = 2
result = s.titleToNumber(A)
self.assertEqual(result, B)
A = 'C'
B = 3
result = s.titleToNumber(A)
self.assertEqual(result, B)
A = 'Z'
B = 26
result = s.titleToNumber(A)
self.assertEqual(result, B)
A = 'AA'
B = 27
result = s.titleToNumber(A)
self.assertEqual(result, B)
A = 'AB'
B = 28
result = s.titleToNumber(A)
self.assertEqual(result, B)
A = 'CB'
B = 80
result = s.titleToNumber(A)
self.assertEqual(result, B)
A = 'AAC'
B = 705
result = s.titleToNumber(A)
self.assertEqual(result, B)
| [
"marques.art@gmail.com"
] | marques.art@gmail.com |
8234cbe836cb2e4d9efcf0914b8cd6810e01c590 | 71217d0679438a49749f7e1a0dda2d0aab8f6c00 | /sdk/python/kfp/cli/cli.py | 3c3f497dfbac037f01f99c66ea4da57808cda171 | [
"Apache-2.0"
] | permissive | RedbackThomson/pipelines | 65e8c7411e32419404d9c0729798a8bf63c3280d | a5b3e7e3f00feb8dc908b84db4158409c20aa594 | refs/heads/master | 2023-01-30T00:46:02.086217 | 2020-04-28T16:42:06 | 2020-04-28T16:42:06 | 255,702,063 | 1 | 0 | Apache-2.0 | 2020-04-14T19:11:53 | 2020-04-14T19:11:52 | null | UTF-8 | Python | false | false | 1,978 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
import logging
import sys
from .._client import Client
from .run import run
from .pipeline import pipeline
from .diagnose_me_cli import diagnose_me
@click.group()
@click.option('--endpoint', help='Endpoint of the KFP API service to connect.')
@click.option('--iap-client-id', help='Client ID for IAP protected endpoint.')
@click.option('-n', '--namespace', default='kubeflow', help='Kubernetes namespace to connect to the KFP API.')
@click.option('--other-client-id', help='Client ID for IAP protected endpoint to obtain the refresh token.')
@click.option('--other-client-secret', help='Client ID for IAP protected endpoint to obtain the refresh token.')
@click.pass_context
def cli(ctx, endpoint, iap_client_id, namespace, other_client_id, other_client_secret):
"""kfp is the command line interface to KFP service."""
if ctx.invoked_subcommand == 'diagnose_me':
# Do not create a client for diagnose_me
return
ctx.obj['client'] = Client(endpoint, iap_client_id, namespace, other_client_id, other_client_secret)
ctx.obj['namespace']= namespace
def main():
logging.basicConfig(format='%(message)s', level=logging.INFO)
cli.add_command(run)
cli.add_command(pipeline)
cli.add_command(diagnose_me,'diagnose_me')
try:
cli(obj={}, auto_envvar_prefix='KFP')
except Exception as e:
logging.error(e)
sys.exit(1)
| [
"k8s-ci-robot@users.noreply.github.com"
] | k8s-ci-robot@users.noreply.github.com |
0e599945ce4472d58f9866a53ab0704c0f221798 | e811a08b8b653da94e516ca147ec49b534f74a62 | /inflearn/selenium/youtube_crawling.py | 5ee1344531250a2e894b10b021c4dc663b4896ac | [] | no_license | HoYaStudy/Python_Study | 0feb4a9ba7e68ebea6b2db15b20a3680f979a4de | 59c2cc093ae8ae87c8e07365cc432d87ded29ccc | refs/heads/master | 2023-02-07T23:40:16.135565 | 2023-01-24T06:17:58 | 2023-01-24T06:17:58 | 200,445,372 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,630 | py | import time
import urllib.request as req
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from io import BytesIO
import xlsxwriter
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--mute-audio')
# browser = webdriver.Chrome('./chromedriver.exe')
browser = webdriver.Chrome('./chromedriver.exe', options=chrome_options)
browser.implicitly_wait(2)
browser.set_window_size(1920, 1280)
browser.get('https://www.youtube.com/watch?v=oS8f7fbMHbI')
time.sleep(2)
WebDriverWait(browser, 2).until(EC.presence_of_element_located((By.TAG_NAME, 'html'))).send_keys(Keys.PAGE_DOWN)
time.sleep(2)
# print('Before Page Contents: {}'.format(browser.page_source))
scroll_pause_time = 2
last_height = browser.execute_script('return document.documentElement.scrollHeight')
# last_height = browser.execute_script('return document.body.scrollHeight') # IE
while True:
browser.execute_script('window.scrollTo(0, document.documentElement.scrollHeight)')
time.sleep(scroll_pause_time)
new_height = browser.execute_script('return document.documentElement.scrollHeight')
print('Last Height: {}, Current Height: {}'.format(last_height, new_height))
if new_height == last_height:
break
last_height = new_height
workbook = xlsxwriter.Workbook('./result.xlsx')
worksheet = workbook.add_worksheet()
row = 2
soup = BeautifulSoup(browser.page_source, 'html.parser')
top_level = soup.select('div#menu-container yt-formatted-string#text')
comment = soup.select('ytd-comment-renderer#comment')
for dom in comment:
img_src = dom.select_one('#img').get('src')
author = dom.select_one('#author-text > span').text.strip()
content = dom.select_one('#content-text').text.strip()
vote = dom.select_one('#vote-count-middle').text.strip()
print('Thumbnail: {}'.format(img_src if img_src else 'None'))
print('Author: {}'.format(author))
print('Content: {}'.format(content))
print('Vote: {}'.format(vote))
worksheet.write('A%s' % row, author)
worksheet.write('B%s' % row, content)
worksheet.write('B%s' % row, vote)
if img_src:
img_data = BytesIO(req.urlopen(img_src).read())
worksheet.insert_image('D%s' % row, author, {'image_data': img_data})
else:
worksheet.write('D%s' % row, 'None')
row += 1
browser.quit()
workbook.close()
| [
"hoya128@gmail.com"
] | hoya128@gmail.com |
0741ed35c4873218e7ae43cd5ed12585cd1c90a2 | 1c1ae14f9bcbac9761d9486209cd6100098db5df | /poem/core/keypoint_profiles.py | 81ce607cab6bb5ffc2b520c9284921ff45e7deed | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | TopHK/google-research | 15aa6323217414c88502cad7aa5a412cbf06b68f | 501208f069810f2024febed13a9972dea75c8020 | refs/heads/master | 2022-12-04T14:11:43.324779 | 2020-08-14T06:31:35 | 2020-08-14T06:35:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,899 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keypoint profile class and utility functions."""
import abc
import enum
import six
import tensorflow as tf
from poem.core import keypoint_utils
class LeftRightType(enum.Enum):
  """Keypoint/segment left/right type.

  Labels a keypoint or skeleton segment as belonging to the left or right
  side of the body, to the body midline (CENTRAL), or as undetermined
  (UNKNOWN).
  """
  UNKNOWN = 0  # Side cannot be determined.
  CENTRAL = 1  # On the body midline (also the result of merging LEFT + RIGHT).
  LEFT = 2  # Left side of the body.
  RIGHT = 3  # Right side of the body.
def infer_keypoint_left_right_type(left_right_types, indices):
  """Infers the combined left/right type of a (multi-point) keypoint.

  Combination rules:
  1. Any UNKNOWN constituent makes the result UNKNOWN.
  2. Identical types combine to themselves.
  3. CENTRAL combined with LEFT or RIGHT yields LEFT or RIGHT, respectively.
  4. LEFT combined with RIGHT yields CENTRAL.

  Args:
    left_right_types: A list of LeftRightType enum values for all keypoints.
    indices: A list of integers for keypoint indices to combine.

  Returns:
    A LeftRightType enum value for the combined type. Returns UNKNOWN for an
    empty index list.

  Raises:
    ValueError: If any index is out of range.
  """
  if not indices:
    return LeftRightType.UNKNOWN

  num_types = len(left_right_types)

  def type_at(index):
    # Bounds-check before lookup so a bad index fails loudly.
    if not 0 <= index < num_types:
      raise ValueError('Left/right type index is out of range: %d.' % index)
    return left_right_types[index]

  if len(indices) == 1:
    return type_at(indices[0])

  merged = LeftRightType.CENTRAL
  for index in indices:
    current = type_at(index)
    if current == LeftRightType.UNKNOWN:
      return LeftRightType.UNKNOWN
    if merged == LeftRightType.CENTRAL:
      merged = current
    elif current not in (LeftRightType.CENTRAL, merged):
      # LEFT merged with RIGHT collapses to CENTRAL.
      merged = LeftRightType.CENTRAL
  return merged
def infer_segment_left_right_type(left_right_types, start_indices, end_indices):
  """Infers the left/right type of a segment from its two endpoint sets.

  Each endpoint set is first merged with `infer_keypoint_left_right_type`,
  then the two endpoint types are combined with the same rules: any UNKNOWN
  yields UNKNOWN, CENTRAL defers to the other side, and LEFT paired with
  RIGHT yields CENTRAL.

  Args:
    left_right_types: A list of LeftRightType enum values for all keypoints.
    start_indices: A list of integers for LHS keypoint indices.
    end_indices: A list of integers for RHS keypoint indices.

  Returns:
    A LeftRightType enum value for the inferred segment type.
  """
  start_type = infer_keypoint_left_right_type(left_right_types, start_indices)
  end_type = infer_keypoint_left_right_type(left_right_types, end_indices)
  if LeftRightType.UNKNOWN in (start_type, end_type):
    return LeftRightType.UNKNOWN
  if start_type == LeftRightType.CENTRAL:
    return end_type
  if end_type == LeftRightType.CENTRAL:
    return start_type
  return start_type if start_type == end_type else LeftRightType.CENTRAL
class KeypointProfile(six.with_metaclass(abc.ABCMeta, object)):
  """Keypoint profile base class.

  A keypoint profile describes a fixed keypoint layout: the ordered keypoint
  names and their left/right types, the keypoints that define the
  normalization offset and scale, and the keypoint index pairs that form
  skeleton segments. Optional body-part keypoint names (head, neck, etc.) map
  semantic parts to profile keypoints; accessing a part that was not
  specified raises ValueError.
  """

  def __init__(self,
               name,
               keypoint_names,
               offset_keypoint_names,
               scale_keypoint_name_pairs,
               scale_distance_reduction_fn,
               scale_unit,
               segment_name_pairs,
               head_keypoint_name=None,
               neck_keypoint_name=None,
               left_shoulder_keypoint_name=None,
               right_shoulder_keypoint_name=None,
               left_elbow_keypoint_name=None,
               right_elbow_keypoint_name=None,
               left_wrist_keypoint_name=None,
               right_wrist_keypoint_name=None,
               spine_keypoint_name=None,
               pelvis_keypoint_name=None,
               left_hip_keypoint_name=None,
               right_hip_keypoint_name=None,
               left_knee_keypoint_name=None,
               right_knee_keypoint_name=None,
               left_ankle_keypoint_name=None,
               right_ankle_keypoint_name=None):
    """Initializer.

    Args:
      name: A string for the profile name.
      keypoint_names: A list of (keypoint_name, LeftRightType) tuples for all
        keypoints, in keypoint index order.
      offset_keypoint_names: A list of strings for the keypoint name(s) used
        as the normalization offset.
      scale_keypoint_name_pairs: A list of (start_names, end_names) tuples;
        the distance between each pair contributes to the normalization
        scale.
      scale_distance_reduction_fn: A function that reduces the per-pair scale
        distances to a single value, e.g. tf.math.reduce_sum.
      scale_unit: A float the reduced scale distance is normalized to.
      segment_name_pairs: A list of (start_names, end_names) tuples defining
        skeleton segments.
      head_keypoint_name: A list of keypoint names for the head, or None.
      neck_keypoint_name: A list of keypoint names for the neck, or None.
      left_shoulder_keypoint_name: A list of keypoint names for the left
        shoulder, or None.
      right_shoulder_keypoint_name: A list of keypoint names for the right
        shoulder, or None.
      left_elbow_keypoint_name: A list of keypoint names for the left elbow,
        or None.
      right_elbow_keypoint_name: A list of keypoint names for the right
        elbow, or None.
      left_wrist_keypoint_name: A list of keypoint names for the left wrist,
        or None.
      right_wrist_keypoint_name: A list of keypoint names for the right
        wrist, or None.
      spine_keypoint_name: A list of keypoint names for the spine, or None.
      pelvis_keypoint_name: A list of keypoint names for the pelvis, or None.
      left_hip_keypoint_name: A list of keypoint names for the left hip, or
        None.
      right_hip_keypoint_name: A list of keypoint names for the right hip, or
        None.
      left_knee_keypoint_name: A list of keypoint names for the left knee, or
        None.
      right_knee_keypoint_name: A list of keypoint names for the right knee,
        or None.
      left_ankle_keypoint_name: A list of keypoint names for the left ankle,
        or None.
      right_ankle_keypoint_name: A list of keypoint names for the right
        ankle, or None.
    """
    self._name = name
    # Split (name, type) tuples into two parallel, index-aligned lists.
    self._keypoint_names = [
        keypoint_name for keypoint_name, _ in keypoint_names
    ]
    self._keypoint_left_right_types = [
        left_right_type for _, left_right_type in keypoint_names
    ]
    # Resolve all name-based configuration to keypoint indices up front.
    self._offset_keypoint_index = [
        self._keypoint_names.index(keypoint_name)
        for keypoint_name in offset_keypoint_names
    ]
    self._scale_keypoint_index_pairs = []
    for start_names, end_names in scale_keypoint_name_pairs:
      self._scale_keypoint_index_pairs.append(
          ([self._keypoint_names.index(name) for name in start_names],
           [self._keypoint_names.index(name) for name in end_names]))
    self._scale_distance_reduction_fn = scale_distance_reduction_fn
    self._scale_unit = scale_unit

    self._segment_index_pairs = []
    for start_names, end_names in segment_name_pairs:
      self._segment_index_pairs.append(
          ([self._keypoint_names.index(name) for name in start_names],
           [self._keypoint_names.index(name) for name in end_names]))

    # Body-part names are kept as-is; lookups happen lazily in the
    # corresponding properties so unspecified parts only fail when accessed.
    self._head_keypoint_name = head_keypoint_name
    self._neck_keypoint_name = neck_keypoint_name
    self._left_shoulder_keypoint_name = left_shoulder_keypoint_name
    self._right_shoulder_keypoint_name = right_shoulder_keypoint_name
    self._left_elbow_keypoint_name = left_elbow_keypoint_name
    self._right_elbow_keypoint_name = right_elbow_keypoint_name
    self._left_wrist_keypoint_name = left_wrist_keypoint_name
    self._right_wrist_keypoint_name = right_wrist_keypoint_name
    self._spine_keypoint_name = spine_keypoint_name
    self._pelvis_keypoint_name = pelvis_keypoint_name
    self._left_hip_keypoint_name = left_hip_keypoint_name
    self._right_hip_keypoint_name = right_hip_keypoint_name
    self._left_knee_keypoint_name = left_knee_keypoint_name
    self._right_knee_keypoint_name = right_knee_keypoint_name
    self._left_ankle_keypoint_name = left_ankle_keypoint_name
    self._right_ankle_keypoint_name = right_ankle_keypoint_name

  @property
  def name(self):
    """Gets keypoint profile name."""
    return self._name

  @property
  def keypoint_names(self):
    """Gets keypoint names."""
    return self._keypoint_names

  @property
  @abc.abstractmethod
  def keypoint_dim(self):
    """Gets keypoint dimensionality."""
    raise NotImplementedError

  @property
  def keypoint_num(self):
    """Gets number of keypoints."""
    return len(self._keypoint_names)

  def keypoint_left_right_type(self, keypoint_index):
    """Gets keypoint left/right type given index.

    Args:
      keypoint_index: An integer or a list of integers for keypoint indices.

    Returns:
      A LeftRightType enum value for the (combined) type.
    """
    if isinstance(keypoint_index, int):
      keypoint_index = [keypoint_index]
    return infer_keypoint_left_right_type(self._keypoint_left_right_types,
                                          keypoint_index)

  def segment_left_right_type(self, start_index, end_index):
    """Gets segment left/right type given endpoint indices.

    Args:
      start_index: An integer or a list of integers for LHS keypoint indices.
      end_index: An integer or a list of integers for RHS keypoint indices.

    Returns:
      A LeftRightType enum value for the segment type.
    """
    if isinstance(start_index, int):
      start_index = [start_index]
    if isinstance(end_index, int):
      end_index = [end_index]
    return infer_segment_left_right_type(self._keypoint_left_right_types,
                                         start_index, end_index)

  @property
  def offset_keypoint_index(self):
    """Gets offset keypoint index."""
    return self._offset_keypoint_index

  @property
  def scale_keypoint_index_pairs(self):
    """Gets scale keypoint index pairs."""
    return self._scale_keypoint_index_pairs

  @property
  def scale_unit(self):
    """Gets scale unit."""
    return self._scale_unit

  @property
  def segment_index_pairs(self):
    """Gets segment index pairs."""
    return self._segment_index_pairs

  @property
  def keypoint_affinity_matrix(self):
    """Gets keypoint affinity matrix.

    If a segment has multi-point end, all pairs of relevant points are
    considered as in affinity.

    Returns:
      matrix: A double list of floats for the keypoint affinity matrix.

    Raises:
      ValueError: If affinity matrix has any isolated node.
    """
    matrix = [[0.0
               for _ in range(self.keypoint_num)]
              for _ in range(self.keypoint_num)]

    # Self-affinity.
    for i in range(self.keypoint_num):
      matrix[i][i] = 1.0

    # For each segment, connect every point within each endpoint set and
    # every cross-endpoint point pair, symmetrically.
    for lhs_index, rhs_index in self._segment_index_pairs:
      for i in lhs_index:
        for j in lhs_index:
          matrix[i][j] = 1.0
          matrix[j][i] = 1.0
      for i in rhs_index:
        for j in rhs_index:
          matrix[i][j] = 1.0
          matrix[j][i] = 1.0
      for i in lhs_index:
        for j in rhs_index:
          matrix[i][j] = 1.0
          matrix[j][i] = 1.0

    # Check if the affinity matrix is valid, i.e., each node must have degree
    # greater than 1 (no isolated node).
    for row in matrix:
      if sum(row) <= 1.0:
        raise ValueError(
            'Affinity matrix has a node with degree less than 2: %s.' %
            str(matrix))

    return matrix

  def keypoint_index(self, keypoint_name, raise_error_if_not_found=False):
    """Gets keypoint index given name.

    If `raise_error_if_not_found` is True, raises ValueError if keypoint does
    not exist. Otherwise, returns -1 if keypoint does not exist.

    Args:
      keypoint_name: A string for keypoint name to find index of.
      raise_error_if_not_found: A boolean for whether to raise ValueError if
        keypoint does not exist.

    Returns:
      An integer for keypoint index.

    Raises:
      ValueError: If keypoint does not exist and `raise_error_if_not_found` is
        True.
    """
    if keypoint_name in self._keypoint_names:
      return self._keypoint_names.index(keypoint_name)
    if raise_error_if_not_found:
      raise ValueError('Failed to find keypoint: `%s`.' % str(keypoint_name))
    return -1

  def _part_keypoint_index(self, part_keypoint_names, part_name):
    """Looks up keypoint indices for an optional body part.

    Shared implementation behind all `*_keypoint_index` part properties.

    Args:
      part_keypoint_names: A list of strings for the part's keypoint names,
        or None if the part was not specified at construction time.
      part_name: A string for the human-readable part name used in error
        messages, e.g. 'Head' or 'Left shoulder'.

    Returns:
      A list of integers for keypoint indices.

    Raises:
      ValueError: If the part was not specified or any keypoint name is
        unknown.
    """
    if not part_keypoint_names:
      raise ValueError('%s keypoint is not specified.' % part_name)
    return [
        self.keypoint_index(name, raise_error_if_not_found=True)
        for name in part_keypoint_names
    ]

  @property
  def head_keypoint_index(self):
    """Gets head keypoint index."""
    return self._part_keypoint_index(self._head_keypoint_name, 'Head')

  @property
  def neck_keypoint_index(self):
    """Gets neck keypoint index."""
    return self._part_keypoint_index(self._neck_keypoint_name, 'Neck')

  @property
  def left_shoulder_keypoint_index(self):
    """Gets left shoulder keypoint index."""
    return self._part_keypoint_index(self._left_shoulder_keypoint_name,
                                     'Left shoulder')

  @property
  def right_shoulder_keypoint_index(self):
    """Gets right shoulder keypoint index."""
    return self._part_keypoint_index(self._right_shoulder_keypoint_name,
                                     'Right shoulder')

  @property
  def left_elbow_keypoint_index(self):
    """Gets left elbow keypoint index."""
    return self._part_keypoint_index(self._left_elbow_keypoint_name,
                                     'Left elbow')

  @property
  def right_elbow_keypoint_index(self):
    """Gets right elbow keypoint index."""
    return self._part_keypoint_index(self._right_elbow_keypoint_name,
                                     'Right elbow')

  @property
  def left_wrist_keypoint_index(self):
    """Gets left wrist keypoint index."""
    return self._part_keypoint_index(self._left_wrist_keypoint_name,
                                     'Left wrist')

  @property
  def right_wrist_keypoint_index(self):
    """Gets right wrist keypoint index."""
    return self._part_keypoint_index(self._right_wrist_keypoint_name,
                                     'Right wrist')

  @property
  def spine_keypoint_index(self):
    """Gets spine keypoint index."""
    return self._part_keypoint_index(self._spine_keypoint_name, 'Spine')

  @property
  def pelvis_keypoint_index(self):
    """Gets pelvis keypoint index."""
    return self._part_keypoint_index(self._pelvis_keypoint_name, 'Pelvis')

  @property
  def left_hip_keypoint_index(self):
    """Gets left hip keypoint index."""
    return self._part_keypoint_index(self._left_hip_keypoint_name, 'Left hip')

  @property
  def right_hip_keypoint_index(self):
    """Gets right hip keypoint index."""
    return self._part_keypoint_index(self._right_hip_keypoint_name,
                                     'Right hip')

  @property
  def left_knee_keypoint_index(self):
    """Gets left knee keypoint index."""
    return self._part_keypoint_index(self._left_knee_keypoint_name,
                                     'Left knee')

  @property
  def right_knee_keypoint_index(self):
    """Gets right knee keypoint index."""
    return self._part_keypoint_index(self._right_knee_keypoint_name,
                                     'Right knee')

  @property
  def left_ankle_keypoint_index(self):
    """Gets left ankle keypoint index."""
    return self._part_keypoint_index(self._left_ankle_keypoint_name,
                                     'Left ankle')

  @property
  def right_ankle_keypoint_index(self):
    """Gets right ankle keypoint index."""
    return self._part_keypoint_index(self._right_ankle_keypoint_name,
                                     'Right ankle')

  def normalize(self, keypoints, keypoint_masks=None):
    """Normalizes keypoints by the profile's offset and scale settings.

    Args:
      keypoints: A tensor for keypoints. Assumed shape is
        [..., keypoint_num, keypoint_dim] -- confirm in `keypoint_utils`.
      keypoint_masks: Unused; kept for interface compatibility.

    Returns:
      The result of `keypoint_utils.normalize_points` for the given points.
    """
    del keypoint_masks
    return keypoint_utils.normalize_points(
        keypoints,
        offset_point_indices=self._offset_keypoint_index,
        scale_distance_point_index_pairs=self._scale_keypoint_index_pairs,
        scale_distance_reduction_fn=self._scale_distance_reduction_fn,
        scale_unit=self._scale_unit)

  def denormalize(self,
                  normalized_keypoints,
                  offset_points,
                  scale_distances,
                  keypoint_masks=None):
    """Denormalizes keypoints (inverse of the scale/offset normalization).

    Args:
      normalized_keypoints: A tensor for normalized keypoints.
      offset_points: A tensor for the offsets removed during normalization.
      scale_distances: A tensor for the scale distances used during
        normalization.
      keypoint_masks: Unused; kept for interface compatibility.

    Returns:
      A tensor for denormalized keypoints.
    """
    del keypoint_masks
    return (normalized_keypoints / self._scale_unit * scale_distances +
            offset_points)
class KeypointProfile3D(KeypointProfile):
  """3D keypoint profile base class.

  Fixes keypoint dimensionality to 3 and supplies the 3D defaults for
  normalization (sum-reduced scale distances, scale unit of 1.0).
  """

  def __init__(self,
               name,
               keypoint_names,
               offset_keypoint_names,
               scale_keypoint_name_pairs,
               segment_name_pairs,
               scale_distance_reduction_fn=tf.math.reduce_sum,
               scale_unit=1.0,
               head_keypoint_name=None,
               neck_keypoint_name=None,
               left_shoulder_keypoint_name=None,
               right_shoulder_keypoint_name=None,
               left_elbow_keypoint_name=None,
               right_elbow_keypoint_name=None,
               left_wrist_keypoint_name=None,
               right_wrist_keypoint_name=None,
               spine_keypoint_name=None,
               pelvis_keypoint_name=None,
               left_hip_keypoint_name=None,
               right_hip_keypoint_name=None,
               left_knee_keypoint_name=None,
               right_knee_keypoint_name=None,
               left_ankle_keypoint_name=None,
               right_ankle_keypoint_name=None):
    """Initializer."""
    # Forward every argument to the base class unchanged.
    base_kwargs = dict(
        name=name,
        keypoint_names=keypoint_names,
        offset_keypoint_names=offset_keypoint_names,
        scale_keypoint_name_pairs=scale_keypoint_name_pairs,
        scale_distance_reduction_fn=scale_distance_reduction_fn,
        scale_unit=scale_unit,
        segment_name_pairs=segment_name_pairs,
        head_keypoint_name=head_keypoint_name,
        neck_keypoint_name=neck_keypoint_name,
        left_shoulder_keypoint_name=left_shoulder_keypoint_name,
        right_shoulder_keypoint_name=right_shoulder_keypoint_name,
        left_elbow_keypoint_name=left_elbow_keypoint_name,
        right_elbow_keypoint_name=right_elbow_keypoint_name,
        left_wrist_keypoint_name=left_wrist_keypoint_name,
        right_wrist_keypoint_name=right_wrist_keypoint_name,
        spine_keypoint_name=spine_keypoint_name,
        pelvis_keypoint_name=pelvis_keypoint_name,
        left_hip_keypoint_name=left_hip_keypoint_name,
        right_hip_keypoint_name=right_hip_keypoint_name,
        left_knee_keypoint_name=left_knee_keypoint_name,
        right_knee_keypoint_name=right_knee_keypoint_name,
        left_ankle_keypoint_name=left_ankle_keypoint_name,
        right_ankle_keypoint_name=right_ankle_keypoint_name)
    super(KeypointProfile3D, self).__init__(**base_kwargs)

  @property
  def keypoint_dim(self):
    """Gets keypoint dimensionality, which is always 3."""
    return 3
class KeypointProfile2D(KeypointProfile):
  """2D keypoint profile base class.

  Fixes keypoint dimensionality to 2, supplies the 2D defaults for
  normalization (max-reduced scale distances, scale unit of 0.5), and
  optionally validates and stores a dictionary of compatible keypoint name
  lists, each parallel to this profile's keypoint list.
  """

  def __init__(self,
               name,
               keypoint_names,
               offset_keypoint_names,
               scale_keypoint_name_pairs,
               segment_name_pairs,
               compatible_keypoint_name_dict=None,
               scale_distance_reduction_fn=tf.math.reduce_max,
               scale_unit=0.5,
               head_keypoint_name=None,
               neck_keypoint_name=None,
               left_shoulder_keypoint_name=None,
               right_shoulder_keypoint_name=None,
               left_elbow_keypoint_name=None,
               right_elbow_keypoint_name=None,
               left_wrist_keypoint_name=None,
               right_wrist_keypoint_name=None,
               spine_keypoint_name=None,
               pelvis_keypoint_name=None,
               left_hip_keypoint_name=None,
               right_hip_keypoint_name=None,
               left_knee_keypoint_name=None,
               right_knee_keypoint_name=None,
               left_ankle_keypoint_name=None,
               right_ankle_keypoint_name=None):
    """Initializer."""
    # Forward every shared argument to the base class unchanged.
    base_kwargs = dict(
        name=name,
        keypoint_names=keypoint_names,
        offset_keypoint_names=offset_keypoint_names,
        scale_keypoint_name_pairs=scale_keypoint_name_pairs,
        scale_distance_reduction_fn=scale_distance_reduction_fn,
        scale_unit=scale_unit,
        segment_name_pairs=segment_name_pairs,
        head_keypoint_name=head_keypoint_name,
        neck_keypoint_name=neck_keypoint_name,
        left_shoulder_keypoint_name=left_shoulder_keypoint_name,
        right_shoulder_keypoint_name=right_shoulder_keypoint_name,
        left_elbow_keypoint_name=left_elbow_keypoint_name,
        right_elbow_keypoint_name=right_elbow_keypoint_name,
        left_wrist_keypoint_name=left_wrist_keypoint_name,
        right_wrist_keypoint_name=right_wrist_keypoint_name,
        spine_keypoint_name=spine_keypoint_name,
        pelvis_keypoint_name=pelvis_keypoint_name,
        left_hip_keypoint_name=left_hip_keypoint_name,
        right_hip_keypoint_name=right_hip_keypoint_name,
        left_knee_keypoint_name=left_knee_keypoint_name,
        right_knee_keypoint_name=right_knee_keypoint_name,
        left_ankle_keypoint_name=left_ankle_keypoint_name,
        right_ankle_keypoint_name=right_ankle_keypoint_name)
    super(KeypointProfile2D, self).__init__(**base_kwargs)

    # Accept the compatibility dict only if every entry lists exactly one
    # name per keypoint in this profile.
    self._compatible_keypoint_name_dict = {}
    if compatible_keypoint_name_dict is not None:
      expected_size = len(self._keypoint_names)
      for compatible_names in compatible_keypoint_name_dict.values():
        if len(compatible_names) != expected_size:
          raise ValueError('Compatible keypoint names must be of the same size '
                           'as keypoint names.')
      self._compatible_keypoint_name_dict = compatible_keypoint_name_dict

  @property
  def keypoint_dim(self):
    """Gets keypoint dimensionality, which is always 2."""
    return 2

  @property
  def compatible_keypoint_name_dict(self):
    """Gets the compatible keypoint name dictionary."""
    return self._compatible_keypoint_name_dict
class Std16KeypointProfile3D(KeypointProfile3D):
  """Standard 3D 16-keypoint profile.

  Covers head, neck, spine, pelvis, and left/right shoulder, elbow, wrist,
  hip, knee, and ankle keypoints.
  """

  def __init__(self):
    """Initializer."""
    super(Std16KeypointProfile3D,
          self).__init__(
              name='3DSTD16',
              # (keypoint name, left/right type) tuples, in index order.
              keypoint_names=[('HEAD', LeftRightType.CENTRAL),
                              ('NECK', LeftRightType.CENTRAL),
                              ('LEFT_SHOULDER', LeftRightType.LEFT),
                              ('RIGHT_SHOULDER', LeftRightType.RIGHT),
                              ('LEFT_ELBOW', LeftRightType.LEFT),
                              ('RIGHT_ELBOW', LeftRightType.RIGHT),
                              ('LEFT_WRIST', LeftRightType.LEFT),
                              ('RIGHT_WRIST', LeftRightType.RIGHT),
                              ('SPINE', LeftRightType.CENTRAL),
                              ('PELVIS', LeftRightType.CENTRAL),
                              ('LEFT_HIP', LeftRightType.LEFT),
                              ('RIGHT_HIP', LeftRightType.RIGHT),
                              ('LEFT_KNEE', LeftRightType.LEFT),
                              ('RIGHT_KNEE', LeftRightType.RIGHT),
                              ('LEFT_ANKLE', LeftRightType.LEFT),
                              ('RIGHT_ANKLE', LeftRightType.RIGHT)],
              # Normalization offset is taken at the pelvis.
              offset_keypoint_names=['PELVIS'],
              # Normalization scale comes from the neck-spine and
              # spine-pelvis distances.
              scale_keypoint_name_pairs=[(['NECK'], ['SPINE']),
                                         (['SPINE'], ['PELVIS'])],
              # Skeleton segment connectivity.
              segment_name_pairs=[(['HEAD'], ['NECK']),
                                  (['NECK'], ['LEFT_SHOULDER']),
                                  (['NECK'], ['RIGHT_SHOULDER']),
                                  (['NECK'], ['SPINE']),
                                  (['LEFT_SHOULDER'], ['LEFT_ELBOW']),
                                  (['RIGHT_SHOULDER'], ['RIGHT_ELBOW']),
                                  (['LEFT_ELBOW'], ['LEFT_WRIST']),
                                  (['RIGHT_ELBOW'], ['RIGHT_WRIST']),
                                  (['SPINE'], ['PELVIS']),
                                  (['PELVIS'], ['LEFT_HIP']),
                                  (['PELVIS'], ['RIGHT_HIP']),
                                  (['LEFT_HIP'], ['LEFT_KNEE']),
                                  (['RIGHT_HIP'], ['RIGHT_KNEE']),
                                  (['LEFT_KNEE'], ['LEFT_ANKLE']),
                                  (['RIGHT_KNEE'], ['RIGHT_ANKLE'])],
              # Semantic body-part to keypoint name mapping; each part maps
              # to exactly one keypoint in this profile.
              head_keypoint_name=['HEAD'],
              neck_keypoint_name=['NECK'],
              left_shoulder_keypoint_name=['LEFT_SHOULDER'],
              right_shoulder_keypoint_name=['RIGHT_SHOULDER'],
              left_elbow_keypoint_name=['LEFT_ELBOW'],
              right_elbow_keypoint_name=['RIGHT_ELBOW'],
              left_wrist_keypoint_name=['LEFT_WRIST'],
              right_wrist_keypoint_name=['RIGHT_WRIST'],
              spine_keypoint_name=['SPINE'],
              pelvis_keypoint_name=['PELVIS'],
              left_hip_keypoint_name=['LEFT_HIP'],
              right_hip_keypoint_name=['RIGHT_HIP'],
              left_knee_keypoint_name=['LEFT_KNEE'],
              right_knee_keypoint_name=['RIGHT_KNEE'],
              left_ankle_keypoint_name=['LEFT_ANKLE'],
              right_ankle_keypoint_name=['RIGHT_ANKLE'])
class Std13KeypointProfile3D(KeypointProfile3D):
  """Standard 3D 13-keypoint profile.

  Like the 16-keypoint standard profile but without dedicated neck, spine,
  and pelvis keypoints; those parts are represented as lists of existing
  keypoints (e.g. both shoulders for the neck).
  """

  def __init__(self):
    """Initializer."""
    super(Std13KeypointProfile3D, self).__init__(
        name='3DSTD13',
        # (keypoint name, left/right type) tuples, in index order.
        keypoint_names=[('HEAD', LeftRightType.CENTRAL),
                        ('LEFT_SHOULDER', LeftRightType.LEFT),
                        ('RIGHT_SHOULDER', LeftRightType.RIGHT),
                        ('LEFT_ELBOW', LeftRightType.LEFT),
                        ('RIGHT_ELBOW', LeftRightType.RIGHT),
                        ('LEFT_WRIST', LeftRightType.LEFT),
                        ('RIGHT_WRIST', LeftRightType.RIGHT),
                        ('LEFT_HIP', LeftRightType.LEFT),
                        ('RIGHT_HIP', LeftRightType.RIGHT),
                        ('LEFT_KNEE', LeftRightType.LEFT),
                        ('RIGHT_KNEE', LeftRightType.RIGHT),
                        ('LEFT_ANKLE', LeftRightType.LEFT),
                        ('RIGHT_ANKLE', LeftRightType.RIGHT)],
        # Normalization offset uses both hips (no explicit pelvis keypoint).
        offset_keypoint_names=['LEFT_HIP', 'RIGHT_HIP'],
        # Normalization scale comes from the shoulders-to-hips distance.
        scale_keypoint_name_pairs=[(['LEFT_SHOULDER', 'RIGHT_SHOULDER'],
                                    ['LEFT_HIP', 'RIGHT_HIP'])],
        # Skeleton segment connectivity; multi-name endpoints stand in for
        # the missing neck/spine/pelvis keypoints.
        segment_name_pairs=[
            (['HEAD'], ['LEFT_SHOULDER', 'RIGHT_SHOULDER']),
            (['LEFT_SHOULDER', 'RIGHT_SHOULDER'], ['LEFT_SHOULDER']),
            (['LEFT_SHOULDER', 'RIGHT_SHOULDER'], ['RIGHT_SHOULDER']),
            (['LEFT_SHOULDER', 'RIGHT_SHOULDER'],
             ['LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_HIP', 'RIGHT_HIP']),
            (['LEFT_SHOULDER'], ['LEFT_ELBOW']),
            (['RIGHT_SHOULDER'], ['RIGHT_ELBOW']),
            (['LEFT_ELBOW'], ['LEFT_WRIST']),
            (['RIGHT_ELBOW'], ['RIGHT_WRIST']),
            (['LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_HIP',
              'RIGHT_HIP'], ['LEFT_HIP', 'RIGHT_HIP']),
            (['LEFT_HIP', 'RIGHT_HIP'], ['LEFT_HIP']),
            (['LEFT_HIP', 'RIGHT_HIP'], ['RIGHT_HIP']),
            (['LEFT_HIP'], ['LEFT_KNEE']), (['RIGHT_HIP'], ['RIGHT_KNEE']),
            (['LEFT_KNEE'], ['LEFT_ANKLE']), (['RIGHT_KNEE'], ['RIGHT_ANKLE'])
        ],
        # Semantic body-part mapping; composite parts (neck, spine, pelvis)
        # are lists of multiple keypoint names.
        head_keypoint_name=['HEAD'],
        neck_keypoint_name=['LEFT_SHOULDER', 'RIGHT_SHOULDER'],
        left_shoulder_keypoint_name=['LEFT_SHOULDER'],
        right_shoulder_keypoint_name=['RIGHT_SHOULDER'],
        left_elbow_keypoint_name=['LEFT_ELBOW'],
        right_elbow_keypoint_name=['RIGHT_ELBOW'],
        left_wrist_keypoint_name=['LEFT_WRIST'],
        right_wrist_keypoint_name=['RIGHT_WRIST'],
        spine_keypoint_name=[
            'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_HIP', 'RIGHT_HIP'
        ],
        pelvis_keypoint_name=['LEFT_HIP', 'RIGHT_HIP'],
        left_hip_keypoint_name=['LEFT_HIP'],
        right_hip_keypoint_name=['RIGHT_HIP'],
        left_knee_keypoint_name=['LEFT_KNEE'],
        right_knee_keypoint_name=['RIGHT_KNEE'],
        left_ankle_keypoint_name=['LEFT_ANKLE'],
        right_ankle_keypoint_name=['RIGHT_ANKLE'])
class LegacyH36m17KeypointProfile3D(KeypointProfile3D):
  """Legacy Human3.6M 3D 17-keypoint profile.

  Configures the 17 Human3.6M joint names (each tagged with left/right/central
  handedness), the skeleton segment topology, the normalization anchors, and
  the mapping from standard body-part roles to this profile's keypoint names.
  """

  def __init__(self):
    """Initializes the fixed Human3.6M-17 keypoint configuration."""
    super(LegacyH36m17KeypointProfile3D, self).__init__(
        name='LEGACY_3DH36M17',
        # Joint names paired with their left/right/central handedness tags.
        keypoint_names=[('Hip', LeftRightType.CENTRAL),
                        ('Head', LeftRightType.CENTRAL),
                        ('Neck/Nose', LeftRightType.CENTRAL),
                        ('Thorax', LeftRightType.CENTRAL),
                        ('LShoulder', LeftRightType.LEFT),
                        ('RShoulder', LeftRightType.RIGHT),
                        ('LElbow', LeftRightType.LEFT),
                        ('RElbow', LeftRightType.RIGHT),
                        ('LWrist', LeftRightType.LEFT),
                        ('RWrist', LeftRightType.RIGHT),
                        ('Spine', LeftRightType.CENTRAL),
                        ('LHip', LeftRightType.LEFT),
                        ('RHip', LeftRightType.RIGHT),
                        ('LKnee', LeftRightType.LEFT),
                        ('RKnee', LeftRightType.RIGHT),
                        ('LFoot', LeftRightType.LEFT),
                        ('RFoot', LeftRightType.RIGHT)],
        # Keypoint(s) used as the translation (centering) anchor.
        offset_keypoint_names=['Hip'],
        # Keypoint pairs whose distances define the normalization scale.
        # NOTE(review): a multi-name side presumably denotes a combination
        # (e.g. mean) of those keypoints — confirm against KeypointProfile3D.
        scale_keypoint_name_pairs=[(['Hip'], ['Spine']),
                                   (['Spine'], ['Thorax'])],
        # Skeleton edges as (endpoint, endpoint) pairs.
        segment_name_pairs=[(['Hip'], ['Spine']), (['Hip'], ['LHip']),
                            (['Hip'], ['RHip']), (['Spine'], ['Thorax']),
                            (['LHip'], ['LKnee']), (['RHip'], ['RKnee']),
                            (['LKnee'], ['LFoot']), (['RKnee'], ['RFoot']),
                            (['Thorax'], ['Neck/Nose']),
                            (['Thorax'], ['LShoulder']),
                            (['Thorax'], ['RShoulder']),
                            (['Neck/Nose'], ['Head']),
                            (['LShoulder'], ['LElbow']),
                            (['RShoulder'], ['RElbow']),
                            (['LElbow'], ['LWrist']), (['RElbow'], ['RWrist'])],
        # Mapping from standard body-part roles to this profile's names.
        head_keypoint_name=['Head'],
        neck_keypoint_name=['Thorax'],
        left_shoulder_keypoint_name=['LShoulder'],
        right_shoulder_keypoint_name=['RShoulder'],
        left_elbow_keypoint_name=['LElbow'],
        right_elbow_keypoint_name=['RElbow'],
        left_wrist_keypoint_name=['LWrist'],
        right_wrist_keypoint_name=['RWrist'],
        spine_keypoint_name=['Spine'],
        pelvis_keypoint_name=['Hip'],
        left_hip_keypoint_name=['LHip'],
        right_hip_keypoint_name=['RHip'],
        left_knee_keypoint_name=['LKnee'],
        right_knee_keypoint_name=['RKnee'],
        # Human3.6M names the end-of-leg joints "Foot"; they fill the
        # ankle roles here.
        left_ankle_keypoint_name=['LFoot'],
        right_ankle_keypoint_name=['RFoot'])
class LegacyH36m13KeypointProfile3D(KeypointProfile3D):
  """Legacy Human3.6M 3D 13-keypoint profile.

  A reduced Human3.6M skeleton without the dedicated Hip/Spine/Thorax/Neck
  joints; torso roles are expressed as combinations of shoulder/hip keypoints.
  """

  def __init__(self):
    """Initializes the fixed Human3.6M-13 keypoint configuration."""
    super(LegacyH36m13KeypointProfile3D, self).__init__(
        name='LEGACY_3DH36M13',
        # Joint names paired with their left/right/central handedness tags.
        keypoint_names=[('Head', LeftRightType.CENTRAL),
                        ('LShoulder', LeftRightType.LEFT),
                        ('RShoulder', LeftRightType.RIGHT),
                        ('LElbow', LeftRightType.LEFT),
                        ('RElbow', LeftRightType.RIGHT),
                        ('LWrist', LeftRightType.LEFT),
                        ('RWrist', LeftRightType.RIGHT),
                        ('LHip', LeftRightType.LEFT),
                        ('RHip', LeftRightType.RIGHT),
                        ('LKnee', LeftRightType.LEFT),
                        ('RKnee', LeftRightType.RIGHT),
                        ('LFoot', LeftRightType.LEFT),
                        ('RFoot', LeftRightType.RIGHT)],
        # NOTE(review): the offset anchor is only 'LHip' (not the hip
        # midpoint as in the other 13-keypoint profiles) — confirm this
        # asymmetry is intentional.
        offset_keypoint_names=['LHip'],
        # Scale is defined by the hip-midpoint-to-shoulder-midpoint distance.
        scale_keypoint_name_pairs=[
            (['LHip', 'RHip'], ['LShoulder', 'RShoulder']),
        ],
        # Skeleton edges; multi-name entries combine several keypoints.
        segment_name_pairs=[(['LHip', 'RHip'], ['LShoulder', 'RShoulder']),
                            (['LHip', 'RHip'], ['LHip']),
                            (['LHip', 'RHip'], ['RHip']), (['LHip'], ['LKnee']),
                            (['RHip'], ['RKnee']), (['LKnee'], ['LFoot']),
                            (['RKnee'], ['RFoot']),
                            (['LShoulder', 'RShoulder'], ['Head']),
                            (['LShoulder', 'RShoulder'], ['LShoulder']),
                            (['LShoulder', 'RShoulder'], ['RShoulder']),
                            (['LShoulder'], ['LElbow']),
                            (['RShoulder'], ['RElbow']),
                            (['LElbow'], ['LWrist']), (['RElbow'], ['RWrist'])],
        # Mapping from standard body-part roles to this profile's names;
        # torso roles are composites of the shoulder/hip keypoints.
        head_keypoint_name=['Head'],
        neck_keypoint_name=['LShoulder', 'RShoulder'],
        left_shoulder_keypoint_name=['LShoulder'],
        right_shoulder_keypoint_name=['RShoulder'],
        left_elbow_keypoint_name=['LElbow'],
        right_elbow_keypoint_name=['RElbow'],
        left_wrist_keypoint_name=['LWrist'],
        right_wrist_keypoint_name=['RWrist'],
        spine_keypoint_name=['LShoulder', 'RShoulder', 'LHip', 'RHip'],
        pelvis_keypoint_name=['LHip', 'RHip'],
        left_hip_keypoint_name=['LHip'],
        right_hip_keypoint_name=['RHip'],
        left_knee_keypoint_name=['LKnee'],
        right_knee_keypoint_name=['RKnee'],
        left_ankle_keypoint_name=['LFoot'],
        right_ankle_keypoint_name=['RFoot'])
class LegacyMpii3dhp17KeypointProfile3D(KeypointProfile3D):
  """Legacy MPII-3DHP 3D 17-keypoint profile.

  Uses the MPII-3DHP lowercase joint naming and a pelvis-rooted skeleton
  with dedicated spine/neck/head_top joints.
  """

  def __init__(self):
    """Initializes the fixed MPII-3DHP-17 keypoint configuration."""
    super(LegacyMpii3dhp17KeypointProfile3D, self).__init__(
        name='LEGACY_3DMPII3DHP17',
        # Joint names paired with their left/right/central handedness tags.
        keypoint_names=[('pelvis', LeftRightType.CENTRAL),
                        ('head', LeftRightType.CENTRAL),
                        ('neck', LeftRightType.CENTRAL),
                        ('head_top', LeftRightType.CENTRAL),
                        ('left_shoulder', LeftRightType.LEFT),
                        ('right_shoulder', LeftRightType.RIGHT),
                        ('left_elbow', LeftRightType.LEFT),
                        ('right_elbow', LeftRightType.RIGHT),
                        ('left_wrist', LeftRightType.LEFT),
                        ('right_wrist', LeftRightType.RIGHT),
                        ('spine', LeftRightType.CENTRAL),
                        ('left_hip', LeftRightType.LEFT),
                        ('right_hip', LeftRightType.RIGHT),
                        ('left_knee', LeftRightType.LEFT),
                        ('right_knee', LeftRightType.RIGHT),
                        ('left_ankle', LeftRightType.LEFT),
                        ('right_ankle', LeftRightType.RIGHT)],
        # Poses are translated so that 'pelvis' becomes the origin.
        offset_keypoint_names=['pelvis'],
        # Keypoint pairs whose distances define the normalization scale.
        scale_keypoint_name_pairs=[(['pelvis'], ['spine']),
                                   (['spine'], ['neck'])],
        # Skeleton edges as (endpoint, endpoint) pairs.
        segment_name_pairs=[(['pelvis'], ['spine']), (['pelvis'], ['left_hip']),
                            (['pelvis'], ['right_hip']), (['spine'], ['neck']),
                            (['left_hip'], ['left_knee']),
                            (['right_hip'], ['right_knee']),
                            (['left_knee'], ['left_ankle']),
                            (['right_knee'], ['right_ankle']),
                            (['neck'], ['head']), (['neck'], ['left_shoulder']),
                            (['neck'], ['right_shoulder']),
                            (['head'], ['head_top']),
                            (['left_shoulder'], ['left_elbow']),
                            (['right_shoulder'], ['right_elbow']),
                            (['left_elbow'], ['left_wrist']),
                            (['right_elbow'], ['right_wrist'])],
        # Mapping from standard body-part roles to this profile's names.
        head_keypoint_name=['head'],
        neck_keypoint_name=['neck'],
        left_shoulder_keypoint_name=['left_shoulder'],
        right_shoulder_keypoint_name=['right_shoulder'],
        left_elbow_keypoint_name=['left_elbow'],
        right_elbow_keypoint_name=['right_elbow'],
        left_wrist_keypoint_name=['left_wrist'],
        right_wrist_keypoint_name=['right_wrist'],
        spine_keypoint_name=['spine'],
        pelvis_keypoint_name=['pelvis'],
        left_hip_keypoint_name=['left_hip'],
        right_hip_keypoint_name=['right_hip'],
        left_knee_keypoint_name=['left_knee'],
        right_knee_keypoint_name=['right_knee'],
        left_ankle_keypoint_name=['left_ankle'],
        right_ankle_keypoint_name=['right_ankle'])
class Std13KeypointProfile2D(KeypointProfile2D):
  """Standard 2D 13-keypoint profile.

  A COCO-style 2D skeleton rooted at the hip midpoint, with a
  `compatible_keypoint_name_dict` that maps each keypoint onto the
  corresponding joints of the supported 3D profiles.
  """

  def __init__(self):
    """Initializes the fixed 2DSTD13 keypoint configuration."""
    super(Std13KeypointProfile2D, self).__init__(
        name='2DSTD13',
        # Joint names paired with their left/right/central handedness tags.
        keypoint_names=[('NOSE_TIP', LeftRightType.CENTRAL),
                        ('LEFT_SHOULDER', LeftRightType.LEFT),
                        ('RIGHT_SHOULDER', LeftRightType.RIGHT),
                        ('LEFT_ELBOW', LeftRightType.LEFT),
                        ('RIGHT_ELBOW', LeftRightType.RIGHT),
                        ('LEFT_WRIST', LeftRightType.LEFT),
                        ('RIGHT_WRIST', LeftRightType.RIGHT),
                        ('LEFT_HIP', LeftRightType.LEFT),
                        ('RIGHT_HIP', LeftRightType.RIGHT),
                        ('LEFT_KNEE', LeftRightType.LEFT),
                        ('RIGHT_KNEE', LeftRightType.RIGHT),
                        ('LEFT_ANKLE', LeftRightType.LEFT),
                        ('RIGHT_ANKLE', LeftRightType.RIGHT)],
        # Poses are centered on the hip midpoint.
        offset_keypoint_names=['LEFT_HIP', 'RIGHT_HIP'],
        # All torso keypoint pairs contribute to the normalization scale.
        scale_keypoint_name_pairs=[(['LEFT_SHOULDER'], ['RIGHT_SHOULDER']),
                                   (['LEFT_SHOULDER'], ['LEFT_HIP']),
                                   (['LEFT_SHOULDER'], ['RIGHT_HIP']),
                                   (['RIGHT_SHOULDER'], ['LEFT_HIP']),
                                   (['RIGHT_SHOULDER'], ['RIGHT_HIP']),
                                   (['LEFT_HIP'], ['RIGHT_HIP'])],
        # Skeleton edges as (endpoint, endpoint) pairs.
        segment_name_pairs=[(['NOSE_TIP'], ['LEFT_SHOULDER']),
                            (['NOSE_TIP'], ['RIGHT_SHOULDER']),
                            (['LEFT_SHOULDER'], ['RIGHT_SHOULDER']),
                            (['LEFT_SHOULDER'], ['LEFT_ELBOW']),
                            (['RIGHT_SHOULDER'], ['RIGHT_ELBOW']),
                            (['LEFT_ELBOW'], ['LEFT_WRIST']),
                            (['RIGHT_ELBOW'], ['RIGHT_WRIST']),
                            (['LEFT_SHOULDER'], ['LEFT_HIP']),
                            (['RIGHT_SHOULDER'], ['RIGHT_HIP']),
                            (['LEFT_HIP'], ['RIGHT_HIP']),
                            (['LEFT_HIP'], ['LEFT_KNEE']),
                            (['RIGHT_HIP'], ['RIGHT_KNEE']),
                            (['LEFT_KNEE'], ['LEFT_ANKLE']),
                            (['RIGHT_KNEE'], ['RIGHT_ANKLE'])],
        # Per 3D profile: the 3D keypoint corresponding to each of the 13
        # keypoints above, in the same order (NOSE_TIP maps to the head
        # keypoint of each 3D profile).
        compatible_keypoint_name_dict={
            '3DSTD16': [
                'HEAD', 'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_ELBOW',
                'RIGHT_ELBOW', 'LEFT_WRIST', 'RIGHT_WRIST', 'LEFT_HIP',
                'RIGHT_HIP', 'LEFT_KNEE', 'RIGHT_KNEE', 'LEFT_ANKLE',
                'RIGHT_ANKLE'
            ],
            '3DSTD13': [
                'HEAD', 'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_ELBOW',
                'RIGHT_ELBOW', 'LEFT_WRIST', 'RIGHT_WRIST', 'LEFT_HIP',
                'RIGHT_HIP', 'LEFT_KNEE', 'RIGHT_KNEE', 'LEFT_ANKLE',
                'RIGHT_ANKLE'
            ],
            'LEGACY_3DH36M17': [
                'Head', 'LShoulder', 'RShoulder', 'LElbow', 'RElbow', 'LWrist',
                'RWrist', 'LHip', 'RHip', 'LKnee', 'RKnee', 'LFoot', 'RFoot'
            ],
            'LEGACY_3DMPII3DHP17': [
                'head', 'left_shoulder', 'right_shoulder', 'left_elbow',
                'right_elbow', 'left_wrist', 'right_wrist', 'left_hip',
                'right_hip', 'left_knee', 'right_knee', 'left_ankle',
                'right_ankle'
            ],
        },
        # Mapping from standard body-part roles to this profile's names;
        # the nose stands in for the head, torso roles are composites.
        head_keypoint_name=['NOSE_TIP'],
        neck_keypoint_name=['LEFT_SHOULDER', 'RIGHT_SHOULDER'],
        left_shoulder_keypoint_name=['LEFT_SHOULDER'],
        right_shoulder_keypoint_name=['RIGHT_SHOULDER'],
        left_elbow_keypoint_name=['LEFT_ELBOW'],
        right_elbow_keypoint_name=['RIGHT_ELBOW'],
        left_wrist_keypoint_name=['LEFT_WRIST'],
        right_wrist_keypoint_name=['RIGHT_WRIST'],
        spine_keypoint_name=[
            'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_HIP', 'RIGHT_HIP'
        ],
        pelvis_keypoint_name=['LEFT_HIP', 'RIGHT_HIP'],
        left_hip_keypoint_name=['LEFT_HIP'],
        right_hip_keypoint_name=['RIGHT_HIP'],
        left_knee_keypoint_name=['LEFT_KNEE'],
        right_knee_keypoint_name=['RIGHT_KNEE'],
        left_ankle_keypoint_name=['LEFT_ANKLE'],
        right_ankle_keypoint_name=['RIGHT_ANKLE'])
class LegacyCoco13KeypointProfile2D(Std13KeypointProfile2D):
  """Legacy COCO 2D 13-keypoint profile.

  This profile is identical to the `2DSTD13` profile except for the name.
  """

  def __init__(self):
    """Initializes the standard 13-keypoint profile under the legacy name."""
    super(LegacyCoco13KeypointProfile2D, self).__init__()
    # Override the name assigned by the parent initializer.
    self._name = 'LEGACY_2DCOCO13'
class LegacyH36m13KeypointProfile2D(KeypointProfile2D):
  """Legacy Human3.6M 2D 13-keypoint profile.

  Mirrors the `2DSTD13` topology but with Human3.6M joint names, and maps
  onto the supported 3D profiles via `compatible_keypoint_name_dict`.
  """

  def __init__(self):
    """Initializes the fixed Human3.6M-13 2D keypoint configuration."""
    super(LegacyH36m13KeypointProfile2D, self).__init__(
        name='LEGACY_2DH36M13',
        # Joint names paired with their left/right/central handedness tags.
        keypoint_names=[('Head', LeftRightType.CENTRAL),
                        ('LShoulder', LeftRightType.LEFT),
                        ('RShoulder', LeftRightType.RIGHT),
                        ('LElbow', LeftRightType.LEFT),
                        ('RElbow', LeftRightType.RIGHT),
                        ('LWrist', LeftRightType.LEFT),
                        ('RWrist', LeftRightType.RIGHT),
                        ('LHip', LeftRightType.LEFT),
                        ('RHip', LeftRightType.RIGHT),
                        ('LKnee', LeftRightType.LEFT),
                        ('RKnee', LeftRightType.RIGHT),
                        ('LFoot', LeftRightType.LEFT),
                        ('RFoot', LeftRightType.RIGHT)],
        # Poses are centered on the hip midpoint.
        offset_keypoint_names=['LHip', 'RHip'],
        # All torso keypoint pairs contribute to the normalization scale.
        scale_keypoint_name_pairs=[(['LShoulder'], ['RShoulder']),
                                   (['LShoulder'], ['LHip']),
                                   (['LShoulder'], ['RHip']),
                                   (['RShoulder'], ['LHip']),
                                   (['RShoulder'], ['RHip']),
                                   (['LHip'], ['RHip'])],
        # Skeleton edges as (endpoint, endpoint) pairs.
        segment_name_pairs=[(['Head'], ['LShoulder']),
                            (['Head'], ['RShoulder']),
                            (['LShoulder'], ['LElbow']),
                            (['LElbow'], ['LWrist']),
                            (['RShoulder'], ['RElbow']),
                            (['RElbow'], ['RWrist']),
                            (['LShoulder'], ['LHip']),
                            (['RShoulder'], ['RHip']),
                            (['LHip'], ['LKnee']), (['LKnee'], ['LFoot']),
                            (['RHip'], ['RKnee']), (['RKnee'], ['RFoot']),
                            (['LShoulder'], ['RShoulder']),
                            (['LHip'], ['RHip'])],
        # Per 3D profile: the 3D keypoint corresponding to each of the 13
        # keypoints above, in the same order.
        compatible_keypoint_name_dict={
            '3DSTD16': [
                'HEAD', 'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_ELBOW',
                'RIGHT_ELBOW', 'LEFT_WRIST', 'RIGHT_WRIST', 'LEFT_HIP',
                'RIGHT_HIP', 'LEFT_KNEE', 'RIGHT_KNEE', 'LEFT_ANKLE',
                'RIGHT_ANKLE'
            ],
            '3DSTD13': [
                'HEAD', 'LEFT_SHOULDER', 'RIGHT_SHOULDER', 'LEFT_ELBOW',
                'RIGHT_ELBOW', 'LEFT_WRIST', 'RIGHT_WRIST', 'LEFT_HIP',
                'RIGHT_HIP', 'LEFT_KNEE', 'RIGHT_KNEE', 'LEFT_ANKLE',
                'RIGHT_ANKLE'
            ],
            'LEGACY_3DH36M17': [
                'Head', 'LShoulder', 'RShoulder', 'LElbow', 'RElbow',
                'LWrist', 'RWrist', 'LHip', 'RHip', 'LKnee', 'RKnee',
                'LFoot', 'RFoot'
            ],
            'LEGACY_3DMPII3DHP17': [
                'head', 'left_shoulder', 'right_shoulder', 'left_elbow',
                'right_elbow', 'left_wrist', 'right_wrist', 'left_hip',
                'right_hip', 'left_knee', 'right_knee', 'left_ankle',
                'right_ankle'
            ],
        },
        # Mapping from standard body-part roles to this profile's names;
        # torso roles are composites of the shoulder/hip keypoints.
        head_keypoint_name=['Head'],
        neck_keypoint_name=['LShoulder', 'RShoulder'],
        left_shoulder_keypoint_name=['LShoulder'],
        right_shoulder_keypoint_name=['RShoulder'],
        left_elbow_keypoint_name=['LElbow'],
        right_elbow_keypoint_name=['RElbow'],
        left_wrist_keypoint_name=['LWrist'],
        right_wrist_keypoint_name=['RWrist'],
        spine_keypoint_name=['LShoulder', 'RShoulder', 'LHip', 'RHip'],
        pelvis_keypoint_name=['LHip', 'RHip'],
        left_hip_keypoint_name=['LHip'],
        right_hip_keypoint_name=['RHip'],
        left_knee_keypoint_name=['LKnee'],
        right_knee_keypoint_name=['RKnee'],
        left_ankle_keypoint_name=['LFoot'],
        right_ankle_keypoint_name=['RFoot'])
def create_keypoint_profile_or_die(keypoint_profile_name):
  """Creates a keypoint profile instance from its registered name.

  Args:
    keypoint_profile_name: A string for the keypoint profile name.

  Returns:
    A keypoint profile class object.

  Raises:
    ValueError: If the keypoint profile name is unsupported.
  """
  # Dispatch with a lazy elif chain (rather than a prebuilt name->class
  # table) so class references are only evaluated on a match.
  if keypoint_profile_name == '3DSTD16':
    return Std16KeypointProfile3D()
  elif keypoint_profile_name == '3DSTD13':
    return Std13KeypointProfile3D()
  elif keypoint_profile_name == 'LEGACY_3DH36M17':
    return LegacyH36m17KeypointProfile3D()
  elif keypoint_profile_name == 'LEGACY_3DH36M13':
    return LegacyH36m13KeypointProfile3D()
  elif keypoint_profile_name == 'LEGACY_3DMPII3DHP17':
    return LegacyMpii3dhp17KeypointProfile3D()
  elif keypoint_profile_name == '2DSTD13':
    return Std13KeypointProfile2D()
  elif keypoint_profile_name == 'LEGACY_2DCOCO13':
    return LegacyCoco13KeypointProfile2D()
  elif keypoint_profile_name == 'LEGACY_2DH36M13':
    return LegacyH36m13KeypointProfile2D()
  raise ValueError('Unsupported keypoint profile name: `%s`.' %
                   str(keypoint_profile_name))
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
06059aed5c5948562a19955d6daf315d91ff4e1f | f87f51ec4d9353bc3836e22ac4a944951f9c45c0 | /.history/test_20210719160759.py | 39a3c495535fc8cbd8e9c4ea3de97d83e654feef | [] | no_license | sanjayMamidipaka/cs1301 | deaffee3847519eb85030d1bd82ae11e734bc1b7 | 9ddb66596497382d807673eba96853a17884d67b | refs/heads/main | 2023-06-25T04:52:28.153535 | 2021-07-26T16:42:44 | 2021-07-26T16:42:44 | 389,703,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,537 | py | print(("Good",) + (4,) + ("You",))
print([["Solar", "Power"]] * 2)
print("pitch perfect" in ["pitch", "perfect"])
print({print("scary"): "hours", 2: ";p"}[None])
print(( "Surf's Up", {"The": "Beach Boys"[2], "Beach": [2]} ) [1] ["The"] * 3)
# aList = ["Dolly Parton", "Arlo Guthrie", "Paul Simon"]
# cList = aList
# cList.append("Pete Seeger")
# bList = cList[:]
# bList.remove("Paul Simon")
# cList += "Elivis"
# print(cList)
aList = [["Everything", "Everything"], "jammin’", 2020]
bList = aList
cList = bList[:]
aList[2] = aList[2] + 1
cList[0][1] = "All the Time"
bList.append("vibes")
cList[0] = "Fleet Foxes"
print(cList)
def festival(artistList):
goodArtists = []
songRating = {"Breezeblocks": 9, "Skinny Love": 9, "Riptide": 5, "Oxford Comma": 8, "Holland, 1946": 7}
for artist in artistList:
try:
if songRating[artistList[artist]] > 7:
goodArtists.append(artist)
else:
print("not good enough")
except:
print("not one of your artists")
continue
return goodArtists
artistList= {"alt—J": "Breezeblocks", "The Strokes": "Hard To Explain", "Bon Iver": "Skinny Love", "Vampire Weekend": "Oxford Comma"}
print(festival(artistList))
def noteFile(notes):
sheet = open('sheet.txt', 'w')
for note in notes:
sheet.write(note + '\n')
sheet.close()
music = open('sheet.txt')
one = music.readlines()
print(one[2][0])
notes = 'ABCAG'
noteFile(notes)
def concerts():
ratings = {5.0: ["The Shins"], 4.5: ["The", "Beatles"]}
venues = [(5.0, "infinite energy"), (2, "the loft")]
for r, c in venues:
if r in ratings:
print("Add {} to {}".format(c, r))
ratings[r].append(c)
else:
print("Add {} to ratings".format(r))
print(concerts())
def listen(platformDict):
platformName = ''
platformUsers = 0
for key in platformDict:
if len(platformDict[key]) > platformUsers:
platformName = key
platformUsers = len(platformDict[key])
return (platformName, platformUsers)
print(listen({'spotify': ['c','k','e'], 'apple music': ['m', 'e'], 'soundcloud': ['c', 'b']}))
def bestSongs(totalDict):
newDict = {}
for key in totalDict:
for song in totalDict[key]
print(song)
music = {"Drake": [("What's Next", 7), ("POPSTAR", 8), ("Headlines", 9)], "The Weeknd": [("Save Your Tears", 9), ("Starboy", 8), ("After Hours", 10)]} | [
"sanjay.mamidipaka@gmail.com"
] | sanjay.mamidipaka@gmail.com |
28a643bf09bcd27175187850ab9c60376afe9b41 | 3a22d9a1c4a8d5530208f4b9af004711bd620111 | /reinforcement_learning/rl_hvac_ray_energyplus/source/hvac_ray_launcher.py | 01a94ec321cd97d3d0981453d39be0e9264c3571 | [
"Apache-2.0"
] | permissive | jme3192/amazon-sagemaker-examples | 45fae7b1e2a8b8c0b8149eb1195caa8fd701a12a | 93cd954b6f57c8f905340479b92eaca17b2527ff | refs/heads/master | 2023-04-29T02:40:42.483934 | 2021-05-11T21:31:59 | 2021-05-11T21:31:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,722 | py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import ast
import os
import json
import subprocess
import ray
from ray.tune.tune import run_experiments, run, _make_scheduler
from sagemaker_rl.ray_launcher import SageMakerRayLauncher
from sagemaker_rl.tf_serving_utils import export_tf_serving, natural_keys
TERMINATION_SIGNAL = "JOB_TERMINATED"
MODEL_OUTPUT_DIR = "/opt/ml/model"
CHECKPOINTS_DIR = "/opt/ml/checkpoints"
def custom_sync_func(source, target):
    """Sync experiment artifacts from remote nodes to the driver node.

    Runs ``rsync`` over ssh using the cluster's shared identity key and
    blocks until the transfer process exits.

    Args:
        source: rsync source spec (e.g. ``"host:/path/"``).
        target: destination path on the driver node.
    """
    # Build the command as an argument list with shell=False so that paths
    # containing spaces or shell metacharacters can neither break the
    # command line nor inject extra shell commands (the original
    # ``"...".format(...)`` + ``shell=True`` form allowed both).
    sync_cmd = [
        "rsync",
        "-havP",
        "--inplace",
        "--stats",
        "-e",
        "ssh -i /root/.ssh/id_rsa",  # single argv entry, as the quotes did
        str(source),
        str(target),
    ]
    # Best-effort semantics preserved: wait for completion, ignore the
    # return code (the original never checked Popen's exit status).
    subprocess.run(sync_cmd, shell=False, check=False)
class HVACSageMakerRayLauncher(SageMakerRayLauncher):
    """Launcher class for HVAC/EnergyPlus experiments using Ray-RLLib.

    Customers should sub-class this, fill in the required methods
    (``register_env_creator``, ``register_algorithms_and_preprocessors``,
    ``get_experiment_config``), and call ``.train_main(args)`` to start a
    training process.

    Example::

        class MyLauncher(HVACSageMakerRayLauncher):
            def register_env_creator(self):
                register_env("my_hvac_env", lambda config: MyHvacEnv(config))

            def register_algorithms_and_preprocessors(self):
                ...

            def get_experiment_config(self):
                return {
                    "training": {
                        "env": "my_hvac_env",
                        "run": "PPO",
                        ...
                    }
                }

        if __name__ == "__main__":
            MyLauncher.train_main(args)
    """

    def register_algorithms_and_preprocessors(self):
        # Subclass hook: register any custom RLlib algorithms, models, or
        # preprocessors with Ray before training/serving.
        raise NotImplementedError()

    def create_tf_serving_model(self, algorithm=None, env_string=None):
        """Restore the trained agent and export it as a TF Serving model.

        Reads the training config from ``/opt/ml/model/params.json``,
        rebuilds the agent with a single CPU-only worker, restores the
        checkpoint, and writes a TF Serving export to the model directory.

        Args:
            algorithm: RLlib algorithm name used during training (e.g. "PPO").
            env_string: registered environment name to rebuild the agent with.
        """
        self.register_env_creator()
        self.register_algorithms_and_preprocessors()
        # The agent registry moved between Ray releases; import whichever
        # location matches the installed version.
        # NOTE(review): this is a lexicographic string comparison, which
        # misorders versions such as "0.10.0" < "0.6.5" — confirm it is
        # acceptable for the Ray versions this container supports.
        if ray.__version__ >= "0.6.5":
            from ray.rllib.agents.registry import get_agent_class
        else:
            from ray.rllib.agents.agent import get_agent_class
        cls = get_agent_class(algorithm)
        with open(os.path.join(MODEL_OUTPUT_DIR, "params.json")) as config_json:
            config = json.load(config_json)
        use_torch = config.get("use_pytorch", False)
        # TF Serving export only makes sense for TensorFlow policies.
        if not use_torch:
            if "callbacks" in config:
                # Extract the bare class name from the serialized
                # "<class '...'>"-style string.
                callback_cls_str = config["callbacks"]
                callback_cls = callback_cls_str.split("'")[-2].split(".")[-1]
                # NOTE(review): ``ast.literal_eval()`` is called with no
                # arguments here, so this line raises TypeError whenever a
                # "callbacks" entry is present. It presumably should resolve
                # ``callback_cls`` back to the callback class object —
                # confirm the intended fix before changing behavior.
                config["callbacks"] = ast.literal_eval()(callback_cls)
            print("Loaded config for TensorFlow serving.")
            # Rebuild with a minimal, CPU-only footprint: we only need the
            # graph and weights, not a training cluster.
            config["monitor"] = False
            config["num_workers"] = 1
            config["num_gpus"] = 0
            agent = cls(env=env_string, config=config)
            checkpoint = os.path.join(MODEL_OUTPUT_DIR, "checkpoint")
            agent.restore(checkpoint)
            export_tf_serving(agent, MODEL_OUTPUT_DIR)

    def find_checkpoint_path_for_spot(self, prefix):
        """Collect checkpoint directory names under ``prefix``.

        Args:
            prefix: root directory to search (typically /opt/ml/checkpoints).

        Returns:
            Tuple of (directory containing the first checkpoint found,
            list of directory names starting with "checkpoint").
        """
        ckpts = []
        ckpts_prefix = ""
        for root, directories, files in os.walk(prefix):
            for directory in directories:
                if directory.startswith("checkpoint"):
                    # NOTE(review): only the root of the first match is kept;
                    # assumes all checkpoints live under one parent directory.
                    if not ckpts_prefix:
                        ckpts_prefix = root
                    ckpts.append(directory)
        return ckpts_prefix, ckpts

    def find_checkpoint_file_for_spot(self, prefix):
        """Return the latest checkpoint file path under ``prefix``, or "".

        Used to resume training after a spot-instance interruption.
        """
        ckpts_prefix, ckpts = self.find_checkpoint_path_for_spot(prefix)
        if not ckpts:
            return ""
        else:
            # Natural sort so "checkpoint_10" sorts after "checkpoint_9".
            ckpts.sort(key=natural_keys)
            # The file inside "checkpoint_N" is named "checkpoint-N".
            ckpt_name = ckpts[-1].replace("_", "-")
            return os.path.join(ckpts_prefix, ckpts[-1], ckpt_name)

    def launch(self):
        """Actual entry point into the class instance where everything happens.

        Registers envs/algorithms, initializes the Ray cluster (only the
        master node proceeds past cluster setup), resumes from a local spot
        checkpoint if one exists, runs the experiments, and finally signals
        worker nodes to terminate for distributed jobs.
        """
        self.register_env_creator()
        self.register_algorithms_and_preprocessors()
        experiment_config, args, verbose = self.get_experiment_config()
        # All worker nodes will block at this step during training
        ray_cluster_config = self.ray_init_config()
        if not self.is_master_node:
            return
        # Resource overrides passed through from the training-job arguments.
        ray_custom_cluster_config = {
            "object_store_memory": args.ray_object_store_memory,
            "memory": args.ray_memory,
            "redis_max_memory": args.ray_redis_max_memory,
            "num_cpus": args.ray_num_cpus,
            "num_gpus": args.ray_num_gpus,
        }
        all_workers_host_names = self.get_all_host_names()[1:]
        # Overwrite redis address for single instance job
        if len(all_workers_host_names) == 0:
            ray_custom_cluster_config.update({"address": args.ray_address})
        ray_cluster_config.update(ray_custom_cluster_config)
        # Start the driver on master node
        ray.init(**ray_cluster_config)
        # Spot instance is back: resume from the most recent local checkpoint.
        if os.path.exists(CHECKPOINTS_DIR) and os.listdir(CHECKPOINTS_DIR):
            print("Instance is back. Local checkpoint path detected.")
            checkpoint_file = self.find_checkpoint_file_for_spot(CHECKPOINTS_DIR)
            print("Setting checkpoint path to {}".format(checkpoint_file))
            if checkpoint_file:
                experiment_config["training"]["restore"] = checkpoint_file  # Overwrite
        experiment_config = self.customize_experiment_config(experiment_config)
        experiment_config = self.set_up_checkpoint(experiment_config)
        # Sync trial artifacts from remote workers back to this driver node.
        experiment_config["training"]["sync_to_driver"] = custom_sync_func
        run_experiments(
            experiment_config,
            scheduler=_make_scheduler(args),
            queue_trials=args.queue_trials,
            resume=args.resume,
            verbose=verbose,
            concurrent=True,
        )
        # If distributed job, send TERMINATION_SIGNAL to all workers.
        if len(all_workers_host_names) > 0:
            self.sage_cluster_communicator.create_s3_signal(TERMINATION_SIGNAL)

    @classmethod
    def train_main(cls, args):
        """Main function that kicks things off: build the launcher and run it."""
        launcher = cls(args)
        launcher.launch()
| [
"noreply@github.com"
] | jme3192.noreply@github.com |
4f5cae61a674637eef37f4ce0af3e1cecb883c8c | 5dccb539427d6bd98b4b4eab38b524dc930229c7 | /monai/bundle/config_parser.py | 613ad4e44aa80f678ca9ed2f28e6764a4f2a70e4 | [
"Apache-2.0"
] | permissive | Warvito/MONAI | 794aca516e6b3ed365ee912164743a3696735cf3 | 8eceabf281ab31ea4bda0ab8a6d2c8da06027e82 | refs/heads/dev | 2023-04-27T19:07:56.041733 | 2023-03-27T09:23:53 | 2023-03-27T09:23:53 | 512,893,750 | 0 | 0 | Apache-2.0 | 2022-08-05T16:51:05 | 2022-07-11T20:04:47 | null | UTF-8 | Python | false | false | 22,265 | py | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import json
import re
from collections.abc import Sequence
from copy import deepcopy
from pathlib import Path
from typing import TYPE_CHECKING, Any
from monai.bundle.config_item import ComponentLocator, ConfigComponent, ConfigExpression, ConfigItem
from monai.bundle.reference_resolver import ReferenceResolver
from monai.bundle.utils import ID_REF_KEY, ID_SEP_KEY, MACRO_KEY
from monai.config import PathLike
from monai.utils import ensure_tuple, look_up_option, optional_import
if TYPE_CHECKING:
import yaml
else:
yaml, _ = optional_import("yaml")
__all__ = ["ConfigParser"]
_default_globals = {"monai": "monai", "torch": "torch", "np": "numpy", "numpy": "numpy"}
class ConfigParser:
"""
The primary configuration parser. It traverses a structured config (in the form of nested Python dict or list),
creates ``ConfigItem``, and assign unique IDs according to the structures.
This class provides convenient access to the set of ``ConfigItem`` of the config by ID.
A typical workflow of config parsing is as follows:
- Initialize ``ConfigParser`` with the ``config`` source.
- Call ``get_parsed_content()`` to get expected component with `id`.
.. code-block:: python
from monai.bundle import ConfigParser
config = {
"my_dims": 2,
"dims_1": "$@my_dims + 1",
"my_xform": {"_target_": "LoadImage"},
"my_net": {"_target_": "BasicUNet", "spatial_dims": "@dims_1", "in_channels": 1, "out_channels": 4},
"trainer": {"_target_": "SupervisedTrainer", "network": "@my_net", "preprocessing": "@my_xform"}
}
# in the example $@my_dims + 1 is an expression, which adds 1 to the value of @my_dims
parser = ConfigParser(config)
# get/set configuration content, the set method should happen before calling parse()
print(parser["my_net"]["in_channels"]) # original input channels 1
parser["my_net"]["in_channels"] = 4 # change input channels to 4
print(parser["my_net"]["in_channels"])
# instantiate the network component
parser.parse(True)
net = parser.get_parsed_content("my_net", instantiate=True)
print(net)
# also support to get the configuration content of parsed `ConfigItem`
trainer = parser.get_parsed_content("trainer", instantiate=False)
print(trainer)
Args:
config: input config source to parse.
excludes: when importing modules to instantiate components,
excluding components from modules specified in ``excludes``.
globals: pre-import packages as global variables to ``ConfigExpression``,
so that expressions, for example, ``"$monai.data.list_data_collate"`` can use ``monai`` modules.
The current supported globals and alias names are
``{"monai": "monai", "torch": "torch", "np": "numpy", "numpy": "numpy"}``.
These are MONAI's minimal dependencies. Additional packages could be included with `globals={"itk": "itk"}`.
Set it to ``False`` to disable `self.globals` module importing.
See also:
- :py:class:`monai.bundle.ConfigItem`
- :py:class:`monai.bundle.scripts.run`
"""
suffixes = ("json", "yaml", "yml")
suffix_match = rf".*\.({'|'.join(suffixes)})"
path_match = rf"({suffix_match}$)"
# match relative id names, e.g. "@#data", "@##transform#1"
relative_id_prefix = re.compile(rf"(?:{ID_REF_KEY}|{MACRO_KEY}){ID_SEP_KEY}+")
meta_key = "_meta_" # field key to save metadata
    def __init__(
        self,
        config: Any = None,
        excludes: Sequence[str] | str | None = None,
        globals: dict[str, Any] | None | bool = None,
    ):
        """Store the config source and prepare the component locator,
        reference resolver, and the global symbols available to expressions.
        """
        # Raw config tree; replaced wholesale by `set()` below.
        self.config: ConfigItem | None = None
        self.globals: dict[str, Any] = {}
        _globals = _default_globals.copy()
        # Merge user-provided globals over the defaults (monai/torch/numpy).
        # NOTE(review): passing ``globals=True`` reaches ``.update(True)``
        # and raises TypeError — presumably only dict/None/False are
        # supported values; confirm intended contract.
        if isinstance(_globals, dict) and globals not in (None, False):
            _globals.update(globals)  # type: ignore
        if _globals is not None and globals is not False:
            # String values are module names and are imported lazily via
            # optional_import; non-strings are used as-is.
            for k, v in _globals.items():
                self.globals[k] = optional_import(v)[0] if isinstance(v, str) else v

        self.locator = ComponentLocator(excludes=excludes)
        self.ref_resolver = ReferenceResolver()
        if config is None:
            # Start from an empty tree containing only the metadata section.
            config = {self.meta_key: {}}
        self.set(config=config)
def __repr__(self):
return f"{self.config}"
    def __getattr__(self, id):
        """
        Get the parsed result of ``ConfigItem`` with the specified ``id``
        with default arguments (e.g. ``lazy=True``, ``instantiate=True`` and ``eval_expr=True``).

        Args:
            id: id of the ``ConfigItem``.

        See also:
            :py:meth:`get_parsed_content`

        """
        # Only invoked when normal attribute lookup fails, so real
        # attributes (config, globals, ...) are never shadowed.
        return self.get_parsed_content(id)
def __getitem__(self, id: str | int) -> Any:
"""
Get the config by id.
Args:
id: id of the ``ConfigItem``, ``"#"`` in id are interpreted as special characters to
go one level further into the nested structures.
Use digits indexing from "0" for list or other strings for dict.
For example: ``"xform#5"``, ``"net#channels"``. ``""`` indicates the entire ``self.config``.
"""
if id == "":
return self.config
config = self.config
for k in str(id).split(ID_SEP_KEY):
if not isinstance(config, (dict, list)):
raise ValueError(f"config must be dict or list for key `{k}`, but got {type(config)}: {config}.")
try:
config = (
look_up_option(k, config, print_all_options=False) if isinstance(config, dict) else config[int(k)]
)
except ValueError as e:
raise KeyError(f"query key: {k}") from e
return config
def __setitem__(self, id: str | int, config: Any) -> None:
"""
Set config by ``id``. Note that this method should be used before ``parse()`` or ``get_parsed_content()``
to ensure the updates are included in the parsed content.
Args:
id: id of the ``ConfigItem``, ``"#"`` in id are interpreted as special characters to
go one level further into the nested structures.
Use digits indexing from "0" for list or other strings for dict.
For example: ``"xform#5"``, ``"net#channels"``. ``""`` indicates the entire ``self.config``.
config: config to set at location ``id``.
"""
if id == "":
self.config = config
self.ref_resolver.reset()
return
keys = str(id).split(ID_SEP_KEY)
# get the last parent level config item and replace it
last_id = ID_SEP_KEY.join(keys[:-1])
conf_ = self[last_id]
indexing = keys[-1] if isinstance(conf_, dict) else int(keys[-1])
conf_[indexing] = config
self.ref_resolver.reset()
return
    def get(self, id: str = "", default: Any | None = None) -> Any:
        """
        Get the config by id.

        Args:
            id: id to specify the expected position. See also :py:meth:`__getitem__`.
            default: default value to return if the specified ``id`` is invalid.

        """
        try:
            return self[id]
        # ValueError covers non-container intermediates and bad int keys;
        # IndexError covers out-of-range list indexing.
        except (KeyError, IndexError, ValueError):  # Index error for integer indexing
            return default
def set(self, config: Any, id: str = "", recursive: bool = True) -> None:
"""
Set config by ``id``.
Args:
config: config to set at location ``id``.
id: id to specify the expected position. See also :py:meth:`__setitem__`.
recursive: if the nested id doesn't exist, whether to recursively create the nested items in the config.
default to `True`. for the nested id, only support `dict` for the missing section.
"""
keys = str(id).split(ID_SEP_KEY)
conf_ = self.get()
if recursive:
if conf_ is None:
self.config = conf_ = {} # type: ignore
for k in keys[:-1]:
if isinstance(conf_, dict) and k not in conf_:
conf_[k] = {}
conf_ = conf_[k if isinstance(conf_, dict) else int(k)]
self[id] = config
    def update(self, pairs: dict[str, Any]) -> None:
        """
        Set the ``id`` and the corresponding config content in pairs, see also :py:meth:`__setitem__`.
        For example, ``parser.update({"train#epoch": 100, "train#lr": 0.02})``

        Args:
            pairs: dictionary of `id` and config pairs.

        """
        # Each assignment goes through __setitem__, so the reference
        # resolver cache is reset for every updated id.
        for k, v in pairs.items():
            self[k] = v
def __contains__(self, id: str | int) -> bool:
"""
Returns True if `id` is stored in this configuration.
Args:
id: id to specify the expected position. See also :py:meth:`__getitem__`.
"""
try:
_ = self[id]
return True
except (KeyError, IndexError, ValueError): # Index error for integer indexing
return False
    def parse(self, reset: bool = True) -> None:
        """
        Recursively resolve `self.config` to replace the macro tokens with target content.
        Then recursively parse the config source, add every item as ``ConfigItem`` to the reference resolver.

        Args:
            reset: whether to reset the ``reference_resolver`` before parsing. Defaults to `True`.

        """
        if reset:
            self.ref_resolver.reset()
        # Expand "%..." macros and relative "@#..." ids first, then walk the
        # tree registering each item with the resolver.
        self.resolve_macro_and_relative_ids()
        self._do_parse(config=self.get())
def get_parsed_content(self, id: str = "", **kwargs: Any) -> Any:
"""
Get the parsed result of ``ConfigItem`` with the specified ``id``.
- If the item is ``ConfigComponent`` and ``instantiate=True``, the result is the instance.
- If the item is ``ConfigExpression`` and ``eval_expr=True``, the result is the evaluated output.
- Else, the result is the configuration content of `ConfigItem`.
Args:
id: id of the ``ConfigItem``, ``"#"`` in id are interpreted as special characters to
go one level further into the nested structures.
Use digits indexing from "0" for list or other strings for dict.
For example: ``"xform#5"``, ``"net#channels"``. ``""`` indicates the entire ``self.config``.
kwargs: additional keyword arguments to be passed to ``_resolve_one_item``.
Currently support ``lazy`` (whether to retain the current config cache, default to `True`),
``instantiate`` (whether to instantiate the `ConfigComponent`, default to `True`) and
``eval_expr`` (whether to evaluate the `ConfigExpression`, default to `True`), ``default``
(the default config item if the `id` is not in the config content).
"""
if not self.ref_resolver.is_resolved():
# not parsed the config source yet, parse it
self.parse(reset=True)
elif not kwargs.get("lazy", True):
self.parse(reset=not kwargs.get("lazy", True))
return self.ref_resolver.get_resolved_content(id=id, **kwargs)
def read_meta(self, f: PathLike | Sequence[PathLike] | dict, **kwargs: Any) -> None:
"""
Read the metadata from specified JSON or YAML file.
The metadata as a dictionary will be stored at ``self.config["_meta_"]``.
Args:
f: filepath of the metadata file, the content must be a dictionary,
if providing a list of files, will merge the content of them.
if providing a dictionary directly, use it as metadata.
kwargs: other arguments for ``json.load`` or ``yaml.safe_load``, depends on the file format.
"""
self.set(self.load_config_files(f, **kwargs), self.meta_key)
def read_config(self, f: PathLike | Sequence[PathLike] | dict, **kwargs: Any) -> None:
"""
Read the config from specified JSON or YAML file.
The config content in the `self.config` dictionary.
Args:
f: filepath of the config file, the content must be a dictionary,
if providing a list of files, wil merge the content of them.
if providing a dictionary directly, use it as config.
kwargs: other arguments for ``json.load`` or ``yaml.safe_load``, depends on the file format.
"""
content = {self.meta_key: self.get(self.meta_key, {})}
content.update(self.load_config_files(f, **kwargs))
self.set(config=content)
def _do_resolve(self, config: Any, id: str = "") -> Any:
"""
Recursively resolve `self.config` to replace the relative ids with absolute ids, for example,
`@##A` means `A` in the upper level. and replace the macro tokens with target content,
The macro tokens start with "%", can be from another structured file, like:
``"%default_net"``, ``"%/data/config.json#net"``.
Note that the macro replacement doesn't support recursive macro tokens.
Args:
config: input config file to resolve.
id: id of the ``ConfigItem``, ``"#"`` in id are interpreted as special characters to
go one level further into the nested structures.
Use digits indexing from "0" for list or other strings for dict.
For example: ``"xform#5"``, ``"net#channels"``. ``""`` indicates the entire ``self.config``.
"""
if isinstance(config, (dict, list)):
for k, v in enumerate(config) if isinstance(config, list) else config.items():
sub_id = f"{id}{ID_SEP_KEY}{k}" if id != "" else k
config[k] = self._do_resolve(v, sub_id)
if isinstance(config, str):
config = self.resolve_relative_ids(id, config)
if config.startswith(MACRO_KEY):
path, ids = ConfigParser.split_path_id(config[len(MACRO_KEY) :])
parser = ConfigParser(config=self.get() if not path else ConfigParser.load_config_file(path))
return parser[ids]
return config
def resolve_macro_and_relative_ids(self):
"""
Recursively resolve `self.config` to replace the relative ids with absolute ids, for example,
`@##A` means `A` in the upper level. and replace the macro tokens with target content,
The macro tokens are marked as starting with "%", can be from another structured file, like:
``"%default_net"``, ``"%/data/config.json#net"``.
"""
self.set(self._do_resolve(config=deepcopy(self.get())))
def _do_parse(self, config: Any, id: str = "") -> None:
"""
Recursively parse the nested data in config source, add every item as `ConfigItem` to the resolver.
Args:
config: config source to parse.
id: id of the ``ConfigItem``, ``"#"`` in id are interpreted as special characters to
go one level further into the nested structures.
Use digits indexing from "0" for list or other strings for dict.
For example: ``"xform#5"``, ``"net#channels"``. ``""`` indicates the entire ``self.config``.
"""
if isinstance(config, (dict, list)):
for k, v in enumerate(config) if isinstance(config, list) else config.items():
sub_id = f"{id}{ID_SEP_KEY}{k}" if id != "" else k
self._do_parse(config=v, id=sub_id)
# copy every config item to make them independent and add them to the resolver
item_conf = deepcopy(config)
if ConfigComponent.is_instantiable(item_conf):
self.ref_resolver.add_item(ConfigComponent(config=item_conf, id=id, locator=self.locator))
elif ConfigExpression.is_expression(item_conf):
self.ref_resolver.add_item(ConfigExpression(config=item_conf, id=id, globals=self.globals))
else:
self.ref_resolver.add_item(ConfigItem(config=item_conf, id=id))
@classmethod
def load_config_file(cls, filepath: PathLike, **kwargs: Any) -> dict:
"""
Load config file with specified file path (currently support JSON and YAML files).
Args:
filepath: path of target file to load, supported postfixes: `.json`, `.yml`, `.yaml`.
kwargs: other arguments for ``json.load`` or ```yaml.safe_load``, depends on the file format.
"""
if not filepath:
return {}
_filepath: str = str(Path(filepath))
if not re.compile(cls.path_match, re.IGNORECASE).findall(_filepath):
raise ValueError(f'unknown file input: "{filepath}"')
with open(_filepath) as f:
if _filepath.lower().endswith(cls.suffixes[0]):
return json.load(f, **kwargs) # type: ignore[no-any-return]
if _filepath.lower().endswith(cls.suffixes[1:]):
return yaml.safe_load(f, **kwargs) # type: ignore[no-any-return]
raise ValueError(f"only support JSON or YAML config file so far, got name {_filepath}.")
@classmethod
def load_config_files(cls, files: PathLike | Sequence[PathLike] | dict, **kwargs: Any) -> dict:
"""
Load config files into a single config dict.
The latter config file in the list will override or add the former config file.
``"#"`` in the config keys are interpreted as special characters to go one level
further into the nested structures.
Args:
files: path of target files to load, supported postfixes: `.json`, `.yml`, `.yaml`.
kwargs: other arguments for ``json.load`` or ```yaml.safe_load``, depends on the file format.
"""
if isinstance(files, dict): # already a config dict
return files
parser = ConfigParser(config={})
for i in ensure_tuple(files):
for k, v in (cls.load_config_file(i, **kwargs)).items():
parser[k] = v
return parser.get() # type: ignore
@classmethod
def export_config_file(cls, config: dict, filepath: PathLike, fmt: str = "json", **kwargs: Any) -> None:
"""
Export the config content to the specified file path (currently support JSON and YAML files).
Args:
config: source config content to export.
filepath: target file path to save.
fmt: format of config content, currently support ``"json"`` and ``"yaml"``.
kwargs: other arguments for ``json.dump`` or ``yaml.safe_dump``, depends on the file format.
"""
_filepath: str = str(Path(filepath))
writer = look_up_option(fmt.lower(), {"json", "yaml"})
with open(_filepath, "w") as f:
if writer == "json":
json.dump(config, f, **kwargs)
return
if writer == "yaml":
return yaml.safe_dump(config, f, **kwargs)
raise ValueError(f"only support JSON or YAML config file so far, got {writer}.")
@classmethod
def split_path_id(cls, src: str) -> tuple[str, str]:
"""
Split `src` string into two parts: a config file path and component id.
The file path should end with `(json|yaml|yml)`. The component id should be separated by `#` if it exists.
If no path or no id, return "".
Args:
src: source string to split.
"""
result = re.compile(rf"({cls.suffix_match}(?=(?:{ID_SEP_KEY}.*)|$))", re.IGNORECASE).findall(src)
if not result:
return "", src # the src is a pure id
path_name = result[0][0] # at most one path_name
_, ids = src.rsplit(path_name, 1)
return path_name, ids[len(ID_SEP_KEY) :] if ids.startswith(ID_SEP_KEY) else ""
@classmethod
def resolve_relative_ids(cls, id: str, value: str) -> str:
"""
To simplify the reference or macro tokens ID in the nested config content, it's available to use
relative ID name which starts with the `ID_SEP_KEY`, for example, "@#A" means `A` in the same level,
`@##A` means `A` in the upper level.
It resolves the relative ids to absolute ids. For example, if the input data is:
.. code-block:: python
{
"A": 1,
"B": {"key": "@##A", "value1": 2, "value2": "%#value1", "value3": [3, 4, "@#1"]},
}
It will resolve `B` to `{"key": "@A", "value1": 2, "value2": "%B#value1", "value3": [3, 4, "@B#value3#1"]}`.
Args:
id: id name for current config item to compute relative id.
value: input value to resolve relative ids.
"""
# get the prefixes like: "@####", "%###", "@#"
prefixes = sorted(set().union(cls.relative_id_prefix.findall(value)), reverse=True)
current_id = id.split(ID_SEP_KEY)
for p in prefixes:
sym = ID_REF_KEY if ID_REF_KEY in p else MACRO_KEY
length = p[len(sym) :].count(ID_SEP_KEY)
if length > len(current_id):
raise ValueError(f"the relative id in `{value}` is out of the range of config content.")
if length == len(current_id):
new = "" # root id is `""`
else:
new = ID_SEP_KEY.join(current_id[:-length]) + ID_SEP_KEY
value = value.replace(p, sym + new)
return value
| [
"noreply@github.com"
] | Warvito.noreply@github.com |
0429f7a6497c3ad8beb348791d38ddfb746c4c92 | 91dfd2193d73d4c0f547706bb1a954025dd8c9fd | /autolens/pipeline/phase/imaging/result.py | 925f183b193e70609d45b35b8d5a11c8055b42dc | [
"MIT"
] | permissive | FreeworkEarth/PyAutoLens | 13913d6a8b9696f225e85164e62dff3251aa7831 | 434f4bb329c93bcdc11b1f87962e7e2bd1097d9b | refs/heads/master | 2023-03-21T19:51:58.245105 | 2021-03-18T14:32:13 | 2021-03-18T14:32:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,075 | py | from autoconf import conf
import autoarray as aa
import numpy as np
from autogalaxy.galaxy import galaxy as g
from autolens.pipeline.phase import dataset
class Result(dataset.Result):
@property
def max_log_likelihood_fit(self):
hyper_image_sky = self.analysis.hyper_image_sky_for_instance(
instance=self.instance
)
hyper_background_noise = self.analysis.hyper_background_noise_for_instance(
instance=self.instance
)
return self.analysis.masked_imaging_fit_for_tracer(
tracer=self.max_log_likelihood_tracer,
hyper_image_sky=hyper_image_sky,
hyper_background_noise=hyper_background_noise,
)
@property
def unmasked_model_image(self):
return self.max_log_likelihood_fit.unmasked_blurred_image
@property
def unmasked_model_image_of_planes(self):
return self.max_log_likelihood_fit.unmasked_blurred_image_of_planes
@property
def unmasked_model_image_of_planes_and_galaxies(self):
fit = self.max_log_likelihood_fit
return fit.unmasked_blurred_image_of_planes_and_galaxies
def image_for_galaxy(self, galaxy: g.Galaxy) -> np.ndarray:
"""
Parameters
----------
galaxy
A galaxy used in this phase
Returns
-------
ndarray or None
A numpy arrays giving the model image of that galaxy
"""
return self.max_log_likelihood_fit.galaxy_model_image_dict[galaxy]
@property
def image_galaxy_dict(self) -> {str: g.Galaxy}:
"""
A dictionary associating galaxy names with model images of those galaxies
"""
return {
galaxy_path: self.image_for_galaxy(galaxy)
for galaxy_path, galaxy in self.path_galaxy_tuples
}
@property
def hyper_galaxy_image_path_dict(self):
"""
A dictionary associating 1D hyper_galaxies galaxy images with their names.
"""
hyper_minimum_percent = conf.instance["general"]["hyper"][
"hyper_minimum_percent"
]
hyper_galaxy_image_path_dict = {}
for path, galaxy in self.path_galaxy_tuples:
galaxy_image = self.image_galaxy_dict[path]
if not np.all(galaxy_image == 0):
minimum_galaxy_value = hyper_minimum_percent * max(galaxy_image)
galaxy_image[galaxy_image < minimum_galaxy_value] = minimum_galaxy_value
hyper_galaxy_image_path_dict[path] = galaxy_image
return hyper_galaxy_image_path_dict
@property
def hyper_model_image(self):
hyper_model_image = aa.Array2D.manual_mask(
array=np.zeros(self.mask.mask_sub_1.pixels_in_mask),
mask=self.mask.mask_sub_1,
)
for path, galaxy in self.path_galaxy_tuples:
hyper_model_image += self.hyper_galaxy_image_path_dict[path]
return hyper_model_image
| [
"james.w.nightingale@durham.ac.uk"
] | james.w.nightingale@durham.ac.uk |
932d48399c77d21d7a6888b2120007a165bd202e | 38f9e6dc2643d955dbf04c4cfd5430c441f72b44 | /pyweb/css/__init__.py | b56e53aab520ef3401bc5eae0c36e5413fe1c3df | [
"MIT"
] | permissive | Dmunch04/PyWeb | 0119b3eaf4e456376603a3b43b17c1a4dc8eb5f2 | 459d3953e4a31a91619d1911d9eda2b2e14b721c | refs/heads/master | 2022-07-29T00:27:55.383733 | 2022-07-13T21:30:56 | 2022-07-13T21:30:56 | 177,850,308 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | from pyweb.css.animation import *
from pyweb.css.backdropfilter import *
from pyweb.css.border import *
from pyweb.css.color import *
from pyweb.css.gradient import *
from pyweb.css.position import *
from pyweb.css.style_value import *
from pyweb.css.style import *
from pyweb.css.unit import *
| [
"daniellmunch@gmail.com"
] | daniellmunch@gmail.com |
84e7f1e74bf3f7a0b387804908de4724c54b6157 | 128d593efd591dc83a3aef2d4bfad39e73ee637e | /python_code/complete/no032 | f900a4b44d95b290e9c26d3ca7550214d7c86d20 | [] | no_license | jwan/ProjectEuler | 93be87d89cc58516d503dd5ed53bdbd706748cda | 65aec4f87b8899db6bad94a36412a28a4b4527e9 | refs/heads/master | 2021-01-17T08:21:46.654529 | 2011-05-02T23:11:35 | 2011-05-02T23:11:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,469 | #!/usr/bin/env python
# an n-digit number is >= 10^(n-1)
# n*m >= 10^(n+m-2), must have at least n + m - 1 digits
#subsets of cardinality 5,6
from python_code.decorators import euler_timer
def all_orderings(list_):
if len(list_) == 1:
return [list_]
result = []
for elt in list_:
sublist = list_[:]
sublist.remove(elt)
result.extend([[elt] + ordering
for ordering in all_orderings(sublist)])
return result
# Will take a list and break it at various places, returning
# the product of the integers formed
def possible_products(list_):
result = []
for i in range(1,len(list_)):
left = list_[:i]
left = int("".join([str(elt) for elt in left]))
right = list_[i:]
right = int("".join([str(elt) for elt in right]))
result.append(left*right)
return result
@euler_timer(32)
def main():
products = set()
candidates = all_orderings(range(1,10))
for candidate in candidates:
prods = possible_products(candidate[:5])
last4 = candidate[-4:]
last4 = int("".join([str(elt) for elt in last4]))
if last4 in prods:
products.add(last4)
prods = possible_products(candidate[:6])
last3 = candidate[-3:]
last3 = int("".join([str(elt) for elt in last3]))
if last3 in prods:
products.add(last3)
print sum(products)
if __name__ == "__main__":
main()
| [
"dan@counsyl.com"
] | dan@counsyl.com | |
0d3c14efd033d21a13c78aebe4b02a60e3327ca1 | 7fb469e93ff89b1c697d5a53a39188127e50d272 | /utils/migration_gitlab.py | d1c48ce75256887b1e71426bfbd77b6e43a5bfee | [] | no_license | seekplum/seekplum | fde98f93145a78fc030032a4499090583aba154a | 9e66f5e62214e566528003d434ef2b74877419fd | refs/heads/master | 2023-02-13T19:00:49.866130 | 2023-01-31T08:55:19 | 2023-02-02T04:33:45 | 182,075,292 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,358 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import logging.handlers
import os
import shutil
import subprocess
import uuid
from datetime import datetime
from multiprocessing import Process
color = lambda c, s: "\033[3%sm%s\033[0m" % (c, s)
red = lambda s: color(1, s)
green = lambda s: color(2, s)
def print_ok(check_status):
fmt = green("[ OK ] %s" % check_status)
print fmt
def print_error(check_status):
fmt = red("[ ERROR ] %s" % check_status)
print fmt
def get_logger(level=None):
"""设置日志格式,路径
"""
if level is None:
level = logging.INFO
file_name = os.path.basename(__file__).rsplit(".", 1)[0]
log_file_name = "%s.log" % file_name
_logger = logging.getLogger(file_name)
formatter = logging.Formatter('[%(name)s %(levelname)s %(asctime)s %(module)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
log_file_handle = logging.handlers.RotatingFileHandler(log_file_name, maxBytes=10 * 1024 * 1024, backupCount=10)
log_file_handle.setFormatter(formatter)
_logger.addHandler(log_file_handle)
_logger.setLevel(level)
return _logger
logger = get_logger()
temp_dir = "/tmp" # 临时目录
def run_cmd(cmd, force=True):
"""执行系统命令
:param cmd: str 系统命令
:param force: bool 执行命令出错是否抛出异常
:rtype str
:return 执行 `cmd` 命令的输出结果
"""
logger.info("cmd: %s" % cmd)
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
stdout, stderr = p.communicate()
if stderr:
logger.error("cmd stderr: %s" % stderr)
if not force:
raise Exception("cmd: %s, stderr: %s" % (cmd, stderr))
else:
logger.info("cmd result: %s" % stdout)
return stdout
def md5sum(file_name):
"""计算文件的md5值
:param file_name: str 文件路径
"""
cmd = "md5sum {}".format(file_name)
file_md5 = run_cmd(cmd).split(" ")[0].strip()
return file_md5
def get_time_str():
"""日期字符串
:rtype str
:return
2017-01-05_10-45-00
"""
_str = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
return _str
def update(group, project):
"""执行克隆重新推送到新仓库操作
1. 执行克隆操作
2. 重新推送到新的地址
:param group: str 组名
:param project: str 项目名
"""
path = os.path.join(temp_dir, project, get_time_str()) # 克隆到本地的项目路径
try:
# 执行克隆操作
cmd1 = "git clone --bare git@192.168.1.121:{group}/{project}.git {path}".format(project=project,
path=path,
group=group)
run_cmd(cmd1)
# 重新推送到新的地址
cmd2 = "cd {path} && git push --mirror git@gitlab.woqutech.com:{group}/{project}.git".format(path=path,
project=project,
group=group)
run_cmd(cmd2)
except Exception as e:
print_error(e.message)
else:
if os.path.exists(path):
shutil.rmtree(path, ignore_errors=False)
def check(group, project):
"""检查log/branch/tag是否一致
把 git log / git branch -a / git tag 三条命令的执行结果重定向到文件中.看文件md5值是否一致
"""
check_cmd = [
"git log --color --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) "
"%C(bold blue)<%an>%Creset' --abbrev-commit",
"git branch -a",
"git tag"
]
hosts = [
]
file_name = "{}_commit.txt".format(project)
file_md5 = set()
for host in hosts:
path = os.path.join(temp_dir, "{}_{}_{}".format(project, host, get_time_str())) # 克隆到本地的项目路径
md5 = uuid.uuid4().hex
try:
cmd1 = "git clone git@{host}:{group}/{project}.git {path}".format(project=project,
path=path,
host=host,
group=group
)
run_cmd(cmd1)
file_path = os.path.join(path, file_name)
# 把检查命令的结果重定向到文件中
for cmd in check_cmd:
cmd2 = "cd {} && {} >> {}".format(path, cmd, file_path)
run_cmd(cmd2)
except Exception as e:
print_error(e.message)
else:
md5 = md5sum(file_path)
finally:
file_md5.add(md5)
if os.path.exists(path):
shutil.rmtree(path, ignore_errors=False)
# 在后面打印的 . 数
count = 80 - (len(group) + len(project))
count = count if count > 0 else 0
text = count * "."
# 对比两个文件的md5值是否一致
if len(file_md5) == 1:
print_ok("{}/{} {}".format(group, project, text))
else:
print_error("{}/{} {}".format(group, project, text))
def run(group, project):
"""执行克隆重新推送到新仓库操作
:param group: str 组名
:param project: str 项目名
"""
# update(group, project)
check(group, project)
def main():
projects = [
{
"group": "",
"projects": [
]
}
]
process_list = []
for info in projects:
projects = info["projects"] # 项目名
group = info["group"] # 组名
for project in projects:
process = Process(target=run, args=(group, project,))
process.start()
process_list.append(process)
for process in process_list:
process.join()
if __name__ == '__main__':
main()
| [
"1131909224@qq.com"
] | 1131909224@qq.com |
de5d5f1bc08a672e7ae40f702255876007218523 | 0a2cc497665f2a14460577f129405f6e4f793791 | /sdk/formrecognizer/azure-ai-formrecognizer/tests/test_invoice_from_url.py | d083e353c82ae6db4c8b4cf95f772515f99ba3bc | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | hivyas/azure-sdk-for-python | 112158aa9e1dd6e30cf6b3dde19f5db6ea2a577b | 8b3258fa45f5dc25236c22ad950e48aa4e1c181c | refs/heads/master | 2023-06-17T12:01:26.392186 | 2021-05-18T19:56:01 | 2021-05-18T19:56:01 | 313,761,277 | 1 | 1 | MIT | 2020-12-02T17:48:22 | 2020-11-17T22:42:00 | Python | UTF-8 | Python | false | false | 13,880 | py | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import functools
from io import BytesIO
from datetime import date, time
from azure.core.exceptions import ClientAuthenticationError, ServiceRequestError, HttpResponseError
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer._generated.models import AnalyzeOperationResult
from azure.ai.formrecognizer._response_handlers import prepare_prebuilt_models
from azure.ai.formrecognizer import FormRecognizerClient, FormContentType, FormRecognizerApiVersion
from testcase import FormRecognizerTest
from preparers import GlobalClientPreparer as _GlobalClientPreparer
from preparers import FormRecognizerPreparer
GlobalClientPreparer = functools.partial(_GlobalClientPreparer, FormRecognizerClient)
class TestInvoiceFromUrl(FormRecognizerTest):
@FormRecognizerPreparer()
def test_polling_interval(self, formrecognizer_test_endpoint, formrecognizer_test_api_key):
client = FormRecognizerClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key), polling_interval=7)
self.assertEqual(client._client._config.polling_interval, 7)
poller = client.begin_recognize_invoices_from_url(self.invoice_url_pdf, polling_interval=6)
poller.wait()
self.assertEqual(poller._polling_method._timeout, 6)
poller2 = client.begin_recognize_invoices_from_url(self.invoice_url_pdf)
poller2.wait()
self.assertEqual(poller2._polling_method._timeout, 7) # goes back to client default
@FormRecognizerPreparer()
@GlobalClientPreparer()
def test_invoice_encoded_url(self, client):
try:
poller = client.begin_recognize_invoices_from_url("https://fakeuri.com/blank%20space")
except HttpResponseError as e:
self.assertIn("https://fakeuri.com/blank%20space", e.response.request.body)
@FormRecognizerPreparer()
def test_invoice_url_bad_endpoint(self, formrecognizer_test_endpoint, formrecognizer_test_api_key):
with self.assertRaises(ServiceRequestError):
client = FormRecognizerClient("http://notreal.azure.com", AzureKeyCredential(formrecognizer_test_api_key))
poller = client.begin_recognize_invoices_from_url(self.invoice_url_pdf)
@FormRecognizerPreparer()
def test_authentication_bad_key(self, formrecognizer_test_endpoint, formrecognizer_test_api_key):
client = FormRecognizerClient(formrecognizer_test_endpoint, AzureKeyCredential("xxxx"))
with self.assertRaises(ClientAuthenticationError):
poller = client.begin_recognize_invoices_from_url(self.invoice_url_tiff)
@FormRecognizerPreparer()
@GlobalClientPreparer()
def test_invoice_bad_url(self, client):
with self.assertRaises(HttpResponseError):
poller = client.begin_recognize_invoices_from_url("https://badurl.jpg")
@FormRecognizerPreparer()
@GlobalClientPreparer()
def test_invoice_url_pass_stream(self, client):
with open(self.invoice_tiff, "rb") as invoice:
with self.assertRaises(HttpResponseError):
poller = client.begin_recognize_invoices_from_url(invoice)
@FormRecognizerPreparer()
@GlobalClientPreparer()
def test_invoice_url_transform_pdf(self, client):
responses = []
def callback(raw_response, _, headers):
analyze_result = client._deserialize(AnalyzeOperationResult, raw_response)
extracted_invoice = prepare_prebuilt_models(analyze_result)
responses.append(analyze_result)
responses.append(extracted_invoice)
poller = client.begin_recognize_invoices_from_url(
invoice_url=self.invoice_url_pdf,
include_field_elements=True,
cls=callback
)
result = poller.result()
raw_response = responses[0]
returned_model = responses[1]
invoice = returned_model[0]
actual = raw_response.analyze_result.document_results[0].fields
read_results = raw_response.analyze_result.read_results
document_results = raw_response.analyze_result.document_results
page_results = raw_response.analyze_result.page_results
self.assertFormFieldsTransformCorrect(invoice.fields, actual, read_results)
# check page range
self.assertEqual(invoice.page_range.first_page_number, document_results[0].page_range[0])
self.assertEqual(invoice.page_range.last_page_number, document_results[0].page_range[1])
# Check page metadata
self.assertFormPagesTransformCorrect(invoice.pages, read_results, page_results)
@FormRecognizerPreparer()
@GlobalClientPreparer()
def test_invoice_url_transform_tiff(self, client):
responses = []
def callback(raw_response, _, headers):
analyze_result = client._deserialize(AnalyzeOperationResult, raw_response)
extracted_invoice = prepare_prebuilt_models(analyze_result)
responses.append(analyze_result)
responses.append(extracted_invoice)
poller = client.begin_recognize_invoices_from_url(
invoice_url=self.invoice_url_tiff,
include_field_elements=True,
cls=callback
)
result = poller.result()
raw_response = responses[0]
returned_model = responses[1]
invoice = returned_model[0]
actual = raw_response.analyze_result.document_results[0].fields
read_results = raw_response.analyze_result.read_results
document_results = raw_response.analyze_result.document_results
page_results = raw_response.analyze_result.page_results
self.assertFormFieldsTransformCorrect(invoice.fields, actual, read_results)
# check page range
self.assertEqual(invoice.page_range.first_page_number, document_results[0].page_range[0])
self.assertEqual(invoice.page_range.last_page_number, document_results[0].page_range[1])
# Check page metadata
self.assertFormPagesTransformCorrect(invoice.pages, read_results, page_results)
@FormRecognizerPreparer()
@GlobalClientPreparer()
def test_invoice_url_multipage_transform_pdf(self, client):
responses = []
def callback(raw_response, _, headers):
analyze_result = client._deserialize(AnalyzeOperationResult, raw_response)
extracted_invoice = prepare_prebuilt_models(analyze_result)
responses.append(analyze_result)
responses.append(extracted_invoice)
poller = client.begin_recognize_invoices_from_url(
invoice_url=self.multipage_vendor_url_pdf,
include_field_elements=True,
cls=callback
)
result = poller.result()
raw_response = responses[0]
returned_models = responses[1]
read_results = raw_response.analyze_result.read_results
document_results = raw_response.analyze_result.document_results
page_results = raw_response.analyze_result.page_results
self.assertEqual(1, len(returned_models))
returned_model = returned_models[0]
self.assertEqual(2, len(returned_model.pages))
self.assertEqual(1, returned_model.page_range.first_page_number)
self.assertEqual(2, returned_model.page_range.last_page_number)
self.assertEqual(1, len(document_results))
document_result = document_results[0]
self.assertEqual(1, document_result.page_range[0]) # checking first page number
self.assertEqual(2, document_result.page_range[1]) # checking last page number
for invoice, document_result in zip(returned_models, document_results):
self.assertFormFieldsTransformCorrect(invoice.fields, document_result.fields, read_results)
self.assertFormPagesTransformCorrect(returned_model.pages, read_results, page_results)
@FormRecognizerPreparer()
@GlobalClientPreparer()
def test_invoice_tiff(self, client):
poller = client.begin_recognize_invoices_from_url(self.invoice_url_tiff)
result = poller.result()
self.assertEqual(len(result), 1)
invoice = result[0]
# check dict values
self.assertEqual(invoice.fields.get("VendorName").value, "Contoso")
self.assertEqual(invoice.fields.get("VendorAddress").value, '1 Redmond way Suite 6000 Redmond, WA 99243')
self.assertEqual(invoice.fields.get("CustomerAddressRecipient").value, "Microsoft")
self.assertEqual(invoice.fields.get("CustomerAddress").value, '1020 Enterprise Way Sunnayvale, CA 87659')
self.assertEqual(invoice.fields.get("CustomerName").value, "Microsoft")
self.assertEqual(invoice.fields.get("InvoiceId").value, '34278587')
self.assertEqual(invoice.fields.get("InvoiceDate").value, date(2017, 6, 18))
self.assertEqual(invoice.fields.get("Items").value[0].value["Amount"].value, 56651.49)
self.assertEqual(invoice.fields.get("DueDate").value, date(2017, 6, 24))
@FormRecognizerPreparer()
@GlobalClientPreparer()
def test_invoice_multipage_pdf(self, client):
poller = client.begin_recognize_invoices_from_url(self.multipage_vendor_url_pdf)
result = poller.result()
self.assertEqual(len(result), 1)
invoice = result[0]
self.assertEqual("prebuilt:invoice", invoice.form_type)
self.assertEqual(1, invoice.page_range.first_page_number)
self.assertEqual(2, invoice.page_range.last_page_number)
vendor_name = invoice.fields["VendorName"]
self.assertEqual(vendor_name.value, 'Southridge Video')
self.assertEqual(vendor_name.value_data.page_number, 2)
remittance_address_recipient = invoice.fields["RemittanceAddressRecipient"]
self.assertEqual(remittance_address_recipient.value, "Contoso Ltd.")
self.assertEqual(remittance_address_recipient.value_data.page_number, 1)
remittance_address = invoice.fields["RemittanceAddress"]
self.assertEqual(remittance_address.value, '2345 Dogwood Lane Birch, Kansas 98123')
self.assertEqual(remittance_address.value_data.page_number, 1)
@FormRecognizerPreparer()
@GlobalClientPreparer()
def test_invoice_pdf_include_field_elements(self, client):
poller = client.begin_recognize_invoices_from_url(self.invoice_url_pdf, include_field_elements=True)
result = poller.result()
self.assertEqual(len(result), 1)
invoice = result[0]
self.assertFormPagesHasValues(invoice.pages)
for field in invoice.fields.values():
if field.name == "Items":
continue
self.assertFieldElementsHasValues(field.value_data.field_elements, invoice.page_range.first_page_number)
self.assertInvoiceItemsHasValues(invoice.fields["Items"].value, invoice.page_range.first_page_number, True)
# check dict values
self.assertEqual(invoice.fields.get("VendorName").value, "Contoso")
self.assertEqual(invoice.fields.get("VendorAddress").value, '1 Redmond way Suite 6000 Redmond, WA 99243')
self.assertEqual(invoice.fields.get("CustomerAddressRecipient").value, "Microsoft")
self.assertEqual(invoice.fields.get("CustomerAddress").value, '1020 Enterprise Way Sunnayvale, CA 87659')
self.assertEqual(invoice.fields.get("CustomerName").value, "Microsoft")
self.assertEqual(invoice.fields.get("InvoiceId").value, '34278587')
self.assertEqual(invoice.fields.get("InvoiceDate").value, date(2017, 6, 18))
# self.assertEqual(invoice.fields.get("InvoiceTotal").value, 56651.49) FIXME: not finding InvoiceTotal
self.assertEqual(invoice.fields.get("DueDate").value, date(2017, 6, 24))
@FormRecognizerPreparer()
@GlobalClientPreparer()
@pytest.mark.live_test_only
def test_invoice_continuation_token(self, client):
initial_poller = client.begin_recognize_invoices_from_url(self.invoice_url_tiff)
cont_token = initial_poller.continuation_token()
poller = client.begin_recognize_invoices_from_url(None, continuation_token=cont_token)
result = poller.result()
self.assertIsNotNone(result)
initial_poller.wait() # necessary so azure-devtools doesn't throw assertion error
@FormRecognizerPreparer()
@GlobalClientPreparer(client_kwargs={"api_version": FormRecognizerApiVersion.V2_0})
def test_invoice_v2(self, client):
with pytest.raises(ValueError) as e:
client.begin_recognize_invoices_from_url(self.invoice_url_tiff)
assert "Method 'begin_recognize_invoices_from_url' is only available for API version V2_1 and up" in str(e.value)
@FormRecognizerPreparer()
@GlobalClientPreparer()
def test_invoice_locale_specified(self, client):
poller = client.begin_recognize_invoices_from_url(self.invoice_url_pdf, locale="en-US")
assert 'en-US' == poller._polling_method._initial_response.http_response.request.query['locale']
result = poller.result()
assert result
@FormRecognizerPreparer()
@GlobalClientPreparer()
def test_invoice_locale_error(self, client):
with pytest.raises(HttpResponseError) as e:
client.begin_recognize_invoices_from_url(self.invoice_url_pdf, locale="not a locale")
assert "locale" in e.value.error.message
@FormRecognizerPreparer()
@GlobalClientPreparer()
def test_pages_kwarg_specified(self, client):
poller = client.begin_recognize_invoices_from_url(self.invoice_url_pdf, pages=["1"])
assert '1' == poller._polling_method._initial_response.http_response.request.query['pages']
result = poller.result()
assert result
| [
"noreply@github.com"
] | hivyas.noreply@github.com |
acb8459364de29de9dc72b61525dc9fdfba1d32b | 94a2c4417c1fdd8577a75b09a17912ebae129e6c | /test/test_props.py | efa2d0c3525309dfcb01666e5b8b0fe8ed37bda3 | [
"MIT"
] | permissive | slavaGanzin/ramda.py | ad88a3cf6e7eb1461d4a09aad35ae1c18ca32db8 | 634bfbe0dcb300315ded327756cb3e33241589b8 | refs/heads/master | 2023-01-23T04:43:48.485314 | 2023-01-06T10:11:53 | 2023-01-06T10:11:53 | 142,413,822 | 68 | 7 | MIT | 2021-12-22T13:59:56 | 2018-07-26T08:43:31 | Python | UTF-8 | Python | false | false | 469 | py | from ramda.private.asserts import *
from ramda import *
def test_props():
    """props picks values in requested-key order, yielding None for missing keys,
    and composes cleanly with other ramda combinators."""
    assert_equal(props(["x", "y"], {"x": 1, "y": 2}), [1, 2])
    # Missing keys yield None; output order follows the key list, not the dict.
    assert_equal(props(["c", "a", "b"], {"b": 2, "a": 1}), [None, 1, 2])
    full_name = compose(join(" "), props(["first", "last"]))
    # The original duplicated this call once with the result discarded; the
    # single asserted call below is sufficient.
    assert_equal(
        full_name({"last": "Bullet-Tooth", "age": 33, "first": "Tony"}),
        "Tony Bullet-Tooth",
    )
| [
"slava.ganzin@gmail.com"
] | slava.ganzin@gmail.com |
5b10d745f5ec3e972d48224fa8217d4cc56ab1a6 | 4ae7cdc9292009398a292bdf6bee61428559fdfd | /SourceCodeTools/models/graph/NodeEmbedder.py | 28ab60152da81c09bc37998d1ef4801169fe359f | [] | no_license | VitalyRomanov/method-embedding | 52a4e6e7bf726b4db0872902a0eaf1d8cb82b4a8 | 1c8f0fc04eb1f495555272d9747fd2fea68525e1 | refs/heads/master | 2023-08-31T17:39:04.051912 | 2023-01-08T05:02:52 | 2023-01-08T05:02:52 | 219,153,628 | 5 | 7 | null | 2023-07-22T20:27:20 | 2019-11-02T12:54:12 | Python | UTF-8 | Python | false | false | 15,753 | py | from SourceCodeTools.nlp import token_hasher
import torch
import torch.nn as nn
class NodeEmbedder(nn.Module):
    """Hash-bucket embedding table for graph nodes.

    Each embeddable node's name is hashed (via ``token_hasher``) into one of
    ``n_buckets`` rows of an ``nn.Embedding``; the extra row at index
    ``n_buckets`` is the padding row used for unknown or masked nodes.
    Lookups can be keyed either by ``(type, typed_id)`` pairs or by global
    graph ids.
    """
    def __init__(self, nodes, emb_size, dtype=None, n_buckets=500000, pretrained=None):
        super(NodeEmbedder, self).__init__()
        self.init(nodes, emb_size, dtype, n_buckets, pretrained)

    def init(self, nodes, emb_size, dtype=None, n_buckets=500000, pretrained=None):
        """Build the lookup tables and the bucket embedding.

        :param nodes: table of nodes (pandas-style, judging by ``.query``) with
            columns ``global_graph_id``, ``typed_id``, ``type``, ``type_backup``
            and ``embeddable_name``.
        :param emb_size: embedding dimensionality.
        :param dtype: stored but the embedding itself defaults to float32.
        :param n_buckets: number of hash buckets (one extra padding row is added).
        :param pretrained: optional (n_buckets, emb_size) weight matrix.
        """
        self.emb_size = emb_size
        self.dtype = dtype
        if dtype is None:
            self.dtype = torch.float32
        self.n_buckets = n_buckets

        self.buckets = None

        embedding_field = "embeddable_name"

        # Only nodes flagged as embeddable take part in the lookup tables.
        nodes_with_embeddings = nodes.query("embeddable == True")[
            ['global_graph_id', 'typed_id', 'type', 'type_backup', embedding_field]
        ]

        type_name = list(zip(nodes_with_embeddings['type_backup'], nodes_with_embeddings[embedding_field]))

        # (type, typed_id) -> (original type, embeddable name)
        self.node_info = dict(zip(
            list(zip(nodes_with_embeddings['type'], nodes_with_embeddings['typed_id'])),
            type_name
        ))

        # Guards against duplicate (type, typed_id) keys collapsing in the dict.
        assert len(nodes_with_embeddings) == len(self.node_info)

        # global_graph_id -> (original type, embeddable name)
        self.node_info_global = dict(zip(
            nodes_with_embeddings['global_graph_id'],
            type_name
        ))

        if pretrained is None:
            self._create_buckets()
        else:
            self._create_buckets_from_pretrained(pretrained)

    def _create_buckets(self):
        # Row n_buckets is the padding row for unknown/masked keys; sparse
        # gradients are used for the (potentially large) table.
        self.buckets = nn.Embedding(self.n_buckets + 1, self.emb_size, padding_idx=self.n_buckets, sparse=True)

    def _create_buckets_from_pretrained(self, pretrained):
        """Initialize the bucket table from a pretrained weight matrix,
        appending a zero padding row at index n_buckets."""
        assert pretrained.shape[1] == self.emb_size

        import numpy as np
        weights_with_pad = torch.tensor(np.vstack([pretrained, np.zeros((1, self.emb_size), dtype=np.float32)]))

        self.buckets = nn.Embedding.from_pretrained(weights_with_pad, freeze=False, padding_idx=self.n_buckets, sparse=True)

    def _get_embedding_from_node_info(self, keys, node_info, masked=None):
        """Map lookup keys to bucket embeddings.

        Keys that are unknown or explicitly masked resolve to the padding row.
        ``masked`` may be a dict {type: iterable of ids}, which is flattened
        into a set of (type, id) pairs, or an already-flat collection.
        """
        idxs = []

        if isinstance(masked, dict):
            new_masked = set()
            for ntype, nids in masked.items():
                for nid in nids:
                    new_masked.add((ntype, nid))
            masked = new_masked

        for key in keys:
            # Unknown keys and masked keys both map to the padding row.
            if key not in node_info or masked is not None and key in masked:
            # if key in node_info and key not in masked:
                idxs.append(self.n_buckets)
            else:
                real_type, name = node_info[key]
                idxs.append(token_hasher(name, self.n_buckets))

        return self.buckets(torch.LongTensor(idxs))

    def _get_embeddings_with_type(self, node_type, ids, masked=None):
        # Keys are (node_type, typed_id) pairs against the typed lookup table.
        type_ids = ((node_type, id_) for id_ in ids)
        return self._get_embedding_from_node_info(type_ids, self.node_info, masked=masked)

    def _get_embeddings_global(self, ids, masked=None):
        # Keys are global graph ids against the global lookup table.
        return self._get_embedding_from_node_info(ids, self.node_info_global, masked=masked)

    def get_embeddings(self, node_type=None, node_ids=None, masked=None):
        """Return embeddings for ``node_ids``; global lookup when node_type is None."""
        assert node_ids is not None
        if node_type is None:
            return self._get_embeddings_global(node_ids, masked=masked)
        else:
            return self._get_embeddings_with_type(node_type, node_ids, masked=masked)

    def forward(self, node_type=None, node_ids=None, train_embeddings=True, masked=None):
        """Lookup entry point; gradients are disabled when train_embeddings is False."""
        if train_embeddings:
            return self.get_embeddings(node_type, node_ids.tolist(), masked=masked)
        else:
            with torch.set_grad_enabled(False):
                return self.get_embeddings(node_type, node_ids.tolist(), masked=masked)
class NodeIdEmbedder(NodeEmbedder):
    """Embedder keyed directly by global graph id (one bucket row per id)
    instead of hashing node names.

    NOTE(review): the ``nodes``/``emb_size`` defaults of None are passed
    straight into init(), which dereferences them — callers presumably must
    always supply real values; confirm before relying on the defaults.
    """
    def __init__(self, nodes=None, emb_size=None, dtype=None, n_buckets=500000, pretrained=None):
        super(NodeIdEmbedder, self).__init__(nodes, emb_size, dtype, n_buckets, pretrained)

    def init(self, nodes, emb_size, dtype=None, n_buckets=500000, pretrained=None):
        """Build the (type, typed_id) -> global_graph_id map and the bucket table.

        ``pretrained`` is accepted for signature compatibility with the parent
        but is not used here — a fresh bucket table is always created.
        """
        self.emb_size = emb_size
        self.dtype = dtype
        if dtype is None:
            self.dtype = torch.float32
        self.n_buckets = n_buckets

        self.buckets = None

        embedding_field = "embeddable_name"

        nodes_with_embeddings = nodes.query("embeddable == True")[
            ['global_graph_id', 'typed_id', 'type', 'type_backup', embedding_field]
        ]

        # type -> {typed_id -> global_graph_id}, used to translate typed ids
        # into global ids at lookup time.
        self.to_global_map = {}
        for global_graph_id, typed_id, type_, type_backup, name in nodes_with_embeddings.values:
            if type_ not in self.to_global_map:
                self.to_global_map[type_] = {}
            self.to_global_map[type_][typed_id] = global_graph_id

        self._create_buckets()

    def get_embeddings(self, node_type=None, node_ids=None, masked=None):
        """Embed ids directly as bucket indices.

        ``masked`` is accepted for interface compatibility with the parent
        class but is ignored here. When ``node_type`` is given, typed ids are
        first translated to global ids (KeyError for unknown ids).
        """
        assert node_ids is not None
        if node_type is not None:
            node_ids = list(map(lambda local_id: self.to_global_map[node_type][local_id], node_ids))
        return self.buckets(torch.LongTensor(node_ids))
# class SimpleNodeEmbedder(nn.Module):
# def __init__(self, dataset, emb_size, dtype=None, n_buckets=500000, pretrained=None):
# super(SimpleNodeEmbedder, self).__init__()
#
# self.emb_size = emb_size
# self.dtype = dtype
# if dtype is None:
# self.dtype = torch.float32
# self.n_buckets = n_buckets
#
# self.buckets = None
#
# from SourceCodeTools.code.data.sourcetrail.sourcetrail_ast_edges import PythonSharedNodes
#
# leaf_types = PythonSharedNodes.shared_node_types
#
# if len(dataset.nodes.query("type_backup == 'subword'")) > 0:
# # some of the types should not be embedded if subwords were generated
# leaf_types = leaf_types - {"#attr#"}
# leaf_types = leaf_types - {"#keyword#"}
#
# nodes_with_embeddings = dataset.nodes[
# dataset.nodes['type_backup'].apply(lambda type_: type_ in leaf_types)
# ][['global_graph_id', 'typed_id', 'type', 'type_backup', 'name']]
#
# type_name = list(zip(nodes_with_embeddings['type_backup'], nodes_with_embeddings['name']))
#
# self.node_info = dict(zip(
# list(zip(nodes_with_embeddings['type'], nodes_with_embeddings['typed_id'])),
# type_name
# ))
#
# assert len(nodes_with_embeddings) == len(self.node_info)
#
# self.node_info_global = dict(zip(
# nodes_with_embeddings['global_graph_id'],
# type_name
# ))
#
# if pretrained is None:
# self._create_buckets()
# else:
# self._create_buckets_from_pretrained(pretrained)
#
# def _create_buckets(self):
# self.buckets = nn.Embedding(self.n_buckets + 1, self.emb_size, padding_idx=self.n_buckets)
#
# def _create_buckets_from_pretrained(self, pretrained):
#
# assert pretrained.n_dims == self.emb_size
#
# import numpy as np
#
# embs_init = np.random.randn(self.n_buckets, self.emb_size).astype(np.float32)
#
# for word in pretrained.keys():
# ind = token_hasher(word, self.n_buckets)
# embs_init[ind, :] = pretrained[word]
#
# from SourceCodeTools.code.python_tokens_to_bpe_subwords import python_ops_to_bpe
#
# def op_embedding(op_tokens):
# embedding = None
# for token in op_tokens:
# token_emb = pretrained.get(token, None)
# if embedding is None:
# embedding = token_emb
# else:
# embedding = embedding + token_emb
# return embedding
#
# for op, op_tokens in python_ops_to_bpe.items():
# op_emb = op_embedding(op_tokens)
# if op_emb is not None:
# op_ind = token_hasher(op, self.n_buckets)
# embs_init[op_ind, :] = op_emb
#
# weights_with_pad = torch.tensor(np.vstack([embs_init, np.zeros((1, self.emb_size), dtype=np.float32)]))
#
# self.buckets = nn.Embedding.from_pretrained(weights_with_pad, freeze=False, padding_idx=self.n_buckets)
#
# def _get_embedding_from_node_info(self, keys, node_info):
# idxs = []
#
# for key in keys:
# if key in node_info:
# real_type, name = node_info[key]
# idxs.append(token_hasher(name, self.n_buckets))
# else:
# idxs.append(self.n_buckets)
#
# return self.buckets(torch.LongTensor(idxs))
#
# def _get_embeddings_with_type(self, node_type, ids):
# type_ids = ((node_type, id_) for id_ in ids)
# return self._get_embedding_from_node_info(type_ids, self.node_info)
#
# def _get_embeddings_global(self, ids):
# return self._get_embedding_from_node_info(ids, self.node_info_global)
#
# def get_embeddings(self, node_type=None, node_ids=None):
# assert node_ids is not None
# if node_type is None:
# return self._get_embeddings_global(node_ids)
# else:
# return self._get_embeddings_with_type(node_type, node_ids)
#
# def forward(self, node_type=None, node_ids=None, train_embeddings=True):
# if train_embeddings:
# return self.get_embeddings(node_type, node_ids.tolist())
# else:
# with torch.set_grad_enabled(False):
# return self.get_embeddings(node_type, node_ids.tolist())
#
# class NodeEmbedder(nn.Module):
# def __init__(self, dataset, emb_size, tokenizer_path, dtype=None, n_buckets=100000, pretrained=None):
# super(NodeEmbedder, self).__init__()
#
# self.emb_size = emb_size
# self.dtype = dtype
# if dtype is None:
# self.dtype = torch.float32
# self.n_buckets = n_buckets
#
# self.bpe_tokenizer = None
# self.op_tokenizer = None
# # self.graph_id_to_pretrained_name = None
# self.pretrained_name_to_ind = None
# self.pretrained_embeddings = None
# self.buckets = None
#
# from SourceCodeTools.code.data.sourcetrail.sourcetrail_ast_edges import SharedNodeDetector
#
# leaf_types = SharedNodeDetector.shared_node_types
#
# if len(dataset.nodes.query("type_backup == 'subword'")) > 0:
# # some of the types should not be embedded if subwords were generated
# leaf_types = leaf_types - {"#attr#"}
# leaf_types = leaf_types - {"#keyword#"}
#
# nodes_with_embeddings = dataset.nodes[
# dataset.nodes['type_backup'].apply(lambda type_: type_ in leaf_types)
# ][['global_graph_id', 'typed_id', 'type', 'type_backup', 'name']]
#
# type_name = list(zip(nodes_with_embeddings['type_backup'], nodes_with_embeddings['name']))
#
# self.node_info = dict(zip(
# list(zip(nodes_with_embeddings['type'], nodes_with_embeddings['typed_id'])),
# type_name
# ))
#
# assert len(nodes_with_embeddings) == len(self.node_info)
#
# self.node_info_global = dict(zip(
# nodes_with_embeddings['global_graph_id'],
# type_name
# ))
#
# # self._create_ops_tokenization(nodes_with_embeddings)
# self._create_buckets()
#
# if pretrained is not None:
# self._create_pretrained_embeddings(nodes_with_embeddings, pretrained)
#
# self._create_zero_embedding()
# self._init_tokenizer(tokenizer_path)
#
# def _create_zero_embedding(self):
# self.zero = torch.zeros((self.emb_size, ), requires_grad=False)
#
# def _create_pretrained_embeddings(self, nodes, pretrained):
# # self.graph_id_to_pretrained_name = dict(zip(nodes['global_graph_id'], nodes['name']))
# self.pretrained_name_to_ind = pretrained.ind
# embed = nn.Parameter(torch.tensor(pretrained.e, dtype=self.dtype))
# # nn.init.xavier_uniform_(embed, gain=nn.init.calculate_gain('relu'))
# nn.init.xavier_normal_(embed)
# self.pretrained_embeddings = embed
#
# def _create_ops_tokenization(self, nodes_with_embeddings):
# ops = nodes_with_embeddings.query("type_backup == 'Op'")
# from SourceCodeTools.code.python_tokens_to_bpe_subwords import op_tokenizer
#
# self.ops_tokenized = dict(zip(ops['name'], ops['name'].apply(op_tokenizer)))
#
# def _create_buckets(self):
# embed = nn.Parameter(torch.Tensor(self.n_buckets, self.emb_size))
# # nn.init.xavier_uniform_(embed, gain=nn.init.calculate_gain('relu'))
# nn.init.xavier_normal_(embed)
# self.buckets = embed
#
# def _init_tokenizer(self, tokenizer_path):
# from SourceCodeTools.nlp.embed.bpe import load_bpe_model, make_tokenizer
# self.bpe_tokenizer = make_tokenizer(load_bpe_model(tokenizer_path))
# from SourceCodeTools.code.python_tokens_to_bpe_subwords import op_tokenizer
# self.op_tokenizer = op_tokenizer
#
# def _tokenize(self, type_, name):
# tokenized = None
# if type_ == "Op":
# try_tokenized = self.op_tokenizer(name)
# if try_tokenized == name:
# tokenized = None
#
# if tokenized is None:
# tokenized = self.bpe_tokenizer(name)
# return tokenized
#
# def _get_pretrained_or_none(self, name):
# if self.pretrained_name_to_ind is not None and name in self.pretrained_name_to_ind:
# return self.pretrained_embeddings[self.pretrained_name_to_ind[name], :]
# else:
# return None
#
# def _get_from_buckets(self, name):
# return self.buckets[token_hasher(name, self.n_buckets), :]
#
# def _get_from_tokenized(self, type_, name):
# tokens = self._tokenize(type_, name)
# embedding = None
# for token in tokens:
# token_emb = self._get_pretrained_or_none(token)
# if token_emb is None:
# token_emb = self._get_from_buckets(token)
#
# if embedding is None:
# embedding = token_emb
# else:
# embedding = embedding + token_emb
# return embedding
#
# def _get_embedding(self, type_id, node_info):
# if type_id in node_info:
# real_type, name = self.node_info[type_id]
# embedding = self._get_pretrained_or_none(name)
#
# if embedding is None:
# embedding = self._get_from_tokenized(real_type, name)
# else:
# embedding = self.zero
#
# return embedding
#
# def _get_embeddings_with_type(self, node_type, ids):
# embeddings = []
# for id_ in ids:
# type_id = (node_type, id_)
# embeddings.append(self._get_embedding(type_id, self.node_info))
# embeddings = torch.stack(embeddings)
# return embeddings
#
# def _get_embeddings_global(self, ids):
# embeddings = []
# for global_id in ids:
# embeddings.append(self._get_embedding(global_id, self.node_info_global))
# embeddings = torch.stack(embeddings)
# return embeddings
#
# def get_embeddings(self, node_type=None, node_ids=None):
# assert node_ids is not None
# if node_type is None:
# return self._get_embeddings_global(node_ids)
# else:
# return self._get_embeddings_with_type(node_type, node_ids)
#
# def forward(self, node_type=None, node_ids=None, train_embeddings=True):
# if train_embeddings:
# return self.get_embeddings(node_type, node_ids.tolist())
# else:
# with torch.set_grad_enabled(False):
# return self.get_embeddings(node_type, node_ids.tolist()) | [
"mortiv16@gmail.com"
] | mortiv16@gmail.com |
63b52855ad46d1f9d67fc5e651b85dde89e2167d | 01dd174a3a7d26226564711e32711f137513663f | /pyscf/hessian/uhf.py | 296a0eaaf11d64e93b1f94a8cf1dc1ffa109f934 | [
"Apache-2.0"
] | permissive | cherishyli/pyscf | 00cb09c873edc8890be8501414678cdfa54b177e | 468a4bfc4ce067eb7dab6f9289d71122b219609e | refs/heads/master | 2020-04-18T11:40:00.398066 | 2019-01-24T23:07:36 | 2019-01-24T23:07:36 | 167,508,739 | 1 | 0 | Apache-2.0 | 2019-01-25T08:00:12 | 2019-01-25T08:00:12 | null | UTF-8 | Python | false | false | 20,315 | py | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Non-relativistic UHF analytical Hessian
'''
from functools import reduce
import time
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.scf import ucphf
from pyscf.soscf.newton_ah import _gen_uhf_response
from pyscf.hessian import rhf as rhf_hess
_get_jk = rhf_hess._get_jk
def hess_elec(hessobj, mo_energy=None, mo_coeff=None, mo_occ=None,
              mo1=None, mo_e1=None, h1ao=None,
              atmlst=None, max_memory=4000, verbose=None):
    '''Electronic part of the UHF nuclear Hessian.

    Combines the partial (orbital-frozen) Hessian with the orbital-response
    contributions obtained from the CPHF solution (mo1, mo_e1). ``h1ao`` and
    ``mo1`` may be either in-memory (list-of-arrays per spin) or the path of a
    chkfile where make_h1/solve_mo1 stored them.

    Returns the (natm, natm, 3, 3) Hessian array de2[A,B,x,y].

    Fix: ``time.clock()`` was removed in Python 3.8; use
    ``time.process_time()`` for the CPU-time component of the timer tuple.
    '''
    log = logger.new_logger(hessobj, verbose)
    time0 = t1 = (time.process_time(), time.time())

    mol = hessobj.mol
    mf = hessobj.base
    if mo_energy is None: mo_energy = mf.mo_energy
    if mo_occ is None: mo_occ = mf.mo_occ
    if mo_coeff is None: mo_coeff = mf.mo_coeff
    if atmlst is None: atmlst = range(mol.natm)

    de2 = hessobj.partial_hess_elec(mo_energy, mo_coeff, mo_occ, atmlst,
                                    max_memory, log)

    if h1ao is None:
        h1ao = hessobj.make_h1(mo_coeff, mo_occ, hessobj.chkfile, atmlst, log)
        t1 = log.timer_debug1('making H1', *time0)
    if mo1 is None or mo_e1 is None:
        mo1, mo_e1 = hessobj.solve_mo1(mo_energy, mo_coeff, mo_occ, h1ao,
                                       None, atmlst, max_memory, log)
        t1 = log.timer_debug1('solving MO1', *t1)

    # Results stored in a chkfile come back keyed by string atom index;
    # normalize both spins to {int atom index: array}.
    if isinstance(h1ao, str):
        h1ao = lib.chkfile.load(h1ao, 'scf_f1ao')
        h1aoa = h1ao['0']
        h1aob = h1ao['1']
        h1aoa = {int(k): h1aoa[k] for k in h1aoa}
        h1aob = {int(k): h1aob[k] for k in h1aob}
    else:
        h1aoa, h1aob = h1ao
    if isinstance(mo1, str):
        mo1 = lib.chkfile.load(mo1, 'scf_mo1')
        mo1a = mo1['0']
        mo1b = mo1['1']
        mo1a = {int(k): mo1a[k] for k in mo1a}
        mo1b = {int(k): mo1b[k] for k in mo1b}
    else:
        mo1a, mo1b = mo1
    mo_e1a, mo_e1b = mo_e1

    nao, nmo = mo_coeff[0].shape
    mocca = mo_coeff[0][:,mo_occ[0]>0]
    moccb = mo_coeff[1][:,mo_occ[1]>0]
    mo_ea = mo_energy[0][mo_occ[0]>0]
    mo_eb = mo_energy[1][mo_occ[1]>0]
    # Derivative of the overlap matrix (bra side only; ket added per atom below).
    s1a = -mol.intor('int1e_ipovlp', comp=3)

    aoslices = mol.aoslice_by_atom()
    for i0, ia in enumerate(atmlst):
        shl0, shl1, p0, p1 = aoslices[ia]
        s1ao = numpy.zeros((3,nao,nao))
        s1ao[:,p0:p1] += s1a[:,p0:p1]
        s1ao[:,:,p0:p1] += s1a[:,p0:p1].transpose(0,2,1)
        s1ooa = numpy.einsum('xpq,pi,qj->xij', s1ao, mocca, mocca)
        s1oob = numpy.einsum('xpq,pi,qj->xij', s1ao, moccb, moccb)

        # Only the lower triangle (j <= i) is computed; it is symmetrized below.
        for j0, ja in enumerate(atmlst[:i0+1]):
            q0, q1 = aoslices[ja][2:]
            dm1a = numpy.einsum('ypi,qi->ypq', mo1a[ja], mocca)
            dm1b = numpy.einsum('ypi,qi->ypq', mo1b[ja], moccb)
            de2[i0,j0] += numpy.einsum('xpq,ypq->xy', h1aoa[ia], dm1a) * 2
            de2[i0,j0] += numpy.einsum('xpq,ypq->xy', h1aob[ia], dm1b) * 2
            dm1a = numpy.einsum('ypi,qi,i->ypq', mo1a[ja], mocca, mo_ea)
            dm1b = numpy.einsum('ypi,qi,i->ypq', mo1b[ja], moccb, mo_eb)
            de2[i0,j0] -= numpy.einsum('xpq,ypq->xy', s1ao, dm1a) * 2
            de2[i0,j0] -= numpy.einsum('xpq,ypq->xy', s1ao, dm1b) * 2
            de2[i0,j0] -= numpy.einsum('xpq,ypq->xy', s1ooa, mo_e1a[ja])
            de2[i0,j0] -= numpy.einsum('xpq,ypq->xy', s1oob, mo_e1b[ja])

        for j0 in range(i0):
            de2[j0,i0] = de2[i0,j0].T

    log.timer('UHF hessian', *time0)
    return de2
def partial_hess_elec(hessobj, mo_energy=None, mo_coeff=None, mo_occ=None,
                      atmlst=None, max_memory=4000, verbose=None):
    '''Partial electronic Hessian: the contribution with the MO coefficients
    held fixed (no CPHF orbital response). hess_elec adds the response terms.

    Returns the (natm, natm, 3, 3) array de2[A,B,dR_A,dR_B].

    Fix: ``time.clock()`` was removed in Python 3.8; use
    ``time.process_time()`` for the CPU-time component of the timer tuple.
    '''
    log = logger.new_logger(hessobj, verbose)
    time0 = t1 = (time.process_time(), time.time())

    mol = hessobj.mol
    mf = hessobj.base
    if mo_energy is None: mo_energy = mf.mo_energy
    if mo_occ is None: mo_occ = mf.mo_occ
    if mo_coeff is None: mo_coeff = mf.mo_coeff
    if atmlst is None: atmlst = range(mol.natm)

    nao, nmo = mo_coeff[0].shape
    mocca = mo_coeff[0][:,mo_occ[0]>0]
    moccb = mo_coeff[1][:,mo_occ[1]>0]
    # Spin density matrices and their sum.
    dm0a = numpy.dot(mocca, mocca.T)
    dm0b = numpy.dot(moccb, moccb.T)
    dm0 = dm0a + dm0b
    # Energy weighted density matrix
    mo_ea = mo_energy[0][mo_occ[0]>0]
    mo_eb = mo_energy[1][mo_occ[1]>0]
    dme0 = numpy.einsum('pi,qi,i->pq', mocca, mocca, mo_ea)
    dme0 += numpy.einsum('pi,qi,i->pq', moccb, moccb, mo_eb)

    hcore_deriv = hessobj.hcore_generator(mol)
    s1aa, s1ab, s1a = rhf_hess.get_ovlp(mol)

    # Diagonal (same-atom) second-derivative J/K contributions.
    vj1a, vj1b, vk1a, vk1b = \
            _get_jk(mol, 'int2e_ipip1', 9, 's2kl',
                    ['lk->s1ij', dm0a, 'lk->s1ij', dm0b,
                     'jk->s1il', dm0a, 'jk->s1il', dm0b])
    vj1 = vj1a + vj1b
    vhfa_diag = vj1 - vk1a
    vhfb_diag = vj1 - vk1b
    vhfa_diag = vhfa_diag.reshape(3,3,nao,nao)
    vhfb_diag = vhfb_diag.reshape(3,3,nao,nao)
    vj1 = vj1a = vj1b = vk1a = vk1b = None  # release large intermediates
    t1 = log.timer_debug1('contracting int2e_ipip1', *t1)

    aoslices = mol.aoslice_by_atom()
    de2 = numpy.zeros((mol.natm,mol.natm,3,3))  # (A,B,dR_A,dR_B)
    for i0, ia in enumerate(atmlst):
        shl0, shl1, p0, p1 = aoslices[ia]
        shls_slice = (shl0, shl1) + (0, mol.nbas)*3
        # Mixed derivative integrals restricted to shells of atom ia.
        vj1a, vj1b, vk1a, vk1b, vk2a, vk2b = \
                _get_jk(mol, 'int2e_ip1ip2', 9, 's1',
                        ['ji->s1kl', dm0a[:,p0:p1], 'ji->s1kl', dm0b[:,p0:p1],
                         'li->s1kj', dm0a[:,p0:p1], 'li->s1kj', dm0b[:,p0:p1],
                         'lj->s1ki', dm0a, 'lj->s1ki', dm0b],
                        shls_slice=shls_slice)
        vj1 = vj1a + vj1b
        vhfa = vj1 * 2 - vk1a
        vhfb = vj1 * 2 - vk1b
        vhfa[:,:,p0:p1] -= vk2a
        vhfb[:,:,p0:p1] -= vk2b
        t1 = log.timer_debug1('contracting int2e_ip1ip2 for atom %d'%ia, *t1)

        vj1a, vj1b, vk1a, vk1b = \
                _get_jk(mol, 'int2e_ipvip1', 9, 's2kl',
                        ['lk->s1ij', dm0a, 'lk->s1ij', dm0b,
                         'li->s1kj', dm0a[:,p0:p1], 'li->s1kj', dm0b[:,p0:p1]],
                        shls_slice=shls_slice)
        vj1 = vj1a + vj1b
        vhfa[:,:,p0:p1] += vj1.transpose(0,2,1)
        vhfb[:,:,p0:p1] += vj1.transpose(0,2,1)
        vhfa -= vk1a.transpose(0,2,1)
        vhfb -= vk1b.transpose(0,2,1)
        vj1 = vj1a = vj1b = vk1a = vk1b = vk2a = vk2b = None
        t1 = log.timer_debug1('contracting int2e_ipvip1 for atom %d'%ia, *t1)

        vhfa = vhfa.reshape(3,3,nao,nao)
        vhfb = vhfb.reshape(3,3,nao,nao)
        s1ao = numpy.zeros((3,nao,nao))
        s1ao[:,p0:p1] += s1a[:,p0:p1]
        s1ao[:,:,p0:p1] += s1a[:,p0:p1].transpose(0,2,1)
        # NOTE(review): s1ooa/s1oob are not referenced again in this function
        # (the corresponding terms are handled in hess_elec); kept as-is.
        s1ooa = numpy.einsum('xpq,pi,qj->xij', s1ao, mocca, mocca)
        s1oob = numpy.einsum('xpq,pi,qj->xij', s1ao, moccb, moccb)

        de2[i0,i0] += numpy.einsum('xypq,pq->xy', vhfa_diag[:,:,p0:p1], dm0a[p0:p1])*2
        de2[i0,i0] += numpy.einsum('xypq,pq->xy', vhfb_diag[:,:,p0:p1], dm0b[p0:p1])*2
        de2[i0,i0] -= numpy.einsum('xypq,pq->xy', s1aa[:,:,p0:p1], dme0[p0:p1])*2

        # Lower triangle only; symmetrized after the loop.
        for j0, ja in enumerate(atmlst[:i0+1]):
            q0, q1 = aoslices[ja][2:]
            de2[i0,j0] += numpy.einsum('xypq,pq->xy', vhfa[:,:,q0:q1], dm0a[q0:q1])*2
            de2[i0,j0] += numpy.einsum('xypq,pq->xy', vhfb[:,:,q0:q1], dm0b[q0:q1])*2
            de2[i0,j0] -= numpy.einsum('xypq,pq->xy', s1ab[:,:,p0:p1,q0:q1], dme0[p0:p1,q0:q1])*2

            h1ao = hcore_deriv(ia, ja)
            de2[i0,j0] += numpy.einsum('xypq,pq->xy', h1ao, dm0)

        for j0 in range(i0):
            de2[j0,i0] = de2[i0,j0].T

    log.timer('UHF partial hessian', *time0)
    return de2
def make_h1(hessobj, mo_coeff, mo_occ, chkfile=None, atmlst=None, verbose=None):
    '''First-order Fock matrices (hcore derivative + first-order J/K) for each
    atom, one per spin.

    Returns (h1aoa, h1aob) lists indexed by atom when ``chkfile`` is None,
    otherwise saves each matrix under 'scf_f1ao/<spin>/<atom>' in the chkfile
    and returns the chkfile path.

    Fix: removed the ``time.clock()`` call — ``time.clock`` was removed in
    Python 3.8, and the timing tuple was never used in this function anyway.
    '''
    mol = hessobj.mol
    if atmlst is None:
        atmlst = range(mol.natm)

    nao, nmo = mo_coeff[0].shape
    mocca = mo_coeff[0][:,mo_occ[0]>0]
    moccb = mo_coeff[1][:,mo_occ[1]>0]
    dm0a = numpy.dot(mocca, mocca.T)
    dm0b = numpy.dot(moccb, moccb.T)
    hcore_deriv = hessobj.base.nuc_grad_method().hcore_generator(mol)

    aoslices = mol.aoslice_by_atom()
    h1aoa = [None] * mol.natm
    h1aob = [None] * mol.natm
    for i0, ia in enumerate(atmlst):
        shl0, shl1, p0, p1 = aoslices[ia]
        shls_slice = (shl0, shl1) + (0, mol.nbas)*3
        # First-derivative J/K contractions restricted to shells of atom ia.
        vj1a, vj1b, vj2a, vj2b, vk1a, vk1b, vk2a, vk2b = \
                _get_jk(mol, 'int2e_ip1', 3, 's2kl',
                        ['ji->s2kl', -dm0a[:,p0:p1], 'ji->s2kl', -dm0b[:,p0:p1],
                         'lk->s1ij', -dm0a, 'lk->s1ij', -dm0b,
                         'li->s1kj', -dm0a[:,p0:p1], 'li->s1kj', -dm0b[:,p0:p1],
                         'jk->s1il', -dm0a, 'jk->s1il', -dm0b],
                        shls_slice=shls_slice)
        vj1 = vj1a + vj1b
        vj2 = vj2a + vj2b
        vhfa = vj1 - vk1a
        vhfb = vj1 - vk1b
        vhfa[:,p0:p1] += vj2 - vk2a
        vhfb[:,p0:p1] += vj2 - vk2b
        h1 = hcore_deriv(ia)
        # Symmetrize: add the transpose so both bra and ket derivatives appear.
        h1a = h1 + vhfa + vhfa.transpose(0,2,1)
        h1b = h1 + vhfb + vhfb.transpose(0,2,1)

        if chkfile is None:
            h1aoa[ia] = h1a
            h1aob[ia] = h1b
        else:
            lib.chkfile.save(chkfile, 'scf_f1ao/0/%d' % ia, h1a)
            lib.chkfile.save(chkfile, 'scf_f1ao/1/%d' % ia, h1b)
    if chkfile is None:
        return (h1aoa,h1aob)
    else:
        return chkfile
def solve_mo1(mf, mo_energy, mo_coeff, mo_occ, h1ao_or_chkfile,
              fx=None, atmlst=None, max_memory=4000, verbose=None):
    '''Solve the coupled-perturbed HF (CPHF) equations for the first-order MO
    coefficients (mo1) and first-order orbital energies (e1), per spin.

    ``h1ao_or_chkfile`` is either the (h1aoa, h1aob) lists from make_h1 or a
    chkfile path holding them under 'scf_f1ao'. In the chkfile case, mo1 is
    saved back to the chkfile ('scf_mo1') and the path is returned in its
    place. Atoms are processed in memory-bounded batches of size ``blksize``.
    '''
    mol = mf.mol
    if atmlst is None: atmlst = range(mol.natm)

    nao, nmo = mo_coeff[0].shape
    mocca = mo_coeff[0][:,mo_occ[0]>0]
    moccb = mo_coeff[1][:,mo_occ[1]>0]
    nocca = mocca.shape[1]
    noccb = moccb.shape[1]

    if fx is None:
        # Default orbital-response kernel (see gen_vind).
        fx = gen_vind(mf, mo_coeff, mo_occ)
    s1a = -mol.intor('int1e_ipovlp', comp=3)

    def _ao2mo(mat, mo_coeff, mocc):
        # Transform each AO matrix in `mat` to the MO(all) x MO(occ) block.
        return numpy.asarray([reduce(numpy.dot, (mo_coeff.T, x, mocc)) for x in mat])

    # Batch size chosen so the per-batch work arrays fit in max_memory.
    mem_now = lib.current_memory()[0]
    max_memory = max(2000, max_memory*.9-mem_now)
    blksize = max(2, int(max_memory*1e6/8 / (nao*(nocca+noccb)*3*6)))
    mo1sa = [None] * mol.natm
    mo1sb = [None] * mol.natm
    e1sa = [None] * mol.natm
    e1sb = [None] * mol.natm
    aoslices = mol.aoslice_by_atom()
    for ia0, ia1 in lib.prange(0, len(atmlst), blksize):
        s1voa = []
        s1vob = []
        h1voa = []
        h1vob = []
        for i0 in range(ia0, ia1):
            ia = atmlst[i0]
            shl0, shl1, p0, p1 = aoslices[ia]
            # Per-atom symmetrized overlap derivative in AO basis.
            s1ao = numpy.zeros((3,nao,nao))
            s1ao[:,p0:p1] += s1a[:,p0:p1]
            s1ao[:,:,p0:p1] += s1a[:,p0:p1].transpose(0,2,1)
            s1voa.append(_ao2mo(s1ao, mo_coeff[0], mocca))
            s1vob.append(_ao2mo(s1ao, mo_coeff[1], moccb))
            if isinstance(h1ao_or_chkfile, str):
                h1aoa = lib.chkfile.load(h1ao_or_chkfile, 'scf_f1ao/0/%d'%ia)
                h1aob = lib.chkfile.load(h1ao_or_chkfile, 'scf_f1ao/1/%d'%ia)
            else:
                h1aoa = h1ao_or_chkfile[0][ia]
                h1aob = h1ao_or_chkfile[1][ia]
            h1voa.append(_ao2mo(h1aoa, mo_coeff[0], mocca))
            h1vob.append(_ao2mo(h1aob, mo_coeff[1], moccb))

        # Stack the batch and solve all 3*natoms_in_batch perturbations at once.
        h1vo = (numpy.vstack(h1voa), numpy.vstack(h1vob))
        s1vo = (numpy.vstack(s1voa), numpy.vstack(s1vob))
        mo1, e1 = ucphf.solve(fx, mo_energy, mo_occ, h1vo, s1vo)
        # Back-transform mo1 to AO x occ and split per atom/component.
        mo1a = numpy.einsum('pq,xqi->xpi', mo_coeff[0], mo1[0]).reshape(-1,3,nao,nocca)
        mo1b = numpy.einsum('pq,xqi->xpi', mo_coeff[1], mo1[1]).reshape(-1,3,nao,noccb)
        e1a = e1[0].reshape(-1,3,nocca,nocca)
        e1b = e1[1].reshape(-1,3,noccb,noccb)

        for k in range(ia1-ia0):
            ia = atmlst[k+ia0]
            if isinstance(h1ao_or_chkfile, str):
                lib.chkfile.save(h1ao_or_chkfile, 'scf_mo1/0/%d'%ia, mo1a[k])
                lib.chkfile.save(h1ao_or_chkfile, 'scf_mo1/1/%d'%ia, mo1b[k])
            else:
                mo1sa[ia] = mo1a[k]
                mo1sb[ia] = mo1b[k]
            e1sa[ia] = e1a[k].reshape(3,nocca,nocca)
            e1sb[ia] = e1b[k].reshape(3,noccb,noccb)
        mo1 = e1 = mo1a = mo1b = e1a = e1b = None  # release batch arrays

    if isinstance(h1ao_or_chkfile, str):
        return h1ao_or_chkfile, (e1sa,e1sb)
    else:
        return (mo1sa,mo1sb), (e1sa,e1sb)
def gen_vind(mf, mo_coeff, mo_occ):
    '''Build the orbital-response function fx(mo1) used by the CPHF solver.

    fx maps flattened first-order MO coefficients (alpha block followed by
    beta block) to the induced potential, transformed back to the same
    MO(all) x MO(occ) layout.
    '''
    nao, nmoa = mo_coeff[0].shape
    nmob = mo_coeff[1].shape[1]
    mocca = mo_coeff[0][:,mo_occ[0]>0]
    moccb = mo_coeff[1][:,mo_occ[1]>0]
    nocca = mocca.shape[1]
    noccb = moccb.shape[1]

    # Response kernel of the mean-field method (hermi=1: symmetric dm1).
    vresp = _gen_uhf_response(mf, mo_coeff, mo_occ, hermi=1)
    def fx(mo1):
        # Each row of mo1 packs [alpha (nmoa*nocca) | beta (nmob*noccb)].
        mo1 = mo1.reshape(-1,nmoa*nocca+nmob*noccb)
        nset = len(mo1)
        dm1 = numpy.empty((2,nset,nao,nao))
        for i, x in enumerate(mo1):
            xa = x[:nmoa*nocca].reshape(nmoa,nocca)
            xb = x[nmoa*nocca:].reshape(nmob,noccb)
            dma = reduce(numpy.dot, (mo_coeff[0], xa, mocca.T))
            dmb = reduce(numpy.dot, (mo_coeff[1], xb, moccb.T))
            # Symmetrize the first-order density matrices.
            dm1[0,i] = dma + dma.T
            dm1[1,i] = dmb + dmb.T
        v1 = vresp(dm1)
        v1vo = numpy.empty_like(mo1)
        for i in range(nset):
            # Transform the induced potential back to the packed MO layout.
            v1vo[i,:nmoa*nocca] = reduce(numpy.dot, (mo_coeff[0].T, v1[0,i], mocca)).ravel()
            v1vo[i,nmoa*nocca:] = reduce(numpy.dot, (mo_coeff[1].T, v1[1,i], moccb)).ravel()
        return v1vo
    return fx
def gen_hop(hobj, mo_energy=None, mo_coeff=None, mo_occ=None, verbose=None):
    '''Return (h_op, hdiag): a matrix-free Hessian-vector product and the
    Hessian diagonal.

    h_op(x) applies the full nuclear Hessian (partial + nuclear + orbital
    response via CPHF) to a flattened (natm*3,) displacement vector. The H1
    integrals are precomputed once into hobj.chkfile and reloaded inside
    h_op, so hobj.chkfile must stay valid for the lifetime of h_op.
    '''
    log = logger.new_logger(hobj, verbose)
    mol = hobj.mol
    mf = hobj.base

    if mo_energy is None: mo_energy = mf.mo_energy
    if mo_occ is None: mo_occ = mf.mo_occ
    if mo_coeff is None: mo_coeff = mf.mo_coeff

    natm = mol.natm
    nao, nmo = mo_coeff[0].shape
    mocca = mo_coeff[0][:,mo_occ[0]>0]
    moccb = mo_coeff[1][:,mo_occ[1]>0]
    mo_ea = mo_energy[0][mo_occ[0]>0]
    mo_eb = mo_energy[1][mo_occ[1]>0]
    nocca = mocca.shape[1]
    noccb = moccb.shape[1]

    atmlst = range(natm)
    max_memory = max(2000, hobj.max_memory - lib.current_memory()[0])
    # Response-free part of the Hessian, computed once up front.
    de2 = hobj.partial_hess_elec(mo_energy, mo_coeff, mo_occ, atmlst,
                                 max_memory, log)
    de2 += hobj.hess_nuc()

    # Compute H1 integrals and store in hobj.chkfile
    hobj.make_h1(mo_coeff, mo_occ, hobj.chkfile, atmlst, log)

    aoslices = mol.aoslice_by_atom()
    s1a = -mol.intor('int1e_ipovlp', comp=3)

    fvind = gen_vind(mf, mo_coeff, mo_occ)
    def h_op(x):
        '''Hessian-vector product for displacement vector x of shape (natm*3,).'''
        x = x.reshape(natm,3)
        # Response-free contribution.
        hx = numpy.einsum('abxy,ax->by', de2, x)
        # Contract the per-atom H1 and S1 derivatives with x.
        h1aoa = 0
        h1aob = 0
        s1ao = 0
        for ia in range(natm):
            shl0, shl1, p0, p1 = aoslices[ia]
            h1ao_i = lib.chkfile.load(hobj.chkfile, 'scf_f1ao/0/%d' % ia)
            h1aoa += numpy.einsum('x,xij->ij', x[ia], h1ao_i)
            h1ao_i = lib.chkfile.load(hobj.chkfile, 'scf_f1ao/1/%d' % ia)
            h1aob += numpy.einsum('x,xij->ij', x[ia], h1ao_i)
            s1ao_i = numpy.zeros((3,nao,nao))
            s1ao_i[:,p0:p1] += s1a[:,p0:p1]
            s1ao_i[:,:,p0:p1] += s1a[:,p0:p1].transpose(0,2,1)
            s1ao += numpy.einsum('x,xij->ij', x[ia], s1ao_i)

        # Solve CPHF for the contracted perturbation.
        s1voa = reduce(numpy.dot, (mo_coeff[0].T, s1ao, mocca))
        s1vob = reduce(numpy.dot, (mo_coeff[1].T, s1ao, moccb))
        h1voa = reduce(numpy.dot, (mo_coeff[0].T, h1aoa, mocca))
        h1vob = reduce(numpy.dot, (mo_coeff[1].T, h1aob, moccb))
        mo1, mo_e1 = ucphf.solve(fvind, mo_energy, mo_occ,
                                 (h1voa,h1vob), (s1voa,s1vob))
        mo1a = numpy.dot(mo_coeff[0], mo1[0])
        mo1b = numpy.dot(mo_coeff[1], mo1[1])
        mo_e1a = mo_e1[0].reshape(nocca,nocca)
        mo_e1b = mo_e1[1].reshape(noccb,noccb)
        # First-order density and energy-weighted density matrices.
        dm1a = numpy.einsum('pi,qi->pq', mo1a, mocca)
        dm1b = numpy.einsum('pi,qi->pq', mo1b, moccb)
        dme1a = numpy.einsum('pi,qi,i->pq', mo1a, mocca, mo_ea)
        dme1a = dme1a + dme1a.T + reduce(numpy.dot, (mocca, mo_e1a, mocca.T))
        dme1b = numpy.einsum('pi,qi,i->pq', mo1b, moccb, mo_eb)
        dme1b = dme1b + dme1b.T + reduce(numpy.dot, (moccb, mo_e1b, moccb.T))
        dme1 = dme1a + dme1b

        # Accumulate the response contribution per output atom.
        for ja in range(natm):
            q0, q1 = aoslices[ja][2:]
            h1aoa = lib.chkfile.load(hobj.chkfile, 'scf_f1ao/0/%d' % ja)
            h1aob = lib.chkfile.load(hobj.chkfile, 'scf_f1ao/1/%d' % ja)
            hx[ja] += numpy.einsum('xpq,pq->x', h1aoa, dm1a) * 2
            hx[ja] += numpy.einsum('xpq,pq->x', h1aob, dm1b) * 2
            hx[ja] -= numpy.einsum('xpq,pq->x', s1a[:,q0:q1], dme1[q0:q1])
            hx[ja] -= numpy.einsum('xpq,qp->x', s1a[:,q0:q1], dme1[:,q0:q1])
        return hx.ravel()

    # Diagonal elements de2[a,a,x,x] flattened to (natm*3,).
    hdiag = numpy.einsum('aaxx->ax', de2).ravel()
    return h_op, hdiag
class Hessian(rhf_hess.Hessian):
    '''Non-relativistic UHF hessian'''

    # Bind the module-level implementations as methods; driver machinery
    # (kernel, hess_nuc, hcore_generator, ...) is presumably inherited from
    # rhf_hess.Hessian — see the __main__ usage below.
    partial_hess_elec = partial_hess_elec
    hess_elec = hess_elec
    make_h1 = make_h1
    gen_hop = gen_hop

    def solve_mo1(self, mo_energy, mo_coeff, mo_occ, h1ao_or_chkfile,
                  fx=None, atmlst=None, max_memory=4000, verbose=None):
        # Delegate to the module-level CPHF solver, passing the underlying
        # SCF object (self.base) rather than this Hessian wrapper.
        return solve_mo1(self.base, mo_energy, mo_coeff, mo_occ, h1ao_or_chkfile,
                         fx, atmlst, max_memory, verbose)
if __name__ == '__main__':
    # Self-test: compare the analytical UHF Hessian against finite-difference
    # derivatives of the analytical gradient for a small H4 system.
    from pyscf import gto
    from pyscf import scf

    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None
    mol.atom = [
        [1 , (1. , 0. , 0.000)],
        [1 , (0. , 1. , 0.000)],
        [1 , (0. , -1.517 , 1.177)],
        [1 , (0. , 1.517 , 1.177)] ]
    mol.basis = '631g'
    mol.unit = 'B'
    mol.build()
    mf = scf.UHF(mol)
    mf.conv_tol = 1e-14
    mf.scf()
    n3 = mol.natm * 3
    hobj = Hessian(mf)
    e2 = hobj.kernel().transpose(0,2,1,3).reshape(n3,n3)
    # Fingerprint regression check (should print ~0).
    print(lib.finger(e2) - -0.50693144355876429)

    # Repeat for a spin-polarized (triplet) state.
    mol.spin = 2
    mf = scf.UHF(mol)
    mf.conv_tol = 1e-14
    mf.scf()
    n3 = mol.natm * 3
    hobj = Hessian(mf)
    e2 = hobj.kernel().transpose(0,2,1,3).reshape(n3,n3)

    def grad_full(ia, inc):
        # Central finite difference of the fully relaxed gradient w.r.t. the
        # coordinates of atom ia (SCF re-converged at each displaced geometry).
        coord = mol.atom_coord(ia).copy()
        ptr = mol._atm[ia,gto.PTR_COORD]
        de = []
        for i in range(3):
            mol._env[ptr+i] = coord[i] + inc
            mf = scf.UHF(mol).run(conv_tol=1e-14)
            e1a = mf.nuc_grad_method().kernel()
            mol._env[ptr+i] = coord[i] - inc
            mf = scf.UHF(mol).run(conv_tol=1e-14)
            e1b = mf.nuc_grad_method().kernel()
            mol._env[ptr+i] = coord[i]
            de.append((e1a-e1b)/(2*inc))
        return de
    e2ref = [grad_full(ia, .5e-3) for ia in range(mol.natm)]
    e2ref = numpy.asarray(e2ref).reshape(n3,n3)
    print(numpy.linalg.norm(e2-e2ref))
    print(abs(e2-e2ref).max())
    print(numpy.allclose(e2,e2ref,atol=1e-4))

    # \partial^2 E / \partial R \partial R'
    e2 = hobj.partial_hess_elec(mf.mo_energy, mf.mo_coeff, mf.mo_occ)
    e2 += hobj.hess_nuc(mol)
    e2 = e2.transpose(0,2,1,3).reshape(n3,n3)
    def grad_partial_R(ia, inc):
        # Finite difference with frozen MOs (no SCF re-convergence), matching
        # the partial Hessian definition.
        coord = mol.atom_coord(ia).copy()
        ptr = mol._atm[ia,gto.PTR_COORD]
        de = []
        for i in range(3):
            mol._env[ptr+i] = coord[i] + inc
            e1a = mf.nuc_grad_method().kernel()
            mol._env[ptr+i] = coord[i] - inc
            e1b = mf.nuc_grad_method().kernel()
            mol._env[ptr+i] = coord[i]
            de.append((e1a-e1b)/(2*inc))
        return de
    e2ref = [grad_partial_R(ia, .5e-4) for ia in range(mol.natm)]
    e2ref = numpy.asarray(e2ref).reshape(n3,n3)
    print(numpy.linalg.norm(e2-e2ref))
    print(abs(e2-e2ref).max())
    print(numpy.allclose(e2,e2ref,atol=1e-8))
| [
"osirpt.sun@gmail.com"
] | osirpt.sun@gmail.com |
f106117df3ad5eb8f234be7b240431f878d123cf | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/75/usersdata/247/39815/submittedfiles/maiormenor.py | 214f114523f4a56308de11ca5ddbcce30d0dc097 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # -*- coding: utf-8 -*-
import math
# Read five integers and print the largest followed by the smallest.
#
# The previous version only compared a handful of hard-coded orderings
# (e.g. ``a>b>c>d>e``), some of them duplicated, so for most inputs it
# printed nothing or the wrong values. Collecting the values in a list
# and using the built-in max()/min() handles every ordering, including
# repeated values.
valores = []
for i in range(1, 6):
    valores.append(int(input('Digite o número %d: ' % i)))
print('%d' % max(valores))
print('%d' % min(valores))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
b8ec637cf8622bb6e117563c1cb3f60b0272b2f9 | 1b36425f798f484eda964b10a5ad72b37b4da916 | /posthog/migrations/0163_insights_favorited_updatedat_tags.py | c5a40dcc94e66a2437a01cb833cff674ba5315e1 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | dorucioclea/posthog | 0408baa2a7ae98e5bea352c516f741ddc17c0a3e | 8848981baf237117fb22d28af0770a0165881423 | refs/heads/master | 2023-01-23T11:01:57.942146 | 2023-01-13T09:03:00 | 2023-01-13T09:03:00 | 241,222,000 | 0 | 0 | MIT | 2020-02-17T22:34:37 | 2020-02-17T22:34:36 | null | UTF-8 | Python | false | false | 902 | py | # Generated by Django 3.1.12 on 2021-08-05 12:24
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``favorited``, ``tags`` and ``updated_at`` columns to DashboardItem."""

    dependencies = [
        ("posthog", "0162_organization_is_member_join_email_enabled"),
    ]

    operations = [
        # Whether the user has starred this insight; existing rows default to False.
        migrations.AddField(
            model_name="dashboarditem",
            name="favorited",
            field=models.BooleanField(default=False),
        ),
        # Free-form tags stored as a Postgres varchar(32) array; defaults to empty.
        migrations.AddField(
            model_name="dashboarditem",
            name="tags",
            field=django.contrib.postgres.fields.ArrayField(
                base_field=models.CharField(max_length=32), blank=True, default=list, size=None
            ),
        ),
        # Automatically refreshed on every save (auto_now=True).
        migrations.AddField(
            model_name="dashboarditem",
            name="updated_at",
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| [
"noreply@github.com"
] | dorucioclea.noreply@github.com |
e148e7a991356e2098a861f3df4ded833d05c410 | 4e5141121d8b4015db233cbc71946ec3cfbe5fe6 | /samples/basic/codec/models/cisco-ios-xr/Cisco-IOS-XR-ipv4-bgp-cfg/cd-encode-xr-ipv4-bgp-cfg-40-ydk.py | deae332d3af6766cbb63dddd700f23a1d621ed68 | [
"Apache-2.0"
] | permissive | itbj/ydk-py-samples | 898c6c9bad9d6f8072892300d42633d82ec38368 | c5834091da0ebedbb11af7bbf780f268aad7040b | refs/heads/master | 2022-11-20T17:44:58.844428 | 2020-07-25T06:18:02 | 2020-07-25T06:18:02 | 282,382,442 | 1 | 0 | null | 2020-07-25T06:04:51 | 2020-07-25T06:04:50 | null | UTF-8 | Python | false | false | 4,034 | py | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Encode configuration for model Cisco-IOS-XR-ipv4-bgp-cfg.
usage: cd-encode-xr-ipv4-bgp-cfg-40-ydk.py [-h] [-v]
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import CodecService
from ydk.providers import CodecServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ipv4_bgp_cfg \
as xr_ipv4_bgp_cfg
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ipv4_bgp_datatypes \
as xr_ipv4_bgp_datatypes
from ydk.types import Empty
import logging
def config_bgp(bgp):
    """Add config data to bgp object.

    Populates a YDK ``Bgp`` object with a single default-VRF instance
    (4-byte AS 65001), enables the IPv4 unicast address family, creates
    an "IBGP" neighbor group sourced from Loopback0, and attaches one
    neighbor (172.16.255.2) to that group.

    Args:
        bgp: ``Cisco_IOS_XR_ipv4_bgp_cfg.Bgp`` object to populate in place.
    """
    # global configuration
    instance = bgp.Instance()
    instance.instance_name = "default"
    instance_as = instance.InstanceAs()
    instance_as.as_ = 0
    four_byte_as = instance_as.FourByteAs()
    four_byte_as.as_ = 65001
    four_byte_as.bgp_running = Empty()
    # global address family
    global_af = four_byte_as.default_vrf.global_.global_afs.GlobalAf()
    global_af.af_name = xr_ipv4_bgp_datatypes.BgpAddressFamily.ipv4_unicast
    global_af.enable = Empty()
    four_byte_as.default_vrf.global_.global_afs.global_af.append(global_af)
    instance_as.four_byte_as.append(four_byte_as)
    instance.instance_as.append(instance_as)
    bgp.instance.append(instance)

    # configure IBGP neighbor group
    neighbor_groups = four_byte_as.default_vrf.bgp_entity.neighbor_groups
    neighbor_group = neighbor_groups.NeighborGroup()
    neighbor_group.neighbor_group_name = "IBGP"
    neighbor_group.create = Empty()
    # remote AS (as_xx/as_yy split a 4-byte AS number; 0.65001 == plain 65001)
    neighbor_group.remote_as.as_xx = 0
    neighbor_group.remote_as.as_yy = 65001
    neighbor_group.update_source_interface = "Loopback0"
    neighbor_groups.neighbor_group.append(neighbor_group)
    # ipv4 unicast
    neighbor_group_af = neighbor_group.neighbor_group_afs.NeighborGroupAf()
    neighbor_group_af.af_name = xr_ipv4_bgp_datatypes.BgpAddressFamily.ipv4_unicast
    neighbor_group_af.activate = Empty()
    neighbor_group_afs = neighbor_group.neighbor_group_afs
    neighbor_group_afs.neighbor_group_af.append(neighbor_group_af)

    # configure IBGP neighbor, inheriting settings from the IBGP group
    neighbor = four_byte_as.default_vrf.bgp_entity.neighbors.Neighbor()
    neighbor.neighbor_address = "172.16.255.2"
    neighbor.neighbor_group_add_member = "IBGP"
    four_byte_as.default_vrf.bgp_entity.neighbors.neighbor.append(neighbor)
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
args = parser.parse_args()
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create codec provider
provider = CodecServiceProvider(type="xml")
# create codec service
codec = CodecService()
bgp = xr_ipv4_bgp_cfg.Bgp() # create object
config_bgp(bgp) # add object configuration
# encode and print object
print(codec.encode(provider, bgp))
exit()
# End of script
| [
"saalvare@cisco.com"
] | saalvare@cisco.com |
2b1f0d7ff4f43be0da442ede0ce52cd16efbfa97 | 0ec8af8988245d864c63d923e5524403090cd7e0 | /policy_gov_mianyang/policy_gov_mianyang/mongo_work.py | fe86d89c1e9ee93962704279f654c52647018315 | [] | no_license | radtek/Spider | d26b685cb5e41c67c6a7ce0d632072f3cac5f061 | 5a419e8ec77915804d3e659631f09b19aa90a088 | refs/heads/master | 2022-10-30T13:13:29.736256 | 2020-06-11T03:34:58 | 2020-06-11T03:34:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | # coding: utf-8
# Author:houszhou
# Date :2020/6/2 14:03
# Tool :PyCharm
import pymongo
import re
def obj_first(obj, error=''):
    """Return the first element of *obj*, or *error* when *obj* is empty/falsy."""
    if obj:
        return obj[0]
    return error
def format_file_type(doc_no: str):
    """Extract the leading document-type prefix from a document number.

    Tries progressively looser patterns, in order: text before an opening
    bracket (〔 / [ / 【), before a 4-digit year, before the character 第,
    then before any digit. A prefix that itself starts with a digit is
    rejected and '' is returned.
    """
    prefix = obj_first(re.findall(r'^(.*?)[〔\[【]', doc_no))
    if not prefix:
        prefix = obj_first(re.findall(r'^(.*?)\d{4}', doc_no))
    if not prefix and '第' in doc_no:
        prefix = obj_first(re.findall('^(.*?)第', doc_no))
    if not prefix:
        prefix = obj_first(re.findall(r'^(.*?)\d', doc_no))
    # reject prefixes that begin with a digit
    return '' if re.findall(r'^\d', prefix) else prefix
def change():
    """Backfill ``file_type`` for all 北大法宝 records in gov_info_data.

    Iterates every document whose ``website`` is 北大法宝, derives the
    file type from ``extension.doc_no`` via ``format_file_type``, and
    writes it back with ``find_one_and_update``. Progress is printed
    every 1000 records.
    """
    client = pymongo.MongoClient(host='localhost', port=27017)
    db = client.pdsp_beta_db
    collection = db.gov_info_data
    result = collection.find({'website': '北大法宝'})
    for i, data in enumerate(result):
        if i % 1000 == 0:
            print(i)
        id_ = data.get('_id')
        # NOTE(review): assumes every matched document has an 'extension'
        # sub-document; a missing key would make .get() raise on None — confirm.
        extension = data.get('extension')
        doc_no = extension.get('doc_no', '')
        file_type = format_file_type(doc_no) if doc_no else ''
        print('id: {}, doc_no: {}, new_file_type: {}'.format(id_, doc_no, file_type))
        collection.find_one_and_update({'_id': id_}, {'$set': {'file_type': file_type}})
if __name__ == '__main__':
    # Entry point: backfill file_type for all matching MongoDB records.
    change()
| [
"1733776802@qq.com"
] | 1733776802@qq.com |
4cec18da9c0917f7fa06dacea09aba3fbf63a37f | 0225e57cc4c579bb0e1ccff01e69823b6bfebf4e | /wxmplot/stackedplotframe.py | 5894edf1f56ddfbcbbf27b02bee4bed6c94e1190 | [
"MIT"
] | permissive | tula32/wxmplot | c40e0ee2b0c111f43a7dc601e189a8143f0642a0 | 0c770e980e2b314ae0d1f2c4be2c001be588c3f6 | refs/heads/master | 2020-05-01T13:30:57.505719 | 2019-01-25T03:30:20 | 2019-01-25T03:30:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,431 | py | #!/usr/bin/python
##
## StackedPlotFrame: a wx.Frame for 2 PlotPanels, top and bottom
## with the top panel being the main panel and the lower panel
## being 1/4 the height (configurable) and the dependent panel
import wx
import numpy as np
import matplotlib
from matplotlib.ticker import NullFormatter, NullLocator
from functools import partial
from .utils import pack, MenuItem
from .plotpanel import PlotPanel
from .baseframe import BaseFrame
class StackedPlotFrame(BaseFrame):
    """
    Top/Bottom MatPlotlib panels in a single frame

    The top panel is the main plot; the bottom panel is a shorter
    dependent panel (height = top height / ratio) that shares and
    follows the top panel's x-axis limits.
    """
    def __init__(self, parent=None, title ='Stacked Plot Frame',
                 framesize=(850,450), panelsize=(550,450),
                 ratio=3.0, **kws):

        BaseFrame.__init__(self, parent=parent, title=title,
                           size=framesize, **kws)
        # ratio of top-panel height to bottom-panel height
        self.ratio = ratio
        self.panelsize = panelsize
        self.panel = None       # top (main) PlotPanel, created in BuildFrame
        self.panel_bot = None   # bottom (dependent) PlotPanel
        self.xlabel = None      # shared x label, drawn only on the bottom panel
        self.BuildFrame()

    def get_panel(self, panelname):
        """return the PlotPanel for 'top' or 'bot*' panel names"""
        if panelname.lower().startswith('bot'):
            return self.panel_bot
        return self.panel

    def plot(self, x, y, panel='top', xlabel=None, **kws):
        """plot after clearing current plot """
        panel = self.get_panel(panel)
        panel.plot(x, y, **kws)
        if xlabel is not None:
            self.xlabel = xlabel
        # the x label is only ever drawn on the bottom panel
        if self.xlabel is not None:
            self.panel_bot.set_xlabel(self.xlabel)

    def oplot(self, x, y, panel='top', xlabel=None, **kws):
        """plot method, overplotting any existing plot """
        panel = self.get_panel(panel)
        panel.oplot(x, y, **kws)
        if xlabel is not None:
            self.xlabel = xlabel
        if self.xlabel is not None:
            self.panel_bot.set_xlabel(self.xlabel)

    def unzoom_all(self, event=None):
        """ zoom out full data range """
        for p in (self.panel, self.panel_bot):
            p.conf.zoom_lims = []
            p.conf.unzoom(full=True)
        # self.panel.set_viewlimits()

    def unzoom(self, event=None, panel='top'):
        """zoom out 1 level, or to full data range """
        panel = self.get_panel(panel)
        panel.conf.unzoom(event=event)
        self.panel.set_viewlimits()

    def update_line(self, t, x, y, panel='top', **kws):
        """overwrite data for trace t """
        panel = self.get_panel(panel)
        panel.update_line(t, x, y, **kws)

    def set_xylims(self, lims, axes=None, panel='top', **kws):
        """set xy limits"""
        panel = self.get_panel(panel)
        # print("Stacked set_xylims ", panel, self.panel)
        panel.set_xylims(lims, axes=axes, **kws)

    def clear(self, panel='top'):
        """clear plot """
        panel = self.get_panel(panel)
        panel.clear()

    def set_title(self,s, panel='top'):
        "set plot title"
        panel = self.get_panel(panel)
        panel.set_title(s)

    def set_xlabel(self,s, panel='top'):
        "set plot xlabel"
        # x label always goes on the bottom panel, regardless of `panel`
        self.panel_bot.set_xlabel(s)

    def set_ylabel(self,s, panel='top'):
        "set plot ylabel"
        panel = self.get_panel(panel)
        panel.set_ylabel(s)

    def save_figure(self, event=None, panel='top'):
        """ save figure image to file"""
        panel = self.get_panel(panel)
        panel.save_figure(event=event)

    def configure(self, event=None, panel='top'):
        """show the configuration dialog for the selected panel"""
        panel = self.get_panel(panel)
        panel.configure(event=event)

    ####
    ## create GUI
    ####
    def BuildFrame(self):
        """build the two stacked PlotPanels, status bar, sizer and menus"""
        sbar = self.CreateStatusBar(2, wx.CAPTION)
        sfont = sbar.GetFont()
        sfont.SetWeight(wx.BOLD)
        sfont.SetPointSize(10)
        sbar.SetFont(sfont)
        self.SetStatusWidths([-3,-1])
        self.SetStatusText('',0)

        sizer = wx.BoxSizer(wx.VERTICAL)
        # bottom panel is 1/ratio the height of the top panel
        botsize = self.panelsize[0], self.panelsize[1]/self.ratio
        margins = {'top': dict(left=0.15, bottom=0.005, top=0.10, right=0.05),
                   'bot': dict(left=0.15, bottom=0.300, top=0.01, right=0.05)}
        self.panel = PlotPanel(self, size=self.panelsize)
        self.panel_bot = PlotPanel(self, size=botsize)
        # top panel suppresses its x tick labels (shared axis shown below)
        self.panel.xformatter = self.null_formatter

        lsize = self.panel.conf.labelfont.get_size()
        self.panel_bot.conf.labelfont.set_size(lsize-2)

        self.panel_bot.yformatter = self.bot_yformatter
        # self.panel_bot.axes.tick_params(axis='y', labelsize=8)

        # only the top panel drives theme color / margin changes
        self.panel.conf.theme_color_callback = self.onThemeColor
        self.panel.conf.margin_callback = self.onMargins

        for pan, pname in ((self.panel, 'top'), (self.panel_bot, 'bot')):
            pan.messenger = self.write_message
            pan.conf.auto_margins = False
            pan.conf.set_margins(**margins[pname])
            pan.axes.update_params()
            pan.axes.set_position(pan.axes.figbox)
            pan.set_viewlimits = partial(self.set_viewlimits, panel=pname)
            pan.unzoom_all = self.unzoom_all
            pan.unzoom = self.unzoom
            pan.canvas.figure.set_facecolor('#F4F4EC')

        # suppress mouse events on the bottom panel
        null_events = {'leftdown': None, 'leftup': None, 'rightdown': None,
                       'rightup': None, 'motion': None, 'keyevent': None}
        self.panel_bot.cursor_modes = {'zoom': null_events}

        sizer.Add(self.panel,self.ratio, wx.GROW|wx.EXPAND|wx.ALIGN_CENTER, 2)
        sizer.Add(self.panel_bot, 1, wx.GROW|wx.EXPAND|wx.ALIGN_CENTER, 2)
        pack(self, sizer)
        self.SetAutoLayout(True)
        self.SetSizerAndFit(sizer)
        self.BuildMenu()

    def BuildMenu(self):
        """build the File / Options / Help menu bar"""
        mfile = self.Build_FileMenu()
        mopts = wx.Menu()
        MenuItem(self, mopts, "Configure Plot\tCtrl+K",
                 "Configure Plot styles, colors, labels, etc",
                 self.panel.configure)
        MenuItem(self, mopts, "Configure Lower Plot",
                 "Configure Plot styles, colors, labels, etc",
                 self.panel_bot.configure)
        MenuItem(self, mopts, "Toggle Legend\tCtrl+L",
                 "Toggle Legend Display",
                 self.panel.toggle_legend)
        MenuItem(self, mopts, "Toggle Grid\tCtrl+G",
                 "Toggle Grid Display",
                 self.toggle_grid)

        mopts.AppendSeparator()
        MenuItem(self, mopts, "Zoom Out\tCtrl+Z",
                 "Zoom out to full data range",
                 self.unzoom_all)

        mhelp = wx.Menu()
        MenuItem(self, mhelp, "Quick Reference", "Quick Reference for WXMPlot", self.onHelp)
        MenuItem(self, mhelp, "About", "About WXMPlot", self.onAbout)

        mbar = wx.MenuBar()
        mbar.Append(mfile, 'File')
        mbar.Append(mopts, '&Options')
        # caller-supplied (title, menu) pairs go between Options and Help
        if self.user_menus is not None:
            for title, menu in self.user_menus:
                mbar.Append(menu, title)

        mbar.Append(mhelp, '&Help')
        self.SetMenuBar(mbar)
        self.Bind(wx.EVT_CLOSE,self.onExit)

    def toggle_grid(self, event=None, show=None):
        "toggle grid on top/bottom panels"
        if show is None:
            # default: flip current state of the top panel's grid
            show = not self.panel.conf.show_grid
        for p in (self.panel, self.panel_bot):
            p.conf.enable_grid(show)

    def onThemeColor(self, color, item):
        """pass theme colors to bottom panel"""
        bconf = self.panel_bot.conf
        if item == 'grid':
            bconf.set_gridcolor(color)
        elif item == 'bg':
            bconf.set_bgcolor(color)
        elif item == 'frame':
            bconf.set_framecolor(color)
        elif item == 'text':
            bconf.set_textcolor(color)
        bconf.canvas.draw()

    def onMargins(self, left=0.1, top=0.1, right=0.1, bottom=0.1):
        """ pass left/right margins on to bottom panel"""
        bconf = self.panel_bot.conf
        # keep the bottom panel's own top/bottom margins; sync only left/right
        l, t, r, b = bconf.margins
        bconf.set_margins(left=left, top=t, right=right, bottom=b)
        bconf.canvas.draw()

    def set_viewlimits(self, panel='top'):
        """update xy limits of a plot, as used with .update_line() """
        this_panel = self.get_panel(panel)
        xmin, xmax, ymin, ymax = this_panel.conf.set_viewlimits()[0]
        # print("Set ViewLimits ", xmin, xmax, ymin, ymax)
        # make top/bottom panel follow xlimits
        if this_panel == self.panel:
            other = self.panel_bot
            for _ax in other.fig.get_axes():
                _ax.set_xlim((xmin, xmax), emit=True)
            other.draw()

    def null_formatter(self, x, pos, type='x'):
        """tick formatter that hides labels (used for the top panel x axis)"""
        return ''

    def bot_yformatter(self, val, type=''):
        """custom formatter for FuncFormatter() and bottom panel"""
        # choose precision from the spacing between major ticks
        fmt = '%1.5g'
        ax = self.panel_bot.axes.yaxis
        ticks = ax.get_major_locator()()
        dtick = ticks[1] - ticks[0]

        if dtick > 29999:
            fmt = '%1.5g'
        elif dtick > 1.99:
            fmt = '%1.0f'
        elif dtick > 0.099:
            fmt = '%1.1f'
        elif dtick > 0.0099:
            fmt = '%1.2f'
        elif dtick > 0.00099:
            fmt = '%1.3f'
        elif dtick > 0.000099:
            fmt = '%1.4f'
        elif dtick > 0.0000099:
            fmt = '%1.5f'

        s = fmt % val
        # NOTE(review): the result of strip() is discarded (strings are
        # immutable); likely intended `s = s.strip()` — confirm before changing.
        s.strip()
        # tidy exponent notation: drop '+' and leading zeros in exponents
        s = s.replace('+', '')
        while s.find('e0')>0:
            s = s.replace('e0','e')
        while s.find('-0')>0:
            s = s.replace('-0','-')
        return s
| [
"newville@cars.uchicago.edu"
] | newville@cars.uchicago.edu |
e7d1c7da90d8fe9d28b8cfeebb52df3c85988b16 | d1c605e89fe3f33ba34bc3d29c4f34946cf4835c | /src/openfermion/utils/_trotter_error.py | 02b6d27bfa4935fdcc5921b98841bf874d436e1f | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | nkkchem/OpenFermion | 0bdf776fd2efb3ed19172ddccbc41ecfa15060e0 | 35ca4b389438a63eebf97cf492c135811c1923a6 | refs/heads/master | 2020-03-26T07:07:49.276600 | 2018-08-11T06:34:01 | 2018-08-11T06:34:01 | 144,637,497 | 1 | 0 | Apache-2.0 | 2018-08-13T21:51:34 | 2018-08-13T21:51:34 | null | UTF-8 | Python | false | false | 6,763 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to compute the second order Trotter error."""
from future.utils import iteritems
from math import sqrt, ceil
from scipy.linalg import expm
from openfermion.config import *
from openfermion.ops import QubitOperator
from openfermion.utils._operator_utils import normal_ordered
def commutator(op1, op2):
    """Return the commutator [op1, op2] = op1 * op2 - op2 * op1."""
    forward = op1 * op2
    reverse = op2 * op1
    return forward - reverse
def trivially_commutes(term_a, term_b):
    """Check whether two single-term QubitOperators trivially commute.

    Walks the two Pauli strings (sorted by qubit index) in lockstep and
    counts the qubits on which the terms apply different Pauli operators;
    the terms commute trivially iff that count is even.
    """
    ops_a, = term_a.terms.keys()
    ops_b, = term_b.terms.keys()

    idx_a = idx_b = 0
    anticommuting_overlaps = 0
    while idx_a < len(ops_a) and idx_b < len(ops_b):
        qubit_a, action_a = ops_a[idx_a]
        qubit_b, action_b = ops_b[idx_b]
        if qubit_a < qubit_b:
            idx_a += 1
        elif qubit_b < qubit_a:
            idx_b += 1
        else:
            # same qubit: differing Pauli operators anticommute locally
            if action_a != action_b:
                anticommuting_overlaps += 1
            idx_a += 1
            idx_b += 1
    return anticommuting_overlaps % 2 == 0
def trivially_double_commutes(term_a, term_b, term_c):
    """Check if the double commutator [term_a, [term_b, term_c]] is zero.

    Args:
        term_a, term_b, term_c: Single-term QubitOperators.

    Notes:
        The double commutator is trivially zero when term_b and term_c
        trivially commute (the inner commutator vanishes), or when
        term_a acts on no qubit in the union of term_b's and term_c's
        qubits.
    """
    if trivially_commutes(term_b, term_c):
        return True

    # compare the qubit supports of the three terms
    ops_a, = term_a.terms.keys()
    ops_b, = term_b.terms.keys()
    ops_c, = term_c.terms.keys()
    qubits_a = {qubit for qubit, _ in ops_a}
    qubits_bc = {qubit for qubit, _ in ops_b} | {qubit for qubit, _ in ops_c}
    return not (qubits_a & qubits_bc)
def error_operator(terms, series_order=2):
    """Determine the difference between the exact generator of unitary
    evolution and the approximate generator given by Trotter-Suzuki
    to the given order.

    Args:
        terms: a list of QubitTerms in the Hamiltonian to be simulated.
        series_order: the order at which to compute the BCH expansion.
            Only the second order formula is currently implemented
            (corresponding to Equation 9 of the paper).

    Returns:
        The difference between the true and effective generators of time
        evolution for a single Trotter step.

    Raises:
        NotImplementedError: if series_order is not 2.

    Notes: follows Equation 9 of Poulin et al.'s work in "The Trotter Step
        Size Required for Accurate Quantum Simulation of Quantum Chemistry".
    """
    if series_order != 2:
        raise NotImplementedError

    error_operator = QubitOperator()
    # sum the double commutators [T_alpha, [T_beta, T_alpha']] over the
    # index ranges of Eq. 9, skipping triples known to vanish trivially
    for beta in range(len(terms)):
        for alpha in range(beta + 1):
            for alpha_prime in range(beta):
                if not trivially_double_commutes(terms[alpha], terms[beta],
                                                 terms[alpha_prime]):
                    double_com = commutator(terms[alpha],
                                            commutator(terms[beta],
                                                       terms[alpha_prime]))
                    error_operator += double_com
                    # diagonal (alpha == beta) contributions carry weight 1/2
                    if alpha == beta:
                        error_operator -= double_com / 2.0

    return error_operator / 12.0
def error_bound(terms, tight=False):
    """
    Numerically upper bound the error in the ground state energy
    for the second order Trotter-Suzuki expansion.

    Args:
        terms: a list of single-term QubitOperators in the Hamiltonian
            to be simulated.
        tight: whether to use the triangle inequality to give a loose
            upper bound on the error (default) or to calculate the
            norm of the error operator.

    Returns:
        A float upper bound on norm of error in the ground state energy.

    Notes: follows Poulin et al.'s work in "The Trotter Step Size
        Required for Accurate Quantum Simulation of Quantum
        Chemistry". In particular, Equation 16 is used for a loose
        upper bound, and the norm of Equation 9 is calculated for
        a tighter bound using the error operator from error_operator.

        Possible extensions of this function would be to get the
        expectation value of the error operator with the Hartree-Fock
        state or CISD state, which can scalably bound the error in
        the ground state but much more accurately than the triangle
        inequality.
    """
    zero = QubitOperator()
    error = 0.0

    if tight:
        # return the 1-norm of the error operator (upper bound on error)
        error = sum(abs(coefficient)
                    for coefficient in error_operator(terms).terms.values())

    elif not tight:
        # Eq. 16: for each term, accumulate the coefficients of the later,
        # genuinely non-commuting terms, then weight by 4 * |coeff_a|
        for alpha in range(len(terms)):
            term_a = terms[alpha]
            coefficient_a, = term_a.terms.values()
            if coefficient_a:
                error_a = 0.

                for beta in range(alpha + 1, len(terms)):
                    term_b = terms[beta]
                    coefficient_b, = term_b.terms.values()
                    if not (trivially_commutes(term_a, term_b) or
                            commutator(term_a, term_b) == zero):
                        error_a += abs(coefficient_b)

                error += 4.0 * abs(coefficient_a) * error_a ** 2

    return error
def trotter_steps_required(trotter_error_bound, time, energy_precision):
    """Determine the number of Trotter steps for accurate simulation.

    Args:
        trotter_error_bound (float): Upper bound on Trotter error in the
                                     state of interest.
        time (float): The total simulation time.
        energy_precision (float): Acceptable shift in state energy.

    Returns:
        The integer minimum number of Trotter steps required for
        simulation to the desired precision.

    Notes:
        The number of Trotter steps required is an upper bound on the
        true requirement, which may be lower.
    """
    steps = time * sqrt(trotter_error_bound / energy_precision)
    return int(ceil(steps))
| [
"ryanbabbush@gmail.com"
] | ryanbabbush@gmail.com |
c869ed5f559a50650b263874da13d49651ae4d91 | e7e34e2726790686a1f239e22487fe7c957e179f | /homeassistant/components/tuya/const.py | 44b66b576e34756b7d4357dfe4e22767bdd5d40b | [
"Apache-2.0"
] | permissive | AlexxIT/home-assistant | 68a17b49644c5d943b204dc75e1f11fe3b701161 | 8de7966104911bca6f855a1755a6d71a07afb9de | refs/heads/dev | 2022-03-22T14:37:18.774214 | 2021-10-09T16:10:43 | 2021-10-09T16:10:43 | 100,278,871 | 9 | 0 | Apache-2.0 | 2022-01-31T06:18:02 | 2017-08-14T14:50:46 | Python | UTF-8 | Python | false | false | 10,193 | py | """Constants for the Tuya integration."""
from dataclasses import dataclass
DOMAIN = "tuya"
CONF_AUTH_TYPE = "auth_type"
CONF_PROJECT_TYPE = "tuya_project_type"
CONF_ENDPOINT = "endpoint"
CONF_ACCESS_ID = "access_id"
CONF_ACCESS_SECRET = "access_secret"
CONF_USERNAME = "username"
CONF_PASSWORD = "password"
CONF_COUNTRY_CODE = "country_code"
CONF_APP_TYPE = "tuya_app_type"
TUYA_DISCOVERY_NEW = "tuya_discovery_new_{}"
TUYA_DEVICE_MANAGER = "tuya_device_manager"
TUYA_HOME_MANAGER = "tuya_home_manager"
TUYA_MQTT_LISTENER = "tuya_mqtt_listener"
TUYA_HA_TUYA_MAP = "tuya_ha_tuya_map"
TUYA_HA_DEVICES = "tuya_ha_devices"
TUYA_RESPONSE_CODE = "code"
TUYA_RESPONSE_RESULT = "result"
TUYA_RESPONSE_MSG = "msg"
TUYA_RESPONSE_SUCCESS = "success"
TUYA_RESPONSE_PLATFROM_URL = "platform_url"
TUYA_HA_SIGNAL_UPDATE_ENTITY = "tuya_entry_update"
TUYA_SMART_APP = "tuyaSmart"
SMARTLIFE_APP = "smartlife"
ENDPOINT_AMERICA = "https://openapi.tuyaus.com"
ENDPOINT_CHINA = "https://openapi.tuyacn.com"
ENDPOINT_EASTERN_AMERICA = "https://openapi-ueaz.tuyaus.com"
ENDPOINT_EUROPE = "https://openapi.tuyaeu.com"
ENDPOINT_INDIA = "https://openapi.tuyain.com"
ENDPOINT_WESTERN_EUROPE = "https://openapi-weaz.tuyaeu.com"
PLATFORMS = ["climate", "fan", "light", "scene", "switch"]
@dataclass
class Country:
    """Describe a supported country."""

    # Human-readable country name.
    name: str
    # International dialing code (may contain a dash, e.g. "1-264").
    country_code: str
    # Tuya OpenAPI endpoint serving this country; defaults to the
    # America data center.
    endpoint: str = ENDPOINT_AMERICA
TUYA_COUNTRIES = [
Country("Afghanistan", "93"),
Country("Albania", "355"),
Country("Algeria", "213"),
Country("American Samoa", "1-684"),
Country("Andorra", "376"),
Country("Angola", "244"),
Country("Anguilla", "1-264"),
Country("Antarctica", "672"),
Country("Antigua and Barbuda", "1-268"),
Country("Argentina", "54", ENDPOINT_EUROPE),
Country("Armenia", "374"),
Country("Aruba", "297"),
Country("Australia", "61"),
Country("Austria", "43", ENDPOINT_EUROPE),
Country("Azerbaijan", "994"),
Country("Bahamas", "1-242"),
Country("Bahrain", "973"),
Country("Bangladesh", "880"),
Country("Barbados", "1-246"),
Country("Belarus", "375"),
Country("Belgium", "32", ENDPOINT_EUROPE),
Country("Belize", "501"),
Country("Benin", "229"),
Country("Bermuda", "1-441"),
Country("Bhutan", "975"),
Country("Bolivia", "591"),
Country("Bosnia and Herzegovina", "387"),
Country("Botswana", "267"),
Country("Brazil", "55", ENDPOINT_EUROPE),
Country("British Indian Ocean Territory", "246"),
Country("British Virgin Islands", "1-284"),
Country("Brunei", "673"),
Country("Bulgaria", "359"),
Country("Burkina Faso", "226"),
Country("Burundi", "257"),
Country("Cambodia", "855"),
Country("Cameroon", "237"),
Country("Canada", "1", ENDPOINT_AMERICA),
Country("Cape Verde", "238"),
Country("Cayman Islands", "1-345"),
Country("Central African Republic", "236"),
Country("Chad", "235"),
Country("Chile", "56"),
Country("China", "86", ENDPOINT_CHINA),
Country("Christmas Island", "61"),
Country("Cocos Islands", "61"),
Country("Colombia", "57"),
Country("Comoros", "269"),
Country("Cook Islands", "682"),
Country("Costa Rica", "506"),
Country("Croatia", "385", ENDPOINT_EUROPE),
Country("Cuba", "53"),
Country("Curacao", "599"),
Country("Cyprus", "357", ENDPOINT_EUROPE),
Country("Czech Republic", "420", ENDPOINT_EUROPE),
Country("Democratic Republic of the Congo", "243"),
Country("Denmark", "45", ENDPOINT_EUROPE),
Country("Djibouti", "253"),
Country("Dominica", "1-767"),
Country("Dominican Republic", "1-809"),
Country("East Timor", "670"),
Country("Ecuador", "593"),
Country("Egypt", "20"),
Country("El Salvador", "503"),
Country("Equatorial Guinea", "240"),
Country("Eritrea", "291"),
Country("Estonia", "372", ENDPOINT_EUROPE),
Country("Ethiopia", "251"),
Country("Falkland Islands", "500"),
Country("Faroe Islands", "298"),
Country("Fiji", "679"),
Country("Finland", "358", ENDPOINT_EUROPE),
Country("France", "33", ENDPOINT_EUROPE),
Country("French Polynesia", "689"),
Country("Gabon", "241"),
Country("Gambia", "220"),
Country("Georgia", "995"),
Country("Germany", "49", ENDPOINT_EUROPE),
Country("Ghana", "233"),
Country("Gibraltar", "350"),
Country("Greece", "30", ENDPOINT_EUROPE),
Country("Greenland", "299"),
Country("Grenada", "1-473"),
Country("Guam", "1-671"),
Country("Guatemala", "502"),
Country("Guernsey", "44-1481"),
Country("Guinea", "224"),
Country("Guinea-Bissau", "245"),
Country("Guyana", "592"),
Country("Haiti", "509"),
Country("Honduras", "504"),
Country("Hong Kong", "852"),
Country("Hungary", "36", ENDPOINT_EUROPE),
Country("Iceland", "354", ENDPOINT_EUROPE),
Country("India", "91", ENDPOINT_INDIA),
Country("Indonesia", "62"),
Country("Iran", "98"),
Country("Iraq", "964"),
Country("Ireland", "353", ENDPOINT_EUROPE),
Country("Isle of Man", "44-1624"),
Country("Israel", "972"),
Country("Italy", "39", ENDPOINT_EUROPE),
Country("Ivory Coast", "225"),
Country("Jamaica", "1-876"),
Country("Japan", "81", ENDPOINT_EUROPE),
Country("Jersey", "44-1534"),
Country("Jordan", "962"),
Country("Kazakhstan", "7"),
Country("Kenya", "254"),
Country("Kiribati", "686"),
Country("Kosovo", "383"),
Country("Kuwait", "965"),
Country("Kyrgyzstan", "996"),
Country("Laos", "856"),
Country("Latvia", "371", ENDPOINT_EUROPE),
Country("Lebanon", "961"),
Country("Lesotho", "266"),
Country("Liberia", "231"),
Country("Libya", "218"),
Country("Liechtenstein", "423", ENDPOINT_EUROPE),
Country("Lithuania", "370", ENDPOINT_EUROPE),
Country("Luxembourg", "352", ENDPOINT_EUROPE),
Country("Macau", "853"),
Country("Macedonia", "389"),
Country("Madagascar", "261"),
Country("Malawi", "265"),
Country("Malaysia", "60"),
Country("Maldives", "960"),
Country("Mali", "223"),
Country("Malta", "356", ENDPOINT_EUROPE),
Country("Marshall Islands", "692"),
Country("Mauritania", "222"),
Country("Mauritius", "230"),
Country("Mayotte", "262"),
Country("Mexico", "52"),
Country("Micronesia", "691"),
Country("Moldova", "373"),
Country("Monaco", "377"),
Country("Mongolia", "976"),
Country("Montenegro", "382"),
Country("Montserrat", "1-664"),
Country("Morocco", "212"),
Country("Mozambique", "258"),
Country("Myanmar", "95"),
Country("Namibia", "264"),
Country("Nauru", "674"),
Country("Nepal", "977"),
Country("Netherlands", "31", ENDPOINT_EUROPE),
Country("Netherlands Antilles", "599"),
Country("New Caledonia", "687"),
Country("New Zealand", "64"),
Country("Nicaragua", "505"),
Country("Niger", "227"),
Country("Nigeria", "234"),
Country("Niue", "683"),
Country("North Korea", "850"),
Country("Northern Mariana Islands", "1-670"),
Country("Norway", "47"),
Country("Oman", "968"),
Country("Pakistan", "92"),
Country("Palau", "680"),
Country("Palestine", "970"),
Country("Panama", "507"),
Country("Papua New Guinea", "675"),
Country("Paraguay", "595"),
Country("Peru", "51"),
Country("Philippines", "63"),
Country("Pitcairn", "64"),
Country("Poland", "48", ENDPOINT_EUROPE),
Country("Portugal", "351", ENDPOINT_EUROPE),
Country("Puerto Rico", "1-787, 1-939"),
Country("Qatar", "974"),
Country("Republic of the Congo", "242"),
Country("Reunion", "262"),
Country("Romania", "40", ENDPOINT_EUROPE),
Country("Russia", "7", ENDPOINT_EUROPE),
Country("Rwanda", "250"),
Country("Saint Barthelemy", "590"),
Country("Saint Helena", "290"),
Country("Saint Kitts and Nevis", "1-869"),
Country("Saint Lucia", "1-758"),
Country("Saint Martin", "590"),
Country("Saint Pierre and Miquelon", "508"),
Country("Saint Vincent and the Grenadines", "1-784"),
Country("Samoa", "685"),
Country("San Marino", "378"),
Country("Sao Tome and Principe", "239"),
Country("Saudi Arabia", "966"),
Country("Senegal", "221"),
Country("Serbia", "381"),
Country("Seychelles", "248"),
Country("Sierra Leone", "232"),
Country("Singapore", "65"),
Country("Sint Maarten", "1-721"),
Country("Slovakia", "421", ENDPOINT_EUROPE),
Country("Slovenia", "386", ENDPOINT_EUROPE),
Country("Solomon Islands", "677"),
Country("Somalia", "252"),
Country("South Africa", "27"),
Country("South Korea", "82"),
Country("South Sudan", "211"),
Country("Spain", "34", ENDPOINT_EUROPE),
Country("Sri Lanka", "94"),
Country("Sudan", "249"),
Country("Suriname", "597"),
Country("Svalbard and Jan Mayen", "47", ENDPOINT_EUROPE),
Country("Swaziland", "268"),
Country("Sweden", "46", ENDPOINT_EUROPE),
Country("Switzerland", "41"),
Country("Syria", "963"),
Country("Taiwan", "886"),
Country("Tajikistan", "992"),
Country("Tanzania", "255"),
Country("Thailand", "66"),
Country("Togo", "228"),
Country("Tokelau", "690"),
Country("Tonga", "676"),
Country("Trinidad and Tobago", "1-868"),
Country("Tunisia", "216"),
Country("Turkey", "90"),
Country("Turkmenistan", "993"),
Country("Turks and Caicos Islands", "1-649"),
Country("Tuvalu", "688"),
Country("U.S. Virgin Islands", "1-340"),
Country("Uganda", "256"),
Country("Ukraine", "380"),
Country("United Arab Emirates", "971"),
Country("United Kingdom", "44", ENDPOINT_EUROPE),
Country("United States", "1", ENDPOINT_AMERICA),
Country("Uruguay", "598"),
Country("Uzbekistan", "998"),
Country("Vanuatu", "678"),
Country("Vatican", "379"),
Country("Venezuela", "58"),
Country("Vietnam", "84"),
Country("Wallis and Futuna", "681"),
Country("Western Sahara", "212"),
Country("Yemen", "967"),
Country("Zambia", "260"),
Country("Zimbabwe", "263"),
]
| [
"noreply@github.com"
] | AlexxIT.noreply@github.com |
194b57951d3551b17b1e5082928f57cc5902ee37 | f072d766c00c0931b753a9cc50e36994400153c0 | /plot_lnumass_lhe.py | e0a85908f54d6c53307dd67989977f191b379d32 | [] | no_license | UniMiBAnalyses/Utils | 475208a32bc77dfc43c78048f1652a0c96144704 | d0375a5c8debcba4e39c7187ca7933c810d68357 | refs/heads/master | 2021-06-27T10:02:01.438794 | 2020-11-16T23:29:36 | 2020-11-16T23:29:36 | 177,773,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,393 | py | import ROOT as R
import os
import sys
import utils
from math import cosh
from itertools import combinations
from operator import itemgetter
file = R.TFile("/afs/cern.ch/work/d/dvalsecc/private/CMSSW_10_2_0/src/LatinoTreesGEN/GenDumper/test/output_lhe.root", "READ")
tree = file.Get("Analyzer/myTree")
h_lnujjmass= R.TH1D("h_lnujjmass", "lnu mass", 100, 0, 200)
for iev, event in enumerate(tree):
if iev % 1000 == 0: print(".", end="")
lep = R.TLorentzVector()
nu = R.TLorentzVector()
lep.SetPtEtaPhiE(event.lhept1, event.lheeta1, event.lhephi1, event.lhept1*cosh(event.lheeta1))
nu.SetPtEtaPhiE(event.nu_lhept1, event.nu_lheeta1, event.nu_lhephi1, event.nu_lhept1*cosh(event.nu_lheeta1))
jets = []
jetsids = []
for i in range(1,5):
jet = R.TLorentzVector()
# print(getattr(event, f"lhejetpt{i}"), getattr(event, f"lhejeteta{i}"),
# getattr(event, f"lhejetphi{i}"),getattr(event, f"lhejetpt{i}"))
jet.SetPtEtaPhiE(getattr(event, f"lhejetpt{i}"), getattr(event, f"lhejeteta{i}"),
getattr(event, f"lhejetphi{i}"),getattr(event, f"lhejetpt{i}")*cosh(getattr(event, f"lhejeteta{i}")))
jets.append(jet)
jetsids.append(getattr(event, f"lhejetpdgid{i}"))
if (lep+nu).M() < 60:
good_pair = utils.nearest_mass_pair(jets, 80.375)
W_jets = [j for ij, j in enumerate(jets) if ij in good_pair]
else:
# We are looking at WplusTo2J_WminusToLNu
W_jets = ()
Wp = [(2,-1),(2,-3),(2,-5),(4,-1),(4,-3),(4,-5)]
#print("ids", jetsids)
masses = []
for p1,p2 in combinations(range(len(jetsids)),2):
#print((jetsids[p1],jetsids[p2]))
if (jetsids[p1],jetsids[p2]) in Wp or (jetsids[p2],jetsids[p1]) in Wp:
#W_jets = (jets[p1], jets[p2])
masses.append((jets[p1],jets[p2], (jets[p1]+jets[p2]).M()))
#print(jetsids[p1],jetsids[p2],(jets[p1]+jets[p2]).M())
#print(list(map(itemgetter(2), masses)))
# Now get the pair with the smaller mass
W_jets = sorted(masses, key=itemgetter(2))[0]
lnujj = lep + nu + W_jets[0] + W_jets[1]
#print((good_jets[0] + good_jets[1]).M())
h_lnujjmass.Fill(lnujj.M())
c = R.TCanvas()
h_lnujjmass.Draw("hist")
c.SetLogy()
c.Draw()
| [
"davide.valsecchi@cern.ch"
] | davide.valsecchi@cern.ch |
8f255a8f0063b635ed375927facb346c0ad8997e | 209a7a4023a9a79693ec1f6e8045646496d1ea71 | /COMP0016_2020_21_Team12-datasetsExperimentsAna/pwa/FADapp/pythonScripts/venv/Lib/site-packages/aniso8601/builders/python.py | 51335bef2b1ed9d4e1bb73acd4e705af9f776a82 | [
"MIT"
] | permissive | anzhao920/MicrosoftProject15_Invictus | 5e2347015411bbffbdf0ceb059df854661fb240c | 15f44eebb09561acbbe7b6730dfadf141e4c166d | refs/heads/main | 2023-04-16T13:24:39.332492 | 2021-04-27T00:47:13 | 2021-04-27T00:47:13 | 361,913,170 | 0 | 0 | MIT | 2021-04-26T22:41:56 | 2021-04-26T22:41:55 | null | UTF-8 | Python | false | false | 17,874 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import datetime
from aniso8601.builders import BaseTimeBuilder, TupleBuilder
from aniso8601.exceptions import (DayOutOfBoundsError,
HoursOutOfBoundsError,
LeapSecondError, MidnightBoundsError,
MinutesOutOfBoundsError,
SecondsOutOfBoundsError,
WeekOutOfBoundsError, YearOutOfBoundsError)
from aniso8601.utcoffset import UTCOffset
MICROSECONDS_PER_SECOND = int(1e6)
MICROSECONDS_PER_MINUTE = 60 * MICROSECONDS_PER_SECOND
MICROSECONDS_PER_HOUR = 60 * MICROSECONDS_PER_MINUTE
MICROSECONDS_PER_DAY = 24 * MICROSECONDS_PER_HOUR
MICROSECONDS_PER_WEEK = 7 * MICROSECONDS_PER_DAY
MICROSECONDS_PER_MONTH = 30 * MICROSECONDS_PER_DAY
MICROSECONDS_PER_YEAR = 365 * MICROSECONDS_PER_DAY
class PythonTimeBuilder(BaseTimeBuilder):
@classmethod
def build_date(cls, YYYY=None, MM=None, DD=None, Www=None, D=None,
DDD=None):
if YYYY is not None:
#Truncated dates, like '19', refer to 1900-1999 inclusive,
#we simply parse to 1900
if len(YYYY) < 4:
#Shift 0s in from the left to form complete year
YYYY = YYYY.ljust(4, '0')
year = cls.cast(YYYY, int,
thrownmessage='Invalid year string.')
if MM is not None:
month = cls.cast(MM, int,
thrownmessage='Invalid month string.')
else:
month = 1
if DD is not None:
day = cls.cast(DD, int,
thrownmessage='Invalid day string.')
else:
day = 1
if Www is not None:
weeknumber = cls.cast(Www, int,
thrownmessage='Invalid week string.')
if weeknumber == 0 or weeknumber > 53:
raise WeekOutOfBoundsError('Week number must be between '
'1..53.')
else:
weeknumber = None
if DDD is not None:
dayofyear = cls.cast(DDD, int,
thrownmessage='Invalid day string.')
else:
dayofyear = None
if D is not None:
dayofweek = cls.cast(D, int,
thrownmessage='Invalid day string.')
if dayofweek == 0 or dayofweek > 7:
raise DayOutOfBoundsError('Weekday number must be between '
'1..7.')
else:
dayofweek = None
#0000 (1 BC) is not representable as a Python date so a ValueError is
#raised
if year == 0:
raise YearOutOfBoundsError('Year must be between 1..9999.')
if dayofyear is not None:
return PythonTimeBuilder._build_ordinal_date(year, dayofyear)
if weeknumber is not None:
return PythonTimeBuilder._build_week_date(year, weeknumber,
isoday=dayofweek)
return datetime.date(year, month, day)
@classmethod
def build_time(cls, hh=None, mm=None, ss=None, tz=None):
#Builds a time from the given parts, handling fractional arguments
#where necessary
hours = 0
minutes = 0
seconds = 0
microseconds = 0
if hh is not None:
if '.' in hh:
hours, remainingmicroseconds = cls._split_to_microseconds(hh, MICROSECONDS_PER_HOUR, 'Invalid hour string.')
microseconds += remainingmicroseconds
else:
hours = cls.cast(hh, int,
thrownmessage='Invalid hour string.')
if mm is not None:
if '.' in mm:
minutes, remainingmicroseconds = cls._split_to_microseconds(mm, MICROSECONDS_PER_MINUTE, 'Invalid minute string.')
microseconds += remainingmicroseconds
else:
minutes = cls.cast(mm, int,
thrownmessage='Invalid minute string.')
if ss is not None:
if '.' in ss:
seconds, remainingmicroseconds = cls._split_to_microseconds(ss, MICROSECONDS_PER_SECOND, 'Invalid second string.')
microseconds += remainingmicroseconds
else:
seconds = cls.cast(ss, int,
thrownmessage='Invalid second string.')
hours, minutes, seconds, microseconds = PythonTimeBuilder._distribute_microseconds(microseconds, (hours, minutes, seconds), (MICROSECONDS_PER_HOUR, MICROSECONDS_PER_MINUTE, MICROSECONDS_PER_SECOND))
#Range checks
if hours == 23 and minutes == 59 and seconds == 60:
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
raise LeapSecondError('Leap seconds are not supported.')
if (hours == 24
and (minutes != 0 or seconds != 0)):
raise MidnightBoundsError('Hour 24 may only represent midnight.')
if hours > 24:
raise HoursOutOfBoundsError('Hour must be between 0..24 with '
'24 representing midnight.')
if minutes >= 60:
raise MinutesOutOfBoundsError('Minutes must be less than 60.')
if seconds >= 60:
raise SecondsOutOfBoundsError('Seconds must be less than 60.')
#Fix ranges that have passed range checks
if hours == 24:
hours = 0
minutes = 0
seconds = 0
#Datetimes don't handle fractional components, so we use a timedelta
if tz is not None:
return (datetime.datetime(1, 1, 1,
hour=hours,
minute=minutes,
tzinfo=cls._build_object(tz))
+ datetime.timedelta(seconds=seconds,
microseconds=microseconds)
).timetz()
return (datetime.datetime(1, 1, 1,
hour=hours,
minute=minutes)
+ datetime.timedelta(seconds=seconds,
microseconds=microseconds)
).time()
@classmethod
def build_datetime(cls, date, time):
return datetime.datetime.combine(cls._build_object(date),
cls._build_object(time))
@classmethod
def build_duration(cls, PnY=None, PnM=None, PnW=None, PnD=None, TnH=None,
TnM=None, TnS=None):
years = 0
months = 0
days = 0
weeks = 0
hours = 0
minutes = 0
seconds = 0
microseconds = 0
if PnY is not None:
if '.' in PnY:
years, remainingmicroseconds = cls._split_to_microseconds(PnY, MICROSECONDS_PER_YEAR, 'Invalid year string.')
microseconds += remainingmicroseconds
else:
years = cls.cast(PnY, int,
thrownmessage='Invalid year string.')
if PnM is not None:
if '.' in PnM:
months, remainingmicroseconds = cls._split_to_microseconds(PnM, MICROSECONDS_PER_MONTH, 'Invalid month string.')
microseconds += remainingmicroseconds
else:
months = cls.cast(PnM, int,
thrownmessage='Invalid month string.')
if PnW is not None:
if '.' in PnW:
weeks, remainingmicroseconds = cls._split_to_microseconds(PnW, MICROSECONDS_PER_WEEK, 'Invalid week string.')
microseconds += remainingmicroseconds
else:
weeks = cls.cast(PnW, int,
thrownmessage='Invalid week string.')
if PnD is not None:
if '.' in PnD:
days, remainingmicroseconds = cls._split_to_microseconds(PnD, MICROSECONDS_PER_DAY, 'Invalid day string.')
microseconds += remainingmicroseconds
else:
days = cls.cast(PnD, int,
thrownmessage='Invalid day string.')
if TnH is not None:
if '.' in TnH:
hours, remainingmicroseconds = cls._split_to_microseconds(TnH, MICROSECONDS_PER_HOUR, 'Invalid hour string.')
microseconds += remainingmicroseconds
else:
hours = cls.cast(TnH, int,
thrownmessage='Invalid hour string.')
if TnM is not None:
if '.' in TnM:
minutes, remainingmicroseconds = cls._split_to_microseconds(TnM, MICROSECONDS_PER_MINUTE, 'Invalid minute string.')
microseconds += remainingmicroseconds
else:
minutes = cls.cast(TnM, int,
thrownmessage='Invalid minute string.')
if TnS is not None:
if '.' in TnS:
seconds, remainingmicroseconds = cls._split_to_microseconds(TnS, MICROSECONDS_PER_SECOND, 'Invalid second string.')
microseconds += remainingmicroseconds
else:
seconds = cls.cast(TnS, int,
thrownmessage='Invalid second string.')
years, months, weeks, days, hours, minutes, seconds, microseconds = PythonTimeBuilder._distribute_microseconds(microseconds, (years, months, weeks, days, hours, minutes, seconds), (MICROSECONDS_PER_YEAR, MICROSECONDS_PER_MONTH, MICROSECONDS_PER_WEEK, MICROSECONDS_PER_DAY, MICROSECONDS_PER_HOUR, MICROSECONDS_PER_MINUTE, MICROSECONDS_PER_SECOND))
#Note that weeks can be handled without conversion to days
totaldays = years * 365 + months * 30 + days
return datetime.timedelta(days=totaldays,
seconds=seconds,
microseconds=microseconds,
minutes=minutes,
hours=hours,
weeks=weeks)
@classmethod
def build_interval(cls, start=None, end=None, duration=None):
if start is not None and end is not None:
#<start>/<end>
startobject = cls._build_object(start)
endobject = cls._build_object(end)
return (startobject, endobject)
durationobject = cls._build_object(duration)
#Determine if datetime promotion is required
datetimerequired = (duration[4] is not None
or duration[5] is not None
or duration[6] is not None
or durationobject.seconds != 0
or durationobject.microseconds != 0)
if end is not None:
#<duration>/<end>
endobject = cls._build_object(end)
if end[-1] == 'date' and datetimerequired is True:
#<end> is a date, and <duration> requires datetime resolution
return (endobject,
cls.build_datetime(end, TupleBuilder.build_time())
- durationobject)
return (endobject,
endobject
- durationobject)
#<start>/<duration>
startobject = cls._build_object(start)
if start[-1] == 'date' and datetimerequired is True:
#<start> is a date, and <duration> requires datetime resolution
return (startobject,
cls.build_datetime(start, TupleBuilder.build_time())
+ durationobject)
return (startobject,
startobject
+ durationobject)
@classmethod
def build_repeating_interval(cls, R=None, Rnn=None, interval=None):
startobject = None
endobject = None
if interval[0] is not None:
startobject = cls._build_object(interval[0])
if interval[1] is not None:
endobject = cls._build_object(interval[1])
if interval[2] is not None:
durationobject = cls._build_object(interval[2])
else:
durationobject = endobject - startobject
if R is True:
if startobject is not None:
return cls._date_generator_unbounded(startobject,
durationobject)
return cls._date_generator_unbounded(endobject,
-durationobject)
iterations = cls.cast(Rnn, int,
thrownmessage='Invalid iterations.')
if startobject is not None:
return cls._date_generator(startobject, durationobject, iterations)
return cls._date_generator(endobject, -durationobject, iterations)
@classmethod
def build_timezone(cls, negative=None, Z=None, hh=None, mm=None, name=''):
if Z is True:
#Z -> UTC
return UTCOffset(name='UTC', minutes=0)
if hh is not None:
tzhour = cls.cast(hh, int,
thrownmessage='Invalid hour string.')
else:
tzhour = 0
if mm is not None:
tzminute = cls.cast(mm, int,
thrownmessage='Invalid minute string.')
else:
tzminute = 0
if negative is True:
return UTCOffset(name=name, minutes=-(tzhour * 60 + tzminute))
return UTCOffset(name=name, minutes=tzhour * 60 + tzminute)
@staticmethod
def _build_week_date(isoyear, isoweek, isoday=None):
if isoday is None:
return (PythonTimeBuilder._iso_year_start(isoyear)
+ datetime.timedelta(weeks=isoweek - 1))
return (PythonTimeBuilder._iso_year_start(isoyear)
+ datetime.timedelta(weeks=isoweek - 1, days=isoday - 1))
@staticmethod
def _build_ordinal_date(isoyear, isoday):
#Day of year to a date
#https://stackoverflow.com/questions/2427555/python-question-year-and-day-of-year-to-date
builtdate = (datetime.date(isoyear, 1, 1)
+ datetime.timedelta(days=isoday - 1))
#Enforce ordinal day limitation
#https://bitbucket.org/nielsenb/aniso8601/issues/14/parsing-ordinal-dates-should-only-allow
if isoday == 0 or builtdate.year != isoyear:
raise DayOutOfBoundsError('Day of year must be from 1..365, '
'1..366 for leap year.')
return builtdate
@staticmethod
def _iso_year_start(isoyear):
#Given an ISO year, returns the equivalent of the start of the year
#on the Gregorian calendar (which is used by Python)
#Stolen from:
#http://stackoverflow.com/questions/304256/whats-the-best-way-to-find-the-inverse-of-datetime-isocalendar
#Determine the location of the 4th of January, the first week of
#the ISO year is the week containing the 4th of January
#http://en.wikipedia.org/wiki/ISO_week_date
fourth_jan = datetime.date(isoyear, 1, 4)
#Note the conversion from ISO day (1 - 7) and Python day (0 - 6)
delta = datetime.timedelta(days=fourth_jan.isoweekday() - 1)
#Return the start of the year
return fourth_jan - delta
@staticmethod
def _date_generator(startdate, timedelta, iterations):
currentdate = startdate
currentiteration = 0
while currentiteration < iterations:
yield currentdate
#Update the values
currentdate += timedelta
currentiteration += 1
@staticmethod
def _date_generator_unbounded(startdate, timedelta):
currentdate = startdate
while True:
yield currentdate
#Update the value
currentdate += timedelta
@classmethod
def _split_to_microseconds(cls, floatstr, conversion, thrownmessage):
#Splits a string with a decimal point into an int, and
#int representing the floating point remainder as a number
#of microseconds, determined by multiplying by conversion
intpart, floatpart = floatstr.split('.')
intvalue = cls.cast(intpart, int,
thrownmessage=thrownmessage)
preconvertedvalue = cls.cast(floatpart, int,
thrownmessage=thrownmessage)
convertedvalue = ((preconvertedvalue * conversion) //
(10 ** len(floatpart)))
return (intvalue, convertedvalue)
@staticmethod
def _distribute_microseconds(todistribute, recipients, reductions):
#Given a number of microseconds as int, a tuple of ints length n
#to distribute to, and a tuple of ints length n to divide todistribute
#by (from largest to smallest), returns a tuple of length n + 1, with
#todistribute divided across recipients using the reductions, with
#the final remainder returned as the final tuple member
results = []
remainder = todistribute
for index, reduction in enumerate(reductions):
additional, remainder = divmod(remainder, reduction)
results.append(recipients[index] + additional)
#Always return the remaining microseconds
results.append(remainder)
return tuple(results)
| [
"ana.kapros@yahoo.ro"
] | ana.kapros@yahoo.ro |
a15f316975d4df0d503c6776c28a8a97f11bddd6 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03229/s182952830.py | f7d0cdedb254735814e092d73da13fd79ac65aaf | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | n = int(input())
a = [int(input()) for _ in range(n)]
a.sort()
front = a[n - 1]
back = a[n - 1]
i = 0
j = n - 2
ans = 0
while i <= j:
diff1 = abs(front - a[i])
diff2 = abs(back - a[i])
diff3 = abs(front - a[j])
diff4 = abs(back - a[j])
mx = max(diff1, diff2, diff3, diff4)
ans += mx
if mx == diff1:
front = a[i]
i += 1
elif mx == diff2:
back = a[i]
i += 1
elif mx == diff3:
front = a[j]
j -= 1
else:
back = a[j]
j -= 1
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
85aed72f0bd6e3d1dde024e704cac846d3c294a1 | 03c9cd5bd96874d6117fb17c37ac4d7450c15933 | /django-tutorial/chapter04/orm_intro_demo/book/models.py | fbfa5a5d79b26d829f5b15b1c5f0278bcae96c54 | [] | no_license | atiger808/opencv-tutorial | 603de35e97679d6beae104298ae355edfdd9036a | 2ea9bb3818284fb75f85697e36fde37b6479d1c6 | refs/heads/master | 2020-05-29T23:16:30.462022 | 2019-11-05T10:08:20 | 2019-11-05T10:08:20 | 189,425,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | from django.db import models
# Create your models here.
class Book(models.Model):
id = models.AutoField(primary_key=True)
title = models.CharField(max_length=100, null=False)
author = models.CharField(max_length=100, null=False)
price= models.FloatField(null=False, default=0)
# 1 makemigrations命令 生成迁移脚本文件
# python manage.py makemigrations
# 2 migrate命令 将生成的迁移脚本文件映射到数据库
# python manage.py migrate
class Published(models.Model):
name = models.CharField(max_length=100, null=False)
address = models.CharField(max_length=100, null=False) | [
"atiger0614@163.com"
] | atiger0614@163.com |
3faa5711aeeb59f4ef00fa91833c41f63cacdad4 | 3027ca01be33d07d7acd3a08f8bc812fed71544c | /docs/source/conf.py | 6fddb100fbcd2cc38db5bc457f8c7508d827c92f | [] | no_license | hirune924/ayniy | 016c32e34bf61d074554b4bdd4339d76d14d718f | 10537ab50283144fa6267afd912b387e75f3790c | refs/heads/master | 2022-11-29T02:45:16.474280 | 2020-08-09T05:56:08 | 2020-08-09T05:56:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,030 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
# -- Project information -----------------------------------------------------
project = 'Ayniy'
copyright = '2020, Shotaro Ishihara'
author = 'Shotaro Ishihara'
# The full version, including alpha/beta/rc tags
release = '0.1.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The master toctree document.
master_doc = 'index'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| [
"upura0@gmail.com"
] | upura0@gmail.com |
afb43707671fdb41caaf35d21da658269313c95c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02642/s476928460.py | 64de74dd01fb109dd7acd2a9fb2b1a7c2da06fd8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | import collections
import heapq
import math
import random
import sys
input = sys.stdin.readline
sys.setrecursionlimit(500005)
ri = lambda: int(input())
rl = lambda: list(map(int, input().split()))
rs = lambda: input().rstrip()
n = ri()
a = rl()
N = 1000000
f = [0] * (N + 10)
for v in a:
f[v] += 1
for i in range(N, 0, -1):
if f[i] == 0:
continue
j = i * 2
while j <= N:
f[j] += f[i]
j += i
cnt = sum(f[i] == 1 for i in a)
print(cnt)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ddfab1ad5bd41515b7cf094827c444e373346c86 | 8d9318a33afc2c3b5ca8ac99fce0d8544478c94a | /Books/Casandra DB/opscenter-5.1.0/lib/py-redhat/2.6/shared/amd64/twisted/web/_auth/__init__.py | 524586111ac0dd58ada6b7fbb553571f8252aa93 | [] | no_license | tushar239/git-large-repo | e30aa7b1894454bf00546312a3fb595f6dad0ed6 | 9ee51112596e5fc3a7ab2ea97a86ec6adc677162 | refs/heads/master | 2021-01-12T13:48:43.280111 | 2016-11-01T22:14:51 | 2016-11-01T22:14:51 | 69,609,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | ../../../../../../../py-unpure/twisted/web/_auth/__init__.py | [
"tushar239@gmail.com"
] | tushar239@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.