blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a5d12a155ee80b50e6db313a688f1718a5fcc5a9 | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/movianremote/testcase/firstcases/testcase3_026.py | 2eba182513b6be7c54e360d4b42691e24bf55512 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,409 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
# Appium "desired capabilities" for the emulator session under test: the
# Movian (Showtime) Remote app, instrumented with Jacoco for coverage.
desired_caps = {
    'platformName' : 'Android',
    'deviceName' : 'Android Emulator',
    'platformVersion' : '4.4',
    'appPackage' : 'com.claha.showtimeremote',
    'appActivity' : 'com.claha.showtimeremote.MovianRemote',
    'resetKeyboard' : True,
    'androidCoverage' : 'com.claha.showtimeremote/com.claha.showtimeremote.JacocoInstrumentation',
    'noReset' : True
}
def command(cmd, timeout=5):
    """Run *cmd* in a shell, allow it *timeout* seconds, then stop it.

    Output is captured (and discarded) so the child does not write to the
    test log.  Returns None.
    """
    p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    p.terminate()
    # BUG FIX: reap the child so it does not linger as a zombie process.
    p.wait()
    return
def getElememt(driver, str) :
    """Find an element by uiautomator selector *str*, retrying for up to
    5 seconds; as a last resort tap the screen (to dismiss a transient
    window) and try one final time, letting the exception propagate.

    NOTE(review): the parameter shadows the builtin ``str``; kept for
    interface compatibility with the generated callers.
    """
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    # Tap near the corner in case a popup is covering the target.
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str)
    return element
def getElememtBack(driver, str1, str2) :
    """Find an element by primary selector *str1* (2 quick attempts),
    falling back to selector *str2* (5 attempts, then a screen tap and one
    last unguarded lookup)."""
    for i in range(0, 2, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str1)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str2)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    # Tap near the corner in case a popup is covering the target.
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str2)
    return element
def swipe(driver, startxper, startyper, endxper, endyper) :
    """Drag across the screen between two points given as fractions of the
    window size, retrying once after a short pause if WebDriver balks."""
    dims = driver.get_window_size()
    w = dims["width"]
    h = dims["height"]
    gesture = dict(start_x=int(w * startxper), start_y=int(h * startyper),
                   end_x=int(w * endxper), end_y=int(h * endyper),
                   duration=1000)
    try:
        driver.swipe(**gesture)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(**gesture)
    return
def scrollToFindElement(driver, str) :
    """Search for an element matching *str*, swiping down (5 tries) then
    back up (4 tries) between attempts.  When several elements match,
    prefer the first enabled one.  Returns the element, or None."""
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
            elements = driver.find_elements_by_android_uiautomator(str)
            if (len(elements) > 1) :
                for temp in elements :
                    if temp.get_attribute("enabled") == "true" :
                        element = temp
                        break
        except NoSuchElementException:
            # Not visible yet: scroll the content downward.
            swipe(driver, 0.5, 0.55, 0.5, 0.2)
        else :
            return element
    for i in range(0, 4, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
            elements = driver.find_elements_by_android_uiautomator(str)
            if (len(elements) > 1):
                for temp in elements:
                    if temp.get_attribute("enabled") == "true":
                        element = temp
                        break
        except NoSuchElementException:
            # Still missing: scroll back upward.
            swipe(driver, 0.5, 0.2, 0.5, 0.55)
        else :
            return element
    return
def scrollToClickElement(driver, str) :
    """Scroll until *str* matches an element, then click it; silently does
    nothing when no element can be found."""
    target = scrollToFindElement(driver, str)
    if target is not None:
        target.click()
def clickInList(driver, str) :
    """Click an item in a list dialog.

    With no selector, click the last CheckedTextView when a popup window
    is showing; otherwise scroll to the selector.  If nothing was found
    and a popup is up, back out of it with the BACK key (keycode 4).
    """
    element = None
    if (str is None) :
        candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
        if len(candidates) >= 1 and checkWindow(driver):
            element = candidates[len(candidates)-1]
    else :
        element = scrollToFindElement(driver, str)
    if element is not None :
        element.click()
    else :
        if checkWindow(driver) :
            driver.press_keycode(4)
def clickOnCheckable(driver, str, value = "true") :
    """Within the LinearLayout row containing the element matched by *str*,
    toggle the row's checkable widget until its "checked" attribute equals
    *value* ("true"/"false")."""
    parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
    for parent in parents:
        try :
            # Raises NoSuchElementException when this row lacks the label.
            parent.find_element_by_android_uiautomator(str)
            lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
            if len(lists) == 1 :
                innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
                nowvalue = innere.get_attribute("checked")
                if (nowvalue != value) :
                    innere.click()
                break
        except NoSuchElementException:
            continue
def typeText(driver, value) :
    """Type *value* into the first EditText, then confirm via the OK button
    (or BACK out of the dialog when no OK button exists)."""
    element = getElememt(driver, "new UiSelector().className(\"android.widget.EditText\")")
    element.clear()
    element.send_keys(value)
    enterelement = getElememt(driver, "new UiSelector().text(\"OK\")")
    if (enterelement is None) :
        if checkWindow(driver):
            driver.press_keycode(4)
    else :
        enterelement.click()
def checkWindow(driver) :
    """Return True when the root FrameLayout is shorter than the display,
    which indicates a popup/dialog window is currently showing."""
    display = driver.get_window_size()
    frame = driver.find_element_by_class_name("android.widget.FrameLayout").size
    return display['height'] > frame['height']
def testingSeekBar(driver, str, value):
    """Locate the SeekBar associated with *str* (or the one in the current
    popup), drag it to fraction *value*, and accept with OK.  Failures to
    find any of the pieces are swallowed after a 1s pause."""
    try :
        if(not checkWindow(driver)) :
            element = seekForNearestSeekBar(driver, str)
        else :
            element = driver.find_element_by_class_name("android.widget.SeekBar")
        if (None != element):
            settingSeekBar(driver, element, value)
            driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
    except NoSuchElementException:
        time.sleep(1)
def seekForNearestSeekBar(driver, str):
    """Find the SeekBar that sits in the same LinearLayout row as the
    element matched by uiautomator selector *str*.

    Returns the SeekBar element, or None when no matching row exists.
    """
    parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
    for parent in parents:
        try :
            # Raises NoSuchElementException when this row lacks the label.
            parent.find_element_by_android_uiautomator(str)
            lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
            if len(lists) == 1 :
                # BUG FIX: removed the unreachable `break` that followed
                # this `return` in the original.
                return parent.find_element_by_class_name("android.widget.SeekBar")
        except NoSuchElementException:
            continue
    return None
def settingSeekBar(driver, element, value) :
    """Drag *element* (a SeekBar) so its handle lands at fraction *value*
    (0.0 - 1.0) of the bar's width.

    Presses just inside the left edge, slides to the requested fraction at
    mid-height, and releases.  Returns None.
    """
    x = element.rect.get("x")
    y = element.rect.get("y")
    width = element.rect.get("width")
    height = element.rect.get("height")
    TouchAction(driver).press(None, x + 10, y + height/2).move_to(None, x + width * value,y + height/2).release().perform()
    # BUG FIX: dropped the original trailing `y = value`, a dead assignment
    # to a local that was never read again.
def clickInMultiList(driver, str) :
    """Like clickInList, but for multi-choice dialogs: ensure the chosen
    item ends up checked, then dismiss the dialog via its OK button."""
    element = None
    if (str is None) :
        candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
        if len(candidates) >= 1 and checkWindow(driver):
            element = candidates[len(candidates)-1]
    else :
        element = scrollToFindElement(driver, str)
    if element is not None :
        nowvalue = element.get_attribute("checked")
        if (nowvalue != "true") :
            element.click()
    if checkWindow(driver) :
        driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
# testcase3_026
# Generated UI test (Python 2): drives a fixed sequence of taps and
# long-presses on the Movian Remote programmable buttons and the menu key,
# then dumps Jacoco coverage under the name "3_026" and force-stops the app
# if it wandered off-package.
try :
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"com.claha.showtimeremote:id/btn_prog_a\").className(\"android.widget.ImageButton\")")
    TouchAction(driver).long_press(element).release().perform()
    driver.press_keycode(82)
    element = getElememt(driver, "new UiSelector().resourceId(\"com.claha.showtimeremote:id/btn_prog_b\").className(\"android.widget.ImageButton\")")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"com.claha.showtimeremote:id/btn_prog_b\").className(\"android.widget.ImageButton\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\")")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"com.claha.showtimeremote:id/btn_prog_c\").className(\"android.widget.ImageButton\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"com.claha.showtimeremote:id/btn_prog_c\").className(\"android.widget.ImageButton\")")
    TouchAction(driver).tap(element).perform()
    driver.press_keycode(82)
    driver.press_keycode(82)
    element = getElememt(driver, "new UiSelector().resourceId(\"com.claha.showtimeremote:id/btn_prog_c\").className(\"android.widget.ImageButton\")")
    TouchAction(driver).tap(element).perform()
    driver.press_keycode(82)
    driver.press_keycode(82)
    driver.press_keycode(82)
    driver.press_keycode(82)
    element = getElememt(driver, "new UiSelector().resourceId(\"com.claha.showtimeremote:id/btn_prog_c\").className(\"android.widget.ImageButton\")")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"com.claha.showtimeremote:id/btn_prog_a\").className(\"android.widget.ImageButton\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\")")
    TouchAction(driver).tap(element).perform()
except Exception, e:
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    # Always collect coverage and tear down the session, pass or fail.
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"3_026\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
    if (cpackage != 'com.claha.showtimeremote'):
        cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
90030957d0a8b86aca542b255cb10573bd5794a9 | 31e9ae0b5431fdb643f228713001d052e93b303d | /flask_unchained/forms/validators.py | fb8290c321e55ba3982b622ac8398b75eee1fd34 | [
"MIT"
] | permissive | briancappello/flask-unchained | b35215b6e150febf8d00fd0164b49d355eae66e6 | a1f1323f63f59760e430001efef43af9b829ebed | refs/heads/master | 2023-05-13T09:11:14.745523 | 2022-04-03T00:27:09 | 2022-04-03T00:27:09 | 115,440,840 | 77 | 9 | MIT | 2023-05-01T23:40:25 | 2017-12-26T17:13:24 | Python | UTF-8 | Python | false | false | 96 | py | from flask_wtf.file import FileAllowed, FileRequired, FileSize
from wtforms.validators import *
| [
"briancappello@gmail.com"
] | briancappello@gmail.com |
f91b55ec00c84a171b7ef334cb54ec1a4add44ed | b33ddc7b89d05e19fdeb69593872fd174fab9f4f | /URI-py/2520a.py | 87d865aa4aae7d32dbe28a595c66d619f9542e3b | [] | no_license | ThiagoCComelli/URI-Online-Judge | 8b8d609d880342b39ba0d396c0610ecb7e01a5af | 5348f736b2d683f4b857232c22cccb7c1d8b8d65 | refs/heads/master | 2020-07-23T15:14:05.353948 | 2020-03-10T19:42:12 | 2020-03-10T19:42:12 | 207,606,956 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | # -*- coding: utf-8 -*-
# Read grid-dimension pairs from stdin until EOF; for each n-row grid,
# record the 1-based (column, row) of the cells marked 1 and 2, then print
# both coordinate lists and the absolute difference of their (col + row)
# sums.
while True:
    try:
        n,m = [int(x) for x in input().split()]
        lista = []
        a = 0
        b = 0
        lista1 = []
        lista2 = []
        lista3 = []
        for i in range(n):
            lista.append([int(x) for x in input().split()])
        for i in range(len(lista)):
            for j in lista[i]:
                if j == 1:
                    # NOTE(review): .index(j) returns the FIRST occurrence in
                    # the row, which is only correct if each marker appears at
                    # most once per row -- TODO confirm with the problem spec.
                    lista1.append(lista[i].index(j)+1)
                    lista2.append(i+1)
                if j == 2:
                    lista1.append(lista[i].index(j)+1)
                    lista2.append(i+1)
        print(lista1)
        print(lista2)
        for a, b in zip(lista1,lista2):
            lista3.append(a+b)
        print(abs(lista3[0]-lista3[1]))
    except EOFError:
        break
| [
"thiago.comelli@outlook.com"
] | thiago.comelli@outlook.com |
99807557a16613d1e9a52417868f0c92aabaf9f8 | 3940b4a507789e1fbbaffeb200149aee215f655a | /lc/1832.CheckIfTheSentenceIsPangram.py | 32b154e7d1bd4e1cb3fe59f18af9a82760a895d9 | [] | no_license | akimi-yano/algorithm-practice | 15f52022ec79542d218c6f901a54396a62080445 | 1abc28919abb55b93d3879860ac9c1297d493d09 | refs/heads/master | 2023-06-11T13:17:56.971791 | 2023-06-10T05:17:56 | 2023-06-10T05:17:56 | 239,395,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | # 1832. Check if the Sentence Is Pangram
# Easy
# 66
# 0
# Add to List
# Share
# A pangram is a sentence where every letter of the English alphabet appears at least once.
# Given a string sentence containing only lowercase English letters, return true if sentence is a pangram, or false otherwise.
# Example 1:
# Input: sentence = "thequickbrownfoxjumpsoverthelazydog"
# Output: true
# Explanation: sentence contains at least one of every letter of the English alphabet.
# Example 2:
# Input: sentence = "leetcode"
# Output: false
# Constraints:
# 1 <= sentence.length <= 1000
# sentence consists of lowercase English letters.
# This solution works:
class Solution:
    def checkIfPangram(self, sentence: str) -> bool:
        """A sentence is a pangram when its distinct characters cover all
        26 lowercase English letters."""
        distinct_letters = set(sentence)
        return len(distinct_letters) == 26
# This solution works - 1 liner:
class Solution:
    def checkIfPangram(self, sentence: str) -> bool:
        """Return True when *sentence* contains all 26 distinct letters."""
        distinct = frozenset(sentence)
        return len(distinct) == 26
"akimi.mimi.yano@gmail.com"
] | akimi.mimi.yano@gmail.com |
f51a8b4d78cea86f8c6db59b5927055f7b591891 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/layout/xaxis/rangeselector/button/_stepmode.py | 64cc451271fb54a7c541f1f6ef14b4f31d0573d9 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 511 | py | import _plotly_utils.basevalidators
class StepmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``stepmode`` attribute of
    ``layout.xaxis.rangeselector.button``; accepts only ``'backward'``
    and ``'todate'``."""

    def __init__(self, plotly_name='stepmode',
                 parent_name='layout.xaxis.rangeselector.button', **kwargs):
        super(StepmodeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type='plot',
            role='info',
            values=['backward', 'todate'],
            **kwargs
        )
| [
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
6936d3e4254ce8516812c56fae8f2b0cfdd5bb3c | 8f5c1ad76f3f9aa67d6720154b4884c9fab2ecbc | /toontown/suit/DistributedSuit.py | 31fa50fb5df51556482b02c7ee23ca6c0598576b | [] | no_license | RegDogg/ttr-2014-dev | eb0d9da3e91b9504b83804c27e1a00d87a0b7220 | 8a392ea4697cf15bd83accd01dcf26d0f87557eb | refs/heads/master | 2023-07-13T02:40:56.171517 | 2021-07-12T00:31:28 | 2021-07-12T00:31:28 | 372,103,145 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,726 | py | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from direct.directtools.DirectGeometry import CLAMP
from direct.task import Task
from otp.avatar import DistributedAvatar
import Suit
from toontown.toonbase import ToontownGlobals
from toontown.battle import DistributedBattle
from direct.fsm import ClassicFSM, State
from direct.fsm import State
import SuitTimings
import SuitBase
import DistributedSuitPlanner
from direct.directnotify import DirectNotifyGlobal
import SuitDialog
from toontown.battle import BattleProps
from toontown.distributed.DelayDeletable import DelayDeletable
import math
import copy
import DistributedSuitBase
from otp.otpbase import OTPLocalizer
import random
from SuitLegList import *
from otp.nametag.NametagConstants import *
from otp.nametag import NametagGlobals
# Distance (feet) a walking suit stops short of a door (see the
# WalkToStreet / WalkFromStreet leg handlers below).
STAND_OUTSIDE_DOOR = 2.5
BATTLE_IGNORE_TIME = 6
BATTLE_WAIT_TIME = 3
CATCHUP_SPEED_MULTIPLIER = 3
ALLOW_BATTLE_DETECT = 1
class DistributedSuit(DistributedSuitBase.DistributedSuitBase, DelayDeletable):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedSuit')
ENABLE_EXPANDED_NAME = 0
    def __init__(self, cr):
        """Client-side street suit.

        Zeroes all path bookkeeping (endpoints, leg list, timestamps) and
        builds the walking FSM whose states mirror the suit path leg types
        (fly in/out, walk to/from street, enter buildings, battle, etc.).
        """
        # One-time init guard: if the attribute already exists this object
        # was initialized before (e.g. pulled back out of the cache).
        try:
            self.DistributedSuit_initialized
            return
        except:
            self.DistributedSuit_initialized = 1
        DistributedSuitBase.DistributedSuitBase.__init__(self, cr)
        # DoId of our DistributedSuitPlanner; resolved lazily in
        # verifySuitPlanner() when it is not yet generated.
        self.spDoId = None
        # Path description as received from the AI (point indexes / lengths).
        self.pathEndpointStart = 0
        self.pathEndpointEnd = 0
        self.minPathLen = 0
        self.maxPathLen = 0
        # Network sync: which path point corresponds to which timestamp.
        self.pathPositionIndex = 0
        self.pathPositionTimestamp = 0.0
        self.pathState = 0
        self.path = None
        self.localPathState = 0
        self.currentLeg = -1
        self.pathStartTime = 0.0
        self.legList = None
        self.initState = None
        self.finalState = None
        self.buildingSuit = 0
        self.fsm = ClassicFSM.ClassicFSM('DistributedSuit', [
            State.State('Off',
                        self.enterOff,
                        self.exitOff, [
                            'FromSky',
                            'FromSuitBuilding',
                            'Walk',
                            'Battle',
                            'neutral',
                            'ToToonBuilding',
                            'ToSuitBuilding',
                            'ToCogHQ',
                            'FromCogHQ',
                            'ToSky',
                            'FlyAway',
                            'DanceThenFlyAway',
                            'WalkToStreet',
                            'WalkFromStreet']),
            State.State('FromSky',
                        self.enterFromSky,
                        self.exitFromSky, [
                            'Walk',
                            'Battle',
                            'neutral',
                            'ToSky',
                            'WalkFromStreet']),
            State.State('FromSuitBuilding',
                        self.enterFromSuitBuilding,
                        self.exitFromSuitBuilding, [
                            'WalkToStreet',
                            'Walk',
                            'Battle',
                            'neutral',
                            'ToSky']),
            State.State('WalkToStreet',
                        self.enterWalkToStreet,
                        self.exitWalkToStreet, [
                            'Walk',
                            'Battle',
                            'neutral',
                            'ToSky',
                            'ToToonBuilding',
                            'ToSuitBuilding',
                            'ToCogHQ',
                            'WalkFromStreet']),
            State.State('WalkFromStreet',
                        self.enterWalkFromStreet,
                        self.exitWalkFromStreet, [
                            'ToToonBuilding',
                            'ToSuitBuilding',
                            'ToCogHQ',
                            'Battle',
                            'neutral',
                            'ToSky']),
            State.State('Walk',
                        self.enterWalk,
                        self.exitWalk, [
                            'WaitForBattle',
                            'Battle',
                            'neutral',
                            'WalkFromStreet',
                            'ToSky',
                            'ToCogHQ',
                            'Walk']),
            State.State('Battle',
                        self.enterBattle,
                        self.exitBattle, [
                            'Walk',
                            'ToToonBuilding',
                            'ToCogHQ',
                            'ToSuitBuilding',
                            'ToSky']),
            State.State('neutral',
                        self.enterNeutral,
                        self.exitNeutral, []),
            State.State('WaitForBattle',
                        self.enterWaitForBattle,
                        self.exitWaitForBattle, [
                            'Battle',
                            'neutral',
                            'Walk',
                            'WalkToStreet',
                            'WalkFromStreet',
                            'ToToonBuilding',
                            'ToCogHQ',
                            'ToSuitBuilding',
                            'ToSky']),
            State.State('ToToonBuilding',
                        self.enterToToonBuilding,
                        self.exitToToonBuilding, [
                            'neutral',
                            'Battle']),
            State.State('ToSuitBuilding',
                        self.enterToSuitBuilding,
                        self.exitToSuitBuilding, [
                            'neutral',
                            'Battle']),
            State.State('ToCogHQ',
                        self.enterToCogHQ,
                        self.exitToCogHQ, [
                            'neutral',
                            'Battle']),
            State.State('FromCogHQ',
                        self.enterFromCogHQ,
                        self.exitFromCogHQ, [
                            'neutral',
                            'Battle',
                            'Walk']),
            State.State('ToSky',
                        self.enterToSky,
                        self.exitToSky, [
                            'Battle']),
            State.State('FlyAway',
                        self.enterFlyAway,
                        self.exitFlyAway,
                        []),
            State.State('DanceThenFlyAway',
                        self.enterDanceThenFlyAway,
                        self.exitDanceThenFlyAway,
                        [])],
            'Off', 'Off')
        self.fsm.enterInitialState()
        # Currently-playing dialogue Sequences; finished off in disable().
        self.soundSequenceList = []
        self.__currentDialogue = None
        return
    def generate(self):
        """DistributedObject generate; defer to the suit base class."""
        DistributedSuitBase.DistributedSuitBase.generate(self)

    def disable(self):
        """Stop all dialogue sounds, path motion and state before this
        object returns to the cache."""
        for soundSequence in self.soundSequenceList:
            soundSequence.finish()
        self.soundSequenceList = []
        self.notify.debug('DistributedSuit %d: disabling' % self.getDoId())
        self.resumePath(0)
        self.stopPathNow()
        self.setState('Off')
        DistributedSuitBase.DistributedSuitBase.disable(self)

    def delete(self):
        """Final cleanup; the attribute guard makes repeat calls harmless."""
        try:
            self.DistributedSuit_deleted
        except:
            self.DistributedSuit_deleted = 1
            self.notify.debug('DistributedSuit %d: deleting' % self.getDoId())
            del self.fsm
            DistributedSuitBase.DistributedSuitBase.delete(self)
    def setPathEndpoints(self, start, end, minPathLen, maxPathLen):
        """Distributed field: define this suit's path through the street
        point graph and (re)build the local leg list to match the AI's."""
        # No-op when nothing changed and a path is already built.
        if self.pathEndpointStart == start and self.pathEndpointEnd == end and self.minPathLen == minPathLen and self.maxPathLen == maxPathLen and self.path != None:
            return
        self.pathEndpointStart = start
        self.pathEndpointEnd = end
        self.minPathLen = minPathLen
        self.maxPathLen = maxPathLen
        self.path = None
        self.pathLength = 0
        self.currentLeg = -1
        self.legList = None
        # maxPathLen == 0 means "no path" (e.g. a building suit).
        if self.maxPathLen == 0:
            return
        if not self.verifySuitPlanner():
            return
        self.startPoint = self.sp.pointIndexes[self.pathEndpointStart]
        self.endPoint = self.sp.pointIndexes[self.pathEndpointEnd]
        # Deterministic generation: the client reproduces the AI's path.
        path = self.sp.genPath(self.startPoint, self.endPoint, self.minPathLen, self.maxPathLen)
        self.setPath(self.sp.dnaData.suitGraph, path)
        self.makeLegList()
        return
def verifySuitPlanner(self):
if self.sp == None and self.spDoId != 0:
self.notify.warning('Suit %d does not have a suit planner! Expected SP doId %s.' % (self.doId, self.spDoId))
self.sp = self.cr.doId2do.get(self.spDoId, None)
if self.sp == None:
return 0
return 1
    def setPathPosition(self, index, timestamp):
        """Distributed field: pin path point *index* to a network timestamp
        so the local clock can be synced to the AI's path schedule."""
        if not self.verifySuitPlanner():
            return
        if self.path == None:
            # Fields can arrive out of order; rebuild from stored endpoints.
            self.setPathEndpoints(self.pathEndpointStart, self.pathEndpointEnd, self.minPathLen, self.maxPathLen)
        self.pathPositionIndex = index
        self.pathPositionTimestamp = globalClockDelta.networkToLocalTime(timestamp)
        if self.legList != None:
            # pathStartTime is the local time the suit began leg 0.
            self.pathStartTime = self.pathPositionTimestamp - self.legList.getStartTime(self.pathPositionIndex)
        return

    def setPathState(self, state):
        """Distributed field: the AI's path state; apply it locally."""
        self.pathState = state
        self.resumePath(state)
    def debugSuitPosition(self, elapsed, currentLeg, x, y, timestamp):
        """Debug hook: compare the AI-reported path position against local
        state and print a diagnosis of any clock, DNA-path-data, or
        position mismatch (Python 2 print statements)."""
        now = globalClock.getFrameTime()
        # Allowance for local frame chug when judging message age.
        chug = globalClock.getRealTime() - now
        messageAge = now - globalClockDelta.networkToLocalTime(timestamp, now)
        if messageAge < -(chug + 0.5) or messageAge > chug + 1.0:
            print 'Apparently out of sync with AI by %0.2f seconds. Suggest resync!' % messageAge
            return
        localElapsed = now - self.pathStartTime
        timeDiff = localElapsed - (elapsed + messageAge)
        if abs(timeDiff) > 0.2:
            print "%s (%d) appears to be %0.2f seconds out of sync along its path. Suggest '~cogs sync'." % (self.getName(), self.getDoId(), timeDiff)
            return
        if self.legList == None:
            print "%s (%d) doesn't have a legList yet." % (self.getName(), self.getDoId())
            return
        netPos = Point3(x, y, 0.0)
        leg = self.legList.getLeg(currentLeg)
        calcPos = leg.getPosAtTime(elapsed - leg.getStartTime())
        calcPos.setZ(0.0)
        calcDelta = Vec3(netPos - calcPos)
        diff = calcDelta.length()
        if diff > 4.0:
            # The AI's own position disagrees with our locally-computed leg:
            # the two machines are generating different paths.
            print '%s (%d) is %0.2f feet from the AI computed path!' % (self.getName(), self.getDoId(), diff)
            print 'Probably your DNA files are out of sync.'
            return
        localPos = Point3(self.getX(), self.getY(), 0.0)
        localDelta = Vec3(netPos - localPos)
        diff = localDelta.length()
        if diff > 10.0:
            print '%s (%d) in state %s is %0.2f feet from its correct position!' % (self.getName(),
                                                                                    self.getDoId(),
                                                                                    self.fsm.getCurrentState().getName(),
                                                                                    diff)
            print 'Should be at (%0.2f, %0.2f), but is at (%0.2f, %0.2f).' % (x,
                                                                              y,
                                                                              localPos[0],
                                                                              localPos[1])
            return
        print '%s (%d) is in the correct position.' % (self.getName(), self.getDoId())
        return
    def denyBattle(self):
        """The AI rejected our battle request; stand down and stop the
        toon-collision battle trigger."""
        DistributedSuitBase.DistributedSuitBase.denyBattle(self)
        self.disableBattleDetect()

    def resumePath(self, state):
        """Apply a path-state change: 0 = stop, 1 = walk the path,
        2 = fly away, 3 = no-op, 4 = victory dance then fly away."""
        if self.localPathState != state:
            self.localPathState = state
            if state == 0:
                self.stopPathNow()
            elif state == 1:
                # Kick the leg-stepping task off immediately.
                self.moveToNextLeg(None)
            elif state == 2:
                self.stopPathNow()
                if self.sp != None:
                    self.setState('Off')
                    self.setState('FlyAway')
            elif state == 3:
                pass
            elif state == 4:
                self.stopPathNow()
                if self.sp != None:
                    self.setState('Off')
                    self.setState('DanceThenFlyAway')
            else:
                self.notify.error('No such state as: ' + str(state))
        return

    def moveToNextLeg(self, task):
        """Task: enter the leg appropriate for the current time and schedule
        the next invocation at the following leg boundary."""
        if self.legList == None:
            self.notify.warning('Suit %d does not have a path!' % self.getDoId())
            return Task.done
        now = globalClock.getFrameTime()
        elapsed = now - self.pathStartTime
        nextLeg = self.legList.getLegIndexAtTime(elapsed, self.currentLeg)
        numLegs = self.legList.getNumLegs()
        if self.currentLeg != nextLeg:
            self.currentLeg = nextLeg
            self.doPathLeg(self.legList[nextLeg], elapsed - self.legList.getStartTime(nextLeg))
        nextLeg += 1
        if nextLeg < numLegs:
            nextTime = self.legList.getStartTime(nextLeg)
            delay = nextTime - elapsed
            name = self.taskName('move')
            taskMgr.remove(name)
            taskMgr.doMethodLater(delay, self.moveToNextLeg, name)
        return Task.done

    def doPathLeg(self, leg, time):
        """Request the FSM state matching this leg's type.  Returns 0."""
        self.fsm.request(SuitLeg.getTypeName(leg.getType()), [leg, time])
        return 0

    def stopPathNow(self):
        """Cancel any pending leg-stepping task and forget the current leg."""
        name = self.taskName('move')
        taskMgr.remove(name)
        self.currentLeg = -1
def calculateHeading(self, a, b):
xdelta = b[0] - a[0]
ydelta = b[1] - a[1]
if ydelta == 0:
if xdelta > 0:
return -90
else:
return 90
elif xdelta == 0:
if ydelta > 0:
return 0
else:
return 180
else:
angle = math.atan2(ydelta, xdelta)
return rad2Deg(angle) - 90
    def beginBuildingMove(self, moveIn, doneEvent, suit = 0):
        """Build the interval that walks the suit through a door: into a
        building when *moveIn* is true (with suit- vs toon-building timing
        chosen by *suit*), or back out to the door otherwise."""
        doorPt = Point3(0)
        buildingPt = Point3(0)
        streetPt = Point3(0)
        if self.virtualPos:
            doorPt.assign(self.virtualPos)
        else:
            doorPt.assign(self.getPos())
        if moveIn:
            streetPt = self.prevPointPos()
        else:
            streetPt = self.currPointPos()
        dx = doorPt[0] - streetPt[0]
        dy = doorPt[1] - streetPt[1]
        # The interior point mirrors the street point through the door.
        buildingPt = Point3(doorPt[0] + dx, doorPt[1] + dy, doorPt[2])
        if moveIn:
            if suit:
                moveTime = SuitTimings.toSuitBuilding
            else:
                moveTime = SuitTimings.toToonBuilding
            return self.beginMove(doneEvent, buildingPt, time=moveTime)
        else:
            return self.beginMove(doneEvent, doorPt, buildingPt, time=SuitTimings.fromSuitBuilding)
        return None

    def setSPDoId(self, doId):
        """Distributed field: remember and resolve our suit planner doId."""
        self.spDoId = doId
        self.sp = self.cr.doId2do.get(doId, None)
        if self.sp == None and self.spDoId != 0:
            self.notify.warning('Suit %s created before its suit planner, %d' % (self.doId, self.spDoId))
        return

    def d_requestBattle(self, pos, hpr):
        """Ask the AI to start a battle with this suit at pos/hpr, putting
        the local place into WaitForBattle meanwhile."""
        self.cr.playGame.getPlace().setState('WaitForBattle')
        self.sendUpdate('requestBattle', [pos[0],
                                          pos[1],
                                          pos[2],
                                          hpr[0],
                                          hpr[1],
                                          hpr[2]])

    def __handleToonCollision(self, collEntry):
        """Local toon touched our battle-detect sphere; request a battle
        unless the player has battles turned off."""
        if not base.localAvatar.wantBattles:
            return
        toonId = base.localAvatar.getDoId()
        self.notify.debug('Distributed suit: requesting a Battle with ' + 'toon: %d' % toonId)
        self.d_requestBattle(self.getPos(), self.getHpr())
        self.setState('WaitForBattle')

    def setAnimState(self, state):
        """Distributed field: forward animation state to the FSM."""
        self.setState(state)
    def enterFromSky(self, leg, time):
        """Fly down from the sky to the start point of the path leg."""
        self.enableBattleDetect('fromSky', self.__handleToonCollision)
        self.loop('neutral', 0)
        if not self.verifySuitPlanner():
            return
        a = leg.getPosA()
        b = leg.getPosB()
        h = self.calculateHeading(a, b)
        self.setPosHprScale(a[0], a[1], a[2], h, 0.0, 0.0, 1.0, 1.0, 1.0)
        self.mtrack = self.beginSupaFlyMove(a, 1, 'fromSky')
        # *time* is how far into the leg we already are (late join).
        self.mtrack.start(time)

    def exitFromSky(self):
        self.disableBattleDetect()
        self.mtrack.finish()
        del self.mtrack
        self.detachPropeller()

    def enterWalkToStreet(self, leg, time):
        """Walk from a door to the street, starting STAND_OUTSIDE_DOOR feet
        short of the door."""
        self.enableBattleDetect('walkToStreet', self.__handleToonCollision)
        self.loop('walk', 0)
        a = leg.getPosA()
        b = leg.getPosB()
        delta = Vec3(b - a)
        length = delta.length()
        delta *= (length - STAND_OUTSIDE_DOOR) / length
        a1 = Point3(b - delta)
        self.enableRaycast(1)
        h = self.calculateHeading(a, b)
        self.setHprScale(h, 0.0, 0.0, 1.0, 1.0, 1.0)
        self.mtrack = Sequence(LerpPosInterval(self, leg.getLegTime(), b, startPos=a1), name=self.taskName('walkToStreet'))
        self.mtrack.start(time)

    def exitWalkToStreet(self):
        self.disableBattleDetect()
        self.enableRaycast(0)
        self.mtrack.finish()
        del self.mtrack

    def enterWalkFromStreet(self, leg, time):
        """Walk from the street to a door, stopping STAND_OUTSIDE_DOOR feet
        short of it."""
        self.enableBattleDetect('walkFromStreet', self.__handleToonCollision)
        self.loop('walk', 0)
        a = leg.getPosA()
        b = leg.getPosB()
        delta = Vec3(b - a)
        length = delta.length()
        delta *= (length - STAND_OUTSIDE_DOOR) / length
        b1 = Point3(a + delta)
        self.enableRaycast(1)
        h = self.calculateHeading(a, b)
        self.setHprScale(h, 0.0, 0.0, 1.0, 1.0, 1.0)
        self.mtrack = Sequence(LerpPosInterval(self, leg.getLegTime(), b1, startPos=a), name=self.taskName('walkFromStreet'))
        self.mtrack.start(time)

    def exitWalkFromStreet(self):
        self.disableBattleDetect()
        self.enableRaycast(0)
        self.mtrack.finish()
        del self.mtrack
    def enterWalk(self, leg, time):
        """Ordinary street walking along one path leg."""
        self.enableBattleDetect('bellicose', self.__handleToonCollision)
        self.loop('walk', 0)
        a = leg.getPosA()
        b = leg.getPosB()
        h = self.calculateHeading(a, b)
        pos = leg.getPosAtTime(time)
        self.setPosHprScale(pos[0], pos[1], pos[2], h, 0.0, 0.0, 1.0, 1.0, 1.0)
        self.mtrack = Sequence(LerpPosInterval(self, leg.getLegTime(), b, startPos=a), name=self.taskName('bellicose'))
        self.mtrack.start(time)

    def exitWalk(self):
        self.disableBattleDetect()
        # pause() (not finish) so the suit stays put where it was
        # interrupted, e.g. when a battle starts mid-leg.
        self.mtrack.pause()
        del self.mtrack

    def enterToSky(self, leg, time):
        """Fly back up into the sky from the end point of the path."""
        self.enableBattleDetect('toSky', self.__handleToonCollision)
        if not self.verifySuitPlanner():
            return
        a = leg.getPosA()
        b = leg.getPosB()
        h = self.calculateHeading(a, b)
        self.setPosHprScale(b[0], b[1], b[2], h, 0.0, 0.0, 1.0, 1.0, 1.0)
        self.mtrack = self.beginSupaFlyMove(b, 0, 'toSky')
        self.mtrack.start(time)

    def exitToSky(self):
        self.disableBattleDetect()
        self.mtrack.finish()
        del self.mtrack
        self.detachPropeller()

    def enterFromSuitBuilding(self, leg, time):
        """Walk out of a suit building toward the street; the start/stop
        points are derived from walk speed so the exit lines up with the
        leg's duration."""
        self.enableBattleDetect('fromSuitBuilding', self.__handleToonCollision)
        self.loop('walk', 0)
        if not self.verifySuitPlanner():
            return
        a = leg.getPosA()
        b = leg.getPosB()
        delta = Vec3(b - a)
        length = delta.length()
        delta2 = delta * (self.sp.suitWalkSpeed * leg.getLegTime()) / length
        delta *= (length - STAND_OUTSIDE_DOOR) / length
        b1 = Point3(b - delta)
        a1 = Point3(b1 - delta2)
        self.enableRaycast(1)
        h = self.calculateHeading(a, b)
        self.setHprScale(h, 0.0, 0.0, 1.0, 1.0, 1.0)
        self.mtrack = Sequence(LerpPosInterval(self, leg.getLegTime(), b1, startPos=a1), name=self.taskName('fromSuitBuilding'))
        self.mtrack.start(time)

    def exitFromSuitBuilding(self):
        self.disableBattleDetect()
        self.mtrack.finish()
        del self.mtrack
    def enterToToonBuilding(self, leg, time):
        """Stand still while the toon-building door sequence plays."""
        self.loop('neutral', 0)

    def exitToToonBuilding(self):
        pass

    def enterToSuitBuilding(self, leg, time):
        """Walk from the street up to a suit building door."""
        self.loop('walk', 0)
        if not self.verifySuitPlanner():
            return
        a = leg.getPosA()
        b = leg.getPosB()
        delta = Vec3(b - a)
        length = delta.length()
        delta2 = delta * (self.sp.suitWalkSpeed * leg.getLegTime()) / length
        delta *= (length - STAND_OUTSIDE_DOOR) / length
        a1 = Point3(a + delta)
        b1 = Point3(a1 + delta2)
        self.enableRaycast(1)
        h = self.calculateHeading(a, b)
        self.setHprScale(h, 0.0, 0.0, 1.0, 1.0, 1.0)
        self.mtrack = Sequence(LerpPosInterval(self, leg.getLegTime(), b1, startPos=a1), name=self.taskName('toSuitBuilding'))
        self.mtrack.start(time)

    def exitToSuitBuilding(self):
        self.mtrack.finish()
        del self.mtrack

    def enterToCogHQ(self, leg, time):
        self.loop('neutral', 0)

    def exitToCogHQ(self):
        pass

    def enterFromCogHQ(self, leg, time):
        """Hidden while 'inside' CogHQ; reattached to render on exit."""
        self.loop('neutral', 0)
        self.detachNode()

    def exitFromCogHQ(self):
        self.reparentTo(render)

    def enterBattle(self):
        DistributedSuitBase.DistributedSuitBase.enterBattle(self)
        # Freeze path motion for the duration of the battle.
        self.resumePath(0)

    def enterNeutral(self):
        self.notify.debug('DistributedSuit: Neutral (entering a Door)')
        self.resumePath(0)
        self.loop('neutral', 0)

    def exitNeutral(self):
        pass

    def enterWaitForBattle(self):
        DistributedSuitBase.DistributedSuitBase.enterWaitForBattle(self)
        self.resumePath(0)

    def enterFlyAway(self):
        """Take off straight up from the current position."""
        self.enableBattleDetect('flyAway', self.__handleToonCollision)
        if not self.verifySuitPlanner():
            return
        b = Point3(self.getPos())
        self.mtrack = self.beginSupaFlyMove(b, 0, 'flyAway')
        self.mtrack.start()

    def exitFlyAway(self):
        self.disableBattleDetect()
        self.mtrack.finish()
        del self.mtrack
        self.detachPropeller()

    def enterDanceThenFlyAway(self):
        """Play the victory dance, then take off."""
        self.enableBattleDetect('danceThenFlyAway', self.__handleToonCollision)
        if not self.verifySuitPlanner():
            return
        danceTrack = self.actorInterval('victory')
        b = Point3(self.getPos())
        flyMtrack = self.beginSupaFlyMove(b, 0, 'flyAway')
        self.mtrack = Sequence(danceTrack, flyMtrack, name=self.taskName('danceThenFlyAway'))
        self.mtrack.start()

    def exitDanceThenFlyAway(self):
        self.disableBattleDetect()
        self.mtrack.finish()
        del self.mtrack
        self.detachPropeller()
def playCurrentDialogue(self, dialogue, chatFlags, interrupt = 1):
    """Play `dialogue`, or synthesize suit speech for the current chat text.

    If `interrupt` is truthy, any dialogue currently playing is stopped
    first.  When no explicit `dialogue` sound is given and `chatFlags`
    includes CFSpeech, a dialogue sample is chosen from the nametag's
    current chat string instead.
    """
    if interrupt and self.__currentDialogue is not None:
        self.__currentDialogue.stop()
    self.__currentDialogue = dialogue
    if dialogue:
        base.playSfx(dialogue, node=self)
    elif chatFlags & CFSpeech != 0:
        if self.nametag.getNumChatPages() > 0:
            # Normal chat: play synthesized dialogue for the visible text.
            self.playDialogueForString(self.nametag.getChat())
            # Idiom fix: was `!= None`; `is not None` is the correct test.
            if self.soundChatBubble is not None:
                base.playSfx(self.soundChatBubble, node=self)
        elif self.nametag.getChatStomp() > 0:
            # Chat text is still "stomping" in; delay playback to match.
            self.playDialogueForString(self.nametag.getStompText(), self.nametag.getStompDelay())
def playDialogueForString(self, chatString, delay = 0.0):
    """Choose a dialogue flavour and length bucket for chatString, then play it."""
    if len(chatString) == 0:
        return
    lowered = chatString.lower()
    # Flavour: special / exclamation / question markers take priority;
    # otherwise pick one of the two statement variants at random.
    if OTPLocalizer.DialogSpecial in lowered:
        flavour = 'special'
    elif OTPLocalizer.DialogExclamation in lowered:
        flavour = 'exclamation'
    elif OTPLocalizer.DialogQuestion in lowered:
        flavour = 'question'
    elif random.randint(0, 1):
        flavour = 'statementA'
    else:
        flavour = 'statementB'
    # Length bucket 1..4 based on the localized length thresholds.
    chars = len(chatString)
    if chars <= OTPLocalizer.DialogLength1:
        bucket = 1
    elif chars <= OTPLocalizer.DialogLength2:
        bucket = 2
    elif chars <= OTPLocalizer.DialogLength3:
        bucket = 3
    else:
        bucket = 4
    self.playDialogue(flavour, bucket, delay)
def playDialogue(self, type, length, delay = 0.0):
    """Play the suit dialogue sample matching `type` and `length`.

    type: 'statementA', 'statementB', 'question', 'exclamation' or 'special'.
    length: 1..n length bucket; only differentiates statement samples.
    delay: seconds to wait before the sound starts.
    """
    dialogueArray = self.getDialogueArray()
    if dialogueArray is None:  # idiom fix: was `== None`
        return
    # Map (type, length) onto an index into the dialogue sample array.
    sfxIndex = None
    if type == 'statementA' or type == 'statementB':
        if length == 1:
            sfxIndex = 0
        elif length == 2:
            sfxIndex = 1
        elif length >= 3:
            sfxIndex = 2
    elif type == 'question':
        sfxIndex = 3
    elif type == 'exclamation':
        sfxIndex = 4
    elif type == 'special':
        sfxIndex = 5
    else:
        # BUG FIX: the original called the bare name `notify`, which is not
        # defined in this scope (NameError at runtime), and passed two
        # positional args; use the class notifier with a single message.
        self.notify.error('unrecognized dialogue type: %s' % type)
    if sfxIndex is not None and sfxIndex < len(dialogueArray) and dialogueArray[sfxIndex] is not None:
        soundSequence = Sequence(Wait(delay), SoundInterval(dialogueArray[sfxIndex], node=None, listenerNode=base.localAvatar, loop=0, volume=1.0))
        self.soundSequenceList.append(soundSequence)
        soundSequence.start()
    self.cleanUpSoundList()
def cleanUpSoundList(self):
    """Drop finished sound sequences so the list does not grow unbounded."""
    # Rebuild in place (slice assignment keeps the same list object) instead
    # of the original two-pass collect-then-list.remove, which was O(n^2).
    # Behavior is unchanged: stopped sequences are discarded, the relative
    # order of the survivors is preserved.
    self.soundSequenceList[:] = [
        seq for seq in self.soundSequenceList if not seq.isStopped()
    ]
| [
"regdogg.acr@gmail.com"
] | regdogg.acr@gmail.com |
9c73175eab6073df14a8204dd7342180d9ef43ec | 3c20f43fe658ee4123aa47548a1fed4d7852670a | /postgretest/wsgi.py | 13396466cf74ecbc61c94d93a9f1e37051e6e5bb | [] | no_license | Jordan-Ak/Postgresql_setup | 47a8bac3a62ce81582116bac9c3419fa783ff6ef | 5427f65de63911ed40cc866f10f6a21877d4af44 | refs/heads/master | 2023-03-26T09:54:57.213028 | 2021-03-24T08:10:51 | 2021-03-24T08:10:51 | 350,995,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for postgretest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the WSGI
# application (setdefault keeps any value already present in the environment).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'postgretest.settings')

# Module-level WSGI callable expected by WSGI servers (gunicorn, uwsgi, ...).
application = get_wsgi_application()
| [
"JosiahDavid98@gmail.com"
] | JosiahDavid98@gmail.com |
b3b43555f0df6f58886f9e05854ba2ebb887715f | 5cf745769b0f891aca2198ace8935dce5221ec48 | /anima/rez_packages/resolve/17.4.3/package.py | e2646c27056c9ada1759edb15a496b39cd4cd30f | [
"MIT"
] | permissive | jangcode/anima | d827672e45275f52727d66cd915bc5c2f3a0ede6 | 59ddfe76004626fc8142918b27d15e7759372854 | refs/heads/master | 2022-05-31T14:40:41.703689 | 2022-05-12T00:30:36 | 2022-05-12T00:30:36 | 94,961,144 | 0 | 0 | null | 2017-06-21T04:00:09 | 2017-06-21T04:00:09 | null | UTF-8 | Python | false | false | 1,199 | py | # -*- coding: utf-8 -*-
# rez package definition for DaVinci Resolve 17.4.3.
# NOTE(review): inside commands(), `env` and `system` are globals injected by
# rez when the package is resolved — they are not defined in this file.
name = "resolve"
version = "17.4.3"
author = ["Erkan Ozgur Yilmaz"]
uuid = "86791641abc04a189b2177f4eff55327"
description = "DaVinci Resolve package"

requires = [
    "python",
    "pyside2",
    "anima",
]

# One variant per Python major version.
variants = [
    ["python-2"],
    ["python-3"],
]

build_command = "python {root}/../build.py {install}"


def commands():
    """Extend PATH and export the Resolve scripting env vars per platform."""
    # env.PYTHONPATH.append("{root}/../python")
    env.PATH.append("{root}/bin")
    if system.platform == "linux":
        env.PATH.append("/opt/resolve/")
        env.RESOLVE_SCRIPT_API = "/opt/resolve/Developer/Scripting/"
        env.RESOLVE_SCRIPT_LIB = "/opt/resolve/libs/Fusion/fusionscript.so"
        env.PYTHONPATH.append("$RESOLVE_SCRIPT_API/Modules/")
    elif system.platform == "osx":
        env.PATH.append("/Applications/DaVinci Resolve/DaVinci Resolve.app/")
        env.RESOLVE_SCRIPT_API = (
            "/Applications/DaVinci Resolve/DaVinci Resolve.app/Developer/Scripting/"
        )
        env.RESOLVE_SCRIPT_LIB = (
            "/Applications/DaVinci Resolve/DaVinci Resolve.app/Contents/Libraries/"
            "Fusion/fusionscript.so"
        )
        env.PYTHONPATH.append("$RESOLVE_SCRIPT_API/Modules/")
"eoyilmaz@gmail.com"
] | eoyilmaz@gmail.com |
6b30ff421722f621c0fae728aa28f336fdb80fd6 | 47dfa145704444bb62d9e6e4f4163bde13abdbd5 | /fancy/eventbus/event_bus.py | 1769aa881e988abb23ba73cfb8737db1a3888b3f | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | susautw/fancy-eventbus | 1c2e4e8d64c2726e2f5ac373a91f2218c1ad2500 | 8c2c693538906c35ae87e12ec8a66f2e5d31f86b | refs/heads/master | 2022-12-02T01:05:17.403909 | 2020-08-21T12:59:06 | 2020-08-21T12:59:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,917 | py | from typing import Dict, List
from fancy.eventbus import EventListener, EventScheduler
from fancy.eventbus.scheduler import SchedulerBase
class EventBus:
    """Routes posted events to registered listeners through a scheduler.

    Listeners are grouped by event type; dispatch uses isinstance, so a
    listener registered for a base type also receives subclass events.
    """

    _scheduler: SchedulerBase[EventListener]
    # Maps an event type to the listeners registered for it.
    event_listener_map: Dict[type, List[EventListener]]

    # Lazily-created process-wide default bus; see get_default().
    default_bus: 'EventBus' = None

    @classmethod
    def get_default(cls) -> 'EventBus':
        """Return the shared default bus, creating it on first use."""
        if cls.default_bus is None:
            cls.default_bus = EventBus(EventScheduler.MULTILEVEL_FEEDBACK_QUEUE)
        return cls.default_bus

    def __init__(self, scheduler: SchedulerBase):
        self._scheduler = scheduler
        self.event_listener_map = {}

    def post(self, event: object) -> bool:
        """
        Queue `event` for every listener whose registered type matches it.

        :param event: the event instance to dispatch
        :return: whether the event matched at least one registered listener type
        """
        posted = False
        for event_type in self.event_listener_map.keys():
            if isinstance(event, event_type):
                posted = True
                self._post_listeners(event, self.event_listener_map[event_type])
        return posted

    def _post_listeners(self, event: object, listeners: List[EventListener]) -> None:
        # Clone each listener so the scheduled copy carries its own event
        # without mutating the registered prototype.
        for listener in listeners:
            listener = listener.clone()
            listener.event = event
            self._scheduler.add(listener)

    def cancel(self, event: object) -> bool:
        """Remove pending deliveries of `event`; return whether any
        registered listener type matched."""
        canceled = False
        for event_type in self.event_listener_map.keys():
            if isinstance(event, event_type):
                canceled = True
                self._cancel_listeners(event, self.event_listener_map[event_type])
        return canceled

    def _cancel_listeners(self, event: object, listeners: List[EventListener]):
        # NOTE(review): unlike _post_listeners, this assigns `event` on the
        # registered listener itself (no clone) — presumably so scheduler
        # removal can match the queued clone by equality; confirm against
        # EventListener's __eq__.
        for listener in listeners:
            listener.event = event
            try:
                self._scheduler.remove(listener)
            except ValueError as _:
                # Listener was not queued; cancellation is best-effort.
                pass

    def register(self, event_listeners: object) -> None:
        """Register every listener-marked method of the given object
        (as reported by EventListener.get_marked_method)."""
        listeners: List[EventListener] = EventListener.get_marked_method(event_listeners).values()
        for listener in listeners:
            if listener.event_type not in self.event_listener_map:
                self.event_listener_map[listener.event_type] = [listener]
            else:
                self.event_listener_map[listener.event_type].append(listener)

    def unregister(self, event_listeners: object) -> None:
        """Remove previously registered marked methods.

        Raises ValueError if one of them was never registered.
        """
        listeners: List[EventListener] = EventListener.get_marked_method(event_listeners).values()
        for listener in listeners:
            if listener.event_type not in self.event_listener_map:
                raise ValueError(f"{EventBus.__name__}.unregister(item): item not in {EventBus.__name__}")
            self.event_listener_map[listener.event_type].remove(listener)

    def clear(self) -> None:
        """Drop every registered listener."""
        self.event_listener_map = {}

    @property
    def scheduler(self) -> SchedulerBase[EventListener]:
        # Read-only access to the underlying scheduler.
        return self._scheduler
| [
"susautw@gmail.com"
] | susautw@gmail.com |
1e8cb8a12175625244c6506cd6d7aeaf2a555bdb | 700fe1a57d6306171f17f012587c938ba49f0212 | /rpg/view.py | 23ab9a1122ea2d88bcbf5b6f4429aca3e7e11b6b | [] | no_license | Michaelllllll25/class-rpg-game | cfd850bc8ac6a8c6bb549ad77825eed3ba5d3228 | 4229124bbaa170b5bbaf29f7588ee8af20dc11ba | refs/heads/main | 2023-09-06T02:14:34.161346 | 2021-11-15T19:24:02 | 2021-11-15T19:24:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | from typing import List
import pygame
class View:
    """A pseudo-interface for views that can be used in the game class.

    Concrete views must override event_loop(), update() and draw(); each
    base implementation only raises NotImplementedError.
    """

    def event_loop(self, events: List[pygame.event.Event]) -> None:
        """View-specific event loop for key-bindings."""
        raise NotImplementedError("You need to override the 'event_loop' method in every class inheriting from the View class.")

    def update(self) -> None:
        """Update the view's state (called once per frame)."""
        raise NotImplementedError("You need to override the 'update' method in every class inheriting from the View class.")

    def draw(self, screen: pygame.Surface) -> None:
        """Draw the view's contents onto `screen`."""
        raise NotImplementedError("You need to override the 'draw' method in every class inheriting from the View class.")
| [
"daniel.gallo@ycdsbk12.ca"
] | daniel.gallo@ycdsbk12.ca |
d223cdd0d3cbce60464d864107e23cc94170871c | 118984fdbacf5eb71159eb511ccd055987498886 | /CH10/EX10.31.py | 8c3cce27c8f1ea9df7d33ec5deeecf9e903c37df | [] | no_license | 6igsm0ke/Introduction-to-Programming-Using-Python-Liang-1st-edtion | 321c6256be6ff78adbc8e3ddc73f2f43a51a75ab | 159489f3af296f87469ddddf3a1cb232917506b0 | refs/heads/master | 2023-06-05T20:03:17.951911 | 2021-06-18T18:04:42 | 2021-06-18T18:04:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | # 10.31 (Occurrences of each digit in a string) Write a function that counts the occurrences
# of each digit in a string using the following header:
# def count(s):
# The function counts how many times a digit appears in the string. The return
# value is a list of ten elements, each of which holds the count for a digit. For
# example, after executing counts = count("12203AB3"), counts[0] is 1,
# counts[1] is 1, counts[2] is 2, and counts[3] is 2.
# Write a test program that prompts the user to enter a string and displays the
# number of occurrences of each digit in the string.
def getDistinctDigits(s):
    """Return the distinct elements of s in first-appearance order."""
    # dict preserves insertion order (Python 3.7+), so dict.fromkeys keeps
    # exactly the first occurrence of each element — same result as the
    # append-if-unseen loop this replaces.
    return list(dict.fromkeys(s))
def count(s):
    """Return occurrence counts for the distinct elements of s.

    The i-th entry is how many times the i-th distinct element (in
    first-appearance order) occurs in s.  Note: despite the exercise
    header above, this does NOT return a fixed ten-slot digit table; it
    keeps the original implementation's contract, which the driver code
    below (counts[lst2.index(i)]) relies on.
    """
    # Single pass instead of the original's repeated s.count() calls,
    # which made the function O(n * k).
    occurrences = {}
    for ch in s:
        occurrences[ch] = occurrences.get(ch, 0) + 1
    # dicts preserve insertion order, so values come out in
    # first-appearance order — matching getDistinctDigits ordering.
    return list(occurrences.values())
# Interactive driver: read a string and report how often each distinct
# character occurs, listed in sorted character order.
s = input("Enter a string: ")
counts = count(s)
s = list(s)
lst = getDistinctDigits(s)
# Snapshot of the first-appearance order; `counts` is indexed by it.
lst2 = [x for x in lst]
lst.sort()
for i in lst:
    # Translate the sorted position back to the first-appearance index.
    print(i, "occurs", counts[lst2.index(i)], "times")
| [
"47993441+OmarAlmighty@users.noreply.github.com"
] | 47993441+OmarAlmighty@users.noreply.github.com |
8924c14bbaee7e2b250f6cbb42e3cdb4e97cdd76 | 5e3e7cf49dd6c25770066739699f7e5d7dc0d2c7 | /lstark/lstark/urls.py | 1bb63941e58367d4b7dc52bedc453ff74b5287ce | [] | no_license | veujs/CRM | a2830c9954c11d24fe3f9cb2b74c1d2273ece28e | b2e9219bdae1201dd9ace74b4456141fad2cba4b | refs/heads/master | 2021-05-21T08:55:25.726450 | 2020-04-03T03:51:19 | 2020-04-03T03:51:19 | 252,626,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | """lstark URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Only the Django admin site is routed.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| [
"624040034@qq.com"
] | 624040034@qq.com |
4ee7fe861e8995c8351d3a5f977b4130187ac57b | 260133e46c0c88fd20f2ed18309c5f46508b7fb9 | /opengever/api/users.py | 65100d01e8db0b06da8ab5d5773d5f00f43f9c8d | [] | no_license | robertmuehsig/opengever.core | 4180fbea1436fade9b33232a293b0d43ebfc6c51 | 63b3747793d5b824c56eb3659987bb361d25d8d8 | refs/heads/master | 2020-09-08T14:55:00.340222 | 2019-11-08T10:16:02 | 2019-11-08T10:16:02 | 221,163,734 | 0 | 0 | null | 2019-11-12T08:08:59 | 2019-11-12T08:08:54 | null | UTF-8 | Python | false | false | 877 | py | from plone import api
from plone.restapi.services.users.get import UsersGet
class GeverUsersGet(UsersGet):
    """Customize permissions to enumerate and query user information.

    plone.restapi protects these endpoints with the `manage portal`
    permission by default, but in GEVER every logged-in user should be
    able to enumerate, query and read user information.
    """

    def _has_allowed_role(self):
        # We cannot simply check the `View` permission here, because
        # anonymous users hold `View` as well (needed for the login form).
        current_roles = api.user.get_roles()
        return any(role in current_roles for role in ('Member', 'Reader', 'Manager'))

    def has_permission_to_query(self):
        return self._has_allowed_role()

    def has_permission_to_enumerate(self):
        return self._has_allowed_role()
| [
"philippe.gross@4teamwork.ch"
] | philippe.gross@4teamwork.ch |
11d20a2a42da4e748a24529582b4d8a926ece82d | be026334d457b1f78050f8262cd693922c6c8579 | /onnxruntime/test/testdata/transform/qdq_conv_gen.py | 9d26b42e820b6f43455e7e6005b854bc59b55d8b | [
"MIT"
] | permissive | ConnectionMaster/onnxruntime | 953c34c6599c9426043a8e5cd2dba05424084e3b | bac9c0eb50ed5f0361f00707dd6434061ef6fcfe | refs/heads/master | 2023-04-05T00:01:50.750871 | 2022-03-16T15:49:42 | 2022-03-16T15:49:42 | 183,019,796 | 1 | 0 | MIT | 2023-04-04T02:03:14 | 2019-04-23T13:21:11 | C++ | UTF-8 | Python | false | false | 2,177 | py | import onnx
from onnx import helper
from onnx import TensorProto
# Generate a basic QDQ Conv model with `num_convs` Conv nodes and their surrounding DQ/Q nodes
def GenerateModel(model_path, num_convs):
    """Build and save a QDQ ONNX model with `num_convs` independent chains.

    Each chain is DQ(input) / DQ(weight) / DQ(bias) -> Conv -> Q(output),
    with its own initializers, one graph input X_i and one output Y_i.
    """
    nodes = []
    initializers = []
    inputs = []
    outputs = []
    for i in range(num_convs):

        def name(base):
            # Suffix every tensor/node name with the chain index so the
            # chains do not collide.  (Called within this iteration, so the
            # closure over `i` is safe.)
            return f"{base}_{i}"

        nodes.extend([
            helper.make_node("DequantizeLinear", [name("X"), name("Scale"), name("Zero_point_uint8")], [name("input_DQ")], name("input_DQ")),
            helper.make_node("DequantizeLinear", [name("W"), name("Scale"), name("Zero_point_uint8")], [name("conv_weight_DQ")], name("conv_weight_DQ")),
            helper.make_node("DequantizeLinear", [name("Bias"), name("Scale"), name("Zero_point_int32")], [name("conv_bias_DQ")], name("conv_bias_DQ")),
            helper.make_node("Conv", [name("input_DQ"), name("conv_weight_DQ"), name("conv_bias_DQ")], [name("conv_output")], name("conv")),
            helper.make_node("QuantizeLinear", [name("conv_output"), name("Scale"), name("Zero_point_uint8")], [name("Y")], name("output_Q")),
        ])

        # Per-chain quantization params, 3x3 weight and scalar bias.
        initializers.extend([
            helper.make_tensor(name('Scale'), TensorProto.FLOAT, [1], [256.0]),
            helper.make_tensor(name('Zero_point_uint8'), TensorProto.UINT8, [1], [0]),
            helper.make_tensor(name('Zero_point_int32'), TensorProto.INT32, [1], [0]),
            helper.make_tensor(name('W'), TensorProto.UINT8, [1, 1, 3, 3], [128] * 9),
            helper.make_tensor(name('Bias'), TensorProto.INT32, [1], [64]),
        ])

        inputs.extend([
            helper.make_tensor_value_info(name('X'), TensorProto.UINT8, [1, 1, 5, 5]),
        ])

        outputs.extend([
            helper.make_tensor_value_info(name('Y'), TensorProto.UINT8, [1, 1, 3, 3]),
        ])

    graph = helper.make_graph(
        nodes,
        f"QDQ_Conv_x_{num_convs}",
        inputs,
        outputs,
        initializers
    )

    model = helper.make_model(graph)
    onnx.save(model, model_path)
if __name__ == "__main__":
    # Emit a single-conv model and a three-conv model (test data).
    GenerateModel('qdq_conv.onnx', 1)
    GenerateModel('runtime_optimization/qdq_convs.onnx', 3)
| [
"noreply@github.com"
] | ConnectionMaster.noreply@github.com |
ce24ba67d8f5e65ecdbe5a03cec6ad5c397e0e81 | 0f502992807cfc5547c5481c20a9e22992bca1e6 | /low5/file/Administrators.py | 490c75e77166f0c2f0c77bdd127745df57aaaed4 | [] | no_license | zpwlow/study | 38ca2dc4f120fa17e7fd211cf3bb740300506b2c | 20c693d67170560070bd079fb1283d13c4882fc2 | refs/heads/master | 2022-12-28T12:38:34.670674 | 2020-10-09T05:29:15 | 2020-10-09T05:29:15 | 302,217,039 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173,670 | py | """
作者:钟培望
名称:具体人工智能沉浸式学习系统管理员端
时间:2020.4.30
"""
from PyQt5.QtWidgets import QWidget, QLabel, QPushButton, QGridLayout
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import Qt
from captcha.image import ImageCaptcha
from PyQt5.QtGui import QFont
from PyQt5.QtCore import *
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtGui import *
from PyQt5 import QtGui
from PyQt5.QtWidgets import *
import os, sys, time, re
import glob
import random
import base64
import datetime
import fitz
import sqlite3
import zipfile
import shutil
from PIL import Image
import subprocess
class QUnFrameWindow(QMainWindow):
    """Main administrator window.

    Hosts a menu bar and a splitter whose single child widget is swapped
    out to switch between the login / registration / main screens.
    """

    def __init__(self):
        super(QUnFrameWindow, self).__init__(None)  # top-level window
        self.setWindowTitle("low_Administrators")
        self.setWindowIcon(QIcon("../datas/logo.ico"))
        # Size the window to the primary screen's resolution.
        self.desktop = QApplication.desktop()
        self.screenRect = self.desktop.screenGeometry()
        self.y = self.screenRect.height()
        self.x = self.screenRect.width()
        self.setMinimumWidth(670)
        self.setMinimumHeight(560)
        self.resize(self.x, self.y)
        # Phone number of the currently logged-in user; filled in by the
        # login / registration screens.
        self.number = ''
        self.centralwidget = QtWidgets.QWidget(self)
        self.centralwidget.setObjectName("centralwidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.splitter = QtWidgets.QSplitter(self.centralwidget)
        # "File" menu with "log out" and "quit" actions.
        self.bar = self.menuBar()
        file = self.bar.addMenu("文件")
        logonquit = QAction("退出登录", self)
        file.addAction(logonquit)
        quit = QAction("退出", self)
        file.addAction(quit)
        logonquit.triggered.connect(self.logonquit_fun)
        quit.triggered.connect(self.close_win)
        self.splitter.setOrientation(QtCore.Qt.Horizontal)
        self.horizontalLayout.addWidget(self.splitter)
        self.setCentralWidget(self.centralwidget)
        # Start on the login screen.
        self.splitter.addWidget(Record())

    def close_win(self):
        """Ask for confirmation, then quit the whole application."""
        rely = QMessageBox.question(self, "提示!", "是否退出程序?", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
        # Fix: the original compared against the magic literal 65536, which
        # is the numeric value of QMessageBox.No; use the named constant.
        if rely == QMessageBox.No:
            return
        self.close()
        sys.exit()

    def logonquit_fun(self):
        """Ask for confirmation, then drop back to the login screen."""
        rely = QMessageBox.question(self, "提示!", "是否退出登录?", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
        if rely == QMessageBox.No:
            return
        self.splitter.widget(0).setParent(None)
        self.splitter.addWidget(Record())
class Logon(QFrame):
    """Registration screen: phone number, nickname, password (twice) and a
    captcha.  A successful sign-up is written to the pending-approval table
    (Controller2) and waits for an administrator to approve it."""

    def __init__(self):
        super(Logon, self).__init__()
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.setFrameShadow(QtWidgets.QFrame.Raised)
        self.usr = QLabel("用户:")
        self.usrname = QLabel("用户名:")
        self.password1 = QLabel("密码:")
        self.password2 = QLabel("确认密码:")
        self.usrLine = QLineEdit()
        self.usrnameLine = QLineEdit()
        self.pwdLineEdit1 = QLineEdit()
        self.pwdLineEdit2 = QLineEdit()
        self.codeLineEdit = QLineEdit()
        self.okBtn = QPushButton("注册")
        self.returnBtn = QPushButton("返回")
        self.codebel = QLabel()       # shows the captcha image
        self.change_code = QLabel()   # "refresh captcha" link
        self.devise_Ui()

    def devise_Ui(self):
        """Build the registration form layout and wire up its signals."""
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # install the top-level layout manager
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        self.layout.setContentsMargins(300, 0, 0, 0)
        self.usr.setMaximumSize(50, 40)
        self.usrname.setMaximumSize(60, 40)
        self.password1.setMaximumSize(50, 40)
        self.password2.setMaximumSize(80, 40)
        # Set the labels' font color and size.
        self.usr.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:18px;font-weight:Bold;font-family:Arial;}")
        self.usrname.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:18px;font-weight:Bold;font-family:Arial;}")
        self.password1.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:18px;font-weight:Bold;font-family:Arial;}")
        self.password2.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:18px;font-weight:Bold;font-family:Arial;}")
        self.usrLine.setMaximumSize(420, 40)
        self.usrnameLine.setMaximumSize(420, 40)
        self.pwdLineEdit1.setMaximumSize(420, 40)
        self.pwdLineEdit2.setMaximumSize(420, 40)
        self.codeLineEdit.setMaximumSize(310, 40)
        # self.usrLineEdit2.setText(a)
        self.usrLine.setPlaceholderText("请输入手机号码")
        self.usrnameLine.setPlaceholderText("请输入您的昵称")
        self.pwdLineEdit1.setPlaceholderText("请输入密码")
        self.pwdLineEdit2.setPlaceholderText("请重新输入密码")
        self.codeLineEdit.setPlaceholderText("请输入右侧的验证码")
        self.usrLine.setFont(QFont("宋体", 12))  # set the line edits' font and size
        self.usrnameLine.setFont(QFont("宋体", 12))
        self.pwdLineEdit1.setFont(QFont("宋体", 12))
        self.pwdLineEdit2.setFont(QFont("宋体", 12))
        self.codeLineEdit.setFont(QFont("宋体", 12))
        self.pwdLineEdit1.setEchoMode(QLineEdit.Password)
        self.pwdLineEdit2.setEchoMode(QLineEdit.Password)
        self.okBtn.setStyleSheet("QPushButton{ font-family:'宋体';font-size:28px;color:rgb(0,0,0);}\
            QPushButton{background-color:rgb(170,200, 50)}\
            QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.okBtn.setMaximumSize(420, 40)
        self.change_code.setText("<A href='www.baidu.com'>看不清,换一个</a>")
        self.change_code.setStyleSheet(
            "QLabel{color:rgb(0,0,255);font-size:12px;font-weight:normal;font-family:Arial;}")
        self.change_code.setMaximumSize(120, 40)
        self.returnBtn.setStyleSheet("QPushButton{ font-family:'宋体';font-size:22px;color:rgb(0,0,0);}\
            QPushButton{background-color:rgb(170,200, 50)}\
            QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.returnBtn.setMaximumSize(80, 40)
        self.codebel.setMaximumSize(100, 40)
        # Pressing Enter in a field jumps (after validation) to the next one.
        self.usrLine.returnPressed.connect(self.enterPress1)
        self.usrnameLine.returnPressed.connect(self.enterPress2)
        self.pwdLineEdit1.returnPressed.connect(self.enterPress3)
        self.pwdLineEdit2.returnPressed.connect(self.enterPress4)
        self.returnBtn.clicked.connect(self.change_record)  # back to the login screen
        self.okBtn.clicked.connect(self.accept)
        self.change_code.linkActivated.connect(self.renovate_code)
        self.layout.addWidget(self.returnBtn, 0, 1, 1, 1)
        self.layout.addWidget(self.usr, 1, 3, 1, 1)
        self.layout.addWidget(self.usrLine, 1, 5, 1, 14)
        self.layout.addWidget(self.usrname, 2, 3, 1, 1)
        self.layout.addWidget(self.usrnameLine, 2, 5, 1, 14)
        self.layout.addWidget(self.password1, 3, 3, 1, 1)
        self.layout.addWidget(self.pwdLineEdit1, 3, 5, 1, 14)
        self.layout.addWidget(self.password2, 4, 3, 1, 1)
        self.layout.addWidget(self.pwdLineEdit2, 4, 5, 1, 14)
        self.layout.addWidget(self.codeLineEdit, 5, 5, 1, 5)
        self.layout.addWidget(self.codebel, 5, 10, 1, 6)
        self.layout.addWidget(self.change_code, 5, 12, 1, 1)
        self.layout.addWidget(self.okBtn, 6, 5, 1, 14)
        self.renovate_code()

    def renovate_code(self):
        # Generate a fresh 4-character captcha, save it as an image and
        # display it in the captcha label.
        # NOTE(review): the local name `list` shadows the builtin within
        # this method.
        list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
                'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
                'u', 'v', 'w', 'x', 'y', 'z',
                'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
                'U', 'V', 'W', 'X', 'Y', 'Z']
        self.code = ''
        for num in range(1, 5):
            self.code = self.code + list[random.randint(0, 61)]
        image = ImageCaptcha().generate_image(self.code)
        image.save("../datas/wen/code.png")
        self.codebel.setPixmap(QPixmap("../datas/wen/code.png"))
        self.codebel.setScaledContents(True)  # scale the pixmap to fit the label

    def checking1(self):
        # Return True if the entered phone number already exists in the
        # registered-administrators table (Controller).
        # NOTE(review): on the early `return True` path the cursor and
        # connection are never closed.
        sqlpath = '../datas/database/Information.db'
        conn = sqlite3.connect(sqlpath)
        c = conn.cursor()
        c.execute("select * from Controller")
        for variate in c.fetchall():
            if variate[0] == self.usrLine.text():
                return True
        c.close()
        conn.close()
        return False

    def checking2(self):
        # Return True if the entered phone number is already waiting for
        # administrator approval (Controller2 table).
        sqlpath = '../datas/database/Information.db'
        conn = sqlite3.connect(sqlpath)
        c = conn.cursor()
        c.execute("select * from Controller2")
        for variate in c.fetchall():
            if variate[0] == self.usrLine.text():
                return True
        c.close()
        conn.close()
        return False

    def save_data(self):
        # Insert the new account into the pending-approval table.
        # NOTE(review): the password is stored in plain text.
        sqlpath = '../datas/database/Information.db'
        conn = sqlite3.connect(sqlpath)
        a = self.usrLine.text()
        b = self.usrnameLine.text()
        c = self.pwdLineEdit1.text()
        conn.execute("INSERT INTO Controller2 VALUES(?,?,?)", (a, b, c))
        conn.commit()
        conn.close()

    def enterPress1(self):
        # Validate the phone-number field when Enter is pressed in it.
        if (len(self.usrLine.text()) == 0):
            QMessageBox.about(self, "提示!", "号码不能为空!")
            self.usrLine.setFocus()
        elif (len(self.usrLine.text()) != 11):
            QMessageBox.about(self, "提示!", "您输入的号码是错误的!\n请重新输入")
            self.usrLine.setFocus()
        elif (self.checking1()):
            # Already registered: bounce back to the login screen.
            QMessageBox.about(self, "提示!", "您输入的号码已注册!\n请您登录!")
            time.sleep(2)
            win.splitter.widget(0).setParent(None)
            win.splitter.insertWidget(0, Record())
        elif (self.checking2()):
            # Registration pending approval: also return to the login screen.
            QMessageBox.about(self, "提示!", "您输入的号码正在等待注册批准通过!\n请您耐心等待!")
            time.sleep(2)
            win.splitter.widget(0).setParent(None)
            win.splitter.insertWidget(0, Record())
        else:
            self.usrnameLine.setFocus()

    def enterPress2(self):
        # Validate the nickname field when Enter is pressed in it.
        if (len(self.usrnameLine.text()) == 0):
            QMessageBox.about(self, "提示!", "用户名不能为空!")
            self.usrnameLine.setFocus()
        else:
            self.pwdLineEdit1.setFocus()

    def enterPress3(self):
        # Validate the password field when Enter is pressed in it.
        if (len(self.pwdLineEdit1.text()) == 0):
            QMessageBox.about(self, "提示!", "密码不能为空!")
            self.pwdLineEdit1.setFocus()
        else:
            self.pwdLineEdit2.setFocus()

    def enterPress4(self):
        # Validate the password-confirmation field when Enter is pressed.
        if (len(self.pwdLineEdit2.text()) == 0):
            QMessageBox.about(self, "提示!", "密码不能为空!")
            self.pwdLineEdit2.setFocus()
        elif (self.pwdLineEdit1.text() != self.pwdLineEdit2.text()):
            QMessageBox.about(self, "提示!", "您输入的密码前后不相同!!")
        else:
            self.codeLineEdit.setFocus()

    def accept(self):
        # Full-form validation on "register"; on success the account is
        # saved for approval and the main screen is shown.
        if len(self.usrLine.text()) == 0:
            QMessageBox.about(self, "提示!", "号码不能为空!")
            self.usrLine.setFocus()
        elif len(self.usrLine.text()) != 11:
            QMessageBox.about(self, "提示!", "您输入的号码是错误的!\n请重新输入")
            self.usrLine.setFocus()
        elif (self.checking1()):
            QMessageBox.about(self, "提示!", "您输入的号码已注册!\n请您登录!")
            time.sleep(2)
            win.splitter.widget(0).setParent(None)
            win.splitter.insertWidget(0, Record())
        elif (self.checking2()):
            QMessageBox.about(self, "提示!", "您输入的号码正在等待注册批准通过!\n请您耐心等待!")
            time.sleep(2)
            win.splitter.widget(0).setParent(None)
            win.splitter.insertWidget(0, Record())
        elif (len(self.usrnameLine.text()) == 0):
            QMessageBox.about(self, "提示!", "用户名不能为空!")
            self.usrnameLine.setFocus()
        elif len(self.pwdLineEdit1.text()) == 0:
            QMessageBox.about(self, "提示!", "密码不能为空!")
            self.pwdLineEdit1.setFocus()
        elif len(self.pwdLineEdit2.text()) == 0:
            QMessageBox.about(self, "提示!", "确认密码不能为空!")
            self.pwdLineEdit2.setFocus()
        elif self.pwdLineEdit1.text() != self.pwdLineEdit2.text():
            QMessageBox.about(self, "提示!", "您输入的密码前后不相同!!")
        elif self.code.lower() != self.codeLineEdit.text().lower():
            QMessageBox.about(self, "提示!", "验证码输入错误")
            self.renovate_code()
            self.codeLineEdit.setText("")
            self.codeLineEdit.setFocus()
        else:
            win.number = self.usrLine.text()
            self.save_data()
            # Switch to the main window screen.
            win.splitter.widget(0).setParent(None)
            win.splitter.insertWidget(0, Controller_informent())

    def change_record(self):
        # Switch back to the login screen.
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Record())
class Record(QFrame):
    """Administrator login screen: phone number, password and a captcha.

    Checks credentials against the Controller table; on success the main
    Function screen is shown.
    """

    def __init__(self):
        super(Record, self).__init__()
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.setFrameShadow(QtWidgets.QFrame.Raised)
        self.usr = QLabel("用户:")
        self.password = QLabel("密码:")
        self.usrLineEdit = QLineEdit()
        self.pwdLineEdit = QLineEdit()
        self.codeLineEdit = QLineEdit()
        self.okBtn = QPushButton("登录")
        self.codebel = QLabel()       # shows the captcha image
        self.change_code = QLabel()   # "refresh captcha" link
        self.forgetbtn = QLabel()     # "forgot password" link
        self.logonbtn = QLabel()      # "register" link
        self.devise_Ui()

    def devise_Ui(self):
        """Build the login form layout and wire up its signals."""
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # install the top-level layout manager
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        self.layout.setContentsMargins(300, 0, 0, 0)
        self.usr.setMaximumSize(60, 60)
        # Set the labels' font color and size.
        self.usr.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.password.setMaximumSize(60, 60)
        self.password.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.usrLineEdit.setPlaceholderText("请输入手机号码")
        self.usrLineEdit.setMaximumSize(420, 40)
        self.usrLineEdit.setFont(QFont("宋体", 16))  # set the line edits' font and size
        self.pwdLineEdit.setMaximumSize(420, 40)
        self.pwdLineEdit.setPlaceholderText("请输入密码")
        self.pwdLineEdit.setFont(QFont("宋体", 16))
        self.pwdLineEdit.setEchoMode(QLineEdit.Password)
        self.okBtn.setStyleSheet("QPushButton{ font-family:'宋体';font-size:28px;color:rgb(0,0,0);}\
            QPushButton{background-color:rgb(170,200, 50)}\
            QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.okBtn.setMaximumSize(420, 40)
        self.codeLineEdit.setPlaceholderText("请输入右侧的验证码")
        self.codeLineEdit.setFont(QFont("宋体", 16))
        self.codeLineEdit.setMaximumSize(310, 40)
        self.change_code.setText("<A href='www.baidu.com'>看不清,换一个</a>")
        self.change_code.setStyleSheet(
            "QLabel{color:rgb(0,0,255);font-size:12px;font-weight:normal;font-family:Arial;}")
        self.change_code.setMaximumSize(120, 40)
        self.codebel.setMaximumSize(100, 40)
        self.forgetbtn.setText("<A href='www.baidu.com'>忘记密码</a>")
        self.logonbtn.setText("<A href='www.baidu.com'>注册</a>")
        self.forgetbtn.setStyleSheet("QLabel{color:rgb(0,0,255);font-size:20px;font-weight:normal;font-family:Arial;}")
        self.logonbtn.setStyleSheet("QLabel{color:rgb(0,0,255);font-size:20px;font-weight:normal;font-family:Arial;}")
        self.forgetbtn.setMaximumSize(90, 50)
        self.logonbtn.setMaximumSize(50, 50)
        self.okBtn.clicked.connect(self.accept)
        self.forgetbtn.linkActivated.connect(self.forgetfun)  # open the forgot-password screen
        self.logonbtn.linkActivated.connect(self.logonfun)    # open the registration screen
        # Pressing Enter in a field jumps (after validation) to the next one.
        self.usrLineEdit.returnPressed.connect(self.enterPress1)
        self.pwdLineEdit.returnPressed.connect(self.enterPress2)
        self.change_code.linkActivated.connect(self.renovate_code)
        self.layout.addWidget(self.usr, 1, 3, 1, 1)
        self.layout.addWidget(self.usrLineEdit, 1, 4, 1, 14)
        self.layout.addWidget(self.password, 2, 3, 1, 1)
        self.layout.addWidget(self.pwdLineEdit, 2, 4, 1, 14)
        self.layout.addWidget(self.codeLineEdit, 3, 4, 1, 5)
        self.layout.addWidget(self.codebel, 3, 9, 1, 6)
        self.layout.addWidget(self.change_code, 3, 11, 1, 1)
        self.layout.addWidget(self.okBtn, 4, 4, 1, 14)
        self.layout.addWidget(self.forgetbtn, 5, 4, 1, 2)
        self.layout.addWidget(self.logonbtn, 5, 10, 1, 2)
        self.renovate_code()

    def renovate_code(self):
        # Generate a fresh 4-character captcha, save it as an image and
        # display it in the captcha label.
        # NOTE(review): the local name `list` shadows the builtin within
        # this method.
        list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
                'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
                'u', 'v', 'w', 'x', 'y', 'z',
                'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
                'U', 'V', 'W', 'X', 'Y', 'Z']
        self.code = ''
        for num in range(1, 5):
            self.code = self.code + list[random.randint(0, 61)]
        image = ImageCaptcha().generate_image(self.code)
        image.save("../datas/wen/code.png")
        self.codebel.setPixmap(QPixmap("../datas/wen/code.png"))
        self.codebel.setScaledContents(True)  # scale the pixmap to fit the label

    def checking1(self):
        # Return True if the entered phone number is NOT registered
        # (i.e. it does not appear in the Controller table).
        # NOTE(review): on the early `return False` path the cursor and
        # connection are never closed.
        sqlpath = '../datas/database/Information.db'
        conn = sqlite3.connect(sqlpath)
        c = conn.cursor()
        c.execute("select * from Controller")
        for variate in c.fetchall():
            if variate[0] == self.usrLineEdit.text():
                return False
        c.close()
        conn.close()
        return True

    def checking2(self):
        # Return True if the entered phone number is waiting for
        # administrator approval (Controller2 table).
        sqlpath = '../datas/database/Information.db'
        conn = sqlite3.connect(sqlpath)
        c = conn.cursor()
        c.execute("select * from Controller2")
        for variate in c.fetchall():
            if variate[0] == self.usrLineEdit.text():
                return True
        c.close()
        conn.close()
        return False

    def enterPress1(self):
        # Validate the phone-number field when Enter is pressed in it.
        if len(self.usrLineEdit.text()) == 0:
            QMessageBox.about(self, "提示!", "号码不能为空!")
            self.usrLineEdit.setFocus()
        elif len(self.usrLineEdit.text()) != 11:
            QMessageBox.about(self, "提示!", "您输入的号码是错误的!\n请重新输入")
            self.usrLineEdit.setFocus()
        elif (self.checking2()):
            QMessageBox.about(self, "提示!", "您输入的号码正在等待注册批准通过!\n请您耐心等待!")
            self.usrLineEdit.setText("")
        elif (self.checking1()):
            QMessageBox.about(self, "提示!", "该账号还未注册!\n请先注册!")
        else:
            self.pwdLineEdit.setFocus()

    def enterPress2(self):
        # Validate the password field when Enter is pressed in it.
        if len(self.pwdLineEdit.text()) == 0:
            QMessageBox.about(self, "提示!", "密码不能为空!")
            self.pwdLineEdit.setFocus()
        else:
            self.codeLineEdit.setFocus()

    def accept(self):
        # Validate the whole form and check the credentials against the
        # Controller table; on success show the main Function screen.
        if len(self.usrLineEdit.text()) == 0:
            QMessageBox.about(self, "提示!", "号码不能为空!")
            self.usrLineEdit.setFocus()
        elif len(self.usrLineEdit.text()) != 11:
            QMessageBox.about(self, "提示!", "您输入的号码是错误的!\n请重新输入")
            self.usrLineEdit.setFocus()
        elif (self.checking2()):
            QMessageBox.about(self, "提示!", "您输入的号码正在等待注册批准通过!\n请您耐心等待!")
            self.usrLineEdit.setText("")
        elif (self.checking1()):
            QMessageBox.about(self, "提示!", "该账号还未注册!\n请先注册!")
        elif len(self.pwdLineEdit.text()) == 0:
            QMessageBox.about(self, "提示!", "密码不能为空!")
            self.pwdLineEdit.setFocus()
        elif self.code.lower() != self.codeLineEdit.text().lower():
            QMessageBox.about(self, "提示!", "验证码输入错误")
            self.renovate_code()
            self.codeLineEdit.setText("")
            self.codeLineEdit.setFocus()
        else:
            sqlpath = '../datas/database/Information.db'
            conn = sqlite3.connect(sqlpath)
            c = conn.cursor()
            c.execute("select * from Controller")
            # d == 1 when a row matches both number and password.
            # NOTE(review): passwords are compared in plain text.
            d = 0
            for variate in c.fetchall():
                if variate[0] == self.usrLineEdit.text() and variate[2] == self.pwdLineEdit.text():
                    d = 1
                    break
            c.close()
            conn.close()
            if d == 1:
                # Remember the logged-in user and show the main screen.
                win.number = self.usrLineEdit.text()
                win.splitter.widget(0).setParent(None)
                win.splitter.insertWidget(0, Function())
            else:
                QMessageBox.about(self, "提示!", "账号或密码输入错误")

    def forgetfun(self):
        # Switch to the forgot-password screen.
        # NOTE(review): `Forget().renovate_code()` acts on a throwaway
        # instance; the Forget widget actually inserted is a second instance.
        win.splitter.widget(0).setParent(None)
        Forget().renovate_code()
        win.splitter.insertWidget(0, Forget())

    def logonfun(self):
        # Switch to the registration screen (same throwaway-instance
        # pattern as forgetfun).
        win.splitter.widget(0).setParent(None)
        Logon().renovate_code()
        win.splitter.insertWidget(0, Logon())
# 用户忘记密码
class Forget(QFrame):
    """Password-recovery pane.

    The user enters a phone number, a new password (twice) and a CAPTCHA;
    on success :meth:`savedate` overwrites the stored password in the
    Controller table and the main ``Function`` pane is shown.
    """
    def __init__(self):
        super(Forget, self).__init__()
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.setFrameShadow(QtWidgets.QFrame.Raised)
        # Form labels, inputs and buttons (laid out in devise_Ui).
        self.usr2 = QLabel("用户:")
        self.pwd2 = QLabel("密码:")
        self.pwd3 = QLabel("确认密码:")
        self.usrLineEdit2 = QLineEdit()
        self.pwdLineEdit2 = QLineEdit()
        self.pwdLineEdit3 = QLineEdit()
        self.codeLineEdit1 = QLineEdit()
        self.okBtn1 = QPushButton("确认")
        self.returnBtn = QPushButton("返回")
        self.codebel = QLabel()
        self.change_code = QLabel()
        self.devise_Ui()
    def devise_Ui(self):
        """Build the form layout, styling and signal wiring, then render
        the first CAPTCHA."""
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # top-level layout manager
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        self.layout.setContentsMargins(300, 0, 0, 0)
        self.usr2.setMaximumSize(50, 40)
        self.pwd2.setMaximumSize(50, 40)
        self.pwd3.setMaximumSize(80, 40)
        # Label font colour / size styling.
        self.usr2.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:18px;font-weight:Bold;font-family:Arial;}")
        self.pwd2.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:18px;font-weight:Bold;font-family:Arial;}")
        self.pwd3.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:18px;font-weight:Bold;font-family:Arial;}")
        self.usrLineEdit2.setMaximumSize(420, 40)
        self.pwdLineEdit2.setMaximumSize(420, 40)
        self.pwdLineEdit3.setMaximumSize(420, 40)
        self.codeLineEdit1.setMaximumSize(310, 40)
        self.usrLineEdit2.setPlaceholderText("请输入手机号码")
        self.pwdLineEdit2.setPlaceholderText("请输入新的密码")
        self.pwdLineEdit3.setPlaceholderText("请重新输入新的密码")
        self.codeLineEdit1.setPlaceholderText("请输入右侧的验证码")
        self.usrLineEdit2.setFont(QFont("宋体", 12))  # input font family / size
        self.pwdLineEdit2.setFont(QFont("宋体", 12))
        self.pwdLineEdit3.setFont(QFont("宋体", 12))
        self.codeLineEdit1.setFont(QFont("宋体", 12))
        self.pwdLineEdit2.setEchoMode(QLineEdit.Password)
        self.pwdLineEdit3.setEchoMode(QLineEdit.Password)
        self.okBtn1.setStyleSheet("QPushButton{ font-family:'宋体';font-size:28px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.okBtn1.setMaximumSize(420, 40)
        self.returnBtn.setStyleSheet("QPushButton{ font-family:'宋体';font-size:22px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.change_code.setText("<A href='www.baidu.com'>看不清,换一个</a>")
        self.change_code.setStyleSheet(
            "QLabel{color:rgb(0,0,255);font-size:12px;font-weight:normal;font-family:Arial;}")
        self.change_code.setMaximumSize(120, 40)
        self.returnBtn.setMaximumSize(80, 40)
        self.codebel.setMaximumSize(100, 40)
        self.okBtn1.clicked.connect(self.accept)
        self.usrLineEdit2.returnPressed.connect(self.enterPress1)  # Enter in the account field
        self.pwdLineEdit2.returnPressed.connect(self.enterPress2)  # Enter in the password field
        self.pwdLineEdit3.returnPressed.connect(self.enterPress3)  # Enter in the confirm-password field
        self.returnBtn.clicked.connect(self.return_record)
        self.change_code.linkActivated.connect(self.renovate_code)
        self.layout.addWidget(self.returnBtn, 0, 1, 1, 1)
        self.layout.addWidget(self.usr2, 1, 3, 1, 1)
        self.layout.addWidget(self.usrLineEdit2, 1, 5, 1, 14)
        self.layout.addWidget(self.pwd2, 2, 3, 1, 1)
        self.layout.addWidget(self.pwdLineEdit2, 2, 5, 1, 14)
        self.layout.addWidget(self.pwd3, 3, 3, 1, 1)
        self.layout.addWidget(self.pwdLineEdit3, 3, 5, 1, 14)
        self.layout.addWidget(self.codeLineEdit1, 4, 5, 1, 5)
        self.layout.addWidget(self.codebel, 4, 10, 1, 6)
        self.layout.addWidget(self.okBtn1, 5, 5, 1, 14)
        self.renovate_code()
    def return_record(self):
        """Go back to the login (Record) pane."""
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Record())
    def renovate_code(self):
        """Generate a new 4-character CAPTCHA image and show it in
        ``codebel``; stores the expected text in ``self.code``."""
        # NOTE(review): this local shadows the builtin ``list``.
        list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
                'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
                'u',
                'v', 'w', 'x', 'y', 'z',
                'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
                'U',
                'V', 'W', 'X', 'Y', 'Z']
        self.code = ''
        for num in range(1, 5):  # four iterations -> four characters
            self.code = self.code + list[random.randint(0, 61)]
        image = ImageCaptcha().generate_image(self.code)
        image.save("../datas/wen/code.png")
        self.codebel.setPixmap(QPixmap("../datas/wen/code.png"))
        self.codebel.setScaledContents(True)  # scale the image to the label
    def checking1(self):
        """Return True when the entered number is NOT in the Controller
        table (i.e. the account was never registered)."""
        sqlpath = '../datas/database/Information.db'
        conn = sqlite3.connect(sqlpath)
        c = conn.cursor()
        c.execute("select * from Controller")
        for variate in c.fetchall():
            if variate[0] == self.usrLineEdit2.text():
                # NOTE(review): returning here skips c.close()/conn.close().
                return False
        c.close()
        conn.close()
        return True
    def savedate(self):
        """Overwrite the stored password for the entered number and record
        the number in ``win.number``."""
        sqlpath = '../datas/database/Information.db'
        conn = sqlite3.connect(sqlpath)
        c = conn.cursor()
        c.execute("select * from Controller")
        for variate in c.fetchall():
            if variate[0] == self.usrLineEdit2.text():
                win.number = variate[0]
                conn.execute("update Controller set password=(?) where number=(?)",
                             (self.pwdLineEdit2.text(), variate[0],))
                break
        conn.commit()
        c.close()
        conn.close()
    def enterPress1(self):
        """On Enter in the account field: require non-empty, 11 characters
        and a registered number; an unregistered number jumps to Logon."""
        if len(self.usrLineEdit2.text()) == 0:
            QMessageBox.about(self, "提示!", "号码不能为空!")
            self.usrLineEdit2.setFocus()
        elif len(self.usrLineEdit2.text()) != 11:
            QMessageBox.about(self, "提示!", "您输入的号码是错误的!\n请重新输入")
            self.usrLineEdit2.setFocus()
        elif (self.checking1()):
            QMessageBox.about(self, "提示!", "该账号还未注册!\n请先注册!")
            time.sleep(2)  # NOTE(review): blocks the UI thread for 2 s
            win.splitter.widget(0).setParent(None)
            win.splitter.insertWidget(0, Logon())
        else:
            self.pwdLineEdit2.setFocus()
    def enterPress2(self):
        """On Enter in the password field: require non-empty, then move
        focus to the confirm-password field."""
        if len(self.pwdLineEdit2.text()) == 0:
            QMessageBox.about(self, "提示!", "密码不能为空!")
            self.pwdLineEdit2.setFocus()
        else:
            self.pwdLineEdit3.setFocus()
    def enterPress3(self):
        """On Enter in the confirm field: require non-empty and matching
        passwords, then move focus to the CAPTCHA field."""
        if len(self.pwdLineEdit3.text()) == 0:
            QMessageBox.about(self, "提示!", "密码不能为空!")
            self.pwdLineEdit3.setFocus()
        elif self.pwdLineEdit2.text() != self.pwdLineEdit3.text():
            QMessageBox.about(self, "提示!", "您输入的密码前后不相同!!")
        else:
            self.codeLineEdit1.setFocus()
    def accept(self):
        """Run the whole validation chain; on success save the new password
        and switch to the main Function pane."""
        if len(self.usrLineEdit2.text()) == 0:
            QMessageBox.about(self, "提示!", "号码不能为空!")
            self.usrLineEdit2.setFocus()
        elif len(self.usrLineEdit2.text()) != 11:
            QMessageBox.about(self, "提示!", "您输入的号码是错误的!\n请重新输入")
            self.usrLineEdit2.setFocus()
        elif len(self.pwdLineEdit2.text()) == 0:
            QMessageBox.about(self, "提示!", "密码不能为空!")
            self.pwdLineEdit2.setFocus()
        elif len(self.pwdLineEdit3.text()) == 0:
            QMessageBox.about(self, "提示!", "密码不能为空!")
            self.pwdLineEdit3.setFocus()
        elif self.pwdLineEdit2.text() != self.pwdLineEdit3.text():
            QMessageBox.about(self, "提示!", "您输入的密码前后不相同!!")
        elif self.code.lower() != self.codeLineEdit1.text().lower():
            QMessageBox.about(self, "提示!", "验证码输入错误")
            self.renovate_code()
            self.codeLineEdit1.setText("")
            self.codeLineEdit1.setFocus()
        else:
            self.savedate()
            # TODO (from original): add a lookup of the user's grade here.
            # NOTE(review): unlike enterPress1, this path never calls
            # checking1(), so an unregistered number still reaches Function.
            win.splitter.widget(0).setParent(None)
            win.splitter.insertWidget(0, Function())  # switch to the main pane
# 管理员信息填写
class Controller_informent(QFrame):
    """Profile form a new administrator fills in after registering: name,
    birth year/month, sex, school and an avatar.

    On confirmation the profile is written to the shared database and a
    private per-administrator database is created; the user is then sent
    back to the login (Record) pane to wait for approval.

    Bug fixed vs. the original: ``connect_fun`` used two independent
    ``if`` statements, so with an empty name (but non-empty school) the
    record was still saved; the checks are now a single ``elif`` chain.
    """
    def __init__(self):
        super(Controller_informent, self).__init__()
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.setFrameShadow(QtWidgets.QFrame.Raised)
        # Form widgets (laid out in devise_Ui).
        self.sure = QPushButton("确认")
        self.chang_image = QPushButton("换头像")
        self.name = QLabel("姓名:")
        self.year = QLabel("出生年月")
        self.yearcb = QComboBox()
        self.monthcb = QComboBox()
        self.sex = QLabel("性别:")
        self.sexcb = QComboBox()
        self.school = QLabel("学校:")
        self.nameEdit = QLineEdit()
        self.tupian = QLabel()
        self.schoolEiit = QLineEdit()
        self.devise_Ui()
    def devise_Ui(self):
        """Build the profile form: avatar preview, inputs, combo boxes."""
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # top-level layout manager
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking
        self.layout.setContentsMargins(100, 0, 0, 0)
        # Selectable birth years / months.
        yearnb = [str(i) for i in range(1960, 2005)]
        monthmb = [str(i) for i in range(1, 13)]
        self.sex.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        # NOTE(review): chang_image is a QPushButton, so this QLabel
        # selector never matches — kept as-is to preserve appearance.
        self.chang_image.setStyleSheet(
            "QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.school.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.name.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.year.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.sure.setStyleSheet("QPushButton{ font-family:'宋体';font-size:22px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.nameEdit.setPlaceholderText("请输入姓名")
        self.schoolEiit.setPlaceholderText("请输入学校名称")
        self.nameEdit.setFont(QFont("宋体", 14))  # input font family / size
        self.schoolEiit.setFont(QFont("宋体", 14))
        self.name.setMaximumSize(50, 40)
        self.chang_image.setMaximumSize(90, 40)
        self.school.setMaximumSize(50, 40)
        self.year.setMaximumSize(95, 40)
        self.sex.setMaximumSize(50, 40)
        self.nameEdit.setMaximumSize(420, 40)
        self.schoolEiit.setMaximumSize(420, 40)
        self.sure.setMaximumSize(420, 40)
        self.sexcb.setMaximumSize(420, 40)
        self.yearcb.setMaximumSize(220, 40)
        self.monthcb.setMaximumSize(175, 40)
        self.tupian.setMaximumSize(250, 250)
        self.sexcb.setStyleSheet("QComboBox{font-family:'宋体';font-size: 18px;}")
        self.yearcb.setStyleSheet("QComboBox{font-family:'宋体';font-size: 18px;}")
        self.monthcb.setStyleSheet("QComboBox{font-family:'宋体';font-size: 18px;}")
        self.sexcb.addItems(['男', '女'])
        self.yearcb.addItems(yearnb)
        self.monthcb.addItems(monthmb)
        self.layout.addWidget(self.tupian, 1, 1, 4, 4)
        self.layout.addWidget(self.chang_image, 4, 2, 1, 2)
        self.layout.addWidget(self.name, 1, 6, 1, 1)
        self.layout.addWidget(self.nameEdit, 1, 8, 1, 8)
        self.layout.addWidget(self.sex, 2, 6, 1, 1)
        self.layout.addWidget(self.sexcb, 2, 8, 1, 8)
        self.layout.addWidget(self.year, 3, 6, 1, 1)
        self.layout.addWidget(self.yearcb, 3, 8, 1, 4)
        self.layout.addWidget(self.monthcb, 3, 11, 1, 7)
        self.layout.addWidget(self.school, 4, 6, 1, 1)
        self.layout.addWidget(self.schoolEiit, 4, 8, 1, 8)
        self.layout.addWidget(self.sure, 6, 8, 1, 8)
        self.image()
        self.sure.clicked.connect(self.connect_fun)
        self.chang_image.clicked.connect(self.chang_fun)
    def image(self):
        """Show the default avatar and remember its path/suffix."""
        self.image_path = "../datas/image/a7.jpeg"
        self.file = os.path.splitext(self.image_path)[1]
        self.tupian.setPixmap(QPixmap(self.image_path))
        self.tupian.setScaledContents(True)  # scale the image to the label
        QApplication.processEvents()
    def chang_fun(self):
        """Let the user pick a .jpg avatar; fall back to the default when
        the dialog is cancelled."""
        path, _ = QFileDialog.getOpenFileName(self, '请选择文件',
                                              '/', 'image(*.jpg)')
        if path:
            self.image_path = path
            self.file = os.path.splitext(self.image_path)[1]
            self.tupian.setPixmap(QPixmap(self.image_path))
            self.tupian.setScaledContents(True)  # scale to label
        else:
            self.image()
    def save_data(self):
        """Persist the profile: avatar (base64) + data row in the shared
        DB, then create this administrator's private database tables."""
        a = self.nameEdit.text()
        b = self.yearcb.currentText() + '-' + self.monthcb.currentText()
        c = self.sexcb.currentText()
        d = self.schoolEiit.text()
        # Read the avatar as base64 bytes (redundant f.close() removed —
        # the with-block already closes the file).
        with open(self.image_path, "rb") as f:
            total = base64.b64encode(f.read())
        sqlpath = '../datas/database/Information.db'
        conn = sqlite3.connect(sqlpath)
        conn.execute("insert into Controller_image2 values(?,?,?)", (win.number, total, self.file,))
        conn.commit()
        conn.execute("INSERT INTO Controller_data2 VALUES(?,?,?,?,?)", (win.number, a, b, c, d,))
        conn.commit()
        conn.close()
        # Create the per-administrator database; "table already exists"
        # errors are deliberately ignored (narrowed from a bare except).
        sqlpath = "../datas/database/ControllerSQ" + str(win.number) + "L.db"
        conn = sqlite3.connect(sqlpath)
        cur = conn.cursor()
        try:  # courseware files: no, course no, course name, file name, two suffixes
            cur.execute('''CREATE TABLE Filename(no text,Cno text,Cname text,name text,filename1 text,filename2 text)''')
        except sqlite3.OperationalError:
            pass
        try:
            cur.execute('''CREATE TABLE fileimage(no text,total LONGBLOB )''')
        except sqlite3.OperationalError:
            pass
        try:
            cur.execute('''CREATE TABLE Filedate(no text,total LONGBLOB )''')
        except sqlite3.OperationalError:
            pass
        try:  # exercise files: no, course no, course name, file name, answer, suffix
            cur.execute('''CREATE TABLE Filename2(no text,Cno text,Cname text,name text,answer text,filename1 text)''')
        except sqlite3.OperationalError:
            pass
        try:
            cur.execute('''CREATE TABLE Filedate2(no text,total LONGBLOB )''')
        except sqlite3.OperationalError:
            pass
        cur.close()
        conn.close()
    def connect_fun(self):
        """Validate the form; on success save the profile and return to the
        Record pane to await approval.

        Bug fixed: the school check is now ``elif`` so an empty name can no
        longer fall through to the save branch.
        """
        if len(self.nameEdit.text()) == 0:
            QMessageBox.about(self, "提示!", "姓名框不能为空!!")
            self.nameEdit.setFocus()
        elif len(self.schoolEiit.text()) == 0:
            QMessageBox.about(self, "提示!", "学校框不能为空!!")
            self.schoolEiit.setFocus()
        else:
            self.save_data()
            QMessageBox.about(self, "提示!", "等待超级管理员的验证通过后再登录!!\n请您耐心等待!!")
            time.sleep(2)  # NOTE(review): blocks the UI thread for 2 s
            win.splitter.widget(0).setParent(None)
            win.splitter.insertWidget(0, Record())
class Function(QFrame):
    """Super-administrator main menu: three large buttons that swap the
    left pane to the class-information, statistics or profile view."""
    def __init__(self):
        super(Function, self).__init__()
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.setFrameShadow(QtWidgets.QFrame.Raised)
        # The three menu buttons.
        self.mainbutton1 = QPushButton("班级信息")
        self.mainbutton2 = QPushButton("统计信息")
        self.mainbutton3 = QPushButton("我的")
        self.devise_Ui()
    def devise_Ui(self):
        """Lay the buttons out on a grid, sized from the screen resolution."""
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # top-level layout manager
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)
        self.desktop = QApplication.desktop()  # query screen resolution
        self.screenRect = self.desktop.screenGeometry()
        btn_h = self.screenRect.height() * 1.0 / 4
        btn_w = self.screenRect.width() * 1.0 / 5
        self.mainbutton1.setStyleSheet("QPushButton{ font-family:'宋体';font-size:32px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.mainbutton2.setStyleSheet("QPushButton{ font-family:'宋体';font-size:32px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.mainbutton3.setStyleSheet("QPushButton{ font-family:'宋体';font-size:32px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.mainbutton1.clicked.connect(self.select_fun1)
        self.mainbutton2.clicked.connect(self.select_fun2)
        self.mainbutton3.clicked.connect(self.select_fun3)
        # Place the buttons on one row of the grid.
        self.layout.addWidget(self.mainbutton1, 0, 0)
        self.layout.addWidget(self.mainbutton2, 0, 1)
        self.layout.addWidget(self.mainbutton3, 0, 2)
        self.mainbutton1.setMaximumSize(btn_w, btn_h)
        self.mainbutton2.setMaximumSize(btn_w, btn_h)
        self.mainbutton3.setMaximumSize(btn_w, btn_h)
    def select_fun1(self):
        """Open the class-information pane."""
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Class_news())
    def select_fun2(self):
        """Open the statistics pane."""
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Statistics_news())
    def select_fun3(self):
        """Open the administrator's own profile pane."""
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Controller_myself())
class Class_news(QFrame):
    """Class-information pane: a toolbox listing the administrator's
    courses, with buttons to go back or add a new course."""
    def __init__(self):
        super(Class_news, self).__init__()
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.setFrameShadow(QtWidgets.QFrame.Raised)
        self.returnbut = QPushButton("返回")
        self.addcourse = QPushButton("添加课程")
        self.lab = QLabel()
        self.add = AddCourse()  # reused add-course dialog
        self.devise_Ui()
    def devise_Ui(self):
        """Build the toolbox holding the course list and the two buttons."""
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # top-level layout manager
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking
        self.qtool = QToolBox()
        self.qtool.setStyleSheet("QToolBox{background:rgb(150,140,150);font-weight:Bold;color:rgb(0,0,0);}")
        self.window = Coursewindow(self)
        self.qtool.addItem(self.window, '我的课程')
        self.returnbut.setStyleSheet("QPushButton{ font-family:'宋体';font-size:18px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.addcourse.setStyleSheet("QPushButton{ font-family:'宋体';font-size:18px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.returnbut.setMaximumSize(100, 40)
        self.addcourse.setMaximumSize(100, 40)
        self.lab.setMaximumSize(200, 40)
        self.returnbut.clicked.connect(self.returnfun)
        self.addcourse.clicked.connect(self.addfun)
        self.layout.addWidget(self.returnbut, 0, 0, 1, 2)
        self.layout.addWidget(self.addcourse, 0, 17, 1, 2)
        self.layout.addWidget(self.lab, 1, 1, 1, 7)
        self.layout.addWidget(self.qtool, 2, 1, 8, 17)
    def returnfun(self):
        """Go back to the main Function pane."""
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Function())
    def addfun(self):
        """Open the add-course dialog with a cleared form."""
        self.add.nameEdit.setText('')
        self.add.image()
        # The dialog signals back so this pane can refresh the course list.
        # NOTE(review): connect() is called on every click, so after N
        # clicks changfun runs N times per signal emission.
        self.add.my_Signal.connect(self.changfun)
        self.add.show()
    def changfun(self):
        """Rebuild the course list page after a course was added."""
        self.qtool.removeItem(0)
        self.window = Coursewindow(self)
        self.qtool.addItem(self.window, '我的课程')
    def clicked(self, data):
        """Navigate to the detail pane of course ``data`` (Cno, name)."""
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Course_news(data))
class CustomWidget(QWidget):
    """List row for one course: avatar, course name, course number and
    head count.

    ``data`` is (Cno, name, numble, base64_image, image_suffix) as
    selected by ``Coursewindow``.
    """
    def __init__(self, data):
        super(CustomWidget, self).__init__()
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # top-level layout manager
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)
        self.imagelab = QLabel()
        self.namelab = QLabel(data[1])
        self.courselab = QLabel("课程编号:")
        self.numlab = QLabel("人数:")
        self.courselab2 = QLabel(data[0])
        self.numlab2 = QLabel(str(data[2]))
        # Decode the stored base64 avatar to a file QPixmap can load.
        self.image_path = "../datas/image/image" + data[4]
        with open(self.image_path, 'wb') as img_file:
            img_file.write(base64.b64decode(data[3]))
        self.namelab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.numlab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.courselab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.courselab2.setStyleSheet("QLabel{color:rgb(0, 255, 0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.numlab2.setStyleSheet("QLabel{color:rgb(0, 255, 0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        # Cap each widget's size.
        for widget, w, h in ((self.imagelab, 150, 150),
                             (self.namelab, 400, 80),
                             (self.courselab, 80, 40),
                             (self.numlab, 80, 40),
                             (self.courselab2, 100, 40),
                             (self.numlab2, 100, 40)):
            widget.setMaximumSize(w, h)
        self.imagelab.setPixmap(QPixmap(self.image_path))
        self.imagelab.setScaledContents(True)  # scale the avatar to the label
        self.layout.addWidget(self.imagelab, 0, 0, 4, 4)
        self.layout.addWidget(self.namelab, 1, 4, 3, 4)
        self.layout.addWidget(self.courselab, 1, 8, 1, 1)
        self.layout.addWidget(self.numlab, 3, 8, 1, 1)
        self.layout.addWidget(self.courselab2, 1, 9, 1, 2)
        self.layout.addWidget(self.numlab2, 3, 9, 1, 2)
class Coursewindow(QListWidget):
    """Course list for the logged-in administrator.

    Each row is a :class:`CustomWidget`; double-click opens the course
    detail pane via ``dow.clicked``; right-click offers deletion of the
    whole course.

    Bugs fixed vs. the original:
      * the four DELETE statements passed a bare string as the SQL
        parameter sequence (missing trailing comma) — sqlite3 then binds
        one character per placeholder and raises ``ProgrammingError`` for
        any course number longer than one character; parameters are now
        proper 1-tuples;
      * the connection opened in ``__init__`` is now closed.
    """
    def __init__(self, dow):
        super(Coursewindow, self).__init__()
        self.dow = dow  # owning pane, provides .clicked(data) navigation
        self.doubleClicked.connect(self.opencourse)
        conn = sqlite3.connect('../datas/database/Information.db')
        c = conn.cursor()
        c.execute("select Course.Cno,name,numble,total,filename \
        from Course,Course_image,Teacher_Course \
        where Course.Cno=Course_image.Cno and Course.Cno=Teacher_Course.Cno \
        and number=(?)", (win.number,))
        self.datas = c.fetchall()
        c.close()   # bug fix: the original never closed the connection
        conn.close()
        for data in self.datas:
            item = QListWidgetItem(self)
            item.setSizeHint(QSize(800, 150))
            item.setBackground(QColor(240, 240, 240))
            self.setItemWidget(item, CustomWidget(data))
    def contextMenuEvent(self, event):
        """Show a right-click context menu with a single delete action."""
        hitIndex = self.indexAt(event.pos()).column()
        if hitIndex > -1:
            pmenu = QMenu(self)
            pDeleteAct = QAction("删除", pmenu)
            pmenu.addAction(pDeleteAct)
            pDeleteAct.triggered.connect(self.deleteItemSlot)
            pmenu.popup(self.mapToGlobal(event.pos()))
    def deleteItemSlot(self):
        """After confirmation, delete the selected course from every
        related table and remove its list row."""
        index = self.currentIndex().row()
        if index > -1:
            rely = QMessageBox.question(self, "提示!", "该操作会删除整个课程的数据\n请问是否继续?",
                                        QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
            if rely == 65536:  # QMessageBox.No
                return
            cno = self.datas[index][0]
            conn = sqlite3.connect('../datas/database/Information.db')
            c = conn.cursor()
            # Bug fix: each parameter is a 1-tuple (the original passed the
            # bare string, which binds one character per placeholder).
            c.execute("delete from Course where Cno=(?)", (cno,))
            c.execute("delete from Course_image where Cno=(?)", (cno,))
            c.execute("delete from Teacher_Course where Cno=(?)", (cno,))
            c.execute("delete from Join_Course where Cno=(?)", (cno,))
            conn.commit()
            c.close()
            conn.close()
            item = self.takeItem(index)
            # Drop the embedded row widget as well.
            self.removeItemWidget(item)
            del item
            QMessageBox.about(self, "提示", '课程删除成功!!')
    def opencourse(self):
        """Double-click handler: open the detail pane for the selected
        course (passes (Cno, name) to the owning pane)."""
        index = self.currentIndex().row()
        if index > -1:
            da = self.datas[index][:2]
            self.dow.clicked(da)
class Course_news(QFrame):
    """Detail pane for one course: a toolbox with the course's courseware
    and exercise lists, plus buttons to add either kind of file.

    ``data`` is (Cno, course_name).
    """
    def __init__(self, data):
        super(Course_news, self).__init__()
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.setFrameShadow(QtWidgets.QFrame.Raised)
        self.data = data
        self.returnbut = QPushButton("返回")
        self.addcufile = QPushButton("添加课件")
        self.addexfile = QPushButton("添加练习")
        self.lab = QLabel()
        self.devise_Ui()
    def devise_Ui(self):
        """Build the toolbox (courseware + exercise pages) and buttons."""
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # top-level layout manager
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking
        self.qtool = QToolBox()
        self.qtool.setStyleSheet("QToolBox{background:rgb(150,140,150);font-weight:Bold;color:rgb(0,0,0);}")
        self.window1 = CoursecuQlist(self, self.data)
        self.window2 = CourseexQlist(self, self.data)
        self.qtool.addItem(self.window1, self.data[1]+" 课件")
        self.qtool.addItem(self.window2, self.data[1] + " 练习")
        self.returnbut.setStyleSheet("QPushButton{ font-family:'宋体';font-size:18px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.addcufile.setStyleSheet("QPushButton{ font-family:'宋体';font-size:18px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.addexfile.setStyleSheet("QPushButton{ font-family:'宋体';font-size:18px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.returnbut.setMaximumSize(100, 40)
        self.addcufile.setMaximumSize(100, 40)
        self.addexfile.setMaximumSize(100,40)
        self.lab.setMaximumSize(200, 40)
        self.returnbut.clicked.connect(self.returnfun)
        self.addcufile.clicked.connect(self.addcufun)
        self.addexfile.clicked.connect(self.addexfun)
        self.layout.addWidget(self.returnbut, 0, 0, 1, 2)
        self.layout.addWidget(self.addcufile,0,15,1,2)
        self.layout.addWidget(self.addexfile, 0, 17, 1, 2)
        self.layout.addWidget(self.lab, 1, 1, 1, 7)
        self.layout.addWidget(self.qtool, 2, 1, 8, 17)
    def returnfun(self):
        """Go back to the class-information pane."""
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Class_news())
    def addcufun(self):
        """Swap the left pane for the add-courseware form."""
        addcufile = Addcufile(self.data)
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, addcufile)
    def addexfun(self):
        """Swap the left pane for the add-exercise form."""
        addexfile = Addexfile(self.data)
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, addexfile)
    def clicked(self):
        """Open the full-screen slide viewer (used after a courseware file
        has been unpacked into ../datas/tupian)."""
        self.max = max_widget()
        self.max.show()
    def clicked2(self,data,answer):
        """Open the exercise window for exercise ``data`` with ``answer``."""
        self.add = Addexfilewin2(data,answer)
        self.add.show()
#课件的item 设计
class CoursecuWidget(QWidget):
    """List row for one courseware file: a preview image plus its name.

    ``data`` is (no, name, base64_image, image_suffix) as selected by
    ``CoursecuQlist``.
    """
    def __init__(self, data):
        super(CoursecuWidget, self).__init__()
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # top-level layout manager
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)
        self.imagelab = QLabel()
        self.namelab = QLabel(data[1])
        # Decode the stored base64 preview to a file QPixmap can load.
        self.image_path = "../datas/image/image" + data[3]
        with open(self.image_path, 'wb') as img_file:
            img_file.write(base64.b64decode(data[2]))
        self.namelab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.imagelab.setMaximumSize(150, 150)
        self.namelab.setMaximumSize(800, 80)
        self.imagelab.setPixmap(QPixmap(self.image_path))
        self.imagelab.setScaledContents(True)  # scale image to label
        self.layout.addWidget(self.imagelab, 0, 0, 4, 4)
        self.layout.addWidget(self.namelab, 1, 4, 3, 4)
#课件的QList
class CoursecuQlist(QListWidget):
    """Courseware list for one course, read from this administrator's
    private database.

    ``data`` is (Cno, course_name).  Double-click exports the stored zip,
    unpacks its images into ``../datas/tupian`` and opens the slide
    viewer; right-click offers deletion.
    """
    def __init__(self, dow, data):
        super(CoursecuQlist, self).__init__()
        self.dow = dow  # owning Course_news pane (provides .clicked())
        self.doubleClicked.connect(self.opencourse)
        sqlpath = "../datas/database/ControllerSQ" + str(win.number) + "L.db"
        conn = sqlite3.connect(sqlpath)
        c = conn.cursor()
        # NOTE(review): this connection is never closed.
        c.execute("select Filename.no,name,total,filename2 from \
        Filename,Fileimage where Filename.no = Fileimage.no \
        and Cno=(?) ", (data[0],))
        self.datas = c.fetchall()
        for data in self.datas:  # NOTE(review): rebinds the ``data`` parameter
            item = QListWidgetItem(self)
            item.setSizeHint(QSize(800, 150))
            item.setBackground(QColor(240, 240, 240))
            self.setItemWidget(item, CoursecuWidget(data))
    def contextMenuEvent(self, event):
        """Show a right-click context menu with a single delete action."""
        hitIndex = self.indexAt(event.pos()).column()
        if hitIndex > -1:
            pmenu = QMenu(self)
            pDeleteAct = QAction("删除", pmenu)
            pmenu.addAction(pDeleteAct)
            pDeleteAct.triggered.connect(self.deleteItemSlot)
            pmenu.popup(self.mapToGlobal(event.pos()))
    def deleteItemSlot(self):
        """After confirmation, delete the selected courseware file from all
        three tables and remove its list row."""
        index = self.currentIndex().row()
        if index > -1:
            rely = QMessageBox.question(self, "提示!", "该操作会造成数据完全删除无法恢复\n请问是否继续?",
                                        QMessageBox.Yes | QMessageBox.No,QMessageBox.Yes)
            if rely == 65536:  # 65536 == QMessageBox.No
                return
            sqlpath = "../datas/database/ControllerSQ" + str(win.number) + "L.db"
            conn = sqlite3.connect(sqlpath)
            c = conn.cursor()
            c.execute("delete from Filename where no=(?)",(self.datas[index][0],))
            c.execute("delete from Fileimage where no=(?)", (self.datas[index][0],))
            c.execute("delete from Filedate where no=(?)", (self.datas[index][0],))
            conn.commit()
            c.close()
            conn.close()
            item = self.takeItem(index)
            # Drop the embedded row widget as well.
            self.removeItemWidget(item)
            del item
            QMessageBox.about(self, "提示", '文件删除成功!!')
    def opencourse(self):
        """Double-click handler: decode the stored zip to disk, unpack it
        and open the slide viewer via the owning pane."""
        index = self.currentIndex().row()
        if index > -1:
            da = self.datas[index][:2]
            sqlpath = "../datas/database/ControllerSQ" + str(win.number) + "L.db"
            conn = sqlite3.connect(sqlpath)
            c = conn.cursor()
            # NOTE(review): this connection is never closed.
            c.execute("select Cname,name,total,filename1 from \
            Filename,Filedate where Filename.no= Filedate.no \
            and Filename.no=(?)",(da[0],))
            filedata = c.fetchall()[0]
            zip_path = '../datas/'+filedata[0]
            if (not (os.path.exists(zip_path))):  # create the course folder
                os.makedirs(zip_path)
            zip_path = zip_path +'/'+filedata[1]+filedata[3]
            total = base64.b64decode(filedata[2])
            f = open(zip_path, 'wb')
            f.write(total)
            f.close()
            self.zip_to_files(zip_path)
            self.dow.clicked()
    def zip_to_files(self, zippath):  # unpack the zip archive
        """Extract ``zippath`` into ../datas/tupian (emptied first), then
        delete the zip.  Member names are re-decoded from cp437 to gbk —
        presumably the zips are built on Chinese Windows (TODO confirm)."""
        path = '../datas/tupian'
        if (os.path.isdir(path)):  # does the folder already exist?
            fileNames = glob.glob(path + r'/*')
            if fileNames:
                for fileName in fileNames:  # empty the folder first
                    os.remove(fileName)
        else:
            os.mkdir(path)
        zf = zipfile.ZipFile(zippath)
        for fn in zf.namelist():  # copy each archive member into the folder
            #right_fn = fn.replace('\\\\', '_').replace('\\', '_').replace('//', '_').replace('/', '_') # 将文件名正确编码
            right_fn = fn.encode('cp437').decode('gbk')  # fix the member-name encoding
            right_fn = path + '/' + right_fn
            with open(right_fn, 'wb') as output_file:  # create the target file
                with zf.open(fn, 'r') as origin_file:  # open the archive member
                    shutil.copyfileobj(origin_file, output_file)  # copy the contents
        zf.close()
        os.remove(zippath)
#管理员播放课件
class max_widget(QWidget):
    """Full-screen slide viewer for the images unpacked into
    ``../datas/tupian``.

    A click on the left half of the screen goes to the previous slide, on
    the right half to the next.  Slides are rescaled to the screen size
    via PIL and cached as ``imageN.jpeg`` files.
    """
    def __init__(self):
        super(max_widget, self).__init__()
        self.pa = '../datas/tupian'
        self.fileNames = glob.glob(self.pa + r'/*')
        self.a = 1  # 1-based index of the current slide
        self.setWindowFlags(Qt.FramelessWindowHint)  # borderless window
        self.setWindowFlags(
            QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.MSWindowsFixedSizeDialogHint | QtCore.Qt.Tool)
        self.setWindowModality(QtCore.Qt.ApplicationModal)  # modal, blocks the parent window
        self.desktop = QApplication.desktop()
        # Screen resolution — the viewer fills the whole screen.
        self.screenRect = self.desktop.screenGeometry()
        self.height1 = self.screenRect.height()
        self.width1 = self.screenRect.width()
        self.resize(self.width1, self.height1)
        self.setMouseTracking(True)  # enable mouse tracking on the widget
        self.lab2 = QtWidgets.QLabel(self)
        self.lab2.resize(self.width1, self.height1)
        #self.MaximumButton1 = QPushButton(self)
        #self.MaximumButton1.resize(10, 10)
        #self.MaximumButton1.setStyleSheet("QPushButton{background-color:rgb(255,255, 255)}\
        #                QPushButton:hover{background-color:rgb(50, 10, 50)} ")
        #self.MaximumButton1.move(24, 24)
        #self.MaximumButton1.clicked.connect(self.closewin)
        self.lab2.setMouseTracking(True)  # enable mouse tracking on the label
        pa1 = self.fileNames[self.a-1]
        pa2 = self.pa + "/image" + str(self.a) + ".jpeg"
        img = Image.open(pa1)  # rescale the slide to the label size
        # NOTE(review): Image.ANTIALIAS is deprecated/removed in newer
        # Pillow versions (use Image.LANCZOS there).
        out = img.resize((self.width1, self.height1), Image.ANTIALIAS)
        out.save(pa2, 'jpeg')
        pixmap = QPixmap(pa2)  # load the rescaled slide
        self.lab2.setPixmap(pixmap)  # show it on the label
        # self.lab2.setScaledContents (True) # scale image to label
    def mousePressEvent(self, event):
        """Left half of the screen = previous slide, right half = next."""
        self.desktop = QApplication.desktop()  # screen resolution
        self.screenRect = self.desktop.screenGeometry()
        self.y = self.screenRect.height()
        self.x = self.screenRect.width()
        if (event.button() == Qt.LeftButton) and (event.pos().x() < self.x / 2):
            self.cut_images()
        if (event.button() == Qt.LeftButton) and (event.pos().x() > self.x / 2):
            self.add_images()
    def add_images(self):
        """Advance to the next slide; shows a message on the last one."""
        self.a = self.a + 1
        try:
            pa1 = self.fileNames[self.a-1]
            pa2 = self.pa + "/image" + str(self.a) + ".jpeg"
            img = Image.open(pa1)  # rescale the slide to the label size
            out = img.resize((self.width1, self.height1), Image.ANTIALIAS)
            out.save(pa2, 'jpeg')
            pixmap = QPixmap(pa2)
            self.lab2.setPixmap(pixmap)
        except:
            # Ran past the end: step back and tell the user.
            self.a = self.a - 1
            QMessageBox.about(self, "提示!", "这是最后一页")
    def cut_images(self):
        """Go back to the previous slide; shows a message on the first one."""
        self.a = self.a - 1
        # NOTE(review): pa1 is computed before the bounds check; when a==0
        # it indexes fileNames[-1] (the last slide) but is never used.
        pa1 = self.fileNames[self.a-1]
        pa2 = self.pa + "/image" + str(self.a) + ".jpeg"
        if self.a == 0:
            self.a = self.a + 1
            QMessageBox.about(self, "提示!", "这是第一页")
        else:
            img = Image.open(pa1)  # rescale the slide to the label size
            out = img.resize((self.width1, self.height1), Image.ANTIALIAS)
            out.save(pa2, 'jpeg')
            pixmap = QPixmap(pa2)
            self.lab2.setPixmap(pixmap)
#练习的item 设计
class CourseexWidget(QWidget):
    """List row for one exercise: just the exercise name.

    ``data`` is (no, name) as selected by ``CourseexQlist``.
    """
    def __init__(self, data):
        super(CourseexWidget, self).__init__()
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # top-level layout manager
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)
        # Single styled label showing the exercise name.
        self.namelab = QLabel(data[1])
        self.namelab.setMaximumSize(800, 60)
        self.namelab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.layout.addWidget(self.namelab, 1, 1, 1, 1)
#练习的QList
class CourseexQlist(QListWidget):
    """List of exercise files belonging to one course.

    Double-clicking a row unpacks the stored exercise archive and opens
    it via the parent view; the right-click context menu deletes a row
    together with its database records.
    """

    def __init__(self, dow, data):
        # dow: parent view providing clicked2(); data[0] is the course number.
        super(CourseexQlist, self).__init__()
        self.dow = dow
        self.doubleClicked.connect(self.opencourse)
        # Per-user database, keyed by the logged-in user number (global win).
        sqlpath = "../datas/database/ControllerSQ" + str(win.number) + "L.db"
        conn = sqlite3.connect(sqlpath)
        c = conn.cursor()
        c.execute("select no,name from Filename2 where Cno=(?) ", (data[0],))
        self.datas = c.fetchall()
        # One fixed-size list row per exercise record.
        for data in self.datas:
            item = QListWidgetItem(self)
            item.setSizeHint(QSize(800, 80))
            item.setBackground(QColor(240, 240, 240))
            self.setItemWidget(item, CourseexWidget(data))

    def contextMenuEvent(self, event):
        """Show a "delete" popup menu on right-click over a row."""
        hitIndex = self.indexAt(event.pos()).column()
        if hitIndex > -1:
            pmenu = QMenu(self)
            pDeleteAct = QAction("删除", pmenu)
            pmenu.addAction(pDeleteAct)
            pDeleteAct.triggered.connect(self.deleteItemSlot)
            pmenu.popup(self.mapToGlobal(event.pos()))

    def deleteItemSlot(self):
        """Delete the selected exercise from the DB and remove its row."""
        index = self.currentIndex().row()
        if index > -1:
            rely = QMessageBox.question(self, "提示!", "该操作会造成数据完全删除无法恢复\n请问是否继续?",
                                        QMessageBox.Yes | QMessageBox.No,QMessageBox.Yes)
            # 65536 is the numeric value of QMessageBox.No (user declined).
            if rely == 65536:
                return
            sqlpath = "../datas/database/ControllerSQ" + str(win.number) + "L.db"
            conn = sqlite3.connect(sqlpath)
            c = conn.cursor()
            c.execute("delete from Filename2 where no=(?)",(self.datas[index][0],))
            c.execute("delete from Filedate2 where no=(?)", (self.datas[index][0],))
            conn.commit()
            c.close()
            conn.close()
            item = self.takeItem(index)
            # Detach and drop the row widget.
            self.removeItemWidget(item)
            del item
            QMessageBox.about(self, "提示", '文件删除成功!!')

    def opencourse(self):
        """Decode the stored zip of the selected exercise, unpack it, and
        hand control to the parent view via ``dow.clicked2``."""
        index = self.currentIndex().row()
        if index > -1:
            da = self.datas[index][:2]
            sqlpath = "../datas/database/ControllerSQ" + str(win.number) + "L.db"
            conn = sqlite3.connect(sqlpath)
            c = conn.cursor()
            c.execute("select Cname,name,answer,total,filename1 from \
               Filename2,Filedate2 where Filename2.no= Filedate2.no \
                and Filename2.no=(?)",(da[0],))
            filedata = c.fetchall()[0]
            zip_path = '../datas/'+filedata[0]
            if (not (os.path.exists(zip_path))):  # create the course folder if missing
                os.makedirs(zip_path)
            zip_path = zip_path +'/'+filedata[1]+filedata[4]
            # The archive is stored base64-encoded in the DB; decode to disk.
            total = base64.b64decode(filedata[3])
            f = open(zip_path, 'wb')
            f.write(total)
            f.close()
            self.zip_to_files(zip_path)
            self.dow.clicked2(da[0],filedata[2])

    def zip_to_files(self, zippath):  # unpack the archive
        """Extract *zippath* into ../datas/tupian (emptied first), then
        delete the archive file."""
        path = '../datas/tupian'
        if (os.path.isdir(path)):  # reuse the folder if it already exists
            fileNames = glob.glob(path + r'/*')
            if fileNames:
                for fileName in fileNames:  # empty the target folder
                    os.remove(fileName)
        else:
            os.mkdir(path)
        zf = zipfile.ZipFile(zippath)
        for fn in zf.namelist():  # extract every archive member
            #right_fn = fn.replace('\\\\', '_').replace('\\', '_').replace('//', '_').replace('/', '_')
            # Member names come out cp437-encoded; re-decode as GBK so
            # Chinese filenames round-trip correctly.
            right_fn = fn.encode('cp437').decode('gbk')
            right_fn = path + '/' + right_fn
            with open(right_fn, 'wb') as output_file:  # create the output file
                with zf.open(fn, 'r') as origin_file:  # open the archived member
                    shutil.copyfileobj(origin_file, output_file)  # stream-copy contents
        zf.close()
        os.remove(zippath)
#添加课件
class Addcufile(QFrame):
    """Panel for adding courseware to a course.

    Offers three paths: a single file, every file in a directory, or the
    built-in system library.  PPT/PPTX input is converted per page via
    LibreOffice -> PDF -> JPEG, zipped, and stored base64-encoded in the
    per-user database.
    """

    def __init__(self, data):
        # data: (course no, course name) of the target course.
        super(Addcufile, self).__init__()
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.setFrameShadow(QtWidgets.QFrame.Raised)
        self.data = data
        self.returnbut = QPushButton("返回")
        self.addfile = QPushButton("添加文件")
        self.addmufile = QPushButton("添加目录")
        self.addsystem = QPushButton("从系统添加")
        self.devise_ui()

    def devise_ui(self):
        """Build the 2x2 grid of action buttons, sized from the screen."""
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # top-level layout container
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        self.desktop = QApplication.desktop()  # screen geometry for button sizing
        self.screenRect = self.desktop.screenGeometry()
        b = self.screenRect.height() * 1.0 / 4
        a = self.screenRect.width() * 1.0 / 5
        self.returnbut.setStyleSheet("QPushButton{ font-family:'宋体';font-size:32px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.addfile.setStyleSheet("QPushButton{ font-family:'宋体';font-size:32px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.addmufile.setStyleSheet("QPushButton{ font-family:'宋体';font-size:32px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.addsystem.setStyleSheet("QPushButton{ font-family:'宋体';font-size:32px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.returnbut.clicked.connect(self.returnfun)
        self.addfile.clicked.connect(self.select_fun1)
        self.addmufile.clicked.connect(self.select_fun2)
        self.addsystem.clicked.connect(self.select_fun3)
        self.layout.addWidget(self.returnbut, 0, 0)  # place buttons on the grid
        self.layout.addWidget(self.addfile,0 , 1)
        self.layout.addWidget(self.addmufile, 1, 0)
        self.layout.addWidget(self.addsystem,1,1)
        self.returnbut.setMaximumSize(a, b)
        self.addfile.setMaximumSize(a, b)
        self.addmufile.setMaximumSize(a, b)
        self.addsystem.setMaximumSize(a,b)

    def returnfun(self):
        """Swap this panel back out for the course view."""
        dow = Course_news(self.data)
        #dow.changfun()
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0,dow)

    def select_fun1(self):
        """Pick one .ppt/.pptx file, convert it to page images, zip and
        store it for this course."""
        path, _ = QFileDialog.getOpenFileName(self, '请选择文件',
                                              '/', 'ppt(*.ppt *.pptx);;)')
        if not path:
            QMessageBox.about(self, "提示", '您没有选择任何文件!!')
            return
        end_file = os.path.splitext(path)[1]
        file = os.path.split(path)[1][:-len(end_file)]
        file1 = '../datas/tupian'
        fileNames = glob.glob(file1 + r'/*')
        if fileNames:
            for fileName in fileNames:
                os.remove(fileName)  # empty the image staging folder
        if end_file == '.ppt' or end_file == '.pptx':
            self.ppt_to_pdf("../datas/wen/", path)
            pdf_path = "../datas/wen/" + file + '.pdf'
            self.pdf_to_image(pdf_path, file1)
            os.remove(pdf_path)
            self.file_to_zip(file1)
            zip_file = file1 + '.zip'
            with open(zip_file, "rb") as f:
                total = base64.b64encode(f.read())  # whole archive, base64
            f.close()  # NOTE(review): redundant — the with-block already closed f
            with open(file1 + '/image' + '1.jpg', "rb") as f:
                total2 = base64.b64encode(f.read())  # first page = preview image
            f.close()  # NOTE(review): redundant — the with-block already closed f
            self.save_date(self.data, file, total, total2, '.zip', '.jpg')
            QMessageBox.about(self,"提示",'添加文件成功!!')
        else:
            QMessageBox.about(self, "提示", '添加文件失败!!')

    def select_fun2(self):
        """Same as select_fun1 but for every ppt/pptx inside a chosen
        directory."""
        fname = QFileDialog.getExistingDirectory(self, 'open file', '/')
        if fname:
            files = glob.glob(fname + r'/*')
            if files:
                for path in files:
                    end_file = os.path.splitext(path)[1]
                    file = os.path.split(path)[1][:-len(end_file)]
                    file1 = '../datas/tupian'
                    fileNames = glob.glob(file1 + r'/*')
                    if fileNames:
                        for fileName in fileNames:
                            # Empty the image staging folder.
                            os.remove(fileName)
                    if end_file == '.ppt' or end_file == '.pptx':
                        self.ppt_to_pdf("../datas/wen/", path)
                        pdf_path = "../datas/wen/" + file + '.pdf'
                        self.pdf_to_image(pdf_path, file1)
                        os.remove(pdf_path)
                        self.file_to_zip(file1)
                        zip_file = file1 + '.zip'
                        with open(zip_file, "rb") as f:
                            total = base64.b64encode(f.read())  # whole archive, base64
                        f.close()  # NOTE(review): redundant inside with
                        with open(file1 + '/image' + '1.jpg', "rb") as f:
                            total2 = base64.b64encode(f.read())  # first page = preview
                        f.close()  # NOTE(review): redundant inside with
                        self.save_date(self.data, file, total, total2, '.zip', '.jpg')
                QMessageBox.about(self, "提示", '添加文件成功!!')
            else:
                QMessageBox.about(self, "提示", '该目录没有任何文件!!')
        else:
            QMessageBox.about(self, "提示", '您没有选择任何文件!!')

    def select_fun3(self):
        """Switch the left pane to the system-library panel."""
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0,Add_System(self.data) )

    def save_date(self,data,file,total,total2,filename1,filename2):
        """Insert one courseware record (metadata, preview image, archive)
        into the per-user database.

        data:      (course no, course name); file: base filename;
        total:     base64 archive bytes; total2: base64 preview bytes;
        filename1/filename2: stored extensions for archive and preview.
        """
        sqlpath = "../datas/database/ControllerSQ" + str(win.number) + "L.db"
        conn = sqlite3.connect(sqlpath)
        c = conn.cursor()
        # New primary key = 'C' + current row count.
        c.execute("select * from Filename")
        no = len(c.fetchall())
        c.execute("insert into Filename VALUES(?,?,?,?,?,?)",('C'+str(no),data[0],data[1],file,filename1,filename2))
        c.execute("insert into Fileimage values(?,?)",('C'+str(no),total2))
        c.execute("insert into Filedate values(?,?)",('C'+str(no),total))
        conn.commit()
        c.close()
        conn.close()

    def pdf_to_image(self, pdf_path, file1):
        """Render each page of *pdf_path* to file1/image<N>.jpg (1-based)
        using PyMuPDF (fitz)."""
        pdf = fitz.open(pdf_path)
        for pg in range(pdf.pageCount):
            page = pdf.loadPage(pg)  # render every page to an image
            pagePixmap = page.getPixmap()
            # Pixel format for the QImage conversion.
            imageFormat = QtGui.QImage.Format_RGB888
            # Build a QImage over the raw pixmap samples.
            pageQImage = QtGui.QImage(pagePixmap.samples, pagePixmap.width, pagePixmap.height, pagePixmap.stride,
                                      imageFormat)
            pageQImage.save(file1 + '/image' + '%s.jpg' % (pg + 1))
        pdf.close()

    def ppt_to_pdf(self, outfile, infile, timeout=None):
        """Convert a PPT/PPTX file to PDF via headless LibreOffice.

        outfile: directory the PDF is written into.
        infile:  path of the source presentation.
        timeout: optional subprocess timeout in seconds.
        """
        args = ['libreoffice', '--headless', '--convert-to', 'pdf', '--outdir', outfile, infile]
        process = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=timeout)
        re.search('-> (.*?) using filter', process.stdout.decode())

    def word_to_pdf(self, outfile, infile, timeout=None):
        """Convert a document to PDF via headless LibreOffice.

        NOTE(review): byte-for-byte duplicate of ppt_to_pdf — candidate
        for consolidation.
        outfile: directory the PDF is written into.
        infile:  path of the source document.
        timeout: optional subprocess timeout in seconds.
        """
        args = ['libreoffice', '--headless', '--convert-to', 'pdf', '--outdir', outfile, infile]
        process = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=timeout)
        re.search('-> (.*?) using filter', process.stdout.decode())

    def file_to_zip(self, path):  # zip a directory
        """Zip the directory *path* into <path>.zip, replacing any
        existing archive; entries are stored relative to *path*."""
        filepath = path + '.zip'
        if os.path.exists(filepath):
            os.remove(filepath)
        z = zipfile.ZipFile(filepath, 'w', zipfile.ZIP_DEFLATED)
        for dirpath, dirnames, filenames in os.walk(path):
            fpath = dirpath.replace(path, '')
            fpath = fpath and fpath + os.sep or ''
            for filename in filenames:
                z.write(os.path.join(dirpath, filename), fpath + filename)
        z.close()
#添加系统课件
class Add_System(QFrame):
    """Panel for adding courseware from the built-in system library.

    On creation it pops up a Select_location dialog; once a grade and
    subject are confirmed (surefun is called back by the dialog) the
    toolbox is filled with the matching system files.
    """

    def __init__(self, data):
        # data: (course no, course name) of the target course.
        super(Add_System, self).__init__()
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.setFrameShadow(QtWidgets.QFrame.Raised)
        self.data = data
        self.location = Select_location(self)
        self.returnbut = QPushButton("返回")
        self.doubleselect = QPushButton("重新选择")
        self.lab = QLabel()
        self.devise_Ui()

    def devise_Ui(self):
        """Lay out toolbar buttons and the (initially empty) toolbox,
        then show the location-selection dialog."""
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # top-level layout container
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        self.qtool = QToolBox()
        self.qtool.setStyleSheet("QToolBox{background:rgb(150,140,150);font-weight:Bold;color:rgb(0,0,0);}")
        self.returnbut.setStyleSheet("QPushButton{ font-family:'宋体';font-size:18px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.doubleselect.setStyleSheet("QPushButton{ font-family:'宋体';font-size:18px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.returnbut.setMaximumSize(100, 40)
        self.doubleselect.setMaximumSize(100,40)
        self.lab.setMaximumSize(200, 40)
        self.returnbut.clicked.connect(self.returnfun)
        self.doubleselect.clicked.connect(self.doublefun)
        self.layout.addWidget(self.returnbut, 0, 0, 1, 2)
        self.layout.addWidget(self.doubleselect,0,17,1,2)
        self.layout.addWidget(self.lab, 1, 1, 1, 7)
        self.layout.addWidget(self.qtool, 2, 1, 8, 17)
        self.location.show()

    def returnfun(self):
        """Go back to the Addcufile panel."""
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0,Addcufile(self.data))

    def surefun(self):
        """Callback from Select_location: rebuild the toolbox with the
        system files for the chosen grade/subject."""
        self.qtool.removeItem(0)
        greade = self.location.getgrade()
        course = self.location.getcourse()
        self.window = AddsystemQlist(self, self.data,greade,course)
        self.qtool.addItem(self.window, "系统文件")

    def doublefun(self):
        """Reset the selection dialog and show it again."""
        self.location.fun2()
        self.location.show()

    def clicked(self):
        """Callback from AddsystemQlist: open the full-screen viewer."""
        self.max = max_widget()
        self.max.show()
#添加系统课件的item 设计
class AddsystemWidget(QWidget):
    """List-row widget for one system-library file: preview image, name
    and an "add to my course" button."""

    def __init__(self,dow,data,da):
        # dow:  owning AddsystemQlist.
        # data: (course no, course name) of the target course.
        # da:   DB row (name, archive b64, archive ext, image b64, image ext).
        super(AddsystemWidget, self).__init__()
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # top-level layout container
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        self.dow = dow
        self.data = data
        self.da = da
        self.imagelab = QLabel()
        self.addbut = QPushButton("添加")
        self.namelab = QLabel(da[0])
        # Decode the stored preview image to disk so QPixmap can load it.
        self.image_path = "../datas/image/image" + da[4]
        total = base64.b64decode(da[3])
        f = open(self.image_path, 'wb')
        f.write(total)
        f.close()
        self.namelab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.addbut.setStyleSheet("QPushButton{ font-family:'宋体';font-size:18px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.imagelab.setMaximumSize(150, 150)
        self.namelab.setMaximumSize(800, 80)
        self.addbut.setMaximumSize(80,40)
        self.imagelab.setPixmap(QPixmap(self.image_path))
        self.imagelab.setScaledContents(True)  # scale image to the label
        self.addbut.clicked.connect(self.addfile)
        self.layout.addWidget(self.imagelab, 0, 0, 4, 4)
        self.layout.addWidget(self.namelab, 1, 4, 3, 4)
        self.layout.addWidget(self.addbut,3,8,1,1)

    def addfile(self):
        """Copy this system file's records into the user's own database
        (Filename / Fileimage / Filedate)."""
        sqlpath = "../datas/database/ControllerSQ" + str(win.number) + "L.db"
        conn = sqlite3.connect(sqlpath)
        c = conn.cursor()
        # New primary key = 'C' + current row count.
        c.execute("select * from Filename")
        no = len(c.fetchall())
        c.execute("insert into Filename VALUES(?,?,?,?,?,?)",
                  ('C' + str(no), self.data[0], self.data[1], self.da[0], self.da[2], self.da[4]))
        c.execute("insert into Fileimage values(?,?)", ('C' + str(no), self.da[3]))
        c.execute("insert into Filedate values(?,?)", ('C' + str(no), self.da[1]))
        conn.commit()
        c.close()
        conn.close()
        QMessageBox.about(self, "提示", '添加成功!!')
#添加系统课件的QList
class AddsystemQlist(QListWidget):
    """List of system-library files for a given grade and subject.

    Double-clicking a row unpacks that file's archive and opens the
    full-screen viewer via the owning Add_System panel.
    """

    def __init__(self, dow, data,greade,course):
        # dow: owning Add_System; data: (course no, course name);
        # greade/course: selections from the Select_location dialog.
        super(AddsystemQlist, self).__init__()
        self.dow = dow
        self.doubleClicked.connect(self.opencourse)
        # Shared (read-only) system-content database.
        sqlpath = "../datas/database/Data.db"
        conn = sqlite3.connect(sqlpath)
        c = conn.cursor()
        if greade[:3]=="一年级":
            c.execute("select name,First_Grade_data.total,First_Grade.filename, \
                First_Grade_image.total,First_Grade_image.filename from \
                First_Grade,First_Grade_data,First_Grade_image where \
                First_Grade.no = First_Grade_data.no and First_Grade.no =First_Grade_image.no \
                and level2=(?) and level3=(?)",(greade,course))
        else:
            # Only first grade has content; other grades fall through with
            # no executed query, so fetchall below yields no rows.
            QMessageBox.about(self, "提示", '其他功能暂未实现!!')
        self.datas = c.fetchall()
        for da in self.datas:
            item = QListWidgetItem(self)
            item.setSizeHint(QSize(800, 150))
            item.setBackground(QColor(240, 240, 240))
            self.setItemWidget(item, AddsystemWidget(self,data,da))

    def opencourse(self):
        """Decode the selected file's archive to disk, unpack it and open
        the viewer (dow.clicked)."""
        index = self.currentIndex().row()
        if index > -1:
            da = self.datas[index]
            zip_path = "../datas/wen/xinwen.zip"
            total = base64.b64decode(da[1])
            f = open(zip_path, 'wb')
            f.write(total)
            f.close()
            self.zip_to_files(zip_path)
            self.dow.clicked()

    def zip_to_files(self, zippath):  # unpack the archive
        """Extract *zippath* into ../datas/tupian (emptied first), then
        delete the archive file."""
        path = '../datas/tupian'
        if (os.path.isdir(path)):  # reuse the folder if it already exists
            fileNames = glob.glob(path + r'/*')
            if fileNames:
                for fileName in fileNames:  # empty the target folder
                    os.remove(fileName)
        else:
            os.mkdir(path)
        zf = zipfile.ZipFile(zippath)
        for fn in zf.namelist():  # extract every archive member
            #right_fn = fn.replace('\\\\', '_').replace('\\', '_').replace('//', '_').replace('/', '_')
            # Member names come out cp437-encoded; re-decode as GBK so
            # Chinese filenames round-trip correctly.
            right_fn = fn.encode('cp437').decode('gbk')
            right_fn = path + '/' + right_fn
            with open(right_fn, 'wb') as output_file:  # create the output file
                with zf.open(fn, 'r') as origin_file:  # open the archived member
                    shutil.copyfileobj(origin_file, output_file)  # stream-copy contents
        zf.close()
        os.remove(zippath)
#选择添加系统课件的内容
class Select_location(QWidget):
    """Modal dialog for choosing study stage / grade / subject before
    browsing the system library.

    On confirmation it calls ``dow.surefun()`` on the owning panel.
    """

    def __init__(self,dow):
        # dow: owning Add_System panel (provides surefun callback).
        super(Select_location, self).__init__()
        self.dow = dow
        self.setWindowTitle("选择添加系统文件的内容")
        self.lab = QLabel("请选择添加系统文件的内容!!!!")
        self.typelab = QLabel("学习阶段")
        self.typebox = QComboBox()
        self.greadelab = QLabel("年级")
        self.greadebox = QComboBox()
        self.courselab = QLabel("科目")
        self.coursebox = QComboBox()
        self.sure = QPushButton("确定")
        self.devise_ui()

    def devise_ui(self):
        """Size, center and style the dialog, populate the stage combo
        box, and wire the signals."""
        self.resize(750, 400)
        self.desktop = QApplication.desktop()  # screen geometry for centering
        self.screenRect = self.desktop.screenGeometry()
        self.move((self.screenRect.width() - 800) / 2, (self.screenRect.height() - 500) / 2)  # center on screen
        self.setWindowFlags(
            QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.MSWindowsFixedSizeDialogHint | QtCore.Qt.Tool)
        self.setWindowModality(QtCore.Qt.ApplicationModal)  # modal: block the parent window
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # top-level layout container
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        self.lab.setStyleSheet("QLabel{color:rgb(255,0,0);font-size:28px;font-weight:Bold;font-family:Arial;}")
        self.typelab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:18px;font-weight:Bold;font-family:Arial;}")
        self.courselab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:18px;font-weight:Bold;font-family:Arial;}")
        self.greadelab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:18px;font-weight:Bold;font-family:Arial;}")
        self.typebox.setStyleSheet("QComboBox{font-family:'宋体';font-size: 18px;}")
        self.coursebox.setStyleSheet("QComboBox{font-family:'宋体';font-size: 18px;}")
        self.greadebox.setStyleSheet("QComboBox{font-family:'宋体';font-size: 18px;}")
        self.sure.setStyleSheet("QPushButton{ font-family:'宋体';font-size:18px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.lab.setMaximumSize(400,80)
        self.typelab.setMaximumSize(80,50)
        self.greadelab.setMaximumSize(80,50)
        self.courselab.setMaximumSize(80,50)
        self.typebox.setMaximumSize(160, 50)
        self.greadebox.setMaximumSize(160, 50)
        self.coursebox.setMaximumSize(160, 50)
        self.sure.setMaximumSize(80,50)
        # NOTE(review): the three setStyleSheet calls below duplicate the
        # identical calls above — harmless but redundant.
        self.typebox.setStyleSheet("QComboBox{font-family:'宋体';font-size: 18px;}")
        self.greadebox.setStyleSheet("QComboBox{font-family:'宋体';font-size: 18px;}")
        self.coursebox.setStyleSheet("QComboBox{font-family:'宋体';font-size: 18px;}")
        self.typebox.addItems(['','小学','初中','高中'])
        self.typebox.currentIndexChanged.connect(self.fun1)
        self.sure.clicked.connect(self.surefun)
        self.layout.addWidget(self.lab,0,2,1,6)
        self.layout.addWidget(self.typelab,1,0,1,1)
        self.layout.addWidget(self.typebox,1,1,1,2)
        self.layout.addWidget(self.greadelab,1,3,1,1)
        self.layout.addWidget(self.greadebox,1,4,1,2)
        self.layout.addWidget(self.courselab,1,6,1,1)
        self.layout.addWidget(self.coursebox,1,7,1,2)
        self.layout.addWidget(self.sure,2,8,1,1)

    def fun1(self):
        """Repopulate the grade/subject combos whenever the study stage
        changes."""
        self.greadebox.clear()
        self.coursebox.clear()
        if self.typebox.currentText()=="小学":
            self.greadebox.addItems(['','一年级上册','一年级下册','二年级上册','二年级下册','三年级上册','三年级下册',
                                     '四年级上册','四年级下册','五年级上册','五年级下册','六年级上册','六年级下册'])
            self.coursebox.addItems(['','语文','数学','英语'])
        elif self.typebox.currentText()=="初中":
            self.greadebox.addItems(['','初一上册','初一下册','初二上册','初二下册','初三上册','初三下册',])
            self.coursebox.addItems(['','语文','数学','英语','物理','化学','生物','政治','历史','地理'])
        elif self.typebox.currentText()=="高中":
            self.greadebox.addItems(['','必修一','必修二','必修三','必修四','必修五',
                                     '选修一','选修二','选修三','选修四','选修五'])
            self.coursebox.addItems(['','语文','数学','英语','物理','化学','生物','政治','历史','地理'])

    def fun2(self):
        """Reset all three combo boxes to their initial state."""
        self.greadebox.clear()
        self.coursebox.clear()
        self.typebox.clear()
        self.typebox.addItems(['', '小学', '初中', '高中'])

    def surefun(self):
        """Validate the selection; on success close the dialog and notify
        the owning panel via dow.surefun()."""
        if(self.greadebox.currentText()==""):
            QMessageBox.about(self, "提示", '年级的选项框不能为空!!')
            return
        elif(self.typebox.currentText()==""):
            QMessageBox.about(self, "提示", '学习阶段的选项框不能为空!!')
            return
        elif (self.coursebox.currentText()==""):
            QMessageBox.about(self, "提示", '科目的选项框不能为空!!')
            return
        elif (self.greadebox.currentText()[:3]!="一年级"):
            # Only first-grade content exists in the system DB so far.
            QMessageBox.about(self, "抱歉", '目前只能添加小学一年级的课件!!')
            return
        else:
            self.close()
            self.dow.surefun()

    def gettype(self):
        """Return the selected study stage text."""
        return self.typebox.currentText()

    def getgrade(self):
        """Return the selected grade text."""
        return self.greadebox.currentText()

    def getcourse(self):
        """Return the selected subject text."""
        return self.coursebox.currentText()
#添加练习
class Addexfile(QFrame):
    """Panel for adding an exercise set to a course.

    Only "add directory" is implemented: the chosen directory's image
    files become the questions, answered via an Addexfilewin window.
    """

    def __init__(self, data):
        # data: (course no, course name) of the target course.
        super(Addexfile, self).__init__()
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.setFrameShadow(QtWidgets.QFrame.Raised)
        self.data = data
        self.returnbut = QPushButton("返回")
        self.addfile = QPushButton("添加文件")
        self.addmufile = QPushButton("添加目录")
        self.addsystem = QPushButton("从系统添加")
        self.devise_Ui()

    def devise_Ui(self):
        """Build the 2x2 grid of action buttons, sized from the screen."""
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # top-level layout container
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        self.desktop = QApplication.desktop()  # screen geometry for button sizing
        self.screenRect = self.desktop.screenGeometry()
        b = self.screenRect.height() * 1.0 / 4
        a = self.screenRect.width() * 1.0 / 5
        self.returnbut.setStyleSheet("QPushButton{ font-family:'宋体';font-size:32px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.addfile.setStyleSheet("QPushButton{ font-family:'宋体';font-size:32px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.addmufile.setStyleSheet("QPushButton{ font-family:'宋体';font-size:32px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.addsystem.setStyleSheet("QPushButton{ font-family:'宋体';font-size:32px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.returnbut.clicked.connect(self.returnfun)
        self.addfile.clicked.connect(self.select_fun1)
        self.addmufile.clicked.connect(self.select_fun2)
        self.addsystem.clicked.connect(self.select_fun3)
        self.layout.addWidget(self.returnbut, 0, 0)  # place buttons on the grid
        self.layout.addWidget(self.addfile,0 , 1)
        self.layout.addWidget(self.addmufile, 1, 0)
        self.layout.addWidget(self.addsystem,1,1)
        self.returnbut.setMaximumSize(a, b)
        self.addfile.setMaximumSize(a, b)
        self.addmufile.setMaximumSize(a, b)
        self.addsystem.setMaximumSize(a,b)

    def returnfun(self):
        """Swap this panel back out for the course view."""
        dow = Course_news(self.data)
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, dow)

    def select_fun1(self):
        """Not implemented yet."""
        QMessageBox.about(self, "提示", '抱歉!!\n该功能暂时未实现!!')

    def select_fun2(self):
        """Pick a directory of question images and open the answer-entry
        window for it."""
        fname = QFileDialog.getExistingDirectory(self, 'open file', '/')
        if fname:
            try:
                files = glob.glob(fname + r'/*')
                # Indexing files[0] raises IndexError for an empty folder,
                # which the except below turns into a message box.
                pa = files[0]
                self.dow = Addexfilewin(self.data, fname)
                self.dow.show()
            except:
                QMessageBox.about(self, "提示", '您选择的文件夹没有任何文件!!')
        else:
            QMessageBox.about(self, "提示", '您没有选择任何文件!!')

    def select_fun3(self):
        """Not implemented yet."""
        QMessageBox.about(self, "提示", '抱歉!!\n该功能暂时未实现!!')
class Addexfilewin2(QWidget):
    """Answer-editing window for an EXISTING exercise set.

    The stored answer string is decoded ("file#answer#analysis" records
    joined with '@') into ``self.answers``; entries edited during this
    session are kept in ``self.answer`` and written back on save.
    """

    def __init__(self,data,answer):
        # data: the exercise set's primary key (Cno); answer: the encoded
        # answer string from the Filename2 table.
        super(Addexfilewin2, self).__init__()
        self.sure = QPushButton("保存")
        self.concle = QPushButton("取消")
        self.cutimage = QPushButton("上一题")
        self.addimage = QPushButton("下一题")
        self.imagelab = QLabel()
        self.answerlab = QLabel("答案")
        self.answerEdit = QLineEdit()
        self.analysislab = QLabel("解析")
        self.analysisEdit = QTextEdit()
        self.data = data
        # Decode "file#answer#analysis@file#answer#analysis@..." into
        # a list of [filename, answer, analysis] triples.
        lists = answer.split("@")
        self.answers = []
        for list in lists:
            da = list.split("#")
            self.answers.append(da)
        self.a = 0                 # index of the current question image
        self.fname = '../datas/tupian'
        self.files = glob.glob(self.fname + r'/*')  # question image paths
        self.answer = []           # entries edited during this session
        self.devise_Ui()

    def devise_Ui(self):
        """Center/modalize the window, style the widgets, show the first
        question with its stored answer, and wire the buttons."""
        self.resize(800, 500)
        self.desktop = QApplication.desktop()  # screen geometry for centering
        self.screenRect = self.desktop.screenGeometry()
        self.move((self.screenRect.width() - 800) / 2, (self.screenRect.height() - 500) / 2)  # center on screen
        self.setWindowFlags(
            QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.MSWindowsFixedSizeDialogHint | QtCore.Qt.Tool)
        self.setWindowModality(QtCore.Qt.ApplicationModal)  # modal: block the parent window
        # self.setWindowFlags(Qt.WindowStaysOnTopHint)  # (always-on-top, disabled)
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # top-level layout container
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        self.answerlab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.analysislab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.sure.setStyleSheet("QPushButton{ font-family:'宋体';font-size:20px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.concle.setStyleSheet("QPushButton{ font-family:'宋体';font-size:20px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.addimage.setStyleSheet("QPushButton{ font-family:'宋体';font-size:20px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.cutimage.setStyleSheet("QPushButton{ font-family:'宋体';font-size:20px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.answerEdit.setFont(QFont("宋体", 14))
        self.analysisEdit.setFont(QFont("宋体", 14))
        self.imagelab.setMaximumSize(400,250)
        self.cutimage.setMaximumSize(80,40)
        self.addimage.setMaximumSize(80,40)
        self.answerlab.setMaximumSize(80,40)
        self.analysislab.setMaximumSize(80,40)
        self.answerEdit.setMaximumSize(250,40)
        self.analysisEdit.setMaximumSize(250,180)
        self.pa = self.files[self.a]
        self.filename = os.path.split(self.pa)[1]
        pixmap = QPixmap(self.pa)  # load the first question image
        self.imagelab.setPixmap(pixmap)  # show it on the label
        self.imagelab.setScaledContents(True)  # scale image to the label
        # Pre-fill the fields from the stored answer for the first
        # question and consume that entry from self.answers.
        for answer in self.answers:
            if answer[0]==self.filename:
                self.answerEdit.setText(answer[1])
                self.analysisEdit.setText(answer[2])
                self.answers.remove(answer)
        self.layout.addWidget(self.cutimage,0,1,1,1)
        self.layout.addWidget(self.addimage,0,9,1,1)
        self.layout.addWidget(self.answerlab,2,1,2,1)
        self.layout.addWidget(self.answerEdit,2,2,2,3)
        self.layout.addWidget(self.analysislab,4,1,2,1)
        self.layout.addWidget(self.analysisEdit,4,2,4,3)
        self.layout.addWidget(self.imagelab,3,6,4,4)
        self.layout.addWidget(self.sure, 11, 8, 1, 1)
        self.layout.addWidget(self.concle, 11, 9, 1, 1)
        self.addimage.clicked.connect(self.addfun)
        self.cutimage.clicked.connect(self.cutfun)
        self.concle.clicked.connect(self.conclefun)
        self.sure.clicked.connect(self.surefun)

    def addfun(self):
        """Save the current question's inputs and advance to the next
        question, restoring any stored or session answer for it."""
        text1 = self.answerEdit.text()
        text2 = self.analysisEdit.toPlainText()
        if len(text1)==0:
            QMessageBox.about(self, "提示", '您没有填写答案!!')
        elif len(text2)==0:
            # NOTE(review): message says "答案" but it is the 解析 field
            # that is empty.
            QMessageBox.about(self, "提示", '您没有填写答案!!')
        else:
            self.a = self.a + 1
            try:
                # Raises IndexError past the last question -> except below.
                self.pa = self.files[self.a]
                self.answer.append([self.filename,text1,text2])
                self.filename = os.path.split(self.pa)[1]
                b = 0
                # Restore from the original stored answers first...
                for answer in self.answers:
                    if answer[0]==self.filename:
                        self.answerEdit.setText(answer[1])
                        self.analysisEdit.setText(answer[2])
                        self.answers.remove(answer)
                        b=1
                        break
                # ...then from answers edited this session.
                for answer in self.answer:
                    if answer[0]==self.filename:
                        self.answerEdit.setText(answer[1])
                        self.analysisEdit.setText(answer[2])
                        self.answer.remove(answer)
                        b=1
                        break
                if b==0:
                    self.answerEdit.setText("")
                    self.analysisEdit.setText("")
                pixmap = QPixmap(self.pa)  # load the next question image
                self.imagelab.setPixmap(pixmap)  # show it on the label
                self.imagelab.setScaledContents(True)  # scale image to the label
            except:
                self.a = self.a - 1
                QMessageBox.about(self, "提示", '这是最后一题了!!')

    def cutfun(self):
        """Save the current inputs and step back to the previous
        question, restoring any session answer for it."""
        text1 = self.answerEdit.text()
        text2 = self.analysisEdit.toPlainText()
        self.a = self.a - 1
        if self.a<0:
            self.a = self.a + 1
            QMessageBox.about(self, "提示", '这是第一题了!!')
        else:
            self.answer.append([self.filename, text1, text2])
            self.pa = self.files[self.a]
            self.filename = os.path.split(self.pa)[1]
            for answer in self.answer:
                if answer[0] == self.filename:
                    self.answerEdit.setText(answer[1])
                    self.analysisEdit.setText(answer[2])
                    self.answer.remove(answer)
                    break
            pixmap = QPixmap(self.pa)  # load the previous question image
            self.imagelab.setPixmap(pixmap)  # show it on the label
            self.imagelab.setScaledContents(True)  # scale image to the label

    def surefun(self):
        """If all questions are answered, re-encode the session answers
        and update the set's answer string in the per-user database."""
        a = self.a + 1
        try:
            # An existing next file means not all questions were visited.
            pa = self.files[a]
            QMessageBox.about(self, "提示", '请您把所有题目设置答案后才可以保存!!')
        except:
            text1 = self.answerEdit.text()
            text2 = self.analysisEdit.toPlainText()
            if len(text1) == 0:
                QMessageBox.about(self, "提示", '您没有填写答案!!')
            elif len(text2) == 0:
                # NOTE(review): message says "答案" but the 解析 field is empty.
                QMessageBox.about(self, "提示", '您没有填写答案!!')
            else:
                self.answer.append([self.filename, text1, text2])
                sqlpath = "../datas/database/ControllerSQ" + str(win.number) + "L.db"
                conn = sqlite3.connect(sqlpath)
                c = conn.cursor()
                # Re-encode as "file#answer#analysis" joined with '@'.
                ab = []
                for da in self.answer:
                    str5 = "#".join(da)
                    ab.append(str5)
                str5 = "@".join(ab)
                c.execute("update Filename2 set answer=(?) where Cno=(?)",
                          (str5,self.data,))
                conn.commit()
                c.close()
                conn.close()
                self.close()

    def conclefun(self):
        """Discard the session's edits and close the window."""
        self.close()

    def file_to_zip(self, path):  # zip a directory
        """Zip the directory *path* into ../datas/tupian.zip, replacing
        any existing archive; entries are stored relative to *path*."""
        filepath ='../datas/tupian' + '.zip'
        if os.path.exists(filepath):
            os.remove(filepath)
        z = zipfile.ZipFile(filepath, 'w', zipfile.ZIP_DEFLATED)
        for dirpath, dirnames, filenames in os.walk(path):
            fpath = dirpath.replace(path, '')
            fpath = fpath and fpath + os.sep or ''
            for filename in filenames:
                z.write(os.path.join(dirpath, filename), fpath + filename)
        z.close()
class Addexfilewin(QWidget):
def __init__(self,data,fname):
super(Addexfilewin, self).__init__()
self.sure = QPushButton("保存")
self.concle = QPushButton("取消")
self.cutimage = QPushButton("上一题")
self.addimage = QPushButton("下一题")
self.imagelab = QLabel()
self.answerlab = QLabel("答案")
self.answerEdit = QLineEdit()
self.analysislab = QLabel("解析")
self.analysisEdit = QTextEdit()
self.data = data
self.a = 0
self.fname = fname
self.files = glob.glob(fname + r'/*')
self.filemu = os.path.split(fname)[1]
self.answer = []
self.devise_Ui()
    def devise_Ui(self):
        """Center/modalize the window, style the widgets, show the first
        question image, and wire the buttons."""
        self.resize(800, 500)
        self.desktop = QApplication.desktop()  # screen geometry for centering
        self.screenRect = self.desktop.screenGeometry()
        self.move((self.screenRect.width() - 800) / 2, (self.screenRect.height() - 500) / 2)  # center on screen
        self.setWindowFlags(
            QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.MSWindowsFixedSizeDialogHint | QtCore.Qt.Tool)
        self.setWindowModality(QtCore.Qt.ApplicationModal)  # modal: block the parent window
        # self.setWindowFlags(Qt.WindowStaysOnTopHint)  # (always-on-top, disabled)
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # top-level layout container
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        self.answerlab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.analysislab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.sure.setStyleSheet("QPushButton{ font-family:'宋体';font-size:20px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.concle.setStyleSheet("QPushButton{ font-family:'宋体';font-size:20px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.addimage.setStyleSheet("QPushButton{ font-family:'宋体';font-size:20px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.cutimage.setStyleSheet("QPushButton{ font-family:'宋体';font-size:20px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.answerEdit.setFont(QFont("宋体", 14))
        self.analysisEdit.setFont(QFont("宋体", 14))
        self.imagelab.setMaximumSize(400,250)
        self.cutimage.setMaximumSize(80,40)
        self.addimage.setMaximumSize(80,40)
        self.answerlab.setMaximumSize(80,40)
        self.analysislab.setMaximumSize(80,40)
        self.answerEdit.setMaximumSize(250,40)
        self.analysisEdit.setMaximumSize(250,180)
        self.pa = self.files[self.a]
        self.filename = os.path.split(self.pa)[1]
        pixmap = QPixmap(self.pa)  # load the first question image
        self.imagelab.setPixmap(pixmap)  # show it on the label
        self.imagelab.setScaledContents(True)  # scale image to the label
        self.layout.addWidget(self.cutimage,0,1,1,1)
        self.layout.addWidget(self.addimage,0,9,1,1)
        self.layout.addWidget(self.answerlab,2,1,2,1)
        self.layout.addWidget(self.answerEdit,2,2,2,3)
        self.layout.addWidget(self.analysislab,4,1,2,1)
        self.layout.addWidget(self.analysisEdit,4,2,4,3)
        self.layout.addWidget(self.imagelab,3,6,4,4)
        self.layout.addWidget(self.sure, 11, 8, 1, 1)
        self.layout.addWidget(self.concle, 11, 9, 1, 1)
        self.addimage.clicked.connect(self.addfun)
        self.cutimage.clicked.connect(self.cutfun)
        self.concle.clicked.connect(self.conclefun)
        self.sure.clicked.connect(self.surefun)
def addfun(self):
text1 = self.answerEdit.text()
text2 = self.analysisEdit.toPlainText()
if len(text1)==0:
QMessageBox.about(self, "提示", '您没有填写答案!!')
elif len(text2)==0:
QMessageBox.about(self, "提示", '您没有填写答案!!')
else:
self.a = self.a + 1
try:
self.pa = self.files[self.a]
self.answer.append([self.filename,text1,text2])
self.filename = os.path.split(self.pa)[1]
b = 0
for answer in self.answer:
if answer[0]==self.filename:
self.answerEdit.setText(answer[1])
self.analysisEdit.setText(answer[2])
self.answer.remove(answer)
b=1
break
if b==0:
self.answerEdit.setText("")
self.analysisEdit.setText("")
pixmap = QPixmap(self.pa) # 按指定路径找到图片,注意路径必须用双引号包围,不能用单引号
self.imagelab.setPixmap(pixmap) # 在label上显示图片
self.imagelab.setScaledContents(True) # 让图片自适应label大小
except:
self.a = self.a - 1
QMessageBox.about(self, "提示", '这是最后一题了!!')
def cutfun(self):
text1 = self.answerEdit.text()
text2 = self.analysisEdit.toPlainText()
self.a = self.a - 1
if self.a<0:
self.a = self.a + 1
QMessageBox.about(self, "提示", '这是第一题了!!')
else:
self.answer.append([self.filename, text1, text2])
self.pa = self.files[self.a]
self.filename = os.path.split(self.pa)[1]
for answer in self.answer:
if answer[0] == self.filename:
self.answerEdit.setText(answer[1])
self.analysisEdit.setText(answer[2])
self.answer.remove(answer)
break
pixmap = QPixmap(self.pa) # 按指定路径找到图片,注意路径必须用双引号包围,不能用单引号
self.imagelab.setPixmap(pixmap) # 在label上显示图片
self.imagelab.setScaledContents(True) # 让图片自适应label大小
def surefun(self):
a = self.a + 1
try:
pa = self.files[a]
QMessageBox.about(self, "提示", '请您把所有题目设置答案后才可以保存!!')
except:
text1 = self.answerEdit.text()
text2 = self.analysisEdit.toPlainText()
if len(text1) == 0:
QMessageBox.about(self, "提示", '您没有填写答案!!')
elif len(text2) == 0:
QMessageBox.about(self, "提示", '您没有填写答案!!')
else:
self.answer.append([self.filename, text1, text2])
self.file_to_zip(self.fname)
filepath = '../datas/tupian' + '.zip'
with open(filepath, "rb") as f:
total = base64.b64encode(f.read()) # 将文件转换为字节。
f.close()
filename1 = '.zip'
sqlpath = "../datas/database/ControllerSQ" + str(win.number) + "L.db"
conn = sqlite3.connect(sqlpath)
c = conn.cursor()
c.execute("select * from Filename2")
no = len(c.fetchall())
ab = []
for da in self.answer:
str5 = "#".join(da)
ab.append(str5)
str5 = "@".join(ab)
print(str5)
c.execute("insert into Filename2 VALUES(?,?,?,?,?,?)",
('C' + str(no), self.data[0], self.data[1], self.filemu, str5, filename1,))
c.execute("insert into Filedate2 values(?,?)", ('C' + str(no), total))
conn.commit()
c.close()
conn.close()
self.close()
    def conclefun(self):
        """Cancel: discard everything and close the dialog."""
        self.close()
def file_to_zip(self, path): # 将文件夹压缩为压缩包。
filepath ='../datas/tupian' + '.zip'
if os.path.exists(filepath):
os.remove(filepath)
z = zipfile.ZipFile(filepath, 'w', zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(path):
fpath = dirpath.replace(path, '')
fpath = fpath and fpath + os.sep or ''
for filename in filenames:
z.write(os.path.join(dirpath, filename), fpath + filename)
z.close()
#添加课程
class AddCourse(QWidget):
    """Dialog for creating a new course.

    Shows an auto-generated course code, a name field and a selectable
    cover image; on confirmation the course is written to the Information
    database and the main window's course list is rebuilt.
    """
    def __init__(self):
        super(AddCourse, self).__init__()
        self.sure = QPushButton("确认")
        self.concle = QPushButton("取消")
        self.courselab = QLabel("课程码:")
        self.namelab = QLabel("课程名:")
        self.chang_image = QPushButton("换一张")
        self.courselab2 = QLabel()
        self.tupian = QLabel()
        self.nameEdit = QLineEdit()
        self.devise_Ui()
    def devise_Ui(self):
        """Build and lay out the dialog widgets, then wire up signals."""
        self.resize(800, 500)
        self.desktop = QApplication.desktop()  # query the screen geometry
        self.screenRect = self.desktop.screenGeometry()
        self.move((self.screenRect.width() - 800) / 2, (self.screenRect.height() - 500) / 2)  # center the window on screen
        self.setWindowFlags(
            QtCore.Qt.WindowCloseButtonHint | QtCore.Qt.MSWindowsFixedSizeDialogHint | QtCore.Qt.Tool)
        self.setWindowModality(QtCore.Qt.ApplicationModal)  # stay on top; parent window is blocked
        # self.setWindowFlags(Qt.WindowStaysOnTopHint)  # keep window always on top
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.layout.setContentsMargins(100, 0, 0, 0)
        self.win = QWidget()
        self.win.setLayout(self.layout)  # install the top-level layout
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        self.courselab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.namelab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.sure.setStyleSheet("QPushButton{ font-family:'宋体';font-size:20px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.concle.setStyleSheet("QPushButton{ font-family:'宋体';font-size:20px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.chang_image.setStyleSheet("QPushButton{ font-family:'宋体';font-size:20px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.nameEdit.setPlaceholderText("请输入课程名")
        self.nameEdit.setFont(QFont("宋体", 14))  # set the line edit's font and size
        self.chang_image.setMaximumSize(70, 40)
        self.sure.setMaximumSize(60, 40)
        self.concle.setMaximumSize(60, 40)
        self.courselab.setMaximumSize(100, 40)
        self.namelab.setMaximumSize(100, 40)
        self.nameEdit.setMaximumSize(200, 40)
        self.tupian.setMaximumSize(250, 250)
        self.courselab2.setMaximumSize(200, 40)
        self.courselab2.setStyleSheet(
            "QLabel{color:rgb(125,175,250);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.layout.addWidget(self.tupian, 0, 0, 5, 5)
        self.layout.addWidget(self.chang_image, 5, 1, 1, 1)
        self.layout.addWidget(self.courselab, 1, 6, 1, 1)
        self.layout.addWidget(self.courselab2, 1, 7, 1, 3)
        self.layout.addWidget(self.namelab, 3, 6, 1, 1)
        self.layout.addWidget(self.nameEdit, 3, 7, 1, 3)
        self.layout.addWidget(self.sure, 6, 8, 1, 1)
        self.layout.addWidget(self.concle, 6, 9, 1, 1)
        self.image()
        self.sure.clicked.connect(self.sure_fun)
        self.chang_image.clicked.connect(self.chang_fun)
        self.concle.clicked.connect(self.conclefun)
    def image(self):
        """Generate the next course code (<year><row count>) and show the
        default cover image.
        NOTE(review): the sqlite connection opened here is never closed."""
        conn = sqlite3.connect('../datas/database/Information.db')
        c = conn.cursor()
        c.execute("select * from Course")
        b = len(c.fetchall())
        year = datetime.date.today().year
        self.Cno = str(year) + str(b)
        self.courselab2.setText(self.Cno)
        self.image_path = "../datas/image/a7.jpeg"
        self.file = os.path.splitext(self.image_path)[1]
        self.tupian.setPixmap(QPixmap(self.image_path))
        self.tupian.setScaledContents(True)  # scale the image to fit the label
        QApplication.processEvents()
    def chang_fun(self):
        """Let the user pick a different cover image (.jpg); fall back to
        the default when the dialog is cancelled."""
        path, _ = QFileDialog.getOpenFileName(self, '请选择文件',
                                              '/', 'image(*.jpg)')
        if path:
            self.image_path = path
            self.file = os.path.splitext(self.image_path)[1]
            self.tupian.setPixmap(QPixmap(self.image_path))
            self.tupian.setScaledContents(True)  # scale the image to fit the label
        else:
            self.image()
    # Signal used to tell other windows / the main window to refresh.
    my_Signal = QtCore.pyqtSignal(str)
    def sendEditContent(self):
        """Emit the refresh signal with a constant payload."""
        content = '1'
        self.my_Signal.emit(content)
    def closeEvent(self, event):
        """Qt close hook: notify listeners before the dialog closes."""
        self.sendEditContent()
    def conclefun(self):
        """Cancel: close the dialog without saving."""
        self.close()
    def save_data(self):
        """Insert the new course, its cover image (base64) and the
        teacher/course link into the Information database."""
        name = self.nameEdit.text()
        filename = os.path.splitext(self.image_path)[1]
        with open(self.image_path, "rb") as f:
            total = base64.b64encode(f.read())  # encode the image as base64 bytes
        f.close()
        sqlpath = '../datas/database/Information.db'
        conn = sqlite3.connect(sqlpath)
        conn.execute("INSERT INTO Course VALUES(?,?,?)", (self.Cno, name, 0,))
        conn.execute("INSERT INTO Course_image VALUES(?,?,?)", (self.Cno, total, filename,))
        conn.execute("INSERT INTO Teacher_Course VALUES(?,?)", (win.number, self.Cno,))
        conn.commit()
        conn.close()
    def sure_fun(self):
        """Validate the name, persist the course, and rebuild the main
        window's course view."""
        if len(self.nameEdit.text()) == 0:
            QMessageBox.about(self, "提示!", "课程名不能为空!!")
            self.nameEdit.setFocus()
        else:
            self.save_data()
            self.close()
            win.splitter.widget(0).setParent(None)
            win.splitter.insertWidget(0, Class_news())
#统计信息
class Statistics_news(QFrame):
    """Statistics landing page: a toolbox listing the teacher's courses
    (via Statisticswindow) with a back button to the main Function view."""
    def __init__(self):
        super(Statistics_news, self).__init__()
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.setFrameShadow(QtWidgets.QFrame.Raised)
        self.returnbut = QPushButton("返回")
        self.lab = QLabel()
        self.devise_Ui()
    def devise_Ui(self):
        """Build and lay out the page widgets."""
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # install the top-level layout
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        self.qtool = QToolBox()
        self.qtool.setStyleSheet("QToolBox{background:rgb(150,140,150);font-weight:Bold;color:rgb(0,0,0);}")
        self.window = Statisticswindow(self)
        self.qtool.addItem(self.window, '我的课程')
        self.returnbut.setStyleSheet("QPushButton{ font-family:'宋体';font-size:18px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.returnbut.setMaximumSize(100, 40)
        self.lab.setMaximumSize(200, 40)
        self.returnbut.clicked.connect(self.returnfun)
        self.layout.addWidget(self.returnbut, 0, 0, 1, 2)
        self.layout.addWidget(self.lab, 1, 1, 1, 7)
        self.layout.addWidget(self.qtool, 2, 0, 8, 19)
    def returnfun(self):
        """Go back to the main Function page."""
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Function())
    def clicked(self, data):
        """Callback from Statisticswindow: open per-course statistics.

        data: (Cno, course name) of the double-clicked course.
        """
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Statistics_class(data))
class StatisticsWidget(QWidget):
    """One row in the course-statistics list: cover image, course name and
    enrolment count.

    data row: (Cno, name, numble, total, filename) — matches the query in
    Statisticswindow; total is the base64-encoded cover image and filename
    its extension.
    """
    def __init__(self, data):
        super(StatisticsWidget, self).__init__()
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # install the top-level layout
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        self.imagelab = QLabel()
        self.namelab = QLabel(data[1])
        self.numlab = QLabel("人数:")
        self.numlab2 = QLabel(str(data[2]))
        # Decode the stored cover image to a file so QPixmap can load it.
        self.image_path = "../datas/image/image" + data[4]
        total = base64.b64decode(data[3])
        f = open(self.image_path, 'wb')
        f.write(total)
        f.close()
        self.namelab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.numlab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.numlab2.setStyleSheet("QLabel{color:rgb(0, 255, 0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.imagelab.setMaximumSize(150, 150)
        self.namelab.setMaximumSize(400, 80)
        self.numlab.setMaximumSize(80, 40)
        self.numlab2.setMaximumSize(100, 40)
        self.imagelab.setPixmap(QPixmap(self.image_path))
        self.imagelab.setScaledContents(True)  # scale the image to fit the label
        self.layout.addWidget(self.imagelab, 0, 0, 4, 4)
        self.layout.addWidget(self.namelab, 1, 4, 3, 4)
        self.layout.addWidget(self.numlab, 3, 8, 1, 1)
        self.layout.addWidget(self.numlab2, 3, 9, 1, 2)
class Statisticswindow(QListWidget):
    """List of the logged-in teacher's courses; double-clicking a row asks
    the owning frame (``dow``) to open that course's statistics page."""
    def __init__(self, dow):
        super(Statisticswindow, self).__init__()
        self.dow = dow  # parent frame providing the clicked() callback
        self.doubleClicked.connect(self.opencourse)
        conn = sqlite3.connect('../datas/database/Information.db')
        c = conn.cursor()
        c.execute("select Course.Cno,name,numble,total,filename \
        from Course,Course_image,Teacher_Course \
        where Course.Cno=Course_image.Cno and Course.Cno=Teacher_Course.Cno \
        and number=(?)", (win.number,))
        self.datas = c.fetchall()
        # Bug fix: cursor and connection were never closed, leaking a
        # database handle every time this window was built.
        c.close()
        conn.close()
        for data in self.datas:
            item = QListWidgetItem(self)
            item.setSizeHint(QSize(800, 150))
            item.setBackground(QColor(240, 240, 240))
            self.setItemWidget(item, StatisticsWidget(data))
    def opencourse(self):
        """Open statistics for the double-clicked course (passes (Cno, name))."""
        index = self.currentIndex().row()
        if index > -1:
            da = self.datas[index][:2]
            self.dow.clicked(da)
class Statistics_class(QFrame):
    """Per-course statistics page: lists each student's learning figures
    for one course, searchable by student number or name.

    data: (Cno, course name) of the course being inspected.
    """
    def __init__(self,data):
        super(Statistics_class, self).__init__()
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.setFrameShadow(QtWidgets.QFrame.Raised)
        self.returnbut = QPushButton("返回")
        self.select_query = QComboBox()
        self.query = QLineEdit()
        self.search = QPushButton("搜索")
        self.data = data
        self.lab = QLabel()
        self.devise_Ui()
    def devise_Ui(self):
        """Build and lay out the page widgets."""
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # install the top-level layout
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        self.qtool = QToolBox()
        self.qtool.setStyleSheet("QToolBox{background:rgb(150,140,150);font-weight:Bold;color:rgb(0,0,0);}")
        self.window = classwindow(self,self.data[0])
        self.qtool.addItem(self.window, "学生学习信息")
        self.select_query.addItems(['号码','姓名'])
        self.select_query.setStyleSheet("QComboBox{font-family:'宋体';font-size: 18px;}")
        self.search.setStyleSheet("QPushButton{ font-family:'宋体';font-size:20px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.returnbut.setStyleSheet("QPushButton{ font-family:'宋体';font-size:18px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.query.setPlaceholderText("请输入搜索内容")
        self.query.setFont(QFont("宋体", 14))  # set the line edit's font and size
        self.returnbut.setMaximumSize(100, 40)
        self.select_query.setMaximumSize(80, 40)
        self.query.setMaximumSize(350, 40)
        self.search.setMaximumSize(80, 40)
        self.lab.setMaximumSize(200, 40)
        self.returnbut.clicked.connect(self.returnfun)
        self.search.clicked.connect(self.chang_fun)
        self.layout.addWidget(self.select_query, 0, 10, 1, 1)
        self.layout.addWidget(self.query, 0, 11, 1, 5)
        self.layout.addWidget(self.search, 0, 16, 1, 1)
        self.layout.addWidget(self.returnbut, 0, 0, 1, 2)
        self.layout.addWidget(self.lab, 1, 1, 1, 7)
        self.layout.addWidget(self.qtool, 2, 0, 8, 19)
    def returnfun(self):
        """Go back to the course-list statistics page."""
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Statistics_news())
    def clicked(self, data1,data2):
        """Callback from the list widgets: open one student's report.

        data1: (number, name, jointime, time) row slice; data2: course Cno.
        """
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Usr_report(data1,data2))
    def chang_fun(self):
        """Search enrolled students by number (exact) or name (LIKE) and
        swap the toolbox page for the filtered list.
        NOTE(review): the sqlite connections opened here are never closed,
        and the name-search "not found" message also says 号码 (number)."""
        if (self.select_query.currentText() == '号码'):
            no = self.query.text()
            sqlpath = '../datas/database/Information.db'
            conn = sqlite3.connect(sqlpath)
            c = conn.cursor()
            c.execute("select * from Join_Course where Cno=(?) and number=(?)",(self.data[0],no,))
            self.datas = c.fetchall()
            if len(self.datas)>0:
                self.qtool.removeItem(0)
                self.coursewin = classwindow2(self,self.data[0],no)
                self.qtool.addItem(self.coursewin, '查找的学生')
                self.query.setText("")
            else:
                QMessageBox.about(self, "抱歉!", "没有找到号码为:'"+no+"'的信息!!!")
        elif (self.select_query.currentText() == '姓名'):
            no = self.query.text()
            sqlpath = '../datas/database/Information.db'
            conn = sqlite3.connect(sqlpath)
            c = conn.cursor()
            c.execute("select * from Join_Course,User_date where\
             Join_Course.number=User_date.number and Cno=(?) and name like (?)", (self.data[0],'%'+no+'%',))
            self.datas = c.fetchall()
            if len(self.datas)>0:
                self.qtool.removeItem(0)
                self.coursewin = classwindow3(self,self.data[0],'%'+no+'%')
                self.qtool.addItem(self.coursewin, '查找的学生')
                self.query.setText("")
            else:
                QMessageBox.about(self, "抱歉!", "没有找到号码为:'"+no+"'的信息!!!")
class classWidget(QWidget):
    """One row in a per-course student list: avatar, name, days enrolled,
    total study time and average study time per day.

    data row: (number, name, jointime, time, total, filename) — matches the
    classwindow queries; time is in seconds, total is the base64-encoded
    avatar image and filename its extension.
    """
    def __init__(self, data):
        super(classWidget, self).__init__()
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # install the top-level layout
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        self.imagelab = QLabel()
        self.namelab = QLabel(data[1])
        self.courselab = QLabel("加课天数:")
        self.joinlab = QLabel("学习用时:")
        self.numlab = QLabel("平均学习:")
        # Days since the student joined the course (inclusive of today).
        new = datetime.datetime.now()
        abcd = '%Y-%m-%d %H:%M:%S'
        a1 = datetime.datetime.strptime(data[2], abcd)
        a = (new - a1).days + 1
        self.courselab2 = QLabel(str(a)+" 天")
        # Format total study seconds as H时M分 or M分.
        # NOTE(review): the threshold `> 1` means exactly one hour is still
        # shown as minutes — confirm whether `>= 1` was intended.
        ab = data[3]
        if (ab / 3600) > 1:
            ac = str(int(ab / 3600)) + '时' + str(round((ab / 3600 - int(ab / 3600)) * 60, 2)) + "分"
        else:
            ac = str(round(ab / 60, 2)) + "分"
        self.joinlab2 = QLabel(ac)
        # Average seconds per enrolled day, formatted the same way.
        ad = ab / a
        if (ad / 3600) > 1:
            ae = str(int(ad / 3600)) + '时' + str(round((ad / 3600 - int(ad / 3600)) * 60, 2)) + "分"
        else:
            ae = str(round(ad / 60, 2)) + "分"
        self.numlab2 = QLabel(ae)
        # Decode the stored avatar to a file so QPixmap can load it.
        self.image_path = "../datas/image/image" + data[5]
        total = base64.b64decode(data[4])
        f = open(self.image_path, 'wb')
        f.write(total)
        f.close()
        self.namelab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:28px;font-weight:Bold;font-family:Arial;}")
        self.joinlab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:18px;font-weight:Bold;font-family:Arial;}")
        self.numlab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:18px;font-weight:Bold;font-family:Arial;}")
        self.courselab.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:18px;font-weight:Bold;font-family:Arial;}")
        self.courselab2.setStyleSheet("QLabel{color:rgb(0, 255, 0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.joinlab2.setStyleSheet("QLabel{color:rgb(0, 255, 0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.numlab2.setStyleSheet("QLabel{color:rgb(0, 255, 0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.imagelab.setMaximumSize(150, 150)
        self.namelab.setMaximumSize(400, 80)
        self.courselab.setMaximumSize(80, 40)
        self.joinlab.setMaximumSize(80, 40)
        self.numlab.setMaximumSize(80, 40)
        self.courselab2.setMaximumSize(100, 40)
        self.joinlab2.setMaximumSize(100, 40)
        self.numlab2.setMaximumSize(100, 40)
        self.imagelab.setPixmap(QPixmap(self.image_path))
        self.imagelab.setScaledContents(True)  # scale the image to fit the label
        self.layout.addWidget(self.imagelab, 0, 0, 4, 4)
        self.layout.addWidget(self.namelab, 1, 4, 3, 4)
        self.layout.addWidget(self.courselab, 1, 8, 1, 1)
        self.layout.addWidget(self.joinlab, 2, 8, 1, 1)
        self.layout.addWidget(self.numlab, 3, 8, 1, 1)
        self.layout.addWidget(self.courselab2, 1, 9, 1, 2)
        self.layout.addWidget(self.joinlab2, 2, 9, 1, 2)
        self.layout.addWidget(self.numlab2, 3, 9, 1, 2)
class classwindow(QListWidget):
    """List of all students enrolled in one course, one classWidget row
    each; double-click opens that student's learning report."""
    def __init__(self, dow, data1):
        super(classwindow, self).__init__()
        self.dow = dow  # parent frame providing the clicked() callback
        self.data = data1  # course Cno
        self.doubleClicked.connect(self.opencourse)
        conn = sqlite3.connect('../datas/database/Information.db')
        c = conn.cursor()
        c.execute("select Coursetime.number,name,jointime,time,total,filename \
        from Coursetime,Join_Course,User_date,User_image \
        where Join_Course.number=Coursetime.number and \
        Coursetime.number=User_date.number and Coursetime.number=User_image.number \
        and Coursetime.Cno=(?) and Join_Course.Cno=(?)", (data1, data1,))
        self.datas = c.fetchall()
        # Bug fix: cursor and connection were never closed, leaking a
        # database handle every time this list was built.
        c.close()
        conn.close()
        for data in self.datas:
            item = QListWidgetItem(self)
            item.setSizeHint(QSize(800, 150))
            item.setBackground(QColor(240, 240, 240))
            self.setItemWidget(item, classWidget(data))
    def opencourse(self):
        """Open the report of the double-clicked student (row slice + Cno)."""
        index = self.currentIndex().row()
        if index > -1:
            da = self.datas[index][:4]
            self.dow.clicked(da, self.data)
class classwindow2(QListWidget):
    """Like classwindow, but filtered to a single student number
    (result of a 号码 search)."""
    def __init__(self, dow, data1, data2):
        super(classwindow2, self).__init__()
        self.dow = dow  # parent frame providing the clicked() callback
        self.data1 = data1  # course Cno
        self.doubleClicked.connect(self.opencourse)
        conn = sqlite3.connect('../datas/database/Information.db')
        c = conn.cursor()
        c.execute("select Coursetime.number,name,jointime,time,total,filename \
        from Coursetime,Join_Course,User_date,User_image \
        where Join_Course.number=Coursetime.number and \
        Coursetime.number=User_date.number and Coursetime.number=User_image.number \
        and Coursetime.Cno=(?) and Join_Course.Cno=(?) \
        and Join_Course.number=(?)", (data1, data1, data2,))
        self.datas = c.fetchall()
        # Bug fix: cursor and connection were never closed, leaking a
        # database handle every time this list was built.
        c.close()
        conn.close()
        for data in self.datas:
            item = QListWidgetItem(self)
            item.setSizeHint(QSize(800, 150))
            item.setBackground(QColor(240, 240, 240))
            self.setItemWidget(item, classWidget(data))
    def opencourse(self):
        """Open the report of the double-clicked student (row slice + Cno)."""
        index = self.currentIndex().row()
        if index > -1:
            da = self.datas[index][:4]
            self.dow.clicked(da, self.data1)
class classwindow3(QListWidget):
    """Like classwindow, but filtered by a name LIKE pattern
    (result of a 姓名 search; data2 already carries the %...% wildcards)."""
    def __init__(self, dow, data1, data2):
        super(classwindow3, self).__init__()
        self.dow = dow  # parent frame providing the clicked() callback
        self.data1 = data1  # course Cno
        self.doubleClicked.connect(self.opencourse)
        conn = sqlite3.connect('../datas/database/Information.db')
        c = conn.cursor()
        c.execute("select Coursetime.number,name,jointime,time,total,filename \
        from Coursetime,Join_Course,User_date,User_image \
        where Join_Course.number=Coursetime.number and \
        Coursetime.number=User_date.number and Coursetime.number=User_image.number \
        and Coursetime.Cno=(?) and Join_Course.Cno=(?) \
        and User_date.name like (?)", (data1, data1, data2,))
        self.datas = c.fetchall()
        # Bug fix: cursor and connection were never closed, leaking a
        # database handle every time this list was built.
        c.close()
        conn.close()
        for data in self.datas:
            item = QListWidgetItem(self)
            item.setSizeHint(QSize(800, 150))
            item.setBackground(QColor(240, 240, 240))
            self.setItemWidget(item, classWidget(data))
    def opencourse(self):
        """Open the report of the double-clicked student (row slice + Cno)."""
        index = self.currentIndex().row()
        if index > -1:
            da = self.datas[index][:4]
            self.dow.clicked(da, self.data1)
class Usr_report(QFrame):
    """Detailed learning report for one student in one course: summary
    figures (days, total and average study time) plus a per-session table.

    data1: (number, name, jointime, time) student row slice; data2: course Cno.
    """
    def __init__(self,data1,data2):
        super(Usr_report, self).__init__()
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.setFrameShadow(QtWidgets.QFrame.Raised)
        self.da1 = data1
        self.da2 = data2
        self.returnBtn = QPushButton("返回")
        self.day = QLabel("学习天数:")
        self.learntime = QLabel("学习总时长:")
        self.avglearn = QLabel("日均学习:")
        self.table = QTableWidget()
        self.devise_Ui()
        self.information()
    def devise_Ui(self):
        """Build and lay out the static page widgets."""
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # install the top-level layout
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        # self.layout.setContentsMargins (300, 0, 0, 0)
        self.returnBtn.setStyleSheet("QPushButton{ font-family:'宋体';font-size:22px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.day.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:'宋体';}")
        self.learntime.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:'宋体';}")
        self.avglearn.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:'宋体';}")
        self.returnBtn.setMaximumSize(60, 40)
        self.day.setMaximumSize(120, 40)
        self.learntime.setMaximumSize(130, 40)
        self.avglearn.setMaximumSize(120, 40)
        self.returnBtn.clicked.connect(self.return_fun)
        self.layout.addWidget(self.returnBtn, 0, 0, 1, 1)
        self.layout.addWidget(self.day, 0, 3, 1, 1)
        self.layout.addWidget(self.learntime, 0, 6, 1, 1)
        self.layout.addWidget(self.avglearn, 0, 9, 1, 1)
    def information(self):
        """Compute the summary figures and fill the per-session table from
        the student's personal database (SQ<number>L.db).
        NOTE(review): the sqlite connection opened here is never closed."""
        # Days since joining (inclusive of today).
        new = datetime.datetime.now()
        abcd = '%Y-%m-%d %H:%M:%S'
        a1 = datetime.datetime.strptime(self.da1[2], abcd)
        a = (new - a1).days + 1
        self.day1 = QLabel(str(a) + "天")
        # Total study seconds formatted as H时M分 or M分.
        ab = self.da1[3]
        if (ab / 3600) > 1:
            ac = str(int(ab / 3600)) + '时' + str(round((ab / 3600 - int(ab / 3600)) * 60, 2)) + "分"
        else:
            ac = str(round(ab / 60, 2)) + "分"
        self.learntime1 = QLabel(ac)
        # Average seconds per enrolled day, formatted the same way.
        ad = ab / a
        if (ad / 3600) > 1:
            ae = str(int(ad / 3600)) + '时' + str(round((ad / 3600 - int(ad / 3600)) * 60, 2)) + "分"
        else:
            ae = str(round(ad / 60, 2)) + "分"
        self.avglearn1 = QLabel(ae)
        self.day1.setStyleSheet("QLabel{color:rgb(0,255,0);font-size:20px;font-weight:Bold;font-family:'宋体';}")
        self.learntime1.setStyleSheet(
            "QLabel{color:rgb(0,255,0);font-size:20px;font-weight:Bold;font-family:'宋体';}")
        self.avglearn1.setStyleSheet("QLabel{color:rgb(0,255,0);font-size:20px;font-weight:Bold;font-family:'宋体';}")
        self.table.setStyleSheet("QTableWidget{background-color:rgb(255,255,255);font:13pt '宋体';font-weight:Bold;};");
        self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
        self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)
        sqlpath = "../datas/database/SQ" + self.da1[0] + "L.db"
        conn = sqlite3.connect(sqlpath)
        c = conn.cursor()
        c.execute("select * from User_data where Cno=(?)",(self.da2,))
        data = c.fetchall()
        b = len(data)
        self.table.setRowCount(b)
        self.table.setColumnCount(5)
        self.table.setHorizontalHeaderLabels(['开始学习时间', '文件类型', '文件名', '结束时间', '学习时长'])
        i = 0
        for variate in data:
            self.table.setItem(i, 0, QTableWidgetItem(variate[0]))
            self.table.setItem(i, 1, QTableWidgetItem(variate[2]))
            self.table.setItem(i, 2, QTableWidgetItem(variate[3]))
            self.table.setItem(i, 3, QTableWidgetItem(variate[4]))
            # Session duration = end - start.
            # NOTE(review): `min` shadows the builtin of the same name.
            min = (datetime.datetime.strptime(variate[4], abcd) - datetime.datetime.strptime(variate[0], abcd)).seconds
            if (min / 3600) > 1:
                ac = str(int(min / 3600)) + '时' + str(round((min / 3600 - int(min / 3600)) * 60, 2)) + "分"
            else:
                ac = str(round(min / 60, 2)) + "分"
            self.table.setItem(i, 4, QTableWidgetItem(ac))
            i += 1
        self.day1.setMaximumSize(150, 40)
        self.learntime1.setMaximumSize(150, 40)
        self.avglearn1.setMaximumSize(150, 40)
        self.layout.addWidget(self.day1, 0, 4, 1, 1)
        self.layout.addWidget(self.learntime1, 0, 7, 1, 1)
        self.layout.addWidget(self.avglearn1, 0, 10, 1, 1)
        self.layout.addWidget(self.table, 3, 0, 1, 12)
    def return_fun(self):
        """Go back to the per-course student list."""
        da = [self.da2,self.da1[1]]
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Statistics_class(da))
# 管理员我的界面
class Controller_myself(QFrame):
    """Administrator profile page: shows the account's personal data and
    avatar, with buttons to edit the profile, change the avatar, change
    the password, or log out."""
    def __init__(self):
        super(Controller_myself, self).__init__()
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.setFrameShadow(QtWidgets.QFrame.Raised)
        self.returnBtn = QPushButton("返回")
        self.ExditBtn = QPushButton("编辑")
        self.chang_image = QPushButton("换头像")
        self.name = QLabel("姓名:")
        self.sex = QLabel("性别:")
        self.number = QLabel("手机号:")
        self.year = QLabel("出生年月:")
        self.school = QLabel("学校:")
        self.amend = QPushButton("修改密码")
        self.withdraw = QPushButton('退出')
        self.tupian = QLabel()
        self.devise_ui()
    def devise_ui(self):
        """Load the profile from the database, then build and lay out the
        page widgets."""
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # install the top-level layout
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        self.layout.setContentsMargins(100, 0, 0, 0)
        sqlpath = '../datas/database/Information.db'
        conn = sqlite3.connect(sqlpath)
        c = conn.cursor()
        c.execute("select * from Controller_data where number=(?)",(win.number,))
        self.data = c.fetchall()[0]
        c.close()
        conn.close()
        # Show the profile values read from the database.
        self.name1 = QLabel(self.data[1])
        self.sex1 = QLabel(self.data[3])
        self.number1 = QLabel(self.data[0])
        self.year1 = QLabel(self.data[2][0:4] + "年 " + self.data[2][5:] + ' 月')
        self.school1 = QLabel(self.data[4])
        self.returnBtn.setMaximumSize(60, 40)
        self.ExditBtn.setMaximumSize(60, 40)
        self.name.setMaximumSize(70, 40)
        self.sex.setMaximumSize(70, 40)
        self.number.setMaximumSize(70, 40)
        self.school.setMaximumSize(70, 40)
        self.year.setMaximumSize(100, 40)
        self.name1.setMaximumSize(350, 40)
        self.sex1.setMaximumSize(350, 40)
        self.number1.setMaximumSize(350, 40)
        self.school1.setMaximumSize(350, 40)
        self.year1.setMaximumSize(350, 40)
        self.amend.setMaximumSize(500, 40)
        self.withdraw.setMaximumSize(500, 40)
        self.chang_image.setMaximumSize(90, 40)
        self.tupian.setMaximumSize(250, 250)
        self.chang_image.setStyleSheet("QPushButton{ font-family:'宋体';font-size:22px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.name.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.year.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.sex.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.number.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.school.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.amend.setStyleSheet("QPushButton{ font-family:'宋体';font-size:22px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.withdraw.setStyleSheet("QPushButton{ font-family:'宋体';font-size:22px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.returnBtn.setStyleSheet("QPushButton{ font-family:'宋体';font-size:22px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.ExditBtn.setStyleSheet("QPushButton{ font-family:'宋体';font-size:22px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.name1.setStyleSheet("QLabel{color:rgb(255,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.sex1.setStyleSheet("QLabel{color:rgb(255,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.year1.setStyleSheet("QLabel{color:rgb(255,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.number1.setStyleSheet("QLabel{color:rgb(255,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.school1.setStyleSheet("QLabel{color:rgb(255,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.withdraw.clicked.connect(self.return_win)
        self.returnBtn.clicked.connect(self.return_fun)
        self.ExditBtn.clicked.connect(self.edit_fun)
        self.chang_image.clicked.connect(self.chang_fun)
        self.amend.clicked.connect(self.amend_fun)
        self.layout.addWidget(self.tupian, 1, 1, 4, 4)
        self.layout.addWidget(self.chang_image, 5, 2, 1, 2)
        self.layout.addWidget(self.returnBtn, 0, 0, 1, 1)
        self.layout.addWidget(self.ExditBtn, 0, 10, 1, 1)
        self.layout.addWidget(self.name, 1, 6, 1, 1)
        self.layout.addWidget(self.name1, 1, 8, 1, 6)
        self.layout.addWidget(self.year, 2, 6, 1, 1)
        self.layout.addWidget(self.year1, 2, 8, 1, 6)
        self.layout.addWidget(self.sex, 3, 6, 1, 1)
        self.layout.addWidget(self.sex1, 3, 8, 1, 6)
        self.layout.addWidget(self.number, 4, 6, 1, 1)
        self.layout.addWidget(self.number1, 4, 8, 1, 6)
        self.layout.addWidget(self.school, 5, 6, 1, 1)
        self.layout.addWidget(self.school1, 5, 8, 1, 6)
        self.layout.addWidget(self.amend, 7, 6, 1, 6)
        self.layout.addWidget(self.withdraw, 8, 6, 1, 6)
        self.image()
    def image(self):
        """Load the avatar blob from the database, decode it to a file and
        show it on the label."""
        sqlpath = '../datas/database/Information.db'
        conn = sqlite3.connect(sqlpath)
        c = conn.cursor()
        c.execute("select * from Controller_image where number=(?)", (win.number,))
        data = c.fetchall()[0]
        c.close()
        conn.close()
        self.image_path = "../datas/image/image" + data[2]
        total = base64.b64decode(data[1])
        f = open(self.image_path, 'wb')
        f.write(total)
        f.close()
        self.tupian.setPixmap(QPixmap(self.image_path))
        self.tupian.setScaledContents(True)  # scale the image to fit the label
        QApplication.processEvents()
    def chang_fun(self):
        """Let the user pick a new avatar (.jpg) and store it (base64) in
        the database; reload the stored avatar when the dialog is cancelled."""
        path, _ = QFileDialog.getOpenFileName(self, '请选择文件',
                                              '/', 'image(*.jpg)')
        if path:
            self.file = os.path.splitext(path)[1]
            self.tupian.setPixmap(QPixmap(path))
            self.tupian.setScaledContents(True)  # scale the image to fit the label
            with open(path, "rb") as f:
                total = base64.b64encode(f.read())  # encode the image as base64 bytes
            f.close()
            sqlpath = '../datas/database/Information.db'
            conn = sqlite3.connect(sqlpath)
            conn.execute("update Controller_image set total = (?),filename = (?) where number = (?)",
                         (total, self.file, win.number))
            conn.commit()
            conn.close()
        else:
            self.image()
    def amend_fun(self):
        """Open the change-password page."""
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Controller_amend())
    def return_win(self):
        """Log out: go back to the login page."""
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Logon())
    def return_fun(self):
        """Go back to the main Function page."""
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Function())
    def edit_fun(self):
        """Open the profile-editing page."""
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Controller_informent1())
# 管理员修改密码
class Controller_amend(QFrame):
    """Change-password page for the administrator account.

    NOTE(review): passwords appear to be stored and compared in plaintext
    in the Controller table — consider hashing.
    """
    def __init__(self):
        super(Controller_amend, self).__init__()
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.setFrameShadow(QtWidgets.QFrame.Raised)
        self.usrlab = QLabel("账号:")
        self.amendlab1 = QLabel("原密码:")
        self.amendlab2 = QLabel("新密码:")
        self.amendlab3 = QLabel("确认密码:")
        self.amendedit1 = QLineEdit()
        self.amendedit2 = QLineEdit()
        self.amendedit3 = QLineEdit()
        self.sure = QPushButton("确认修改")
        self.returnBtn = QPushButton("返回")
        self.devise_ui()
    def devise_ui(self):
        """Build and lay out the page widgets and wire up signals."""
        self.usrlab1 = QLabel(win.number)
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout)  # install the top-level layout
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True)  # enable mouse tracking on the widget
        self.layout.setContentsMargins(350, 0, 0, 0)
        self.usrlab.setMaximumSize(80, 40)
        self.amendlab1.setMaximumSize(80, 40)
        self.amendlab2.setMaximumSize(80, 40)
        self.amendlab3.setMaximumSize(100, 40)
        # QLabel font colour and size.
        self.usrlab.setStyleSheet(
            "QLabel{color:rgb(100,100,100);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.usrlab1.setStyleSheet(
            "QLabel{color:rgb(100,100,100);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.amendlab1.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.amendlab2.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.amendlab3.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:20px;font-weight:Bold;font-family:Arial;}")
        self.usrlab1.setMaximumSize(420, 40)
        self.amendedit1.setMaximumSize(420, 40)
        self.amendedit2.setMaximumSize(420, 40)
        self.amendedit3.setMaximumSize(420, 40)
        self.sure.setMaximumSize(420, 40)
        self.amendedit1.setPlaceholderText("请输入原密码")
        self.amendedit2.setPlaceholderText("请输入新密码")
        self.amendedit3.setPlaceholderText("请重新输入密码")
        self.amendedit1.setFont(QFont("宋体", 16))  # set the line edit's font and size
        self.amendedit2.setFont(QFont("宋体", 16))
        self.amendedit3.setFont(QFont("宋体", 16))
        self.amendedit1.setEchoMode(QLineEdit.Password)
        self.amendedit2.setEchoMode(QLineEdit.Password)
        self.amendedit3.setEchoMode(QLineEdit.Password)
        self.sure.setStyleSheet("QPushButton{ font-family:'宋体';font-size:28px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.returnBtn.setStyleSheet("QPushButton{ font-family:'宋体';font-size:22px;color:rgb(0,0,0);}\
                                QPushButton{background-color:rgb(170,200, 50)}\
                                QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.returnBtn.setMaximumSize(60, 40)
        self.returnBtn.clicked.connect(self.return_fun)
        self.amendedit1.returnPressed.connect(self.enterPress1)
        self.amendedit2.returnPressed.connect(self.enterPress2)
        self.sure.clicked.connect(self.accept)
        self.layout.addWidget(self.returnBtn, 0, 0, 1, 1)
        self.layout.addWidget(self.usrlab, 1, 3, 1, 1)
        self.layout.addWidget(self.usrlab1, 1, 5, 1, 5)
        self.layout.addWidget(self.amendlab1, 2, 3, 1, 1)
        self.layout.addWidget(self.amendedit1, 2, 5, 1, 5)
        self.layout.addWidget(self.amendlab2, 3, 3, 1, 1)
        self.layout.addWidget(self.amendedit2, 3, 5, 1, 5)
        self.layout.addWidget(self.amendlab3, 4, 3, 1, 1)
        self.layout.addWidget(self.amendedit3, 4, 5, 1, 5)
        self.layout.addWidget(self.sure, 5, 5, 1, 5)
    def return_fun(self):
        """Go back to the profile page without changing anything."""
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Controller_myself())
    def enterPress1(self):
        """Enter in the old-password field: validate and move focus on."""
        if len(self.amendedit1.text()) == 0:
            QMessageBox.about(self, "提示!", "原密码没有填写")
            self.amendedit1.setFocus()
        else:
            self.amendedit2.setFocus()
    def enterPress2(self):
        """Enter in the new-password field: validate and move focus on."""
        if len(self.amendedit2.text()) == 0:
            QMessageBox.about(self, "提示!", "新密码框不能为空!")
            self.amendedit2.setFocus()
        else:
            self.amendedit3.setFocus()
    def accept(self):
        """Validate the three fields and, if the old password matches the
        stored one, update it and return to the profile page.
        NOTE(review): time.sleep(1) blocks the GUI event loop."""
        if len(self.amendedit1.text()) == 0:
            QMessageBox.about(self, "提示!", "原密码没有填写")
            self.amendedit1.setFocus()
        elif len(self.amendedit2.text()) == 0:
            QMessageBox.about(self, "提示!", "新密码框不能为空!")
            self.amendedit2.setFocus()
        elif len(self.amendedit3.text()) == 0:
            QMessageBox.about(self, "提示!", "新密码框不能为空!")
            self.amendedit3.setFocus()
        elif self.amendedit3.text() != self.amendedit2.text():
            QMessageBox.about(self, "提示!", "前后密码输入不一样!")
            self.amendedit3.setFocus()
        else:
            sqlpath = '../datas/database/Information.db'
            conn = sqlite3.connect(sqlpath)
            c = conn.cursor()
            c.execute("select * from Controller")
            sign = 0
            # Find this account and verify the old password before updating.
            for variate in c.fetchall():
                if variate[0] == win.number and variate[2] == self.amendedit1.text():
                    conn.execute("update Controller set password=(?) where number=(?)", (self.amendedit2.text(), variate[0],))
                    conn.commit()
                    sign = 1
                    break
            c.close()
            conn.close()
            if sign == 0:
                QMessageBox.about(self, "提示!", "原密码输入错误!!")
                self.amendedit1.setFocus()
            else:
                QMessageBox.about(self, "提示!", "修改成功!!")
                time.sleep(1)
                win.splitter.widget(0).setParent(None)
                win.splitter.insertWidget(0, Controller_myself())
# Administrator "my profile" editing pane (管理员我的编辑信息)
class Controller_informent1(QFrame):
    """Profile-editing pane for an administrator.

    Lets the logged-in administrator (identified by the module-level
    ``win.number``) edit name, birth year/month, sex and school, backed by
    the ``Controller_data`` table in Information.db.
    """
    def __init__(self):
        super(Controller_informent1, self).__init__()
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.setFrameShadow(QtWidgets.QFrame.Raised)
        # Form widgets; the captions are user-facing Chinese strings.
        self.sure = QPushButton("确认")
        self.returnBtn = QPushButton("返回")
        self.name = QLabel("姓名:")
        self.year = QLabel("出生年月")
        self.yearcb = QComboBox()
        self.monthcb = QComboBox()
        self.sex = QLabel("性别:")
        self.sexcb = QComboBox()
        self.school = QLabel("学校:")
        self.nameEdit = QLineEdit()
        # (sic) "Eiit" typo kept: other code refers to this attribute name.
        self.schoolEiit = QLineEdit()
        self.devise_ui()
    def devise_ui(self):
        """Build the grid layout and pre-fill the form from the database."""
        self.horizontalLayout = QtWidgets.QHBoxLayout(self)
        # NOTE(review): ``self.layout`` shadows QWidget.layout(); works, but a
        # different attribute name would be safer.
        self.layout = QGridLayout()
        self.win = QWidget()
        self.win.setLayout(self.layout) # install the grid as the top-level layout
        self.horizontalLayout.addWidget(self.win)
        self.win.setMouseTracking(True) # enable mouse tracking on the widget
        self.layout.setContentsMargins(300, 0, 0, 0)
        # Selectable birth years (1980-2019) and months (1-12), as strings.
        yearnb = []
        for i in range(1980, 2020):
            yearnb.append(str(i))
        monthmb = []
        for i in range(1, 13):
            monthmb.append(str(i))
        self.sex.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.returnBtn.setStyleSheet("QPushButton{ font-family:'宋体';font-size:22px;color:rgb(0,0,0);}\
            QPushButton{background-color:rgb(170,200, 50)}\
            QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.school.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.name.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.year.setStyleSheet("QLabel{color:rgb(0,0,0);font-size:22px;font-weight:Bold;font-family:Arial;}")
        self.sure.setStyleSheet("QPushButton{ font-family:'宋体';font-size:26px;color:rgb(0,0,0);}\
            QPushButton{background-color:rgb(170,200, 50)}\
            QPushButton:hover{background-color:rgb(50, 170, 200)}")
        self.nameEdit.setFont(QFont("宋体", 14)) # set the QLineEdit font family and size
        self.schoolEiit.setFont(QFont("宋体", 14)) # set the QLineEdit font family and size
        # Fixed maximum sizes keep the form compact inside the grid.
        self.name.setMaximumSize(50, 40)
        self.school.setMaximumSize(50, 40)
        self.returnBtn.setMaximumSize(60, 40)
        self.year.setMaximumSize(95, 40)
        self.sex.setMaximumSize(50, 40)
        self.nameEdit.setMaximumSize(420, 40)
        self.schoolEiit.setMaximumSize(420, 40)
        self.sure.setMaximumSize(420, 40)
        self.sexcb.setMaximumSize(420, 40)
        self.yearcb.setMaximumSize(220, 40)
        self.monthcb.setMaximumSize(175, 40)
        self.sexcb.setStyleSheet("QComboBox{font-family:'宋体';font-size: 18px;}")
        self.yearcb.setStyleSheet("QComboBox{font-family:'宋体';font-size: 18px;}")
        self.monthcb.setStyleSheet("QComboBox{font-family:'宋体';font-size: 18px;}")
        self.sexcb.addItems(['男', '女'])
        # Load the current profile row for this administrator.
        sqlpath = '../datas/database/Information.db'
        conn = sqlite3.connect(sqlpath)
        c = conn.cursor()
        c.execute("select * from Controller_data where number=(?)", (win.number,))
        # Row layout: (number, name, birthday "YYYY-M", sex, school).
        self.data = c.fetchall()[0]
        c.close()
        conn.close()
        self.sexcb.setCurrentText(self.data[3]) # preselect the stored value
        self.yearcb.addItems(yearnb)
        self.yearcb.setCurrentText(self.data[2][0:4]) # preselect the stored year
        self.monthcb.addItems(monthmb)
        self.monthcb.setCurrentText(self.data[2][5:7]) # preselect the stored month
        self.nameEdit.setText(self.data[1])
        self.schoolEiit.setText(self.data[4])
        # Grid placement: row, column, row span, column span.
        self.layout.addWidget(self.returnBtn, 0, 0, 1, 1)
        self.layout.addWidget(self.name, 1, 3, 1, 1)
        self.layout.addWidget(self.nameEdit, 1, 4, 1, 18)
        self.layout.addWidget(self.sex, 2, 3, 1, 1)
        self.layout.addWidget(self.sexcb, 2, 4, 1, 18)
        self.layout.addWidget(self.year, 3, 3, 1, 1)
        self.layout.addWidget(self.yearcb, 3, 4, 1, 8)
        self.layout.addWidget(self.monthcb, 3, 9, 1, 7)
        self.layout.addWidget(self.school, 4, 3, 1, 1)
        self.layout.addWidget(self.schoolEiit, 4, 4, 1, 18)
        self.layout.addWidget(self.sure, 5, 4, 1, 18)
        self.sure.clicked.connect(self.connect_fun)
        self.returnBtn.clicked.connect(self.return_fun)
    def return_fun(self):
        """Discard edits and go back to the "my profile" pane."""
        win.splitter.widget(0).setParent(None)
        win.splitter.insertWidget(0, Controller_myself())
    def save_data(self):
        """Write the edited form values back to Controller_data."""
        a = self.nameEdit.text()
        # Birthday is stored as "YYYY-M" (month not zero-padded).
        b = self.yearcb.currentText() + '-' + self.monthcb.currentText()
        c = self.sexcb.currentText()
        d = self.schoolEiit.text()
        sqlpath = '../datas/database/Information.db'
        conn = sqlite3.connect(sqlpath)
        conn.execute("update Controller_data set name =(?),birthday=(?),sex=(?),school=(?) where number=(?)",
                     (a, b, c, d, win.number))
        conn.commit()
        conn.close()
    def connect_fun(self):
        """"Confirm" clicked: persist the edits and return to the profile pane."""
        win.splitter.widget(0).setParent(None)
        self.save_data()
        Controller_myself().devise_ui()
        win.splitter.insertWidget(0, Controller_myself())
# Create the databases that store user information (创建保存用户信息的数据库)
def found_sql():
    """Create the data folders and the SQLite schemas used by the application.

    Idempotent: folders and tables are only created when missing.  The
    original implementation wrapped every CREATE TABLE in a bare
    ``try/except: pass``; ``CREATE TABLE IF NOT EXISTS`` keeps the same
    "already exists" tolerance without hiding every other error as well.
    """
    # On-disk layout: database files, picture uploads, document uploads.
    for folder in ('../datas/database', '../datas/tupian', '../datas/wen'):
        if not os.path.exists(folder):
            os.makedirs(folder)

    # ---- Information.db: accounts, profiles, avatars, courses ----
    information_ddl = [
        # super-administrator accounts: number / usrname / password
        "CREATE TABLE IF NOT EXISTS SuperController(number text,usrname text,password text)",
        # administrator accounts
        "CREATE TABLE IF NOT EXISTS Controller(number text,usrname text,password text)",
        # administrator profile: number / name / birthday / sex / school
        "CREATE TABLE IF NOT EXISTS Controller_data(number text,name text,birthday text,sex text,school text)",
        # administrator avatar: number / image blob / file extension
        "CREATE TABLE IF NOT EXISTS Controller_image(number text,total LONGBLOB,filename text)",
        # second administrator tier, same shapes as the three tables above
        "CREATE TABLE IF NOT EXISTS Controller2(number text,usrname text,password text)",
        "CREATE TABLE IF NOT EXISTS Controller_data2(number text,name text,birthday text,sex text,school text)",
        "CREATE TABLE IF NOT EXISTS Controller_image2(number text,total LONGBLOB,filename text)",
        # user accounts / avatars / profile (profile adds a grade column)
        "CREATE TABLE IF NOT EXISTS User(number text,usrname text,password text)",
        "CREATE TABLE IF NOT EXISTS User_image(number text,total LONGBLOB,filename text)",
        "CREATE TABLE IF NOT EXISTS User_date(number text,name text,birthday text,sex text,school text, grade text)",
        # study statistics: registration time, days registered, total study
        # time, last login time
        "CREATE TABLE IF NOT EXISTS Student_date(number text,time text,logonday int,stude_day double,lasttime text)",
        # per-course study time
        "CREATE TABLE IF NOT EXISTS Coursetime(number text,Cno text,time double)",
        # courses and their cover images ("numble" (sic) is the member count)
        "CREATE TABLE IF NOT EXISTS Course(Cno text,name text,numble int)",
        "CREATE TABLE IF NOT EXISTS Course_image(Cno text,total LONGBLOB,filename text )",
        # teacher <-> course and user <-> course relations
        "CREATE TABLE IF NOT EXISTS Teacher_Course(number text, Cno text)",
        "CREATE TABLE IF NOT EXISTS Join_Course(number text, Cno text, jointime text)",
    ]
    _create_tables('../datas/database/Information.db', information_ddl)

    # ---- Data.db: one info/data/image table triple per grade ----
    data_ddl = []
    for grade in ('First', 'Second', 'Three', 'Fourth', 'Fifth',
                  'Six', 'Seven', 'Eight', 'Nine'):
        # file metadata: no / study stage / grade / subject / name / extension
        data_ddl.append("CREATE TABLE IF NOT EXISTS %s_Grade(no text,level1 text,level2 text,level3 text,name text,filename text)" % grade)
        # raw file contents
        data_ddl.append("CREATE TABLE IF NOT EXISTS %s_Grade_data(no text,total LONGBLOB)" % grade)
        # images extracted from the files
        data_ddl.append("CREATE TABLE IF NOT EXISTS %s_Grade_image(no text,total LONGBLOB,filename text)" % grade)
    # fetched URLs and how many bytes each one returned
    data_ddl.append("CREATE TABLE IF NOT EXISTS successfulurl(url text,howbyte integer)")
    _create_tables('../datas/database/Data.db', data_ddl)


def _create_tables(db_path, ddl_statements):
    """Run the given DDL statements against *db_path*, closing reliably."""
    conn = sqlite3.connect(db_path)
    try:
        for ddl in ddl_statements:
            conn.execute(ddl)
        conn.commit()
    finally:
        conn.close()
# Main entry point (主函数)
if __name__ == "__main__":
    # Make sure the data folders and database schemas exist before any UI
    # code touches them.
    found_sql()
    application = QApplication(sys.argv)
    win = QUnFrameWindow()  # must stay named ``win``: panes reference it globally
    win.show()
    sys.exit(application.exec_())
| [
"315320819@qq.com"
] | 315320819@qq.com |
133a679f21c28752675651c876492307c71f99c2 | cc9820ebc602f4d41ade0f6fd5e17a90ad5fcb56 | /contrib/zmq/piped_zmq/mongrel2_processors.py | d7c902594f7138ca836baa2006e3997a4fe0b3bf | [
"MIT"
] | permissive | foundit/Piped | 2ed86e30709fd98a9245c620bd48b5795bc600d1 | 78cb485772e353622c5b939f4c1560dfe37464f6 | refs/heads/develop | 2021-01-01T05:34:22.137664 | 2017-07-13T20:46:33 | 2017-07-13T20:46:33 | 1,893,880 | 8 | 4 | null | 2015-05-29T10:19:26 | 2011-06-14T11:19:39 | Python | UTF-8 | Python | false | false | 5,025 | py | # Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors.
# Parts Copyright (c) 2010, Zed A. Shaw and Mongrel2 Project Contributors.
# See LICENSE for details.
""" Utility processors that are especially useful in ZMQ contexts. """
import json
import urlparse
import zmq
from zope import interface
from piped import util, processing
from piped.processors import base
json_encoder = util.PipedJSONEncoder()
HTTP_FORMAT = "HTTP/1.1 %(code)s %(status)s\r\n%(headers)s\r\n\r\n%(body)s"


def http_response(body, code=200, status='OK', headers=None):
    """Render a minimal HTTP/1.1 response string.

    :param body: the response body; its length becomes the Content-Length
        header. NOTE(review): len() counts characters, not bytes -- assumes
        the body is already encoded; confirm for non-ASCII unicode bodies.
    :param code: numeric status code.
    :param status: reason phrase matching *code*.
    :param headers: optional mapping of extra header names to values.

    Bug fix: the caller's *headers* dict is no longer mutated -- the previous
    implementation added ``Content-Length`` to it in place.
    """
    headers = dict(headers) if headers else dict()
    headers['Content-Length'] = len(body)
    payload = {'code': code, 'status': status, 'body': body}
    payload['headers'] = "\r\n".join('%s: %s' % (k, v) for k, v in headers.items())
    return HTTP_FORMAT % payload
def parse_netstring(ns):
    """Split one netstring ("<length>:<payload>,") off the front of *ns*.

    Returns ``(payload, remainder)``.  Raises AssertionError when the
    declared payload is not terminated by ``,``.
    """
    length, rest = ns.split(':', 1)  # renamed: the original shadowed builtin len()
    length = int(length)
    assert rest[length] == ',', "Netstring did not end in ','"
    return rest[:length], rest[length + 1:]
def parse_mongrel_http_request(msg):
    """Parse a raw mongrel2 request message into a plain dict.

    The wire format is ``"<sender> <conn_id> <path> <headers-netstring>
    <body-netstring>"`` where the headers netstring carries a JSON object.
    Returns a dict with ``sender``, ``conn_id``, ``path``, ``headers``
    (decoded JSON), ``body`` (raw), ``data`` (decoded JSON body when
    METHOD == 'JSON', otherwise empty) and ``query_string``.

    NOTE: Python 2 code -- the encode/decode dance below relies on the
    Python 2 bytes/unicode behaviour of ``urlparse.parse_qs``.
    """
    sender, conn_id, path, rest = msg.split(' ', 3)
    headers, rest = parse_netstring(rest)
    body = parse_netstring(rest)[0]
    headers = json.loads(headers)
    data = dict()
    if headers['METHOD'] == 'JSON':
        # mongrel2 control messages (e.g. disconnect notifications) arrive
        # with METHOD == 'JSON' and a JSON body.
        data = json.loads(body)
    # The query-string is URL-encoded, so it's all ASCII at this
    # point. But json.loads have made all strings into unicode, though
    # it's unaware of the URL-encoding. Make sure the input to
    # parse_qs is a bytestring, otherwise it gets confused.
    raw_query_string = headers.get('QUERY', u'').encode('utf8')
    # Now turn the raw query-(byte)-string into a dictionary,
    # converting the utf8-strings into unicode-objects post-parse_qs.
    query_string = dict((key.decode('utf8'), [v.decode('utf8') for v in list_of_values])
                        for (key, list_of_values) in urlparse.parse_qs(raw_query_string).items())
    # parse_qs returns a list of values for every parameter. We
    # expect most parameters to take a single value, and want those to
    # be scalars.
    for key, list_of_values in query_string.items():
        if len(list_of_values) == 1:
            query_string[key] = list_of_values[0]
    return dict(
        sender=sender,
        conn_id=conn_id,
        path=path,
        headers=headers,
        body=body,
        data=data,
        query_string=query_string
    )
class MongrelRequestToBatonParser(base.Processor):
    """Turns a raw mongrel2 request message into a baton.

    The baton carries the parsed request under ``http_request`` and a
    pre-filled response skeleton under ``http_response``.  Disconnect
    notifications produce no baton and reach no consumers.
    """
    name = 'parse-msg-as-mongrel-request'
    interface.classProvides(processing.IProcessor)

    def get_consumers(self, baton):
        # Nothing downstream should ever see a disconnect notification.
        request_type = util.dict_get_path(baton, 'http_request.data.type')
        if request_type == 'disconnect':
            return []
        return super(MongrelRequestToBatonParser, self).get_consumers(baton)

    def process(self, msg):
        request = parse_mongrel_http_request(msg)
        if request['data'].get('type') == 'disconnect':
            return
        response_skeleton = dict(uuid=request['sender'], idents=[request['conn_id']], headers=dict(), body='')
        return dict(http_request=request, http_response=response_skeleton)
class MongrelReplySender(base.Processor):
    """Sends the HTTP response found in the baton back to mongrel2.

    The response dict must contain ``uuid`` (the mongrel2 server id) and
    ``idents`` (the connection ids); every remaining key is handed to
    :func:`http_response`.
    """
    name = 'send-mongrel-reply'
    interface.classProvides(processing.IProcessor)

    def __init__(self, queue_name, response_path='http_response', close=True, *a, **kw):
        """
        :param queue_name: name of the provided zmq socket replies are sent on.
        :param response_path: baton path where the response dict is found.
        :param close: whether to follow up with an empty-body frame, which
            makes mongrel2 close the connection.
        """
        super(MongrelReplySender, self).__init__(*a, **kw)
        self.queue_name = queue_name
        self.response_path = response_path
        self.close = close

    def configure(self, runtime_environment):
        # The zmq socket is resolved by name through the dependency system.
        self.dependencies = runtime_environment.create_dependency_map(self,
            socket=dict(provider='zmq.socket.%s' % self.queue_name)
        )

    def process(self, baton):
        response = util.dict_get_path(baton, self.response_path)
        assert response is not None, "provide a response if you expect something sensible from this processor"
        message = self._make_http_response(response)
        self.dependencies.socket.send(message, flags=zmq.NOBLOCK)
        if self.close:
            response = dict(response)  # Don't empty the body of the original response dict.
            response['body'] = ''
            close_message = self._make_close_response(response)
            self.dependencies.socket.send(close_message)
        return baton

    @classmethod
    def _make_http_response(cls, response):
        """Frame the rendered HTTP response for mongrel2."""
        response = dict(response)  # the pop()s must not mutate the caller's dict
        uuid = response.pop('uuid')
        idents = response.pop('idents')
        return cls._format_frame(uuid, idents, http_response(**response))

    @classmethod
    def _make_close_response(cls, response):
        """Frame an empty message, which tells mongrel2 to close the connection."""
        response = dict(response)
        return cls._format_frame(response.pop('uuid'), response.pop('idents'), '')

    @classmethod
    def _format_frame(cls, uuid, idents, msg):
        # Shared framing, previously duplicated in both builders above:
        # mongrel2 expects "<uuid> <len>:<space-separated idents>, <msg>".
        idents = ' '.join(idents)
        payload = dict(uuid=uuid, ident_length=len(idents), idents=idents, msg=msg)
        return "%(uuid)s %(ident_length)i:%(idents)s, %(msg)s" % payload
| [
"njal@karevoll.no"
] | njal@karevoll.no |
8e88365f3bd779f18cbc22eb893965c4bb8040c2 | 2feea16ff9e2e59ac0e14a344c13b7c92004c6a8 | /vote/migrations/0001_initial.py | 0c91639392329be21d6a9df84f967971f53bbbd1 | [] | no_license | ThanHuuTuan/social-network-website | 6a5f74c00a00f8b804de81bf7e32e0ba00beb14b | 363803df2516864e4dd97a43f7d77d8c514a9c94 | refs/heads/master | 2020-09-13T10:41:15.585405 | 2018-01-14T09:03:44 | 2018-01-14T09:03:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-23 03:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ActivityVote model.

    NOTE: migration files are produced by ``makemigrations``; only edit them
    by hand when the migration has not been applied anywhere yet.
    """
    initial = True
    dependencies = [
        # Resolves against whatever model AUTH_USER_MODEL points at.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('activities', '0038_auto_20160923_0342'),
    ]
    operations = [
        migrations.CreateModel(
            name='ActivityVote',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Deleting the activity or the user cascades and removes the vote.
                ('activity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='vote_activityvote_activity', to='activities.Activity')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='vote_activityvote_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"tranphong96.hbk@gmail.com"
] | tranphong96.hbk@gmail.com |
80f72925867fbfc6e6e34f1febd2942905c3dea4 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/4/j75.py | b85c6a298bb5d546c269f02554657ff5d141d18c | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    """Print the tokens enclosed by a leading and a trailing '"' token.

    ``lineRemaining`` is a list of whitespace-split tokens.  NOTE: this is
    Python 2 code (``print`` statements).
    """
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print: drop the quote tokens and re-join with spaces
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # only the two quote tokens: print an empty line
            print
def main(fileName):
    """Interpret *fileName*: every line must start with the opcode 'j75'.

    Each valid line's remaining tokens are handed to printFunction();
    the first invalid line prints ERROR and stops processing.
    """
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'j75':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
07a1628fb7df74dbda9255ea2b962cc915a79877 | dac6aba35a341afecc573ba1b1c48f1a3eb09e00 | /test/functional/tool_wallet.py | 64e354a9c7e295c7165d671ac33424a68ce5397c | [
"MIT"
] | permissive | minblock/icountcoins | dd28fadc958245ac171ec523ec2a8b3c473b7946 | 8575ece9bed59101e0e753cb762ce6165b625dbe | refs/heads/master | 2021-05-22T02:24:49.713242 | 2020-04-04T06:32:45 | 2020-04-04T06:32:45 | 252,926,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,835 | py | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoin-wallet."""
import subprocess
import textwrap
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class ToolWalletTest(BitcoinTestFramework):
    """Functional test for the offline icountcoins-wallet tool."""
    def set_test_params(self):
        # A single node on a fresh (clean) regtest chain is enough.
        self.num_nodes = 1
        self.setup_clean_chain = True
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def bitcoin_wallet_process(self, *args):
        """Spawn the wallet tool against node 0's datadir; returns the Popen."""
        binary = self.config["environment"]["BUILDDIR"] + '/src/icountcoins-wallet' + self.config["environment"]["EXEEXT"]
        args = ['-datadir={}'.format(self.nodes[0].datadir), '-regtest'] + list(args)
        return subprocess.Popen([binary] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    def assert_raises_tool_error(self, error, *args):
        """Run the tool and require exit code 1 with exactly *error* on stderr."""
        p = self.bitcoin_wallet_process(*args)
        stdout, stderr = p.communicate()
        assert_equal(p.poll(), 1)
        assert_equal(stdout, '')
        assert_equal(stderr.strip(), error)
    def assert_tool_output(self, output, *args):
        """Run the tool and require exit code 0 with exactly *output* on stdout."""
        p = self.bitcoin_wallet_process(*args)
        stdout, stderr = p.communicate()
        assert_equal(p.poll(), 0)
        assert_equal(stderr, '')
        assert_equal(stdout, output)
    def run_test(self):
        # Invalid invocations must fail with a helpful message.
        self.assert_raises_tool_error('Invalid command: foo', 'foo')
        # `bitcoin-wallet help` is an error. Use `bitcoin-wallet -help`
        self.assert_raises_tool_error('Invalid command: help', 'help')
        self.assert_raises_tool_error('Error: two methods provided (info and create). Only one method should be provided.', 'info', 'create')
        self.assert_raises_tool_error('Error parsing command line arguments: Invalid parameter -foo', '-foo')
        self.assert_raises_tool_error('Error loading wallet.dat. Is wallet being used by other process?', '-wallet=wallet.dat', 'info')
        self.assert_raises_tool_error('Error: no wallet file at nonexistent.dat', '-wallet=nonexistent.dat', 'info')
        # stop the node to close the wallet to call info command
        self.stop_node(0)
        out = textwrap.dedent('''\
            Wallet info
            ===========
            Encrypted: no
            HD (hd seed available): yes
            Keypool Size: 2
            Transactions: 0
            Address Book: 3
        ''')
        self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
        # mutate the wallet to check the info command output changes accordingly
        self.start_node(0)
        self.nodes[0].generate(1)
        self.stop_node(0)
        out = textwrap.dedent('''\
            Wallet info
            ===========
            Encrypted: no
            HD (hd seed available): yes
            Keypool Size: 2
            Transactions: 1
            Address Book: 3
        ''')
        self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
        # `create` makes a brand-new wallet file and reports on it.
        out = textwrap.dedent('''\
            Topping up keypool...
            Wallet info
            ===========
            Encrypted: no
            HD (hd seed available): yes
            Keypool Size: 2000
            Transactions: 0
            Address Book: 0
        ''')
        self.assert_tool_output(out, '-wallet=foo', 'create')
        # Load the new wallet in the node and cross-check it via RPC.
        self.start_node(0, ['-wallet=foo'])
        out = self.nodes[0].getwalletinfo()
        self.stop_node(0)
        assert_equal(0, out['txcount'])
        assert_equal(1000, out['keypoolsize'])
        assert_equal(1000, out['keypoolsize_hd_internal'])
        assert_equal(True, 'hdseedid' in out)
if __name__ == '__main__':
    # Entry point: run the test through the framework's main() driver.
    ToolWalletTest().main()
| [
"POSTMASTER@provgn.com"
] | POSTMASTER@provgn.com |
7214f6e7f197af821c8176e654a27a8b3434cc11 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04012/s001697139.py | 922318af2943bf7e13412ce29538a186a66d538a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | W = input()
print("Yes" if all(W.count(w)%2==0 for w in W) else "No") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
158506f682d64d210cc6f22b146317634efbfb1c | 9ba5d85bc644cc586abc29b6c82047deb4caea1f | /leetcode/228.汇总区间.py | 7c3a2496c51dfce124d13f0dd5bbc357bba579da | [
"MIT"
] | permissive | Data-Designer/Leetcode-Travel | f01dda19a1e37a2ba9da42e8ecda304c73645d99 | 147cf44904ce73cd4fd1cecf33f1ac8a336b0e6f | refs/heads/master | 2023-07-30T22:54:53.101323 | 2021-09-14T04:34:08 | 2021-09-14T04:34:08 | 366,757,874 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | '''
Description:
version:
Author: Data Designer
Date: 2021-05-05 12:50:26
LastEditors: Data Designer
LastEditTime: 2021-05-05 13:21:09
'''
#
# @lc app=leetcode.cn id=228 lang=python3
#
# [228] 汇总区间
#
# @lc code=start
class Solution:
    def summaryRanges(self, nums: List[int]) -> List[str]:
        """Collapse a sorted, duplicate-free integer list into range strings.

        Consecutive runs become "start->end"; isolated values become "value".
        """
        if not nums:
            return []
        if len(nums) == 1:
            return [str(nums[0])]
        ranges = []
        total = len(nums)
        start = 0
        scan = 0
        while scan <= total - 1:
            # Advance ``scan`` while the values stay consecutive with ``start``.
            while scan <= total - 1 and nums[scan] - nums[start] == scan - start:
                scan = scan + 1
            if scan - start == 1:
                ranges.append(str(nums[start]))
            else:
                ranges.append('{}->{}'.format(nums[start], nums[scan - 1]))
            start = scan
        return ranges
# @lc code=end
| [
"zc_dlmu@163.com"
] | zc_dlmu@163.com |
0be3d2109326449942e5f67d8544cb116227a7fb | 7249970977cdbc5ffae9502278ec3a75f420b46c | /portfolio-project/portfolio/settings.py | 0f8a34cb3dfaeef86b75918c2a5ccebf0f49057e | [] | no_license | Abepena/Django-Projects | b899c72e0a73a46f8222ceddf380b52b5a6fcc1c | d4e01e45b0a0a969200a18333a4d67880282e604 | refs/heads/master | 2020-03-22T04:45:54.971020 | 2018-10-10T22:13:24 | 2018-10-10T22:13:24 | 139,519,642 | 0 | 0 | null | 2018-10-10T20:01:02 | 2018-07-03T02:45:18 | Python | UTF-8 | Python | false | false | 3,876 | py | """
Django settings for portfolio project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repo; move it to
# local_settings.py (imported at the bottom of this file) or an environment
# variable before deploying.
SECRET_KEY = 'awj=u@n*1+r^_xp!l380($3kv99o75i&%&06den0-_75r-=5xm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # project apps
    'jobs',
    'blog',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'portfolio.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'portfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
    'default': {
        # NOTE(review): ENGINE still says sqlite3 while the connection
        # parameters below are PostgreSQL-style (USER/PASSWORD/HOST/PORT are
        # ignored by sqlite); presumably this should be
        # 'django.db.backends.postgresql' -- confirm before relying on it.
        'ENGINE': 'django.db.backends.sqlite3',
        # 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        'NAME': 'portfoliodb',
        'USER': 'postgres',
        # NOTE(review): credentials are committed here; keep them in
        # local_settings.py instead.
        'PASSWORD': 'super-secret-password',
        'HOST': 'localhost',
        'PORT': '5432'
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
#Create Static Files, root , url , and directories to look for
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'portfolio/static/'),
    os.path.join(BASE_DIR, 'jobs/static/'),
]
# Media Files (Images, Video, Etc)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
#local_settings import to store settings more securely
try:
    # Caution: anything with the same name as a variable
    # in local_settings.py will be overwritten
    from .local_settings import *
except ImportError:
    # Only a missing local_settings.py is expected here; the bare ``except``
    # used previously also hid genuine errors raised *inside* that module.
    pass
| [
"pena.abe@gmail.com"
] | pena.abe@gmail.com |
d9888d76bef83c54e8dbedd20f2fe17a31bbee79 | 229a7f69999fbb5da88f01d11f22cf77af79a999 | /adobjects/eventsourcegroup.py | ef77fa9d093abebf41b8e50fd46a66c1a14061e3 | [] | no_license | DaehwanCho/facebookads_v2 | 0f86549ab83c1adef677d831c0c16529c701c364 | 5ca5476a5e5a13b21ed6e9386133ba901b926b87 | refs/heads/master | 2023-03-19T07:58:56.961368 | 2020-04-28T19:06:45 | 2020-04-28T19:06:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,158 | py | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebookads_v2.adobjects.abstractobject import AbstractObject
from facebookads_v2.adobjects.abstractcrudobject import AbstractCrudObject
from facebookads_v2.adobjects.objectparser import ObjectParser
from facebookads_v2.api import FacebookRequest
from facebookads_v2.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class EventSourceGroup(
    AbstractCrudObject,
):
    """Graph API CRUD wrapper for the EventSourceGroup node.

    NOTE: auto-generated class (see the module docstring); report issues
    against the codegen framework rather than hand-editing.
    """
    def __init__(self, fbid=None, parent_id=None, api=None):
        self._isEventSourceGroup = True
        super(EventSourceGroup, self).__init__(fbid, parent_id, api)
    class Field(AbstractObject.Field):
        # Graph API field names available on this node type.
        business = 'business'
        event_sources = 'event_sources'
        id = 'id'
        name = 'name'
    # @deprecated get_endpoint function is deprecated
    @classmethod
    def get_endpoint(cls):
        return 'event_source_groups'
    def api_create(self, parent_id, fields=None, params=None, batch=None, pending=False):
        # Creation is an edge on the owning Business node, so delegate to it.
        from facebookads_v2.adobjects.business import Business
        return Business(api=self._api, fbid=parent_id).create_event_source_group(fields, params, batch, pending)
    def api_get(self, fields=None, params=None, batch=None, pending=False):
        """GET this node. Returns the request when batched or *pending*,
        otherwise executes it immediately."""
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=EventSourceGroup,
            api_type='NODE',
            response_parser=ObjectParser(reuse_object=self),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    def api_update(self, fields=None, params=None, batch=None, pending=False):
        """POST an update to this node (supported params: event_sources, name)."""
        param_types = {
            'event_sources': 'list<string>',
            'name': 'string',
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='POST',
            endpoint='/',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=EventSourceGroup,
            api_type='NODE',
            response_parser=ObjectParser(reuse_object=self),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    def create_shared_account(self, fields=None, params=None, batch=None, pending=False):
        """POST to the /shared_accounts edge (supported param: accounts)."""
        param_types = {
            'accounts': 'list<string>',
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='POST',
            endpoint='/shared_accounts',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=EventSourceGroup,
            api_type='EDGE',
            response_parser=ObjectParser(target_class=EventSourceGroup, api=self._api),
        )
        request.add_params(params)
        request.add_fields(fields)
        if batch is not None:
            request.add_to_batch(batch)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()
    # Field name -> type mapping consumed by the response parser.
    _field_types = {
        'business': 'Business',
        'event_sources': 'list<ExternalEventSource>',
        'id': 'string',
        'name': 'string',
    }
    @classmethod
    def _get_field_enum_info(cls):
        # No enum-typed fields on this node.
        field_enum_info = {}
        return field_enum_info
| [
"ian@fitbod.me"
] | ian@fitbod.me |
52c8f2a820bda42733e06cec9bba1d815e472589 | 6223dc2e5de7921696cb34fb62142fd4a4efe361 | /.metadata/.plugins/org.eclipse.core.resources/.history/1b/a0d512aaa564001418adf2b9b78fa3c6 | e912e3611ab82e431dc342e23e802cb7fff09fcd | [] | no_license | Mushirahmed/python_workspace | 5ef477b2688e8c25b1372f546752501ee53d93e5 | 46e2ed783b17450aba29e4e2df7b656522b2b03b | refs/heads/master | 2021-03-12T19:24:50.598982 | 2015-05-25T10:23:54 | 2015-05-25T10:23:54 | 24,671,376 | 0 | 1 | null | 2015-02-06T09:27:40 | 2014-10-01T08:40:33 | Python | UTF-8 | Python | false | false | 2,485 | #!/usr/bin/env python
#
# Copyright 2014 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy as np
from gnuradio import gr
import gras
class ztransform(gras.Block):
    """GNU Radio (gras) block that filters a float32 stream one sample at a
    time with a rational transfer function H(z) = num(z)/den(z), evaluated
    as a direct-form difference equation over input/output history queues.

    NOTE(review): file uses Python 2 print statements (see work()).
    """
    def __init__(self):
        # One float32 input stream, one float32 output stream.
        gras.Block.__init__(self,
            name="ztransform",
            in_sig=[np.float32],
            out_sig=[np.float32])

    def set_parameters(self,num,den,window):
        """Parse space-separated numerator/denominator coefficient strings
        and initialize the filter state.

        num, den: strings of space-separated floats (highest order first).
        window:   stored as self.n; not used elsewhere in this class.
        """
        self.num = list(map(float,num.split(" ")))
        self.den = list(map(float,den.split(" ")))
        # NOTE(review): these print the literal text "self.num"/"self.den",
        # not the values -- presumably meant print(self.num)/print(self.den).
        print("self.num")
        print("self.den")
        self.n = window
        # Wrap coefficients in poly1d to get .c (coefficients) and .order.
        self.num = np.poly1d(self.num)
        self.den = np.poly1d(self.den)
        self.den_coeff = self.den.c
        nm_coeff = self.num.c
        #print self.den_coeff
        self.den_ord = self.den.order
        self.num_ord = self.num.order
        # Zero-pad the numerator so both coefficient arrays align by power.
        for i in range(0,self.den_ord-self.num_ord):
            nm_coeff = np.insert(nm_coeff,0,0)
        self.num_coeff = nm_coeff
        #print self.num_coeff
        # History queues for past inputs and outputs, length den_ord + 1.
        self.in_q = [0]*(self.den_ord + 1)
        self.out_q = [0]*(self.den_ord + 1)
        self.final_q = []

    def work(self, input_items, output_items):
        """Consume one input sample, emit one filtered output sample."""
        in0 = input_items[0]
        out = output_items[0]
        #print "i am in work function"
        # <+signal processing here+>
        ans1 = 0
        ans2 = 0
        # ans1: feedback term -- weighted sum of past outputs (denominator).
        for i in range(1,self.den_ord + 1):
            ans1 += self.den_coeff[i]*self.out_q[len(self.out_q)-i]
        self.in_q.append(float(in0[0]))
        #print self.in_q
        # ans2: feedforward term -- weighted sum of current/past inputs.
        for i in range(0,self.den_ord + 1):
            ans2 += self.num_coeff[i]*self.in_q[len(self.in_q)-i-1]
        #print ans2
        # Difference equation, normalized by the leading denominator coeff.
        ans = ans2 - ans1
        ans = ans/self.den_coeff[0]
        self.out_q.append(ans)
        # Slide both history windows forward by one sample.
        self.out_q.pop(0)
        self.in_q.pop(0)
        out[0] = ans
        print "OUTPUT:",out[0]
        #self.final_q.append(ans)
        self.consume(0,1)
        self.produce(0,1)
| [
"imushir@gmail.com"
] | imushir@gmail.com | |
0b675c38b7c247e3a694123ac24a2c167d6e6da1 | e413e4020617f2645f7f3ed89ec698183c17e919 | /ftkPipeline/Scriptv3/a051_RunResc8bit.py | 9aa8cd9eda49570b5bbc7048c96e0a31e58b5df4 | [] | no_license | YanXuHappygela/Farsight-latest | 5c349421b75262f89352cc05093c04d3d6dfb9b0 | 021b1766dc69138dcd64a5f834fdb558bc558a27 | refs/heads/master | 2020-04-24T13:28:25.601628 | 2014-09-30T18:51:29 | 2014-09-30T18:51:29 | 24,650,739 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,248 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import shutil
import fnmatch
import os
import subprocess
import os.path
# ---------------------------------------------------------------------------------------------------------------------------------------
# Create Folder
# ---------------------------------------------------------------------------------------------------------------------------------------
def main( FARSIGHT_BIN_EXE, LOCAL_DATASET_PATH_PARAMETERS, OUTPUT, INPUT, runRescale_log ):
    """Run `ftkMainDarpa RESCALE_8BIT` on INPUT.nrrd producing OUTPUT.nrrd,
    unless OUTPUT.nrrd already exists (idempotent pipeline step).

    Tool stdout/stderr is appended to *runRescale_log* via shell redirection,
    and the exact command line is logged afterwards.
    NOTE(review): LOCAL_DATASET_PATH_PARAMETERS is accepted but unused here.
    """
    if os.path.exists(OUTPUT+'.nrrd'):
        print "Rescale Exits already exist"
    else:
        print "Rescale does not exist"
        #runCopy_db_log = LOCAL_DATASET_PATH_LOG +'/runCopyProjections.log'
        #TEMP = FARSIGHT_BIN_EXE+'/ftkMainDarpa PROJECTION '+FILE_GFP+' '+LOCAL_DATASET_PATH_DATA_DEBUG+' > '+runCopy_db_log+' 2>&1'
        TEMP = FARSIGHT_BIN_EXE+'/ftkMainDarpa RESCALE_8BIT '+INPUT+'.nrrd '+OUTPUT+'.nrrd'+' >> '+runRescale_log+' 2>&1'
        # shell=True is required because the command embeds '>>' redirection;
        # paths are trusted pipeline-internal values, not user input.
        TEMP2 = subprocess.Popen(TEMP, shell=True)
        print 'Rescale of '+INPUT
        # Block until the external tool finishes before logging the command.
        TEMP2.communicate()
        TEMP_FILE = open(runRescale_log, 'a')
        TEMP_FILE.write('\nCOMMAND: '+TEMP+'\n')
        TEMP_FILE.close()
if __name__ == "__main__":
main( FARSIGHT_BIN_EXE, LOCAL_DATASET_PATH_PARAMETERS, OUTPUT, INPUT, runRescale_log ) | [
"xy198908@gmail.com"
] | xy198908@gmail.com |
943a2f8d43d78bf7c8120d223d6f9de88072deb3 | f5f30ff2885f946949dfbcd6f8e4bfa25dbdcb63 | /quote_balance/console_scripts.py | ffbdc0a825d5b1dc411593d98e5149e980a11e3b | [
"BSD-2-Clause"
] | permissive | edelooff/quote-balance | bae22586344550e0acd6b1ffcb4ad0462eb64b26 | 94ea1fb2b645a711ccf804ee6c69eac572f4d645 | refs/heads/master | 2020-04-02T06:52:25.804009 | 2016-08-05T22:24:49 | 2016-08-05T22:29:56 | 65,052,820 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | import argparse
import os
from . import tree_walker, check_file_balance
def arg_config():
    """Build the command-line parser and return the parsed argparse.Namespace."""
    ext_help = (
        'file extension that should be matched for directory searches. '
        'This option can be provided multiple times. If no extensions '
        'are set, the default checks only "py" extensions')
    parser = argparse.ArgumentParser()
    parser.add_argument('target', type=str, help='Target to check for balance')
    # Repeatable -e/--ext flags accumulate into args.file_exts (None if unused).
    parser.add_argument(
        '-e', '--ext',
        dest='file_exts',
        metavar='EXT',
        action='append',
        help=ext_help)
    parser.add_argument(
        '-r', '--recursive',
        action='store_true',
        help='recursively check all files in target directory')
    return parser.parse_args()
def check_file(filename):
    """Checks a single file and reports imbalanced quotes."""
    # check_file_balance yields (line, imbalance) pairs; `imbalance` is
    # joined with ' and ', so it is an iterable of quote descriptions.
    for line, imbalance in check_file_balance(filename):
        # Python 2 print statement; format: "<file>:<line> <quotes> quotes are imbalanced"
        print '{name}:{line} {imbalance} quotes are imbalanced'.format(
            name=filename, line=line, imbalance=' and '.join(imbalance))
def check_directory(directory, file_exts, recursive):
    """Check all matching files in the directory, recursive or not."""
    # tree_walker (package-level helper) yields the file paths that match
    # the requested extensions under *directory*.
    for filename in tree_walker(directory, file_exts, recursive=recursive):
        check_file(filename)
def main():
    """Console entry point: check one file, or a directory of files."""
    args = arg_config()
    target = args.target
    # A single-file target bypasses extension filtering entirely.
    if os.path.isfile(target):
        return check_file(target)
    # Default to checking only "py" files when no -e/--ext flags were given.
    file_exts = set(args.file_exts) if args.file_exts is not None else {'py'}
    return check_directory(target, file_exts, args.recursive)
| [
"elmer.delooff@gmail.com"
] | elmer.delooff@gmail.com |
291a51b21e6c5e3ce63ee64a2403c7199f465326 | 83bacfbdb7ad17cbc2fc897b3460de1a6726a3b1 | /v8_4_8/src/js/macros.py | a4370d2181ceeeedaa1c3f87890c55847886e211 | [
"Apache-2.0"
] | permissive | cool2528/miniblink49 | d909e39012f2c5d8ab658dc2a8b314ad0050d8ea | 7f646289d8074f098cf1244adc87b95e34ab87a8 | refs/heads/master | 2020-06-05T03:18:43.211372 | 2019-06-01T08:57:37 | 2019-06-01T08:59:56 | 192,294,645 | 2 | 0 | Apache-2.0 | 2019-06-17T07:16:28 | 2019-06-17T07:16:27 | null | UTF-8 | Python | false | false | 14,743 | py | # Copyright 2006-2009 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Dictionary that is passed as defines for js2c.py.
# Used for defines that must be defined for all native JS files.
define NONE = 0;
define READ_ONLY = 1;
define DONT_ENUM = 2;
define DONT_DELETE = 4;
define NEW_ONE_BYTE_STRING = true;
define NEW_TWO_BYTE_STRING = false;
# Constants used for getter and setter operations.
define GETTER = 0;
define SETTER = 1;
# For date.js.
define HoursPerDay = 24;
define MinutesPerHour = 60;
define SecondsPerMinute = 60;
define msPerSecond = 1000;
define msPerMinute = 60000;
define msPerHour = 3600000;
define msPerDay = 86400000;
define msPerMonth = 2592000000;
# Note: kDayZeroInJulianDay = ToJulianDay(1970, 0, 1).
define kInvalidDate = 'Invalid Date';
define kDayZeroInJulianDay = 2440588;
define kMonthMask = 0x1e0;
define kDayMask = 0x01f;
define kYearShift = 9;
define kMonthShift = 5;
# Limits for parts of the date, so that we support all the dates that
# ECMA 262 - 15.9.1.1 requires us to, but at the same time be sure that
# the date (days since 1970) is in SMI range.
define kMinYear = -1000000;
define kMaxYear = 1000000;
define kMinMonth = -10000000;
define kMaxMonth = 10000000;
# Safe maximum number of arguments to push to stack, when multiplied by
# pointer size. Used by Function.prototype.apply(), Reflect.apply() and
# Reflect.construct().
define kSafeArgumentsLength = 0x800000;
# 2^53 - 1
define kMaxSafeInteger = 9007199254740991;
# 2^32 - 1
define kMaxUint32 = 4294967295;
# Strict mode flags for passing to %SetProperty
define kSloppyMode = 0;
define kStrictMode = 1;
# Native cache ids.
define STRING_TO_REGEXP_CACHE_ID = 0;
# Type query macros.
#
# Note: We have special support for typeof(foo) === 'bar' in the compiler.
# It will *not* generate a runtime typeof call for the most important
# values of 'bar'.
macro IS_NULL(arg) = (arg === null);
macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
macro IS_UNDEFINED(arg) = (arg === (void 0));
macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
macro IS_OBJECT(arg) = (typeof(arg) === 'object');
macro IS_ARRAY(arg) = (%_IsArray(arg));
macro IS_DATE(arg) = (%_IsDate(arg));
macro IS_FUNCTION(arg) = (%_IsFunction(arg));
macro IS_REGEXP(arg) = (%_IsRegExp(arg));
macro IS_SIMD_VALUE(arg) = (%_IsSimdValue(arg));
macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
macro IS_MAP(arg) = (%_ClassOf(arg) === 'Map');
macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
macro IS_WEAKSET(arg) = (%_ClassOf(arg) === 'WeakSet');
macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
macro IS_SYMBOL_WRAPPER(arg) = (%_ClassOf(arg) === 'Symbol');
macro IS_BOOLEAN_WRAPPER(arg) = (%_ClassOf(arg) === 'Boolean');
macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
macro IS_DATAVIEW(arg) = (%_ClassOf(arg) === 'DataView');
macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator');
macro IS_MAP_ITERATOR(arg) = (%_ClassOf(arg) === 'Map Iterator');
macro IS_STRONG(arg) = (%IsStrong(arg));
# Macro for ECMAScript 5 queries of the type:
# "Type(O) is object."
# This is the same as being either a function or an object in V8 terminology
# (including proxies).
# In addition, an undetectable object is also included by this.
macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
# Macro for ECMAScript 5 queries of the type:
# "IsCallable(O)"
macro IS_CALLABLE(arg) = (typeof(arg) === 'function');
# Macro for ES6 CheckObjectCoercible
# Will throw a TypeError of the form "[functionName] called on null or undefined".
macro CHECK_OBJECT_COERCIBLE(arg, functionName) = if (IS_NULL(%IS_VAR(arg)) || IS_UNDEFINED(arg)) throw MakeTypeError(kCalledOnNullOrUndefined, functionName);
# Indices in bound function info retrieved by %BoundFunctionGetBindings(...).
define kBoundFunctionIndex = 0;
define kBoundThisIndex = 1;
define kBoundArgumentsStartIndex = 2;
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
macro TO_BOOLEAN(arg) = (!!(arg));
macro TO_INTEGER(arg) = (%_ToInteger(arg));
macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(arg));
macro TO_INT32(arg) = ((arg) | 0);
macro TO_UINT32(arg) = ((arg) >>> 0);
macro TO_LENGTH(arg) = (%_ToLength(arg));
macro TO_LENGTH_OR_UINT32(arg) = (FLAG_harmony_tolength ? TO_LENGTH(arg) : TO_UINT32(arg));
macro TO_LENGTH_OR_INTEGER(arg) = (FLAG_harmony_tolength ? TO_LENGTH(arg) : TO_INTEGER(arg));
macro TO_STRING(arg) = (%_ToString(arg));
macro TO_NUMBER(arg) = (%_ToNumber(arg));
macro TO_OBJECT(arg) = (%_ToObject(arg));
macro TO_PRIMITIVE(arg) = (%_ToPrimitive(arg));
macro TO_PRIMITIVE_NUMBER(arg) = (%_ToPrimitive_Number(arg));
macro TO_PRIMITIVE_STRING(arg) = (%_ToPrimitive_String(arg));
macro TO_NAME(arg) = (%_ToName(arg));
macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
macro HAS_OWN_PROPERTY(arg, index) = (%_Call(ObjectHasOwnProperty, arg, index));
macro HAS_INDEX(array, index, is_array) = ((is_array && %_HasFastPackedElements(%IS_VAR(array))) ? (index < array.length) : (index in array));
# Private names.
macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
macro HAS_PRIVATE(obj, sym) = (%HasOwnProperty(obj, sym));
macro HAS_DEFINED_PRIVATE(obj, sym) = (!IS_UNDEFINED(obj[sym]));
macro GET_PRIVATE(obj, sym) = (obj[sym]);
macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val);
# Constants. The compiler constant folds them.
define INFINITY = (1/0);
define UNDEFINED = (void 0);
# Macros implemented in Python.
python macro CHAR_CODE(str) = ord(str[1]);
# Constants used on an array to implement the properties of the RegExp object.
define REGEXP_NUMBER_OF_CAPTURES = 0;
define REGEXP_FIRST_CAPTURE = 3;
# Macros for internal slot access.
macro REGEXP_GLOBAL(regexp) = (%_RegExpFlags(regexp) & 1);
macro REGEXP_IGNORE_CASE(regexp) = (%_RegExpFlags(regexp) & 2);
macro REGEXP_MULTILINE(regexp) = (%_RegExpFlags(regexp) & 4);
macro REGEXP_STICKY(regexp) = (%_RegExpFlags(regexp) & 8);
macro REGEXP_UNICODE(regexp) = (%_RegExpFlags(regexp) & 16);
macro REGEXP_SOURCE(regexp) = (%_RegExpSource(regexp));
# We can't put macros in macros so we use constants here.
# REGEXP_NUMBER_OF_CAPTURES
macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
# Limit according to ECMA 262 15.9.1.1
define MAX_TIME_MS = 8640000000000000;
# Limit which is MAX_TIME_MS + msPerMonth.
define MAX_TIME_BEFORE_UTC = 8640002592000000;
# Gets the value of a Date object. If arg is not a Date object
# a type error is thrown.
macro CHECK_DATE(arg) = if (!%_IsDate(arg)) %_ThrowNotDateError();
macro LOCAL_DATE_VALUE(arg) = (%_DateField(arg, 0) + %_DateField(arg, 21));
macro UTC_DATE_VALUE(arg) = (%_DateField(arg, 0));
macro LOCAL_YEAR(arg) = (%_DateField(arg, 1));
macro LOCAL_MONTH(arg) = (%_DateField(arg, 2));
macro LOCAL_DAY(arg) = (%_DateField(arg, 3));
macro LOCAL_WEEKDAY(arg) = (%_DateField(arg, 4));
macro LOCAL_HOUR(arg) = (%_DateField(arg, 5));
macro LOCAL_MIN(arg) = (%_DateField(arg, 6));
macro LOCAL_SEC(arg) = (%_DateField(arg, 7));
macro LOCAL_MS(arg) = (%_DateField(arg, 8));
macro LOCAL_DAYS(arg) = (%_DateField(arg, 9));
macro LOCAL_TIME_IN_DAY(arg) = (%_DateField(arg, 10));
macro UTC_YEAR(arg) = (%_DateField(arg, 11));
macro UTC_MONTH(arg) = (%_DateField(arg, 12));
macro UTC_DAY(arg) = (%_DateField(arg, 13));
macro UTC_WEEKDAY(arg) = (%_DateField(arg, 14));
macro UTC_HOUR(arg) = (%_DateField(arg, 15));
macro UTC_MIN(arg) = (%_DateField(arg, 16));
macro UTC_SEC(arg) = (%_DateField(arg, 17));
macro UTC_MS(arg) = (%_DateField(arg, 18));
macro UTC_DAYS(arg) = (%_DateField(arg, 19));
macro UTC_TIME_IN_DAY(arg) = (%_DateField(arg, 20));
macro TIMEZONE_OFFSET(arg) = (%_DateField(arg, 21));
macro SET_UTC_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 1));
macro SET_LOCAL_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 0));
# Last input and last subject of regexp matches.
define LAST_SUBJECT_INDEX = 1;
macro LAST_SUBJECT(array) = ((array)[1]);
macro LAST_INPUT(array) = ((array)[2]);
# REGEXP_FIRST_CAPTURE
macro CAPTURE(index) = (3 + (index));
define CAPTURE0 = 3;
define CAPTURE1 = 4;
# For the regexp capture override array. This has the same
# format as the arguments to a function called from
# String.prototype.replace.
macro OVERRIDE_MATCH(override) = ((override)[0]);
macro OVERRIDE_POS(override) = ((override)[(override).length - 2]);
macro OVERRIDE_SUBJECT(override) = ((override)[(override).length - 1]);
# 1-based so index of 1 returns the first capture
macro OVERRIDE_CAPTURE(override, index) = ((override)[(index)]);
# PropertyDescriptor return value indices - must match
# PropertyDescriptorIndices in runtime-object.cc.
define IS_ACCESSOR_INDEX = 0;
define VALUE_INDEX = 1;
define GETTER_INDEX = 2;
define SETTER_INDEX = 3;
define WRITABLE_INDEX = 4;
define ENUMERABLE_INDEX = 5;
define CONFIGURABLE_INDEX = 6;
# For messages.js
# Matches Script::Type from objects.h
define TYPE_NATIVE = 0;
define TYPE_EXTENSION = 1;
define TYPE_NORMAL = 2;
# Matches Script::CompilationType from objects.h
define COMPILATION_TYPE_HOST = 0;
define COMPILATION_TYPE_EVAL = 1;
define COMPILATION_TYPE_JSON = 2;
# Matches Messages::kNoLineNumberInfo from v8.h
define kNoLineNumberInfo = 0;
# Matches PropertyAttributes from property-details.h
define PROPERTY_ATTRIBUTES_NONE = 0;
define PROPERTY_ATTRIBUTES_STRING = 8;
define PROPERTY_ATTRIBUTES_SYMBOLIC = 16;
define PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL = 32;
# Use for keys, values and entries iterators.
define ITERATOR_KIND_KEYS = 1;
define ITERATOR_KIND_VALUES = 2;
define ITERATOR_KIND_ENTRIES = 3;
macro FIXED_ARRAY_GET(array, index) = (%_FixedArrayGet(array, (index) | 0));
macro FIXED_ARRAY_SET(array, index, value) = (%_FixedArraySet(array, (index) | 0, value));
# TODO(adamk): Find a more robust way to force Smi representation.
macro FIXED_ARRAY_SET_SMI(array, index, value) = (FIXED_ARRAY_SET(array, index, (value) | 0));
macro ORDERED_HASH_TABLE_BUCKET_COUNT(table) = (FIXED_ARRAY_GET(table, 0));
macro ORDERED_HASH_TABLE_ELEMENT_COUNT(table) = (FIXED_ARRAY_GET(table, 1));
macro ORDERED_HASH_TABLE_SET_ELEMENT_COUNT(table, count) = (FIXED_ARRAY_SET_SMI(table, 1, count));
macro ORDERED_HASH_TABLE_DELETED_COUNT(table) = (FIXED_ARRAY_GET(table, 2));
macro ORDERED_HASH_TABLE_SET_DELETED_COUNT(table, count) = (FIXED_ARRAY_SET_SMI(table, 2, count));
macro ORDERED_HASH_TABLE_BUCKET_AT(table, bucket) = (FIXED_ARRAY_GET(table, 3 + (bucket)));
macro ORDERED_HASH_TABLE_SET_BUCKET_AT(table, bucket, entry) = (FIXED_ARRAY_SET(table, 3 + (bucket), entry));
macro ORDERED_HASH_TABLE_HASH_TO_BUCKET(hash, numBuckets) = (hash & ((numBuckets) - 1));
macro ORDERED_HASH_SET_ENTRY_TO_INDEX(entry, numBuckets) = (3 + (numBuckets) + ((entry) << 1));
macro ORDERED_HASH_SET_KEY_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_SET_ENTRY_TO_INDEX(entry, numBuckets)));
macro ORDERED_HASH_SET_CHAIN_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_SET_ENTRY_TO_INDEX(entry, numBuckets) + 1));
macro ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets) = (3 + (numBuckets) + ((entry) * 3));
macro ORDERED_HASH_MAP_KEY_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets)));
macro ORDERED_HASH_MAP_VALUE_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets) + 1));
macro ORDERED_HASH_MAP_CHAIN_AT(table, entry, numBuckets) = (FIXED_ARRAY_GET(table, ORDERED_HASH_MAP_ENTRY_TO_INDEX(entry, numBuckets) + 2));
# Must match OrderedHashTable::kNotFound.
define NOT_FOUND = -1;
# Check whether debug is active.
define DEBUG_IS_ACTIVE = (%_DebugIsActive() != 0);
macro DEBUG_IS_STEPPING(function) = (%_DebugIsActive() != 0 && %DebugCallbackSupportsStepping(function));
macro DEBUG_PREPARE_STEP_IN_IF_STEPPING(function) = if (%_DebugIsActive() != 0) %DebugPrepareStepInIfStepping(function);
# SharedFlag equivalents
define kNotShared = false;
define kShared = true;
| [
"22249030@qq.com"
] | 22249030@qq.com |
4029206a74cb0e31e10ca7e3dc30388fad608155 | d8c758b6220c784b5b7fde8b0ddcacf76a6c3966 | /preprocess.py | ce66075c09090770a4c355529cc0f67318e7f9e6 | [] | no_license | RitaRamo/crawling_with_scrapy | b41de09c4d257f36d9f3b95b82d972beda649ad6 | d96e7738bf741746ea41b8c6f08a1e7a0c15cee0 | refs/heads/master | 2020-05-14T09:00:07.333829 | 2019-04-16T17:25:59 | 2019-04-16T17:25:59 | 181,732,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | from bs4 import BeautifulSoup, Comment
# Tags stripped by remove_tags(): markup whose contents should not appear
# in the extracted visible text (scripts, styles, form controls, frames...).
TAGS_BLACKLIST = ['noscript', 'script', 'style',
                  'input', 'textarea', 'iframe', 'footer', 'form']
def extract(to_remove):
    """Detach every node in *to_remove* from its parse tree in place."""
    # BeautifulSoup's Tag.extract() removes the node from its soup.
    for node in to_remove:
        node.extract()
def remove_tags(soup):
    """Strip every blacklisted tag (scripts, styles, forms, ...) from *soup*."""
    for blacklisted in TAGS_BLACKLIST:
        extract(soup.findAll(blacklisted))
def remove_comments(soup):
    """Drop HTML comment nodes (<!-- ... -->) from the parse tree."""
    def _is_comment(text):
        return isinstance(text, Comment)
    extract(soup.findAll(text=_is_comment))
def get_text(html):
    """Parse *html* and return its visible text, whitespace-normalized."""
    soup = BeautifulSoup(html, 'lxml')
    # Remove markup that carries no visible text before extracting.
    remove_tags(soup)
    remove_comments(soup)
    raw = soup.get_text(" ")
    # Collapse runs of whitespace (newlines, tabs, multiple spaces) to one space.
    return " ".join(raw.split())
| [
"rita.mparada.ramos@gmail.com"
] | rita.mparada.ramos@gmail.com |
7de4c23a140f42cdbbba5ae0609eea20d1f3982d | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /FLgJEC8SK2AJYLC6y_5.py | 95cbe236aa1c401c0102c41ea9671e92e21d7f3c | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py |
def possible_path(lst):
    """Return True when every consecutive pair in *lst* is a legal move."""
    # Adjacency map: which positions are reachable from each position.
    moves = {1: [2], 2: [1, 'H'], 3: [4], 4: [3, 'H'], 'H': [2, 4]}
    for here, there in zip(lst, lst[1:]):
        if there not in moves[here]:
            return False
    return True
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
c1eb78f39db51e874a025f85beefc14e2d389f5d | 29bec83fc600720533ad2bcf17fc90cd9ca385b7 | /0x08-python-more_classes/practice/robot_OOP.py | 5b5b54edc1869105b2e61134ab53bd08432d656e | [] | no_license | VictorZ94/holbertonschool-higher_level_programming | 73a7f504cde583f43f641e18e692e062610870a4 | ad512a1c76dc9b4c999a0ba2922c79f56206dd98 | refs/heads/master | 2023-03-25T04:38:12.708766 | 2021-03-24T01:08:47 | 2021-03-24T01:08:47 | 291,826,914 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | class Robot:
    def __init__(self, name=None, build_year=None):
        # Name-mangled "private" attributes; accessed via get_/set_ methods.
        self.__name = name
        #self.filds = Variable
        self.__build_year = build_year
def say_hi(self):
if self.__name:
print("Hi, I am " + self.__name)
else:
print("Hi, I am a robot without a name")
    def set_name(self, name):
        # Setter for the mangled __name attribute.
        self.__name = name
    def get_name(self):
        # Getter for the mangled __name attribute.
        return self.__name
    def set_build_year(self, by):
        # Setter: stores the new build year (the old comment's "pass it to
        # getter" wording was inaccurate -- it only assigns the attribute).
        self.__build_year = by
    def get_build_year(self):
        # Getter: returns the stored build year (old comment wrongly said
        # "name variable").
        return self.__build_year
def __repr__(self):
return "Robot('" + self.__name + "', " + str(self.__build_year) + ")"
def __str__(self):
return "Name: " + self.__name + ", Build Year: " + str(self.__build_year)
if __name__ == "__main__":
    # Demo: greet two robots and correct Caliban's build year.
    x = Robot("Marvin", 1979)
    y = Robot("Caliban", 1943)
    for rob in [x, y]:
        rob.say_hi()
        if rob.get_name() == "Caliban":
            rob.set_build_year(1993)
        # NOTE(review): indentation reconstructed from a whitespace-mangled
        # source; this print is assumed to run for every robot, not only
        # inside the Caliban branch -- confirm against the original file.
        print("I was built in the year " + str(rob.get_build_year()) + "!")
"zrvictor@outlook.com"
] | zrvictor@outlook.com |
f95375117f862312092b749273bda0520902399b | ed8bf8d548326cd80232a33fcae3135d4d4f4a1a | /examples/fauxware/solve.py | 579b296941c35da392b1453748e2f4e57d4759b5 | [] | no_license | symeonp/angr-doc | 480826e51cb16a5c007178f80073865fc0d92393 | 5d69aa70eb586cd064800cc27e6e336dd9908874 | refs/heads/master | 2021-01-12T22:46:19.390386 | 2016-01-05T20:49:22 | 2016-01-05T20:49:22 | 48,951,994 | 0 | 0 | null | 2016-01-03T16:08:32 | 2016-01-03T16:08:32 | null | UTF-8 | Python | false | false | 4,003 | py | #!/usr/bin/env python
import angr
# Look at fauxware.c! This is the source code for a "faux firmware" (@zardus
# really likes the puns) that's meant to be a simple representation of a
# firmware that can authenticate users but also has a backdoor - the backdoor
# is that anybody who provides the string "SOSNEAKY" as their password will be
# automatically authenticated.
def basic_symbolic_execution():
    """Symbolically execute ./fauxware up to its first symbolic branch and
    return concrete stdin bytes that take the backdoor ('SOSNEAKY') path.
    """
    # We can use this as a basic demonstration of using angr for symbolic
    # execution. First, we load the binary into an Angr project.
    p = angr.Project('fauxware')

    # Now, we want to construct a representation of symbolic program state.
    # SimState objects are what angr manipulates when it symbolically executes
    # binary code.
    # The entry_state constructor generates a SimState that is a very generic
    # representation of the possible program states at the program's entry
    # point. There are more constructors, like blank_state, which constructs a
    # "blank slate" state that specifies as little concrete data as possible,
    # or full_init_state, which performs a slow and pedantic initialization of
    # program state as it would execute through the dynamic loader.
    state = p.factory.entry_state()

    # States are relatively static objects, they don't do anything "smart".
    # You can read data into and out of them, but that's about it.
    # In order to actually perform symbolic execution, you need a Path.
    # Paths wrap states and are your interface for stepping them forward and
    # tracking their history.
    path = p.factory.path(state)

    # Now, in order to manage the symbolic execution process from a very high
    # level, we have a PathGroup. Path groups are just collections of paths
    # with various tags attached with a number of convenient interfaces for
    # managing them.
    pathgroup = p.factory.path_group(path)

    # Uncomment the following line to spawn an IPython shell when the program
    # gets to this point so you can poke around at the four objects we just
    # constructed. Use tab-autocomplete and IPython's nifty feature where if
    # you stick a question mark after the name of a function or method and hit
    # enter, you are shown the documentation string for it.

    # import IPython; IPython.embed()

    # Now, we begin execution. This will symbolically execute the program until
    # we reach a branch statement for which both branches are satisfiable.
    pathgroup.step(until=lambda lpg: len(lpg.active) > 1)

    # If you look at the C code, you see that the first "if" statement that the
    # program can come across is comparing the result of the strcmp with the
    # backdoor password. So, we have halted execution with two states, each of
    # which has taken a different arm of that conditional branch. If you drop
    # an IPython shell here and examine pathgroup.active[n].state.se.constraints
    # you will see the encoding of the condition that was added to the state to
    # constrain it to going down this path, instead of the other one. These are
    # the constraints that will eventually be passed to our constraint solver
    # (z3) to produce a set of concrete inputs satisfying them.

    # As a matter of fact, we'll do that now.
    input_0 = pathgroup.active[0].state.posix.dumps(0)
    input_1 = pathgroup.active[1].state.posix.dumps(0)

    # We have used a utility function on the state's posix plugin to perform a
    # quick and dirty concretization of the content in file descriptor zero,
    # stdin. One of these strings should contain the substring "SOSNEAKY"!
    if 'SOSNEAKY' in input_0:
        return input_0
    else:
        return input_1
def test():
    """No-op hook so the docs CI test-runner finds something to call."""
    pass # appease our CI infrastructure which expects this file to do something lmao
if __name__ == '__main__':
    # Python 2 print statement: emits the concrete stdin bytes that trigger
    # the backdoor (pipe them into ./fauxware to authenticate).
    print basic_symbolic_execution()
# You should be able to run this program and pipe its into fauxware in order to
# produce a "sucessfully authenticated" message
| [
"andrew@andrewdutcher.com"
] | andrew@andrewdutcher.com |
266c5d137ecfc7ac54c4b637fa660fd3a6e375bc | c548c10c4fd0b6c1d1c10cc645cb3b90b31f2de6 | /keras/keras42_fashion3_dnn.py | b24254a02eed1096ea29ddb4a30732584ecef8aa | [] | no_license | sswwd95/Study | caf45bc3c8c4301260aaac6608042e53e60210b6 | 3c189090c76a68fb827cf8d6807ee1a5195d2b8b | refs/heads/master | 2023-06-02T21:44:00.518810 | 2021-06-26T03:01:26 | 2021-06-26T03:01:26 | 324,061,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | import numpy as np
# 1. Data
from tensorflow.keras.datasets import fashion_mnist
(x_train,y_train), (x_test, y_test) = fashion_mnist.load_data()
print(np.max(x_train)) #255
print(x_train.shape) #(60000, 28, 28) before flattening
# Flatten each 28x28 image into a 784-vector and scale pixels to [0, 1].
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1]*x_train.shape[2])/255.
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1]*x_test.shape[2])/255.
print(x_train.shape) #(60000, 784)
from tensorflow.keras.utils import to_categorical
# One-hot encode the 10 class labels.
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print(y_train.shape) #(60000, 10)

# 2. Model construction (plain fully-connected network)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential()
model.add(Dense(100, activation='relu', input_shape=(28*28,)))
model.add(Dense(80, activation='relu'))
model.add(Dense(80, activation='relu'))
model.add(Dense(80, activation='relu'))
model.add(Dense(80, activation='relu'))
model.add(Dense(80, activation='relu'))
model.add(Dense(10, activation='softmax'))

# 3. Compile, train
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics='acc')
from tensorflow.keras.callbacks import EarlyStopping
# Stop early when training accuracy has not improved for 20 epochs.
es = EarlyStopping(monitor='acc', patience=20, mode='max')
model.fit(x_train,y_train, batch_size=16, epochs=500, validation_split=0.2, callbacks=[es])

# 4. Evaluate, predict
loss, acc = model.evaluate(x_test, y_test, batch_size=16)
print('loss, acc : ', loss, acc)
y_pred = model.predict(x_test)
# cnn
# loss, acc : 0.43148916959762573 0.8651000261306763
# dnn
# loss, acc : 0.5104688405990601 0.8884000182151794 | [
"sswwd95@gmail.com"
] | sswwd95@gmail.com |
919c37e138ed7feb88aba280acbd325e04ef7f4d | 1678a0c3792eaccaff4888b704a99545515cc415 | /test.py | cb50b30f521e231c46c5d2d01e489411c489115b | [] | no_license | jsvrcek/example-app | 60363eefaf958588170994e830f1a11cc89188b6 | a1ff75541f9d82efcd5c98833e61d16d7da6a22b | refs/heads/master | 2021-01-01T16:09:20.424676 | 2017-07-20T02:25:00 | 2017-07-20T02:25:00 | 97,781,142 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | from mapproxy.proj import Proj
print("It worked.") | [
"joseph.svrcek@rgi-corp.com"
] | joseph.svrcek@rgi-corp.com |
95be7252f67172d4c7b2ad4c779fed9aae242eec | ad129f7fc03f10ef2b4734fa2c2b9cb9367c84fa | /Aula 12 - Condições - pt2/Exe041.py | 595c27c7c25edb398fae7f0dee375ccdf3f19084 | [] | no_license | LucasDatilioCarderelli/Exercises_CursoemVideo | c6dc287d7c08a0349867a17185474744513dbaac | 67c2d572a4817a52dababbca80513e4b977de670 | refs/heads/master | 2022-03-31T20:20:52.827370 | 2020-01-27T13:15:19 | 2020-01-27T13:15:19 | 236,491,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | # Exe041 - Digite o ano de nascimento e clasifique-o em que faixa está,
# Categories: mirim (up to 9), infantil (up to 14), júnior (up to 19),
# sênior (up to 25), master (over 25).
from datetime import date

nasceu = int(input('Em que ano nasceu?: '))
idade = date.today().year - nasceu
print('O atleta tem {} anos'.format(idade))
# "até 9" means up to AND INCLUDING 9, so every boundary test must use <=;
# the original `<` pushed each boundary age (9, 14, 19, 25) into the next
# category and contradicted the spec in the header comment.
if idade <= 9:
    print('Classificação: MIRIM')
elif idade <= 14:
    print('Classificação: INFANTIL')
elif idade <= 19:
    print('Classificação: JÚNIOR')
elif idade <= 25:
    print('Classificação: SÊNIOR')
else:
    print('Classificação: MASTER')
| [
"noreply@github.com"
] | LucasDatilioCarderelli.noreply@github.com |
2a83c3d9eaab8aa74b431a89b46b8e95525c4c5b | 2e359c77bd9b8b1b7955b3ae5117191fa650ab72 | /app/main/views.py | 500475eb9d2dcf72fd2530f674d5410de279e1e7 | [] | no_license | sknewgiser/myFlasky | e60baddafe415ee01102f856a1e183164d7377c3 | 0ff7ad2bb5a75eb4350e424a1f4cd4ba434681e9 | refs/heads/master | 2020-05-01T11:02:08.497161 | 2016-06-27T14:07:10 | 2016-06-27T14:07:10 | 177,432,719 | 1 | 0 | null | 2019-03-24T15:25:20 | 2019-03-24T15:25:20 | null | UTF-8 | Python | false | false | 1,313 | py | # !/usr/bin/python
# -*- coding: utf-8 -*-
from datetime import datetime
from flask import render_template, session, redirect, url_for, current_app
from . import main
from .forms import NameForm
from .. import db
from ..models import User
from ..email import send_email
@main.route('/', methods=['GET', 'POST'])
def index():
    """Home page: ask the visitor for a name and remember whether it is new.

    On a valid POST, looks the name up in the User table, creates the row
    (and e-mails the configured admin) when it is unseen, then redirects
    back to GET (post/redirect/get pattern).
    """
    form = NameForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.name.data).first() # fetch the User row whose username equals the submitted name
        if user is None:
            user = User(username=form.name.data) # first visit: create a new row
            db.session.add(user) # queue it on the SQLAlchemy DB "session"
            session['known'] = False # this `session` is Flask's per-request user session
            if current_app.config['FLASKY_ADMIN']:
                send_email(current_app.config['FLASKY_ADMIN'], 'New User',
                           'mail/new_user', user=user)
        else:
            session['known'] = True
        session['name'] = form.name.data
        return redirect(url_for('.index'))
    return render_template('index.html',
                           form=form, name=session.get('name'),
                           known=session.get('known', False),
                           current_time=datetime.utcnow())
"lyp_login@outlook.com"
] | lyp_login@outlook.com |
3210db8214dc6c06c0da700000a852edc5f5602d | 080c13cd91a073457bd9eddc2a3d13fc2e0e56ae | /MY_REPOS/awesome-4-new-developers/tensorflow-master/tensorflow/python/keras/engine/training_dataset_test.py | a5da76fe9a4f3a18cb91db9b8697a498fc03a8af | [
"Apache-2.0"
] | permissive | Portfolio-Projects42/UsefulResourceRepo2.0 | 1dccc8961a09347f124d3ed7c27c6d73b9806189 | 75b1e23c757845b5f1894ebe53551a1cf759c6a3 | refs/heads/master | 2023-08-04T12:23:48.862451 | 2021-09-15T12:51:35 | 2021-09-15T12:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,251 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
import io
import sys
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class BatchCounterCallback(callbacks.Callback):
    """Keras callback that simply counts batch-begin and batch-end events."""

    def __init__(self):
        # Counters the tests below inspect to verify how many batches ran.
        self.batch_begin_count = 0
        self.batch_end_count = 0

    def on_batch_begin(self, *args, **kwargs):
        self.batch_begin_count += 1

    def on_batch_end(self, *args, **kwargs):
        self.batch_end_count += 1
class TestTrainingWithDataset(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_calling_model_on_same_dataset(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = "rmsprop"
loss = "mse"
metrics = ["mae"]
model.compile(
optimizer,
loss,
metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly(),
)
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
# Call fit with validation data
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
validation_data=dataset,
validation_steps=2,
)
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
validation_data=dataset,
validation_steps=2,
)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_training_and_eval_methods_on_dataset(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = "rmsprop"
loss = "mse"
metrics = ["mae", metrics_module.CategoricalAccuracy()]
model.compile(
optimizer,
loss,
metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly(),
)
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat() # Infinite dataset.
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
# Test with validation data
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
validation_data=dataset,
validation_steps=2,
)
# Test with validation split
with self.assertRaises(ValueError):
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
validation_split=0.5,
validation_steps=2,
)
# Test with sample weight.
sample_weight = np.random.random((10,))
with self.assertRaisesRegex(
ValueError, r"`sample_weight` argument is not supported .+dataset"
):
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
sample_weight=sample_weight,
)
with self.assertRaisesRegex(
ValueError,
"(you should not specify a target)|"
"(`y` argument is not supported when using dataset as input.)",
):
model.fit(dataset, dataset, epochs=1, steps_per_epoch=2, verbose=0)
# With an infinite dataset, `steps_per_epoch`/`steps` argument is required.
with self.assertRaises(ValueError):
model.fit(dataset, epochs=1, verbose=0)
with self.assertRaises(ValueError):
model.evaluate(dataset, verbose=0)
with self.assertRaises(ValueError):
model.predict(dataset, verbose=0)
@keras_parameterized.run_with_all_model_types(exclude_models="sequential")
@keras_parameterized.run_all_keras_modes
def test_training_and_eval_methods_on_multi_input_output_dataset(self):
input_a = keras.layers.Input(shape=(3,), name="input_1")
input_b = keras.layers.Input(shape=(3,), name="input_2")
dense = keras.layers.Dense(4, name="dense")
dropout = keras.layers.Dropout(0.5, name="dropout")
branch_a = [input_a, dense]
branch_b = [input_b, dense, dropout]
model = testing_utils.get_multi_io_model(branch_a, branch_b)
model.compile(
optimizer="rmsprop",
loss="mse",
run_eagerly=testing_utils.should_run_eagerly(),
)
input_a_np = np.random.random((10, 3)).astype(dtype=np.float32)
input_b_np = np.random.random((10, 3)).astype(dtype=np.float32)
output_d_np = np.random.random((10, 4)).astype(dtype=np.float32)
output_e_np = np.random.random((10, 4)).astype(dtype=np.float32)
# Test with tuples
dataset_tuple = dataset_ops.Dataset.from_tensor_slices(
((input_a_np, input_b_np), (output_d_np, output_e_np))
)
dataset_tuple = dataset_tuple.repeat(100)
dataset_tuple = dataset_tuple.batch(10)
model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset_tuple, steps=2, verbose=1)
# Test with dict
input_dict = {"input_1": input_a_np, "input_2": input_b_np}
if testing_utils.get_model_type() == "subclass":
output_dict = {"output_1": output_d_np, "output_2": output_e_np}
else:
output_dict = {"dense": output_d_np, "dropout": output_e_np}
dataset_dict = dataset_ops.Dataset.from_tensor_slices((input_dict, output_dict))
dataset_dict = dataset_dict.repeat(100)
dataset_dict = dataset_dict.batch(10)
model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset_dict, steps=2, verbose=1)
predict_dataset_dict = dataset_ops.Dataset.from_tensor_slices(input_dict)
predict_dataset_dict = predict_dataset_dict.repeat(100)
predict_dataset_dict = predict_dataset_dict.batch(10)
model.predict(predict_dataset_dict, steps=1)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_dataset_with_sample_weights(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = "rmsprop"
loss = "mse"
metrics = ["mae", metrics_module.CategoricalAccuracy()]
model.compile(
optimizer,
loss,
metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly(),
)
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
sample_weights = np.ones((10), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(
(inputs, targets, sample_weights)
)
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
    @keras_parameterized.run_with_all_model_types
    @keras_parameterized.run_all_keras_modes
    def test_dataset_with_sample_weights_correctness(self):
        """evaluate() on a weighted dataset returns the exact weighted MSE."""
        # Identity-like model: kernel initialized to ones, bias to zeros,
        # so the prediction for a scalar input x is x itself.
        x = keras.layers.Input(shape=(1,), name="input")
        y = keras.layers.Dense(
            1, kernel_initializer="ones", bias_initializer="zeros", name="dense"
        )(x)
        model = keras.Model(x, y)
        optimizer = "rmsprop"
        loss = "mse"
        model.compile(optimizer, loss)
        inputs = np.array([[0], [1], [2], [3]], np.float32)
        targets = np.array([[2], [4], [6], [8]], np.float32)
        sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)
        ds = dataset_ops.Dataset.from_tensor_slices(
            (inputs, targets, sample_weights)
        ).batch(2)
        result = model.evaluate(ds, verbose=1)
        # The per-sample loss is multiplied by the corresponding sample weight. The
        # average of these weighted losses is the return value of the `evaluate`
        # call. For example, in the test above the average weighted loss is
        # calculated in the following manner:
        # ((2-0)^2) * 0.25 + ((4-1)^2) * 0.5 + ((6-2)^2 * 0.75) + ((8-3)^2 * 1)
        # equals 42.5 / 4 = 10.625
        self.assertEqual(result, 10.625)
    @keras_parameterized.run_with_all_model_types
    @keras_parameterized.run_all_keras_modes
    def test_dataset_with_sparse_labels(self):
        """fit() accepts integer class-id labels with the sparse categorical loss."""
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = "rmsprop"
        model.compile(
            optimizer,
            loss="sparse_categorical_crossentropy",
            run_eagerly=testing_utils.should_run_eagerly(),
        )
        # Labels are plain int class ids (no one-hot encoding) -- exactly the
        # shape sparse_categorical_crossentropy expects.
        inputs = np.zeros((10, 3), dtype=np.float32)
        targets = np.random.randint(0, 4, size=10, dtype=np.int32)
        dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)
        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
@keras_parameterized.run_all_keras_modes
def test_dataset_fit_correctness(self):
class SumLayer(keras.layers.Layer):
def build(self, _):
self.w = self.add_weight("w", ())
def call(self, inputs):
return keras.backend.sum(inputs, axis=1, keepdims=True) + self.w * 0
model = keras.Sequential([SumLayer(input_shape=(2,))])
model.compile(
"rmsprop", loss="mae", run_eagerly=testing_utils.should_run_eagerly()
)
inputs = np.zeros((40, 2), dtype=np.float32)
inputs[10:20, :] = 2
inputs[20:30, :] = 1
inputs[30:, :] = 4
targets = np.zeros((40, 1), dtype=np.float32)
# Test correctness with `steps_per_epoch`.
train_dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)).batch(
10
)
val_dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)).batch(
10
)
history = model.fit(
train_dataset,
epochs=2,
steps_per_epoch=2,
verbose=1,
validation_data=val_dataset,
validation_steps=2,
)
self.assertAllClose(
history.history["loss"], [inputs[:20].sum() / 20, inputs[20:].sum() / 20]
)
# The validation dataset will be reset at the end of each validation run.
self.assertAllClose(
history.history["val_loss"],
[inputs[:20].sum() / 20, inputs[:20].sum() / 20],
)
# Test correctness with dataset reset.
train_dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)).batch(
10
)
val_dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets)).batch(
10
)
history = model.fit(
train_dataset, epochs=2, verbose=1, validation_data=val_dataset
)
self.assertAllClose(
history.history["loss"], [inputs.sum() / 40, inputs.sum() / 40]
)
self.assertAllClose(
history.history["val_loss"], [inputs.sum() / 40, inputs.sum() / 40]
)
def test_dataset_input_shape_validation(self):
with ops.get_default_graph().as_default(), self.cached_session():
model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
model.compile(optimizer="rmsprop", loss="mse")
# User forgets to batch the dataset
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
with self.assertRaisesRegex(
ValueError,
r"expected (.*?) to have shape \(3,\) but got array with shape \(1,\)",
):
model.train_on_batch(dataset)
# Wrong input shape
inputs = np.zeros((10, 5))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
with self.assertRaisesRegex(
ValueError, r"expected (.*?) to have shape \(3,\)"
):
model.train_on_batch(dataset)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_finite_dataset_known_cardinality_no_steps_arg(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
model.compile("rmsprop", "mse", run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.batch(10)
batch_counter = BatchCounterCallback()
history = model.fit(dataset, epochs=2, verbose=1, callbacks=[batch_counter])
self.assertLen(history.history["loss"], 2)
self.assertEqual(batch_counter.batch_end_count, 20)
model.evaluate(dataset)
out = model.predict(dataset)
self.assertEqual(out.shape[0], 100)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_finite_dataset_unknown_cardinality_no_steps_arg(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
model.compile("rmsprop", "mse", run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.filter(lambda x, y: True).batch(10)
self.assertEqual(
keras.backend.get_value(cardinality.cardinality(dataset)),
cardinality.UNKNOWN,
)
batch_counter = BatchCounterCallback()
history = model.fit(dataset, epochs=2, verbose=1, callbacks=[batch_counter])
self.assertLen(history.history["loss"], 2)
self.assertEqual(batch_counter.batch_end_count, 20)
model.evaluate(dataset)
out = model.predict(dataset)
self.assertEqual(out.shape[0], 100)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_finite_dataset_unknown_cardinality_no_step_with_train_and_val(self):
class CaptureStdout(object):
def __enter__(self):
self._stdout = sys.stdout
string_io = io.StringIO()
sys.stdout = string_io
self._stringio = string_io
return self
def __exit__(self, *args):
self.output = self._stringio.getvalue()
sys.stdout = self._stdout
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
model.compile("rmsprop", "mse", run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.filter(lambda x, y: True).batch(10)
self.assertEqual(
keras.backend.get_value(cardinality.cardinality(dataset)),
cardinality.UNKNOWN,
)
batch_counter = BatchCounterCallback()
with CaptureStdout() as capture:
history = model.fit(
dataset,
epochs=2,
callbacks=[batch_counter],
validation_data=dataset.take(3),
)
lines = capture.output.splitlines()
self.assertIn("10/10", lines[-1])
self.assertLen(history.history["loss"], 2)
self.assertEqual(batch_counter.batch_begin_count, 21)
self.assertEqual(batch_counter.batch_end_count, 20)
model.evaluate(dataset)
out = model.predict(dataset)
self.assertEqual(out.shape[0], 100)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_finite_dataset_unknown_cardinality_out_of_data(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
model.compile("rmsprop", "mse", run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.filter(lambda x, y: True).batch(10)
self.assertEqual(
keras.backend.get_value(cardinality.cardinality(dataset)),
cardinality.UNKNOWN,
)
batch_counter = BatchCounterCallback()
with test.mock.patch.object(logging, "warning") as mock_log:
# steps_per_epoch (200) is greater than the dataset size (100). As this is
# unexpected, training will stop and not make it to the second epoch.
history = model.fit(
dataset,
epochs=2,
verbose=1,
callbacks=[batch_counter],
steps_per_epoch=200,
)
self.assertIn(
"ran out of data; interrupting training.", str(mock_log.call_args)
)
self.assertIn(
"can generate at least "
"`steps_per_epoch * epochs` batches (in this case, 400 batches). "
"You may need to use the repeat() function when "
"building your dataset.",
str(mock_log.call_args),
)
self.assertLen(history.history["loss"], 1)
self.assertEqual(batch_counter.batch_end_count, 10)
model.evaluate(dataset)
out = model.predict(dataset)
self.assertEqual(out.shape[0], 100)
    @keras_parameterized.run_all_keras_modes
    def test_with_external_loss(self):
        """A model whose only loss comes from add_loss() trains on features-only data."""
        inp = keras.Input(shape=(4,), name="inp1")
        out = keras.layers.Dense(2)(inp)
        model = keras.Model(inp, out)
        # The loss is attached to the model itself, so compile() takes no
        # `loss` argument and the dataset carries no labels.
        model.add_loss(math_ops.reduce_mean(out))
        model.compile("rmsprop")
        x = np.ones((10, 4))
        # dataset contains only features, no labels.
        dataset = dataset_ops.Dataset.from_tensor_slices(x).repeat(10).batch(10)
        model.fit(dataset)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_train_eval_with_steps(self):
# See b/142880049 for more details.
inp = keras.Input(shape=(4,), name="inp1")
out = keras.layers.Dense(2)(inp)
model = keras.Model(inp, out)
model.compile(
"rmsprop", loss="mse", run_eagerly=testing_utils.should_run_eagerly()
)
inputs = np.zeros((100, 4), dtype=np.float32)
targets = np.random.randint(0, 2, size=100, dtype=np.int32)
training_ds = (
dataset_ops.Dataset.from_tensor_slices((inputs, targets)).repeat().batch(10)
)
# Create eval dataset with generator, so that dataset won't contain the
# overall size metadata. Without eval_steps, we expect to run through all
# the data in this dataset every epoch.
def gen():
for _ in range(100):
yield (
np.zeros(4, dtype=np.float32),
np.random.randint(0, 2, size=1, dtype=np.int32),
)
eval_ds = dataset_ops.Dataset.from_generator(
generator=gen, output_types=("float64", "int32"), output_shapes=([4], [1])
).batch(100)
batch_counter = BatchCounterCallback()
model.fit(
training_ds,
steps_per_epoch=10,
epochs=10,
validation_data=eval_ds,
callbacks=[batch_counter],
)
# Expect 10 batch from training per epoch.
self.assertEqual(batch_counter.batch_end_count, 100)
class TestMetricsWithDatasets(keras_parameterized.TestCase):
    """Sanity checks for metric values computed over tf.data inputs."""

    @keras_parameterized.run_with_all_model_types
    @keras_parameterized.run_all_keras_modes
    def test_metrics_correctness_with_dataset(self):
        # All-ones kernels make the (untrained) sigmoid output effectively
        # constant for non-negative integer inputs.
        layers = [
            keras.layers.Dense(
                8, activation="relu", input_dim=4, kernel_initializer="ones"
            ),
            keras.layers.Dense(1, activation="sigmoid", kernel_initializer="ones"),
        ]
        model = testing_utils.get_model_from_layers(layers, (4,))
        model.compile(
            loss="binary_crossentropy",
            metrics=["accuracy", metrics_module.BinaryAccuracy()],
            optimizer="rmsprop",
            run_eagerly=testing_utils.should_run_eagerly(),
        )
        np.random.seed(123)  # fixed seed so the expected metric values hold
        x = np.random.randint(10, size=(100, 4)).astype(np.float32)
        y = np.random.randint(2, size=(100, 1)).astype(np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
        dataset = dataset.batch(10)
        outs = model.evaluate(dataset, steps=10)
        # Against random 0/1 labels, both accuracy implementations land
        # around 0.5.
        self.assertEqual(np.around(outs[1], decimals=1), 0.5)
        self.assertEqual(np.around(outs[2], decimals=1), 0.5)
        # All-zero labels against (near-)one predictions: accuracy exactly 0.
        y = np.zeros((100, 1), dtype=np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)
        outs = model.evaluate(dataset, steps=10)
        self.assertEqual(outs[1], 0.0)
        self.assertEqual(outs[2], 0.0)
if __name__ == "__main__":
    test.main()  # run every test class above via the TensorFlow test runner
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
ae94884792fb67ba8b1484f778a5f874b1a2b6e3 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_75/120.py | 2d9966f5134b7b96b15e6ca0d0a8fa742d9cb581 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | N = int(input())
for num in range(1, N + 1):
inp = input().split()
combine = {}
opposed = set()
i = int(inp[0])
for j in range(1, 1 + i):
combine[inp[j][:2]] = inp[j][2]
combine[inp[j][1::-1]] = inp[j][2]
i = i + 1
for j in range(i + 1, i + 1 + int(inp[i])):
opposed.add(inp[j])
opposed.add(inp[j][::-1])
ans = ['@']
for elem in inp[-1]:
if ans[-1] + elem in combine:
ans[-1] = combine[ans[-1] + elem]
else:
for elem1 in ans:
if elem1 + elem in opposed:
ans = ['@']
break
else:
ans.append(elem)
ans = ', '.join(ans[1:])
print("Case #", num, ": [", ans, "]", sep = '')
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
d2e6eca292c9df91bd4fd6422e9ed3c0ece225d7 | ba719722e890a7822a5533a8b6efd06cc776b17e | /Maricopa_County/fitlering Script/Surprise_85379_Maricopa_AZ.py | bd60aa3e5d180bd3dad8fa1438bf1c0cc4ca2866 | [] | no_license | devhadisov/python_selenium_zillow | 9c80566d829721dce952ab4d7a285d1fd970fe19 | e0b4f7243b548404912bdcdce4bcdf7168413242 | refs/heads/master | 2022-12-24T17:14:32.903874 | 2020-09-24T20:20:25 | 2020-09-24T20:20:25 | 298,384,758 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,459 | py | import selenium
from selenium import webdriver
from selenium.webdriver import Chrome
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
import requests
import urllib.request
import json, csv, lxml, time, re
import datetime
import hashlib
from insertdatabase import InsertDB
def main(htmlstring, driver):
table_name = "maricopa"
header = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9,ko;q=0.8',
'referer': 'https://www.zillow.com/homes/85139_rb/',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'
}
pagination = ""
usersSearchTerm = "85379"
west = "-112.43028831787109"
east = "-112.3211116821289"
south = "33.53811759133085"
north = "33.65736633255334"
regionId = "94953"
regionType = "7"
mapZoom = "13"
includeList = "true"
# https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={%22pagination%22:{},%22usersSearchTerm%22:%2285006%22,%22mapBounds%22:{%22west%22:-112.07973577801513,%22east%22:-112.01665022198486,%22south%22:33.43522122804253,%22north%22:33.494937169247144},%22regionSelection%22:[{%22regionId%22:94722,%22regionType%22:7}],%22isMapVisible%22:true,%22mapZoom%22:14,%22filterState%22:{%22sort%22:{%22value%22:%22globalrelevanceex%22}},%22isListVisible%22:true}&includeMap=false&includeList=true
default_first_url = 'https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={"pagination":{0},"usersSearchTerm":"{1}","mapBounds":{"west":{2},"east":{3},"south":{4},"north":{5}},"regionSelection":[{"regionId":{6},"regionType":{7}}],"isMapVisible":true,"mapZoom":{8},"filterState":{"sort":{"value":"globalrelevanceex"}},"isListVisible":true}&includeMap=false&includeList={9}'
first_case_url = 'https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={"pagination":{' + pagination + '},' + '"usersSearchTerm":"' + usersSearchTerm + '","mapBounds":{"west":' + west + ',"east":' + east + ',"south":' + south + ',"north":' + north + '},"regionSelection":[{"regionId":' + regionId + ',"regionType":' + regionType + '}],"isMapVisible":true,"mapZoom":' + mapZoom + ',"filterState":{"sort":{"value":"globalrelevanceex"}},"isListVisible":true}&includeMap=false&includeList=' + includeList
# first_url = default_first_url.format(pagination, usersSearchTerm, west, east, south, north, regionId, regionType, mapZoom, includeList)
print(first_case_url)
# return
default_page_url = 'https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={"pagination":{"currentPage":' + pagination + '},' + '"usersSearchTerm":"' + usersSearchTerm + '","mapBounds":{"west":' + west + ',"east":' + east + ',"south":' + south + ',"north":' + north + '},"regionSelection":[{"regionId":' + regionId + ',"regionType":' + regionType + '}],"isMapVisible":true,"mapZoom":' + mapZoom + ',"filterState":{"sort":{"value":"globalrelevanceex"}},"isListVisible":true}&includeMap=false&includeList=' + includeList
counts = 1
for page in range(1, 5):
default_page_url = 'https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={"pagination":{"currentPage":' + str(page) + '},' + '"usersSearchTerm":"' + usersSearchTerm + '","mapBounds":{"west":' + west + ',"east":' + east + ',"south":' + south + ',"north":' + north + '},"regionSelection":[{"regionId":' + regionId + ',"regionType":' + regionType + '}],"isMapVisible":true,"mapZoom":' + mapZoom + ',"filterState":{"sort":{"value":"globalrelevanceex"}},"isListVisible":true}&includeMap=false&includeList=' + includeList
if page == 1:
url = first_case_url
else:
url = default_page_url
response = requests.get(url, headers=header)
result = response.json()
properties_infos = result["searchResults"]["listResults"]
print(len(properties_infos))
for i in range(0, len(properties_infos)):
data_base = []
property_url = properties_infos[i]["detailUrl"]
status_text = properties_infos[i]["statusText"]
print(status_text, counts)
counts += 1
try:
street_add = properties_infos[i]["hdpData"]["homeInfo"]["streetAddress"]
except:
street_add = ""
try:
city = properties_infos[i]["hdpData"]["homeInfo"]["city"]
except:
city = ""
try:
state = properties_infos[i]["hdpData"]["homeInfo"]["state"]
except:
state = ""
try:
zipcode = properties_infos[i]["hdpData"]["homeInfo"]["zipcode"]
except:
zipcode = ""
property_address = street_add + ", " + city + ", " + state + " " + zipcode
if "by owner" in status_text:
print("--------------------------------------------------> : ", i + 1)
driver.get(property_url)
time.sleep(10)
# street_add = driver.find_element_by_xpath("//h1[@class='ds-address-container']/span[1]").text
# property_address = street_add + ", " + city + ", " + state + " " + zipcode
# phone_number = driver.find_element_by_xpath("//span[@class='listing-field']").text
phones = re.findall(r'[(][\d]{3}[)][ ]?[\d]{3}-[\d]{4}', driver.page_source)
for phone in range(1, len(phones) + 1):
phone_number = phones[phone - 1]
print("Property Address--------------------> : ", property_address)
print("Property Url------------------------> : ", property_url)
print("Property Status---------------------> : ", status_text)
print("Owner Phone Number------------------> : ", phone_number)
string_id = property_address + status_text + phone_number
m = hashlib.md5()
m.update(string_id.encode('utf8'))
identifier = m.hexdigest()
print("hash-------------------->", identifier)
create_time = str(datetime.datetime.now())
update_time = ""
insertdb = InsertDB()
data_base.append((property_address, street_add, city, state, zipcode, status_text, phone_number, identifier, create_time, update_time))
insertdb.insert_document(data_base, table_name)
# return
if __name__ == "__main__":
    print("-----------------start---------------")
    # Windows-style path to a locally bundled ChromeDriver binary.
    path = "driver\\chromedriver.exe"
    driver = Chrome(executable_path=path)
    # Open the Zillow landing page first -- presumably to establish session
    # cookies before main() hits the search endpoints; confirm if changed.
    driver.get("https://www.zillow.com/")
    time.sleep(2)  # crude fixed wait for the page to finish loading
    driver.maximize_window()
    main(driver.page_source, driver)
"dev.hadisov@gmail.com"
] | dev.hadisov@gmail.com |
cbd26b08ee6593f7a3a03c14c4a0e1c7a1051d0e | d4f9a423353fe79cf8824a8407690655fc1379fe | /django/virtualenv/django/lib/python2.7/site-packages/ansible-2.2.0-py2.7.egg/ansible/modules/core/cloud/amazon/ec2_vpc_net.py | 54c4307b23fbb49c7a790688d76832fd66210634 | [] | no_license | 007root/python | 9ab62d433d17c8bb57622fd1d24a3b17cb3d13ad | 16bf729e5824555eab0c9de61ce6b8b055551bd1 | refs/heads/master | 2020-06-23T09:43:05.308328 | 2020-06-09T08:31:20 | 2020-06-09T08:31:20 | 74,656,519 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,137 | py | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_net
short_description: Configure AWS virtual private clouds
description:
- Create or terminate AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "2.0"
author: Jonathan Davila (@defionscode)
options:
name:
description:
- The name to give your VPC. This is used in combination with the cidr_block paramater to determine if a VPC already exists.
required: yes
cidr_block:
description:
- The CIDR of the VPC
required: yes
tenancy:
description:
- Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
required: false
default: default
choices: [ 'default', 'dedicated' ]
dns_support:
description:
- Whether to enable AWS DNS support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dns_hostnames:
description:
- Whether to enable AWS hostname support.
required: false
default: yes
choices: [ 'yes', 'no' ]
dhcp_opts_id:
description:
- the id of the DHCP options to use for this vpc
default: null
required: false
tags:
description:
- The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of the VPC if it's different.
default: None
required: false
aliases: [ 'resource_tags' ]
state:
description:
- The state of the VPC. Either absent or present.
default: present
required: false
choices: [ 'present', 'absent' ]
multi_ok:
description:
- By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created.
default: false
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create a VPC with dedicate tenancy and a couple of tags
- ec2_vpc_net:
name: Module_dev2
cidr_block: 10.10.0.0/16
region: us-east-1
tags:
module: ec2_vpc_net
this: works
tenancy: dedicated
'''
# boto is an optional dependency; record availability so main() can emit a
# clean fail_json message instead of crashing with an ImportError traceback.
try:
    import boto
    import boto.ec2
    import boto.vpc
    from boto.exception import BotoServerError
    HAS_BOTO=True
except ImportError:
    HAS_BOTO=False
def boto_exception(err):
    """Extract a human-readable message from a boto error object.

    Prefers ``error_message`` (boto server errors), falls back to the
    legacy ``message`` attribute, and finally to a generic rendering of
    the exception.
    """
    for attr in ('error_message', 'message'):
        if hasattr(err, attr):
            return getattr(err, attr)
    return '%s: %s' % (Exception, err)
def vpc_exists(module, vpc, name, cidr_block, multi):
    """Return the existing VPC object that matches, or None.

    Looks up VPCs whose Name tag and CIDR block both match.  With exactly
    one match the VPC is returned; with several matches the module fails
    unless the user explicitly allowed duplicates via multi_ok.

    :param module: AnsibleModule (used for fail_json)
    :param vpc: boto VPC connection
    :param name: value of the Name tag to match
    :param cidr_block: CIDR block to match
    :param multi: True when duplicate VPCs are acceptable (multi_ok)
    """
    matched_vpc = None
    try:
        matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block})
    except Exception as e:
        e_msg=boto_exception(e)
        module.fail_json(msg=e_msg)
    if len(matching_vpcs) == 1:
        matched_vpc = matching_vpcs[0]
    elif len(matching_vpcs) > 1:
        # Bug fix: the original tested `if multi:`, i.e. it failed exactly
        # when the user HAD passed multi_ok=True.  Fail only when duplicates
        # are NOT allowed (matches the documented multi_ok behaviour).
        if not multi:
            module.fail_json(msg='Currently there are %d VPCs that have the same name and '
                                 'CIDR block you specified. If you would like to create '
                                 'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
    return matched_vpc
def update_vpc_tags(vpc, module, vpc_obj, tags, name):
    """Ensure the VPC's tags (including its Name tag) match the request.

    Returns True when the tags were changed (or would be, in check mode),
    False when they already match.

    :param vpc: boto VPC connection
    :param module: AnsibleModule (check_mode / fail_json)
    :param vpc_obj: boto VPC object being managed
    :param tags: requested tag dict; may be None
    :param name: value to enforce for the 'Name' tag
    """
    # Work on a copy so the caller's dict is not mutated by the Name merge.
    tags = dict(tags) if tags is not None else dict()
    tags.update({'Name': name})
    try:
        current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id}))
        # Bug fix: the original used cmp(tags, current_tags), which does not
        # exist on Python 3.  cmp(a, b) is truthy exactly when a != b, so a
        # plain inequality is equivalent and works on both Python versions.
        if tags != current_tags:
            if not module.check_mode:
                vpc.create_tags(vpc_obj.id, tags)
            return True
        else:
            return False
    except Exception as e:
        e_msg=boto_exception(e)
        module.fail_json(msg=e_msg)
def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
    """Associate ``dhcp_id`` with the VPC when it differs.

    Returns True when a change was (or, in check mode, would be) made,
    False when the VPC already uses the requested DHCP options set.
    """
    if vpc_obj.dhcp_options_id == dhcp_id:
        return False
    if not module.check_mode:
        connection.associate_dhcp_options(dhcp_id, vpc_obj.id)
    return True
def get_vpc_values(vpc_obj):
    """Return the VPC object's attribute dict for exit_json output.

    Strips the non-serializable bookkeeping entries boto attaches
    (region / item / connection).  Note: this intentionally operates on
    the object's own ``__dict__`` (not a copy), matching how the module
    has always behaved.  Returns None for a None input.
    """
    if vpc_obj is None:
        return None
    vpc_values = vpc_obj.__dict__
    for key in ("region", "item", "connection"):
        vpc_values.pop(key, None)
    return vpc_values
def main():
    """Module entry point: create, update or delete an AWS VPC.

    Reads the module parameters, connects to the region with boto, then
    either converges a VPC to the requested state (tags, DHCP options,
    DNS attributes) or deletes it.  Exits via module.exit_json /
    module.fail_json in every path.
    """
    argument_spec=ec2_argument_spec()
    argument_spec.update(dict(
        name = dict(type='str', default=None, required=True),
        cidr_block = dict(type='str', default=None, required=True),
        tenancy = dict(choices=['default', 'dedicated'], default='default'),
        dns_support = dict(type='bool', default=True),
        dns_hostnames = dict(type='bool', default=True),
        dhcp_opts_id = dict(type='str', default=None, required=False),
        tags = dict(type='dict', required=False, default=None, aliases=['resource_tags']),
        state = dict(choices=['present', 'absent'], default='present'),
        multi_ok = dict(type='bool', default=False)
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )
    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')
    name=module.params.get('name')
    cidr_block=module.params.get('cidr_block')
    tenancy=module.params.get('tenancy')
    dns_support=module.params.get('dns_support')
    dns_hostnames=module.params.get('dns_hostnames')
    dhcp_id=module.params.get('dhcp_opts_id')
    tags=module.params.get('tags')
    state=module.params.get('state')
    multi=module.params.get('multi_ok')
    changed=False
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")
    # AWS requires DNS support whenever DNS hostnames are enabled.
    if dns_hostnames and not dns_support:
        module.fail_json('In order to enable DNS Hostnames you must also enable DNS support')
    if state == 'present':
        # Check if VPC exists
        vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
        if vpc_obj is None:
            try:
                changed = True
                if not module.check_mode:
                    vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy)
                else:
                    # Check mode with no existing VPC: nothing more to inspect.
                    module.exit_json(changed=changed)
            except BotoServerError as e:
                module.fail_json(msg=e)
        if dhcp_id is not None:
            try:
                if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
                    changed = True
            except BotoServerError as e:
                module.fail_json(msg=e)
        if tags is not None or name is not None:
            try:
                if update_vpc_tags(connection, module, vpc_obj, tags, name):
                    changed = True
            except BotoServerError as e:
                module.fail_json(msg=e)
        # Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute
        # which is needed in order to detect the current status of DNS options. For now we just update
        # the attribute each time and is not used as a changed-factor.
        try:
            if not module.check_mode:
                connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support)
                connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames)
        except BotoServerError as e:
            e_msg=boto_exception(e)
            module.fail_json(msg=e_msg)
        if not module.check_mode:
            # get the vpc obj again in case it has changed
            try:
                vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0]
            except BotoServerError as e:
                e_msg=boto_exception(e)
                module.fail_json(msg=e_msg)
        module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
    elif state == 'absent':
        # Check if VPC exists
        vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)
        if vpc_obj is not None:
            try:
                if not module.check_mode:
                    connection.delete_vpc(vpc_obj.id)
                vpc_obj = None
                changed = True
            except BotoServerError as e:
                e_msg = boto_exception(e)
                module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
                                     "and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg)
        module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))
# import module snippets
# NOTE: Ansible modules of this era deliberately star-import the utils at
# the bottom of the file; do not move these above main().
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
    main()
| [
"wangzhishuai@gstianfu.com"
] | wangzhishuai@gstianfu.com |
c34bbc8a5f1f93bad4d3178bd354e23f55da7ac2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03211/s089250300.py | e527a16181393649ce35c1070bbbf8b7f202f33e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | s = input()
# "Shichi-Go-San": among every 3-digit window of s (defined above),
# print the smallest absolute difference from 753.
windows = [s[i:i + 3] for i in range(len(s) - 2)]
diffs = [abs(int(w) - 753) for w in windows]
print(min(diffs))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ccd944b721ea43533688111c8209f149fb569c3c | 77d4d5a1881297dce3003560e04a2e39a97d4465 | /code_chef/CHFCHK.py | 9f69147f1b66643422b26f10a1330a0a89a919aa | [] | no_license | gomsterX/competitive_programming | c34820032c24532d62325a379590a22fa812159a | 72ac1fe61604e5a5e41f336bb40377fd7e4738d7 | refs/heads/master | 2023-07-19T21:28:16.205718 | 2021-09-02T14:18:44 | 2021-09-02T14:18:44 | 271,074,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | #Problem ID: CHFCHK
#Problem Name: Chef Chick
# For each test case the answer is simply the smallest coordinate.
for _ in range(int(input())):
    input()  # N, the number of positions -- not needed explicitly
    positions = sorted(map(int, input().split()))
    print(positions[0])
| [
"mohamedmoussaa7@gmail.com"
] | mohamedmoussaa7@gmail.com |
95d44e5cf23fbc42626c705a67cb6448244ae02e | 5d7a3dc27540e04e5cb9c8f4742830c7fca188f0 | /week-05/code/httpdate.py | 3543de4e38136299f262df0feccd7d45d8775a8f | [] | no_license | PythonCHB/PythonIntroClass | 1986e553390c6f3504e279cda23744ceacc3a292 | b49d41bd04696d45ef4394b489de408cbd3b3d32 | refs/heads/master | 2020-12-24T17:35:31.408292 | 2014-10-16T18:09:21 | 2014-10-16T18:09:21 | 4,633,372 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | #!/usr/bin/env python
"""
httpdate.py
Module that provides a function that formats a date to the HTTP 1.1 spec
"""
import datetime
def httpdate(dt):
    """Format *dt* per RFC 1123 (HTTP/1.1), e.g. 'Sun, 06 Nov 1994 08:49:37 GMT'.

    :param dt: A python datetime object in the UTC (GMT) time zone,
        for example datetime.datetime.utcnow()

    Day and month names are looked up explicitly (rather than via
    strftime) so the result is independent of the process locale.
    """
    days = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
    months = ("Jan", "Feb", "Mar", "Apr", "May", "Jun",
              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
    return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
        days[dt.weekday()], dt.day, months[dt.month - 1],
        dt.year, dt.hour, dt.minute, dt.second)
def httpdate_now():
    """Return the current UTC time formatted per RFC 1123."""
    now = datetime.datetime.utcnow()
    return httpdate(now)
if __name__ == "__main__":
    # Demo: show the RFC 1123 rendering of the current time.
    # Fix: the original used Python 2 print statements, a SyntaxError on
    # Python 3.  Single-argument print(...) behaves identically on both.
    print("the HTTP 1.1 date string for now is:")
    print(httpdate_now())
"PythonCHB@gmail.com"
] | PythonCHB@gmail.com |
aaf2b4ad7020e6aa3c355fccb7edd6adbb177239 | 9b854d21fea95f24f7bde41e7a172e8a8a6327f9 | /tensorflow/python/kernel_tests/control_flow_ops_py_test.py | ffc640d1ba3de86f218bb626b4434cc1b6448a2a | [
"Apache-2.0"
] | permissive | devsangwoo/tensor | 84345bb05d969c732f70a8a64f2d070bf71d1f9b | 066592c9f9cdf4acdd1b9b104766271133e9088e | refs/heads/master | 2022-12-09T00:33:43.272931 | 2015-11-07T00:27:58 | 2020-01-10T07:33:06 | 232,987,148 | 1 | 0 | NOASSERTION | 2022-10-04T23:56:16 | 2020-01-10T07:06:05 | C++ | UTF-8 | Python | false | false | 228,890 | py | <<<<<<< HEAD
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OiR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-long-lambda
"""Tests for tensorflow.ops.control_flow_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import re
import sys
import time
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import while_v2 # pylint: disable=unused-import
# pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
import tensorflow.python.ops.tensor_array_grad
# pylint: enable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import nest
=======
# pylint: disable=g-long-lambda
"""Tests for tensorflow.ops.control_flow_ops."""
import math
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.pywrap_tensorflow import StatusNotOK
def check_op_order(graph):
  """Sanity check on the ordering of op id.

  Asserts that every op's id is greater than the ids of the ops producing
  its inputs; Merge ops are exempt (their back-edge input legitimately has
  a larger id).
  """
  for op in graph.get_operations():
    if op.type == "Merge":
      continue
    for v in op.inputs:
      assert v.op._id < op._id, (
          "The id of %s must be less than the id of %s" % (v.op.name, op.name))
  return True
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
def check_consumers(graph):
  """Sanity check on the consumer list of the tensors.

  Counts, for every tensor, how many ops in the graph list it as an input
  and verifies that this equals the length of the tensor's consumers()
  list.  Returns True when consistent, False otherwise.
  """
  consumer_count = {}
  for op in graph.get_operations():
    for v in op.inputs:
      cnt = consumer_count.get(v, 0)
      consumer_count[v] = cnt + 1
  # Fix: resolved the unresolved merge conflict here in favour of .items()
  # (the HEAD side) -- .iteritems() does not exist on Python 3, and the
  # conflict markers made the function a syntax error.
  for k, v in consumer_count.items():
    if len(k.consumers()) != v:
      return False
  return True
<<<<<<< HEAD
def all_fetchables():
  """Return the names of all fetchable tensors in the default graph."""
  graph = ops.get_default_graph()
  return [
      t.name
      for op in graph.get_operations()
      for t in op.outputs
      if graph.is_fetchable(t)
  ]
def all_feedables():
  """Return every feedable input tensor in the default graph."""
  graph = ops.get_default_graph()
  return [
      t
      for op in graph.get_operations()
      for t in op.inputs
      if graph.is_feedable(t)
  ]
def opt_cfg(do_constant_folding=True):
  """Build a ConfigProto with L1 optimizations and function inlining on.

  Constant folding can be toggled off for tests that need to observe the
  unfolded graph.
  """
  optimizer_opts = config_pb2.OptimizerOptions(
      opt_level=config_pb2.OptimizerOptions.L1,
      do_function_inlining=True,
      do_constant_folding=do_constant_folding)
  graph_opts = config_pb2.GraphOptions(optimizer_options=optimizer_opts)
  return config_pb2.ConfigProto(
      allow_soft_placement=True, graph_options=graph_opts)
def isum(s, maximum_iterations=None):
  """Run a while_loop that adds i = 0..9 into s; returns the final sum."""
  i = constant_op.constant(0, name="i")
  cond = lambda i, s: math_ops.less(i, 10)
  body = lambda i, s: [math_ops.add(i, 1), math_ops.add(i, s)]
  _, r_s = control_flow_ops.while_loop(
      cond, body, [i, s], maximum_iterations=maximum_iterations)
  return r_s
def enqueue_print_op(s):
  """Return an op that prints *s* with a prefix the test harness greps for."""
  message = "ControlFlowOpsTest: " + s
  return logging_ops.print_v2(message)
def filter_test_messages(s):
  """Return the payloads of lines that were emitted via enqueue_print_op."""
  prefix = "ControlFlowOpsTest: "
  messages = []
  for line in s.split("\n"):
    if line.startswith(prefix):
      messages.append(line[len(prefix):])
  return messages
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase, parameterized.TestCase):
  @test_util.run_v1_only("b/120545219")
  def testRefIdentity(self):
    """_Identity on a ref variable yields a Tensor that sees a later assign."""
    with self.cached_session():
      v = variables.VariableV1(7)
      v = control_flow_ops._Identity(v)
      op = state_ops.assign(v, 9)
      # with_dependencies forces the assign to run before v is read.
      v2 = control_flow_ops.with_dependencies([op], v)
      self.assertTrue(isinstance(v2, ops.Tensor))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(9, self.evaluate(v2))
@test_util.run_v1_only("b/120545219")
def testRefEnter(self):
with self.cached_session():
v = variables.VariableV1(7)
enter_v = control_flow_ops._Enter(v, "foo_1", is_constant=True)
nine = constant_op.constant(9)
enter_nine = gen_control_flow_ops.enter(nine, "foo_1")
op = state_ops.assign(enter_v, enter_nine)
v2 = control_flow_ops.with_dependencies([op], enter_v)
v3 = control_flow_ops.exit(v2)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(9, self.evaluate(v3))
@test_util.run_v1_only("b/120545219")
def testRefSwitch(self):
with self.cached_session():
v = variables.VariableV1(7)
p = constant_op.constant(True)
v1 = control_flow_ops._SwitchRefOrTensor(v._ref(), p) # pylint: disable=protected-access
v2 = state_ops.assign(v1[1], 9)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(9, self.evaluate(v2))
def testEnterMulExit(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
enter_data = gen_control_flow_ops.enter(data, "foo_1", False)
five = constant_op.constant(5)
enter_five = gen_control_flow_ops.enter(five, "foo_1", False)
mul_op = math_ops.multiply(enter_data, enter_five)
exit_op = control_flow_ops.exit(mul_op)
result = self.evaluate(exit_op)
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
@test_util.run_deprecated_v1
def testEnterShapePropagation(self):
with self.cached_session():
v = variables.Variable([0.0, 0.0], dtype=dtypes.float32)
# If is_constant=True, the shape information should be propagated.
enter_v_constant = gen_control_flow_ops.enter(
v, "frame1", is_constant=True)
self.assertEqual(enter_v_constant.shape, [2])
# Otherwise, the shape should be unknown.
enter_v_non_constant = gen_control_flow_ops.enter(
v, "frame2", is_constant=False)
self.assertEqual(enter_v_non_constant.shape, None)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeIndexedSlices(self):
with self.cached_session():
values = constant_op.constant([1, 2, 3, 4, 5, 6])
indices = constant_op.constant([0, 2, 4, 6, 8, 10])
data = ops.IndexedSlices(values, indices)
pred = ops.convert_to_tensor(True)
switch_op = control_flow_ops.switch(data, pred)
merge_op = control_flow_ops.merge(switch_op)[0]
val = merge_op.values
ind = merge_op.indices
self.assertAllEqual(np.arange(1, 7), val)
self.assertAllEqual(np.arange(0, 12, 2), ind)
@test_util.run_v1_only("b/120545219")
def testSwitchDeadBranch(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
dead_branch = array_ops.identity(switch_op[0])
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Retval[0] does not have value" in str(e)):
self.evaluate(dead_branch)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeLess(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
zero = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
less_op = math_ops.less(zero, one)
switch_op = control_flow_ops.switch(data, less_op)
merge_op = control_flow_ops.merge(switch_op)[0]
result = self.evaluate(merge_op)
self.assertAllEqual(np.arange(1, 7), result)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeAddIdentity(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(False, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
id_op = array_ops.identity(switch_op[1])
merge_op = control_flow_ops.merge([add_op, id_op])[0]
result = self.evaluate(merge_op)
self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeAddMul(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
five = constant_op.constant(5)
mul_op = math_ops.multiply(switch_op[1], five)
merge_op = control_flow_ops.merge([add_op, mul_op])[0]
result = self.evaluate(merge_op)
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
@test_util.run_v1_only("b/120545219")
def testLoop_false(self):
with self.cached_session():
false = ops.convert_to_tensor(False)
n = constant_op.constant(10)
enter_false = gen_control_flow_ops.enter(false, "foo_1", False)
enter_n = gen_control_flow_ops.enter(n, "foo_1", False)
merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
switch_n = control_flow_ops.switch(merge_n, enter_false)
exit_n = control_flow_ops.exit(switch_n[0])
next_n = control_flow_ops.next_iteration(switch_n[0])
merge_n.op._update_input(1, next_n)
result = self.evaluate(exit_n)
self.assertAllEqual(10, result)
@test_util.run_deprecated_v1
def testLoop_1(self):
with self.cached_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = gen_control_flow_ops.enter(zero, "foo", False)
enter_one = gen_control_flow_ops.enter(one, "foo", True)
enter_n = gen_control_flow_ops.enter(n, "foo", True)
with ops.device(test.gpu_device_name()):
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = self.evaluate(exit_i)
self.assertAllEqual(10, result)
@test_util.run_v1_only("b/120545219")
def testLoop_2(self):
with self.cached_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = gen_control_flow_ops.enter(zero, "foo", False)
enter_one = gen_control_flow_ops.enter(one, "foo", True)
enter_n = gen_control_flow_ops.enter(n, "foo", True)
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
with ops.device(test.gpu_device_name()):
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = self.evaluate(exit_i)
self.assertAllEqual(10, result)
@test_util.run_v1_only("b/120545219")
def testDifferentFrame(self):
with self.cached_session():
data = array_ops.placeholder(dtypes.float32, shape=[])
enter_1 = gen_control_flow_ops.enter(data, "foo_1", False)
enter_2 = gen_control_flow_ops.enter(data, "foo_2", False)
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError("has inputs from different frames"):
res.eval(feed_dict={data: 1.0})
  @test_util.run_deprecated_v1
  def testCondBool(self):
    """tf.cond must reject a Python bool predicate with a TypeError."""
    values = constant_op.constant(10)
    fn1 = lambda: math_ops.add(values, 1)
    fn2 = lambda: math_ops.subtract(values, 1)
    with self.assertRaisesRegexp(TypeError, "must not be a Python bool"):
      _ = control_flow_ops.cond(False, fn1, fn2)
@test_util.run_deprecated_v1
def testCondInt(self):
p = array_ops.placeholder(dtypes.bool, shape=[])
v = constant_op.constant(10)
fn1 = lambda: math_ops.add(v, 1)
fn2 = lambda: math_ops.subtract(v, 1)
y = control_flow_ops.cond(p, fn1, fn2)
grad = gradients_impl.gradients(y, [v])
self.assertAllEqual([None], grad)
  def testCondOutputShape(self):
    """cond's output carries the branches' common static shape (scalar)."""
    x = constant_op.constant(1.0)
    b = control_flow_ops.cond(
        constant_op.constant(True), lambda: math_ops.square(x),
        lambda: math_ops.subtract(x, 1.))
    self.assertEqual(b.shape, tensor_shape.TensorShape([]))
@test_util.run_v1_only("b/120545219")
def testFetchable(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32)
control_flow_ops.cond(
constant_op.constant(True), lambda: x + 2, lambda: x + 0)
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if graph.is_fetchable(t.op):
sess.run(t, feed_dict={x: 3})
else:
with self.assertRaisesRegexp(ValueError,
"has been marked as not fetchable"):
sess.run(t, feed_dict={x: 3})
@test_util.disable_control_flow_v2("Not relevant")
@test_util.run_v1_only("b/120545219")
def testFeedable(self):
with self.cached_session() as sess:
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
self.assertEqual(1000, r.eval(feed_dict={i0: 0}))
feedable_tensors = all_feedables()
for t in feedable_tensors:
sess.run(r, feed_dict={t: 3})
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if t not in feedable_tensors and t.dtype is dtypes.int32:
with self.assertRaisesRegexp(ValueError, "may not be fed"):
sess.run(r, feed_dict={t: 3})
@test_util.run_v1_only("b/120545219")
def testCondIndexedSlices(self):
with self.cached_session():
values = constant_op.constant([10])
indices = constant_op.constant([0])
x = ops.IndexedSlices(values, indices)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), indices)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values
ind = r.indices
self.assertAllEqual([11], val)
self.assertAllEqual([0], ind)
def testCondMismatchedIndexedSlices(self):
@def_function.function
def foo():
values = constant_op.constant([10])
indices = constant_op.constant([0])
x = ops.IndexedSlices(values, indices)
with self.assertRaisesRegexp(
TypeError, "Cannot reconcile tf.cond 0-th outputs"):
control_flow_ops.cond(
constant_op.constant(True),
lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices),
lambda: math_ops.add(x.values, 1), indices)
foo()
def testCondSparseTensor(self):
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([[0], [3]],
dtype=dtypes.int64,
name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
pred = math_ops.less(1, 2)
fn1 = lambda: sparse_tensor.SparseTensor(
indices + 1, x.values + 1, dense_shape=shape)
fn2 = lambda: sparse_tensor.SparseTensor(
indices, x.values - 1, dense_shape=shape)
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([3.0, 5.0], r.values)
self.assertAllEqual([[1], [4]], r.indices)
self.assertAllEqual(r.values.get_shape(), (2,))
def testCondRaggedTensor(self):
rt = ragged_factory_ops.constant([[1, 2], [3], [4, 5, 6]])
pred = math_ops.less(1, 2)
fn1 = lambda: array_ops.concat([rt + 2, [[100]]], axis=0)
fn2 = lambda: rt[:2] - 2
result = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([3, 4, 5, 6, 7, 8, 100], result.values)
self.assertAllEqual([0, 2, 3, 6, 7], result.row_splits)
@test_util.run_v1_only("b/120545219")
def testCondResource(self):
with self.cached_session():
rv = resource_variable_ops.ResourceVariable(True)
self.evaluate(variables.global_variables_initializer())
t = ops.convert_to_tensor(1.0)
def case():
assign = resource_variable_ops.assign_variable_op(rv.handle, False)
with ops.control_dependencies([assign]):
return array_ops.identity(t)
self.assertEqual(
1.0, self.evaluate(control_flow_ops.cond(rv, case, lambda: t)))
@test_util.run_deprecated_v1
def testCondResourceGradShape(self):
rv1 = resource_variable_ops.ResourceVariable([1.0, 2.0])
rv2 = resource_variable_ops.ResourceVariable([3.0, 4.0])
pred = constant_op.constant(True)
result = control_flow_ops.cond(pred, lambda: rv1, lambda: rv2)
grads = gradients_impl.gradients(result, [rv1, rv2])
self.assertAllEqual(grads[0].shape.as_list(), [2])
self.assertAllEqual(grads[1].shape.as_list(), [2])
@test_util.run_v1_only("b/120545219")
def testCondWithTensorArrayGrad(self):
with self.cached_session() as sess:
with ops.device(test.gpu_device_name()):
pred = array_ops.placeholder(dtypes.bool, [])
x = constant_op.constant([1.0, 2.0, 3.0])
y = control_flow_ops.cond(
pred, lambda: map_fn.map_fn(lambda z: z * 2.0, x),
lambda: constant_op.constant([1.0, 1.0, 1.0]))
g = gradients_impl.gradients(y, x)[0]
self.assertAllEqual(sess.run(g, {pred: True}), [2.0, 2.0, 2.0])
self.assertAllEqual(sess.run(g, {pred: False}), [0.0, 0.0, 0.0])
@test_util.run_v1_only("b/120545219")
def testCondIndexedSlicesDifferentTypes(self):
with self.cached_session():
values = constant_op.constant([10])
i_32 = ops.convert_to_tensor([0], name="one", dtype=dtypes.int32)
i_64 = ops.convert_to_tensor([0], name="one", dtype=dtypes.int64)
x = ops.IndexedSlices(values, i_32)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), i_32)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), i_64)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values
ind = r.indices
self.assertAllEqual([11], val)
self.assertAllEqual([0], ind)
self.assertTrue(ind.dtype == np.int64)
@test_util.run_v1_only("b/120545219")
def testCondColocation(self):
with self.session(use_gpu=True):
with ops.device("/cpu:0"):
v = variables.Variable(7.0)
x = constant_op.constant(10.0)
pred = math_ops.less(1.0, 2.0)
fn1 = lambda: math_ops.add(v, 1.0)
fn2 = lambda: math_ops.subtract(x, 1.0)
r = control_flow_ops.cond(pred, fn1, fn2)
for op in x.graph.get_operations():
if op.name == "cond/Add/Switch":
self.assertDeviceEqual(op.device, "/cpu:0")
def _testCond_1(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
r = control_flow_ops.cond(pred, fn1, fn2)
result = self.evaluate(r)
self.assertAllEqual(11, result)
def testCond_1(self):
self._testCond_1(use_gpu=False)
# TODO(b/116526896): Enable GPU tests.
# self._testCond_1(use_gpu=True)
  def testCond_2(self):
    """A false predicate takes the second branch: 10 - 1 == 9."""
    with self.cached_session():
      x = constant_op.constant(10)
      r = control_flow_ops.cond(
          math_ops.less(1, 0), lambda: math_ops.add(x, 1),
          lambda: math_ops.subtract(x, 1))
      result = self.evaluate(r)
      self.assertAllEqual(9, result)
  def testCond_3(self):
    """Nested cond: inner cond picks fn1 (10 + 1), outer adds 1 -> 12."""
    with self.cached_session():
      x = constant_op.constant(10)
      pred = math_ops.less(1, 2)
      fn1 = lambda: math_ops.add(x, 1)
      fn2 = lambda: math_ops.subtract(x, 1)
      fn3 = lambda: math_ops.add(control_flow_ops.cond(pred, fn1, fn2), 1)
      r = control_flow_ops.cond(pred, fn3, fn2)
      result = self.evaluate(r)
      self.assertAllEqual(12, result)
@test_util.run_in_graph_and_eager_modes
def testCondPruning(self):
v1 = variables.Variable(7)
v2 = variables.Variable(7)
v3 = variables.Variable(7)
def f():
age = constant_op.constant(3)
max_age = constant_op.constant(2)
pred = math_ops.greater(age, max_age)
fn1 = lambda: [state_ops.assign(v1, 1).op, state_ops.assign(v2, 2).op]
fn2 = lambda: [state_ops.assign(v3, 3).op, constant_op.constant(10).op]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertEqual(len(r), 2)
return r[1]
f_defun = eager_function.defun(f)
if not context.executing_eagerly():
with self.cached_session():
self.evaluate(variables.global_variables_initializer())
result = self.evaluate(f())
self.assertEqual(True, result)
# Only second cond result was fetched, so v1 assign shouldn't run.
self.assertEqual(7, self.evaluate(v1))
self.assertEqual(2, self.evaluate(v2))
self.assertEqual(7, self.evaluate(v3))
result = f_defun()
self.assertEqual(True, self.evaluate(result))
# Both v1 and v2 branch assignments should be run in defun.
self.assertEqual(1, self.evaluate(v1))
self.assertEqual(2, self.evaluate(v2))
self.assertEqual(7, self.evaluate(v3))
def testCond_5(self):
with self.cached_session():
alive = constant_op.constant(True, name="alive")
count = constant_op.constant(0, name="count")
def body(i):
return control_flow_ops.cond(
alive, lambda: [math_ops.less(i, 3), math_ops.add(count, 1)],
=======
def isum(s):
i = tf.constant(0, name="i")
c = lambda i, s: tf.less(i, 10)
b = lambda i, s: [tf.add(i, 1), tf.add(i, s)]
_, r_s = control_flow_ops.While(c, b, [i, s])
return r_s
class ControlFlowTest(tf.test.TestCase):
def testRefIdentity(self):
with self.test_session():
v = tf.Variable(7)
v = control_flow_ops._Identity(v)
op = tf.assign(v, 9)
v2 = control_flow_ops.with_dependencies([op], v)
self.assertTrue(check_op_order(v.graph))
self.assertTrue(isinstance(v2, tf.Tensor))
tf.initialize_all_variables().run()
self.assertEqual(9, v2.eval())
def testRefEnter(self):
with self.test_session():
v = tf.Variable(7)
enter_v = control_flow_ops._Enter(v, "foo_1")
nine = tf.constant(9)
enter_nine = control_flow_ops.enter(nine, "foo_1")
op = tf.assign(enter_v, enter_nine)
v2 = control_flow_ops.with_dependencies([op], enter_v)
v3 = control_flow_ops.exit(v2)
tf.initialize_all_variables().run()
self.assertEqual(9, v3.eval())
def testRefSwitch(self):
with self.test_session():
v = tf.Variable(7)
p = tf.constant(True)
v1 = control_flow_ops._SwitchRefOrTensor(v, p)
v2 = tf.assign(v1[1], 9)
tf.initialize_all_variables().run()
self.assertEqual(9, v2.eval())
def testEnterExit_1(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
enter_op = control_flow_ops.enter(data, "foo_1", False)
exit_op = control_flow_ops.exit(enter_op)
result = exit_op.eval()
self.assertAllEqual(np.array([1, 2, 3, 4, 5, 6]), result)
def testEnterMulExit_1(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
enter_data = control_flow_ops.enter(data, "foo_1", False)
five = tf.constant(5)
enter_five = control_flow_ops.enter(five, "foo_1", False)
mul_op = tf.mul(enter_data, enter_five)
exit_op = control_flow_ops.exit(mul_op)
result = exit_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testEnterNextExit_1(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
enter_op = control_flow_ops.enter(data, "foo_1", False)
next_op = control_flow_ops.next_iteration(enter_op)
exit_op = control_flow_ops.exit(next_op)
result = exit_op.eval()
self.assertAllEqual(np.array([1, 2, 3, 4, 5, 6]), result)
def testSwitchMergeIndexedSlices(self):
with self.test_session():
values = tf.constant([1, 2, 3, 4, 5, 6])
indices = tf.constant([0, 2, 4, 6, 8, 10])
data = tf.IndexedSlices(values, indices)
pred = tf.convert_to_tensor(True)
switch_op = control_flow_ops.switch(data, pred)
merge_op = control_flow_ops.merge(switch_op)[0]
val = merge_op.values.eval()
ind = merge_op.indices.eval()
self.assertAllEqual(np.arange(1, 7), val)
self.assertAllEqual(np.arange(0, 12, 2), ind)
def _testSwitchMerge_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
ports = tf.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
merge_op = control_flow_ops.merge(switch_op)[0]
result = merge_op.eval()
self.assertAllEqual(np.arange(1, 7), result)
def testSwitchMerge_1(self):
self._testSwitchMerge_1(use_gpu=False)
self._testSwitchMerge_1(use_gpu=True)
def testSwitchDeadBranch(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
ports = tf.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
dead_branch = tf.identity(switch_op[0])
with self.assertRaisesWithPredicateMatch(
StatusNotOK, lambda e: 'The tensor returned for' in str(e)):
dead_branch.eval()
def testSwitchMergeIdentity_1(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
ports = tf.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
merge_op = control_flow_ops.merge(switch_op)[0]
id_op = tf.identity(merge_op)
result = id_op.eval()
self.assertAllEqual(np.arange(1, 7), result)
def testSwitchMergeLess_0(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
zero = tf.constant(0)
one = tf.constant(1)
less_op = tf.less(zero, one)
switch_op = control_flow_ops.switch(data, less_op)
merge_op = control_flow_ops.merge(switch_op)[0]
result = merge_op.eval()
self.assertAllEqual(np.arange(1, 7), result)
def testSwitchMergeLess_1(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
zero = tf.convert_to_tensor(0)
one = tf.convert_to_tensor(1)
less_op = tf.less(zero, one)
switch_op = control_flow_ops.switch(data, less_op)
merge_op = control_flow_ops.merge(switch_op)[0]
result = merge_op.eval()
self.assertAllEqual(np.arange(1, 7), result)
def testSwitchMergeAddIdentity_0(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
ports = tf.convert_to_tensor(False, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = tf.constant(1)
add_op = tf.add(switch_op[0], one)
id_op = tf.identity(switch_op[1])
merge_op = control_flow_ops.merge([add_op, id_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
def testSwitchMergeAddIdentity_1(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
ports = tf.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = tf.constant(1)
add_op = tf.add(switch_op[0], one)
id_op = tf.identity(switch_op[1])
merge_op = control_flow_ops.merge([add_op, id_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.arange(1, 7), result)
def testSwitchMergeAddMul_0(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
ports = tf.convert_to_tensor(False, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = tf.constant(1)
add_op = tf.add(switch_op[0], one)
five = tf.constant(5)
mul_op = tf.mul(switch_op[1], five)
merge_op = control_flow_ops.merge([add_op, mul_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
def testSwitchMergeAddMul_1(self):
with self.test_session():
data = tf.constant([1, 2, 3, 4, 5, 6], name="data")
ports = tf.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = tf.constant(1)
add_op = tf.add(switch_op[0], one)
five = tf.constant(5)
mul_op = tf.mul(switch_op[1], five)
merge_op = control_flow_ops.merge([add_op, mul_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testLoop_false(self):
with self.test_session():
false = tf.convert_to_tensor(False)
n = tf.constant(10)
enter_false = control_flow_ops.enter(false, "foo_1", False)
enter_n = control_flow_ops.enter(n, "foo_1", False)
merge_n = control_flow_ops.merge([enter_n], name="merge_n")[0]
switch_n = control_flow_ops.switch(merge_n, enter_false)
exit_n = control_flow_ops.exit(switch_n[0])
result = exit_n.eval()
self.assertAllEqual(10, result)
def testLoop_false_1(self):
with self.test_session():
false = tf.convert_to_tensor(False)
n = tf.constant(10)
enter_false = control_flow_ops.enter(false, "foo_1", False)
enter_n = control_flow_ops.enter(n, "foo_1", False)
merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
switch_n = control_flow_ops.switch(merge_n, enter_false)
exit_n = control_flow_ops.exit(switch_n[0])
next_n = control_flow_ops.next_iteration(switch_n[0])
merge_n.op._update_input(1, next_n)
result = exit_n.eval()
self.assertAllEqual(10, result)
def testLoop_1(self):
with self.test_session():
zero = tf.convert_to_tensor(0)
one = tf.convert_to_tensor(1)
n = tf.constant(10)
enter_zero = control_flow_ops.enter(zero, "foo_1", False)
enter_one = control_flow_ops.enter(one, "foo_1", False)
enter_n = control_flow_ops.enter(n, "foo_1", False)
merge_zero = control_flow_ops.merge([enter_zero, enter_zero],
name="merge_zero")[0]
merge_one = control_flow_ops.merge([enter_one, enter_one],
name="merge_one")[0]
merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
less_op = tf.less(merge_n, merge_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_zero = control_flow_ops.switch(merge_zero, cond_op)
switch_one = control_flow_ops.switch(merge_one, cond_op)
switch_n = control_flow_ops.switch(merge_n, cond_op)
next_zero = control_flow_ops.next_iteration(switch_zero[1])
next_one = control_flow_ops.next_iteration(switch_one[1])
next_n = control_flow_ops.next_iteration(switch_n[1])
merge_zero.op._update_input(1, next_zero)
merge_one.op._update_input(1, next_one)
merge_n.op._update_input(1, next_n)
exit_n = control_flow_ops.exit(switch_n[0])
result = exit_n.eval()
self.assertAllEqual(10, result)
def testCondIndexedSlices(self):
with self.test_session():
values = tf.constant(10)
indices = tf.constant(0)
x = tf.IndexedSlices(values, indices)
pred = tf.less(1, 2)
fn1 = lambda: tf.IndexedSlices(tf.add(x.values, 1), indices)
fn2 = lambda: tf.IndexedSlices(tf.sub(x.values, 1), indices)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertTrue(check_op_order(x.values.graph))
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
def testCondIndexedSlicesDifferentTypes(self):
with self.test_session():
values = tf.constant(10)
i_32 = tf.convert_to_tensor(0, name="one", dtype=tf.int32)
i_64 = tf.convert_to_tensor(0, name="one", dtype=tf.int64)
x = tf.IndexedSlices(values, i_32)
pred = tf.less(1, 2)
fn1 = lambda: tf.IndexedSlices(tf.add(x.values, 1), i_32)
fn2 = lambda: tf.IndexedSlices(tf.sub(x.values, 1), i_64)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertTrue(check_op_order(x.values.graph))
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
self.assertTrue(ind.dtype == np.int64)
def _testCond_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
x = tf.constant(10)
pred = tf.less(1, 2)
fn1 = lambda: tf.add(x, 1)
fn2 = lambda: tf.sub(x, 1)
r = control_flow_ops.cond(pred, fn1, fn2)
result = r.eval()
self.assertTrue(check_op_order(x.graph))
self.assertAllEqual(11, result)
def testCond_1(self):
self._testCond_1(use_gpu=False)
self._testCond_1(use_gpu=True)
def testCond_2(self):
with self.test_session():
x = tf.constant(10)
r = control_flow_ops.cond(tf.less(1, 0), lambda: tf.add(x, 1),
lambda: tf.sub(x, 1))
result = r.eval()
self.assertTrue(check_op_order(x.graph))
self.assertAllEqual(9, result)
def testCond_3(self):
with self.test_session():
x = tf.constant(10)
pred = tf.less(1, 2)
fn1 = lambda: tf.add(x, 1)
fn2 = lambda: tf.sub(x, 1)
fn3 = lambda: tf.add(control_flow_ops.cond(pred, fn1, fn2), 1)
r = control_flow_ops.cond(pred, fn3, fn2)
result = r.eval()
self.assertTrue(check_op_order(x.graph))
self.assertAllEqual(12, result)
def testCond_4(self):
with self.test_session():
v1 = tf.Variable(7)
v2 = tf.Variable(7)
v3 = tf.Variable(7)
age = tf.constant(3)
max_age = tf.constant(2)
pred = tf.greater(age, max_age)
fn1 = lambda: [tf.assign(v1, 1).op, tf.assign(v2, 2).op]
fn2 = lambda: [tf.assign(v3, 3).op, tf.constant(10).op]
r = control_flow_ops.cond(pred, fn1, fn2)
tf.initialize_all_variables().run()
self.assertEqual(len(r), 2)
result = r[1].eval()
self.assertTrue(check_op_order(age.graph))
self.assertAllEqual(True, result)
self.assertAllEqual(7, v1.eval())
self.assertAllEqual(2, v2.eval())
self.assertAllEqual(7, v3.eval())
def testCond_5(self):
with self.test_session():
alive = tf.constant(True, name="alive")
count = tf.constant(0, name="count")
def body(i):
return control_flow_ops.cond(
alive, lambda: [tf.less(i, 3), tf.add(count, 1)],
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
lambda: [alive, count])
for i in range(10):
alive, count = body(i)
<<<<<<< HEAD
self.assertAllEqual(4, self.evaluate(count))
@test_util.run_v1_only("b/120545219")
def testCond_6(self):
with self.cached_session():
v1 = variables.Variable([7])
age = constant_op.constant(3)
pred = math_ops.greater(age, 4)
=======
self.assertAllEqual(4, count.eval())
def testCond_6(self):
with self.test_session():
v1 = tf.Variable([7])
age = tf.constant(3)
pred = tf.greater(age, 4)
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
fn1 = lambda: age
fn2 = lambda: v1
r = control_flow_ops.cond(pred, fn1, fn2)
<<<<<<< HEAD
self.evaluate(variables.global_variables_initializer())
result = self.evaluate(r)
self.assertAllEqual(np.array([7]), result)
def testCond_7(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: [math_ops.add(x, 1), math_ops.add(x, 2)]
fn2 = lambda: [y, y]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([11, 12], self.evaluate(r))
  @parameterized.parameters(dtypes.float32, dtypes.float64)
  @test_util.run_v1_only("Uses tf.gradients")
  def testCondResourceGrad(self, dtype):
    """Gradient through a cond whose taken branch returns a Variable.

    pred is greater(3., 4.) == False, so the cond yields v1 and the
    gradient of the result w.r.t. v1 is [1.]. Parameterized over float
    dtypes.
    """
    init = constant_op.constant([7.], dtype=dtype)
    v1 = variables.Variable(init)
    age = constant_op.constant(3., dtype=dtype)
    pred = math_ops.greater(age, 4.)
    fn1 = lambda: age
    fn2 = lambda: v1
    r = control_flow_ops.cond(pred, fn1, fn2)
    grad = gradients_impl.gradients(r, v1)[0]
    self.evaluate(variables.global_variables_initializer())
    self.assertAllEqual(grad, [1.])
  @test_util.run_gpu_only
  @test_util.run_deprecated_v1
  def testCond_Device(self):
    """A cond under a CPU device scope keeps everything on CPU.

    The true branch is defined outside the device scope, but since the
    cond itself is placed on CPU:0 the whole computation should stay on
    one device, yielding a single partition graph even with a GPU present.
    """
    x = constant_op.constant(-10.)
    # True branch function defined outside of device scope
    def true_fn():
      return math_ops.exp(x)
    with ops.device("CPU:0"):
      r = control_flow_ops.cond(
          constant_op.constant(True), true_fn, lambda: 0.)
      self.assertIn("cpu", r.device.lower())
    with session.Session() as sess:
      options = config_pb2.RunOptions(output_partition_graphs=True)
      run_metadata = config_pb2.RunMetadata()
      sess.run(r, options=options, run_metadata=run_metadata)
    # We expect that everything runs on CPU, even if GPU is available.
    self.assertEqual(len(run_metadata.partition_graphs), 1)
def _count_matching_switch_nodes_on_device(self, run_metadata, device_str):
# Returns the number of Switch nodes with type float32 placed on
# `device_str`.
device_graphs = [
g for g in run_metadata.partition_graphs
if device_str in g.node[0].device
]
self.assertLen(device_graphs, 1)
switch_nodes = [
n for n in device_graphs[0].node if n.op == "Switch" and
n.attr["T"].type == dtypes.float32.as_datatype_enum
]
return len(switch_nodes)
  @test_util.run_gpu_only
  @test_util.run_deprecated_v1
  def testCondSwitchColocatedWithInputWhenInputOnCPU(self):
    """The Switch created for a cond input is placed with that input (CPU)."""
    x = array_ops.placeholder(dtypes.float32)
    # `arg` is used in the cond then branch so a Switch node is created for it.
    # We test that the Switch node gets placed on the same device as `arg`.
    # We force `arg` to be on CPU here.
    with ops.device("CPU:0"):
      arg = x + 10.
    def true_fn():
      with ops.device("CPU:0"):
        return arg + 1
    r = control_flow_ops.cond(constant_op.constant(True), true_fn, lambda: 0.)
    with session.Session() as sess:
      run_metadata = config_pb2.RunMetadata()
      options = config_pb2.RunOptions(output_partition_graphs=True)
      sess.run(
          r, feed_dict={x: -10.}, options=options, run_metadata=run_metadata)
      self.assertEqual(len(run_metadata.partition_graphs), 2)
      # Check that the Switch for `arg` gets placed on CPU.
      self.assertEqual(
          self._count_matching_switch_nodes_on_device(run_metadata, "CPU"), 1)
      self.assertEqual(
          self._count_matching_switch_nodes_on_device(run_metadata, "GPU"), 0)
  @test_util.run_gpu_only
  @test_util.run_deprecated_v1
  def testCondSwitchColocatedWithInputWhenInputOnGPU(self):
    """The Switch created for a cond input is placed with that input (GPU)."""
    x = array_ops.placeholder(dtypes.float32)
    # `arg` is used in the cond then branch so a Switch node is created for it.
    # We test that the Switch node gets placed on the same device as `arg`.
    # Note: `arg` gets placed on GPU by default by the placer.
    arg = x + 10.
    def true_fn():
      with ops.device("CPU:0"):
        return arg + 1
    r = control_flow_ops.cond(constant_op.constant(True), true_fn, lambda: 0.)
    with session.Session() as sess:
      run_metadata = config_pb2.RunMetadata()
      options = config_pb2.RunOptions(output_partition_graphs=True)
      sess.run(
          r, feed_dict={x: -10.}, options=options, run_metadata=run_metadata)
      self.assertEqual(len(run_metadata.partition_graphs), 2)
      # Check that the Switch for `arg` gets placed on GPU.
      self.assertEqual(
          self._count_matching_switch_nodes_on_device(run_metadata, "CPU"), 0)
      self.assertEqual(
          self._count_matching_switch_nodes_on_device(run_metadata, "GPU"), 1)
  def testCondAccessTrueBranchTensorInFalseBranchRaises(self):
    """A tensor created inside true_fn must not leak into false_fn.

    true_fn mutates the shared `inputs` dict to hold a tensor it created;
    false_fn then reads that tensor, which must raise a ValueError naming
    the offending tensor.
    """
    @def_function.function
    def f():
      c = constant_op.constant(1.)
      inputs = {"c": c}
      def true_fn(inputs):
        inputs["c"] = array_ops.identity(inputs["c"], name="true_branch")
        return inputs["c"]
      def false_fn(inputs):
        return array_ops.identity(inputs["c"])
      pred = constant_op.constant(True)
      return control_flow_ops.cond(
          pred, lambda: true_fn(inputs), lambda: false_fn(inputs))
    with self.assertRaisesRegexp(
        ValueError,
        "Tensor true_branch:0 in true_fn is accessed from false_fn."):
      f()
  def testSwitchCaseAccessBranch1TensorInBranch4Raises(self):
    """A tensor created in one switch_case branch must not leak into another.

    Branch 1 stashes a tensor it created into the shared `inputs` dict;
    branch 4 reads it, which must raise a ValueError naming both branches.
    """
    @def_function.function
    def f():
      c = constant_op.constant(1.)
      inputs = {"c": c}
      def br1_fn(inputs):
        inputs["c"] = array_ops.identity(inputs["c"], name="br1_identity")
        return inputs["c"]
      def br4_fn(inputs):
        return array_ops.identity(inputs["c"])
      def other_fn():
        return array_ops.identity(c)
      return control_flow_ops.switch_case(
          constant_op.constant(2),
          [other_fn, lambda: br1_fn(inputs), other_fn, other_fn,
           lambda: br4_fn(inputs)])
    with self.assertRaisesRegexp(
        ValueError,
        "Tensor br1_identity:0 in branch 1 is accessed from branch 4."):
      f()
def testCondListOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: [math_ops.add(x, y), math_ops.add(x, y)]
fn2 = lambda: [y, y]
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertListEqual([210, 210], test_result)
def testTupleOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: (math_ops.add(x, y), math_ops.add(x, y))
fn2 = lambda: (y, y)
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertTupleEqual((210, 210), test_result)
def testDictOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: {"a": math_ops.add(x, y), "b": math_ops.add(x, y)}
fn2 = lambda: {"a": y, "b": y}
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertDictEqual({"a": 210, "b": 210}, test_result)
  def testEmbeddedListOutput(self):
    """cond with nested-list outputs preserves the nesting when strict."""
    x = constant_op.constant(10)
    y = constant_op.constant(200)
    pred = math_ops.less(1, 2)
    fn1 = lambda: [[math_ops.add(x, y), math_ops.add(x, y)]]
    fn2 = lambda: [[y, y]]
    # Pass strict=True flag as cond_v2 allows for tensors to be
    # in nested output structures as singletons
    r = control_flow_ops.cond(pred, fn1, fn2, strict=True)
    test_result = self.evaluate(r)
    self.assertListEqual([[210, 210]], test_result)
def testEmbeddedTupleOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: ((math_ops.add(x, y), math_ops.add(x, y)))
fn2 = lambda: ((y, y))
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertTupleEqual(((210, 210)), test_result)
  def testEmbeddedDictOutput(self):
    """cond branches returning nested dicts yield the same nested structure."""
    with self.cached_session() as sess:
      x = constant_op.constant(10)
      y = constant_op.constant(200)
      pred = math_ops.less(1, 2)
      fn1 = lambda: {"a": {"c": math_ops.add(x, y)},
                     "b": {"d": math_ops.add(x, y)}}
      fn2 = lambda: {"a": {"c": y},
                     "b": {"d": y}}
      r = control_flow_ops.cond(pred, fn1, fn2)
      test_result = self.evaluate(r)
      self.assertDictEqual({"a": {"c": 210}, "b": {"d": 210}}, test_result)
  @test_util.run_v1_only("b/120545219")
  def testCheckNestedOutputStruct(self):
    """Branches with mismatched output structures must raise at cond time.

    v1 and v2 control flow raise different exception types with different
    messages, so both variants are accepted depending on the active mode.
    """
    with self.cached_session() as sess:
      x = constant_op.constant(10)
      y = constant_op.constant(200)
      pred = math_ops.less(1, 2)
      fn1 = lambda: {"a": math_ops.add(x, y), "b": math_ops.add(x, y)}
      fn2 = lambda: {"c": y, "d": y}
      v1_msg = "The two structures don't have the same nested structure"
      v2_msg = ("true_fn and false_fn arguments to tf.cond must have the same "
                "number, type, and overall structure of return values.")
      with self.assertRaisesRegexp(
          TypeError if control_flow_util.ENABLE_CONTROL_FLOW_V2 else ValueError,
          v2_msg if control_flow_util.ENABLE_CONTROL_FLOW_V2 else v1_msg):
        control_flow_ops.cond(pred, fn1, fn2)
  @test_util.run_deprecated_v1
  def testCondRef(self):
    """cond with a ref-type variable in one branch and a constant in the other.

    The predicate is False, so the constant branch's value is returned;
    the uninitialized ref variable in the untaken branch must not error.
    """
    with self.cached_session():
      x = gen_state_ops.variable(
          shape=[1],
          dtype=dtypes.float32,
          name="x",
          container="",
          shared_name="")
      true_fn = lambda: x
      false_fn = lambda: constant_op.constant([2.0])
      r = control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
      self.assertAllEqual([2.0], self.evaluate(r))
  @test_util.run_v1_only("b/120545219")
  def testCondWithControl(self):
    """A control dependency inside a cond branch does not affect its result.

    The dependency on the fed placeholder gates an unused intermediate
    (`a + 1`); the branch still returns a + 2 == 5.
    """
    with self.cached_session() as sess:
      control_holder = array_ops.placeholder(dtypes.float32, shape=())
      a = constant_op.constant(3)
      def true_branch():
        with ops.control_dependencies([control_holder]):
          _ = a + 1
        return a + 2
      r = control_flow_ops.cond(
          constant_op.constant(True), true_branch,
          lambda: constant_op.constant(1))
      result = sess.run(r, feed_dict={control_holder: 5.})
      self.assertEqual(5, result)
  @test_util.run_v1_only("b/120545219")
  def testUninitializedRefIdentity(self):
    """ref_identity must accept an uninitialized ref variable as input.

    The variable is switched on its own initialized-ness; both outputs are
    uninitialized refs, but only the assign path actually dereferences the
    variable before it is initialized, so the construction is valid.
    """
    with self.cached_session() as sess:
      v = gen_state_ops.variable(
          shape=[1],
          dtype=dtypes.float32,
          name="v",
          container="",
          shared_name="")
      inited = state_ops.is_variable_initialized(v)
      v_f, v_t = control_flow_ops.ref_switch(v, inited)
      # Both v_f and v_t are uninitialized references. However, an actual use
      # of the reference in the 'true' branch in the 'tf.identity' op will
      # not 'fire' when v is uninitialized, so this is a valid construction.
      # This test tests that ref_identity allows uninitialized ref as input
      # so that this construction is allowed.
      v_f_op = gen_array_ops.ref_identity(v_f)
      v_t_op = gen_array_ops.ref_identity(v_t)
      with ops.control_dependencies([v_f_op]):
        assign_v = state_ops.assign(v, [1.0])
      with ops.control_dependencies([v_t_op]):
        orig_v = array_ops.identity(v)
      merged_op = control_flow_ops.merge([assign_v, orig_v])
      self.assertAllEqual([1.0], self.evaluate(merged_op.output))
  def testCondSwitchIdentity(self):
    """The dead (Assert) branch must not fire when its identity is optimized.

    Runs under an aggressively-optimizing session config; if the identity
    were removed, the false branch's Assert would incorrectly trigger.
    """
    # Make sure the recv identity is not removed by optimization.
    with session.Session(config=opt_cfg()) as sess:
      pred = constant_op.constant(True)
      def fn1():
        return control_flow_ops.no_op()
      def fn2():
        return control_flow_ops.Assert(False, ["Wrong branch!!!"])
      r = control_flow_ops.cond(pred, fn1, fn2)
      self.evaluate(r)
  def testCondRecvIdentity(self):
    """Cross-device cond: the dead branch on another device must not fire.

    The false branch's Assert is pinned to CPU while the cond runs on GPU;
    the cross-device identity must survive optimization so the dead branch
    stays dead.
    """
    # Make sure the switch identity is not removed by optimization.
    with session.Session(config=opt_cfg()) as sess:
      with ops.device(test.gpu_device_name()):
        pred = constant_op.constant(True)
      def fn1():
        return control_flow_ops.no_op()
      def fn2():
        with ops.device("/cpu:0"):
          return control_flow_ops.Assert(False, ["Wrong branch!!!"])
      r = control_flow_ops.cond(pred, fn1, fn2)
      self.evaluate(r)
  @test_util.run_deprecated_v1
  @test_util.enable_control_flow_v2
  def testDisableLoweringSwitchMerge(self):
    """The single-threaded executor must not lower cond to Switch/Merge.

    Runs the same cond twice: once with the default executor (lowering
    enabled — Switch nodes appear in the step stats) and once with the
    single-threaded executor (lowering disabled — no Switch nodes).
    """
    if test_util.is_gpu_available():
      self.skipTest(
          "Single threaded executor doesn't support partitioned graphs. "
          "Skipping GPU test.")
    # Make pred feedable to ensure we don't constant-fold it out.
    run_opts = config_pb2.RunOptions(
        trace_level=config_pb2.RunOptions.FULL_TRACE)
    run_metadata_no_lowering = config_pb2.RunMetadata()
    run_metadata_with_lowering = config_pb2.RunMetadata()
    config = opt_cfg(do_constant_folding=False)
    pred = array_ops.placeholder_with_default(
        constant_op.constant(True), shape=())
    r = control_flow_ops.cond(pred, lambda: True, lambda: False)
    with session.Session(config=config) as sess:
      r_value = sess.run(
          r, options=run_opts, run_metadata=run_metadata_with_lowering)
      self.assertEqual(r_value, True)
    # Use the single threaded executor, which disables control flow lowering.
    config.experimental.executor_type = "SINGLE_THREADED_EXECUTOR"
    with session.Session(config=config) as sess:
      r_value = sess.run(
          r, options=run_opts, run_metadata=run_metadata_no_lowering)
      self.assertEqual(r_value, True)
    self.assertTrue(  # pylint: disable=g-complex-comprehension
        any("switch" in ns.node_name
            for dev_stat in run_metadata_with_lowering.step_stats.dev_stats
            for ns in dev_stat.node_stats))
    self.assertTrue(  # pylint: disable=g-complex-comprehension
        all("switch" not in ns.node_name
            for dev_stat in run_metadata_no_lowering.step_stats.dev_stats
            for ns in dev_stat.node_stats))
@test_util.run_v1_only("b/120545219")
def testCondGrad_1(self):
with self.cached_session():
x = constant_op.constant(10.0, name="x")
pred = math_ops.less(1, 2)
fn1 = lambda: array_ops.identity(x)
fn2 = lambda: array_ops.identity(x)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(1.0, self.evaluate(grad))
  @test_util.run_deprecated_v1
  @test_util.enable_control_flow_v2
  def testCondComputeGradAfterSessRunFails(self):
    """Taking gradients after the cond has run fails without intermediates.

    With control flow v2 and output_all_intermediates disabled, the
    intermediate `a` needed by the gradient is not available, so
    evaluating the gradient raises with a hint to enable intermediates.
    """
    with self.cached_session():
      x = constant_op.constant(10.0, name="x")
      pred = math_ops.less(1, 2)
      def true_fn():
        a = x * x
        return a * a
      def false_fn():
        return x * x
      r = control_flow_ops.cond(pred, true_fn, false_fn)
      self.assertAllEqual(r, 10000.)
      grad = gradients_impl.gradients(r, [x])[0]
      with self.assertRaisesRegexp(
          errors_impl.InvalidArgumentError,
          r"Connecting to invalid output 1 of source node cond which has 1 "
          r"outputs. Try using "
          "tf.compat.v1.experimental.output_all_intermediates\(True\)."):
        self.evaluate(grad)
  @test_util.run_deprecated_v1
  @test_util.enable_output_all_intermediates
  def testCondComputeGradAfterSessRun(self):
    """With intermediates exported, gradients can be taken after running.

    d/dx (x*x)*(x*x) = 4x^3 = 4000 at x = 10.
    """
    with self.cached_session():
      x = constant_op.constant(10.0, name="x")
      pred = math_ops.less(1, 2)
      def true_fn():
        a = x * x
        return a * a
      def false_fn():
        return x * x
      r = control_flow_ops.cond(pred, true_fn, false_fn)
      self.assertAllEqual(r, 10000.)
      grad = gradients_impl.gradients(r, [x])[0]
      self.assertAllEqual(grad, 4000.)
  @test_util.run_deprecated_v1
  @test_util.enable_output_all_intermediates
  def testNestedCondComputeGradAfterSessRun(self):
    """Same as testCondComputeGradAfterSessRun, but with a nested cond.

    The inner cond (always True) computes (x*x)*(x*x); the gradient at
    x = 10 is 4x^3 = 4000.
    """
    with self.cached_session():
      x = constant_op.constant(10.0, name="x")
      pred = math_ops.less(1, 2)
      def true_fn():
        def inner_true_fn():
          a = x * x
          return a * a
        def inner_false_fn():
          return x * x
        return control_flow_ops.cond(
            constant_op.constant(True), inner_true_fn, inner_false_fn)
      def false_fn():
        return x * x
      r = control_flow_ops.cond(pred, true_fn, false_fn)
      self.assertAllEqual(r, 10000.)
      grad = gradients_impl.gradients(r, [x])[0]
      self.assertAllEqual(grad, 4000.)
@test_util.run_deprecated_v1
def testCondGrad_2(self):
with self.cached_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
x = constant_op.constant(10.0)
pred = math_ops.less(c, 2)
fn1 = lambda: math_ops.multiply(x, 42.0)
fn2 = lambda: math_ops.multiply(x, 3.0)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))
  @test_util.disable_control_flow_v2(
      "b/110550782 (gradient w.r.t external variable)")
  @test_util.run_deprecated_v1
  def testCondGrad_3(self):
    """A cond branch that itself computes a gradient w.r.t. an outer tensor.

    fn1 returns d/d(ox) of (7*ox)^2 = 2*49*ox = 980 at ox = 10; fn2
    returns 3*ox = 30. The feed on c selects the branch.
    """
    with self.cached_session():
      c = array_ops.placeholder(dtypes.int32, shape=[])
      ox = constant_op.constant(10.0)
      pred = math_ops.less(c, 2)
      def fn1(x):
        m = x * x
        return gradients_impl.gradients(m, [ox])[0]
      fn2 = lambda: math_ops.multiply(ox, 3.0)
      y = math_ops.multiply(7.0, ox)
      r = control_flow_ops.cond(pred, lambda: fn1(y), fn2)
      self.assertAllEqual(980.0, r.eval(feed_dict={c: 1}))
      self.assertAllEqual(30.0, r.eval(feed_dict={c: 3}))
  @test_util.run_deprecated_v1
  def testCondGradMultiDevice(self):
    """First and second derivatives of a cond across two CPU devices.

    The cond is placed on cpu:0, its gradient on cpu:1, and the second
    derivative back on cpu:0; soft placement lets the graph partition.
    """
    config = config_pb2.ConfigProto(device_count={"CPU": 2},
                                    allow_soft_placement=True)
    with self.cached_session(use_gpu=True, config=config) as sess:
      pred = array_ops.placeholder(dtypes.bool, [])
      x = array_ops.placeholder(dtypes.float32)
      y = array_ops.placeholder(dtypes.float32)
      with ops.device("/cpu:0"):
        z = control_flow_ops.cond(pred, lambda: x * y * 2.0, lambda: 2.0)
      with ops.device("/cpu:1"):
        grad = gradients_impl.gradients(z, x)[0]
      with ops.device("/cpu:0"):
        grad_grad = gradients_impl.gradients(grad, x)[0]
      self.assertEqual(sess.run(grad, {pred: True, x: 1.0, y: 2.0}), 4.0)
      self.assertEqual(sess.run(grad, {pred: False, x: 1.0, y: 2.0}), 0.0)
      # v1 control flow gets None second derivative for some reason.
      if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
        self.assertIsNone(grad_grad)
        return
      self.assertEqual(sess.run(grad_grad, {pred: True, x: 1.0, y: 2.0}), 0.0)
      self.assertEqual(sess.run(grad_grad, {pred: False, x: 1.0, y: 2.0}), 0.0)
  @test_util.run_v1_only("b/120545219")
  def testNestedCond_Simple(self):
    """Gradient through a cond nested inside another cond is 1.

    Every branch is the identity on x, so regardless of which outer
    branch is taken the derivative dy/dx (and dz/dx) is 1.
    """
    with self.cached_session():
      x = constant_op.constant(0., name="X")
      y = control_flow_ops.cond(
          constant_op.constant(True), lambda: x,
          lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
      result = gradients_impl.gradients(y, x)[0]
      self.assertEqual(1.0, self.evaluate(result))
      z = control_flow_ops.cond(
          constant_op.constant(False), lambda: x,
          lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
      result = gradients_impl.gradients(z, x)[0]
      self.assertEqual(1.0, self.evaluate(result))
  @test_util.run_v1_only("b/120545219")
  def testCondGrad_Gather(self):
    """Gradient of a cond mixing identity and gather branches.

    The gather branch produces an IndexedSlices gradient; with control
    flow v2 the combined gradient stays an IndexedSlices, and feeding c
    selects which branch's gradient appears.
    """
    with self.cached_session() as sess:
      v1 = variables.Variable([1.0, 42.0])
      c = array_ops.placeholder(dtypes.int32, shape=[])
      pred = math_ops.less(c, 2)
      fn1 = lambda: array_ops.identity(v1)
      fn2 = lambda: array_ops.gather(v1, [1, 1])
      r = control_flow_ops.cond(pred, fn1, fn2)
      # The following `grad` is a Tensor since it is the aggregation of an
      # IndexedSlice and a Tensor. It is an `IndexedSlices` with control flow
      # v2.
      grad = gradients_impl.gradients(r, [v1])[0]
      self.evaluate(variables.global_variables_initializer())
      if control_flow_util.ENABLE_CONTROL_FLOW_V2:
        self.assertIsInstance(grad, ops.IndexedSlices)
      grad_value = sess.run(grad, feed_dict={c: 1})
      self.assertAllEqual(gradient_checker_v2._to_numpy(grad_value), [1.0, 1.0])
      grad_value = sess.run(grad, feed_dict={c: 3})
      self.assertAllEqual(gradient_checker_v2._to_numpy(grad_value), [0.0, 2.0])
  @test_util.run_deprecated_v1
  def testCondGrad_ResourceVarSparseRead(self):
    """Gradient of a cond reading a resource variable via sparse_read.

    The sparse_read gradient is an IndexedSlices; only the read rows
    (1 and 2) receive nonzero gradient.
    """
    # NOTE(skyewm): this test is interesting because the
    # ResourceVariable.sparse_read gradient function returns IndexedSlices.
    var = resource_variable_ops.ResourceVariable(
        np.ones((4, 2), dtype=np.float32))
    x = constant_op.constant(1.0)
    r = control_flow_ops.cond(
        constant_op.constant(True),
        lambda: x * math_ops.reduce_sum(var.sparse_read([1, 2])),
        lambda: constant_op.constant(np.zeros((2, 3)),
                                     dtype=dtypes.float32))
    grad = gradients_impl.gradients(r, var)[0]
    self.evaluate(variables.global_variables_initializer())
    grad_val = self.evaluate(grad)
    self.assertIsInstance(grad_val, ops.IndexedSlicesValue)
    self.assertAllEqual(gradient_checker_v2._to_numpy(grad_val), [[0., 0.],
                                                                  [1., 1.],
                                                                  [1., 1.],
                                                                  [0., 0.]])
  def testCondGrad_MultiGather(self):
    """Gradients of a cond with several gather/sparse_read outputs.

    Checks that gradients w.r.t. the variable and the gathered tensor are
    IndexedSlices with the expected dense values, and that the scalar x2
    gradient aggregates correctly.
    """
    # NOTE(skyewm): this test is interesting because the array_ops.gather and
    # ResourceVariable.sparse_read gradient functions returns IndexedSlices.
    var = resource_variable_ops.ResourceVariable(
        np.ones((4, 2), dtype=np.float32))
    x1 = constant_op.constant(np.ones((3, 3), dtype=np.float32))
    x2 = constant_op.constant(2.0)
    def true_fn():
      y1 = var.sparse_read([1, 2])
      y2 = array_ops.gather(x1, [2]) * x2
      y3 = x2 * [1., 1., 1.]
      return y1, y2, y3
    def false_fn():
      y1 = np.zeros((2, 2), dtype=np.float32)
      y2 = array_ops.gather(x1, [2]) * x2
      y3 = array_ops.gather(x1, [2])
      return y1, y2, y3
    @def_function.function
    def foo():
      r = control_flow_ops.cond(constant_op.constant(True), true_fn, false_fn)
      return gradients_impl.gradients(r, [var, x1, x2])
    grad = foo()
    self.evaluate(variables.global_variables_initializer())
    var_grad, x1_grad, x2_grad = self.evaluate(grad)
    self.assertIsInstance(var_grad, ops.IndexedSlicesValue)
    self.assertAllEqual(gradient_checker_v2._to_numpy(var_grad), [[0., 0.],
                                                                  [1., 1.],
                                                                  [1., 1.],
                                                                  [0., 0]])
    self.assertIsInstance(x1_grad, ops.IndexedSlicesValue)
    self.assertAllEqual(gradient_checker_v2._to_numpy(x1_grad), [[0., 0., 0.],
                                                                 [0., 0., 0.],
                                                                 [2., 2., 2.]])
    self.assertIsInstance(x1_grad, ops.IndexedSlicesValue)
    self.assertEqual(gradient_checker_v2._to_numpy(x2_grad), 6.)
  @test_util.run_v1_only("b/120545219")
  def testCondPredicateTensor(self):
    """Regression test for lowering predicate from non-first output of an op."""
    @eager_function.defun
    def foo():
      return constant_op.constant("foo"), constant_op.constant(True)
    # The predicate is the defun's *second* output; cond lowering must handle
    # a non-zero output index.
    r = control_flow_ops.cond(foo()[1], lambda: 1.0, lambda: 2.0)
    self.assertEqual(self.evaluate(r), 1.0)
  @test_util.run_v1_only("Tests Session.run() pruning logic.")
  def testCondFeedConstantPredicate(self):
    """Feeding a constant predicate must override its value at run time."""
    with self.cached_session() as sess:
      value = constant_op.constant(37.0)
      predicate = constant_op.constant(True)
      cond_output = control_flow_ops.cond(
          predicate, lambda: constant_op.constant(0.0), lambda: value)
      result = array_ops.identity(cond_output)
      # Each feed selects the corresponding branch; no feed uses the constant.
      self.assertEqual(37.0, sess.run(result, feed_dict={predicate: False}))
      self.assertEqual(0.0, sess.run(result, feed_dict={predicate: True}))
      self.assertEqual(0.0, sess.run(result))
  @test_util.run_v1_only("Tests Session.run() pruning logic.")
  def testCondFeedPlaceholderWithDefaultPredicate(self):
    """Same as testCondFeedConstantPredicate but with placeholder_with_default."""
    with self.cached_session() as sess:
      value = constant_op.constant(37.0)
      predicate = array_ops.placeholder_with_default(
          constant_op.constant(True), [])
      cond_output = control_flow_ops.cond(
          predicate, lambda: constant_op.constant(0.0), lambda: value)
      result = array_ops.identity(cond_output)
      self.assertAllEqual(37.0, sess.run(result, feed_dict={predicate: False}))
      self.assertAllEqual(0.0, sess.run(result, feed_dict={predicate: True}))
      self.assertAllEqual(0.0, sess.run(result))
  @test_util.run_in_graph_and_eager_modes
  def testCondAutoControlDeps(self):
    """Checks which branch prints execute under graph pruning vs. defuns.

    Print "C" is a control dependency of the branch output, so it always
    runs; "A" and "B" only run where automatic control dependencies are
    added (inside defuns) and are pruned otherwise.
    """
    if test_util.is_gpu_available():
      self.skipTest("b/128676188 causes OOM on opensource gpu tests")
    # NOTE(review): print_prefix appears unused in this test.
    print_prefix = "testCondAutoControlDeps: "
    def branch_fn():
      enqueue_print_op("A")
      enqueue_print_op("B")
      with ops.control_dependencies([enqueue_print_op("C")]):
        return constant_op.constant(10)
    def build_cond():
      return control_flow_ops.cond(
          constant_op.constant(True), branch_fn, lambda: 0)
    def build_nested_cond():
      return control_flow_ops.cond(
          constant_op.constant(True), build_cond, lambda: 0)
    # In v1 graph mode, pruning should make only "C" print.
    if not context.executing_eagerly():
      with self.cached_session():
        with self.captureWritesToStream(sys.stderr) as printed:
          self.assertEqual(self.evaluate(build_cond()), 10)
        self.assertEqual(["C"], filter_test_messages(printed.contents()))
        with self.captureWritesToStream(sys.stderr) as printed:
          self.assertEqual(self.evaluate(build_nested_cond()), 10)
        self.assertEqual(["C"], filter_test_messages(printed.contents()))
    # In defuns, all prints should execute in program order.
    # This doesn't work with legacy control flow.
    if control_flow_util.ENABLE_CONTROL_FLOW_V2:
      @eager_function.defun
      def cond():
        return build_cond()
      with self.captureWritesToStream(sys.stderr) as printed:
        self.assertEqual(self.evaluate(cond()), 10)
      self.assertEqual(["A", "B", "C"],
                       filter_test_messages(printed.contents()))
      @eager_function.defun
      def nested_cond():
        return build_nested_cond()
      with self.captureWritesToStream(sys.stderr) as printed:
        self.assertEqual(self.evaluate(nested_cond()), 10)
      self.assertEqual(["A", "B", "C"],
                       filter_test_messages(printed.contents()))
      # wrap_function should prune.
      def pruned_cond():
        return build_cond()
      pruned_cond = wrap_function.wrap_function(pruned_cond, [])
      with self.captureWritesToStream(sys.stderr) as printed:
        self.assertEqual(self.evaluate(pruned_cond()), 10)
      self.assertEqual(["C"], filter_test_messages(printed.contents()))
      def pruned_nested_cond():
        return build_nested_cond()
      pruned_nested_cond = wrap_function.wrap_function(pruned_nested_cond, [])
      with self.captureWritesToStream(sys.stderr) as printed:
        self.assertEqual(self.evaluate(pruned_nested_cond()), 10)
      self.assertEqual(["C"], filter_test_messages(printed.contents()))
  @test_util.run_in_graph_and_eager_modes
  def testWhileAutoControlDeps(self):
    """Checks print execution inside while_loop cond/body under v2 control flow.

    "D" is a control dependency of the loop-carried value and always runs;
    "A"/"B"/"C" only run where automatic control dependencies apply (defuns).
    """
    # Legacy while_loop fails this test because it produces deprecation notices
    # in stderr.
    if not control_flow_util.ENABLE_CONTROL_FLOW_V2: return
    def cond(i, unused_x):
      enqueue_print_op("A")
      return i < 2
    def body(i, x):
      enqueue_print_op("B")
      with ops.control_dependencies([enqueue_print_op("C")]):
        x = array_ops.identity(x)
      with ops.control_dependencies([enqueue_print_op("D")]):
        return i + 1, x
    def build_while():
      return control_flow_ops.while_loop(
          cond, body, [constant_op.constant(0), constant_op.constant(0)])
    def build_nested_while():
      return control_flow_ops.cond(
          constant_op.constant(True), build_while, lambda: [0, 0])
    # In v1 graph mode, pruning should make only "D" print.
    if not context.executing_eagerly():
      with self.cached_session():
        with self.captureWritesToStream(sys.stderr) as printed:
          self.assertEqual(self.evaluate(build_while()[0]), 2)
        self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))
        with self.captureWritesToStream(sys.stderr) as printed:
          self.assertEqual(self.evaluate(build_nested_while()[0]), 2)
        self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))
    # In defuns, all prints should execute in program order.
    @eager_function.defun
    def while_loop():
      return build_while()[0]
    with self.captureWritesToStream(sys.stderr) as printed:
      self.assertEqual(self.evaluate(while_loop()), 2)
    # Final "A" is the cond evaluation that terminates the loop.
    self.assertEqual(["A", "B", "C", "D", "A", "B", "C", "D", "A"],
                     filter_test_messages(printed.contents()))
    @eager_function.defun
    def nested_while_loop():
      return build_nested_while()[0]
    with self.captureWritesToStream(sys.stderr) as printed:
      self.assertEqual(self.evaluate(nested_while_loop()), 2)
    self.assertEqual(["A", "B", "C", "D", "A", "B", "C", "D", "A"],
                     filter_test_messages(printed.contents()))
    # wrap_function should prune.
    def pruned_while():
      return build_while()[0]
    pruned_while = wrap_function.wrap_function(pruned_while, [])
    with self.captureWritesToStream(sys.stderr) as printed:
      self.assertEqual(self.evaluate(pruned_while()), 2)
    self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))
    def pruned_nested_while():
      return build_nested_while()[0]
    pruned_nested_while = wrap_function.wrap_function(pruned_nested_while, [])
    with self.captureWritesToStream(sys.stderr) as printed:
      self.assertEqual(self.evaluate(pruned_nested_while()), 2)
    self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))
# Microbenchmark: 256,000 iterations/s.
def testWhile_1(self):
with self.cached_session():
n = constant_op.constant(0)
c = lambda x: math_ops.less(x, 10000)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, self.evaluate(r))
  @test_util.run_v1_only("b/120545219")
  def testWhileExternalControlDependencies(self):
    """An external control dependency inside the body runs exactly once."""
    with self.cached_session():
      v = variables.Variable(0.0)
      v.initializer.run()
      # Created outside the loop, so it is lifted out and executed once even
      # though the body references it on every iteration.
      increment = v.assign_add(1.0).read_value()
      def body_fn(i):
        with ops.control_dependencies([increment]):
          return i + 1
      result = control_flow_ops.while_loop(cond=lambda i: i < 2,
                                           body=body_fn, loop_vars=[1])
      self.assertAllEqual(result, 2)
      self.assertAllEqual(v.read_value(), 1.0)
  @test_util.run_v1_only("b/120545219")
  def testWhileExternalControlDependenciesNoInput(self):
    """Like the test above, but the body ignores its loop variable entirely."""
    with self.cached_session():
      v = variables.Variable(0.0)
      v.initializer.run()
      # TODO(apassos): figure out why the reading is necessary here.
      increment = v.assign_add(1.0).read_value()
      def body_fn(unused_i):
        with ops.control_dependencies([increment]):
          return constant_op.constant(5, name="five")
      result = control_flow_ops.while_loop(cond=lambda i: i < 5,
                                           body=body_fn, loop_vars=[0])
      self.evaluate(result)
      # The external increment still runs exactly once.
      self.assertAllEqual(self.evaluate(v), 1.0)
  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileWithRefs_1(self):
    """A ref-dtype loop variable keeps its ref dtype through the loop body."""
    with self.cached_session() as sess:
      x = variables.VariableV1(0)._ref()  # pylint: disable=protected-access
      i = constant_op.constant(0)
      c = lambda i, x: math_ops.less(i, 100)
      self.assertEqual(x.dtype, dtypes.int32_ref)
      def b(i, x):
        self.assertEqual(x.dtype, dtypes.int32_ref)
        return (i + 1, gen_array_ops.ref_identity(x))
      r = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=5)
      self.evaluate(variables.global_variables_initializer())
      # The loop output de-refs only the second variable's dtype... it stays
      # a ref; the counter output is a plain int32.
      self.assertEqual(r[0].dtype, dtypes.int32)
      self.assertEqual(r[1].dtype, dtypes.int32_ref)
      value_i, value_x = self.evaluate(r)
      self.assertEqual(100, value_i)
      self.assertEqual(0, value_x)
def testWhile_2(self):
with self.cached_session():
s = constant_op.constant(0)
r = isum(s)
self.assertAllEqual(45, self.evaluate(r))
def testWhileWithMaximumIterations(self):
with self.cached_session():
s = constant_op.constant([1, 2, 3, 4, 5])
r = isum(s, maximum_iterations=3)
self.assertAllEqual([1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3], self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileWithMaximumIterationsAndSingleArgument(self):
with self.cached_session():
r = control_flow_ops.while_loop(
lambda i: i < 3, lambda i: i + 1, [0], maximum_iterations=1)
self.assertEqual(1, self.evaluate(r))
  @test_util.run_v1_only("b/120545219")
  def testXLAGradInLoop(self):
    """Shape must feed BroadcastGradientArgs directly inside an XLA loop."""
    # We have an optimization that moves certain reduction ops, this test makes
    # sure we don't do that for XLA ops.
    # Use dynamic inputs, which triggers the creation of "BroadcastGradientArgs"
    # and "Shape" op.
    input1 = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
    input2 = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
    def cond(i1, i2):
      return False
    def body(i1, i2):
      return math_ops.add(i1, i2), math_ops.add(i1, i2)
    xla_context = control_flow_ops.XLAControlFlowContext()
    xla_context.Enter()
    out1, _ = control_flow_ops.while_loop(
        cond, body, (input1, input2), maximum_iterations=2)
    g = gradients_impl.gradients(out1, [input1])
    for op in out1.graph.get_operations():
      # Test that the "Shape" is directly passed to BroadcastGradientArgs
      # instead of being pushed to the stack.
      if op.type == "BroadcastGradientArgs":
        self.assertEqual(op.inputs[0].op.type, "Shape")
        self.assertEqual(op.inputs[1].op.type, "Shape")
    xla_context.Exit()
  @test_util.disable_control_flow_v2("b/115776323 (max_iters)")
  @test_util.run_v1_only("b/120545219")
  def testSingleNestedMaximumIterationsWhileLoopGradientInXLAContext(self):
    """Gradient of an inner while_loop with maximum_iterations works in XLA."""
    v = constant_op.constant(1.0)
    def training_loop_with_gradient(i):
      out = control_flow_ops.while_loop(
          lambda i_, _: i_ < 3,
          lambda i_, j: [i_ + 1, j * v], [0, 1.0],
          maximum_iterations=i)
      g = gradients_impl.gradients(out, v)
      with ops.control_dependencies(g):
        return i + 1
    xla_context = control_flow_ops.XLAControlFlowContext()
    xla_context.Enter()
    # Create training loop, ensure we can call gradient() of
    # while_loop inside the training loop.
    loop = control_flow_ops.while_loop(lambda i: i < 3,
                                       training_loop_with_gradient, [0])
    xla_context.Exit()
    loop_execute = array_ops.identity(loop)  # Because loop is not fetchable.
    # Should execute without issue.
    self.assertEqual(3, self.evaluate(loop_execute))
  @test_util.run_v1_only("b/120545219")
  def testInvalidMaximumIterationsWhileLoopGradientInXLAContext(self):
    """Gradient inside XLA requires a statically-known maximum_iterations."""
    if control_flow_util.ENABLE_CONTROL_FLOW_V2:
      self.skipTest("WhileV2 does lazy evaluation of maximum_iterations")
    v = constant_op.constant(1.0)
    def inner_body(i, x):
      out = control_flow_ops.while_loop(
          lambda i, _: i < 3,
          lambda i, j: [i + 1, j * v], [0, x],
          maximum_iterations=i)
      return out
    def create_while_loop(maximum_iterations=None):
      return control_flow_ops.while_loop(
          lambda i, _: i < 3,
          inner_body, [0, 1.0],
          maximum_iterations=maximum_iterations)
    loop_no_xla = create_while_loop(maximum_iterations=5)
    # maximum_iterations is fine outside of an XLA scope
    gs = gradients_impl.gradients(loop_no_xla, v)
    self.evaluate(gs)  # This should execute without error.
    xla_context = control_flow_ops.XLAControlFlowContext()
    xla_context.Enter()
    loop_no_maxiter = create_while_loop()
    loop_with_maxiter = create_while_loop(maximum_iterations=2)
    xla_context.Exit()
    # Missing maximum_iterations: gradient accumulator cannot be created.
    with self.assertRaisesRegexp(
        ValueError,
        r"Cannot create a gradient accumulator for tensor '.+' inside "
        r"XLA while_loop because maximum_iterations was not passed to "
        r"the tf.while_loop call \('.+'\)."):
      _ = gradients_impl.gradients(loop_no_maxiter, v)
    # Inner loop's maximum_iterations is the outer loop counter, which is not
    # statically known nor defined outside the loop context.
    with self.assertRaisesRegexp(
        ValueError,
        r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
        r"while_loop. maximum_iterations tensor '.+' for while_loop context "
        r"'.+' must be statically known \(e.g. a constant value or known "
        r"shape dimension\), or be defined at or outside the while loop "
        r"context '.*' \(currently defined in '.*'\)"):
      _ = gradients_impl.gradients(loop_with_maxiter, v)
  @test_util.run_v1_only("b/120545219")
  def testInvalidMaximumIterationsFromSiblingContextWhileLoopInXLAContext(self):
    """maximum_iterations defined inside a sibling cond context is rejected."""
    v = constant_op.constant(1.0)
    def create_while_loop():
      max_iter_holder = []
      def create_mi():
        # Placeholder is created inside the cond branch, i.e. in a sibling
        # control-flow context relative to the while loop below.
        max_iter_holder.append(array_ops.placeholder(dtypes.int32, shape=()))
        return 1.0
      _ = control_flow_ops.cond(
          constant_op.constant(True), create_mi, create_mi)
      return control_flow_ops.while_loop(
          lambda i, _: i < 3,
          lambda i, x: (i + 1, v * x), (0, 1.0),
          maximum_iterations=max_iter_holder[0])
    if control_flow_util.ENABLE_CONTROL_FLOW_V2:
      # V2 rejects the cross-graph capture at loop-construction time.
      xla_context = control_flow_ops.XLAControlFlowContext()
      xla_context.Enter()
      with self.assertRaisesRegexp(ValueError,
                                   r"must be from the same graph.*"):
        loop = create_while_loop()
      xla_context.Exit()
    else:
      # V1 only fails when taking the gradient.
      xla_context = control_flow_ops.XLAControlFlowContext()
      xla_context.Enter()
      loop = create_while_loop()
      xla_context.Exit()
      with self.assertRaisesRegexp(
          ValueError,
          r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
          r"while_loop. maximum_iterations tensor '.*Placeholder:0' for "
          r"while_loop context '.+' must be statically known \(e.g. a constant "
          r"value or known shape dimension\), or be defined at or outside the "
          r"while loop context '' \(currently defined in 'cond/.+'\)"):
        _ = gradients_impl.gradients(loop, v)
  @test_util.run_v1_only("b/120545219")
  def testNestedWhileLoopWithMaxItersFromOuterContextInXLAContext(self):
    """Triply-nested loop with maximum_iterations from the outer context.

    Builds the same graph with and without an XLA context, counts the stack
    pushes recorded in run metadata, and checks both versions compute the
    same value.
    """
    if test_util.is_gpu_available():
      self.skipTest("b/128646372, b/128645947 fails in opensource build")
    v = constant_op.constant(1.0)
    p = array_ops.placeholder(dtype=dtypes.int32)
    def mid_body_builder(iterations):
      def mid_body(i, x):
        r = control_flow_ops.while_loop(
            lambda *_: True,
            lambda i, x: (i + 1, v * x), (0, x),
            maximum_iterations=iterations,
            name="inner")
        return (i + 1, gradients_impl.gradients(x + r[1], v)[0])
      return mid_body
    def outer_body(i, x):
      # maximum_iterations for the mid loop comes from this (outer) context.
      iterations = array_ops.size(p, name="iterations")
      return (i + 1, x + control_flow_ops.while_loop(
          lambda *_: True,
          mid_body_builder(iterations), (0, x),
          maximum_iterations=iterations,
          name="mid")[1])
    def create_while_loop():
      with ops.device("/cpu:0"):
        r = control_flow_ops.while_loop(
            lambda *_: True,
            outer_body, (0, 1.0),
            maximum_iterations=5,
            name="outer")
      return array_ops.identity(r[1])
    xla_context = control_flow_ops.XLAControlFlowContext()
    xla_context.Enter()
    final_with_xla_context = create_while_loop()
    xla_context.Exit()
    final_without_xla_context = create_while_loop()
    with self.session(use_gpu=False) as sess:
      opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata_without_xla_context = config_pb2.RunMetadata()
      run_metadata = config_pb2.RunMetadata()
      final_value_without_xla_context = sess.run(
          final_without_xla_context,
          feed_dict={p: [0, 0, 0]},
          options=opts,
          run_metadata=run_metadata_without_xla_context)
      final_value_with_xla_context = sess.run(
          final_with_xla_context,
          feed_dict={p: [0, 0, 0]},
          options=opts,
          run_metadata=run_metadata)
      if control_flow_util.ENABLE_CONTROL_FLOW_V2:
        # With while_v2 on xla, run_metadata only contains the unlowered While
        # op so node_stats does not have statistics for the pushes. So as a
        # loose check we check the pushes in the lowered version.
        for dev in run_metadata_without_xla_context.step_stats.dev_stats:
          if "/device:CPU" in dev.device:
            node_stats = dev.node_stats
            stack_push_count = len([
                x for x in node_stats
                if re.match(r".*TensorListPushBack_?\d*", x.node_name)
            ])
      else:
        for dev in run_metadata.step_stats.dev_stats:
          if "/device:CPU" in dev.device:
            node_stats = dev.node_stats
            stack_push_op = "StackPushV2"
            stack_push_count = len(
                [x for x in node_stats if x.node_name.endswith("StackPushV2")])
      # Pushes to the stack = product of maximum_iterations values;
      # the last two "3"s comes from size(p), when p == [0, 0, 0].
      self.assertEqual(stack_push_count, 5 * 3 * 3, str(node_stats))
      self.assertAllClose(final_value_with_xla_context,
                          final_value_without_xla_context)
  # Have more than 10 parallel iterations and hence exercise k-bound
  # most of the time.
  @test_util.run_deprecated_v1
  def testWhile_3(self):
    """Loop with four carried values; o accumulates m + c each iteration."""
    with self.cached_session():
      def compute(i, m, c, o):
        m, c = [math_ops.add(m, 1), math_ops.add(c, 1)]
        o = math_ops.add(o, m)
        o = math_ops.add(o, c)
        i = math_ops.add(i, 1)
        return [i, m, c, o]
      i = ops.convert_to_tensor(0)
      m = ops.convert_to_tensor(0)
      c = ops.convert_to_tensor(0)
      o = ops.convert_to_tensor(0)
      d = ops.convert_to_tensor(100)
      r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, d),
                                      compute, [i, m, c, o])
      result = r[3]
      # o = 2 * sum(1..100) = 10100.
      self.assertAllEqual(10100, result)
  @test_util.run_deprecated_v1
  def testWhile_4(self):
    """Loop body gathers from a closed-over tensor by the loop index."""
    with self.cached_session():
      def compute(i, m, c, o):
        # x is defined below; the body captures it via closure.
        m, c = [array_ops.gather(x, i), array_ops.gather(x, i)]
        o = math_ops.add(o, m)
        o = math_ops.add(o, c)
        i = math_ops.add(i, 1)
        return [i, m, c, o]
      i = ops.convert_to_tensor(0)
      m = ops.convert_to_tensor(0)
      c = ops.convert_to_tensor(0)
      o = ops.convert_to_tensor(0)
      x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
      s = array_ops.size(x)
      r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, s),
                                      compute, [i, m, c, o])
      result = r[3]
      # o = 2 * sum(x) = 2 * 21 = 42.
      self.assertAllEqual(42, result)
  @test_util.run_v1_only("b/120545219")
  def testWhile_5(self):
    """Loop that grows a tensor via concat, using explicit shape invariants."""
    with self.cached_session():
      def compute(i, c, o):
        c = array_ops.strided_slice(x, array_ops.expand_dims(i, 0),
                                    [1] + array_ops.expand_dims(i, 0))
        o = array_ops.concat([o, c], 0)
        i = math_ops.add(i, 1)
        return [i, c, o]
      i = ops.convert_to_tensor(0)
      c = ops.convert_to_tensor([0])
      o = ops.convert_to_tensor([0])
      x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
      s = array_ops.size(x)
      # c and o change shape across iterations, so their invariants must be
      # left unknown.
      r = control_flow_ops.while_loop(lambda i, c, o: math_ops.less(i, s),
                                      compute, [i, c, o], [
                                          i.get_shape(),
                                          tensor_shape.unknown_shape(),
                                          tensor_shape.unknown_shape()
                                      ])
      result = r[2]
      self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
  @test_util.run_gpu_only
  @test_util.run_deprecated_v1
  def testWhile_Device(self):
    """A while_loop built under a CPU device scope stays entirely on CPU."""
    # Body function defined outside of device scope
    def body(x):
      return math_ops.exp(x)
    with ops.device("CPU:0"):
      r = control_flow_ops.while_loop(
          lambda x: x < 10, body, [constant_op.constant(-10.)])
      self.assertIn("cpu", r.device.lower())
    with session.Session() as sess:
      options = config_pb2.RunOptions(output_partition_graphs=True)
      run_metadata = config_pb2.RunMetadata()
      sess.run(r, options=options, run_metadata=run_metadata)
      # We expect that everything runs on CPU, even if GPU is available.
      self.assertEqual(len(run_metadata.partition_graphs), 1)
  @test_util.disable_control_flow_v2("b/116338794 (buffer_reuse)")
  @test_util.run_v1_only("b/120545219")
  def testBufferForwarding(self):
    """Loop iterations should reuse output buffers rather than reallocating."""
    run_options = config_pb2.RunOptions(
        trace_level=config_pb2.RunOptions.FULL_TRACE)
    run_metadata = config_pb2.RunMetadata()
    with self.cached_session() as sess:
      with ops.device("/cpu:0"):
        c = constant_op.constant(2)
        i0 = constant_op.constant(0)
        r = control_flow_ops.while_loop(lambda i: i < 1000,
                                        lambda i: math_ops.square(c) + i, [i0])
      r_val = sess.run(r, options=run_options, run_metadata=run_metadata)
      self.assertEqual(1000, r_val)
      self.assertTrue(run_metadata.HasField("step_stats"))
      unique_allocs = set()
      for node_stat in run_metadata.step_stats.dev_stats[0].node_stats:
        for output in node_stat.output:
          unique_allocs.add(
              output.tensor_description.allocation_description.ptr)
      # Prior to cl/147536680, the number of unique allocations was about 1005.
      self.assertLess(len(unique_allocs), 756)
def _testWhile_Gpu_1(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
b = lambda x: math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, self.evaluate(r))
=======
tf.initialize_all_variables().run()
result = r.eval()
self.assertAllEqual(np.array([7]), result)
def testCondGrad_1(self):
with self.test_session():
x = tf.constant(10.0, name="x")
pred = tf.less(1, 2)
fn1 = lambda: tf.identity(x)
fn2 = lambda: tf.identity(x)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = tf.gradients(r, [x])[0]
result = grad.eval()
self.assertAllEqual(1.0, result)
def testCondGrad_2(self):
with self.test_session():
c = tf.placeholder(tf.int32, shape=[])
x = tf.constant(10.0)
pred = tf.less(c, 2)
fn1 = lambda: tf.mul(x, 42.0)
fn2 = lambda: tf.mul(x, 3.0)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = tf.gradients(r, [x])[0]
self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))
def testCondGrad_Gather(self):
with self.test_session() as sess:
v1 = tf.Variable([1.0, 42.0])
c = tf.placeholder(tf.int32, shape=[])
pred = tf.less(c, 2)
fn1 = lambda: tf.identity(v1)
fn2 = lambda: tf.gather(v1, [1, 1])
r = control_flow_ops.cond(pred, fn1, fn2)
grad = tf.gradients(r, [v1])[0]
tf.initialize_all_variables().run()
# Should just be [1, 1], but possibly a sparse representation
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 1})
dense_gv = [sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
]
self.assertAllEqual(dense_gv, [1.0, 1.0])
# Should be [0, 2], as the else forwards v1[1] twice
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 3})
dense_gv = [sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
]
self.assertAllEqual(dense_gv, [0.0, 2.0])
def testWhileGrad_1(self):
with self.test_session():
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = tf.square
r = control_flow_ops.While(c, b, [v], parallel_iterations=1)
r = tf.gradients(r, v)
result = r[0].eval()
self.assertEqual(1024.0, result)
def testWhileGrad_2(self):
with self.test_session():
a = tf.constant(3.0, name="a")
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = lambda v: tf.mul(v, a)
r = control_flow_ops.While(c, b, [v], parallel_iterations=1)
r = tf.gradients(r, a)
result = r[0].eval()
self.assertEqual(216.0, result)
def testWhileGrad_3(self):
with self.test_session():
a = tf.constant(3.0, name="a")
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = lambda v: tf.mul(v, a)
r = control_flow_ops.While(c, b, [v], parallel_iterations=1)
r = tf.gradients(r, v)
result = r[0].eval()
self.assertEqual(81.0, result)
def testWhileGrad_4(self):
with self.test_session():
a = tf.Variable(3.0)
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = lambda v: tf.mul(v, a)
r = control_flow_ops.While(c, b, [v], parallel_iterations=1)
r = tf.gradients(r, a)
tf.initialize_all_variables().run()
result = r[0].eval()
self.assertEqual(216.0, result)
def testWhileGrad_5(self):
with self.test_session():
x = tf.constant(3.0, name="x")
y = tf.constant(2.0, name="y")
c = lambda x, y: tf.less(x, 100.0)
def b(x, y):
y1 = tf.add(x, y)
x1 = tf.mul(x, y1)
return x1, y1
r = control_flow_ops.While(c, b, [x, y], parallel_iterations=1)
# Must use the complete r.
r = tf.gradients(r, x)
result = r[0].eval()
self.assertEqual(304.0, result)
def testWhileGrad_6(self):
with self.test_session():
i = tf.constant(0, name="i")
x = tf.constant(2.0, name="x")
c = lambda i, x: tf.less(i, 10)
def b(i, x):
x = tf.mul(x, 2.0)
i = tf.add(i, 1)
return i, x
r = control_flow_ops.While(c, b, [i, x], parallel_iterations=1)
# Must use the complete r.
r = tf.gradients(r, x)
r = r[0].eval()
self.assertEqual(1024.0, r)
def testWhileGrad_7(self):
with self.test_session():
v = tf.constant(2.0, name="v")
c = lambda v: tf.less(v, 100.0)
b = tf.square
r = control_flow_ops.While(c, b, [v], parallel_iterations=1,
back_prop=False)
r = tf.add(r, v)
r = tf.gradients(r, v)
result = r[0].eval()
self.assertEqual(1.0, result)
# Microbenchmark: 10,000 iterations took 0.21s.
def testWhile_1(self):
with self.test_session():
n = tf.constant(0)
c = lambda x: tf.less(x, 10000)
b = lambda x: tf.add(x, 1)
r = control_flow_ops.While(c, b, [n], parallel_iterations=20)
result = r.eval()
self.assertTrue(check_op_order(n.graph))
self.assertEqual(10000, result)
def testWhile_2(self):
with self.test_session():
s = tf.constant(0)
r = isum(s)
result = r.eval()
self.assertTrue(check_op_order(s.graph))
self.assertAllEqual(45, result)
# Have more than 10 parallel iterations and hence exercise k-bound
# most of the time.
def testWhile_3(self):
with self.test_session():
def compute(i, m, c, o):
m, c = [tf.add(m, 1), tf.add(c, 1)]
o = tf.add(o, m)
o = tf.add(o, c)
i = tf.add(i, 1)
return [i, m, c, o]
i = tf.convert_to_tensor(0)
m = tf.convert_to_tensor(0)
c = tf.convert_to_tensor(0)
o = tf.convert_to_tensor(0)
d = tf.convert_to_tensor(100)
r = control_flow_ops.While(
lambda i, m, c, o: tf.less(i, d), compute, [i, m, c, o])
result = r[3].eval()
self.assertTrue(check_op_order(i.graph))
self.assertAllEqual(10100, result)
def testWhile_4(self):
with self.test_session():
def compute(i, m, c, o):
m, c = [tf.gather(x, i), tf.gather(x, i)]
o = tf.add(o, m)
o = tf.add(o, c)
i = tf.add(i, 1)
return [i, m, c, o]
i = tf.convert_to_tensor(0)
m = tf.convert_to_tensor(0)
c = tf.convert_to_tensor(0)
o = tf.convert_to_tensor(0)
x = tf.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = tf.size(x)
r = control_flow_ops.While(
lambda i, m, c, o: tf.less(i, s), compute, [i, m, c, o])
result = r[3].eval()
self.assertTrue(check_op_order(i.graph))
self.assertAllEqual(42, result)
def testWhile_5(self):
with self.test_session():
def compute(i, c, o):
c = tf.slice(x, tf.expand_dims(i, 0), [1])
o = tf.concat(0, [o, c])
i = tf.add(i, 1)
return [i, c, o]
i = tf.convert_to_tensor(0)
c = tf.convert_to_tensor(0)
o = tf.convert_to_tensor([0])
x = tf.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = tf.size(x)
r = control_flow_ops.While(
lambda i, c, o: tf.less(i, s), compute, [i, c, o])
result = r[2].eval()
self.assertTrue(check_op_order(i.graph))
self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
def _testWhile_Gpu_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = tf.constant(1.0)
c = lambda x: tf.less(x, 10.0)
b = lambda x: tf.add(x, 1.0)
r = control_flow_ops.While(c, b, [n])
result = r.eval()
self.assertEqual(10.0, result)
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
def testWhile_Gpu_1(self):
self._testWhile_Gpu_1(use_gpu=False)
self._testWhile_Gpu_1(use_gpu=True)
def _testWhile_Gpu_2(self, use_gpu):
<<<<<<< HEAD
with self.cached_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
return math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, self.evaluate(r))
def testWhile_Gpu_2(self):
self._testWhile_Gpu_2(use_gpu=False)
self._testWhile_Gpu_2(use_gpu=True)
  def testWhileShape(self):
    """A loop variable may change shape when given an unknown-shape invariant."""
    with self.cached_session():
      i = constant_op.constant(0)
      m = array_ops.ones([2, 2])
      c = lambda i, j: math_ops.less(i, 2)
      def _b(i, j):
        new_i = math_ops.add(i, 1)
        # Each iteration quadruples the matrix: (2,2) -> (4,4) -> (8,8).
        new_j = array_ops.tile(j, [2, 2])
        return [new_i, new_j]
      r = control_flow_ops.while_loop(
          c, _b, [i, m],
          [i.get_shape(), tensor_shape.unknown_shape()])
      r = r[1] * array_ops.ones([8, 8])
      self.assertAllEqual(np.ones((8, 8)), self.evaluate(r))
  @test_util.disable_control_flow_v2("b/131265085")
  @test_util.run_v1_only("b/131265085")
  def testWhileBadShape(self):
    """A shape invariant incompatible with the initial value raises."""
    x = constant_op.constant([2.0, 4.0], name="values")
    i = constant_op.constant(0)
    c = lambda i, _: math_ops.less(i, 10)
    b = lambda i, x: [i + 1, x + 1]
    with self.assertRaisesRegexp(ValueError, "is not compatible with"):
      # Shape of x is [2], but we specify a shape of [5].
      control_flow_ops.while_loop(
          c, b, [i, x], [i.shape, tensor_shape.TensorShape([5])])
  @test_util.run_in_graph_and_eager_modes
  def testWhileBadBodyReturn(self):
    """A body returning a different structure than loop_vars raises."""
    x = constant_op.constant([2.0, 4.0], name="values")
    i = constant_op.constant(0)
    c = lambda i, *x: math_ops.less(i, 10)
    # body accepts N values and returns N+1 values.
    b = lambda i, *x: (i, i) + x
    with self.assertRaisesRegexp(
        ValueError,
        "The two structures don't have the same nested structure."):
      control_flow_ops.while_loop(c, b, [i, x])
@test_util.run_deprecated_v1
def testWhileWithNonTensorInput_Scalar(self):
with self.cached_session():
n = 0
c = lambda x: x < 10000
b = lambda x: x + 1
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, self.evaluate(r))
def testWhileWithNonTensorInput_Vector(self):
with self.cached_session():
n = np.array([0]) # Note, [0] would not work here; that is a list
c = lambda x: x[0] < 10000
b = lambda x: array_ops.stack([x[0] + 1])
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual([10000], self.evaluate(r))
  def testWhileShapeInference(self):
    """A partially-known shape invariant ([None, 2]) propagates to the output."""
    with self.cached_session():
      i = constant_op.constant(0)
      m = array_ops.ones([2, 2])
      c = lambda i, j: math_ops.less(i, 2)
      def b(i, j):
        new_i = math_ops.add(i, 1)
        # Doubles the leading dimension each iteration: (2,2) -> (4,2) -> (8,2).
        new_j = array_ops.concat([j, j], 0)
        return [new_i, new_j]
      r = control_flow_ops.while_loop(
          c, b, [i, m],
          [i.get_shape(), tensor_shape.TensorShape([None, 2])])
      self.assertTrue(r[1].shape.is_compatible_with([8, 2]))
  @test_util.run_v1_only("b/120545219")
  def testWhileShapeInferenceBadShape(self):
    """Without a shape invariant, a body that changes shape raises."""
    with self.cached_session():
      i = constant_op.constant(0)
      m = array_ops.ones([2, 2])
      c = lambda i, j: math_ops.less(i, 2)
      b = lambda i, j: [i + 1, array_ops.concat([j, j], 0)]
      with self.assertRaisesRegexp(
          ValueError,
          r"Input tensor 'ones:0' enters the loop with shape \(2, 2\), but has "
          r"shape \(4, 2\) after one iteration. To allow the shape to vary "
          r"across iterations, use the `shape_invariants` argument of "
          r"tf.while_loop to specify a less-specific shape."):
        control_flow_ops.while_loop(c, b, [i, m])
  def testWhileShapeInferenceSparseTensor(self):
    """Shape inference for SparseTensor loop variables under various invariants."""
    values = constant_op.constant([2.0, 4.0], name="values")
    indices = constant_op.constant([[0], [3]],
                                   dtype=dtypes.int64,
                                   name="indices")
    shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
    i = constant_op.constant(0)
    x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
    def c(i, _):
      return i < 10
    def b1(i, x):  # modifies values. (shape of components is not changed.)
      return [
          i + 1,
          sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
      ]
    def b2(i, x):  # adds new values. (shape of components is changed.)
      return [
          i + 1,
          sparse_ops.sparse_add(
              x,
              sparse_tensor.SparseTensor(
                  indices=math_ops.cast(
                      array_ops.fill([1, 1], i), dtypes.int64),
                  values=array_ops.fill([1], 1.0),
                  dense_shape=x.dense_shape))
      ]
    def b3(i, x):  # modifies rank. (shape of all components is changed.)
      return [
          i + 1,
          sparse_tensor.SparseTensor(
              array_ops.concat([x.indices, [[i], [i]]], axis=1), x.values * 2.0,
              array_ops.concat([x.dense_shape, [10]], axis=0))
      ]
    def check_shapes(r, indices, values, dense_shape):
      # Asserts the static shapes of the output SparseTensor's components.
      self.assertTrue(r.indices.shape.is_compatible_with(indices))
      self.assertTrue(r.values.shape.is_compatible_with(values))
      self.assertTrue(r.dense_shape.shape.is_compatible_with(dense_shape))
    # Default shape invariant; b1 only modifies values.
    _, r = control_flow_ops.while_loop(c, b1, [i, x])
    check_shapes(r, indices=[None, 1], values=[None], dense_shape=[1])
    # Default shape invariant; b2 adds new values
    _, r = control_flow_ops.while_loop(c, b2, [i, x])
    check_shapes(r, indices=[None, 1], values=[None], dense_shape=[1])
    # Explicit shape invariant, allowing any rank; b1 only modifies values.
    _, r = control_flow_ops.while_loop(
        c, b1, [i, x],
        [i.get_shape(), tensor_shape.TensorShape([None])])
    check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])
    # Explicit shape invariant, allowing any rank; b3 modifies rank.
    _, r = control_flow_ops.while_loop(
        c, b3, [i, x],
        [i.get_shape(), tensor_shape.TensorShape([None])])
    check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])
    # Shape invariant with ndims=None. Technically, this isn't supported
    # according to the docs, but we support it for backwards compatibility.
    _, r = control_flow_ops.while_loop(
        c, b1, [i, x],
        [i.get_shape(), tensor_shape.TensorShape(None)])
    check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])
    _, r = control_flow_ops.while_loop(
        c, b3, [i, x],
        [i.get_shape(), tensor_shape.TensorShape(None)])
    check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])
  @test_util.disable_control_flow_v2("b/131265085")
  @test_util.run_v1_only("b/131265085")
  def testWhileBadShapeSparseTensor(self):
    """Bad shape invariants for a SparseTensor loop variable raise ValueError."""
    values = constant_op.constant([2.0, 4.0], name="values")
    indices = constant_op.constant([[0], [3]],
                                   dtype=dtypes.int64,
                                   name="indices")
    shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
    i = constant_op.constant(0)
    x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
    c = lambda i, _: i < 10
    b1 = lambda i, x: [i+1, x]
    def b2(i, x):  # modifies rank. (shape of all components is changed.)
      return [
          i + 1,
          sparse_tensor.SparseTensor(
              array_ops.concat([x.indices, [[i], [i]]], axis=1), x.values * 2.0,
              array_ops.concat([x.dense_shape, [10]], axis=0))
      ]
    # Explicit shape invariant, with a specific (incompatible) rank.
    with self.assertRaisesRegexp(ValueError, "is not compatible with"):
      control_flow_ops.while_loop(
          c, b1, [i, x],
          [i.get_shape(), tensor_shape.TensorShape([5])])
    # Default shape invariant, but b2 modifies rank (which is not allowed).
    with self.assertRaises(ValueError):
      control_flow_ops.while_loop(c, b2, [i, x])
  def testWhileShapeInferenceIndexedSlices(self):
    """Shape inference for IndexedSlices loop variables in while_loop."""
    with self.cached_session():
      values = constant_op.constant([[2.0, 4.0], [3.0, 5.0]], name="values")
      indices = constant_op.constant([0, 3], name="indices")
      shape = constant_op.constant([10, 2], name="dense_shape")
      i = constant_op.constant(0)
      x = ops.IndexedSlices(values, indices, dense_shape=shape)
      def c(i, _):
        return i < 10
      def b(i, x):
        return [
            i + 1,
            ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
        ]
      # Default shape invariant: the values keep their static (2, 2) shape.
      _, r = control_flow_ops.while_loop(c, b, [i, x])
      self.assertEqual(r.dense_shape.get_shape()[0], 2)
      self.assertEqual(r.values.get_shape(), tensor_shape.TensorShape([2, 2]))
      # Explicit invariant relaxing the leading dimension of the values.
      _, r = control_flow_ops.while_loop(
          c, b, [i, x],
          [i.get_shape(), tensor_shape.TensorShape([None, 2])])
      self.assertEqual(r.dense_shape.get_shape()[0], 2)
      self.assertTrue(r.values.get_shape().is_compatible_with([None, 2]))
@test_util.disable_control_flow_v2("b/131265085")
@test_util.run_v1_only("b/131265085")
def testWhileBadShapeIndexedSlices(self):
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([[0], [3]],
dtype=dtypes.int64,
name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
c = lambda i, _: 10
b = lambda i, x: [i+1, x]
# Explicit shape invariant, with a specific (incompatible) rank.
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([5])])
  def testWhileShapeInferenceRaggedTensor(self):
    """Shape inference for RaggedTensor loop variables in while_loop."""
    i = constant_op.constant(0)
    x = ragged_factory_ops.constant([[1, 2], [3], [4, 5, 6]])
    c = lambda i, _: i < 10
    def b1(i, x):  # Adds new values to rows (but doesn't create new rows)
      return [
          i + 1,
          array_ops.concat([x, x], axis=1)
      ]
    def b2(i, x):  # Adds new rows.
      return [
          i + 1,
          array_ops.concat([x, x], axis=0)
      ]
    def check_shapes(r, values, splits):
      self.assertTrue(r.values.shape.is_compatible_with(values))
      self.assertTrue(r.row_splits.shape.is_compatible_with(splits))
    # Default shape invariant; b1 adds new values to rows.
    _, r = control_flow_ops.while_loop(c, b1, [i, x])
    check_shapes(r, values=[None], splits=[4])
    # Default shape invariant; b2 adds new rows (not allowed).
    if not context.executing_eagerly():
      with self.assertRaises(ValueError):
        _, r = control_flow_ops.while_loop(c, b2, [i, x])
    # Explicit shape invariant; b1 adds new values to rows.
    # (deprecated: use TensorShape instead of RaggedTensorSpec)
    _, r = control_flow_ops.while_loop(
        c, b1, [i, x],
        [i.get_shape(), tensor_shape.TensorShape([None, None])])
    check_shapes(r, values=[None], splits=[None])
    # Explicit shape invariant; b1 adds new values to rows.
    _, r = control_flow_ops.while_loop(
        c, b1, [i, x],
        [i.get_shape(), ragged_tensor.RaggedTensorSpec([None, None],
                                                       dtypes.int32)])
    check_shapes(r, values=[None], splits=[None])
    # Explicit shape invariant; b2 adds new rows.
    _, r = control_flow_ops.while_loop(
        c, b2, [i, x],
        [i.get_shape(), ragged_tensor.RaggedTensorSpec([None, None],
                                                       dtypes.int32)])
    check_shapes(r, values=[None], splits=[None])
  def testWhileShapeInferenceRaggedTensorRaggedRank2(self):
    """Shape inference for a ragged-rank-2 RaggedTensor loop variable."""
    i = constant_op.constant(0)
    x = ragged_factory_ops.constant([[[1, 2], [3], [4, 5, 6]],
                                     [[], [8, 9, 10]]])
    c = lambda i, _: i < 10
    def b(i, x):
      return [
          i + 1,
          array_ops.concat([x, x[..., i:i+1]], axis=-1)
      ]
    _, r = control_flow_ops.while_loop(c, b, [i, x])
    self.assertEqual(r.row_splits.shape.as_list(), [3])
    # Inner splits/values may or may not have static shapes depending on
    # which control-flow implementation is active, hence the two options.
    self.assertTrue(r.values.row_splits.shape.as_list() in ([6], [None]))
    self.assertTrue(r.values.values.shape.as_list() in ([49], [None]))
def testWhileShapeInvariantTensorSpec(self):
i = constant_op.constant(0)
x = constant_op.constant([1])
c = lambda i, _: i < 10
b = lambda i, x: (i + 1, array_ops.stack([x, x]))
shape_invariants = [
tensor_spec.TensorSpec([], dtype=dtypes.int32),
tensor_spec.TensorSpec(None, dtype=dtypes.int32)]
control_flow_ops.while_loop(c, b, [i, x], shape_invariants)
  # TODO(b/131265085) Remove this decorator when bug is fixed.
  @test_util.build_as_function_and_v1_graph
  def testWhileShapeInvariantWrongTypeSpecType(self):
    """A TypeSpec invariant must match the loop variable's composite type."""
    c = lambda i, _: i < 10
    b = lambda i, x: (i + 1, x)
    i = constant_op.constant(0)
    x = sparse_tensor.SparseTensor([[0]], [1.0], [10])
    shape_invariants = [
        tensor_spec.TensorSpec([], dtype=dtypes.int32),
        sparse_tensor.SparseTensorSpec([None])]
    # SparseTensor variable with SparseTensorSpec invariant: accepted.
    control_flow_ops.while_loop(c, b, [i, x], shape_invariants)
    # Dense Tensor against SparseTensorSpec: rejected.
    x2 = constant_op.constant([1])
    with self.assertRaises(TypeError):
      control_flow_ops.while_loop(c, b, [i, x2], shape_invariants)
    # RaggedTensor against SparseTensorSpec: rejected.
    x3 = ragged_factory_ops.constant([[1, 2], [3]])
    with self.assertRaises(TypeError):
      control_flow_ops.while_loop(c, b, [i, x3], shape_invariants)
    # Float counter against an int32 TensorSpec: rejected.
    i2 = constant_op.constant(0.0)
    with self.assertRaises(TypeError):
      control_flow_ops.while_loop(c, b, [i2, x], shape_invariants)
# TODO(b/131265085) Remove this decorator when bug is fixed.
@test_util.build_as_function_and_v1_graph
def testWhileShapeInvariantBadType(self):
i = constant_op.constant(0)
x = constant_op.constant([1])
c = lambda i, _: i < 10
b = lambda i, x: (i + 1, x)
with self.assertRaises((ValueError, TypeError)):
control_flow_ops.while_loop(c, b, [i, x], ["foo", "bar"])
  def _testNestedWhile_1(self, use_gpu):
    """A while_loop whose outer body builds an inner while_loop each step."""
    with self.cached_session(use_gpu=use_gpu):
      n = constant_op.constant(0)
      def cpu_sum(s):
        c = lambda i, s: math_ops.less(i, 10)
        def b(i, s):
          i1 = math_ops.add(i, 1)
          # Pin the accumulation to CPU to exercise device partitioning.
          with ops.device("/cpu:0"):
            s1 = math_ops.add(i, s)
          return i1, s1
        _, r_s = control_flow_ops.while_loop(c, b, [n, s])
        return r_s
      c = lambda x: math_ops.less(x, 200)
      b = lambda x: math_ops.add(x, cpu_sum(n))
      r = control_flow_ops.while_loop(c, b, [n])
      self.assertEqual(225, self.evaluate(r))
def testNestedWhile_1(self):
self._testNestedWhile_1(use_gpu=False)
self._testNestedWhile_1(use_gpu=True)
  def _testNestedWhile_2(self, use_gpu):
    """Nested while loops whose Enter/Exit edges cross device partitions."""
    # Test the cases that A -> Enter and Exit -> A are partitioned.
    with self.cached_session(use_gpu=use_gpu):
      s0 = constant_op.constant(2.0)
      def inner_loop(s):
        c = lambda s: math_ops.less(s, 20.0)
        def b(s):
          s1 = math_ops.add(s, s)
          return s1
        r_s = control_flow_ops.while_loop(c, b, [s], parallel_iterations=1)
        return r_s
      outer_c = lambda x: math_ops.less(x, 3000.0)
      def outer_b(x):
        x = logging_ops.Print(x, [x])  # Edge "Print -> Enter" is partitioned
        x = inner_loop(x)
        with ops.device("/cpu:0"):
          x = math_ops.square(x)  # Edge "Exit -> Square" is partitioned
        return x
      r = control_flow_ops.while_loop(
          outer_c, outer_b, [s0], parallel_iterations=1)
      self.assertEqual(1048576.0, self.evaluate(r))
def testNestedWhile_2(self):
self._testNestedWhile_2(use_gpu=False)
self._testNestedWhile_2(use_gpu=True)
  @test_util.run_v1_only("b/120545219")
  def testWhileWithControl_1(self):
    """Control dependency added via graph.control_dependencies in the body."""
    with self.cached_session():
      n = constant_op.constant(0)
      r = constant_op.constant(0)
      condition = lambda n_, r_: math_ops.less(n_, 10)
      def body(n_, r_):
        n_ = math_ops.add(n_, 1)
        with r_.graph.control_dependencies([r_]):
          r_ = constant_op.constant(12)
        return [n_, r_]
      res = control_flow_ops.while_loop(
          condition, body, [n, r], parallel_iterations=1)
      self.assertAllEqual(12, res[1])
  @test_util.run_deprecated_v1
  def testWhileWithControl_2(self):
    """Same as testWhileWithControl_1 but with a single loop variable."""
    with self.cached_session():
      r = constant_op.constant(0)
      condition = lambda r_: math_ops.less(r_, 10)
      def body(r_):
        with r_.graph.control_dependencies([r_]):
          r_ = constant_op.constant(12)
        return [r_]
      res = control_flow_ops.while_loop(
          condition, body, [r], parallel_iterations=1)
      self.assertAllEqual(12, self.evaluate(res))
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_3(self):
with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(lambda x: x < 10, lambda x: x + c, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_4(self):
with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(
lambda x: x < 10, lambda x: x + array_ops.identity(c), [x0])
self.assertEqual(10, sess.run(r, {b: True}))
  @test_util.run_v1_only("b/120545219")
  def testWhileWithControl_5(self):
    """Control dependency on a placeholder added inside the loop body."""
    with self.cached_session() as sess:
      b = array_ops.placeholder(dtypes.bool)
      c = constant_op.constant(1)
      x0 = constant_op.constant(0)
      def body(x):
        with ops.control_dependencies([b]):
          return x + c
      r = control_flow_ops.while_loop(lambda x: x < 10, body, [x0])
      self.assertEqual(10, sess.run(r, {b: True}))
  def testWhileCondWithControl(self):
    """cond nested inside while under an outer control dependency context."""
    # Ensure that no control edges by an outer control dependency context are
    # added to nodes inside cond/while contexts.
    with self.cached_session() as sess:
      const_true = lambda: constant_op.constant(True)
      const_false = lambda: constant_op.constant(False)
      cond = lambda i: control_flow_ops.cond(i > 0, const_true, const_false)
      body = lambda i: control_flow_ops.cond(i > 0, lambda: i - 1, lambda: i)
      with ops.control_dependencies([control_flow_ops.no_op()]):
        loop = control_flow_ops.while_loop(cond, body,
                                           (constant_op.constant(5),))
      self.assertEqual(0, self.evaluate(loop))
  @test_util.disable_control_flow_v2("b/113324949 (ref vars)")
  @test_util.run_v1_only("b/120545219")
  def testWhileCondWithControl_1(self):
    """Ref-variable assignment inside cond inside while via control deps."""
    with self.cached_session():
      v = variable_scope.get_variable(
          "v", [], initializer=init_ops.constant_initializer(2))
      i0 = constant_op.constant(0)
      with ops.control_dependencies([i0]):
        def loop_condition(i):
          return i < 4
        def loop_body(i):
          some_cond = control_flow_ops.cond(
              constant_op.constant(True),
              lambda: state_ops.assign(v, math_ops.square(v)), lambda: v)
          with ops.control_dependencies([some_cond]):
            return i + 1
      r = control_flow_ops.while_loop(loop_condition, loop_body, (i0,))
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(4, self.evaluate(r))
      # v is squared once per iteration: 2 -> 4 -> 16 -> 256 -> 65536.
      self.assertAllClose(65536.0, self.evaluate(v))
  @test_util.disable_control_flow_v2("b/113324949 (ref vars)")
  @test_util.run_v1_only("b/120545219")
  def testWhileCondExitControl(self):
    """Control edge from a while loop's Exit node to a downstream node."""
    with self.cached_session():
      v = variables.Variable(1)
      def false_branch():
        cond = lambda i: i < 100
        def body(i):
          x = state_ops.assign(v, i)
          return x + 1
        loop = control_flow_ops.while_loop(cond, body, [0])
        # Make sure to handle correctly control edge from Exit to a node.
        with ops.control_dependencies([loop]):
          return constant_op.constant(6.0)
      r = control_flow_ops.cond(
          constant_op.constant(False), lambda: constant_op.constant(1.0),
          false_branch)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(6.0, self.evaluate(r))
      # The loop in the taken (false) branch ran to completion: v == 99.
      self.assertEqual(99, self.evaluate(v))
def testCondWhile_1(self):
with self.cached_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(0, 1), lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: n)
self.assertAllEqual(10, self.evaluate(r))
def testCondWhile_2(self):
with self.cached_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(n, 1),
lambda: control_flow_ops.while_loop(c, b, [n]))
self.assertAllEqual(10, self.evaluate(r))
  def _testCondWhile_3(self, use_gpu):
    """Gradient through a cond whose true branch contains a while_loop."""
    with self.cached_session(use_gpu=use_gpu) as sess:
      p = array_ops.placeholder(dtypes.bool)
      n = constant_op.constant(0.0)
      def c(x):
        return math_ops.less(x, 10.0)
      def b(x):
        # Pin the add to CPU to exercise cross-device partitioning.
        with ops.device("/cpu:0"):
          x1 = math_ops.add(x, 1.0)
        return x1
      r = control_flow_ops.cond(p,
                                lambda: control_flow_ops.while_loop(c, b, [n]),
                                lambda: math_ops.multiply(n, 2.0))
      r1 = gradients_impl.gradients(r, [n])
      self.assertEqual(10., sess.run(r, {p: True}))
      self.assertEqual([1.0], sess.run(r1, {p: True}))
      self.assertEqual(0.0, sess.run(r, {p: False}))
      self.assertEqual([2.0], sess.run(r1, {p: False}))
@test_util.run_deprecated_v1
def testCondWhile_3(self):
self._testCondWhile_3(use_gpu=False)
self._testCondWhile_3(use_gpu=True)
  def testWhileCond_1(self):
    """A cond with a constant-True predicate used as the while_loop body."""
    with self.cached_session():
      i = ops.convert_to_tensor(0, name="i")
      n = ops.convert_to_tensor(10, name="n")
      one = ops.convert_to_tensor(1, name="one")
      c = lambda x: math_ops.less(x, n)
      # pylint: disable=undefined-variable
      # for OSS build
      b = lambda x: control_flow_ops.cond(
          constant_op.constant(True),
          lambda: math_ops.add(x, one), lambda: math_ops.subtract(x, one))
      # pylint: enable=undefined-variable
      r = control_flow_ops.while_loop(c, b, [i])
      self.assertAllEqual(10, self.evaluate(r))
def testWhileCond_2(self):
with self.cached_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
b = lambda x: control_flow_ops.cond(constant_op.constant(True), lambda: math_ops.add(x, 1), lambda: n)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, self.evaluate(r))
  def testWhileCond_3(self):
    """A cond whose predicate is a computed tensor, used as loop body."""
    with self.cached_session():
      n = ops.convert_to_tensor(0)
      c = lambda x: math_ops.less(x, 10)
      # pylint: disable=undefined-variable
      # for OSS build
      b = lambda x: control_flow_ops.cond(math_ops.less(0, 1),
                                          lambda: math_ops.add(x, 1),
                                          lambda: math_ops.subtract(x, 1))
      # pylint: enable=undefined-variable
      r = control_flow_ops.while_loop(c, b, [n])
      self.assertAllEqual(10, self.evaluate(r))
  @test_util.run_deprecated_v1
  def testWhileCondGradMultiDevice(self):
    """First and second-order gradients of while(cond) across CPU devices."""
    config = config_pb2.ConfigProto(device_count={"CPU": 2},
                                    allow_soft_placement=True)
    with self.cached_session(use_gpu=True, config=config) as sess:
      pred = array_ops.placeholder(dtypes.bool, [])
      x_init = constant_op.constant(1.0)
      with ops.device("/cpu:0"):
        z = control_flow_ops.while_loop(
            lambda i, _: i < 3,
            lambda i, x: (i + 1, control_flow_ops.cond(
                pred, lambda: x * 2.0, lambda: 10.0)),
            [0, x_init])
      # Compute the gradient on a different device from the forward loop.
      with ops.device("/cpu:1"):
        grad = gradients_impl.gradients(z, x_init)[0]
      with ops.device("/cpu:0"):
        grad_grad = gradients_impl.gradients(grad, x_init)[0]
      self.assertEqual(sess.run(grad, {pred: True}), 8.0)
      self.assertEqual(sess.run(grad, {pred: False}), 0.0)
      if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
        return
      self.assertEqual(sess.run(grad_grad, {pred: True}), 0.0)
      self.assertEqual(sess.run(grad_grad, {pred: False}), 0.0)
# NOTE: It is ok to have parallel_iterations > 1
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_deprecated_v1
def testWhileUpdateVariable_1(self):
with self.cached_session():
select = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns = state_ops.scatter_update(select, j, 10.0)
nj = math_ops.add(j, 1)
=======
with self.test_session(use_gpu=use_gpu):
n = tf.constant(1.0)
c = lambda x: tf.less(x, 10.0)
def b(x):
with tf.device("/cpu:0"):
return tf.add(x, 1.0)
r = control_flow_ops.While(c, b, [n])
result = r.eval()
self.assertEqual(10.0, result)
def testWhile_Gpu_2(self):
self._testWhile_Gpu_1(use_gpu=False)
self._testWhile_Gpu_1(use_gpu=True)
def testWhileWithControl_1(self):
with self.test_session():
n = tf.constant(0)
r = tf.constant(0)
condition = lambda n_, r_: tf.less(n_, 10)
def body(n_, r_):
n_ = tf.add(n_, 1)
with r_.graph.control_dependencies([r_]):
r_ = tf.constant(12)
return [n_, r_]
res = control_flow_ops.While(condition,
body,
[n, r],
parallel_iterations=1)
result = res[1].eval()
self.assertTrue(check_op_order(n.graph))
self.assertAllEqual(12, result)
def testWhileWithControl_2(self):
with self.test_session():
r = tf.constant(0)
condition = lambda r_: tf.less(r_, 10)
def body(r_):
with r_.graph.control_dependencies([r_]):
r_ = tf.constant(12)
return [r_]
res = control_flow_ops.While(condition, body, [r], parallel_iterations=1)
result = res.eval()
self.assertTrue(check_op_order(r.graph))
self.assertAllEqual(12, result)
def testCondWhile_1(self):
with self.test_session():
n = tf.convert_to_tensor(0, name="n")
c = lambda x: tf.less(x, 10)
b = lambda x: tf.add(x, 1)
r = control_flow_ops.cond(tf.less(0, 1),
lambda: control_flow_ops.While(c, b, [n]),
lambda: n)
result = r.eval()
self.assertTrue(check_op_order(n.graph))
self.assertAllEqual(10, result)
def testCondWhile_2(self):
with self.test_session():
n = tf.convert_to_tensor(0)
c = lambda x: tf.less(x, 10)
b = lambda x: tf.add(x, 1)
r = control_flow_ops.cond(tf.less(1, 0), lambda: tf.add(n, 1),
lambda: control_flow_ops.While(c, b, [n]))
result = r.eval()
self.assertTrue(check_op_order(n.graph))
self.assertAllEqual(10, result)
def testWhileCond_1(self):
with self.test_session():
i = tf.convert_to_tensor(0, name="i")
n = tf.convert_to_tensor(10, name="n")
one = tf.convert_to_tensor(1, name="one")
c = lambda x: tf.less(x, n)
b = lambda x: control_flow_ops.cond(tf.constant(True),
lambda: tf.add(x, one),
lambda: tf.sub(x, one))
r = control_flow_ops.While(c, b, [i])
result = r.eval()
self.assertTrue(check_op_order(n.graph))
self.assertAllEqual(10, result)
def testWhileCond_2(self):
with self.test_session():
n = tf.convert_to_tensor(0, name="n")
c = lambda x: tf.less(x, 10)
b = lambda x: control_flow_ops.cond(tf.constant(True),
lambda: tf.add(x, 1),
lambda: n)
r = control_flow_ops.While(c, b, [n])
result = r.eval()
self.assertTrue(check_op_order(n.graph))
self.assertAllEqual(10, result)
def testWhileCond_3(self):
with self.test_session():
n = tf.convert_to_tensor(0)
c = lambda x: tf.less(x, 10)
b = lambda x: control_flow_ops.cond(tf.less(0, 1),
lambda: tf.add(x, 1),
lambda: tf.sub(x, 1))
r = control_flow_ops.While(c, b, [n])
result = r.eval()
self.assertTrue(check_op_order(n.graph))
self.assertAllEqual(10, result)
# NOTE: It is ok to have parallel_iterations > 1
def testWhileUpdateVariable_1(self):
with self.test_session():
select = tf.Variable([3.0, 4.0, 5.0])
n = tf.constant(0)
def loop_iterator(j):
return tf.less(j, 3)
def loop_body(j):
ns = tf.scatter_update(select, j, 10.0)
nj = tf.add(j, 1)
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
op = control_flow_ops.group(ns)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
<<<<<<< HEAD
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(3, self.evaluate(r))
result = self.evaluate(select)
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_2(self):
with self.cached_session():
select1 = variables.Variable([3.0, 4.0, 5.0])
select2 = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns1 = state_ops.scatter_update(select1, j, 10.0)
ns2 = state_ops.scatter_update(select2, j, 10.0)
nj = math_ops.add(j, 1)
=======
r = control_flow_ops.While(loop_iterator,
loop_body,
[n],
parallel_iterations=1)
self.assertTrue(check_op_order(n.graph))
tf.initialize_all_variables().run()
self.assertEqual(3, r.eval())
result = select.eval()
self.assertAllEqual(np.array([10.0, 10.0, 10.0]), result)
def testWhileUpdateVariable_2(self):
with self.test_session():
select1 = tf.Variable([3.0, 4.0, 5.0])
select2 = tf.Variable([3.0, 4.0, 5.0])
n = tf.constant(0)
def loop_iterator(j):
return tf.less(j, 3)
def loop_body(j):
ns1 = tf.scatter_update(select1, j, 10.0)
ns2 = tf.scatter_update(select2, j, 10.0)
nj = tf.add(j, 1)
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
op = control_flow_ops.group(ns1, ns2)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
<<<<<<< HEAD
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(3, self.evaluate(r))
result1 = self.evaluate(select1)
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)
result2 = self.evaluate(select2)
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2)
  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileUpdateVariable_3(self):
    """scatter_update on a variable carried as an explicit loop variable."""
    with self.cached_session():
      select = variables.Variable([3.0, 4.0, 5.0])
      n = constant_op.constant(0)
      def loop_iterator(j, _):
        return math_ops.less(j, 3)
      def loop_body(j, _):
        ns = state_ops.scatter_update(select, j, 10.0)
        nj = math_ops.add(j, 1)
        return [nj, ns]
      r = control_flow_ops.while_loop(
          loop_iterator,
          loop_body, [n, array_ops.identity(select)],
          parallel_iterations=1)
      self.evaluate(variables.global_variables_initializer())
      result = r[1]
      self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileUpdateVariable_4(self):
    """assign_add inside the loop body accumulates into a variable."""
    with self.cached_session():
      var_a = variables.Variable(0, name="a")
      var_b = variables.Variable(0, name="b")
      self.evaluate(variables.global_variables_initializer())
      c = constant_op.constant(0, name="c")
      asn1 = state_ops.assign_add(var_a, 1, name="a_add")
      # Loop condition
      def pred(i):
        return math_ops.less(i, 10)
      # Loop body
      def loop_body(i):
        asn2 = state_ops.assign_add(var_b, asn1, name="b_add")
        with ops.control_dependencies([asn2]):
          ni = math_ops.add(i, 1, name="i_add")
        return ni
      lpa = control_flow_ops.while_loop(
          pred, loop_body, [c], parallel_iterations=1)
      self.assertEqual(0, self.evaluate(var_b))
      self.evaluate(lpa)  # Run the loop
      self.assertEqual(10, self.evaluate(var_b))
  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileUpdateVariable_5(self):
    """The loop condition itself reads a variable updated by the body."""
    with self.cached_session():
      # Create some variables.
      var_a = variables.Variable(0, name="a")
      var_b = variables.Variable(0, name="b")
      self.evaluate(variables.global_variables_initializer())
      # Change condition to check var_b
      def pred(_):
        return math_ops.less(var_b, 10)
      # Change body to increment var_b
      def loop_body(i):
        asn1 = state_ops.assign_add(
            var_a, constant_op.constant(1), name="a_add")
        asn2 = state_ops.assign_add(
            var_b, constant_op.constant(1), name="b_add")
        with ops.control_dependencies([asn1, asn2]):
          inc_b = array_ops.identity(var_b)
        return inc_b
      lpa = control_flow_ops.while_loop(
          pred, loop_body, [var_b], parallel_iterations=1, name="loop")
      self.assertEqual(0, self.evaluate(var_b))
      self.evaluate(lpa)  # Run the loop
      self.assertEqual(10, self.evaluate(var_a))
      self.assertEqual(10, self.evaluate(var_b))
  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileUpdateVariable_6(self):
    """Chained assign_adds in the body, serialized via control dependencies."""
    with self.cached_session():
      # Create some variables.
      var_a = variables.Variable(0, name="a")
      var_b = variables.Variable(0, name="b")
      c = constant_op.constant(0)
      self.evaluate(variables.global_variables_initializer())
      # Loop condition
      def pred(i):
        return math_ops.less(i, 10)
      # Loop body
      def loop_body(i):
        asn1 = state_ops.assign_add(var_a, 1, name="a_add")
        with ops.control_dependencies([asn1]):
          asn2 = state_ops.assign_add(var_b, var_a, name="b_add")
        with ops.control_dependencies([asn2]):
          ni = math_ops.add(i, 1, name="i_add")
        return ni
      lpa = control_flow_ops.while_loop(
          pred, loop_body, [c], parallel_iterations=1, name="loop")
      self.assertEqual(0, self.evaluate(var_b))
      self.evaluate(lpa)  # Run the loop
      # var_b accumulates 1 + 2 + ... + 10 = 55.
      self.assertEqual(55, self.evaluate(var_b))
      self.assertEqual(10, self.evaluate(var_a))
  @test_util.run_v1_only("b/120545219")
  def testWhileQueue_1(self):
    """Enqueue from inside a while_loop body, then dequeue in order."""
    with self.cached_session():
      q = data_flow_ops.FIFOQueue(-1, dtypes.int32)
      i = constant_op.constant(0)
      def c(i):
        return math_ops.less(i, 10)
      def b(i):
        ni = math_ops.add(i, 1)
        ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
        return ni
      r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
      self.assertEqual([10], self.evaluate(r))
      # The queue received 0..9 in iteration order.
      for i in xrange(10):
        self.assertEqual([i], self.evaluate(q.dequeue()))
@test_util.run_v1_only("b/120545219")
def testWhileTimeOut(self):
run_options = config_pb2.RunOptions(timeout_in_ms=1)
with self.cached_session() as sess:
n = constant_op.constant(0)
c = lambda x: True
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n])
with self.assertRaises(errors_impl.DeadlineExceededError):
sess.run(r, options=run_options)
  @test_util.disable_control_flow_v2("b/117119329 (stack)")
  @test_util.run_v1_only("b/120545219")
  def testWhileStack_1(self):
    """Push to a stack in one loop, then pop and sum in a second loop."""
    with self.cached_session():
      s = gen_data_flow_ops.stack_v2(-1, dtypes.int32, stack_name="foo")
      i = constant_op.constant(0)
      def c(i):
        return math_ops.less(i, 10)
      def b(i):
        ni = math_ops.add(i, 1)
        ni = control_flow_ops.with_dependencies(
            [gen_data_flow_ops.stack_push_v2(s, i)], ni)
        return ni
      r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
      x = constant_op.constant(0)
      def c1(i, _):
        return math_ops.greater(i, 0)
      def b1(i, x):
        ni = math_ops.subtract(i, 1)
        nx = x + gen_data_flow_ops.stack_pop_v2(s, dtypes.int32)
        return [ni, nx]
      _, rx = control_flow_ops.while_loop(
          c1,
          b1, [r, x],
          [r.get_shape(), tensor_shape.unknown_shape()],
          parallel_iterations=1)
      # The first loop pushed 0..9; their sum is 45.
      self.assertEqual(45, self.evaluate(rx))
  def _testWhileGrad_ColocateGradients(self, colocate):
    """Checks device placement of while-loop gradient ops.

    With colocate_gradients_with_ops, the backward Square ops must share the
    (GPU) device of the forward Square; without it, only the forward op does.
    """
    gpu_dev_name = test.gpu_device_name() if test.is_gpu_available(
    ) else "/device:CPU:0"
    graph = ops.Graph()
    with graph.as_default():
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      def b(x):
        with ops.device(gpu_dev_name):
          return math_ops.square(x)
      loop = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
      r = gradients_impl.gradients(
          loop, v, colocate_gradients_with_ops=colocate)[0]
    r_ops = graph.get_operations()
    r_devices = [(op.name, op.device) for op in r_ops]
    self.assertTrue(any("Square" in op.name for op in r_ops))
    for (name, dev) in r_devices:
      if not colocate and name.endswith("Square"):
        # Only forward graph contain gpu in Square device
        self.assertTrue(gpu_dev_name in dev)
      elif colocate and "Square" in name:
        # Forward and backward graphs contain gpu in Square/Square_grad devices
        self.assertTrue(gpu_dev_name in dev)
      else:
        self.assertFalse(gpu_dev_name in dev)
    with self.session(graph=graph) as sess:
      self.assertAllClose(1024.0, self.evaluate(r))
@test_util.disable_control_flow_v2("b/116351701 (colocation)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_ColocateGradients(self):
self._testWhileGrad_ColocateGradients(colocate=False)
self._testWhileGrad_ColocateGradients(colocate=True)
@test_util.run_v1_only("b/120545219")
def testWhileGrad_Square(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = control_flow_ops.cond(math_ops.less(1, 2), lambda: r, lambda: v)
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(1024.0, self.evaluate(r))
  @test_util.run_v1_only("b/120545219")
  def testWhileGrad_Shape(self):
    """Gradient w.r.t. a placeholder of unknown shape through a loop."""
    with self.cached_session():
      x = array_ops.placeholder(dtypes.float32, shape=[None])
      v = constant_op.constant([2.0], name="v")
      n = constant_op.constant(0, name="n")
      c = lambda i, v: math_ops.less(i, 5)
      b = lambda i, v: [i + 1, math_ops.multiply(x, v)]
      r = control_flow_ops.while_loop(
          c,
          b, [n, v],
          [n.get_shape(), tensor_shape.unknown_shape()],
          parallel_iterations=1)
      r = gradients_impl.gradients(r[1], x)[0]
      # The gradient keeps the placeholder's unknown static shape.
      self.assertEqual([None], r.get_shape().as_list())
      self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]}))
  @test_util.run_deprecated_v1
  def testWhileGrad_BaseShape(self):
    """Gradient when the loop never executes (condition is always False)."""
    with self.cached_session() as sess:
      x = array_ops.placeholder(dtypes.float32, [None])
      v0 = constant_op.constant([2.0, 2.0], name="v")
      c = lambda v: constant_op.constant(False)
      b = lambda v: math_ops.multiply(v, x)
      r = control_flow_ops.while_loop(c, b, [v0])
      y = math_ops.square(x)
      # Gradient comes only from y = x^2 since the loop body never runs.
      r = gradients_impl.gradients([r, y], x)[0]
      self.assertAllClose([2.0, 4.0], sess.run(r, feed_dict={x: [1.0, 2.0]}))
  @test_util.run_deprecated_v1
  @test_util.enable_output_all_intermediates
  def testWhileGradAfterSessionRun(self):
    """Gradients can still be built after the loop result was evaluated."""
    v0 = constant_op.constant(2.)
    r = control_flow_ops.while_loop(
        lambda _: True, lambda v: v * v, [v0], maximum_iterations=3)
    self.assertAllEqual(r, 256.)
    grad = gradients_impl.gradients(r, v0)[0]
    self.assertAllClose(grad, 1024.)
  @test_util.run_deprecated_v1
  @test_util.enable_output_all_intermediates
  def testNestedWhileGradAfterSessionRun(self):
    """Gradients through nested loops after the result was evaluated."""
    v0 = constant_op.constant(2.)
    def body(v):
      inner_v0 = constant_op.constant(1.)
      return control_flow_ops.while_loop(
          lambda _: True, lambda x: x * v, [inner_v0], maximum_iterations=2)
    r = control_flow_ops.while_loop(
        lambda _: True, body, [v0], maximum_iterations=3)
    self.assertAllEqual(r, 256.)
    grad = gradients_impl.gradients(r, v0)[0]
    self.assertAllClose(grad, 1024.)
@test_util.run_v1_only("b/120545219")
def testWhileGrad_MultipleUses(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.multiply(r, r)
r = gradients_impl.gradients(r, v)[0]
self.assertEqual(524288.0, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileGrad_LoopAdd(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.add(r, r)
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(2048.0, self.evaluate(r))
  def _testWhileGrad_Mul(self, use_gpu, p_iters):
    """Gradients w.r.t. both the loop variable and a captured constant."""
    with self.cached_session(use_gpu=use_gpu) as sess:
      a = constant_op.constant(3.0, name="a")
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      b = lambda v: math_ops.multiply(v, a)
      r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=p_iters)
      grad_a, grad_v = gradients_impl.gradients(r, [a, v])
      grad_a_val, grad_v_val = self.evaluate([grad_a, grad_v])
      # The loop computes v * a^4 = 162; dr/da = 4*v*a^3 = 216, dr/dv = a^4 = 81.
      self.assertAllClose(216.0, grad_a_val)
      self.assertAllClose(81.0, grad_v_val)
@test_util.run_deprecated_v1
def testWhileGrad_Mul(self):
self._testWhileGrad_Mul(use_gpu=False, p_iters=1)
self._testWhileGrad_Mul(use_gpu=False, p_iters=10)
self._testWhileGrad_Mul(use_gpu=True, p_iters=1)
self._testWhileGrad_Mul(use_gpu=True, p_iters=10)
  def testWhileGradInControlDeps(self):
    """Gradient construction under a control dependency on the loop output."""
    @def_function.function
    def f():
      x_init = constant_op.constant(2.)
      loop_cond = lambda i, x: math_ops.less(i, 2)
      loop_body = lambda i, x: [i + 1, x**2]
      _, x = control_flow_ops.while_loop(loop_cond, loop_body, [0, x_init])
      with ops.control_dependencies([x]):
        (grad,) = gradients_impl.gradients(x, x_init)
      return grad
    self.assertAllEqual(f(), 4. * 2.**3)  # 4 * x_init ^ 3
  def _testNestedWhileCondWhileGrad(self, use_gpu):
    """Gradient of a while loop whose body is a cond containing a while."""
    with self.cached_session(use_gpu=use_gpu):
      v = constant_op.constant(1.0)
      def inner_loop(s):
        z = constant_op.constant(0)
        c = lambda i, x: math_ops.less(i, 4)
        b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
        return control_flow_ops.while_loop(c, b, [z, s])
      c = lambda x: math_ops.less(x, 128.0)
      def b(x):
        return control_flow_ops.cond(
            constant_op.constant(True),
            lambda: math_ops.square(inner_loop(x)[1]),
            lambda: math_ops.multiply(x, 2.0))
      r = control_flow_ops.while_loop(c, b, [v])
      r = gradients_impl.gradients(r, v)[0]
      self.assertAllClose(512.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testNestedWhileCondWhileGrad(self):
  """CPU variant of the nested while/cond/while gradient test."""
  self._testNestedWhileCondWhileGrad(use_gpu=False)
@test_util.run_deprecated_v1
def testNestedWhileCondWhileGradGpu(self):
  """GPU variant of the nested while/cond/while gradient test."""
  self._testNestedWhileCondWhileGrad(use_gpu=True)
@test_util.run_v1_only("b/120545219")
def testWhileGrad_Variable(self):
  """Gradient w.r.t. a (ref) Variable captured by the loop body.

  Same math as _testWhileGrad_Mul: r = v * a**4, dr/da = 216.
  """
  with self.cached_session():
    a = variables.Variable(3.0)
    v = constant_op.constant(2.0, name="v")
    c = lambda v: math_ops.less(v, 100.0)
    b = lambda v: math_ops.multiply(v, a)
    r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)

    r = gradients_impl.gradients(r, a)
    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose(216.0, r[0])
@test_util.run_deprecated_v1
def testWhileGrad_ResourceVariable(self):
  """Gradient w.r.t. a ResourceVariable captured by the loop body.

  Resource-variable counterpart of testWhileGrad_Variable; expects 216.
  """
  with self.cached_session():
    a = resource_variable_ops.ResourceVariable(3.0)
    v = constant_op.constant(2.0, name="v")
    c = lambda v: math_ops.less(v, 100.0)
    b = lambda v: math_ops.multiply(v, a)
    r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)

    g = gradients_impl.gradients(r, a)
    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose(216.0, g[0])
def testWhileGrad_EagerResourceVariable(self):
  """Gradient through a loop capturing a ResourceVariable, in eager mode.

  Two iterations of x *= reduce_sum(a) * v with sum(a) = 4 give
  r = (4v)**2 = 16 v**2, so dr/dv = 32 at v = 1.
  """
  with context.eager_mode():
    a = resource_variable_ops.ResourceVariable(
        np.ones([2, 2], dtype=np.float32))
    v = constant_op.constant(1.0)

    @eager_function.defun
    def fn():
      r = control_flow_ops.while_loop(
          lambda i, _: i < 2,
          lambda i, x: (i + 1, x * math_ops.reduce_sum(a) * v),
          [0, 1.0])[1]
      return gradients_impl.gradients(r, [v])[0]

    self.assertEqual(self.evaluate(fn()), 32.)
def testWhileGrad_ResourceVarInFunctionCall(self):
  """Gradient w.r.t. a variable sparse-read inside a tf.function called from a loop.

  Two loop iterations each read indices [1, 3], so the gradient is 2.0 at
  those indices and 0 elsewhere.
  """

  @def_function.function
  def foo(x, var):
    return x + math_ops.reduce_sum(var.sparse_read([1, 3]))

  @def_function.function
  def bar(var):
    r = control_flow_ops.while_loop(
        lambda i, _: i < 2,
        lambda i, x: (i + 1, foo(x, var)),
        [0, 0.0])[1]
    return gradients_impl.gradients(r, var)[0]

  var = resource_variable_ops.ResourceVariable([1., 2., 3., 4.])
  self.evaluate(variables.global_variables_initializer())
  grad = self.evaluate(bar(var))
  self.assertAllEqual(gradient_checker_v2._to_numpy(grad), [0., 2., 0., 2.])
def testWhileGrad_ResourceVarInNestedFunctionCall(self):
  """Same as testWhileGrad_ResourceVarInFunctionCall, with an extra function layer.

  `foo2` wraps `foo` so the sparse read happens two function calls deep.
  """

  @def_function.function
  def foo(x, var):
    return x + math_ops.reduce_sum(var.sparse_read([1, 3]))

  @def_function.function
  def foo2(x, var):
    return foo(x, var)

  @def_function.function
  def bar(var):
    r = control_flow_ops.while_loop(
        lambda i, _: i < 2,
        lambda i, x: (i + 1, foo2(x, var)),
        [0, 0.0])[1]
    return gradients_impl.gradients(r, var)[0]

  var = resource_variable_ops.ResourceVariable([1., 1., 1., 1.])
  self.evaluate(variables.global_variables_initializer())
  grad = self.evaluate(bar(var))
  self.assertAllEqual(gradient_checker_v2._to_numpy(grad), [0., 2., 0., 2.])
def testWhileGrad_ResourceVarInLoopInFunctionCall(self):
  """Gradient through a loop whose body calls a function containing another loop.

  The inner 3-iteration loop reads indices [1, 2]; run twice by the outer
  loop, giving gradient 6.0 at those indices. Skipped on GPU (b/128635252).
  """
  if test.is_gpu_available():
    self.skipTest("b/128635252")

  @def_function.function
  def foo(x, var):
    return control_flow_ops.while_loop(
        lambda j, _: j < 3,
        lambda j, y: (j + 1,
                      y + math_ops.reduce_sum(var.sparse_read([1, 2]))),
        [0, x])[1]

  @def_function.function
  def bar(var):
    r = control_flow_ops.while_loop(
        lambda i, _: i < 2,
        lambda i, x: (i + 1, foo(x, var)),
        [0, 0.0])[1]
    return gradients_impl.gradients(r, var)[0]

  var = resource_variable_ops.ResourceVariable([1., 1., 1., 1.])
  self.evaluate(variables.global_variables_initializer())
  grad = self.evaluate(bar(var))
  self.assertAllEqual(gradient_checker_v2._to_numpy(grad), [0., 6., 6., 0.])
def testWhileCondGrad_ResourceVarInFunctionCall(self):
  """Gradient w.r.t. two variables read from alternating cond branches in a loop.

  Over 4 iterations, even `i` reads var1[1] and odd `i` reads var2[1],
  so each variable accumulates a gradient of 2.0 at index 1.
  """

  @def_function.function
  def foo(x, var):
    return x + var.sparse_read([1])[0]

  def body(i, x):
    return (i + 1, control_flow_ops.cond(
        math_ops.equal(i % 2, 0),
        lambda: foo(x, var1),
        lambda: foo(x, var2)))

  @def_function.function
  def bar(var1, var2):
    r = control_flow_ops.while_loop(
        lambda i, _: i < 4, body, [0, 0.0])
    return gradients_impl.gradients(r, [var1, var2])

  var1 = resource_variable_ops.ResourceVariable([1., 2., 3.])
  var2 = resource_variable_ops.ResourceVariable([4., 5.])
  self.evaluate(variables.global_variables_initializer())
  grads = self.evaluate(bar(var1, var2))
  self.assertAllEqual(gradient_checker_v2._to_numpy(grads[0]), [0., 2., 0.])
  self.assertAllEqual(gradient_checker_v2._to_numpy(grads[1]), [0., 2.])
@test_util.run_deprecated_v1
def testWhileGrad_ResourceVarSparseRead(self):
  """Loop gradient that mixes IndexedSlices (from sparse_read) with dense Tensors."""
  # NOTE(skyewm): this test is interesting because the gradient is the
  # aggregation result of IndexedSlices and Tensors.
  var = resource_variable_ops.ResourceVariable(np.ones(5),
                                               dtype=dtypes.float32)
  r = control_flow_ops.while_loop(
      lambda i, _: i < 3,
      lambda i, x: (i + 1, x * math_ops.reduce_sum(var.sparse_read([1, 3]))),
      [0, constant_op.constant(1.0)])[1]
  grad = gradients_impl.gradients(r, var)[0]

  self.evaluate(variables.global_variables_initializer())
  grad_val = self.evaluate(grad)
  arr = gradient_checker_v2._to_numpy(grad_val)
  self.assertAllEqual(arr, [0., 12., 0., 12., 0.])
@test_util.run_deprecated_v1
def testWhileGrad_MultiResourceVarSparseRead(self):
  """Gradients w.r.t. two variables sparse-read through independent loop outputs."""
  # NOTE(skyewm): this test is interesting because the gradient is the
  # aggregation result of IndexedSlices and Tensors.
  var1 = resource_variable_ops.ResourceVariable(np.ones(5),
                                                dtype=dtypes.float32)
  var2 = resource_variable_ops.ResourceVariable(np.ones(3),
                                                dtype=dtypes.float32)
  x1_init = constant_op.constant([0., 0.])
  x2_init = constant_op.constant(1.)
  x3_init = constant_op.constant(1.)

  def body(i, unused_x1, x2, x3):
    # x1 is overwritten each iteration (only the last read matters for its
    # gradient); x3 chains multiplicatively through var2.
    y1 = var1.sparse_read([1, 3])
    y2 = x2 * 2
    y3 = x3 * math_ops.reduce_sum(var2.sparse_read([0]))
    return i + 1, y1, y2, y3

  r = control_flow_ops.while_loop(
      lambda i, x1, x2, x3: i < 3, body,
      [0, x1_init, x2_init, x3_init])[1:]

  var1_grad, var2_grad = gradients_impl.gradients(r, [var1, var2])
  self.evaluate(variables.global_variables_initializer())
  var1_grad_val = self.evaluate(var1_grad)
  var2_grad_val = self.evaluate(var2_grad)
  self.assertAllEqual(gradient_checker_v2._to_numpy(var1_grad_val),
                      [0., 1., 0., 1., 0.])
  self.assertAllEqual(gradient_checker_v2._to_numpy(var2_grad_val),
                      [3., 0., 0.])
@test_util.run_deprecated_v1
def testWhileGrad_Gather(self):
  """Gradient where the gather grad function produces IndexedSlices, with fan-out."""
  # NOTE(skyewm): this test is interesting because the gather gradient
  # function returns an IndexedSlices.
  x = constant_op.constant([1., 1., 1., 1., 1.])
  y = control_flow_ops.while_loop(
      lambda i, _: i < 3,
      lambda i, x: (i + 1, x + array_ops.gather(x, [0])),
      [0, x[:1]])[1]
  z = y * 3.0
  grad = gradients_impl.gradients(z, x)[0]
  self.assertEqual(self.evaluate(y), 8.)
  self.assertAllEqual(self.evaluate(grad), [24., 0., 0., 0., 0.])
@test_util.run_deprecated_v1
def testWhileGrad_GatherNoFanOut(self):
  """Gradient of a gather-only loop body (IndexedSlices grad, no fan-out)."""
  # NOTE(skyewm): this test is interesting because the gather gradient
  # function returns an IndexedSlices.
  x = constant_op.constant([1., 1., 1., 1., 1.])
  y = control_flow_ops.while_loop(
      lambda i, _: i < 3,
      lambda i, x: (i + 1, array_ops.gather(x, [0])),
      [0, x[:1]])[1]
  z = y * 3.0
  grad = gradients_impl.gradients(z, x)[0]
  self.assertEqual(self.evaluate(y), 1.)
  self.assertAllEqual(self.evaluate(grad), [3., 0., 0., 0., 0.])
@test_util.run_v1_only("b/120545219")
def testWhileGradInCond(self):
  """A while-loop gradient computed inside a cond branch."""
  with self.cached_session():
    n = ops.convert_to_tensor(1.0, name="n")
    x = array_ops.placeholder(dtypes.float32, shape=None)
    c = lambda n: math_ops.less(n, 10.0)
    b = lambda n: math_ops.add(n, x)

    def fn1():
      # Gradient of the accumulating loop w.r.t. the placeholder x.
      r = control_flow_ops.while_loop(c, b, [n],
                                      [tensor_shape.unknown_shape()])
      return gradients_impl.gradients(r, x)[0]

    r = control_flow_ops.cond(math_ops.less(1, 2), fn1, lambda: x)
    self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
@test_util.disable_control_flow_v2("b/116340060")
@test_util.run_v1_only("b/120545219")
def testGradInWhileWrtInitialLoopVal(self):
  """Taking a gradient inside a loop w.r.t. a pre-loop tensor must raise."""
  with self.cached_session():
    x = array_ops.placeholder(dtypes.float32, shape=(), name="x")
    y = x + 1

    def body(i, v):
      # Gradient w.r.t. x (which feeds the initial loop value y) is not
      # supported inside the loop body.
      z = v * 2
      return i + 1, gradients_impl.gradients(z, x)[0]

    with self.assertRaisesRegexp(
        ValueError,
        "Cannot compute gradient inside while loop with respect to op 'x'. "
        "We do not support taking the gradient wrt or through the initial "
        "value of a loop variable. Gradients can be computed through "
        "loop invariants or wrt the input parameters to the loop body."):
      control_flow_ops.while_loop(lambda i, x: i < 3, body, [0, y])
@test_util.run_v1_only("b/120545219")
def testWhileGradInWhile(self):
  """A while-loop gradient computed inside another while loop's body."""
  with self.cached_session():
    n = ops.convert_to_tensor(1.0, name="n")
    x = array_ops.placeholder(dtypes.float32, shape=None)
    c = lambda n: math_ops.less(n, 10.0)
    b = lambda n: math_ops.add(n, x)

    def b1(n):
      # Inner loop gradient w.r.t. the placeholder becomes the outer
      # loop's next value.
      r = control_flow_ops.while_loop(c, b, [n],
                                      [tensor_shape.unknown_shape()])
      return gradients_impl.gradients(r, x)

    r = control_flow_ops.while_loop(lambda n: n < 6.0, b1, [n],
                                    [tensor_shape.unknown_shape()])
    self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
@test_util.run_v1_only("b/120545219")
def testCondGradInNestedWhiles(self):
  """Gradient of a cond computed inside two levels of while-loop nesting.

  The inner body replaces x with d(cond(x))/dx, which the asserted values
  show converges to 1.0 after the outer loop's three iterations.
  """

  def outer_body(i, x):
    _, x = control_flow_ops.while_loop(
        lambda j, x: j < 3, inner_body, [0, 0.0])
    return i + 1, x

  def inner_body(j, x):
    y = control_flow_ops.cond(math_ops.less(x, 1), lambda: 2 * x, lambda: x)
    return j + 1, gradients_impl.gradients(y, x)[0]

  i, x = control_flow_ops.while_loop(lambda i, x: i < 3, outer_body, [0, 0.0])

  # Fix: the session handle was bound as `sess` but never used.
  with self.cached_session():
    i_val, x_val = self.evaluate([i, x])
    self.assertEqual(i_val, 3)
    self.assertAllClose(x_val, 1.0)
@test_util.run_gpu_only
def testGpuResourceAccess(self):
  """Reading a GPU-placed ResourceVariable inside while/cond in a tf.function.

  Three iterations add var (3.0) each time, so the loop output is 9.0.
  """
  with ops.device(test.gpu_device_name()):
    var = resource_variable_ops.ResourceVariable(constant_op.constant(3.0))

  @def_function.function
  def foo():
    return control_flow_ops.while_loop(
        lambda i, _: i < 3,
        lambda i, x: (i + 1, control_flow_ops.cond(
            constant_op.constant(True),
            lambda: x + var,
            lambda: x)),
        [0, 0.0])[1]

  self.evaluate(variables.global_variables_initializer())
  self.assertEqual(self.evaluate(foo()), 9.0)
def testNestedResourceAccess(self):
  """Variable access inside while -> cond -> while nesting, plus its gradient.

  Checks the forward value (81.0) and, under control flow v2 only, the
  analytically-derived gradient (4.0) — v1 control flow is documented
  below as getting the gradient wrong.
  """
  var = resource_variable_ops.ResourceVariable(constant_op.constant(3.0))

  @eager_function.defun
  def test_fn():
    x = constant_op.constant(0.0)
    r = control_flow_ops.while_loop(
        # Outer loop condition
        lambda i, y: i < 2,
        # Outer loop body
        lambda i, y: (i + 1, y + control_flow_ops.cond(
            constant_op.constant(True),
            # True branch
            lambda: control_flow_ops.while_loop(
                # Inner loop condition
                lambda j, z: j < 3,
                # Inner loop body
                lambda j, z: (j + 1, z + math_ops.square(var)),
                # Inner initial loop value
                [0, y])[1],
            # False branch
            lambda: (0.0))),
        # Outer initial loop value
        [0, x])[1]

    grad = gradients_impl.gradients(r, x)[0]
    return r, grad

  self.evaluate(variables.global_variables_initializer())
  r, grad = self.evaluate(test_fn())
  # 2 * 3 * 3^2
  self.assertEqual(r, 81.0)
  # v1 control flow gets the wrong answer!!!
  #
  # Gradient computation:
  #   f(x) = x + 3^2
  #   inner_loop(x) = f(f(f(x))) = x + 3*3^2 = x + 27
  #   g(x) = x + inner_loop(x) = 2x + 27
  #   outer_loop(x) = g(g(x)) = 4x + 81
  #   outer_loop'(x) = 4
  # Note that v1 control flow gets 4.0 as well if the cond is removed.
  if control_flow_util.ENABLE_CONTROL_FLOW_V2:
    self.assertEqual(grad, 4.0)
def testWhile_NestedInput(self):
  """while_loop must preserve a nested loop-variable structure.

  Loop vars are a namedtuple, a plain tuple, and a lone Tensor; the output
  must keep the same nesting and the flattened values must match the
  100-iteration updates performed by the body.
  """
  # Fix: the session handle was bound as `sess` but never used; also use
  # assertIsInstance instead of assertTrue(isinstance(...)) for clearer
  # failure messages.
  with self.cached_session():
    named = collections.namedtuple("named", ("a", "b"))
    loop_vars = [
        named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
        (constant_op.constant(2.0), constant_op.constant(3.0)),
        constant_op.constant(4.0)
    ]
    c = lambda lv0, _1, _2: lv0.a < 100.0

    def b(lv0, lv1, lv2):
      lv0 = named(a=lv0.a + 1, b=lv0.b)
      lv1 = (lv1[0] + 1, lv1[1])
      lv2 += 2
      return [lv0, lv1, lv2]

    r = control_flow_ops.while_loop(c, b, loop_vars)

    self.assertIsInstance(r, list)
    self.assertIsInstance(r[0], named)
    self.assertIsInstance(r[1], tuple)
    self.assertIsInstance(r[2], ops.Tensor)
    r_flattened = nest.flatten(r)
    self.assertEqual([100.0, 1.0, 102.0, 3.0, 4.0 + 100 * 2.0],
                     self.evaluate(r_flattened))
@test_util.run_v1_only("b/120545219")
def testWhile_NestedBadArityFails(self):
  """A body returning fewer loop vars than it receives must raise ValueError."""
  with self.cached_session():
    named = collections.namedtuple("named", ("a", "b"))
    loop_vars = [
        named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
        (constant_op.constant(2.0), constant_op.constant(3.0)),
        constant_op.constant(4.0)
    ]
    c = lambda lv0, _1, _2: lv0.a < 100.0

    def b(lv0, lv1, _):
      # Drops the third loop var — structurally invalid.
      return [lv0, lv1]

    with self.assertRaisesRegexp(ValueError, "the same number of elements"):
      control_flow_ops.while_loop(c, b, loop_vars)
@test_util.run_v1_only("b/120545219")
def testWhileGrad_ys_xs(self):
  """Gradients for every combination of loop outputs (ys) and inputs (xs)."""
  with self.cached_session():
    x = constant_op.constant(3.0, name="x")
    y = constant_op.constant(2.0, name="y")
    c = lambda x, y: math_ops.less(x, 100.0)

    def b(x, y):
      y1 = math_ops.add(x, y)
      x1 = math_ops.multiply(x, y1)
      return x1, y1

    rx, ry = control_flow_ops.while_loop(c, b, [x, y], parallel_iterations=1)

    r = gradients_impl.gradients([rx, ry], x)
    self.assertAllClose(304.0, r[0])
    r = gradients_impl.gradients([rx, ry], y)
    self.assertAllClose(124.0, r[0])
    r = gradients_impl.gradients([rx], x)
    self.assertAllClose(295.0, r[0])
    r = gradients_impl.gradients([rx], y)
    self.assertAllClose(120.0, r[0])
@test_util.run_deprecated_v1
def testWhileGrad_Dependency(self):
  """Gradient is the same whether or not the counter output is in ys.

  Ten doublings give rx = 1024 * x / 2... specifically d rx/d x = 2**10.
  """
  with self.cached_session():
    i = constant_op.constant(0, name="i")
    x = constant_op.constant(2.0, name="x")
    c = lambda i, x: math_ops.less(i, 10)

    def b(i, x):
      x = math_ops.multiply(x, 2.0)
      i = math_ops.add(i, 1)
      return i, x

    ri, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)

    # Including the (gradient-free) counter ri in ys must not change d rx/d x.
    r = gradients_impl.gradients([ri, rx], x)
    self.assertAllClose(1024.0, r[0])
    r = gradients_impl.gradients([rx], x)
    self.assertAllClose(1024.0, r[0])
@test_util.run_v1_only("b/120545219")
def testWhileGrad_NoGradient(self):
  """With back_prop=False the loop contributes no gradient; only the add does."""
  with self.cached_session():
    v = constant_op.constant(2.0, name="v")
    c = lambda v: math_ops.less(v, 100.0)
    b = math_ops.square
    r = control_flow_ops.while_loop(c, b, [v], back_prop=False)
    r = math_ops.add(r, v)
    r = gradients_impl.gradients(r, v)
    # d(r + v)/dv = 1 because the loop path is cut off.
    self.assertAllClose(1.0, r[0])
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_NoDependency(self):
  """Gradient through a loop var that is carried unchanged each iteration."""
  with self.cached_session() as sess:
    variable = variables.Variable(array_ops.ones([2, 3]))
    duration = array_ops.zeros([], dtype=dtypes.int32)

    def cond(duration, tensor, _):
      del tensor
      return duration < 10

    def body(duration, tensor, _):
      # The second slot passes `tensor` straight through; the third is
      # overwritten with it every iteration.
      return (duration + 1, tensor, tensor)

    loop_vars = [duration, variable, variable]
    tensors = control_flow_ops.while_loop(
        cond=cond, body=body, loop_vars=loop_vars)
    cost = math_ops.reduce_sum(tensors[2])
    grad = gradients_impl.gradients(cost, [variable])
    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose(np.ones([2, 3]), sess.run(grad[0]))
@test_util.run_deprecated_v1
def testWhileGrad_Const(self):
  """Gradient is zero when the loop body replaces the var with a constant."""
  with self.cached_session() as sess:
    c0 = constant_op.constant(0.0, name="c0")
    c1 = constant_op.constant(1.0, name="c1")
    duration = constant_op.constant(0, name="t")

    def cond(duration, _):
      return duration < 1

    def body(duration, _):
      # The second loop var is discarded and replaced by c1, severing any
      # dependency on c0.
      return duration + 1, c1

    loop_vars = [duration, c0]
    tensors = control_flow_ops.while_loop(
        cond=cond, body=body, loop_vars=loop_vars)
    cost = math_ops.reduce_sum(tensors[1])
    grad = gradients_impl.gradients(cost, [c0])
    self.assertAllClose(0.0, sess.run(grad[0]))
@test_util.run_v1_only("b/120545219")
def testWhileGrad_SerialTwoLoops(self):
  """Gradient through two while loops chained in series (5 + 5 doublings)."""
  with self.cached_session():
    i = constant_op.constant(0, name="i")
    x = constant_op.constant(2.0, name="x")
    c = lambda i, x: math_ops.less(i, 5)

    def b(i, x):
      x = math_ops.multiply(x, 2.0)
      i = math_ops.add(i, 1)
      return i, x

    _, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
    _, rx = control_flow_ops.while_loop(c, b, [i, rx], parallel_iterations=1)

    # d rx/d x = 2**10 across both loops.
    r = gradients_impl.gradients([rx], x)
    self.assertAllClose(1024.0, r[0])
@test_util.run_v1_only("b/120545219")
def testWhileGrad_ParallelTwoLoops(self):
  """Gradient through two independent loops whose outputs are summed.

  Each loop contributes 2**5 = 32, so d(r1 + r2)/dx = 64.
  """
  with self.cached_session():
    i = constant_op.constant(0, name="i")
    x = constant_op.constant(2.0, name="x")
    c = lambda i, x: math_ops.less(i, 5)

    def b(i, x):
      x = math_ops.multiply(x, 2.0)
      i = math_ops.add(i, 1)
      return i, x

    _, r1 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
    _, r2 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
    rx = math_ops.add(r1, r2)

    r = gradients_impl.gradients([rx], x)
    self.assertAllClose(64.0, r[0])
@test_util.run_v1_only("b/120545219")
def testWhileGrad_OneOutputWithControlDependencyOnSecond(self):
  """Gradient of one loop output taken under a control dependency on another.

  y_f_d = identity(y_f) with a control dep on x_f; d y_f_d / d x = 1.0
  because y_f = x + 1.0 after one iteration.
  """
  with self.cached_session():
    i = constant_op.constant(0, name="i")
    x = constant_op.constant(1.0, name="x")
    y = constant_op.constant(1.0, name="y")
    c = lambda i, *_: math_ops.less(i, 1, name="cond_less")

    def b(i, xi, yi):
      # return (i + 1, xi, xi + yi)
      return (math_ops.add(i, 1, name="inc"), array_ops.identity(
          xi, name="xi"), math_ops.add(xi, yi, name="xi_plus_yi"))

    _, x_f, y_f = control_flow_ops.while_loop(c, b, [i, x, y])
    with ops.control_dependencies([x_f]):
      y_f_d = array_ops.identity(y_f, name="y_f_d")

    self.assertAllClose(2.0, self.evaluate(y_f_d))  # y_f_d = 1.0 + 1.0
    g = gradients_impl.gradients([y_f_d], [x])[0]
    # Fix: use assertIsNotNone instead of assertTrue(g is not None).
    self.assertIsNotNone(g)
    self.assertAllClose(1.0,
                        self.evaluate(g))  # y_f_d = x + 1.0, dy_f_d/dx = 1.0
def _testNestedWhileGrad_Simple(self, use_gpu):
  """Gradient through a simple while loop nested inside another.

  Outer body doubles the inner loop's output; the asserted gradient at
  v = 1 is 8.0.

  Args:
    use_gpu: whether to place the session on GPU.
  """
  with self.cached_session(use_gpu=use_gpu):
    v = constant_op.constant(1.0)

    def inner_loop(s):
      c = lambda x: math_ops.less(x, 4.0)
      b = lambda x: math_ops.multiply(x, 2.0)
      return control_flow_ops.while_loop(c, b, [s])

    c = lambda x: math_ops.less(x, 2.0)
    b = lambda x: math_ops.multiply(inner_loop(x), 2.0)
    r = control_flow_ops.while_loop(c, b, [v])

    r = gradients_impl.gradients(r, v)[0]
    self.assertAllClose(8.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testNestedWhileGrad_Simple(self):
  """Exercises the simple nested while-loop gradient on CPU, then GPU."""
  for use_gpu in (False, True):
    self._testNestedWhileGrad_Simple(use_gpu=use_gpu)
@test_util.run_v1_only("b/120545219")
def testNestedWhileGrad_SerialInner(self):
  """Gradient through two inner loops composed in series inside an outer loop."""
  with self.cached_session():
    v = constant_op.constant(1.0)

    def inner_loop1(s):
      z = constant_op.constant(0)
      c = lambda i, x: math_ops.less(i, 4)
      b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
      return control_flow_ops.while_loop(c, b, [z, s])

    def inner_loop2(s):
      z = constant_op.constant(0)
      c = lambda i, x: math_ops.less(i, 4)
      b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
      return control_flow_ops.while_loop(c, b, [z, s])

    c = lambda x: math_ops.less(x, 128.0)
    # Serial composition: feed inner_loop1's value into inner_loop2.
    b = lambda x: inner_loop2(inner_loop1(x)[1])[1]
    r = control_flow_ops.while_loop(c, b, [v])

    r = gradients_impl.gradients(r, v)[0]
    self.assertAllClose(256.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testNestedWhileGrad_ParallelInner(self):
  """Gradient through two inner loops combined in parallel (multiplied)."""
  with self.cached_session():
    v = constant_op.constant(1.0)

    def inner_loop1(s):
      z = constant_op.constant(0)
      c = lambda i, x: math_ops.less(i, 4)
      b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
      return control_flow_ops.while_loop(c, b, [z, s])

    def inner_loop2(s):
      z = constant_op.constant(0)
      c = lambda i, x: math_ops.less(i, 4)
      b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
      return control_flow_ops.while_loop(c, b, [z, s])

    c = lambda x: math_ops.less(x, 128.0)
    # Parallel composition: both inner loops consume the same x.
    b = lambda x: math_ops.multiply(inner_loop1(x)[1], inner_loop2(x)[1])
    r = control_flow_ops.while_loop(c, b, [v])

    r = gradients_impl.gradients(r, v)[0]
    self.assertAllClose(512.0, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testNestedWhileGrad_ParallelIterations(self):
  """Stack push/pop ordering across parallel iterations of nested map_fn loops.

  Runs one Adam step on nested map_fn outputs and checks the variable moved
  by exactly one learning-rate step (3.0 -> 2.999).
  """
  # Make sure the stack pushes and pops of an inner loop are executed in
  # the sequential order of the iterations of its outer loop.
  # Fix: the session handle was bound as `sess` but never used.
  with self.cached_session():

    def inner_loop(t):
      fn = lambda n: n + math_ops.square(var)
      return map_fn.map_fn(fn=fn, elems=t, parallel_iterations=10)

    def outer_loop(inp):
      return map_fn.map_fn(
          fn=inner_loop, elems=inp, parallel_iterations=10)

    var = variables.Variable(constant_op.constant(3.0))
    inp = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    res = outer_loop(inp)
    optimizer = adam.AdamOptimizer(learning_rate=0.001)
    train_op = optimizer.minimize(math_ops.reduce_mean(math_ops.square(res)))
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(train_op)
    self.assertAllClose(2.999, var.read_value())
def _testWhileCondGrad_Simple(self, use_gpu):
  """Gradient through a while loop whose body is a cond.

  The predicate is constant True, so the body always squares x; the
  loop computes v**8 at v = 2 and d v**8/d v = 8 * 2**7 = 1024.

  Args:
    use_gpu: whether to place the session on GPU.
  """
  with self.cached_session(use_gpu=use_gpu):
    v = ops.convert_to_tensor(2.0, name="v")
    n = ops.convert_to_tensor(100.0, name="n")
    one = ops.convert_to_tensor(1.0, name="one")
    c = lambda x: math_ops.less(x, n)
    # pylint: disable=undefined-variable
    # for OSS build
    b = lambda x: control_flow_ops.cond(constant_op.constant(True),
                                        lambda: math_ops.square(x),
                                        lambda: math_ops.subtract(x, one))
    # pylint: enable=undefined-variable
    r = control_flow_ops.while_loop(c, b, [v])
    r = gradients_impl.gradients(r, v)[0]
    self.assertAllClose(1024.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testWhileCondGrad_Simple(self):
  """Exercises the while/cond gradient on CPU, then GPU."""
  for use_gpu in (False, True):
    self._testWhileCondGrad_Simple(use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testWhileCondGrad_UnknownShape(self):
  """Same while/cond gradient as _testWhileCondGrad_Simple, but the loop
  variable is a placeholder with unknown shape."""
  with self.cached_session() as sess:
    v = array_ops.placeholder(dtypes.float32)
    n = ops.convert_to_tensor(100.0, name="n")
    one = ops.convert_to_tensor(1.0, name="one")
    c = lambda x: math_ops.less(x, n)
    # pylint: disable=undefined-variable
    # for OSS build
    b = lambda x: control_flow_ops.cond(constant_op.constant(True),
                                        lambda: math_ops.square(x),
                                        lambda: math_ops.subtract(x, one))
    # pylint: enable=undefined-variable
    r = control_flow_ops.while_loop(c, b, [v])
    r = gradients_impl.gradients(r, v)[0]
    r = sess.run(r, feed_dict={v: 2.0})
    self.assertAllClose(1024.0, r)
@test_util.run_deprecated_v1
def testWhileGrad_Concat(self):
  """Training step through a loop that grows a tensor by concatenation.

  The loop concatenates x twice; one SGD step with lr 0.01 on the sum
  moves each element of x down by 0.02.
  """
  # Fix: the session handle was bound as `sess` but never used.
  with self.cached_session():
    x = variable_scope.get_variable("x", initializer=[[1., 2.]])
    i0 = constant_op.constant(0)
    h0 = array_ops.zeros([0, 2])

    def condition(i, _):
      return i < 2

    def body(i, h):
      return i + 1, array_ops.concat([h, x], 0)

    # The shape invariant allows the first dimension to grow.
    _, h = control_flow_ops.while_loop(
        condition, body, [i0, h0],
        [i0.get_shape(), tensor_shape.TensorShape([None, 2])])
    s = math_ops.reduce_sum(h)

    optimizer = gradient_descent.GradientDescentOptimizer(0.01)
    op = optimizer.minimize(s)

    self.evaluate(variables.global_variables_initializer())
    self.evaluate(op)
    self.assertAllClose([[0.98000002, 1.98000002]], self.evaluate(x))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileWithRefsWithGradients_1(self):
  """While loop over ref-dtype loop vars, with grad_ys seeding the backprop."""
  with self.cached_session() as sess:
    x = variables.VariableV1(0.)._ref()  # pylint: disable=protected-access
    i = constant_op.constant(0)
    c = lambda i, x: math_ops.less(i, 10)

    self.assertEqual(x.dtype, dtypes.float32_ref)

    def body(i, x):
      # The ref dtype must be preserved through the loop body.
      self.assertEqual(x.dtype, dtypes.float32_ref)
      return [i + 1, gen_array_ops.ref_identity(x)]

    r = control_flow_ops.while_loop(c, body, [i, x], parallel_iterations=5)

    # Seed the gradient with 73 instead of the default ones.
    grad_ys = [variables.VariableV1(73)._ref()]  # pylint: disable=protected-access
    grad = gradients_impl.gradients([r[1]], [x], grad_ys=grad_ys)

    self.evaluate(variables.global_variables_initializer())

    self.assertEqual(r[0].dtype, dtypes.int32)
    self.assertEqual(r[1].dtype, dtypes.float32_ref)

    value_i, value_x, value_x_grad = sess.run(r + grad)
    self.assertEqual(10, value_i)
    self.assertEqual(0, value_x)
    self.assertEqual(73, value_x_grad)
@test_util.deprecated_graph_mode_only
def testWhileGrad_IndexedSlices(self):
  """Gradient through a loop whose second loop var is an IndexedSlices.

  Ten doublings give a gradient of 2**10 = 1024 for each slice value.
  """
  with self.cached_session():
    values = constant_op.constant([2.0, 4.0], name="values")
    indices = constant_op.constant([0, 3], name="indices")
    shape = constant_op.constant([10], name="dense_shape")
    i = constant_op.constant(0)
    x = ops.IndexedSlices(values, indices, dense_shape=shape)

    def c(i, _):
      return i < 10

    def b(i, x):
      return [
          i + 1,
          ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
      ]

    _, r = control_flow_ops.while_loop(c, b, [i, x])
    r = gradients_impl.gradients(r.values, values)[0]
    self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r))
@test_util.deprecated_graph_mode_only
def testWhileGrad_SparseTensor(self):
  """Gradient through a loop whose second loop var is a SparseTensor.

  SparseTensor counterpart of testWhileGrad_IndexedSlices; expects 1024.
  """
  with self.cached_session():
    values = constant_op.constant([2.0, 4.0], name="values")
    indices = constant_op.constant(
        [[0], [3]], dtype=dtypes.int64, name="indices")
    shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
    i = constant_op.constant(0)
    x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)

    def c(i, _):
      return i < 10

    def b(i, x):
      return [
          i + 1,
          sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
      ]

    _, r = control_flow_ops.while_loop(c, b, [i, x])
    r = gradients_impl.gradients(r.values, values)[0]
    self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r))
@test_util.deprecated_graph_mode_only
def testCallGradInLoop(self):
  """gradients() called inside a loop body, w.r.t. a tensor defined outside.

  Each iteration adds d(sum(data * params^2))/d params = 6 * 2 * params
  = 60; ten iterations accumulate 600.
  """
  # Fix: the session handle was bound as `sess` but never used.
  with self.cached_session():
    i0 = constant_op.constant(0)
    params = constant_op.constant(5.0)
    params_1 = math_ops.square(params)

    def c(i, _):
      return i < 10

    def b(i, x):
      data = constant_op.constant([1.0, 2.0, 3.0])
      data = math_ops.multiply(data, params_1)
      x1 = x + gradients_impl.gradients(data, params)[0]
      return i + 1, x1

    output_grad = control_flow_ops.while_loop(
        c, b, [i0, constant_op.constant(0.0)])
    self.assertAllClose(600.0, self.evaluate(output_grad)[1])
@test_util.run_deprecated_v1
def testWhileAndTensorArray(self):
  """Gradient of map_fn (TensorArray-backed) applied inside a while loop."""
  # Fix: the session handle was bound as `sess` but never used.
  with self.cached_session():
    param = constant_op.constant(2.0)
    n0 = constant_op.constant(0)
    y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")

    def c(i, _):
      return i < 10

    def b(i, y):
      return [
          i + 1,
          map_fn.map_fn(lambda x: math_ops.multiply(x, param), y)
      ]

    r = control_flow_ops.while_loop(c, b, [n0, y0], parallel_iterations=1)
    r = gradients_impl.gradients(r, param)[0]
    self.assertAllClose(107520.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testNestedWhileAndTensorArray(self):
  """Nested while loops writing a multiplication table into one TensorArray.

  The inner loop writes row*col at flat index n*(row-1)+(col-1); the result
  reshapes to the 3x3 table checked below.
  """
  n = constant_op.constant(3.0)

  def Body(row, ta):

    def InnerBody(row, col, ta):
      # Note: row and col are 1-based.
      ta = ta.write(
          math_ops.cast(n * (row - 1.) + col - 1., dtypes.int32), row * col)
      return row, col + 1., ta

    ta = control_flow_ops.while_loop(
        lambda _, col, _1: col <= n,
        InnerBody, [row, constant_op.constant(1.), ta],
        return_same_structure=False)[2]
    return row + 1., ta

  ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=9)
  ta = control_flow_ops.while_loop(
      lambda row, _: row <= n,
      Body, [constant_op.constant(1.), ta],
      return_same_structure=False)[1]

  output = array_ops.reshape(ta.stack(), [3, 3])
  self.assertAllEqual(
      self.evaluate(output), [[1., 2., 3.], [2., 4., 6.], [3., 6., 9.]])
  # TODO(b/117675481): This does not work with current TA. Enable with new TA.
  # grad = gradients_impl.gradients(output, [n])
  # self.assertEqual(self.evaluate(grad), 3.5)
@test_util.run_deprecated_v1
def testWhileGrad_StopGrad(self):
  """stop_gradient applied at various points on/around while-loop outputs.

  Any path wholly behind stop_gradient must yield a None gradient; mixed
  paths keep only the un-stopped contribution.
  """
  with self.cached_session():
    x = constant_op.constant(3.0, name="x")
    y = constant_op.constant(2.0, name="y")
    c = lambda x, y: math_ops.less(x, 100.0)

    def b(x, y):
      y1 = math_ops.square(y)
      x1 = math_ops.add(math_ops.square(x), y1)
      return x1, y1

    rx, ry = control_flow_ops.while_loop(c, b, [x, y])

    r = gradients_impl.gradients(rx, y)[0]
    self.assertEqual(136.0, self.evaluate(r))
    r = gradients_impl.gradients(ry, y)[0]
    self.assertEqual(32.0, self.evaluate(r))

    # Fix: use assertIsNone instead of assertEqual(r, None) for the
    # no-gradient cases below.
    r = gradients_impl.gradients(array_ops.stop_gradient(rx), y)[0]
    self.assertIsNone(r)
    r = gradients_impl.gradients(array_ops.stop_gradient(ry), y)[0]
    self.assertIsNone(r)

    r = gradients_impl.gradients(
        array_ops.stop_gradient(math_ops.square(rx)), y)[0]
    self.assertIsNone(r)
    r = gradients_impl.gradients(
        array_ops.stop_gradient(math_ops.add(rx, ry)), x)[0]
    self.assertIsNone(r)
    r = gradients_impl.gradients(
        array_ops.stop_gradient(math_ops.add(rx, ry)), y)[0]
    self.assertIsNone(r)

    r = gradients_impl.gradients(math_ops.add(rx, ry), y)[0]
    self.assertEqual(168.0, self.evaluate(r))
    r = gradients_impl.gradients(
        math_ops.add(rx, array_ops.stop_gradient(ry)), y)[0]
    self.assertEqual(136.0, self.evaluate(r))
    r = gradients_impl.gradients(
        math_ops.add(array_ops.stop_gradient(rx), ry), y)[0]
    self.assertEqual(32.0, self.evaluate(r))
@test_util.run_deprecated_v1
@test_util.disable_control_flow_v2("b/118712257")
def testWhileGrad_StopGradInside(self):
  """stop_gradient applied inside the loop body cuts the y path (grad 0)."""
  with self.cached_session():
    x = constant_op.constant(3.0, name="x")
    y = constant_op.constant(2.0, name="y")
    c = lambda x, y: math_ops.less(x, 100.0)

    def b(x, y):
      # y's contribution is stopped, so d rx/d y must be 0 while d rx/d x
      # still flows through the squared-x term.
      y1 = array_ops.stop_gradient(math_ops.square(y))
      x1 = math_ops.add(math_ops.square(x), y1)
      return x1, y1

    rx, _ = control_flow_ops.while_loop(c, b, [x, y])

    r = gradients_impl.gradients(rx, y)[0]
    self.assertAllClose(0.0, self.evaluate(r))
    r = gradients_impl.gradients(rx, x)[0]
    self.assertAllClose(156.0, self.evaluate(r))
@test_util.run_deprecated_v1
@test_util.disable_control_flow_v2("b/118712257")
def testWhileGrad_StopGradInsideNoShape(self):
  """Like testWhileGrad_StopGradInside with unknown-shape placeholders.

  Also verifies no gradient op for the stopped node is added to the graph.
  """
  with self.cached_session() as sess:
    x = array_ops.placeholder(dtypes.float32)
    y = array_ops.placeholder(dtypes.float32)
    c = lambda x, y: math_ops.less(math_ops.reduce_sum(x), 100.0)

    def b(x, y):
      y1 = array_ops.stop_gradient(math_ops.square(y, name="stopped"))
      x1 = math_ops.add(math_ops.square(x), y1)
      return x1, y1

    rx, _ = control_flow_ops.while_loop(c, b, [x, y])

    r = gradients_impl.gradients(rx, y)[0]
    feed_dict = {x: [3.0, 4.0], y: [2.0, 3.0]}
    self.assertAllClose([0.0, 0.0], sess.run(r, feed_dict=feed_dict))
    r = gradients_impl.gradients(rx, x)[0]
    self.assertAllClose([156.0, 400.0], sess.run(r, feed_dict=feed_dict))

    # The stopped op must not get a gradient node in the graph.
    name = "gradients/while/stopped_grad"
    all_ops = x.graph.get_operations()
    self.assertFalse(any(name in op.name for op in all_ops))
@test_util.run_deprecated_v1
def testWhileGradGradFail(self):
  """Second-order gradient of scan raises under v1 control flow,
  and stop_gradient makes the second gradients() call succeed."""
  theta = variables.Variable(initial_value=1.)

  def fn(prev, x):
    return prev + x * theta

  result = functional_ops.scan(fn, np.array([1., 2., 3.], dtype=np.float32))
  grad_theta = gradients_impl.gradients(result, theta)
  if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
    with self.assertRaisesRegexp(TypeError, "Second-order gradient"):
      gradients_impl.gradients(grad_theta, theta)
  grad_theta_stopped = array_ops.stop_gradient(grad_theta)
  gradients_impl.gradients(grad_theta_stopped, theta)
@test_util.run_deprecated_v1
def testStopGradOnWhileGrad(self):
  """stop_gradient applied to a while-loop gradient tensor itself.

  rg is stopped, so differentiating r = y**2 + rx + rg w.r.t. y only
  sees the y**2 and rx paths; the asserted value is 388.
  """
  with self.cached_session():
    x = constant_op.constant(2.0, name="x")
    y = constant_op.constant(2.0, name="y")

    c = lambda x: math_ops.less(x, 100.0)
    b = lambda x: math_ops.multiply(x, y)
    rx = control_flow_ops.while_loop(c, b, [x])

    rg = gradients_impl.gradients(rx, y)[0]
    rg = array_ops.stop_gradient(rg)
    r = math_ops.add(math_ops.square(y), rx)
    r = math_ops.add(r, rg)
    r = gradients_impl.gradients(r, y)[0]
    self.assertEqual(388.0, self.evaluate(r))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_deprecated_v1
def testWhileGradientWithNontrainablePath1(self):
  """Gradient is zero when the loop never runs (condition always False).

  The loop var starts from the non-differentiable argmin(q); with cond
  returning False the body never adds the reduce_sum(q) term, so dy/dq
  is defined but all-zero.
  """
  q = variables.Variable([7., 8.])

  def cond(_, y):
    del y
    return False

  def body(x, _):
    return x, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)

  _, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
  dy_dq, = gradients_impl.gradients(y, q)
  self.assertIsNotNone(dy_dq)
  # Fix: the session handle was bound as `sess` but never used.
  with self.cached_session():
    self.evaluate(q.initializer)
    self.assertAllClose([0., 0.], self.evaluate(dy_dq))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileGradientWithNontrainablePath2(self):
  """Gradient through a loop entered via a non-differentiable (argmin) path.

  The body runs until y != 0, adding reduce_sum(q) once, so dy/dq is
  all-ones even though the first loop var is integer-valued.
  """
  q = variables.Variable([7., 8.])

  def cond(_, y):
    return math_ops.equal(y, 0.)

  def body(x, _):
    zero = constant_op.constant(0, dtype=dtypes.int64)
    return zero, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)

  _, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
  dy_dq, = gradients_impl.gradients(y, q)
  self.assertIsNotNone(dy_dq)
  # Fix: the session handle was bound as `sess` but never used.
  with self.cached_session():
    self.evaluate(q.initializer)
    self.assertAllClose([1., 1.], self.evaluate(dy_dq))
@test_util.run_v1_only("b/120545219")
def testIssue16504(self):
  """Regression test (GitHub #16504): gradients through a loop whose body
  itself calls gradients() must be constructible (not None)."""
  c = constant_op.constant(np.arange(100), dtype=dtypes.float32)
  w = variables.Variable(
      initial_value=np.ones(100), dtype=dtypes.float32) / 100
  k = variables.Variable(0, dtype=dtypes.int32)
  chg_w = constant_op.constant(np.inf, dtype=dtypes.float32)

  def cond(k, _, chg_w):
    return math_ops.logical_and(k < 10, chg_w > 1e-3)

  def body(k, w, chg_w):
    # Exponentiated-gradient style update; chg_w tracks relative change.
    grad, = gradients_impl.gradients(-math_ops.reduce_sum(w * c), w)
    w_n = w * math_ops.exp(-0.1 * grad)
    w_n /= math_ops.reduce_sum(w_n)
    chg_w = (
        math_ops.reduce_sum(math_ops.abs(w_n - w)) / math_ops.reduce_sum(
            math_ops.abs(w)))
    return k + 1, w_n, chg_w

  _, w, _ = control_flow_ops.while_loop(cond, body, [k, w, chg_w])
  grad, = gradients_impl.gradients(w, c)
  self.assertIsNotNone(grad)
  @test_util.run_v1_only("b/120545219")
  def testStopGradMultiFlows(self):
    """stop_gradient must cut the gradient-of-gradient flow out of a loop.

    `z` mixes the loop result `r` with a stop_gradient-wrapped norm of
    dr/dx; only the direct r-path may contribute to d(z)/d(x).
    """
    with self.cached_session():
      def body(i, y, r):
        # get_variable inside the body: created once, reused on reentry.
        x = variable_scope.get_variable(
            "x",
            shape=(),
            dtype=dtypes.float32,
            initializer=init_ops.ones_initializer())
        y *= x
        return [i + 1, y, r + math_ops.reduce_sum(y)]
      i0 = constant_op.constant(0)
      y0 = array_ops.ones(5)
      r0 = constant_op.constant(0.0)
      cond = lambda i, y, r: i < 1
      _, _, r = control_flow_ops.while_loop(
          cond, body, [i0, y0, r0], back_prop=True)
      vars_ = variables.global_variables()
      # First-order gradient dr/dx; its contribution to z is then blocked.
      grads = linalg_ops.norm(gradients_impl.gradients(r, vars_)[0])
      z = math_ops.add(r, array_ops.stop_gradient(math_ops.reduce_sum(grads)))
      result = gradients_impl.gradients(z, vars_)[0]
      self.evaluate(variables.global_variables_initializer())
      # dz/dx = dr/dx = sum(y0) = 5.0; the stopped grads term adds nothing.
      self.assertEqual(5.0, self.evaluate(result))
@test_util.run_v1_only("b/120545219")
def testOneValueCond(self):
with self.cached_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
one = ops.convert_to_tensor(1, name="one")
two = ops.convert_to_tensor(2, name="two")
p = math_ops.greater_equal(c, 1)
i = control_flow_ops.cond(p, lambda: one, lambda: two)
self.assertTrue(isinstance(i, ops.Tensor))
=======
r = control_flow_ops.While(loop_iterator,
loop_body,
[n],
parallel_iterations=1)
self.assertTrue(check_op_order(n.graph))
tf.initialize_all_variables().run()
self.assertEqual(3, r.eval())
result1 = select1.eval()
self.assertAllEqual(np.array([10.0, 10.0, 10.0]), result1)
result2 = select2.eval()
self.assertAllEqual(np.array([10.0, 10.0, 10.0]), result2)
def testWhileUpdateVariable_3(self):
with self.test_session():
select = tf.Variable([3.0, 4.0, 5.0])
n = tf.constant(0)
def loop_iterator(j, _):
return tf.less(j, 3)
def loop_body(j, _):
ns = tf.scatter_update(select, j, 10.0)
nj = tf.add(j, 1)
return [nj, ns]
r = control_flow_ops.While(loop_iterator,
loop_body,
[n, tf.identity(select)],
parallel_iterations=1)
tf.initialize_all_variables().run()
result = r[1].eval()
self.assertTrue(check_op_order(n.graph))
self.assertAllEqual(np.array([10.0, 10.0, 10.0]), result)
# b/24814703
def testWhileUpdateVariable_4(self):
with self.test_session():
var_a = tf.Variable(0, name="a")
var_b = tf.Variable(0, name="b")
tf.initialize_all_variables().run()
c = tf.constant(0, name="c")
asn1 = tf.assign_add(var_a, 1, name="a_add")
# Loop condition
def pred(i):
return tf.less(i, 10)
# Loop body
def loop_body(i):
asn2 = tf.assign_add(var_b, asn1, name="b_add")
with tf.control_dependencies([asn2]):
ni = tf.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.While(pred, loop_body, [c],
parallel_iterations=1)
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_b.eval())
# b/24736492
def testWhileUpdateVariable_5(self):
with self.test_session():
# Create some variables.
var_a = tf.Variable(0, name="a")
var_b = tf.Variable(0, name="b")
tf.initialize_all_variables().run()
# Change condition to check var_b
def pred(i):
return tf.less(var_b, 10)
# Change body to increment var_b
def loop_body(i):
asn1 = tf.assign_add(var_a, tf.constant(1), name="a_add")
asn2 = tf.assign_add(var_b, tf.constant(1), name="b_add")
with tf.control_dependencies([asn1, asn2]):
inc_b = tf.identity(var_b)
return inc_b
lpa = control_flow_ops.While(pred, loop_body, [var_b], 1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_a.eval())
self.assertEqual(10, var_b.eval())
def testWhileQueue_1(self):
with self.test_session():
q = tf.FIFOQueue(-1, tf.int32)
i = tf.constant(0)
def c(i):
return tf.less(i, 10)
def b(i):
ni = tf.add(i, 1)
ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
return ni
r = control_flow_ops.While(c, b, [i], parallel_iterations=1)
self.assertEqual([10], r.eval())
for i in xrange(10):
self.assertEqual([i], q.dequeue().eval())
def testFold_1(self):
with self.test_session():
elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")
r = control_flow_ops.fold(
lambda a, x: tf.mul(tf.add(a, x), 2), elems, [1])
result = r.eval()
self.assertTrue(check_op_order(elems.graph))
self.assertAllEqual(np.array([208]), result)
def testFold_2(self):
with self.test_session():
elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")
ten = tf.convert_to_tensor(10)
def compute(a, x):
r = tf.mul(x, ten)
return tf.add(a, r)
r = control_flow_ops.fold(compute, elems, [1])
result = r.eval()
self.assertTrue(check_op_order(elems.graph))
self.assertAllEqual([201], result)
def testOneValueCond(self):
with self.test_session():
c = tf.placeholder(tf.int32, shape=[])
one = tf.convert_to_tensor(1, name="one")
two = tf.convert_to_tensor(2, name="two")
p = tf.greater_equal(c, 1)
i = control_flow_ops.cond(p, lambda: one, lambda: two)
self.assertTrue(isinstance(i, tf.Tensor))
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
# True case: c = 2 is >= 1
self.assertEqual([1], i.eval(feed_dict={c: 2}))
# False case: c = 0 is not >= 1
self.assertEqual([2], i.eval(feed_dict={c: 0}))
<<<<<<< HEAD
@test_util.run_deprecated_v1
def testExampleCond(self):
with self.cached_session():
x = ops.convert_to_tensor([-2.0, 2.0], name="x")
d = array_ops.placeholder(dtypes.int32, shape=[])
def l2():
return math_ops.sqrt(math_ops.reduce_sum(math_ops.square(x)))
def l1():
return math_ops.reduce_sum(math_ops.abs(x))
i = control_flow_ops.cond(math_ops.equal(d, 2), l2, l1)
self.assertAllClose(4.0, i.eval(feed_dict={d: 1}))
self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))
@test_util.run_v1_only("b/120545219")
def testCase(self):
with self.cached_session():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
f1 = lambda: constant_op.constant(17)
f2 = lambda: constant_op.constant(23)
f3 = lambda: constant_op.constant(-1)
r1 = control_flow_ops.case(
{
x < y: f1,
x > z: f2
}, default=f3, exclusive=True)
self.assertAllEqual(r1, 17)
r2 = control_flow_ops.case([(y > z, f1), (y > x, f2)], default=f3)
self.assertAllEqual(r2, 23)
# Duplicate events can happen, first one is selected
r3 = control_flow_ops.case([(x < y, f1), (x < y, f2)], default=f3)
self.assertAllEqual(r3, 17)
# Duplicate events cause an error if exclusive = True
r4 = control_flow_ops.case(
[(x < y, f1), (x < y, f2)], default=f3, exclusive=True)
with self.assertRaisesOpError("Input error:"):
self.evaluate(r4)
# Check that the default is called if none of the others are
r5 = control_flow_ops.case({x > y: f1}, default=f3)
self.assertAllEqual(r5, -1)
ran_once = [False, False, False]
def break_run_twice(ix):
def _break():
ran_once[ix] = True
return constant_op.constant(ix)
return _break
# Should not fail - each conditional gets called exactly once
# except default. Default gets called twice: once to create an
# empty output and once for the actual cond switch.
r6 = control_flow_ops.case(
[(x < y, break_run_twice(0)), (x > y, break_run_twice(1))],
default=lambda: constant_op.constant(2))
self.assertAllEqual(r6, 0)
@test_util.run_v1_only("b/120545219")
def testCaseSideEffects(self):
with self.cached_session() as sess:
v0 = variables.Variable(-1)
v1 = variables.Variable(-1)
v2 = variables.Variable(-1)
a = lambda: control_flow_ops.with_dependencies([state_ops.assign(v0, 0)], 0)
b = lambda: control_flow_ops.with_dependencies([state_ops.assign(v1, 1)], 1)
c = lambda: control_flow_ops.with_dependencies([state_ops.assign(v2, 2)], 2)
x = constant_op.constant(1)
y = constant_op.constant(2)
r0 = control_flow_ops.case(
((x < y, a), (x > y, b)), default=c, exclusive=True)
r1 = control_flow_ops.case(
((x > y, a), (x < y, b)), default=c, exclusive=True)
r2 = control_flow_ops.case(
((x > y, a), (x > y, b)), default=c, exclusive=True)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3)
self.assertEqual(2, self.evaluate(r2))
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1, -1, 2])
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3)
self.assertEqual(1, self.evaluate(r1))
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1, 1, -1])
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3)
self.assertEqual(0, self.evaluate(r0))
self.assertAllEqual(self.evaluate([v0, v1, v2]), [0, -1, -1])
@test_util.disable_control_flow_v2("b/113324949 (ref vars)")
@test_util.run_v1_only("b/120545219")
def testOneOpCond(self):
with self.cached_session():
v = variables.Variable(0)
c = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
two = ops.convert_to_tensor(2)
p = math_ops.greater_equal(c, 1)
def a():
return state_ops.assign(v, one)
def b():
return state_ops.assign(v, two)
i = control_flow_ops.cond(p, a, b)
self.assertTrue(isinstance(i, ops.Tensor))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(v))
# True case: c = 2 is >= 1, v is set to 1.
self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
self.assertEqual(1, self.evaluate(v))
# False case: c = 0 is not >= 1, v is set to 2.
self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
self.assertEqual(2, self.evaluate(v))
@test_util.run_v1_only("b/120545219")
def testWithOpsDependencies(self):
with self.cached_session() as sess:
v = variables.VariableV1(0.0)
c = constant_op.constant(10)
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate([c, v])
# Use a control dependency to ensure init_variable is run
# while asking for c
real_v = control_flow_ops.with_dependencies(
name="real_tensor",
output_tensor=v._ref(), # pylint: disable=protected-access
dependencies=[v.initializer])
c_val, real_v_val = self.evaluate([c, real_v])
=======
def testExampleCond(self):
with self.test_session():
x = tf.convert_to_tensor([-2.0, 2.0], name="x")
d = tf.placeholder(tf.int32, shape=[])
def l2():
return tf.sqrt(tf.reduce_sum(tf.square(x)))
def l1():
return tf.reduce_sum(tf.abs(x))
i = control_flow_ops.cond(tf.equal(d, 2), l2, l1)
self.assertEqual(4.0, i.eval(feed_dict={d: 1}))
self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))
def testOneOpCond(self):
with self.test_session():
v = tf.Variable(0)
c = tf.convert_to_tensor(0)
one = tf.convert_to_tensor(1)
two = tf.convert_to_tensor(2)
p = tf.greater_equal(c, 1)
def a():
return tf.assign(v, one)
def b():
return tf.assign(v, two)
i = control_flow_ops.cond(p, a, b)
self.assertTrue(isinstance(i, tf.Tensor))
tf.initialize_all_variables().run()
self.assertEqual(0, v.eval())
# True case: c = 2 is >= 1, v is set to 1.
self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
self.assertEqual(1, v.eval())
# False case: c = 0 is not >= 1, v is set to 2.
self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
self.assertEqual(2, v.eval())
def testWithOpsDependencies(self):
with self.test_session() as sess:
v = tf.Variable(0.0)
c = tf.constant(10)
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
sess.run([c, v])
# Use a control dependency to ensure init_variable is run
# while asking for c
real_v = control_flow_ops.with_dependencies(name="real_tensor",
output_tensor=v,
dependencies=[v.initializer])
c_val, real_v_val = sess.run([c, real_v])
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
# Ensure the result of 'real_c' is the same as 'c'
self.assertAllEqual(10, c_val)
# Ensure that 'v' is initialized
self.assertAllClose(0.0, real_v_val)
<<<<<<< HEAD
@test_util.run_v1_only("b/120545219")
def testWithTensorDependencies(self):
with self.cached_session():
v = variables.VariableV1(0.0)
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
# c1_with_init_v depends on the init op for v
c1_with_init_v = control_flow_ops.with_dependencies(
name="c1_with_init_v", output_tensor=c1, dependencies=[v.initializer])
=======
def testWithTensorDependencies(self):
with self.test_session():
v = tf.Variable(0.0)
c1 = tf.constant(10)
c2 = tf.constant(20)
# c1_with_init_v depends on the init op for v
c1_with_init_v = control_flow_ops.with_dependencies(
name="c1_with_init_v",
output_tensor=c1,
dependencies=[v.initializer])
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
# c2_with_c1 depends on the value of c1_with_init_v
c2_with_c1_dep = control_flow_ops.with_dependencies(
name="c2_with_c1_dep",
output_tensor=c2,
dependencies=[c1_with_init_v])
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
<<<<<<< HEAD
self.evaluate(v)
# Get the value of 'c2_with_c1_dep', which should cause 'v'
# to be initialized.
self.assertAllEqual(20, self.evaluate(c2_with_c1_dep))
# Ensure that 'v' is initialized
self.assertAllClose(0.0, self.evaluate(v))
@test_util.run_v1_only("b/120545219")
def testWithIndexedSlicesDependencies(self):
with self.cached_session():
v = variables.VariableV1(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
v_at_1 = ops.IndexedSlices(v, constant_op.constant([1]))
gather_v_at_1 = array_ops.gather(v_at_1.values, v_at_1.indices)
v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],
v_at_1)
gather_v_at_1_after_init = array_ops.gather(v_at_1_after_init.values,
v_at_1_after_init.indices)
# Fetching gather_v_at_1 will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(gather_v_at_1)
# Getting gather_v_at_1_after_init will work, and initialize v.
self.assertAllEqual([[10.0, 11.0]],
self.evaluate(gather_v_at_1_after_init))
# Double check that 'v' is initialized
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
self.evaluate(v))
def testDependenciesDevice(self):
with ops.Graph().as_default():
# device set on tensor => same device on dep.
with ops.device("/job:ps"):
vd = variables.VariableV1([0.0])
=======
v.eval()
# Get the value of 'c2_with_c1_dep', which should cause 'v'
# to be initialized.
self.assertAllEqual(20, c2_with_c1_dep.eval())
# Ensure that 'v' is initialized
self.assertAllClose(0.0, v.eval())
def testWithIndexedSlicesDependencies(self):
with self.test_session():
v = tf.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
v_at_1 = tf.IndexedSlices(v, tf.constant([1]))
gather_v_at_1 = tf.gather(v_at_1.values, v_at_1.indices)
v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],
v_at_1)
gather_v_at_1_after_init = tf.gather(
v_at_1_after_init.values, v_at_1_after_init.indices)
# Fetching gather_v_at_1 will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
gather_v_at_1.eval()
# Getting gather_v_at_1_after_init will work, and initialize v.
self.assertAllEqual([[10.0, 11.0]], gather_v_at_1_after_init.eval())
# Double check that 'v' is initialized
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v.eval())
def testDependenciesDevice(self):
with tf.Graph().as_default():
# device set on tensor => same device on dep.
with tf.device("/job:ps"):
vd = tf.Variable([0.0])
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd)
self.assertTrue("/job:ps" in with_vd_dep.device)
# No device set on tensor => no device on dep.
<<<<<<< HEAD
vnod = variables.VariableV1([0.0])
with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],
vnod)
self.assertDeviceEqual(None, with_vnod_dep.device)
# device set on tensor, default device on graph => default device on dep.
vdef = variables.VariableV1([0.0], name="vdef")
with ops.device("/job:worker/device:GPU:1"):
with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer],
vdef)
# The device is empty, but the colocation constraint is set.
self.assertDeviceEqual("", with_vdef_dep.device)
self.assertEqual([b"loc:@vdef"], with_vdef_dep.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testGroup(self):
with self.cached_session() as sess:
v1 = variables.VariableV1([0.0])
v2 = variables.VariableV1([1.0])
=======
vnod = tf.Variable([0.0])
with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],
vnod)
self.assertEquals(None, with_vnod_dep.device)
# device set on tensor, default device on graph => default device on dep.
vdef = tf.Variable([0.0])
with tf.device("/job:worker/gpu:1"):
with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer],
vdef)
self.assertEquals("/job:worker/gpu:1", with_vdef_dep.device)
def testGroup(self):
with self.test_session() as sess:
v1 = tf.Variable([0.0])
v2 = tf.Variable([1.0])
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
# Group init1 and init2 and run.
init = control_flow_ops.group(v1.initializer, v2.initializer)
# Fetching v1 directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
<<<<<<< HEAD
self.evaluate(v1)
# Runs "init" before fetching v1 and v2.
init.run()
v1_val, v2_val = self.evaluate([v1, v2])
=======
v1.eval()
# Runs "init" before fetching v1 and v2.
init.run()
v1_val, v2_val = sess.run([v1, v2])
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
# Ensure that v1 and v2 are initialized
self.assertAllClose([0.0], v1_val)
self.assertAllClose([1.0], v2_val)
<<<<<<< HEAD
@test_util.run_v1_only("b/120545219")
def testGroupEmpty(self):
op = control_flow_ops.group()
self.assertEqual(op.type, "NoOp")
self.assertEqual(op.control_inputs, [])
@test_util.run_deprecated_v1
def testMergeShapes(self):
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
=======
def testMergeShapes(self):
# All inputs unknown.
p1 = tf.placeholder(tf.float32)
p2 = tf.placeholder(tf.float32)
p3 = tf.placeholder(tf.float32)
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
m, index = control_flow_ops.merge([p1, p2, p3])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
<<<<<<< HEAD
# All inputs known with different ranks.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2, 3])
=======
# All inputs known but different.
p1 = tf.placeholder(tf.float32, shape=[1, 2])
p2 = tf.placeholder(tf.float32, shape=[2, 1])
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
m, index = control_flow_ops.merge([p1, p2])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
<<<<<<< HEAD
# All inputs known with some dimensions different.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 1])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
# All inputs known with same dimensions.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([1, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, None])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
@test_util.run_v1_only("b/120545219")
def testRefSelect(self):
index = array_ops.placeholder(dtypes.int32)
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
v1 = variables.VariableV1(p1, validate_shape=False)
v2 = variables.VariableV1(p2, validate_shape=False)
v3 = variables.VariableV1(p3, validate_shape=False)
self.assertIs(None, v1.get_shape().ndims)
s = control_flow_ops.ref_select(index, [v1, v2, v3])
self.assertIs(None, s.get_shape().ndims)
# All inputs known but different.
v1 = variables.VariableV1([[1, 2]])
v2 = variables.VariableV1([[2], [1]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertIs(None, s.get_shape().ndims)
# All inputs known and same.
v1 = variables.VariableV1([[1, 2]])
v2 = variables.VariableV1([[1, 2]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual([1, 2], s.get_shape())
# Possibly the same but not guaranteed.
v1 = variables.VariableV1([[1., 2.]])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
v2 = variables.VariableV1(p2, validate_shape=False)
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual(None, s.get_shape())
@test_util.run_deprecated_v1
def testRunLoopTensor(self):
with self.cached_session() as sess:
tensor_list = []
def condition(t):
return t < constant_op.constant(5)
def body(_):
tensor_list.append(constant_op.constant(5))
return constant_op.constant(10)
result = control_flow_ops.while_loop(condition, body,
[constant_op.constant(4)])
self.assertEqual(10, self.evaluate(result))
# Ensure that we cannot run a tensor that escapes the loop body
# accidentally.
with self.assertRaises(ValueError):
sess.run(tensor_list[0])
@test_util.run_v1_only("b/120545219")
def testWhilePyFuncBasic(self):
def func(x):
return np.square(x)
with self.cached_session():
r = control_flow_ops.while_loop(
lambda i, v: i < 4,
lambda i, v: [i + 1, script_ops.py_func(func, [v], [dtypes.float32])[0]],
[constant_op.constant(0), constant_op.constant(2.0, dtypes.float32)],
[tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
self.assertEqual(self.evaluate(r[1]), 65536.0)
@test_util.run_v1_only("b/120545219")
def testWhileFuncBasic(self):
@function.Defun(dtypes.float32)
def func(x):
return math_ops.square(math_ops.square(x))
with self.cached_session():
x = constant_op.constant(2.0, dtypes.float32)
r = control_flow_ops.while_loop(
lambda i, v: i < 2, lambda i, v: [i + 1, func(v)],
[constant_op.constant(0), x],
[tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()])
grad = gradients_impl.gradients(r, x)[0]
self.assertEqual(self.evaluate(r[1]), 65536.0)
self.assertEqual(self.evaluate(grad), 524288.0)
# while_v2 does not have stacks.
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.assertEqual(
len([op for op in x.graph.get_operations() if op.type == "StackV2"
]), 1)
@test_util.run_v1_only("b/120545219")
def testQIntSwitchMerge(self):
with self.cached_session(force_gpu=test.is_gpu_available()) as sess:
constant_qint = constant_op.constant(np.array([42]), dtypes.qint8)
cond = constant_op.constant(True, dtypes.bool)
v_f, v_t = control_flow_ops.switch(constant_qint, cond)
result = control_flow_ops.merge([v_f, v_t])
self.evaluate(result)
@test_util.run_v1_only("b/120545219")
def testQIntRefSwitchMerge(self):
with self.cached_session(use_gpu=test.is_gpu_available()) as sess:
var_qint = gen_state_ops.variable(
shape=[1], dtype=dtypes.qint8, name="v", container="", shared_name="")
assign_op = state_ops.assign(
var_qint, constant_op.constant(np.array([42]), dtypes.qint8))
self.evaluate(assign_op)
cond = constant_op.constant(True, dtypes.bool)
v_f, v_t = control_flow_ops.ref_switch(var_qint, cond)
result = control_flow_ops.ref_merge([v_f, v_t])
self.evaluate(result)
@test_util.run_v1_only("b/120545219")
def testUInt64SwitchMerge(self):
with self.cached_session(force_gpu=test.is_gpu_available()) as sess:
constant_uint64 = constant_op.constant(np.array([42]), dtypes.uint64)
cond = constant_op.constant(True, dtypes.bool)
v_f, v_t = control_flow_ops.switch(constant_uint64, cond)
result = control_flow_ops.merge([v_f, v_t])
self.evaluate(result)
@test_util.run_deprecated_v1
def testQIntArgAndRet(self):
@function.Defun(dtypes.qint8)
def func(x):
return x
with self.cached_session(force_gpu=test.is_gpu_available()) as sess:
qint = constant_op.constant(np.array([42]), dtypes.qint8)
result = func(qint)
self.evaluate(result)
def testSparseIdentity(self):
st1 = sparse_tensor.SparseTensor([[0, 5]], ['x'], [10, 10])
st2 = control_flow_ops._Identity(st1)
self.assertAllEqual(st1.indices, st2.indices)
self.assertAllEqual(st1.values, st2.values)
self.assertAllEqual(st1.dense_shape, st2.dense_shape)
def testSparseEnterExit(self):
st1 = sparse_tensor.SparseTensor([[0, 5]], ['x'], [10, 10])
st2 = control_flow_ops._Enter(st1, "foo_1")
st3 = control_flow_ops.exit(st2)
self.assertAllEqual(st1.indices, st3.indices)
self.assertAllEqual(st1.values, st3.values)
self.assertAllEqual(st1.dense_shape, st3.dense_shape)
def _buildWhileWithShapeInvariants(self, shape_invariants):
r = constant_op.constant([1, 2])
def cond(_):
return False
def body(_):
return constant_op.constant([1])
return control_flow_ops.while_loop(
cond, body, [r], shape_invariants=shape_invariants)
def testWhileOutputShapeWithShapeInvariantsUnknownRank(self):
@def_function.function
def runTest():
while_output = self._buildWhileWithShapeInvariants(
[tensor_shape.TensorShape(None)])
self.assertIsNone(while_output.shape.rank)
runTest()
def testWhileOutputShapeWithShapeInvariantsPartialShape(self):
@def_function.function
def runTest():
while_output = self._buildWhileWithShapeInvariants(
[tensor_shape.TensorShape([None])])
self.assertAllEqual(while_output.shape.as_list(), [None])
runTest()
def testFunctionInWhile(self):
@def_function.function
def body(x):
return x + 1
r = control_flow_ops.while_loop(lambda x: x < 5, body, [0])
self.assertAllEqual(r, 5.)
class ControlFlowContextCheckTest(test.TestCase):
  """Checks that ops may only consume tensors from compatible control-flow
  contexts; incompatible uses must raise ValueError at graph construction."""

  def _getWhileTensor(self):
    """Creates and returns a tensor from a while context."""
    tensor = []

    def body(i):
      # Smuggle the body-created constant out via the closed-over list.
      if not tensor:
        tensor.append(constant_op.constant(1))
      return i + tensor[0]

    control_flow_ops.while_loop(lambda i: i < 10, body, [0])
    return tensor[0]

  def _getCondTensor(self):
    """Creates and returns a tensor from inside a cond's true branch."""
    cond_tensor = []

    def true_fn():
      if not cond_tensor:
        cond_tensor.append(constant_op.constant(1))
      return cond_tensor[0]

    control_flow_ops.cond(
        math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
    return cond_tensor[0]

  @test_util.run_v1_only("b/120545219")
  def testInvalidContext(self):
    # Accessing a while loop tensor outside of control flow is illegal.
    while_tensor = self._getWhileTensor()
    with self.assertRaisesRegexp(
        ValueError,
        "Cannot use 'while/Const_1' as input to 'Add' because 'while/Const_1' "
        "is in a while loop. See info log for more details."):
      math_ops.add(1, while_tensor)

  @test_util.run_v1_only("b/120545219")
  def testInvalidContextInCond(self):
    # Accessing a while loop tensor in cond is illegal.
    while_tensor = self._getWhileTensor()
    with self.assertRaisesRegexp(
        ValueError, "Cannot use 'while/Const_1' as input to 'cond/Add' because "
        "'while/Const_1' is in a while loop. See info log for more details."):
      # TODO(skyewm): this passes if we return while_tensor directly instead
      # of using it as input to another op.
      control_flow_ops.cond(
          math_ops.less(1, 2), lambda: math_ops.add(1, while_tensor),
          lambda: constant_op.constant(0))

  @test_util.run_v1_only("b/120545219")
  def testInvalidContextInWhile(self):
    # Accessing a while loop tensor in a different while loop is illegal.
    while_tensor = self._getWhileTensor()
    with self.assertRaisesRegexp(
        ValueError,
        "Cannot use 'while/Const_1' as input to 'while_1/Add' because they are "
        "in different while loops. See info log for more details."):
      control_flow_ops.while_loop(lambda i: i < 10,
                                  lambda x: math_ops.add(1, while_tensor), [0])

    with self.assertRaisesRegexp(
        ValueError,
        "Cannot use 'while/Const_1' as input to 'while_2/NextIteration' "
        "because they are in different while loops. See info log for more "
        "details."):
      control_flow_ops.while_loop(lambda i: i < 10, lambda i: while_tensor, [0])

  def testValidCondContext(self):
    # Accessing a tensor from a cond context is OK (although dangerous).
    cond_tensor = self._getCondTensor()
    math_ops.add(1, cond_tensor)

  def testValidCondContextBranches(self):
    # Accessing a tensor from a cond context from the other branch's cond
    # context is OK (although dangerous).
    cond_tensor = []

    def branch_fn():
      if not cond_tensor:
        cond_tensor.append(constant_op.constant(1))
      return cond_tensor[0]

    control_flow_ops.cond(math_ops.less(1, 2), branch_fn, branch_fn)

  @test_util.run_v1_only("b/120545219")
  def testValidWhileContext(self):
    # Accessing a tensor in a nested while is OK.
    def body(_):
      c = constant_op.constant(1)
      return control_flow_ops.while_loop(lambda i: i < 3, lambda i: i + c, [0])

    control_flow_ops.while_loop(lambda i: i < 5, body, [0])

  @test_util.run_v1_only("b/120545219")
  def testValidNestedContexts(self):
    # Accessing a tensor from a cond context in a while context, all inside an
    # outer while context, is OK.
    def body(_):
      cond_tensor = self._getCondTensor()
      # Create another cond containing the while loop for good measure
      return control_flow_ops.cond(
          math_ops.less(1, 2),
          lambda: control_flow_ops.while_loop(lambda i: i < 3,
                                              lambda i: i + cond_tensor, [0]),
          lambda: constant_op.constant(0))

    control_flow_ops.while_loop(lambda i: i < 5, body, [0])

  @test_util.run_v1_only("b/120545219")
  def testInvalidNestedContexts(self):
    # Accessing a tensor from a while context in a different while context, all
    # inside a cond context, is illegal.
    def true_fn():
      while_tensor = self._getWhileTensor()
      return control_flow_ops.while_loop(lambda i: i < 3,
                                         lambda i: i + while_tensor, [0])

    with self.assertRaisesRegexp(
        ValueError,
        "Cannot use 'cond/while/Const_1' as input to 'cond/while_1/add' because"
        " they are in different while loops. See info log for more details."):
      control_flow_ops.cond(
          math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
class TupleTest(test.TestCase):
  """Tests control_flow_ops.tuple: fetching any output runs all inputs'
  dependencies, so either tensor's evaluation initializes both variables."""

  # NOTE(review): merge conflict resolved here — kept the modern-API (HEAD)
  # version of testTensors; the discarded side held the stale `tf.*`
  # duplicates of testMergeShapes/testRefSelect/TupleTest.

  @test_util.run_v1_only("b/120545219")
  def testTensors(self):
    for v1_first in [True, False]:
      with self.cached_session():
        v1 = variables.VariableV1([1.0])
        add1 = math_ops.add(
            control_flow_ops.with_dependencies([v1.initializer], v1._ref()),  # pylint: disable=protected-access
            2.0)
        v2 = variables.VariableV1([10.0])
        add2 = math_ops.add(
            control_flow_ops.with_dependencies([v2.initializer], v2._ref()),  # pylint: disable=protected-access
            20.0)
        t1, _, t2 = control_flow_ops.tuple([add1, None, add2])

        # v1 is not initialized.
        with self.assertRaisesOpError("Attempting to use uninitialized value"):
          self.evaluate(v1)

        # v2 is not initialized.
        with self.assertRaisesOpError("Attempting to use uninitialized value"):
          self.evaluate(v2)

        if v1_first:
          # Getting t1 initializes v2.
          self.assertAllClose([3.0], self.evaluate(t1))
          self.assertAllClose([10.0], self.evaluate(v2))
        else:
          # Getting t2 initializes v1.
          self.assertAllClose([30.0], self.evaluate(t2))
          self.assertAllClose([1.0], self.evaluate(v1))
@test_util.run_v1_only("b/120545219")
def testIndexedSlices(self):
for v1_first in [True, False]:
with self.cached_session():
v1 = variables.VariableV1(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(
np.float32))
v1_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
v2 = variables.VariableV1(
np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(
np.float32))
v2_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
g1 = array_ops.gather(st1.values, st1.indices)
g2 = array_ops.gather(st2.values, st2.indices)
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v1)
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v2)
if v1_first:
# Getting g1 initializes v2.
self.assertAllClose([[10.0, 11.0]], self.evaluate(g1))
self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
self.evaluate(v2))
else:
# Getting g2 initializes v1.
self.assertAllClose([[10.1, 11.1]], self.evaluate(g2))
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
self.evaluate(v1))
def testAcceptTensorsAsControlInputs(self):
with self.cached_session():
var = variables.VariableV1(0)
assign = state_ops.assign(var, 1)
t, = control_flow_ops.tuple(
[constant_op.constant(0)], control_inputs=[assign])
# Should trigger the assign.
self.evaluate(t)
self.assertEquals(1, self.evaluate(var))
class AssertTest(test.TestCase):
@test_util.run_deprecated_v1
def testGuardedAssertDoesNotCopyWhenTrue(self):
if test_util.is_gpu_available():
self.skipTest("b/128646478 fails in opensource")
with self.session(use_gpu=True) as sess:
with ops.device(test.gpu_device_name()):
value = constant_op.constant(1.0)
with ops.device("/cpu:0"):
true = constant_op.constant(True)
guarded_assert = control_flow_ops.Assert(true, [value], name="guarded")
unguarded_assert = gen_logging_ops._assert(
true, [value], name="unguarded")
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
guarded_metadata = config_pb2.RunMetadata()
sess.run(guarded_assert, options=opts, run_metadata=guarded_metadata)
unguarded_metadata = config_pb2.RunMetadata()
sess.run(unguarded_assert, options=opts, run_metadata=unguarded_metadata)
guarded_nodestat_names = [
n.node_name
for d in guarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
unguarded_nodestat_names = [
n.node_name
for d in unguarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
guarded_memcpy_nodestat_names = [
n for n in guarded_nodestat_names if "MEMCPYDtoH" in n
]
unguarded_memcpy_nodestat_names = [
n for n in unguarded_nodestat_names if "MEMCPYDtoH" in n
]
if "GPU" in [d.device_type for d in device_lib.list_local_devices()]:
# A copy was performed for the unguarded assert
self.assertLess(0, len(unguarded_memcpy_nodestat_names),
str(unguarded_nodestat_names))
# No copy was performed for the guarded assert
self.assertEqual([], guarded_memcpy_nodestat_names)
class WhileOpBenchmark(test.Benchmark):
"""Evaluate the performance of while_loop op."""
def _getInitVariables(self):
batch_size = 10
image_size = 256
kernel_size = 3
depth = 16
init_step = constant_op.constant(-1)
image = variable_scope.get_variable(
"image",
initializer=random_ops.random_normal(
[batch_size, image_size, image_size, depth],
dtype=dtypes.float32,
stddev=1e-1))
kernel = variable_scope.get_variable(
"weights",
initializer=random_ops.truncated_normal(
[kernel_size, kernel_size, depth, depth],
dtype=dtypes.float32,
stddev=1e-1))
return init_step, image, kernel
def _runOneBenchmark(self,
default_device,
num_iters=10,
static_unroll=False,
steps=10):
"""Evaluate the while loop performance.
Args:
default_device: The default device to run all ops except the loop_body.
loop_body is always run on GPU.
num_iters: Number of iterations to run.
static_unroll: If true, run unrolled version; otherwise, run while_loop.
steps: Total number of repeated steps to run the loop.
Returns:
The duration of the run in seconds.
"""
def loop_body(i, x):
with ops.device("/gpu:0"):
# Always put loop body on GPU.
nx = nn_ops.conv2d(
input=x,
filter=kernel,
strides=[1, 1, 1, 1],
padding="SAME",
data_format="NHWC",
name="conv2d")
ni = math_ops.add(i, 1)
return ni, nx
ops.reset_default_graph()
with session.Session() as sess, ops.device(default_device):
# Get the initial id i, input x, and kernel.
i, x, kernel = self._getInitVariables()
variables.global_variables_initializer().run()
if static_unroll:
for _ in xrange(steps):
i, x = loop_body(i, x)
else:
i, x = control_flow_ops.while_loop(
lambda i, _: i < steps,
loop_body, [i, x],
parallel_iterations=steps,
swap_memory=True)
r = math_ops.reduce_sum(x)
dx, dk = gradients_impl.gradients(r, [x, kernel])
# Use group to avoid fetching back results.
r = control_flow_ops.group(dx, dk)
for _ in xrange(3):
# exclude warm up time
self.evaluate(r)
start_time = time.time()
for _ in xrange(num_iters):
self.evaluate(r)
return (time.time() - start_time) / num_iters
def benchmarkWhileOpCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_cross_device", iters=iters, wall_time=duration)
def benchmarkWhileOpSameDevicePlacement(self):
iters = 10
# Run all ops on the same GPU device.
duration = self._runOneBenchmark("gpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_same_device", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_cross_device_cpu", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollSameDevicePlacement(self):
iters = 10
# Run all ops on GPU.
duration = self._runOneBenchmark("gpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_same_device", iters=iters, wall_time=duration)
@test_util.with_control_flow_v2
class EagerTest(test.TestCase):
def testCond(self):
with context.eager_mode():
pred = math_ops.less(1, 2)
fn1 = lambda: [constant_op.constant(10)]
fn2 = lambda: [constant_op.constant(20)]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual(r.numpy(), 10)
self.assertFalse(isinstance(r, list))
# TODO(b/117279927): Re-enable once msan failure is fixed.
def DISABLED_testCondInDefun(self):
with context.eager_mode():
@eager_function.defun
def foo(pred):
# TODO(b/111124878): this only needs to output one element.
fn1 = lambda: (constant_op.constant(10), constant_op.constant(100))
fn2 = lambda: (constant_op.constant(20), constant_op.constant(200))
return control_flow_ops.cond(constant_op.constant(pred), fn1, fn2)
r = foo(True)
self.assertAllEqual(r[0].numpy(), 10)
self.assertNotIsInstance(r, list)
r = foo(False)
self.assertAllEqual(r[0].numpy(), 20)
self.assertFalse(isinstance(r, list))
def testWhileLoop(self):
with context.eager_mode():
tensor = constant_op.constant([1, 2, 3, 4, 5])
self.assertAllEqual(isum(tensor).numpy(), [46, 47, 48, 49, 50])
def testWhileLoopWithMaxIterations(self):
with context.eager_mode():
tensor = constant_op.constant([1, 2, 3, 4, 5])
self.assertAllEqual(
isum(tensor, maximum_iterations=3).numpy(),
[1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3])
@test_util.run_v1_only("b/120545219")
def testWhileWithMaximumIterationsAndSingleArgument(self):
with context.eager_mode():
tensor = constant_op.constant(0)
r = control_flow_ops.while_loop(
lambda i: i < 3, lambda i: i + 1, [tensor], maximum_iterations=1)
self.assertEqual(1, r.numpy())
def testWithDependencies(self):
with context.eager_mode():
t1 = constant_op.constant(1)
t2 = constant_op.constant(2)
t3 = control_flow_ops.with_dependencies(t1, t2)
self.assertAllEqual(t2.numpy(), t3.numpy())
def testTuple(self):
with context.eager_mode():
t1 = constant_op.constant(1)
t2 = constant_op.constant(2)
tup1, tup2 = control_flow_ops.tuple([t1, t2])
self.assertAllEqual(t1.numpy(), tup1.numpy())
self.assertAllEqual(t2.numpy(), tup2.numpy())
@test_util.run_v1_only("b/120545219")
def testCase(self):
with context.eager_mode():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
f1 = lambda: constant_op.constant(17)
f2 = lambda: constant_op.constant(23)
f3 = lambda: constant_op.constant(-1)
r1 = control_flow_ops.case(
[(x < y, f1), (x > z, f2)], default=f3, exclusive=True)
self.assertAllEqual(r1.numpy(), 17)
if __name__ == "__main__":
test.main()
=======
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting t1 initializes v2.
self.assertAllClose([3.0], t1.eval())
self.assertAllClose([10.0], v2.eval())
else:
# Getting t2 initializes v1.
self.assertAllClose([30.0], t2.eval())
self.assertAllClose([1.0], v1.eval())
def testIndexedSlices(self):
for v1_first in [True, False]:
with self.test_session():
v1 = tf.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(
np.float32))
v1_at_1 = tf.IndexedSlices(
control_flow_ops.with_dependencies([v1.initializer], v1),
tf.constant([1]))
v2 = tf.Variable(
np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(
np.float32))
v2_at_1 = tf.IndexedSlices(
control_flow_ops.with_dependencies([v2.initializer], v2),
tf.constant([1]))
st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
g1 = tf.gather(st1.values, st1.indices)
g2 = tf.gather(st2.values, st2.indices)
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting g1 initializes v2.
self.assertAllClose([[10.0, 11.0]], g1.eval())
self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
v2.eval())
else:
# Getting g2 initializes v1.
self.assertAllClose([[10.1, 11.1]], g2.eval())
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
v1.eval())
if __name__ == "__main__":
tf.test.main()
>>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library.
| [
"devsangwoo@gmail.com"
] | devsangwoo@gmail.com |
ad3b5cc29e3c09e2396b8cb83642293b368591b3 | 145d0449b8babd749b758986a93421f6f3c59f9b | /python/deque.py | c0d3c58d56bc104b27dab87ec658fcf21f257184 | [
"MIT"
] | permissive | mattfenwick/DataStructures | cd5fcdbe2a1e2b473a4124fb3d747653b6a3f8eb | 9de052e36fb709488282938e03b5fde8ac92bc1d | refs/heads/master | 2020-05-07T12:01:25.079198 | 2014-03-08T15:39:07 | 2014-03-08T15:39:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py |
class Deque(object):
    """Double-ended queue built from two stacks.

    ``push``/``pop`` work the "front" end and ``unshift``/``shift`` work the
    "back" end.  When one end's stack runs dry, ``_flip`` reverses the other
    stack into it, so each element moves between stacks at most once
    (amortized O(1) per operation).
    """

    def __init__(self, front=None, back=None):
        """Create a deque, optionally seeded with existing front/back stacks.

        Bug fix: the original defaults were the mutable literals
        ``front=[], back=[]``, so every default-constructed Deque shared
        the same two list objects and leaked state between instances.
        ``None`` sentinels allocate fresh lists per instance.
        """
        self._front = front if front is not None else []
        self._back = back if back is not None else []

    def shift(self):
        """Remove and return the element at the back end.

        Raises ValueError if the deque is empty.
        """
        if len(self._back) == 0:
            self._flip()
        if len(self._back) == 0:
            raise ValueError("can't shift empty deque")
        return self._back.pop()

    def unshift(self, elem):
        """Insert ``elem`` at the back end."""
        self._back.append(elem)

    def pop(self):
        """Remove and return the element at the front end.

        Raises ValueError if the deque is empty.
        """
        if len(self._front) == 0:
            self._flip()
        if len(self._front) == 0:
            raise ValueError("can't pop empty deque")
        return self._front.pop()

    def push(self, elem):
        """Insert ``elem`` at the front end."""
        self._front.append(elem)

    def _flip(self):
        """Move all elements from the non-empty stack into the empty one.

        Only legal when at least one stack is empty; the reversal preserves
        element order relative to the opposite end.
        """
        if len(self._front) != 0 and len(self._back) != 0:
            raise ValueError("can't flip deque with non-empty front and back")
        new_back = self._front[-1::-1]
        new_front = self._back[-1::-1]
        self._front = new_front
        self._back = new_back

    def __repr__(self):
        # Present the elements in back-to-front order as one flat list.
        return repr(self._back[-1::-1] + self._front)
| [
"mfenwick100@gmail.com"
] | mfenwick100@gmail.com |
9be6e535d23c0b05932d0b254e86036df8312e4b | 9597e11aa9f9a57acea98361c2ba48f9d26332f2 | /google/cloud/datastore_v1/services/datastore/transports/__init__.py | 2d0659d9b786f35a986d5d147939f0bd0500b88f | [
"Apache-2.0"
] | permissive | renovate-bot/python-datastore | 9fa781d788d7874e487afe0146bec87cd63db725 | 671dc4b2b49d185d49d6a3ae04ff12b926933ae4 | refs/heads/master | 2023-08-22T02:29:20.933250 | 2021-08-19T18:56:58 | 2021-08-19T18:56:58 | 238,816,263 | 1 | 0 | Apache-2.0 | 2020-02-07T00:55:36 | 2020-02-07T00:55:36 | null | UTF-8 | Python | false | false | 1,152 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import DatastoreTransport
from .grpc import DatastoreGrpcTransport
from .grpc_asyncio import DatastoreGrpcAsyncIOTransport
# Compile a registry of transports.
# Maps the transport name accepted by client constructors ("grpc" or
# "grpc_asyncio") to the transport class that implements it.
_transport_registry = OrderedDict()  # type: Dict[str, Type[DatastoreTransport]]
_transport_registry["grpc"] = DatastoreGrpcTransport
_transport_registry["grpc_asyncio"] = DatastoreGrpcAsyncIOTransport
# Public re-exports: the abstract transport plus both concrete transports.
__all__ = (
    "DatastoreTransport",
    "DatastoreGrpcTransport",
    "DatastoreGrpcAsyncIOTransport",
)
| [
"noreply@github.com"
] | renovate-bot.noreply@github.com |
4c25c06ebf9b8bc22d801c7c29fd4d42b8ddc6ae | 0354d8e29fcbb65a06525bcac1f55fd08288b6e0 | /clients/python-flask/generated/swagger_server/models/cause_user_id_cause.py | 4f8f7914fcde8ff4d44b2b4bcadc2c93c370d8ea | [
"MIT"
] | permissive | zhiwei55/swaggy-jenkins | cdc52956a40e947067415cec8d2da1425b3d7670 | 678b5477f5f9f00022b176c34b840055fb1b0a77 | refs/heads/master | 2020-03-06T20:38:53.012467 | 2018-02-19T01:53:33 | 2018-02-19T01:54:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,831 | py | # coding: utf-8
from __future__ import absolute_import
from .base_model_ import Model
from datetime import date, datetime
from typing import List, Dict
from ..util import deserialize_model
class CauseUserIdCause(Model):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    def __init__(self, _class: str=None, short_description: str=None, user_id: str=None, user_name: str=None):
        """
        CauseUserIdCause - a model defined in Swagger

        :param _class: The _class of this CauseUserIdCause.
        :type _class: str
        :param short_description: The short_description of this CauseUserIdCause.
        :type short_description: str
        :param user_id: The user_id of this CauseUserIdCause.
        :type user_id: str
        :param user_name: The user_name of this CauseUserIdCause.
        :type user_name: str
        """
        # Attribute name -> declared type; used by the base Model for
        # (de)serialization.
        self.swagger_types = {
            '_class': str,
            'short_description': str,
            'user_id': str,
            'user_name': str
        }
        # Attribute name -> JSON key used on the wire.
        self.attribute_map = {
            '_class': '_class',
            'short_description': 'shortDescription',
            'user_id': 'userId',
            'user_name': 'userName'
        }
        # ``self.__class`` is name-mangled to ``_CauseUserIdCause__class``,
        # which keeps the backing attribute distinct from the ``_class``
        # property defined below.
        self.__class = _class
        self._short_description = short_description
        self._user_id = user_id
        self._user_name = user_name
    @classmethod
    def from_dict(cls, dikt) -> 'CauseUserIdCause':
        """
        Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The CauseUserIdCause of this CauseUserIdCause.
        :rtype: CauseUserIdCause
        """
        return deserialize_model(dikt, cls)
    @property
    def _class(self) -> str:
        """
        Gets the _class of this CauseUserIdCause.

        :return: The _class of this CauseUserIdCause.
        :rtype: str
        """
        return self.__class
    @_class.setter
    def _class(self, _class: str):
        """
        Sets the _class of this CauseUserIdCause.

        :param _class: The _class of this CauseUserIdCause.
        :type _class: str
        """
        self.__class = _class
    @property
    def short_description(self) -> str:
        """
        Gets the short_description of this CauseUserIdCause.

        :return: The short_description of this CauseUserIdCause.
        :rtype: str
        """
        return self._short_description
    @short_description.setter
    def short_description(self, short_description: str):
        """
        Sets the short_description of this CauseUserIdCause.

        :param short_description: The short_description of this CauseUserIdCause.
        :type short_description: str
        """
        self._short_description = short_description
    @property
    def user_id(self) -> str:
        """
        Gets the user_id of this CauseUserIdCause.

        :return: The user_id of this CauseUserIdCause.
        :rtype: str
        """
        return self._user_id
    @user_id.setter
    def user_id(self, user_id: str):
        """
        Sets the user_id of this CauseUserIdCause.

        :param user_id: The user_id of this CauseUserIdCause.
        :type user_id: str
        """
        self._user_id = user_id
    @property
    def user_name(self) -> str:
        """
        Gets the user_name of this CauseUserIdCause.

        :return: The user_name of this CauseUserIdCause.
        :rtype: str
        """
        return self._user_name
    @user_name.setter
    def user_name(self, user_name: str):
        """
        Sets the user_name of this CauseUserIdCause.

        :param user_name: The user_name of this CauseUserIdCause.
        :type user_name: str
        """
        self._user_name = user_name
| [
"cliffano@gmail.com"
] | cliffano@gmail.com |
8a410271a8a9c98e06529afd8c80fd2be884af94 | 2626f6e6803c8c4341d01f57228a0fe117e3680b | /students/BrandonHenson/Lesson04/run_mailroom.py | d9d651237457dcc3a3a439afbf85193f92bc7e52 | [] | no_license | kmsnyde/SP_Online_Course2_2018 | 9e59362da253cdec558e1c2f39221c174d6216f3 | 7fe8635b47d4792a8575e589797260ad0a2b027e | refs/heads/master | 2020-03-19T17:15:03.945523 | 2018-09-05T22:28:55 | 2018-09-05T22:28:55 | 136,750,231 | 0 | 0 | null | 2018-06-09T19:01:52 | 2018-06-09T19:01:51 | null | UTF-8 | Python | false | false | 4,425 | py | # Python 220
# Lesson 4
# 7-17-18
# !/usr/bin/env python3
import os
from mailroom import Donor, Donor_list
import json
# Seed data: the program starts with a pre-populated donor roster.
donor_history = Donor_list(Donor('Brandon Henson', [1005.49, 3116.72, 5200]),
                           Donor('Alicia Henson', [21.47, 1500]),
                           Donor('Michael Green', [2400.54]),
                           Donor('Brandon Henson Jr', [355.42, 579.31]),
                           Donor('Kaiya Henson', [636.9, 850.13, 125.23]))
# Main menu text shown by menu_selection(); option numbers must match the
# keys of arg_dict defined at the bottom of this module.
# NOTE(review): "Exsisting" typo is user-visible; left as-is here because a
# string literal is runtime behavior, not documentation.
prompt = ('\nSelect an option:\n'
          '[1] Send A Thank You To New Or Exsisting Donor\n'
          '[2] Create a Report\n'
          '[3] Send letters to everyone\n'
          '[4] Exit\n'
          '[5] Save\n'
          '[6] Load\n')
# Prompt used whenever the user may choose an output directory.
directory_prompt = ("\nChoose save location or press enter for default")
def menu_selection(prompt, dispatch_dict):
    """Show `prompt` repeatedly and dispatch the user's menu choice.

    Args:
        prompt: menu text passed straight to input().
        dispatch_dict: maps the typed response to a zero-argument action.

    Loops until a dispatched action returns the sentinel string "Exit Menu".
    """
    while True:
        response = input(prompt)
        try:
            action = dispatch_dict[response]
        except KeyError:
            # Unknown menu option; re-prompt.
            print("\nPick from the listed options.")
            continue
        # Call the action *outside* the try block so a KeyError raised
        # inside the action itself (e.g. a failed dict lookup in load())
        # is not mistaken for a bad menu choice and silently swallowed --
        # that was a latent bug in the original single-expression form.
        if action() == "Exit Menu":
            break
def exit():
    """Menu action 4: return the sentinel that stops menu_selection()."""
    return "Exit Menu"
def load():
    """Replace the module-level donor roster with one read from a JSON file.

    Prompts interactively for the filename (extension included).
    """
    global donor_history
    to_load = input("What do you want to load (with extension)?\n")
    with open(to_load, 'r') as f:
        donor_load = json.load(f)
    # from_json_dict is called on the current roster but its result is
    # rebound to the global -- presumably it builds a fresh Donor_list from
    # the parsed dict; verify against mailroom.py.
    donor_history = donor_history.from_json_dict(donor_load)
def save():
    """Serialize the donor roster to disk.

    Prompts for a base filename; Donor_list.save presumably appends the
    extension -- confirm against mailroom.py.
    """
    record_name = input("Name Of file(without extension)?")
    info = donor_history.to_json()
    donor_history.save(record_name, info)
def report():
    """Menu action 2: print the donor report (delegates to Donor_list.donor_report)."""
    donor_history.donor_report()
def make_file(letter, destination):
    """Write `letter` to `destination`, creating or overwriting the file.

    Args:
        letter: the full text to write.
        destination: path of the output file.
    """
    # Pin the encoding so the letters written do not depend on the
    # platform's locale default (see PEP 597).
    with open(destination, 'w', encoding='utf-8') as f:
        f.write(letter)
def make_destination(donor, need_dir='y', directory=""):
    """Build the output path "<directory>/<Donor_Name>.txt" for a donor.

    When need_dir is 'y' (the default) the directory is requested
    interactively via directory_prompt; otherwise the `directory`
    argument is used as given.
    """
    if need_dir == "y":
        directory = input(directory_prompt)
    filename = "{}.txt".format(donor.name.replace(' ', '_'))
    return os.path.join(directory, filename)
def get_donation_amt(name):
    """Prompt until the user enters a whole-dollar donation amount.

    :param name: donor name, interpolated into the prompt only.
    :return: the amount as an int.
    """
    while True:
        try:
            # NOTE(review): int() rejects/loses cents, yet the seeded
            # donations include decimal values -- consider float();
            # TODO confirm intent.
            amount = int(input("\nHow much did {} donate: ".format(name)))
            break
        except ValueError:
            print("\nThis only works with a number!")
    return amount
def thank_everyone():
    """Menu action 3: write a thank-you letter file for every donor.

    Asks once for the output directory, then reuses it for all letters
    (make_destination is called with need_dir='n').
    """
    directory = input(directory_prompt)
    for donor in donor_history.donor_dictionary:
        make_file(donor.write_note(), make_destination(donor, 'n', directory))
def add_donation(name, amount, donor_list_obj):
    """Record `amount` for donor `name`, creating the donor if unknown.

    :param name: donor name to look up in donor_list_obj.
    :param amount: donation amount to record.
    :param donor_list_obj: Donor_list-like object with a donor_dictionary.
    :return: the existing (or newly created) donor object.
    """
    matched = None
    for existing in donor_list_obj.donor_dictionary:
        if existing.name == name:
            existing.new_donation(amount)
            matched = existing
    if matched is None:
        matched = Donor(name, [amount])
        donor_list_obj.add_donor(matched)
    return matched
def add_new_full(name="", thank_you=""):
    """Interactively record a donation, optionally writing a thank-you file.

    :param name: donor name; prompted for when empty.
    :param thank_you: 'y'/'n' answer; prompted for when empty.
        NOTE(review): this parameter shadows the module-level thank_you()
        function inside this body.
    """
    if name == "":
        name = input("\nWho is the donor?")
    amount = get_donation_amt(name)
    donor = add_donation(name, amount, donor_history)
    if thank_you == "":
        thank_you = input("\nSend a thank you to {}? (y/n): ".format(name))
    if thank_you.upper() == 'Y':
        make_file(donor.write_note(amount), make_destination(donor))
def send_to():
    """Ask for a donor name; typing 'list' first prints the roster.

    :return: the name the user entered.
    """
    recipient = input("\nWho is the donor?\n"
                      "Enter a name or 'list'")
    if recipient.lower() == 'list':
        print(donor_history)
        recipient = input("\nWho is the donor?\n")
        return recipient
    else:
        return recipient
def thank_you():
    """Menu action 1: record a donation and write a thank-you letter.

    Existing donors may add a new donation; declining records nothing and
    writes a letter with amount 0.  Unknown names are routed to
    add_new_full() with the thank-you note forced on.
    """
    name = send_to()
    donor_exists = donor_history.check_donor(name)
    if donor_exists:
        donor = donor_history.get_donor(name)
        new_donation = input("\n{} has donated. Another?(y/n)? ".format(name))
        if new_donation.upper() == 'Y':
            amount = get_donation_amt(name)
            donor.new_donation(amount)
        else:
            amount = 0
        make_file(donor.write_note(amount), make_destination(donor))
    else:
        add_new_full(name, 'y')
def donor_list_sum(donor_list):
    """Return the grand total donated across every donor in the list."""
    return sum(donor.total_donated for donor in donor_list.donor_dictionary)
# Dispatch table for menu_selection(); keys are the strings the user types
# and must stay in sync with the option numbers listed in `prompt`.
arg_dict = {"1": thank_you, "2": report, "3": thank_everyone, "4": exit,
            "5": save, "6": load}
if __name__ == '__main__':
    menu_selection(prompt, arg_dict)
| [
"kmsnyder2@verizon.net"
] | kmsnyder2@verizon.net |
640bc6b34dd826a963ab60f2b4eeae22953d3bd8 | 9c48efbd0b87cb65d9002a4535d90cc0da7f7460 | /07-esercizi/es5_mostra_rango.py | a26721b0f97ff3d92e252192d9e5f0799418f561 | [] | no_license | amedina14/master-python | b439a69507f1e3be5c1b4e5a8001a582d66d03b7 | 12d3ba5aacbfca03a7966599afc4de41cb4af104 | refs/heads/master | 2023-02-24T09:27:23.793033 | 2021-02-01T15:27:13 | 2021-02-01T15:27:13 | 321,659,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | """
Exercise 5
Print the numbers between two user-supplied bounds.
"""
num1 = int(input("Da numero 1: "))
num2 = int(input("A numero 2: "))
# Inclusive upper bound: range() stops before its end, hence num2 + 1.
if num1 < num2:
    for i in range(num1,(num2 + 1)):
        print(i)
else:
    print("Il numero 1 deve essere minore al numero 2")
"""
else:
print("errore")
""" | [
"medinalarry96@gmail.com"
] | medinalarry96@gmail.com |
78294a652cf01ce949f60aab43754fb91611ece2 | 8a7d5d67052892dd5d2a748282958f6244d963c6 | /google-cloud-sdk/lib/googlecloudsdk/third_party/apis/iam/v1/iam_v1_messages.py | f816521bbcf038fba8f70203df97ef5df2d35ae8 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | KisleK/capstone | 7d1d622bd5ca4cd355302778a02dc6d32ed00c88 | fcef874f4fcef4b74ca016ca7bff92677673fded | refs/heads/master | 2021-07-04T03:29:44.888340 | 2017-07-24T16:16:33 | 2017-07-24T16:16:33 | 93,699,673 | 0 | 2 | null | 2020-07-24T22:44:28 | 2017-06-08T02:34:17 | Python | UTF-8 | Python | false | false | 30,732 | py | """Generated message classes for iam version v1.
Manages identity and access control for Google Cloud Platform resources,
including the creation of service accounts, which you can use to authenticate
to Google and make API calls.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
package = 'iam'


class AuditData(_messages.Message):
  """Audit log information specific to Cloud IAM.

  This message is serialized as an `Any` type in the `ServiceData` message
  of an `AuditLog` message.

  Fields:
    policyDelta: Policy delta between the original policy and the newly set
      policy.
  """

  # The integer argument is the protorpc field number; keep it stable.
  policyDelta = _messages.MessageField('PolicyDelta', 1)
class Binding(_messages.Message):
  """Associates `members` with a `role`.

  Fields:
    members: Specifies the identities requesting access for a Cloud Platform
      resource. `members` can have the following values:
      * `allUsers`: A special identifier that represents anyone who is on
        the internet; with or without a Google account.
      * `allAuthenticatedUsers`: A special identifier that represents anyone
        who is authenticated with a Google account or a service account.
      * `user:{emailid}`: An email address that represents a specific Google
        account. For example, `alice@gmail.com` or `joe@example.com`.
      * `serviceAccount:{emailid}`: An email address that represents a
        service account. For example,
        `my-other-app@appspot.gserviceaccount.com`.
      * `group:{emailid}`: An email address that represents a Google group.
        For example, `admins@example.com`.
      * `domain:{domain}`: A Google Apps domain name that represents all the
        users of that domain. For example, `google.com` or `example.com`.
    role: Role that is assigned to `members`. For example, `roles/viewer`,
      `roles/editor`, or `roles/owner`. Required
  """

  # Field numbers (1, 2) are protorpc wire ordinals; keep them stable.
  members = _messages.StringField(1, repeated=True)
  role = _messages.StringField(2)
class BindingDelta(_messages.Message):
  """One delta entry for Binding.

  Each individual change (only one member in each entry) to a binding will
  be a separate entry.

  Enums:
    ActionValueValuesEnum: The action that was performed on a Binding.
      Required

  Fields:
    action: The action that was performed on a Binding. Required
    member: A single identity requesting access for a Cloud Platform
      resource. Follows the same format of Binding.members. Required
    role: Role that is assigned to `members`. For example, `roles/viewer`,
      `roles/editor`, or `roles/owner`. Required
  """

  class ActionValueValuesEnum(_messages.Enum):
    """The action that was performed on a Binding. Required

    Values:
      ACTION_UNSPECIFIED: Unspecified.
      ADD: Addition of a Binding.
      REMOVE: Removal of a Binding.
    """
    ACTION_UNSPECIFIED = 0
    ADD = 1
    REMOVE = 2

  # Field numbers (1-3) are protorpc wire ordinals; keep them stable.
  action = _messages.EnumField('ActionValueValuesEnum', 1)
  member = _messages.StringField(2)
  role = _messages.StringField(3)
class CreateServiceAccountKeyRequest(_messages.Message):
  """The service account key create request.

  Enums:
    KeyAlgorithmValueValuesEnum: Which type of key and algorithm to use for
      the key. The default is currently a 2K RSA key. However this may
      change in the future.
    PrivateKeyTypeValueValuesEnum: The output format of the private key.
      `GOOGLE_CREDENTIALS_FILE` is the default output format.

  Fields:
    includePublicKeyData: A boolean attribute.
    keyAlgorithm: Which type of key and algorithm to use for the key. The
      default is currently a 2K RSA key. However this may change in the
      future.
    privateKeyType: The output format of the private key.
      `GOOGLE_CREDENTIALS_FILE` is the default output format.
  """

  class KeyAlgorithmValueValuesEnum(_messages.Enum):
    """Which type of key and algorithm to use for the key.

    The default is currently a 2K RSA key. However this may change in the
    future.

    Values:
      KEY_ALG_UNSPECIFIED: An unspecified key algorithm.
      KEY_ALG_RSA_1024: 1k RSA Key.
      KEY_ALG_RSA_2048: 2k RSA Key.
    """
    KEY_ALG_UNSPECIFIED = 0
    KEY_ALG_RSA_1024 = 1
    KEY_ALG_RSA_2048 = 2

  class PrivateKeyTypeValueValuesEnum(_messages.Enum):
    """The output format of the private key.

    `GOOGLE_CREDENTIALS_FILE` is the default output format.

    Values:
      TYPE_UNSPECIFIED: Unspecified. Equivalent to
        `TYPE_GOOGLE_CREDENTIALS_FILE`.
      TYPE_PKCS12_FILE: PKCS12 format. The password for the PKCS12 file is
        `notasecret`. For more information, see
        https://tools.ietf.org/html/rfc7292.
      TYPE_GOOGLE_CREDENTIALS_FILE: Google Credentials File format.
    """
    TYPE_UNSPECIFIED = 0
    TYPE_PKCS12_FILE = 1
    TYPE_GOOGLE_CREDENTIALS_FILE = 2

  # Field numbers (1-3) are protorpc wire ordinals; keep them stable.
  includePublicKeyData = _messages.BooleanField(1)
  keyAlgorithm = _messages.EnumField('KeyAlgorithmValueValuesEnum', 2)
  privateKeyType = _messages.EnumField('PrivateKeyTypeValueValuesEnum', 3)
class CreateServiceAccountRequest(_messages.Message):
  """The service account create request.

  Fields:
    accountId: Required. The account id that is used to generate the service
      account email address and a stable unique id. It is unique within a
      project, must be 6-30 characters long, and match the regular
      expression `[a-z]([-a-z0-9]*[a-z0-9])` to comply with RFC1035.
    serviceAccount: The ServiceAccount resource to create. Currently, only
      the following values are user assignable: `display_name` .
  """

  accountId = _messages.StringField(1)
  serviceAccount = _messages.MessageField('ServiceAccount', 2)
class Empty(_messages.Message):
  """A generic empty message that you can re-use to avoid defining
  duplicated empty messages in your APIs.

  A typical example is to use it as the request or the response type of an
  API method. For instance:

      service Foo {
        rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
      }

  The JSON representation for `Empty` is empty JSON object `{}`.
  """
class IamProjectsServiceAccountsCreateRequest(_messages.Message):
  """A IamProjectsServiceAccountsCreateRequest object.

  Fields:
    createServiceAccountRequest: A CreateServiceAccountRequest resource to
      be passed as the request body.
    name: Required. The resource name of the project associated with the
      service accounts, such as `projects/my-project-123`.
  """

  createServiceAccountRequest = _messages.MessageField('CreateServiceAccountRequest', 1)
  # required=True: the resource name is mandatory on this request.
  name = _messages.StringField(2, required=True)
class IamProjectsServiceAccountsDeleteRequest(_messages.Message):
  """A IamProjectsServiceAccountsDeleteRequest object.

  Fields:
    name: The resource name of the service account in the following format:
      `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. Using
      `-` as a wildcard for the project will infer the project from the
      account. The `account` value can be the `email` address or the
      `unique_id` of the service account.
  """

  # required=True: the resource name is mandatory on this request.
  name = _messages.StringField(1, required=True)
class IamProjectsServiceAccountsGetIamPolicyRequest(_messages.Message):
  """A IamProjectsServiceAccountsGetIamPolicyRequest object.

  Fields:
    resource: REQUIRED: The resource for which the policy is being
      requested. See the operation documentation for the appropriate value
      for this field.
  """

  # required=True: the resource name is mandatory on this request.
  resource = _messages.StringField(1, required=True)
class IamProjectsServiceAccountsGetRequest(_messages.Message):
  """A IamProjectsServiceAccountsGetRequest object.

  Fields:
    name: The resource name of the service account in the following format:
      `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. Using
      `-` as a wildcard for the project will infer the project from the
      account. The `account` value can be the `email` address or the
      `unique_id` of the service account.
  """

  # required=True: the resource name is mandatory on this request.
  name = _messages.StringField(1, required=True)
class IamProjectsServiceAccountsKeysCreateRequest(_messages.Message):
"""A IamProjectsServiceAccountsKeysCreateRequest object.
Fields:
createServiceAccountKeyRequest: A CreateServiceAccountKeyRequest resource
to be passed as the request body.
name: The resource name of the service account in the following format:
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. Using
`-` as a wildcard for the project will infer the project from the
account. The `account` value can be the `email` address or the
`unique_id` of the service account.
"""
createServiceAccountKeyRequest = _messages.MessageField('CreateServiceAccountKeyRequest', 1)
name = _messages.StringField(2, required=True)
class IamProjectsServiceAccountsKeysDeleteRequest(_messages.Message):
"""A IamProjectsServiceAccountsKeysDeleteRequest object.
Fields:
name: The resource name of the service account key in the following
format: `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/k
eys/{key}`. Using `-` as a wildcard for the project will infer the
project from the account. The `account` value can be the `email` address
or the `unique_id` of the service account.
"""
name = _messages.StringField(1, required=True)
class IamProjectsServiceAccountsKeysGetRequest(_messages.Message):
"""A IamProjectsServiceAccountsKeysGetRequest object.
Enums:
PublicKeyTypeValueValuesEnum: The output format of the public key
requested. X509_PEM is the default output format.
Fields:
name: The resource name of the service account key in the following
format: `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/k
eys/{key}`. Using `-` as a wildcard for the project will infer the
project from the account. The `account` value can be the `email` address
or the `unique_id` of the service account.
publicKeyType: The output format of the public key requested. X509_PEM is
the default output format.
"""
class PublicKeyTypeValueValuesEnum(_messages.Enum):
"""The output format of the public key requested. X509_PEM is the default
output format.
Values:
TYPE_NONE: <no description>
TYPE_X509_PEM_FILE: <no description>
TYPE_RAW_PUBLIC_KEY: <no description>
"""
TYPE_NONE = 0
TYPE_X509_PEM_FILE = 1
TYPE_RAW_PUBLIC_KEY = 2
name = _messages.StringField(1, required=True)
publicKeyType = _messages.EnumField('PublicKeyTypeValueValuesEnum', 2)
class IamProjectsServiceAccountsKeysListRequest(_messages.Message):
"""A IamProjectsServiceAccountsKeysListRequest object.
Enums:
KeyTypesValueValuesEnum: Filters the types of keys the user wants to
include in the list response. Duplicate key types are not allowed. If no
key type is provided, all keys are returned.
Fields:
keyTypes: Filters the types of keys the user wants to include in the list
response. Duplicate key types are not allowed. If no key type is
provided, all keys are returned.
name: The resource name of the service account in the following format:
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. Using
`-` as a wildcard for the project, will infer the project from the
account. The `account` value can be the `email` address or the
`unique_id` of the service account.
"""
class KeyTypesValueValuesEnum(_messages.Enum):
"""Filters the types of keys the user wants to include in the list
response. Duplicate key types are not allowed. If no key type is provided,
all keys are returned.
Values:
KEY_TYPE_UNSPECIFIED: <no description>
USER_MANAGED: <no description>
SYSTEM_MANAGED: <no description>
"""
KEY_TYPE_UNSPECIFIED = 0
USER_MANAGED = 1
SYSTEM_MANAGED = 2
keyTypes = _messages.EnumField('KeyTypesValueValuesEnum', 1, repeated=True)
name = _messages.StringField(2, required=True)
class IamProjectsServiceAccountsListRequest(_messages.Message):
"""A IamProjectsServiceAccountsListRequest object.
Fields:
name: Required. The resource name of the project associated with the
service accounts, such as `projects/my-project-123`.
pageSize: Optional limit on the number of service accounts to include in
the response. Further accounts can subsequently be obtained by including
the ListServiceAccountsResponse.next_page_token in a subsequent request.
pageToken: Optional pagination token returned in an earlier
ListServiceAccountsResponse.next_page_token.
"""
name = _messages.StringField(1, required=True)
pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(3)
class IamProjectsServiceAccountsSetIamPolicyRequest(_messages.Message):
"""A IamProjectsServiceAccountsSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class IamProjectsServiceAccountsSignBlobRequest(_messages.Message):
"""A IamProjectsServiceAccountsSignBlobRequest object.
Fields:
name: The resource name of the service account in the following format:
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. Using
`-` as a wildcard for the project will infer the project from the
account. The `account` value can be the `email` address or the
`unique_id` of the service account.
signBlobRequest: A SignBlobRequest resource to be passed as the request
body.
"""
name = _messages.StringField(1, required=True)
signBlobRequest = _messages.MessageField('SignBlobRequest', 2)
class IamProjectsServiceAccountsSignJwtRequest(_messages.Message):
"""A IamProjectsServiceAccountsSignJwtRequest object.
Fields:
name: The resource name of the service account in the following format:
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. Using
`-` as a wildcard for the project will infer the project from the
account. The `account` value can be the `email` address or the
`unique_id` of the service account.
signJwtRequest: A SignJwtRequest resource to be passed as the request
body.
"""
name = _messages.StringField(1, required=True)
signJwtRequest = _messages.MessageField('SignJwtRequest', 2)
class IamProjectsServiceAccountsTestIamPermissionsRequest(_messages.Message):
"""A IamProjectsServiceAccountsTestIamPermissionsRequest object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class ListServiceAccountKeysResponse(_messages.Message):
"""The service account keys list response.
Fields:
keys: The public keys for the service account.
"""
keys = _messages.MessageField('ServiceAccountKey', 1, repeated=True)
class ListServiceAccountsResponse(_messages.Message):
"""The service account list response.
Fields:
accounts: The list of matching service accounts.
nextPageToken: To retrieve the next page of results, set
ListServiceAccountsRequest.page_token to this value.
"""
accounts = _messages.MessageField('ServiceAccount', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class Policy(_messages.Message):
"""Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform resources. A `Policy`
consists of a list of `bindings`. A `Binding` binds a list of `members` to a
`role`, where the members can be user accounts, Google groups, Google
domains, and service accounts. A `role` is a named list of permissions
defined by IAM. **Example** { "bindings": [ {
"role": "roles/owner", "members": [
"user:mike@example.com", "group:admins@example.com",
"domain:google.com", "serviceAccount:my-other-
app@appspot.gserviceaccount.com", ] }, {
"role": "roles/viewer", "members": ["user:sean@example.com"]
} ] } For a description of IAM and its features, see the [IAM
developer's guide](https://cloud.google.com/iam).
Fields:
bindings: Associates a list of `members` to a `role`. `bindings` with no
members will result in an error.
etag: `etag` is used for optimistic concurrency control as a way to help
prevent simultaneous updates of a policy from overwriting each other. It
is strongly suggested that systems make use of the `etag` in the read-
modify-write cycle to perform policy updates in order to avoid race
conditions: An `etag` is returned in the response to `getIamPolicy`, and
systems are expected to put that etag in the request to `setIamPolicy`
to ensure that their change will be applied to the same version of the
policy. If no `etag` is provided in the call to `setIamPolicy`, then
the existing policy is overwritten blindly.
version: Version of the `Policy`. The default version is 0.
"""
bindings = _messages.MessageField('Binding', 1, repeated=True)
etag = _messages.BytesField(2)
version = _messages.IntegerField(3, variant=_messages.Variant.INT32)
class PolicyDelta(_messages.Message):
"""The difference delta between two policies.
Fields:
bindingDeltas: The delta for Bindings between two policies.
"""
bindingDeltas = _messages.MessageField('BindingDelta', 1, repeated=True)
class QueryGrantableRolesRequest(_messages.Message):
"""The grantable role query request.
Fields:
fullResourceName: Required. The full resource name to query from the list
of grantable roles. The name follows the Google Cloud Platform resource
format. For example, a Cloud Platform project with id `my-project` will
be named `//cloudresourcemanager.googleapis.com/projects/my-project`.
pageSize: Optional limit on the number of roles to include in the
response.
pageToken: Optional pagination token returned in an earlier
QueryGrantableRolesResponse.
"""
fullResourceName = _messages.StringField(1)
pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(3)
class QueryGrantableRolesResponse(_messages.Message):
"""The grantable role query response.
Fields:
nextPageToken: To retrieve the next page of results, set
`QueryGrantableRolesRequest.page_token` to this value.
roles: The list of matching roles.
"""
nextPageToken = _messages.StringField(1)
roles = _messages.MessageField('Role', 2, repeated=True)
class Role(_messages.Message):
"""A role in the Identity and Access Management API.
Fields:
description: Optional. A human-readable description for the role.
name: The name of the role. When Role is used in CreateRole, the role
name must not be set. When Role is used in output and other input such
as UpdateRole, the role name is the complete path, e.g.,
roles/logging.viewer for curated roles and
organizations/{ORGANIZATION_ID}/roles/logging.viewer for custom roles.
title: Optional. A human-readable title for the role. Typically this is
limited to 100 UTF-8 bytes.
"""
description = _messages.StringField(1)
name = _messages.StringField(2)
title = _messages.StringField(3)
class ServiceAccount(_messages.Message):
"""A service account in the Identity and Access Management API. To create a
service account, specify the `project_id` and the `account_id` for the
account. The `account_id` is unique within the project, and is used to
generate the service account email address and a stable `unique_id`. If the
account already exists, the account's resource name is returned in
util::Status's ResourceInfo.resource_name in the format of
projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}. The caller
can use the name in other methods to access the account. All other methods
can identify the service account using the format
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`. Using `-`
as a wildcard for the project will infer the project from the account. The
`account` value can be the `email` address or the `unique_id` of the service
account.
Fields:
displayName: Optional. A user-specified description of the service
account. Must be fewer than 100 UTF-8 bytes.
email: @OutputOnly The email address of the service account.
etag: Used to perform a consistent read-modify-write.
name: The resource name of the service account in the following format:
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.
Requests using `-` as a wildcard for the project will infer the project
from the `account` and the `account` value can be the `email` address or
the `unique_id` of the service account. In responses the resource name
will always be in the format
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.
oauth2ClientId: @OutputOnly. The OAuth2 client id for the service account.
This is used in conjunction with the OAuth2 clientconfig API to make
three legged OAuth2 (3LO) flows to access the data of Google users.
projectId: @OutputOnly The id of the project that owns the service
account.
uniqueId: @OutputOnly The unique and stable id of the service account.
"""
displayName = _messages.StringField(1)
email = _messages.StringField(2)
etag = _messages.BytesField(3)
name = _messages.StringField(4)
oauth2ClientId = _messages.StringField(5)
projectId = _messages.StringField(6)
uniqueId = _messages.StringField(7)
class ServiceAccountKey(_messages.Message):
"""Represents a service account key. A service account has two sets of key-
pairs: user-managed, and system-managed. User-managed key-pairs can be
created and deleted by users. Users are responsible for rotating these keys
periodically to ensure security of their service accounts. Users retain the
private key of these key-pairs, and Google retains ONLY the public key.
System-managed key-pairs are managed automatically by Google, and rotated
daily without user intervention. The private key never leaves Google's
servers to maximize security. Public keys for all service accounts are also
published at the OAuth2 Service Account API.
Enums:
KeyAlgorithmValueValuesEnum: Specifies the algorithm (and possibly key
size) for the key.
PrivateKeyTypeValueValuesEnum: The output format for the private key. Only
provided in `CreateServiceAccountKey` responses, not in
`GetServiceAccountKey` or `ListServiceAccountKey` responses. Google
never exposes system-managed private keys, and never retains user-
managed private keys.
Fields:
keyAlgorithm: Specifies the algorithm (and possibly key size) for the key.
name: The resource name of the service account key in the following format
`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/keys/{key
}`.
privateKeyData: The private key data. Only provided in
`CreateServiceAccountKey` responses. Make sure to keep the private key
data secure because it allows for the assertion of the service account
identity.
privateKeyType: The output format for the private key. Only provided in
`CreateServiceAccountKey` responses, not in `GetServiceAccountKey` or
`ListServiceAccountKey` responses. Google never exposes system-managed
private keys, and never retains user-managed private keys.
publicKeyData: The public key data. Only provided in
`GetServiceAccountKey` responses.
validAfterTime: The key can be used after this timestamp.
validBeforeTime: The key can be used before this timestamp.
"""
class KeyAlgorithmValueValuesEnum(_messages.Enum):
"""Specifies the algorithm (and possibly key size) for the key.
Values:
KEY_ALG_UNSPECIFIED: An unspecified key algorithm.
KEY_ALG_RSA_1024: 1k RSA Key.
KEY_ALG_RSA_2048: 2k RSA Key.
"""
KEY_ALG_UNSPECIFIED = 0
KEY_ALG_RSA_1024 = 1
KEY_ALG_RSA_2048 = 2
class PrivateKeyTypeValueValuesEnum(_messages.Enum):
"""The output format for the private key. Only provided in
`CreateServiceAccountKey` responses, not in `GetServiceAccountKey` or
`ListServiceAccountKey` responses. Google never exposes system-managed
private keys, and never retains user-managed private keys.
Values:
TYPE_UNSPECIFIED: Unspecified. Equivalent to
`TYPE_GOOGLE_CREDENTIALS_FILE`.
TYPE_PKCS12_FILE: PKCS12 format. The password for the PKCS12 file is
`notasecret`. For more information, see
https://tools.ietf.org/html/rfc7292.
TYPE_GOOGLE_CREDENTIALS_FILE: Google Credentials File format.
"""
TYPE_UNSPECIFIED = 0
TYPE_PKCS12_FILE = 1
TYPE_GOOGLE_CREDENTIALS_FILE = 2
keyAlgorithm = _messages.EnumField('KeyAlgorithmValueValuesEnum', 1)
name = _messages.StringField(2)
privateKeyData = _messages.BytesField(3)
privateKeyType = _messages.EnumField('PrivateKeyTypeValueValuesEnum', 4)
publicKeyData = _messages.BytesField(5)
validAfterTime = _messages.StringField(6)
validBeforeTime = _messages.StringField(7)
class SetIamPolicyRequest(_messages.Message):
"""Request message for `SetIamPolicy` method.
Fields:
policy: REQUIRED: The complete policy to be applied to the `resource`. The
size of the policy is limited to a few 10s of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
"""
policy = _messages.MessageField('Policy', 1)
class SignBlobRequest(_messages.Message):
"""The service account sign blob request.
Fields:
bytesToSign: The bytes to sign.
"""
bytesToSign = _messages.BytesField(1)
class SignBlobResponse(_messages.Message):
"""The service account sign blob response.
Fields:
keyId: The id of the key used to sign the blob.
signature: The signed blob.
"""
keyId = _messages.StringField(1)
signature = _messages.BytesField(2)
class SignJwtRequest(_messages.Message):
"""The service account sign JWT request.
Fields:
payload: The JWT payload to sign, a JSON JWT Claim set.
"""
payload = _messages.StringField(1)
class SignJwtResponse(_messages.Message):
"""The service account sign JWT response.
Fields:
keyId: The id of the key used to sign the JWT.
signedJwt: The signed JWT.
"""
keyId = _messages.StringField(1)
signedJwt = _messages.StringField(2)
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
bearer_token: OAuth bearer token.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
pp: Pretty-print response.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
bearer_token = _messages.StringField(4)
callback = _messages.StringField(5)
fields = _messages.StringField(6)
key = _messages.StringField(7)
oauth_token = _messages.StringField(8)
pp = _messages.BooleanField(9, default=True)
prettyPrint = _messages.BooleanField(10, default=True)
quotaUser = _messages.StringField(11)
trace = _messages.StringField(12)
uploadType = _messages.StringField(13)
upload_protocol = _messages.StringField(14)
class TestIamPermissionsRequest(_messages.Message):
"""Request message for `TestIamPermissions` method.
Fields:
permissions: The set of permissions to check for the `resource`.
Permissions with wildcards (such as '*' or 'storage.*') are not allowed.
For more information see [IAM
Overview](https://cloud.google.com/iam/docs/overview#permissions).
"""
permissions = _messages.StringField(1, repeated=True)
class TestIamPermissionsResponse(_messages.Message):
"""Response message for `TestIamPermissions` method.
Fields:
permissions: A subset of `TestPermissionsRequest.permissions` that the
caller is allowed.
"""
permissions = _messages.StringField(1, repeated=True)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv',
package=u'iam')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1',
package=u'iam')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2',
package=u'iam')
| [
"kisle.kuhn1@gmail.com"
] | kisle.kuhn1@gmail.com |
7e1e63e6d0914cb94a2e3b298ed1709387b77b63 | 574ba9b7b7f79ee06e395f697f2eb08d39081a2e | /nkms/network/protocols.py | e2eecbf55e9286e95223276bb527d8fa9e0c087e | [] | no_license | xxxAHMEDxxx/nucypher-kms | 5dcba61ee8a701dd9025dfa425a6347b3cfc80e1 | a10bdccc12374b1bdd8212f4c939f0d411729708 | refs/heads/master | 2021-07-23T23:21:46.776424 | 2017-11-05T00:18:14 | 2017-11-05T00:18:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,929 | py | import asyncio
from kademlia.node import Node
from kademlia.protocol import KademliaProtocol
from kademlia.utils import digest
from nkms.network.constants import NODE_HAS_NO_STORAGE
from nkms.network.node import NuCypherNode
from nkms.network.routing import NuCypherRoutingTable
class NuCypherHashProtocol(KademliaProtocol):
def __init__(self, sourceNode, storage, ksize, *args, **kwargs):
super().__init__(sourceNode, storage, ksize, *args, **kwargs)
self.router = NuCypherRoutingTable(self, ksize, sourceNode)
def check_node_for_storage(self, node):
try:
return node.can_store()
except AttributeError:
return True
def rpc_ping(self, sender, nodeid, node_capabilities=[]):
source = NuCypherNode(nodeid, sender[0], sender[1], capabilities_as_strings=node_capabilities)
self.welcomeIfNewNode(source)
return self.sourceNode.id
async def callStore(self, nodeToAsk, key, value):
# nodeToAsk = NuCypherNode
if self.check_node_for_storage(nodeToAsk):
address = (nodeToAsk.ip, nodeToAsk.port)
# TODO: encrypt `value` with public key of nodeToAsk
store_future = self.store(address, self.sourceNode.id, key, value)
result = await store_future
success, data = self.handleCallResponse(result, nodeToAsk)
return success, data
else:
return NODE_HAS_NO_STORAGE, False
class NuCypherSeedOnlyProtocol(NuCypherHashProtocol):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def rpc_store(self, sender, nodeid, key, value):
source = Node(nodeid, sender[0], sender[1])
self.welcomeIfNewNode(source)
self.log.debug(
"got a store request from %s, but THIS VALUE WILL NOT BE STORED as this is a seed-only node." % str(
sender))
return True
| [
"justin@justinholmes.com"
] | justin@justinholmes.com |
de000d15fcb6e39a0cf59d0226b600017f317501 | 62ed242c7195788950e1d69dec8a0da0c29d0489 | /8_function/function_parameter2.py | 70e37ce9f05f02b859365ad023e1665a2e79d8d7 | [] | no_license | lmw8864/MyFirstPython_part1_python_basic | f11fbe2d524a4acfa00c2b18488e8f851de9e0ba | ab850187581f9a415066d7b75175a92023c0a691 | refs/heads/master | 2021-06-27T06:50:18.017125 | 2017-09-16T17:01:20 | 2017-09-16T17:01:20 | 103,767,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | # function_parameter2.py
"""
매개변수를 임의의 숫자만큼 전달하기
매개변수를 몇 개나 받을지 미리 알 수 없을 때 매개변수 앞에 '*'를 붙여주면
파이썬이 매개변수명의 빈 튜플을 만들고 받는 값을 모두 이 튜플에 저장한다.
- 매개변수로 *toppings 를 지정하면,
→ 파이썬이 toppings 라는 빈 튜플을 만들고 받는 값을 모두 저장함.
"""
def make_pizza(*toppings):
"""주문받은 토핑 리스트 출력"""
print(toppings)
make_pizza('pepperoni') # 하나의 값을 받더라도 튜플로 저장한다.
make_pizza('mushrooms', 'green peppers', 'extra cheese')
# ('pepperoni',)
# ('mushrooms', 'green peppers', 'extra cheese')
def make_pizza(*toppings):
"""만들려고 하는 피자를 요약합니다."""
print("\nMaking a pizza with the following toppings:")
for topping in toppings:
print("- " + topping)
make_pizza('pepperoni')
make_pizza('mushrooms', 'green peppers', 'extra cheese')
# Making a pizza with the following toppings:
# - pepperoni
#
# Making a pizza with the following toppings:
# - mushrooms
# - green peppers
# - extra cheese
print("\n")
# 위치형 매개변수와 임의의 매개변수 함께 쓰기
def make_pizza(size, *toppings):
"""만들려고 하는 피자를 요약합니다."""
print("\nMaking a " + str(size) + "-inch pizza with the following toppings:")
for topping in toppings:
print("- " + topping)
make_pizza(16, 'pepperoni')
make_pizza(12, 'mushrooms', 'green peppers', 'extra cheese')
# Making a 16-inch pizza with the following toppings:
# - pepperoni
#
# Making a 12-inch pizza with the following toppings:
# - mushrooms
# - green peppers
# - extra cheese
| [
"lmw8864@gmail.com"
] | lmw8864@gmail.com |
300a527f303b1deae9cb0f2e2af9747afd203d3b | 35ff4e124ea73cd2630ddf25dfe019b4b4e3f5d6 | /69_SqrtX/69_SqrtX_3.py | f709058b297d6324a43b6f8ece0dda08f3ed9955 | [] | no_license | H-Cong/LeetCode | 0a2084a4845b5d7fac67c89bd72a2adf49f90c3d | d00993a88c6b34fcd79d0a6580fde5c523a2741d | refs/heads/master | 2023-03-19T15:22:00.971461 | 2021-03-11T00:33:00 | 2021-03-11T00:33:00 | 303,265,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | class Solution:
def mySqrt(self, x: int) -> int:
'''
Gradient Descent with Adaptive Learning Rates
'''
n = 0
lr = x
while abs(n**2 - x) >= 0.1:
lr = lr / 2
gradient = self._gradient(x, n)
n -= lr*gradient
return int(n + 1) if int(n + 1)**2 == x else int(n)
def _gradient(self, x, n):
v = n**2 - x
return 1 if v > 0 else -1
# TC: log(x)
# SC: O(1)
# why starts from 0?
# I think as f(x) = x^2 - n where n >= 0 is monotonically increaseing in
# the range of [0, inf). And we are looking for the point to make f(x) = 0
# When gradient (i.e. f(x)) < 0, it means that we need to move x to right
# When gradient > 0, it means that the target x is on the left of current x
# Initializing x = 0 is the only way to make this logic valid.
# ref: https://leetcode.com/problems/sqrtx/discuss/869428/Gradient-Descent-solution-for-machine-learning-interviews-O(logx)
| [
"nych1989@gmail.com"
] | nych1989@gmail.com |
c5e205dfc2e0827a38c3eb10c49570c9aeb24283 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2643/61519/310414.py | d92838a5acddda72ca16a1f452c2818c5b2bcd71 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | cus=list(input().split(","))
gru=list(input().split(","))
k=int(input())
res=[]
number=0
for i in range(len(cus)):
cus[i]=int(cus[i])
gru[i]=int(gru[i])
for i in range(len(cus)-k):
number=0
tem=[]
for j in range(len(cus)):
tem.append(gru[j])
for j in range(i,i+k):
tem[j]=0
for j in range(len(cus)):
if tem[j]==0:
number=number+cus[j]
res.append(number)
print(max(res)) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
b887c469115e9112723e249596fa954101bb9f04 | 46f52778894a1e2d9de04e886335e8659c8a2ff4 | /backend/mainPage/migrations/0003_auto_20200919_1853.py | 98e7cc5ddf252360a612a9b40b1d4f09033437dc | [] | no_license | oereo/Project_C | a038dd761065a2b7b0fbc46405331e27850457ed | eb8bf0283692b422a28c134dc277e194fbe006a7 | refs/heads/master | 2022-12-23T18:38:29.493272 | 2020-10-07T17:17:15 | 2020-10-07T17:17:15 | 278,578,190 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # Generated by Django 2.1.1 on 2020-09-19 09:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainPage', '0002_profile'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='safe_percent',
field=models.CharField(blank=True, max_length=30, null=True),
),
]
| [
"dlstpgns0406@gmail.com"
] | dlstpgns0406@gmail.com |
e48ca3d860f02ae03281701b9eea8aa82636adf8 | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/gos_20200614064953.py | 3d36caf9f6f57b540137e65b38496cc62c88697f | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,332 | py | # # Імпорт фажливих бібліотек
# from BeautifulSoup import BeautifulSoup
# import urllib2
# import re
# # Створення функції пошуку силок
# def getLinks(url):
# # отримання та присвоєння контенту сторінки в змінну
# html_page = urllib2.urlopen(url)
# # Перетворення контенту в обєкт бібліотеки BeautifulSoup
# soup = BeautifulSoup(html_page)
# # створення пустого масиву для лінків
# links = []
# # ЗА ДОПОМОГОЮ ЧИКЛУ ПРОХЛДИМСЯ ПО ВСІХ ЕЛЕМЕНТАХ ДЕ Є СИЛКА
# for link in soup.findAll('a', attrs={'href': re.compile("^http://")}):
# # Додаємо всі силки в список
# links.append(link.get('href'))
# # повертаємо список
# return links
# -----------------------------------------------------------------------------------------------------------
# # # Імпорт фажливих бібліотек
# import subprocess
# # Створення циклу та використання функції range для генерації послідовних чисел
# for ping in range(1,10):
# # генерування IP адреси базуючись на номері ітерації
# address = "127.0.0." + str(ping)
# # виклик функції call яка робить запит на IP адрес та запис відповіді в змінну
# res = subprocess.call(['ping', '-c', '3', address])
# # За допомогою умовних операторів перевіряємо відповідь та виводимо результат
# if res == 0:
# print "ping to", address, "OK"
# elif res == 2:
# print "no response from", address
# else:
# print "ping to", address, "failed!"
# -----------------------------------------------------------------------------------------------------------
# # Імпорт фажливих бібліотек
# import requests
# # Ітеруємося по масиву з адресами зображень
# for i, pic_url in enumerate(["http://x.com/nanachi.jpg", "http://x.com/nezuko.jpg"]):
# # Відкриваємо файл базуючись на номері ітерації
# with open('pic{0}.jpg'.format(i), 'wb') as handle:
# # Отримуємо картинку
# response = requests.get(pic_url, stream=True)
# # Використовуючи умовний оператор перевіряємо чи успішно виконався запит
# if not response.ok:
# print(response)
# # Ітеруємося по байтах картинки та записуємо батчаси в 1024 до файлу
# for block in response.iter_content(1024):
# # Якщо байти закінчилися, завершуємо алгоритм
# if not block:
# break
# # Записуємо байти в файл
# handle.write(block)
# -----------------------------------------------------------------------------------------------------------
# # Створюємо клас для рахунку
# class Bank_Account:
# # В конструкторі ініціалізуємо рахунок як 0
# def __init__(self):
# self.balance=0
# print("Hello!!! Welcome to the Deposit & Withdrawal Machine")
# # В методі депозит, використовуючи функцію input() просимо ввести суму поповенння та додаємо цю суму до рахунку
# def deposit(self):
# amount=float(input("Enter amount to be Deposited: "))
# self.balance += amount
# print("\n Amount Deposited:",amount)
# # В методі депозит, використовуючи функцію input() просимо ввести суму отримання та віднімаємо цю суму від рахунку
# def withdraw(self):
# amount = float(input("Enter amount to be Withdrawn: "))
# # За допомогою умовного оператора перевіряємо чи достатнього грошей на рахунку
# if self.balance>=amount:
# self.balance-=amount
# print("\n You Withdrew:", amount)
# else:
# print("\n Insufficient balance ")
# # Виводимо бааланс на екран
# def display(self):
# print("\n Net Available Balance=",self.balance)
# # Створюємо рахунок
# s = Bank_Account()
# # Проводимо операції з рахунком
# s.deposit()
# s.withdraw()
# s.display()
# -----------------------------------------------------------------------------------------------------------
# # Створюємо рекурсивну функцію яка приймає десяткове число
# def decimalToBinary(n):
# # перевіряємо чи число юільше 1
# if(n > 1):
# # Якщо так, ділемо на 2 юез остачі та рекурсивно викликаємо функцію
# decimalToBinary(n//2)
# # Якщо ні, виводимо на остачу ділення числа на 2
# print(n%2, end=' ')
# # Створюємо функцію яка приймає бінарне число
# def binaryToDecimal(binary):
# # Створюємо додаткову змінну
# binary1 = binary
# # Ініціалізуємо ще 3 змінню даючи їм значення 0
# decimal, i, n = 0, 0, 0
# # Ітеруємося до тих пір поки передане нами число не буде 0
# while(binary != 0):
# # Отримуємо остачу від ділення нашого чила на 10 на записуємо в змінну
# dec = binary % 10
# # Додаємо до результату суму попереднього результату та добуток від dec та піднесення 2 до степеня номеру ітерації
# decimal = decimal + dec * pow(2, i)
# # Змінюємо binary
# binary = binary//10
# # Додаємо 1 до кількості ітерацій
# i += 1
# # Виводимо результат
# print(decimal)
# -----------------------------------------------------------------------------------------------------------
# # Імпорт фажливих бібліотек
# import re
# # В умовному операторі перевіряємо чи підходить введена пошта під знайдений з інтернету regex
# if re.match(r"[^@]+@[^@]+\.[^@]+", "nanachi@gmail.com"):
# # Якщо так, виводиму valid
# print("valid")
# -----------------------------------------------------------------------------------------------------------
# # Створення функції яка приймає текст для шифрування та здвиг
# def encrypt(text,s):
# # Створення змінної для результату
# result = ""
# # Ітеруємося по тексту використовуючи range та довжину тексту
# for i in range(len(text)):
# # Беремо літеру базуючись на номері ітерації
# char = text[i]
# # Перевіряємо чи ця літера велика
# if (char.isupper()):
# # Кодуємо літеру базуючись на її номері
# result += chr((ord(char) + s-65) % 26 + 65)
# else:
# # Кодуємо літеру базуючись на її номері
# result += chr((ord(char) + s - 97) % 26 + 97)
# # Повертаємо результат
# return result
# -----------------------------------------------------------------------------------------------------------
# # Створення списку з телефонами
# numbers = ["0502342349", "0500897897", "0992342349"]
# # Ініціалізація змінної з результатом
# result = {}
# # Ітерації по телефонах для ініціалізації клічів результата
# for num in numbers:
# # Створення ключа бузуючись на номері оператора та присвоєння йому пустого масиву
# result[num[:3]] = []
# # Ітерації по телефонах
# for num in numbers:
# # Додавання телефону до відповідного оператора
# result[num[:3]].append(num)
# # Вивід результатту
# print(result)
# -----------------------------------------------------------------------------------------------------------
# # Імпорт фажливих бібліотек
# import unittest
# # Створення класу з тестами наслідуючись від unittest.TestCase
# class TestStringMethods(unittest.TestCase):
# # Створення методу з тестом
# def test_upper(self):
# # перевірка чи буде результат виконання першого аргументу дорівнювати другому
# self.assertEqual('foo'.upper(), 'FOO')
# # Запуск скрипта
# if __name__ == '__main__':
# unittest.main()
# -----------------------------------------------------------------------------------------------------------
# # Імпорт фажливих бібліотек
# import math
# # створення функції яка приймає 2 точки
# def distance(x1, y1, z1, x2, y2, z2):
# # Знаходження відстані за шкільної формулою використовуючи функції з модулю math
# d = math.sqrt(math.pow(x2 - x1, 2) +
# math.pow(y2 - y1, 2) +
# math.pow(z2 - z1, 2)* 1.0)
# # Вивід результату
# print("Distance is ")
# print(d)
# -----------------------------------------------------------------------------------------------------------
# # Відкриваємо файл для запису
# file1 = open("myfile.txt","w")
# # Записуємо Hello в файл
# file1.write("Hello")
# # Закриваємо файл
# file1.close()
# # Відкриваємо файл для зчитування
# file1 = open("myfile.txt","r+")
# # Виводимо в консоль вміст файлу
# print(file1.read())
# -----------------------------------------------------------------------------------------------------------
# # Програма Python для пошуку k найчастіших слів
# # з набору даних
# from collections import Counter
# data_set = "Welcome to the world of Geeks " \
# "This portal has been created to provide well written well" \
# "thought and well explained solutions for selected questions " \
# "If you like Geeks for Geeks and would like to contribute " \
# "here is your chance You can write article and mail your article " \
# " to contribute at geeksforgeeks org See your article appearing on " \
# "the Geeks for Geeks main page and help thousands of other Geeks. " \
# # split() повертає список усіх слів у рядку
# split_it = data_set.split()
# # Передайте список split_it екземпляру класу Counter.
# Counter = Counter(split_it)
# # most_common () виробляє k найбільш зустрісаємих слів та їхню кількість
# most_occur = Counter.most_common(4)
# # Вивід результатів
# print(most_occur)
# -----------------------------------------------------------------------------------------------------------
зущзду | [
"yevheniira@intelink-ua.com"
] | yevheniira@intelink-ua.com |
a028d774ad4e71bb609099dde08533350e33691e | c239486d2884f03591e20c1e9419156e27805aa1 | /BOJ/구현/BOJ7568_덩치.py | 660821bd5245747d351c1dfb75189a39c11daf1d | [] | no_license | ynsseon07/Coding_with_Python | ba1fda42534e6134b58d742dc02dc204f447f57a | 4e2041198b3720d97934becdcc2603486dfb5564 | refs/heads/master | 2023-08-25T19:57:44.657541 | 2021-10-28T14:11:24 | 2021-10-28T14:11:24 | 364,128,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | N = int(input())
pp = []
for _ in range(N):
pp.append(list(map(int, input().split())))
rank = [0] * len(pp)
for i in range(len(pp)):
for j in range(len(pp)):
if i == j:
continue
if pp[i][0] < pp[j][0] and pp[i][1] < pp[j][1]:
rank[i] += 1
for r in rank:
print(r+1, end=' ') | [
"ynsseon@gmail.com"
] | ynsseon@gmail.com |
5b14d812cdc36e5cd4ee145b3af3f90357e47de3 | 2f330fc050de11676ab46b963b7878882e9b6614 | /memsource_cli/models/task_mapping_dto.py | 0961162bc49c06bd5819b8af375229ebe5c9a512 | [
"Apache-2.0"
] | permissive | zerodayz/memsource-cli-client | 609f48c18a2b6daaa639d4cb8a61da43763b5143 | c2574f1467539a49e6637c874e88d75c7ef789b3 | refs/heads/master | 2020-08-01T12:43:06.497982 | 2019-09-30T11:14:13 | 2019-09-30T11:14:13 | 210,999,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,317 | py | # coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:support@memsource.com>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from memsource_cli.models.uid_reference import UidReference # noqa: F401,E501
class TaskMappingDto(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'task_id': 'str',
'workflow_level': 'str',
'_resource_path': 'str',
'project': 'UidReference',
'job': 'UidReference'
}
attribute_map = {
'task_id': 'taskId',
'workflow_level': 'workflowLevel',
'_resource_path': 'resourcePath',
'project': 'project',
'job': 'job'
}
def __init__(self, task_id=None, workflow_level=None, _resource_path=None, project=None, job=None): # noqa: E501
"""TaskMappingDto - a model defined in Swagger""" # noqa: E501
self._task_id = None
self._workflow_level = None
self.__resource_path = None
self._project = None
self._job = None
self.discriminator = None
if task_id is not None:
self.task_id = task_id
if workflow_level is not None:
self.workflow_level = workflow_level
if _resource_path is not None:
self._resource_path = _resource_path
if project is not None:
self.project = project
if job is not None:
self.job = job
@property
def task_id(self):
"""Gets the task_id of this TaskMappingDto. # noqa: E501
:return: The task_id of this TaskMappingDto. # noqa: E501
:rtype: str
"""
return self._task_id
@task_id.setter
def task_id(self, task_id):
"""Sets the task_id of this TaskMappingDto.
:param task_id: The task_id of this TaskMappingDto. # noqa: E501
:type: str
"""
self._task_id = task_id
@property
def workflow_level(self):
"""Gets the workflow_level of this TaskMappingDto. # noqa: E501
:return: The workflow_level of this TaskMappingDto. # noqa: E501
:rtype: str
"""
return self._workflow_level
@workflow_level.setter
def workflow_level(self, workflow_level):
"""Sets the workflow_level of this TaskMappingDto.
:param workflow_level: The workflow_level of this TaskMappingDto. # noqa: E501
:type: str
"""
self._workflow_level = workflow_level
@property
def _resource_path(self):
"""Gets the _resource_path of this TaskMappingDto. # noqa: E501
:return: The _resource_path of this TaskMappingDto. # noqa: E501
:rtype: str
"""
return self.__resource_path
@_resource_path.setter
def _resource_path(self, _resource_path):
"""Sets the _resource_path of this TaskMappingDto.
:param _resource_path: The _resource_path of this TaskMappingDto. # noqa: E501
:type: str
"""
self.__resource_path = _resource_path
@property
def project(self):
"""Gets the project of this TaskMappingDto. # noqa: E501
:return: The project of this TaskMappingDto. # noqa: E501
:rtype: UidReference
"""
return self._project
@project.setter
def project(self, project):
"""Sets the project of this TaskMappingDto.
:param project: The project of this TaskMappingDto. # noqa: E501
:type: UidReference
"""
self._project = project
@property
def job(self):
"""Gets the job of this TaskMappingDto. # noqa: E501
:return: The job of this TaskMappingDto. # noqa: E501
:rtype: UidReference
"""
return self._job
@job.setter
def job(self, job):
"""Sets the job of this TaskMappingDto.
:param job: The job of this TaskMappingDto. # noqa: E501
:type: UidReference
"""
self._job = job
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TaskMappingDto, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TaskMappingDto):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"cerninr@gmail.com"
] | cerninr@gmail.com |
b30cb1dd3cc3a8ad7622a2f56bab5a2feace2e4e | 5d6851798e229846f7b0fbbb980bb30454d9d3a7 | /jams/tests/eval_test.py | 0cb3c211e609fb637d40a031d172fc8a41cb8351 | [
"ISC"
] | permissive | beckgom/jams | 24d687eb7aefee932b2f83905c3e75438fc37d93 | 439e523baeafd6b0a16393708314ff7e6cace039 | refs/heads/master | 2021-01-24T20:41:43.741991 | 2015-11-12T16:48:32 | 2015-11-12T16:48:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,532 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''mir_eval integration tests'''
import numpy as np
from nose.tools import raises
import jams
# Beat tracking
def create_annotation(values, namespace='beat', offset=0.0, duration=1, confidence=1):
ann = jams.Annotation(namespace=namespace)
time = np.arange(offset, offset + len(values))
if np.isscalar(duration):
time = time * duration
duration = [duration] * len(time)
if np.isscalar(confidence):
confidence = [confidence] * len(time)
for t, d, v, c in zip(time, duration, values, confidence):
ann.append(time=t, duration=d, value=v, confidence=c)
return ann
def test_beat_valid():
ref_ann = create_annotation(values=np.arange(10) % 4 + 1.,
namespace='beat')
est_ann = create_annotation(values=np.arange(9) % 4 + 1.,
namespace='beat',
offset=0.01)
jams.eval.beat(ref_ann, est_ann)
def test_beat_invalid():
ref_ann = create_annotation(values=np.arange(10) % 4 + 1.,
namespace='beat')
est_ann = create_annotation(values=np.arange(9) % 4 + 1.,
namespace='onset',
offset=0.01)
yield raises(jams.NamespaceError)(jams.eval.beat), ref_ann, est_ann
yield raises(jams.NamespaceError)(jams.eval.beat), est_ann, ref_ann
# Onset detection
def test_onset_valid():
ref_ann = create_annotation(values=np.arange(10) % 4 + 1.,
namespace='onset')
est_ann = create_annotation(values=np.arange(9) % 4 + 1.,
namespace='onset',
offset=0.01)
jams.eval.onset(ref_ann, est_ann)
def test_onset_invalid():
ref_ann = create_annotation(values=np.arange(10) % 4 + 1.,
namespace='onset')
est_ann = create_annotation(values=np.arange(9) % 4 + 1.,
namespace='beat',
offset=0.01)
yield raises(jams.NamespaceError)(jams.eval.onset), ref_ann, est_ann
yield raises(jams.NamespaceError)(jams.eval.onset), est_ann, ref_ann
# Chord estimation
def test_chord_valid():
ref_ann = create_annotation(values=['C', 'E', 'G:min7'],
namespace='chord')
est_ann = create_annotation(values=['D', 'E', 'G:maj'],
namespace='chord_harte')
jams.eval.chord(ref_ann, est_ann)
def test_chord_invalid():
ref_ann = create_annotation(values=['C', 'E', 'G:min7'],
namespace='chord')
est_ann = create_annotation(values=[{'tonic': 'C', 'chord': 'I'}],
namespace='chord_roman')
yield raises(jams.NamespaceError)(jams.eval.chord), ref_ann, est_ann
yield raises(jams.NamespaceError)(jams.eval.chord), est_ann, ref_ann
est_ann = create_annotation(values=['D', 'E', 'not at all a chord'],
namespace='chord_harte',
offset=0.01)
yield raises(jams.SchemaError)(jams.eval.chord), ref_ann, est_ann
yield raises(jams.SchemaError)(jams.eval.chord), est_ann, ref_ann
# Segmentation
def test_segment_valid():
ref_ann = create_annotation(values=['A', 'B', 'A', 'C'],
namespace='segment_open')
est_ann = create_annotation(values=['E', 'B', 'E', 'B'],
namespace='segment_open')
jams.eval.segment(ref_ann, est_ann)
def test_segment_invalid():
ref_ann = create_annotation(values=['A', 'B', 'A', 'C'],
namespace='segment_open')
est_ann = create_annotation(values=['E', 'B', 'E', 'B'],
namespace='chord_harte')
yield raises(jams.NamespaceError)(jams.eval.segment), ref_ann, est_ann
yield raises(jams.NamespaceError)(jams.eval.segment), est_ann, ref_ann
est_ann = create_annotation(values=['E', 'B', 'E', 'B'],
namespace='segment_tut')
yield raises(jams.SchemaError)(jams.eval.segment), ref_ann, est_ann
yield raises(jams.SchemaError)(jams.eval.segment), est_ann, ref_ann
# Tempo estimation
def test_tempo_valid():
ref_ann = create_annotation(values=[120.0, 60.0], confidence=[0.75, 0.25],
namespace='tempo')
est_ann = create_annotation(values=[120.0, 80.0], confidence=[0.5, 0.5],
namespace='tempo')
jams.eval.tempo(ref_ann, est_ann)
def test_tempo_invalid():
ref_ann = create_annotation(values=[120.0, 60.0], confidence=[0.75, 0.25],
namespace='tempo')
est_ann = create_annotation(values=[120.0, 80.0], confidence=[0.5, 0.5],
namespace='tag_open')
yield raises(jams.NamespaceError)(jams.eval.tempo), ref_ann, est_ann
yield raises(jams.NamespaceError)(jams.eval.tempo), est_ann, ref_ann
est_ann = create_annotation(values=[120.0, 80.0], confidence=[-5, 1.5],
namespace='tempo')
yield raises(jams.SchemaError)(jams.eval.tempo), ref_ann, est_ann
yield raises(jams.SchemaError)(jams.eval.tempo), est_ann, ref_ann
# Melody
def test_melody_valid():
f1 = np.linspace(110.0, 440.0, 10)
v1 = np.sign(np.random.randn(len(f1)))
v2 = np.sign(np.random.randn(len(f1)))
ref_ann = create_annotation(values=f1 * v1,
confidence=1.0,
duration=0.01,
namespace='pitch_hz')
est_ann = create_annotation(values=f1 * v2,
confidence=1.0,
duration=0.01,
namespace='pitch_hz')
jams.eval.melody(ref_ann, est_ann)
def test_melody_invalid():
f1 = np.linspace(110.0, 440.0, 10)
v1 = np.sign(np.random.randn(len(f1)))
v2 = np.sign(np.random.randn(len(f1)))
ref_ann = create_annotation(values=f1 * v1,
confidence=1.0,
duration=0.01,
namespace='pitch_hz')
est_ann = create_annotation(values=f1 * v2,
confidence=1.0,
duration=0.01,
namespace='pitch_midi')
yield raises(jams.NamespaceError)(jams.eval.melody), ref_ann, est_ann
yield raises(jams.NamespaceError)(jams.eval.melody), est_ann, ref_ann
est_ann = create_annotation(values=['a', 'b', 'c'],
confidence=1.0,
duration=0.01,
namespace='pitch_hz')
yield raises(jams.SchemaError)(jams.eval.melody), ref_ann, est_ann
yield raises(jams.SchemaError)(jams.eval.melody), est_ann, ref_ann
# Pattern discovery
def test_pattern_valid():
ref_jam = jams.load('fixtures/pattern_data.jams')
ref_ann = ref_jam.search(namespace='pattern_jku')[0]
jams.eval.pattern(ref_ann, ref_ann)
def test_pattern_invalid():
ref_jam = jams.load('fixtures/pattern_data.jams')
ref_ann = ref_jam.search(namespace='pattern_jku')[0]
est_ann = create_annotation(values=np.arange(9) % 4 + 1.,
namespace='beat',
offset=0.01)
yield raises(jams.NamespaceError)(jams.eval.pattern), ref_ann, est_ann
yield raises(jams.NamespaceError)(jams.eval.pattern), est_ann, ref_ann
# Check for failure on a badly formed pattern
pattern = {'midi_pitch': 3, 'morph_pitch': 5, 'staff': 1,
'pattern_id': None, 'occurrence_id': 1}
est_ann = create_annotation(values=[pattern],
confidence=1.0,
duration=0.01,
namespace='pattern_jku')
yield raises(jams.SchemaError)(jams.eval.pattern), ref_ann, est_ann
yield raises(jams.SchemaError)(jams.eval.pattern), est_ann, ref_ann
# Hierarchical segmentation
def create_hierarchy(values, offset=0.0, duration=20):
ann = jams.Annotation(namespace='multi_segment')
for level, labels in enumerate(values):
times = np.linspace(offset, offset + duration, num=len(labels), endpoint=False)
durations = list(np.diff(times))
durations.append(duration + offset - times[-1])
for t, d, v in zip(times, durations, labels):
ann.append(time=t, duration=d, value=dict(label=v, level=level))
return ann
def test_hierarchy_valid():
ref_ann = create_hierarchy(values=['AB', 'abac'])
est_ann = create_hierarchy(values=['ABCD', 'abacbcbd'])
jams.eval.hierarchy(ref_ann, est_ann)
def test_hierarchy_invalid():
ref_ann = create_hierarchy(values=['AB', 'abac'])
est_ann = create_hierarchy(values=['ABCD', 'abacbcbd'])
est_ann.namespace = 'segment_open'
yield raises(jams.NamespaceError)(jams.eval.hierarchy), ref_ann, est_ann
yield raises(jams.NamespaceError)(jams.eval.hierarchy), est_ann, ref_ann
est_ann = create_annotation(values=['E', 'B', 'E', 'B'],
namespace='segment_tut')
est_ann.namespace = 'multi_segment'
yield raises(jams.SchemaError)(jams.eval.hierarchy), ref_ann, est_ann
yield raises(jams.SchemaError)(jams.eval.hierarchy), est_ann, ref_ann
| [
"brian.mcfee@nyu.edu"
] | brian.mcfee@nyu.edu |
265c8d793a623c29c762f7177aeb608e40f75461 | 07c136a11942305cc83f073a290a1150edfe0daa | /models.py | f40ce6e9ef2c23df191f160a4751c6426d2e033e | [] | no_license | kknet/Age-and-Gender | 467b90340d93a6e961723b046cd23476609441f2 | 25e67e9d5026a787f5dd9160a10ebc57b94e546f | refs/heads/master | 2020-08-24T14:36:51.541557 | 2019-01-30T08:37:07 | 2019-01-30T08:37:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,316 | py | import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
import torchvision
from torch import nn
from torchsummary import summary
from config import *
from config import device
from utils import parse_args
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBlock(nn.Module):
def __init__(self, channel, reduction=16):
super(SEBlock, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction),
nn.PReLU(),
nn.Linear(channel // reduction, channel),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
class IRBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True):
super(IRBlock, self).__init__()
self.bn0 = nn.BatchNorm2d(inplanes)
self.conv1 = conv3x3(inplanes, inplanes)
self.bn1 = nn.BatchNorm2d(inplanes)
self.prelu = nn.PReLU()
self.conv2 = conv3x3(inplanes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
self.use_se = use_se
if self.use_se:
self.se = SEBlock(planes)
def forward(self, x):
residual = x
out = self.bn0(x)
out = self.conv1(out)
out = self.bn1(out)
out = self.prelu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.use_se:
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.prelu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, use_se=True):
self.inplanes = 64
self.use_se = use_se
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.prelu = nn.PReLU()
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.bn2 = nn.BatchNorm2d(512)
self.dropout = nn.Dropout()
self.fc1 = nn.Linear(512 * 7 * 7, 512)
self.age_pred = nn.Linear(512, age_num_classes)
self.fc2 = nn.Linear(512 * 7 * 7, 512)
self.gen_pred = nn.Linear(512, gen_num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se))
self.inplanes = planes
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, use_se=self.use_se))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.prelu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.bn2(x)
x = self.dropout(x)
x = x.view(x.size(0), -1)
age_out = F.relu(self.fc1(x)) # [N, 512]
age_out = self.age_pred(age_out) # [N, 101]
gen_out = F.relu(self.fc2(x)) # [N, 512]
gen_out = self.gen_pred(gen_out) # [N, 2]
return age_out, gen_out
def resnet18(args, **kwargs):
model = ResNet(IRBlock, [2, 2, 2, 2], use_se=args.use_se, **kwargs)
if args.pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(args, **kwargs):
model = ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, **kwargs)
if args.pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(args, **kwargs):
model = ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, **kwargs)
if args.pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(args, **kwargs):
model = ResNet(IRBlock, [3, 4, 23, 3], use_se=args.use_se, **kwargs)
if args.pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(args, **kwargs):
model = ResNet(IRBlock, [3, 8, 36, 3], use_se=args.use_se, **kwargs)
if args.pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
class AgeGenPredModel(nn.Module):
    """Joint age/gender prediction heads on a pretrained ResNet-18 trunk.

    ``forward`` maps a batch of images to a pair of raw logit tensors:
    age ([N, age_num_classes]) and gender ([N, gen_num_classes]).
    """

    def __init__(self):
        super(AgeGenPredModel, self).__init__()
        # Pretrained torchvision trunk, minus its final avgpool and fc
        # layers (we are not doing ImageNet classification).
        backbone = torchvision.models.resnet18(pretrained=True)
        trunk_layers = list(backbone.children())[:-2]
        self.resnet = nn.Sequential(*trunk_layers)
        self.pool = nn.AvgPool2d(4)
        # Two independent two-layer heads over the shared 512-d feature.
        # NOTE: creation order is kept identical to preserve RNG-dependent
        # default initialization of the untouched layers.
        self.fc1 = nn.Linear(512, 512)
        self.age_pred = nn.Linear(512, age_num_classes)
        self.fc2 = nn.Linear(512, 512)
        self.gen_pred = nn.Linear(512, gen_num_classes)
        nn.init.xavier_uniform_(self.age_pred.weight)
        nn.init.xavier_uniform_(self.gen_pred.weight)

    def forward(self, images):
        feats = self.pool(self.resnet(images))  # [N, 512, 1, 1]
        feats = feats.view(-1, 512)             # [N, 512]
        age_logits = self.age_pred(F.relu(self.fc1(feats)))  # [N, age classes]
        gen_logits = self.gen_pred(F.relu(self.fc2(feats)))  # [N, gen classes]
        return age_logits, gen_logits
if __name__ == "__main__":
    # Smoke test: build the model from CLI flags and print a layer summary
    # for a 3x112x112 input.
    cli_args = parse_args()
    net = resnet50(cli_args).to(device)
    summary(net, (3, 112, 112))
| [
"liuyang12@focusmedia.cn"
] | liuyang12@focusmedia.cn |
0db82e4db91dc685a8d5fa4f607db6916a7edad2 | 3e31bb9dd45b6fb7e97e28322f23633a3dc99f2d | /web_scraping/regex_test.py | 1c7ad087a726badbf814c46d1ca99f8b52bc0dbd | [] | no_license | peterbristow/codeinstitute-stream-two | 9dbb01230128cc1d73edd7a741020f5d1107f58d | 15a7ec241cb6438bafb9e55f20f5c9697f632efb | refs/heads/master | 2020-04-11T04:27:41.504203 | 2016-06-13T20:41:28 | 2016-06-13T20:41:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | import re
# Demo script (Python 2) exploring re.findall / re.search / re.sub.

# --- '*' quantifier: zero or more occurrences of the preceding character ---
print re.findall("ab*c", "ac") # ['ac']
print re.findall("ab*c", "abcd") # ['abc']
print re.findall("ab*c", "acc") # ['ac']
print re.findall("ab*c", "abcac") # ['abc', 'ac']
print re.findall("ab*c", "abdc") # []
print re.findall("ab*c", "ABC") # [] -- matching is case sensitive by default
# Using the re.IGNORECASE flag makes the same pattern match.
print re.findall("ab*c", "ABC", re.IGNORECASE) # ['ABC']
# --- '.' wildcard: exactly one occurrence of any character ------------------
print re.findall("a.c", "abc") # ['abc']
print re.findall("a.c", "abbc") # []
print re.findall("a.c", "ac") # []
print re.findall("a.c", "acc") # ['acc']
# --- '.' combined with '*': any run of characters ---------------------------
print re.findall("a.*c", "abc") # ['abc']
print re.findall("a.*c", "abbc") # ['abbc']
print re.findall("a.*c", "ac") # ['ac']
# re.search() returns a match object (or None); .group() gives the text.
results = re.search("ab*c", "ABC", re.IGNORECASE)
print results.group()
a_string = "Everything we do is <replaced> if it is indeed inside <tags>."
# Greedy substitution: '<.*>' swallows everything from the first '<' to the
# LAST '>', so both tags (and the text between them) become one replacement.
a_string = re.sub("<.*>", "coming up roses", a_string)
print a_string
another_string = "Everything we do is <replaced> if it is indeed inside <tags>."
# Lazy substitution: the '?' tells '<.*?>' to stop at the first '>',
# so each tag is replaced individually.
another_string = re.sub("<.*?>", "coming up roses", another_string)
print another_string
| [
"peterjb73@gmail.com"
] | peterjb73@gmail.com |
f909f2748513ae1af2c003a3453a949ee37eb3ff | bf7959048edc0005e04431a0864c719adc5ea9ea | /python版本/6704-MaximumScore.py | 9181f1fc0bdc2a9d56e9392daf163fb8c633a733 | [] | no_license | Yohager/Leetcode | 7c24f490cfa5fd8e3cdb09e5a2305a134a064a93 | 585af82ff2c2d534053f6886714406019ed0c7d1 | refs/heads/master | 2022-12-07T23:51:16.347174 | 2022-11-28T02:30:53 | 2022-11-28T02:30:53 | 178,201,848 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | class Solution:
def maximumScore(self, nums: List[int], k: int) -> int:
n = len(nums)
l,r = k,k
ans = 0
while True:
while r < n and nums[r] >= nums[k]:
r += 1
while l >= 0 and nums[l] >= nums[k]:
l -= 1
ans = max(ans,(r-l-1)*nums[k])
if l < 0 and r == n:
break
if l >= 0 and r < n:
nums[k] = max(nums[l],nums[r])
elif l < 0:
nums[k] = nums[r]
else:
nums[k] = nums[l]
return ans
| [
"guoyuhang0921@gmail.com"
] | guoyuhang0921@gmail.com |
dde9f9a87549bb12202e514573e9252914f2dcc0 | a0659e58f8073485674d7bc4092f9a04174fb7c7 | /Lib/objc/_C2.py | f3e7b02d732db535decb0efe9163a2c39ebfceaf | [
"MIT"
] | permissive | kanishpatel/Pyto | 74f75de8e06e6120324458346a9c65b73191b935 | feec7a1a54f635a6375fa7ede074ff35afbfbb95 | refs/heads/main | 2023-01-30T15:16:37.095828 | 2020-12-04T14:11:44 | 2020-12-04T14:11:44 | 318,703,734 | 0 | 0 | MIT | 2020-12-05T04:46:16 | 2020-12-05T04:46:15 | null | UTF-8 | Python | false | false | 1,864 | py | '''
Classes from the 'C2' framework.
'''
# Import rubicon's Objective-C bridge; on platforms where it cannot be set
# up it raises ValueError at import time (presumably when the ObjC runtime
# library is unavailable -- confirm against rubicon docs), in which case we
# fall back to a stub that resolves every class name to None.
try:
    from rubicon.objc import ObjCClass
except ValueError:
    def ObjCClass(name):
        return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
# Handles to classes of Apple's private C2 framework, resolved at import
# time via _Class().  Each name is None when the Objective-C runtime (or
# rubicon) is unavailable on this platform.
C2SessionPool = _Class('C2SessionPool')
C2RequestManager = _Class('C2RequestManager')
C2Session = _Class('C2Session')
C2RequestOptions = _Class('C2RequestOptions')
C2SessionTask = _Class('C2SessionTask')
C2SessionCallbackMetrics = _Class('C2SessionCallbackMetrics')
C2DeviceInfo = _Class('C2DeviceInfo')
C2SessionTLSCache = _Class('C2SessionTLSCache')
C2NetworkingDelegateURLSessionDataTask = _Class('C2NetworkingDelegateURLSessionDataTask')
C2NetworkingDelegateURLSession = _Class('C2NetworkingDelegateURLSession')
C2Logging = _Class('C2Logging')
C2Metric = _Class('C2Metric')
C2SessionGroup = _Class('C2SessionGroup')
C2ReportMetrics = _Class('C2ReportMetrics')
C2MetricRequestOptions = _Class('C2MetricRequestOptions')
C2MetricOperationOptions = _Class('C2MetricOperationOptions')
C2MetricOperationGroupOptions = _Class('C2MetricOperationGroupOptions')
C2MetricOptions = _Class('C2MetricOptions')
C2Time = _Class('C2Time')
C2RoutingTable = _Class('C2RoutingTable')
C2Route = _Class('C2Route')
C2MPDeviceInfo = _Class('C2MPDeviceInfo')
C2MPCloudKitOperationGroupInfo = _Class('C2MPCloudKitOperationGroupInfo')
C2MPMetric = _Class('C2MPMetric')
C2MPServerInfo = _Class('C2MPServerInfo')
C2MPGenericEventMetric = _Class('C2MPGenericEventMetric')
C2MPError = _Class('C2MPError')
C2MPGenericEvent = _Class('C2MPGenericEvent')
C2MPCloudKitInfo = _Class('C2MPCloudKitInfo')
C2MPNetworkEvent = _Class('C2MPNetworkEvent')
C2MPCloudKitOperationInfo = _Class('C2MPCloudKitOperationInfo')
C2MPGenericEventMetricValue = _Class('C2MPGenericEventMetricValue')
C2MPInternalTestConfig = _Class('C2MPInternalTestConfig')
"adrilabbelol@gmail.com"
] | adrilabbelol@gmail.com |
6d43ccff678343c91363b14927542e7a10967eda | 3c1ad0919924ed8d96ae5f9d9a10b97cfdf1ee38 | /LSA_cgi/simple-ajax-py.py | 72599d2c97742a8da73234b6978640237744c875 | [] | no_license | emonson/CopyrightScripts | 4439ba584840e74ebdc5ab6083887e530757de64 | 862e5d2eb0af848647bf1cb2d95519071a00adc0 | refs/heads/master | 2020-05-18T15:37:15.926524 | 2017-03-16T14:51:08 | 2017-03-16T14:51:08 | 1,569,450 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | #!/usr/bin/python
import os, stat
import cgi
import cgitb
cgitb.enable()
# Trying to add to PYTHONPATH
import sys
sys.path.insert(0, '/Users/emonson/Programming/VTK_git/VTK/build/bin')
sys.path.insert(0,'/Users/emonson/Programming/VTK_git/VTK/build/Wrapping/Python')
import vtk
# Parse CGI query parameters; 'w' is the word echoed back to the client.
form = cgi.FieldStorage()
secret_word = form.getvalue('w','_blank_')
remote_host = os.environ['REMOTE_ADDR']
# Persist the submitted word to a shared scratch file.
# Owner ends up being _www and permissions 0644
out_file = '/Users/Shared/junk.txt'
f = open(out_file, 'w')
f.write(secret_word)
f.close()
# File permissions
# st = os.stat('sync_get_NotesJournal.sh')
# posix.stat_result(st_mode=33188, st_ino=845809, st_dev=234881032L, st_nlink=1, st_uid=501, st_gid=20, st_size=525, st_atime=1281026596, st_mtime=1247759591, st_ctime=1262712197)
# stat.ST_MODE # 0
# stat.S_IMODE(st[stat.ST_MODE]) # 420
# oct(stat.S_IMODE(st[0])) # '0644'
# bin(420) # '0b110100100'
# os.chmod(out_file, stat.S_IMODE(0b110110110))
# NOTE(review): 0666 makes the scratch file world-writable -- confirm this
# is intended for the demo.
os.chmod(out_file, stat.S_IMODE(0o0666))
# Emit the CGI response (Python 2 print statements).
print "Content-type:text/html\r\n\r\n"
# NOTE(review): secret_word is user input echoed without HTML-escaping
# (reflected XSS); consider cgi.escape() before interpolating.
print "<p>Your word is: <b>%s</b> and your IP address is: <b>%s</b></p>" % (secret_word, remote_host)
# print "Content-type:text/html\r\n\r\n"
# print "<html>"
# print "<head>"
# print "<title>Hello - Second CGI Program</title>"
# print "</head>"
# print "<body>"
# print "<h2>Hello %s</h2>" % (secret_word,)
# print "</body>"
# print "</html>"
# $query = new CGI;
#
# $secretword = $query->param('w');
# $remotehost = $query->remote_host();
#
# print $query->header;
# print "<p>The secret word is <b>$secretword</b> and your IP is <b>$remotehost</b>.</p>";
#
| [
"emonson@cs.duke.edu"
] | emonson@cs.duke.edu |
a9a9634f65bd35da3fd546d96a14c239e1c23448 | e86851297175203451374021595659adbd516b59 | /scripts/deploy_to_netlify.py | d73ded3eed7fecbff06eb62db2809fad5ff3805c | [
"MIT"
] | permissive | stcolumbas/free-church-psalms | f0417d07af449300a5ada758dc95e153712b0e9e | 0eee5faa19306a79d77a55019ff82fcba72fc9b4 | refs/heads/master | 2022-12-16T15:31:44.907547 | 2017-12-08T22:53:40 | 2017-12-08T22:53:40 | 28,723,518 | 2 | 0 | null | 2022-12-07T23:51:49 | 2015-01-02T19:23:24 | Elm | UTF-8 | Python | false | false | 1,824 | py | #!/usr/bin/env python
import os
from hashlib import sha1
import requests
SITE_ID = '8954ba2a-fa5e-447b-ada8-09c4b5ce8b29'
BASE_URL = 'https://api.netlify.com/api/v1/'
token = os.environ.get('NETLIFY_TOKEN')
def hash_file(path):
    """Return the SHA-1 hex digest of the file at *path*.

    Improvement: hashes in 64 KiB chunks via ``sha1().update()`` instead of
    reading the whole file into memory, so large build artifacts in the
    dist/ tree do not blow up the deploy script's footprint.
    """
    digest = sha1()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
def main():
    """Deploy the dist/ directory to Netlify via the digest-based API.

    Flow: hash every file under dist/, POST the uri->sha1 manifest to
    create a deploy, then PUT only the files Netlify reports as missing.
    Raises requests.HTTPError on any non-2xx API response.
    """
    hash_to_path = dict()   # sha1 -> local filesystem path
    uri_to_hash = dict()    # site-relative URI -> sha1
    hash_to_uri = dict()    # sha1 -> site-relative URI
    for root, dirs, files in os.walk('dist'):
        for f in files:
            full_path = os.path.join(root, f)
            hash_ = hash_file(full_path)
            hash_to_path[hash_] = full_path
            # NOTE(review): 'dist/' -> '/' assumes POSIX separators from
            # os.path.join; would not produce valid URIs on Windows.
            uri_to_hash[full_path.replace('dist/', '/')] = hash_
            hash_to_uri[hash_] = full_path.replace('dist/', '/')
    # Create the deploy by posting the file manifest.
    resp = requests.post(
        f'{BASE_URL}sites/{SITE_ID}/deploys',
        json={'files': uri_to_hash},
        headers={'Authorization': f'Bearer {token}'},
    )
    resp.raise_for_status()
    resp_data = resp.json()
    # Netlify answers with the deploy id and the hashes it does not
    # already have cached; only those need uploading.
    deploy_id = resp_data['id']
    required_files = resp_data['required']
    if deploy_id is None or (not required_files):
        print('No files to upload, stopping')
        return
    else:
        print(f'{len(required_files)} files to upload:')
    for rf in required_files:
        path_to_file = hash_to_path[rf]
        uri = hash_to_uri[rf]
        print(f'Uploading {uri}...')
        with open(path_to_file, 'rb') as f:
            resp = requests.put(
                f'{BASE_URL}deploys/{deploy_id}/files{uri}',
                headers={
                    'content-type':'application/octet-stream',
                    'Authorization': f'Bearer {token}'
                },
                data=f.read(),
            )
            resp.raise_for_status()
    print('Deploy successful')
if __name__ == '__main__':
    # Script entry point: requires NETLIFY_TOKEN in the environment.
    main()
| [
"montgomery.dean97@gmail.com"
] | montgomery.dean97@gmail.com |
1dbb6e16d65425e5a0fb377d90cc2c2dabce79de | 2da6133f3cd5c5fc19355292d60253b8c0dbcd49 | /.history/antz/urls_20200404013558.py | ab219bb1e3092201334fc9cc3bfc2abe38597398 | [] | no_license | mirfarzam/python-advance-jadi-maktabkhooneh | b24f5c03ab88e3b12c166a439b925af92f50de49 | d9bcecae73fd992f1290c6fd76761683bb512825 | refs/heads/master | 2021-05-25T21:33:37.782734 | 2020-04-07T22:39:28 | 2020-04-07T22:39:28 | 253,927,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.index),
path('cars/')
] | [
"farzam.mirmoeini@gmail.com"
] | farzam.mirmoeini@gmail.com |
97ead138762bf5e3f051a977b15614e25761f09f | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-roma/huaweicloudsdkroma/v2/model/show_details_of_app_v2_request.py | cc75e5f86cf7d2e5e920f4bb52bf6943d5a2b852 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,835 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowDetailsOfAppV2Request:
    """Auto-generated OpenAPI request model for "show details of app" (v2).

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'instance_id': 'str',
        'app_id': 'str'
    }

    attribute_map = {
        'instance_id': 'instance_id',
        'app_id': 'app_id'
    }

    def __init__(self, instance_id=None, app_id=None):
        """ShowDetailsOfAppV2Request

        The model defined in huaweicloud sdk

        :param instance_id: Instance ID
        :type instance_id: str
        :param app_id: Application ID
        :type app_id: str
        """

        self._instance_id = None
        self._app_id = None
        self.discriminator = None
        self.instance_id = instance_id
        self.app_id = app_id

    @property
    def instance_id(self):
        """Gets the instance_id of this ShowDetailsOfAppV2Request.

        Instance ID

        :return: The instance_id of this ShowDetailsOfAppV2Request.
        :rtype: str
        """
        return self._instance_id

    @instance_id.setter
    def instance_id(self, instance_id):
        """Sets the instance_id of this ShowDetailsOfAppV2Request.

        Instance ID

        :param instance_id: The instance_id of this ShowDetailsOfAppV2Request.
        :type instance_id: str
        """
        self._instance_id = instance_id

    @property
    def app_id(self):
        """Gets the app_id of this ShowDetailsOfAppV2Request.

        Application ID

        :return: The app_id of this ShowDetailsOfAppV2Request.
        :rtype: str
        """
        return self._app_id

    @app_id.setter
    def app_id(self, app_id):
        """Sets the app_id of this ShowDetailsOfAppV2Request.

        Application ID

        :param app_id: The app_id of this ShowDetailsOfAppV2Request.
        :type app_id: str
        """
        self._app_id = app_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask values of attributes listed in sensitive_list.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 default encoding for json.dumps.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ShowDetailsOfAppV2Request):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
447d5d3897c97297582aad9f15348cb2a248c320 | 28129a9c44f3891eb5b3ce8c7fc530252b1c3840 | /algorithms/sorts/test_sort.py | cc829fa3ffc63abb8690c1186e21b5c443cef71c | [] | no_license | ngocyen3006/learn-python | 55eeb221f5a836ebee8c197fc3fddf6c585f02a6 | ec2f35a87f846385f7353e7ef4900e5f80cfdb0a | refs/heads/master | 2020-03-26T16:35:59.151230 | 2019-05-08T07:26:50 | 2019-05-08T07:26:50 | 145,112,258 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,805 | py | import unittest
import random
def gen(n):
    """Return a list of n pseudo-random integers in [0, 100]."""
    values = []
    for _ in range(n):
        values.append(random.randint(0, 100))
    return values
# Use wrapper class to prevent this class being discovered by nosetest.
class Wrapper:
    # Nesting hides TestSort from test discovery; concrete sort tests
    # subclass it and override sortMethod with a real in-place sort.
    class TestSort(unittest.TestCase):  # Class TestSort inherits from class TestCase of module unittest

        # this is an abstract method, which will be overrided by subclass.
        def sortMethod(self, arr):
            pass

        def test_empty(self):
            emptyArr = []
            self.sortMethod(emptyArr)
            self.assertEqual(emptyArr, [], "sort an empty array should return an empty array")

        def test_singleElement(self):
            single = [1]
            self.sortMethod(single)
            self.assertEqual(single, [1])

        def test_repeatedElements(self):
            elem = 1
            repeated = [elem for i in range(5)]
            self.sortMethod(repeated)
            # sort method should not change the size of the array
            self.assertEqual(len(repeated), 5)
            # after sort, all elements will be the same
            self.assertTrue(all([x == elem for x in repeated]))

        def test_sort(self):
            a = [2, 3, 1, 6, 7, 5, 4, 8, 9, 10, 15,14, 13, 12, 11]
            self.sortMethod(a)
            self.assertEqual(a, list(range(1, 16)))

        def test_randomInput(self):
            # Property-based check: adjacent elements must be non-decreasing.
            for i in range(10):
                n = random.randint(3, 10)
                randomArr = gen(n)
                self.sortMethod(randomArr)
                for j in range(n-1):
                    if randomArr[j] > randomArr[j+1]:  # test case fail, sortMethod is wrong
                        print(randomArr)
                        self.fail("sort method provide wrong result for random arr")
if __name__ == '__main__':
    # Run the (abstract) suite directly; passes trivially since sortMethod
    # is a no-op until subclassed.
    unittest.main()
| [
"ngocyen300693@gmail.com"
] | ngocyen300693@gmail.com |
3ec725e3fe5085cd6b29bb34f56d8ca10493be16 | 564cef7c58ed45635f7a09344e6c22c27f3b32f3 | /exercise_library/auto_corrector.py | 7e6dc73ecaf27f4460dcb6cac768d154f58f0db5 | [] | no_license | jjdixon/exercise-library | b7e399626b9e5f898471c03e7533502f281322d3 | 06ea97a1c9c17b533e3f5812dd51685a0349f8af | refs/heads/master | 2020-12-25T05:18:33.275285 | 2015-03-02T19:23:13 | 2015-03-02T19:23:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,126 | py | import re
from collections import defaultdict
from exercise_library.exercise_cacher import ExerciseCacher
class SpellChecker(object):
    """Norvig-style spell corrector trained on exercise names.

    ``words`` and ``train`` take no ``self`` on purpose: they are plain
    helper functions consumed while the class body executes to build the
    ``NWORDS`` frequency table, and are not usable as instance methods.
    """

    def words(text):
        # Tokenize into lowercase alphabetic runs.
        return re.findall('[a-z]+', text.lower())

    def train(features):
        # Word -> occurrence count.
        model = defaultdict(int)
        for f in features:
            model[f] += 1
        return model

    # Vocabulary: every word appearing in any cached exercise name.
    exercises = ExerciseCacher().exercises
    NWORDS = train(
        words(
            " ".join(
                [dict_obj["name"] for dict_obj in exercises]
            )
        )
    )

    alphabet = 'abcdefghijklmnopqrstuvwxyz'

    def _edits1(self, word):
        # All strings at edit distance 1: deletes, transposes, replaces, inserts.
        splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
        deletes = [a + b[1:] for a, b in splits if b]
        transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b) > 1]
        replaces = [a + c + b[1:] for a, b in splits for c in self.alphabet if b]
        inserts = [a + c + b for a, b in splits for c in self.alphabet]
        return set(deletes + transposes + replaces + inserts)

    def _known_edits2(self, word):
        # Edit-distance-2 candidates, kept only if present in the vocabulary.
        return set(e2 for e1 in self._edits1(word) for e2 in self._edits1(e1) if e2 in self.NWORDS)

    def _known(self, words):
        return set(w for w in words if w in self.NWORDS)

    def correct_token(self, token):
        # Prefer exact match, then distance-1, then distance-2, else echo
        # the token back; break ties by vocabulary frequency.
        candidates = self._known([token]) or self._known(self._edits1(token)) or self._known_edits2(token) or [token]
        return max(candidates, key=self.NWORDS.get)

    def correct_phrase(self, text):
        tokens = text.split()
        return [self.correct_token(token) for token in tokens]
class AutoCompleter(object):
    """Prefix-based autocompletion over exercise names (Python 2: xrange).

    The class body builds three class-level indexes at import time:
    normalized name -> exercise dict, token -> names containing it, and
    every token prefix (n-gram) -> tokens sharing that prefix.
    """

    MIN_N_GRAM_SIZE = 1
    exercise_name_to_dict = {}
    exercises = ExerciseCacher().exercises
    token_to_exercise_name = defaultdict(list)
    n_gram_to_tokens = defaultdict(set)
    for exercise in exercises:
        # Normalize: lowercase, strip punctuation to spaces, collapse runs.
        exercise_name = exercise["name"]
        exercise_name = exercise_name.lower().replace("-", " ").replace("(", " ").replace(")", " ").replace("'", " ")
        exercise_name = " ".join(exercise_name.split())
        exercise_name_to_dict[exercise_name] = exercise
        tokens = exercise_name.split()
        for token in tokens:
            token_to_exercise_name[token].append(exercise_name)
            # NOTE(review): with MIN_N_GRAM_SIZE == 1 this branch only fires
            # for empty tokens, which split() cannot produce -- likely dead.
            if len(token) < MIN_N_GRAM_SIZE:
                n_gram_to_tokens[token].add(token)
            for string_size in xrange(MIN_N_GRAM_SIZE, len(token) + 1):
                n_gram = token[:string_size]
                n_gram_to_tokens[n_gram].add(token)

    @classmethod
    def get_exercise_dict_from_name(cls, exercise_name):
        """Return the cached exercise dict for a normalized name, or {}."""
        return cls.exercise_name_to_dict.get(exercise_name, {})

    def _get_real_tokens_from_possible_n_grams(self, tokens):
        # Expand each typed prefix into all vocabulary tokens starting with it.
        real_tokens = []
        for token in tokens:
            token_set = self.n_gram_to_tokens.get(token, set())
            real_tokens.extend(list(token_set))
        return real_tokens

    def _get_scored_exercises_uncollapsed(self, real_tokens):
        # One (name, score) pair per token hit; score favours long tokens
        # relative to the length of the exercise name they match.
        exercises__scores = []
        for token in real_tokens:
            possible_exercises = self.token_to_exercise_name.get(token, [])
            for exercise_name in possible_exercises:
                score = float(len(token)) / len(exercise_name.replace(" ", ""))
                exercises__scores.append((exercise_name, score))
        return exercises__scores

    def _combined_exercise_scores(self, exercises__scores, num_tokens):
        # Sum scores per name, then weight by the fraction of query tokens hit.
        collapsed_exercise_to_score = defaultdict(int)
        collapsed_exercise_to_occurence = defaultdict(int)
        for exercise, score in exercises__scores:
            collapsed_exercise_to_score[exercise] += score
            collapsed_exercise_to_occurence[exercise] += 1
        for exercise in collapsed_exercise_to_score.keys():
            collapsed_exercise_to_score[exercise] *= collapsed_exercise_to_occurence[exercise] / float(num_tokens)
        return collapsed_exercise_to_score

    def _filtered_results(self, exercises__scores):
        # Keep everything above the score threshold, but never fewer than
        # min_results nor more than max_results entries.
        min_results = 5
        max_results = 10
        score_threshold = 0.2
        max_possibles = exercises__scores[:max_results]
        if exercises__scores and exercises__scores[0][1] == 1.0:
            # Perfect top hit: drop names shorter than the exact match.
            exact_match_str = exercises__scores[0][0]
            exercises__scores = [tuple_obj for tuple_obj in exercises__scores if len(tuple_obj[0]) >= len(exact_match_str)]
        possibles_within_thresh = [tuple_obj for tuple_obj in exercises__scores if tuple_obj[1] >= score_threshold]
        min_possibles = possibles_within_thresh if len(possibles_within_thresh) > min_results else max_possibles[:min_results]
        return [tuple_obj[0] for tuple_obj in min_possibles]

    def guess_exercises(self, tokens):
        """Return ranked exercise-name suggestions for the typed tokens."""
        real_tokens = self._get_real_tokens_from_possible_n_grams(tokens)
        exercises__scores = self._get_scored_exercises_uncollapsed(real_tokens)
        collapsed_exercise_to_score = self._combined_exercise_scores(exercises__scores, len(tokens))
        exercises__scores = collapsed_exercise_to_score.items()
        exercises__scores.sort(key=lambda t: t[1], reverse=True)
        return self._filtered_results(exercises__scores)
| [
"slobdell@hearsaycorp.com"
] | slobdell@hearsaycorp.com |
655bd5532b790642e2c48e5016801ab1d1f14da9 | 1e9d743cd42db052a26ac716ebdacc082db70871 | /coding/leetcode/297-serialize-and-deserialize-binary-tree/dfs.py | 4c1189f3ef86424e30f2e50ff4afb9da695fdea9 | [] | no_license | teckoo/interview_public | a993b03cdf2b2f2606207463d841b01d93f12118 | 30198097904994e34f8321926ad2a2cadc8b5940 | refs/heads/master | 2023-04-06T10:35:49.390343 | 2021-04-22T03:38:24 | 2021-04-22T03:38:24 | 320,933,226 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,513 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
def rserialize(root, string):
""" a recursive helper function for the serialize() function."""
# check base case
if root is None:
string += 'None,'
else:
string += str(root.val) + ','
string = rserialize(root.left, string)
string = rserialize(root.right, string)
return string
return rserialize(root, '')
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
def rdeserialize(l):
""" a recursive helper function for deserialization."""
if l[0] == 'None':
l.pop(0)
return None
root = TreeNode(l[0])
l.pop(0)
root.left = rdeserialize(l)
root.right = rdeserialize(l)
return root
data_list = data.split(',')
root = rdeserialize(data_list)
return root
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
| [
"c2.programmer@gmail.com"
] | c2.programmer@gmail.com |
d1ba17a6139aebba2d6cfd140f23e827a2ed6211 | b75a1300f7196269171d04a8be6e4341e00e1359 | /pipelines/process_study_driver.py | d2d0261d738cd54a04cc144799cff4ca85dd55e3 | [] | no_license | lemwill/spikeforest | a32120e6f3484c56ef66398cc8a0d859bde7eaa1 | f36ef7bed0ddbba651edff67c31db52ae372257f | refs/heads/master | 2023-03-17T23:20:01.907903 | 2019-03-15T12:51:15 | 2019-03-15T12:51:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,700 | py | #!/usr/bin/env python
import os
import sys
import argparse
from kbucket import client as kb
from pairio import client as pa
from process_study import Study
def main(*,command,mode='local'):
# Select the study
study_dir='kbucket://b5ecdf1474c5/spikeforest/gen_synth_datasets/datasets_noise10_K20'
study_name='synth_jfm_noise10_K20'
# The following are relevant when mode='remote'
PAIRIO_USER='spikeforest'
KBUCKET_SHARE_ID='magland.spikeforest'
# Specify whether we want to read/write remotely
if mode=='local':
read_local=True; write_local=True; read_remote=False ;write_remote=False
load_local=True; load_remote=True; save_remote=False
elif mode=='remote':
read_local=False; write_local=False; read_remote=True; write_remote=True
load_local=False; load_remote=True; save_remote=True
if write_remote:
PAIRIO_TOKEN=os.getenv('SPIKEFOREST_PAIRIO_TOKEN')
pa.setConfig(user=PAIRIO_USER,token=PAIRIO_TOKEN)
if save_remote:
KBUCKET_UPLOAD_TOKEN=os.getenv('SPIKEFOREST_KBUCKET_TOKEN')
kb.setConfig(upload_share_id=KBUCKET_SHARE_ID,upload_token=KBUCKET_UPLOAD_TOKEN)
kb.testSaveRemote()
else:
raise Exception('Missing or invalid mode:',mode)
pa.setConfig(read_local=read_local,write_local=write_local,read_remote=read_remote,write_remote=write_remote)
pa.setConfig(collections=[PAIRIO_USER])
kb.setConfig(load_local=load_local,load_remote=load_remote,save_remote=save_remote)
kb.setConfig(share_ids=[KBUCKET_SHARE_ID])
study=Study(study_dir=study_dir,study_name=study_name)
if command=='process':
study.process()
elif command=='clear':
study.clearResults()
elif command=='save':
results=study.getResults()
print ('Saving {} results...'.format(len(results)))
key=dict(
name='spikeforest_results',
study_name=study_name
)
kb.saveObject(key=key,object=results)
print ('Saved under key:')
print (key)
else:
raise Exception('Unrecognized command: '+command)
def print_usage():
print ('Usage:')
print ('./process_study_driver.py process')
print ('./process_study_driver.py save')
print ('./process_study_driver.py clear')
if __name__== "__main__":
parser = argparse.ArgumentParser(description = 'Process a spikeforest study')
parser.add_argument('command', help='process, save, or clear')
parser.add_argument('--mode', help='local or remote')
args = parser.parse_args()
main(
command=args.command,
mode=args.mode
)
| [
"jeremy.magland@gmail.com"
] | jeremy.magland@gmail.com |
623ff997d9ac5e130cd2774d3610ebffd5cb1c45 | 1b719cb657087c1221eeab0e7f667aba1fac8760 | /Python Basics/Tips & tricks/numbers.py | d73219ca6f773f81350008cbd4a547d2f0a0ece3 | [] | no_license | Omega97/Learning-Python-Basics | 141a84f9cc3897e8c3fd45cb4b85547c86d3916a | 73bbba6c58b323786945e27be8a212b662eac8a0 | refs/heads/master | 2020-06-29T07:19:19.814817 | 2019-08-04T09:22:27 | 2019-08-04T09:22:27 | 200,472,880 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58 | py |
n = 100_000_000
print(n)
print(select_frames'{n:,}')
| [
"noreply@github.com"
] | Omega97.noreply@github.com |
8bff251d955aeee4159ac2339ee6db119dbe244e | 015e0d41cf9cf85c1a0bfd28c00d0c00ebedcb39 | /metric/coco_scores.py | 8190541f9af79365c9b1cedf78dcc7192a8f5f51 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | zeta1999/gan-compression | 2b074676e49bda43439fd224ca5f8e2bfae13309 | 3224be53f334afe70f7da665906d1ada06233da5 | refs/heads/master | 2023-01-13T06:45:11.676868 | 2020-11-20T12:21:35 | 2020-11-20T12:21:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,584 | py | import os
import cv2
import numpy as np
import torch
from PIL import Image
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
class CocoStuff164k(Dataset):
def __init__(self, root, images, names):
self.root = root
self.ignore_label = 255
self.mean_bgr = np.array((104.008, 116.669, 122.675))
self.label_paths = []
self.images = images
self.names = names
self._set_files()
cv2.setNumThreads(0)
def _set_files(self):
label_paths = []
for name in self.names:
path = os.path.join(self.root, 'val_label', '%s.png' % name)
assert os.path.exists(path)
label_paths.append(path)
self.label_paths = label_paths
def _load_data(self, index):
# Set paths
image_id = self.names[index]
label_path = self.label_paths[index]
# Load an image and label
image = self.images[index]
label = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE)
return image_id, image, label
def __getitem__(self, index):
image_id, image, label = self._load_data(index)
h, w = label.shape
image_pil = Image.fromarray(image)
if image_pil.size[0] != w or image_pil.size[1] != h:
image_pil = image_pil.resize((w, h), Image.BICUBIC)
image = np.asarray(image_pil)
image = np.flip(image, axis=2)
# Mean subtraction
image = image - self.mean_bgr
# HWC -> CHW
image = image.transpose(2, 0, 1)
return image_id, image.astype(np.float32), label.astype(np.int64)
def __len__(self):
return len(self.names)
def _fast_hist(label_true, label_pred, n_class):
mask = (label_true >= 0) & (label_true < n_class)
hist = np.bincount(
n_class * label_true[mask].astype(int) + label_pred[mask],
minlength=n_class ** 2,
).reshape(n_class, n_class)
return hist
def compute_scores(label_trues, label_preds, n_class):
hist = np.zeros((n_class, n_class))
for lt, lp in zip(label_trues, label_preds):
hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
accu = np.diag(hist).sum() / hist.sum()
iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
valid = hist.sum(axis=1) > 0 # added
mIoU = np.nanmean(iu[valid])
return accu * 100, mIoU * 100
def test(fakes, names, model, device, data_dir, batch_size=1, num_workers=0, tqdm_position=None):
dataset = CocoStuff164k(data_dir, fakes, names)
dataloader = DataLoader(dataset=dataset, batch_size=batch_size,
num_workers=num_workers, shuffle=False)
preds, gts = [], []
if tqdm_position is None or tqdm_position >= 0:
import tqdm
dataloader_tqdm = tqdm.tqdm(dataloader, desc='Coco Scores', position=tqdm_position, leave=False)
else:
dataloader_tqdm = dataloader
with torch.no_grad():
for image_ids, images, gt_labels in dataloader_tqdm:
images = images.to(device)
logits = model(images)
_, H, W = gt_labels.shape
if logits.shape[-2] != H or logits.shape[-1] != W:
logits = F.interpolate(
logits, size=(H, W), mode="bilinear", align_corners=False
)
probs = F.softmax(logits, dim=1)
labels = torch.argmax(probs, dim=1)
preds += list(labels.cpu().numpy())
gts += list(gt_labels.numpy())
return compute_scores(gts, preds, n_class=182)
| [
"lmxyy1999@foxmail.com"
] | lmxyy1999@foxmail.com |
5f67ded098c37a29160d3af7fa7a1f691310e71a | e298bf40ae88c2bd8e0a07f3e92f3e08a92edcc6 | /nova/cmd/api.py | f56abfac63f42b7ef5e2630925dcc9a63d0ebe0d | [] | no_license | KevinKaiQian/polar-bear | 46a814c746246394f76505846166673a049f12f2 | 61d4e0ccd7328a6aa543af3b75e5f7fedf98bf8e | refs/heads/master | 2022-04-29T02:15:35.536039 | 2021-05-19T12:33:07 | 2021-05-19T12:33:07 | 172,068,536 | 2 | 0 | null | 2022-03-29T21:56:51 | 2019-02-22T13:11:58 | Python | UTF-8 | Python | false | false | 1,454 | py | import six
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
from nova import exception
from nova.i18n import _LE, _LW
from nova import service
from nova import utils
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import rpc
from nova import objects
from oslo_config import cfg
CONF = cfg.CONF
from oslo_log import log as logging
#LOG = logging.getLogger(__name__)
def main():
    """Launch one WSGI server per API listed in CONF.enabled_apis.

    Exits with status 1 if no API could be started.  APIs whose paste
    config is missing are skipped with a warning rather than aborting.
    """
    #logging.setup(CONF, "nova")
    #import pdb;pdb.set_trace()
    # Wire up RPC, versioned objects and the DB layer before serving.
    rpc.set_defaults(control_exchange='nova')
    rpc.init(CONF)
    objects.register_all()
    sqlalchemy_api.configure(CONF)
    log = logging.getLogger(__name__)
    launcher = service.process_launcher()
    started = 0
    for api in CONF.enabled_apis:
        # SSL is enabled per-API via the enabled_ssl_apis option.
        should_use_ssl = api in CONF.enabled_ssl_apis
        try:
            server = service.WSGIService(api, use_ssl=should_use_ssl)
            #import pdb;pdb.set_trace()
            launcher.launch_service(server, workers=server.workers or 1)
            started += 1
        except exception.PasteAppNotFound as ex:
            # A bad entry in enabled_apis is non-fatal; warn and continue.
            log.warning(
                _LW("%s. ``enabled_apis`` includes bad values. "
                    "Fix to remove this warning."), six.text_type(ex))
    if started == 0:
        log.error(_LE('No APIs were started. '
                      'Check the enabled_apis config option.'))
        sys.exit(1)
    # Block until all launched services terminate.
    launcher.wait()
launcher.wait()
if __name__ == "__main__":
    # Script entry point.
    main()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
baec54e7a2c40ee0b02bb845aef4de9c59c60e57 | 80092fa3b60a00744effb5037324527729d89648 | /Multiple_Plots/multiple_plots_6.py | f20edb93f695c1e22be9d950edad024a9885f424 | [] | no_license | zjxpirate/Data-Analyst-DATAQUEST | 7820d4b218a1ccd49de9eac56bc92dc10917baa9 | 6bd56878cff00b52ca22aba9be7b52be96bb42bd | refs/heads/master | 2020-04-18T00:36:55.719894 | 2019-04-09T00:14:49 | 2019-04-09T00:14:49 | 167,084,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py |
# 1 begins here:
import pandas as pd
import matplotlib.pyplot as plt
unrate = pd.read_csv('unrate.csv')
# unrate['DATE'] = pd.to_datetime(unrate['DATE'])
#
# first_twelve = unrate[0:12]
#
# plt.plot(first_twelve['DATE'], first_twelve['VALUE'])
#
# plt.xticks(rotation=0)
#
# plt.xlabel('Month')
#
# plt.ylabel('Unemployment Rate')
#
# plt.title('Monthly Unemployment Trends, 1948')
#plt.show()
# 2 begins here:
#fig = plt.figure()
#ax1 = fig.add_subplot(2,1,1)
#ax2 = fig.add_subplot(2,1,2)
#plt.show()
# 3 begins here:
# fig = plt.figure()
#
# ax1 = fig.add_subplot(2, 1, 1)
# ax2 = fig.add_subplot(2, 1, 2)
#plt.show()
# 5 begins here:
#
# fig = plt.figure()
# ax1 = fig.add_subplot(2,1,1)
# ax2 = fig.add_subplot(2,1,2)
#
# ax1.plot(unrate[0:12]['DATE'], unrate[0:12]['VALUE'])
# ax2.plot(unrate[12:24]['DATE'], unrate[12:24]['VALUE'])
#
# plt.show()
# 6 begins here:
# Two vertically stacked subplots on one 15x8-inch figure:
# 1948 unemployment on top, 1949 below, each with its own title.
fig = plt.figure(figsize=(15, 8))
ax1 = fig.add_subplot(2,1,1)
ax2 = fig.add_subplot(2,1,2)
ax1.plot(unrate[0:12]['DATE'], unrate[0:12]['VALUE'])
ax1.set_title('Monthly Unemployment Rate, 1948')
ax2.plot(unrate[12:24]['DATE'], unrate[12:24]['VALUE'])
ax2.set_title('Monthly Unemployment Rate, 1949')
plt.show()
| [
"j_zhang21@u.pacific.edu"
] | j_zhang21@u.pacific.edu |
8bdab363b2e9bacfc34fe82e3b02c0eadae830b5 | def8024e06c442a3b033df8d8f4cbbad87e46e96 | /database/pg.py | 456e389b571c72a477f7153fe5100e0c79e839de | [] | no_license | yhkl-dev/cloud_m | ae48cf5c71061b3e060caca80dbe9d5aac345473 | 491dac7ee707df708437ecd283ecde2ef617fa82 | refs/heads/main | 2023-03-10T22:49:03.478569 | 2021-03-03T15:21:38 | 2021-03-03T15:21:38 | 344,126,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,431 | py | import psycopg2
from psycopg2 import pool, extras
from config.config import GlobalConfig
from contextlib import contextmanager
from threading import Semaphore
from psycopg2 import pool, extensions
CONFIG = GlobalConfig()
class ReallyThreadedConnectionPool(psycopg2.pool.ThreadedConnectionPool):
    """Connection pool whose ``getconn`` blocks instead of raising when empty.

    psycopg2's ThreadedConnectionPool raises PoolError once ``maxconn``
    connections are checked out; the counting semaphore here (one permit per
    connection) makes callers wait until a connection is returned.
    """
    def __init__(self, minconn, maxconn, *args, **kwargs):
        # One semaphore permit per pooled connection.
        self._semaphore = Semaphore(maxconn)
        super().__init__(minconn, maxconn, *args, **kwargs)
    def getconn(self, key=None):
        # Blocks while all maxconn connections are in use.
        self._semaphore.acquire()
        return super().getconn(key)
    def putconn(self, *args, **kwargs):
        super().putconn(*args, **kwargs)
        self._semaphore.release()
cnxpool = ReallyThreadedConnectionPool(5, 10, **CONFIG.postgres_dict)
@contextmanager
def get_cursor():
    """Yield a cursor from the shared pool, then clean up unconditionally.

    psycopg2 errors (including ones raised inside the caller's ``with``
    body) are printed and suppressed -- best-effort semantics, so helpers
    such as ``PyPgsql.get_all`` end up returning ``None`` on failure.
    """
    con = None
    cursor = None
    try:
        con = cnxpool.getconn()
        cursor = con.cursor()
        yield cursor
    except psycopg2.Error as e:
        # Preserve original best-effort behaviour: report and swallow.
        print(e)
    finally:
        # Guard both names: the original raised NameError here whenever
        # cnxpool.getconn() itself failed before they were bound.
        if cursor is not None:
            cursor.close()
        if con is not None:
            cnxpool.putconn(con)
class PyPgsql(object):
    """Thin helper around pooled cursor access."""
    @staticmethod
    def get_all(sql):
        # Returns all rows for *sql*; returns None when get_cursor()
        # swallowed a psycopg2 error (the with-body is aborted in that case).
        with get_cursor() as cursor:
            cursor.execute(sql)
            return cursor.fetchall()
class POSTGERS:
    """Introspection helpers for a PostgreSQL 'public' schema."""
    def get_all_tables(self):
        """Return the names of all tables in the ``public`` schema."""
        SQL = """
        SELECT tablename FROM pg_tables where schemaname = 'public';
        """
        result = PyPgsql.get_all(SQL)
        print(result)
        # NOTE(review): result may be None if the query failed upstream,
        # which would make this comprehension raise -- confirm intended.
        return [r[0] for r in result]
    def get_table_structures(self, table_name: str):
        """Return the distinct column type names used by *table_name*.

        NOTE(review): *table_name* is interpolated into the SQL via
        str.format -- safe only for trusted, internal table names.
        """
        column_type_list = []
        SQL = """
        SELECT a.attnum,
        a.attname AS field,
        t.typname AS type,
        a.attlen AS length,
        a.atttypmod AS lengthvar,
        a.attnotnull AS notnull,
        b.description AS comment
        FROM pg_class c,
        pg_attribute a
        LEFT OUTER JOIN pg_description b ON a.attrelid=b.objoid AND a.attnum = b.objsubid,
        pg_type t
        WHERE c.relname = '{table_name}'
        and a.attnum > 0
        and a.attrelid = c.oid
        and a.atttypid = t.oid
        ORDER BY a.attnum;
        """.format(table_name=table_name)
        x = PyPgsql.get_all(SQL)
        print(table_name)
        if x is not None:
            for column in x:
                print(column)
                # column[2] is the 'type' column of the query above.
                column_type_list.append(column[2])
        return list(set(column_type_list))
| [
"kaiyang939325@gmail.com"
] | kaiyang939325@gmail.com |
377850eac06f4f7b1668514b1bcacbc349c0f814 | d8b647e50e356646760b94051652e4fa5ac86c83 | /setup.py | 6a7d79a6d59f9a029e31ea20b1b937253c2f196b | [] | no_license | ffxf/tortuga-kit-gceadapter | 5fb74bc8bdc61bc9d386b76db1951958d9ce19c3 | 7cea8f34cff01d7d4743d99c9db2898fbc2a37b6 | refs/heads/master | 2021-01-25T14:26:59.694308 | 2018-03-02T17:07:51 | 2018-03-02T17:07:51 | 123,699,424 | 0 | 0 | null | 2018-03-03T14:30:40 | 2018-03-03T14:30:40 | null | UTF-8 | Python | false | false | 1,050 | py | # Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
# Packaging metadata for the tortuga GCE resource-adapter kit.
setup(
    name='tortuga-gce-adapter',
    version='6.3.0',
    url='http://univa.com',
    author='Univa Corp',
    author_email='info@univa.com',
    license='Commercial',
    # Ship only runtime packages; kit scaffolding is excluded.
    packages=find_packages(exclude=['tortuga_kits']),
    # Shared namespace packages so the adapter plugs into the tortuga tree.
    namespace_packages=[
        'tortuga',
        'tortuga.resourceAdapter'
    ],
    zip_safe=False,
    install_requires=[
        'google-api-python-client',
        'gevent',
    ]
)
| [
"mfrisch@univa.com"
] | mfrisch@univa.com |
0127385c11bd1a9c9bd072ebe03e6ade4983c466 | c7061fb106b801c12fb40ff331d927a5bb24da80 | /BasicExerciseAndKnowledge/w3cschool/n5_sortion.py | 83e4f19a347a5789574bab81d7f1d19b99ab8c73 | [
"MIT"
] | permissive | Jonathan1214/learn-python | 34e6b5612beeb1a46b5964b0a4e306656355fe84 | 19d0299b30e953069f19402bff5c464c4d5580be | refs/heads/master | 2020-03-27T09:03:16.785034 | 2018-08-31T02:48:34 | 2018-08-31T02:48:34 | 146,310,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | #coding:utf-8
# Problem: read three integers x, y, z and print them in ascending order.
# (Author's note: the built-in sort makes this trivial.)
import re
put_in = ['12 234 21']  # simulated input: one line holding three integers
lt = []
for num in put_in:
    # Split on any non-digit character to extract the numbers.
    nums = re.split(r'[\D]', num)
    for n in nums:
        lt.append(int(n))
lt = sorted(lt)
print lt
"jonathan1214@foxmail.com"
] | jonathan1214@foxmail.com |
f547b32987825ea3be7a56d9d96f5a6d0c9e4d4b | 3def6d5ac41b0956bee326b2cda7e11603eac121 | /gjhandler/__init__.py | 153e7933f581029a3d84469d22fd1e3713ac3369 | [
"MIT"
] | permissive | sfujiwara/gjhandler | 2eca3ea5ae1e4011031f70456f1d7e257d774f91 | 9d10b7ce071fb3dd0b50ee391886a68f9d35e165 | refs/heads/master | 2021-01-18T17:25:58.869822 | 2016-09-01T02:38:06 | 2016-09-01T02:38:06 | 67,048,519 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | # -*- coding: utf-8 -*-
import json
import logging.handlers
import math
class GoogleJsonHandler(logging.handlers.RotatingFileHandler):
    """Rotating file handler that renders each record as Google-style JSON.

    The emitted object carries the formatted message, a timestamp split into
    whole seconds and nanoseconds, the thread id and the level name.  Files
    rotate at 8 MiB with up to five backups.
    """
    def __init__(self, filename):
        # Fixed rotation policy: 8 MiB per file, five rotated backups.
        super().__init__(filename, maxBytes=8 * 1024 * 1024, backupCount=5)
    def format(self, record):
        rendered = super().format(record)
        # Split the float epoch into whole seconds plus a fractional part,
        # and express the fraction in nanoseconds.
        fractional, whole = math.modf(record.created)
        return json.dumps(
            {
                "message": rendered,
                "timestamp": {
                    "seconds": int(whole),
                    "nanos": int(fractional * 1e9),
                },
                "thread": record.thread,
                "severity": record.levelname,
            },
            ensure_ascii=False,
        )
| [
"shuhei.fujiwara@gmail.com"
] | shuhei.fujiwara@gmail.com |
1b27a30f4016c4c94bde52abcb1e8994dadfe4c2 | 594a3eaa550d9374e50db3a5e5358d6fff2f4de6 | /run_fortran/__main__.py | 767fca72bab6cc5d53671f109958592060bd9346 | [
"MIT"
] | permissive | lycantropos/run-fortran | 8d9064764feaa800be6a95d29e23c5d7a8202717 | 13b6ab3b1e2eb0ab56f4f189f8ad665899f65bae | refs/heads/master | 2021-07-24T11:27:42.787948 | 2021-07-12T10:09:49 | 2021-07-12T10:09:49 | 89,684,575 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,559 | py | import copy
import json
import os
import re
import sys
from collections import OrderedDict
from functools import partial
from itertools import chain
from typing import (Any,
Container,
Dict,
Iterable,
Iterator,
List,
NamedTuple,
Optional,
Set,
Tuple,
Union)
import click
OUTPUT_FILE_EXTENSION = '.json'
# Recognized Fortran source-file extensions (fixed- and free-form).
FORTRAN_FILES_EXTENSIONS = {'.f77', '.f90', '.f95', '.f03', '.f', '.for'}
# Matches ``use [, (non_)intrinsic ::] <module>`` statements, capturing the
# optional intrinsic marker and the module name.
MODULE_USAGE_PATTERN = re.compile(r'(?<=\buse)\s*'
                                  r'(?:,\s*'
                                  r'(?P<intrinsic>intrinsic|non_intrinsic)'
                                  r'\s*::)?'
                                  r'\s(?P<module>\w+)',
                                  re.IGNORECASE)
# Matches ``module <name>`` definitions while excluding ``module procedure``.
MODULE_DEFINITION_PATTERN = re.compile(
    r'(?<=\bmodule\s)(?!\s*procedure)(\s*\w+)', re.IGNORECASE)
# Matches quoted string literals so they can be stripped before parsing.
STRING_LITERAL_PATTERN = re.compile('|'.join([
    '\'[^\']*\'',  # for single quoted literal constants
    '\"[^\"]*\"'  # for double quoted literal constants
]))
# A Fortran module reference: its name plus whether it is marked intrinsic.
Module = NamedTuple('Module', [('name', str), ('is_intrinsic', bool)])
# Per-file view: the modules a source file defines and the modules it uses.
Namespace = NamedTuple('Namespace',
                       [('defined', Set[Module]), ('used', Set[Module])])
@click.command()
@click.option('--intrinsic-modules-names', '-i',
              default='IEEE_Arithmetic,IEEE_Features,IEEE_Exceptions,'
                      'ISO_C_Binding,ISO_Fortran_env',
              help='Comma-separated list of intrinsic modules.')
@click.option('--sep', '-s',
              default=' ',
              help='Separator between resulted modules paths.')
@click.option('--output-file-name', '-o',
              default=None,
              type=str,
              help='File name to save modules relations to '
                   '(".json" extension will be added).')
@click.argument('paths',
                nargs=-1,
                type=click.Path(exists=True,
                                file_okay=False,
                                dir_okay=True))
def main(intrinsic_modules_names: str,
         sep: str,
         output_file_name: Optional[str],
         paths: List[str]) -> None:
    """Orders paths by modules names.

    Scans every directory in *paths* for Fortran sources, resolves the
    module use/definition relations between them and writes the file paths
    to stdout ordered so that definitions come before their uses.
    Optionally dumps the resolved relations to ``<output-file-name>.json``.
    """
    # Intrinsic module names are matched case-insensitively.
    intrinsic_modules_names = set(
        map(str.strip, intrinsic_modules_names.lower().split(',')))
    fortran_files_paths = chain.from_iterable(map(to_fortran_files_paths,
                                                  paths))
    namespaces_by_paths = {path: path_to_namespace(path)
                           for path in fortran_files_paths}
    # Expand each file's used-set with transitive dependencies before sorting.
    unfold_namespaces(namespaces_by_paths, intrinsic_modules_names)
    sorted_namespaces_by_paths = OrderedDict(sort_namespaces_with_paths(
        namespaces_by_paths.items()))
    if output_file_name is not None:
        output_file_name += OUTPUT_FILE_EXTENSION
        with open(output_file_name,
                  mode='w',
                  encoding='utf-8') as output_file:
            json.dump(namespaces_by_paths_to_json(sorted_namespaces_by_paths),
                      output_file,
                      indent=True)
    result = sep.join(sorted_namespaces_by_paths.keys())
    sys.stdout.write(result)
def path_to_namespace(path: str) -> Namespace:
    """Parse the Fortran source at *path* into its defined/used module sets."""
    defined = set()
    used = set()
    for statement in parse_normalized_lines(path):
        defined.update(to_defined_modules(statement))
        used.update(to_used_modules(statement))
    return Namespace(defined=defined, used=used)
def namespaces_by_paths_to_json(namespaces_by_paths: Dict[str, Namespace]
                                ) -> Dict[str, Any]:
    """Convert a path->Namespace mapping to a JSON-serializable OrderedDict."""
    return OrderedDict((path, namespace_to_json(namespace))
                       for path, namespace in namespaces_by_paths.items())
def namespace_to_json(namespace: Namespace) -> Dict[str, List[Any]]:
    """Serialize one Namespace as {'defined': [...], 'used': [...]}."""
    return OrderedDict(defined=[module_to_json(module)
                                for module in namespace.defined],
                       used=[module_to_json(module)
                             for module in namespace.used])
def module_to_json(module: Module) -> Dict[str, Union[str, bool]]:
    """Serialize one module reference as ``{name: is_intrinsic}``."""
    return dict(((module.name, module.is_intrinsic),))
def sort_namespaces_with_paths(namespaces_with_paths
                               : Iterable[Tuple[str, Namespace]]
                               ) -> Iterable[Tuple[str, Namespace]]:
    """Order files so module definitions precede their uses.

    Insertion sort: each file is placed after every already-placed file that
    defines a module it uses, and before every already-placed file that uses
    a module it defines.  NOTE(review): when the two constraints conflict
    (a dependency cycle), max() lets the "used" constraint win -- confirm
    that is intended.
    """
    result = []
    for path, namespace in namespaces_with_paths:
        index_by_defined = min(
            (index
             for index, (_, other_namespace) in enumerate(
                result,
                # insertion should be before namespace in which
                # one of current namespace's defined modules is used
                start=0)
             if not other_namespace.used.isdisjoint(namespace.defined)),
            default=0)
        index_by_used = max(
            (index
             for index, (_, other_namespace) in enumerate(
                result,
                # insertion should be after namespace in which
                # one of current namespace's used modules is defined
                start=1)
             if not other_namespace.defined.isdisjoint(namespace.used)),
            default=0)
        result.insert(max(index_by_defined, index_by_used), (path, namespace))
    return result
def unfold_namespaces(namespaces_by_paths: Dict[str, Namespace],
                      intrinsic_modules_names: Container[str]) -> None:
    """Expand each file's ``used`` set with its transitive dependencies.

    Mutates *namespaces_by_paths* in place.  Iteration and path lookups go
    through a deep copy frozen at entry, so the closure is computed against
    the original (pre-expansion) relations.
    """
    namespaces_by_paths_copy = copy.deepcopy(namespaces_by_paths)
    for module_path, original_namespace in namespaces_by_paths_copy.items():
        # Worklist of modules whose own dependencies still need folding in.
        unprocessed_modules = copy.deepcopy(original_namespace.used)
        try:
            while True:
                # set.pop() raises KeyError when the worklist is exhausted;
                # the except below uses that to move on to the next file.
                used_module = unprocessed_modules.pop()
                if used_module.is_intrinsic:
                    continue
                used_module_path = to_module_path(
                    used_module,
                    namespaces_by_paths=namespaces_by_paths_copy,
                    intrinsic_modules_names=intrinsic_modules_names)
                if used_module_path is None:
                    continue
                used_module_namespace = namespaces_by_paths[used_module_path]
                unprocessed_modules |= used_module_namespace.used
                namespace = namespaces_by_paths[module_path]
                unfolded_used_modules = (namespace.used
                                         | used_module_namespace.defined
                                         | used_module_namespace.used)
                namespaces_by_paths[module_path] = (
                    namespace._replace(used=unfolded_used_modules))
        except KeyError:
            continue
def to_module_path(module: Module,
                   *,
                   namespaces_by_paths: Dict[str, Namespace],
                   intrinsic_modules_names: Container[str]) -> Optional[str]:
    """Find the unique file that defines *module*.

    Returns None for known intrinsic modules that no file defines.
    Raises ValueError when the module is defined in several files, or in
    none while not being a known intrinsic module.
    """
    candidates = [path
                  for path, namespace in namespaces_by_paths.items()
                  if module in namespace.defined]
    try:
        # Tuple-unpacking enforces "exactly one candidate".
        result, = candidates
    except ValueError as error:
        if candidates:
            raise ValueError('Found {count} appearances of module "{name}" '
                             'in modules definitions at {paths}.'
                             .format(count=len(candidates),
                                     name=module,
                                     paths=', '.join(candidates))) from error
        elif module.name not in intrinsic_modules_names:
            raise ValueError('Module "{name}" is not found '
                             'in modules definitions.'
                             .format(name=module.name)) from error
        else:
            return None
    return result
def to_fortran_files_paths(directory_path: str) -> Iterator[str]:
    """Yield absolute paths of all Fortran sources beneath *directory_path*."""
    root = os.path.abspath(directory_path)
    for current_dir, _subdir_names, file_names in os.walk(root):
        for file_name in file_names:
            if is_fortran_file(file_name):
                yield os.path.join(current_dir, file_name)
def is_fortran_file(file_name: str) -> bool:
    """Tell whether *file_name* carries a recognized Fortran extension."""
    extension = os.path.splitext(file_name)[1]
    return extension in FORTRAN_FILES_EXTENSIONS
def parse_normalized_lines(file_path: str) -> Iterable[str]:
    """Yield the file's lines lower-cased, with strings and comments removed."""
    with open(file_path) as file:
        yield from map(normalize_line, file)
def normalize_line(line: str) -> str:
    """Lower-case *line* after stripping string literals and ``!`` comments."""
    stripped_statement = line.strip(' ')
    # Remove string literals first so a '!' inside a string is not
    # mistaken for a comment marker.
    line_without_string_literals = STRING_LITERAL_PATTERN.sub(
        '', stripped_statement)
    try:
        result, _ = line_without_string_literals.split('!',
                                                       maxsplit=1)
    except ValueError:
        # no comments found
        result = line_without_string_literals
    return result.lower()
def to_defined_modules(line: str) -> Iterable[Module]:
    """Return the modules *line* defines (never marked intrinsic)."""
    return [Module(name=name,
                   is_intrinsic=False)
            for name in MODULE_DEFINITION_PATTERN.findall(line)]
def to_used_modules(text: str) -> Iterable[Module]:
    """Return the modules *text* uses, honoring the ``intrinsic`` marker."""
    return [Module(name=name,
                   is_intrinsic=intrinsic_marker == 'intrinsic')
            for intrinsic_marker, name in MODULE_USAGE_PATTERN.findall(text)]
if __name__ == '__main__':
main()
| [
"azatibrakov@gmail.com"
] | azatibrakov@gmail.com |
e90d519a2ded43336a44dff2304315ad4182fa9f | 41b031bd7449c75d215478dbb109c823c1c2bccc | /scraps/replacenan.py | 0987196694d0f31f6b8176ded595c78f39986208 | [
"MIT"
] | permissive | woutdenolf/spectrocrunch | dfa667528aa2bb9845d371fef29c4659bcd7392e | 944637c76671adc2fdb909f7a62196bde27c9d22 | refs/heads/master | 2022-12-10T14:31:28.101355 | 2022-11-30T10:00:49 | 2022-11-30T10:00:49 | 79,220,131 | 4 | 0 | MIT | 2022-11-30T10:00:49 | 2017-01-17T11:16:57 | Python | UTF-8 | Python | false | false | 790 | py | # -*- coding: utf-8 -*-
import h5py
import numpy as np
# f = h5py.File(filename)
# data = f["detector0"]["sample"]["data"]
# filename = "/data/visitor/hg94/id21/ffproc/results/600Vmap1/map1.align.h5"
# data[np.isnan(data)] = 0
# filename = "/data/visitor/hg94/id21/ffproc/results/SKA2116_1.h5"
# f = h5py.File(filename)
# data = f["Aligned"]["NXdata"]["data"]
# data[np.isnan(data)] = 0
# data[:] = -np.log(data)
# filename = "/data/visitor/hg94/id21/ffproc/results/20Vpow_1.h5"
# f = h5py.File(filename)
# data = f["Aligned"]["NXdata"]["data"]
# data[np.isnan(data)] = 0
# data[:] = -np.log(data)
# filename = "/data/visitor/hg94/id21/ffproc/results/SKC1129_2.h5"
# f = h5py.File(filename)
# data = f["Aligned"]["NXdata"]["data"]
# data[np.isnan(data)] = 0
# data[:] = -np.log(data)
| [
"woutdenolf@users.sf.net"
] | woutdenolf@users.sf.net |
99939a85d3de3cf8133c71badb9e55e260e45e0d | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/Y/yomalM/jan11-match-result-premier-league.py | 9c1efa0db2483dcaad83cb5cfd0304710ab23f0b | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,460 | py | ############
#Note from creator(Yomal Mudalige)- I have been tried to change column orders, however still it is not finalized. Hence I added prefix 'A', 'B'.etc. Thanks
#Reference - Scraperwiki Tutorial 3
##################
from scraperwiki.apiwrapper import getKeys, getData, getDataByDate, getDataByLocation
import datetime #to display update time
# Python 2 script: emits an HTML results table for the "test_pl" scraper.
# NOTE(review): the whole script body is repeated verbatim below this block --
# it looks like an accidental paste, so the table is printed twice.
sourcescraper = "test_pl"
limit = 20
offset = 0
keys = getKeys(sourcescraper)
keys.sort() # alphabetically
d= datetime.datetime.today()
print '<th><font color =black face=verdana size=2>Last Update:</th>'
print d
print '</br>'
print '</br>'
print '<th><font color=990033 face=verdana size=5>January 2011- English Premier League</th>'
print '</br>'
print '</br>'
print '<th><font color=blue face=tahoma size=3><a href="http://scraperwiki.com/views/premier-league-table-201011-view/full/">Back to Points Table</a></th>'
print '<table border="5" cellpadding="15" style="border-collapse:collapse;">'
# column headings
print "<tr>",
for key in keys:
    print "<th>%s</th>" % key,
print "</tr>"
# rows
for row in getData(sourcescraper, limit, offset):
    print "<tr>",
    for key in keys:
        print "<td>%s</td>" % row.get(key),
    print "</tr>"
print "</table>"
############
#Note from creator(Yomal Mudalige)- I have been tried to change column orders, however still it is not finalized. Hence I added prefix 'A', 'B'.etc. Thanks
#Reference - Scraperwiki Tutorial 3
##################
from scraperwiki.apiwrapper import getKeys, getData, getDataByDate, getDataByLocation
import datetime #to display update time
sourcescraper = "test_pl"
limit = 20
offset = 0
keys = getKeys(sourcescraper)
keys.sort() # alphabetically
d= datetime.datetime.today()
print '<th><font color =black face=verdana size=2>Last Update:</th>'
print d
print '</br>'
print '</br>'
print '<th><font color=990033 face=verdana size=5>January 2011- English Premier League</th>'
print '</br>'
print '</br>'
print '<th><font color=blue face=tahoma size=3><a href="http://scraperwiki.com/views/premier-league-table-201011-view/full/">Back to Points Table</a></th>'
print '<table border="5" cellpadding="15" style="border-collapse:collapse;">'
# column headings
print "<tr>",
for key in keys:
print "<th>%s</th>" % key,
print "</tr>"
# rows
for row in getData(sourcescraper, limit, offset):
print "<tr>",
for key in keys:
print "<td>%s</td>" % row.get(key),
print "</tr>"
print "</table>"
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
d479dd125c06d779076e5385e954ef319cd87dc7 | d5f75adf5603927396bdecf3e4afae292143ddf9 | /python/paddle/fluid/incubate/fleet/parameter_server/ir/heter_trainer_pass.py | 0018b73e264798dd21450564812dfca5ec992038 | [
"Apache-2.0"
] | permissive | jiweibo/Paddle | 8faaaa1ff0beaf97ef7fb367f6c9fcc065f42fc4 | 605a2f0052e0ffb2fab3a4cf4f3bf1965aa7eb74 | refs/heads/develop | 2023-07-21T03:36:05.367977 | 2022-06-24T02:31:11 | 2022-06-24T02:31:11 | 196,316,126 | 3 | 2 | Apache-2.0 | 2023-04-04T02:42:53 | 2019-07-11T03:51:12 | Python | UTF-8 | Python | false | false | 3,295 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import warnings
import paddle.fluid.core as core
import paddle.fluid.framework as framework
from paddle.fluid.transpiler.details.program_utils import delete_ops
from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import find_heter_ops
from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import union_forward_gradient_op
from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import create_heter_program
from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import create_trainer_program
from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import find_block_joints
from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import find_op_input_output
from paddle.fluid.incubate.fleet.parameter_server.ir.trainer_pass import get_vars_name_in_block
def split_heter_worker_ops_pass(program, config, stage_id, device):
    """
    split heter worker program from origin-program
    1. find heter op (located on different device)
    2. find input&output of every heter-block
    3. create heter worker program, add listen&serv op

    Returns the new heter-worker Program, or the original *program*
    unchanged when no op runs on a heterogeneous device.
    """
    # Ops not explicitly placed are treated as CPU ops.
    # (typo "deveice" preserved from the original local name)
    default_deveice = "cpu"
    program, heter_ops, _, program_block_ops = find_heter_ops(
        program, default_deveice)
    if len(heter_ops) == 0:
        warnings.warn(
            "Currently running in Heter Parameter Server mode, but no OP running on heterogeneous devices, Please check your code."
        )
        return program
    # Merge forward and gradient ops into joint blocks before splitting.
    program_block_ops = union_forward_gradient_op(program_block_ops)
    block_vars_detail = find_block_joints(program, program_block_ops, heter_ops)
    heter_program = framework.Program()
    create_heter_program(program, config, heter_program, program_block_ops,
                         heter_ops, block_vars_detail, device, stage_id)
    return heter_program
def split_trainer_ops_pass(program, config, default_device="cpu"):
    """
    split cpu-trainer program from origin-program
    1. find heter op (located on different device)
    2. find input&output of every heter-block
    3. create cpu-trainer program, add send&recv op

    Returns a clone of *program* rewritten as the CPU-trainer program.
    """
    # Todo: support user define default_device (MrChengmo)
    default_device_ = default_device
    program, heter_ops, default_ops, program_block_ops = find_heter_ops(
        program, default_device_)
    # Merge forward and gradient ops into joint blocks before splitting.
    program_block_ops = union_forward_gradient_op(program_block_ops)
    block_vars_detail = find_block_joints(program, program_block_ops, heter_ops)
    trainer_program = program.clone()
    create_trainer_program(trainer_program, program, config, program_block_ops,
                           block_vars_detail)
    return trainer_program
| [
"noreply@github.com"
] | jiweibo.noreply@github.com |
a875b5e5061429293af6336e793068a510e23783 | b7429c03761db1d1a58494c92da355542dba86e7 | /Python高效开发实战——Django、Tornado、Flask、Twisted/src/chapter9/9-5.py | e87ae9db8f8afd62caec2efd68829be229947ab2 | [] | no_license | daedalaus/practice | 90abc1cd15ca5230a8a9deb2bbc532c3f36f307b | 916a3269cb3946f33bc87b289c5f20f26c265436 | refs/heads/master | 2020-11-30T17:16:29.337509 | 2020-01-18T08:48:49 | 2020-01-18T08:48:49 | 230,448,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
import threading
import time
import datetime
host = '127.0.0.1'
port = 8007
class Echo(DatagramProtocol): # DatagramProtocol subclass
    def startProtocol(self): # called once the transport is ready
        self.transport.connect(host, port) # pin the socket to one peer address/port
        self.transport.write(b'Here is the first connected message') # send data
        print('Connection created!')
    def datagramReceived(self, datagram, addr): # called when data arrives
        print(datagram.decode('utf-8'))
    def connectionRefused(self): # called after each failed send
        print('sent failed!')
    def stopProtocol(self): # called when the protocol is shut down
        print('Connection closed!')
protocol = Echo() # instantiate the Protocol subclass
def routine(): # send a message to the server every 5 seconds
    time.sleep(1)
    while True:
        protocol.transport.write(('%s: say hello to myself.' % datetime.datetime.now()).encode('utf-8'))
        time.sleep(5)
threading.Thread(target=routine).start()
reactor.listenUDP(port, protocol) # message receiver
reactor.run() # block and run the event loop
| [
"lzy_2318@163.com"
] | lzy_2318@163.com |
5469d275c96e668c247f7b4dfeb30ac7a2ada67e | 7dbd800d64919e93805c16f665e78c81ca2b5278 | /py4e/assignment10/words_counting.py | bf159549ea48bd0ac02e76d938c5b5bbc9c5854b | [] | no_license | merak0514/python | f6ea0ae372a6c52e2a5fae7c78f3bce13770b774 | 3451854a07b97fadf6ffd4d8f41863181a2243cb | refs/heads/master | 2022-05-09T23:58:15.462620 | 2022-04-04T12:24:09 | 2022-04-04T12:24:09 | 123,144,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | # count words
# Count word frequencies in a text file and print the most common words.
import string

fname = input('File name: ')
try:
    fhand = open(fname)
except OSError:
    # Narrowed from a bare ``except``: report only file-access failures
    # instead of also swallowing e.g. KeyboardInterrupt.
    print('Error')
    quit()

wordlist = {}
with fhand:  # ensure the file is closed once counting is done
    for line in fhand:
        line = line.rstrip()
        # Strip punctuation and normalize case so "Word," counts as "word".
        line = line.translate(line.maketrans('', '', string.punctuation))
        line = line.lower()
        for word in line.split():
            wordlist[word] = wordlist.get(word, 0) + 1

# The pronoun "i" was lower-cased above; restore its conventional spelling.
if 'i' in wordlist:
    wordlist['I'] = wordlist.pop('i')

# Pairs are (count, word), so a descending sort orders by frequency first.
new_wordlist = sorted(((count, word) for (word, count) in wordlist.items()),
                      reverse=True)
print('Most used words: ')
for count, word in new_wordlist[0:10]:  # top ten (original sliced [0:9] = nine)
    print(word, count)
| [
"2692195809@qq.com"
] | 2692195809@qq.com |
49ed7ee7902cbe3fdd70e37b96b8de65bdc08c75 | af5c5761c21f9251cc89565ab3b40b0dd560555f | /doc/.src/book/src/ex_approx1D_session.py | 771173796d96b928e1a913e907b486eb1ff1de7b | [] | no_license | kent-and/fem-book | d67e282a47b23650b8d9ee1c7a04989f92121848 | b24022ad59bb2185f7524b71fba58e5d994ab806 | refs/heads/master | 2020-05-20T22:26:24.287598 | 2016-08-17T19:47:23 | 2016-08-17T19:47:23 | 53,691,843 | 1 | 0 | null | 2016-03-11T19:33:14 | 2016-03-11T19:33:13 | null | UTF-8 | Python | false | false | 496 | py | # to be run by scitools file2interactive
from approx1D import *
x = sym.Symbol('x')
f = 10*(x-1)**2-1
# Least-squares fit of f with polynomial basis {1, x, x^2} on [1, 2];
# since f itself is quadratic the fit should reproduce f exactly.
u = least_squares(f=f, psi=[1, x, x**2], Omega=[1, 2])
print u
print sym.expand(f)
# show how equal x**i functions are (ill-conditioning)
import numpy as np
x = np.linspace(1, 2, 1001)  # NOTE: rebinds x from sympy symbol to array
import matplotlib.pyplot as plt
#import scitools.std as plt
for i in range(15):
    plt.plot(x, x**i, '-')
    plt.hold('on')
plt.savefig('tmp_basis_xini.pdf')
plt.savefig('tmp_basis_xini.png')
plt.show()
| [
"hpl@simula.no"
] | hpl@simula.no |
cfff7859b73b0c1cd037a8cc56e5904ef005f50f | 5bea85271fa7d8d950f6f75ef251e2fe4a3672b8 | /model/vae.py | 43efcd8a555657c9aeb62ef19ab4c9a62a73e053 | [] | no_license | umutkucukaslan/thesis | ca4095c8d9caa128edb3be3dfdbd031c2d22a28f | 149a9612e9f76fbaff227d8aaa9b949aa9ed33c7 | refs/heads/master | 2023-03-26T13:00:27.677705 | 2021-02-05T22:19:49 | 2021-02-05T22:19:49 | 221,968,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py |
import tensorflow as tf
def build_encoder(input_shape=(128, 128, 3), output_shape=128, filters=(32, 64, 128), kernel_size=5,
                  pool_size=(2, 2), batch_normalization=False, activation="relu", name='encoder', alpha=0.2):
    """
    Build a convolutional encoder emitting latent std/mean vectors (VAE-style).

    Assumes input is in the range [-1, 1]
    :param input_shape: (height, width, channels) of the input images
    :param output_shape: size of each latent output vector
    :param filters: Conv2D filter count for each downsampling stage
    :param kernel_size: Conv2D kernel size shared by all stages
    :param pool_size: MaxPool2D window applied after each convolution
    :param batch_normalization: append BatchNormalization per stage if True
    :param activation: None, "relu", "leakyrelu" or "swish"
    :param name: name of the returned Keras model
    :param alpha: negative slope used when activation == "leakyrelu"
    :return: tf.keras.Model mapping images to [std, mean] (note the order)
    """
    inputs = tf.keras.Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    x = inputs
    for i in range(len(filters)):
        # Stage: conv (linear) -> optional nonlinearity -> pool -> optional BN.
        x = tf.keras.layers.Conv2D(filters=filters[i],
                                   kernel_size=kernel_size,
                                   padding='same',
                                   activation=None)(x)
        if activation == "relu":
            x = tf.keras.layers.ReLU()(x)
        elif activation == "leakyrelu":
            x = tf.keras.layers.LeakyReLU(alpha)(x)
        elif activation == "swish":
            x = tf.nn.swish(x)
        x = tf.keras.layers.MaxPool2D(pool_size=pool_size, padding='same')(x)
        if batch_normalization:
            x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.Flatten()(x)
    # Two parallel linear heads over the flattened features.
    output_mean = tf.keras.layers.Dense(output_shape, activation=None, name='mean')(x)
    output_std = tf.keras.layers.Dense(output_shape, activation=None, name='std')(x)
    return tf.keras.Model(inputs=inputs, outputs=[output_std, output_mean], name=name)
| [
"umutkucukaslan@hotmail.com"
] | umutkucukaslan@hotmail.com |
1d193ed151a98e53a1500f108127e2414d73cb61 | 04dd31f1d4c84b96777107701e727c54c4fc704e | /MINE_PY27/ThePracticeOfML/FirstEditionMaster/7-9.PY | b3626c77bc7424c05457cd67cca316ba71843ca2 | [] | no_license | stormstone/LEARN-PYTHON | 8df6ee96ebf1d2da7703d726dd18061956e2412f | 23636e4c267f82815be5e203d0f4fd66acd18ccf | refs/heads/master | 2021-01-01T18:10:47.650160 | 2017-12-21T14:14:59 | 2017-12-21T14:14:59 | 98,271,596 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#code:myhaspl@qq.com
#7-9.py
import mplannliner as nplann
traindata1=[[[9,25],-1],[[5,8],-1],[[15,31],-1],[[35,62],-1],[[19,40],-1],[[28,65],1],[[20,59],1],[[9,41],1],[[12,60],1],[[2,37],1]]
myann=nplann.Mplannliner()
#样本初始化
myann.samples_init(traindata1)
#学习率初始化
myann.a_init(0.1)
#搜索时间常数初始化
myann.r_init(50)
#最大训练次数
myann.maxtry_init(500)
#期望最小误差
myann.e_init(0.05)
#训练
myann.train()
#仿真,测试,对未知样本分类
myc=myann.simulate([35,68])
print "[35,68]"
if myc==1:
print u"正类"
else:
print u"负类"
#将测试点在最终效果图上显示出来,将它加入drawponint集,测试点表现为"*",并且色彩由其最终的分类结果而决定
myann.drawponint_add([35,68])
myc=myann.simulate([35,82])
print "[35,82]"
if myc==1:
print u"正类"
else:
print u"负类"
myann.drawponint_add([35,82])
myann.draw2d()
#下面直接使用默认参数进行训练
traindata2=[[[9,25,30],-1],[[5,8,12],-1],[[15,31,49],-1],[[35,62,108],-1],[[19,40,60],-1],[[28,65,98],1],[[20,59,72],1],[[9,41,38],1],[[12,60,46],1],[[2,37,18],1]]
myann2=nplann.Mplannliner()
myann2.samples_init(traindata2)
myann2.train()
myc=myann2.simulate([35,68,110])
print "[35,68,110]"
if myc==1:
print u"正类"
else:
print u"负类" | [
"2499144744@qq.com"
] | 2499144744@qq.com |
0ad1badda72db888146eeae0cbbc7f9ec618fa09 | c641eca2b95da76ab0b0fc0ce3156e496559cebd | /src/0106/solution.py | 6f09f0e7cb4230ad2aa68d13f1ddc9a5a9611f8c | [
"MIT"
] | permissive | jiangshanmeta/lintcode | 8915365ea5c8d602fca192e33e374be26130d4e6 | 9cc07edcaa3e7ec8e7b76a19020e6743ebc03875 | refs/heads/master | 2023-08-07T22:22:15.974814 | 2023-08-06T08:49:58 | 2023-08-06T08:49:58 | 212,155,398 | 8 | 2 | MIT | 2023-08-06T08:50:00 | 2019-10-01T17:18:04 | JavaScript | UTF-8 | Python | false | false | 925 | py | """
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """
    @param: head: The first node of linked list.
    @return: a tree node
    """
    def sortedListToBST(self, head):
        # Convert a sorted singly linked list into a height-balanced BST:
        # find the middle node with slow/fast pointers, make it the root,
        # and recurse on the two halves.
        if head is None :
            return head
        if head.next is None :
            return TreeNode(head.val)
        prev = None
        fast = head
        slow = head
        while fast and fast.next :
            prev = slow
            slow = slow.next
            fast = fast.next.next
        # Detach the left half; slow is the middle node.
        prev.next = None
        root = TreeNode(slow.val)
        root.left = self.sortedListToBST(head)
        root.right = self.sortedListToBST(slow.next)
        return root
| [
"540118044@qq.com"
] | 540118044@qq.com |
01e502bb11aaf0794e54e08623d23cca47c35bc0 | 4b7e282fe480415f5d52c0fc0429f144156190fe | /google/ads/googleads/v8/services/services/dynamic_search_ads_search_term_view_service/client.py | 58065d2c0dc7a34d142d51ebba7ce76a932cffd6 | [
"Apache-2.0"
] | permissive | Z2Xsoft/google-ads-python | c4750357bb19da91bb3b6bf2fa84bef9d2df36d3 | 1779d52a0446c8afb2437b0a9e103dcb849f5590 | refs/heads/main | 2023-08-18T15:22:17.840364 | 2021-09-26T04:08:53 | 2021-09-26T04:08:53 | 410,444,398 | 0 | 0 | Apache-2.0 | 2021-09-26T04:08:53 | 2021-09-26T03:55:38 | null | UTF-8 | Python | false | false | 19,697 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import (
dynamic_search_ads_search_term_view,
)
from google.ads.googleads.v8.services.types import (
dynamic_search_ads_search_term_view_service,
)
from .transports.base import (
DynamicSearchAdsSearchTermViewServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import DynamicSearchAdsSearchTermViewServiceGrpcTransport
class DynamicSearchAdsSearchTermViewServiceClientMeta(type):
"""Metaclass for the DynamicSearchAdsSearchTermViewService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[DynamicSearchAdsSearchTermViewServiceTransport]]
_transport_registry[
"grpc"
] = DynamicSearchAdsSearchTermViewServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[DynamicSearchAdsSearchTermViewServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class DynamicSearchAdsSearchTermViewServiceClient(
metaclass=DynamicSearchAdsSearchTermViewServiceClientMeta
):
"""Service to fetch dynamic search ads views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DynamicSearchAdsSearchTermViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DynamicSearchAdsSearchTermViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> DynamicSearchAdsSearchTermViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
DynamicSearchAdsSearchTermViewServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def dynamic_search_ads_search_term_view_path(
customer_id: str,
ad_group_id: str,
search_term_fingerprint: str,
headline_fingerprint: str,
landing_page_fingerprint: str,
page_url_fingerprint: str,
) -> str:
"""Return a fully-qualified dynamic_search_ads_search_term_view string."""
return "customers/{customer_id}/dynamicSearchAdsSearchTermViews/{ad_group_id}~{search_term_fingerprint}~{headline_fingerprint}~{landing_page_fingerprint}~{page_url_fingerprint}".format(
customer_id=customer_id,
ad_group_id=ad_group_id,
search_term_fingerprint=search_term_fingerprint,
headline_fingerprint=headline_fingerprint,
landing_page_fingerprint=landing_page_fingerprint,
page_url_fingerprint=page_url_fingerprint,
)
@staticmethod
def parse_dynamic_search_ads_search_term_view_path(
path: str,
) -> Dict[str, str]:
"""Parse a dynamic_search_ads_search_term_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/dynamicSearchAdsSearchTermViews/(?P<ad_group_id>.+?)~(?P<search_term_fingerprint>.+?)~(?P<headline_fingerprint>.+?)~(?P<landing_page_fingerprint>.+?)~(?P<page_url_fingerprint>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[
str, DynamicSearchAdsSearchTermViewServiceTransport, None
] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the dynamic search ads search term view service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.DynamicSearchAdsSearchTermViewServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(
transport, DynamicSearchAdsSearchTermViewServiceTransport
):
# transport is a DynamicSearchAdsSearchTermViewServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = DynamicSearchAdsSearchTermViewServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_dynamic_search_ads_search_term_view(
self,
request: dynamic_search_ads_search_term_view_service.GetDynamicSearchAdsSearchTermViewRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dynamic_search_ads_search_term_view.DynamicSearchAdsSearchTermView:
r"""Returns the requested dynamic search ads search term view in
full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetDynamicSearchAdsSearchTermViewRequest`):
The request object. Request message for
[DynamicSearchAdsSearchTermViewService.GetDynamicSearchAdsSearchTermView][google.ads.googleads.v8.services.DynamicSearchAdsSearchTermViewService.GetDynamicSearchAdsSearchTermView].
resource_name (:class:`str`):
Required. The resource name of the
dynamic search ads search term view to
fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.DynamicSearchAdsSearchTermView:
A dynamic search ads search term
view.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dynamic_search_ads_search_term_view_service.GetDynamicSearchAdsSearchTermViewRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
dynamic_search_ads_search_term_view_service.GetDynamicSearchAdsSearchTermViewRequest,
):
request = dynamic_search_ads_search_term_view_service.GetDynamicSearchAdsSearchTermViewRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_dynamic_search_ads_search_term_view
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("DynamicSearchAdsSearchTermViewServiceClient",)
| [
"noreply@github.com"
] | Z2Xsoft.noreply@github.com |
4ae1478682dfe9974355b06fd2ab3ad41d0ce6fa | d8d9dec0450d5f7fe15f9d5a42d7c2998d4cb649 | /tools/import-categories.py | da45ddc3a16e9c0df8d2bb7f816e67caccaa6e1f | [] | no_license | hansenanders/bank | 678c2af2e3d12502bf49af28ec7758dc4af915ed | e79f0957f51e3360c6f01916e87dab79790fa9d9 | refs/heads/master | 2021-01-12T15:07:25.020993 | 2016-11-09T19:26:23 | 2016-11-09T19:26:23 | 71,704,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | #!/usr/bin/env python3
import argparse
import sqlite3
import json
import sys
parser = argparse.ArgumentParser(description='import categories to sqlitedb.')
parser.add_argument(
'-d', '--sqlite-db-file', required=True, action='store', help='path to SQLiteDB')
parser.add_argument('--data-file', required=True, help='data file path')
args = parser.parse_args()
db = sqlite3.connect(args.sqlite_db_file)
with open(args.data_file) as f:
data = json.load(f)
cur = db.cursor()
for d in data:
try:
query = '''INSERT INTO categories (name, type) VALUES ("{}", "{}")'''.format(d, data[d])
print(query)
cur.execute(query)
db.commit()
except Exception as e:
print(e)
db.rollback()
cur.execute("SELECT * FROM categories")
data = cur.fetchall()
print(data)
db.close()
| [
"you@example.com"
] | you@example.com |
726b5f70d7d7ab3a0b0228386190ec07a7ec2710 | 54408d1b51d1557dea78e0c1dc970240c428a25a | /python/tree/binary_tree_level_order_traversal.py | e4d8b0f7d006c52364fa6d8c51afb5063c397111 | [
"Apache-2.0"
] | permissive | MiKueen/interview | 5d115f570b7d2fea607bf2b4dafdba92f035be82 | 45741d49b49a9817d43b27b05757d95f3b9d9775 | refs/heads/master | 2020-08-13T22:57:55.968032 | 2019-10-14T13:56:07 | 2019-10-14T13:56:07 | 215,052,698 | 0 | 0 | Apache-2.0 | 2019-10-14T13:34:40 | 2019-10-14T13:34:39 | null | UTF-8 | Python | false | false | 1,253 | py | '''
Author : MiKueen
Level : Medium
Problem Statement : Binary Tree Level Order Traversal
https://leetcode.com/problems/binary-tree-level-order-traversal/
Given a binary tree, return the level order traversal of its nodes' values. (ie, from left to right, level by level).
For example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its level order traversal as:
[
[3],
[9,20],
[15,7]
]
'''
from collections import deque
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def levelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
level, res = deque([root]), []
while level:
next_level = []
for i in range(len(level)):
node = level.popleft()
if node.left:
level.append(node.left)
if node.right:
level.append(node.right)
next_level.append(node.val)
res.append(next_level)
return res
| [
"keshvi2298@gmail.com"
] | keshvi2298@gmail.com |
6d6e09031e8fdc4910811e20775215820b3f3f11 | 31ff0321d4aa46e43c193e82cf96806df127d63e | /source/code/glyphNanny/defaults.py | a1f05f71eaabc0b669226549613613353c1d8d29 | [
"MIT"
] | permissive | typesupply/glyph-nanny | 0c76fa7d3abb4914782669dc053ce6ed6738a1e5 | dd9db49ca8a7725bb311d89d9ee1efc866298f20 | refs/heads/master | 2023-04-13T05:58:16.149056 | 2023-04-03T20:43:03 | 2023-04-03T20:43:03 | 22,653,004 | 38 | 10 | MIT | 2023-04-03T20:43:05 | 2014-08-05T17:37:41 | Python | UTF-8 | Python | false | false | 3,826 | py | from mojo.extensions import (
registerExtensionDefaults,
getExtensionDefault,
setExtensionDefault
)
from .tests.registry import testRegistry
defaultKeyStub = "com.typesupply.GlyphNanny2."
defaults = {
defaultKeyStub + "displayLiveReport" : True,
defaultKeyStub + "testDuringDrag" : False,
defaultKeyStub + "displayTitles" : True,
defaultKeyStub + "colorInform" : (0, 0, 0.7, 0.3),
defaultKeyStub + "colorReview" : (1, 0.7, 0, 0.7),
defaultKeyStub + "colorRemove" : (1, 0, 0, 0.5),
defaultKeyStub + "colorInsert" : (0, 1, 0, 0.75),
defaultKeyStub + "lineWidthRegular" : 1,
defaultKeyStub + "lineWidthHighlight" : 4,
defaultKeyStub + "textFont" : "system",
defaultKeyStub + "textFontWeight" : "medium",
defaultKeyStub + "textPointSize" : 10,
}
for testIdentifier in testRegistry.keys():
defaults[defaultKeyStub + "testState." + testIdentifier] = True
registerExtensionDefaults(defaults)
# -----
# Tests
# -----
def getTestState(testIdentifier):
return getExtensionDefault(defaultKeyStub + "testState." + testIdentifier)
def setTestState(testIdentifier, value):
setExtensionDefault(defaultKeyStub + "testState." + testIdentifier, value)
# -------
# Display
# -------
# Live Report
def getDisplayLiveReport():
return getExtensionDefault(defaultKeyStub + "displayLiveReport")
def setDisplayLiveReport(value):
setExtensionDefault(defaultKeyStub + "displayLiveReport", value)
# Test During Drag
def getTestDuringDrag():
return getExtensionDefault(defaultKeyStub + "testDuringDrag")
def setTestDuringDrag(value):
setExtensionDefault(defaultKeyStub + "testDuringDrag", value)
# Titles
def getDisplayTitles():
return getExtensionDefault(defaultKeyStub + "displayTitles")
def setDisplayTitles(value):
setExtensionDefault(defaultKeyStub + "displayTitles", value)
# ------
# Colors
# ------
# Inform
def getColorInform():
return getExtensionDefault(defaultKeyStub + "colorInform")
def setColorInform(value):
setExtensionDefault(defaultKeyStub + "colorInform", value)
# Review
def getColorReview():
return getExtensionDefault(defaultKeyStub + "colorReview")
def setColorReview(value):
setExtensionDefault(defaultKeyStub + "colorReview", value)
# Remove
def getColorRemove():
return getExtensionDefault(defaultKeyStub + "colorRemove")
def setColorRemove(value):
setExtensionDefault(defaultKeyStub + "colorRemove", value)
# Insert
def getColorInsert():
return getExtensionDefault(defaultKeyStub + "colorInsert")
def setColorInsert(value):
setExtensionDefault(defaultKeyStub + "colorInsert", value)
# -----------
# Line Widths
# -----------
# Line: Regular
def getLineWidthRegular():
return getExtensionDefault(defaultKeyStub + "lineWidthRegular")
def setLineWidthRegular(value):
setExtensionDefault(defaultKeyStub + "lineWidthRegular", value)
# Line: Highlight
def getLineWidthHighlight():
return getExtensionDefault(defaultKeyStub + "lineWidthHighlight")
def setLineWidthHighlight(value):
setExtensionDefault(defaultKeyStub + "lineWidthHighlight", value)
# ----
# Text
# ----
def getTextFont():
data = dict(
font=getExtensionDefault(defaultKeyStub + "textFont"),
weight=getExtensionDefault(defaultKeyStub + "textFontWeight"),
pointSize=getExtensionDefault(defaultKeyStub + "textPointSize"),
)
return data
def setTextFont(data):
font = data.get("font")
if font is not None:
setExtensionDefault(defaultKeyStub + "textFont", font)
weight = data.get("textFontWeight")
if weight is not None:
setExtensionDefault(defaultKeyStub + "textFontWeight", weight)
pointSize = data.get("pointSize")
if pointSize is not None:
setExtensionDefault(defaultKeyStub + "textPointSize", pointSize)
| [
"tal@typesupply.com"
] | tal@typesupply.com |
7c3f6a0d31b8b6fd29726bd1b1238609fb158ee8 | 83a63364499df53dec8d0175370c959a231563e9 | /blog/tangerine/migrations/0008_auto_20171119_0930.py | 506a228963f2730ccb473da4bab337a10b0613b7 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | shacker/tangerine | f328750467d21e9be2642c833c0cf5e44f7802aa | 0ca1571a52f6901d1ae10243c4514630853d51ed | refs/heads/main | 2023-05-10T23:25:09.294128 | 2023-04-29T07:19:19 | 2023-04-29T07:19:19 | 108,086,332 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | # Generated by Django 2.1.dev20171104145028 on 2017-11-19 17:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tangerine', '0007_auto_20171118_1743'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='author',
field=models.ForeignKey(blank=True, help_text='ForeignKey to User object; used for authenticated commenters only.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"shacker@birdhouse.org"
] | shacker@birdhouse.org |
f5a68a18e790ae404066775e19811cf9838c0a67 | 8d9c2748b1586e772ee5475f3733528b07a2631f | /checkov/version.py | 7ae96e3a8aeb3d95c10d77776592d589bc87b3c0 | [
"Apache-2.0"
] | permissive | stevieg27/checkov | e71256e3b6b3ba715996f8445d3c99c44817aed1 | 53631964b08039ba4bb90e08c45b9ef3cbbb2e46 | refs/heads/master | 2023-02-02T00:43:29.045174 | 2020-12-14T09:52:04 | 2020-12-14T09:55:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | version = '1.0.671'
| [
"action@github.com"
] | action@github.com |
a738842a3973e0eda8c51df858a61a1f65242169 | 37fef592f365194c28579f95abd222cc4e1243ae | /Financial_Modeling/Project 4.py | 3e03974ee863c96966c95fe38305908e45d82a6c | [] | no_license | edimaudo/Python-projects | be61e0d3fff63fb7bd00513dbf1401e2c1822cfb | 85d54badf82a0b653587a02e99daf389df62e012 | refs/heads/master | 2023-04-07T03:26:23.259959 | 2023-03-24T12:03:03 | 2023-03-24T12:03:03 | 72,611,253 | 4 | 3 | null | 2022-10-31T18:10:41 | 2016-11-02T06:37:17 | null | UTF-8 | Python | false | false | 1,665 | py | # Libraries
import streamlit as st
import pandas as pd
import matplotlib as mp
import plotly.express as px
import os, os.path
import warnings
import numpy as np
from datetime import datetime
import random
warnings.simplefilter(action='ignore', category=FutureWarning)
import math
st.set_page_config(
page_title = "Project 4 - Full DCF Valuation",
layout = 'wide'
)
st.title('Financial Modeling using Python')
st.header("Project 4 - Full DCF Valuation")
st.subheader("The Problem")
with st.expander(" "):
st.write("""
The purpose of this exercise is to complete a full discounted cash flow valuation of a stock from end to end, complete
with all of the additional analyses you learned throughout the course. You can pick any publicly traded stock for
your valuation. You must find the data on your own and research the company’s operations. Ultimately the main
output is your valuation of the stock, but you must also provide a written justification of why you believe this value
to be correct. You must discuss and show how variable this estimate is, as well as what could have large effects on
the valuation. You should also consider several realistic scenarios based on states of the economy, and how these
scenarios affect the valuation.
Some of the components of your project should include:
• WACC estimation
• FCF estimation and forecasting (must forecast financial statements, not only FCFs directly, though that can
be an extra check)
• Terminal value estimation using both perpetuity growth and various exit multiples
• Monte carlo simulation
• Sensitivity analysis
• Scenario analysis
• Visualization
""")
| [
"edimaudo@gmail.com"
] | edimaudo@gmail.com |
c6fe08208cd171c3893d2b8d5e0cbe8c5b590934 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-3/cf8e84fc6289f0d1a9ec07b91f1e7f1b2716028c-<api_call_for_rule>-fix.py | de82b86e6f485096ebccf7e61c3e9cd1af3ec5a3 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,707 | py | def api_call_for_rule(module, api_call_object):
is_access_rule = (True if ('access' in api_call_object) else False)
payload = get_payload_from_parameters(module.params)
connection = Connection(module._socket_path)
result = {
'changed': False,
}
if module.check_mode:
return result
version = ((('v' + module.params['version']) + '/') if module.params.get('version') else '')
if is_access_rule:
copy_payload_without_some_params = get_copy_payload_without_some_params(payload, ['action', 'position'])
else:
copy_payload_without_some_params = get_copy_payload_without_some_params(payload, ['position'])
payload_for_equals = {
'type': api_call_object,
'params': copy_payload_without_some_params,
}
(equals_code, equals_response) = send_request(connection, version, 'equals', payload_for_equals)
result['checkpoint_session_uid'] = connection.get_session_uid()
if ((equals_code == 400) or (equals_code == 500)):
module.fail_json(msg=equals_response)
if (module.params['state'] == 'present'):
if (equals_code == 200):
if equals_response['equals']:
if (not is_equals_with_all_params(payload, connection, version, api_call_object, is_access_rule)):
equals_response['equals'] = False
if (not equals_response['equals']):
if ('position' in payload):
payload['new-position'] = payload['position']
del payload['position']
(code, response) = send_request(connection, version, ('set-' + api_call_object), payload)
if (code != 200):
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
result[api_call_object] = response
else:
pass
elif (equals_code == 404):
(code, response) = send_request(connection, version, ('add-' + api_call_object), payload)
if (code != 200):
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
result[api_call_object] = response
elif (module.params['state'] == 'absent'):
if (equals_code == 200):
(code, response) = send_request(connection, version, ('delete-' + api_call_object), payload)
if (code != 200):
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
elif (equals_code == 404):
pass
return result | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
7535defa22dff0766106cbd29c7abea90b76fd07 | 94e06376dc265c7bf1a2e51acb9714d02b21503a | /爬虫项目/58house/venv/Lib/site-packages/sqlalchemy/sql/selectable.py | b5bfdcbfe9f479a777e6048932b2764f1a19ed82 | [] | no_license | zhangquanliang/python | 4b2db32bed4e4746c8c49c309563f456dc41c6be | f45ef96e385b1cd6c5dfb53bf81042d953a9ec46 | refs/heads/master | 2021-04-26T23:30:12.217397 | 2019-03-20T06:18:14 | 2019-03-20T06:18:14 | 124,005,916 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 127,948 | py | # sql/selectable.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The :class:`.FromClause` class of SQL expression elements, representing
SQL tables and derived rowsets.
"""
from .elements import ClauseElement, TextClause, ClauseList, \
and_, Grouping, UnaryExpression, literal_column, BindParameter
from .elements import _clone, \
_literal_as_text, _interpret_as_column_or_from, _expand_cloned,\
_select_iterables, _anonymous_label, _clause_element_as_expr,\
_cloned_intersection, _cloned_difference, True_, \
_literal_as_label_reference, _literal_and_labels_as_label_reference
from .base import Immutable, Executable, _generative, \
ColumnCollection, ColumnSet, _from_objects, Generative
from . import type_api
from .. import inspection
from .. import util
from .. import exc
from operator import attrgetter
from . import operators
import operator
import collections
from .annotation import Annotated
import itertools
from sqlalchemy.sql.visitors import Visitable
def _interpret_as_from(element):
    """Coerce *element* into a FROM clause.

    A plain string is accepted with a rate-limited warning and wrapped
    in a :class:`.TextClause`; any other object must be inspectable as
    a selectable, otherwise :class:`.ArgumentError` is raised.
    """
    insp = inspection.inspect(element, raiseerr=False)
    if insp is None and isinstance(element, util.string_types):
        # raw strings should be declared explicitly via text() / table()
        util.warn_limited(
            "Textual SQL FROM expression %(expr)r should be "
            "explicitly declared as text(%(expr)r), "
            "or use table(%(expr)r) for more specificity",
            {"expr": util.ellipses_string(element)})
        return TextClause(util.text_type(element))
    try:
        return insp.selectable
    except AttributeError:
        # insp is None or has no .selectable attribute
        raise exc.ArgumentError("FROM expression expected")
def _interpret_as_select(element):
    """Coerce *element* into a :class:`.SelectBase`.

    The element is first coerced to a FROM clause; an :class:`.Alias`
    is unwrapped to its underlying element, and anything that is not
    already a :class:`.SelectBase` is converted via ``.select()``.
    """
    coerced = _interpret_as_from(element)
    if isinstance(coerced, Alias):
        coerced = coerced.original
    return coerced if isinstance(coerced, SelectBase) else coerced.select()
class _OffsetLimitParam(BindParameter):
    """Bound parameter representing a simple integer LIMIT/OFFSET value.

    The plain integer can be recovered at compile time via
    :attr:`._limit_offset_value`; see ``_offset_or_limit_clause_asint()``.
    """
    @property
    def _limit_offset_value(self):
        # the resolved value of this bound parameter
        return self.effective_value
def _offset_or_limit_clause(element, name=None, type_=None):
"""Convert the given value to an "offset or limit" clause.
This handles incoming integers and converts to an expression; if
an expression is already given, it is passed through.
"""
if element is None:
return None
elif hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif isinstance(element, Visitable):
return element
else:
value = util.asint(element)
return _OffsetLimitParam(name, value, type_=type_, unique=True)
def _offset_or_limit_clause_asint(clause, attrname):
"""Convert the "offset or limit" clause of a select construct to an
integer.
This is only possible if the value is stored as a simple bound parameter.
Otherwise, a compilation error is raised.
"""
if clause is None:
return None
try:
value = clause._limit_offset_value
except AttributeError:
raise exc.CompileError(
"This SELECT structure does not use a simple "
"integer value for %s" % attrname)
else:
return util.asint(value)
def subquery(alias, *args, **kwargs):
    r"""Return an :class:`.Alias` object derived
    from a :class:`.Select`.

    :param alias: the alias name for the subquery.
    :param \*args, \**kwargs: all remaining arguments are delivered to
     the :func:`select` function.
    """
    stmt = Select(*args, **kwargs)
    return stmt.alias(alias)
def alias(selectable, name=None, flat=False):
    """Return an :class:`.Alias` object.

    An :class:`.Alias` represents any :class:`.FromClause` with an
    alternate name assigned within SQL, typically using the ``AS``
    clause when generated, e.g. ``SELECT * FROM table AS aliasname``.
    The same functionality is available via the
    :meth:`~.FromClause.alias` method on all :class:`.FromClause`
    subclasses.

    For a :class:`.Table`, the effect is rendering
    ``tablename AS aliasname``; for a :func:`.select` construct, the
    effect is a named subquery, i.e. ``(select ...) AS aliasname``.

    If ``name`` is omitted, an "anonymous" name is deterministically
    generated at compile time: the name is unique within the statement
    and stable across successive compilations of the same statement
    object.

    :param selectable: any :class:`.FromClause` subclass,
     such as a table, select statement, etc.
    :param name: string name to be assigned as the alias.
     If ``None``, a name will be deterministically generated
     at compile time.
    :param flat: Will be passed through to if the given selectable
     is an instance of :class:`.Join` - see :meth:`.Join.alias`
     for details.

     .. versionadded:: 0.9.0

    """
    from_element = _interpret_as_from(selectable)
    return from_element.alias(name=name, flat=flat)
def lateral(selectable, name=None):
    """Return a :class:`.Lateral` object.

    :class:`.Lateral` is an :class:`.Alias` subclass representing a
    subquery with the LATERAL keyword applied.  A LATERAL subquery
    appears in the FROM clause of an enclosing SELECT, but may
    correlate to other FROM clauses of that SELECT.  This special case
    of subquery is only supported by a small number of backends,
    currently more recent PostgreSQL versions.

    .. versionadded:: 1.1

    .. seealso::

        :ref:`lateral_selects` - overview of usage.

    """
    from_element = _interpret_as_from(selectable)
    return from_element.lateral(name=name)
def tablesample(selectable, sampling, name=None, seed=None):
    """Return a :class:`.TableSample` object.

    :class:`.TableSample` is an :class:`.Alias` subclass representing
    a table with the TABLESAMPLE clause applied, which selects a
    randomly chosen approximate percentage of rows.  Multiple sampling
    methods are supported, most commonly BERNOULLI and SYSTEM.  The
    same functionality is available via the
    :meth:`.FromClause.tablesample` method.

    e.g.::

        from sqlalchemy import func

        selectable = people.tablesample(
                    func.bernoulli(1),
                    name='alias',
                    seed=func.random())
        stmt = select([selectable.c.people_id])

    Assuming ``people`` with a column ``people_id``, the above
    statement would render as::

        SELECT alias.people_id FROM
        people AS alias TABLESAMPLE bernoulli(:bernoulli_1)
        REPEATABLE (random())

    .. versionadded:: 1.1

    :param sampling: a ``float`` percentage between 0 and 100 or
     :class:`.functions.Function`.
    :param name: optional alias name
    :param seed: any real-valued SQL expression.  When specified, the
     REPEATABLE sub-clause is also rendered.

    """
    from_element = _interpret_as_from(selectable)
    return from_element.tablesample(sampling, name=name, seed=seed)
class Selectable(ClauseElement):
    """mark a class as being selectable"""
    __visit_name__ = 'selectable'
    # class-level flag allowing cheap "is this selectable?" checks
    is_selectable = True
    @property
    def selectable(self):
        # a Selectable is its own "selectable" for inspection purposes
        return self
class HasPrefixes(object):
    """Mixin providing the ability to render extra keywords after the
    opening statement keyword (SELECT, INSERT, UPDATE, DELETE)."""
    _prefixes = ()
    @_generative
    def prefix_with(self, *expr, **kw):
        r"""Add one or more expressions following the statement keyword, i.e.
        SELECT, INSERT, UPDATE, or DELETE. Generative.

        This is used to support backend-specific prefix keywords such as
        those provided by MySQL.

        E.g.::

            stmt = table.insert().prefix_with("LOW_PRIORITY", dialect="mysql")

        Multiple prefixes can be specified by multiple calls
        to :meth:`.prefix_with`.

        :param \*expr: textual or :class:`.ClauseElement` construct which
         will be rendered following the INSERT, UPDATE, or DELETE
         keyword.
        :param \**kw: A single keyword 'dialect' is accepted.  This is an
         optional string dialect name which will
         limit rendering of this prefix to only that dialect.

        """
        dialect = kw.pop('dialect', None)
        if kw:
            # anything other than 'dialect' is unsupported
            raise exc.ArgumentError(
                "Unsupported argument(s): %s" % ",".join(kw))
        self._setup_prefixes(expr, dialect)
    def _setup_prefixes(self, prefixes, dialect=None):
        # accumulate (clause, dialect) pairs; existing prefixes are kept
        additions = tuple(
            (_literal_as_text(p, warn=False), dialect) for p in prefixes)
        self._prefixes = self._prefixes + additions
class HasSuffixes(object):
    """Mixin providing the ability to render extra keywords after the
    statement as a whole."""
    _suffixes = ()
    @_generative
    def suffix_with(self, *expr, **kw):
        r"""Add one or more expressions following the statement as a whole.

        This is used to support backend-specific suffix keywords on
        certain constructs.

        E.g.::

            stmt = select([col1, col2]).cte().suffix_with(
                "cycle empno set y_cycle to 1 default 0", dialect="oracle")

        Multiple suffixes can be specified by multiple calls
        to :meth:`.suffix_with`.

        :param \*expr: textual or :class:`.ClauseElement` construct which
         will be rendered following the target clause.
        :param \**kw: A single keyword 'dialect' is accepted.  This is an
         optional string dialect name which will
         limit rendering of this suffix to only that dialect.

        """
        dialect = kw.pop('dialect', None)
        if kw:
            # anything other than 'dialect' is unsupported
            raise exc.ArgumentError(
                "Unsupported argument(s): %s" % ",".join(kw))
        self._setup_suffixes(expr, dialect)
    def _setup_suffixes(self, suffixes, dialect=None):
        # accumulate (clause, dialect) pairs; existing suffixes are kept
        additions = tuple(
            (_literal_as_text(p, warn=False), dialect) for p in suffixes)
        self._suffixes = self._suffixes + additions
class FromClause(Selectable):
    """Represent an element that can be used within the ``FROM``
    clause of a ``SELECT`` statement.
    The most common forms of :class:`.FromClause` are the
    :class:`.Table` and the :func:`.select` constructs.  Key
    features common to all :class:`.FromClause` objects include:
    * a :attr:`.c` collection, which provides per-name access to a collection
      of :class:`.ColumnElement` objects.
    * a :attr:`.primary_key` attribute, which is a collection of all those
      :class:`.ColumnElement` objects that indicate the ``primary_key`` flag.
    * Methods to generate various derivations of a "from" clause, including
      :meth:`.FromClause.alias`, :meth:`.FromClause.join`,
      :meth:`.FromClause.select`.
    """
    __visit_name__ = 'fromclause'
    # True when this FROM element has a name that qualifies its columns
    # (e.g. Table, Alias); False for anonymous constructs.
    named_with_column = False
    # NOTE(review): mutable class-level default; appears never mutated in
    # place (subclasses override or it is read-only) -- confirm before
    # changing.
    _hide_froms = []
    _is_join = False
    _is_select = False
    _is_from_container = False
    _textual = False
    """a marker that allows us to easily distinguish a :class:`.TextAsFrom`
    or similar object from other kinds of :class:`.FromClause` objects."""
    schema = None
    """Define the 'schema' attribute for this :class:`.FromClause`.
    This is typically ``None`` for most objects except that of
    :class:`.Table`, where it is taken as the value of the
    :paramref:`.Table.schema` argument.
    """
    def _translate_schema(self, effective_schema, map_):
        # base implementation: no schema translation; Table overrides
        return effective_schema
    # memoization helper; expiring it clears '_columns' (and anything
    # memoized through it) so the .c collection can be rebuilt
    _memoized_property = util.group_expirable_memoized_property(["_columns"])
    @util.deprecated(
        '1.1',
        message="``FromClause.count()`` is deprecated. Counting "
        "rows requires that the correct column expression and "
        "accommodations for joins, DISTINCT, etc. must be made, "
        "otherwise results may not be what's expected. "
        "Please use an appropriate ``func.count()`` expression "
        "directly.")
    @util.dependencies("sqlalchemy.sql.functions")
    def count(self, functions, whereclause=None, **params):
        """return a SELECT COUNT generated against this
        :class:`.FromClause`.
        The function generates COUNT against the
        first column in the primary key of the table, or against
        the first column in the table overall.   Explicit use of
        ``func.count()`` should be preferred::
            row_count = conn.scalar(
                select([func.count('*')]).select_from(table)
            )
        .. seealso::
            :data:`.func`
        """
        # count against the first PK column if present, else the first
        # column overall
        if self.primary_key:
            col = list(self.primary_key)[0]
        else:
            col = list(self.columns)[0]
        return Select(
            [functions.func.count(col).label('tbl_row_count')],
            whereclause,
            from_obj=[self],
            **params)
    def select(self, whereclause=None, **params):
        """return a SELECT of this :class:`.FromClause`.
        .. seealso::
            :func:`~.sql.expression.select` - general purpose
            method which allows for arbitrary column lists.
        """
        return Select([self], whereclause, **params)
    def join(self, right, onclause=None, isouter=False, full=False):
        """Return a :class:`.Join` from this :class:`.FromClause`
        to another :class:`FromClause`.
        E.g.::
            from sqlalchemy import join
            j = user_table.join(address_table,
                            user_table.c.id == address_table.c.user_id)
            stmt = select([user_table]).select_from(j)
        would emit SQL along the lines of::
            SELECT user.id, user.name FROM user
            JOIN address ON user.id = address.user_id
        :param right: the right side of the join; this is any
         :class:`.FromClause` object such as a :class:`.Table` object, and
         may also be a selectable-compatible object such as an ORM-mapped
         class.
        :param onclause: a SQL expression representing the ON clause of the
         join.  If left at ``None``, :meth:`.FromClause.join` will attempt to
         join the two tables based on a foreign key relationship.
        :param isouter: if True, render a LEFT OUTER JOIN, instead of JOIN.
        :param full: if True, render a FULL OUTER JOIN, instead of LEFT OUTER
         JOIN.  Implies :paramref:`.FromClause.join.isouter`.
         .. versionadded:: 1.1
        .. seealso::
            :func:`.join` - standalone function
            :class:`.Join` - the type of object produced
        """
        return Join(self, right, onclause, isouter, full)
    def outerjoin(self, right, onclause=None, full=False):
        """Return a :class:`.Join` from this :class:`.FromClause`
        to another :class:`FromClause`, with the "isouter" flag set to
        True.
        E.g.::
            from sqlalchemy import outerjoin
            j = user_table.outerjoin(address_table,
                            user_table.c.id == address_table.c.user_id)
        The above is equivalent to::
            j = user_table.join(
                address_table,
                user_table.c.id == address_table.c.user_id,
                isouter=True)
        :param right: the right side of the join; this is any
         :class:`.FromClause` object such as a :class:`.Table` object, and
         may also be a selectable-compatible object such as an ORM-mapped
         class.
        :param onclause: a SQL expression representing the ON clause of the
         join.  If left at ``None``, :meth:`.FromClause.join` will attempt to
         join the two tables based on a foreign key relationship.
        :param full: if True, render a FULL OUTER JOIN, instead of
         LEFT OUTER JOIN.
         .. versionadded:: 1.1
        .. seealso::
            :meth:`.FromClause.join`
            :class:`.Join`
        """
        return Join(self, right, onclause, True, full)
    def alias(self, name=None, flat=False):
        """return an alias of this :class:`.FromClause`.
        This is shorthand for calling::
            from sqlalchemy import alias
            a = alias(self, name=name)
        See :func:`~.expression.alias` for details.
        """
        # 'flat' is accepted as a no-op here; Join.alias overrides and
        # makes use of it
        return Alias(self, name)
    def lateral(self, name=None):
        """Return a LATERAL alias of this :class:`.FromClause`.
        The return value is the :class:`.Lateral` construct also
        provided by the top-level :func:`~.expression.lateral` function.
        .. versionadded:: 1.1
        .. seealso::
            :ref:`lateral_selects` -  overview of usage.
        """
        return Lateral(self, name)
    def tablesample(self, sampling, name=None, seed=None):
        """Return a TABLESAMPLE alias of this :class:`.FromClause`.
        The return value is the :class:`.TableSample` construct also
        provided by the top-level :func:`~.expression.tablesample` function.
        .. versionadded:: 1.1
        .. seealso::
            :func:`~.expression.tablesample` - usage guidelines and parameters
        """
        return TableSample(self, sampling, name, seed)
    def is_derived_from(self, fromclause):
        """Return True if this FromClause is 'derived' from the given
        FromClause.
        An example would be an Alias of a Table is derived from that Table.
        """
        # this is essentially an "identity" check in the base class.
        # Other constructs override this to traverse through
        # contained elements.
        return fromclause in self._cloned_set
    def _is_lexical_equivalent(self, other):
        """Return True if this FromClause and the other represent
        the same lexical identity.
        This tests if either one is a copy of the other, or
        if they are the same via annotation identity.
        """
        return self._cloned_set.intersection(other._cloned_set)
    @util.dependencies("sqlalchemy.sql.util")
    def replace_selectable(self, sqlutil, old, alias):
        """replace all occurrences of FromClause 'old' with the given Alias
        object, returning a copy of this :class:`.FromClause`.
        """
        return sqlutil.ClauseAdapter(alias).traverse(self)
    def correspond_on_equivalents(self, column, equivalents):
        """Return corresponding_column for the given column, or if None
        search for a match in the given dictionary.
        """
        col = self.corresponding_column(column, require_embedded=True)
        # NOTE(review): 'col is None and col in equivalents' tests whether
        # None itself is a key of 'equivalents' -- looks intentional but
        # unusual; confirm before altering.
        if col is None and col in equivalents:
            for equiv in equivalents[col]:
                nc = self.corresponding_column(equiv, require_embedded=True)
                if nc:
                    return nc
        return col
    def corresponding_column(self, column, require_embedded=False):
        """Given a :class:`.ColumnElement`, return the exported
        :class:`.ColumnElement` object from this :class:`.Selectable`
        which corresponds to that original
        :class:`~sqlalchemy.schema.Column` via a common ancestor
        column.
        :param column: the target :class:`.ColumnElement` to be matched
        :param require_embedded: only return corresponding columns for
         the given :class:`.ColumnElement`, if the given
         :class:`.ColumnElement` is actually present within a sub-element
         of this :class:`.FromClause`.  Normally the column will match if
         it merely shares a common ancestor with one of the exported
         columns of this :class:`.FromClause`.
        """
        def embedded(expanded_proxy_set, target_set):
            # True if every member of target_set (not already in the
            # expanded proxy set) has some clone present in it
            for t in target_set.difference(expanded_proxy_set):
                if not set(_expand_cloned([t])
                           ).intersection(expanded_proxy_set):
                    return False
            return True
        # don't dig around if the column is locally present
        if self.c.contains_column(column):
            return column
        col, intersect = None, None
        target_set = column.proxy_set
        cols = self.c._all_columns
        # scan exported columns, keeping the one whose proxy set has the
        # largest overlap with the target column's proxy set
        for c in cols:
            expanded_proxy_set = set(_expand_cloned(c.proxy_set))
            i = target_set.intersection(expanded_proxy_set)
            if i and (not require_embedded
                      or embedded(expanded_proxy_set, target_set)):
                if col is None:
                    # no corresponding column yet, pick this one.
                    col, intersect = c, i
                elif len(i) > len(intersect):
                    # 'c' has a larger field of correspondence than
                    # 'col'. i.e. selectable.c.a1_x->a1.c.x->table.c.x
                    # matches a1.c.x->table.c.x better than
                    # selectable.c.x->table.c.x does.
                    col, intersect = c, i
                elif i == intersect:
                    # they have the same field of correspondence. see
                    # which proxy_set has fewer columns in it, which
                    # indicates a closer relationship with the root
                    # column. Also take into account the "weight"
                    # attribute which CompoundSelect() uses to give
                    # higher precedence to columns based on vertical
                    # position in the compound statement, and discard
                    # columns that have no reference to the target
                    # column (also occurs with CompoundSelect)
                    col_distance = util.reduce(
                        operator.add,
                        [sc._annotations.get('weight', 1) for sc in
                         col.proxy_set if sc.shares_lineage(column)])
                    c_distance = util.reduce(
                        operator.add,
                        [sc._annotations.get('weight', 1) for sc in
                         c.proxy_set if sc.shares_lineage(column)])
                    if c_distance < col_distance:
                        col, intersect = c, i
        return col
    @property
    def description(self):
        """a brief description of this FromClause.
        Used primarily for error message formatting.
        """
        return getattr(self, 'name', self.__class__.__name__ + " object")
    def _reset_exported(self):
        """delete memoized collections when a FromClause is cloned."""
        self._memoized_property.expire_instance(self)
    @_memoized_property
    def columns(self):
        """A named-based collection of :class:`.ColumnElement` objects
        maintained by this :class:`.FromClause`.
        The :attr:`.columns`, or :attr:`.c` collection, is the gateway
        to the construction of SQL expressions using table-bound or
        other selectable-bound columns::
            select([mytable]).where(mytable.c.somecolumn == 1)
        """
        if '_columns' not in self.__dict__:
            self._init_collections()
            self._populate_column_collection()
        return self._columns.as_immutable()
    @_memoized_property
    def primary_key(self):
        """Return the collection of Column objects which comprise the
        primary key of this FromClause."""
        # _init_collections() installs 'primary_key' into the instance
        # __dict__, which this return statement then picks up (it does
        # not recurse into this property).
        self._init_collections()
        self._populate_column_collection()
        return self.primary_key
    @_memoized_property
    def foreign_keys(self):
        """Return the collection of ForeignKey objects which this
        FromClause references."""
        # same instance-attribute shadowing technique as primary_key
        self._init_collections()
        self._populate_column_collection()
        return self.foreign_keys
    c = property(attrgetter('columns'),
                 doc="An alias for the :attr:`.columns` attribute.")
    _select_iterable = property(attrgetter('columns'))
    def _init_collections(self):
        # must only run once per instance; the memoized properties above
        # rely on these instance attributes not existing yet
        assert '_columns' not in self.__dict__
        assert 'primary_key' not in self.__dict__
        assert 'foreign_keys' not in self.__dict__
        self._columns = ColumnCollection()
        self.primary_key = ColumnSet()
        self.foreign_keys = set()
    @property
    def _cols_populated(self):
        # True once the .c collection has been built for this instance
        return '_columns' in self.__dict__
    def _populate_column_collection(self):
        """Called on subclasses to establish the .c collection.
        Each implementation has a different way of establishing
        this collection.
        """
    def _refresh_for_new_column(self, column):
        """Given a column added to the .c collection of an underlying
        selectable, produce the local version of that column, assuming this
        selectable ultimately should proxy this column.
        this is used to "ping" a derived selectable to add a new column
        to its .c. collection when a Column has been added to one of the
        Table objects it ultimately derives from.
        If the given selectable hasn't populated its .c. collection yet,
        it should at least pass on the message to the contained selectables,
        but it will return None.
        This method is currently used by Declarative to allow Table
        columns to be added to a partially constructed inheritance
        mapping that may have already produced joins.  The method
        isn't public right now, as the full span of implications
        and/or caveats aren't yet clear.
        It's also possible that this functionality could be invoked by
        default via an event, which would require that
        selectables maintain a weak referencing collection of all
        derivations.
        """
        if not self._cols_populated:
            return None
        elif (column.key in self.columns and
              self.columns[column.key] is column):
            return column
        else:
            return None
class Join(FromClause):
    """represent a ``JOIN`` construct between two :class:`.FromClause`
    elements.
    The public constructor function for :class:`.Join` is the module-level
    :func:`.join()` function, as well as the :meth:`.FromClause.join` method
    of any :class:`.FromClause` (e.g. such as :class:`.Table`).
    .. seealso::
        :func:`.join`
        :meth:`.FromClause.join`
    """
    __visit_name__ = 'join'
    _is_join = True
    def __init__(self, left, right, onclause=None, isouter=False, full=False):
        """Construct a new :class:`.Join`.
        The usual entrypoint here is the :func:`~.expression.join`
        function or the :meth:`.FromClause.join` method of any
        :class:`.FromClause` object.
        """
        self.left = _interpret_as_from(left)
        # the right side is grouped (parenthesized as needed) so that
        # nested joins render correctly
        self.right = _interpret_as_from(right).self_group()
        if onclause is None:
            # derive the ON clause from foreign key relationships
            self.onclause = self._match_primaries(self.left, self.right)
        else:
            self.onclause = onclause
        self.isouter = isouter
        self.full = full
    @classmethod
    def _create_outerjoin(cls, left, right, onclause=None, full=False):
        """Return an ``OUTER JOIN`` clause element.
        The returned object is an instance of :class:`.Join`.
        Similar functionality is also available via the
        :meth:`~.FromClause.outerjoin()` method on any
        :class:`.FromClause`.
        :param left: The left side of the join.
        :param right: The right side of the join.
        :param onclause: Optional criterion for the ``ON`` clause, is
         derived from foreign key relationships established between
         left and right otherwise.
        To chain joins together, use the :meth:`.FromClause.join` or
        :meth:`.FromClause.outerjoin` methods on the resulting
        :class:`.Join` object.
        """
        return cls(left, right, onclause, isouter=True, full=full)
    @classmethod
    def _create_join(cls, left, right, onclause=None, isouter=False,
                     full=False):
        """Produce a :class:`.Join` object, given two :class:`.FromClause`
        expressions.
        E.g.::
            j = join(user_table, address_table,
                     user_table.c.id == address_table.c.user_id)
            stmt = select([user_table]).select_from(j)
        would emit SQL along the lines of::
            SELECT user.id, user.name FROM user
            JOIN address ON user.id = address.user_id
        Similar functionality is available given any
        :class:`.FromClause` object (e.g. such as a :class:`.Table`) using
        the :meth:`.FromClause.join` method.
        :param left: The left side of the join.
        :param right: the right side of the join; this is any
         :class:`.FromClause` object such as a :class:`.Table` object, and
         may also be a selectable-compatible object such as an ORM-mapped
         class.
        :param onclause: a SQL expression representing the ON clause of the
         join.  If left at ``None``, :meth:`.FromClause.join` will attempt to
         join the two tables based on a foreign key relationship.
        :param isouter: if True, render a LEFT OUTER JOIN, instead of JOIN.
        :param full: if True, render a FULL OUTER JOIN, instead of JOIN.
         .. versionadded:: 1.1
        .. seealso::
            :meth:`.FromClause.join` - method form, based on a given left side
            :class:`.Join` - the type of object produced
        """
        return cls(left, right, onclause, isouter, full)
    @property
    def description(self):
        # includes id() of both sides so distinct joins of the same
        # tables are distinguishable in error messages
        return "Join object on %s(%d) and %s(%d)" % (
            self.left.description,
            id(self.left),
            self.right.description,
            id(self.right))
    def is_derived_from(self, fromclause):
        # a join is derived from itself or from either of its sides
        return fromclause is self or \
            self.left.is_derived_from(fromclause) or \
            self.right.is_derived_from(fromclause)
    def self_group(self, against=None):
        return FromGrouping(self)
    @util.dependencies("sqlalchemy.sql.util")
    def _populate_column_collection(self, sqlutil):
        # export the columns of both sides; the primary key is reduced
        # across the ON clause so equated PK columns appear only once
        columns = [c for c in self.left.columns] + \
            [c for c in self.right.columns]
        self.primary_key.extend(sqlutil.reduce_columns(
            (c for c in columns if c.primary_key), self.onclause))
        self._columns.update((col._label, col) for col in columns)
        self.foreign_keys.update(itertools.chain(
            *[col.foreign_keys for col in columns]))
    def _refresh_for_new_column(self, column):
        # delegate to the left side first, then the right
        col = self.left._refresh_for_new_column(column)
        if col is None:
            col = self.right._refresh_for_new_column(column)
        if col is not None:
            if self._cols_populated:
                self._columns[col._label] = col
                self.foreign_keys.update(col.foreign_keys)
                if col.primary_key:
                    self.primary_key.add(col)
                return col
        return None
    def _copy_internals(self, clone=_clone, **kw):
        # invalidate memoized .c / primary_key / foreign_keys, then
        # clone all three constituent clauses
        self._reset_exported()
        self.left = clone(self.left, **kw)
        self.right = clone(self.right, **kw)
        self.onclause = clone(self.onclause, **kw)
    def get_children(self, **kwargs):
        return self.left, self.right, self.onclause
    def _match_primaries(self, left, right):
        # when chaining joins, prefer matching against the innermost
        # right side of the left-hand join ("natural join" behavior)
        if isinstance(left, Join):
            left_right = left.right
        else:
            left_right = None
        return self._join_condition(left, right, a_subset=left_right)
    @classmethod
    def _join_condition(cls, a, b, ignore_nonexistent_tables=False,
                        a_subset=None,
                        consider_as_foreign_keys=None):
        """create a join condition between two tables or selectables.
        e.g.::
            join_condition(tablea, tableb)
        would produce an expression along the lines of::
            tablea.c.id==tableb.c.tablea_id
        The join is determined based on the foreign key relationships
        between the two selectables.   If there are multiple ways
        to join, or no way to join, an error is raised.
        :param ignore_nonexistent_tables:  Deprecated - this
         flag is no longer used.  Only resolution errors regarding
         the two given tables are propagated.
        :param a_subset: An optional expression that is a sub-component
         of ``a``.  An attempt will be made to join to just this sub-component
         first before looking at the full ``a`` construct, and if found
         will be successful even if there are other ways to join to ``a``.
         This allows the "right side" of a join to be passed thereby
         providing a "natural join".
        """
        constraints = cls._joincond_scan_left_right(
            a, a_subset, b, consider_as_foreign_keys)
        if len(constraints) > 1:
            # ambiguous: try to narrow to a single constraint, raising
            # AmbiguousForeignKeysError if that fails
            cls._joincond_trim_constraints(
                a, b, constraints, consider_as_foreign_keys)
        if len(constraints) == 0:
            if isinstance(b, FromGrouping):
                hint = " Perhaps you meant to convert the right side to a "\
                    "subquery using alias()?"
            else:
                hint = ""
            raise exc.NoForeignKeysError(
                "Can't find any foreign key relationships "
                "between '%s' and '%s'.%s" %
                (a.description, b.description, hint))
        # build col == col criteria from the single remaining constraint
        crit = [(x == y) for x, y in list(constraints.values())[0]]
        if len(crit) == 1:
            return (crit[0])
        else:
            return and_(*crit)
    @classmethod
    def _joincond_scan_left_right(
            cls, a, a_subset, b, consider_as_foreign_keys):
        # collect candidate FK constraints linking each "left" candidate
        # to b, scanning FKs in both directions; the a_subset candidate
        # is tried first and, if it yields constraints, wins outright
        constraints = collections.defaultdict(list)
        for left in (a_subset, a):
            if left is None:
                continue
            for fk in sorted(
                    b.foreign_keys,
                    key=lambda fk: fk.parent._creation_order):
                if consider_as_foreign_keys is not None and \
                        fk.parent not in consider_as_foreign_keys:
                    continue
                try:
                    col = fk.get_referent(left)
                except exc.NoReferenceError as nrte:
                    if nrte.table_name == left.name:
                        raise
                    else:
                        continue
                if col is not None:
                    constraints[fk.constraint].append((col, fk.parent))
            if left is not b:
                for fk in sorted(
                        left.foreign_keys,
                        key=lambda fk: fk.parent._creation_order):
                    if consider_as_foreign_keys is not None and \
                            fk.parent not in consider_as_foreign_keys:
                        continue
                    try:
                        col = fk.get_referent(b)
                    except exc.NoReferenceError as nrte:
                        if nrte.table_name == b.name:
                            raise
                        else:
                            continue
                    if col is not None:
                        constraints[fk.constraint].append((col, fk.parent))
            if constraints:
                break
        return constraints
    @classmethod
    def _joincond_trim_constraints(
            cls, a, b, constraints, consider_as_foreign_keys):
        # more than one constraint matched.  narrow down the list
        # to include just those FKCs that match exactly to
        # "consider_as_foreign_keys".
        if consider_as_foreign_keys:
            for const in list(constraints):
                if set(f.parent for f in const.elements) != set(
                        consider_as_foreign_keys):
                    del constraints[const]
        # if still multiple constraints, but
        # they all refer to the exact same end result, use it.
        if len(constraints) > 1:
            dedupe = set(tuple(crit) for crit in constraints.values())
            if len(dedupe) == 1:
                key = list(constraints)[0]
                constraints = {key: constraints[key]}
        if len(constraints) != 1:
            raise exc.AmbiguousForeignKeysError(
                "Can't determine join between '%s' and '%s'; "
                "tables have more than one foreign key "
                "constraint relationship between them. "
                "Please specify the 'onclause' of this "
                "join explicitly." % (a.description, b.description))
    def select(self, whereclause=None, **kwargs):
        r"""Create a :class:`.Select` from this :class:`.Join`.
        The equivalent long-hand form, given a :class:`.Join` object
        ``j``, is::
            from sqlalchemy import select
            j = select([j.left, j.right], **kw).\
                where(whereclause).\
                select_from(j)
        :param whereclause: the WHERE criterion that will be sent to
         the :func:`select()` function
        :param \**kwargs: all other kwargs are sent to the
         underlying :func:`select()` function.
        """
        collist = [self.left, self.right]
        return Select(collist, whereclause, from_obj=[self], **kwargs)
    @property
    def bind(self):
        # bound engine comes from whichever side has one, left first
        return self.left.bind or self.right.bind
    @util.dependencies("sqlalchemy.sql.util")
    def alias(self, sqlutil, name=None, flat=False):
        r"""return an alias of this :class:`.Join`.
        The default behavior here is to first produce a SELECT
        construct from this :class:`.Join`, then to produce an
        :class:`.Alias` from that.  So given a join of the form::
            j = table_a.join(table_b, table_a.c.id == table_b.c.a_id)
        The JOIN by itself would look like::
            table_a JOIN table_b ON table_a.id = table_b.a_id
        Whereas the alias of the above, ``j.alias()``, would in a
        SELECT context look like::
            (SELECT table_a.id AS table_a_id, table_b.id AS table_b_id,
                table_b.a_id AS table_b_a_id
                FROM table_a
                JOIN table_b ON table_a.id = table_b.a_id) AS anon_1
        The equivalent long-hand form, given a :class:`.Join` object
        ``j``, is::
            from sqlalchemy import select, alias
            j = alias(
                select([j.left, j.right]).\
                    select_from(j).\
                    with_labels(True).\
                    correlate(False),
                name=name
            )
        The selectable produced by :meth:`.Join.alias` features the same
        columns as that of the two individual selectables presented under
        a single name - the individual columns are "auto-labeled", meaning
        the ``.c.`` collection of the resulting :class:`.Alias` represents
        the names of the individual columns using a
        ``<tablename>_<columname>`` scheme::
            j.c.table_a_id
            j.c.table_b_a_id
        :meth:`.Join.alias` also features an alternate
        option for aliasing joins which produces no enclosing SELECT and
        does not normally apply labels to the column names.  The
        ``flat=True`` option will call :meth:`.FromClause.alias`
        against the left and right sides individually.
        Using this option, no new ``SELECT`` is produced;
        we instead, from a construct as below::
            j = table_a.join(table_b, table_a.c.id == table_b.c.a_id)
            j = j.alias(flat=True)
        we get a result like this::
            table_a AS table_a_1 JOIN table_b AS table_b_1 ON
            table_a_1.id = table_b_1.a_id
        The ``flat=True`` argument is also propagated to the contained
        selectables, so that a composite join such as::
            j = table_a.join(
                    table_b.join(table_c,
                            table_b.c.id == table_c.c.b_id),
                    table_b.c.a_id == table_a.c.id
                ).alias(flat=True)
        Will produce an expression like::
            table_a AS table_a_1 JOIN (
                    table_b AS table_b_1 JOIN table_c AS table_c_1
                    ON table_b_1.id = table_c_1.b_id
            ) ON table_a_1.id = table_b_1.a_id
        The standalone :func:`~.expression.alias` function as well as the
        base :meth:`.FromClause.alias` method also support the ``flat=True``
        argument as a no-op, so that the argument can be passed to the
        ``alias()`` method of any selectable.
        .. versionadded:: 0.9.0 Added the ``flat=True`` option to create
          "aliases" of joins without enclosing inside of a SELECT
          subquery.
        :param name: name given to the alias.
        :param flat: if True, produce an alias of the left and right
         sides of this :class:`.Join` and return the join of those
         two selectables.   This produces join expression that does not
         include an enclosing SELECT.
         .. versionadded:: 0.9.0
        .. seealso::
            :func:`~.expression.alias`
        """
        if flat:
            assert name is None, "Can't send name argument with flat"
            # alias both sides and re-target the ON clause at the new
            # aliases via a ClauseAdapter
            left_a, right_a = self.left.alias(flat=True), \
                self.right.alias(flat=True)
            adapter = sqlutil.ClauseAdapter(left_a).\
                chain(sqlutil.ClauseAdapter(right_a))
            return left_a.join(right_a, adapter.traverse(self.onclause),
                               isouter=self.isouter, full=self.full)
        else:
            return self.select(use_labels=True, correlate=False).alias(name)
    @property
    def _hide_froms(self):
        # the joined tables should not additionally render in the FROM
        # list of an enclosing SELECT
        return itertools.chain(*[_from_objects(x.left, x.right)
                                 for x in self._cloned_set])
    @property
    def _from_objects(self):
        return [self] + \
            self.onclause._from_objects + \
            self.left._from_objects + \
            self.right._from_objects
class Alias(FromClause):
    """Represents a table or selectable alias (AS).
    Represents an alias, as typically applied to any table or
    sub-select within a SQL statement using the ``AS`` keyword (or
    without the keyword on certain databases such as Oracle).
    This object is constructed from the :func:`~.expression.alias` module
    level function as well as the :meth:`.FromClause.alias` method available
    on all :class:`.FromClause` subclasses.
    """
    __visit_name__ = 'alias'
    named_with_column = True
    _is_from_container = True
    def __init__(self, selectable, name=None):
        # unwrap nested aliases so 'original' is the innermost
        # non-Alias construct
        baseselectable = selectable
        while isinstance(baseselectable, Alias):
            baseselectable = baseselectable.element
        self.original = baseselectable
        self.supports_execution = baseselectable.supports_execution
        if self.supports_execution:
            self._execution_options = baseselectable._execution_options
        self.element = selectable
        if name is None:
            # generate a deterministic anonymous label, e.g. "anon_1",
            # resolved at statement-compile time
            if self.original.named_with_column:
                name = getattr(self.original, 'name', None)
            name = _anonymous_label('%%(%d %s)s' % (id(self), name
                                                    or 'anon'))
        self.name = name
    def self_group(self, against=None):
        # a SELECT inside a compound (e.g. UNION) may need parens
        if isinstance(against, CompoundSelect) and \
            isinstance(self.original, Select) and \
                self.original._needs_parens_for_grouping():
            return FromGrouping(self)
        return super(Alias, self).self_group(against=against)
    @property
    def description(self):
        if util.py3k:
            return self.name
        else:
            # py2: ensure an ascii-safe representation for messages
            return self.name.encode('ascii', 'backslashreplace')
    def as_scalar(self):
        try:
            return self.element.as_scalar()
        except AttributeError:
            raise AttributeError("Element %s does not support "
                                 "'as_scalar()'" % self.element)
    def is_derived_from(self, fromclause):
        if fromclause in self._cloned_set:
            return True
        return self.element.is_derived_from(fromclause)
    def _populate_column_collection(self):
        # proxy every column of the aliased element onto this alias
        for col in self.element.columns._all_columns:
            col._make_proxy(self)
    def _refresh_for_new_column(self, column):
        col = self.element._refresh_for_new_column(column)
        if col is not None:
            if not self._cols_populated:
                return None
            else:
                return col._make_proxy(self)
        else:
            return None
    def _copy_internals(self, clone=_clone, **kw):
        # don't apply anything to an aliased Table
        # for now.   May want to drive this from
        # the given **kw.
        if isinstance(self.element, TableClause):
            return
        self._reset_exported()
        self.element = clone(self.element, **kw)
        # re-derive 'original' from the freshly cloned element
        baseselectable = self.element
        while isinstance(baseselectable, Alias):
            baseselectable = baseselectable.element
        self.original = baseselectable
    def get_children(self, column_collections=True, **kw):
        if column_collections:
            for c in self.c:
                yield c
        yield self.element
    @property
    def _from_objects(self):
        return [self]
    @property
    def bind(self):
        return self.element.bind
class Lateral(Alias):
    """An :class:`.Alias` subclass that renders with the LATERAL keyword.

    Constructed via the :func:`~.expression.lateral` module-level
    function or the :meth:`.FromClause.lateral` method available on all
    :class:`.FromClause` subclasses; behavior is otherwise that of
    :class:`.Alias`.

    While LATERAL is part of the SQL standard, currently only more
    recent PostgreSQL versions provide support for this keyword.

    .. versionadded:: 1.1

    .. seealso::

        :ref:`lateral_selects` - overview of usage.

    """

    __visit_name__ = 'lateral'
class TableSample(Alias):
    """An :class:`.Alias` subclass that renders a TABLESAMPLE clause.

    Constructed via the :func:`~.expression.tablesample` module-level
    function or the :meth:`.FromClause.tablesample` method available on
    all :class:`.FromClause` subclasses.

    .. versionadded:: 1.1

    .. seealso::

        :func:`~.expression.tablesample`

    """

    __visit_name__ = 'tablesample'

    def __init__(self, selectable, sampling, name=None, seed=None):
        # ``sampling`` may be either a plain value or a SQL function;
        # plain values are normalized in _get_method below.
        self.sampling = sampling
        self.seed = seed
        super(TableSample, self).__init__(selectable, name=name)

    @util.dependencies("sqlalchemy.sql.functions")
    def _get_method(self, functions):
        """Return the sampling method as a function expression, wrapping
        a plain value in ``system(...)`` when necessary."""
        if isinstance(self.sampling, functions.Function):
            return self.sampling
        return functions.func.system(self.sampling)
class CTE(Generative, HasSuffixes, Alias):
    """Represent a Common Table Expression.

    The :class:`.CTE` object is obtained using the
    :meth:`.SelectBase.cte` method from any selectable.
    See that method for complete examples.

    .. versionadded:: 0.7.6

    """
    __visit_name__ = 'cte'

    def __init__(self, selectable,
                 name=None,
                 recursive=False,
                 _cte_alias=None,
                 _restates=frozenset(),
                 _suffixes=None):
        """Construct a new :class:`.CTE`.

        :param selectable: the SELECT or DML statement wrapped by this
         CTE.
        :param name: optional name; ``None`` means an anonymous label.
        :param recursive: if ``True``, render ``WITH RECURSIVE``.
        :param _cte_alias: internal - when this CTE was produced by
         :meth:`.alias`, the CTE being aliased.
        :param _restates: internal - frozenset of CTE objects which this
         CTE restates via :meth:`.union` / :meth:`.union_all`.
        :param _suffixes: internal - suffix clauses carried over from
         :class:`.HasSuffixes`.
        """
        self.recursive = recursive
        self._cte_alias = _cte_alias
        self._restates = _restates
        if _suffixes:
            self._suffixes = _suffixes
        super(CTE, self).__init__(selectable, name=name)

    def _copy_internals(self, clone=_clone, **kw):
        super(CTE, self)._copy_internals(clone, **kw)
        if self._cte_alias is not None:
            self._cte_alias = clone(self._cte_alias, **kw)
        # clone each restated CTE as well, preserving the frozenset type
        self._restates = frozenset([
            clone(elem, **kw) for elem in self._restates
        ])

    @util.dependencies("sqlalchemy.sql.dml")
    def _populate_column_collection(self, dml):
        # A DML statement (INSERT/UPDATE/DELETE) exposes its RETURNING
        # columns; any other element exposes its column collection.
        if isinstance(self.element, dml.UpdateBase):
            for col in self.element._returning:
                col._make_proxy(self)
        else:
            for col in self.element.columns._all_columns:
                col._make_proxy(self)

    def alias(self, name=None, flat=False):
        # ``flat`` is accepted for signature compatibility with
        # :meth:`.FromClause.alias` but is not used here.
        return CTE(
            self.original,
            name=name,
            recursive=self.recursive,
            _cte_alias=self,
            _suffixes=self._suffixes
        )

    def union(self, other):
        # Record ``self`` in _restates on the new CTE; consumed during
        # compilation to recognize a restated (e.g. recursive) CTE.
        return CTE(
            self.original.union(other),
            name=self.name,
            recursive=self.recursive,
            _restates=self._restates.union([self]),
            _suffixes=self._suffixes
        )

    def union_all(self, other):
        # Same restatement bookkeeping as union(), for UNION ALL.
        return CTE(
            self.original.union_all(other),
            name=self.name,
            recursive=self.recursive,
            _restates=self._restates.union([self]),
            _suffixes=self._suffixes
        )
class HasCTE(object):
    """Mixin that declares a class to include CTE support.

    .. versionadded:: 1.1

    """

    def cte(self, name=None, recursive=False):
        r"""Return a new :class:`.CTE`, or Common Table Expression instance.

        Common table expressions are a SQL standard whereby SELECT
        statements can draw upon secondary statements specified along
        with the primary statement, using a clause called "WITH".
        Special semantics regarding UNION can also be employed to
        allow "recursive" queries, where a SELECT statement can draw
        upon the set of rows that have previously been selected.

        CTEs can also be applied to DML constructs UPDATE, INSERT
        and DELETE on some databases, both as a source of CTE rows
        when combined with RETURNING, as well as a consumer of
        CTE rows.

        SQLAlchemy detects :class:`.CTE` objects, which are treated
        similarly to :class:`.Alias` objects, as special elements
        to be delivered to the FROM clause of the statement as well
        as to a WITH clause at the top of the statement.

        .. versionchanged:: 1.1 Added support for UPDATE/INSERT/DELETE as
           CTE, CTEs added to UPDATE/INSERT/DELETE.

        :param name: name given to the common table expression.  Like
         :meth:`.FromClause.alias`, the name can be left as ``None``
         in which case an anonymous symbol will be used at query
         compile time.
        :param recursive: if ``True``, will render ``WITH RECURSIVE``.
         A recursive common table expression is intended to be used in
         conjunction with UNION ALL in order to derive rows
         from those already selected.

        The following examples include two from PostgreSQL's documentation at
        http://www.postgresql.org/docs/current/static/queries-with.html,
        as well as additional examples.

        Example 1, non recursive::

            from sqlalchemy import (Table, Column, String, Integer,
                                    MetaData, select, func)

            metadata = MetaData()

            orders = Table('orders', metadata,
                Column('region', String),
                Column('amount', Integer),
                Column('product', String),
                Column('quantity', Integer)
            )

            regional_sales = select([
                                orders.c.region,
                                func.sum(orders.c.amount).label('total_sales')
                            ]).group_by(orders.c.region).cte("regional_sales")


            top_regions = select([regional_sales.c.region]).\
                    where(
                        regional_sales.c.total_sales >
                        select([
                            func.sum(regional_sales.c.total_sales)/10
                        ])
                    ).cte("top_regions")

            statement = select([
                        orders.c.region,
                        orders.c.product,
                        func.sum(orders.c.quantity).label("product_units"),
                        func.sum(orders.c.amount).label("product_sales")
                ]).where(orders.c.region.in_(
                    select([top_regions.c.region])
                )).group_by(orders.c.region, orders.c.product)

            result = conn.execute(statement).fetchall()

        Example 2, WITH RECURSIVE::

            from sqlalchemy import (Table, Column, String, Integer,
                                    MetaData, select, func)

            metadata = MetaData()

            parts = Table('parts', metadata,
                Column('part', String),
                Column('sub_part', String),
                Column('quantity', Integer),
            )

            included_parts = select([
                                parts.c.sub_part,
                                parts.c.part,
                                parts.c.quantity]).\
                                where(parts.c.part=='our part').\
                                cte(recursive=True)


            incl_alias = included_parts.alias()
            parts_alias = parts.alias()
            included_parts = included_parts.union_all(
                select([
                    parts_alias.c.sub_part,
                    parts_alias.c.part,
                    parts_alias.c.quantity
                ]).
                    where(parts_alias.c.part==incl_alias.c.sub_part)
            )

            statement = select([
                        included_parts.c.sub_part,
                        func.sum(included_parts.c.quantity).
                          label('total_quantity')
                    ]).\
                    group_by(included_parts.c.sub_part)

            result = conn.execute(statement).fetchall()

        Example 3, an upsert using UPDATE and INSERT with CTEs::

            from datetime import date
            from sqlalchemy import (MetaData, Table, Column, Integer,
                                    Date, select, literal, and_, exists)

            metadata = MetaData()

            visitors = Table('visitors', metadata,
                Column('product_id', Integer, primary_key=True),
                Column('date', Date, primary_key=True),
                Column('count', Integer),
            )

            # add 1 visitor for the product_id == 1
            product_id = 1
            day = date.today()
            count = 1

            update_cte = (
                visitors.update()
                .where(and_(visitors.c.product_id == product_id,
                            visitors.c.date == day))
                .values(count=visitors.c.count + count)
                .returning(literal(1))
                .cte('update_cte')
            )

            upsert = visitors.insert().from_select(
                [visitors.c.product_id, visitors.c.date, visitors.c.count],
                select([literal(product_id), literal(day), literal(count)])
                    .where(~exists(update_cte.select()))
            )

            connection.execute(upsert)

        .. seealso::

            :meth:`.orm.query.Query.cte` - ORM version of
            :meth:`.HasCTE.cte`.

        """
        return CTE(self, name=name, recursive=recursive)
class FromGrouping(FromClause):
    """Represent a grouping (parenthesization) of a FROM clause element.

    Column collections and most other attributes are delegated to the
    wrapped ``element``.
    """
    __visit_name__ = 'grouping'

    def __init__(self, element):
        # ``element`` is the FromClause being grouped
        self.element = element

    def _init_collections(self):
        # column collections are delegated to ``element``; nothing to
        # initialize locally
        pass

    @property
    def columns(self):
        return self.element.columns

    @property
    def primary_key(self):
        return self.element.primary_key

    @property
    def foreign_keys(self):
        return self.element.foreign_keys

    def is_derived_from(self, element):
        return self.element.is_derived_from(element)

    def alias(self, **kw):
        # alias the inner element, keeping the grouping wrapper
        return FromGrouping(self.element.alias(**kw))

    @property
    def _hide_froms(self):
        return self.element._hide_froms

    def get_children(self, **kwargs):
        return self.element,

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)

    @property
    def _from_objects(self):
        return self.element._from_objects

    def __getattr__(self, attr):
        # any attribute not found locally is proxied to the wrapped
        # element
        return getattr(self.element, attr)

    def __getstate__(self):
        # explicit pickle support; __getattr__ above would otherwise
        # intercept special-method lookups during unpickling
        return {'element': self.element}

    def __setstate__(self, state):
        self.element = state['element']
class TableClause(Immutable, FromClause):
    """Represents a minimal "table" construct.

    This is a lightweight table object that has only a name and a
    collection of columns, which are typically produced
    by the :func:`.expression.column` function::

        from sqlalchemy import table, column

        user = table("user",
                column("id"),
                column("name"),
                column("description"),
        )

    The :class:`.TableClause` construct serves as the base for
    the more commonly used :class:`~.schema.Table` object, providing
    the usual set of :class:`~.expression.FromClause` services including
    the ``.c.`` collection and statement generation methods.

    It does **not** provide all the additional schema-level services
    of :class:`~.schema.Table`, including constraints, references to other
    tables, or support for :class:`.MetaData`-level services.  It's useful
    on its own as an ad-hoc construct used to generate quick SQL
    statements when a more fully fledged :class:`~.schema.Table`
    is not on hand.

    """

    __visit_name__ = 'table'

    named_with_column = True

    implicit_returning = False
    """:class:`.TableClause` doesn't support having a primary key or column
    -level defaults, so implicit returning doesn't apply."""

    _autoincrement_column = None
    """No PK or default support so no autoincrement column."""

    def __init__(self, name, *columns):
        """Produce a new :class:`.TableClause`.

        The object returned is an instance of :class:`.TableClause`, which
        represents the "syntactical" portion of the schema-level
        :class:`~.schema.Table` object.
        It may be used to construct lightweight table constructs.

        .. versionchanged:: 1.0.0 :func:`.expression.table` can now
           be imported from the plain ``sqlalchemy`` namespace like any
           other SQL element.

        :param name: Name of the table.

        :param columns: A collection of :func:`.expression.column` constructs.

        """
        super(TableClause, self).__init__()
        # ``fullname`` mirrors ``name``; TableClause carries no schema
        # qualifier of its own
        self.name = self.fullname = name
        self._columns = ColumnCollection()
        self.primary_key = ColumnSet()
        self.foreign_keys = set()
        for c in columns:
            self.append_column(c)

    def _init_collections(self):
        # collections are assigned directly in __init__; skip the
        # FromClause default initialization
        pass

    @util.memoized_property
    def description(self):
        # string description used in messages; encoded on Python 2
        if util.py3k:
            return self.name
        else:
            return self.name.encode('ascii', 'backslashreplace')

    def append_column(self, c):
        # add the column and backlink it to this table
        self._columns[c.key] = c
        c.table = self

    def get_children(self, column_collections=True, **kwargs):
        if column_collections:
            return [c for c in self.c]
        else:
            return []

    @util.dependencies("sqlalchemy.sql.dml")
    def insert(self, dml, values=None, inline=False, **kwargs):
        """Generate an :func:`.insert` construct against this
        :class:`.TableClause`.

        E.g.::

            table.insert().values(name='foo')

        See :func:`.insert` for argument and usage information.

        """
        return dml.Insert(self, values=values, inline=inline, **kwargs)

    @util.dependencies("sqlalchemy.sql.dml")
    def update(
            self, dml, whereclause=None, values=None, inline=False, **kwargs):
        """Generate an :func:`.update` construct against this
        :class:`.TableClause`.

        E.g.::

            table.update().where(table.c.id==7).values(name='foo')

        See :func:`.update` for argument and usage information.

        """
        return dml.Update(self, whereclause=whereclause,
                          values=values, inline=inline, **kwargs)

    @util.dependencies("sqlalchemy.sql.dml")
    def delete(self, dml, whereclause=None, **kwargs):
        """Generate a :func:`.delete` construct against this
        :class:`.TableClause`.

        E.g.::

            table.delete().where(table.c.id==7)

        See :func:`.delete` for argument and usage information.

        """
        return dml.Delete(self, whereclause, **kwargs)

    @property
    def _from_objects(self):
        return [self]
class ForUpdateArg(ClauseElement):
    """Represents arguments specified to :meth:`.Select.for_update`.

    .. versionadded:: 0.9.0

    """

    def __init__(
            self, nowait=False, read=False, of=None,
            skip_locked=False, key_share=False):
        """Construct a new :class:`.ForUpdateArg`.

        :param nowait: render ``FOR UPDATE NOWAIT``.
        :param read: render a shared-lock variant (``FOR SHARE`` /
         ``LOCK IN SHARE MODE``).
        :param of: column or table expression(s) for a ``FOR UPDATE OF``
         clause; coerced via ``_interpret_as_column_or_from``.
        :param skip_locked: render ``SKIP LOCKED``.
        :param key_share: render the "key share" variants.
        """
        self.nowait = nowait
        self.read = read
        self.skip_locked = skip_locked
        self.key_share = key_share
        if of is not None:
            self.of = [_interpret_as_column_or_from(elem)
                       for elem in util.to_list(of)]
        else:
            self.of = None

    @classmethod
    def parse_legacy_select(cls, arg):
        """Parse the for_update argument of :func:`.select`.

        :param arg: Defines the lockmode to use.

            ``None`` - translates to no lockmode

            ``'update'`` - translates to ``FOR UPDATE``
            (standard SQL, supported by most dialects)

            ``'nowait'`` - translates to ``FOR UPDATE NOWAIT``
            (supported by Oracle, PostgreSQL 8.1 upwards)

            ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
            and ``FOR SHARE`` (for PostgreSQL)

            ``'read_nowait'`` - translates to ``FOR SHARE NOWAIT``
            (supported by PostgreSQL). ``FOR SHARE`` and
            ``FOR SHARE NOWAIT`` (PostgreSQL).

        :return: a :class:`.ForUpdateArg`, or ``None`` when no lockmode
         was requested.
        :raises: :class:`.exc.ArgumentError` for unrecognized values.
        """
        if arg in (None, False):
            return None

        nowait = read = False
        if arg == 'nowait':
            nowait = True
        elif arg == 'read':
            read = True
        elif arg == 'read_nowait':
            read = nowait = True
        elif arg == 'update':
            # documented legacy spelling of plain FOR UPDATE; equivalent
            # to passing True (previously this value raised, contradicting
            # the documentation above)
            pass
        elif arg is not True:
            raise exc.ArgumentError("Unknown for_update argument: %r" % arg)

        return ForUpdateArg(read=read, nowait=nowait)

    @property
    def legacy_for_update_value(self):
        """Render this argument back to its legacy string/boolean form."""
        if self.read and not self.nowait:
            return "read"
        elif self.read and self.nowait:
            return "read_nowait"
        elif self.nowait:
            return "nowait"
        else:
            return True

    def __eq__(self, other):
        # note: ``of`` is compared by identity, not by value
        return (
            isinstance(other, ForUpdateArg) and
            other.nowait == self.nowait and
            other.read == self.read and
            other.skip_locked == self.skip_locked and
            other.key_share == self.key_share and
            other.of is self.of
        )

    def __hash__(self):
        # NOTE(review): hashing is by identity while __eq__ compares by
        # value, so two equal ForUpdateArg objects may hash differently.
        # Left as-is to preserve existing dict/set-key behavior.
        return id(self)

    def _copy_internals(self, clone=_clone, **kw):
        if self.of is not None:
            self.of = [clone(col, **kw) for col in self.of]
class SelectBase(HasCTE, Executable, FromClause):
    """Base class for SELECT statements.

    This includes :class:`.Select`, :class:`.CompoundSelect` and
    :class:`.TextAsFrom`.

    """

    def as_scalar(self):
        """return a 'scalar' representation of this selectable, which can be
        used as a column expression.

        Typically, a select statement which has only one column in its columns
        clause is eligible to be used as a scalar expression.

        The returned object is an instance of
        :class:`ScalarSelect`.

        """
        return ScalarSelect(self)

    def label(self, name):
        """return a 'scalar' representation of this selectable, embedded as a
        subquery with a label.

        .. seealso::

            :meth:`~.SelectBase.as_scalar`.

        """
        return self.as_scalar().label(name)

    @_generative
    @util.deprecated('0.6',
                     message="``autocommit()`` is deprecated. Use "
                     ":meth:`.Executable.execution_options` with the "
                     "'autocommit' flag.")
    def autocommit(self):
        """return a new selectable with the 'autocommit' flag set to
        True.
        """

        self._execution_options = \
            self._execution_options.union({'autocommit': True})

    def _generate(self):
        """Override the default _generate() method to also clear out
        exported collections."""

        # bypass __init__ via __new__, shallow-copy state, then reset
        # the memoized column collections so the copy re-derives them
        s = self.__class__.__new__(self.__class__)
        s.__dict__ = self.__dict__.copy()
        s._reset_exported()
        return s

    @property
    def _from_objects(self):
        return [self]
class GenerativeSelect(SelectBase):
    """Base class for SELECT statements where additional elements can be
    added.

    This serves as the base for :class:`.Select` and :class:`.CompoundSelect`
    where elements such as ORDER BY, GROUP BY can be added and column
    rendering can be controlled.  Compare to :class:`.TextAsFrom`, which,
    while it subclasses :class:`.SelectBase` and is also a SELECT construct,
    represents a fixed textual string which cannot be altered at this level,
    only wrapped as a subquery.

    .. versionadded:: 0.9.0 :class:`.GenerativeSelect` was added to
       provide functionality specific to :class:`.Select` and
       :class:`.CompoundSelect` while allowing :class:`.SelectBase` to be
       used for other SELECT-like objects, e.g. :class:`.TextAsFrom`.

    """

    # class-level defaults; setters assign fresh objects per instance,
    # so these shared instances are never mutated in place
    _order_by_clause = ClauseList()
    _group_by_clause = ClauseList()
    _limit_clause = None
    _offset_clause = None
    _for_update_arg = None

    def __init__(self,
                 use_labels=False,
                 for_update=False,
                 limit=None,
                 offset=None,
                 order_by=None,
                 group_by=None,
                 bind=None,
                 autocommit=None):
        self.use_labels = use_labels

        if for_update is not False:
            # legacy for_update values ('read', 'nowait', etc.) are
            # converted into a ForUpdateArg
            self._for_update_arg = (ForUpdateArg.
                                    parse_legacy_select(for_update))

        if autocommit is not None:
            util.warn_deprecated('autocommit on select() is '
                                 'deprecated. Use .execution_options(a'
                                 'utocommit=True)')
            self._execution_options = \
                self._execution_options.union(
                    {'autocommit': autocommit})
        if limit is not None:
            self._limit_clause = _offset_or_limit_clause(limit)
        if offset is not None:
            self._offset_clause = _offset_or_limit_clause(offset)
        self._bind = bind

        if order_by is not None:
            self._order_by_clause = ClauseList(
                *util.to_list(order_by),
                _literal_as_text=_literal_and_labels_as_label_reference)
        if group_by is not None:
            self._group_by_clause = ClauseList(
                *util.to_list(group_by),
                _literal_as_text=_literal_as_label_reference)

    @property
    def for_update(self):
        """Provide legacy dialect support for the ``for_update`` attribute.
        """
        if self._for_update_arg is not None:
            return self._for_update_arg.legacy_for_update_value
        else:
            return None

    @for_update.setter
    def for_update(self, value):
        self._for_update_arg = ForUpdateArg.parse_legacy_select(value)

    @_generative
    def with_for_update(self, nowait=False, read=False, of=None,
                        skip_locked=False, key_share=False):
        """Specify a ``FOR UPDATE`` clause for this :class:`.GenerativeSelect`.

        E.g.::

            stmt = select([table]).with_for_update(nowait=True)

        On a database like PostgreSQL or Oracle, the above would render a
        statement like::

            SELECT table.a, table.b FROM table FOR UPDATE NOWAIT

        on other backends, the ``nowait`` option is ignored and instead
        would produce::

            SELECT table.a, table.b FROM table FOR UPDATE

        When called with no arguments, the statement will render with
        the suffix ``FOR UPDATE``.   Additional arguments can then be
        provided which allow for common database-specific
        variants.

        :param nowait: boolean; will render ``FOR UPDATE NOWAIT`` on Oracle
         and PostgreSQL dialects.

        :param read: boolean; will render ``LOCK IN SHARE MODE`` on MySQL,
         ``FOR SHARE`` on PostgreSQL.  On PostgreSQL, when combined with
         ``nowait``, will render ``FOR SHARE NOWAIT``.

        :param of: SQL expression or list of SQL expression elements
         (typically :class:`.Column` objects or a compatible expression) which
         will render into a ``FOR UPDATE OF`` clause; supported by PostgreSQL
         and Oracle.  May render as a table or as a column depending on
         backend.

        :param skip_locked: boolean, will render ``FOR UPDATE SKIP LOCKED``
         on Oracle and PostgreSQL dialects or ``FOR SHARE SKIP LOCKED`` if
         ``read=True`` is also specified.

         .. versionadded:: 1.1.0

        :param key_share: boolean, will render ``FOR NO KEY UPDATE``,
         or if combined with ``read=True`` will render ``FOR KEY SHARE``,
         on the PostgreSQL dialect.

         .. versionadded:: 1.1.0

        """
        # generative: @_generative copies self before this body mutates it
        self._for_update_arg = ForUpdateArg(nowait=nowait, read=read, of=of,
                                            skip_locked=skip_locked,
                                            key_share=key_share)

    @_generative
    def apply_labels(self):
        """return a new selectable with the 'use_labels' flag set to True.

        This will result in column expressions being generated using labels
        against their table name, such as "SELECT somecolumn AS
        tablename_somecolumn". This allows selectables which contain multiple
        FROM clauses to produce a unique set of column names regardless of
        name conflicts among the individual FROM clauses.

        """
        self.use_labels = True

    @property
    def _limit(self):
        """Get an integer value for the limit.  This should only be used
        by code that cannot support a limit as a BindParameter or
        other custom clause as it will throw an exception if the limit
        isn't currently set to an integer.

        """
        return _offset_or_limit_clause_asint(self._limit_clause, "limit")

    @property
    def _simple_int_limit(self):
        """True if the LIMIT clause is a simple integer, False
        if it is not present or is a SQL expression.
        """
        return isinstance(self._limit_clause, _OffsetLimitParam)

    @property
    def _simple_int_offset(self):
        """True if the OFFSET clause is a simple integer, False
        if it is not present or is a SQL expression.
        """
        return isinstance(self._offset_clause, _OffsetLimitParam)

    @property
    def _offset(self):
        """Get an integer value for the offset.  This should only be used
        by code that cannot support an offset as a BindParameter or
        other custom clause as it will throw an exception if the
        offset isn't currently set to an integer.

        """
        return _offset_or_limit_clause_asint(self._offset_clause, "offset")

    @_generative
    def limit(self, limit):
        """return a new selectable with the given LIMIT criterion
        applied.

        This is a numerical value which usually renders as a ``LIMIT``
        expression in the resulting select.  Backends that don't
        support ``LIMIT`` will attempt to provide similar
        functionality.

        .. versionchanged:: 1.0.0 - :meth:`.Select.limit` can now
           accept arbitrary SQL expressions as well as integer values.

        :param limit: an integer LIMIT parameter, or a SQL expression
         that provides an integer result.

        """
        self._limit_clause = _offset_or_limit_clause(limit)

    @_generative
    def offset(self, offset):
        """return a new selectable with the given OFFSET criterion
        applied.

        This is a numeric value which usually renders as an ``OFFSET``
        expression in the resulting select.  Backends that don't
        support ``OFFSET`` will attempt to provide similar
        functionality.

        .. versionchanged:: 1.0.0 - :meth:`.Select.offset` can now
           accept arbitrary SQL expressions as well as integer values.

        :param offset: an integer OFFSET parameter, or a SQL expression
         that provides an integer result.

        """
        self._offset_clause = _offset_or_limit_clause(offset)

    @_generative
    def order_by(self, *clauses):
        """return a new selectable with the given list of ORDER BY
        criterion applied.

        The criterion will be appended to any pre-existing ORDER BY
        criterion.

        """
        self.append_order_by(*clauses)

    @_generative
    def group_by(self, *clauses):
        """return a new selectable with the given list of GROUP BY
        criterion applied.

        The criterion will be appended to any pre-existing GROUP BY
        criterion.

        """
        self.append_group_by(*clauses)

    def append_order_by(self, *clauses):
        """Append the given ORDER BY criterion applied to this selectable.

        The criterion will be appended to any pre-existing ORDER BY criterion.

        This is an **in-place** mutation method; the
        :meth:`~.GenerativeSelect.order_by` method is preferred, as it
        provides standard :term:`method chaining`.

        """
        # a single None argument clears any existing ORDER BY
        if len(clauses) == 1 and clauses[0] is None:
            self._order_by_clause = ClauseList()
        else:
            if getattr(self, '_order_by_clause', None) is not None:
                clauses = list(self._order_by_clause) + list(clauses)
            self._order_by_clause = ClauseList(
                *clauses,
                _literal_as_text=_literal_and_labels_as_label_reference)

    def append_group_by(self, *clauses):
        """Append the given GROUP BY criterion applied to this selectable.

        The criterion will be appended to any pre-existing GROUP BY criterion.

        This is an **in-place** mutation method; the
        :meth:`~.GenerativeSelect.group_by` method is preferred, as it
        provides standard :term:`method chaining`.

        """
        # a single None argument clears any existing GROUP BY
        if len(clauses) == 1 and clauses[0] is None:
            self._group_by_clause = ClauseList()
        else:
            if getattr(self, '_group_by_clause', None) is not None:
                clauses = list(self._group_by_clause) + list(clauses)
            self._group_by_clause = ClauseList(
                *clauses, _literal_as_text=_literal_as_label_reference)

    @property
    def _label_resolve_dict(self):
        # implemented by Select / CompoundSelect
        raise NotImplementedError()

    def _copy_internals(self, clone=_clone, **kw):
        if self._limit_clause is not None:
            self._limit_clause = clone(self._limit_clause, **kw)
        if self._offset_clause is not None:
            self._offset_clause = clone(self._offset_clause, **kw)
class CompoundSelect(GenerativeSelect):
    """Forms the basis of ``UNION``, ``UNION ALL``, and other
    SELECT-based set operations.


    .. seealso::

        :func:`.union`

        :func:`.union_all`

        :func:`.intersect`

        :func:`.intersect_all`

        :func:`.except`

        :func:`.except_all`

    """

    __visit_name__ = 'compound_select'

    # symbols identifying the set-operation keyword for the compiler
    UNION = util.symbol('UNION')
    UNION_ALL = util.symbol('UNION ALL')
    EXCEPT = util.symbol('EXCEPT')
    EXCEPT_ALL = util.symbol('EXCEPT ALL')
    INTERSECT = util.symbol('INTERSECT')
    INTERSECT_ALL = util.symbol('INTERSECT ALL')

    _is_from_container = True

    def __init__(self, keyword, *selects, **kwargs):
        self._auto_correlate = kwargs.pop('correlate', False)
        self.keyword = keyword
        self.selects = []

        numcols = None

        # some DBs do not like ORDER BY in the inner queries of a UNION, etc.
        for n, s in enumerate(selects):
            s = _clause_element_as_expr(s)

            # all member selects must expose the same number of columns
            if not numcols:
                numcols = len(s.c._all_columns)
            elif len(s.c._all_columns) != numcols:
                raise exc.ArgumentError(
                    'All selectables passed to '
                    'CompoundSelect must have identical numbers of '
                    'columns; select #%d has %d columns, select '
                    '#%d has %d' %
                    (1, len(self.selects[0].c._all_columns),
                     n + 1, len(s.c._all_columns))
                )

            # self_group() parenthesizes each member as needed
            self.selects.append(s.self_group(against=self))

        GenerativeSelect.__init__(self, **kwargs)

    @property
    def _label_resolve_dict(self):
        # the same mapping serves all three resolution roles here
        d = dict(
            (c.key, c) for c in self.c
        )
        return d, d, d

    @classmethod
    def _create_union(cls, *selects, **kwargs):
        r"""Return a ``UNION`` of multiple selectables.

        The returned object is an instance of
        :class:`.CompoundSelect`.

        A similar :func:`union()` method is available on all
        :class:`.FromClause` subclasses.

        \*selects
          a list of :class:`.Select` instances.

        \**kwargs
           available keyword arguments are the same as those of
           :func:`select`.

        """
        return CompoundSelect(CompoundSelect.UNION, *selects, **kwargs)

    @classmethod
    def _create_union_all(cls, *selects, **kwargs):
        r"""Return a ``UNION ALL`` of multiple selectables.

        The returned object is an instance of
        :class:`.CompoundSelect`.

        A similar :func:`union_all()` method is available on all
        :class:`.FromClause` subclasses.

        \*selects
          a list of :class:`.Select` instances.

        \**kwargs
          available keyword arguments are the same as those of
          :func:`select`.

        """
        return CompoundSelect(CompoundSelect.UNION_ALL, *selects, **kwargs)

    @classmethod
    def _create_except(cls, *selects, **kwargs):
        r"""Return an ``EXCEPT`` of multiple selectables.

        The returned object is an instance of
        :class:`.CompoundSelect`.

        \*selects
          a list of :class:`.Select` instances.

        \**kwargs
          available keyword arguments are the same as those of
          :func:`select`.

        """
        return CompoundSelect(CompoundSelect.EXCEPT, *selects, **kwargs)

    @classmethod
    def _create_except_all(cls, *selects, **kwargs):
        r"""Return an ``EXCEPT ALL`` of multiple selectables.

        The returned object is an instance of
        :class:`.CompoundSelect`.

        \*selects
          a list of :class:`.Select` instances.

        \**kwargs
          available keyword arguments are the same as those of
          :func:`select`.

        """
        return CompoundSelect(CompoundSelect.EXCEPT_ALL, *selects, **kwargs)

    @classmethod
    def _create_intersect(cls, *selects, **kwargs):
        r"""Return an ``INTERSECT`` of multiple selectables.

        The returned object is an instance of
        :class:`.CompoundSelect`.

        \*selects
          a list of :class:`.Select` instances.

        \**kwargs
          available keyword arguments are the same as those of
          :func:`select`.

        """
        return CompoundSelect(CompoundSelect.INTERSECT, *selects, **kwargs)

    @classmethod
    def _create_intersect_all(cls, *selects, **kwargs):
        r"""Return an ``INTERSECT ALL`` of multiple selectables.

        The returned object is an instance of
        :class:`.CompoundSelect`.

        \*selects
          a list of :class:`.Select` instances.

        \**kwargs
          available keyword arguments are the same as those of
          :func:`select`.

        """
        return CompoundSelect(
            CompoundSelect.INTERSECT_ALL, *selects, **kwargs)

    def _scalar_type(self):
        # the scalar type of the compound is that of its first member
        return self.selects[0]._scalar_type()

    def self_group(self, against=None):
        # a compound select is always parenthesized when embedded
        return FromGrouping(self)

    def is_derived_from(self, fromclause):
        for s in self.selects:
            if s.is_derived_from(fromclause):
                return True
        return False

    def _populate_column_collection(self):
        for cols in zip(*[s.c._all_columns for s in self.selects]):

            # this is a slightly hacky thing - the union exports a
            # column that resembles just that of the *first* selectable.
            # to get at a "composite" column, particularly foreign keys,
            # you have to dig through the proxies collection which we
            # generate below.  We may want to improve upon this, such as
            # perhaps _make_proxy can accept a list of other columns
            # that are "shared" - schema.column can then copy all the
            # ForeignKeys in.  this would allow the union() to have all
            # those fks too.

            proxy = cols[0]._make_proxy(
                self, name=cols[0]._label if self.use_labels else None,
                key=cols[0]._key_label if self.use_labels else None)

            # hand-construct the "_proxies" collection to include all
            # derived columns place a 'weight' annotation corresponding
            # to how low in the list of select()s the column occurs, so
            # that the corresponding_column() operation can resolve
            # conflicts

            proxy._proxies = [
                c._annotate({'weight': i + 1}) for (i, c) in enumerate(cols)]

    def _refresh_for_new_column(self, column):
        for s in self.selects:
            s._refresh_for_new_column(column)

        if not self._cols_populated:
            return None

        # once the column collection has been built, adding columns to
        # the member selects of a compound is unsupported
        raise NotImplementedError("CompoundSelect constructs don't support "
                                  "addition of columns to underlying "
                                  "selectables")

    def _copy_internals(self, clone=_clone, **kw):
        super(CompoundSelect, self)._copy_internals(clone, **kw)
        self._reset_exported()
        self.selects = [clone(s, **kw) for s in self.selects]
        if hasattr(self, '_col_map'):
            del self._col_map
        for attr in (
                '_order_by_clause', '_group_by_clause', '_for_update_arg'):
            if getattr(self, attr) is not None:
                setattr(self, attr, clone(getattr(self, attr), **kw))

    def get_children(self, column_collections=True, **kwargs):
        return (column_collections and list(self.c) or []) \
            + [self._order_by_clause, self._group_by_clause] \
            + list(self.selects)

    def bind(self):
        if self._bind:
            return self._bind
        for s in self.selects:
            e = s.bind
            if e:
                return e
        else:
            # no explicit bind and no member select provided one
            return None

    def _set_bind(self, bind):
        self._bind = bind
    # expose bind as a read/write property using the two methods above
    bind = property(bind, _set_bind)
class Select(HasPrefixes, HasSuffixes, GenerativeSelect):
"""Represents a ``SELECT`` statement.
"""
__visit_name__ = 'select'
_prefixes = ()
_suffixes = ()
_hints = util.immutabledict()
_statement_hints = ()
_distinct = False
_from_cloned = None
_correlate = ()
_correlate_except = None
_memoized_property = SelectBase._memoized_property
_is_select = True
def __init__(self,
columns=None,
whereclause=None,
from_obj=None,
distinct=False,
having=None,
correlate=True,
prefixes=None,
suffixes=None,
**kwargs):
"""Construct a new :class:`.Select`.
Similar functionality is also available via the
:meth:`.FromClause.select` method on any :class:`.FromClause`.
All arguments which accept :class:`.ClauseElement` arguments also
accept string arguments, which will be converted as appropriate into
either :func:`text()` or :func:`literal_column()` constructs.
.. seealso::
:ref:`coretutorial_selecting` - Core Tutorial description of
:func:`.select`.
:param columns:
A list of :class:`.ColumnElement` or :class:`.FromClause`
objects which will form the columns clause of the resulting
statement. For those objects that are instances of
:class:`.FromClause` (typically :class:`.Table` or :class:`.Alias`
objects), the :attr:`.FromClause.c` collection is extracted
to form a collection of :class:`.ColumnElement` objects.
This parameter will also accept :class:`.Text` constructs as
given, as well as ORM-mapped classes.
.. note::
The :paramref:`.select.columns` parameter is not available
in the method form of :func:`.select`, e.g.
:meth:`.FromClause.select`.
.. seealso::
:meth:`.Select.column`
:meth:`.Select.with_only_columns`
:param whereclause:
A :class:`.ClauseElement` expression which will be used to form the
``WHERE`` clause. It is typically preferable to add WHERE
criterion to an existing :class:`.Select` using method chaining
with :meth:`.Select.where`.
.. seealso::
:meth:`.Select.where`
:param from_obj:
A list of :class:`.ClauseElement` objects which will be added to the
``FROM`` clause of the resulting statement. This is equivalent
to calling :meth:`.Select.select_from` using method chaining on
an existing :class:`.Select` object.
.. seealso::
:meth:`.Select.select_from` - full description of explicit
FROM clause specification.
:param autocommit:
Deprecated. Use ``.execution_options(autocommit=<True|False>)``
to set the autocommit option.
.. seealso::
:meth:`.Executable.execution_options`
:param bind=None:
an :class:`~.Engine` or :class:`~.Connection` instance
to which the
resulting :class:`.Select` object will be bound. The
:class:`.Select` object will otherwise automatically bind to
whatever :class:`~.base.Connectable` instances can be located within
its contained :class:`.ClauseElement` members.
:param correlate=True:
indicates that this :class:`.Select` object should have its
contained :class:`.FromClause` elements "correlated" to an enclosing
:class:`.Select` object. It is typically preferable to specify
correlations on an existing :class:`.Select` construct using
:meth:`.Select.correlate`.
.. seealso::
:meth:`.Select.correlate` - full description of correlation.
:param distinct=False:
when ``True``, applies a ``DISTINCT`` qualifier to the columns
clause of the resulting statement.
The boolean argument may also be a column expression or list
of column expressions - this is a special calling form which
is understood by the PostgreSQL dialect to render the
``DISTINCT ON (<columns>)`` syntax.
``distinct`` is also available on an existing :class:`.Select`
object via the :meth:`~.Select.distinct` method.
.. seealso::
:meth:`.Select.distinct`
:param for_update=False:
when ``True``, applies ``FOR UPDATE`` to the end of the
resulting statement.
.. deprecated:: 0.9.0 - use
:meth:`.Select.with_for_update` to specify the
structure of the ``FOR UPDATE`` clause.
``for_update`` accepts various string values interpreted by
specific backends, including:
* ``"read"`` - on MySQL, translates to ``LOCK IN SHARE MODE``;
on PostgreSQL, translates to ``FOR SHARE``.
* ``"nowait"`` - on PostgreSQL and Oracle, translates to
``FOR UPDATE NOWAIT``.
* ``"read_nowait"`` - on PostgreSQL, translates to
``FOR SHARE NOWAIT``.
.. seealso::
:meth:`.Select.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
:param group_by:
a list of :class:`.ClauseElement` objects which will comprise the
``GROUP BY`` clause of the resulting select. This parameter
is typically specified more naturally using the
:meth:`.Select.group_by` method on an existing :class:`.Select`.
.. seealso::
:meth:`.Select.group_by`
:param having:
a :class:`.ClauseElement` that will comprise the ``HAVING`` clause
of the resulting select when ``GROUP BY`` is used. This parameter
is typically specified more naturally using the
:meth:`.Select.having` method on an existing :class:`.Select`.
.. seealso::
:meth:`.Select.having`
:param limit=None:
a numerical value which usually renders as a ``LIMIT``
expression in the resulting select. Backends that don't
support ``LIMIT`` will attempt to provide similar
functionality. This parameter is typically specified more naturally
using the :meth:`.Select.limit` method on an existing
:class:`.Select`.
.. seealso::
:meth:`.Select.limit`
:param offset=None:
a numeric value which usually renders as an ``OFFSET``
expression in the resulting select. Backends that don't
support ``OFFSET`` will attempt to provide similar
functionality. This parameter is typically specified more naturally
using the :meth:`.Select.offset` method on an existing
:class:`.Select`.
.. seealso::
:meth:`.Select.offset`
:param order_by:
a scalar or list of :class:`.ClauseElement` objects which will
comprise the ``ORDER BY`` clause of the resulting select.
This parameter is typically specified more naturally using the
:meth:`.Select.order_by` method on an existing :class:`.Select`.
.. seealso::
:meth:`.Select.order_by`
:param use_labels=False:
when ``True``, the statement will be generated using labels
for each column in the columns clause, which qualify each
column with its parent table's (or aliases) name so that name
conflicts between columns in different tables don't occur.
The format of the label is <tablename>_<column>. The "c"
collection of the resulting :class:`.Select` object will use these
names as well for targeting column members.
This parameter can also be specified on an existing
:class:`.Select` object using the :meth:`.Select.apply_labels`
method.
.. seealso::
:meth:`.Select.apply_labels`
"""
self._auto_correlate = correlate
if distinct is not False:
if distinct is True:
self._distinct = True
else:
self._distinct = [
_literal_as_text(e)
for e in util.to_list(distinct)
]
if from_obj is not None:
self._from_obj = util.OrderedSet(
_interpret_as_from(f)
for f in util.to_list(from_obj))
else:
self._from_obj = util.OrderedSet()
try:
cols_present = bool(columns)
except TypeError:
raise exc.ArgumentError("columns argument to select() must "
"be a Python list or other iterable")
if cols_present:
self._raw_columns = []
for c in columns:
c = _interpret_as_column_or_from(c)
if isinstance(c, ScalarSelect):
c = c.self_group(against=operators.comma_op)
self._raw_columns.append(c)
else:
self._raw_columns = []
if whereclause is not None:
self._whereclause = _literal_as_text(
whereclause).self_group(against=operators._asbool)
else:
self._whereclause = None
if having is not None:
self._having = _literal_as_text(
having).self_group(against=operators._asbool)
else:
self._having = None
if prefixes:
self._setup_prefixes(prefixes)
if suffixes:
self._setup_suffixes(suffixes)
GenerativeSelect.__init__(self, **kwargs)
    @property
    def _froms(self):
        # Compute the full, ordered list of FROM elements implied by
        # the columns clause, the WHERE clause and the explicit
        # _from_obj collection, with each clone lineage represented
        # at most once.
        # would love to cache this,
        # but there's just enough edge cases, particularly now that
        # declarative encourages construction of SQL expressions
        # without tables present, to just regen this each time.
        froms = []
        seen = set()
        translate = self._from_cloned
        for item in itertools.chain(
            _from_objects(*self._raw_columns),
            _from_objects(self._whereclause)
            if self._whereclause is not None else (),
            self._from_obj
        ):
            if item is self:
                raise exc.InvalidRequestError(
                    "select() construct refers to itself as a FROM")
            # map FROMs cloned by _copy_internals back to their
            # current versions
            if translate and item in translate:
                item = translate[item]
            # keep the item only if no member of its clone set has
            # been seen already
            if not seen.intersection(item._cloned_set):
                froms.append(item)
            seen.update(item._cloned_set)
        return froms
    def _get_display_froms(self, explicit_correlate_froms=None,
                           implicit_correlate_froms=None):
        """Return the full list of 'from' clauses to be displayed.

        Takes into account a set of existing froms which may be
        rendered in the FROM clause of enclosing selects; this Select
        may want to leave those absent if it is automatically
        correlating.

        :param explicit_correlate_froms: FROM elements rendered in an
         enclosing statement's own FROM clause; candidates for the
         explicit .correlate() / .correlate_except() collections.
        :param implicit_correlate_froms: FROM elements referenced by an
         enclosing statement's WHERE/columns/ORDER BY clauses; drives
         auto-correlation.
        """
        froms = self._froms
        # elements such as Join report their component tables via
        # _hide_froms; those components must not render separately
        toremove = set(itertools.chain(*[
            _expand_cloned(f._hide_froms)
            for f in froms]))
        if toremove:
            # if we're maintaining clones of froms,
            # add the copies out to the toremove list.  only include
            # clones that are lexical equivalents.
            if self._from_cloned:
                toremove.update(
                    self._from_cloned[f] for f in
                    toremove.intersection(self._from_cloned)
                    if self._from_cloned[f]._is_lexical_equivalent(f)
                )
            # filter out to FROM clauses not in the list,
            # using a list to maintain ordering
            froms = [f for f in froms if f not in toremove]
        if self._correlate:
            to_correlate = self._correlate
            if to_correlate:
                # explicit correlation: omit FROMs that also appear in
                # the enclosing statement's FROM list
                froms = [
                    f for f in froms if f not in
                    _cloned_intersection(
                        _cloned_intersection(
                            froms, explicit_correlate_froms or ()),
                        to_correlate
                    )
                ]
        if self._correlate_except is not None:
            # correlate-except: omit enclosing-statement FROMs except
            # those explicitly exempted
            froms = [
                f for f in froms if f not in
                _cloned_difference(
                    _cloned_intersection(
                        froms, explicit_correlate_froms or ()),
                    self._correlate_except
                )
            ]
        if self._auto_correlate and \
                implicit_correlate_froms and \
                len(froms) > 1:
            # auto-correlation: omit FROMs referenced by the enclosing
            # statement; len(froms) > 1 guards against correlating away
            # the entire FROM list up front
            froms = [
                f for f in froms if f not in
                _cloned_intersection(froms, implicit_correlate_froms)
            ]
            if not len(froms):
                raise exc.InvalidRequestError("Select statement '%s"
                                              "' returned no FROM clauses "
                                              "due to auto-correlation; "
                                              "specify correlate(<tables>) "
                                              "to control correlation "
                                              "manually." % self)
        return froms
def _scalar_type(self):
elem = self._raw_columns[0]
cols = list(elem._select_iterable)
return cols[0].type
@property
def froms(self):
"""Return the displayed list of FromClause elements."""
return self._get_display_froms()
def with_statement_hint(self, text, dialect_name='*'):
"""add a statement hint to this :class:`.Select`.
This method is similar to :meth:`.Select.with_hint` except that
it does not require an individual table, and instead applies to the
statement as a whole.
Hints here are specific to the backend database and may include
directives such as isolation levels, file directives, fetch directives,
etc.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Select.with_hint`
"""
return self.with_hint(None, text, dialect_name)
@_generative
def with_hint(self, selectable, text, dialect_name='*'):
r"""Add an indexing or other executional context hint for the given
selectable to this :class:`.Select`.
The text of the hint is rendered in the appropriate
location for the database backend in use, relative
to the given :class:`.Table` or :class:`.Alias` passed as the
``selectable`` argument. The dialect implementation
typically uses Python string substitution syntax
with the token ``%(name)s`` to render the name of
the table or alias. E.g. when using Oracle, the
following::
select([mytable]).\
with_hint(mytable, "index(%(name)s ix_mytable)")
Would render SQL as::
select /*+ index(mytable ix_mytable) */ ... from mytable
The ``dialect_name`` option will limit the rendering of a particular
hint to a particular backend. Such as, to add hints for both Oracle
and Sybase simultaneously::
select([mytable]).\
with_hint(mytable, "index(%(name)s ix_mytable)", 'oracle').\
with_hint(mytable, "WITH INDEX ix_mytable", 'sybase')
.. seealso::
:meth:`.Select.with_statement_hint`
"""
if selectable is None:
self._statement_hints += ((dialect_name, text), )
else:
self._hints = self._hints.union(
{(selectable, dialect_name): text})
@property
def type(self):
raise exc.InvalidRequestError("Select objects don't have a type. "
"Call as_scalar() on this Select "
"object to return a 'scalar' version "
"of this Select.")
@_memoized_property.method
def locate_all_froms(self):
"""return a Set of all FromClause elements referenced by this Select.
This set is a superset of that returned by the ``froms`` property,
which is specifically for those FromClause elements that would
actually be rendered.
"""
froms = self._froms
return froms + list(_from_objects(*froms))
@property
def inner_columns(self):
"""an iterator of all ColumnElement expressions which would
be rendered into the columns clause of the resulting SELECT statement.
"""
return _select_iterables(self._raw_columns)
    @_memoized_property
    def _label_resolve_dict(self):
        # Build the three lookup dicts used to resolve string labels
        # (e.g. in ORDER BY / GROUP BY) against this statement:
        #   with_cols  - columns-clause labels/keys, with FROM-clause
        #                columns merged in as fallback entries
        #   only_froms - columns available only via the FROM clauses
        #   only_cols  - the columns-clause entries alone
        with_cols = dict(
            (c._resolve_label or c._label or c.key, c)
            for c in _select_iterables(self._raw_columns)
            if c._allow_label_resolve)
        only_froms = dict(
            (c.key, c) for c in
            _select_iterables(self.froms) if c._allow_label_resolve)
        only_cols = with_cols.copy()
        for key, value in only_froms.items():
            # setdefault: columns-clause entries take precedence over
            # same-named FROM columns
            with_cols.setdefault(key, value)
        return with_cols, only_froms, only_cols
def is_derived_from(self, fromclause):
if self in fromclause._cloned_set:
return True
for f in self.locate_all_froms():
if f.is_derived_from(fromclause):
return True
return False
    def _copy_internals(self, clone=_clone, **kw):
        # Replace internal elements with cloned versions; the ordering
        # of the steps below is significant.
        super(Select, self)._copy_internals(clone, **kw)
        # Select() object has been cloned and probably adapted by the
        # given clone function.  Apply the cloning function to internal
        # objects
        # 1. keep a dictionary of the froms we've cloned, and what
        # they've become.  This is consulted later when we derive
        # additional froms from "whereclause" and the columns clause,
        # which may still reference the uncloned parent table.
        # as of 0.7.4 we also put the current version of _froms, which
        # gets cleared on each generation.  previously we were "baking"
        # _froms into self._from_obj.
        self._from_cloned = from_cloned = dict(
            (f, clone(f, **kw)) for f in self._from_obj.union(self._froms))
        # 3. update persistent _from_obj with the cloned versions.
        self._from_obj = util.OrderedSet(from_cloned[f] for f in
                                         self._from_obj)
        # the _correlate collection is done separately, what can happen
        # here is the same item is _correlate as in _from_obj but the
        # _correlate version has an annotation on it - (specifically
        # RelationshipProperty.Comparator._criterion_exists() does
        # this).  Also keep _correlate liberally open with its previous
        # contents, as this set is used for matching, not rendering.
        self._correlate = set(clone(f) for f in
                              self._correlate).union(self._correlate)
        # 4. clone other things.  The difficulty here is that Column
        # objects are not actually cloned, and refer to their original
        # .table, resulting in the wrong "from" parent after a clone
        # operation.  Hence _from_cloned and _from_obj supersede what is
        # present here.
        self._raw_columns = [clone(c, **kw) for c in self._raw_columns]
        for attr in '_whereclause', '_having', '_order_by_clause', \
                '_group_by_clause', '_for_update_arg':
            if getattr(self, attr) is not None:
                setattr(self, attr, clone(getattr(self, attr), **kw))
        # erase exported column list, _froms collection,
        # etc.
        self._reset_exported()
def get_children(self, column_collections=True, **kwargs):
"""return child elements as per the ClauseElement specification."""
return (column_collections and list(self.columns) or []) + \
self._raw_columns + list(self._froms) + \
[x for x in
(self._whereclause, self._having,
self._order_by_clause, self._group_by_clause)
if x is not None]
@_generative
def column(self, column):
"""return a new select() construct with the given column expression
added to its columns clause.
E.g.::
my_select = my_select.column(table.c.new_column)
See the documentation for :meth:`.Select.with_only_columns`
for guidelines on adding /replacing the columns of a
:class:`.Select` object.
"""
self.append_column(column)
    @util.dependencies("sqlalchemy.sql.util")
    def reduce_columns(self, sqlutil, only_synonyms=True):
        """Return a new :func:`.select` construct with redundantly
        named, equivalently-valued columns removed from the columns clause.

        "Redundant" here means two columns where one refers to the
        other either based on foreign key, or via a simple equality
        comparison in the WHERE clause of the statement.   The primary
        purpose of this method is to automatically construct a select
        statement with all uniquely-named columns, without the need to
        use table-qualified labels as :meth:`.apply_labels` does.

        When columns are omitted based on foreign key, the referred-to
        column is the one that's kept.  When columns are omitted based on
        WHERE equivalence, the first column in the columns clause is the
        one that's kept.

        :param only_synonyms: when True, limit the removal of columns
         to those which have the same name as the equivalent.   Otherwise,
         all columns that are equivalent to another are removed.

        .. versionadded:: 0.8
        """
        # delegate the actual reduction to sql.util.reduce_columns,
        # passing the WHERE clause and explicit FROMs so equality
        # criteria there can be considered
        return self.with_only_columns(
            sqlutil.reduce_columns(
                self.inner_columns,
                only_synonyms=only_synonyms,
                *(self._whereclause, ) + tuple(self._from_obj)
            )
        )
@_generative
def with_only_columns(self, columns):
r"""Return a new :func:`.select` construct with its columns
clause replaced with the given columns.
This method is exactly equivalent to as if the original
:func:`.select` had been called with the given columns
clause. I.e. a statement::
s = select([table1.c.a, table1.c.b])
s = s.with_only_columns([table1.c.b])
should be exactly equivalent to::
s = select([table1.c.b])
This means that FROM clauses which are only derived
from the column list will be discarded if the new column
list no longer contains that FROM::
>>> table1 = table('t1', column('a'), column('b'))
>>> table2 = table('t2', column('a'), column('b'))
>>> s1 = select([table1.c.a, table2.c.b])
>>> print s1
SELECT t1.a, t2.b FROM t1, t2
>>> s2 = s1.with_only_columns([table2.c.b])
>>> print s2
SELECT t2.b FROM t1
The preferred way to maintain a specific FROM clause
in the construct, assuming it won't be represented anywhere
else (i.e. not in the WHERE clause, etc.) is to set it using
:meth:`.Select.select_from`::
>>> s1 = select([table1.c.a, table2.c.b]).\
... select_from(table1.join(table2,
... table1.c.a==table2.c.a))
>>> s2 = s1.with_only_columns([table2.c.b])
>>> print s2
SELECT t2.b FROM t1 JOIN t2 ON t1.a=t2.a
Care should also be taken to use the correct
set of column objects passed to :meth:`.Select.with_only_columns`.
Since the method is essentially equivalent to calling the
:func:`.select` construct in the first place with the given
columns, the columns passed to :meth:`.Select.with_only_columns`
should usually be a subset of those which were passed
to the :func:`.select` construct, not those which are available
from the ``.c`` collection of that :func:`.select`. That
is::
s = select([table1.c.a, table1.c.b]).select_from(table1)
s = s.with_only_columns([table1.c.b])
and **not**::
# usually incorrect
s = s.with_only_columns([s.c.b])
The latter would produce the SQL::
SELECT b
FROM (SELECT t1.a AS a, t1.b AS b
FROM t1), t1
Since the :func:`.select` construct is essentially being
asked to select both from ``table1`` as well as itself.
"""
self._reset_exported()
rc = []
for c in columns:
c = _interpret_as_column_or_from(c)
if isinstance(c, ScalarSelect):
c = c.self_group(against=operators.comma_op)
rc.append(c)
self._raw_columns = rc
@_generative
def where(self, whereclause):
"""return a new select() construct with the given expression added to
its WHERE clause, joined to the existing clause via AND, if any.
"""
self.append_whereclause(whereclause)
@_generative
def having(self, having):
"""return a new select() construct with the given expression added to
its HAVING clause, joined to the existing clause via AND, if any.
"""
self.append_having(having)
@_generative
def distinct(self, *expr):
r"""Return a new select() construct which will apply DISTINCT to its
columns clause.
:param \*expr: optional column expressions. When present,
the PostgreSQL dialect will render a ``DISTINCT ON (<expressions>>)``
construct.
"""
if expr:
expr = [_literal_as_label_reference(e) for e in expr]
if isinstance(self._distinct, list):
self._distinct = self._distinct + expr
else:
self._distinct = expr
else:
self._distinct = True
@_generative
def select_from(self, fromclause):
r"""return a new :func:`.select` construct with the
given FROM expression
merged into its list of FROM objects.
E.g.::
table1 = table('t1', column('a'))
table2 = table('t2', column('b'))
s = select([table1.c.a]).\
select_from(
table1.join(table2, table1.c.a==table2.c.b)
)
The "from" list is a unique set on the identity of each element,
so adding an already present :class:`.Table` or other selectable
will have no effect. Passing a :class:`.Join` that refers
to an already present :class:`.Table` or other selectable will have
the effect of concealing the presence of that selectable as
an individual element in the rendered FROM list, instead
rendering it into a JOIN clause.
While the typical purpose of :meth:`.Select.select_from` is to
replace the default, derived FROM clause with a join, it can
also be called with individual table elements, multiple times
if desired, in the case that the FROM clause cannot be fully
derived from the columns clause::
select([func.count('*')]).select_from(table1)
"""
self.append_from(fromclause)
@_generative
def correlate(self, *fromclauses):
r"""return a new :class:`.Select` which will correlate the given FROM
clauses to that of an enclosing :class:`.Select`.
Calling this method turns off the :class:`.Select` object's
default behavior of "auto-correlation". Normally, FROM elements
which appear in a :class:`.Select` that encloses this one via
its :term:`WHERE clause`, ORDER BY, HAVING or
:term:`columns clause` will be omitted from this :class:`.Select`
object's :term:`FROM clause`.
Setting an explicit correlation collection using the
:meth:`.Select.correlate` method provides a fixed list of FROM objects
that can potentially take place in this process.
When :meth:`.Select.correlate` is used to apply specific FROM clauses
for correlation, the FROM elements become candidates for
correlation regardless of how deeply nested this :class:`.Select`
object is, relative to an enclosing :class:`.Select` which refers to
the same FROM object. This is in contrast to the behavior of
"auto-correlation" which only correlates to an immediate enclosing
:class:`.Select`. Multi-level correlation ensures that the link
between enclosed and enclosing :class:`.Select` is always via
at least one WHERE/ORDER BY/HAVING/columns clause in order for
correlation to take place.
If ``None`` is passed, the :class:`.Select` object will correlate
none of its FROM entries, and all will render unconditionally
in the local FROM clause.
:param \*fromclauses: a list of one or more :class:`.FromClause`
constructs, or other compatible constructs (i.e. ORM-mapped
classes) to become part of the correlate collection.
.. versionchanged:: 0.8.0 ORM-mapped classes are accepted by
:meth:`.Select.correlate`.
.. versionchanged:: 0.8.0 The :meth:`.Select.correlate` method no
longer unconditionally removes entries from the FROM clause;
instead, the candidate FROM entries must also be matched by a FROM
entry located in an enclosing :class:`.Select`, which ultimately
encloses this one as present in the WHERE clause, ORDER BY clause,
HAVING clause, or columns clause of an enclosing :meth:`.Select`.
.. versionchanged:: 0.8.2 explicit correlation takes place
via any level of nesting of :class:`.Select` objects; in previous
0.8 versions, correlation would only occur relative to the
immediate enclosing :class:`.Select` construct.
.. seealso::
:meth:`.Select.correlate_except`
:ref:`correlated_subqueries`
"""
self._auto_correlate = False
if fromclauses and fromclauses[0] is None:
self._correlate = ()
else:
self._correlate = set(self._correlate).union(
_interpret_as_from(f) for f in fromclauses)
@_generative
def correlate_except(self, *fromclauses):
r"""return a new :class:`.Select` which will omit the given FROM
clauses from the auto-correlation process.
Calling :meth:`.Select.correlate_except` turns off the
:class:`.Select` object's default behavior of
"auto-correlation" for the given FROM elements. An element
specified here will unconditionally appear in the FROM list, while
all other FROM elements remain subject to normal auto-correlation
behaviors.
.. versionchanged:: 0.8.2 The :meth:`.Select.correlate_except`
method was improved to fully prevent FROM clauses specified here
from being omitted from the immediate FROM clause of this
:class:`.Select`.
If ``None`` is passed, the :class:`.Select` object will correlate
all of its FROM entries.
.. versionchanged:: 0.8.2 calling ``correlate_except(None)`` will
correctly auto-correlate all FROM clauses.
:param \*fromclauses: a list of one or more :class:`.FromClause`
constructs, or other compatible constructs (i.e. ORM-mapped
classes) to become part of the correlate-exception collection.
.. seealso::
:meth:`.Select.correlate`
:ref:`correlated_subqueries`
"""
self._auto_correlate = False
if fromclauses and fromclauses[0] is None:
self._correlate_except = ()
else:
self._correlate_except = set(self._correlate_except or ()).union(
_interpret_as_from(f) for f in fromclauses)
def append_correlation(self, fromclause):
"""append the given correlation expression to this select()
construct.
This is an **in-place** mutation method; the
:meth:`~.Select.correlate` method is preferred, as it provides
standard :term:`method chaining`.
"""
self._auto_correlate = False
self._correlate = set(self._correlate).union(
_interpret_as_from(f) for f in fromclause)
def append_column(self, column):
"""append the given column expression to the columns clause of this
select() construct.
E.g.::
my_select.append_column(some_table.c.new_column)
This is an **in-place** mutation method; the
:meth:`~.Select.column` method is preferred, as it provides standard
:term:`method chaining`.
See the documentation for :meth:`.Select.with_only_columns`
for guidelines on adding /replacing the columns of a
:class:`.Select` object.
"""
self._reset_exported()
column = _interpret_as_column_or_from(column)
if isinstance(column, ScalarSelect):
column = column.self_group(against=operators.comma_op)
self._raw_columns = self._raw_columns + [column]
def append_prefix(self, clause):
"""append the given columns clause prefix expression to this select()
construct.
This is an **in-place** mutation method; the
:meth:`~.Select.prefix_with` method is preferred, as it provides
standard :term:`method chaining`.
"""
clause = _literal_as_text(clause)
self._prefixes = self._prefixes + (clause,)
def append_whereclause(self, whereclause):
"""append the given expression to this select() construct's WHERE
criterion.
The expression will be joined to existing WHERE criterion via AND.
This is an **in-place** mutation method; the
:meth:`~.Select.where` method is preferred, as it provides standard
:term:`method chaining`.
"""
self._reset_exported()
self._whereclause = and_(
True_._ifnone(self._whereclause), whereclause)
def append_having(self, having):
"""append the given expression to this select() construct's HAVING
criterion.
The expression will be joined to existing HAVING criterion via AND.
This is an **in-place** mutation method; the
:meth:`~.Select.having` method is preferred, as it provides standard
:term:`method chaining`.
"""
self._reset_exported()
self._having = and_(True_._ifnone(self._having), having)
def append_from(self, fromclause):
"""append the given FromClause expression to this select() construct's
FROM clause.
This is an **in-place** mutation method; the
:meth:`~.Select.select_from` method is preferred, as it provides
standard :term:`method chaining`.
"""
self._reset_exported()
fromclause = _interpret_as_from(fromclause)
self._from_obj = self._from_obj.union([fromclause])
    @_memoized_property
    def _columns_plus_names(self):
        # Return the deduplicated columns clause as a list of
        # (label_name_or_None, column) pairs.  Label names are
        # generated only under use_labels, with anonymous labels
        # substituted on collision.
        if self.use_labels:
            names = set()
            def name_for_col(c):
                # expressions with no label, or flagged to render
                # unlabeled, get no name
                if c._label is None or not c._render_label_in_columns_clause:
                    return (None, c)
                name = c._label
                if name in names:
                    # duplicate label - disambiguate with an anonymous
                    # label
                    name = c.anon_label
                else:
                    names.add(name)
                return name, c
            return [
                name_for_col(c)
                for c in util.unique_list(
                    _select_iterables(self._raw_columns))
            ]
        else:
            return [
                (None, c)
                for c in util.unique_list(
                    _select_iterables(self._raw_columns))
            ]
    def _populate_column_collection(self):
        # Build the exported .c collection by proxying each columns
        # clause expression onto this Select.
        for name, c in self._columns_plus_names:
            # textual/literal expressions have no _make_proxy; they
            # don't participate in the .c collection
            if not hasattr(c, '_make_proxy'):
                continue
            if name is None:
                key = None
            elif self.use_labels:
                key = c._key_label
                if key is not None and key in self.c:
                    # key collision within .c - fall back to the
                    # anonymous label
                    key = c.anon_label
            else:
                key = None
            c._make_proxy(self, key=key,
                          name=name,
                          name_is_truncatable=True)
    def _refresh_for_new_column(self, column):
        # A column was added to one of our FROM objects after this
        # Select was constructed.  If our exported .c collection is
        # already built and the new column belongs in our columns
        # clause, proxy it in so the collection stays current; return
        # the proxy, or None if no update was needed.
        for fromclause in self._froms:
            col = fromclause._refresh_for_new_column(column)
            if col is not None:
                if col in self.inner_columns and self._cols_populated:
                    our_label = col._key_label if self.use_labels else col.key
                    if our_label not in self.c:
                        return col._make_proxy(
                            self,
                            name=col._label if self.use_labels else None,
                            key=col._key_label if self.use_labels else None,
                            name_is_truncatable=True)
                # the owning FROM handled it; nothing exported here
                return None
        return None
def _needs_parens_for_grouping(self):
return (
self._limit_clause is not None or
self._offset_clause is not None or
bool(self._order_by_clause.clauses)
)
def self_group(self, against=None):
"""return a 'grouping' construct as per the ClauseElement
specification.
This produces an element that can be embedded in an expression. Note
that this method is called automatically as needed when constructing
expressions and should not require explicit use.
"""
if isinstance(against, CompoundSelect) and \
not self._needs_parens_for_grouping():
return self
return FromGrouping(self)
def union(self, other, **kwargs):
"""return a SQL UNION of this select() construct against the given
selectable."""
return CompoundSelect._create_union(self, other, **kwargs)
def union_all(self, other, **kwargs):
"""return a SQL UNION ALL of this select() construct against the given
selectable.
"""
return CompoundSelect._create_union_all(self, other, **kwargs)
def except_(self, other, **kwargs):
"""return a SQL EXCEPT of this select() construct against the given
selectable."""
return CompoundSelect._create_except(self, other, **kwargs)
def except_all(self, other, **kwargs):
"""return a SQL EXCEPT ALL of this select() construct against the
given selectable.
"""
return CompoundSelect._create_except_all(self, other, **kwargs)
def intersect(self, other, **kwargs):
"""return a SQL INTERSECT of this select() construct against the given
selectable.
"""
return CompoundSelect._create_intersect(self, other, **kwargs)
def intersect_all(self, other, **kwargs):
"""return a SQL INTERSECT ALL of this select() construct against the
given selectable.
"""
return CompoundSelect._create_intersect_all(self, other, **kwargs)
def bind(self):
if self._bind:
return self._bind
froms = self._froms
if not froms:
for c in self._raw_columns:
e = c.bind
if e:
self._bind = e
return e
else:
e = list(froms)[0].bind
if e:
self._bind = e
return e
return None
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
class ScalarSelect(Generative, Grouping):
    """Represent a :class:`.Select` used as a scalar column expression
    (a "scalar subquery")."""
    # contributes no FROM elements to an enclosing statement
    _from_objects = []
    _is_from_container = True
    def __init__(self, element):
        # element: the wrapped Select; the expression type is taken
        # from the select's first column
        self.element = element
        self.type = element._scalar_type()
    @property
    def columns(self):
        """Not available on :class:`.ScalarSelect`; raises informatively."""
        raise exc.InvalidRequestError('Scalar Select expression has no '
                                      'columns; use this object directly '
                                      'within a column-level expression.')
    c = columns
    @_generative
    def where(self, crit):
        """Apply a WHERE clause to the SELECT statement referred to
        by this :class:`.ScalarSelect`.
        """
        self.element = self.element.where(crit)
    def self_group(self, **kwargs):
        # already renders parenthesized; no further grouping needed
        return self
class Exists(UnaryExpression):
    """Represent an ``EXISTS`` clause.
    """
    __visit_name__ = UnaryExpression.__visit_name__
    # an EXISTS contributes no FROM elements to the enclosing statement
    _from_objects = []
    def __init__(self, *args, **kwargs):
        """Construct a new :class:`.Exists` against an existing
        :class:`.Select` object.

        Calling styles are of the following forms::

            # use on an existing select()
            s = select([table.c.col1]).where(table.c.col2==1)
            s = exists(s)

            # construct a select() at once
            exists(['*'], **select_arguments).where(criterion)

            # columns argument is optional, generates "EXISTS (SELECT *)"
            # by default.
            exists().where(table.c.col2==1)
        """
        if args and isinstance(args[0], (SelectBase, ScalarSelect)):
            # an existing SELECT was passed; wrap it directly
            s = args[0]
        else:
            if not args:
                # default form: EXISTS (SELECT *)
                args = ([literal_column('*')],)
            s = Select(*args, **kwargs).as_scalar().self_group()
        UnaryExpression.__init__(self, s, operator=operators.exists,
                                 type_=type_api.BOOLEANTYPE,
                                 wraps_column_expression=True)
    def select(self, whereclause=None, **params):
        # wrap this EXISTS in an enclosing SELECT
        return Select([self], whereclause, **params)
    def correlate(self, *fromclause):
        # apply .correlate() to the contained select; returns a new
        # Exists, leaving this one unchanged
        e = self._clone()
        e.element = self.element.correlate(*fromclause).self_group()
        return e
    def correlate_except(self, *fromclause):
        # apply .correlate_except() to the contained select; returns a
        # new Exists
        e = self._clone()
        e.element = self.element.correlate_except(*fromclause).self_group()
        return e
    def select_from(self, clause):
        """return a new :class:`.Exists` construct, applying the given
        expression to the :meth:`.Select.select_from` method of the select
        statement contained.
        """
        e = self._clone()
        e.element = self.element.select_from(clause).self_group()
        return e
    def where(self, clause):
        """return a new exists() construct with the given expression added to
        its WHERE clause, joined to the existing clause via AND, if any.
        """
        e = self._clone()
        e.element = self.element.where(clause).self_group()
        return e
class TextAsFrom(SelectBase):
    """Adapt a :class:`.TextClause` to the :class:`.SelectBase` interface.

    Gives the textual construct a ``.c`` collection and other FROM-like
    capabilities such as :meth:`.FromClause.alias` and
    :meth:`.SelectBase.cte`.  Produced via :meth:`.TextClause.columns` -
    see that method for details.

    .. versionadded:: 0.9.0

    .. seealso::

        :func:`.text`

        :meth:`.TextClause.columns`
    """

    __visit_name__ = "text_as_from"

    _textual = True

    def __init__(self, text, columns, positional=False):
        self.element = text
        self.column_args = columns
        self.positional = positional

    @property
    def _bind(self):
        # delegate bind resolution to the wrapped text element
        return self.element._bind

    @_generative
    def bindparams(self, *binds, **bind_as_values):
        """Rebind parameters on the wrapped text element."""
        self.element = self.element.bindparams(*binds, **bind_as_values)

    def _populate_column_collection(self):
        # proxy each declared column onto this construct's collection
        for column in self.column_args:
            column._make_proxy(self)

    def _copy_internals(self, clone=_clone, **kw):
        self._reset_exported()
        self.element = clone(self.element, **kw)

    def _scalar_type(self):
        # the first declared column determines the scalar type
        return self.column_args[0].type
class AnnotatedFromClause(Annotated):
    """An :class:`.Annotated` variant specific to FROM clause elements."""

    def __init__(self, element, values):
        # Touch ``element.c`` purely for its side effect: it forces the
        # FromClause to generate its internal column collections into
        # __dict__ before the Annotated proxy copies that __dict__.
        element.c
        Annotated.__init__(self, element, values)
| [
"1007228376@qq.com"
] | 1007228376@qq.com |
84e81158f9ce9e4b792edce641911b13c4c00de8 | be0e0488a46b57bf6aff46c687d2a3080053e52d | /python/baekjoon/11399.py | 6f5c2c8eb7384490b7d3a194fc1b4bd34f796f13 | [] | no_license | syo0e/Algorithm | b3f8a0df0029e4d6c9cbf19dcfcb312ba25ea939 | 1ae754d5bb37d02f28cf1d50463a494896d5026f | refs/heads/master | 2023-06-09T11:31:54.266900 | 2021-06-30T17:04:38 | 2021-06-30T17:04:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | N = int(input())
# Read the individual waiting times, sort them ascending, and sum up the
# total waiting time: the i-th shortest job is counted once for each of
# the (N - i) people from position i onward.  N is read earlier in the
# script — presumably the number of people; TODO confirm against input.
times = sorted(map(int, input().split()))
total = sum(t * (N - i) for i, t in enumerate(times))
print(total)
"kyun2dot@gmail.com"
] | kyun2dot@gmail.com |
b1c964b0f5a6f5e482f4c668f25f65cf1f10ce37 | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /log-20190927/132.230.102.123-10.21.11.36/1569574793.py | 9344099c29c8565bef8b57eea46b4316b4188abe | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,919 | py | import functools
import typing
import string
import random
import pytest
## Lösung Teil 1.
def nwords(s: str) -> int:
    """Count the number of words in a given string.

    Words are maximal runs of non-whitespace characters, i.e. separated by
    one or more whitespace characters.

    Args:
        s (str): A string whose words are counted

    Returns:
        int: Number of words in the given string
    """
    # str.split() with no arguments splits on runs of whitespace and drops
    # empty pieces, which is exactly what the previous manual character scan
    # computed.  This also matches the grading oracle (nwords_oracle) used
    # later in this file.  (Note: split() additionally treats non-ASCII
    # Unicode whitespace as a separator.)
    return len(s.split())
## Lösung Teil 2.
def word_count_iter(it) -> tuple:
    """Tally lines, words and characters over an iterable of strings.

    Args:
        it (iterable): yields one str per iteration

    Returns:
        tuple: (number of lines, number of words, number of characters)
    """
    totals = [0, 0, 0]
    for line in it:
        totals[0] += 1            # one line per yielded string
        totals[1] += nwords(line)
        totals[2] += len(line)
    return tuple(totals)
######################################################################
## hidden code
def mk_coverage():
    """Build a coverage-tracking decorator for word_count_iter.

    The returned ``coverage`` callable doubles as a decorator and a query
    interface: passing the strings "achieved", "required", "count" or
    "original" returns bookkeeping values instead of wrapping a function.
    """
    # coverage case ids (0..5) observed so far
    covered = set()
    # the full set of coverage targets
    target = set(range(6))
    # number of calls made through the wrapper
    count = 0
    # the original (unwrapped) function
    original = None

    def coverage(func):
        nonlocal covered, target, count, original

        def wrapper(it):
            nonlocal covered, count
            # materialize the iterable so it can be inspected and reused
            lit = list(it)
            r = func(lit)
            count += 1
            # classify the input to record which coverage cases were hit
            if lit == []:
                covered.add(0)      # empty input
            elif len(lit) == 1:
                covered.add(1)      # single line
            else:
                covered.add(2)      # multiple lines
            if "" in lit:
                covered.add(3)      # contains an empty line
            if len(lit) > 1:
                # case 4: at least one line contains a whitespace char;
                # case 5: no line contains any whitespace
                if [line for line in lit if [x for x in line if x in string.whitespace]]:
                    covered.add(4)
                else:
                    covered.add(5)
            return r

        # query interface: special string arguments return statistics
        if func == "achieved": return len(covered)
        if func == "required": return len(target)
        if func == "count": return count
        if func == "original": return original
        original = func
        functools.update_wrapper(wrapper, func)
        return wrapper
    return coverage

coverage = mk_coverage()
# Wrap the student's word_count_iter with the coverage tracker; failures
# (e.g. the function was never defined) are deliberately ignored.
try:
    word_count_iter = coverage(word_count_iter)
except:
    pass
## Lösung Teil 3. (Tests)
def test_word_count_iter():
    """Check (lines, words, chars) totals for simple line lists."""
    iter1 = ["Hello, World", "Hallo, Welt"]
    iter2 = ["     "]
    iter3 = ["     ", ",,,,,"]
    assert word_count_iter(iter1) == (2, 4, 23)
    assert word_count_iter(iter2) == (1, 0, 5)
    # fixed: the function is named word_count_iter (was the undefined
    # word_counter_iter), and ",,,,," counts as one word since it contains
    # no whitespace — so the expected word total is 1, not 0.
    assert word_count_iter(iter3) == (2, 1, 10)
## revert
# Restore the unwrapped implementation saved by functools.update_wrapper;
# ignored if the coverage wrapper was never installed.
try:
    word_count_iter = word_count_iter.__wrapped__
except:
    pass
## Lösung Teil 4.
######################################################################
## hidden test code
# Run the tests in this file: verbose output, plain asserts, cache plugin off.
pytest.main(["-v", "--assert=plain", "-p", "no:cacheprovider"])
from inspect import getfullargspec
class TestNames:
    """Smoke tests: the required names exist with the expected parameters."""

    def test_nwords(self):
        # nwords must exist and take a parameter named 's'
        assert nwords
        assert 's' in getfullargspec(nwords).args

    def test_word_count_iter(self):
        assert word_count_iter

    def test_word_count(self):
        # word_count must exist and take a parameter named 'file'
        assert word_count
        assert 'file' in getfullargspec(word_count).args
class TestGrades:
    """Grading tests: docstrings, annotations and behaviour vs. oracles."""

    def test_docstring_present(self):
        # each required function must carry a docstring
        assert nwords.__doc__ is not None
        assert word_count_iter.__doc__ is not None
        assert word_count.__doc__ is not None

    def test_typing_present(self):
        # annotations must match those of the reference implementations
        assert nwords.__annotations__ == self.nwords_oracle.__annotations__
        assert word_count_iter.__annotations__ == self.word_count_iter_oracle.__annotations__
        assert word_count.__annotations__ == self.word_count_oracle.__annotations__

    def nwords_oracle(self, s: str) -> int:
        # reference: words are whitespace-separated runs
        return len(s.split())

    def test_nwords(self):
        # fuzz nwords against the oracle with random printable strings
        charset = string.printable
        for i in range(100):
            s = ''.join(random.choice(charset) for j in range(1000))
            assert nwords(s) == self.nwords_oracle(s)

    def word_count_iter_oracle(self, iter):
        # reference implementation of the (lines, words, chars) tally
        lines = 0
        words = 0
        chars = 0
        for line in iter:
            lines += 1
            chars += len(line)
            r = line.split()
            words += len(r)
        return (lines, words, chars)

    def test_wci_empty(self):
        assert word_count_iter([]) == (0, 0, 0)

    def test_wci_one(self):
        assert word_count_iter(["a"]) == (1, 1, 1)

    def test_wci_simple(self):
        # i one-character, one-word lines
        for i in range(50):
            assert word_count_iter(i * ["a"]) == (i, i, i)

    def test_wci_scale(self):
        # each line "a bb" contributes 2 words and 4 characters
        for i in range(20):
            assert word_count_iter(i * ["a bb"]) == (i, 2 * i, 4 * i)

    def test_word_count_iter(self):
        # fuzz against the oracle with random line lists
        charset = string.printable
        for i in range(100):
            l = random.randrange(10)
            subject = [''.join(random.choice(charset) for j in range(1000)) for k in range(l)]
            assert word_count_iter(subject) == self.word_count_iter_oracle(subject)

    def word_count_oracle(self, file: str):
        # reference: tally over the lines of the file at *file*
        return self.word_count_iter_oracle(open(file))

    def test_some_known_files(self):
        # at least one of three known files must match; each failing
        # comparison (or missing file) decrements the counter
        count = 3
        try:
            assert word_count("/usr/share/dict/words") == (235886, 235886, 2493109)
        except:
            count = count - 1
        try:
            assert word_count("/usr/share/doc/libpython3.6-minimal/copyright") == (995, 7030, 49855)
        except:
            count = count - 1
        try:
            f = "/data/test_code.py"
            assert word_count(f) == self.word_count_oracle(f)
        except:
            count = count - 1
        assert count > 0
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
6080231deaceb0a21438174ebaeac53b7355ac8c | d5f611f9abadc91adc18cf958d9874a14e249b32 | /traffic/core/distance.py | dd6658d7a58149ca17872b7191849abda1c4dfca | [
"MIT"
] | permissive | avgisSI/traffic | c01943a6814f64a7d4f14f71696cbb9e90c4bc10 | d7f21401cba3a393dec082b1a7d8a152cc7a995b | refs/heads/master | 2023-07-22T17:18:48.421208 | 2021-09-07T12:43:50 | 2021-09-07T12:43:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,214 | py | import logging
from typing import TYPE_CHECKING, NamedTuple, Optional
import numpy as np
import pandas as pd
from . import geodesy as geo
if TYPE_CHECKING:
from ..data.basic.airports import Airports # noqa: F401
from .mixins import PointMixin # noqa: F401
from .structure import Airport
def closest_point(
    data: pd.DataFrame,
    point: Optional["PointMixin"] = None,
    *args,
    latitude: Optional[float] = None,
    longitude: Optional[float] = None,
) -> pd.Series:
    """Return the row of *data* closest to the given position.

    The reference position comes either from *point* or from the explicit
    *latitude*/*longitude* keywords.  The returned Series is the nearest
    row augmented with its ``distance`` (as computed by
    :func:`geo.distance`) and the reference point's name under ``point``.
    """
    if point is not None:
        latitude = point.latitude
        longitude = point.longitude
        name = point.name
    else:
        name = "unnamed"
    assert latitude is not None and longitude is not None

    size = len(data.latitude)
    distances = geo.distance(
        data.latitude.values,
        data.longitude.values,
        latitude * np.ones(size),
        longitude * np.ones(size),
    )
    index_min = distances.argmin()
    nearest = data.iloc[index_min]
    fields = dict(nearest)
    fields["distance"] = distances[index_min]
    fields["point"] = name
    return pd.Series(fields, name=nearest.name)
def guess_airport(
    point: Optional[NamedTuple] = None,
    *args,
    latitude: Optional[float] = None,
    longitude: Optional[float] = None,
    dataset: Optional["Airports"] = None,
    warning_distance: Optional[float] = None,
) -> "Airport":
    """Return the airport closest to the given position.

    The position comes either from *point* (anything exposing ``latitude``
    and ``longitude`` attributes) or from the explicit keyword arguments.
    When *dataset* is None the default airports database is used.  If the
    closest match lies farther than *warning_distance*, a warning is logged.
    """
    if dataset is None:
        from ..data import airports

        dataset = airports

    # TODO define a protocol instead of PointMixin
    if point is not None:
        longitude = point.longitude  # type: ignore
        latitude = point.latitude  # type: ignore
    if longitude is None or latitude is None:
        raise RuntimeError("latitude or longitude are None")

    candidate = closest_point(
        dataset.data, latitude=latitude, longitude=longitude
    )
    airport = dataset[candidate.icao]
    assert airport is not None
    airport.distance = candidate.distance  # type: ignore

    if warning_distance is not None and airport.distance > warning_distance:
        logging.warning(
            f"Closest airport is more than {warning_distance*1e-3}km away "
            f" (distance={airport.distance})"
        )
    return airport
| [
"git@xoolive.org"
] | git@xoolive.org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.